Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/accel/drm_accel.c2
-rw-r--r--drivers/accel/habanalabs/common/command_submission.c3
-rw-r--r--drivers/accel/habanalabs/common/debugfs.c18
-rw-r--r--drivers/accel/habanalabs/common/device.c55
-rw-r--r--drivers/accel/habanalabs/common/firmware_if.c25
-rw-r--r--drivers/accel/habanalabs/common/habanalabs.h43
-rw-r--r--drivers/accel/habanalabs/common/hw_queue.c17
-rw-r--r--drivers/accel/habanalabs/common/hwmon.c29
-rw-r--r--drivers/accel/habanalabs/common/mmu/Makefile2
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu.c223
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v1.c354
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v2.c338
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c24
-rw-r--r--drivers/accel/habanalabs/common/security.c33
-rw-r--r--drivers/accel/habanalabs/common/security.h3
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi.c9
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2.c308
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2P.h15
-rw-r--r--drivers/accel/habanalabs/goya/goya.c12
-rw-r--r--drivers/accel/habanalabs/goya/goya_coresight.c3
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h2
-rw-r--r--drivers/accel/ivpu/ivpu_debugfs.c32
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c12
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h7
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c49
-rw-r--r--drivers/accel/ivpu/ivpu_fw_log.c6
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c70
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h6
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c10
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c12
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c12
-rw-r--r--drivers/accel/ivpu/ivpu_job.c20
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c12
-rw-r--r--drivers/accel/ivpu/vpu_boot_api.h46
-rw-r--r--drivers/accel/ivpu/vpu_jsm_api.h32
-rw-r--r--drivers/accel/qaic/mhi_controller.c6
-rw-r--r--drivers/accel/qaic/qaic.h3
-rw-r--r--drivers/accel/qaic/qaic_data.c59
-rw-r--r--drivers/accel/qaic/qaic_drv.c140
-rw-r--r--drivers/acpi/Kconfig14
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_processor.c5
-rw-r--r--drivers/acpi/acpi_tad.c5
-rw-r--r--drivers/acpi/acpi_video.c28
-rw-r--r--drivers/acpi/acpi_watchdog.c2
-rw-r--r--drivers/acpi/apei/ghes.c80
-rw-r--r--drivers/acpi/apei/hest.c51
-rw-r--r--drivers/acpi/arm64/agdi.c8
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/cppc_acpi.c44
-rw-r--r--drivers/acpi/custom_method.c103
-rw-r--r--drivers/acpi/dptf/dptf_pch_fivr.c6
-rw-r--r--drivers/acpi/dptf/dptf_power.c6
-rw-r--r--drivers/acpi/ec.c112
-rw-r--r--drivers/acpi/evged.c5
-rw-r--r--drivers/acpi/fan_core.c6
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/mipi-disco-img.c71
-rw-r--r--drivers/acpi/nfit/core.c5
-rw-r--r--drivers/acpi/pci_slot.c2
-rw-r--r--drivers/acpi/pfr_telemetry.c6
-rw-r--r--drivers/acpi/pfr_update.c6
-rw-r--r--drivers/acpi/processor_driver.c6
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/property.c3
-rw-r--r--drivers/acpi/resource.c42
-rw-r--r--drivers/acpi/scan.c174
-rw-r--r--drivers/acpi/sleep.c12
-rw-r--r--drivers/acpi/thermal.c61
-rw-r--r--drivers/acpi/thermal_lib.c8
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/acpi/x86/s2idle.c37
-rw-r--r--drivers/acpi/x86/utils.c38
-rw-r--r--drivers/ata/Kconfig5
-rw-r--r--drivers/ata/ahci.c448
-rw-r--r--drivers/ata/ahci.h10
-rw-r--r--drivers/ata/ahci_ceva.c125
-rw-r--r--drivers/ata/libahci.c21
-rw-r--r--drivers/ata/libata-core.c59
-rw-r--r--drivers/ata/pata_parport/pata_parport.c2
-rw-r--r--drivers/atm/fore200e.c6
-rw-r--r--drivers/auxdisplay/Kconfig25
-rw-r--r--drivers/auxdisplay/Makefile2
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c6
-rw-r--r--drivers/auxdisplay/hd44780.c5
-rw-r--r--drivers/auxdisplay/ht16k33.c174
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c45
-rw-r--r--drivers/auxdisplay/line-display.c166
-rw-r--r--drivers/auxdisplay/line-display.h53
-rw-r--r--drivers/auxdisplay/max6959.c194
-rw-r--r--drivers/auxdisplay/panel.c202
-rw-r--r--drivers/auxdisplay/seg-led-gpio.c113
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/base/platform-msi.c119
-rw-r--r--drivers/base/power/common.c134
-rw-r--r--drivers/base/power/main.c267
-rw-r--r--drivers/base/power/runtime.c36
-rw-r--r--drivers/base/power/wakeirq.c4
-rw-r--r--drivers/base/regmap/internal.h1
-rw-r--r--drivers/base/regmap/regcache-flat.c2
-rw-r--r--drivers/base/regmap/regcache.c4
-rw-r--r--drivers/base/regmap/regmap-kunit.c66
-rw-r--r--drivers/base/regmap/regmap.c10
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoeblk.c15
-rw-r--r--drivers/block/aoe/aoecmd.c12
-rw-r--r--drivers/block/aoe/aoenet.c1
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/brd.c26
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_main.c17
-rw-r--r--drivers/block/drbd/drbd_nl.c268
-rw-r--r--drivers/block/drbd/drbd_state.c24
-rw-r--r--drivers/block/drbd/drbd_state_change.h8
-rw-r--r--drivers/block/floppy.c17
-rw-r--r--drivers/block/loop.c75
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c13
-rw-r--r--drivers/block/n64cart.c12
-rw-r--r--drivers/block/nbd.c49
-rw-r--r--drivers/block/null_blk/main.c535
-rw-r--r--drivers/block/null_blk/null_blk.h24
-rw-r--r--drivers/block/null_blk/trace.h5
-rw-r--r--drivers/block/null_blk/zoned.c25
-rw-r--r--drivers/block/pktcdvd.c109
-rw-r--r--drivers/block/ps3disk.c17
-rw-r--r--drivers/block/ps3vram.c6
-rw-r--r--drivers/block/rbd.c31
-rw-r--r--drivers/block/rnbd/rnbd-clt.c64
-rw-r--r--drivers/block/rnbd/rnbd-srv.c28
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/block/sunvdc.c18
-rw-r--r--drivers/block/swim.c8
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/ublk_drv.c111
-rw-r--r--drivers/block/virtio_blk.c303
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkback/common.h4
-rw-r--r--drivers/block/xen-blkback/xenbus.c37
-rw-r--r--drivers/block/xen-blkfront.c53
-rw-r--r--drivers/block/z2ram.c2
-rw-r--r--drivers/block/zram/zram_drv.c77
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/bluetooth/btbcm.c12
-rw-r--r--drivers/bluetooth/btintel.c116
-rw-r--r--drivers/bluetooth/btmtk.c5
-rw-r--r--drivers/bluetooth/btmtk.h1
-rw-r--r--drivers/bluetooth/btnxpuart.c27
-rw-r--r--drivers/bluetooth/btqca.c2
-rw-r--r--drivers/bluetooth/btrtl.c14
-rw-r--r--drivers/bluetooth/btusb.c30
-rw-r--r--drivers/bluetooth/hci_bcm4377.c3
-rw-r--r--drivers/bluetooth/hci_h5.c5
-rw-r--r--drivers/bluetooth/hci_qca.c28
-rw-r--r--drivers/bluetooth/hci_serdev.c9
-rw-r--r--drivers/bluetooth/hci_uart.h12
-rw-r--r--drivers/bus/Kconfig5
-rw-r--r--drivers/bus/imx-weim.c2
-rw-r--r--drivers/bus/sunxi-rsb.c4
-rw-r--r--drivers/bus/ti-sysc.c2
-rw-r--r--drivers/cache/ax45mp_cache.c4
-rw-r--r--drivers/cdrom/gdrom.c20
-rw-r--r--drivers/char/agp/agp.h1
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c6
-rw-r--r--drivers/char/tpm/tpm_tis.c1
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c2
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3588.c5
-rw-r--r--drivers/clk/rockchip/clk.c17
-rw-r--r--drivers/clk/rockchip/clk.h2
-rw-r--r--drivers/clk/samsung/clk-gs101.c2
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c13
-rw-r--r--drivers/clocksource/arm_arch_timer.c6
-rw-r--r--drivers/comedi/drivers/comedi_8255.c1
-rw-r--r--drivers/comedi/drivers/comedi_test.c30
-rw-r--r--drivers/counter/counter-core.c7
-rw-r--r--drivers/cpufreq/Kconfig.arm1
-rw-r--r--drivers/cpufreq/amd-pstate.c200
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/cpufreq.c32
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c1
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c45
-rw-r--r--drivers/cpufreq/intel_pstate.c49
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c19
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c26
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/cpuidle/governors/haltpoll.c9
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c34
-rw-r--r--drivers/crypto/ccp/Kconfig2
-rw-r--r--drivers/crypto/ccp/sev-dev.c1150
-rw-r--r--drivers/crypto/ccp/sev-dev.h5
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c4
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c5
-rw-r--r--drivers/cxl/acpi.c46
-rw-r--r--drivers/cxl/core/cdat.c86
-rw-r--r--drivers/cxl/core/mbox.c4
-rw-r--r--drivers/cxl/core/memdev.c63
-rw-r--r--drivers/cxl/core/pci.c49
-rw-r--r--drivers/cxl/core/region.c62
-rw-r--r--drivers/cxl/cxl.h2
-rw-r--r--drivers/cxl/cxlmem.h10
-rw-r--r--drivers/cxl/mem.c56
-rw-r--r--drivers/cxl/pci.c57
-rw-r--r--drivers/dax/super.c3
-rw-r--r--drivers/dma-buf/dma-fence.c8
-rw-r--r--drivers/dma-buf/dma-resv.c4
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c17
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-core.c39
-rw-r--r--drivers/dma/dw-edma/dw-hdma-v0-regs.h2
-rw-r--r--drivers/dma/fsl-edma-common.c2
-rw-r--r--drivers/dma/fsl-edma-common.h5
-rw-r--r--drivers/dma/fsl-edma-main.c4
-rw-r--r--drivers/dma/fsl-qdma.c38
-rw-r--r--drivers/dma/idxd/cdev.c2
-rw-r--r--drivers/dma/idxd/debugfs.c2
-rw-r--r--drivers/dma/idxd/idxd.h1
-rw-r--r--drivers/dma/idxd/init.c15
-rw-r--r--drivers/dma/idxd/irq.c3
-rw-r--r--drivers/dma/mv_xor_v2.c8
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c2
-rw-r--r--drivers/dma/qcom/hidma.c6
-rw-r--r--drivers/dpll/dpll_core.c35
-rw-r--r--drivers/dpll/dpll_core.h2
-rw-r--r--drivers/dpll/dpll_netlink.c47
-rw-r--r--drivers/edac/Kconfig1
-rw-r--r--drivers/edac/amd64_edac.c290
-rw-r--r--drivers/edac/i10nm_base.c1
-rw-r--r--drivers/edac/igen6_edac.c2
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/synopsys_edac.c4
-rw-r--r--drivers/edac/versal_edac.c199
-rw-r--r--drivers/firewire/core-card.c14
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/arm_ffa/bus.c2
-rw-r--r--drivers/firmware/arm_scmi/bus.c26
-rw-r--r--drivers/firmware/arm_scmi/clock.c194
-rw-r--r--drivers/firmware/arm_scmi/common.h2
-rw-r--r--drivers/firmware/arm_scmi/driver.c104
-rw-r--r--drivers/firmware/arm_scmi/notify.c17
-rw-r--r--drivers/firmware/arm_scmi/notify.h4
-rw-r--r--drivers/firmware/arm_scmi/optee.c6
-rw-r--r--drivers/firmware/arm_scmi/perf.c216
-rw-r--r--drivers/firmware/arm_scmi/power.c30
-rw-r--r--drivers/firmware/arm_scmi/powercap.c57
-rw-r--r--drivers/firmware/arm_scmi/protocols.h9
-rw-r--r--drivers/firmware/arm_scmi/reset.c37
-rw-r--r--drivers/firmware/arm_scmi/sensors.c37
-rw-r--r--drivers/firmware/arm_scmi/smc.c7
-rw-r--r--drivers/firmware/arm_scmi/system.c16
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c7
-rw-r--r--drivers/firmware/efi/capsule-loader.c2
-rw-r--r--drivers/firmware/efi/cper.c4
-rw-r--r--drivers/firmware/efi/efi-pstore.c43
-rw-r--r--drivers/firmware/efi/efi.c3
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c106
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/efistub.h97
-rw-r--r--drivers/firmware/efi/libstub/tpm.c82
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c16
-rw-r--r--drivers/firmware/google/cbmem.c8
-rw-r--r--drivers/firmware/google/coreboot_table.c22
-rw-r--r--drivers/firmware/google/coreboot_table.h3
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c8
-rw-r--r--drivers/firmware/google/memconsole-coreboot.c8
-rw-r--r--drivers/firmware/google/vpd.c8
-rw-r--r--drivers/firmware/microchip/mpfs-auto-update.c5
-rw-r--r--drivers/firmware/sysfb.c53
-rw-r--r--drivers/firmware/sysfb_simplefb.c5
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c2
-rw-r--r--drivers/fpga/ice40-spi.c4
-rw-r--r--drivers/gpio/Kconfig27
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-cros-ec.c209
-rw-r--r--drivers/gpio/gpio-eic-sprd.c10
-rw-r--r--drivers/gpio/gpio-mvebu.c18
-rw-r--r--drivers/gpio/gpio-nomadik.c730
-rw-r--r--drivers/gpio/gpio-sim.c49
-rw-r--r--drivers/gpio/gpiolib-acpi.c15
-rw-r--r--drivers/gpio/gpiolib-acpi.h5
-rw-r--r--drivers/gpio/gpiolib-cdev.c96
-rw-r--r--drivers/gpio/gpiolib-devres.c2
-rw-r--r--drivers/gpio/gpiolib-legacy.c12
-rw-r--r--drivers/gpio/gpiolib-of.c29
-rw-r--r--drivers/gpio/gpiolib-of.h6
-rw-r--r--drivers/gpio/gpiolib-sysfs.c150
-rw-r--r--drivers/gpio/gpiolib.c984
-rw-r--r--drivers/gpio/gpiolib.h95
-rw-r--r--drivers/gpu/drm/Kconfig24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c879
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h202
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c251
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c686
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c255
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_i2c.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_si.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c767
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c491
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c570
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c495
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c672
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c263
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c1339
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c281
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h545
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm173
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c94
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h25
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c8
-rw-r--r--drivers/gpu/drm/amd/display/TODO110
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c281
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c72
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c55
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c119
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c217
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c293
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c106
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c574
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c109
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c167
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h256
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c409
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c372
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c2156
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h23
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h23
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h145
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c20
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c129
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_stats.h4
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/arct_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h287
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h1348
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h24
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h65
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_offset.h15259
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_sh_mask.h53464
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h219
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h735
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h388
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h1411
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h468
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h692
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_offset.h11287
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_sh_mask.h32806
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h279
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h1029
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_offset.h630
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_sh_mask.h4250
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h1672
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h7627
-rw-r--r--drivers/gpu/drm/amd/include/atom-bits.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h32
-rw-r--r--drivers/gpu/drm/amd/include/beige_goby_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h23
-rw-r--r--drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h9
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h6
-rw-r--r--drivers/gpu/drm/amd/include/navi12_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/navi14_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h6
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/v10_structs.h3
-rw-r--r--drivers/gpu/drm/amd/include/vangogh_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/vega10_ip_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/vega20_ip_offset.h78
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c15
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c94
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c42
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c42
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c332
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h10
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c69
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c30
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c70
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c28
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c12
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig18
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c207
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c154
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c21
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c16
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c20
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c9
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c19
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c18
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c22
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c18
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c38
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c17
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c45
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c195
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c40
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c18
-rw-r--r--drivers/gpu/drm/ci/build.sh1
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml14
-rw-r--r--drivers/gpu/drm/ci/test.yml31
-rw-r--r--drivers/gpu/drm/ci/testlist.txt1937
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt30
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt7
-rw-r--r--drivers/gpu/drm/display/Kconfig21
-rw-r--r--drivers/gpu/drm/display/Makefile2
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c179
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c23
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c1949
-rw-r--r--drivers/gpu/drm/drm_bridge.c17
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c16
-rw-r--r--drivers/gpu/drm/drm_buddy.c20
-rw-r--r--drivers/gpu/drm/drm_crtc.c23
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c25
-rw-r--r--drivers/gpu/drm/drm_edid_load.c162
-rw-r--r--drivers/gpu/drm/drm_exec.c2
-rw-r--r--drivers/gpu/drm/drm_file.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c46
-rw-r--r--drivers/gpu/drm/drm_ioc32.c4
-rw-r--r--drivers/gpu/drm/drm_managed.c39
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/drm_mode_config.c2
-rw-r--r--drivers/gpu/drm/drm_modes.c22
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c19
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c12
-rw-r--r--drivers/gpu/drm/drm_print.c29
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c65
-rw-r--r--drivers/gpu/drm/drm_syncobj.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c93
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c33
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c43
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c1
-rw-r--r--drivers/gpu/drm/i915/Kconfig14
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/Makefile3
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c6
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c30
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h7
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c109
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c426
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c261
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c235
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c94
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h69
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c575
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c149
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c811
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h133
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c186
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c137
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c296
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c182
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c255
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c205
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c240
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c130
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h12
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c33
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.h2
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c108
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h4
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c50
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c45
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.h14
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c18
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c3
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c10
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c126
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c64
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h11
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.h2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h1
-rw-r--r--drivers/gpu/drm/i915/i915_query.c35
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h18
-rw-r--r--drivers/gpu/drm/i915/i915_request.c1
-rw-r--r--drivers/gpu/drm/i915/i915_syncmap.c19
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c17
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h1
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c33
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h5
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c8
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c5
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c6
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c16
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-blkctl.c13
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-ctxld.c14
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c17
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.h1
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dpr.c21
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c12
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dtg.c26
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-scaler.c21
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-ss.c12
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig1
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c2
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.h1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c23
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c39
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.c6
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c18
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.c3
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c37
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c38
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c2
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c2
-rw-r--r--drivers/gpu/drm/mcde/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c65
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c31
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c29
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c310
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c26
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c6
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c25
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.h2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c24
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.h2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c36
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.h2
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig12
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c26
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c22
-rw-r--r--drivers/gpu/drm/msm/Makefile5
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h73
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h131
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h182
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h666
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h5257
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h179
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c220
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c727
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h311
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h260
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c69
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h928
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h753
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h31
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h573
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h291
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h225
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h449
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c347
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h33
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c95
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c92
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c188
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c133
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c154
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h74
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c61
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c42
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c42
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c71
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c101
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c271
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c375
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h17
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c205
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h23
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c119
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c327
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h155
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.c183
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.h95
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.c96
-rw-r--r--drivers/gpu/drm/msm/dp/dp_utils.h36
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h22
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c51
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c65
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c33
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c33
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h36
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c13
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h4
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c64
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c7
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c7
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c59
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c22
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c12
-rw-r--r--drivers/gpu/drm/panel/Kconfig231
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c18
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c322
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c10
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c119
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c372
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c23
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c265
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c424
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c8
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c643
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c81
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c104
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c1
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c1
-rw-r--r--drivers/gpu/drm/pl111/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/atom-bits.h2
-rw-r--r--drivers/gpu/drm/radeon/atom.c47
-rw-r--r--drivers/gpu/drm/radeon/atom.h4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c28
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c38
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c90
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c31
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h6
-rw-r--r--drivers/gpu/drm/radeon/cik.c40
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h9
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c20
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h10
-rw-r--r--drivers/gpu/drm/radeon/evergreen_smc.h9
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c33
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.h12
-rw-r--r--drivers/gpu/drm/radeon/nislands_smc.h51
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r600.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c1
-rw-r--r--drivers/gpu/drm/radeon/rs400.c4
-rw-r--r--drivers/gpu/drm/radeon/rs600.c3
-rw-r--r--drivers/gpu/drm/radeon/rv515.c3
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h27
-rw-r--r--drivers/gpu/drm/radeon/si.c103
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c132
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h21
-rw-r--r--drivers/gpu/drm/radeon/smu7.h6
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h51
-rw-r--r--drivers/gpu/drm/radeon/smu7_fusion.h42
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c18
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c22
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c2
-rw-r--r--drivers/gpu/drm/renesas/Kconfig1
-rw-r--r--drivers/gpu/drm/renesas/Makefile1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig12
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Makefile8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c422
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h89
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c175
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h78
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c72
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h32
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c371
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h43
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c349
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h82
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c3
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c549
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h3
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c11
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c7
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c370
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.h5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c134
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c14
-rw-r--r--drivers/gpu/drm/tegra/drm.c23
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c59
-rw-r--r--drivers/gpu/drm/tegra/fb.c1
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c21
-rw-r--r--drivers/gpu/drm/tegra/output.c17
-rw-r--r--drivers/gpu/drm/tegra/rgb.c18
-rw-r--r--drivers/gpu/drm/tegra/sor.c1
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c237
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c170
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c150
-rw-r--r--drivers/gpu/drm/tests/drm_managed_test.c77
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c19
-rw-r--r--drivers/gpu/drm/ttm/tests/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c622
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c48
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c335
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_tt_test.c295
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c76
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c15
-rw-r--r--drivers/gpu/drm/tve200/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c12
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c17
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h2
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_submit.c6
-rw-r--r--drivers/gpu/drm/vkms/Kconfig15
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c300
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c32
-rw-r--r--drivers/gpu/drm/xe/.kunitconfig5
-rw-r--r--drivers/gpu/drm/xe/Kconfig3
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/xe/Makefile45
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h44
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h174
-rw-r--r--drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h3
-rw-r--r--drivers/gpu/drm/xe/abi/guc_messages_abi.h2
-rw-r--r--drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h79
-rw-r--r--drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h118
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h10
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h3
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c (renamed from drivers/gpu/drm/xe/xe_display.c)0
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h (renamed from drivers/gpu/drm/xe/xe_display.h)0
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c67
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h27
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h21
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile7
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c201
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_relay_test.c522
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.c90
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.h17
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c8
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c36
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.h1
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test_mod.c10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c16
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c134
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h7
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c1
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c55
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_device.c114
-rw-r--r--drivers/gpu/drm/xe/xe_device.h14
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h174
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c14
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c42
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c238
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h3
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h63
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c12
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c81
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c71
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c537
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h20
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c92
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c17
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c44
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h44
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_printk.h34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h118
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c115
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c255
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_db_mgr.c266
-rw-r--r--drivers/gpu/drm/xe/xe_guc_db_mgr.h22
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hxg_helpers.h108
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c941
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.h37
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c90
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit_types.h18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h47
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c19
-rw-r--r--drivers/gpu/drm/xe/xe_huc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c144
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c38
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h82
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c32
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c136
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c48
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c430
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.h26
-rw-r--r--drivers/gpu/drm/xe/xe_memirq_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c27
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c11
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c27
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c5
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c10
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h7
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c40
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c16
-rw-r--r--drivers/gpu/drm/xe/xe_query.c50
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c2
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c8
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c60
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c38
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h5
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c32
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h12
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c58
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c5
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c3
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h59
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c8
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c9
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c33
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c60
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c293
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h7
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h45
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c128
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.h13
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c191
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules12
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.c2
-rw-r--r--drivers/gpu/drm/xe/xe_wopcm_types.h4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c22
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/bus.h2
-rw-r--r--drivers/gpu/host1x/cdma.c3
-rw-r--r--drivers/gpu/host1x/dev.c15
-rw-r--r--drivers/gpu/host1x/dev.h6
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h16
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c118
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h6
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c2
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c4
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c10
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c8
-rw-r--r--drivers/hid/hid-apple.c1
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-lenovo.c57
-rw-r--r--drivers/hid/hid-lg3ff.c4
-rw-r--r--drivers/hid/hid-multitouch.c1
-rw-r--r--drivers/hid/hid-nintendo.c22
-rw-r--r--drivers/hid/hid-prodikeys.c115
-rw-r--r--drivers/hid/hid-samsung.c437
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/hsi/clients/ssi_protocol.c3
-rw-r--r--drivers/hsi/hsi_core.c2
-rw-r--r--drivers/hv/channel.c176
-rw-r--r--drivers/hv/hv_util.c31
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/Kconfig76
-rw-r--r--drivers/hwmon/Makefile7
-rw-r--r--drivers/hwmon/adm1177.c1
-rw-r--r--drivers/hwmon/adt7310.c2
-rw-r--r--drivers/hwmon/adt7410.c4
-rw-r--r--drivers/hwmon/amc6821.c11
-rw-r--r--drivers/hwmon/aspeed-g6-pwm-tach.c549
-rw-r--r--drivers/hwmon/asus_rog_ryujin.c609
-rw-r--r--drivers/hwmon/axi-fan-control.c75
-rw-r--r--drivers/hwmon/chipcap2.c822
-rw-r--r--drivers/hwmon/coretemp.c206
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c14
-rw-r--r--drivers/hwmon/ds1621.c1
-rw-r--r--drivers/hwmon/ds620.c1
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/emc2305.c5
-rw-r--r--drivers/hwmon/fam15h_power.c2
-rw-r--r--drivers/hwmon/hwmon.c3
-rw-r--r--drivers/hwmon/ina209.c1
-rw-r--r--drivers/hwmon/ina238.c1
-rw-r--r--drivers/hwmon/ina3221.c2
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/lm83.c2
-rw-r--r--drivers/hwmon/ltc4282.c1782
-rw-r--r--drivers/hwmon/max127.c1
-rw-r--r--drivers/hwmon/max31760.c3
-rw-r--r--drivers/hwmon/max31790.c1
-rw-r--r--drivers/hwmon/max31827.c1
-rw-r--r--drivers/hwmon/max6621.c1
-rw-r--r--drivers/hwmon/max6697.c1
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/nct6775-core.c14
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nzxt-kraken3.c1008
-rw-r--r--drivers/hwmon/occ/p8_i2c.c1
-rw-r--r--drivers/hwmon/oxp-sensors.c10
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/ir36021.c1
-rw-r--r--drivers/hwmon/pmbus/ir38064.c2
-rw-r--r--drivers/hwmon/pmbus/lm25066.c2
-rw-r--r--drivers/hwmon/pmbus/mpq8785.c90
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c2
-rw-r--r--drivers/hwmon/pmbus/tda38640.c2
-rw-r--r--drivers/hwmon/powr1220.c1
-rw-r--r--drivers/hwmon/pt5161l.c667
-rw-r--r--drivers/hwmon/sbrmi.c1
-rw-r--r--drivers/hwmon/sbtsi_temp.c1
-rw-r--r--drivers/hwmon/sch5627.c2
-rw-r--r--drivers/hwmon/sht3x.c66
-rw-r--r--drivers/hwmon/sis5595.c8
-rw-r--r--drivers/hwmon/surface_fan.c91
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/hwmon/w83773g.c1
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-imx.c5
-rw-r--r--drivers/i2c/busses/i2c-wmt.c6
-rw-r--r--drivers/idle/intel_idle.c3
-rw-r--r--drivers/iio/accel/adxl367.c8
-rw-r--r--drivers/iio/accel/adxl367_i2c.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c14
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c5
-rw-r--r--drivers/iio/pressure/bmp280-spi.c50
-rw-r--r--drivers/iio/pressure/dlhl60d.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c43
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c3
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c6
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/irdma/defs.h1
-rw-r--r--drivers/infiniband/hw/irdma/hw.c8
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c9
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c6
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c2
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c17
-rw-r--r--drivers/input/joystick/psxpad-spi.c4
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c10
-rw-r--r--drivers/input/mouse/bcm5974.c20
-rw-r--r--drivers/input/rmi4/rmi_driver.c6
-rw-r--r--drivers/input/rmi4/rmi_spi.c2
-rw-r--r--drivers/iommu/Kconfig8
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd/amd_iommu.h42
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h34
-rw-r--r--drivers/iommu/amd/init.c137
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c21
-rw-r--r--drivers/iommu/amd/iommu.c634
-rw-r--r--drivers/iommu/apple-dart.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c60
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c819
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h4
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c1
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c20
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c3
-rw-r--r--drivers/iommu/dma-iommu.c5
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/intel/Kconfig12
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/dmar.c26
-rw-r--r--drivers/iommu/intel/iommu.c551
-rw-r--r--drivers/iommu/intel/iommu.h21
-rw-r--r--drivers/iommu/intel/nested.c16
-rw-r--r--drivers/iommu/intel/pasid.c210
-rw-r--r--drivers/iommu/intel/pasid.h3
-rw-r--r--drivers/iommu/intel/perf.c2
-rw-r--r--drivers/iommu/intel/svm.c76
-rw-r--r--drivers/iommu/io-pgfault.c463
-rw-r--r--drivers/iommu/iommu-priv.h5
-rw-r--r--drivers/iommu/iommu-sva.c88
-rw-r--r--drivers/iommu/iommu-sva.h71
-rw-r--r--drivers/iommu/iommu.c280
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c3
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c9
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h1
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c68
-rw-r--r--drivers/iommu/iommufd/selftest.c148
-rw-r--r--drivers/iommu/iova.c143
-rw-r--r--drivers/iommu/ipmmu-vmsa.c19
-rw-r--r--drivers/iommu/irq_remapping.c3
-rw-r--r--drivers/iommu/msm_iommu.c4
-rw-r--r--drivers/iommu/mtk_iommu.c5
-rw-r--r--drivers/iommu/mtk_iommu_v1.c7
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/rockchip-iommu.c2
-rw-r--r--drivers/iommu/sprd-iommu.c3
-rw-r--r--drivers/iommu/sun50i-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/iommu/virtio-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig11
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c6
-rw-r--r--drivers/irqchip/irq-gic-v3.c57
-rw-r--r--drivers/irqchip/irq-gic.c27
-rw-r--r--drivers/irqchip/irq-imgpdc.c7
-rw-r--r--drivers/irqchip/irq-imx-intmux.c18
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c14
-rw-r--r--drivers/irqchip/irq-keystone.c5
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c22
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c12
-rw-r--r--drivers/irqchip/irq-madera.c8
-rw-r--r--drivers/irqchip/irq-mbigen.c8
-rw-r--r--drivers/irqchip/irq-meson-gpio.c5
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c12
-rw-r--r--drivers/irqchip/irq-pruss-intc.c14
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c11
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c9
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c7
-rw-r--r--drivers/irqchip/irq-riscv-intc.c104
-rw-r--r--drivers/irqchip/irq-sifive-plic.c283
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c207
-rw-r--r--drivers/irqchip/irq-stm32-exti.c9
-rw-r--r--drivers/irqchip/irq-ts4800.c12
-rw-r--r--drivers/irqchip/irq-vic.c3
-rw-r--r--drivers/isdn/capi/capi.c21
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c16
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/flash/Kconfig7
-rw-r--r--drivers/leds/flash/leds-ktd2692.c116
-rw-r--r--drivers/leds/flash/leds-lm3601x.c3
-rw-r--r--drivers/leds/flash/leds-sgm3140.c3
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/led-triggers.c38
-rw-r--r--drivers/leds/leds-aw200xx.c2
-rw-r--r--drivers/leds/leds-aw2013.c1
-rw-r--r--drivers/leds/leds-expresswire.c72
-rw-r--r--drivers/leds/leds-mlxcpld.c2
-rw-r--r--drivers/leds/leds-mlxreg.c1
-rw-r--r--drivers/leds/leds-pca963x.c28
-rw-r--r--drivers/leds/leds-spi-byte.c11
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/rgb/Kconfig12
-rw-r--r--drivers/leds/rgb/Makefile1
-rw-r--r--drivers/leds/rgb/leds-group-multicolor.c8
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c271
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c382
-rw-r--r--drivers/leds/trigger/ledtrig-audio.c2
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c1
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c102
-rw-r--r--drivers/leds/trigger/ledtrig-panic.c23
-rw-r--r--drivers/macintosh/via-pmu-backlight.c1
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c8
-rw-r--r--drivers/mailbox/imx-mailbox.c88
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/super.c133
-rw-r--r--drivers/md/dm-bio-prison-v1.c2
-rw-r--r--drivers/md/dm-bufio.c74
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-crypt.c109
-rw-r--r--drivers/md/dm-dust.c2
-rw-r--r--drivers/md/dm-ebs-target.c2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c105
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-kcopyd.c4
-rw-r--r--drivers/md/dm-log-userspace-base.c2
-rw-r--r--drivers/md/dm-log.c6
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-ps-round-robin.c2
-rw-r--r--drivers/md/dm-raid.c101
-rw-r--r--drivers/md/dm-raid1.c6
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-thin.c22
-rw-r--r--drivers/md/dm-vdo/Kconfig17
-rw-r--r--drivers/md/dm-vdo/Makefile57
-rw-r--r--drivers/md/dm-vdo/action-manager.c388
-rw-r--r--drivers/md/dm-vdo/action-manager.h110
-rw-r--r--drivers/md/dm-vdo/admin-state.c506
-rw-r--r--drivers/md/dm-vdo/admin-state.h178
-rw-r--r--drivers/md/dm-vdo/block-map.c3318
-rw-r--r--drivers/md/dm-vdo/block-map.h394
-rw-r--r--drivers/md/dm-vdo/completion.c140
-rw-r--r--drivers/md/dm-vdo/completion.h152
-rw-r--r--drivers/md/dm-vdo/constants.h96
-rw-r--r--drivers/md/dm-vdo/cpu.h59
-rw-r--r--drivers/md/dm-vdo/data-vio.c2063
-rw-r--r--drivers/md/dm-vdo/data-vio.h670
-rw-r--r--drivers/md/dm-vdo/dedupe.c3003
-rw-r--r--drivers/md/dm-vdo/dedupe.h120
-rw-r--r--drivers/md/dm-vdo/dm-vdo-target.c2910
-rw-r--r--drivers/md/dm-vdo/dump.c275
-rw-r--r--drivers/md/dm-vdo/dump.h17
-rw-r--r--drivers/md/dm-vdo/encodings.c1483
-rw-r--r--drivers/md/dm-vdo/encodings.h1298
-rw-r--r--drivers/md/dm-vdo/errors.c307
-rw-r--r--drivers/md/dm-vdo/errors.h73
-rw-r--r--drivers/md/dm-vdo/flush.c560
-rw-r--r--drivers/md/dm-vdo/flush.h44
-rw-r--r--drivers/md/dm-vdo/funnel-queue.c170
-rw-r--r--drivers/md/dm-vdo/funnel-queue.h110
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.c638
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.h51
-rw-r--r--drivers/md/dm-vdo/indexer/chapter-index.c293
-rw-r--r--drivers/md/dm-vdo/indexer/chapter-index.h61
-rw-r--r--drivers/md/dm-vdo/indexer/config.c376
-rw-r--r--drivers/md/dm-vdo/indexer/config.h124
-rw-r--r--drivers/md/dm-vdo/indexer/delta-index.c1970
-rw-r--r--drivers/md/dm-vdo/indexer/delta-index.h279
-rw-r--r--drivers/md/dm-vdo/indexer/funnel-requestqueue.c279
-rw-r--r--drivers/md/dm-vdo/indexer/funnel-requestqueue.h31
-rw-r--r--drivers/md/dm-vdo/indexer/geometry.c201
-rw-r--r--drivers/md/dm-vdo/indexer/geometry.h140
-rw-r--r--drivers/md/dm-vdo/indexer/hash-utils.h66
-rw-r--r--drivers/md/dm-vdo/indexer/index-layout.c1765
-rw-r--r--drivers/md/dm-vdo/indexer/index-layout.h43
-rw-r--r--drivers/md/dm-vdo/indexer/index-page-map.c173
-rw-r--r--drivers/md/dm-vdo/indexer/index-page-map.h50
-rw-r--r--drivers/md/dm-vdo/indexer/index-session.c739
-rw-r--r--drivers/md/dm-vdo/indexer/index-session.h85
-rw-r--r--drivers/md/dm-vdo/indexer/index.c1388
-rw-r--r--drivers/md/dm-vdo/indexer/index.h83
-rw-r--r--drivers/md/dm-vdo/indexer/indexer.h353
-rw-r--r--drivers/md/dm-vdo/indexer/io-factory.c415
-rw-r--r--drivers/md/dm-vdo/indexer/io-factory.h64
-rw-r--r--drivers/md/dm-vdo/indexer/open-chapter.c426
-rw-r--r--drivers/md/dm-vdo/indexer/open-chapter.h79
-rw-r--r--drivers/md/dm-vdo/indexer/radix-sort.c330
-rw-r--r--drivers/md/dm-vdo/indexer/radix-sort.h26
-rw-r--r--drivers/md/dm-vdo/indexer/sparse-cache.c624
-rw-r--r--drivers/md/dm-vdo/indexer/sparse-cache.h46
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.c1283
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.h193
-rw-r--r--drivers/md/dm-vdo/indexer/volume.c1693
-rw-r--r--drivers/md/dm-vdo/indexer/volume.h172
-rw-r--r--drivers/md/dm-vdo/int-map.c707
-rw-r--r--drivers/md/dm-vdo/int-map.h39
-rw-r--r--drivers/md/dm-vdo/io-submitter.c477
-rw-r--r--drivers/md/dm-vdo/io-submitter.h47
-rw-r--r--drivers/md/dm-vdo/logger.c239
-rw-r--r--drivers/md/dm-vdo/logger.h100
-rw-r--r--drivers/md/dm-vdo/logical-zone.c373
-rw-r--r--drivers/md/dm-vdo/logical-zone.h89
-rw-r--r--drivers/md/dm-vdo/memory-alloc.c438
-rw-r--r--drivers/md/dm-vdo/memory-alloc.h162
-rw-r--r--drivers/md/dm-vdo/message-stats.c432
-rw-r--r--drivers/md/dm-vdo/message-stats.h13
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c175
-rw-r--r--drivers/md/dm-vdo/murmurhash3.h15
-rw-r--r--drivers/md/dm-vdo/numeric.h78
-rw-r--r--drivers/md/dm-vdo/packer.c780
-rw-r--r--drivers/md/dm-vdo/packer.h122
-rw-r--r--drivers/md/dm-vdo/permassert.c26
-rw-r--r--drivers/md/dm-vdo/permassert.h45
-rw-r--r--drivers/md/dm-vdo/physical-zone.c644
-rw-r--r--drivers/md/dm-vdo/physical-zone.h115
-rw-r--r--drivers/md/dm-vdo/priority-table.c224
-rw-r--r--drivers/md/dm-vdo/priority-table.h47
-rw-r--r--drivers/md/dm-vdo/recovery-journal.c1762
-rw-r--r--drivers/md/dm-vdo/recovery-journal.h316
-rw-r--r--drivers/md/dm-vdo/repair.c1756
-rw-r--r--drivers/md/dm-vdo/repair.h14
-rw-r--r--drivers/md/dm-vdo/slab-depot.c5101
-rw-r--r--drivers/md/dm-vdo/slab-depot.h601
-rw-r--r--drivers/md/dm-vdo/statistics.h278
-rw-r--r--drivers/md/dm-vdo/status-codes.c94
-rw-r--r--drivers/md/dm-vdo/status-codes.h86
-rw-r--r--drivers/md/dm-vdo/string-utils.c22
-rw-r--r--drivers/md/dm-vdo/string-utils.h23
-rw-r--r--drivers/md/dm-vdo/thread-device.c34
-rw-r--r--drivers/md/dm-vdo/thread-device.h20
-rw-r--r--drivers/md/dm-vdo/thread-registry.c93
-rw-r--r--drivers/md/dm-vdo/thread-registry.h32
-rw-r--r--drivers/md/dm-vdo/thread-utils.c108
-rw-r--r--drivers/md/dm-vdo/thread-utils.h20
-rw-r--r--drivers/md/dm-vdo/time-utils.h28
-rw-r--r--drivers/md/dm-vdo/types.h393
-rw-r--r--drivers/md/dm-vdo/vdo.c1730
-rw-r--r--drivers/md/dm-vdo/vdo.h362
-rw-r--r--drivers/md/dm-vdo/vio.c500
-rw-r--r--drivers/md/dm-vdo/vio.h199
-rw-r--r--drivers/md/dm-vdo/wait-queue.c205
-rw-r--r--drivers/md/dm-vdo/wait-queue.h138
-rw-r--r--drivers/md/dm-verity-fec.c21
-rw-r--r--drivers/md/dm-verity-target.c173
-rw-r--r--drivers/md/dm-verity.h15
-rw-r--r--drivers/md/dm-writecache.c10
-rw-r--r--drivers/md/dm-zoned-metadata.c5
-rw-r--r--drivers/md/dm.c55
-rw-r--r--drivers/md/md-bitmap.c18
-rw-r--r--drivers/md/md-linear.h17
-rw-r--r--drivers/md/md-multipath.h32
-rw-r--r--drivers/md/md.c482
-rw-r--r--drivers/md/md.h79
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c2
-rw-r--r--drivers/md/raid0.c42
-rw-r--r--drivers/md/raid1-10.c69
-rw-r--r--drivers/md/raid1.c601
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c159
-rw-r--r--drivers/md/raid5-ppl.c3
-rw-r--r--drivers/md/raid5.c302
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c2
-rw-r--r--drivers/media/i2c/ov64a40.c2
-rw-r--r--drivers/media/i2c/tc358743.c7
-rw-r--r--drivers/media/i2c/thp7312.c2
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c14
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c48
-rw-r--r--drivers/media/platform/qcom/venus/core.c12
-rw-r--r--drivers/media/platform/qcom/venus/core.h7
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c48
-rw-r--r--drivers/media/rc/bpf-lirc.c2
-rw-r--r--drivers/media/usb/msi2500/msi2500.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-spi.c4
-rw-r--r--drivers/memory/emif.c65
-rw-r--r--drivers/memory/stm32-fmc2-ebi.c729
-rw-r--r--drivers/memory/tegra/tegra234.c48
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/memstick/core/ms_block.c14
-rw-r--r--drivers/memstick/core/mspro_block.c15
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ac100.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c4
-rw-r--r--drivers/mfd/as3711.c2
-rw-r--r--drivers/mfd/as3722.c2
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bcm590xx.c4
-rw-r--r--drivers/mfd/bd9571mwv.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c18
-rw-r--r--drivers/mfd/cs42l43-i2c.c15
-rw-r--r--drivers/mfd/cs42l43-sdw.c15
-rw-r--r--drivers/mfd/cs42l43.c124
-rw-r--r--drivers/mfd/cs42l43.h10
-rw-r--r--drivers/mfd/da9052-core.c2
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/da9062-core.c4
-rw-r--r--drivers/mfd/da9063-i2c.c2
-rw-r--r--drivers/mfd/da9150-core.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c28
-rw-r--r--drivers/mfd/intel-lpss.c9
-rw-r--r--drivers/mfd/intel-lpss.h14
-rw-r--r--drivers/mfd/kempld-core.c37
-rw-r--r--drivers/mfd/khadas-mcu.c2
-rw-r--r--drivers/mfd/lochnagar-i2c.c4
-rw-r--r--drivers/mfd/lpc_ich.c3
-rw-r--r--drivers/mfd/mc13xxx-core.c22
-rw-r--r--drivers/mfd/mcp-core.c2
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/mt6397-core.c3
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/rc5t583.c2
-rw-r--r--drivers/mfd/rk8xx-core.c2
-rw-r--r--drivers/mfd/rk8xx-spi.c2
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rohm-bd71828.c4
-rw-r--r--drivers/mfd/rohm-bd718x7.c2
-rw-r--r--drivers/mfd/rohm-bd9576.c2
-rw-r--r--drivers/mfd/rsmu_i2c.c2
-rw-r--r--drivers/mfd/si476x-prop.c2
-rw-r--r--drivers/mfd/stmfx.c2
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/mfd/twl-core.c28
-rw-r--r--drivers/mfd/twl4030-power.c3
-rw-r--r--drivers/mfd/wm5102-tables.c2
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/mfd/wm831x-auxadc.c43
-rw-r--r--drivers/mfd/wm8350-regmap.c2
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/mfd/wm97xx-core.c6
-rw-r--r--drivers/misc/fastrpc.c10
-rw-r--r--drivers/misc/gehc-achc.c8
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c21
-rw-r--r--drivers/misc/lkdtm/bugs.c3
-rw-r--r--drivers/misc/lkdtm/core.c22
-rw-r--r--drivers/misc/lkdtm/heap.c2
-rw-r--r--drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c8
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/vsc-tp.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.c10
-rw-r--r--drivers/mmc/core/block.c24
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/bus.h2
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/mmc.c4
-rw-r--r--drivers/mmc/core/queue.c98
-rw-r--r--drivers/mmc/core/sd.c2
-rw-r--r--drivers/mmc/core/sd.h2
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/host/Kconfig9
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/davinci_mmc.c59
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c1
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c1
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798mv200.c251
-rw-r--r--drivers/mmc/host/dw_mmc.c1
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c43
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c13
-rw-r--r--drivers/mmc/host/mmc_spi.c36
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c24
-rw-r--r--drivers/mmc/host/moxart-mmc.c90
-rw-r--r--drivers/mmc/host/mvsdio.c71
-rw-r--r--drivers/mmc/host/mxcmmc.c53
-rw-r--r--drivers/mmc/host/omap.c53
-rw-r--r--drivers/mmc/host/renesas_sdhi.h3
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c12
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c2
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c66
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c48
-rw-r--r--drivers/mmc/host/sh_mmcif.c114
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c4
-rw-r--r--drivers/mtd/devices/block2mtd.c46
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c12
-rw-r--r--drivers/mtd/mtdcore.c1
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c13
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c6
-rw-r--r--drivers/mtd/ubi/block.c6
-rw-r--r--drivers/net/amt.c10
-rw-r--r--drivers/net/arcnet/arcnet.c1
-rw-r--r--drivers/net/bareudp.c25
-rw-r--r--drivers/net/bonding/bond_3ad.c165
-rw-r--r--drivers/net/bonding/bond_main.c58
-rw-r--r--drivers/net/bonding/bond_netlink.c16
-rw-r--r--drivers/net/bonding/bond_options.c28
-rw-r--r--drivers/net/can/Kconfig3
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/esd/Kconfig12
-rw-r--r--drivers/net/can/esd/Makefile7
-rw-r--r--drivers/net/can/esd/esd_402_pci-core.c514
-rw-r--r--drivers/net/can/esd/esdacc.c764
-rw-r--r--drivers/net/can/esd/esdacc.h356
-rw-r--r--drivers/net/can/kvaser_pciefd.c62
-rw-r--r--drivers/net/can/m_can/m_can.c579
-rw-r--r--drivers/net/can/m_can/m_can.h35
-rw-r--r--drivers/net/can/m_can/m_can_pci.c1
-rw-r--r--drivers/net/can/m_can/m_can_platform.c5
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c33
-rw-r--r--drivers/net/can/softing/softing_fw.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c2
-rw-r--r--drivers/net/can/usb/Kconfig1
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c3
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/can/xilinx_can.c169
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/b53/b53_common.c42
-rw-r--r--drivers/net/dsa/b53/b53_priv.h7
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c404
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h1
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c112
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h2
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c5
-rw-r--r--drivers/net/dsa/mt7530-mdio.c7
-rw-r--r--drivers/net/dsa/mt7530.c570
-rw-r--r--drivers/net/dsa/mt7530.h38
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_scratch.c35
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c3
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c19
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c4
-rw-r--r--drivers/net/dsa/qca/qca8k.h4
-rw-r--r--drivers/net/dsa/realtek/Kconfig20
-rw-r--r--drivers/net/dsa/realtek/Makefile13
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c205
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.h48
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c279
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.h48
-rw-r--r--drivers/net/dsa/realtek/realtek.h14
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c132
-rw-r--r--drivers/net/dsa/realtek/rtl8366-core.c22
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c119
-rw-r--r--drivers/net/dsa/realtek/rtl83xx.c335
-rw-r--r--drivers/net/dsa/realtek/rtl83xx.h24
-rw-r--r--drivers/net/dummy.c11
-rw-r--r--drivers/net/ethernet/Kconfig3
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/adi/adin1110.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c323
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c49
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h39
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c181
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.c1
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c10
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c30
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c95
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h4
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c8
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c22
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c47
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c25
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c96
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h25
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c12
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c211
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c50
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c921
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h74
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c464
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c11
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c54
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c3
-rw-r--r--drivers/net/ethernet/ec_bhf.c1
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c36
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c4
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c148
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c18
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h171
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c50
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h20
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h18
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c62
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c928
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c135
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c159
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c128
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c108
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c48
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h1
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h93
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c97
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c569
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c144
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c183
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c99
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c309
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c276
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c229
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c31
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h146
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c39
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c2280
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h70
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c43
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c27
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile1
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h10
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/igc/igc_leds.c280
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c54
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c70
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c155
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c262
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h112
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c70
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c118
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c242
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h189
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c66
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c294
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig19
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c489
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c500
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h160
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c273
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c1231
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h334
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c430
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h166
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h154
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h162
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c510
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h224
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c330
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h276
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h617
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c186
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c524
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c734
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c168
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c327
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c8
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c5
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c88
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c19
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c115
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h90
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c379
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h23
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c120
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c18
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c945
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c64
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.h16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k_common.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k_common.h29
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c21
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.h15
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c71
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h22
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c17
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c145
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c267
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c7
-rw-r--r--drivers/net/ethernet/renesas/Kconfig1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h60
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1185
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h1
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c1
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c87
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c20
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c269
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c141
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c82
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h17
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c3
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c4
-rw-r--r--drivers/net/geneve.c58
-rw-r--r--drivers/net/gtp.c53
-rw-r--r--drivers/net/ieee802154/at86rf230.c5
-rw-r--r--drivers/net/ieee802154/ca8210.c12
-rw-r--r--drivers/net/ieee802154/mcr20a.c5
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ipa/ipa.h5
-rw-r--r--drivers/net/ipa/ipa_cmd.c6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c29
-rw-r--r--drivers/net/ipa/ipa_interrupt.c119
-rw-r--r--drivers/net/ipa/ipa_interrupt.h30
-rw-r--r--drivers/net/ipa/ipa_main.c60
-rw-r--r--drivers/net/ipa/ipa_mem.c37
-rw-r--r--drivers/net/ipa/ipa_mem.h5
-rw-r--r--drivers/net/ipa/ipa_modem.c110
-rw-r--r--drivers/net/ipa/ipa_power.c108
-rw-r--r--drivers/net/ipa/ipa_power.h29
-rw-r--r--drivers/net/ipa/ipa_qmi.c10
-rw-r--r--drivers/net/ipa/ipa_reg.c8
-rw-r--r--drivers/net/ipa/ipa_reg.h4
-rw-r--r--drivers/net/ipa/ipa_smp2p.c33
-rw-r--r--drivers/net/ipa/ipa_smp2p.h7
-rw-r--r--drivers/net/ipa/ipa_table.c18
-rw-r--r--drivers/net/ipa/ipa_uc.c9
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macsec.c12
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c94
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c109
-rw-r--r--drivers/net/mdio/of_mdio.c79
-rw-r--r--drivers/net/netconsole.c365
-rw-r--r--drivers/net/netdevsim/bus.c149
-rw-r--r--drivers/net/netdevsim/netdev.c53
-rw-r--r--drivers/net/netdevsim/netdevsim.h3
-rw-r--r--drivers/net/netkit.c2
-rw-r--r--drivers/net/nlmon.c24
-rw-r--r--drivers/net/pcs/pcs-lynx.c1
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c1
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c5
-rw-r--r--drivers/net/pcs/pcs-xpcs.c18
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/adin1100.c55
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c103
-rw-r--r--drivers/net/phy/at803x.c2432
-rw-r--r--drivers/net/phy/broadcom.c3
-rw-r--r--drivers/net/phy/dp83822.c211
-rw-r--r--drivers/net/phy/dp83867.c22
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c640
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c7
-rw-r--r--drivers/net/phy/mdio_bus.c48
-rw-r--r--drivers/net/phy/micrel.c109
-rw-r--r--drivers/net/phy/mxl-gpy.c20
-rw-r--r--drivers/net/phy/phy-c45.c137
-rw-r--r--drivers/net/phy/phy.c61
-rw-r--r--drivers/net/phy/phy_device.c208
-rw-r--r--drivers/net/phy/phylink.c8
-rw-r--r--drivers/net/phy/qcom/Kconfig30
-rw-r--r--drivers/net/phy/qcom/Makefile6
-rw-r--r--drivers/net/phy/qcom/at803x.c1106
-rw-r--r--drivers/net/phy/qcom/qca807x.c849
-rw-r--r--drivers/net/phy/qcom/qca808x.c663
-rw-r--r--drivers/net/phy/qcom/qca83xx.c275
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c676
-rw-r--r--drivers/net/phy/qcom/qcom.h243
-rw-r--r--drivers/net/phy/realtek.c48
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c20
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c35
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/ax88179_178a.c20
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/lan78xx.c12
-rw-r--r--drivers/net/usb/r8152.c49
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/veth.c117
-rw-r--r--drivers/net/vsockmon.c19
-rw-r--r--drivers/net/vxlan/vxlan_core.c68
-rw-r--r--drivers/net/wan/Kconfig12
-rw-r--r--drivers/net/wan/Makefile1
-rw-r--r--drivers/net/wan/framer/framer-core.c30
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c6
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c797
-rw-r--r--drivers/net/wireguard/receive.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h62
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c108
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h42
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1202
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c73
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c62
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c267
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c303
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h151
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c270
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h84
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c25
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c166
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c30
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c171
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h33
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c415
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c15
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c33
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h55
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1309
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c52
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c142
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h23
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c94
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c429
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h35
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c13
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h116
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h29
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c330
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h202
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h16
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c76
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c46
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c152
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h60
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h127
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c617
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c500
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c427
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h210
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c132
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c340
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/module.c10
-rw-r--r--drivers/net/wireless/intersil/p54/main.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c13
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c212
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h94
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c52
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c213
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c16
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c110
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c93
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c81
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c40
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h11
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h28
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c33
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c588
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c109
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c195
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h43
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c40
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c61
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h109
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c646
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c393
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c382
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h362
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2496
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1532
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c341
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h93
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c363
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c215
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c121
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c1105
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h113
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c331
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h572
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c72
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c78
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c82
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c1773
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c378
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c50
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c12
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c19
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c42
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c75
-rw-r--r--drivers/net/wireless/st/cw1200/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c9
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c147
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h5
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c47
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h18
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c103
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c110
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h10
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c115
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h24
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c132
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h1
-rw-r--r--drivers/net/wwan/wwan_core.c36
-rw-r--r--drivers/net/wwan/wwan_hwsim.c16
-rw-r--r--drivers/ntb/core.c8
-rw-r--r--drivers/nvdimm/btt.c14
-rw-r--r--drivers/nvdimm/pmem.c14
-rw-r--r--drivers/nvme/host/apple.c2
-rw-r--r--drivers/nvme/host/core.c458
-rw-r--r--drivers/nvme/host/fabrics.c22
-rw-r--r--drivers/nvme/host/multipath.c17
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/host/sysfs.c7
-rw-r--r--drivers/nvme/host/tcp.c7
-rw-r--r--drivers/nvme/host/zns.c24
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/configfs.c28
-rw-r--r--drivers/nvme/target/core.c18
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c5
-rw-r--r--drivers/nvme/target/fcloop.c17
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c16
-rw-r--r--drivers/nvme/target/nvmet.h8
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/rdma.c10
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/nvme/target/zns.c5
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/opp/core.c1
-rw-r--r--drivers/opp/debugfs.c14
-rw-r--r--drivers/pci/Kconfig5
-rw-r--r--drivers/pci/Makefile7
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c14
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h6
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c630
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c12
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c21
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c42
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c15
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c14
-rw-r--r--drivers/pci/devres.c448
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c21
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c6
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c21
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c25
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c25
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c20
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c65
-rw-r--r--drivers/pci/iomap.c177
-rw-r--r--drivers/pci/irq.c204
-rw-r--r--drivers/pci/mmap.c29
-rw-r--r--drivers/pci/msi/irqdomain.c2
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-driver.c23
-rw-r--r--drivers/pci/pci-sysfs.c167
-rw-r--r--drivers/pci/pci.c502
-rw-r--r--drivers/pci/pci.h60
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer.c20
-rw-r--r--drivers/pci/pcie/aspm.c268
-rw-r--r--drivers/pci/pcie/dpc.c76
-rw-r--r--drivers/pci/pcie/err.c20
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/probe.c66
-rw-r--r--drivers/pci/quirks.c11
-rw-r--r--drivers/pci/setup-irq.c64
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c6
-rw-r--r--drivers/perf/amlogic/meson_g12_ddr_pmu.c6
-rw-r--r--drivers/perf/arm-cci.c8
-rw-r--r--drivers/perf/arm-ccn.c6
-rw-r--r--drivers/perf/arm-cmn.c14
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c159
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h1
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c6
-rw-r--r--drivers/perf/arm_dmc620_pmu.c6
-rw-r--r--drivers/perf/arm_dsu_pmu.c6
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c10
-rw-r--r--drivers/perf/arm_spe_pmu.c5
-rw-r--r--drivers/perf/cxl_pmu.c10
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c5
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c6
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c102
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c42
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c5
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c6
-rw-r--r--drivers/perf/qcom_l2_pmu.c5
-rw-r--r--drivers/perf/riscv_pmu.c18
-rw-r--r--drivers/perf/riscv_pmu_legacy.c10
-rw-r--r--drivers/perf/riscv_pmu_sbi.c8
-rw-r--r--drivers/perf/starfive_starlink_pmu.c642
-rw-r--r--drivers/perf/thunderx2_pmu.c5
-rw-r--r--drivers/perf/xgene_pmu.c6
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c166
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c10
-rw-r--r--drivers/pinctrl/Kconfig18
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c18
-rw-r--r--drivers/pinctrl/core.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c24
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8186.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c1
-rw-r--r--drivers/pinctrl/nomadik/Kconfig8
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c955
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h180
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c2
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c1119
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c7
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c15
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c1
-rw-r--r--drivers/pinctrl/pinctrl-st.c3
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c8
-rw-r--r--drivers/pinctrl/pinmux.c6
-rw-r--r--drivers/pinctrl/qcom/Kconfig2
-rw-r--r--drivers/pinctrl/renesas/Kconfig7
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/core.c14
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c14
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c3967
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c790
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c2
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c8
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c14
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c267
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c14
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c7
-rw-r--r--drivers/platform/x86/Kconfig7
-rw-r--r--drivers/platform/x86/acer-wmi.c17
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/amd/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp.c584
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c136
-rw-r--r--drivers/platform/x86/amd/pmf/core.c32
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h98
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c145
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c97
-rw-r--r--drivers/platform/x86/asus-wmi.c84
-rw-r--r--drivers/platform/x86/dell/Kconfig3
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c1
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c2
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c4
-rw-r--r--drivers/platform/x86/firmware_attributes_class.h2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c117
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c2
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c71
-rw-r--r--drivers/platform/x86/huawei-wmi.c1
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/intel/ifs/load.c2
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c101
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c47
-rw-r--r--drivers/platform/x86/intel/pmc/core.h7
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c40
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c4
-rw-r--r--drivers/platform/x86/intel/tpmi.c9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c2
-rw-r--r--drivers/platform/x86/intel/vbtn.c3
-rw-r--r--drivers/platform/x86/intel/vsec.c5
-rw-r--r--drivers/platform/x86/intel/wmi/sbl-fw-update.c1
-rw-r--r--drivers/platform/x86/intel/wmi/thunderbolt.c1
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c1
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c1
-rw-r--r--drivers/platform/x86/p2sb.c25
-rw-r--r--drivers/platform/x86/pmc_atom.c79
-rw-r--r--drivers/platform/x86/serdev_helpers.h80
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c28
-rw-r--r--drivers/platform/x86/silicom-platform.c7
-rw-r--r--drivers/platform/x86/think-lmi.c22
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c258
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c38
-rw-r--r--drivers/platform/x86/wmi-bmof.c1
-rw-r--r--drivers/platform/x86/wmi.c226
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c38
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c1
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h1
-rw-r--r--drivers/pmdomain/arm/scmi_perf_domain.c3
-rw-r--r--drivers/pmdomain/core.c141
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c9
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c19
-rw-r--r--drivers/pmdomain/imx/scu-pd.c2
-rw-r--r--drivers/pmdomain/mediatek/mtk-scpsys.c4
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c8
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c96
-rw-r--r--drivers/pmdomain/renesas/Kconfig4
-rw-r--r--drivers/pmdomain/renesas/Makefile1
-rw-r--r--drivers/pmdomain/renesas/r8a779a0-sysc.c12
-rw-r--r--drivers/pmdomain/renesas/r8a779f0-sysc.c12
-rw-r--r--drivers/pmdomain/renesas/r8a779g0-sysc.c12
-rw-r--r--drivers/pmdomain/renesas/r8a779h0-sysc.c54
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c17
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.h1
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c4
-rw-r--r--drivers/pmdomain/tegra/powergate-bpmp.c2
-rw-r--r--drivers/pmdomain/ti/omap_prm.c2
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c2
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c2
-rw-r--r--drivers/power/reset/as3722-poweroff.c30
-rw-r--r--drivers/power/reset/atc260x-poweroff.c55
-rw-r--r--drivers/power/reset/axxia-reset.c16
-rw-r--r--drivers/power/reset/brcm-kona-reset.c11
-rw-r--r--drivers/power/reset/gemini-poweroff.c16
-rw-r--r--drivers/power/reset/msm-poweroff.c21
-rw-r--r--drivers/power/reset/mt6323-poweroff.c26
-rw-r--r--drivers/power/reset/regulator-poweroff.c36
-rw-r--r--drivers/power/reset/restart-poweroff.c25
-rw-r--r--drivers/power/reset/rmobile-reset.c38
-rw-r--r--drivers/power/reset/syscon-poweroff.c66
-rw-r--r--drivers/power/reset/tps65086-restart.c58
-rw-r--r--drivers/power/reset/xgene-reboot.c25
-rw-r--r--drivers/power/supply/Kconfig1
-rw-r--r--drivers/power/supply/ab8500_btemp.c3
-rw-r--r--drivers/power/supply/ab8500_chargalg.c3
-rw-r--r--drivers/power/supply/ab8500_charger.c3
-rw-r--r--drivers/power/supply/ab8500_fg.c3
-rw-r--r--drivers/power/supply/apm_power.c3
-rw-r--r--drivers/power/supply/axp20x_usb_power.c147
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c18
-rw-r--r--drivers/power/supply/bq2415x_charger.c10
-rw-r--r--drivers/power/supply/bq27xxx_battery.c56
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c50
-rw-r--r--drivers/power/supply/da9030_battery.c6
-rw-r--r--drivers/power/supply/da9052-battery.c4
-rw-r--r--drivers/power/supply/da9150-charger.c72
-rw-r--r--drivers/power/supply/ds2760_battery.c4
-rw-r--r--drivers/power/supply/goldfish_battery.c24
-rw-r--r--drivers/power/supply/lp8727_charger.c35
-rw-r--r--drivers/power/supply/lp8788-charger.c21
-rw-r--r--drivers/power/supply/max14577_charger.c8
-rw-r--r--drivers/power/supply/max77693_charger.c10
-rw-r--r--drivers/power/supply/max8925_power.c37
-rw-r--r--drivers/power/supply/mm8013.c13
-rw-r--r--drivers/power/supply/pcf50633-charger.c23
-rw-r--r--drivers/power/supply/power_supply.h6
-rw-r--r--drivers/power/supply/power_supply_core.c65
-rw-r--r--drivers/power/supply/power_supply_sysfs.c40
-rw-r--r--drivers/power/supply/rt5033_battery.c14
-rw-r--r--drivers/power/supply/rx51_battery.c57
-rw-r--r--drivers/power/supply/tps65090-charger.c18
-rw-r--r--drivers/power/supply/twl4030_madc_battery.c59
-rw-r--r--drivers/power/supply/wm831x_backup.c13
-rw-r--r--drivers/power/supply/wm831x_power.c24
-rw-r--r--drivers/power/supply/wm8350_power.c30
-rw-r--r--drivers/powercap/dtpm.c2
-rw-r--r--drivers/powercap/dtpm_cpu.c43
-rw-r--r--drivers/powercap/dtpm_devfreq.c34
-rw-r--r--drivers/powercap/intel_rapl_common.c38
-rw-r--r--drivers/powercap/intel_rapl_msr.c8
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c15
-rw-r--r--drivers/ptp/Kconfig12
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/ptp_clock.c66
-rw-r--r--drivers/ptp/ptp_fc3.c1014
-rw-r--r--drivers/ptp/ptp_fc3.h45
-rw-r--r--drivers/ptp/ptp_kvm_common.c10
-rw-r--r--drivers/ptp/ptp_kvm_x86.c4
-rw-r--r--drivers/ptp/ptp_ocp.c311
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/ptp/ptp_sysfs.c13
-rw-r--r--drivers/ptp/ptp_vclock.c2
-rw-r--r--drivers/pwm/core.c724
-rw-r--r--drivers/pwm/pwm-ab8500.c36
-rw-r--r--drivers/pwm/pwm-apple.c18
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c42
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c32
-rw-r--r--drivers/pwm/pwm-atmel.c47
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c19
-rw-r--r--drivers/pwm/pwm-bcm-kona.c23
-rw-r--r--drivers/pwm/pwm-bcm2835.c22
-rw-r--r--drivers/pwm/pwm-berlin.c29
-rw-r--r--drivers/pwm/pwm-brcmstb.c17
-rw-r--r--drivers/pwm/pwm-clk.c27
-rw-r--r--drivers/pwm/pwm-clps711x.c28
-rw-r--r--drivers/pwm/pwm-crc.c22
-rw-r--r--drivers/pwm/pwm-cros-ec.c57
-rw-r--r--drivers/pwm/pwm-dwc-core.c26
-rw-r--r--drivers/pwm/pwm-dwc.c77
-rw-r--r--drivers/pwm/pwm-dwc.h14
-rw-r--r--drivers/pwm/pwm-ep93xx.c21
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c49
-rw-r--r--drivers/pwm/pwm-hibvt.c70
-rw-r--r--drivers/pwm/pwm-img.c60
-rw-r--r--drivers/pwm/pwm-imx-tpm.c44
-rw-r--r--drivers/pwm/pwm-imx1.c20
-rw-r--r--drivers/pwm/pwm-imx27.c35
-rw-r--r--drivers/pwm/pwm-intel-lgm.c17
-rw-r--r--drivers/pwm/pwm-iqs620a.c30
-rw-r--r--drivers/pwm/pwm-jz4740.c36
-rw-r--r--drivers/pwm/pwm-keembay.c17
-rw-r--r--drivers/pwm/pwm-lp3943.c17
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c34
-rw-r--r--drivers/pwm/pwm-lpc32xx.c21
-rw-r--r--drivers/pwm/pwm-lpss-pci.c10
-rw-r--r--drivers/pwm/pwm-lpss-platform.c10
-rw-r--r--drivers/pwm/pwm-lpss.c38
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-mediatek.c38
-rw-r--r--drivers/pwm/pwm-meson.c110
-rw-r--r--drivers/pwm/pwm-microchip-core.c17
-rw-r--r--drivers/pwm/pwm-mtk-disp.c25
-rw-r--r--drivers/pwm/pwm-mxs.c32
-rw-r--r--drivers/pwm/pwm-ntxec.c14
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c47
-rw-r--r--drivers/pwm/pwm-pca9685.c161
-rw-r--r--drivers/pwm/pwm-pxa.c25
-rw-r--r--drivers/pwm/pwm-raspberrypi-poe.c20
-rw-r--r--drivers/pwm/pwm-rcar.c27
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c20
-rw-r--r--drivers/pwm/pwm-rockchip.c24
-rw-r--r--drivers/pwm/pwm-rz-mtu3.c60
-rw-r--r--drivers/pwm/pwm-samsung.c94
-rw-r--r--drivers/pwm/pwm-sifive.c30
-rw-r--r--drivers/pwm/pwm-sl28cpld.c13
-rw-r--r--drivers/pwm/pwm-spear.c18
-rw-r--r--drivers/pwm/pwm-sprd.c58
-rw-r--r--drivers/pwm/pwm-sti.c70
-rw-r--r--drivers/pwm/pwm-stm32-lp.c31
-rw-r--r--drivers/pwm/pwm-stm32.c56
-rw-r--r--drivers/pwm/pwm-stmpe.c58
-rw-r--r--drivers/pwm/pwm-sun4i.c100
-rw-r--r--drivers/pwm/pwm-sunplus.c17
-rw-r--r--drivers/pwm/pwm-tegra.c50
-rw-r--r--drivers/pwm/pwm-tiecap.c55
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c72
-rw-r--r--drivers/pwm/pwm-twl-led.c55
-rw-r--r--drivers/pwm/pwm-twl.c50
-rw-r--r--drivers/pwm/pwm-visconti.c17
-rw-r--r--drivers/pwm/pwm-vt8500.c43
-rw-r--r--drivers/pwm/pwm-xilinx.c34
-rw-r--r--drivers/pwm/sysfs.c4
-rw-r--r--drivers/ras/Kconfig13
-rw-r--r--drivers/ras/Makefile3
-rw-r--r--drivers/ras/amd/atl/Kconfig21
-rw-r--r--drivers/ras/amd/atl/Makefile18
-rw-r--r--drivers/ras/amd/atl/access.c133
-rw-r--r--drivers/ras/amd/atl/core.c225
-rw-r--r--drivers/ras/amd/atl/dehash.c500
-rw-r--r--drivers/ras/amd/atl/denormalize.c718
-rw-r--r--drivers/ras/amd/atl/internal.h306
-rw-r--r--drivers/ras/amd/atl/map.c682
-rw-r--r--drivers/ras/amd/atl/reg_fields.h606
-rw-r--r--drivers/ras/amd/atl/system.c288
-rw-r--r--drivers/ras/amd/atl/umc.c341
-rw-r--r--drivers/ras/amd/fmpm.c1013
-rw-r--r--drivers/ras/cec.c10
-rw-r--r--drivers/ras/debugfs.c8
-rw-r--r--drivers/ras/debugfs.h2
-rw-r--r--drivers/ras/ras.c31
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/da9055-regulator.c48
-rw-r--r--drivers/regulator/da9121-regulator.c1
-rw-r--r--drivers/regulator/fixed-helper.c4
-rw-r--r--drivers/regulator/internal.h2
-rw-r--r--drivers/regulator/lp873x-regulator.c3
-rw-r--r--drivers/regulator/lp87565-regulator.c3
-rw-r--r--drivers/regulator/lp8788-buck.c64
-rw-r--r--drivers/regulator/max5970-regulator.c8
-rw-r--r--drivers/regulator/max8973-regulator.c36
-rw-r--r--drivers/regulator/max8997-regulator.c85
-rw-r--r--drivers/regulator/max8998.c150
-rw-r--r--drivers/regulator/mp8859.c252
-rw-r--r--drivers/regulator/pwm-regulator.c40
-rw-r--r--drivers/regulator/qcom_smd-regulator.c19
-rw-r--r--drivers/regulator/rk808-regulator.c10
-rw-r--r--drivers/regulator/userspace-consumer.c1
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c82
-rw-r--r--drivers/remoteproc/imx_rproc.c73
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c160
-rw-r--r--drivers/rtc/lib_test.c2
-rw-r--r--drivers/s390/block/dasd.c190
-rw-r--r--drivers/s390/block/dasd_3990_erp.c80
-rw-r--r--drivers/s390/block/dasd_alias.c8
-rw-r--r--drivers/s390/block/dasd_devmap.c34
-rw-r--r--drivers/s390/block/dasd_diag.c26
-rw-r--r--drivers/s390/block/dasd_eckd.c186
-rw-r--r--drivers/s390/block/dasd_eer.c7
-rw-r--r--drivers/s390/block/dasd_erp.c9
-rw-r--r--drivers/s390/block/dasd_fba.c88
-rw-r--r--drivers/s390/block/dasd_genhd.c54
-rw-r--r--drivers/s390/block/dasd_int.h37
-rw-r--r--drivers/s390/block/dasd_ioctl.c8
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c10
-rw-r--r--drivers/s390/block/scm_blk.c17
-rw-r--r--drivers/s390/char/vmur.c4
-rw-r--r--drivers/s390/char/zcore.c1
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/chsc.c4
-rw-r--r--drivers/s390/cio/chsc_sch.c20
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/css.c4
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_ops.c6
-rw-r--r--drivers/s390/cio/scm.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c257
-rw-r--r--drivers/s390/crypto/ap_bus.h8
-rw-r--r--drivers/s390/crypto/ap_debug.h4
-rw-r--r--drivers/s390/crypto/ap_queue.c31
-rw-r--r--drivers/s390/crypto/pkey_api.c226
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c35
-rw-r--r--drivers/s390/crypto/zcrypt_api.c228
-rw-r--r--drivers/s390/crypto/zcrypt_api.h9
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c214
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h4
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c127
-rw-r--r--drivers/s390/crypto/zcrypt_error.h5
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c14
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c45
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/scsi.c22
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/sd.c26
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c5
-rw-r--r--drivers/soc/mediatek/Kconfig9
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c191
-rw-r--r--drivers/soc/microchip/Kconfig2
-rw-r--r--drivers/soc/qcom/Kconfig9
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/apr.c2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c2
-rw-r--r--drivers/soc/qcom/pmic_glink.c21
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c16
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c1
-rw-r--r--drivers/soc/qcom/qcom-pbs.c236
-rw-r--r--drivers/soc/qcom/qcom_aoss.c105
-rw-r--r--drivers/soc/qcom/smem.c11
-rw-r--r--drivers/soc/qcom/smp2p.c6
-rw-r--r--drivers/soc/qcom/socinfo.c7
-rw-r--r--drivers/soc/qcom/spm.c248
-rw-r--r--drivers/soc/qcom/trace-aoss.h48
-rw-r--r--drivers/soc/renesas/Kconfig17
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/samsung/Kconfig1
-rw-r--r--drivers/soc/samsung/exynos-pmu.c235
-rw-r--r--drivers/soc/samsung/exynos-pmu.h1
-rw-r--r--drivers/soc/tegra/Kconfig5
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c118
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c23
-rw-r--r--drivers/soc/tegra/fuse/fuse.h8
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c108
-rw-r--r--drivers/soc/tegra/pmc.c87
-rw-r--r--drivers/soundwire/Makefile2
-rw-r--r--drivers/soundwire/amd_init.c235
-rw-r--r--drivers/soundwire/amd_init.h13
-rw-r--r--drivers/soundwire/amd_manager.c47
-rw-r--r--drivers/soundwire/amd_manager.h16
-rw-r--r--drivers/soundwire/dmi-quirks.c8
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-ath79.c4
-rw-r--r--drivers/spi/spi-au1550.c2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c138
-rw-r--r--drivers/spi/spi-bcm2835.c27
-rw-r--r--drivers/spi/spi-bitbang.c64
-rw-r--r--drivers/spi/spi-butterfly.c6
-rw-r--r--drivers/spi/spi-cadence-quadspi.c54
-rw-r--r--drivers/spi/spi-cavium.c6
-rw-r--r--drivers/spi/spi-cavium.h2
-rw-r--r--drivers/spi/spi-cs42l43.c27
-rw-r--r--drivers/spi/spi-davinci.c6
-rw-r--r--drivers/spi/spi-dw-dma.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c15
-rw-r--r--drivers/spi/spi-fsl-lib.c14
-rw-r--r--drivers/spi/spi-geni-qcom.c2
-rw-r--r--drivers/spi/spi-gpio.c2
-rw-r--r--drivers/spi/spi-intel.c34
-rw-r--r--drivers/spi/spi-lm70llp.c6
-rw-r--r--drivers/spi/spi-loopback-test.c4
-rw-r--r--drivers/spi/spi-mem.c49
-rw-r--r--drivers/spi/spi-mt65xx.c5
-rw-r--r--drivers/spi/spi-nxp-fspi.c2
-rw-r--r--drivers/spi/spi-oc-tiny.c6
-rw-r--r--drivers/spi/spi-omap-uwire.c4
-rw-r--r--drivers/spi/spi-pci1xxxx.c510
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-rockchip.c13
-rw-r--r--drivers/spi/spi-s3c64xx.c301
-rw-r--r--drivers/spi/spi-sh-sci.c10
-rw-r--r--drivers/spi/spi-slave-mt27xx.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c2
-rw-r--r--drivers/spi/spi-stm32.c30
-rw-r--r--drivers/spi/spi-xilinx.c4
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c2
-rw-r--r--drivers/spi/spi.c520
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/main.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt2
-rw-r--r--drivers/staging/greybus/Kconfig2
-rw-r--r--drivers/staging/greybus/light.c21
-rw-r--r--drivers/staging/greybus/pwm.c133
-rw-r--r--drivers/staging/greybus/spilib.c66
-rw-r--r--drivers/staging/sm750fb/Kconfig1
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/target/target_core_iblock.c18
-rw-r--r--drivers/target/target_core_iblock.h2
-rw-r--r--drivers/target/target_core_pscsi.c31
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/tee/optee/device.c3
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/cpufreq_cooling.c45
-rw-r--r--drivers/thermal/da9062-thermal.c2
-rw-r--r--drivers/thermal/devfreq_cooling.c49
-rw-r--r--drivers/thermal/gov_bang_bang.c2
-rw-r--r--drivers/thermal/gov_fair_share.c16
-rw-r--r--drivers/thermal/gov_power_allocator.c2
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/intel/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c43
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c8
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c13
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c8
-rw-r--r--drivers/thermal/intel/intel_hfi.c2
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c28
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c2
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c34
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c77
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.h2
-rw-r--r--drivers/thermal/intel/intel_tcc.c12
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c49
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal.c2
-rw-r--r--drivers/thermal/thermal_core.c76
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_helpers.c10
-rw-r--r--drivers/thermal/thermal_hwmon.c4
-rw-r--r--drivers/thermal/thermal_of.c37
-rw-r--r--drivers/thermal/thermal_sysfs.c38
-rw-r--r--drivers/thermal/thermal_trip.c6
-rw-r--r--drivers/thunderbolt/switch.c3
-rw-r--r--drivers/tty/hvc/Kconfig8
-rw-r--r--drivers/tty/serial/8250/8250_dw.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c60
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/imx.c22
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c10
-rw-r--r--drivers/tty/serial/serial_port.c25
-rw-r--r--drivers/tty/serial/stm32-usart.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/ufs/core/ufshcd.c4
-rw-r--r--drivers/ufs/host/ufs-qcom.c8
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c8
-rw-r--r--drivers/usb/cdns3/core.c1
-rw-r--r--drivers/usb/cdns3/drd.c13
-rw-r--r--drivers/usb/cdns3/drd.h6
-rw-r--r--drivers/usb/cdns3/host.c16
-rw-r--r--drivers/usb/core/hcd.c23
-rw-r--r--drivers/usb/core/port.c5
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/gadget/function/f_ncm.c10
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c2
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c3
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/xhci-ring.c8
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c8
-rw-r--r--drivers/usb/roles/class.c29
-rw-r--r--drivers/usb/storage/isd200.c23
-rw-r--r--drivers/usb/storage/scsiglue.c7
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/altmodes/displayport.c18
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c9
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/vhost/net.c91
-rw-r--r--drivers/video/Kconfig9
-rw-r--r--drivers/video/Makefile7
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/as3711_bl.c6
-rw-r--r--drivers/video/backlight/bd6107.c9
-rw-r--r--drivers/video/backlight/corgi_lcd.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/gpio_backlight.c10
-rw-r--r--drivers/video/backlight/hx8357.c67
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c128
-rw-r--r--drivers/video/backlight/ktz8866.c6
-rw-r--r--drivers/video/backlight/l4f00242t03.c34
-rw-r--r--drivers/video/backlight/lm3630a_bl.c42
-rw-r--r--drivers/video/backlight/lm3639_bl.c1
-rw-r--r--drivers/video/backlight/lp8788_bl.c1
-rw-r--r--drivers/video/backlight/mp3309c.c93
-rw-r--r--drivers/video/backlight/pandora_bl.c4
-rw-r--r--drivers/video/cmdline.c2
-rw-r--r--drivers/video/fbdev/Kconfig35
-rw-r--r--drivers/video/fbdev/chipsfb.c1
-rw-r--r--drivers/video/fbdev/core/Kconfig2
-rw-r--r--drivers/video/fbdev/core/fbcon.c8
-rw-r--r--drivers/video/fbdev/core/fbmem.c2
-rw-r--r--drivers/video/fbdev/efifb.c225
-rw-r--r--drivers/video/fbdev/geode/Kconfig3
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c26
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/vesafb.c78
-rw-r--r--drivers/video/screen_info_generic.c146
-rw-r--r--drivers/video/screen_info_pci.c136
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c5
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/cros_ec_wdt.c204
-rw-r--r--drivers/watchdog/s3c2410_wdt.c8
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/zorro/zorro-driver.c2
-rw-r--r--drivers/zorro/zorro.h2
3433 files changed, 342261 insertions, 53055 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 37fd6ce3bd7f..3bf5cab4b451 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -135,7 +135,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
-obj-$(CONFIG_NEW_LEDS) += leds/
+obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 24cac4c0274b..16c3edb8c46e 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -23,7 +23,7 @@ static struct idr accel_minors_idr;
static struct dentry *accel_debugfs_root;
-static struct device_type accel_sysfs_device_minor = {
+static const struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
};
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index 3aa6eeef443b..39e23d625a3c 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -1360,9 +1360,8 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
return -EINVAL;
}
- if (!hl_device_operational(hdev, &status)) {
+ if (!hl_device_operational(hdev, &status))
return -EBUSY;
- }
if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
!hdev->supports_staged_submission) {
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 01f071d52570..b1c88d1837d9 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -484,7 +484,7 @@ static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- char kbuf[MMU_KBUF_SIZE];
+ char kbuf[MMU_KBUF_SIZE] = {0};
char *c;
ssize_t rc;
@@ -546,7 +546,7 @@ static ssize_t mmu_ack_error_value_write(struct file *file,
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- char kbuf[MMU_KBUF_SIZE];
+ char kbuf[MMU_KBUF_SIZE] = {0};
ssize_t rc;
if (count > sizeof(kbuf) - 1)
@@ -1643,19 +1643,19 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hl_data64b_fops);
debugfs_create_file("set_power_state",
- 0200,
+ 0644,
root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
- 0200,
+ 0644,
root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
- 0200,
+ 0644,
root,
dev_entry,
&hl_clk_gate_fops);
@@ -1667,13 +1667,13 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
- 0644,
+ 0400,
root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
- 0644,
+ 0400,
root,
dev_entry,
&hl_razwi_check_fops);
@@ -1706,7 +1706,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
- 0600,
+ 0644,
root,
dev_entry,
&hl_state_dump_fops);
@@ -1724,7 +1724,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
- 0444,
+ 0644,
root,
entry,
&hl_debugfs_fops);
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index a73bd4be94b1..8f92445c5a90 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -55,7 +55,8 @@ static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_regi
if (is_power_of_2(prop->dram_pci_bar_size))
bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
else
- bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
+ bar_base_addr = region->region_base +
+ div64_u64((addr - region->region_base), prop->dram_pci_bar_size) *
prop->dram_pci_bar_size;
old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
@@ -1034,14 +1035,14 @@ static void device_early_fini(struct hl_device *hdev)
static bool is_pci_link_healthy(struct hl_device *hdev)
{
- u16 vendor_id;
+ u16 device_id;
if (!hdev->pdev)
return false;
- pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
+ pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id);
- return (vendor_id == PCI_VENDOR_ID_HABANALABS);
+ return (device_id == hdev->pdev->device);
}
static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
@@ -1768,14 +1769,16 @@ kill_processes:
hdev->device_cpu_disabled = false;
hdev->reset_info.hard_reset_pending = false;
+ /*
+ * Put the device in an unusable state if there are 2 back to back resets due to
+ * fatal errors.
+ */
if (hdev->reset_info.reset_trigger_repeated &&
- (hdev->reset_info.prev_reset_trigger ==
- HL_DRV_RESET_FW_FATAL_ERR)) {
- /* if there 2 back to back resets from FW,
- * ensure driver puts the driver in a unusable state
- */
+ (hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR ||
+ hdev->reset_info.prev_reset_trigger ==
+ HL_DRV_RESET_HEARTBEAT)) {
dev_crit(hdev->dev,
- "%s Consecutive FW fatal errors received, stopping hard reset\n",
+ "%s Consecutive fatal errors, stopping hard reset\n",
dev_name(&(hdev)->pdev->dev));
rc = -EIO;
goto out_err;
@@ -2801,3 +2804,35 @@ void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
captured_err_info->undef_opcode.write_enable = true;
}
+
+void hl_init_cpu_for_irq(struct hl_device *hdev)
+{
+#ifdef CONFIG_NUMA
+ struct cpumask *available_mask = &hdev->irq_affinity_mask;
+ int numa_node = hdev->pdev->dev.numa_node, i;
+ static struct cpumask cpu_mask;
+
+ if (numa_node < 0)
+ return;
+
+ if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) {
+ dev_err(hdev->dev, "No available affinities in current numa node\n");
+ return;
+ }
+
+ /* Remove HT siblings */
+ for_each_cpu(i, &cpu_mask)
+ cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask);
+#endif
+}
+
+void hl_set_irq_affinity(struct hl_device *hdev, int irq)
+{
+ if (cpumask_empty(&hdev->irq_affinity_mask)) {
+ dev_dbg(hdev->dev, "affinity mask is empty\n");
+ return;
+ }
+
+ if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))
+ dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);
+}
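
A minimal standalone sketch (plain userspace C, not part of the patch; all values are hypothetical) of the arithmetic the patched else-branch of hl_set_dram_bar() now performs: for a non-power-of-2 BAR size, the target address is aligned down to a multiple of the BAR size measured from the DRAM region base instead of from absolute zero.

#include <stdio.h>
#include <stdint.h>

/* Align addr down to a multiple of bar_size relative to region_base,
 * mirroring the div64_u64() expression added by the patch. */
static uint64_t bar_base_for(uint64_t addr, uint64_t region_base,
                             uint64_t bar_size)
{
        return region_base + ((addr - region_base) / bar_size) * bar_size;
}

int main(void)
{
        uint64_t region_base = 0x1000000000ULL;  /* hypothetical DRAM region base */
        uint64_t bar_size    = 0x180000000ULL;   /* 6 GB: not a power of 2 */
        uint64_t addr        = 0x13c0000000ULL;  /* hypothetical target address */

        printf("bar_base_addr = 0x%llx\n",
               (unsigned long long)bar_base_for(addr, region_base, bar_size));
        return 0;
}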
diff --git a/drivers/accel/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index 3558a6a8e192..4bd02778a970 100644
--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -501,7 +501,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
0, &result);
if (rc)
- dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
+ dev_err(hdev->dev, "failed to unmask event %d", event_type);
return rc;
}
@@ -540,7 +540,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
total_pkt_size, 0, &result);
if (rc)
- dev_err(hdev->dev, "failed to unmask IRQ array\n");
+ dev_err(hdev->dev, "failed to unmask event array\n");
kfree(pkt);
@@ -2718,18 +2718,20 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
+ rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, sizeof(struct lkd_msg_comms));
+ if (rc)
+ goto protocol_err;
+
+ if (hdev->asic_prop.support_dynamic_resereved_fw_size)
+ hdev->asic_prop.reserved_fw_mem_size =
+ le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb) * SZ_1M;
+
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
struct lkd_fw_binning_info *binning_info;
- rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
- sizeof(struct lkd_msg_comms));
- if (rc)
- goto protocol_err;
-
/* read preboot version */
rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
-
if (rc)
return rc;
@@ -2756,11 +2758,6 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->decoder_binning, hdev->rotator_binning);
}
- if (hdev->asic_prop.support_dynamic_resereved_fw_size) {
- hdev->asic_prop.reserved_fw_mem_size =
- le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb);
- }
-
return 0;
}
@@ -2795,7 +2792,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
if (!(hdev->fw_components & FW_TYPE_LINUX)) {
- dev_info(hdev->dev, "Skip loading Linux F/W\n");
+ dev_dbg(hdev->dev, "Skip loading Linux F/W\n");
return 0;
}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 2a900c9941fe..48f0f3eea1ef 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -443,18 +443,22 @@ enum hl_collective_mode {
* a CB handle can be provided for jobs on this queue.
* Otherwise, a CB address must be provided.
* @collective_mode: collective mode of current queue
+ * @q_dram_bd_address: PQ dram address, used when the PQ needs to reside in DRAM.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
* @binned: True if the queue is binned out and should not be used
* @supports_sync_stream: True if queue supports sync stream
+ * @dram_bd: True if the bd should be copied to dram, needed for PQ which has been allocated on dram
*/
struct hw_queue_properties {
enum hl_queue_type type;
enum queue_cb_alloc_flags cb_alloc_flags;
enum hl_collective_mode collective_mode;
+ u64 q_dram_bd_address;
u8 driver_only;
u8 binned;
u8 supports_sync_stream;
+ u8 dram_bd;
};
/**
@@ -590,8 +594,6 @@ struct hl_hints_range {
* we display to the user
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
- * @mmu_hop_table_size: MMU hop table size.
- * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
* @dram_page_size: The DRAM physical page size.
* @cfg_size: configuration space size on SRAM.
* @sram_size: total size of SRAM.
@@ -645,10 +647,10 @@ struct hl_hints_range {
* @num_engine_cores: number of engine cpu cores.
* @max_num_of_engines: maximum number of all engines in the ASIC.
* @num_of_special_blocks: special_blocks array size.
- * @glbl_err_cause_num: global err cause number.
+ * @glbl_err_max_cause_num: global err max cause number.
* @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
* not supported.
- * @reserved_fw_mem_size: size in MB of dram memory reserved for FW.
+ * @reserved_fw_mem_size: size of dram memory reserved for FW.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -743,8 +745,6 @@ struct asic_fixed_properties {
u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
- u32 mmu_hop_table_size;
- u32 mmu_hop0_tables_total_size;
u32 dram_page_size;
u32 cfg_size;
u32 sram_size;
@@ -779,7 +779,7 @@ struct asic_fixed_properties {
u32 num_engine_cores;
u32 max_num_of_engines;
u32 num_of_special_blocks;
- u32 glbl_err_cause_num;
+ u32 glbl_err_max_cause_num;
u32 hbw_flush_reg;
u32 reserved_fw_mem_size;
u16 collective_first_sob;
@@ -1052,6 +1052,8 @@ struct hl_encaps_signals_mgr {
* @collective_mode: collective mode of current queue
* @kernel_address: holds the queue's kernel virtual address.
* @bus_address: holds the queue's DMA address.
+ * @pq_dram_address: hold the dram address when the PQ is allocated, used when dram_bd is true in
* queue properties.
* @pi: holds the queue's pi value.
* @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
* @hw_queue_id: the id of the H/W queue.
@@ -1061,6 +1063,7 @@ struct hl_encaps_signals_mgr {
* @valid: is the queue valid (we have array of 32 queues, not all of them
* exist).
* @supports_sync_stream: True if queue supports sync stream
+ * @dram_bd: True if the bd should be copied to dram, needed for PQ which has been allocated on dram
*/
struct hl_hw_queue {
struct hl_cs_job **shadow_queue;
@@ -1069,6 +1072,7 @@ struct hl_hw_queue {
enum hl_collective_mode collective_mode;
void *kernel_address;
dma_addr_t bus_address;
+ u64 pq_dram_address;
u32 pi;
atomic_t ci;
u32 hw_queue_id;
@@ -1077,6 +1081,7 @@ struct hl_hw_queue {
u16 int_queue_len;
u8 valid;
u8 supports_sync_stream;
+ u8 dram_bd;
};
/**
@@ -2547,7 +2552,7 @@ struct hl_state_dump_specs {
* DEVICES
*/
-#define HL_STR_MAX 32
+#define HL_STR_MAX 64
#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
@@ -3257,6 +3262,7 @@ struct hl_reset_info {
* @clk_throttling: holds information about current/previous clock throttling events
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
+ * @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
* @fw_inner_minor_ver: the minor of current loaded preboot inner version.
@@ -3446,6 +3452,8 @@ struct hl_device {
struct hl_reset_info reset_info;
+ cpumask_t irq_affinity_mask;
+
u32 *stream_master_qid_arr;
u32 fw_inner_major_ver;
u32 fw_inner_minor_ver;
@@ -3886,6 +3894,7 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
struct hl_hr_mmu_funcs *hr_func);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
+void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
@@ -3893,6 +3902,22 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
+struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr);
+void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr);
+void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info);
+u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx);
+u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx);
+void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val);
+void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val);
+void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr);
+u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
+void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr);
+int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr);
+u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop);
+u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx);
+void hl_mmu_dr_flush(struct hl_ctx *ctx);
+int hl_mmu_dr_init(struct hl_device *hdev);
+void hl_mmu_dr_fini(struct hl_device *hdev);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
@@ -4032,6 +4057,8 @@ void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count);
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);
+void hl_init_cpu_for_irq(struct hl_device *hdev);
+void hl_set_irq_affinity(struct hl_device *hdev, int irq);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/accel/habanalabs/common/hw_queue.c b/drivers/accel/habanalabs/common/hw_queue.c
index d0087c0ec48c..3d04a7507cce 100644
--- a/drivers/accel/habanalabs/common/hw_queue.c
+++ b/drivers/accel/habanalabs/common/hw_queue.c
@@ -84,6 +84,8 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
+ u64 addr;
+ int i;
bd = q->kernel_address;
bd += hl_pi_2_offset(q->pi);
@@ -91,7 +93,16 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
bd->len = cpu_to_le32(len);
bd->ptr = cpu_to_le64(ptr);
+ if (q->dram_bd)
+ for (i = 0 ; i < 2 ; i++) {
+ addr = q->pq_dram_address +
+ ((hl_pi_2_offset(q->pi) * sizeof(struct hl_bd)) + (i * sizeof(u64)));
+ hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM, addr,
+ (u64 *)(bd) + i, DEBUGFS_WRITE64);
+ }
+
q->pi = hl_queue_inc_ptr(q->pi);
+
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
@@ -1087,12 +1098,18 @@ int hl_hw_queues_create(struct hl_device *hdev)
q->supports_sync_stream =
asic->hw_queues_props[i].supports_sync_stream;
q->collective_mode = asic->hw_queues_props[i].collective_mode;
+ q->dram_bd = asic->hw_queues_props[i].dram_bd;
+
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
goto release_queues;
}
+
+ /* Set DRAM PQ address for the queue if it should be at DRAM */
+ if (q->dram_bd)
+ q->pq_dram_address = asic->hw_queues_props[i].q_dram_bd_address;
}
return 0;
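
A small sketch (userspace C; the descriptor layout is assumed and the index wrapping done by hl_pi_2_offset() is omitted) of the address arithmetic the patched hl_hw_queue_submit_bd() uses when dram_bd is set: each of the two 8-byte words of the descriptor is written to the DRAM copy of the PQ at the slot selected by the producer index.

#include <stdio.h>
#include <stdint.h>

/* Assumed 16-byte descriptor layout: two u64 words, matching the
 * two-iteration copy loop in the patch. */
struct bd_sketch {
        uint64_t ctl_len;       /* word 0: control + length */
        uint64_t ptr;           /* word 1: payload address */
};

int main(void)
{
        uint64_t pq_dram_address = 0x2000000000ULL;     /* made-up PQ base in DRAM */
        uint32_t pi = 5;                                /* made-up producer index */
        int i;

        for (i = 0; i < 2; i++) {
                uint64_t addr = pq_dram_address +
                                (uint64_t)pi * sizeof(struct bd_sketch) +
                                i * sizeof(uint64_t);
                printf("word %d -> DRAM address 0x%llx\n",
                       i, (unsigned long long)addr);
        }
        return 0;
}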
diff --git a/drivers/accel/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 1ee2ee07e9ed..36b951b5f503 100644
--- a/drivers/accel/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
@@ -46,7 +46,7 @@ static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types
break;
default:
- dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ dev_err_ratelimited(hdev->dev, "unsupported h/w sensor type %d\n", type);
flags = cpucp_flags;
break;
}
@@ -134,7 +134,7 @@ static u32 adjust_hwmon_flags(struct hl_device *hdev, enum hwmon_sensor_types ty
break;
default:
- dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ dev_err_ratelimited(hdev->dev, "unsupported h/w sensor type %d\n", type);
flags = cpucp_flags;
break;
}
@@ -162,7 +162,8 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sen
break;
if (type >= HWMON_NR_SENSOR_TYPES) {
- dev_err(hdev->dev, "Got wrong sensor type %d from device\n", type);
+ dev_err_ratelimited(hdev->dev,
+ "Got wrong sensor type %d from device\n", type);
return -EINVAL;
}
@@ -584,7 +585,7 @@ int hl_get_temperature(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -611,7 +612,7 @@ int hl_set_temperature(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set temperature of sensor %d, error %d\n",
sensor_index, rc);
@@ -638,7 +639,7 @@ int hl_get_voltage(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -667,7 +668,7 @@ int hl_get_current(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -696,7 +697,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -725,7 +726,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
@@ -752,7 +753,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set pwm info to sensor %d, error %d\n",
sensor_index, rc);
}
@@ -775,7 +776,7 @@ int hl_set_voltage(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set voltage of sensor %d, error %d\n",
sensor_index, rc);
@@ -800,7 +801,7 @@ int hl_set_current(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set current of sensor %d, error %d\n",
sensor_index, rc);
@@ -831,7 +832,7 @@ int hl_set_power(struct hl_device *hdev,
0, NULL);
if (rc)
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to set power of sensor %d, error %d\n",
sensor_index, rc);
@@ -858,7 +859,7 @@ int hl_get_power(struct hl_device *hdev,
*value = (long) result;
if (rc) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"Failed to get power of sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
diff --git a/drivers/accel/habanalabs/common/mmu/Makefile b/drivers/accel/habanalabs/common/mmu/Makefile
index 1806c524e04a..f4b815bf4f7d 100644
--- a/drivers/accel/habanalabs/common/mmu/Makefile
+++ b/drivers/accel/habanalabs/common/mmu/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
- common/mmu/mmu_v2_hr.o
+ common/mmu/mmu_v2.o common/mmu/mmu_v2_hr.o
diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index b654302a68fc..d3eaab908457 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -585,6 +585,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
if (hdev->mmu_disable)
return 0;
@@ -597,8 +599,9 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
case ASIC_GAUDI2C:
- /* MMUs in Gaudi2 are always host resident */
- hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
+ hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
+ if (prop->pmmu.host_resident)
+ hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -1209,3 +1212,219 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
return 0;
}
+struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
+ hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+ (unsigned long) hop_addr)
+ if (hop_addr == pgt_info->shadow_addr)
+ break;
+
+ return pgt_info;
+}
+
+void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
+
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+}
+
+void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
+ hdev->asic_prop.dmmu.hop_table_size);
+ hash_del(&pgt_info->node);
+ kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
+ kfree(pgt_info);
+}
+
+u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
+}
+
+u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx)
+{
+ return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
+ (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
+}
+
+u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
+{
+ u64 page_mask = ctx->hdev->asic_prop.dmmu.hop_table_size - 1;
+ u64 shadow_hop_addr = shadow_addr & (~page_mask);
+ u64 pte_offset = shadow_addr & page_mask;
+ u64 phys_hop_addr;
+
+ if (shadow_hop_addr != hl_mmu_dr_get_hop0_addr(ctx))
+ phys_hop_addr = hl_mmu_dr_get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
+ else
+ phys_hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
+
+ return phys_hop_addr + pte_offset;
+}
+
+void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+ u64 phys_val = hl_mmu_dr_get_phys_addr(ctx, val);
+
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
+ phys_val);
+
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr), val);
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr)
+{
+ hl_mmu_dr_write_final_pte(ctx, pte_addr, 0);
+}
+
+void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ hl_mmu_dr_get_pgt_info(ctx, hop_addr)->num_of_ptes++;
+}
+
+int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because hl_mmu_free_hop might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+
+ return num_of_ptes_left;
+}
+
+u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pgt_info *pgt_info;
+ u64 phys_addr, shadow_addr;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return ULLONG_MAX;
+
+ phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
+ prop->dmmu.hop_table_size);
+ if (!phys_addr) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ goto pool_add_err;
+ }
+
+ shadow_addr = (u64) (uintptr_t) kzalloc(prop->dmmu.hop_table_size,
+ GFP_KERNEL);
+ if (!shadow_addr)
+ goto shadow_err;
+
+ pgt_info->phys_addr = phys_addr;
+ pgt_info->shadow_addr = shadow_addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
+
+ return shadow_addr;
+
+shadow_err:
+ gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool,
+ phys_addr, prop->dmmu.hop_table_size);
+pool_add_err:
+ kfree(pgt_info);
+
+ return ULLONG_MAX;
+}
+
+u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop)
+{
+ u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+ if (hop_addr == ULLONG_MAX) {
+ hop_addr = hl_mmu_dr_alloc_hop(ctx);
+ *is_new_hop = (hop_addr != ULLONG_MAX);
+ }
+
+ return hop_addr;
+}
+
+void hl_mmu_dr_flush(struct hl_ctx *ctx)
+{
+ /* flush all writes from all cores to reach PCI */
+ mb();
+ ctx->hdev->asic_funcs->read_pte(ctx->hdev, hl_mmu_dr_get_phys_hop0_addr(ctx));
+}
+
+int hl_mmu_dr_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ hdev->mmu_priv.dr.mmu_pgt_pool =
+ gen_pool_create(__ffs(prop->dmmu.hop_table_size), -1);
+
+ if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
+ dev_err(hdev->dev, "Failed to create page gen pool\n");
+ return -ENOMEM;
+ }
+
+ rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
+ prop->dmmu.hop0_tables_total_size,
+ prop->dmmu.pgt_size - prop->dmmu.hop0_tables_total_size,
+ -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+ goto err_pool_add;
+ }
+
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid,
+ prop->dmmu.hop_table_size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ rc = -ENOMEM;
+ goto err_pool_add;
+ }
+
+ /* MMU H/W init will be done in device hw_init() */
+
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+
+ return rc;
+}
+
+void hl_mmu_dr_fini(struct hl_device *hdev)
+{
+ /* MMU H/W fini was already done in device hw_fini() */
+
+ if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0))
+ return;
+
+ kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+
+ /* Make sure that if we arrive here again without init was
+ * called we won't cause kernel panic. This can happen for
+ * example if we fail during hard reset code at certain points
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+}
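
A standalone sketch (userspace C; the hash lookup is replaced by a placeholder and all values are made up) of the split-and-rebase arithmetic in hl_mmu_dr_get_phys_addr(): the shadow PTE address is split into a hop base aligned to the hop table size plus an offset, the hop base is translated to its physical counterpart, and the offset is added back.

#include <stdio.h>
#include <stdint.h>

/* Placeholder for the shadow->physical hop lookup; the real code walks a
 * hash of pgt_info entries keyed by the shadow hop address. */
static uint64_t lookup_phys_hop(uint64_t shadow_hop)
{
        return shadow_hop ^ 0x0ff0000000ULL;
}

int main(void)
{
        uint64_t hop_table_size = 4096;                 /* assumed hop table size */
        uint64_t page_mask = hop_table_size - 1;
        uint64_t shadow_pte = 0x7f12345120ULL;          /* made-up shadow PTE address */

        uint64_t shadow_hop = shadow_pte & ~page_mask;  /* hop holding the PTE */
        uint64_t pte_offset = shadow_pte & page_mask;   /* offset inside that hop */
        uint64_t phys = lookup_phys_hop(shadow_hop) + pte_offset;

        printf("physical PTE address = 0x%llx\n", (unsigned long long)phys);
        return 0;
}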
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v1.c b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
index d925dc4dd097..845d16aaa637 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
@@ -12,166 +12,6 @@
#define MMU_V1_MAX_HOPS (MMU_HOP4 + 1)
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
-
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = NULL;
-
- hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
- (unsigned long) hop_addr)
- if (hop_addr == pgt_info->shadow_addr)
- break;
-
- return pgt_info;
-}
-
-static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
-{
- struct hl_device *hdev = ctx->hdev;
-
- gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
- hdev->asic_prop.mmu_hop_table_size);
- hash_del(&pgt_info->node);
- kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
- kfree(pgt_info);
-}
-
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
-
- _free_hop(ctx, pgt_info);
-}
-
-static u64 alloc_hop(struct hl_ctx *ctx)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct pgt_info *pgt_info;
- u64 phys_addr, shadow_addr;
-
- pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
- if (!pgt_info)
- return ULLONG_MAX;
-
- phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
- prop->mmu_hop_table_size);
- if (!phys_addr) {
- dev_err(hdev->dev, "failed to allocate page\n");
- goto pool_add_err;
- }
-
- shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
- GFP_KERNEL);
- if (!shadow_addr)
- goto shadow_err;
-
- pgt_info->phys_addr = phys_addr;
- pgt_info->shadow_addr = shadow_addr;
- pgt_info->ctx = ctx;
- pgt_info->num_of_ptes = 0;
- hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
-
- return shadow_addr;
-
-shadow_err:
- gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
- prop->mmu_hop_table_size);
-pool_add_err:
- kfree(pgt_info);
-
- return ULLONG_MAX;
-}
-
-static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
-{
- return ctx->hdev->asic_prop.mmu_pgt_addr +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
- return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static void flush(struct hl_ctx *ctx)
-{
- /* flush all writes from all cores to reach PCI */
- mb();
- ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
-}
-
-/* transform the value to physical address when writing to H/W */
-static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
-{
- /*
- * The value to write is actually the address of the next shadow hop +
- * flags at the 12 LSBs.
- * Hence in order to get the value to write to the physical PTE, we
- * clear the 12 LSBs and translate the shadow hop to its associated
- * physical hop, and add back the original 12 LSBs.
- */
- u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
- (val & FLAGS_MASK);
-
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- get_phys_addr(ctx, shadow_pte_addr),
- phys_val);
-
- *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* do not transform the value to physical address when writing to H/W */
-static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
- u64 val)
-{
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- get_phys_addr(ctx, shadow_pte_addr),
- val);
- *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* clear the last and present bits */
-static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
-{
- /* no need to transform the value to physical address */
- write_final_pte(ctx, pte_addr, 0);
-}
-
-static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
- get_pgt_info(ctx, hop_addr)->num_of_ptes++;
-}
-
-/*
- * put_pte - decrement the num of ptes and free the hop if possible
- *
- * @ctx: pointer to the context structure
- * @hop_addr: addr of the hop
- *
- * This function returns the number of ptes left on this hop. If the number is
- * 0, it means the pte was freed.
- */
-static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
- int num_of_ptes_left;
-
- pgt_info->num_of_ptes--;
-
- /*
- * Need to save the number of ptes left because free_hop might free
- * the pgt_info
- */
- num_of_ptes_left = pgt_info->num_of_ptes;
- if (!num_of_ptes_left)
- _free_hop(ctx, pgt_info);
-
- return num_of_ptes_left;
-}
-
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
@@ -183,35 +23,6 @@ static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties
ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
-static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
- bool *is_new_hop)
-{
- u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
- if (hop_addr == ULLONG_MAX) {
- hop_addr = alloc_hop(ctx);
- *is_new_hop = (hop_addr != ULLONG_MAX);
- }
-
- return hop_addr;
-}
-
-/* translates shadow address inside hop to a physical address */
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
-{
- u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
- u64 shadow_hop_addr = shadow_addr & ~page_mask;
- u64 pte_offset = shadow_addr & page_mask;
- u64 phys_hop_addr;
-
- if (shadow_hop_addr != get_hop0_addr(ctx))
- phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
- else
- phys_hop_addr = get_phys_hop0_addr(ctx);
-
- return phys_hop_addr + pte_offset;
-}
-
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -232,13 +43,13 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
- ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
+ ctx->dram_default_hops = kcalloc(total_hops, HL_PTE_SIZE, GFP_KERNEL);
if (!ctx->dram_default_hops)
return -ENOMEM;
- hop0_addr = get_hop0_addr(ctx);
+ hop0_addr = hl_mmu_dr_get_hop0_addr(ctx);
- hop1_addr = alloc_hop(ctx);
+ hop1_addr = hl_mmu_dr_alloc_hop(ctx);
if (hop1_addr == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 1\n");
rc = -ENOMEM;
@@ -247,7 +58,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
ctx->dram_default_hops[total_hops - 1] = hop1_addr;
- hop2_addr = alloc_hop(ctx);
+ hop2_addr = hl_mmu_dr_alloc_hop(ctx);
if (hop2_addr == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 2\n");
rc = -ENOMEM;
@@ -257,7 +68,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
ctx->dram_default_hops[total_hops - 2] = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- ctx->dram_default_hops[i] = alloc_hop(ctx);
+ ctx->dram_default_hops[i] = hl_mmu_dr_alloc_hop(ctx);
if (ctx->dram_default_hops[i] == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
rc = -ENOMEM;
@@ -268,18 +79,18 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
/* need only pte 0 in hops 0 and 1 */
pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop0_addr, pte_val);
+ hl_mmu_dr_write_pte(ctx, hop0_addr, pte_val);
pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop1_addr, pte_val);
- get_pte(ctx, hop1_addr);
+ hl_mmu_dr_write_pte(ctx, hop1_addr, pte_val);
+ hl_mmu_dr_get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
- write_pte(ctx, hop2_pte_addr, pte_val);
- get_pte(ctx, hop2_addr);
+ hl_mmu_dr_write_pte(ctx, hop2_pte_addr, pte_val);
+ hl_mmu_dr_get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
@@ -289,23 +100,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
- write_final_pte(ctx, hop3_pte_addr, pte_val);
- get_pte(ctx, ctx->dram_default_hops[i]);
+ hl_mmu_dr_write_final_pte(ctx, hop3_pte_addr, pte_val);
+ hl_mmu_dr_get_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
- flush(ctx);
+ hl_mmu_dr_flush(ctx);
return 0;
hop3_err:
for (i = 0 ; i < hop3_allocated ; i++)
- free_hop(ctx, ctx->dram_default_hops[i]);
+ hl_mmu_dr_free_hop(ctx, ctx->dram_default_hops[i]);
- free_hop(ctx, hop2_addr);
+ hl_mmu_dr_free_hop(ctx, hop2_addr);
hop2_err:
- free_hop(ctx, hop1_addr);
+ hl_mmu_dr_free_hop(ctx, hop1_addr);
hop1_err:
kfree(ctx->dram_default_hops);
@@ -329,7 +140,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
do_div(num_of_hop3, prop->dram_page_size);
do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
- hop0_addr = get_hop0_addr(ctx);
+ hop0_addr = hl_mmu_dr_get_hop0_addr(ctx);
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
hop1_addr = ctx->dram_default_hops[total_hops - 1];
@@ -338,101 +149,26 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
- clear_pte(ctx, hop3_pte_addr);
- put_pte(ctx, ctx->dram_default_hops[i]);
+ hl_mmu_dr_clear_pte(ctx, hop3_pte_addr);
+ hl_mmu_dr_put_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- clear_pte(ctx, hop2_pte_addr);
- put_pte(ctx, hop2_addr);
+ hl_mmu_dr_clear_pte(ctx, hop2_pte_addr);
+ hl_mmu_dr_put_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
- clear_pte(ctx, hop1_addr);
- put_pte(ctx, hop1_addr);
- clear_pte(ctx, hop0_addr);
+ hl_mmu_dr_clear_pte(ctx, hop1_addr);
+ hl_mmu_dr_put_pte(ctx, hop1_addr);
+ hl_mmu_dr_clear_pte(ctx, hop0_addr);
kfree(ctx->dram_default_hops);
- flush(ctx);
-}
-
-/**
- * hl_mmu_v1_init() - initialize the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Create a pool of pages for pgt_infos.
- * - Create a shadow table for pgt
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int hl_mmu_v1_init(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- int rc;
-
- hdev->mmu_priv.dr.mmu_pgt_pool =
- gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
- if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
- dev_err(hdev->dev, "Failed to create page gen pool\n");
- return -ENOMEM;
- }
-
- rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
- prop->mmu_hop0_tables_total_size,
- prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
- -1);
- if (rc) {
- dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
- goto err_pool_add;
- }
-
- hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
- GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
- rc = -ENOMEM;
- goto err_pool_add;
- }
-
- /* MMU H/W init will be done in device hw_init() */
-
- return 0;
-
-err_pool_add:
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
- return rc;
-}
-
-/**
- * hl_mmu_v1_fini() - release the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Disable MMU in H/W.
- * - Free the pgt_infos pool.
- *
- * All contexts should be freed before calling this function.
- */
-static void hl_mmu_v1_fini(struct hl_device *hdev)
-{
- /* MMU H/W fini was already done in device hw_fini() */
-
- if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
- kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
- /* Make sure that if we arrive here again without init was
- * called we won't cause kernel panic. This can happen for
- * example if we fail during hard reset code at certain points
- */
- hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
- }
+ hl_mmu_dr_flush(ctx);
}
/**
@@ -476,7 +212,7 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- _free_hop(ctx, pgt_info);
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
}
}
@@ -495,7 +231,7 @@ static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
if (hop_idx == MMU_HOP0) {
- hop_addr[hop_idx] = get_hop0_addr(ctx);
+ hop_addr[hop_idx] = hl_mmu_dr_get_hop0_addr(ctx);
} else {
hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr[hop_idx] == ULLONG_MAX)
@@ -546,30 +282,30 @@ static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
}
hop_idx = MMU_HOP3;
- write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
- put_pte(ctx, hop_addr[hop_idx]);
+ hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
+ hl_mmu_dr_put_pte(ctx, hop_addr[hop_idx]);
} else {
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
if (hop_addr[MMU_HOP4])
- clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
else
- clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
- if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
+ if (hop_addr[MMU_HOP4] && !hl_mmu_dr_put_pte(ctx, hop_addr[MMU_HOP4]))
clear_hop3 = true;
if (!clear_hop3)
goto mapped;
for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
- clear_pte(ctx, hop_pte_addr[hop_idx]);
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[hop_idx]);
if (hop_idx == MMU_HOP0)
break;
- if (put_pte(ctx, hop_addr[hop_idx]))
+ if (hl_mmu_dr_put_pte(ctx, hop_addr[hop_idx]))
goto mapped;
}
}
@@ -616,10 +352,10 @@ static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
if (hop_idx == MMU_HOP0) {
- hop_addr[hop_idx] = get_hop0_addr(ctx);
+ hop_addr[hop_idx] = hl_mmu_dr_get_hop0_addr(ctx);
} else {
hop_addr[hop_idx] =
- get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
+ hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
if (hop_addr[hop_idx] == ULLONG_MAX)
goto err;
}
@@ -666,27 +402,27 @@ static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
- write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
+ hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
prev_hop = hop_idx - 1;
if (hop_new[hop_idx]) {
curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
+ hl_mmu_dr_write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
if (hop_idx != MMU_HOP1)
- get_pte(ctx, hop_addr[prev_hop]);
+ hl_mmu_dr_get_pte(ctx, hop_addr[prev_hop]);
}
}
- get_pte(ctx, hop_addr[num_hops - 1]);
+ hl_mmu_dr_get_pte(ctx, hop_addr[num_hops - 1]);
return 0;
err:
for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
if (hop_new[hop_idx])
- free_hop(ctx, hop_addr[hop_idx]);
+ hl_mmu_dr_free_hop(ctx, hop_addr[hop_idx]);
}
return rc;
@@ -752,7 +488,7 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
if (is_huge)
used_hops--;
- hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
+ hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
hops->hop_info[0].hop_pte_addr =
hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
hops->hop_info[0].hop_addr, virt_addr);
@@ -801,13 +537,13 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
*/
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
- mmu->init = hl_mmu_v1_init;
- mmu->fini = hl_mmu_v1_fini;
+ mmu->init = hl_mmu_dr_init;
+ mmu->fini = hl_mmu_dr_fini;
mmu->ctx_init = hl_mmu_v1_ctx_init;
mmu->ctx_fini = hl_mmu_v1_ctx_fini;
mmu->map = hl_mmu_v1_map;
mmu->unmap = hl_mmu_v1_unmap;
- mmu->flush = flush;
+ mmu->flush = hl_mmu_dr_flush;
mmu->swap_out = hl_mmu_v1_swap_out;
mmu->swap_in = hl_mmu_v1_swap_in;
mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v2.c b/drivers/accel/habanalabs/common/mmu/mmu_v2.c
new file mode 100644
index 000000000000..4bc0268fff1c
--- /dev/null
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
+#include "../../include/hw_ip/mmu/mmu_v2_0.h"
+
+#include <linux/slab.h>
+
+/**
+ * hl_mmu_v2_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page tables hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+static int hl_mmu_v2_ctx_init(struct hl_ctx *ctx)
+{
+ hash_init(ctx->mmu_shadow_hash);
+
+ return 0;
+}
+
+/*
+ * hl_mmu_v2_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ * - Free the mutex
+ * - Free DRAM default page mapping hops
+ */
+static void hl_mmu_v2_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ int i;
+
+ if (!hash_empty(ctx->mmu_shadow_hash))
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
+
+ hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
+ dev_err_ratelimited(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+ hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+ }
+}
+
+static int hl_mmu_v2_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
+{
+ u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 }, curr_pte,
+ scrambled_virt_addr;
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ bool is_huge = false;
+ int i, hop_last;
+
+ /* device resident in V2 are allowed only for HMMU */
+ if (!is_dram_addr)
+ return -EINVAL;
+
+ mmu_prop = &prop->dmmu;
+
+ hop_last = mmu_prop->num_hops - 1;
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+ hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
+ hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+ hop_addr[0], scrambled_virt_addr);
+ if (hop_pte_addr[0] == U64_MAX)
+ return -EFAULT;
+
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
+
+ for (i = 1 ; i < mmu_prop->num_hops ; i++) {
+ hop_addr[i] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+ if (hop_addr[i] == ULLONG_MAX)
+ goto not_mapped;
+
+ hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hop_addr[i], scrambled_virt_addr);
+ if (hop_pte_addr[i] == U64_MAX)
+ return -EFAULT;
+
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
+
+ if ((i <= hop_last) && (curr_pte & mmu_prop->last_mask)) {
+ hop_last = i;
+ is_huge = true;
+ break;
+ }
+ }
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ for (i = hop_last ; i > 0 ; i--) {
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[i]);
+ if (hl_mmu_dr_put_pte(ctx, hop_addr[i]))
+ goto mapped;
+ }
+ hl_mmu_dr_clear_pte(ctx, hop_pte_addr[0]);
+
+mapped:
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+
+ return -EINVAL;
+}
+
+static int hl_mmu_v2_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool is_dram_addr)
+{
+ u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 },
+ curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ bool hop_new[MMU_ARCH_6_HOPS] = { false };
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ int rc, i, hop_last;
+
+ /* device resident in V2 are allowed only for HMMU */
+ if (!is_dram_addr)
+ return -EINVAL;
+
+ mmu_prop = &prop->dmmu;
+
+ hop_last = mmu_prop->num_hops - 1;
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+ scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
+
+ /* First hop is preallocated therefore it is treated differently */
+ hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
+ hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+ hop_addr[0], scrambled_virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
+
+ /* Handle hop1 to hop_last */
+ for (i = 1 ; i <= hop_last ; i++) {
+ hop_addr[i] = hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[i]);
+ if (hop_addr[i] == ULLONG_MAX) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hop_addr[i], scrambled_virt_addr);
+ if (hop_pte_addr[i] == U64_MAX) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (!hop_pte_addr[i]) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
+ }
+
+ if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev,
+ "mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+
+ for (i = 0 ; i <= hop_last ; i++)
+ dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
+ i, *(u64 *) (uintptr_t) hop_pte_addr[i],
+ hop_pte_addr[i]);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK)
+ | mmu_prop->last_mask | PAGE_PRESENT_MASK;
+
+ /* Write the PTEs */
+ hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_last], curr_pte);
+
+ /* for each new hop, add its address to the table of previous-hop */
+ for (i = 1 ; i <= hop_last ; i++) {
+ if (hop_new[i]) {
+ curr_pte = (hop_addr[i] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ hl_mmu_dr_write_pte(ctx, hop_pte_addr[i - 1], curr_pte);
+
+ if (i - 1)
+ hl_mmu_dr_get_pte(ctx, hop_addr[i - 1]);
+ }
+ }
+ hl_mmu_dr_get_pte(ctx, hop_addr[hop_last]);
+
+ return 0;
+
+err:
+ for (i = 1 ; i <= hop_last ; i++)
+ if (hop_new[i] && (hop_addr[i] != U64_MAX))
+ hl_mmu_dr_free_hop(ctx, hop_addr[i]);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_v2_swap_out - marks all mapping of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v2_swap_in - marks all mapping of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_swap_in(struct hl_ctx *ctx)
+{
+
+}
+
+static int hl_mmu_v2_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops)
+{
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ bool is_dram_addr;
+ int i;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+
+ /* device resident in V2 are allowed only for HMMU */
+ if (!is_dram_addr)
+ return -EINVAL;
+
+ mmu_prop = &prop->dmmu;
+ hops->range_type = HL_VA_RANGE_TYPE_DRAM;
+
+ hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+ hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
+ hops->hop_info[0].hop_pte_addr = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+ hops->hop_info[0].hop_addr,
+ hops->scrambled_vaddr);
+ if (hops->hop_info[0].hop_pte_addr == U64_MAX)
+ return -EFAULT;
+
+ hops->hop_info[0].hop_pte_val = hdev->asic_funcs->read_pte(hdev,
+ hops->hop_info[0].hop_pte_addr);
+ if (hops->hop_info[0].hop_pte_val == U64_MAX)
+ return -EFAULT;
+
+ for (i = 1 ; i < mmu_prop->num_hops ; i++) {
+ hops->hop_info[i].hop_addr =
+ hl_mmu_get_next_hop_addr(ctx, hops->hop_info[i - 1].hop_pte_val);
+ if (hops->hop_info[i].hop_addr == ULLONG_MAX)
+ return -EFAULT;
+
+ hops->hop_info[i].hop_pte_addr =
+ hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops->hop_info[i].hop_addr,
+ hops->scrambled_vaddr);
+ if (hops->hop_info[i].hop_pte_addr == U64_MAX)
+ return -EFAULT;
+
+ hops->hop_info[i].hop_pte_val =
+ hdev->asic_funcs->read_pte(hdev,
+ hops->hop_info[i].hop_pte_addr);
+
+ if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+ return -EFAULT;
+
+ if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
+ break;
+ }
+
+ /* if passed over all hops then no last hop was found */
+ if (i == mmu_prop->num_hops)
+ return -EFAULT;
+
+ if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+ return -EFAULT;
+
+ if (hops->scrambled_vaddr != virt_addr)
+ hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
+ (hdev, hops->hop_info[i].hop_pte_val);
+ else
+ hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
+
+ hops->used_hops = i + 1;
+
+ return 0;
+}
+
+/*
+ * hl_mmu_v2_prepare - prepare mmu_if for working with mmu v2
+ *
+ * @hdev: pointer to the device structure
+ * @mmu_if: pointer to the mmu interface structure
+ */
+void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
+{
+ mmu->init = hl_mmu_dr_init;
+ mmu->fini = hl_mmu_dr_fini;
+ mmu->ctx_init = hl_mmu_v2_ctx_init;
+ mmu->ctx_fini = hl_mmu_v2_ctx_fini;
+ mmu->map = hl_mmu_v2_map;
+ mmu->unmap = hl_mmu_v2_unmap;
+ mmu->flush = hl_mmu_dr_flush;
+ mmu->swap_out = hl_mmu_v2_swap_out;
+ mmu->swap_in = hl_mmu_v2_swap_in;
+ mmu->get_tlb_info = hl_mmu_v2_get_tlb_info;
+}
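
A toy version (userspace C over a pre-built PTE array; the flag values are assumed) of the walk pattern hl_mmu_v2_get_tlb_info() and hl_mmu_v2_unmap() follow: each hop's PTE must be present, and the walk stops at the first PTE carrying the "last" flag, which marks the final (possibly huge) mapping.

#include <stdio.h>
#include <stdint.h>

#define PAGE_PRESENT    0x1ULL
#define LAST_MASK       0x800ULL        /* assumed "last hop" flag bit */
#define MAX_HOPS        6

int main(void)
{
        /* Toy PTE chain; the real code reads each PTE from device memory. */
        uint64_t pte[MAX_HOPS] = {
                0x1000 | PAGE_PRESENT,                  /* hop0 -> hop1 */
                0x2000 | PAGE_PRESENT,                  /* hop1 -> hop2 */
                0x3000 | PAGE_PRESENT | LAST_MASK,      /* hop2 maps the page */
        };
        int i, used_hops = 0;

        for (i = 0; i < MAX_HOPS; i++) {
                if (!(pte[i] & PAGE_PRESENT)) {
                        printf("not mapped at hop %d\n", i);
                        return 1;
                }
                used_hops = i + 1;
                if (pte[i] & LAST_MASK)
                        break;
        }
        printf("translation used %d hops\n", used_hops);
        return 0;
}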
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
index afe7ef964f82..31507b2a431b 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
@@ -47,7 +47,7 @@ static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
+ return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->pmmu.hop_table_size,
prop->mmu_pgt_size);
}
@@ -65,7 +65,7 @@ static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
+ hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->pmmu.hop_table_size);
}
/**
@@ -108,7 +108,7 @@ static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
}
}
@@ -150,7 +150,7 @@ static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
hop_last = i;
@@ -169,14 +169,14 @@ static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
for (i = hop_last ; i > 0 ; i--) {
hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
- ctx->hdev->asic_prop.mmu_hop_table_size))
+ ctx->hdev->asic_prop.pmmu.hop_table_size))
goto mapped;
}
hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
mapped:
return 0;
@@ -255,7 +255,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
scrambled_virt_addr);
curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
}
if (curr_pte & PAGE_PRESENT_MASK) {
@@ -268,7 +268,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
*(u64 *) (uintptr_t)
hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
- ctx->hdev->asic_prop.mmu_hop_table_size),
+ ctx->hdev->asic_prop.pmmu.hop_table_size),
hop_pte_phys_addr[i]);
rc = -EINVAL;
goto err;
@@ -279,7 +279,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
/* Write the PTEs */
hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
/* for each new hop, add its address to the table of previous-hop */
for (i = 1 ; i <= hop_last ; i++) {
@@ -287,7 +287,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
- curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
+ curr_pte, ctx->hdev->asic_prop.pmmu.hop_table_size);
if (i - 1)
hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
hops_pgt_info[i - 1]->phys_addr);
@@ -303,7 +303,7 @@ err:
for (i = 1 ; i <= hop_last ; i++)
if (hop_new[i] && hops_pgt_info[i])
hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
- ctx->hdev->asic_prop.mmu_hop_table_size);
+ ctx->hdev->asic_prop.pmmu.hop_table_size);
return rc;
}
diff --git a/drivers/accel/habanalabs/common/security.c b/drivers/accel/habanalabs/common/security.c
index fe913965dbad..5402a3cd0491 100644
--- a/drivers/accel/habanalabs/common/security.c
+++ b/drivers/accel/habanalabs/common/security.c
@@ -7,15 +7,31 @@
#include "habanalabs.h"
-static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
+static const char * const hl_glbl_error_cause[] = {
"Error due to un-priv read",
"Error due to un-secure read",
"Error due to read from unmapped reg",
"Error due to un-priv write",
"Error due to un-secure write",
"Error due to write to unmapped reg",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
"External I/F write sec violation",
"External I/F write to un-mapped reg",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
"Read to write only",
"Write to read only"
};
@@ -671,10 +687,11 @@ static bool hl_check_block_range_exclusion(struct hl_device *hdev,
static int hl_read_glbl_errors(struct hl_device *hdev,
u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
- struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_special_block_info *special_blocks = prop->special_blocks;
struct hl_special_block_info *current_block = &special_blocks[blk_idx];
u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
- base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
+ base = current_block->base_addr - lower_32_bits(prop->cfg_base_address);
int i;
block_base = base + major * current_block->major_offset +
@@ -689,13 +706,13 @@ static int hl_read_glbl_errors(struct hl_device *hdev,
glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
addr_val = RREG32(glbl_err_addr);
- for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
+ for (i = 0 ; i <= prop->glbl_err_max_cause_num ; i++) {
if (cause_val & BIT(i))
dev_err_ratelimited(hdev->dev,
- "%s, addr %#llx\n",
- hl_glbl_error_cause[i],
- hdev->asic_prop.cfg_base_address + block_base +
- FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
+ "%s, addr %#llx\n",
+ hl_glbl_error_cause[i],
+ prop->cfg_base_address + block_base +
+ FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
}
WREG32(glbl_err_cause, cause_val);
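The loop above indexes hl_glbl_error_cause[] directly by cause-bit number, which is why the table gained "N/A" fillers and the bound moved from a fixed count to a per-ASIC maximum bit index (GAUDI2_GLBL_ERR_MAX_CAUSE_NUM). A small userspace sketch of the same decode, assuming a shortened table and a hypothetical MAX_CAUSE_NUM of 3:

/* Illustrative userspace sketch of the cause-bit decoding loop above. */
#include <stdio.h>

static const char * const glbl_error_cause[] = {
	"Error due to un-priv read",
	"Error due to un-secure read",
	"N/A",				/* padding keeps index == bit number */
	"Read to write only",
};

#define MAX_CAUSE_NUM 3	/* hypothetical stand-in for GAUDI2_GLBL_ERR_MAX_CAUSE_NUM */

static void report_causes(unsigned int cause_val)
{
	int i;

	for (i = 0; i <= MAX_CAUSE_NUM; i++)
		if (cause_val & (1u << i))
			printf("cause bit %d: %s\n", i, glbl_error_cause[i]);
}

int main(void)
{
	report_causes(0x9);	/* bits 0 and 3 set */
	return 0;
}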
diff --git a/drivers/accel/habanalabs/common/security.h b/drivers/accel/habanalabs/common/security.h
index d7a3b3e82ea4..476f70687c09 100644
--- a/drivers/accel/habanalabs/common/security.h
+++ b/drivers/accel/habanalabs/common/security.h
@@ -13,8 +13,7 @@
struct hl_device;
/* special blocks */
-#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE 10
-#define HL_GLBL_ERR_ADDRESS_MASK GENMASK(11, 0)
+#define HL_GLBL_ERR_ADDRESS_MASK GENMASK(11, 0)
/* GLBL_ERR_ADDR register offset from the start of the block */
#define HL_GLBL_ERR_ADDR_OFFSET 0xF44
/* GLBL_ERR_CAUSE register offset from the start of the block */
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index 53292d4c15c8..f2b04ffb0ecb 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -614,8 +614,6 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
- prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = false;
@@ -637,8 +635,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
prop->pmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
- prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->pmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -649,6 +647,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
prop->dmmu.end_addr = VA_HOST_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->dmmu.pgt_size = prop->mmu_pgt_size;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
@@ -3652,7 +3651,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_addr = prop->mmu_pgt_addr +
- (i * prop->mmu_hop_table_size);
+ (i * prop->dmmu.hop_table_size);
rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
if (rc) {
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index e0e5615ef9b0..fa1c4feb9f89 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -158,11 +158,13 @@
#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
(RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
-#define PSOC_RAZWI_ENG_STR_SIZE 128
-#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
+#define PSOC_RAZWI_ENG_STR_SIZE 128
+#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
/* HW scrambles only bits 0-25 */
-#define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26)
+#define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26)
+
+#define GAUDI2_GLBL_ERR_MAX_CAUSE_NUM 17
struct gaudi2_razwi_info {
u32 axuser_xy;
@@ -2308,11 +2310,26 @@ static int set_number_of_functional_hbms(struct hl_device *hdev)
return 0;
}
+static bool gaudi2_is_edma_queue_id(u32 queue_id)
+{
+
+ switch (queue_id) {
+ case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
+ case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3:
+ case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3:
+ case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int gaudi2_set_dram_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 basic_hbm_page_size;
- int rc;
+ u64 hbm_drv_base_offset = 0, edma_pq_base_addr;
+ u32 basic_hbm_page_size, edma_idx = 0;
+ int rc, i;
rc = set_number_of_functional_hbms(hdev);
if (rc)
@@ -2356,9 +2373,35 @@ static int gaudi2_set_dram_properties(struct hl_device *hdev)
prop->dmmu.start_addr = prop->dram_base_address +
(prop->dram_page_size *
DIV_ROUND_UP_SECTOR_T(prop->dram_size, prop->dram_page_size));
-
prop->dmmu.end_addr = prop->dmmu.start_addr + prop->dram_page_size *
div_u64((VA_HBM_SPACE_END - prop->dmmu.start_addr), prop->dmmu.page_size);
+ /*
+ * The driver can't share a (48MB) HBM page with the F/W, to prevent the F/W from
+ * blocking the driver part with a range register, so it must start at the next (48MB) page
+ */
+ hbm_drv_base_offset = roundup(CPU_FW_IMAGE_SIZE, prop->num_functional_hbms * SZ_8M);
+
+ /*
+ * The NIC driver section and the HMMU page tables section in the HBM need
+ * to fit in the remaining size of the first DRAM page after taking the
+ * F/W image size into account
+ */
+
+ /* Reserve region in HBM for HMMU page tables */
+ prop->mmu_pgt_addr = DRAM_PHYS_BASE + hbm_drv_base_offset +
+ ((prop->dram_page_size - hbm_drv_base_offset) -
+ (HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE + EDMA_SCRATCHPAD_SIZE));
+
+ /* Set EDMA PQs HBM addresses */
+ edma_pq_base_addr = prop->mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE;
+
+ for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) {
+ if (gaudi2_is_edma_queue_id(i)) {
+ prop->hw_queues_props[i].q_dram_bd_address = edma_pq_base_addr +
+ (edma_idx * HL_QUEUE_SIZE_IN_BYTES);
+ edma_idx++;
+ }
+ }
return 0;
}
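To make the new HBM layout concrete, the sketch below reproduces the arithmetic above in plain userspace C. The DRAM physical base, the DRAM page size and the number of functional HBMs are assumed example values; the *_SIZE constants mirror the new gaudi2P.h defines (HMMU page tables, EDMA PQs and the EDMA scratchpad are packed back-to-back at the end of the first DRAM page).

/*
 * Userspace sketch of the HBM layout computed above. dram_phys_base,
 * dram_page_size and num_functional_hbms are assumed example values.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_1M			(1ULL << 20)
#define SZ_8M			(8 * SZ_1M)
#define ROUNDUP(x, a)		((((x) + (a) - 1) / (a)) * (a))

#define CPU_FW_IMAGE_SIZE	(256 * SZ_1M)
#define HMMU_PAGE_TABLES_SIZE	SZ_1M
#define EDMA_PQS_SIZE		(2 * SZ_1M)
#define EDMA_SCRATCHPAD_SIZE	SZ_1M

int main(void)
{
	uint64_t dram_phys_base = 0x1000000000000ULL;	/* assumed */
	uint64_t dram_page_size = 8ULL * 48 * SZ_1M;	/* assumed: 384MB page */
	unsigned int num_functional_hbms = 6;		/* assumed */

	/* F/W may fence off its own 48MB pages, so start at the next one */
	uint64_t drv_base_off = ROUNDUP(CPU_FW_IMAGE_SIZE,
					num_functional_hbms * SZ_8M);

	/* HMMU PGTs + EDMA PQs + scratchpad packed at the end of page 0 */
	uint64_t mmu_pgt_addr = dram_phys_base + drv_base_off +
				((dram_page_size - drv_base_off) -
				 (HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE +
				  EDMA_SCRATCHPAD_SIZE));
	uint64_t edma_pq_base = mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE;
	uint64_t scratchpad = edma_pq_base + EDMA_PQS_SIZE;

	printf("drv base offset : 0x%llx\n", (unsigned long long)drv_base_off);
	printf("HMMU PGTs       : 0x%llx\n", (unsigned long long)mmu_pgt_addr);
	printf("EDMA PQs        : 0x%llx\n", (unsigned long long)edma_pq_base);
	printf("EDMA scratchpad : 0x%llx\n", (unsigned long long)scratchpad);
	return 0;
}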
@@ -2368,7 +2411,7 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hw_queue_properties *q_props;
u32 num_sync_stream_queues = 0;
- int i;
+ int i, rc;
prop->max_queues = GAUDI2_QUEUE_ID_SIZE;
prop->hw_queues_props = kcalloc(prop->max_queues, sizeof(struct hw_queue_properties),
@@ -2391,6 +2434,9 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
}
q_props[i].cb_alloc_flags = CB_ALLOC_USER;
+
+ if (gaudi2_is_edma_queue_id(i))
+ q_props[i].dram_bd = 1;
}
q_props[GAUDI2_QUEUE_ID_CPU_PQ].type = QUEUE_TYPE_CPU;
@@ -2419,46 +2465,43 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1;
- if (hdev->pldm)
- prop->mmu_pgt_size = 0x800000; /* 8MB */
- else
- prop->mmu_pgt_size = MMU_PAGE_TABLES_INITIAL_SIZE;
+ prop->max_asid = 2;
+ prop->dmmu.pgt_size = HMMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
- prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dmmu.hop_shifts[MMU_HOP0] = DHOP0_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP1] = DHOP1_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP2] = DHOP2_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP3] = DHOP3_SHIFT;
- prop->dmmu.hop_shifts[MMU_HOP4] = DHOP4_SHIFT;
prop->dmmu.hop_masks[MMU_HOP0] = DHOP0_MASK;
prop->dmmu.hop_masks[MMU_HOP1] = DHOP1_MASK;
prop->dmmu.hop_masks[MMU_HOP2] = DHOP2_MASK;
prop->dmmu.hop_masks[MMU_HOP3] = DHOP3_MASK;
- prop->dmmu.hop_masks[MMU_HOP4] = DHOP4_MASK;
prop->dmmu.page_size = PAGE_SIZE_1GB;
- prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
+ prop->dmmu.num_hops = MMU_ARCH_4_HOPS;
prop->dmmu.last_mask = LAST_MASK;
- prop->dmmu.host_resident = 1;
- prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->dmmu.host_resident = 0;
+ prop->dmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->dmmu.hop0_tables_total_size = HOP_TABLE_SIZE_512_PTE * prop->max_asid;
- /*
- * this is done in order to be able to validate FW descriptor (i.e. validating that
- * the addresses and allocated space for FW image does not cross memory bounds).
- * for this reason we set the DRAM size to the minimum possible and later it will
- * be modified according to what reported in the cpucp info packet
+ /* The PGT address must be set in DRAM for HMMU init, so we cannot wait
+ * for the F/W cpucp info packet to set the DRAM properties, as MMU init
+ * comes before H/W init
*/
- prop->dram_size = (GAUDI2_HBM_NUM - 1) * SZ_16G;
+ rc = hdev->asic_funcs->set_dram_properties(hdev);
+ if (rc)
+ goto free_qprops;
+ prop->mmu_pgt_size = PMMU_PAGE_TABLES_SIZE;
+
+ prop->pmmu.pgt_size = prop->mmu_pgt_size;
hdev->pmmu_huge_range = true;
prop->pmmu.host_resident = 1;
prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
prop->pmmu.last_mask = LAST_MASK;
- prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->pmmu.hop0_tables_total_size = HOP_TABLE_SIZE_512_PTE * prop->max_asid;
prop->hints_host_reserved_va_range.start_addr = RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START;
prop->hints_host_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END;
@@ -2516,7 +2559,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE;
prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
- prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
prop->supports_engine_modes = true;
@@ -2560,6 +2602,10 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
return 0;
+
+free_qprops:
+ kfree(prop->hw_queues_props);
+ return rc;
}
static int gaudi2_pci_bars_map(struct hl_device *hdev)
@@ -3033,6 +3079,25 @@ static int gaudi2_fetch_psoc_frequency(struct hl_device *hdev)
return 0;
}
+static int gaudi2_mmu_clear_pgt_range(struct hl_device *hdev)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_MMU_MASK))
+ return 0;
+
+ if (prop->dmmu.host_resident)
+ return 0;
+
+ rc = gaudi2_memset_device_memory(hdev, prop->mmu_pgt_addr, prop->dmmu.pgt_size, 0);
+ if (rc)
+ dev_err(hdev->dev, "Failed to clear mmu pgt");
+
+ return rc;
+}
+
static int gaudi2_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -3258,6 +3323,12 @@ static int gaudi2_late_init(struct hl_device *hdev)
goto disable_pci_access;
}
+ rc = gaudi2_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
+ goto disable_pci_access;
+ }
+
gaudi2_init_arcs(hdev);
rc = gaudi2_scrub_arcs_dccm(hdev);
@@ -3518,7 +3589,7 @@ static int gaudi2_special_blocks_config(struct hl_device *hdev)
int i, rc;
/* Configure Special blocks */
- prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE;
+ prop->glbl_err_max_cause_num = GAUDI2_GLBL_ERR_MAX_CAUSE_NUM;
prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
prop->special_blocks = kmalloc_array(prop->num_of_special_blocks,
sizeof(*prop->special_blocks), GFP_KERNEL);
@@ -3697,13 +3768,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
spin_lock_init(&gaudi2->hw_queues_lock);
- gaudi2->scratchpad_kernel_address = hl_asic_dma_alloc_coherent(hdev, PAGE_SIZE,
- &gaudi2->scratchpad_bus_address,
- GFP_KERNEL | __GFP_ZERO);
- if (!gaudi2->scratchpad_kernel_address) {
- rc = -ENOMEM;
- goto free_virt_msix_db_mem;
- }
+ gaudi2->scratchpad_bus_address = prop->mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE;
gaudi2_user_mapped_blocks_init(hdev);
@@ -3727,7 +3792,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
rc = gaudi2_special_blocks_iterator_config(hdev);
if (rc)
- goto free_scratchpad_mem;
+ goto free_virt_msix_db_mem;
rc = gaudi2_test_queues_msgs_alloc(hdev);
if (rc)
@@ -3737,9 +3802,6 @@ static int gaudi2_sw_init(struct hl_device *hdev)
special_blocks_free:
gaudi2_special_blocks_iterator_free(hdev);
-free_scratchpad_mem:
- hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
- gaudi2->scratchpad_bus_address);
free_virt_msix_db_mem:
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
free_cpu_accessible_dma_pool:
@@ -3770,9 +3832,6 @@ static int gaudi2_sw_fini(struct hl_device *hdev)
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
- hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
- gaudi2->scratchpad_bus_address);
-
dma_pool_destroy(hdev->dma_pool);
kfree(gaudi2);
@@ -4254,6 +4313,8 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
if (gaudi2->hw_cap_initialized & HW_CAP_MSIX)
return 0;
+ hl_init_cpu_for_irq(hdev);
+
rc = pci_alloc_irq_vectors(hdev->pdev, GAUDI2_MSIX_ENTRIES, GAUDI2_MSIX_ENTRIES,
PCI_IRQ_MSIX);
if (rc < 0) {
@@ -4307,6 +4368,7 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
i++, j++, user_irq_init_cnt++) {
irq = pci_irq_vector(hdev->pdev, i);
+ hl_set_irq_affinity(hdev, irq);
rc = request_irq(irq, hl_irq_user_interrupt_handler, 0, gaudi2_irq_name(i),
&hdev->user_interrupt[j]);
if (rc) {
@@ -4333,6 +4395,7 @@ free_user_irq:
i < GAUDI2_IRQ_NUM_USER_FIRST + user_irq_init_cnt ; i++, j++) {
irq = pci_irq_vector(hdev->pdev, i);
+ irq_set_affinity_and_hint(irq, NULL);
free_irq(irq, &hdev->user_interrupt[j]);
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
@@ -4413,6 +4476,7 @@ static void gaudi2_disable_msix(struct hl_device *hdev)
k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) {
irq = pci_irq_vector(hdev->pdev, i);
+ irq_set_affinity_and_hint(irq, NULL);
free_irq(irq, &hdev->user_interrupt[j]);
}
@@ -4957,10 +5021,17 @@ static void gaudi2_init_qman_pq(struct hl_device *hdev, u32 reg_base,
q = &hdev->kernel_queues[queue_id_base + pq_id];
pq_offset = pq_id * 4;
- WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
- lower_32_bits(q->bus_address));
- WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
- upper_32_bits(q->bus_address));
+ if (q->dram_bd) {
+ WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
+ lower_32_bits(q->pq_dram_address));
+ WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
+ upper_32_bits(q->pq_dram_address));
+ } else {
+ WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
+ lower_32_bits(q->bus_address));
+ WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
+ upper_32_bits(q->bus_address));
+ }
WREG32(reg_base + QM_PQ_SIZE_0_OFFSET + pq_offset, ilog2(HL_QUEUE_LENGTH));
WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0);
WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0);
@@ -5847,7 +5918,8 @@ static int gaudi2_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_har
return rc;
}
-static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
+static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base,
+ bool host_resident_pgt)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 hop0_addr;
@@ -5859,7 +5931,11 @@ static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
max_asid = min((u32) 8, max_asid);
for (asid = 0 ; asid < max_asid ; asid++) {
- hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr;
+ if (host_resident_pgt)
+ hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr;
+ else
+ hop0_addr = prop->mmu_pgt_addr + (asid * prop->dmmu.hop_table_size);
+
rc = gaudi2_mmu_update_asid_hop0_addr(hdev, stlb_base, asid, hop0_addr);
if (rc) {
dev_err(hdev->dev, "failed to set hop0 addr for asid %d\n", asid);
@@ -5870,7 +5946,8 @@ static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
return 0;
}
-static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base)
+static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base,
+ bool host_resident_pgt)
{
u32 status, timeout_usec;
int rc;
@@ -5893,7 +5970,7 @@ static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb
if (rc)
dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU SRAM init\n");
- rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base);
+ rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base, host_resident_pgt);
if (rc)
return rc;
@@ -5917,6 +5994,7 @@ static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb
static int gaudi2_pci_mmu_init(struct hl_device *hdev)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 mmu_base, stlb_base;
int rc;
@@ -5956,7 +6034,7 @@ static int gaudi2_pci_mmu_init(struct hl_device *hdev)
WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_PMMU_SPI_SEI_ENABLE_MASK);
- rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
+ rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base, prop->pmmu.host_resident);
if (rc)
return rc;
@@ -6008,7 +6086,7 @@ static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id,
WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_HMMU_SPI_SEI_ENABLE_MASK);
- rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
+ rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base, prop->dmmu.host_resident);
if (rc)
return rc;
@@ -7046,7 +7124,7 @@ static int gaudi2_test_queues(struct hl_device *hdev)
/* send test message on all enabled Qs */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
- if (!gaudi2_is_queue_enabled(hdev, i))
+ if (!gaudi2_is_queue_enabled(hdev, i) || gaudi2_is_edma_queue_id(i))
continue;
msg_info = &gaudi2->queues_test_info[i - GAUDI2_QUEUE_ID_PDMA_0_0];
@@ -7063,7 +7141,7 @@ static int gaudi2_test_queues(struct hl_device *hdev)
/* verify that all messages were processed */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
- if (!gaudi2_is_queue_enabled(hdev, i))
+ if (!gaudi2_is_queue_enabled(hdev, i) || gaudi2_is_edma_queue_id(i))
continue;
rc = gaudi2_test_queue_wait_completion(hdev, i, sob_val);
@@ -8907,9 +8985,6 @@ static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_typ
u32 error_count = 0;
int i;
- gaudi2_print_event(hdev, event_type, true,
- "intr_cause_data: %#llx", intr_cause_data);
-
for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
if (!(intr_cause_data & BIT_ULL(i)))
continue;
@@ -8918,15 +8993,16 @@ static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_typ
"err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
error_count++;
- /*
- * Always check for LBW and HBW additional info as the indication itself is
- * sometimes missing
- */
+ switch (intr_cause_data & BIT_ULL(i)) {
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
+ hl_check_for_glbl_errors(hdev);
+ break;
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
+ gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
+ break;
+ }
}
- hl_check_for_glbl_errors(hdev);
- gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
-
return error_count;
}
@@ -8983,7 +9059,6 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool
if (is_pmmu) {
dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n", addr);
} else {
-
addr = gaudi2_mmu_descramble_addr(hdev, addr);
addr &= HW_UNSCRAMBLED_BITS_MASK;
dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n",
@@ -9514,25 +9589,17 @@ static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
static int gaudi2_handle_pcie_drain(struct hl_device *hdev,
struct hl_eq_pcie_drain_ind_data *drain_data)
{
- u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause, error_count = 0;
+ u64 cause, error_count = 0;
cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data);
- lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw);
- lbw_wr = le64_to_cpu(drain_data->drain_wr_addr_lbw);
- hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw);
- hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw);
if (cause & BIT_ULL(0)) {
- dev_err_ratelimited(hdev->dev,
- "PCIE AXI drain LBW completed, read_err %u, write_err %u\n",
- !!lbw_rd, !!lbw_wr);
+ dev_err_ratelimited(hdev->dev, "PCIE AXI drain LBW completed\n");
error_count++;
}
if (cause & BIT_ULL(1)) {
- dev_err_ratelimited(hdev->dev,
- "PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n",
- hbw_rd, hbw_wr);
+ dev_err_ratelimited(hdev->dev, "PCIE AXI drain HBW completed\n");
error_count++;
}
@@ -10250,11 +10317,11 @@ reset_device:
}
static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
- struct packet_lin_dma *lin_dma_pkt, dma_addr_t pkt_dma_addr,
- u32 hw_queue_id, u32 size, u64 addr, u32 val)
+ struct packet_lin_dma *lin_dma_pkt,
+ u64 phys_addr, u32 hw_queue_id, u32 size, u64 addr, u32 val)
{
u32 ctl, pkt_size;
- int rc = 0;
+ int rc = 0, i;
ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
@@ -10268,9 +10335,20 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
pkt_size = sizeof(struct packet_lin_dma);
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, pkt_dma_addr);
+ for (i = 0; i < 3; i++) {
+ rc = hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM,
+ phys_addr + (i * sizeof(u64)),
+ ((u64 *)(lin_dma_pkt)) + i, DEBUGFS_WRITE64);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to copy lin_dma packet to HBM (%#llx)\n",
+ phys_addr);
+ return rc;
+ }
+ }
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin dma packet to H/W queue %d\n",
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
hw_queue_id);
return rc;
@@ -10283,12 +10361,11 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0};
u32 chunk_size, dcore, edma_idx, sob_offset, sob_addr, comp_val,
- old_mmubp, mmubp, num_of_pkts, busy, pkt_size;
+ old_mmubp, mmubp, num_of_pkts, busy, pkt_size, cb_len;
u64 comp_addr, cur_addr = addr, end_addr = addr + size;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc = 0, dma_num = 0, i;
void *lin_dma_pkts_arr;
- dma_addr_t pkt_dma_addr;
- int rc = 0, dma_num = 0;
if (prop->edma_enabled_mask == 0) {
dev_info(hdev->dev, "non of the EDMA engines is enabled - skip dram scrubbing\n");
@@ -10306,9 +10383,19 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
/* Calculate how many lin dma pkts we'll need */
num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G);
pkt_size = sizeof(struct packet_lin_dma);
+ cb_len = pkt_size * num_of_pkts;
+
+ /*
+ * If we're not scrubbing the HMMU or NIC reserved sections in the HBM,
+ * then this is the scrubbing of the user section. As we use the start of the
+ * user section to store the CB of the EDMA QM, shift the scrubbing start
+ * address accordingly and scrub the CB section before leaving this function.
+ */
+ if ((addr >= prop->dram_user_base_address) &&
+ (addr < prop->dram_user_base_address + cb_len))
+ cur_addr += (prop->dram_user_base_address + cb_len) - addr;
- lin_dma_pkts_arr = hl_asic_dma_alloc_coherent(hdev, pkt_size * num_of_pkts,
- &pkt_dma_addr, GFP_KERNEL);
+ lin_dma_pkts_arr = kvcalloc(num_of_pkts, pkt_size, GFP_KERNEL);
if (!lin_dma_pkts_arr)
return -ENOMEM;
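Since the LIN_DMA command buffer is now parked at the start of the DRAM user section, scrubbing that same section has to begin past the CB. The sketch below walks through the packet-count and start-address math with assumed values (the 24-byte packet size and the addresses are illustrative only).

/* Userspace sketch of the scrub-start adjustment and packet-count math above. */
#include <stdio.h>
#include <stdint.h>

#define SZ_2G			(2ULL << 30)
#define LIN_DMA_PKT_SIZE	24	/* assumed sizeof(struct packet_lin_dma) */
#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t dram_user_base = 0x1000180000000ULL;	/* assumed */
	uint64_t addr = dram_user_base;			/* scrubbing the user section */
	uint64_t size = 10ULL << 30;			/* assumed: 10GB to scrub */

	uint64_t num_of_pkts = DIV_ROUND_UP_ULL(size, SZ_2G);
	uint64_t cb_len = num_of_pkts * LIN_DMA_PKT_SIZE;
	uint64_t cur_addr = addr;

	/* The CB occupies [dram_user_base, dram_user_base + cb_len); skip it */
	if (addr >= dram_user_base && addr < dram_user_base + cb_len)
		cur_addr += (dram_user_base + cb_len) - addr;

	printf("packets: %llu, CB bytes: %llu, scrub starts at 0x%llx\n",
	       (unsigned long long)num_of_pkts, (unsigned long long)cb_len,
	       (unsigned long long)cur_addr);
	return 0;
}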
@@ -10354,7 +10441,7 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
rc = gaudi2_memset_memory_chunk_using_edma_qm(hdev,
(struct packet_lin_dma *)lin_dma_pkts_arr + dma_num,
- pkt_dma_addr + dma_num * pkt_size,
+ prop->dram_user_base_address + (dma_num * pkt_size),
edma_queues_id[dcore] + edma_idx * 4,
chunk_size, cur_addr, val);
if (rc)
@@ -10363,14 +10450,16 @@ static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 siz
dma_num++;
cur_addr += chunk_size;
if (cur_addr == end_addr)
- break;
+ goto edma_wait;
}
}
}
+edma_wait:
rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000);
if (rc) {
- dev_err(hdev->dev, "DMA Timeout during HBM scrubbing\n");
+ dev_err(hdev->dev, "DMA Timeout during HBM scrubbing(sob: 0x%x, dma_num: 0x%x)\n",
+ busy, dma_num);
goto end;
}
end:
@@ -10391,8 +10480,16 @@ end:
}
}
+ memset(lin_dma_pkts_arr, 0, sizeof(u64));
+
+ /* Zero the HBM area where we copied the CB */
+ for (i = 0; i < cb_len / sizeof(u64); i += sizeof(u64))
+ rc = hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM,
+ prop->dram_user_base_address + i,
+ (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- hl_asic_dma_free_coherent(hdev, pkt_size * num_of_pkts, lin_dma_pkts_arr, pkt_dma_addr);
+
+ kfree(lin_dma_pkts_arr);
return rc;
}
@@ -11450,7 +11547,7 @@ static int gaudi2_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_p
return 0;
page_size_err:
- dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
+ dev_err(hdev->dev, "page size of 0x%X is not 0x%X aligned, can't map\n",
page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
@@ -11470,6 +11567,29 @@ int gaudi2_send_device_activity(struct hl_device *hdev, bool open)
return hl_fw_send_device_activity(hdev, open);
}
+static u64 gaudi2_read_pte(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u64 val;
+
+ if (hdev->reset_info.hard_reset_pending)
+ return U64_MAX;
+
+ val = readq(hdev->pcie_bar[DRAM_BAR_ID] + (addr - gaudi2->dram_bar_cur_addr));
+
+ return val;
+}
+
+static void gaudi2_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (hdev->reset_info.hard_reset_pending)
+ return;
+
+ writeq(val, hdev->pcie_bar[DRAM_BAR_ID] + (addr - gaudi2->dram_bar_cur_addr));
+}
+
static const struct hl_asic_funcs gaudi2_funcs = {
.early_init = gaudi2_early_init,
.early_fini = gaudi2_early_fini,
@@ -11506,8 +11626,8 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.add_device_attr = gaudi2_add_device_attr,
.handle_eqe = gaudi2_handle_eqe,
.get_events_stat = gaudi2_get_events_stat,
- .read_pte = NULL,
- .write_pte = NULL,
+ .read_pte = gaudi2_read_pte,
+ .write_pte = gaudi2_write_pte,
.mmu_invalidate_cache = gaudi2_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi2_mmu_invalidate_cache_range,
.mmu_prefetch_cache_range = NULL,
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 9b9eef0d97d6..eee41387b269 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -19,8 +19,6 @@
#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
-#define MMU_PAGE_TABLES_INITIAL_SIZE 0x10000000 /* 256MB */
-
#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define NUMBER_OF_PDMA_QUEUES 2
@@ -109,13 +107,11 @@
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
-
-/* This define should be used only when working in a debug mode without dram.
- * When working with dram, the driver size will be calculated dynamically.
- */
-#define NIC_DEFAULT_DRV_SIZE 0x20000000 /* 512MB */
-
#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+#define PMMU_PAGE_TABLES_SIZE 0x10000000 /* 256MB */
+#define EDMA_PQS_SIZE SZ_2M
+#define EDMA_SCRATCHPAD_SIZE SZ_1M
+#define HMMU_PAGE_TABLES_SIZE SZ_1M
#define NIC_NUMBER_OF_PORTS NIC_NUMBER_OF_ENGINES
@@ -241,9 +237,8 @@
#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
-#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
-#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE 8
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
diff --git a/drivers/accel/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c
index 1322cb330c57..5a359c3bdc78 100644
--- a/drivers/accel/habanalabs/goya/goya.c
+++ b/drivers/accel/habanalabs/goya/goya.c
@@ -413,8 +413,6 @@ int goya_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
- prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = true;
@@ -435,8 +433,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
prop->dmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
- prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->dmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->dmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
@@ -446,8 +444,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
prop->pmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
- prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
- prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
+ prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->pmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -2678,7 +2676,7 @@ int goya_mmu_init(struct hl_device *hdev)
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_addr = prop->mmu_pgt_addr +
- (i * prop->mmu_hop_table_size);
+ (i * prop->dmmu.hop_table_size);
rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
if (rc) {
diff --git a/drivers/accel/habanalabs/goya/goya_coresight.c b/drivers/accel/habanalabs/goya/goya_coresight.c
index 41cae5fd843b..3827ea4c02f7 100644
--- a/drivers/accel/habanalabs/goya/goya_coresight.c
+++ b/drivers/accel/habanalabs/goya/goya_coresight.c
@@ -576,7 +576,6 @@ static int goya_config_spmu(struct hl_device *hdev,
struct hl_debug_params *params)
{
u64 base_reg;
- struct hl_debug_params_spmu *input = params->input;
u64 *output;
u32 output_arr_len;
u32 events_num;
@@ -592,7 +591,7 @@ static int goya_config_spmu(struct hl_device *hdev,
base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
if (params->enable) {
- input = params->input;
+ struct hl_debug_params_spmu *input = params->input;
if (!input)
return -EINVAL;
diff --git a/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
index d408feecd483..b4a5e95be354 100644
--- a/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -26,6 +26,8 @@
#define LAST_MASK 0x0000000000800ull
#define FLAGS_MASK 0x0000000000FFFull
+#define MMU_ARCH_3_HOPS 3
+#define MMU_ARCH_4_HOPS 4
#define MMU_ARCH_5_HOPS 5
#define MMU_ARCH_6_HOPS 6
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 7cb962e21453..d09d29775b3f 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -287,22 +287,6 @@ static const struct file_operations fw_trace_level_fops = {
};
static ssize_t
-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
-{
- struct ivpu_device *vdev = file->private_data;
-
- if (!size)
- return -EINVAL;
-
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
-
- return size;
-}
-
-static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
@@ -327,6 +311,22 @@ static const struct file_operations ivpu_force_recovery_fops = {
.write = ivpu_force_recovery_fn,
};
+static ssize_t
+ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+
+ if (!size)
+ return -EINVAL;
+
+ if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+ return -ENODEV;
+ if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
+ return -ENODEV;
+
+ return size;
+}
+
static const struct file_operations ivpu_reset_engine_fops = {
.owner = THIS_MODULE,
.open = simple_open,
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 4b0640226986..39f6d1b98fd6 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -45,11 +45,11 @@ MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
-MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
+MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");
u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
-MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
+MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
@@ -328,13 +328,13 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
ivpu_ipc_consumer_del(vdev, &cons);
if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
- ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
+ ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
ipc_hdr.data_addr);
return -EIO;
}
if (!ret)
- ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
+ ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");
return ret;
}
@@ -532,6 +532,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
atomic64_set(&vdev->unique_id_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
@@ -605,6 +606,7 @@ err_power_down:
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
+ xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
xa_destroy(&vdev->context_xa);
return ret;
@@ -640,6 +642,8 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
ivpu_mmu_reserved_context_fini(vdev);
ivpu_mmu_global_context_fini(vdev);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
+ xa_destroy(&vdev->db_xa);
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
xa_destroy(&vdev->submitted_jobs_xa);
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 069ace4adb2d..7be0500d9bb8 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -36,6 +36,9 @@
#define IVPU_USER_CONTEXT_MIN_SSID 2
#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63)
+#define IVPU_MIN_DB 1
+#define IVPU_MAX_DB 255
+
#define IVPU_NUM_ENGINES 2
#define IVPU_PLATFORM_SILICON 0
@@ -119,6 +122,8 @@ struct ivpu_device {
struct xarray context_xa;
struct xa_limit context_xa_limit;
+ struct xarray db_xa;
+
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
@@ -189,7 +194,7 @@ static inline int ivpu_hw_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
default:
- ivpu_err(vdev, "Unknown VPU device\n");
+ ivpu_err(vdev, "Unknown NPU device\n");
return 0;
}
}
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 5fa8bd4603d5..1457300828bf 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -46,15 +46,13 @@
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
-MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
-/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
- { IVPU_HW_37XX, "mtl_vpu.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
@@ -250,6 +248,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
+ struct ivpu_addr_range fw_range;
int log_verb_size;
int ret;
@@ -257,16 +256,19 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ fw_range.start = fw->runtime_addr;
+ fw_range.end = fw->runtime_addr + fw->runtime_size;
+ fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem) {
- ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
+ ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
return -ENOMEM;
}
- fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
- DRM_IVPU_BO_CACHED);
+ fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem_log_crit) {
- ivpu_err(vdev, "Failed to allocate critical log buffer\n");
+ ivpu_err(vdev, "Failed to create critical log buffer\n");
ret = -ENOMEM;
goto err_free_fw_mem;
}
@@ -276,18 +278,19 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
- fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
+ fw->mem_log_verb = ivpu_bo_create_global(vdev, log_verb_size,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem_log_verb) {
- ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
+ ivpu_err(vdev, "Failed to create verbose log buffer\n");
ret = -ENOMEM;
goto err_free_log_crit;
}
if (fw->shave_nn_size) {
- fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
- fw->shave_nn_size, DRM_IVPU_BO_WC);
+ fw->mem_shave_nn = ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.shave,
+ fw->shave_nn_size, DRM_IVPU_BO_WC);
if (!fw->mem_shave_nn) {
- ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
+ ivpu_err(vdev, "Failed to create shavenn buffer\n");
ret = -ENOMEM;
goto err_free_log_verb;
}
@@ -296,11 +299,11 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
return 0;
err_free_log_verb:
- ivpu_bo_free_internal(fw->mem_log_verb);
+ ivpu_bo_free(fw->mem_log_verb);
err_free_log_crit:
- ivpu_bo_free_internal(fw->mem_log_crit);
+ ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
- ivpu_bo_free_internal(fw->mem);
+ ivpu_bo_free(fw->mem);
return ret;
}
@@ -309,13 +312,13 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
struct ivpu_fw_info *fw = vdev->fw;
if (fw->mem_shave_nn) {
- ivpu_bo_free_internal(fw->mem_shave_nn);
+ ivpu_bo_free(fw->mem_shave_nn);
fw->mem_shave_nn = NULL;
}
- ivpu_bo_free_internal(fw->mem_log_verb);
- ivpu_bo_free_internal(fw->mem_log_crit);
- ivpu_bo_free_internal(fw->mem);
+ ivpu_bo_free(fw->mem_log_verb);
+ ivpu_bo_free(fw->mem_log_crit);
+ ivpu_bo_free(fw->mem);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
@@ -469,6 +472,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_residency_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
boot_params->d0i3_entry_vpu_ts);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
+ boot_params->system_time_us);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -480,11 +485,14 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us =
ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
+ boot_params->system_time_us = ktime_to_us(ktime_get_real());
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
boot_params->d0i3_residency_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
boot_params->d0i3_entry_vpu_ts);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
+ boot_params->system_time_us);
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
@@ -562,6 +570,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
+ boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
ivpu_fw_boot_params_print(vdev, boot_params);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
index f6770f5e82a2..ef0adb5e0fbe 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.c
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -20,7 +20,7 @@
unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
module_param(ivpu_log_level, uint, 0444);
MODULE_PARM_DESC(ivpu_log_level,
- "VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+ "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
@@ -121,11 +121,11 @@ void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_
u32 next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
+ fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
+ fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
}
void ivpu_fw_log_clear(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index e9ddbe9f50eb..1b409dbd332d 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -172,8 +172,7 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
return &bo->base.base;
}
-static struct ivpu_bo *
-ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -201,7 +200,7 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
return bo;
}
-static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
+static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -224,7 +223,7 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
}
-static void ivpu_bo_free(struct drm_gem_object *obj)
+static void ivpu_gem_bo_free(struct drm_gem_object *obj)
{
struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
@@ -245,8 +244,8 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
}
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
- .free = ivpu_bo_free,
- .open = ivpu_bo_open,
+ .free = ivpu_gem_bo_free,
+ .open = ivpu_gem_bo_open,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
@@ -272,9 +271,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_create(vdev, size, args->flags);
+ bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
+ ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
@@ -289,33 +288,28 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
struct ivpu_bo *
-ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
+ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ struct ivpu_addr_range *range, u64 size, u32 flags)
{
- const struct ivpu_addr_range *range;
- struct ivpu_addr_range fixed_range;
struct iosys_map map;
struct ivpu_bo *bo;
int ret;
- drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
- drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+ if (drm_WARN_ON(&vdev->drm, !range))
+ return NULL;
- if (vpu_addr) {
- fixed_range.start = vpu_addr;
- fixed_range.end = vpu_addr + size;
- range = &fixed_range;
- } else {
- range = &vdev->hw->ranges.global;
- }
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->start));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_create(vdev, size, flags);
+ bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
- bo, vpu_addr, size, flags);
+ ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ bo, range->start, size, flags);
return NULL;
}
- ret = ivpu_bo_alloc_vpu_addr(bo, &vdev->gctx, range);
+ ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
if (ret)
goto err_put;
@@ -323,11 +317,14 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ret)
goto err_put;
- dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
- if (ret)
- goto err_put;
+ if (flags & DRM_IVPU_BO_MAPPABLE) {
+ dma_resv_lock(bo->base.base.resv, NULL);
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+
+ if (ret)
+ goto err_put;
+ }
return bo;
@@ -336,13 +333,20 @@ err_put:
return NULL;
}
-void ivpu_bo_free_internal(struct ivpu_bo *bo)
+struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
+{
+ return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
+}
+
+void ivpu_bo_free(struct ivpu_bo *bo)
{
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
- dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
+ dma_resv_lock(bo->base.base.resv, NULL);
+ drm_gem_shmem_vunmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+ }
drm_gem_object_put(&bo->base.base);
}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index a8559211c70d..fb7117c13eec 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -28,8 +28,10 @@ int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
-struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
-void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
+void ivpu_bo_free(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 89af1006df55..9a0c9498baba 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -13,7 +13,7 @@
#include "ivpu_pm.h"
#define TILE_FUSE_ENABLE_BOTH 0x0
-#define TILE_SKU_BOTH_MTL 0x3630
+#define TILE_SKU_BOTH 0x3630
/* Work point configuration values */
#define CONFIG_1_TILE 0x01
@@ -228,7 +228,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
- ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
+ ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
return ret;
}
}
@@ -589,7 +589,7 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
struct ivpu_hw_info *hw = vdev->hw;
hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
- hw->sku = TILE_SKU_BOTH_MTL;
+ hw->sku = TILE_SKU_BOTH;
hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
ivpu_pll_init_frequency_ratios(vdev);
@@ -762,10 +762,10 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);
if (!ivpu_hw_37xx_is_idle(vdev))
- ivpu_warn(vdev, "VPU not idle during power down\n");
+ ivpu_warn(vdev, "NPU not idle during power down\n");
if (ivpu_hw_37xx_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset VPU\n");
+ ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 1c995307c113..e4eddbf5d11c 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -24,7 +24,7 @@
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
-#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CONFIG_DEFAULT 0x0
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)
@@ -80,11 +80,11 @@ static char *ivpu_platform_to_str(u32 platform)
{
switch (platform) {
case IVPU_PLATFORM_SILICON:
- return "IVPU_PLATFORM_SILICON";
+ return "SILICON";
case IVPU_PLATFORM_SIMICS:
- return "IVPU_PLATFORM_SIMICS";
+ return "SIMICS";
case IVPU_PLATFORM_FPGA:
- return "IVPU_PLATFORM_FPGA";
+ return "FPGA";
default:
return "Invalid platform";
}
@@ -768,7 +768,7 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
int ret = 0;
if (ivpu_hw_40xx_ip_reset(vdev)) {
- ivpu_err(vdev, "Failed to reset VPU IP\n");
+ ivpu_err(vdev, "Failed to reset NPU IP\n");
ret = -EIO;
}
@@ -926,7 +926,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
- ivpu_warn(vdev, "Failed to reset the VPU\n");
+ ivpu_warn(vdev, "Failed to reset the NPU\n");
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index fa66c39b57ec..04ac4b9840fb 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -58,8 +58,8 @@ static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
- ivpu_bo_free_internal(ipc->mem_rx);
- ivpu_bo_free_internal(ipc->mem_tx);
+ ivpu_bo_free(ipc->mem_rx);
+ ivpu_bo_free(ipc->mem_tx);
}
static int
@@ -471,13 +471,13 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret;
- ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!ipc->mem_tx) {
ivpu_err(vdev, "Failed to allocate mem_tx\n");
return -ENOMEM;
}
- ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!ipc->mem_rx) {
ivpu_err(vdev, "Failed to allocate mem_rx\n");
ret = -ENOMEM;
@@ -506,9 +506,9 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
return 0;
err_free_rx:
- ivpu_bo_free_internal(ipc->mem_rx);
+ ivpu_bo_free(ipc->mem_rx);
err_free_tx:
- ivpu_bo_free_internal(ipc->mem_tx);
+ ivpu_bo_free(ipc->mem_tx);
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index e70cfb859339..a49bc9105ed0 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -30,19 +30,26 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
{
+ struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
struct ivpu_cmdq *cmdq;
+ int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return NULL;
- cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
+ ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto cmdq_free;
+ goto err_erase_xa;
- cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
sizeof(struct vpu_job_queue_entry));
@@ -55,7 +62,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 e
return cmdq;
-cmdq_free:
+err_erase_xa:
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+err_free_cmdq:
kfree(cmdq);
return NULL;
}
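Doorbell IDs are now handed out from the device-wide db_xa allocator, bounded to [IVPU_MIN_DB, IVPU_MAX_DB], instead of being derived from the context ID. The userspace stand-in below only illustrates the allocate-on-create / erase-on-free pairing; the real code uses xa_alloc()/xa_erase() on an xarray, not a bitmap.

/* Userspace stand-in for the bounded doorbell-ID allocator used above. */
#include <stdio.h>

#define MIN_DB 1
#define MAX_DB 255

static unsigned char db_used[MAX_DB + 1];

static int db_alloc(void)
{
	int id;

	for (id = MIN_DB; id <= MAX_DB; id++)
		if (!db_used[id]) {
			db_used[id] = 1;
			return id;	/* like xa_alloc() with limit {1, 255} */
		}
	return -1;			/* exhausted; xa_alloc() returns -EBUSY */
}

static void db_free(int id)
{
	if (id >= MIN_DB && id <= MAX_DB)
		db_used[id] = 0;	/* like xa_erase() on cmdq free */
}

int main(void)
{
	int a = db_alloc(), b = db_alloc();

	printf("allocated doorbells %d and %d\n", a, b);
	db_free(a);
	db_free(b);
	return 0;
}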
@@ -65,7 +74,8 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
if (!cmdq)
return;
- ivpu_bo_free_internal(cmdq->mem);
+ ivpu_bo_free(cmdq->mem);
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 5f73854234ba..7cce1c928a7f 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -22,7 +22,7 @@
static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
-MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
+MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
@@ -118,11 +118,11 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret;
- ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (ret)
- ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
ivpu_fw_log_dump(vdev);
@@ -260,10 +260,10 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
ret = ivpu_suspend(vdev);
if (ret)
- ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
+ ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);
if (!hw_is_idle) {
- ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
@@ -309,7 +309,7 @@ int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
int ret;
- ret = pm_runtime_get_if_active(vdev->drm.dev, false);
+ ret = pm_runtime_get_if_in_use(vdev->drm.dev);
drm_WARN_ON(&vdev->drm, ret < 0);
return ret;
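
For reference, pm_runtime_get_if_in_use() only takes a reference when the device is already runtime-active and its usage count is non-zero. A sketch of the resulting pattern, not part of the patch, with an invented example_* name:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Touch the hardware only if it is already powered up and in use. */
static int example_poke_if_in_use(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_if_in_use(dev);
	if (ret <= 0)	/* 0: idle or suspended, <0: runtime PM disabled */
		return ret;

	/* ... safe to access registers here ... */

	pm_runtime_put(dev);
	return 0;
}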
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 04c954258563..87cac7bc730a 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (c) 2020-2023, Intel Corporation.
*/
#ifndef VPU_BOOT_API_H
@@ -27,12 +27,12 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 20
+#define VPU_BOOT_API_VER_MINOR 22
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 4
+#define VPU_BOOT_API_VER_PATCH 0
/*
* Index in the API version table
@@ -41,7 +41,7 @@
#define VPU_BOOT_API_VER_INDEX 0
/* ------------ FW API version information end ---------------------*/
-#pragma pack(push, 1)
+#pragma pack(push, 4)
/*
* Firmware image header format
@@ -66,9 +66,17 @@ struct vpu_firmware_header {
/* Size of memory require for firmware execution */
u32 runtime_size;
u32 shave_nn_fw_size;
- /* Size of primary preemption buffer. */
+ /*
+ * Size of primary preemption buffer, assuming a 2-job submission queue.
+ * NOTE: host driver is expected to adapt size accordingly to actual
+ * submission queue size and device capabilities.
+ */
u32 preemption_buffer_1_size;
- /* Size of secondary preemption buffer. */
+ /*
+ * Size of secondary preemption buffer, assuming a 2-job submission queue.
+ * NOTE: host driver is expected to adapt size accordingly to actual
+ * submission queue size and device capabilities.
+ */
u32 preemption_buffer_2_size;
/* Space reserved for future preemption-related fields. */
u32 preemption_reserved[6];
@@ -181,10 +189,10 @@ struct vpu_warm_boot_section {
#define VPU_PRESENT_CALL_PERIOD_MS_MAX 10000
/**
- * Macros to enable various operation modes within the VPU.
+ * Macros to enable various power profiles within the NPU.
* To be defined as part of 32 bit mask.
*/
-#define VPU_OP_MODE_SURVIVABILITY 0x1
+#define POWER_PROFILE_SURVIVABILITY 0x1
struct vpu_boot_params {
u32 magic;
@@ -317,7 +325,15 @@ struct vpu_boot_params {
u64 d0i3_residency_time_us;
/* Value of VPU perf counter at the time of entering D0i3 state . */
u64 d0i3_entry_vpu_ts;
- u32 pad4[20];
+ /*
+ * The system time of the host operating system in microseconds.
+ * E.g. the number of microseconds since the 1st of January 1970, or whatever date the
+ * host operating system uses to maintain system time.
+ * This value will be used to track system time on the VPU.
+ * The KMD is required to update this value on every VPU reset.
+ */
+ u64 system_time_us;
+ u32 pad4[18];
/* Warm boot information: 0x400 - 0x43F */
u32 warm_boot_sections_count;
u32 warm_boot_start_address_reference;
@@ -344,10 +360,14 @@ struct vpu_boot_params {
u32 vpu_focus_present_timer_ms;
/* VPU ECC Signaling */
u32 vpu_uses_ecc_mca_signal;
- /* Values defined by VPU_OP_MODE* macros */
- u32 vpu_operation_mode;
- /* Unused/reserved: 0x480 - 0xFFF */
- u32 pad6[736];
+ /* Values defined by POWER_PROFILE* macros */
+ u32 power_profile;
+ /* Microsecond value for DCT active cycle */
+ u32 dct_active_us;
+ /* Microsecond value for DCT inactive cycle */
+ u32 dct_inactive_us;
+ /* Unused/reserved: 0x488 - 0xFFF */
+ u32 pad6[734];
};
/*
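
The vpu_boot_api.h hunks above carve new fields out of fixed pad arrays while keeping the documented byte offsets (0x400, 0x488, ...) stable. A sketch of how such fixed-offset ABI structs can be guarded at compile time; the example_* struct is invented and far smaller than the real one:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example_boot_params {
	u32 magic;		/* offset 0x000 */
	u32 reserved[255];	/* pad out to 0x400 */
	u32 warm_boot_count;	/* documented to live at 0x400 */
};

/* Catch anyone who resizes the pad array without preserving the offsets. */
static_assert(offsetof(struct example_boot_params, warm_boot_count) == 0x400);
static_assert(sizeof(struct example_boot_params) == 0x404);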
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 7da7622742be..e46f3531211a 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (c) 2020-2023, Intel Corporation.
*/
/**
@@ -27,7 +27,7 @@
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_JSM_API_VER_PATCH 0
+#define VPU_JSM_API_VER_PATCH 6
/*
* Index in the API version table
@@ -43,8 +43,11 @@
/* Max number of impacted contexts that can be dealt with the engine reset command */
#define VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS 3
-/** Pack the API structures for now, once alignment issues are fixed this can be removed */
-#pragma pack(push, 1)
+/*
+ * Pack the API structures to enforce binary compatibility
+ * Align to 8 bytes for optimal performance
+ */
+#pragma pack(push, 8)
/*
* Engine indexes.
@@ -125,6 +128,19 @@
#define VPU_HWS_MAX_REALTIME_PRIORITY_LEVEL 31U
/*
+ * vpu_jsm_engine_reset_context flag definitions
+ */
+#define VPU_ENGINE_RESET_CONTEXT_FLAG_COLLATERAL_DAMAGE_MASK BIT(0)
+#define VPU_ENGINE_RESET_CONTEXT_HANG_PRIMARY_CAUSE 0
+#define VPU_ENGINE_RESET_CONTEXT_COLLATERAL_DAMAGE 1
+
+/*
+ * Invalid command queue handle identifier. Applies to cmdq_id and cmdq_group
+ * in this API.
+ */
+#define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL
+
+/*
* Job format.
*/
struct vpu_job_queue_entry {
@@ -613,7 +629,7 @@ struct vpu_jsm_engine_reset_context {
u32 reserved_0;
/* Command queue id */
u64 cmdq_id;
- /* Flags: 0: cause of hang; 1: collateral damage of reset */
+ /* See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
@@ -730,11 +746,7 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 host_ssid;
/* Engine for which queue is being created */
u32 engine_idx;
- /*
- * Cmdq group may be set to 0 or equal to
- * cmdq_id while each priority band contains
- * only single engine instances.
- */
+ /* Cmdq group: only used for HWS logging of state changes */
u64 cmdq_group;
/* Command queue id */
u64 cmdq_id;
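
The reset-context flags above gain named VPU_ENGINE_RESET_CONTEXT_* values in place of a prose comment; decoding such a flags word is a simple mask test. A tiny sketch with invented EXAMPLE_*/example_* names:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_RESET_COLLATERAL_DAMAGE_MASK	BIT(0)

/* True when the context was reset as collateral damage, not as the cause. */
static bool example_is_collateral_damage(u64 flags)
{
	return flags & EXAMPLE_RESET_COLLATERAL_DAMAGE_MASK;
}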
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index cb77d048ed54..ada9b1eb0787 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,7 +20,7 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
-static struct mhi_channel_config aic100_channels[] = {
+static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
.num = 0,
@@ -358,8 +358,8 @@ static struct mhi_channel_config aic100_channels[] = {
.wake_capable = false,
},
{
- .num = 21,
.name = "QAIC_TIMESYNC",
+ .num = 21,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
@@ -390,8 +390,8 @@ static struct mhi_channel_config aic100_channels[] = {
.wake_capable = false,
},
{
- .num = 23,
.name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 582836f9538f..9256653b3036 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -30,6 +30,7 @@
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
enum __packed dev_states {
/* Device is offline or will be very soon */
@@ -191,8 +192,6 @@ struct qaic_bo {
u32 nr_slice;
/* Number of slice that have been transferred by DMA engine */
u32 nr_slice_xfer_done;
- /* true = BO is queued for execution, true = BO is not queued */
- bool queued;
/*
* If true then user has attached slicing information to this BO by
* calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 03c9a793da35..2459fe4a3f95 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -141,6 +141,11 @@ struct dbc_rsp {
__le16 status;
} __packed;
+static inline bool bo_queued(struct qaic_bo *bo)
+{
+ return !list_empty(&bo->xfer_list);
+}
+
inline int get_dbc_req_elem_size(void)
{
return sizeof(struct dbc_req);
@@ -569,6 +574,9 @@ static void qaic_free_sgt(struct sg_table *sgt)
{
struct scatterlist *sg;
+ if (!sgt)
+ return;
+
for (sg = sgt->sgl; sg; sg = sg_next(sg))
if (sg_page(sg))
__free_pages(sg_page(sg), get_order(sg->length));
@@ -648,6 +656,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
}
complete_all(&bo->xfer_done);
INIT_LIST_HEAD(&bo->slices);
+ INIT_LIST_HEAD(&bo->xfer_list);
}
static struct qaic_bo *qaic_alloc_init_bo(void)
@@ -709,9 +718,13 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto free_bo;
+
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
if (ret)
- goto free_sgt;
+ goto free_bo;
bo->handle = args->handle;
drm_gem_object_put(obj);
@@ -720,10 +733,8 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
return 0;
-free_sgt:
- qaic_free_sgt(bo->sgt);
free_bo:
- kfree(bo);
+ drm_gem_object_put(obj);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
@@ -738,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
struct drm_gem_object *obj;
struct qaic_device *qdev;
struct qaic_user *usr;
- int ret;
+ int ret = 0;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -760,9 +771,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_dev_srcu;
}
- ret = drm_gem_create_mmap_offset(obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put(obj);
@@ -828,9 +837,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
struct sg_table *sgt;
int ret;
- if (obj->import_attach->dmabuf->size < hdr->size)
- return -EINVAL;
-
sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
@@ -847,9 +853,6 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.size < hdr->size)
- return -EINVAL;
-
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
if (ret)
return -EFAULT;
@@ -950,9 +953,6 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (arg_size / args->hdr.count != sizeof(*slice_ent))
return -EINVAL;
- if (args->hdr.size == 0)
- return -EINVAL;
-
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
return -EINVAL;
@@ -992,16 +992,16 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
goto free_slice_ent;
}
- ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
- if (ret)
- goto free_slice_ent;
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
goto free_slice_ent;
}
+ ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
+ if (ret)
+ goto put_bo;
+
bo = to_qaic_bo(obj);
ret = mutex_lock_interruptible(&bo->lock);
if (ret)
@@ -1173,7 +1173,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
struct bo_slice *slice;
unsigned long flags;
struct qaic_bo *bo;
- bool queued;
int i, j;
int ret;
@@ -1205,9 +1204,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
- queued = bo->queued;
- bo->queued = true;
- if (queued) {
+ if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL;
goto unlock_bo;
@@ -1230,7 +1227,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
- bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
goto unlock_bo;
}
@@ -1253,8 +1249,7 @@ failed_to_send_bo:
spin_lock_irqsave(&dbc->xfer_lock, flags);
bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
obj = &bo->base;
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
drm_gem_object_put(obj);
@@ -1615,8 +1610,7 @@ read_fifo:
*/
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
bo->nr_slice_xfer_done = 0;
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
bo->perf_stats.req_processed_ts = ktime_get_ns();
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
@@ -1875,7 +1869,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
/* Check if BO is committed to H/W for DMA */
spin_lock_irqsave(&dbc->xfer_lock, flags);
- if (bo->queued) {
+ if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EBUSY;
goto unlock_ch_srcu;
@@ -1905,8 +1899,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_lock_irqsave(&dbc->xfer_lock, flags);
while (!list_empty(&dbc->xfer_list)) {
bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
bo->nr_slice_xfer_done = 0;
bo->req_id = 0;
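
The qaic_data.c changes replace the bo->queued flag with the bo_queued() helper shown earlier: list_del_init() leaves the node self-linked, so list_empty() on the node itself answers "is this BO on a transfer list?" without a separate bool that could fall out of sync. A minimal sketch of the idiom, assuming the node is INIT_LIST_HEAD()-ed at allocation (as qaic_init_bo() does above); example_* names are invented:

#include <linux/list.h>
#include <linux/types.h>

struct example_buf {
	struct list_head node;
};

/* "Queued" is simply "linked into some list". */
static bool example_buf_queued(const struct example_buf *buf)
{
	return !list_empty(&buf->node);
}

static void example_enqueue(struct list_head *queue, struct example_buf *buf)
{
	list_add_tail(&buf->node, queue);	/* example_buf_queued() is now true */
}

static void example_dequeue(struct example_buf *buf)
{
	list_del_init(&buf->node);		/* back to "not queued" */
}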
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 2a313eb69b12..d1a632dbaec6 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -44,6 +44,53 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
+static void qaicm_wq_release(struct drm_device *dev, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
+{
+ struct workqueue_struct *wq;
+ int ret;
+
+ wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+ ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return wq;
+}
+
+static void qaicm_srcu_release(struct drm_device *dev, void *res)
+{
+ struct srcu_struct *lock = res;
+
+ cleanup_srcu_struct(lock);
+}
+
+static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
+{
+ int ret;
+
+ ret = init_srcu_struct(lock);
+ if (ret)
+ return ret;
+
+ return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
+}
+
+static void qaicm_pci_release(struct drm_device *dev, void *res)
+{
+ struct qaic_device *qdev = to_qaic_device(dev);
+
+ pci_set_drvdata(qdev->pdev, NULL);
+}
+
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -299,74 +346,73 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
release_dbc(qdev, i);
}
-static void cleanup_qdev(struct qaic_device *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->num_dbc; ++i)
- cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
- cleanup_srcu_struct(&qdev->dev_lock);
- pci_set_drvdata(qdev->pdev, NULL);
- destroy_workqueue(qdev->cntl_wq);
- destroy_workqueue(qdev->qts_wq);
-}
-
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
struct qaic_device *qdev;
- int i;
+ struct drm_device *drm;
+ int i, ret;
- qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return NULL;
qdev->dev_state = QAIC_OFFLINE;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
if (!qdev->dbc)
return NULL;
}
- qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
- if (!qdev->cntl_wq)
+ qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
+ if (IS_ERR(qddev))
+ return NULL;
+
+ drm = to_drm(qddev);
+ pci_set_drvdata(pdev, qdev);
+
+ ret = drmm_mutex_init(drm, &qddev->users_mutex);
+ if (ret)
+ return NULL;
+ ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
+ if (ret)
+ return NULL;
+ ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
+ if (ret)
return NULL;
- qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
- if (!qdev->qts_wq) {
- destroy_workqueue(qdev->cntl_wq);
+ qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
+ if (IS_ERR(qdev->cntl_wq))
+ return NULL;
+ qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
+ if (IS_ERR(qdev->qts_wq))
return NULL;
- }
- pci_set_drvdata(pdev, qdev);
+ ret = qaicm_srcu_init(drm, &qdev->dev_lock);
+ if (ret)
+ return NULL;
+
+ qdev->qddev = qddev;
qdev->pdev = pdev;
+ qddev->qdev = qdev;
- mutex_init(&qdev->cntl_mutex);
INIT_LIST_HEAD(&qdev->cntl_xfer_list);
- init_srcu_struct(&qdev->dev_lock);
+ INIT_LIST_HEAD(&qddev->users);
for (i = 0; i < qdev->num_dbc; ++i) {
spin_lock_init(&qdev->dbc[i].xfer_lock);
qdev->dbc[i].qdev = qdev;
qdev->dbc[i].id = i;
INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
- init_srcu_struct(&qdev->dbc[i].ch_lock);
+ ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
+ if (ret)
+ return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
}
- qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
- if (IS_ERR(qddev)) {
- cleanup_qdev(qdev);
- return NULL;
- }
-
- drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
- INIT_LIST_HEAD(&qddev->users);
- qddev->qdev = qdev;
- qdev->qddev = qddev;
-
return qdev;
}
@@ -472,35 +518,28 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = init_pci(qdev, pdev);
if (ret)
- goto cleanup_qdev;
+ return ret;
for (i = 0; i < qdev->num_dbc; ++i)
qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
- if (mhi_irq < 0) {
- ret = mhi_irq;
- goto cleanup_qdev;
- }
+ if (mhi_irq < 0)
+ return mhi_irq;
ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
if (ret)
- goto cleanup_qdev;
+ return ret;
qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_drm_dev;
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
+ return ret;
}
return 0;
-
-cleanup_drm_dev:
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-cleanup_qdev:
- cleanup_qdev(qdev);
- return ret;
}
static void qaic_pci_remove(struct pci_dev *pdev)
@@ -511,9 +550,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
return;
qaic_dev_reset_clean_local_state(qdev);
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
- cleanup_qdev(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}
static void qaic_pci_shutdown(struct pci_dev *pdev)
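
qaic_drv.c above moves cleanup into drmm_*-managed release actions, which is why the probe-time error paths can simply return. A sketch of the pattern, mirroring qaicm_wq_init() but not taken from the driver; the example_* names are invented:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/err.h>
#include <linux/workqueue.h>

static void example_wq_release(struct drm_device *dev, void *res)
{
	destroy_workqueue(res);
}

static struct workqueue_struct *example_wq_create(struct drm_device *dev,
						  const char *name)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	/* On failure this also runs example_wq_release() for us. */
	ret = drmm_add_action_or_reset(dev, example_wq_release, wq);
	if (ret)
		return ERR_PTR(ret);

	return wq;
}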
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3c3f8037ebed..c645bb453f3b 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -449,20 +449,6 @@ config ACPI_HED
which is used to report some hardware errors notified via
SCI, mainly the corrected errors.
-config ACPI_CUSTOM_METHOD
- tristate "Allow ACPI methods to be inserted/replaced at run time"
- depends on DEBUG_FS
- help
- This debug facility allows ACPI AML methods to be inserted and/or
- replaced without rebooting the system. For details refer to:
- Documentation/firmware-guide/acpi/method-customizing.rst.
-
- NOTE: This option is security sensitive, because it allows arbitrary
- kernel memory to be written to by root (uid=0) users, allowing them
- to bypass certain security measures (e.g. if root is not allowed to
- load additional kernel modules after boot, this feature may be used
- to override that restriction).
-
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
depends on EFI && (X86 || ARM64)
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 12ef8180d272..8cc8c0d9c873 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -101,7 +101,6 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
-obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o
obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 4fe2ef54088c..7a0dd35d62c9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -161,7 +161,7 @@ static void cpufreq_add_device(const char *name)
pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev))
- pr_info("%s device creation failed: %ld\n", name, PTR_ERR(pdev));
+ pr_info("%s device creation failed: %pe\n", name, pdev);
}
#ifdef CONFIG_X86
@@ -381,6 +381,9 @@ static int acpi_processor_add(struct acpi_device *device,
struct device *dev;
int result = 0;
+ if (!acpi_device_is_enabled(device))
+ return -ENODEV;
+
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
return -ENOMEM;
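
Several hunks in this diff (acpi_processor.c here, acpi_watchdog.c and pci_slot.c below) switch error prints from %ld/PTR_ERR() to %pe, which decodes an ERR_PTR()-encoded pointer directly. A sketch with an invented example_report() helper:

#include <linux/err.h>
#include <linux/printk.h>

/* %pe prints "-ENOMEM" (or the raw "-12" without SYMBOLIC_ERRNAME). */
static void example_report(void *obj)
{
	if (IS_ERR(obj))
		pr_err("creation failed: %pe\n", obj);
}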
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 33c3b16af556..1d670dbe4d1d 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -554,7 +554,7 @@ static int acpi_tad_disable_timer(struct device *dev, u32 timer_id)
return acpi_tad_wake_set(dev, "_STV", timer_id, ACPI_TAD_WAKE_DISABLED);
}
-static int acpi_tad_remove(struct platform_device *pdev)
+static void acpi_tad_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
@@ -579,7 +579,6 @@ static int acpi_tad_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
acpi_remove_cmos_rtc_space_handler(handle);
- return 0;
}
static int acpi_tad_probe(struct platform_device *pdev)
@@ -684,7 +683,7 @@ static struct platform_driver acpi_tad_driver = {
.acpi_match_table = acpi_tad_ids,
},
.probe = acpi_tad_probe,
- .remove = acpi_tad_remove,
+ .remove_new = acpi_tad_remove,
};
MODULE_DEVICE_TABLE(acpi, acpi_tad_ids);
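
acpi_tad.c is one of several drivers in this diff converted from .remove (whose int return the platform core ignored) to the void .remove_new callback. A bare-bones sketch with invented example_* names:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* Returns void: failures must be logged here, not returned. */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};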
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4afdda9db019..1fda30388297 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -612,7 +612,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
static int
acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, ssize_t length)
+ union acpi_object **edid, int length)
{
int status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -625,13 +625,11 @@ acpi_video_device_EDID(struct acpi_video_device *device,
if (!device)
return -ENODEV;
- if (length == 128)
- arg0.integer.value = 1;
- else if (length == 256)
- arg0.integer.value = 2;
- else
+ if (!length || (length % 128))
return -EINVAL;
+ arg0.integer.value = length / 128;
+
status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -641,7 +639,8 @@ acpi_video_device_EDID(struct acpi_video_device *device,
if (obj && obj->type == ACPI_TYPE_BUFFER)
*edid = obj;
else {
- acpi_handle_info(device->dev->handle, "Invalid _DDC data\n");
+ acpi_handle_debug(device->dev->handle,
+ "Invalid _DDC data for length %d\n", length);
status = -EFAULT;
kfree(obj);
}
@@ -1447,7 +1446,6 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
for (i = 0; i < video->attached_count; i++) {
video_device = video->attached_array[i].bind_info;
- length = 256;
if (!video_device)
continue;
@@ -1478,18 +1476,14 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
continue;
}
- status = acpi_video_device_EDID(video_device, &buffer, length);
-
- if (ACPI_FAILURE(status) || !buffer ||
- buffer->type != ACPI_TYPE_BUFFER) {
- length = 128;
+ for (length = 512; length > 0; length -= 128) {
status = acpi_video_device_EDID(video_device, &buffer,
length);
- if (ACPI_FAILURE(status) || !buffer ||
- buffer->type != ACPI_TYPE_BUFFER) {
- continue;
- }
+ if (ACPI_SUCCESS(status))
+ break;
}
+ if (!length)
+ continue;
*edid = buffer->buffer.pointer;
return length;
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 8e9e001da38f..14b24157799c 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -179,7 +179,7 @@ void __init acpi_watchdog_init(void)
pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
resources, nresources);
if (IS_ERR(pdev))
- pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
+ pr_err("Device creation failed: %pe\n", pdev);
kfree(resources);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index fe825a432c5b..512067cac170 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -674,52 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
-/*
- * Only a single callback can be registered for CXL CPER events.
- */
-static DECLARE_RWSEM(cxl_cper_rw_sem);
-static cxl_cper_callback cper_callback;
-
-static void cxl_cper_post_event(enum cxl_event_type event_type,
- struct cxl_cper_event_rec *rec)
-{
- if (rec->hdr.length <= sizeof(rec->hdr) ||
- rec->hdr.length > sizeof(*rec)) {
- pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
- rec->hdr.length);
- return;
- }
-
- if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
- pr_err(FW_WARN "CXL CPER invalid event\n");
- return;
- }
-
- guard(rwsem_read)(&cxl_cper_rw_sem);
- if (cper_callback)
- cper_callback(event_type, rec);
-}
-
-int cxl_cper_register_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (cper_callback)
- return -EINVAL;
- cper_callback = callback;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
-
-int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (callback != cper_callback)
- return -EINVAL;
- cper_callback = NULL;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
-
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -754,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
- } else if (guid_equal(sec_type,
- &CPER_SEC_CXL_MEM_MODULE_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
@@ -1518,7 +1455,7 @@ err:
return rc;
}
-static int ghes_remove(struct platform_device *ghes_dev)
+static void ghes_remove(struct platform_device *ghes_dev)
{
int rc;
struct ghes *ghes;
@@ -1555,8 +1492,15 @@ static int ghes_remove(struct platform_device *ghes_dev)
break;
case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
rc = apei_sdei_unregister_ghes(ghes);
- if (rc)
- return rc;
+ if (rc) {
+ /*
+ * Returning early results in a resource leak, but we're
+ * only here if stopping the hardware failed.
+ */
+ dev_err(&ghes_dev->dev, "Failed to unregister ghes (%pe)\n",
+ ERR_PTR(rc));
+ return;
+ }
break;
default:
BUG();
@@ -1570,8 +1514,6 @@ static int ghes_remove(struct platform_device *ghes_dev)
mutex_unlock(&ghes_devs_mutex);
kfree(ghes);
-
- return 0;
}
static struct platform_driver ghes_platform_driver = {
@@ -1579,7 +1521,7 @@ static struct platform_driver ghes_platform_driver = {
.name = "GHES",
},
.probe = ghes_probe,
- .remove = ghes_remove,
+ .remove_new = ghes_remove,
};
void __init acpi_ghes_init(void)
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 6aef1ee5e1bd..20d757687e3d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -37,6 +37,20 @@ EXPORT_SYMBOL_GPL(hest_disable);
static struct acpi_table_hest *__read_mostly hest_tab;
+/*
+ * Since GHES_ASSIST is not supported, skip initialization of GHES_ASSIST
+ * structures for MCA.
+ * During HEST parsing, detected MCA error sources are cached from early
+ * table entries so that the Flags and Source Id fields from these cached
+ * values are then referred to in later table entries to determine if the
+ * encountered GHES_ASSIST structure should be initialized.
+ */
+static struct {
+ struct acpi_hest_ia_corrected *cmc;
+ struct acpi_hest_ia_machine_check *mc;
+ struct acpi_hest_ia_deferred_check *dmc;
+} mces;
+
static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
[ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
[ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
@@ -70,22 +84,54 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
len = sizeof(*cmc) + cmc->num_hardware_banks *
sizeof(struct acpi_hest_ia_error_bank);
+ mces.cmc = cmc;
} else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
struct acpi_hest_ia_machine_check *mc;
mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
len = sizeof(*mc) + mc->num_hardware_banks *
sizeof(struct acpi_hest_ia_error_bank);
+ mces.mc = mc;
} else if (hest_type == ACPI_HEST_TYPE_IA32_DEFERRED_CHECK) {
struct acpi_hest_ia_deferred_check *mc;
mc = (struct acpi_hest_ia_deferred_check *)hest_hdr;
len = sizeof(*mc) + mc->num_hardware_banks *
sizeof(struct acpi_hest_ia_error_bank);
+ mces.dmc = mc;
}
BUG_ON(len == -1);
return len;
};
+/*
+ * GHES and GHESv2 structures share the same format, starting from
+ * Source Id and ending in Error Status Block Length (inclusive).
+ */
+static bool is_ghes_assist_struct(struct acpi_hest_header *hest_hdr)
+{
+ struct acpi_hest_generic *ghes;
+ u16 related_source_id;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR &&
+ hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2)
+ return false;
+
+ ghes = (struct acpi_hest_generic *)hest_hdr;
+ related_source_id = ghes->related_source_id;
+
+ if (mces.cmc && mces.cmc->flags & ACPI_HEST_GHES_ASSIST &&
+ related_source_id == mces.cmc->header.source_id)
+ return true;
+ if (mces.mc && mces.mc->flags & ACPI_HEST_GHES_ASSIST &&
+ related_source_id == mces.mc->header.source_id)
+ return true;
+ if (mces.dmc && mces.dmc->flags & ACPI_HEST_GHES_ASSIST &&
+ related_source_id == mces.dmc->header.source_id)
+ return true;
+
+ return false;
+}
+
typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
static int apei_hest_parse(apei_hest_func_t func, void *data)
@@ -114,6 +160,11 @@ static int apei_hest_parse(apei_hest_func_t func, void *data)
return -EINVAL;
}
+ if (is_ghes_assist_struct(hest_hdr)) {
+ hest_hdr = (void *)hest_hdr + len;
+ continue;
+ }
+
rc = func(hest_hdr, data);
if (rc)
return rc;
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
index 8b3c7d42b41b..f5f21dd0d277 100644
--- a/drivers/acpi/arm64/agdi.c
+++ b/drivers/acpi/arm64/agdi.c
@@ -58,7 +58,7 @@ static int agdi_probe(struct platform_device *pdev)
return agdi_sdei_probe(pdev, adata);
}
-static int agdi_remove(struct platform_device *pdev)
+static void agdi_remove(struct platform_device *pdev)
{
struct agdi_data *adata = dev_get_platdata(&pdev->dev);
int err, i;
@@ -67,7 +67,7 @@ static int agdi_remove(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "Failed to disable sdei-event #%d (%pe)\n",
adata->sdei_event, ERR_PTR(err));
- return 0;
+ return;
}
for (i = 0; i < 3; i++) {
@@ -81,8 +81,6 @@ static int agdi_remove(struct platform_device *pdev)
if (err)
dev_err(&pdev->dev, "Failed to unregister sdei-event #%d (%pe)\n",
adata->sdei_event, ERR_PTR(err));
-
- return 0;
}
static struct platform_driver agdi_driver = {
@@ -90,7 +88,7 @@ static struct platform_driver agdi_driver = {
.name = "agdi",
},
.probe = agdi_probe,
- .remove = agdi_remove,
+ .remove_new = agdi_remove,
};
void __init acpi_agdi_init(void)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 569bd15f211b..d9fa730416f1 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1097,7 +1097,7 @@ static void acpi_device_remove(struct device *dev)
put_device(dev);
}
-struct bus_type acpi_bus_type = {
+const struct bus_type acpi_bus_type = {
.name = "acpi",
.match = acpi_bus_match,
.probe = acpi_device_probe,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d155a86a8614..4bfbe55553f4 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -166,6 +166,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+/* Check for valid access_width, otherwise, fallback to using bit_width */
+#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
+
+/* Shift and apply the mask for CPC reads/writes */
+#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
+ GENMASK(((reg)->bit_width), 0)))
+
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -780,6 +787,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (gas_t->address) {
void __iomem *addr;
+ size_t access_width;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
@@ -787,7 +795,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- addr = ioremap(gas_t->address, gas_t->bit_width/8);
+ access_width = GET_BIT_WIDTH(gas_t) / 8;
+ addr = ioremap(gas_t->address, access_width);
if (!addr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
@@ -983,6 +992,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
void __iomem *vaddr = NULL;
+ int size;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@@ -994,7 +1004,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
+ u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
@@ -1018,7 +1028,9 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
return acpi_os_read_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
- switch (reg->bit_width) {
+ size = GET_BIT_WIDTH(reg);
+
+ switch (size) {
case 8:
*val = readb_relaxed(vaddr);
break;
@@ -1037,18 +1049,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
return -EFAULT;
}
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ *val = MASK_VAL(reg, *val);
+
return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
+ int size;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
+ u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
@@ -1070,7 +1086,12 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
- switch (reg->bit_width) {
+ size = GET_BIT_WIDTH(reg);
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ val = MASK_VAL(reg, val);
+
+ switch (size) {
case 8:
writeb_relaxed(val, vaddr);
break;
@@ -1158,6 +1179,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
}
/**
+ * cppc_get_highest_perf - Get the highest performance register value.
+ * @cpunum: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
+{
+ return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+}
+EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
+
+/**
* cppc_get_epp_perf - Get the epp register value.
* @cpunum: CPU from which to get epp preference value.
* @epp_perf: Return address.
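
GET_BIT_WIDTH() above prefers the GAS access_width field, where 1..4 encode 8/16/32/64-bit accesses and 0 means unspecified, falling back to bit_width. A small sketch of that decoding (the example_* name is invented):

#include <linux/types.h>

static u32 example_gas_access_bits(u8 access_width, u8 bit_width)
{
	/* 1 -> 8 bits, 2 -> 16, 3 -> 32, 4 -> 64; 0 means "not specified". */
	if (access_width)
		return 8 << (access_width - 1);

	return bit_width;
}

For example, access_width == 3 yields a 32-bit access regardless of bit_width.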
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
deleted file mode 100644
index d39a9b474727..000000000000
--- a/drivers/acpi/custom_method.c
+++ /dev/null
@@ -1,103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * custom_method.c - debugfs interface for customizing ACPI control method
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/acpi.h>
-#include <linux/security.h>
-
-#include "internal.h"
-
-MODULE_LICENSE("GPL");
-
-static struct dentry *cm_dentry;
-
-/* /sys/kernel/debug/acpi/custom_method */
-
-static ssize_t cm_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- static char *buf;
- static u32 max_size;
- static u32 uncopied_bytes;
-
- struct acpi_table_header table;
- acpi_status status;
- int ret;
-
- ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
- if (ret)
- return ret;
-
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
- return -EINVAL;
- if (copy_from_user(&table, user_buf,
- sizeof(struct acpi_table_header)))
- return -EFAULT;
- uncopied_bytes = max_size = table.length;
- /* make sure the buf is not allocated */
- kfree(buf);
- buf = kzalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- if (buf == NULL)
- return -EINVAL;
-
- if ((*ppos > max_size) ||
- (*ppos + count > max_size) ||
- (*ppos + count < count) ||
- (count > uncopied_bytes)) {
- kfree(buf);
- buf = NULL;
- return -EINVAL;
- }
-
- if (copy_from_user(buf + (*ppos), user_buf, count)) {
- kfree(buf);
- buf = NULL;
- return -EFAULT;
- }
-
- uncopied_bytes -= count;
- *ppos += count;
-
- if (!uncopied_bytes) {
- status = acpi_install_method(buf);
- kfree(buf);
- buf = NULL;
- if (ACPI_FAILURE(status))
- return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
- }
-
- return count;
-}
-
-static const struct file_operations cm_fops = {
- .write = cm_write,
- .llseek = default_llseek,
-};
-
-static int __init acpi_custom_method_init(void)
-{
- cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
- acpi_debugfs_dir, NULL, &cm_fops);
- return 0;
-}
-
-static void __exit acpi_custom_method_exit(void)
-{
- debugfs_remove(cm_dentry);
-}
-
-module_init(acpi_custom_method_init);
-module_exit(acpi_custom_method_exit);
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 4919e7abe93f..654aaa53c67f 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -141,11 +141,9 @@ static int pch_fivr_add(struct platform_device *pdev)
return 0;
}
-static int pch_fivr_remove(struct platform_device *pdev)
+static void pch_fivr_remove(struct platform_device *pdev)
{
sysfs_remove_group(&pdev->dev.kobj, &pch_fivr_attribute_group);
-
- return 0;
}
static const struct acpi_device_id pch_fivr_device_ids[] = {
@@ -159,7 +157,7 @@ MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
static struct platform_driver pch_fivr_driver = {
.probe = pch_fivr_add,
- .remove = pch_fivr_remove,
+ .remove_new = pch_fivr_remove,
.driver = {
.name = "dptf_pch_fivr",
.acpi_match_table = pch_fivr_device_ids,
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 86561eda939f..b8187babbbbb 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -209,7 +209,7 @@ static int dptf_power_add(struct platform_device *pdev)
return 0;
}
-static int dptf_power_remove(struct platform_device *pdev)
+static void dptf_power_remove(struct platform_device *pdev)
{
struct acpi_device *acpi_dev = platform_get_drvdata(pdev);
@@ -221,8 +221,6 @@ static int dptf_power_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &dptf_battery_attribute_group);
else
sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
-
- return 0;
}
static const struct acpi_device_id int3407_device_ids[] = {
@@ -242,7 +240,7 @@ MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
static struct platform_driver dptf_power_driver = {
.probe = dptf_power_add,
- .remove = dptf_power_remove,
+ .remove_new = dptf_power_remove,
.driver = {
.name = "dptf_power",
.acpi_match_table = int3407_device_ids,
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index dbdee2924594..02255795b800 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec)
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void)
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/*
* When ec_freeze_events is true, we need to flush events in
@@ -567,9 +571,10 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
+ unsigned long flags;
bool guarded;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
@@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
@@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec)
static int ec_poll(struct acpi_ec *ec)
{
+ unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
@@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec)
do {
if (!ec_guard(ec))
return 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec, false);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
+ unsigned long tmp;
int ret = 0;
/* start transaction */
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
@@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
@@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
}
ec_log_drv("EC started");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
+ unsigned long flags;
bool flushed;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
flushed = acpi_ec_flushed(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
@@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
@@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
@@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
return 0;
@@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event started");
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
while (ec->events_to_process) {
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_submit_query(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->events_to_process--;
}
@@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event stopped");
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
guard_timeout = !!ec_guard(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
@@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec->events_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
clear_gpe_and_advance_transaction(ec, true);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void)
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
if (!work_in_progress)
return false;
@@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void)
pm_pr_dbg("ACPI EC work flushed\n");
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
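
The ec.c hunks convert plain spin_lock() calls on ec->lock to the _irqsave/_irq variants. As a general rule, when a lock can also be taken in interrupt context, process-context users must disable local interrupts while holding it. A generic sketch of that rule (invented example_* names, not the EC code):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_events;

/* Process context: keep local interrupts off for as long as the lock is held. */
static void example_count_event(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_events++;
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Hardirq handler: interrupts are already disabled, a plain lock is enough. */
static irqreturn_t example_irq(int irq, void *data)
{
	spin_lock(&example_lock);
	example_events++;
	spin_unlock(&example_lock);

	return IRQ_HANDLED;
}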
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index fe6b6792c8bb..11778c93254b 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -173,10 +173,9 @@ static void ged_shutdown(struct platform_device *pdev)
}
}
-static int ged_remove(struct platform_device *pdev)
+static void ged_remove(struct platform_device *pdev)
{
ged_shutdown(pdev);
- return 0;
}
static const struct acpi_device_id ged_acpi_ids[] = {
@@ -186,7 +185,7 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
- .remove = ged_remove,
+ .remove_new = ged_remove,
.shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 9dccbae9e8ea..ff72e4ef8738 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -387,7 +387,7 @@ err_end:
return result;
}
-static int acpi_fan_remove(struct platform_device *pdev)
+static void acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
@@ -399,8 +399,6 @@ static int acpi_fan_remove(struct platform_device *pdev)
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
thermal_cooling_device_unregister(fan->cdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -446,7 +444,7 @@ static const struct dev_pm_ops acpi_fan_pm = {
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
- .remove = acpi_fan_remove,
+ .remove_new = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 6588525c45ef..ca72a0dc5715 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -121,6 +121,7 @@ int acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
+bool acpi_device_is_enabled(const struct acpi_device *adev);
bool acpi_device_is_present(const struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
@@ -301,5 +302,6 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
void acpi_mipi_scan_crs_csi2(void);
void acpi_mipi_init_crs_csi2_swnodes(void);
void acpi_mipi_crs_csi2_cleanup(void);
+bool acpi_graph_ignore_port(acpi_handle handle);
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index 7286cf4579bc..d05413a0672a 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -19,6 +19,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -723,3 +724,73 @@ void acpi_mipi_crs_csi2_cleanup(void)
list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry)
acpi_mipi_del_crs_csi2(csi2);
}
+
+static const struct dmi_system_id dmi_ignore_port_nodes[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
+ },
+ },
+ { }
+};
+
+static const char *strnext(const char *s1, const char *s2)
+{
+ s1 = strstr(s1, s2);
+
+ if (!s1)
+ return NULL;
+
+ return s1 + strlen(s2);
+}
+
+/**
+ * acpi_graph_ignore_port - Tell whether a port node should be ignored
+ * @handle: The ACPI handle of the node (which may be a port node)
+ *
+ * Return: true if a port node should be ignored and the data to that should
+ * come from other sources instead (Windows ACPI definitions and
+ * ipu-bridge). This is currently used to ignore bad port nodes related to IPU6
+ * ("IPU?") and camera sensor devices ("LNK?") in certain Dell systems with
+ * Intel VSC.
+ */
+bool acpi_graph_ignore_port(acpi_handle handle)
+{
+ const char *path = NULL, *orig_path;
+ static bool dmi_tested, ignore_port;
+
+ if (!dmi_tested) {
+ ignore_port = dmi_first_match(dmi_ignore_port_nodes);
+ dmi_tested = true;
+ }
+
+ if (!ignore_port)
+ return false;
+
+ /* Check if the device is either "IPU" or "LNK" (sensor). */
+ orig_path = acpi_handle_path(handle);
+ if (!orig_path)
+ return false;
+ path = strnext(orig_path, "IPU");
+ if (!path)
+ path = strnext(orig_path, "LNK");
+ if (!path)
+ goto out_free;
+
+ if (!(isdigit(path[0]) && path[1] == '.'))
+ goto out_free;
+
+ /* Check if the node has a "PRT" prefix. */
+ path = strnext(path, "PRT");
+ if (path && isdigit(path[0]) && !path[1]) {
+ acpi_handle_debug(handle, "ignoring data node\n");
+
+ kfree(orig_path);
+ return true;
+ }
+
+out_free:
+ kfree(orig_path);
+ return false;
+}
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 802f8a56d1fa..d4595d1985b1 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1737,9 +1737,8 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
if ((nfit_mem->dsm_mask & (1 << func)) == 0)
return;
- out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
- || out_obj->buffer.length < sizeof(smart)) {
+ out_obj = acpi_evaluate_dsm_typed(handle, guid, revid, func, &in_obj, ACPI_TYPE_BUFFER);
+ if (!out_obj || out_obj->buffer.length < sizeof(smart)) {
dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
dev_name(dev));
ACPI_FREE(out_obj);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d6cb2c27a23b..741bcc9d6d6a 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -111,7 +111,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
snprintf(name, sizeof(name), "%llu", sun);
pci_slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(pci_slot)) {
- pr_err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
+ pr_err("pci_create_slot returned %pe\n", pci_slot);
kfree(slot);
return AE_OK;
}
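
The %pe format specifier prints the symbolic name of an error pointer ("-ENOMEM", "-EINVAL", ...) instead of the raw decimal obtained via PTR_ERR(). A small, hypothetical sketch of the idiom; the error handling shown is illustrative only.

struct pci_slot *slot;

slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(slot)) {
	/* Logs e.g. "pci_create_slot returned -ENOMEM" */
	pr_err("pci_create_slot returned %pe\n", slot);
	return PTR_ERR(slot);
}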
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index 843f678ade0c..998264a7333d 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -347,13 +347,11 @@ static const struct file_operations acpi_pfrt_log_fops = {
.llseek = noop_llseek,
};
-static int acpi_pfrt_log_remove(struct platform_device *pdev)
+static void acpi_pfrt_log_remove(struct platform_device *pdev)
{
struct pfrt_log_device *pfrt_log_dev = platform_get_drvdata(pdev);
misc_deregister(&pfrt_log_dev->miscdev);
-
- return 0;
}
static void pfrt_log_put_idx(void *data)
@@ -427,7 +425,7 @@ static struct platform_driver acpi_pfrt_log_driver = {
.acpi_match_table = acpi_pfrt_log_ids,
},
.probe = acpi_pfrt_log_probe,
- .remove = acpi_pfrt_log_remove,
+ .remove_new = acpi_pfrt_log_remove,
};
module_platform_driver(acpi_pfrt_log_driver);
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 98267f163e2b..8b2910995fc1 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -489,13 +489,11 @@ static const struct file_operations acpi_pfru_fops = {
.llseek = noop_llseek,
};
-static int acpi_pfru_remove(struct platform_device *pdev)
+static void acpi_pfru_remove(struct platform_device *pdev)
{
struct pfru_device *pfru_dev = platform_get_drvdata(pdev);
misc_deregister(&pfru_dev->miscdev);
-
- return 0;
}
static void pfru_put_idx(void *data)
@@ -567,7 +565,7 @@ static struct platform_driver acpi_pfru_driver = {
.acpi_match_table = acpi_pfru_ids,
},
.probe = acpi_pfru_probe,
- .remove = acpi_pfru_remove,
+ .remove_new = acpi_pfru_remove,
};
module_platform_driver(acpi_pfru_driver);
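
Both pfr_telemetry and pfr_update follow the same conversion: the remove callback can no longer report an error, so it returns void and is wired up through .remove_new. A minimal sketch of a platform driver using that callback; every name here is a placeholder.

static int example_probe(struct platform_device *pdev)
{
	/* Allocate state, set drvdata, register interfaces, ... */
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Tear down in reverse order; no error can be returned from here. */
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
	},
	.probe = example_probe,
	.remove_new = example_remove,
};
module_platform_driver(example_driver);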
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 4bd16b3f0781..67db60eda370 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -27,6 +27,7 @@
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
+#define ACPI_PROCESSOR_NOTIFY_HIGEST_PERF_CHANGED 0x85
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
@@ -83,6 +84,11 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
+ case ACPI_PROCESSOR_NOTIFY_HIGEST_PERF_CHANGED:
+ cpufreq_update_limits(pr->id);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
default:
acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event);
break;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 55437f5e0c3a..bd6a7857ce05 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1430,6 +1430,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
acpi_processor_registered--;
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ kfree(dev);
}
pr->flags.power_setup_done = 0;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index a6ead5204046..2b73580c9f36 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -80,6 +80,9 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct acpi_data_node *dn;
bool result;
+ if (acpi_graph_ignore_port(handle))
+ return false;
+
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return false;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index dacad1d846c0..59423fe9d0f2 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -468,6 +468,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"),
},
},
+ {
+ /* Asus ExpertBook B1502CVA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502CVA"),
+ },
+ },
{
/* Asus ExpertBook B2402CBA */
.matches = {
@@ -490,6 +497,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus ExpertBook B2502FBA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502FBA"),
+ },
+ },
+ {
/* Asus Vivobook E1504GA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -588,6 +602,34 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"),
},
},
+ {
+ /* Lunnen Ground 15 / AMD Ryzen 5 5500U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
+ DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"),
+ },
+ },
+ {
+ /* Lunnen Ground 16 / AMD Ryzen 7 5800U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
+ DMI_MATCH(DMI_BOARD_NAME, "LL6FA"),
+ },
+ },
+ {
+ /* MAIBENBEN X577 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"),
+ DMI_MATCH(DMI_BOARD_NAME, "X577"),
+ },
+ },
+ {
+ /* Maibenben X565 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"),
+ DMI_MATCH(DMI_BOARD_NAME, "X565"),
+ },
+ },
{ }
};
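
The entries above are plain dmi_system_id tables; the quirk site typically consults them once, e.g. with dmi_check_system() (or dmi_first_match() when the matching entry itself is needed). A minimal, hypothetical sketch of how such a table is matched:

static const struct dmi_system_id example_override[] = {
	{
		/* Hypothetical machine that needs the override */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_BOARD_NAME, "ExampleBoard"),
		},
	},
	{ }
};

static bool example_needs_override(void)
{
	/* dmi_check_system() returns the number of matching entries. */
	return dmi_check_system(example_override);
}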
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e6ed1ba91e5c..7c157bf92695 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -244,6 +244,53 @@ static int acpi_scan_try_to_offline(struct acpi_device *device)
return 0;
}
+static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
+{
+ struct acpi_scan_handler *handler = adev->handler;
+
+ acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, check);
+
+ if (check) {
+ acpi_bus_get_status(adev);
+ /*
+ * Skip devices that are still there, taking the enabled
+ * flag into account.
+ */
+ if (acpi_device_is_enabled(adev))
+ return 0;
+
+ /* Skip devices that have not been enumerated. */
+ if (!acpi_device_enumerated(adev)) {
+ dev_dbg(&adev->dev, "Still not enumerated\n");
+ return 0;
+ }
+ }
+
+ adev->flags.match_driver = false;
+ if (handler) {
+ if (handler->detach)
+ handler->detach(adev);
+
+ adev->handler = NULL;
+ } else {
+ device_release_driver(&adev->dev);
+ }
+ /*
+ * Most likely, the device is going away, so put it into D3cold before
+ * that.
+ */
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+ adev->flags.initialized = false;
+ acpi_device_clear_enumerated(adev);
+
+ return 0;
+}
+
+static void acpi_scan_check_subtree(struct acpi_device *adev)
+{
+ acpi_scan_check_and_detach(adev, (void *)true);
+}
+
static int acpi_scan_hot_remove(struct acpi_device *device)
{
acpi_handle handle = device->handle;
@@ -289,75 +336,62 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return 0;
}
-static int acpi_scan_device_not_enumerated(struct acpi_device *adev)
+static int acpi_scan_rescan_bus(struct acpi_device *adev)
{
- if (!acpi_device_enumerated(adev)) {
- dev_warn(&adev->dev, "Still not enumerated\n");
- return -EALREADY;
- }
- acpi_bus_trim(adev);
- return 0;
+ struct acpi_scan_handler *handler = adev->handler;
+ int ret;
+
+ if (handler && handler->hotplug.scan_dependent)
+ ret = handler->hotplug.scan_dependent(adev);
+ else
+ ret = acpi_bus_scan(adev->handle);
+
+ if (ret)
+ dev_info(&adev->dev, "Namespace scan failure\n");
+
+ return ret;
}
static int acpi_scan_device_check(struct acpi_device *adev)
{
- int error;
+ struct acpi_device *parent;
- acpi_bus_get_status(adev);
- if (acpi_device_is_present(adev)) {
- /*
- * This function is only called for device objects for which
- * matching scan handlers exist. The only situation in which
- * the scan handler is not attached to this device object yet
- * is when the device has just appeared (either it wasn't
- * present at all before or it was removed and then added
- * again).
- */
- if (adev->handler) {
- dev_warn(&adev->dev, "Already enumerated\n");
- return -EALREADY;
- }
- error = acpi_bus_scan(adev->handle);
- if (error) {
- dev_warn(&adev->dev, "Namespace scan failure\n");
- return error;
- }
- if (!adev->handler) {
- dev_warn(&adev->dev, "Enumeration failure\n");
- error = -ENODEV;
- }
- } else {
- error = acpi_scan_device_not_enumerated(adev);
- }
- return error;
-}
+ acpi_scan_check_subtree(adev);
-static int acpi_scan_bus_check(struct acpi_device *adev, void *not_used)
-{
- struct acpi_scan_handler *handler = adev->handler;
- int error;
+ if (!acpi_device_is_present(adev))
+ return 0;
- acpi_bus_get_status(adev);
- if (!acpi_device_is_present(adev)) {
- acpi_scan_device_not_enumerated(adev);
+ /*
+ * This function is only called for device objects for which matching
+ * scan handlers exist. The only situation in which the scan handler
+ * is not attached to this device object yet is when the device has
+ * just appeared (either it wasn't present at all before or it was
+ * removed and then added again).
+ */
+ if (adev->handler) {
+ dev_dbg(&adev->dev, "Already enumerated\n");
return 0;
}
- if (handler && handler->hotplug.scan_dependent)
- return handler->hotplug.scan_dependent(adev);
- error = acpi_bus_scan(adev->handle);
- if (error) {
- dev_warn(&adev->dev, "Namespace scan failure\n");
- return error;
- }
- return acpi_dev_for_each_child(adev, acpi_scan_bus_check, NULL);
+ parent = acpi_dev_parent(adev);
+ if (!parent)
+ parent = adev;
+
+ return acpi_scan_rescan_bus(parent);
+}
+
+static int acpi_scan_bus_check(struct acpi_device *adev)
+{
+ acpi_scan_check_subtree(adev);
+
+ return acpi_scan_rescan_bus(adev);
}
static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
{
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- return acpi_scan_bus_check(adev, NULL);
+ return acpi_scan_bus_check(adev);
case ACPI_NOTIFY_DEVICE_CHECK:
return acpi_scan_device_check(adev);
case ACPI_NOTIFY_EJECT_REQUEST:
@@ -798,6 +832,7 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */
"INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */
"INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */
+ "INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
NULL
};
@@ -1725,7 +1760,9 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"BSG1160", },
{"BSG2150", },
{"CSC3551", },
+ {"CSC3554", },
{"CSC3556", },
+ {"CSC3557", },
{"INT33FE", },
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
@@ -1922,6 +1959,11 @@ bool acpi_device_is_present(const struct acpi_device *adev)
return adev->status.present || adev->status.functional;
}
+bool acpi_device_is_enabled(const struct acpi_device *adev)
+{
+ return adev->status.present && adev->status.enabled;
+}
+
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
const char *idstr,
const struct acpi_device_id **matchid)
@@ -2550,32 +2592,6 @@ int acpi_bus_scan(acpi_handle handle)
}
EXPORT_SYMBOL(acpi_bus_scan);
-static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used)
-{
- struct acpi_scan_handler *handler = adev->handler;
-
- acpi_dev_for_each_child_reverse(adev, acpi_bus_trim_one, NULL);
-
- adev->flags.match_driver = false;
- if (handler) {
- if (handler->detach)
- handler->detach(adev);
-
- adev->handler = NULL;
- } else {
- device_release_driver(&adev->dev);
- }
- /*
- * Most likely, the device is going away, so put it into D3cold before
- * that.
- */
- acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
- adev->flags.initialized = false;
- acpi_device_clear_enumerated(adev);
-
- return 0;
-}
-
/**
* acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
* @adev: Root of the ACPI namespace scope to walk.
@@ -2584,7 +2600,7 @@ static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used)
*/
void acpi_bus_trim(struct acpi_device *adev)
{
- acpi_bus_trim_one(adev, NULL);
+ acpi_scan_check_and_detach(adev, NULL);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 808484d11209..728acfeb774d 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
- /*
- * ASUS B1400CEAE hangs on resume from suspend (see
- * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
- */
- {
- .callback = init_default_s3,
- .ident = "ASUS B1400CEAE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
- },
- },
{},
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 4748e8061253..302dce0b2b50 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -47,6 +47,8 @@
#define ACPI_THERMAL_TRIP_PASSIVE (-1)
+#define ACPI_THERMAL_MAX_NR_TRIPS (ACPI_THERMAL_MAX_ACTIVE + 3)
+
/*
* This exception is thrown out in two cases:
* 1.An invalid trip point becomes invalid or a valid trip point becomes invalid
@@ -112,7 +114,6 @@ struct acpi_thermal {
unsigned long polling_frequency;
volatile u8 zombie;
struct acpi_thermal_trips trips;
- struct thermal_trip *trip_table;
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
@@ -451,26 +452,19 @@ fail:
return false;
}
-static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
+static void acpi_thermal_get_trip_points(struct acpi_thermal *tz)
{
- unsigned int count = 0;
int i;
- if (acpi_thermal_init_trip(tz, ACPI_THERMAL_TRIP_PASSIVE))
- count++;
+ acpi_thermal_init_trip(tz, ACPI_THERMAL_TRIP_PASSIVE);
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (acpi_thermal_init_trip(tz, i))
- count++;
- else
+ if (!acpi_thermal_init_trip(tz, i))
break;
-
}
while (++i < ACPI_THERMAL_MAX_ACTIVE)
tz->trips.active[i].trip.temp_dk = THERMAL_TEMP_INVALID;
-
- return count;
}
/* sys I/F for generic thermal sysfs support */
@@ -626,7 +620,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
return acpi_thermal_bind_unbind_cdev(thermal, cdev, false);
}
-static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
@@ -662,15 +656,16 @@ static void acpi_thermal_zone_sysfs_remove(struct acpi_thermal *tz)
}
static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz,
+ const struct thermal_trip *trip_table,
unsigned int trip_count,
int passive_delay)
{
int result;
tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
- tz->trip_table,
+ trip_table,
trip_count,
- 0, tz,
+ tz,
&acpi_thermal_zone_ops,
NULL,
passive_delay,
@@ -823,10 +818,10 @@ static void acpi_thermal_free_thermal_zone(struct acpi_thermal *tz)
static int acpi_thermal_add(struct acpi_device *device)
{
+ struct thermal_trip trip_table[ACPI_THERMAL_MAX_NR_TRIPS] = { 0 };
struct acpi_thermal_trip *acpi_trip;
struct thermal_trip *trip;
struct acpi_thermal *tz;
- unsigned int trip_count;
int crit_temp, hot_temp;
int passive_delay = 0;
int result;
@@ -848,21 +843,10 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_aml_dependency_fix(tz);
/* Get trip points [_CRT, _PSV, etc.] (required). */
- trip_count = acpi_thermal_get_trip_points(tz);
+ acpi_thermal_get_trip_points(tz);
crit_temp = acpi_thermal_get_critical_trip(tz);
- if (crit_temp != THERMAL_TEMP_INVALID)
- trip_count++;
-
hot_temp = acpi_thermal_get_hot_trip(tz);
- if (hot_temp != THERMAL_TEMP_INVALID)
- trip_count++;
-
- if (!trip_count) {
- pr_warn(FW_BUG "No valid trip points!\n");
- result = -ENODEV;
- goto free_memory;
- }
/* Get temperature [_TMP] (required). */
result = acpi_thermal_get_temperature(tz);
@@ -881,13 +865,7 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_guess_offset(tz, crit_temp);
- trip = kcalloc(trip_count, sizeof(*trip), GFP_KERNEL);
- if (!trip) {
- result = -ENOMEM;
- goto free_memory;
- }
-
- tz->trip_table = trip;
+ trip = trip_table;
if (crit_temp != THERMAL_TEMP_INVALID) {
trip->type = THERMAL_TRIP_CRITICAL;
@@ -923,9 +901,17 @@ static int acpi_thermal_add(struct acpi_device *device)
trip++;
}
- result = acpi_thermal_register_thermal_zone(tz, trip_count, passive_delay);
+ if (trip == trip_table) {
+ pr_warn(FW_BUG "No valid trip points!\n");
+ result = -ENODEV;
+ goto free_memory;
+ }
+
+ result = acpi_thermal_register_thermal_zone(tz, trip_table,
+ trip - trip_table,
+ passive_delay);
if (result)
- goto free_trips;
+ goto free_memory;
refcount_set(&tz->thermal_check_count, 3);
mutex_init(&tz->thermal_check_lock);
@@ -944,8 +930,6 @@ static int acpi_thermal_add(struct acpi_device *device)
flush_wq:
flush_workqueue(acpi_thermal_pm_queue);
acpi_thermal_unregister_thermal_zone(tz);
-free_trips:
- kfree(tz->trip_table);
free_memory:
acpi_thermal_free_thermal_zone(tz);
@@ -966,7 +950,6 @@ static void acpi_thermal_remove(struct acpi_device *device)
flush_workqueue(acpi_thermal_pm_queue);
acpi_thermal_unregister_thermal_zone(tz);
- kfree(tz->trip_table);
acpi_thermal_free_thermal_zone(tz);
}
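
With the trip table now a fixed-size array on the stack, the number of valid trips falls out of pointer arithmetic on the fill cursor rather than being counted up front. A tiny sketch of the idiom; crit_temp and register_zone() are placeholders.

struct thermal_trip trip_table[ACPI_THERMAL_MAX_NR_TRIPS] = { 0 };
struct thermal_trip *trip = trip_table;

/* For each valid trip point: fill *trip, then advance the cursor. */
trip->type = THERMAL_TRIP_CRITICAL;
trip->temperature = crit_temp;
trip++;

if (trip == trip_table)
	return -ENODEV;		/* no valid trip points at all */

/* trip - trip_table is exactly the number of populated entries. */
return register_zone(trip_table, trip - trip_table);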
diff --git a/drivers/acpi/thermal_lib.c b/drivers/acpi/thermal_lib.c
index 4e0519ca9739..6214d6ebe1fa 100644
--- a/drivers/acpi/thermal_lib.c
+++ b/drivers/acpi/thermal_lib.c
@@ -100,7 +100,7 @@ static int thermal_temp(int error, int temp_decik, int *ret_temp)
*/
int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
{
- int temp_decik;
+ int temp_decik = 0;
int ret = acpi_active_trip_temp(adev, id, &temp_decik);
return thermal_temp(ret, temp_decik, ret_temp);
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);
*/
int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- int temp_decik;
+ int temp_decik = 0;
int ret = acpi_passive_trip_temp(adev, &temp_decik);
return thermal_temp(ret, temp_decik, ret_temp);
@@ -139,7 +139,7 @@ EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);
*/
int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- int temp_decik;
+ int temp_decik = 0;
int ret = acpi_hot_trip_temp(adev, &temp_decik);
return thermal_temp(ret, temp_decik, ret_temp);
@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);
*/
int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- int temp_decik;
+ int temp_decik = 0;
int ret = acpi_critical_trip_temp(adev, &temp_decik);
return thermal_temp(ret, temp_decik, ret_temp);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index abac5cc25477..202234ba54bd 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -559,7 +559,7 @@ EXPORT_SYMBOL(acpi_evaluate_ost);
*
* Caller must free the returned buffer
*/
-static char *acpi_handle_path(acpi_handle handle)
+char *acpi_handle_path(acpi_handle handle)
{
struct acpi_buffer buffer = {
.length = ACPI_ALLOCATE_BUFFER,
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 7d64e655f1b8..cd84af23f7ea 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -488,7 +488,21 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ if (lps0_dsm_func_mask > 0 && lps0_dsm_func_mask_microsoft > 0) {
+ unsigned int func_mask;
+
+ /*
+ * Avoid evaluating the same _DSM function for two
+ * different UUIDs and prioritize the MSFT one.
+ */
+ func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
+ if (func_mask) {
+ acpi_handle_info(adev->handle,
+ "Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
+ func_mask);
+ lps0_dsm_func_mask &= ~func_mask;
+ }
+ }
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
@@ -549,19 +563,22 @@ int acpi_s2idle_prepare_late(void)
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
/* LPS0 entry */
- if (lps0_dsm_func_mask > 0)
- acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
- ACPI_LPS0_ENTRY_AMD :
- ACPI_LPS0_ENTRY,
+ if (lps0_dsm_func_mask > 0 && acpi_s2idle_vendor_amd())
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
lps0_dsm_func_mask, lps0_dsm_guid);
+
if (lps0_dsm_func_mask_microsoft > 0) {
- /* modern standby entry */
+ /* Modern Standby entry */
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
+ if (lps0_dsm_func_mask > 0 && !acpi_s2idle_vendor_amd())
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+
list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
if (handler->prepare)
handler->prepare();
@@ -600,14 +617,14 @@ void acpi_s2idle_restore_early(void)
ACPI_LPS0_EXIT_AMD :
ACPI_LPS0_EXIT,
lps0_dsm_func_mask, lps0_dsm_guid);
- if (lps0_dsm_func_mask_microsoft > 0)
+
+ if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-
- /* Modern standby exit */
- if (lps0_dsm_func_mask_microsoft > 0)
+ /* Modern Standby exit */
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ }
/* Screen on */
if (lps0_dsm_func_mask_microsoft > 0)
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index bc65ebfcdf76..90c3d2eab9e9 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -428,7 +428,7 @@ bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);
-int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
{
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
@@ -436,8 +436,6 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
u64 uid;
int ret;
- *skip = false;
-
ret = acpi_dev_uid_to_integer(adev, &uid);
if (ret)
return 0;
@@ -463,7 +461,6 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
return 0;
}
-EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
bool acpi_quirk_skip_gpio_event_handlers(void)
{
@@ -478,8 +475,41 @@ bool acpi_quirk_skip_gpio_event_handlers(void)
return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
+#else
+static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ return 0;
+}
#endif
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ struct acpi_device *adev = ACPI_COMPANION(controller_parent);
+
+ *skip = false;
+
+ /*
+ * The DELL0501 ACPI HID represents a UART (CID is set to PNP0501) with
+ * a backlight controller attached. There is no separate ACPI device with
+ * a UartSerialBusV2() resource to model the backlight controller.
+ * Set skip to true so that the tty core creates a serdev ctrl device.
+ * The backlight driver will manually create the serdev client device.
+ */
+ if (acpi_dev_hid_match(adev, "DELL0501")) {
+ *skip = true;
+ /*
+ * Create a platform dev for dell-uart-backlight to bind to.
+ * This is a static device, so no need to store the result.
+ */
+ platform_device_register_simple("dell-uart-backlight", PLATFORM_DEVID_NONE,
+ NULL, 0);
+ return 0;
+ }
+
+ return acpi_dmi_skip_serdev_enumeration(controller_parent, skip);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
static const struct {
const char *hid;
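
platform_device_register_simple() used above returns the new device or an ERR_PTR(); since the dell-uart-backlight device is meant to exist for the lifetime of the system, the result is intentionally not stored. A minimal, hypothetical sketch of the more common pattern where the result is kept and torn down later:

struct platform_device *pdev;

pdev = platform_device_register_simple("example-dev", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev))
	return PTR_ERR(pdev);

/* ... on teardown ... */
platform_device_unregister(pdev);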
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 42b51c9812a0..928ec93c6b45 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -116,15 +116,14 @@ config SATA_AHCI
If unsure, say N.
config SATA_MOBILE_LPM_POLICY
- int "Default SATA Link Power Management policy for low power chipsets"
+ int "Default SATA Link Power Management policy"
range 0 4
default 0
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
- chipsets are typically found on most laptops but desktops and
- servers now also widely use chipsets supporting low power modes.
+ chipsets are ubiquitous across laptops, desktops and servers.
The value set has the following meanings:
0 => Keep firmware settings
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index da2e74fce2d9..78570684ff68 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -50,11 +50,18 @@ enum board_ids {
board_ahci,
board_ahci_43bit_dma,
board_ahci_ign_iferr,
- board_ahci_low_power,
board_ahci_no_debounce_delay,
- board_ahci_nomsi,
- board_ahci_noncq,
- board_ahci_nosntf,
+ board_ahci_no_msi,
+ /*
+ * board_ahci_pcs_quirk is for legacy Intel platforms.
+ * Modern Intel platforms should use board_ahci instead.
+ * (Some modern Intel platforms might have been added with
+ * board_ahci_pcs_quirk; however, we cannot change them to board_ahci
+ * without testing that the platform actually works without the quirk.)
+ */
+ board_ahci_pcs_quirk,
+ board_ahci_pcs_quirk_no_devslp,
+ board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
@@ -68,12 +75,6 @@ enum board_ids {
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
- /*
- * board IDs for Intel chipsets that support more than 6 ports
- * *and* end up needing the PCS quirk.
- */
- board_ahci_pcs7,
-
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
board_ahci_mcp67 = board_ahci_mcp65,
@@ -143,36 +144,38 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_low_power] = {
- AHCI_HFLAGS (AHCI_HFLAG_USE_LPM_POLICY),
+ [board_ahci_no_debounce_delay] = {
.flags = AHCI_FLAG_COMMON,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_no_debounce_delay] = {
+ [board_ahci_no_msi] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
- .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nomsi] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
+ [board_ahci_pcs_quirk] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_noncq] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ [board_ahci_pcs_quirk_no_devslp] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK |
+ AHCI_HFLAG_NO_DEVSLP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nosntf] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ [board_ahci_pcs_quirk_no_sntf] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK |
+ AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -194,6 +197,7 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
[board_ahci_avn] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -252,119 +256,113 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
- [board_ahci_pcs7] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- },
};
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
- { PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
- { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
- { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
- { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
- { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
+ { PCI_VDEVICE(INTEL, 0x06d6), board_ahci_pcs_quirk }, /* Comet Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x2652), board_ahci_pcs_quirk }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x2653), board_ahci_pcs_quirk }, /* ICH6M */
+ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci_pcs_quirk }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci_pcs_quirk }, /* ICH7M */
+ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci_pcs_quirk }, /* ICH7R */
{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
- { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
- { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
- { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
- { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
- { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
- { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
- { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
- { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
- { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
- { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
- { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
- { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
- { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
- { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
- { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x2681), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2682), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2683), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci_pcs_quirk }, /* ICH7-M DH */
+ { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pcs_quirk }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pcs_quirk_no_sntf }, /* ICH8/Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pcs_quirk }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pcs_quirk }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pcs_quirk }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x502a), board_ahci_pcs_quirk }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x502b), board_ahci_pcs_quirk }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a22), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3b22), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_pcs_quirk }, /* PCH M AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_pcs_quirk }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci_pcs_quirk }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_pcs_quirk }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_pcs_quirk }, /* CPT M RAID */
+ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci_pcs_quirk }, /* PBG AHCI */
+ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci_pcs_quirk }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci_pcs_quirk }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x2323), board_ahci_pcs_quirk }, /* DH89xxCC AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci_pcs_quirk }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_pcs_quirk }, /* Panther M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_pcs_quirk }, /* Panther M RAID */
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci_pcs_quirk }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_pcs_quirk }, /* Lynx M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_pcs_quirk }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_pcs_quirk }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_pcs_quirk }, /* Cannon Lake PCH-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci_pcs_quirk }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci_pcs_quirk }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci_pcs_quirk }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
@@ -373,65 +371,65 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* *burg SATA0 'RAID' */
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* *burg SATA1 'RAID' */
- { PCI_VDEVICE(INTEL, 0x282f), board_ahci }, /* *burg SATA2 'RAID' */
- { PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d7), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
- { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
- { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
- { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
- { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
- { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
- { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
- { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
- { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
- { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
- { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
- { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci_pcs_quirk }, /* Wellsburg/Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci_pcs_quirk }, /* *burg SATA0 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci_pcs_quirk }, /* *burg SATA1 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x282f), board_ahci_pcs_quirk }, /* *burg SATA2 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x43d4), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d5), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d6), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d7), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci_pcs_quirk }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci_pcs_quirk }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci_pcs_quirk }, /* Coleto Creek AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_pcs_quirk }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci_pcs_quirk }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_pcs_quirk }, /* 9 Series M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_pcs_quirk }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_pcs_quirk }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_pcs_quirk }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0xa102), board_ahci_pcs_quirk }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_pcs_quirk }, /* Sunrise M AHCI */
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa106), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_pcs_quirk }, /* Sunrise M RAID */
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa182), board_ahci_pcs_quirk }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa186), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa202), board_ahci_pcs_quirk }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci_pcs_quirk }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x06d7), board_ahci_pcs_quirk }, /* Comet Lake-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa386), board_ahci_pcs_quirk }, /* Comet Lake PCH-V RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_pcs_quirk }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_pcs_quirk_no_devslp }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_pcs_quirk }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_pcs_quirk }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_pcs_quirk }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_pcs_quirk }, /* Comet Lake PCH-U AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
- { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_low_power }, /* Alder Lake-P AHCI */
+ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
+ { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -459,14 +457,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
- { PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
/* Dell S140/S150 */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_SUBVENDOR_ID_DELL, PCI_ANY_ID,
- PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci_pcs_quirk },
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -623,8 +621,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
* enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
- { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
- { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_no_msi },
+ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_no_msi },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -671,9 +669,17 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
- hpriv->saved_port_map = 0x3f;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
+ switch (pdev->device) {
+ case 0x1166:
+ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+ hpriv->saved_port_map = 0x3f;
+ break;
+ case 0x1064:
+ dev_info(&pdev->dev, "ASM1064 has only four ports\n");
+ hpriv->saved_port_map = 0xf;
+ break;
+ }
}
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
@@ -1423,17 +1429,6 @@ static bool ahci_broken_online(struct pci_dev *pdev)
return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
}
-static bool ahci_broken_devslp(struct pci_dev *pdev)
-{
- /* device with broken DEVSLP but still showing SDS capability */
- static const struct pci_device_id ids[] = {
- { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
- {}
- };
-
- return pci_match_id(ids, pdev);
-}
-
#ifdef CONFIG_ATA_ACPI
static void ahci_gtf_filter_workaround(struct ata_host *host)
{
@@ -1642,14 +1637,31 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
-static void ahci_update_initial_lpm_policy(struct ata_port *ap,
- struct ahci_host_priv *hpriv)
+static void ahci_mark_external_port(struct ata_port *ap)
{
- int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ /* mark external ports (hotplug-capable, eSATA) */
+ tmp = readl(port_mmio + PORT_CMD);
+ if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
+ (tmp & PORT_CMD_HPCP))
+ ap->pflags |= ATA_PFLAG_EXTERNAL;
+}
+
+static void ahci_update_initial_lpm_policy(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
- /* Ignore processing for chipsets that don't use policy */
- if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
+ /*
+ * AHCI contains a known incompatibility between LPM and hot-plug
+ * removal events, see 7.3.1 Hot Plug Removal Detection and Power
+ * Management Interaction in AHCI 1.3.1. Therefore, do not enable
+ * LPM if the port advertises itself as an external port.
+ */
+ if (ap->pflags & ATA_PFLAG_EXTERNAL)
return;
/* user modified policy via module param */
@@ -1672,17 +1684,9 @@ update_policy:
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
{
- const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
u16 tmp16;
- /*
- * Only apply the 6-port PCS quirk for known legacy platforms.
- */
- if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
- return;
-
- /* Skip applying the quirk on Denverton and beyond */
- if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+ if (!(hpriv->flags & AHCI_HFLAG_INTEL_PCS_QUIRK))
return;
/*
@@ -1817,10 +1821,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&dev_attr_remapped_nvme.attr,
NULL);
- /* must set flag prior to save config in order to take effect */
- if (ahci_broken_devslp(pdev))
- hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
-
#ifdef CONFIG_ARM64
if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
pdev->device == 0xa235 &&
@@ -1934,7 +1934,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
- ahci_update_initial_lpm_policy(ap, hpriv);
+ ahci_mark_external_port(ap);
+
+ ahci_update_initial_lpm_policy(ap);
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index df8f8a1a3a34..344c87210d8f 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -241,13 +241,11 @@ enum {
AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read
only registers */
- AHCI_HFLAG_USE_LPM_POLICY = BIT(25), /* chipset that should use
- SATA_MOBILE_LPM_POLICY
- as default lpm_policy */
- AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
+ AHCI_HFLAG_SUSPEND_PHYS = BIT(25), /* handle PHYs during
suspend/resume */
- AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
- AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
+ AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
+ AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 64f7f7d6ba84..11a2c199a7c2 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
u32 axicc;
bool is_cci_enabled;
int flags;
- struct reset_control *rst;
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
+static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc, i;
+
+ rc = ahci_platform_enable_regulators(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ /* Assert the controller reset */
+ rc = ahci_platform_assert_rsts(hpriv);
+ if (rc)
+ goto disable_clks;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_rsts;
+ }
+
+ /* De-assert the controller reset */
+ ahci_platform_deassert_rsts(hpriv);
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_power_on(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ return 0;
+
+disable_rsts:
+ ahci_platform_deassert_rsts(hpriv);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ ahci_platform_disable_regulators(hpriv);
+
+ return rc;
+}
+
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
return -ENOMEM;
cevapriv->ahci_pdev = pdev;
-
- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
- NULL);
- if (IS_ERR(cevapriv->rst))
- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
- "failed to get reset\n");
-
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
- if (!cevapriv->rst) {
- rc = ahci_platform_enable_resources(hpriv);
- if (rc)
- return rc;
- } else {
- int i;
+ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
+ if (IS_ERR(hpriv->rsts))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
+ "failed to get reset\n");
- rc = ahci_platform_enable_clks(hpriv);
- if (rc)
- return rc;
- /* Assert the controller reset */
- reset_control_assert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_init(hpriv->phys[i]);
- if (rc)
- return rc;
- }
-
- /* De-assert the controller reset */
- reset_control_deassert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_power_on(hpriv->phys[i]);
- if (rc) {
- phy_exit(hpriv->phys[i]);
- return rc;
- }
- }
- }
+ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read OOB timing value for COMWAKE from device-tree*/
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/*
@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
int rc;
- rc = ahci_platform_enable_resources(hpriv);
+ rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 1a63200ea437..83431aae74d8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1280,10 +1280,8 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
{
- struct ahci_host_priv *hpriv = ap->host->private_data;
const char *emsg = NULL;
int rc;
- u32 tmp;
/* make sure port is not active */
rc = ahci_deinit_port(ap, &emsg);
@@ -1291,11 +1289,6 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
dev_warn(dev, "%s (%d)\n", emsg, rc);
ahci_port_clear_pending_irq(ap);
-
- /* mark esata ports */
- tmp = readl(port_mmio + PORT_CMD);
- if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
- ap->pflags |= ATA_PFLAG_EXTERNAL;
}
void ahci_init_controller(struct ata_host *host)
@@ -2627,8 +2620,8 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
speed_s = "?";
dev_info(host->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ "AHCI vers %02x%02x.%02x%02x, "
+ "%u command slots, %s Gbps, %s mode\n"
,
(vers >> 24) & 0xff,
@@ -2637,12 +2630,18 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
vers & 0xff,
((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
speed_s,
- impl,
scc_s);
dev_info(host->dev,
+ "%u/%u ports implemented (port mask 0x%x)\n"
+ ,
+
+ hweight32(impl),
+ (cap & 0x1f) + 1,
+ impl);
+
+ dev_info(host->dev,
"flags: "
"%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s"
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 09ed67772fae..be3412cdb22e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2001,6 +2001,33 @@ bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
return true;
}
+static bool ata_dev_power_is_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_CHK_POWER;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
+ err_mask);
+ /*
+ * Assume we are in standby mode so that we always force a
+ * spinup in ata_dev_power_set_active().
+ */
+ return false;
+ }
+
+ ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
+
+ /* Active or idle */
+ return tf.nsect == 0xff;
+}
+
/**
* ata_dev_power_set_standby - Set a device power mode to standby
* @dev: target device
@@ -2017,6 +2044,11 @@ void ata_dev_power_set_standby(struct ata_device *dev)
struct ata_taskfile tf;
unsigned int err_mask;
+ /* If the device is already sleeping or in standby, do nothing. */
+ if ((dev->flags & ATA_DFLAG_SLEEPING) ||
+ !ata_dev_power_is_active(dev))
+ return;
+
/*
* Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
* causing some drives to spin up and down again. For these, do nothing
@@ -2042,33 +2074,6 @@ void ata_dev_power_set_standby(struct ata_device *dev)
err_mask);
}
-static bool ata_dev_power_is_active(struct ata_device *dev)
-{
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- ata_tf_init(dev, &tf);
- tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
- tf.protocol = ATA_PROT_NODATA;
- tf.command = ATA_CMD_CHK_POWER;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- if (err_mask) {
- ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
- err_mask);
- /*
- * Assume we are in standby mode so that we always force a
- * spinup in ata_dev_power_set_active().
- */
- return false;
- }
-
- ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
-
- /* Active or idle */
- return tf.nsect == 0xff;
-}
-
/**
* ata_dev_power_set_active - Set a device power mode to active
* @dev: target device
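
The relocation above boils down to one extra early exit on the standby path: CHECK POWER MODE is issued first, and only a drive reporting 0xff (active or idle) in the count field is sent STANDBY IMMEDIATE. A minimal sketch of the resulting flow, reusing the helpers exactly as defined above:

/* Sketch of the suspend/shutdown behaviour after this change. */
static void example_standby_path(struct ata_device *dev)
{
        if ((dev->flags & ATA_DFLAG_SLEEPING) ||
            !ata_dev_power_is_active(dev))
                return;         /* already asleep or spun down, nothing to do */

        /* ... the existing STANDBY IMMEDIATE taskfile follows here ... */
}
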
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index a7adfdcb5e27..9a2cb9ca9d1d 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -464,7 +464,7 @@ static void pata_parport_bus_release(struct device *dev)
/* nothing to do here but required to avoid warning on device removal */
}
-static struct bus_type pata_parport_bus_type = {
+static const struct bus_type pata_parport_bus_type = {
.name = DRV_NAME,
};
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 50d8ce20ae5b..9fb1575f8d88 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2550,14 +2550,12 @@ static int fore200e_sba_probe(struct platform_device *op)
return 0;
}
-static int fore200e_sba_remove(struct platform_device *op)
+static void fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
fore200e_shutdown(fore200e);
kfree(fore200e);
-
- return 0;
}
static const struct of_device_id fore200e_sba_match[] = {
@@ -2574,7 +2572,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove = fore200e_sba_remove,
+ .remove_new = fore200e_sba_remove,
};
#endif
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index d944d5298eca..151d95f96b11 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -177,6 +177,20 @@ config HT16K33
Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
LED controller driver with keyscan.
+config MAX6959
+ tristate "Maxim MAX6958/6959 7-segment LED controller"
+ depends on I2C
+ select REGMAP_I2C
+ select LINEDISP
+ help
+ If you say yes here you get support for the following Maxim chips
+ (I2C 7-segment LED display controller):
+ - MAX6958
+ - MAX6959 (input support)
+
+ This driver can also be built as a module. If so, the module
+ will be called max6959.
+
config LCD2S
tristate "lcd2s 20x4 character display over I2C console"
depends on I2C
@@ -197,6 +211,17 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
+config SEG_LED_GPIO
+ tristate "Generic 7-segment LED display"
+ depends on GPIOLIB || COMPILE_TEST
+ select LINEDISP
+ help
+ This driver supports a generic 7-segment LED display made up
+ of GPIO pins connected to the individual segments.
+
+ This driver can also be built as a module. If so, the module
+ will be called seg-led-gpio.
+
menuconfig PARPORT_PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 6968ed4d3f0a..4a8ea41b0550 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_HT16K33) += ht16k33.o
obj-$(CONFIG_PARPORT_PANEL) += panel.o
obj-$(CONFIG_LCD2S) += lcd2s.o
obj-$(CONFIG_LINEDISP) += line-display.o
+obj-$(CONFIG_MAX6959) += max6959.o
+obj-$(CONFIG_SEG_LED_GPIO) += seg-led-gpio.o
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 5ba19c339f08..2b74dabe7e17 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -96,7 +96,7 @@ none:
return ret;
}
-static int cfag12864bfb_remove(struct platform_device *device)
+static void cfag12864bfb_remove(struct platform_device *device)
{
struct fb_info *info = platform_get_drvdata(device);
@@ -104,13 +104,11 @@ static int cfag12864bfb_remove(struct platform_device *device)
unregister_framebuffer(info);
framebuffer_release(info);
}
-
- return 0;
}
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove = cfag12864bfb_remove,
+ .remove_new = cfag12864bfb_remove,
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index d56a5d508ccd..7ac0b1b1d548 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -319,7 +319,7 @@ fail1:
return ret;
}
-static int hd44780_remove(struct platform_device *pdev)
+static void hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
struct hd44780_common *hdc = lcd->drvdata;
@@ -329,7 +329,6 @@ static int hd44780_remove(struct platform_device *pdev)
kfree(lcd->drvdata);
kfree(lcd);
- return 0;
}
static const struct of_device_id hd44780_of_match[] = {
@@ -340,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);
static struct platform_driver hd44780_driver = {
.probe = hd44780_probe,
- .remove = hd44780_remove,
+ .remove_new = hd44780_remove,
.driver = {
.name = "hd44780",
.of_match_table = hd44780_of_match,
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a90430b7d07b..96acfb2b58cd 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -15,6 +15,7 @@
#include <linux/property.h>
#include <linux/fb.h>
#include <linux/backlight.h>
+#include <linux/container_of.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/leds.h>
@@ -85,16 +86,6 @@ struct ht16k33_fbdev {
uint8_t *cache;
};
-struct ht16k33_seg {
- struct linedisp linedisp;
- union {
- struct seg7_conversion_map seg7;
- struct seg14_conversion_map seg14;
- } map;
- unsigned int map_size;
- char curr[4];
-};
-
struct ht16k33_priv {
struct i2c_client *client;
struct delayed_work work;
@@ -102,12 +93,21 @@ struct ht16k33_priv {
struct ht16k33_keypad keypad;
union {
struct ht16k33_fbdev fbdev;
- struct ht16k33_seg seg;
+ struct linedisp linedisp;
};
enum display_type type;
uint8_t blink;
};
+#define ht16k33_work_to_priv(p) \
+ container_of(p, struct ht16k33_priv, work.work)
+
+#define ht16k33_led_to_priv(p) \
+ container_of(p, struct ht16k33_priv, led)
+
+#define ht16k33_linedisp_to_priv(p) \
+ container_of(p, struct ht16k33_priv, linedisp)
+
static const struct fb_fix_screeninfo ht16k33_fb_fix = {
.id = DRIVER_NAME,
.type = FB_TYPE_PACKED_PIXELS,
@@ -135,33 +135,6 @@ static const struct fb_var_screeninfo ht16k33_fb_var = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static const SEG7_DEFAULT_MAP(initial_map_seg7);
-static const SEG14_DEFAULT_MAP(initial_map_seg14);
-
-static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ht16k33_priv *priv = dev_get_drvdata(dev);
-
- memcpy(buf, &priv->seg.map, priv->seg.map_size);
- return priv->seg.map_size;
-}
-
-static ssize_t map_seg_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t cnt)
-{
- struct ht16k33_priv *priv = dev_get_drvdata(dev);
-
- if (cnt != priv->seg.map_size)
- return -EINVAL;
-
- memcpy(&priv->seg.map, buf, cnt);
- return cnt;
-}
-
-static DEVICE_ATTR(map_seg7, 0644, map_seg_show, map_seg_store);
-static DEVICE_ATTR(map_seg14, 0644, map_seg_show, map_seg_store);
-
static int ht16k33_display_on(struct ht16k33_priv *priv)
{
uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON | priv->blink;
@@ -195,8 +168,7 @@ static int ht16k33_brightness_set(struct ht16k33_priv *priv,
static int ht16k33_brightness_set_blocking(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct ht16k33_priv *priv = container_of(led_cdev, struct ht16k33_priv,
- led);
+ struct ht16k33_priv *priv = ht16k33_led_to_priv(led_cdev);
return ht16k33_brightness_set(priv, brightness);
}
@@ -204,8 +176,7 @@ static int ht16k33_brightness_set_blocking(struct led_classdev *led_cdev,
static int ht16k33_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off)
{
- struct ht16k33_priv *priv = container_of(led_cdev, struct ht16k33_priv,
- led);
+ struct ht16k33_priv *priv = ht16k33_led_to_priv(led_cdev);
unsigned int delay;
uint8_t blink;
int err;
@@ -247,8 +218,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
*/
static void ht16k33_fb_update(struct work_struct *work)
{
- struct ht16k33_priv *priv = container_of(work, struct ht16k33_priv,
- work.work);
+ struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
struct ht16k33_fbdev *fbdev = &priv->fbdev;
uint8_t *p1, *p2;
@@ -440,51 +410,71 @@ static void ht16k33_keypad_stop(struct input_dev *dev)
disable_irq(keypad->client->irq);
}
-static void ht16k33_linedisp_update(struct linedisp *linedisp)
-{
- struct ht16k33_priv *priv = container_of(linedisp, struct ht16k33_priv,
- seg.linedisp);
-
- schedule_delayed_work(&priv->work, 0);
-}
-
static void ht16k33_seg7_update(struct work_struct *work)
{
- struct ht16k33_priv *priv = container_of(work, struct ht16k33_priv,
- work.work);
- struct ht16k33_seg *seg = &priv->seg;
- char *s = seg->curr;
+ struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
+ struct linedisp_map *map = priv->linedisp.map;
+ char *s = priv->linedisp.buf;
uint8_t buf[9];
- buf[0] = map_to_seg7(&seg->map.seg7, *s++);
+ buf[0] = map_to_seg7(&map->map.seg7, *s++);
buf[1] = 0;
- buf[2] = map_to_seg7(&seg->map.seg7, *s++);
+ buf[2] = map_to_seg7(&map->map.seg7, *s++);
buf[3] = 0;
buf[4] = 0;
buf[5] = 0;
- buf[6] = map_to_seg7(&seg->map.seg7, *s++);
+ buf[6] = map_to_seg7(&map->map.seg7, *s++);
buf[7] = 0;
- buf[8] = map_to_seg7(&seg->map.seg7, *s++);
+ buf[8] = map_to_seg7(&map->map.seg7, *s++);
i2c_smbus_write_i2c_block_data(priv->client, 0, ARRAY_SIZE(buf), buf);
}
static void ht16k33_seg14_update(struct work_struct *work)
{
- struct ht16k33_priv *priv = container_of(work, struct ht16k33_priv,
- work.work);
- struct ht16k33_seg *seg = &priv->seg;
- char *s = seg->curr;
+ struct ht16k33_priv *priv = ht16k33_work_to_priv(work);
+ struct linedisp_map *map = priv->linedisp.map;
+ char *s = priv->linedisp.buf;
uint8_t buf[8];
- put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf);
- put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf + 2);
- put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf + 4);
- put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf + 6);
+ put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 0);
+ put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 2);
+ put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 4);
+ put_unaligned_le16(map_to_seg14(&map->map.seg14, *s++), buf + 6);
i2c_smbus_write_i2c_block_data(priv->client, 0, ARRAY_SIZE(buf), buf);
}
+static int ht16k33_linedisp_get_map_type(struct linedisp *linedisp)
+{
+ struct ht16k33_priv *priv = ht16k33_linedisp_to_priv(linedisp);
+
+ switch (priv->type) {
+ case DISP_QUAD_7SEG:
+ INIT_DELAYED_WORK(&priv->work, ht16k33_seg7_update);
+ return LINEDISP_MAP_SEG7;
+
+ case DISP_QUAD_14SEG:
+ INIT_DELAYED_WORK(&priv->work, ht16k33_seg14_update);
+ return LINEDISP_MAP_SEG14;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ht16k33_linedisp_update(struct linedisp *linedisp)
+{
+ struct ht16k33_priv *priv = ht16k33_linedisp_to_priv(linedisp);
+
+ schedule_delayed_work(&priv->work, 0);
+}
+
+static const struct linedisp_ops ht16k33_linedisp_ops = {
+ .get_map_type = ht16k33_linedisp_get_map_type,
+ .update = ht16k33_linedisp_update,
+};
+
static int ht16k33_led_probe(struct device *dev, struct led_classdev *led,
unsigned int brightness)
{
@@ -666,47 +656,14 @@ err_fbdev_buffer:
static int ht16k33_seg_probe(struct device *dev, struct ht16k33_priv *priv,
uint32_t brightness)
{
- struct ht16k33_seg *seg = &priv->seg;
+ struct linedisp *linedisp = &priv->linedisp;
int err;
err = ht16k33_brightness_set(priv, brightness);
if (err)
return err;
- switch (priv->type) {
- case DISP_MATRIX:
- /* not handled here */
- err = -EINVAL;
- break;
-
- case DISP_QUAD_7SEG:
- INIT_DELAYED_WORK(&priv->work, ht16k33_seg7_update);
- seg->map.seg7 = initial_map_seg7;
- seg->map_size = sizeof(seg->map.seg7);
- err = device_create_file(dev, &dev_attr_map_seg7);
- break;
-
- case DISP_QUAD_14SEG:
- INIT_DELAYED_WORK(&priv->work, ht16k33_seg14_update);
- seg->map.seg14 = initial_map_seg14;
- seg->map_size = sizeof(seg->map.seg14);
- err = device_create_file(dev, &dev_attr_map_seg14);
- break;
- }
- if (err)
- return err;
-
- err = linedisp_register(&seg->linedisp, dev, 4, seg->curr,
- ht16k33_linedisp_update);
- if (err)
- goto err_remove_map_file;
-
- return 0;
-
-err_remove_map_file:
- device_remove_file(dev, &dev_attr_map_seg7);
- device_remove_file(dev, &dev_attr_map_seg14);
- return err;
+ return linedisp_register(linedisp, dev, 4, &ht16k33_linedisp_ops);
}
static int ht16k33_probe(struct i2c_client *client)
@@ -770,6 +727,9 @@ static int ht16k33_probe(struct i2c_client *client)
/* Segment Display */
err = ht16k33_seg_probe(dev, priv, dft_brightness);
break;
+
+ default:
+ return -EINVAL;
}
return err;
}
@@ -790,9 +750,10 @@ static void ht16k33_remove(struct i2c_client *client)
case DISP_QUAD_7SEG:
case DISP_QUAD_14SEG:
- linedisp_unregister(&priv->seg.linedisp);
- device_remove_file(&client->dev, &dev_attr_map_seg7);
- device_remove_file(&client->dev, &dev_attr_map_seg14);
+ linedisp_unregister(&priv->linedisp);
+ break;
+
+ default:
break;
}
}
@@ -831,4 +792,5 @@ module_i2c_driver(ht16k33_driver);
MODULE_DESCRIPTION("Holtek HT16K33 driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LINEDISP);
MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 56efda0740fb..9ba132dc6143 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -22,32 +22,30 @@ struct img_ascii_lcd_ctx;
* struct img_ascii_lcd_config - Configuration information about an LCD model
* @num_chars: the number of characters the LCD can display
* @external_regmap: true if registers are in a system controller, else false
- * @update: function called to update the LCD
+ * @ops: character line display operations
*/
struct img_ascii_lcd_config {
unsigned int num_chars;
bool external_regmap;
- void (*update)(struct linedisp *linedisp);
+ const struct linedisp_ops ops;
};
/**
* struct img_ascii_lcd_ctx - Private data structure
+ * @linedisp: line display structure
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
* @cfg: pointer to the LCD model configuration
- * @linedisp: line display structure
- * @curr: the string currently displayed on the LCD
*/
struct img_ascii_lcd_ctx {
+ struct linedisp linedisp;
union {
void __iomem *base;
struct regmap *regmap;
};
u32 offset;
const struct img_ascii_lcd_config *cfg;
- struct linedisp linedisp;
- char curr[] __aligned(8);
};
/*
@@ -61,12 +59,12 @@ static void boston_update(struct linedisp *linedisp)
ulong val;
#if BITS_PER_LONG == 64
- val = *((u64 *)&ctx->curr[0]);
+ val = *((u64 *)&linedisp->buf[0]);
__raw_writeq(val, ctx->base);
#elif BITS_PER_LONG == 32
- val = *((u32 *)&ctx->curr[0]);
+ val = *((u32 *)&linedisp->buf[0]);
__raw_writel(val, ctx->base);
- val = *((u32 *)&ctx->curr[4]);
+ val = *((u32 *)&linedisp->buf[4]);
__raw_writel(val, ctx->base + 4);
#else
# error Not 32 or 64 bit
@@ -75,7 +73,9 @@ static void boston_update(struct linedisp *linedisp)
static struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
- .update = boston_update,
+ .ops = {
+ .update = boston_update,
+ },
};
/*
@@ -91,7 +91,7 @@ static void malta_update(struct linedisp *linedisp)
for (i = 0; i < linedisp->num_chars; i++) {
err = regmap_write(ctx->regmap,
- ctx->offset + (i * 8), ctx->curr[i]);
+ ctx->offset + (i * 8), linedisp->buf[i]);
if (err)
break;
}
@@ -103,7 +103,9 @@ static void malta_update(struct linedisp *linedisp)
static struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
- .update = malta_update,
+ .ops = {
+ .update = malta_update,
+ },
};
/*
@@ -191,7 +193,7 @@ static void sead3_update(struct linedisp *linedisp)
err = regmap_write(ctx->regmap,
ctx->offset + SEAD3_REG_LCD_DATA,
- ctx->curr[i]);
+ linedisp->buf[i]);
if (err)
break;
}
@@ -203,7 +205,9 @@ static void sead3_update(struct linedisp *linedisp)
static struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
- .update = sead3_update,
+ .ops = {
+ .update = sead3_update,
+ },
};
static const struct of_device_id img_ascii_lcd_matches[] = {
@@ -230,7 +234,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
struct img_ascii_lcd_ctx *ctx;
int err;
- ctx = devm_kzalloc(dev, sizeof(*ctx) + cfg->num_chars, GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -247,8 +251,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
return PTR_ERR(ctx->base);
}
- err = linedisp_register(&ctx->linedisp, dev, cfg->num_chars, ctx->curr,
- cfg->update);
+ err = linedisp_register(&ctx->linedisp, dev, cfg->num_chars, &cfg->ops);
if (err)
return err;
@@ -273,16 +276,13 @@ err_unregister:
*
* Remove an LCD display device, freeing private resources & ensuring that the
* driver stops using the LCD display registers.
- *
- * Return: 0
*/
-static int img_ascii_lcd_remove(struct platform_device *pdev)
+static void img_ascii_lcd_remove(struct platform_device *pdev)
{
struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev);
sysfs_remove_link(&pdev->dev.kobj, "message");
linedisp_unregister(&ctx->linedisp);
- return 0;
}
static struct platform_driver img_ascii_lcd_driver = {
@@ -291,10 +291,11 @@ static struct platform_driver img_ascii_lcd_driver = {
.of_match_table = img_ascii_lcd_matches,
},
.probe = img_ascii_lcd_probe,
- .remove = img_ascii_lcd_remove,
+ .remove_new = img_ascii_lcd_remove,
};
module_platform_driver(img_ascii_lcd_driver);
MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LINEDISP);
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index 03e7f104aa1a..e2b546210f8d 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -10,13 +10,21 @@
#include <generated/utsrelease.h>
+#include <linux/container_of.h>
#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
+#include <linux/map_to_7segment.h>
+#include <linux/map_to_14segment.h>
+
#include "line-display.h"
#define DEFAULT_SCROLL_RATE (HZ / 2)
@@ -45,7 +53,7 @@ static void linedisp_scroll(struct timer_list *t)
}
/* update the display */
- linedisp->update(linedisp);
+ linedisp->ops->update(linedisp);
/* move on to the next character */
linedisp->scroll_pos++;
@@ -89,7 +97,7 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
linedisp->message = NULL;
linedisp->message_len = 0;
memset(linedisp->buf, ' ', linedisp->num_chars);
- linedisp->update(linedisp);
+ linedisp->ops->update(linedisp);
return 0;
}
@@ -165,9 +173,11 @@ static ssize_t scroll_step_ms_store(struct device *dev,
{
struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
unsigned int ms;
+ int err;
- if (kstrtouint(buf, 10, &ms) != 0)
- return -EINVAL;
+ err = kstrtouint(buf, 10, &ms);
+ if (err)
+ return err;
linedisp->scroll_rate = msecs_to_jiffies(ms);
if (linedisp->message && linedisp->message_len > linedisp->num_chars) {
@@ -181,45 +191,165 @@ static ssize_t scroll_step_ms_store(struct device *dev,
static DEVICE_ATTR_RW(scroll_step_ms);
+static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp_map *map = linedisp->map;
+
+ memcpy(buf, &map->map, map->size);
+ return map->size;
+}
+
+static ssize_t map_seg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp_map *map = linedisp->map;
+
+ if (count != map->size)
+ return -EINVAL;
+
+ memcpy(&map->map, buf, count);
+ return count;
+}
+
+static const SEG7_DEFAULT_MAP(initial_map_seg7);
+static DEVICE_ATTR(map_seg7, 0644, map_seg_show, map_seg_store);
+
+static const SEG14_DEFAULT_MAP(initial_map_seg14);
+static DEVICE_ATTR(map_seg14, 0644, map_seg_show, map_seg_store);
+
static struct attribute *linedisp_attrs[] = {
&dev_attr_message.attr,
&dev_attr_scroll_step_ms.attr,
- NULL,
+ &dev_attr_map_seg7.attr,
+ &dev_attr_map_seg14.attr,
+ NULL
+};
+
+static umode_t linedisp_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp_map *map = linedisp->map;
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_map_seg7.attr) {
+ if (!map)
+ return 0;
+ if (map->type != LINEDISP_MAP_SEG7)
+ return 0;
+ }
+
+ if (attr == &dev_attr_map_seg14.attr) {
+ if (!map)
+ return 0;
+ if (map->type != LINEDISP_MAP_SEG14)
+ return 0;
+ }
+
+ return mode;
};
-ATTRIBUTE_GROUPS(linedisp);
+
+static const struct attribute_group linedisp_group = {
+ .is_visible = linedisp_attr_is_visible,
+ .attrs = linedisp_attrs,
+};
+__ATTRIBUTE_GROUPS(linedisp);
+
+static DEFINE_IDA(linedisp_id);
+
+static void linedisp_release(struct device *dev)
+{
+ struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+
+ kfree(linedisp->map);
+ kfree(linedisp->message);
+ kfree(linedisp->buf);
+ ida_free(&linedisp_id, linedisp->id);
+}
static const struct device_type linedisp_type = {
.groups = linedisp_groups,
+ .release = linedisp_release,
};
+static int linedisp_init_map(struct linedisp *linedisp)
+{
+ struct linedisp_map *map;
+ int err;
+
+ if (!linedisp->ops->get_map_type)
+ return 0;
+
+ err = linedisp->ops->get_map_type(linedisp);
+ if (err < 0)
+ return err;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ map->type = err;
+
+ /* assign initial mapping */
+ switch (map->type) {
+ case LINEDISP_MAP_SEG7:
+ map->map.seg7 = initial_map_seg7;
+ map->size = sizeof(map->map.seg7);
+ break;
+ case LINEDISP_MAP_SEG14:
+ map->map.seg14 = initial_map_seg14;
+ map->size = sizeof(map->map.seg14);
+ break;
+ default:
+ kfree(map);
+ return -EINVAL;
+ }
+
+ linedisp->map = map;
+
+ return 0;
+}
+
/**
* linedisp_register - register a character line display
* @linedisp: pointer to character line display structure
* @parent: parent device
* @num_chars: the number of characters that can be displayed
- * @buf: pointer to a buffer that can hold @num_chars characters
- * @update: Function called to update the display. This must not sleep!
+ * @ops: character line display operations
*
* Return: zero on success, else a negative error code.
*/
int linedisp_register(struct linedisp *linedisp, struct device *parent,
- unsigned int num_chars, char *buf,
- void (*update)(struct linedisp *linedisp))
+ unsigned int num_chars, const struct linedisp_ops *ops)
{
- static atomic_t linedisp_id = ATOMIC_INIT(-1);
int err;
memset(linedisp, 0, sizeof(*linedisp));
linedisp->dev.parent = parent;
linedisp->dev.type = &linedisp_type;
- linedisp->update = update;
- linedisp->buf = buf;
+ linedisp->ops = ops;
linedisp->num_chars = num_chars;
linedisp->scroll_rate = DEFAULT_SCROLL_RATE;
+ err = ida_alloc(&linedisp_id, GFP_KERNEL);
+ if (err < 0)
+ return err;
+ linedisp->id = err;
+
device_initialize(&linedisp->dev);
- dev_set_name(&linedisp->dev, "linedisp.%lu",
- (unsigned long)atomic_inc_return(&linedisp_id));
+ dev_set_name(&linedisp->dev, "linedisp.%u", linedisp->id);
+
+ err = -ENOMEM;
+ linedisp->buf = kzalloc(linedisp->num_chars, GFP_KERNEL);
+ if (!linedisp->buf)
+ goto out_put_device;
+
+ /* initialise a character mapping, if required */
+ err = linedisp_init_map(linedisp);
+ if (err)
+ goto out_put_device;
/* initialise a timer for scrolling the message */
timer_setup(&linedisp->timer, linedisp_scroll, 0);
@@ -239,10 +369,11 @@ out_del_dev:
device_del(&linedisp->dev);
out_del_timer:
del_timer_sync(&linedisp->timer);
+out_put_device:
put_device(&linedisp->dev);
return err;
}
-EXPORT_SYMBOL_GPL(linedisp_register);
+EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
/**
* linedisp_unregister - unregister a character line display
@@ -253,9 +384,8 @@ void linedisp_unregister(struct linedisp *linedisp)
{
device_del(&linedisp->dev);
del_timer_sync(&linedisp->timer);
- kfree(linedisp->message);
put_device(&linedisp->dev);
}
-EXPORT_SYMBOL_GPL(linedisp_unregister);
+EXPORT_SYMBOL_NS_GPL(linedisp_unregister, LINEDISP);
MODULE_LICENSE("GPL");
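
Since the display buffer, the sysfs attributes and the optional segment maps are now owned by the line-display core, a consumer only embeds struct linedisp, supplies a struct linedisp_ops and calls linedisp_register()/linedisp_unregister(). A minimal sketch against the new signature; the foo_* names are illustrative only, and the usual <linux/platform_device.h>, <linux/io.h> and "line-display.h" includes are assumed:

/* Minimal consumer sketch for the reworked API (not part of the patch). */
struct foo_priv {
        struct linedisp linedisp;
        void __iomem *base;             /* hypothetical display registers */
};

static void foo_update(struct linedisp *linedisp)       /* must not sleep */
{
        struct foo_priv *priv = container_of(linedisp, struct foo_priv, linedisp);
        unsigned int i;

        /* linedisp->buf is allocated by the core, num_chars bytes long */
        for (i = 0; i < linedisp->num_chars; i++)
                writeb(linedisp->buf[i], priv->base + i);
}

static const struct linedisp_ops foo_ops = {
        /* .get_map_type is optional; only needed for 7/14-segment mapping */
        .update = foo_update,
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return -ENOMEM;
        /* mapping of priv->base omitted; 8 characters as an example */
        return linedisp_register(&priv->linedisp, &pdev->dev, 8, &foo_ops);
}

On the remove path the driver calls linedisp_unregister(&priv->linedisp). Because the exports are now namespaced, the consumer module must also declare MODULE_IMPORT_NS(LINEDISP).
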
diff --git a/drivers/auxdisplay/line-display.h b/drivers/auxdisplay/line-display.h
index 0f5891d34c48..4348d7a2f69a 100644
--- a/drivers/auxdisplay/line-display.h
+++ b/drivers/auxdisplay/line-display.h
@@ -11,33 +11,78 @@
#ifndef _LINEDISP_H
#define _LINEDISP_H
+#include <linux/device.h>
+#include <linux/timer_types.h>
+
+#include <linux/map_to_7segment.h>
+#include <linux/map_to_14segment.h>
+
+struct linedisp;
+
+/**
+ * enum linedisp_map_type - type of the character mapping
+ * @LINEDISP_MAP_SEG7: Map characters to 7 segment display
+ * @LINEDISP_MAP_SEG14: Map characters to 14 segment display
+ */
+enum linedisp_map_type {
+ LINEDISP_MAP_SEG7,
+ LINEDISP_MAP_SEG14,
+};
+
+/**
+ * struct linedisp_map - character mapping
+ * @type: type of the character mapping
+ * @map: conversion character mapping
+ * @size: size of the @map
+ */
+struct linedisp_map {
+ enum linedisp_map_type type;
+ union {
+ struct seg7_conversion_map seg7;
+ struct seg14_conversion_map seg14;
+ } map;
+ unsigned int size;
+};
+
+/**
+ * struct linedisp_ops - character line display operations
+ * @get_map_type: Function called to get the character mapping, if required
+ * @update: Function called to update the display. This must not sleep!
+ */
+struct linedisp_ops {
+ int (*get_map_type)(struct linedisp *linedisp);
+ void (*update)(struct linedisp *linedisp);
+};
+
/**
* struct linedisp - character line display private data structure
* @dev: the line display device
* @timer: timer used to implement scrolling
- * @update: function called to update the display
+ * @ops: character line display operations
* @buf: pointer to the buffer for the string currently displayed
* @message: the full message to display or scroll on the display
* @num_chars: the number of characters that can be displayed
* @message_len: the length of the @message string
* @scroll_pos: index of the first character of @message currently displayed
* @scroll_rate: scroll interval in jiffies
+ * @id: instance id of this display
*/
struct linedisp {
struct device dev;
struct timer_list timer;
- void (*update)(struct linedisp *linedisp);
+ const struct linedisp_ops *ops;
+ struct linedisp_map *map;
char *buf;
char *message;
unsigned int num_chars;
unsigned int message_len;
unsigned int scroll_pos;
unsigned int scroll_rate;
+ unsigned int id;
};
int linedisp_register(struct linedisp *linedisp, struct device *parent,
- unsigned int num_chars, char *buf,
- void (*update)(struct linedisp *linedisp));
+ unsigned int num_chars, const struct linedisp_ops *ops);
void linedisp_unregister(struct linedisp *linedisp);
#endif /* LINEDISP_H */
diff --git a/drivers/auxdisplay/max6959.c b/drivers/auxdisplay/max6959.c
new file mode 100644
index 000000000000..5519c014bd29
--- /dev/null
+++ b/drivers/auxdisplay/max6959.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX6958/6959 7-segment LED display controller
+ * Datasheet:
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/MAX6958-MAX6959.pdf
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+#include <linux/array_size.h>
+#include <linux/bitrev.h>
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/map_to_7segment.h>
+
+#include "line-display.h"
+
+/* Registers */
+#define REG_DECODE_MODE 0x01
+#define REG_INTENSITY 0x02
+#define REG_SCAN_LIMIT 0x03
+#define REG_CONFIGURATION 0x04
+#define REG_CONFIGURATION_S_BIT BIT(0)
+
+#define REG_DIGIT(x) (0x20 + (x))
+#define REG_DIGIT0 0x20
+#define REG_DIGIT1 0x21
+#define REG_DIGIT2 0x22
+#define REG_DIGIT3 0x23
+
+#define REG_SEGMENTS 0x24
+#define REG_MAX REG_SEGMENTS
+
+struct max6959_priv {
+ struct linedisp linedisp;
+ struct delayed_work work;
+ struct regmap *regmap;
+};
+
+static void max6959_disp_update(struct work_struct *work)
+{
+ struct max6959_priv *priv = container_of(work, struct max6959_priv, work.work);
+ struct linedisp *linedisp = &priv->linedisp;
+ struct linedisp_map *map = linedisp->map;
+ char *s = linedisp->buf;
+ u8 buf[4];
+
+ /* Map segments according to datasheet */
+ buf[0] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
+ buf[1] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
+ buf[2] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
+ buf[3] = bitrev8(map_to_seg7(&map->map.seg7, *s++)) >> 1;
+
+ regmap_bulk_write(priv->regmap, REG_DIGIT(0), buf, ARRAY_SIZE(buf));
+}
+
+static int max6959_linedisp_get_map_type(struct linedisp *linedisp)
+{
+ struct max6959_priv *priv = container_of(linedisp, struct max6959_priv, linedisp);
+
+ INIT_DELAYED_WORK(&priv->work, max6959_disp_update);
+ return LINEDISP_MAP_SEG7;
+}
+
+static void max6959_linedisp_update(struct linedisp *linedisp)
+{
+ struct max6959_priv *priv = container_of(linedisp, struct max6959_priv, linedisp);
+
+ schedule_delayed_work(&priv->work, 0);
+}
+
+static const struct linedisp_ops max6959_linedisp_ops = {
+ .get_map_type = max6959_linedisp_get_map_type,
+ .update = max6959_linedisp_update,
+};
+
+static int max6959_enable(struct max6959_priv *priv, bool enable)
+{
+ u8 mask = REG_CONFIGURATION_S_BIT;
+ u8 value = enable ? mask : 0;
+
+ return regmap_update_bits(priv->regmap, REG_CONFIGURATION, mask, value);
+}
+
+static void max6959_power_off(void *priv)
+{
+ max6959_enable(priv, false);
+}
+
+static int max6959_power_on(struct max6959_priv *priv)
+{
+ struct device *dev = regmap_get_device(priv->regmap);
+ int ret;
+
+ ret = max6959_enable(priv, true);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, max6959_power_off, priv);
+}
+
+static const struct regmap_config max6959_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = REG_MAX,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static int max6959_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max6959_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(client, &max6959_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ ret = max6959_power_on(priv);
+ if (ret)
+ return ret;
+
+ ret = linedisp_register(&priv->linedisp, dev, 4, &max6959_linedisp_ops);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, priv);
+
+ return 0;
+}
+
+static void max6959_i2c_remove(struct i2c_client *client)
+{
+ struct max6959_priv *priv = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&priv->work);
+ linedisp_unregister(&priv->linedisp);
+}
+
+static int max6959_suspend(struct device *dev)
+{
+ return max6959_enable(dev_get_drvdata(dev), false);
+}
+
+static int max6959_resume(struct device *dev)
+{
+ return max6959_enable(dev_get_drvdata(dev), true);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max6959_pm_ops, max6959_suspend, max6959_resume);
+
+static const struct i2c_device_id max6959_i2c_id[] = {
+ { "max6959" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6959_i2c_id);
+
+static const struct of_device_id max6959_of_table[] = {
+ { .compatible = "maxim,max6959" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max6959_of_table);
+
+static struct i2c_driver max6959_i2c_driver = {
+ .driver = {
+ .name = "max6959",
+ .pm = pm_sleep_ptr(&max6959_pm_ops),
+ .of_match_table = max6959_of_table,
+ },
+ .probe = max6959_i2c_probe,
+ .remove = max6959_i2c_remove,
+ .id_table = max6959_i2c_id,
+};
+module_i2c_driver(max6959_i2c_driver);
+
+MODULE_DESCRIPTION("MAX6958/6959 7-segment LED controller");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LINEDISP);
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e20d35bdf5fe..049ff443e790 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1519,106 +1519,9 @@ static void keypad_init(void)
static void panel_attach(struct parport *port)
{
+ int selected_keypad_type = NOT_SET;
struct pardev_cb panel_cb;
- if (port->number != parport)
- return;
-
- if (pprt) {
- pr_err("%s: port->number=%d parport=%d, already registered!\n",
- __func__, port->number, parport);
- return;
- }
-
- memset(&panel_cb, 0, sizeof(panel_cb));
- panel_cb.private = &pprt;
- /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
-
- pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
- if (!pprt) {
- pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
- __func__, port->number, parport);
- return;
- }
-
- if (parport_claim(pprt)) {
- pr_err("could not claim access to parport%d. Aborting.\n",
- parport);
- goto err_unreg_device;
- }
-
- /* must init LCD first, just in case an IRQ from the keypad is
- * generated at keypad init
- */
- if (lcd.enabled) {
- lcd_init();
- if (!lcd.charlcd || charlcd_register(lcd.charlcd))
- goto err_unreg_device;
- }
-
- if (keypad.enabled) {
- keypad_init();
- if (misc_register(&keypad_dev))
- goto err_lcd_unreg;
- }
- return;
-
-err_lcd_unreg:
- if (scan_timer.function)
- del_timer_sync(&scan_timer);
- if (lcd.enabled)
- charlcd_unregister(lcd.charlcd);
-err_unreg_device:
- kfree(lcd.charlcd);
- lcd.charlcd = NULL;
- parport_unregister_device(pprt);
- pprt = NULL;
-}
-
-static void panel_detach(struct parport *port)
-{
- if (port->number != parport)
- return;
-
- if (!pprt) {
- pr_err("%s: port->number=%d parport=%d, nothing to unregister.\n",
- __func__, port->number, parport);
- return;
- }
- if (scan_timer.function)
- del_timer_sync(&scan_timer);
-
- if (keypad.enabled) {
- misc_deregister(&keypad_dev);
- keypad_initialized = 0;
- }
-
- if (lcd.enabled) {
- charlcd_unregister(lcd.charlcd);
- lcd.initialized = false;
- kfree(lcd.charlcd->drvdata);
- kfree(lcd.charlcd);
- lcd.charlcd = NULL;
- }
-
- /* TODO: free all input signals */
- parport_release(pprt);
- parport_unregister_device(pprt);
- pprt = NULL;
-}
-
-static struct parport_driver panel_driver = {
- .name = "panel",
- .match_port = panel_attach,
- .detach = panel_detach,
- .devmodel = true,
-};
-
-/* init function */
-static int __init panel_init_module(void)
-{
- int selected_keypad_type = NOT_SET, err;
-
/* take care of an eventual profile */
switch (profile) {
case PANEL_PROFILE_CUSTOM:
@@ -1710,29 +1613,102 @@ static int __init panel_init_module(void)
if (!lcd.enabled && !keypad.enabled) {
/* no device enabled, let's exit */
pr_err("panel driver disabled.\n");
- return -ENODEV;
+ return;
}
- err = parport_register_driver(&panel_driver);
- if (err) {
- pr_err("could not register with parport. Aborting.\n");
- return err;
+ if (port->number != parport)
+ return;
+
+ if (pprt) {
+ pr_err("%s: port->number=%d parport=%d, already registered!\n",
+ __func__, port->number, parport);
+ return;
}
- if (pprt)
- pr_info("panel driver registered on parport%d (io=0x%lx).\n",
- parport, pprt->port->base);
- else
- pr_info("panel driver not yet registered\n");
- return 0;
+ memset(&panel_cb, 0, sizeof(panel_cb));
+ panel_cb.private = &pprt;
+ /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
+
+ pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
+ if (!pprt) {
+ pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
+ __func__, port->number, parport);
+ return;
+ }
+
+ if (parport_claim(pprt)) {
+ pr_err("could not claim access to parport%d. Aborting.\n",
+ parport);
+ goto err_unreg_device;
+ }
+
+ /* must init LCD first, just in case an IRQ from the keypad is
+ * generated at keypad init
+ */
+ if (lcd.enabled) {
+ lcd_init();
+ if (!lcd.charlcd || charlcd_register(lcd.charlcd))
+ goto err_unreg_device;
+ }
+
+ if (keypad.enabled) {
+ keypad_init();
+ if (misc_register(&keypad_dev))
+ goto err_lcd_unreg;
+ }
+ return;
+
+err_lcd_unreg:
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
+ if (lcd.enabled)
+ charlcd_unregister(lcd.charlcd);
+err_unreg_device:
+ kfree(lcd.charlcd);
+ lcd.charlcd = NULL;
+ parport_unregister_device(pprt);
+ pprt = NULL;
}
-static void __exit panel_cleanup_module(void)
+static void panel_detach(struct parport *port)
{
- parport_unregister_driver(&panel_driver);
+ if (port->number != parport)
+ return;
+
+ if (!pprt) {
+ pr_err("%s: port->number=%d parport=%d, nothing to unregister.\n",
+ __func__, port->number, parport);
+ return;
+ }
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
+
+ if (keypad.enabled) {
+ misc_deregister(&keypad_dev);
+ keypad_initialized = 0;
+ }
+
+ if (lcd.enabled) {
+ charlcd_unregister(lcd.charlcd);
+ lcd.initialized = false;
+ kfree(lcd.charlcd->drvdata);
+ kfree(lcd.charlcd);
+ lcd.charlcd = NULL;
+ }
+
+ /* TODO: free all input signals */
+ parport_release(pprt);
+ parport_unregister_device(pprt);
+ pprt = NULL;
}
-module_init(panel_init_module);
-module_exit(panel_cleanup_module);
+static struct parport_driver panel_driver = {
+ .name = "panel",
+ .match_port = panel_attach,
+ .detach = panel_detach,
+ .devmodel = true,
+};
+module_parport_driver(panel_driver);
+
MODULE_AUTHOR("Willy Tarreau");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
new file mode 100644
index 000000000000..35a8dbb1e9d2
--- /dev/null
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for a 7-segment LED display
+ *
+ * The decimal point LED present on some devices is currently not
+ * supported.
+ *
+ * Copyright (C) Allied Telesis Labs
+ */
+
+#include <linux/bitmap.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/map_to_7segment.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "line-display.h"
+
+struct seg_led_priv {
+ struct linedisp linedisp;
+ struct delayed_work work;
+ struct gpio_descs *segment_gpios;
+};
+
+static void seg_led_update(struct work_struct *work)
+{
+ struct seg_led_priv *priv = container_of(work, struct seg_led_priv, work.work);
+ struct linedisp *linedisp = &priv->linedisp;
+ struct linedisp_map *map = linedisp->map;
+ DECLARE_BITMAP(values, 8) = { };
+
+ bitmap_set_value8(values, map_to_seg7(&map->map.seg7, linedisp->buf[0]), 0);
+
+ gpiod_set_array_value_cansleep(priv->segment_gpios->ndescs, priv->segment_gpios->desc,
+ priv->segment_gpios->info, values);
+}
+
+static int seg_led_linedisp_get_map_type(struct linedisp *linedisp)
+{
+ struct seg_led_priv *priv = container_of(linedisp, struct seg_led_priv, linedisp);
+
+ INIT_DELAYED_WORK(&priv->work, seg_led_update);
+ return LINEDISP_MAP_SEG7;
+}
+
+static void seg_led_linedisp_update(struct linedisp *linedisp)
+{
+ struct seg_led_priv *priv = container_of(linedisp, struct seg_led_priv, linedisp);
+
+ schedule_delayed_work(&priv->work, 0);
+}
+
+static const struct linedisp_ops seg_led_linedisp_ops = {
+ .get_map_type = seg_led_linedisp_get_map_type,
+ .update = seg_led_linedisp_update,
+};
+
+static int seg_led_probe(struct platform_device *pdev)
+{
+ struct seg_led_priv *priv;
+ struct device *dev = &pdev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->segment_gpios = devm_gpiod_get_array(dev, "segment", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->segment_gpios))
+ return PTR_ERR(priv->segment_gpios);
+
+ if (priv->segment_gpios->ndescs < 7 || priv->segment_gpios->ndescs > 8)
+ return -EINVAL;
+
+ return linedisp_register(&priv->linedisp, dev, 1, &seg_led_linedisp_ops);
+}
+
+static int seg_led_remove(struct platform_device *pdev)
+{
+ struct seg_led_priv *priv = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&priv->work);
+ linedisp_unregister(&priv->linedisp);
+
+ return 0;
+}
+
+static const struct of_device_id seg_led_of_match[] = {
+ { .compatible = "gpio-7-segment"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, seg_led_of_match);
+
+static struct platform_driver seg_led_driver = {
+ .probe = seg_led_probe,
+ .remove = seg_led_remove,
+ .driver = {
+ .name = "seg-led-gpio",
+ .of_match_table = seg_led_of_match,
+ },
+};
+module_platform_driver(seg_led_driver);
+
+MODULE_AUTHOR("Chris Packham <chris.packham@alliedtelesis.co.nz>");
+MODULE_DESCRIPTION("7 segment LED driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(LINEDISP);
diff --git a/drivers/base/base.h b/drivers/base/base.h
index eb4c0ace9242..0738ccad08b2 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -207,7 +207,7 @@ static inline int devtmpfs_init(void) { return 0; }
#endif
#ifdef CONFIG_BLOCK
-extern struct class block_class;
+extern const struct class block_class;
static inline bool is_blockdev(struct device *dev)
{
return dev->class == &block_class;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index b621a0fc75e1..f5a6bffce518 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -588,6 +588,7 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
+CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -602,6 +603,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -617,6 +619,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_retbleed.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
NULL
};
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f37ad34c80ec..0d01890160f3 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -13,6 +13,8 @@
#include <linux/msi.h>
#include <linux/slab.h>
+/* Begin of removal area. Once everything is converted over. Cleanup the includes too! */
+
#define DEV_ID_SHIFT 21
#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
@@ -204,8 +206,8 @@ static void platform_msi_free_priv_data(struct device *dev)
* Returns:
* Zero for success, or an error code in case of failure
*/
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
int err;
@@ -219,18 +221,6 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
return err;
}
-EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
-
-/**
- * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
- * @dev: The device for which to free interrupts
- */
-void platform_msi_domain_free_irqs(struct device *dev)
-{
- msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
- platform_msi_free_priv_data(dev);
-}
-EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
/**
* platform_msi_get_host_data - Query the private data associated with
@@ -350,3 +340,104 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir
return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
+
+/* End of removal area */
+
+/* Real per device domain interfaces */
+
+/*
+ * This indirection can go when platform_device_msi_init_and_alloc_irqs()
+ * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it
+ * simple for now.
+ */
+static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ irq_write_msi_msg_t cb = d->chip_data;
+
+ cb(irq_data_get_msi_desc(d), msg);
+}
+
+static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = desc->msi_index;
+}
+
+static const struct msi_domain_template platform_msi_template = {
+ .chip = {
+ .name = "pMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_write_msi_msg = platform_msi_write_msi_msg,
+ /* The rest is filled in by the platform MSI parent */
+ },
+
+ .ops = {
+ .set_desc = platform_msi_set_desc_byindex,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_DEVICE_MSI,
+ },
+};
+
+/**
+ * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI
+ * and allocate interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ *
+ * This creates a MSI domain on @dev which has @dev->msi.domain as
+ * parent. The parent domain sets up the new domain. The domain has
+ * a fixed size of @nvec. The domain is managed by devres and will
+ * be removed when the device is removed.
+ *
+ * Note: For migration purposes this falls back to the original platform_msi code
+ * up to the point where all platforms have been converted to the MSI
+ * parent model.
+ */
+int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct irq_domain *domain = dev->msi.domain;
+
+ if (!domain || !write_msi_msg)
+ return -EINVAL;
+
+ /* Migration support. Will go away once everything is converted */
+ if (!irq_domain_is_msi_parent(domain))
+ return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg);
+
+ /*
+ * @write_msi_msg is stored in the resulting msi_domain_info::data.
+ * The underlying domain creation mechanism will assign that
+ * callback to the resulting irq chip.
+ */
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &platform_msi_template,
+ nvec, NULL, write_msi_msg))
+ return -ENODEV;
+
+ return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
+}
+EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
+
+/**
+ * platform_device_msi_free_irqs_all - Free all interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_device_msi_free_irqs_all(struct device *dev)
+{
+ struct irq_domain *domain = dev->msi.domain;
+
+ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+
+ /* Migration support. Will go away once everything is converted */
+ if (!irq_domain_is_msi_parent(domain))
+ platform_msi_free_priv_data(dev);
+}
+EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
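
No driver is converted to the new entry points in this diff, so the following is only a hedged usage sketch: the callback keeps the irq_write_msi_msg_t signature, Linux interrupt numbers are looked up with the generic msi_get_virq() helper, and teardown is a single call to platform_device_msi_free_irqs_all(). The bar_* names are illustrative; <linux/msi.h> and <linux/interrupt.h> are assumed:

/* Illustrative consumer of the per-device MSI domain interfaces. */
static void bar_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        /* program msg->address_hi/lo and msg->data into the device */
}

static int bar_setup_irqs(struct device *dev, unsigned int nvec)
{
        unsigned int virq;
        int ret, i;

        ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, bar_write_msi_msg);
        if (ret)
                return ret;

        for (i = 0; i < nvec; i++) {
                virq = msi_get_virq(dev, i);
                if (!virq)
                        return -ENXIO;
                /* request_irq(virq, ...) as usual */
        }
        return 0;
}

/* teardown mirrors the allocation: platform_device_msi_free_irqs_all(dev); */
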
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 44ec20918a4d..327d168dd37a 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -168,6 +168,115 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
/**
+ * dev_pm_domain_attach_list - Associate a device with its PM domains.
+ * @dev: The device used to lookup the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * This function helps to attach a device to its multiple PM domains. The
+ * caller, typically a driver's probe function, may provide a list of names for
+ * the PM domains to attach the device to, or it may leave the list empty to
+ * attach to all available PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure. Note that, to detach the list of PM domains, the driver shall call
+ * dev_pm_domain_detach_list(), typically during the remove phase.
+ */
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ struct device_node *np = dev->of_node;
+ struct dev_pm_domain_list *pds;
+ struct device *pd_dev = NULL;
+ int ret, i, num_pds = 0;
+ bool by_id = true;
+ u32 pd_flags = data ? data->pd_flags : 0;
+ u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ /* For now this is limited to OF based platforms. */
+ if (!np)
+ return 0;
+
+ if (data && data->pd_names) {
+ num_pds = data->num_pd_names;
+ by_id = false;
+ } else {
+ num_pds = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ }
+
+ if (num_pds <= 0)
+ return 0;
+
+ pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs),
+ GFP_KERNEL);
+ if (!pds->pd_devs)
+ return -ENOMEM;
+
+ pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links),
+ GFP_KERNEL);
+ if (!pds->pd_links)
+ return -ENOMEM;
+
+ if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
+ link_flags |= DL_FLAG_RPM_ACTIVE;
+
+ for (i = 0; i < num_pds; i++) {
+ if (by_id)
+ pd_dev = dev_pm_domain_attach_by_id(dev, i);
+ else
+ pd_dev = dev_pm_domain_attach_by_name(dev,
+ data->pd_names[i]);
+ if (IS_ERR_OR_NULL(pd_dev)) {
+ ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
+ goto err_attach;
+ }
+
+ if (link_flags) {
+ struct device_link *link;
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ ret = -ENODEV;
+ goto err_link;
+ }
+
+ pds->pd_links[i] = link;
+ }
+
+ pds->pd_devs[i] = pd_dev;
+ }
+
+ pds->num_pds = num_pds;
+ *list = pds;
+ return num_pds;
+
+err_link:
+ dev_pm_domain_detach(pd_dev, true);
+err_attach:
+ while (--i >= 0) {
+ if (pds->pd_links[i])
+ device_link_del(pds->pd_links[i]);
+ dev_pm_domain_detach(pds->pd_devs[i], true);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
@@ -188,6 +297,31 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_detach_list - Detach a list of PM domains.
+ * @list: The list of PM domains to detach.
+ *
+ * This function reverses the actions of dev_pm_domain_attach_list().
+ * Typically it should be invoked from a driver's remove phase.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
+{
+ int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->num_pds; i++) {
+ if (list->pd_links[i])
+ device_link_del(list->pd_links[i]);
+ dev_pm_domain_detach(list->pd_devs[i], true);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
+
+/**
* dev_pm_domain_start - Start the device through its PM domain.
* @dev: Device to start.
*
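A usage sketch, not part of this patch, of the two list helpers added above: a consumer with two named power domains could attach them at probe time and detach them at remove time as below. The names "perf" and "mem", the foo_priv structure and the driver hooks are illustrative; the dev_pm_domain_attach_data layout and PD_FLAG_DEV_LINK_ON are taken from the code above.

#include <linux/pm_domain.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct dev_pm_domain_list *pd_list;
};

static int foo_probe(struct platform_device *pdev)
{
	static const char * const pd_names[] = { "perf", "mem" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
		/* Also sets DL_FLAG_RPM_ACTIVE on the created device links. */
		.pd_flags = PD_FLAG_DEV_LINK_ON,
	};
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Returns the number of attached domains, 0 if none, or a negative error. */
	ret = dev_pm_domain_attach_list(&pdev->dev, &pd_data, &priv->pd_list);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	/* Accepts a NULL list, so this is safe even if nothing was attached. */
	dev_pm_domain_detach_list(priv->pd_list);
}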
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fadcd0379dc2..5679f966f676 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -60,7 +60,6 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
-struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@@ -578,6 +577,35 @@ bool dev_pm_skip_resume(struct device *dev)
return !dev->power.must_resume;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ dev->power.async_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+ }
+ /*
+ * Because async_schedule_dev_nocall() above has returned false or it
+ * has not been called at all, func() is not running and it is safe to
+ * update the async_in_progress flag without extra synchronization.
+ */
+ dev->power.async_in_progress = false;
+ return false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -657,42 +685,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
-static bool dpm_async_fn(struct device *dev, async_func_t func)
-{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
-
- get_device(dev);
-
- if (async_schedule_dev_nocall(func, dev))
- return true;
-
- put_device(dev);
- }
- /*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
- */
- dev->power.async_in_progress = false;
- return false;
-}
-
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
@@ -707,9 +705,12 @@ static void dpm_noirq_resume_devices(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -736,6 +737,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@@ -817,8 +821,7 @@ Out:
complete_all(&dev->power.completion);
if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
@@ -842,9 +845,12 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -871,6 +877,9 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -974,8 +983,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@@ -1004,10 +1012,11 @@ void dpm_resume(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -1017,29 +1026,25 @@ void dpm_resume(pm_message_t state)
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
-
- get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
if (!dev->power.async_in_progress) {
+ get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
device_resume(dev, state, false);
+ put_device(dev);
+
mutex_lock(&dpm_list_mtx);
}
-
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
-
- mutex_unlock(&dpm_list_mtx);
-
- put_device(dev);
-
- mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@@ -1187,7 +1192,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
}
/**
- * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1195,7 +1200,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1240,6 +1245,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
@@ -1269,54 +1276,37 @@ Complete:
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend_noirq(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_noirq(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_noirq(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_noirq))
- return 0;
-
- return __device_suspend_noirq(dev, pm_transition, false);
-}
-
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend_noirq(dev);
+ list_move(&dev->power.entry, &dpm_noirq_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_noirq_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_noirq(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1324,15 +1314,16 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend_noirq++;
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- }
+
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@@ -1375,14 +1366,14 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
}
/**
- * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1434,6 +1425,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1450,24 +1443,11 @@ Complete:
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = __device_suspend_late(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_late(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_late(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_late))
- return 0;
-
- return __device_suspend_late(dev, pm_transition, false);
-}
-
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -1478,32 +1458,28 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- wake_up_all_idle_cpus();
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
-
- get_device(dev);
+ wake_up_all_idle_cpus();
- mutex_unlock(&dpm_list_mtx);
+ mutex_lock(&dpm_list_mtx);
- error = device_suspend_late(dev);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
- mutex_lock(&dpm_list_mtx);
+ list_move(&dev->power.entry, &dpm_late_early_list);
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
+ if (dpm_async_fn(dev, async_suspend_late))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " late", error);
- dpm_save_failed_dev(dev_name(dev));
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_late(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1511,12 +1487,14 @@ int dpm_suspend_late(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
+
if (error) {
- suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@@ -1597,12 +1575,12 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
}
/**
- * __device_suspend - Execute "suspend" callbacks for given device.
+ * device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1716,8 +1694,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
- if (error)
+ if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
@@ -1727,25 +1708,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend))
- return 0;
-
- return __device_suspend(dev, pm_transition, false);
-}
-
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1761,29 +1728,25 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
- get_device(dev);
+ mutex_lock(&dpm_list_mtx);
- mutex_unlock(&dpm_list_mtx);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
- error = device_suspend(dev);
+ list_move(&dev->power.entry, &dpm_suspended_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend))
+ continue;
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_suspended_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1791,14 +1754,16 @@ int dpm_suspend(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
+
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
- }
+
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
@@ -1949,11 +1914,11 @@ int dpm_suspend_start(pm_message_t state)
int error;
error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
+ if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- } else
+ else
error = dpm_suspend(state);
+
dpm_show_time(starttime, state, error, "start");
return error;
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 05793c9fbb84..2ee45841486b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -94,6 +94,7 @@ static void update_pm_runtime_accounting(struct device *dev)
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
@@ -1176,7 +1177,7 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
@@ -1197,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
-int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
@@ -1218,9 +1219,40 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
return retval;
}
+
+/**
+ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
+ * in active state
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE, in which case 1 is returned. If the device is in a different
+ * state, 0 is returned. If runtime PM is disabled for the device, -EINVAL is
+ * returned and the usage counter is left unmodified.
+ */
+int pm_runtime_get_if_active(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which
+ * case 1 is returned. If the device is in a different state or its usage
+ * counter is 0, 0 is returned. If runtime PM is disabled for the device,
+ * -EINVAL is returned and the usage counter is left unmodified.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
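A usage sketch, not part of this patch, of how callers are expected to use the two wrappers after the split; foo_flush_fifo() and foo_opportunistic_sync() are made-up driver helpers, the return convention is the one documented above.

#include <linux/pm_runtime.h>

/* Touch the hardware only if it is currently powered (RPM_ACTIVE). */
static void foo_flush_fifo(struct device *dev)
{
	if (pm_runtime_get_if_active(dev) <= 0)
		return;		/* not active, or runtime PM disabled (-EINVAL) */

	/* ... register access ... */

	pm_runtime_put(dev);
}

/* Do optional work only if someone else already holds a usage count. */
static void foo_opportunistic_sync(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	/* ... register access ... */

	pm_runtime_put(dev);
}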
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 42171f766dcb..5a5a9e978e85 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 583dd5d7d46b..bcdb25bec77c 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -93,6 +93,7 @@ struct regmap {
#endif
unsigned int max_register;
+ bool max_register_is_set;
bool (*writeable_reg)(struct device *dev, unsigned int reg);
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index b7e4b2464102..9b17c77dec9d 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -23,7 +23,7 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- if (!map || map->reg_stride_order < 0 || !map->max_register)
+ if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index ac63a73ccdaa..2e41cb12b8e2 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -187,8 +187,10 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return 0;
}
- if (!map->max_register && map->num_reg_defaults_raw)
+ if (!map->max_register_is_set && map->num_reg_defaults_raw) {
map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
+ map->max_register_is_set = true;
+ }
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 0d957c5f1bcc..bb2ab6129f38 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -1341,6 +1341,71 @@ static void raw_sync(struct kunit *test)
regmap_exit(map);
}
+static void raw_ranges(struct kunit *test)
+{
+ struct raw_test_types *t = (struct raw_test_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = raw_regmap_config;
+ config.volatile_reg = test_range_all_volatile;
+ config.ranges = &test_range;
+ config.num_ranges = 1;
+ config.max_register = test_range.range_max;
+
+ map = gen_raw_regmap(&config, t, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Reset the page to a non-zero value to trigger a change */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
+ test_range.range_max));
+
+ /* Check we set the page and use the window for writes */
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
+ test_range.range_min +
+ test_range.window_len,
+ 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ /* Same for reads */
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
+ test_range.range_min +
+ test_range.window_len,
+ &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ /* No physical access triggered in the virtual range */
+ for (i = test_range.range_min; i < test_range.range_max; i++) {
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ }
+
+ regmap_exit(map);
+}
+
static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
@@ -1368,6 +1433,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
+ KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
{}
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6db77d8e45f9..5cb425f6f02d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(regmap_check_range_table);
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->writeable_reg)
@@ -112,7 +112,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
if (!map->cache_ops)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
map->lock(map->lock_arg);
@@ -129,7 +129,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (!map->reg_read)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->format.format_write)
@@ -787,6 +787,7 @@ struct regmap *__regmap_init(struct device *dev,
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->wr_table = config->wr_table;
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
@@ -1412,6 +1413,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
regmap_debugfs_exit(map);
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
@@ -3383,7 +3385,7 @@ EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
*/
int regmap_get_max_register(struct regmap *map)
{
- return map->max_register ? map->max_register : -EINVAL;
+ return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
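With the new flag a map whose only valid register is address 0 can be described explicitly; previously max_register == 0 was indistinguishable from "no maximum set". A hypothetical single-register device could use the max_register_is_0 config field referenced in __regmap_init() above:

#include <linux/regmap.h>

/* Made-up device with a single 8-bit register at address 0. */
static const struct regmap_config foo_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.max_register		= 0,
	.max_register_is_0	= true,	/* regmap_get_max_register() returns 0 */
	.cache_type		= REGCACHE_FLAT,
};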
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7061d3ee836a..6b5d34919c72 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -68,7 +68,7 @@ static struct attribute *bcma_device_attrs[] = {
};
ATTRIBUTE_GROUPS(bcma_device);
-static struct bus_type bcma_bus_type = {
+static const struct bus_type bcma_bus_type = {
.name = "bcma",
.match = bcma_bus_match,
.probe = bcma_device_probe,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 2b98114a9fe0..a25414228e47 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1779,7 +1779,7 @@ static int fd_alloc_disk(int drive, int system)
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index b1b47d88f5db..b6dac8cee70f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -24,8 +24,8 @@ static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;
-/* GPFS needs a larger value than the default. */
-static int aoe_maxsectors;
+/* random default picked from the historic block max_sectors cap */
+static int aoe_maxsectors = 2560;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
"When nonzero, set the maximum number of sectors per I/O request");
@@ -334,6 +334,10 @@ aoeblk_gdalloc(void *vp)
mempool_t *mp;
struct blk_mq_tag_set *set;
sector_t ssize;
+ struct queue_limits lim = {
+ .max_hw_sectors = aoe_maxsectors,
+ .io_opt = SZ_2M,
+ };
ulong flags;
int late = 0;
int err;
@@ -371,7 +375,7 @@ aoeblk_gdalloc(void *vp)
goto err_mempool;
}
- gd = blk_mq_alloc_disk(set, d);
+ gd = blk_mq_alloc_disk(set, &lim, d);
if (IS_ERR(gd)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
@@ -384,14 +388,9 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
- /* random number picked from the history block max_sectors cap */
- blk_queue_max_hw_sectors(gd->queue, 2560u);
- blk_queue_io_opt(gd->queue, SZ_2M);
d->bufpool = mp;
d->blkq = gd->queue;
d->gd = gd;
- if (aoe_maxsectors)
- blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
gd->minors = AOE_PARTITIONS;
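The shape of this conversion repeats across the block drivers touched below: the limits are described up front in a struct queue_limits and passed to the disk allocation, replacing the later blk_queue_* setter calls. Condensed from the aoeblk_gdalloc() hunk above (set and d are the driver's tag set and device as in that function):

	struct queue_limits lim = {
		.max_hw_sectors	= aoe_maxsectors,
		.io_opt		= SZ_2M,
	};
	struct gendisk *gd;

	/* Passing a NULL limits pointer keeps the block layer defaults. */
	gd = blk_mq_alloc_disk(set, &lim, d);
	if (IS_ERR(gd))
		return PTR_ERR(gd);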
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d7317425be51..cc9077b588d7 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
rcu_read_lock();
for_each_netdev_rcu(&init_net, ifp) {
dev_hold(ifp);
- if (!is_aoe_netif(ifp))
- goto cont;
+ if (!is_aoe_netif(ifp)) {
+ dev_put(ifp);
+ continue;
+ }
skb = new_skb(sizeof *h + sizeof *ch);
if (skb == NULL) {
printk(KERN_INFO "aoe: skb alloc failure\n");
- goto cont;
+ dev_put(ifp);
+ continue;
}
skb_put(skb, sizeof *h + sizeof *ch);
skb->dev = ifp;
@@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
h->major = cpu_to_be16(aoemajor);
h->minor = aoeminor;
h->cmd = AOECMD_CFG;
-
-cont:
- dev_put(ifp);
}
rcu_read_unlock();
}
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index c51ea95bc2ce..923a134fd766 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -63,6 +63,7 @@ tx(int id) __must_hold(&txlock)
pr_warn("aoe: packet could not be sent on %s. %s\n",
ifp ? ifp->name : "netif",
"consider increasing tx_queue_len");
+ dev_put(ifp);
spin_lock_irq(&txlock);
}
return 0;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 50949207798d..cacc4ba942a8 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1994,7 +1994,7 @@ static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 970bd6ff38c4..e322cef6596b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -318,6 +318,16 @@ static int brd_alloc(int i)
struct gendisk *disk;
char buf[DISK_NAME_LEN];
int err = -ENOMEM;
+ struct queue_limits lim = {
+ /*
+ * This is so fdisk will align partitions on 4k, because of
+ * direct_access API needing 4k alignment, returning a PFN
+ * (This is only a problem on very small devices <= 4M,
+ * otherwise fdisk will align on 1M. Regardless this call
+ * is harmless)
+ */
+ .physical_block_size = PAGE_SIZE,
+ };
list_for_each_entry(brd, &brd_devices, brd_list)
if (brd->brd_number == i)
@@ -335,10 +345,11 @@ static int brd_alloc(int i)
debugfs_create_u64(buf, 0444, brd_debugfs_dir,
&brd->brd_nr_pages);
- disk = brd->brd_disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = brd->brd_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_free_dev;
-
+ }
disk->major = RAMDISK_MAJOR;
disk->first_minor = i * max_part;
disk->minors = max_part;
@@ -347,15 +358,6 @@ static int brd_alloc(int i)
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
- /*
- * This is so fdisk will align partitions on 4k, because of
- * direct_access API needing 4k alignment, returning a PFN
- * (This is only a problem on very small devices <= 4M,
- * otherwise fdisk will align on 1M. Regardless this call
- * is harmless)
- */
- blk_queue_physical_block_size(disk->queue, PAGE_SIZE);
-
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
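Note the calling-convention change that accompanies the limits argument: blk_alloc_disk() now returns an ERR_PTR() on failure instead of NULL, so callers such as brd above switch from a NULL check to IS_ERR()/PTR_ERR(). In sketch form:

	struct queue_limits lim = {
		.physical_block_size = PAGE_SIZE,
	};
	struct gendisk *disk;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		return PTR_ERR(disk);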
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c21e3732759e..94dc0a235919 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -524,9 +524,9 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
- struct bdev_handle *backing_bdev_handle;
+ struct file *backing_bdev_file;
struct block_device *md_bdev;
- struct bdev_handle *md_bdev_handle;
+ struct file *f_md_bdev;
struct drbd_md md;
struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6bc86106c7b2..113b441d4d36 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2690,6 +2690,14 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
int id;
int vnr = adm_ctx->volume;
enum drbd_ret_code err = ERR_NOMEM;
+ struct queue_limits lim = {
+ /*
+ * Setting max_hw_sectors to an odd value of 8 KiB here.
+ * This triggers a max_bio_size message upon first attach or
+ * connect.
+ */
+ .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8,
+ };
device = minor_to_device(minor);
if (device)
@@ -2708,9 +2716,11 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_init_set_defaults(device);
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_no_disk;
+ }
device->vdisk = disk;
device->rq_queue = disk->queue;
@@ -2727,9 +2737,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
- /* Setting the max_hw_sectors to an odd value of 8kibyte here
- This triggers a max_bio_size message upon first attach or connect */
- blk_queue_max_hw_sectors(disk->queue, DRBD_MAX_BIO_SIZE_SAFE >> 8);
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 43747a1aae43..5d65c9754d83 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1189,9 +1189,31 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
return 0;
}
-static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
+static unsigned int drbd_max_peer_bio_size(struct drbd_device *device)
{
- q->limits.discard_granularity = granularity;
+ /*
+ * We may ignore peer limits if the peer is modern enough. From 8.3.8
+ * onwards the peer can use multiple BIOs for a single peer_request.
+ */
+ if (device->state.conn < C_WF_REPORT_PARAMS)
+ return device->peer_max_bio_size;
+
+ if (first_peer_device(device)->connection->agreed_pro_version < 94)
+ return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
+ /*
+ * Correct old drbd (up to 8.3.7) if it believes it can do more than
+ * 32KiB.
+ */
+ if (first_peer_device(device)->connection->agreed_pro_version == 94)
+ return DRBD_MAX_SIZE_H80_PACKET;
+
+ /*
+ * drbd 8.3.8 onwards, before 8.4.0
+ */
+ if (first_peer_device(device)->connection->agreed_pro_version < 100)
+ return DRBD_MAX_BIO_SIZE_P95;
+ return DRBD_MAX_BIO_SIZE;
}
static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
@@ -1204,149 +1226,119 @@ static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
return AL_EXTENT_SIZE >> 9;
}
-static void decide_on_discard_support(struct drbd_device *device,
+static bool drbd_discard_supported(struct drbd_connection *connection,
struct drbd_backing_dev *bdev)
{
- struct drbd_connection *connection =
- first_peer_device(device)->connection;
- struct request_queue *q = device->rq_queue;
- unsigned int max_discard_sectors;
-
if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
- goto not_supported;
+ return false;
if (connection->cstate >= C_CONNECTED &&
!(connection->agreed_features & DRBD_FF_TRIM)) {
drbd_info(connection,
"peer DRBD too old, does not support TRIM: disabling discards\n");
- goto not_supported;
+ return false;
}
- /*
- * We don't care for the granularity, really.
- *
- * Stacking limits below should fix it for the local device. Whether or
- * not it is a suitable granularity on the remote device is not our
- * problem, really. If you care, you need to use devices with similar
- * topology on all peers.
- */
- blk_queue_discard_granularity(q, 512);
- max_discard_sectors = drbd_max_discard_sectors(connection);
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- return;
-
-not_supported:
- blk_queue_discard_granularity(q, 0);
- blk_queue_max_discard_sectors(q, 0);
+ return true;
}
-static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
+/* This is the workaround for "bio would need to, but cannot, be split" */
+static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
{
- /* Fixup max_write_zeroes_sectors after blk_stack_limits():
- * if we can handle "zeroes" efficiently on the protocol,
- * we want to do that, even if our backend does not announce
- * max_write_zeroes_sectors itself. */
- struct drbd_connection *connection = first_peer_device(device)->connection;
- /* If the peer announces WZEROES support, use it. Otherwise, rather
- * send explicit zeroes than rely on some discard-zeroes-data magic. */
- if (connection->agreed_features & DRBD_FF_WZEROES)
- q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
- else
- q->limits.max_write_zeroes_sectors = 0;
-}
+ unsigned int max_segments;
-static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
-{
- unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
- unsigned int discard_granularity =
- device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;
+ rcu_read_lock();
+ max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+ rcu_read_unlock();
- if (discard_granularity > max_discard) {
- blk_queue_discard_granularity(q, 0);
- blk_queue_max_discard_sectors(q, 0);
- }
+ if (!max_segments)
+ return BLK_MAX_SEGMENTS;
+ return max_segments;
}
-static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
- unsigned int max_bio_size, struct o_qlim *o)
+void drbd_reconsider_queue_parameters(struct drbd_device *device,
+ struct drbd_backing_dev *bdev, struct o_qlim *o)
{
+ struct drbd_connection *connection =
+ first_peer_device(device)->connection;
struct request_queue * const q = device->rq_queue;
- unsigned int max_hw_sectors = max_bio_size >> 9;
- unsigned int max_segments = 0;
+ unsigned int now = queue_max_hw_sectors(q) << 9;
+ struct queue_limits lim;
struct request_queue *b = NULL;
- struct disk_conf *dc;
+ unsigned int new;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
- max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
- rcu_read_lock();
- dc = rcu_dereference(device->ldev->disk_conf);
- max_segments = dc->max_bio_bvecs;
- rcu_read_unlock();
-
- blk_set_stacking_limits(&q->limits);
+ device->local_max_bio_size =
+ queue_max_hw_sectors(b) << SECTOR_SHIFT;
}
- blk_queue_max_hw_sectors(q, max_hw_sectors);
- /* This is the workaround for "bio would need to, but cannot, be split" */
- blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_segment_boundary(q, PAGE_SIZE-1);
- decide_on_discard_support(device, bdev);
-
- if (b) {
- blk_stack_limits(&q->limits, &b->limits, 0);
- disk_update_readahead(device->vdisk);
+ /*
+ * We may later detach and re-attach on a disconnected Primary. Avoid
+ * decreasing the value in this case.
+ *
+ * We want to store what we know the peer DRBD can handle, not what the
+ * peer IO backend can handle.
+ */
+ new = min3(DRBD_MAX_BIO_SIZE, device->local_max_bio_size,
+ max(drbd_max_peer_bio_size(device), device->peer_max_bio_size));
+ if (new != now) {
+ if (device->state.role == R_PRIMARY && new < now)
+ drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n",
+ new, now);
+ drbd_info(device, "max BIO size = %u\n", new);
}
- fixup_write_zeroes(device, q);
- fixup_discard_support(device, q);
-}
-
-void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
-{
- unsigned int now, new, local, peer;
-
- now = queue_max_hw_sectors(device->rq_queue) << 9;
- local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
- peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
+ lim = queue_limits_start_update(q);
if (bdev) {
- local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
- device->local_max_bio_size = local;
+ blk_set_stacking_limits(&lim);
+ lim.max_segments = drbd_backing_dev_max_segments(device);
+ } else {
+ lim.max_segments = BLK_MAX_SEGMENTS;
}
- local = min(local, DRBD_MAX_BIO_SIZE);
- /* We may ignore peer limits if the peer is modern enough.
- Because new from 8.3.8 onwards the peer can use multiple
- BIOs for a single peer_request */
- if (device->state.conn >= C_WF_REPORT_PARAMS) {
- if (first_peer_device(device)->connection->agreed_pro_version < 94)
- peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
- /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- else if (first_peer_device(device)->connection->agreed_pro_version == 94)
- peer = DRBD_MAX_SIZE_H80_PACKET;
- else if (first_peer_device(device)->connection->agreed_pro_version < 100)
- peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
- else
- peer = DRBD_MAX_BIO_SIZE;
+ lim.max_hw_sectors = new >> SECTOR_SHIFT;
+ lim.seg_boundary_mask = PAGE_SIZE - 1;
- /* We may later detach and re-attach on a disconnected Primary.
- * Avoid this setting to jump back in that case.
- * We want to store what we know the peer DRBD can handle,
- * not what the peer IO backend can handle. */
- if (peer > device->peer_max_bio_size)
- device->peer_max_bio_size = peer;
+ /*
+ * We don't care for the granularity, really.
+ *
+ * Stacking limits below should fix it for the local device. Whether or
+ * not it is a suitable granularity on the remote device is not our
+ * problem, really. If you care, you need to use devices with similar
+ * topology on all peers.
+ */
+ if (drbd_discard_supported(connection, bdev)) {
+ lim.discard_granularity = 512;
+ lim.max_hw_discard_sectors =
+ drbd_max_discard_sectors(connection);
+ } else {
+ lim.discard_granularity = 0;
+ lim.max_hw_discard_sectors = 0;
}
- new = min(local, peer);
- if (device->state.role == R_PRIMARY && new < now)
- drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
+ if (bdev)
+ blk_stack_limits(&lim, &b->limits, 0);
- if (new != now)
- drbd_info(device, "max BIO size = %u\n", new);
+ /*
+ * If we can handle "zeroes" efficiently on the protocol, we want to do
+ * that, even if our backend does not announce max_write_zeroes_sectors
+ * itself.
+ */
+ if (connection->agreed_features & DRBD_FF_WZEROES)
+ lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
+ else
+ lim.max_write_zeroes_sectors = 0;
+
+ if ((lim.discard_granularity >> SECTOR_SHIFT) >
+ lim.max_hw_discard_sectors) {
+ lim.discard_granularity = 0;
+ lim.max_hw_discard_sectors = 0;
+ }
- drbd_setup_queue_param(device, bdev, new, o);
+ if (queue_limits_commit_update(q, &lim))
+ drbd_err(device, "setting new queue limits failed\n");
}
/* Starts the worker thread */
@@ -1635,45 +1627,45 @@ success:
return 0;
}
-static struct bdev_handle *open_backing_dev(struct drbd_device *device,
+static struct file *open_backing_dev(struct drbd_device *device,
const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
- struct bdev_handle *handle;
+ struct file *file;
int err = 0;
- handle = bdev_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
- claim_ptr, NULL);
- if (IS_ERR(handle)) {
+ file = bdev_file_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ claim_ptr, NULL);
+ if (IS_ERR(file)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
- bdev_path, PTR_ERR(handle));
- return handle;
+ bdev_path, PTR_ERR(file));
+ return file;
}
if (!do_bd_link)
- return handle;
+ return file;
- err = bd_link_disk_holder(handle->bdev, device->vdisk);
+ err = bd_link_disk_holder(file_bdev(file), device->vdisk);
if (err) {
- bdev_release(handle);
+ fput(file);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
- handle = ERR_PTR(err);
+ file = ERR_PTR(err);
}
- return handle;
+ return file;
}
static int open_backing_devices(struct drbd_device *device,
struct disk_conf *new_disk_conf,
struct drbd_backing_dev *nbc)
{
- struct bdev_handle *handle;
+ struct file *file;
- handle = open_backing_dev(device, new_disk_conf->backing_dev, device,
+ file = open_backing_dev(device, new_disk_conf->backing_dev, device,
true);
- if (IS_ERR(handle))
+ if (IS_ERR(file))
return ERR_OPEN_DISK;
- nbc->backing_bdev = handle->bdev;
- nbc->backing_bdev_handle = handle;
+ nbc->backing_bdev = file_bdev(file);
+ nbc->backing_bdev_file = file;
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
@@ -1683,7 +1675,7 @@ static int open_backing_devices(struct drbd_device *device,
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- handle = open_backing_dev(device, new_disk_conf->meta_dev,
+ file = open_backing_dev(device, new_disk_conf->meta_dev,
/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
* if potentially shared with other drbd minors */
(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
@@ -1691,21 +1683,21 @@ static int open_backing_devices(struct drbd_device *device,
* as would happen with internal metadata. */
(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
- if (IS_ERR(handle))
+ if (IS_ERR(file))
return ERR_OPEN_MD_DISK;
- nbc->md_bdev = handle->bdev;
- nbc->md_bdev_handle = handle;
+ nbc->md_bdev = file_bdev(file);
+ nbc->f_md_bdev = file;
return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device,
- struct bdev_handle *handle, bool do_bd_unlink)
+ struct file *bdev_file, bool do_bd_unlink)
{
- if (!handle)
+ if (!bdev_file)
return;
if (do_bd_unlink)
- bd_unlink_disk_holder(handle->bdev, device->vdisk);
- bdev_release(handle);
+ bd_unlink_disk_holder(file_bdev(bdev_file), device->vdisk);
+ fput(bdev_file);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
@@ -1713,9 +1705,9 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *
if (ldev == NULL)
return;
- close_backing_dev(device, ldev->md_bdev_handle,
+ close_backing_dev(device, ldev->f_md_bdev,
ldev->md_bdev != ldev->backing_bdev);
- close_backing_dev(device, ldev->backing_bdev_handle, true);
+ close_backing_dev(device, ldev->backing_bdev_file, true);
kfree(ldev->disk_conf);
kfree(ldev);
@@ -2131,9 +2123,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
fail:
conn_reconfig_done(connection);
if (nbc) {
- close_backing_dev(device, nbc->md_bdev_handle,
+ close_backing_dev(device, nbc->f_md_bdev,
nbc->md_bdev != nbc->backing_bdev);
- close_backing_dev(device, nbc->backing_bdev_handle, true);
+ close_backing_dev(device, nbc->backing_bdev_file, true);
kfree(nbc);
}
kfree(new_disk_conf);
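The bdev_handle to struct file conversion above follows a common pattern; a minimal sketch outside of DRBD (the path, holder and the helper names foo_open_backing()/foo_close_backing() are placeholders):

#include <linux/blkdev.h>
#include <linux/file.h>

static int foo_open_backing(const char *path, void *holder,
			    struct block_device **bdev, struct file **bdev_file)
{
	struct file *file;

	file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				      holder, NULL);
	if (IS_ERR(file))
		return PTR_ERR(file);

	*bdev = file_bdev(file);	/* the block_device behind the file */
	*bdev_file = file;		/* keep the file for the final fput() */
	return 0;
}

static void foo_close_backing(struct file *bdev_file)
{
	fput(bdev_file);	/* releases the claim taken at open time */
}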
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 287a8d1d3f70..e858e7e0383f 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1542,9 +1542,10 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_resource_state_change *resource_state_change,
+ void *state_change,
enum drbd_notification_type type)
{
+ struct drbd_resource_state_change *resource_state_change = state_change;
struct drbd_resource *resource = resource_state_change->resource;
struct resource_info resource_info = {
.res_role = resource_state_change->role[NEW],
@@ -1558,13 +1559,14 @@ int notify_resource_state_change(struct sk_buff *skb,
int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_connection_state_change *connection_state_change,
+ void *state_change,
enum drbd_notification_type type)
{
- struct drbd_connection *connection = connection_state_change->connection;
+ struct drbd_connection_state_change *p = state_change;
+ struct drbd_connection *connection = p->connection;
struct connection_info connection_info = {
- .conn_connection_state = connection_state_change->cstate[NEW],
- .conn_role = connection_state_change->peer_role[NEW],
+ .conn_connection_state = p->cstate[NEW],
+ .conn_role = p->peer_role[NEW],
};
return notify_connection_state(skb, seq, connection, &connection_info, type);
@@ -1572,9 +1574,10 @@ int notify_connection_state_change(struct sk_buff *skb,
int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_device_state_change *device_state_change,
+ void *state_change,
enum drbd_notification_type type)
{
+ struct drbd_device_state_change *device_state_change = state_change;
struct drbd_device *device = device_state_change->device;
struct device_info device_info = {
.dev_disk_state = device_state_change->disk_state[NEW],
@@ -1585,9 +1588,10 @@ int notify_device_state_change(struct sk_buff *skb,
int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_peer_device_state_change *p,
+ void *state_change,
enum drbd_notification_type type)
{
+ struct drbd_peer_device_state_change *p = state_change;
struct drbd_peer_device *peer_device = p->peer_device;
struct peer_device_info peer_device_info = {
.peer_repl_state = p->repl_state[NEW],
@@ -1605,8 +1609,8 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
- int (*last_func)(struct sk_buff *, unsigned int, void *,
- enum drbd_notification_type) = NULL;
+ int (*last_func)(struct sk_buff *, unsigned int,
+ void *, enum drbd_notification_type) = NULL;
void *last_arg = NULL;
#define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
@@ -1616,7 +1620,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
})
#define REMEMBER_STATE_CHANGE(func, arg, type) \
({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
- last_func = (typeof(last_func))func; \
+ last_func = func; \
last_arg = arg; \
})
diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index 9d78d8e3912e..a56a57d67686 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -46,19 +46,19 @@ extern void forget_state_change(struct drbd_state_change *);
extern int notify_resource_state_change(struct sk_buff *,
unsigned int,
- struct drbd_resource_state_change *,
+ void *,
enum drbd_notification_type type);
extern int notify_connection_state_change(struct sk_buff *,
unsigned int,
- struct drbd_connection_state_change *,
+ void *,
enum drbd_notification_type type);
extern int notify_device_state_change(struct sk_buff *,
unsigned int,
- struct drbd_device_state_change *,
+ void *,
enum drbd_notification_type type);
extern int notify_peer_device_state_change(struct sk_buff *,
unsigned int,
- struct drbd_peer_device_state_change *,
+ void *,
enum drbd_notification_type type);
#endif /* DRBD_STATE_CHANGE_H */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index d0e41d52d6a9..1b399ec8c07d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -530,14 +530,13 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
-typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
/* this is called after the interrupt of the
* main command */
void (*redo)(void); /* this is called to retry the operation */
void (*error)(void); /* this is called to tally an error */
- done_f done; /* this is called to say if the operation has
+ void (*done)(int); /* this is called to say if the operation has
* succeeded/failed */
} *cont;
@@ -985,6 +984,10 @@ static void empty(void)
{
}
+static void empty_done(int result)
+{
+}
+
static void (*floppy_work_fn)(void);
static void floppy_work_workfn(struct work_struct *work)
@@ -1998,14 +2001,14 @@ static const struct cont_t wakeup_cont = {
.interrupt = empty,
.redo = do_wakeup,
.error = empty,
- .done = (done_f)empty
+ .done = empty_done,
};
static const struct cont_t intr_cont = {
.interrupt = empty,
.redo = process_fd_request,
.error = empty,
- .done = (done_f)empty
+ .done = empty_done,
};
/* schedules handler, waiting for completion. May be interrupted, will then
@@ -4513,13 +4516,15 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = 64,
+ };
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&tag_sets[drive], NULL);
+ disk = blk_mq_alloc_disk(&tag_sets[drive], &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
- blk_queue_max_hw_sectors(disk->queue, 64);
disk->major = FLOPPY_MAJOR;
disk->first_minor = TOMINOR(drive) | (type << 2);
disk->minors = 1;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f8145499da38..28a95fd366fe 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -750,12 +750,13 @@ static void loop_sysfs_exit(struct loop_device *lo)
&loop_attribute_group);
}
-static void loop_config_discard(struct loop_device *lo)
+static void loop_config_discard(struct loop_device *lo,
+ struct queue_limits *lim)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
- struct request_queue *q = lo->lo_queue;
- u32 granularity, max_discard_sectors;
+ u32 granularity = 0, max_discard_sectors = 0;
+ struct kstatfs sbuf;
/*
* If the backing device is a block device, mirror its zeroing
@@ -775,29 +776,17 @@ static void loop_config_discard(struct loop_device *lo)
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard.
*/
- } else if (!file->f_op->fallocate) {
- max_discard_sectors = 0;
- granularity = 0;
-
- } else {
- struct kstatfs sbuf;
-
+ } else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
max_discard_sectors = UINT_MAX >> 9;
- if (!vfs_statfs(&file->f_path, &sbuf))
- granularity = sbuf.f_bsize;
- else
- max_discard_sectors = 0;
+ granularity = sbuf.f_bsize;
}
- if (max_discard_sectors) {
- q->limits.discard_granularity = granularity;
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- } else {
- q->limits.discard_granularity = 0;
- blk_queue_max_discard_sectors(q, 0);
- blk_queue_max_write_zeroes_sectors(q, 0);
- }
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
+ if (max_discard_sectors)
+ lim->discard_granularity = granularity;
+ else
+ lim->discard_granularity = 0;
}
struct loop_worker {
@@ -986,6 +975,20 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
+static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize,
+ bool update_discard_settings)
+{
+ struct queue_limits lim;
+
+ lim = queue_limits_start_update(lo->lo_queue);
+ lim.logical_block_size = bsize;
+ lim.physical_block_size = bsize;
+ lim.io_min = bsize;
+ if (update_discard_settings)
+ loop_config_discard(lo, &lim);
+ return queue_limits_commit_update(lo->lo_queue, &lim);
+}
+
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct loop_config *config)
@@ -1083,11 +1086,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
else
bsize = 512;
- blk_queue_logical_block_size(lo->lo_queue, bsize);
- blk_queue_physical_block_size(lo->lo_queue, bsize);
- blk_queue_io_min(lo->lo_queue, bsize);
+ error = loop_reconfigure_limits(lo, bsize, true);
+ if (WARN_ON_ONCE(error))
+ goto out_unlock;
- loop_config_discard(lo);
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
@@ -1154,9 +1156,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- blk_queue_logical_block_size(lo->lo_queue, 512);
- blk_queue_physical_block_size(lo->lo_queue, 512);
- blk_queue_io_min(lo->lo_queue, 512);
+ loop_reconfigure_limits(lo, 512, false);
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
/* let user-space know about this change */
@@ -1488,9 +1488,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
- blk_queue_logical_block_size(lo->lo_queue, arg);
- blk_queue_physical_block_size(lo->lo_queue, arg);
- blk_queue_io_min(lo->lo_queue, arg);
+ err = loop_reconfigure_limits(lo, arg, false);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
@@ -1982,6 +1980,12 @@ static const struct blk_mq_ops loop_mq_ops = {
static int loop_add(int i)
{
+ struct queue_limits lim = {
+ /*
+ * Random number picked from the historic block max_sectors cap.
+ */
+ .max_hw_sectors = 2560u,
+ };
struct loop_device *lo;
struct gendisk *disk;
int err;
@@ -2025,16 +2029,13 @@ static int loop_add(int i)
if (err)
goto out_free_idr;
- disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
+ disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_cleanup_tags;
}
lo->lo_queue = lo->lo_disk->queue;
- /* random number picked from the history block max_sectors cap */
- blk_queue_max_hw_sectors(lo->lo_queue, 2560u);
-
/*
* By default, we do buffer IO, so it doesn't make sense to enable
* merge because the I/O submitted to backing file is handled page by
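loop_reconfigure_limits() above is built on the update pair this series uses for live queues: queue_limits_start_update() snapshots the current limits, the driver edits the copy, and queue_limits_commit_update() validates and applies the whole set in one step. A sketch of that pattern in isolation; the helper name is invented for illustration:

#include <linux/blkdev.h>

/* illustrative: change the block size of an existing queue atomically */
static int mydrv_set_block_size(struct request_queue *q, unsigned int bsize)
{
        struct queue_limits lim;

        lim = queue_limits_start_update(q);     /* snapshot current limits */
        lim.logical_block_size = bsize;
        lim.physical_block_size = bsize;
        lim.io_min = bsize;
        return queue_limits_commit_update(q, &lim);     /* validate + apply */
}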
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b200950e8fb5..43a187609ef7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3401,6 +3401,12 @@ static const struct blk_mq_ops mtip_mq_ops = {
*/
static int mtip_block_initialize(struct driver_data *dd)
{
+ struct queue_limits lim = {
+ .physical_block_size = 4096,
+ .max_hw_sectors = 0xffff,
+ .max_segments = MTIP_MAX_SG,
+ .max_segment_size = 0x400000,
+ };
int rv = 0, wait_for_rebuild = 0;
sector_t capacity;
unsigned int index = 0;
@@ -3431,7 +3437,7 @@ static int mtip_block_initialize(struct driver_data *dd)
goto block_queue_alloc_tag_error;
}
- dd->disk = blk_mq_alloc_disk(&dd->tags, dd);
+ dd->disk = blk_mq_alloc_disk(&dd->tags, &lim, dd);
if (IS_ERR(dd->disk)) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
@@ -3481,12 +3487,7 @@ skip_create_disk:
/* Set device limits. */
blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
- blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
- blk_queue_physical_block_size(dd->queue, 4096);
- blk_queue_max_hw_sectors(dd->queue, 0xffff);
- blk_queue_max_segment_size(dd->queue, 0x400000);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
- blk_queue_io_min(dd->queue, 4096);
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index d914156db2d8..27b2187e7a6d 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -114,6 +114,10 @@ static const struct block_device_operations n64cart_fops = {
*/
static int __init n64cart_probe(struct platform_device *pdev)
{
+ struct queue_limits lim = {
+ .physical_block_size = 4096,
+ .logical_block_size = 4096,
+ };
struct gendisk *disk;
int err = -ENOMEM;
@@ -131,9 +135,11 @@ static int __init n64cart_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out;
+ }
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART;
@@ -145,8 +151,6 @@ static int __init n64cart_probe(struct platform_device *pdev)
set_disk_ro(disk, 1);
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_physical_block_size(disk->queue, 4096);
- blk_queue_logical_block_size(disk->queue, 4096);
err = add_disk(disk);
if (err)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 33a8f37bb6a1..9d4ec9273bf9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -316,9 +316,12 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
+ struct queue_limits lim;
+ int error;
+
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
@@ -334,10 +337,16 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
if (!nbd->pid)
return 0;
+ lim = queue_limits_start_update(nbd->disk->queue);
if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
- blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
- blk_queue_logical_block_size(nbd->disk->queue, blksize);
- blk_queue_physical_block_size(nbd->disk->queue, blksize);
+ lim.max_hw_discard_sectors = UINT_MAX;
+ else
+ lim.max_hw_discard_sectors = 0;
+ lim.logical_block_size = blksize;
+ lim.physical_block_size = blksize;
+ error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ if (error)
+ return error;
if (max_part)
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
@@ -346,6 +355,18 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+ loff_t blksize)
+{
+ int error;
+
+ blk_mq_freeze_queue(nbd->disk->queue);
+ error = __nbd_set_size(nbd, bytesize, blksize);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
+
+ return error;
+}
+
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -1351,7 +1372,6 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->config = NULL;
nbd->tag_set.timeout = 0;
- blk_queue_max_discard_sectors(nbd->disk->queue, 0);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
@@ -1783,6 +1803,12 @@ static const struct blk_mq_ops nbd_mq_ops = {
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = 65536,
+ .max_user_sectors = 256,
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
+ };
struct nbd_device *nbd;
struct gendisk *disk;
int err = -ENOMEM;
@@ -1823,7 +1849,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
if (err < 0)
goto out_free_tags;
- disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
+ disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_free_idr;
@@ -1843,11 +1869,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_max_discard_sectors(disk->queue, 0);
- blk_queue_max_segment_size(disk->queue, UINT_MAX);
- blk_queue_max_segments(disk->queue, USHRT_MAX);
- blk_queue_max_hw_sectors(disk->queue, 65536);
- disk->queue->limits.max_sectors = 256;
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
@@ -2433,6 +2454,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
}
dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
+ if (!dev_list) {
+ nlmsg_free(reply);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 36755f263e8e..71c39bcd872c 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -115,6 +115,18 @@ module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif
+/*
+ * Historic queue modes.
+ *
+ * These days nothing but NULL_Q_MQ is actually supported, but we keep it the
+ * enum for error reporting.
+ */
+enum {
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -165,8 +177,8 @@ static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
-static bool shared_tags;
-module_param(shared_tags, bool, 0444);
+static bool g_shared_tags;
+module_param_named(shared_tags, g_shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
static bool g_shared_tag_bitmap;
@@ -426,6 +438,7 @@ NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
+NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
@@ -571,6 +584,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_offline,
&nullb_device_attr_virt_boundary,
&nullb_device_attr_no_sched,
+ &nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
NULL,
};
@@ -653,10 +667,11 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"badblocks,blocking,blocksize,cache_size,"
"completion_nsec,discard,home_node,hw_queue_depth,"
"irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
- "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
- "zone_capacity,zone_max_active,zone_max_open,"
- "zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
+ "poll_queues,power,queue_mode,shared_tag_bitmap,"
+ "shared_tags,size,submit_queues,use_per_node_hctx,"
+ "virt_boundary,zoned,zone_capacity,zone_max_active,"
+ "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
+ "zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -738,6 +753,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
+ dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
@@ -752,98 +768,11 @@ static void null_free_dev(struct nullb_device *dev)
kfree(dev);
}
-static void put_tag(struct nullb_queue *nq, unsigned int tag)
-{
- clear_bit_unlock(tag, nq->tag_map);
-
- if (waitqueue_active(&nq->wait))
- wake_up(&nq->wait);
-}
-
-static unsigned int get_tag(struct nullb_queue *nq)
-{
- unsigned int tag;
-
- do {
- tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
- if (tag >= nq->queue_depth)
- return -1U;
- } while (test_and_set_bit_lock(tag, nq->tag_map));
-
- return tag;
-}
-
-static void free_cmd(struct nullb_cmd *cmd)
-{
- put_tag(cmd->nq, cmd->tag);
-}
-
-static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
-
-static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
-{
- struct nullb_cmd *cmd;
- unsigned int tag;
-
- tag = get_tag(nq);
- if (tag != -1U) {
- cmd = &nq->cmds[tag];
- cmd->tag = tag;
- cmd->error = BLK_STS_OK;
- cmd->nq = nq;
- if (nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
- }
- return cmd;
- }
-
- return NULL;
-}
-
-static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
-{
- struct nullb_cmd *cmd;
- DEFINE_WAIT(wait);
-
- do {
- /*
- * This avoids multiple return statements, multiple calls to
- * __alloc_cmd() and a fast path call to prepare_to_wait().
- */
- cmd = __alloc_cmd(nq);
- if (cmd) {
- cmd->bio = bio;
- return cmd;
- }
- prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
- io_schedule();
- finish_wait(&nq->wait, &wait);
- } while (1);
-}
-
-static void end_cmd(struct nullb_cmd *cmd)
-{
- int queue_mode = cmd->nq->dev->queue_mode;
-
- switch (queue_mode) {
- case NULL_Q_MQ:
- blk_mq_end_request(cmd->rq, cmd->error);
- return;
- case NULL_Q_BIO:
- cmd->bio->bi_status = cmd->error;
- bio_endio(cmd->bio);
- break;
- }
-
- free_cmd(cmd);
-}
-
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
- end_cmd(container_of(timer, struct nullb_cmd, timer));
+ struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
+ blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
return HRTIMER_NORESTART;
}
@@ -856,7 +785,9 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_complete_rq(struct request *rq)
{
- end_cmd(blk_mq_rq_to_pdu(rq));
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ blk_mq_end_request(rq, cmd->error);
}
static struct nullb_page *null_alloc_page(void)
@@ -1273,7 +1204,7 @@ static int null_transfer(struct nullb *nullb, struct page *page,
static int null_handle_rq(struct nullb_cmd *cmd)
{
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
int err;
unsigned int len;
@@ -1298,63 +1229,21 @@ static int null_handle_rq(struct nullb_cmd *cmd)
return 0;
}
-static int null_handle_bio(struct nullb_cmd *cmd)
-{
- struct bio *bio = cmd->bio;
- struct nullb *nullb = cmd->nq->dev->nullb;
- int err;
- unsigned int len;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
-
- spin_lock_irq(&nullb->lock);
- bio_for_each_segment(bvec, bio, iter) {
- len = bvec.bv_len;
- err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), sector,
- bio->bi_opf & REQ_FUA);
- if (err) {
- spin_unlock_irq(&nullb->lock);
- return err;
- }
- sector += len >> SECTOR_SHIFT;
- }
- spin_unlock_irq(&nullb->lock);
- return 0;
-}
-
-static void null_stop_queue(struct nullb *nullb)
-{
- struct request_queue *q = nullb->q;
-
- if (nullb->dev->queue_mode == NULL_Q_MQ)
- blk_mq_stop_hw_queues(q);
-}
-
-static void null_restart_queue_async(struct nullb *nullb)
-{
- struct request_queue *q = nullb->q;
-
- if (nullb->dev->queue_mode == NULL_Q_MQ)
- blk_mq_start_stopped_hw_queues(q, true);
-}
-
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts = BLK_STS_OK;
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
if (!hrtimer_active(&nullb->bw_timer))
hrtimer_restart(&nullb->bw_timer);
if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
- null_stop_queue(nullb);
+ blk_mq_stop_hw_queues(nullb->q);
/* race with timer */
if (atomic_long_read(&nullb->cur_bytes) > 0)
- null_restart_queue_async(nullb);
+ blk_mq_start_stopped_hw_queues(nullb->q, true);
/* requeue request */
sts = BLK_STS_DEV_RESOURCE;
}
@@ -1381,37 +1270,29 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
- int err;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
+ return errno_to_blk_status(null_handle_rq(cmd));
- if (dev->queue_mode == NULL_Q_BIO)
- err = null_handle_bio(cmd);
- else
- err = null_handle_rq(cmd);
-
- return errno_to_blk_status(err);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb_device *dev = cmd->nq->dev;
struct bio *bio;
- if (dev->memory_backed)
- return;
-
- if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
- zero_fill_bio(cmd->bio);
- } else if (req_op(cmd->rq) == REQ_OP_READ) {
- __rq_for_each_bio(bio, cmd->rq)
+ if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
+ __rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
}
}
static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
/*
* Since root privileges are required to configure the null_blk
* driver, it is fine that this driver does not initialize the
@@ -1425,20 +1306,10 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
/* Complete IO by inline, softirq or timer */
switch (cmd->nq->dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
- switch (cmd->nq->dev->queue_mode) {
- case NULL_Q_MQ:
- blk_mq_complete_request(cmd->rq);
- break;
- case NULL_Q_BIO:
- /*
- * XXX: no proper submitting cpu information available.
- */
- end_cmd(cmd);
- break;
- }
+ blk_mq_complete_request(rq);
break;
case NULL_IRQ_NONE:
- end_cmd(cmd);
+ blk_mq_end_request(rq, cmd->error);
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -1499,7 +1370,7 @@ static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
- null_restart_queue_async(nullb);
+ blk_mq_start_stopped_hw_queues(nullb->q, true);
hrtimer_forward_now(&nullb->bw_timer, timer_interval);
@@ -1516,26 +1387,6 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
-static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
-{
- int index = 0;
-
- if (nullb->nr_queues != 1)
- index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
-
- return &nullb->queues[index];
-}
-
-static void null_submit_bio(struct bio *bio)
-{
- sector_t sector = bio->bi_iter.bi_sector;
- sector_t nr_sectors = bio_sectors(bio);
- struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
- struct nullb_queue *nq = nullb_to_queue(nullb);
-
- null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
-}
-
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool should_timeout_request(struct request *rq)
@@ -1655,7 +1506,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
blk_rq_sectors(req));
if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
blk_mq_end_request_batch))
- end_cmd(cmd);
+ blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1711,7 +1562,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
- cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
cmd->fake_timeout = should_timeout_request(rq) ||
@@ -1770,34 +1620,8 @@ static void null_queue_rqs(struct request **rqlist)
*rqlist = requeue_list;
}
-static void cleanup_queue(struct nullb_queue *nq)
-{
- bitmap_free(nq->tag_map);
- kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
- int i;
-
- for (i = 0; i < nullb->nr_queues; i++)
- cleanup_queue(&nullb->queues[i]);
-
- kfree(nullb->queues);
-}
-
-static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
- struct nullb_queue *nq = hctx->driver_data;
- struct nullb *nullb = nq->dev->nullb;
-
- nullb->nr_queues--;
-}
-
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
INIT_LIST_HEAD(&nq->poll_list);
spin_lock_init(&nq->poll_lock);
@@ -1815,7 +1639,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
nq = &nullb->queues[hctx_idx];
hctx->driver_data = nq;
null_init_queue(nullb, nq);
- nullb->nr_queues++;
return 0;
}
@@ -1828,7 +1651,6 @@ static const struct blk_mq_ops null_mq_ops = {
.poll = null_poll,
.map_queues = null_map_queues,
.init_hctx = null_init_hctx,
- .exit_hctx = null_exit_hctx,
};
static void null_del_dev(struct nullb *nullb)
@@ -1849,21 +1671,20 @@ static void null_del_dev(struct nullb *nullb)
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
atomic_long_set(&nullb->cur_bytes, LONG_MAX);
- null_restart_queue_async(nullb);
+ blk_mq_start_stopped_hw_queues(nullb->q, true);
}
put_disk(nullb->disk);
- if (dev->queue_mode == NULL_Q_MQ &&
- nullb->tag_set == &nullb->__tag_set)
+ if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- cleanup_queues(nullb);
+ kfree(nullb->queues);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
kfree(nullb);
dev->nullb = NULL;
}
-static void null_config_discard(struct nullb *nullb)
+static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
{
if (nullb->dev->discard == false)
return;
@@ -1880,43 +1701,14 @@ static void null_config_discard(struct nullb *nullb)
return;
}
- blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+ lim->max_hw_discard_sectors = UINT_MAX >> 9;
}
-static const struct block_device_operations null_bio_ops = {
- .owner = THIS_MODULE,
- .submit_bio = null_submit_bio,
- .report_zones = null_report_zones,
-};
-
-static const struct block_device_operations null_rq_ops = {
+static const struct block_device_operations null_ops = {
.owner = THIS_MODULE,
.report_zones = null_report_zones,
};
-static int setup_commands(struct nullb_queue *nq)
-{
- struct nullb_cmd *cmd;
- int i;
-
- nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
- if (!nq->cmds)
- return -ENOMEM;
-
- nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
- if (!nq->tag_map) {
- kfree(nq->cmds);
- return -ENOMEM;
- }
-
- for (i = 0; i < nq->queue_depth; i++) {
- cmd = &nq->cmds[i];
- cmd->tag = -1U;
- }
-
- return 0;
-}
-
static int setup_queues(struct nullb *nullb)
{
int nqueues = nr_cpu_ids;
@@ -1929,101 +1721,66 @@ static int setup_queues(struct nullb *nullb)
if (!nullb->queues)
return -ENOMEM;
- nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
-static int init_driver_queues(struct nullb *nullb)
+static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
{
- struct nullb_queue *nq;
- int i, ret = 0;
-
- for (i = 0; i < nullb->dev->submit_queues; i++) {
- nq = &nullb->queues[i];
-
- null_init_queue(nullb, nq);
-
- ret = setup_commands(nq);
- if (ret)
- return ret;
- nullb->nr_queues++;
+ set->ops = &null_mq_ops;
+ set->cmd_size = sizeof(struct nullb_cmd);
+ set->timeout = 5 * HZ;
+ set->nr_maps = 1;
+ if (poll_queues) {
+ set->nr_hw_queues += poll_queues;
+ set->nr_maps += 2;
}
- return 0;
+ return blk_mq_alloc_tag_set(set);
}
-static int null_gendisk_register(struct nullb *nullb)
+static int null_init_global_tag_set(void)
{
- sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
- struct gendisk *disk = nullb->disk;
+ int error;
- set_capacity(disk, size);
-
- disk->major = null_major;
- disk->first_minor = nullb->index;
- disk->minors = 1;
- if (queue_is_mq(nullb->q))
- disk->fops = &null_rq_ops;
- else
- disk->fops = &null_bio_ops;
- disk->private_data = nullb;
- strscpy_pad(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (tag_set.ops)
+ return 0;
- if (nullb->dev->zoned) {
- int ret = null_register_zoned_dev(nullb);
+ tag_set.nr_hw_queues = g_submit_queues;
+ tag_set.queue_depth = g_hw_queue_depth;
+ tag_set.numa_node = g_home_node;
+ tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ if (g_no_sched)
+ tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (g_blocking)
+ tag_set.flags |= BLK_MQ_F_BLOCKING;
- if (ret)
- return ret;
- }
-
- return add_disk(disk);
+ error = null_init_tag_set(&tag_set, g_poll_queues);
+ if (error)
+ tag_set.ops = NULL;
+ return error;
}
-static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
+static int null_setup_tagset(struct nullb *nullb)
{
- unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
- int hw_queues, numa_node;
- unsigned int queue_depth;
- int poll_queues;
-
- if (nullb) {
- hw_queues = nullb->dev->submit_queues;
- poll_queues = nullb->dev->poll_queues;
- queue_depth = nullb->dev->hw_queue_depth;
- numa_node = nullb->dev->home_node;
- if (nullb->dev->no_sched)
- flags |= BLK_MQ_F_NO_SCHED;
- if (nullb->dev->shared_tag_bitmap)
- flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- if (nullb->dev->blocking)
- flags |= BLK_MQ_F_BLOCKING;
- } else {
- hw_queues = g_submit_queues;
- poll_queues = g_poll_queues;
- queue_depth = g_hw_queue_depth;
- numa_node = g_home_node;
- if (g_no_sched)
- flags |= BLK_MQ_F_NO_SCHED;
- if (g_shared_tag_bitmap)
- flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- if (g_blocking)
- flags |= BLK_MQ_F_BLOCKING;
- }
-
- set->ops = &null_mq_ops;
- set->cmd_size = sizeof(struct nullb_cmd);
- set->flags = flags;
- set->driver_data = nullb;
- set->nr_hw_queues = hw_queues;
- set->queue_depth = queue_depth;
- set->numa_node = numa_node;
- if (poll_queues) {
- set->nr_hw_queues += poll_queues;
- set->nr_maps = 3;
- } else {
- set->nr_maps = 1;
+ if (nullb->dev->shared_tags) {
+ nullb->tag_set = &tag_set;
+ return null_init_global_tag_set();
}
- return blk_mq_alloc_tag_set(set);
+ nullb->tag_set = &nullb->__tag_set;
+ nullb->tag_set->driver_data = nullb;
+ nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
+ nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
+ nullb->tag_set->numa_node = nullb->dev->home_node;
+ nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (nullb->dev->no_sched)
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ if (nullb->dev->shared_tag_bitmap)
+ nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (nullb->dev->blocking)
+ nullb->tag_set->flags |= BLK_MQ_F_BLOCKING;
+ return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues);
}
static int null_validate_conf(struct nullb_device *dev)
@@ -2032,11 +1789,15 @@ static int null_validate_conf(struct nullb_device *dev)
pr_err("legacy IO path is no longer available\n");
return -EINVAL;
}
+ if (dev->queue_mode == NULL_Q_BIO) {
+ pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n");
+ dev->queue_mode = NULL_Q_MQ;
+ }
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ if (dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
dev->submit_queues = nr_online_nodes;
} else if (dev->submit_queues > nr_cpu_ids)
@@ -2048,8 +1809,6 @@ static int null_validate_conf(struct nullb_device *dev)
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
dev->prev_poll_queues = dev->poll_queues;
-
- dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
/* Do memory allocation, so set blocking */
@@ -2060,9 +1819,6 @@ static int null_validate_conf(struct nullb_device *dev)
dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
dev->cache_size);
dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
- /* can not stop a queue */
- if (dev->queue_mode == NULL_Q_BIO)
- dev->mbps = 0;
if (dev->zoned &&
(!dev->zone_size || !is_power_of_2(dev->zone_size))) {
@@ -2102,6 +1858,12 @@ static bool null_setup_fault(void)
static int null_add_dev(struct nullb_device *dev)
{
+ struct queue_limits lim = {
+ .logical_block_size = dev->blocksize,
+ .physical_block_size = dev->blocksize,
+ .max_hw_sectors = dev->max_sectors,
+ };
+
struct nullb *nullb;
int rv;
@@ -2123,36 +1885,25 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_free_nullb;
- if (dev->queue_mode == NULL_Q_MQ) {
- if (shared_tags) {
- nullb->tag_set = &tag_set;
- rv = 0;
- } else {
- nullb->tag_set = &nullb->__tag_set;
- rv = null_init_tag_set(nullb, nullb->tag_set);
- }
+ rv = null_setup_tagset(nullb);
+ if (rv)
+ goto out_cleanup_queues;
+ if (dev->virt_boundary)
+ lim.virt_boundary_mask = PAGE_SIZE - 1;
+ null_config_discard(nullb, &lim);
+ if (dev->zoned) {
+ rv = null_init_zoned_dev(dev, &lim);
if (rv)
- goto out_cleanup_queues;
-
- nullb->tag_set->timeout = 5 * HZ;
- nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
- if (IS_ERR(nullb->disk)) {
- rv = PTR_ERR(nullb->disk);
goto out_cleanup_tags;
- }
- nullb->q = nullb->disk->queue;
- } else if (dev->queue_mode == NULL_Q_BIO) {
- rv = -ENOMEM;
- nullb->disk = blk_alloc_disk(nullb->dev->home_node);
- if (!nullb->disk)
- goto out_cleanup_queues;
+ }
- nullb->q = nullb->disk->queue;
- rv = init_driver_queues(nullb);
- if (rv)
- goto out_cleanup_disk;
+ nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
+ if (IS_ERR(nullb->disk)) {
+ rv = PTR_ERR(nullb->disk);
+ goto out_cleanup_zone;
}
+ nullb->q = nullb->disk->queue;
if (dev->mbps) {
set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
@@ -2164,12 +1915,6 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_write_cache(nullb->q, true, true);
}
- if (dev->zoned) {
- rv = null_init_zoned_dev(dev, nullb->q);
- if (rv)
- goto out_cleanup_disk;
- }
-
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
@@ -2177,22 +1922,12 @@ static int null_add_dev(struct nullb_device *dev)
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0) {
mutex_unlock(&lock);
- goto out_cleanup_zone;
+ goto out_cleanup_disk;
}
nullb->index = rv;
dev->index = rv;
mutex_unlock(&lock);
- blk_queue_logical_block_size(nullb->q, dev->blocksize);
- blk_queue_physical_block_size(nullb->q, dev->blocksize);
- if (dev->max_sectors)
- blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
-
- if (dev->virt_boundary)
- blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
-
- null_config_discard(nullb);
-
if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
snprintf(nullb->disk_name, sizeof(nullb->disk_name),
@@ -2201,7 +1936,22 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
}
- rv = null_gendisk_register(nullb);
+ set_capacity(nullb->disk,
+ ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT);
+ nullb->disk->major = null_major;
+ nullb->disk->first_minor = nullb->index;
+ nullb->disk->minors = 1;
+ nullb->disk->fops = &null_ops;
+ nullb->disk->private_data = nullb;
+ strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
+ if (nullb->dev->zoned) {
+ rv = null_register_zoned_dev(nullb);
+ if (rv)
+ goto out_ida_free;
+ }
+
+ rv = add_disk(nullb->disk);
if (rv)
goto out_ida_free;
@@ -2220,10 +1970,10 @@ out_cleanup_zone:
out_cleanup_disk:
put_disk(nullb->disk);
out_cleanup_tags:
- if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
- cleanup_queues(nullb);
+ kfree(nullb->queues);
out_free_nullb:
kfree(nullb);
dev->nullb = NULL;
@@ -2299,7 +2049,7 @@ static int __init null_init(void)
return -EINVAL;
}
- if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
+ if (g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("submit_queues param is set to %u.\n",
nr_online_nodes);
@@ -2311,18 +2061,12 @@ static int __init null_init(void)
g_submit_queues = 1;
}
- if (g_queue_mode == NULL_Q_MQ && shared_tags) {
- ret = null_init_tag_set(NULL, &tag_set);
- if (ret)
- return ret;
- }
-
config_group_init(&nullb_subsys.su_group);
mutex_init(&nullb_subsys.su_mutex);
ret = configfs_register_subsystem(&nullb_subsys);
if (ret)
- goto err_tagset;
+ return ret;
mutex_init(&lock);
@@ -2349,9 +2093,6 @@ err_dev:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
-err_tagset:
- if (g_queue_mode == NULL_Q_MQ && shared_tags)
- blk_mq_free_tag_set(&tag_set);
return ret;
}
@@ -2370,7 +2111,7 @@ static void __exit null_exit(void)
}
mutex_unlock(&lock);
- if (g_queue_mode == NULL_Q_MQ && shared_tags)
+ if (tag_set.ops)
blk_mq_free_tag_set(&tag_set);
}
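With its private tag allocator removed, null_blk leans entirely on the blk-mq convention that the driver command lives in the per-request PDU: cmd_size is set in the tag set, and the request and its PDU are converted into each other with blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() instead of keeping a struct request pointer in the command. A sketch of that round trip for an inline completion, with illustrative names:

#include <linux/blk-mq.h>

struct mycmd {
        blk_status_t error;     /* driver-private per-request state */
};

/* assumes set->cmd_size = sizeof(struct mycmd) when the tag set is allocated */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct mycmd *cmd = blk_mq_rq_to_pdu(bd->rq);   /* request -> PDU */

        cmd->error = BLK_STS_OK;
        blk_mq_start_request(bd->rq);
        /* PDU -> request, as the null_blk timer and poll paths now do */
        blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
        return BLK_STS_OK;
}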
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 929f659dd255..477b97746823 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -16,11 +16,6 @@
#include <linux/mutex.h>
struct nullb_cmd {
- union {
- struct request *rq;
- struct bio *bio;
- };
- unsigned int tag;
blk_status_t error;
bool fake_timeout;
struct nullb_queue *nq;
@@ -28,16 +23,11 @@ struct nullb_cmd {
};
struct nullb_queue {
- unsigned long *tag_map;
- wait_queue_head_t wait;
- unsigned int queue_depth;
struct nullb_device *dev;
unsigned int requeue_selection;
struct list_head poll_list;
spinlock_t poll_lock;
-
- struct nullb_cmd *cmds;
};
struct nullb_zone {
@@ -60,13 +50,6 @@ struct nullb_zone {
unsigned int capacity;
};
-/* Queue modes */
-enum {
- NULL_Q_BIO = 0,
- NULL_Q_RQ = 1,
- NULL_Q_MQ = 2,
-};
-
struct nullb_device {
struct nullb *nullb;
struct config_group group;
@@ -119,6 +102,7 @@ struct nullb_device {
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
bool no_sched; /* no IO scheduler for the device */
+ bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
};
@@ -130,14 +114,12 @@ struct nullb {
struct gendisk *disk;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
- unsigned int queue_depth;
atomic_long_t cur_bytes;
struct hrtimer bw_timer;
unsigned long cache_flush_pos;
spinlock_t lock;
struct nullb_queue *queues;
- unsigned int nr_queues;
char disk_name[DISK_NAME_LEN];
};
@@ -147,7 +129,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
-int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
+int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
@@ -160,7 +142,7 @@ ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
size_t count, enum blk_zone_cond cond);
#else
static inline int null_init_zoned_dev(struct nullb_device *dev,
- struct request_queue *q)
+ struct queue_limits *lim)
{
pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
return -EINVAL;
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index 6b2b370e786f..ef2d05d5f0df 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -41,10 +41,11 @@ TRACE_EVENT(nullb_zone_op,
__field(unsigned int, zone_cond)
),
TP_fast_assign(
- __entry->op = req_op(cmd->rq);
+ __entry->op = req_op(blk_mq_rq_from_pdu(cmd));
__entry->zone_no = zone_no;
__entry->zone_cond = zone_cond;
- __assign_disk_name(__entry->disk, cmd->rq->q->disk);
+ __assign_disk_name(__entry->disk,
+ blk_mq_rq_from_pdu(cmd)->q->disk);
),
TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
__print_disk_name(__entry->disk),
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 6f5e0994862e..1689e2584104 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -58,7 +58,8 @@ static inline void null_unlock_zone(struct nullb_device *dev,
mutex_unlock(&zone->mutex);
}
-int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+int null_init_zoned_dev(struct nullb_device *dev,
+ struct queue_limits *lim)
{
sector_t dev_capacity_sects, zone_capacity_sects;
struct nullb_zone *zone;
@@ -151,27 +152,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
sector += dev->zone_size_sects;
}
+ lim->zoned = true;
+ lim->chunk_sectors = dev->zone_size_sects;
+ lim->max_zone_append_sectors = dev->zone_size_sects;
+ lim->max_open_zones = dev->zone_max_open;
+ lim->max_active_zones = dev->zone_max_active;
return 0;
}
int null_register_zoned_dev(struct nullb *nullb)
{
- struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- disk_set_zoned(nullb->disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
- blk_queue_chunk_sectors(q, dev->zone_size_sects);
nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
- blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
- disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
- disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
-
- if (queue_is_mq(q))
- return blk_revalidate_disk_zones(nullb->disk, NULL);
-
- return 0;
+ return blk_revalidate_disk_zones(nullb->disk, NULL);
}
void null_free_zoned_dev(struct nullb_device *dev)
@@ -394,10 +390,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
*/
if (append) {
sector = zone->wp;
- if (dev->queue_mode == NULL_Q_MQ)
- cmd->rq->__sector = sector;
- else
- cmd->bio->bi_iter.bi_sector = sector;
+ blk_mq_rq_from_pdu(cmd)->__sector = sector;
} else if (sector != zone->wp) {
ret = BLK_STS_IOERR;
goto unlock;
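null_init_zoned_dev() now reports the zone geometry through the limits structure instead of per-queue setters; the fields written above are applied by the same queue_limits commit as everything else. A sketch of filling those fields for a caller that later passes the limits to blk_mq_alloc_disk(); the zero values and helper name are illustrative:

#include <linux/blkdev.h>

static void mydrv_set_zoned_limits(struct queue_limits *lim,
                                   sector_t zone_size_sects)
{
        lim->zoned = true;
        lim->chunk_sectors = zone_size_sects;   /* zone size in sectors */
        lim->max_zone_append_sectors = zone_size_sects;
        lim->max_open_zones = 0;                /* 0: no open-zone limit */
        lim->max_active_zones = 0;              /* 0: no active-zone limit */
}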
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d56d972aadb3..21728e9ea5c3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -340,8 +340,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu
n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(pd->bdev_handle->bdev->bd_dev),
- MINOR(pd->bdev_handle->bdev->bd_dev));
+ MAJOR(file_bdev(pd->bdev_file)->bd_dev),
+ MINOR(file_bdev(pd->bdev_file)->bd_dev));
}
mutex_unlock(&ctl_mutex);
return n;
@@ -438,7 +438,7 @@ static int pkt_seq_show(struct seq_file *m, void *p)
int states[PACKET_NUM_STATES];
seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
- pd->bdev_handle->bdev);
+ file_bdev(pd->bdev_file));
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@@ -715,7 +715,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
- struct request_queue *q = bdev_get_queue(pd->bdev_handle->bdev);
+ struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
@@ -828,6 +828,12 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
*/
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
+ /*
+ * Some CDRW drives can not handle writes larger than one packet,
+ * even if the size is a multiple of the packet size.
+ */
+ bio->bi_opf |= REQ_NOMERGE;
+
spin_lock(&pd->iosched.lock);
if (bio_data_dir(bio) == READ)
bio_list_add(&pd->iosched.read_queue, bio);
@@ -1048,7 +1054,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_init(bio, pd->bdev_handle->bdev, bio->bi_inline_vecs, 1,
+ bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
@@ -1264,7 +1270,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct device *ddev = disk_to_dev(pd->disk);
int f;
- bio_init(pkt->w_bio, pd->bdev_handle->bdev, pkt->w_bio->bi_inline_vecs,
+ bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
pkt->frames, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
@@ -2162,20 +2168,20 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
int ret;
long lba;
struct request_queue *q;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
- bdev_handle = bdev_open_by_dev(pd->bdev_handle->bdev->bd_dev,
+ bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
goto out;
}
- pd->open_bdev_handle = bdev_handle;
+ pd->f_open_bdev = bdev_file;
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@@ -2184,18 +2190,13 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
}
set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(pd->bdev_handle->bdev->bd_disk, lba << 2);
+ set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
- q = bdev_get_queue(pd->bdev_handle->bdev);
+ q = bdev_get_queue(file_bdev(pd->bdev_file));
if (write) {
ret = pkt_open_write(pd);
if (ret)
goto out_putdev;
- /*
- * Some CDRW drives can not handle writes larger than one packet,
- * even if the size is a multiple of the packet size.
- */
- blk_queue_max_hw_sectors(q, pd->settings.size);
set_bit(PACKET_WRITABLE, &pd->flags);
} else {
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
@@ -2218,7 +2219,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
return 0;
out_putdev:
- bdev_release(bdev_handle);
+ fput(bdev_file);
out:
return ret;
}
@@ -2237,8 +2238,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- bdev_release(pd->open_bdev_handle);
- pd->open_bdev_handle = NULL;
+ fput(pd->f_open_bdev);
+ pd->f_open_bdev = NULL;
pkt_shrink_pktlist(pd);
}
@@ -2326,7 +2327,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio = bio_alloc_clone(pd->bdev_handle->bdev, bio,
+ struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
@@ -2338,9 +2339,9 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
pkt_queue_bio(pd, cloned_bio);
}
-static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_write(struct bio *bio)
{
- struct pktcdvd_device *pd = q->queuedata;
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
@@ -2432,7 +2433,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
struct device *ddev = disk_to_dev(pd->disk);
struct bio *split;
@@ -2476,7 +2477,7 @@ static void pkt_submit_bio(struct bio *bio)
split = bio;
}
- pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
+ pkt_make_request_write(split);
} while (split != bio);
return;
@@ -2484,20 +2485,11 @@ end_io:
bio_io_error(bio);
}
-static void pkt_init_queue(struct pktcdvd_device *pd)
-{
- struct request_queue *q = pd->disk->queue;
-
- blk_queue_logical_block_size(q, CD_FRAMESIZE);
- blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
- q->queuedata = pd;
-}
-
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
struct device *ddev = disk_to_dev(pd->disk);
int i;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
@@ -2508,9 +2500,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
- if (pd2->bdev_handle->bdev->bd_dev == dev) {
+ if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
dev_err(ddev, "%pg already setup\n",
- pd2->bdev_handle->bdev);
+ file_bdev(pd2->bdev_file));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@@ -2519,13 +2511,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
- bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
+ bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
NULL, NULL);
- if (IS_ERR(bdev_handle))
- return PTR_ERR(bdev_handle);
- sdev = scsi_device_from_queue(bdev_handle->bdev->bd_disk->queue);
+ if (IS_ERR(bdev_file))
+ return PTR_ERR(bdev_file);
+ sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
if (!sdev) {
- bdev_release(bdev_handle);
+ fput(bdev_file);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
@@ -2533,10 +2525,8 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- pd->bdev_handle = bdev_handle;
- set_blocksize(bdev_handle->bdev, CD_FRAMESIZE);
-
- pkt_init_queue(pd);
+ pd->bdev_file = bdev_file;
+ set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE);
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
@@ -2546,11 +2536,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", bdev_handle->bdev);
+ dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
return 0;
out_mem:
- bdev_release(bdev_handle);
+ fput(bdev_file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
@@ -2605,9 +2595,9 @@ static unsigned int pkt_check_events(struct gendisk *disk,
if (!pd)
return 0;
- if (!pd->bdev_handle)
+ if (!pd->bdev_file)
return 0;
- attached_disk = pd->bdev_handle->bdev->bd_disk;
+ attached_disk = file_bdev(pd->bdev_file)->bd_disk;
if (!attached_disk || !attached_disk->fops->check_events)
return 0;
return attached_disk->fops->check_events(attached_disk, clearing);
@@ -2634,6 +2624,10 @@ static const struct block_device_operations pktcdvd_ops = {
*/
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = PACKET_MAX_SECTORS,
+ .logical_block_size = CD_FRAMESIZE,
+ };
int idx;
int ret = -ENOMEM;
struct pktcdvd_device *pd;
@@ -2673,10 +2667,11 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
- ret = -ENOMEM;
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
goto out_mem;
+ }
pd->disk = disk;
disk->major = pktdev_major;
disk->first_minor = idx;
@@ -2692,7 +2687,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
goto out_mem2;
/* inherit events of the host device */
- disk->events = pd->bdev_handle->bdev->bd_disk->events;
+ disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
ret = add_disk(disk);
if (ret)
@@ -2757,7 +2752,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- bdev_release(pd->bdev_handle);
+ fput(pd->bdev_file);
remove_proc_entry(pd->disk->disk_name, pkt_proc);
dev_notice(ddev, "writer unmapped\n");
@@ -2784,7 +2779,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
if (pd) {
- ctrl_cmd->dev = new_encode_dev(pd->bdev_handle->bdev->bd_dev);
+ ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
} else {
ctrl_cmd->dev = 0;
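pktcdvd is converted from struct bdev_handle to the file-based block device opens used throughout this series: bdev_file_open_by_dev()/bdev_file_open_by_path() return a struct file, file_bdev() recovers the block_device behind it, and fput() drops the reference. A sketch of that open/use/close sequence; the helper name and path handling are illustrative:

#include <linux/blkdev.h>
#include <linux/file.h>

static int mydrv_probe_bdev(const char *path)
{
        struct file *bdev_file;
        struct block_device *bdev;

        bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL);
        if (IS_ERR(bdev_file))
                return PTR_ERR(bdev_file);

        bdev = file_bdev(bdev_file);    /* struct file -> block_device */
        pr_info("opened %pg\n", bdev);

        fput(bdev_file);                /* closes the block device */
        return 0;
}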
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 36d7b36c60c7..b810ac0a5c4b 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -382,6 +382,14 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
struct ps3disk_private *priv;
int error;
unsigned int devidx;
+ struct queue_limits lim = {
+ .logical_block_size = dev->blk_size,
+ .max_hw_sectors = dev->bounce_size >> 9,
+ .max_segments = -1,
+ .max_segment_size = dev->bounce_size,
+ .dma_alignment = dev->blk_size - 1,
+ };
+
struct request_queue *queue;
struct gendisk *gendisk;
@@ -431,7 +439,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
if (error)
goto fail_teardown;
- gendisk = blk_mq_alloc_disk(&priv->tag_set, dev);
+ gendisk = blk_mq_alloc_disk(&priv->tag_set, &lim, dev);
if (IS_ERR(gendisk)) {
dev_err(&dev->sbd.core, "%s:%u: blk_mq_alloc_disk failed\n",
__func__, __LINE__);
@@ -441,15 +449,8 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
queue = gendisk->queue;
- blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
- blk_queue_dma_alignment(queue, dev->blk_size-1);
- blk_queue_logical_block_size(queue, dev->blk_size);
-
blk_queue_write_cache(queue, true, false);
- blk_queue_max_segments(queue, -1);
- blk_queue_max_segment_size(queue, dev->bounce_size);
-
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 38d42af01b25..bdcf083b45e2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -730,10 +730,10 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
ps3vram_proc_init(dev);
- gendisk = blk_alloc_disk(NUMA_NO_NODE);
- if (!gendisk) {
+ gendisk = blk_alloc_disk(NULL, NUMA_NO_NODE);
+ if (IS_ERR(gendisk)) {
dev_err(&dev->core, "blk_alloc_disk failed\n");
- error = -ENOMEM;
+ error = PTR_ERR(gendisk);
goto out_cache_cleanup;
}
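blk_alloc_disk() now takes an optional queue_limits pointer and reports failure with an ERR_PTR instead of NULL, which is why the ps3vram hunk switches from a NULL check to IS_ERR()/PTR_ERR(). A sketch of the updated error handling, passing NULL to keep the default limits; the surrounding names are illustrative:

#include <linux/blkdev.h>
#include <linux/err.h>

static int mydrv_create_disk(struct gendisk **out)
{
        struct gendisk *disk;

        disk = blk_alloc_disk(NULL, NUMA_NO_NODE);      /* NULL = default limits */
        if (IS_ERR(disk))
                return PTR_ERR(disk);                   /* no longer returns NULL */

        *out = disk;
        return 0;
}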
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 12b5d53ec856..26ff5cd2bf0a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -575,7 +575,7 @@ static const struct attribute_group rbd_bus_group = {
};
__ATTRIBUTE_GROUPS(rbd_bus);
-static struct bus_type rbd_bus_type = {
+static const struct bus_type rbd_bus_type = {
.name = "rbd",
.bus_groups = rbd_bus_groups,
};
@@ -4952,6 +4952,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
struct request_queue *q;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
+ struct queue_limits lim = {
+ .max_hw_sectors = objset_bytes >> SECTOR_SHIFT,
+ .max_user_sectors = objset_bytes >> SECTOR_SHIFT,
+ .io_min = rbd_dev->opts->alloc_size,
+ .io_opt = rbd_dev->opts->alloc_size,
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
+ };
int err;
memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
@@ -4966,7 +4974,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (err)
return err;
- disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
+ if (rbd_dev->opts->trim) {
+ lim.discard_granularity = rbd_dev->opts->alloc_size;
+ lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT;
+ lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
+ }
+
+ disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_tag_set;
@@ -4987,19 +5001,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
- blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
- q->limits.max_sectors = queue_max_hw_sectors(q);
- blk_queue_max_segments(q, USHRT_MAX);
- blk_queue_max_segment_size(q, UINT_MAX);
- blk_queue_io_min(q, rbd_dev->opts->alloc_size);
- blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
-
- if (rbd_dev->opts->trim) {
- q->limits.discard_granularity = rbd_dev->opts->alloc_size;
- blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
- blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
- }
-
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
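rbd now decides whether discard is enabled before the disk is allocated and, if it is, fills the discard fields of the same queue_limits structure rather than calling the old per-queue setters afterwards. A sketch of that conditional setup under the same assumptions; the helper name and parameters are illustrative:

#include <linux/blkdev.h>

static void mydrv_setup_discard(struct queue_limits *lim, bool trim_enabled,
                                u32 alloc_size, u64 objset_bytes)
{
        if (!trim_enabled)
                return;         /* leave discard limits zeroed: feature off */

        lim->discard_granularity = alloc_size;
        lim->max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT;
        lim->max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
}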
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 4044c369d22a..b7ffe03c6160 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1329,43 +1329,6 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
}
}
-static void setup_request_queue(struct rnbd_clt_dev *dev,
- struct rnbd_msg_open_rsp *rsp)
-{
- blk_queue_logical_block_size(dev->queue,
- le16_to_cpu(rsp->logical_block_size));
- blk_queue_physical_block_size(dev->queue,
- le16_to_cpu(rsp->physical_block_size));
- blk_queue_max_hw_sectors(dev->queue,
- dev->sess->max_io_size / SECTOR_SIZE);
-
- /*
- * we don't support discards to "discontiguous" segments
- * in on request
- */
- blk_queue_max_discard_segments(dev->queue, 1);
-
- blk_queue_max_discard_sectors(dev->queue,
- le32_to_cpu(rsp->max_discard_sectors));
- dev->queue->limits.discard_granularity =
- le32_to_cpu(rsp->discard_granularity);
- dev->queue->limits.discard_alignment =
- le32_to_cpu(rsp->discard_alignment);
- if (le16_to_cpu(rsp->secure_discard))
- blk_queue_max_secure_erase_sectors(dev->queue,
- le32_to_cpu(rsp->max_discard_sectors));
- blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
- blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_max_segments(dev->queue, dev->sess->max_segments);
- blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
- blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue,
- !!(rsp->cache_policy & RNBD_WRITEBACK),
- !!(rsp->cache_policy & RNBD_FUA));
- blk_queue_max_write_zeroes_sectors(dev->queue,
- le32_to_cpu(rsp->max_write_zeroes_sectors));
-}
-
static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp, int idx)
{
@@ -1403,18 +1366,41 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp)
{
+ struct queue_limits lim = {
+ .logical_block_size = le16_to_cpu(rsp->logical_block_size),
+ .physical_block_size = le16_to_cpu(rsp->physical_block_size),
+ .io_opt = dev->sess->max_io_size,
+ .max_hw_sectors = dev->sess->max_io_size / SECTOR_SIZE,
+ .max_hw_discard_sectors = le32_to_cpu(rsp->max_discard_sectors),
+ .discard_granularity = le32_to_cpu(rsp->discard_granularity),
+ .discard_alignment = le32_to_cpu(rsp->discard_alignment),
+ .max_segments = dev->sess->max_segments,
+ .virt_boundary_mask = SZ_4K - 1,
+ .max_write_zeroes_sectors =
+ le32_to_cpu(rsp->max_write_zeroes_sectors),
+ };
int idx = dev->clt_device_id;
dev->size = le64_to_cpu(rsp->nsectors) *
le16_to_cpu(rsp->logical_block_size);
- dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
+ if (rsp->secure_discard) {
+ lim.max_secure_erase_sectors =
+ le32_to_cpu(rsp->max_discard_sectors);
+ }
+
+ dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
if (IS_ERR(dev->gd))
return PTR_ERR(dev->gd);
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- setup_request_queue(dev, rsp);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
+ blk_queue_write_cache(dev->queue,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
+
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
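
[Editor's note] The rnbd-clt hunks above replace post-allocation blk_queue_*() setters with a struct queue_limits that is handed directly to blk_mq_alloc_disk(). A minimal sketch of that pattern follows; struct demo_dev and its fields are illustrative stand-ins, not part of the patch.

#include <linux/blk-mq.h>

/* Illustrative only: a made-up driver structure carrying the values the
 * hunks above pull from the open response / session state. */
struct demo_dev {
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*disk;
	unsigned int		lbs;		/* logical block size */
	unsigned int		max_io_bytes;
	unsigned short		max_segments;
};

static int demo_alloc_disk(struct demo_dev *dev)
{
	/* Describe the queue limits up front ... */
	struct queue_limits lim = {
		.logical_block_size	= dev->lbs,
		.max_hw_sectors		= dev->max_io_bytes / SECTOR_SIZE,
		.max_segments		= dev->max_segments,
	};
	struct gendisk *disk;

	/* ... and pass them at allocation time instead of calling
	 * blk_queue_*() helpers on disk->queue afterwards. */
	disk = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	dev->disk = disk;
	return 0;
}
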
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 3a0d5dcec6f2..f6e3a3c4b76c 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -145,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(sess_dev->bdev_handle->bdev, 1,
+ bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- bdev_release(sess_dev->bdev_handle);
+ fput(sess_dev->bdev_file);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (!sess_dev->readonly)
@@ -534,7 +534,7 @@ rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
- struct block_device *bdev = sess_dev->bdev_handle->bdev;
+ struct block_device *bdev = file_bdev(sess_dev->bdev_file);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = cpu_to_le32(sess_dev->device_id);
@@ -560,7 +560,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct bdev_handle *handle, bool readonly,
+ struct file *bdev_file, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -572,7 +572,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
- sdev->bdev_handle = handle;
+ sdev->bdev_file = bdev_file;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->readonly = readonly;
@@ -678,7 +678,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
@@ -716,15 +716,15 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
+ bdev_file = bdev_file_open_by_path(full_path, open_flags, NULL, NULL);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
- full_path, srv_sess->sessname, bdev_handle);
+ full_path, srv_sess->sessname, bdev_file);
goto free_path;
}
- srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
+ srv_dev = rnbd_srv_get_or_create_srv_dev(file_bdev(bdev_file), srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
@@ -734,7 +734,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
}
srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
- bdev_handle,
+ bdev_file,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
@@ -750,7 +750,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev_handle->bdev);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, file_bdev(bdev_file));
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -793,7 +793,7 @@ srv_dev_put:
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
- bdev_release(bdev_handle);
+ fput(bdev_file);
free_path:
kfree(full_path);
reject:
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 343cc682b617..18d873808b8d 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;
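
[Editor's note] The rnbd-srv hunks (and several conversions below) swap struct bdev_handle for a struct file obtained from bdev_file_open_by_path(); the underlying block_device is reached via file_bdev() and the reference is dropped with fput(). A condensed sketch under those assumptions; demo_open_bdev() and its logging are illustrative only.

#include <linux/blkdev.h>
#include <linux/file.h>

/* Open a block device by path, inspect it, then drop the reference,
 * mirroring the calls used in the hunks above. */
static int demo_open_bdev(const char *path)
{
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	bdev = file_bdev(bdev_file);
	pr_info("demo: %pg has %llu sectors\n", bdev,
		(unsigned long long)bdev_nr_sectors(bdev));

	fput(bdev_file);
	return 0;
}
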
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 7bf4b48e2282..c99dd6698977 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -784,6 +784,14 @@ static const struct blk_mq_ops vdc_mq_ops = {
static int probe_disk(struct vdc_port *port)
{
+ struct queue_limits lim = {
+ .physical_block_size = port->vdisk_phys_blksz,
+ .max_hw_sectors = port->max_xfer_size,
+ /* Each segment in a request is up to an aligned page in size. */
+ .seg_boundary_mask = PAGE_SIZE - 1,
+ .max_segment_size = PAGE_SIZE,
+ .max_segments = port->ring_cookies,
+ };
struct request_queue *q;
struct gendisk *g;
int err;
@@ -824,7 +832,7 @@ static int probe_disk(struct vdc_port *port)
if (err)
return err;
- g = blk_mq_alloc_disk(&port->tag_set, port);
+ g = blk_mq_alloc_disk(&port->tag_set, &lim, port);
if (IS_ERR(g)) {
printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
port->vio.name);
@@ -835,12 +843,6 @@ static int probe_disk(struct vdc_port *port)
port->disk = g;
q = g->queue;
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_max_segment_size(q, PAGE_SIZE);
-
- blk_queue_max_segments(q, port->ring_cookies);
- blk_queue_max_hw_sectors(q, port->max_xfer_size);
g->major = vdc_major;
g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
g->minors = 1 << PARTITION_SHIFT;
@@ -872,8 +874,6 @@ static int probe_disk(struct vdc_port *port)
}
}
- blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
-
pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
g->disk_name,
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index f85b6af414b4..6731678f3a41 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -820,7 +820,7 @@ static int swim_floppy_init(struct swim_priv *swd)
goto exit_put_disks;
swd->unit[drive].disk =
- blk_mq_alloc_disk(&swd->unit[drive].tag_set,
+ blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
@@ -916,7 +916,7 @@ out:
return ret;
}
-static int swim_remove(struct platform_device *dev)
+static void swim_remove(struct platform_device *dev)
{
struct swim_priv *swd = platform_get_drvdata(dev);
int drive;
@@ -937,13 +937,11 @@ static int swim_remove(struct platform_device *dev)
release_mem_region(res->start, resource_size(res));
kfree(swd);
-
- return 0;
}
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove = swim_remove,
+ .remove_new = swim_remove,
.driver = {
.name = CARDNAME,
},
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index c2bc85826358..a04756ac778e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1210,7 +1210,7 @@ static int swim3_attach(struct macio_dev *mdev,
if (rc)
goto out_unregister;
- disk = blk_mq_alloc_disk(&fs->tag_set, fs);
+ disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 1dfb2e77898b..bea3d5cf8a83 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -246,21 +246,12 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
return 0;
}
-static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
+static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- const struct ublk_param_zoned *p = &ub->params.zoned;
-
- disk_set_zoned(ub->ub_disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
blk_queue_required_elevator_features(ub->ub_disk->queue,
ELEVATOR_F_ZBD_SEQ_WRITE);
- disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
- disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
- blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
-
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
-
- return 0;
}
/* Based on virtblk_alloc_report_buffer */
@@ -432,9 +423,8 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
return -EOPNOTSUPP;
}
-static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
+static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- return -EOPNOTSUPP;
}
static int ublk_revalidate_disk_zones(struct ublk_device *ub)
@@ -498,11 +488,6 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub)
struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
- blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
- blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
- blk_queue_io_min(q, 1 << p->io_min_shift);
- blk_queue_io_opt(q, 1 << p->io_opt_shift);
-
blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
p->attrs & UBLK_ATTR_FUA);
if (p->attrs & UBLK_ATTR_ROTATIONAL)
@@ -510,29 +495,12 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub)
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_max_hw_sectors(q, p->max_sectors);
- blk_queue_chunk_sectors(q, p->chunk_sectors);
- blk_queue_virt_boundary(q, p->virt_boundary_mask);
-
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
set_capacity(ub->ub_disk, p->dev_sectors);
}
-static void ublk_dev_param_discard_apply(struct ublk_device *ub)
-{
- struct request_queue *q = ub->ub_disk->queue;
- const struct ublk_param_discard *p = &ub->params.discard;
-
- q->limits.discard_alignment = p->discard_alignment;
- q->limits.discard_granularity = p->discard_granularity;
- blk_queue_max_discard_sectors(q, p->max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q,
- p->max_write_zeroes_sectors);
- blk_queue_max_discard_segments(q, p->max_discard_segments);
-}
-
static int ublk_validate_params(const struct ublk_device *ub)
{
/* basic param is the only one which must be set */
@@ -576,20 +544,12 @@ static int ublk_validate_params(const struct ublk_device *ub)
return 0;
}
-static int ublk_apply_params(struct ublk_device *ub)
+static void ublk_apply_params(struct ublk_device *ub)
{
- if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
- return -EINVAL;
-
ublk_dev_param_basic_apply(ub);
- if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
- ublk_dev_param_discard_apply(ub);
-
if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
- return ublk_dev_param_zoned_apply(ub);
-
- return 0;
+ ublk_dev_param_zoned_apply(ub);
}
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
@@ -645,14 +605,16 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
-static struct ublk_device *ublk_get_device(struct ublk_device *ub)
+/* Called in slow path only, keep it noinline for trace purpose */
+static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
return ub;
return NULL;
}
-static void ublk_put_device(struct ublk_device *ub)
+/* Called in slow path only, keep it noinline for trace purpose */
+static noinline void ublk_put_device(struct ublk_device *ub)
{
put_device(&ub->cdev_dev);
}
@@ -711,7 +673,7 @@ static void ublk_free_disk(struct gendisk *disk)
struct ublk_device *ub = disk->private_data;
clear_bit(UB_STATE_USED, &ub->state);
- put_device(&ub->cdev_dev);
+ ublk_put_device(ub);
}
static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
@@ -2182,7 +2144,7 @@ static void ublk_remove(struct ublk_device *ub)
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
- put_device(&ub->cdev_dev);
+ ublk_put_device(ub);
ublks_added--;
}
@@ -2205,12 +2167,47 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
+ const struct ublk_param_basic *p = &ub->params.basic;
int ublksrv_pid = (int)header->data[0];
+ struct queue_limits lim = {
+ .logical_block_size = 1 << p->logical_bs_shift,
+ .physical_block_size = 1 << p->physical_bs_shift,
+ .io_min = 1 << p->io_min_shift,
+ .io_opt = 1 << p->io_opt_shift,
+ .max_hw_sectors = p->max_sectors,
+ .chunk_sectors = p->chunk_sectors,
+ .virt_boundary_mask = p->virt_boundary_mask,
+
+ };
struct gendisk *disk;
int ret = -EINVAL;
if (ublksrv_pid <= 0)
return -EINVAL;
+ if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
+ return -EINVAL;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
+ const struct ublk_param_discard *pd = &ub->params.discard;
+
+ lim.discard_alignment = pd->discard_alignment;
+ lim.discard_granularity = pd->discard_granularity;
+ lim.max_hw_discard_sectors = pd->max_discard_sectors;
+ lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
+ lim.max_discard_segments = pd->max_discard_segments;
+ }
+
+ if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
+ const struct ublk_param_zoned *p = &ub->params.zoned;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return -EOPNOTSUPP;
+
+ lim.zoned = true;
+ lim.max_active_zones = p->max_active_zones;
+ lim.max_open_zones = p->max_open_zones;
+ lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ }
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
@@ -2222,7 +2219,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
goto out_unlock;
}
- disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
+ disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_unlock;
@@ -2234,15 +2231,13 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
ub->dev_info.ublksrv_pid = ublksrv_pid;
ub->ub_disk = disk;
- ret = ublk_apply_params(ub);
- if (ret)
- goto out_put_disk;
+ ublk_apply_params(ub);
/* don't probe partitions if any one ubq daemon is un-trusted */
if (ub->nr_privileged_daemon != ub->nr_queues_ready)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
- get_device(&ub->cdev_dev);
+ ublk_get_device(ub);
ub->dev_info.state = UBLK_S_DEV_LIVE;
if (ublk_dev_is_zoned(ub)) {
@@ -2262,7 +2257,6 @@ out_put_cdev:
ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
}
-out_put_disk:
if (ret)
put_disk(disk);
out_unlock:
@@ -2474,7 +2468,7 @@ static inline bool ublk_idr_freed(int id)
return ptr == NULL;
}
-static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
+static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
{
struct ublk_device *ub = *p_ub;
int idx = ub->ub_number;
@@ -2508,7 +2502,7 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
* - the device number is freed already, we will not find this
* device via ublk_get_device_from_id()
*/
- if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
+ if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
return -EINTR;
return 0;
}
@@ -2907,7 +2901,10 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_add_dev(cmd);
break;
case UBLK_CMD_DEL_DEV:
- ret = ublk_ctrl_del_dev(&ub);
+ ret = ublk_ctrl_del_dev(&ub, true);
+ break;
+ case UBLK_U_CMD_DEL_DEV_ASYNC:
+ ret = ublk_ctrl_del_dev(&ub, false);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
ret = ublk_ctrl_get_queue_affinity(ub, cmd);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2bf14a0e2815..42dea7601d87 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -720,25 +720,24 @@ fail_report:
return ret;
}
-static int virtblk_probe_zoned_device(struct virtio_device *vdev,
- struct virtio_blk *vblk,
- struct request_queue *q)
+static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
+ struct virtio_device *vdev = vblk->vdev;
u32 v, wg;
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- disk_set_zoned(vblk->disk);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ lim->zoned = true;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
- disk_set_max_open_zones(vblk->disk, v);
+ lim->max_open_zones = v;
dev_dbg(&vdev->dev, "max open zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
- disk_set_max_active_zones(vblk->disk, v);
+ lim->max_active_zones = v;
dev_dbg(&vdev->dev, "max active zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
@@ -747,8 +746,8 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
- blk_queue_physical_block_size(q, wg);
- blk_queue_io_min(q, wg);
+ lim->physical_block_size = wg;
+ lim->io_min = wg;
dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
@@ -764,13 +763,13 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
vblk->zone_sectors);
return -ENODEV;
}
- blk_queue_chunk_sectors(q, vblk->zone_sectors);
+ lim->chunk_sectors = vblk->zone_sectors;
dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
dev_warn(&vblk->vdev->dev,
"ignoring negotiated F_DISCARD for zoned device\n");
- blk_queue_max_discard_sectors(q, 0);
+ lim->max_hw_discard_sectors = 0;
}
virtio_cread(vdev, struct virtio_blk_config,
@@ -785,25 +784,21 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
wg, v);
return -ENODEV;
}
- blk_queue_max_zone_append_sectors(q, v);
+ lim->max_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
- return blk_revalidate_disk_zones(vblk->disk, NULL);
+ return 0;
}
-
#else
-
/*
- * Zoned block device support is not configured in this kernel.
- * Host-managed zoned devices can't be supported, but others are
- * good to go as regular block devices.
+ * Zoned block device support is not configured in this kernel, host-managed
+ * zoned devices can't be supported.
*/
#define virtblk_report_zones NULL
-
-static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
- struct virtio_blk *vblk, struct request_queue *q)
+static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
- dev_err(&vdev->dev,
+ dev_err(&vblk->vdev->dev,
"virtio_blk: zoned devices are not supported");
return -EOPNOTSUPP;
}
@@ -1248,31 +1243,17 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
-static int virtblk_probe(struct virtio_device *vdev)
+static int virtblk_read_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
- struct virtio_blk *vblk;
- struct request_queue *q;
- int err, index;
-
+ struct virtio_device *vdev = vblk->vdev;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
u32 max_discard_segs = 0;
u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
- unsigned int queue_depth;
size_t max_dma_size;
-
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
- __func__);
- return -EINVAL;
- }
-
- err = ida_alloc_range(&vd_index_ida, 0,
- minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
- if (err < 0)
- goto out;
- index = err;
+ int err;
/* We need to know how many segments before we allocate. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -1286,78 +1267,11 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Prevent integer overflows and honor max vq size */
sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
- vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
- if (!vblk) {
- err = -ENOMEM;
- goto out_free_index;
- }
-
- mutex_init(&vblk->vdev_mutex);
-
- vblk->vdev = vdev;
-
- INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
-
- err = init_vq(vblk);
- if (err)
- goto out_free_vblk;
-
- /* Default queue sizing is to fill the ring. */
- if (!virtblk_queue_depth) {
- queue_depth = vblk->vqs[0].vq->num_free;
- /* ... but without indirect descs, we use 2 descs per req */
- if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
- queue_depth /= 2;
- } else {
- queue_depth = virtblk_queue_depth;
- }
-
- memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
- vblk->tag_set.ops = &virtio_mq_ops;
- vblk->tag_set.queue_depth = queue_depth;
- vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- vblk->tag_set.cmd_size =
- sizeof(struct virtblk_req) +
- sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
- vblk->tag_set.driver_data = vblk;
- vblk->tag_set.nr_hw_queues = vblk->num_vqs;
- vblk->tag_set.nr_maps = 1;
- if (vblk->io_queues[HCTX_TYPE_POLL])
- vblk->tag_set.nr_maps = 3;
-
- err = blk_mq_alloc_tag_set(&vblk->tag_set);
- if (err)
- goto out_free_vq;
-
- vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
- if (IS_ERR(vblk->disk)) {
- err = PTR_ERR(vblk->disk);
- goto out_free_tags;
- }
- q = vblk->disk->queue;
-
- virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
-
- vblk->disk->major = major;
- vblk->disk->first_minor = index_to_minor(index);
- vblk->disk->minors = 1 << PART_BITS;
- vblk->disk->private_data = vblk;
- vblk->disk->fops = &virtblk_fops;
- vblk->index = index;
-
- /* configure queue flush support */
- virtblk_update_cache_mode(vdev);
-
- /* If disk is read-only in the host, the guest should obey */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
- set_disk_ro(vblk->disk, 1);
-
/* We can handle whatever the host told us to handle. */
- blk_queue_max_segments(q, sg_elems);
+ lim->max_segments = sg_elems;
/* No real sector limit. */
- blk_queue_max_hw_sectors(q, UINT_MAX);
+ lim->max_hw_sectors = UINT_MAX;
max_dma_size = virtio_max_dma_size(vdev);
max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
@@ -1369,7 +1283,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!err)
max_size = min(max_size, v);
- blk_queue_max_segment_size(q, max_size);
+ lim->max_segment_size = max_size;
/* Host can optionally specify the block size of the device */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
@@ -1381,38 +1295,37 @@ static int virtblk_probe(struct virtio_device *vdev)
dev_err(&vdev->dev,
"virtio_blk: invalid block size: 0x%x\n",
blk_size);
- goto out_cleanup_disk;
+ return err;
}
- blk_queue_logical_block_size(q, blk_size);
+ lim->logical_block_size = blk_size;
} else
- blk_size = queue_logical_block_size(q);
+ blk_size = lim->logical_block_size;
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
&physical_block_exp);
if (!err && physical_block_exp)
- blk_queue_physical_block_size(q,
- blk_size * (1 << physical_block_exp));
+ lim->physical_block_size = blk_size * (1 << physical_block_exp);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, alignment_offset,
&alignment_offset);
if (!err && alignment_offset)
- blk_queue_alignment_offset(q, blk_size * alignment_offset);
+ lim->alignment_offset = blk_size * alignment_offset;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, min_io_size,
&min_io_size);
if (!err && min_io_size)
- blk_queue_io_min(q, blk_size * min_io_size);
+ lim->io_min = blk_size * min_io_size;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, opt_io_size,
&opt_io_size);
if (!err && opt_io_size)
- blk_queue_io_opt(q, blk_size * opt_io_size);
+ lim->io_opt = blk_size * opt_io_size;
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
@@ -1420,7 +1333,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
- blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
+ lim->max_hw_discard_sectors = v ? v : UINT_MAX;
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
&max_discard_segs);
@@ -1429,7 +1342,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
virtio_cread(vdev, struct virtio_blk_config,
max_write_zeroes_sectors, &v);
- blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
+ lim->max_write_zeroes_sectors = v ? v : UINT_MAX;
}
/* The discard and secure erase limits are combined since the Linux
@@ -1455,8 +1368,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: secure_erase_sector_alignment can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
discard_granularity = min_not_zero(discard_granularity, v);
@@ -1470,11 +1382,10 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_sectors can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
- blk_queue_max_secure_erase_sectors(q, v);
+ lim->max_secure_erase_sectors = v;
virtio_cread(vdev, struct virtio_blk_config,
max_secure_erase_seg, &v);
@@ -1485,8 +1396,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_seg can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
max_discard_segs = min_not_zero(max_discard_segs, v);
@@ -1502,45 +1412,142 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!max_discard_segs)
max_discard_segs = sg_elems;
- blk_queue_max_discard_segments(q,
- min(max_discard_segs, MAX_DISCARD_SEGMENTS));
+ lim->max_discard_segments =
+ min(max_discard_segs, MAX_DISCARD_SEGMENTS);
if (discard_granularity)
- q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
+ lim->discard_granularity =
+ discard_granularity << SECTOR_SHIFT;
else
- q->limits.discard_granularity = blk_size;
+ lim->discard_granularity = blk_size;
}
- virtblk_update_capacity(vblk, false);
- virtio_device_ready(vdev);
-
- /*
- * All steps that follow use the VQs therefore they need to be
- * placed after the virtio_device_ready() call above.
- */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
u8 model;
- virtio_cread(vdev, struct virtio_blk_config, zoned.model,
- &model);
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
switch (model) {
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
- /* Present the host-aware device as non-zoned */
- break;
+ /* treat host-aware devices as non-zoned */
+ return 0;
case VIRTIO_BLK_Z_HM:
- err = virtblk_probe_zoned_device(vdev, vblk, q);
+ err = virtblk_read_zoned_limits(vblk, lim);
if (err)
- goto out_cleanup_disk;
+ return err;
break;
default:
- dev_err(&vdev->dev, "unsupported zone model %d\n",
- model);
- err = -EINVAL;
- goto out_cleanup_disk;
+ dev_err(&vdev->dev, "unsupported zone model %d\n", model);
+ return -EINVAL;
}
}
+ return 0;
+}
+
+static int virtblk_probe(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk;
+ struct queue_limits lim = { };
+ int err, index;
+ unsigned int queue_depth;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = ida_alloc_range(&vd_index_ida, 0,
+ minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
+ if (err < 0)
+ goto out;
+ index = err;
+
+ vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+ if (!vblk) {
+ err = -ENOMEM;
+ goto out_free_index;
+ }
+
+ mutex_init(&vblk->vdev_mutex);
+
+ vblk->vdev = vdev;
+
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+
+ err = init_vq(vblk);
+ if (err)
+ goto out_free_vblk;
+
+ /* Default queue sizing is to fill the ring. */
+ if (!virtblk_queue_depth) {
+ queue_depth = vblk->vqs[0].vq->num_free;
+ /* ... but without indirect descs, we use 2 descs per req */
+ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+ queue_depth /= 2;
+ } else {
+ queue_depth = virtblk_queue_depth;
+ }
+
+ memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+ vblk->tag_set.ops = &virtio_mq_ops;
+ vblk->tag_set.queue_depth = queue_depth;
+ vblk->tag_set.numa_node = NUMA_NO_NODE;
+ vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ vblk->tag_set.cmd_size =
+ sizeof(struct virtblk_req) +
+ sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
+ vblk->tag_set.driver_data = vblk;
+ vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+ vblk->tag_set.nr_maps = 1;
+ if (vblk->io_queues[HCTX_TYPE_POLL])
+ vblk->tag_set.nr_maps = 3;
+
+ err = blk_mq_alloc_tag_set(&vblk->tag_set);
+ if (err)
+ goto out_free_vq;
+
+ err = virtblk_read_limits(vblk, &lim);
+ if (err)
+ goto out_free_tags;
+
+ vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
+ if (IS_ERR(vblk->disk)) {
+ err = PTR_ERR(vblk->disk);
+ goto out_free_tags;
+ }
+
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+
+ vblk->disk->major = major;
+ vblk->disk->first_minor = index_to_minor(index);
+ vblk->disk->minors = 1 << PART_BITS;
+ vblk->disk->private_data = vblk;
+ vblk->disk->fops = &virtblk_fops;
+ vblk->index = index;
+
+ /* configure queue flush support */
+ virtblk_update_cache_mode(vdev);
+
+ /* If disk is read-only in the host, the guest should obey */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
+ set_disk_ro(vblk->disk, 1);
+
+ virtblk_update_capacity(vblk, false);
+ virtio_device_ready(vdev);
+
+ /*
+ * All steps that follow use the VQs therefore they need to be
+ * placed after the virtio_device_ready() call above.
+ */
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
+ err = blk_revalidate_disk_zones(vblk->disk, NULL);
+ if (err)
+ goto out_cleanup_disk;
+ }
+
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;
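
[Editor's note] In the virtio_blk conversion above, all limits (including the zoned ones) are gathered into struct queue_limits before the disk exists, and only the zone revalidation step stays after allocation. A condensed sketch of that ordering; demo_dev and demo_read_limits() are hypothetical stand-ins for the driver structure and its limit-reading helper (virtblk_read_limits() in the patch).

#include <linux/blk-mq.h>

struct demo_dev {
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*disk;
};

/* Stand-in for the driver's limit discovery; may set lim->zoned. */
static int demo_read_limits(struct demo_dev *dev, struct queue_limits *lim);

static int demo_probe(struct demo_dev *dev)
{
	struct queue_limits lim = { };
	int err;

	err = demo_read_limits(dev, &lim);
	if (err)
		return err;

	dev->disk = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
	if (IS_ERR(dev->disk))
		return PTR_ERR(dev->disk);

	/* Only work that needs a live queue remains after allocation. */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, dev->disk->queue);
		err = blk_revalidate_disk_zones(dev->disk, NULL);
		if (err) {
			put_disk(dev->disk);
			return err;
		}
	}
	return 0;
}
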
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4defd7f387c7..944576d582fb 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -465,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
}
req->dev = vbd->pdevice;
- req->bdev = vbd->bdev_handle->bdev;
+ req->bdev = file_bdev(vbd->bdev_file);
rc = 0;
out:
@@ -969,7 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
int err = 0;
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
- struct block_device *bdev = blkif->vbd.bdev_handle->bdev;
+ struct block_device *bdev = file_bdev(blkif->vbd.bdev_file);
struct phys_req preq;
xen_blkif_get(blkif);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 1432c83183d0..b427d54bc120 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -221,7 +221,7 @@ struct xen_vbd {
unsigned char type;
/* phys device that this vbd maps to. */
u32 pdevice;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/* Cached size parameter. */
sector_t size;
unsigned int flush_support:1;
@@ -360,7 +360,7 @@ struct pending_req {
};
-#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev_handle->bdev)
+#define vbd_sz(_v) bdev_nr_sectors(file_bdev((_v)->bdev_file))
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e34219ea2b05..0621878940ae 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -81,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
int i;
/* Not ready to connect? */
- if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle)
+ if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)
return;
/* Already connected? */
@@ -99,13 +99,12 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = sync_blockdev(blkif->vbd.bdev_handle->bdev);
+ err = sync_blockdev(file_bdev(blkif->vbd.bdev_file));
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
}
- invalidate_inode_pages2(
- blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping);
+ invalidate_inode_pages2(blkif->vbd.bdev_file->f_mapping);
for (i = 0; i < blkif->nr_rings; i++) {
ring = &blkif->rings[i];
@@ -473,9 +472,9 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
static void xen_vbd_free(struct xen_vbd *vbd)
{
- if (vbd->bdev_handle)
- bdev_release(vbd->bdev_handle);
- vbd->bdev_handle = NULL;
+ if (vbd->bdev_file)
+ fput(vbd->bdev_file);
+ vbd->bdev_file = NULL;
}
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
@@ -483,7 +482,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
int cdrom)
{
struct xen_vbd *vbd;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
vbd = &blkif->vbd;
vbd->handle = handle;
@@ -492,17 +491,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->pdevice = MKDEV(major, minor);
- bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ?
+ bdev_file = bdev_file_open_by_dev(vbd->pdevice, vbd->readonly ?
BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
- vbd->bdev_handle = bdev_handle;
- if (vbd->bdev_handle->bdev->bd_disk == NULL) {
+ vbd->bdev_file = bdev_file;
+ if (file_bdev(vbd->bdev_file)->bd_disk == NULL) {
pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
@@ -510,14 +509,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
- if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk))
+ if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk))
vbd->type |= VDISK_CDROM;
- if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+ if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
- if (bdev_write_cache(bdev_handle->bdev))
+ if (bdev_write_cache(file_bdev(bdev_file)))
vbd->flush_support = true;
- if (bdev_max_secure_erase_sectors(bdev_handle->bdev))
+ if (bdev_max_secure_erase_sectors(file_bdev(bdev_file)))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
@@ -570,7 +569,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xen_blkif *blkif = be->blkif;
int err;
int state = 0;
- struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev;
+ struct block_device *bdev = file_bdev(be->blkif->vbd.bdev_file);
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
@@ -932,7 +931,7 @@ again:
}
err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
(unsigned long)bdev_logical_block_size(
- be->blkif->vbd.bdev_handle->bdev));
+ file_bdev(be->blkif->vbd.bdev_file)));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sector-size",
dev->nodename);
@@ -940,7 +939,7 @@ again:
}
err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
bdev_physical_block_size(
- be->blkif->vbd.bdev_handle->bdev));
+ file_bdev(be->blkif->vbd.bdev_file)));
if (err)
xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
dev->nodename);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 434fab306777..fd7c0ff2139c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -941,39 +941,35 @@ static const struct blk_mq_ops blkfront_mq_ops = {
.complete = blkif_complete_rq,
};
-static void blkif_set_queue_limits(struct blkfront_info *info)
+static void blkif_set_queue_limits(const struct blkfront_info *info,
+ struct queue_limits *lim)
{
- struct request_queue *rq = info->rq;
- struct gendisk *gd = info->gd;
unsigned int segments = info->max_indirect_segments ? :
BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
-
if (info->feature_discard) {
- blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity ?:
- info->physical_sector_size;
- rq->limits.discard_alignment = info->discard_alignment;
+ lim->max_hw_discard_sectors = UINT_MAX;
+ if (info->discard_granularity)
+ lim->discard_granularity = info->discard_granularity;
+ lim->discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
- blk_queue_max_secure_erase_sectors(rq,
- get_capacity(gd));
+ lim->max_secure_erase_sectors = UINT_MAX;
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_logical_block_size(rq, info->sector_size);
- blk_queue_physical_block_size(rq, info->physical_sector_size);
- blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+ lim->logical_block_size = info->sector_size;
+ lim->physical_block_size = info->physical_sector_size;
+ lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512;
/* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
- blk_queue_max_segment_size(rq, PAGE_SIZE);
+ lim->seg_boundary_mask = PAGE_SIZE - 1;
+ lim->max_segment_size = PAGE_SIZE;
/* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+ lim->max_segments = segments / GRANTS_PER_PSEG;
/* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(rq, 511);
+ lim->dma_alignment = 511;
}
static const char *flush_info(struct blkfront_info *info)
@@ -1070,6 +1066,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info, u16 sector_size,
unsigned int physical_sector_size)
{
+ struct queue_limits lim = {};
struct gendisk *gd;
int nr_minors = 1;
int err;
@@ -1136,11 +1133,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (err)
goto out_release_minors;
- gd = blk_mq_alloc_disk(&info->tag_set, info);
+ blkif_set_queue_limits(info, &lim);
+ gd = blk_mq_alloc_disk(&info->tag_set, &lim, info);
if (IS_ERR(gd)) {
err = PTR_ERR(gd);
goto out_free_tag_set;
}
+ blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1162,7 +1161,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
info->gd = gd;
info->sector_size = sector_size;
info->physical_sector_size = physical_sector_size;
- blkif_set_queue_limits(info);
xlvbd_flush(info);
@@ -2006,18 +2004,19 @@ static int blkfront_probe(struct xenbus_device *dev,
static int blkif_recover(struct blkfront_info *info)
{
+ struct queue_limits lim;
unsigned int r_index;
struct request *req, *n;
int rc;
struct bio *bio;
- unsigned int segs;
struct blkfront_ring_info *rinfo;
+ lim = queue_limits_start_update(info->rq);
blkfront_gather_backend_features(info);
- /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
- blkif_set_queue_limits(info);
- segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
+ blkif_set_queue_limits(info, &lim);
+ rc = queue_limits_commit_update(info->rq, &lim);
+ if (rc)
+ return rc;
for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
@@ -2037,7 +2036,9 @@ static int blkif_recover(struct blkfront_info *info)
list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
- BUG_ON(req->nr_phys_segments > segs);
+ BUG_ON(req->nr_phys_segments >
+ (info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST));
blk_mq_requeue_request(req, false);
}
blk_mq_start_stopped_hw_queues(info->rq, true);
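
[Editor's note] For a queue that already exists (the blkif_recover() path above), limits are changed through queue_limits_start_update()/queue_limits_commit_update() rather than individual setters. A minimal sketch of that update sequence; demo_update_limits() is illustrative only.

#include <linux/blkdev.h>

/* Atomically update the limits of a live request queue: take a snapshot,
 * modify it, then commit, as in the recovery hunk above. */
static int demo_update_limits(struct request_queue *q, unsigned int max_segs)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(q);
	lim.max_segments = max_segs;
	return queue_limits_commit_update(q, &lim);
}
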
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 11493167b0a8..7c5f4e4d9b50 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -318,7 +318,7 @@ static int z2ram_register_disk(int minor)
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&tag_set, NULL);
+ disk = blk_mq_alloc_disk(&tag_set, NULL, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 242a1fece18d..f0639df6cd18 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -426,11 +426,11 @@ static void reset_bdev(struct zram *zram)
if (!zram->backing_dev)
return;
- bdev_release(zram->bdev_handle);
+ fput(zram->bdev_file);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev_handle = NULL;
+ zram->bdev_file = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -476,7 +476,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct bdev_handle *bdev_handle = NULL;
+ struct file *bdev_file = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -513,11 +513,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev_handle = bdev_open_by_dev(inode->i_rdev,
+ bdev_file = bdev_file_open_by_dev(inode->i_rdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
- if (IS_ERR(bdev_handle)) {
- err = PTR_ERR(bdev_handle);
- bdev_handle = NULL;
+ if (IS_ERR(bdev_file)) {
+ err = PTR_ERR(bdev_file);
+ bdev_file = NULL;
goto out;
}
@@ -531,7 +531,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev_handle = bdev_handle;
+ zram->bdev_file = bdev_file;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -544,8 +544,8 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev_handle)
- bdev_release(bdev_handle);
+ if (bdev_file)
+ fput(bdev_file);
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -587,7 +587,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
@@ -703,7 +703,7 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
+ bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -785,7 +785,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
+ bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
@@ -2177,6 +2177,28 @@ ATTRIBUTE_GROUPS(zram_disk);
*/
static int zram_add(void)
{
+ struct queue_limits lim = {
+ .logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE,
+ /*
+ * To ensure that we always get PAGE_SIZE aligned and
+ * n*PAGE_SIZED sized I/O requests.
+ */
+ .physical_block_size = PAGE_SIZE,
+ .io_min = PAGE_SIZE,
+ .io_opt = PAGE_SIZE,
+ .max_hw_discard_sectors = UINT_MAX,
+ /*
+ * zram_bio_discard() will clear all logical blocks if logical
+ * block size is identical with physical block size(PAGE_SIZE).
+ * But if it is different, we will skip discarding some parts of
+ * logical blocks in the part of the request range which isn't
+ * aligned to physical block size. So we can't ensure that all
+ * discarded logical blocks are zeroed.
+ */
+#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
+ .max_write_zeroes_sectors = UINT_MAX,
+#endif
+ };
struct zram *zram;
int ret, device_id;
@@ -2195,11 +2217,11 @@ static int zram_add(void)
#endif
/* gendisk structure */
- zram->disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!zram->disk) {
+ zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(zram->disk)) {
pr_err("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
+ ret = PTR_ERR(zram->disk);
goto out_free_idr;
}
@@ -2216,29 +2238,6 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
-
- /*
- * To ensure that we always get PAGE_SIZE aligned
- * and n*PAGE_SIZED sized I/O requests.
- */
- blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
- blk_queue_logical_block_size(zram->disk->queue,
- ZRAM_LOGICAL_BLOCK_SIZE);
- blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
- blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
- blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-
- /*
- * zram_bio_discard() will clear all logical blocks if logical block
- * size is identical with physical block size(PAGE_SIZE). But if it is
- * different, we will skip discarding some parts of logical blocks in
- * the part of the request range which isn't aligned to physical block
- * size. So we can't ensure that all discarded logical blocks are
- * zeroed.
- */
- if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
- blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
-
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 3b94d12f41b4..37bf29f34d26 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -132,7 +132,7 @@ struct zram {
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
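
[Editor's note] The zram hunks show the bio-based variant: blk_alloc_disk() now takes the limits and reports failure as an ERR_PTR instead of NULL, so callers move from a NULL check to IS_ERR()/PTR_ERR(). A minimal sketch of the updated call site; demo_alloc_bio_disk() and the capacity value are illustrative.

#include <linux/blkdev.h>
#include <linux/sizes.h>

static struct gendisk *demo_alloc_bio_disk(struct queue_limits *lim)
{
	struct gendisk *disk = blk_alloc_disk(lim, NUMA_NO_NODE);

	if (IS_ERR(disk))
		return disk;	/* propagate the ERR_PTR, not NULL */

	set_capacity(disk, SZ_1M >> SECTOR_SHIFT);
	return disk;
}
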
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0a5445ac5e1b..f9a7c790d7e2 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -11,6 +11,7 @@
#include <linux/firmware.h>
#include <linux/dmi.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -543,8 +544,6 @@ static const char *btbcm_get_board_name(struct device *dev)
struct device_node *root;
char *board_type;
const char *tmp;
- int len;
- int i;
root = of_find_node_by_path("/");
if (!root)
@@ -554,13 +553,8 @@ static const char *btbcm_get_board_name(struct device *dev)
return NULL;
/* get rid of any '/' in the compatible string */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
- for (i = 0; i < len; i++) {
- if (board_type[i] == '/')
- board_type[i] = '-';
- }
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ strreplace(board_type, '/', '-');
of_node_put(root);
return board_type;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index cdc5c08824a0..6ba7f5d1b837 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -441,7 +441,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
return PTR_ERR(skb);
}
- if (skb->len != sizeof(*ver)) {
+ if (!skb || skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;
@@ -2670,6 +2670,119 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
}
}
+static void btintel_print_fseq_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 *p;
+ u32 val;
+ const char *str;
+
+ skb = __hci_cmd_sync(hdev, 0xfcb3, 0, NULL, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_dbg(hdev, "Reading fseq status command failed (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len < (sizeof(u32) * 16 + 2)) {
+ bt_dev_dbg(hdev, "Malformed packet of length %u received",
+ skb->len);
+ kfree_skb(skb);
+ return;
+ }
+
+ p = skb_pull_data(skb, 1);
+ if (*p) {
+ bt_dev_dbg(hdev, "Failed to get fseq status (0x%2.2x)", *p);
+ kfree_skb(skb);
+ return;
+ }
+
+ p = skb_pull_data(skb, 1);
+ switch (*p) {
+ case 0:
+ str = "Success";
+ break;
+ case 1:
+ str = "Fatal error";
+ break;
+ case 2:
+ str = "Semaphore acquire error";
+ break;
+ default:
+ str = "Unknown error";
+ break;
+ }
+
+ if (*p) {
+ bt_dev_err(hdev, "Fseq status: %s (0x%2.2x)", str, *p);
+ kfree_skb(skb);
+ return;
+ }
+
+ bt_dev_info(hdev, "Fseq status: %s (0x%2.2x)", str, *p);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Reason: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Global version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Installed version: 0x%8.8x", val);
+
+ p = skb->data;
+ skb_pull_data(skb, 4);
+ bt_dev_info(hdev, "Fseq executed: %2.2u.%2.2u.%2.2u.%2.2u", p[0], p[1],
+ p[2], p[3]);
+
+ p = skb->data;
+ skb_pull_data(skb, 4);
+ bt_dev_info(hdev, "Fseq BT Top: %2.2u.%2.2u.%2.2u.%2.2u", p[0], p[1],
+ p[2], p[3]);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Top init version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Cnvio init version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX Wifi file version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq BT version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Top reset address: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX timeout: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX ack: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq CNVi id: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq CNVr id: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Error handle: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Magic noalive indication: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq OTP version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX otp version: 0x%8.8x", val);
+
+ kfree_skb(skb);
+}
+
static int btintel_setup_combined(struct hci_dev *hdev)
{
const u8 param[1] = { 0xFF };
@@ -2902,6 +3015,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
btintel_register_devcoredump_support(hdev);
+ btintel_print_fseq_info(hdev);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index aaabb732082c..ac8ebccd3507 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -372,8 +372,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
struct btmediatek_data *data = hci_get_priv(hdev);
int err;
- if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ kfree_skb(skb);
return 0;
+ }
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
@@ -420,5 +422,6 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7922);
MODULE_FIRMWARE(FIRMWARE_MT7961);
MODULE_FIRMWARE(FIRMWARE_MT7925);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 56f5502baadf..cbcdb99a22e6 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -4,6 +4,7 @@
#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7922 "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
#define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin"
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1d592ac413d1..0b93c2ff29e4 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -126,6 +126,7 @@ struct ps_data {
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
+ struct mutex ps_lock;
};
struct wakeup_cmd_payload {
@@ -317,6 +318,9 @@ static void ps_start_timer(struct btnxpuart_dev *nxpdev)
if (psdata->cur_psmode == PS_MODE_ENABLE)
mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval));
+
+ if (psdata->ps_state == PS_STATE_AWAKE && psdata->ps_cmd == PS_CMD_ENTER_PS)
+ cancel_work_sync(&psdata->work);
}
static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
@@ -337,6 +341,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
return;
+ mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
@@ -350,12 +355,15 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
status = serdev_device_break_ctl(nxpdev->serdev, 0);
else
status = serdev_device_break_ctl(nxpdev->serdev, -1);
+ msleep(20); /* Allow chip to detect UART-break and enter sleep */
bt_dev_dbg(hdev, "Set UART break: %s, status=%d",
str_on_off(ps_state == PS_STATE_SLEEP), status);
break;
}
if (!status)
psdata->ps_state = ps_state;
+ mutex_unlock(&psdata->ps_lock);
+
if (ps_state == PS_STATE_AWAKE)
btnxpuart_tx_wakeup(nxpdev);
}
@@ -391,17 +399,25 @@ static void ps_setup(struct hci_dev *hdev)
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
+ mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
}
-static void ps_wakeup(struct btnxpuart_dev *nxpdev)
+static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
{
struct ps_data *psdata = &nxpdev->psdata;
+ u8 ps_state;
+
+ mutex_lock(&psdata->ps_lock);
+ ps_state = psdata->ps_state;
+ mutex_unlock(&psdata->ps_lock);
- if (psdata->ps_state != PS_STATE_AWAKE) {
+ if (ps_state != PS_STATE_AWAKE) {
psdata->ps_cmd = PS_CMD_EXIT_PS;
schedule_work(&psdata->work);
+ return true;
}
+ return false;
}
static int send_ps_cmd(struct hci_dev *hdev, void *data)
@@ -1171,7 +1187,6 @@ static struct sk_buff *nxp_dequeue(void *data)
{
struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data;
- ps_wakeup(nxpdev);
ps_start_timer(nxpdev);
return skb_dequeue(&nxpdev->txq);
}
@@ -1186,6 +1201,9 @@ static void btnxpuart_tx_work(struct work_struct *work)
struct sk_buff *skb;
int len;
+ if (ps_wakeup(nxpdev))
+ return;
+
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
hdev->stat.byte_tx += len;
@@ -1234,6 +1252,9 @@ static int btnxpuart_close(struct hci_dev *hdev)
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
+ skb_queue_purge(&nxpdev->txq);
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index fdb0fae88d1c..b40b32fa7f1c 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
bt_dev_dbg(hdev, "QCA Patch config");
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
- cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ cmd, 0, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 277d039ecbb4..cc50de69e8dc 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -69,6 +69,7 @@ enum btrtl_chip_id {
CHIP_ID_8852B = 20,
CHIP_ID_8852C = 25,
CHIP_ID_8851B = 36,
+ CHIP_ID_8852BT = 47,
};
struct id_table {
@@ -307,6 +308,15 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8851bu_fw",
.cfg_name = "rtl_bt/rtl8851bu_config",
.hw_info = "rtl8851bu" },
+
+ /* 8852BT/8852BE-VT */
+ { IC_INFO(RTL_ROM_LMP_8852A, 0x87, 0xc, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8852btu_fw",
+ .cfg_name = "rtl_bt/rtl8852btu_config",
+ .hw_info = "rtl8852btu" },
};
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
@@ -645,6 +655,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8852A, 20 }, /* 8852B */
{ RTL_ROM_LMP_8852A, 25 }, /* 8852C */
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
+ { RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
};
if (btrtl_dev->fw_len <= 8)
@@ -1275,6 +1286,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852B:
case CHIP_ID_8852C:
case CHIP_ID_8851B:
+ case CHIP_ID_8852BT:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
@@ -1505,6 +1517,8 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852btu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852btu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d31edad7a056..06e915b57283 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -553,6 +553,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8852BT/8852BE-VT Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -655,6 +658,11 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ /* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -3080,7 +3088,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
int err, status;
u32 dev_id = 0;
char fw_bin_name[64];
- u32 fw_version = 0;
+ u32 fw_version = 0, fw_flavor = 0;
u8 param;
struct btmediatek_data *mediatek;
@@ -3103,6 +3111,11 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
return err;
}
+ err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
+ return err;
+ }
}
mediatek = hci_get_priv(hdev);
@@ -3127,6 +3140,10 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
+ else if (dev_id == 0x7961 && fw_flavor)
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
else
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3273,7 +3290,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
- struct sk_buff *skb_cd;
switch (handle) {
case 0xfc6f: /* Firmware dump from device */
@@ -3286,9 +3302,12 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
* for backward compatibility, so we have to clone the packet
* an extra time for the in-kernel coredump support.
*/
- skb_cd = skb_clone(skb, GFP_ATOMIC);
- if (skb_cd)
- btmtk_process_coredump(hdev, skb_cd);
+ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+ }
fallthrough;
case 0x05ff: /* Firmware debug logging 1 */
@@ -4481,6 +4500,7 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
}
if (!reset)
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index a61757835695..9a7243d5db71 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
kfree_skb(skb);
return 0;
@@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
hdev->setup = bcm4377_hci_setup;
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
if (bcm4377->hw->broken_mws_transport_config)
set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
if (bcm4377->hw->broken_ext_scan)
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 71e748a9477e..c0436881a533 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -113,6 +113,7 @@ struct h5_vnd {
int (*suspend)(struct h5 *h5);
int (*resume)(struct h5 *h5);
const struct acpi_gpio_mapping *acpi_gpio_map;
+ int sizeof_priv;
};
struct h5_device_data {
@@ -863,7 +864,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- return hci_uart_register_device(&h5->serdev_hu, &h5p);
+ return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
+ h5->vnd->sizeof_priv);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -1070,6 +1072,7 @@ static struct h5_vnd rtl_vnd = {
.suspend = h5_btrtl_suspend,
.resume = h5_btrtl_resume,
.acpi_gpio_map = acpi_btrtl_gpios,
+ .sizeof_priv = sizeof(struct btrealtek_data),
};
static const struct h5_device_data h5_data_rtl8822cs = {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 94b8c406f0c0..8a60ad7acd70 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev)
static void hci_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
@@ -1904,7 +1904,17 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ /* Set BDA quirk bit for reading BDA value from fwnode property
+ * only if that property exists in DT.
+ */
+ if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
+ } else {
+ bt_dev_dbg(hdev, "local-bd-address is not present in the devicetree, so not setting quirk bit for BDA");
+ }
+
hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
@@ -2316,7 +2326,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en) &&
+ if (IS_ERR(qcadev->bt_en) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
@@ -2325,7 +2335,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
+ if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
data->soc_type == QCA_WCN7850))
@@ -2347,7 +2357,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en)) {
+ if (IS_ERR(qcadev->bt_en)) {
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
power_ctrl_enabled = false;
}
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 39c8b567da3c..214fff876eae 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -300,8 +300,9 @@ static const struct serdev_device_ops hci_serdev_client_ops = {
.write_wakeup = hci_uart_write_wakeup,
};
-int hci_uart_register_device(struct hci_uart *hu,
- const struct hci_uart_proto *p)
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv)
{
int err;
struct hci_dev *hdev;
@@ -325,7 +326,7 @@ int hci_uart_register_device(struct hci_uart *hu,
set_bit(HCI_UART_PROTO_READY, &hu->flags);
/* Initialize and register HCI device */
- hdev = hci_alloc_dev();
+ hdev = hci_alloc_dev_priv(sizeof_priv);
if (!hdev) {
BT_ERR("Can't allocate HCI device");
err = -ENOMEM;
@@ -394,7 +395,7 @@ err_rwsem:
percpu_free_rwsem(&hu->proto_lock);
return err;
}
-EXPORT_SYMBOL_GPL(hci_uart_register_device);
+EXPORT_SYMBOL_GPL(hci_uart_register_device_priv);
void hci_uart_unregister_device(struct hci_uart *hu)
{
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fb4a2d0d8cc8..68c8c7e95d64 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -97,7 +97,17 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
-int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv);
+
+static inline int hci_uart_register_device(struct hci_uart *hu,
+ const struct hci_uart_proto *p)
+{
+ return hci_uart_register_device_priv(hu, p, 0);
+}
+
void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
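
Editor's note: the new hci_uart_register_device_priv() lets a serdev protocol ask the HCI core to reserve vendor private data of a caller-chosen size behind the hci_dev allocation, which the vendor code later fetches with hci_get_priv(). The "trailing private area of caller-chosen size" idea can be sketched in plain C with a flexible array member (illustrative analogue only, hypothetical names; not the HCI core's implementation):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hdev {
	char name[8];
	/* vendor private data follows the fixed part of the structure */
	unsigned char priv[];
};

static struct hdev *hdev_alloc_priv(size_t sizeof_priv)
{
	struct hdev *h = calloc(1, sizeof(*h) + sizeof_priv);

	if (h)
		strcpy(h->name, "hci0");
	return h;
}

static void *hdev_get_priv(struct hdev *h)
{
	return h->priv;                 /* pointer to the reserved vendor area */
}

struct vendor_data { int quirks; };     /* hypothetical vendor state */

int main(void)
{
	struct hdev *h = hdev_alloc_priv(sizeof(struct vendor_data));
	struct vendor_data *vnd;

	if (!h)
		return 1;
	vnd = hdev_get_priv(h);
	vnd->quirks = 1;
	printf("%s priv quirks=%d\n", h->name, vnd->quirks);
	free(h);
	return 0;
}
```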
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index e6742998f372..d5e7fa9173a1 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -186,11 +186,12 @@ config SUNXI_RSB
config TEGRA_ACONNECT
tristate "Tegra ACONNECT Bus Driver"
- depends on ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA
depends on OF && PM
help
Driver for the Tegra ACONNECT bus which is used to interface with
- the devices inside the Audio Processing Engine (APE) for Tegra210.
+ the devices inside the Audio Processing Engine (APE) for
+ Tegra210 and later.
config TEGRA_GMI
tristate "Tegra Generic Memory Interface bus driver"
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 6b5da73c8541..837bf9d51c6e 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
i++;
}
- if (i == 0 || i % 4)
+ if (i == 0)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index fd3e9d82340a..1e29ba76615d 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -128,7 +128,7 @@ struct sunxi_rsb {
};
/* bus / slave device related functions */
-static struct bus_type sunxi_rsb_bus;
+static const struct bus_type sunxi_rsb_bus;
static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
{
@@ -177,7 +177,7 @@ static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_ueven
return of_device_uevent_modalias(dev, env);
}
-static struct bus_type sunxi_rsb_bus = {
+static const struct bus_type sunxi_rsb_bus = {
.name = RSB_CTRL_NAME,
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 245e5e827d0d..41d33f39efe5 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -2400,7 +2400,7 @@ static int sysc_child_add_clocks(struct sysc *ddata,
return 0;
}
-static struct device_type sysc_device_type = {
+static const struct device_type sysc_device_type = {
};
static struct sysc *sysc_child_to_parent(struct device *dev)
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 57186c58dc84..1d7dd3d2c101 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
unsigned long line_size;
unsigned long flags;
+ if (unlikely(start == end))
+ return;
+
line_size = ax45mp_priv.ax45mp_cache_line_size;
start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
local_irq_save(flags);
ax45mp_cpu_dcache_wb_range(start, end);
local_irq_restore(flags);
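
Editor's note: the writeback fix above rounds the start address down and the end address up to the cache-line size so the whole buffer is covered. The power-of-two rounding can be checked in isolation (standalone sketch, a 64-byte line assumed for illustration):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t line_size = 64;               /* assumed cache-line size */
	uint64_t start = 0x1003, end = 0x10f1; /* arbitrary unaligned range */

	start &= ~(line_size - 1);                       /* round start down */
	end = (end + line_size - 1) & ~(line_size - 1);  /* round end up */

	printf("wb range: 0x%llx - 0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	/* prints 0x1000 - 0x1100: every touched line is written back */
	return 0;
}
```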
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d668b174ace9..eefdd422ad8e 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -724,11 +724,6 @@ static void probe_gdrom_setupdisk(void)
static int probe_gdrom_setupqueue(void)
{
- blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
- /* using DMA so memory will need to be contiguous */
- blk_queue_max_segments(gd.gdrom_rq, 1);
- /* set a large max size to get most from DMA */
- blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
gd.disk->queue = gd.gdrom_rq;
return gdrom_init_dma_mode();
}
@@ -743,6 +738,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
*/
static int probe_gdrom(struct platform_device *devptr)
{
+ struct queue_limits lim = {
+ .logical_block_size = GDROM_HARD_SECTOR,
+ /* using DMA so memory will need to be contiguous */
+ .max_segments = 1,
+ /* set a large max size to get most from DMA */
+ .max_segment_size = 0x40000,
+ };
int err;
/*
@@ -778,7 +780,7 @@ static int probe_gdrom(struct platform_device *devptr)
if (err)
goto probe_fail_free_cd_info;
- gd.disk = blk_mq_alloc_disk(&gd.tag_set, NULL);
+ gd.disk = blk_mq_alloc_disk(&gd.tag_set, &lim, NULL);
if (IS_ERR(gd.disk)) {
err = PTR_ERR(gd.disk);
goto probe_fail_free_tag_set;
@@ -829,7 +831,7 @@ probe_fail_no_mem:
return err;
}
-static int remove_gdrom(struct platform_device *devptr)
+static void remove_gdrom(struct platform_device *devptr)
{
blk_mq_free_tag_set(&gd.tag_set);
free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -840,13 +842,11 @@ static int remove_gdrom(struct platform_device *devptr)
unregister_cdrom(gd.cd_info);
kfree(gd.cd_info);
kfree(gd.toc);
-
- return 0;
}
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove = remove_gdrom,
+ .remove_new = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5c36ab85f80b..67d7be800a7c 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -138,7 +138,6 @@ struct agp_bridge_data {
unsigned long gart_bus_addr;
unsigned long gatt_bus_addr;
u32 mode;
- enum chipset_type type;
unsigned long *key_list;
atomic_t current_memory_agp;
atomic_t agp_in_use;
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 76adb108076c..2ea4882251cf 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
/**
* ftpm_tee_probe() - initialize the fTPM
- * @pdev: the platform_device description.
+ * @dev: the device description.
*
* Return:
* On success, 0. On failure, -errno.
@@ -304,7 +304,7 @@ static int ftpm_plat_tee_probe(struct platform_device *pdev)
/**
* ftpm_tee_remove() - remove the TPM device
- * @pdev: the platform_device description.
+ * @dev: the device description.
*
* Return:
* 0 always.
@@ -341,7 +341,7 @@ static void ftpm_plat_tee_remove(struct platform_device *pdev)
}
/**
- * ftpm_tee_shutdown() - shutdown the TPM device
+ * ftpm_plat_tee_shutdown() - shutdown the TPM device
* @pdev: the platform_device description.
*/
static void ftpm_plat_tee_shutdown(struct platform_device *pdev)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 2c52b7905b07..14652aaf8254 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -347,6 +347,7 @@ static void tpm_tis_plat_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "atmel,at97sc3204"},
{.compatible = "tcg,tpm-tis-mmio"},
{},
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 1b350412d8a6..64c875657687 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -919,8 +919,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
int rc;
u32 int_status;
- INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
-
rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
tis_int_handler, IRQF_ONESHOT | flags,
dev_name(&chip->dev), chip);
@@ -1132,6 +1130,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->phy_ops = phy_ops;
priv->locality_count = 0;
mutex_init(&priv->locality_count_mutex);
+ INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
dev_set_drvdata(&chip->dev, priv);
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index a897402cc36a..9511c0d50185 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -383,6 +383,8 @@ MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
#ifdef CONFIG_OF
static const struct of_device_id of_tis_i2c_match[] = {
{ .compatible = "infineon,slb9673", },
+ { .compatible = "nuvoton,npct75x", },
+ { .compatible = "tcg,tpm-tis-i2c", },
{}
};
MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index c5c3197ee29f..3f9eaf27b41b 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -146,7 +146,7 @@ static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
struct spi_transfer spi_xfer;
u8 transfer_len;
- spi_bus_lock(phy->spi_device->master);
+ spi_bus_lock(phy->spi_device->controller);
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
@@ -210,7 +210,7 @@ exit:
spi_sync_locked(phy->spi_device, &m);
}
- spi_bus_unlock(phy->spi_device->master);
+ spi_bus_unlock(phy->spi_device->controller);
return ret;
}
@@ -327,6 +327,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = {
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
+ { .compatible = "atmel,attpm20p", .data = tpm_tis_spi_probe },
{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 6994165e0395..0b60ae78f9d8 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -2458,15 +2458,18 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
static void __init rk3588_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches)) + 1;
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 4059d9365ae6..73d2cbdc716b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -429,6 +429,23 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
+unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+{
+ unsigned long max = 0;
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ if (list->id > max)
+ max = list->id;
+ if (list->child && list->child->id > max)
+ max = list->child->id;
+ }
+
+ return max;
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
+
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
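
Editor's note: rockchip_clk_find_max_clk_id() just scans the branch table for the largest clock ID (including composite children) so the provider can be sized without a hard-coded CLK_NR_CLKS. A standalone sketch of that scan (made-up table, not the real branch definitions):

```c
#include <stdio.h>

struct branch {
	unsigned long id;
	const struct branch *child;     /* optional composite child */
};

static unsigned long find_max_clk_id(const struct branch *list, unsigned int n)
{
	unsigned long max = 0;
	unsigned int i;

	for (i = 0; i < n; i++, list++) {
		if (list->id > max)
			max = list->id;
		if (list->child && list->child->id > max)
			max = list->child->id;
	}
	return max;
}

int main(void)
{
	static const struct branch child = { .id = 42 };
	static const struct branch table[] = {
		{ .id = 7 }, { .id = 31, .child = &child }, { .id = 12 },
	};

	/* the provider needs max + 1 slots: here 43 */
	printf("nr_clks = %lu\n", find_max_clk_id(table, 3) + 1);
	return 0;
}
```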
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 758ebaf2236b..fd3b476dedda 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -973,6 +973,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx);
+unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
+ unsigned int nr_clk);
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk);
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 0964bb11657f..782993951fff 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -2475,7 +2475,7 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = {
.nr_clk_ids = CLKS_NR_MISC,
.clk_regs = misc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
- .clk_name = "dout_cmu_misc_bus",
+ .clk_name = "bus",
};
/* ---- platform_driver ----------------------------------------------------- */
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 2974dd0ec6f4..5ec9255e33fa 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -11,23 +11,12 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
+#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PLT_CLK_NAME_BASE "pmc_plt_clk"
-#define PMC_CLK_CTL_OFFSET 0x60
-#define PMC_CLK_CTL_SIZE 4
-#define PMC_CLK_NUM 6
-#define PMC_CLK_CTL_GATED_ON_D3 0x0
-#define PMC_CLK_CTL_FORCE_ON 0x1
-#define PMC_CLK_CTL_FORCE_OFF 0x2
-#define PMC_CLK_CTL_RESERVED 0x3
-#define PMC_MASK_CLK_CTL GENMASK(1, 0)
-#define PMC_MASK_CLK_FREQ BIT(2)
-#define PMC_CLK_FREQ_XTAL (0 << 2) /* 25 MHz */
-#define PMC_CLK_FREQ_PLL (1 << 2) /* 19.2 MHz */
-
struct clk_plt_fixed {
struct clk_hw *clk;
struct clk_lookup *lookup;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e054de92de91..8d4a52056684 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1807,7 +1807,7 @@ TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
- struct clocksource **cs)
+ enum clocksource_ids *cs_id)
{
struct arm_smccc_res hvc_res;
u32 ptp_counter;
@@ -1831,8 +1831,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
*ts = ktime_to_timespec64(ktime);
if (cycle)
*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
- if (cs)
- *cs = &clocksource_counter;
+ if (cs_id)
+ *cs_id = CSID_ARM_ARCH_COUNTER;
return 0;
}
diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c
index e4974b508328..a933ef53845a 100644
--- a/drivers/comedi/drivers/comedi_8255.c
+++ b/drivers/comedi/drivers/comedi_8255.c
@@ -159,6 +159,7 @@ static int __subdev_8255_init(struct comedi_device *dev,
return -ENOMEM;
spriv->context = context;
+ spriv->io = io;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 30ea8b53ebf8..05ae9122823f 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -87,6 +87,8 @@ struct waveform_private {
struct comedi_device *dev; /* parent comedi device */
u64 ao_last_scan_time; /* time of previous AO scan in usec */
unsigned int ao_scan_period; /* AO scan period in usec */
+ bool ai_timer_enable:1; /* should AI timer be running? */
+ bool ao_timer_enable:1; /* should AO timer be running? */
unsigned short ao_loopbacks[N_CHANS];
};
@@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t)
time_increment = devpriv->ai_convert_time - now;
else
time_increment = 1;
- mod_timer(&devpriv->ai_timer,
- jiffies + usecs_to_jiffies(time_increment));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ai_timer_enable) {
+ mod_timer(&devpriv->ai_timer,
+ jiffies + usecs_to_jiffies(time_increment));
+ }
+ spin_unlock(&dev->spinlock);
}
overrun:
@@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
* Seem to need an extra jiffy here, otherwise timer expires slightly
* early!
*/
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = true;
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
add_timer(&devpriv->ai_timer);
+ spin_unlock_bh(&dev->spinlock);
return 0;
}
@@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ai_timer);
@@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t)
unsigned int time_inc = devpriv->ao_last_scan_time +
devpriv->ao_scan_period - now;
- mod_timer(&devpriv->ao_timer,
- jiffies + usecs_to_jiffies(time_inc));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ao_timer_enable) {
+ mod_timer(&devpriv->ao_timer,
+ jiffies + usecs_to_jiffies(time_inc));
+ }
+ spin_unlock(&dev->spinlock);
}
underrun:
@@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
async->inttrig = NULL;
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = true;
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
add_timer(&devpriv->ao_timer);
+ spin_unlock_bh(&dev->spinlock);
return 1;
}
@@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ao_timer);
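
Editor's note: the comedi_test change guards timer re-arming with an enable flag taken under the device spinlock, so a concurrent cancel can reliably stop the self-rearming timer. A compact userspace analogue of the same idea (pthread mutex standing in for the spinlock, hypothetical names):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_enable;
static int rearm_count;

/* Stand-in for the timer callback: only re-arms while still enabled. */
static void timer_fn(void)
{
	pthread_mutex_lock(&lock);
	if (timer_enable)
		rearm_count++;          /* mod_timer() would go here */
	pthread_mutex_unlock(&lock);
}

static void start_cmd(void)
{
	pthread_mutex_lock(&lock);
	timer_enable = true;            /* set before arming the first expiry */
	pthread_mutex_unlock(&lock);
	timer_fn();
}

static void cancel_cmd(void)
{
	pthread_mutex_lock(&lock);
	timer_enable = false;           /* any in-flight callback skips re-arming */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_cmd();
	cancel_cmd();
	timer_fn();                     /* late callback: does not re-arm */
	printf("re-armed %d time(s)\n", rearm_count);   /* 1 */
	return 0;
}
```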
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 09c77afb33ca..3f24481fc04a 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -31,10 +31,11 @@ struct counter_device_allochelper {
struct counter_device counter;
/*
- * This is cache line aligned to ensure private data behaves like if it
- * were kmalloced separately.
+ * This ensures private data behaves as if it were kmalloced
+ * separately. It also ensures the minimum alignment needed for
+ * safe DMA operations (which may or may not mean cache alignment).
*/
- unsigned long privdata[] ____cacheline_aligned;
+ unsigned long privdata[] __aligned(ARCH_DMA_MINALIGN);
};
static void counter_device_release(struct device *dev)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f911606897b8..a0ebad77666e 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -173,6 +173,7 @@ config ARM_QCOM_CPUFREQ_NVMEM
config ARM_QCOM_CPUFREQ_HW
tristate "QCOM CPUFreq HW driver"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on COMMON_CLK
help
Support for the CPUFreq HW driver.
Some QCOM chipsets have a HW engine to offload the steps
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1791d37fbc53..2015c9fcc3c9 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>
+#include <linux/topology.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -49,6 +50,7 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define AMD_PSTATE_PREFCORE_THRESHOLD 166
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -64,6 +66,7 @@ static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
/*
* AMD Energy Preference Performance (EPP)
@@ -297,13 +300,14 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- /*
- * TODO: Introduce AMD specific power feature.
- *
- * CPPC entry doesn't indicate the highest performance in some ASICs.
+ /* For platforms that do not support the preferred core feature, the
+ * highest_perf may be configured as 166 or 255. To avoid the max
+ * frequency being calculated incorrectly, take the
+ * AMD_CPPC_HIGHEST_PERF(cap1) value as the default max perf.
*/
- highest_perf = amd_get_highest_perf();
- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+ if (cpudata->hw_prefcore)
+ highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+ else
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -311,6 +315,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
@@ -324,8 +329,9 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- highest_perf = amd_get_highest_perf();
- if (highest_perf > cppc_perf.highest_perf)
+ if (cpudata->hw_prefcore)
+ highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+ else
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -334,6 +340,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
@@ -477,12 +484,19 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf;
+ u32 max_limit_perf, min_limit_perf, lowest_perf;
struct amd_cpudata *cpudata = policy->driver_data;
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+ lowest_perf = READ_ONCE(cpudata->lowest_perf);
+ if (min_limit_perf < lowest_perf)
+ min_limit_perf = lowest_perf;
+
+ if (max_limit_perf < min_limit_perf)
+ max_limit_perf = min_limit_perf;
+
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
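
Editor's note: the added clamping keeps the per-policy perf limits inside the hardware range: the frequency limits are scaled into perf units against highest_perf/max_freq, then min is raised to lowest_perf and max is never allowed below min. A quick arithmetic sketch with made-up numbers:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical CPPC capabilities and cpufreq policy limits */
	uint64_t highest_perf = 166, lowest_perf = 20, max_freq = 4200000; /* kHz */
	uint64_t policy_min = 400000, policy_max = 4200000;                /* kHz */

	uint64_t min_limit = policy_min * highest_perf / max_freq;
	uint64_t max_limit = policy_max * highest_perf / max_freq;

	if (min_limit < lowest_perf)    /* never request below the lowest perf level */
		min_limit = lowest_perf;
	if (max_limit < min_limit)      /* keep the range well-formed */
		max_limit = min_limit;

	printf("min_limit_perf=%llu max_limit_perf=%llu\n",
	       (unsigned long long)min_limit, (unsigned long long)max_limit);
	/* 400000*166/4200000 = 15 -> clamped to 20; max stays 166 */
	return 0;
}
```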
@@ -570,7 +584,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
@@ -706,6 +720,114 @@ static void amd_perf_ctl_reset(unsigned int cpu)
wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
+/*
+ * Enabling amd-pstate preferred core support can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
+
+/*
+ * Get the highest performance register value.
+ * @cpu: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+{
+ int ret;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 cap1;
+
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ if (ret)
+ return ret;
+ WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ } else {
+ u64 cppc_highest_perf;
+
+ ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
+ if (ret)
+ return ret;
+ WRITE_ONCE(*highest_perf, cppc_highest_perf);
+ }
+
+ return (ret);
+}
+
+#define CPPC_MAX_PERF U8_MAX
+
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+ int ret, prio;
+ u32 highest_perf;
+
+ ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
+ if (ret)
+ return;
+
+ cpudata->hw_prefcore = true;
+ /* Check if the CPPC preferred core feature is enabled */
+ if (highest_perf < CPPC_MAX_PERF)
+ prio = (int)highest_perf;
+ else {
+ pr_debug("AMD CPPC preferred core is unsupported!\n");
+ cpudata->hw_prefcore = false;
+ return;
+ }
+
+ if (!amd_pstate_prefcore)
+ return;
+
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(prio, cpudata->cpu);
+
+ schedule_work(&sched_prefcore_work);
+}
+
+static void amd_pstate_update_limits(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u32 prev_high = 0, cur_high = 0;
+ int ret;
+ bool highest_perf_changed = false;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
+ goto free_cpufreq_put;
+
+ ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+ if (ret)
+ goto free_cpufreq_put;
+
+ prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ if (prev_high != cur_high) {
+ highest_perf_changed = true;
+ WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
+
+ if (cur_high < CPPC_MAX_PERF)
+ sched_set_itmt_core_prio((int)cur_high, cpu);
+ }
+
+free_cpufreq_put:
+ cpufreq_cpu_put(policy);
+
+ if (!highest_perf_changed)
+ cpufreq_update_policy(cpu);
+
+ mutex_unlock(&amd_pstate_driver_lock);
+}
+
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -727,6 +849,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
@@ -877,6 +1001,28 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
return sysfs_emit(buf, "%u\n", perf);
}
+static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
+ char *buf)
+{
+ u32 perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ perf = READ_ONCE(cpudata->prefcore_ranking);
+
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+ char *buf)
+{
+ bool hw_prefcore;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
@@ -1074,18 +1220,29 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
return ret < 0 ? ret : count;
}
+static ssize_t prefcore_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+}
+
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
NULL,
};
@@ -1093,6 +1250,8 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
&energy_performance_preference,
&energy_performance_available_preferences,
NULL,
@@ -1100,6 +1259,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
static struct attribute *pstate_global_attributes[] = {
&dev_attr_status.attr,
+ &dev_attr_prefcore.attr,
NULL
};
@@ -1151,6 +1311,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
@@ -1232,6 +1394,12 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+ if (min_limit_perf < min_perf)
+ min_limit_perf = min_perf;
+
+ if (max_limit_perf < min_limit_perf)
+ max_limit_perf = min_limit_perf;
+
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
@@ -1432,6 +1600,7 @@ static struct cpufreq_driver amd_pstate_driver = {
.suspend = amd_pstate_cpu_suspend,
.resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
+ .update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
.attr = amd_pstate_attr,
};
@@ -1446,6 +1615,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.online = amd_pstate_epp_cpu_online,
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
+ .update_limits = amd_pstate_update_limits,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
@@ -1567,7 +1737,17 @@ static int __init amd_pstate_param(char *str)
return amd_pstate_set_driver(mode_idx);
}
+
+static int __init amd_prefcore_param(char *str)
+{
+ if (!strcmp(str, "disable"))
+ amd_pstate_prefcore = false;
+
+ return 0;
+}
+
early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 35fb3a559ea9..1a1857b0a6f4 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -481,6 +481,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
struct private_data *priv = policy->driver_data;
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bd1e1357cef8..b993a498084b 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -156,6 +156,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", },
+ { .compatible = "qcom,sdm670", },
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sdx75", },
{ .compatible = "qcom,sm6115", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 44db4f59c4cc..f6f8d7f450e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -576,17 +576,26 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
if (latency) {
+ unsigned int max_delay_us = 2 * MSEC_PER_SEC;
+
+ /*
+ * If the platform already has high transition_latency, use it
+ * as-is.
+ */
+ if (latency > max_delay_us)
+ return latency;
+
/*
- * For platforms that can change the frequency very fast (< 10
+ * For platforms that can change the frequency very fast (< 2
* us), the above formula gives a decent transition delay. But
* for platforms where transition_latency is in milliseconds, it
* ends up giving unrealistic values.
*
- * Cap the default transition delay to 10 ms, which seems to be
+ * Cap the default transition delay to 2 ms, which seems to be
* a reasonable amount of time after which we should reevaluate
* the frequency.
*/
- return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
+ return min(latency * LATENCY_MULTIPLIER, max_delay_us);
}
return LATENCY_MULTIPLIER;
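
Editor's note: with the new cap, the default transition delay is latency * LATENCY_MULTIPLIER bounded at 2 ms, unless the platform's own latency already exceeds 2 ms, in which case it is used as-is. A minimal sketch of that selection (LATENCY_MULTIPLIER assumed to be 1000, as in the cpufreq core):

```c
#include <stdio.h>

#define LATENCY_MULTIPLIER 1000         /* assumed cpufreq core value */

static unsigned int transition_delay_us(unsigned int latency_us)
{
	unsigned int max_delay_us = 2000;       /* 2 ms cap */

	if (!latency_us)
		return LATENCY_MULTIPLIER;
	if (latency_us > max_delay_us)          /* slow platform: trust its latency */
		return latency_us;
	if (latency_us * LATENCY_MULTIPLIER < max_delay_us)
		return latency_us * LATENCY_MULTIPLIER;
	return max_delay_us;
}

int main(void)
{
	printf("%u %u %u\n",
	       transition_delay_us(1),          /* 1000 us */
	       transition_delay_us(50),         /* capped at 2000 us */
	       transition_delay_us(5000));      /* 5000 us, used as-is */
	return 0;
}
```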
@@ -1571,7 +1580,8 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
- if (cpufreq_thermal_control_enabled(cpufreq_driver))
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
@@ -1655,11 +1665,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
else
policy->last_policy = policy->policy;
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
- }
-
if (has_target())
cpufreq_exit_governor(policy);
@@ -1720,6 +1725,15 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return;
}
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy are
+ * removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c52d19d67557..a7c38b8b3e78 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,7 +22,6 @@
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
-#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 33728c242f66..c20d3ecc5a81 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -14,6 +14,8 @@
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define PU_SOC_VOLTAGE_NORMAL 1250000
#define PU_SOC_VOLTAGE_HIGH 1275000
@@ -225,8 +227,6 @@ static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
static int imx6q_opp_check_speed_grading(struct device *dev)
{
- struct device_node *np;
- void __iomem *base;
u32 val;
int ret;
@@ -235,16 +235,11 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
if (ret)
return ret;
} else {
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
- if (!np)
- return -ENOENT;
+ struct regmap *ocotp;
- base = of_iomap(np, 0);
- of_node_put(np);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- return -EFAULT;
- }
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6q-ocotp");
+ if (IS_ERR(ocotp))
+ return -ENOENT;
/*
* SPEED_GRADING[1:0] defines the max speed of ARM:
@@ -254,8 +249,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
* 2b'00: 792000000Hz;
* We need to set the max speed of ARM according to fuse map.
*/
- val = readl_relaxed(base + OCOTP_CFG3);
- iounmap(base);
+ regmap_read(ocotp, OCOTP_CFG3, &val);
}
val >>= OCOTP_CFG3_SPEED_SHIFT;
@@ -290,25 +284,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
if (ret)
return ret;
} else {
- struct device_node *np;
- void __iomem *base;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
- if (!np)
- np = of_find_compatible_node(NULL, NULL,
- "fsl,imx6ull-ocotp");
- if (!np)
- return -ENOENT;
+ struct regmap *ocotp;
- base = of_iomap(np, 0);
- of_node_put(np);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- return -EFAULT;
- }
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6ul-ocotp");
+ if (IS_ERR(ocotp))
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6ull-ocotp");
+
+ if (IS_ERR(ocotp))
+ return -ENOENT;
- val = readl_relaxed(base + OCOTP_CFG3);
- iounmap(base);
+ regmap_read(ocotp, OCOTP_CFG3, &val);
}
/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ca94e60e705a..dbbf299f4219 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
+#include <linux/bitfield.h>
#include <trace/events/power.h>
#include <asm/cpu.h>
@@ -201,8 +202,6 @@ struct global_params {
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
- * @prev_cummulative_iowait: IO Wait time difference from last and
- * current sample
* @sample: Storage for storing last Sample data
* @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
* @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
@@ -241,7 +240,6 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
- u64 prev_cummulative_iowait;
struct sample sample;
int32_t min_perf_ratio;
int32_t max_perf_ratio;
@@ -2987,6 +2985,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
+ if (min_pstate > cpu->max_perf_ratio)
+ min_pstate = cpu->max_perf_ratio;
+
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;
@@ -3404,14 +3405,31 @@ static bool intel_pstate_hwp_is_enabled(void)
return !!(value & 0x1);
}
-static const struct x86_cpu_id intel_epp_balance_perf[] = {
+#define POWERSAVE_MASK GENMASK(7, 0)
+#define BALANCE_POWER_MASK GENMASK(15, 8)
+#define BALANCE_PERFORMANCE_MASK GENMASK(23, 16)
+#define PERFORMANCE_MASK GENMASK(31, 24)
+
+#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \
+ (FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\
+ FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\
+ FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\
+ FIELD_PREP_CONST(PERFORMANCE_MASK, performance))
+
+#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \
+ (HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\
+ balance_perf, HWP_EPP_PERFORMANCE))
+
+static const struct x86_cpu_id intel_epp_default[] = {
/*
* Set EPP value as 102, this is the max suggested EPP
* which can result in one core turbo frequency for
* AlderLake Mobile CPUs.
*/
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ HWP_EPP_BALANCE_POWERSAVE, 115, 16)),
{}
};
@@ -3509,11 +3527,24 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
if (hwp_active) {
- const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+ const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default);
const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);
- if (id)
- epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+ if (id) {
+ epp_values[EPP_INDEX_POWERSAVE] =
+ FIELD_GET(POWERSAVE_MASK, id->driver_data);
+ epp_values[EPP_INDEX_BALANCE_POWERSAVE] =
+ FIELD_GET(BALANCE_POWER_MASK, id->driver_data);
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] =
+ FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data);
+ epp_values[EPP_INDEX_PERFORMANCE] =
+ FIELD_GET(PERFORMANCE_MASK, id->driver_data);
+ pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n",
+ epp_values[EPP_INDEX_POWERSAVE],
+ epp_values[EPP_INDEX_BALANCE_POWERSAVE],
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE],
+ epp_values[EPP_INDEX_PERFORMANCE]);
+ }
if (hybrid_id) {
hybrid_scaling_factor = hybrid_id->driver_data;
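
Editor's note: the HWP_SET_EPP_VALUES() macro packs four 8-bit EPP defaults into one u32 driver_data word, and FIELD_GET() pulls them back out per byte lane. The same packing can be shown with plain shifts and masks (standalone sketch; 255/192 are assumed values of HWP_EPP_POWERSAVE/HWP_EPP_BALANCE_POWERSAVE, while 115/16 are the METEORLAKE_L balance_perf/performance values from the hunk above):

```c
#include <stdint.h>
#include <stdio.h>

/* pack four 8-bit EPP values into one 32-bit word, lowest byte = powersave */
#define PACK_EPP(ps, bp, bperf, perf) \
	((uint32_t)(ps) | (uint32_t)(bp) << 8 | \
	 (uint32_t)(bperf) << 16 | (uint32_t)(perf) << 24)

#define GET_BYTE(v, n)	(((v) >> ((n) * 8)) & 0xff)

int main(void)
{
	uint32_t dd = PACK_EPP(255, 192, 115, 16);

	printf("powersave=%u balance_power=%u balance_perf=%u performance=%u\n",
	       GET_BYTE(dd, 0), GET_BYTE(dd, 1), GET_BYTE(dd, 2), GET_BYTE(dd, 3));
	return 0;
}
```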
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index d46afb3c0092..8d097dcddda4 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 32U
@@ -300,7 +301,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
const void *data;
- int ret;
+ int ret, cpu;
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+
+ /* Make sure that all CPU supplies are available before proceeding. */
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "Failed to get cpu%d device\n", cpu);
+
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu");
+ if (IS_ERR(cpu_reg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
+ "CPU%d regulator get failed\n", cpu);
+ }
+
data = of_device_get_match_data(&pdev->dev);
if (!data)
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 4ee23f4ebf4a..0b483bd0d3ca 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -144,6 +144,29 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
return 0;
}
+static int
+scmi_get_rate_limit(u32 domain, bool has_fast_switch)
+{
+ int ret, rate_limit;
+
+ if (has_fast_switch) {
+ /*
+ * Fast channels are used whenever available,
+ * so use their rate_limit value if populated.
+ */
+ ret = perf_ops->fast_switch_rate_limit(ph, domain,
+ &rate_limit);
+ if (!ret && rate_limit)
+ return rate_limit;
+ }
+
+ ret = perf_ops->rate_limit_get(ph, domain, &rate_limit);
+ if (ret)
+ return 0;
+
+ return rate_limit;
+}
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -250,6 +273,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, domain);
+ policy->transition_delay_us =
+ scmi_get_rate_limit(domain, policy->fast_switch_possible);
+
return 0;
out_free_opp:
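
Editor's note: scmi_get_rate_limit() prefers the fast-channel rate limit when fast switching is in use and the value is populated, and otherwise falls back to the domain-level rate limit (or 0 on error). The fallback chain in isolation (hypothetical getters standing in for the SCMI perf ops):

```c
#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the SCMI perf-ops rate-limit getters */
static int fast_switch_rate_limit(unsigned int domain, int *limit)
{
	(void)domain;
	*limit = 0;             /* fast channel present, but no limit populated */
	return 0;
}

static int rate_limit_get(unsigned int domain, int *limit)
{
	(void)domain;
	*limit = 1200;          /* domain-level limit in microseconds */
	return 0;
}

static int get_rate_limit(unsigned int domain, bool has_fast_switch)
{
	int ret, rate_limit;

	if (has_fast_switch) {
		ret = fast_switch_rate_limit(domain, &rate_limit);
		if (!ret && rate_limit)         /* only trust a populated value */
			return rate_limit;
	}

	ret = rate_limit_get(domain, &rate_limit);
	return ret ? 0 : rate_limit;
}

int main(void)
{
	/* fast channel reports 0, so the domain limit (1200 us) wins */
	printf("transition_delay_us = %d\n", get_rate_limit(0, true));
	return 0;
}
```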
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index d9cda7f6ccb9..cf5873cc45dc 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
#include <linux/cpumask.h>
#include <linux/tick.h>
#include <linux/cpu.h>
+#include <linux/math64.h>
#include "cpuidle.h"
@@ -187,7 +188,7 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
if (s->exit_latency > 0)
- s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
+ s->exit_latency_ns = mul_u32_u32(s->exit_latency, NSEC_PER_USEC);
else if (s->exit_latency_ns < 0)
s->exit_latency_ns = 0;
else
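
Editor's note: the cpuidle change avoids an overflow when exit_latency * NSEC_PER_USEC is evaluated as a 32-bit multiply: mul_u32_u32() widens both operands before multiplying. A standalone demonstration of the wrap and the widened product (exaggerated latency value purely for illustration):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* widen both operands before multiplying, like the kernel's mul_u32_u32() */
static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t exit_latency_us = 5000000;     /* deliberately huge */
	uint32_t nsec_per_usec = 1000;

	/* the 32-bit product wraps around 2^32 ... */
	uint32_t wrapped = exit_latency_us * nsec_per_usec;
	/* ... the widened product does not */
	uint64_t ok = mul_u32_u32(exit_latency_us, nsec_per_usec);

	printf("32-bit: %" PRIu32 " ns, 64-bit: %" PRIu64 " ns\n", wrapped, ok);
	return 0;
}
```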
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
index 1dff3a52917d..663b7f164d20 100644
--- a/drivers/cpuidle/governors/haltpoll.c
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -98,10 +98,15 @@ static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
unsigned int shrink = guest_halt_poll_shrink;
val = dev->poll_limit_ns;
- if (shrink == 0)
+ if (shrink == 0) {
val = 0;
- else
+ } else {
val /= shrink;
+ /* Reset value to 0 if shrunk below grow_start */
+ if (val < guest_halt_poll_grow_start)
+ val = 0;
+ }
+
trace_guest_halt_poll_ns_shrink(val, dev->poll_limit_ns);
dev->poll_limit_ns = val;
}
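
Editor's note: the haltpoll tweak resets the poll limit to zero once shrinking drops it below guest_halt_poll_grow_start, instead of letting it linger at a tiny non-zero value. The adjusted shrink rule by itself (sketch; shrink=2 and grow_start=50000 ns assumed as typical defaults):

```c
#include <stdio.h>

static unsigned long long shrink_poll_limit(unsigned long long val,
					    unsigned int shrink,
					    unsigned long long grow_start)
{
	if (shrink == 0)
		return 0;
	val /= shrink;
	if (val < grow_start)   /* too small to be useful: reset to 0 */
		val = 0;
	return val;
}

int main(void)
{
	printf("%llu\n", shrink_poll_limit(200000, 2, 50000));  /* 100000 */
	printf("%llu\n", shrink_poll_limit(60000, 2, 50000));   /* 30000 -> 0 */
	return 0;
}
```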
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 1262a7773ef3..de50c00ba218 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -299,22 +299,6 @@ theend:
return err;
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
void *async_req)
{
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;
+
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ sun8i_ce_cipher_unprepare(engine, areq);
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
+}
+
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
return err;
sun8i_ce_cipher_run(engine, areq);
- sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf1..f394e45e11ab 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -38,7 +38,7 @@ config CRYPTO_DEV_CCP_CRYPTO
config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
- depends on CRYPTO_DEV_CCP_DD && X86_64
+ depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index b04bc1d3d627..f44efbb89c34 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -21,14 +21,18 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/psp.h>
+#include <linux/amd-iommu.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
+#include <asm/e820/types.h>
+#include <asm/sev.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -37,6 +41,19 @@
#define SEV_FW_FILE "amd/sev.fw"
#define SEV_FW_NAME_SIZE 64
+/* Minimum firmware version required for the SEV-SNP support */
+#define SNP_MIN_API_MAJOR 1
+#define SNP_MIN_API_MINOR 51
+
+/*
+ * Maximum number of firmware-writable buffers that might be specified
+ * in the parameters of a legacy SEV command buffer.
+ */
+#define CMD_BUF_FW_WRITABLE_MAX 2
+
+/* Leave room in the descriptor array for an end-of-list indicator. */
+#define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1)
+
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -68,9 +85,14 @@ static int psp_timeout;
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
* allocation order.
+ *
+ * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized.
*/
-#define SEV_ES_TMR_SIZE (1024 * 1024)
+#define SEV_TMR_SIZE (1024 * 1024)
+#define SNP_TMR_SIZE (2 * 1024 * 1024)
+
static void *sev_es_tmr;
+static size_t sev_es_tmr_size = SEV_TMR_SIZE;
/* INIT_EX NV Storage:
* The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page
@@ -80,6 +102,13 @@ static void *sev_es_tmr;
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;
+/*
+ * SEV_DATA_RANGE_LIST:
+ * Array containing range of pages that firmware transitions to HV-fixed
+ * page state.
+ */
+static struct sev_data_range_list *snp_range_list;
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -115,6 +144,25 @@ static int sev_wait_cmd_ioc(struct sev_device *sev,
{
int ret;
+ /*
+ * If invoked during panic handling, local interrupts are disabled,
+ * so the PSP command completion interrupt can't be used. Poll for
+ * PSP command completion instead.
+ */
+ if (irqs_disabled()) {
+ unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10;
+
+ /* Poll for SEV command completion: */
+ while (timeout_usecs--) {
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+ }
+
ret = wait_event_timeout(sev->int_queue,
sev->int_rcvd, timeout * HZ);
if (!ret)
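A quick sanity check of the polling conversion above (illustrative arithmetic, not part of the patch): each loop iteration waits 10 microseconds, so the iteration count is simply the timeout expressed in 10 us units.

/*
 * Illustrative arithmetic for the polling path (not part of the patch):
 *
 *   iterations = (timeout * USEC_PER_SEC) / 10
 *
 * e.g. timeout = 2 s  ->  (2 * 1000000) / 10 = 200000 iterations,
 * and 200000 iterations * udelay(10) = 2 s of polling before -ETIMEDOUT.
 */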
@@ -130,6 +178,8 @@ static int sev_cmd_buffer_len(int cmd)
switch (cmd) {
case SEV_CMD_INIT: return sizeof(struct sev_data_init);
case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex);
+ case SEV_CMD_SNP_SHUTDOWN_EX: return sizeof(struct sev_data_snp_shutdown_ex);
+ case SEV_CMD_SNP_INIT_EX: return sizeof(struct sev_data_snp_init_ex);
case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
@@ -158,23 +208,27 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
+ case SEV_CMD_SNP_GCTX_CREATE: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_LAUNCH_START: return sizeof(struct sev_data_snp_launch_start);
+ case SEV_CMD_SNP_LAUNCH_UPDATE: return sizeof(struct sev_data_snp_launch_update);
+ case SEV_CMD_SNP_ACTIVATE: return sizeof(struct sev_data_snp_activate);
+ case SEV_CMD_SNP_DECOMMISSION: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_PAGE_RECLAIM: return sizeof(struct sev_data_snp_page_reclaim);
+ case SEV_CMD_SNP_GUEST_STATUS: return sizeof(struct sev_data_snp_guest_status);
+ case SEV_CMD_SNP_LAUNCH_FINISH: return sizeof(struct sev_data_snp_launch_finish);
+ case SEV_CMD_SNP_DBG_DECRYPT: return sizeof(struct sev_data_snp_dbg);
+ case SEV_CMD_SNP_DBG_ENCRYPT: return sizeof(struct sev_data_snp_dbg);
+ case SEV_CMD_SNP_PAGE_UNSMASH: return sizeof(struct sev_data_snp_page_unsmash);
+ case SEV_CMD_SNP_PLATFORM_STATUS: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
+ case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
+ case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
default: return 0;
}
return 0;
}
-static void *sev_fw_alloc(unsigned long len)
-{
- struct page *page;
-
- page = alloc_pages(GFP_KERNEL, get_order(len));
- if (!page)
- return NULL;
-
- return page_address(page);
-}
-
static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
struct file *fp;
@@ -305,13 +359,485 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
+/*
+ * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
+ * needs snp_reclaim_pages(), so a forward declaration is needed.
+ */
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
+static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+{
+ int ret, err, i;
+
+ paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE));
+
+ for (i = 0; i < npages; i++, paddr += PAGE_SIZE) {
+ struct sev_data_snp_page_reclaim data = {0};
+
+ data.paddr = paddr;
+
+ if (locked)
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+ else
+ ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+
+ if (ret)
+ goto cleanup;
+
+ ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K);
+ if (ret)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /*
+ * If there was a failure reclaiming the page then it is no longer safe
+ * to release it back to the system; leak it instead.
+ */
+ snp_leak_pages(__phys_to_pfn(paddr), npages - i);
+ return ret;
+}
+
+static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
+{
+ unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+ int rc, i;
+
+ for (i = 0; i < npages; i++, pfn++) {
+ rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true);
+ if (rc)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /*
+ * Try unrolling the firmware state changes by
+ * reclaiming the pages which were already changed to the
+ * firmware state.
+ */
+ snp_reclaim_pages(paddr, i, locked);
+
+ return rc;
+}
+
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+{
+ unsigned long npages = 1ul << order, paddr;
+ struct sev_device *sev;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return NULL;
+
+ /* If SEV-SNP is initialized then add the page in RMP table. */
+ sev = psp_master->sev_data;
+ if (!sev->snp_initialized)
+ return page;
+
+ paddr = __pa((unsigned long)page_address(page));
+ if (rmp_mark_pages_firmware(paddr, npages, false))
+ return NULL;
+
+ return page;
+}
+
+void *snp_alloc_firmware_page(gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = __snp_alloc_firmware_pages(gfp_mask, 0);
+
+ return page ? page_address(page) : NULL;
+}
+EXPORT_SYMBOL_GPL(snp_alloc_firmware_page);
+
+static void __snp_free_firmware_pages(struct page *page, int order, bool locked)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ unsigned long paddr, npages = 1ul << order;
+
+ if (!page)
+ return;
+
+ paddr = __pa((unsigned long)page_address(page));
+ if (sev->snp_initialized &&
+ snp_reclaim_pages(paddr, npages, locked))
+ return;
+
+ __free_pages(page, order);
+}
+
+void snp_free_firmware_page(void *addr)
+{
+ if (!addr)
+ return;
+
+ __snp_free_firmware_pages(virt_to_page(addr), 0, false);
+}
+EXPORT_SYMBOL_GPL(snp_free_firmware_page);
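For context, a minimal sketch of how a consumer of the two exported helpers above might use them; the caller and its surrounding code are hypothetical, not part of this patch.

/* Hypothetical caller sketch, not part of this patch. */
static int example_use_snp_firmware_page(void)
{
	void *fw_page;

	/* RMP-transitioned to the firmware-owned state when SNP is initialized. */
	fw_page = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
	if (!fw_page)
		return -ENOMEM;

	/* ... hand __psp_pa(fw_page) to an SNP firmware command here ... */

	/* Reclaimed back to the hypervisor (or leaked on failure) before freeing. */
	snp_free_firmware_page(fw_page);
	return 0;
}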
+
+static void *sev_fw_alloc(unsigned long len)
+{
+ struct page *page;
+
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+ if (!page)
+ return NULL;
+
+ return page_address(page);
+}
+
+/**
+ * struct cmd_buf_desc - descriptors for managing legacy SEV command address
+ * parameters corresponding to buffers that may be written to by firmware.
+ *
+ * @paddr_ptr: pointer to the address parameter in the command buffer which may
+ * need to be saved/restored depending on whether a bounce buffer
+ * is used. In the case of a bounce buffer, the command buffer
+ * needs to be updated with the address of the new bounce buffer
+ * snp_map_cmd_buf_desc() has allocated specifically for it. Must
+ * be NULL if this descriptor is only an end-of-list indicator.
+ *
+ * @paddr_orig: storage for the original address parameter, which can be used to
+ * restore the original value in @paddr_ptr in cases where it is
+ * replaced with the address of a bounce buffer.
+ *
+ * @len: length of buffer located at the address originally stored at @paddr_ptr
+ *
+ * @guest_owned: true if the address corresponds to guest-owned pages, in which
+ * case bounce buffers are not needed.
+ */
+struct cmd_buf_desc {
+ u64 *paddr_ptr;
+ u64 paddr_orig;
+ u32 len;
+ bool guest_owned;
+};
+
+/*
+ * If a legacy SEV command parameter is a memory address, those pages in
+ * turn need to be transitioned to/from firmware-owned before/after
+ * executing the firmware command.
+ *
+ * Additionally, in cases where those pages are not guest-owned, a bounce
+ * buffer is needed in place of the original memory address parameter.
+ *
+ * A set of descriptors is used to keep track of this handling; they are
+ * initialized here based on the specific command being executed.
+ */
+static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf,
+ struct cmd_buf_desc *desc_list)
+{
+ switch (cmd) {
+ case SEV_CMD_PDH_CERT_EXPORT: {
+ struct sev_data_pdh_cert_export *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->pdh_cert_address;
+ desc_list[0].len = data->pdh_cert_len;
+ desc_list[1].paddr_ptr = &data->cert_chain_address;
+ desc_list[1].len = data->cert_chain_len;
+ break;
+ }
+ case SEV_CMD_GET_ID: {
+ struct sev_data_get_id *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_PEK_CSR: {
+ struct sev_data_pek_csr *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_DATA: {
+ struct sev_data_launch_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: {
+ struct sev_data_launch_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_LAUNCH_MEASURE: {
+ struct sev_data_launch_measure *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: {
+ struct sev_data_launch_secret *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_DBG_DECRYPT: {
+ struct sev_data_dbg *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->dst_addr;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_DBG_ENCRYPT: {
+ struct sev_data_dbg *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->dst_addr;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_ATTESTATION_REPORT: {
+ struct sev_data_attestation_report *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_SEND_START: {
+ struct sev_data_send_start *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->session_address;
+ desc_list[0].len = data->session_len;
+ break;
+ }
+ case SEV_CMD_SEND_UPDATE_DATA: {
+ struct sev_data_send_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->hdr_address;
+ desc_list[0].len = data->hdr_len;
+ desc_list[1].paddr_ptr = &data->trans_address;
+ desc_list[1].len = data->trans_len;
+ break;
+ }
+ case SEV_CMD_SEND_UPDATE_VMSA: {
+ struct sev_data_send_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->hdr_address;
+ desc_list[0].len = data->hdr_len;
+ desc_list[1].paddr_ptr = &data->trans_address;
+ desc_list[1].len = data->trans_len;
+ break;
+ }
+ case SEV_CMD_RECEIVE_UPDATE_DATA: {
+ struct sev_data_receive_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: {
+ struct sev_data_receive_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc)
+{
+ unsigned int npages;
+
+ if (!desc->len)
+ return 0;
+
+ /* Allocate a bounce buffer if this isn't a guest owned page. */
+ if (!desc->guest_owned) {
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len));
+ if (!page) {
+ pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n");
+ return -ENOMEM;
+ }
+
+ desc->paddr_orig = *desc->paddr_ptr;
+ *desc->paddr_ptr = __psp_pa(page_to_virt(page));
+ }
+
+ npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
+
+ /* Transition the buffer to firmware-owned. */
+ if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) {
+ pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc)
+{
+ unsigned int npages;
+
+ if (!desc->len)
+ return 0;
+
+ npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
+
+ /* Transition the buffers back to hypervisor-owned. */
+ if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) {
+ pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n");
+ return -EFAULT;
+ }
+
+ /* Copy data from bounce buffer and then free it. */
+ if (!desc->guest_owned) {
+ void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr));
+ void *dst_buf = __va(__sme_clr(desc->paddr_orig));
+
+ memcpy(dst_buf, bounce_buf, desc->len);
+ __free_pages(virt_to_page(bounce_buf), get_order(desc->len));
+
+ /* Restore the original address in the command buffer. */
+ *desc->paddr_ptr = desc->paddr_orig;
+ }
+
+ return 0;
+}
+
+static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
+{
+ int i;
+
+ snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list);
+
+ for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
+ struct cmd_buf_desc *desc = &desc_list[i];
+
+ if (!desc->paddr_ptr)
+ break;
+
+ if (snp_map_cmd_buf_desc(desc))
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--)
+ snp_unmap_cmd_buf_desc(&desc_list[i]);
+
+ return -EFAULT;
+}
+
+static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
+ struct cmd_buf_desc *desc = &desc_list[i];
+
+ if (!desc->paddr_ptr)
+ break;
+
+ if (snp_unmap_cmd_buf_desc(&desc_list[i]))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool sev_cmd_buf_writable(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_PLATFORM_STATUS:
+ case SEV_CMD_GUEST_STATUS:
+ case SEV_CMD_LAUNCH_START:
+ case SEV_CMD_RECEIVE_START:
+ case SEV_CMD_LAUNCH_MEASURE:
+ case SEV_CMD_SEND_START:
+ case SEV_CMD_SEND_UPDATE_DATA:
+ case SEV_CMD_SEND_UPDATE_VMSA:
+ case SEV_CMD_PEK_CSR:
+ case SEV_CMD_PDH_CERT_EXPORT:
+ case SEV_CMD_GET_ID:
+ case SEV_CMD_ATTESTATION_REPORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */
+static bool snp_legacy_handling_needed(int cmd)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized;
+}
+
+static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
+{
+ if (!snp_legacy_handling_needed(cmd))
+ return 0;
+
+ if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list))
+ return -EFAULT;
+
+ /*
+ * Before command execution, the command buffer needs to be put into
+ * the firmware-owned state.
+ */
+ if (sev_cmd_buf_writable(cmd)) {
+ if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
+{
+ if (!snp_legacy_handling_needed(cmd))
+ return 0;
+
+ /*
+ * After command completion, the command buffer needs to be put back
+ * into the hypervisor-owned state.
+ */
+ if (sev_cmd_buf_writable(cmd))
+ if (snp_reclaim_pages(__pa(cmd_buf), 1, true))
+ return -EFAULT;
+
+ return 0;
+}
+
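Pulling the helpers above together, the lifecycle of a legacy SEV command once SNP is initialized is roughly the following (summary sketch, not part of the patch):

/*
 * Rough lifecycle of a legacy SEV command once SNP is initialized
 * (summary only, not part of the patch):
 *
 *   snp_prep_cmd_buf()
 *     snp_map_cmd_buf_desc_list()   bounce non-guest-owned buffers and mark
 *                                   parameter pages firmware-owned
 *     rmp_mark_pages_firmware()     on the command buffer itself when the
 *                                   command writes back to it
 *   ... firmware command executes ...
 *   snp_reclaim_cmd_buf()           command buffer back to hypervisor-owned
 *   snp_unmap_cmd_buf_desc_list()   parameter pages reclaimed, bounce data
 *                                   copied back, original addresses restored
 */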
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
+ struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
struct sev_device *sev;
unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
+ void *cmd_buf;
int buf_len;
if (!psp || !psp->sev_data)
@@ -331,12 +857,47 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
* work for some memory, e.g. vmalloc'd addresses, and @data may not be
* physically contiguous.
*/
- if (data)
- memcpy(sev->cmd_buf, data, buf_len);
+ if (data) {
+ /*
+ * Commands are generally issued one at a time and require the
+ * sev_cmd_mutex, but there could be recursive firmware requests
+ * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while
+ * preparing buffers for another command. This is the only known
+ * case of nesting in the current code, so exactly one
+ * additional command buffer is available for that purpose.
+ */
+ if (!sev->cmd_buf_active) {
+ cmd_buf = sev->cmd_buf;
+ sev->cmd_buf_active = true;
+ } else if (!sev->cmd_buf_backup_active) {
+ cmd_buf = sev->cmd_buf_backup;
+ sev->cmd_buf_backup_active = true;
+ } else {
+ dev_err(sev->dev,
+ "SEV: too many firmware commands in progress, no command buffers available.\n");
+ return -EBUSY;
+ }
+
+ memcpy(cmd_buf, data, buf_len);
+
+ /*
+ * The behavior of the SEV-legacy commands is altered when the
+ * SNP firmware is in the INIT state.
+ */
+ ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n",
+ cmd, ret);
+ return ret;
+ }
+ } else {
+ cmd_buf = sev->cmd_buf;
+ }
/* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;
+ phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, psp_timeout);
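The backup command buffer introduced above covers exactly one known nesting case; one illustrative call chain (not part of the patch) is:

/*
 * Illustrative nesting covered by cmd_buf_backup (not part of the patch):
 *
 *   __sev_do_cmd_locked(cmd)                       uses sev->cmd_buf
 *     ... firmware command completes ...
 *     snp_reclaim_cmd_buf()
 *       snp_reclaim_pages(..., locked = true)
 *         __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM)
 *                                                  uses sev->cmd_buf_backup
 */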
@@ -390,20 +951,41 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
ret = sev_write_init_ex_file_if_required(cmd);
}
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- buf_len, false);
-
/*
* Copy potential output from the PSP back to data. Do this even on
* failure in case the caller wants to glean something from the error.
*/
- if (data)
- memcpy(data, sev->cmd_buf, buf_len);
+ if (data) {
+ int ret_reclaim;
+ /*
+ * Restore the page state after the command completes.
+ */
+ ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf);
+ if (ret_reclaim) {
+ dev_err(sev->dev,
+ "SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n",
+ cmd, ret_reclaim);
+ return ret_reclaim;
+ }
+
+ memcpy(data, cmd_buf, buf_len);
+
+ if (sev->cmd_buf_backup_active)
+ sev->cmd_buf_backup_active = false;
+ else
+ sev->cmd_buf_active = false;
+
+ if (snp_unmap_cmd_buf_desc_list(desc_list))
+ return -EFAULT;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ buf_len, false);
return ret;
}
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
int rc;
@@ -413,6 +995,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
return rc;
}
+EXPORT_SYMBOL_GPL(sev_do_cmd);
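With sev_do_cmd() exported, code outside this file can issue firmware commands directly; a minimal hypothetical caller (not part of this patch) might look like:

/* Hypothetical external caller, not part of this patch. */
static int example_snp_reclaim_one_page(u64 spa)
{
	struct sev_data_snp_page_reclaim data = { .paddr = spa };
	int psp_ret;

	/* Takes sev_cmd_mutex internally and forwards to __sev_do_cmd_locked(). */
	return sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &psp_ret);
}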
static int __sev_init_locked(int *error)
{
@@ -427,7 +1010,7 @@ static int __sev_init_locked(int *error)
data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_len = SEV_ES_TMR_SIZE;
+ data.tmr_len = sev_es_tmr_size;
}
return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
@@ -450,7 +1033,7 @@ static int __sev_init_ex_locked(int *error)
data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_len = SEV_ES_TMR_SIZE;
+ data.tmr_len = sev_es_tmr_size;
}
return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
@@ -464,26 +1047,218 @@ static inline int __sev_do_init_locked(int *psp_ret)
return __sev_init_locked(psp_ret);
}
-static int __sev_platform_init_locked(int *error)
+static void snp_set_hsave_pa(void *arg)
+{
+ wrmsrl(MSR_VM_HSAVE_PA, 0);
+}
+
+static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
+{
+ struct sev_data_range_list *range_list = arg;
+ struct sev_data_range *range = &range_list->ranges[range_list->num_elements];
+ size_t size;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+ * does not exceed the page-sized argument buffer.
+ */
+ if ((range_list->num_elements * sizeof(struct sev_data_range) +
+ sizeof(struct sev_data_range_list)) > PAGE_SIZE)
+ return -E2BIG;
+
+ switch (rs->desc) {
+ case E820_TYPE_RESERVED:
+ case E820_TYPE_PMEM:
+ case E820_TYPE_ACPI:
+ range->base = rs->start & PAGE_MASK;
+ size = PAGE_ALIGN((rs->end + 1) - rs->start);
+ range->page_count = size >> PAGE_SHIFT;
+ range_list->num_elements++;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __sev_snp_init_locked(int *error)
{
- int rc = 0, psp_ret = SEV_RET_NO_FW_CALL;
struct psp_device *psp = psp_master;
+ struct sev_data_snp_init_ex data;
struct sev_device *sev;
+ void *arg = &data;
+ int cmd, rc = 0;
- if (!psp || !psp->sev_data)
+ if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->snp_initialized)
return 0;
- if (sev_init_ex_buffer) {
- rc = sev_read_init_ex_file();
- if (rc)
+ if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
+ dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
+ SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
+ return 0;
+ }
+
+ /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
+ on_each_cpu(snp_set_hsave_pa, NULL, 1);
+
+ /*
+ * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list
+ * of system physical address ranges to convert into HV-fixed page
+ * states during the RMP initialization. For instance, the memory that
+ * UEFI reserves should be included in that list. This allows system
+ * components that occasionally write to memory (e.g. logging to UEFI
+ * reserved regions) to not fail due to RMP initialization and SNP
+ * enablement.
+ *
+ */
+ if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ /*
+ * Firmware checks that the pages containing the ranges enumerated
+ * in the RANGES structure are either in the default page state or in the
+ * firmware page state.
+ */
+ snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!snp_range_list) {
+ dev_err(sev->dev,
+ "SEV: SNP_INIT_EX range list memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Retrieve all reserved memory regions from the e820 memory map
+ * to be set up as HV-fixed pages.
+ */
+ rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0,
+ snp_range_list, snp_filter_reserved_mem_regions);
+ if (rc) {
+ dev_err(sev->dev,
+ "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc);
return rc;
+ }
+
+ memset(&data, 0, sizeof(data));
+ data.init_rmp = 1;
+ data.list_paddr_en = 1;
+ data.list_paddr = __psp_pa(snp_range_list);
+ cmd = SEV_CMD_SNP_INIT_EX;
+ } else {
+ cmd = SEV_CMD_SNP_INIT;
+ arg = NULL;
+ }
+
+ /*
+ * The following sequence must be issued before launching the first SNP
+ * guest to ensure all dirty cache lines are flushed, including from
+ * updates to the RMP table itself via the RMPUPDATE instruction:
+ *
+ * - WBINVD on all running CPUs
+ * - SEV_CMD_SNP_INIT[_EX] firmware command
+ * - WBINVD on all running CPUs
+ * - SEV_CMD_SNP_DF_FLUSH firmware command
+ */
+ wbinvd_on_all_cpus();
+
+ rc = __sev_do_cmd_locked(cmd, arg, error);
+ if (rc)
+ return rc;
+
+ /* Prepare for first SNP guest launch after INIT. */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ sev->snp_initialized = true;
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+
+ sev_es_tmr_size = SNP_TMR_SIZE;
+
+ return rc;
+}
+
+static void __sev_platform_init_handle_tmr(struct sev_device *sev)
+{
+ if (sev_es_tmr)
+ return;
+
+ /* Obtain the TMR memory area for SEV-ES use */
+ sev_es_tmr = sev_fw_alloc(sev_es_tmr_size);
+ if (sev_es_tmr) {
+ /* Must flush the cache before giving it to the firmware */
+ if (!sev->snp_initialized)
+ clflush_cache_range(sev_es_tmr, sev_es_tmr_size);
+ } else {
+ dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ }
+}
+
+/*
+ * If an init_ex_path is provided, allocate a buffer for the file and
+ * read in the contents. Additionally, if SNP is initialized, convert
+ * the buffer pages to firmware pages.
+ */
+static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
+{
+ struct page *page;
+ int rc;
+
+ if (!init_ex_path)
+ return 0;
+
+ if (sev_init_ex_buffer)
+ return 0;
+
+ page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH));
+ if (!page) {
+ dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ sev_init_ex_buffer = page_address(page);
+
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+
+ /* If SEV-SNP is initialized, transition to firmware page. */
+ if (sev->snp_initialized) {
+ unsigned long npages;
+
+ npages = 1UL << get_order(NV_LENGTH);
+ if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) {
+ dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n");
+ return -ENOMEM;
+ }
}
+ return 0;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ int rc, psp_ret = SEV_RET_NO_FW_CALL;
+ struct sev_device *sev;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ sev = psp_master->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ __sev_platform_init_handle_tmr(sev);
+
+ rc = __sev_platform_init_handle_init_ex_path(sev);
+ if (rc)
+ return rc;
+
rc = __sev_do_init_locked(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
@@ -520,12 +1295,46 @@ static int __sev_platform_init_locked(int *error)
return 0;
}
-int sev_platform_init(int *error)
+static int _sev_platform_init_locked(struct sev_platform_init_args *args)
+{
+ struct sev_device *sev;
+ int rc;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ sev = psp_master->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ /*
+ * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
+ * so perform SEV-SNP initialization at probe time.
+ */
+ rc = __sev_snp_init_locked(&args->error);
+ if (rc && rc != -ENODEV) {
+ /*
+ * Don't abort the probe if SNP INIT failed,
+ * continue to initialize the legacy SEV firmware.
+ */
+ dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
+ rc, args->error);
+ }
+
+ /* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
+ if (args->probe && !psp_init_on_probe)
+ return 0;
+
+ return __sev_platform_init_locked(&args->error);
+}
+
+int sev_platform_init(struct sev_platform_init_args *args)
{
int rc;
mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
+ rc = _sev_platform_init_locked(args);
mutex_unlock(&sev_cmd_mutex);
return rc;
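Callers of the reworked sev_platform_init() now pass an args struct instead of a bare error pointer; a hypothetical non-probe caller (not part of this patch) would look like:

/* Hypothetical caller of the reworked interface, not part of this patch. */
static int example_init_sev_platform(void)
{
	struct sev_platform_init_args args = { .probe = false };
	int rc;

	rc = sev_platform_init(&args);
	if (rc)
		pr_err("SEV: platform init failed, rc %d, fw error %#x\n",
		       rc, args.error);

	return rc;
}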
@@ -556,17 +1365,6 @@ static int __sev_platform_shutdown_locked(int *error)
return ret;
}
-static int sev_platform_shutdown(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
static int sev_get_platform_state(int *state, int *error)
{
struct sev_user_data_status data;
@@ -842,6 +1640,72 @@ fw_err:
return ret;
}
+static int __sev_snp_shutdown_locked(int *error, bool panic)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_shutdown_ex data;
+ int ret;
+
+ if (!sev->snp_initialized)
+ return 0;
+
+ memset(&data, 0, sizeof(data));
+ data.len = sizeof(data);
+ data.iommu_snp_shutdown = 1;
+
+ /*
+ * If invoked during panic handling, local interrupts are disabled
+ * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called.
+ * In that case, a wbinvd() is done on remote CPUs via the NMI
+ * callback, so only a local wbinvd() is needed here.
+ */
+ if (!panic)
+ wbinvd_on_all_cpus();
+ else
+ wbinvd();
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
+ /* SHUTDOWN may require DF_FLUSH */
+ if (*error == SEV_RET_DFFLUSH_REQUIRED) {
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ if (ret) {
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ return ret;
+ }
+ /* reissue the shutdown command */
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data,
+ error);
+ }
+ if (ret) {
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ return ret;
+ }
+
+ /*
+ * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP
+ * enforcement by the IOMMU and also transitions all pages
+ * associated with the IOMMU to the Reclaim state.
+ * Firmware versions before 1.53 transitioned the IOMMU pages to the
+ * Hypervisor state, but accounted for the number of assigned 4kB pages
+ * in a 2M page incorrectly by not transitioning them to the Reclaim
+ * state. This resulted in an RMP #PF when the 2M page containing those
+ * pages was later accessed during kexec boot. Firmware now transitions
+ * these pages to the Reclaim state, and the hypervisor needs to
+ * transition them to the shared state. SNP firmware version 1.53 or
+ * later is required for kexec boot.
+ */
+ ret = amd_iommu_snp_disable();
+ if (ret) {
+ dev_err(sev->dev, "SNP IOMMU shutdown failed\n");
+ return ret;
+ }
+
+ sev->snp_initialized = false;
+ dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+
+ return ret;
+}
+
static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
@@ -1084,6 +1948,85 @@ e_free_pdh:
return ret;
}
+static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_addr buf;
+ struct page *status_page;
+ void *data;
+ int ret;
+
+ if (!sev->snp_initialized || !argp->data)
+ return -EINVAL;
+
+ status_page = alloc_page(GFP_KERNEL_ACCOUNT);
+ if (!status_page)
+ return -ENOMEM;
+
+ data = page_address(status_page);
+
+ /*
+ * Firmware expects the status page to be in the firmware-owned state,
+ * otherwise it will report the firmware error code INVALID_PAGE_STATE (0x1A).
+ */
+ if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ buf.address = __psp_pa(data);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
+
+ /*
+ * The status page will be transitioned to the Reclaim state upon
+ * success, or left in the Firmware state on failure. Use
+ * snp_reclaim_pages() to transition it back to the Hypervisor-owned
+ * state in either case.
+ */
+ if (snp_reclaim_pages(__pa(data), 1, true))
+ return -EFAULT;
+
+ if (ret)
+ goto cleanup;
+
+ if (copy_to_user((void __user *)argp->data, data,
+ sizeof(struct sev_user_data_snp_status)))
+ ret = -EFAULT;
+
+cleanup:
+ __free_pages(status_page, 0);
+ return ret;
+}
+
+static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_commit buf;
+
+ if (!sev->snp_initialized)
+ return -EINVAL;
+
+ buf.len = sizeof(buf);
+
+ return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+}
+
+static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_snp_config config;
+
+ if (!sev->snp_initialized || !argp->data)
+ return -EINVAL;
+
+ if (!writable)
+ return -EPERM;
+
+ if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
+ return -EFAULT;
+
+ return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+}
+
static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -1135,6 +2078,15 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
case SEV_GET_ID2:
ret = sev_ioctl_do_get_id2(&input);
break;
+ case SNP_PLATFORM_STATUS:
+ ret = sev_ioctl_do_snp_platform_status(&input);
+ break;
+ case SNP_COMMIT:
+ ret = sev_ioctl_do_snp_commit(&input);
+ break;
+ case SNP_SET_CONFIG:
+ ret = sev_ioctl_do_snp_set_config(&input, writable);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -1245,10 +2197,12 @@ int sev_dev_init(struct psp_device *psp)
if (!sev)
goto e_err;
- sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);
if (!sev->cmd_buf)
goto e_sev;
+ sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;
+
psp->sev_data = sev;
sev->dev = dev;
@@ -1287,24 +2241,51 @@ e_err:
return ret;
}
-static void sev_firmware_shutdown(struct sev_device *sev)
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
- sev_platform_shutdown(NULL);
+ int error;
+
+ __sev_platform_shutdown_locked(NULL);
if (sev_es_tmr) {
- /* The TMR area was encrypted, flush it from the cache */
- wbinvd_on_all_cpus();
+ /*
+ * The TMR area was encrypted, flush it from the cache.
+ *
+ * If invoked during panic handling, local interrupts are
+ * disabled and all CPUs are stopped, so wbinvd_on_all_cpus()
+ * can't be used. In that case, wbinvd() is done on remote CPUs
+ * via the NMI callback, and done for this CPU later during
+ * SNP shutdown, so wbinvd_on_all_cpus() can be skipped.
+ */
+ if (!panic)
+ wbinvd_on_all_cpus();
- free_pages((unsigned long)sev_es_tmr,
- get_order(SEV_ES_TMR_SIZE));
+ __snp_free_firmware_pages(virt_to_page(sev_es_tmr),
+ get_order(sev_es_tmr_size),
+ true);
sev_es_tmr = NULL;
}
if (sev_init_ex_buffer) {
- free_pages((unsigned long)sev_init_ex_buffer,
- get_order(NV_LENGTH));
+ __snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer),
+ get_order(NV_LENGTH),
+ true);
sev_init_ex_buffer = NULL;
}
+
+ if (snp_range_list) {
+ kfree(snp_range_list);
+ snp_range_list = NULL;
+ }
+
+ __sev_snp_shutdown_locked(&error, panic);
+}
+
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+ mutex_lock(&sev_cmd_mutex);
+ __sev_firmware_shutdown(sev, false);
+ mutex_unlock(&sev_cmd_mutex);
}
void sev_dev_destroy(struct psp_device *psp)
@@ -1322,6 +2303,29 @@ void sev_dev_destroy(struct psp_device *psp)
psp_clear_sev_irq_handler(psp);
}
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ /*
+ * If sev_cmd_mutex is already acquired, then it's likely
+ * another PSP command is in flight and issuing a shutdown
+ * would fail in unexpected ways. Rather than create even
+ * more confusion during a panic, just bail out here.
+ */
+ if (mutex_is_locked(&sev_cmd_mutex))
+ return NOTIFY_DONE;
+
+ __sev_firmware_shutdown(sev, true);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -1335,7 +2339,8 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- int error, rc;
+ struct sev_platform_init_args args = {0};
+ int rc;
if (!sev)
return;
@@ -1348,36 +2353,18 @@ void sev_pci_init(void)
if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
- /* If an init_ex_path is provided rely on INIT_EX for PSP initialization
- * instead of INIT.
- */
- if (init_ex_path) {
- sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
- if (!sev_init_ex_buffer) {
- dev_err(sev->dev,
- "SEV: INIT_EX NV memory allocation failed\n");
- goto err;
- }
- }
-
- /* Obtain the TMR memory area for SEV-ES use */
- sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
- if (sev_es_tmr)
- /* Must flush the cache before giving it to the firmware */
- clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
- else
- dev_warn(sev->dev,
- "SEV: TMR allocation failed, SEV-ES support unavailable\n");
-
- if (!psp_init_on_probe)
- return;
-
/* Initialize the platform */
- rc = sev_platform_init(&error);
+ args.probe = true;
+ rc = sev_platform_init(&args);
if (rc)
dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- error, rc);
+ args.error, rc);
+ dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
+ "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
return;
err:
@@ -1392,4 +2379,7 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
+
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
}
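Taken together, the shutdown changes above yield the following panic-time path (summary sketch, not part of the patch):

/*
 * Summary of the panic-time shutdown path (not part of the patch):
 *
 *   panic_notifier_list
 *     snp_shutdown_on_panic()          bail out if sev_cmd_mutex is held
 *       __sev_firmware_shutdown(sev, panic = true)
 *         __sev_platform_shutdown_locked()
 *         free the TMR, INIT_EX and range-list buffers
 *         __sev_snp_shutdown_locked(..., panic = true)
 *           SEV_CMD_SNP_SHUTDOWN_EX (retried after DF_FLUSH if required)
 *           amd_iommu_snp_disable()
 */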
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 778c95155e74..3e4e5574e88a 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -52,6 +52,11 @@ struct sev_device {
u8 build;
void *cmd_buf;
+ void *cmd_buf_backup;
+ bool cmd_buf_active;
+ bool cmd_buf_backup_active;
+
+ bool snp_initialized;
};
int sev_dev_init(struct psp_device *psp);
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 1b13b4aa16ec..a235e6c300f1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
theend:
pm_runtime_put_autosuspend(rkc->dev);
+ rk_hash_unprepare(engine, breq);
+
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
- rk_hash_unprepare(engine, breq);
-
return 0;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2621ff8a9376..de53eddf6796 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
- struct virtio_crypto_ctrl_header *header, void *para,
+ struct virtio_crypto_ctrl_header *header,
+ struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
- memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index dcf2b39e1048..1a3e6aafbdcc 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,31 +316,27 @@ static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
- const unsigned long end)
+static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
+ struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
- struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
- struct acpi_cedt_cfmws *cfmws;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
struct resource *res;
int rc;
- cfmws = (struct acpi_cedt_cfmws *) header;
-
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
- return 0;
+ return rc;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@@ -376,7 +372,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
- return 0;
+ return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -420,16 +416,7 @@ err_xormap:
put_device(&cxld->dev);
else
rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decode range: %pr", res);
- return rc;
- }
- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
- dev_name(&cxld->dev),
- phys_to_target_node(cxld->hpa_range.start),
- cxld->hpa_range.start, cxld->hpa_range.end);
-
- return 0;
+ return rc;
err_insert:
kfree(res->name);
@@ -438,6 +425,29 @@ err_name:
return -ENOMEM;
}
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
+ struct cxl_cfmws_context *ctx = arg;
+ struct device *dev = ctx->dev;
+ int rc;
+
+ rc = __cxl_parse_cfmws(cfmws, ctx);
+ if (rc)
+ dev_err(dev,
+ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1, rc);
+ else
+ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
+ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+
+ /* never fail cxl_acpi load for a single window failure */
+ return 0;
+}
+
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 6fe11546889f..08fd0baea7a0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
return 0;
}
-static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
- struct list_head *list)
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+ struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf;
-
- dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
- if (!dpa_perf)
- return;
-
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
- list_add_tail(&dpa_perf->list, list);
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
@@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
dent->coord.read_latency, dent->coord.write_latency);
}
-static void free_perf_ents(void *data)
-{
- struct cxl_memdev_state *mds = data;
- struct cxl_dpa_perf *dpa_perf, *n;
- LIST_HEAD(discard);
-
- list_splice_tail_init(&mds->ram_perf_list, &discard);
- list_splice_tail_init(&mds->pmem_perf_list, &discard);
- list_for_each_entry_safe(dpa_perf, n, &discard, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
@@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
xa_for_each(dsmas_xa, index, dent) {
if (resource_size(&cxlds->ram_res) &&
range_contains(&ram_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->ram_perf_list);
+ update_perf_entry(dev, dent, &mds->ram_perf);
else if (resource_size(&cxlds->pmem_res) &&
range_contains(&pmem_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->pmem_perf_list);
+ update_perf_entry(dev, dent, &mds->pmem_perf);
else
dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
dent->dpa_range.start);
}
-
- devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
}
static int match_cxlrd_qos_class(struct device *dev, void *data)
@@ -293,24 +270,24 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
return 0;
}
-static void cxl_qos_match(struct cxl_port *root_port,
- struct list_head *work_list,
- struct list_head *discard_list)
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf, *n;
+ *dpa_perf = (struct cxl_dpa_perf) {
+ .qos_class = CXL_QOS_CLASS_INVALID,
+ };
+}
- list_for_each_entry_safe(dpa_perf, n, work_list, list) {
- int rc;
+static bool cxl_qos_match(struct cxl_port *root_port,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+ return false;
- if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
- return;
+ if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+ match_cxlrd_qos_class))
+ return false;
- rc = device_for_each_child(&root_port->dev,
- (void *)&dpa_perf->qos_class,
- match_cxlrd_qos_class);
- if (!rc)
- list_move_tail(&dpa_perf->list, discard_list);
- }
+ return true;
}
static int match_cxlrd_hb(struct device *dev, void *data)
@@ -334,23 +311,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
return 0;
}
-static void discard_dpa_perf(struct list_head *list)
-{
- struct cxl_dpa_perf *dpa_perf, *n;
-
- list_for_each_entry_safe(dpa_perf, n, list, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
-
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- LIST_HEAD(__discard);
- struct list_head *discard __free(dpa_perf) = &__discard;
struct cxl_port *root_port;
int rc;
@@ -363,16 +327,17 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
root_port = &cxl_root->port;
/* Check that the QTG IDs are all sane between end device and root decoders */
- cxl_qos_match(root_port, &mds->ram_perf_list, discard);
- cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+ if (!cxl_qos_match(root_port, &mds->ram_perf))
+ reset_dpa_perf(&mds->ram_perf);
+ if (!cxl_qos_match(root_port, &mds->pmem_perf))
+ reset_dpa_perf(&mds->pmem_perf);
/* Check to make sure that the device's host bridge is under a root decoder */
rc = device_for_each_child(&root_port->dev,
- (void *)cxlmd->endpoint->host_bridge,
- match_cxlrd_hb);
+ cxlmd->endpoint->host_bridge, match_cxlrd_hb);
if (!rc) {
- list_splice_tail_init(&mds->ram_perf_list, discard);
- list_splice_tail_init(&mds->pmem_perf_list, discard);
+ reset_dpa_perf(&mds->ram_perf);
+ reset_dpa_perf(&mds->pmem_perf);
}
return rc;
@@ -417,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_memdev_set_qos_class(cxlds, dsmas_xa);
cxl_qos_class_verify(cxlmd);
+ cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 27166a411705..9adda4795eb7 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- INIT_LIST_HEAD(&mds->ram_perf_list);
- INIT_LIST_HEAD(&mds->pmem_perf_list);
+ mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index dae8802ecdb0..d4e259f3a7e9 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static ssize_t pmem_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+ __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
+ &dev_attr_pmem_qos_class.attr,
NULL,
};
+static ssize_t ram_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+ __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
+ &dev_attr_ram_qos_class.attr,
NULL,
};
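Once CDAT parsing has populated the perf entries, the attributes above expose per-partition QoS classes in sysfs; an illustrative layout, assuming the usual cxl memdev naming, is shown below (not part of the patch):

/*
 * Illustrative sysfs layout (not part of the patch):
 *
 *   /sys/bus/cxl/devices/mem0/ram/qos_class
 *   /sys/bus/cxl/devices/mem0/pmem/qos_class
 *
 * The is_visible() callbacks added below hide each file while the
 * corresponding qos_class is still CXL_QOS_CLASS_INVALID.
 */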
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
.is_visible = cxl_memdev_visible,
};
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_ram_qos_class.attr)
+ if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
+ .is_visible = cxl_ram_visible,
};
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_pmem_qos_class.attr)
+ if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
+ .is_visible = cxl_pmem_visible,
};
static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
NULL,
};
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 6c9c8d92f8f7..e9e6c81ce034 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -477,9 +477,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
+ if (!allowed && info->mem_enabled) {
+ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+ return -ENXIO;
}
/*
@@ -932,11 +932,21 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct device *dev = &cxlds->cxlmd->dev;
+
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return;
+ }
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
- cxl_handle_endpoint_cor_ras(cxlds);
+ cxl_handle_endpoint_cor_ras(cxlds);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
@@ -948,16 +958,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
struct device *dev = &cxlmd->dev;
bool ue;
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+ /*
+ * A frozen channel indicates an impending reset which is fatal to
+ * CXL.mem operation, and will likely crash the system. On the off
+ * chance the situation is recoverable dump the status of the RAS
+ * capability registers and bounce the active state of the memdev.
+ */
+ ue = cxl_handle_endpoint_ras(cxlds);
+ }
- /*
- * A frozen channel indicates an impending reset which is fatal to
- * CXL.mem operation, and will likely crash the system. On the off
- * chance the situation is recoverable dump the status of the RAS
- * capability registers and bounce the active state of the memdev.
- */
- ue = cxl_handle_endpoint_ras(cxlds);
switch (state) {
case pci_channel_io_normal:
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index ce0e2d82bb2b..4c7fd2d5cccb 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -730,12 +730,17 @@ static int match_auto_decoder(struct device *dev, void *data)
return 0;
}
-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_region *cxlr)
+static struct cxl_decoder *
+cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
+ if (port == cxled_to_port(cxled))
+ return &cxled->cxld;
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
@@ -753,8 +758,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
return to_cxl_decoder(dev);
}
-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
- struct cxl_region *cxlr)
+static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
+ struct cxl_decoder *cxld_iter = rr->decoder;
+
+ /*
+ * Allow the out of order assembly of auto-discovered regions.
+ * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
+ * in HPA order. Confirm that the decoder with the lesser HPA
+ * starting address has the lesser id.
+ */
+ dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
+ dev_name(&cxld->dev), cxld->id,
+ dev_name(&cxld_iter->dev), cxld_iter->id);
+
+ if (cxld_iter->id > cxld->id)
+ return true;
+
+ return false;
+}
+
+static struct cxl_region_ref *
+alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -764,16 +792,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
xa_for_each(&port->regions, index, iter) {
struct cxl_region_params *ip = &iter->region->params;
- if (!ip->res)
+ if (!ip->res || ip->res->start < p->res->start)
continue;
- if (ip->res->start > p->res->start) {
- dev_dbg(&cxlr->dev,
- "%s: HPA order violation %s:%pr vs %pr\n",
- dev_name(&port->dev),
- dev_name(&iter->region->dev), ip->res, p->res);
- return ERR_PTR(-EBUSY);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
+ if (auto_order_ok(port, iter->region, cxld))
+ continue;
}
+ dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+
+ return ERR_PTR(-EBUSY);
}
cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
@@ -853,10 +886,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
{
struct cxl_decoder *cxld;
- if (port == cxled_to_port(cxled))
- cxld = &cxled->cxld;
- else
- cxld = cxl_region_find_decoder(port, cxlr);
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (!cxld) {
dev_dbg(&cxlr->dev, "%s: no decoder available\n",
dev_name(&port->dev));
@@ -953,7 +983,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr);
+ cxl_rr = alloc_region_ref(port, cxlr, cxled);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6017c0c57b4..003feebab79b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5303d6942b88..20fb3b35e89e 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,13 +395,11 @@ enum cxl_devtype {
/**
* struct cxl_dpa_perf - DPA performance property entry
- * @list - list entry
* @dpa_range - range for DPA address
* @coord - QoS performance data (i.e. latency, bandwidth)
* @qos_class - QoS Class cookies
*/
struct cxl_dpa_perf {
- struct list_head list;
struct range dpa_range;
struct access_coordinate coord;
int qos_class;
@@ -471,8 +469,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf_list: performance data entries matched to RAM
- * @pmem_perf_list: performance data entries matched to PMEM
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -494,8 +492,8 @@ struct cxl_memdev_state {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
- struct list_head ram_perf_list;
- struct list_head pmem_perf_list;
+ struct cxl_dpa_perf ram_perf;
+ struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index c5c9d8e0d88d..0c79d9ce877c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -215,52 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev,
}
static DEVICE_ATTR_WO(trigger_poison_list);
-static ssize_t ram_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->ram_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_ram_qos_class =
- __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
-
-static ssize_t pmem_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->pmem_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_pmem_qos_class =
- __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
-
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
@@ -272,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
mds->poison.enabled_cmds))
return 0;
- if (a == &dev_attr_pmem_qos_class.attr)
- if (list_empty(&mds->pmem_perf_list))
- return 0;
-
- if (a == &dev_attr_ram_qos_class.attr)
- if (list_empty(&mds->ram_perf_list))
- return 0;
-
return a->mode;
}
static struct attribute *cxl_mem_attrs[] = {
&dev_attr_trigger_poison_list.attr,
- &dev_attr_ram_qos_class.attr,
- &dev_attr_pmem_qos_class.attr,
NULL
};
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 233e7c42c161..2ff361e756d6 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -974,61 +974,6 @@ static struct pci_driver cxl_pci_driver = {
},
};
-#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
-static void cxl_cper_event_call(enum cxl_event_type ev_type,
- struct cxl_cper_event_rec *rec)
-{
- struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- enum cxl_event_log_type log_type;
- struct cxl_dev_state *cxlds;
- unsigned int devfn;
- u32 hdr_flags;
-
- devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
- pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
- device_id->bus_num, devfn);
- if (!pdev)
- return;
-
- guard(pci_dev)(pdev);
- if (pdev->driver != &cxl_pci_driver)
- return;
-
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
- /* Fabricate a log type */
- hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
- log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
-
- cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
- &uuid_null, &rec->event);
-}
-
-static int __init cxl_pci_driver_init(void)
-{
- int rc;
-
- rc = cxl_cper_register_callback(cxl_cper_event_call);
- if (rc)
- return rc;
-
- rc = pci_register_driver(&cxl_pci_driver);
- if (rc)
- cxl_cper_unregister_callback(cxl_cper_event_call);
-
- return rc;
-}
-
-static void __exit cxl_pci_driver_exit(void)
-{
- pci_unregister_driver(&cxl_pci_driver);
- cxl_cper_unregister_callback(cxl_cper_event_call);
-}
-
-module_init(cxl_pci_driver_init);
-module_exit(cxl_pci_driver_exit);
+module_pci_driver(cxl_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 54e528779877..aca71d7fccc1 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -546,8 +546,7 @@ static int dax_fs_init(void)
int rc;
dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
- (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
init_once);
if (!dax_cache)
return -ENOMEM;
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index e0fd99e61a2d..0393a9bba3a8 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -102,7 +102,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
*
* * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
* respectively &mmu_interval_notifier callbacks. This means any code required
- * for fence completeion cannot allocate memory with GFP_NOFS or GFP_NOIO.
+ * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
* Only GFP_ATOMIC is permissible, which might fail.
*
* Note that only GPU drivers have a reasonable excuse for both requiring
@@ -522,7 +522,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
EXPORT_SYMBOL(dma_fence_wait_timeout);
/**
- * dma_fence_release - default relese function for fences
+ * dma_fence_release - default release function for fences
* @kref: &dma_fence.recfount
*
* This is the default release functions for &dma_fence. Drivers shouldn't call
@@ -974,8 +974,8 @@ void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
EXPORT_SYMBOL(dma_fence_set_deadline);
/**
- * dma_fence_describe - Dump fence describtion into seq_file
- * @fence: the 6fence to describe
+ * dma_fence_describe - Dump fence description into seq_file
+ * @fence: the fence to describe
* @seq: the seq_file to put the textual description into
*
* Dump a textual description of the fence and it's state into the seq_file.
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index eb8b733065b2..e2869fb31140 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -405,7 +405,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
*
* Beware that the iterator can be restarted. Code which accumulates statistics
* or similar needs to check for this with dma_resv_iter_is_restarted(). For
- * this reason prefer the locked dma_resv_iter_first() whenver possible.
+ * this reason prefer the locked dma_resv_iter_first() whenever possible.
*
* Returns the first fence from an unlocked dma_resv obj.
*/
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
*
* Beware that the iterator can be restarted. Code which accumulates statistics
* or similar needs to check for this with dma_resv_iter_is_restarted(). For
- * this reason prefer the locked dma_resv_iter_next() whenver possible.
+ * this reason prefer the locked dma_resv_iter_next() whenever possible.
*
* Returns the next fence from an unlocked dma_resv obj.
*/
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index b38786f0ad79..b75fdaffad9a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+ * over different buses. Ensure the LL data reaches memory before the
+ * doorbell register is toggled: issue a dummy read from the remote LL
+ * memory, in the hope that the MRd TLP will return only after the last
+ * MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
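
The comment above documents a posted-write ordering idiom: a read on the same path as the preceding writes does not complete until those writes have landed, so reading back from the remote LL memory guarantees the linked list is visible before the doorbell is rung. A minimal, hypothetical sketch of the idiom (kernel I/O accessors assumed; names are illustrative, not driver code):

static void demo_ring_doorbell(void __iomem *ll_base, void __iomem *db_reg,
			       u32 db_val)
{
	/* Dummy read: returns only after earlier writes to ll_base completed */
	(void)readl(ll_base);

	/* Now the engine can safely be told to fetch the linked list */
	writel(db_val, db_reg);
}
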
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
}
+
+ dw_edma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 00b735a0202a..10e8f0715114 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
- u32 num_ch = 0;
- int id;
-
- for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
- if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
- num_ch++;
- }
-
- if (num_ch > HDMA_V0_MAX_NR_CH)
- num_ch = HDMA_V0_MAX_NR_CH;
-
- return (u16)num_ch;
+ /*
+ * The HDMA IP has no way to report the number of hardware channels
+ * available, so default to the maximum and let the platform set the
+ * right number of channels.
+ */
+ return HDMA_V0_MAX_NR_CH;
}
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+ * over different buses. Ensure the LL data reaches memory before the
+ * doorbell register is toggled: issue a dummy read from the remote LL
+ * memory, in the hope that the MRd TLP will return only after the last
+ * MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Interrupt enable&unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+ HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
/* Channel control */
SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+ dw_hdma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
index a974abdf8aaf..eab5fd7177e5 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
@@ -15,7 +15,7 @@
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
#define HDMA_V0_ABORT_INT_MASK BIT(2)
#define HDMA_V0_STOP_INT_MASK BIT(0)
#define HDMA_V0_LINKLIST_EN BIT(0)
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b53f46245c37..793f1a7ad5e3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_addr_width : cfg->dst_addr_width;
+ cfg->src_maxburst : cfg->dst_maxburst;
nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
/* enable DMLOE/SMLOE */
if (cfg->direction == DMA_MEM_TO_DEV) {
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index bb5221158a77..f5e216b157c7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -30,8 +30,9 @@
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 45cc419b1b4a..d36e28b9c767 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -10,6 +10,7 @@
*/
#include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
- dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+ FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
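
A note on the idiom above: FIELD_GET() with the same mask as both arguments extracts the mask shifted down to bit 0, i.e. the largest value the field can hold, so the maximum segment size is now derived from EDMA_TCD_ITER_MASK (GENMASK(14, 0), giving 0x7fff) instead of being hard-coded. A minimal user-space sketch of the same arithmetic, using simplified stand-ins for the kernel macros:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's GENMASK() and FIELD_GET() */
#define DEMO_GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
#define DEMO_FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctz(m))

int main(void)
{
	uint32_t iter_mask = DEMO_GENMASK(14, 0);	/* bits 14..0 == 0x7fff */

	/* The mask applied to itself is the field's maximum value. */
	assert(DEMO_FIELD_GET(iter_mask, iter_mask) == 0x7fff);
	return 0;
}
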
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index f405c77060ad..5005e138fc23 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
u8 __reserved1[2];
u8 cfg8b_w1;
} __packed;
+ struct {
+ __le32 __reserved2;
+ __le32 cmd;
+ } __packed;
__le64 data;
};
} __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
- u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- sdf->data = QDMA_SDDF_CMD(cmd);
-
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
- ddf->data = QDMA_SDDF_CMD(cmd);
+ sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF);
+
+ ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
}
/*
@@ -624,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
- void *block,
+ __iomem void *block,
int id)
{
bool duplicate;
@@ -1196,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1238,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 77f8885cf407..e5a94a93a3cc 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
spin_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
- h = evl->head;
+ h = status.head;
size = evl->size;
while (h != t) {
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index 9cfbd9b14c4c..f3f25ee676f3 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
spin_lock(&evl->lock);
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
evl_size = evl->size;
seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 47de3f93ff1e..d0f5db6cf1ed 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -300,7 +300,6 @@ struct idxd_evl {
unsigned int log_size;
/* The number of entries in the event log. */
u16 size;
- u16 head;
unsigned long *bmap;
bool batch_fail[IDXD_MAX_BATCH_IDENT];
};
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 14df1f1347a8..4954adc6bb60 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
static int idxd_init_evl(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ unsigned int evl_cache_size;
struct idxd_evl *evl;
+ const char *idxd_name;
if (idxd->hw.gen_cap.evl_support == 0)
return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
spin_lock_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
- idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
- sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
- 0, 0, NULL);
+ idxd_name = dev_name(idxd_confdev(idxd));
+ evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+ /*
+ * The completion record in evl_cache is copied to user space when
+ * handling a completion record page fault, so the cache must be
+ * created with user-copy support.
+ */
+ idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+ 0, 0, 0, evl_cache_size,
+ NULL);
if (!idxd->evl_cache) {
kfree(evl);
return -ENOMEM;
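
The kmem_cache_create_usercopy() call above whitelists the whole object for user copies because the completion record stored in evl_cache is copied out while handling a page fault; with CONFIG_HARDENED_USERCOPY, copy_to_user() from slab memory is only allowed from the whitelisted window. A minimal sketch of the general pattern, with purely illustrative structure and cache names, whitelisting only part of an object:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct demo_rec {
	u64 meta;		/* never exposed to user space */
	u8 payload[64];		/* copied to user space */
};

static struct kmem_cache *demo_cache;

static int demo_cache_init(void)
{
	/* Hardened usercopy rejects copy_to_user() from slab memory
	 * outside the [useroffset, useroffset + usersize) window.
	 */
	demo_cache = kmem_cache_create_usercopy("demo_rec",
				sizeof(struct demo_rec), 0, 0,
				offsetof(struct demo_rec, payload),
				sizeof_field(struct demo_rec, payload),
				NULL);
	return demo_cache ? 0 : -ENOMEM;
}
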
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index c8a0aa874b11..348aa21389a9 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
size = idxd->evl->size;
while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
h = (h + 1) % size;
}
- evl->head = h;
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
spin_unlock(&evl->lock);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 1ebfbe88e733..97ebc791a30b 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -747,8 +747,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (IS_ERR(xor_dev->clk))
return PTR_ERR(xor_dev->clk);
- ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
- mv_xor_v2_set_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1,
+ mv_xor_v2_set_msi_msg);
if (ret)
return ret;
@@ -851,7 +851,7 @@ free_hw_desq:
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
- platform_msi_domain_free_irqs(&pdev->dev);
+ platform_device_msi_free_irqs_all(&pdev->dev);
return ret;
}
@@ -867,7 +867,7 @@ static void mv_xor_v2_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);
- platform_msi_domain_free_irqs(&pdev->dev);
+ platform_device_msi_free_irqs_all(&pdev->dev);
tasklet_kill(&xor_dev->irq_tasklet);
}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 1aa65e5de0f3..f79240734807 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
- dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index d63b93dc7047..202ac95227cb 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -696,7 +696,7 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
devm_free_irq(dev, virq, &dmadev->lldev);
}
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
#endif
}
@@ -706,8 +706,8 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#ifdef CONFIG_GENERIC_MSI_IRQ
int rc, i, virq;
- rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
- hidma_write_msi_msg);
+ rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
if (rc)
return rc;
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 5152bd1b0daf..64eaca80d736 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -29,6 +29,8 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+#define ASSERT_DPLL_PIN_REGISTERED(p) \
+ WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@@ -129,9 +131,9 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return -EINVAL;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_pins, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -209,9 +211,9 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_dplls, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -508,6 +510,26 @@ err_pin_prop:
return ERR_PTR(ret);
}
+static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ rtnl_lock();
+ rcu_assign_pointer(dev->dpll_pin, dpll_pin);
+ rtnl_unlock();
+}
+
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ WARN_ON(!dpll_pin);
+ dpll_netdev_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_set);
+
+void dpll_netdev_pin_clear(struct net_device *dev)
+{
+ dpll_netdev_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_clear);
+
/**
* dpll_pin_get - find existing or create new dpll pin
* @clock_id: clock_id of creator
@@ -560,11 +582,11 @@ void dpll_pin_put(struct dpll_pin *pin)
{
mutex_lock(&dpll_lock);
if (refcount_dec_and_test(&pin->refcount)) {
+ xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
- xa_erase(&dpll_pin_xa, pin->id);
dpll_pin_prop_free(&pin->prop);
- kfree(pin);
+ kfree_rcu(pin, rcu);
}
mutex_unlock(&dpll_lock);
}
@@ -631,6 +653,7 @@ static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv)
{
+ ASSERT_DPLL_PIN_REGISTERED(pin);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
if (xa_empty(&pin->dpll_refs))
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 717f715015c7..2b6d8ef1cdf3 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -47,6 +47,7 @@ struct dpll_device {
* @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
+ * @rcu: rcu_head for kfree_rcu()
**/
struct dpll_pin {
u32 id;
@@ -57,6 +58,7 @@ struct dpll_pin {
struct xarray parent_refs;
struct dpll_pin_properties prop;
refcount_t refcount;
+ struct rcu_head rcu;
};
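
The new @rcu member exists so that dpll_pin_put() above can switch from kfree() to kfree_rcu(): lockless readers such as dpll_netdev_pin() (rcu_dereference_rtnl() of dev->dpll_pin) may still hold the pointer when the last reference is dropped, so the object must only be freed after a grace period. A minimal sketch of the pattern, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	int payload;
	struct rcu_head rcu;	/* storage kfree_rcu() needs */
};

static struct demo_obj __rcu *demo_ptr;

static void demo_release(struct demo_obj *obj)
{
	/* Unpublish first, then defer the free past a grace period so
	 * concurrent RCU readers never dereference freed memory.
	 */
	RCU_INIT_POINTER(demo_ptr, NULL);
	kfree_rcu(obj, rcu);
}
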
/**
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 4ca9ad16cd95..98e6ad8528d3 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <net/genetlink.h>
#include "dpll_core.h"
#include "dpll_netlink.h"
@@ -48,18 +49,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
}
/**
- * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
- * @pin: pin pointer
- *
- * Return: byte size of pin handle attribute for given pin.
- */
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
-{
- return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
-}
-EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
-
-/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
* @pin: pin pointer
@@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
* * 0 - success
* * -EMSGSIZE - no space in message to attach pin handle
*/
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
{
if (!pin)
return 0;
@@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
return -EMSGSIZE;
return 0;
}
-EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
+/**
+ * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
+ * @dev: netdev from which to get the pin
+ *
+ * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
+ */
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev)
+{
+ return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
+}
static int
dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
@@ -121,14 +131,21 @@ dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_lock_status_error status_error = 0;
enum dpll_lock_status status;
int ret;
- ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status, extack);
+ ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status,
+ &status_error, extack);
if (ret)
return ret;
if (nla_put_u32(msg, DPLL_A_LOCK_STATUS, status))
return -EMSGSIZE;
+ if (status_error &&
+ (status == DPLL_LOCK_STATUS_UNLOCKED ||
+ status == DPLL_LOCK_STATUS_HOLDOVER) &&
+ nla_put_u32(msg, DPLL_A_LOCK_STATUS_ERROR, status_error))
+ return -EMSGSIZE;
return 0;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5a7f3fabee22..16c8de5050e5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -78,6 +78,7 @@ config EDAC_GHES
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
the AMD64 families (>= K8) of memory controllers.
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 537b9987a431..1f3520d76861 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/ras.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
@@ -1051,281 +1052,6 @@ static int fixup_node_id(int node_id, struct mce *m)
return nid - gpu_node_map.base_node_id + 1;
}
-/* Protect the PCI config register pairs used for DF indirect access. */
-static DEFINE_MUTEX(df_indirect_mutex);
-
-/*
- * Data Fabric Indirect Access uses FICAA/FICAD.
- *
- * Fabric Indirect Configuration Access Address (FICAA): Constructed based
- * on the device's Instance Id and the PCI function and register offset of
- * the desired register.
- *
- * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
- * and FICAD HI registers but so far we only need the LO register.
- *
- * Use Instance Id 0xFF to indicate a broadcast read.
- */
-#define DF_BROADCAST 0xFF
-static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
-{
- struct pci_dev *F4;
- u32 ficaa;
- int err = -ENODEV;
-
- if (node >= amd_nb_num())
- goto out;
-
- F4 = node_to_amd_nb(node)->link;
- if (!F4)
- goto out;
-
- ficaa = (instance_id == DF_BROADCAST) ? 0 : 1;
- ficaa |= reg & 0x3FC;
- ficaa |= (func & 0x7) << 11;
- ficaa |= instance_id << 16;
-
- mutex_lock(&df_indirect_mutex);
-
- err = pci_write_config_dword(F4, 0x5C, ficaa);
- if (err) {
- pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
- goto out_unlock;
- }
-
- err = pci_read_config_dword(F4, 0x98, lo);
- if (err)
- pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
-
-out_unlock:
- mutex_unlock(&df_indirect_mutex);
-
-out:
- return err;
-}
-
-static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
-{
- return __df_indirect_read(node, func, reg, instance_id, lo);
-}
-
-static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
-{
- return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
-}
-
-struct addr_ctx {
- u64 ret_addr;
- u32 tmp;
- u16 nid;
- u8 inst_id;
-};
-
-static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
-{
- u64 dram_base_addr, dram_limit_addr, dram_hole_base;
-
- u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
- u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
- u8 intlv_addr_sel, intlv_addr_bit;
- u8 num_intlv_bits, hashed_bit;
- u8 lgcy_mmio_hole_en, base = 0;
- u8 cs_mask, cs_id = 0;
- bool hash_enabled = false;
-
- struct addr_ctx ctx;
-
- memset(&ctx, 0, sizeof(ctx));
-
- /* Start from the normalized address */
- ctx.ret_addr = norm_addr;
-
- ctx.nid = nid;
- ctx.inst_id = umc;
-
- /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
- if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
- goto out_err;
-
- /* Remove HiAddrOffset from normalized address, if enabled: */
- if (ctx.tmp & BIT(0)) {
- u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
-
- if (norm_addr >= hi_addr_offset) {
- ctx.ret_addr -= hi_addr_offset;
- base = 1;
- }
- }
-
- /* Read D18F0x110 (DramBaseAddress). */
- if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
- goto out_err;
-
- /* Check if address range is valid. */
- if (!(ctx.tmp & BIT(0))) {
- pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
- __func__, ctx.tmp);
- goto out_err;
- }
-
- lgcy_mmio_hole_en = ctx.tmp & BIT(1);
- intlv_num_chan = (ctx.tmp >> 4) & 0xF;
- intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
- dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
-
- /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
- if (intlv_addr_sel > 3) {
- pr_err("%s: Invalid interleave address select %d.\n",
- __func__, intlv_addr_sel);
- goto out_err;
- }
-
- /* Read D18F0x114 (DramLimitAddress). */
- if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
- goto out_err;
-
- intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
- intlv_num_dies = (ctx.tmp >> 10) & 0x3;
- dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
-
- intlv_addr_bit = intlv_addr_sel + 8;
-
- /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
- switch (intlv_num_chan) {
- case 0: intlv_num_chan = 0; break;
- case 1: intlv_num_chan = 1; break;
- case 3: intlv_num_chan = 2; break;
- case 5: intlv_num_chan = 3; break;
- case 7: intlv_num_chan = 4; break;
-
- case 8: intlv_num_chan = 1;
- hash_enabled = true;
- break;
- default:
- pr_err("%s: Invalid number of interleaved channels %d.\n",
- __func__, intlv_num_chan);
- goto out_err;
- }
-
- num_intlv_bits = intlv_num_chan;
-
- if (intlv_num_dies > 2) {
- pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
- __func__, intlv_num_dies);
- goto out_err;
- }
-
- num_intlv_bits += intlv_num_dies;
-
- /* Add a bit if sockets are interleaved. */
- num_intlv_bits += intlv_num_sockets;
-
- /* Assert num_intlv_bits <= 4 */
- if (num_intlv_bits > 4) {
- pr_err("%s: Invalid interleave bits %d.\n",
- __func__, num_intlv_bits);
- goto out_err;
- }
-
- if (num_intlv_bits > 0) {
- u64 temp_addr_x, temp_addr_i, temp_addr_y;
- u8 die_id_bit, sock_id_bit, cs_fabric_id;
-
- /*
- * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
- * This is the fabric id for this coherent slave. Use
- * umc/channel# as instance id of the coherent slave
- * for FICAA.
- */
- if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
- goto out_err;
-
- cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
- die_id_bit = 0;
-
- /* If interleaved over more than 1 channel: */
- if (intlv_num_chan) {
- die_id_bit = intlv_num_chan;
- cs_mask = (1 << die_id_bit) - 1;
- cs_id = cs_fabric_id & cs_mask;
- }
-
- sock_id_bit = die_id_bit;
-
- /* Read D18F1x208 (SystemFabricIdMask). */
- if (intlv_num_dies || intlv_num_sockets)
- if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
- goto out_err;
-
- /* If interleaved over more than 1 die. */
- if (intlv_num_dies) {
- sock_id_bit = die_id_bit + intlv_num_dies;
- die_id_shift = (ctx.tmp >> 24) & 0xF;
- die_id_mask = (ctx.tmp >> 8) & 0xFF;
-
- cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
- }
-
- /* If interleaved over more than 1 socket. */
- if (intlv_num_sockets) {
- socket_id_shift = (ctx.tmp >> 28) & 0xF;
- socket_id_mask = (ctx.tmp >> 16) & 0xFF;
-
- cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
- }
-
- /*
- * The pre-interleaved address consists of XXXXXXIIIYYYYY
- * where III is the ID for this CS, and XXXXXXYYYYY are the
- * address bits from the post-interleaved address.
- * "num_intlv_bits" has been calculated to tell us how many "I"
- * bits there are. "intlv_addr_bit" tells us how many "Y" bits
- * there are (where "I" starts).
- */
- temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
- temp_addr_i = (cs_id << intlv_addr_bit);
- temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
- ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
- }
-
- /* Add dram base address */
- ctx.ret_addr += dram_base_addr;
-
- /* If legacy MMIO hole enabled */
- if (lgcy_mmio_hole_en) {
- if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
- goto out_err;
-
- dram_hole_base = ctx.tmp & GENMASK(31, 24);
- if (ctx.ret_addr >= dram_hole_base)
- ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
- }
-
- if (hash_enabled) {
- /* Save some parentheses and grab ls-bit at the end. */
- hashed_bit = (ctx.ret_addr >> 12) ^
- (ctx.ret_addr >> 18) ^
- (ctx.ret_addr >> 21) ^
- (ctx.ret_addr >> 30) ^
- cs_id;
-
- hashed_bit &= BIT(0);
-
- if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
- ctx.ret_addr ^= BIT(intlv_addr_bit);
- }
-
- /* Is calculated system address is above DRAM limit address? */
- if (ctx.ret_addr > dram_limit_addr)
- goto out_err;
-
- *sys_addr = ctx.ret_addr;
- return 0;
-
-out_err:
- return -EINVAL;
-}
-
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
@@ -1915,7 +1641,7 @@ ddr3:
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
- u16 mce_nid = topology_die_id(m->extcpu);
+ u16 mce_nid = topology_amd_node_id(m->extcpu);
struct mem_ctl_info *mci;
u8 start_bit = 1;
u8 end_bit = 47;
@@ -3073,9 +2799,10 @@ static void decode_umc_error(int node_id, struct mce *m)
{
u8 ecc_type = (m->status >> 45) & 0x3;
struct mem_ctl_info *mci;
+ unsigned long sys_addr;
struct amd64_pvt *pvt;
+ struct atl_err a_err;
struct err_info err;
- u64 sys_addr;
node_id = fixup_node_id(node_id, m);
@@ -3106,7 +2833,12 @@ static void decode_umc_error(int node_id, struct mce *m)
pvt->ops->get_err_info(m, &err);
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ a_err.addr = m->addr;
+ a_err.ipid = m->ipid;
+ a_err.cpu = m->extcpu;
+
+ sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
+ if (IS_ERR_VALUE(sys_addr)) {
err.err_code = ERR_NORM_ADDR;
goto log_error;
}
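
The translation helper now returns the system address as an unsigned long with any errno folded into it, which is why the caller tests IS_ERR_VALUE() rather than a separate return code. A minimal sketch of that convention with hypothetical helper names:

#include <linux/err.h>
#include <linux/errno.h>

/* Errors are encoded in the top range of the unsigned long return
 * value and recovered with IS_ERR_VALUE(), mirroring the
 * decode_umc_error() check above.
 */
static unsigned long demo_translate(u64 norm_addr)
{
	if (!norm_addr)
		return -EINVAL;

	return norm_addr + 0x1000;	/* stand-in translation */
}

static int demo_decode(u64 norm_addr)
{
	unsigned long sys_addr = demo_translate(norm_addr);

	if (IS_ERR_VALUE(sys_addr))
		return (int)sys_addr;

	return 0;
}
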
@@ -3446,7 +3178,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
int cpu;
for_each_online_cpu(cpu)
- if (topology_die_id(cpu) == nid)
+ if (topology_amd_node_id(cpu) == nid)
cpumask_set_cpu(cpu, mask);
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 2b83d6de9352..3fd22a1eb1a9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -951,6 +951,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 2b0ecdeba5cd..cdd8480e7368 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -238,6 +238,7 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU9 0x4678
#define DID_ADL_N_SKU10 0x4679
#define DID_ADL_N_SKU11 0x467c
+#define DID_ADL_N_SKU12 0x4632
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
@@ -583,6 +584,7 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ec8b6c9fedfd..8130c3dc64da 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -584,7 +584,7 @@ static void decode_mc3_mce(struct mce *m)
static void decode_mc4_mce(struct mce *m)
{
unsigned int fam = x86_family(m->cpuid);
- int node_id = topology_die_id(m->extcpu);
+ int node_id = topology_amd_node_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u8 offset = 0;
@@ -746,7 +746,7 @@ static void decode_smca_error(struct mce *m)
if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
xec == 0 && decode_dram_ecc)
- decode_dram_ecc(topology_die_id(m->extcpu), m);
+ decode_dram_ecc(topology_amd_node_id(m->extcpu), m);
}
static inline void amd_decode_err_code(u16 ec)
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 709babce43ba..5527055b0964 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -1324,11 +1324,9 @@ static int mc_probe(struct platform_device *pdev)
struct synps_edac_priv *priv;
struct mem_ctl_info *mci;
void __iomem *baseaddr;
- struct resource *res;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 62caf454b567..1688a5050f63 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -42,8 +42,11 @@
#define ECCW0_FLIP_CTRL 0x109C
#define ECCW0_FLIP0_OFFSET 0x10A0
+#define ECCW0_FLIP0_BITS 31
+#define ECCW0_FLIP1_OFFSET 0x10A4
#define ECCW1_FLIP_CTRL 0x10AC
#define ECCW1_FLIP0_OFFSET 0x10B0
+#define ECCW1_FLIP1_OFFSET 0x10B4
#define ECCR0_CERR_STAT_OFFSET 0x10BC
#define ECCR0_CE_ADDR_LO_OFFSET 0x10C0
#define ECCR0_CE_ADDR_HI_OFFSET 0x10C4
@@ -116,9 +119,6 @@
#define XDDR_BUS_WIDTH_32 1
#define XDDR_BUS_WIDTH_16 2
-#define ECC_CEPOISON_MASK 0x1
-#define ECC_UEPOISON_MASK 0x3
-
#define XDDR_MAX_ROW_CNT 18
#define XDDR_MAX_COL_CNT 10
#define XDDR_MAX_RANK_CNT 2
@@ -133,6 +133,7 @@
* https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
*/
#define PCSR_UNLOCK_VAL 0xF9E8D7C6
+#define PCSR_LOCK_VAL 1
#define XDDR_ERR_TYPE_CE 0
#define XDDR_ERR_TYPE_UE 1
@@ -142,6 +143,7 @@
#define XILINX_DRAM_SIZE_12G 3
#define XILINX_DRAM_SIZE_16G 4
#define XILINX_DRAM_SIZE_32G 5
+#define NUM_UE_BITPOS 2
/**
* struct ecc_error_info - ECC error log information.
@@ -479,7 +481,7 @@ static void err_callback(const u32 *payload, void *data)
writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
}
@@ -650,7 +652,7 @@ static void enable_intr(struct edac_priv *priv)
writel(XDDR_IRQ_UE_MASK,
priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
static void disable_intr(struct edac_priv *priv)
@@ -663,7 +665,7 @@ static void disable_intr(struct edac_priv *priv)
priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
@@ -734,38 +736,63 @@ static void poison_setup(struct edac_priv *priv)
writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
}
-static ssize_t xddr_inject_data_poison_store(struct mem_ctl_info *mci,
- const char __user *data)
+static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos)
{
+ u32 ecc0_flip0, ecc1_flip0, ecc0_flip1, ecc1_flip1;
struct edac_priv *priv = mci->pvt_info;
- writel(0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
- writel(0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
-
- if (strncmp(data, "CE", 2) == 0) {
- writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW0_FLIP0_OFFSET);
- writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW1_FLIP0_OFFSET);
+ if (ce_bitpos < ECCW0_FLIP0_BITS) {
+ ecc0_flip0 = BIT(ce_bitpos);
+ ecc1_flip0 = BIT(ce_bitpos);
+ ecc0_flip1 = 0;
+ ecc1_flip1 = 0;
} else {
- writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW0_FLIP0_OFFSET);
- writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW1_FLIP0_OFFSET);
+ ce_bitpos = ce_bitpos - ECCW0_FLIP0_BITS;
+ ecc0_flip1 = BIT(ce_bitpos);
+ ecc1_flip1 = BIT(ce_bitpos);
+ ecc0_flip0 = 0;
+ ecc1_flip0 = 0;
}
- /* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
-
- return 0;
+ writel(ecc0_flip0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+ writel(ecc1_flip0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
+ writel(ecc0_flip1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
+ writel(ecc1_flip1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}
-static ssize_t inject_data_poison_store(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
+/*
+ * To inject a correctable error, the following steps are needed:
+ *
+ * - Write the correctable error bit position value:
+ * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce
+ *
+ * poison_setup() derives the row, column, bank, group and rank and
+ * writes to the ADEC registers based on the address given by the user.
+ *
+ * ADEC12 and ADEC13 are mask registers; write 0 to keep the default
+ * configuration so that no addresses are masked.
+ *
+ * The row, column, bank, group and rank values are written into the
+ * ADEC match registers to generate errors at that particular address;
+ * ADEC14 and ADEC15 hold the match bits.
+ *
+ * xddr_inject_data_ce_store() updates the ECC FLIP registers with the
+ * bits to be corrupted based on the bit position given by the user.
+ *
+ * Upon doing a read to the address the errors are injected.
+ */
+static ssize_t inject_data_ce_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct mem_ctl_info *mci = to_mci(dev);
struct edac_priv *priv = mci->pvt_info;
+ u8 ce_bitpos;
+ int ret;
+
+ ret = kstrtou8_from_user(data, count, 0, &ce_bitpos);
+ if (ret)
+ return ret;
/* Unlock the PCSR registers */
writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
@@ -773,17 +800,110 @@ static ssize_t inject_data_poison_store(struct file *file, const char __user *da
poison_setup(priv);
+ xddr_inject_data_ce_store(mci, ce_bitpos);
+ ret = count;
+
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+
+ return ret;
+}
+
+static const struct file_operations xddr_inject_ce_fops = {
+ .open = simple_open,
+ .write = inject_data_ce_store,
+ .llseek = generic_file_llseek,
+};
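
To make the FLIP register split above concrete: bit positions below ECCW0_FLIP0_BITS (31) are programmed into the FLIP0 registers, and anything at or above 31 is rebased into the FLIP1 registers. A small worked example of the arithmetic as a hypothetical helper (kernel BIT() macro assumed):

/* e.g. echo 12 > inject_ce  ->  flip0 = BIT(12), flip1 = 0
 *      echo 40 > inject_ce  ->  flip0 = 0,       flip1 = BIT(40 - 31) = BIT(9)
 */
static void demo_ce_split(u8 bitpos, u32 *flip0, u32 *flip1)
{
	*flip0 = 0;
	*flip1 = 0;

	if (bitpos < 31)
		*flip0 = BIT(bitpos);
	else
		*flip1 = BIT(bitpos - 31);
}
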
+
+static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1)
+{
+ struct edac_priv *priv = mci->pvt_info;
+
+ writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+ writel(val0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
+ writel(val1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
+ writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
+}
+
+/*
+ * To inject an uncorrectable error, the following steps are needed:
+ * echo <bit_pos0>,<bit_pos1> > /sys/kernel/debug/edac/<controller instance>/inject_ue
+ *
+ * poison_setup() derives the row, column, bank, group and rank and
+ * writes to the ADEC registers based on the address given by the user.
+ *
+ * ADEC12 and ADEC13 are mask registers; write 0 so that none of the
+ * addresses are masked. The row, column, bank, group and rank values
+ * are written into the ADEC match registers to generate errors at
+ * that particular address; ADEC14 and ADEC15 hold the match bits.
+ *
+ * xddr_inject_data_ue_store() updates the ECC FLIP registers with the
+ * bits to be corrupted based on the bit positions given by the user.
+ * For uncorrectable errors, two bit positions are flipped.
+ *
+ * Upon doing a read to the address the errors are injected.
+ */
+static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct edac_priv *priv = mci->pvt_info;
+ char buf[6], *pbuf, *token[2];
+ u32 val0 = 0, val1 = 0;
+ u8 len, ue0, ue1;
+ int i, ret;
+
+ len = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pbuf = &buf[0];
+ for (i = 0; i < NUM_UE_BITPOS; i++)
+ token[i] = strsep(&pbuf, ",");
+
+ ret = kstrtou8(token[0], 0, &ue0);
+ if (ret)
+ return ret;
+
+ ret = kstrtou8(token[1], 0, &ue1);
+ if (ret)
+ return ret;
+
+ if (ue0 < ECCW0_FLIP0_BITS) {
+ val0 = BIT(ue0);
+ } else {
+ ue0 = ue0 - ECCW0_FLIP0_BITS;
+ val1 = BIT(ue0);
+ }
+
+ if (ue1 < ECCW0_FLIP0_BITS) {
+ val0 |= BIT(ue1);
+ } else {
+ ue1 = ue1 - ECCW0_FLIP0_BITS;
+ val1 |= BIT(ue1);
+ }
- xddr_inject_data_poison_store(mci, data);
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ poison_setup(priv);
+
+ xddr_inject_data_ue_store(mci, val0, val1);
+
+ /* Lock the PCSR registers */
+ writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
return count;
}
-static const struct file_operations xddr_inject_enable_fops = {
+static const struct file_operations xddr_inject_ue_fops = {
.open = simple_open,
- .write = inject_data_poison_store,
+ .write = inject_data_ue_store,
.llseek = generic_file_llseek,
};
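
Since inject_data_ue_store() splits its input on ',' into NUM_UE_BITPOS (two) bit positions, a worked example of what a write resolves to, assuming the same 31-bit FLIP0/FLIP1 split as the CE path:

/* echo 2,35 > /sys/kernel/debug/edac/<controller instance>/inject_ue
 *
 *   ue0 = 2   ->  val0 |= BIT(2)                    (FLIP0 registers)
 *   ue1 = 35  ->  val1 |= BIT(35 - 31) = BIT(4)     (FLIP1 registers)
 *
 * Two distinct data bits are therefore flipped, which the ECC logic
 * reports as an uncorrectable error on the next read of the poisoned
 * address.
 */
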
@@ -795,8 +915,17 @@ static void create_debugfs_attributes(struct mem_ctl_info *mci)
if (!priv->debugfs)
return;
- edac_debugfs_create_file("inject_error", 0200, priv->debugfs,
- &mci->dev, &xddr_inject_enable_fops);
+ if (!edac_debugfs_create_file("inject_ce", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_ce_fops)) {
+ debugfs_remove_recursive(priv->debugfs);
+ return;
+ }
+
+ if (!edac_debugfs_create_file("inject_ue", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_ue_fops)) {
+ debugfs_remove_recursive(priv->debugfs);
+ return;
+ }
debugfs_create_x64("address", 0600, priv->debugfs,
&priv->err_inject_addr);
mci->debugfs = priv->debugfs;
@@ -1031,7 +1160,7 @@ free_edac_mc:
return rc;
}
-static int mc_remove(struct platform_device *pdev)
+static void mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct edac_priv *priv = mci->pvt_info;
@@ -1049,8 +1178,6 @@ static int mc_remove(struct platform_device *pdev)
XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver xilinx_ddr_edac_mc_driver = {
@@ -1059,7 +1186,7 @@ static struct platform_driver xilinx_ddr_edac_mc_driver = {
.of_match_table = xlnx_edac_match,
},
.probe = mc_probe,
- .remove = mc_remove,
+ .remove_new = mc_remove,
};
module_platform_driver(xilinx_ddr_edac_mc_driver);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 8aaa7fcb2630..401a77e3b5fa 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work)
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- reset_bus(card, true);
+ /*
+ * Where possible, use a short bus reset to minimize
+ * disruption to isochronous transfers. But in the event
+ * of a gap count inconsistency, use a long bus reset.
+ *
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+ * may set different gap counts after a bus reset. On a mixed
+ * 1394/1394a bus, a short bus reset can get doubled. Some
+ * nodes may treat the double reset as one bus reset and others
+ * may treat it as two, causing a gap count inconsistency
+ * again. Using a long bus reset prevents this.
+ */
+ reset_bus(card, card->gap_count != 0);
/* Will allocate broadcast channel after the reset. */
goto out;
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9db9290c3269..7bc71f4be64a 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3773,6 +3773,7 @@ static int pci_probe(struct pci_dev *dev,
return 0;
fail_msi:
+ devm_free_irq(&dev->dev, dev->irq, ohci);
pci_disable_msi(dev);
return err;
@@ -3800,6 +3801,7 @@ static void pci_remove(struct pci_dev *dev)
software_reset(ohci);
+ devm_free_irq(&dev->dev, dev->irq, ohci);
pci_disable_msi(dev);
dev_notice(&dev->dev, "removing fw-ohci device\n");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index afd38539b92e..71d8b26c4103 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -182,6 +182,7 @@ config MTK_ADSP_IPC
config SYSFB
bool
select BOOT_VESA_SUPPORT
+ select SCREEN_INFO
config SYSFB_SIMPLEFB
bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 1c7940ba5539..2f557e90f2eb 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -105,7 +105,7 @@ static struct attribute *ffa_device_attributes_attrs[] = {
};
ATTRIBUTE_GROUPS(ffa_device_attributes);
-struct bus_type ffa_bus_type = {
+const struct bus_type ffa_bus_type = {
.name = "arm_ffa",
.match = ffa_device_match,
.probe = ffa_device_probe,
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index c15928b8c5cc..77c78be6e79c 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -141,6 +141,17 @@ out:
return ret;
}
+static int scmi_protocol_table_register(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ const struct scmi_device_id *entry;
+
+ for (entry = id_table; entry->name && ret == 0; entry++)
+ ret = scmi_protocol_device_request(entry);
+
+ return ret;
+}
+
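
The registration loop above relies on the driver's id_table being terminated by an entry whose name is NULL. A minimal, hypothetical example of such a table as an SCMI driver would declare it (protocol IDs and names are illustrative):

static const struct scmi_device_id demo_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },	/* name == NULL terminates scmi_protocol_table_register() */
};
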
/**
* scmi_protocol_device_unrequest - Helper to unrequest a device
*
@@ -186,6 +197,15 @@ static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table
mutex_unlock(&scmi_requested_devices_mtx);
}
+static void
+scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
+{
+ const struct scmi_device_id *entry;
+
+ for (entry = id_table; entry->name; entry++)
+ scmi_protocol_device_unrequest(entry);
+}
+
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
{
@@ -263,7 +283,7 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
-struct bus_type scmi_bus_type = {
+const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
@@ -279,7 +299,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
if (!driver->probe)
return -EINVAL;
- retval = scmi_protocol_device_request(driver->id_table);
+ retval = scmi_protocol_table_register(driver->id_table);
if (retval)
return retval;
@@ -299,7 +319,7 @@ EXPORT_SYMBOL_GPL(scmi_driver_register);
void scmi_driver_unregister(struct scmi_driver *driver)
{
driver_unregister(&driver->driver);
- scmi_protocol_device_unrequest(driver->id_table);
+ scmi_protocol_table_unregister(driver->id_table);
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e2050adbf85c..134019297d08 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -13,7 +13,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
@@ -28,8 +28,13 @@ enum scmi_clock_protocol_cmd {
CLOCK_POSSIBLE_PARENTS_GET = 0xC,
CLOCK_PARENT_SET = 0xD,
CLOCK_PARENT_GET = 0xE,
+ CLOCK_GET_PERMISSIONS = 0xF,
};
+#define CLOCK_STATE_CONTROL_ALLOWED BIT(31)
+#define CLOCK_PARENT_CONTROL_ALLOWED BIT(30)
+#define CLOCK_RATE_CONTROL_ALLOWED BIT(29)
+
enum clk_state {
CLK_STATE_DISABLE,
CLK_STATE_ENABLE,
@@ -49,6 +54,8 @@ struct scmi_msg_resp_clock_attributes {
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x) ((x) & BIT(28))
+#define SUPPORTS_EXTENDED_CONFIG(x) ((x) & BIT(27))
+#define SUPPORTS_GET_PERMISSIONS(x) ((x) & BIT(1))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 clock_enable_latency;
};
@@ -152,14 +159,18 @@ struct clock_info {
u32 version;
int num_clocks;
int max_async_req;
+ bool notify_rate_changed_cmd;
+ bool notify_rate_change_requested_cmd;
atomic_t cur_async_req;
struct scmi_clock_info *clk;
int (*clock_config_set)(const struct scmi_protocol_handle *ph,
u32 clk_id, enum clk_state state,
- u8 oem_type, u32 oem_val, bool atomic);
+ enum scmi_clock_oem_config oem_type,
+ u32 oem_val, bool atomic);
int (*clock_config_get)(const struct scmi_protocol_handle *ph,
- u32 clk_id, u8 oem_type, u32 *attributes,
- bool *enabled, u32 *oem_val, bool atomic);
+ u32 clk_id, enum scmi_clock_oem_config oem_type,
+ u32 *attributes, bool *enabled, u32 *oem_val,
+ bool atomic);
};
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
@@ -167,6 +178,15 @@ static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
+static inline struct scmi_clock_info *
+scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id)
+{
+ if (clk_id >= ci->num_clocks)
+ return ERR_PTR(-EINVAL);
+
+ return ci->clk + clk_id;
+}
+
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct clock_info *ci)
@@ -189,6 +209,17 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL))
+ ci->notify_rate_changed_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
+ NULL))
+ ci->notify_rate_change_requested_cmd = true;
+ }
+
return ret;
}
@@ -284,14 +315,44 @@ static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u3
return ret;
}
+static int
+scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ struct scmi_xfer *t;
+ u32 perm;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS,
+ sizeof(clk_id), sizeof(perm), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ perm = get_unaligned_le32(t->rx.buf);
+
+ clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
+ clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
+ clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
- u32 clk_id, struct scmi_clock_info *clk,
+ u32 clk_id, struct clock_info *cinfo,
u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
+ struct scmi_clock_info *clk = cinfo->clk + clk_id;
ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
sizeof(clk_id), sizeof(*attr), &t);
@@ -324,12 +385,20 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
NULL, clk->name,
SCMI_MAX_STR_SIZE);
- if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
+ if (cinfo->notify_rate_changed_cmd &&
+ SUPPORTS_RATE_CHANGED_NOTIF(attributes))
clk->rate_changed_notifications = true;
- if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
+ if (cinfo->notify_rate_change_requested_cmd &&
+ SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
clk->rate_change_requested_notifications = true;
- if (SUPPORTS_PARENT_CLOCK(attributes))
- scmi_clock_possible_parents(ph, clk_id, clk);
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
+ if (SUPPORTS_PARENT_CLOCK(attributes))
+ scmi_clock_possible_parents(ph, clk_id, clk);
+ if (SUPPORTS_GET_PERMISSIONS(attributes))
+ scmi_clock_get_permissions(ph, clk_id, clk);
+ if (SUPPORTS_EXTENDED_CONFIG(attributes))
+ clk->extended_config = true;
+ }
}
return ret;
@@ -502,6 +571,14 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (clk->rate_ctrl_forbidden)
+ return -EACCES;
ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
if (ret)
@@ -543,7 +620,8 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
- enum clk_state state, u8 __unused0, u32 __unused1,
+ enum clk_state state,
+ enum scmi_clock_oem_config __unused0, u32 __unused1,
bool atomic)
{
int ret;
@@ -580,14 +658,16 @@ scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
struct clock_info *ci = ph->get_priv(ph);
struct scmi_clock_info *clk;
- if (clk_id >= ci->num_clocks)
- return -EINVAL;
-
- clk = ci->clk + clk_id;
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
if (parent_id >= clk->num_parents)
return -EINVAL;
+ if (clk->parent_ctrl_forbidden)
+ return -EACCES;
+
ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
sizeof(*cfg), 0, &t);
if (ret)
@@ -628,10 +708,11 @@ scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
return ret;
}
-/* For SCMI clock v2.1 and onwards */
+/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
- enum clk_state state, u8 oem_type, u32 oem_val,
+ enum clk_state state,
+ enum scmi_clock_oem_config oem_type, u32 oem_val,
bool atomic)
{
int ret;
@@ -671,6 +752,14 @@ static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (clk->state_ctrl_forbidden)
+ return -EACCES;
return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
NULL_OEM_TYPE, 0, atomic);
@@ -680,16 +769,24 @@ static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (clk->state_ctrl_forbidden)
+ return -EACCES;
return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
NULL_OEM_TYPE, 0, atomic);
}
-/* For SCMI clock v2.1 and onwards */
+/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
- u8 oem_type, u32 *attributes, bool *enabled,
- u32 *oem_val, bool atomic)
+ enum scmi_clock_oem_config oem_type, u32 *attributes,
+ bool *enabled, u32 *oem_val, bool atomic)
{
int ret;
u32 flags;
@@ -730,8 +827,8 @@ scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
- u8 oem_type, u32 *attributes, bool *enabled,
- u32 *oem_val, bool atomic)
+ enum scmi_clock_oem_config oem_type, u32 *attributes,
+ bool *enabled, u32 *oem_val, bool atomic)
{
int ret;
struct scmi_xfer *t;
@@ -768,20 +865,38 @@ static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
}
static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
- u32 clk_id, u8 oem_type, u32 oem_val,
- bool atomic)
+ u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 oem_val, bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!clk->extended_config)
+ return -EOPNOTSUPP;
return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
oem_type, oem_val, atomic);
}
static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
- u32 clk_id, u8 oem_type, u32 *oem_val,
- u32 *attributes, bool atomic)
+ u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 *oem_val, u32 *attributes, bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!clk->extended_config)
+ return -EOPNOTSUPP;
return ci->clock_config_get(ph, clk_id, oem_type, attributes,
NULL, oem_val, atomic);
@@ -800,10 +915,10 @@ scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
- if (clk_id >= ci->num_clocks)
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
return NULL;
- clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
@@ -824,6 +939,28 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.parent_get = scmi_clock_get_parent,
};
+static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported;
+ struct scmi_clock_info *clk;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return false;
+
+ clk = scmi_clock_domain_lookup(ci, src_id);
+ if (IS_ERR(clk))
+ return false;
+
+ if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED)
+ supported = clk->rate_changed_notifications;
+ else
+ supported = clk->rate_change_requested_notifications;
+
+ return supported;
+}
+
static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
u32 clk_id, int message_id, bool enable)
{
@@ -908,6 +1045,7 @@ static const struct scmi_event clk_events[] = {
};
static const struct scmi_event_ops clk_event_ops = {
+ .is_notify_supported = scmi_clk_notify_supported,
.get_num_sources = scmi_clk_get_num_sources,
.set_notify_enabled = scmi_clk_set_notify_enabled,
.fill_custom_report = scmi_clk_fill_custom_report,
@@ -949,7 +1087,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
- ret = scmi_clock_attributes_get(ph, clkid, clk, version);
+ ret = scmi_clock_attributes_get(ph, clkid, cinfo, version);
if (!ret)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
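
A rough illustration of the CLOCK_GET_PERMISSIONS decoding added above: the "allowed" bits in the response are inverted into per-clock "forbidden" flags, which the set-rate/set-parent/enable paths then check before issuing a command. Only the three bit positions come from the hunk; the struct and the sample word are made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLOCK_STATE_CONTROL_ALLOWED  (1u << 31)
#define CLOCK_PARENT_CONTROL_ALLOWED (1u << 30)
#define CLOCK_RATE_CONTROL_ALLOWED   (1u << 29)

struct clk_perms {                     /* illustrative subset of scmi_clock_info */
	bool state_ctrl_forbidden;
	bool rate_ctrl_forbidden;
	bool parent_ctrl_forbidden;
};

static void decode_permissions(uint32_t perm, struct clk_perms *c)
{
	/* An "allowed" bit that is clear means the operation is forbidden. */
	c->state_ctrl_forbidden  = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
	c->rate_ctrl_forbidden   = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
	c->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
}

int main(void)
{
	struct clk_perms c;

	/* Sample permissions word: only rate control allowed. */
	decode_permissions(CLOCK_RATE_CONTROL_ALLOWED, &c);
	printf("state forbidden: %d, rate forbidden: %d, parent forbidden: %d\n",
	       c.state_ctrl_forbidden, c.rate_ctrl_forbidden, c.parent_ctrl_forbidden);
	return 0;
}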
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 00b165d1f502..6affbfdd1dec 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -141,7 +141,7 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-extern struct bus_type scmi_bus_type;
+extern const struct bus_type scmi_bus_type;
#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0
#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3ea64b22cf0d..2709598f3008 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -86,6 +86,12 @@ struct scmi_xfers_info {
* @users: A refcount to track effective users of this protocol.
* @priv: Reference for optional protocol private data.
* @version: Protocol version supported by the platform as detected at runtime.
+ * @negotiated_version: When the platform supports a newer protocol version,
+ * the agent will try to negotiate with the platform the
+ * usage of the newest version known to it, since
+ * backward compatibility is NOT automatically assured.
+ * This field is NON-zero when a successful negotiation
+ * has completed.
* @ph: An embedded protocol handle that will be passed down to protocol
* initialization code to identify this instance.
*
@@ -99,6 +105,7 @@ struct scmi_protocol_instance {
refcount_t users;
void *priv;
unsigned int version;
+ unsigned int negotiated_version;
struct scmi_protocol_handle ph;
};
@@ -1617,7 +1624,7 @@ static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
u32 domain, void __iomem **p_addr,
- struct scmi_fc_db_info **p_db)
+ struct scmi_fc_db_info **p_db, u32 *rate_limit)
{
int ret;
u32 flags;
@@ -1661,6 +1668,9 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
goto err_xfer;
}
+ if (rate_limit)
+ *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
+
phys_addr = le32_to_cpu(resp->chan_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
addr = devm_ioremap(ph->dev, phys_addr, size);
@@ -1754,10 +1764,44 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
#endif
}
+/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
+
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
+ .protocol_msg_check = scmi_protocol_msg_check,
.fastchannel_init = scmi_common_fastchannel_init,
.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
@@ -1782,6 +1826,44 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_version_negotiate - Negotiate protocol version
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * A helper to negotiate a protocol version different from the latest
+ * advertised as supported from the platform: on Success backward
+ * compatibility is assured by the platform.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ /* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
+ ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
+ if (ret)
+ return ret;
+
+ /* ... then attempt protocol version negotiation */
+ ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret)
+ pi->negotiated_version = pi->proto->supported_version;
+
+ xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
* scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
* instance descriptor.
* @info: The reference to the related SCMI instance.
@@ -1853,11 +1935,21 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
devres_close_group(handle->dev, pi->gid);
dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
- if (pi->version > proto->supported_version)
- dev_warn(handle->dev,
- "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X."
- "Backward compatibility is NOT assured.\n",
- pi->version, pi->proto->id);
+ if (pi->version > proto->supported_version) {
+ ret = scmi_protocol_version_negotiate(&pi->ph);
+ if (!ret) {
+ dev_info(handle->dev,
+ "Protocol 0x%X successfully negotiated version 0x%X\n",
+ proto->id, pi->negotiated_version);
+ } else {
+ dev_warn(handle->dev,
+ "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
+ pi->version, pi->proto->id);
+ dev_warn(handle->dev,
+ "Trying version 0x%X. Backward compatibility is NOT assured.\n",
+ pi->proto->supported_version);
+ }
+ }
return pi;
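
A sketch of the negotiation flow added to driver.c above, with the transport stubbed out: first probe PROTOCOL_MESSAGE_ATTRIBUTES for NEGOTIATE_PROTOCOL_VERSION, then ask the platform to accept the agent's newest known version, and record it only on success. platform_msg_supported() and platform_negotiate() are invented stand-ins for the real xfer calls.

#include <stdio.h>

/* Stubbed platform: pretend it implements message 0x10 and accepts
 * any version up to 0x30000. */
static int platform_msg_supported(unsigned int msg_id)
{
	return msg_id == 0x10;			/* NEGOTIATE_PROTOCOL_VERSION */
}

static int platform_negotiate(unsigned int version)
{
	return version <= 0x30000 ? 0 : -1;
}

static int version_negotiate(unsigned int supported, unsigned int *negotiated)
{
	/* 1) Is NEGOTIATE_PROTOCOL_VERSION implemented at all ? */
	if (!platform_msg_supported(0x10))
		return -1;

	/* 2) Ask the platform to fall back to the agent's supported version. */
	if (platform_negotiate(supported))
		return -1;

	*negotiated = supported;
	return 0;
}

int main(void)
{
	unsigned int negotiated = 0;

	/* Platform advertises 0x40000, agent only knows 0x30000. */
	if (!version_negotiate(0x30000, &negotiated))
		printf("negotiated version 0x%X\n", negotiated);
	else
		printf("negotiation failed, backward compatibility NOT assured\n");
	return 0;
}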
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 0efd20cd9d69..27c52531194d 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -99,6 +99,7 @@
#define PROTO_ID_MASK GENMASK(31, 24)
#define EVT_ID_MASK GENMASK(23, 16)
#define SRC_ID_MASK GENMASK(15, 0)
+#define NOTIF_UNSUPP -1
/*
* Builds an unsigned 32bit key from the given input tuple to be used
@@ -788,6 +789,7 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
pd->ph = ph;
for (i = 0; i < ee->num_events; i++, evt++) {
+ int id;
struct scmi_registered_event *r_evt;
r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
@@ -809,6 +811,11 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
if (!r_evt->report)
return -ENOMEM;
+ for (id = 0; id < r_evt->num_sources; id++)
+ if (ee->ops->is_notify_supported &&
+ !ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
+ refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
+
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
smp_wmb();
@@ -1166,7 +1173,13 @@ static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
int ret = 0;
sid = &r_evt->sources[src_id];
- if (refcount_read(sid) == 0) {
+ if (refcount_read(sid) == NOTIF_UNSUPP) {
+ dev_dbg(r_evt->proto->ph->dev,
+ "Notification NOT supported - proto_id:%d evt_id:%d src_id:%d",
+ r_evt->proto->id, r_evt->evt->id,
+ src_id);
+ ret = -EOPNOTSUPP;
+ } else if (refcount_read(sid) == 0) {
ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
src_id);
if (!ret)
@@ -1179,6 +1192,8 @@ static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
} else {
for (; num_sources; src_id++, num_sources--) {
sid = &r_evt->sources[src_id];
+ if (refcount_read(sid) == NOTIF_UNSUPP)
+ continue;
if (refcount_dec_and_test(sid))
REVT_NOTIFY_DISABLE(r_evt,
r_evt->evt->id, src_id);
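
The NOTIF_UNSUPP handling above seeds unsupported sources with a -1 sentinel at registration time, so later enable requests bail out with -EOPNOTSUPP and the bulk-disable path skips them. A standalone sketch of that control flow, using plain ints in place of refcount_t; the source layout and counts are invented.

#include <stdio.h>

#define NOTIF_UNSUPP -1   /* sentinel matching the hunk above */

static int sources[4];    /* per-source enable counts (refcount_t in the kernel) */

static int enable_evt(int src)
{
	if (sources[src] == NOTIF_UNSUPP) {
		printf("src %d: notification not supported\n", src);
		return -1;                       /* -EOPNOTSUPP in the kernel */
	}
	if (sources[src]++ == 0)
		printf("src %d: first user, asking the platform to notify\n", src);
	return 0;
}

static void disable_all(int num)
{
	for (int src = 0; src < num; src++) {
		if (sources[src] == NOTIF_UNSUPP || sources[src] == 0)
			continue;                /* skip unsupported/unused sources */
		if (--sources[src] == 0)
			printf("src %d: last user gone, disabling at the platform\n", src);
	}
}

int main(void)
{
	sources[2] = NOTIF_UNSUPP;   /* seeded at registration time, as in the hunk */
	enable_evt(0);
	enable_evt(2);
	disable_all(4);
	return 0;
}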
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index 4e9b627edfef..76758a736cf4 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -35,6 +35,8 @@ struct scmi_protocol_handle;
/**
* struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @is_notify_supported: Return true if the specified notification for the
+ * specified resource (src_id) is supported.
* @get_num_sources: Returns the number of possible events' sources for this
* protocol
* @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
@@ -50,6 +52,8 @@ struct scmi_protocol_handle;
* process context.
*/
struct scmi_event_ops {
+ bool (*is_notify_supported)(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id);
int (*get_num_sources)(const struct scmi_protocol_handle *ph);
int (*set_notify_enabled)(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enabled);
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 25bfb465484d..4e7944b91e38 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -109,8 +109,10 @@ enum scmi_optee_pta_cmd {
* @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
- * @shmem: Virtual base address of the shared memory
- * @req: Shared memory protocol handle for SCMI request and synchronous response
+ * @req: union for SCMI interface
+ * @req.shmem: Virtual base address of the shared memory
+ * @req.msg: Shared memory protocol handle for SCMI request and
+ * synchronous response
* @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 211e8e0aef2c..8e832d1ad825 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -153,6 +153,7 @@ struct perf_dom_info {
bool perf_fastchannels;
bool level_indexing_mode;
u32 opp_count;
+ u32 rate_limit_us;
u32 sustained_freq_khz;
u32 sustained_perf_level;
unsigned long mult_factor;
@@ -182,6 +183,8 @@ struct scmi_perf_info {
enum scmi_power_scale power_scale;
u64 stats_addr;
u32 stats_size;
+ bool notify_lvl_cmd;
+ bool notify_lim_cmd;
struct perf_dom_info *dom_info;
};
@@ -222,6 +225,15 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL))
+ pi->notify_lvl_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL))
+ pi->notify_lim_cmd = true;
+ }
+
return ret;
}
@@ -239,6 +251,7 @@ static void scmi_perf_xa_destroy(void *data)
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom_info,
+ bool notify_lim_cmd, bool notify_lvl_cmd,
u32 version)
{
int ret;
@@ -260,25 +273,46 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
- dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
- dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ if (notify_lim_cmd)
+ dom_info->perf_limit_notify =
+ SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+ if (notify_lvl_cmd)
+ dom_info->perf_level_notify =
+ SUPPORTS_PERF_LEVEL_NOTIFY(flags);
dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
if (PROTOCOL_REV_MAJOR(version) >= 0x4)
dom_info->level_indexing_mode =
SUPPORTS_LEVEL_INDEXING(flags);
+ dom_info->rate_limit_us = le32_to_cpu(attr->rate_limit_us) &
+ GENMASK(19, 0);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
+ /*
+ * sustained_freq_khz = mult_factor * sustained_perf_level
+ * mult_factor must be non zero positive integer(not fraction)
+ */
if (!dom_info->sustained_freq_khz ||
!dom_info->sustained_perf_level ||
- dom_info->level_indexing_mode)
+ dom_info->level_indexing_mode) {
/* CPUFreq converts to kHz, hence default 1000 */
dom_info->mult_factor = 1000;
- else
+ } else {
dom_info->mult_factor =
(dom_info->sustained_freq_khz * 1000UL)
/ dom_info->sustained_perf_level;
+ if ((dom_info->sustained_freq_khz * 1000UL) %
+ dom_info->sustained_perf_level)
+ dev_warn(ph->dev,
+ "multiplier for domain %d rounded\n",
+ dom_info->id);
+ }
+ if (!dom_info->mult_factor)
+ dev_warn(ph->dev,
+ "Wrong sustained perf/frequency(domain %d)\n",
+ dom_info->id);
+
strscpy(dom_info->info.name, attr->name,
SCMI_SHORT_NAME_MAX_SIZE);
}
@@ -295,9 +329,9 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->id, NULL, dom_info->info.name,
SCMI_MAX_STR_SIZE);
+ xa_init(&dom_info->opps_by_lvl);
if (dom_info->level_indexing_mode) {
xa_init(&dom_info->opps_by_idx);
- xa_init(&dom_info->opps_by_lvl);
hash_init(dom_info->opps_by_freq);
}
@@ -340,13 +374,21 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
}
static inline void
-process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
+process_response_opp(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
{
+ int ret;
+
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
}
static inline void
@@ -354,16 +396,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
+ int ret;
+
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
+
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
if (dom->level_indexing_mode) {
- int ret;
-
opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
@@ -373,12 +420,6 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
"Failed to add opps_by_idx at %d - ret:%d\n",
opp->level_index, ret);
- ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev,
- "Failed to add opps_by_lvl at %d - ret:%d\n",
- opp->perf, ret);
-
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
}
@@ -393,7 +434,8 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
- process_response_opp(opp, st->loop_idx, response);
+ process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
+ response);
else
process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
response);
@@ -786,23 +828,27 @@ static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LEVEL_GET, 4, dom->id,
- &fc[PERF_FC_LEVEL].get_addr, NULL);
+ &fc[PERF_FC_LEVEL].get_addr, NULL,
+ &fc[PERF_FC_LEVEL].rate_limit);
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LIMITS_GET, 8, dom->id,
- &fc[PERF_FC_LIMIT].get_addr, NULL);
+ &fc[PERF_FC_LIMIT].get_addr, NULL,
+ &fc[PERF_FC_LIMIT].rate_limit);
if (dom->info.set_perf)
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LEVEL_SET, 4, dom->id,
&fc[PERF_FC_LEVEL].set_addr,
- &fc[PERF_FC_LEVEL].set_db);
+ &fc[PERF_FC_LEVEL].set_db,
+ &fc[PERF_FC_LEVEL].rate_limit);
if (dom->set_limits)
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LIMITS_SET, 8, dom->id,
&fc[PERF_FC_LIMIT].set_addr,
- &fc[PERF_FC_LIMIT].set_db);
+ &fc[PERF_FC_LIMIT].set_db,
+ &fc[PERF_FC_LIMIT].rate_limit);
dom->fc_info = fc;
}
@@ -855,6 +901,23 @@ scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
+static int
+scmi_dvfs_rate_limit_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit)
+{
+ struct perf_dom_info *dom;
+
+ if (!rate_limit)
+ return -EINVAL;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ *rate_limit = dom->rate_limit_us;
+ return 0;
+}
+
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long freq, bool poll)
{
@@ -954,6 +1017,25 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}
+static int scmi_fast_switch_rate_limit(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit)
+{
+ struct perf_dom_info *dom;
+
+ if (!rate_limit)
+ return -EINVAL;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (!dom->fc_info)
+ return -EINVAL;
+
+ *rate_limit = dom->fc_info[PERF_FC_LEVEL].rate_limit;
+ return 0;
+}
+
static enum scmi_power_scale
scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
@@ -970,14 +1052,37 @@ static const struct scmi_perf_proto_ops perf_proto_ops = {
.level_set = scmi_perf_level_set,
.level_get = scmi_perf_level_get,
.transition_latency_get = scmi_dvfs_transition_latency_get,
+ .rate_limit_get = scmi_dvfs_rate_limit_get,
.device_opps_add = scmi_dvfs_device_opps_add,
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
.fast_switch_possible = scmi_fast_switch_possible,
+ .fast_switch_rate_limit = scmi_fast_switch_rate_limit,
.power_scale_get = scmi_power_scale_get,
};
+static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported;
+ struct perf_dom_info *dom;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return false;
+
+ dom = scmi_perf_domain_lookup(ph, src_id);
+ if (IS_ERR(dom))
+ return false;
+
+ if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED)
+ supported = dom->perf_limit_notify;
+ else
+ supported = dom->perf_level_notify;
+
+ return supported;
+}
+
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
@@ -995,18 +1100,47 @@ static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
return ret;
}
+static int
+scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom,
+ unsigned int index, unsigned long *freq)
+{
+ struct scmi_opp *opp;
+
+ if (!dom || !freq)
+ return -EINVAL;
+
+ if (!dom->level_indexing_mode) {
+ opp = xa_load(&dom->opps_by_lvl, index);
+ if (!opp)
+ return -ENODEV;
+
+ *freq = opp->perf * dom->mult_factor;
+ } else {
+ opp = xa_load(&dom->opps_by_idx, index);
+ if (!opp)
+ return -ENODEV;
+
+ *freq = opp->indicative_freq * dom->mult_factor;
+ }
+
+ return 0;
+}
+
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
+ int ret;
void *rep = NULL;
+ struct perf_dom_info *dom;
switch (evt_id) {
case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
{
const struct scmi_perf_limits_notify_payld *p = payld;
struct scmi_perf_limits_report *r = report;
+ unsigned long freq_min, freq_max;
if (sizeof(*p) != payld_sz)
break;
@@ -1016,14 +1150,36 @@ static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
r->domain_id = le32_to_cpu(p->domain_id);
r->range_max = le32_to_cpu(p->range_max);
r->range_min = le32_to_cpu(p->range_min);
+ /* Check if the reported domain exists at all */
+ dom = scmi_perf_domain_lookup(ph, r->domain_id);
+ if (IS_ERR(dom))
+ break;
+ /*
+ * Event will be reported from this point on...
+ * ...even if, later, xlated frequencies were not retrieved.
+ */
*src_id = r->domain_id;
rep = r;
+
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max);
+ if (ret)
+ break;
+
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min);
+ if (ret)
+ break;
+
+ /* Report translated freqs ONLY if both available */
+ r->range_max_freq = freq_max;
+ r->range_min_freq = freq_min;
+
break;
}
case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
{
const struct scmi_perf_level_notify_payld *p = payld;
struct scmi_perf_level_report *r = report;
+ unsigned long freq;
if (sizeof(*p) != payld_sz)
break;
@@ -1031,9 +1187,27 @@ static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
+ /* Report translated freqs ONLY if available */
r->performance_level = le32_to_cpu(p->performance_level);
+ /* Check if the reported domain exists at all */
+ dom = scmi_perf_domain_lookup(ph, r->domain_id);
+ if (IS_ERR(dom))
+ break;
+ /*
+ * Event will be reported from this point on...
+ * ...even if, later, xlated frequencies were not retrieved.
+ */
*src_id = r->domain_id;
rep = r;
+
+ /* Report translated freqs ONLY if available */
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level,
+ &freq);
+ if (ret)
+ break;
+
+ r->performance_level_freq = freq;
+
break;
}
default:
@@ -1067,6 +1241,7 @@ static const struct scmi_event perf_events[] = {
};
static const struct scmi_event_ops perf_event_ops = {
+ .is_notify_supported = scmi_perf_notify_supported,
.get_num_sources = scmi_perf_get_num_sources,
.set_notify_enabled = scmi_perf_set_notify_enabled,
.fill_custom_report = scmi_perf_fill_custom_report,
@@ -1111,7 +1286,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
struct perf_dom_info *dom = pinfo->dom_info + domain;
dom->id = domain;
- scmi_perf_domain_attributes_get(ph, dom, version);
+ scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd,
+ pinfo->notify_lvl_cmd, version);
scmi_perf_describe_levels_get(ph, dom, version);
if (dom->perf_fastchannels)
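
A tiny array-based sketch of what scmi_perf_xlate_opp_to_freq() above does when filling the notification reports: translate either a performance level or a level index into a frequency via mult_factor. The OPP table and multiplier are invented, and the kernel uses xarrays rather than a linear scan.

#include <stdio.h>

struct opp { unsigned int perf; unsigned int indicative_freq; };  /* invented OPPs */

static const struct opp opps[] = {
	{ .perf = 100, .indicative_freq = 400000 },
	{ .perf = 200, .indicative_freq = 800000 },
	{ .perf = 300, .indicative_freq = 1200000 },
};

/* Translate a notified level (or index) into a frequency, as the report filler does. */
static int xlate_to_freq(int level_indexing, unsigned int idx,
			 unsigned long mult_factor, unsigned long *freq)
{
	if (level_indexing) {
		/* level-indexing mode: idx is an index into the OPP table */
		if (idx >= sizeof(opps) / sizeof(opps[0]))
			return -1;
		*freq = opps[idx].indicative_freq * mult_factor;
	} else {
		/* legacy mode: idx is the performance level itself */
		for (unsigned int i = 0; i < sizeof(opps) / sizeof(opps[0]); i++) {
			if (opps[i].perf == idx) {
				*freq = opps[i].perf * mult_factor;
				return 0;
			}
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned long freq;

	if (!xlate_to_freq(0, 200, 1000, &freq))   /* level 200, kHz multiplier */
		printf("level 200 -> %lu Hz\n", freq);
	return 0;
}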
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index c2e6b9b4d941..49666bd1d8ac 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -68,6 +68,7 @@ struct power_dom_info {
struct scmi_power_info {
u32 version;
+ bool notify_state_change_cmd;
int num_domains;
u64 stats_addr;
u32 stats_size;
@@ -97,13 +98,18 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret)
+ if (!ph->hops->protocol_msg_check(ph, POWER_STATE_NOTIFY, NULL))
+ pi->notify_state_change_cmd = true;
+
return ret;
}
static int
scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct power_dom_info *dom_info,
- u32 version)
+ u32 version, bool notify_state_change_cmd)
{
int ret;
u32 flags;
@@ -122,7 +128,9 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret) {
flags = le32_to_cpu(attr->flags);
- dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
+ if (notify_state_change_cmd)
+ dom_info->state_set_notify =
+ SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
@@ -231,6 +239,20 @@ static int scmi_power_request_notify(const struct scmi_protocol_handle *ph,
return ret;
}
+static bool scmi_power_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ struct power_dom_info *dom;
+ struct scmi_power_info *pinfo = ph->get_priv(ph);
+
+ if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED ||
+ src_id >= pinfo->num_domains)
+ return false;
+
+ dom = pinfo->dom_info + src_id;
+ return dom->state_set_notify;
+}
+
static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
@@ -285,6 +307,7 @@ static const struct scmi_event power_events[] = {
};
static const struct scmi_event_ops power_event_ops = {
+ .is_notify_supported = scmi_power_notify_supported,
.get_num_sources = scmi_power_get_num_sources,
.set_notify_enabled = scmi_power_set_notify_enabled,
.fill_custom_report = scmi_power_fill_custom_report,
@@ -326,7 +349,8 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
- scmi_power_domain_attributes_get(ph, domain, dom, version);
+ scmi_power_domain_attributes_get(ph, domain, dom, version,
+ pinfo->notify_state_change_cmd);
}
pinfo->version = version;
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index a4c6cd4716fe..ea9201e7044c 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -124,6 +124,8 @@ struct scmi_powercap_state {
struct powercap_info {
u32 version;
int num_domains;
+ bool notify_cap_cmd;
+ bool notify_measurements_cmd;
struct scmi_powercap_state *states;
struct scmi_powercap_info *powercaps;
};
@@ -157,6 +159,18 @@ scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph,
+ POWERCAP_CAP_NOTIFY, NULL))
+ pi->notify_cap_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ NULL))
+ pi->notify_measurements_cmd = true;
+ }
+
return ret;
}
@@ -200,10 +214,12 @@ scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
flags = le32_to_cpu(resp->attributes);
dom_info->id = domain;
- dom_info->notify_powercap_cap_change =
- SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
- dom_info->notify_powercap_measurement_change =
- SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
+ if (pinfo->notify_cap_cmd)
+ dom_info->notify_powercap_cap_change =
+ SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
+ if (pinfo->notify_measurements_cmd)
+ dom_info->notify_powercap_measurement_change =
+ SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
dom_info->async_powercap_cap_set =
SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
dom_info->powercap_cap_config =
@@ -703,20 +719,24 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_CAP_SET, 4, domain,
&fc[POWERCAP_FC_CAP].set_addr,
- &fc[POWERCAP_FC_CAP].set_db);
+ &fc[POWERCAP_FC_CAP].set_db,
+ &fc[POWERCAP_FC_CAP].rate_limit);
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_CAP_GET, 4, domain,
- &fc[POWERCAP_FC_CAP].get_addr, NULL);
+ &fc[POWERCAP_FC_CAP].get_addr, NULL,
+ &fc[POWERCAP_FC_CAP].rate_limit);
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_SET, 4, domain,
&fc[POWERCAP_FC_PAI].set_addr,
- &fc[POWERCAP_FC_PAI].set_db);
+ &fc[POWERCAP_FC_PAI].set_db,
+ &fc[POWERCAP_FC_PAI].rate_limit);
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_GET, 4, domain,
- &fc[POWERCAP_FC_PAI].get_addr, NULL);
+ &fc[POWERCAP_FC_PAI].get_addr, NULL,
+ &fc[POWERCAP_FC_PAI].rate_limit);
*p_fc = fc;
}
@@ -788,6 +808,26 @@ static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
return ret;
}
+static bool
+scmi_powercap_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported = false;
+ const struct scmi_powercap_info *dom_info;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return false;
+
+ dom_info = pi->powercaps + src_id;
+ if (evt_id == SCMI_EVENT_POWERCAP_CAP_CHANGED)
+ supported = dom_info->notify_powercap_cap_change;
+ else if (evt_id == SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED)
+ supported = dom_info->notify_powercap_measurement_change;
+
+ return supported;
+}
+
static int
scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
@@ -904,6 +944,7 @@ static const struct scmi_event powercap_events[] = {
};
static const struct scmi_event_ops powercap_event_ops = {
+ .is_notify_supported = scmi_powercap_notify_supported,
.get_num_sources = scmi_powercap_get_num_sources,
.set_notify_enabled = scmi_powercap_set_notify_enabled,
.fill_custom_report = scmi_powercap_fill_custom_report,
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index e683c26f24eb..317d3fb32676 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -33,6 +33,7 @@ enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+ NEGOTIATE_PROTOCOL_VERSION = 0x10,
};
/**
@@ -234,6 +235,7 @@ struct scmi_fc_info {
void __iomem *set_addr;
void __iomem *get_addr;
struct scmi_fc_db_info *set_db;
+ u32 rate_limit;
};
/**
@@ -251,6 +253,8 @@ struct scmi_fc_info {
* provided in @ops.
* @iter_response_run: A common helper to trigger the run of a previously
* initialized iterator.
+ * @protocol_msg_check: A common helper to check if a specific protocol message
+ * is supported.
* @fastchannel_init: A common helper used to initialize FC descriptors by
* gathering FC descriptions from the SCMI platform server.
* @fastchannel_db_ring: A common helper to ring a FC doorbell.
@@ -264,11 +268,14 @@ struct scmi_proto_helpers_ops {
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv);
int (*iter_response_run)(void *iter);
+ int (*protocol_msg_check)(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes);
void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id,
u32 valid_size, u32 domain,
void __iomem **p_addr,
- struct scmi_fc_db_info **p_db);
+ struct scmi_fc_db_info **p_db,
+ u32 *rate_limit);
void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
};
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 19970d9f9e36..1b318316535e 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -67,6 +67,7 @@ struct reset_dom_info {
struct scmi_reset_info {
u32 version;
int num_domains;
+ bool notify_reset_cmd;
struct reset_dom_info *dom_info;
};
@@ -89,18 +90,24 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret)
+ if (!ph->hops->protocol_msg_check(ph, RESET_NOTIFY, NULL))
+ pi->notify_reset_cmd = true;
+
return ret;
}
static int
scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
- u32 domain, struct reset_dom_info *dom_info,
- u32 version)
+ struct scmi_reset_info *pinfo,
+ u32 domain, u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
+ struct reset_dom_info *dom_info = pinfo->dom_info + domain;
ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
sizeof(domain), sizeof(*attr), &t);
@@ -115,7 +122,9 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
attributes = le32_to_cpu(attr->attributes);
dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
- dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+ if (pinfo->notify_reset_cmd)
+ dom_info->reset_notify =
+ SUPPORTS_NOTIFY_RESET(attributes);
dom_info->latency_us = le32_to_cpu(attr->latency);
if (dom_info->latency_us == U32_MAX)
dom_info->latency_us = 0;
@@ -226,6 +235,20 @@ static const struct scmi_reset_proto_ops reset_proto_ops = {
.deassert = scmi_reset_domain_deassert,
};
+static bool scmi_reset_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ struct reset_dom_info *dom;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
+ if (evt_id != SCMI_EVENT_RESET_ISSUED || src_id >= pi->num_domains)
+ return false;
+
+ dom = pi->dom_info + src_id;
+
+ return dom->reset_notify;
+}
+
static int scmi_reset_notify(const struct scmi_protocol_handle *ph,
u32 domain_id, bool enable)
{
@@ -301,6 +324,7 @@ static const struct scmi_event reset_events[] = {
};
static const struct scmi_event_ops reset_event_ops = {
+ .is_notify_supported = scmi_reset_notify_supported,
.get_num_sources = scmi_reset_get_num_sources,
.set_notify_enabled = scmi_reset_set_notify_enabled,
.fill_custom_report = scmi_reset_fill_custom_report,
@@ -339,11 +363,8 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo->dom_info)
return -ENOMEM;
- for (domain = 0; domain < pinfo->num_domains; domain++) {
- struct reset_dom_info *dom = pinfo->dom_info + domain;
-
- scmi_reset_domain_attributes_get(ph, domain, dom, version);
- }
+ for (domain = 0; domain < pinfo->num_domains; domain++)
+ scmi_reset_domain_attributes_get(ph, pinfo, domain, version);
pinfo->version = version;
return ph->set_priv(ph, pinfo, version);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 311149965370..7fc5535ca34c 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -215,6 +215,8 @@ struct scmi_sensor_update_notify_payld {
struct sensors_info {
u32 version;
+ bool notify_trip_point_cmd;
+ bool notify_continuos_update_cmd;
int num_sensors;
int max_requests;
u64 reg_addr;
@@ -246,6 +248,18 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph,
+ SENSOR_TRIP_POINT_NOTIFY, NULL))
+ si->notify_trip_point_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY,
+ NULL))
+ si->notify_continuos_update_cmd = true;
+ }
+
return ret;
}
@@ -594,7 +608,8 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
* Such bitfields are assumed to be zeroed on non
* relevant fw versions...assuming fw not buggy !
*/
- s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ if (si->notify_continuos_update_cmd)
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
s->timestamped = SUPPORTS_TIMESTAMP(attrl);
if (s->timestamped)
s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl));
@@ -988,6 +1003,25 @@ static const struct scmi_sensor_proto_ops sensor_proto_ops = {
.config_set = scmi_sensor_config_set,
};
+static bool scmi_sensor_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported = false;
+ const struct scmi_sensor_info *s;
+ struct sensors_info *sinfo = ph->get_priv(ph);
+
+ s = scmi_sensor_info_get(ph, src_id);
+ if (!s)
+ return false;
+
+ if (evt_id == SCMI_EVENT_SENSOR_TRIP_POINT_EVENT)
+ supported = sinfo->notify_trip_point_cmd;
+ else if (evt_id == SCMI_EVENT_SENSOR_UPDATE)
+ supported = s->update;
+
+ return supported;
+}
+
static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
@@ -1099,6 +1133,7 @@ static const struct scmi_event sensor_events[] = {
};
static const struct scmi_event_ops sensor_event_ops = {
+ .is_notify_supported = scmi_sensor_notify_supported,
.get_num_sources = scmi_sensor_get_num_sources,
.set_notify_enabled = scmi_sensor_set_notify_enabled,
.fill_custom_report = scmi_sensor_fill_custom_report,
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 7611e9665038..39936e1dd30e 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -214,6 +214,13 @@ static int smc_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_smc *scmi_info = cinfo->transport_info;
+ /*
+ * Different protocols might share the same chan info, so a previous
+ * smc_chan_free call might have already freed the structure.
+ */
+ if (!scmi_info)
+ return 0;
+
/* Ignore any possible further reception on the IRQ path */
if (scmi_info->irq > 0)
free_irq(scmi_info->irq, scmi_info);
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 1621da97bcbb..b6358c155f7f 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -36,8 +36,20 @@ struct scmi_system_power_state_notifier_payld {
struct scmi_system_info {
u32 version;
bool graceful_timeout_supported;
+ bool power_state_notify_cmd;
};
+static bool scmi_system_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ struct scmi_system_info *pinfo = ph->get_priv(ph);
+
+ if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER)
+ return false;
+
+ return pinfo->power_state_notify_cmd;
+}
+
static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
bool enable)
{
@@ -114,6 +126,7 @@ static const struct scmi_event system_events[] = {
};
static const struct scmi_event_ops system_event_ops = {
+ .is_notify_supported = scmi_system_notify_supported,
.set_notify_enabled = scmi_system_set_notify_enabled,
.fill_custom_report = scmi_system_fill_custom_report,
};
@@ -147,6 +160,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
pinfo->graceful_timeout_supported = true;
+ if (!ph->hops->protocol_msg_check(ph, SYSTEM_POWER_STATE_NOTIFY, NULL))
+ pinfo->power_state_notify_cmd = true;
+
return ph->set_priv(ph, pinfo, version);
}
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 79d4254d1f9b..9f3d665cfdcf 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -522,7 +522,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
cs_dsp_debugfs_clear(dsp);
debugfs_remove_recursive(dsp->debugfs_root);
- dsp->debugfs_root = NULL;
+ dsp->debugfs_root = ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
#else
@@ -2246,6 +2246,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
mutex_init(&dsp->pwr_lock);
+#ifdef CONFIG_DEBUG_FS
+ /* Ensure this is invalid if client never provides a debugfs root */
+ dsp->debugfs_root = ERR_PTR(-ENODEV);
+#endif
+
return 0;
}
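
Why cs_dsp now seeds debugfs_root with ERR_PTR(-ENODEV) instead of NULL: debugfs treats a NULL parent as "create at the debugfs root", so a stale NULL would silently place entries in the wrong directory, while an error pointer makes later creation attempts fail cleanly. A userspace sketch of the error-pointer convention; ERR_PTR()/IS_ERR() are re-implemented here just for the demo.

#include <stdio.h>

/* Minimal userspace reimplementation of the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void create_debug_file(void *parent, const char *name)
{
	if (!parent)
		printf("%s: NULL parent would silently land at the debugfs root\n", name);
	else if (IS_ERR(parent))
		printf("%s: error-pointer parent, creation skipped cleanly\n", name);
	else
		printf("%s: created under a valid directory\n", name);
}

int main(void)
{
	void *root = ERR_PTR(-19);   /* -ENODEV: no debugfs root provided yet */

	create_debug_file(root, "wmfw_file_name");
	create_debug_file(NULL, "bin_file_name");
	return 0;
}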
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 3e8d4b51a814..97bafb5f7038 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
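
The capsule-loader fix matters on configurations where pointer and physical-address widths differ (e.g. a 32-bit kernel with 64-bit physical addressing), since sizeof(void *) then under-sizes the phys_addr_t allocation. A trivial check, assuming a 64-bit phys_addr_t for the example:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;   /* assumption: 64-bit physical addresses */

int main(void)
{
	/* On a 32-bit build these differ, so kzalloc(sizeof(void *)) is short. */
	printf("sizeof(void *)      = %zu\n", sizeof(void *));
	printf("sizeof(phys_addr_t) = %zu\n", sizeof(phys_addr_t));
	return 0;
}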
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 9b3884ff81e6..7d2cdd9e2227 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -445,8 +445,8 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%saer_uncor_severity: 0x%08x\n",
pfx, aer->uncor_severity);
printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
- aer->header_log.dw0, aer->header_log.dw1,
- aer->header_log.dw2, aer->header_log.dw3);
+ aer->header_log.dw[0], aer->header_log.dw[1],
+ aer->header_log.dw[2], aer->header_log.dw[3]);
}
}
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index e7b9ec6f8a86..833cbb995dd3 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -14,16 +14,43 @@ static unsigned int record_size = 1024;
module_param(record_size, uint, 0444);
MODULE_PARM_DESC(record_size, "size of each pstore UEFI var (in bytes, min/default=1024)");
-static bool efivars_pstore_disable =
- IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
-
-module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
-
#define PSTORE_EFI_ATTRIBUTES \
(EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS)
+static bool pstore_disable = IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+static int efivars_pstore_init(void);
+static void efivars_pstore_exit(void);
+
+static int efi_pstore_disable_set(const char *val, const struct kernel_param *kp)
+{
+ int err;
+ bool old_pstore_disable = pstore_disable;
+
+ err = param_set_bool(val, kp);
+ if (err)
+ return err;
+
+ if (old_pstore_disable != pstore_disable) {
+ if (pstore_disable)
+ efivars_pstore_exit();
+ else
+ efivars_pstore_init();
+ }
+
+ return 0;
+}
+
+static const struct kernel_param_ops pstore_disable_ops = {
+ .set = efi_pstore_disable_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(pstore_disable, &pstore_disable_ops, &pstore_disable, 0644);
+__MODULE_PARM_TYPE(pstore_disable, "bool");
+
static int efi_pstore_open(struct pstore_info *psi)
{
int err;
@@ -218,12 +245,12 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
-static __init int efivars_pstore_init(void)
+static int efivars_pstore_init(void)
{
if (!efivar_supports_writes())
return 0;
- if (efivars_pstore_disable)
+ if (pstore_disable)
return 0;
/*
@@ -250,7 +277,7 @@ static __init int efivars_pstore_init(void)
return 0;
}
-static __exit void efivars_pstore_exit(void)
+static void efivars_pstore_exit(void)
{
if (!efi_pstore_info.bufsize)
return;
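
The efi-pstore change turns pstore_disable into a runtime-writable parameter by giving it kernel_param_ops whose .set callback compares the old and new values and registers or unregisters the backend accordingly. A condensed userspace sketch of that toggle logic; backend_init()/backend_exit() stand in for efivars_pstore_init()/efivars_pstore_exit().

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real backend (un)registration. */
static void backend_init(void) { printf("pstore backend registered\n"); }
static void backend_exit(void) { printf("pstore backend unregistered\n"); }

static bool pstore_disable;

/* Same shape as efi_pstore_disable_set(): only act on an actual change. */
static int set_pstore_disable(bool new_val)
{
	bool old_val = pstore_disable;

	pstore_disable = new_val;
	if (old_val != new_val) {
		if (new_val)
			backend_exit();
		else
			backend_init();
	}
	return 0;
}

int main(void)
{
	set_pstore_disable(true);    /* writer echoes 1 into the module parameter */
	set_pstore_disable(true);    /* no change -> no re-registration */
	set_pstore_disable(false);   /* re-enable the backend */
	return 0;
}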
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4fcda50acfa4..8859fb0b006d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -597,7 +597,8 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
- {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
+ {EFI_TCG2_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "TPMFinalLog" },
+ {EFI_CC_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "CCFinalLog" },
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index bfa30625f5d0..de659f6a815f 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -11,6 +11,7 @@
#include <linux/efi.h>
#include <linux/kernel.h>
+#include <linux/overflow.h>
#include <asm/efi.h>
#include <asm/setup.h>
@@ -24,6 +25,8 @@ static bool efi_noinitrd;
static bool efi_nosoftreserve;
static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+int efi_mem_encrypt;
+
bool __pure __efi_soft_reserve_enabled(void)
{
return !efi_nosoftreserve;
@@ -75,6 +78,12 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_noinitrd = true;
} else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
efi_no5lvl = true;
+ } else if (IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT) &&
+ !strcmp(param, "mem_encrypt") && val) {
+ if (parse_option_str(val, "on"))
+ efi_mem_encrypt = 1;
+ else if (parse_option_str(val, "off"))
+ efi_mem_encrypt = -1;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
efi_novamap |= parse_option_str(val, "novamap");
@@ -193,7 +202,7 @@ void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_si
*load_options_size = load_option_unpacked.optional_data_size;
}
-enum efistub_event {
+enum efistub_event_type {
EFISTUB_EVT_INITRD,
EFISTUB_EVT_LOAD_OPTIONS,
EFISTUB_EVT_COUNT,
@@ -219,54 +228,95 @@ static const struct {
},
};
+static_assert(sizeof(efi_tcg2_event_t) == sizeof(efi_cc_event_t));
+
+union efistub_event {
+ efi_tcg2_event_t tcg2_data;
+ efi_cc_event_t cc_data;
+};
+
+struct efistub_measured_event {
+ union efistub_event event_data;
+ TCG_PCClientTaggedEvent tagged_event __packed;
+};
+
static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
unsigned long load_size,
- enum efistub_event event)
+ enum efistub_event_type event)
{
+ union {
+ efi_status_t
+ (__efiapi *hash_log_extend_event)(void *, u64, efi_physical_addr_t,
+ u64, const union efistub_event *);
+ struct { u32 hash_log_extend_event; } mixed_mode;
+ } method;
+ struct efistub_measured_event *evt;
+ int size = struct_size(evt, tagged_event.tagged_event_data,
+ events[event].event_data_len);
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_tcg2_protocol_t *tcg2 = NULL;
+ union efistub_event ev;
efi_status_t status;
+ void *protocol;
efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
if (tcg2) {
- struct efi_measured_event {
- efi_tcg2_event_t event_data;
- efi_tcg2_tagged_event_t tagged_event;
- u8 tagged_event_data[];
- } *evt;
- int size = sizeof(*evt) + events[event].event_data_len;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&evt);
- if (status != EFI_SUCCESS)
- goto fail;
-
- evt->event_data = (struct efi_tcg2_event){
+ ev.tcg2_data = (struct efi_tcg2_event){
.event_size = size,
- .event_header.header_size = sizeof(evt->event_data.event_header),
+ .event_header.header_size = sizeof(ev.tcg2_data.event_header),
.event_header.header_version = EFI_TCG2_EVENT_HEADER_VERSION,
.event_header.pcr_index = events[event].pcr_index,
.event_header.event_type = EV_EVENT_TAG,
};
+ protocol = tcg2;
+ method.hash_log_extend_event =
+ (void *)efi_table_attr(tcg2, hash_log_extend_event);
+ } else {
+ efi_guid_t cc_guid = EFI_CC_MEASUREMENT_PROTOCOL_GUID;
+ efi_cc_protocol_t *cc = NULL;
- evt->tagged_event = (struct efi_tcg2_tagged_event){
- .tagged_event_id = events[event].event_id,
- .tagged_event_data_size = events[event].event_data_len,
- };
-
- memcpy(evt->tagged_event_data, events[event].event_data,
- events[event].event_data_len);
+ efi_bs_call(locate_protocol, &cc_guid, NULL, (void **)&cc);
+ if (!cc)
+ return EFI_UNSUPPORTED;
- status = efi_call_proto(tcg2, hash_log_extend_event, 0,
- load_addr, load_size, &evt->event_data);
- efi_bs_call(free_pool, evt);
+ ev.cc_data = (struct efi_cc_event){
+ .event_size = size,
+ .event_header.header_size = sizeof(ev.cc_data.event_header),
+ .event_header.header_version = EFI_CC_EVENT_HEADER_VERSION,
+ .event_header.event_type = EV_EVENT_TAG,
+ };
+ status = efi_call_proto(cc, map_pcr_to_mr_index,
+ events[event].pcr_index,
+ &ev.cc_data.event_header.mr_index);
if (status != EFI_SUCCESS)
goto fail;
- return EFI_SUCCESS;
+
+ protocol = cc;
+ method.hash_log_extend_event =
+ (void *)efi_table_attr(cc, hash_log_extend_event);
}
- return EFI_UNSUPPORTED;
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, (void **)&evt);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ *evt = (struct efistub_measured_event) {
+ .event_data = ev,
+ .tagged_event.tagged_event_id = events[event].event_id,
+ .tagged_event.tagged_event_data_size = events[event].event_data_len,
+ };
+
+ memcpy(evt->tagged_event.tagged_event_data, events[event].event_data,
+ events[event].event_data_len);
+
+ status = efi_fn_call(&method, hash_log_extend_event, protocol, 0,
+ load_addr, load_size, &evt->event_data);
+ efi_bs_call(free_pool, evt);
+
+ if (status == EFI_SUCCESS)
+ return EFI_SUCCESS;
+
fail:
efi_warn("Failed to measure data for event %d: 0x%lx\n", event, status);
return status;
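
The measured-event rework above sizes its allocation with struct_size() over the flexible tagged_event_data[] array instead of open-coding sizeof(*evt) + len. A small sketch of the same flexible-array sizing using an offsetof()-based equivalent; the tag id and payload below are illustrative, not taken from the (elided) events[] table.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified analogue of TCG_PCClientTaggedEvent with a flexible array member. */
struct tagged_event {
	uint32_t id;
	uint32_t data_size;
	uint8_t data[];
};

int main(void)
{
	const char payload[] = "example payload";   /* illustrative */
	/* struct_size(evt, data, n) in the kernel; offsetof-based equivalent here. */
	size_t size = offsetof(struct tagged_event, data) + sizeof(payload);
	struct tagged_event *evt = calloc(1, size);

	if (!evt)
		return 1;
	evt->id = 0x1234;                 /* illustrative tag id */
	evt->data_size = sizeof(payload);
	memcpy(evt->data, payload, sizeof(payload));

	printf("allocated %zu bytes for a %u-byte tagged payload\n",
	       size, evt->data_size);
	free(evt);
	return 0;
}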
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index f9c1e8a2bd1d..958a680e0660 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -167,7 +167,7 @@ efi_status_t efi_stub_common(efi_handle_t handle,
si = setup_graphics();
- efi_retrieve_tpm2_eventlog();
+ efi_retrieve_eventlog();
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation();
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index c04b82ea40f2..27abb4ce0291 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -37,8 +37,8 @@ extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
extern int efi_loglevel;
+extern int efi_mem_encrypt;
extern bool efi_novamap;
-
extern const efi_system_table_t *efi_system_table;
typedef union efi_dxe_services_table efi_dxe_services_table_t;
@@ -843,14 +843,14 @@ struct efi_tcg2_event {
/* u8[] event follows here */
} __packed;
-struct efi_tcg2_tagged_event {
- u32 tagged_event_id;
- u32 tagged_event_data_size;
- /* u8 tagged event data follows here */
-} __packed;
+/* from TCG PC Client Platform Firmware Profile Specification */
+typedef struct tdTCG_PCClientTaggedEvent {
+ u32 tagged_event_id;
+ u32 tagged_event_data_size;
+ u8 tagged_event_data[];
+} TCG_PCClientTaggedEvent;
typedef struct efi_tcg2_event efi_tcg2_event_t;
-typedef struct efi_tcg2_tagged_event efi_tcg2_tagged_event_t;
typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
union efi_tcg2_protocol {
@@ -882,6 +882,87 @@ union efi_tcg2_protocol {
} mixed_mode;
};
+typedef struct {
+ u8 major;
+ u8 minor;
+} efi_cc_version_t;
+
+typedef struct {
+ u8 type;
+ u8 sub_type;
+} efi_cc_type_t;
+
+/* EFI CC type/subtype defines */
+#define EFI_CC_TYPE_NONE 0
+#define EFI_CC_TYPE_AMD_SEV 1
+#define EFI_CC_TYPE_INTEL_TDX 2
+
+typedef u32 efi_cc_mr_index_t;
+
+struct efi_cc_event {
+ u32 event_size;
+ struct {
+ u32 header_size;
+ u16 header_version;
+ u32 mr_index;
+ u32 event_type;
+ } __packed event_header;
+ /* u8[] event follows here */
+} __packed;
+
+typedef struct efi_cc_event efi_cc_event_t;
+
+typedef u32 efi_cc_event_log_bitmap_t;
+typedef u32 efi_cc_event_log_format_t;
+typedef u32 efi_cc_event_algorithm_bitmap_t;
+
+typedef struct {
+ u8 size;
+ efi_cc_version_t structure_version;
+ efi_cc_version_t protocol_version;
+ efi_cc_event_algorithm_bitmap_t hash_algorithm_bitmap;
+ efi_cc_event_log_bitmap_t supported_event_logs;
+ efi_cc_type_t cc_type;
+} efi_cc_boot_service_cap_t;
+
+#define EFI_CC_EVENT_HEADER_VERSION 1
+
+#define EFI_CC_BOOT_HASH_ALG_SHA384 0x00000004
+
+#define EFI_CC_EVENT_LOG_FORMAT_TCG_2 0x00000002
+
+typedef union efi_cc_protocol efi_cc_protocol_t;
+
+union efi_cc_protocol {
+ struct {
+ efi_status_t
+ (__efiapi *get_capability)(efi_cc_protocol_t *,
+ efi_cc_boot_service_cap_t *);
+
+ efi_status_t
+ (__efiapi *get_event_log)(efi_cc_protocol_t *,
+ efi_cc_event_log_format_t,
+ efi_physical_addr_t *,
+ efi_physical_addr_t *,
+ efi_bool_t *);
+
+ efi_status_t
+ (__efiapi *hash_log_extend_event)(efi_cc_protocol_t *, u64,
+ efi_physical_addr_t, u64,
+ const efi_cc_event_t *);
+
+ efi_status_t
+ (__efiapi *map_pcr_to_mr_index)(efi_cc_protocol_t *, u32,
+ efi_cc_mr_index_t *);
+ };
+ struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 map_pcr_to_mr_index;
+ } mixed_mode;
+};
+
struct riscv_efi_boot_protocol {
u64 revision;
@@ -1061,7 +1142,7 @@ static inline void
efi_enable_reset_attack_mitigation(void) { }
#endif
-void efi_retrieve_tpm2_eventlog(void);
+void efi_retrieve_eventlog(void);
struct screen_info *alloc_screen_info(void);
struct screen_info *__alloc_screen_info(void);
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 7acbac16eae0..df3182f2e63a 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -47,39 +47,18 @@ void efi_enable_reset_attack_mitigation(void)
#endif
-void efi_retrieve_tpm2_eventlog(void)
+static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_location,
+ efi_physical_addr_t log_last_entry,
+ efi_bool_t truncated,
+ struct efi_tcg2_final_events_table *final_events_table)
{
- efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
efi_status_t status;
- efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
- struct efi_tcg2_final_events_table *final_events_table = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
- efi_bool_t truncated;
- int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
- efi_tcg2_protocol_t *tcg2_protocol = NULL;
int final_events_size = 0;
- status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
- (void **)&tcg2_protocol);
- if (status != EFI_SUCCESS)
- return;
-
- status = efi_call_proto(tcg2_protocol, get_event_log, version,
- &log_location, &log_last_entry, &truncated);
-
- if (status != EFI_SUCCESS || !log_location) {
- version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
- status = efi_call_proto(tcg2_protocol, get_event_log, version,
- &log_location, &log_last_entry,
- &truncated);
- if (status != EFI_SUCCESS || !log_location)
- return;
-
- }
-
first_entry_addr = (unsigned long) log_location;
/*
@@ -93,8 +72,10 @@ void efi_retrieve_tpm2_eventlog(void)
* get_event_log only returns the address of the last entry.
* We need to calculate its size to deduce the full size of
* the logs.
+ *
+ * The CC event log also uses the TCG2 format, so handle it the same as TPM2.
*/
- if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ if (version > EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2) {
/*
* The TCG2 log format has variable length entries,
* and the information to decode the hash algorithms
@@ -127,8 +108,6 @@ void efi_retrieve_tpm2_eventlog(void)
* Figure out whether any events have already been logged to the
* final events structure, and if so how much space they take up
*/
- if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
- final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
int offset;
@@ -165,3 +144,50 @@ void efi_retrieve_tpm2_eventlog(void)
err_free:
efi_bs_call(free_pool, log_tbl);
}
+
+void efi_retrieve_eventlog(void)
+{
+ struct efi_tcg2_final_events_table *final_events_table = NULL;
+ efi_physical_addr_t log_location = 0, log_last_entry = 0;
+ efi_guid_t tpm2_guid = EFI_TCG2_PROTOCOL_GUID;
+ int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ efi_tcg2_protocol_t *tpm2 = NULL;
+ efi_bool_t truncated;
+ efi_status_t status;
+
+ status = efi_bs_call(locate_protocol, &tpm2_guid, NULL, (void **)&tpm2);
+ if (status == EFI_SUCCESS) {
+ status = efi_call_proto(tpm2, get_event_log, version, &log_location,
+ &log_last_entry, &truncated);
+
+ if (status != EFI_SUCCESS || !log_location) {
+ version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ status = efi_call_proto(tpm2, get_event_log, version,
+ &log_location, &log_last_entry,
+ &truncated);
+ } else {
+ final_events_table =
+ get_efi_config_table(EFI_TCG2_FINAL_EVENTS_TABLE_GUID);
+ }
+ } else {
+ efi_guid_t cc_guid = EFI_CC_MEASUREMENT_PROTOCOL_GUID;
+ efi_cc_protocol_t *cc = NULL;
+
+ status = efi_bs_call(locate_protocol, &cc_guid, NULL, (void **)&cc);
+ if (status != EFI_SUCCESS)
+ return;
+
+ version = EFI_CC_EVENT_LOG_FORMAT_TCG_2;
+ status = efi_call_proto(cc, get_event_log, version, &log_location,
+ &log_last_entry, &truncated);
+
+ final_events_table =
+ get_efi_config_table(EFI_CC_FINAL_EVENTS_TABLE_GUID);
+ }
+
+ if (status != EFI_SUCCESS || !log_location)
+ return;
+
+ efi_retrieve_tcg2_eventlog(version, log_location, log_last_entry,
+ truncated, final_events_table);
+}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 99429bc4b0c7..4f448d4df7b8 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -236,6 +236,15 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
+ status = efi_call_proto(memattr, set_memory_attributes,
+ rounded_start,
+ rounded_end - rounded_start,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
+ return status;
+ }
+
status = efi_call_proto(memattr, clear_memory_attributes,
rounded_start,
rounded_end - rounded_start,
@@ -812,7 +821,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_total_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
@@ -884,6 +893,9 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
}
}
+ if (efi_mem_encrypt > 0)
+ hdr->xloadflags |= XLF_MEM_ENCRYPTION;
+
status = efi_decompress_kernel(&kernel_entry);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
@@ -923,7 +935,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
efi_random_get_seed();
- efi_retrieve_tpm2_eventlog();
+ efi_retrieve_eventlog();
setup_graphics(boot_params);
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 88e587ba1e0d..c2bffdc352a3 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -114,6 +114,12 @@ static int cbmem_entry_probe(struct coreboot_device *dev)
return 0;
}
+static const struct coreboot_device_id cbmem_ids[] = {
+ { .tag = LB_TAG_CBMEM_ENTRY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, cbmem_ids);
+
static struct coreboot_driver cbmem_entry_driver = {
.probe = cbmem_entry_probe,
.drv = {
@@ -121,7 +127,7 @@ static struct coreboot_driver cbmem_entry_driver = {
.owner = THIS_MODULE,
.dev_groups = dev_groups,
},
- .tag = LB_TAG_CBMEM_ENTRY,
+ .id_table = cbmem_ids,
};
module_coreboot_driver(cbmem_entry_driver);
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 2a4469bf1b81..d4b6e581a6c6 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -28,8 +28,17 @@ static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
{
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(drv);
+ const struct coreboot_device_id *id;
- return device->entry.tag == driver->tag;
+ if (!driver->id_table)
+ return 0;
+
+ for (id = driver->id_table; id->tag; id++) {
+ if (device->entry.tag == id->tag)
+ return 1;
+ }
+
+ return 0;
}
static int coreboot_bus_probe(struct device *dev)
@@ -53,11 +62,20 @@ static void coreboot_bus_remove(struct device *dev)
driver->remove(device);
}
-static struct bus_type coreboot_bus_type = {
+static int coreboot_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+ u32 tag = device->entry.tag;
+
+ return add_uevent_var(env, "MODALIAS=coreboot:t%08X", tag);
+}
+
+static const struct bus_type coreboot_bus_type = {
.name = "coreboot",
.match = coreboot_bus_match,
.probe = coreboot_bus_probe,
.remove = coreboot_bus_remove,
+ .uevent = coreboot_bus_uevent,
};
static void coreboot_device_release(struct device *dev)
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index d814dca33a08..86427989c57f 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -13,6 +13,7 @@
#define __COREBOOT_TABLE_H
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
/* Coreboot table header structure */
struct coreboot_table_header {
@@ -93,7 +94,7 @@ struct coreboot_driver {
int (*probe)(struct coreboot_device *);
void (*remove)(struct coreboot_device *);
struct device_driver drv;
- u32 tag;
+ const struct coreboot_device_id *id_table;
};
/* Register a driver that uses the data from a coreboot table. */
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 5c84bbebfef8..07c458bf64ec 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -80,13 +80,19 @@ static void framebuffer_remove(struct coreboot_device *dev)
platform_device_unregister(pdev);
}
+static const struct coreboot_device_id framebuffer_ids[] = {
+ { .tag = CB_TAG_FRAMEBUFFER },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, framebuffer_ids);
+
static struct coreboot_driver framebuffer_driver = {
.probe = framebuffer_probe,
.remove = framebuffer_remove,
.drv = {
.name = "framebuffer",
},
- .tag = CB_TAG_FRAMEBUFFER,
+ .id_table = framebuffer_ids,
};
module_coreboot_driver(framebuffer_driver);
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index 74b5286518ee..24c97a70aa80 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -96,13 +96,19 @@ static void memconsole_remove(struct coreboot_device *dev)
memconsole_exit();
}
+static const struct coreboot_device_id memconsole_ids[] = {
+ { .tag = CB_TAG_CBMEM_CONSOLE },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, memconsole_ids);
+
static struct coreboot_driver memconsole_driver = {
.probe = memconsole_probe,
.remove = memconsole_remove,
.drv = {
.name = "memconsole",
},
- .tag = CB_TAG_CBMEM_CONSOLE,
+ .id_table = memconsole_ids,
};
module_coreboot_driver(memconsole_driver);
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index ee6e08c0592b..8e4216714b29 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -306,13 +306,19 @@ static void vpd_remove(struct coreboot_device *dev)
kobject_put(vpd_kobj);
}
+static const struct coreboot_device_id vpd_ids[] = {
+ { .tag = CB_TAG_VPD },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, vpd_ids);
+
static struct coreboot_driver vpd_driver = {
.probe = vpd_probe,
.remove = vpd_remove,
.drv = {
.name = "vpd",
},
- .tag = CB_TAG_VPD,
+ .id_table = vpd_ids,
};
module_coreboot_driver(vpd_driver);
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 81f5f62e34fc..fbeeaee4ac85 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -167,7 +167,7 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
@@ -384,7 +384,8 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev,
+ AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
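
Aside on the two hunks above (not part of the patch): they fix a classic
allocation-size slip. sizeof(response_msg) is the size of the pointer itself,
while sizeof(*response_msg) is the size of one u32 element, which is what the
buffer length should be scaled by. A standalone snippet showing the difference
(the printed values assume a common LP64 build):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t *response_msg = NULL;

		/* Size of the pointer -- what the old code multiplied by. */
		printf("sizeof(response_msg)  = %zu\n", sizeof(response_msg));
		/* Size of one element -- what the fixed code multiplies by. */
		printf("sizeof(*response_msg) = %zu\n", sizeof(*response_msg));
		return 0;
	}
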
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 3c197db42c9d..880ffcb50088 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/pci.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
@@ -69,24 +70,72 @@ void sysfb_disable(void)
}
EXPORT_SYMBOL_GPL(sysfb_disable);
+#if defined(CONFIG_PCI)
+static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+{
+ /*
+ * TODO: Try to integrate this code into the PCI subsystem
+ */
+ int ret;
+ u16 command;
+
+ ret = pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return false;
+ if (!(command & PCI_COMMAND_MEMORY))
+ return false;
+ return true;
+}
+#else
+static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+{
+ return false;
+}
+#endif
+
+static __init struct device *sysfb_parent_dev(const struct screen_info *si)
+{
+ struct pci_dev *pdev;
+
+ pdev = screen_info_pci_dev(si);
+ if (IS_ERR(pdev)) {
+ return ERR_CAST(pdev);
+ } else if (pdev) {
+ if (!sysfb_pci_dev_is_enabled(pdev))
+ return ERR_PTR(-ENODEV);
+ return &pdev->dev;
+ }
+
+ return NULL;
+}
+
static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
+ struct device *parent;
struct simplefb_platform_data mode;
const char *name;
bool compatible;
int ret = 0;
+ screen_info_apply_fixups();
+
mutex_lock(&disable_lock);
if (disabled)
goto unlock_mutex;
sysfb_apply_efi_quirks();
+ parent = sysfb_parent_dev(si);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto unlock_mutex;
+ }
+
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
- pd = sysfb_create_simplefb(si, &mode);
+ pd = sysfb_create_simplefb(si, &mode, parent);
if (!IS_ERR(pd))
goto unlock_mutex;
}
@@ -109,6 +158,8 @@ static __init int sysfb_init(void)
goto unlock_mutex;
}
+ pd->dev.parent = parent;
+
sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 74363ed7501f..75a186bf8f8e 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -91,7 +91,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
}
__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
- const struct simplefb_platform_data *mode)
+ const struct simplefb_platform_data *mode,
+ struct device *parent)
{
struct platform_device *pd;
struct resource res;
@@ -143,6 +144,8 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
if (!pd)
return ERR_PTR(-ENOMEM);
+ pd->dev.parent = parent;
+
sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_resources(pd, &res, 1);
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index bbcdd9fed3fb..4221fed70ad4 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -77,7 +77,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
- goto out;
+ return NULL;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
root_path_buf_len);
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 7cbb3558b844..c0028ae4c5b7 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -66,7 +66,7 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
}
/* Lock the bus, assert CRESET_B and SS_B and delay >200ns */
- spi_bus_lock(dev->master);
+ spi_bus_lock(dev->controller);
gpiod_set_value(priv->reset, 1);
@@ -94,7 +94,7 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
ret = spi_sync_locked(dev, &message);
fail:
- spi_bus_unlock(dev->master);
+ spi_bus_unlock(dev->controller);
return ret;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1301cec94f12..b50d0b470849 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -478,6 +478,18 @@ config GPIO_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
+config GPIO_NOMADIK
+ bool "Nomadik GPIO driver"
+ depends on ARCH_U8500 || ARCH_NOMADIK || MACH_EYEQ5 || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the Nomadik SoC GPIO block. This block is also
+ used by the Mobileye EyeQ5 SoC.
+
+ It handles up to 32 GPIOs per bank, all of which can be interrupt sources.
+ It is deeply interconnected with the associated pinctrl driver, as the GPIO
+ registers handle muxing ("alternate functions") as well.
+
config GPIO_NPCM_SGPIO
bool "Nuvoton SGPIO support"
depends on ARCH_NPCM || COMPILE_TEST
@@ -711,8 +723,9 @@ config GPIO_UNIPHIER
Say yes here to support UniPhier GPIOs.
config GPIO_VF610
- def_bool y
- depends on ARCH_MXC
+ bool "VF610 GPIO support"
+ default y if SOC_VF610
+ depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support i.MX or Vybrid vf610 GPIOs.
@@ -1240,6 +1253,16 @@ config GPIO_BD9571MWV
This driver can also be built as a module. If so, the module
will be called gpio-bd9571mwv.
+config GPIO_CROS_EC
+ tristate "ChromeOS EC GPIO support"
+ depends on CROS_EC
+ help
+ GPIO driver for the ChromeOS Embedded Controller (EC). GPIOs
+ cannot be set unless the system is unlocked.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-cros-ec.
+
config GPIO_CRYSTAL_COVE
tristate "GPIO support for Crystal Cove PMIC"
depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 9e40af196aae..fdd28c58d890 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o
+obj-$(CONFIG_GPIO_CROS_EC) += gpio-cros-ec.o
obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e00c33310517..753e7be039e4 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
new file mode 100644
index 000000000000..842e1c060414
--- /dev/null
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2024 Google LLC
+ *
+ * This driver provides the ability to control GPIOs on the Chrome OS EC.
+ * There isn't any direction control, and setting values on GPIOs is only
+ * possible when the system is unlocked.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+/* Prefix all names to avoid collisions with EC <-> AP nets */
+static const char cros_ec_gpio_prefix[] = "EC:";
+
+/* Setting gpios is only supported when the system is unlocked */
+static void cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ const char *name = gc->names[gpio] + strlen(cros_ec_gpio_prefix);
+ struct cros_ec_device *cros_ec = gpiochip_get_data(gc);
+ struct ec_params_gpio_set params = {
+ .val = val,
+ };
+ int ret;
+ ssize_t copied;
+
+ copied = strscpy(params.name, name, sizeof(params.name));
+ if (copied < 0)
+ return;
+
+ ret = cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
+ sizeof(params), NULL, 0);
+ if (ret < 0)
+ dev_err(gc->parent, "error setting gpio%d (%s) on EC: %d\n", gpio, name, ret);
+}
+
+static int cros_ec_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ const char *name = gc->names[gpio] + strlen(cros_ec_gpio_prefix);
+ struct cros_ec_device *cros_ec = gpiochip_get_data(gc);
+ struct ec_params_gpio_get params;
+ struct ec_response_gpio_get response;
+ int ret;
+ ssize_t copied;
+
+ copied = strscpy(params.name, name, sizeof(params.name));
+ if (copied < 0)
+ return -EINVAL;
+
+ ret = cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_GET, &params,
+ sizeof(params), &response, sizeof(response));
+ if (ret < 0) {
+ dev_err(gc->parent, "error getting gpio%d (%s) on EC: %d\n", gpio, name, ret);
+ return ret;
+ }
+
+ return response.val;
+}
+
+#define CROS_EC_GPIO_INPUT BIT(8)
+#define CROS_EC_GPIO_OUTPUT BIT(9)
+
+static int cros_ec_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ const char *name = gc->names[gpio] + strlen(cros_ec_gpio_prefix);
+ struct cros_ec_device *cros_ec = gpiochip_get_data(gc);
+ struct ec_params_gpio_get_v1 params = {
+ .subcmd = EC_GPIO_GET_INFO,
+ .get_info.index = gpio,
+ };
+ struct ec_response_gpio_get_v1 response;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_GPIO_GET, &params,
+ sizeof(params), &response, sizeof(response));
+ if (ret < 0) {
+ dev_err(gc->parent, "error getting direction of gpio%d (%s) on EC: %d\n", gpio, name, ret);
+ return ret;
+ }
+
+ if (response.get_info.flags & CROS_EC_GPIO_INPUT)
+ return GPIO_LINE_DIRECTION_IN;
+
+ if (response.get_info.flags & CROS_EC_GPIO_OUTPUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return -EINVAL;
+}
+
+/* Query EC for all gpio line names */
+static int cros_ec_gpio_init_names(struct cros_ec_device *cros_ec, struct gpio_chip *gc)
+{
+ struct ec_params_gpio_get_v1 params = {
+ .subcmd = EC_GPIO_GET_INFO,
+ };
+ struct ec_response_gpio_get_v1 response;
+ int ret, i;
+ /* EC may not NUL terminate */
+ size_t name_len = strlen(cros_ec_gpio_prefix) + sizeof(response.get_info.name) + 1;
+ ssize_t copied;
+ const char **names;
+ char *str;
+
+ names = devm_kcalloc(gc->parent, gc->ngpio, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+ gc->names = names;
+
+ str = devm_kcalloc(gc->parent, gc->ngpio, name_len, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ /* Get gpio line names one at a time */
+ for (i = 0; i < gc->ngpio; i++) {
+ params.get_info.index = i;
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_GPIO_GET, &params,
+ sizeof(params), &response, sizeof(response));
+ if (ret < 0) {
+ dev_err_probe(gc->parent, ret, "error getting gpio%d info\n", i);
+ return ret;
+ }
+
+ names[i] = str;
+ copied = scnprintf(str, name_len, "%s%s", cros_ec_gpio_prefix,
+ response.get_info.name);
+ if (copied < 0)
+ return copied;
+
+ str += copied + 1;
+ }
+
+ return 0;
+}
+
+/* Query EC for number of gpios */
+static int cros_ec_gpio_ngpios(struct cros_ec_device *cros_ec)
+{
+ struct ec_params_gpio_get_v1 params = {
+ .subcmd = EC_GPIO_GET_COUNT,
+ };
+ struct ec_response_gpio_get_v1 response;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_GPIO_GET, &params,
+ sizeof(params), &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ return response.get_count.val;
+}
+
+static int cros_ec_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct gpio_chip *gc;
+ int ngpios;
+ int ret;
+
+ /* Use the fwnode from the protocol device, e.g. cros-ec-spi */
+ device_set_node(dev, dev_fwnode(cros_ec->dev));
+
+ ngpios = cros_ec_gpio_ngpios(cros_ec);
+ if (ngpios < 0) {
+ dev_err_probe(dev, ngpios, "error getting gpio count\n");
+ return ngpios;
+ }
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ gc->ngpio = ngpios;
+ gc->parent = dev;
+ ret = cros_ec_gpio_init_names(cros_ec, gc);
+ if (ret)
+ return ret;
+
+ gc->can_sleep = true;
+ gc->label = dev_name(dev);
+ gc->base = -1;
+ gc->set = cros_ec_gpio_set;
+ gc->get = cros_ec_gpio_get;
+ gc->get_direction = cros_ec_gpio_get_direction;
+
+ return devm_gpiochip_add_data(dev, gc, cros_ec);
+}
+
+static struct platform_driver cros_ec_gpio_driver = {
+ .probe = cros_ec_gpio_probe,
+ .driver = {
+ .name = "cros-ec-gpio",
+ },
+};
+module_platform_driver(cros_ec_gpio_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC GPIO Driver");
+MODULE_LICENSE("GPL");
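
Aside on the new driver above (not part of the patch): because every EC line is
registered with the "EC:" prefix, userspace can find them through the GPIO
character-device uAPI. A rough sketch follows; the chip node /dev/gpiochip0 is
only an assumption, since the EC lines may appear on any gpiochip device:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/gpio.h>

	int main(void)
	{
		struct gpiochip_info cinfo;
		unsigned int i;
		int fd;

		fd = open("/dev/gpiochip0", O_RDONLY);	/* assumed chip node */
		if (fd < 0 || ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &cinfo) < 0)
			return 1;

		for (i = 0; i < cinfo.lines; i++) {
			struct gpio_v2_line_info linfo = { .offset = i };

			if (ioctl(fd, GPIO_V2_GET_LINEINFO_IOCTL, &linfo) < 0)
				continue;
			/* EC lines carry the "EC:" prefix added by the driver. */
			if (!strncmp(linfo.name, "EC:", 3))
				printf("offset %u: %s\n", linfo.offset, linfo.name);
		}

		close(fd);
		return 0;
	}
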
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 806b88d8dfb7..2dd0e46c42ad 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -108,7 +108,6 @@ static struct sprd_eic *to_sprd_eic(struct notifier_block *nb)
struct sprd_eic_variant_data {
enum sprd_eic_type type;
- u32 num_eics;
};
static const char *sprd_eic_label_name[SPRD_EIC_MAX] = {
@@ -118,22 +117,18 @@ static const char *sprd_eic_label_name[SPRD_EIC_MAX] = {
static const struct sprd_eic_variant_data sc9860_eic_dbnc_data = {
.type = SPRD_EIC_DEBOUNCE,
- .num_eics = 8,
};
static const struct sprd_eic_variant_data sc9860_eic_latch_data = {
.type = SPRD_EIC_LATCH,
- .num_eics = 8,
};
static const struct sprd_eic_variant_data sc9860_eic_async_data = {
.type = SPRD_EIC_ASYNC,
- .num_eics = 8,
};
static const struct sprd_eic_variant_data sc9860_eic_sync_data = {
.type = SPRD_EIC_SYNC,
- .num_eics = 8,
};
static inline void __iomem *sprd_eic_offset_base(struct sprd_eic *sprd_eic,
@@ -619,6 +614,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
struct gpio_irq_chip *irq;
struct sprd_eic *sprd_eic;
struct resource *res;
+ u16 num_banks = 0;
int ret, i;
pdata = of_device_get_match_data(dev);
@@ -652,10 +648,12 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->base[i] = devm_ioremap_resource(dev, res);
if (IS_ERR(sprd_eic->base[i]))
return PTR_ERR(sprd_eic->base[i]);
+
+ num_banks++;
}
sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type];
- sprd_eic->chip.ngpio = pdata->num_eics;
+ sprd_eic->chip.ngpio = num_banks * SPRD_EIC_PER_BANK_NR;
sprd_eic->chip.base = -1;
sprd_eic->chip.parent = dev;
sprd_eic->chip.direction_input = sprd_eic_direction_input;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index a13f3c18ccd4..8cfd3a89c018 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -99,7 +99,6 @@ struct mvebu_pwm {
u32 offset;
unsigned long clk_rate;
struct gpio_desc *gpiod;
- struct pwm_chip chip;
spinlock_t lock;
struct mvebu_gpio_chip *mvchip;
@@ -615,7 +614,7 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
*/
static struct mvebu_pwm *to_mvebu_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct mvebu_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int mvebu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -789,6 +788,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
+ struct pwm_chip *chip;
void __iomem *base;
u32 offset;
u32 set;
@@ -813,9 +813,11 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
- mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
- if (!mvpwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, mvchip->chip.ngpio, sizeof(*mvpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mvpwm = to_mvebu_pwm(chip);
+
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
mvpwm->offset = offset;
@@ -868,13 +870,11 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
return -EINVAL;
}
- mvpwm->chip.dev = dev;
- mvpwm->chip.ops = &mvebu_pwm_ops;
- mvpwm->chip.npwm = mvchip->chip.ngpio;
+ chip->ops = &mvebu_pwm_ops;
spin_lock_init(&mvpwm->lock);
- return devm_pwmchip_add(dev, &mvpwm->chip);
+ return devm_pwmchip_add(dev, chip);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
new file mode 100644
index 000000000000..836f1cc760c2
--- /dev/null
+++ b/drivers/gpio/gpio-nomadik.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPIO driver for the IP block found in the Nomadik SoC; it is an AMBA device,
+ * managing 32 pins with alternate functions. It can also handle the STA2X11
+ * block from ST.
+ *
+ * The GPIO chips are shared with pinctrl-nomadik, if it is used; the pinctrl
+ * driver needs access to them for pin muxing and other functionality.
+ *
+ * This driver also handles the mobileye,eyeq5-gpio compatible. It is an STA2X11,
+ * but with only the data, direction and interrupt registers active. We want to
+ * avoid touching the SLPM, RWIMSC, FWIMSC, AFSLA and AFSLB registers, that is,
+ * the wake and alternate-function registers. It is NOT compatible with
+ * pinctrl-nomadik.
+ *
+ * Copyright (C) 2008,2009 STMicroelectronics
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/gpio/gpio-nomadik.h>
+
+#ifndef CONFIG_PINCTRL_NOMADIK
+static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
+#endif
+
+void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned int offset,
+ enum nmk_gpio_slpm mode)
+{
+ u32 slpm;
+
+ /* We should NOT have been called. */
+ if (WARN_ON(nmk_chip->is_mobileye_soc))
+ return;
+
+ slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
+ if (mode == NMK_GPIO_SLPM_NOCHANGE)
+ slpm |= BIT(offset);
+ else
+ slpm &= ~BIT(offset);
+ writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
+}
+
+static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val)
+{
+ if (val)
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
+ else
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
+}
+
+void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val)
+{
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRS);
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
+/* IRQ functions */
+
+static void nmk_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ clk_enable(nmk_chip->clk);
+ writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
+ clk_disable(nmk_chip->clk);
+}
+
+enum nmk_gpio_irq_type {
+ NORMAL,
+ WAKE,
+};
+
+static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
+ int offset, enum nmk_gpio_irq_type which,
+ bool enable)
+{
+ u32 *rimscval;
+ u32 *fimscval;
+ u32 rimscreg;
+ u32 fimscreg;
+
+ if (which == NORMAL) {
+ rimscreg = NMK_GPIO_RIMSC;
+ fimscreg = NMK_GPIO_FIMSC;
+ rimscval = &nmk_chip->rimsc;
+ fimscval = &nmk_chip->fimsc;
+ } else {
+ /* We should NOT have been called. */
+ if (WARN_ON(nmk_chip->is_mobileye_soc))
+ return;
+ rimscreg = NMK_GPIO_RWIMSC;
+ fimscreg = NMK_GPIO_FWIMSC;
+ rimscval = &nmk_chip->rwimsc;
+ fimscval = &nmk_chip->fwimsc;
+ }
+
+ /* we must individually set/clear the two edges */
+ if (nmk_chip->edge_rising & BIT(offset)) {
+ if (enable)
+ *rimscval |= BIT(offset);
+ else
+ *rimscval &= ~BIT(offset);
+ writel(*rimscval, nmk_chip->addr + rimscreg);
+ }
+ if (nmk_chip->edge_falling & BIT(offset)) {
+ if (enable)
+ *fimscval |= BIT(offset);
+ else
+ *fimscval &= ~BIT(offset);
+ writel(*fimscval, nmk_chip->addr + fimscreg);
+ }
+}
+
+static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
+ int offset, bool on)
+{
+ /* We should NOT have been called. */
+ if (WARN_ON(nmk_chip->is_mobileye_soc))
+ return;
+
+ /*
+ * Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is
+ * disabled, since setting SLPM to 1 increases power consumption, and
+ * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
+ */
+ if (nmk_chip->sleepmode && on) {
+ __nmk_gpio_set_slpm(nmk_chip, offset,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE);
+ }
+
+ __nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
+}
+
+static void nmk_gpio_irq_maskunmask(struct nmk_gpio_chip *nmk_chip,
+ struct irq_data *d, bool enable)
+{
+ unsigned long flags;
+
+ clk_enable(nmk_chip->clk);
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable);
+
+ if (!nmk_chip->is_mobileye_soc && !(nmk_chip->real_wake & BIT(d->hwirq)))
+ __nmk_gpio_set_wake(nmk_chip, d->hwirq, enable);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
+}
+
+static void nmk_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ nmk_gpio_irq_maskunmask(nmk_chip, d, false);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+
+static void nmk_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+ nmk_gpio_irq_maskunmask(nmk_chip, d, true);
+}
+
+static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ /* Handler is registered in all cases. */
+ if (nmk_chip->is_mobileye_soc)
+ return -ENXIO;
+
+ clk_enable(nmk_chip->clk);
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ if (irqd_irq_disabled(d))
+ __nmk_gpio_set_wake(nmk_chip, d->hwirq, on);
+
+ if (on)
+ nmk_chip->real_wake |= BIT(d->hwirq);
+ else
+ nmk_chip->real_wake &= ~BIT(d->hwirq);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+ bool enabled = !irqd_irq_disabled(d);
+ bool wake = irqd_is_wakeup_set(d);
+ unsigned long flags;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ clk_enable(nmk_chip->clk);
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, false);
+
+ if (!nmk_chip->is_mobileye_soc && (enabled || wake))
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false);
+
+ nmk_chip->edge_rising &= ~BIT(d->hwirq);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ nmk_chip->edge_rising |= BIT(d->hwirq);
+
+ nmk_chip->edge_falling &= ~BIT(d->hwirq);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ nmk_chip->edge_falling |= BIT(d->hwirq);
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true);
+
+ if (!nmk_chip->is_mobileye_soc && (enabled || wake))
+ __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, true);
+
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ clk_enable(nmk_chip->clk);
+ nmk_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void nmk_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ nmk_gpio_irq_mask(d);
+ clk_disable(nmk_chip->clk);
+}
+
+static irqreturn_t nmk_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct nmk_gpio_chip *nmk_chip = dev_id;
+ struct gpio_chip *chip = &nmk_chip->chip;
+ unsigned long mask = GENMASK(chip->ngpio - 1, 0);
+ unsigned long status;
+ int bit;
+
+ clk_enable(nmk_chip->clk);
+
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+
+ /* Ensure we cannot leave pending bits; this should never occur. */
+ if (unlikely(status & ~mask))
+ writel(status & ~mask, nmk_chip->addr + NMK_GPIO_IC);
+
+ clk_disable(nmk_chip->clk);
+
+ for_each_set_bit(bit, &status, chip->ngpio)
+ generic_handle_domain_irq_safe(chip->irq.domain, bit);
+
+ return IRQ_RETVAL((status & mask) != 0);
+}
+
+/* I/O Functions */
+
+static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned int offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int dir;
+
+ clk_enable(nmk_chip->clk);
+
+ dir = readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset);
+
+ clk_disable(nmk_chip->clk);
+
+ if (dir)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+
+ clk_enable(nmk_chip->clk);
+
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
+
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int value;
+
+ clk_enable(nmk_chip->clk);
+
+ value = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
+
+ clk_disable(nmk_chip->clk);
+
+ return value;
+}
+
+static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+
+ clk_enable(nmk_chip->clk);
+
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+
+ clk_disable(nmk_chip->clk);
+}
+
+static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+
+ clk_enable(nmk_chip->clk);
+
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+
+ clk_disable(nmk_chip->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
+{
+ u32 afunc, bfunc;
+
+ /* We don't support modes. */
+ if (nmk_chip->is_mobileye_soc)
+ return NMK_GPIO_ALT_GPIO;
+
+ clk_enable(nmk_chip->clk);
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & BIT(offset);
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & BIT(offset);
+
+ clk_disable(nmk_chip->clk);
+
+ return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
+
+void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip, unsigned int offset,
+ unsigned int gpio)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int mode;
+ bool is_out;
+ bool data_out;
+ bool pull;
+ static const char * const modes[] = {
+ [NMK_GPIO_ALT_GPIO] = "gpio",
+ [NMK_GPIO_ALT_A] = "altA",
+ [NMK_GPIO_ALT_B] = "altB",
+ [NMK_GPIO_ALT_C] = "altC",
+ [NMK_GPIO_ALT_C + 1] = "altC1",
+ [NMK_GPIO_ALT_C + 2] = "altC2",
+ [NMK_GPIO_ALT_C + 3] = "altC3",
+ [NMK_GPIO_ALT_C + 4] = "altC4",
+ };
+
+ char *label = gpiochip_dup_line_label(chip, offset);
+ if (IS_ERR(label))
+ return;
+
+ clk_enable(nmk_chip->clk);
+ is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
+ data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
+ mode = nmk_gpio_get_mode(nmk_chip, offset);
+#ifdef CONFIG_PINCTRL_NOMADIK
+ if (mode == NMK_GPIO_ALT_C && pctldev)
+ mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
+#endif
+
+ if (is_out) {
+ seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
+ gpio,
+ label ?: "(none)",
+ data_out ? "hi" : "lo",
+ (mode < 0) ? "unknown" : modes[mode]);
+ } else {
+ int irq = chip->to_irq(chip, offset);
+ const int pullidx = pull ? 1 : 0;
+ int val;
+ static const char * const pulls[] = {
+ "none ",
+ "pull enabled",
+ };
+
+ seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
+ gpio,
+ label ?: "(none)",
+ pulls[pullidx],
+ (mode < 0) ? "unknown" : modes[mode]);
+
+ val = nmk_gpio_get_input(chip, offset);
+ seq_printf(s, " VAL %d", val);
+
+ /*
+ * This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ */
+ if (irq > 0 && irq_has_action(irq)) {
+ char *trigger;
+ bool wake;
+
+ if (nmk_chip->edge_rising & BIT(offset))
+ trigger = "edge-rising";
+ else if (nmk_chip->edge_falling & BIT(offset))
+ trigger = "edge-falling";
+ else
+ trigger = "edge-undefined";
+
+ wake = !!(nmk_chip->real_wake & BIT(offset));
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger, wake ? " wakeup" : "");
+ }
+ }
+ clk_disable(nmk_chip->clk);
+}
+
+static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned int i, gpio = chip->base;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+
+#define nmk_gpio_dbg_show NULL
+
+#endif
+
+/*
+ * We will allocate memory for the state container using devm* allocators,
+ * binding to the first device reaching this point; it doesn't matter whether
+ * it is the pin controller or the GPIO driver. However, we need to use the
+ * right platform device when looking up resources, so pay attention to pdev.
+ */
+struct nmk_gpio_chip *nmk_gpio_populate_chip(struct fwnode_handle *fwnode,
+ struct platform_device *pdev)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ struct platform_device *gpio_pdev;
+ struct device *dev = &pdev->dev;
+ struct reset_control *reset;
+ struct device *gpio_dev;
+ struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ u32 id, ngpio;
+ int ret;
+
+ gpio_dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+ if (!gpio_dev) {
+ dev_err(dev, "populate \"%pfwP\": device not found\n", fwnode);
+ return ERR_PTR(-ENODEV);
+ }
+ gpio_pdev = to_platform_device(gpio_dev);
+
+ if (device_property_read_u32(gpio_dev, "gpio-bank", &id)) {
+ dev_err(dev, "populate: gpio-bank property not found\n");
+ platform_device_put(gpio_pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+#ifdef CONFIG_PINCTRL_NOMADIK
+ if (id >= ARRAY_SIZE(nmk_gpio_chips)) {
+ dev_err(dev, "populate: invalid id: %u\n", id);
+ platform_device_put(gpio_pdev);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Already populated? */
+ nmk_chip = nmk_gpio_chips[id];
+ if (nmk_chip) {
+ platform_device_put(gpio_pdev);
+ return nmk_chip;
+ }
+#endif
+
+ nmk_chip = devm_kzalloc(dev, sizeof(*nmk_chip), GFP_KERNEL);
+ if (!nmk_chip) {
+ platform_device_put(gpio_pdev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (device_property_read_u32(gpio_dev, "ngpios", &ngpio)) {
+ ngpio = NMK_GPIO_PER_CHIP;
+ dev_dbg(dev, "populate: using default ngpio (%u)\n", ngpio);
+ }
+
+ nmk_chip->is_mobileye_soc = device_is_compatible(gpio_dev,
+ "mobileye,eyeq5-gpio");
+ nmk_chip->bank = id;
+ chip = &nmk_chip->chip;
+ chip->base = -1;
+ chip->ngpio = ngpio;
+ chip->label = dev_name(gpio_dev);
+ chip->parent = gpio_dev;
+
+ /* NOTE: different devices! No devm_platform_ioremap_resource() here! */
+ res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ platform_device_put(gpio_pdev);
+ return ERR_CAST(base);
+ }
+ nmk_chip->addr = base;
+
+ /* NOTE: do not use devm_ here! */
+ clk = clk_get_optional(gpio_dev, NULL);
+ if (IS_ERR(clk)) {
+ platform_device_put(gpio_pdev);
+ return ERR_CAST(clk);
+ }
+ clk_prepare(clk);
+ nmk_chip->clk = clk;
+
+ /* NOTE: do not use devm_ here! */
+ reset = reset_control_get_optional_shared(gpio_dev, NULL);
+ if (IS_ERR(reset)) {
+ clk_unprepare(clk);
+ clk_put(clk);
+ platform_device_put(gpio_pdev);
+ dev_err(dev, "failed getting reset control: %pe\n",
+ reset);
+ return ERR_CAST(reset);
+ }
+
+ /*
+ * Reset might be shared and assert/deassert calls are unbalanced. We
+ * only support sharing this reset with other gpio-nomadik devices that
+ * use this reset to ensure deassertion at probe.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ clk_unprepare(clk);
+ clk_put(clk);
+ platform_device_put(gpio_pdev);
+ dev_err(dev, "failed reset deassert: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+#ifdef CONFIG_PINCTRL_NOMADIK
+ nmk_gpio_chips[id] = nmk_chip;
+#endif
+ return nmk_chip;
+}
+
+static void nmk_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
+
+ seq_printf(p, "nmk%u-%u-%u", nmk_chip->bank,
+ gc->base, gc->base + gc->ngpio - 1);
+}
+
+static const struct irq_chip nmk_irq_chip = {
+ .irq_ack = nmk_gpio_irq_ack,
+ .irq_mask = nmk_gpio_irq_mask,
+ .irq_unmask = nmk_gpio_irq_unmask,
+ .irq_set_type = nmk_gpio_irq_set_type,
+ .irq_set_wake = nmk_gpio_irq_set_wake,
+ .irq_startup = nmk_gpio_irq_startup,
+ .irq_shutdown = nmk_gpio_irq_shutdown,
+ .irq_print_chip = nmk_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int nmk_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nmk_gpio_chip *nmk_chip;
+ struct gpio_irq_chip *girq;
+ bool supports_sleepmode;
+ struct gpio_chip *chip;
+ int irq;
+ int ret;
+
+ nmk_chip = nmk_gpio_populate_chip(dev_fwnode(dev), pdev);
+ if (IS_ERR(nmk_chip)) {
+ dev_err(dev, "could not populate nmk chip struct\n");
+ return PTR_ERR(nmk_chip);
+ }
+
+ supports_sleepmode =
+ device_property_read_bool(dev, "st,supports-sleepmode");
+
+ /* Correct platform device ID */
+ pdev->id = nmk_chip->bank;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /*
+ * The virtual address in nmk_chip->addr is in the Nomadik register space,
+ * so we can simply convert the resource address without remapping.
+ */
+ nmk_chip->sleepmode = supports_sleepmode;
+ spin_lock_init(&nmk_chip->lock);
+
+ chip = &nmk_chip->chip;
+ chip->parent = dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = nmk_gpio_get_dir;
+ chip->direction_input = nmk_gpio_make_input;
+ chip->get = nmk_gpio_get_input;
+ chip->direction_output = nmk_gpio_make_output;
+ chip->set = nmk_gpio_set_output;
+ chip->dbg_show = nmk_gpio_dbg_show;
+ chip->can_sleep = false;
+ chip->owner = THIS_MODULE;
+
+ girq = &chip->irq;
+ gpio_irq_chip_set_chip(girq, &nmk_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_request_irq(dev, irq, nmk_gpio_irq_handler, IRQF_SHARED,
+ dev_name(dev), nmk_chip);
+ if (ret) {
+ dev_err(dev, "failed requesting IRQ\n");
+ return ret;
+ }
+
+ if (!nmk_chip->is_mobileye_soc) {
+ clk_enable(nmk_chip->clk);
+ nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
+ clk_disable(nmk_chip->clk);
+ }
+
+ ret = gpiochip_add_data(chip, nmk_chip);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, nmk_chip);
+
+ dev_info(dev, "chip registered\n");
+
+ return 0;
+}
+
+static const struct of_device_id nmk_gpio_match[] = {
+ { .compatible = "st,nomadik-gpio", },
+ { .compatible = "mobileye,eyeq5-gpio", },
+ {}
+};
+
+static struct platform_driver nmk_gpio_driver = {
+ .driver = {
+ .name = "nomadik-gpio",
+ .of_match_table = nmk_gpio_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = nmk_gpio_probe,
+};
+
+static int __init nmk_gpio_init(void)
+{
+ return platform_driver_register(&nmk_gpio_driver);
+}
+subsys_initcall(nmk_gpio_init);
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index c4106e37e6db..2ed5cbe7c8a8 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -22,6 +22,7 @@
#include <linux/irq_sim.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -234,10 +235,10 @@ static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
guard(mutex)(&chip->lock);
- for_each_requested_gpio(gc, i, label)
+ for_each_hwgpio(gc, i, label)
seq_printf(seq, " gpio-%-3d (%s) %s,%s\n",
gc->base + i,
- label,
+ label ?: "<unused>",
test_bit(i, chip->direction_map) ? "input" :
test_bit(i, chip->value_map) ? "output-high" :
"output-low",
@@ -420,7 +421,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
ret = fwnode_property_read_string(swnode, "gpio-sim,label", &label);
if (ret) {
- label = devm_kasprintf(dev, GFP_KERNEL, "%s-%pfwP",
+ label = devm_kasprintf(dev, GFP_KERNEL, "%s:%pfwP",
dev_name(dev), swnode);
if (!label)
return -ENOMEM;
@@ -697,8 +698,10 @@ static struct gpio_sim_device *gpio_sim_hog_get_device(struct gpio_sim_hog *hog)
return gpio_sim_line_get_device(line);
}
-static bool gpio_sim_device_is_live_unlocked(struct gpio_sim_device *dev)
+static bool gpio_sim_device_is_live(struct gpio_sim_device *dev)
{
+ lockdep_assert_held(&dev->lock);
+
return !!dev->pdev;
}
@@ -737,7 +740,7 @@ gpio_sim_device_config_live_show(struct config_item *item, char *page)
bool live;
scoped_guard(mutex, &dev->lock)
- live = gpio_sim_device_is_live_unlocked(dev);
+ live = gpio_sim_device_is_live(dev);
return sprintf(page, "%c\n", live ? '1' : '0');
}
@@ -833,7 +836,7 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
GFP_KERNEL);
else
hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%pfwP",
+ "gpio-sim.%u:%pfwP",
dev->id,
bank->swnode);
if (!hog->chip_label) {
@@ -926,7 +929,7 @@ static bool gpio_sim_bank_labels_non_unique(struct gpio_sim_device *dev)
return false;
}
-static int gpio_sim_device_activate_unlocked(struct gpio_sim_device *dev)
+static int gpio_sim_device_activate(struct gpio_sim_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
@@ -934,6 +937,8 @@ static int gpio_sim_device_activate_unlocked(struct gpio_sim_device *dev)
struct gpio_sim_bank *bank;
int ret;
+ lockdep_assert_held(&dev->lock);
+
if (list_empty(&dev->bank_list))
return -ENODATA;
@@ -998,10 +1003,12 @@ static int gpio_sim_device_activate_unlocked(struct gpio_sim_device *dev)
return 0;
}
-static void gpio_sim_device_deactivate_unlocked(struct gpio_sim_device *dev)
+static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
{
struct fwnode_handle *swnode;
+ lockdep_assert_held(&dev->lock);
+
swnode = dev_fwnode(&dev->pdev->dev);
platform_device_unregister(dev->pdev);
gpio_sim_remove_hogs(dev);
@@ -1023,12 +1030,12 @@ gpio_sim_device_config_live_store(struct config_item *item,
guard(mutex)(&dev->lock);
- if (live == gpio_sim_device_is_live_unlocked(dev))
+ if (live == gpio_sim_device_is_live(dev))
ret = -EPERM;
else if (live)
- ret = gpio_sim_device_activate_unlocked(dev);
+ ret = gpio_sim_device_activate(dev);
else
- gpio_sim_device_deactivate_unlocked(dev);
+ gpio_sim_device_deactivate(dev);
return ret ?: count;
}
@@ -1069,7 +1076,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return device_for_each_child(&dev->pdev->dev, &ctx,
gpio_sim_emit_chip_name);
@@ -1098,7 +1105,7 @@ static ssize_t gpio_sim_bank_config_label_store(struct config_item *item,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return -EBUSY;
trimmed = gpio_sim_strdup_trimmed(page, count);
@@ -1142,7 +1149,7 @@ gpio_sim_bank_config_num_lines_store(struct config_item *item,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return -EBUSY;
bank->num_lines = num_lines;
@@ -1179,7 +1186,7 @@ static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return -EBUSY;
trimmed = gpio_sim_strdup_trimmed(page, count);
@@ -1219,7 +1226,7 @@ static ssize_t gpio_sim_hog_config_name_store(struct config_item *item,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return -EBUSY;
trimmed = gpio_sim_strdup_trimmed(page, count);
@@ -1274,7 +1281,7 @@ gpio_sim_hog_config_direction_store(struct config_item *item,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return -EBUSY;
if (sysfs_streq(page, "input"))
@@ -1392,7 +1399,7 @@ gpio_sim_bank_config_make_line_group(struct config_group *group,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return ERR_PTR(-EBUSY);
line = kzalloc(sizeof(*line), GFP_KERNEL);
@@ -1445,7 +1452,7 @@ gpio_sim_device_config_make_bank_group(struct config_group *group,
guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
+ if (gpio_sim_device_is_live(dev))
return ERR_PTR(-EBUSY);
bank = kzalloc(sizeof(*bank), GFP_KERNEL);
@@ -1467,8 +1474,8 @@ static void gpio_sim_device_config_group_release(struct config_item *item)
struct gpio_sim_device *dev = to_gpio_sim_device(item);
scoped_guard(mutex, &dev->lock) {
- if (gpio_sim_device_is_live_unlocked(dev))
- gpio_sim_device_deactivate_unlocked(dev);
+ if (gpio_sim_device_is_live(dev))
+ gpio_sim_device_deactivate(dev);
}
mutex_destroy(&dev->lock);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index cd3e9657cc36..7f140df40f35 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -126,7 +126,7 @@ static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
static bool acpi_gpio_deferred_req_irqs_done;
-static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
+static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
{
return device_match_acpi_handle(&gc->gpiodev->dev, data);
}
@@ -1402,17 +1402,17 @@ static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
}
/**
- * acpi_gpio_count - count the GPIOs associated with a device / function
- * @dev: GPIO consumer, can be %NULL for system-global GPIOs
+ * acpi_gpio_count - count the GPIOs associated with a firmware node / function
+ * @fwnode: firmware node of the GPIO consumer
* @con_id: function within the GPIO consumer
*
* Return:
- * The number of GPIOs associated with a device / function or %-ENOENT,
+ * The number of GPIOs associated with a firmware node / function or %-ENOENT,
* if no GPIO has been assigned to the requested function.
*/
-int acpi_gpio_count(struct device *dev, const char *con_id)
+int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *adev = to_acpi_device_node(fwnode);
const union acpi_object *obj;
const struct acpi_gpio_mapping *gm;
int count = -ENOENT;
@@ -1429,8 +1429,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
snprintf(propname, sizeof(propname), "%s",
gpio_suffixes[i]);
- ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY,
- &obj);
+ ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj);
if (ret == 0) {
if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
count = 1;
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 0fcd7e14d7f9..7e1c51d04040 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -33,7 +33,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
enum gpiod_flags *dflags,
unsigned long *lookupflags);
-int acpi_gpio_count(struct device *dev, const char *con_id);
+int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id);
#else
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
@@ -51,7 +51,8 @@ acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id,
{
return ERR_PTR(-ENOENT);
}
-static inline int acpi_gpio_count(struct device *dev, const char *con_id)
+static inline int acpi_gpio_count(const struct fwnode_handle *fwnode,
+ const char *con_id)
{
return -ENODEV;
}
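
With the signature change, gpiolib-internal callers pass the consumer's firmware node rather than its struct device. A rough sketch of such a call, assuming a made-up helper and "reset" con_id (this header is only reachable from within gpiolib):

#include <linux/acpi.h>
#include <linux/property.h>

#include "gpiolib-acpi.h"

/* Hypothetical gpiolib-internal helper counting "reset" GPIOs. */
static int demo_count_reset_gpios(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (!is_acpi_node(fwnode))
		return -ENOENT;

	return acpi_gpio_count(fwnode, "reset");
}
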
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 2a88736629ef..f384fa278764 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -24,7 +24,6 @@
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/rbtree.h>
-#include <linux/rwsem.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
@@ -61,11 +60,6 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
* interface to gpiolib GPIOs via ioctl()s.
*/
-typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
-typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
-typedef ssize_t (*read_fn)(struct file *, char __user *,
- size_t count, loff_t *);
-
/*
* GPIO line handle management
*/
@@ -210,9 +204,9 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
unsigned int i;
int ret;
- guard(rwsem_read)(&lh->gdev->sem);
+ guard(srcu)(&lh->gdev->srcu);
- if (!lh->gdev->chip)
+ if (!rcu_access_pointer(lh->gdev->chip))
return -ENODEV;
switch (cmd) {
@@ -337,7 +331,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
+ struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
@@ -1525,9 +1519,9 @@ static long linereq_ioctl(struct file *file, unsigned int cmd,
struct linereq *lr = file->private_data;
void __user *ip = (void __user *)arg;
- guard(rwsem_read)(&lr->gdev->sem);
+ guard(srcu)(&lr->gdev->srcu);
- if (!lr->gdev->chip)
+ if (!rcu_access_pointer(lr->gdev->chip))
return -ENODEV;
switch (cmd) {
@@ -1556,9 +1550,9 @@ static __poll_t linereq_poll(struct file *file,
struct linereq *lr = file->private_data;
__poll_t events = 0;
- guard(rwsem_read)(&lr->gdev->sem);
+ guard(srcu)(&lr->gdev->srcu);
- if (!lr->gdev->chip)
+ if (!rcu_access_pointer(lr->gdev->chip))
return EPOLLHUP | EPOLLERR;
poll_wait(file, &lr->wait, wait);
@@ -1578,9 +1572,9 @@ static ssize_t linereq_read(struct file *file, char __user *buf,
ssize_t bytes_read = 0;
int ret;
- guard(rwsem_read)(&lr->gdev->sem);
+ guard(srcu)(&lr->gdev->srcu);
- if (!lr->gdev->chip)
+ if (!rcu_access_pointer(lr->gdev->chip))
return -ENODEV;
if (count < sizeof(le))
@@ -1744,7 +1738,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < ulr.num_lines; i++) {
u32 offset = ulr.offsets[i];
- struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
+ struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
@@ -1879,9 +1873,9 @@ static __poll_t lineevent_poll(struct file *file,
struct lineevent_state *le = file->private_data;
__poll_t events = 0;
- guard(rwsem_read)(&le->gdev->sem);
+ guard(srcu)(&le->gdev->srcu);
- if (!le->gdev->chip)
+ if (!rcu_access_pointer(le->gdev->chip))
return EPOLLHUP | EPOLLERR;
poll_wait(file, &le->wait, wait);
@@ -1917,9 +1911,9 @@ static ssize_t lineevent_read(struct file *file, char __user *buf,
ssize_t ge_size;
int ret;
- guard(rwsem_read)(&le->gdev->sem);
+ guard(srcu)(&le->gdev->srcu);
- if (!le->gdev->chip)
+ if (!rcu_access_pointer(le->gdev->chip))
return -ENODEV;
/*
@@ -2000,9 +1994,9 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd,
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
- guard(rwsem_read)(&le->gdev->sem);
+ guard(srcu)(&le->gdev->srcu);
- if (!le->gdev->chip)
+ if (!rcu_access_pointer(le->gdev->chip))
return -ENODEV;
/*
@@ -2128,7 +2122,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
- desc = gpiochip_get_desc(gdev->chip, offset);
+ desc = gpio_device_get_desc(gdev, offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -2300,21 +2294,26 @@ static void gpio_v2_line_info_changed_to_v1(
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
struct gpio_v2_line_info *info)
{
- struct gpio_chip *gc = desc->gdev->chip;
unsigned long dflags;
+ const char *label;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return;
memset(info, 0, sizeof(*info));
info->offset = gpio_chip_hwgpio(desc);
- scoped_guard(spinlock_irqsave, &gpio_lock) {
- if (desc->name)
- strscpy(info->name, desc->name, sizeof(info->name));
+ if (desc->name)
+ strscpy(info->name, desc->name, sizeof(info->name));
- if (desc->label)
- strscpy(info->consumer, desc->label,
- sizeof(info->consumer));
+ dflags = READ_ONCE(desc->flags);
- dflags = READ_ONCE(desc->flags);
+ scoped_guard(srcu, &desc->srcu) {
+ label = gpiod_get_label(desc);
+ if (label && test_bit(FLAG_REQUESTED, &dflags))
+ strscpy(info->consumer, label,
+ sizeof(info->consumer));
}
/*
@@ -2334,8 +2333,8 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
test_bit(FLAG_USED_AS_IRQ, &dflags) ||
test_bit(FLAG_EXPORT, &dflags) ||
test_bit(FLAG_SYSFS, &dflags) ||
- !gpiochip_line_is_valid(gc, info->offset) ||
- !pinctrl_gpio_can_use_line(gc, info->offset))
+ !gpiochip_line_is_valid(guard.gc, info->offset) ||
+ !pinctrl_gpio_can_use_line(guard.gc, info->offset))
info->flags |= GPIO_V2_LINE_FLAG_USED;
if (test_bit(FLAG_IS_OUT, &dflags))
@@ -2422,7 +2421,7 @@ static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
return -EFAULT;
/* this doubles as a range check on line_offset */
- desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+ desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -2459,7 +2458,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
return -EINVAL;
- desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
+ desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -2508,10 +2507,10 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct gpio_device *gdev = cdev->gdev;
void __user *ip = (void __user *)arg;
- guard(rwsem_read)(&gdev->sem);
+ guard(srcu)(&gdev->srcu);
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!gdev->chip)
+ if (!rcu_access_pointer(gdev->chip))
return -ENODEV;
/* Fill in the struct and pass to userspace */
@@ -2594,9 +2593,9 @@ static __poll_t lineinfo_watch_poll(struct file *file,
struct gpio_chardev_data *cdev = file->private_data;
__poll_t events = 0;
- guard(rwsem_read)(&cdev->gdev->sem);
+ guard(srcu)(&cdev->gdev->srcu);
- if (!cdev->gdev->chip)
+ if (!rcu_access_pointer(cdev->gdev->chip))
return EPOLLHUP | EPOLLERR;
poll_wait(file, &cdev->wait, pollt);
@@ -2617,9 +2616,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
int ret;
size_t event_size;
- guard(rwsem_read)(&cdev->gdev->sem);
+ guard(srcu)(&cdev->gdev->srcu);
- if (!cdev->gdev->chip)
+ if (!rcu_access_pointer(cdev->gdev->chip))
return -ENODEV;
#ifndef CONFIG_GPIO_CDEV_V1
@@ -2694,17 +2693,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
struct gpio_chardev_data *cdev;
int ret = -ENOMEM;
- guard(rwsem_read)(&gdev->sem);
+ guard(srcu)(&gdev->srcu);
/* Fail on open if the backing gpiochip is gone */
- if (!gdev->chip)
+ if (!rcu_access_pointer(gdev->chip))
return -ENODEV;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENODEV;
- cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
+ cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
if (!cdev->watched_lines)
goto out_free_cdev;
@@ -2784,6 +2783,7 @@ static const struct file_operations gpio_fileops = {
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
+ struct gpio_chip *gc;
int ret;
cdev_init(&gdev->chrdev, &gpio_fileops);
@@ -2794,8 +2794,12 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
if (ret)
return ret;
- chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
- MAJOR(devt), gdev->id);
+ guard(srcu)(&gdev->srcu);
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!gc)
+ return -ENODEV;
+
+ chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
return 0;
}
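
Every cdev entry point above now follows the same shape: enter the device's SRCU read section, then check whether the backing chip is still present. A condensed sketch of that pattern, with a made-up handler name (not one of the real fops):

#include <linux/cleanup.h>
#include <linux/srcu.h>

#include "gpiolib.h"

/* Illustrative skeleton mirroring the converted cdev handlers. */
static long demo_handler(struct gpio_device *gdev)
{
	/* Read-side SRCU section: dropped automatically at end of scope. */
	guard(srcu)(&gdev->srcu);

	/* The chip pointer is set to NULL on removal; fail if it is gone. */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	/* ... dereference the chip via srcu_dereference() as needed ... */
	return 0;
}
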
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index fe9ce6b19f15..4987e62dcb3d 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -158,7 +158,7 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
if (!dr)
return ERR_PTR(-ENOMEM);
- desc = fwnode_gpiod_get_index(fwnode, con_id, index, flags, label);
+ desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 97f4b498e343..b138682fec3d 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -6,6 +6,9 @@
#include "gpiolib.h"
+/*
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
+ */
void gpio_free(unsigned gpio)
{
gpiod_free(gpio_to_desc(gpio));
@@ -17,6 +20,8 @@ EXPORT_SYMBOL_GPL(gpio_free);
* @gpio: the GPIO number
* @flags: GPIO configuration as specified by GPIOF_*
* @label: a literal description string of this GPIO
+ *
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
@@ -53,6 +58,9 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
}
EXPORT_SYMBOL_GPL(gpio_request_one);
+/*
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
+ */
int gpio_request(unsigned gpio, const char *label)
{
struct gpio_desc *desc = gpio_to_desc(gpio);
@@ -69,6 +77,8 @@ EXPORT_SYMBOL_GPL(gpio_request);
* gpio_request_array - request multiple GPIOs in a single call
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
+ *
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
*/
int gpio_request_array(const struct gpio *array, size_t num)
{
@@ -92,6 +102,8 @@ EXPORT_SYMBOL_GPL(gpio_request_array);
* gpio_free_array - release multiple GPIOs in a single call
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
+ *
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
*/
void gpio_free_array(const struct gpio *array, size_t num)
{
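
These legacy integer-based calls are superseded by the descriptor-based consumer API from <linux/gpio/consumer.h>. A minimal sketch of the replacement, assuming a made-up probe function and "enable" con_id:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical probe path: request, drive and release one GPIO. */
static int demo_probe(struct device *dev)
{
	struct gpio_desc *enable;

	/* Replaces gpio_request_one(..., GPIOF_OUT_INIT_LOW, ...). */
	enable = gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(enable))
		return PTR_ERR(enable);

	gpiod_set_value(enable, 1);

	/* Replaces gpio_free(). */
	gpiod_put(enable);

	return 0;
}
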
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index e7770eedd146..cb0cefaec37e 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -68,7 +68,7 @@ static int of_gpio_named_count(const struct device_node *np,
/**
* of_gpio_spi_cs_get_count() - special GPIO counting for SPI
- * @dev: Consuming device
+ * @np: Consuming device node
* @con_id: Function within the GPIO consumer
*
* Some elder GPIO controllers need special quirks. Currently we handle
@@ -78,10 +78,9 @@ static int of_gpio_named_count(const struct device_node *np,
* the counting of "cs-gpios" to count "gpios" transparent to the
* driver.
*/
-static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
+static int of_gpio_spi_cs_get_count(const struct device_node *np,
+ const char *con_id)
{
- struct device_node *np = dev->of_node;
-
if (!IS_ENABLED(CONFIG_SPI_MASTER))
return 0;
if (!con_id || strcmp(con_id, "cs"))
@@ -93,13 +92,14 @@ static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
return of_gpio_named_count(np, "gpios");
}
-int of_gpio_get_count(struct device *dev, const char *con_id)
+int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
{
+ const struct device_node *np = to_of_node(fwnode);
int ret;
char propname[32];
unsigned int i;
- ret = of_gpio_spi_cs_get_count(dev, con_id);
+ ret = of_gpio_spi_cs_get_count(np, con_id);
if (ret > 0)
return ret;
@@ -111,16 +111,17 @@ int of_gpio_get_count(struct device *dev, const char *con_id)
snprintf(propname, sizeof(propname), "%s",
gpio_suffixes[i]);
- ret = of_gpio_named_count(dev->of_node, propname);
+ ret = of_gpio_named_count(np, propname);
if (ret > 0)
break;
}
return ret ? ret : -ENOENT;
}
-static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
+static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip,
+ const void *data)
{
- struct of_phandle_args *gpiospec = data;
+ const struct of_phandle_args *gpiospec = data;
return device_match_of_node(&chip->gpiodev->dev, gpiospec->np) &&
chip->of_xlate &&
@@ -128,7 +129,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
}
static struct gpio_device *
-of_find_gpio_device_by_xlate(struct of_phandle_args *gpiospec)
+of_find_gpio_device_by_xlate(const struct of_phandle_args *gpiospec)
{
return gpio_device_find(gpiospec, of_gpiochip_match_node_and_xlate);
}
@@ -414,6 +415,8 @@ out:
* @propname: Name of property containing gpio specifier(s)
* @index: index of the GPIO
*
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
+ *
* Returns GPIO number to use with Linux generic GPIO API, or one of the errno
* value on the error condition.
*/
@@ -798,7 +801,7 @@ static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog)
return ret;
#ifdef CONFIG_OF_DYNAMIC
- desc->hog = hog;
+ WRITE_ONCE(desc->hog, hog);
#endif
}
@@ -846,11 +849,11 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
struct gpio_desc *desc;
for_each_gpio_desc_with_flag(chip, desc, FLAG_IS_HOGGED)
- if (desc->hog == hog)
+ if (READ_ONCE(desc->hog) == hog)
gpiochip_free_own_desc(desc);
}
-static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+static int of_gpiochip_match_node(struct gpio_chip *chip, const void *data)
{
return device_match_of_node(&chip->gpiodev->dev, data);
}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 6b3a5347c5d9..16d6ac8cb156 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -9,6 +9,7 @@
#include <linux/notifier.h>
struct device;
+struct fwnode_handle;
struct gpio_chip;
struct gpio_desc;
@@ -21,7 +22,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
unsigned long *lookupflags);
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
-int of_gpio_get_count(struct device *dev, const char *con_id);
+int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
const char *con_id,
@@ -32,7 +33,8 @@ static inline struct gpio_desc *of_find_gpio(struct device_node *np,
}
static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
-static inline int of_gpio_get_count(struct device *dev, const char *con_id)
+static inline int of_gpio_count(const struct fwnode_handle *fwnode,
+ const char *con_id)
{
return 0;
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 6bf5332136e5..6853ecd98bcb 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
@@ -13,6 +14,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/srcu.h>
#include <linux/sysfs.h>
#include <linux/types.h>
@@ -170,6 +172,10 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
unsigned long irq_flags;
int ret;
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
data->irq = gpiod_to_irq(desc);
if (data->irq < 0)
return -EIO;
@@ -194,7 +200,7 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
* Remove this redundant call (along with the corresponding
* unlock) when those drivers have been fixed.
*/
- ret = gpiochip_lock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
+ ret = gpiochip_lock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
if (ret < 0)
goto err_put_kn;
@@ -208,7 +214,7 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
return 0;
err_unlock:
- gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
err_put_kn:
sysfs_put(data->value_kn);
@@ -224,9 +230,13 @@ static void gpio_sysfs_free_irq(struct device *dev)
struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return;
+
data->irq_flags = 0;
free_irq(data->irq, data);
- gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
sysfs_put(data->value_kn);
}
@@ -400,27 +410,27 @@ static const struct attribute_group *gpio_groups[] = {
static ssize_t base_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
+ const struct gpio_device *gdev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%d\n", chip->base);
+ return sysfs_emit(buf, "%d\n", gdev->base);
}
static DEVICE_ATTR_RO(base);
static ssize_t label_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
+ const struct gpio_device *gdev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", chip->label ?: "");
+ return sysfs_emit(buf, "%s\n", gdev->label);
}
static DEVICE_ATTR_RO(label);
static ssize_t ngpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
+ const struct gpio_device *gdev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", chip->ngpio);
+ return sysfs_emit(buf, "%u\n", gdev->ngpio);
}
static DEVICE_ATTR_RO(ngpio);
@@ -443,13 +453,12 @@ static ssize_t export_store(const struct class *class,
const char *buf, size_t len)
{
struct gpio_desc *desc;
- struct gpio_chip *gc;
int status, offset;
long gpio;
status = kstrtol(buf, 0, &gpio);
- if (status < 0)
- goto done;
+ if (status)
+ return status;
desc = gpio_to_desc(gpio);
/* reject invalid GPIOs */
@@ -457,9 +466,13 @@ static ssize_t export_store(const struct class *class,
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
- gc = desc->gdev->chip;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
offset = gpio_chip_hwgpio(desc);
- if (!gpiochip_line_is_valid(gc, offset)) {
+ if (!gpiochip_line_is_valid(guard.gc, offset)) {
pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
return -EINVAL;
}
@@ -562,8 +575,6 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
const char *ioname = NULL;
struct gpio_device *gdev;
struct gpiod_data *data;
- struct gpio_chip *chip;
- unsigned long flags;
struct device *dev;
int status, offset;
@@ -578,29 +589,28 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
return -EINVAL;
}
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (test_and_set_bit(FLAG_EXPORT, &desc->flags))
+ return -EPERM;
+
gdev = desc->gdev;
- chip = gdev->chip;
mutex_lock(&sysfs_lock);
/* check if chip is being removed */
- if (!chip || !gdev->mockdev) {
+ if (!gdev->mockdev) {
status = -ENODEV;
goto err_unlock;
}
- spin_lock_irqsave(&gpio_lock, flags);
- if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags)) {
- spin_unlock_irqrestore(&gpio_lock, flags);
- gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n",
- __func__,
- test_bit(FLAG_REQUESTED, &desc->flags),
- test_bit(FLAG_EXPORT, &desc->flags));
+ if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
+ gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
goto err_unlock;
}
- spin_unlock_irqrestore(&gpio_lock, flags);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
@@ -610,14 +620,14 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
data->desc = desc;
mutex_init(&data->mutex);
- if (chip->direction_input && chip->direction_output)
+ if (guard.gc->direction_input && guard.gc->direction_output)
data->direction_can_change = direction_may_change;
else
data->direction_can_change = false;
offset = gpio_chip_hwgpio(desc);
- if (chip->names && chip->names[offset])
- ioname = chip->names[offset];
+ if (guard.gc->names && guard.gc->names[offset])
+ ioname = guard.gc->names[offset];
dev = device_create_with_groups(&gpio_class, &gdev->dev,
MKDEV(0, 0), data, gpio_groups,
@@ -628,7 +638,6 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
goto err_free_data;
}
- set_bit(FLAG_EXPORT, &desc->flags);
mutex_unlock(&sysfs_lock);
return 0;
@@ -636,6 +645,7 @@ err_free_data:
kfree(data);
err_unlock:
mutex_unlock(&sysfs_lock);
+ clear_bit(FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
@@ -732,7 +742,7 @@ EXPORT_SYMBOL_GPL(gpiod_unexport);
int gpiochip_sysfs_register(struct gpio_device *gdev)
{
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chip *chip;
struct device *parent;
struct device *dev;
@@ -745,6 +755,12 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
if (!class_is_registered(&gpio_class))
return 0;
+ guard(srcu)(&gdev->srcu);
+
+ chip = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!chip)
+ return -ENODEV;
+
/*
* For sysfs backward compatibility we need to preserve this
* preferred parenting to the gpio_chip parent field, if set.
@@ -755,7 +771,7 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
parent = &gdev->dev;
/* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), chip,
+ dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), gdev,
gpiochip_groups, GPIOCHIP_NAME "%d",
chip->base);
if (IS_ERR(dev))
@@ -771,17 +787,23 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
struct gpio_desc *desc;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chip *chip;
- if (!gdev->mockdev)
- return;
+ scoped_guard(mutex, &sysfs_lock) {
+ if (!gdev->mockdev)
+ return;
- device_unregister(gdev->mockdev);
+ device_unregister(gdev->mockdev);
- /* prevent further gpiod exports */
- mutex_lock(&sysfs_lock);
- gdev->mockdev = NULL;
- mutex_unlock(&sysfs_lock);
+ /* prevent further gpiod exports */
+ gdev->mockdev = NULL;
+ }
+
+ guard(srcu)(&gdev->srcu);
+
+ chip = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!chip)
+ return;
/* unregister gpiod class devices owned by sysfs */
for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {
@@ -790,11 +812,29 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
}
}
+/*
+ * We're not really looking for a device - we just want to iterate over the
+ * list and call this callback for each GPIO device. This is why this function
+ * always returns 0.
+ */
+static int gpiofind_sysfs_register(struct gpio_chip *gc, const void *data)
+{
+ struct gpio_device *gdev = gc->gpiodev;
+ int ret;
+
+ if (gdev->mockdev)
+ return 0;
+
+ ret = gpiochip_sysfs_register(gdev);
+ if (ret)
+ chip_err(gc, "failed to register the sysfs entry: %d\n", ret);
+
+ return 0;
+}
+
static int __init gpiolib_sysfs_init(void)
{
- int status;
- unsigned long flags;
- struct gpio_device *gdev;
+ int status;
status = class_register(&gpio_class);
if (status < 0)
@@ -806,26 +846,8 @@ static int __init gpiolib_sysfs_init(void)
* We run before arch_initcall() so chip->dev nodes can have
* registered, and so arch_initcall() can always gpiod_export().
*/
- spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(gdev, &gpio_devices, list) {
- if (gdev->mockdev)
- continue;
-
- /*
- * TODO we yield gpio_lock here because
- * gpiochip_sysfs_register() acquires a mutex. This is unsafe
- * and needs to be fixed.
- *
- * Also it would be nice to use gpio_device_find() here so we
- * can keep gpio_chips local to gpiolib.c, but the yield of
- * gpio_lock prevents us from doing this.
- */
- spin_unlock_irqrestore(&gpio_lock, flags);
- status = gpiochip_sysfs_register(gdev);
- spin_lock_irqsave(&gpio_lock, flags);
- }
- spin_unlock_irqrestore(&gpio_lock, flags);
+ (void)gpio_device_find(NULL, gpiofind_sysfs_register);
- return status;
+ return 0;
}
postcore_initcall(gpiolib_sysfs_init);
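
gpio_device_find() doubles as an iterator in gpiofind_sysfs_register() above: a match callback that always returns 0 visits every registered GPIO device without ever "finding" one. A minimal sketch of that idiom, with a made-up dump callback:

#include <linux/gpio/driver.h>
#include <linux/printk.h>

/* Always return 0 so gpio_device_find() keeps walking the list. */
static int demo_dump_chip(struct gpio_chip *gc, const void *data)
{
	pr_info("GPIO chip: %s, %u lines\n", gc->label, gc->ngpio);
	return 0;
}

static void demo_dump_all_chips(void)
{
	/* No chip ever matches, so the returned pointer is always NULL. */
	(void)gpio_device_find(NULL, demo_dump_chip);
}
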
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 8b3a0f45b574..ce94e37bcbee 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2,6 +2,7 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cleanup.h>
#include <linux/compat.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -14,12 +15,14 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/gpio.h>
@@ -63,7 +66,7 @@ static int gpio_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
-static struct bus_type gpio_bus_type = {
+static const struct bus_type gpio_bus_type = {
.name = "gpio",
.match = gpio_bus_match,
};
@@ -73,15 +76,14 @@ static struct bus_type gpio_bus_type = {
*/
#define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT
-/* gpio_lock prevents conflicts during gpio_desc[] table updates.
- * While any GPIO is requested, its gpio_chip is not removable;
- * each GPIO's "requested" flag serves as a lock and refcount.
- */
-DEFINE_SPINLOCK(gpio_lock);
-
static DEFINE_MUTEX(gpio_lookup_lock);
static LIST_HEAD(gpio_lookup_list);
-LIST_HEAD(gpio_devices);
+
+static LIST_HEAD(gpio_devices);
+/* Protects the GPIO device list against concurrent modifications. */
+static DEFINE_MUTEX(gpio_devices_lock);
+/* Ensures coherence during read-only accesses to the list of GPIO devices. */
+DEFINE_STATIC_SRCU(gpio_devices_srcu);
static DEFINE_MUTEX(gpio_machine_hogs_mutex);
static LIST_HEAD(gpio_machine_hogs);
@@ -97,9 +99,34 @@ static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc);
static bool gpiolib_initialized;
-static inline void desc_set_label(struct gpio_desc *d, const char *label)
+const char *gpiod_get_label(struct gpio_desc *desc)
{
- d->label = label;
+ unsigned long flags;
+
+ flags = READ_ONCE(desc->flags);
+ if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
+ !test_bit(FLAG_REQUESTED, &flags))
+ return "interrupt";
+
+ return test_bit(FLAG_REQUESTED, &flags) ?
+ srcu_dereference(desc->label, &desc->srcu) : NULL;
+}
+
+static int desc_set_label(struct gpio_desc *desc, const char *label)
+{
+ const char *new = NULL, *old;
+
+ if (label) {
+ new = kstrdup_const(label, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ }
+
+ old = rcu_replace_pointer(desc->label, new, 1);
+ synchronize_srcu(&desc->srcu);
+ kfree_const(old);
+
+ return 0;
}
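
desc->label becomes an SRCU-protected pointer here: writers publish a copy with rcu_replace_pointer() and wait out readers with synchronize_srcu(), while readers dereference it only inside the descriptor's SRCU read section. A stripped-down sketch of the same pattern on a made-up structure (not the gpiolib types):

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>

struct demo_obj {
	struct srcu_struct srcu;	/* assumed init_srcu_struct()'d */
	const char __rcu *label;
};

/* Writer: publish the new label, wait out readers, free the old one. */
static int demo_set_label(struct demo_obj *obj, const char *label)
{
	const char *new = NULL, *old;

	if (label) {
		new = kstrdup_const(label, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}

	old = rcu_replace_pointer(obj->label, new, 1);
	synchronize_srcu(&obj->srcu);
	kfree_const(old);

	return 0;
}

/* Reader: the pointer is only valid inside the SRCU read section. */
static void demo_print_label(struct demo_obj *obj)
{
	int idx = srcu_read_lock(&obj->srcu);

	pr_info("label: %s\n",
		srcu_dereference(obj->label, &obj->srcu) ?: "?");
	srcu_read_unlock(&obj->srcu, idx);
}
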
/**
@@ -113,20 +140,16 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
struct gpio_desc *gpio_to_desc(unsigned gpio)
{
struct gpio_device *gdev;
- unsigned long flags;
- spin_lock_irqsave(&gpio_lock, flags);
-
- list_for_each_entry(gdev, &gpio_devices, list) {
- if (gdev->base <= gpio &&
- gdev->base + gdev->ngpio > gpio) {
- spin_unlock_irqrestore(&gpio_lock, flags);
- return &gdev->descs[gpio - gdev->base];
+ scoped_guard(srcu, &gpio_devices_srcu) {
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ srcu_read_lock_held(&gpio_devices_srcu)) {
+ if (gdev->base <= gpio &&
+ gdev->base + gdev->ngpio > gpio)
+ return &gdev->descs[gpio - gdev->base];
}
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
if (!gpio_is_valid(gpio))
pr_warn("invalid GPIO %d\n", gpio);
@@ -161,16 +184,6 @@ EXPORT_SYMBOL_GPL(gpiochip_get_desc);
struct gpio_desc *
gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum)
{
- struct gpio_chip *gc;
-
- /*
- * FIXME: This will be locked once we protect gdev->chip everywhere
- * with SRCU.
- */
- gc = gdev->chip;
- if (!gc)
- return ERR_PTR(-ENODEV);
-
if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
@@ -198,12 +211,18 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
/**
* gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs
* @desc: descriptor to return the chip of
+ *
+ * *DEPRECATED*
+ * This function is unsafe and should not be used. Using the chip address
+ * without taking the SRCU read lock may result in dereferencing a dangling
+ * pointer.
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- if (!desc || !desc->gdev)
+ if (!desc)
return NULL;
- return desc->gdev->chip;
+
+ return gpio_device_get_chip(desc->gdev);
}
EXPORT_SYMBOL_GPL(gpiod_to_chip);
@@ -262,6 +281,7 @@ EXPORT_SYMBOL(gpio_device_get_label);
* Returns:
* Address of the GPIO chip backing this device.
*
+ * *DEPRECATED*
* Until we can get rid of all non-driver users of struct gpio_chip, we must
* provide a way of retrieving the pointer to it from struct gpio_device. This
* is *NOT* safe as the GPIO API is considered to be hot-unpluggable and the
@@ -272,7 +292,7 @@ EXPORT_SYMBOL(gpio_device_get_label);
*/
struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev)
{
- return gdev->chip;
+ return rcu_dereference_check(gdev->chip, 1);
}
EXPORT_SYMBOL_GPL(gpio_device_get_chip);
@@ -282,7 +302,8 @@ static int gpiochip_find_base_unlocked(int ngpio)
struct gpio_device *gdev;
int base = GPIO_DYNAMIC_BASE;
- list_for_each_entry(gdev, &gpio_devices, list) {
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ lockdep_is_held(&gpio_devices_lock)) {
/* found a free space? */
if (gdev->base >= base + ngpio)
break;
@@ -311,25 +332,36 @@ static int gpiochip_find_base_unlocked(int ngpio)
*/
int gpiod_get_direction(struct gpio_desc *desc)
{
- struct gpio_chip *gc;
+ unsigned long flags;
unsigned int offset;
int ret;
- gc = gpiod_to_chip(desc);
+ /*
+ * We cannot use VALIDATE_DESC() as we must not return 0 for a NULL
+ * descriptor like we usually do.
+ */
+ if (!desc || IS_ERR(desc))
+ return -EINVAL;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
offset = gpio_chip_hwgpio(desc);
+ flags = READ_ONCE(desc->flags);
/*
* Open drain emulation using input mode may incorrectly report
* input here, fix that up.
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
- test_bit(FLAG_IS_OUT, &desc->flags))
+ if (test_bit(FLAG_OPEN_DRAIN, &flags) &&
+ test_bit(FLAG_IS_OUT, &flags))
return 0;
- if (!gc->get_direction)
+ if (!guard.gc->get_direction)
return -ENOTSUPP;
- ret = gc->get_direction(gc, offset);
+ ret = guard.gc->get_direction(guard.gc, offset);
if (ret < 0)
return ret;
@@ -337,7 +369,8 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (ret > 0)
ret = 1;
- assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
+ assign_bit(FLAG_IS_OUT, &flags, !ret);
+ WRITE_ONCE(desc->flags, flags);
return ret;
}
@@ -354,23 +387,25 @@ static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
{
struct gpio_device *prev, *next;
+ lockdep_assert_held(&gpio_devices_lock);
+
if (list_empty(&gpio_devices)) {
/* initial entry in list */
- list_add_tail(&gdev->list, &gpio_devices);
+ list_add_tail_rcu(&gdev->list, &gpio_devices);
return 0;
}
next = list_first_entry(&gpio_devices, struct gpio_device, list);
if (gdev->base + gdev->ngpio <= next->base) {
/* add before first entry */
- list_add(&gdev->list, &gpio_devices);
+ list_add_rcu(&gdev->list, &gpio_devices);
return 0;
}
prev = list_last_entry(&gpio_devices, struct gpio_device, list);
if (prev->base + prev->ngpio <= gdev->base) {
/* add behind last entry */
- list_add_tail(&gdev->list, &gpio_devices);
+ list_add_tail_rcu(&gdev->list, &gpio_devices);
return 0;
}
@@ -382,11 +417,13 @@ static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
/* add between prev and next */
if (prev->base + prev->ngpio <= gdev->base
&& gdev->base + gdev->ngpio <= next->base) {
- list_add(&gdev->list, &prev->list);
+ list_add_rcu(&gdev->list, &prev->list);
return 0;
}
}
+ synchronize_srcu(&gpio_devices_srcu);
+
return -EBUSY;
}
@@ -399,26 +436,28 @@ static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
static struct gpio_desc *gpio_name_to_desc(const char * const name)
{
struct gpio_device *gdev;
- unsigned long flags;
+ struct gpio_desc *desc;
+ struct gpio_chip *gc;
if (!name)
return NULL;
- spin_lock_irqsave(&gpio_lock, flags);
+ guard(srcu)(&gpio_devices_srcu);
- list_for_each_entry(gdev, &gpio_devices, list) {
- struct gpio_desc *desc;
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ srcu_read_lock_held(&gpio_devices_srcu)) {
+ guard(srcu)(&gdev->srcu);
+
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!gc)
+ continue;
- for_each_gpio_desc(gdev->chip, desc) {
- if (desc->name && !strcmp(desc->name, name)) {
- spin_unlock_irqrestore(&gpio_lock, flags);
+ for_each_gpio_desc(gc, desc) {
+ if (desc->name && !strcmp(desc->name, name))
return desc;
- }
}
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
return NULL;
}
@@ -656,13 +695,23 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
+ unsigned int i;
+
+ for (i = 0; i < gdev->ngpio; i++)
+ cleanup_srcu_struct(&gdev->descs[i].srcu);
ida_free(&gpio_ida, gdev->id);
kfree_const(gdev->label);
kfree(gdev->descs);
+ cleanup_srcu_struct(&gdev->srcu);
kfree(gdev);
}
+static const struct device_type gpio_dev_type = {
+ .name = "gpio_chip",
+ .release = gpiodev_release,
+};
+
#ifdef CONFIG_GPIO_CDEV
#define gcdev_register(gdev, devt) gpiolib_cdev_register((gdev), (devt))
#define gcdev_unregister(gdev) gpiolib_cdev_unregister((gdev))
@@ -680,6 +729,8 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev);
int ret;
+ device_initialize(&gdev->dev);
+
/*
* If fwnode doesn't belong to another device, it's safe to clear its
* initialized flag.
@@ -691,15 +742,12 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
if (ret)
return ret;
- /* From this point, the .release() function cleans up gpio_device */
- gdev->dev.release = gpiodev_release;
-
ret = gpiochip_sysfs_register(gdev);
if (ret)
goto err_remove_device;
dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
- gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic");
+ gdev->base + gdev->ngpio - 1, gdev->label);
return 0;
@@ -720,9 +768,6 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
return;
}
- if (test_bit(FLAG_IS_HOGGED, &desc->flags))
- return;
-
rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
if (rv)
gpiod_err(desc, "%s: unable to hog GPIO line (%s:%u): %d\n",
@@ -748,7 +793,10 @@ static void gpiochip_setup_devs(void)
struct gpio_device *gdev;
int ret;
- list_for_each_entry(gdev, &gpio_devices, list) {
+ guard(srcu)(&gpio_devices_srcu);
+
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ srcu_read_lock_held(&gpio_devices_srcu)) {
ret = gpiochip_setup_dev(gdev);
if (ret)
dev_err(&gdev->dev,
@@ -813,8 +861,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *request_key)
{
struct gpio_device *gdev;
- unsigned long flags;
- unsigned int i;
+ unsigned int desc_index;
int base = 0;
int ret = 0;
@@ -825,9 +872,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
+
+ gdev->dev.type = &gpio_dev_type;
gdev->dev.bus = &gpio_bus_type;
gdev->dev.parent = gc->parent;
- gdev->chip = gc;
+ rcu_assign_pointer(gdev->chip, gc);
gc->gpiodev = gdev;
gpiochip_set_data(gc, data);
@@ -851,7 +900,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (ret)
goto err_free_ida;
- device_initialize(&gdev->dev);
if (gc->parent && gc->parent->driver)
gdev->owner = gc->parent->driver->owner;
else if (gc->owner)
@@ -877,53 +925,55 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
gdev->ngpio = gc->ngpio;
+ gdev->can_sleep = gc->can_sleep;
- spin_lock_irqsave(&gpio_lock, flags);
-
- /*
- * TODO: this allocates a Linux GPIO number base in the global
- * GPIO numberspace for this chip. In the long run we want to
- * get *rid* of this numberspace and use only descriptors, but
- * it may be a pipe dream. It will not happen before we get rid
- * of the sysfs interface anyways.
- */
- base = gc->base;
- if (base < 0) {
- base = gpiochip_find_base_unlocked(gc->ngpio);
- if (base < 0) {
- spin_unlock_irqrestore(&gpio_lock, flags);
- ret = base;
- base = 0;
- goto err_free_label;
- }
+ scoped_guard(mutex, &gpio_devices_lock) {
/*
- * TODO: it should not be necessary to reflect the assigned
- * base outside of the GPIO subsystem. Go over drivers and
- * see if anyone makes use of this, else drop this and assign
- * a poison instead.
+ * TODO: this allocates a Linux GPIO number base in the global
+ * GPIO numberspace for this chip. In the long run we want to
+ * get *rid* of this numberspace and use only descriptors, but
+ * it may be a pipe dream. It will not happen before we get rid
+ * of the sysfs interface anyways.
*/
- gc->base = base;
- } else {
- dev_warn(&gdev->dev,
- "Static allocation of GPIO base is deprecated, use dynamic allocation.\n");
- }
- gdev->base = base;
+ base = gc->base;
+ if (base < 0) {
+ base = gpiochip_find_base_unlocked(gc->ngpio);
+ if (base < 0) {
+ ret = base;
+ base = 0;
+ goto err_free_label;
+ }
- ret = gpiodev_add_to_list_unlocked(gdev);
- if (ret) {
- spin_unlock_irqrestore(&gpio_lock, flags);
- chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
- goto err_free_label;
- }
+ /*
+ * TODO: it should not be necessary to reflect the
+ * assigned base outside of the GPIO subsystem. Go over
+ * drivers and see if anyone makes use of this, else
+ * drop this and assign a poison instead.
+ */
+ gc->base = base;
+ } else {
+ dev_warn(&gdev->dev,
+ "Static allocation of GPIO base is deprecated, use dynamic allocation.\n");
+ }
+
+ gdev->base = base;
- for (i = 0; i < gc->ngpio; i++)
- gdev->descs[i].gdev = gdev;
+ ret = gpiodev_add_to_list_unlocked(gdev);
+ if (ret) {
+ chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
+ goto err_free_label;
+ }
+ }
- spin_unlock_irqrestore(&gpio_lock, flags);
+ for (desc_index = 0; desc_index < gc->ngpio; desc_index++)
+ gdev->descs[desc_index].gdev = gdev;
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
- init_rwsem(&gdev->sem);
+
+ ret = init_srcu_struct(&gdev->srcu);
+ if (ret)
+ goto err_remove_from_list;
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
@@ -932,32 +982,36 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (gc->names) {
ret = gpiochip_set_desc_names(gc);
if (ret)
- goto err_remove_from_list;
+ goto err_cleanup_gdev_srcu;
}
ret = gpiochip_set_names(gc);
if (ret)
- goto err_remove_from_list;
+ goto err_cleanup_gdev_srcu;
ret = gpiochip_init_valid_mask(gc);
if (ret)
- goto err_remove_from_list;
+ goto err_cleanup_gdev_srcu;
- ret = of_gpiochip_add(gc);
- if (ret)
- goto err_free_gpiochip_mask;
+ for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
+ struct gpio_desc *desc = &gdev->descs[desc_index];
- for (i = 0; i < gc->ngpio; i++) {
- struct gpio_desc *desc = &gdev->descs[i];
+ ret = init_srcu_struct(&desc->srcu);
+ if (ret)
+ goto err_cleanup_desc_srcu;
- if (gc->get_direction && gpiochip_line_is_valid(gc, i)) {
+ if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
assign_bit(FLAG_IS_OUT,
- &desc->flags, !gc->get_direction(gc, i));
+ &desc->flags, !gc->get_direction(gc, desc_index));
} else {
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
}
}
+ ret = of_gpiochip_add(gc);
+ if (ret)
+ goto err_cleanup_desc_srcu;
+
ret = gpiochip_add_pin_ranges(gc);
if (ret)
goto err_remove_of_chip;
@@ -968,11 +1022,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@@ -997,18 +1051,22 @@ err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
+err_free_hogs:
+ gpiochip_free_hogs(gc);
acpi_gpiochip_remove(gc);
+ gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
- gpiochip_free_hogs(gc);
of_gpiochip_remove(gc);
-err_free_gpiochip_mask:
- gpiochip_remove_pin_ranges(gc);
+err_cleanup_desc_srcu:
+ while (desc_index--)
+ cleanup_srcu_struct(&gdev->descs[desc_index].srcu);
gpiochip_free_valid_mask(gc);
+err_cleanup_gdev_srcu:
+ cleanup_srcu_struct(&gdev->srcu);
err_remove_from_list:
- spin_lock_irqsave(&gpio_lock, flags);
- list_del(&gdev->list);
- spin_unlock_irqrestore(&gpio_lock, flags);
+ scoped_guard(mutex, &gpio_devices_lock)
+ list_del_rcu(&gdev->list);
+ synchronize_srcu(&gpio_devices_srcu);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
gpio_device_put(gdev);
@@ -1044,16 +1102,18 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
void gpiochip_remove(struct gpio_chip *gc)
{
struct gpio_device *gdev = gc->gpiodev;
- unsigned long flags;
- unsigned int i;
-
- down_write(&gdev->sem);
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
gpiochip_free_hogs(gc);
+
+ scoped_guard(mutex, &gpio_devices_lock)
+ list_del_rcu(&gdev->list);
+ synchronize_srcu(&gpio_devices_srcu);
+
/* Numb the device, cancelling all outstanding operations */
- gdev->chip = NULL;
+ rcu_assign_pointer(gdev->chip, NULL);
+ synchronize_srcu(&gdev->srcu);
gpiochip_irqchip_remove(gc);
acpi_gpiochip_remove(gc);
of_gpiochip_remove(gc);
@@ -1065,20 +1125,6 @@ void gpiochip_remove(struct gpio_chip *gc)
*/
gpiochip_set_data(gc, NULL);
- spin_lock_irqsave(&gpio_lock, flags);
- for (i = 0; i < gdev->ngpio; i++) {
- if (test_bit(FLAG_REQUESTED, &gdev->descs[i].flags))
- break;
- }
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- if (i != gdev->ngpio)
- dev_crit(&gdev->dev,
- "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
-
- scoped_guard(spinlock_irqsave, &gpio_lock)
- list_del(&gdev->list);
-
/*
* The gpiochip side puts its use of the device to rest here:
* if there are no userspace clients, the chardev and device will
@@ -1086,7 +1132,6 @@ void gpiochip_remove(struct gpio_chip *gc)
* gone.
*/
gcdev_unregister(gdev);
- up_write(&gdev->sem);
gpio_device_put(gdev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -1112,11 +1157,12 @@ EXPORT_SYMBOL_GPL(gpiochip_remove);
* If the function returns non-NULL, the returned reference must be freed by
* the caller using gpio_device_put().
*/
-struct gpio_device *gpio_device_find(void *data,
+struct gpio_device *gpio_device_find(const void *data,
int (*match)(struct gpio_chip *gc,
- void *data))
+ const void *data))
{
struct gpio_device *gdev;
+ struct gpio_chip *gc;
/*
* Not yet but in the future the spinlock below will become a mutex.
@@ -1125,10 +1171,15 @@ struct gpio_device *gpio_device_find(void *data,
*/
might_sleep();
- guard(spinlock_irqsave)(&gpio_lock);
+ guard(srcu)(&gpio_devices_srcu);
+
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ srcu_read_lock_held(&gpio_devices_srcu)) {
+ guard(srcu)(&gdev->srcu);
- list_for_each_entry(gdev, &gpio_devices, list) {
- if (gdev->chip && match(gdev->chip, data))
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
+
+ if (gc && match(gc, data))
return gpio_device_get(gdev);
}
@@ -1136,7 +1187,7 @@ struct gpio_device *gpio_device_find(void *data,
}
EXPORT_SYMBOL_GPL(gpio_device_find);
-static int gpio_chip_match_by_label(struct gpio_chip *gc, void *label)
+static int gpio_chip_match_by_label(struct gpio_chip *gc, const void *label)
{
return gc->label && !strcmp(gc->label, label);
}
@@ -1156,7 +1207,7 @@ struct gpio_device *gpio_device_find_by_label(const char *label)
}
EXPORT_SYMBOL_GPL(gpio_device_find_by_label);
-static int gpio_chip_match_by_fwnode(struct gpio_chip *gc, void *fwnode)
+static int gpio_chip_match_by_fwnode(struct gpio_chip *gc, const void *fwnode)
{
return device_match_fwnode(&gc->gpiodev->dev, fwnode);
}
@@ -1254,8 +1305,8 @@ static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
gpiochip_free_mask(&gc->irq.valid_mask);
}
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
- unsigned int offset)
+static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
+ unsigned int offset)
{
if (!gpiochip_line_is_valid(gc, offset))
return false;
@@ -1264,7 +1315,6 @@ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
return true;
return test_bit(offset, gc->irq.valid_mask);
}
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -1439,6 +1489,43 @@ static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc,
return offset;
}
+/**
+ * gpiochip_irq_domain_activate() - Lock a GPIO to be used as an IRQ
+ * @domain: The IRQ domain used by this IRQ chip
+ * @data: Outermost irq_data associated with the IRQ
+ * @reserve: If set, only reserve an interrupt vector instead of assigning one
+ *
+ * This function is a wrapper that calls gpiochip_lock_as_irq() and is to be
+ * used as the activate function for the &struct irq_domain_ops. The host_data
+ * for the IRQ domain must be the &struct gpio_chip.
+ */
+static int gpiochip_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *data, bool reserve)
+{
+ struct gpio_chip *gc = domain->host_data;
+ unsigned int hwirq = irqd_to_hwirq(data);
+
+ return gpiochip_lock_as_irq(gc, hwirq);
+}
+
+/**
+ * gpiochip_irq_domain_deactivate() - Unlock a GPIO used as an IRQ
+ * @domain: The IRQ domain used by this IRQ chip
+ * @data: Outermost irq_data associated with the IRQ
+ *
+ * This function is a wrapper that will call gpiochip_unlock_as_irq() and is to
+ * be used as the deactivate function for the &struct irq_domain_ops. The
+ * host_data for the IRQ domain must be the &struct gpio_chip.
+ */
+static void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *data)
+{
+ struct gpio_chip *gc = domain->host_data;
+ unsigned int hwirq = irqd_to_hwirq(data);
+
+ return gpiochip_unlock_as_irq(gc, hwirq);
+}
+
static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops)
{
ops->activate = gpiochip_irq_domain_activate;
@@ -1556,7 +1643,8 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
* gpiochip by assigning the gpiochip as chip data, and using the irqchip
* stored inside the gpiochip.
*/
-int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)
+static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct gpio_chip *gc = d->host_data;
int ret = 0;
@@ -1593,9 +1681,8 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwi
return 0;
}
-EXPORT_SYMBOL_GPL(gpiochip_irq_map);
-void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
+static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
struct gpio_chip *gc = d->host_data;
@@ -1604,7 +1691,6 @@ void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
-EXPORT_SYMBOL_GPL(gpiochip_irq_unmap);
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
@@ -1626,50 +1712,6 @@ static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
return domain;
}
-/*
- * TODO: move these activate/deactivate in under the hierarchicial
- * irqchip implementation as static once SPMI and SSBI (all external
- * users) are phased over.
- */
-/**
- * gpiochip_irq_domain_activate() - Lock a GPIO to be used as an IRQ
- * @domain: The IRQ domain used by this IRQ chip
- * @data: Outermost irq_data associated with the IRQ
- * @reserve: If set, only reserve an interrupt vector instead of assigning one
- *
- * This function is a wrapper that calls gpiochip_lock_as_irq() and is to be
- * used as the activate function for the &struct irq_domain_ops. The host_data
- * for the IRQ domain must be the &struct gpio_chip.
- */
-int gpiochip_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *data, bool reserve)
-{
- struct gpio_chip *gc = domain->host_data;
- unsigned int hwirq = irqd_to_hwirq(data);
-
- return gpiochip_lock_as_irq(gc, hwirq);
-}
-EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);
-
-/**
- * gpiochip_irq_domain_deactivate() - Unlock a GPIO used as an IRQ
- * @domain: The IRQ domain used by this IRQ chip
- * @data: Outermost irq_data associated with the IRQ
- *
- * This function is a wrapper that will call gpiochip_unlock_as_irq() and is to
- * be used as the deactivate function for the &struct irq_domain_ops. The
- * host_data for the IRQ domain must be the &struct gpio_chip.
- */
-void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
- struct irq_data *data)
-{
- struct gpio_chip *gc = domain->host_data;
- unsigned int hwirq = irqd_to_hwirq(data);
-
- return gpiochip_unlock_as_irq(gc, hwirq);
-}
-EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
-
static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
@@ -2042,6 +2084,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return -ENOTSUPP;
+#endif
+
return pinctrl_gpio_set_config(gc, offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
@@ -2184,58 +2231,41 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
*/
static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *gc = desc->gdev->chip;
- unsigned long flags;
unsigned int offset;
int ret;
- if (label) {
- label = kstrdup_const(label, GFP_KERNEL);
- if (!label)
- return -ENOMEM;
- }
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
- spin_lock_irqsave(&gpio_lock, flags);
+ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
+ return -EBUSY;
/* NOTE: gpio_request() can be called in early boot,
* before IRQs are enabled, for non-sleeping (SOC) GPIOs.
*/
- if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
- desc_set_label(desc, label ? : "?");
- } else {
- ret = -EBUSY;
- goto out_free_unlock;
- }
-
- if (gc->request) {
- /* gc->request may sleep */
- spin_unlock_irqrestore(&gpio_lock, flags);
+ if (guard.gc->request) {
offset = gpio_chip_hwgpio(desc);
- if (gpiochip_line_is_valid(gc, offset))
- ret = gc->request(gc, offset);
+ if (gpiochip_line_is_valid(guard.gc, offset))
+ ret = guard.gc->request(guard.gc, offset);
else
ret = -EINVAL;
- spin_lock_irqsave(&gpio_lock, flags);
-
- if (ret) {
- desc_set_label(desc, NULL);
- clear_bit(FLAG_REQUESTED, &desc->flags);
- goto out_free_unlock;
- }
+ if (ret)
+ goto out_clear_bit;
}
- if (gc->get_direction) {
- /* gc->get_direction may sleep */
- spin_unlock_irqrestore(&gpio_lock, flags);
+
+ if (guard.gc->get_direction)
gpiod_get_direction(desc);
- spin_lock_irqsave(&gpio_lock, flags);
- }
- spin_unlock_irqrestore(&gpio_lock, flags);
+
+ ret = desc_set_label(desc, label ? : "?");
+ if (ret)
+ goto out_clear_bit;
+
return 0;
-out_free_unlock:
- spin_unlock_irqrestore(&gpio_lock, flags);
- kfree_const(label);
+out_clear_bit:
+ clear_bit(FLAG_REQUESTED, &desc->flags);
return ret;
}
@@ -2249,19 +2279,12 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
{
if (!desc)
return 0;
+
if (IS_ERR(desc)) {
pr_warn("%s: invalid GPIO (errorpointer)\n", func);
return PTR_ERR(desc);
}
- if (!desc->gdev) {
- pr_warn("%s: invalid GPIO (no device)\n", func);
- return -EINVAL;
- }
- if (!desc->gdev->chip) {
- dev_warn(&desc->gdev->dev,
- "%s: backing chip is gone\n", func);
- return 0;
- }
+
return 1;
}
@@ -2297,60 +2320,45 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
return ret;
}
-static bool gpiod_free_commit(struct gpio_desc *desc)
+static void gpiod_free_commit(struct gpio_desc *desc)
{
- struct gpio_chip *gc;
unsigned long flags;
- bool ret = false;
might_sleep();
- spin_lock_irqsave(&gpio_lock, flags);
+ CLASS(gpio_chip_guard, guard)(desc);
- gc = desc->gdev->chip;
- if (gc && test_bit(FLAG_REQUESTED, &desc->flags)) {
- if (gc->free) {
- spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(gc->can_sleep);
- gc->free(gc, gpio_chip_hwgpio(desc));
- spin_lock_irqsave(&gpio_lock, flags);
- }
- kfree_const(desc->label);
- desc_set_label(desc, NULL);
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
- clear_bit(FLAG_REQUESTED, &desc->flags);
- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
- clear_bit(FLAG_PULL_UP, &desc->flags);
- clear_bit(FLAG_PULL_DOWN, &desc->flags);
- clear_bit(FLAG_BIAS_DISABLE, &desc->flags);
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
- clear_bit(FLAG_IS_HOGGED, &desc->flags);
+ flags = READ_ONCE(desc->flags);
+
+ if (guard.gc && test_bit(FLAG_REQUESTED, &flags)) {
+ if (guard.gc->free)
+ guard.gc->free(guard.gc, gpio_chip_hwgpio(desc));
+
+ clear_bit(FLAG_ACTIVE_LOW, &flags);
+ clear_bit(FLAG_REQUESTED, &flags);
+ clear_bit(FLAG_OPEN_DRAIN, &flags);
+ clear_bit(FLAG_OPEN_SOURCE, &flags);
+ clear_bit(FLAG_PULL_UP, &flags);
+ clear_bit(FLAG_PULL_DOWN, &flags);
+ clear_bit(FLAG_BIAS_DISABLE, &flags);
+ clear_bit(FLAG_EDGE_RISING, &flags);
+ clear_bit(FLAG_EDGE_FALLING, &flags);
+ clear_bit(FLAG_IS_HOGGED, &flags);
#ifdef CONFIG_OF_DYNAMIC
- desc->hog = NULL;
+ WRITE_ONCE(desc->hog, NULL);
#endif
- ret = true;
- }
-
- spin_unlock_irqrestore(&gpio_lock, flags);
- gpiod_line_state_notify(desc, GPIOLINE_CHANGED_RELEASED);
+ desc_set_label(desc, NULL);
+ WRITE_ONCE(desc->flags, flags);
- return ret;
+ gpiod_line_state_notify(desc, GPIOLINE_CHANGED_RELEASED);
+ }
}
void gpiod_free(struct gpio_desc *desc)
{
- /*
- * We must not use VALIDATE_DESC_VOID() as the underlying gdev->chip
- * may already be NULL but we still want to put the references.
- */
- if (!desc)
- return;
-
- if (!gpiod_free_commit(desc))
- WARN_ON(1);
+ VALIDATE_DESC_VOID(desc);
+ gpiod_free_commit(desc);
module_put(desc->gdev->owner);
gpio_device_put(desc->gdev);
}
@@ -2376,20 +2384,12 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return NULL;
- guard(spinlock_irqsave)(&gpio_lock);
-
if (!test_bit(FLAG_REQUESTED, &desc->flags))
return NULL;
- /*
- * FIXME: Once we mark gpiod_direction_input/output() and
- * gpiod_get_direction() with might_sleep(), we'll be able to protect
- * the GPIO descriptors with mutex (while value setting operations will
- * become lockless).
- *
- * Until this happens, this allocation needs to be atomic.
- */
- label = kstrdup(desc->label, GFP_ATOMIC);
+ guard(srcu)(&desc->srcu);
+
+ label = kstrdup(gpiod_get_label(desc), GFP_KERNEL);
if (!label)
return ERR_PTR(-ENOMEM);
@@ -2484,11 +2484,14 @@ static int gpio_set_config_with_argument(struct gpio_desc *desc,
enum pin_config_param mode,
u32 argument)
{
- struct gpio_chip *gc = desc->gdev->chip;
unsigned long config;
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
config = pinconf_to_config_packed(mode, argument);
- return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+ return gpio_do_set_config(guard.gc, gpio_chip_hwgpio(desc), config);
}
static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
@@ -2522,13 +2525,16 @@ static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
static int gpio_set_bias(struct gpio_desc *desc)
{
enum pin_config_param bias;
+ unsigned long flags;
unsigned int arg;
- if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ flags = READ_ONCE(desc->flags);
+
+ if (test_bit(FLAG_BIAS_DISABLE, &flags))
bias = PIN_CONFIG_BIAS_DISABLE;
- else if (test_bit(FLAG_PULL_UP, &desc->flags))
+ else if (test_bit(FLAG_PULL_UP, &flags))
bias = PIN_CONFIG_BIAS_PULL_UP;
- else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ else if (test_bit(FLAG_PULL_DOWN, &flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
else
return 0;
@@ -2575,18 +2581,20 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- struct gpio_chip *gc;
int ret = 0;
VALIDATE_DESC(desc);
- gc = desc->gdev->chip;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
/*
* It is legal to have no .get() and .direction_input() specified if
* the chip is output-only, but you can't specify .direction_input()
* and not support the .get() operation, that doesn't make sense.
*/
- if (!gc->get && gc->direction_input) {
+ if (!guard.gc->get && guard.gc->direction_input) {
gpiod_warn(desc,
"%s: missing get() but have direction_input()\n",
__func__);
@@ -2599,10 +2607,12 @@ int gpiod_direction_input(struct gpio_desc *desc)
* direction (if .get_direction() is supported) else we silently
* assume we are in input mode after this.
*/
- if (gc->direction_input) {
- ret = gc->direction_input(gc, gpio_chip_hwgpio(desc));
- } else if (gc->get_direction &&
- (gc->get_direction(gc, gpio_chip_hwgpio(desc)) != 1)) {
+ if (guard.gc->direction_input) {
+ ret = guard.gc->direction_input(guard.gc,
+ gpio_chip_hwgpio(desc));
+ } else if (guard.gc->get_direction &&
+ (guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc)) != 1)) {
gpiod_warn(desc,
"%s: missing direction_input() operation and line is output\n",
__func__);
@@ -2621,28 +2631,31 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc = desc->gdev->chip;
- int val = !!value;
- int ret = 0;
+ int val = !!value, ret = 0;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
/*
* It's OK not to specify .direction_output() if the gpiochip is
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!gc->set && !gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
return -EIO;
}
- if (gc->direction_output) {
- ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
+ if (guard.gc->direction_output) {
+ ret = guard.gc->direction_output(guard.gc,
+ gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
- if (gc->get_direction &&
- gc->get_direction(gc, gpio_chip_hwgpio(desc))) {
+ if (guard.gc->get_direction &&
+ guard.gc->get_direction(guard.gc, gpio_chip_hwgpio(desc))) {
gpiod_warn(desc,
"%s: missing direction_output() operation\n",
__func__);
@@ -2652,7 +2665,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* If we can't actively set the direction, we are some
* output-only chip, so just drive the output as desired.
*/
- gc->set(gc, gpio_chip_hwgpio(desc), val);
+ guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), val);
}
if (!ret)
@@ -2694,24 +2707,28 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
+ unsigned long flags;
int ret;
VALIDATE_DESC(desc);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+
+ flags = READ_ONCE(desc->flags);
+
+ if (test_bit(FLAG_ACTIVE_LOW, &flags))
value = !value;
else
value = !!value;
/* GPIOs used for enabled IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &desc->flags) &&
- test_bit(FLAG_IRQ_IS_ENABLED, &desc->flags)) {
+ if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
+ test_bit(FLAG_IRQ_IS_ENABLED, &flags)) {
gpiod_err(desc,
"%s: tried to set a GPIO tied to an IRQ as output\n",
__func__);
return -EIO;
}
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ if (test_bit(FLAG_OPEN_DRAIN, &flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
@@ -2721,7 +2738,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
ret = gpiod_direction_input(desc);
goto set_output_flag;
}
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
+ } else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
@@ -2764,17 +2781,20 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
int ret = 0;
- struct gpio_chip *gc;
VALIDATE_DESC(desc);
- gc = desc->gdev->chip;
- if (!gc->en_hw_timestamp) {
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (!guard.gc->en_hw_timestamp) {
gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
return -ENOTSUPP;
}
- ret = gc->en_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+ ret = guard.gc->en_hw_timestamp(guard.gc,
+ gpio_chip_hwgpio(desc), flags);
if (ret)
gpiod_warn(desc, "%s: hw ts request failed\n", __func__);
@@ -2793,17 +2813,20 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
int ret = 0;
- struct gpio_chip *gc;
VALIDATE_DESC(desc);
- gc = desc->gdev->chip;
- if (!gc->dis_hw_timestamp) {
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (!guard.gc->dis_hw_timestamp) {
gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
return -ENOTSUPP;
}
- ret = gc->dis_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+ ret = guard.gc->dis_hw_timestamp(guard.gc, gpio_chip_hwgpio(desc),
+ flags);
if (ret)
gpiod_warn(desc, "%s: hw ts release failed\n", __func__);
@@ -2822,12 +2845,13 @@ EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
*/
int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
{
- struct gpio_chip *gc;
-
VALIDATE_DESC(desc);
- gc = desc->gdev->chip;
- return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ return gpio_do_set_config(guard.gc, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_config);
@@ -2925,10 +2949,19 @@ static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *des
static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
{
+ struct gpio_device *gdev;
struct gpio_chip *gc;
int value;
- gc = desc->gdev->chip;
+ /* FIXME Unable to use gpio_chip_guard due to const desc. */
+ gdev = desc->gdev;
+
+ guard(srcu)(&gdev->srcu);
+
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!gc)
+ return -ENODEV;
+
value = gpio_chip_get_value(gc, desc);
value = value < 0 ? value : !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
@@ -2954,6 +2987,14 @@ static int gpio_chip_get_multiple(struct gpio_chip *gc,
return -EIO;
}
+/* The 'other' chip must be protected with its GPIO device's SRCU. */
+static bool gpio_device_chip_cmp(struct gpio_device *gdev, struct gpio_chip *gc)
+{
+ guard(srcu)(&gdev->srcu);
+
+ return gc == srcu_dereference(gdev->chip, &gdev->srcu);
+}
+
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
@@ -2991,33 +3032,36 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
}
while (i < array_size) {
- struct gpio_chip *gc = desc_array[i]->gdev->chip;
DECLARE_BITMAP(fastpath_mask, FASTPATH_NGPIO);
DECLARE_BITMAP(fastpath_bits, FASTPATH_NGPIO);
unsigned long *mask, *bits;
int first, j;
- if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
+ CLASS(gpio_chip_guard, guard)(desc_array[i]);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (likely(guard.gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath_mask;
bits = fastpath_bits;
} else {
gfp_t flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC;
- mask = bitmap_alloc(gc->ngpio, flags);
+ mask = bitmap_alloc(guard.gc->ngpio, flags);
if (!mask)
return -ENOMEM;
- bits = bitmap_alloc(gc->ngpio, flags);
+ bits = bitmap_alloc(guard.gc->ngpio, flags);
if (!bits) {
bitmap_free(mask);
return -ENOMEM;
}
}
- bitmap_zero(mask, gc->ngpio);
+ bitmap_zero(mask, guard.gc->ngpio);
if (!can_sleep)
- WARN_ON(gc->can_sleep);
+ WARN_ON(guard.gc->can_sleep);
/* collect all inputs belonging to the same chip */
first = i;
@@ -3032,9 +3076,9 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
i = find_next_zero_bit(array_info->get_mask,
array_size, i);
} while ((i < array_size) &&
- (desc_array[i]->gdev->chip == gc));
+ gpio_device_chip_cmp(desc_array[i]->gdev, guard.gc));
- ret = gpio_chip_get_multiple(gc, mask, bits);
+ ret = gpio_chip_get_multiple(guard.gc, mask, bits);
if (ret) {
if (mask != fastpath_mask)
bitmap_free(mask);
@@ -3081,7 +3125,7 @@ int gpiod_get_raw_value(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
/* Should be using gpiod_get_raw_value_cansleep() */
- WARN_ON(desc->gdev->chip->can_sleep);
+ WARN_ON(desc->gdev->can_sleep);
return gpiod_get_raw_value_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
@@ -3102,7 +3146,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
VALIDATE_DESC(desc);
/* Should be using gpiod_get_value_cansleep() */
- WARN_ON(desc->gdev->chip->can_sleep);
+ WARN_ON(desc->gdev->can_sleep);
value = gpiod_get_raw_value_commit(desc);
if (value < 0)
@@ -3175,14 +3219,16 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
*/
static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
- int ret = 0;
- struct gpio_chip *gc = desc->gdev->chip;
- int offset = gpio_chip_hwgpio(desc);
+ int ret = 0, offset = gpio_chip_hwgpio(desc);
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return;
if (value) {
- ret = gc->direction_input(gc, offset);
+ ret = guard.gc->direction_input(guard.gc, offset);
} else {
- ret = gc->direction_output(gc, offset, 0);
+ ret = guard.gc->direction_output(guard.gc, offset, 0);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
}
@@ -3200,16 +3246,18 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
*/
static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
- int ret = 0;
- struct gpio_chip *gc = desc->gdev->chip;
- int offset = gpio_chip_hwgpio(desc);
+ int ret = 0, offset = gpio_chip_hwgpio(desc);
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return;
if (value) {
- ret = gc->direction_output(gc, offset, 1);
+ ret = guard.gc->direction_output(guard.gc, offset, 1);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- ret = gc->direction_input(gc, offset);
+ ret = guard.gc->direction_input(guard.gc, offset);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
@@ -3220,11 +3268,12 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- struct gpio_chip *gc;
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return;
- gc = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- gc->set(gc, gpio_chip_hwgpio(desc), value);
+ guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), value);
}
/*
@@ -3285,33 +3334,36 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
}
while (i < array_size) {
- struct gpio_chip *gc = desc_array[i]->gdev->chip;
DECLARE_BITMAP(fastpath_mask, FASTPATH_NGPIO);
DECLARE_BITMAP(fastpath_bits, FASTPATH_NGPIO);
unsigned long *mask, *bits;
int count = 0;
- if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
+ CLASS(gpio_chip_guard, guard)(desc_array[i]);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (likely(guard.gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath_mask;
bits = fastpath_bits;
} else {
gfp_t flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC;
- mask = bitmap_alloc(gc->ngpio, flags);
+ mask = bitmap_alloc(guard.gc->ngpio, flags);
if (!mask)
return -ENOMEM;
- bits = bitmap_alloc(gc->ngpio, flags);
+ bits = bitmap_alloc(guard.gc->ngpio, flags);
if (!bits) {
bitmap_free(mask);
return -ENOMEM;
}
}
- bitmap_zero(mask, gc->ngpio);
+ bitmap_zero(mask, guard.gc->ngpio);
if (!can_sleep)
- WARN_ON(gc->can_sleep);
+ WARN_ON(guard.gc->can_sleep);
do {
struct gpio_desc *desc = desc_array[i];
@@ -3347,10 +3399,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
i = find_next_zero_bit(array_info->set_mask,
array_size, i);
} while ((i < array_size) &&
- (desc_array[i]->gdev->chip == gc));
+ gpio_device_chip_cmp(desc_array[i]->gdev, guard.gc));
/* push collected bits to outputs */
if (count != 0)
- gpio_chip_set_multiple(gc, mask, bits);
+ gpio_chip_set_multiple(guard.gc, mask, bits);
if (mask != fastpath_mask)
bitmap_free(mask);
@@ -3375,7 +3427,7 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
/* Should be using gpiod_set_raw_value_cansleep() */
- WARN_ON(desc->gdev->chip->can_sleep);
+ WARN_ON(desc->gdev->can_sleep);
gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -3416,7 +3468,7 @@ void gpiod_set_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
/* Should be using gpiod_set_value_cansleep() */
- WARN_ON(desc->gdev->chip->can_sleep);
+ WARN_ON(desc->gdev->can_sleep);
gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -3480,7 +3532,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value);
int gpiod_cansleep(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- return desc->gdev->chip->can_sleep;
+ return desc->gdev->can_sleep;
}
EXPORT_SYMBOL_GPL(gpiod_cansleep);
@@ -3492,16 +3544,8 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
{
VALIDATE_DESC(desc);
- if (name) {
- name = kstrdup_const(name, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- }
-
- kfree_const(desc->label);
- desc_set_label(desc, name);
- return 0;
+ return desc_set_label(desc, name);
}
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
@@ -3514,6 +3558,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
*/
int gpiod_to_irq(const struct gpio_desc *desc)
{
+ struct gpio_device *gdev;
struct gpio_chip *gc;
int offset;
@@ -3522,10 +3567,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)
* requires this function to not return zero on an invalid descriptor
* but rather a negative error number.
*/
- if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+ if (!desc || IS_ERR(desc))
return -EINVAL;
- gc = desc->gdev->chip;
+ gdev = desc->gdev;
+ /* FIXME Cannot use gpio_chip_guard due to const desc. */
+ guard(srcu)(&gdev->srcu);
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!gc)
+ return -ENODEV;
+
offset = gpio_chip_hwgpio(desc);
if (gc->to_irq) {
int retirq = gc->to_irq(gc, offset);
@@ -3592,14 +3643,6 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
set_bit(FLAG_USED_AS_IRQ, &desc->flags);
set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
- /*
- * If the consumer has not set up a label (such as when the
- * IRQ is referenced from .to_irq()) we set up a label here
- * so it is clear this is used as an interrupt.
- */
- if (!desc->label)
- desc_set_label(desc, "interrupt");
-
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
@@ -3622,10 +3665,6 @@ void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
-
- /* If we only had this marking, erase it */
- if (desc->label && !strcmp(desc->label, "interrupt"))
- desc_set_label(desc, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -4133,39 +4172,48 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
-static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
- struct fwnode_handle *fwnode,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags flags,
- const char *label,
- bool platform_lookup_allowed)
+struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags,
+ const char *label,
+ bool platform_lookup_allowed)
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- struct gpio_desc *desc;
- int ret;
+ /*
+ * scoped_guard() is implemented as a for loop, meaning static
+ * analyzers will complain about these two not being initialized.
+ */
+ struct gpio_desc *desc = NULL;
+ int ret = 0;
+
+ scoped_guard(srcu, &gpio_devices_srcu) {
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
+ &flags, &lookupflags);
+ if (gpiod_not_found(desc) && platform_lookup_allowed) {
+ /*
+ * Either we are not using DT or ACPI, or their lookup
+ * did not return a result. In that case, use platform
+ * lookup as a fallback.
+ */
+ dev_dbg(consumer,
+ "using lookup tables for GPIO lookup\n");
+ desc = gpiod_find(consumer, con_id, idx, &lookupflags);
+ }
+
+ if (IS_ERR(desc)) {
+ dev_dbg(consumer, "No GPIO consumer %s found\n",
+ con_id);
+ return desc;
+ }
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, &flags, &lookupflags);
- if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
- * Either we are not using DT or ACPI, or their lookup did not
- * return a result. In that case, use platform lookup as a
- * fallback.
+ * If a connection label was passed, use that; otherwise attempt to use
+ * the device name as the label.
*/
- dev_dbg(consumer, "using lookup tables for GPIO lookup\n");
- desc = gpiod_find(consumer, con_id, idx, &lookupflags);
+ ret = gpiod_request(desc, label);
}
-
- if (IS_ERR(desc)) {
- dev_dbg(consumer, "No GPIO consumer %s found\n", con_id);
- return desc;
- }
-
- /*
- * If a connection label was passed use that, else attempt to use
- * the device name as label
- */
- ret = gpiod_request(desc, label);
if (ret) {
if (!(ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
return ERR_PTR(ret);
@@ -4238,9 +4286,9 @@ int gpiod_count(struct device *dev, const char *con_id)
int count = -ENOENT;
if (is_of_node(fwnode))
- count = of_gpio_get_count(dev, con_id);
+ count = of_gpio_count(fwnode, con_id);
else if (is_acpi_node(fwnode))
- count = acpi_gpio_count(dev, con_id);
+ count = acpi_gpio_count(fwnode, con_id);
else if (is_software_node(fwnode))
count = swnode_gpio_count(fwnode, con_id);
@@ -4424,26 +4472,30 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags)
{
- struct gpio_chip *gc;
+ struct gpio_device *gdev = desc->gdev;
struct gpio_desc *local_desc;
int hwnum;
int ret;
- gc = gpiod_to_chip(desc);
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (test_and_set_bit(FLAG_IS_HOGGED, &desc->flags))
+ return 0;
+
hwnum = gpio_chip_hwgpio(desc);
- local_desc = gpiochip_request_own_desc(gc, hwnum, name,
+ local_desc = gpiochip_request_own_desc(guard.gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
+ clear_bit(FLAG_IS_HOGGED, &desc->flags);
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
- name, gc->label, hwnum, ret);
+ name, gdev->label, hwnum, ret);
return ret;
}
- /* Mark GPIO as hogged so it can be identified and removed later */
- set_bit(FLAG_IS_HOGGED, &desc->flags);
-
gpiod_dbg(desc, "hogged as %s%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
@@ -4707,13 +4759,22 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
- struct gpio_chip *gc = gdev->chip;
bool active_low, is_irq, is_out;
unsigned int gpio = gdev->base;
struct gpio_desc *desc;
+ struct gpio_chip *gc;
int value;
+ guard(srcu)(&gdev->srcu);
+
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
+ if (!gc) {
+ seq_puts(s, "Underlying GPIO chip is gone\n");
+ return;
+ }
+
for_each_gpio_desc(gc, desc) {
+ guard(srcu)(&desc->srcu);
if (test_bit(FLAG_REQUESTED, &desc->flags)) {
gpiod_get_direction(desc);
is_out = test_bit(FLAG_IS_OUT, &desc->flags);
@@ -4721,7 +4782,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags);
seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s\n",
- gpio, desc->name ?: "", desc->label,
+ gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
value >= 0 ? (value ? "hi" : "lo") : "? ",
is_irq ? "IRQ " : "",
@@ -4734,61 +4795,72 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
}
}
+struct gpiolib_seq_priv {
+ bool newline;
+ int idx;
+};
+
static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
{
- unsigned long flags;
- struct gpio_device *gdev = NULL;
+ struct gpiolib_seq_priv *priv;
+ struct gpio_device *gdev;
loff_t index = *pos;
- s->private = "";
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ s->private = priv;
+ priv->idx = srcu_read_lock(&gpio_devices_srcu);
- spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(gdev, &gpio_devices, list)
- if (index-- == 0) {
- spin_unlock_irqrestore(&gpio_lock, flags);
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ srcu_read_lock_held(&gpio_devices_srcu)) {
+ if (index-- == 0)
return gdev;
- }
- spin_unlock_irqrestore(&gpio_lock, flags);
+ }
return NULL;
}
static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- unsigned long flags;
- struct gpio_device *gdev = v;
- void *ret = NULL;
-
- spin_lock_irqsave(&gpio_lock, flags);
- if (list_is_last(&gdev->list, &gpio_devices))
- ret = NULL;
- else
- ret = list_first_entry(&gdev->list, struct gpio_device, list);
- spin_unlock_irqrestore(&gpio_lock, flags);
+ struct gpiolib_seq_priv *priv = s->private;
+ struct gpio_device *gdev = v, *next;
- s->private = "\n";
+ next = list_entry_rcu(gdev->list.next, struct gpio_device, list);
+ gdev = &next->list == &gpio_devices ? NULL : next;
+ priv->newline = true;
++*pos;
- return ret;
+ return gdev;
}
static void gpiolib_seq_stop(struct seq_file *s, void *v)
{
+ struct gpiolib_seq_priv *priv = s->private;
+
+ srcu_read_unlock(&gpio_devices_srcu, priv->idx);
+ kfree(priv);
}
static int gpiolib_seq_show(struct seq_file *s, void *v)
{
+ struct gpiolib_seq_priv *priv = s->private;
struct gpio_device *gdev = v;
- struct gpio_chip *gc = gdev->chip;
+ struct gpio_chip *gc;
struct device *parent;
+ guard(srcu)(&gdev->srcu);
+
+ gc = srcu_dereference(gdev->chip, &gdev->srcu);
if (!gc) {
- seq_printf(s, "%s%s: (dangling chip)", (char *)s->private,
+ seq_printf(s, "%s%s: (dangling chip)",
+ priv->newline ? "\n" : "",
dev_name(&gdev->dev));
return 0;
}
- seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private,
+ seq_printf(s, "%s%s: GPIOs %d-%d", priv->newline ? "\n" : "",
dev_name(&gdev->dev),
gdev->base, gdev->base + gdev->ngpio - 1);
parent = gc->parent;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index a4a2520b5f31..f67d5991ab1c 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,7 +16,7 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/rwsem.h>
+#include <linux/srcu.h>
#define GPIOCHIP_NAME "gpiochip"
@@ -33,6 +33,8 @@
* @descs: array of ngpio descriptors.
* @ngpio: the number of GPIO lines on this GPIO device, equal to the size
* of the @descs array.
+ * @can_sleep: indicates whether the GPIO chip driver's callbacks can sleep,
+ * implying that they cannot be used from atomic context
* @base: GPIO base in the DEPRECATED global Linux GPIO numberspace, assigned
* at device creation time.
* @label: a descriptive name for the GPIO device, such as the part number
@@ -43,9 +45,7 @@
* requested, released or reconfigured
* @device_notifier: used to notify character device wait queues about the GPIO
* device being unregistered
- * @sem: protects the structure from a NULL-pointer dereference of @chip by
- * user-space operations when the device gets unregistered during
- * a hot-unplug event
+ * @srcu: protects the pointer to the underlying GPIO chip
* @pin_ranges: range of pins served by the GPIO driver
*
* This state container holds most of the runtime variable data
@@ -59,16 +59,17 @@ struct gpio_device {
int id;
struct device *mockdev;
struct module *owner;
- struct gpio_chip *chip;
+ struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
int base;
u16 ngpio;
+ bool can_sleep;
const char *label;
void *data;
struct list_head list;
struct blocking_notifier_head line_state_notifier;
struct blocking_notifier_head device_notifier;
- struct rw_semaphore sem;
+ struct srcu_struct srcu;
#ifdef CONFIG_PINCTRL
/*
@@ -134,9 +135,6 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
-extern spinlock_t gpio_lock;
-extern struct list_head gpio_devices;
-
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
/**
@@ -147,6 +145,7 @@ void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
* @label: Name of the consumer
* @name: Line name
* @hog: Pointer to the device node that hogs this line (if any)
+ * @srcu: SRCU struct protecting the label pointer.
*
* These are obtained using gpiod_get() and are preferable to the old
* integer-based handles.
@@ -178,16 +177,38 @@ struct gpio_desc {
#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
- const char *label;
+ const char __rcu *label;
/* Name of the GPIO */
const char *name;
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
+ struct srcu_struct srcu;
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+struct gpio_chip_guard {
+ struct gpio_device *gdev;
+ struct gpio_chip *gc;
+ int idx;
+};
+
+DEFINE_CLASS(gpio_chip_guard,
+ struct gpio_chip_guard,
+ srcu_read_unlock(&_T.gdev->srcu, _T.idx),
+ ({
+ struct gpio_chip_guard _guard;
+
+ _guard.gdev = desc->gdev;
+ _guard.idx = srcu_read_lock(&_guard.gdev->srcu);
+ _guard.gc = srcu_dereference(_guard.gdev->chip,
+ &_guard.gdev->srcu);
+
+ _guard;
+ }),
+ struct gpio_desc *desc)
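+
+/*
+ * Minimal usage sketch (illustrative only, assuming a valid descriptor), as
+ * used by the callers converted in this patch:
+ *
+ * CLASS(gpio_chip_guard, guard)(desc);
+ * if (!guard.gc)
+ * return -ENODEV;
+ *
+ * guard.gc remains valid (protected by the gpio_device's SRCU) until the
+ * guard goes out of scope.
+ */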
+
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
@@ -202,12 +223,21 @@ static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
return ret;
}
+struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags,
+ const char *label,
+ bool platform_lookup_allowed);
+
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags);
int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev);
+const char *gpiod_get_label(struct gpio_desc *desc);
/*
* Return the GPIO number of the passed descriptor relative to its chip
@@ -219,31 +249,32 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
/* With descriptor prefix */
-#define gpiod_emerg(desc, fmt, ...) \
- pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
- ##__VA_ARGS__)
-#define gpiod_crit(desc, fmt, ...) \
- pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_err(desc, fmt, ...) \
- pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_warn(desc, fmt, ...) \
- pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_info(desc, fmt, ...) \
- pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_dbg(desc, fmt, ...) \
- pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
- ##__VA_ARGS__)
+#define gpiod_err(desc, fmt, ...) \
+do { \
+ scoped_guard(srcu, &desc->srcu) { \
+ pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
+ gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define gpiod_warn(desc, fmt, ...) \
+do { \
+ scoped_guard(srcu, &desc->srcu) { \
+ pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
+ gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define gpiod_dbg(desc, fmt, ...) \
+do { \
+ scoped_guard(srcu, &desc->srcu) { \
+ pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
+ gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
+ } \
+} while (0)
/* With chip prefix */
-#define chip_emerg(gc, fmt, ...) \
- dev_emerg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_crit(gc, fmt, ...) \
- dev_crit(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
#define chip_err(gc, fmt, ...) \
dev_err(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
#define chip_warn(gc, fmt, ...) \
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2520db0b776e..182ed8f67850 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -19,8 +19,7 @@ menuconfig DRM
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
# device and dmabuf fd. Let's make sure that is available for our userspace.
select KCMP
- select VIDEO_CMDLINE
- select VIDEO_NOMODESET
+ select VIDEO
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -42,7 +41,7 @@ config DRM_MIPI_DSI
config DRM_DEBUG_MM
bool "Insert extra checks and debug info into the DRM range managers"
default n
- depends on DRM=y
+ depends on DRM
depends on STACKTRACE_SUPPORT
select STACKDEPOT
help
@@ -199,7 +198,7 @@ config DRM_TTM
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
- depends on DRM && KUNIT && MMU
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +206,8 @@ config DRM_TTM_KUNIT_TEST
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
- developers.
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
If in doubt, say "N".
@@ -289,19 +289,7 @@ config DRM_VGEM
as used by Mesa's software renderer for enhanced performance.
If M is selected the module will be called vgem.
-config DRM_VKMS
- tristate "Virtual KMS (EXPERIMENTAL)"
- depends on DRM && MMU
- select DRM_KMS_HELPER
- select DRM_GEM_SHMEM_HELPER
- select CRC32
- default n
- help
- Virtual Kernel Mode-Setting (VKMS) is used for testing or for
- running GPU in a headless machines. Choose this option to get
- a VKMS.
-
- If M is selected the module will be called vkms.
+source "drivers/gpu/drm/vkms/Kconfig"
source "drivers/gpu/drm/exynos/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 260e32ef7bae..4536c8ad0e11 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -98,7 +98,7 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
# add DF block
amdgpu-y += \
@@ -132,7 +132,8 @@ amdgpu-y += \
vega20_ih.o \
navi10_ih.o \
ih_v6_0.o \
- ih_v6_1.o
+ ih_v6_1.o \
+ ih_v7_0.o
# add PSP block
amdgpu-y += \
@@ -143,7 +144,8 @@ amdgpu-y += \
psp_v11_0_8.o \
psp_v12_0.o \
psp_v13_0.o \
- psp_v13_0_4.o
+ psp_v13_0_4.o \
+ psp_v14_0.o
# add DCE block
amdgpu-y += \
@@ -208,6 +210,7 @@ amdgpu-y += \
vcn_v4_0.o \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
+ vcn_v5_0_0.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -215,7 +218,8 @@ amdgpu-y += \
jpeg_v3_0.o \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
- jpeg_v4_0_5.o
+ jpeg_v4_0_5.o \
+ jpeg_v5_0_0.o
# add VPE block
amdgpu-y += \
@@ -233,7 +237,8 @@ amdgpu-y += \
athub_v1_0.o \
athub_v2_0.o \
athub_v2_1.o \
- athub_v3_0.o
+ athub_v3_0.o \
+ athub_v4_1_0.o
# add SMUIO block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 79827a6dcd7f..9c62552bec34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -107,6 +107,7 @@
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
@@ -114,14 +115,12 @@
#define MAX_GPU_INSTANCE 64
-struct amdgpu_gpu_instance
-{
+struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
int mgpu_fan_enabled;
};
-struct amdgpu_mgpu_info
-{
+struct amdgpu_mgpu_info {
struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
struct mutex mutex;
uint32_t num_gpu;
@@ -140,8 +139,7 @@ enum amdgpu_ss {
AMDGPU_SS_DRV_UNLOAD
};
-struct amdgpu_watchdog_timer
-{
+struct amdgpu_watchdog_timer {
bool timeout_fatal_disable;
uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
};
@@ -196,9 +194,10 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
-extern uint amdgpu_dm_abm_level;
+extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
@@ -1046,6 +1045,9 @@ struct amdgpu_device {
/* MCA */
struct amdgpu_mca mca;
+ /* ACA */
+ struct amdgpu_aca aca;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1095,6 +1097,7 @@ struct amdgpu_device {
long sdma_timeout;
long video_timeout;
long compute_timeout;
+ long psp_timeout;
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
@@ -1332,6 +1335,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
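+/* Illustrative example: AMDGPU_GET_REG_FIELD(0xabcd, 7, 4) extracts bits [7:4] and yields 0xc. */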
/*
* BIOS helpers.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
new file mode 100644
index 000000000000..493982f94649
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_aca.h"
+#include "amdgpu_ras.h"
+
+#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}
+
+typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data);
+
+struct aca_banks {
+ int nr_banks;
+ struct list_head list;
+};
+
+struct aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
+static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
+ ACA_BANK_HWID(SMU, 0x01, 0x01),
+ ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
+ ACA_BANK_HWID(UMC, 0x96, 0x00),
+};
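+
+/*
+ * For reference (expansion sketch of the macro above): ACA_BANK_HWID(SMU,
+ * 0x01, 0x01) expands to [ACA_HWIP_TYPE_SMU] = {0x01, 0x01}, i.e. a
+ * designated initializer indexed by the ACA hardware IP type.
+ */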
+
+static void aca_banks_init(struct aca_banks *banks)
+{
+ if (!banks)
+ return;
+
+ memset(banks, 0, sizeof(*banks));
+ INIT_LIST_HEAD(&banks->list);
+}
+
+static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank)
+{
+ struct aca_bank_node *node;
+
+ if (!bank)
+ return -EINVAL;
+
+ node = kvzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->bank, bank, sizeof(*bank));
+
+ INIT_LIST_HEAD(&node->node);
+ list_add_tail(&node->node, &banks->list);
+
+ banks->nr_banks++;
+
+ return 0;
+}
+
+static void aca_banks_release(struct aca_banks *banks)
+{
+ struct aca_bank_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &banks->list, node) {
+ list_del(&node->node);
+ kvfree(node);
+ }
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!smu_funcs || !smu_funcs->get_valid_aca_count)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->get_valid_aca_count(adev, type, count);
+}
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX_CTL},
+ {"STATUS", ACA_REG_IDX_STATUS},
+ {"ADDR", ACA_REG_IDX_ADDR},
+ {"MISC", ACA_REG_IDX_MISC0},
+ {"CONFIG", ACA_REG_IDX_CONFG},
+ {"IPID", ACA_REG_IDX_IPID},
+ {"SYND", ACA_REG_IDX_SYND},
+ {"DESTAT", ACA_REG_IDX_DESTAT},
+ {"DEADDR", ACA_REG_IDX_DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK},
+};
+
+static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank)
+{
+ int i;
+
+ dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+ /* plus 1 for output format, e.g. ACA[08/08]: xxxx */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type,
+ int start, int count,
+ struct aca_banks *banks)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+ struct aca_bank bank;
+ int i, max_count, ret;
+
+ if (!count)
+ return 0;
+
+ if (!smu_funcs || !smu_funcs->get_valid_aca_bank)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ max_count = smu_funcs->max_ue_bank_count;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ max_count = smu_funcs->max_ce_bank_count;
+ break;
+ case ACA_ERROR_TYPE_DEFERRED:
+ default:
+ return -EINVAL;
+ }
+
+ if (start + count >= max_count)
+ return -EINVAL;
+
+ count = min_t(int, count, max_count);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank);
+ if (ret)
+ return ret;
+
+ aca_smu_bank_dump(adev, i, count, &bank);
+
+ ret = aca_banks_add_bank(banks, &bank);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type)
+{
+ const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+ if (!aca_bank_hwip_is_matched(bank, handle->hwip))
+ return false;
+
+ if (!bank_ops->aca_bank_is_valid)
+ return true;
+
+ return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data);
+}
+
+static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error;
+
+ bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL);
+ if (!bank_error)
+ return NULL;
+
+ INIT_LIST_HEAD(&bank_error->node);
+ memcpy(&bank_error->info, info, sizeof(*info));
+
+ mutex_lock(&aerr->lock);
+ list_add_tail(&bank_error->node, &aerr->list);
+ mutex_unlock(&aerr->lock);
+
+ return bank_error;
+}
+
+static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error = NULL;
+ struct aca_bank_info *tmp_info;
+ bool found = false;
+
+ mutex_lock(&aerr->lock);
+ list_for_each_entry(bank_error, &aerr->list, node) {
+ tmp_info = &bank_error->info;
+ if (tmp_info->socket_id == info->socket_id &&
+ tmp_info->die_id == info->die_id) {
+ found = true;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&aerr->lock);
+
+ return found ? bank_error : NULL;
+}
+
+static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error)
+{
+ if (!aerr || !bank_error)
+ return;
+
+ list_del(&bank_error->node);
+ aerr->nr_errors--;
+
+ kvfree(bank_error);
+}
+
+static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error;
+
+ if (!aerr || !info)
+ return NULL;
+
+ bank_error = find_bank_error(aerr, info);
+ if (bank_error)
+ return bank_error;
+
+ return new_bank_error(aerr, info);
+}
+
+static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type,
+ struct aca_bank_report *report)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ struct aca_bank_error *bank_error;
+ struct aca_error *aerr;
+
+ if (!handle || !report)
+ return -EINVAL;
+
+ if (!report->count[type])
+ return 0;
+
+ aerr = &error_cache->errors[type];
+ bank_error = get_bank_error(aerr, &report->info);
+ if (!bank_error)
+ return -ENOMEM;
+
+ bank_error->count[type] += report->count[type];
+
+ return 0;
+}
+
+static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, struct aca_bank_report *report)
+{
+ const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+ if (!bank || !report)
+ return -EINVAL;
+
+ if (!bank_ops->aca_bank_generate_report)
+ return -EOPNOTSUPP;
+
+ memset(report, 0, sizeof(*report));
+ return bank_ops->aca_bank_generate_report(handle, bank, type,
+ report, handle->data);
+}
+
+static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ struct aca_bank_report report;
+ int ret;
+
+ ret = aca_generate_bank_report(handle, bank, type, &report);
+ if (ret)
+ return ret;
+
+ if (!report.count[type])
+ return 0;
+
+ ret = aca_log_errors(handle, type, &report);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
+ enum aca_error_type type, bank_handler_t handler, void *data)
+{
+ struct aca_handle *handle;
+ int ret;
+
+ if (list_empty(&mgr->list))
+ return 0;
+
+ list_for_each_entry(handle, &mgr->list, node) {
+ if (!aca_bank_is_valid(handle, bank, type))
+ continue;
+
+ ret = handler(handle, bank, type, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
+ enum aca_error_type type, bank_handler_t handler, void *data)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int ret;
+
+ if (!mgr || !banks)
+ return -EINVAL;
+
+ /* pre check to avoid unnecessary operations */
+ if (list_empty(&mgr->list) || list_empty(&banks->list))
+ return 0;
+
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+
+ ret = aca_dispatch_bank(mgr, bank, type, handler, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type,
+ bank_handler_t handler, void *data)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ struct aca_banks banks;
+ u32 count = 0;
+ int ret;
+
+ if (list_empty(&aca->mgr.list))
+ return 0;
+
+ /* NOTE: pmfw only supports UE and CE */
+ if (type == ACA_ERROR_TYPE_DEFERRED)
+ type = ACA_ERROR_TYPE_CE;
+
+ ret = aca_smu_get_valid_aca_count(adev, type, &count);
+ if (ret)
+ return ret;
+
+ if (!count)
+ return 0;
+
+ aca_banks_init(&banks);
+
+ ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks);
+ if (ret)
+ goto err_release_banks;
+
+ if (list_empty(&banks.list)) {
+ ret = 0;
+ goto err_release_banks;
+ }
+
+ ret = aca_dispatch_banks(&aca->mgr, &banks, type,
+ handler, data);
+ if (ret)
+ goto err_release_banks;
+
+err_release_banks:
+ aca_banks_release(&banks);
+
+ return ret;
+}
+
+static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct aca_bank_info *info;
+ struct amdgpu_smuio_mcm_config_info mcm_info;
+ u64 count;
+
+ if (type >= ACA_ERROR_TYPE_COUNT)
+ return -EINVAL;
+
+ count = bank_error->count[type];
+ if (!count)
+ return 0;
+
+ info = &bank_error->info;
+ mcm_info.die_id = info->die_id;
+ mcm_info.socket_id = info->socket_id;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+ break;
+ case ACA_ERROR_TYPE_CE:
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+ break;
+ case ACA_ERROR_TYPE_DEFERRED:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ struct aca_error *aerr = &error_cache->errors[type];
+ struct aca_bank_error *bank_error, *tmp;
+
+ mutex_lock(&aerr->lock);
+
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
+ list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) {
+ aca_log_aca_error_data(bank_error, type, err_data);
+ aca_bank_error_remove(aerr, bank_error);
+ }
+
+out_unlock:
+ mutex_unlock(&aerr->lock);
+
+ return 0;
+}
+
+static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
+ struct ras_err_data *err_data)
+{
+ int ret;
+
+ /* update aca bank to aca source error_cache first */
+ ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL);
+ if (ret)
+ return ret;
+
+ return aca_log_aca_error(handle, type, err_data);
+}
+
+static bool aca_handle_is_valid(struct aca_handle *handle)
+{
+ if (!handle->mask || !list_empty(&handle->node))
+ return false;
+
+ return true;
+}
+
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+ enum aca_error_type type, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+
+ if (!handle || !err_data)
+ return -EINVAL;
+
+ if (aca_handle_is_valid(handle))
+ return -EOPNOTSUPP;
+
+ if (!(BIT(type) & handle->mask))
+ return 0;
+
+ return __aca_get_error_data(adev, handle, type, err_data);
+}
+
+static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
+{
+ mutex_init(&aerr->lock);
+ INIT_LIST_HEAD(&aerr->list);
+ aerr->type = type;
+ aerr->nr_errors = 0;
+}
+
+static void aca_init_error_cache(struct aca_handle *handle)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ int type;
+
+ for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+ aca_error_init(&error_cache->errors[type], type);
+}
+
+static void aca_error_fini(struct aca_error *aerr)
+{
+ struct aca_bank_error *bank_error, *tmp;
+
+ mutex_lock(&aerr->lock);
+ list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
+ aca_bank_error_remove(aerr, bank_error);
+
+ mutex_destroy(&aerr->lock);
+}
+
+static void aca_fini_error_cache(struct aca_handle *handle)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ int type;
+
+ for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+ aca_error_fini(&error_cache->errors[type]);
+}
+
+static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle,
+ const char *name, const struct aca_info *ras_info, void *data)
+{
+ memset(handle, 0, sizeof(*handle));
+
+ handle->adev = adev;
+ handle->mgr = mgr;
+ handle->name = name;
+ handle->hwip = ras_info->hwip;
+ handle->mask = ras_info->mask;
+ handle->bank_ops = ras_info->bank_ops;
+ handle->data = data;
+ aca_init_error_cache(handle);
+
+ INIT_LIST_HEAD(&handle->node);
+ list_add_tail(&handle->node, &mgr->list);
+ mgr->nr_handles++;
+
+ return 0;
+}
+
+static ssize_t aca_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr);
+
+ /* NOTE: the aca cache will be auto-cleared once read, so the driver
+ * should unify the query entry point and forward requests to the ras query interface directly */
+ return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data);
+}
+
+static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle)
+{
+ struct device_attribute *aca_attr = &handle->aca_attr;
+
+ snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name);
+ aca_attr->show = aca_sysfs_read;
+ aca_attr->attr.name = handle->attr_name;
+ aca_attr->attr.mode = S_IRUGO;
+ sysfs_attr_init(&aca_attr->attr);
+
+ return sysfs_add_file_to_group(&adev->dev->kobj,
+ &aca_attr->attr,
+ "ras");
+}
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+ const char *name, const struct aca_info *ras_info, void *data)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ int ret;
+
+ if (!amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data);
+ if (ret)
+ return ret;
+
+ return add_aca_sysfs(adev, handle);
+}
+
+static void remove_aca_handle(struct aca_handle *handle)
+{
+ struct aca_handle_manager *mgr = handle->mgr;
+
+ aca_fini_error_cache(handle);
+ list_del(&handle->node);
+ mgr->nr_handles--;
+}
+
+static void remove_aca_sysfs(struct aca_handle *handle)
+{
+ struct amdgpu_device *adev = handle->adev;
+ struct device_attribute *aca_attr = &handle->aca_attr;
+
+ if (adev->dev->kobj.sd)
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &aca_attr->attr,
+ "ras");
+}
+
+void amdgpu_aca_remove_handle(struct aca_handle *handle)
+{
+ if (!handle || list_empty(&handle->node))
+ return;
+
+ remove_aca_sysfs(handle);
+ remove_aca_handle(handle);
+}
+
+static int aca_manager_init(struct aca_handle_manager *mgr)
+{
+ INIT_LIST_HEAD(&mgr->list);
+ mgr->nr_handles = 0;
+
+ return 0;
+}
+
+static void aca_manager_fini(struct aca_handle_manager *mgr)
+{
+ struct aca_handle *handle, *tmp;
+
+ list_for_each_entry_safe(handle, tmp, &mgr->list, node)
+ amdgpu_aca_remove_handle(handle);
+}
+
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev)
+{
+ return adev->aca.is_enabled;
+}
+
+int amdgpu_aca_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ int ret;
+
+ ret = aca_manager_init(&aca->mgr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void amdgpu_aca_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ aca_manager_fini(&aca->mgr);
+}
+
+int amdgpu_aca_reset(struct amdgpu_device *adev)
+{
+ amdgpu_aca_fini(adev);
+
+ return amdgpu_aca_init(adev);
+}
+
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ WARN_ON(aca->smu_funcs);
+ aca->smu_funcs = smu_funcs;
+}
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ if (!bank || !info)
+ return -EINVAL;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ info->hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ info->mcatype = ACA_REG__IPID__MCATYPE(ipid);
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4:4] = InstanceId[0:0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid);
+ instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
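+ /*
+ * Worked example (illustrative): with InstanceIdHi = 0x6 and InstanceIdLo
+ * bit 0 set, die_id = (0x6 >> 2) & 0x3 = 1 and
+ * socket_id = (1 << 2) | (0x6 & 0x3) = 6.
+ */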
+
+ return 0;
+}
+
+static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
+{
+ int error_code;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) {
+ error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
+ return error_code & 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* NOTE: the true error code is encoded in status.errorcode[0:7] */
+ error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+
+ return error_code & 0xff;
+}
+
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
+{
+ int i, error_code;
+
+ if (!bank || !err_codes)
+ return -EINVAL;
+
+ error_code = aca_bank_get_error_code(adev, bank);
+ for (i = 0; i < size; i++) {
+ if (err_codes[i] == error_code)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!smu_funcs || !smu_funcs->set_debug_mode)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->set_debug_mode(adev, en);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ int ret;
+
+ ret = amdgpu_ras_set_aca_debug_mode(adev, val ? true : false);
+ if (ret)
+ return ret;
+
+ dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off");
+
+ return 0;
+}
+
+static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx)
+{
+ struct aca_bank_info info;
+ int i, ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return;
+
+ seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE");
+ seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
+ idx, info.socket_id, info.die_id, info.hwid, info.mcatype);
+
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]);
+}
+
+struct aca_dump_context {
+ struct seq_file *m;
+ int idx;
+};
+
+static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ struct aca_dump_context *ctx = (struct aca_dump_context *)data;
+
+ aca_dump_entry(ctx->m, bank, type, ctx->idx++);
+
+ return handler_aca_log_bank_error(handle, bank, type, NULL);
+}
+
+static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct aca_dump_context context = {
+ .m = m,
+ .idx = 0,
+ };
+
+ return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context);
+}
+
+static int aca_dump_ce_show(struct seq_file *m, void *unused)
+{
+ return aca_dump_show(m, ACA_ERROR_TYPE_CE);
+}
+
+static int aca_dump_ce_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aca_dump_ce_show, inode->i_private);
+}
+
+static const struct file_operations aca_ce_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = aca_dump_ce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int aca_dump_ue_show(struct seq_file *m, void *unused)
+{
+ return aca_dump_show(m, ACA_ERROR_TYPE_UE);
+}
+
+static int aca_dump_ue_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aca_dump_ue_show, inode->i_private);
+}
+
+static const struct file_operations aca_ue_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = aca_dump_ue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n");
+#endif
+
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
+{
+#if defined(CONFIG_DEBUG_FS)
+ if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
+ return;
+
+ debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
+ debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops);
+ debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops);
+#endif
+}
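A brief usage sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM card <N>: the three files created above appear under the amdgpu debugfs directory (typically /sys/kernel/debug/dri/<N>); writing a non-zero value to aca_debug_mode switches the SMU into ACA debug mode via amdgpu_ras_set_aca_debug_mode(), and reading aca_ue_dump or aca_ce_dump walks the current banks through handler_aca_bank_dump(), printing one decoded entry per valid bank.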
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
new file mode 100644
index 000000000000..2da50e095883
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_ACA_H__
+#define __AMDGPU_ACA_H__
+
+#include <linux/list.h>
+
+#define ACA_MAX_REGS_COUNT (16)
+
+#define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG__STATUS__VAL(x) ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__STATUS__OVERFLOW(x) ACA_REG_FIELD(x, 62, 62)
+#define ACA_REG__STATUS__UC(x) ACA_REG_FIELD(x, 61, 61)
+#define ACA_REG__STATUS__EN(x) ACA_REG_FIELD(x, 60, 60)
+#define ACA_REG__STATUS__MISCV(x) ACA_REG_FIELD(x, 59, 59)
+#define ACA_REG__STATUS__ADDRV(x) ACA_REG_FIELD(x, 58, 58)
+#define ACA_REG__STATUS__PCC(x) ACA_REG_FIELD(x, 57, 57)
+#define ACA_REG__STATUS__ERRCOREIDVAL(x) ACA_REG_FIELD(x, 56, 56)
+#define ACA_REG__STATUS__TCC(x) ACA_REG_FIELD(x, 55, 55)
+#define ACA_REG__STATUS__SYNDV(x) ACA_REG_FIELD(x, 53, 53)
+#define ACA_REG__STATUS__CECC(x) ACA_REG_FIELD(x, 46, 46)
+#define ACA_REG__STATUS__UECC(x) ACA_REG_FIELD(x, 45, 45)
+#define ACA_REG__STATUS__DEFERRED(x) ACA_REG_FIELD(x, 44, 44)
+#define ACA_REG__STATUS__POISON(x) ACA_REG_FIELD(x, 43, 43)
+#define ACA_REG__STATUS__SCRUB(x) ACA_REG_FIELD(x, 40, 40)
+#define ACA_REG__STATUS__ERRCOREID(x) ACA_REG_FIELD(x, 37, 32)
+#define ACA_REG__STATUS__ADDRLSB(x) ACA_REG_FIELD(x, 29, 24)
+#define ACA_REG__STATUS__ERRORCODEEXT(x) ACA_REG_FIELD(x, 21, 16)
+#define ACA_REG__STATUS__ERRORCODE(x) ACA_REG_FIELD(x, 15, 0)
+
+#define ACA_REG__IPID__MCATYPE(x) ACA_REG_FIELD(x, 63, 48)
+#define ACA_REG__IPID__INSTANCEIDHI(x) ACA_REG_FIELD(x, 47, 44)
+#define ACA_REG__IPID__HARDWAREID(x) ACA_REG_FIELD(x, 43, 32)
+#define ACA_REG__IPID__INSTANCEIDLO(x) ACA_REG_FIELD(x, 31, 0)
+
+#define ACA_REG__MISC0__VALID(x) ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__MISC0__OVRFLW(x) ACA_REG_FIELD(x, 48, 48)
+#define ACA_REG__MISC0__ERRCNT(x) ACA_REG_FIELD(x, 43, 32)
+
+#define ACA_REG__SYND__ERRORINFORMATION(x) ACA_REG_FIELD(x, 17, 0)
+
+/* NOTE: The following codes refer to the smu header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define ACA_ERROR_UE_MASK BIT_MASK(ACA_ERROR_TYPE_UE)
+#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
+#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+
+enum aca_reg_idx {
+ ACA_REG_IDX_CTL = 0,
+ ACA_REG_IDX_STATUS = 1,
+ ACA_REG_IDX_ADDR = 2,
+ ACA_REG_IDX_MISC0 = 3,
+ ACA_REG_IDX_CONFG = 4,
+ ACA_REG_IDX_IPID = 5,
+ ACA_REG_IDX_SYND = 6,
+ ACA_REG_IDX_DESTAT = 8,
+ ACA_REG_IDX_DEADDR = 9,
+ ACA_REG_IDX_CTL_MASK = 10,
+ ACA_REG_IDX_COUNT = 16,
+};
+
+enum aca_hwip_type {
+ ACA_HWIP_TYPE_UNKNOW = -1,
+ ACA_HWIP_TYPE_PSP = 0,
+ ACA_HWIP_TYPE_UMC,
+ ACA_HWIP_TYPE_SMU,
+ ACA_HWIP_TYPE_PCS_XGMI,
+ ACA_HWIP_TYPE_COUNT,
+};
+
+enum aca_error_type {
+ ACA_ERROR_TYPE_INVALID = -1,
+ ACA_ERROR_TYPE_UE = 0,
+ ACA_ERROR_TYPE_CE,
+ ACA_ERROR_TYPE_DEFERRED,
+ ACA_ERROR_TYPE_COUNT
+};
+
+struct aca_bank {
+ u64 regs[ACA_MAX_REGS_COUNT];
+};
+
+struct aca_bank_node {
+ struct aca_bank bank;
+ struct list_head node;
+};
+
+struct aca_bank_info {
+ int die_id;
+ int socket_id;
+ int hwid;
+ int mcatype;
+};
+
+struct aca_bank_report {
+ struct aca_bank_info info;
+ u64 count[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_bank_error {
+ struct list_head node;
+ struct aca_bank_info info;
+ u64 count[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_error {
+ struct list_head list;
+ struct mutex lock;
+ enum aca_error_type type;
+ int nr_errors;
+};
+
+struct aca_handle_manager {
+ struct list_head list;
+ int nr_handles;
+};
+
+struct aca_error_cache {
+ struct aca_error errors[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_handle {
+ struct list_head node;
+ enum aca_hwip_type hwip;
+ struct amdgpu_device *adev;
+ struct aca_handle_manager *mgr;
+ struct aca_error_cache error_cache;
+ const struct aca_bank_ops *bank_ops;
+ struct device_attribute aca_attr;
+ char attr_name[64];
+ const char *name;
+ u32 mask;
+ void *data;
+};
+
+struct aca_bank_ops {
+ int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data);
+ bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ void *data);
+};
+
+struct aca_smu_funcs {
+ int max_ue_bank_count;
+ int max_ce_bank_count;
+ int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
+ int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count);
+ int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank);
+};
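For orientation, a hedged sketch of how an SMU backend might populate this callback table; all example_* identifiers and the bank counts are placeholders invented here, only the struct layout and amdgpu_aca_set_smu_funcs() come from this patch.

```c
#include "amdgpu.h"
#include "amdgpu_aca.h"

/* Hypothetical SMU-side implementation of the new ACA callbacks. */
static int example_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
	/* forward the request to the SMU message that toggles debug mode */
	return 0;
}

static int example_get_valid_aca_count(struct amdgpu_device *adev,
				       enum aca_error_type type, u32 *count)
{
	/* ask the SMU how many valid banks of this error type exist */
	*count = 0;
	return 0;
}

static int example_get_valid_aca_bank(struct amdgpu_device *adev,
				      enum aca_error_type type, int idx,
				      struct aca_bank *bank)
{
	/* fill bank->regs[] from the SMU-provided bank dump for entry idx */
	memset(bank->regs, 0, sizeof(bank->regs));
	return 0;
}

static const struct aca_smu_funcs example_aca_smu_funcs = {
	.max_ue_bank_count = 12,
	.max_ce_bank_count = 12,
	.set_debug_mode = example_set_debug_mode,
	.get_valid_aca_count = example_get_valid_aca_count,
	.get_valid_aca_bank = example_get_valid_aca_bank,
};
```

A backend would then register the table with amdgpu_aca_set_smu_funcs(adev, &example_aca_smu_funcs), after which amdgpu_aca_smu_set_debug_mode() and the bank queries become functional.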
+
+struct amdgpu_aca {
+ struct aca_handle_manager mgr;
+ const struct aca_smu_funcs *smu_funcs;
+ bool is_enabled;
+};
+
+struct aca_info {
+ enum aca_hwip_type hwip;
+ const struct aca_bank_ops *bank_ops;
+ u32 mask;
+};
+
+int amdgpu_aca_init(struct amdgpu_device *adev);
+void amdgpu_aca_fini(struct amdgpu_device *adev);
+int amdgpu_aca_reset(struct amdgpu_device *adev);
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs);
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev);
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info);
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size);
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+ const char *name, const struct aca_info *aca_info, void *data);
+void amdgpu_aca_remove_handle(struct aca_handle *handle);
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+ enum aca_error_type type, void *data);
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en);
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index cc21ed67a330..7099ff9cf8c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1528,6 +1528,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
*/
void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
{
+ if (adev->in_runpm)
+ return;
+
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 41db030ddc4e..f5f2945711be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -742,9 +742,15 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
}
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
+bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
{
- amdgpu_umc_poison_handler(adev, reset);
+ return amdgpu_ras_get_fed_status(adev);
+}
+
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, bool reset)
+{
+ amdgpu_umc_poison_handler(adev, block, reset);
}
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 27c61c535e29..0ef223c2affb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -193,6 +193,9 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -218,6 +221,13 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
{
return 0;
}
+static inline
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
+{
+ return 0;
+}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
@@ -310,7 +320,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence __rcu **ef);
@@ -326,7 +336,8 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- bool reset);
+ enum amdgpu_ras_block block, bool reset);
+bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 231fd927dcfb..14dc9d2d8d53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -426,9 +426,9 @@ validate_fail:
return ret;
}
-static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
- uint32_t domain,
- struct dma_fence *fence)
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
{
int ret = amdgpu_bo_reserve(bo, false);
@@ -464,13 +464,15 @@ static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
* again. Page directories are only updated after updating page
* tables.
*/
-static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket)
{
struct amdgpu_bo *pd = vm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
+ ret = amdgpu_vm_validate(adev, vm, ticket,
+ amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("failed to validate PT BOs\n");
return ret;
@@ -1310,14 +1312,15 @@ update_gpuvm_pte_failed:
return ret;
}
-static int process_validate_vms(struct amdkfd_process_info *process_info)
+static int process_validate_vms(struct amdkfd_process_info *process_info,
+ struct ww_acquire_ctx *ticket)
{
struct amdgpu_vm *peer_vm;
int ret;
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
- ret = vm_validate_pt_pd_bos(peer_vm);
+ ret = vm_validate_pt_pd_bos(peer_vm, ticket);
if (ret)
return ret;
}
@@ -1402,7 +1405,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
ret = amdgpu_bo_reserve(vm->root.bo, true);
if (ret)
goto reserve_pd_fail;
- ret = vm_validate_pt_pd_bos(vm);
+ ret = vm_validate_pt_pd_bos(vm, NULL);
if (ret) {
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
@@ -2043,7 +2046,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
- ret = vm_validate_pt_pd_bos(avm);
+ ret = vm_validate_pt_pd_bos(avm, NULL);
if (unlikely(ret))
goto out_unreserve;
@@ -2136,7 +2139,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
goto unreserve_out;
}
- ret = vm_validate_pt_pd_bos(avm);
+ ret = vm_validate_pt_pd_bos(avm, NULL);
if (unlikely(ret))
goto unreserve_out;
@@ -2186,13 +2189,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
/**
* amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
- * @adev: Device to which allocated BO belongs
* @bo: Buffer object to be mapped
*
* Before return, bo reference count is incremented. To release the reference and unpin/
* unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
*/
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
int ret;
@@ -2634,7 +2636,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
}
- ret = process_validate_vms(process_info);
+ ret = process_validate_vms(process_info, NULL);
if (ret)
goto unreserve_out;
@@ -2894,11 +2896,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate PDs and PTs */
- ret = process_validate_vms(process_info);
- if (ret)
- goto validate_map_fail;
-
/* Validate BOs and map them to GPUVM (update VM page tables). */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
@@ -2949,6 +2946,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret)
+ goto validate_map_fail;
+
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
@@ -3020,7 +3024,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
&process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
}
- /* Attach eviction fence to PD / PT BOs */
+ /* Attach eviction fence to PD / PT BOs and DMABuf imports */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *bo = peer_vm->root.bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index dce9e7d5e4ec..52b12c1718eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1018,7 +1018,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1038,7 +1039,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1056,7 +1058,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1067,7 +1070,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1109,7 +1113,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1151,7 +1156,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
@@ -1205,7 +1211,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -1214,7 +1221,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+ sizeof(args));
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fb2681dd6b33..6857c586ded7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -941,5 +941,6 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
return -EINVAL;
}
- return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
+ return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
+ sizeof(asic_init_ps_v2_1));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index c7eb2caec65a..649b5530d8ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -36,7 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address);
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6adeddfb3d56..0a4b09709cfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -952,10 +952,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
- amdgpu_cs_bo_validate, p);
+ r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
+ amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ DRM_ERROR("amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 796fa6f1420b..cfdf558b48b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -28,9 +28,8 @@
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
- uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+ uint64_t addr = AMDGPU_VA_RESERVED_CSA_START(adev);
- addr -= AMDGPU_VA_RESERVED_SIZE;
addr = amdgpu_gmc_sign_extend(addr);
return addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 1afbb2e932c6..f5d0fa207a88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1782,9 +1782,14 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
list_for_each_entry(file, &dev->filelist, lhead) {
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_task_info *ti;
+
+ ti = amdgpu_vm_get_task_info_vm(vm);
+ if (ti) {
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ amdgpu_vm_put_task_info(ti);
+ }
- seq_printf(m, "pid:%d\tProcess:%s ----------\n",
- vm->task_info.pid, vm->task_info.process_name);
r = amdgpu_bo_reserve(vm->root.bo, true);
if (r)
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 94bdb5fa6ebc..1e9454e6e4cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -96,6 +96,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
+#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
+#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
+#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
static const struct drm_driver amdgpu_kms_driver;
@@ -781,12 +784,22 @@ u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
void __iomem *pcie_index_hi_offset;
void __iomem *pcie_data_offset;
- pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
- pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
- if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
- pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
- else
+ if (unlikely(!adev->nbio.funcs)) {
+ pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
+ pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
+ } else {
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ }
+
+ if (reg_addr >> 32) {
+ if (unlikely(!adev->nbio.funcs))
+ pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
+ else
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+ } else {
pcie_index_hi = 0;
+ }
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -1218,8 +1231,6 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
- /* TODO: check the return val and stop device initialization if boot fails */
- amdgpu_psp_query_boot_status(adev);
return ret;
} else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1442,6 +1453,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
+ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
+ DRM_WARN("System can't access extended configuration space,please check!!\n");
+
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
(pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
@@ -4025,8 +4040,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
- if (!adev->reset_domain)
- return -ENOMEM;
+ if (!adev->reset_domain) {
+ r = -ENOMEM;
+ goto unmap_memory;
+ }
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
@@ -4036,20 +4053,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return r;
+ goto unmap_memory;
}
+ amdgpu_device_set_mcbp(adev);
+
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- return r;
-
- amdgpu_device_set_mcbp(adev);
+ goto unmap_memory;
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
- return r;
+ goto unmap_memory;
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4059,7 +4076,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->gmc.xgmi.supported) {
r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
- return r;
+ goto unmap_memory;
}
/* enable PCIE atomic ops */
@@ -4328,6 +4345,8 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
+unmap_memory:
+ iounmap(adev->rmmio);
return r;
}
@@ -5306,6 +5325,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
+ amdgpu_ras_set_fed(tmp_adev, false);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
@@ -5686,6 +5706,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
} else {
@@ -6107,6 +6128,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
+ struct amdgpu_hive_info *hive;
+ int hive_ras_recovery = 0;
+ struct amdgpu_ras *ras;
+
+ /* PCI error slot reset should be skipped during RAS recovery */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+ ras = amdgpu_ras_get_context(adev);
+ if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
+ ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ return PCI_ERS_RESULT_RECOVERED;
DRM_INFO("PCI error: slot reset callback!!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index c7d60dd0fb97..a07e4b87d4ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -27,6 +27,7 @@
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
+#include "amdgpu_ras.h"
#include "soc15.h"
#include "gfx_v9_0.h"
@@ -60,20 +61,24 @@
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
+#include "nbif_v6_3_1.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
+#include "hdp_v7_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
+#include "ih_v7_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
+#include "lsdma_v7_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
@@ -92,12 +97,15 @@
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
+#include "vcn_v5_0_0.h"
+#include "jpeg_v5_0_0.h"
#include "amdgpu_vpe.h"
#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
#define mmMM_INDEX 0x0
@@ -518,7 +526,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
out:
kfree(adev->mman.discovery_bin);
adev->mman.discovery_bin = NULL;
-
+ if ((amdgpu_discovery != 2) &&
+ (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+ amdgpu_ras_query_boot_status(adev, 4);
return r;
}
@@ -1278,11 +1288,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
* 0b10 : encode is disabled
* 0b01 : decode is disabled
*/
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
- ip->revision & 0xc0;
- ip->revision &= ~0xc0;
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
(1U << ip->instance_number);
@@ -1293,6 +1302,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->vcn.num_vcn_inst + 1,
AMDGPU_MAX_VCN_INSTANCES);
}
+ ip->revision &= ~0xc0;
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
@@ -1310,6 +1320,15 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
}
+ if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
+ if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
+ adev->vpe.num_instances++;
+ else
+ dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
+ adev->vpe.num_instances + 1,
+ AMDGPU_MAX_VPE_INSTANCES);
+ }
+
if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
adev->gmc.num_umc++;
adev->umc.node_inst_num++;
@@ -1674,6 +1693,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
default:
@@ -1721,6 +1741,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
default:
@@ -1763,6 +1784,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
break;
+ case IP_VERSION(7, 0, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
@@ -1812,11 +1836,16 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
@@ -1917,6 +1946,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev);
else
@@ -1986,6 +2016,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
default:
@@ -2033,6 +2064,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -2119,9 +2151,14 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
break;
case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
break;
+ case IP_VERSION(5, 0, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2160,6 +2197,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2185,6 +2223,7 @@ static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
break;
default:
@@ -2438,6 +2477,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->family = AMDGPU_FAMILY_GC_11_0_1;
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
adev->family = AMDGPU_FAMILY_GC_11_5_0;
break;
default:
@@ -2457,6 +2497,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
adev->flags |= AMD_IS_APU;
break;
default:
@@ -2493,6 +2534,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
adev->nbio.funcs = &nbio_v7_11_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
break;
@@ -2528,6 +2570,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v7_7_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
break;
+ case IP_VERSION(6, 3, 1):
+ adev->nbio.funcs = &nbif_v6_3_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
+ break;
default:
break;
}
@@ -2560,6 +2606,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
adev->hdp.funcs = &hdp_v6_0_funcs;
break;
+ case IP_VERSION(7, 0, 0):
+ adev->hdp.funcs = &hdp_v7_0_funcs;
+ break;
default:
break;
}
@@ -2624,6 +2673,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 8):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
default:
@@ -2637,6 +2687,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ adev->lsdma.funcs = &lsdma_v7_0_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b8fbe97efe1d..3ecc7ef95172 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1350,14 +1350,6 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- if (adev->dc_enabled) {
- adev->mode_info.abm_level_property =
- drm_property_create_range(adev_to_drm(adev), 0,
- "abm level", 0, 4);
- if (!adev->mode_info.abm_level_property)
- return -ENOMEM;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index decbbe3d4f06..055ba2ea4c12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -377,6 +377,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
+ /* FIXME: This should be after the "if", but needs a fix to make sure
+ * DMABuf imports are initialized in the right VM list.
+ */
+ amdgpu_vm_bo_invalidate(adev, bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 586f4d03039d..15b188aaf681 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -199,6 +199,7 @@ int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
@@ -367,7 +368,7 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
* Setting the value to 0 disables this functionality.
* Setting the value to -2 is auto enabled with power down when displays are attached.
*/
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = autowith displays)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
/**
@@ -594,7 +595,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
#ifdef CONFIG_DRM_AMDGPU_SI
#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support = 0;
+int amdgpu_si_support;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_si_support = 1;
@@ -613,7 +614,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444);
#ifdef CONFIG_DRM_AMDGPU_CIK
#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support = 0;
+int amdgpu_cik_support;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
@@ -849,12 +850,13 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
* the ABM algorithm, with 1 being the least reduction and 4 being the most
* reduction.
*
- * Defaults to 0, or disabled. Userspace can still override this level later
- * after boot.
+ * Defaults to -1, or auto. Userspace can only override this level after
+ * boot if it's set to auto.
*/
-uint amdgpu_dm_abm_level;
-MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
-module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+int amdgpu_dm_abm_level = -1;
+MODULE_PARM_DESC(abmlevel,
+ "ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
+module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);
int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
@@ -883,6 +885,32 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing in order to achieve a
+ * seamless mode change experience when setting a FreeSync-supported mode for
+ * which a full modeset is not needed.
+ *
+ * When this feature is enabled, the Display Core adds a set of modes derived
+ * from the base FreeSync video mode to the corresponding connector's mode
+ * list, based on commonly used refresh rates and the VRR range of the
+ * connected display. From the userspace perspective, this provides a seamless
+ * experience when switching between different refresh rates at the same
+ * resolution. Additionally, userspace applications such as video players can
+ * read this mode list and change the refresh rate to match the video frame
+ * rate. Finally, userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
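As a usage note, the optimization can then be enabled at load time like any other amdgpu module parameter, for example by booting with amdgpu.freesync_video=1 on the kernel command line or passing freesync_video=1 to modprobe; the default remains 0 (off).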
+
+/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
@@ -2665,7 +2693,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
adev->in_runpm = true;
- if (amdgpu_device_supports_px(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/*
@@ -2675,7 +2703,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
* platforms.
* TODO: this may be also needed for PX capable platform.
*/
- if (amdgpu_device_supports_boco(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
adev->mp1_state = PP_MP1_STATE_UNLOAD;
ret = amdgpu_device_prepare(drm_dev);
@@ -2684,15 +2712,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_device_suspend(drm_dev, false);
if (ret) {
adev->in_runpm = false;
- if (amdgpu_device_supports_boco(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
adev->mp1_state = PP_MP1_STATE_NONE;
return ret;
}
- if (amdgpu_device_supports_boco(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
adev->mp1_state = PP_MP1_STATE_NONE;
- if (amdgpu_device_supports_px(drm_dev)) {
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
@@ -2701,9 +2729,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
- } else if (amdgpu_device_supports_boco(drm_dev)) {
+ } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
/* nothing to do */
- } else if (amdgpu_device_supports_baco(drm_dev)) {
+ } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
amdgpu_device_baco_enter(drm_dev);
}
@@ -2726,7 +2754,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!pci_device_is_present(adev->pdev))
adev->no_hw_access = true;
- if (amdgpu_device_supports_px(drm_dev)) {
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* Only need to handle PCI state in the driver for ATPX
@@ -2738,22 +2766,22 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (ret)
return ret;
pci_set_master(pdev);
- } else if (amdgpu_device_supports_boco(drm_dev)) {
+ } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
pci_set_master(pdev);
- } else if (amdgpu_device_supports_baco(drm_dev)) {
+ } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
- if (amdgpu_device_supports_px(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
pci_disable_device(pdev);
return ret;
}
- if (amdgpu_device_supports_px(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0;
@@ -2763,8 +2791,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- int ret = 1;
+ int ret;
if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
pm_runtime_forbid(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 5706b282a0c7..c7df7fa3459f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -97,6 +97,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
stats.requested_visible_vram/1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats.requested_gtt/1024UL);
+ drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
+ drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
+ drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 70bff8cecfda..10832b470448 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -61,9 +61,7 @@ static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
{
- amdgpu_fence_slab = kmem_cache_create(
- "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
if (!amdgpu_fence_slab)
return -ENOMEM;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 49a5f1c73b3e..67c234bcf89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -187,7 +187,40 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
else
++bo_va->ref_count;
amdgpu_bo_unreserve(abo);
- return 0;
+
+ /* Validate and add eviction fence to DMABuf imports with dynamic
+ * attachment in compute VMs. Re-validation will be done by
+ * amdgpu_vm_validate. Fences are on the reservation shared with the
+ * export, which is currently required to be validated and fenced
+ * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
+ *
+ * Nested locking below for the case that a GEM object is opened in
+ * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
+ * but not for export, this is a different lock class that cannot lead to
+ * circular lock dependencies.
+ */
+ if (!vm->is_compute_context || !vm->process_info)
+ return 0;
+ if (!obj->import_attach ||
+ !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+ return 0;
+ mutex_lock_nested(&vm->process_info->lock, 1);
+ if (!WARN_ON(!vm->process_info->eviction_fence)) {
+ r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
+ &vm->process_info->eviction_fence->base);
+ if (r) {
+ struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
+
+ dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
+ if (ti) {
+ dev_warn(adev->dev, "pid %d\n", ti->pid);
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+ }
+ mutex_unlock(&vm->process_info->lock);
+
+ return r;
}
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
@@ -682,10 +715,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
uint64_t vm_size;
int r = 0;
- if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
+ if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
dev_dbg(dev->dev,
"va_address 0x%llx is in reserved area 0x%llx\n",
- args->va_address, AMDGPU_VA_RESERVED_SIZE);
+ args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
return -EINVAL;
}
@@ -701,7 +734,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
- vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_TOP;
if (args->va_address + args->map_size > vm_size) {
dev_dbg(dev->dev,
"va_address 0x%llx is in top reserved area 0x%llx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6ddc8e3360e2..f8b48fd93108 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -304,11 +304,11 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
return -EINVAL;
}
-int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq, int xcc_id)
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_irq_src *irq = &kiq->irq;
+ struct amdgpu_ring *ring = &kiq->ring;
int r = 0;
spin_lock_init(&kiq->ring_lock);
@@ -329,7 +329,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring->no_scheduler = true;
- sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
+ snprintf(ring->name, sizeof(ring->name), "kiq_%d.%d.%d.%d",
+ xcc_id, ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
@@ -642,8 +643,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
- kiq->pmf->kiq_map_queues(kiq_ring,
- &adev->gfx.compute_ring[j]);
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.compute_ring[j]);
}
r = amdgpu_ring_test_helper(kiq_ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f23bafec71c5..8fcf889ddce9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -471,9 +471,7 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
unsigned max_sh);
-int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq, int xcc_id);
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id);
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 55784a9f26c4..be4629cdac04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -52,7 +52,7 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
struct amdgpu_bo_param bp;
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
- uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift;
+ uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
memset(&bp, 0, sizeof(bp));
bp.size = PAGE_ALIGN((npdes + 1) * 8);
@@ -746,6 +746,59 @@ error_unlock_reset:
return r;
}
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_inst)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
+ struct amdgpu_ring *ring = &kiq->ring;
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+
+ if (adev->mes.ring.sched.ready) {
+ amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
+ ref, mask);
+ return;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+ ref, mask);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for IRQ context */
+ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq:
+ dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
/**
* amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
* @adev: amdgpu_device pointer
@@ -790,6 +843,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index e699d1ca8deb..17f40ea1104b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -417,6 +417,10 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst);
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_inst);
extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6aa3b1d845ab..8b512dc28df8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -131,7 +131,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
bool need_ctx_switch;
- unsigned int patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
@@ -139,10 +138,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
bool secure, init_shadow;
u64 shadow_va, csa_va, gds_va;
int vmid = AMDGPU_JOB_GET_VMID(job);
+ bool need_pipe_sync = false;
+ unsigned int cond_exec;
unsigned int i;
int r = 0;
- bool need_pipe_sync = false;
if (num_ibs == 0)
return -EINVAL;
@@ -228,7 +228,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
init_shadow, vmid);
if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ cond_exec = amdgpu_ring_init_cond_exec(ring,
+ ring->cond_exe_gpu_addr);
amdgpu_device_flush_hdp(adev, ring);
@@ -278,16 +279,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
- if (ring->funcs->emit_gfx_shadow) {
+ if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) {
amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
-
- if (ring->funcs->init_cond_exec) {
- unsigned int ce_offset = ~0;
-
- ce_offset = amdgpu_ring_init_cond_exec(ring);
- if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, ce_offset);
- }
+ amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
r = amdgpu_fence_emit(ring, f, job, fence_flags);
@@ -302,8 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
- if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
if (vm && ring->funcs->emit_switch_buffer)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index ddd0891da116..3d7fcdeaf8cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -62,9 +62,8 @@ int amdgpu_pasid_alloc(unsigned int bits)
int pasid = -EINVAL;
for (bits = min(bits, 31U); bits > 0; bits--) {
- pasid = ida_simple_get(&amdgpu_pasid_ida,
- 1U << (bits - 1), 1U << bits,
- GFP_KERNEL);
+ pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
+ (1U << bits) - 1, GFP_KERNEL);
if (pasid != -ENOSPC)
break;
}
@@ -82,7 +81,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
- ida_simple_remove(&amdgpu_pasid_ida, pasid);
+ ida_free(&amdgpu_pasid_ida, pasid);
}
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 71a5cf37b472..4b3000c21ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -35,7 +35,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info ti;
+ struct amdgpu_task_info *ti;
struct amdgpu_device *adev = ring->adev;
int idx;
int r;
@@ -48,7 +48,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
- memset(&ti, 0, sizeof(struct amdgpu_task_info));
+
adev->job_hang = true;
if (amdgpu_gpu_recovery &&
@@ -58,12 +58,16 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
goto exit;
}
- amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
- job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
- ring->fence_drv.sync_seq);
- DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
- ti.process_name, ti.tgid, ti.task_name, ti.pid);
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+
+ ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
+ if (ti) {
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ amdgpu_vm_put_task_info(ti);
+ }
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 2ff2897fd1db..6df99cb00d9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -36,10 +36,35 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
+ int i, r;
+
INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
mutex_init(&adev->jpeg.jpeg_pg_lock);
atomic_set(&adev->jpeg.total_submission_cnt, 0);
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG))
+ adev->jpeg.indirect_sram = true;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (adev->jpeg.indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->jpeg.inst[i].dpg_sram_bo,
+ &adev->jpeg.inst[i].dpg_sram_gpu_addr,
+ &adev->jpeg.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev,
+ "JPEG %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
+ }
+ }
+
return 0;
}
@@ -51,6 +76,11 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
if (adev->jpeg.harvest_config & (1 << i))
continue;
+ amdgpu_bo_free_kernel(
+ &adev->jpeg.inst[i].dpg_sram_bo,
+ &adev->jpeg.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->jpeg.inst[i].dpg_sram_cpu_addr);
+
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
@@ -210,12 +240,15 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else {
r = 0;
}
+
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
+ if (amdgpu_emu_mode == 1)
+ udelay(10);
}
if (i >= adev->usec_timeout)
@@ -296,3 +329,16 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+
+int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id)
+{
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = AMDGPU_UCODE_ID_JPEG_RAM,
+ .mc_addr = adev->jpeg.inst[inst_idx].dpg_sram_gpu_addr,
+ .ucode_size = ((uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr),
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index ffe47e9f5bf2..aea31d61d991 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -32,6 +32,34 @@
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+#define WREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ JPEG, GET_INST(JPEG, inst_idx), \
+ mmUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
+ indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } else { \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
+
+#define RREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, mask_en) \
+ ({ \
+ WREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \
+ })
+
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
@@ -41,6 +69,11 @@ struct amdgpu_jpeg_inst {
struct amdgpu_irq_src irq;
struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_jpeg_reg external;
+ struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
+ void *dpg_sram_cpu_addr;
+ uint64_t dpg_sram_gpu_addr;
+ uint32_t *dpg_sram_curr_addr;
uint8_t aid_id;
};
@@ -63,6 +96,7 @@ struct amdgpu_jpeg {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
+ bool indirect_sram;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -82,5 +116,7 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bf4f48fe438d..a2df3025a754 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -894,14 +894,14 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
- vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_TOP;
/* Older VCE FW versions are buggy and can handle only 40bits */
if (adev->vce.fw_version &&
adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
vm_size = min(vm_size, 1ULL << 40);
- dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+ dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM;
dev_info->virtual_address_max =
min(vm_size, AMDGPU_GMC_HOLE_START);
@@ -1114,6 +1114,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
ui32 >>= 8;
break;
+ case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER:
+ /* get input GPU power */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 >>= 8;
+ break;
case AMDGPU_INFO_SENSOR_VDDNB:
/* get VDDNB in millivolts */
if (amdgpu_dpm_read_sensor(adev,
@@ -1370,6 +1379,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto error_vm;
}
+ r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
+ if (r)
+ goto error_vm;
+
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 59fafb8392e0..24ad4b97177b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -27,6 +27,16 @@
#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"
+static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
+ uint64_t mc_status)
+{
+ if (adev->umc.ras->check_ecc_err_status)
+ return adev->umc.ras->check_ecc_err_status(adev,
+ AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);
+
+ return false;
+}
+
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
@@ -202,16 +212,16 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
{
- dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n");
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_STATUS]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_ADDR]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_MISC0]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_IPID]);
- dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n",
+ dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
idx, entry->regs[MCA_REG_IDX_SYND]);
}
@@ -256,9 +266,14 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
if (type == AMDGPU_MCA_ERROR_TYPE_UE)
amdgpu_ras_error_statistic_ue_count(err_data,
&mcm_info, &err_addr, (uint64_t)count);
- else
- amdgpu_ras_error_statistic_ce_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ else {
+ if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
+ amdgpu_ras_error_statistic_de_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
+ else
+ amdgpu_ras_error_statistic_ce_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
+ }
}
out_mca_release:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index b399f1b62887..b964110ed1e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -65,6 +65,7 @@ enum amdgpu_mca_ip {
enum amdgpu_mca_error_type {
AMDGPU_MCA_ERROR_TYPE_UE = 0,
AMDGPU_MCA_ERROR_TYPE_CE,
+ AMDGPU_MCA_ERROR_TYPE_DE,
};
struct amdgpu_mca_ras_block {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index da48b6da0107..a98e03e0a51f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1398,7 +1398,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
goto error_fini;
}
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
+ ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
if (r) {
DRM_ERROR("failed to map ctx meta data\n");
@@ -1565,9 +1565,9 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
-
- debugfs_create_file("amdgpu_mes_event_log", 0444, root,
- adev, &amdgpu_debugfs_mes_event_log_fops);
+ if (adev->enable_mes)
+ debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+ adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2e4911050cc5..1fe21a70ddd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -324,8 +324,6 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
- /* Adaptive Backlight Modulation (power feature) */
- struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 51ca544a7094..d085687a47ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,14 +53,6 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
- uint64_t *count1)
-{
- if (adev->nbio.funcs->get_pcie_usage)
- adev->nbio.funcs->get_pcie_usage(adev, count0, count1);
-
-}
-
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 65e35059de40..7b8c03be1d9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -102,8 +102,6 @@ struct amdgpu_nbio_funcs {
u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
u32 *supp_modes);
u64 (*get_pcie_replay_count)(struct amdgpu_device *adev);
- void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
- uint64_t *count1);
};
struct amdgpu_nbio {
@@ -116,7 +114,6 @@ struct amdgpu_nbio {
};
int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
-void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 425cebcc5cbf..010b0cb7693c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -220,9 +220,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
placement->num_placement = c;
placement->placement = places;
-
- placement->num_busy_placement = c;
- placement->busy_placement = places;
}
/**
@@ -1276,25 +1273,36 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
uint64_t size = amdgpu_bo_size(bo);
+ struct drm_gem_object *obj;
unsigned int domain;
+ bool shared;
/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
return;
+ obj = &bo->tbo.base;
+ shared = drm_gem_object_is_shared_for_memory_stats(obj);
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
stats->visible_vram += size;
+ if (shared)
+ stats->vram_shared += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
stats->gtt += size;
+ if (shared)
+ stats->gtt_shared += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
stats->cpu += size;
+ if (shared)
+ stats->cpu_shared += size;
break;
}
@@ -1397,8 +1405,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
- abo->placement.num_busy_placement = 1;
- abo->placement.busy_placement = &abo->placements[1];
+ abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a3ea8a82db23..be679c42b0b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -138,12 +138,18 @@ struct amdgpu_bo_vm {
struct amdgpu_mem_stats {
/* current VRAM usage, includes visible VRAM */
uint64_t vram;
+ /* current shared VRAM usage, includes visible VRAM */
+ uint64_t vram_shared;
/* current visible VRAM usage */
uint64_t visible_vram;
/* current GTT usage */
uint64_t gtt;
+ /* current shared GTT usage */
+ uint64_t gtt_shared;
/* current system memory usage */
uint64_t cpu;
+ /* current shared system memory usage */
+ uint64_t cpu_shared;
/* sum of evicted buffers, includes visible VRAM */
uint64_t evicted_vram;
/* sum of evicted buffers due to CPU access */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0328616473f8..3c2b1413058b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,6 +38,7 @@
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
+#include "psp_v14_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
@@ -162,20 +163,26 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
+ psp->autoload_supported = true;
+ psp->boot_time_tmr = true;
+
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
psp_v3_1_set_psp_funcs(psp);
psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
psp_v10_0_set_psp_funcs(psp);
psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 7):
@@ -188,15 +195,20 @@ static int psp_early_init(void *handle)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
- psp->autoload_supported = true;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 1):
psp_v12_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 2):
+ psp->boot_time_tmr = false;
+ fallthrough;
case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
@@ -204,25 +216,31 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
psp_v13_0_set_psp_funcs(psp);
- psp->autoload_supported = true;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(11, 0, 8):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
psp_v11_0_8_set_psp_funcs(psp);
- psp->autoload_supported = false;
}
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
- psp->autoload_supported = true;
adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ psp->boot_time_tmr = false;
break;
case IP_VERSION(13, 0, 4):
psp_v13_0_4_set_psp_funcs(psp);
- psp->autoload_supported = true;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ psp_v14_0_set_psp_funcs(psp);
break;
default:
return -EINVAL;
@@ -230,6 +248,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
+ adev->psp_timeout = 20000;
+
psp_check_pmfw_centralized_cstate_management(psp);
if (amdgpu_sriov_vf(adev))
@@ -291,21 +311,22 @@ static int psp_memory_training_init(struct psp_context *psp)
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
- DRM_DEBUG("memory training is not supported!\n");
+ dev_dbg(psp->adev->dev, "memory training is not supported!\n");
return 0;
}
ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
if (ctx->sys_cache == NULL) {
- DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
ret = -ENOMEM;
goto Err_out;
}
- DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
- ctx->train_data_size,
- ctx->p2c_train_data_offset,
- ctx->c2p_train_data_offset);
+ dev_dbg(psp->adev->dev,
+ "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
return 0;
@@ -407,7 +428,7 @@ static int psp_sw_init(void *handle)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
- DRM_ERROR("Failed to allocate memory to command buffer!\n");
+ dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
ret = -ENOMEM;
}
@@ -454,13 +475,13 @@ static int psp_sw_init(void *handle)
if (mem_training_ctx->enable_mem_training) {
ret = psp_memory_training_init(psp);
if (ret) {
- DRM_ERROR("Failed to initialize memory training!\n");
+ dev_err(adev->dev, "Failed to initialize memory training!\n");
return ret;
}
ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
if (ret) {
- DRM_ERROR("Failed to process memory training!\n");
+ dev_err(adev->dev, "Failed to process memory training!\n");
return ret;
}
}
@@ -626,7 +647,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
- int timeout = 20000;
+ int timeout = psp->adev->psp_timeout;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -675,9 +696,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
- DRM_WARN("failed to load ucode %s(0x%X) ",
- amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
- DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ dev_warn(psp->adev->dev,
+ "failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ dev_warn(psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
@@ -771,16 +794,6 @@ static int psp_load_toc(struct psp_context *psp,
return ret;
}
-static bool psp_boottime_tmr(struct psp_context *psp)
-{
- switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return true;
- default:
- return false;
- }
-}
-
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
@@ -807,12 +820,12 @@ static int psp_tmr_init(struct psp_context *psp)
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
if (ret) {
- DRM_ERROR("Failed to load toc\n");
+ dev_err(psp->adev->dev, "Failed to load toc\n");
return ret;
}
}
- if (!psp->tmr_bo) {
+ if (!psp->tmr_bo && !psp->boot_time_tmr) {
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
@@ -855,7 +868,7 @@ static int psp_tmr_load(struct psp_context *psp)
psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
if (psp->tmr_bo)
- DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
+ dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -1113,7 +1126,7 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
psp_prep_reg_prog_cmd_buf(cmd, reg, value);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (ret)
- DRM_ERROR("PSP failed to program reg id %d", reg);
+ dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
release_psp_cmd_buf(psp);
@@ -1526,22 +1539,22 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
switch (ras_cmd->ras_status) {
case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
dev_warn(psp->adev->dev,
- "RAS WARNING: cmd failed due to unsupported ip\n");
+ "RAS WARNING: cmd failed due to unsupported ip\n");
break;
case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
dev_warn(psp->adev->dev,
- "RAS WARNING: cmd failed due to unsupported error injection\n");
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
break;
case TA_RAS_STATUS__SUCCESS:
break;
case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
dev_warn(psp->adev->dev,
- "RAS WARNING: Inject error to critical region is not allowed\n");
+ "RAS WARNING: Inject error to critical region is not allowed\n");
break;
default:
dev_warn(psp->adev->dev,
- "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
break;
}
}
@@ -1565,7 +1578,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return ret;
if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
- DRM_WARN("RAS: Unsupported Interface");
+ dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
return -EINVAL;
}
@@ -1715,7 +1728,7 @@ int psp_ras_initialize(struct psp_context *psp)
psp->ras_context.context.initialized = true;
else {
if (ras_cmd->ras_status)
- dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+ dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
/* fail to load RAS TA */
psp->ras_context.context.initialized = false;
@@ -1779,6 +1792,31 @@ int psp_ras_trigger_error(struct psp_context *psp,
return 0;
}
+
+int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras_context.context.initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
+ ras_cmd->ras_in_message.address = *addr_in;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+ return -EINVAL;
+
+ *addr_out = ras_cmd->ras_out_message.address;
+
+ return 0;
+}
// ras end
// HDCP start
@@ -2125,19 +2163,14 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
return ret;
}
-int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
{
- struct psp_context *psp = &adev->psp;
- int ret = 0;
-
- if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
- return 0;
-
if (psp->funcs &&
- psp->funcs->query_boot_status)
- ret = psp->funcs->query_boot_status(psp);
-
- return ret;
+ psp->funcs->get_ras_capability) {
+ return psp->funcs->get_ras_capability(psp);
+ } else {
+ return false;
+ }
}
static int psp_hw_start(struct psp_context *psp)
@@ -2150,7 +2183,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
if (ret) {
- DRM_ERROR("PSP load kdb failed!\n");
+ dev_err(adev->dev, "PSP load kdb failed!\n");
return ret;
}
}
@@ -2159,7 +2192,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_spl != NULL)) {
ret = psp_bootloader_load_spl(psp);
if (ret) {
- DRM_ERROR("PSP load spl failed!\n");
+ dev_err(adev->dev, "PSP load spl failed!\n");
return ret;
}
}
@@ -2168,7 +2201,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_sysdrv != NULL)) {
ret = psp_bootloader_load_sysdrv(psp);
if (ret) {
- DRM_ERROR("PSP load sys drv failed!\n");
+ dev_err(adev->dev, "PSP load sys drv failed!\n");
return ret;
}
}
@@ -2177,7 +2210,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_soc_drv != NULL)) {
ret = psp_bootloader_load_soc_drv(psp);
if (ret) {
- DRM_ERROR("PSP load soc drv failed!\n");
+ dev_err(adev->dev, "PSP load soc drv failed!\n");
return ret;
}
}
@@ -2186,7 +2219,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_intf_drv != NULL)) {
ret = psp_bootloader_load_intf_drv(psp);
if (ret) {
- DRM_ERROR("PSP load intf drv failed!\n");
+ dev_err(adev->dev, "PSP load intf drv failed!\n");
return ret;
}
}
@@ -2195,7 +2228,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_dbg_drv != NULL)) {
ret = psp_bootloader_load_dbg_drv(psp);
if (ret) {
- DRM_ERROR("PSP load dbg drv failed!\n");
+ dev_err(adev->dev, "PSP load dbg drv failed!\n");
return ret;
}
}
@@ -2204,7 +2237,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_ras_drv != NULL)) {
ret = psp_bootloader_load_ras_drv(psp);
if (ret) {
- DRM_ERROR("PSP load ras_drv failed!\n");
+ dev_err(adev->dev, "PSP load ras_drv failed!\n");
return ret;
}
}
@@ -2213,7 +2246,7 @@ static int psp_hw_start(struct psp_context *psp)
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
if (ret) {
- DRM_ERROR("PSP load sos failed!\n");
+ dev_err(adev->dev, "PSP load sos failed!\n");
return ret;
}
}
@@ -2221,17 +2254,17 @@ static int psp_hw_start(struct psp_context *psp)
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret) {
- DRM_ERROR("PSP create ring failed!\n");
+ dev_err(adev->dev, "PSP create ring failed!\n");
return ret;
}
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
- if (!psp_boottime_tmr(psp)) {
+ if (!psp->boot_time_tmr || psp->autoload_supported) {
ret = psp_tmr_init(psp);
if (ret) {
- DRM_ERROR("PSP tmr init failed!\n");
+ dev_err(adev->dev, "PSP tmr init failed!\n");
return ret;
}
}
@@ -2248,10 +2281,12 @@ skip_pin_bo:
return ret;
}
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
+ if (!psp->boot_time_tmr || !psp->autoload_supported) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load tmr failed!\n");
+ return ret;
+ }
}
return 0;
@@ -2462,6 +2497,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_P2S_TABLE:
*type = GFX_FW_TYPE_P2S_TABLE;
break;
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ *type = GFX_FW_TYPE_JPEG_RAM;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
@@ -2518,7 +2556,8 @@ static void psp_print_fw_hdr(struct psp_context *psp,
}
}
-static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
+static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd)
{
int ret;
@@ -2531,7 +2570,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
if (ret)
- DRM_ERROR("Unknown firmware type\n");
+ dev_err(psp->adev->dev, "Unknown firmware type\n");
return ret;
}
@@ -2542,7 +2581,7 @@ int psp_execute_ip_fw_load(struct psp_context *psp,
int ret = 0;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
- ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
+ ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
if (!ret) {
ret = psp_cmd_submit_buf(psp, ucode, cmd,
psp->fence_buf_mc_addr);
@@ -2601,13 +2640,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret)
- DRM_WARN("Failed to set MP1 state prepare for reload\n");
+ dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
}
ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
- DRM_ERROR("PSP load smu failed!\n");
+ dev_err(adev->dev, "PSP load smu failed!\n");
return ret;
}
@@ -2712,7 +2751,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
if (ret) {
- DRM_ERROR("Failed to start rlc autoload\n");
+ dev_err(adev->dev, "Failed to start rlc autoload\n");
return ret;
}
}
@@ -2734,7 +2773,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret) {
- DRM_ERROR("PSP ring init failed!\n");
+ dev_err(adev->dev, "PSP ring init failed!\n");
goto failed;
}
}
@@ -2749,13 +2788,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
ret = psp_asd_initialize(psp);
if (ret) {
- DRM_ERROR("PSP load asd failed!\n");
+ dev_err(adev->dev, "PSP load asd failed!\n");
goto failed1;
}
ret = psp_rl_load(adev);
if (ret) {
- DRM_ERROR("PSP load RL failed!\n");
+ dev_err(adev->dev, "PSP load RL failed!\n");
goto failed1;
}
@@ -2775,7 +2814,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
- "RAS: Failed to initialize RAS\n");
+ "RAS: Failed to initialize RAS\n");
ret = psp_hdcp_initialize(psp);
if (ret)
@@ -2828,7 +2867,7 @@ static int psp_hw_init(void *handle)
ret = psp_load_fw(adev);
if (ret) {
- DRM_ERROR("PSP firmware loading failed\n");
+ dev_err(adev->dev, "PSP firmware loading failed\n");
goto failed;
}
@@ -2875,7 +2914,7 @@ static int psp_suspend(void *handle)
psp->xgmi_context.context.initialized) {
ret = psp_xgmi_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate xgmi ta\n");
+ dev_err(adev->dev, "Failed to terminate xgmi ta\n");
goto out;
}
}
@@ -2883,46 +2922,46 @@ static int psp_suspend(void *handle)
if (psp->ta_fw) {
ret = psp_ras_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate ras ta\n");
+ dev_err(adev->dev, "Failed to terminate ras ta\n");
goto out;
}
ret = psp_hdcp_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate hdcp ta\n");
+ dev_err(adev->dev, "Failed to terminate hdcp ta\n");
goto out;
}
ret = psp_dtm_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate dtm ta\n");
+ dev_err(adev->dev, "Failed to terminate dtm ta\n");
goto out;
}
ret = psp_rap_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate rap ta\n");
+ dev_err(adev->dev, "Failed to terminate rap ta\n");
goto out;
}
ret = psp_securedisplay_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate securedisplay ta\n");
+ dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
goto out;
}
}
ret = psp_asd_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate asd\n");
+ dev_err(adev->dev, "Failed to terminate asd\n");
goto out;
}
ret = psp_tmr_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate tmr\n");
+ dev_err(adev->dev, "Failed to terminate tmr\n");
goto out;
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret)
- DRM_ERROR("PSP ring stop failed\n");
+ dev_err(adev->dev, "PSP ring stop failed\n");
out:
return ret;
@@ -2934,12 +2973,12 @@ static int psp_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- DRM_INFO("PSP is resuming...\n");
+ dev_info(adev->dev, "PSP is resuming...\n");
if (psp->mem_train_ctx.enable_mem_training) {
ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
if (ret) {
- DRM_ERROR("Failed to process memory training!\n");
+ dev_err(adev->dev, "Failed to process memory training!\n");
return ret;
}
}
@@ -2956,7 +2995,7 @@ static int psp_resume(void *handle)
ret = psp_asd_initialize(psp);
if (ret) {
- DRM_ERROR("PSP load asd failed!\n");
+ dev_err(adev->dev, "PSP load asd failed!\n");
goto failed;
}
@@ -2980,7 +3019,7 @@ static int psp_resume(void *handle)
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
- "RAS: Failed to initialize RAS\n");
+ "RAS: Failed to initialize RAS\n");
ret = psp_hdcp_initialize(psp);
if (ret)
@@ -3008,7 +3047,7 @@ static int psp_resume(void *handle)
return 0;
failed:
- DRM_ERROR("PSP resume failed\n");
+ dev_err(adev->dev, "PSP resume failed\n");
mutex_unlock(&adev->firmware.mutex);
return ret;
}
@@ -3069,9 +3108,11 @@ int psp_ring_cmd_submit(struct psp_context *psp,
write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
/* Check invalid write_frame ptr address */
if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ dev_err(adev->dev,
+ "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ dev_err(adev->dev,
+ "write_frame is pointing to address out of bounds\n");
return -EINVAL;
}
@@ -3597,7 +3638,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
int ret;
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
- DRM_INFO("PSP block is not ready yet.");
+ dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3606,7 +3647,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
mutex_unlock(&adev->psp.mutex);
if (ret) {
- DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+ dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
return ret;
}
@@ -3628,7 +3669,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
void *fw_pri_cpu_addr;
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
- DRM_INFO("PSP block is not ready yet.");
+ dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3661,7 +3702,7 @@ rel_buf:
release_firmware(usbc_pd_fw);
fail:
if (ret) {
- DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+ dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
count = ret;
}
@@ -3708,7 +3749,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
/* Safeguard against memory drain */
if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
- dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
+ dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
kvfree(adev->psp.vbflash_tmp_buf);
adev->psp.vbflash_tmp_buf = NULL;
adev->psp.vbflash_image_size = 0;
@@ -3727,7 +3768,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
adev->psp.vbflash_image_size += count;
mutex_unlock(&adev->psp.mutex);
- dev_dbg(adev->dev, "IFWI staged for update");
+ dev_dbg(adev->dev, "IFWI staged for update\n");
return count;
}
@@ -3747,7 +3788,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
if (adev->psp.vbflash_image_size == 0)
return -EINVAL;
- dev_dbg(adev->dev, "PSP IFWI flash process initiated");
+ dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
AMDGPU_GPU_PAGE_SIZE,
@@ -3772,11 +3813,11 @@ rel_buf:
adev->psp.vbflash_image_size = 0;
if (ret) {
- dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
+ dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
return ret;
}
- dev_dbg(adev->dev, "PSP IFWI flash process done");
+ dev_dbg(adev->dev, "PSP IFWI flash process done\n");
return 0;
}
@@ -3930,3 +3971,11 @@ const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
.rev = 4,
.funcs = &psp_ip_funcs,
};
+
+const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 14,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c4d9cbde55b9..ee16f134ae92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -134,7 +134,7 @@ struct psp_funcs {
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
- int (*query_boot_status)(struct psp_context *psp);
+ bool (*get_ras_capability)(struct psp_context *psp);
};
struct ta_funcs {
@@ -203,7 +203,7 @@ struct psp_ras_context {
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
/*Define the VRAM size that will be encroached by BIST training.*/
-#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
+#define BIST_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
enum psp_memory_training_init_flag {
PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
@@ -364,6 +364,8 @@ struct psp_context {
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
+ /* flag to mark whether psp use runtime TMR or boottime TMR */
+ bool boot_time_tmr;
/* flag to mark whether df cstate management centralized to PMFW */
bool pmfw_centralized_cstate_management;
@@ -463,6 +465,7 @@ extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
+extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);
@@ -502,6 +505,9 @@ int psp_ras_enable_features(struct psp_context *psp,
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info, uint32_t instance_mask);
int psp_ras_terminate(struct psp_context *psp);
+int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out);
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -538,7 +544,5 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
-
-int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
-
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 31823a30dea2..8ebab6f22e5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -39,6 +39,7 @@
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
+#include "amdgpu_psp.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -73,6 +74,8 @@ const char *ras_block_string[] = {
"mca",
"vcn",
"jpeg",
+ "ih",
+ "mpio",
};
const char *ras_mca_block_string[] = {
@@ -94,7 +97,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
if (!ras_block)
return "NULL";
- if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+ if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
+ ras_block->block >= ARRAY_SIZE(ras_block_string))
return "OUT OF RANGE";
if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
@@ -116,6 +120,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -628,8 +634,12 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
}
- return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
- "ce", info.ce_count);
+ if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count, "de", info.de_count);
+ else
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
}
/* obj begin */
@@ -1036,7 +1046,8 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct ras_manager *ras_mgr,
struct ras_err_data *err_data,
const char *blk_name,
- bool is_ue)
+ bool is_ue,
+ bool is_de)
{
struct amdgpu_smuio_mcm_config_info *mcm_info;
struct ras_err_node *err_node;
@@ -1065,25 +1076,50 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
}
} else {
- for_each_ras_error(err_node, err_data) {
- err_info = &err_node->err_info;
- mcm_info = &err_info->mcm_info;
- if (err_info->ce_count) {
+ if (is_de) {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->de_count) {
+ dev_info(adev->dev, "socket: %d, die: %d, "
+ "%lld new deferred hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->de_count,
+ blk_name);
+ }
+ }
+
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block\n",
- mcm_info->socket_id,
- mcm_info->die_id,
- err_info->ce_count,
- blk_name);
+ "%lld deferred hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->de_count, blk_name);
+ }
+ } else {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->ce_count) {
+ dev_info(adev->dev, "socket: %d, die: %d, "
+ "%lld new correctable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ce_count,
+ blk_name);
+ }
}
- }
- for_each_ras_error(err_node, &ras_mgr->err_data) {
- err_info = &err_node->err_info;
- mcm_info = &err_info->mcm_info;
- dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block\n",
- mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ dev_info(adev->dev, "socket: %d, die: %d, "
+ "%lld correctable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->ce_count, blk_name);
+ }
}
}
}
@@ -1102,7 +1138,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
if (err_data->ce_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ blk_name, false, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
@@ -1124,7 +1161,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
if (err_data->ue_count) {
if (err_data_has_source_info(err_data)) {
- amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ blk_name, true, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
@@ -1144,6 +1182,28 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
}
}
+ if (err_data->de_count) {
+ if (err_data_has_source_info(err_data)) {
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+ blk_name, false, true);
+ } else if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id &&
+ adev->smuio.funcs->get_die_id) {
+ dev_info(adev->dev, "socket: %d, die: %d "
+ "%ld deferred hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.de_count,
+ blk_name);
+ } else {
+ dev_info(adev->dev, "%ld deferred hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.de_count,
+ blk_name);
+ }
+ }
}
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
@@ -1154,7 +1214,8 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
if (err_data_has_source_info(err_data)) {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
-
+ amdgpu_ras_error_statistic_de_count(&obj->err_data,
+ &err_info->mcm_info, NULL, err_info->de_count);
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
&err_info->mcm_info, NULL, err_info->ce_count);
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
@@ -1164,9 +1225,72 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
/* for legacy asic path which doesn't has error source info */
obj->err_data.ue_count += err_data->ue_count;
obj->err_data.ce_count += err_data->ce_count;
+ obj->err_data.de_count += err_data->de_count;
}
}
+static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+ struct ras_common_if head;
+
+ memset(&head, 0, sizeof(head));
+ head.block = blk;
+
+ return amdgpu_ras_find_obj(adev, &head);
+}
+
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ const struct aca_info *aca_info, void *data)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
+}
+
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ amdgpu_aca_remove_handle(&obj->aca_handle);
+
+ return 0;
+}
+
+static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
+}
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+ struct aca_handle *handle, char *buf, void *data)
+{
+ struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
+}
+
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
struct ras_query_if *info,
struct ras_err_data *err_data,
@@ -1174,6 +1298,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
{
enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
struct amdgpu_ras_block_object *block_obj = NULL;
+ int ret;
if (blk == AMDGPU_RAS_BLOCK_COUNT)
return -EINVAL;
@@ -1203,9 +1328,19 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
}
} else {
- /* FIXME: add code to check return value later */
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
- amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+ if (amdgpu_aca_is_enabled(adev)) {
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
+ if (ret)
+ return ret;
+ } else {
+ /* FIXME: add code to check return value later */
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+ }
}
return 0;
@@ -1239,6 +1374,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
+ info->de_count = obj->err_data.de_count;
amdgpu_ras_error_generate_report(adev, info, &err_data);
@@ -1254,6 +1390,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
struct amdgpu_hive_info *hive;
int hive_ras_recovery = 0;
@@ -1264,7 +1401,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
}
if (!amdgpu_ras_is_supported(adev, block) ||
- !amdgpu_ras_get_mca_debug_mode(adev))
+ !amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
hive = amdgpu_get_xgmi_hive(adev);
@@ -1276,7 +1413,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
hive_ras_recovery) &&
- mca_funcs && mca_funcs->mca_set_debug_mode)
+ ((smu_funcs && smu_funcs->set_debug_mode) ||
+ (mca_funcs && mca_funcs->mca_set_debug_mode)))
return -EOPNOTSUPP;
if (block_obj->hw_ops->reset_ras_error_count)
@@ -1772,7 +1910,10 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
}
}
- amdgpu_mca_smu_debugfs_init(adev, dir);
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_smu_debugfs_init(adev, dir);
+ else
+ amdgpu_mca_smu_debugfs_init(adev, dir);
}
/* debugfs end */
@@ -1900,7 +2041,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
}
}
- amdgpu_umc_poison_handler(adev, false);
+ amdgpu_umc_poison_handler(adev, obj->head.block, false);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -1951,6 +2092,7 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
*/
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
}
amdgpu_ras_error_data_fini(&err_data);
@@ -2297,6 +2439,18 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /* For any RAS error that needs a full reset to
+ * recover, set the fatal error status
+ */
+ if (hive) {
+ list_for_each_entry(remote_adev,
+ &hive->device_list,
+ gmc.xgmi.head)
+ amdgpu_ras_set_fed(remote_adev,
+ true);
+ } else {
+ amdgpu_ras_set_fed(adev, true);
+ }
psp_fatal_error_recovery_quirk(&adev->psp);
}
}
@@ -2520,6 +2674,32 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
}
}
+static int amdgpu_ras_page_retirement_thread(void *param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)param;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ while (!kthread_should_stop()) {
+
+ wait_event_interruptible(con->page_retirement_wq,
+ kthread_should_stop() ||
+ atomic_read(&con->page_retirement_req_cnt));
+
+ if (kthread_should_stop())
+ break;
+
+ dev_info(adev->dev, "Start processing page retirement. request:%d\n",
+ atomic_read(&con->page_retirement_req_cnt));
+
+ atomic_dec(&con->page_retirement_req_cnt);
+
+ amdgpu_umc_bad_page_polling_timeout(adev,
+ false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ }
+
+ return 0;
+}
+
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -2583,6 +2763,16 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
}
}
+ mutex_init(&con->page_retirement_lock);
+ init_waitqueue_head(&con->page_retirement_wq);
+ atomic_set(&con->page_retirement_req_cnt, 0);
+ con->page_retirement_thread =
+ kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
+ if (IS_ERR(con->page_retirement_thread)) {
+ con->page_retirement_thread = NULL;
+ dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
+ }
+
#ifdef CONFIG_X86_MCE_AMD
if ((adev->asic_type == CHIP_ALDEBARAN) &&
(adev->gmc.xgmi.connected_to_cpu))
@@ -2618,6 +2808,11 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
if (!data)
return 0;
+ if (con->page_retirement_thread)
+ kthread_stop(con->page_retirement_thread);
+
+ atomic_set(&con->page_retirement_req_cnt, 0);
+
cancel_work_sync(&con->recovery_work);
mutex_lock(&con->recovery_lock);
@@ -2679,6 +2874,87 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}
+/* Query ras capability via atomfirmware interface */
+static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
+{
+ /* mem_ecc cap */
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ dev_info(adev->dev, "MEM ECC is active.\n");
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "MEM ECC is not presented.\n");
+ }
+
+ /* sram_ecc cap */
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ dev_info(adev->dev, "SRAM ECC is active.\n");
+ if (!amdgpu_sriov_vf(adev))
+ adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ else
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__GFX);
+
+ /*
+ * VCN/JPEG RAS can be supported in both bare metal and
+ * SRIOV environments
+ */
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+
+ /*
+ * XGMI RAS is not supported if xgmi num physical nodes
+ * is zero
+ */
+ if (!adev->gmc.xgmi.num_physical_nodes)
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
+ } else {
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
+ }
+}
+
+/* Query poison mode from umc/df IP callbacks */
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool df_poison, umc_poison;
+
+ /* poison setting is useless on SRIOV guest */
+ if (amdgpu_sriov_vf(adev) || !con)
+ return;
+
+ /* Init poison supported flag, the default value is false */
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ } else if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras->query_ras_poison_mode(adev);
+
+ /* Only if poison mode is set in both DF and UMC can we support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev,
+ "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+}
+
/*
* check hardware's ras ability which will be saved in hw_supported.
* if hardware does not support ras, we can skip some ras initialization and
@@ -2695,49 +2971,13 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (!amdgpu_ras_asic_supported(adev))
return;
- if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
- if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- dev_info(adev->dev, "MEM ECC is active.\n");
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- } else {
- dev_info(adev->dev, "MEM ECC is not presented.\n");
- }
+ /* query ras capability from psp */
+ if (amdgpu_psp_get_ras_capability(&adev->psp))
+ goto init_ras_enabled_flag;
- if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- dev_info(adev->dev, "SRAM ECC is active.\n");
- if (!amdgpu_sriov_vf(adev))
- adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- else
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
- 1 << AMDGPU_RAS_BLOCK__SDMA |
- 1 << AMDGPU_RAS_BLOCK__GFX);
-
- /* VCN/JPEG RAS can be supported on both bare metal and
- * SRIOV environment
- */
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(2, 6, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(4, 0, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(4, 0, 3))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
-
- /*
- * XGMI RAS is not supported if xgmi num physical nodes
- * is zero
- */
- if (!adev->gmc.xgmi.num_physical_nodes)
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
- } else {
- dev_info(adev->dev, "SRAM ECC is not presented.\n");
- }
+ /* query ras capability from bios */
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+ amdgpu_ras_query_ras_capablity_from_vbios(adev);
} else {
/* driver only manages a few IP blocks RAS feature
* when GPU is connected to cpu through XGMI */
@@ -2746,13 +2986,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
1 << AMDGPU_RAS_BLOCK__MMHUB);
}
+ /* apply asic specific settings (vega20 only for now) */
amdgpu_ras_get_quirks(adev);
+ /* query poison mode from umc/df ip callback */
+ amdgpu_ras_query_poison_mode(adev);
+
+init_ras_enabled_flag:
/* hw_supported needs to be aligned with RAS block mask. */
adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
adev->ras_hw_enabled & amdgpu_ras_mask;
+
+ /* aca is disabled by default */
+ adev->aca.is_enabled = false;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
@@ -2780,39 +3028,6 @@ Out:
pm_runtime_put_autosuspend(dev->dev);
}
-static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool df_poison, umc_poison;
-
- /* poison setting is useless on SRIOV guest */
- if (amdgpu_sriov_vf(adev) || !con)
- return;
-
- /* Init poison supported flag, the default value is false */
- if (adev->gmc.xgmi.connected_to_cpu ||
- adev->gmc.is_app_apu) {
- /* enabled by default when GPU is connected to CPU */
- con->poison_supported = true;
- } else if (adev->df.funcs &&
- adev->df.funcs->query_ras_poison_mode &&
- adev->umc.ras &&
- adev->umc.ras->query_ras_poison_mode) {
- df_poison =
- adev->df.funcs->query_ras_poison_mode(adev);
- umc_poison =
- adev->umc.ras->query_ras_poison_mode(adev);
-
- /* Only poison is set in both DF and UMC, we can support it */
- if (df_poison && umc_poison)
- con->poison_supported = true;
- else if (df_poison != umc_poison)
- dev_warn(adev->dev,
- "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
- df_poison, umc_poison);
- }
-}
-
static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
{
return amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0 |
@@ -2917,12 +3132,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
- amdgpu_ras_query_poison_mode(adev);
-
/* Packed socket_id to ras feature mask bits[31:29] */
if (adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id)
- con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+ con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
+ AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
/* Get RAS schema for particular SOC */
con->schema = amdgpu_get_ras_schema(adev);
@@ -3128,7 +3342,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
amdgpu_ras_disable_all_features(adev, 0);
/* Make sure all ras objects are disabled. */
- if (con->features)
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
amdgpu_ras_disable_all_features(adev, 1);
}
@@ -3142,15 +3356,29 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
- amdgpu_ras_set_mca_debug_mode(adev, false);
+ if (amdgpu_aca_is_enabled(adev)) {
+ if (amdgpu_in_reset(adev))
+ r = amdgpu_aca_reset(adev);
+ else
+ r = amdgpu_aca_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_ras_set_aca_debug_mode(adev, false);
+ } else {
+ amdgpu_ras_set_mca_debug_mode(adev, false);
+ }
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
- if (!node->ras_obj) {
+ obj = node->ras_obj;
+ if (!obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
continue;
}
- obj = node->ras_obj;
+ if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
+ continue;
+
if (obj->ras_late_init) {
r = obj->ras_late_init(adev, &obj->ras_comm);
if (r) {
@@ -3175,7 +3403,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
/* Need disable ras on all IPs here before ip [hw/sw]fini */
- if (con->features)
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
return 0;
@@ -3208,10 +3436,13 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
amdgpu_ras_fs_fini(adev);
amdgpu_ras_interrupt_remove_all(adev);
- WARN(con->features, "Feature mask is not cleared");
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_fini(adev);
- if (con->features)
- amdgpu_ras_disable_all_features(adev, 1);
+ WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
+
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
+ amdgpu_ras_disable_all_features(adev, 0);
cancel_delayed_work_sync(&con->ras_counte_delay_work);
@@ -3221,6 +3452,26 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (!ras)
+ return false;
+
+ return atomic_read(&ras->fed);
+}
+
+void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras)
+ atomic_set(&ras->fed, !!status);
+}
+
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
@@ -3401,6 +3652,7 @@ int amdgpu_ras_is_supported(struct amdgpu_device *adev,
block == AMDGPU_RAS_BLOCK__SDMA ||
block == AMDGPU_RAS_BLOCK__VCN ||
block == AMDGPU_RAS_BLOCK__JPEG) &&
+ (amdgpu_ras_mask & (1 << block)) &&
amdgpu_ras_is_poison_mode_supported(adev) &&
amdgpu_ras_get_ras_block(adev, block, 0))
ret = 1;
@@ -3425,22 +3677,41 @@ int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
if (con) {
ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
if (!ret)
- con->is_mca_debug_mode = enable;
+ con->is_aca_debug_mode = enable;
}
return ret;
}
-bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (con) {
+ if (amdgpu_aca_is_enabled(adev))
+ ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
+ else
+ ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+ if (!ret)
+ con->is_aca_debug_mode = enable;
+ }
+
+ return ret;
+}
+
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!con)
return false;
- if (mca_funcs && mca_funcs->mca_set_debug_mode)
- return con->is_mca_debug_mode;
+ if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
+ (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
+ return con->is_aca_debug_mode;
else
return true;
}
@@ -3450,15 +3721,16 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
if (!con) {
*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
return false;
}
- if (mca_funcs && mca_funcs->mca_set_debug_mode)
+ if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
*error_query_mode =
- (con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
+ (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
else
*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
@@ -3699,8 +3971,7 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct
}
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr)
+ struct amdgpu_smuio_mcm_config_info *mcm_info)
{
struct ras_err_node *err_node;
@@ -3712,10 +3983,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
if (!err_node)
return NULL;
- memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
+ INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
- if (err_addr)
- memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
+ memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
err_data->err_list_count++;
list_add_tail(&err_node->node, &err_data->err_node_list);
@@ -3724,6 +3994,29 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
return &err_node->err_info;
}
+void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
+{
+ struct ras_err_addr *mca_err_addr;
+
+ mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
+ if (!mca_err_addr)
+ return;
+
+ INIT_LIST_HEAD(&mca_err_addr->node);
+
+ mca_err_addr->err_status = err_addr->err_status;
+ mca_err_addr->err_ipid = err_addr->err_ipid;
+ mca_err_addr->err_addr = err_addr->err_addr;
+
+ list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
+}
+
+void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
+{
+ list_del(&mca_err_addr->node);
+ kfree(mca_err_addr);
+}
+
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
struct amdgpu_smuio_mcm_config_info *mcm_info,
struct ras_err_addr *err_addr, u64 count)
@@ -3736,10 +4029,13 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
if (!err_info)
return -EINVAL;
+ if (err_addr && err_addr->err_status)
+ amdgpu_ras_add_mca_err_addr(err_info, err_addr);
+
err_info->ue_count += count;
err_data->ue_count += count;
@@ -3758,7 +4054,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
if (!err_info)
return -EINVAL;
@@ -3767,3 +4063,135 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
return 0;
}
+
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count)
+{
+ struct ras_err_info *err_info;
+
+ if (!err_data || !mcm_info)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ if (!err_info)
+ return -EINVAL;
+
+ if (err_addr && err_addr->err_status)
+ amdgpu_ras_add_mca_err_addr(err_info, err_addr);
+
+ err_info->de_count += count;
+ err_data->de_count += count;
+
+ return 0;
+}
+
+#define mmMP0_SMN_C2PMSG_92 0x1609C
+#define mmMP0_SMN_C2PMSG_126 0x160BE
+static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
+ u32 instance, u32 boot_error)
+{
+ u32 socket_id, aid_id, hbm_id;
+ u32 reg_data;
+ u64 reg_addr;
+
+ socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
+ aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
+ hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
+
+ /* The pattern for smn addressing in other SOCs could be different from
+ * the one for aqua_vanjaram. We should revisit the code if the pattern
+ * is changed. In such a case, replace the aqua_vanjaram implementation
+ * with a more common helper. */
+ reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
+ socket_id, aid_id, reg_data);
+
+ if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
+ socket_id, aid_id, hbm_id);
+
+ if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
+ socket_id, aid_id);
+
+ if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
+ socket_id, aid_id, hbm_id);
+
+ if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
+ dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
+ socket_id, aid_id, hbm_id);
+}
+
+static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev,
+ u32 instance, u32 *boot_error)
+{
+ u32 reg_addr;
+ u32 reg_data;
+ int retry_loop;
+
+ reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) {
+ *boot_error = AMDGPU_RAS_BOOT_SUCEESS;
+ return 0;
+ }
+ msleep(1);
+ }
+
+ /* The pattern for smn addressing in other SOCs could be different from
+ * the one for aqua_vanjaram. We should revisit the code if the pattern
+ * is changed. In such a case, replace the aqua_vanjaram implementation
+ * with a more common helper. */
+ reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) {
+ *boot_error = reg_data;
+ return 0;
+ }
+ msleep(1);
+ }
+
+ *boot_error = reg_data;
+ return -ETIME;
+}
+
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
+{
+ u32 boot_error = 0;
+ u32 i;
+
+ for (i = 0; i < num_instances; i++) {
+ if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
+ amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
+ }
+}
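Editor's note: the asynchronous page retirement added in this patch is a small producer/consumer pattern split across two files: the UMC poison handler bumps con->page_retirement_req_cnt and wakes con->page_retirement_wq, while the kthread created in amdgpu_ras_recovery_init() drains the counter and polls for bad pages. Below is a minimal sketch of that pattern with both sides shown together; the field names mirror the ones added to struct amdgpu_ras, and this is illustrative kernel-style code rather than a drop-in replacement.

#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/wait.h>

/* Producer side (poison consumption path): queue one retirement request. */
static void queue_page_retirement(struct amdgpu_ras *con)
{
	atomic_inc(&con->page_retirement_req_cnt);
	wake_up(&con->page_retirement_wq);
}

/* Consumer side: the worker sleeps until a request arrives or stop is requested. */
static int page_retirement_worker(void *param)
{
	struct amdgpu_ras *con = param;

	while (!kthread_should_stop()) {
		wait_event_interruptible(con->page_retirement_wq,
					 kthread_should_stop() ||
					 atomic_read(&con->page_retirement_req_cnt));
		if (kthread_should_stop())
			break;

		atomic_dec(&con->page_retirement_req_cnt);
		/* poll UMC and retire newly reported bad pages here */
	}

	return 0;
}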
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 76fb85628716..e0f8ce9d8440 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -29,9 +29,28 @@
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
+#include "amdgpu_aca.h"
struct amdgpu_iv_entry;
+#define AMDGPU_RAS_GPU_ERR_MEM_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 0, 0)
+#define AMDGPU_RAS_GPU_ERR_FW_LOAD(x) AMDGPU_GET_REG_FIELD(x, 1, 1)
+#define AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 2, 2)
+#define AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 3, 3)
+#define AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 4, 4)
+#define AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 5, 5)
+#define AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(x) AMDGPU_GET_REG_FIELD(x, 6, 6)
+#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
+#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
+#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
+
+#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000
+#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
+#define AMDGPU_RAS_BOOT_STATUS_MASK 0xFF
+#define AMDGPU_RAS_BOOT_SUCEESS 0x80000000
+
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
/* position of instance value in sub_block_index of
* ta_ras_trigger_error_input, the sub block uses lower 12 bits
@@ -39,6 +58,12 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_INST_MASK 0xfffff000
#define AMDGPU_RAS_INST_SHIFT 0xc
+#define AMDGPU_RAS_FEATURES_SOCKETID_SHIFT 29
+#define AMDGPU_RAS_FEATURES_SOCKETID_MASK 0xe0000000
+
+/* The high three bits indicate the socket id */
+#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
+
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA,
@@ -57,6 +82,8 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__MCA,
AMDGPU_RAS_BLOCK__VCN,
AMDGPU_RAS_BLOCK__JPEG,
+ AMDGPU_RAS_BLOCK__IH,
+ AMDGPU_RAS_BLOCK__MPIO,
AMDGPU_RAS_BLOCK__LAST
};
@@ -441,10 +468,17 @@ struct amdgpu_ras {
/* Indicates whether smu needs to update bad channel info */
bool update_channel_flag;
/* Record status of smu mca debug mode */
- bool is_mca_debug_mode;
+ bool is_aca_debug_mode;
/* Record special requirements of gpu reset caller */
uint32_t gpu_reset_flags;
+
+ struct task_struct *page_retirement_thread;
+ wait_queue_head_t page_retirement_wq;
+ struct mutex page_retirement_lock;
+ atomic_t page_retirement_req_cnt;
+ /* Fatal error detected flag */
+ atomic_t fed;
};
struct ras_fs_data {
@@ -453,6 +487,7 @@ struct ras_fs_data {
};
struct ras_err_addr {
+ struct list_head node;
uint64_t err_status;
uint64_t err_ipid;
uint64_t err_addr;
@@ -462,7 +497,8 @@ struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
- struct ras_err_addr err_addr;
+ u64 de_count;
+ struct list_head err_addr_list;
};
struct ras_err_node {
@@ -473,6 +509,7 @@ struct ras_err_node {
struct ras_err_data {
unsigned long ue_count;
unsigned long ce_count;
+ unsigned long de_count;
unsigned long err_addr_cnt;
struct eeprom_table_record *err_addr;
u32 err_list_count;
@@ -529,6 +566,8 @@ struct ras_manager {
struct ras_ih_data ih_data;
struct ras_err_data err_data;
+
+ struct aca_handle aca_handle;
};
struct ras_badpage {
@@ -548,6 +587,7 @@ struct ras_query_if {
struct ras_common_if head;
unsigned long ue_count;
unsigned long ce_count;
+ unsigned long de_count;
};
struct ras_inject_if {
@@ -781,7 +821,8 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);
int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
-bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable);
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev);
bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
unsigned int *mode);
@@ -818,5 +859,24 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
struct amdgpu_smuio_mcm_config_info *mcm_info,
struct ras_err_addr *err_addr, u64 count);
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count);
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ const struct aca_info *aca_info, void *data);
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk);
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+ struct aca_handle *handle, char *buf, void *data);
+
+void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
+ struct ras_err_addr *err_addr);
+
+void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
+ struct ras_err_addr *mca_err_addr);
+
+void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
+bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
#endif
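Editor's note: the boot error word decoded by amdgpu_ras_boot_time_error_reporting() packs the individual failure flags into bits [7:0] and the topology fields above them, per the AMDGPU_RAS_GPU_ERR_* macros added here. A self-contained example of the field extraction follows; get_field() is a hypothetical stand-in for AMDGPU_GET_REG_FIELD(), whose definition lives outside this patch.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [high:low] of x, matching the AMDGPU_RAS_GPU_ERR_* field layout. */
static uint32_t get_field(uint32_t x, unsigned int high, unsigned int low)
{
	return (x >> low) & ((1u << (high - low + 1)) - 1);
}

int main(void)
{
	/* Example word: socket 1, aid 1, hbm 1, memory training and hbm bist failed. */
	uint32_t boot_error = 0x2981;

	printf("socket %u, aid %u, hbm %u\n",
	       get_field(boot_error, 10, 8),	/* AMDGPU_RAS_GPU_ERR_SOCKET_ID */
	       get_field(boot_error, 12, 11),	/* AMDGPU_RAS_GPU_ERR_AID_ID */
	       get_field(boot_error, 13, 13));	/* AMDGPU_RAS_GPU_ERR_HBM_ID */
	printf("mem training failed: %u, hbm bist failed: %u\n",
	       get_field(boot_error, 0, 0),	/* AMDGPU_RAS_GPU_ERR_MEM_TRAINING */
	       get_field(boot_error, 7, 7));	/* AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST */
	return 0;
}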
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2fde93b00cab..b12808c0c331 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -735,6 +735,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
control->tbl_rai.health_percent = 0;
}
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 4baa300121d8..147100c27c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -196,6 +196,13 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
coredump->reset_task_info.process_name,
coredump->reset_task_info.pid);
+ if (coredump->ring) {
+ drm_printf(&p, "\nRing timed out details\n");
+ drm_printf(&p, "IP Type: %d Ring Name: %s\n",
+ coredump->ring->funcs->type,
+ coredump->ring->name);
+ }
+
if (coredump->reset_vram_lost)
drm_printf(&p, "VRAM is lost due to GPU reset!\n");
if (coredump->adev->reset_info.num_regs) {
@@ -220,6 +227,8 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
{
struct amdgpu_coredump_info *coredump;
struct drm_device *dev = adev_to_drm(adev);
+ struct amdgpu_job *job = reset_context->job;
+ struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
@@ -230,8 +239,21 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
coredump->reset_vram_lost = vram_lost;
- if (reset_context->job && reset_context->job->vm)
- coredump->reset_task_info = reset_context->job->vm->task_info;
+ if (reset_context->job && reset_context->job->vm) {
+ struct amdgpu_task_info *ti;
+ struct amdgpu_vm *vm = reset_context->job->vm;
+
+ ti = amdgpu_vm_get_task_info_vm(vm);
+ if (ti) {
+ coredump->reset_task_info = *ti;
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+
+ if (job) {
+ s_job = &job->base;
+ coredump->ring = to_amdgpu_ring(s_job->sched);
+ }
coredump->adev = adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 19899f6b9b2b..60522963aaca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -97,6 +97,7 @@ struct amdgpu_coredump_info {
struct amdgpu_task_info reset_task_info;
struct timespec64 reset_time;
bool reset_vram_lost;
+ struct amdgpu_ring *ring;
};
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index fe1a61eb6e4c..582053f1cd56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -209,8 +209,7 @@ struct amdgpu_ring_funcs {
void (*insert_end)(struct amdgpu_ring *ring);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
/* note usage for clock and power gating */
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
@@ -286,6 +285,9 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
+ unsigned int set_q_mode_offs;
+ volatile u32 *set_q_mode_ptr;
+ u64 set_q_mode_token;
unsigned vm_hub;
unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
@@ -327,8 +329,7 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
-#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
-#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
@@ -411,6 +412,30 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
ring->count_dw -= count_dw;
}
+/**
+ * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
+ * @ring: amdgpu_ring structure
+ * @offset: offset returned by amdgpu_ring_init_cond_exec
+ *
+ * Calculate the dw count and patch it into a cond_exec command.
+ */
+static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
+ unsigned int offset)
+{
+ unsigned cur;
+
+ if (!ring->funcs->init_cond_exec)
+ return;
+
+ WARN_ON(offset > ring->buf_mask);
+ WARN_ON(ring->ring[offset] != 0);
+
+ cur = (ring->wptr - 1) & ring->buf_mask;
+ if (cur < offset)
+ cur += ring->ring_size >> 2;
+ ring->ring[offset] = cur - offset;
+}
+
#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
(ring->is_mes_queue && ring->mes_ctx ? \
(ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
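Editor's note: a quick worked example of the wrap-around arithmetic in the new amdgpu_ring_patch_cond_exec() helper above. Offsets are in dwords, and when the write pointer has wrapped past the cond_exec packet the ring size (in dwords) is added back before computing the dw count. Plain C with made-up ring numbers:

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 4096;			/* bytes -> 1024 dwords */
	unsigned int buf_mask  = (ring_size >> 2) - 1;	/* 1023 */
	unsigned int offset    = 1020;			/* cond_exec packet near the end */
	unsigned int wptr      = 1030;			/* write pointer has wrapped */
	unsigned int cur;

	cur = (wptr - 1) & buf_mask;			/* 1029 & 1023 = 5 */
	if (cur < offset)
		cur += ring_size >> 2;			/* 5 + 1024 = 1029 */

	printf("patched dw count = %u\n", cur - offset);	/* 9 dwords follow the packet */
	return 0;
}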
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index e1ee1c7117fb..d234b7ccfaaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -159,9 +159,7 @@ int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
mux->ring_entry_size = entry_size;
mux->s_resubmit = false;
- amdgpu_mux_chunk_slab = kmem_cache_create("amdgpu_mux_chunk",
- sizeof(struct amdgpu_mux_chunk), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ amdgpu_mux_chunk_slab = KMEM_CACHE(amdgpu_mux_chunk, SLAB_HWCACHE_ALIGN);
if (!amdgpu_mux_chunk_slab) {
DRM_ERROR("create amdgpu_mux_chunk cache failed\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 2c3675d91614..db5791e1a7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -241,7 +241,7 @@ void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
table_size = le32_to_cpu(hdr->jt_size);
}
- for (i = 0; i < table_size; i ++) {
+ for (i = 0; i < table_size; i++) {
dst_ptr[bo_offset + i] =
cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index b591d33af264..5a17e0ff2ab8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -169,7 +169,7 @@ struct amdgpu_rlc_funcs {
void (*stop)(struct amdgpu_device *adev);
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
- void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index 7a6a67275404..e22cb2b5cd92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -36,13 +36,24 @@
*/
/**
+ * amdgpu_seq64_get_va_base - Get the seq64 va base address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * va base address on success
+ */
+static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
+{
+ return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+}
+
+/**
* amdgpu_seq64_map - Map the seq64 memory to VM
*
* @adev: amdgpu_device pointer
* @vm: vm pointer
* @bo_va: bo_va pointer
- * @seq64_addr: seq64 vaddr start address
- * @size: seq64 pool size
*
* Map the seq64 memory to the given VM.
*
@@ -50,11 +61,11 @@
* 0 on success or a negative error code on failure
*/
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va, u64 seq64_addr,
- uint32_t size)
+ struct amdgpu_bo_va **bo_va)
{
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -77,9 +88,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ seq64_addr = amdgpu_seq64_get_va_base(adev);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_PTE_READABLE);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -144,31 +155,25 @@ error:
* amdgpu_seq64_alloc - Allocate a 64 bit memory
*
* @adev: amdgpu_device pointer
- * @gpu_addr: allocated gpu VA start address
- * @cpu_addr: allocated cpu VA start address
+ * @va: VA to access the seq in process address space
+ * @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
*
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
- u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
{
unsigned long bit_pos;
- u32 offset;
bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
+ if (bit_pos >= adev->seq64.num_sem)
+ return -ENOSPC;
- if (bit_pos < adev->seq64.num_sem) {
- __set_bit(bit_pos, adev->seq64.used);
- offset = bit_pos << 6; /* convert to qw offset */
- } else {
- return -EINVAL;
- }
-
- *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START;
- *cpu_addr = offset + adev->seq64.cpu_base_addr;
+ __set_bit(bit_pos, adev->seq64.used);
+ *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+ *cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
}
@@ -177,20 +182,17 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
* amdgpu_seq64_free - Free the given 64 bit memory
*
* @adev: amdgpu_device pointer
- * @gpu_addr: gpu start address to be freed
+ * @va: gpu start address to be freed
*
* Free the given 64 bit memory from seq64 pool.
- *
*/
-void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr)
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
{
- u32 offset;
-
- offset = gpu_addr - AMDGPU_SEQ64_VADDR_START;
+ unsigned long bit_pos;
- offset >>= 6;
- if (offset < adev->seq64.num_sem)
- __clear_bit(offset, adev->seq64.used);
+ bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
+ if (bit_pos < adev->seq64.num_sem)
+ __clear_bit(bit_pos, adev->seq64.used);
}
/**
@@ -229,7 +231,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
* AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS
* 64bit slots
*/
- r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE,
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->seq64.sbo, NULL,
(void **)&adev->seq64.cpu_base_addr);
@@ -238,7 +240,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
return r;
}
- memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE);
+ memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);
adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 2196e72be508..4203b2ab318d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -25,10 +25,9 @@
#ifndef __AMDGPU_SEQ64_H__
#define __AMDGPU_SEQ64_H__
-#define AMDGPU_SEQ64_SIZE (2ULL << 20)
-#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8))
-#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000
-#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET)
+#include "amdgpu_vm.h"
+
+#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_VA_RESERVED_SEQ64_SIZE / sizeof(u64))
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
@@ -42,7 +41,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev);
int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size);
+ struct amdgpu_bo_va **bo_va);
void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
#endif
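Editor's note: the reworked seq64 allocator above maps a free bit straight to a GPU VA and a CPU pointer. Each slot is one u64, so the VA advances by eight bytes per bit while the CPU pointer advances by one element, and freeing simply inverts that mapping. A small self-contained illustration; the base address here is a made-up placeholder, not the real reserved VA.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t va_base = 0x7fff00000000ull;	/* hypothetical reserved VA base */
	uint64_t cpu_base[8];			/* stands in for seq64.cpu_base_addr */
	unsigned long bit_pos = 2;

	uint64_t va = va_base + bit_pos * sizeof(uint64_t);
	uint64_t *cpu_addr = cpu_base + bit_pos;

	printf("slot %lu -> va 0x%llx, cpu index %ld\n",
	       bit_pos, (unsigned long long)va, (long)(cpu_addr - cpu_base));

	/* amdgpu_seq64_free() inverts the mapping to recover the bit position */
	unsigned long freed = (unsigned long)((va - va_base) / sizeof(uint64_t));
	printf("freed slot %lu\n", freed);
	return 0;
}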
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 1b013a44ca99..bdf1ef825d89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -441,9 +441,7 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
*/
int amdgpu_sync_init(void)
{
- amdgpu_sync_slab = kmem_cache_create(
- "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ amdgpu_sync_slab = KMEM_CACHE(amdgpu_sync_entry, SLAB_HWCACHE_ALIGN);
if (!amdgpu_sync_slab)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 75c9fd2c6c2a..8722beba494e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -102,23 +102,19 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
/* Don't handle scatter gather BOs */
if (bo->type == ttm_bo_type_sg) {
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
}
/* Object isn't an AMDGPU object so ignore */
if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
abo = ttm_to_amdgpu_bo(bo);
if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
}
@@ -128,13 +124,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -149,8 +145,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
AMDGPU_GEM_DOMAIN_CPU);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
- abo->placement.busy_placement = &abo->placements[1];
- abo->placement.num_busy_placement = 1;
+ abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
} else {
/* Move to GTT memory */
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
@@ -966,8 +961,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
/* allocate GART space */
placement.num_placement = 1;
placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
placements.mem_type = TTM_PL_TT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3e12763e477a..0867fd9e15ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -556,6 +556,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
default:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
+ else if (load_type == 3)
+ return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
else
return AMDGPU_FW_LOAD_PSP;
}
@@ -678,6 +680,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "UMSCH_MM_DATA";
case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
return "UMSCH_MM_CMD_BUFFER";
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ return "JPEG";
default:
return "UNKNOWN UCODE";
}
@@ -1060,7 +1064,8 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
+ if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
+ (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4244a13f9f22..619445760037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -511,6 +511,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_UMSCH_MM_DATA,
AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
AMDGPU_UCODE_ID_P2S_TABLE,
+ AMDGPU_UCODE_ID_JPEG_RAM,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index d65e21914d8c..20436f81856a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "umc_v6_7.h"
+#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 /* ms */
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
@@ -85,18 +86,21 @@ out_fini_err_data:
return ret;
}
-static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
- void *ras_error_status,
- struct amdgpu_iv_entry *entry,
- bool reset)
+static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ unsigned int error_query_mode;
int ret = 0;
+ unsigned long err_count;
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+
+ mutex_lock(&con->page_retirement_lock);
ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
- if (ret == -EOPNOTSUPP) {
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
@@ -120,7 +124,8 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
*/
adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
}
- } else if (!ret) {
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
if (adev->umc.ras &&
adev->umc.ras->ecc_info_query_ras_error_count)
adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
@@ -147,16 +152,13 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
}
/* only uncorrectable error needs gpu reset */
- if (err_data->ue_count) {
- dev_info(adev->dev, "%ld uncorrectable hardware errors "
- "detected in UMC block\n",
- err_data->ue_count);
-
+ if (err_data->ue_count || err_data->de_count) {
+ err_count = err_data->ue_count + err_data->de_count;
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
+ amdgpu_ras_save_bad_pages(adev, &err_count);
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
@@ -165,20 +167,87 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
con->update_channel_flag = false;
}
}
-
- if (reset) {
- /* use mode-2 reset for poison consumption */
- if (!entry)
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
- amdgpu_ras_reset_gpu(adev);
- }
}
kfree(err_data->err_addr);
+
+ mutex_unlock(&con->page_retirement_lock);
+}
+
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry,
+ bool reset)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_umc_handle_bad_pages(adev, ras_error_status);
+
+ if (err_data->ue_count && reset) {
+ /* use mode-2 reset for poison consumption */
+ if (!entry)
+ con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
return AMDGPU_RAS_SUCCESS;
}
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
+int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
+ bool reset, uint32_t timeout_ms)
+{
+ struct ras_err_data err_data;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ uint32_t timeout = timeout_ms;
+
+ memset(&err_data, 0, sizeof(err_data));
+ amdgpu_ras_error_data_init(&err_data);
+
+ do {
+
+ amdgpu_umc_handle_bad_pages(adev, &err_data);
+
+ if (timeout && !err_data.de_count) {
+ msleep(1);
+ timeout--;
+ }
+
+ } while (timeout && !err_data.de_count);
+
+ if (!timeout)
+ dev_warn(adev->dev, "Can't find bad pages\n");
+
+ if (err_data.de_count)
+ dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
+
+ if (obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
+ }
+
+ amdgpu_ras_error_data_fini(&err_data);
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (reset) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ /* use mode-2 reset for poison consumption */
+ con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ return 0;
+}
+
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
@@ -195,27 +264,41 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
}
if (!amdgpu_sriov_vf(adev)) {
- struct ras_err_data err_data;
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+ struct ras_err_data err_data;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return ret;
- ret = amdgpu_ras_error_data_init(&err_data);
- if (ret)
- return ret;
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
+ }
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- }
+ amdgpu_ras_error_data_fini(&err_data);
+ } else {
+ if (reset) {
+ amdgpu_umc_bad_page_polling_timeout(adev,
+ reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
+ } else {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- amdgpu_ras_error_data_fini(&err_data);
+ atomic_inc(&con->page_retirement_req_cnt);
+
+ wake_up(&con->page_retirement_wq);
+ }
+ }
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
- adev->virt.ops->ras_poison_handler(adev);
+ adev->virt.ops->ras_poison_handler(adev, block);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 417a6726c71b..26d2ae498daf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,7 +21,7 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
-
+#include "amdgpu_mca.h"
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -64,6 +64,8 @@ struct amdgpu_umc_ras {
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+ bool (*check_ecc_err_status)(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, void *ras_error_status);
/* support different eeprom table version for different asic */
void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr);
};
@@ -100,7 +102,8 @@ struct amdgpu_umc {
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
@@ -118,4 +121,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
+
+int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
+ bool reset, uint32_t timeout_ms);
#endif
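Editor's note: amdgpu_umc_bad_page_polling_timeout() above implements a simple bounded poll: keep re-querying for newly reported deferred errors, sleeping one millisecond between attempts, until something shows up or the millisecond budget is exhausted. A userspace sketch of the same loop shape follows; check_for_deferred_errors() is a made-up stand-in for the UMC query.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool check_for_deferred_errors(void)
{
	static int calls;

	return ++calls >= 3;	/* pretend an error surfaces on the third poll */
}

int main(void)
{
	unsigned int timeout = 20;	/* mirrors MAX_UMC_POISON_POLLING_TIME_SYNC (ms) */
	bool found;

	do {
		found = check_for_deferred_errors();
		if (!found && timeout) {
			usleep(1000);	/* stands in for msleep(1) */
			timeout--;
		}
	} while (!found && timeout);

	printf(found ? "deferred error found\n" : "timed out, no bad pages found\n");
	return 0;
}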
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
index 107f9bb0e24f..5b27fc41ffbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
@@ -69,12 +69,12 @@ struct amdgpu_debugfs_gprwave_data {
};
enum AMDGPU_DEBUGFS_REGS2_CMDS {
- AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0,
+ AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2,
};
enum AMDGPU_DEBUGFS_GPRWAVE_CMDS {
- AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0,
+ AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE = 0,
};
//reg2 interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index bfbf59326ee1..ab820cf52668 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -358,7 +358,7 @@ static int setup_umsch_mm_test(struct amdgpu_device *adev,
memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));
- test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
+ test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f4963330c772..b2535023764f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -59,6 +59,8 @@
#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
+#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -82,6 +84,8 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -1189,7 +1193,7 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
- adev->virt.ops->ras_poison_handler(adev);
+ adev->virt.ops->ras_poison_handler(adev, ras_if->block);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV for VCN!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 514c98ea144f..1985f71b4373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -160,6 +160,48 @@
} \
} while (0)
+#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
+ ({ \
+ uint32_t internal_reg_offset, addr; \
+ bool video_range, aon_range; \
+ \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
+ addr <<= 2; \
+ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ if (video_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
+ else \
+ internal_reg_offset = (0xFFFFF & addr); \
+ \
+ internal_reg_offset >>= 2; \
+ })
+
+#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
+ regUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ VCN, GET_INST(VCN, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
+
#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
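Editor's note: the SOC24_DPG_MODE_OFFSET() macro above rebases a register's byte address onto the internal VID or AON IP aperture when its low 20 bits fall inside the corresponding SOC window (0x2600 and 0x600 bytes wide, respectively), and otherwise keeps the low 20 bits as-is, before converting back to a dword offset. A standalone sketch of that mapping; the window base constants here are illustrative placeholders, not the real VCN_VID/AON addresses.

#include <stdint.h>
#include <stdio.h>

#define VID_SOC_BASE 0x20000u	/* placeholder for VCN_VID_SOC_ADDRESS */
#define VID_IP_BASE  0x0u	/* placeholder for VCN_VID_IP_ADDRESS */
#define AON_SOC_BASE 0x30000u	/* placeholder for VCN_AON_SOC_ADDRESS */
#define AON_IP_BASE  0x1000u	/* placeholder for VCN_AON_IP_ADDRESS */

static uint32_t dpg_mode_offset(uint32_t byte_addr)
{
	uint32_t a = byte_addr & 0xFFFFF;	/* only the low 20 bits are compared */
	uint32_t internal;

	if (a >= VID_SOC_BASE && a < VID_SOC_BASE + 0x2600)
		internal = a - VID_SOC_BASE + VID_IP_BASE;
	else if (a >= AON_SOC_BASE && a < AON_SOC_BASE + 0x600)
		internal = a - AON_SOC_BASE + AON_IP_BASE;
	else
		internal = a;

	return internal >> 2;	/* back to a dword register offset */
}

int main(void)
{
	printf("0x%x\n", dpg_mode_offset(0x20010));	/* VID window -> 0x4 */
	printf("0x%x\n", dpg_mode_offset(0x30004));	/* AON window -> 0x401 */
	return 0;
}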
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0dcff2889e25..7a4eae36778a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -71,59 +71,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
amdgpu_num_kcq = 2;
}
-void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask,
- uint32_t xcc_inst)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
- struct amdgpu_ring *ring = &kiq->ring;
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
-
- if (adev->mes.ring.sched.ready) {
- amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
- ref, mask);
- return;
- }
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
- ref, mask);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r)
- goto failed_undo;
-
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for IRQ context */
- if (r < 1 && in_interrupt())
- goto failed_kiq;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq;
-
- return;
-
-failed_undo:
- amdgpu_ring_undo(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-failed_kiq:
- dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
-}
-
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
* @adev: amdgpu device.
@@ -303,11 +250,11 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
if (!*data)
goto data_failure;
- bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
+ bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
if (!bps)
goto bps_failure;
- bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+ bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
if (!bps_bo)
goto bps_bo_failure;
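Editor's note: the sizeof change above is the classic pointer-size versus element-size bug. With a member declared as a pointer to an array element, sizeof(member) is the size of the pointer, while sizeof(*member) is the size of one element, so the old call under-allocated the array. A small standalone sketch with illustrative types:

/* Illustrative types only; not the driver's structures. */
#include <stdio.h>
#include <stdlib.h>

struct record { unsigned long long addr; unsigned int count; };
struct handler_data { struct record *bps; };

int main(void)
{
	struct handler_data d;
	size_t n = 16;

	printf("sizeof(d.bps)  = %zu\n", sizeof(d.bps));   /* pointer size */
	printf("sizeof(*d.bps) = %zu\n", sizeof(*d.bps));  /* element size */

	d.bps = calloc(n, sizeof(*d.bps));  /* correct: room for n full elements */
	free(d.bps);
	return 0;
}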
@@ -340,8 +287,10 @@ static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
for (i = data->last_reserved - 1; i >= 0; i--) {
bo = data->bps_bo[i];
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps_bo[i] = bo;
+ if (bo) {
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+ data->bps_bo[i] = bo;
+ }
data->last_reserved = i;
}
}
@@ -381,6 +330,8 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
struct amdgpu_virt *virt = &adev->virt;
struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
struct amdgpu_bo *bo = NULL;
uint64_t bp;
int i;
@@ -396,12 +347,18 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
* 2) a ras bad page has been reserved (duplicate error injection
* for one page);
*/
- if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
- AMDGPU_GPU_PAGE_SIZE,
- &bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
-
- data->bps_bo[i] = bo;
+ if (ttm_resource_manager_used(man)) {
+ amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
+ bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE);
+ data->bps_bo[i] = NULL;
+ } else {
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ &bo, NULL))
+ DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ data->bps_bo[i] = bo;
+ }
data->last_reserved = i + 1;
bo = NULL;
}
@@ -1022,7 +979,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
* SCRATCH_REG0 = read/write value
* SCRATCH_REG1[30:28] = command
* SCRATCH_REG1[19:0] = address in dword
- * SCRATCH_REG1[26:24] = Error reporting
+ * SCRATCH_REG1[27:24] = Error reporting
*/
writel(v, scratch_reg0);
writel((offset | flag), scratch_reg1);
@@ -1036,7 +993,8 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
udelay(10);
}
- if (i >= timeout) {
+ tmp = readl(scratch_reg1);
+ if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
dev_err(adev->dev,
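Editor's note: the RLCG hunk above makes the access fail not only on timeout but also when the RLC leaves an error code in SCRATCH_REG1[27:24]; the comment and the new AMDGPU_RLCG_SCRATCH1_ERROR_MASK reflect the widened field. A mocked sketch of the stricter check, with the register read replaced by a plain value:

/* Sketch of the failure condition; register access is mocked. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define RLCG_SCRATCH1_ERROR_MASK 0xF000000u    /* SCRATCH_REG1[27:24] */

static bool rlcg_access_failed(uint32_t scratch1, unsigned int polls,
			       unsigned int timeout)
{
	/* previously only the timeout was checked; the error bits count now too */
	return polls >= timeout || (scratch1 & RLCG_SCRATCH1_ERROR_MASK) != 0;
}

int main(void)
{
	printf("%d\n", rlcg_access_failed(0x0000000, 10, 50));  /* 0: clean, in time */
	printf("%d\n", rlcg_access_failed(0x2000000, 10, 50));  /* 1: error code set */
	printf("%d\n", rlcg_access_failed(0x0000000, 50, 50));  /* 1: timed out */
	return 0;
}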
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index d4207e44141f..3f59b7b5523f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -45,6 +45,7 @@
#define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000
#define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK 0xFFFFF
+#define AMDGPU_RLCG_SCRATCH1_ERROR_MASK 0xF000000
/* all asic after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
@@ -88,7 +89,8 @@ struct amdgpu_virt_ops {
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
u32 data1, u32 data2, u32 data3);
- void (*ras_poison_handler)(struct amdgpu_device *adev);
+ void (*ras_poison_handler)(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
};
/*
@@ -332,10 +334,6 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t rreg1,
- uint32_t ref, uint32_t mask,
- uint32_t xcc_inst);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 453a4b786cfc..8baa2e0935cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -660,8 +660,7 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.set_powergating_state = amdgpu_vkms_set_powergating_state,
};
-const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
-{
+const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b8fcb6c55698..4299ce386322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -234,6 +234,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_evicted_user - vm_bo is evicted
+ *
+ * @vm_bo: vm_bo which is evicted
+ *
+ * State for BOs used by user mode queues which are not at the location they
+ * should be.
+ */
+static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
+{
+ vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
+ spin_unlock(&vm_bo->vm->status_lock);
+}
+
+/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated

*
* @vm_bo: vm_bo which is relocated
@@ -427,21 +443,25 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
+ * amdgpu_vm_validate - validate evicted BOs tracked in the VM
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
+ * @ticket: optional reservation ticket used to reserve the VM
* @validate: callback to do the validation
* @param: parameter for the validation callback
*
- * Validate the page table BOs on command submission if neccessary.
+ * Validate the page table BOs and per-VM BOs on command submission if
+ * necessary. If a ticket is given, also try to validate evicted user queue
+ * BOs. They must already be reserved with the given ticket.
*
* Returns:
* Validation result.
*/
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*validate)(void *p, struct amdgpu_bo *bo),
- void *param)
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *shadow;
@@ -484,6 +504,34 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
spin_lock(&vm->status_lock);
}
+ while (ticket && !list_empty(&vm->evicted_user)) {
+ bo_base = list_first_entry(&vm->evicted_user,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+
+ if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
+ struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
+
+ pr_warn_ratelimited("Evicted user BO is not reserved\n");
+ if (ti) {
+ pr_warn_ratelimited("pid %d\n", ti->pid);
+ amdgpu_vm_put_task_info(ti);
+ }
+
+ return -EINVAL;
+ }
+
+ r = validate(param, bo);
+ if (r)
+ return r;
+
+ amdgpu_vm_bo_invalidated(bo_base);
+
+ spin_lock(&vm->status_lock);
+ }
spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
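Editor's note: the new evicted_user loop above follows the usual drain pattern for BO state lists: peek the first entry under the status lock, drop the lock while validating (the BO must already be reserved with the caller's ticket), then re-take the lock before touching the list again. A minimal userspace sketch of that pattern, assuming a toy list and pthread mutex in place of the driver's structures:

/* Sketch of the lock/peek/unlock/validate/relock drain; toy types only. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *evicted_user;

static int validate(struct node *n)
{
	printf("validating bo %d\n", n->id);   /* stand-in for the callback */
	return 0;
}

static int drain_evicted_user(void)
{
	int r = 0;

	pthread_mutex_lock(&status_lock);
	while (evicted_user) {
		struct node *n = evicted_user;

		pthread_mutex_unlock(&status_lock);  /* can't validate under the lock */
		r = validate(n);
		pthread_mutex_lock(&status_lock);
		if (r)
			break;
		evicted_user = n->next;              /* entry moved off this list */
	}
	pthread_mutex_unlock(&status_lock);
	return r;
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	evicted_user = &a;
	return drain_evicted_user();
}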
@@ -610,7 +658,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool vm_flush_needed = job->vm_needs_flush;
struct dma_fence *fence = NULL;
bool pasid_mapping_needed = false;
- unsigned patch_offset = 0;
+ unsigned int patch;
int r;
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
@@ -637,7 +685,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_ring_ib_begin(ring);
if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ patch = amdgpu_ring_init_cond_exec(ring,
+ ring->cond_exe_gpu_addr);
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
@@ -651,7 +700,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
gds_switch_needed) {
@@ -685,8 +734,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
}
dma_fence_put(fence);
- if (ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ amdgpu_ring_patch_cond_exec(ring, patch);
/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
if (ring->funcs->emit_switch_buffer) {
@@ -1343,10 +1391,6 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- if (vm->pte_support_ats &&
- mapping->start < AMDGPU_GMC_HOLE_START)
- init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
-
r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
resv, mapping->start, mapping->last,
init_pte_value, 0, 0, NULL, NULL,
@@ -1426,11 +1470,21 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
}
r = amdgpu_vm_bo_update(adev, bo_va, clear);
- if (r)
- return r;
if (unlock)
dma_resv_unlock(resv);
+ if (r)
+ return r;
+
+ /* Remember evicted DMABuf imports in compute VMs for later
+ * validation
+ */
+ if (vm->is_compute_context &&
+ bo_va->base.bo->tbo.base.import_attach &&
+ (!bo_va->base.bo->tbo.resource ||
+ bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
+ amdgpu_vm_bo_evicted_user(&bo_va->base);
+
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
@@ -2173,6 +2227,108 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}
+static void amdgpu_vm_destroy_task_info(struct kref *kref)
+{
+ struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
+
+ kfree(ti);
+}
+
+static inline struct amdgpu_vm *
+amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
+{
+ struct amdgpu_vm *vm;
+ unsigned long flags;
+
+ xa_lock_irqsave(&adev->vm_manager.pasids, flags);
+ vm = xa_load(&adev->vm_manager.pasids, pasid);
+ xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
+
+ return vm;
+}
+
+/**
+ * amdgpu_vm_put_task_info - reference down the vm task_info ptr
+ *
+ * @task_info: task_info struct under discussion.
+ *
+ * frees the vm task_info ptr at the last put
+ */
+void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
+{
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+}
+
+/**
+ * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
+ *
+ * @vm: VM to get info from
+ *
+ * Returns the reference counted task_info structure, which must be
+ * referenced down with amdgpu_vm_put_task_info.
+ */
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
+{
+ struct amdgpu_task_info *ti = NULL;
+
+ if (vm) {
+ ti = vm->task_info;
+ kref_get(&vm->task_info->refcount);
+ }
+
+ return ti;
+}
+
+/**
+ * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
+ *
+ * @adev: drm device pointer
+ * @pasid: PASID identifier for VM
+ *
+ * Returns the reference counted task_info structure, which must be
+ * referenced down with amdgpu_vm_put_task_info.
+ */
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
+{
+ return amdgpu_vm_get_task_info_vm(
+ amdgpu_vm_get_vm_from_pasid(adev, pasid));
+}
+
+static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
+{
+ vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
+ if (!vm->task_info)
+ return -ENOMEM;
+
+ kref_init(&vm->task_info->refcount);
+ return 0;
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+ if (!vm->task_info)
+ return;
+
+ if (vm->task_info->pid == current->pid)
+ return;
+
+ vm->task_info->pid = current->pid;
+ get_task_comm(vm->task_info->task_name, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info->tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info->process_name, current->group_leader);
+}
+
/**
* amdgpu_vm_init - initialize a vm instance
*
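Editor's note: the task_info rework above replaces the embedded struct with a heap-allocated, reference-counted object so fault handlers can safely hold it after the VM is gone. A userspace sketch of the get/put lifetime, using a plain C11 atomic in place of kref; names mirror the driver but are illustrative:

/* Sketch of the refcounted task_info lifetime; not the kernel kref API. */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct task_info {
	char process_name[16];
	int pid;
	atomic_int refcount;
};

static struct task_info *task_info_create(int pid, const char *name)
{
	struct task_info *ti = calloc(1, sizeof(*ti));

	if (!ti)
		return NULL;
	ti->pid = pid;
	strncpy(ti->process_name, name, sizeof(ti->process_name) - 1);
	atomic_init(&ti->refcount, 1);          /* the VM holds the first reference */
	return ti;
}

static struct task_info *task_info_get(struct task_info *ti)
{
	atomic_fetch_add(&ti->refcount, 1);     /* caller now owns a reference */
	return ti;
}

static void task_info_put(struct task_info *ti)
{
	if (atomic_fetch_sub(&ti->refcount, 1) == 1)
		free(ti);                       /* last reference dropped */
}

int main(void)
{
	struct task_info *vm_ti = task_info_create(42, "demo");
	struct task_info *user = task_info_get(vm_ti);  /* e.g. a fault handler */

	printf("pid %d (%s)\n", user->pid, user->process_name);
	task_info_put(user);    /* fault handler done */
	task_info_put(vm_ti);   /* VM teardown */
	return 0;
}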
@@ -2196,6 +2352,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->evicted_user);
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
@@ -2211,7 +2368,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
- vm->pte_support_ats = false;
vm->is_compute_context = false;
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2258,6 +2414,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = amdgpu_vm_create_task_info(vm);
+ if (r)
+ DRM_DEBUG("Failed to create task info for VM\n");
+
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2297,30 +2457,12 @@ error_free_delayed:
*/
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
r = amdgpu_bo_reserve(vm->root.bo, true);
if (r)
return r;
- /* Check if PD needs to be reinitialized and do it before
- * changing any other state, in case it fails.
- */
- if (pte_support_ats != vm->pte_support_ats) {
- /* Sanity checks */
- if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
- r = -EINVAL;
- goto unreserve_bo;
- }
-
- vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
- false);
- if (r)
- goto unreserve_bo;
- }
-
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
@@ -2397,6 +2539,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
+ amdgpu_vm_put_task_info(vm->task_info);
amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
@@ -2554,48 +2697,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
/**
- * amdgpu_vm_get_task_info - Extracts task info for a PASID.
- *
- * @adev: drm device pointer
- * @pasid: PASID identifier for VM
- * @task_info: task_info to fill.
- */
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
- struct amdgpu_task_info *task_info)
-{
- struct amdgpu_vm *vm;
- unsigned long flags;
-
- xa_lock_irqsave(&adev->vm_manager.pasids, flags);
-
- vm = xa_load(&adev->vm_manager.pasids, pasid);
- if (vm)
- *task_info = vm->task_info;
-
- xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
-}
-
-/**
- * amdgpu_vm_set_task_info - Sets VMs task info.
- *
- * @vm: vm for which to set the info
- */
-void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
-{
- if (vm->task_info.pid)
- return;
-
- vm->task_info.pid = current->pid;
- get_task_comm(vm->task_info.task_name, current);
-
- if (current->group_leader->mm != current->mm)
- return;
-
- vm->task_info.tgid = current->group_leader->pid;
- get_task_comm(vm->task_info.process_name, current->group_leader);
-}
-
-/**
* amdgpu_vm_handle_fault - graceful handling of VM faults.
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4740dd65b99d..047ec1930d12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -135,8 +135,21 @@ struct amdgpu_mem_stats;
#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
-/* Reserve 2MB at top/bottom of address space for kernel use */
-#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
+/* Reserve space at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20)
+#define AMDGPU_VA_RESERVED_CSA_START(adev) (((adev)->vm_manager.max_pfn \
+ << AMDGPU_GPU_PAGE_SHIFT) \
+ - AMDGPU_VA_RESERVED_CSA_SIZE)
+#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20)
+#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
+ - AMDGPU_VA_RESERVED_SEQ64_SIZE)
+#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12)
+#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
+ - AMDGPU_VA_RESERVED_TRAP_SIZE)
+#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
+#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_TRAP_SIZE + \
+ AMDGPU_VA_RESERVED_SEQ64_SIZE + \
+ AMDGPU_VA_RESERVED_CSA_SIZE)
/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
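Editor's note: the new reserved-VA macros stack three regions downward from the top of the GPU address space: the 2 MB CSA region at the very top, a 2 MB SEQ64 region below it, then an 8 KB trap region, with AMDGPU_VA_RESERVED_TOP being their sum. A small sketch of the resulting layout; max_pfn here is an assumed value, not read from hardware:

/* Sketch of the reserved-VA layout; sizes match the patch, max_pfn assumed. */
#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SHIFT       12
#define RESERVED_CSA_SIZE    (2ULL << 20)
#define RESERVED_SEQ64_SIZE  (2ULL << 20)
#define RESERVED_TRAP_SIZE   (2ULL << 12)

int main(void)
{
	uint64_t max_pfn      = 1ULL << 36;                 /* assumed 48-bit VA */
	uint64_t top          = max_pfn << GPU_PAGE_SHIFT;
	uint64_t csa_start    = top - RESERVED_CSA_SIZE;
	uint64_t seq64_start  = csa_start - RESERVED_SEQ64_SIZE;
	uint64_t trap_start   = seq64_start - RESERVED_TRAP_SIZE;
	uint64_t reserved_top = RESERVED_CSA_SIZE + RESERVED_SEQ64_SIZE +
				RESERVED_TRAP_SIZE;

	printf("CSA:   0x%llx\n", (unsigned long long)csa_start);
	printf("SEQ64: 0x%llx\n", (unsigned long long)seq64_start);
	printf("TRAP:  0x%llx\n", (unsigned long long)trap_start);
	printf("reserved at top: 0x%llx bytes\n", (unsigned long long)reserved_top);
	return 0;
}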
@@ -190,10 +203,11 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
- char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
- pid_t tgid;
+ char process_name[TASK_COMM_LEN];
+ char task_name[TASK_COMM_LEN];
+ pid_t pid;
+ pid_t tgid;
+ struct kref refcount;
};
/**
@@ -288,9 +302,12 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
- /* BOs who needs a validation */
+ /* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -341,9 +358,6 @@ struct amdgpu_vm {
/* Functions to use for VM table updates */
const struct amdgpu_vm_update_funcs *update_funcs;
- /* Flag to indicate ATS support from PTE for GFX9 */
- bool pte_support_ats;
-
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
@@ -357,7 +371,7 @@ struct amdgpu_vm {
uint64_t pd_phys_addr;
/* Some basic info about the task */
- struct amdgpu_task_info task_info;
+ struct amdgpu_task_info *task_info;
/* Store positions of group of BOs */
struct ttm_lru_bulk_move lru_bulk_move;
@@ -434,9 +448,10 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*callback)(void *p, struct amdgpu_bo *bo),
- void *param);
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate);
@@ -497,8 +512,14 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
- struct amdgpu_task_info *task_info);
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);
+
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
+
+void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
+
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
u32 vmid, u32 node_id, uint64_t addr,
bool write_fault);
@@ -516,8 +537,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int level, bool immediate, struct amdgpu_bo_vm **vmbo,
int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index a160265ddc07..124389a6bf48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -90,22 +90,6 @@ static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_pt_num_ats_entries - return the number of ATS entries in the root PD
- *
- * @adev: amdgpu_device pointer
- *
- * Returns:
- * The number of entries in the root page directory which needs the ATS setting.
- */
-static unsigned int amdgpu_vm_pt_num_ats_entries(struct amdgpu_device *adev)
-{
- unsigned int shift;
-
- shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
- return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
-}
-
-/**
* amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
*
* @adev: amdgpu_device pointer
@@ -379,7 +363,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_vm_update_params params;
struct amdgpu_bo *ancestor = &vmbo->bo;
- unsigned int entries, ats_entries;
+ unsigned int entries;
struct amdgpu_bo *bo = &vmbo->bo;
uint64_t addr;
int r, idx;
@@ -394,27 +378,6 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
entries = amdgpu_bo_size(bo) / 8;
- if (!vm->pte_support_ats) {
- ats_entries = 0;
-
- } else if (!bo->parent) {
- ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
- ats_entries = min(ats_entries, entries);
- entries -= ats_entries;
-
- } else {
- struct amdgpu_vm_bo_base *pt;
-
- pt = ancestor->vm_bo;
- ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
- if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >=
- ats_entries) {
- ats_entries = 0;
- } else {
- ats_entries = entries;
- entries = 0;
- }
- }
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
@@ -445,44 +408,24 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto exit;
addr = 0;
- if (ats_entries) {
- uint64_t value = 0, flags;
- flags = AMDGPU_PTE_DEFAULT_ATC;
+ uint64_t value = 0, flags = 0;
+ if (adev->asic_type >= CHIP_VEGA10) {
if (level != AMDGPU_VM_PTB) {
/* Handle leaf PDEs as PTEs */
flags |= AMDGPU_PDE_PTE;
- amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
+ amdgpu_gmc_get_vm_pde(adev, level,
+ &value, &flags);
+ } else {
+ /* Workaround for fault priority problem on GMC9 */
+ flags = AMDGPU_PTE_EXECUTABLE;
}
-
- r = vm->update_funcs->update(&params, vmbo, addr, 0,
- ats_entries, value, flags);
- if (r)
- goto exit;
-
- addr += ats_entries * 8;
}
- if (entries) {
- uint64_t value = 0, flags = 0;
-
- if (adev->asic_type >= CHIP_VEGA10) {
- if (level != AMDGPU_VM_PTB) {
- /* Handle leaf PDEs as PTEs */
- flags |= AMDGPU_PDE_PTE;
- amdgpu_gmc_get_vm_pde(adev, level,
- &value, &flags);
- } else {
- /* Workaround for fault priority problem on GMC9 */
- flags = AMDGPU_PTE_EXECUTABLE;
- }
- }
-
- r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
- value, flags);
- if (r)
- goto exit;
- }
+ r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
+ value, flags);
+ if (r)
+ goto exit;
r = vm->update_funcs->commit(&params, NULL);
exit:
@@ -728,33 +671,6 @@ void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pt_is_root_clean - check if a root PD is clean
- *
- * @adev: amdgpu_device pointer
- * @vm: the VM to check
- *
- * Check all entries of the root PD, if any subsequent PDs are allocated,
- * it means there are page table creating and filling, and is no a clean
- * VM
- *
- * Returns:
- * 0 if this VM is clean
- */
-bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- enum amdgpu_vm_level root = adev->vm_manager.root_level;
- unsigned int entries = amdgpu_vm_pt_num_entries(adev, root);
- unsigned int i = 0;
-
- for (i = 0; i < entries; i++) {
- if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
- return false;
- }
- return true;
-}
-
-/**
* amdgpu_vm_pde_update - update a single level in the hierarchy
*
* @params: parameters for the update
@@ -1027,7 +943,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
- vm->task_info.tgid,
+ vm->task_info ? vm->task_info->tgid : 0,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index b9a15d51eb5c..70c5cc80ecdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -297,6 +297,10 @@ static int vpe_early_init(void *handle)
case IP_VERSION(6, 1, 0):
vpe_v6_1_set_funcs(vpe);
break;
+ case IP_VERSION(6, 1, 1):
+ vpe_v6_1_set_funcs(vpe);
+ vpe->collaborate_mode = true;
+ break;
default:
return -EINVAL;
}
@@ -304,6 +308,8 @@ static int vpe_early_init(void *handle)
vpe_set_ring_funcs(adev);
vpe_set_regs(vpe);
+ dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false");
+
return 0;
}
@@ -457,6 +463,18 @@ static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
return csa_mc_addr;
}
+static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring,
+ uint32_t device_select,
+ uint32_t exec_count)
+{
+ if (!ring->adev->vpe.collaborate_mode)
+ return;
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) |
+ (device_select << 16));
+ amdgpu_ring_write(ring, exec_count & 0x1fff);
+}
+
static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
@@ -505,6 +523,8 @@ static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
+ vpe_ring_emit_pred_exec(ring, 0, 6);
+
/* wait for idle */
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
VPE_POLL_REGMEM_SUBOP_REGMEM) |
@@ -520,6 +540,8 @@ static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
+ vpe_ring_emit_pred_exec(ring, 0, 3);
+
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, val);
@@ -528,6 +550,8 @@ static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t
static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
+ vpe_ring_emit_pred_exec(ring, 0, 6);
+
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
VPE_POLL_REGMEM_SUBOP_REGMEM) |
VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
@@ -546,34 +570,24 @@ static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
-static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned int ret;
+ if (ring->adev->vpe.collaborate_mode)
+ return ~0;
+
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, 1);
- ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
- amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
+ ret = ring->wptr & ring->buf_mask;
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void vpe_ring_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
-{
- unsigned int cur;
-
- WARN_ON_ONCE(offset > ring->buf_mask);
- WARN_ON_ONCE(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (cur > offset)
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
-}
-
static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
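Editor's note: the removed vpe_ring_patch_cond_exec above computed how many dwords the COND_EXE packet must skip, taking ring-buffer wrap-around into account; that arithmetic now lives in the common amdgpu_ring_patch_cond_exec helper. A standalone sketch of the distance computation, with an assumed 1024-dword ring:

/* Sketch of the cond-exec patch value with wrap-around handling. */
#include <stdio.h>
#include <stdint.h>

static uint32_t cond_exec_patch_value(uint32_t offset, uint32_t wptr,
				      uint32_t buf_mask)
{
	uint32_t cur = (wptr - 1) & buf_mask;   /* index of the last written dword */

	if (cur > offset)
		return cur - offset;            /* no wrap between placeholder and wptr */
	return (buf_mask + 1) - offset + cur;   /* wrapped past the end of the ring */
}

int main(void)
{
	uint32_t buf_mask = 0x3ff;              /* assumed 1024-dword ring */

	printf("%u\n", cond_exec_patch_value(100, 160, buf_mask)); /* 59 */
	printf("%u\n", cond_exec_patch_value(1000, 20, buf_mask)); /* wraps: 43 */
	return 0;
}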
@@ -695,16 +709,22 @@ static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
upper_32_bits(ring->wptr << 2));
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ if (vpe->collaborate_mode)
+ WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2);
} else {
- dev_dbg(adev->dev, "Not using doorbell, \
- regVPEC_QUEUE0_RB_WPTR == 0x%08x, \
- regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo),
- lower_32_bits(ring->wptr << 2));
- WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi),
- upper_32_bits(ring->wptr << 2));
+ int i;
+
+ for (i = 0; i < vpe->num_instances; i++) {
+ dev_dbg(adev->dev, "Not using doorbell, \
+ regVPEC_QUEUE0_RB_WPTR == 0x%08x, \
+ regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
+ lower_32_bits(ring->wptr << 2));
+ WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
+ upper_32_bits(ring->wptr << 2));
+ }
}
}
@@ -864,7 +884,6 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.test_ring = vpe_ring_test_ring,
.test_ib = vpe_ring_test_ib,
.init_cond_exec = vpe_ring_init_cond_exec,
- .patch_cond_exec = vpe_ring_patch_cond_exec,
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
index 1153ddaea64d..231d86d0953e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
@@ -27,6 +27,8 @@
#include "amdgpu_irq.h"
#include "vpe_6_1_fw_if.h"
+#define AMDGPU_MAX_VPE_INSTANCES 2
+
struct amdgpu_vpe;
struct vpe_funcs {
@@ -74,6 +76,9 @@ struct amdgpu_vpe {
uint32_t *cmdbuf_cpu_addr;
struct delayed_work idle_work;
bool context_started;
+
+ uint32_t num_instances;
+ bool collaborate_mode;
};
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a6c88f2fe6e5..20d51f6c9bb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1035,15 +1035,74 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return 0;
}
+static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ struct amdgpu_device *adev = handle->adev;
+ const char *error_str;
+ u64 status;
+ int ret, ext_error_code;
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+
+ error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
+ xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
+ if (error_str)
+ dev_info(adev->dev, "%s detected\n", error_str);
+
+ if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) ||
+ (type == ACA_ERROR_TYPE_CE && ext_error_code == 6))
+ report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
+
+ return 0;
+}
+
+static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
+ .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report,
+};
+
+static const struct aca_info xgmi_v6_4_0_aca_info = {
+ .hwip = ACA_HWIP_TYPE_PCS_XGMI,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &xgmi_v6_4_0_aca_bank_ops,
+};
+
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
+ int r;
+
if (!adev->gmc.xgmi.supported ||
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
- return amdgpu_ras_block_late_init(adev, ras_block);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ &xgmi_v6_4_0_aca_info, NULL);
+ if (r)
+ goto late_fini;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
}
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
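Editor's note: both the new ACA bank handler and the existing query path above key off the extended error code in the status register, treating code 0 as an uncorrectable error and code 6 as a correctable one. A tiny sketch of that mapping with illustrative enum values:

/* Sketch of the extended-error-code classification; enum values illustrative. */
#include <stdio.h>

enum err_type { ERR_NONE = -1, ERR_UE = 0, ERR_CE = 1 };

static enum err_type xgmi_error_type(int ext_error_code)
{
	switch (ext_error_code) {
	case 0:
		return ERR_UE;   /* uncorrectable */
	case 6:
		return ERR_CE;   /* correctable */
	default:
		return ERR_NONE; /* not counted for this bank */
	}
}

int main(void)
{
	printf("%d %d %d\n", xgmi_error_type(0), xgmi_error_type(6),
	       xgmi_error_type(3));
	return 0;
}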
@@ -1099,7 +1158,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
{
- WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+ WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}
static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
@@ -1277,12 +1336,12 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count += ce_cnt;
}
-static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
+static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
{
const char *error_str;
int ext_error_code;
- ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status);
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
@@ -1291,9 +1350,9 @@ static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdg
switch (ext_error_code) {
case 0:
- return AMDGPU_MCA_ERROR_TYPE_UE;
+ return ACA_ERROR_TYPE_UE;
case 6:
- return AMDGPU_MCA_ERROR_TYPE_CE;
+ return ACA_ERROR_TYPE_CE;
default:
return -EINVAL;
}
@@ -1307,22 +1366,22 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
int xgmi_inst = mcm_info->die_id;
u64 status = 0;
- status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS);
- if (!MCA_REG__STATUS__VAL(status))
+ status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
+ if (!ACA_REG__STATUS__VAL(status))
return;
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
- case AMDGPU_MCA_ERROR_TYPE_UE:
+ case ACA_ERROR_TYPE_UE:
amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
break;
- case AMDGPU_MCA_ERROR_TYPE_CE:
+ case ACA_ERROR_TYPE_CE:
amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
break;
default:
break;
}
- WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+ WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}
static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c
new file mode 100644
index 000000000000..8a0773b80864
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "athub_v4_1_0.h"
+#include "athub/athub_4_1_0_offset.h"
+#include "athub/athub_4_1_0_sh_mask.h"
+#include "soc15_common.h"
+
+static uint32_t athub_v4_1_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
+
+static void athub_v4_1_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+athub_v4_1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v4_1_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ athub_v4_1_0_set_cg_cntl(adev, data);
+}
+
+static void
+athub_v4_1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v4_1_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ athub_v4_1_0_set_cg_cntl(adev, data);
+}
+
+int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ athub_v4_1_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_v4_1_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = athub_v4_1_0_get_cg_cntl(adev);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
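Editor's note: the new athub file follows the driver's usual def/data clock-gating idiom: read the control register once, toggle the gating bits according to the requested state and the supported-features mask, and write back only when something actually changed. A mocked sketch of that idiom, with the register replaced by a static variable and illustrative bit positions:

/* Sketch of the def/data read-modify-write idiom; register access mocked. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define CG_ENABLE_MASK 0x1u                     /* illustrative bit position */

static uint32_t misc_cntl;                      /* stands in for ATHUB_MISC_CNTL */
static uint32_t read_reg(void)    { return misc_cntl; }
static void write_reg(uint32_t v) { misc_cntl = v; printf("write 0x%x\n", v); }

static void update_mgcg(bool enable, bool supported)
{
	uint32_t def, data;

	def = data = read_reg();

	if (enable && supported)
		data |= CG_ENABLE_MASK;
	else
		data &= ~CG_ENABLE_MASK;

	if (def != data)                        /* skip the write when unchanged */
		write_reg(data);
}

int main(void)
{
	update_mgcg(true, true);    /* writes */
	update_mgcg(true, true);    /* no change, no write */
	update_mgcg(false, true);   /* writes */
	return 0;
}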
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h
new file mode 100644
index 000000000000..4d18d0998fa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V4_1_0_H__
+#define __ATHUB_V4_1_0_H__
+
+int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index a33e890c70d9..72362df352f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -62,6 +62,7 @@
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
+ int ps_size, ws_size;
int ps_shift;
uint16_t start;
unsigned last_jump;
@@ -70,8 +71,8 @@ typedef struct {
} atom_exec_context;
int amdgpu_atom_debug;
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -223,7 +224,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
- val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ if (idx < ctx->ps_size)
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ else
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -261,7 +265,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
val = gctx->reg_block;
break;
default:
- val = ctx->ws[idx];
+ if (idx < ctx->ws_size)
+ val = ctx->ws[idx];
+ else
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
}
break;
case ATOM_ARG_ID:
@@ -313,7 +320,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
DEBUG("IMM 0x%02X\n", val);
return val;
}
- return 0;
+ break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
@@ -495,6 +502,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
idx = U8(*ptr);
(*ptr)++;
DEBUG("PS[0x%02X]", idx);
+ if (idx >= ctx->ps_size) {
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
+ return;
+ }
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
@@ -527,6 +538,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
gctx->reg_block = val;
break;
default:
+ if (idx >= ctx->ws_size) {
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
+ return;
+ }
ctx->ws[idx] = val;
}
break;
@@ -624,7 +639,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
if (r) {
ctx->abort = true;
}
@@ -1203,7 +1218,7 @@ static struct {
atom_op_div32, ATOM_ARG_WS},
};
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1225,12 +1240,16 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
- if (ws)
+ if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
- else
+ ectx.ws_size = ws;
+ } else {
ectx.ws = NULL;
+ ectx.ws_size = 0;
+ }
debug_depth++;
while (1) {
@@ -1264,7 +1283,7 @@ free:
return ret;
}
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
@@ -1280,7 +1299,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *par
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
- r = amdgpu_atom_execute_table_locked(ctx, index, params);
+ r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
mutex_unlock(&ctx->mutex);
return r;
}
@@ -1552,7 +1571,7 @@ int amdgpu_atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
if (ret)
return ret;
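Editor's note: the atom.c changes above thread the parameter-space size through the interpreter (hence the extra sizeof(args) argument at every call site in the atombios_* files below) so PS and WS accesses can be bounds-checked instead of silently running past the buffer. A small sketch of the checked accessors, with an illustrative context struct:

/* Sketch of bounds-checked PS/WS access; context struct illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct exec_ctx {
	uint32_t *ps, *ws;
	int ps_size, ws_size;   /* sizes in dwords, carried alongside the buffers */
};

static uint32_t ps_read(struct exec_ctx *ctx, int idx)
{
	if (idx >= ctx->ps_size) {
		printf("PS index out of range: %d >= %d\n", idx, ctx->ps_size);
		return 0;
	}
	return ctx->ps[idx];
}

static void ws_write(struct exec_ctx *ctx, int idx, uint32_t val)
{
	if (idx >= ctx->ws_size) {
		printf("WS index out of range: %d >= %d\n", idx, ctx->ws_size);
		return;
	}
	ctx->ws[idx] = val;
}

int main(void)
{
	uint32_t ps[4] = { 1, 2, 3, 4 }, ws[2] = { 0 };
	struct exec_ctx ctx = { ps, ws, 4, 2 };

	printf("%u\n", ps_read(&ctx, 3));
	ps_read(&ctx, 7);       /* rejected */
	ws_write(&ctx, 1, 9);
	ws_write(&ctx, 5, 9);   /* rejected */
	return 0;
}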
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index c11cf18a0f18..b807f6639a4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -156,7 +156,7 @@ struct atom_context {
extern int amdgpu_atom_debug;
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
int amdgpu_atom_asic_init(struct atom_context *ctx);
void amdgpu_atom_destroy(struct atom_context *ctx);
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 10098fdd33fc..3dfc28840a7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -77,7 +77,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
@@ -106,7 +106,7 @@ void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
args.ucEnable = ATOM_SCALER_DISABLE;
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
@@ -123,7 +123,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucEnable = lock;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
@@ -139,7 +139,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucEnable = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
@@ -155,7 +155,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucBlanking = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
@@ -171,7 +171,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
args.ucDispPipeId = amdgpu_crtc->crtc_id;
args.ucEnable = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
@@ -183,7 +183,7 @@ void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
args.ucEnable = ATOM_INIT;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
@@ -228,7 +228,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = amdgpu_crtc->crtc_id;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union atom_enable_ss {
@@ -293,7 +293,7 @@ static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union adjust_pixel_clock {
@@ -395,7 +395,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
amdgpu_atom_execute_table(adev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
@@ -428,7 +428,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucExtTransmitterID = 0;
amdgpu_atom_execute_table(adev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
@@ -514,7 +514,7 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union set_dce_clock {
@@ -544,7 +544,7 @@ u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
args.v2_1.asParam.ucDCEClkType = clk_type;
args.v2_1.asParam.ucDCEClkSrc = clk_src;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
break;
default:
@@ -740,7 +740,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 87c41e0e9b7c..622634c08c7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -83,7 +83,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
args.v2.ucDelay = delay / 10;
args.v2.ucHPD_ID = chan->rec.hpd;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*ack = args.v2.ucReplyStatus;
@@ -301,7 +301,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
args.ucLaneNum = lane_num;
args.ucStatus = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return args.ucStatus;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 7672abe6c140..25feab188dfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -335,7 +335,7 @@ amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
args.ucDacStandard = ATOM_DAC1_PS2;
args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -432,7 +432,7 @@ amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
@@ -732,7 +732,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -1136,7 +1136,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
bool
@@ -1164,7 +1164,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
args.v1.ucAction = action;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1288,7 +1288,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void
@@ -1633,7 +1633,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
/* This only needs to be called once at startup */
@@ -1706,7 +1706,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return true;
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index af0335535f82..a6501114322f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -86,7 +86,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -172,5 +172,5 @@ void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr
args.ucSlaveAddr = slave_addr;
args.ucLineNumber = line_number;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4dfaa017cf7f..a3a643254d7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1638,28 +1638,18 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (bridge_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(root,
- PCI_EXP_LNKCTL2,
- tmp16);
-
- pcie_capability_read_word(adev->pdev,
- PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (gpu_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(adev->pdev,
- PCI_EXP_LNKCTL2,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1674,16 +1664,15 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
-
+ tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
- pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
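cik_pcie_gen3_enable() is converted from open-coded read/modify/write sequences to pcie_capability_clear_and_set_word(), which performs the same three steps in one call. For reference, an open-coded equivalent of one converted call looks roughly like this (illustration only, return values ignored):

	static void lnkctl2_update_sketch(struct pci_dev *dev, u16 clear, u16 set)
	{
		u16 val;

		pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &val);
		val &= ~clear;
		val |= set;
		pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, val);
	}

Note that the last hunk also drops the initial register read: the helper clears PCI_EXP_LNKCTL2_TLS and sets only the newly computed target link speed, so the previous value no longer needs to be fetched into tmp16 first.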
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 567a904804bc..9c85ca6358c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -21,8 +21,7 @@
*
*/
-static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // PA_SC_VPORT_ZMIN_15
0x3f800000, // PA_SC_VPORT_ZMAX_15
};
-static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_2[] = {
0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
0x00000000, // PA_SC_TILE_STEERING_OVERRIDE
0x00000000, // CP_PERFMON_CNTX_CNTL
@@ -521,15 +519,13 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // CB_MRT6_EPITCH
0x00000000, // CB_MRT7_EPITCH
};
-static const unsigned int gfx9_SECT_CONTEXT_def_3[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_3[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
0x00000000, // PA_CL_POINT_CULL_RAD
};
-static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_4[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
@@ -688,17 +684,14 @@ static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
-static const unsigned int gfx9_SECT_CONTEXT_def_5[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_5[] = {
0x00000000, // WD_ENHANCE
0x00000000, // VGT_PRIMITIVEID_EN
};
-static const unsigned int gfx9_SECT_CONTEXT_def_6[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_6[] = {
0x00000000, // VGT_PRIMITIVEID_RESET
};
-static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_7[] = {
0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
0x00000000, // VGT_DRAW_PAYLOAD_CNTL
0, // HOLE
@@ -766,8 +759,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
0x00000000, // VGT_STRMOUT_CONFIG
0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
};
-static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_8[] = {
0x00000000, // PA_SC_CENTROID_PRIORITY_0
0x00000000, // PA_SC_CENTROID_PRIORITY_1
0x00001000, // PA_SC_LINE_CNTL
@@ -924,8 +916,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
0x00000000, // CB_COLOR7_DCC_BASE
0x00000000, // CB_COLOR7_DCC_BASE_EXT
};
-static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] = {
{gfx9_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{gfx9_SECT_CONTEXT_def_2, 0x0000a0d6, 282 },
{gfx9_SECT_CONTEXT_def_3, 0x0000a1f5, 4 },
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
index 66e39cdb5cb0..5fd96ddd7f0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
@@ -21,8 +21,7 @@
*
*/
-static const u32 si_SECT_CONTEXT_def_1[] =
-{
+static const u32 si_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const u32 si_SECT_CONTEXT_def_1[] =
0x00000000, // PA_SC_VPORT_ZMIN_15
0x3f800000, // PA_SC_VPORT_ZMAX_15
};
-static const u32 si_SECT_CONTEXT_def_2[] =
-{
+static const u32 si_SECT_CONTEXT_def_2[] = {
0x00000000, // CP_PERFMON_CNTX_CNTL
0x00000000, // CP_RINGID
0x00000000, // CP_VMID
@@ -511,8 +509,7 @@ static const u32 si_SECT_CONTEXT_def_2[] =
0x00000000, // CB_BLEND6_CONTROL
0x00000000, // CB_BLEND7_CONTROL
};
-static const u32 si_SECT_CONTEXT_def_3[] =
-{
+static const u32 si_SECT_CONTEXT_def_3[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
@@ -520,8 +517,7 @@ static const u32 si_SECT_CONTEXT_def_3[] =
0x00000000, // VGT_DMA_BASE_HI
0x00000000, // VGT_DMA_BASE
};
-static const u32 si_SECT_CONTEXT_def_4[] =
-{
+static const u32 si_SECT_CONTEXT_def_4[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
@@ -680,16 +676,13 @@ static const u32 si_SECT_CONTEXT_def_4[] =
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
-static const u32 si_SECT_CONTEXT_def_5[] =
-{
+static const u32 si_SECT_CONTEXT_def_5[] = {
0x00000000, // VGT_PRIMITIVEID_EN
};
-static const u32 si_SECT_CONTEXT_def_6[] =
-{
+static const u32 si_SECT_CONTEXT_def_6[] = {
0x00000000, // VGT_PRIMITIVEID_RESET
};
-static const u32 si_SECT_CONTEXT_def_7[] =
-{
+static const u32 si_SECT_CONTEXT_def_7[] = {
0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
0, // HOLE
0, // HOLE
@@ -924,8 +917,7 @@ static const u32 si_SECT_CONTEXT_def_7[] =
0x00000000, // CB_COLOR7_CLEAR_WORD0
0x00000000, // CB_COLOR7_CLEAR_WORD1
};
-static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] = {
{si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
{si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 587ee632a3b8..221af054d874 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -52,6 +52,7 @@
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
static const u32 crtc_offsets[] = {
CRTC0_REGISTER_OFFSET,
@@ -364,6 +365,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+ dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f22ec27365bd..69e8b0db6cf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -52,6 +52,7 @@
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
static const u32 crtc_offsets[] =
{
@@ -388,6 +389,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+ dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 4dbe9b3259b5..60d40201fdd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -273,6 +273,21 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
+static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
+ int hpd)
+{
+ u32 tmp;
+
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hdp %d\n", hpd);
+ return;
+ }
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
/**
* dce_v6_0_hpd_init - hpd setup callback.
*
@@ -312,6 +327,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -3089,7 +3105,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, tmp;
+ uint32_t disp_int, mask;
unsigned hpd;
if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -3102,9 +3118,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
mask = interrupt_status_offsets[hpd].hpd;
if (disp_int & mask) {
- tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+ dce_v6_0_hpd_int_ack(adev, hpd);
schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 05bcce23385e..5a5fcc45e452 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -265,6 +265,21 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
+static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
+ int hpd)
+{
+ u32 tmp;
+
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hdp %d\n", hpd);
+ return;
+ }
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
/**
* dce_v8_0_hpd_init - hpd setup callback.
*
@@ -304,6 +319,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -3177,7 +3193,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, tmp;
+ uint32_t disp_int, mask;
unsigned hpd;
if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -3190,9 +3206,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
mask = interrupt_status_offsets[hpd].hpd;
if (disp_int & mask) {
- tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+ dce_v8_0_hpd_int_ack(adev, hpd);
schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index dcdecb18b230..904b9ff5ead2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4490,7 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
static int gfx_v10_0_sw_init(void *handle)
{
int i, j, k, r, ring_id = 0;
- struct amdgpu_kiq *kiq;
+ int xcc_id = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -4619,8 +4619,7 @@ static int gfx_v10_0_sw_init(void *handle)
return r;
}
- kiq = &adev->gfx.kiq[0];
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
+ r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
if (r)
return r;
}
@@ -7947,7 +7946,7 @@ static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
-static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
{
amdgpu_gfx_off_ctrl(adev, false);
@@ -8543,34 +8542,23 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0);
}
-static unsigned int gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned int gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned int ret;
amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, 0);
ret = ring->wptr & ring->buf_mask;
- amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
+ /* patch dummy value later */
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
-{
- unsigned int cur;
-
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (likely(cur > offset))
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
-}
-
static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
@@ -9225,7 +9213,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_switch_buffer = gfx_v10_0_ring_emit_sb,
.emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
.init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
- .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v10_0_ring_preempt_ib,
.emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
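With init_cond_exec() now taking the target GPU address explicitly, the per-IP patch_cond_exec callbacks are removed; judging by the amdgpu_ring_patch_cond_exec() call that appears in the gfx_v11 hunk further down, the wrap-aware dword count they computed is expected to live in a common ring helper instead. The removed logic, kept here as a sketch of what that helper has to do:

	/* Sketch only: patch the reserved dword so COND_EXEC skips exactly the
	 * packets emitted between the reservation and the current write pointer. */
	static void patch_cond_exec_sketch(struct amdgpu_ring *ring, unsigned int offset)
	{
		unsigned int cur = (ring->wptr - 1) & ring->buf_mask;

		if (cur > offset)
			ring->ring[offset] = cur - offset;			/* no wrap */
		else
			ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;	/* wrapped */
	}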
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 4f3bfdc75b37..1770e496c1b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -89,6 +89,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
@@ -727,7 +731,7 @@ static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -907,6 +911,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1329,7 +1334,7 @@ static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
static int gfx_v11_0_sw_init(void *handle)
{
int i, j, k, r, ring_id = 0;
- struct amdgpu_kiq *kiq;
+ int xcc_id = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -1346,6 +1351,7 @@ static int gfx_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -1454,8 +1460,7 @@ static int gfx_v11_0_sw_init(void *handle)
return r;
}
- kiq = &adev->gfx.kiq[0];
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
+ r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
if (r)
return r;
}
@@ -2588,7 +2593,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
IP_VERSION(11, 0, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) ==
IP_VERSION(11, 0, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1))
bootload_status = RREG32_SOC15(GC, 0,
regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
@@ -5027,7 +5033,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
u32 data;
@@ -5041,6 +5047,14 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
amdgpu_gfx_off_ctrl(adev, true);
+
+ if (ring
+ && amdgpu_sriov_is_pp_one_vf(adev)
+ && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
+ uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
+ amdgpu_ring_emit_wreg(ring, reg, data);
+ }
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
@@ -5074,6 +5088,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
break;
default:
@@ -5109,6 +5124,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
@@ -5140,6 +5156,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -5444,6 +5461,11 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
}
+
+ /* Make sure that we can't skip the SET_Q_MODE packets when the VM
+ * has changed in any way.
+ */
+ ring->set_q_mode_ptr = NULL;
}
static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
@@ -5493,16 +5515,81 @@ static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0);
}
+static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
+{
+ unsigned ret;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, 0);
+ ret = ring->wptr & ring->buf_mask;
+ /* patch dummy value later */
+ amdgpu_ring_write(ring, 0);
+
+ return ret;
+}
+
static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
u64 shadow_va, u64 csa_va,
u64 gds_va, bool init_shadow,
int vmid)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned int offs, end;
- if (!adev->gfx.cp_gfx_shadow)
+ if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
return;
+ /*
+ * The logic here isn't easy to understand because we need to keep state
+ * across multiple executions of the function as well as between the
+ * CPU and GPU. The general idea is that the newly written GPU command
+ * has a condition on the previous one and is only executed if really
+ * necessary.
+ */
+
+ /*
+ * The dw in the NOP controls whether the next SET_Q_MODE packet should be
+ * executed or not. Reserve 64 bits just to be on the safe side.
+ */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
+ offs = ring->wptr & ring->buf_mask;
+
+ /*
+ * We start with skipping the prefix SET_Q_MODE and always executing
+ * the postfix SET_Q_MODE packet. This is changed below with a
+ * WRITE_DATA command when the postfix is executed.
+ */
+ amdgpu_ring_write(ring, shadow_va ? 1 : 0);
+ amdgpu_ring_write(ring, 0);
+
+ if (ring->set_q_mode_offs) {
+ uint64_t addr;
+
+ addr = amdgpu_bo_gpu_offset(ring->ring_obj);
+ addr += ring->set_q_mode_offs << 2;
+ end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr);
+ }
+
+ /*
+ * When the postfix SET_Q_MODE packet executes, we need to make sure that the
+ * next prefix SET_Q_MODE packet executes as well.
+ */
+ if (!shadow_va) {
+ uint64_t addr;
+
+ addr = amdgpu_bo_gpu_offset(ring->ring_obj);
+ addr += offs << 2;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, 0x1);
+ }
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
amdgpu_ring_write(ring, lower_32_bits(shadow_va));
amdgpu_ring_write(ring, upper_32_bits(shadow_va));
@@ -5514,33 +5601,26 @@ static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
amdgpu_ring_write(ring, init_shadow ?
PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
-}
-static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
-{
- unsigned ret;
+ if (ring->set_q_mode_offs)
+ amdgpu_ring_patch_cond_exec(ring, end);
- amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
- ret = ring->wptr & ring->buf_mask;
- amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
+ if (shadow_va) {
+ uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid;
- return ret;
-}
+ /*
+ * If the tokens match try to skip the last postfix SET_Q_MODE
+ * packet to avoid saving/restoring the state all the time.
+ */
+ if (ring->set_q_mode_ptr && ring->set_q_mode_token == token)
+ *ring->set_q_mode_ptr = 0;
-static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
- unsigned cur;
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
+ ring->set_q_mode_token = token;
+ } else {
+ ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs];
+ }
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (likely(cur > offset))
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
+ ring->set_q_mode_offs = offs;
}
static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
@@ -6104,9 +6184,10 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
- .emit_frame_size = /* totally 242 maximum if 16 IBs */
+ .emit_frame_size = /* totally 247 maximum if 16 IBs */
+ 5 + /* update_spm_vmid */
5 + /* COND_EXEC */
- 9 + /* SET_Q_PREEMPTION_MODE */
+ 22 + /* SET_Q_PREEMPTION_MODE */
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
@@ -6119,6 +6200,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
31 + /* DE_META */
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
+ 22 + /* SET_Q_PREEMPTION_MODE */
8 + 8 + /* FENCE x2 */
8, /* gfx_v11_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
@@ -6135,7 +6217,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
- .patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v11_0_ring_preempt_ib,
.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v11_0_ring_emit_wreg,
@@ -6154,6 +6235,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.get_wptr = gfx_v11_0_ring_get_wptr_compute,
.set_wptr = gfx_v11_0_ring_set_wptr_compute,
.emit_frame_size =
+ 5 + /* update_spm_vmid */
20 + /* gfx_v11_0_ring_emit_gds_switch */
7 + /* gfx_v11_0_ring_emit_hdp_flush */
5 + /* hdp invalidate */
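The new gfx_v11 shadow handling predicates the SET_Q_MODE packets on a dword reserved in a NOP packet and skips the redundant postfix packet when the preemption state has not changed. Pulled out of the hunk above for readability, the dedup test is just an XOR fingerprint comparison:

	/* From the hunk above, shown in isolation: identical shadow/csa/gds/vmid
	 * state produces an identical token, so the predicate dword of the
	 * previously reserved NOP is zeroed and COND_EXEC skips the packet. */
	uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid;

	if (ring->set_q_mode_ptr && ring->set_q_mode_token == token)
		*ring->set_q_mode_ptr = 0;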
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 26d6286d86c9..9e7ce1e6bc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -69,7 +69,7 @@ static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
- adev->virt.ops->ras_poison_handler(adev);
+ adev->virt.ops->ras_poison_handler(adev, ras_if->block);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV for %s!\n", ras_if->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index c2faf6b4c2fc..86a4865b1ae5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3274,7 +3274,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -3500,7 +3500,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1943beb135c4..202ddda57f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1288,7 +1288,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -1900,8 +1900,8 @@ static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
static int gfx_v8_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
+ int xcc_id = 0;
struct amdgpu_ring *ring;
- struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
@@ -2022,8 +2022,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
- kiq = &adev->gfx.kiq[0];
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
+ r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
if (r)
return r;
@@ -5579,7 +5578,7 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
}
}
-static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
u32 data;
@@ -6327,33 +6326,22 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned ret;
amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, 0);
ret = ring->wptr & ring->buf_mask;
- amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
+ /* patch dummy value later */
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
- unsigned cur;
-
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr & ring->buf_mask) - 1;
- if (likely(cur > offset))
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
-}
-
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs)
{
@@ -6933,7 +6921,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
- .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3bc6943365a4..6f97a6d0e6d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1997,8 +1997,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
static int gfx_v9_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
+ int xcc_id = 0;
struct amdgpu_ring *ring;
- struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
unsigned int hw_prio;
@@ -2080,7 +2080,7 @@ static int gfx_v9_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
/* disable scheduler on the real ring */
- ring->no_scheduler = true;
+ ring->no_scheduler = adev->gfx.mcbp;
ring->vm_hub = AMDGPU_GFXHUB(0);
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
@@ -2090,7 +2090,7 @@ static int gfx_v9_0_sw_init(void *handle)
}
/* set up the software rings */
- if (adev->gfx.num_gfx_rings) {
+ if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
ring = &adev->gfx.sw_gfx_ring[i];
ring->ring_obj = NULL;
@@ -2151,8 +2151,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
}
- kiq = &adev->gfx.kiq[0];
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
+ r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
if (r)
return r;
@@ -2181,7 +2180,7 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->gfx.num_gfx_rings) {
+ if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
amdgpu_ring_mux_fini(&adev->gfx.muxer);
@@ -4902,7 +4901,7 @@ static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
-static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
{
amdgpu_gfx_off_ctrl(adev, false);
@@ -5611,31 +5610,21 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned ret;
amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ /* discard following DWs if *cond_exec_gpu_addr==0 */
+ amdgpu_ring_write(ring, 0);
ret = ring->wptr & ring->buf_mask;
- amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
+ /* patch dummy value later */
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
- unsigned cur;
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (likely(cur > offset))
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs)
{
@@ -5910,11 +5899,14 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
switch (me_id) {
case 0:
- if (adev->gfx.num_gfx_rings &&
- !amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
- /* Fence signals are handled on the software rings*/
- for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
- amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
+ if (adev->gfx.num_gfx_rings) {
+ if (!adev->gfx.mcbp) {
+ amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+ } else if (!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
+ /* Fence signals are handled on the software rings*/
+ for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
+ amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
+ }
}
break;
case 1:
@@ -6909,7 +6901,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
- .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v9_0_ring_preempt_ib,
.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
@@ -6964,7 +6955,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
- .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
@@ -7051,7 +7041,7 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
- if (adev->gfx.num_gfx_rings) {
+ if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index bc8416afb62c..f53b379d8971 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -970,8 +970,9 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
}
-static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
- { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = {
+ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32
+};
static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 131cddbdda0d..b53c8fd4e8cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -38,6 +38,7 @@
#include "gfx_v9_4_3.h"
#include "amdgpu_xcp.h"
+#include "amdgpu_aca.h"
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
@@ -48,6 +49,10 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -675,6 +680,66 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};
+static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ u64 status, misc0;
+ u32 instlo;
+ int ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if ((type == ACA_ERROR_TYPE_UE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+ (type == ACA_ERROR_TYPE_CE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ /* NOTE: overwrite info.die_id with xcd id for gfx */
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+ report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+ switch (instlo) {
+ case mmSMNAID_XCD0_MCA_SMU:
+ case mmSMNAID_XCD1_MCA_SMU:
+ case mmSMNXCD_XCD0_MCA_SMU:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
+ .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
+ .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
+};
+
+static const struct aca_info gfx_v9_4_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &gfx_v9_4_3_aca_bank_ops,
+};
+
static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -778,7 +843,6 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
static int gfx_v9_4_3_sw_init(void *handle)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
- struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.mec.num_mec = 2;
@@ -847,8 +911,7 @@ static int gfx_v9_4_3_sw_init(void *handle)
return r;
}
- kiq = &adev->gfx.kiq[xcc_id];
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id);
+ r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
if (r)
return r;
@@ -1109,7 +1172,7 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
@@ -1320,7 +1383,7 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
+static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned vmid)
{
u32 reg, data;
@@ -3888,6 +3951,9 @@ static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
uint32_t i;
uint32_t data;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
@@ -4242,9 +4308,32 @@ struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};
+static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
+ &gfx_v9_4_3_aca_info,
+ NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
.ras_block = {
.hw_ops = &gfx_v9_4_3_ras_ops,
+ .ras_late_init = &gfx_v9_4_3_ras_late_init,
},
.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 6c5185608854..d933e19e0cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -105,7 +105,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
bool retry_fault = !!(entry->src_data[1] & 0x80);
bool write_fault = !!(entry->src_data[1] & 0x20);
- struct amdgpu_task_info task_info;
+ struct amdgpu_task_info *task_info;
uint32_t status = 0;
u64 addr;
@@ -157,18 +157,22 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
if (!printk_ratelimit())
return 0;
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
-
dev_err(adev->dev,
- "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
+ "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
- entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid, task_info.process_name, task_info.tgid,
- task_info.task_name, task_info.pid);
+ entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
+ task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+ if (task_info) {
+ dev_err(adev->dev,
+ " in process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task_name, task_info->pid);
+ amdgpu_vm_put_task_info(task_info);
+ }
+
dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
- addr, entry->client_id,
- soc15_ih_clientid_name[entry->client_id]);
+ addr, entry->client_id,
+ soc15_ih_clientid_name[entry->client_id]);
if (!amdgpu_sriov_vf(adev))
hub->vmhub_funcs->print_l2_protection_fault_status(adev,
@@ -262,16 +266,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
- /* For SRIOV run time, driver shouldn't access the register through MMIO
- * Directly use kiq to do the vm invalidation instead
+ /* This is necessary for SRIOV as well as for GFXOFF to function
+ * properly under bare metal
*/
if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid, GET_INST(GC, 0));
+ amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid, GET_INST(GC, 0));
return;
}
+ /* This path is needed before KIQ/MES/GFXOFF are set up */
hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
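This and the following gmc hunks replace the stack copy of struct amdgpu_task_info with a reference-counted lookup; the pattern is get, print, put. Condensed from the hunks themselves:

	struct amdgpu_task_info *task_info;

	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		dev_err(adev->dev, " in process %s pid %d thread %s pid %d\n",
			task_info->process_name, task_info->tgid,
			task_info->task_name, task_info->pid);
		amdgpu_vm_put_task_info(task_info);	/* drop the reference taken by the lookup */
	}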
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index c9c653cfc765..527dc917e049 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -126,19 +126,24 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
}
if (printk_ratelimit()) {
- struct amdgpu_task_info task_info;
-
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+ struct amdgpu_task_info *task_info;
dev_err(adev->dev,
- "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
+ "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
- entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid, task_info.process_name, task_info.tgid,
- task_info.task_name, task_info.pid);
+ entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
+ task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+ if (task_info) {
+ dev_err(adev->dev,
+ " in process %s pid %d thread %s pid %d)\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task_name, task_info->pid);
+ amdgpu_vm_put_task_info(task_info);
+ }
+
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
- addr, entry->client_id);
+ addr, entry->client_id);
+
if (!amdgpu_sriov_vf(adev))
hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
@@ -223,16 +228,17 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* flush hdp cache */
adev->hdp.funcs->flush_hdp(adev, NULL);
- /* For SRIOV run time, driver shouldn't access the register through MMIO
- * Directly use kiq to do the vm invalidation instead
+ /* This is necessary for SRIOV as well as for GFXOFF to function
+ * properly under bare metal
*/
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid, GET_INST(GC, 0));
+ amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid, GET_INST(GC, 0));
return;
}
+ /* This path is needed before KIQ/MES/GFXOFF are set up */
hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
@@ -570,6 +576,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
break;
case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
adev->mmhub.funcs = &mmhub_v3_3_funcs;
break;
default:
@@ -585,6 +592,7 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
break;
default:
@@ -746,6 +754,7 @@ static int gmc_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 59d9215e5556..23b478639921 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -435,9 +435,10 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
- uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+ uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+ AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
- (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+ (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 45a2f8e031a2..3da7b6a2b00d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -563,9 +563,10 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
- uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+ uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+ AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
- (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+ (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 4422b27a3cc2..d20e5f20ee31 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -777,9 +777,10 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
- uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+ uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+ AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
- (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+ (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -1444,18 +1445,24 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
gmc_v8_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
- struct amdgpu_task_info task_info;
+ struct amdgpu_task_info *task_info;
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data[0]);
+
+ task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+ if (task_info) {
+ dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task_name, task_info->pid);
+ amdgpu_vm_put_task_info(task_info);
+ }
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
- entry->src_id, entry->src_data[0], task_info.process_name,
- task_info.tgid, task_info.task_name, task_info.pid);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
+ addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
+
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
entry->pasid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e67a62db9e12..47b63a4ce68b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -496,14 +496,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
if (j >= AMDGPU_MMHUB0(0))
tmp = RREG32_SOC15_IP(MMHUB, reg);
else
- tmp = RREG32_SOC15_IP(GC, reg);
+ tmp = RREG32_XCC(reg, j);
tmp &= ~bits;
if (j >= AMDGPU_MMHUB0(0))
WREG32_SOC15_IP(MMHUB, reg, tmp);
else
- WREG32_SOC15_IP(GC, reg, tmp);
+ WREG32_XCC(reg, tmp, j);
}
}
break;
@@ -524,14 +524,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
if (j >= AMDGPU_MMHUB0(0))
tmp = RREG32_SOC15_IP(MMHUB, reg);
else
- tmp = RREG32_SOC15_IP(GC, reg);
+ tmp = RREG32_XCC(reg, j);
tmp |= bits;
if (j >= AMDGPU_MMHUB0(0))
WREG32_SOC15_IP(MMHUB, reg, tmp);
else
- WREG32_SOC15_IP(GC, reg, tmp);
+ WREG32_XCC(reg, tmp, j);
}
}
break;
@@ -549,7 +549,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
bool retry_fault = !!(entry->src_data[1] & 0x80);
bool write_fault = !!(entry->src_data[1] & 0x20);
uint32_t status = 0, cid = 0, rw = 0;
- struct amdgpu_task_info task_info;
+ struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
const char *mmhub_cid;
const char *hub_name;
@@ -626,15 +626,20 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (!printk_ratelimit())
return 0;
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
-
dev_err(adev->dev,
- "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
- hub_name, retry_fault ? "retry" : "no-retry",
- entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid, task_info.process_name, task_info.tgid,
- task_info.task_name, task_info.pid);
+ "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
+ retry_fault ? "retry" : "no-retry",
+ entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
+
+ task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+ if (task_info) {
+ dev_err(adev->dev,
+ " for process %s pid %d thread %s pid %d)\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task_name, task_info->pid);
+ amdgpu_vm_put_task_info(task_info);
+ }
+
dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
@@ -829,23 +834,25 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
- /* This is necessary for a HW workaround under SRIOV as well
- * as GFXOFF under bare metal
- */
if (vmhub >= AMDGPU_MMHUB0(0))
inst = GET_INST(GC, 0);
else
inst = vmhub;
+
+ /* This is necessary for SRIOV as well as for GFXOFF to function
+ * properly under bare metal
+ */
if (adev->gfx.kiq[inst].ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid, inst);
+ amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+ 1 << vmid, inst);
return;
}
+ /* This path is needed before KIQ/MES/GFXOFF are set up */
spin_lock(&adev->gmc.invalidate_lock);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
new file mode 100644
index 000000000000..8d7d0813e331
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v7_0.h"
+
+#include "hdp/hdp_7_0_0_offset.h"
+#include "hdp/hdp_7_0_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Before doing clock/power mode switch,
+ * force on IPH & RC clock */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* disable clock and power gating before any changing */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* confirmed that ATOMIC_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* disable IPH & RC clock override after clock/power mode changing */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
+ .flush_hdp = hdp_v7_0_flush_hdp,
+ .update_clock_gating = hdp_v7_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v7_0_get_clockgating_state,
+};
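hdp_v7_0_update_clock_gating() above follows the usual clear-then-select pattern: force the RC memory clock on, clear every LS/DS/SD enable bit, program at most one mode, then drop the clock override. Below is a minimal, self-contained sketch of the select-one-mode step; the HDP_* masks are hypothetical stand-ins for the driver's cg_flags bits and a plain bitmask replaces REG_SET_FIELD()/WREG32_SOC15().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values standing in for the AMD_CG_SUPPORT_HDP_* bits. */
#define HDP_LS (1u << 0)
#define HDP_DS (1u << 1)
#define HDP_SD (1u << 2)

/*
 * Clear every mode-enable bit first, then enable at most one of SD/LS/DS,
 * mirroring the priority order used by hdp_v7_0_update_clock_gating().
 */
static uint32_t pick_hdp_power_mode(uint32_t reg, uint32_t cg_flags, int enable)
{
	reg &= ~(HDP_LS | HDP_DS | HDP_SD);	/* "disable ... before any changing" */
	if (!enable)
		return reg;
	if (cg_flags & HDP_SD)
		reg |= HDP_SD;
	else if (cg_flags & HDP_LS)
		reg |= HDP_LS;
	else if (cg_flags & HDP_DS)
		reg |= HDP_DS;
	return reg;
}

int main(void)
{
	/* SD wins when both SD and LS are advertised; the stale DS bit is cleared. */
	printf("0x%x\n", pick_hdp_power_mode(HDP_DS, HDP_SD | HDP_LS, 1));
	return 0;
}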
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h
new file mode 100644
index 000000000000..25b69201402d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V7_0_H__
+#define __HDP_V7_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v7_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
new file mode 100644
index 000000000000..16fe428c0722
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+
+#include "oss/osssys_7_0_0_offset.h"
+#include "oss/osssys_7_0_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "ih_v7_0.h"
+
+#define MAX_REARM_RETRY 10
+
+static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * ih_v7_0_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the register offsets for the ih rings (IH_V7_0).
+ */
+static void ih_v7_0_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ /* ih ring 2 is removed;
+ * only ih ring 0 and ih ring 1 are available */
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+}
+
+/**
+ * force_update_wptr_for_self_int - Force update the wptr for self interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @threshold: threshold to trigger the wptr reporting
+ * @timeout: timeout to trigger the wptr reporting
+ * @enabled: Enable/disable timeout flush mechanism
+ *
+ * threshold input range: 0 ~ 15, default 0,
+ * real_threshold = 2^threshold
+ * timeout input range: 0 ~ 20, default 8,
+ * real_timeout = (2^timeout) * 1024 / (socclk_freq)
+ *
+ * Force update wptr for self interrupt (IH_V7_0).
+ */
+static void
+force_update_wptr_for_self_int(struct amdgpu_device *adev,
+ u32 threshold, u32 timeout, bool enabled)
+{
+ u32 ih_cntl, ih_rb_cntl;
+
+ ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
+
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_USED_INT_THRESHOLD, threshold);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
+ return;
+ } else {
+ WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+
+ WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
+}
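/*
 * Worked example, assuming socclk_freq is given in Hz: ih_v7_0_irq_init()
 * below calls this with threshold = 0 and timeout = 8, so
 * real_threshold = 2^0 = 1 pending entry, and with a hypothetical 1 GHz
 * SOCCLK real_timeout = (2^8) * 1024 / 1e9 s, roughly 262 us, before a
 * pending wptr update is forced out.
 */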
+
+/**
+ * ih_v7_0_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (IH_V7_0)
+ */
+static int ih_v7_0_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ih_v7_0_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (IH_V7_0).
+ */
+static int ih_v7_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = ih_v7_0_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t ih_v7_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 2 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
+}
+
+static uint32_t ih_v7_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+/**
+ * ih_v7_0_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (IH_V7_0)
+ */
+static int ih_v7_0_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = ih_v7_0_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, ih_v7_0_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
+ * ih_v7_0_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it.
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int ih_v7_0_irq_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
+ u32 ih_chicken;
+ u32 tmp;
+ int ret;
+ int i;
+
+ /* disable irqs */
+ ret = ih_v7_0_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
+
+ adev->nbio.funcs->ih_control(adev);
+
+ if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
+ if (ih[0]->use_bus_addr) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
+ ih_chicken = REG_SET_FIELD(ih_chicken,
+ IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = ih_v7_0_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* update doorbell range for ih ring 0 */
+ adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+ ih[0]->doorbell_index);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+ CLIENT18_IS_STORM_CLIENT, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
+
+ /* GC/MMHUB UTCL2 page fault interrupts are configured as
+ * MSI storm capable interrupts by default. The delay is
+ * used to avoid the ISR being called too frequently
+ * when page faults happen on several contiguous pages,
+ * and thus avoid an MSI storm */
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
+ tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
+ DELAY, 3);
+ WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+ ret = ih_v7_0_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
+ /* enable wptr force update for self int */
+ force_update_wptr_for_self_int(adev, 0, 8, true);
+
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
+}
+
+/**
+ * ih_v7_0_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw.
+ */
+static void ih_v7_0_irq_disable(struct amdgpu_device *adev)
+{
+ force_update_wptr_for_self_int(adev, 0, 8, false);
+ ih_v7_0_toggle_interrupts(adev, false);
+
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * ih_v7_0_get_wptr() - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to fetch wptr
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer. Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
+
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 32).
+ * Hopefully this allows us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+ dev_warn(adev->dev, "IH ring buffer overflow "
+ "(0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+out:
+ return (wptr & ih->ptr_mask);
+}
+
+/**
+ * ih_v7_0_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring to match
+ *
+ */
+static void ih_v7_0_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t v = 0;
+ uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
+ * ih_v7_0_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to set rptr
+ */
+static void ih_v7_0_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (ih->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ ih_v7_0_irq_rearm(adev, ih);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+ }
+}
+
+/**
+ * ih_v7_0_self_irq - dispatch work for ring 1
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int ih_v7_0_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ default: break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs ih_v7_0_self_irq_funcs = {
+ .process = ih_v7_0_self_irq,
+};
+
+static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs;
+}
+
+static int ih_v7_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v7_0_set_interrupt_funcs(adev);
+ ih_v7_0_set_self_irq_funcs(adev);
+ return 0;
+}
+
+static int ih_v7_0_sw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool use_bus_addr;
+
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+
+ if (r)
+ return r;
+
+ /* use the gpu virtual address for the ih ring
+ * until ih_chicken is programmed to allow
+ * using the bus address for the ih ring by the psp bl */
+ use_bus_addr = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+ adev->irq.ih1.ring_size = 0;
+ adev->irq.ih2.ring_size = 0;
+
+ /* initialize ih control register offset */
+ ih_v7_0_init_register_offset(adev);
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int ih_v7_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini_sw(adev);
+
+ return 0;
+}
+
+static int ih_v7_0_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = ih_v7_0_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int ih_v7_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v7_0_irq_disable(adev);
+
+ return 0;
+}
+
+static int ih_v7_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ih_v7_0_hw_fini(adev);
+}
+
+static int ih_v7_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ih_v7_0_hw_init(adev);
+}
+
+static bool ih_v7_0_is_idle(void *handle)
+{
+ /* todo */
+ return true;
+}
+
+static int ih_v7_0_wait_for_idle(void *handle)
+{
+ /* todo */
+ return -ETIMEDOUT;
+}
+
+static int ih_v7_0_soft_reset(void *handle)
+{
+ /* todo */
+ return 0;
+}
+
+static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
+ }
+}
+
+static int ih_v7_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v7_0_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+}
+
+static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t ih_mem_pwr_cntl;
+
+ /* Disable ih sram power cntl before switching the powergating mode */
+ ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
+
+ /* It is recommended to set mem powergating mode to DS mode */
+ if (enable) {
+ /* mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_DS_EN, 1);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_SD_EN, 0);
+ /* cam mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
+ /* re-enable power cntl */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 1);
+ } else {
+ /* mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_DS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_SD_EN, 0);
+ /* cam mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
+ /* re-enable power cntl */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 1);
+ }
+
+ WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
+}
+
+static int ih_v7_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
+ ih_v7_0_update_ih_mem_power_gating(adev, enable);
+
+ return 0;
+}
+
+static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
+ *flags |= AMD_CG_SUPPORT_IH_CG;
+}
+
+static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
+ .name = "ih_v7_0",
+ .early_init = ih_v7_0_early_init,
+ .late_init = NULL,
+ .sw_init = ih_v7_0_sw_init,
+ .sw_fini = ih_v7_0_sw_fini,
+ .hw_init = ih_v7_0_hw_init,
+ .hw_fini = ih_v7_0_hw_fini,
+ .suspend = ih_v7_0_suspend,
+ .resume = ih_v7_0_resume,
+ .is_idle = ih_v7_0_is_idle,
+ .wait_for_idle = ih_v7_0_wait_for_idle,
+ .soft_reset = ih_v7_0_soft_reset,
+ .set_clockgating_state = ih_v7_0_set_clockgating_state,
+ .set_powergating_state = ih_v7_0_set_powergating_state,
+ .get_clockgating_state = ih_v7_0_get_clockgating_state,
+};
+
+static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
+ .get_wptr = ih_v7_0_get_wptr,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
+ .set_rptr = ih_v7_0_set_rptr
+};
+
+static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.ih_funcs = &ih_v7_0_funcs;
+}
+
+const struct amdgpu_ip_block_version ih_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ih_v7_0_ip_funcs,
+};
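The overflow path in ih_v7_0_get_wptr() recovers by skipping the read pointer to (wptr + 32) & ptr_mask, i.e. just past the vector the comment calls the last one not overwritten. Below is a self-contained sketch of that wrap arithmetic, assuming (as the amdgpu IH code does for its power-of-two rings) that ptr_mask is ring_size - 1; the helper name is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Skip the read pointer past a possibly overwritten entry after an overflow. */
static uint32_t ih_overflow_recover_rptr(uint32_t wptr, uint32_t ring_size)
{
	uint32_t ptr_mask = ring_size - 1;	/* ring_size must be a power of two */

	return (wptr + 32) & ptr_mask;
}

int main(void)
{
	/* wptr wrapped near the end of a 256 KiB ring: the new rptr lands at 0x18 */
	printf("new rptr = 0x%x\n", ih_overflow_recover_rptr(0x3fff8, 0x40000));
	return 0;
}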
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h
new file mode 100644
index 000000000000..af9dcbc451fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IH_V7_0_IH_H__
+#define __IH_V7_0_IH_H__
+
+extern const struct amdgpu_ip_block_version ih_v7_0_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index c0bdab3bf0e4..3e91a8e42c21 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -37,6 +37,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index e67a337457ed..99cd49ee8ef6 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -551,7 +551,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
- if(state == adev->jpeg.cur_state)
+ if (state == adev->jpeg.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
@@ -559,7 +559,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
else
ret = jpeg_v2_5_start(adev);
- if(!ret)
+ if (!ret)
adev->jpeg.cur_state = state;
return ret;
@@ -754,8 +754,7 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 2,
.minor = 5,
@@ -763,8 +762,7 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
.funcs = &jpeg_v2_5_ip_funcs,
};
-const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 2,
.minor = 6,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 82b6b62c170b..32caeb37cef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -652,7 +652,7 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
*
* Write a start command to the ring.
*/
-static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
@@ -672,7 +672,7 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
*
* Write a end command to the ring.
*/
-static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev)) {
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
@@ -695,7 +695,7 @@ static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
*
* Write a fence and a trap command to the ring.
*/
-static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned int flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@@ -764,7 +764,7 @@ static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
*
* Write ring commands to execute the indirect buffer.
*/
-static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
@@ -815,7 +815,7 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0x2);
}
-static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
uint32_t reg_offset = (reg << 2);
@@ -842,7 +842,7 @@ static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_
amdgpu_ring_write(ring, mask);
}
-static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@@ -857,7 +857,7 @@ static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
-static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
uint32_t reg_offset = (reg << 2);
@@ -875,7 +875,7 @@ static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t re
amdgpu_ring_write(ring, val);
}
-static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 22483dc66351..747a3e5f6856 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -48,4 +48,19 @@
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
+void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags);
+void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned int flags);
+void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
+void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
+void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+
#endif /* __JPEG_V4_0_3_H__ */
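This hunk drops the static qualifiers and exports the v4.0.3 ring helpers; together with jpeg_v5_0_0.c (added later in this patch) including jpeg_v4_0_3.h, the apparent intent is to let a newer JPEG IP version reuse the same emit callbacks rather than duplicate them. Below is a self-contained sketch of that sharing pattern, with hypothetical types in place of the amdgpu ring structures.

#include <stdio.h>

/* Hypothetical stand-ins for an amdgpu-style ring and its funcs table. */
struct ring { const char *name; };
struct ring_funcs { void (*insert_start)(struct ring *r); };

/* Helper implemented once, referenced by more than one IP version's table. */
static void v4_0_3_insert_start(struct ring *r)
{
	printf("%s: emit start packet\n", r->name);
}

static const struct ring_funcs v4_0_3_funcs = { .insert_start = v4_0_3_insert_start };
static const struct ring_funcs v5_0_0_funcs = { .insert_start = v4_0_3_insert_start };

int main(void)
{
	struct ring r4 = { "jpeg_v4_0_3" }, r5 = { "jpeg_v5_0_0" };

	v4_0_3_funcs.insert_start(&r4);
	v5_0_0_funcs.insert_start(&r5);
	return 0;
}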
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 78b74daf4eeb..edf5bcdd2bc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -34,7 +34,17 @@
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
-#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL
+#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX
+#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA
+#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
+
+#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+#define regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET 0x4026
+#define regJPEG_SYS_INT_EN_INTERNAL_OFFSET 0x4141
+#define regJPEG_CGC_CTRL_INTERNAL_OFFSET 0x4161
+#define regJPEG_CGC_GATE_INTERNAL_OFFSET 0x4160
+#define regUVD_NO_OP_INTERNAL_OFFSET 0x0029
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
@@ -43,6 +53,11 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
+static int amdgpu_ih_clientid_jpeg[] = {
+ SOC15_IH_CLIENTID_VCN,
+ SOC15_IH_CLIENTID_VCN1
+};
+
/**
* jpeg_v4_0_5_early_init - set function pointers
*
@@ -54,8 +69,20 @@ static int jpeg_v4_0_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ adev->jpeg.num_jpeg_inst = 1;
+ break;
+ case IP_VERSION(4, 0, 6):
+ adev->jpeg.num_jpeg_inst = 2;
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev,
+ "Failed to init vcn ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
- adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
jpeg_v4_0_5_set_dec_ring_funcs(adev);
@@ -75,25 +102,30 @@ static int jpeg_v4_0_5_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int r;
-
- /* JPEG TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
- if (r)
- return r;
-
- /* JPEG DJPEG POISON EVENT */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
- if (r)
- return r;
-
- /* JPEG EJPEG POISON EVENT */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
- if (r)
- return r;
+ int r, i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+ VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
+ if (r)
+ return r;
+
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
+ if (r)
+ return r;
+ }
r = amdgpu_jpeg_sw_init(adev);
if (r)
@@ -103,21 +135,23 @@ static int jpeg_v4_0_5_sw_init(void *handle)
if (r)
return r;
- ring = adev->jpeg.inst->ring_dec;
- ring->use_doorbell = true;
- ring->doorbell_index = amdgpu_sriov_vf(adev) ?
- (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
- ring->vm_hub = AMDGPU_MMHUB0(0);
-
- sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r)
- return r;
-
- adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = adev->jpeg.inst[i].ring_dec;
+ ring->use_doorbell = true;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
+ sprintf(ring->name, "jpeg_dec_%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
+ }
return 0;
}
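/*
 * Doorbell layout, symbolically: with adev->doorbell_index.vcn.vcn_ring0_1
 * equal to D, instance 0 decodes on doorbell (D << 1) + 1 and instance 1 on
 * (D << 1) + 9, i.e. each JPEG instance owns a block of 8 slots starting at
 * (D << 1) + 8 * i, matching the per-instance base that jpeg_v4_0_5_start()
 * later passes to vcn_doorbell_range().
 */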
@@ -152,14 +186,27 @@ static int jpeg_v4_0_5_sw_fini(void *handle)
static int jpeg_v4_0_5_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
- int r;
+ struct amdgpu_ring *ring;
+ int r = 0, i;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ /* TODO: Enable ring test with DPG support */
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode");
+ return 0;
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
- DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
+ ring = adev->jpeg.inst[i].ring_dec;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ if (!r)
+ DRM_INFO("JPEG decode initialized successfully under SPG Mode\n");
return 0;
}
@@ -174,14 +221,20 @@ static int jpeg_v4_0_5_hw_init(void *handle)
static int jpeg_v4_0_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (!amdgpu_sriov_vf(adev)) {
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS))
+ jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
+ }
return 0;
}
@@ -227,11 +280,11 @@ static int jpeg_v4_0_5_resume(void *handle)
return r;
}
-static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
+static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
uint32_t data = 0;
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
@@ -241,21 +294,21 @@ static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev)
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}
-static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
+static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
uint32_t data = 0;
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
@@ -265,47 +318,66 @@ static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev)
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);
- data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+ data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+ WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}
-static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev)
+static void jpeg_engine_4_0_5_dpg_clock_gating_mode(struct amdgpu_device *adev,
+ int inst_idx, uint8_t indirect)
+{
+ uint32_t data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_CTRL_INTERNAL_OFFSET, data, indirect);
+
+ data = 0;
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_GATE_INTERNAL_OFFSET,
+ data, indirect);
+}
+
+static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
- WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+ WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+ SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
/* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
/* keep the JPEG in static PG mode */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
return 0;
}
-static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
+static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
/* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
- WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+ WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+ SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
}
@@ -314,61 +386,153 @@ static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev)
}
/**
- * jpeg_v4_0_5_start - start JPEG block
+ * jpeg_v4_0_5_start_dpg_mode - Jpeg start with dpg mode
*
* @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
*
- * Setup and start the JPEG block
+ * Start JPEG block with dpg mode
*/
-static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
+static void jpeg_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
- int r;
+ struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
+ uint32_t reg_data = 0;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_jpeg(adev, true);
+ /* enable anti hang mechanism */
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
+ reg_data |= 0x1;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
- /* doorbell programming is done for every playback */
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ WREG32(SOC15_REG_OFFSET(JPEG, inst_idx, regUVD_IPX_DLDO_CONFIG),
+ 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(JPEG, inst_idx, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+ }
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
- /* disable power gating */
- r = jpeg_v4_0_5_disable_static_power_gating(adev);
- if (r)
- return r;
+ if (indirect)
+ adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;
- /* JPEG disable CGC */
- jpeg_v4_0_5_disable_clock_gating(adev);
+ jpeg_engine_4_0_5_dpg_clock_gating_mode(adev, inst_idx, indirect);
/* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET,
+ adev->gfx.config.gb_addr_config, indirect);
+ /* enable System Interrupt for JRBC */
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_SYS_INT_EN_INTERNAL_OFFSET,
+ JPEG_SYS_INT_EN__DJRBC_MASK, indirect);
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ /* add nop to work around the PSP size check */
+ WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regUVD_NO_OP_INTERNAL_OFFSET, 0, indirect);
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
+ if (indirect)
+ amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
- WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v4_0_5_stop_dpg_mode - Jpeg stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop JPEG block with dpg mode
+ */
+static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t reg_data = 0;
+
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
+}
+
+/**
+ * jpeg_v4_0_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r, i;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, true);
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = adev->jpeg.inst[i].ring_dec;
+ /* doorbell programming is done for every playback */
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
+
+ WREG32_SOC15(VCN, i, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ jpeg_v4_0_5_start_dpg_mode(adev, i, adev->jpeg.indirect_sram);
+ continue;
+ }
+
+ /* disable power gating */
+ r = jpeg_v4_0_5_disable_static_power_gating(adev, i);
+ if (r)
+ return r;
+
+ /* JPEG disable CGC */
+ jpeg_v4_0_5_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR);
+ }
return 0;
}
@@ -382,20 +546,29 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
*/
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
- int r;
+ int r, i;
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
- jpeg_v4_0_5_enable_clock_gating(adev);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ jpeg_v4_0_5_stop_dpg_mode(adev, i);
+ continue;
+ }
- /* enable power gating */
- r = jpeg_v4_0_5_enable_static_power_gating(adev);
- if (r)
- return r;
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v4_0_5_enable_clock_gating(adev, i);
+ /* enable power gating */
+ r = jpeg_v4_0_5_enable_static_power_gating(adev, i);
+ if (r)
+ return r;
+ }
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
@@ -413,7 +586,7 @@ static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
+ return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_RPTR);
}
/**
@@ -430,7 +603,7 @@ static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
- return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+ return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR);
}
/**
@@ -448,29 +621,41 @@ static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
static bool jpeg_v4_0_5_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret = 1;
+ int i, ret = 1;
- ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
- UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
- UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+ ret &= (((RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+ }
return ret;
}
static int jpeg_v4_0_5_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ return SOC15_WAIT_ON_RREG(JPEG, i, regUVD_JRBC_STATUS,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
- return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
- UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
- UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ return 0;
}
static int jpeg_v4_0_5_set_clockgating_state(void *handle,
@@ -478,13 +663,20 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ int i;
- if (enable) {
- if (!jpeg_v4_0_5_is_idle(handle))
- return -EBUSY;
- jpeg_v4_0_5_enable_clock_gating(adev);
- } else {
- jpeg_v4_0_5_disable_clock_gating(adev);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (enable) {
+ if (!jpeg_v4_0_5_is_idle(handle))
+ return -EBUSY;
+
+ jpeg_v4_0_5_enable_clock_gating(adev, i);
+ } else {
+ jpeg_v4_0_5_disable_clock_gating(adev, i);
+ }
}
return 0;
@@ -519,11 +711,25 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ uint32_t ip_instance;
+
DRM_DEBUG("IH: JPEG TRAP\n");
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_VCN:
+ ip_instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+ }
+
switch (entry->src_id) {
case VCN_4_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(adev->jpeg.inst->ring_dec);
+ amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec);
break;
case VCN_4_0__SRCID_DJPEG0_POISON:
case VCN_4_0__SRCID_EJPEG0_POISON:
@@ -589,8 +795,16 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
- DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec->me = i;
+ DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i);
+ }
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
@@ -599,8 +813,15 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->irq.num_types = 1;
- adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].irq.num_types = 1;
+ adev->jpeg.inst[i].irq.funcs = &jpeg_v4_0_5_irq_funcs;
+ }
}
const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
new file mode 100644
index 000000000000..e70200f97555
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v4_0_3.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+
+static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v5_0_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+
+/**
+ * jpeg_v5_0_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v5_0_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_rings = 1;
+
+ jpeg_v5_0_0_set_dec_ring_funcs(adev);
+ jpeg_v5_0_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v5_0_0_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ ring = adev->jpeg.inst->ring_dec;
+ ring->use_doorbell = true;
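+	/* vcn_ring0_1 is kept in 64-bit doorbell units, hence the shift to
+	 * 32-bit units; slot 1 of that range is used for the JPEG decode ring,
+	 * mirroring the convention of earlier JPEG/VCN revisions.
+	 */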
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+
+ sprintf(ring->name, "jpeg_dec");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v5_0_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_0_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Start the JPEG block and run the decode ring test
+ */
+static int jpeg_v5_0_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+ int r;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v5_0_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
+ jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v5_0_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v5_0_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_0_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v5_0_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v5_0_0_hw_init(adev);
+
+ return r;
+}
+
+static void jpeg_v5_0_0_disable_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+
+ data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+ data &= ~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK
+ | JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK);
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+}
+
+static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+
+ data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
+
+ data |= 1 << JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT;
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK
+ |JPEG_CGC_GATE__JPEG_ENC_MASK
+ |JPEG_CGC_GATE__JMCIF_MASK
+ |JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
+}
+
+static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev)
+{
+ uint32_t data = 0;
+
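+	/* Request ONO1 power-up via the DLDO config and wait for the power
+	 * status bit to clear; the gating path below writes 2 to power the
+	 * island back down, mirroring the JPEG 4.0.x sequence.
+	 */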
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(JPEG, 0, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* keep the JPEG in static PG mode */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev)
+{
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
+ 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
+ int r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, true);
+
+ /* disable power gating */
+ r = jpeg_v5_0_0_disable_static_power_gating(adev);
+ if (r)
+ return r;
+
+ /* JPEG disable CGC */
+ jpeg_v5_0_0_disable_clock_gating(adev);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC0_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC0_MASK);
+
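+	/* Program the JRBC ring buffer; the raw CNTL values correspond to
+	 * RB_NO_FETCH (bit 0) and RB_RPTR_WR_EN (bit 1) on earlier JPEG
+	 * revisions: hold fetch while the base, RPTR and WPTR are set up,
+	 * then release it.
+	 */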
+ WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v5_0_0_stop(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v5_0_0_enable_clock_gating(adev);
+
+ /* enable power gating */
+ r = jpeg_v5_0_0_enable_static_power_gating(adev);
+ if (r)
+ return r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, false);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v5_0_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v5_0_0_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v5_0_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v5_0_0_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v5_0_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 1;
+
+ ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+
+ return ret;
+}
+
+static int jpeg_v5_0_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v5_0_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+
+ if (enable) {
+ if (!jpeg_v5_0_0_is_idle(handle))
+ return -EBUSY;
+ jpeg_v5_0_0_enable_clock_gating(adev);
+ } else {
+ jpeg_v5_0_0_disable_clock_gating(adev);
+ }
+
+ return 0;
+}
+
+static int jpeg_v5_0_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_0_stop(adev);
+ else
+ ret = jpeg_v5_0_0_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v5_0_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: JPEG TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_4_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(adev->jpeg.inst->ring_dec);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
+ .name = "jpeg_v5_0_0",
+ .early_init = jpeg_v5_0_0_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v5_0_0_sw_init,
+ .sw_fini = jpeg_v5_0_0_sw_fini,
+ .hw_init = jpeg_v5_0_0_hw_init,
+ .hw_fini = jpeg_v5_0_0_hw_fini,
+ .suspend = jpeg_v5_0_0_suspend,
+ .resume = jpeg_v5_0_0_resume,
+ .is_idle = jpeg_v5_0_0_is_idle,
+ .wait_for_idle = jpeg_v5_0_0_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
+ .set_powergating_state = jpeg_v5_0_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v5_0_0_dec_ring_emit_vm_flush */
+ 22 + 22 + /* jpeg_v5_0_0_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v5_0_0_dec_ring_emit_ib */
+ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
+ .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v4_0_3_dec_ring_nop,
+ .insert_start = jpeg_v4_0_3_dec_ring_insert_start,
+ .insert_end = jpeg_v4_0_3_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->ring_dec->funcs = &jpeg_v5_0_0_dec_ring_vm_funcs;
+ DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_0_irq_funcs = {
+ .set = jpeg_v5_0_0_set_interrupt_state,
+ .process = jpeg_v5_0_0_process_interrupt,
+};
+
+static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->irq.num_types = 1;
+ adev->jpeg.inst->irq.funcs = &jpeg_v5_0_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &jpeg_v5_0_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
new file mode 100644
index 000000000000..bd348336b215
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V5_0_0_H__
+#define __JPEG_V5_0_0_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block;
+
+#endif /* __JPEG_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c
new file mode 100644
index 000000000000..396262044ea8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include "amdgpu.h"
+#include "lsdma_v7_0.h"
+#include "amdgpu_lsdma.h"
+
+#include "lsdma/lsdma_7_0_0_offset.h"
+#include "lsdma/lsdma_7_0_0_sh_mask.h"
+
+static int lsdma_v7_0_wait_pio_status(struct amdgpu_device *adev)
+{
+ return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS),
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK,
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK);
+}
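+
+/*
+ * The PIO path is fully synchronous: program the source/destination
+ * addresses, kick LSDMA_PIO_COMMAND, then poll LSDMA_PIO_STATUS until the
+ * engine is idle with an empty FIFO.
+ */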
+
+static int lsdma_v7_0_copy_mem(struct amdgpu_device *adev,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_0_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n");
+
+ return ret;
+}
+
+static int lsdma_v7_0_fill_mem(struct amdgpu_device *adev,
+ uint64_t dst_addr,
+ uint32_t data,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data);
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_0_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n");
+
+ return ret;
+}
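+
+/*
+ * MEM_POWER_CTRL_EN is cleared first so that the requested state is applied
+ * from a known-disabled baseline.
+ */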
+
+static void lsdma_v7_0_update_memory_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL);
+ tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, 0);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
+
+ tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, enable);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
+}
+
+const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs = {
+ .copy_mem = lsdma_v7_0_copy_mem,
+ .fill_mem = lsdma_v7_0_fill_mem,
+ .update_memory_power_gating = lsdma_v7_0_update_memory_power_gating
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h
new file mode 100644
index 000000000000..52b4485cdd98
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __LSDMA_V7_0_H__
+#define __LSDMA_V7_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs;
+
+#endif /* __LSDMA_V7_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 26d71a22395d..072c478665ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -49,6 +49,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
@@ -56,6 +58,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
+#define GFX_MES_DRAM_SIZE 0x80000
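+/* 512K MES ucode data region; keep in sync with the CP_MES_MDBOUND_LO bound programmed at load time */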
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
@@ -475,7 +478,13 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
- r = amdgpu_bo_create_reserved(adev, fw_size,
+ if (fw_size > GFX_MES_DRAM_SIZE) {
+ dev_err(adev->dev, "PIPE%d ucode data fw size (%d) is greater than dram size (%d)\n",
+ pipe, fw_size, GFX_MES_DRAM_SIZE);
+ return -EINVAL;
+ }
+
+ r = amdgpu_bo_create_reserved(adev, GFX_MES_DRAM_SIZE,
64 * 1024,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -611,8 +620,8 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
- /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
- WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
+ /* Set 0x7FFFF (512K-1) to CP_MES_MDBOUND_LO */
+ WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF);
if (prime_icache) {
/* invalidate ICACHE */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index fb53aacdcba2..c0fc44cdd658 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -33,6 +33,7 @@
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
@@ -705,8 +706,94 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};
+static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ u64 status, misc0;
+ int ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if ((type == ACA_ERROR_TYPE_UE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+ (type == ACA_ERROR_TYPE_CE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+/* The error codes below are referenced from the SMU driver interface header file */
+static int mmhub_v1_8_err_codes[] = {
+ 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
+ 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
+ 10, /* CODE_UTCL2_ROUTER */
+ 11, /* CODE_VML2 */
+ 12, /* CODE_VML2_WALKER */
+ 13, /* CODE_MMCANE */
+};
+
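+/*
+ * A bank is attributed to MMHUB only if its IPID instance (low bit masked
+ * off) matches the AID0 SMU MCA address and its error code is one of the
+ * MMHUB codes listed above.
+ */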
+static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ mmhub_v1_8_err_codes,
+ ARRAY_SIZE(mmhub_v1_8_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
+ .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report,
+ .aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
+};
+
+static const struct aca_info mmhub_v1_8_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &mmhub_v1_8_aca_bank_ops,
+};
+
+static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB,
+ &mmhub_v1_8_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
.ras_block = {
.hw_ops = &mmhub_v1_8_ras_hw_ops,
+ .ras_late_init = mmhub_v1_8_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index dc4812ecc98d..b3961968c10c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -98,6 +98,7 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 63725b2ebc03..a2bd2c3b1ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -404,7 +404,8 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
-static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
+static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
{
xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 6a68ee946f1c..77f5b55decf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -152,14 +152,14 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_nv_mailbox_set_valid(adev, false);
}
-static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
- enum idh_request req)
+static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
{
int r, retry = 1;
enum idh_event event = -1;
send_request:
- xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+ xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
case IDH_REQ_GPU_INIT_ACCESS:
@@ -170,6 +170,10 @@ send_request:
case IDH_REQ_GPU_INIT_DATA:
event = IDH_REQ_GPU_INIT_DATA_READY;
break;
+ case IDH_RAS_POISON:
+ if (data1 != 0)
+ event = IDH_RAS_POISON_READY;
+ break;
default:
break;
}
@@ -206,6 +210,13 @@ send_request:
return 0;
}
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ return xgpu_nv_send_access_requests_with_param(adev,
+ req, 0, 0, 0);
+}
+
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
int ret, i = 0;
@@ -424,9 +435,17 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
-static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
+static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
{
- xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
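+	/* From UMC 12.0.0 on, the poison message carries the RAS block id and
+	 * waits for IDH_RAS_POISON_READY, so data exchange is paused around
+	 * the request.
+	 */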
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+ xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+ } else {
+ amdgpu_virt_fini_data_exchange(adev);
+ xgpu_nv_send_access_requests_with_param(adev,
+ IDH_RAS_POISON, block, 0, 0);
+ amdgpu_virt_init_data_exchange(adev);
+ }
}
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index d0221ce08769..1e8fd90cab43 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -51,6 +51,7 @@ enum idh_event {
IDH_FAIL,
IDH_QUERY_ALIVE,
IDH_REQ_GPU_INIT_DATA_READY,
+ IDH_RAS_POISON_READY,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index de93614726c9..4178f4e5dad7 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -728,8 +728,7 @@ static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &navi10_ih_funcs;
}
-const struct amdgpu_ip_block_version navi10_ih_ip_block =
-{
+const struct amdgpu_ip_block_version navi10_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 5,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
new file mode 100644
index 000000000000..96ed00ac81ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "nbif_v6_3_1.h"
+
+#include "nbif/nbif_6_3_1_offset.h"
+#include "nbif/nbif_6_3_1_sh_mask.h"
+#include "pcie/pcie_6_1_0_offset.h"
+#include "pcie/pcie_6_1_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
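+/*
+ * Point the HDP MEM/REG flush registers at the mmio remap window so HDP
+ * flushes can be issued through the KFD-visible remap page.
+ */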
+static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
+static u32 nbif_v6_3_1_get_rev_id(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+
+ tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
+ tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
+
+ return tmp;
+}
+
+static void nbif_v6_3_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
+ BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
+ BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
+ else
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
+}
+
+static u32 nbif_v6_3_1_get_memsize(struct amdgpu_device *adev)
+{
+ return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
+}
+
+static void nbif_v6_3_1_sdma_doorbell_range(struct amdgpu_device *adev,
+ int instance, bool use_doorbell,
+ int doorbell_index,
+ int doorbell_size)
+{
+ if (instance == 0) {
+ u32 doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL);
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
+ S2A_DOORBELL_PORT2_ENABLE,
+ 0x1);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
+ S2A_DOORBELL_PORT2_AWID,
+ 0xe);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
+ S2A_DOORBELL_PORT2_RANGE_OFFSET,
+ doorbell_index);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
+ S2A_DOORBELL_PORT2_RANGE_SIZE,
+ doorbell_size);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
+ S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE,
+ 0x3);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
+ S2A_DOORBELL_PORT2_RANGE_SIZE,
+ 0);
+
+ WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL, doorbell_range);
+ }
+}
+
+static void nbif_v6_3_1_vcn_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index,
+ int instance)
+{
+ u32 doorbell_range;
+
+ if (instance)
+ doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL);
+ else
+ doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL);
+
+ if (use_doorbell) {
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
+ S2A_DOORBELL_PORT4_ENABLE,
+ 0x1);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
+ S2A_DOORBELL_PORT4_AWID,
+ instance ? 0x7 : 0x4);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
+ S2A_DOORBELL_PORT4_RANGE_OFFSET,
+ doorbell_index);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
+ S2A_DOORBELL_PORT4_RANGE_SIZE,
+ 8);
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
+ S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE,
+ instance ? 0x7 : 0x4);
+ } else
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
+ S2A_DOORBELL_PORT4_RANGE_SIZE,
+ 0);
+
+ if (instance)
+ WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL, doorbell_range);
+ else
+ WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL, doorbell_range);
+}
+
+static void nbif_v6_3_1_gc_doorbell_init(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL, 0x30000007);
+ WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL, 0x3000000d);
+}
+
+static void nbif_v6_3_1_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
+ BIF_DOORBELL_APER_EN, enable ? 1 : 0);
+}
+
+static void
+nbif_v6_3_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp = 0;
+
+ if (enable) {
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+ DOORBELL_SELFRING_GPA_APER_EN, 1) |
+ REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+ DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+ REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+ DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+ WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
+ lower_32_bits(adev->doorbell.base));
+ WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+ upper_32_bits(adev->doorbell.base));
+ }
+
+ WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
+}
+
+static void nbif_v6_3_1_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
+{
+ u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL);
+
+ if (use_doorbell) {
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_ENABLE,
+ 0x1);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWID,
+ 0x0);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_OFFSET,
+ doorbell_index);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE,
+ 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
+ 0x0);
+ } else
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
+ S2A_DOORBELL_PORT1_RANGE_SIZE,
+ 0);
+
+ WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL, ih_doorbell_range);
+}
+
+static void nbif_v6_3_1_ih_control(struct amdgpu_device *adev)
+{
+ u32 interrupt_cntl;
+
+ /* setup interrupt control */
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
+
+ interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
+ /*
+ * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+ * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
+ IH_DUMMY_RD_OVERRIDE, 0);
+
+ /* BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
+ IH_REQ_NONSNOOP_EN, 0);
+
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
+}
+
+static void
+nbif_v6_3_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void
+nbif_v6_3_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void
+nbif_v6_3_1_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+}
+
+static u32 nbif_v6_3_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbif_v6_3_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbif_v6_3_1_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
+}
+
+static u32 nbif_v6_3_1_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
+}
+
+const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg = {
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+};
+
+static void nbif_v6_3_1_init_registers(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+ data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+}
+
+static u32 nbif_v6_3_1_get_rom_offset(struct amdgpu_device *adev)
+{
+ u32 data, rom_offset;
+
+ data = RREG32_SOC15(NBIO, 0, regREGS_ROM_OFFSET_CTRL);
+ rom_offset = REG_GET_FIELD(data, REGS_ROM_OFFSET_CTRL, ROM_OFFSET);
+
+ return rom_offset;
+}
+
+#ifdef CONFIG_PCIEASPM
+static void nbif_v6_3_1_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = RREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data = 0x35EB;
+ data &= ~RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ data &= ~RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2);
+ data &= ~RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ if (adev->pdev->ltr_path)
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ else
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+#endif
+
+static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev)
+{
+#ifdef CONFIG_PCIEASPM
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL, data);
+
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
+ data &= ~RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
+ data &= ~RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+#if 0
+	/* regPSWUSP0_PCIE_LC_CNTL2 should be replaced by PCIE_LC_CNTL2 or another register? */
+ def = data = RREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2, data);
+#endif
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL4);
+ data |= PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL4, data);
+
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL);
+ data |= PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL, data);
+
+ nbif_v6_3_1_program_ltr(adev);
+
+ def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);
+
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
+ data |= 0x0 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL, data);
+
+ def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3, data);
+#endif
+}
+
+const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs = {
+ .get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
+ .get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
+ .get_rev_id = nbif_v6_3_1_get_rev_id,
+ .mc_access_enable = nbif_v6_3_1_mc_access_enable,
+ .get_memsize = nbif_v6_3_1_get_memsize,
+ .sdma_doorbell_range = nbif_v6_3_1_sdma_doorbell_range,
+ .vcn_doorbell_range = nbif_v6_3_1_vcn_doorbell_range,
+ .gc_doorbell_init = nbif_v6_3_1_gc_doorbell_init,
+ .enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbif_v6_3_1_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
+ .ih_control = nbif_v6_3_1_ih_control,
+ .init_registers = nbif_v6_3_1_init_registers,
+ .remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
+ .get_rom_offset = nbif_v6_3_1_get_rom_offset,
+ .program_aspm = nbif_v6_3_1_program_aspm,
+};
+
+static void nbif_v6_3_1_sriov_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
+{
+}
+
+static void nbif_v6_3_1_sriov_sdma_doorbell_range(struct amdgpu_device *adev,
+ int instance, bool use_doorbell,
+ int doorbell_index,
+ int doorbell_size)
+{
+}
+
+static void nbif_v6_3_1_sriov_vcn_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell,
+ int doorbell_index, int instance)
+{
+}
+
+static void nbif_v6_3_1_sriov_gc_doorbell_init(struct amdgpu_device *adev)
+{
+}
+
+const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
+ .get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
+ .get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
+ .get_rev_id = nbif_v6_3_1_get_rev_id,
+ .mc_access_enable = nbif_v6_3_1_mc_access_enable,
+ .get_memsize = nbif_v6_3_1_get_memsize,
+ .sdma_doorbell_range = nbif_v6_3_1_sriov_sdma_doorbell_range,
+ .vcn_doorbell_range = nbif_v6_3_1_sriov_vcn_doorbell_range,
+ .gc_doorbell_init = nbif_v6_3_1_sriov_gc_doorbell_init,
+ .enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbif_v6_3_1_sriov_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
+ .ih_control = nbif_v6_3_1_ih_control,
+ .init_registers = nbif_v6_3_1_init_registers,
+ .remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
+ .get_rom_offset = nbif_v6_3_1_get_rom_offset,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
new file mode 100644
index 000000000000..b7f2e0d88905
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NBIO_V6_3_1_H__
+#define __NBIO_V6_3_1_H__
+
+#include "soc15_common.h"
+
+extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
+extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
+extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 1f52b4b1db03..05020141c0ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -89,7 +89,9 @@ static void nbio_v7_11_vpe_doorbell_range(struct amdgpu_device *adev, int instan
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
- u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE);
+ u32 reg = instance == 0 ?
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE1_DOORBELL_RANGE);
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
@@ -112,7 +114,10 @@ static void nbio_v7_11_vcn_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell,
int doorbell_index, int instance)
{
- u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
+ u32 reg = instance == 0 ?
+		SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN1_DOORBELL_RANGE);
+
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index b4723d68eab0..40d1e209eab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -35,15 +35,6 @@
/* Core 0 Port 0 counter */
#define smnPCIEP_NAK_COUNTER 0x1A340218
-#define smnPCIE_PERF_CNTL_TXCLK3 0x1A38021c
-#define smnPCIE_PERF_CNTL_TXCLK7 0x1A380888
-#define smnPCIE_PERF_COUNT_CNTL 0x1A380200
-#define smnPCIE_PERF_COUNT0_TXCLK3 0x1A380220
-#define smnPCIE_PERF_COUNT0_TXCLK7 0x1A38088C
-#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK3 0x1A3808F8
-#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK7 0x1A380918
-
-
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -484,59 +475,6 @@ static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
return (nak_r + nak_g);
}
-static void nbio_v7_9_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
- uint64_t *count1)
-{
- uint32_t perfctrrx = 0;
- uint32_t perfctrtx = 0;
-
- /* This reports 0 on APUs, so return to avoid writing/reading registers
- * that may or may not be different from their GPU counterparts
- */
- if (adev->flags & AMD_IS_APU)
- return;
-
- /* Use TXCLK3 counter group for rx event */
- /* Use TXCLK7 counter group for tx event */
- /* Set the 2 events that we wish to watch, defined above */
- /* 40 is event# for received msgs */
- /* 2 is event# of posted requests sent */
- perfctrrx = REG_SET_FIELD(perfctrrx, PCIE_PERF_CNTL_TXCLK3, EVENT0_SEL, 40);
- perfctrtx = REG_SET_FIELD(perfctrtx, PCIE_PERF_CNTL_TXCLK7, EVENT0_SEL, 2);
-
- /* Write to enable desired perf counters */
- WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctrrx);
- WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK7, perfctrtx);
-
- /* Zero out and enable SHADOW_WR
- * Write 0x6:
- * Bit 1 = Global Shadow wr(1)
- * Bit 2 = Global counter reset enable(1)
- */
- WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
-
- /* Enable Gloabl Counter
- * Write 0x1:
- * Bit 0 = Global Counter Enable(1)
- */
- WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000001);
-
- msleep(1000);
-
- /* Disable Global Counter, Reset and enable SHADOW_WR
- * Write 0x6:
- * Bit 1 = Global Shadow wr(1)
- * Bit 2 = Global counter reset enable(1)
- */
- WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
-
- /* Get the upper and lower count */
- *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) |
- ((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK3) << 32);
- *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK7) |
- ((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK7) << 32);
-}
-
const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
@@ -561,7 +499,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.init_registers = nbio_v7_9_init_registers,
.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
- .get_pcie_usage = nbio_v7_9_get_pcie_usage,
};
static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 4bb5e10217bb..7566973ed8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -296,6 +296,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_VPEC_FW1 = 100, /* VPEC FW1 To Save VPE */
GFX_FW_TYPE_VPEC_FW2 = 101, /* VPEC FW2 To Save VPE */
GFX_FW_TYPE_VPE = 102,
+ GFX_FW_TYPE_JPEG_RAM = 128, /**< JPEG Command buffer */
GFX_FW_TYPE_P2S_TABLE = 129,
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index efa37e3b7931..2395f1856962 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -506,7 +506,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
* before training, and restore it after training to avoid
* VRAM corruption.
*/
- sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+ sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index df1844d0800f..0da50ea46eaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -27,6 +27,7 @@
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v13_0.h"
+#include "amdgpu_ras.h"
#include "mp/mp_13_0_2_offset.h"
#include "mp/mp_13_0_2_sh_mask.h"
@@ -52,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_1_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -100,6 +103,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
@@ -187,11 +191,18 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ int ret;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
- psp_v13_0_wait_for_vmbx_ready(psp);
+ ret = psp_v13_0_wait_for_vmbx_ready(psp);
+ if (ret)
+ amdgpu_ras_query_boot_status(adev, 4);
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ amdgpu_ras_query_boot_status(adev, 4);
- return psp_v13_0_wait_for_bootloader(psp);
+ return ret;
}
return 0;
@@ -553,7 +564,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
* before training, and restore it after training to avoid
* VRAM corruption.
*/
- sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+ sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
@@ -763,81 +774,28 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
return 0;
}
-
-static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
- uint32_t inst,
- uint32_t boot_error)
-{
- uint32_t socket_id;
- uint32_t aid_id;
- uint32_t hbm_id;
- uint32_t reg_data;
-
- socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
- aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
- hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
-
- reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
- dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
- socket_id, aid_id, reg_data);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
- socket_id, aid_id, hbm_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
- dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
- dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
- socket_id, aid_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
- dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
- socket_id, aid_id, hbm_id);
-
- if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
- dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
- socket_id, aid_id, hbm_id);
-}
-
-static int psp_v13_0_query_boot_status(struct psp_context *psp)
+static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- int inst_mask = adev->aid_mask;
- uint32_t reg_data;
- uint32_t i;
- int ret = 0;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u32 reg_data;
- if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
- return 0;
+ /* query ras cap should be done from host side */
+ if (amdgpu_sriov_vf(adev))
+ return false;
- if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
- return 0;
+ if (!con)
+ return false;
- for_each_inst(i, inst_mask) {
- reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
- if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
- psp_v13_0_boot_error_reporting(adev, i, reg_data);
- ret = -EINVAL;
- break;
- }
+ if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) &&
+ (!(adev->flags & AMD_IS_APU))) {
+ reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
+ adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0));
+ con->poison_supported = ((reg_data & GENMASK_ULL(24, 24)) >> 24) ? true : false;
+ return true;
+ } else {
+ return false;
}
-
- return ret;
}
static const struct psp_funcs psp_v13_0_funcs = {
@@ -862,7 +820,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.update_spirom = psp_v13_0_update_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
- .query_boot_status = psp_v13_0_query_boot_status,
+ .get_ras_capability = psp_v13_0_get_ras_capability,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
new file mode 100644
index 000000000000..78a95f8f370b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <drm/drm_drv.h>
+#include <linux/vmalloc.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v14_0.h"
+
+#include "mp/mp_14_0_2_offset.h"
+#include "mp/mp_14_0_2_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
+/* Read USB-PD from LFB */
+#define GFX_CMD_USB_PD_USE_LFB 0x480
+
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+
+static int psp_v14_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char ucode_prefix[30];
+ int err = 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ err = psp_init_sos_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static bool psp_v14_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
+static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* Wait for bootloader to signify that it is
+ ready, having bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
+static int psp_v14_0_bootloader_load_component(struct psp_context *psp,
+ struct psp_bin_desc *bin_desc,
+ enum psp_bootloader_cmd bl_cmd)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check tOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v14_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v14_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP KDB binary to memory */
+ memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes);
+
+ /* Provide the PSP KDB to bootloader */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = bl_cmd;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ ret = psp_v14_0_wait_for_bootloader(psp);
+
+ return ret;
+}
+
+static int psp_v14_0_bootloader_load_kdb(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
+}
+
+static int psp_v14_0_bootloader_load_spl(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_TOS_SPL_TABLE);
+}
+
+static int psp_v14_0_bootloader_load_sysdrv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
+}
+
+static int psp_v14_0_bootloader_load_soc_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV);
+}
+
+static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV);
+}
+
+static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
+}
+
+static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
+{
+ return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
+}
+
+
+static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
+{
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v14_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v14_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy Secure OS binary to PSP memory */
+ memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes);
+
+ /* Provide the PSP secure OS to bootloader */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ 0, true);
+
+ return ret;
+}
+
+static int psp_v14_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* Write the ring destroy command*/
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ } else {
+ /* Write the ring destroy command*/
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
+static int psp_v14_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ ret = psp_v14_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v14_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
+
+ return ret;
+}
+
+static int psp_v14_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v14_0_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
+}
+
+static uint32_t psp_v14_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev))
+ data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v14_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67, value);
+}
+
+static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ dev_dbg(adev->dev, "training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeed" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+
+static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t *)ctx->sys_cache;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t p2c_header[4];
+ uint32_t sz;
+ void *buf;
+ int ret, idx;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ dev_dbg(adev->dev, "Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ dev_err(adev->dev, "Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v14_0_is_sos_alive(psp)) {
+ dev_dbg(adev->dev, "SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ dev_dbg(adev->dev, "sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ dev_dbg(adev->dev, "Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ dev_dbg(adev->dev, "sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ dev_dbg(adev->dev, "sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ dev_dbg(adev->dev, "p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ dev_dbg(adev->dev, "Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ /*
+ * Long training will encroach a certain amount on the bottom of VRAM;
+ * save the content from the bottom of VRAM to system memory
+ * before training, and restore it after training to avoid
+ * VRAM corruption.
+ */
+ sz = BIST_MEM_TRAINING_ENCROACHED_SIZE;
+
+ if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+ dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+ adev->gmc.visible_vram_size,
+ adev->mman.aper_base_kaddr);
+ return -EINVAL;
+ }
+
+ buf = vmalloc(sz);
+ if (!buf) {
+ dev_err(adev->dev, "failed to allocate system memory.\n");
+ return -ENOMEM;
+ }
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
+ ret = psp_v14_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
+ drm_dev_exit(idx);
+ return ret;
+ }
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
+ vfree(buf);
+ drm_dev_exit(idx);
+ } else {
+ vfree(buf);
+ return -ENODEV;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v14_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ dev_err(adev->dev, "send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
+static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t reg_status;
+ int ret, i = 0;
+
+ /*
+ * The LFB address is aligned to a 1MB boundary and has to be
+ * right-shifted by 20 so that it can be passed in a 32-bit C2P
+ * register
+ */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up interrupt so PSP can pick up the address */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16));
+
+ /* FW load takes very long time */
+ do {
+ msleep(1000);
+ reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35);
+
+ if (reg_status & 0x80000000)
+ goto done;
+
+ } while (++i < USBC_PD_POLLING_LIMIT_S);
+
+ return -ETIME;
+done:
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %04x\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (!ret)
+ *fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
+
+ return ret;
+}
+
+static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
+{
+ uint32_t reg_status = 0, reg_val = 0;
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* clear MBX ready (MBOX_READY_MASK bit is 0) and set update command */
+ reg_val |= (cmd << 16);
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115, reg_val);
+
+ /* Ring the doorbell */
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_73, 1);
+
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ if (ret) {
+ dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
+ return ret;
+ }
+
+ reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115);
+ if ((reg_status & 0xFFFF) != 0) {
+ dev_err(adev->dev, "SPI cmd %x failed, fail status = %04x\n",
+ cmd, reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v14_0_update_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ if (ret) {
+ dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ psp->vbflash_done = true;
+
+ ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int psp_v14_0_vbflash_status(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ return RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115);
+}
+
+static const struct psp_funcs psp_v14_0_funcs = {
+ .init_microcode = psp_v14_0_init_microcode,
+ .bootloader_load_kdb = psp_v14_0_bootloader_load_kdb,
+ .bootloader_load_spl = psp_v14_0_bootloader_load_spl,
+ .bootloader_load_sysdrv = psp_v14_0_bootloader_load_sysdrv,
+ .bootloader_load_soc_drv = psp_v14_0_bootloader_load_soc_drv,
+ .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv,
+ .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv,
+ .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv,
+ .bootloader_load_sos = psp_v14_0_bootloader_load_sos,
+ .ring_create = psp_v14_0_ring_create,
+ .ring_stop = psp_v14_0_ring_stop,
+ .ring_destroy = psp_v14_0_ring_destroy,
+ .ring_get_wptr = psp_v14_0_ring_get_wptr,
+ .ring_set_wptr = psp_v14_0_ring_set_wptr,
+ .mem_training = psp_v14_0_memory_training,
+ .load_usbc_pd_fw = psp_v14_0_load_usbc_pd_fw,
+ .read_usbc_pd_fw = psp_v14_0_read_usbc_pd_fw,
+ .update_spirom = psp_v14_0_update_spirom,
+ .vbflash_stat = psp_v14_0_vbflash_status
+};
+
+void psp_v14_0_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v14_0_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h
new file mode 100644
index 000000000000..dd18ba2cfad5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __PSP_V14_0_H__
+#define __PSP_V14_0_H__
+
+#include "amdgpu_psp.h"
+
+#define PSP_SPIROM_UPDATE_TIMEOUT 60000 /* 60s */
+
+void psp_v14_0_set_psp_funcs(struct psp_context *psp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 8d5d86675a7f..07e19caf2bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -57,22 +57,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
-static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
-{
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
SDMA0_REGISTER_OFFSET,
SDMA1_REGISTER_OFFSET
};
-static const u32 golden_settings_iceland_a11[] =
-{
+static const u32 golden_settings_iceland_a11[] = {
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};
-static const u32 iceland_mgcg_cgcg_init[] =
-{
+static const u32 iceland_mgcg_cgcg_init[] = {
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
@@ -142,7 +139,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
case CHIP_TOPAZ:
chip_name = "topaz";
break;
- default: BUG();
+ default:
+ BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1258,8 +1256,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
-const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
-{
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 2,
.minor = 4,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 3d68dd5523c6..43775cb67ff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2104,7 +2104,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instance;
- struct amdgpu_task_info task_info;
+ struct amdgpu_task_info *task_info;
u64 addr;
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
@@ -2116,15 +2116,20 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
-
dev_dbg_ratelimited(adev->dev,
- "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
- "pasid:%u, for process %s pid %d thread %s pid %d\n",
- instance, addr, entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid, task_info.process_name, task_info.tgid,
- task_info.task_name, task_info.pid);
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u pasid:%u\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid);
+
+ task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+ if (task_info) {
+ dev_dbg_ratelimited(adev->dev,
+ " for process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task_name, task_info->pid);
+ amdgpu_vm_put_task_info(task_info);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2d688dca26be..eaa4f5f49949 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -45,6 +45,8 @@
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400
+
#define WREG32_SDMA(instance, offset, value) \
WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
@@ -1642,7 +1644,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instance;
- struct amdgpu_task_info task_info;
+ struct amdgpu_task_info *task_info;
u64 addr;
instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
@@ -1654,15 +1656,19 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
-
dev_dbg_ratelimited(adev->dev,
- "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
- "pasid:%u, for process %s pid %d thread %s pid %d\n",
- instance, addr, entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid, task_info.process_name, task_info.tgid,
- task_info.task_name, task_info.pid);
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u pasid:%u\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid);
+
+ task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+ if (task_info) {
+ dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task_name, task_info->pid);
+ amdgpu_vm_put_task_info(task_info);
+ }
+
return 0;
}
@@ -2204,9 +2210,79 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
.reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
};
+static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle,
+ struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ u64 status, misc0;
+ int ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ if ((type == ACA_ERROR_TYPE_UE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+ (type == ACA_ERROR_TYPE_CE &&
+ ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+/* CODE_SDMA0 - CODE_SDMA4, refer to the SMU driver interface header file */
+static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
+
+static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_error_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ sdma_v4_4_2_err_codes,
+ ARRAY_SIZE(sdma_v4_4_2_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
+ .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report,
+ .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
+};
+
+static const struct aca_info sdma_v4_4_2_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &sdma_v4_4_2_aca_bank_ops,
+};
+
+static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_sdma_ras_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__SDMA,
+ &sdma_v4_4_2_aca_info, NULL);
+}
+
static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
.ras_block = {
.hw_ops = &sdma_v4_4_2_ras_hw_ops,
+ .ras_late_init = sdma_v4_4_2_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 3c485e5a531a..883e8a1b8a40 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -249,35 +249,23 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
return ret;
}
-static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned ret;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, 1);
- ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
- amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
+ /* this is the offset we need to patch later */
+ ret = ring->wptr & ring->buf_mask;
+ /* insert dummy here and patch it later */
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
- unsigned offset)
-{
- unsigned cur;
-
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (cur > offset)
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
-}
-
/**
* sdma_v5_0_ring_get_rptr - get the current read pointer
*
@@ -1780,7 +1768,6 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
- .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
.preempt_ib = sdma_v5_0_ring_preempt_ib,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 0058f3f7cf6e..42f4bd250def 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,35 +89,23 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned ret;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, 1);
- ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
- amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
+ /* this is the offset we need to patch later */
+ ret = ring->wptr & ring->buf_mask;
+ /* insert dummy here and patch it later */
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void sdma_v5_2_ring_patch_cond_exec(struct amdgpu_ring *ring,
- unsigned offset)
-{
- unsigned cur;
-
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (cur > offset)
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
-}
-
/**
* sdma_v5_2_ring_get_rptr - get the current read pointer
*
@@ -1722,7 +1710,6 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_2_ring_init_cond_exec,
- .patch_cond_exec = sdma_v5_2_ring_patch_cond_exec,
.preempt_ib = sdma_v5_2_ring_preempt_ib,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 3c7ddd219de8..361835a61f2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -79,35 +80,23 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
{
unsigned ret;
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, 1);
- ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
- amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
+ /* this is the offset we need to patch later */
+ ret = ring->wptr & ring->buf_mask;
+ /* insert dummy here and patch it later */
+ amdgpu_ring_write(ring, 0);
return ret;
}
-static void sdma_v6_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
- unsigned offset)
-{
- unsigned cur;
-
- BUG_ON(offset > ring->buf_mask);
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = (ring->wptr - 1) & ring->buf_mask;
- if (cur > offset)
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
-}
-
/**
* sdma_v6_0_ring_get_rptr - get the current read pointer
*
@@ -1541,7 +1530,6 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
.emit_reg_wait = sdma_v6_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v6_0_ring_init_cond_exec,
- .patch_cond_exec = sdma_v6_0_ring_patch_cond_exec,
.preempt_ib = sdma_v6_0_ring_preempt_ib,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index a757526153e5..23e4ef4fff7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2331,28 +2331,18 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
gpu_cfg &
PCI_EXP_LNKCTL_HAWD);
- pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (bridge_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(root,
- PCI_EXP_LNKCTL2,
- tmp16);
-
- pcie_capability_read_word(adev->pdev,
- PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (gpu_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(adev->pdev,
- PCI_EXP_LNKCTL2,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -2365,16 +2355,15 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
-
+ tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
- pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c64c01e2944a..dec81ccf6240 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE1;
}
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+ * 2) S3 suspend abort and TOS already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
- (adev->apu_flags & AMD_APU_IS_RAVEN2))
+ /* On the latest Raven, the GPU reset can be performed
+ * successfully. So now, temporarily enable it for the
+ * S3 suspend abort case.
+ */
+ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+ !soc15_need_reset_on_resume(adev))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -895,7 +918,6 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
- .get_pcie_usage = &amdgpu_nbio_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
@@ -1278,7 +1300,8 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
- if (adev->nbio.ras_if &&
+ if ((!amdgpu_sriov_vf(adev)) &&
+ adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
if (adev->nbio.ras &&
adev->nbio.ras->init_ras_controller_interrupt)
@@ -1298,24 +1321,6 @@ static int soc15_common_suspend(void *handle)
return soc15_common_hw_fini(adev);
}
-static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
-{
- u32 sol_reg;
-
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-
- /* Will reset for the following suspend abort cases.
- * 1) Only reset limit on APU side, dGPU hasn't checked yet.
- * 2) S3 suspend abort and TOS already launched.
- */
- if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !adev->suspend_complete &&
- sol_reg)
- return true;
-
- return false;
-}
-
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 4d7188912edf..581a3bd11481 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -185,6 +185,12 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
return 0;
+ case IP_VERSION(4, 0, 6):
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ return 0;
default:
return -EINVAL;
}
@@ -382,6 +388,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
return AMD_RESET_METHOD_MODE2;
default:
if (amdgpu_dpm_is_baco_supported(adev))
@@ -711,11 +718,41 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG_DPG |
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
+ case IP_VERSION(11, 5, 1):
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ adev->external_rev_id = adev->rev_id + 0xc1;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -865,6 +902,7 @@ static int soc21_common_set_clockgating_state(void *handle,
case IP_VERSION(7, 7, 0):
case IP_VERSION(7, 7, 1):
case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 879bb7af297c..056d4df8fa1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -36,6 +36,9 @@ enum ras_command {
TA_RAS_COMMAND__ENABLE_FEATURES = 0,
TA_RAS_COMMAND__DISABLE_FEATURES,
TA_RAS_COMMAND__TRIGGER_ERROR,
+ TA_RAS_COMMAND__QUERY_BLOCK_INFO,
+ TA_RAS_COMMAND__QUERY_SUB_BLOCK_INFO,
+ TA_RAS_COMMAND__QUERY_ADDRESS,
};
enum ta_ras_status {
@@ -105,6 +108,11 @@ enum ta_ras_error_type {
TA_RAS_ERROR__POISON = 8,
};
+enum ta_ras_address_type {
+ TA_RAS_MCA_TO_PA,
+ TA_RAS_PA_TO_MCA,
+};
+
/* Input/output structures for RAS commands */
/**********************************************************/
@@ -133,12 +141,38 @@ struct ta_ras_init_flags {
uint8_t channel_dis_num;
};
+struct ta_ras_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+};
+
+struct ta_ras_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct ta_ras_query_address_input {
+ enum ta_ras_address_type addr_type;
+ struct ta_ras_mca_addr ma;
+ struct ta_ras_phy_addr pa;
+};
+
struct ta_ras_output_flags {
uint8_t ras_init_success_flag;
uint8_t err_inject_switch_disable_flag;
uint8_t reg_access_failure_flag;
};
+struct ta_ras_query_address_output {
+ /* don't use the flags here */
+ struct ta_ras_output_flags flags;
+ struct ta_ras_mca_addr ma;
+ struct ta_ras_phy_addr pa;
+};
+
/* Common input structure for RAS callbacks */
/**********************************************************/
union ta_ras_cmd_input {
@@ -146,12 +180,14 @@ union ta_ras_cmd_input {
struct ta_ras_enable_features_input enable_features;
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
+ struct ta_ras_query_address_input address;
uint32_t reserve_pad[256];
};
union ta_ras_cmd_output {
struct ta_ras_output_flags flags;
+ struct ta_ras_query_address_output address;
uint32_t reserve_pad[256];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 7458a218e89d..77af4e25ff46 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -89,12 +89,28 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
umc_v12_0_reset_error_count_per_channel, NULL);
}
+bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
+{
+ dev_info(adev->dev,
+ "MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
+ mc_umc_status,
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
+ );
+
+ return (amdgpu_ras_is_poison_mode_supported(adev) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
+}
+
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
- if (amdgpu_ras_is_poison_mode_supported(adev) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
- return true;
+ if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
+ return false;
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
@@ -104,9 +120,7 @@ bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_um
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
- if (amdgpu_ras_is_poison_mode_supported(adev) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+ if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
return false;
return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -119,9 +133,10 @@ bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_
!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}
-static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
+static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
uint64_t umc_reg_offset,
- unsigned long *error_count)
+ unsigned long *error_count,
+ check_error_type_func error_type_func)
{
uint64_t mc_umc_status;
uint64_t mc_umc_status_addr;
@@ -129,31 +144,11 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- /* Rely on MCUMC_STATUS for correctable error counter
- * MCUMC_STATUS is a 64 bit register
- */
+ /* Check MCUMC_STATUS */
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
- if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
- *error_count += 1;
-}
-
-static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev,
- uint64_t umc_reg_offset,
- unsigned long *error_count)
-{
- uint64_t mc_umc_status;
- uint64_t mc_umc_status_addr;
-
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
-
- /* Check the MCUMC_STATUS. */
- mc_umc_status =
- RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
-
- if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
+ if (error_type_func(adev, mc_umc_status))
*error_count += 1;
}
@@ -162,7 +157,7 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
uint32_t ch_inst, void *data)
{
struct ras_err_data *err_data = (struct ras_err_data *)data;
- unsigned long ue_count = 0, ce_count = 0;
+ unsigned long ue_count = 0, ce_count = 0, de_count = 0;
/* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
* which can be used as die ID directly */
@@ -174,11 +169,16 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
uint64_t umc_reg_offset =
get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);
- umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
- umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
+ umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+ &ce_count, umc_v12_0_is_correctable_error);
+ umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+ &ue_count, umc_v12_0_is_uncorrectable_error);
+ umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+ &de_count, umc_v12_0_is_deferred_error);
amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);
return 0;
}
@@ -203,14 +203,14 @@ static bool umc_v12_0_bit_wise_xor(uint32_t val)
return result;
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint64_t err_addr,
- uint32_t ch_inst, uint32_t umc_inst,
- uint32_t node_inst)
+static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
+ uint32_t node_inst,
+ struct ta_ras_query_address_output *addr_out)
{
uint32_t channel_index, i;
- uint64_t soc_pa, na, retired_page, column;
- uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row, row_xor;
+ uint64_t na, soc_pa;
+ uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
uint32_t bank0, bank1, bank2, bank3, bank;
bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
@@ -260,12 +260,44 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
/* the umc channel bits are not original values, they are hashed */
UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
+ addr_out->pa.pa = soc_pa;
+ addr_out->pa.bank = bank;
+ addr_out->pa.channel_idx = channel_index;
+}
+
+static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst,
+ uint32_t node_inst)
+{
+ uint32_t col, row, row_xor, bank, channel_index;
+ uint64_t soc_pa, retired_page, column;
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+
+ addr_in.addr_type = TA_RAS_MCA_TO_PA;
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch_inst;
+ addr_in.ma.umc_inst = umc_inst;
+ addr_in.ma.node_inst = node_inst;
+
+ if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
+ /* fall back to the old path if we fail to get the PA from PSP */
+ umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst,
+ node_inst, &addr_out);
+
+ soc_pa = addr_out.pa.pa;
+ bank = addr_out.pa.bank;
+ channel_index = addr_out.pa.channel_idx;
+
+ col = (err_addr >> 1) & 0x1fULL;
+ row = (err_addr >> 10) & 0x3fffULL;
+ row_xor = row ^ (0x1ULL << 13);
/* clear [C3 C2] in soc physical address */
soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
/* clear [C4] in soc physical address */
soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
- row_xor = row ^ (0x1ULL << 13);
/* loop for all possibilities of [C4 C3 C2] */
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
@@ -316,10 +348,8 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
}
/* calculate error address if ue error is detected */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1) {
-
+ if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
+ umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
@@ -385,45 +415,69 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
{
struct ras_err_node *err_node;
uint64_t mc_umc_status;
+ struct ras_err_info *err_info;
+ struct ras_err_addr *mca_err_addr, *tmp;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
for_each_ras_error(err_node, err_data) {
- mc_umc_status = err_node->err_info.err_addr.err_status;
- if (!mc_umc_status)
+ err_info = &err_node->err_info;
+ if (list_empty(&err_info->err_addr_list))
continue;
- if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
- uint64_t mca_addr, err_addr, mca_ipid;
- uint32_t InstanceIdLo;
- struct amdgpu_smuio_mcm_config_info *mcm_info;
-
- mcm_info = &err_node->err_info.mcm_info;
- mca_addr = err_node->err_info.err_addr.err_addr;
- mca_ipid = err_node->err_info.err_addr.err_ipid;
-
- err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
-
- dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
- mca_ipid,
- mcm_info->die_id,
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- err_addr);
-
- umc_v12_0_convert_error_address(adev,
- err_data, err_addr,
- MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
- MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
- mcm_info->die_id);
-
- /* Clear umc error address content */
- memset(&err_node->err_info.err_addr,
- 0, sizeof(err_node->err_info.err_addr));
+ list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
+ mc_umc_status = mca_err_addr->err_status;
+ if (mc_umc_status &&
+ (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
+ umc_v12_0_is_deferred_error(adev, mc_umc_status))) {
+ uint64_t mca_addr, err_addr, mca_ipid;
+ uint32_t InstanceIdLo;
+
+ mca_addr = mca_err_addr->err_addr;
+ mca_ipid = mca_err_addr->err_ipid;
+
+ err_addr = REG_GET_FIELD(mca_addr,
+ MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+ dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+ mca_ipid,
+ err_info->mcm_info.die_id,
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ err_addr);
+
+ umc_v12_0_convert_error_address(adev,
+ err_data, err_addr,
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ err_info->mcm_info.die_id);
+ }
+
+ /* Delete error address node from list and free memory */
+ amdgpu_ras_del_mca_err_addr(err_info, mca_err_addr);
}
}
}
+static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, void *ras_error_status)
+{
+ uint64_t mc_umc_status = *(uint64_t *)ras_error_status;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ return umc_v12_0_is_correctable_error(adev, mc_umc_status);
+ case AMDGPU_MCA_ERROR_TYPE_DE:
+ return umc_v12_0_is_deferred_error(adev, mc_umc_status);
+ default:
+ return false;
+ }
+
+ return false;
+}
+
static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
@@ -444,12 +498,71 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
.query_ras_error_address = umc_v12_0_query_ras_error_address,
};
+static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+ struct aca_bank_report *report, void *data)
+{
+ struct amdgpu_device *adev = handle->adev;
+ u64 status;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &report->info);
+ if (ret)
+ return ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ if (umc_v12_0_is_uncorrectable_error(adev, status)) {
+ report->count[type] = 1;
+ }
+ break;
+ case ACA_ERROR_TYPE_CE:
+ if (umc_v12_0_is_correctable_error(adev, status)) {
+ report->count[type] = 1;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
+ .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report,
+};
+
+const struct aca_info umc_v12_0_aca_info = {
+ .hwip = ACA_HWIP_TYPE_UMC,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &umc_v12_0_aca_bank_ops,
+};
+
+static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int ret;
+
+ ret = amdgpu_umc_ras_late_init(adev, ras_block);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
+ &umc_v12_0_aca_info, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
+ .ras_late_init = umc_v12_0_ras_late_init,
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
+ .check_ecc_err_status = umc_v12_0_check_ecc_err_status,
};
+
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index e8de3a92251a..5973bfb14fce 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -121,9 +121,12 @@
(((_ipid_lo) >> 12) & 0xF))
#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
+typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
+
extern const uint32_t
umc_v12_0_channel_idx_tbl[]
[UMC_V12_0_UMC_INSTANCE_NUM]
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
index 0d6b50528d76..97fa88ed770c 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -25,7 +25,7 @@
static void umc_v6_0_init_registers(struct amdgpu_device *adev)
{
- unsigned i,j;
+ unsigned i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 49e4c3c09aca..0468955338b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -1684,6 +1684,9 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
case SOC15_IH_CLIENTID_VCN:
ip_instance = 0;
break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
default:
DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
new file mode 100644
index 000000000000..d6ee9958ba5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -0,0 +1,1339 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_hw_ip.h"
+#include "vcn_v2_0.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "vcn_v5_0_0.h"
+
+#include <drm/drm_drv.h>
+
+static int amdgpu_ih_clientid_vcns[] = {
+ SOC15_IH_CLIENTID_VCN,
+ SOC15_IH_CLIENTID_VCN1
+};
+
+static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v5_0_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
+
+/**
+ * vcn_v5_0_0_early_init - set function pointers and load microcode
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ * Load microcode from filesystem
+ */
+static int vcn_v5_0_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* re-use enc ring as unified ring */
+ adev->vcn.num_enc_rings = 1;
+
+ vcn_v5_0_0_set_unified_ring_funcs(adev);
+ vcn_v5_0_0_set_irq_funcs(adev);
+
+ return amdgpu_vcn_early_init(adev);
+}
+
+/**
+ * vcn_v5_0_0_sw_init - sw init for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int vcn_v5_0_0_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev);
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
+ /* VCN UNIFIED TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
+ VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
+ if (r)
+ return r;
+
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ if (r)
+ return r;
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->use_doorbell = true;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
+
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+ sprintf(ring->name, "vcn_unified_%d", i);
+
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
+ AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
+ if (r)
+ return r;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_sw_fini - sw fini for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * VCN suspend and free up sw allocation
+ */
+static int vcn_v5_0_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r, idx;
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sq.is_enabled = 0;
+ }
+
+ drm_dev_exit(idx);
+ }
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_hw_init - start and test VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v5_0_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
+
+done:
+ if (!r)
+ DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the VCN block, mark ring as not ready any more
+ */
+static int vcn_v5_0_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_suspend - suspend VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v5_0_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vcn_v5_0_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_resume - resume VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v5_0_0_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v5_0_0_hw_init(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_0_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+}
+
+/**
+ * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Let the VCN memory controller know its offsets with dpg mode
+ */
+static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+
+ return;
+}
+
+/**
+ * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable static power gating for VCN block
+ */
+static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+{
+ uint32_t data = 0;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
+ } else {
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
+
+ data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
+ UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
+ }
+
+ data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
+ data &= ~0x103;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+ data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
+ UVD_POWER_STATUS__UVD_PG_EN_MASK;
+
+ WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
+ return;
+}
+
+/**
+ * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable static power gating for VCN block
+ */
+static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+{
+ uint32_t data;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ /* Before power off, this indicator has to be turned on */
+ data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
+ data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
+ data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+ WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
+
+ data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
+ WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
+ SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
+ 1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
+ UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
+ }
+ return;
+}
+
+/**
+ * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+ return;
+}
+
+#if 0
+/**
+ * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @sram_sel: sram select
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Disable clock gating for VCN block with dpg mode
+ */
+static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
+ int inst_idx, uint8_t indirect)
+{
+ return;
+}
+#endif
+
+/**
+ * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+ return;
+}
+
+/**
+ * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
+
+ if (indirect)
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
+
+ vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, inst_idx, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_start - VCN start
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start VCN block
+ */
+static int vcn_v5_0_0_start(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+ int i, j, k, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ /* disable VCN power gating */
+ vcn_v5_0_0_disable_static_power_gating(adev, i);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_0_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
+ r = 0;
+ break;
+ }
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+ }
+
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop VCN block with dpg mode
+ */
+static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
+ uint32_t tmp;
+
+ vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ return;
+}
+
+/**
+ * vcn_v5_0_0_stop - VCN stop
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop VCN block
+ */
+static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ uint32_t tmp;
+ int i, r = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_0_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+
+ /* enable VCN power gating */
+ vcn_v5_0_0_enable_static_power_gating(adev, i);
+ }
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
+ struct dpg_pause_state *new_state)
+{
+ uint32_t reg_data = 0;
+ int ret_code;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
+ DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
+ reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ if (!ret_code) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ }
+ } else {
+ /* unpause dpg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified read pointer
+ */
+static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
+}
+
+/**
+ * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified write pointer
+ */
+static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
+}
+
+/**
+ * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
+ .get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
+ .set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_unified_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
+ adev->vcn.inst[i].ring_enc[0].me = i;
+
+ DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
+ }
+}
+
+/**
+ * vcn_v5_0_0_is_idle - check VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v5_0_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v5_0_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ if (enable) {
+ if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
+ return -EBUSY;
+ vcn_v5_0_0_enable_clock_gating(adev, i);
+ } else {
+ vcn_v5_0_0_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v5_0_0_stop(adev);
+ else
+ ret = vcn_v5_0_0_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_0_set_interrupt_state - set VCN block interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block interrupt state
+ */
+static int vcn_v5_0_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ unsigned type, enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+/**
+ * vcn_v5_0_0_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t ip_instance;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_VCN:
+ ip_instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+ }
+
+ DRM_DEBUG("IH: VCN TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
+ break;
+ case VCN_4_0__SRCID_UVD_POISON:
+ amdgpu_vcn_process_poison_irq(adev, source, entry);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
+ .set = vcn_v5_0_0_set_interrupt_state,
+ .process = vcn_v5_0_0_process_interrupt,
+};
+
+/**
+ * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
+ }
+}
+
+static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
+ .name = "vcn_v5_0_0",
+ .early_init = vcn_v5_0_0_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v5_0_0_sw_init,
+ .sw_fini = vcn_v5_0_0_sw_fini,
+ .hw_init = vcn_v5_0_0_hw_init,
+ .hw_fini = vcn_v5_0_0_hw_fini,
+ .suspend = vcn_v5_0_0_suspend,
+ .resume = vcn_v5_0_0_resume,
+ .is_idle = vcn_v5_0_0_is_idle,
+ .wait_for_idle = vcn_v5_0_0_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
+ .set_powergating_state = vcn_v5_0_0_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vcn_v5_0_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
new file mode 100644
index 000000000000..51bbccd4360f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_V5_0_0_H__
+#define __VCN_V5_0_0_H__
+
+#define VCN_VID_SOC_ADDRESS 0x1FC00
+#define VCN_AON_SOC_ADDRESS 0x1F800
+#define VCN1_VID_SOC_ADDRESS 0x48300
+#define VCN1_AON_SOC_ADDRESS 0x48000
+
+#define VCN_VID_IP_ADDRESS 0x0
+#define VCN_AON_IP_ADDRESS 0x30000
+
+extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
+
+#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index db66e6cccaf2..b9e785846637 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -291,27 +291,29 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
adev->nbio.funcs->ih_control(adev);
- if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) &&
- adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
- if (adev->irq.ih.use_bus_addr) {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_GPA_ENABLE, 1);
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
- WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
- }
- /* psp firmware won't program IH_CHICKEN for aldebaran
- * driver needs to program it properly according to
- * MC_SPACE type in IH_RB_CNTL */
- if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) ||
- (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) {
- ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
- if (adev->irq.ih.use_bus_addr) {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_GPA_ENABLE, 1);
+ /* psp firmware won't program IH_CHICKEN for aldebaran
+ * driver needs to program it properly according to
+ * MC_SPACE type in IH_RB_CNTL */
+ if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) ||
+ (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken);
}
- WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken);
}
for (i = 0; i < ARRAY_SIZE(ih); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h b/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h
index 9b550deb48d3..47534dbbd137 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h
@@ -40,7 +40,8 @@ enum VPE_CMD_OPCODE {
VPE_CMD_OPCODE_POLL_REGMEM = 0x8,
VPE_CMD_OPCODE_COND_EXE = 0x9,
VPE_CMD_OPCODE_ATOMIC = 0xA,
- VPE_CMD_OPCODE_PLANE_FILL = 0xB,
+ VPE_CMD_OPCODE_PRED_EXE = 0xB,
+ VPE_CMD_OPCODE_COLLAB_SYNC = 0xC,
VPE_CMD_OPCODE_TIMESTAMP = 0xD
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index d20060a51e05..769eb8f7bb3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -33,14 +33,38 @@
#include "vpe/vpe_6_1_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/vpe_6_1_0.bin");
+MODULE_FIRMWARE("amdgpu/vpe_6_1_1.bin");
#define VPE_THREAD1_UCODE_OFFSET 0x8000
+#define regVPEC_COLLABORATE_CNTL 0x0013
+#define regVPEC_COLLABORATE_CNTL_BASE_IDX 0
+#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN__SHIFT 0x0
+#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN_MASK 0x00000001L
+
+#define regVPEC_COLLABORATE_CFG 0x0014
+#define regVPEC_COLLABORATE_CFG_BASE_IDX 0
+#define VPEC_COLLABORATE_CFG__MASTER_ID__SHIFT 0x0
+#define VPEC_COLLABORATE_CFG__MASTER_EN__SHIFT 0x3
+#define VPEC_COLLABORATE_CFG__SLAVE0_ID__SHIFT 0x4
+#define VPEC_COLLABORATE_CFG__SLAVE0_EN__SHIFT 0x7
+#define VPEC_COLLABORATE_CFG__MASTER_ID_MASK 0x00000007L
+#define VPEC_COLLABORATE_CFG__MASTER_EN_MASK 0x00000008L
+#define VPEC_COLLABORATE_CFG__SLAVE0_ID_MASK 0x00000070L
+#define VPEC_COLLABORATE_CFG__SLAVE0_EN_MASK 0x00000080L
+
+#define regVPEC_CNTL_6_1_1 0x0016
+#define regVPEC_CNTL_6_1_1_BASE_IDX 0
+#define regVPEC_QUEUE_RESET_REQ_6_1_1 0x002c
+#define regVPEC_QUEUE_RESET_REQ_6_1_1_BASE_IDX 0
+#define regVPEC_PUB_DUMMY2_6_1_1 0x004c
+#define regVPEC_PUB_DUMMY2_6_1_1_BASE_IDX 0
+
static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset)
{
uint32_t base;
- base = vpe->ring.adev->reg_offset[VPE_HWIP][0][0];
+ base = vpe->ring.adev->reg_offset[VPE_HWIP][inst][0];
return base + offset;
}
@@ -48,12 +72,14 @@ static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, u
static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt)
{
struct amdgpu_device *adev = vpe->ring.adev;
- uint32_t f32_cntl;
+ uint32_t i, f32_cntl;
- f32_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
- f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ? 1 : 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL), f32_cntl);
+ for (i = 0; i < vpe->num_instances; i++) {
+ f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
+ f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ? 1 : 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl);
+ }
}
static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe)
@@ -70,20 +96,58 @@ static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe)
return 0;
}
+static void vpe_v6_1_set_collaborate_mode(struct amdgpu_vpe *vpe, bool enable)
+{
+ struct amdgpu_device *adev = vpe->ring.adev;
+ uint32_t vpe_colla_cntl, vpe_colla_cfg, i;
+
+ if (!vpe->collaborate_mode)
+ return;
+
+ for (i = 0; i < vpe->num_instances; i++) {
+ vpe_colla_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL));
+ vpe_colla_cntl = REG_SET_FIELD(vpe_colla_cntl, VPEC_COLLABORATE_CNTL,
+ COLLABORATE_MODE_EN, enable ? 1 : 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL), vpe_colla_cntl);
+
+ vpe_colla_cfg = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG));
+ vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_ID, 0);
+ vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_EN, enable ? 1 : 0);
+ vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_ID, 1);
+ vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_EN, enable ? 1 : 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG), vpe_colla_cfg);
+ }
+}
+
static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
{
struct amdgpu_device *adev = vpe->ring.adev;
const struct vpe_firmware_header_v1_0 *vpe_hdr;
const __le32 *data;
uint32_t ucode_offset[2], ucode_size[2];
- uint32_t i, size_dw;
+ uint32_t i, j, size_dw;
uint32_t ret;
- // disable UMSCH_INT_ENABLE
- ret = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
- ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), ret);
+ /* disable UMSCH_INT_ENABLE */
+ for (j = 0; j < vpe->num_instances; j++) {
+
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+ ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1));
+ else
+ ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL));
+
+ ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0);
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+ WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1), ret);
+ else
+ WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
+ }
+
+ /*
+	 * For VPE 6.1.1, only the master's offset still needs to be added; psp will apply it to the slave as well.
+	 * Instance 0 is used as the master here.
+ */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
uint32_t f32_offset, f32_cntl;
@@ -96,8 +160,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
amdgpu_vpe_psp_update_sram(adev);
-
- /* Config DPM */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
amdgpu_vpe_configure_dpm(vpe);
return 0;
@@ -114,25 +177,26 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
vpe_v6_1_halt(vpe, true);
- for (i = 0; i < 2; i++) {
- if (i > 0)
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET);
- else
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_ADDR), 0);
-
- data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]);
- size_dw = ucode_size[i] / sizeof(__le32);
-
- while (size_dw--) {
- if (amdgpu_emu_mode && size_dw % 500 == 0)
- msleep(1);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_DATA), le32_to_cpup(data++));
+ for (j = 0; j < vpe->num_instances; j++) {
+ for (i = 0; i < 2; i++) {
+ if (i > 0)
+ WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET);
+ else
+ WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), 0);
+
+ data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]);
+ size_dw = ucode_size[i] / sizeof(__le32);
+
+ while (size_dw--) {
+ if (amdgpu_emu_mode && size_dw % 500 == 0)
+ msleep(1);
+ WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_DATA), le32_to_cpup(data++));
+ }
}
-
}
vpe_v6_1_halt(vpe, false);
- /* Config DPM */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
amdgpu_vpe_configure_dpm(vpe);
return 0;
@@ -142,68 +206,68 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
{
struct amdgpu_ring *ring = &vpe->ring;
struct amdgpu_device *adev = ring->adev;
- uint32_t rb_bufsz, rb_cntl;
- uint32_t ib_cntl;
uint32_t doorbell, doorbell_offset;
+ uint32_t rb_bufsz, rb_cntl;
+ uint32_t ib_cntl, i;
int ret;
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
- rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
-
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR), 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_HI), 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR), 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR_HI), 0);
-
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
-
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
-
- ring->wptr = 0;
-
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
-
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
-
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);
-
- doorbell = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL));
- doorbell_offset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL_OFFSET));
-
- doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index);
-
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL), doorbell);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
- adev->nbio.funcs->vpe_doorbell_range(adev, 0, ring->use_doorbell, ring->doorbell_index, 2);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
-
- ib_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
-
- ring->sched.ready = true;
+ for (i = 0; i < vpe->num_instances; i++) {
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+ rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR), 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_HI), 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+ /* before programing wptr to a less value, need set minor_ptr_update first */
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ /* set minor_ptr_update to 0 after wptr programed */
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+ doorbell_offset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET));
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index + i*4);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
+
+ doorbell = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL));
+ doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL), doorbell);
+
+ adev->nbio.funcs->vpe_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index + i*4, 4);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
+
+ ib_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
+ }
ret = amdgpu_ring_test_helper(ring);
- if (ret) {
- ring->sched.ready = false;
+ if (ret)
return ret;
- }
return 0;
}
@@ -211,17 +275,30 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe)
{
struct amdgpu_device *adev = vpe->ring.adev;
- uint32_t queue_reset;
+ uint32_t queue_reset, i;
int ret;
- queue_reset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ));
- queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ), queue_reset);
+ for (i = 0; i < vpe->num_instances; i++) {
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+ queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1));
+ else
+ queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ));
+
+ queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);
+
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1)) {
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1), queue_reset);
+ ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ_6_1_1, 0,
+ VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
+ } else {
+ WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ), queue_reset);
+ ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ, 0,
+ VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
+ }
- ret = SOC15_WAIT_ON_RREG(VPE, 0, regVPEC_QUEUE_RESET_REQ, 0,
- VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
- if (ret)
- dev_err(adev->dev, "VPE queue reset failed\n");
+ if (ret)
+ dev_err(adev->dev, "VPE queue reset failed\n");
+ }
vpe->ring.sched.ready = false;
@@ -236,10 +313,18 @@ static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_vpe *vpe = &adev->vpe;
uint32_t vpe_cntl;
- vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+ vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1));
+ else
+ vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
+
vpe_cntl = REG_SET_FIELD(vpe_cntl, VPEC_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl);
+
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+ WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1), vpe_cntl);
+ else
+ WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl);
return 0;
}
@@ -264,13 +349,19 @@ static int vpe_v6_1_process_trap_irq(struct amdgpu_device *adev,
static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe)
{
+ struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
+
vpe->regs.queue0_rb_rptr_lo = regVPEC_QUEUE0_RB_RPTR;
vpe->regs.queue0_rb_rptr_hi = regVPEC_QUEUE0_RB_RPTR_HI;
vpe->regs.queue0_rb_wptr_lo = regVPEC_QUEUE0_RB_WPTR;
vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI;
vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT;
- vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;
+ if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+ vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2_6_1_1;
+ else
+ vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;
+
vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4;
vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3;
vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index d1caaf0e6a7c..5a0308d26b53 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -678,7 +678,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
- 0xbf820001, 0xbf8201f5,
+ 0xbf820001, 0xbf820394,
0xb0804004, 0xb978f802,
0x8a78ff78, 0x00020006,
0xb97bf803, 0x876eff78,
@@ -769,13 +769,90 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820002, 0xbeff03c1,
- 0xbf82000b, 0xbef603ff,
- 0x01000000, 0xe0704000,
- 0x705d0000, 0xe0704080,
- 0x705d0100, 0xe0704100,
- 0x705d0200, 0xe0704180,
- 0x705d0300, 0xbf82000a,
- 0xbef603ff, 0x01000000,
+ 0xbf820058, 0xbef603ff,
+ 0x01000000, 0xb97af803,
+ 0x8a7a7aff, 0x10000000,
+ 0xbf850049, 0xbe840380,
+ 0xd7600000, 0x00000900,
+ 0x80048104, 0xd7600001,
+ 0x00000900, 0x80048104,
+ 0xd7600002, 0x00000900,
+ 0x80048104, 0xd7600003,
+ 0x00000900, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000901,
+ 0x80048104, 0xd7600001,
+ 0x00000901, 0x80048104,
+ 0xd7600002, 0x00000901,
+ 0x80048104, 0xd7600003,
+ 0x00000901, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000902,
+ 0x80048104, 0xd7600001,
+ 0x00000902, 0x80048104,
+ 0xd7600002, 0x00000902,
+ 0x80048104, 0xd7600003,
+ 0x00000902, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000903,
+ 0x80048104, 0xd7600001,
+ 0x00000903, 0x80048104,
+ 0xd7600002, 0x00000903,
+ 0x80048104, 0xd7600003,
+ 0x00000903, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbf820060,
+ 0xe0704000, 0x705d0000,
+ 0xe0704080, 0x705d0100,
+ 0xe0704100, 0x705d0200,
+ 0xe0704180, 0x705d0300,
+ 0xbf820057, 0xbef603ff,
+ 0x01000000, 0xb97af803,
+ 0x8a7a7aff, 0x10000000,
+ 0xbf850049, 0xbe840380,
+ 0xd7600000, 0x00000900,
+ 0x80048104, 0xd7600001,
+ 0x00000900, 0x80048104,
+ 0xd7600002, 0x00000900,
+ 0x80048104, 0xd7600003,
+ 0x00000900, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000901,
+ 0x80048104, 0xd7600001,
+ 0x00000901, 0x80048104,
+ 0xd7600002, 0x00000901,
+ 0x80048104, 0xd7600003,
+ 0x00000901, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000902,
+ 0x80048104, 0xd7600001,
+ 0x00000902, 0x80048104,
+ 0xd7600002, 0x00000902,
+ 0x80048104, 0xd7600003,
+ 0x00000902, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000903,
+ 0x80048104, 0xd7600001,
+ 0x00000903, 0x80048104,
+ 0xd7600002, 0x00000903,
+ 0x80048104, 0xd7600003,
+ 0x00000903, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffef, 0xbf820008,
0xe0704000, 0x705d0000,
0xe0704100, 0x705d0100,
0xe0704200, 0x705d0200,
@@ -855,9 +932,9 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
0xb97b4306, 0x877bc17b,
- 0xbf840044, 0xbf8a0000,
+ 0xbf840086, 0xbf8a0000,
0x877aff6d, 0x80000000,
- 0xbf840040, 0x8f7b867b,
+ 0xbf840082, 0x8f7b867b,
0x8f7b827b, 0xbef6037b,
0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
@@ -871,16 +948,49 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xd7660000, 0x000200c1,
0x16000084, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbefc0380, 0xbf850012,
- 0xbe8303ff, 0x00000080,
+ 0xbefc0380, 0xbf850033,
+ 0xb97af803, 0x8a7a7aff,
+ 0x10000000, 0xbf85001d,
+ 0xd8d80000, 0x01000000,
+ 0xbf8c0000, 0xbe840380,
+ 0xd7600000, 0x00000901,
+ 0x80048104, 0xd7600001,
+ 0x00000901, 0x80048104,
+ 0xd7600002, 0x00000901,
+ 0x80048104, 0xd7600003,
+ 0x00000901, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0x807cff7c,
+ 0x00000080, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7c, 0xbf85ffe4,
+ 0xbf820044, 0xbe8303ff,
+ 0x00000080, 0xbf800000,
0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8c0000, 0xe0704000,
+ 0x705d0100, 0x807c037c,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7c, 0xbf85fff4,
+ 0xbf820032, 0xb97af803,
+ 0x8a7a7aff, 0x10000000,
+ 0xbf85001d, 0xd8d80000,
0x01000000, 0xbf8c0000,
- 0xe0704000, 0x705d0100,
- 0x807c037c, 0x80700370,
+ 0xbe840380, 0xd7600000,
+ 0x00000901, 0x80048104,
+ 0xd7600001, 0x00000901,
+ 0x80048104, 0xd7600002,
+ 0x00000901, 0x80048104,
+ 0xd7600003, 0x00000901,
+ 0x80048104, 0xf469003a,
+ 0xe0000000, 0x80709070,
+ 0xbf06c004, 0xbf84ffef,
+ 0x807cff7c, 0x00000100,
0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7c,
- 0xbf85fff4, 0xbf820011,
+ 0x00000100, 0xbf0a7b7c,
+ 0xbf85ffe4, 0xbf820011,
0xbe8303ff, 0x00000100,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
@@ -898,10 +1008,52 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbeff03c1, 0xb97b3a05,
0x807b817b, 0x8f7b827b,
0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850017,
+ 0xbf06817c, 0xbf85006b,
0xbef603ff, 0x01000000,
0xbefc0384, 0xbf0a7b7c,
- 0xbf840037, 0x7e008700,
+ 0xbf8400fa, 0xb97af803,
+ 0x8a7a7aff, 0x10000000,
+ 0xbf850050, 0x7e008700,
+ 0x7e028701, 0x7e048702,
+ 0x7e068703, 0xbe840380,
+ 0xd7600000, 0x00000900,
+ 0x80048104, 0xd7600001,
+ 0x00000900, 0x80048104,
+ 0xd7600002, 0x00000900,
+ 0x80048104, 0xd7600003,
+ 0x00000900, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000901,
+ 0x80048104, 0xd7600001,
+ 0x00000901, 0x80048104,
+ 0xd7600002, 0x00000901,
+ 0x80048104, 0xd7600003,
+ 0x00000901, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000902,
+ 0x80048104, 0xd7600001,
+ 0x00000902, 0x80048104,
+ 0xd7600002, 0x00000902,
+ 0x80048104, 0xd7600003,
+ 0x00000902, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0xbe840380,
+ 0xd7600000, 0x00000903,
+ 0x80048104, 0xd7600001,
+ 0x00000903, 0x80048104,
+ 0xd7600002, 0x00000903,
+ 0x80048104, 0xd7600003,
+ 0x00000903, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06a004,
+ 0xbf84ffef, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf8200a6, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0704000,
0x705d0000, 0xe0704080,
@@ -910,9 +1062,51 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x705d0300, 0x807c847c,
0x8070ff70, 0x00000200,
0xbf0a7b7c, 0xbf85ffef,
- 0xbf820025, 0xbef603ff,
+ 0xbf820094, 0xbef603ff,
0x01000000, 0xbefc0384,
- 0xbf0a7b7c, 0xbf840011,
+ 0xbf0a7b7c, 0xbf840065,
+ 0xb97af803, 0x8a7a7aff,
+ 0x10000000, 0xbf850050,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xbe840380, 0xd7600000,
+ 0x00000900, 0x80048104,
+ 0xd7600001, 0x00000900,
+ 0x80048104, 0xd7600002,
+ 0x00000900, 0x80048104,
+ 0xd7600003, 0x00000900,
+ 0x80048104, 0xf469003a,
+ 0xe0000000, 0x80709070,
+ 0xbf06c004, 0xbf84ffef,
+ 0xbe840380, 0xd7600000,
+ 0x00000901, 0x80048104,
+ 0xd7600001, 0x00000901,
+ 0x80048104, 0xd7600002,
+ 0x00000901, 0x80048104,
+ 0xd7600003, 0x00000901,
+ 0x80048104, 0xf469003a,
+ 0xe0000000, 0x80709070,
+ 0xbf06c004, 0xbf84ffef,
+ 0xbe840380, 0xd7600000,
+ 0x00000902, 0x80048104,
+ 0xd7600001, 0x00000902,
+ 0x80048104, 0xd7600002,
+ 0x00000902, 0x80048104,
+ 0xd7600003, 0x00000902,
+ 0x80048104, 0xf469003a,
+ 0xe0000000, 0x80709070,
+ 0xbf06c004, 0xbf84ffef,
+ 0xbe840380, 0xd7600000,
+ 0x00000903, 0x80048104,
+ 0xd7600001, 0x00000903,
+ 0x80048104, 0xd7600002,
+ 0x00000903, 0x80048104,
+ 0xd7600003, 0x00000903,
+ 0x80048104, 0xf469003a,
+ 0xe0000000, 0x80709070,
+ 0xbf06c004, 0xbf84ffef,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf82003b,
0x7e008700, 0x7e028701,
0x7e048702, 0x7e068703,
0xe0704000, 0x705d0000,
@@ -922,179 +1116,192 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
0xbf85ffef, 0xb97b1e06,
- 0x877bc17b, 0xbf84000c,
+ 0x877bc17b, 0xbf840027,
0x8f7b837b, 0x807b7c7b,
0xbefe03c1, 0xbeff0380,
- 0x7e008700, 0xe0704000,
- 0x705d0000, 0x807c817c,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7c, 0xbf85fff8,
- 0xbf820144, 0xbef4037e,
- 0x8775ff7f, 0x0000ffff,
- 0x8875ff75, 0x00040000,
- 0xbef60380, 0xbef703ff,
- 0x10807fac, 0xb97202dc,
- 0x8f729972, 0x876eff7f,
- 0x04000000, 0xbf840034,
+ 0xb97af803, 0x8a7a7aff,
+ 0x10000000, 0xbf850017,
+ 0x7e008700, 0xbe840380,
+ 0xd7600000, 0x00000900,
+ 0x80048104, 0xd7600001,
+ 0x00000900, 0x80048104,
+ 0xd7600002, 0x00000900,
+ 0x80048104, 0xd7600003,
+ 0x00000900, 0x80048104,
+ 0xf469003a, 0xe0000000,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffef, 0x807c817c,
+ 0xbf0a7b7c, 0xbf85ffea,
+ 0xbf820008, 0x7e008700,
+ 0xe0704000, 0x705d0000,
+ 0x807c817c, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7c,
+ 0xbf85fff8, 0xbf820144,
+ 0xbef4037e, 0x8775ff7f,
+ 0x0000ffff, 0x8875ff75,
+ 0x00040000, 0xbef60380,
+ 0xbef703ff, 0x10807fac,
+ 0xb97202dc, 0x8f729972,
+ 0x876eff7f, 0x04000000,
+ 0xbf840034, 0xbefe03c1,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb96f4306,
+ 0x876fc16f, 0xbf840029,
+ 0x8f6f866f, 0x8f6f826f,
+ 0xbef6036f, 0xb9783a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef603ff,
+ 0x01000000, 0x907c9972,
+ 0x877c817c, 0xbf06817c,
+ 0xbefc0380, 0xbf850009,
+ 0xe0310000, 0x781d0000,
+ 0x807cff7c, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7c, 0xbf85fff8,
+ 0xbf820008, 0xe0310000,
+ 0x781d0000, 0x807cff7c,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7c,
+ 0xbf85fff8, 0xbef80380,
0xbefe03c1, 0x907c9972,
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
- 0xb96f4306, 0x876fc16f,
- 0xbf840029, 0x8f6f866f,
- 0x8f6f826f, 0xbef6036f,
- 0xb9783a05, 0x80788178,
- 0xbf0d9972, 0xbf850002,
- 0x8f788978, 0xbf820001,
- 0x8f788a78, 0xb96e1e06,
- 0x8f6e8a6e, 0x80786e78,
+ 0xb96f3a05, 0x806f816f,
+ 0x8f6f826f, 0x907c9972,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850024, 0xbef603ff,
+ 0x01000000, 0xbeee0378,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef603ff, 0x01000000,
- 0x907c9972, 0x877c817c,
- 0xbf06817c, 0xbefc0380,
- 0xbf850009, 0xe0310000,
- 0x781d0000, 0x807cff7c,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7c,
- 0xbf85fff8, 0xbf820008,
- 0xe0310000, 0x781d0000,
- 0x807cff7c, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7c, 0xbf85fff8,
- 0xbef80380, 0xbefe03c1,
- 0x907c9972, 0x877c817c,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb96f3a05,
- 0x806f816f, 0x8f6f826f,
- 0x907c9972, 0x877c817c,
- 0xbf06817c, 0xbf850024,
- 0xbef603ff, 0x01000000,
- 0xbeee0378, 0x8078ff78,
- 0x00000200, 0xbefc0384,
- 0xbf0a6f7c, 0xbf840050,
+ 0xbefc0384, 0xbf0a6f7c,
+ 0xbf840050, 0xe0304000,
+ 0x785d0000, 0xe0304080,
+ 0x785d0100, 0xe0304100,
+ 0x785d0200, 0xe0304180,
+ 0x785d0300, 0xbf8c3f70,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807c847c, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85ffee, 0xe0304000,
+ 0x6e5d0000, 0xe0304080,
+ 0x6e5d0100, 0xe0304100,
+ 0x6e5d0200, 0xe0304180,
+ 0x6e5d0300, 0xbf8c3f70,
+ 0xbf820034, 0xbef603ff,
+ 0x01000000, 0xbeee0378,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0384, 0xbf0a6f7c,
+ 0xbf840012, 0xe0304000,
+ 0x785d0000, 0xe0304100,
+ 0x785d0100, 0xe0304200,
+ 0x785d0200, 0xe0304300,
+ 0x785d0300, 0xbf8c3f70,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb96f1e06,
+ 0x876fc16f, 0xbf84000e,
+ 0x8f6f836f, 0x806f7c6f,
+ 0xbefe03c1, 0xbeff0380,
0xe0304000, 0x785d0000,
- 0xe0304080, 0x785d0100,
- 0xe0304100, 0x785d0200,
- 0xe0304180, 0x785d0300,
0xbf8c3f70, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807c847c,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85ffee,
+ 0x807c817c, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7c,
+ 0xbf85fff7, 0xbeff03c1,
0xe0304000, 0x6e5d0000,
- 0xe0304080, 0x6e5d0100,
- 0xe0304100, 0x6e5d0200,
- 0xe0304180, 0x6e5d0300,
- 0xbf8c3f70, 0xbf820034,
- 0xbef603ff, 0x01000000,
- 0xbeee0378, 0x8078ff78,
- 0x00000400, 0xbefc0384,
- 0xbf0a6f7c, 0xbf840012,
- 0xe0304000, 0x785d0000,
- 0xe0304100, 0x785d0100,
- 0xe0304200, 0x785d0200,
- 0xe0304300, 0x785d0300,
- 0xbf8c3f70, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb96f1e06, 0x876fc16f,
- 0xbf84000e, 0x8f6f836f,
- 0x806f7c6f, 0xbefe03c1,
- 0xbeff0380, 0xe0304000,
- 0x785d0000, 0xbf8c3f70,
- 0x7e008500, 0x807c817c,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff7,
- 0xbeff03c1, 0xe0304000,
- 0x6e5d0000, 0xe0304100,
- 0x6e5d0100, 0xe0304200,
- 0x6e5d0200, 0xe0304300,
- 0x6e5d0300, 0xbf8c3f70,
+ 0xe0304100, 0x6e5d0100,
+ 0xe0304200, 0x6e5d0200,
+ 0xe0304300, 0x6e5d0300,
+ 0xbf8c3f70, 0xb9783a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef603ff,
+ 0x01000000, 0xbefc03ff,
+ 0x0000006c, 0x80f89078,
+ 0xf429003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc847c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0x80f8a078,
+ 0xf42d003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc887c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0xbe843104,
+ 0xbe863106, 0x80f8c078,
+ 0xf431003a, 0xf0000000,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe803100,
+ 0xbe823102, 0xbe843104,
+ 0xbe863106, 0xbe883108,
+ 0xbe8a310a, 0xbe8c310c,
+ 0xbe8e310e, 0xbf06807c,
+ 0xbf84fff0, 0xba80f801,
+ 0x00000000, 0xbf8a0000,
0xb9783a05, 0x80788178,
0xbf0d9972, 0xbf850002,
0x8f788978, 0xbf820001,
0x8f788a78, 0xb96e1e06,
0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef603ff, 0x01000000,
- 0xbefc03ff, 0x0000006c,
- 0x80f89078, 0xf429003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc847c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0x80f8a078, 0xf42d003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc887c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0xbe843104, 0xbe863106,
- 0x80f8c078, 0xf431003a,
- 0xf0000000, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe803100, 0xbe823102,
- 0xbe843104, 0xbe863106,
- 0xbe883108, 0xbe8a310a,
- 0xbe8c310c, 0xbe8e310e,
- 0xbf06807c, 0xbf84fff0,
- 0xba80f801, 0x00000000,
- 0xbf8a0000, 0xb9783a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbefc036f, 0xbefe0370,
- 0xbeff0371, 0x876f7bff,
- 0x000003ff, 0xb9ef4803,
- 0xb9f9f816, 0x876f7bff,
- 0xfffff800, 0x906f8b6f,
- 0xb9efa2c3, 0xb9f3f801,
- 0xb96e3a05, 0x806e816e,
- 0xbf0d9972, 0xbf850002,
- 0x8f6e896e, 0xbf820001,
- 0x8f6e8a6e, 0xb96f1e06,
- 0x8f6f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x876fff6f, 0x0000ffff,
- 0xf4091c37, 0xfa000050,
- 0xf4091d37, 0xfa000060,
- 0xf4011e77, 0xfa000074,
- 0xbf8cc07f, 0x906e8977,
- 0x876fff6e, 0x003f8000,
- 0x906e8677, 0x876eff6e,
- 0x02000000, 0x886e6f6e,
- 0xb9eef807, 0x876dff6d,
- 0x0000ffff, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9faf802,
- 0xbe80226c, 0xbf9b0000,
+ 0xb9eef815, 0xbefc036f,
+ 0xbefe0370, 0xbeff0371,
+ 0x876f7bff, 0x000003ff,
+ 0xb9ef4803, 0xb9f9f816,
+ 0x876f7bff, 0xfffff800,
+ 0x906f8b6f, 0xb9efa2c3,
+ 0xb9f3f801, 0xb96e3a05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbf850002, 0x8f6e896e,
+ 0xbf820001, 0x8f6e8a6e,
+ 0xb96f1e06, 0x8f6f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x876fff6f,
+ 0x0000ffff, 0xf4091c37,
+ 0xfa000050, 0xf4091d37,
+ 0xfa000060, 0xf4011e77,
+ 0xfa000074, 0xbf8cc07f,
+ 0x906e8977, 0x876fff6e,
+ 0x003f8000, 0x906e8677,
+ 0x876eff6e, 0x02000000,
+ 0x886e6f6e, 0xb9eef807,
+ 0x876dff6d, 0x0000ffff,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9faf802, 0xbe80226c,
+ 0xbf9b0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
@@ -2518,7 +2725,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0x8b6eff7b, 0x00000400,
0xbfa20045, 0xbf830010,
0xb8fbf803, 0xbfa0fffa,
- 0x8b6eff7b, 0x00000900,
+ 0x8b6eff7b, 0x00160900,
0xbfa20015, 0x8b6eff7b,
0x000071ff, 0xbfa10008,
0x8b6fff7b, 0x00007080,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 71b3dc0c7363..e1aaa5ce0784 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -44,6 +44,7 @@
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
+#define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
@@ -81,6 +82,12 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
+#if ASIC_FAMILY >= CHIP_PLUM_BONITO
+var SQ_WAVE_TRAPSTS_WAVE_START_MASK = 0x20000
+var SQ_WAVE_TRAPSTS_WAVE_END_MASK = 0x40000
+var SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK = 0x100000
+#endif
+var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000
var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12
var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19
@@ -92,6 +99,16 @@ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
+#if ASIC_FAMILY < CHIP_PLUM_BONITO
+var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+#else
+var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |\
+ SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK |\
+ SQ_WAVE_TRAPSTS_WAVE_START_MASK |\
+ SQ_WAVE_TRAPSTS_WAVE_END_MASK |\
+ SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK
+#endif
+
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
@@ -224,7 +241,7 @@ L_NOT_HALTED:
// Check non-maskable exceptions. memory_violation, illegal_instruction
// and xnack_error exceptions always cause the wave to enter the trap
// handler.
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+ s_and_b32 ttmp2, s_save_trapsts, S_TRAPSTS_NON_MASKABLE_EXCP_MASK
s_cbranch_scc1 L_FETCH_2ND_TRAP
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
@@ -460,6 +477,16 @@ L_SAVE_4VGPR_WAVE32:
// VGPR Allocated in 4-GPR granularity
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_FIRST_VGPRS32_WITH_TCP
+
+ write_vgprs_to_mem_with_sqc_w32(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+ s_branch L_SAVE_HWREG
+
+L_SAVE_FIRST_VGPRS32_WITH_TCP:
+#endif
+
#if !NO_SQC_STORE
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
#endif
@@ -473,6 +500,16 @@ L_SAVE_4VGPR_WAVE64:
// VGPR Allocated in 4-GPR granularity
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_FIRST_VGPRS64_WITH_TCP
+
+ write_vgprs_to_mem_with_sqc_w64(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+ s_branch L_SAVE_HWREG
+
+L_SAVE_FIRST_VGPRS64_WITH_TCP:
+#endif
+
#if !NO_SQC_STORE
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
#endif
@@ -645,6 +682,26 @@ L_SAVE_LDS_NORMAL:
s_cbranch_scc1 L_SAVE_LDS_W64
L_SAVE_LDS_W32:
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_LDS_WITH_TCP_W32
+
+L_SAVE_LDS_LOOP_SQC_W32:
+ ds_read_b32 v1, v0
+ s_waitcnt 0
+
+ write_vgprs_to_mem_with_sqc_w32(v1, 1, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 128 //every buffer_store_lds does 128 bytes
+ v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_SQC_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_WITH_TCP_W32:
+#endif
+
s_mov_b32 s3, 128
s_nop 0
s_nop 0
@@ -654,7 +711,7 @@ L_SAVE_LDS_LOOP_W32:
s_waitcnt 0
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
+ s_add_u32 m0, m0, s3 //every buffer_store_lds does 128 bytes
s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
@@ -663,6 +720,26 @@ L_SAVE_LDS_LOOP_W32:
s_branch L_SAVE_LDS_DONE
L_SAVE_LDS_W64:
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_LDS_WITH_TCP_W64
+
+L_SAVE_LDS_LOOP_SQC_W64:
+ ds_read_b32 v1, v0
+ s_waitcnt 0
+
+ write_vgprs_to_mem_with_sqc_w64(v1, 1, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 256 //every buffer_store_lds does 256 bytes
+ v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_SQC_W64 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_WITH_TCP_W64:
+#endif
+
s_mov_b32 s3, 256
s_nop 0
s_nop 0
@@ -712,6 +789,25 @@ L_SAVE_VGPR_NORMAL:
s_cmp_lt_u32 m0, s_save_alloc_size
s_cbranch_scc0 L_SAVE_VGPR_END
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP
+
+L_SAVE_VGPR_LOOP_SQC_W32:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ write_vgprs_to_mem_with_sqc_w32(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP_SQC_W32
+
+ s_branch L_SAVE_VGPR_END
+#endif
+
L_SAVE_VGPR_W32_LOOP:
v_movrels_b32 v0, v0 //v0 = v[0+m0]
v_movrels_b32 v1, v1 //v1 = v[1+m0]
@@ -738,6 +834,25 @@ L_SAVE_VGPR_WAVE64:
s_cmp_lt_u32 m0, s_save_alloc_size
s_cbranch_scc0 L_SAVE_SHARED_VGPR
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP
+
+L_SAVE_VGPR_LOOP_SQC_W64:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ write_vgprs_to_mem_with_sqc_w64(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP_SQC_W64
+
+ s_branch L_SAVE_VGPR_END
+#endif
+
L_SAVE_VGPR_W64_LOOP:
v_movrels_b32 v0, v0 //v0 = v[0+m0]
v_movrels_b32 v1, v1 //v1 = v[1+m0]
@@ -765,6 +880,23 @@ L_SAVE_SHARED_VGPR:
s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
s_mov_b32 exec_lo, 0xFFFFFFFF
s_mov_b32 exec_hi, 0x00000000
+
+#if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP
+
+L_SAVE_SHARED_VGPR_WAVE64_LOOP_SQC:
+ v_movrels_b32 v0, v0
+
+ write_vgprs_to_mem_with_sqc_w64(v0, 1, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 1
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP_SQC
+
+ s_branch L_SAVE_VGPR_END
+#endif
+
L_SAVE_SHARED_VGPR_WAVE64_LOOP:
v_movrels_b32 v0, v0 //v0 = v[0+m0]
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
@@ -1175,6 +1307,43 @@ function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset glc:1
end
+#if SAVE_AFTER_XNACK_ERROR
+function check_if_tcp_store_ok
+ // If TRAPSTS.XNACK_ERROR=1 then TCP stores will fail.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_TRAPSTS)
+ s_andn2_b32 s_save_tmp, SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK, s_save_tmp
+
+L_TCP_STORE_CHECK_DONE:
+end
+
+function write_vgpr_to_mem_with_sqc(vgpr, n_lanes, s_rsrc, s_mem_offset)
+ s_mov_b32 s4, 0
+
+L_WRITE_VGPR_LANE_LOOP:
+ for var lane = 0; lane < 4; ++lane
+ v_readlane_b32 s[lane], vgpr, s4
+ s_add_u32 s4, s4, 1
+ end
+
+ s_buffer_store_dwordx4 s[0:3], s_rsrc, s_mem_offset glc:1
+
+ s_add_u32 s_mem_offset, s_mem_offset, 0x10
+ s_cmp_eq_u32 s4, n_lanes
+ s_cbranch_scc0 L_WRITE_VGPR_LANE_LOOP
+end
+
+function write_vgprs_to_mem_with_sqc_w32(vgpr0, n_vgprs, s_rsrc, s_mem_offset)
+ for var vgpr = 0; vgpr < n_vgprs; ++vgpr
+ write_vgpr_to_mem_with_sqc(vgpr0[vgpr], 32, s_rsrc, s_mem_offset)
+ end
+end
+
+function write_vgprs_to_mem_with_sqc_w64(vgpr0, n_vgprs, s_rsrc, s_mem_offset)
+ for var vgpr = 0; vgpr < n_vgprs; ++vgpr
+ write_vgpr_to_mem_with_sqc(vgpr0[vgpr], 64, s_rsrc, s_mem_offset)
+ end
+end
+#endif
function get_lds_size_bytes(s_lds_size_byte)
s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 80e90fdef291..dfa8c69532d4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -63,8 +63,10 @@ static const struct file_operations kfd_fops = {
};
static int kfd_char_dev_major = -1;
-static struct class *kfd_class;
struct device *kfd_device;
+static const struct class kfd_class = {
+ .name = kfd_dev_name,
+};
static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
{
@@ -94,14 +96,13 @@ int kfd_chardev_init(void)
if (err < 0)
goto err_register_chrdev;
- kfd_class = class_create(kfd_dev_name);
- err = PTR_ERR(kfd_class);
- if (IS_ERR(kfd_class))
+ err = class_register(&kfd_class);
+ if (err)
goto err_class_create;
- kfd_device = device_create(kfd_class, NULL,
- MKDEV(kfd_char_dev_major, 0),
- NULL, kfd_dev_name);
+ kfd_device = device_create(&kfd_class, NULL,
+ MKDEV(kfd_char_dev_major, 0),
+ NULL, kfd_dev_name);
err = PTR_ERR(kfd_device);
if (IS_ERR(kfd_device))
goto err_device_create;
@@ -109,7 +110,7 @@ int kfd_chardev_init(void)
return 0;
err_device_create:
- class_destroy(kfd_class);
+ class_unregister(&kfd_class);
err_class_create:
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
@@ -118,8 +119,8 @@ err_register_chrdev:
void kfd_chardev_exit(void)
{
- device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
- class_destroy(kfd_class);
+ device_destroy(&kfd_class, MKDEV(kfd_char_dev_major, 0));
+ class_unregister(&kfd_class);
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
kfd_device = NULL;
}
@@ -371,7 +372,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_wptr_map_gart;
}
- err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
+ err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
if (err) {
pr_err("Failed to map wptr bo to GART\n");
goto err_wptr_map_gart;
@@ -2935,6 +2936,7 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v
if (IS_ERR_OR_NULL(target)) {
pr_debug("Cannot find process PID %i to debug\n", args->pid);
r = target ? PTR_ERR(target) : -ESRCH;
+ target = NULL;
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index cd8e459201f1..7f2ae0d15d4a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -55,6 +55,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -64,6 +65,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
/* Scalar L1 Instruction Cache (in SQC module) per bank */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -73,6 +75,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
/* Scalar L1 Data Cache (in SQC module) per bank */
.cache_size = 8,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -88,6 +91,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -95,8 +99,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
},
{
/* Scalar L1 Instruction Cache (in SQC module) per bank */
- .cache_size = 8,
+ .cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -104,8 +109,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
},
{
/* Scalar L1 Data Cache (in SQC module) per bank. */
- .cache_size = 4,
+ .cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -135,6 +141,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -144,6 +151,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -153,6 +161,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -162,6 +171,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -174,6 +184,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -183,6 +194,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -192,6 +204,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -201,6 +214,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -213,6 +227,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -222,6 +237,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -231,6 +247,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -240,6 +257,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -252,6 +270,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -261,6 +280,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -270,6 +290,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -279,6 +300,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -291,6 +313,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -300,6 +323,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -309,6 +333,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -318,6 +343,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 8192,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -330,6 +356,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -339,6 +366,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -348,6 +376,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -357,6 +386,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 8192,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -369,6 +399,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -378,6 +409,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -387,6 +419,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -396,6 +429,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -405,6 +439,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -417,6 +452,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -426,6 +462,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -435,6 +472,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -444,6 +482,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -453,6 +492,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -465,6 +505,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -474,6 +515,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -483,6 +525,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -492,6 +535,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -501,6 +545,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -513,6 +558,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -522,6 +568,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -531,6 +578,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -540,6 +588,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -549,6 +598,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 4096,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -558,6 +608,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 128*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -570,6 +621,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -579,6 +631,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -588,6 +641,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -597,6 +651,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -606,6 +661,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 3072,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -615,6 +671,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 96*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -627,6 +684,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -636,6 +694,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -645,6 +704,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -654,6 +714,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -663,6 +724,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -672,6 +734,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 32*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -684,6 +747,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -693,6 +757,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -702,6 +767,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -711,6 +777,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -720,6 +787,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -729,6 +797,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
/* L3 Data Cache per GPU */
.cache_size = 16*1024,
.cache_level = 3,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -741,6 +810,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -750,6 +820,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -759,6 +830,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -768,6 +840,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -777,6 +850,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -789,6 +863,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -798,6 +873,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -807,6 +883,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -816,6 +893,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -825,6 +903,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -837,6 +916,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -846,6 +926,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -855,6 +936,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -864,6 +946,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -873,6 +956,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
+ .cache_line_size = 128,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -885,6 +969,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -894,6 +979,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -903,6 +989,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -912,6 +999,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -921,6 +1009,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 2048,
.cache_level = 2,
+ .cache_line_size = 64,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -1587,6 +1676,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index 74c2d7a0d628..300634b9f668 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -303,6 +303,7 @@ struct kfd_node;
struct kfd_gpu_cache_info {
uint32_t cache_size;
uint32_t cache_level;
+ uint32_t cache_line_size;
uint32_t flags;
/* Indicates how many Compute Units share this cache
* within a SA. Value = 1 indicates the cache is not shared
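
As an aside, a minimal sketch of how a kfd_gpu_cache_info table entry carries the
new cache_line_size field; the numeric values are illustrative only and do not
describe any particular ASIC:

	static struct kfd_gpu_cache_info example_cache_info[] = {
		{
			/* TCP L1 cache per CU (illustrative values) */
			.cache_size = 16,		/* KiB */
			.cache_level = 1,
			.cache_line_size = 128,		/* bytes, now reported to topology */
			.flags = (CRAT_CACHE_FLAGS_ENABLED |
				  CRAT_CACHE_FLAGS_DATA_CACHE |
				  CRAT_CACHE_FLAGS_SIMD_CACHE),
		},
	};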
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 9ec750666382..d889e3545120 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -1018,12 +1018,14 @@ int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
uint32_t *entry_size)
{
struct kfd_dbg_device_info_entry device_info;
- uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
+ uint32_t tmp_entry_size, tmp_num_devices;
int i, r = 0;
if (!(target && user_info && number_of_device_infos && entry_size))
return -EINVAL;
+ tmp_entry_size = *entry_size;
+
tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
*number_of_device_infos = target->n_pdds;
*entry_size = min_t(size_t, *entry_size, sizeof(device_info));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0a9cf9dfc224..041ec3de55e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -96,6 +96,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
kfd->device_info.num_sdma_queues_per_engine = 8;
break;
default:
@@ -113,6 +114,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
@@ -165,6 +167,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
default:
@@ -420,6 +423,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110500;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 5, 1):
+ gfx_target_version = 110501;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
default:
break;
}
@@ -459,34 +466,43 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
{
if (cwsr_enable && kfd->device_info.supports_cwsr) {
if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx8_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_arcturus_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_nv1x_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
- BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx10_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
} else {
+ /* The gfx11 cwsr trap handler must fit inside a single page. */
BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx11_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c0e71543389a..f4d395e38683 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1903,6 +1903,10 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
uint64_t *fence_addr = dqm->fence_addr;
while (*fence_addr != fence_value) {
+ /* Fatal err detected, this response won't come */
+ if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
+ return -EIO;
+
if (time_after(jiffies, end_jiffies)) {
dev_err(dev, "qcm fence wait loop timeout expired\n");
/* In HWS case, this is used to halt the driver thread
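
A minimal sketch, outside the patch, of the polling-loop shape after this change;
names follow the hunk above and the existing timeout handling is elided:

	while (*fence_addr != fence_value) {
		/* A fatal (FED) error means the firmware response will never
		 * arrive, so bail out instead of spinning until the timeout.
		 */
		if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
			return -EIO;

		if (time_after(jiffies, end_jiffies)) {
			dev_err(dev, "qcm fence wait loop timeout expired\n");
			/* existing timeout handling continues here */
			break;
		}
		/* ... existing poll/relax body ... */
	}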
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 739721254a5d..9b33d9d2c9ad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1285,8 +1285,10 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
int user_gpu_id;
- if (!p)
+ if (!p) {
+ dev_warn(dev->adev->dev, "Not find process with pasid:%d\n", pasid);
return; /* Presumably process exited. */
+ }
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
@@ -1322,6 +1324,8 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
}
}
+ dev_warn(dev->adev->dev, "Send SIGBUS to process %s(pasid:%d)\n",
+ p->lead_thread->comm, pasid);
rcu_read_unlock();
/* user application will handle SIGBUS signal */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 6604a3f99c5e..4a64307bc438 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/processor.h>
+#include "amdgpu_vm.h"
/*
* The primary memory I/O features being added for revisions of gfxip
@@ -326,10 +327,16 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
* with small reserved space for kernel.
* Set them to CANONICAL addresses.
*/
- pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_base = max(SVM_USER_BASE, AMDGPU_VA_RESERVED_BOTTOM);
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
+ /* dGPUs: the reserved space for kernel
+ * before SVM
+ */
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
+
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
@@ -339,18 +346,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- /* Raven needs SVM to support graphic handle, etc. Leave the small
- * reserved space before SVM on Raven as well, even though we don't
- * have to.
- * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
- * are used in Thunk to reserve SVM.
- */
- pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_base = AMDGPU_VA_RESERVED_BOTTOM;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+
+ /*
+ * Place TBA/TMA on opposite side of VM hole to prevent
+ * stray faults from triggering SVM on these pages.
+ */
+ pdd->qpd.cwsr_base = AMDGPU_VA_RESERVED_TRAP_START(pdd->dev->adev);
}
int kfd_init_apertures(struct kfd_process *process)
@@ -407,12 +414,6 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
}
-
- /* dGPUs: the reserved space for kernel
- * before SVM
- */
- pdd->qpd.cwsr_base = SVM_CWSR_BASE;
- pdd->qpd.ib_base = SVM_IB_BASE;
}
dev_dbg(kfd_device, "node id %u\n", id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index a7697ec8188e..9a06c6fb6605 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -132,6 +132,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
static void event_interrupt_poison_consumption(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
+ enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -151,12 +152,14 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA2:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
+ block = AMDGPU_RAS_BLOCK__SDMA;
break;
default:
break;
@@ -171,12 +174,12 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
} else {
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 2a65792fd116..7e2859736a55 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -191,6 +191,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
uint16_t pasid, uint16_t source_id)
{
+ enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -210,9 +211,11 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
case SOC15_INTSRC_SQ_INTERRUPT_MSG:
if (dev->dqm->ops.reset_queues)
ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
case SOC21_INTSRC_SDMA_ECC:
default:
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
}
@@ -221,9 +224,9 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
/* resetting queue passes, do page retirement without gpu reset
resetting queue fails, fallback to gpu reset solution */
if (!ret)
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
else
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
}
static bool event_interrupt_isr_v11(struct kfd_node *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 27cdaea40501..91dd5e045b51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -143,6 +143,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
+ enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -162,12 +163,14 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__GFX;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA2:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
+ block = AMDGPU_RAS_BLOCK__SDMA;
break;
default:
break;
@@ -182,12 +185,12 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
} else {
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 1bea629c49ca..32c926986dbb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -286,7 +286,7 @@ err_no_space:
return -ENOMEM;
}
-void kq_submit_packet(struct kernel_queue *kq)
+int kq_submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
@@ -298,6 +298,10 @@ void kq_submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
+ /* Fatal err detected, packet submission won't go through */
+ if (amdgpu_amdkfd_is_fed(kq->dev->adev))
+ return -EIO;
+
if (kq->dev->kfd->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
@@ -307,6 +311,8 @@ void kq_submit_packet(struct kernel_queue *kq)
write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
kq->pending_wptr);
}
+
+ return 0;
}
void kq_rollback_packet(struct kernel_queue *kq)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 9a6244430845..e24ee50acdf0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -47,7 +47,7 @@
int kq_acquire_packet_buffer(struct kernel_queue *kq,
size_t packet_size_in_dwords,
unsigned int **buffer_ptr);
-void kq_submit_packet(struct kernel_queue *kq);
+int kq_submit_packet(struct kernel_queue *kq);
void kq_rollback_packet(struct kernel_queue *kq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 57bf5e513f4d..e5cc697a3ca8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -128,6 +128,31 @@ struct mqd_manager {
uint32_t mqd_size;
};
+struct mqd_user_context_save_area_header {
+ /* Byte offset from start of user context
+ * save area to the last saved top (lowest
+ * address) of control stack data. Must be
+ * 4 byte aligned.
+ */
+ uint32_t control_stack_offset;
+
+ /* Byte size of the last saved control stack
+ * data. Must be 4 byte aligned.
+ */
+ uint32_t control_stack_size;
+
+ /* Byte offset from start of user context save
+ * area to the last saved base (lowest address)
+ * of wave state data. Must be 4 byte aligned.
+ */
+ uint32_t wave_state_offset;
+
+ /* Byte size of the last saved wave state data.
+ * Must be 4 byte aligned.
+ */
+ uint32_t wave_state_size;
+};
+
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev,
struct queue_properties *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 401096c103b2..d6f65f39072b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -288,7 +288,7 @@ int pm_send_set_resources(struct packet_manager *pm,
retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval)
- kq_submit_packet(pm->priv_queue);
+ retval = kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
@@ -325,7 +325,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval)
goto fail_create_runlist;
- kq_submit_packet(pm->priv_queue);
+ retval = kq_submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
@@ -361,7 +361,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
if (!retval)
- kq_submit_packet(pm->priv_queue);
+ retval = kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
@@ -392,7 +392,7 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
if (!retval)
- kq_submit_packet(pm->priv_queue);
+ retval = kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
}
@@ -421,7 +421,7 @@ int pm_send_unmap_queue(struct packet_manager *pm,
retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
if (!retval)
- kq_submit_packet(pm->priv_queue);
+ retval = kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
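
Since kq_submit_packet() now returns an error code, every packet-manager path above
converges on the same caller pattern. A minimal sketch, with pmf_build_packet()
standing in for the per-packet pm->pmf->...() builder (a hypothetical name):

	retval = kq_acquire_packet_buffer(pm->priv_queue, size_in_dwords, &buffer);
	if (retval)
		goto out;

	retval = pmf_build_packet(pm, buffer);
	if (!retval)
		/* returns -EIO when a fatal error was already detected */
		retval = kq_submit_packet(pm->priv_queue);
	else
		/* release the reserved ring space */
		kq_rollback_packet(pm->priv_queue);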
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 80320b8603fc..42d40560cd30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -99,11 +99,11 @@
/*
* Size of the per-process TBA+TMA buffer: 2 pages
*
- * The first page is the TBA used for the CWSR ISA code. The second
- * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
+ * The first chunk is the TBA used for the CWSR ISA code. The second
+ * chunk is used as TMA for user-mode trap handler setup in daisy-chain mode.
*/
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
-#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
+#define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048)
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
(KFD_MAX_NUM_OF_PROCESSES * \
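
The layout implied by the new offset, assuming PAGE_SIZE == 4096, as a worked example:

	/*
	 * 0 .. KFD_CWSR_TMA_OFFSET - 1   (6 KiB): TBA, the CWSR trap handler ISA,
	 *                                         grown from 4 KiB to 6 KiB
	 * KFD_CWSR_TMA_OFFSET .. 8191    (2 KiB): TMA, user-mode trap handler setup
	 *
	 * KFD_CWSR_TBA_TMA_SIZE stays at two pages; this is why the pre-gfx11
	 * BUILD_BUG_ON()s in kfd_cwsr_init() now check the ISA size against
	 * KFD_CWSR_TMA_OFFSET instead of PAGE_SIZE.
	 */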
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index d9953c2b2661..06ac835190f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -238,16 +238,16 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
{
- struct amdgpu_task_info task_info;
-
- memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
- /* Report VM faults from user applications, not retry from kernel */
- if (!task_info.pid)
- return;
-
- kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n",
- task_info.pid, task_info.task_name);
+ struct amdgpu_task_info *task_info;
+
+ task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
+ if (task_info) {
+ /* Report VM faults from user applications, not retry from kernel */
+ if (task_info->pid)
+ kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n",
+ task_info->pid, task_info->task_name);
+ amdgpu_vm_put_task_info(task_info);
+ }
}
void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
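
A minimal sketch of the reference-counted task-info pattern this hunk switches to:
the lookup may return NULL, and every successful lookup must be balanced with a put.

	struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_pasid(adev, pasid);

	if (ti) {
		if (ti->pid)	/* pid == 0 indicates a retry fault from the kernel */
			pr_debug("vm fault from %s (pid %d)\n", ti->task_name, ti->pid);
		amdgpu_vm_put_task_info(ti);
	}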
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index c50a0dc9c9c0..f0f7f48af413 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1515,9 +1515,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
goto unreserve_out;
}
- r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
- drm_priv_to_vm(pdd->drm_priv),
- svm_range_bo_validate, NULL);
+ r = amdgpu_vm_validate(pdd->dev->adev,
+ drm_priv_to_vm(pdd->drm_priv), NULL,
+ svm_range_bo_validate, NULL);
if (r) {
pr_debug("failed %d validate pt bos\n", r);
goto unreserve_out;
@@ -1641,7 +1641,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
goto free_ctx;
}
- svm_range_reserve_bos(ctx, intr);
+ r = svm_range_reserve_bos(ctx, intr);
+ if (r)
+ goto free_ctx;
p = container_of(prange->svms, struct kfd_process, svms);
owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 6ed2ec381aaa..c51f131eaa2f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1564,6 +1564,7 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id + (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
pcache->cache_size = pcache_info[cache_type].cache_size;
+ pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
@@ -1632,6 +1633,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id
+ (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level;
+ pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
@@ -1703,6 +1705,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
gpu_processor_id = dev->node_props.simd_id_base;
+ memset(cache_info, 0, sizeof(cache_info));
pcache_info = cache_info;
num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info);
if (!num_of_cache_types) {
@@ -1994,8 +1997,9 @@ int kfd_topology_add_device(struct kfd_node *gpu)
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
- if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
- dev->node_props.location_id |= dev->gpu->node_id;
+ /* On multi-partition nodes, node id = location_id[31:28] */
+ if (gpu->kfd->num_nodes > 1)
+ dev->node_props.location_id |= (dev->gpu->node_id << 28);
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
deleted file mode 100644
index a8a6c106e8c7..000000000000
--- a/drivers/gpu/drm/amd/display/TODO
+++ /dev/null
@@ -1,110 +0,0 @@
-===============================================================================
-TODOs
-===============================================================================
-
-1. Base this on drm-next - WIP
-
-
-2. Cleanup commit history
-
-
-3. WIP - Drop page flip helper and use DRM's version
-
-
-4. DONE - Flatten all DC objects
- * dc_stream/core_stream/stream should just be dc_stream
- * Same for other DC objects
-
- "Is there any major reason to keep all those abstractions?
-
- Could you collapse everything into struct dc_stream?
-
- I haven't looked recently but I didn't get the impression there was a
- lot of design around what was public/protected, more whatever needed
- to be used by someone else was in public."
- ~ Dave Airlie
-
-
-5. DONE - Rename DC objects to align more with DRM
- * dc_surface -> dc_plane_state
- * dc_stream -> dc_stream_state
-
-
-6. DONE - Per-plane and per-stream validation
-
-
-7. WIP - Per-plane and per-stream commit
-
-
-8. WIP - Split pipe_ctx into plane and stream resource structs
-
-
-9. Attach plane and stream reources to state object instead of validate_context
-
-
-10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
- * Use drm_display_info instead
- * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
-
- "Making sure you use the sink-specific helper libraries and kernel
- subsystems, since there's really no good reason to have 2nd
- implementation of those in the kernel. Looks likes that's done for mst
- and edid parsing. There's still a bit a midlayer feeling to the edid
- parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
- think it'd be much better if you convert that over to reading stuff
- from drm_display_info and if needed, push stuff into the core). Also,
- I can't come up with a good reason why DC needs all this (except to
- reimplement half of our edid quirk table, which really isn't a good
- idea). Might be good if you put this onto the list of things to fix
- long-term, but imo not a blocker. Definitely make sure new stuff
- doesn't slip in (i.e. if you start adding edid quirks to DC instead of
- the drm core, refactoring to use the core edid stuff was pointless)."
- ~ Daniel Vetter
-
-
-11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
-overy complicated HW programming function for sendind and receiving i2c/aux
-commands. We can greatly simplify that and move it into dc/dceXYZ like other
-HW blocks.
-
-12. drm_modeset_lock in MST should no longer be needed in recent kernels
- * Adopt appropriate locking scheme
-
-13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
-a few indirections, and consider removing entirely and using the
-drm_atomic_helper_best_encoder default behaviour.
-
-14. core/dc_debug.c, consider switching to the atomic state debug helpers and
-moving all your driver state printing into the various atomic_print_state
-callbacks. There's also plans to expose this stuff in a standard way across all
-drivers, to make debugging userspace compositors easier across different hw.
-
-15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
-dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
-
-16. Move to core SCDC helpers (I think those are new since initial DC review).
-
-17. There's still a pretty massive layer cake around dp aux and DPCD handling,
-with like 3 levels of abstraction and using your own structures instead of the
-stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
-incompatible styles, just means more reasons not to add a third (or well third
-one gets to do the cleanup refactor).
-
-18. There's a pile of sink handling code, both for DP and HDMI where I didn't
-immediately recognize the standard. I think long term it'd be best for the drm
-subsystem if we try to move as much of that into helpers/core as possible, and
-share it with drivers. But that's a very long term goal, and by far not just an
-issue with DC - other drivers, especially around DP sink handling, are equally
-guilty.
-
-19. DONE - The DC logger is still a rather sore thing, but I know that the
-DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
-something that integrates better with DRM and linux debug printing, while not
-being useless with filtering output. dynamic debug printing might be an option.
-
-20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
-retimer that we need to program to pass PHY compliance. Currently that's
-bypassing the i2c device and goes directly to HW. This should be changed.
-
-21. Remove vector.c from dc/basics. It's used in DDC code which can probably
-be simplified enough to no longer need a vector implementation.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cf875751971f..1c9c6096e28f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -67,6 +67,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1218,6 +1219,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -1843,21 +1845,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- }
-
- /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
- * It is expected that DMUB will resend any pending notifications at this point, for
- * example HPD from DPIA.
- */
- if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point. Note
+ * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
+ * align with the legacy interface initialization sequence. Connection status will be
+ * proactively detected once in amdgpu_dm_initialize_drm_device.
+ */
dc_enable_dmub_outbox(adev->dm.dc);
/* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -1938,17 +1931,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = NULL;
}
- if (adev->dm.dc)
+ if (adev->dm.dc) {
dc_deinit_callbacks(adev->dm.dc);
-
- if (adev->dm.dc)
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
-
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
- kfree(adev->dm.dmub_notify);
- adev->dm.dmub_notify = NULL;
- destroy_workqueue(adev->dm.delayed_hpd_wq);
- adev->dm.delayed_hpd_wq = NULL;
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
}
if (adev->dm.dmub_bo)
@@ -2050,6 +2041,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
return 0;
default:
break;
@@ -2121,6 +2113,17 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
const struct dmcub_firmware_header_v1_0 *hdr;
enum dmub_asic dmub_asic;
enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
int r;
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
@@ -2159,6 +2162,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN321;
break;
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
default:
@@ -2218,7 +2222,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES;
- region_params.is_mailbox_in_inbox = false;
+ region_params.window_memory_type = window_memory_type;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -2246,6 +2250,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@ -2287,6 +2292,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@@ -3536,6 +3542,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ }
+
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
@@ -3564,10 +3578,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
handle_hpd_rx_irq,
(void *) aconnector);
}
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
- aconnector;
}
}
@@ -4399,6 +4409,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4481,6 +4492,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4502,6 +4514,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
psr_feature_enabled = true;
break;
default:
@@ -4510,6 +4523,24 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4561,6 +4592,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@@ -4578,6 +4613,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -4645,6 +4685,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -4776,6 +4817,7 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
default:
@@ -4901,6 +4943,7 @@ static int dm_early_init(void *handle)
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
@@ -5201,6 +5244,10 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
* @new_plane_state: New state of @plane
* @crtc_state: New state of CRTC connected to the @plane
* @flip_addrs: DC flip tracking struct, which also tracts dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
* @dirty_regions_changed: dirty regions changed
*
* For PSR SU, DC informs the DMUB uController of dirty rectangle regions
@@ -6195,7 +6242,8 @@ create_stream_for_sink(struct drm_connector *connector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else if (aconnector) {
- recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -6355,9 +6403,6 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
- } else if (property == adev->mode_info.abm_level_property) {
- dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
- ret = 0;
}
return ret;
@@ -6400,19 +6445,87 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
- } else if (property == adev->mode_info.abm_level_property) {
- *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
- dm_state->abm_level : 0;
- ret = 0;
}
return ret;
}
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (between 0-4, with 0 representing off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -6474,9 +6587,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->vcpi_slots = 0;
state->pbn = 0;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- state->abm_level = amdgpu_dm_abm_level ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -6514,6 +6630,14 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -6534,10 +6658,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6545,7 +6674,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -6586,12 +6715,18 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6599,7 +6734,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -7419,7 +7554,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!edid)
+ if (!(amdgpu_freesync_vid_mode && edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7536,12 +7671,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
- if (connector_type == DRM_MODE_CONNECTOR_eDP &&
- (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.abm_level_property, 0);
- }
-
if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
/* Content Type is currently only implemented for HDMI. */
drm_connector_attach_content_type_property(&aconnector->base);
@@ -8535,10 +8664,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dm_update_pflip_irq_state(drm_to_adev(dev),
acrtc_attach);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
- amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
/* Decrement skip count when PSR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8565,6 +8706,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
!acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
(timestamp_ns -
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
500000000)
@@ -8827,11 +8969,12 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
}
} /* for_each_crtc_in_state() */
- /* if there mode set or reset, disable eDP PSR */
+ /* if there mode set or reset, disable eDP PSR, Replay */
if (mode_set_reset_required) {
if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
+ amdgpu_dm_replay_disable_all(dm);
amdgpu_dm_psr_disable_all(dm);
}
@@ -9704,7 +9847,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -9744,7 +9888,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
}
/* Now check if we should set freesync video mode */
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
is_timing_unchanged_for_freesync(new_crtc_state,
@@ -9757,7 +9901,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (aconnector &&
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
@@ -11158,14 +11302,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (range->flags != 1)
continue;
- amdgpu_dm_connector->min_vfreq = range->min_vfreq;
- amdgpu_dm_connector->max_vfreq = range->max_vfreq;
- amdgpu_dm_connector->pixel_clock_mhz =
- range->pixel_clock_mhz * 10;
-
connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
break;
}
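/*
 * Not part of the patch: a minimal standalone sketch of the EDID 1.4
 * range-descriptor offset handling added above, so the +255 adjustment is
 * easy to check by hand. Field and macro names here only mirror the patch;
 * the real flags live in include/drm/drm_edid.h.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RANGE_OFFSET_MIN_VFREQ (1 << 0)	/* illustrative bit positions */
#define RANGE_OFFSET_MAX_VFREQ (1 << 1)

static void decode_vfreq_range(uint8_t min_byte, uint8_t max_byte,
			       uint8_t offset_flags, bool edid_1_4,
			       unsigned int *min_hz, unsigned int *max_hz)
{
	*min_hz = min_byte;
	*max_hz = max_byte;

	/* EDID 1.4 extends the one-byte 1..255 Hz range with +255 offset flags. */
	if (edid_1_4) {
		if (offset_flags & RANGE_OFFSET_MIN_VFREQ)
			*min_hz += 255;
		if (offset_flags & RANGE_OFFSET_MAX_VFREQ)
			*max_hz += 255;
	}
}

int main(void)
{
	unsigned int min_hz, max_hz;

	/* A 48..300 Hz panel stores max as 300 - 255 = 45 with the max flag set. */
	decode_vfreq_range(48, 45, RANGE_OFFSET_MAX_VFREQ, true, &min_hz, &max_hz);
	printf("range: %u-%u Hz\n", min_hz, max_hz);	/* prints 48-300 Hz */
	return 0;
}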
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 9c1871b866cc..09519b7abf67 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -693,6 +693,7 @@ struct amdgpu_dm_connector {
struct drm_display_mode freesync_vid_base;
int psr_skip_count;
+ bool disallow_edp_enter_psr;
/* Record progress status of mst*/
uint8_t mst_status;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 6e715ef3a556..e23a0a276e33 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -95,6 +96,61 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features.
+ *
+ * @vblank_work: is a pointer to a struct vblank_control_work object.
+ * @vblank_enabled: indicates whether the DRM vblank counter is currently
+ * enabled (true) or disabled (false).
+ * @allow_sr_entry: represents whether entry into the self-refresh mode is
+ * allowed (true) or not allowed (false).
+ *
+ * The DRM vblank counter enable/disable action is used as the trigger to enable
+ * or disable various panel self-refresh features:
+ *
+ * Panel Replay and PSR SU
+ * - Enable when:
+ * - vblank counter is disabled
+ *     - entry is allowed (usermode demonstrates an adequate number of fast
+ *       commits)
+ * - CRC capture window isn't active
+ * - Keep enabled even when vblank counter gets enabled
+ *
+ * PSR1
+ * - Enable condition same as above
+ * - Disable when vblank counter is enabled
+ */
+static void amdgpu_dm_crtc_set_panel_sr_feature(
+ struct vblank_control_work *vblank_work,
+ bool vblank_enabled, bool allow_sr_entry)
+{
+ struct dc_link *link = vblank_work->stream->link;
+ bool is_sr_active = (link->replay_settings.replay_allow_active ||
+ link->psr_settings.psr_allow_active);
+ bool is_crc_window_active = false;
+
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ is_crc_window_active =
+ amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
+#endif
+
+ if (link->replay_settings.replay_feature_enabled &&
+ allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+ amdgpu_dm_replay_enable(vblank_work->stream, true);
+ } else if (vblank_enabled) {
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+ } else if (link->psr_settings.psr_feature_enabled &&
+ allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
+}
+
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
{
struct vblank_control_work *vblank_work =
@@ -123,18 +179,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- if (vblank_work->enable) {
- if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
- vblank_work->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
- } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
- !vblank_work->stream->link->psr_settings.psr_allow_active &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
-#endif
- vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
- amdgpu_dm_psr_enable(vblank_work->stream);
- }
+ amdgpu_dm_crtc_set_panel_sr_feature(
+ vblank_work, vblank_work->enable,
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
+ vblank_work->stream->link->replay_settings.replay_feature_enabled);
}
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 68a846323912..eee4945653e2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
const uint32_t rd_buf_size = 10;
struct pipe_ctx *pipe_ctx;
ssize_t result = 0;
- int i, r, str_len = 30;
+ int i, r, str_len = 10;
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
@@ -2971,6 +2971,53 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
return 0;
}
+/* check whether the kernel disallows eDP from entering the PSR state
+ * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter PSR; 1: disallow
+ */
+static int disallow_edp_enter_psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+
+ *val = (u64) aconnector->disallow_edp_enter_psr;
+ return 0;
+}
+
+/* set whether the kernel disallows eDP from entering the PSR state
+ * echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter PSR; 1: disallow
+ *
+ * usage: lets a test app read CRC from a PSR eDP rx.
+ *
+ * during kernel boot, the kernel writes DPCD 0x170 = 5.
+ * this notifies the eDP rx that PSR is enabled and lets the rx check CRC.
+ * the rx firmware then checks CRC only for the rx internal logic, so the
+ * CRC read count in DPCD 0x246 is not updated and stays 0. when the eDP
+ * tx driver wants to read the rx CRC from DPCD 0x246, 0x270, the zero
+ * read count makes the tx driver time out.
+ *
+ * to avoid this, this debugfs entry lets a test app disable rx CRC
+ * checking for the rx internal logic, so the test app can then read a
+ * non-zero CRC read count.
+ *
+ * the expected app sequence is as below:
+ * 1. disable the eDP PHY and notify the eDP rx with DPCD 0x600 = 2.
+ * 2. echo 0x1 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 3. enable the eDP PHY and notify the eDP rx with DPCD 0x600 = 1, but
+ *    without DPCD 0x170 = 5.
+ * 4. read the CRC from the rx at DPCD 0x270, 0x246, etc.
+ * 5. echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ *    this lets eDP return to normal with the PSR setup DPCD 0x170 = 5.
+ */
+static int disallow_edp_enter_psr_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+
+ aconnector->disallow_edp_enter_psr = val ? true : false;
+ return 0;
+}
+
static int dmub_trace_mask_set(void *data, u64 val)
{
struct amdgpu_device *adev = data;
@@ -3092,6 +3139,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops,
allow_edp_hotplug_detection_get,
allow_edp_hotplug_detection_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops,
+ disallow_edp_enter_psr_get,
+ disallow_edp_enter_psr_set, "%llu\n");
+
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -3265,6 +3316,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
&edp_ilr_debugfs_fops);
debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector,
&allow_edp_hotplug_detection_fops);
+ debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector,
+ &disallow_edp_enter_psr_fops);
}
for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
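/*
 * Not part of the patch: a minimal userspace sketch of steps 2 and 5 from
 * the test sequence documented above. The DRI card index and connector
 * name in the path ("dri/0/eDP-1") are illustrative and system-dependent.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_disallow_psr(const char *path, int disallow)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	/* "1" disallows PSR entry, "0" re-allows it. */
	if (write(fd, disallow ? "1" : "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/eDP-1/disallow_edp_enter_psr";

	set_disallow_psr(path, 1);	/* step 2: block PSR entry before CRC reads */
	/* ... read the sink CRC registers over DPCD here ... */
	set_disallow_psr(path, 0);	/* step 5: restore normal PSR behaviour */
	return 0;
}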
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index b54d646a7c73..e339c7a8d541 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -741,6 +741,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_51 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 85b7f58a7f35..c27063305a13 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+ case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+ case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
+ apply_edid_quirks(edid_buf, edid_caps);
+
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
- apply_edid_quirks(edid_buf, edid_caps);
-
kfree(sads);
kfree(sadb);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 5ce542b1f860..738a58eebba7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -60,21 +60,26 @@ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connecto
if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT)
return false;
+ // Sink shall populate line deviation information
+ if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 ||
+ dpcd_caps->pr_info.max_deviation_line == 0)
+ return false;
+
return true;
}
/*
- * amdgpu_dm_setup_replay() - setup replay configuration
+ * amdgpu_dm_set_replay_caps() - setup Replay capabilities
* @link: link
* @aconnector: aconnector
*
*/
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
{
- struct replay_config pr_config;
+ struct replay_config pr_config = { 0 };
union replay_debug_flags *debug_flags = NULL;
- // For eDP, if Replay is supported, return true to skip checks
+	// If Replay support is already set, return true to skip the checks
if (link->replay_settings.config.replay_supported)
return true;
@@ -87,27 +92,50 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
if (!link_supports_replay(link, aconnector))
return false;
- // Mark Replay is supported in link and update related attributes
+ // Mark Replay is supported in pr_config
pr_config.replay_supported = true;
- pr_config.replay_power_opt_supported = 0;
- pr_config.replay_enable_option |= pr_enable_option_static_screen;
- pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
-
- if (!pr_config.replay_timing_sync_supported)
- pr_config.replay_enable_option &= ~pr_enable_option_general_ui;
debug_flags = (union replay_debug_flags *)&pr_config.debug_flags;
debug_flags->u32All = 0;
debug_flags->bitfields.visual_confirm =
link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY;
- link->replay_settings.replay_feature_enabled = true;
-
init_replay_config(link, &pr_config);
return true;
}
+/*
+ * amdgpu_dm_link_setup_replay() - configure replay link
+ * @link: link
+ * @aconnector: aconnector
+ *
+ */
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+{
+ struct replay_config *pr_config;
+
+ if (link == NULL || aconnector == NULL)
+ return false;
+
+ pr_config = &link->replay_settings.config;
+
+ if (!pr_config->replay_supported)
+ return false;
+
+ pr_config->replay_power_opt_supported = 0x11;
+ pr_config->replay_smu_opt_supported = false;
+ pr_config->replay_enable_option |= pr_enable_option_static_screen;
+ pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
+ pr_config->replay_timing_sync_supported = false;
+
+ if (!pr_config->replay_timing_sync_supported)
+ pr_config->replay_enable_option &= ~pr_enable_option_general_ui;
+
+ link->replay_settings.replay_feature_enabled = true;
+
+ return true;
+}
/*
* amdgpu_dm_replay_enable() - enable replay f/w
@@ -117,51 +145,23 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
*/
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
{
- uint64_t state;
- unsigned int retry_count;
bool replay_active = true;
- const unsigned int max_retry = 1000;
- bool force_static = true;
struct dc_link *link = NULL;
-
if (stream == NULL)
return false;
link = stream->link;
- if (link == NULL)
- return false;
-
- link->dc->link_srv->edp_setup_replay(link, stream);
-
- link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL);
-
- link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL);
-
- if (wait == true) {
-
- for (retry_count = 0; retry_count <= max_retry; retry_count++) {
- dc_link_get_replay_state(link, &state);
- if (replay_active) {
- if (state != REPLAY_STATE_0 &&
- (!force_static || state == REPLAY_STATE_3))
- break;
- } else {
- if (state == REPLAY_STATE_0)
- break;
- }
- udelay(500);
- }
-
- /* assert if max retry hit */
- if (retry_count >= max_retry)
- ASSERT(0);
- } else {
- /* To-do: Add trace log */
+ if (link) {
+ link->dc->link_srv->edp_setup_replay(link, stream);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ DRM_DEBUG_DRIVER("Enabling replay...\n");
+ link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
+ return true;
}
- return true;
+ return false;
}
/*
@@ -172,12 +172,31 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
*/
bool amdgpu_dm_replay_disable(struct dc_stream_state *stream)
{
+ bool replay_active = false;
+ struct dc_link *link = NULL;
- if (stream->link) {
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+
+ if (link) {
DRM_DEBUG_DRIVER("Disabling replay...\n");
- stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL);
+ link->dc->link_srv->edp_set_replay_allow_active(stream->link, &replay_active, true, false, NULL);
return true;
}
return false;
}
+
+/*
+ * amdgpu_dm_replay_disable_all() - disable replay f/w
+ * if replay is enabled on any stream
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm)
+{
+ DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n");
+ return dc_set_replay_allow_active(dm->dc, false);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 01cba3cd6246..f0d30eb47312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -40,7 +40,9 @@ enum replay_enable_option {
bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable);
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
bool amdgpu_dm_replay_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm);
#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 1090d235086a..bd1f60ecaba4 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -101,6 +101,40 @@ void convert_float_matrix(
}
}
+static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
+{
+ struct fixed31_32 result;
+ uint16_t sign_mask = 1 << (fractional_bits + integer_bits);
+ uint16_t value_mask = sign_mask - 1;
+
+ result.value = (long long)(arg & value_mask) <<
+ (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+
+ if (arg & sign_mask)
+ result = dc_fixpt_neg(result);
+
+ return result;
+}
+
+/**
+ * convert_hw_matrix - converts HW values into fixed31_32 matrix.
+ * @matrix: fixed point 31.32 matrix
+ * @reg: array of register values
+ * @buffer_size: size of the array of register values
+ *
+ * Converts HW register spec defined format S2D13 into a fixed-point 31.32
+ * matrix.
+ */
+void convert_hw_matrix(struct fixed31_32 *matrix,
+ uint16_t *reg,
+ uint32_t buffer_size)
+{
+ for (int i = 0; i < buffer_size; ++i)
+ matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13);
+}
+
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
uint32_t remainder;
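/*
 * Not part of the patch: a standalone sketch of the S2D13 decoding that
 * int_frac_to_fixed_point() above performs, using a plain double instead
 * of the driver's fixed31_32 type so the arithmetic is easy to verify.
 * It mirrors the sign-magnitude handling of the patch (negate the masked
 * magnitude when the sign bit is set).
 */
#include <stdint.h>
#include <stdio.h>

static double s2d13_to_double(uint16_t reg)
{
	uint16_t sign_mask = 1 << 15;		/* 2 integer + 13 fractional bits */
	uint16_t value_mask = sign_mask - 1;
	double value = (double)(reg & value_mask) / (double)(1 << 13);

	return (reg & sign_mask) ? -value : value;
}

int main(void)
{
	/* 0x2000 = 1 << 13 encodes 1.0; setting bit 15 negates the magnitude. */
	printf("%f\n", s2d13_to_double(0x2000));	/* 1.000000 */
	printf("%f\n", s2d13_to_double(0xA000));	/* -1.000000 */
	printf("%f\n", s2d13_to_double(0x1000));	/* 0.500000 */
	return 0;
}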
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index 81da4e6f7a1a..a433cef78496 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -41,6 +41,10 @@ void convert_float_matrix(
void reduce_fraction(uint32_t num, uint32_t den,
uint32_t *out_num, uint32_t *out_den);
+void convert_hw_matrix(struct fixed31_32 *matrix,
+ uint16_t *reg,
+ uint32_t buffer_size);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index 39530b2ea495..b30c2cdc1a61 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "resource.h"
#include "dm_services.h"
#include "dce_calcs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 818a529cacc3..86f9198e7501 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -37,7 +37,7 @@
#define EXEC_BIOS_CMD_TABLE(command, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), \
- (uint32_t *)&params) == 0)
+ (uint32_t *)&params, sizeof(params)) == 0)
#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 293a919d605d..cbae1be7b009 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -49,7 +49,7 @@
#define EXEC_BIOS_CMD_TABLE(fname, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), \
- (uint32_t *)&params) == 0)
+ (uint32_t *)&params, sizeof(params)) == 0)
#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 9d347960e2b0..117fc6d4c1de 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -81,6 +81,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
+ case DCN_VERSION_3_51:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 28a2a837d2f0..9f0f25aee426 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dal_asic_id.h"
#include "dc_types.h"
#include "dccg.h"
@@ -340,7 +338,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
- break;
}
case AMDGPU_FAMILY_GC_11_0_1: {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..b77804cfde0f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -132,7 +132,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
int dprefclk_wdivider;
int dprefclk_src_sel;
int dp_ref_clk_khz;
- int target_div;
+ int target_div = 600000;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 60761ff3cbf1..2a74e2d74909 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "reg_helper.h"
#include "core_types.h"
#include "clk_mgr_internal.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c
deleted file mode 100644
index 61dd12198a3c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "reg_helper.h"
-#include "clk_mgr_internal.h"
-#include "rv1_clk_mgr_clk.h"
-
-#include "ip/Discovery/hwid.h"
-#include "ip/Discovery/v1/ip_offset_1.h"
-#include "ip/CLK/clk_10_0_default.h"
-#include "ip/CLK/clk_10_0_offset.h"
-#include "ip/CLK/clk_10_0_reg.h"
-#include "ip/CLK/clk_10_0_sh_mask.h"
-
-#include "dce100/dce_clk_mgr.h"
-
-#define CLK_BASE_INNER(inst) \
- CLK_BASE__INST ## inst ## _SEG0
-
-
-#define CLK_REG(reg_name, block, inst)\
- CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## _ ## inst ## _ ## reg_name
-
-#define REG(reg_name) \
- CLK_REG(reg_name, CLK0, 0)
-
-
-/* Only used by testing framework*/
-void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-
- regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk
-
- bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007;
- if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4)
- bypass->dcfclk_bypass = 0;
-
-
- regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider
-
- regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow
-
- regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk
-
- bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007;
- if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4)
- bypass->dispclk_pypass = 0;
-
- regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk
-
- bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007;
- if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4)
- bypass->dprefclk_bypass = 0;
-
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0c6a4ab72b1d..e3e1940198a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -707,9 +707,7 @@ void rn_clk_mgr_construct(
int is_green_sardine = 0;
struct clk_log_info log_info = {0};
-#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
-#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 8c9d45e5b13b..23b390245b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -26,6 +26,10 @@
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "rn_clk_mgr_vbios_smu.h"
+
#include <linux/delay.h>
#include "renoir_ip_offset.h"
@@ -33,8 +37,6 @@
#include "mp/mp_12_0_0_offset.h"
#include "mp/mp_12_0_0_sh_mask.h"
-#include "rn_clk_mgr_vbios_smu.h"
-
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
@@ -120,7 +122,10 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd);
+ if (IS_SMU_TIMEOUT(result)) {
+ ASSERT(0);
+ dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
+ }
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
@@ -185,10 +190,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
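/*
 * Not part of the patch: a generic sketch of the poll-with-timeout pattern
 * behind rn_smu_wait_for_response(clk_mgr, 10, 200000) used above, where
 * the two numeric arguments are a per-poll delay (microseconds) and a
 * retry count. The response-register read is faked here; the real code
 * reads the SMU mailbox register.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define SMU_RESULT_PENDING 0x0
#define SMU_RESULT_OK      0x1

/* Stand-in for the mailbox read: pretend the SMU answers on the third poll. */
static uint32_t fake_read_response(void)
{
	static int polls;

	return (++polls >= 3) ? SMU_RESULT_OK : SMU_RESULT_PENDING;
}

static uint32_t poll_smu_response(unsigned int delay_us, unsigned int retries)
{
	uint32_t result;

	do {
		result = fake_read_response();
		if (result != SMU_RESULT_PENDING)
			return result;
		usleep(delay_us);	/* udelay() in the kernel */
	} while (retries--);

	return SMU_RESULT_PENDING;	/* caller treats a lingering PENDING as a timeout */
}

int main(void)
{
	/* Mirrors the (10, 200000) delay/retry pair passed in the hunk above. */
	printf("SMU result: 0x%x\n", poll_smu_response(10, 200000));
	return 0;
}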
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index e4f96b6fd79d..b4fb17b7a096 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include "dcn301_smu.h"
+#include "dm_helpers.h"
#include "vangogh_ip_offset.h"
@@ -120,7 +121,10 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK);
+ if (IS_SMU_TIMEOUT(result)) {
+ ASSERT(0);
+ dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
+ }
/* Actual dispclk set is returned in the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
@@ -180,10 +184,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 32279c5db724..6904e95113c1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -202,10 +202,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 07baa10a8647..c4af406146b7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
- actual_dcfclk_set_mhz,
- actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 1042cf1a3ab0..879f1494c4cd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
index 3ed19197a755..8b82092b91cd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
@@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
-#ifdef DBG
- smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
return actual_dcfclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index aadd07bc68c5..668f05c8654e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -243,10 +243,8 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
- DC_FP_START();
/* WM range table */
dcn32_build_wm_range_table(clk_mgr);
- DC_FP_END();
}
static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
@@ -387,7 +385,15 @@ static void dcn32_update_clocks_update_dentist(
uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+ /*
+	 * SMU uses discrete dispclk presets. We applied
+	 * the same formula to increase our dispclk_khz
+	 * to the next matching discrete value. By
+	 * contract, we should use the preset dispclk
+	 * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+ khz_to_mhz_floor(temp_dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
@@ -426,7 +432,15 @@ static void dcn32_update_clocks_update_dentist(
/* do requested DISPCLK updates*/
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+ /*
+	 * SMU uses discrete dispclk presets. We applied
+	 * the same formula to increase our dispclk_khz
+	 * to the next matching discrete value. By
+	 * contract, we should use the preset dispclk
+	 * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+ khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz));
if (dc->debug.override_dispclk_programming) {
REG_GET(DENTIST_DISPCLK_CNTL,
@@ -493,6 +507,8 @@ static void dcn32_auto_dpm_test_log(
}
}
+ msleep(5);
+
mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
@@ -734,7 +750,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
if (clk_mgr->smu_present && !dpp_clock_lowered)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+ /*
+ * SMU uses discrete dppclk presets. We applied
+ * the same formula to increase our dppclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dppclk
+	 * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+ khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
update_dppclk = true;
}
@@ -765,7 +789,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+ /*
+ * SMU uses discrete dppclk presets. We applied
+ * the same formula to increase our dppclk_khz
+ * to the next matching discrete value. By
+ * contract, we should use the preset dppclk
+	 * floored in MHz to describe the intended clock.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+ khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
@@ -783,7 +815,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
- if (dc->config.enable_auto_dpm_test_logs) {
+ if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}
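/*
 * Not part of the patch: a small sketch of why the hunks above switch from
 * a ceiling to a floor kHz->MHz conversion when talking to the SMU. The
 * helper definitions below are assumptions for illustration only; the
 * driver's own khz_to_mhz_ceil()/khz_to_mhz_floor() helpers live in the
 * clk_mgr headers.
 */
#include <stdio.h>

static unsigned int khz_to_mhz_ceil(unsigned int khz)
{
	return (khz + 999) / 1000;
}

static unsigned int khz_to_mhz_floor(unsigned int khz)
{
	return khz / 1000;
}

int main(void)
{
	/*
	 * The SMU exposes discrete dispclk/dppclk presets. If the clock was
	 * already raised to a preset of 645,xxx kHz, rounding up would request
	 * 646 MHz (naming a preset that was not chosen), while flooring names
	 * the preset actually in use.
	 */
	unsigned int khz = 645500;

	printf("ceil:  %u MHz\n", khz_to_mhz_ceil(khz));	/* 646 */
	printf("floor: %u MHz\n", khz_to_mhz_floor(khz));	/* 645 */
	return 0;
}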
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index a34c258c19dc..c76352a817de 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -36,8 +36,7 @@
#define DALSMC_MSG_SetCabForUclkPstate 0x12
#define DALSMC_Result_OK 0x1
-void
-dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index e64890259235..c378b879c76d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -384,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn35_smu_enable_pme_wa(clk_mgr);
}
-void dcn35_init_clocks(struct clk_mgr *clk_mgr)
-{
- uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
-
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
bool dcn35_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
@@ -422,6 +409,22 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static void init_clk_states(struct clk_mgr *clk_mgr)
+{
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ init_clk_states(clk_mgr);
+}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
@@ -833,7 +836,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
}
}
-static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
+static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc *dc = clk_mgr_base->ctx->dc;
@@ -881,7 +884,7 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
return ips_supported;
}
-static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
+static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -890,7 +893,7 @@ static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
- dcn35_init_clocks(clk_mgr);
+ init_clk_states(clk_mgr);
/* TODO: Implement the functions and remove the ifndef guard */
}
@@ -975,8 +978,8 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
- .set_idle_state = dcn35_set_idle_state,
- .get_idle_state = dcn35_get_idle_state
+ .set_idle_state = dcn35_set_ips_idle_state,
+ .get_idle_state = dcn35_get_ips_idle_state
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index 6d4a1ffab5ed..9e588c56c570 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -361,32 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
- smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@@ -400,7 +400,7 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
clk_mgr,
msg_id,
param);
- smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
+ smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);
}
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
@@ -447,6 +447,9 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
+ if (!clk_mgr->smu_present)
+ return;
+
dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
@@ -458,6 +461,9 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
int retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
@@ -470,6 +476,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
int retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
@@ -481,6 +490,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
+ if (!clk_mgr->smu_present)
+ return;
+
REG_WRITE(MP1_SMN_C2PMSG_71, param);
//smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}
@@ -489,6 +501,9 @@ uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
uint32_t retv;
+ if (!clk_mgr->smu_present)
+ return 0;
+
retv = REG_READ(MP1_SMN_C2PMSG_71);
//smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
return retv;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2c424e435962..5211c1c0f3c0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -414,6 +414,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->optimized_required || dc->wm_optimized_required)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -454,6 +456,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
int i = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -484,6 +488,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
bool ret = false;
struct crtc_position position;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -603,6 +609,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
if (pipe == NULL)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
/* By default, capture the full frame */
param.windowa_x_start = 0;
param.windowa_y_start = 0;
@@ -662,6 +670,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
struct pipe_ctx *pipe;
struct timing_generator *tg;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
@@ -686,6 +696,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
int i;
struct pipe_ctx *pipe_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -721,6 +733,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
if (option > DITHER_OPTION_MAX)
return;
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
stream->dither_option = option;
memset(&params, 0, sizeof(params));
@@ -745,6 +759,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -762,6 +778,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -788,6 +806,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < num_streams; i++) {
struct dc_stream_state *stream = streams[i];
@@ -1766,6 +1786,8 @@ void dc_enable_stereo(
int i, j;
struct pipe_ctx *pipe;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1785,6 +1807,8 @@ void dc_enable_stereo(
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
@@ -2008,7 +2032,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
-static bool commit_minimal_transition_state(struct dc *dc,
+static bool commit_minimal_transition_state_legacy(struct dc *dc,
struct dc_state *transition_base_context);
/**
@@ -2041,6 +2065,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (!streams_changed(dc, streams, stream_count))
return res;
+ dc_exit_ips_for_hw_access(dc);
+
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
for (i = 0; i < stream_count; i++) {
@@ -2072,7 +2098,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
if (handle_exit_odm2to1)
- res = commit_minimal_transition_state(dc, dc->current_state);
+ res = commit_minimal_transition_state_legacy(dc, dc->current_state);
context = dc_state_create_current_copy(dc);
if (!context)
@@ -2428,6 +2454,10 @@ static enum surface_update_type get_scaling_info_update_type(
/* Changing clip size of a large surface may result in MPC slice count change */
update_flags->bits.bandwidth_change = 1;
+ if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
+ update_flags->bits.clip_size_change = 1;
+
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@@ -2441,7 +2471,8 @@ static enum surface_update_type get_scaling_info_update_type(
|| update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.position_change)
+ if (update_flags->bits.position_change ||
+ update_flags->bits.clip_size_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
@@ -2921,8 +2952,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
-static void backup_plane_states_for_stream(
- struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+static void backup_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
struct dc_stream_state *stream)
{
int i;
@@ -2931,12 +2962,20 @@ static void backup_plane_states_for_stream(
if (!status)
return;
- for (i = 0; i < status->plane_count; i++)
- plane_states[i] = *status->plane_states[i];
+ for (i = 0; i < status->plane_count; i++) {
+ scratch->plane_states[i] = *status->plane_states[i];
+ scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction;
+ scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func;
+ scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func;
+ scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func;
+ scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
+ }
+ scratch->stream_state = *stream;
+ scratch->out_transfer_func = *stream->out_transfer_func;
}
-static void restore_plane_states_for_stream(
- struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+static void restore_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
struct dc_stream_state *stream)
{
int i;
@@ -2945,8 +2984,16 @@ static void restore_plane_states_for_stream(
if (!status)
return;
- for (i = 0; i < status->plane_count; i++)
- *status->plane_states[i] = plane_states[i];
+ for (i = 0; i < status->plane_count; i++) {
+ *status->plane_states[i] = scratch->plane_states[i];
+ *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i];
+ *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i];
+ *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i];
+ *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i];
+ *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
+ }
+ *stream = scratch->stream_state;
+ *stream->out_transfer_func = scratch->out_transfer_func;
}
static bool update_planes_and_stream_state(struct dc *dc,
@@ -2972,7 +3019,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
- backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
+ backup_planes_and_stream_state(&dc->current_state->scratch, stream);
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
@@ -3072,7 +3119,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
*new_context = context;
*new_update_type = update_type;
- backup_plane_states_for_stream(context->scratch.plane_states, stream);
+ backup_planes_and_stream_state(&context->scratch, stream);
return true;
@@ -3376,6 +3423,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
@@ -3503,10 +3552,23 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
@@ -3541,17 +3603,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
-
- if (dc->debug.visual_confirm)
- dc_update_visual_confirm_color(dc, context, pipe);
- }
- }
-
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -4012,7 +4063,23 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
return minimal_transition_context;
}
-static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
+
+/**
+ * commit_minimal_transition_state - Commit a minimal state based on current or new context
+ *
+ * @dc: DC structure, used to get the current state
+ * @context: New context
+ * @stream: Stream getting the update for the flip
+ *
+ * The function takes in current state and new state and determine a minimal transition state
+ * as the intermediate step which could make the transition between current and new states
+ * seamless. If found, it will commit the minimal transition state and update current state to
+ * this minimal transition state and return true, if not, it will return false.
+ *
+ * Return:
+ * Return True if the minimal transition succeeded, false otherwise
+ */
+static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *context,
struct dc_stream_state *stream)
{
@@ -4021,12 +4088,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
struct pipe_split_policy_backup policy;
/* commit based on new context */
- /* Since all phantom pipes are removed in full validation,
- * we have to save and restore the subvp/mall config when
- * we do a minimal transition since the flags marking the
- * pipe as subvp/phantom will be cleared (dc copy constructor
- * creates a shallow copy).
- */
minimal_transition_context = create_minimal_transition_state(dc,
context, &policy);
if (minimal_transition_context) {
@@ -4043,7 +4104,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
if (!success) {
/* commit based on current context */
- restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
+ restore_planes_and_stream_state(&dc->current_state->scratch, stream);
minimal_transition_context = create_minimal_transition_state(dc,
dc->current_state, &policy);
if (minimal_transition_context) {
@@ -4056,7 +4117,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
}
- restore_plane_states_for_stream(context->scratch.plane_states, stream);
+ restore_planes_and_stream_state(&context->scratch, stream);
}
ASSERT(success);
@@ -4064,7 +4125,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
}
/**
- * commit_minimal_transition_state - Create a transition pipe split state
+ * commit_minimal_transition_state_legacy - Create a transition pipe split state
*
* @dc: Used to get the current state status
* @transition_base_context: New transition state
@@ -4081,7 +4142,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
* Return:
* Return false if something is wrong in the transition state.
*/
-static bool commit_minimal_transition_state(struct dc *dc,
+static bool commit_minimal_transition_state_legacy(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context;
@@ -4319,53 +4380,6 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context)
-{
- struct pipe_ctx *cur_pipe, *new_pipe;
- bool cur_is_odm_in_use, new_is_odm_in_use;
- struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
- struct dc_stream_status *new_stream_status = stream_get_status(context, stream);
-
- if (!dc->debug.enable_single_display_2to1_odm_policy ||
- !dc->config.enable_windowed_mpo_odm)
- /* skip the check if windowed MPO ODM or dynamic ODM is turned
- * off.
- */
- return false;
-
- if (context == dc->current_state)
- /* skip the check for fast update */
- return false;
-
- if (new_stream_status->plane_count != cur_stream_status->plane_count)
- /* plane count changed, not a plane scaling update so not the
- * case we are looking for
- */
- return false;
-
- cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
- new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
- if (!cur_pipe || !new_pipe)
- return false;
- cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
- new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
- if (cur_is_odm_in_use == new_is_odm_in_use)
- /* ODM state isn't changed, not the case we are looking for */
- return false;
-
- if (dc->hwss.is_pipe_topology_transition_seamless &&
- dc->hwss.is_pipe_topology_transition_seamless(
- dc, dc->current_state, context))
- /* transition can be achieved without the need for committing
- * minimal transition state first
- */
- return false;
-
- return true;
-}
-
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -4384,6 +4398,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
bool is_plane_addition = 0;
bool is_fast_update_only;
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
@@ -4396,7 +4412,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
- !commit_minimal_transition_state(dc, dc->current_state))
+ !commit_minimal_transition_state_legacy(dc, dc->current_state))
return false;
if (!update_planes_and_stream_state(
@@ -4411,32 +4427,19 @@ bool dc_update_planes_and_stream(struct dc *dc,
/* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
- /* Since all phantom pipes are removed in full validation,
- * we have to save and restore the subvp/mall config when
- * we do a minimal transition since the flags marking the
- * pipe as subvp/phantom will be cleared (dc copy constructor
- * creates a shallow copy).
- */
- if (!commit_minimal_transition_state(dc, context)) {
+ if (!commit_minimal_transition_state_legacy(dc, context)) {
dc_state_release(context);
return false;
}
update_type = UPDATE_TYPE_FULL;
}
- /* when windowed MPO ODM is supported, we need to handle a special case
- * where we can transition between ODM combine and MPC combine due to
- * plane scaling update. This transition will require us to commit
- * minimal transition state. The condition to trigger this update can't
- * be predicted by could_mpcc_tree_change_for_active_pipes because we
- * can only determine it after DML validation. Therefore we can't rely
- * on the existing commit minimal transition state sequence. Instead
- * we have to add additional handling here to handle this transition
- * with its own special sequence.
- */
- if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context))
- commit_minimal_transition_state_for_windowed_mpo_odm(dc,
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context)) {
+ commit_minimal_transition_state(dc,
context, stream);
+ }
update_seamless_boot_flags(dc, context, surface_count, stream);
if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
@@ -4504,6 +4507,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
int i, j;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4688,6 +4693,8 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_state_construct(dc, dc->current_state);
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
dc->hwss.init_hw(dc);
@@ -4829,6 +4836,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+void dc_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, false);
+}
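The new dc_exit_ips_for_hw_access() helper centralizes the pattern added throughout this patch: before touching registers, drop idle optimizations when the ASIC supports IPS. A minimal standalone sketch of that call pattern follows; the fake_* types and functions are simplified stand-ins for illustration, not the real dc structures.

#include <stdbool.h>
#include <stdio.h>

struct fake_dc {
	bool ips_support;                 /* mirrors dc->caps.ips_support */
	bool idle_optimizations_allowed;
};

static void fake_allow_idle_optimizations(struct fake_dc *dc, bool allow)
{
	dc->idle_optimizations_allowed = allow;
}

static void fake_exit_ips_for_hw_access(struct fake_dc *dc)
{
	/* Same shape as the new helper: only disallow idle when IPS exists. */
	if (dc->ips_support)
		fake_allow_idle_optimizations(dc, false);
}

static void program_some_registers(struct fake_dc *dc)
{
	/* Every HW-touching entry point calls the guard first. */
	fake_exit_ips_for_hw_access(dc);
	printf("programming registers (idle opts allowed: %d)\n",
	       dc->idle_optimizations_allowed);
}

int main(void)
{
	struct fake_dc dc = { .ips_support = true,
			      .idle_optimizations_allowed = true };

	program_some_registers(&dc);
	return 0;
}

As in the hunk above, the real helper simply forwards to dc_allow_idle_optimizations(dc, false) when dc->caps.ips_support is set.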
+
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
uint32_t idle_state = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 9fbdb09697fd..ec4bf9432bdb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -73,6 +73,7 @@
#include "dcn32/dcn32_resource.h"
#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
+#include "dcn351/dcn351_resource.h"
#define VISUAL_CONFIRM_BASE_DEFAULT 3
#define VISUAL_CONFIRM_BASE_MIN 1
@@ -195,6 +196,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
break;
case AMDGPU_FAMILY_GC_11_5_0:
dc_version = DCN_VERSION_3_5;
+ if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_3_51;
break;
default:
dc_version = DCE_VERSION_UNKNOWN;
@@ -303,6 +306,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_5:
res_pool = dcn35_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_3_51:
+ res_pool = dcn351_create_resource_pool(init_data, dc);
+ break;
#endif /* CONFIG_DRM_AMD_DC_FP */
default:
break;
@@ -1834,23 +1840,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
{
-#ifdef DBG
- if (pipe_ctx->stream == NULL) {
- /* a free pipe with dangling states */
- ASSERT(!pipe_ctx->plane_state);
- ASSERT(!pipe_ctx->prev_odm_pipe);
- ASSERT(!pipe_ctx->next_odm_pipe);
- ASSERT(!pipe_ctx->top_pipe);
- ASSERT(!pipe_ctx->bottom_pipe);
- } else if (pipe_ctx->top_pipe) {
- /* a secondary DPP pipe must be signed to a plane */
- ASSERT(pipe_ctx->plane_state)
- }
- /* Add more checks here to prevent corrupted pipe ctx. It is very hard
- * to debug this issue afterwards because we can't pinpoint the code
- * location causing inconsistent pipe context states.
- */
-#endif
switch (type) {
case OTG_MASTER:
return !pipe_ctx->prev_odm_pipe &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 54670e0b1518..51a970fcb5d0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -423,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
@@ -493,6 +495,8 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
if (dwb->funcs->set_fc_enable)
dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
@@ -542,6 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
/* disable writeback */
if (dc->hwss.disable_writeback) {
struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
@@ -557,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,
int num_dwb,
struct dc_writeback_info *wb_info)
{
+ dc_exit_ips_for_hw_access(dc);
+
if (dc->hwss.mmhubbub_warmup)
return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
else
@@ -569,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -597,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -628,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -664,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
+ dc_exit_ips_for_hw_access(dc);
+
return dc->hwss.dmdata_status_done(pipe);
}
@@ -698,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.program_dmdata_engine(pipe_ctx);
if (hubp->funcs->dmdata_set_attributes != NULL &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 19a2c7140ae8..19140fb65787 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -161,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index c9317ea0258e..ee8453bf958f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -51,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.266"
+#define DC_VER "3.2.273"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -429,12 +429,12 @@ struct dc_config {
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
- bool use_old_fixed_vs_sequence;
bool dc_mode_clk_limit_support;
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
+ bool usb4_bw_alloc_support;
};
enum visual_confirm {
@@ -987,9 +987,11 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool disable_dmub_reallow_idle;
bool disable_timeout;
bool disable_extblankadj;
unsigned int static_screen_wait_frames;
+ bool force_chroma_subsampling_1tap;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1068,6 +1070,7 @@ struct dc {
} scratch;
struct dml2_configuration_options dml2_options;
+ enum dc_acpi_cm_power_state power_state;
};
enum frame_buffer_mode {
@@ -1249,6 +1252,7 @@ union surface_update_flags {
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
+ uint32_t clip_size_change:1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
@@ -1568,7 +1572,19 @@ struct dc_link {
enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
+ /* The pending/current test patterns are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY patterns,
+ * so that specific lane adjust overrides can be applied before setting
+ * certain PHY test patterns. When lane adjust and set test pattern
+ * calls are not performed atomically (e.g. during link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain the context required for any
+ * future set pattern/set lane adjust call to transition between states.
+ */
enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
union compliance_test_state compliance_test_state;
void *priv;
@@ -2219,11 +2235,9 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_FP)
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
-#endif
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
@@ -2325,6 +2339,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_exit_ips_for_hw_access(struct dc *dc);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2b79a0e5638e..6083b1dcf050 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -74,7 +74,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -125,7 +128,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
int i;
@@ -133,6 +136,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
+ dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
for (i = 0 ; i < count; i++) {
@@ -145,7 +149,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
- dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
@@ -186,7 +192,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
// Wait for DMUB to process command
if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
@@ -780,21 +788,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
} else if (subvp_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
} else {
- pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
}
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
- if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
+ phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
} else if (phantom_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
} else {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
}
break;
}
@@ -1161,7 +1170,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
enum dmub_status status;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
@@ -1170,6 +1179,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return true;
+ dc_ctx = dc_dmub_srv->ctx;
+
if (wait) {
if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
do {
@@ -1190,11 +1201,17 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
+ struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
if (dc->debug.dmcub_emulation)
return;
+ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
+ return;
+
+ dc_dmub_srv = dc->ctx->dmub_srv;
+
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
@@ -1205,19 +1222,42 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
if (allow_idle) {
- if (dc->hwss.set_idle_state)
- dc->hwss.set_idle_state(dc, true);
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals new_signals;
+
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ memset(&new_signals, 0, sizeof(new_signals));
+
+ if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
+ dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+ new_signals.bits.allow_ips1 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ }
+
+ ips_driver->signals = new_signals;
}
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
/* We also do not perform a wait since DMCUB could enter idle after the notification. */
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
}
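The allow-idle path above replaces the old set_idle_state() handshake with driver "allow" signals written into DMUB shared state, derived from dc->config.disable_ips. A standalone sketch of that policy-to-signals mapping is below; the enum and bitfield are simplified stand-ins that mirror the names in the hunk, not the real DMUB shared-state layout, and IPS_DISABLE_ALL is a catch-all added here for illustration.

#include <stdio.h>

enum ips_policy {	/* mirrors the DMUB_IPS_* config values in the hunk */
	IPS_ENABLE,
	IPS_DISABLE_DYNAMIC,
	IPS_DISABLE_IPS1,
	IPS_DISABLE_IPS2,
	IPS_DISABLE_IPS2_Z10,
	IPS_DISABLE_ALL,	/* catch-all for illustration: allow nothing */
};

struct ips_signals {
	unsigned int allow_pg : 1;
	unsigned int allow_ips1 : 1;
	unsigned int allow_ips2 : 1;
	unsigned int allow_z10 : 1;
};

static struct ips_signals signals_for_policy(enum ips_policy p)
{
	struct ips_signals s = {0};

	switch (p) {
	case IPS_ENABLE:
	case IPS_DISABLE_DYNAMIC:
		s.allow_pg = s.allow_ips1 = s.allow_ips2 = s.allow_z10 = 1;
		break;
	case IPS_DISABLE_IPS1:
		s.allow_ips1 = 1;
		break;
	case IPS_DISABLE_IPS2:
		s.allow_pg = s.allow_ips1 = 1;
		break;
	case IPS_DISABLE_IPS2_Z10:
		s.allow_pg = s.allow_ips1 = s.allow_ips2 = 1;
		break;
	default:
		break;	/* any other policy leaves the signals zeroed */
	}
	return s;
}

int main(void)
{
	struct ips_signals s = signals_for_policy(IPS_DISABLE_IPS2);

	printf("pg=%u ips1=%u ips2=%u z10=%u\n",
	       s.allow_pg, s.allow_ips1, s.allow_ips2, s.allow_z10);
	return 0;
}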
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
- uint32_t allow_state = 0;
- uint32_t commit_state = 0;
+ struct dc_dmub_srv *dc_dmub_srv;
if (dc->debug.dmcub_emulation)
return;
@@ -1225,61 +1265,44 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
- if (dc->hwss.get_idle_state &&
- dc->hwss.set_idle_state &&
- dc->clk_mgr->funcs->exit_low_power_state) {
+ dc_dmub_srv = dc->ctx->dmub_srv;
- allow_state = dc->hwss.get_idle_state(dc);
- dc->hwss.set_idle_state(dc, false);
+ if (dc->clk_mgr->funcs->exit_low_power_state) {
+ volatile const struct dmub_shared_state_ips_fw *ips_fw =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
- if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
- // Wait for evaluation time
- for (;;) {
- udelay(dc->debug.ips2_eval_delay_us);
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_ALLOW_MASK)
- break;
+ ips_driver->signals.all = 0;
- /* allow was still set, retry eval delay */
- dc->hwss.set_idle_state(dc, false);
- }
+ if (prev_driver_signals.bits.allow_ips2) {
+ udelay(dc->debug.ips2_eval_delay_us);
- if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
+ if (ips_fw->signals.bits.ips2_commit) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
- for (;;) {
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS2_COMMIT_MASK)
- break;
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ while (ips_fw->signals.bits.ips2_commit)
udelay(1);
- }
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- /* TODO: See if we can return early here - IPS2 should go
- * back directly to IPS0 and clear the flags, but it will
- * be safer to directly notify DMCUB of this.
- */
- allow_state = dc->hwss.get_idle_state(dc);
+ dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
- if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
- for (;;) {
- commit_state = dc->hwss.get_idle_state(dc);
- if (commit_state & DMUB_IPS1_COMMIT_MASK)
- break;
-
+ if (prev_driver_signals.bits.allow_ips1) {
+ while (ips_fw->signals.bits.ips1_commit)
udelay(1);
- }
+
}
}
@@ -1361,7 +1384,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1410,7 +1433,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 811474f4419b..aae2f3a2660d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -827,9 +827,7 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_FP)
bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
-#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
@@ -942,6 +940,7 @@ struct dc_crtc_timing {
uint32_t hdmi_vic;
uint32_t rid;
uint32_t fr_index;
+ uint32_t frl_uncompressed_video_bandwidth_in_kbps;
enum dc_timing_3d_format timing_3d_format;
enum dc_color_depth display_color_depth;
enum dc_pixel_encoding pixel_encoding;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index f0458b8f00af..12f3c35b3a34 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi(
}
}
}
+static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return dc_fixpt_from_int(162); /* 162 MHz */
+ case LINK_RATE_HIGH:
+ return dc_fixpt_from_int(270); /* 270 MHz */
+ case LINK_RATE_HIGH2:
+ return dc_fixpt_from_int(540); /* 540 MHz */
+ case LINK_RATE_HIGH3:
+ return dc_fixpt_from_int(810); /* 810 MHz */
+ case LINK_RATE_UHBR10:
+ return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */
+ case LINK_RATE_UHBR13_5:
+ return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */
+ case LINK_RATE_UHBR20:
+ return dc_fixpt_from_int(625); /* 625 MHz */
+ default:
+ /* Unexpected case; requires debugging if encountered. */
+ ASSERT(0);
+ return dc_fixpt_from_int(0);
+ }
+}
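The constants in get_link_symbol_clk_freq_mhz() follow from the DP link coding: the symbol clock is the per-lane bit rate divided by the symbol size, 10 bits for 8b/10b rates and 32 bits for 128b/132b (UHBR) rates. A small standalone sketch reproducing the table values in plain floating point (illustrative only; the function name is not part of the driver):

#include <stdio.h>

static double symbol_clk_mhz(double lane_rate_gbps, int is_uhbr)
{
	double bits_per_symbol = is_uhbr ? 32.0 : 10.0;

	return lane_rate_gbps * 1000.0 / bits_per_symbol;
}

int main(void)
{
	printf("HBR3:     %.3f MHz\n", symbol_clk_mhz(8.1, 0));   /* 810 */
	printf("UHBR10:   %.3f MHz\n", symbol_clk_mhz(10.0, 1));  /* 312.5 */
	printf("UHBR13.5: %.3f MHz\n", symbol_clk_mhz(13.5, 1));  /* 421.875 */
	printf("UHBR20:   %.3f MHz\n", symbol_clk_mhz(20.0, 1));  /* 625 */
	return 0;
}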
+
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ /* Assuming L-PCM audio. The current implementation uses at most 1 layout
+ * per SDP, with each layout being the same size (8-channel layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
-/*For DP SST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpsst(
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
+ }
+
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+static uint32_t calculate_required_audio_bw_in_symbols(
const struct audio_crtc_info *crtc_info,
+ const struct dp_audio_layout_config *layout_config,
uint32_t channel_count,
- union audio_sample_rates *sample_rates)
+ uint32_t sample_rate_hz,
+ uint32_t av_stream_map_lane_count,
+ uint32_t audio_sdp_overhead)
+{
+ /* DP spec recommends a safety margin between 1.05 and 1.1 to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom);
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+ }
+
+ return required_symbols_per_hblank;
+}
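For readability, the same required-bandwidth calculation as calculate_required_audio_bw_in_symbols() restated in plain floating point as a standalone sketch; the driver uses fixed31_32 math. The layout constants here assume 8-channel L-PCM over 128b/132b (10 symbols per layout, 1 layout per SDP) as produced by get_audio_layout_config(), and the function names and example timing are illustrative.

#include <math.h>
#include <stdio.h>

static unsigned int pad_to_lanes(double symbols, unsigned int lanes)
{
	/* Mirrors the hunk: round up, then pad to the AV stream map width. */
	return ((unsigned int)(ceil(symbols) + lanes) / lanes) * lanes;
}

static unsigned int required_audio_symbols(double pixel_clk_khz,
					   unsigned int h_total,
					   double sample_rate_hz,
					   unsigned int symbols_per_layout,
					   unsigned int layouts_per_sample_denom,
					   unsigned int max_layouts_per_sdp,
					   unsigned int sdp_overhead,
					   unsigned int lanes)
{
	const double margin = 1.10; /* 1.05..1.1 safety margin from the DP spec */
	double line_freq_khz = pixel_clk_khz / h_total;
	double samples_per_line = (sample_rate_hz / 1000.0) / line_freq_khz;
	double layouts_per_line = samples_per_line / layouts_per_sample_denom;
	unsigned int full_sdps =
		(unsigned int)(layouts_per_line / max_layouts_per_sdp);
	double full_sdp_symbols =
		(max_layouts_per_sdp * symbols_per_layout + sdp_overhead) * margin;
	unsigned int required = full_sdps * pad_to_lanes(full_sdp_symbols, lanes);
	double leftover_layouts =
		layouts_per_line - (double)full_sdps * max_layouts_per_sdp;

	if (leftover_layouts > 0.0) {
		double partial = (leftover_layouts * symbols_per_layout +
				  sdp_overhead) * margin;

		required += pad_to_lanes(partial, lanes);
	}
	return required;
}

int main(void)
{
	/* 4k60-ish timing, 192 kHz 8ch audio, 4-lane 128b/132b link. */
	unsigned int symbols = required_audio_symbols(594000.0, 4400, 192000.0,
						      10, 1, 1, 10, 4);

	printf("required symbols per hblank: %u\n", symbols);
	return 0;
}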
+
+/* The current calculation is only applicable to 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_available_hblank_bw_in_symbols(
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info)
{
- /* do nothing */
+ uint64_t hblank = crtc_info->h_total - crtc_info->h_active;
+ struct fixed31_32 hblank_time_msec =
+ dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz);
+ struct fixed31_32 lsclkfreq_mhz =
+ get_link_symbol_clk_freq_mhz(dp_link_info->link_rate);
+ struct fixed31_32 average_stream_sym_bw_frac;
+ struct fixed31_32 peak_stream_bw_kbps;
+ struct fixed31_32 bits_per_pixel;
+ struct fixed31_32 link_bw_kbps;
+ struct fixed31_32 available_stream_sym_count;
+ uint32_t available_hblank_bw = 0; /* in stream symbols */
+
+ if (crtc_info->dsc_bits_per_pixel) {
+ bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16);
+ } else {
+ switch (crtc_info->color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_pixel = dc_fixpt_from_int(6);
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_pixel = dc_fixpt_from_int(8);
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_pixel = dc_fixpt_from_int(10);
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_pixel = dc_fixpt_from_int(12);
+ break;
+ default:
+ /* Default to commonly supported color depth. */
+ bits_per_pixel = dc_fixpt_from_int(8);
+ break;
+ }
+
+ bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3);
+
+ if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3);
+ bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2);
+ } else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2);
+ }
+ }
+
+ /* Use simple stream BW calculation because mainlink overhead is
+ * accounted for separately in the audio BW calculations.
+ */
+ peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10);
+ peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel);
+ link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps);
+ average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps);
+
+ available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000);
+ available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz);
+ available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac);
+ available_hblank_bw = dc_fixpt_floor(available_stream_sym_count);
+ available_hblank_bw *= dp_link_info->lane_count;
+ available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */
+
+ if (available_hblank_bw < dp_link_info->hblank_min_symbol_width)
+ available_hblank_bw = dp_link_info->hblank_min_symbol_width;
+
+ if (available_hblank_bw < 12)
+ available_hblank_bw = 0;
+ else
+ available_hblank_bw -= 12; /* Main link overhead */
+
+ return available_hblank_bw;
}
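Likewise, a plain-arithmetic restatement of calculate_available_hblank_bw_in_symbols(): the stream is entitled to a share of the hblank link symbols equal to its peak bandwidth divided by the total link bandwidth, less DSC EOC and main-link overhead. Standalone sketch only; it omits the hblank_min_symbol_width clamp and the pixel-format branches, and the link bandwidth in the example is a rough illustrative figure, not a value taken from the driver.

#include <stdio.h>

static unsigned int available_hblank_symbols(unsigned int h_total,
					     unsigned int h_active,
					     double pixel_clk_khz,
					     double bits_per_pixel,
					     double link_bw_kbps,
					     double symbol_clk_mhz,
					     unsigned int lanes,
					     unsigned int dsc_slices)
{
	double hblank_us = (h_total - h_active) / pixel_clk_khz * 1000.0;
	double stream_bw_kbps = pixel_clk_khz * bits_per_pixel;
	double stream_share = stream_bw_kbps / link_bw_kbps;
	double symbols = hblank_us * symbol_clk_mhz * stream_share * lanes;

	symbols -= dsc_slices * 4;	/* EOC overhead per DSC slice */
	symbols -= 12;			/* main link overhead */
	return symbols > 0.0 ? (unsigned int)symbols : 0;
}

int main(void)
{
	/* 4-lane link at ~38 Gbps effective, RGB 8 bpc, 4k60-ish timing, no DSC. */
	printf("available symbols: %u\n",
	       available_hblank_symbols(4400, 3840, 594000.0, 24.0,
					38.0e6, 312.5, 4, 0));
	return 0;
}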
-/*For DP MST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpmst(
+static void check_audio_bandwidth_dp(
const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info,
uint32_t channel_count,
union audio_sample_rates *sample_rates)
{
- /* do nothing */
+ struct dp_audio_layout_config layout_config = {0};
+ uint32_t available_hblank_bw;
+ uint32_t av_stream_map_lane_count;
+ uint32_t audio_sdp_overhead;
+
+ /* TODO: Add validation for the SST 8b/10b case */
+ if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING)
+ return;
+
+ available_hblank_bw = calculate_available_hblank_bw_in_symbols(
+ crtc_info, dp_link_info);
+ av_stream_map_lane_count = get_av_stream_map_lane_count(
+ dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+ audio_sdp_overhead = get_audio_sdp_overhead(
+ dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+ get_audio_layout_config(
+ channel_count, dp_link_info->encoding, &layout_config);
+
+ if (layout_config.max_layouts_per_audio_sdp == 0 ||
+ layout_config.symbols_per_layout == 0 ||
+ layout_config.layouts_per_sample_denom == 0) {
+ return;
+ }
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 192000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_192 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 176400,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_176_4 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 96000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_96 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 88200,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_88_2 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 48000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_48 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 44100,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_44_1 = 0;
+ if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+ crtc_info, &layout_config, channel_count, 32000,
+ av_stream_map_lane_count, audio_sdp_overhead))
+ sample_rates->rate.RATE_32 = 0;
}
static void check_audio_bandwidth(
const struct audio_crtc_info *crtc_info,
+ const struct audio_dp_link_info *dp_link_info,
uint32_t channel_count,
enum signal_type signal,
union audio_sample_rates *sample_rates)
@@ -271,12 +538,9 @@ static void check_audio_bandwidth(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- check_audio_bandwidth_dpsst(
- crtc_info, channel_count, sample_rates);
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- check_audio_bandwidth_dpmst(
- crtc_info, channel_count, sample_rates);
+ check_audio_bandwidth_dp(
+ crtc_info, dp_link_info, channel_count, sample_rates);
break;
default:
break;
@@ -394,7 +658,8 @@ void dce_aud_az_configure(
struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info)
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info)
{
struct dce_audio *aud = DCE_AUD(audio);
@@ -529,6 +794,7 @@ void dce_aud_az_configure(
check_audio_bandwidth(
crtc_info,
+ dp_link_info,
channel_count,
signal,
&sample_rates);
@@ -588,6 +854,7 @@ void dce_aud_az_configure(
check_audio_bandwidth(
crtc_info,
+ dp_link_info,
8,
signal,
&sample_rate);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index dbd2cfed0603..539f881928d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio);
void dce_aud_az_configure(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info);
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info);
void dce_aud_wall_dto_setup(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e8570060d007..5bca67407c5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -290,4 +290,5 @@ void dce_panel_cntl_construct(
dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
dce_panel_cntl->base.ctx = init_data->ctx;
dce_panel_cntl->base.inst = init_data->inst;
+ dce_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index ba1fec3016d5..bf636b28e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link)
{
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
+
+ if (link->replay_settings.replay_feature_enabled)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 38e4797e9476..b010814706fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -258,7 +258,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
*residency = 0;
}
-/**
+/*
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
@@ -280,7 +280,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-/**
+/*
* send Replay general cmd to DMUB.
*/
static void dmub_replay_send_cmd(struct dmub_replay *dmub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 3538973bd0c6..b7e57aa27361 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -62,6 +62,26 @@ void cm_helper_program_color_matrices(
}
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+ uint16_t *regval,
+ const struct color_matrices_reg *reg)
+{
+ uint32_t cur_csc_reg, regval0, regval1;
+ unsigned int i = 0;
+
+ for (cur_csc_reg = reg->csc_c11_c12;
+ cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) {
+ REG_GET_2(cur_csc_reg,
+ csc_c11, &regval0,
+ csc_c12, &regval1);
+
+ regval[2 * i] = regval0;
+ regval[(2 * i) + 1] = regval1;
+
+ i++;
+ }
+}
+
void cm_helper_program_xfer_func(
struct dc_context *ctx,
const struct pwl_params *params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 0a68b63d6126..decc50b1ac53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params);
-
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+ uint16_t *regval,
+ const struct color_matrices_reg *reg);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index ef52e6b6eccf..4e391fd1d71c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
.dpp_program_blnd_lut = NULL,
.dpp_program_shaper_lut = NULL,
- .dpp_program_3dlut = NULL
+ .dpp_program_3dlut = NULL,
+ .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap,
};
static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index c9e045666dcc..a039eedc7c24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1521,4 +1521,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
const struct dcn_dpp_registers *tf_regs,
const struct dcn_dpp_shift *tf_shift,
const struct dcn_dpp_mask *tf_mask);
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 904c2d278998..2f994a3a0b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -98,7 +98,7 @@ static void program_gamut_remap(
if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
- CM_GAMUT_REMAP_MODE, 0);
+ CM_GAMUT_REMAP_MODE, 0);
return;
}
switch (select) {
@@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn10_dpp *dpp,
+ uint16_t *regval,
+ enum gamut_remap_select *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ REG_GET(CM_GAMUT_REMAP_CONTROL,
+ CM_GAMUT_REMAP_MODE, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMB_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_read_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint16_t arr_reg_val[12];
+ enum gamut_remap_select select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
const uint16_t *regval)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0dec57679269..48a40dcc7050 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_types.h"
#include "dm_services.h"
#include "dcn10_opp.h"
#include "reg_helper.h"
@@ -160,6 +161,9 @@ static void opp1_set_pixel_encoding(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
+ bool force_chroma_subsampling_1tap =
+ oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap;
+
switch (params->pixel_encoding) {
case PIXEL_ENCODING_RGB:
@@ -178,6 +182,9 @@ static void opp1_set_pixel_encoding(
default:
break;
}
+
+ if (force_chroma_subsampling_1tap)
+ REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index eaa7032f0f1a..1516c0a48726 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base,
REG_GET(DPP_CONTROL,
DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // Degamma LUT (RAM)
REG_GET(CM_DGAM_CONTROL,
- CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
- // BGAM has no ROM, and definition is different, can't reuse same dump
- //REG_GET(CM_BLNDGAM_CONTROL,
- // CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode);
- REG_GET(CM_GAMUT_REMAP_CONTROL,
- CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
- if (s->gamut_remap_mode) {
- s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
- s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
- s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
- s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
- s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
- s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
- }
+ CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+
+ // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode,
+ CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_SIZE, &s->lut3d_size);
+
+ // Blend/Out Gamma (RAM)
+ REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK,
+ CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode);
}
void dpp2_power_on_obuf(
@@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
};
static struct dpp_caps dcn20_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index e735363d0051..672cde46c4b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,
void dpp2_power_on_obuf(
struct dpp *dpp_base,
bool power_on);
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 598caa508d43..58dc69926e8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn20_dpp *dpp,
+ uint16_t *regval,
+ enum dcn20_gamut_remap_select *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == DCN2_GAMUT_REMAP_COEF_B) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ uint16_t arr_reg_val[12];
+ enum dcn20_gamut_remap_select select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == DCN2_GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
void dpp2_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5da6e44f284a..16b5ff208d14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -542,8 +542,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
+static void mpc2_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+
+ /* Gamma block state */
+ REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst],
+ MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode);
+}
+
static const struct mpc_funcs dcn20_mpc_funcs = {
- .read_mpcc_state = mpc1_read_mpcc_state,
+ .read_mpcc_state = mpc2_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
index a7268027a472..f809a7d21033 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
};
static struct dpp_caps dcn201_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 11f7746f3a65..a3a769aad042 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -44,12 +44,45 @@
void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
{
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t gamcor_lut_mode, rgam_lut_mode;
REG_GET(DPP_CONTROL,
- DPP_CLOCK_ENABLE, &s->is_enabled);
+ DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // Pre-degamma (ROM)
+ REG_GET_2(PRE_DEGAM,
+ PRE_DEGAM_MODE, &s->pre_dgam_mode,
+ PRE_DEGAM_SELECT, &s->pre_dgam_select);
+
+ // Gamma Correction (RAM)
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode);
+ if (s->gamcor_mode) {
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode);
+ if (!gamcor_lut_mode)
+ s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
- // TODO: Implement for DCN3
+ // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_SIZE, &s->lut3d_size);
+
+ // Blend/Out Gamma (RAM)
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode);
+ if (s->rgam_lut_mode) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode);
+ if (!rgam_lut_mode)
+ s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
}
+
/*program post scaler scs block in dpp CM*/
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -1462,6 +1495,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index cea3208e4ab1..2ac8045a87a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -637,4 +637,6 @@ void dpp3_program_cm_dealpha(
struct dpp *dpp_base,
uint32_t enable, uint32_t additive_blending);
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 5f97a868ada3..2f5b3fbd3507 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -405,3 +405,57 @@ void dpp3_cm_set_gamut_remap(
program_gamut_remap(dpp, arr_reg_val, gamut_mode);
}
}
+
+static void read_gamut_remap(struct dcn3_dpp *dpp,
+ uint16_t *regval,
+ int *select)
+{
+ struct color_matrices_reg gam_regs;
+ uint32_t selection;
+
+ //current coefficient set in use
+ REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection);
+
+ *select = selection;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_read_color_matrices(dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint16_t arr_reg_val[12];
+ int select;
+
+ read_gamut_remap(dpp, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d1500b223858..bf3386cd444d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1129,6 +1129,64 @@ void mpc3_set_gamut_remap(
}
}
+static void read_gamut_remap(struct dcn30_mpc *mpc30,
+ int mpcc_id,
+ uint16_t *regval,
+ uint32_t *select)
+{
+ struct color_matrices_reg gam_regs;
+
+ //current coefficient set in use
+ REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select);
+
+ gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
+ gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
+
+ if (*select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
+
+ cm_helper_read_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
+
+ cm_helper_read_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ }
+
+}
+
+void mpc3_get_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ struct mpc_grph_gamut_adjustment *adjust)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint16_t arr_reg_val[12];
+ uint32_t select;
+
+ read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
+
+ if (select == GAMUT_REMAP_BYPASS) {
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ return;
+ }
+
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ convert_hw_matrix(adjust->temperature_matrix,
+ arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
bool mpc3_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
@@ -1382,8 +1440,54 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
}
}
+static void mpc3_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint32_t rmu_status = 0xf;
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+
+ /* Color blocks state */
+ REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status);
+
+ if (rmu_status == mpcc_inst) {
+ REG_GET(SHAPER_CONTROL[0],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+ REG_GET(RMU_3DLUT_MODE[0],
+ MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0],
+ MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(RMU_3DLUT_MODE[0],
+ MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+ } else {
+ REG_GET(SHAPER_CONTROL[1],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+ REG_GET(RMU_3DLUT_MODE[1],
+ MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1],
+ MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ REG_GET(RMU_3DLUT_MODE[1],
+ MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+ }
+
+ REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
+ MPCC_OGAM_MODE_CURRENT, &s->rgam_mode,
+ MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
+}
+
static const struct mpc_funcs dcn30_mpc_funcs = {
- .read_mpcc_state = mpc1_read_mpcc_state,
+ .read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 5198f2167c7c..9cb96ae95a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1056,6 +1056,10 @@ void mpc3_set_gamut_remap(
int mpcc_id,
const struct mpc_grph_gamut_adjustment *adjust);
+void mpc3_get_gamut_remap(struct mpc *mpc,
+ int mpcc_id,
+ struct mpc_grph_gamut_adjustment *adjust);
+
void mpc3_set_rmu_mux(
struct mpc *mpc,
int rmu_idx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
index ad0df1a72a90..9e96a3ace207 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
@@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(
dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;
dcn301_panel_cntl->base.ctx = init_data->ctx;
dcn301_panel_cntl->base.inst = init_data->inst;
+ dcn301_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 03248422d6ff..281be20b1a10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
+ uint8_t pwrseq_inst = 0xF;
+
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d761b0df2878..e224a028d68a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -34,6 +34,7 @@
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
#include "gpio_service_interface.h"
#ifndef MIN
@@ -61,6 +62,38 @@
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
void enc32_hw_init(struct link_encoder *enc)
{
@@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t dp_alt_mode_disable = 0;
- bool is_usb_c_alt_mode = false;
- if (enc->features.flags.bits.DP_IS_USB_C) {
- /* if value == 1 alt mode is disabled, otherwise it is enabled */
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
- }
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
- return is_usb_c_alt_mode;
+ return true;
}
-static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return false;
+
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+}
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t is_in_usb_c_dp4_mode = 0;
+ union dmub_rb_cmd cmd;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- /* in usb c dp2 mode, max lane count is 2 */
- if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- if (!is_in_usb_c_dp4_mode)
- link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
- }
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return;
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
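
The clamp above encodes the USB-C rule that DP alt mode without the 4-lane (DP4) pin assignment only exposes two lanes. A tiny standalone sketch of just that decision, with plain ints standing in for the DMUB reply fields (is_usb and is_dp4 mirror the fields used above; everything else here is a placeholder):

/* Standalone sketch, placeholder types: the lane-count rule applied above. */
#include <stdio.h>

enum lane_count { LANE_COUNT_TWO = 2, LANE_COUNT_FOUR = 4 };

static int clamp_lanes(int is_usb, int is_dp4, int max_lanes)
{
        /* DP alt mode without the DP4 pin assignment only routes two lanes. */
        if (is_usb && !is_dp4 && max_lanes > LANE_COUNT_TWO)
                return LANE_COUNT_TWO;
        return max_lanes;
}

int main(void)
{
        printf("%d\n", clamp_lanes(1, 0, LANE_COUNT_FOUR)); /* prints 2 */
        return 0;
}
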
+
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -203,13 +248,15 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
-
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether driver poll the I2C data pin
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index bbcfce06bec0..2d5f25290ed1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
index dcf12a0b031c..681e75c6dbaf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
@@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
index 1212fcee38f2..499052329ebb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
@@ -28,6 +28,7 @@
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "stream_encoder.h"
+#include "dcn10/dcn10_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
/* Register bit field name change */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 4229369c57f4..f4d3f04ec857 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -26,6 +26,9 @@
#ifndef DM_CP_PSP_IF__H
#define DM_CP_PSP_IF__H
+/*
+ * Interface to CPLIB/PSP to enable ASSR
+ */
struct dc_link;
struct cp_psp_stream_config {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 59ade76ffb18..c4a5efd2dda5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -92,6 +92,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
@@ -126,6 +127,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_rcflags)
@@ -157,6 +159,7 @@ DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
+DML += dcn351/dcn351_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 63c48c29ba49..e7f4a2d491cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -4273,7 +4273,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
@@ -4576,7 +4576,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Return BW
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->BlendingAndTiming[k] == k) {
@@ -4635,7 +4635,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
}
@@ -4646,7 +4646,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
@@ -4656,7 +4656,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
v->ReturnBusWidth * v->DCFCLKState[i][j],
@@ -4674,7 +4674,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Re-ordering Buffer Support Check
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
> (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
@@ -4692,7 +4692,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
}
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
@@ -4708,7 +4708,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Prefetch Check
- for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (i = start_state; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
int NextPrefetchModeState = MinPrefetchMode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index 3eb3a021ab7d..3f02bb806d42 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
+ } else {
+ /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+ * This is required for dcn303 because it just so happens that the memory
+ * bandwidth is low enough such that all the optimal DCFCLK for each UCLK
+ * is lower than the smallest DCFCLK STA target. In this case we need to
+ * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK.
+ */
+ if (j == num_uclk_states - 1) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ }
}
}
}
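
A standalone sketch of the selection loop with the fallback described in the comment above, using made-up clocks where every optimal DCFCLK falls below the smallest STA target:

/* Standalone sketch with made-up clock values, not driver code. */
#include <stdio.h>

int main(void)
{
        const int dcfclk_sta_targets[] = { 694, 875, 1000 };
        const int optimal_dcfclk_for_uclk[] = { 300, 400, 500 };
        const int memclk_mhz[] = { 400, 800, 1000 };
        const int num_sta = 3, num_uclk = 3;
        int optimal_uclk_for_sta[3] = { 0 };
        int i, j;

        for (i = 0; i < num_sta; i++) {
                for (j = 0; j < num_uclk; j++) {
                        if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
                                optimal_uclk_for_sta[i] = memclk_mhz[j] * 16;
                                break;
                        }
                        /* Fallback from the comment above: no UCLK state
                         * satisfies the target, so take the highest one. */
                        if (j == num_uclk - 1)
                                optimal_uclk_for_sta[i] = memclk_mhz[j] * 16;
                }
        }

        for (i = 0; i < num_sta; i++)
                printf("sta %d -> uclk %d\n",
                       dcfclk_sta_targets[i], optimal_uclk_for_sta[i]);
        return 0;
}
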
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a0a65e099104..b49e1dc9d8ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -623,7 +623,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
- !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 7ea2bd5374d5..80bebfc268db 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -583,12 +583,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
plane_count++;
}
- if (plane_count == 0) {
+ if (context->stream_count == 0 || plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW;
- } else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
bool is_pwrseq0 = link && link->link_index == 0;
- bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
int minmum_z8_residency =
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
@@ -596,12 +598,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
+ /*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && is_psr1)
+ else if (is_pwrseq0 && (is_psr || is_replay))
support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
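
Condensed into a pure function, the z-state decision above reads roughly as follows; a sketch with booleans standing in for the stream/link state and shortened enum names rather than the driver's dcn_zstate_support_state values. The default when no branch matches is assumed to be "disallow", as in the dcn351 variant later in this patch:

/* Standalone sketch, shortened names, not driver code. */
#include <stdio.h>

enum zstate { DISALLOW, ALLOW, ALLOW_Z8_ONLY, ALLOW_Z8_Z10_ONLY, ALLOW_Z10_ONLY };

static enum zstate decide_zstate(int nothing_to_display, int single_edp_stream,
                                 int is_pwrseq0, int is_psr_or_replay,
                                 int allow_z8, int allow_z10)
{
        if (nothing_to_display)
                return ALLOW;

        if (single_edp_stream) {
                if (is_pwrseq0 && allow_z10)
                        return ALLOW;
                if (is_pwrseq0 && is_psr_or_replay)
                        return allow_z8 ? ALLOW_Z8_Z10_ONLY : ALLOW_Z10_ONLY;
                if (allow_z8)
                        return ALLOW_Z8_ONLY;
        }
        return DISALLOW;
}

int main(void)
{
        /* Single eDP on pwrseq0 with PSR and enough Z8 residency. */
        printf("%d\n", decide_zstate(0, 1, 1, 1, 1, 0)); /* 3 = ALLOW_Z8_Z10_ONLY */
        return 0;
}
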
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
new file mode 100644
index 000000000000..dc9e1b758ed6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+#include "resource.h"
+#include "dcn351_fpu.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+#include "dcn351/dcn351_resource.h"
+#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dml/dml_inline_defs.h"
+
+#include "link.h"
+
+#define DC_LOGGER_INIT(logger)
+
+struct _vcs_dpi_ip_params_st dcn3_51_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 1536,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,/*not used*/
+ .opp_output_buffer_lines = 1,/*not used*/
+ .pixel_chunk_size_kbytes = 8,
+ //.alpha_pixel_chunk_size_kbytes = 4;/*new*/
+ //.min_pixel_chunk_size_bytes = 1024;/*new*/
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 12,/*delta from 10*/
+ .dsc422_native_support = true,/*delta from false*/
+ .is_line_buffer_bpp_fixed = true,/*new*/
+ .line_buffer_fixed_bpp = 32,/*delta from 48*/
+ .line_buffer_size_bits = 986880,/*delta from 789504*/
+ .max_line_buffer_lines = 32,/*delta from 12*/
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ /*.max_num_hdmi_frl_outputs = 1; new in dml2*/
+ /*.max_num_dp2p0_outputs = 2; new in dml2*/
+ /*.max_num_dp2p0_streams = 4; new in dml2*/
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 68,/*changed from 64,*/
+ .dpte_buffer_size_in_pte_reqs_chroma = 36,/*changed from 34*/
+ /*.dcc_meta_buffer_size_bytes = 6272; new to dml2*/
+ .dispclk_ramp_margin_percent = 1.11,/*delta from 1*/
+ /*.dppclk_delay_subtotal = 47;
+ .dppclk_delay_scl = 50;
+ .dppclk_delay_scl_lb_only = 16;
+ .dppclk_delay_cnvc_formatter = 28;
+ .dppclk_delay_cnvc_cursor = 6;
+ .dispclk_delay_subtotal = 125;*/ /*new to dml2*/
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 47, /* changed from 46,*/
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 28,/*changed from 27,*/
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 125, /*changed from 119,*/
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+// .config_return_buffer_segment_size_in_kbytes = 64;/*required, hard coded in dml2_translate_ip_params*/
+
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 210.0,
+ .sr_enter_plus_exit_z8_time_us = 320.0,
+ .fclk_change_latency_us = 24.0,
+ .usr_retraining_latency_us = 2,
+ .writeback_latency_us = 12.0,
+
+ .dram_channel_width_bytes = 4,/*not exist in dml2*/
+ .round_trip_ping_latency_dcfclk_cycles = 106,/*not exist in dml2*/
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .dram_clock_change_latency_us = 11.72,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_fabric_bw_after_urgent = 80.0, /*new to dml2*/
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = 0,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+/*
+ * dcn351_update_bw_bounding_box_fpu
+ *
+ * Override some of the hardcoded dcn3_51 ip/soc parameters (taken from the
+ * spreadsheet) with the actual values for the given SKU:
+ * - a few options passed in through dc->config
+ * - dentist_vco_frequency from the Clk Mgr (currently hardcoded, but might
+ * need to come from PM FW)
+ * - latency values (in ns) passed in dc->bb_overrides, for debugging purposes
+ * - latencies from VBIOS (in 100 ns units), if available for a certain SKU
+ * - the number of DRAM channels from VBIOS (which differs between SKUs of the
+ * same ASIC)
+ * - clock levels, from the clk_table entries passed by the Clk Mgr as reported
+ * by PM FW (which might differ between SKUs of the same ASIC)
+ */
+void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ struct clk_bw_params *bw_params)
+{
+ unsigned int i, closest_clk_lvl;
+ int j;
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dc->scratch.update_bw_bounding_box.clock_limits;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dc_assert_fp_enabled();
+
+ dcn3_51_ip.max_num_otg =
+ dc->res_pool->res_cap->num_timing_generator;
+ dcn3_51_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_51_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_51_soc.num_states - 1;
+ j >= 0; j--) {
+ if (dcn3_51_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_51_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz <
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+
+ clock_limits[i].fabricclk_mhz =
+ clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz =
+ clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz &&
+ clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts =
+ clk_table->entries[i].memclk_mhz * 2 *
+ clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ?
+ max_dispclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ?
+ max_dppclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ memcpy(dcn3_51_soc.clock_limits, clock_limits,
+ sizeof(dcn3_51_soc.clock_limits));
+
+ if (clk_table->num_entries)
+ dcn3_51_soc.num_states = clk_table->num_entries;
+
+ if (max_dispclk_mhz) {
+ dcn3_51_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+ if ((int)(dcn3_51_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_51_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
+ /*temp till dml2 fully work without dml1*/
+ dml_init_instance(&dc->dml, &dcn3_51_soc, &dcn3_51_ip,
+ DML_PROJECT_DCN31);
+
+ /*copy to dml2, before dml2_create*/
+ if (clk_table->num_entries > 2) {
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ dc->dml2_options.bbox_overrides.clks_table.num_states =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
+ clock_limits[i].dcfclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
+ clock_limits[i].fabricclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
+ clock_limits[i].dispclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
+ clock_limits[i].dppclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
+ clock_limits[i].socclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
+ clk_table->num_entries;
+ }
+ }
+
+ /* Update latency values */
+ dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_51_soc.dram_clock_change_latency_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_51_soc.sr_exit_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_51_soc.sr_enter_plus_exit_time_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_51_soc.sr_exit_z8_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_51_soc.sr_enter_plus_exit_z8_time_us;
+}
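
The per-entry matching above walks the hardcoded dcn3_51_soc states from the highest level down and keeps the first one whose DCFCLK does not exceed the requested clk_table entry; a standalone sketch of that backward search with made-up clock values:

/* Standalone sketch with made-up clocks, not driver code. */
#include <stdio.h>

int main(void)
{
        const int soc_dcfclk_mhz[] = { 400, 600, 738, 900, 1200 };
        const int num_states = 5;
        const int requested_dcfclk_mhz = 800;
        int closest_clk_lvl = 0;
        int j;

        /* Walk backwards; the first state at or below the request wins. */
        for (j = num_states - 1; j >= 0; j--) {
                if (soc_dcfclk_mhz[j] <= requested_dcfclk_mhz) {
                        closest_clk_lvl = j;
                        break;
                }
        }
        printf("closest level: %d (%d MHz)\n",
               closest_clk_lvl, soc_dcfclk_mhz[closest_clk_lvl]); /* 2, 738 MHz */
        return 0;
}
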
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+/*
+ * micro_sec_to_vert_lines() - convert a time in microseconds to a number of
+ * vertical lines for the given timing
+ *
+ * @num_us: number of microseconds
+ * @timing: CRTC timing that determines the line time
+ *
+ * Return: number of vertical lines, rounded up so that the returned line count
+ * always covers at least num_us.
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
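
As a concrete check of the conversion above: with a hypothetical 1080p-style timing (h_total = 2200, 148.5 MHz pixel clock) one line takes about 14.8 us, so the 668 us VBlankNomDefaultUS from dcn3_51_ip rounds up to 46 lines. The same arithmetic as a standalone sketch:

/* Standalone sketch in plain doubles; compile with -lm. */
#include <math.h>
#include <stdio.h>

static unsigned int us_to_lines(unsigned int num_us, unsigned int h_total,
                                unsigned int pix_clk_100hz)
{
        double line_time_ns = 1000.0 * ((double)h_total * 1000.0) /
                              ((double)pix_clk_100hz / 10.0);

        return (unsigned int)ceil(1000.0 * num_us / line_time_ns);
}

int main(void)
{
        /* Hypothetical timing: h_total 2200, 148.5 MHz pixel clock. */
        printf("%u\n", us_to_lines(668, 2200, 1485000)); /* prints 46 */
        return 0;
}
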
+
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
+
+int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+ const unsigned int max_allowed_vblank_nom = 1023;
+
+ dcn31_populate_dml_pipes_from_context(dc, context, pipes,
+ fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ num_lines = micro_sec_to_vert_lines(dcn3_51_ip.VBlankNomDefaultUS, timing);
+ v_back_porch = get_vertical_back_porch(timing);
+
+ if (pipe->stream->adjust.v_total_max ==
+ pipe->stream->adjust.v_total_min &&
+ pipe->stream->adjust.v_total_min > timing->v_total) {
+ pipes[pipe_cnt].pipe.dest.vtotal =
+ pipe->stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total -
+ pipes[pipe_cnt].pipe.dest.vactive;
+ }
+
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
+ // vblank_nom should not be smaller than VSync (timing->v_sync_width + v_back_porch) + 2
+ // + 2 is because:
+ // 1 -> VStartup_start should be 1 line before VSync
+ // 1 -> always reserve 1 line between the start of vblank and the vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height <
+ pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width <
+ pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the
+ * plane. We need to require support for immediate flip or
+ * underflow can be intermittently experienced depending on peak
+ * b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+
+ DC_FP_START();
+ dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ DC_FP_END();
+
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/
+ dc->config.enable_4to1MPC = false;
+
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 &&
+ pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) &&
+ pipe->plane_state->src_rect.width <= 5120) {
+ /*
+ * Limit to 5k max to avoid forced pipe split when there
+ * is not enough detile for swath
+ */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >=
+ dc->debug.crb_alloc_policy_min_disp_count &&
+ dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes =
+ dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP &&
+ dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode ==
+ dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy =
+ dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
+
+void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
+ unsigned int i, plane_count = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+ /*dcn351 does not support z9/z10*/
+ if (context->stream_count == 0 || plane_count == 0) {
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ bool is_pwrseq0 = link && link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+ int minimum_z8_residency =
+ dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
+
+ /*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
+ if (is_pwrseq0 && (is_psr || is_replay))
+ support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
+
+ }
+ context->bw_ctx.bw.dcn.clk.zstate_support = support;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
new file mode 100644
index 000000000000..f93efab9a668
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DCN351_FPU_H__
+#define __DCN351_FPU_H__
+
+#include "clk_mgr.h"
+
+void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ struct clk_bw_params *bw_params);
+
+int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 0baf39d64a2d..a52c594e1ba4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -141,14 +141,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
{
int i;
unsigned int num_found = 0;
- unsigned int plane_id_assigned_to_pipe;
+ unsigned int plane_id_assigned_to_pipe = -1;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
- if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state,
- state->res_ctx.pipe_ctx[i].stream->stream_id,
- ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) {
- if (plane_id_assigned_to_pipe == plane_id)
- pipes[num_found++] = i;
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || !pipe->stream)
+ continue;
+
+ get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id,
+ ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx],
+ &plane_id_assigned_to_pipe);
+ if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe
+ && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) {
+ while (pipe) {
+ struct pipe_ctx *mpc_pipe = pipe;
+
+ while (mpc_pipe) {
+ pipes[num_found++] = mpc_pipe->pipe_idx;
+ mpc_pipe = mpc_pipe->bottom_pipe;
+ if (!mpc_pipe)
+ break;
+ if (mpc_pipe->plane_state != pipe->plane_state)
+ mpc_pipe = NULL;
+ }
+ pipe = pipe->next_odm_pipe;
+ }
+ break;
}
}
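
The rewritten lookup starts from the head pipe of a plane and collects its whole topology: for each ODM slice (next_odm_pipe) it walks the MPC chain (bottom_pipe) while the pipes still reference the same plane. A minimal standalone sketch of that traversal over a toy pipe list (struct and field names are placeholders, not the driver's pipe_ctx):

/* Standalone sketch over a toy pipe list, not driver code. */
#include <stdio.h>
#include <stddef.h>

struct toy_pipe {
        int idx;
        const void *plane;            /* plane this pipe scans out */
        struct toy_pipe *bottom_pipe; /* MPC chain */
        struct toy_pipe *next_odm;    /* ODM chain */
};

static int collect_pipes(const struct toy_pipe *head, int *out)
{
        const struct toy_pipe *odm, *mpc;
        int n = 0;

        for (odm = head; odm; odm = odm->next_odm)
                for (mpc = odm; mpc && mpc->plane == head->plane;
                     mpc = mpc->bottom_pipe)
                        out[n++] = mpc->idx;
        return n;
}

int main(void)
{
        int plane_a;
        struct toy_pipe p3 = { 3, &plane_a, NULL, NULL };
        struct toy_pipe p2 = { 2, &plane_a, NULL, NULL };
        struct toy_pipe p1 = { 1, &plane_a, &p3, NULL };  /* MPC-split under p1 */
        struct toy_pipe p0 = { 0, &plane_a, &p2, &p1 };   /* ODM pair p0 -> p1 */
        int out[8], n, i;

        n = collect_pipes(&p0, out);
        for (i = 0; i < n; i++)
                printf("%d ", out[i]);                    /* prints 0 2 1 3 */
        printf("\n");
        return 0;
}
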
@@ -566,8 +585,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru
unsigned int num_found = 0;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
- if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
- pipes[num_found++] = i;
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ while (pipe) {
+ pipes[num_found++] = pipe->pipe_idx;
+ pipe = pipe->next_odm_pipe;
+ }
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 23a608274096..1ba6933d2b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -398,7 +398,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
}
@@ -437,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ // Override last out_state with data from last in_state
+ // This will ensure that out_state contains max fclk
+ memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
+ &p->in_states->state_array[p->in_states->num_states - 1],
+ sizeof(struct soc_state_bounding_box_st));
+ }
}
void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 1068b962d1c1..f15d1dbad6a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -234,7 +234,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state
if (state->streams[i]->stream_id == stream_id) {
for (j = 0; j < state->stream_status[i].plane_count; j++) {
if (state->stream_status[i].plane_states[j] == plane &&
- (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
+ (!is_plane_duplicate || (j == plane_index))) {
*plane_id = (i << 16) | j;
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 26307e599614..2a58a7687bdb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
}
for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
+ if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
+ dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
+ __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ break;
+ }
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0df6c55eb326..ac41f9c0a283 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
kbps = apply_128b_132b_stream_overhead(timing, kbps);
+ if (link_encoding == DC_LINK_ENCODING_HDMI_FRL &&
+ timing->vic == 0 && timing->hdmi_vic == 0 &&
+ timing->frl_uncompressed_video_bandwidth_in_kbps != 0)
+ kbps = timing->frl_uncompressed_video_bandwidth_in_kbps;
+
return kbps;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 279020535af7..8f1a95b77830 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -110,6 +110,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
+ case DCN_VERSION_3_51:
dal_hw_factory_dcn32_init(factory);
return true;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index d6b0a1af7d3e..37166b2b3fee 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -111,6 +111,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_3_2:
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
+ case DCN_VERSION_3_51:
dal_hw_translate_dcn32_init(translate);
return true;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 25ffc052d53b..99e17c164ce7 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dm_helpers.h"
#include "include/hdcp_msg_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 254136f8e3f9..9e8e9de51a92 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -180,6 +180,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
+HWSS_DCN351 = dcn351_init.o
+
+AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN351)
+
+###############################################################################
+
###############################################################################
endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 01493c49bd7a..9d5df4c0da59 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1291,6 +1291,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
+static void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ const struct dc_link *link = stream->link;
+ struct fixed31_32 link_bw_kbps;
+
+ dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings);
+ dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+ dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count;
+ dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate;
+
+ link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link,
+ &pipe_ctx->link_config.dp_link_settings));
+
+ /* For audio stream calculations, the video stream should not include FEC or SSC
+ * in order to get the most pessimistic values.
+ */
+ if (dp_link_info->encoding == DP_8b_10b_ENCODING &&
+ link->dc->link_srv->dp_is_fec_supported(link)) {
+ link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+ dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100));
+ } else if (dp_link_info->encoding == DP_128b_132b_ENCODING) {
+ link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+ dc_fixpt_from_fraction(10000, 9975)); /* undo the 0.25% SSC overhead (99.75% efficiency) */
+ }
+
+ dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps);
+
+ /* HW minimum for 128b/132b HBlank is 4 frame symbols.
+ * TODO: Plumb the actual programmed HBlank min symbol width to here.
+ */
+ if (dp_link_info->encoding == DP_128b_132b_ENCODING)
+ dp_link_info->hblank_min_symbol_width = 4;
+ else
+ dp_link_info->hblank_min_symbol_width = 0;
+}
+
static void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
@@ -1338,6 +1378,15 @@ static void build_audio_output(
audio_output->crtc_info.calculated_pixel_clock_100Hz =
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
+ audio_output->crtc_info.pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ audio_output->crtc_info.dsc_bits_per_pixel =
+ stream->timing.dsc_cfg.bits_per_pixel;
+
+ audio_output->crtc_info.dsc_num_slices =
+ stream->timing.dsc_cfg.num_slices_h;
+
/*for HDMI, audio ACR is with deep color ratio factor*/
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
audio_output->crtc_info.requested_pixel_clock_100Hz ==
@@ -1371,6 +1420,10 @@ static void build_audio_output(
audio_output->pll_info.ss_percentage =
pipe_ctx->pll_settings.ss_percentage;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info);
+ }
}
static void program_scaler(const struct dc *dc,
@@ -1507,7 +1560,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
&audio_output.crtc_info,
- &pipe_ctx->stream->audio_info);
+ &pipe_ctx->stream->audio_info,
+ &audio_output.dp_link_info);
}
/* make sure no pipes syncd to the pipe being enabled */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 6dd479e8a348..314798400b16 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -283,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
DTN_INFO("\n");
}
-void dcn10_log_hw_state(struct dc *dc,
- struct dc_log_buffer_ctx *log_ctx)
+static void dcn10_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
int i;
- DTN_INFO_BEGIN();
-
- dcn10_log_hubbub_state(dc, log_ctx);
-
- dcn10_log_hubp_states(dc, log_ctx);
-
- DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
- " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
- "C31 C32 C33 C34\n");
+ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
for (i = 0; i < pool->pipe_count; i++) {
struct dpp *dpp = pool->dpps[i];
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
- "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
+ DTN_INFO("[%2d]: %11xh %11s %9s %9s"
+ " %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
dpp->inst,
s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
@@ -329,16 +329,42 @@ void dcn10_log_hw_state(struct dc *dc,
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
"Unknown")))),
- s.gamut_remap_mode,
- s.gamut_remap_c11_c12,
- s.gamut_remap_c13_c14,
- s.gamut_remap_c21_c22,
- s.gamut_remap_c23_c24,
- s.gamut_remap_c31_c32,
- s.gamut_remap_c33_c34);
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
DTN_INFO("\n");
}
DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d oscs:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
for (i = 0; i < pool->pipe_count; i++) {
@@ -352,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc,
s.idle);
}
DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO_BEGIN();
+
+ dcn10_log_hubbub_state(dc, log_ctx);
+
+ dcn10_log_hubp_states(dc, log_ctx);
+
+ if (dc->hwss.log_color_state)
+ dc->hwss.log_color_state(dc, log_ctx);
+ else
+ dcn10_log_color_state(dc, log_ctx);
DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
@@ -1840,6 +1890,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ if (!stream)
+ return false;
+
if (dpp == NULL)
return false;
@@ -1862,8 +1915,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream != NULL && stream->ctx != NULL &&
- stream->out_transfer_func != NULL) {
+ if (stream->ctx &&
+ stream->out_transfer_func) {
log_tf(stream->ctx,
stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
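
The refactor above turns color-state logging into an optional per-ASIC hook: dcn10_log_hw_state() now calls hwss.log_color_state when a generation provides one and falls back to the DCN10 dump otherwise. A minimal standalone sketch of that dispatch pattern, using simplified stand-in types rather than the real dc/hwseq structures:

#include <stdio.h>

struct dc;

/* simplified stand-ins for the hwseq hook table and log context */
struct log_ctx { int unused; };
struct hwss_hooks {
	void (*log_color_state)(struct dc *dc, struct log_ctx *ctx); /* optional */
};
struct dc {
	struct hwss_hooks hwss;
};

static void dcn10_color_default(struct dc *dc, struct log_ctx *ctx)
{
	printf("dcn10 default color state dump\n");
}

static void dcn30_color(struct dc *dc, struct log_ctx *ctx)
{
	printf("dcn30 color state dump\n");
}

static void log_hw_state(struct dc *dc, struct log_ctx *ctx)
{
	/* same pattern as the patch: prefer the per-ASIC hook, else fall back */
	if (dc->hwss.log_color_state)
		dc->hwss.log_color_state(dc, ctx);
	else
		dcn10_color_default(dc, ctx);
}

int main(void)
{
	struct dc old = { .hwss = { .log_color_state = NULL } };
	struct dc new = { .hwss = { .log_color_state = dcn30_color } };
	struct log_ctx ctx = {0};

	log_hw_state(&old, &ctx);	/* falls back to the dcn10 default */
	log_hw_state(&new, &ctx);	/* uses the ASIC-specific hook */
	return 0;
}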
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 931ac8ed7069..c55d5155ecb9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -71,6 +71,112 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+void dcn20_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
+ " 3DLUT size RGAM mode GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (!s.is_enabled)
+ continue;
+
+ DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ dpp->inst,
+ (s.dgam_lut_mode == 0) ? "Bypass" :
+ ((s.dgam_lut_mode == 1) ? "sRGB" :
+ ((s.dgam_lut_mode == 2) ? "Ycc" :
+ ((s.dgam_lut_mode == 3) ? "RAM" :
+ ((s.dgam_lut_mode == 4) ? "RAM" :
+ "Unknown")))),
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_lut_mode == 1) ? "RAM A" :
+ ((s.rgam_lut_mode == 2) ? "RAM B" : "Bypass"),
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d ocsc:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
+
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
+ " OGAM mode\n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %9s\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle,
+ (s.rgam_mode == 1) ? "RAM A" :
+ ((s.rgam_mode == 2) ? "RAM B" :
+ "Bypass"));
+ }
+ DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
static int find_free_gsl_group(const struct dc *dc)
{
if (dc->res_pool->gsl_groups.gsl_0 == 0)
@@ -1633,6 +1739,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.clip_size_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
@@ -1645,6 +1752,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
@@ -1958,7 +2066,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index d950b3e54ec2..5c874f7b0683 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -28,6 +28,8 @@
#include "hw_sequencer_private.h"
+void dcn20_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
bool dcn20_set_shaper_3dlut(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index c34c13e1e0a4..7e6b7f2a6dc9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -69,6 +69,155 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+void dcn30_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
+ " 3DLUT mode 3DLUT bit depth 3DLUT size RGAM mode"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s = {0};
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (!s.is_enabled)
+ continue;
+
+ DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
+ " %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ dpp->inst,
+ s.pre_dgam_mode,
+ (s.pre_dgam_select == 0) ? "sRGB" :
+ ((s.pre_dgam_select == 1) ? "Gamma 2.2" :
+ ((s.pre_dgam_select == 2) ? "Gamma 2.4" :
+ ((s.pre_dgam_select == 3) ? "Gamma 2.6" :
+ ((s.pre_dgam_select == 4) ? "BT.709" :
+ ((s.pre_dgam_select == 5) ? "PQ" :
+ ((s.pre_dgam_select == 6) ? "HLG" :
+ "Unknown")))))),
+ (s.gamcor_mode == 0) ? "Bypass" :
+ ((s.gamcor_mode == 1) ? "RAM A" :
+ "RAM B"),
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_lut_mode == 0) ? "Bypass" :
+ ((s.rgam_lut_mode == 1) ? "RAM A" :
+ "RAM B"),
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+ DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
+ " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+ " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
+ " blnd_lut:%d ocsc:%d\n\n",
+ dc->caps.color.dpp.input_lut_shared,
+ dc->caps.color.dpp.icsc,
+ dc->caps.color.dpp.dgam_ram,
+ dc->caps.color.dpp.dgam_rom_caps.srgb,
+ dc->caps.color.dpp.dgam_rom_caps.bt2020,
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+ dc->caps.color.dpp.dgam_rom_caps.pq,
+ dc->caps.color.dpp.dgam_rom_caps.hlg,
+ dc->caps.color.dpp.post_csc,
+ dc->caps.color.dpp.gamma_corr,
+ dc->caps.color.dpp.dgam_rom_for_yuv,
+ dc->caps.color.dpp.hw_3d_lut,
+ dc->caps.color.dpp.ogam_ram,
+ dc->caps.color.dpp.ocsc);
+
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE"
+ " SHAPER mode 3DLUT mode 3DLUT bit-depth 3DLUT size OGAM mode OGAM LUT"
+ " GAMUT adjust "
+ "C11 C12 C13 C14 "
+ "C21 C22 C23 C24 "
+ "C31 C32 C33 C34 \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ mpc3_get_gamut_remap(pool->mpc, i, &s.gamut_remap);
+
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %11s %11s %16s %11s %10s %9s"
+ " %-12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle,
+ (s.shaper_lut_mode == 1) ? "RAM A" :
+ ((s.shaper_lut_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_mode == 1) ? "RAM A" :
+ ((s.lut3d_mode == 2) ? "RAM B" :
+ "Bypass"),
+ (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+ (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+ (s.rgam_mode == 0) ? "Bypass" :
+ ((s.rgam_mode == 2) ? "RAM" :
+ "Unknown"),
+ (s.rgam_mode == 1) ? "B" : "A",
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+
+ }
+ DTN_INFO("\n");
+ DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+ dc->caps.color.mpc.gamut_remap,
+ dc->caps.color.mpc.num_3dluts,
+ dc->caps.color.mpc.ogam_ram,
+ dc->caps.color.mpc.ocsc);
+}
+
bool dcn30_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
@@ -1015,21 +1164,3 @@ void dcn30_prepare_bandwidth(struct dc *dc,
if (!dc->clk_mgr->clks.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
}
-
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params)
-{
- unsigned int i;
- unsigned int triggers = 0;
-
- if (params->triggers.surface_update)
- triggers |= 0x100;
- if (params->triggers.cursor_update)
- triggers |= 0x8;
- if (params->triggers.force_trigger)
- triggers |= 0x1;
-
- for (i = 0; i < num_pipes; i++)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
- triggers, params->num_frames);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index e557e2b98618..638f018a3cb5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup(
unsigned int num_dwb,
struct dc_writeback_info *wb_info);
+void dcn30_log_color_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+
bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -90,7 +93,4 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params);
-
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 9894caedffed..ef913445a795 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 7423880fabb6..a760f0c6fe98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -98,10 +98,8 @@ static void enable_memory_low_power(struct dc *dc)
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
if (dc->res_pool->stream_enc[i]->vpg)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
-#endif
}
}
@@ -617,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
if (hws->ctx->dc->debug.hpo_optimization)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
+
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x100;
+ if (params->triggers.cursor_update)
+ triggers |= 0x8;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
index edfc01d6ad73..b8bc939da155 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
@@ -56,4 +56,8 @@ bool dcn31_is_abm_supported(struct dc *dc,
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 669f524bd064..c06cc2c5da92 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index ccb7e317e86a..542ce3b7f9e4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index e8ac94a005b8..2b073123d3ed 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn31_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 8b6c49622f3b..4b92df23ff0d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1342,8 +1342,8 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
{
int i = 0;
struct drr_params params = {0};
- // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
- unsigned int event_triggers = 0x800;
+ // DRR set trigger event mapped to OTG_TRIG_A
+ unsigned int event_triggers = 0x2; /* Bit[1]: OTG_TRIG_A */
// Note DRR trigger events are generated regardless of whether num frames met.
unsigned int num_frames = 2;
@@ -1377,3 +1377,20 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
}
}
}
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x200; /* bit 9: 10 0000 0000 */
+ if (params->triggers.cursor_update)
+ triggers |= 0x8; /* bit 3 */
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
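
As a side-by-side illustration of the two new trigger encodings (surface update on bit 8 in dcn31_set_static_screen_control versus bit 9 in the dcn35 variant above), a standalone sketch using a simplified stand-in for the dc_static_screen_params trigger flags:

#include <stdio.h>
#include <stdbool.h>

/* simplified stand-in for the triggers bitfield in dc_static_screen_params */
struct ss_triggers {
	bool surface_update;
	bool cursor_update;
	bool force_trigger;
};

/* DCN3.1-style encoding: surface update on bit 8 */
static unsigned int build_triggers_dcn31(const struct ss_triggers *t)
{
	unsigned int triggers = 0;

	if (t->surface_update)
		triggers |= 0x100;	/* bit 8 */
	if (t->cursor_update)
		triggers |= 0x8;	/* bit 3 */
	if (t->force_trigger)
		triggers |= 0x1;	/* bit 0 */
	return triggers;
}

/* DCN3.5-style encoding: surface update moved to bit 9 */
static unsigned int build_triggers_dcn35(const struct ss_triggers *t)
{
	unsigned int triggers = 0;

	if (t->surface_update)
		triggers |= 0x200;	/* bit 9 */
	if (t->cursor_update)
		triggers |= 0x8;	/* bit 3 */
	if (t->force_trigger)
		triggers |= 0x1;	/* bit 0 */
	return triggers;
}

int main(void)
{
	struct ss_triggers t = { .surface_update = true, .cursor_update = true };

	printf("dcn31 mask: 0x%x\n", build_triggers_dcn31(&t));	/* 0x108 */
	printf("dcn35 mask: 0x%x\n", build_triggers_dcn35(&t));	/* 0x208 */
	return 0;
}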
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index fd66316e33de..c354efa6c1b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -90,4 +90,7 @@ uint32_t dcn35_get_idle_state(const struct dc *dc);
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, struct dc_crtc_timing_adjust adjust);
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a630aa77dcec..a93073055e7b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -70,7 +70,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
deleted file mode 100644
index 951ca2da4486..000000000000
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-dal3_subdirectory_sources(
- dcn351_init.c
- dcn351_init.h
-)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 143d3fc0221c..ab17fa1c64e8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn30_set_static_screen_control,
+ .set_static_screen_control = dcn35_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 64ca7c66509b..f89f205e42a1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -339,6 +339,8 @@ struct hw_sequencer_funcs {
/* HW State Logging Related */
void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+ void (*log_color_state)(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
void (*get_hw_state)(struct dc *dc, char *pBuf,
unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index b3c62a82cb1c..554cfab5ab24 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -155,7 +155,6 @@ struct hwseq_private_funcs {
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
@@ -170,7 +169,6 @@ struct hwseq_private_funcs {
struct dc_state *context,
struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
-#endif
void (*reset_back_end_for_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 3a6bf77a6873..b1b72e688f74 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -522,6 +522,25 @@ struct dc_dmub_cmd {
enum dm_dmub_wait_type wait_type;
};
+struct dc_scratch_space {
+ /* used to temporarily backup plane states of a stream during
+ * dc update. The reason is that plane states are overwritten
+ * with surface updates in dc update. Once they are overwritten
+ * current state is no longer valid. We want to temporarily
+ * store current value in plane states so we can still recover
+ * a valid current state during dc update.
+ */
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+ struct dc_gamma gamma_correction[MAX_SURFACE_NUM];
+ struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM];
+ struct dc_3dlut lut3d_func[MAX_SURFACE_NUM];
+ struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM];
+ struct dc_transfer_func blend_tf[MAX_SURFACE_NUM];
+
+ struct dc_stream_state stream_state;
+ struct dc_transfer_func out_transfer_func;
+};
+
/**
* struct dc_state - The full description of a state requested by users
*/
@@ -604,16 +623,8 @@ struct dc_state {
unsigned int stutter_period_us;
} perf_params;
- struct {
- /* used to temporarily backup plane states of a stream during
- * dc update. The reason is that plane states are overwritten
- * with surface updates in dc update. Once they are overwritten
- * current state is no longer valid. We want to temporarily
- * store current value in plane states so we can still recover
- * a valid current state during dc update.
- */
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
- } scratch;
+
+ struct dc_scratch_space scratch;
};
struct replay_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
index 6ed1fb8c9300..b6203253111c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -43,7 +43,8 @@ struct audio_funcs {
void (*az_configure)(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
- const struct audio_info *audio_info);
+ const struct audio_info *audio_info,
+ const struct audio_dp_link_info *dp_link_info);
void (*wall_dto_setup)(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 6f4c97543c14..f4d4a68c91dc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -356,6 +356,7 @@ struct clk_mgr_internal {
long long wm_range_table_addr;
bool dpm_present;
+ bool pme_trigger_pending;
};
struct clk_mgr_internal_funcs {
@@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz)
return (khz + 999) / 1000;
}
+static inline int khz_to_mhz_floor(int khz)
+{
+ return khz / 1000;
+}
+
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
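
The new khz_to_mhz_floor() pairs with the existing khz_to_mhz_ceil(): one truncates while the other rounds up, so callers can pick whichever rounding direction keeps a clock within its limit. A trivial standalone example of the arithmetic:

#include <stdio.h>

/* same arithmetic as the two helpers above: round up vs. truncate */
static inline int khz_to_mhz_ceil(int khz)
{
	return (khz + 999) / 1000;
}

static inline int khz_to_mhz_floor(int khz)
{
	return khz / 1000;
}

int main(void)
{
	int khz = 48128;	/* example clock request */

	/* 48128 kHz -> ceil 49 MHz, floor 48 MHz */
	printf("ceil: %d MHz, floor: %d MHz\n",
	       khz_to_mhz_ceil(khz), khz_to_mhz_floor(khz));
	return 0;
}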
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 901891316dfb..2ae7484d18af 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -26,6 +26,12 @@
#ifndef __DAL_DCHUBBUB_H__
#define __DAL_DCHUBBUB_H__
+/**
+ * DOC: overview
+ *
+ * There is only one common DCHUBBUB. It contains the common request and return
+ * blocks for the Data Fabric Interface that are not clock/power gated.
+ */
enum dcc_control {
dcc_control__256_256_xxx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index f4aa76e02518..0f24afbf4388 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -27,6 +27,31 @@
#ifndef __DAL_DPP_H__
#define __DAL_DPP_H__
+/**
+ * DOC: overview
+ *
+ * The DPP (Display Pipe and Plane) block is the unified display data
+ * processing engine in DCN for processing graphic or video data on per DPP
+ * rectangle base. This rectangle can be a part of SLS (Single Large Surface),
+ * or a layer to be blended with other DPP, or a rectangle associated with a
+ * display tile.
+ *
+ * It provides various functions including:
+ * - graphic color keyer
+ * - graphic cursor compositing
+ * - graphic or video image source to destination scaling
+ * - image sharpening
+ * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4
+ * - Color Space Conversion
+ * - Host LUT gamma adjustment
+ * - Color Gamut Remap
+ * - brightness and contrast adjustment.
+ *
+ * DPP pipe consists of Converter and Cursor (CNVC), Scaler (DSCL), Color
+ * Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB) module
+ * connected in a video/graphics pipeline.
+ */
+
#include "transform.h"
#include "cursor_reg_cache.h"
@@ -141,6 +166,7 @@ struct dcn_dpp_state {
uint32_t igam_input_format;
uint32_t dgam_lut_mode;
uint32_t rgam_lut_mode;
+ // gamut_remap data for dcn10_get_cm_states()
uint32_t gamut_remap_mode;
uint32_t gamut_remap_c11_c12;
uint32_t gamut_remap_c13_c14;
@@ -148,6 +174,16 @@ struct dcn_dpp_state {
uint32_t gamut_remap_c23_c24;
uint32_t gamut_remap_c31_c32;
uint32_t gamut_remap_c33_c34;
+ // gamut_remap data for dcn*_log_color_state()
+ struct dpp_grph_csc_adjustment gamut_remap;
+ uint32_t shaper_lut_mode;
+ uint32_t lut3d_mode;
+ uint32_t lut3d_bit_depth;
+ uint32_t lut3d_size;
+ uint32_t blnd_lut_mode;
+ uint32_t pre_dgam_mode;
+ uint32_t pre_dgam_select;
+ uint32_t gamcor_mode;
};
struct CM_bias_params {
@@ -290,6 +326,9 @@ struct dpp_funcs {
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
+
+ void (*dpp_get_gamut_remap)(struct dpp *dpp_base,
+ struct dpp_grph_csc_adjustment *adjust);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 7f3f9b69e903..72610cd7eae0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -26,13 +26,24 @@
#ifndef __DAL_HUBP_H__
#define __DAL_HUBP_H__
+/**
+ * DOC: overview
+ *
+ * Display Controller Hub (DCHUB) is the gateway between the Scalable Data Port
+ * (SDP) and DCN. This component has multiple features, such as memory
+ * arbitration, rotation, and cursor manipulation.
+ *
+ * There is one HUBP allocated per pipe, which fetches data and converts
+ * different pixel formats (e.g. ARGB8888, NV12) into linear, interleaved
+ * and fixed-depth streams of pixel data.
+ */
+
#include "mem_input.h"
#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
-
enum cursor_pitch {
CURSOR_PITCH_64_PIXELS = 0,
CURSOR_PITCH_128_PIXELS,
@@ -146,9 +157,7 @@ struct hubp_funcs {
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_hubp_post_enable)(struct hubp *hubp);
-#endif
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
void (*set_cursor_attributes)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 61a2406dcc53..34a398f23fc6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -23,13 +23,28 @@
*/
/**
- * DOC: mpc-overview
+ * DOC: overview
*
- * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline
* that performs blending of multiple planes, using global and per-pixel alpha.
* It also performs post-blending color correction operations according to the
* hardware capabilities, such as color transformation matrix and gamma 1D and
* 3D LUT.
+ *
+ * MPC receives output from all DPP pipes and combines them to multiple outputs
+ * supporting "M MPC inputs -> N MPC outputs" flexible composition
+ * architecture. It features:
+ *
+ * - Programmable blending structure to allow software controlled blending and
+ * cascading;
+ * - Programmable window location of each DPP in active region of display;
+ * - Combining multiple DPP pipes in one active region when a single DPP pipe
+ * cannot process a very large surface;
+ * - Combining multiple DPP from different SLS with blending;
+ * - Stereo formats from single DPP in top-bottom or side-by-side modes;
+ * - Stereo formats from 2 DPPs;
+ * - Alpha blending of multiple layers from different DPP pipes;
+ * - Programmable background color;
*/
#ifndef __DC_MPCC_H__
@@ -83,34 +98,65 @@ enum mpcc_alpha_blend_mode {
/**
* struct mpcc_blnd_cfg - MPCC blending configuration
- *
- * @black_color: background color
- * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
- * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
- * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
- * @global_gain: used when blend mode considers both pixel alpha and plane
- * alpha value and assumes the global alpha value.
- * @global_alpha: plane alpha value
- * @overlap_only: whether overlapping of different planes is allowed
- * @bottom_gain_mode: blend mode for bottom gain setting
- * @background_color_bpc: background color for bpc
- * @top_gain: top gain setting
- * @bottom_inside_gain: blend mode for bottom inside
- * @bottom_outside_gain: blend mode for bottom outside
*/
struct mpcc_blnd_cfg {
- struct tg_color black_color; /* background color */
- enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */
- bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */
+ /**
+ * @black_color: background color.
+ */
+ struct tg_color black_color;
+
+ /**
+ * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE).
+ */
+ enum mpcc_alpha_blend_mode alpha_mode;
+
+ /**
+ * @pre_multiplied_alpha:
+ * Whether pixel color values were pre-multiplied by the alpha channel
+ * (MPCC_ALPHA_MULTIPLIED_MODE).
+ */
+ bool pre_multiplied_alpha;
+
+ /**
+ * @global_gain: Used when the blend mode considers both pixel alpha and
+ * plane alpha, assuming the global alpha value.
+ */
int global_gain;
+
+ /**
+ * @global_alpha: Plane alpha value.
+ */
int global_alpha;
+
+ /**
+ * @overlap_only: Whether overlapping of different planes is allowed.
+ */
bool overlap_only;
/* MPCC top/bottom gain settings */
+
+ /**
+ * @bottom_gain_mode: Blend mode for bottom gain setting.
+ */
int bottom_gain_mode;
+
+ /**
+ * @background_color_bpc: Background color for bpc.
+ */
int background_color_bpc;
+
+ /**
+ * @top_gain: Top gain setting.
+ */
int top_gain;
+
+ /**
+ * @bottom_inside_gain: Blend mode for bottom inside.
+ */
int bottom_inside_gain;
+
+ /**
+ * @bottom_outside_gain: Blend mode for bottom outside.
+ */
int bottom_outside_gain;
};
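
Since @pre_multiplied_alpha selects between straight and pre-multiplied source blending, here is a small standalone sketch of the src-over math the flag distinguishes (simplified to normalized doubles; the hardware operates on its own fixed-point formats):

#include <stdio.h>

/* src-over blend of one channel, alpha in [0.0, 1.0] */
static double blend_straight(double src, double dst, double a)
{
	/* straight (non-premultiplied) source: scale src by alpha here */
	return src * a + dst * (1.0 - a);
}

static double blend_premultiplied(double src_pm, double dst, double a)
{
	/* pre-multiplied source: src already carries the alpha factor */
	return src_pm + dst * (1.0 - a);
}

int main(void)
{
	double a = 0.25, src = 1.0, dst = 0.5;

	/* both paths give the same result when src_pm == src * a */
	printf("straight:      %.3f\n", blend_straight(src, dst, a));
	printf("premultiplied: %.3f\n", blend_premultiplied(src * a, dst, a));
	return 0;
}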
@@ -150,34 +196,58 @@ struct mpc_dwb_flow_control {
/**
* struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
- * @mpcc_id: MPCC physical instance
- * @dpp_id: DPP input to this MPCC
- * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
- * @blnd_cfg: the blending configuration for this MPCC
- * @sm_cfg: stereo mix setting for this MPCC
- * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
*
* This struct is used as a node in an MPC tree.
*/
struct mpcc {
- int mpcc_id; /* MPCC physical instance */
- int dpp_id; /* DPP input to this MPCC */
- struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
- struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
- struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
- bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
+ /**
+ * @mpcc_id: MPCC physical instance.
+ */
+ int mpcc_id;
+
+ /**
+ * @dpp_id: DPP input to this MPCC
+ */
+ int dpp_id;
+
+ /**
+ * @mpcc_bot: Pointer to bottom layer MPCC. NULL when not connected.
+ */
+ struct mpcc *mpcc_bot;
+
+ /**
+ * @blnd_cfg: The blending configuration for this MPCC.
+ */
+ struct mpcc_blnd_cfg blnd_cfg;
+
+ /**
+ * @sm_cfg: stereo mix setting for this MPCC
+ */
+ struct mpcc_sm_cfg sm_cfg;
+
+ /**
+ * @shared_bottom:
+ *
+ * If MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
+ */
+ bool shared_bottom;
};
/**
* struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
*
- * @opp_id: the OPP instance that owns this MPC tree
- * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
*
*/
struct mpc_tree {
- int opp_id; /* The OPP instance that owns this MPC tree */
- struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
+ /**
+ * @opp_id: The OPP instance that owns this MPC tree.
+ */
+ int opp_id;
+
+ /**
+ * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
+ */
+ struct mpcc *opp_list;
};
struct mpc {
@@ -199,6 +269,13 @@ struct mpcc_state {
uint32_t overlap_only;
uint32_t idle;
uint32_t busy;
+ uint32_t shaper_lut_mode;
+ uint32_t lut3d_mode;
+ uint32_t lut3d_bit_depth;
+ uint32_t lut3d_size;
+ uint32_t rgam_mode;
+ uint32_t rgam_lut;
+ struct mpc_grph_gamut_adjustment gamut_remap;
};
/**
@@ -217,16 +294,20 @@ struct mpc_funcs {
* Only used for planes that are part of blending chain for OPP output
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be added to.
- * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
- * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
- * stereo mix must disable for the very bottom layer of the tree config.
- * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
- * [in] dpp_id - DPP instance for the plane to be added.
- * [in] mpcc_id - The MPCC physical instance to use for blending.
- *
- * Return: struct mpcc* - MPCC that was added.
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be added to.
+ * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must disable for the very bottom layer of the tree config.
+ * - [in] insert_above_mpcc - Insert new plane above this MPCC.
+ * If NULL, insert as bottom plane.
+ * - [in] dpp_id - DPP instance for the plane to be added.
+ * - [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return:
+ *
+ * struct mpcc* - MPCC that was added.
*/
struct mpcc* (*insert_plane)(
struct mpc *mpc,
@@ -243,11 +324,14 @@ struct mpc_funcs {
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be removed from.
- * [in/out] mpcc - MPCC to be removed from tree.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be removed from.
+ * - [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return:
+ *
+ * void
*/
void (*remove_mpcc)(
struct mpc *mpc,
@@ -260,9 +344,12 @@ struct mpc_funcs {
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
- * [in/out] mpc - MPC context.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ *
+ * Return:
+ *
+ * void
*/
void (*mpc_init)(struct mpc *mpc);
void (*mpc_init_single_inst)(
@@ -275,11 +362,14 @@ struct mpc_funcs {
* Update the blending configuration for a specified MPCC.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in] blnd_cfg - MPCC blending configuration.
- * [in] mpcc_id - The MPCC physical instance.
*
- * Return: void
+ * - [in/out] mpc - MPC context.
+ * - [in] blnd_cfg - MPCC blending configuration.
+ * - [in] mpcc_id - The MPCC physical instance.
+ *
+ * Return:
+ *
+ * void
*/
void (*update_blending)(
struct mpc *mpc,
@@ -289,15 +379,18 @@ struct mpc_funcs {
/**
* @cursor_lock:
*
- * Lock cursor updates for the specified OPP.
- * OPP defines the set of MPCC that are locked together for cursor.
+ * Lock cursor updates for the specified OPP. OPP defines the set of
+ * MPCC that are locked together for cursor.
*
* Parameters:
- * [in] mpc - MPC context.
- * [in] opp_id - The OPP to lock cursor updates on
- * [in] lock - lock/unlock the OPP
*
- * Return: void
+ * - [in] mpc - MPC context.
+ * - [in] opp_id - The OPP to lock cursor updates on
+ * - [in] lock - lock/unlock the OPP
+ *
+ * Return:
+ *
+ * void
*/
void (*cursor_lock)(
struct mpc *mpc,
@@ -307,20 +400,25 @@ struct mpc_funcs {
/**
* @insert_plane_to_secondary:
*
- * Add DPP into secondary MPC tree based on specified blending position.
- * Only used for planes that are part of blending chain for DWB output
+ * Add DPP into secondary MPC tree based on specified blending
+ * position. Only used for planes that are part of blending chain for
+ * DWB output
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be added to.
- * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
- * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
- * stereo mix must disable for the very bottom layer of the tree config.
- * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
- * [in] dpp_id - DPP instance for the plane to be added.
- * [in] mpcc_id - The MPCC physical instance to use for blending.
- *
- * Return: struct mpcc* - MPCC that was added.
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be added to.
+ * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must disable for the very bottom layer of the tree config.
+ * - [in] insert_above_mpcc - Insert new plane above this MPCC. If
+ * NULL, insert as bottom plane.
+ * - [in] dpp_id - DPP instance for the plane to be added.
+ * - [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return:
+ *
+ * struct mpcc* - MPCC that was added.
*/
struct mpcc* (*insert_plane_to_secondary)(
struct mpc *mpc,
@@ -337,10 +435,14 @@ struct mpc_funcs {
* Remove a specified DPP from the 'secondary' MPC tree.
*
* Parameters:
- * [in/out] mpc - MPC context.
- * [in/out] tree - MPC tree structure that plane will be removed from.
- * [in] mpcc - MPCC to be removed from tree.
- * Return: void
+ *
+ * - [in/out] mpc - MPC context.
+ * - [in/out] tree - MPC tree structure that plane will be removed from.
+ * - [in] mpcc - MPCC to be removed from tree.
+ *
+ * Return:
+ *
+ * void
*/
void (*remove_mpcc_from_secondary)(
struct mpc *mpc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7617fabbd16e..aee5372e292c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -23,6 +23,22 @@
*
*/
+/**
+ * DOC: overview
+ *
+ * The Output Plane Processor (OPP) block contains functions that format
+ * pixel streams so that they are suitable for the display device.
+ * The key functions contained in the OPP are:
+ *
+ * - Adaptive Backlight Modulation (ABM)
+ * - Formatter (FMT) which provides pixel-by-pixel operations to format the
+ * incoming pixel stream.
+ * - Output Buffer that provides pixel replication and overlapping.
+ * - Interface between MPC and OPTC.
+ * - Clock and reset generation.
+ * - CRC generation.
+ */
+
#ifndef __DAL_OPP_H__
#define __DAL_OPP_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 5dcbaa2db964..e97d964a1791 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -57,7 +57,7 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
- uint32_t pwrseq_inst;
+ uint32_t eng_id;
};
struct panel_cntl {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 9a00a99317b2..d98d72f35be5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -182,9 +182,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
-#endif
void (*disable_phantom_crtc)(struct timing_generator *tg);
bool (*immediate_disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 076f667a82f6..2d4378780c1a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -170,4 +170,13 @@ IRQ_DCN35 = irq_service_dcn35.o
AMD_DAL_IRQ_DCN35= $(addprefix $(AMDDALPATH)/dc/irq/dcn35/,$(IRQ_DCN35))
-AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35)
\ No newline at end of file
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35)
+
+###############################################################################
+# DCN 351
+###############################################################################
+IRQ_DCN351 = irq_service_dcn351.o
+
+AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index e8baafa02443..916f0c974637 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 03c5e8ff8cbd..42cdfe6c3538 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
new file mode 100644
index 000000000000..7ec8e0de2f01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "../dce110/irq_service_dce110.h"
+
+
+#include "dcn/dcn_3_5_1_offset.h"
+#include "dcn/dcn_3_5_1_sh_mask.h"
+
+#include "irq_service_dcn351.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+static enum dc_irq_source to_dal_irq_source_dcn351(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC3_VLINE0;
+ case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC4_VLINE0;
+ case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC5_VLINE0;
+ case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC6_VLINE0;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
+ return DC_IRQ_SOURCE_DMCUB_OUTBOX;
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
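+
+/* The ack handler above re-arms HPD detection by programming the interrupt
+ * polarity from the currently sensed level, so the next plug or unplug edge
+ * fires again. A standalone sketch of that read-modify-write flow, with
+ * made-up bit positions standing in for the real HPD register fields:
+ *
+ *	#include <stdio.h>
+ *	#include <stdint.h>
+ *
+ *	#define HPD_SENSE_DELAYED	(1u << 4)	// illustrative only
+ *	#define HPD_INT_POLARITY	(1u << 8)	// illustrative only
+ *
+ *	static uint32_t hpd_int_control;	// stand-in for the control reg
+ *
+ *	static void hpd_ack_sketch(uint32_t status_reg)
+ *	{
+ *		int connected = !!(status_reg & HPD_SENSE_DELAYED);
+ *
+ *		if (connected)
+ *			hpd_int_control &= ~HPD_INT_POLARITY;	// wait for unplug
+ *		else
+ *			hpd_int_control |= HPD_INT_POLARITY;	// wait for plug
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		hpd_ack_sketch(HPD_SENSE_DELAYED);	// connected -> polarity 0
+ *		printf("%u\n", !!(hpd_int_control & HPD_INT_POLARITY));
+ *		hpd_ack_sketch(0);			// disconnected -> polarity 1
+ *		printf("%u\n", !!(hpd_int_control & HPD_INT_POLARITY));
+ *		return 0;
+ *	}
+ */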
+
+static struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_DMUB(reg_name)\
+ BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\
+ REG_STRUCT[base + reg_num].enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[0] = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[1] = \
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
+ REG_STRUCT[base + reg_num].ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base + reg_num].ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\
+ REG_STRUCT[base].enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[0] = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[1] = \
+ ~reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
+ REG_STRUCT[base].ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base].ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+
+#define hpd_rx_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\
+
+#define pflip_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\
+
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
+ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\
+
+#define vblank_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\
+
+#define vline0_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\
+
+#define dmub_outbox_int_entry()\
+ IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \
+ DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs
+
+#define dummy_irq_entry(irqno) \
+ REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\
+
+#define i2c_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num)
+
+#define dp_sink_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num)
+
+#define gpio_pad_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num)
+
+#define dc_underflow_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW)
+
+static struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+#define dcn351_irq_init_part_1() {\
+ dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \
+ hpd_int_entry(0); \
+ hpd_int_entry(1); \
+ hpd_int_entry(2); \
+ hpd_int_entry(3); \
+ hpd_int_entry(4); \
+ hpd_rx_int_entry(0); \
+ hpd_rx_int_entry(1); \
+ hpd_rx_int_entry(2); \
+ hpd_rx_int_entry(3); \
+ hpd_rx_int_entry(4); \
+ i2c_int_entry(1); \
+ i2c_int_entry(2); \
+ i2c_int_entry(3); \
+ i2c_int_entry(4); \
+ i2c_int_entry(5); \
+ i2c_int_entry(6); \
+ dp_sink_int_entry(1); \
+ dp_sink_int_entry(2); \
+ dp_sink_int_entry(3); \
+ dp_sink_int_entry(4); \
+ dp_sink_int_entry(5); \
+ dp_sink_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \
+ pflip_int_entry(0); \
+ pflip_int_entry(1); \
+ pflip_int_entry(2); \
+ pflip_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \
+ gpio_pad_int_entry(0); \
+ gpio_pad_int_entry(1); \
+ gpio_pad_int_entry(2); \
+ gpio_pad_int_entry(3); \
+ gpio_pad_int_entry(4); \
+ gpio_pad_int_entry(5); \
+ gpio_pad_int_entry(6); \
+ gpio_pad_int_entry(7); \
+ gpio_pad_int_entry(8); \
+ gpio_pad_int_entry(9); \
+ gpio_pad_int_entry(10); \
+ gpio_pad_int_entry(11); \
+ gpio_pad_int_entry(12); \
+ gpio_pad_int_entry(13); \
+ gpio_pad_int_entry(14); \
+ gpio_pad_int_entry(15); \
+ gpio_pad_int_entry(16); \
+ gpio_pad_int_entry(17); \
+ gpio_pad_int_entry(18); \
+ gpio_pad_int_entry(19); \
+ gpio_pad_int_entry(20); \
+ gpio_pad_int_entry(21); \
+ gpio_pad_int_entry(22); \
+ gpio_pad_int_entry(23); \
+ gpio_pad_int_entry(24); \
+ gpio_pad_int_entry(25); \
+ gpio_pad_int_entry(26); \
+ gpio_pad_int_entry(27); \
+ gpio_pad_int_entry(28); \
+ gpio_pad_int_entry(29); \
+ gpio_pad_int_entry(30); \
+ dc_underflow_int_entry(1); \
+ dc_underflow_int_entry(2); \
+ dc_underflow_int_entry(3); \
+ dc_underflow_int_entry(4); \
+ dc_underflow_int_entry(5); \
+ dc_underflow_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \
+ dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \
+}
+
+#define dcn351_irq_init_part_2() {\
+ vupdate_no_lock_int_entry(0); \
+ vupdate_no_lock_int_entry(1); \
+ vupdate_no_lock_int_entry(2); \
+ vupdate_no_lock_int_entry(3); \
+ vblank_int_entry(0); \
+ vblank_int_entry(1); \
+ vblank_int_entry(2); \
+ vblank_int_entry(3); \
+ vline0_int_entry(0); \
+ vline0_int_entry(1); \
+ vline0_int_entry(2); \
+ vline0_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \
+ dmub_outbox_int_entry(); \
+}
+
+#define dcn351_irq_init() {\
+ dcn351_irq_init_part_1(); \
+ dcn351_irq_init_part_2(); \
+}
+
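+/* Illustrative sketch only (not generated code): by analogy with
+ * IRQ_REG_ENTRY_DMUB above, a single hpd_int_entry(0) invocation roughly
+ * expands, with REG_STRUCT set to irq_source_info_dcn351, to assignments of
+ * the form:
+ *
+ *   irq_source_info_dcn351[DC_IRQ_SOURCE_HPD1 + 0].enable_reg  = SRI(DC_HPD_INT_CONTROL, HPD, 0);
+ *   irq_source_info_dcn351[DC_IRQ_SOURCE_HPD1 + 0].enable_mask = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK;
+ *   irq_source_info_dcn351[DC_IRQ_SOURCE_HPD1 + 0].ack_reg     = SRI(DC_HPD_INT_CONTROL, HPD, 0);
+ *   irq_source_info_dcn351[DC_IRQ_SOURCE_HPD1 + 0].ack_mask    = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK;
+ *   irq_source_info_dcn351[DC_IRQ_SOURCE_HPD1 + 0].funcs       = &hpd_irq_info_funcs;
+ *   irq_source_info_dcn351[DC_IRQ_SOURCE_HPD1 + 0].status_reg  = SRI(DC_HPD_INT_STATUS, HPD, 0);
+ *
+ * i.e. each *_int_entry() fills one irq_source_info slot with its enable/ack
+ * registers, masks and handler funcs.
+ */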
+static struct irq_source_info irq_source_info_dcn351[DAL_IRQ_SOURCES_NUMBER] = {0};
+
+static struct irq_service_funcs irq_service_funcs_dcn351 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn351
+};
+
+static void dcn351_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ struct dc_context *ctx = init_data->ctx;
+
+#define REG_STRUCT irq_source_info_dcn351
+ dcn351_irq_init();
+
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn351;
+ irq_service->funcs = &irq_service_funcs_dcn351;
+}
+
+struct irq_service *dal_irq_service_dcn351_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn351_irq_construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h
new file mode 100644
index 000000000000..4094631ffec6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2021 Advanced Micro Devices, Inc. */
+
+#ifndef __DAL_IRQ_SERVICE_DCN351_H__
+#define __DAL_IRQ_SERVICE_DCN351_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn351_create(
+ struct irq_service_init_data *init_data);
+
+#endif /* __DAL_IRQ_SERVICE_DCN351_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2d152b68a501..22b24749c9d2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -61,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
}
}
-static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
-{
- return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_SQUARE_END);
-}
-
-static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
-{
- if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
- test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
- return true;
- else
- return false;
-}
-
static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
@@ -361,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- if (is_dp_phy_sqaure_pattern(test_pattern)) {
+ if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) {
test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
core_link_read_dpcd(
link,
@@ -623,6 +607,8 @@ bool dp_set_test_pattern(
if (pipe_ctx == NULL)
return false;
+ link->pending_test_pattern = test_pattern;
+
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
@@ -643,12 +629,13 @@ bool dp_set_test_pattern(
/* Reset Test Pattern state */
link->test_pattern_enabled = false;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
return true;
}
/* Check for PHY Test Patterns */
- if (is_dp_phy_pattern(test_pattern)) {
+ if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
@@ -681,6 +668,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
if (p_link_settings != NULL)
dpcd_set_link_settings(link,
p_link_settings);
@@ -756,7 +744,7 @@ bool dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- if (is_dp_phy_sqaure_pattern(test_pattern))
+ if (IS_DP_PHY_SQUARE_PATTERN(test_pattern))
core_link_write_dpcd(link,
DP_LINK_SQUARE_PATTERN,
p_custom_pattern,
@@ -884,6 +872,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
link->current_test_pattern = test_pattern;
+ link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
}
return true;
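/* Sketch of the state added in this hunk (names as used above): the new
 * pending_test_pattern field tracks a request that is still being programmed,
 * while current_test_pattern continues to reflect what is actually applied:
 *
 *   link->pending_test_pattern = test_pattern;                // on entry
 *   ... program CRTC / PHY / DPCD test pattern ...
 *   link->current_test_pattern = test_pattern;                // committed
 *   link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; // request done
 *
 * Lower layers (e.g. the FIXED_VS lane-setting path changed later in this
 * patch) can then key off pending_test_pattern while programming is in flight.
 */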
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index f4633d3cf9b9..a1f72fe378ee 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -22,6 +22,16 @@
* Authors: AMD
*
*/
+
+/**
+ * DOC: overview
+ *
+ * Display Input Output (DIO) is the display input and output unit in DCN. It
+ * includes output encoders to support different display outputs, such as
+ * DisplayPort, HDMI, and DVI interfaces, among others. It also includes the control
+ * and status channels for these interfaces.
+ */
+
#ifndef __LINK_HWSS_DIO_H__
#define __LINK_HWSS_DIO_H__
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index b659baa23147..348ea4cb832d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
if (tp_params == NULL)
return false;
- if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
- link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
+ if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
// Deprogram overrides from previous test pattern
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
- }
switch (tp_params->dp_phy_pattern) {
case DP_TEST_PATTERN_80BIT_CUSTOM:
if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
pltpat_custom, tp_params->custom_pattern_size) != 0)
return false;
+ hw_tp_params.custom_pattern = tp_params->custom_pattern;
+ hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size;
break;
case DP_TEST_PATTERN_D102:
break;
@@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- return false;
-
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
- return false;
-
- return true;
+ return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
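/* Note on the simplification above: the 128b/132b LTTPR capability check now
 * lives in set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override() (early
 * return added earlier in this hunk), so selecting the FIXED_VS DIO link_hwss
 * depends only on EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN being set.
 */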
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index b621b97711b6..3e6c7be7e278 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
+ uint8_t clk_src = 0x4C;
+ uint8_t pattern = 0x4F; /* SQ128 */
+
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
- const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
- const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src};
+ const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src};
const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
- const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
- const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern};
+ const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern};
const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
@@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
if (tp_params == NULL)
return false;
- if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
- tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
+ if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) {
// Deprogram overrides from previously set square wave override
if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
link->current_test_pattern == DP_TEST_PATTERN_D102)
link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_exit_manual_automation_0[0],
sizeof(vendor_lttpr_exit_manual_automation_0));
- else
+ else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern))
dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
return false;
@@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link
dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);
- dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);
-
return true;
}
@@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
- link_res->hpo_dp_link_enc->funcs->set_ffe(
- link_res->hpo_dp_link_enc,
- link_settings,
- lane_settings[0].FFE_PRESET.raw);
-
- // FFE is programmed when retimer is programmed for SQ128, but explicit
- // programming needed here as well in case FFE-only update is requested
- if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
- link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
- dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+ // Don't update our HW FFE when outputting phy test patterns
+ if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) {
+ // Directly program FIXED_VS retimer FFE for SQ128 override
+ if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) {
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+ }
+ } else {
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
+ link_settings,
+ lane_settings[0].FFE_PRESET.raw);
+ }
}
static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
@@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
{
- if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- return false;
-
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
- return false;
-
- return true;
+ return requires_fixed_vs_pe_retimer_dio_link_hwss(link);
}
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 24153b0df503..b8c4a04dd175 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -41,6 +41,7 @@
#include "protocols/link_dp_dpia.h"
#include "protocols/link_dp_phy.h"
#include "protocols/link_dp_training.h"
+#include "protocols/link_dp_dpia_bw.h"
#include "accessories/link_dp_trace.h"
#include "link_enc_cfg.h"
@@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ /*
+ * If this is a DP-over-USB4 link, then we need to:
+ * - Enable BW ALLOC support on DPtx if applicable
+ */
+ if (dc->config.usb4_bw_alloc_support) {
+ if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
+ /* update with non-reduced link cap if bw allocation mode is supported */
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate =
+ link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count =
+ link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+ }
+ }
break;
}
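/* Note: nrd_max_link_rate/nrd_max_lane_count are the "non-reduced" DPIA caps
 * that appear to be populated when
 * link_dp_dpia_set_dptx_usb4_bw_alloc_support() succeeds; when present they
 * replace the reported link rate that was clamped to LINK_RATE_HIGH3 just
 * above, since bandwidth-allocation mode is expected to make the fuller
 * tunnel capability usable.
 */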
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 3cbfbf8d107e..a72de44a5747 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -2197,6 +2197,64 @@ static enum dc_status enable_link(
static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
{
+ struct dc_link *link = stream->sink->link;
+ int req_bw = bw;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dpia_bw_alloc_config.bw_alloc_enabled)
+ return false;
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ int sink_index = 0;
+ int i = 0;
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == NULL)
+ continue;
+
+ if (stream->sink->sink_id != link->remote_sinks[i]->sink_id)
+ req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i];
+ else
+ sink_index = i;
+ }
+
+ link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
+ }
+
+ /* get dp overhead for dp tunneling */
+ link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ req_bw += link->dpia_bw_alloc_config.dp_overhead;
+
+ if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) {
+ if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) {
+ DC_LOG_DEBUG("%s, Succeeded in allocating bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+ __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.dp_overhead);
+ } else {
+ // Cannot get the required bandwidth.
+ DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+ __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+ link->dpia_bw_alloc_config.dp_overhead);
+ return false;
+ }
+ } else {
+ DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__);
+ return false;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ int i = 0;
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == NULL)
+ continue;
+ DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__,
+ (const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]),
+ link->dpia_bw_alloc_config.remote_sink_req_bw[i]);
+ }
+ }
+
return true;
}
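/* Worked example of the accounting above (numbers are made up): an MST link
 * with two remote sinks where sink 1 previously requested 6000 and sink 0 now
 * requests bw = 9000, with a tunneling overhead of 500, yields
 *
 *   req_bw = 9000 + 6000 + 500 = 15500
 *
 * and the allocation is treated as successful only if, after
 * link_dp_dpia_allocate_usb4_bandwidth_for_stream() returns true,
 * req_bw <= link->dpia_bw_alloc_config.allocated_bw.
 */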
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 37d3027c32dc..cf22b8f28ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
-{
- uint8_t pwrseq_inst = 0xF;
- struct dc_context *dc_ctx = link->dc->ctx;
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- switch (link->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
- ASSERT(false);
- break;
- }
-
- return pwrseq_inst;
-}
-
-
static void link_destruct(struct dc_link *link)
{
int i;
@@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
- panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
+ panel_cntl_init_data.eng_id = link->eng_id;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 5b0bc7f6a188..1aed55b0ab6a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
struct dc_crtc_timing outputTiming = *timing;
-#if defined(CONFIG_DRM_AMD_DC_FP)
if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
-#endif
if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index 0050e0a06cbc..2fa4e64e2430 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -37,6 +37,7 @@
#include "clk_mgr.h"
#include "resource.h"
#include "link_enc_cfg.h"
+#include "atomfirmware.h"
#define DC_LOGGER \
link->ctx->logger
@@ -100,8 +101,11 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ // Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
- !is_immediate_downstream(link, offset))
+ !is_immediate_downstream(link, offset) &&
+ (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
if (link_hwss->ext.set_dp_lane_settings)
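/* With the change above, the early return is taken only when all of these
 * hold: LTTPR is in non-transparent mode, the target is not the immediate
 * downstream device, and either the PHY is not a FIXED_VS retimer
 * (EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN clear) or the link uses 8b/10b
 * encoding. A FIXED_VS retimer driving 128b/132b therefore always falls
 * through to link_hwss->ext.set_dp_lane_settings().
 */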
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 16a62e018712..e538c67d3ed9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1508,10 +1508,7 @@ enum link_training_result dp_perform_link_training(
* Non-LT AUX transactions inside training mode.
*/
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
- if (link->dc->config.use_old_fixed_vs_sequence)
- status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings);
- else
- status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
else if (encoding == DP_128b_132b_ENCODING)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 7087cdc9e977..b5cf75975fff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -186,356 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
return status;
}
-
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = 0;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
- const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
- const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
- const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
- const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
- const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
- uint8_t toggle_rate;
- uint8_t rate;
-
- /* Only 8b/10b is supported */
- ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
- return status;
- }
-
- if (offset != 0xFF) {
- if (offset == 2) {
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
-
- /* Certain display and cable configuration require extra delay */
- } else if (offset > 2) {
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
- }
- }
-
- /* Vendor specific: Reset lane settings */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- /* Vendor specific: Enable intercept */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
-
-
- /* 1. set link rate, lane count and spread. */
-
- downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
- if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = rate;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
-
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_dpmf[0],
- sizeof(vendor_lttpr_write_data_dpmf));
-
- if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
- }
-
- /* 2. Perform link training */
-
- /* Perform Clock Recovery Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- const uint8_t max_vendor_dpcd_retries = 10;
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- uint8_t i = 0;
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings */
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- 0);
-
- /* 2. update DPCD of the receiver */
- if (!retry_count) {
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration.
- */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- 0);
- /* Vendor specific: Disable intercept */
- for (i = 0; i < max_vendor_dpcd_retries; i++) {
- if (pre_disable_intercept_delay_ms != 0)
- msleep(pre_disable_intercept_delay_ms);
- if (link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis)))
- break;
-
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
- }
- } else {
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- dpcd_set_lane_settings(
- link,
- lt_settings,
- 0);
- }
-
- /* 3. wait receiver to lock-on*/
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 6. max VS reached*/
- if (dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings */
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient
- */
- if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- status = dp_get_cr_failure(lane_count, dpcd_lane_status);
- }
-
- /* Perform Channel EQ Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
-
- status = LINK_TRAINING_EQ_FAIL_EQ;
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
-
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
- link_configure_fixed_vs_pe_retimer(link->ddc,
- &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration
- */
-
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, 0);
- else
- dpcd_set_lane_settings(link, lt_settings, 0);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_EQ_FAIL_CR;
- break;
- }
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
- }
-
- return status;
-}
-
enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -620,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
rate = get_dpcd_link_rate(&lt_settings->link_settings);
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
- if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
- link->vendor_specific_lttpr_link_rate_wa = rate;
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+ }
core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
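/* Reading of the hunk above: the vendor-specific workaround first writes a
 * dummy DP_LINK_BW_SET value (0x0A if the target rate is 0x06, otherwise
 * 0x06) before the real rate, but it is now skipped when the LTTPR advertises
 * 128b/132b channel coding; such retimers take the unconditional
 * DP_LINK_BW_SET write below directly.
 */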
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
index c0d6ea329504..e61970e27661 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
@@ -28,11 +28,6 @@
#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
#include "link_dp_training.h"
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings);
-
enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index fc50931c2aec..c5de6ed5bf58 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -164,7 +164,7 @@ static void dpcd_extend_address_range(
if (new_addr_range.start != in_address || new_addr_range.end != end_address) {
*out_address = new_addr_range.start;
*out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end);
- *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL);
+ *out_data = kcalloc(*out_size, sizeof(**out_data), GFP_KERNEL);
}
}
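/* kcalloc(n, size, flags) is the overflow-checked form of
 * kzalloc(n * size, flags): if n * size would overflow it returns NULL
 * instead of silently allocating a short buffer, which is the preferred
 * pattern for array allocations such as this DPCD address-range buffer.
 */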
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 046d3e205415..acfbbc638cc6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -287,7 +287,7 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if < 1 nits or > 5000, it might be wrong readback
+ // if > 5000, it might be a wrong readback. 0 nits is a valid default value for an OLED panel.
if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;
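/* For reference (inferred from the thresholds and the removed comment): these
 * values are in millinits, so 150000 is the 150-nit fallback and the sanity
 * window 1000..5000000 corresponds to 1..5000 nits.
 */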
@@ -892,7 +892,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
- if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ if (replay != NULL && link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 0a75ed8962a5..184b1f23aa77 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -194,6 +194,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35)
###############################################################################
+RESOURCE_DCN351 = dcn351_resource.o
+
+AMD_DAL_RESOURCE_DCN351 = $(addprefix $(AMDDALPATH)/dc/resource/dcn351/,$(RESOURCE_DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
+
+###############################################################################
+
###############################################################################
endif
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index f9c5bc624be3..a2387cea1af9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 37a64186f324..ecc477ef8e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2169,6 +2169,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
+ } else {
+ /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+ * If it just so happens that the memory bandwidth is low enough such that
+ * all the optimal DCFCLKs for the UCLK states are lower than the smallest DCFCLK STA
+ * target, we need to populate the optimal UCLK for each DCFCLK STA target to
+ * be the max UCLK.
+ */
+ if (j == num_uclk_states - 1) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ }
}
}
}
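/* Worked example of the fallback above (illustrative numbers): if the optimal
 * DCFCLK per UCLK state works out to {400, 500, 600} MHz and the DCFCLK STA
 * targets are {700, 800} MHz, then no state satisfies
 * dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]; once j reaches the last
 * UCLK state, each STA target is paired with the maximum UCLK
 * (bw_params->clk_table.entries[num_uclk_states - 1].memclk_mhz * 16).
 */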
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 31035fc3d868..04d142f97474 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1941,8 +1941,6 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
- dc->config.use_old_fixed_vs_sequence = true;
-
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 6f10052caeef..3f3951f3ba98 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -2118,6 +2118,7 @@ static bool dcn32_resource_construct(
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.dc_mode_clk_limit_support = true;
+ dc->config.enable_windowed_mpo_odm = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 74412e5f03fe..b356fed1726d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1760,6 +1760,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->config.dc_mode_clk_limit_support = true;
+ dc->config.enable_windowed_mpo_odm = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 5fdcda8f8602..5d52853cac96 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -701,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {
// 6:1 downscaling ratio: 1000/6 = 166.666
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -764,6 +764,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+ .minimum_z8_residency_time = 2100,
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
@@ -782,6 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.psp_disabled_wa = true,
.ips2_eval_delay_us = 2000,
.ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
.static_screen_wait_frames = 2,
};
@@ -1905,7 +1907,8 @@ static bool dcn35_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
-
+ /* HW default is to have all FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
new file mode 100644
index 000000000000..5b486400dfdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -0,0 +1,2156 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn31/dcn31_init.h"
+#include "dcn351/dcn351_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn351_resource.h"
+
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn35/dcn35_hubbub.h"
+#include "dcn32/dcn32_mpc.h"
+#include "dcn35/dcn35_hubp.h"
+#include "irq/dcn351/irq_service_dcn351.h"
+#include "dcn35/dcn35_dpp.h"
+#include "dcn35/dcn35_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hwseq.h"
+#include "dcn35/dcn35_opp.h"
+#include "dcn35/dcn35_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+
+#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn32/dcn32_hpo_dp_link_encoder.h"
+#include "link.h"
+#include "dcn31/dcn31_apg.h"
+#include "dcn32/dcn32_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn35/dcn35_dccg.h"
+#include "dcn35/dcn35_pg_cntl.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn31/dcn31_panel_cntl.h"
+#include "dcn35/dcn35_hwseq.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
+#include "dml/dcn31/dcn31_fpu.h" /*todo*/
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dml/dcn351/dcn351_fpu.h"
+#include "dcn35/dcn35_dwb.h"
+#include "dcn35/dcn35_mmhubbub.h"
+
+#include "dcn/dcn_3_5_1_offset.h"
+#include "dcn/dcn_3_5_1_sh_mask.h"
+#include "nbio/nbio_7_11_0_offset.h"
+#include "mmhub/mmhub_3_3_0_offset.h"
+#include "mmhub/mmhub_3_3_0_sh_mask.h"
+
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+
+#include "dml2/dml2_wrapper.h"
+
+#include "link_enc_cfg.h"
+#define DC_LOGGER_INIT(logger)
+
+enum dcn351_clk_src_array_id {
+ DCN351_CLK_SRC_PLL0,
+ DCN351_CLK_SRC_PLL1,
+ DCN351_CLK_SRC_PLL2,
+ DCN351_CLK_SRC_PLL3,
+ DCN351_CLK_SRC_PLL4,
+ DCN351_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header files
+ */
+
+/* DCN */
+/* TODO awful hack. fixup dcn20_dwb.h */
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
+
+#define SRI(reg_name, block, id)\
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI2(reg_name, block, id)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SRII(reg_name, block, id)\
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
+
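+/* Illustrative expansion (register names here are only an example): with
+ * REG_STRUCT temporarily #defined to abm_regs, an invocation such as
+ *
+ *   SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, 0)
+ *
+ * resolves to
+ *
+ *   abm_regs[0].DC_ABM1_HG_SAMPLE_RATE =
+ *       ctx->dcn_reg_offsets[regABM0_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX] +
+ *       regABM0_DC_ABM1_HG_SAMPLE_RATE;
+ *
+ * i.e. the runtime segment base from ctx->dcn_reg_offsets plus the register
+ * constant taken from the dcn/dcn_3_5_1_offset.h header.
+ */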
+/* NBIO */
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
+
+static struct bios_registers bios_regs;
+
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK)
+};
+
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
+
+static struct dce_abm_registers abm_regs[4];
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
+
+static struct dce_audio_registers audio_regs[7];
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs_init(id)\
+ VPG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_vpg_registers vpg_regs[10];
+
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs_init(id)\
+ AFMT_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_afmt_registers afmt_regs[6];
+
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_apg_registers apg_regs[4];
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs_init(id)\
+ SE_DCN35_REG_LIST_RI(id)
+
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
+
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN35_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
+
+static struct dcn10_link_enc_registers link_enc_regs[5];
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \
+ //DPCS_DCN31_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \
+ //DPCS_DCN31_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
+
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
+
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn3_dpp_registers dpp_regs[4];
+
+static const struct dcn35_dpp_shift tf_shift = {
+ DPP_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dpp_mask tf_mask = {
+ DPP_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn35_opp_registers opp_regs[4];
+
+static const struct dcn35_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \
+ )
+
+static struct dce110_aux_registers aux_engine_regs[5];
+
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dcn30_dwbc_registers dwbc35_regs[1];
+
+static const struct dcn35_dwbc_shift dwbc35_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dwbc_mask dwbc35_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn35_mmhubbub_registers mcif_wb35_regs[1];
+
+static const struct dcn35_mmhubbub_shift mcif_wb35_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn35_mmhubbub_mask mcif_wb35_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define dsc_regsDCN35_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
+
+static struct dcn20_dsc_registers dsc_regs[4];
+
+static const struct dcn35_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+static struct dcn30_mpc_registers mpc_regs;
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
+
+static const struct dcn30_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
+
+static const struct dcn_optc_shift optc_shift = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN30_RI(id)
+
+static struct dcn_hubp2_registers hubp_regs[4];
+
+
+static const struct dcn35_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dcn_hubbub_registers hubbub_reg;
+
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN35(0)
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN35()
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct pg_cntl_registers pg_cntl_regs;
+
+#define pg_cntl_dcn35_regs_init() \
+ PG_CNTL_REG_LIST_DCN35()
+
+static const struct pg_cntl_shift pg_cntl_shift = {
+ PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct pg_cntl_mask pg_cntl_mask = {
+ PG_CNTL_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define SRII2(reg_name_pre, reg_name_post, id)\
+ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
+ ## id ## _ ## reg_name_post ## _BASE_IDX) + \
+ reg ## reg_name_pre ## id ## _ ## reg_name_post
+
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN35_REG_LIST()
+
+#define HWSEQ_DCN35_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh)
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN35_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN35_MASK_SH_LIST(_MASK)
+};
+
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
+
+static struct dcn_vmid_registers vmid_regs[16];
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn351 = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
+ .num_pll = 4, /* 1 c10 edp, 3x c20 combo PHY */
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_mpc_3dlut = 2,
+ .num_dsc = 4,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = true,
+ .ayuv = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ // 6:1 downscaling ratio: 1000/6 = 166.666
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 167,
+ .fp16 = 167
+ },
+ 64,
+ 64
+};
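For reference, these scaling limits are expressed in thousandths of the ratio: a max_downscale_factor of 250 corresponds to 1000/250 = 4:1 for ARGB8888, while 167 ≈ 1000/6 is the 6:1 limit called out in the comment above for NV12 and FP16; by the same convention the 16000 upscale entries allow up to 16x.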
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = false,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
+ .disable_clock_gate = false,
+ .disable_dsc_power_gate = true,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 4096, /* up to true 4K */
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .dwb_fi_phase = -1, // -1 = disable,
+ .dmub_command_table = true,
+ .pstate_enabled = true,
+ .use_max_lb = true,
+ .enable_mem_low_power = {
+ .bits = {
+ .vga = false,
+ .i2c = true,
+ .dmcu = false, // Previously known to cause a hang on S3 cycles if enabled
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
+ }
+ },
+ .root_clock_optimization = {
+ .bits = {
+ .dpp = true,
+ .dsc = true,/*dscclk and dsc pg*/
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = true,
+ .dpiasymclk = true,
+ }
+ },
+ .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
+ .enable_z9_disable_interface = true, /* Allow support for the PMFW interface to disable Z9 */
+ .using_dml2 = true,
+ .support_eDP1_5 = true,
+ .enable_hpo_pg_support = false,
+ .enable_legacy_fast_update = true,
+ .enable_single_display_2to1_odm_policy = true,
+ .disable_idle_power_optimizations = true,
+ .dmcub_emulation = false,
+ .disable_boot_optimizations = false,
+ .disable_unbounded_requesting = false,
+ .disable_mem_low_power = false,
+ //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
+ .enable_double_buffered_dsc_pg_support = true,
+ .enable_dp_dig_pixel_rate_div_policy = 1,
+ .disable_z10 = true,
+ .ignore_pg = true,
+ .psp_disabled_wa = true,
+ .ips2_eval_delay_us = 200,
+ .ips2_entry_delay_us = 400
+};
+
+static const struct dc_panel_config panel_config_defaults = {
+ .psr = {
+ .disable_psr = false,
+ .disallow_psrsu = false,
+ .disallow_replay = false,
+ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
+static void dcn35_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+ bool success = (dpp != NULL);
+
+ if (!success)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
+ success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift,
+ &tf_mask);
+ if (success) {
+ dpp35_set_fgcg(
+ dpp,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp);
+ return &dpp->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+static struct output_pixel_processor *dcn35_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
+ dcn35_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+
+ dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp);
+
+ return &opp->base;
+}
+
+static struct dce_aux *dcn31_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+/* ========================================================== */
+
+/*
+ * DPIA index | Preferred Encoder | Host Router
+ * 0 | C | 0
+ * 1 | First Available | 0
+ * 2 | D | 1
+ * 3 | First Available | 1
+ */
+/* ========================================================== */
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGD
+};
+
+static enum engine_id dcn351_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+ return dpia_to_preferred_enc_id_table[dpia_index];
+}
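The table above covers only DPIA indices 0-3 (two per host router), and dcn351_get_preferred_eng_id_dpia indexes it directly. A minimal bounds-checked variant is sketched below purely for illustration; it is not part of this patch and assumes the kernel's ARRAY_SIZE() macro plus an ENGINE_ID_UNKNOWN sentinel from the engine_id enum:

static enum engine_id dpia_preferred_eng_id_checked(unsigned int dpia_index)
{
        /* avoid reading past the 4-entry preference table */
        if (dpia_index >= ARRAY_SIZE(dpia_to_preferred_enc_id_table))
                return ENGINE_ID_UNKNOWN;

        return dpia_to_preferred_enc_id_table[dpia_index];
}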
+
+static struct dce_i2c_hw *dcn31_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+static struct mpc *dcn35_mpc_create(
+ struct dc_context *ctx,
+ int num_mpcc,
+ int num_rmu)
+{
+ struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL);
+
+ if (!mpc30)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
+ dcn32_mpc_construct(mpc30, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc,
+ num_rmu);
+
+ return &mpc30->base;
+}
+
+static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub3)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
+ hubbub35_construct(hubbub3, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask,
+ 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/
+ 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/
+ 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/);
+
+
+ for (i = 0; i < res_cap_dcn351.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub3->base;
+}
+
+static struct timing_generator *dcn35_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &optc_regs[instance];
+ tgn10->tg_shift = &optc_shift;
+ tgn10->tg_mask = &optc_mask;
+
+ dcn35_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+static struct link_encoder *dcn35_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+ if (!enc20)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
+ dcn35_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+/* Create a minimal link encoder object not associated with a particular
+ * physical connector.
+ * resource_funcs.link_enc_create_minimal
+ */
+static struct link_encoder *dcn31_link_enc_create_minimal(
+ struct dc_context *ctx, enum engine_id eng_id)
+{
+ struct dcn20_link_encoder *enc20;
+
+ if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
+ return NULL;
+
+ enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ if (!enc20)
+ return NULL;
+
+ dcn31_link_encoder_construct_minimal(
+ enc20,
+ ctx,
+ &link_enc_feature,
+ &link_enc_regs[eng_id - ENGINE_ID_DIGA],
+ eng_id);
+
+ return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dcn31_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dcn31_panel_cntl_construct(panel_cntl, init_data);
+
+ return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn31_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4),
+ audio_regs_init(5),
+ audio_regs_init(6);
+
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn31_vpg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
+
+ if (!vpg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
+ vpg31_construct(vpg31, ctx, inst,
+ &vpg_regs[inst],
+ &vpg_shift,
+ &vpg_mask);
+
+ return &vpg31->base;
+}
+
+static struct afmt *dcn31_afmt_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
+
+ if (!afmt31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
+ afmt31_construct(afmt31, ctx, inst,
+ &afmt_regs[inst],
+ &afmt_shift,
+ &afmt_mask);
+
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
+}
+
+static struct stream_encoder *dcn35_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1;
+ struct vpg *vpg;
+ struct afmt *afmt;
+ int vpg_inst;
+ int afmt_inst;
+
+ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+ if (eng_id <= ENGINE_ID_DIGF) {
+ vpg_inst = eng_id;
+ afmt_inst = eng_id;
+ } else
+ return NULL;
+
+ enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ afmt = dcn31_afmt_create(ctx, afmt_inst);
+
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
+ dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+ eng_id, vpg, afmt,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
+
+ /* allocate HPO stream encoder and create VPG sub-block */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct dce_hwseq *dcn351_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn31_create_audio,
+ .create_stream_encoder = dcn35_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
+ .create_hwseq = dcn351_hwseq_create,
+};
+
+static void dcn351_resource_destruct(struct dcn351_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ if (pool->base.stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+ pool->base.stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.stream_enc[i]->afmt != NULL) {
+ kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+ pool->base.stream_enc[i]->afmt = NULL;
+ }
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn35_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
+ if (pool->base.mpc_lut[i] != NULL) {
+ dc_3dlut_func_release(pool->base.mpc_lut[i]);
+ pool->base.mpc_lut[i] = NULL;
+ }
+ if (pool->base.mpc_shaper[i] != NULL) {
+ dc_transfer_func_release(pool->base.mpc_shaper[i]);
+ pool->base.mpc_shaper[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.multiple_abms[i] != NULL)
+ dce_abm_destroy(&pool->base.multiple_abms[i]);
+ }
+
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
+ if (pool->base.pg_cntl != NULL)
+ dcn_pg_cntl_destroy(&pool->base.pg_cntl);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn35_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+
+ if (!hubp2)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
+ if (hubp35_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx)
+{
+ dcn35_dwbc_set_fgcg(
+ dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb);
+}
+
+static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc30) {
+ dm_error("DC: failed to create dwbc30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dwbc35_regs
+ dwbc_regs_dcn3_init(0);
+
+ dcn35_dwbc_construct(dwbc30, ctx,
+ &dwbc35_regs[i],
+ &dwbc35_shift,
+ &dwbc35_mask,
+ i);
+
+ pool->dwbc[i] = &dwbc30->base;
+
+ dcn35_dwbc_init(dwbc30, ctx);
+ }
+ return true;
+}
+
+static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30,
+ struct dc_context *ctx)
+{
+ dcn35_mmhubbub_set_fgcg(
+ mcif_wb30,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub);
+}
+
+static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb30) {
+ dm_error("DC: failed to create mcif_wb30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb35_regs
+ mcif_wb_regs_dcn3_init(0);
+
+ dcn35_mmhubbub_construct(mcif_wb30, ctx,
+ &mcif_wb35_regs[i],
+ &mcif_wb35_shift,
+ &mcif_wb35_mask,
+ i);
+
+ dcn35_mmhubbub_init(mcif_wb30, ctx);
+
+ pool->mcif_wb[i] = &mcif_wb30->base;
+ }
+ return true;
+}
+
+static struct display_stream_compressor *dcn35_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN35_init(0),
+ dsc_regsDCN35_init(1),
+ dsc_regsDCN35_init(2),
+ dsc_regsDCN35_init(3);
+
+ dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ dsc35_set_fgcg(dsc,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc);
+ return &dsc->base;
+}
+
+static void dcn351_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn351_resource_pool *dcn351_pool = TO_DCN351_RES_POOL(*pool);
+
+ dcn351_resource_destruct(dcn351_pool);
+ kfree(dcn351_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn35_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
+
+static bool dcn351_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ out = dml2_validate(dc, context, fast_validate);
+
+ if (fast_validate)
+ return out;
+
+ DC_FP_START();
+ dcn351_decide_zstate_support(dc, context);
+ DC_FP_END();
+
+ return out;
+}
+
+
+static struct resource_funcs dcn351_res_pool_funcs = {
+ .destroy = dcn351_destroy_resource_pool,
+ .link_enc_create = dcn35_link_encoder_create,
+ .link_enc_create_minimal = dcn31_link_enc_create_minimal,
+ .link_encs_assign = link_enc_cfg_link_encs_assign,
+ .link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ .panel_cntl_create = dcn31_panel_cntl_create,
+ .validate_bandwidth = dcn351_validate_bandwidth,
+ .calculate_wm_and_dlg = NULL,
+ .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
+ .release_pipe = dcn20_release_pipe,
+ .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn35_get_panel_config_defaults,
+ .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
+};
+
+static bool dcn351_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn351_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dcn351;
+
+ pool->base.funcs = &dcn351_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 600;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ if (dc->config.forceHBR2CP2520)
+ dc->caps.force_dp_tps4_for_cp2520 = false;
+ dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
+
+ dc->caps.edp_dsc_support = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
+ dc->caps.seamless_odm = true;
+
+ dc->caps.zstate_support = true;
+ dc->caps.ips_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+ dc->caps.color.dpp.post_csc = 1;
+ dc->caps.color.dpp.gamma_corr = 1;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
+ // no OGAM ROM on DCN301
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 1;
+ dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
+ * to provide some margin.
+ * It's expected for future ASICs to have an equal or higher value, in order to
+ * have deterministic power improvement from generation to generation.
+ * (i.e., we should not expect new ASIC generation with lower vmin rate)
+ */
+ dc->caps.max_disp_clock_khz_at_vmin = 650000;
+
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ /* interop bit is implicit */
+ {
+ dc->caps.vbios_lttpr_aware = true;
+ }
+ }
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ /* Clock Sources for Pixel Clock*/
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL0] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL1] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL2] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL3] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN351_CLK_SRC_PLL4] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+
+ pool->base.clk_src_count = DCN351_CLK_SRC_TOTAL;
+
+ /* TODO: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+ /* temporary until dml2 fully works without dml1 */
+ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31);
+
+ /* TODO: DCCG */
+ pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT pg_cntl_regs
+ pg_cntl_dcn35_regs_init();
+
+ pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask);
+ if (pool->base.pg_cntl == NULL) {
+ dm_error("DC: failed to create power gate control!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* TODO: IRQ */
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn351_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* HUBBUB */
+ pool->base.hubbub = dcn35_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ /* HUBPs, DPPs, OPPs and TGs */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn35_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create hubps!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn35_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn35_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn35_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+ pool->base.timing_generator_count = i;
+
+ /* PSR */
+ pool->base.psr = dmub_psr_create(ctx);
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* ABM */
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.multiple_abms[i] = dmub_abm_create(ctx,
+ &abm_regs[i],
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.multiple_abms[i] == NULL) {
+ dm_error("DC: failed to create abm for pipe %d!\n", i);
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* MPC and DSC */
+ pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn35_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ /* DWB and MMHUBBUB */
+ if (!dcn35_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+
+ if (!dcn35_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ /* AUX and I2C */
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ /* DCN3.5 has 6 DPIA */
+ pool->base.usb4_dpia_count = 4;
+ if (dc->debug.dpia_debug.bits.disable_dpia)
+ pool->base.usb4_dpia_count = 0;
+
+ /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto create_fail;
+
+ /* HW Sequencer and Plane caps */
+ dcn351_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
+ dc->dml2_options.use_native_pstate_optimization = true;
+ dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
+ if (dc->config.EnableMinDispClkODM)
+ dc->dml2_options.minimize_dispclk_using_odm = true;
+ dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
+
+ dc->dml2_options.callbacks.dc = dc;
+ dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
+ dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
+ dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
+ dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
+ dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
+ dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
+ dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
+ dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
+ dc->dml2_options.max_segments_per_hubp = 24;
+ dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+
+ if (dc->config.sdpif_request_limit_words_per_umc == 0)
+ dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
+
+ return true;
+
+create_fail:
+
+ dcn351_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn351_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn351_resource_pool *pool =
+ kzalloc(sizeof(struct dcn351_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn351_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h
new file mode 100644
index 000000000000..f3e045777a3d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef _DCN351_RESOURCE_H_
+#define _DCN351_RESOURCE_H_
+
+#include "core_types.h"
+
+extern struct _vcs_dpi_ip_params_st dcn3_51_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc;
+
+#define TO_DCN351_RES_POOL(pool)\
+ container_of(pool, struct dcn351_resource_pool, base)
+
+struct dcn351_resource_pool {
+ struct resource_pool base;
+};
+
+struct resource_pool *dcn351_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#endif /* _DCN351_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c78c9224ab60..7785908a6676 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -78,6 +78,12 @@ struct dmub_srv_dcn31_regs;
struct dmcub_trace_buf_entry;
+/* enum dmub_window_memory_type - memory location type specification for windows */
+enum dmub_window_memory_type {
+ DMUB_WINDOW_MEMORY_TYPE_FB = 0,
+ DMUB_WINDOW_MEMORY_TYPE_GART
+};
+
/* enum dmub_status - return code for dmcub functions */
enum dmub_status {
DMUB_STATUS_OK = 0,
@@ -106,6 +112,7 @@ enum dmub_asic {
DMUB_ASIC_DCN32,
DMUB_ASIC_DCN321,
DMUB_ASIC_DCN35,
+ DMUB_ASIC_DCN351,
DMUB_ASIC_MAX,
};
@@ -119,6 +126,7 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
+ DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_TOTAL,
};
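The new DMUB_WINDOW_SHARED_STATE entry pairs with the per-window memory type introduced above: a caller describes where each window should live with an array indexed by enum dmub_window_id and hands it to the region/memory parameter structs below. A hedged sketch of such a table follows; which windows actually land in GART is an assumption here, not something taken from this patch:

static const enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
        /* entries left out default to 0, i.e. DMUB_WINDOW_MEMORY_TYPE_FB */
        [DMUB_WINDOW_5_TRACEBUFF]   = DMUB_WINDOW_MEMORY_TYPE_FB,
        [DMUB_WINDOW_6_FW_STATE]    = DMUB_WINDOW_MEMORY_TYPE_FB,
        [DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_WINDOW_MEMORY_TYPE_FB,
        [DMUB_WINDOW_SHARED_STATE]  = DMUB_WINDOW_MEMORY_TYPE_GART,
};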
@@ -203,7 +211,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size;
const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
- bool is_mailbox_in_inbox;
+ const enum dmub_window_memory_type *window_memory_type;
};
/**
@@ -223,7 +231,7 @@ struct dmub_srv_region_params {
*/
struct dmub_srv_region_info {
uint32_t fb_size;
- uint32_t inbox_size;
+ uint32_t gart_size;
uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL];
};
@@ -239,9 +247,10 @@ struct dmub_srv_region_info {
struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info;
void *cpu_fb_addr;
- void *cpu_inbox_addr;
+ void *cpu_gart_addr;
uint64_t gpu_fb_addr;
- uint64_t gpu_inbox_addr;
+ uint64_t gpu_gart_addr;
+ const enum dmub_window_memory_type *window_memory_type;
};
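Given a populated region info and the FB/GART split above, a driver could derive the CPU address of a window roughly as follows. This is only a sketch under the assumption that each region's base offset is relative to the start of its own (FB or GART) allocation; window_cpu_addr() is a hypothetical helper, not an API from this patch:

static void *window_cpu_addr(const struct dmub_srv_memory_params *params,
                             const struct dmub_srv_region_info *info,
                             enum dmub_window_id window)
{
        uint8_t *base;

        if (params->window_memory_type[window] == DMUB_WINDOW_MEMORY_TYPE_GART)
                base = (uint8_t *)params->cpu_gart_addr;
        else
                base = (uint8_t *)params->cpu_fb_addr;

        return base + info->regions[window].base;
}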
/**
@@ -361,7 +370,8 @@ struct dmub_srv_hw_funcs {
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
@@ -443,7 +453,6 @@ struct dmub_srv_create_params {
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs *hw_funcs;
void *user_ctx;
- struct dc_context *dc_ctx;
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
@@ -455,6 +464,7 @@ struct dmub_srv_create_params {
* @user_ctx: user provided context for the dmub_srv
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
+ * @shared_state: dmub shared state between firmware and driver
* @fw_state: dmub firmware state pointer
*/
struct dmub_srv {
@@ -463,6 +473,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
+ volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index e699731ee68e..a529e369b2ac 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -26,15 +26,6 @@
#ifndef DMUB_CMD_H
#define DMUB_CMD_H
-#if defined(_TEST_HARNESS) || defined(FPGA_USB4)
-#include "dmub_fw_types.h"
-#include "include_legacy/atomfirmware.h"
-
-#if defined(_TEST_HARNESS)
-#include <string.h>
-#endif
-#else
-
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -42,8 +33,6 @@
#include "atomfirmware.h"
-#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4)
-
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
@@ -403,15 +392,16 @@ union replay_debug_flags {
/**
* 0x400 (bit 10)
- * @force_disable_ips1: Force disable IPS1 state
+ * @enable_ips_visual_confirm: Enable IPS visual confirm when entering IPS
+ * If we enter IPS2, the Visual confirm bar will change to yellow
*/
- uint32_t force_disable_ips1 : 1;
+ uint32_t enable_ips_visual_confirm : 1;
/**
* 0x800 (bit 11)
- * @force_disable_ips2: Force disable IPS2 state
+ * @enable_ips_residency_profiling: Enable IPS residency profiling
*/
- uint32_t force_disable_ips2 : 1;
+ uint32_t enable_ips_residency_profiling : 1;
uint32_t reserved : 20;
} bitfields;
@@ -518,6 +508,8 @@ struct dmub_visual_confirm_color {
* @trace_buffer_size: size of the tracebuffer region
* @fw_version: the firmware version information
* @dal_fw: 1 if the firmware is DAL
+ * @shared_state_size: size of the shared state region in bytes
+ * @shared_state_features: number of shared state features
*/
struct dmub_fw_meta_info {
uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */
@@ -526,6 +518,9 @@ struct dmub_fw_meta_info {
uint32_t fw_version; /**< the firmware version information */
uint8_t dal_fw; /**< 1 if the firmware is DAL */
uint8_t reserved[3]; /**< padding bits */
+ uint32_t shared_state_size; /**< size of the shared state region in bytes */
+ uint16_t shared_state_features; /**< number of shared state features */
+ uint16_t reserved2; /**< padding bytes */
};
/**
@@ -670,6 +665,116 @@ enum dmub_fw_boot_options_bit {
};
//==============================================================================
+//< DMUB_SHARED_STATE>==========================================================
+//==============================================================================
+
+/**
+ * Shared firmware state between driver and firmware for lockless communication
+ * in situations where the inbox/outbox may be unavailable.
+ *
+ * Each structure *must* be at most 256-bytes in size. The layout allocation is
+ * described below:
+ *
+ * [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]...
+ */
+
+/**
+ * enum dmub_shared_state_feature_id - List of shared state features.
+ */
+enum dmub_shared_state_feature_id {
+ DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
+ DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
+ DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
+ DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
+};
+
+/**
+ * union dmub_shared_state_ips_fw_signals - Firmware signals for IPS.
+ */
+union dmub_shared_state_ips_fw_signals {
+ struct {
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
+ uint32_t reserved_bits : 30; /**< Reserved */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * union dmub_shared_state_ips_driver_signals - Driver signals for IPS.
+ */
+union dmub_shared_state_ips_driver_signals {
+ struct {
+ uint32_t allow_pg : 1; /**< 1 if PG is allowed */
+ uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
+ uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
+ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
+ uint32_t reserved_bits : 28; /**< Reserved bits */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * IPS FW Version
+ */
+#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_fw - Firmware state for IPS.
+ */
+struct dmub_shared_state_ips_fw {
+ union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * IPS Driver Version
+ */
+#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_driver - Driver state for IPS.
+ */
+struct dmub_shared_state_ips_driver {
+ union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_common - Generic payload.
+ */
+struct dmub_shared_state_feature_common {
+ uint32_t padding[62];
+}; /* 248-bytes, fixed */
+
+/**
+ * enum dmub_shared_state_feature_header - Feature description.
+ */
+struct dmub_shared_state_feature_header {
+ uint16_t id; /**< Feature ID */
+ uint16_t version; /**< Feature version */
+ uint32_t reserved; /**< Reserved bytes. */
+}; /* 8 bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_block - Feature block.
+ */
+struct dmub_shared_state_feature_block {
+ struct dmub_shared_state_feature_header header; /**< Shared state header. */
+ union dmub_shared_feature_state_union {
+ struct dmub_shared_state_feature_common common; /**< Generic data */
+ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
+ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
+ } data; /**< Shared state data. */
+}; /* 256-bytes, fixed */
+
+/**
+ * Shared state size in bytes.
+ */
+#define DMUB_FW_HEADER_SHARED_STATE_SIZE \
+ ((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block))
+
+//==============================================================================
//</DMUB_STATUS>================================================================
//==============================================================================
//< DMUB_VBIOS>=================================================================
@@ -1270,11 +1375,11 @@ struct dmub_cmd_PLAT_54186_wa {
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */
struct {
- uint8_t hubp_inst : 4; /**< HUBP instance */
- uint8_t tmz_surface : 1; /**< TMZ enable or disable */
- uint8_t immediate :1; /**< Immediate flip */
- uint8_t vmid : 4; /**< VMID */
- uint8_t grph_stereo : 1; /**< 1 if stereo */
+ uint32_t hubp_inst : 4; /**< HUBP instance */
+ uint32_t tmz_surface : 1; /**< TMZ enable or disable */
+ uint32_t immediate :1; /**< Immediate flip */
+ uint32_t vmid : 4; /**< VMID */
+ uint32_t grph_stereo : 1; /**< 1 if stereo */
uint32_t reserved : 21; /**< Reserved */
} flip_params; /**< Pageflip parameters */
uint32_t reserved[9]; /**< Reserved bits */
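
For readers following the shared-state layout comment above ([Header (256 Bytes)][Feature 1 (256 Bytes)]...), here is a minimal, self-contained C sketch of how a 256-byte feature block could be located by its header id. The feature_header/feature_block structures only mirror the dmub_shared_state_* definitions in the hunk; find_feature(), main() and the mock buffer are illustrative assumptions, not helpers added by this patch.

#include <stdint.h>
#include <stdio.h>

/* Mirrors dmub_shared_state_feature_header (8 bytes). */
struct feature_header {
	uint16_t id;
	uint16_t version;
	uint32_t reserved;
};

/* Mirrors dmub_shared_state_feature_block (256 bytes total). */
struct feature_block {
	struct feature_header header;
	uint8_t data[248];
};

/*
 * Walk the [header][feature 1][feature 2]... array and return the first
 * feature block whose id matches, or NULL if it is absent.  Block 0 is
 * the leading header block described in the layout comment.
 */
static const struct feature_block *find_feature(const struct feature_block *base,
						unsigned int num_blocks,
						uint16_t id)
{
	unsigned int i;

	for (i = 1; i < num_blocks; i++)
		if (base[i].header.id == id)
			return &base[i];
	return NULL;
}

int main(void)
{
	struct feature_block shared[4] = { 0 };	/* mock shared-state region */
	const struct feature_block *ips;

	shared[1].header.id = 1;	/* e.g. DMUB_SHARED_SHARE_FEATURE__IPS_FW */
	shared[1].header.version = 1;

	ips = find_feature(shared, 4, 1);
	printf("block size = %zu bytes, IPS_FW block %sfound\n",
	       sizeof(struct feature_block), ips ? "" : "not ");
	return 0;
}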
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index 08aaf84affaf..50a98448e2e8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -25,6 +25,7 @@ DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o
DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn32.o
DMUB += dmub_dcn35.o
+DMUB += dmub_dcn351.o
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 98dad0d47e72..cae96fba6349 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
uint64_t fb_base, fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 1df128e57ed3..de287b101848 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index 81dae75e9ff8..a4abe951c838 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
index 9a3afffd9b0f..066f35a50094 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
@@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 094e9f864557..2bcf5fb87dd9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 4d520a893c7b..eccdab4986ce 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 2daa1e0c8061..0d521eeda050 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -32,8 +32,6 @@
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
-#define DCN_BASE__INST0_SEG2 0x000034C0
-
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define CTX dmub
#define REGS dmub->regs_dcn32
@@ -218,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index b0cd8d29402f..29c1132951af 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 6d1fbea0f6ba..53f359f3fae2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -229,7 +229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{
union dmub_addr offset;
@@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
DMCUB_REGION3_CW6_ENABLE, 1);
+
+ offset = region6->offset;
+
+ REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0,
+ DMCUB_REGION6_TOP_ADDRESS,
+ region6->region.top - region6->region.base - 1,
+ DMCUB_REGION6_ENABLE, 1);
}
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
@@ -545,8 +555,14 @@ uint32_t dmub_dcn35_read_inbox0_ack_register(struct dmub_srv *dmub)
bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub)
{
union dmub_fw_boot_status status;
+ uint32_t is_enable;
+
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
+ if (is_enable == 0)
+ return false;
status.all = REG_READ(DMCUB_SCRATCH0);
- return status.bits.hw_power_init_done;
+ return (status.bits.dal_fw && status.bits.hw_power_init_done && status.bits.mailbox_rdy) ||
+ (!status.bits.dal_fw && status.bits.mailbox_rdy);
}
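
As a quick restatement of the readiness check added to dmub_dcn35_is_hw_powered_up() above, the sketch below models the DMCUB enable bit and the boot-status bits as plain booleans; hw_powered_up() is an illustrative stand-in, not a function from this patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative restatement of the check in dmub_dcn35_is_hw_powered_up():
 * the DMCUB must be enabled; with a DAL firmware image both hardware power
 * init and the mailbox must be ready, otherwise the mailbox alone suffices.
 */
static bool hw_powered_up(bool dmcub_enabled, bool dal_fw,
			  bool hw_power_init_done, bool mailbox_rdy)
{
	if (!dmcub_enabled)		/* DMCUB_CNTL.DMCUB_ENABLE == 0 */
		return false;

	return (dal_fw && hw_power_init_done && mailbox_rdy) ||
	       (!dal_fw && mailbox_rdy);
}

int main(void)
{
	printf("%d\n", hw_powered_up(true, true, true, true));		/* 1 */
	printf("%d\n", hw_powered_up(true, true, false, true));	/* 0 */
	printf("%d\n", hw_powered_up(true, false, false, true));	/* 1 */
	printf("%d\n", hw_powered_up(false, true, true, true));	/* 0 */
	return 0;
}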
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 129a7031d2ae..686e97c00ccc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -154,6 +157,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c
new file mode 100644
index 000000000000..8f40b9f6706c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#include "../dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn351.h"
+
+#include "dcn/dcn_3_5_1_offset.h"
+#include "dcn/dcn_3_5_1_sh_mask.h"
+
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+#define CTX dmub
+#define REGS dmub->regs_dcn35
+#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+void dmub_srv_dcn351_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
+{
+ struct dmub_srv_dcn35_regs *regs = dmub->regs_dcn35;
+#define REG_STRUCT regs
+
+#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg);
+ DMUB_DCN35_REGS()
+ DMCUB_INTERNAL_REGS()
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+#undef REG_STRUCT
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h
new file mode 100644
index 000000000000..4121fa1b301d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef _DMUB_DCN351_H_
+#define _DMUB_DCN351_H_
+
+#include "dmub_dcn35.h"
+
+struct dmub_srv;
+
+void dmub_srv_dcn351_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+
+#endif /* _DMUB_DCN351_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 9ad738805320..90e878195d95 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -37,6 +37,7 @@
#include "dmub_dcn316.h"
#include "dmub_dcn32.h"
#include "dmub_dcn35.h"
+#include "dmub_dcn351.h"
#include "os_types.h"
/*
* Note: the DMUB service is standalone. No additional headers should be
@@ -78,6 +79,7 @@
#define DMUB_CW6_BASE (0x66000000)
#define DMUB_REGION5_BASE (0xA0000000)
+#define DMUB_REGION6_BASE (0xC0000000)
static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
@@ -314,6 +316,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
break;
case DMUB_ASIC_DCN35:
+ case DMUB_ASIC_DCN351:
dmub->regs_dcn35 = &dmub_srv_dcn35_regs;
funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory;
funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd;
@@ -350,6 +353,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
+ if (asic == DMUB_ASIC_DCN351)
+ funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
funcs->should_detect = dmub_dcn35_should_detect;
@@ -417,58 +422,44 @@ void dmub_srv_destroy(struct dmub_srv *dmub)
dmub_memset(dmub, 0, sizeof(*dmub));
}
+static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out,
+ const uint32_t *window_sizes,
+ enum dmub_window_memory_type memory_type)
+{
+ uint32_t i, top = 0;
+
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (params->window_memory_type[i] == memory_type) {
+ struct dmub_region *region = &out->regions[i];
+
+ region->base = dmub_align(top, 256);
+ region->top = region->base + dmub_align(window_sizes[i], 64);
+ top = region->top;
+ }
+ }
+
+ return dmub_align(top, 4096);
+}
+
enum dmub_status
-dmub_srv_calc_region_info(struct dmub_srv *dmub,
- const struct dmub_srv_region_params *params,
- struct dmub_srv_region_info *out)
+ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out)
{
- struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
- struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
- struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
- struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
- struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
- struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
- struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
- struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
- uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
- uint32_t previous_top = 0;
+ uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
memset(out, 0, sizeof(*out));
+ memset(window_sizes, 0, sizeof(window_sizes));
out->num_regions = DMUB_NUM_WINDOWS;
- inst->base = 0x0;
- inst->top = inst->base + params->inst_const_size;
-
- data->base = dmub_align(inst->top, 256);
- data->top = data->base + params->bss_data_size;
-
- /*
- * All cache windows below should be aligned to the size
- * of the DMCUB cache line, 64 bytes.
- */
-
- stack->base = dmub_align(data->top, 256);
- stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
-
- bios->base = dmub_align(stack->top, 256);
- bios->top = bios->base + params->vbios_size;
-
- if (params->is_mailbox_in_inbox) {
- mail->base = 0;
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = bios->top;
- } else {
- mail->base = dmub_align(bios->top, 256);
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = mail->top;
- }
-
fw_info = dmub_get_fw_meta_info(params);
if (fw_info) {
@@ -486,19 +477,21 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version;
}
- trace_buff->base = dmub_align(previous_top, 256);
- trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
-
- fw_state->base = dmub_align(trace_buff->top, 256);
- fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
+ window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
+ window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+ window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
+ window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
+ window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
+ window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
+ window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
- scratch_mem->base = dmub_align(fw_state->top, 256);
- scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+ out->fb_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
- out->fb_size = dmub_align(scratch_mem->top, 4096);
-
- if (params->is_mailbox_in_inbox)
- out->inbox_size = dmub_align(mail->top, 4096);
+ out->gart_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
return DMUB_STATUS_OK;
}
@@ -507,8 +500,6 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out)
{
- uint8_t *cpu_base;
- uint64_t gpu_base;
uint32_t i;
if (!dmub->sw_init)
@@ -519,19 +510,16 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID;
- cpu_base = (uint8_t *)params->cpu_fb_addr;
- gpu_base = params->gpu_fb_addr;
-
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg =
&params->region_info->regions[i];
- out->fb[i].cpu_addr = cpu_base + reg->base;
- out->fb[i].gpu_addr = gpu_base + reg->base;
-
- if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
- out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
- out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+ if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
+ } else {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
}
out->fb[i].size = reg->top - reg->base;
@@ -583,9 +571,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
- struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
if (!dmub->sw_init)
@@ -670,10 +659,16 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ region6.offset.quad_part = shared_state_fb->gpu_addr;
+ region6.region.base = DMUB_CW6_BASE;
+ region6.region.top = region6.region.base + shared_state_fb->size;
+
+ dmub->shared_state = shared_state_fb->cpu_addr;
+
dmub->scratch_mem_fb = *scratch_mem_fb;
if (dmub->hw_funcs.setup_windows)
- dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
if (dmub->hw_funcs.setup_outbox0)
dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
@@ -812,8 +807,10 @@ bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub)
if (!dmub->hw_funcs.is_hw_powered_up)
return true;
- return dmub->hw_funcs.is_hw_powered_up(dmub) &&
- dmub->hw_funcs.is_hw_init(dmub);
+ if (!dmub->hw_funcs.is_hw_powered_up(dmub))
+ return false;
+
+ return true;
}
enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
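
To illustrate the per-memory-type packing that dmub_srv_calc_regions_for_memory_type() introduces above, the following standalone sketch lays windows of one memory type out back to back with a 256-byte aligned base, a 64-byte aligned size, and a 4 KiB aligned total, as in the hunk. The align_up()/pack_regions() helpers and the example window sizes are assumptions for illustration only; dmub_align() in the driver plays the role of align_up() here.

#include <stdint.h>
#include <stdio.h>

#define NUM_WINDOWS 4

enum mem_type { MEM_FB, MEM_GART };

struct region {
	uint32_t base;
	uint32_t top;
};

/* Round x up to the next multiple of a (a power of two here); dmub_align()
 * in the driver serves the same purpose. */
static uint32_t align_up(uint32_t x, uint32_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/*
 * Lay out every window of the requested memory type back to back:
 * 256-byte aligned base, 64-byte aligned size, 4 KiB aligned total,
 * mirroring dmub_srv_calc_regions_for_memory_type() above.
 */
static uint32_t pack_regions(const enum mem_type *type, const uint32_t *size,
			     struct region *out, enum mem_type want)
{
	uint32_t i, top = 0;

	for (i = 0; i < NUM_WINDOWS; i++) {
		if (type[i] != want)
			continue;
		out[i].base = align_up(top, 256);
		out[i].top = out[i].base + align_up(size[i], 64);
		top = out[i].top;
	}
	return align_up(top, 4096);
}

int main(void)
{
	/* Example window sizes only; not real DMUB window parameters. */
	enum mem_type type[NUM_WINDOWS] = { MEM_FB, MEM_FB, MEM_GART, MEM_FB };
	uint32_t size[NUM_WINDOWS] = { 0x10000, 0x300, 0x2000, 0x40 };
	struct region regions[NUM_WINDOWS] = { { 0, 0 } };
	uint32_t fb_size, gart_size;

	fb_size = pack_regions(type, size, regions, MEM_FB);
	gart_size = pack_regions(type, size, regions, MEM_GART);

	printf("fb_size = 0x%x, gart_size = 0x%x\n", fb_size, gart_size);
	return 0;
}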
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
index 915a031a43cb..e4a26143f14c 100644
--- a/drivers/gpu/drm/amd/display/include/audio_types.h
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -27,11 +27,21 @@
#define __AUDIO_TYPES_H__
#include "signal_types.h"
+#include "fixed31_32.h"
+#include "dc_dp_types.h"
#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+struct audio_dp_link_info {
+ uint32_t link_bandwidth_kbps;
+ uint32_t hblank_min_symbol_width;
+ enum dp_link_encoding encoding;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lane_count;
+ bool is_mst;
+};
struct audio_crtc_info {
uint32_t h_total;
@@ -42,7 +52,10 @@ struct audio_crtc_info {
uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */
uint32_t refresh_rate;
enum dc_color_depth color_depth;
+ enum dc_pixel_encoding pixel_encoding;
bool interlaced;
+ uint32_t dsc_bits_per_pixel;
+ uint32_t dsc_num_slices;
};
struct azalia_clock_info {
uint32_t pixel_clock_in_10khz;
@@ -95,6 +108,8 @@ struct audio_output {
enum signal_type signal;
/* video timing */
struct audio_crtc_info crtc_info;
+ /* DP link info */
+ struct audio_dp_link_info dp_link_info;
/* PLL for audio */
struct audio_pll_info pll_info;
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index e317089cf6ee..c9ec46c6b4c6 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -250,11 +250,13 @@ enum {
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
#define GC_11_0_3_A0 0x20
+#define GC_11_0_4_A0 0xC0
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1b8ab20f1715..92dbff22a7c6 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -169,6 +169,15 @@ enum dp_test_pattern {
DP_TEST_PATTERN_UNSUPPORTED
};
+#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)\
+ (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END)
+
+#define IS_DP_PHY_PATTERN(test_pattern)\
+ ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||\
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+
enum dp_test_pattern_color_space {
DP_TEST_PATTERN_COLOR_SPACE_RGB,
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 5960dd760e91..8ce6c22e5d04 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
unsigned int length);
void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index df2c7ffe190f..b0a6256e89f4 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -174,6 +174,7 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_ATHUB (1 << 16)
#define AMD_PG_SUPPORT_JPEG (1 << 17)
#define AMD_PG_SUPPORT_IH_SRAM_PG (1 << 18)
+#define AMD_PG_SUPPORT_JPEG_DPG (1 << 19)
/**
* enum PP_FEATURE_MASK - Used to mask power play features.
@@ -244,6 +245,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/arct_ip_offset.h b/drivers/gpu/drm/amd/include/arct_ip_offset.h
index af1c46991429..7dd876f7df74 100644
--- a/drivers/gpu/drm/amd/include/arct_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/arct_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
} __maybe_unused;
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h
new file mode 100644
index 000000000000..84483366ab6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _athub_4_1_0_OFFSET_HEADER
+#define _athub_4_1_0_OFFSET_HEADER
+
+
+
+// addressBlock: athub_xpbdec
+// base address: 0x3000
+#define regXPB_RTR_SRC_APRTR0 0x0000
+#define regXPB_RTR_SRC_APRTR0_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR1 0x0001
+#define regXPB_RTR_SRC_APRTR1_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR2 0x0002
+#define regXPB_RTR_SRC_APRTR2_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR3 0x0003
+#define regXPB_RTR_SRC_APRTR3_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR4 0x0004
+#define regXPB_RTR_SRC_APRTR4_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR5 0x0005
+#define regXPB_RTR_SRC_APRTR5_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR6 0x0006
+#define regXPB_RTR_SRC_APRTR6_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR7 0x0007
+#define regXPB_RTR_SRC_APRTR7_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR8 0x0008
+#define regXPB_RTR_SRC_APRTR8_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR9 0x0009
+#define regXPB_RTR_SRC_APRTR9_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR10 0x000a
+#define regXPB_RTR_SRC_APRTR10_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR11 0x000b
+#define regXPB_RTR_SRC_APRTR11_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR12 0x000c
+#define regXPB_RTR_SRC_APRTR12_BASE_IDX 0
+#define regXPB_RTR_SRC_APRTR13 0x000d
+#define regXPB_RTR_SRC_APRTR13_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP0 0x000e
+#define regXPB_RTR_DEST_MAP0_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP1 0x000f
+#define regXPB_RTR_DEST_MAP1_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP2 0x0010
+#define regXPB_RTR_DEST_MAP2_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP3 0x0011
+#define regXPB_RTR_DEST_MAP3_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP4 0x0012
+#define regXPB_RTR_DEST_MAP4_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP5 0x0013
+#define regXPB_RTR_DEST_MAP5_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP6 0x0014
+#define regXPB_RTR_DEST_MAP6_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP7 0x0015
+#define regXPB_RTR_DEST_MAP7_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP8 0x0016
+#define regXPB_RTR_DEST_MAP8_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP9 0x0017
+#define regXPB_RTR_DEST_MAP9_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP10 0x0018
+#define regXPB_RTR_DEST_MAP10_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP11 0x0019
+#define regXPB_RTR_DEST_MAP11_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP12 0x001a
+#define regXPB_RTR_DEST_MAP12_BASE_IDX 0
+#define regXPB_RTR_DEST_MAP13 0x001b
+#define regXPB_RTR_DEST_MAP13_BASE_IDX 0
+#define regXPB_CLG_CFG0 0x001c
+#define regXPB_CLG_CFG0_BASE_IDX 0
+#define regXPB_CLG_CFG1 0x001d
+#define regXPB_CLG_CFG1_BASE_IDX 0
+#define regXPB_CLG_CFG2 0x001e
+#define regXPB_CLG_CFG2_BASE_IDX 0
+#define regXPB_CLG_CFG3 0x001f
+#define regXPB_CLG_CFG3_BASE_IDX 0
+#define regXPB_CLG_CFG4 0x0020
+#define regXPB_CLG_CFG4_BASE_IDX 0
+#define regXPB_CLG_CFG5 0x0021
+#define regXPB_CLG_CFG5_BASE_IDX 0
+#define regXPB_CLG_CFG6 0x0022
+#define regXPB_CLG_CFG6_BASE_IDX 0
+#define regXPB_CLG_CFG7 0x0023
+#define regXPB_CLG_CFG7_BASE_IDX 0
+#define regXPB_CLG_EXTRA0 0x0024
+#define regXPB_CLG_EXTRA0_BASE_IDX 0
+#define regXPB_CLG_EXTRA1 0x0025
+#define regXPB_CLG_EXTRA1_BASE_IDX 0
+#define regXPB_CLG_EXTRA_MSK 0x0026
+#define regXPB_CLG_EXTRA_MSK_BASE_IDX 0
+#define regXPB_LB_ADDR 0x0027
+#define regXPB_LB_ADDR_BASE_IDX 0
+#define regXPB_HST_CFG 0x0028
+#define regXPB_HST_CFG_BASE_IDX 0
+#define regXPB_P2P_BAR_CFG 0x0029
+#define regXPB_P2P_BAR_CFG_BASE_IDX 0
+#define regXPB_P2P_BAR0 0x002a
+#define regXPB_P2P_BAR0_BASE_IDX 0
+#define regXPB_P2P_BAR1 0x002b
+#define regXPB_P2P_BAR1_BASE_IDX 0
+#define regXPB_P2P_BAR2 0x002c
+#define regXPB_P2P_BAR2_BASE_IDX 0
+#define regXPB_P2P_BAR3 0x002d
+#define regXPB_P2P_BAR3_BASE_IDX 0
+#define regXPB_P2P_BAR4 0x002e
+#define regXPB_P2P_BAR4_BASE_IDX 0
+#define regXPB_P2P_BAR5 0x002f
+#define regXPB_P2P_BAR5_BASE_IDX 0
+#define regXPB_P2P_BAR6 0x0030
+#define regXPB_P2P_BAR6_BASE_IDX 0
+#define regXPB_P2P_BAR7 0x0031
+#define regXPB_P2P_BAR7_BASE_IDX 0
+#define regXPB_P2P_BAR_SETUP 0x0032
+#define regXPB_P2P_BAR_SETUP_BASE_IDX 0
+#define regXPB_P2P_BAR_DELTA_ABOVE 0x0034
+#define regXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 0
+#define regXPB_P2P_BAR_DELTA_BELOW 0x0035
+#define regXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR0 0x0036
+#define regXPB_PEER_SYS_BAR0_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR1 0x0037
+#define regXPB_PEER_SYS_BAR1_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR2 0x0038
+#define regXPB_PEER_SYS_BAR2_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR3 0x0039
+#define regXPB_PEER_SYS_BAR3_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR4 0x003a
+#define regXPB_PEER_SYS_BAR4_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR5 0x003b
+#define regXPB_PEER_SYS_BAR5_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR6 0x003c
+#define regXPB_PEER_SYS_BAR6_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR7 0x003d
+#define regXPB_PEER_SYS_BAR7_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR8 0x003e
+#define regXPB_PEER_SYS_BAR8_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR9 0x003f
+#define regXPB_PEER_SYS_BAR9_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR10 0x0040
+#define regXPB_PEER_SYS_BAR10_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR11 0x0041
+#define regXPB_PEER_SYS_BAR11_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR12 0x0042
+#define regXPB_PEER_SYS_BAR12_BASE_IDX 0
+#define regXPB_PEER_SYS_BAR13 0x0043
+#define regXPB_PEER_SYS_BAR13_BASE_IDX 0
+#define regXPB_CLK_GAT 0x0044
+#define regXPB_CLK_GAT_BASE_IDX 0
+#define regXPB_INTF_CFG 0x0045
+#define regXPB_INTF_CFG_BASE_IDX 0
+#define regXPB_INTF_STS 0x0046
+#define regXPB_INTF_STS_BASE_IDX 0
+#define regXPB_PIPE_STS 0x0047
+#define regXPB_PIPE_STS_BASE_IDX 0
+#define regXPB_WCB_STS 0x0048
+#define regXPB_WCB_STS_BASE_IDX 0
+#define regXPB_MAP_INVERT_FLUSH_NUM_LSB 0x0049
+#define regXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 0
+#define regXPB_STICKY 0x004a
+#define regXPB_STICKY_BASE_IDX 0
+#define regXPB_STICKY_W1C 0x004b
+#define regXPB_STICKY_W1C_BASE_IDX 0
+#define regXPB_SUB_CTRL 0x004c
+#define regXPB_SUB_CTRL_BASE_IDX 0
+#define regXPB_PERF_KNOBS 0x004d
+#define regXPB_PERF_KNOBS_BASE_IDX 0
+#define regXPB_MISC_CFG 0x004e
+#define regXPB_MISC_CFG_BASE_IDX 0
+#define regXPB_INTF_CFG2 0x004f
+#define regXPB_INTF_CFG2_BASE_IDX 0
+#define regXPB_CLG_EXTRA_RD 0x0050
+#define regXPB_CLG_EXTRA_RD_BASE_IDX 0
+#define regXPB_CLG_EXTRA_MSK_RD 0x0051
+#define regXPB_CLG_EXTRA_MSK_RD_BASE_IDX 0
+#define regXPB_CLG_GFX_MATCH 0x0052
+#define regXPB_CLG_GFX_MATCH_BASE_IDX 0
+#define regXPB_CLG_GFX_MATCH_VLD 0x0053
+#define regXPB_CLG_GFX_MATCH_VLD_BASE_IDX 0
+#define regXPB_CLG_GFX_MATCH_MSK 0x0054
+#define regXPB_CLG_GFX_MATCH_MSK_BASE_IDX 0
+#define regXPB_CLG_MM_MATCH 0x0055
+#define regXPB_CLG_MM_MATCH_BASE_IDX 0
+#define regXPB_CLG_MM_MATCH_VLD 0x0056
+#define regXPB_CLG_MM_MATCH_VLD_BASE_IDX 0
+#define regXPB_CLG_MM_MATCH_MSK 0x0057
+#define regXPB_CLG_MM_MATCH_MSK_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING0 0x005a
+#define regXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING1 0x005b
+#define regXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING2 0x005c
+#define regXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING3 0x005d
+#define regXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING4 0x005e
+#define regXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING5 0x005f
+#define regXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING6 0x0060
+#define regXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 0
+#define regXPB_CLG_GFX_UNITID_MAPPING7 0x0061
+#define regXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING0 0x0062
+#define regXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING1 0x0063
+#define regXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING2 0x0064
+#define regXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 0
+#define regXPB_CLG_MM_UNITID_MAPPING3 0x0065
+#define regXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 0
+
+
+// addressBlock: athub_rpbdec
+// base address: 0x31d0
+#define regATHUB_SHARED_VIRT_RESET_REQ 0x0074
+#define regATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regATHUB_MEM_POWER_LS 0x007f
+#define regATHUB_MEM_POWER_LS_BASE_IDX 0
+#define regATHUB_MISC_CNTL 0x0080
+#define regATHUB_MISC_CNTL_BASE_IDX 0
+#define regRPB_PASSPW_CONF 0x0081
+#define regRPB_PASSPW_CONF_BASE_IDX 0
+#define regRPB_BLOCKLEVEL_CONF 0x0082
+#define regRPB_BLOCKLEVEL_CONF_BASE_IDX 0
+#define regRPB_TAG_CONF 0x0083
+#define regRPB_TAG_CONF_BASE_IDX 0
+#define regRPB_ARB_CNTL 0x0085
+#define regRPB_ARB_CNTL_BASE_IDX 0
+#define regRPB_ARB_CNTL2 0x0086
+#define regRPB_ARB_CNTL2_BASE_IDX 0
+#define regRPB_BIF_CNTL 0x0087
+#define regRPB_BIF_CNTL_BASE_IDX 0
+#define regRPB_BIF_CNTL2 0x0088
+#define regRPB_BIF_CNTL2_BASE_IDX 0
+#define regRPB_SDPPORT_CNTL 0x0089
+#define regRPB_SDPPORT_CNTL_BASE_IDX 0
+#define regRPB_NBIF_SDPPORT_CNTL 0x008a
+#define regRPB_NBIF_SDPPORT_CNTL_BASE_IDX 0
+#define regRPB_DEINTRLV_COMBINE_CNTL 0x008c
+#define regRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 0
+#define regRPB_VC_SWITCH_RDWR 0x008d
+#define regRPB_VC_SWITCH_RDWR_BASE_IDX 0
+#define regRPB_ATS_CNTL3 0x008e
+#define regRPB_ATS_CNTL3_BASE_IDX 0
+#define regRPB_DF_SDPPORT_CNTL 0x008f
+#define regRPB_DF_SDPPORT_CNTL_BASE_IDX 0
+#define regRPB_ATS_CNTL 0x0090
+#define regRPB_ATS_CNTL_BASE_IDX 0
+#define regRPB_ATS_CNTL2 0x0091
+#define regRPB_ATS_CNTL2_BASE_IDX 0
+#define regRPB_PERFCOUNTER0_CFG 0x0092
+#define regRPB_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER1_CFG 0x0093
+#define regRPB_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER2_CFG 0x0094
+#define regRPB_PERFCOUNTER2_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER3_CFG 0x0095
+#define regRPB_PERFCOUNTER3_CFG_BASE_IDX 0
+#define regRPB_PERFCOUNTER_RSLT_CNTL 0x0096
+#define regRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regRPB_PERF_COUNTER_CNTL 0x0097
+#define regRPB_PERF_COUNTER_CNTL_BASE_IDX 0
+#define regRPB_PERFCOUNTER_HI 0x0098
+#define regRPB_PERFCOUNTER_HI_BASE_IDX 0
+#define regRPB_PERFCOUNTER_LO 0x0099
+#define regRPB_PERFCOUNTER_LO_BASE_IDX 0
+#define regRPB_PERF_COUNTER_STATUS 0x009a
+#define regRPB_PERF_COUNTER_STATUS_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h
new file mode 100644
index 000000000000..56499fd62239
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h
@@ -0,0 +1,1348 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _athub_4_1_0_SH_MASK_HEADER
+#define _athub_4_1_0_SH_MASK_HEADER
+
+
+// addressBlock: athub_xpbdec
+//XPB_RTR_SRC_APRTR0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR1
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR2
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR3
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR4
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR5
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR6
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR7
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR8
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR9
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR10
+#define XPB_RTR_SRC_APRTR10__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR10__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR11
+#define XPB_RTR_SRC_APRTR11__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR11__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR12
+#define XPB_RTR_SRC_APRTR12__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR12__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR13
+#define XPB_RTR_SRC_APRTR13__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR13__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_DEST_MAP0
+#define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP1
+#define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP2
+#define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP3
+#define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP4
+#define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP5
+#define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP6
+#define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP7
+#define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP8
+#define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP9
+#define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP10
+#define XPB_RTR_DEST_MAP10__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP10__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP10__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP10__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP10__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP10__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP10__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP10__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP10__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP10__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP10__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP10__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP11
+#define XPB_RTR_DEST_MAP11__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP11__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP11__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP11__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP11__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP11__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP11__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP11__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP11__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP11__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP11__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP11__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP12
+#define XPB_RTR_DEST_MAP12__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP12__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP12__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP12__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP12__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP12__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP12__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP12__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP12__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP12__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP12__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP12__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP13
+#define XPB_RTR_DEST_MAP13__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP13__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP13__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP13__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP13__SIDE_OK__SHIFT 0x19
+#define XPB_RTR_DEST_MAP13__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP13__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP13__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP13__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP13__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP13__SIDE_OK_MASK 0x02000000L
+#define XPB_RTR_DEST_MAP13__APRTR_SIZE_MASK 0x7C000000L
+//XPB_CLG_CFG0
+#define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG0__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG1
+#define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG1__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG2
+#define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG2__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG3
+#define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG3__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG4
+#define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG4__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG5
+#define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG5__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG6
+#define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG6__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_CFG7
+#define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG7__LB_TYPE__SHIFT 0x4
+#define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0xe
+#define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L
+#define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L
+#define XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003C000L
+//XPB_CLG_EXTRA0
+#define XPB_CLG_EXTRA0__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA0__CMP0_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA0__VLD0__SHIFT 0xd
+#define XPB_CLG_EXTRA0__CLG0_NUM__SHIFT 0xe
+#define XPB_CLG_EXTRA0__CMP0_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA0__CMP0_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA0__VLD0_MASK 0x00002000L
+#define XPB_CLG_EXTRA0__CLG0_NUM_MASK 0x0001C000L
+//XPB_CLG_EXTRA1
+#define XPB_CLG_EXTRA1__CMP1_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA1__CMP1_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA1__VLD1__SHIFT 0xd
+#define XPB_CLG_EXTRA1__CLG1_NUM__SHIFT 0xe
+#define XPB_CLG_EXTRA1__CMP1_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA1__CMP1_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA1__VLD1_MASK 0x00002000L
+#define XPB_CLG_EXTRA1__CLG1_NUM_MASK 0x0001C000L
+//XPB_CLG_EXTRA_MSK
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x8
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xd
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x000000FFL
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x00001F00L
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x001FE000L
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x03E00000L
+//XPB_LB_ADDR
+#define XPB_LB_ADDR__CMP0__SHIFT 0x0
+#define XPB_LB_ADDR__MASK0__SHIFT 0xa
+#define XPB_LB_ADDR__CMP1__SHIFT 0x14
+#define XPB_LB_ADDR__MASK1__SHIFT 0x1a
+#define XPB_LB_ADDR__CMP0_MASK 0x000003FFL
+#define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L
+#define XPB_LB_ADDR__CMP1_MASK 0x03F00000L
+#define XPB_LB_ADDR__MASK1_MASK 0xFC000000L
+//XPB_HST_CFG
+#define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0
+#define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L
+//XPB_P2P_BAR_CFG
+#define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0
+#define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4
+#define XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6
+#define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8
+#define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa
+#define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc
+#define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL
+#define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
+#define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
+#define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
+#define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
+#define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
+//XPB_P2P_BAR0
+#define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR0__VALID__SHIFT 0xc
+#define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR0__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR0__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR0__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR1
+#define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR1__VALID__SHIFT 0xc
+#define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR1__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR1__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR1__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR2
+#define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR2__VALID__SHIFT 0xc
+#define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR2__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR2__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR2__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR3
+#define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR3__VALID__SHIFT 0xc
+#define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR3__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR3__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR3__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR4
+#define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR4__VALID__SHIFT 0xc
+#define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR4__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR4__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR4__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR5
+#define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR5__VALID__SHIFT 0xc
+#define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR5__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR5__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR5__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR6
+#define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR6__VALID__SHIFT 0xc
+#define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR6__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR6__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR6__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR7
+#define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR7__VALID__SHIFT 0xc
+#define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR7__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR7__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR7__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_SETUP
+#define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc
+#define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR_SETUP__RESERVE__SHIFT 0xf
+#define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR_SETUP__RESERVE_MASK 0x00008000L
+#define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_DELTA_ABOVE
+#define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L
+//XPB_P2P_BAR_DELTA_BELOW
+#define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L
+//XPB_PEER_SYS_BAR0
+#define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR1
+#define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR2
+#define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR3
+#define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR4
+#define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR5
+#define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR6
+#define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR7
+#define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR8
+#define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR9
+#define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR10
+#define XPB_PEER_SYS_BAR10__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR10__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR10__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR10__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR11
+#define XPB_PEER_SYS_BAR11__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR11__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR11__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR11__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR12
+#define XPB_PEER_SYS_BAR12__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR12__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR12__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR12__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR13
+#define XPB_PEER_SYS_BAR13__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR13__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR13__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR13__ADDR_MASK 0xFFFFFFFEL
+//XPB_CLK_GAT
+#define XPB_CLK_GAT__ONDLY__SHIFT 0x0
+#define XPB_CLK_GAT__OFFDLY__SHIFT 0x6
+#define XPB_CLK_GAT__RDYDLY__SHIFT 0xc
+#define XPB_CLK_GAT__ENABLE__SHIFT 0x12
+#define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13
+#define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL
+#define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L
+#define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L
+#define XPB_CLK_GAT__ENABLE_MASK 0x00040000L
+#define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
+//XPB_INTF_CFG
+#define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8
+#define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10
+#define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK__SHIFT 0x17
+#define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b
+#define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d
+#define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e
+#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA__SHIFT 0x1f
+#define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L
+#define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L
+#define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK_MASK 0x00800000L
+#define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
+#define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
+#define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
+#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA_MASK 0x80000000L
+//XPB_INTF_STS
+#define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10
+#define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11
+#define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12
+#define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13
+#define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
+#define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
+#define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
+#define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L
+//XPB_PIPE_STS
+#define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16
+#define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18
+#define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
+#define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L
+//XPB_WCB_STS
+#define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17
+#define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L
+//XPB_MAP_INVERT_FLUSH_NUM_LSB
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL
+//XPB_STICKY
+#define XPB_STICKY__BITS__SHIFT 0x0
+#define XPB_STICKY__BITS_MASK 0xFFFFFFFFL
+//XPB_STICKY_W1C
+#define XPB_STICKY_W1C__BITS__SHIFT 0x0
+#define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL
+//XPB_SUB_CTRL
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9
+#define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa
+#define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb
+#define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc
+#define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd
+#define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe
+#define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf
+#define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10
+#define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11
+#define XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12
+#define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
+#define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
+#define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
+#define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
+#define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
+#define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
+#define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
+#define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
+#define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
+#define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
+#define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
+//XPB_PERF_KNOBS
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L
+//XPB_MISC_CFG
+#define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0
+#define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8
+#define XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10
+#define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18
+#define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f
+#define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL
+#define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L
+#define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L
+#define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L
+#define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
+//XPB_INTF_CFG2
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL
+//XPB_CLG_EXTRA_RD
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb
+#define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf
+#define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a
+#define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L
+#define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L
+#define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L
+#define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L
+#define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L
+//XPB_CLG_EXTRA_MSK_RD
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L
+//XPB_CLG_GFX_MATCH
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x8
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0x10
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x000000FFL
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x0000FF00L
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x00FF0000L
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0xFF000000L
+//XPB_CLG_GFX_MATCH_VLD
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L
+#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L
+//XPB_CLG_GFX_MATCH_MSK
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L
+//XPB_CLG_MM_MATCH
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x8
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID__SHIFT 0x10
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID__SHIFT 0x18
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x000000FFL
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x0000FF00L
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID_MASK 0x00FF0000L
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID_MASK 0xFF000000L
+//XPB_CLG_MM_MATCH_VLD
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L
+#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L
+//XPB_CLG_MM_MATCH_MSK
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L
+//XPB_CLG_GFX_UNITID_MAPPING0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING1
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING2
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING3
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING4
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING5
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING7
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING1
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING2
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING3
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+
+
+// addressBlock: athub_rpbdec
+//ATHUB_SHARED_VIRT_RESET_REQ
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF_MASK 0x7FFFFFFFL
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//ATHUB_MEM_POWER_LS
+#define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x0007FFC0L
+//ATHUB_MISC_CNTL
+#define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x0
+#define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x6
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x7
+#define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x8
+#define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x9
+#define ATHUB_MISC_CNTL__ALWAYS_BUSY__SHIFT 0xf
+#define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x10
+#define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x11
+#define ATHUB_MISC_CNTL__RPB_BUSY__SHIFT 0x12
+#define ATHUB_MISC_CNTL__XPB_BUSY__SHIFT 0x13
+#define ATHUB_MISC_CNTL__ATS_BUSY__SHIFT 0x14
+#define ATHUB_MISC_CNTL__SDPNCS_BUSY__SHIFT 0x15
+#define ATHUB_MISC_CNTL__DFPORT_BUSY__SHIFT 0x16
+#define ATHUB_MISC_CNTL__SWITCH_CNTL__SHIFT 0x17
+#define ATHUB_MISC_CNTL__LS_DELAY_ENABLE__SHIFT 0x18
+#define ATHUB_MISC_CNTL__LS_DELAY_TIME__SHIFT 0x19
+#define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE__SHIFT 0x1e
+#define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x0000003FL
+#define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00000040L
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00000080L
+#define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00000100L
+#define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x00007E00L
+#define ATHUB_MISC_CNTL__ALWAYS_BUSY_MASK 0x00008000L
+#define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x00010000L
+#define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x00020000L
+#define ATHUB_MISC_CNTL__RPB_BUSY_MASK 0x00040000L
+#define ATHUB_MISC_CNTL__XPB_BUSY_MASK 0x00080000L
+#define ATHUB_MISC_CNTL__ATS_BUSY_MASK 0x00100000L
+#define ATHUB_MISC_CNTL__SDPNCS_BUSY_MASK 0x00200000L
+#define ATHUB_MISC_CNTL__DFPORT_BUSY_MASK 0x00400000L
+#define ATHUB_MISC_CNTL__SWITCH_CNTL_MASK 0x00800000L
+#define ATHUB_MISC_CNTL__LS_DELAY_ENABLE_MASK 0x01000000L
+#define ATHUB_MISC_CNTL__LS_DELAY_TIME_MASK 0x3E000000L
+#define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE_MASK 0x40000000L
+//RPB_PASSPW_CONF
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE__SHIFT 0x2
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN__SHIFT 0x3
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE__SHIFT 0x4
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN__SHIFT 0x5
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE__SHIFT 0x6
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN__SHIFT 0x7
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE__SHIFT 0x8
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN__SHIFT 0x9
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0xa
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xb
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE__SHIFT 0xc
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0xe
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0xf
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x10
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x11
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x12
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0x13
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0x14
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0x15
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x16
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x17
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_MASK 0x00000004L
+#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN_MASK 0x00000008L
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_MASK 0x00000010L
+#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN_MASK 0x00000020L
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_MASK 0x00000040L
+#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN_MASK 0x00000080L
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_MASK 0x00000100L
+#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN_MASK 0x00000200L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000400L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00000800L
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_MASK 0x00001000L
+#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00004000L
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00008000L
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00010000L
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00020000L
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00040000L
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00080000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00100000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00200000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00400000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00800000L
+//RPB_BLOCKLEVEL_CONF
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x2
+#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL__SHIFT 0x3
+#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL__SHIFT 0x5
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x7
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x9
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0xb
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xd
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xe
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0x11
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x13
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00000004L
+#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL_MASK 0x00000018L
+#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL_MASK 0x00000060L
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x00000180L
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x00000600L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00001800L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00002000L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x0000C000L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x00060000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00080000L
+//RPB_TAG_CONF
+#define RPB_TAG_CONF__RPB_IO_RD__SHIFT 0x0
+#define RPB_TAG_CONF__RPB_IO_WR__SHIFT 0xa
+#define RPB_TAG_CONF__RPB_IO_MAX_LIMIT__SHIFT 0x14
+#define RPB_TAG_CONF__RPB_IO_RD_MASK 0x000003FFL
+#define RPB_TAG_CONF__RPB_IO_WR_MASK 0x000FFC00L
+#define RPB_TAG_CONF__RPB_IO_MAX_LIMIT_MASK 0x7FF00000L
+//RPB_ARB_CNTL
+#define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19
+#define RPB_ARB_CNTL__RPB_VC0_CRD__SHIFT 0x1a
+#define RPB_ARB_CNTL__DISABLE_FED__SHIFT 0x1f
+#define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L
+#define RPB_ARB_CNTL__RPB_VC0_CRD_MASK 0x7C000000L
+#define RPB_ARB_CNTL__DISABLE_FED_MASK 0x80000000L
+//RPB_ARB_CNTL2
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL2__RPB_VC1_CRD__SHIFT 0x18
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_ARB_CNTL2__RPB_VC1_CRD_MASK 0x1F000000L
+//RPB_BIF_CNTL
+#define RPB_BIF_CNTL__VC0_SWITCH_NUM__SHIFT 0x0
+#define RPB_BIF_CNTL__VC1_SWITCH_NUM__SHIFT 0x8
+#define RPB_BIF_CNTL__VC2_SWITCH_NUM__SHIFT 0x10
+#define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN__SHIFT 0x18
+#define RPB_BIF_CNTL__TR_QOS_VC__SHIFT 0x19
+#define RPB_BIF_CNTL__RESERVE__SHIFT 0x1c
+#define RPB_BIF_CNTL__VC0_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_BIF_CNTL__VC1_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_BIF_CNTL__VC2_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN_MASK 0x01000000L
+#define RPB_BIF_CNTL__TR_QOS_VC_MASK 0x0E000000L
+#define RPB_BIF_CNTL__RESERVE_MASK 0xF0000000L
+//RPB_BIF_CNTL2
+#define RPB_BIF_CNTL2__ARB_MODE__SHIFT 0x0
+#define RPB_BIF_CNTL2__DRAIN_VC_NUM__SHIFT 0x1
+#define RPB_BIF_CNTL2__SWITCH_ENABLE__SHIFT 0x3
+#define RPB_BIF_CNTL2__SWITCH_THRESHOLD__SHIFT 0x4
+#define RPB_BIF_CNTL2__PAGE_PRI_EN__SHIFT 0xc
+#define RPB_BIF_CNTL2__VC5_TR_PRI_EN__SHIFT 0xd
+#define RPB_BIF_CNTL2__VC0_TR_PRI_EN__SHIFT 0xe
+#define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE__SHIFT 0xf
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE__SHIFT 0x10
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_EN__SHIFT 0x11
+#define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN__SHIFT 0x12
+#define RPB_BIF_CNTL2__ATHUB_NBIF_UNITID__SHIFT 0x13
+#define RPB_BIF_CNTL2__RESERVE__SHIFT 0x1e
+#define RPB_BIF_CNTL2__ARB_MODE_MASK 0x00000001L
+#define RPB_BIF_CNTL2__DRAIN_VC_NUM_MASK 0x00000006L
+#define RPB_BIF_CNTL2__SWITCH_ENABLE_MASK 0x00000008L
+#define RPB_BIF_CNTL2__SWITCH_THRESHOLD_MASK 0x00000FF0L
+#define RPB_BIF_CNTL2__PAGE_PRI_EN_MASK 0x00001000L
+#define RPB_BIF_CNTL2__VC5_TR_PRI_EN_MASK 0x00002000L
+#define RPB_BIF_CNTL2__VC0_TR_PRI_EN_MASK 0x00004000L
+#define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE_MASK 0x00008000L
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_MASK 0x00010000L
+#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_EN_MASK 0x00020000L
+#define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN_MASK 0x00040000L
+#define RPB_BIF_CNTL2__ATHUB_NBIF_UNITID_MASK 0x3FF80000L
+#define RPB_BIF_CNTL2__RESERVE_MASK 0xC0000000L
+//RPB_SDPPORT_CNTL
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6
+#define RPB_SDPPORT_CNTL__RESERVE1__SHIFT 0xa
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b
+#define RPB_SDPPORT_CNTL__CG_BUSY_PORT__SHIFT 0x1c
+#define RPB_SDPPORT_CNTL__RESERVE__SHIFT 0x1d
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L
+#define RPB_SDPPORT_CNTL__RESERVE1_MASK 0x003FFC00L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L
+#define RPB_SDPPORT_CNTL__CG_BUSY_PORT_MASK 0x10000000L
+#define RPB_SDPPORT_CNTL__RESERVE_MASK 0xE0000000L
+//RPB_NBIF_SDPPORT_CNTL
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD__SHIFT 0x0
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD__SHIFT 0x8
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD__SHIFT 0x10
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD__SHIFT 0x18
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD_MASK 0x000000FFL
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD_MASK 0x0000FF00L
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD_MASK 0x00FF0000L
+#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD_MASK 0xFF000000L
+//RPB_DEINTRLV_COMBINE_CNTL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5
+#define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD__SHIFT 0x6
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN__SHIFT 0xe
+#define RPB_DEINTRLV_COMBINE_CNTL__RESERVE__SHIFT 0xf
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L
+#define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD_MASK 0x00003FC0L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN_MASK 0x00004000L
+#define RPB_DEINTRLV_COMBINE_CNTL__RESERVE_MASK 0xFFFF8000L
+//RPB_VC_SWITCH_RDWR
+#define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0
+#define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2
+#define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa
+#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD__SHIFT 0x12
+#define RPB_VC_SWITCH_RDWR__CENTER_MARGIN__SHIFT 0x1a
+#define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L
+#define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL
+#define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L
+#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD_MASK 0x03FC0000L
+#define RPB_VC_SWITCH_RDWR__CENTER_MARGIN_MASK 0xFC000000L
+//RPB_ATS_CNTL3
+#define RPB_ATS_CNTL3__RPB_ATS_VC5_TR__SHIFT 0x0
+#define RPB_ATS_CNTL3__RPB_ATS_VC0_TR__SHIFT 0x9
+#define RPB_ATS_CNTL3__RPB_ATS_PR__SHIFT 0x12
+#define RPB_ATS_CNTL3__RPB_ATS_VC5_TR_MASK 0x000001FFL
+#define RPB_ATS_CNTL3__RPB_ATS_VC0_TR_MASK 0x0003FE00L
+#define RPB_ATS_CNTL3__RPB_ATS_PR_MASK 0x07FC0000L
+//RPB_DF_SDPPORT_CNTL
+#define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD__SHIFT 0x0
+#define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD__SHIFT 0x6
+#define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD__SHIFT 0xe
+#define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE__SHIFT 0x12
+#define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER__SHIFT 0x13
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_EA_CHECK_ENABLE__SHIFT 0x1b
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_CHECK_ENABLE__SHIFT 0x1c
+#define RPB_DF_SDPPORT_CNTL__DF_RAAT_CHECK_ENABLE__SHIFT 0x1d
+#define RPB_DF_SDPPORT_CNTL__DF_ATAR_CHECK_ENABLE__SHIFT 0x1e
+#define RPB_DF_SDPPORT_CNTL__DF_VC3_READ_CHECK__SHIFT 0x1f
+#define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD_MASK 0x0000003FL
+#define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD_MASK 0x00003FC0L
+#define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD_MASK 0x0003C000L
+#define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE_MASK 0x00040000L
+#define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER_MASK 0x07F80000L
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_EA_CHECK_ENABLE_MASK 0x08000000L
+#define RPB_DF_SDPPORT_CNTL__DF_RAW_CHECK_ENABLE_MASK 0x10000000L
+#define RPB_DF_SDPPORT_CNTL__DF_RAAT_CHECK_ENABLE_MASK 0x20000000L
+#define RPB_DF_SDPPORT_CNTL__DF_ATAR_CHECK_ENABLE_MASK 0x40000000L
+#define RPB_DF_SDPPORT_CNTL__DF_VC3_READ_CHECK_MASK 0x80000000L
+//RPB_ATS_CNTL
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2
+#define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7
+#define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM__SHIFT 0xf
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13
+#define RPB_ATS_CNTL__WR_AT__SHIFT 0x17
+#define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE__SHIFT 0x19
+#define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE__SHIFT 0x1a
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL
+#define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L
+#define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM_MASK 0x00078000L
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L
+#define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L
+#define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE_MASK 0x02000000L
+#define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE_MASK 0x04000000L
+//RPB_ATS_CNTL2
+#define RPB_ATS_CNTL2__INVAL_COM_CMD__SHIFT 0x0
+#define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x6
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0xc
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0x12
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0x15
+#define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x18
+#define RPB_ATS_CNTL2__RPB_VC5_CRD__SHIFT 0x1a
+#define RPB_ATS_CNTL2__INVAL_COM_CMD_MASK 0x0000003FL
+#define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x00000FC0L
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x0003F000L
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x001C0000L
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00E00000L
+#define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x03000000L
+#define RPB_ATS_CNTL2__RPB_VC5_CRD_MASK 0x7C000000L
+//RPB_PERFCOUNTER0_CFG
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER1_CFG
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER2_CFG
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER3_CFG
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER_RSLT_CNTL
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//RPB_PERF_COUNTER_CNTL
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x2
+#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x3
+#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x4
+#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x5
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x9
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0xe
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x13
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x18
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L
+#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L
+#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L
+#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L
+#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001E0L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003E00L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007C000L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00F80000L
+#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1F000000L
+//RPB_PERFCOUNTER_HI
+#define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//RPB_PERFCOUNTER_LO
+#define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//RPB_PERF_COUNTER_STATUS
+#define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE__SHIFT 0x0
+#define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
index 222fa8d13269..a05bf8e4f58d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
@@ -626,6 +626,8 @@
#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
#define regDTBCLK_DTO3_MODULO 0x0022
#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
#define regPHYASYMCLK_CLOCK_CNTL 0x0052
#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
@@ -638,6 +640,8 @@
#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regPHYFSYMCLK_CLOCK_CNTL 0x0057
#define regPHYFSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
#define regDCCG_GATE_DISABLE_CNTL3 0x005a
#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
index 8ddb03a1dc39..df84941bbe5b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
@@ -1933,6 +1933,11 @@
//DTBCLK_DTO3_MODULO
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
//PHYASYMCLK_CLOCK_CNTL
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0
#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4
@@ -1967,6 +1972,11 @@
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT 0x4
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK 0x00000001L
#define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x10
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000003L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00010000L
//DCCG_GATE_DISABLE_CNTL3
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
index 7cf0a625277b..33b5d9be06b1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
@@ -4802,6 +4802,10 @@
#define regCM0_CM_DEALPHA_BASE_IDX 2
#define regCM0_CM_COEF_FORMAT 0x0d8c
#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5210,6 +5214,10 @@
#define regCM1_CM_DEALPHA_BASE_IDX 2
#define regCM1_CM_COEF_FORMAT 0x0ef7
#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -5618,6 +5626,10 @@
#define regCM2_CM_DEALPHA_BASE_IDX 2
#define regCM2_CM_COEF_FORMAT 0x1062
#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -6026,6 +6038,10 @@
#define regCM3_CM_DEALPHA_BASE_IDX 2
#define regCM3_CM_COEF_FORMAT 0x11cd
#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -10568,6 +10584,8 @@
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -10697,6 +10715,8 @@
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -10827,6 +10847,8 @@
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
@@ -10957,6 +10979,8 @@
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
index fca72e2ec929..ff77b71167eb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
@@ -16556,6 +16556,13 @@
#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
@@ -27176,6 +27183,23 @@
#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+
+//DIG0_DIG_BE_CLK_CNTL
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT 0x6
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK 0x00000040L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+
#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
@@ -36716,6 +36740,17 @@
#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
@@ -38488,6 +38523,18 @@
#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK 0x000001FFL
#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT 0x0
#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//DWB_OGAM_LUT_CONTROL
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT 0x8
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT 0x10
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000030L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK 0x00000100L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK 0x00001000L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK 0x00010000L
+
#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
@@ -52008,6 +52055,14 @@
#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT 0x10
#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT 0x11
#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT 0x1c
#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK 0x0000007FL
#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK 0x00000200L
#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK 0x00000400L
@@ -52019,6 +52074,16 @@
#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK 0x00010000L
#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK 0x00020000L
#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK 0x00100000L
+
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK 0x00800000L
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK 0x10000000L
+
#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0
#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_offset.h
new file mode 100644
index 000000000000..5efcf9b27869
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_offset.h
@@ -0,0 +1,15259 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+#ifndef _dcn_3_5_1_OFFSET_HEADER
+#define _dcn_3_5_1_OFFSET_HEADER
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+// base address: 0x1300000
+#define regGLOBAL_CAPABILITIES 0x4b7000
+#define regGLOBAL_CAPABILITIES_BASE_IDX 3
+#define regMINOR_VERSION 0x4b7000
+#define regMINOR_VERSION_BASE_IDX 3
+#define regMAJOR_VERSION 0x4b7000
+#define regMAJOR_VERSION_BASE_IDX 3
+#define regOUTPUT_PAYLOAD_CAPABILITY 0x4b7001
+#define regOUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINPUT_PAYLOAD_CAPABILITY 0x4b7001
+#define regINPUT_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regGLOBAL_CONTROL 0x4b7002
+#define regGLOBAL_CONTROL_BASE_IDX 3
+#define regWAKE_ENABLE 0x4b7003
+#define regWAKE_ENABLE_BASE_IDX 3
+#define regSTATE_CHANGE_STATUS 0x4b7003
+#define regSTATE_CHANGE_STATUS_BASE_IDX 3
+#define regGLOBAL_STATUS 0x4b7004
+#define regGLOBAL_STATUS_BASE_IDX 3
+#define regOUTPUT_STREAM_PAYLOAD_CAPABILITY 0x4b7006
+#define regOUTPUT_STREAM_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINPUT_STREAM_PAYLOAD_CAPABILITY 0x4b7006
+#define regINPUT_STREAM_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINTERRUPT_CONTROL 0x4b7008
+#define regINTERRUPT_CONTROL_BASE_IDX 3
+#define regINTERRUPT_STATUS 0x4b7009
+#define regINTERRUPT_STATUS_BASE_IDX 3
+#define regWALL_CLOCK_COUNTER 0x4b700c
+#define regWALL_CLOCK_COUNTER_BASE_IDX 3
+#define regSTREAM_SYNCHRONIZATION 0x4b700e
+#define regSTREAM_SYNCHRONIZATION_BASE_IDX 3
+#define regCORB_LOWER_BASE_ADDRESS 0x4b7010
+#define regCORB_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regCORB_UPPER_BASE_ADDRESS 0x4b7011
+#define regCORB_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_WRITE_POINTER 0x4b7012
+#define regAZCONTROLLER0_CORB_WRITE_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_READ_POINTER 0x4b7012
+#define regAZCONTROLLER0_CORB_READ_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_CONTROL 0x4b7013
+#define regAZCONTROLLER0_CORB_CONTROL_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_STATUS 0x4b7013
+#define regAZCONTROLLER0_CORB_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_SIZE 0x4b7013
+#define regAZCONTROLLER0_CORB_SIZE_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS 0x4b7014
+#define regAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS 0x4b7015
+#define regAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_WRITE_POINTER 0x4b7016
+#define regAZCONTROLLER0_RIRB_WRITE_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT 0x4b7016
+#define regAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_CONTROL 0x4b7017
+#define regAZCONTROLLER0_RIRB_CONTROL_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_STATUS 0x4b7017
+#define regAZCONTROLLER0_RIRB_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_SIZE 0x4b7017
+#define regAZCONTROLLER0_RIRB_SIZE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE 0x4b7019
+#define regAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS 0x4b701a
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS 0x4b701c
+#define regAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS 0x4b701d
+#define regAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS 0x4b780c
+#define regAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS_BASE_IDX 3
+
+// addressBlock: azendpoint_sinkinfoind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0000
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x0001
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x0002
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x0003
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x0004
+#define ixSINK_DESCRIPTION0 0x0005
+#define ixSINK_DESCRIPTION1 0x0006
+#define ixSINK_DESCRIPTION2 0x0007
+#define ixSINK_DESCRIPTION3 0x0008
+#define ixSINK_DESCRIPTION4 0x0009
+#define ixSINK_DESCRIPTION5 0x000a
+#define ixSINK_DESCRIPTION6 0x000b
+#define ixSINK_DESCRIPTION7 0x000c
+#define ixSINK_DESCRIPTION8 0x000d
+#define ixSINK_DESCRIPTION9 0x000e
+#define ixSINK_DESCRIPTION10 0x000f
+#define ixSINK_DESCRIPTION11 0x0010
+#define ixSINK_DESCRIPTION12 0x0011
+#define ixSINK_DESCRIPTION13 0x0012
+#define ixSINK_DESCRIPTION14 0x0013
+#define ixSINK_DESCRIPTION15 0x0014
+#define ixSINK_DESCRIPTION16 0x0015
+#define ixSINK_DESCRIPTION17 0x0016
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc0resultind
+// base address: 0x0
+#define ixAZALIA_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc1resultind
+// base address: 0x0
+#define ixAZALIA_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azf0stream0_streamind
+// base address: 0x0
+#define ixAZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream1_streamind
+// base address: 0x0
+#define ixAZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream2_streamind
+// base address: 0x0
+#define ixAZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream3_streamind
+// base address: 0x0
+#define ixAZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream4_streamind
+// base address: 0x0
+#define ixAZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream5_streamind
+// base address: 0x0
+#define ixAZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream6_streamind
+// base address: 0x0
+#define ixAZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream7_streamind
+// base address: 0x0
+#define ixAZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream8_streamind
+// base address: 0x0
+#define ixAZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream9_streamind
+// base address: 0x0
+#define ixAZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream10_streamind
+// base address: 0x0
+#define ixAZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream11_streamind
+// base address: 0x0
+#define ixAZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream12_streamind
+// base address: 0x0
+#define ixAZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream13_streamind
+// base address: 0x0
+#define ixAZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream14_streamind
+// base address: 0x0
+#define ixAZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream15_streamind
+// base address: 0x0
+#define ixAZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0endpoint0_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint1_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint2_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint3_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint4_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint5_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint6_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint7_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint2_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint3_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint4_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint5_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azendpoint_descriptorind
+// base address: 0x0
+#define ixAUDIO_DESCRIPTOR0 0x0001
+#define ixAUDIO_DESCRIPTOR1 0x0002
+#define ixAUDIO_DESCRIPTOR2 0x0003
+#define ixAUDIO_DESCRIPTOR3 0x0004
+#define ixAUDIO_DESCRIPTOR4 0x0005
+#define ixAUDIO_DESCRIPTOR5 0x0006
+#define ixAUDIO_DESCRIPTOR6 0x0007
+#define ixAUDIO_DESCRIPTOR7 0x0008
+#define ixAUDIO_DESCRIPTOR8 0x0009
+#define ixAUDIO_DESCRIPTOR9 0x000a
+#define ixAUDIO_DESCRIPTOR10 0x000b
+#define ixAUDIO_DESCRIPTOR11 0x000c
+#define ixAUDIO_DESCRIPTOR12 0x000d
+#define ixAUDIO_DESCRIPTOR13 0x000e
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+// base address: 0x1300000
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x4b7018
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x4b7018
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+// base address: 0x1300000
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x4b7018
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x4b7018
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+// base address: 0x0
+#define regPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040
+#define regPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041
+#define regPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLC_PIXCLK_RESYNC_CNTL 0x0042
+#define regPHYPLLC_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLD_PIXCLK_RESYNC_CNTL 0x0043
+#define regPHYPLLD_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regDP_DTO_DBUF_EN 0x0044
+#define regDP_DTO_DBUF_EN_BASE_IDX 1
+#define regDSCCLK3_DTO_PARAM 0x0045
+#define regDSCCLK3_DTO_PARAM_BASE_IDX 1
+#define regDPREFCLK_CGTT_BLK_CTRL_REG 0x0048
+#define regDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL4 0x0049
+#define regDCCG_GATE_DISABLE_CNTL4_BASE_IDX 1
+#define regDPSTREAMCLK_CNTL 0x004a
+#define regDPSTREAMCLK_CNTL_BASE_IDX 1
+#define regREFCLK_CGTT_BLK_CTRL_REG 0x004b
+#define regREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regPHYPLLE_PIXCLK_RESYNC_CNTL 0x004c
+#define regPHYPLLE_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regDCCG_PERFMON_CNTL2 0x004e
+#define regDCCG_PERFMON_CNTL2_BASE_IDX 1
+#define regDCCG_GLOBAL_FGCG_REP_CNTL 0x0050
+#define regDCCG_GLOBAL_FGCG_REP_CNTL_BASE_IDX 1
+#define regDCCG_DS_DTO_INCR 0x0053
+#define regDCCG_DS_DTO_INCR_BASE_IDX 1
+#define regDCCG_DS_DTO_MODULO 0x0054
+#define regDCCG_DS_DTO_MODULO_BASE_IDX 1
+#define regDCCG_DS_CNTL 0x0055
+#define regDCCG_DS_CNTL_BASE_IDX 1
+#define regDCCG_DS_HW_CAL_INTERVAL 0x0056
+#define regDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1
+#define regDPREFCLK_CNTL 0x0058
+#define regDPREFCLK_CNTL_BASE_IDX 1
+#define regDCE_VERSION 0x005e
+#define regDCE_VERSION_BASE_IDX 1
+#define regDCCG_GTC_CNTL 0x0060
+#define regDCCG_GTC_CNTL_BASE_IDX 1
+#define regDCCG_GTC_DTO_INCR 0x0061
+#define regDCCG_GTC_DTO_INCR_BASE_IDX 1
+#define regDCCG_GTC_DTO_MODULO 0x0062
+#define regDCCG_GTC_DTO_MODULO_BASE_IDX 1
+#define regDCCG_GTC_CURRENT 0x0063
+#define regDCCG_GTC_CURRENT_BASE_IDX 1
+#define regSYMCLK32_SE_CNTL 0x0065
+#define regSYMCLK32_SE_CNTL_BASE_IDX 1
+#define regSYMCLK32_LE_CNTL 0x0066
+#define regSYMCLK32_LE_CNTL_BASE_IDX 1
+#define regDTBCLK_P_CNTL 0x0068
+#define regDTBCLK_P_CNTL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL5 0x0069
+#define regDCCG_GATE_DISABLE_CNTL5_BASE_IDX 1
+#define regDSCCLK0_DTO_PARAM 0x006c
+#define regDSCCLK0_DTO_PARAM_BASE_IDX 1
+#define regDSCCLK1_DTO_PARAM 0x006d
+#define regDSCCLK1_DTO_PARAM_BASE_IDX 1
+#define regDSCCLK2_DTO_PARAM 0x006e
+#define regDSCCLK2_DTO_PARAM_BASE_IDX 1
+#define regOTG_PIXEL_RATE_DIV 0x006f
+#define regOTG_PIXEL_RATE_DIV_BASE_IDX 1
+#define regMILLISECOND_TIME_BASE_DIV 0x0070
+#define regMILLISECOND_TIME_BASE_DIV_BASE_IDX 1
+#define regDISPCLK_FREQ_CHANGE_CNTL 0x0071
+#define regDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1
+#define regDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072
+#define regDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1
+#define regDCCG_PERFMON_CNTL 0x0073
+#define regDCCG_PERFMON_CNTL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL 0x0074
+#define regDCCG_GATE_DISABLE_CNTL_BASE_IDX 1
+#define regDISPCLK_CGTT_BLK_CTRL_REG 0x0075
+#define regDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regSOCCLK_CGTT_BLK_CTRL_REG 0x0076
+#define regSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_CAC_STATUS 0x0077
+#define regDCCG_CAC_STATUS_BASE_IDX 1
+#define regMICROSECOND_TIME_BASE_DIV 0x007b
+#define regMICROSECOND_TIME_BASE_DIV_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL2 0x007c
+#define regDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1
+#define regSYMCLK_CGTT_BLK_CTRL_REG 0x007d
+#define regSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_DISP_CNTL_REG 0x007f
+#define regDCCG_DISP_CNTL_REG_BASE_IDX 1
+#define regOTG0_PIXEL_RATE_CNTL 0x0080
+#define regOTG0_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO0_PHASE 0x0081
+#define regDP_DTO0_PHASE_BASE_IDX 1
+#define regDP_DTO0_MODULO 0x0082
+#define regDP_DTO0_MODULO_BASE_IDX 1
+#define regOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083
+#define regOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG1_PIXEL_RATE_CNTL 0x0084
+#define regOTG1_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO1_PHASE 0x0085
+#define regDP_DTO1_PHASE_BASE_IDX 1
+#define regDP_DTO1_MODULO 0x0086
+#define regDP_DTO1_MODULO_BASE_IDX 1
+#define regOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087
+#define regOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG2_PIXEL_RATE_CNTL 0x0088
+#define regOTG2_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO2_PHASE 0x0089
+#define regDP_DTO2_PHASE_BASE_IDX 1
+#define regDP_DTO2_MODULO 0x008a
+#define regDP_DTO2_MODULO_BASE_IDX 1
+#define regOTG2_PHYPLL_PIXEL_RATE_CNTL 0x008b
+#define regOTG2_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG3_PIXEL_RATE_CNTL 0x008c
+#define regOTG3_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO3_PHASE 0x008d
+#define regDP_DTO3_PHASE_BASE_IDX 1
+#define regDP_DTO3_MODULO 0x008e
+#define regDP_DTO3_MODULO_BASE_IDX 1
+#define regOTG3_PHYPLL_PIXEL_RATE_CNTL 0x008f
+#define regOTG3_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDPPCLK_CGTT_BLK_CTRL_REG 0x0098
+#define regDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDPPCLK0_DTO_PARAM 0x0099
+#define regDPPCLK0_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK1_DTO_PARAM 0x009a
+#define regDPPCLK1_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK2_DTO_PARAM 0x009b
+#define regDPPCLK2_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK3_DTO_PARAM 0x009c
+#define regDPPCLK3_DTO_PARAM_BASE_IDX 1
+#define regDCCG_CAC_STATUS2 0x009f
+#define regDCCG_CAC_STATUS2_BASE_IDX 1
+#define regSYMCLKA_CLOCK_ENABLE 0x00a0
+#define regSYMCLKA_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKB_CLOCK_ENABLE 0x00a1
+#define regSYMCLKB_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKC_CLOCK_ENABLE 0x00a2
+#define regSYMCLKC_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKD_CLOCK_ENABLE 0x00a3
+#define regSYMCLKD_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKE_CLOCK_ENABLE 0x00a4
+#define regSYMCLKE_CLOCK_ENABLE_BASE_IDX 1
+#define regDCCG_SOFT_RESET 0x00a6
+#define regDCCG_SOFT_RESET_BASE_IDX 1
+#define regDSCCLK_DTO_CTRL 0x00a7
+#define regDSCCLK_DTO_CTRL_BASE_IDX 1
+#define regDPPCLK_CTRL 0x00a8
+#define regDPPCLK_CTRL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL6 0x00a9
+#define regDCCG_GATE_DISABLE_CNTL6_BASE_IDX 1
+#define regSYMCLK_PSP_CNTL 0x00aa
+#define regSYMCLK_PSP_CNTL_BASE_IDX 1
+#define regDCCG_AUDIO_DTO_SOURCE 0x00ab
+#define regDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO0_PHASE 0x00ac
+#define regDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO0_MODULE 0x00ad
+#define regDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO1_PHASE 0x00ae
+#define regDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO1_MODULE 0x00af
+#define regDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0
+#define regDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1
+#define regDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2
+#define regDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3
+#define regDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4
+#define regDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5
+#define regDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1
+#define regDPPCLK_DTO_CTRL 0x00b6
+#define regDPPCLK_DTO_CTRL_BASE_IDX 1
+#define regDCCG_VSYNC_CNT_CTRL 0x00b8
+#define regDCCG_VSYNC_CNT_CTRL_BASE_IDX 1
+#define regDCCG_VSYNC_CNT_INT_CTRL 0x00b9
+#define regDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1
+#define regFORCE_SYMCLK_DISABLE 0x00ba
+#define regFORCE_SYMCLK_DISABLE_BASE_IDX 1
+#define regDTBCLK_DTO0_PHASE 0x0018
+#define regDTBCLK_DTO0_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO1_PHASE 0x0019
+#define regDTBCLK_DTO1_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO2_PHASE 0x001a
+#define regDTBCLK_DTO2_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO3_PHASE 0x001b
+#define regDTBCLK_DTO3_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO0_MODULO 0x001f
+#define regDTBCLK_DTO0_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO1_MODULO 0x0020
+#define regDTBCLK_DTO1_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO2_MODULO 0x0021
+#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO3_MODULO 0x0022
+#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
+#define regPHYASYMCLK_CLOCK_CNTL 0x0052
+#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
+#define regPHYBSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYCSYMCLK_CLOCK_CNTL 0x0054
+#define regPHYCSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYDSYMCLK_CLOCK_CNTL 0x0055
+#define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYESYMCLK_CLOCK_CNTL 0x0056
+#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
+#define regDCCG_GATE_DISABLE_CNTL3 0x005a
+#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
+#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
+#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2
+#define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061
+#define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2
+#define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062
+#define regDCCG_AUDIO_DTBCLK_DTO_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO_DBUF_EN 0x0063
+#define regDTBCLK_DTO_DBUF_EN_BASE_IDX 2
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+// base address: 0x0
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: azroot_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0f00
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0x0f02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0x0f04
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17ff
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1f04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1f05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1f0a
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1f0b
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1f0f
+
+
+// addressBlock: azendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270d
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270e
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273e
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x2771
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2f09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2f0a
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2f0b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371f
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378a
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378b
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378c
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378d
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378e
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378f
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x3797
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x3798
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB 0x3799
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x379a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE 0x379b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x379c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x379d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x379e
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3f09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3f0c
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3f0e
+
+
+// addressBlock: azinputendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x6200
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x6706
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x670d
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x6f09
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6f0a
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x6f0b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x7707
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x7708
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE 0x7709
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x771c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x771d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x771e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x771f
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x7771
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE 0x7777
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE 0x7778
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE 0x7779
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE 0x777a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR 0x777c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x7785
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x7786
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x7787
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x7788
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x7798
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB 0x7799
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x779a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x779b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x779c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L 0x779d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H 0x779e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x7f09
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x7f0c
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+// base address: 0x0
+#define regDC_PERFMON0_PERFCOUNTER_CNTL 0x0000
+#define regDC_PERFMON0_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON0_PERFCOUNTER_CNTL2 0x0001
+#define regDC_PERFMON0_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON0_PERFCOUNTER_STATE 0x0002
+#define regDC_PERFMON0_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CNTL 0x0003
+#define regDC_PERFMON0_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CNTL2 0x0004
+#define regDC_PERFMON0_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CVALUE_INT_MISC 0x0005
+#define regDC_PERFMON0_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CVALUE_LOW 0x0006
+#define regDC_PERFMON0_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_HI 0x0007
+#define regDC_PERFMON0_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_LOW 0x0008
+#define regDC_PERFMON0_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+// base address: 0x30
+#define regDC_PERFMON1_PERFCOUNTER_CNTL 0x000c
+#define regDC_PERFMON1_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON1_PERFCOUNTER_CNTL2 0x000d
+#define regDC_PERFMON1_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON1_PERFCOUNTER_STATE 0x000e
+#define regDC_PERFMON1_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CNTL 0x000f
+#define regDC_PERFMON1_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CNTL2 0x0010
+#define regDC_PERFMON1_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CVALUE_INT_MISC 0x0011
+#define regDC_PERFMON1_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CVALUE_LOW 0x0012
+#define regDC_PERFMON1_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_HI 0x0013
+#define regDC_PERFMON1_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_LOW 0x0014
+#define regDC_PERFMON1_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+// base address: 0x0
+#define regDOMAIN0_PG_CONFIG 0x0080
+#define regDOMAIN0_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN0_PG_STATUS 0x0081
+#define regDOMAIN0_PG_STATUS_BASE_IDX 2
+#define regDOMAIN1_PG_CONFIG 0x0082
+#define regDOMAIN1_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN1_PG_STATUS 0x0083
+#define regDOMAIN1_PG_STATUS_BASE_IDX 2
+#define regDOMAIN2_PG_CONFIG 0x0084
+#define regDOMAIN2_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN2_PG_STATUS 0x0085
+#define regDOMAIN2_PG_STATUS_BASE_IDX 2
+#define regDOMAIN3_PG_CONFIG 0x0086
+#define regDOMAIN3_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN3_PG_STATUS 0x0087
+#define regDOMAIN3_PG_STATUS_BASE_IDX 2
+#define regDOMAIN16_PG_CONFIG 0x0089
+#define regDOMAIN16_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN16_PG_STATUS 0x008a
+#define regDOMAIN16_PG_STATUS_BASE_IDX 2
+#define regDOMAIN17_PG_CONFIG 0x008b
+#define regDOMAIN17_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN17_PG_STATUS 0x008c
+#define regDOMAIN17_PG_STATUS_BASE_IDX 2
+#define regDOMAIN18_PG_CONFIG 0x008d
+#define regDOMAIN18_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN18_PG_STATUS 0x008e
+#define regDOMAIN18_PG_STATUS_BASE_IDX 2
+#define regDOMAIN19_PG_CONFIG 0x008f
+#define regDOMAIN19_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN19_PG_STATUS 0x0090
+#define regDOMAIN19_PG_STATUS_BASE_IDX 2
+#define regDOMAIN22_PG_CONFIG 0x0092
+#define regDOMAIN22_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN22_PG_STATUS 0x0093
+#define regDOMAIN22_PG_STATUS_BASE_IDX 2
+#define regDOMAIN23_PG_CONFIG 0x0094
+#define regDOMAIN23_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN23_PG_STATUS 0x0095
+#define regDOMAIN23_PG_STATUS_BASE_IDX 2
+#define regDOMAIN24_PG_CONFIG 0x0096
+#define regDOMAIN24_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN24_PG_STATUS 0x0097
+#define regDOMAIN24_PG_STATUS_BASE_IDX 2
+#define regDOMAIN25_PG_CONFIG 0x0098
+#define regDOMAIN25_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN25_PG_STATUS 0x0099
+#define regDOMAIN25_PG_STATUS_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS 0x009a
+#define regDCPG_INTERRUPT_STATUS_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS_2 0x009b
+#define regDCPG_INTERRUPT_STATUS_2_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS_3 0x009c
+#define regDCPG_INTERRUPT_STATUS_3_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_1 0x009d
+#define regDCPG_INTERRUPT_CONTROL_1_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_2 0x009e
+#define regDCPG_INTERRUPT_CONTROL_2_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_3 0x009f
+#define regDCPG_INTERRUPT_CONTROL_3_BASE_IDX 2
+#define regDC_IP_REQUEST_CNTL 0x00a0
+#define regDC_IP_REQUEST_CNTL_BASE_IDX 2
+#define regLONO_MEM_PWR_REQ_CNTL 0x00a4
+#define regLONO_MEM_PWR_REQ_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2f8
+#define regDC_PERFMON2_PERFCOUNTER_CNTL 0x00be
+#define regDC_PERFMON2_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON2_PERFCOUNTER_CNTL2 0x00bf
+#define regDC_PERFMON2_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON2_PERFCOUNTER_STATE 0x00c0
+#define regDC_PERFMON2_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CNTL 0x00c1
+#define regDC_PERFMON2_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CNTL2 0x00c2
+#define regDC_PERFMON2_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CVALUE_INT_MISC 0x00c3
+#define regDC_PERFMON2_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CVALUE_LOW 0x00c4
+#define regDC_PERFMON2_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_HI 0x00c5
+#define regDC_PERFMON2_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_LOW 0x00c6
+#define regDC_PERFMON2_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+// base address: 0x0
+#define regCC_DC_PIPE_DIS 0x00ca
+#define regCC_DC_PIPE_DIS_BASE_IDX 2
+#define regDMU_CLK_CNTL 0x00cb
+#define regDMU_CLK_CNTL_BASE_IDX 2
+#define regDMCUB_SMU_INTERRUPT_CNTL 0x00cd
+#define regDMCUB_SMU_INTERRUPT_CNTL_BASE_IDX 2
+#define regSMU_INTERRUPT_CONTROL 0x00ce
+#define regSMU_INTERRUPT_CONTROL_BASE_IDX 2
+#define regZSC_CNTL 0x00cf
+#define regZSC_CNTL_BASE_IDX 2
+#define regZSC_CNTL2 0x00d0
+#define regZSC_CNTL2_BASE_IDX 2
+#define regDMU_MISC_ALLOW_DS_FORCE 0x00d6
+#define regDMU_MISC_ALLOW_DS_FORCE_BASE_IDX 2
+#define regZSC_STATUS 0x00d7
+#define regZSC_STATUS_BASE_IDX 2
+#define regDMU_DISPCLK_CGTT_BLK_CTRL_REG 0x00d8
+#define regDMU_DISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 2
+#define regDMU_SOCCLK_CGTT_BLK_CTRL_REG 0x00d9
+#define regDMU_SOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 2
+#define regZPR_CLK_UNGATE_DELAY 0x00da
+#define regZPR_CLK_UNGATE_DELAY_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+// base address: 0x0
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE 0x0126
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_VSTARTUP 0x0127
+#define regDC_GPU_TIMER_START_POSITION_VSTARTUP_BASE_IDX 2
+#define regDC_GPU_TIMER_READ 0x0128
+#define regDC_GPU_TIMER_READ_BASE_IDX 2
+#define regDC_GPU_TIMER_READ_CNTL 0x0129
+#define regDC_GPU_TIMER_READ_CNTL_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS 0x012a
+#define regDISP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE 0x012b
+#define regDISP_INTERRUPT_STATUS_CONTINUE_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE2 0x012c
+#define regDISP_INTERRUPT_STATUS_CONTINUE2_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE3 0x012d
+#define regDISP_INTERRUPT_STATUS_CONTINUE3_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE4 0x012e
+#define regDISP_INTERRUPT_STATUS_CONTINUE4_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE5 0x012f
+#define regDISP_INTERRUPT_STATUS_CONTINUE5_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE6 0x0130
+#define regDISP_INTERRUPT_STATUS_CONTINUE6_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE7 0x0131
+#define regDISP_INTERRUPT_STATUS_CONTINUE7_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE8 0x0132
+#define regDISP_INTERRUPT_STATUS_CONTINUE8_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE9 0x0133
+#define regDISP_INTERRUPT_STATUS_CONTINUE9_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE10 0x0134
+#define regDISP_INTERRUPT_STATUS_CONTINUE10_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE11 0x0135
+#define regDISP_INTERRUPT_STATUS_CONTINUE11_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE12 0x0136
+#define regDISP_INTERRUPT_STATUS_CONTINUE12_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE13 0x0137
+#define regDISP_INTERRUPT_STATUS_CONTINUE13_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE14 0x0138
+#define regDISP_INTERRUPT_STATUS_CONTINUE14_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE15 0x0139
+#define regDISP_INTERRUPT_STATUS_CONTINUE15_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE16 0x013a
+#define regDISP_INTERRUPT_STATUS_CONTINUE16_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE17 0x013b
+#define regDISP_INTERRUPT_STATUS_CONTINUE17_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE18 0x013c
+#define regDISP_INTERRUPT_STATUS_CONTINUE18_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE19 0x013d
+#define regDISP_INTERRUPT_STATUS_CONTINUE19_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE20 0x013e
+#define regDISP_INTERRUPT_STATUS_CONTINUE20_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE21 0x013f
+#define regDISP_INTERRUPT_STATUS_CONTINUE21_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE22 0x0140
+#define regDISP_INTERRUPT_STATUS_CONTINUE22_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_VREADY 0x0141
+#define regDC_GPU_TIMER_START_POSITION_VREADY_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_FLIP 0x0142
+#define regDC_GPU_TIMER_START_POSITION_FLIP_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK 0x0143
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_FLIP_AWAY 0x0144
+#define regDC_GPU_TIMER_START_POSITION_FLIP_AWAY_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE23 0x0145
+#define regDISP_INTERRUPT_STATUS_CONTINUE23_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE24 0x0146
+#define regDISP_INTERRUPT_STATUS_CONTINUE24_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE25 0x0147
+#define regDISP_INTERRUPT_STATUS_CONTINUE25_BASE_IDX 2
+#define regDCCG_INTERRUPT_DEST 0x0148
+#define regDCCG_INTERRUPT_DEST_BASE_IDX 2
+#define regDMU_INTERRUPT_DEST 0x0149
+#define regDMU_INTERRUPT_DEST_BASE_IDX 2
+#define regDMU_INTERRUPT_DEST2 0x014a
+#define regDMU_INTERRUPT_DEST2_BASE_IDX 2
+#define regDCPG_INTERRUPT_DEST 0x014b
+#define regDCPG_INTERRUPT_DEST_BASE_IDX 2
+#define regDCPG_INTERRUPT_DEST2 0x014c
+#define regDCPG_INTERRUPT_DEST2_BASE_IDX 2
+#define regMMHUBBUB_INTERRUPT_DEST 0x014d
+#define regMMHUBBUB_INTERRUPT_DEST_BASE_IDX 2
+#define regWB_INTERRUPT_DEST 0x014e
+#define regWB_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_INTERRUPT_DEST 0x014f
+#define regDCHUB_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_PERFCOUNTER_INTERRUPT_DEST 0x0150
+#define regDCHUB_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_INTERRUPT_DEST2 0x0151
+#define regDCHUB_INTERRUPT_DEST2_BASE_IDX 2
+#define regDPP_PERFCOUNTER_INTERRUPT_DEST 0x0152
+#define regDPP_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define regMPC_INTERRUPT_DEST 0x0153
+#define regMPC_INTERRUPT_DEST_BASE_IDX 2
+#define regOPP_INTERRUPT_DEST 0x0154
+#define regOPP_INTERRUPT_DEST_BASE_IDX 2
+#define regOPTC_INTERRUPT_DEST 0x0155
+#define regOPTC_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG0_INTERRUPT_DEST 0x0156
+#define regOTG0_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG1_INTERRUPT_DEST 0x0157
+#define regOTG1_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG2_INTERRUPT_DEST 0x0158
+#define regOTG2_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG3_INTERRUPT_DEST 0x0159
+#define regOTG3_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG4_INTERRUPT_DEST 0x015a
+#define regOTG4_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG5_INTERRUPT_DEST 0x015b
+#define regOTG5_INTERRUPT_DEST_BASE_IDX 2
+#define regDIG_INTERRUPT_DEST 0x015c
+#define regDIG_INTERRUPT_DEST_BASE_IDX 2
+#define regI2C_DDC_HPD_INTERRUPT_DEST 0x015d
+#define regI2C_DDC_HPD_INTERRUPT_DEST_BASE_IDX 2
+#define regDIO_INTERRUPT_DEST 0x015f
+#define regDIO_INTERRUPT_DEST_BASE_IDX 2
+#define regDCIO_INTERRUPT_DEST 0x0160
+#define regDCIO_INTERRUPT_DEST_BASE_IDX 2
+#define regHPD_INTERRUPT_DEST 0x0161
+#define regHPD_INTERRUPT_DEST_BASE_IDX 2
+#define regAZ_INTERRUPT_DEST 0x0162
+#define regAZ_INTERRUPT_DEST_BASE_IDX 2
+#define regAUX_INTERRUPT_DEST 0x0163
+#define regAUX_INTERRUPT_DEST_BASE_IDX 2
+#define regDSC_INTERRUPT_DEST 0x0164
+#define regDSC_INTERRUPT_DEST_BASE_IDX 2
+#define regHPO_INTERRUPT_DEST 0x0165
+#define regHPO_INTERRUPT_DEST_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_fgsec_dispdec
+// base address: 0x0
+#define regDMCUB_RBBMIF_SEC_CNTL 0x017a
+#define regDMCUB_RBBMIF_SEC_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+// base address: 0x0
+#define regRBBMIF_TIMEOUT 0x017f
+#define regRBBMIF_TIMEOUT_BASE_IDX 2
+#define regRBBMIF_STATUS 0x0180
+#define regRBBMIF_STATUS_BASE_IDX 2
+#define regRBBMIF_STATUS_2 0x0181
+#define regRBBMIF_STATUS_2_BASE_IDX 2
+#define regRBBMIF_INT_STATUS 0x0182
+#define regRBBMIF_INT_STATUS_BASE_IDX 2
+#define regRBBMIF_TIMEOUT_DIS 0x0183
+#define regRBBMIF_TIMEOUT_DIS_BASE_IDX 2
+#define regRBBMIF_TIMEOUT_DIS_2 0x0184
+#define regRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2
+#define regRBBMIF_STATUS_FLAG 0x0185
+#define regRBBMIF_STATUS_FLAG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+// base address: 0x0
+#define regDMCUB_REGION0_OFFSET 0x018e
+#define regDMCUB_REGION0_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION0_OFFSET_HIGH 0x018f
+#define regDMCUB_REGION0_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION1_OFFSET 0x0190
+#define regDMCUB_REGION1_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION1_OFFSET_HIGH 0x0191
+#define regDMCUB_REGION1_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION2_OFFSET 0x0192
+#define regDMCUB_REGION2_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION2_OFFSET_HIGH 0x0193
+#define regDMCUB_REGION2_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION4_OFFSET 0x0196
+#define regDMCUB_REGION4_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION4_OFFSET_HIGH 0x0197
+#define regDMCUB_REGION4_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION5_OFFSET 0x0198
+#define regDMCUB_REGION5_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION5_OFFSET_HIGH 0x0199
+#define regDMCUB_REGION5_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION6_OFFSET 0x019a
+#define regDMCUB_REGION6_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION6_OFFSET_HIGH 0x019b
+#define regDMCUB_REGION6_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION7_OFFSET 0x019c
+#define regDMCUB_REGION7_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION7_OFFSET_HIGH 0x019d
+#define regDMCUB_REGION7_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION0_TOP_ADDRESS 0x019e
+#define regDMCUB_REGION0_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION1_TOP_ADDRESS 0x019f
+#define regDMCUB_REGION1_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION2_TOP_ADDRESS 0x01a0
+#define regDMCUB_REGION2_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION4_TOP_ADDRESS 0x01a1
+#define regDMCUB_REGION4_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION5_TOP_ADDRESS 0x01a2
+#define regDMCUB_REGION5_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION6_TOP_ADDRESS 0x01a3
+#define regDMCUB_REGION6_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION7_TOP_ADDRESS 0x01a4
+#define regDMCUB_REGION7_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_BASE_ADDRESS 0x01a5
+#define regDMCUB_REGION3_CW0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_BASE_ADDRESS 0x01a6
+#define regDMCUB_REGION3_CW1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_BASE_ADDRESS 0x01a7
+#define regDMCUB_REGION3_CW2_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_BASE_ADDRESS 0x01a8
+#define regDMCUB_REGION3_CW3_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_BASE_ADDRESS 0x01a9
+#define regDMCUB_REGION3_CW4_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_BASE_ADDRESS 0x01aa
+#define regDMCUB_REGION3_CW5_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_BASE_ADDRESS 0x01ab
+#define regDMCUB_REGION3_CW6_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_BASE_ADDRESS 0x01ac
+#define regDMCUB_REGION3_CW7_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_TOP_ADDRESS 0x01ad
+#define regDMCUB_REGION3_CW0_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_TOP_ADDRESS 0x01ae
+#define regDMCUB_REGION3_CW1_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_TOP_ADDRESS 0x01af
+#define regDMCUB_REGION3_CW2_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_TOP_ADDRESS 0x01b0
+#define regDMCUB_REGION3_CW3_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_TOP_ADDRESS 0x01b1
+#define regDMCUB_REGION3_CW4_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_TOP_ADDRESS 0x01b2
+#define regDMCUB_REGION3_CW5_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_TOP_ADDRESS 0x01b3
+#define regDMCUB_REGION3_CW6_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_TOP_ADDRESS 0x01b4
+#define regDMCUB_REGION3_CW7_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_OFFSET 0x01b5
+#define regDMCUB_REGION3_CW0_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_OFFSET_HIGH 0x01b6
+#define regDMCUB_REGION3_CW0_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_OFFSET 0x01b7
+#define regDMCUB_REGION3_CW1_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_OFFSET_HIGH 0x01b8
+#define regDMCUB_REGION3_CW1_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_OFFSET 0x01b9
+#define regDMCUB_REGION3_CW2_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_OFFSET_HIGH 0x01ba
+#define regDMCUB_REGION3_CW2_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_OFFSET 0x01bb
+#define regDMCUB_REGION3_CW3_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_OFFSET_HIGH 0x01bc
+#define regDMCUB_REGION3_CW3_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_OFFSET 0x01bd
+#define regDMCUB_REGION3_CW4_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_OFFSET_HIGH 0x01be
+#define regDMCUB_REGION3_CW4_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_OFFSET 0x01bf
+#define regDMCUB_REGION3_CW5_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_OFFSET_HIGH 0x01c0
+#define regDMCUB_REGION3_CW5_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_OFFSET 0x01c1
+#define regDMCUB_REGION3_CW6_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_OFFSET_HIGH 0x01c2
+#define regDMCUB_REGION3_CW6_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_OFFSET 0x01c3
+#define regDMCUB_REGION3_CW7_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_OFFSET_HIGH 0x01c4
+#define regDMCUB_REGION3_CW7_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_INTERRUPT_ENABLE 0x01c5
+#define regDMCUB_INTERRUPT_ENABLE_BASE_IDX 2
+#define regDMCUB_INTERRUPT_ACK 0x01c6
+#define regDMCUB_INTERRUPT_ACK_BASE_IDX 2
+#define regDMCUB_INTERRUPT_STATUS 0x01c7
+#define regDMCUB_INTERRUPT_STATUS_BASE_IDX 2
+#define regDMCUB_INTERRUPT_TYPE 0x01c8
+#define regDMCUB_INTERRUPT_TYPE_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_STATUS 0x01c9
+#define regDMCUB_EXT_INTERRUPT_STATUS_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_CTXID 0x01ca
+#define regDMCUB_EXT_INTERRUPT_CTXID_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_ACK 0x01cb
+#define regDMCUB_EXT_INTERRUPT_ACK_BASE_IDX 2
+#define regDMCUB_INST_FETCH_FAULT_ADDR 0x01cc
+#define regDMCUB_INST_FETCH_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_DATA_WRITE_FAULT_ADDR 0x01cd
+#define regDMCUB_DATA_WRITE_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_SEC_CNTL 0x01ce
+#define regDMCUB_SEC_CNTL_BASE_IDX 2
+#define regDMCUB_MEM_CNTL 0x01cf
+#define regDMCUB_MEM_CNTL_BASE_IDX 2
+#define regDMCUB_INBOX0_BASE_ADDRESS 0x01d0
+#define regDMCUB_INBOX0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_INBOX0_SIZE 0x01d1
+#define regDMCUB_INBOX0_SIZE_BASE_IDX 2
+#define regDMCUB_INBOX0_WPTR 0x01d2
+#define regDMCUB_INBOX0_WPTR_BASE_IDX 2
+#define regDMCUB_INBOX0_RPTR 0x01d3
+#define regDMCUB_INBOX0_RPTR_BASE_IDX 2
+#define regDMCUB_INBOX1_BASE_ADDRESS 0x01d4
+#define regDMCUB_INBOX1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_INBOX1_SIZE 0x01d5
+#define regDMCUB_INBOX1_SIZE_BASE_IDX 2
+#define regDMCUB_INBOX1_WPTR 0x01d6
+#define regDMCUB_INBOX1_WPTR_BASE_IDX 2
+#define regDMCUB_INBOX1_RPTR 0x01d7
+#define regDMCUB_INBOX1_RPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX0_BASE_ADDRESS 0x01d8
+#define regDMCUB_OUTBOX0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_OUTBOX0_SIZE 0x01d9
+#define regDMCUB_OUTBOX0_SIZE_BASE_IDX 2
+#define regDMCUB_OUTBOX0_WPTR 0x01da
+#define regDMCUB_OUTBOX0_WPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX0_RPTR 0x01db
+#define regDMCUB_OUTBOX0_RPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX1_BASE_ADDRESS 0x01dc
+#define regDMCUB_OUTBOX1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_OUTBOX1_SIZE 0x01dd
+#define regDMCUB_OUTBOX1_SIZE_BASE_IDX 2
+#define regDMCUB_OUTBOX1_WPTR 0x01de
+#define regDMCUB_OUTBOX1_WPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX1_RPTR 0x01df
+#define regDMCUB_OUTBOX1_RPTR_BASE_IDX 2
+#define regDMCUB_TIMER_TRIGGER0 0x01e0
+#define regDMCUB_TIMER_TRIGGER0_BASE_IDX 2
+#define regDMCUB_TIMER_TRIGGER1 0x01e1
+#define regDMCUB_TIMER_TRIGGER1_BASE_IDX 2
+#define regDMCUB_TIMER_WINDOW 0x01e2
+#define regDMCUB_TIMER_WINDOW_BASE_IDX 2
+#define regDMCUB_SCRATCH0 0x01e3
+#define regDMCUB_SCRATCH0_BASE_IDX 2
+#define regDMCUB_SCRATCH1 0x01e4
+#define regDMCUB_SCRATCH1_BASE_IDX 2
+#define regDMCUB_SCRATCH2 0x01e5
+#define regDMCUB_SCRATCH2_BASE_IDX 2
+#define regDMCUB_SCRATCH3 0x01e6
+#define regDMCUB_SCRATCH3_BASE_IDX 2
+#define regDMCUB_SCRATCH4 0x01e7
+#define regDMCUB_SCRATCH4_BASE_IDX 2
+#define regDMCUB_SCRATCH5 0x01e8
+#define regDMCUB_SCRATCH5_BASE_IDX 2
+#define regDMCUB_SCRATCH6 0x01e9
+#define regDMCUB_SCRATCH6_BASE_IDX 2
+#define regDMCUB_SCRATCH7 0x01ea
+#define regDMCUB_SCRATCH7_BASE_IDX 2
+#define regDMCUB_SCRATCH8 0x01eb
+#define regDMCUB_SCRATCH8_BASE_IDX 2
+#define regDMCUB_SCRATCH9 0x01ec
+#define regDMCUB_SCRATCH9_BASE_IDX 2
+#define regDMCUB_SCRATCH10 0x01ed
+#define regDMCUB_SCRATCH10_BASE_IDX 2
+#define regDMCUB_SCRATCH11 0x01ee
+#define regDMCUB_SCRATCH11_BASE_IDX 2
+#define regDMCUB_SCRATCH12 0x01ef
+#define regDMCUB_SCRATCH12_BASE_IDX 2
+#define regDMCUB_SCRATCH13 0x01f0
+#define regDMCUB_SCRATCH13_BASE_IDX 2
+#define regDMCUB_SCRATCH14 0x01f1
+#define regDMCUB_SCRATCH14_BASE_IDX 2
+#define regDMCUB_SCRATCH15 0x01f2
+#define regDMCUB_SCRATCH15_BASE_IDX 2
+#define regDMCUB_SCRATCH16 0x01f3
+#define regDMCUB_SCRATCH16_BASE_IDX 2
+#define regDMCUB_SCRATCH17 0x01f4
+#define regDMCUB_SCRATCH17_BASE_IDX 2
+#define regDMCUB_SCRATCH18 0x01f5
+#define regDMCUB_SCRATCH18_BASE_IDX 2
+#define regDMCUB_CNTL 0x01f6
+#define regDMCUB_CNTL_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN0 0x01f7
+#define regDMCUB_GPINT_DATAIN0_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN1 0x01f8
+#define regDMCUB_GPINT_DATAIN1_BASE_IDX 2
+#define regDMCUB_GPINT_DATAOUT 0x01f9
+#define regDMCUB_GPINT_DATAOUT_BASE_IDX 2
+#define regDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR 0x01fa
+#define regDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_LS_WAKE_INT_ENABLE 0x01fb
+#define regDMCUB_LS_WAKE_INT_ENABLE_BASE_IDX 2
+#define regDMCUB_MEM_PWR_CNTL 0x01fc
+#define regDMCUB_MEM_PWR_CNTL_BASE_IDX 2
+#define regDMCUB_TIMER_CURRENT 0x01fd
+#define regDMCUB_TIMER_CURRENT_BASE_IDX 2
+#define regDMCUB_PROC_ID 0x01ff
+#define regDMCUB_PROC_ID_BASE_IDX 2
+#define regDMCUB_CNTL2 0x0200
+#define regDMCUB_CNTL2_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN2 0x0215
+#define regDMCUB_GPINT_DATAIN2_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN3 0x0216
+#define regDMCUB_GPINT_DATAIN3_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN4 0x0217
+#define regDMCUB_GPINT_DATAIN4_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN5 0x0218
+#define regDMCUB_GPINT_DATAIN5_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN6 0x0219
+#define regDMCUB_GPINT_DATAIN6_BASE_IDX 2
+#define regDMCUB_REGION3_TMR_AXI_SPACE 0x021a
+#define regDMCUB_REGION3_TMR_AXI_SPACE_BASE_IDX 2
+#define regDMCUB_SCRATCH19 0x022e
+#define regDMCUB_SCRATCH19_BASE_IDX 2
+#define regDMCUB_SCRATCH20 0x022f
+#define regDMCUB_SCRATCH20_BASE_IDX 2
+#define regDMCUB_SCRATCH21 0x0230
+#define regDMCUB_SCRATCH21_BASE_IDX 2
+#define regDMCUB_SCRATCH22 0x0231
+#define regDMCUB_SCRATCH22_BASE_IDX 2
+#define regDMCUB_SCRATCH23 0x0232
+#define regDMCUB_SCRATCH23_BASE_IDX 2
+
+
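Every reg*/reg*_BASE_IDX pair in this header, the DMCUB block above included, encodes a dword offset that is relative to a segment selected by the _BASE_IDX value; the driver adds the segment base for the ASIC before touching MMIO. The sketch below shows how such a pair can be resolved into an absolute dword offset. It is a minimal illustration only: the segment-base table and the DCN_REG_OFFSET helper are made-up stand-ins, not the driver's actual per-ASIC reg_offset accessors.

/* Minimal sketch (not the driver's real accessor): resolving a
 * reg<NAME>/reg<NAME>_BASE_IDX pair into an absolute dword offset.
 * The segment bases below are hypothetical example values. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-IP segment bases; index 2 matches the _BASE_IDX
 * value used by the DCN defines above. */
static const uint32_t dcn_seg_base[] = { 0x0000, 0x000a, 0x12c0, 0x3e80 };

/* Combine a register's segment index and its relative dword offset. */
#define DCN_REG_OFFSET(seg, reg) (dcn_seg_base[(seg)] + (reg))

/* Values copied from the header above. */
#define regDMCUB_INBOX1_WPTR          0x01d6
#define regDMCUB_INBOX1_WPTR_BASE_IDX 2

int main(void)
{
	uint32_t off = DCN_REG_OFFSET(regDMCUB_INBOX1_WPTR_BASE_IDX,
				      regDMCUB_INBOX1_WPTR);
	/* In-kernel this dword offset would feed an MMIO accessor. */
	printf("DMCUB_INBOX1_WPTR dword offset: 0x%04" PRIx32 "\n", off);
	return 0;
}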
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+// base address: 0x0
+#define regMCIF_WB_BUFMGR_SW_CONTROL 0x0272
+#define regMCIF_WB_BUFMGR_SW_CONTROL_BASE_IDX 2
+#define regMCIF_WB_BUFMGR_STATUS 0x0274
+#define regMCIF_WB_BUFMGR_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_PITCH 0x0275
+#define regMCIF_WB_BUF_PITCH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_STATUS 0x0276
+#define regMCIF_WB_BUF_1_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_1_STATUS2 0x0277
+#define regMCIF_WB_BUF_1_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_2_STATUS 0x0278
+#define regMCIF_WB_BUF_2_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_2_STATUS2 0x0279
+#define regMCIF_WB_BUF_2_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_3_STATUS 0x027a
+#define regMCIF_WB_BUF_3_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_3_STATUS2 0x027b
+#define regMCIF_WB_BUF_3_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_4_STATUS 0x027c
+#define regMCIF_WB_BUF_4_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_4_STATUS2 0x027d
+#define regMCIF_WB_BUF_4_STATUS2_BASE_IDX 2
+#define regMCIF_WB_ARBITRATION_CONTROL 0x027e
+#define regMCIF_WB_ARBITRATION_CONTROL_BASE_IDX 2
+#define regMCIF_WB_SCLK_CHANGE 0x027f
+#define regMCIF_WB_SCLK_CHANGE_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_Y 0x0282
+#define regMCIF_WB_BUF_1_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_C 0x0284
+#define regMCIF_WB_BUF_1_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_Y 0x0286
+#define regMCIF_WB_BUF_2_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_C 0x0288
+#define regMCIF_WB_BUF_2_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_Y 0x028a
+#define regMCIF_WB_BUF_3_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_C 0x028c
+#define regMCIF_WB_BUF_3_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_Y 0x028e
+#define regMCIF_WB_BUF_4_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_C 0x0290
+#define regMCIF_WB_BUF_4_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUFMGR_VCE_CONTROL 0x0292
+#define regMCIF_WB_BUFMGR_VCE_CONTROL_BASE_IDX 2
+#define regMCIF_WB_NB_PSTATE_CONTROL 0x0293
+#define regMCIF_WB_NB_PSTATE_CONTROL_BASE_IDX 2
+#define regMCIF_WB_CLOCK_GATER_CONTROL 0x0294
+#define regMCIF_WB_CLOCK_GATER_CONTROL_BASE_IDX 2
+#define regMCIF_WB_SELF_REFRESH_CONTROL 0x0296
+#define regMCIF_WB_SELF_REFRESH_CONTROL_BASE_IDX 2
+#define regMULTI_LEVEL_QOS_CTRL 0x0297
+#define regMULTI_LEVEL_QOS_CTRL_BASE_IDX 2
+#define regMCIF_WB_SECURITY_LEVEL 0x0298
+#define regMCIF_WB_SECURITY_LEVEL_BASE_IDX 2
+#define regMCIF_WB_BUF_LUMA_SIZE 0x0299
+#define regMCIF_WB_BUF_LUMA_SIZE_BASE_IDX 2
+#define regMCIF_WB_BUF_CHROMA_SIZE 0x029a
+#define regMCIF_WB_BUF_CHROMA_SIZE_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_Y_HIGH 0x029b
+#define regMCIF_WB_BUF_1_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_C_HIGH 0x029c
+#define regMCIF_WB_BUF_1_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_Y_HIGH 0x029d
+#define regMCIF_WB_BUF_2_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_C_HIGH 0x029e
+#define regMCIF_WB_BUF_2_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_Y_HIGH 0x029f
+#define regMCIF_WB_BUF_3_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_C_HIGH 0x02a0
+#define regMCIF_WB_BUF_3_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_Y_HIGH 0x02a1
+#define regMCIF_WB_BUF_4_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_C_HIGH 0x02a2
+#define regMCIF_WB_BUF_4_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_RESOLUTION 0x02a3
+#define regMCIF_WB_BUF_1_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_2_RESOLUTION 0x02a4
+#define regMCIF_WB_BUF_2_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_3_RESOLUTION 0x02a5
+#define regMCIF_WB_BUF_3_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_4_RESOLUTION 0x02a6
+#define regMCIF_WB_BUF_4_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_PSTATE_CHANGE_DURATION_VBI 0x02a7
+#define regMCIF_WB_PSTATE_CHANGE_DURATION_VBI_BASE_IDX 2
+#define regMCIF_WB_VMID_CONTROL 0x02a8
+#define regMCIF_WB_VMID_CONTROL_BASE_IDX 2
+#define regMCIF_WB_MIN_TTO 0x02a9
+#define regMCIF_WB_MIN_TTO_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+// base address: 0x0
+#define regMCIF_WB_NB_PSTATE_LATENCY_WATERMARK 0x02aa
+#define regMCIF_WB_NB_PSTATE_LATENCY_WATERMARK_BASE_IDX 2
+#define regMCIF_WB_WATERMARK 0x02ab
+#define regMCIF_WB_WATERMARK_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_CONFIG 0x02ac
+#define regMMHUBBUB_WARMUP_CONFIG_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_CONTROL_STATUS 0x02ad
+#define regMMHUBBUB_WARMUP_CONTROL_STATUS_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_BASE_ADDR_LOW 0x02ae
+#define regMMHUBBUB_WARMUP_BASE_ADDR_LOW_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_BASE_ADDR_HIGH 0x02af
+#define regMMHUBBUB_WARMUP_BASE_ADDR_HIGH_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_ADDR_REGION 0x02b0
+#define regMMHUBBUB_WARMUP_ADDR_REGION_BASE_IDX 2
+#define regMMHUBBUB_MIN_TTO 0x02b1
+#define regMMHUBBUB_MIN_TTO_BASE_IDX 2
+#define regMMHUBBUB_CTRL 0x0333
+#define regMMHUBBUB_CTRL_BASE_IDX 2
+#define regWBIF_SMU_WM_CONTROL 0x0334
+#define regWBIF_SMU_WM_CONTROL_BASE_IDX 2
+#define regWBIF0_MISC_CTRL 0x0335
+#define regWBIF0_MISC_CTRL_BASE_IDX 2
+#define regWBIF0_PHASE0_OUTSTANDING_COUNTER 0x0336
+#define regWBIF0_PHASE0_OUTSTANDING_COUNTER_BASE_IDX 2
+#define regWBIF0_PHASE1_OUTSTANDING_COUNTER 0x0337
+#define regWBIF0_PHASE1_OUTSTANDING_COUNTER_BASE_IDX 2
+#define regMMHUBBUB_MEM_PWR_STATUS 0x033e
+#define regMMHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define regMMHUBBUB_MEM_PWR_CNTL 0x033f
+#define regMMHUBBUB_MEM_PWR_CNTL_BASE_IDX 2
+#define regMMHUBBUB_CLOCK_CNTL 0x0340
+#define regMMHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define regMMHUBBUB_SOFT_RESET 0x0341
+#define regMMHUBBUB_SOFT_RESET_BASE_IDX 2
+#define regDMU_IF_ERR_STATUS 0x0345
+#define regDMU_IF_ERR_STATUS_BASE_IDX 2
+#define regMMHUBBUB_CLIENT_UNIT_ID 0x0346
+#define regMMHUBBUB_CLIENT_UNIT_ID_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_VMID_CONTROL 0x0348
+#define regMMHUBBUB_WARMUP_VMID_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0xd48
+#define regDC_PERFMON4_PERFCOUNTER_CNTL 0x0352
+#define regDC_PERFMON4_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON4_PERFCOUNTER_CNTL2 0x0353
+#define regDC_PERFMON4_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON4_PERFCOUNTER_STATE 0x0354
+#define regDC_PERFMON4_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CNTL 0x0355
+#define regDC_PERFMON4_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CNTL2 0x0356
+#define regDC_PERFMON4_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CVALUE_INT_MISC 0x0357
+#define regDC_PERFMON4_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CVALUE_LOW 0x0358
+#define regDC_PERFMON4_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_HI 0x0359
+#define regDC_PERFMON4_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_LOW 0x035a
+#define regDC_PERFMON4_PERFMON_LOW_BASE_IDX 2
+
+
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+// base address: 0x0
+#define regAZF0STREAM0_AZALIA_STREAM_INDEX 0x035e
+#define regAZF0STREAM0_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM0_AZALIA_STREAM_DATA 0x035f
+#define regAZF0STREAM0_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+// base address: 0x8
+#define regAZF0STREAM1_AZALIA_STREAM_INDEX 0x0360
+#define regAZF0STREAM1_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM1_AZALIA_STREAM_DATA 0x0361
+#define regAZF0STREAM1_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+// base address: 0x10
+#define regAZF0STREAM2_AZALIA_STREAM_INDEX 0x0362
+#define regAZF0STREAM2_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM2_AZALIA_STREAM_DATA 0x0363
+#define regAZF0STREAM2_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+// base address: 0x18
+#define regAZF0STREAM3_AZALIA_STREAM_INDEX 0x0364
+#define regAZF0STREAM3_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM3_AZALIA_STREAM_DATA 0x0365
+#define regAZF0STREAM3_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+// base address: 0x20
+#define regAZF0STREAM4_AZALIA_STREAM_INDEX 0x0366
+#define regAZF0STREAM4_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM4_AZALIA_STREAM_DATA 0x0367
+#define regAZF0STREAM4_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+// base address: 0x28
+#define regAZF0STREAM5_AZALIA_STREAM_INDEX 0x0368
+#define regAZF0STREAM5_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM5_AZALIA_STREAM_DATA 0x0369
+#define regAZF0STREAM5_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+// base address: 0x30
+#define regAZF0STREAM6_AZALIA_STREAM_INDEX 0x036a
+#define regAZF0STREAM6_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM6_AZALIA_STREAM_DATA 0x036b
+#define regAZF0STREAM6_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+// base address: 0x38
+#define regAZF0STREAM7_AZALIA_STREAM_INDEX 0x036c
+#define regAZF0STREAM7_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM7_AZALIA_STREAM_DATA 0x036d
+#define regAZF0STREAM7_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
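Each AZF0STREAMn block above exposes only an INDEX/DATA register pair, the usual indirect-access scheme: software writes an internal register number to INDEX and then reads or writes the payload through DATA. The self-contained model below illustrates that pattern under stated assumptions; the fake mmio/internal arrays, the helper names, and the internal register number 0x08 are all illustrative, not the actual DC code or Azalia verb layout.

/* Illustrative model of the INDEX/DATA indirect access used by the
 * AZF0STREAMn blocks above; the arrays and helpers are stand-ins. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define regAZF0STREAM0_AZALIA_STREAM_INDEX 0x035e
#define regAZF0STREAM0_AZALIA_STREAM_DATA  0x035f

static uint32_t mmio[0x1000];        /* fake directly mapped registers */
static uint32_t az_internal[0x100];  /* fake indirectly reached registers */

static void reg_write(uint32_t reg, uint32_t val)
{
	mmio[reg] = val;
	/* Model the hardware: a DATA write lands in the internal
	 * register currently selected by INDEX. */
	if (reg == regAZF0STREAM0_AZALIA_STREAM_DATA)
		az_internal[mmio[regAZF0STREAM0_AZALIA_STREAM_INDEX] & 0xff] = val;
}

static uint32_t reg_read(uint32_t reg)
{
	if (reg == regAZF0STREAM0_AZALIA_STREAM_DATA)
		return az_internal[mmio[regAZF0STREAM0_AZALIA_STREAM_INDEX] & 0xff];
	return mmio[reg];
}

int main(void)
{
	/* Select hypothetical internal register 0x08, then access it
	 * through the DATA window. */
	reg_write(regAZF0STREAM0_AZALIA_STREAM_INDEX, 0x08);
	reg_write(regAZF0STREAM0_AZALIA_STREAM_DATA, 0x1234);
	printf("0x%08" PRIx32 "\n", reg_read(regAZF0STREAM0_AZALIA_STREAM_DATA));
	return 0;
}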
+// addressBlock: dce_dc_hda_az_misc_dispdec
+// base address: 0x0
+#define regAZ_CLOCK_CNTL 0x0372
+#define regAZ_CLOCK_CNTL_BASE_IDX 2
+#define regAZ_MEM_GLOBAL_PWR_REQ_CNTL 0x0373
+#define regAZ_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 2
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+// base address: 0xde8
+#define regDC_PERFMON5_PERFCOUNTER_CNTL 0x037a
+#define regDC_PERFMON5_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON5_PERFCOUNTER_CNTL2 0x037b
+#define regDC_PERFMON5_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON5_PERFCOUNTER_STATE 0x037c
+#define regDC_PERFMON5_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CNTL 0x037d
+#define regDC_PERFMON5_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CNTL2 0x037e
+#define regDC_PERFMON5_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CVALUE_INT_MISC 0x037f
+#define regDC_PERFMON5_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CVALUE_LOW 0x0380
+#define regDC_PERFMON5_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_HI 0x0381
+#define regDC_PERFMON5_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_LOW 0x0382
+#define regDC_PERFMON5_PERFMON_LOW_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+// base address: 0x0
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+// base address: 0x18
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+// base address: 0x30
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0392
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0393
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+// base address: 0x48
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0398
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0399
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+// base address: 0x60
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x039e
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x039f
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+// base address: 0x78
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03a4
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03a5
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+// base address: 0x90
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03aa
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03ab
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+// base address: 0xa8
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03b0
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03b1
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+// base address: 0x0
+#define regAZALIA_CONTROLLER_CLOCK_GATING 0x03c2
+#define regAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2
+#define regAZALIA_AUDIO_DTO 0x03c3
+#define regAZALIA_AUDIO_DTO_BASE_IDX 2
+#define regAZALIA_AUDIO_DTO_CONTROL 0x03c4
+#define regAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2
+#define regAZALIA_SOCCLK_CONTROL 0x03c5
+#define regAZALIA_SOCCLK_CONTROL_BASE_IDX 2
+#define regAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6
+#define regAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2
+#define regAZALIA_DATA_DMA_CONTROL 0x03c7
+#define regAZALIA_DATA_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_BDL_DMA_CONTROL 0x03c8
+#define regAZALIA_BDL_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_RIRB_AND_DP_CONTROL 0x03c9
+#define regAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2
+#define regAZALIA_CORB_DMA_CONTROL 0x03ca
+#define regAZALIA_CORB_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_GLOBAL_CAPABILITIES 0x03d3
+#define regAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2
+#define regAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4
+#define regAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define regAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5
+#define regAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2
+#define regAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6
+#define regAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL0 0x03d9
+#define regAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL1 0x03da
+#define regAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL2 0x03db
+#define regAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL3 0x03dc
+#define regAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_RESULT 0x03dd
+#define regAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL0 0x03de
+#define regAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL1 0x03df
+#define regAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL2 0x03e0
+#define regAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL3 0x03e1
+#define regAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_RESULT 0x03e2
+#define regAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL0 0x03e3
+#define regAZALIA_CRC0_CONTROL0_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL1 0x03e4
+#define regAZALIA_CRC0_CONTROL1_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL2 0x03e5
+#define regAZALIA_CRC0_CONTROL2_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL3 0x03e6
+#define regAZALIA_CRC0_CONTROL3_BASE_IDX 2
+#define regAZALIA_CRC0_RESULT 0x03e7
+#define regAZALIA_CRC0_RESULT_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL0 0x03e8
+#define regAZALIA_CRC1_CONTROL0_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL1 0x03e9
+#define regAZALIA_CRC1_CONTROL1_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL2 0x03ea
+#define regAZALIA_CRC1_CONTROL2_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL3 0x03eb
+#define regAZALIA_CRC1_CONTROL3_BASE_IDX 2
+#define regAZALIA_CRC1_RESULT 0x03ec
+#define regAZALIA_CRC1_RESULT_BASE_IDX 2
+#define regAZALIA_MEM_PWR_CTRL 0x03ee
+#define regAZALIA_MEM_PWR_CTRL_BASE_IDX 2
+#define regAZALIA_MEM_PWR_STATUS 0x03ef
+#define regAZALIA_MEM_PWR_STATUS_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+// base address: 0x0
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408
+#define regAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2
+#define regAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409
+#define regAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2
+#define regCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x0412
+#define regCC_RCU_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define regCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x0413
+#define regCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET0 0x0415
+#define regAZALIA_F0_GTC_GROUP_OFFSET0_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET1 0x0416
+#define regAZALIA_F0_GTC_GROUP_OFFSET1_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET2 0x0417
+#define regAZALIA_F0_GTC_GROUP_OFFSET2_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET3 0x0418
+#define regAZALIA_F0_GTC_GROUP_OFFSET3_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET4 0x0419
+#define regAZALIA_F0_GTC_GROUP_OFFSET4_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET5 0x041a
+#define regAZALIA_F0_GTC_GROUP_OFFSET5_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET6 0x041b
+#define regAZALIA_F0_GTC_GROUP_OFFSET6_BASE_IDX 2
+#define regREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c
+#define regREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define regREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d
+#define regREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+// base address: 0x320
+#define regAZF0STREAM8_AZALIA_STREAM_INDEX 0x0426
+#define regAZF0STREAM8_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM8_AZALIA_STREAM_DATA 0x0427
+#define regAZF0STREAM8_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+// base address: 0x328
+#define regAZF0STREAM9_AZALIA_STREAM_INDEX 0x0428
+#define regAZF0STREAM9_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM9_AZALIA_STREAM_DATA 0x0429
+#define regAZF0STREAM9_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+// base address: 0x330
+#define regAZF0STREAM10_AZALIA_STREAM_INDEX 0x042a
+#define regAZF0STREAM10_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM10_AZALIA_STREAM_DATA 0x042b
+#define regAZF0STREAM10_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+// base address: 0x338
+#define regAZF0STREAM11_AZALIA_STREAM_INDEX 0x042c
+#define regAZF0STREAM11_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM11_AZALIA_STREAM_DATA 0x042d
+#define regAZF0STREAM11_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+// base address: 0x340
+#define regAZF0STREAM12_AZALIA_STREAM_INDEX 0x042e
+#define regAZF0STREAM12_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM12_AZALIA_STREAM_DATA 0x042f
+#define regAZF0STREAM12_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+// base address: 0x348
+#define regAZF0STREAM13_AZALIA_STREAM_INDEX 0x0430
+#define regAZF0STREAM13_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM13_AZALIA_STREAM_DATA 0x0431
+#define regAZF0STREAM13_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+// base address: 0x350
+#define regAZF0STREAM14_AZALIA_STREAM_INDEX 0x0432
+#define regAZF0STREAM14_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM14_AZALIA_STREAM_DATA 0x0433
+#define regAZF0STREAM14_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+// base address: 0x358
+#define regAZF0STREAM15_AZALIA_STREAM_INDEX 0x0434
+#define regAZF0STREAM15_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM15_AZALIA_STREAM_DATA 0x0435
+#define regAZF0STREAM15_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+// base address: 0x0
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+// base address: 0x10
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+// base address: 0x20
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0442
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0443
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+// base address: 0x30
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0446
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0447
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+// base address: 0x40
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044a
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044b
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+// base address: 0x50
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044e
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044f
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+// base address: 0x60
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0452
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0453
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+// base address: 0x70
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0456
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0457
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
+// base address: 0x0
+#define regDCHUBBUB_SDPIF_CFG0 0x046f
+#define regDCHUBBUB_SDPIF_CFG0_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_CFG1 0x0470
+#define regDCHUBBUB_SDPIF_CFG1_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_CFG2 0x0471
+#define regDCHUBBUB_SDPIF_CFG2_BASE_IDX 2
+#define regVM_REQUEST_PHYSICAL 0x0472
+#define regVM_REQUEST_PHYSICAL_BASE_IDX 2
+#define regDCHUBBUB_FORCE_IO_STATUS_0 0x0473
+#define regDCHUBBUB_FORCE_IO_STATUS_0_BASE_IDX 2
+#define regDCHUBBUB_FORCE_IO_STATUS_1 0x0474
+#define regDCHUBBUB_FORCE_IO_STATUS_1_BASE_IDX 2
+#define regDCN_VM_FB_LOCATION_BASE 0x0475
+#define regDCN_VM_FB_LOCATION_BASE_BASE_IDX 2
+#define regDCN_VM_FB_LOCATION_TOP 0x0476
+#define regDCN_VM_FB_LOCATION_TOP_BASE_IDX 2
+#define regDCN_VM_FB_OFFSET 0x0477
+#define regDCN_VM_FB_OFFSET_BASE_IDX 2
+#define regDCN_VM_AGP_BOT 0x0478
+#define regDCN_VM_AGP_BOT_BASE_IDX 2
+#define regDCN_VM_AGP_TOP 0x0479
+#define regDCN_VM_AGP_TOP_BASE_IDX 2
+#define regDCN_VM_AGP_BASE 0x047a
+#define regDCN_VM_AGP_BASE_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_START 0x047b
+#define regDCN_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_END 0x047c
+#define regDCN_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x047d
+#define regDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_SEC_LVL 0x047e
+#define regDCHUBBUB_SDPIF_PIPE_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_NOALLOC 0x047f
+#define regDCHUBBUB_SDPIF_PIPE_NOALLOC_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL 0x0480
+#define regDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL 0x0481
+#define regDCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL 0x0482
+#define regDCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL 0x0483
+#define regDCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL_BASE_IDX 2
+#define regSDPIF_REQUEST_RATE_LIMIT 0x0484
+#define regSDPIF_REQUEST_RATE_LIMIT_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL 0x0485
+#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS 0x0486
+#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_ret_path_dispdec
+// base address: 0x0
+#define regDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04af
+#define regDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2
+#define regDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04b0
+#define regDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2
+#define regDCHUBBUB_CRC_CTRL 0x04b1
+#define regDCHUBBUB_CRC_CTRL_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_R_G 0x04b2
+#define regDCHUBBUB_CRC0_VAL_R_G_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_B_A 0x04b3
+#define regDCHUBBUB_CRC0_VAL_B_A_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_R_G 0x04b4
+#define regDCHUBBUB_CRC1_VAL_R_G_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_B_A 0x04b5
+#define regDCHUBBUB_CRC1_VAL_B_A_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT_CNTL 0x04b6
+#define regDCHUBBUB_DCC_STAT_CNTL_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT0 0x04b7
+#define regDCHUBBUB_DCC_STAT0_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT1 0x04b8
+#define regDCHUBBUB_DCC_STAT1_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT2 0x04b9
+#define regDCHUBBUB_DCC_STAT2_BASE_IDX 2
+#define regDCHUBBUB_COMPBUF_CTRL 0x04ba
+#define regDCHUBBUB_COMPBUF_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET0_CTRL 0x04bb
+#define regDCHUBBUB_DET0_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET1_CTRL 0x04bc
+#define regDCHUBBUB_DET1_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET2_CTRL 0x04bd
+#define regDCHUBBUB_DET2_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET3_CTRL 0x04be
+#define regDCHUBBUB_DET3_CTRL_BASE_IDX 2
+#define regDCHUBBUB_MEM_PWR_MODE_CTRL 0x04c0
+#define regDCHUBBUB_MEM_PWR_MODE_CTRL_BASE_IDX 2
+#define regCOMPBUF_MEM_PWR_CTRL_1 0x04c1
+#define regCOMPBUF_MEM_PWR_CTRL_1_BASE_IDX 2
+#define regCOMPBUF_MEM_PWR_CTRL_2 0x04c2
+#define regCOMPBUF_MEM_PWR_CTRL_2_BASE_IDX 2
+#define regDCHUBBUB_MEM_PWR_STATUS 0x04c3
+#define regDCHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define regCOMPBUF_RESERVED_SPACE 0x04c4
+#define regCOMPBUF_RESERVED_SPACE_BASE_IDX 2
+#define regDCHUBBUB_DEBUG_CTRL_0 0x04c5
+#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_dispdec
+// base address: 0x0
+#define regDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x04f9
+#define regDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2
+#define regDCHUBBUB_ARB_SAT_LEVEL 0x04fa
+#define regDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2
+#define regDCHUBBUB_ARB_QOS_FORCE 0x04fb
+#define regDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2
+#define regDCHUBBUB_ARB_DRAM_STATE_CNTL 0x04fc
+#define regDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_CNTL 0x04fd
+#define regDCHUBBUB_ARB_USR_RETRAINING_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x04fe
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A 0x04ff
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A 0x0500
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A 0x0501
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A 0x0502
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A 0x0503
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A 0x0504
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A 0x0505
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A 0x0506
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_A 0x0507
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A 0x0508
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x0509
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B 0x050a
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B 0x050b
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B 0x050c
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B 0x050d
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B 0x050e
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B 0x050f
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B 0x0510
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B 0x0511
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_B 0x0512
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B 0x0513
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0514
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C 0x0515
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C 0x0516
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C 0x0517
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C 0x0518
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C 0x0519
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C 0x051a
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C 0x051b
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C 0x051c
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_C 0x051d
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C 0x051e
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x051f
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D 0x0520
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D 0x0521
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D 0x0522
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D 0x0523
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D 0x0524
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D 0x0525
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D 0x0526
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D 0x0527
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_D 0x0528
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D 0x0529
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_HOSTVM_CNTL 0x052a
+#define regDCHUBBUB_ARB_HOSTVM_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x052b
+#define regDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_MALL_CNTL 0x052c
+#define regDCHUBBUB_ARB_MALL_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_TIMEOUT_ENABLE 0x052d
+#define regDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2
+#define regDCHUBBUB_GLOBAL_TIMER_CNTL 0x052e
+#define regDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2
+#define regSURFACE_CHECK0_ADDRESS_LSB 0x052f
+#define regSURFACE_CHECK0_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK0_ADDRESS_MSB 0x0530
+#define regSURFACE_CHECK0_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK1_ADDRESS_LSB 0x0531
+#define regSURFACE_CHECK1_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK1_ADDRESS_MSB 0x0532
+#define regSURFACE_CHECK1_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK2_ADDRESS_LSB 0x0533
+#define regSURFACE_CHECK2_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK2_ADDRESS_MSB 0x0534
+#define regSURFACE_CHECK2_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK3_ADDRESS_LSB 0x0535
+#define regSURFACE_CHECK3_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK3_ADDRESS_MSB 0x0536
+#define regSURFACE_CHECK3_ADDRESS_MSB_BASE_IDX 2
+#define regVTG0_CONTROL 0x0537
+#define regVTG0_CONTROL_BASE_IDX 2
+#define regVTG1_CONTROL 0x0538
+#define regVTG1_CONTROL_BASE_IDX 2
+#define regVTG2_CONTROL 0x0539
+#define regVTG2_CONTROL_BASE_IDX 2
+#define regVTG3_CONTROL 0x053a
+#define regVTG3_CONTROL_BASE_IDX 2
+#define regDCHUBBUB_SOFT_RESET 0x053b
+#define regDCHUBBUB_SOFT_RESET_BASE_IDX 2
+#define regDCHUBBUB_CLOCK_CNTL 0x053c
+#define regDCHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define regDCFCLK_CNTL 0x053d
+#define regDCFCLK_CNTL_BASE_IDX 2
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL 0x053e
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL_BASE_IDX 2
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2 0x053f
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2_BASE_IDX 2
+#define regDCHUBBUB_VLINE_SNAPSHOT 0x0540
+#define regDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
+#define regDCHUBBUB_CTRL_STATUS 0x0541
+#define regDCHUBBUB_CTRL_STATUS_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x0547
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x0548
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x0549
+#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
+#define regFMON_CTRL 0x054a
+#define regFMON_CTRL_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_INDEX 0x054b
+#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regDCHUBBUB_TEST_DEBUG_DATA 0x054c
+#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
+
+// addressBlock: dce_dc_dchubbubl_dchubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1534
+#define regDC_PERFMON6_PERFCOUNTER_CNTL 0x054d
+#define regDC_PERFMON6_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON6_PERFCOUNTER_CNTL2 0x054e
+#define regDC_PERFMON6_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON6_PERFCOUNTER_STATE 0x054f
+#define regDC_PERFMON6_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CNTL 0x0550
+#define regDC_PERFMON6_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CNTL2 0x0551
+#define regDC_PERFMON6_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CVALUE_INT_MISC 0x0552
+#define regDC_PERFMON6_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CVALUE_LOW 0x0553
+#define regDC_PERFMON6_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_HI 0x0554
+#define regDC_PERFMON6_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_LOW 0x0555
+#define regDC_PERFMON6_PERFMON_LOW_BASE_IDX 2
+
+
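The DC_PERFMON instances in this part of the header (PERFMON4 under MMHUBBUB, PERFMON5 under HDA, PERFMON6 under DCHUBBUB) share one nine-register layout and differ only in their base offset, so a per-instance register equals the instance base plus a field index. The helper below is a hypothetical illustration of that regularity, not an existing driver API; the base values are the regDC_PERFMONn_PERFCOUNTER_CNTL offsets defined above.

/* Sketch: the DC_PERFMONn blocks share one layout, so an instance's
 * register can be derived as base + field index. Helper names are
 * illustrative; the base table values come from this header. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum dc_perfmon_field {
	PERFCOUNTER_CNTL,        /* +0 */
	PERFCOUNTER_CNTL2,       /* +1 */
	PERFCOUNTER_STATE,       /* +2 */
	PERFMON_CNTL,            /* +3 */
	PERFMON_CNTL2,           /* +4 */
	PERFMON_CVALUE_INT_MISC, /* +5 */
	PERFMON_CVALUE_LOW,      /* +6 */
	PERFMON_HI,              /* +7 */
	PERFMON_LOW,             /* +8 */
};

/* Instance bases from this header: PERFMON4 (MMHUBBUB), PERFMON5 (HDA),
 * PERFMON6 (DCHUBBUB). */
static const uint32_t dc_perfmon_base[] = { 0x0352, 0x037a, 0x054d };

static uint32_t dc_perfmon_reg(unsigned int inst, enum dc_perfmon_field f)
{
	return dc_perfmon_base[inst] + (uint32_t)f;
}

int main(void)
{
	/* 0x054d + 8 == 0x0555 == regDC_PERFMON6_PERFMON_LOW above. */
	printf("0x%04" PRIx32 "\n", dc_perfmon_reg(2, PERFMON_LOW));
	return 0;
}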
+// addressBlock: dce_dc_dchubbubl_hubbub_vmrq_if_dispdec
+// base address: 0x0
+#define regDCN_VM_CONTEXT0_CNTL 0x0559
+#define regDCN_VM_CONTEXT0_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x055a
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x055b
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x055c
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x055d
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x055e
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x055f
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_CNTL 0x0560
+#define regDCN_VM_CONTEXT1_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0561
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0562
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0563
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0564
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0565
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0566
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_CNTL 0x0567
+#define regDCN_VM_CONTEXT2_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0568
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0569
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x056a
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x056b
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x056c
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x056d
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_CNTL 0x056e
+#define regDCN_VM_CONTEXT3_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x056f
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0570
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0571
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0572
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0573
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0574
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_CNTL 0x0575
+#define regDCN_VM_CONTEXT4_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0576
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0577
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0578
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0579
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x057a
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x057b
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_CNTL 0x057c
+#define regDCN_VM_CONTEXT5_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x057d
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x057e
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x057f
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0580
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0581
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0582
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_CNTL 0x0583
+#define regDCN_VM_CONTEXT6_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0584
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0585
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0586
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0587
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0588
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0589
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_CNTL 0x058a
+#define regDCN_VM_CONTEXT7_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x058b
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x058c
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x058d
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x058e
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x058f
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0590
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_CNTL 0x0591
+#define regDCN_VM_CONTEXT8_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0592
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0593
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0594
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0595
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0596
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0597
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_CNTL 0x0598
+#define regDCN_VM_CONTEXT9_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0599
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x059a
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x059b
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x059c
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x059d
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x059e
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_CNTL 0x059f
+#define regDCN_VM_CONTEXT10_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x05a0
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x05a1
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x05a2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x05a3
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x05a4
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x05a5
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_CNTL 0x05a6
+#define regDCN_VM_CONTEXT11_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x05a7
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x05a8
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x05a9
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x05aa
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x05ab
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x05ac
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_CNTL 0x05ad
+#define regDCN_VM_CONTEXT12_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x05ae
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x05af
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x05b0
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x05b1
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x05b2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x05b3
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_CNTL 0x05b4
+#define regDCN_VM_CONTEXT13_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x05b5
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x05b6
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x05b7
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x05b8
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x05b9
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x05ba
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_CNTL 0x05bb
+#define regDCN_VM_CONTEXT14_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x05bc
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x05bd
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x05be
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x05bf
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x05c0
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x05c1
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_CNTL 0x05c2
+#define regDCN_VM_CONTEXT15_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x05c3
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x05c4
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x05c5
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x05c6
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x05c7
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x05c8
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_DEFAULT_ADDR_MSB 0x05c9
+#define regDCN_VM_DEFAULT_ADDR_MSB_BASE_IDX 2
+#define regDCN_VM_DEFAULT_ADDR_LSB 0x05ca
+#define regDCN_VM_DEFAULT_ADDR_LSB_BASE_IDX 2
+#define regDCN_VM_FAULT_CNTL 0x05cb
+#define regDCN_VM_FAULT_CNTL_BASE_IDX 2
+#define regDCN_VM_FAULT_STATUS 0x05cc
+#define regDCN_VM_FAULT_STATUS_BASE_IDX 2
+#define regDCN_VM_FAULT_ADDR_MSB 0x05cd
+#define regDCN_VM_FAULT_ADDR_MSB_BASE_IDX 2
+#define regDCN_VM_FAULT_ADDR_LSB 0x05ce
+#define regDCN_VM_FAULT_ADDR_LSB_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+// base address: 0x0
+#define regHUBP0_DCSURF_SURFACE_CONFIG 0x05e5
+#define regHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_ADDR_CONFIG 0x05e6
+#define regHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_TILING_CONFIG 0x05e7
+#define regHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP0_DCHUBP_CNTL 0x05f3
+#define regHUBP0_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP0_HUBP_CLK_CNTL 0x05f4
+#define regHUBP0_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP0_DCHUBP_VMPG_CONFIG 0x05f5
+#define regHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_MALL_CONFIG 0x05f6
+#define regHUBP0_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_MALL_SUB_VP 0x05f7
+#define regHUBP0_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP0_HUBPREQ_DEBUG_DB 0x05f8
+#define regHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP0_HUBPREQ_DEBUG 0x05f9
+#define regHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fd
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fe
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP0_HUBP_MALL_STATUS 0x05ff
+#define regHUBP0_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+// base address: 0x0
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ0_VMID_SETTINGS_0 0x0609
+#define regHUBPREQ0_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a
+#define regHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x061f
+#define regHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE 0x0620
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0621
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0622
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0623
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0624
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0625
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0626
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0627
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCN_EXPANSION_MODE 0x0628
+#define regHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ0_DCN_TTU_QOS_WM 0x0629
+#define regHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062a
+#define regHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062b
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x062c
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x062d
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x062e
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x062f
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0630
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL0 0x0631
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL1 0x0632
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_DMDATA_VM_CNTL 0x0633
+#define regHUBPREQ0_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0634
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0635
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL 0x0642
+#define regHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ0_BLANK_OFFSET_0 0x0643
+#define regHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ0_BLANK_OFFSET_1 0x0644
+#define regHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ0_DST_DIMENSIONS 0x0645
+#define regHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ0_DST_AFTER_SCALER 0x0646
+#define regHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ0_PREFETCH_SETTINGS 0x0647
+#define regHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ0_PREFETCH_SETTINGS_C 0x0648
+#define regHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_0 0x0649
+#define regHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_1 0x064a
+#define regHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_2 0x064b
+#define regHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_3 0x064c
+#define regHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_4 0x064d
+#define regHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_0 0x064e
+#define regHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_1 0x064f
+#define regHUBPREQ0_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_2 0x0650
+#define regHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_0 0x0651
+#define regHUBPREQ0_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_1 0x0652
+#define regHUBPREQ0_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_2 0x0653
+#define regHUBPREQ0_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_3 0x0654
+#define regHUBPREQ0_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_4 0x0655
+#define regHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_5 0x0656
+#define regHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_6 0x0657
+#define regHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_7 0x0658
+#define regHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ0_PER_LINE_DELIVERY_PRE 0x0659
+#define regHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ0_PER_LINE_DELIVERY 0x065a
+#define regHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ0_CURSOR_SETTINGS 0x065b
+#define regHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065c
+#define regHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x065d
+#define regHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x065e
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x065f
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_5 0x0662
+#define regHUBPREQ0_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_6 0x0663
+#define regHUBPREQ0_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_3 0x0664
+#define regHUBPREQ0_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_4 0x0665
+#define regHUBPREQ0_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_5 0x0666
+#define regHUBPREQ0_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_6 0x0667
+#define regHUBPREQ0_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_UCLK_PSTATE_FORCE 0x0668
+#define regHUBPREQ0_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG0 0x0669
+#define regHUBPREQ0_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG1 0x066a
+#define regHUBPREQ0_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG2 0x066b
+#define regHUBPREQ0_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+// base address: 0x0
+#define regHUBPRET0_HUBPRET_CONTROL 0x066c
+#define regHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066d
+#define regHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066e
+#define regHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066f
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x0670
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE0 0x0671
+#define regHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE1 0x0672
+#define regHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_INTERRUPT 0x0673
+#define regHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0674
+#define regHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0675
+#define regHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+// base address: 0x0
+#define regCURSOR0_0_CURSOR_CONTROL 0x0678
+#define regCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SIZE 0x067b
+#define regCURSOR0_0_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_POSITION 0x067c
+#define regCURSOR0_0_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_HOT_SPOT 0x067d
+#define regCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e
+#define regCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_DST_OFFSET 0x067f
+#define regCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680
+#define regCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681
+#define regCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682
+#define regCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683
+#define regCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_CNTL 0x0684
+#define regCURSOR0_0_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_QOS_CNTL 0x0685
+#define regCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_STATUS 0x0686
+#define regCURSOR0_0_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_SW_CNTL 0x0687
+#define regCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_SW_DATA 0x0688
+#define regCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a74
+#define regDC_PERFMON7_PERFCOUNTER_CNTL 0x069d
+#define regDC_PERFMON7_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON7_PERFCOUNTER_CNTL2 0x069e
+#define regDC_PERFMON7_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON7_PERFCOUNTER_STATE 0x069f
+#define regDC_PERFMON7_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CNTL 0x06a0
+#define regDC_PERFMON7_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CNTL2 0x06a1
+#define regDC_PERFMON7_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CVALUE_INT_MISC 0x06a2
+#define regDC_PERFMON7_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CVALUE_LOW 0x06a3
+#define regDC_PERFMON7_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_HI 0x06a4
+#define regDC_PERFMON7_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_LOW 0x06a5
+#define regDC_PERFMON7_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+// base address: 0x370
+#define regHUBP1_DCSURF_SURFACE_CONFIG 0x06c1
+#define regHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_ADDR_CONFIG 0x06c2
+#define regHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_TILING_CONFIG 0x06c3
+#define regHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP1_DCHUBP_CNTL 0x06cf
+#define regHUBP1_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP1_HUBP_CLK_CNTL 0x06d0
+#define regHUBP1_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP1_DCHUBP_VMPG_CONFIG 0x06d1
+#define regHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_MALL_CONFIG 0x06d2
+#define regHUBP1_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_MALL_SUB_VP 0x06d3
+#define regHUBP1_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP1_HUBPREQ_DEBUG_DB 0x06d4
+#define regHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP1_HUBPREQ_DEBUG 0x06d5
+#define regHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d9
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06da
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP1_HUBP_MALL_STATUS 0x06db
+#define regHUBP1_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+// base address: 0x370
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ1_VMID_SETTINGS_0 0x06e5
+#define regHUBPREQ1_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6
+#define regHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fb
+#define regHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fc
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fd
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06fe
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x06ff
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0700
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0701
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0702
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0703
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCN_EXPANSION_MODE 0x0704
+#define regHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ1_DCN_TTU_QOS_WM 0x0705
+#define regHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x0706
+#define regHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x0707
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x0708
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x0709
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070a
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070b
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x070c
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x070d
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x070e
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_DMDATA_VM_CNTL 0x070f
+#define regHUBPREQ1_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0710
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0711
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL 0x071e
+#define regHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ1_BLANK_OFFSET_0 0x071f
+#define regHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ1_BLANK_OFFSET_1 0x0720
+#define regHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ1_DST_DIMENSIONS 0x0721
+#define regHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ1_DST_AFTER_SCALER 0x0722
+#define regHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ1_PREFETCH_SETTINGS 0x0723
+#define regHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ1_PREFETCH_SETTINGS_C 0x0724
+#define regHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_0 0x0725
+#define regHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_1 0x0726
+#define regHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_2 0x0727
+#define regHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_3 0x0728
+#define regHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_4 0x0729
+#define regHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_0 0x072a
+#define regHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_1 0x072b
+#define regHUBPREQ1_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_2 0x072c
+#define regHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_0 0x072d
+#define regHUBPREQ1_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_1 0x072e
+#define regHUBPREQ1_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_2 0x072f
+#define regHUBPREQ1_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_3 0x0730
+#define regHUBPREQ1_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_4 0x0731
+#define regHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_5 0x0732
+#define regHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_6 0x0733
+#define regHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_7 0x0734
+#define regHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0735
+#define regHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ1_PER_LINE_DELIVERY 0x0736
+#define regHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ1_CURSOR_SETTINGS 0x0737
+#define regHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x0738
+#define regHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x0739
+#define regHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073a
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073b
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_5 0x073e
+#define regHUBPREQ1_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_6 0x073f
+#define regHUBPREQ1_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_3 0x0740
+#define regHUBPREQ1_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_4 0x0741
+#define regHUBPREQ1_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_5 0x0742
+#define regHUBPREQ1_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_6 0x0743
+#define regHUBPREQ1_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_UCLK_PSTATE_FORCE 0x0744
+#define regHUBPREQ1_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG0 0x0745
+#define regHUBPREQ1_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG1 0x0746
+#define regHUBPREQ1_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG2 0x0747
+#define regHUBPREQ1_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+// base address: 0x370
+#define regHUBPRET1_HUBPRET_CONTROL 0x0748
+#define regHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0749
+#define regHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x074a
+#define regHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x074b
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074c
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE0 0x074d
+#define regHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE1 0x074e
+#define regHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_INTERRUPT 0x074f
+#define regHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_VALUE 0x0750
+#define regHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_STATUS 0x0751
+#define regHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+// base address: 0x370
+#define regCURSOR0_1_CURSOR_CONTROL 0x0754
+#define regCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SIZE 0x0757
+#define regCURSOR0_1_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_POSITION 0x0758
+#define regCURSOR0_1_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_HOT_SPOT 0x0759
+#define regCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a
+#define regCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_DST_OFFSET 0x075b
+#define regCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c
+#define regCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d
+#define regCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e
+#define regCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f
+#define regCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_CNTL 0x0760
+#define regCURSOR0_1_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_QOS_CNTL 0x0761
+#define regCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_STATUS 0x0762
+#define regCURSOR0_1_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_SW_CNTL 0x0763
+#define regCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_SW_DATA 0x0764
+#define regCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1de4
+#define regDC_PERFMON8_PERFCOUNTER_CNTL 0x0779
+#define regDC_PERFMON8_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON8_PERFCOUNTER_CNTL2 0x077a
+#define regDC_PERFMON8_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON8_PERFCOUNTER_STATE 0x077b
+#define regDC_PERFMON8_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CNTL 0x077c
+#define regDC_PERFMON8_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CNTL2 0x077d
+#define regDC_PERFMON8_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CVALUE_INT_MISC 0x077e
+#define regDC_PERFMON8_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CVALUE_LOW 0x077f
+#define regDC_PERFMON8_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_HI 0x0780
+#define regDC_PERFMON8_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_LOW 0x0781
+#define regDC_PERFMON8_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+// base address: 0x6e0
+#define regHUBP2_DCSURF_SURFACE_CONFIG 0x079d
+#define regHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_ADDR_CONFIG 0x079e
+#define regHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_TILING_CONFIG 0x079f
+#define regHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP2_DCHUBP_CNTL 0x07ab
+#define regHUBP2_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP2_HUBP_CLK_CNTL 0x07ac
+#define regHUBP2_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP2_DCHUBP_VMPG_CONFIG 0x07ad
+#define regHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_MALL_CONFIG 0x07ae
+#define regHUBP2_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_MALL_SUB_VP 0x07af
+#define regHUBP2_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP2_HUBPREQ_DEBUG_DB 0x07b0
+#define regHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP2_HUBPREQ_DEBUG 0x07b1
+#define regHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b5
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b6
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP2_HUBP_MALL_STATUS 0x07b7
+#define regHUBP2_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+// base address: 0x6e0
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ2_VMID_SETTINGS_0 0x07c1
+#define regHUBPREQ2_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2
+#define regHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d7
+#define regHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d8
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07d9
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07da
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07db
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dc
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07dd
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07de
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07df
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCN_EXPANSION_MODE 0x07e0
+#define regHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ2_DCN_TTU_QOS_WM 0x07e1
+#define regHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e2
+#define regHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e3
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e4
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e5
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07e6
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07e7
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07e8
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07e9
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ea
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_DMDATA_VM_CNTL 0x07eb
+#define regHUBPREQ2_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x07ec
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x07ed
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL 0x07fa
+#define regHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ2_BLANK_OFFSET_0 0x07fb
+#define regHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ2_BLANK_OFFSET_1 0x07fc
+#define regHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ2_DST_DIMENSIONS 0x07fd
+#define regHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ2_DST_AFTER_SCALER 0x07fe
+#define regHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ2_PREFETCH_SETTINGS 0x07ff
+#define regHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ2_PREFETCH_SETTINGS_C 0x0800
+#define regHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_0 0x0801
+#define regHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_1 0x0802
+#define regHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_2 0x0803
+#define regHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_3 0x0804
+#define regHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_4 0x0805
+#define regHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_0 0x0806
+#define regHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_1 0x0807
+#define regHUBPREQ2_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_2 0x0808
+#define regHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_0 0x0809
+#define regHUBPREQ2_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_1 0x080a
+#define regHUBPREQ2_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_2 0x080b
+#define regHUBPREQ2_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_3 0x080c
+#define regHUBPREQ2_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_4 0x080d
+#define regHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_5 0x080e
+#define regHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_6 0x080f
+#define regHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_7 0x0810
+#define regHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0811
+#define regHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ2_PER_LINE_DELIVERY 0x0812
+#define regHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ2_CURSOR_SETTINGS 0x0813
+#define regHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0814
+#define regHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0815
+#define regHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0816
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x0817
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_5 0x081a
+#define regHUBPREQ2_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_6 0x081b
+#define regHUBPREQ2_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_3 0x081c
+#define regHUBPREQ2_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_4 0x081d
+#define regHUBPREQ2_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_5 0x081e
+#define regHUBPREQ2_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_6 0x081f
+#define regHUBPREQ2_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_UCLK_PSTATE_FORCE 0x0820
+#define regHUBPREQ2_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG0 0x0821
+#define regHUBPREQ2_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG1 0x0822
+#define regHUBPREQ2_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG2 0x0823
+#define regHUBPREQ2_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+// base address: 0x6e0
+#define regHUBPRET2_HUBPRET_CONTROL 0x0824
+#define regHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0825
+#define regHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0826
+#define regHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0827
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0828
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE0 0x0829
+#define regHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE1 0x082a
+#define regHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_INTERRUPT 0x082b
+#define regHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082c
+#define regHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082d
+#define regHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+// base address: 0x6e0
+#define regCURSOR0_2_CURSOR_CONTROL 0x0830
+#define regCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SIZE 0x0833
+#define regCURSOR0_2_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_POSITION 0x0834
+#define regCURSOR0_2_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_HOT_SPOT 0x0835
+#define regCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836
+#define regCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_DST_OFFSET 0x0837
+#define regCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838
+#define regCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839
+#define regCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a
+#define regCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b
+#define regCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_CNTL 0x083c
+#define regCURSOR0_2_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_QOS_CNTL 0x083d
+#define regCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_STATUS 0x083e
+#define regCURSOR0_2_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_SW_CNTL 0x083f
+#define regCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_SW_DATA 0x0840
+#define regCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2154
+#define regDC_PERFMON9_PERFCOUNTER_CNTL 0x0855
+#define regDC_PERFMON9_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON9_PERFCOUNTER_CNTL2 0x0856
+#define regDC_PERFMON9_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON9_PERFCOUNTER_STATE 0x0857
+#define regDC_PERFMON9_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CNTL 0x0858
+#define regDC_PERFMON9_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CNTL2 0x0859
+#define regDC_PERFMON9_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CVALUE_INT_MISC 0x085a
+#define regDC_PERFMON9_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CVALUE_LOW 0x085b
+#define regDC_PERFMON9_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_HI 0x085c
+#define regDC_PERFMON9_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_LOW 0x085d
+#define regDC_PERFMON9_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+// base address: 0xa50
+#define regHUBP3_DCSURF_SURFACE_CONFIG 0x0879
+#define regHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_ADDR_CONFIG 0x087a
+#define regHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_TILING_CONFIG 0x087b
+#define regHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP3_DCHUBP_CNTL 0x0887
+#define regHUBP3_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP3_HUBP_CLK_CNTL 0x0888
+#define regHUBP3_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP3_DCHUBP_VMPG_CONFIG 0x0889
+#define regHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_MALL_CONFIG 0x088a
+#define regHUBP3_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_MALL_SUB_VP 0x088b
+#define regHUBP3_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP3_HUBPREQ_DEBUG_DB 0x088c
+#define regHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP3_HUBPREQ_DEBUG 0x088d
+#define regHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0891
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0892
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP3_HUBP_MALL_STATUS 0x0893
+#define regHUBP3_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+// base address: 0xa50
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ3_VMID_SETTINGS_0 0x089d
+#define regHUBPREQ3_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae
+#define regHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b3
+#define regHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b4
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b5
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b6
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b7
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b8
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08b9
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08ba
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bb
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCN_EXPANSION_MODE 0x08bc
+#define regHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ3_DCN_TTU_QOS_WM 0x08bd
+#define regHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08be
+#define regHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08bf
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c0
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c1
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c3
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c4
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL0 0x08c5
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL1 0x08c6
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_DMDATA_VM_CNTL 0x08c7
+#define regHUBPREQ3_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x08c8
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x08c9
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL 0x08d6
+#define regHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ3_BLANK_OFFSET_0 0x08d7
+#define regHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ3_BLANK_OFFSET_1 0x08d8
+#define regHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ3_DST_DIMENSIONS 0x08d9
+#define regHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ3_DST_AFTER_SCALER 0x08da
+#define regHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ3_PREFETCH_SETTINGS 0x08db
+#define regHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ3_PREFETCH_SETTINGS_C 0x08dc
+#define regHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_0 0x08dd
+#define regHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_1 0x08de
+#define regHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_2 0x08df
+#define regHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_3 0x08e0
+#define regHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_4 0x08e1
+#define regHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_0 0x08e2
+#define regHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_1 0x08e3
+#define regHUBPREQ3_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_2 0x08e4
+#define regHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_0 0x08e5
+#define regHUBPREQ3_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_1 0x08e6
+#define regHUBPREQ3_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_2 0x08e7
+#define regHUBPREQ3_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_3 0x08e8
+#define regHUBPREQ3_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_4 0x08e9
+#define regHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_5 0x08ea
+#define regHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_6 0x08eb
+#define regHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_7 0x08ec
+#define regHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08ed
+#define regHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ3_PER_LINE_DELIVERY 0x08ee
+#define regHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ3_CURSOR_SETTINGS 0x08ef
+#define regHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f0
+#define regHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f1
+#define regHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f3
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_5 0x08f6
+#define regHUBPREQ3_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_6 0x08f7
+#define regHUBPREQ3_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_3 0x08f8
+#define regHUBPREQ3_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_4 0x08f9
+#define regHUBPREQ3_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_5 0x08fa
+#define regHUBPREQ3_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_6 0x08fb
+#define regHUBPREQ3_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_UCLK_PSTATE_FORCE 0x08fc
+#define regHUBPREQ3_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG0 0x08fd
+#define regHUBPREQ3_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG1 0x08fe
+#define regHUBPREQ3_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG2 0x08ff
+#define regHUBPREQ3_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+// base address: 0xa50
+#define regHUBPRET3_HUBPRET_CONTROL 0x0900
+#define regHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x0901
+#define regHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0902
+#define regHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0903
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0904
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE0 0x0905
+#define regHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE1 0x0906
+#define regHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_INTERRUPT 0x0907
+#define regHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0908
+#define regHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0909
+#define regHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+// base address: 0xa50
+#define regCURSOR0_3_CURSOR_CONTROL 0x090c
+#define regCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SIZE 0x090f
+#define regCURSOR0_3_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_POSITION 0x0910
+#define regCURSOR0_3_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_HOT_SPOT 0x0911
+#define regCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912
+#define regCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_DST_OFFSET 0x0913
+#define regCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914
+#define regCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915
+#define regCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916
+#define regCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917
+#define regCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_CNTL 0x0918
+#define regCURSOR0_3_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_QOS_CNTL 0x0919
+#define regCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_STATUS 0x091a
+#define regCURSOR0_3_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_SW_CNTL 0x091b
+#define regCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_SW_DATA 0x091c
+#define regCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x24c4
+#define regDC_PERFMON10_PERFCOUNTER_CNTL 0x0931
+#define regDC_PERFMON10_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON10_PERFCOUNTER_CNTL2 0x0932
+#define regDC_PERFMON10_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON10_PERFCOUNTER_STATE 0x0933
+#define regDC_PERFMON10_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CNTL 0x0934
+#define regDC_PERFMON10_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CNTL2 0x0935
+#define regDC_PERFMON10_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CVALUE_INT_MISC 0x0936
+#define regDC_PERFMON10_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CVALUE_LOW 0x0937
+#define regDC_PERFMON10_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_HI 0x0938
+#define regDC_PERFMON10_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_LOW 0x0939
+#define regDC_PERFMON10_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+// base address: 0x0
+#define regDPP_TOP0_DPP_CONTROL 0x0cc5
+#define regDPP_TOP0_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP0_DPP_SOFT_RESET 0x0cc6
+#define regDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP0_DPP_CRC_VAL_R_G 0x0cc7
+#define regDPP_TOP0_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define regDPP_TOP0_DPP_CRC_VAL_B_A 0x0cc8
+#define regDPP_TOP0_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define regDPP_TOP0_DPP_CRC_CTRL 0x0cc9
+#define regDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP0_HOST_READ_CONTROL 0x0cca
+#define regDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+// base address: 0x0
+#define regCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf
+#define regCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG0_FORMAT_CONTROL 0x0cd0
+#define regCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1
+#define regCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2
+#define regCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3
+#define regCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4
+#define regCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5
+#define regCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6
+#define regCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7
+#define regCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8
+#define regCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_RED 0x0cd9
+#define regCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda
+#define regCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb
+#define regCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd
+#define regCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG0_PRE_DEALPHA 0x0cde
+#define regCNVC_CFG0_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_MODE 0x0cdf
+#define regCNVC_CFG0_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C11_C12 0x0ce0
+#define regCNVC_CFG0_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C13_C14 0x0ce1
+#define regCNVC_CFG0_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C21_C22 0x0ce2
+#define regCNVC_CFG0_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C23_C24 0x0ce3
+#define regCNVC_CFG0_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C31_C32 0x0ce4
+#define regCNVC_CFG0_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C33_C34 0x0ce5
+#define regCNVC_CFG0_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C11_C12 0x0ce6
+#define regCNVC_CFG0_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C13_C14 0x0ce7
+#define regCNVC_CFG0_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C21_C22 0x0ce8
+#define regCNVC_CFG0_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C23_C24 0x0ce9
+#define regCNVC_CFG0_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C31_C32 0x0cea
+#define regCNVC_CFG0_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C33_C34 0x0ceb
+#define regCNVC_CFG0_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG0_CNVC_COEF_FORMAT 0x0cec
+#define regCNVC_CFG0_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG0_PRE_DEGAM 0x0ced
+#define regCNVC_CFG0_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG0_PRE_REALPHA 0x0cee
+#define regCNVC_CFG0_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+// base address: 0x0
+#define regCNVC_CUR0_CURSOR0_CONTROL 0x0cf1
+#define regCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_COLOR0 0x0cf2
+#define regCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_COLOR1 0x0cf3
+#define regCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0cf4
+#define regCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+// base address: 0x0
+#define regDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cf9
+#define regDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL0_SCL_COEF_RAM_TAP_DATA 0x0cfa
+#define regDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL0_SCL_MODE 0x0cfb
+#define regDSCL0_SCL_MODE_BASE_IDX 2
+#define regDSCL0_SCL_TAP_CONTROL 0x0cfc
+#define regDSCL0_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL0_DSCL_CONTROL 0x0cfd
+#define regDSCL0_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL0_DSCL_2TAP_CONTROL 0x0cfe
+#define regDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cff
+#define regDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0d00
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_INIT 0x0d01
+#define regDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0d02
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_INIT_C 0x0d03
+#define regDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0d04
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT 0x0d05
+#define regDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0d06
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0d07
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_C 0x0d08
+#define regDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0d09
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL0_SCL_BLACK_COLOR 0x0d0a
+#define regDSCL0_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL0_DSCL_UPDATE 0x0d0b
+#define regDSCL0_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL0_DSCL_AUTOCAL 0x0d0c
+#define regDSCL0_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0d0d
+#define regDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0d0e
+#define regDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL0_OTG_H_BLANK 0x0d0f
+#define regDSCL0_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL0_OTG_V_BLANK 0x0d10
+#define regDSCL0_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL0_RECOUT_START 0x0d11
+#define regDSCL0_RECOUT_START_BASE_IDX 2
+#define regDSCL0_RECOUT_SIZE 0x0d12
+#define regDSCL0_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL0_MPC_SIZE 0x0d13
+#define regDSCL0_MPC_SIZE_BASE_IDX 2
+#define regDSCL0_LB_DATA_FORMAT 0x0d14
+#define regDSCL0_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL0_LB_MEMORY_CTRL 0x0d15
+#define regDSCL0_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL0_LB_V_COUNTER 0x0d16
+#define regDSCL0_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL0_DSCL_MEM_PWR_CTRL 0x0d17
+#define regDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL0_DSCL_MEM_PWR_STATUS 0x0d18
+#define regDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL0_OBUF_CONTROL 0x0d19
+#define regDSCL0_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL0_OBUF_MEM_PWR_CTRL 0x0d1a
+#define regDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+// base address: 0x0
+#define regCM0_CM_CONTROL 0x0d20
+#define regCM0_CM_CONTROL_BASE_IDX 2
+#define regCM0_CM_POST_CSC_CONTROL 0x0d21
+#define regCM0_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C11_C12 0x0d22
+#define regCM0_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C13_C14 0x0d23
+#define regCM0_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C21_C22 0x0d24
+#define regCM0_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C23_C24 0x0d25
+#define regCM0_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C31_C32 0x0d26
+#define regCM0_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C33_C34 0x0d27
+#define regCM0_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C11_C12 0x0d28
+#define regCM0_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C13_C14 0x0d29
+#define regCM0_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C21_C22 0x0d2a
+#define regCM0_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C23_C24 0x0d2b
+#define regCM0_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C31_C32 0x0d2c
+#define regCM0_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C33_C34 0x0d2d
+#define regCM0_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_CONTROL 0x0d2e
+#define regCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C11_C12 0x0d2f
+#define regCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C13_C14 0x0d30
+#define regCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C21_C22 0x0d31
+#define regCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C23_C24 0x0d32
+#define regCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C31_C32 0x0d33
+#define regCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C33_C34 0x0d34
+#define regCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d35
+#define regCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d36
+#define regCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d37
+#define regCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d38
+#define regCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d39
+#define regCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d3a
+#define regCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM0_CM_BIAS_CR_R 0x0d3b
+#define regCM0_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM0_CM_BIAS_Y_G_CB_B 0x0d3c
+#define regCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_CONTROL 0x0d3d
+#define regCM0_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_INDEX 0x0d3e
+#define regCM0_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_DATA 0x0d3f
+#define regCM0_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_CONTROL 0x0d40
+#define regCM0_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_B 0x0d41
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_G 0x0d42
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_R 0x0d43
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x0d44
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x0d45
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x0d46
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x0d47
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x0d48
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x0d49
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_B 0x0d4a
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_B 0x0d4b
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_G 0x0d4c
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_G 0x0d4d
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_R 0x0d4e
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_R 0x0d4f
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_B 0x0d50
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_G 0x0d51
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_R 0x0d52
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_0_1 0x0d53
+#define regCM0_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_2_3 0x0d54
+#define regCM0_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_4_5 0x0d55
+#define regCM0_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_6_7 0x0d56
+#define regCM0_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_8_9 0x0d57
+#define regCM0_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_10_11 0x0d58
+#define regCM0_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_12_13 0x0d59
+#define regCM0_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_14_15 0x0d5a
+#define regCM0_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_16_17 0x0d5b
+#define regCM0_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_18_19 0x0d5c
+#define regCM0_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_20_21 0x0d5d
+#define regCM0_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_22_23 0x0d5e
+#define regCM0_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_24_25 0x0d5f
+#define regCM0_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_26_27 0x0d60
+#define regCM0_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_28_29 0x0d61
+#define regCM0_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_30_31 0x0d62
+#define regCM0_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_32_33 0x0d63
+#define regCM0_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_B 0x0d64
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_G 0x0d65
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_R 0x0d66
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x0d67
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x0d68
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x0d69
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x0d6a
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x0d6b
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x0d6c
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_B 0x0d6d
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_B 0x0d6e
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_G 0x0d6f
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_G 0x0d70
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_R 0x0d71
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_R 0x0d72
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_B 0x0d73
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_G 0x0d74
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_R 0x0d75
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_0_1 0x0d76
+#define regCM0_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_2_3 0x0d77
+#define regCM0_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_4_5 0x0d78
+#define regCM0_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_6_7 0x0d79
+#define regCM0_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_8_9 0x0d7a
+#define regCM0_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_10_11 0x0d7b
+#define regCM0_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_12_13 0x0d7c
+#define regCM0_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_14_15 0x0d7d
+#define regCM0_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_16_17 0x0d7e
+#define regCM0_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_18_19 0x0d7f
+#define regCM0_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_20_21 0x0d80
+#define regCM0_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_22_23 0x0d81
+#define regCM0_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_24_25 0x0d82
+#define regCM0_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_26_27 0x0d83
+#define regCM0_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_28_29 0x0d84
+#define regCM0_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_30_31 0x0d85
+#define regCM0_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_32_33 0x0d86
+#define regCM0_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM0_CM_HDR_MULT_COEF 0x0d87
+#define regCM0_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM0_CM_MEM_PWR_CTRL 0x0d88
+#define regCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM0_CM_MEM_PWR_STATUS 0x0d89
+#define regCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM0_CM_DEALPHA 0x0d8b
+#define regCM0_CM_DEALPHA_BASE_IDX 2
+#define regCM0_CM_COEF_FORMAT 0x0d8c
+#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3890
+#define regDC_PERFMON11_PERFCOUNTER_CNTL 0x0e24
+#define regDC_PERFMON11_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON11_PERFCOUNTER_CNTL2 0x0e25
+#define regDC_PERFMON11_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON11_PERFCOUNTER_STATE 0x0e26
+#define regDC_PERFMON11_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CNTL 0x0e27
+#define regDC_PERFMON11_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CNTL2 0x0e28
+#define regDC_PERFMON11_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CVALUE_INT_MISC 0x0e29
+#define regDC_PERFMON11_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CVALUE_LOW 0x0e2a
+#define regDC_PERFMON11_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_HI 0x0e2b
+#define regDC_PERFMON11_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_LOW 0x0e2c
+#define regDC_PERFMON11_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+// base address: 0x5ac
+#define regDPP_TOP1_DPP_CONTROL 0x0e30
+#define regDPP_TOP1_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP1_DPP_SOFT_RESET 0x0e31
+#define regDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP1_DPP_CRC_VAL_R_G 0x0e32
+#define regDPP_TOP1_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define regDPP_TOP1_DPP_CRC_VAL_B_A 0x0e33
+#define regDPP_TOP1_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define regDPP_TOP1_DPP_CRC_CTRL 0x0e34
+#define regDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP1_HOST_READ_CONTROL 0x0e35
+#define regDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+// base address: 0x5ac
+#define regCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a
+#define regCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG1_FORMAT_CONTROL 0x0e3b
+#define regCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c
+#define regCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d
+#define regCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e
+#define regCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f
+#define regCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40
+#define regCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41
+#define regCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42
+#define regCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43
+#define regCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_RED 0x0e44
+#define regCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45
+#define regCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46
+#define regCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48
+#define regCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG1_PRE_DEALPHA 0x0e49
+#define regCNVC_CFG1_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_MODE 0x0e4a
+#define regCNVC_CFG1_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C11_C12 0x0e4b
+#define regCNVC_CFG1_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C13_C14 0x0e4c
+#define regCNVC_CFG1_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C21_C22 0x0e4d
+#define regCNVC_CFG1_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C23_C24 0x0e4e
+#define regCNVC_CFG1_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C31_C32 0x0e4f
+#define regCNVC_CFG1_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C33_C34 0x0e50
+#define regCNVC_CFG1_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C11_C12 0x0e51
+#define regCNVC_CFG1_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C13_C14 0x0e52
+#define regCNVC_CFG1_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C21_C22 0x0e53
+#define regCNVC_CFG1_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C23_C24 0x0e54
+#define regCNVC_CFG1_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C31_C32 0x0e55
+#define regCNVC_CFG1_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C33_C34 0x0e56
+#define regCNVC_CFG1_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG1_CNVC_COEF_FORMAT 0x0e57
+#define regCNVC_CFG1_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG1_PRE_DEGAM 0x0e58
+#define regCNVC_CFG1_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG1_PRE_REALPHA 0x0e59
+#define regCNVC_CFG1_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+// base address: 0x5ac
+#define regCNVC_CUR1_CURSOR0_CONTROL 0x0e5c
+#define regCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_COLOR0 0x0e5d
+#define regCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_COLOR1 0x0e5e
+#define regCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e5f
+#define regCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+// base address: 0x5ac
+#define regDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e64
+#define regDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e65
+#define regDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL1_SCL_MODE 0x0e66
+#define regDSCL1_SCL_MODE_BASE_IDX 2
+#define regDSCL1_SCL_TAP_CONTROL 0x0e67
+#define regDSCL1_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL1_DSCL_CONTROL 0x0e68
+#define regDSCL1_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL1_DSCL_2TAP_CONTROL 0x0e69
+#define regDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e6a
+#define regDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e6b
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_INIT 0x0e6c
+#define regDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e6d
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e6e
+#define regDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e6f
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT 0x0e70
+#define regDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e71
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e72
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_C 0x0e73
+#define regDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e74
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL1_SCL_BLACK_COLOR 0x0e75
+#define regDSCL1_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL1_DSCL_UPDATE 0x0e76
+#define regDSCL1_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL1_DSCL_AUTOCAL 0x0e77
+#define regDSCL1_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e78
+#define regDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e79
+#define regDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL1_OTG_H_BLANK 0x0e7a
+#define regDSCL1_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL1_OTG_V_BLANK 0x0e7b
+#define regDSCL1_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL1_RECOUT_START 0x0e7c
+#define regDSCL1_RECOUT_START_BASE_IDX 2
+#define regDSCL1_RECOUT_SIZE 0x0e7d
+#define regDSCL1_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL1_MPC_SIZE 0x0e7e
+#define regDSCL1_MPC_SIZE_BASE_IDX 2
+#define regDSCL1_LB_DATA_FORMAT 0x0e7f
+#define regDSCL1_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL1_LB_MEMORY_CTRL 0x0e80
+#define regDSCL1_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL1_LB_V_COUNTER 0x0e81
+#define regDSCL1_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL1_DSCL_MEM_PWR_CTRL 0x0e82
+#define regDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL1_DSCL_MEM_PWR_STATUS 0x0e83
+#define regDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL1_OBUF_CONTROL 0x0e84
+#define regDSCL1_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL1_OBUF_MEM_PWR_CTRL 0x0e85
+#define regDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+// base address: 0x5ac
+#define regCM1_CM_CONTROL 0x0e8b
+#define regCM1_CM_CONTROL_BASE_IDX 2
+#define regCM1_CM_POST_CSC_CONTROL 0x0e8c
+#define regCM1_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C11_C12 0x0e8d
+#define regCM1_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C13_C14 0x0e8e
+#define regCM1_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C21_C22 0x0e8f
+#define regCM1_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C23_C24 0x0e90
+#define regCM1_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C31_C32 0x0e91
+#define regCM1_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C33_C34 0x0e92
+#define regCM1_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C11_C12 0x0e93
+#define regCM1_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C13_C14 0x0e94
+#define regCM1_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C21_C22 0x0e95
+#define regCM1_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C23_C24 0x0e96
+#define regCM1_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C31_C32 0x0e97
+#define regCM1_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C33_C34 0x0e98
+#define regCM1_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_CONTROL 0x0e99
+#define regCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C11_C12 0x0e9a
+#define regCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C13_C14 0x0e9b
+#define regCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C21_C22 0x0e9c
+#define regCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C23_C24 0x0e9d
+#define regCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C31_C32 0x0e9e
+#define regCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C33_C34 0x0e9f
+#define regCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C11_C12 0x0ea0
+#define regCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C13_C14 0x0ea1
+#define regCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C21_C22 0x0ea2
+#define regCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C23_C24 0x0ea3
+#define regCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C31_C32 0x0ea4
+#define regCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C33_C34 0x0ea5
+#define regCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM1_CM_BIAS_CR_R 0x0ea6
+#define regCM1_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM1_CM_BIAS_Y_G_CB_B 0x0ea7
+#define regCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_CONTROL 0x0ea8
+#define regCM1_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_INDEX 0x0ea9
+#define regCM1_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_DATA 0x0eaa
+#define regCM1_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_CONTROL 0x0eab
+#define regCM1_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_B 0x0eac
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_G 0x0ead
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_R 0x0eae
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x0eaf
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x0eb0
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x0eb1
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x0eb2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x0eb3
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x0eb4
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_B 0x0eb5
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_B 0x0eb6
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_G 0x0eb7
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_G 0x0eb8
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_R 0x0eb9
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_R 0x0eba
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_B 0x0ebb
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_G 0x0ebc
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_R 0x0ebd
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_0_1 0x0ebe
+#define regCM1_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_2_3 0x0ebf
+#define regCM1_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_4_5 0x0ec0
+#define regCM1_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_6_7 0x0ec1
+#define regCM1_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_8_9 0x0ec2
+#define regCM1_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_10_11 0x0ec3
+#define regCM1_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_12_13 0x0ec4
+#define regCM1_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_14_15 0x0ec5
+#define regCM1_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_16_17 0x0ec6
+#define regCM1_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_18_19 0x0ec7
+#define regCM1_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_20_21 0x0ec8
+#define regCM1_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_22_23 0x0ec9
+#define regCM1_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_24_25 0x0eca
+#define regCM1_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_26_27 0x0ecb
+#define regCM1_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_28_29 0x0ecc
+#define regCM1_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_30_31 0x0ecd
+#define regCM1_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_32_33 0x0ece
+#define regCM1_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_B 0x0ecf
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_G 0x0ed0
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_R 0x0ed1
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x0ed2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x0ed3
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x0ed4
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x0ed5
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x0ed6
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x0ed7
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_B 0x0ed8
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_B 0x0ed9
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_G 0x0eda
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_G 0x0edb
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_R 0x0edc
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_R 0x0edd
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_B 0x0ede
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_G 0x0edf
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_R 0x0ee0
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_0_1 0x0ee1
+#define regCM1_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_2_3 0x0ee2
+#define regCM1_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_4_5 0x0ee3
+#define regCM1_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_6_7 0x0ee4
+#define regCM1_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_8_9 0x0ee5
+#define regCM1_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_10_11 0x0ee6
+#define regCM1_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_12_13 0x0ee7
+#define regCM1_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_14_15 0x0ee8
+#define regCM1_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_16_17 0x0ee9
+#define regCM1_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_18_19 0x0eea
+#define regCM1_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_20_21 0x0eeb
+#define regCM1_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_22_23 0x0eec
+#define regCM1_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_24_25 0x0eed
+#define regCM1_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_26_27 0x0eee
+#define regCM1_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_28_29 0x0eef
+#define regCM1_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_30_31 0x0ef0
+#define regCM1_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_32_33 0x0ef1
+#define regCM1_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM1_CM_HDR_MULT_COEF 0x0ef2
+#define regCM1_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM1_CM_MEM_PWR_CTRL 0x0ef3
+#define regCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM1_CM_MEM_PWR_STATUS 0x0ef4
+#define regCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM1_CM_DEALPHA 0x0ef6
+#define regCM1_CM_DEALPHA_BASE_IDX 2
+#define regCM1_CM_COEF_FORMAT 0x0ef7
+#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3e3c
+#define regDC_PERFMON12_PERFCOUNTER_CNTL 0x0f8f
+#define regDC_PERFMON12_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON12_PERFCOUNTER_CNTL2 0x0f90
+#define regDC_PERFMON12_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON12_PERFCOUNTER_STATE 0x0f91
+#define regDC_PERFMON12_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CNTL 0x0f92
+#define regDC_PERFMON12_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CNTL2 0x0f93
+#define regDC_PERFMON12_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CVALUE_INT_MISC 0x0f94
+#define regDC_PERFMON12_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CVALUE_LOW 0x0f95
+#define regDC_PERFMON12_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_HI 0x0f96
+#define regDC_PERFMON12_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_LOW 0x0f97
+#define regDC_PERFMON12_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+// base address: 0xb58
+#define regDPP_TOP2_DPP_CONTROL 0x0f9b
+#define regDPP_TOP2_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP2_DPP_SOFT_RESET 0x0f9c
+#define regDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP2_DPP_CRC_VAL_R_G 0x0f9d
+#define regDPP_TOP2_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define regDPP_TOP2_DPP_CRC_VAL_B_A 0x0f9e
+#define regDPP_TOP2_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define regDPP_TOP2_DPP_CRC_CTRL 0x0f9f
+#define regDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP2_HOST_READ_CONTROL 0x0fa0
+#define regDPP_TOP2_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+// base address: 0xb58
+#define regCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5
+#define regCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG2_FORMAT_CONTROL 0x0fa6
+#define regCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7
+#define regCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8
+#define regCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9
+#define regCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa
+#define regCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab
+#define regCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac
+#define regCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad
+#define regCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae
+#define regCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_RED 0x0faf
+#define regCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0
+#define regCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1
+#define regCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3
+#define regCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG2_PRE_DEALPHA 0x0fb4
+#define regCNVC_CFG2_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_MODE 0x0fb5
+#define regCNVC_CFG2_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C11_C12 0x0fb6
+#define regCNVC_CFG2_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C13_C14 0x0fb7
+#define regCNVC_CFG2_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C21_C22 0x0fb8
+#define regCNVC_CFG2_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C23_C24 0x0fb9
+#define regCNVC_CFG2_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C31_C32 0x0fba
+#define regCNVC_CFG2_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C33_C34 0x0fbb
+#define regCNVC_CFG2_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C11_C12 0x0fbc
+#define regCNVC_CFG2_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C13_C14 0x0fbd
+#define regCNVC_CFG2_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C21_C22 0x0fbe
+#define regCNVC_CFG2_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C23_C24 0x0fbf
+#define regCNVC_CFG2_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C31_C32 0x0fc0
+#define regCNVC_CFG2_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C33_C34 0x0fc1
+#define regCNVC_CFG2_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG2_CNVC_COEF_FORMAT 0x0fc2
+#define regCNVC_CFG2_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG2_PRE_DEGAM 0x0fc3
+#define regCNVC_CFG2_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG2_PRE_REALPHA 0x0fc4
+#define regCNVC_CFG2_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+// base address: 0xb58
+#define regCNVC_CUR2_CURSOR0_CONTROL 0x0fc7
+#define regCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_COLOR0 0x0fc8
+#define regCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_COLOR1 0x0fc9
+#define regCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fca
+#define regCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+// base address: 0xb58
+#define regDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fcf
+#define regDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fd0
+#define regDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL2_SCL_MODE 0x0fd1
+#define regDSCL2_SCL_MODE_BASE_IDX 2
+#define regDSCL2_SCL_TAP_CONTROL 0x0fd2
+#define regDSCL2_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL2_DSCL_CONTROL 0x0fd3
+#define regDSCL2_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL2_DSCL_2TAP_CONTROL 0x0fd4
+#define regDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fd5
+#define regDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fd6
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_INIT 0x0fd7
+#define regDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fd8
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fd9
+#define regDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fda
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT 0x0fdb
+#define regDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fdc
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fdd
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_C 0x0fde
+#define regDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fdf
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL2_SCL_BLACK_COLOR 0x0fe0
+#define regDSCL2_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL2_DSCL_UPDATE 0x0fe1
+#define regDSCL2_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL2_DSCL_AUTOCAL 0x0fe2
+#define regDSCL2_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fe3
+#define regDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fe4
+#define regDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL2_OTG_H_BLANK 0x0fe5
+#define regDSCL2_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL2_OTG_V_BLANK 0x0fe6
+#define regDSCL2_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL2_RECOUT_START 0x0fe7
+#define regDSCL2_RECOUT_START_BASE_IDX 2
+#define regDSCL2_RECOUT_SIZE 0x0fe8
+#define regDSCL2_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL2_MPC_SIZE 0x0fe9
+#define regDSCL2_MPC_SIZE_BASE_IDX 2
+#define regDSCL2_LB_DATA_FORMAT 0x0fea
+#define regDSCL2_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL2_LB_MEMORY_CTRL 0x0feb
+#define regDSCL2_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL2_LB_V_COUNTER 0x0fec
+#define regDSCL2_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL2_DSCL_MEM_PWR_CTRL 0x0fed
+#define regDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL2_DSCL_MEM_PWR_STATUS 0x0fee
+#define regDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL2_OBUF_CONTROL 0x0fef
+#define regDSCL2_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL2_OBUF_MEM_PWR_CTRL 0x0ff0
+#define regDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+// base address: 0xb58
+#define regCM2_CM_CONTROL 0x0ff6
+#define regCM2_CM_CONTROL_BASE_IDX 2
+#define regCM2_CM_POST_CSC_CONTROL 0x0ff7
+#define regCM2_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C11_C12 0x0ff8
+#define regCM2_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C13_C14 0x0ff9
+#define regCM2_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C21_C22 0x0ffa
+#define regCM2_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C23_C24 0x0ffb
+#define regCM2_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C31_C32 0x0ffc
+#define regCM2_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C33_C34 0x0ffd
+#define regCM2_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C11_C12 0x0ffe
+#define regCM2_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C13_C14 0x0fff
+#define regCM2_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C21_C22 0x1000
+#define regCM2_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C23_C24 0x1001
+#define regCM2_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C31_C32 0x1002
+#define regCM2_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C33_C34 0x1003
+#define regCM2_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_CONTROL 0x1004
+#define regCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C11_C12 0x1005
+#define regCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C13_C14 0x1006
+#define regCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C21_C22 0x1007
+#define regCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C23_C24 0x1008
+#define regCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C31_C32 0x1009
+#define regCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C33_C34 0x100a
+#define regCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C11_C12 0x100b
+#define regCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C13_C14 0x100c
+#define regCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C21_C22 0x100d
+#define regCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C23_C24 0x100e
+#define regCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C31_C32 0x100f
+#define regCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C33_C34 0x1010
+#define regCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM2_CM_BIAS_CR_R 0x1011
+#define regCM2_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM2_CM_BIAS_Y_G_CB_B 0x1012
+#define regCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_CONTROL 0x1013
+#define regCM2_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_INDEX 0x1014
+#define regCM2_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_DATA 0x1015
+#define regCM2_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_CONTROL 0x1016
+#define regCM2_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_B 0x1017
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_G 0x1018
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_R 0x1019
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x101a
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x101b
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x101c
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x101d
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x101e
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x101f
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_B 0x1020
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_B 0x1021
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_G 0x1022
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_G 0x1023
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_R 0x1024
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_R 0x1025
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_B 0x1026
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_G 0x1027
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_R 0x1028
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_0_1 0x1029
+#define regCM2_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_2_3 0x102a
+#define regCM2_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_4_5 0x102b
+#define regCM2_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_6_7 0x102c
+#define regCM2_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_8_9 0x102d
+#define regCM2_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_10_11 0x102e
+#define regCM2_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_12_13 0x102f
+#define regCM2_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_14_15 0x1030
+#define regCM2_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_16_17 0x1031
+#define regCM2_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_18_19 0x1032
+#define regCM2_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_20_21 0x1033
+#define regCM2_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_22_23 0x1034
+#define regCM2_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_24_25 0x1035
+#define regCM2_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_26_27 0x1036
+#define regCM2_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_28_29 0x1037
+#define regCM2_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_30_31 0x1038
+#define regCM2_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_32_33 0x1039
+#define regCM2_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_B 0x103a
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_G 0x103b
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_R 0x103c
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x103d
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x103e
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x103f
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x1040
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x1041
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x1042
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_B 0x1043
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_B 0x1044
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_G 0x1045
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_G 0x1046
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_R 0x1047
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_R 0x1048
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_B 0x1049
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_G 0x104a
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_R 0x104b
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_0_1 0x104c
+#define regCM2_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_2_3 0x104d
+#define regCM2_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_4_5 0x104e
+#define regCM2_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_6_7 0x104f
+#define regCM2_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_8_9 0x1050
+#define regCM2_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_10_11 0x1051
+#define regCM2_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_12_13 0x1052
+#define regCM2_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_14_15 0x1053
+#define regCM2_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_16_17 0x1054
+#define regCM2_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_18_19 0x1055
+#define regCM2_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_20_21 0x1056
+#define regCM2_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_22_23 0x1057
+#define regCM2_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_24_25 0x1058
+#define regCM2_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_26_27 0x1059
+#define regCM2_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_28_29 0x105a
+#define regCM2_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_30_31 0x105b
+#define regCM2_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_32_33 0x105c
+#define regCM2_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM2_CM_HDR_MULT_COEF 0x105d
+#define regCM2_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM2_CM_MEM_PWR_CTRL 0x105e
+#define regCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM2_CM_MEM_PWR_STATUS 0x105f
+#define regCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM2_CM_DEALPHA 0x1061
+#define regCM2_CM_DEALPHA_BASE_IDX 2
+#define regCM2_CM_COEF_FORMAT 0x1062
+#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x43e8
+#define regDC_PERFMON13_PERFCOUNTER_CNTL 0x10fa
+#define regDC_PERFMON13_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON13_PERFCOUNTER_CNTL2 0x10fb
+#define regDC_PERFMON13_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON13_PERFCOUNTER_STATE 0x10fc
+#define regDC_PERFMON13_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CNTL 0x10fd
+#define regDC_PERFMON13_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CNTL2 0x10fe
+#define regDC_PERFMON13_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CVALUE_INT_MISC 0x10ff
+#define regDC_PERFMON13_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CVALUE_LOW 0x1100
+#define regDC_PERFMON13_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_HI 0x1101
+#define regDC_PERFMON13_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_LOW 0x1102
+#define regDC_PERFMON13_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+// base address: 0x1104
+#define regDPP_TOP3_DPP_CONTROL 0x1106
+#define regDPP_TOP3_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP3_DPP_SOFT_RESET 0x1107
+#define regDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP3_DPP_CRC_VAL_R_G 0x1108
+#define regDPP_TOP3_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define regDPP_TOP3_DPP_CRC_VAL_B_A 0x1109
+#define regDPP_TOP3_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define regDPP_TOP3_DPP_CRC_CTRL 0x110a
+#define regDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP3_HOST_READ_CONTROL 0x110b
+#define regDPP_TOP3_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+// base address: 0x1104
+#define regCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110
+#define regCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG3_FORMAT_CONTROL 0x1111
+#define regCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define regCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define regCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define regCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define regCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define regCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define regCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define regCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define regCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define regCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define regCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define regCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define regCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG3_PRE_DEALPHA 0x111f
+#define regCNVC_CFG3_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_MODE 0x1120
+#define regCNVC_CFG3_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C11_C12 0x1121
+#define regCNVC_CFG3_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C13_C14 0x1122
+#define regCNVC_CFG3_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C21_C22 0x1123
+#define regCNVC_CFG3_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C23_C24 0x1124
+#define regCNVC_CFG3_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C31_C32 0x1125
+#define regCNVC_CFG3_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C33_C34 0x1126
+#define regCNVC_CFG3_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C11_C12 0x1127
+#define regCNVC_CFG3_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C13_C14 0x1128
+#define regCNVC_CFG3_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C21_C22 0x1129
+#define regCNVC_CFG3_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C23_C24 0x112a
+#define regCNVC_CFG3_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C31_C32 0x112b
+#define regCNVC_CFG3_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C33_C34 0x112c
+#define regCNVC_CFG3_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG3_CNVC_COEF_FORMAT 0x112d
+#define regCNVC_CFG3_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG3_PRE_DEGAM 0x112e
+#define regCNVC_CFG3_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG3_PRE_REALPHA 0x112f
+#define regCNVC_CFG3_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define regCNVC_CUR3_CURSOR0_CONTROL 0x1132
+#define regCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_COLOR0 0x1133
+#define regCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_COLOR1 0x1134
+#define regCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1135
+#define regCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define regDSCL3_SCL_COEF_RAM_TAP_SELECT 0x113a
+#define regDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL3_SCL_COEF_RAM_TAP_DATA 0x113b
+#define regDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL3_SCL_MODE 0x113c
+#define regDSCL3_SCL_MODE_BASE_IDX 2
+#define regDSCL3_SCL_TAP_CONTROL 0x113d
+#define regDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL3_DSCL_CONTROL 0x113e
+#define regDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL3_DSCL_2TAP_CONTROL 0x113f
+#define regDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1140
+#define regDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1141
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_INIT 0x1142
+#define regDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1143
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_INIT_C 0x1144
+#define regDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1145
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT 0x1146
+#define regDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1147
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1148
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_C 0x1149
+#define regDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x114a
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL3_SCL_BLACK_COLOR 0x114b
+#define regDSCL3_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL3_DSCL_UPDATE 0x114c
+#define regDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL3_DSCL_AUTOCAL 0x114d
+#define regDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x114e
+#define regDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x114f
+#define regDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL3_OTG_H_BLANK 0x1150
+#define regDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL3_OTG_V_BLANK 0x1151
+#define regDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL3_RECOUT_START 0x1152
+#define regDSCL3_RECOUT_START_BASE_IDX 2
+#define regDSCL3_RECOUT_SIZE 0x1153
+#define regDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL3_MPC_SIZE 0x1154
+#define regDSCL3_MPC_SIZE_BASE_IDX 2
+#define regDSCL3_LB_DATA_FORMAT 0x1155
+#define regDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL3_LB_MEMORY_CTRL 0x1156
+#define regDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL3_LB_V_COUNTER 0x1157
+#define regDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL3_DSCL_MEM_PWR_CTRL 0x1158
+#define regDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL3_DSCL_MEM_PWR_STATUS 0x1159
+#define regDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL3_OBUF_CONTROL 0x115a
+#define regDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL3_OBUF_MEM_PWR_CTRL 0x115b
+#define regDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define regCM3_CM_CONTROL 0x1161
+#define regCM3_CM_CONTROL_BASE_IDX 2
+#define regCM3_CM_POST_CSC_CONTROL 0x1162
+#define regCM3_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C11_C12 0x1163
+#define regCM3_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C13_C14 0x1164
+#define regCM3_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C21_C22 0x1165
+#define regCM3_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C23_C24 0x1166
+#define regCM3_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C31_C32 0x1167
+#define regCM3_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C33_C34 0x1168
+#define regCM3_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C11_C12 0x1169
+#define regCM3_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C13_C14 0x116a
+#define regCM3_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C21_C22 0x116b
+#define regCM3_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C23_C24 0x116c
+#define regCM3_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C31_C32 0x116d
+#define regCM3_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C33_C34 0x116e
+#define regCM3_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_CONTROL 0x116f
+#define regCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C11_C12 0x1170
+#define regCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C13_C14 0x1171
+#define regCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C21_C22 0x1172
+#define regCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C23_C24 0x1173
+#define regCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C31_C32 0x1174
+#define regCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C33_C34 0x1175
+#define regCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C11_C12 0x1176
+#define regCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C13_C14 0x1177
+#define regCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C21_C22 0x1178
+#define regCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C23_C24 0x1179
+#define regCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C31_C32 0x117a
+#define regCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C33_C34 0x117b
+#define regCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM3_CM_BIAS_CR_R 0x117c
+#define regCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM3_CM_BIAS_Y_G_CB_B 0x117d
+#define regCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_CONTROL 0x117e
+#define regCM3_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_INDEX 0x117f
+#define regCM3_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_DATA 0x1180
+#define regCM3_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_CONTROL 0x1181
+#define regCM3_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_B 0x1182
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_G 0x1183
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_R 0x1184
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x1185
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x1186
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x1187
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x1188
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x1189
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x118a
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_B 0x118b
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_B 0x118c
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_G 0x118d
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_G 0x118e
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_R 0x118f
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_R 0x1190
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_B 0x1191
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_G 0x1192
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_R 0x1193
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_0_1 0x1194
+#define regCM3_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_2_3 0x1195
+#define regCM3_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_4_5 0x1196
+#define regCM3_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_6_7 0x1197
+#define regCM3_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_8_9 0x1198
+#define regCM3_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_10_11 0x1199
+#define regCM3_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_12_13 0x119a
+#define regCM3_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_14_15 0x119b
+#define regCM3_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_16_17 0x119c
+#define regCM3_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_18_19 0x119d
+#define regCM3_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_20_21 0x119e
+#define regCM3_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_22_23 0x119f
+#define regCM3_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_24_25 0x11a0
+#define regCM3_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_26_27 0x11a1
+#define regCM3_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_28_29 0x11a2
+#define regCM3_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_30_31 0x11a3
+#define regCM3_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_32_33 0x11a4
+#define regCM3_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_B 0x11a5
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_G 0x11a6
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_R 0x11a7
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x11a8
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x11a9
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x11aa
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x11ab
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x11ac
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x11ad
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_B 0x11ae
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_B 0x11af
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_G 0x11b0
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_G 0x11b1
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_R 0x11b2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_R 0x11b3
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_B 0x11b4
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_G 0x11b5
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_R 0x11b6
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_0_1 0x11b7
+#define regCM3_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_2_3 0x11b8
+#define regCM3_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_4_5 0x11b9
+#define regCM3_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_6_7 0x11ba
+#define regCM3_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_8_9 0x11bb
+#define regCM3_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_10_11 0x11bc
+#define regCM3_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_12_13 0x11bd
+#define regCM3_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_14_15 0x11be
+#define regCM3_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_16_17 0x11bf
+#define regCM3_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_18_19 0x11c0
+#define regCM3_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_20_21 0x11c1
+#define regCM3_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_22_23 0x11c2
+#define regCM3_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_24_25 0x11c3
+#define regCM3_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_26_27 0x11c4
+#define regCM3_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_28_29 0x11c5
+#define regCM3_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_30_31 0x11c6
+#define regCM3_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_32_33 0x11c7
+#define regCM3_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM3_CM_HDR_MULT_COEF 0x11c8
+#define regCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM3_CM_MEM_PWR_CTRL 0x11c9
+#define regCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM3_CM_MEM_PWR_STATUS 0x11ca
+#define regCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM3_CM_DEALPHA 0x11cc
+#define regCM3_CM_DEALPHA_BASE_IDX 2
+#define regCM3_CM_COEF_FORMAT 0x11cd
+#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x4994
+#define regDC_PERFMON14_PERFCOUNTER_CNTL 0x1265
+#define regDC_PERFMON14_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON14_PERFCOUNTER_CNTL2 0x1266
+#define regDC_PERFMON14_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON14_PERFCOUNTER_STATE 0x1267
+#define regDC_PERFMON14_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CNTL 0x1268
+#define regDC_PERFMON14_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CNTL2 0x1269
+#define regDC_PERFMON14_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CVALUE_INT_MISC 0x126a
+#define regDC_PERFMON14_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CVALUE_LOW 0x126b
+#define regDC_PERFMON14_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_HI 0x126c
+#define regDC_PERFMON14_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_LOW 0x126d
+#define regDC_PERFMON14_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define regFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define regFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define regFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define regFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define regFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT0_FMT_CONTROL 0x1840
+#define regFMT0_FMT_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_BIT_DEPTH_CONTROL 0x1841
+#define regFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_R_SEED 0x1842
+#define regFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_G_SEED 0x1843
+#define regFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_B_SEED 0x1844
+#define regFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_CNTL 0x1845
+#define regFMT0_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846
+#define regFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847
+#define regFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_422_CONTROL 0x1849
+#define regFMT0_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+// base address: 0x0
+#define regDPG0_DPG_CONTROL 0x1854
+#define regDPG0_DPG_CONTROL_BASE_IDX 2
+#define regDPG0_DPG_RAMP_CONTROL 0x1855
+#define regDPG0_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG0_DPG_DIMENSIONS 0x1856
+#define regDPG0_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_R_CR 0x1857
+#define regDPG0_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_G_Y 0x1858
+#define regDPG0_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_B_CB 0x1859
+#define regDPG0_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG0_DPG_OFFSET_SEGMENT 0x185a
+#define regDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG0_DPG_STATUS 0x185b
+#define regDPG0_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+// base address: 0x0
+#define regOPPBUF0_OPPBUF_CONTROL 0x1884
+#define regOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_CONTROL1 0x1889
+#define regOPPBUF0_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+// base address: 0x0
+#define regOPP_PIPE0_OPP_PIPE_CONTROL 0x188c
+#define regOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+// base address: 0x0
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0 0x1893
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1 0x1894
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2 0x1895
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+// base address: 0x168
+#define regFMT1_FMT_CLAMP_COMPONENT_R 0x1896
+#define regFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_COMPONENT_G 0x1897
+#define regFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_COMPONENT_B 0x1898
+#define regFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899
+#define regFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT1_FMT_CONTROL 0x189a
+#define regFMT1_FMT_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_BIT_DEPTH_CONTROL 0x189b
+#define regFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_R_SEED 0x189c
+#define regFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_G_SEED 0x189d
+#define regFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_B_SEED 0x189e
+#define regFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_CNTL 0x189f
+#define regFMT1_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0
+#define regFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1
+#define regFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_422_CONTROL 0x18a3
+#define regFMT1_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+// base address: 0x168
+#define regDPG1_DPG_CONTROL 0x18ae
+#define regDPG1_DPG_CONTROL_BASE_IDX 2
+#define regDPG1_DPG_RAMP_CONTROL 0x18af
+#define regDPG1_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG1_DPG_DIMENSIONS 0x18b0
+#define regDPG1_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_R_CR 0x18b1
+#define regDPG1_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_G_Y 0x18b2
+#define regDPG1_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_B_CB 0x18b3
+#define regDPG1_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG1_DPG_OFFSET_SEGMENT 0x18b4
+#define regDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG1_DPG_STATUS 0x18b5
+#define regDPG1_DPG_STATUS_BASE_IDX 2
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+// base address: 0x168
+#define regOPPBUF1_OPPBUF_CONTROL 0x18de
+#define regOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_CONTROL1 0x18e3
+#define regOPPBUF1_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+// base address: 0x168
+#define regOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6
+#define regOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+// base address: 0x168
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0 0x18ed
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1 0x18ee
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2 0x18ef
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+// base address: 0x2d0
+#define regFMT2_FMT_CLAMP_COMPONENT_R 0x18f0
+#define regFMT2_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_COMPONENT_G 0x18f1
+#define regFMT2_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_COMPONENT_B 0x18f2
+#define regFMT2_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT2_FMT_DYNAMIC_EXP_CNTL 0x18f3
+#define regFMT2_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT2_FMT_CONTROL 0x18f4
+#define regFMT2_FMT_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_BIT_DEPTH_CONTROL 0x18f5
+#define regFMT2_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_R_SEED 0x18f6
+#define regFMT2_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_G_SEED 0x18f7
+#define regFMT2_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_B_SEED 0x18f8
+#define regFMT2_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_CNTL 0x18f9
+#define regFMT2_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18fa
+#define regFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_MAP420_MEMORY_CONTROL 0x18fb
+#define regFMT2_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_422_CONTROL 0x18fd
+#define regFMT2_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+// base address: 0x2d0
+#define regDPG2_DPG_CONTROL 0x1908
+#define regDPG2_DPG_CONTROL_BASE_IDX 2
+#define regDPG2_DPG_RAMP_CONTROL 0x1909
+#define regDPG2_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG2_DPG_DIMENSIONS 0x190a
+#define regDPG2_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_R_CR 0x190b
+#define regDPG2_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_G_Y 0x190c
+#define regDPG2_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_B_CB 0x190d
+#define regDPG2_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG2_DPG_OFFSET_SEGMENT 0x190e
+#define regDPG2_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG2_DPG_STATUS 0x190f
+#define regDPG2_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+// base address: 0x2d0
+#define regOPPBUF2_OPPBUF_CONTROL 0x1938
+#define regOPPBUF2_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_0 0x1939
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_1 0x193a
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_CONTROL1 0x193d
+#define regOPPBUF2_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+// base address: 0x2d0
+#define regOPP_PIPE2_OPP_PIPE_CONTROL 0x1940
+#define regOPP_PIPE2_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+// base address: 0x2d0
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL 0x1945
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK 0x1946
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0 0x1947
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1 0x1948
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2 0x1949
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+// base address: 0x438
+#define regFMT3_FMT_CLAMP_COMPONENT_R 0x194a
+#define regFMT3_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_COMPONENT_G 0x194b
+#define regFMT3_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_COMPONENT_B 0x194c
+#define regFMT3_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT3_FMT_DYNAMIC_EXP_CNTL 0x194d
+#define regFMT3_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT3_FMT_CONTROL 0x194e
+#define regFMT3_FMT_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_BIT_DEPTH_CONTROL 0x194f
+#define regFMT3_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_R_SEED 0x1950
+#define regFMT3_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_G_SEED 0x1951
+#define regFMT3_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_B_SEED 0x1952
+#define regFMT3_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_CNTL 0x1953
+#define regFMT3_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1954
+#define regFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_MAP420_MEMORY_CONTROL 0x1955
+#define regFMT3_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_422_CONTROL 0x1957
+#define regFMT3_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+// base address: 0x438
+#define regDPG3_DPG_CONTROL 0x1962
+#define regDPG3_DPG_CONTROL_BASE_IDX 2
+#define regDPG3_DPG_RAMP_CONTROL 0x1963
+#define regDPG3_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG3_DPG_DIMENSIONS 0x1964
+#define regDPG3_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_R_CR 0x1965
+#define regDPG3_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_G_Y 0x1966
+#define regDPG3_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_B_CB 0x1967
+#define regDPG3_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG3_DPG_OFFSET_SEGMENT 0x1968
+#define regDPG3_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG3_DPG_STATUS 0x1969
+#define regDPG3_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+// base address: 0x438
+#define regOPPBUF3_OPPBUF_CONTROL 0x1992
+#define regOPPBUF3_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_0 0x1993
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_1 0x1994
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_CONTROL1 0x1997
+#define regOPPBUF3_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+// base address: 0x438
+#define regOPP_PIPE3_OPP_PIPE_CONTROL 0x199a
+#define regOPP_PIPE3_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+// base address: 0x438
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL 0x199f
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK 0x19a0
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0 0x19a1
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1 0x19a2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2 0x19a3
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+// base address: 0x0
+#define regOPP_TOP_CLK_CONTROL 0x1a5e
+#define regOPP_TOP_CLK_CONTROL_BASE_IDX 2
+#define regOPP_ABM_CONTROL 0x1a60
+#define regOPP_ABM_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+// base address: 0x0
+#define regDSCRM0_DSCRM_DSC_FORWARD_CONFIG 0x1a64
+#define regDSCRM0_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+// base address: 0x4
+#define regDSCRM1_DSCRM_DSC_FORWARD_CONFIG 0x1a65
+#define regDSCRM1_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+// base address: 0x8
+#define regDSCRM2_DSCRM_DSC_FORWARD_CONFIG 0x1a66
+#define regDSCRM2_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+// base address: 0xc
+#define regDSCRM3_DSCRM_DSC_FORWARD_CONFIG 0x1a67
+#define regDSCRM3_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x6af8
+#define regDC_PERFMON16_PERFCOUNTER_CNTL 0x1abe
+#define regDC_PERFMON16_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON16_PERFCOUNTER_CNTL2 0x1abf
+#define regDC_PERFMON16_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON16_PERFCOUNTER_STATE 0x1ac0
+#define regDC_PERFMON16_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CNTL 0x1ac1
+#define regDC_PERFMON16_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CNTL2 0x1ac2
+#define regDC_PERFMON16_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CVALUE_INT_MISC 0x1ac3
+#define regDC_PERFMON16_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CVALUE_LOW 0x1ac4
+#define regDC_PERFMON16_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_HI 0x1ac5
+#define regDC_PERFMON16_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_LOW 0x1ac6
+#define regDC_PERFMON16_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+// base address: 0x0
+#define regODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
+#define regODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_DATA_SOURCE_SELECT 0x1acb
+#define regODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc
+#define regODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_BYTES_PER_PIXEL 0x1acd
+#define regODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM0_OPTC_WIDTH_CONTROL 0x1ace
+#define regODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf
+#define regODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_MEMORY_CONFIG 0x1ad0
+#define regODM0_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM0_OPTC_INPUT_SPARE_REGISTER 0x1ad1
+#define regODM0_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+// base address: 0x40
+#define regODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
+#define regODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_DATA_SOURCE_SELECT 0x1adb
+#define regODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc
+#define regODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_BYTES_PER_PIXEL 0x1add
+#define regODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM1_OPTC_WIDTH_CONTROL 0x1ade
+#define regODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf
+#define regODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_MEMORY_CONFIG 0x1ae0
+#define regODM1_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM1_OPTC_INPUT_SPARE_REGISTER 0x1ae1
+#define regODM1_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+// base address: 0x80
+#define regODM2_OPTC_INPUT_GLOBAL_CONTROL 0x1aea
+#define regODM2_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_DATA_SOURCE_SELECT 0x1aeb
+#define regODM2_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM2_OPTC_DATA_FORMAT_CONTROL 0x1aec
+#define regODM2_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_BYTES_PER_PIXEL 0x1aed
+#define regODM2_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM2_OPTC_WIDTH_CONTROL 0x1aee
+#define regODM2_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_INPUT_CLOCK_CONTROL 0x1aef
+#define regODM2_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_MEMORY_CONFIG 0x1af0
+#define regODM2_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM2_OPTC_INPUT_SPARE_REGISTER 0x1af1
+#define regODM2_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+// base address: 0xc0
+#define regODM3_OPTC_INPUT_GLOBAL_CONTROL 0x1afa
+#define regODM3_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_DATA_SOURCE_SELECT 0x1afb
+#define regODM3_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM3_OPTC_DATA_FORMAT_CONTROL 0x1afc
+#define regODM3_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_BYTES_PER_PIXEL 0x1afd
+#define regODM3_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM3_OPTC_WIDTH_CONTROL 0x1afe
+#define regODM3_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_INPUT_CLOCK_CONTROL 0x1aff
+#define regODM3_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_MEMORY_CONFIG 0x1b00
+#define regODM3_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM3_OPTC_INPUT_SPARE_REGISTER 0x1b01
+#define regODM3_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+// base address: 0x0
+#define regOTG0_OTG_H_TOTAL 0x1b2a
+#define regOTG0_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG0_OTG_H_BLANK_START_END 0x1b2b
+#define regOTG0_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG0_OTG_H_SYNC_A 0x1b2c
+#define regOTG0_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG0_OTG_H_SYNC_A_CNTL 0x1b2d
+#define regOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG0_OTG_H_TIMING_CNTL 0x1b2e
+#define regOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL 0x1b2f
+#define regOTG0_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MIN 0x1b30
+#define regOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MAX 0x1b31
+#define regOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MID 0x1b32
+#define regOTG0_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_CONTROL 0x1b33
+#define regOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL 0x1b34
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL2 0x1b35
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_INT_STATUS 0x1b36
+#define regOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b37
+#define regOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_V_BLANK_START_END 0x1b38
+#define regOTG0_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG0_OTG_V_SYNC_A 0x1b39
+#define regOTG0_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG0_OTG_V_SYNC_A_CNTL 0x1b3a
+#define regOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGA_CNTL 0x1b3b
+#define regOTG0_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3c
+#define regOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG0_OTG_TRIGB_CNTL 0x1b3d
+#define regOTG0_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3e
+#define regOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3f
+#define regOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b41
+#define regOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG0_OTG_CONTROL 0x1b43
+#define regOTG0_OTG_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DLPC_CONTROL 0x1b44
+#define regOTG0_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_INTERLACE_CONTROL 0x1b45
+#define regOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_INTERLACE_STATUS 0x1b46
+#define regOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47
+#define regOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48
+#define regOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG0_OTG_STATUS 0x1b49
+#define regOTG0_OTG_STATUS_BASE_IDX 2
+#define regOTG0_OTG_STATUS_POSITION 0x1b4a
+#define regOTG0_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG0_OTG_LONG_VBLANK_STATUS 0x1b4b
+#define regOTG0_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG0_OTG_NOM_VERT_POSITION 0x1b4c
+#define regOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG0_OTG_STATUS_FRAME_COUNT 0x1b4d
+#define regOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG0_OTG_STATUS_VF_COUNT 0x1b4e
+#define regOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG0_OTG_STATUS_HV_COUNT 0x1b4f
+#define regOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG0_OTG_COUNT_CONTROL 0x1b50
+#define regOTG0_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_COUNT_RESET 0x1b51
+#define regOTG0_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b52
+#define regOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG0_OTG_VERT_SYNC_CONTROL 0x1b53
+#define regOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_STEREO_STATUS 0x1b54
+#define regOTG0_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG0_OTG_STEREO_CONTROL 0x1b55
+#define regOTG0_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_STATUS 0x1b56
+#define regOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_CONTROL 0x1b57
+#define regOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_POSITION 0x1b58
+#define regOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_FRAME 0x1b59
+#define regOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG0_OTG_INTERRUPT_CONTROL 0x1b5a
+#define regOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_UPDATE_LOCK 0x1b5b
+#define regOTG0_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5c
+#define regOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_MASTER_EN 0x1b5d
+#define regOTG0_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b5f
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b60
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b61
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b62
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b63
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b64
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC_CNTL 0x1b65
+#define regOTG0_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b66
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b67
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b68
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b69
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_DATA_RG 0x1b6a
+#define regOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC0_DATA_B 0x1b6b
+#define regOTG0_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b6c
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b6d
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b6e
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b6f
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_DATA_RG 0x1b70
+#define regOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC1_DATA_B 0x1b71
+#define regOTG0_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC2_DATA_RG 0x1b72
+#define regOTG0_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC2_DATA_B 0x1b73
+#define regOTG0_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC3_DATA_RG 0x1b74
+#define regOTG0_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC3_DATA_B 0x1b75
+#define regOTG0_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b76
+#define regOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b77
+#define regOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1b78
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1b79
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1b7a
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1b7b
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1b7c
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1b7d
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1b7e
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1b7f
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b80
+#define regOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b81
+#define regOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_GSL_VSYNC_GAP 0x1b82
+#define regOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG0_OTG_MASTER_UPDATE_MODE 0x1b83
+#define regOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG0_OTG_CLOCK_CONTROL 0x1b84
+#define regOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VSTARTUP_PARAM 0x1b85
+#define regOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG0_OTG_VUPDATE_PARAM 0x1b86
+#define regOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG0_OTG_VREADY_PARAM 0x1b87
+#define regOTG0_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b88
+#define regOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG0_OTG_MASTER_UPDATE_LOCK 0x1b89
+#define regOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG0_OTG_GSL_CONTROL 0x1b8a
+#define regOTG0_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_GSL_WINDOW_X 0x1b8b
+#define regOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG0_OTG_GSL_WINDOW_Y 0x1b8c
+#define regOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG0_OTG_VUPDATE_KEEPOUT 0x1b8d
+#define regOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL0 0x1b8e
+#define regOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL1 0x1b8f
+#define regOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL2 0x1b90
+#define regOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL3 0x1b91
+#define regOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL4 0x1b92
+#define regOTG0_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b93
+#define regOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DRR_TIMING_INT_STATUS 0x1b95
+#define regOTG0_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_DRR_V_TOTAL_REACH_RANGE 0x1b96
+#define regOTG0_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG0_OTG_DRR_V_TOTAL_CHANGE 0x1b97
+#define regOTG0_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG0_OTG_DRR_TRIGGER_WINDOW 0x1b98
+#define regOTG0_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG0_OTG_DRR_CONTROL 0x1b99
+#define regOTG0_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DRR_CONTOL2 0x1b9a
+#define regOTG0_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG0_OTG_M_CONST_DTO0 0x1b9b
+#define regOTG0_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG0_OTG_M_CONST_DTO1 0x1b9c
+#define regOTG0_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG0_OTG_REQUEST_CONTROL 0x1b9d
+#define regOTG0_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DSC_START_POSITION 0x1b9e
+#define regOTG0_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG0_OTG_PIPE_UPDATE_STATUS 0x1b9f
+#define regOTG0_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG0_OTG_SPARE_REGISTER 0x1ba0
+#define regOTG0_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+// base address: 0x200
+#define regOTG1_OTG_H_TOTAL 0x1baa
+#define regOTG1_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG1_OTG_H_BLANK_START_END 0x1bab
+#define regOTG1_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG1_OTG_H_SYNC_A 0x1bac
+#define regOTG1_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG1_OTG_H_SYNC_A_CNTL 0x1bad
+#define regOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG1_OTG_H_TIMING_CNTL 0x1bae
+#define regOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL 0x1baf
+#define regOTG1_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MIN 0x1bb0
+#define regOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MAX 0x1bb1
+#define regOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MID 0x1bb2
+#define regOTG1_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_CONTROL 0x1bb3
+#define regOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL 0x1bb4
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL2 0x1bb5
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb6
+#define regOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb7
+#define regOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_V_BLANK_START_END 0x1bb8
+#define regOTG1_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG1_OTG_V_SYNC_A 0x1bb9
+#define regOTG1_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG1_OTG_V_SYNC_A_CNTL 0x1bba
+#define regOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGA_CNTL 0x1bbb
+#define regOTG1_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bbc
+#define regOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG1_OTG_TRIGB_CNTL 0x1bbd
+#define regOTG1_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbe
+#define regOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbf
+#define regOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bc1
+#define regOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG1_OTG_CONTROL 0x1bc3
+#define regOTG1_OTG_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DLPC_CONTROL 0x1bc4
+#define regOTG1_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_INTERLACE_CONTROL 0x1bc5
+#define regOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_INTERLACE_STATUS 0x1bc6
+#define regOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7
+#define regOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8
+#define regOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG1_OTG_STATUS 0x1bc9
+#define regOTG1_OTG_STATUS_BASE_IDX 2
+#define regOTG1_OTG_STATUS_POSITION 0x1bca
+#define regOTG1_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG1_OTG_LONG_VBLANK_STATUS 0x1bcb
+#define regOTG1_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG1_OTG_NOM_VERT_POSITION 0x1bcc
+#define regOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG1_OTG_STATUS_FRAME_COUNT 0x1bcd
+#define regOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG1_OTG_STATUS_VF_COUNT 0x1bce
+#define regOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG1_OTG_STATUS_HV_COUNT 0x1bcf
+#define regOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG1_OTG_COUNT_CONTROL 0x1bd0
+#define regOTG1_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_COUNT_RESET 0x1bd1
+#define regOTG1_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd2
+#define regOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG1_OTG_VERT_SYNC_CONTROL 0x1bd3
+#define regOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_STEREO_STATUS 0x1bd4
+#define regOTG1_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG1_OTG_STEREO_CONTROL 0x1bd5
+#define regOTG1_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_STATUS 0x1bd6
+#define regOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_CONTROL 0x1bd7
+#define regOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_POSITION 0x1bd8
+#define regOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_FRAME 0x1bd9
+#define regOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG1_OTG_INTERRUPT_CONTROL 0x1bda
+#define regOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_UPDATE_LOCK 0x1bdb
+#define regOTG1_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdc
+#define regOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_MASTER_EN 0x1bdd
+#define regOTG1_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1bdf
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be0
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be1
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be3
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be4
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC_CNTL 0x1be5
+#define regOTG1_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1be6
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1be7
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1be8
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1be9
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_DATA_RG 0x1bea
+#define regOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC0_DATA_B 0x1beb
+#define regOTG1_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bec
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bed
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bee
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bef
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_DATA_RG 0x1bf0
+#define regOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC1_DATA_B 0x1bf1
+#define regOTG1_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC2_DATA_RG 0x1bf2
+#define regOTG1_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC2_DATA_B 0x1bf3
+#define regOTG1_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC3_DATA_RG 0x1bf4
+#define regOTG1_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC3_DATA_B 0x1bf5
+#define regOTG1_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bf6
+#define regOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bf7
+#define regOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1bf8
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1bf9
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1bfa
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1bfb
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1bfc
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1bfd
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1bfe
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1bff
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c00
+#define regOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c01
+#define regOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_GSL_VSYNC_GAP 0x1c02
+#define regOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG1_OTG_MASTER_UPDATE_MODE 0x1c03
+#define regOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG1_OTG_CLOCK_CONTROL 0x1c04
+#define regOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VSTARTUP_PARAM 0x1c05
+#define regOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG1_OTG_VUPDATE_PARAM 0x1c06
+#define regOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG1_OTG_VREADY_PARAM 0x1c07
+#define regOTG1_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c08
+#define regOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG1_OTG_MASTER_UPDATE_LOCK 0x1c09
+#define regOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG1_OTG_GSL_CONTROL 0x1c0a
+#define regOTG1_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_GSL_WINDOW_X 0x1c0b
+#define regOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG1_OTG_GSL_WINDOW_Y 0x1c0c
+#define regOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG1_OTG_VUPDATE_KEEPOUT 0x1c0d
+#define regOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL0 0x1c0e
+#define regOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL1 0x1c0f
+#define regOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL2 0x1c10
+#define regOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL3 0x1c11
+#define regOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL4 0x1c12
+#define regOTG1_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c13
+#define regOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DRR_TIMING_INT_STATUS 0x1c15
+#define regOTG1_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_DRR_V_TOTAL_REACH_RANGE 0x1c16
+#define regOTG1_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG1_OTG_DRR_V_TOTAL_CHANGE 0x1c17
+#define regOTG1_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG1_OTG_DRR_TRIGGER_WINDOW 0x1c18
+#define regOTG1_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG1_OTG_DRR_CONTROL 0x1c19
+#define regOTG1_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DRR_CONTOL2 0x1c1a
+#define regOTG1_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG1_OTG_M_CONST_DTO0 0x1c1b
+#define regOTG1_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG1_OTG_M_CONST_DTO1 0x1c1c
+#define regOTG1_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG1_OTG_REQUEST_CONTROL 0x1c1d
+#define regOTG1_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DSC_START_POSITION 0x1c1e
+#define regOTG1_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG1_OTG_PIPE_UPDATE_STATUS 0x1c1f
+#define regOTG1_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG1_OTG_SPARE_REGISTER 0x1c20
+#define regOTG1_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+// base address: 0x400
+#define regOTG2_OTG_H_TOTAL 0x1c2a
+#define regOTG2_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG2_OTG_H_BLANK_START_END 0x1c2b
+#define regOTG2_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG2_OTG_H_SYNC_A 0x1c2c
+#define regOTG2_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG2_OTG_H_SYNC_A_CNTL 0x1c2d
+#define regOTG2_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG2_OTG_H_TIMING_CNTL 0x1c2e
+#define regOTG2_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL 0x1c2f
+#define regOTG2_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MIN 0x1c30
+#define regOTG2_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MAX 0x1c31
+#define regOTG2_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MID 0x1c32
+#define regOTG2_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_CONTROL 0x1c33
+#define regOTG2_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL 0x1c34
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL2 0x1c35
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_INT_STATUS 0x1c36
+#define regOTG2_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_VSYNC_NOM_INT_STATUS 0x1c37
+#define regOTG2_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_V_BLANK_START_END 0x1c38
+#define regOTG2_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG2_OTG_V_SYNC_A 0x1c39
+#define regOTG2_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG2_OTG_V_SYNC_A_CNTL 0x1c3a
+#define regOTG2_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGA_CNTL 0x1c3b
+#define regOTG2_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGA_MANUAL_TRIG 0x1c3c
+#define regOTG2_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG2_OTG_TRIGB_CNTL 0x1c3d
+#define regOTG2_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGB_MANUAL_TRIG 0x1c3e
+#define regOTG2_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG2_OTG_FORCE_COUNT_NOW_CNTL 0x1c3f
+#define regOTG2_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG2_OTG_STEREO_FORCE_NEXT_EYE 0x1c41
+#define regOTG2_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG2_OTG_CONTROL 0x1c43
+#define regOTG2_OTG_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DLPC_CONTROL 0x1c44
+#define regOTG2_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_INTERLACE_CONTROL 0x1c45
+#define regOTG2_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_INTERLACE_STATUS 0x1c46
+#define regOTG2_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG2_OTG_PIXEL_DATA_READBACK0 0x1c47
+#define regOTG2_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG2_OTG_PIXEL_DATA_READBACK1 0x1c48
+#define regOTG2_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG2_OTG_STATUS 0x1c49
+#define regOTG2_OTG_STATUS_BASE_IDX 2
+#define regOTG2_OTG_STATUS_POSITION 0x1c4a
+#define regOTG2_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG2_OTG_LONG_VBLANK_STATUS 0x1c4b
+#define regOTG2_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG2_OTG_NOM_VERT_POSITION 0x1c4c
+#define regOTG2_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG2_OTG_STATUS_FRAME_COUNT 0x1c4d
+#define regOTG2_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG2_OTG_STATUS_VF_COUNT 0x1c4e
+#define regOTG2_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG2_OTG_STATUS_HV_COUNT 0x1c4f
+#define regOTG2_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG2_OTG_COUNT_CONTROL 0x1c50
+#define regOTG2_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_COUNT_RESET 0x1c51
+#define regOTG2_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1c52
+#define regOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG2_OTG_VERT_SYNC_CONTROL 0x1c53
+#define regOTG2_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_STEREO_STATUS 0x1c54
+#define regOTG2_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG2_OTG_STEREO_CONTROL 0x1c55
+#define regOTG2_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_STATUS 0x1c56
+#define regOTG2_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_CONTROL 0x1c57
+#define regOTG2_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_POSITION 0x1c58
+#define regOTG2_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_FRAME 0x1c59
+#define regOTG2_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG2_OTG_INTERRUPT_CONTROL 0x1c5a
+#define regOTG2_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_UPDATE_LOCK 0x1c5b
+#define regOTG2_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG2_OTG_DOUBLE_BUFFER_CONTROL 0x1c5c
+#define regOTG2_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_MASTER_EN 0x1c5d
+#define regOTG2_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_POSITION 0x1c5f
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1c60
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_POSITION 0x1c61
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1c62
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_POSITION 0x1c63
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1c64
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC_CNTL 0x1c65
+#define regOTG2_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL 0x1c66
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL 0x1c67
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL 0x1c68
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL 0x1c69
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_DATA_RG 0x1c6a
+#define regOTG2_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC0_DATA_B 0x1c6b
+#define regOTG2_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL 0x1c6c
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL 0x1c6d
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL 0x1c6e
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL 0x1c6f
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_DATA_RG 0x1c70
+#define regOTG2_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC1_DATA_B 0x1c71
+#define regOTG2_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC2_DATA_RG 0x1c72
+#define regOTG2_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC2_DATA_B 0x1c73
+#define regOTG2_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC3_DATA_RG 0x1c74
+#define regOTG2_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC3_DATA_B 0x1c75
+#define regOTG2_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC_SIG_RED_GREEN_MASK 0x1c76
+#define regOTG2_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1c77
+#define regOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1c78
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1c79
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1c7a
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1c7b
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1c7c
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1c7d
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1c7e
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1c7f
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_STATIC_SCREEN_CONTROL 0x1c80
+#define regOTG2_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_3D_STRUCTURE_CONTROL 0x1c81
+#define regOTG2_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_GSL_VSYNC_GAP 0x1c82
+#define regOTG2_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG2_OTG_MASTER_UPDATE_MODE 0x1c83
+#define regOTG2_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG2_OTG_CLOCK_CONTROL 0x1c84
+#define regOTG2_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VSTARTUP_PARAM 0x1c85
+#define regOTG2_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG2_OTG_VUPDATE_PARAM 0x1c86
+#define regOTG2_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG2_OTG_VREADY_PARAM 0x1c87
+#define regOTG2_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_SYNC_STATUS 0x1c88
+#define regOTG2_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG2_OTG_MASTER_UPDATE_LOCK 0x1c89
+#define regOTG2_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG2_OTG_GSL_CONTROL 0x1c8a
+#define regOTG2_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_GSL_WINDOW_X 0x1c8b
+#define regOTG2_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG2_OTG_GSL_WINDOW_Y 0x1c8c
+#define regOTG2_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG2_OTG_VUPDATE_KEEPOUT 0x1c8d
+#define regOTG2_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL0 0x1c8e
+#define regOTG2_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL1 0x1c8f
+#define regOTG2_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL2 0x1c90
+#define regOTG2_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL3 0x1c91
+#define regOTG2_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL4 0x1c92
+#define regOTG2_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG2_OTG_TRIG_MANUAL_CONTROL 0x1c93
+#define regOTG2_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DRR_TIMING_INT_STATUS 0x1c95
+#define regOTG2_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_DRR_V_TOTAL_REACH_RANGE 0x1c96
+#define regOTG2_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG2_OTG_DRR_V_TOTAL_CHANGE 0x1c97
+#define regOTG2_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG2_OTG_DRR_TRIGGER_WINDOW 0x1c98
+#define regOTG2_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG2_OTG_DRR_CONTROL 0x1c99
+#define regOTG2_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DRR_CONTOL2 0x1c9a
+#define regOTG2_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG2_OTG_M_CONST_DTO0 0x1c9b
+#define regOTG2_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG2_OTG_M_CONST_DTO1 0x1c9c
+#define regOTG2_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG2_OTG_REQUEST_CONTROL 0x1c9d
+#define regOTG2_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DSC_START_POSITION 0x1c9e
+#define regOTG2_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG2_OTG_PIPE_UPDATE_STATUS 0x1c9f
+#define regOTG2_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG2_OTG_SPARE_REGISTER 0x1ca0
+#define regOTG2_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+// base address: 0x600
+#define regOTG3_OTG_H_TOTAL 0x1caa
+#define regOTG3_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG3_OTG_H_BLANK_START_END 0x1cab
+#define regOTG3_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG3_OTG_H_SYNC_A 0x1cac
+#define regOTG3_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG3_OTG_H_SYNC_A_CNTL 0x1cad
+#define regOTG3_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG3_OTG_H_TIMING_CNTL 0x1cae
+#define regOTG3_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL 0x1caf
+#define regOTG3_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MIN 0x1cb0
+#define regOTG3_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MAX 0x1cb1
+#define regOTG3_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MID 0x1cb2
+#define regOTG3_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_CONTROL 0x1cb3
+#define regOTG3_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL 0x1cb4
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL2 0x1cb5
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_INT_STATUS 0x1cb6
+#define regOTG3_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_VSYNC_NOM_INT_STATUS 0x1cb7
+#define regOTG3_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_V_BLANK_START_END 0x1cb8
+#define regOTG3_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG3_OTG_V_SYNC_A 0x1cb9
+#define regOTG3_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG3_OTG_V_SYNC_A_CNTL 0x1cba
+#define regOTG3_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGA_CNTL 0x1cbb
+#define regOTG3_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGA_MANUAL_TRIG 0x1cbc
+#define regOTG3_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG3_OTG_TRIGB_CNTL 0x1cbd
+#define regOTG3_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGB_MANUAL_TRIG 0x1cbe
+#define regOTG3_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG3_OTG_FORCE_COUNT_NOW_CNTL 0x1cbf
+#define regOTG3_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG3_OTG_STEREO_FORCE_NEXT_EYE 0x1cc1
+#define regOTG3_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG3_OTG_CONTROL 0x1cc3
+#define regOTG3_OTG_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DLPC_CONTROL 0x1cc4
+#define regOTG3_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_INTERLACE_CONTROL 0x1cc5
+#define regOTG3_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_INTERLACE_STATUS 0x1cc6
+#define regOTG3_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG3_OTG_PIXEL_DATA_READBACK0 0x1cc7
+#define regOTG3_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG3_OTG_PIXEL_DATA_READBACK1 0x1cc8
+#define regOTG3_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG3_OTG_STATUS 0x1cc9
+#define regOTG3_OTG_STATUS_BASE_IDX 2
+#define regOTG3_OTG_STATUS_POSITION 0x1cca
+#define regOTG3_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG3_OTG_LONG_VBLANK_STATUS 0x1ccb
+#define regOTG3_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG3_OTG_NOM_VERT_POSITION 0x1ccc
+#define regOTG3_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG3_OTG_STATUS_FRAME_COUNT 0x1ccd
+#define regOTG3_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG3_OTG_STATUS_VF_COUNT 0x1cce
+#define regOTG3_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG3_OTG_STATUS_HV_COUNT 0x1ccf
+#define regOTG3_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG3_OTG_COUNT_CONTROL 0x1cd0
+#define regOTG3_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_COUNT_RESET 0x1cd1
+#define regOTG3_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1cd2
+#define regOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG3_OTG_VERT_SYNC_CONTROL 0x1cd3
+#define regOTG3_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_STEREO_STATUS 0x1cd4
+#define regOTG3_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG3_OTG_STEREO_CONTROL 0x1cd5
+#define regOTG3_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_STATUS 0x1cd6
+#define regOTG3_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_CONTROL 0x1cd7
+#define regOTG3_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_POSITION 0x1cd8
+#define regOTG3_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_FRAME 0x1cd9
+#define regOTG3_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG3_OTG_INTERRUPT_CONTROL 0x1cda
+#define regOTG3_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_UPDATE_LOCK 0x1cdb
+#define regOTG3_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG3_OTG_DOUBLE_BUFFER_CONTROL 0x1cdc
+#define regOTG3_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_MASTER_EN 0x1cdd
+#define regOTG3_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_POSITION 0x1cdf
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1ce0
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_POSITION 0x1ce1
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1ce2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_POSITION 0x1ce3
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1ce4
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC_CNTL 0x1ce5
+#define regOTG3_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL 0x1ce6
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL 0x1ce7
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL 0x1ce8
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL 0x1ce9
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_DATA_RG 0x1cea
+#define regOTG3_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC0_DATA_B 0x1ceb
+#define regOTG3_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL 0x1cec
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL 0x1ced
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL 0x1cee
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL 0x1cef
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_DATA_RG 0x1cf0
+#define regOTG3_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC1_DATA_B 0x1cf1
+#define regOTG3_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC2_DATA_RG 0x1cf2
+#define regOTG3_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC2_DATA_B 0x1cf3
+#define regOTG3_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC3_DATA_RG 0x1cf4
+#define regOTG3_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC3_DATA_B 0x1cf5
+#define regOTG3_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC_SIG_RED_GREEN_MASK 0x1cf6
+#define regOTG3_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1cf7
+#define regOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1cf8
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1cf9
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1cfa
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1cfb
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1cfc
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1cfd
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1cfe
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1cff
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_STATIC_SCREEN_CONTROL 0x1d00
+#define regOTG3_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_3D_STRUCTURE_CONTROL 0x1d01
+#define regOTG3_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_GSL_VSYNC_GAP 0x1d02
+#define regOTG3_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG3_OTG_MASTER_UPDATE_MODE 0x1d03
+#define regOTG3_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG3_OTG_CLOCK_CONTROL 0x1d04
+#define regOTG3_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VSTARTUP_PARAM 0x1d05
+#define regOTG3_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG3_OTG_VUPDATE_PARAM 0x1d06
+#define regOTG3_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG3_OTG_VREADY_PARAM 0x1d07
+#define regOTG3_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_SYNC_STATUS 0x1d08
+#define regOTG3_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG3_OTG_MASTER_UPDATE_LOCK 0x1d09
+#define regOTG3_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG3_OTG_GSL_CONTROL 0x1d0a
+#define regOTG3_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_GSL_WINDOW_X 0x1d0b
+#define regOTG3_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG3_OTG_GSL_WINDOW_Y 0x1d0c
+#define regOTG3_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG3_OTG_VUPDATE_KEEPOUT 0x1d0d
+#define regOTG3_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL0 0x1d0e
+#define regOTG3_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL1 0x1d0f
+#define regOTG3_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL2 0x1d10
+#define regOTG3_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL3 0x1d11
+#define regOTG3_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL4 0x1d12
+#define regOTG3_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG3_OTG_TRIG_MANUAL_CONTROL 0x1d13
+#define regOTG3_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DRR_TIMING_INT_STATUS 0x1d15
+#define regOTG3_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_DRR_V_TOTAL_REACH_RANGE 0x1d16
+#define regOTG3_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG3_OTG_DRR_V_TOTAL_CHANGE 0x1d17
+#define regOTG3_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG3_OTG_DRR_TRIGGER_WINDOW 0x1d18
+#define regOTG3_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG3_OTG_DRR_CONTROL 0x1d19
+#define regOTG3_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DRR_CONTOL2 0x1d1a
+#define regOTG3_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG3_OTG_M_CONST_DTO0 0x1d1b
+#define regOTG3_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG3_OTG_M_CONST_DTO1 0x1d1c
+#define regOTG3_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG3_OTG_REQUEST_CONTROL 0x1d1d
+#define regOTG3_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DSC_START_POSITION 0x1d1e
+#define regOTG3_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG3_OTG_PIPE_UPDATE_STATUS 0x1d1f
+#define regOTG3_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG3_OTG_SPARE_REGISTER 0x1d20
+#define regOTG3_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+// base address: 0x0
+#define regGSL_SOURCE_SELECT 0x1e2b
+#define regGSL_SOURCE_SELECT_BASE_IDX 2
+#define regOPTC_DLPC_CONTROL 0x1e2c
+#define regOPTC_DLPC_CONTROL_BASE_IDX 2
+#define regOPTC_CLOCK_CONTROL 0x1e2d
+#define regOPTC_CLOCK_CONTROL_BASE_IDX 2
+#define regODM_MEM_PWR_CTRL 0x1e2e
+#define regODM_MEM_PWR_CTRL_BASE_IDX 2
+#define regODM_MEM_PWR_CTRL3 0x1e30
+#define regODM_MEM_PWR_CTRL3_BASE_IDX 2
+#define regODM_MEM_PWR_STATUS 0x1e31
+#define regODM_MEM_PWR_STATUS_BASE_IDX 2
+#define regOPTC_MISC_SPARE_REGISTER 0x1e32
+#define regOPTC_MISC_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x79a8
+#define regDC_PERFMON17_PERFCOUNTER_CNTL 0x1e6a
+#define regDC_PERFMON17_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON17_PERFCOUNTER_CNTL2 0x1e6b
+#define regDC_PERFMON17_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON17_PERFCOUNTER_STATE 0x1e6c
+#define regDC_PERFMON17_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CNTL 0x1e6d
+#define regDC_PERFMON17_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CNTL2 0x1e6e
+#define regDC_PERFMON17_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CVALUE_INT_MISC 0x1e6f
+#define regDC_PERFMON17_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CVALUE_LOW 0x1e70
+#define regDC_PERFMON17_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_HI 0x1e71
+#define regDC_PERFMON17_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_LOW 0x1e72
+#define regDC_PERFMON17_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+// base address: 0x0
+#define regDC_I2C_CONTROL 0x1e98
+#define regDC_I2C_CONTROL_BASE_IDX 2
+#define regDC_I2C_ARBITRATION 0x1e99
+#define regDC_I2C_ARBITRATION_BASE_IDX 2
+#define regDC_I2C_INTERRUPT_CONTROL 0x1e9a
+#define regDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDC_I2C_SW_STATUS 0x1e9b
+#define regDC_I2C_SW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC1_HW_STATUS 0x1e9c
+#define regDC_I2C_DDC1_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC2_HW_STATUS 0x1e9d
+#define regDC_I2C_DDC2_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC3_HW_STATUS 0x1e9e
+#define regDC_I2C_DDC3_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC4_HW_STATUS 0x1e9f
+#define regDC_I2C_DDC4_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC5_HW_STATUS 0x1ea0
+#define regDC_I2C_DDC5_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC1_SPEED 0x1ea2
+#define regDC_I2C_DDC1_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC1_SETUP 0x1ea3
+#define regDC_I2C_DDC1_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC2_SPEED 0x1ea4
+#define regDC_I2C_DDC2_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC2_SETUP 0x1ea5
+#define regDC_I2C_DDC2_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC3_SPEED 0x1ea6
+#define regDC_I2C_DDC3_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC3_SETUP 0x1ea7
+#define regDC_I2C_DDC3_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC4_SPEED 0x1ea8
+#define regDC_I2C_DDC4_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC4_SETUP 0x1ea9
+#define regDC_I2C_DDC4_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC5_SPEED 0x1eaa
+#define regDC_I2C_DDC5_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC5_SETUP 0x1eab
+#define regDC_I2C_DDC5_SETUP_BASE_IDX 2
+#define regDC_I2C_TRANSACTION0 0x1eae
+#define regDC_I2C_TRANSACTION0_BASE_IDX 2
+#define regDC_I2C_TRANSACTION1 0x1eaf
+#define regDC_I2C_TRANSACTION1_BASE_IDX 2
+#define regDC_I2C_TRANSACTION2 0x1eb0
+#define regDC_I2C_TRANSACTION2_BASE_IDX 2
+#define regDC_I2C_TRANSACTION3 0x1eb1
+#define regDC_I2C_TRANSACTION3_BASE_IDX 2
+#define regDC_I2C_DATA 0x1eb2
+#define regDC_I2C_DATA_BASE_IDX 2
+#define regDC_I2C_EDID_DETECT_CTRL 0x1eb6
+#define regDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2
+#define regDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7
+#define regDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+// base address: 0x0
+#define regDIO_DCN_STATUS 0x1ec3
+#define regDIO_DCN_STATUS_BASE_IDX 2
+#define regDIO_SCRATCH0 0x1eca
+#define regDIO_SCRATCH0_BASE_IDX 2
+#define regDIO_SCRATCH1 0x1ecb
+#define regDIO_SCRATCH1_BASE_IDX 2
+#define regDIO_SCRATCH2 0x1ecc
+#define regDIO_SCRATCH2_BASE_IDX 2
+#define regDIO_SCRATCH3 0x1ecd
+#define regDIO_SCRATCH3_BASE_IDX 2
+#define regDIO_SCRATCH4 0x1ece
+#define regDIO_SCRATCH4_BASE_IDX 2
+#define regDIO_SCRATCH5 0x1ecf
+#define regDIO_SCRATCH5_BASE_IDX 2
+#define regDIO_SCRATCH6 0x1ed0
+#define regDIO_SCRATCH6_BASE_IDX 2
+#define regDIO_SCRATCH7 0x1ed1
+#define regDIO_SCRATCH7_BASE_IDX 2
+#define regDIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS 0x1ed3
+#define regDIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDIO_MEM_PWR_STATUS 0x1edd
+#define regDIO_MEM_PWR_STATUS_BASE_IDX 2
+#define regDIO_MEM_PWR_CTRL 0x1ede
+#define regDIO_MEM_PWR_CTRL_BASE_IDX 2
+#define regDIO_MEM_PWR_CTRL2 0x1edf
+#define regDIO_MEM_PWR_CTRL2_BASE_IDX 2
+#define regDIO_CLK_CNTL 0x1ee0
+#define regDIO_CLK_CNTL_BASE_IDX 2
+#define regDIO_POWER_MANAGEMENT_CNTL 0x1ee4
+#define regDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2
+#define regDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff
+#define regDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2
+#define regDIO_PSP_INTERRUPT_STATUS 0x1f00
+#define regDIO_PSP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDIO_PSP_INTERRUPT_CLEAR 0x1f01
+#define regDIO_PSP_INTERRUPT_CLEAR_BASE_IDX 2
+#define regDIO_STATUS 0x1f02
+#define regDIO_STATUS_BASE_IDX 2
+#define regDIO_LINKA_CNTL 0x1f04
+#define regDIO_LINKA_CNTL_BASE_IDX 2
+#define regDIO_LINKB_CNTL 0x1f05
+#define regDIO_LINKB_CNTL_BASE_IDX 2
+#define regDIO_LINKC_CNTL 0x1f06
+#define regDIO_LINKC_CNTL_BASE_IDX 2
+#define regDIO_LINKD_CNTL 0x1f07
+#define regDIO_LINKD_CNTL_BASE_IDX 2
+#define regDIO_LINKE_CNTL 0x1f08
+#define regDIO_LINKE_CNTL_BASE_IDX 2
+#define regDIO_LINKF_CNTL 0x1f09
+#define regDIO_LINKF_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+// base address: 0x0
+#define regHPD0_DC_HPD_INT_STATUS 0x1f14
+#define regHPD0_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD0_DC_HPD_INT_CONTROL 0x1f15
+#define regHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD0_DC_HPD_CONTROL 0x1f16
+#define regHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define regHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define regHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define regHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define regHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define regHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD1_DC_HPD_CONTROL 0x1f1e
+#define regHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define regHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define regHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+// base address: 0x40
+#define regHPD2_DC_HPD_INT_STATUS 0x1f24
+#define regHPD2_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD2_DC_HPD_INT_CONTROL 0x1f25
+#define regHPD2_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD2_DC_HPD_CONTROL 0x1f26
+#define regHPD2_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD2_DC_HPD_FAST_TRAIN_CNTL 0x1f27
+#define regHPD2_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD2_DC_HPD_TOGGLE_FILT_CNTL 0x1f28
+#define regHPD2_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+// base address: 0x60
+#define regHPD3_DC_HPD_INT_STATUS 0x1f2c
+#define regHPD3_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD3_DC_HPD_INT_CONTROL 0x1f2d
+#define regHPD3_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD3_DC_HPD_CONTROL 0x1f2e
+#define regHPD3_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD3_DC_HPD_FAST_TRAIN_CNTL 0x1f2f
+#define regHPD3_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD3_DC_HPD_TOGGLE_FILT_CNTL 0x1f30
+#define regHPD3_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+// base address: 0x80
+#define regHPD4_DC_HPD_INT_STATUS 0x1f34
+#define regHPD4_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD4_DC_HPD_INT_CONTROL 0x1f35
+#define regHPD4_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD4_DC_HPD_CONTROL 0x1f36
+#define regHPD4_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD4_DC_HPD_FAST_TRAIN_CNTL 0x1f37
+#define regHPD4_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD4_DC_HPD_TOGGLE_FILT_CNTL 0x1f38
+#define regHPD4_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+// base address: 0x7d10
+#define regDC_PERFMON18_PERFCOUNTER_CNTL 0x1f44
+#define regDC_PERFMON18_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON18_PERFCOUNTER_CNTL2 0x1f45
+#define regDC_PERFMON18_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON18_PERFCOUNTER_STATE 0x1f46
+#define regDC_PERFMON18_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CNTL 0x1f47
+#define regDC_PERFMON18_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CNTL2 0x1f48
+#define regDC_PERFMON18_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CVALUE_INT_MISC 0x1f49
+#define regDC_PERFMON18_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CVALUE_LOW 0x1f4a
+#define regDC_PERFMON18_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_HI 0x1f4b
+#define regDC_PERFMON18_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_LOW 0x1f4c
+#define regDC_PERFMON18_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define regDP_AUX0_AUX_CONTROL 0x1f50
+#define regDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define regDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define regDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define regDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_STATUS 0x1f54
+#define regDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_LS_STATUS 0x1f55
+#define regDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_DATA 0x1f56
+#define regDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX0_AUX_LS_DATA 0x1f57
+#define regDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define regDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define regDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define regDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define regDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROL 0x1f5e
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL 0x1f5f
+#define regDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f60
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_STATUS 0x1f61
+#define regDP_AUX0_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_PHY_WAKE_CNTL 0x1f66
+#define regDP_AUX0_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define regDP_AUX1_AUX_CONTROL 0x1f6c
+#define regDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define regDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define regDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define regDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_STATUS 0x1f70
+#define regDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_LS_STATUS 0x1f71
+#define regDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_DATA 0x1f72
+#define regDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX1_AUX_LS_DATA 0x1f73
+#define regDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define regDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define regDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define regDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define regDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROL 0x1f7a
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL 0x1f7b
+#define regDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f7c
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_STATUS 0x1f7d
+#define regDP_AUX1_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_PHY_WAKE_CNTL 0x1f82
+#define regDP_AUX1_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+// base address: 0xe0
+#define regDP_AUX2_AUX_CONTROL 0x1f88
+#define regDP_AUX2_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_CONTROL 0x1f89
+#define regDP_AUX2_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_ARB_CONTROL 0x1f8a
+#define regDP_AUX2_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_INTERRUPT_CONTROL 0x1f8b
+#define regDP_AUX2_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_STATUS 0x1f8c
+#define regDP_AUX2_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_LS_STATUS 0x1f8d
+#define regDP_AUX2_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_DATA 0x1f8e
+#define regDP_AUX2_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX2_AUX_LS_DATA 0x1f8f
+#define regDP_AUX2_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x1f90
+#define regDP_AUX2_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_CONTROL 0x1f91
+#define regDP_AUX2_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL0 0x1f92
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL1 0x1f93
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_STATUS 0x1f94
+#define regDP_AUX2_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_STATUS 0x1f95
+#define regDP_AUX2_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROL 0x1f96
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL 0x1f97
+#define regDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f98
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_STATUS 0x1f99
+#define regDP_AUX2_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_PHY_WAKE_CNTL 0x1f9e
+#define regDP_AUX2_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+// base address: 0x150
+#define regDP_AUX3_AUX_CONTROL 0x1fa4
+#define regDP_AUX3_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_CONTROL 0x1fa5
+#define regDP_AUX3_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_ARB_CONTROL 0x1fa6
+#define regDP_AUX3_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_INTERRUPT_CONTROL 0x1fa7
+#define regDP_AUX3_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_STATUS 0x1fa8
+#define regDP_AUX3_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_LS_STATUS 0x1fa9
+#define regDP_AUX3_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_DATA 0x1faa
+#define regDP_AUX3_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX3_AUX_LS_DATA 0x1fab
+#define regDP_AUX3_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x1fac
+#define regDP_AUX3_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_CONTROL 0x1fad
+#define regDP_AUX3_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL0 0x1fae
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL1 0x1faf
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_STATUS 0x1fb0
+#define regDP_AUX3_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_STATUS 0x1fb1
+#define regDP_AUX3_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROL 0x1fb2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL 0x1fb3
+#define regDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fb4
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_STATUS 0x1fb5
+#define regDP_AUX3_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_PHY_WAKE_CNTL 0x1fba
+#define regDP_AUX3_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+// base address: 0x1c0
+#define regDP_AUX4_AUX_CONTROL 0x1fc0
+#define regDP_AUX4_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_CONTROL 0x1fc1
+#define regDP_AUX4_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_ARB_CONTROL 0x1fc2
+#define regDP_AUX4_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_INTERRUPT_CONTROL 0x1fc3
+#define regDP_AUX4_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_STATUS 0x1fc4
+#define regDP_AUX4_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_LS_STATUS 0x1fc5
+#define regDP_AUX4_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_DATA 0x1fc6
+#define regDP_AUX4_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX4_AUX_LS_DATA 0x1fc7
+#define regDP_AUX4_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x1fc8
+#define regDP_AUX4_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_CONTROL 0x1fc9
+#define regDP_AUX4_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL0 0x1fca
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL1 0x1fcb
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_STATUS 0x1fcc
+#define regDP_AUX4_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_STATUS 0x1fcd
+#define regDP_AUX4_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROL 0x1fce
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL 0x1fcf
+#define regDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fd0
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_STATUS 0x1fd1
+#define regDP_AUX4_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_PHY_WAKE_CNTL 0x1fd6
+#define regDP_AUX4_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_vpg_vpg_dispdec
+// base address: 0x154a0
+#define regVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL 0x2068
+#define regVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GENERIC_PACKET_DATA 0x2069
+#define regVPG0_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG0_VPG_GSP_FRAME_UPDATE_CTRL 0x206a
+#define regVPG0_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x206b
+#define regVPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GENERIC_STATUS 0x206c
+#define regVPG0_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG0_VPG_MEM_PWR 0x206d
+#define regVPG0_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG0_VPG_ISRC1_2_ACCESS_CTRL 0x206e
+#define regVPG0_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG0_VPG_ISRC1_2_DATA 0x206f
+#define regVPG0_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG0_VPG_MPEG_INFO0 0x2070
+#define regVPG0_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG0_VPG_MPEG_INFO1 0x2071
+#define regVPG0_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_afmt_afmt_dispdec
+// base address: 0x154cc
+#define regAFMT0_AFMT_ACP 0x2073
+#define regAFMT0_AFMT_ACP_BASE_IDX 2
+#define regAFMT0_AFMT_VBI_PACKET_CONTROL 0x2074
+#define regAFMT0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL2 0x2075
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_INFO0 0x2076
+#define regAFMT0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_INFO1 0x2077
+#define regAFMT0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT0_AFMT_60958_0 0x2078
+#define regAFMT0_AFMT_60958_0_BASE_IDX 2
+#define regAFMT0_AFMT_60958_1 0x2079
+#define regAFMT0_AFMT_60958_1_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_CRC_CONTROL 0x207a
+#define regAFMT0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL0 0x207b
+#define regAFMT0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL1 0x207c
+#define regAFMT0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL2 0x207d
+#define regAFMT0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL3 0x207e
+#define regAFMT0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT0_AFMT_60958_2 0x207f
+#define regAFMT0_AFMT_60958_2_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_CRC_RESULT 0x2080
+#define regAFMT0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT0_AFMT_STATUS 0x2081
+#define regAFMT0_AFMT_STATUS_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL 0x2082
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_INFOFRAME_CONTROL0 0x2083
+#define regAFMT0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT0_AFMT_INTERRUPT_STATUS 0x2084
+#define regAFMT0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_SRC_CONTROL 0x2085
+#define regAFMT0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_MEM_PWR 0x2087
+#define regAFMT0_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dme_dme_dispdec
+// base address: 0x15544
+#define regDME0_DME_CONTROL 0x2091
+#define regDME0_DME_CONTROL_BASE_IDX 2
+#define regDME0_DME_MEMORY_CONTROL 0x2092
+#define regDME0_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define regDIG0_DIG_FE_CNTL 0x2093
+#define regDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG0_DIG_FE_CLK_CNTL 0x2094
+#define regDIG0_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG0_DIG_FE_EN_CNTL 0x2095
+#define regDIG0_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG0_DIG_OUTPUT_CRC_CNTL 0x2096
+#define regDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG0_DIG_OUTPUT_CRC_RESULT 0x2097
+#define regDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG0_DIG_CLOCK_PATTERN 0x2098
+#define regDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG0_DIG_TEST_PATTERN 0x2099
+#define regDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG0_DIG_RANDOM_PATTERN_SEED 0x209a
+#define regDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG0_DIG_FIFO_CTRL0 0x209b
+#define regDIG0_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG0_DIG_FIFO_CTRL1 0x209c
+#define regDIG0_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG0_HDMI_METADATA_PACKET_CONTROL 0x209d
+#define regDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_CONTROL 0x209e
+#define regDIG0_HDMI_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_STATUS 0x209f
+#define regDIG0_HDMI_STATUS_BASE_IDX 2
+#define regDIG0_HDMI_AUDIO_PACKET_CONTROL 0x20a0
+#define regDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_ACR_PACKET_CONTROL 0x20a1
+#define regDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_VBI_PACKET_CONTROL 0x20a2
+#define regDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_INFOFRAME_CONTROL0 0x20a3
+#define regDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG0_HDMI_INFOFRAME_CONTROL1 0x20a4
+#define regDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x20a5
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL6 0x20a6
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20a7
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG0_HDMI_GC 0x20a8
+#define regDIG0_HDMI_GC_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x20a9
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x20aa
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x20ab
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x20ac
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL7 0x20ad
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL8 0x20ae
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL9 0x20af
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL10 0x20b0
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG0_HDMI_DB_CONTROL 0x20b1
+#define regDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_ACR_32_0 0x20b2
+#define regDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_32_1 0x20b3
+#define regDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_44_0 0x20b4
+#define regDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_44_1 0x20b5
+#define regDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_48_0 0x20b6
+#define regDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_48_1 0x20b7
+#define regDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_STATUS_0 0x20b8
+#define regDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_STATUS_1 0x20b9
+#define regDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG0_AFMT_CNTL 0x20ba
+#define regDIG0_AFMT_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_CLK_CNTL 0x20bb
+#define regDIG0_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_CNTL 0x20bc
+#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_EN_CNTL 0x20bd
+#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CNTL 0x20e4
+#define regDIG0_TMDS_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
+#define regDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG0_TMDS_CONTROL0_FEEDBACK 0x20e6
+#define regDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20e7
+#define regDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20e8
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20e9
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG0_TMDS_CTL_BITS 0x20eb
+#define regDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG0_TMDS_DCBALANCER_CONTROL 0x20ec
+#define regDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20ed
+#define regDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG0_TMDS_CTL0_1_GEN_CNTL 0x20ee
+#define regDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CTL2_3_GEN_CNTL 0x20ef
+#define regDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG0_DIG_VERSION 0x20f1
+#define regDIG0_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define regDP0_DP_LINK_CNTL 0x211e
+#define regDP0_DP_LINK_CNTL_BASE_IDX 2
+#define regDP0_DP_PIXEL_FORMAT 0x211f
+#define regDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP0_DP_MSA_COLORIMETRY 0x2120
+#define regDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP0_DP_CONFIG 0x2121
+#define regDP0_DP_CONFIG_BASE_IDX 2
+#define regDP0_DP_VID_STREAM_CNTL 0x2122
+#define regDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP0_DP_STEER_FIFO 0x2123
+#define regDP0_DP_STEER_FIFO_BASE_IDX 2
+#define regDP0_DP_MSA_MISC 0x2124
+#define regDP0_DP_MSA_MISC_BASE_IDX 2
+#define regDP0_DP_DPHY_INTERNAL_CTRL 0x2125
+#define regDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP0_DP_VID_TIMING 0x2126
+#define regDP0_DP_VID_TIMING_BASE_IDX 2
+#define regDP0_DP_VID_N 0x2127
+#define regDP0_DP_VID_N_BASE_IDX 2
+#define regDP0_DP_VID_M 0x2128
+#define regDP0_DP_VID_M_BASE_IDX 2
+#define regDP0_DP_LINK_FRAMING_CNTL 0x2129
+#define regDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP0_DP_HBR2_EYE_PATTERN 0x212a
+#define regDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP0_DP_VID_MSA_VBID 0x212b
+#define regDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP0_DP_VID_INTERRUPT_CNTL 0x212c
+#define regDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CNTL 0x212d
+#define regDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x212e
+#define regDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM0 0x212f
+#define regDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM1 0x2130
+#define regDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM2 0x2131
+#define regDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP0_DP_DPHY_8B10B_CNTL 0x2132
+#define regDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_PRBS_CNTL 0x2133
+#define regDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_SCRAM_CNTL 0x2134
+#define regDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_EN 0x2135
+#define regDP0_DP_DPHY_CRC_EN_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_CNTL 0x2136
+#define regDP0_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT 0x2137
+#define regDP0_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_MST_CNTL 0x2138
+#define regDP0_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_MST_STATUS 0x2139
+#define regDP0_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define regDP0_DP_DPHY_FAST_TRAINING 0x213a
+#define regDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP0_DP_DPHY_FAST_TRAINING_STATUS 0x213b
+#define regDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL 0x2141
+#define regDP0_DP_SEC_CNTL_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL1 0x2142
+#define regDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING1 0x2143
+#define regDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING2 0x2144
+#define regDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING3 0x2145
+#define regDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING4 0x2146
+#define regDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_N 0x2147
+#define regDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_N_READBACK 0x2148
+#define regDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_M 0x2149
+#define regDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_M_READBACK 0x214a
+#define regDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP0_DP_SEC_TIMESTAMP 0x214b
+#define regDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP0_DP_SEC_PACKET_CNTL 0x214c
+#define regDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP0_DP_MSE_RATE_CNTL 0x214d
+#define regDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP0_DP_MSE_RATE_UPDATE 0x214f
+#define regDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP0_DP_MSE_SAT0 0x2150
+#define regDP0_DP_MSE_SAT0_BASE_IDX 2
+#define regDP0_DP_MSE_SAT1 0x2151
+#define regDP0_DP_MSE_SAT1_BASE_IDX 2
+#define regDP0_DP_MSE_SAT2 0x2152
+#define regDP0_DP_MSE_SAT2_BASE_IDX 2
+#define regDP0_DP_MSE_SAT_UPDATE 0x2153
+#define regDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP0_DP_MSE_LINK_TIMING 0x2154
+#define regDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP0_DP_MSE_MISC_CNTL 0x2155
+#define regDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x215a
+#define regDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x215b
+#define regDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP0_DP_MSE_SAT0_STATUS 0x215d
+#define regDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP0_DP_MSE_SAT1_STATUS 0x215e
+#define regDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP0_DP_MSE_SAT2_STATUS 0x215f
+#define regDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP0_DP_DPIA_SPARE 0x2160
+#define regDP0_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM1 0x2162
+#define regDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM2 0x2163
+#define regDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM3 0x2164
+#define regDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM4 0x2165
+#define regDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP0_DP_MSO_CNTL 0x2166
+#define regDP0_DP_MSO_CNTL_BASE_IDX 2
+#define regDP0_DP_MSO_CNTL1 0x2167
+#define regDP0_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP0_DP_DSC_CNTL 0x2168
+#define regDP0_DP_DSC_CNTL_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL2 0x2169
+#define regDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL3 0x216a
+#define regDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL4 0x216b
+#define regDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL5 0x216c
+#define regDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL6 0x216d
+#define regDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL7 0x216e
+#define regDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP0_DP_DB_CNTL 0x216f
+#define regDP0_DP_DB_CNTL_BASE_IDX 2
+#define regDP0_DP_MSA_VBID_MISC 0x2170
+#define regDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP0_DP_SEC_METADATA_TRANSMISSION 0x2171
+#define regDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP0_DP_ALPM_CNTL 0x2173
+#define regDP0_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP8_CNTL 0x2174
+#define regDP0_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP9_CNTL 0x2175
+#define regDP0_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP10_CNTL 0x2176
+#define regDP0_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP11_CNTL 0x2177
+#define regDP0_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP_EN_DB_STATUS 0x2178
+#define regDP0_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL1 0x2179
+#define regDP0_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL2 0x217a
+#define regDP0_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL3 0x217b
+#define regDP0_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL4 0x217c
+#define regDP0_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL5 0x217d
+#define regDP0_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP0_DP_STREAM_SYMBOL_COUNT_STATUS 0x217e
+#define regDP0_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP0_DP_STREAM_SYMBOL_COUNT_CONTROL 0x217f
+#define regDP0_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS0 0x2180
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS1 0x2181
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_CONTROL 0x2182
+#define regDP0_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_vpg_vpg_dispdec
+// base address: 0x15930
+#define regVPG1_VPG_GENERIC_PACKET_ACCESS_CTRL 0x218c
+#define regVPG1_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GENERIC_PACKET_DATA 0x218d
+#define regVPG1_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG1_VPG_GSP_FRAME_UPDATE_CTRL 0x218e
+#define regVPG1_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x218f
+#define regVPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GENERIC_STATUS 0x2190
+#define regVPG1_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG1_VPG_MEM_PWR 0x2191
+#define regVPG1_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG1_VPG_ISRC1_2_ACCESS_CTRL 0x2192
+#define regVPG1_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG1_VPG_ISRC1_2_DATA 0x2193
+#define regVPG1_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG1_VPG_MPEG_INFO0 0x2194
+#define regVPG1_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG1_VPG_MPEG_INFO1 0x2195
+#define regVPG1_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_afmt_afmt_dispdec
+// base address: 0x1595c
+#define regAFMT1_AFMT_ACP 0x2197
+#define regAFMT1_AFMT_ACP_BASE_IDX 2
+#define regAFMT1_AFMT_VBI_PACKET_CONTROL 0x2198
+#define regAFMT1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL2 0x2199
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_INFO0 0x219a
+#define regAFMT1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_INFO1 0x219b
+#define regAFMT1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT1_AFMT_60958_0 0x219c
+#define regAFMT1_AFMT_60958_0_BASE_IDX 2
+#define regAFMT1_AFMT_60958_1 0x219d
+#define regAFMT1_AFMT_60958_1_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_CRC_CONTROL 0x219e
+#define regAFMT1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL0 0x219f
+#define regAFMT1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL1 0x21a0
+#define regAFMT1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL2 0x21a1
+#define regAFMT1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL3 0x21a2
+#define regAFMT1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT1_AFMT_60958_2 0x21a3
+#define regAFMT1_AFMT_60958_2_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_CRC_RESULT 0x21a4
+#define regAFMT1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT1_AFMT_STATUS 0x21a5
+#define regAFMT1_AFMT_STATUS_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL 0x21a6
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_INFOFRAME_CONTROL0 0x21a7
+#define regAFMT1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT1_AFMT_INTERRUPT_STATUS 0x21a8
+#define regAFMT1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_SRC_CONTROL 0x21a9
+#define regAFMT1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_MEM_PWR 0x21ab
+#define regAFMT1_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dme_dme_dispdec
+// base address: 0x159d4
+#define regDME1_DME_CONTROL 0x21b5
+#define regDME1_DME_CONTROL_BASE_IDX 2
+#define regDME1_DME_MEMORY_CONTROL 0x21b6
+#define regDME1_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x490
+#define regDIG1_DIG_FE_CNTL 0x21b7
+#define regDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG1_DIG_FE_CLK_CNTL 0x21b8
+#define regDIG1_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG1_DIG_FE_EN_CNTL 0x21b9
+#define regDIG1_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG1_DIG_OUTPUT_CRC_CNTL 0x21ba
+#define regDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG1_DIG_OUTPUT_CRC_RESULT 0x21bb
+#define regDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG1_DIG_CLOCK_PATTERN 0x21bc
+#define regDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG1_DIG_TEST_PATTERN 0x21bd
+#define regDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG1_DIG_RANDOM_PATTERN_SEED 0x21be
+#define regDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG1_DIG_FIFO_CTRL0 0x21bf
+#define regDIG1_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG1_DIG_FIFO_CTRL1 0x21c0
+#define regDIG1_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG1_HDMI_METADATA_PACKET_CONTROL 0x21c1
+#define regDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_CONTROL 0x21c2
+#define regDIG1_HDMI_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_STATUS 0x21c3
+#define regDIG1_HDMI_STATUS_BASE_IDX 2
+#define regDIG1_HDMI_AUDIO_PACKET_CONTROL 0x21c4
+#define regDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_ACR_PACKET_CONTROL 0x21c5
+#define regDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_VBI_PACKET_CONTROL 0x21c6
+#define regDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_INFOFRAME_CONTROL0 0x21c7
+#define regDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG1_HDMI_INFOFRAME_CONTROL1 0x21c8
+#define regDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x21c9
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL6 0x21ca
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21cb
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG1_HDMI_GC 0x21cc
+#define regDIG1_HDMI_GC_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x21cd
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x21ce
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x21cf
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x21d0
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL7 0x21d1
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL8 0x21d2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL9 0x21d3
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL10 0x21d4
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG1_HDMI_DB_CONTROL 0x21d5
+#define regDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_ACR_32_0 0x21d6
+#define regDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_32_1 0x21d7
+#define regDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_44_0 0x21d8
+#define regDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_44_1 0x21d9
+#define regDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_48_0 0x21da
+#define regDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_48_1 0x21db
+#define regDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_STATUS_0 0x21dc
+#define regDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_STATUS_1 0x21dd
+#define regDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG1_AFMT_CNTL 0x21de
+#define regDIG1_AFMT_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_CLK_CNTL 0x21df
+#define regDIG1_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_CNTL 0x21e0
+#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_EN_CNTL 0x21e1
+#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CNTL 0x2208
+#define regDIG1_TMDS_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CONTROL_CHAR 0x2209
+#define regDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG1_TMDS_CONTROL0_FEEDBACK 0x220a
+#define regDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG1_TMDS_STEREOSYNC_CTL_SEL 0x220b
+#define regDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x220c
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x220d
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG1_TMDS_CTL_BITS 0x220f
+#define regDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG1_TMDS_DCBALANCER_CONTROL 0x2210
+#define regDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x2211
+#define regDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG1_TMDS_CTL0_1_GEN_CNTL 0x2212
+#define regDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CTL2_3_GEN_CNTL 0x2213
+#define regDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG1_DIG_VERSION 0x2215
+#define regDIG1_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x490
+#define regDP1_DP_LINK_CNTL 0x2242
+#define regDP1_DP_LINK_CNTL_BASE_IDX 2
+#define regDP1_DP_PIXEL_FORMAT 0x2243
+#define regDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP1_DP_MSA_COLORIMETRY 0x2244
+#define regDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP1_DP_CONFIG 0x2245
+#define regDP1_DP_CONFIG_BASE_IDX 2
+#define regDP1_DP_VID_STREAM_CNTL 0x2246
+#define regDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP1_DP_STEER_FIFO 0x2247
+#define regDP1_DP_STEER_FIFO_BASE_IDX 2
+#define regDP1_DP_MSA_MISC 0x2248
+#define regDP1_DP_MSA_MISC_BASE_IDX 2
+#define regDP1_DP_DPHY_INTERNAL_CTRL 0x2249
+#define regDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP1_DP_VID_TIMING 0x224a
+#define regDP1_DP_VID_TIMING_BASE_IDX 2
+#define regDP1_DP_VID_N 0x224b
+#define regDP1_DP_VID_N_BASE_IDX 2
+#define regDP1_DP_VID_M 0x224c
+#define regDP1_DP_VID_M_BASE_IDX 2
+#define regDP1_DP_LINK_FRAMING_CNTL 0x224d
+#define regDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP1_DP_HBR2_EYE_PATTERN 0x224e
+#define regDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP1_DP_VID_MSA_VBID 0x224f
+#define regDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP1_DP_VID_INTERRUPT_CNTL 0x2250
+#define regDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CNTL 0x2251
+#define regDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2252
+#define regDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM0 0x2253
+#define regDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM1 0x2254
+#define regDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM2 0x2255
+#define regDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP1_DP_DPHY_8B10B_CNTL 0x2256
+#define regDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_PRBS_CNTL 0x2257
+#define regDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_SCRAM_CNTL 0x2258
+#define regDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_EN 0x2259
+#define regDP1_DP_DPHY_CRC_EN_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_CNTL 0x225a
+#define regDP1_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT 0x225b
+#define regDP1_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_MST_CNTL 0x225c
+#define regDP1_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_MST_STATUS 0x225d
+#define regDP1_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define regDP1_DP_DPHY_FAST_TRAINING 0x225e
+#define regDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP1_DP_DPHY_FAST_TRAINING_STATUS 0x225f
+#define regDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL 0x2265
+#define regDP1_DP_SEC_CNTL_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL1 0x2266
+#define regDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING1 0x2267
+#define regDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING2 0x2268
+#define regDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING3 0x2269
+#define regDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING4 0x226a
+#define regDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_N 0x226b
+#define regDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_N_READBACK 0x226c
+#define regDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_M 0x226d
+#define regDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_M_READBACK 0x226e
+#define regDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP1_DP_SEC_TIMESTAMP 0x226f
+#define regDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP1_DP_SEC_PACKET_CNTL 0x2270
+#define regDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP1_DP_MSE_RATE_CNTL 0x2271
+#define regDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP1_DP_MSE_RATE_UPDATE 0x2273
+#define regDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP1_DP_MSE_SAT0 0x2274
+#define regDP1_DP_MSE_SAT0_BASE_IDX 2
+#define regDP1_DP_MSE_SAT1 0x2275
+#define regDP1_DP_MSE_SAT1_BASE_IDX 2
+#define regDP1_DP_MSE_SAT2 0x2276
+#define regDP1_DP_MSE_SAT2_BASE_IDX 2
+#define regDP1_DP_MSE_SAT_UPDATE 0x2277
+#define regDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP1_DP_MSE_LINK_TIMING 0x2278
+#define regDP1_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP1_DP_MSE_MISC_CNTL 0x2279
+#define regDP1_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x227e
+#define regDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x227f
+#define regDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP1_DP_MSE_SAT0_STATUS 0x2281
+#define regDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP1_DP_MSE_SAT1_STATUS 0x2282
+#define regDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP1_DP_MSE_SAT2_STATUS 0x2283
+#define regDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP1_DP_DPIA_SPARE 0x2284
+#define regDP1_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM1 0x2286
+#define regDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM2 0x2287
+#define regDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM3 0x2288
+#define regDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM4 0x2289
+#define regDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP1_DP_MSO_CNTL 0x228a
+#define regDP1_DP_MSO_CNTL_BASE_IDX 2
+#define regDP1_DP_MSO_CNTL1 0x228b
+#define regDP1_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP1_DP_DSC_CNTL 0x228c
+#define regDP1_DP_DSC_CNTL_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL2 0x228d
+#define regDP1_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL3 0x228e
+#define regDP1_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL4 0x228f
+#define regDP1_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL5 0x2290
+#define regDP1_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL6 0x2291
+#define regDP1_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL7 0x2292
+#define regDP1_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP1_DP_DB_CNTL 0x2293
+#define regDP1_DP_DB_CNTL_BASE_IDX 2
+#define regDP1_DP_MSA_VBID_MISC 0x2294
+#define regDP1_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP1_DP_SEC_METADATA_TRANSMISSION 0x2295
+#define regDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP1_DP_ALPM_CNTL 0x2297
+#define regDP1_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP8_CNTL 0x2298
+#define regDP1_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP9_CNTL 0x2299
+#define regDP1_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP10_CNTL 0x229a
+#define regDP1_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP11_CNTL 0x229b
+#define regDP1_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP_EN_DB_STATUS 0x229c
+#define regDP1_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL1 0x229d
+#define regDP1_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL2 0x229e
+#define regDP1_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL3 0x229f
+#define regDP1_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL4 0x22a0
+#define regDP1_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL5 0x22a1
+#define regDP1_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_STATUS 0x22a2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_CONTROL 0x22a3
+#define regDP1_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS0 0x22a4
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS1 0x22a5
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_CONTROL 0x22a6
+#define regDP1_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_vpg_vpg_dispdec
+// base address: 0x15dc0
+#define regVPG2_VPG_GENERIC_PACKET_ACCESS_CTRL 0x22b0
+#define regVPG2_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GENERIC_PACKET_DATA 0x22b1
+#define regVPG2_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG2_VPG_GSP_FRAME_UPDATE_CTRL 0x22b2
+#define regVPG2_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x22b3
+#define regVPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GENERIC_STATUS 0x22b4
+#define regVPG2_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG2_VPG_MEM_PWR 0x22b5
+#define regVPG2_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG2_VPG_ISRC1_2_ACCESS_CTRL 0x22b6
+#define regVPG2_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG2_VPG_ISRC1_2_DATA 0x22b7
+#define regVPG2_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG2_VPG_MPEG_INFO0 0x22b8
+#define regVPG2_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG2_VPG_MPEG_INFO1 0x22b9
+#define regVPG2_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_afmt_afmt_dispdec
+// base address: 0x15dec
+#define regAFMT2_AFMT_ACP 0x22bb
+#define regAFMT2_AFMT_ACP_BASE_IDX 2
+#define regAFMT2_AFMT_VBI_PACKET_CONTROL 0x22bc
+#define regAFMT2_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL2 0x22bd
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_INFO0 0x22be
+#define regAFMT2_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_INFO1 0x22bf
+#define regAFMT2_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT2_AFMT_60958_0 0x22c0
+#define regAFMT2_AFMT_60958_0_BASE_IDX 2
+#define regAFMT2_AFMT_60958_1 0x22c1
+#define regAFMT2_AFMT_60958_1_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_CRC_CONTROL 0x22c2
+#define regAFMT2_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL0 0x22c3
+#define regAFMT2_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL1 0x22c4
+#define regAFMT2_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL2 0x22c5
+#define regAFMT2_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL3 0x22c6
+#define regAFMT2_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT2_AFMT_60958_2 0x22c7
+#define regAFMT2_AFMT_60958_2_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_CRC_RESULT 0x22c8
+#define regAFMT2_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT2_AFMT_STATUS 0x22c9
+#define regAFMT2_AFMT_STATUS_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL 0x22ca
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_INFOFRAME_CONTROL0 0x22cb
+#define regAFMT2_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT2_AFMT_INTERRUPT_STATUS 0x22cc
+#define regAFMT2_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_SRC_CONTROL 0x22cd
+#define regAFMT2_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_MEM_PWR 0x22cf
+#define regAFMT2_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dme_dme_dispdec
+// base address: 0x15e64
+#define regDME2_DME_CONTROL 0x22d9
+#define regDME2_DME_CONTROL_BASE_IDX 2
+#define regDME2_DME_MEMORY_CONTROL 0x22da
+#define regDME2_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+// base address: 0x920
+#define regDIG2_DIG_FE_CNTL 0x22db
+#define regDIG2_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG2_DIG_FE_CLK_CNTL 0x22dc
+#define regDIG2_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG2_DIG_FE_EN_CNTL 0x22dd
+#define regDIG2_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG2_DIG_OUTPUT_CRC_CNTL 0x22de
+#define regDIG2_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG2_DIG_OUTPUT_CRC_RESULT 0x22df
+#define regDIG2_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG2_DIG_CLOCK_PATTERN 0x22e0
+#define regDIG2_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG2_DIG_TEST_PATTERN 0x22e1
+#define regDIG2_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG2_DIG_RANDOM_PATTERN_SEED 0x22e2
+#define regDIG2_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG2_DIG_FIFO_CTRL0 0x22e3
+#define regDIG2_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG2_DIG_FIFO_CTRL1 0x22e4
+#define regDIG2_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG2_HDMI_METADATA_PACKET_CONTROL 0x22e5
+#define regDIG2_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_CONTROL 0x22e6
+#define regDIG2_HDMI_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_STATUS 0x22e7
+#define regDIG2_HDMI_STATUS_BASE_IDX 2
+#define regDIG2_HDMI_AUDIO_PACKET_CONTROL 0x22e8
+#define regDIG2_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_ACR_PACKET_CONTROL 0x22e9
+#define regDIG2_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_VBI_PACKET_CONTROL 0x22ea
+#define regDIG2_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_INFOFRAME_CONTROL0 0x22eb
+#define regDIG2_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG2_HDMI_INFOFRAME_CONTROL1 0x22ec
+#define regDIG2_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x22ed
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL6 0x22ee
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL5 0x22ef
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG2_HDMI_GC 0x22f0
+#define regDIG2_HDMI_GC_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x22f1
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL2 0x22f2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL3 0x22f3
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL4 0x22f4
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL7 0x22f5
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL8 0x22f6
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL9 0x22f7
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL10 0x22f8
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG2_HDMI_DB_CONTROL 0x22f9
+#define regDIG2_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_ACR_32_0 0x22fa
+#define regDIG2_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_32_1 0x22fb
+#define regDIG2_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_44_0 0x22fc
+#define regDIG2_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_44_1 0x22fd
+#define regDIG2_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_48_0 0x22fe
+#define regDIG2_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_48_1 0x22ff
+#define regDIG2_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_STATUS_0 0x2300
+#define regDIG2_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_STATUS_1 0x2301
+#define regDIG2_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG2_AFMT_CNTL 0x2302
+#define regDIG2_AFMT_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_CLK_CNTL 0x2303
+#define regDIG2_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_CNTL 0x2304
+#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_EN_CNTL 0x2305
+#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CNTL 0x232c
+#define regDIG2_TMDS_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CONTROL_CHAR 0x232d
+#define regDIG2_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG2_TMDS_CONTROL0_FEEDBACK 0x232e
+#define regDIG2_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG2_TMDS_STEREOSYNC_CTL_SEL 0x232f
+#define regDIG2_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x2330
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x2331
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG2_TMDS_CTL_BITS 0x2333
+#define regDIG2_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG2_TMDS_DCBALANCER_CONTROL 0x2334
+#define regDIG2_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_DCBALANCE_CHAR 0x2335
+#define regDIG2_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG2_TMDS_CTL0_1_GEN_CNTL 0x2336
+#define regDIG2_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CTL2_3_GEN_CNTL 0x2337
+#define regDIG2_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG2_DIG_VERSION 0x2339
+#define regDIG2_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp2_dispdec
+// base address: 0x920
+#define regDP2_DP_LINK_CNTL 0x2366
+#define regDP2_DP_LINK_CNTL_BASE_IDX 2
+#define regDP2_DP_PIXEL_FORMAT 0x2367
+#define regDP2_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP2_DP_MSA_COLORIMETRY 0x2368
+#define regDP2_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP2_DP_CONFIG 0x2369
+#define regDP2_DP_CONFIG_BASE_IDX 2
+#define regDP2_DP_VID_STREAM_CNTL 0x236a
+#define regDP2_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP2_DP_STEER_FIFO 0x236b
+#define regDP2_DP_STEER_FIFO_BASE_IDX 2
+#define regDP2_DP_MSA_MISC 0x236c
+#define regDP2_DP_MSA_MISC_BASE_IDX 2
+#define regDP2_DP_DPHY_INTERNAL_CTRL 0x236d
+#define regDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP2_DP_VID_TIMING 0x236e
+#define regDP2_DP_VID_TIMING_BASE_IDX 2
+#define regDP2_DP_VID_N 0x236f
+#define regDP2_DP_VID_N_BASE_IDX 2
+#define regDP2_DP_VID_M 0x2370
+#define regDP2_DP_VID_M_BASE_IDX 2
+#define regDP2_DP_LINK_FRAMING_CNTL 0x2371
+#define regDP2_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP2_DP_HBR2_EYE_PATTERN 0x2372
+#define regDP2_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP2_DP_VID_MSA_VBID 0x2373
+#define regDP2_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP2_DP_VID_INTERRUPT_CNTL 0x2374
+#define regDP2_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CNTL 0x2375
+#define regDP2_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x2376
+#define regDP2_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM0 0x2377
+#define regDP2_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM1 0x2378
+#define regDP2_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM2 0x2379
+#define regDP2_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP2_DP_DPHY_8B10B_CNTL 0x237a
+#define regDP2_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_PRBS_CNTL 0x237b
+#define regDP2_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_SCRAM_CNTL 0x237c
+#define regDP2_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_EN 0x237d
+#define regDP2_DP_DPHY_CRC_EN_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_CNTL 0x237e
+#define regDP2_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT 0x237f
+#define regDP2_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_MST_CNTL 0x2380
+#define regDP2_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_MST_STATUS 0x2381
+#define regDP2_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define regDP2_DP_DPHY_FAST_TRAINING 0x2382
+#define regDP2_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP2_DP_DPHY_FAST_TRAINING_STATUS 0x2383
+#define regDP2_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL 0x2389
+#define regDP2_DP_SEC_CNTL_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL1 0x238a
+#define regDP2_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING1 0x238b
+#define regDP2_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING2 0x238c
+#define regDP2_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING3 0x238d
+#define regDP2_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING4 0x238e
+#define regDP2_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_N 0x238f
+#define regDP2_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_N_READBACK 0x2390
+#define regDP2_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_M 0x2391
+#define regDP2_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_M_READBACK 0x2392
+#define regDP2_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP2_DP_SEC_TIMESTAMP 0x2393
+#define regDP2_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP2_DP_SEC_PACKET_CNTL 0x2394
+#define regDP2_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP2_DP_MSE_RATE_CNTL 0x2395
+#define regDP2_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP2_DP_MSE_RATE_UPDATE 0x2397
+#define regDP2_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP2_DP_MSE_SAT0 0x2398
+#define regDP2_DP_MSE_SAT0_BASE_IDX 2
+#define regDP2_DP_MSE_SAT1 0x2399
+#define regDP2_DP_MSE_SAT1_BASE_IDX 2
+#define regDP2_DP_MSE_SAT2 0x239a
+#define regDP2_DP_MSE_SAT2_BASE_IDX 2
+#define regDP2_DP_MSE_SAT_UPDATE 0x239b
+#define regDP2_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP2_DP_MSE_LINK_TIMING 0x239c
+#define regDP2_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP2_DP_MSE_MISC_CNTL 0x239d
+#define regDP2_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x23a2
+#define regDP2_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_HBR2_PATTERN_CONTROL 0x23a3
+#define regDP2_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP2_DP_MSE_SAT0_STATUS 0x23a5
+#define regDP2_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP2_DP_MSE_SAT1_STATUS 0x23a6
+#define regDP2_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP2_DP_MSE_SAT2_STATUS 0x23a7
+#define regDP2_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP2_DP_DPIA_SPARE 0x23a8
+#define regDP2_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM1 0x23aa
+#define regDP2_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM2 0x23ab
+#define regDP2_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM3 0x23ac
+#define regDP2_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM4 0x23ad
+#define regDP2_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP2_DP_MSO_CNTL 0x23ae
+#define regDP2_DP_MSO_CNTL_BASE_IDX 2
+#define regDP2_DP_MSO_CNTL1 0x23af
+#define regDP2_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP2_DP_DSC_CNTL 0x23b0
+#define regDP2_DP_DSC_CNTL_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL2 0x23b1
+#define regDP2_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL3 0x23b2
+#define regDP2_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL4 0x23b3
+#define regDP2_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL5 0x23b4
+#define regDP2_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL6 0x23b5
+#define regDP2_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL7 0x23b6
+#define regDP2_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP2_DP_DB_CNTL 0x23b7
+#define regDP2_DP_DB_CNTL_BASE_IDX 2
+#define regDP2_DP_MSA_VBID_MISC 0x23b8
+#define regDP2_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP2_DP_SEC_METADATA_TRANSMISSION 0x23b9
+#define regDP2_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP2_DP_ALPM_CNTL 0x23bb
+#define regDP2_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP8_CNTL 0x23bc
+#define regDP2_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP9_CNTL 0x23bd
+#define regDP2_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP10_CNTL 0x23be
+#define regDP2_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP11_CNTL 0x23bf
+#define regDP2_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP_EN_DB_STATUS 0x23c0
+#define regDP2_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL1 0x23c1
+#define regDP2_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL2 0x23c2
+#define regDP2_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL3 0x23c3
+#define regDP2_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL4 0x23c4
+#define regDP2_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL5 0x23c5
+#define regDP2_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP2_DP_STREAM_SYMBOL_COUNT_STATUS 0x23c6
+#define regDP2_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP2_DP_STREAM_SYMBOL_COUNT_CONTROL 0x23c7
+#define regDP2_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS0 0x23c8
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS1 0x23c9
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_CONTROL 0x23ca
+#define regDP2_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_vpg_vpg_dispdec
+// base address: 0x16250
+#define regVPG3_VPG_GENERIC_PACKET_ACCESS_CTRL 0x23d4
+#define regVPG3_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GENERIC_PACKET_DATA 0x23d5
+#define regVPG3_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG3_VPG_GSP_FRAME_UPDATE_CTRL 0x23d6
+#define regVPG3_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x23d7
+#define regVPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GENERIC_STATUS 0x23d8
+#define regVPG3_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG3_VPG_MEM_PWR 0x23d9
+#define regVPG3_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG3_VPG_ISRC1_2_ACCESS_CTRL 0x23da
+#define regVPG3_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG3_VPG_ISRC1_2_DATA 0x23db
+#define regVPG3_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG3_VPG_MPEG_INFO0 0x23dc
+#define regVPG3_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG3_VPG_MPEG_INFO1 0x23dd
+#define regVPG3_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_afmt_afmt_dispdec
+// base address: 0x1627c
+#define regAFMT3_AFMT_ACP 0x23df
+#define regAFMT3_AFMT_ACP_BASE_IDX 2
+#define regAFMT3_AFMT_VBI_PACKET_CONTROL 0x23e0
+#define regAFMT3_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL2 0x23e1
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_INFO0 0x23e2
+#define regAFMT3_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_INFO1 0x23e3
+#define regAFMT3_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT3_AFMT_60958_0 0x23e4
+#define regAFMT3_AFMT_60958_0_BASE_IDX 2
+#define regAFMT3_AFMT_60958_1 0x23e5
+#define regAFMT3_AFMT_60958_1_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_CRC_CONTROL 0x23e6
+#define regAFMT3_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL0 0x23e7
+#define regAFMT3_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL1 0x23e8
+#define regAFMT3_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL2 0x23e9
+#define regAFMT3_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL3 0x23ea
+#define regAFMT3_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT3_AFMT_60958_2 0x23eb
+#define regAFMT3_AFMT_60958_2_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_CRC_RESULT 0x23ec
+#define regAFMT3_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT3_AFMT_STATUS 0x23ed
+#define regAFMT3_AFMT_STATUS_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL 0x23ee
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_INFOFRAME_CONTROL0 0x23ef
+#define regAFMT3_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT3_AFMT_INTERRUPT_STATUS 0x23f0
+#define regAFMT3_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_SRC_CONTROL 0x23f1
+#define regAFMT3_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_MEM_PWR 0x23f3
+#define regAFMT3_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dme_dme_dispdec
+// base address: 0x162f4
+#define regDME3_DME_CONTROL 0x23fd
+#define regDME3_DME_CONTROL_BASE_IDX 2
+#define regDME3_DME_MEMORY_CONTROL 0x23fe
+#define regDME3_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+// base address: 0xdb0
+#define regDIG3_DIG_FE_CNTL 0x23ff
+#define regDIG3_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG3_DIG_FE_CLK_CNTL 0x2400
+#define regDIG3_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG3_DIG_FE_EN_CNTL 0x2401
+#define regDIG3_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG3_DIG_OUTPUT_CRC_CNTL 0x2402
+#define regDIG3_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG3_DIG_OUTPUT_CRC_RESULT 0x2403
+#define regDIG3_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG3_DIG_CLOCK_PATTERN 0x2404
+#define regDIG3_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG3_DIG_TEST_PATTERN 0x2405
+#define regDIG3_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG3_DIG_RANDOM_PATTERN_SEED 0x2406
+#define regDIG3_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG3_DIG_FIFO_CTRL0 0x2407
+#define regDIG3_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG3_DIG_FIFO_CTRL1 0x2408
+#define regDIG3_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG3_HDMI_METADATA_PACKET_CONTROL 0x2409
+#define regDIG3_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_CONTROL 0x240a
+#define regDIG3_HDMI_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_STATUS 0x240b
+#define regDIG3_HDMI_STATUS_BASE_IDX 2
+#define regDIG3_HDMI_AUDIO_PACKET_CONTROL 0x240c
+#define regDIG3_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_ACR_PACKET_CONTROL 0x240d
+#define regDIG3_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_VBI_PACKET_CONTROL 0x240e
+#define regDIG3_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_INFOFRAME_CONTROL0 0x240f
+#define regDIG3_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG3_HDMI_INFOFRAME_CONTROL1 0x2410
+#define regDIG3_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x2411
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL6 0x2412
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL5 0x2413
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG3_HDMI_GC 0x2414
+#define regDIG3_HDMI_GC_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x2415
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL2 0x2416
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL3 0x2417
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL4 0x2418
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL7 0x2419
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL8 0x241a
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL9 0x241b
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL10 0x241c
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG3_HDMI_DB_CONTROL 0x241d
+#define regDIG3_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_ACR_32_0 0x241e
+#define regDIG3_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_32_1 0x241f
+#define regDIG3_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_44_0 0x2420
+#define regDIG3_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_44_1 0x2421
+#define regDIG3_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_48_0 0x2422
+#define regDIG3_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_48_1 0x2423
+#define regDIG3_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_STATUS_0 0x2424
+#define regDIG3_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_STATUS_1 0x2425
+#define regDIG3_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG3_AFMT_CNTL 0x2426
+#define regDIG3_AFMT_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_CLK_CNTL 0x2427
+#define regDIG3_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_CNTL 0x2428
+#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_EN_CNTL 0x2429
+#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CNTL 0x2450
+#define regDIG3_TMDS_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CONTROL_CHAR 0x2451
+#define regDIG3_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG3_TMDS_CONTROL0_FEEDBACK 0x2452
+#define regDIG3_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG3_TMDS_STEREOSYNC_CTL_SEL 0x2453
+#define regDIG3_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x2454
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x2455
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG3_TMDS_CTL_BITS 0x2457
+#define regDIG3_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG3_TMDS_DCBALANCER_CONTROL 0x2458
+#define regDIG3_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_DCBALANCE_CHAR 0x2459
+#define regDIG3_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG3_TMDS_CTL0_1_GEN_CNTL 0x245a
+#define regDIG3_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CTL2_3_GEN_CNTL 0x245b
+#define regDIG3_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG3_DIG_VERSION 0x245d
+#define regDIG3_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp3_dispdec
+// base address: 0xdb0
+#define regDP3_DP_LINK_CNTL 0x248a
+#define regDP3_DP_LINK_CNTL_BASE_IDX 2
+#define regDP3_DP_PIXEL_FORMAT 0x248b
+#define regDP3_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP3_DP_MSA_COLORIMETRY 0x248c
+#define regDP3_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP3_DP_CONFIG 0x248d
+#define regDP3_DP_CONFIG_BASE_IDX 2
+#define regDP3_DP_VID_STREAM_CNTL 0x248e
+#define regDP3_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP3_DP_STEER_FIFO 0x248f
+#define regDP3_DP_STEER_FIFO_BASE_IDX 2
+#define regDP3_DP_MSA_MISC 0x2490
+#define regDP3_DP_MSA_MISC_BASE_IDX 2
+#define regDP3_DP_DPHY_INTERNAL_CTRL 0x2491
+#define regDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP3_DP_VID_TIMING 0x2492
+#define regDP3_DP_VID_TIMING_BASE_IDX 2
+#define regDP3_DP_VID_N 0x2493
+#define regDP3_DP_VID_N_BASE_IDX 2
+#define regDP3_DP_VID_M 0x2494
+#define regDP3_DP_VID_M_BASE_IDX 2
+#define regDP3_DP_LINK_FRAMING_CNTL 0x2495
+#define regDP3_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP3_DP_HBR2_EYE_PATTERN 0x2496
+#define regDP3_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP3_DP_VID_MSA_VBID 0x2497
+#define regDP3_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP3_DP_VID_INTERRUPT_CNTL 0x2498
+#define regDP3_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CNTL 0x2499
+#define regDP3_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x249a
+#define regDP3_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM0 0x249b
+#define regDP3_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM1 0x249c
+#define regDP3_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM2 0x249d
+#define regDP3_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP3_DP_DPHY_8B10B_CNTL 0x249e
+#define regDP3_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_PRBS_CNTL 0x249f
+#define regDP3_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_SCRAM_CNTL 0x24a0
+#define regDP3_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_EN 0x24a1
+#define regDP3_DP_DPHY_CRC_EN_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_CNTL 0x24a2
+#define regDP3_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT 0x24a3
+#define regDP3_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_MST_CNTL 0x24a4
+#define regDP3_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_MST_STATUS 0x24a5
+#define regDP3_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define regDP3_DP_DPHY_FAST_TRAINING 0x24a6
+#define regDP3_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP3_DP_DPHY_FAST_TRAINING_STATUS 0x24a7
+#define regDP3_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL 0x24ad
+#define regDP3_DP_SEC_CNTL_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL1 0x24ae
+#define regDP3_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING1 0x24af
+#define regDP3_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING2 0x24b0
+#define regDP3_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING3 0x24b1
+#define regDP3_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING4 0x24b2
+#define regDP3_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_N 0x24b3
+#define regDP3_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_N_READBACK 0x24b4
+#define regDP3_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_M 0x24b5
+#define regDP3_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_M_READBACK 0x24b6
+#define regDP3_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP3_DP_SEC_TIMESTAMP 0x24b7
+#define regDP3_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP3_DP_SEC_PACKET_CNTL 0x24b8
+#define regDP3_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP3_DP_MSE_RATE_CNTL 0x24b9
+#define regDP3_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP3_DP_MSE_RATE_UPDATE 0x24bb
+#define regDP3_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP3_DP_MSE_SAT0 0x24bc
+#define regDP3_DP_MSE_SAT0_BASE_IDX 2
+#define regDP3_DP_MSE_SAT1 0x24bd
+#define regDP3_DP_MSE_SAT1_BASE_IDX 2
+#define regDP3_DP_MSE_SAT2 0x24be
+#define regDP3_DP_MSE_SAT2_BASE_IDX 2
+#define regDP3_DP_MSE_SAT_UPDATE 0x24bf
+#define regDP3_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP3_DP_MSE_LINK_TIMING 0x24c0
+#define regDP3_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP3_DP_MSE_MISC_CNTL 0x24c1
+#define regDP3_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x24c6
+#define regDP3_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_HBR2_PATTERN_CONTROL 0x24c7
+#define regDP3_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP3_DP_MSE_SAT0_STATUS 0x24c9
+#define regDP3_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP3_DP_MSE_SAT1_STATUS 0x24ca
+#define regDP3_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP3_DP_MSE_SAT2_STATUS 0x24cb
+#define regDP3_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP3_DP_DPIA_SPARE 0x24cc
+#define regDP3_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM1 0x24ce
+#define regDP3_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM2 0x24cf
+#define regDP3_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM3 0x24d0
+#define regDP3_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM4 0x24d1
+#define regDP3_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP3_DP_MSO_CNTL 0x24d2
+#define regDP3_DP_MSO_CNTL_BASE_IDX 2
+#define regDP3_DP_MSO_CNTL1 0x24d3
+#define regDP3_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP3_DP_DSC_CNTL 0x24d4
+#define regDP3_DP_DSC_CNTL_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL2 0x24d5
+#define regDP3_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL3 0x24d6
+#define regDP3_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL4 0x24d7
+#define regDP3_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL5 0x24d8
+#define regDP3_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL6 0x24d9
+#define regDP3_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL7 0x24da
+#define regDP3_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP3_DP_DB_CNTL 0x24db
+#define regDP3_DP_DB_CNTL_BASE_IDX 2
+#define regDP3_DP_MSA_VBID_MISC 0x24dc
+#define regDP3_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP3_DP_SEC_METADATA_TRANSMISSION 0x24dd
+#define regDP3_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP3_DP_ALPM_CNTL 0x24df
+#define regDP3_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP8_CNTL 0x24e0
+#define regDP3_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP9_CNTL 0x24e1
+#define regDP3_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP10_CNTL 0x24e2
+#define regDP3_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP11_CNTL 0x24e3
+#define regDP3_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP_EN_DB_STATUS 0x24e4
+#define regDP3_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL1 0x24e5
+#define regDP3_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL2 0x24e6
+#define regDP3_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL3 0x24e7
+#define regDP3_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL4 0x24e8
+#define regDP3_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL5 0x24e9
+#define regDP3_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP3_DP_STREAM_SYMBOL_COUNT_STATUS 0x24ea
+#define regDP3_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP3_DP_STREAM_SYMBOL_COUNT_CONTROL 0x24eb
+#define regDP3_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS0 0x24ec
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS1 0x24ed
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_CONTROL 0x24ee
+#define regDP3_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_vpg_vpg_dispdec
+// base address: 0x166e0
+#define regVPG4_VPG_GENERIC_PACKET_ACCESS_CTRL 0x24f8
+#define regVPG4_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GENERIC_PACKET_DATA 0x24f9
+#define regVPG4_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG4_VPG_GSP_FRAME_UPDATE_CTRL 0x24fa
+#define regVPG4_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x24fb
+#define regVPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GENERIC_STATUS 0x24fc
+#define regVPG4_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG4_VPG_MEM_PWR 0x24fd
+#define regVPG4_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG4_VPG_ISRC1_2_ACCESS_CTRL 0x24fe
+#define regVPG4_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG4_VPG_ISRC1_2_DATA 0x24ff
+#define regVPG4_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG4_VPG_MPEG_INFO0 0x2500
+#define regVPG4_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG4_VPG_MPEG_INFO1 0x2501
+#define regVPG4_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_afmt_afmt_dispdec
+// base address: 0x1670c
+#define regAFMT4_AFMT_ACP 0x2503
+#define regAFMT4_AFMT_ACP_BASE_IDX 2
+#define regAFMT4_AFMT_VBI_PACKET_CONTROL 0x2504
+#define regAFMT4_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL2 0x2505
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_INFO0 0x2506
+#define regAFMT4_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_INFO1 0x2507
+#define regAFMT4_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT4_AFMT_60958_0 0x2508
+#define regAFMT4_AFMT_60958_0_BASE_IDX 2
+#define regAFMT4_AFMT_60958_1 0x2509
+#define regAFMT4_AFMT_60958_1_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_CRC_CONTROL 0x250a
+#define regAFMT4_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL0 0x250b
+#define regAFMT4_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL1 0x250c
+#define regAFMT4_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL2 0x250d
+#define regAFMT4_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL3 0x250e
+#define regAFMT4_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT4_AFMT_60958_2 0x250f
+#define regAFMT4_AFMT_60958_2_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_CRC_RESULT 0x2510
+#define regAFMT4_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT4_AFMT_STATUS 0x2511
+#define regAFMT4_AFMT_STATUS_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL 0x2512
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_INFOFRAME_CONTROL0 0x2513
+#define regAFMT4_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT4_AFMT_INTERRUPT_STATUS 0x2514
+#define regAFMT4_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_SRC_CONTROL 0x2515
+#define regAFMT4_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_MEM_PWR 0x2517
+#define regAFMT4_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dme_dme_dispdec
+// base address: 0x16784
+#define regDME4_DME_CONTROL 0x2521
+#define regDME4_DME_CONTROL_BASE_IDX 2
+#define regDME4_DME_MEMORY_CONTROL 0x2522
+#define regDME4_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dispdec
+// base address: 0x1240
+#define regDIG4_DIG_FE_CNTL 0x2523
+#define regDIG4_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG4_DIG_FE_CLK_CNTL 0x2524
+#define regDIG4_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG4_DIG_FE_EN_CNTL 0x2525
+#define regDIG4_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG4_DIG_OUTPUT_CRC_CNTL 0x2526
+#define regDIG4_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG4_DIG_OUTPUT_CRC_RESULT 0x2527
+#define regDIG4_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG4_DIG_CLOCK_PATTERN 0x2528
+#define regDIG4_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG4_DIG_TEST_PATTERN 0x2529
+#define regDIG4_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG4_DIG_RANDOM_PATTERN_SEED 0x252a
+#define regDIG4_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG4_DIG_FIFO_CTRL0 0x252b
+#define regDIG4_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG4_DIG_FIFO_CTRL1 0x252c
+#define regDIG4_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG4_HDMI_METADATA_PACKET_CONTROL 0x252d
+#define regDIG4_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_CONTROL 0x252e
+#define regDIG4_HDMI_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_STATUS 0x252f
+#define regDIG4_HDMI_STATUS_BASE_IDX 2
+#define regDIG4_HDMI_AUDIO_PACKET_CONTROL 0x2530
+#define regDIG4_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_ACR_PACKET_CONTROL 0x2531
+#define regDIG4_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_VBI_PACKET_CONTROL 0x2532
+#define regDIG4_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_INFOFRAME_CONTROL0 0x2533
+#define regDIG4_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG4_HDMI_INFOFRAME_CONTROL1 0x2534
+#define regDIG4_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x2535
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL6 0x2536
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL5 0x2537
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG4_HDMI_GC 0x2538
+#define regDIG4_HDMI_GC_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x2539
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL2 0x253a
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL3 0x253b
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL4 0x253c
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL7 0x253d
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL8 0x253e
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL9 0x253f
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL10 0x2540
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG4_HDMI_DB_CONTROL 0x2541
+#define regDIG4_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_ACR_32_0 0x2542
+#define regDIG4_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_32_1 0x2543
+#define regDIG4_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_44_0 0x2544
+#define regDIG4_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_44_1 0x2545
+#define regDIG4_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_48_0 0x2546
+#define regDIG4_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_48_1 0x2547
+#define regDIG4_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_STATUS_0 0x2548
+#define regDIG4_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_STATUS_1 0x2549
+#define regDIG4_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG4_AFMT_CNTL 0x254a
+#define regDIG4_AFMT_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_CLK_CNTL 0x254b
+#define regDIG4_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_CNTL 0x254c
+#define regDIG4_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_EN_CNTL 0x254d
+#define regDIG4_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CNTL 0x2574
+#define regDIG4_TMDS_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CONTROL_CHAR 0x2575
+#define regDIG4_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG4_TMDS_CONTROL0_FEEDBACK 0x2576
+#define regDIG4_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG4_TMDS_STEREOSYNC_CTL_SEL 0x2577
+#define regDIG4_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x2578
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x2579
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG4_TMDS_CTL_BITS 0x257b
+#define regDIG4_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG4_TMDS_DCBALANCER_CONTROL 0x257c
+#define regDIG4_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_DCBALANCE_CHAR 0x257d
+#define regDIG4_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG4_TMDS_CTL0_1_GEN_CNTL 0x257e
+#define regDIG4_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CTL2_3_GEN_CNTL 0x257f
+#define regDIG4_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG4_DIG_VERSION 0x2581
+#define regDIG4_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp4_dispdec
+// base address: 0x1240
+#define regDP4_DP_LINK_CNTL 0x25ae
+#define regDP4_DP_LINK_CNTL_BASE_IDX 2
+#define regDP4_DP_PIXEL_FORMAT 0x25af
+#define regDP4_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP4_DP_MSA_COLORIMETRY 0x25b0
+#define regDP4_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP4_DP_CONFIG 0x25b1
+#define regDP4_DP_CONFIG_BASE_IDX 2
+#define regDP4_DP_VID_STREAM_CNTL 0x25b2
+#define regDP4_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP4_DP_STEER_FIFO 0x25b3
+#define regDP4_DP_STEER_FIFO_BASE_IDX 2
+#define regDP4_DP_MSA_MISC 0x25b4
+#define regDP4_DP_MSA_MISC_BASE_IDX 2
+#define regDP4_DP_DPHY_INTERNAL_CTRL 0x25b5
+#define regDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP4_DP_VID_TIMING 0x25b6
+#define regDP4_DP_VID_TIMING_BASE_IDX 2
+#define regDP4_DP_VID_N 0x25b7
+#define regDP4_DP_VID_N_BASE_IDX 2
+#define regDP4_DP_VID_M 0x25b8
+#define regDP4_DP_VID_M_BASE_IDX 2
+#define regDP4_DP_LINK_FRAMING_CNTL 0x25b9
+#define regDP4_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP4_DP_HBR2_EYE_PATTERN 0x25ba
+#define regDP4_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP4_DP_VID_MSA_VBID 0x25bb
+#define regDP4_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP4_DP_VID_INTERRUPT_CNTL 0x25bc
+#define regDP4_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CNTL 0x25bd
+#define regDP4_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x25be
+#define regDP4_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM0 0x25bf
+#define regDP4_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM1 0x25c0
+#define regDP4_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM2 0x25c1
+#define regDP4_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP4_DP_DPHY_8B10B_CNTL 0x25c2
+#define regDP4_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_PRBS_CNTL 0x25c3
+#define regDP4_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_SCRAM_CNTL 0x25c4
+#define regDP4_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_EN 0x25c5
+#define regDP4_DP_DPHY_CRC_EN_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_CNTL 0x25c6
+#define regDP4_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT 0x25c7
+#define regDP4_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_MST_CNTL 0x25c8
+#define regDP4_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_MST_STATUS 0x25c9
+#define regDP4_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define regDP4_DP_DPHY_FAST_TRAINING 0x25ca
+#define regDP4_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP4_DP_DPHY_FAST_TRAINING_STATUS 0x25cb
+#define regDP4_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL 0x25d1
+#define regDP4_DP_SEC_CNTL_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL1 0x25d2
+#define regDP4_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING1 0x25d3
+#define regDP4_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING2 0x25d4
+#define regDP4_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING3 0x25d5
+#define regDP4_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING4 0x25d6
+#define regDP4_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_N 0x25d7
+#define regDP4_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_N_READBACK 0x25d8
+#define regDP4_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_M 0x25d9
+#define regDP4_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_M_READBACK 0x25da
+#define regDP4_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP4_DP_SEC_TIMESTAMP 0x25db
+#define regDP4_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP4_DP_SEC_PACKET_CNTL 0x25dc
+#define regDP4_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP4_DP_MSE_RATE_CNTL 0x25dd
+#define regDP4_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP4_DP_MSE_RATE_UPDATE 0x25df
+#define regDP4_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP4_DP_MSE_SAT0 0x25e0
+#define regDP4_DP_MSE_SAT0_BASE_IDX 2
+#define regDP4_DP_MSE_SAT1 0x25e1
+#define regDP4_DP_MSE_SAT1_BASE_IDX 2
+#define regDP4_DP_MSE_SAT2 0x25e2
+#define regDP4_DP_MSE_SAT2_BASE_IDX 2
+#define regDP4_DP_MSE_SAT_UPDATE 0x25e3
+#define regDP4_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP4_DP_MSE_LINK_TIMING 0x25e4
+#define regDP4_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP4_DP_MSE_MISC_CNTL 0x25e5
+#define regDP4_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x25ea
+#define regDP4_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_HBR2_PATTERN_CONTROL 0x25eb
+#define regDP4_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP4_DP_MSE_SAT0_STATUS 0x25ed
+#define regDP4_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP4_DP_MSE_SAT1_STATUS 0x25ee
+#define regDP4_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP4_DP_MSE_SAT2_STATUS 0x25ef
+#define regDP4_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP4_DP_DPIA_SPARE 0x25f0
+#define regDP4_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM1 0x25f2
+#define regDP4_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM2 0x25f3
+#define regDP4_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM3 0x25f4
+#define regDP4_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM4 0x25f5
+#define regDP4_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP4_DP_MSO_CNTL 0x25f6
+#define regDP4_DP_MSO_CNTL_BASE_IDX 2
+#define regDP4_DP_MSO_CNTL1 0x25f7
+#define regDP4_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP4_DP_DSC_CNTL 0x25f8
+#define regDP4_DP_DSC_CNTL_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL2 0x25f9
+#define regDP4_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL3 0x25fa
+#define regDP4_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL4 0x25fb
+#define regDP4_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL5 0x25fc
+#define regDP4_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL6 0x25fd
+#define regDP4_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL7 0x25fe
+#define regDP4_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP4_DP_DB_CNTL 0x25ff
+#define regDP4_DP_DB_CNTL_BASE_IDX 2
+#define regDP4_DP_MSA_VBID_MISC 0x2600
+#define regDP4_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP4_DP_SEC_METADATA_TRANSMISSION 0x2601
+#define regDP4_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP4_DP_ALPM_CNTL 0x2603
+#define regDP4_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP8_CNTL 0x2604
+#define regDP4_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP9_CNTL 0x2605
+#define regDP4_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP10_CNTL 0x2606
+#define regDP4_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP11_CNTL 0x2607
+#define regDP4_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP_EN_DB_STATUS 0x2608
+#define regDP4_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL1 0x2609
+#define regDP4_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL2 0x260a
+#define regDP4_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL3 0x260b
+#define regDP4_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL4 0x260c
+#define regDP4_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL5 0x260d
+#define regDP4_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP4_DP_STREAM_SYMBOL_COUNT_STATUS 0x260e
+#define regDP4_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP4_DP_STREAM_SYMBOL_COUNT_CONTROL 0x260f
+#define regDP4_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS0 0x2610
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS1 0x2611
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_CONTROL 0x2612
+#define regDP4_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+// base address: 0x0
+#define regDC_GENERICA 0x2868
+#define regDC_GENERICA_BASE_IDX 2
+#define regDC_GENERICB 0x2869
+#define regDC_GENERICB_BASE_IDX 2
+#define regDCIO_CLOCK_CNTL 0x286a
+#define regDCIO_CLOCK_CNTL_BASE_IDX 2
+#define regDC_REF_CLK_CNTL 0x286b
+#define regDC_REF_CLK_CNTL_BASE_IDX 2
+#define regUNIPHYA_LINK_CNTL 0x286d
+#define regUNIPHYA_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYA_CHANNEL_XBAR_CNTL 0x286e
+#define regUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYB_LINK_CNTL 0x286f
+#define regUNIPHYB_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYB_CHANNEL_XBAR_CNTL 0x2870
+#define regUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYC_LINK_CNTL 0x2871
+#define regUNIPHYC_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYC_CHANNEL_XBAR_CNTL 0x2872
+#define regUNIPHYC_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYD_CHANNEL_XBAR_CNTL 0x2874
+#define regUNIPHYD_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYE_CHANNEL_XBAR_CNTL 0x2876
+#define regUNIPHYE_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regDCIO_WRCMD_DELAY 0x287e
+#define regDCIO_WRCMD_DELAY_BASE_IDX 2
+#define regDC_PINSTRAPS 0x2880
+#define regDC_PINSTRAPS_BASE_IDX 2
+#define regDCIO_SPARE 0x2882
+#define regDCIO_SPARE_BASE_IDX 2
+#define regINTERCEPT_STATE 0x2884
+#define regINTERCEPT_STATE_BASE_IDX 2
+#define regDCIO_PATTERN_GEN_PAT 0x2886
+#define regDCIO_PATTERN_GEN_PAT_BASE_IDX 2
+#define regDCIO_PATTERN_GEN_EN 0x2887
+#define regDCIO_PATTERN_GEN_EN_BASE_IDX 2
+#define regDCIO_BL_PWM_FRAME_START_DISP_SEL 0x288b
+#define regDCIO_BL_PWM_FRAME_START_DISP_SEL_BASE_IDX 2
+#define regDCIO_GSL_GENLK_PAD_CNTL 0x288c
+#define regDCIO_GSL_GENLK_PAD_CNTL_BASE_IDX 2
+#define regDCIO_GSL_SWAPLOCK_PAD_CNTL 0x288d
+#define regDCIO_GSL_SWAPLOCK_PAD_CNTL_BASE_IDX 2
+#define regDCIO_SOFT_RESET 0x289e
+#define regDCIO_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+// base address: 0x0
+#define regDC_GPIO_GENERIC_MASK 0x28c8
+#define regDC_GPIO_GENERIC_MASK_BASE_IDX 2
+#define regDC_GPIO_GENERIC_A 0x28c9
+#define regDC_GPIO_GENERIC_A_BASE_IDX 2
+#define regDC_GPIO_GENERIC_EN 0x28ca
+#define regDC_GPIO_GENERIC_EN_BASE_IDX 2
+#define regDC_GPIO_GENERIC_Y 0x28cb
+#define regDC_GPIO_GENERIC_Y_BASE_IDX 2
+#define regDC_GPIO_DDC1_MASK 0x28d0
+#define regDC_GPIO_DDC1_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC1_A 0x28d1
+#define regDC_GPIO_DDC1_A_BASE_IDX 2
+#define regDC_GPIO_DDC1_EN 0x28d2
+#define regDC_GPIO_DDC1_EN_BASE_IDX 2
+#define regDC_GPIO_DDC1_Y 0x28d3
+#define regDC_GPIO_DDC1_Y_BASE_IDX 2
+#define regDC_GPIO_DDC2_MASK 0x28d4
+#define regDC_GPIO_DDC2_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC2_A 0x28d5
+#define regDC_GPIO_DDC2_A_BASE_IDX 2
+#define regDC_GPIO_DDC2_EN 0x28d6
+#define regDC_GPIO_DDC2_EN_BASE_IDX 2
+#define regDC_GPIO_DDC2_Y 0x28d7
+#define regDC_GPIO_DDC2_Y_BASE_IDX 2
+#define regDC_GPIO_DDC3_MASK 0x28d8
+#define regDC_GPIO_DDC3_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC3_A 0x28d9
+#define regDC_GPIO_DDC3_A_BASE_IDX 2
+#define regDC_GPIO_DDC3_EN 0x28da
+#define regDC_GPIO_DDC3_EN_BASE_IDX 2
+#define regDC_GPIO_DDC3_Y 0x28db
+#define regDC_GPIO_DDC3_Y_BASE_IDX 2
+#define regDC_GPIO_DDC4_MASK 0x28dc
+#define regDC_GPIO_DDC4_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC4_A 0x28dd
+#define regDC_GPIO_DDC4_A_BASE_IDX 2
+#define regDC_GPIO_DDC4_EN 0x28de
+#define regDC_GPIO_DDC4_EN_BASE_IDX 2
+#define regDC_GPIO_DDC4_Y 0x28df
+#define regDC_GPIO_DDC4_Y_BASE_IDX 2
+#define regDC_GPIO_DDC5_MASK 0x28e0
+#define regDC_GPIO_DDC5_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC5_A 0x28e1
+#define regDC_GPIO_DDC5_A_BASE_IDX 2
+#define regDC_GPIO_DDC5_EN 0x28e2
+#define regDC_GPIO_DDC5_EN_BASE_IDX 2
+#define regDC_GPIO_DDC5_Y 0x28e3
+#define regDC_GPIO_DDC5_Y_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_MASK 0x28e8
+#define regDC_GPIO_DDCVGA_MASK_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_A 0x28e9
+#define regDC_GPIO_DDCVGA_A_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_EN 0x28ea
+#define regDC_GPIO_DDCVGA_EN_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_Y 0x28eb
+#define regDC_GPIO_DDCVGA_Y_BASE_IDX 2
+#define regDC_GPIO_GENLK_MASK 0x28f0
+#define regDC_GPIO_GENLK_MASK_BASE_IDX 2
+#define regDC_GPIO_GENLK_A 0x28f1
+#define regDC_GPIO_GENLK_A_BASE_IDX 2
+#define regDC_GPIO_GENLK_EN 0x28f2
+#define regDC_GPIO_GENLK_EN_BASE_IDX 2
+#define regDC_GPIO_GENLK_Y 0x28f3
+#define regDC_GPIO_GENLK_Y_BASE_IDX 2
+#define regDC_GPIO_HPD_MASK 0x28f4
+#define regDC_GPIO_HPD_MASK_BASE_IDX 2
+#define regDC_GPIO_HPD_A 0x28f5
+#define regDC_GPIO_HPD_A_BASE_IDX 2
+#define regDC_GPIO_HPD_EN 0x28f6
+#define regDC_GPIO_HPD_EN_BASE_IDX 2
+#define regDC_GPIO_HPD_Y 0x28f7
+#define regDC_GPIO_HPD_Y_BASE_IDX 2
+#define regDC_GPIO_DRIVE_STRENGTH_S0 0x28f8
+#define regDC_GPIO_DRIVE_STRENGTH_S0_BASE_IDX 2
+#define regDC_GPIO_DRIVE_STRENGTH_S1 0x28f9
+#define regDC_GPIO_DRIVE_STRENGTH_S1_BASE_IDX 2
+#define regDC_GPIO_PWRSEQ0_EN 0x28fa
+#define regDC_GPIO_PWRSEQ0_EN_BASE_IDX 2
+#define regDC_GPIO_PAD_STRENGTH_1 0x28fc
+#define regDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2
+#define regDC_GPIO_PAD_STRENGTH_2 0x28fd
+#define regDC_GPIO_PAD_STRENGTH_2_BASE_IDX 2
+#define regPHY_AUX_CNTL 0x28ff
+#define regPHY_AUX_CNTL_BASE_IDX 2
+#define regDC_GPIO_DRIVE_TXIMPSEL 0x2900
+#define regDC_GPIO_DRIVE_TXIMPSEL_BASE_IDX 2
+#define regDC_GPIO_PWRSEQ1_EN 0x2902
+#define regDC_GPIO_PWRSEQ1_EN_BASE_IDX 2
+#define regDC_GPIO_TX12_EN 0x2915
+#define regDC_GPIO_TX12_EN_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_0 0x2916
+#define regDC_GPIO_AUX_CTRL_0_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_1 0x2917
+#define regDC_GPIO_AUX_CTRL_1_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_2 0x2918
+#define regDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+#define regDC_GPIO_RXEN 0x2919
+#define regDC_GPIO_RXEN_BASE_IDX 2
+#define regDC_GPIO_PULLUPEN 0x291a
+#define regDC_GPIO_PULLUPEN_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_3 0x291b
+#define regDC_GPIO_AUX_CTRL_3_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_4 0x291c
+#define regDC_GPIO_AUX_CTRL_4_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_5 0x291d
+#define regDC_GPIO_AUX_CTRL_5_BASE_IDX 2
+#define regAUXI2C_PAD_ALL_PWR_OK 0x291e
+#define regAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy0_dispdec
+// base address: 0x0
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy1_dispdec
+// base address: 0x360
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0 0x2a00
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1 0x2a01
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2 0x2a02
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3 0x2a03
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4 0x2a04
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5 0x2a05
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6 0x2a06
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7 0x2a07
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8 0x2a08
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9 0x2a09
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10 0x2a0a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11 0x2a0b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12 0x2a0c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13 0x2a0d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14 0x2a0e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15 0x2a0f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16 0x2a10
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17 0x2a11
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18 0x2a12
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19 0x2a13
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20 0x2a14
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21 0x2a15
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22 0x2a16
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23 0x2a17
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24 0x2a18
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25 0x2a19
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26 0x2a1a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27 0x2a1b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28 0x2a1c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29 0x2a1d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30 0x2a1e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31 0x2a1f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32 0x2a20
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33 0x2a21
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34 0x2a22
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35 0x2a23
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36 0x2a24
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37 0x2a25
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38 0x2a26
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39 0x2a27
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40 0x2a28
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41 0x2a29
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42 0x2a2a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43 0x2a2b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44 0x2a2c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45 0x2a2d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46 0x2a2e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47 0x2a2f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48 0x2a30
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49 0x2a31
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50 0x2a32
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51 0x2a33
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52 0x2a34
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53 0x2a35
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54 0x2a36
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55 0x2a37
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56 0x2a38
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57 0x2a39
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy2_dispdec
+// base address: 0x6c0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0 0x2ad8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1 0x2ad9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2 0x2ada
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3 0x2adb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4 0x2adc
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5 0x2add
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6 0x2ade
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7 0x2adf
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8 0x2ae0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9 0x2ae1
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10 0x2ae2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11 0x2ae3
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12 0x2ae4
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13 0x2ae5
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14 0x2ae6
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15 0x2ae7
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16 0x2ae8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17 0x2ae9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18 0x2aea
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19 0x2aeb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20 0x2aec
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21 0x2aed
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22 0x2aee
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23 0x2aef
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24 0x2af0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25 0x2af1
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26 0x2af2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27 0x2af3
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28 0x2af4
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29 0x2af5
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30 0x2af6
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31 0x2af7
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32 0x2af8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33 0x2af9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34 0x2afa
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35 0x2afb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36 0x2afc
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37 0x2afd
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38 0x2afe
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39 0x2aff
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40 0x2b00
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41 0x2b01
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42 0x2b02
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43 0x2b03
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44 0x2b04
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45 0x2b05
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46 0x2b06
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47 0x2b07
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48 0x2b08
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49 0x2b09
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50 0x2b0a
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51 0x2b0b
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52 0x2b0c
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53 0x2b0d
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54 0x2b0e
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55 0x2b0f
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56 0x2b10
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57 0x2b11
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy3_dispdec
+// base address: 0xa20
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0 0x2bb0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1 0x2bb1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2 0x2bb2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3 0x2bb3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4 0x2bb4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5 0x2bb5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6 0x2bb6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7 0x2bb7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8 0x2bb8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9 0x2bb9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10 0x2bba
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11 0x2bbb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12 0x2bbc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13 0x2bbd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14 0x2bbe
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15 0x2bbf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16 0x2bc0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17 0x2bc1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18 0x2bc2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19 0x2bc3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20 0x2bc4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21 0x2bc5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22 0x2bc6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23 0x2bc7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24 0x2bc8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25 0x2bc9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26 0x2bca
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27 0x2bcb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28 0x2bcc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29 0x2bcd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30 0x2bce
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31 0x2bcf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32 0x2bd0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33 0x2bd1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34 0x2bd2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35 0x2bd3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36 0x2bd4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37 0x2bd5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38 0x2bd6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39 0x2bd7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40 0x2bd8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41 0x2bd9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42 0x2bda
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43 0x2bdb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44 0x2bdc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45 0x2bdd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46 0x2bde
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47 0x2bdf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48 0x2be0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49 0x2be1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50 0x2be2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51 0x2be3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52 0x2be4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53 0x2be5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54 0x2be6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55 0x2be7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56 0x2be8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57 0x2be9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy4_dispdec
+// base address: 0xd80
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0 0x2c88
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1 0x2c89
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2 0x2c8a
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3 0x2c8b
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4 0x2c8c
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5 0x2c8d
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6 0x2c8e
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7 0x2c8f
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8 0x2c90
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9 0x2c91
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10 0x2c92
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11 0x2c93
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12 0x2c94
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13 0x2c95
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14 0x2c96
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15 0x2c97
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16 0x2c98
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17 0x2c99
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18 0x2c9a
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19 0x2c9b
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20 0x2c9c
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21 0x2c9d
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22 0x2c9e
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23 0x2c9f
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24 0x2ca0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25 0x2ca1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26 0x2ca2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27 0x2ca3
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28 0x2ca4
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29 0x2ca5
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30 0x2ca6
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31 0x2ca7
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32 0x2ca8
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33 0x2ca9
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34 0x2caa
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35 0x2cab
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36 0x2cac
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37 0x2cad
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38 0x2cae
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39 0x2caf
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40 0x2cb0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41 0x2cb1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42 0x2cb2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43 0x2cb3
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44 0x2cb4
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45 0x2cb5
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46 0x2cb6
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47 0x2cb7
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48 0x2cb8
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49 0x2cb9
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50 0x2cba
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51 0x2cbb
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52 0x2cbc
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53 0x2cbd
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54 0x2cbe
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55 0x2cbf
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56 0x2cc0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57 0x2cc1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_pwrseq0_dispdec_pwrseq_dispdec
+// base address: 0x0
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_EN 0x2f10
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_CTRL 0x2f11
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_CTRL_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_MASK 0x2f12
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_A_Y 0x2f13
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_A_Y_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_CNTL 0x2f14
+#define regPWRSEQ0_PANEL_PWRSEQ_CNTL_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_STATE 0x2f15
+#define regPWRSEQ0_PANEL_PWRSEQ_STATE_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY1 0x2f16
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY1_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY2 0x2f17
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY2_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV1 0x2f18
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV1_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_CNTL 0x2f19
+#define regPWRSEQ0_BL_PWM_CNTL_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_CNTL2 0x2f1a
+#define regPWRSEQ0_BL_PWM_CNTL2_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_PERIOD_CNTL 0x2f1b
+#define regPWRSEQ0_BL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_GRP1_REG_LOCK 0x2f1c
+#define regPWRSEQ0_BL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV2 0x2f1d
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV2_BASE_IDX 2
+#define regPWRSEQ0_PWRSEQ_SPARE 0x2f21
+#define regPWRSEQ0_PWRSEQ_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_pwrseq1_dispdec_pwrseq_dispdec
+// base address: 0x1b0
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_EN 0x2f7c
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_CTRL 0x2f7d
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_CTRL_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_MASK 0x2f7e
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_A_Y 0x2f7f
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_A_Y_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_CNTL 0x2f80
+#define regPWRSEQ1_PANEL_PWRSEQ_CNTL_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_STATE 0x2f81
+#define regPWRSEQ1_PANEL_PWRSEQ_STATE_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY1 0x2f82
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY1_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY2 0x2f83
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY2_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV1 0x2f84
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV1_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_CNTL 0x2f85
+#define regPWRSEQ1_BL_PWM_CNTL_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_CNTL2 0x2f86
+#define regPWRSEQ1_BL_PWM_CNTL2_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_PERIOD_CNTL 0x2f87
+#define regPWRSEQ1_BL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_GRP1_REG_LOCK 0x2f88
+#define regPWRSEQ1_BL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV2 0x2f89
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV2_BASE_IDX 2
+#define regPWRSEQ1_PWRSEQ_SPARE 0x2f8d
+#define regPWRSEQ1_PWRSEQ_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+// base address: 0x0
+#define regDSC_TOP0_DSC_TOP_CONTROL 0x3000
+#define regDSC_TOP0_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP0_DSC_DEBUG_CONTROL 0x3001
+#define regDSC_TOP0_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+// base address: 0x0
+#define regDSCCIF0_DSCCIF_CONFIG0 0x3005
+#define regDSCCIF0_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF0_DSCCIF_CONFIG1 0x3006
+#define regDSCCIF0_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+// base address: 0x0
+#define regDSCC0_DSCC_CONFIG0 0x300a
+#define regDSCC0_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC0_DSCC_CONFIG1 0x300b
+#define regDSCC0_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC0_DSCC_STATUS 0x300c
+#define regDSCC0_DSCC_STATUS_BASE_IDX 2
+#define regDSCC0_DSCC_INTERRUPT_CONTROL_STATUS 0x300d
+#define regDSCC0_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG0 0x300e
+#define regDSCC0_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG1 0x300f
+#define regDSCC0_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG2 0x3010
+#define regDSCC0_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG3 0x3011
+#define regDSCC0_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG4 0x3012
+#define regDSCC0_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG5 0x3013
+#define regDSCC0_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG6 0x3014
+#define regDSCC0_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG7 0x3015
+#define regDSCC0_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG8 0x3016
+#define regDSCC0_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG9 0x3017
+#define regDSCC0_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG10 0x3018
+#define regDSCC0_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG11 0x3019
+#define regDSCC0_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG12 0x301a
+#define regDSCC0_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG13 0x301b
+#define regDSCC0_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG14 0x301c
+#define regDSCC0_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG15 0x301d
+#define regDSCC0_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG16 0x301e
+#define regDSCC0_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG17 0x301f
+#define regDSCC0_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG18 0x3020
+#define regDSCC0_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG19 0x3021
+#define regDSCC0_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG20 0x3022
+#define regDSCC0_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG21 0x3023
+#define regDSCC0_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG22 0x3024
+#define regDSCC0_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC0_DSCC_MEM_POWER_CONTROL 0x3025
+#define regDSCC0_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3026
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3027
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3028
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3029
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER 0x302a
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER 0x302b
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_MAX_ABS_ERROR0 0x302c
+#define regDSCC0_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC0_DSCC_MAX_ABS_ERROR1 0x302d
+#define regDSCC0_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x302e
+#define regDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x302f
+#define regDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3030
+#define regDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3031
+#define regDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3032
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3033
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3034
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc140
+#define regDC_PERFMON19_PERFCOUNTER_CNTL 0x3050
+#define regDC_PERFMON19_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON19_PERFCOUNTER_CNTL2 0x3051
+#define regDC_PERFMON19_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON19_PERFCOUNTER_STATE 0x3052
+#define regDC_PERFMON19_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CNTL 0x3053
+#define regDC_PERFMON19_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CNTL2 0x3054
+#define regDC_PERFMON19_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CVALUE_INT_MISC 0x3055
+#define regDC_PERFMON19_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CVALUE_LOW 0x3056
+#define regDC_PERFMON19_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_HI 0x3057
+#define regDC_PERFMON19_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_LOW 0x3058
+#define regDC_PERFMON19_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+// base address: 0x170
+#define regDSC_TOP1_DSC_TOP_CONTROL 0x305c
+#define regDSC_TOP1_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP1_DSC_DEBUG_CONTROL 0x305d
+#define regDSC_TOP1_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+// base address: 0x170
+#define regDSCCIF1_DSCCIF_CONFIG0 0x3061
+#define regDSCCIF1_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF1_DSCCIF_CONFIG1 0x3062
+#define regDSCCIF1_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+// base address: 0x170
+#define regDSCC1_DSCC_CONFIG0 0x3066
+#define regDSCC1_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC1_DSCC_CONFIG1 0x3067
+#define regDSCC1_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC1_DSCC_STATUS 0x3068
+#define regDSCC1_DSCC_STATUS_BASE_IDX 2
+#define regDSCC1_DSCC_INTERRUPT_CONTROL_STATUS 0x3069
+#define regDSCC1_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG0 0x306a
+#define regDSCC1_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG1 0x306b
+#define regDSCC1_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG2 0x306c
+#define regDSCC1_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG3 0x306d
+#define regDSCC1_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG4 0x306e
+#define regDSCC1_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG5 0x306f
+#define regDSCC1_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG6 0x3070
+#define regDSCC1_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG7 0x3071
+#define regDSCC1_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG8 0x3072
+#define regDSCC1_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG9 0x3073
+#define regDSCC1_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG10 0x3074
+#define regDSCC1_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG11 0x3075
+#define regDSCC1_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG12 0x3076
+#define regDSCC1_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG13 0x3077
+#define regDSCC1_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG14 0x3078
+#define regDSCC1_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG15 0x3079
+#define regDSCC1_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG16 0x307a
+#define regDSCC1_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG17 0x307b
+#define regDSCC1_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG18 0x307c
+#define regDSCC1_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG19 0x307d
+#define regDSCC1_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG20 0x307e
+#define regDSCC1_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG21 0x307f
+#define regDSCC1_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG22 0x3080
+#define regDSCC1_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC1_DSCC_MEM_POWER_CONTROL 0x3081
+#define regDSCC1_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3082
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3083
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3084
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3085
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER 0x3086
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER 0x3087
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_MAX_ABS_ERROR0 0x3088
+#define regDSCC1_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC1_DSCC_MAX_ABS_ERROR1 0x3089
+#define regDSCC1_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x308a
+#define regDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x308b
+#define regDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x308c
+#define regDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x308d
+#define regDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x308e
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x308f
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3090
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc2b0
+#define regDC_PERFMON20_PERFCOUNTER_CNTL 0x30ac
+#define regDC_PERFMON20_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON20_PERFCOUNTER_CNTL2 0x30ad
+#define regDC_PERFMON20_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON20_PERFCOUNTER_STATE 0x30ae
+#define regDC_PERFMON20_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CNTL 0x30af
+#define regDC_PERFMON20_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CNTL2 0x30b0
+#define regDC_PERFMON20_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CVALUE_INT_MISC 0x30b1
+#define regDC_PERFMON20_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CVALUE_LOW 0x30b2
+#define regDC_PERFMON20_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_HI 0x30b3
+#define regDC_PERFMON20_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_LOW 0x30b4
+#define regDC_PERFMON20_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+// base address: 0x2e0
+#define regDSC_TOP2_DSC_TOP_CONTROL 0x30b8
+#define regDSC_TOP2_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP2_DSC_DEBUG_CONTROL 0x30b9
+#define regDSC_TOP2_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+// base address: 0x2e0
+#define regDSCCIF2_DSCCIF_CONFIG0 0x30bd
+#define regDSCCIF2_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF2_DSCCIF_CONFIG1 0x30be
+#define regDSCCIF2_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+// base address: 0x2e0
+#define regDSCC2_DSCC_CONFIG0 0x30c2
+#define regDSCC2_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC2_DSCC_CONFIG1 0x30c3
+#define regDSCC2_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC2_DSCC_STATUS 0x30c4
+#define regDSCC2_DSCC_STATUS_BASE_IDX 2
+#define regDSCC2_DSCC_INTERRUPT_CONTROL_STATUS 0x30c5
+#define regDSCC2_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG0 0x30c6
+#define regDSCC2_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG1 0x30c7
+#define regDSCC2_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG2 0x30c8
+#define regDSCC2_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG3 0x30c9
+#define regDSCC2_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG4 0x30ca
+#define regDSCC2_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG5 0x30cb
+#define regDSCC2_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG6 0x30cc
+#define regDSCC2_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG7 0x30cd
+#define regDSCC2_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG8 0x30ce
+#define regDSCC2_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG9 0x30cf
+#define regDSCC2_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG10 0x30d0
+#define regDSCC2_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG11 0x30d1
+#define regDSCC2_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG12 0x30d2
+#define regDSCC2_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG13 0x30d3
+#define regDSCC2_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG14 0x30d4
+#define regDSCC2_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG15 0x30d5
+#define regDSCC2_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG16 0x30d6
+#define regDSCC2_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG17 0x30d7
+#define regDSCC2_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG18 0x30d8
+#define regDSCC2_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG19 0x30d9
+#define regDSCC2_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG20 0x30da
+#define regDSCC2_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG21 0x30db
+#define regDSCC2_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG22 0x30dc
+#define regDSCC2_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC2_DSCC_MEM_POWER_CONTROL 0x30dd
+#define regDSCC2_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER 0x30de
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER 0x30df
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER 0x30e0
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER 0x30e1
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER 0x30e2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER 0x30e3
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_MAX_ABS_ERROR0 0x30e4
+#define regDSCC2_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC2_DSCC_MAX_ABS_ERROR1 0x30e5
+#define regDSCC2_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x30e6
+#define regDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x30e7
+#define regDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x30e8
+#define regDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x30e9
+#define regDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x30ea
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x30eb
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x30ec
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc420
+#define regDC_PERFMON21_PERFCOUNTER_CNTL 0x3108
+#define regDC_PERFMON21_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON21_PERFCOUNTER_CNTL2 0x3109
+#define regDC_PERFMON21_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON21_PERFCOUNTER_STATE 0x310a
+#define regDC_PERFMON21_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CNTL 0x310b
+#define regDC_PERFMON21_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CNTL2 0x310c
+#define regDC_PERFMON21_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CVALUE_INT_MISC 0x310d
+#define regDC_PERFMON21_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CVALUE_LOW 0x310e
+#define regDC_PERFMON21_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_HI 0x310f
+#define regDC_PERFMON21_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_LOW 0x3110
+#define regDC_PERFMON21_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+// base address: 0x450
+#define regDSC_TOP3_DSC_TOP_CONTROL 0x3114
+#define regDSC_TOP3_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP3_DSC_DEBUG_CONTROL 0x3115
+#define regDSC_TOP3_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+// base address: 0x450
+#define regDSCCIF3_DSCCIF_CONFIG0 0x3119
+#define regDSCCIF3_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF3_DSCCIF_CONFIG1 0x311a
+#define regDSCCIF3_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+// base address: 0x450
+#define regDSCC3_DSCC_CONFIG0 0x311e
+#define regDSCC3_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC3_DSCC_CONFIG1 0x311f
+#define regDSCC3_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC3_DSCC_STATUS 0x3120
+#define regDSCC3_DSCC_STATUS_BASE_IDX 2
+#define regDSCC3_DSCC_INTERRUPT_CONTROL_STATUS 0x3121
+#define regDSCC3_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG0 0x3122
+#define regDSCC3_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG1 0x3123
+#define regDSCC3_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG2 0x3124
+#define regDSCC3_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG3 0x3125
+#define regDSCC3_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG4 0x3126
+#define regDSCC3_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG5 0x3127
+#define regDSCC3_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG6 0x3128
+#define regDSCC3_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG7 0x3129
+#define regDSCC3_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG8 0x312a
+#define regDSCC3_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG9 0x312b
+#define regDSCC3_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG10 0x312c
+#define regDSCC3_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG11 0x312d
+#define regDSCC3_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG12 0x312e
+#define regDSCC3_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG13 0x312f
+#define regDSCC3_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG14 0x3130
+#define regDSCC3_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG15 0x3131
+#define regDSCC3_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG16 0x3132
+#define regDSCC3_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG17 0x3133
+#define regDSCC3_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG18 0x3134
+#define regDSCC3_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG19 0x3135
+#define regDSCC3_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG20 0x3136
+#define regDSCC3_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG21 0x3137
+#define regDSCC3_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG22 0x3138
+#define regDSCC3_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC3_DSCC_MEM_POWER_CONTROL 0x3139
+#define regDSCC3_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER 0x313a
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER 0x313b
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER 0x313c
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER 0x313d
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER 0x313e
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER 0x313f
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_MAX_ABS_ERROR0 0x3140
+#define regDSCC3_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC3_DSCC_MAX_ABS_ERROR1 0x3141
+#define regDSCC3_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x3142
+#define regDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x3143
+#define regDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3144
+#define regDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3145
+#define regDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3146
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3147
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3148
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc590
+#define regDC_PERFMON22_PERFCOUNTER_CNTL 0x3164
+#define regDC_PERFMON22_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON22_PERFCOUNTER_CNTL2 0x3165
+#define regDC_PERFMON22_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON22_PERFCOUNTER_STATE 0x3166
+#define regDC_PERFMON22_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CNTL 0x3167
+#define regDC_PERFMON22_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CNTL2 0x3168
+#define regDC_PERFMON22_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CVALUE_INT_MISC 0x3169
+#define regDC_PERFMON22_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CVALUE_LOW 0x316a
+#define regDC_PERFMON22_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_HI 0x316b
+#define regDC_PERFMON22_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_LOW 0x316c
+#define regDC_PERFMON22_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwb_top_dispdec
+// base address: 0x0
+#define regDWB_ENABLE_CLK_CTRL 0x3228
+#define regDWB_ENABLE_CLK_CTRL_BASE_IDX 2
+#define regDWB_MEM_PWR_CTRL 0x3229
+#define regDWB_MEM_PWR_CTRL_BASE_IDX 2
+#define regFC_MODE_CTRL 0x322a
+#define regFC_MODE_CTRL_BASE_IDX 2
+#define regFC_FLOW_CTRL 0x322b
+#define regFC_FLOW_CTRL_BASE_IDX 2
+#define regFC_WINDOW_START 0x322c
+#define regFC_WINDOW_START_BASE_IDX 2
+#define regFC_WINDOW_SIZE 0x322d
+#define regFC_WINDOW_SIZE_BASE_IDX 2
+#define regFC_SOURCE_SIZE 0x322e
+#define regFC_SOURCE_SIZE_BASE_IDX 2
+#define regDWB_UPDATE_CTRL 0x322f
+#define regDWB_UPDATE_CTRL_BASE_IDX 2
+#define regDWB_CRC_CTRL 0x3230
+#define regDWB_CRC_CTRL_BASE_IDX 2
+#define regDWB_CRC_MASK_R_G 0x3231
+#define regDWB_CRC_MASK_R_G_BASE_IDX 2
+#define regDWB_CRC_MASK_B_A 0x3232
+#define regDWB_CRC_MASK_B_A_BASE_IDX 2
+#define regDWB_CRC_VAL_R_G 0x3233
+#define regDWB_CRC_VAL_R_G_BASE_IDX 2
+#define regDWB_CRC_VAL_B_A 0x3234
+#define regDWB_CRC_VAL_B_A_BASE_IDX 2
+#define regDWB_OUT_CTRL 0x3235
+#define regDWB_OUT_CTRL_BASE_IDX 2
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_EN 0x3236
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_EN_BASE_IDX 2
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT 0x3237
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_BASE_IDX 2
+#define regDWB_HOST_READ_CONTROL 0x3238
+#define regDWB_HOST_READ_CONTROL_BASE_IDX 2
+#define regDWB_OVERFLOW_STATUS 0x3239
+#define regDWB_OVERFLOW_STATUS_BASE_IDX 2
+#define regDWB_OVERFLOW_COUNTER 0x323a
+#define regDWB_OVERFLOW_COUNTER_BASE_IDX 2
+#define regDWB_SOFT_RESET 0x323b
+#define regDWB_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+// base address: 0xca20
+#define regDC_PERFMON3_PERFCOUNTER_CNTL 0x3288
+#define regDC_PERFMON3_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON3_PERFCOUNTER_CNTL2 0x3289
+#define regDC_PERFMON3_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON3_PERFCOUNTER_STATE 0x328a
+#define regDC_PERFMON3_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CNTL 0x328b
+#define regDC_PERFMON3_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CNTL2 0x328c
+#define regDC_PERFMON3_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CVALUE_INT_MISC 0x328d
+#define regDC_PERFMON3_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CVALUE_LOW 0x328e
+#define regDC_PERFMON3_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_HI 0x328f
+#define regDC_PERFMON3_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_LOW 0x3290
+#define regDC_PERFMON3_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwbcp_dispdec
+// base address: 0x0
+#define regDWB_HDR_MULT_COEF 0x3294
+#define regDWB_HDR_MULT_COEF_BASE_IDX 2
+#define regDWB_GAMUT_REMAP_MODE 0x3295
+#define regDWB_GAMUT_REMAP_MODE_BASE_IDX 2
+#define regDWB_GAMUT_REMAP_COEF_FORMAT 0x3296
+#define regDWB_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C11_C12 0x3297
+#define regDWB_GAMUT_REMAPA_C11_C12_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C13_C14 0x3298
+#define regDWB_GAMUT_REMAPA_C13_C14_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C21_C22 0x3299
+#define regDWB_GAMUT_REMAPA_C21_C22_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C23_C24 0x329a
+#define regDWB_GAMUT_REMAPA_C23_C24_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C31_C32 0x329b
+#define regDWB_GAMUT_REMAPA_C31_C32_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C33_C34 0x329c
+#define regDWB_GAMUT_REMAPA_C33_C34_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C11_C12 0x329d
+#define regDWB_GAMUT_REMAPB_C11_C12_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C13_C14 0x329e
+#define regDWB_GAMUT_REMAPB_C13_C14_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C21_C22 0x329f
+#define regDWB_GAMUT_REMAPB_C21_C22_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C23_C24 0x32a0
+#define regDWB_GAMUT_REMAPB_C23_C24_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C31_C32 0x32a1
+#define regDWB_GAMUT_REMAPB_C31_C32_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C33_C34 0x32a2
+#define regDWB_GAMUT_REMAPB_C33_C34_BASE_IDX 2
+#define regDWB_OGAM_CONTROL 0x32a3
+#define regDWB_OGAM_CONTROL_BASE_IDX 2
+#define regDWB_OGAM_LUT_INDEX 0x32a4
+#define regDWB_OGAM_LUT_INDEX_BASE_IDX 2
+#define regDWB_OGAM_LUT_DATA 0x32a5
+#define regDWB_OGAM_LUT_DATA_BASE_IDX 2
+#define regDWB_OGAM_LUT_CONTROL 0x32a6
+#define regDWB_OGAM_LUT_CONTROL_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_B 0x32a7
+#define regDWB_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_G 0x32a8
+#define regDWB_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_R 0x32a9
+#define regDWB_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_B 0x32aa
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_B 0x32ab
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_G 0x32ac
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_G 0x32ad
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_R 0x32ae
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_R 0x32af
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_B 0x32b0
+#define regDWB_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_B 0x32b1
+#define regDWB_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_G 0x32b2
+#define regDWB_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_G 0x32b3
+#define regDWB_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_R 0x32b4
+#define regDWB_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_R 0x32b5
+#define regDWB_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_B 0x32b6
+#define regDWB_OGAM_RAMA_OFFSET_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_G 0x32b7
+#define regDWB_OGAM_RAMA_OFFSET_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_R 0x32b8
+#define regDWB_OGAM_RAMA_OFFSET_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_0_1 0x32b9
+#define regDWB_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_2_3 0x32ba
+#define regDWB_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_4_5 0x32bb
+#define regDWB_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_6_7 0x32bc
+#define regDWB_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_8_9 0x32bd
+#define regDWB_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_10_11 0x32be
+#define regDWB_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_12_13 0x32bf
+#define regDWB_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_14_15 0x32c0
+#define regDWB_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_16_17 0x32c1
+#define regDWB_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_18_19 0x32c2
+#define regDWB_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_20_21 0x32c3
+#define regDWB_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_22_23 0x32c4
+#define regDWB_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_24_25 0x32c5
+#define regDWB_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_26_27 0x32c6
+#define regDWB_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_28_29 0x32c7
+#define regDWB_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_30_31 0x32c8
+#define regDWB_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_32_33 0x32c9
+#define regDWB_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_B 0x32ca
+#define regDWB_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_G 0x32cb
+#define regDWB_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_R 0x32cc
+#define regDWB_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_B 0x32cd
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_B 0x32ce
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_G 0x32cf
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_G 0x32d0
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_R 0x32d1
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_R 0x32d2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_B 0x32d3
+#define regDWB_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_B 0x32d4
+#define regDWB_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_G 0x32d5
+#define regDWB_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_G 0x32d6
+#define regDWB_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_R 0x32d7
+#define regDWB_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_R 0x32d8
+#define regDWB_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_B 0x32d9
+#define regDWB_OGAM_RAMB_OFFSET_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_G 0x32da
+#define regDWB_OGAM_RAMB_OFFSET_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_R 0x32db
+#define regDWB_OGAM_RAMB_OFFSET_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_0_1 0x32dc
+#define regDWB_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_2_3 0x32dd
+#define regDWB_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_4_5 0x32de
+#define regDWB_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_6_7 0x32df
+#define regDWB_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_8_9 0x32e0
+#define regDWB_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_10_11 0x32e1
+#define regDWB_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_12_13 0x32e2
+#define regDWB_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_14_15 0x32e3
+#define regDWB_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_16_17 0x32e4
+#define regDWB_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_18_19 0x32e5
+#define regDWB_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_20_21 0x32e6
+#define regDWB_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_22_23 0x32e7
+#define regDWB_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_24_25 0x32e8
+#define regDWB_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_26_27 0x32e9
+#define regDWB_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_28_29 0x32ea
+#define regDWB_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_30_31 0x32eb
+#define regDWB_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_32_33 0x32ec
+#define regDWB_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+// base address: 0x0
+#define regDCHVM_CTRL0 0x3603
+#define regDCHVM_CTRL0_BASE_IDX 2
+#define regDCHVM_CTRL1 0x3604
+#define regDCHVM_CTRL1_BASE_IDX 2
+#define regDCHVM_CLK_CTRL 0x3605
+#define regDCHVM_CLK_CTRL_BASE_IDX 2
+#define regDCHVM_MEM_CTRL 0x3606
+#define regDCHVM_MEM_CTRL_BASE_IDX 2
+#define regDCHVM_RIOMMU_CTRL0 0x3607
+#define regDCHVM_RIOMMU_CTRL0_BASE_IDX 2
+#define regDCHVM_RIOMMU_STAT0 0x3608
+#define regDCHVM_RIOMMU_STAT0_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dispdec
+// base address: 0x1ab8c
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL 0x3623
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x3624
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL 0x3625
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x3626
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x3627
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_SPARE 0x3628
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_apg_apg_dispdec
+// base address: 0x1abc0
+#define regAPG0_APG_CONTROL 0x3630
+#define regAPG0_APG_CONTROL_BASE_IDX 2
+#define regAPG0_APG_CONTROL2 0x3631
+#define regAPG0_APG_CONTROL2_BASE_IDX 2
+#define regAPG0_APG_DBG_GEN_CONTROL 0x3632
+#define regAPG0_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG0_APG_PACKET_CONTROL 0x3633
+#define regAPG0_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_CONTROL 0x363a
+#define regAPG0_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_CONTROL2 0x363b
+#define regAPG0_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_RESULT 0x363c
+#define regAPG0_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG0_APG_STATUS 0x3641
+#define regAPG0_APG_STATUS_BASE_IDX 2
+#define regAPG0_APG_STATUS2 0x3642
+#define regAPG0_APG_STATUS2_BASE_IDX 2
+#define regAPG0_APG_MEM_PWR 0x3644
+#define regAPG0_APG_MEM_PWR_BASE_IDX 2
+#define regAPG0_APG_SPARE 0x3646
+#define regAPG0_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dme_dme_dispdec
+// base address: 0x1ac38
+#define regDME6_DME_CONTROL 0x364e
+#define regDME6_DME_CONTROL_BASE_IDX 2
+#define regDME6_DME_MEMORY_CONTROL 0x364f
+#define regDME6_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_vpg_vpg_dispdec
+// base address: 0x1ac44
+#define regVPG6_VPG_GENERIC_PACKET_ACCESS_CTRL 0x3651
+#define regVPG6_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GENERIC_PACKET_DATA 0x3652
+#define regVPG6_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG6_VPG_GSP_FRAME_UPDATE_CTRL 0x3653
+#define regVPG6_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x3654
+#define regVPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GENERIC_STATUS 0x3655
+#define regVPG6_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG6_VPG_MEM_PWR 0x3656
+#define regVPG6_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG6_VPG_ISRC1_2_ACCESS_CTRL 0x3657
+#define regVPG6_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG6_VPG_ISRC1_2_DATA 0x3658
+#define regVPG6_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG6_VPG_MPEG_INFO0 0x3659
+#define regVPG6_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG6_VPG_MPEG_INFO1 0x365a
+#define regVPG6_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc0_dispdec
+// base address: 0x1ac74
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_CONTROL 0x365d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL 0x365e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x365f
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3660
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3661
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0 0x3662
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1 0x3663
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2 0x3664
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3 0x3665
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4 0x3666
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5 0x3667
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6 0x3668
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7 0x3669
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8 0x366a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL 0x366b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x366c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x366d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x366e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x366f
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3670
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3671
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x3672
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x3673
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x3674
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x3675
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x3676
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x3677
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x3678
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x3679
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x367a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL 0x367b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x367c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x367d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x367e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL 0x3683
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL 0x3684
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3685
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x3686
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL 0x3687
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0 0x3688
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1 0x3689
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS 0x368a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x368b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x368c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL 0x368d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SPARE 0x368e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc0_dispdec
+// base address: 0x1ad5c
+#define regDP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL 0x3697
+#define regDP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_LINK_ENC0_DP_LINK_ENC_SPARE 0x3698
+#define regDP_LINK_ENC0_DP_LINK_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym320_dispdec
+// base address: 0x1ae00
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL 0x36c0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_STATUS 0x36c1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE 0x36c4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0 0x36c5
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1 0x36c6
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2 0x36c7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3 0x36c8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0 0x36cb
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1 0x36cc
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2 0x36cd
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3 0x36ce
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0 0x36d1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1 0x36d2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2 0x36d3
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3 0x36d4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG 0x36d7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0 0x36d8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1 0x36d9
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2 0x36da
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3 0x36db
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE 0x36dc
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0 0x36dd
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1 0x36de
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2 0x36df
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3 0x36e0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4 0x36e1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5 0x36e2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6 0x36e3
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7 0x36e4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8 0x36e5
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9 0x36e6
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10 0x36e7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS 0x36e8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE 0x36ea
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0 0x36eb
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1 0x36ec
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL 0x36ed
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0 0x36ee
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1 0x36ef
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS 0x36f0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT 0x36f1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dispdec
+// base address: 0x1aedc
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL 0x36f7
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x36f8
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL 0x36f9
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x36fa
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x36fb
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_SPARE 0x36fc
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_apg_apg_dispdec
+// base address: 0x1af10
+#define regAPG1_APG_CONTROL 0x3704
+#define regAPG1_APG_CONTROL_BASE_IDX 2
+#define regAPG1_APG_CONTROL2 0x3705
+#define regAPG1_APG_CONTROL2_BASE_IDX 2
+#define regAPG1_APG_DBG_GEN_CONTROL 0x3706
+#define regAPG1_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG1_APG_PACKET_CONTROL 0x3707
+#define regAPG1_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_CONTROL 0x370e
+#define regAPG1_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_CONTROL2 0x370f
+#define regAPG1_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_RESULT 0x3710
+#define regAPG1_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG1_APG_STATUS 0x3715
+#define regAPG1_APG_STATUS_BASE_IDX 2
+#define regAPG1_APG_STATUS2 0x3716
+#define regAPG1_APG_STATUS2_BASE_IDX 2
+#define regAPG1_APG_MEM_PWR 0x3718
+#define regAPG1_APG_MEM_PWR_BASE_IDX 2
+#define regAPG1_APG_SPARE 0x371a
+#define regAPG1_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dme_dme_dispdec
+// base address: 0x1af88
+#define regDME7_DME_CONTROL 0x3722
+#define regDME7_DME_CONTROL_BASE_IDX 2
+#define regDME7_DME_MEMORY_CONTROL 0x3723
+#define regDME7_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_vpg_vpg_dispdec
+// base address: 0x1af94
+#define regVPG7_VPG_GENERIC_PACKET_ACCESS_CTRL 0x3725
+#define regVPG7_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GENERIC_PACKET_DATA 0x3726
+#define regVPG7_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG7_VPG_GSP_FRAME_UPDATE_CTRL 0x3727
+#define regVPG7_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x3728
+#define regVPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GENERIC_STATUS 0x3729
+#define regVPG7_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG7_VPG_MEM_PWR 0x372a
+#define regVPG7_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG7_VPG_ISRC1_2_ACCESS_CTRL 0x372b
+#define regVPG7_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG7_VPG_ISRC1_2_DATA 0x372c
+#define regVPG7_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG7_VPG_MPEG_INFO0 0x372d
+#define regVPG7_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG7_VPG_MPEG_INFO1 0x372e
+#define regVPG7_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc1_dispdec
+// base address: 0x1afc4
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_CONTROL 0x3731
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL 0x3732
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x3733
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3734
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3735
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0 0x3736
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1 0x3737
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2 0x3738
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3 0x3739
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4 0x373a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5 0x373b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6 0x373c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7 0x373d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8 0x373e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL 0x373f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x3740
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x3741
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x3742
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x3743
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3744
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3745
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x3746
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x3747
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x3748
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x3749
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x374a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x374b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x374c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x374d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x374e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL 0x374f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x3750
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x3751
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x3752
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL 0x3757
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL 0x3758
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3759
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x375a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL 0x375b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0 0x375c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1 0x375d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS 0x375e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x375f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3760
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3761
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SPARE 0x3762
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc1_dispdec
+// base address: 0x1b0ac
+#define regDP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL 0x376b
+#define regDP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_LINK_ENC1_DP_LINK_ENC_SPARE 0x376c
+#define regDP_LINK_ENC1_DP_LINK_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym321_dispdec
+// base address: 0x1b150
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL 0x3794
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_STATUS 0x3795
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE 0x3798
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0 0x3799
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1 0x379a
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2 0x379b
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3 0x379c
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0 0x379f
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1 0x37a0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2 0x37a1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3 0x37a2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0 0x37a5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1 0x37a6
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2 0x37a7
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3 0x37a8
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG 0x37ab
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0 0x37ac
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1 0x37ad
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2 0x37ae
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3 0x37af
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE 0x37b0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0 0x37b1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1 0x37b2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2 0x37b3
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3 0x37b4
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4 0x37b5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5 0x37b6
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6 0x37b7
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7 0x37b8
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8 0x37b9
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9 0x37ba
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10 0x37bb
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS 0x37bc
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE 0x37be
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0 0x37bf
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1 0x37c0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL 0x37c1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0 0x37c2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1 0x37c3
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS 0x37c4
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT 0x37c5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dispdec
+// base address: 0x1b22c
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL 0x37cb
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x37cc
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL 0x37cd
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x37ce
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x37cf
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_SPARE 0x37d0
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_apg_apg_dispdec
+// base address: 0x1b260
+#define regAPG2_APG_CONTROL 0x37d8
+#define regAPG2_APG_CONTROL_BASE_IDX 2
+#define regAPG2_APG_CONTROL2 0x37d9
+#define regAPG2_APG_CONTROL2_BASE_IDX 2
+#define regAPG2_APG_DBG_GEN_CONTROL 0x37da
+#define regAPG2_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG2_APG_PACKET_CONTROL 0x37db
+#define regAPG2_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_CONTROL 0x37e2
+#define regAPG2_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_CONTROL2 0x37e3
+#define regAPG2_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_RESULT 0x37e4
+#define regAPG2_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG2_APG_STATUS 0x37e9
+#define regAPG2_APG_STATUS_BASE_IDX 2
+#define regAPG2_APG_STATUS2 0x37ea
+#define regAPG2_APG_STATUS2_BASE_IDX 2
+#define regAPG2_APG_MEM_PWR 0x37ec
+#define regAPG2_APG_MEM_PWR_BASE_IDX 2
+#define regAPG2_APG_SPARE 0x37ee
+#define regAPG2_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dme_dme_dispdec
+// base address: 0x1b2d8
+#define regDME8_DME_CONTROL 0x37f6
+#define regDME8_DME_CONTROL_BASE_IDX 2
+#define regDME8_DME_MEMORY_CONTROL 0x37f7
+#define regDME8_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_vpg_vpg_dispdec
+// base address: 0x1b2e4
+#define regVPG8_VPG_GENERIC_PACKET_ACCESS_CTRL 0x37f9
+#define regVPG8_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GENERIC_PACKET_DATA 0x37fa
+#define regVPG8_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG8_VPG_GSP_FRAME_UPDATE_CTRL 0x37fb
+#define regVPG8_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x37fc
+#define regVPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GENERIC_STATUS 0x37fd
+#define regVPG8_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG8_VPG_MEM_PWR 0x37fe
+#define regVPG8_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG8_VPG_ISRC1_2_ACCESS_CTRL 0x37ff
+#define regVPG8_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG8_VPG_ISRC1_2_DATA 0x3800
+#define regVPG8_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG8_VPG_MPEG_INFO0 0x3801
+#define regVPG8_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG8_VPG_MPEG_INFO1 0x3802
+#define regVPG8_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc2_dispdec
+// base address: 0x1b314
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_CONTROL 0x3805
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL 0x3806
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x3807
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3808
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3809
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0 0x380a
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1 0x380b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2 0x380c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3 0x380d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4 0x380e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5 0x380f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6 0x3810
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7 0x3811
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8 0x3812
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL 0x3813
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x3814
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x3815
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x3816
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x3817
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3818
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3819
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x381a
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x381b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x381c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x381d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x381e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x381f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x3820
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x3821
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x3822
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL 0x3823
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x3824
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x3825
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x3826
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL 0x382b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL 0x382c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL 0x382d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x382e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL 0x382f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0 0x3830
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1 0x3831
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS 0x3832
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x3833
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3834
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3835
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SPARE 0x3836
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dispdec
+// base address: 0x1b57c
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL 0x389f
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x38a0
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL 0x38a1
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x38a2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x38a3
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_SPARE 0x38a4
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_apg_apg_dispdec
+// base address: 0x1b5b0
+#define regAPG3_APG_CONTROL 0x38ac
+#define regAPG3_APG_CONTROL_BASE_IDX 2
+#define regAPG3_APG_CONTROL2 0x38ad
+#define regAPG3_APG_CONTROL2_BASE_IDX 2
+#define regAPG3_APG_DBG_GEN_CONTROL 0x38ae
+#define regAPG3_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG3_APG_PACKET_CONTROL 0x38af
+#define regAPG3_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_CONTROL 0x38b6
+#define regAPG3_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_CONTROL2 0x38b7
+#define regAPG3_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_RESULT 0x38b8
+#define regAPG3_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG3_APG_STATUS 0x38bd
+#define regAPG3_APG_STATUS_BASE_IDX 2
+#define regAPG3_APG_STATUS2 0x38be
+#define regAPG3_APG_STATUS2_BASE_IDX 2
+#define regAPG3_APG_MEM_PWR 0x38c0
+#define regAPG3_APG_MEM_PWR_BASE_IDX 2
+#define regAPG3_APG_SPARE 0x38c2
+#define regAPG3_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dme_dme_dispdec
+// base address: 0x1b628
+#define regDME9_DME_CONTROL 0x38ca
+#define regDME9_DME_CONTROL_BASE_IDX 2
+#define regDME9_DME_MEMORY_CONTROL 0x38cb
+#define regDME9_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_vpg_vpg_dispdec
+// base address: 0x1b634
+#define regVPG9_VPG_GENERIC_PACKET_ACCESS_CTRL 0x38cd
+#define regVPG9_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GENERIC_PACKET_DATA 0x38ce
+#define regVPG9_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG9_VPG_GSP_FRAME_UPDATE_CTRL 0x38cf
+#define regVPG9_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x38d0
+#define regVPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GENERIC_STATUS 0x38d1
+#define regVPG9_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG9_VPG_MEM_PWR 0x38d2
+#define regVPG9_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG9_VPG_ISRC1_2_ACCESS_CTRL 0x38d3
+#define regVPG9_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG9_VPG_ISRC1_2_DATA 0x38d4
+#define regVPG9_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG9_VPG_MPEG_INFO0 0x38d5
+#define regVPG9_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG9_VPG_MPEG_INFO1 0x38d6
+#define regVPG9_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc3_dispdec
+// base address: 0x1b664
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_CONTROL 0x38d9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL 0x38da
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x38db
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x38dc
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x38dd
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0 0x38de
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1 0x38df
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2 0x38e0
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3 0x38e1
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4 0x38e2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5 0x38e3
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6 0x38e4
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7 0x38e5
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8 0x38e6
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL 0x38e7
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x38e8
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x38e9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x38ea
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x38eb
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x38ec
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x38ed
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x38ee
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x38ef
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x38f0
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x38f1
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x38f2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x38f3
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x38f4
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x38f5
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x38f6
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL 0x38f7
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x38f8
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x38f9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x38fa
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL 0x38ff
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL 0x3900
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3901
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x3902
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL 0x3903
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0 0x3904
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1 0x3905
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS 0x3906
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x3907
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3908
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3909
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SPARE 0x390a
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define regMPCC0_MPCC_TOP_SEL 0x0000
+#define regMPCC0_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_SEL 0x0001
+#define regMPCC0_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_OPP_ID 0x0002
+#define regMPCC0_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC0_MPCC_CONTROL 0x0003
+#define regMPCC0_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_SM_CONTROL 0x0004
+#define regMPCC0_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_UPDATE_LOCK_SEL 0x0005
+#define regMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_TOP_GAIN 0x0006
+#define regMPCC0_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_GAIN_INSIDE 0x0007
+#define regMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x0008
+#define regMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0009
+#define regMPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_BG_R_CR 0x000a
+#define regMPCC0_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC0_MPCC_BG_G_Y 0x000b
+#define regMPCC0_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC0_MPCC_BG_B_CB 0x000c
+#define regMPCC0_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC0_MPCC_MEM_PWR_CTRL 0x000d
+#define regMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC0_MPCC_STATUS 0x000e
+#define regMPCC0_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x54
+#define regMPCC1_MPCC_TOP_SEL 0x0015
+#define regMPCC1_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_SEL 0x0016
+#define regMPCC1_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_OPP_ID 0x0017
+#define regMPCC1_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC1_MPCC_CONTROL 0x0018
+#define regMPCC1_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_SM_CONTROL 0x0019
+#define regMPCC1_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_UPDATE_LOCK_SEL 0x001a
+#define regMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_TOP_GAIN 0x001b
+#define regMPCC1_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_GAIN_INSIDE 0x001c
+#define regMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x001d
+#define regMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x001e
+#define regMPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_BG_R_CR 0x001f
+#define regMPCC1_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC1_MPCC_BG_G_Y 0x0020
+#define regMPCC1_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC1_MPCC_BG_B_CB 0x0021
+#define regMPCC1_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC1_MPCC_MEM_PWR_CTRL 0x0022
+#define regMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC1_MPCC_STATUS 0x0023
+#define regMPCC1_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xa8
+#define regMPCC2_MPCC_TOP_SEL 0x002a
+#define regMPCC2_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_SEL 0x002b
+#define regMPCC2_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_OPP_ID 0x002c
+#define regMPCC2_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC2_MPCC_CONTROL 0x002d
+#define regMPCC2_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_SM_CONTROL 0x002e
+#define regMPCC2_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_UPDATE_LOCK_SEL 0x002f
+#define regMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_TOP_GAIN 0x0030
+#define regMPCC2_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_GAIN_INSIDE 0x0031
+#define regMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x0032
+#define regMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0033
+#define regMPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_BG_R_CR 0x0034
+#define regMPCC2_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC2_MPCC_BG_G_Y 0x0035
+#define regMPCC2_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC2_MPCC_BG_B_CB 0x0036
+#define regMPCC2_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC2_MPCC_MEM_PWR_CTRL 0x0037
+#define regMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC2_MPCC_STATUS 0x0038
+#define regMPCC2_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0xfc
+#define regMPCC3_MPCC_TOP_SEL 0x003f
+#define regMPCC3_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_SEL 0x0040
+#define regMPCC3_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_OPP_ID 0x0041
+#define regMPCC3_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC3_MPCC_CONTROL 0x0042
+#define regMPCC3_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_SM_CONTROL 0x0043
+#define regMPCC3_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_UPDATE_LOCK_SEL 0x0044
+#define regMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_TOP_GAIN 0x0045
+#define regMPCC3_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_GAIN_INSIDE 0x0046
+#define regMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x0047
+#define regMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0048
+#define regMPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_BG_R_CR 0x0049
+#define regMPCC3_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC3_MPCC_BG_G_Y 0x004a
+#define regMPCC3_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC3_MPCC_BG_B_CB 0x004b
+#define regMPCC3_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC3_MPCC_MEM_PWR_CTRL 0x004c
+#define regMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC3_MPCC_STATUS 0x004d
+#define regMPCC3_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define regMPCC_OGAM0_MPCC_OGAM_CONTROL 0x00a8
+#define regMPCC_OGAM0_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x00a9
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x00aa
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_CONTROL 0x00ab
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x00ac
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x00ad
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x00ae
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x00af
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x00b0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x00b1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x00b2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x00b3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x00b4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x00b5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x00b6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x00b7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x00b8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x00b9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x00ba
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B 0x00bb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G 0x00bc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R 0x00bd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x00be
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x00bf
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x00c0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x00c1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x00c2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x00c3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x00c4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x00c5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x00c6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x00c7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x00c8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x00c9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x00ca
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x00cb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x00cc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x00cd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x00ce
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x00cf
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x00d0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x00d1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x00d2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x00d3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x00d4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x00d5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x00d6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x00d7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x00d8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x00d9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x00da
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x00db
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x00dc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x00dd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B 0x00de
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G 0x00df
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R 0x00e0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x00e1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x00e2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x00e3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x00e4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x00e5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x00e6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x00e7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x00e8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x00e9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x00ea
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x00eb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x00ec
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x00ed
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x00ee
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x00ef
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x00f0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x00f1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT 0x00f2
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_MODE 0x00f3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A 0x00f4
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A 0x00f5
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A 0x00f6
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A 0x00f7
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A 0x00f8
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A 0x00f9
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B 0x00fa
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B 0x00fb
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B 0x00fc
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B 0x00fd
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B 0x00fe
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B 0x00ff
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x178
+#define regMPCC_OGAM1_MPCC_OGAM_CONTROL 0x0106
+#define regMPCC_OGAM1_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x0107
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x0108
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_CONTROL 0x0109
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x010a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x010b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x010c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x010d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x010e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x010f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x0110
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x0111
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x0112
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x0113
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x0114
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x0115
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x0116
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x0117
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x0118
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B 0x0119
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G 0x011a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R 0x011b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x011c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x011d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x011e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x011f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x0120
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x0121
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x0122
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x0123
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x0124
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x0125
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x0126
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x0127
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x0128
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x0129
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x012a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x012b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x012c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x012d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x012e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x012f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x0130
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x0131
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x0132
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x0133
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x0134
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x0135
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x0136
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x0137
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x0138
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x0139
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x013a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x013b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B 0x013c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G 0x013d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R 0x013e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x013f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x0140
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x0141
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x0142
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x0143
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x0144
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x0145
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x0146
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x0147
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x0148
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x0149
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x014a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x014b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x014c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x014d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x014e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x014f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT 0x0150
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_MODE 0x0151
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A 0x0152
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A 0x0153
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A 0x0154
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A 0x0155
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A 0x0156
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A 0x0157
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B 0x0158
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B 0x0159
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B 0x015a
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B 0x015b
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B 0x015c
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B 0x015d
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x2f0
+#define regMPCC_OGAM2_MPCC_OGAM_CONTROL 0x0164
+#define regMPCC_OGAM2_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x0165
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x0166
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_CONTROL 0x0167
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x0168
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x0169
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x016a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x016b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x016c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x016d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x016e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x016f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x0170
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x0171
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x0172
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x0173
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x0174
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x0175
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x0176
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B 0x0177
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G 0x0178
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R 0x0179
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x017a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x017b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x017c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x017d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x017e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x017f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x0180
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x0181
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x0182
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x0183
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x0184
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x0185
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x0186
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x0187
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x0188
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x0189
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x018a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x018b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x018c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x018d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x018e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x018f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x0190
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x0191
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x0192
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x0193
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x0194
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x0195
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x0196
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x0197
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x0198
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x0199
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B 0x019a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G 0x019b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R 0x019c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x019d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x019e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x019f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x01a0
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x01a1
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x01a2
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x01a3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x01a4
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x01a5
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x01a6
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x01a7
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x01a8
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x01a9
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x01aa
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x01ab
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x01ac
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x01ad
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT 0x01ae
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE 0x01af
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A 0x01b0
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A 0x01b1
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A 0x01b2
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A 0x01b3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A 0x01b4
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A 0x01b5
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B 0x01b6
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B 0x01b7
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B 0x01b8
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B 0x01b9
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B 0x01ba
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B 0x01bb
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x468
+#define regMPCC_OGAM3_MPCC_OGAM_CONTROL 0x01c2
+#define regMPCC_OGAM3_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x01c3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x01c4
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_CONTROL 0x01c5
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x01c6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x01c7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x01c8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x01c9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x01ca
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x01cb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x01cc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x01cd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x01ce
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x01cf
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x01d0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x01d1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x01d2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x01d3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x01d4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B 0x01d5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G 0x01d6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R 0x01d7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x01d8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x01d9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x01da
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x01db
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x01dc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x01dd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x01de
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x01df
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x01e0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x01e1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x01e2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x01e3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x01e4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x01e5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x01e6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x01e7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x01e8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x01e9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x01ea
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x01eb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x01ec
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x01ed
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x01ee
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x01ef
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x01f0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x01f1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x01f2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x01f3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x01f4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x01f5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x01f6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x01f7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B 0x01f8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G 0x01f9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R 0x01fa
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x01fb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x01fc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x01fd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x01fe
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x01ff
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x0200
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x0201
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x0202
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x0203
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x0204
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x0205
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x0206
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x0207
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x0208
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x0209
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x020a
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x020b
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT 0x020c
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_MODE 0x020d
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A 0x020e
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A 0x020f
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A 0x0210
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A 0x0211
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A 0x0212
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A 0x0213
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B 0x0214
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B 0x0215
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B 0x0216
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B 0x0217
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B 0x0218
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B 0x0219
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define regMPC_CLOCK_CONTROL 0x0398
+#define regMPC_CLOCK_CONTROL_BASE_IDX 3
+#define regMPC_SOFT_RESET 0x0399
+#define regMPC_SOFT_RESET_BASE_IDX 3
+#define regMPC_CRC_CTRL 0x039a
+#define regMPC_CRC_CTRL_BASE_IDX 3
+#define regMPC_CRC_SEL_CONTROL 0x039b
+#define regMPC_CRC_SEL_CONTROL_BASE_IDX 3
+#define regMPC_CRC_RESULT_AR 0x039c
+#define regMPC_CRC_RESULT_AR_BASE_IDX 3
+#define regMPC_CRC_RESULT_GB 0x039d
+#define regMPC_CRC_RESULT_GB_BASE_IDX 3
+#define regMPC_CRC_RESULT_C 0x039e
+#define regMPC_CRC_RESULT_C_BASE_IDX 3
+#define regMPC_PERFMON_EVENT_CTRL 0x03a1
+#define regMPC_PERFMON_EVENT_CTRL_BASE_IDX 3
+#define regMPC_BYPASS_BG_AR 0x03a2
+#define regMPC_BYPASS_BG_AR_BASE_IDX 3
+#define regMPC_BYPASS_BG_GB 0x03a3
+#define regMPC_BYPASS_BG_GB_BASE_IDX 3
+#define regMPC_HOST_READ_CONTROL 0x03a4
+#define regMPC_HOST_READ_CONTROL_BASE_IDX 3
+#define regMPC_DPP_PENDING_STATUS 0x03a5
+#define regMPC_DPP_PENDING_STATUS_BASE_IDX 3
+#define regMPC_PENDING_STATUS_MISC 0x03a6
+#define regMPC_PENDING_STATUS_MISC_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET0 0x03a7
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET0 0x03a8
+#define regADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET0 0x03a9
+#define regADR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET0 0x03aa
+#define regCFG_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET0 0x03ab
+#define regCUR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET1 0x03ac
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET1 0x03ad
+#define regADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET1 0x03ae
+#define regADR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET1 0x03af
+#define regCFG_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET1 0x03b0
+#define regCUR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET2 0x03b1
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET2 0x03b2
+#define regADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET2 0x03b3
+#define regADR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET2 0x03b4
+#define regCFG_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET2 0x03b5
+#define regCUR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET3 0x03b6
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET3 0x03b7
+#define regADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET3 0x03b8
+#define regADR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET3 0x03b9
+#define regCFG_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET3 0x03ba
+#define regCUR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regMPC_DWB0_MUX 0x03c6
+#define regMPC_DWB0_MUX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define regMPC_OUT0_MUX 0x03d8
+#define regMPC_OUT0_MUX_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CONTROL 0x03d9
+#define regMPC_OUT0_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CLAMP_G_Y 0x03da
+#define regMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CLAMP_B_CB 0x03db
+#define regMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT1_MUX 0x03dc
+#define regMPC_OUT1_MUX_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CONTROL 0x03dd
+#define regMPC_OUT1_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CLAMP_G_Y 0x03de
+#define regMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CLAMP_B_CB 0x03df
+#define regMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT2_MUX 0x03e0
+#define regMPC_OUT2_MUX_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CONTROL 0x03e1
+#define regMPC_OUT2_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CLAMP_G_Y 0x03e2
+#define regMPC_OUT2_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CLAMP_B_CB 0x03e3
+#define regMPC_OUT2_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT3_MUX 0x03e4
+#define regMPC_OUT3_MUX_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CONTROL 0x03e5
+#define regMPC_OUT3_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CLAMP_G_Y 0x03e6
+#define regMPC_OUT3_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CLAMP_B_CB 0x03e7
+#define regMPC_OUT3_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT_CSC_COEF_FORMAT 0x03f0
+#define regMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 3
+#define regMPC_OUT0_CSC_MODE 0x03f1
+#define regMPC_OUT0_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT0_CSC_C11_C12_A 0x03f2
+#define regMPC_OUT0_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C13_C14_A 0x03f3
+#define regMPC_OUT0_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C21_C22_A 0x03f4
+#define regMPC_OUT0_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C23_C24_A 0x03f5
+#define regMPC_OUT0_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C31_C32_A 0x03f6
+#define regMPC_OUT0_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C33_C34_A 0x03f7
+#define regMPC_OUT0_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C11_C12_B 0x03f8
+#define regMPC_OUT0_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C13_C14_B 0x03f9
+#define regMPC_OUT0_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C21_C22_B 0x03fa
+#define regMPC_OUT0_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C23_C24_B 0x03fb
+#define regMPC_OUT0_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C31_C32_B 0x03fc
+#define regMPC_OUT0_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C33_C34_B 0x03fd
+#define regMPC_OUT0_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_MODE 0x03fe
+#define regMPC_OUT1_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT1_CSC_C11_C12_A 0x03ff
+#define regMPC_OUT1_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C13_C14_A 0x0400
+#define regMPC_OUT1_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C21_C22_A 0x0401
+#define regMPC_OUT1_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C23_C24_A 0x0402
+#define regMPC_OUT1_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C31_C32_A 0x0403
+#define regMPC_OUT1_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C33_C34_A 0x0404
+#define regMPC_OUT1_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C11_C12_B 0x0405
+#define regMPC_OUT1_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C13_C14_B 0x0406
+#define regMPC_OUT1_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C21_C22_B 0x0407
+#define regMPC_OUT1_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C23_C24_B 0x0408
+#define regMPC_OUT1_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C31_C32_B 0x0409
+#define regMPC_OUT1_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C33_C34_B 0x040a
+#define regMPC_OUT1_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_MODE 0x040b
+#define regMPC_OUT2_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT2_CSC_C11_C12_A 0x040c
+#define regMPC_OUT2_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C13_C14_A 0x040d
+#define regMPC_OUT2_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C21_C22_A 0x040e
+#define regMPC_OUT2_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C23_C24_A 0x040f
+#define regMPC_OUT2_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C31_C32_A 0x0410
+#define regMPC_OUT2_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C33_C34_A 0x0411
+#define regMPC_OUT2_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C11_C12_B 0x0412
+#define regMPC_OUT2_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C13_C14_B 0x0413
+#define regMPC_OUT2_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C21_C22_B 0x0414
+#define regMPC_OUT2_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C23_C24_B 0x0415
+#define regMPC_OUT2_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C31_C32_B 0x0416
+#define regMPC_OUT2_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C33_C34_B 0x0417
+#define regMPC_OUT2_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_MODE 0x0418
+#define regMPC_OUT3_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT3_CSC_C11_C12_A 0x0419
+#define regMPC_OUT3_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C13_C14_A 0x041a
+#define regMPC_OUT3_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C21_C22_A 0x041b
+#define regMPC_OUT3_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C23_C24_A 0x041c
+#define regMPC_OUT3_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C31_C32_A 0x041d
+#define regMPC_OUT3_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C33_C34_A 0x041e
+#define regMPC_OUT3_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C11_C12_B 0x041f
+#define regMPC_OUT3_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C13_C14_B 0x0420
+#define regMPC_OUT3_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C21_C22_B 0x0421
+#define regMPC_OUT3_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C23_C24_B 0x0422
+#define regMPC_OUT3_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C31_C32_B 0x0423
+#define regMPC_OUT3_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C33_C34_B 0x0424
+#define regMPC_OUT3_CSC_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x17e1c
+#define regDC_PERFMON15_PERFCOUNTER_CNTL 0x0447
+#define regDC_PERFMON15_PERFCOUNTER_CNTL_BASE_IDX 3
+#define regDC_PERFMON15_PERFCOUNTER_CNTL2 0x0448
+#define regDC_PERFMON15_PERFCOUNTER_CNTL2_BASE_IDX 3
+#define regDC_PERFMON15_PERFCOUNTER_STATE 0x0449
+#define regDC_PERFMON15_PERFCOUNTER_STATE_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CNTL 0x044a
+#define regDC_PERFMON15_PERFMON_CNTL_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CNTL2 0x044b
+#define regDC_PERFMON15_PERFMON_CNTL2_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CVALUE_INT_MISC 0x044c
+#define regDC_PERFMON15_PERFMON_CVALUE_INT_MISC_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CVALUE_LOW 0x044d
+#define regDC_PERFMON15_PERFMON_CVALUE_LOW_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_HI 0x044e
+#define regDC_PERFMON15_PERFMON_HI_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_LOW 0x044f
+#define regDC_PERFMON15_PERFMON_LOW_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_afmt_afmt_dispdec
+// base address: 0x2646c
+#define regAFMT5_AFMT_ACP 0x091b
+#define regAFMT5_AFMT_ACP_BASE_IDX 3
+#define regAFMT5_AFMT_VBI_PACKET_CONTROL 0x091c
+#define regAFMT5_AFMT_VBI_PACKET_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL2 0x091d
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_INFO0 0x091e
+#define regAFMT5_AFMT_AUDIO_INFO0_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_INFO1 0x091f
+#define regAFMT5_AFMT_AUDIO_INFO1_BASE_IDX 3
+#define regAFMT5_AFMT_60958_0 0x0920
+#define regAFMT5_AFMT_60958_0_BASE_IDX 3
+#define regAFMT5_AFMT_60958_1 0x0921
+#define regAFMT5_AFMT_60958_1_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_CRC_CONTROL 0x0922
+#define regAFMT5_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL0 0x0923
+#define regAFMT5_AFMT_RAMP_CONTROL0_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL1 0x0924
+#define regAFMT5_AFMT_RAMP_CONTROL1_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL2 0x0925
+#define regAFMT5_AFMT_RAMP_CONTROL2_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL3 0x0926
+#define regAFMT5_AFMT_RAMP_CONTROL3_BASE_IDX 3
+#define regAFMT5_AFMT_60958_2 0x0927
+#define regAFMT5_AFMT_60958_2_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_CRC_RESULT 0x0928
+#define regAFMT5_AFMT_AUDIO_CRC_RESULT_BASE_IDX 3
+#define regAFMT5_AFMT_STATUS 0x0929
+#define regAFMT5_AFMT_STATUS_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL 0x092a
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_INFOFRAME_CONTROL0 0x092b
+#define regAFMT5_AFMT_INFOFRAME_CONTROL0_BASE_IDX 3
+#define regAFMT5_AFMT_INTERRUPT_STATUS 0x092c
+#define regAFMT5_AFMT_INTERRUPT_STATUS_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_SRC_CONTROL 0x092d
+#define regAFMT5_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_MEM_PWR 0x092f
+#define regAFMT5_AFMT_MEM_PWR_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_vpg_vpg_dispdec
+// base address: 0x264c4
+#define regVPG5_VPG_GENERIC_PACKET_ACCESS_CTRL 0x0931
+#define regVPG5_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GENERIC_PACKET_DATA 0x0932
+#define regVPG5_VPG_GENERIC_PACKET_DATA_BASE_IDX 3
+#define regVPG5_VPG_GSP_FRAME_UPDATE_CTRL 0x0933
+#define regVPG5_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x0934
+#define regVPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GENERIC_STATUS 0x0935
+#define regVPG5_VPG_GENERIC_STATUS_BASE_IDX 3
+#define regVPG5_VPG_MEM_PWR 0x0936
+#define regVPG5_VPG_MEM_PWR_BASE_IDX 3
+#define regVPG5_VPG_ISRC1_2_ACCESS_CTRL 0x0937
+#define regVPG5_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 3
+#define regVPG5_VPG_ISRC1_2_DATA 0x0938
+#define regVPG5_VPG_ISRC1_2_DATA_BASE_IDX 3
+#define regVPG5_VPG_MPEG_INFO0 0x0939
+#define regVPG5_VPG_MPEG_INFO0_BASE_IDX 3
+#define regVPG5_VPG_MPEG_INFO1 0x093a
+#define regVPG5_VPG_MPEG_INFO1_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dme_dme_dispdec
+// base address: 0x264f0
+#define regDME5_DME_CONTROL 0x093c
+#define regDME5_DME_CONTROL_BASE_IDX 3
+#define regDME5_DME_MEMORY_CONTROL 0x093d
+#define regDME5_DME_MEMORY_CONTROL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+// base address: 0x2790c
+#define regHPO_TOP_CLOCK_CONTROL 0x0e43
+#define regHPO_TOP_CLOCK_CONTROL_BASE_IDX 3
+#define regHPO_TOP_HW_CONTROL 0x0e4a
+#define regHPO_TOP_HW_CONTROL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_dp_stream_mapper_dispdec
+// base address: 0x27958
+#define regDP_STREAM_MAPPER_CONTROL0 0x0e56
+#define regDP_STREAM_MAPPER_CONTROL0_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL1 0x0e57
+#define regDP_STREAM_MAPPER_CONTROL1_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL2 0x0e58
+#define regDP_STREAM_MAPPER_CONTROL2_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL3 0x0e59
+#define regDP_STREAM_MAPPER_CONTROL3_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a698
+#define regDC_PERFMON23_PERFCOUNTER_CNTL 0x0e66
+#define regDC_PERFMON23_PERFCOUNTER_CNTL_BASE_IDX 3
+#define regDC_PERFMON23_PERFCOUNTER_CNTL2 0x0e67
+#define regDC_PERFMON23_PERFCOUNTER_CNTL2_BASE_IDX 3
+#define regDC_PERFMON23_PERFCOUNTER_STATE 0x0e68
+#define regDC_PERFMON23_PERFCOUNTER_STATE_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CNTL 0x0e69
+#define regDC_PERFMON23_PERFMON_CNTL_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CNTL2 0x0e6a
+#define regDC_PERFMON23_PERFMON_CNTL2_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CVALUE_INT_MISC 0x0e6b
+#define regDC_PERFMON23_PERFMON_CVALUE_INT_MISC_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CVALUE_LOW 0x0e6c
+#define regDC_PERFMON23_PERFMON_CVALUE_LOW_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_HI 0x0e6d
+#define regDC_PERFMON23_PERFMON_HI_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_LOW 0x0e6e
+#define regDC_PERFMON23_PERFMON_LOW_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+// base address: 0x0
+#define regABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0e7a
+#define regABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_USER_LEVEL 0x0e7b
+#define regABM0_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_TARGET_ABM_LEVEL 0x0e7c
+#define regABM0_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_CURRENT_ABM_LEVEL 0x0e7d
+#define regABM0_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_FINAL_DUTY_CYCLE 0x0e7e
+#define regABM0_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM0_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0e7f
+#define regABM0_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM0_BL1_PWM_ABM_CNTL 0x0e80
+#define regABM0_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0e81
+#define regABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_BL1_PWM_GRP2_REG_LOCK 0x0e82
+#define regABM0_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM0_DC_ABM1_CNTL 0x0e83
+#define regABM0_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM0_DC_ABM1_IPCSC_COEFF_SEL 0x0e84
+#define regABM0_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0e85
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0e86
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0e87
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0e88
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0e89
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_THRES_12 0x0e8a
+#define regABM0_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_THRES_34 0x0e8b
+#define regABM0_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_CNTL_MISC 0x0e8c
+#define regABM0_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM0_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0e8e
+#define regABM0_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_MISC_CTRL 0x0e8f
+#define regABM0_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_SUM_OF_LUMA 0x0e90
+#define regABM0_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_MAX_LUMA 0x0e91
+#define regABM0_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0e92
+#define regABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_PIXEL_COUNT 0x0e93
+#define regABM0_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0e94
+#define regABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0e95
+#define regABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0e96
+#define regABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_SAMPLE_RATE 0x0e97
+#define regABM0_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_SAMPLE_RATE 0x0e98
+#define regABM0_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0e99
+#define regABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0e9a
+#define regABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0e9b
+#define regABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0e9c
+#define regABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0e9d
+#define regABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_1 0x0e9e
+#define regABM0_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_2 0x0e9f
+#define regABM0_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_3 0x0ea0
+#define regABM0_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_4 0x0ea1
+#define regABM0_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_5 0x0ea2
+#define regABM0_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_6 0x0ea3
+#define regABM0_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_7 0x0ea4
+#define regABM0_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_8 0x0ea5
+#define regABM0_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_9 0x0ea6
+#define regABM0_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_10 0x0ea7
+#define regABM0_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_11 0x0ea8
+#define regABM0_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_12 0x0ea9
+#define regABM0_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_13 0x0eaa
+#define regABM0_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_14 0x0eab
+#define regABM0_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_15 0x0eac
+#define regABM0_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_16 0x0ead
+#define regABM0_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_17 0x0eae
+#define regABM0_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_18 0x0eaf
+#define regABM0_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_19 0x0eb0
+#define regABM0_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_20 0x0eb1
+#define regABM0_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_21 0x0eb2
+#define regABM0_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_22 0x0eb3
+#define regABM0_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_23 0x0eb4
+#define regABM0_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_24 0x0eb5
+#define regABM0_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM0_DC_ABM1_BL_MASTER_LOCK 0x0eb6
+#define regABM0_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm1_dispdec
+// base address: 0x104
+#define regABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0ebb
+#define regABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_USER_LEVEL 0x0ebc
+#define regABM1_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_TARGET_ABM_LEVEL 0x0ebd
+#define regABM1_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_CURRENT_ABM_LEVEL 0x0ebe
+#define regABM1_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_FINAL_DUTY_CYCLE 0x0ebf
+#define regABM1_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM1_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0ec0
+#define regABM1_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM1_BL1_PWM_ABM_CNTL 0x0ec1
+#define regABM1_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0ec2
+#define regABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_BL1_PWM_GRP2_REG_LOCK 0x0ec3
+#define regABM1_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM1_DC_ABM1_CNTL 0x0ec4
+#define regABM1_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM1_DC_ABM1_IPCSC_COEFF_SEL 0x0ec5
+#define regABM1_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0ec6
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0ec7
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0ec8
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0ec9
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0eca
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_THRES_12 0x0ecb
+#define regABM1_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_THRES_34 0x0ecc
+#define regABM1_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_CNTL_MISC 0x0ecd
+#define regABM1_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM1_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0ecf
+#define regABM1_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_MISC_CTRL 0x0ed0
+#define regABM1_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_SUM_OF_LUMA 0x0ed1
+#define regABM1_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_MAX_LUMA 0x0ed2
+#define regABM1_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0ed3
+#define regABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_PIXEL_COUNT 0x0ed4
+#define regABM1_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0ed5
+#define regABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0ed6
+#define regABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0ed7
+#define regABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_SAMPLE_RATE 0x0ed8
+#define regABM1_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_SAMPLE_RATE 0x0ed9
+#define regABM1_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0eda
+#define regABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0edb
+#define regABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0edc
+#define regABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0edd
+#define regABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0ede
+#define regABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_1 0x0edf
+#define regABM1_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_2 0x0ee0
+#define regABM1_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_3 0x0ee1
+#define regABM1_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_4 0x0ee2
+#define regABM1_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_5 0x0ee3
+#define regABM1_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_6 0x0ee4
+#define regABM1_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_7 0x0ee5
+#define regABM1_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_8 0x0ee6
+#define regABM1_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_9 0x0ee7
+#define regABM1_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_10 0x0ee8
+#define regABM1_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_11 0x0ee9
+#define regABM1_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_12 0x0eea
+#define regABM1_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_13 0x0eeb
+#define regABM1_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_14 0x0eec
+#define regABM1_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_15 0x0eed
+#define regABM1_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_16 0x0eee
+#define regABM1_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_17 0x0eef
+#define regABM1_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_18 0x0ef0
+#define regABM1_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_19 0x0ef1
+#define regABM1_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_20 0x0ef2
+#define regABM1_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_21 0x0ef3
+#define regABM1_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_22 0x0ef4
+#define regABM1_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_23 0x0ef5
+#define regABM1_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_24 0x0ef6
+#define regABM1_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM1_DC_ABM1_BL_MASTER_LOCK 0x0ef7
+#define regABM1_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm2_dispdec
+// base address: 0x208
+#define regABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0efc
+#define regABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_USER_LEVEL 0x0efd
+#define regABM2_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_TARGET_ABM_LEVEL 0x0efe
+#define regABM2_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_CURRENT_ABM_LEVEL 0x0eff
+#define regABM2_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_FINAL_DUTY_CYCLE 0x0f00
+#define regABM2_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM2_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0f01
+#define regABM2_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM2_BL1_PWM_ABM_CNTL 0x0f02
+#define regABM2_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0f03
+#define regABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_BL1_PWM_GRP2_REG_LOCK 0x0f04
+#define regABM2_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM2_DC_ABM1_CNTL 0x0f05
+#define regABM2_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM2_DC_ABM1_IPCSC_COEFF_SEL 0x0f06
+#define regABM2_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0f07
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0f08
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0f09
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0f0a
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0f0b
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_THRES_12 0x0f0c
+#define regABM2_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_THRES_34 0x0f0d
+#define regABM2_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_CNTL_MISC 0x0f0e
+#define regABM2_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM2_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0f10
+#define regABM2_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_MISC_CTRL 0x0f11
+#define regABM2_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_SUM_OF_LUMA 0x0f12
+#define regABM2_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_MAX_LUMA 0x0f13
+#define regABM2_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0f14
+#define regABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_PIXEL_COUNT 0x0f15
+#define regABM2_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0f16
+#define regABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0f17
+#define regABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0f18
+#define regABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_SAMPLE_RATE 0x0f19
+#define regABM2_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_SAMPLE_RATE 0x0f1a
+#define regABM2_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0f1b
+#define regABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0f1c
+#define regABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0f1d
+#define regABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0f1e
+#define regABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0f1f
+#define regABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_1 0x0f20
+#define regABM2_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_2 0x0f21
+#define regABM2_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_3 0x0f22
+#define regABM2_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_4 0x0f23
+#define regABM2_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_5 0x0f24
+#define regABM2_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_6 0x0f25
+#define regABM2_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_7 0x0f26
+#define regABM2_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_8 0x0f27
+#define regABM2_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_9 0x0f28
+#define regABM2_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_10 0x0f29
+#define regABM2_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_11 0x0f2a
+#define regABM2_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_12 0x0f2b
+#define regABM2_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_13 0x0f2c
+#define regABM2_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_14 0x0f2d
+#define regABM2_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_15 0x0f2e
+#define regABM2_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_16 0x0f2f
+#define regABM2_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_17 0x0f30
+#define regABM2_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_18 0x0f31
+#define regABM2_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_19 0x0f32
+#define regABM2_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_20 0x0f33
+#define regABM2_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_21 0x0f34
+#define regABM2_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_22 0x0f35
+#define regABM2_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_23 0x0f36
+#define regABM2_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_24 0x0f37
+#define regABM2_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM2_DC_ABM1_BL_MASTER_LOCK 0x0f38
+#define regABM2_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm3_dispdec
+// base address: 0x30c
+#define regABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0f3d
+#define regABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_USER_LEVEL 0x0f3e
+#define regABM3_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_TARGET_ABM_LEVEL 0x0f3f
+#define regABM3_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_CURRENT_ABM_LEVEL 0x0f40
+#define regABM3_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_FINAL_DUTY_CYCLE 0x0f41
+#define regABM3_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM3_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0f42
+#define regABM3_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM3_BL1_PWM_ABM_CNTL 0x0f43
+#define regABM3_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0f44
+#define regABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_BL1_PWM_GRP2_REG_LOCK 0x0f45
+#define regABM3_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM3_DC_ABM1_CNTL 0x0f46
+#define regABM3_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM3_DC_ABM1_IPCSC_COEFF_SEL 0x0f47
+#define regABM3_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0f48
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0f49
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0f4a
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0f4b
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0f4c
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_THRES_12 0x0f4d
+#define regABM3_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_THRES_34 0x0f4e
+#define regABM3_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_CNTL_MISC 0x0f4f
+#define regABM3_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM3_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0f51
+#define regABM3_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_MISC_CTRL 0x0f52
+#define regABM3_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_SUM_OF_LUMA 0x0f53
+#define regABM3_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_MAX_LUMA 0x0f54
+#define regABM3_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0f55
+#define regABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_PIXEL_COUNT 0x0f56
+#define regABM3_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0f57
+#define regABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0f58
+#define regABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0f59
+#define regABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_SAMPLE_RATE 0x0f5a
+#define regABM3_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_SAMPLE_RATE 0x0f5b
+#define regABM3_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0f5c
+#define regABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0f5d
+#define regABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0f5e
+#define regABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0f5f
+#define regABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0f60
+#define regABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_1 0x0f61
+#define regABM3_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_2 0x0f62
+#define regABM3_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_3 0x0f63
+#define regABM3_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_4 0x0f64
+#define regABM3_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_5 0x0f65
+#define regABM3_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_6 0x0f66
+#define regABM3_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_7 0x0f67
+#define regABM3_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_8 0x0f68
+#define regABM3_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_9 0x0f69
+#define regABM3_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_10 0x0f6a
+#define regABM3_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_11 0x0f6b
+#define regABM3_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_12 0x0f6c
+#define regABM3_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_13 0x0f6d
+#define regABM3_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_14 0x0f6e
+#define regABM3_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_15 0x0f6f
+#define regABM3_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_16 0x0f70
+#define regABM3_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_17 0x0f71
+#define regABM3_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_18 0x0f72
+#define regABM3_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_19 0x0f73
+#define regABM3_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_20 0x0f74
+#define regABM3_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_21 0x0f75
+#define regABM3_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_22 0x0f76
+#define regABM3_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_23 0x0f77
+#define regABM3_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_24 0x0f78
+#define regABM3_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM3_DC_ABM1_BL_MASTER_LOCK 0x0f79
+#define regABM3_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_link_enc0_dispdec
+// base address: 0x2656c
+#define regHDMI_LINK_ENC_CONTROL 0x095b
+#define regHDMI_LINK_ENC_CONTROL_BASE_IDX 3
+#define regHDMI_LINK_ENC_CLK_CTRL 0x095c
+#define regHDMI_LINK_ENC_CLK_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_frl_enc0_dispdec
+// base address: 0x26594
+#define regHDMI_FRL_ENC_CONFIG 0x0965
+#define regHDMI_FRL_ENC_CONFIG_BASE_IDX 3
+#define regHDMI_FRL_ENC_CONFIG2 0x0966
+#define regHDMI_FRL_ENC_CONFIG2_BASE_IDX 3
+#define regHDMI_FRL_ENC_METER_BUFFER_STATUS 0x0967
+#define regHDMI_FRL_ENC_METER_BUFFER_STATUS_BASE_IDX 3
+#define regHDMI_FRL_ENC_MEM_CTRL 0x0968
+#define regHDMI_FRL_ENC_MEM_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dispdec
+// base address: 0x2634c
+#define regHDMI_STREAM_ENC_CLOCK_CONTROL 0x08d3
+#define regHDMI_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 3
+#define regHDMI_STREAM_ENC_INPUT_MUX_CONTROL 0x08d5
+#define regHDMI_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x08d6
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x08d7
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2 0x08d8
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_tb_enc0_dispdec
+// base address: 0x2637c
+#define regHDMI_TB_ENC_CONTROL 0x08df
+#define regHDMI_TB_ENC_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_PIXEL_FORMAT 0x08e0
+#define regHDMI_TB_ENC_PIXEL_FORMAT_BASE_IDX 3
+#define regHDMI_TB_ENC_PACKET_CONTROL 0x08e1
+#define regHDMI_TB_ENC_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_PACKET_CONTROL 0x08e2
+#define regHDMI_TB_ENC_ACR_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL1 0x08e3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL1_BASE_IDX 3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL2 0x08e4
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL2_BASE_IDX 3
+#define regHDMI_TB_ENC_GC_CONTROL 0x08e5
+#define regHDMI_TB_ENC_GC_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL0 0x08e6
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL0_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL1 0x08e7
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL1_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL2 0x08e8
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL2_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET0_1_LINE 0x08e9
+#define regHDMI_TB_ENC_GENERIC_PACKET0_1_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET2_3_LINE 0x08ea
+#define regHDMI_TB_ENC_GENERIC_PACKET2_3_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET4_5_LINE 0x08eb
+#define regHDMI_TB_ENC_GENERIC_PACKET4_5_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET6_7_LINE 0x08ec
+#define regHDMI_TB_ENC_GENERIC_PACKET6_7_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET8_9_LINE 0x08ed
+#define regHDMI_TB_ENC_GENERIC_PACKET8_9_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET10_11_LINE 0x08ee
+#define regHDMI_TB_ENC_GENERIC_PACKET10_11_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET12_13_LINE 0x08ef
+#define regHDMI_TB_ENC_GENERIC_PACKET12_13_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET14_LINE 0x08f0
+#define regHDMI_TB_ENC_GENERIC_PACKET14_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_DB_CONTROL 0x08f1
+#define regHDMI_TB_ENC_DB_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_32_0 0x08f2
+#define regHDMI_TB_ENC_ACR_32_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_32_1 0x08f3
+#define regHDMI_TB_ENC_ACR_32_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_44_0 0x08f4
+#define regHDMI_TB_ENC_ACR_44_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_44_1 0x08f5
+#define regHDMI_TB_ENC_ACR_44_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_48_0 0x08f6
+#define regHDMI_TB_ENC_ACR_48_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_48_1 0x08f7
+#define regHDMI_TB_ENC_ACR_48_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_STATUS_0 0x08f8
+#define regHDMI_TB_ENC_ACR_STATUS_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_STATUS_1 0x08f9
+#define regHDMI_TB_ENC_ACR_STATUS_1_BASE_IDX 3
+#define regHDMI_TB_ENC_BUFFER_CONTROL 0x08fb
+#define regHDMI_TB_ENC_BUFFER_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_MEM_CTRL 0x08fe
+#define regHDMI_TB_ENC_MEM_CTRL_BASE_IDX 3
+#define regHDMI_TB_ENC_METADATA_PACKET_CONTROL 0x08ff
+#define regHDMI_TB_ENC_METADATA_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_H_ACTIVE_BLANK 0x0900
+#define regHDMI_TB_ENC_H_ACTIVE_BLANK_BASE_IDX 3
+#define regHDMI_TB_ENC_HC_ACTIVE_BLANK 0x0901
+#define regHDMI_TB_ENC_HC_ACTIVE_BLANK_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_CNTL 0x0903
+#define regHDMI_TB_ENC_CRC_CNTL_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_RESULT_0 0x0904
+#define regHDMI_TB_ENC_CRC_RESULT_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ENCRYPTION_CONTROL 0x0907
+#define regHDMI_TB_ENC_ENCRYPTION_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_MODE 0x0908
+#define regHDMI_TB_ENC_MODE_BASE_IDX 3
+#define regHDMI_TB_ENC_INPUT_FIFO_STATUS 0x0909
+#define regHDMI_TB_ENC_INPUT_FIFO_STATUS_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_RESULT_1 0x090a
+#define regHDMI_TB_ENC_CRC_RESULT_1_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm0_dispdec
+// base address: 0x0
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_CONTROL 0x0453
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R 0x0454
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G 0x0455
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B 0x0456
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R 0x0457
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B 0x0458
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX 0x0459
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA 0x045a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x045b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x045c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x045d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x045e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x045f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0460
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0461
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0462
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0463
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0464
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0465
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0466
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0467
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0468
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0469
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x046a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x046b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x046c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x046d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x046e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x046f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0470
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0471
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0472
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0473
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0474
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0475
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0476
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0477
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0478
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0479
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x047a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x047b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x047c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x047d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x047e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x047f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0480
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0481
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0482
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0483
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0484
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0485
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0486
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0487
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0488
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0489
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_MODE 0x048a
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_INDEX 0x048b
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA 0x048c
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT 0x048d
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x048e
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x048f
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0490
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0491
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0492
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_CONTROL 0x0493
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX 0x0494
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA 0x0495
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL 0x0496
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0497
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0498
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0499
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x049a
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x049b
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x049c
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x049d
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x049e
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x049f
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x04a0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x04a1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x04a2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x04a3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x04a4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x04a5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x04a6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x04a7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x04a8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x04a9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x04aa
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x04ab
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x04ac
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x04ad
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x04ae
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x04af
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x04b0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x04b1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x04b2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x04b3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x04b4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x04b5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x04b6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x04b7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x04b8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x04b9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x04ba
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x04bb
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x04bc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x04bd
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x04be
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x04bf
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x04c0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x04c1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x04c2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x04c3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x04c4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x04c5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x04c6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x04c7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x04c8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x04c9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x04ca
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x04cb
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x04cc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x04cd
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x04ce
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x04cf
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x04d0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x04d1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x04d2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x04d3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x04d4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x04d5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x04d6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x04d7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x04d8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x04d9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x04da
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x04db
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x04dc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL 0x04dd
+#define regMPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm1_dispdec
+// base address: 0x240
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_CONTROL 0x04e3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R 0x04e4
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G 0x04e5
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B 0x04e6
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R 0x04e7
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B 0x04e8
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX 0x04e9
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA 0x04ea
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x04eb
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x04ec
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x04ed
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x04ee
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x04ef
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x04f0
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x04f1
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x04f2
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x04f3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x04f4
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x04f5
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x04f6
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x04f7
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x04f8
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x04f9
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x04fa
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x04fb
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x04fc
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x04fd
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x04fe
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x04ff
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0500
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0501
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0502
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0503
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0504
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0505
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0506
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0507
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0508
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0509
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x050a
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x050b
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x050c
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x050d
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x050e
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x050f
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0510
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0511
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0512
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0513
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0514
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0515
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0516
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0517
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0518
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0519
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_MODE 0x051a
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_INDEX 0x051b
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA 0x051c
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT 0x051d
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x051e
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x051f
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0520
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0521
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0522
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_CONTROL 0x0523
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX 0x0524
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA 0x0525
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL 0x0526
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0527
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0528
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0529
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x052a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x052b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x052c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x052d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x052e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x052f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x0530
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x0531
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x0532
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x0533
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x0534
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x0535
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x0536
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x0537
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x0538
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x0539
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x053a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x053b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x053c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x053d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x053e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x053f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x0540
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x0541
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x0542
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x0543
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x0544
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x0545
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x0546
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x0547
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x0548
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x0549
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x054a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x054b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x054c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x054d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x054e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x054f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x0550
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x0551
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x0552
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x0553
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x0554
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x0555
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x0556
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x0557
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x0558
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x0559
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x055a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x055b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x055c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x055d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x055e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x055f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x0560
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x0561
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x0562
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x0563
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x0564
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x0565
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x0566
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x0567
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x0568
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x0569
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x056a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x056b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x056c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL 0x056d
+#define regMPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm2_dispdec
+// base address: 0x480
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_CONTROL 0x0573
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R 0x0574
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G 0x0575
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B 0x0576
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R 0x0577
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B 0x0578
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX 0x0579
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA 0x057a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x057b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x057c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x057d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x057e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x057f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0580
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0581
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0582
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0583
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0584
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0585
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0586
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0587
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0588
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0589
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x058a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x058b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x058c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x058d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x058e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x058f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0590
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0591
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0592
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0593
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0594
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0595
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0596
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0597
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0598
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0599
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x059a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x059b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x059c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x059d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x059e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x059f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x05a0
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x05a1
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x05a2
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x05a3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x05a4
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x05a5
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x05a6
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x05a7
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x05a8
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x05a9
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_MODE 0x05aa
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_INDEX 0x05ab
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA 0x05ac
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT 0x05ad
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x05ae
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x05af
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x05b0
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x05b1
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x05b2
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_CONTROL 0x05b3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX 0x05b4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA 0x05b5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL 0x05b6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x05b7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x05b8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x05b9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x05ba
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x05bb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x05bc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x05bd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x05be
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x05bf
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x05c0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x05c1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x05c2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x05c3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x05c4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x05c5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x05c6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x05c7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x05c8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x05c9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x05ca
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x05cb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x05cc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x05cd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x05ce
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x05cf
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x05d0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x05d1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x05d2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x05d3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x05d4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x05d5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x05d6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x05d7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x05d8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x05d9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x05da
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x05db
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x05dc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x05dd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x05de
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x05df
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x05e0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x05e1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x05e2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x05e3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x05e4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x05e5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x05e6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x05e7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x05e8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x05e9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x05ea
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x05eb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x05ec
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x05ed
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x05ee
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x05ef
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x05f0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x05f1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x05f2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x05f3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x05f4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x05f5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x05f6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x05f7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x05f8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x05f9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x05fa
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x05fb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x05fc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL 0x05fd
+#define regMPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm3_dispdec
+// base address: 0x6c0
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_CONTROL 0x0603
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R 0x0604
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G 0x0605
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B 0x0606
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R 0x0607
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B 0x0608
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX 0x0609
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA 0x060a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x060b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x060c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x060d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x060e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x060f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0610
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0611
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0612
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0613
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0614
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0615
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0616
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0617
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0618
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0619
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x061a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x061b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x061c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x061d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x061e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x061f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0620
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0621
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0622
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0623
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0624
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0625
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0626
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0627
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0628
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0629
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x062a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x062b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x062c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x062d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x062e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x062f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0630
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0631
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0632
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0633
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0634
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0635
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0636
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0637
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0638
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0639
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_MODE 0x063a
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_INDEX 0x063b
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA 0x063c
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT 0x063d
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x063e
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x063f
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0640
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0641
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0642
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_CONTROL 0x0643
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX 0x0644
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA 0x0645
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL 0x0646
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0647
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0648
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0649
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x064a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x064b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x064c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x064d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x064e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x064f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x0650
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x0651
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x0652
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x0653
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x0654
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x0655
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x0656
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x0657
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x0658
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x0659
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x065a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x065b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x065c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x065d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x065e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x065f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x0660
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x0661
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x0662
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x0663
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x0664
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x0665
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x0666
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x0667
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x0668
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x0669
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x066a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x066b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x066c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x066d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x066e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x066f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x0670
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x0671
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x0672
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x0673
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x0674
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x0675
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x0676
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x0677
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x0678
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x0679
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x067a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x067b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x067c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x067d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x067e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x067f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x0680
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x0681
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x0682
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x0683
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x0684
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x0685
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x0686
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x0687
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x0688
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x0689
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x068a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x068b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x068c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL 0x068d
+#define regMPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_dlpc_dlpc_dispdec
+// base address: 0x0
+#define regDLPC_ENABLE 0x2fe8
+#define regDLPC_ENABLE_BASE_IDX 2
+#define regDLPC_CURRENT_COUNT 0x2fe9
+#define regDLPC_CURRENT_COUNT_BASE_IDX 2
+#define regDLPC_OPTC_SNAPSHOT 0x2fea
+#define regDLPC_OPTC_SNAPSHOT_BASE_IDX 2
+#define regDLPC_PWRUP 0x2feb
+#define regDLPC_PWRUP_BASE_IDX 2
+#define regDLPC_OTG_RESYNC 0x2fec
+#define regDLPC_OTG_RESYNC_BASE_IDX 2
+#define regDLPC_DCN_ZSC_LONO_PWRUP 0x2fed
+#define regDLPC_DCN_ZSC_LONO_PWRUP_BASE_IDX 2
+#define regDLPC_SPARE 0x2fee
+#define regDLPC_SPARE_BASE_IDX 2
+#define regDLPC_COUNTER_INIT_VALUE 0x2fef
+#define regDLPC_COUNTER_INIT_VALUE_BASE_IDX 2
+
+
+// addressBlock: dce_dpia_dpia_mu0_dpiadec
+// base address: 0x72000
+#define regDPIA_MU_CLOCK_CTRL 0x13800
+#define regDPIA_MU_CLOCK_CTRL_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT0 0x13801
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT0 0x13802
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT1 0x13803
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT1 0x13804
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT2 0x13805
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT2 0x13806
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT3 0x13807
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT3 0x13808
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT0 0x13811
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT1 0x13812
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT2 0x13813
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT3 0x13814
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_TPI_MAX_CREDIT_COUNT 0x13819
+#define regDPIA_MU_TPI_MAX_CREDIT_COUNT_BASE_IDX 3
+#define regDPIA_MU_INTERRUPT_STATUS 0x1381a
+#define regDPIA_MU_INTERRUPT_STATUS_BASE_IDX 3
+#define regDPIA_MU_INTERRUPT_CTRL 0x1381b
+#define regDPIA_MU_INTERRUPT_CTRL_BASE_IDX 3
+#define regDPIA_MU_LOCAL_INTERRUPT_CTRL 0x1381c
+#define regDPIA_MU_LOCAL_INTERRUPT_CTRL_BASE_IDX 3
+#define regDPIA_MU_LOCAL_INTERRUPT_ACK 0x1381d
+#define regDPIA_MU_LOCAL_INTERRUPT_ACK_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL 0x1381e
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL2 0x1381f
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL2_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_STATUS 0x13820
+#define regDPIA_MU_RBBMIF_STATUS_BASE_IDX 3
+#define regDPIA_MU_MICROSECOND_REF_CTRL 0x13821
+#define regDPIA_MU_MICROSECOND_REF_CTRL_BASE_IDX 3
+#define regDPIA_MU_PORT_ADP_STATUS 0x13822
+#define regDPIA_MU_PORT_ADP_STATUS_BASE_IDX 3
+#define regDPIA_GLUE_CTRL 0x13823
+#define regDPIA_GLUE_CTRL_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL0 0x13825
+#define regDPIA_PERF_COUNT_CONTROL0_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL1 0x13826
+#define regDPIA_PERF_COUNT_CONTROL1_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL2 0x13827
+#define regDPIA_PERF_COUNT_CONTROL2_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL3 0x13828
+#define regDPIA_PERF_COUNT_CONTROL3_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL4 0x13829
+#define regDPIA_PERF_COUNT_CONTROL4_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL5 0x1382a
+#define regDPIA_PERF_COUNT_CONTROL5_BASE_IDX 3
+#define regDPIA_PERF_COUNT_INDEX 0x1382b
+#define regDPIA_PERF_COUNT_INDEX_BASE_IDX 3
+#define regDPIA_PERF_COUNT_DATA_LO 0x1382c
+#define regDPIA_PERF_COUNT_DATA_LO_BASE_IDX 3
+#define regDPIA_MU_SPARE 0x1382d
+#define regDPIA_MU_SPARE_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+// base address: 0x0
+#define regAZCONTROLLER1_CORB_WRITE_POINTER 0x0000
+#define regAZCONTROLLER1_CORB_WRITE_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_READ_POINTER 0x0000
+#define regAZCONTROLLER1_CORB_READ_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_CONTROL 0x0001
+#define regAZCONTROLLER1_CORB_CONTROL_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_STATUS 0x0001
+#define regAZCONTROLLER1_CORB_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_SIZE 0x0001
+#define regAZCONTROLLER1_CORB_SIZE_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS 0x0002
+#define regAZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS 0x0003
+#define regAZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_WRITE_POINTER 0x0004
+#define regAZCONTROLLER1_RIRB_WRITE_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_RESPONSE_INTERRUPT_COUNT 0x0004
+#define regAZCONTROLLER1_RESPONSE_INTERRUPT_COUNT_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_CONTROL 0x0005
+#define regAZCONTROLLER1_RIRB_CONTROL_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_STATUS 0x0005
+#define regAZCONTROLLER1_RIRB_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_SIZE 0x0005
+#define regAZCONTROLLER1_RIRB_SIZE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE 0x0007
+#define regAZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_STATUS 0x0008
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS 0x000a
+#define regAZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS 0x000b
+#define regAZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS 0x074c
+#define regAZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS_BASE_IDX 1
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+// base address: 0x0
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x0006
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x0006
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 0
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+// base address: 0x0
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x0006
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x0006
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_BASE_IDX 0
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux0_dispdec
+// base address: 0x14de0
+#define regDIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL 0x1eb8
+#define regDIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux1_dispdec
+// base address: 0x14de4
+#define regDIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL 0x1eb9
+#define regDIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux2_dispdec
+// base address: 0x14de8
+#define regDIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL 0x1eba
+#define regDIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux3_dispdec
+// base address: 0x14dec
+#define regDIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL 0x1ebb
+#define regDIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig_stream_mapper_dispdec
+// base address: 0x0
+#define regDIG0_STREAM_MAPPER_CONTROL 0x1f0d
+#define regDIG0_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG1_STREAM_MAPPER_CONTROL 0x1f0e
+#define regDIG1_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG2_STREAM_MAPPER_CONTROL 0x1f0f
+#define regDIG2_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG3_STREAM_MAPPER_CONTROL 0x1f10
+#define regDIG3_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG4_STREAM_MAPPER_CONTROL 0x1f11
+#define regDIG4_STREAM_MAPPER_CONTROL_BASE_IDX 2
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_sh_mask.h
new file mode 100644
index 000000000000..d0e95d324053
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_sh_mask.h
@@ -0,0 +1,53464 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+#ifndef _dcn_3_5_1_SH_MASK_HEADER
+#define _dcn_3_5_1_SH_MASK_HEADER
+
+#define AZCONTROLLER0_CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000L
+#define AZCONTROLLER0_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define AZCONTROLLER0_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define AZCONTROLLER0_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x01L
+#define AZCONTROLLER0_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x02L
+#define AZCONTROLLER0_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define AZCONTROLLER0_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x01L
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_MASK 0x0003L
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0x00F0L
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+#define AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000L
+#define AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0x00FFL
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x01L
+#define AZCONTROLLER0_RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x02L
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x04L
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x01L
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x04L
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_MASK 0x0003L
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0x00F0L
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0x0FFFFFFFL
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xF0000000L
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0000FFFFL
+#define AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xFFFFFFFFL
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x00000001L
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x00000002L
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x00000001L
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007EL
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+#define AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0x0000FFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xFFFFFFFFL
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0x000000FFL
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0x000000FFL
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
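Each endpoint block above follows the same convention: every register field gets a __SHIFT define giving its bit offset and a _MASK define giving its bit positions, so driver code can pack and unpack fields with ordinary mask-and-shift arithmetic. The following is a minimal, self-contained C sketch of that pattern; the FIELD_GET/FIELD_SET helpers are hypothetical illustrations (the kernel driver uses its own field-access macros), and only the two STREAM_ID defines are copied from the list above.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helpers for illustration only; they rely on the
     * name##__SHIFT / name##_MASK naming convention used by the defines above. */
    #define FIELD_GET(val, name)  (((val) & name##_MASK) >> name##__SHIFT)
    #define FIELD_SET(val, name, f) \
            (((val) & ~name##_MASK) | (((uint32_t)(f) << name##__SHIFT) & name##_MASK))

    /* Shift/mask pair copied verbatim from the endpoint 5 definitions above. */
    #define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
    #define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L

    int main(void)
    {
            uint32_t reg = 0;

            /* Program stream ID 3 into a CHANNEL_STREAM_ID register image... */
            reg = FIELD_SET(reg, AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID, 3);

            /* ...and read the field back out of the packed value. */
            printf("STREAM_ID = %u\n",
                   (unsigned)FIELD_GET(reg, AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID));
            return 0;
    }

The same two-define scheme repeats unchanged for every endpoint instance (AZF0INPUTENDPOINT4, 5, 6, ...), which is why these generated headers differ only in the instance prefix.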
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
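Note: every register field above is exposed as a `__SHIFT`/`_MASK` pair. As a minimal, hedged sketch of how such pairs are conventionally consumed (not the driver's actual helpers; `field_get()`, `field_set()` and the choice of AUDIO_DESCRIPTOR0 are illustrative assumptions only):

/* Sketch: generic bitfield access built on the __SHIFT/_MASK pairs above. */
#include <stdint.h>

static inline uint32_t field_get(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg_val & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg_val, uint32_t mask, uint32_t shift, uint32_t v)
{
	/* Clear the field, then merge in the new value, truncated to the field width. */
	return (reg_val & ~mask) | ((v << shift) & mask);
}

/* Example: extract MAX_CHANNELS from a raw AUDIO_DESCRIPTOR0 register value. */
static inline uint32_t audio_desc0_max_channels(uint32_t audio_descriptor0_val)
{
	return field_get(audio_descriptor0_val,
			 AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK,
			 AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT);
}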
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE_MASK 0x00000100L
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN__SHIFT 0x2
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN__SHIFT 0x3
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN__SHIFT 0x4
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN__SHIFT 0x5
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN__SHIFT 0x6
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN__SHIFT 0x7
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN_MASK 0x00000004L
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN_MASK 0x00000008L
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN_MASK 0x00000010L
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN_MASK 0x00000020L
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN_MASK 0x00000040L
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN_MASK 0x00000080L
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_PHASE__SHIFT 0x0
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_MODULO__SHIFT 0x10
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_MODULO_MASK 0x00FF0000L
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE_MASK 0x00800000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE_MASK 0x04000000L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN__SHIFT 0x3
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL__SHIFT 0x4
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_EN__SHIFT 0x7
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_SRC_SEL__SHIFT 0x8
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_EN__SHIFT 0xb
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_SRC_SEL__SHIFT 0xc
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_EN__SHIFT 0xf
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN_MASK 0x00000008L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL_MASK 0x00000070L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_EN_MASK 0x00000080L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_SRC_SEL_MASK 0x00000700L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_EN_MASK 0x00000800L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_SRC_SEL_MASK 0x00007000L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_EN_MASK 0x00008000L
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE_MASK 0x00000100L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE__SHIFT 0x5
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE__SHIFT 0x6
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE__SHIFT 0x7
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE__SHIFT 0x8
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DTBCLK0_ENABLE__SHIFT 0x9
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE_MASK 0x00000100L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DTBCLK0_ENABLE_MASK 0x00000200L
+#define DCCG_GLOBAL_FGCG_REP_CNTL__DCCG_GLOBAL_FGCG_REP_DIS__SHIFT 0x0
+#define DCCG_GLOBAL_FGCG_REP_CNTL__DCCG_GLOBAL_FGCG_REP_DIS_MASK 0x00000001L
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_SRC_SEL__SHIFT 0x0
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_EN__SHIFT 0x3
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_SRC_SEL__SHIFT 0x4
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_EN__SHIFT 0x7
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_SRC_SEL__SHIFT 0x8
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_EN__SHIFT 0xb
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_SRC_SEL__SHIFT 0xc
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_EN__SHIFT 0xf
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_SRC_SEL_MASK 0x00000007L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_EN_MASK 0x00000008L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_SRC_SEL_MASK 0x00000070L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_EN_MASK 0x00000080L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_SRC_SEL_MASK 0x00000700L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_EN_MASK 0x00000800L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_SRC_SEL_MASK 0x00007000L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_EN_MASK 0x00008000L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_SRC_SEL__SHIFT 0x0
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_EN__SHIFT 0x3
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_SRC_SEL__SHIFT 0x4
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_EN__SHIFT 0x7
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_SRC_SEL_MASK 0x00000007L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_EN_MASK 0x00000008L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_SRC_SEL_MASK 0x00000070L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_EN_MASK 0x00000080L
+#define DTBCLK_P_CNTL__DTBCLK_P0_SRC_SEL__SHIFT 0x0
+#define DTBCLK_P_CNTL__DTBCLK_P0_EN__SHIFT 0x2
+#define DTBCLK_P_CNTL__DTBCLK_P1_SRC_SEL__SHIFT 0x3
+#define DTBCLK_P_CNTL__DTBCLK_P1_EN__SHIFT 0x5
+#define DTBCLK_P_CNTL__DTBCLK_P2_SRC_SEL__SHIFT 0x6
+#define DTBCLK_P_CNTL__DTBCLK_P2_EN__SHIFT 0x8
+#define DTBCLK_P_CNTL__DTBCLK_P3_SRC_SEL__SHIFT 0x9
+#define DTBCLK_P_CNTL__DTBCLK_P3_EN__SHIFT 0xb
+#define DTBCLK_P_CNTL__DTBCLK_P0_SRC_SEL_MASK 0x00000003L
+#define DTBCLK_P_CNTL__DTBCLK_P0_EN_MASK 0x00000004L
+#define DTBCLK_P_CNTL__DTBCLK_P1_SRC_SEL_MASK 0x00000018L
+#define DTBCLK_P_CNTL__DTBCLK_P1_EN_MASK 0x00000020L
+#define DTBCLK_P_CNTL__DTBCLK_P2_SRC_SEL_MASK 0x000000C0L
+#define DTBCLK_P_CNTL__DTBCLK_P2_EN_MASK 0x00000100L
+#define DTBCLK_P_CNTL__DTBCLK_P3_SRC_SEL_MASK 0x00000600L
+#define DTBCLK_P_CNTL__DTBCLK_P3_EN_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P0_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P1_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P2_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P3_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_ROOT_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_GATE_DISABLE__SHIFT 0x7
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_ROOT_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_ROOT_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_ROOT_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P0_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P1_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P2_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P3_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_ROOT_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_GATE_DISABLE_MASK 0x00000080L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_ROOT_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_ROOT_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_ROOT_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_GATE_DISABLE_MASK 0x00002000L
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE__SHIFT 0x0
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO__SHIFT 0x10
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO_MASK 0x00FF0000L
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE__SHIFT 0x0
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO__SHIFT 0x10
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO_MASK 0x00FF0000L
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE__SHIFT 0x0
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO__SHIFT 0x10
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO_MASK 0x00FF0000L
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK1__SHIFT 0x0
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK2__SHIFT 0x1
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK1__SHIFT 0x3
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK2__SHIFT 0x4
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK1__SHIFT 0x6
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK2__SHIFT 0x7
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK1__SHIFT 0x9
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK2__SHIFT 0xa
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK1_MASK 0x00000001L
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK2_MASK 0x00000006L
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK1_MASK 0x00000008L
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK2_MASK 0x00000030L
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK1_MASK 0x00000040L
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK2_MASK 0x00000180L
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK1_MASK 0x00000200L
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK2_MASK 0x00000C00L
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK0_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK1_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK2_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK3_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK4_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK5_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK0_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK1_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK2_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK3_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK4_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK5_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PIXEL_RATE_CNTL__DTBCLK_DTO0_ENABLE__SHIFT 0x3
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define OTG0_PIXEL_RATE_CNTL__DTBCLKDTO0_ENABLE_STATUS__SHIFT 0x6
+#define OTG0_PIXEL_RATE_CNTL__DPDTO0_ENABLE_STATUS__SHIFT 0x7
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9
+#define OTG0_PIXEL_RATE_CNTL__PIPE0_DTO_SRC_SEL__SHIFT 0xc
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG0_PIXEL_RATE_CNTL__DTBCLK_DTO0_ENABLE_MASK 0x00000008L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L
+#define OTG0_PIXEL_RATE_CNTL__DTBCLKDTO0_ENABLE_STATUS_MASK 0x00000040L
+#define OTG0_PIXEL_RATE_CNTL__DPDTO0_ENABLE_STATUS_MASK 0x00000080L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L
+#define OTG0_PIXEL_RATE_CNTL__PIPE0_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PIXEL_RATE_CNTL__DTBCLK_DTO1_ENABLE__SHIFT 0x3
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define OTG1_PIXEL_RATE_CNTL__DTBCLKDTO1_ENABLE_STATUS__SHIFT 0x6
+#define OTG1_PIXEL_RATE_CNTL__DPDTO1_ENABLE_STATUS__SHIFT 0x7
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9
+#define OTG1_PIXEL_RATE_CNTL__PIPE1_DTO_SRC_SEL__SHIFT 0xc
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG1_PIXEL_RATE_CNTL__DTBCLK_DTO1_ENABLE_MASK 0x00000008L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L
+#define OTG1_PIXEL_RATE_CNTL__DTBCLKDTO1_ENABLE_STATUS_MASK 0x00000040L
+#define OTG1_PIXEL_RATE_CNTL__DPDTO1_ENABLE_STATUS_MASK 0x00000080L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L
+#define OTG1_PIXEL_RATE_CNTL__PIPE1_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PIXEL_RATE_CNTL__DTBCLK_DTO2_ENABLE__SHIFT 0x3
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x4
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE__SHIFT 0x5
+#define OTG2_PIXEL_RATE_CNTL__DTBCLKDTO2_ENABLE_STATUS__SHIFT 0x6
+#define OTG2_PIXEL_RATE_CNTL__DPDTO2_ENABLE_STATUS__SHIFT 0x7
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL__SHIFT 0x8
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL__SHIFT 0x9
+#define OTG2_PIXEL_RATE_CNTL__PIPE2_DTO_SRC_SEL__SHIFT 0xc
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG2_PIXEL_RATE_CNTL__DTBCLK_DTO2_ENABLE_MASK 0x00000008L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x00000010L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE_MASK 0x00000020L
+#define OTG2_PIXEL_RATE_CNTL__DTBCLKDTO2_ENABLE_STATUS_MASK 0x00000040L
+#define OTG2_PIXEL_RATE_CNTL__DPDTO2_ENABLE_STATUS_MASK 0x00000080L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL_MASK 0x00000100L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL_MASK 0x00000200L
+#define OTG2_PIXEL_RATE_CNTL__PIPE2_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x0
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xFFFFFFFFL
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x0
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xFFFFFFFFL
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PIXEL_RATE_CNTL__DTBCLK_DTO3_ENABLE__SHIFT 0x3
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x4
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE__SHIFT 0x5
+#define OTG3_PIXEL_RATE_CNTL__DTBCLKDTO3_ENABLE_STATUS__SHIFT 0x6
+#define OTG3_PIXEL_RATE_CNTL__DPDTO3_ENABLE_STATUS__SHIFT 0x7
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL__SHIFT 0x8
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL__SHIFT 0x9
+#define OTG3_PIXEL_RATE_CNTL__PIPE3_DTO_SRC_SEL__SHIFT 0xc
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG3_PIXEL_RATE_CNTL__DTBCLK_DTO3_ENABLE_MASK 0x00000008L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x00000010L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE_MASK 0x00000020L
+#define OTG3_PIXEL_RATE_CNTL__DTBCLKDTO3_ENABLE_STATUS_MASK 0x00000040L
+#define OTG3_PIXEL_RATE_CNTL__DPDTO3_ENABLE_STATUS_MASK 0x00000080L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL_MASK 0x00000100L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL_MASK 0x00000200L
+#define OTG3_PIXEL_RATE_CNTL__PIPE3_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x0
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xFFFFFFFFL
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x0
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xFFFFFFFFL
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0007FFFFL
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_SRC_SEL__SHIFT 0x5
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_SRC_SEL__SHIFT 0x5
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_SRC_SEL__SHIFT 0x5
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_SRC_SEL__SHIFT 0x5
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_SRC_SEL__SHIFT 0x5
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_SRC_SEL_MASK 0x000000E0L
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x0
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x2
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x3
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET__SHIFT 0x4
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET__SHIFT 0x8
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET__SHIFT 0xc
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET__SHIFT 0xd
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET__SHIFT 0xe
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET__SHIFT 0xf
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET__SHIFT 0x10
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET__SHIFT 0x11
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET__SHIFT 0x12
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET__SHIFT 0x13
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET__SHIFT 0x14
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET__SHIFT 0x15
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x00000001L
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO_MASK 0x00000004L
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST_MASK 0x00000008L
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET_MASK 0x00000010L
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET_MASK 0x00000100L
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET_MASK 0x00001000L
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET_MASK 0x00002000L
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET_MASK 0x00004000L
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET_MASK 0x00008000L
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET_MASK 0x00010000L
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET_MASK 0x00020000L
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET_MASK 0x00040000L
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET_MASK 0x00080000L
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET_MASK 0x00100000L
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET_MASK 0x00200000L
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN__SHIFT 0x8
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN__SHIFT 0x9
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN__SHIFT 0xa
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN__SHIFT 0xb
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN__SHIFT 0xc
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN__SHIFT 0xd
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN_MASK 0x00000100L
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN_MASK 0x00000200L
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN_MASK 0x00000400L
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN_MASK 0x00000800L
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN_MASK 0x00001000L
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN_MASK 0x00002000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO__SHIFT 0x1d
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000070L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO_MASK 0x20000000L
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE_MASK 0xFFFFFFFFL
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE_MASK 0xFFFFFFFFL
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE_MASK 0xFFFFFFFFL
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE_MASK 0xFFFFFFFFL
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN__SHIFT 0x11
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN__SHIFT 0x15
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN_MASK 0x00020000L
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN_MASK 0x00200000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN__SHIFT 0x12
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN__SHIFT 0x13
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN__SHIFT 0x14
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN__SHIFT 0x15
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL__SHIFT 0x1a
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL__SHIFT 0x1b
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL__SHIFT 0x1c
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL__SHIFT 0x1d
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN_MASK 0x00040000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN_MASK 0x00080000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN_MASK 0x00100000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN_MASK 0x00200000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL_MASK 0x04000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL_MASK 0x08000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL_MASK 0x10000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL_MASK 0x20000000L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK__SHIFT 0xa
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK__SHIFT 0xb
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK__SHIFT 0xc
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK__SHIFT 0xd
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK_MASK 0x00000400L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK_MASK 0x00000800L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK_MASK 0x00001000L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK_MASK 0x00002000L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE__SHIFT 0x0
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE__SHIFT 0x1
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE__SHIFT 0x2
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE__SHIFT 0x3
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE__SHIFT 0x4
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE__SHIFT 0x5
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE__SHIFT 0x6
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE_MASK 0x00000001L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE_MASK 0x00000002L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE_MASK 0x00000004L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE_MASK 0x00000008L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE_MASK 0x00000010L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE_MASK 0x00000020L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE_MASK 0x00000040L
+#define DTBCLK_DTO0_PHASE__DTBCLK_DTO0_PHASE__SHIFT 0x0
+#define DTBCLK_DTO0_PHASE__DTBCLK_DTO0_PHASE_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO1_PHASE__DTBCLK_DTO1_PHASE__SHIFT 0x0
+#define DTBCLK_DTO1_PHASE__DTBCLK_DTO1_PHASE_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO2_PHASE__DTBCLK_DTO2_PHASE__SHIFT 0x0
+#define DTBCLK_DTO2_PHASE__DTBCLK_DTO2_PHASE_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO3_PHASE__DTBCLK_DTO3_PHASE__SHIFT 0x0
+#define DTBCLK_DTO3_PHASE__DTBCLK_DTO3_PHASE_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO0_MODULO__DTBCLK_DTO0_MODULO__SHIFT 0x0
+#define DTBCLK_DTO0_MODULO__DTBCLK_DTO0_MODULO_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO1_MODULO__DTBCLK_DTO1_MODULO__SHIFT 0x0
+#define DTBCLK_DTO1_MODULO__DTBCLK_DTO1_MODULO_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO2_MODULO__DTBCLK_DTO2_MODULO__SHIFT 0x0
+#define DTBCLK_DTO2_MODULO__DTBCLK_DTO2_MODULO_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
+#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK2_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK3_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK4_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK5_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE0_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE0_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE1_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE1_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE2_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE2_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE3_GATE_DISABLE__SHIFT 0xe
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE3_GATE_DISABLE__SHIFT 0xf
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE0_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK2_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK3_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK4_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK5_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE0_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE0_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE1_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE1_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE2_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE2_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE3_GATE_DISABLE_MASK 0x00004000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE3_GATE_DISABLE_MASK 0x00008000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE0_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L
+#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL
+#define DCCG_AUDIO_DTBCLK_DTO_MODULO__DCCG_AUDIO_DTBCLK_DTO_MODULO__SHIFT 0x0
+#define DCCG_AUDIO_DTBCLK_DTO_MODULO__DCCG_AUDIO_DTBCLK_DTO_MODULO_MASK 0xFFFFFFFFL
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO0_DBUF_EN__SHIFT 0x0
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO1_DBUF_EN__SHIFT 0x1
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO2_DBUF_EN__SHIFT 0x2
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO3_DBUF_EN__SHIFT 0x3
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO0_DBUF_EN_MASK 0x00000001L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO1_DBUF_EN_MASK 0x00000002L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO2_DBUF_EN_MASK 0x00000004L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO3_DBUF_EN_MASK 0x00000008L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x0000007FL
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0x0000FC00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H_MASK 0xFFFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
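
(Editorial aside, not part of the patch: a minimal sketch of how the generated __SHIFT/_MASK pairs above are typically consumed. The FIELD_PREP()/FIELD_GET() helpers and the set_stream_channel() function below are hypothetical stand-ins, shown only to illustrate the <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming convention; the driver itself uses its own register-field wrappers built on the same macros.)

#include <stdint.h>

/* Hypothetical helpers: insert or extract a field given its __SHIFT/_MASK pair. */
#define FIELD_PREP(field, val)  (((uint32_t)(val) << field##__SHIFT) & (uint32_t)field##_MASK)
#define FIELD_GET(field, reg)   (((uint32_t)(reg) & (uint32_t)field##_MASK) >> field##__SHIFT)

/* Example: program stream ID 3, channel ID 0 into a cached copy of the
 * AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID register value,
 * using the masks and shifts defined earlier in this header. */
static uint32_t set_stream_channel(uint32_t reg)
{
	reg &= ~(uint32_t)(AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK |
			   AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK);
	reg |= FIELD_PREP(AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID, 3) |
	       FIELD_PREP(AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID, 0);
	return reg;
}
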
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
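/*
 * A minimal usage sketch, not taken from the patch itself: it only assumes
 * the DC_PERFMON0 definitions above are in scope. pm_read32() and the
 * mm* offset parameters are hypothetical placeholders standing in for the
 * driver's real MMIO accessor and register offsets. The __SHIFT/_MASK pairs
 * are consumed by masking the raw register value and shifting the field
 * down; here the 48-bit counter is assembled from the 32-bit
 * PERFMON_CVALUE_LOW value and the 16-bit PERFMON_CVALUE_HI field of
 * PERFMON_CVALUE_INT_MISC.
 */
#include <stdint.h>

static uint32_t pm_read32(uint32_t offset) { (void)offset; return 0; } /* stub accessor */

static uint64_t dc_perfmon0_read_cvalue(uint32_t mm_cvalue_low,
					uint32_t mm_cvalue_int_misc)
{
	/* Low 32 bits of the counted value. */
	uint64_t lo = pm_read32(mm_cvalue_low) &
		      DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK;
	/* High 16 bits live in bits 31:16 of PERFMON_CVALUE_INT_MISC. */
	uint64_t hi = (pm_read32(mm_cvalue_int_misc) &
		       DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK) >>
		      DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT;

	return (hi << 32) | lo;
}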
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN1_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN1_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN1_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN1_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN2_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN2_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN2_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN2_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN3_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN3_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN3_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN3_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN16_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN16_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN16_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN16_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN17_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN17_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN17_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN17_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN18_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN18_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN18_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN18_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN19_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN19_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN19_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN19_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN__SHIFT 0x0
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN_MASK 0x00000001L
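/*
 * A second minimal sketch under the same assumptions: the DOMAIN0_PG_*
 * definitions above are in scope, and pg_read32()/pg_write32() plus the
 * mm* offset parameters are hypothetical placeholders for the driver's
 * real register accessors. It shows the usual read-modify-write of a
 * one-bit field (DOMAIN_POWER_GATE in DOMAIN0_PG_CONFIG) and extraction of
 * the 2-bit DOMAIN_PGFSM_PWR_STATUS field from DOMAIN0_PG_STATUS.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t pg_read32(uint32_t offset) { (void)offset; return 0; }       /* stub accessor */
static void pg_write32(uint32_t offset, uint32_t v) { (void)offset; (void)v; } /* stub accessor */

static void domain0_set_power_gate(uint32_t mm_pg_config, bool gate)
{
	uint32_t v = pg_read32(mm_pg_config);

	v &= ~DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE_MASK;
	if (gate)
		v |= 1u << DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT;
	pg_write32(mm_pg_config, v);
}

static uint32_t domain0_pgfsm_pwr_status(uint32_t mm_pg_status)
{
	return (pg_read32(mm_pg_status) &
		DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK) >>
	       DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT;
}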
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x0
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE__SHIFT 0x10
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x000000FFL
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE_MASK 0x00010000L
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL__SHIFT 0x0
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS__SHIFT 0x4
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS__SHIFT 0x5
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON__SHIFT 0x6
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON__SHIFT 0x7
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL_MASK 0x0000000FL
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS_MASK 0x00000010L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS_MASK 0x00000020L
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON_MASK 0x00000040L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON_MASK 0x00000080L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE__SHIFT 0x0
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS__SHIFT 0x4
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT__SHIFT 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE_MASK 0x00000001L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS_MASK 0x00000010L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT_MASK 0xFFFF0000L
+#define ZSC_CNTL__FORCE_SOC_ACCESS__SHIFT 0x0
+#define ZSC_CNTL__FORCE_SOC_ACCESS_MASK 0x00000003L
+#define ZSC_CNTL2__ALLOW_Z10__SHIFT 0x0
+#define ZSC_CNTL2__ALLOW_Z10_MASK 0x00000001L
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN__SHIFT 0x0
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE__SHIFT 0x4
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN_MASK 0x00000001L
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE_MASK 0x00000010L
+#define ZSC_STATUS__SOC_ACCESS_TRIGGER_STATUS__SHIFT 0x0
+#define ZSC_STATUS__SOC_ACCESS_STICKY_TRIGGER_STATUS__SHIFT 0x4
+#define ZSC_STATUS__FENCE_REQ_STATUS__SHIFT 0x8
+#define ZSC_STATUS__FENCE_ACK_STATUS__SHIFT 0x9
+#define ZSC_STATUS__FENCE_STATUS__SHIFT 0xa
+#define ZSC_STATUS__SOC_ACCESS_TRIGGER_STATUS_MASK 0x00000007L
+#define ZSC_STATUS__SOC_ACCESS_STICKY_TRIGGER_STATUS_MASK 0x00000070L
+#define ZSC_STATUS__FENCE_REQ_STATUS_MASK 0x00000100L
+#define ZSC_STATUS__FENCE_ACK_STATUS_MASK 0x00000200L
+#define ZSC_STATUS__FENCE_STATUS_MASK 0x00000C00L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP_MASK 0x00700000L
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x0
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xFFFFFFFFL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x8
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0xb
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0xe
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x11
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x14
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x17
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x0000007FL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x00000700L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x00003800L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x0001C000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0x000E0000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x00700000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x03800000L
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DIO_ALPM_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DIO_ALPM_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_DRR_TIMING_UPDATE__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_DRR_TIMING_UPDATE__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_DRR_TIMING_UPDATE__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_DRR_TIMING_UPDATE__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_DRR_TIMING_UPDATE__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_DRR_TIMING_UPDATE__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_DRR_TIMING_UPDATE_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_DRR_TIMING_UPDATE_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_DRR_TIMING_UPDATE_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_DRR_TIMING_UPDATE_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_DRR_TIMING_UPDATE_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_DRR_TIMING_UPDATE_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23_MASK 0x80000000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP_MASK 0x70000000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY_MASK 0x70000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN2_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN3_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN4_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN5_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN6_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DISP_INTERRUPT_STATUS_CONTINUE25__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN2_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN3_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN4_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN5_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN6_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DISP_INTERRUPT_STATUS_CONTINUE25_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DPIA_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DMCUB_WHITELIST_INVALID_ACCESS_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE25__MMHUBBUB_WARMUP_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DPIA_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DMCUB_WHITELIST_INVALID_ACCESS_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER0_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER1_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__MMHUBBUB_WARMUP_INTERRUPT_MASK 0x40000000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST__SHIFT 0x0
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST__SHIFT 0x1
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST__SHIFT 0x2
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST__SHIFT 0x3
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST__SHIFT 0x4
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST__SHIFT 0x5
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST_MASK 0x00000001L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST_MASK 0x00000002L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST_MASK 0x00000004L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST_MASK 0x00000008L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST_MASK 0x00000010L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST_MASK 0x00000020L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST__SHIFT 0x0
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST__SHIFT 0x1
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST__SHIFT 0x2
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST__SHIFT 0x3
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST__SHIFT 0x4
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT3_INT_DEST__SHIFT 0x5
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT4_INT_DEST__SHIFT 0x6
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT5_INT_DEST__SHIFT 0x7
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT6_INT_DEST__SHIFT 0x8
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT_IH_INT_DEST__SHIFT 0x9
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST__SHIFT 0xa
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST__SHIFT 0xb
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST__SHIFT 0xe
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST__SHIFT 0xf
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST__SHIFT 0x10
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST__SHIFT 0x11
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST__SHIFT 0x1a
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST_MASK 0x00000001L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST_MASK 0x00000002L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST_MASK 0x00000004L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST_MASK 0x00000008L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST_MASK 0x00000010L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT3_INT_DEST_MASK 0x00000020L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT4_INT_DEST_MASK 0x00000040L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT5_INT_DEST_MASK 0x00000080L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT6_INT_DEST_MASK 0x00000100L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT_IH_INT_DEST_MASK 0x00000200L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST_MASK 0x00000400L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST_MASK 0x00000800L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST_MASK 0x00002000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST_MASK 0x00004000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST_MASK 0x00008000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST_MASK 0x00010000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST_MASK 0x00020000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST_MASK 0x04000000L
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+#define DMU_INTERRUPT_DEST2__DPIA_IHC_INTERRUPT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST2__DMCUB_IHC_WHITELIST_INVALID_ACCESS_INTERRUPT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST2__DPIA_IHC_INTERRUPT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST2__DMCUB_IHC_WHITELIST_INVALID_ACCESS_INTERRUPT_DEST_MASK 0x00002000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x14
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x15
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x16
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x17
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST_MASK 0x00100000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST_MASK 0x00200000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST_MASK 0x00400000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST_MASK 0x00800000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xa
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xb
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xc
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xd
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xe
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xf
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000400L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000800L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST_MASK 0x00001000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST_MASK 0x00002000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST_MASK 0x00004000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST_MASK 0x00008000L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST__SHIFT 0x1
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST__SHIFT 0x2
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST__SHIFT 0x3
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST__SHIFT 0x4
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST__SHIFT 0x5
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_WARMUP_INTERRUPT_DEST__SHIFT 0x8
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST_MASK 0x00000002L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST_MASK 0x00000004L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST_MASK 0x00000008L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST_MASK 0x00000010L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST_MASK 0x00000020L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_WARMUP_INTERRUPT_DEST_MASK 0x00000100L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x1
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x9
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0xb
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000002L
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000200L
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000800L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1e
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1f
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST_MASK 0x20000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST_MASK 0x40000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x80000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x20000000L
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_DEST_MASK 0x04000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST__SHIFT 0x0
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST__SHIFT 0x1
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST__SHIFT 0x2
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST__SHIFT 0x3
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST__SHIFT 0x4
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST__SHIFT 0x5
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST__SHIFT 0x6
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST__SHIFT 0x7
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST_MASK 0x00000001L
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST_MASK 0x00000002L
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST_MASK 0x00000004L
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST_MASK 0x00000008L
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST_MASK 0x00000010L
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST_MASK 0x00000020L
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST_MASK 0x00000040L
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST_MASK 0x00000080L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x18
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x19
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1a
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1b
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1c
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1d
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x01000000L
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x02000000L
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x04000000L
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x08000000L
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x10000000L
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x20000000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG0_INTERRUPT_DEST__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG0_INTERRUPT_DEST__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG1_INTERRUPT_DEST__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG1_INTERRUPT_DEST__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG2_INTERRUPT_DEST__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG2_INTERRUPT_DEST__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG3_INTERRUPT_DEST__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG3_INTERRUPT_DEST__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG4_INTERRUPT_DEST__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG4_INTERRUPT_DEST__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG5_INTERRUPT_DEST__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG5_INTERRUPT_DEST__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x0
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x1
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x2
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x3
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x4
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x5
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x6
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x7
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x8
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x9
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xa
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xb
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xc
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xd
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xe
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xf
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000001L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000002L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000004L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000008L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000010L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000020L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000040L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000080L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000100L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000200L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000400L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000800L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00001000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00002000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00004000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00008000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x10
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x11
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x12
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x13
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x14
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x15
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST__SHIFT 0x16
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST_MASK 0x00010000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST_MASK 0x00020000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST_MASK 0x00040000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST_MASK 0x00080000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST_MASK 0x00100000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST_MASK 0x00200000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST_MASK 0x00400000L
+#define DIO_INTERRUPT_DEST__DIO_ALPM_INTERRUPT_DEST__SHIFT 0x4
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DIO_INTERRUPT_DEST__DIO_ALPM_INTERRUPT_DEST_MASK 0x00000010L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x0
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x2
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x3
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x4
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x6
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x10
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000001L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000004L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000008L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000010L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000040L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00010000L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST__SHIFT 0x0
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST__SHIFT 0x1
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST__SHIFT 0x2
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST__SHIFT 0x3
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST__SHIFT 0x4
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST__SHIFT 0x5
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST__SHIFT 0x8
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST__SHIFT 0x9
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST__SHIFT 0xa
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST__SHIFT 0xb
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST__SHIFT 0xc
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST__SHIFT 0xd
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST_MASK 0x00000001L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST_MASK 0x00000002L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST_MASK 0x00000004L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST_MASK 0x00000008L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST_MASK 0x00000010L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST_MASK 0x00000020L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST_MASK 0x00000100L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST_MASK 0x00000200L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST_MASK 0x00000400L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST_MASK 0x00000800L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST_MASK 0x00001000L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST_MASK 0x00002000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x0
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x1
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x2
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x3
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x4
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x5
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x6
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x7
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST__SHIFT 0x8
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST__SHIFT 0x9
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST__SHIFT 0xa
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST__SHIFT 0xb
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST__SHIFT 0xc
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST__SHIFT 0xd
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST__SHIFT 0xe
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST__SHIFT 0xf
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST__SHIFT 0x10
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST__SHIFT 0x11
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST__SHIFT 0x12
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST__SHIFT 0x13
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST__SHIFT 0x14
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST__SHIFT 0x15
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST__SHIFT 0x16
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST__SHIFT 0x17
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1e
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1f
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000001L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000002L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000004L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000008L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000010L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000020L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000040L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000080L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST_MASK 0x00000100L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST_MASK 0x00000200L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST_MASK 0x00000400L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST_MASK 0x00000800L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST_MASK 0x00001000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST_MASK 0x00002000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST_MASK 0x00004000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST_MASK 0x00008000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST_MASK 0x00010000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST_MASK 0x00020000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST_MASK 0x00040000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST_MASK 0x00080000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST_MASK 0x00100000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST_MASK 0x00200000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST_MASK 0x00400000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST_MASK 0x00800000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x40000000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x80000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST__SHIFT 0x8
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST__SHIFT 0x9
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST__SHIFT 0xa
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST__SHIFT 0xb
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x10
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x12
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x13
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x14
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x16
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x17
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x18
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x19
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x1a
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x1b
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST_MASK 0x00000100L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST_MASK 0x00000200L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST_MASK 0x00000400L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST_MASK 0x00000800L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00010000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00040000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00080000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00100000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00400000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00800000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x01000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x02000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x04000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x08000000L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x0
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x4
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x6
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x7
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x8
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x9
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xa
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xb
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0xc
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0xd
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x10
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x14
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000001L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000010L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000040L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000080L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000100L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000200L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000400L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000800L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00001000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00002000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00010000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00100000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_TRUST_LVL__SHIFT 0x4
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SOURCE_ID__SHIFT 0x8
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_TRUST_LVL_MASK 0x00000070L
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SOURCE_ID_MASK 0x01FFFF00L
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD__SHIFT 0x14
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD_MASK 0xFFF00000L
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC__SHIFT 0x0
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC_MASK 0xFFFFFFFFL
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2__SHIFT 0x0
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2_MASK 0x0000007FL
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP__SHIFT 0x1c
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS__SHIFT 0x1d
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK__SHIFT 0x1e
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK__SHIFT 0x1f
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR_MASK 0x0003FFFCL
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP_MASK 0x10000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS_MASK 0x20000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK_MASK 0x40000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK_MASK 0x80000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS__SHIFT 0x7
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS__SHIFT 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS__SHIFT 0x9
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS__SHIFT 0xa
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS__SHIFT 0xb
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS__SHIFT 0xc
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS__SHIFT 0xd
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS__SHIFT 0xe
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS__SHIFT 0xf
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS__SHIFT 0x10
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS__SHIFT 0x11
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS__SHIFT 0x12
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS__SHIFT 0x13
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS__SHIFT 0x14
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS__SHIFT 0x15
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS__SHIFT 0x16
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS__SHIFT 0x17
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS__SHIFT 0x18
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS__SHIFT 0x19
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS__SHIFT 0x1a
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS__SHIFT 0x1b
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS__SHIFT 0x1c
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS__SHIFT 0x1d
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS__SHIFT 0x1e
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS__SHIFT 0x1f
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS_MASK 0x00000040L
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS_MASK 0x00000080L
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS_MASK 0x00000100L
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS_MASK 0x00000200L
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS_MASK 0x00000400L
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS_MASK 0x00000800L
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS_MASK 0x00001000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS_MASK 0x00002000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS_MASK 0x00004000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS_MASK 0x00008000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS_MASK 0x00010000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS_MASK 0x00020000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS_MASK 0x00040000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS_MASK 0x00080000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS_MASK 0x00100000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS_MASK 0x00200000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS_MASK 0x00400000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS_MASK 0x00800000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS_MASK 0x01000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS_MASK 0x02000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS_MASK 0x04000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS_MASK 0x08000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS_MASK 0x10000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS_MASK 0x20000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS_MASK 0x40000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS_MASK 0x80000000L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT38_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT38_TIMEOUT_DIS_MASK 0x00000040L
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE__SHIFT 0x0
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT__SHIFT 0x4
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY__SHIFT 0x5
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL__SHIFT 0x6
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x8
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x9
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x10
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE_MASK 0x00000003L
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT_MASK 0x00000010L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY_MASK 0x00000020L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL_MASK 0x00000040L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000100L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000E00L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR_MASK 0xFFFF0000L
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE_MASK 0x80000000L
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET_MASK 0xFFFFFF00L
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH_MASK 0x0000FFFFL
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN__SHIFT 0x0
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN__SHIFT 0x1
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN__SHIFT 0x2
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN__SHIFT 0x3
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN__SHIFT 0x4
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN__SHIFT 0x5
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN__SHIFT 0x6
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN__SHIFT 0x7
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN__SHIFT 0x8
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN__SHIFT 0x9
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN__SHIFT 0xa
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN__SHIFT 0xb
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN__SHIFT 0xc
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT3_INT_EN__SHIFT 0xd
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT4_INT_EN__SHIFT 0xe
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT5_INT_EN__SHIFT 0xf
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT6_INT_EN__SHIFT 0x10
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT_IH_INT_EN__SHIFT 0x11
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN__SHIFT 0x12
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT3_INT_EN_MASK 0x00002000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT4_INT_EN_MASK 0x00004000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT5_INT_EN_MASK 0x00008000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT6_INT_EN_MASK 0x00010000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT_IH_INT_EN_MASK 0x00020000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN_MASK 0x00040000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK__SHIFT 0x0
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK__SHIFT 0x1
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK__SHIFT 0x2
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK__SHIFT 0x3
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK__SHIFT 0x4
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK__SHIFT 0x5
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK__SHIFT 0x6
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK__SHIFT 0x7
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK__SHIFT 0x8
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK__SHIFT 0x9
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK__SHIFT 0xa
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK__SHIFT 0xb
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK__SHIFT 0xc
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT3_INT_ACK__SHIFT 0xd
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT4_INT_ACK__SHIFT 0xe
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT5_INT_ACK__SHIFT 0xf
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT6_INT_ACK__SHIFT 0x10
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT_IH_INT_ACK__SHIFT 0x11
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK__SHIFT 0x12
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT3_INT_ACK_MASK 0x00002000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT4_INT_ACK_MASK 0x00004000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT5_INT_ACK_MASK 0x00008000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT6_INT_ACK_MASK 0x00010000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT_IH_INT_ACK_MASK 0x00020000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK_MASK 0x00040000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT__SHIFT 0x0
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT__SHIFT 0x1
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT__SHIFT 0x2
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT__SHIFT 0x3
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT__SHIFT 0x4
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT__SHIFT 0x5
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT__SHIFT 0x6
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT__SHIFT 0x7
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT__SHIFT 0x8
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT__SHIFT 0x9
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT__SHIFT 0xa
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT__SHIFT 0xb
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT__SHIFT 0xc
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT3_INT_STAT__SHIFT 0xd
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT4_INT_STAT__SHIFT 0xe
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT5_INT_STAT__SHIFT 0xf
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT6_INT_STAT__SHIFT 0x10
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT_IH_INT_STAT__SHIFT 0x11
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT__SHIFT 0x12
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT__SHIFT 0x13
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT__SHIFT 0x14
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT_MASK 0x00000001L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT_MASK 0x00000002L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT_MASK 0x00000004L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT_MASK 0x00000008L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT_MASK 0x00000010L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT_MASK 0x00000020L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT_MASK 0x00000040L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT_MASK 0x00000080L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT_MASK 0x00000100L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT_MASK 0x00000200L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT_MASK 0x00000400L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT_MASK 0x00000800L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT_MASK 0x00001000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT3_INT_STAT_MASK 0x00002000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT4_INT_STAT_MASK 0x00004000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT5_INT_STAT_MASK 0x00008000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT6_INT_STAT_MASK 0x00010000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT_IH_INT_STAT_MASK 0x00020000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT_MASK 0x00040000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT_MASK 0x00080000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT_MASK 0x00100000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE__SHIFT 0x0
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE__SHIFT 0x1
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE__SHIFT 0x2
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE__SHIFT 0x3
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE__SHIFT 0x4
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE__SHIFT 0x5
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE__SHIFT 0x6
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE__SHIFT 0x7
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE__SHIFT 0x8
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE__SHIFT 0x9
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE__SHIFT 0xa
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE__SHIFT 0xb
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE__SHIFT 0xc
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT3_INT_TYPE__SHIFT 0xd
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT4_INT_TYPE__SHIFT 0xe
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT5_INT_TYPE__SHIFT 0xf
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT6_INT_TYPE__SHIFT 0x10
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT_IH_INT_TYPE__SHIFT 0x11
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE__SHIFT 0x12
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE_MASK 0x00000001L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE_MASK 0x00000002L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE_MASK 0x00000004L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE_MASK 0x00000008L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE_MASK 0x00000010L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE_MASK 0x00000020L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE_MASK 0x00000040L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE_MASK 0x00000080L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE_MASK 0x00000100L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE_MASK 0x00000200L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE_MASK 0x00000400L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE_MASK 0x00000800L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE_MASK 0x00001000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT3_INT_TYPE_MASK 0x00002000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT4_INT_TYPE_MASK 0x00004000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT5_INT_TYPE_MASK 0x00008000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT6_INT_TYPE_MASK 0x00010000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT_IH_INT_TYPE_MASK 0x00020000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE_MASK 0x00040000L
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID__SHIFT 0x8
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT_MASK 0x000000FFL
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID_MASK 0x0000FF00L
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID_MASK 0x0FFFFFFFL
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK_MASK 0x00000001L
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR_MASK 0xFFFFFFFFL
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR_MASK 0xFFFFFFFFL
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID__SHIFT 0x8
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET__SHIFT 0x10
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE__SHIFT 0x11
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS__SHIFT 0x14
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS__SHIFT 0x15
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR__SHIFT 0x18
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR__SHIFT 0x19
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID_MASK 0x00003F00L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_MASK 0x00010000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE_MASK 0x00020000L
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS_MASK 0x00100000L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS_MASK 0x00200000L
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR_MASK 0x01000000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR_MASK 0x02000000L
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS__SHIFT 0x0
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT 0x4
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS_MASK 0x0000000FL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK 0x000000F0L
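
A minimal, self-contained sketch (not part of the patch itself) of how __SHIFT/_MASK pairs such as the DMCUB_MEM_CNTL ones above are conventionally combined to read and update a field inside a 32-bit register value. The get_field()/set_field() helpers and the standalone main() are illustrative assumptions for demonstration on a plain uint32_t; real driver code would go through its own MMIO read/write wrappers rather than these helpers.

/* Illustrative only: mirrors the DMCUB_MEM_CNTL field definitions above. */
#include <stdint.h>
#include <stdio.h>

#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT 0x4
#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK   0x000000F0L

/* Extract a field: mask off the bits, then shift down to bit 0. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear the old bits, then OR in the shifted new value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t mem_cntl = 0;

	/* Program a hypothetical read QoS level of 3 into the field. */
	mem_cntl = set_field(mem_cntl,
			     DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK,
			     DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT, 3);

	printf("MEM_READ_QOS = %u (reg = 0x%08x)\n",
	       (unsigned)get_field(mem_cntl,
				   DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK,
				   DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT),
	       (unsigned)mem_cntl);
	return 0;
}
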
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE__SHIFT 0x0
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR__SHIFT 0x0
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR__SHIFT 0x0
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE__SHIFT 0x0
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR__SHIFT 0x0
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR_MASK 0xFFFFFFFFL
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR__SHIFT 0x0
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR_MASK 0xFFFFFFFFL
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR_MASK 0xFFFFFFFFL
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0_MASK 0xFFFFFFFFL
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1_MASK 0xFFFFFFFFL
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW__SHIFT 0x0
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW_MASK 0x00000007L
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0__SHIFT 0x0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1__SHIFT 0x0
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2__SHIFT 0x0
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3__SHIFT 0x0
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4__SHIFT 0x0
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5__SHIFT 0x0
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6__SHIFT 0x0
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7__SHIFT 0x0
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8__SHIFT 0x0
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9__SHIFT 0x0
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10__SHIFT 0x0
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11__SHIFT 0x0
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12__SHIFT 0x0
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13__SHIFT 0x0
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14__SHIFT 0x0
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15__SHIFT 0x0
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15_MASK 0xFFFFFFFFL
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY__SHIFT 0x0
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS__SHIFT 0x8
+#define DMCUB_CNTL__DMCUB_ENABLE__SHIFT 0x10
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE__SHIFT 0x12
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN__SHIFT 0x13
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS__SHIFT 0x14
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY_MASK 0x000000FFL
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS_MASK 0x00000100L
+#define DMCUB_CNTL__DMCUB_ENABLE_MASK 0x00010000L
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE_MASK 0x00040000L
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN_MASK 0x00080000L
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS_MASK 0x00100000L
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0_MASK 0xFFFFFFFFL
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1_MASK 0xFFFFFFFFL
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT__SHIFT 0x0
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT_MASK 0xFFFFFFFFL
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_MASK 0xFFFFFFFFL
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE__SHIFT 0x0
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE_MASK 0xFFFFFFFFL
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE__SHIFT 0x1
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS__SHIFT 0x3
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE__SHIFT 0x4
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE_MASK 0x00000006L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS_MASK 0x00000008L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE_MASK 0x00000030L
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT__SHIFT 0x0
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT_MASK 0xFFFFFFFFL
+#define DMCUB_PROC_ID__DMCUB_PROC_ID__SHIFT 0x0
+#define DMCUB_PROC_ID__DMCUB_PROC_ID_MASK 0x0000FFFFL
+#define DMCUB_CNTL2__DMCUB_SOFT_RESET__SHIFT 0x0
+#define DMCUB_CNTL2__DMCUB_SOFT_RESET_MASK 0x00000001L
+#define DMCUB_GPINT_DATAIN2__DMCUB_GPINT_DATAIN2__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN2__DMCUB_GPINT_DATAIN2_MASK 0xFFFFFFFFL
+#define DMCUB_GPINT_DATAIN3__DMCUB_GPINT_DATAIN3__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN3__DMCUB_GPINT_DATAIN3_MASK 0xFFFFFFFFL
+#define DMCUB_GPINT_DATAIN4__DMCUB_GPINT_DATAIN4__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN4__DMCUB_GPINT_DATAIN4_MASK 0xFFFFFFFFL
+#define DMCUB_GPINT_DATAIN5__DMCUB_GPINT_DATAIN5__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN5__DMCUB_GPINT_DATAIN5_MASK 0xFFFFFFFFL
+#define DMCUB_GPINT_DATAIN6__DMCUB_GPINT_DATAIN6__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN6__DMCUB_GPINT_DATAIN6_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE__SHIFT 0x0
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN__SHIFT 0x4
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK__SHIFT 0x5
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN__SHIFT 0x7
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK__SHIFT 0x8
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN__SHIFT 0x18
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN_MASK 0x00000010L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK_MASK 0x00000020L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN_MASK 0x00000080L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN_MASK 0x01000000L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS__SHIFT 0x0
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS__SHIFT 0x1
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF__SHIFT 0x4
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS__SHIFT 0x7
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L__SHIFT 0xc
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF__SHIFT 0x1c
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS_MASK 0x00000002L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF_MASK 0x00000070L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS_MASK 0x00000080L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L_MASK 0x01FFF000L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF_MASK 0x70000000L
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH__SHIFT 0x8
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH__SHIFT 0x18
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH_MASK 0x0000FF00L
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH_MASK 0xFF000000L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG_MASK 0x00080000L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG_MASK 0x00080000L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG_MASK 0x00080000L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG_MASK 0x00080000L
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE__SHIFT 0x0
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL__SHIFT 0x14
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE_MASK 0x00000003L
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL_MASK 0xFFF00000L
+#define MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON__SHIFT 0x0
+#define MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON_MASK 0x00000001L
+#define MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C_MASK 0xFFFFFFFFL
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE__SHIFT 0x0
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK__SHIFT 0x8
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE__SHIFT 0x10
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE_MASK 0x1FFF0000L
+#define MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x1
+#define MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000002L
+#define MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE__SHIFT 0x0
+#define MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE_MASK 0x00000001L
+#define MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH__SHIFT 0x1
+#define MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH_MASK 0x00000002L
+#define MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT__SHIFT 0x0
+#define MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT_MASK 0x003FFFFFL
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL__SHIFT 0x0
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SPACE__SHIFT 0x4
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL_MASK 0x00000007L
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SPACE_MASK 0x00000070L
+#define MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE__SHIFT 0x0
+#define MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE_MASK 0x000FFFFFL
+#define MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE__SHIFT 0x0
+#define MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE_MASK 0x000FFFFFL
+#define MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH_MASK 0x000000FFL
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+#define MCIF_WB_VMID_CONTROL__MCIF_WB_P_VMID__SHIFT 0x0
+#define MCIF_WB_VMID_CONTROL__MCIF_WB_P_VMID_MASK 0x0000000FL
+#define MCIF_WB_MIN_TTO__MCIF_WB_MIN_TTO__SHIFT 0x0
+#define MCIF_WB_MIN_TTO__MCIF_WB_MIN_TTO_MASK 0x0007FFFFL
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK__SHIFT 0x0
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK_MASK 0x001FFFFFL
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x07000000L
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x07000000L
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_QOS__SHIFT 0x10
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_AWID__SHIFT 0x14
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_QOS_MASK 0x000F0000L
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_AWID_MASK 0x00F00000L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_EN__SHIFT 0x0
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_EN__SHIFT 0x4
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_STATUS__SHIFT 0x5
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_ACK__SHIFT 0x6
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_INC_ADDR__SHIFT 0x8
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_EN_MASK 0x00000001L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_EN_MASK 0x00000010L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_STATUS_MASK 0x00000020L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_ACK_MASK 0x00000040L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_INC_ADDR_MASK 0x03FFFF00L
+#define MMHUBBUB_WARMUP_BASE_ADDR_LOW__MMHUBBUB_WARMUP_BASE_ADDR_LOW__SHIFT 0x0
+#define MMHUBBUB_WARMUP_BASE_ADDR_LOW__MMHUBBUB_WARMUP_BASE_ADDR_LOW_MASK 0xFFFFFFFFL
+#define MMHUBBUB_WARMUP_BASE_ADDR_HIGH__MMHUBBUB_WARMUP_BASE_ADDR_HIGH__SHIFT 0x0
+#define MMHUBBUB_WARMUP_BASE_ADDR_HIGH__MMHUBBUB_WARMUP_BASE_ADDR_HIGH_MASK 0x000007FFL
+#define MMHUBBUB_WARMUP_ADDR_REGION__MMHUBBUB_WARMUP_ADDR_REGION__SHIFT 0x0
+#define MMHUBBUB_WARMUP_ADDR_REGION__MMHUBBUB_WARMUP_ADDR_REGION_MASK 0x07FFFFFFL
+#define MMHUBBUB_MIN_TTO__MMHUBBUB_MIN_TTO__SHIFT 0x0
+#define MMHUBBUB_MIN_TTO__MMHUBBUB_MIN_TTO_MASK 0x0007FFFFL
+#define MMHUBBUB_CTRL__MMHUB_SOCCLK_DS_MODE__SHIFT 0x0
+#define MMHUBBUB_CTRL__MMHUB_SOCCLK_DS_MODE_MASK 0x00000003L
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_SEL__SHIFT 0x14
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_REQ__SHIFT 0x16
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_SEL_MASK 0x00300000L
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_REQ_MASK 0x00400000L
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH__SHIFT 0x0
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE__SHIFT 0x10
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_DIS__SHIFT 0x18
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_STATUS__SHIFT 0x19
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH_MASK 0x000003FFL
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE_MASK 0x00010000L
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_DIS_MASK 0x01000000L
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_STATUS_MASK 0x02000000L
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE__SHIFT 0x0
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE__SHIFT 0x6
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE_MASK 0x00000003L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE_MASK 0x00000030L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE_MASK 0x000000C0L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL__SHIFT 0x5
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM__SHIFT 0x7
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM__SHIFT 0x8
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS_MASK 0x00000010L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL_MASK 0x00000060L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM_MASK 0x00000080L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM_MASK 0x00000100L
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS__SHIFT 0x5
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS__SHIFT 0x9
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS__SHIFT 0xa
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS_MASK 0x00000020L
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS_MASK 0x00000200L
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS_MASK 0x00000400L
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET__SHIFT 0x2
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET__SHIFT 0x8
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET_MASK 0x00000004L
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET_MASK 0x00000100L
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR__SHIFT 0x0
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR__SHIFT 0x4
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_MASK 0x00000001L
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR_MASK 0x00000010L
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID__SHIFT 0x8
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID_MASK 0x00003F00L
+#define MMHUBBUB_WARMUP_VMID_CONTROL__MMHUBBUB_WARMUP_P_VMID__SHIFT 0x0
+#define MMHUBBUB_WARMUP_VMID_CONTROL__MMHUBBUB_WARMUP_P_VMID_MASK 0x0000000FL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x4
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL__SHIFT 0xc
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000010L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL_MASK 0x0000F000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x00000006L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x0
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x8
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL__SHIFT 0x10
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0x000000FFL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x00000100L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL_MASK 0x00FF0000L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC0_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+#define AZALIA_CRC0_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC0_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC1_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+#define AZALIA_CRC1_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC1_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS__SHIFT 0x2
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE__SHIFT 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS__SHIFT 0x5
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE__SHIFT 0x6
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS__SHIFT 0x8
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE__SHIFT 0x9
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS__SHIFT 0xb
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE__SHIFT 0xc
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS__SHIFT 0xe
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE__SHIFT 0xf
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS__SHIFT 0x11
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE__SHIFT 0x12
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS__SHIFT 0x14
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL__SHIFT 0x1c
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS_MASK 0x00000004L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE_MASK 0x00000018L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS_MASK 0x00000020L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS_MASK 0x00000100L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE_MASK 0x00000600L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS_MASK 0x00000800L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE_MASK 0x00003000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS_MASK 0x00004000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE_MASK 0x00018000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS_MASK 0x00020000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE_MASK 0x000C0000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS_MASK 0x00100000L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL_MASK 0x30000000L
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE__SHIFT 0x0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE__SHIFT 0x2
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE__SHIFT 0x4
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE__SHIFT 0x6
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE__SHIFT 0x8
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE__SHIFT 0xa
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE__SHIFT 0xc
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE_MASK 0x0000000CL
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE_MASK 0x00000030L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE_MASK 0x00000300L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE_MASK 0x00000C00L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE_MASK 0x00003000L
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0_MASK 0xFFFFFFFFL
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1_MASK 0xFFFFFFFFL
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2_MASK 0xFFFFFFFFL
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3_MASK 0xFFFFFFFFL
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4_MASK 0xFFFFFFFFL
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5_MASK 0xFFFFFFFFL
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6_MASK 0xFFFFFFFFL
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
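/*
 * Illustrative aside, not part of the patch hunk above: every field in these
 * generated headers is described by a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK
 * pair.  A minimal sketch of the generic read/modify pattern such pairs support
 * is shown below; the helper names (example_get_field/example_set_field) are
 * hypothetical stand-ins for whatever field accessors the driver actually uses.
 */
#include <stdint.h>

/* Extract a field from a 32-bit register value using its mask and shift. */
static inline uint32_t example_get_field(uint32_t reg_val, uint32_t mask,
					 uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Replace one field in a 32-bit register value, leaving the other bits intact. */
static inline uint32_t example_set_field(uint32_t reg_val, uint32_t mask,
					 uint32_t shift, uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/*
 * Example: pull AZALIA_INPUT_ENDPOINT_REG_INDEX out of a value read from
 * AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX:
 *
 *   idx = example_get_field(val,
 *           AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK,
 *           AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT);
 */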
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK_MASK 0x01FF0000L
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL__SHIFT 0x0
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL__SHIFT 0x3
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL_MASK 0x00000001L
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL_MASK 0x00000008L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY__SHIFT 0x1
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID__SHIFT 0x3
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE__SHIFT 0x7
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO__SHIFT 0xa
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_MASK 0x00000001L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY_MASK 0x00000002L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID_MASK 0x00000078L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE_MASK 0x00000380L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO_MASK 0xFFFFFC00L
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI_MASK 0x001FFFFFL
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+#define DCN_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define DCN_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+#define DCN_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define DCN_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+#define DCN_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define DCN_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+#define DCN_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define DCN_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START_MASK 0x000FFFFFL
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END_MASK 0x000FFFFFL
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN__SHIFT 0x0
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN__SHIFT 0x1
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING__SHIFT 0x2
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING__SHIFT 0x3
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL__SHIFT 0x4
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL__SHIFT 0x6
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL__SHIFT 0x8
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL__SHIFT 0xc
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL__SHIFT 0x14
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN_MASK 0x00000001L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN_MASK 0x00000002L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING_MASK 0x00000008L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL_MASK 0x00000030L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL_MASK 0x000000C0L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL_MASK 0x00000F00L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL_MASK 0x00001000L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL_MASK 0x00100000L
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_G_Y__SHIFT 0x10
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_R_CR_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC0_VAL_R_G__DCHUBBUB_CRC0_G_Y_MASK 0xFFFF0000L
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_ALPHA__SHIFT 0x10
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_B_CB_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC0_VAL_B_A__DCHUBBUB_CRC0_ALPHA_MASK 0xFFFF0000L
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_G_Y__SHIFT 0x10
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_R_CR_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC1_VAL_R_G__DCHUBBUB_CRC1_G_Y_MASK 0xFFFF0000L
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_ALPHA__SHIFT 0x10
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_B_CB_MASK 0x0000FFFFL
+#define DCHUBBUB_CRC1_VAL_B_A__DCHUBBUB_CRC1_ALPHA_MASK 0xFFFF0000L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_MODE__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_EN__SHIFT 0x1
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_DONE__SHIFT 0x2
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_PIPE_SEL__SHIFT 0x4
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_FRAME_CNT__SHIFT 0x10
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_MODE_MASK 0x00000001L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_EN_MASK 0x00000002L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_DONE_MASK 0x00000004L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_PIPE_SEL_MASK 0x000000F0L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_FRAME_CNT_MASK 0xFFFF0000L
+#define DCHUBBUB_DCC_STAT0__DCHUBBUB_DCC_STAT_TOTAL_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT0__DCHUBBUB_DCC_STAT_TOTAL_REQ_MASK 0xFFFFFFFFL
+#define DCHUBBUB_DCC_STAT1__DCHUBBUB_DCC_STAT_ZS_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT1__DCHUBBUB_DCC_STAT_ZS_REQ_MASK 0xFFFFFFFFL
+#define DCHUBBUB_DCC_STAT2__DCHUBBUB_DCC_STAT_DCC_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT2__DCHUBBUB_DCC_STAT_DCC_REQ_MASK 0xFFFFFFFFL
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE__SHIFT 0x0
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE__SHIFT 0x10
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_STATUS__SHIFT 0x12
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_CLEAR__SHIFT 0x13
+#define DCHUBBUB_COMPBUF_CTRL__CONFIG_ERROR__SHIFT 0x1f
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE_MASK 0x00010000L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_STATUS_MASK 0x00040000L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_CLEAR_MASK 0x00080000L
+#define DCHUBBUB_COMPBUF_CTRL__CONFIG_ERROR_MASK 0x80000000L
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACCESS_MEM_PWR_MODE__SHIFT 0x0
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACTIVE_MEM_PWR_MODE__SHIFT 0x2
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_IDLE_MEM_PWR_MODE__SHIFT 0x4
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_FORCE__SHIFT 0x6
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_FORCE__SHIFT 0x8
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__UNALLOCATED_MEM_PWR_MODE__SHIFT 0xa
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x10
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_IDLE_MEM_PWR_MODE__SHIFT 0x12
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x14
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__SEGMENT_MEM_PWR_DIS__SHIFT 0x18
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_DIS__SHIFT 0x19
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_DIS__SHIFT 0x1a
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACCESS_MEM_PWR_MODE_MASK 0x00000003L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACTIVE_MEM_PWR_MODE_MASK 0x0000000CL
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_IDLE_MEM_PWR_MODE_MASK 0x00000030L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_FORCE_MASK 0x000000C0L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_FORCE_MASK 0x00000300L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__UNALLOCATED_MEM_PWR_MODE_MASK 0x00000C00L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_FORCE_MASK 0x00030000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_IDLE_MEM_PWR_MODE_MASK 0x000C0000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__SEGMENT_MEM_PWR_DIS_MASK 0x01000000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_DIS_MASK 0x02000000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_DIS_MASK 0x04000000L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_WAKE_LATENCY__SHIFT 0x0
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_SLEEP_LATENCY__SHIFT 0x8
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_WAKE_LATENCY__SHIFT 0x10
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_SLEEP_LATENCY__SHIFT 0x18
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_WAKE_LATENCY_MASK 0x000000FFL
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_SLEEP_LATENCY_MASK 0x0000FF00L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_WAKE_LATENCY_MASK 0x00FF0000L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_SLEEP_LATENCY_MASK 0xFF000000L
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_UNALLOCATED_WAKE_LATENCY__SHIFT 0x0
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_UNALLOCATED_WAKE_LATENCY_MASK 0x000000FFL
+#define DCHUBBUB_MEM_PWR_STATUS__COMPBUF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_MEM_PWR_STATUS__METAFIFO_MEM_PWR_STATE__SHIFT 0x2
+#define DCHUBBUB_MEM_PWR_STATUS__UNALLOCATED_MEM_PWR_STATE__SHIFT 0x4
+#define DCHUBBUB_MEM_PWR_STATUS__DCC_SKID_MEM_PWR_STATE__SHIFT 0x6
+#define DCHUBBUB_MEM_PWR_STATUS__DET0_MEM_PWR_STATE__SHIFT 0x8
+#define DCHUBBUB_MEM_PWR_STATUS__DET1_MEM_PWR_STATE__SHIFT 0xa
+#define DCHUBBUB_MEM_PWR_STATUS__DET2_MEM_PWR_STATE__SHIFT 0xc
+#define DCHUBBUB_MEM_PWR_STATUS__DET3_MEM_PWR_STATE__SHIFT 0xe
+#define DCHUBBUB_MEM_PWR_STATUS__COMPBUF_MEM_PWR_STATE_MASK 0x00000003L
+#define DCHUBBUB_MEM_PWR_STATUS__METAFIFO_MEM_PWR_STATE_MASK 0x0000000CL
+#define DCHUBBUB_MEM_PWR_STATUS__UNALLOCATED_MEM_PWR_STATE_MASK 0x00000030L
+#define DCHUBBUB_MEM_PWR_STATUS__DCC_SKID_MEM_PWR_STATE_MASK 0x000000C0L
+#define DCHUBBUB_MEM_PWR_STATUS__DET0_MEM_PWR_STATE_MASK 0x00000300L
+#define DCHUBBUB_MEM_PWR_STATUS__DET1_MEM_PWR_STATE_MASK 0x00000C00L
+#define DCHUBBUB_MEM_PWR_STATUS__DET2_MEM_PWR_STATE_MASK 0x00003000L
+#define DCHUBBUB_MEM_PWR_STATUS__DET3_MEM_PWR_STATE_MASK 0x0000C000L
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_64B__SHIFT 0x0
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_ZS__SHIFT 0x10
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_64B_MASK 0x00000FFFL
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_ZS_MASK 0x0FFF0000L
+#define DCHUBBUB_DEBUG_CTRL_0__METAFIFO_DEPTH__SHIFT 0x0
+#define DCHUBBUB_DEBUG_CTRL_0__COMPBUF_SEG_DEPTH__SHIFT 0x8
+#define DCHUBBUB_DEBUG_CTRL_0__DET_SEG_DEPTH__SHIFT 0xc
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
+#define DCHUBBUB_DEBUG_CTRL_0__DELAY_COMPBUF_DEALLOC_ON_DRQ_STOP_DISABLE__SHIFT 0x1b
+#define DCHUBBUB_DEBUG_CTRL_0__SEG_ALLOC_ERR_PIPE_BLANK_ENABLE__SHIFT 0x1c
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_RESET_OPTIMIZATION_DISABLE__SHIFT 0x1d
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_ALLOC_ENABLE__SHIFT 0x1e
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_DEALLOC_ENABLE__SHIFT 0x1f
+#define DCHUBBUB_DEBUG_CTRL_0__METAFIFO_DEPTH_MASK 0x000000FFL
+#define DCHUBBUB_DEBUG_CTRL_0__COMPBUF_SEG_DEPTH_MASK 0x00000F00L
+#define DCHUBBUB_DEBUG_CTRL_0__DET_SEG_DEPTH_MASK 0x0000F000L
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x07FF0000L
+#define DCHUBBUB_DEBUG_CTRL_0__DELAY_COMPBUF_DEALLOC_ON_DRQ_STOP_DISABLE_MASK 0x08000000L
+#define DCHUBBUB_DEBUG_CTRL_0__SEG_ALLOC_ERR_PIPE_BLANK_ENABLE_MASK 0x10000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_RESET_OPTIMIZATION_DISABLE_MASK 0x20000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_ALLOC_ENABLE_MASK 0x40000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_DEALLOC_ENABLE_MASK 0x80000000L
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0xa
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD__SHIFT 0x16
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x0007FC00L
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD_MASK 0x7FC00000L
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_HOSTVM_STALL_QOS__SHIFT 0xc
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_HOSTVM_STALL_QOS_MASK 0x0000F000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE__SHIFT 0xc
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE__SHIFT 0x10
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE__SHIFT 0x11
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_LEGACY__SHIFT 0x12
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DCFCLK_DEEP_SLEEP_HYSTERESIS__SHIFT 0x18
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE_MASK 0x00001000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE_MASK 0x00010000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE_MASK 0x00020000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_LEGACY_MASK 0x00040000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DCFCLK_DEEP_SLEEP_HYSTERESIS_MASK 0xFF000000L
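/*
 * Illustrative aside, not part of the patch hunk above: registers that carry
 * FORCE_VALUE/FORCE_ENABLE field pairs, such as DCHUBBUB_ARB_DRAM_STATE_CNTL,
 * are programmed by composing several fields into one 32-bit value.  A minimal
 * sketch in plain C using only the SHIFT/MASK pairs defined above; the
 * function name is hypothetical and not a driver API.
 */
#include <stdint.h>

/* Build a DCHUBBUB_ARB_DRAM_STATE_CNTL value that forces "allow self refresh". */
static inline uint32_t example_force_allow_self_refresh(void)
{
	uint32_t v = 0;

	/* Force value = 1 (allow self refresh). */
	v |= (1u << DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT) &
	     DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK;
	/* Enable the force so the value above takes effect. */
	v |= (1u << DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT) &
	     DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK;

	return v;
}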
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A_MASK 0x000003FFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_MASK 0x000003FFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B_MASK 0x000003FFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_MASK 0x000003FFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C_MASK 0x000003FFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_MASK 0x000003FFL
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D_MASK 0x000FFFFFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D_MASK 0x000003FFL
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_MASK 0x000003FFL
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE__SHIFT 0x0
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE__SHIFT 0x1
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK__SHIFT 0x3
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS__SHIFT 0x4
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS__SHIFT 0x6
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS__SHIFT 0x7
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS__SHIFT 0x8
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES__SHIFT 0x10
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS__SHIFT 0x18
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD__SHIFT 0x1c
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE_MASK 0x00000001L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE_MASK 0x00000002L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK_MASK 0x00000008L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS_MASK 0x00000010L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS_MASK 0x00000040L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS_MASK 0x00000080L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS_MASK 0x00003F00L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES_MASK 0x00FF0000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS_MASK 0x0F000000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD_MASK 0xF0000000L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_Z8__SHIFT 0x10
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_Z8_MASK 0x00010000L
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+#define VTG2_CONTROL__VTG2_FP2__SHIFT 0x0
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT__SHIFT 0x10
+#define VTG2_CONTROL__VTG2_ENABLE__SHIFT 0x1f
+#define VTG2_CONTROL__VTG2_FP2_MASK 0x00007FFFL
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG2_CONTROL__VTG2_ENABLE_MASK 0x80000000L
+#define VTG3_CONTROL__VTG3_FP2__SHIFT 0x0
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT__SHIFT 0x10
+#define VTG3_CONTROL__VTG3_ENABLE__SHIFT 0x1f
+#define VTG3_CONTROL__VTG3_FP2_MASK 0x00007FFFL
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG3_CONTROL__VTG3_ENABLE_MASK 0x80000000L
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_FGCG_REP_DIS__SHIFT 0x7
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_FGCG_REP_DIS_MASK 0x00000080L
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_EVENT_SHORT_PULSE_FILTER_EN__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_DF_REQ_CMD_LATENCY_SEL__SHIFT 0x2
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL__SHIFT 0x3
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL__SHIFT 0x7
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY__SHIFT 0xa
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL__SHIFT 0xb
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_EVENT_SHORT_PULSE_FILTER_EN_MASK 0x00000002L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_DF_REQ_CMD_LATENCY_SEL_MASK 0x00000004L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL_MASK 0x00000078L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL_MASK 0x00000380L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY_MASK 0x00000400L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL_MASK 0x003FF800L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR__SHIFT 0x4
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL__SHIFT 0xc
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL__SHIFT 0x14
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET__SHIFT 0x1f
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL_MASK 0x0000000EL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR_MASK 0x00000FF0L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL_MASK 0x00007000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_MASK 0x7FF00000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET_MASK 0x80000000L
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3
+#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L
+#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
+#define FMON_CTRL__FMON_START__SHIFT 0x0
+#define FMON_CTRL__FMON_MODE__SHIFT 0x1
+#define FMON_CTRL__FMON_PSTATE_IGNORE__SHIFT 0x4
+#define FMON_CTRL__FMON_STATUS_IGNORE__SHIFT 0x5
+#define FMON_CTRL__FMON_URG_MODE_GREATER__SHIFT 0x6
+#define FMON_CTRL__FMON_FILTER_UID_EN__SHIFT 0x7
+#define FMON_CTRL__FMON_STATE__SHIFT 0x9
+#define FMON_CTRL__FMON_URG_FILTER__SHIFT 0xc
+#define FMON_CTRL__FMON_URG_THRESHOLD__SHIFT 0xd
+#define FMON_CTRL__FMON_FILTER_UID_1__SHIFT 0x11
+#define FMON_CTRL__FMON_FILTER_UID_2__SHIFT 0x16
+#define FMON_CTRL__FMON_SOF_SEL__SHIFT 0x1b
+#define FMON_CTRL__FMON_START_MASK 0x00000001L
+#define FMON_CTRL__FMON_MODE_MASK 0x00000006L
+#define FMON_CTRL__FMON_PSTATE_IGNORE_MASK 0x00000010L
+#define FMON_CTRL__FMON_STATUS_IGNORE_MASK 0x00000020L
+#define FMON_CTRL__FMON_URG_MODE_GREATER_MASK 0x00000040L
+#define FMON_CTRL__FMON_FILTER_UID_EN_MASK 0x00000180L
+#define FMON_CTRL__FMON_STATE_MASK 0x00000600L
+#define FMON_CTRL__FMON_URG_FILTER_MASK 0x00001000L
+#define FMON_CTRL__FMON_URG_THRESHOLD_MASK 0x0001E000L
+#define FMON_CTRL__FMON_FILTER_UID_1_MASK 0x003E0000L
+#define FMON_CTRL__FMON_FILTER_UID_2_MASK 0x07C00000L
+#define FMON_CTRL__FMON_SOF_SEL_MASK 0x38000000L
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA__SHIFT 0x1c
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP__SHIFT 0x1d
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB_MASK 0x0000000FL
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA_MASK 0x10000000L
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP_MASK 0x20000000L
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR__SHIFT 0x0
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE__SHIFT 0x1
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE__SHIFT 0x2
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE__SHIFT 0x8
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE__SHIFT 0x9
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR_MASK 0x00000001L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE_MASK 0x00000002L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE_MASK 0x00000004L
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE_MASK 0x00000100L
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE_MASK 0x00000200L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS__SHIFT 0x0
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID__SHIFT 0x10
+#define DCN_VM_FAULT_STATUS__DCN_VM_TR_RESP_ERROR_VMID__SHIFT 0x14
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL__SHIFT 0x18
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE__SHIFT 0x1a
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS__SHIFT 0x1f
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS_MASK 0x0000FFFFL
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID_MASK 0x000F0000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_TR_RESP_ERROR_VMID_MASK 0x00F00000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL_MASK 0x03000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE_MASK 0x3C000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS_MASK 0x80000000L
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB_MASK 0x0000000FL
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP0_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP0_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP0_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+#define HUBPREQ0_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP1_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP1_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP1_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP1_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP1_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP1_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP1_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+#define HUBPREQ1_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP2_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP2_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP2_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP2_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP2_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP2_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP2_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+#define HUBPREQ2_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP3_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP3_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP3_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+#define HUBPREQ3_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+#define CNVC_CFG0_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+#define CM0_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define CM0_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define CM0_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM0_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define CM0_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define CM0_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+#define CM0_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM0_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+#define CNVC_CFG1_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+#define CM1_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define CM1_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define CM1_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM1_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define CM1_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define CM1_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+#define CM1_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM1_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+#define CNVC_CFG2_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+#define CM2_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define CM2_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define CM2_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM2_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define CM2_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define CM2_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+#define CM2_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM2_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+#define CNVC_CFG3_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+#define CM3_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define CM3_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define CM3_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define CM3_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define CM3_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define CM3_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+#define CM3_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM3_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+#define DPG2_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG2_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG2_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG2_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG2_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG2_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG2_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG2_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+#define DPG3_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG3_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG3_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG3_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG3_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG3_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG3_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG3_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R__SHIFT 0x10
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_A_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B__SHIFT 0x10
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_G_MASK 0x0000FFFFL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFF0000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2__OPP_PIPE_CRC_RESULT_C_MASK 0x0000FFFFL
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL__SHIFT 0x8
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd
+#define OPP_TOP_CLK_CONTROL__OPP_ABM2_CLOCK_ON__SHIFT 0xe
+#define OPP_TOP_CLK_CONTROL__OPP_ABM3_CLOCK_ON__SHIFT 0xf
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM2_CLOCK_ON_MASK 0x00004000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM3_CLOCK_ON_MASK 0x00008000L
+#define OPP_ABM_CONTROL__OPP_ABM_BLPWM_SEL__SHIFT 0x0
+#define OPP_ABM_CONTROL__OPP_ABM_BLPWM_SEL_MASK 0x00000007L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+#define OTG0_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG0_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+#define OTG0_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG0_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+#define OTG0_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG0_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG1_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG1_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+#define OTG1_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG1_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+#define OTG1_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG1_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+#define OTG1_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG1_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG2_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG2_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG2_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG2_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG2_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG2_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG2_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+#define OTG2_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG2_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+#define OTG2_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG2_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+#define OTG2_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG2_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG3_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG3_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG3_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG3_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG3_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG3_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG3_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+#define OTG3_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG3_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+#define OTG3_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG3_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+#define OTG3_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG3_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL__SHIFT 0x8
+#define OPTC_CLOCK_CONTROL__OPTC_FGCG_REP_DIS__SHIFT 0xf
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPTC_CLOCK_CONTROL__OPTC_FGCG_REP_DIS_MASK 0x00008000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE__SHIFT 0x4
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS__SHIFT 0x6
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE__SHIFT 0x8
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS__SHIFT 0xa
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE__SHIFT 0xc
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS__SHIFT 0xe
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE__SHIFT 0x10
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS__SHIFT 0x12
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE__SHIFT 0x14
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS__SHIFT 0x16
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE__SHIFT 0x18
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS__SHIFT 0x1a
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE__SHIFT 0x1c
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS__SHIFT 0x1e
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS_MASK 0x00000004L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE_MASK 0x00000030L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS_MASK 0x00000040L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE_MASK 0x00000300L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS_MASK 0x00000400L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE_MASK 0x00003000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS_MASK 0x00004000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE_MASK 0x00030000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS_MASK 0x00040000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE_MASK 0x00300000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS_MASK 0x00400000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE_MASK 0x03000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS_MASK 0x04000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE_MASK 0x30000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS_MASK 0x40000000L
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE_MASK 0x0000000CL
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE__SHIFT 0x0
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE__SHIFT 0x2
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE__SHIFT 0x4
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE__SHIFT 0x6
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE__SHIFT 0x8
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE__SHIFT 0xa
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE__SHIFT 0xc
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE__SHIFT 0xe
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE_MASK 0x00000003L
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE_MASK 0x0000000CL
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE_MASK 0x00000030L
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE_MASK 0x000000C0L
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE_MASK 0x00000300L
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE_MASK 0x00000C00L
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE_MASK 0x00003000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE_MASK 0x0000C000L
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG__SHIFT 0x0
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG_MASK 0x000000FFL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0xc
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0xd
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0xe
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x11
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x12
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x14
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x15
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x16
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x18
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x1a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x1b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x1c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x1d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x00001000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x00002000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x00004000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x00010000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x00020000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x00040000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x00100000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x00200000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x00400000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x01000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x04000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x08000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xFFFF0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xFF000000L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xFFFF0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xFF000000L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xFFFF0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xFF000000L
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x0
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x14
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x1c
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0x0000FFFFL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0x00F00000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED__SHIFT 0x0
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT__SHIFT 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK__SHIFT 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK__SHIFT 0x3
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED__SHIFT 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT__SHIFT 0x5
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK__SHIFT 0x6
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK__SHIFT 0x7
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT__SHIFT 0x9
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK__SHIFT 0xa
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK__SHIFT 0xb
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED__SHIFT 0xc
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT__SHIFT 0xd
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK__SHIFT 0xe
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK__SHIFT 0xf
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED__SHIFT 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT__SHIFT 0x11
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK__SHIFT 0x12
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK__SHIFT 0x13
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED__SHIFT 0x14
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT__SHIFT 0x15
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK__SHIFT 0x16
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK__SHIFT 0x17
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED__SHIFT 0x18
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT__SHIFT 0x19
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK__SHIFT 0x1a
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK__SHIFT 0x1b
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE__SHIFT 0x1e
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0x1f
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED_MASK 0x00000001L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT_MASK 0x00000002L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK_MASK 0x00000004L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK_MASK 0x00000008L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED_MASK 0x00000010L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT_MASK 0x00000020L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK_MASK 0x00000040L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK_MASK 0x00000080L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED_MASK 0x00000100L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT_MASK 0x00000200L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK_MASK 0x00000400L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK_MASK 0x00000800L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED_MASK 0x00001000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT_MASK 0x00002000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK_MASK 0x00004000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK_MASK 0x00008000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED_MASK 0x00010000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT_MASK 0x00020000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK_MASK 0x00040000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK_MASK 0x00080000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED_MASK 0x00100000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT_MASK 0x00200000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK_MASK 0x00400000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK_MASK 0x00800000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED_MASK 0x01000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT_MASK 0x02000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK_MASK 0x04000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK_MASK 0x08000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE_MASK 0x40000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x80000000L
+#define DIO_SCRATCH0__DIO_SCRATCH0__SHIFT 0x0
+#define DIO_SCRATCH0__DIO_SCRATCH0_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH1__DIO_SCRATCH1__SHIFT 0x0
+#define DIO_SCRATCH1__DIO_SCRATCH1_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH2__DIO_SCRATCH2__SHIFT 0x0
+#define DIO_SCRATCH2__DIO_SCRATCH2_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH3__DIO_SCRATCH3__SHIFT 0x0
+#define DIO_SCRATCH3__DIO_SCRATCH3_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH4__DIO_SCRATCH4__SHIFT 0x0
+#define DIO_SCRATCH4__DIO_SCRATCH4_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH5__DIO_SCRATCH5__SHIFT 0x0
+#define DIO_SCRATCH5__DIO_SCRATCH5_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH6__DIO_SCRATCH6__SHIFT 0x0
+#define DIO_SCRATCH6__DIO_SCRATCH6_MASK 0xFFFFFFFFL
+#define DIO_SCRATCH7__DIO_SCRATCH7__SHIFT 0x0
+#define DIO_SCRATCH7__DIO_SCRATCH7_MASK 0xFFFFFFFFL
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGA_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGB_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x1
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGC_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGD_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x3
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGE_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x4
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGF_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x5
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGG_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x6
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGA_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGB_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000002L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGC_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGD_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000008L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGE_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000010L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGF_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000020L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGG_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x00000008L
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x00000020L
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x00000080L
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE__SHIFT 0x18
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE__SHIFT 0x1b
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE__SHIFT 0x1e
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE_MASK 0x01000000L
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE_MASK 0x04000000L
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE_MASK 0x08000000L
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE_MASK 0x20000000L
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE_MASK 0x40000000L
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L
+#define DIO_LINKA_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKA_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKA_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKA_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKA_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKA_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+#define DIO_LINKB_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKB_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKB_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKB_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKB_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKB_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+#define DIO_LINKC_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKC_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKC_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKC_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKC_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKC_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+#define DIO_LINKD_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKD_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKD_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKD_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKD_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKD_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+#define DIO_LINKE_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKE_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKE_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKE_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKE_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKE_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+#define DIO_LINKF_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKF_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKF_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKF_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKF_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKF_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
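/*
 * Illustrative sketch only (not part of this generated header): each field
 * above gets a __SHIFT and a _MASK define, and consumers mask then shift the
 * raw register word. Assuming the two DC_PERFMON16 counter words have already
 * been read (the helper below is hypothetical and only shows the idiom),
 * the 48-bit counter value is reassembled like this:
 */
static inline u64 dc_perfmon16_counter(u32 low_reg, u32 hi_reg)
{
	/* upper 16 bits live in PERFMON_HI[15:0] */
	u64 hi = (hi_reg & DC_PERFMON16_PERFMON_HI__PERFMON_HI_MASK) >>
		 DC_PERFMON16_PERFMON_HI__PERFMON_HI__SHIFT;
	/* lower 32 bits are the full PERFMON_LOW word */
	u64 low = (low_reg & DC_PERFMON16_PERFMON_LOW__PERFMON_LOW_MASK) >>
		  DC_PERFMON16_PERFMON_LOW__PERFMON_LOW__SHIFT;

	return (hi << 32) | low;
}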
+#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
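/*
 * Illustrative sketch only (not part of this generated header): a typical
 * consumer tests single-bit flags directly against the _MASK define and
 * extracts multi-bit fields by masking then shifting. Given an already-read
 * DP_AUX0_AUX_SW_STATUS word, the transaction-done flag and the reply byte
 * count fall out as below (helper names are hypothetical).
 */
static inline bool dp_aux0_sw_done(u32 sw_status)
{
	/* single-bit flag: a non-zero masked value means the transfer finished */
	return sw_status & DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK;
}

static inline u32 dp_aux0_reply_bytes(u32 sw_status)
{
	/* multi-bit field: mask out bits [28:24], then shift down to bit 0 */
	return (sw_status & DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK) >>
	       DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT;
}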
+#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+#define DP_AUX2_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX2_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX2_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX2_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX2_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+#define DP_AUX3_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX3_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX3_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX3_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX3_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+#define DP_AUX4_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX4_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX4_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX4_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX4_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG0_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG0_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG0_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define AFMT0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+#define DME0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG0_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG0_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+
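+/* A minimal usage sketch (illustrative only, not part of the original header):
+ * a field is normally programmed by clearing its bits with the _MASK constant
+ * and shifting the new value into place with the matching __SHIFT constant.
+ * The register accessors (RREG32/WREG32) and the offset symbol mmDIG0_HDMI_GC
+ * are assumptions made for this example, not defined in this file.
+ *
+ *   u32 val = RREG32(mmDIG0_HDMI_GC);                 // assumed offset symbol
+ *   val &= ~DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK;        // clear the AVMUTE field
+ *   val |= (1 << DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT)
+ *          & DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK;       // write the new value
+ *   WREG32(mmDIG0_HDMI_GC, val);
+ */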
+//DIG0_DIG_BE_CLK_CNTL
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT 0x6
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK 0x00000040L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
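+/* A second sketch, under the same assumptions as above (RREG32 and the
+ * offset symbol mmDIG0_DIG_BE_CNTL are placeholders for the example):
+ * reading a field back is the inverse operation, mask first, then shift.
+ *
+ *   u32 val = RREG32(mmDIG0_DIG_BE_CNTL);
+ *   u32 src = (val & DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK)
+ *             >> DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT;
+ */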
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP0_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP0_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP0_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+#define DP0_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP0_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP0_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP0_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+#define VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG1_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG1_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG1_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define AFMT1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+#define DME1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG1_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG1_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP1_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP1_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP1_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+#define DP1_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP1_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP1_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP1_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+#define VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG2_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG2_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG2_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define AFMT2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+#define DME2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME2_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME2_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME2_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME2_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME2_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME2_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME2_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME2_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG2_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG2_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG2_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG2_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+#define DP2_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP2_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP2_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP2_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+#define DP2_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP2_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+#define DP2_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP2_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP2_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP2_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP2_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP2_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP2_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP2_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP2_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+#define DP2_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP2_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+#define DP2_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP2_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP2_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP2_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP2_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP2_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+#define VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG3_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG3_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG3_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define AFMT3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+#define DME3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME3_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME3_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME3_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME3_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME3_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME3_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME3_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME3_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG3_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG3_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG3_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG3_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+#define DP3_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP3_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP3_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP3_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+#define DP3_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP3_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+#define DP3_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP3_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP3_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP3_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP3_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP3_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP3_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP3_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP3_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+#define DP3_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP3_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+#define DP3_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP3_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP3_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP3_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP3_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP3_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+#define VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG4_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG4_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG4_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define AFMT4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+#define DME4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME4_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME4_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME4_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME4_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME4_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME4_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME4_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME4_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG4_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG4_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG4_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG4_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+#define DP4_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP4_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP4_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP4_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+#define DP4_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP4_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+#define DP4_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP4_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP4_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP4_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP4_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP4_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP4_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP4_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP4_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+#define DP4_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP4_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+#define DP4_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP4_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP4_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP4_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP4_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP4_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x0
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x8
+#define DC_GENERICB__GENERICB_EN_MASK 0x00000001L
+#define DC_GENERICB__GENERICB_SEL_MASK 0x00000F00L
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL__SHIFT 0x0
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x8
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x00000300L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY__SHIFT 0x18
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY_MASK 0xFF000000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+#define DCIO_SPARE__DCIO_SPARE__SHIFT 0x0
+#define DCIO_SPARE__DCIO_SPARE_MASK 0xFFFFFFFFL
+#define INTERCEPT_STATE__PWRSEQ0_INTERCEPTB_STATE__SHIFT 0x0
+#define INTERCEPT_STATE__PWRSEQ1_INTERCEPTB_STATE__SHIFT 0x1
+#define INTERCEPT_STATE__DPCS0_INTERCEPTB_STATE__SHIFT 0x4
+#define INTERCEPT_STATE__DPCS1_INTERCEPTB_STATE__SHIFT 0x5
+#define INTERCEPT_STATE__DPCS2_INTERCEPTB_STATE__SHIFT 0x6
+#define INTERCEPT_STATE__DPCS3_INTERCEPTB_STATE__SHIFT 0x7
+#define INTERCEPT_STATE__DPCS4_INTERCEPTB_STATE__SHIFT 0x8
+#define INTERCEPT_STATE__DPCS5_INTERCEPTB_STATE__SHIFT 0x9
+#define INTERCEPT_STATE__DPCS6_INTERCEPTB_STATE__SHIFT 0xa
+#define INTERCEPT_STATE__PWRSEQ0_INTERCEPTB_STATE_MASK 0x00000001L
+#define INTERCEPT_STATE__PWRSEQ1_INTERCEPTB_STATE_MASK 0x00000002L
+#define INTERCEPT_STATE__DPCS0_INTERCEPTB_STATE_MASK 0x00000010L
+#define INTERCEPT_STATE__DPCS1_INTERCEPTB_STATE_MASK 0x00000020L
+#define INTERCEPT_STATE__DPCS2_INTERCEPTB_STATE_MASK 0x00000040L
+#define INTERCEPT_STATE__DPCS3_INTERCEPTB_STATE_MASK 0x00000080L
+#define INTERCEPT_STATE__DPCS4_INTERCEPTB_STATE_MASK 0x00000100L
+#define INTERCEPT_STATE__DPCS5_INTERCEPTB_STATE_MASK 0x00000200L
+#define INTERCEPT_STATE__DPCS6_INTERCEPTB_STATE_MASK 0x00000400L
+#define DCIO_PATTERN_GEN_PAT__DCIO_PATTERN_GEN_PAT__SHIFT 0x0
+#define DCIO_PATTERN_GEN_PAT__DCIO_PATTERN_GEN_PAT_MASK 0xFFFFFFFFL
+#define DCIO_PATTERN_GEN_EN__DCIO_PATTERN_GEN_EN__SHIFT 0x0
+#define DCIO_PATTERN_GEN_EN__DCIO_PATTERN_GEN_EN_MASK 0x00000001L
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM0_GRP1_FRAME_START_DISP_SEL__SHIFT 0x0
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM1_GRP1_FRAME_START_DISP_SEL__SHIFT 0x4
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM0_GRP1_FRAME_START_DISP_SEL_MASK 0x00000007L
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM1_GRP1_FRAME_START_DISP_SEL_MASK 0x00000070L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x03000000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x03000000L
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET__SHIFT 0x0
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET__SHIFT 0x1
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET__SHIFT 0x2
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET__SHIFT 0x3
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET__SHIFT 0x4
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET__SHIFT 0x5
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET__SHIFT 0x6
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x8
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x9
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0xa
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0xb
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0xc
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0xd
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET__SHIFT 0xe
+#define DCIO_SOFT_RESET__PWRSEQ0_SOFT_RESET__SHIFT 0x10
+#define DCIO_SOFT_RESET__PWRSEQ1_SOFT_RESET__SHIFT 0x11
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET_MASK 0x00000001L
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET_MASK 0x00000002L
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET_MASK 0x00000004L
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET_MASK 0x00000008L
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET_MASK 0x00000010L
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET_MASK 0x00000020L
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET_MASK 0x00000040L
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x00000100L
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x00000200L
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x00000400L
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x00000800L
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x00001000L
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x00002000L
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET_MASK 0x00004000L
+#define DCIO_SOFT_RESET__PWRSEQ0_SOFT_RESET_MASK 0x00010000L
+#define DCIO_SOFT_RESET__PWRSEQ1_SOFT_RESET_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x5
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x6
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0xa
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0xc
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0xd
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0xe
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x12
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x14
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x15
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x16
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x18
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x1a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_STRENGTH_SN__SHIFT 0x1c
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x00000001L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x0000000CL
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x00000010L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x000000C0L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x00000C00L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x00001000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x00002000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x0000C000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x00010000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x000C0000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x00100000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x00C00000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x01000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x0C000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_STRENGTH_SN_MASK 0xF0000000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x0
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x8
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x10
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x14
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x15
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x16
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x17
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x00000001L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x00000100L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x00010000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x00100000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x00200000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x00400000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x00800000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x0
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x8
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x10
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x14
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x15
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x16
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x17
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x00000001L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x00000100L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x00010000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x00100000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x00200000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x00400000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x00800000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x0
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x8
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x10
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x14
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x15
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x16
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x17
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x00000001L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x00000100L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x00010000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x00100000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x00200000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x00400000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x00800000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x10
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x14
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x00100000L
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xF0000000L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x10
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x14
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x00100000L
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xF0000000L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x10
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x14
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x00100000L
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xF0000000L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x10
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x14
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x18
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x00010000L
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x00100000L
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xF0000000L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x0
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x8
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x0
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x0
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x8
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x0
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x3
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0xb
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0xc
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x13
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x14
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x18
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x1b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x1c
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x00000008L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x00000030L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x00000800L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x00003000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x00010000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x00080000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x00300000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x01000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x08000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x30000000L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x0
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x8
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x10
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x18
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x00000001L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x00000100L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x00010000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x01000000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x0
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x8
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x10
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x18
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x00000001L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x00000100L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x00010000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x01000000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x0
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x10
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x18
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x00000001L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x00010000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__HPD12_SPARE1__SHIFT 0xa
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x10
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI__SHIFT 0x11
+#define DC_GPIO_HPD_EN__HPD34_SPARE0__SHIFT 0x12
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x14
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI__SHIFT 0x15
+#define DC_GPIO_HPD_EN__HPD34_SPARE1__SHIFT 0x16
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x18
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI__SHIFT 0x19
+#define DC_GPIO_HPD_EN__HPD56_SPARE0__SHIFT 0x1a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI__SHIFT 0x1d
+#define DC_GPIO_HPD_EN__HPD56_SPARE1__SHIFT 0x1e
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x00000020L
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L
+#define DC_GPIO_HPD_EN__HPD12_SPARE1_MASK 0x00000400L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x00010000L
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI_MASK 0x00020000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE0_MASK 0x00040000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x00100000L
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI_MASK 0x00200000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE1_MASK 0x00400000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x01000000L
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI_MASK 0x02000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE0_MASK 0x04000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI_MASK 0x20000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE1_MASK 0x40000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x10
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x18
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x1a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x1c
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x00010000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x04000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN__SHIFT 0x14
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL__SHIFT 0x15
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_EN__SHIFT 0x19
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_SEL__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL_MASK 0x00E00000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_EN_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_SEL_MASK 0x1C000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x20000000L
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL__SHIFT 0x1e
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH_MASK 0x00000700L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH_MASK 0x00007000L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL_MASK 0xC0000000L
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL__SHIFT 0xe
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL__SHIFT 0x10
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL__SHIFT 0x12
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL__SHIFT 0x14
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL_MASK 0x0000C000L
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL_MASK 0x00030000L
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL_MASK 0x000C0000L
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL_MASK 0x00300000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN__SHIFT 0x14
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL__SHIFT 0x15
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_EN__SHIFT 0x19
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_SEL__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL_MASK 0x00E00000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_EN_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_SEL_MASK 0x1C000000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x20000000L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN__SHIFT 0x3
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN__SHIFT 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN__SHIFT 0x5
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN__SHIFT 0x6
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN__SHIFT 0x7
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN__SHIFT 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN__SHIFT 0x9
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN_MASK 0x00000008L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN_MASK 0x00000010L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN_MASK 0x00000020L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN_MASK 0x00000040L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN_MASK 0x00000080L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN_MASK 0x00000100L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL_MASK 0x00003000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL_MASK 0xC0000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00001800L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0xC0000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL_MASK 0x40000000L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN__SHIFT 0x0
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN__SHIFT 0x1
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN__SHIFT 0x2
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN__SHIFT 0x3
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN__SHIFT 0x4
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN__SHIFT 0x5
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN__SHIFT 0x6
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN__SHIFT 0x8
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN__SHIFT 0x9
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN__SHIFT 0xa
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN__SHIFT 0xb
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN__SHIFT 0xc
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN__SHIFT 0xd
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN__SHIFT 0xe
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN__SHIFT 0xf
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN__SHIFT 0x10
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN__SHIFT 0x11
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN__SHIFT 0x12
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN__SHIFT 0x13
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN_MASK 0x00000001L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN_MASK 0x00000002L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN_MASK 0x00000004L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN_MASK 0x00000008L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN_MASK 0x00000010L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN_MASK 0x00000020L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN_MASK 0x00000040L
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN_MASK 0x00000100L
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN_MASK 0x00000200L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN_MASK 0x00000400L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN_MASK 0x00000800L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN_MASK 0x00001000L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN_MASK 0x00002000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN_MASK 0x00004000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN_MASK 0x00008000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN_MASK 0x00010000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN_MASK 0x00020000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN_MASK 0x00040000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN_MASK 0x00080000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT 0x0
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT 0x1
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT 0x2
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT 0x3
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT 0x4
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT 0x5
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT 0x6
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT 0x8
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT 0x9
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT 0xe
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN__SHIFT 0xf
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN__SHIFT 0x10
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN__SHIFT 0x11
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN__SHIFT 0x12
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN__SHIFT 0x13
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK 0x00000001L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK 0x00000002L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK 0x00000004L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK 0x00000008L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK 0x00000010L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK 0x00000020L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK 0x00000040L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK 0x00000100L
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK 0x00000200L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK 0x00004000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN_MASK 0x00008000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN_MASK 0x00010000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN_MASK 0x00020000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN_MASK 0x00040000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK 0x03000000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK 0x0C000000L
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK 0x00000F00L
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK 0x0000F000L
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK 0x000F0000L
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK 0x00F00000L
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT 0xf
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT 0x17
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK 0x00008000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK 0x00400000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK 0x00800000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK 0x20000000L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT 0x2
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT 0x3
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT 0x4
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT 0x5
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK 0x00000004L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK 0x00000008L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK 0x00000010L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK 0x00000020L
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN__SHIFT 0x3
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN__SHIFT 0x4
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN__SHIFT 0x5
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN__SHIFT 0x6
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN__SHIFT 0x7
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN_MASK 0x00000008L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN_MASK 0x00000010L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN_MASK 0x00000020L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN_MASK 0x00000040L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN_MASK 0x00000080L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS__SHIFT 0x4
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV__SHIFT 0x6
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x14
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x16
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS_MASK 0x00000010L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV_MASK 0x000000C0L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00100000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00C00000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y__SHIFT 0x1
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y__SHIFT 0x9
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y__SHIFT 0x11
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y_MASK 0x00000002L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y_MASK 0x00000200L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y_MASK 0x00020000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD__SHIFT 0x9
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL__SHIFT 0xa
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD__SHIFT 0x11
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL__SHIFT 0x12
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD__SHIFT 0x19
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL__SHIFT 0x1a
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN_MASK 0x00000001L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_MASK 0x00000100L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD_MASK 0x00000200L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL_MASK 0x00000400L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_MASK 0x00010000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD_MASK 0x00020000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL_MASK 0x00040000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_MASK 0x01000000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD_MASK 0x02000000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL_MASK 0x04000000L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON__SHIFT 0x1
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN__SHIFT 0x2
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON__SHIFT 0x3
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE__SHIFT 0x4
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON_MASK 0x00000002L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON_MASK 0x00000008L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE_MASK 0x00000010L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE_MASK 0x00000F00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1_MASK 0x000000FFL
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2_MASK 0x0000FF00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1_MASK 0x00FF0000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2_MASK 0xFF000000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3_MASK 0x0000FF00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3_MASK 0x00FF0000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO__SHIFT 0x13
+#define PWRSEQ0_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED__SHIFT 0x14
+#define PWRSEQ0_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x15
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO_MASK 0x00080000L
+#define PWRSEQ0_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED_MASK 0x00100000L
+#define PWRSEQ0_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x00200000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN_MASK 0x80000000L
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV_MASK 0x0000007FL
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV_MASK 0x00007F00L
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE_MASK 0x00010000L
+#define PWRSEQ0_PWRSEQ_SPARE__PWRSEQ_SPARE__SHIFT 0x0
+#define PWRSEQ0_PWRSEQ_SPARE__PWRSEQ_SPARE_MASK 0xFFFFFFFFL
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN__SHIFT 0x3
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN__SHIFT 0x4
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN__SHIFT 0x5
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN__SHIFT 0x6
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN__SHIFT 0x7
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN_MASK 0x00000008L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN_MASK 0x00000010L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN_MASK 0x00000020L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN_MASK 0x00000040L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN_MASK 0x00000080L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS__SHIFT 0x4
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV__SHIFT 0x6
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x14
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x16
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS_MASK 0x00000010L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV_MASK 0x000000C0L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00100000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00C00000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y__SHIFT 0x1
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y__SHIFT 0x9
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y__SHIFT 0x11
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y_MASK 0x00000002L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y_MASK 0x00000200L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y_MASK 0x00020000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD__SHIFT 0x9
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL__SHIFT 0xa
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD__SHIFT 0x11
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL__SHIFT 0x12
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD__SHIFT 0x19
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL__SHIFT 0x1a
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN_MASK 0x00000001L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_MASK 0x00000100L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD_MASK 0x00000200L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL_MASK 0x00000400L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_MASK 0x00010000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD_MASK 0x00020000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL_MASK 0x00040000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_MASK 0x01000000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD_MASK 0x02000000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL_MASK 0x04000000L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON__SHIFT 0x1
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN__SHIFT 0x2
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON__SHIFT 0x3
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE__SHIFT 0x4
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON_MASK 0x00000002L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON_MASK 0x00000008L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE_MASK 0x00000010L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE_MASK 0x00000F00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1_MASK 0x000000FFL
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2_MASK 0x0000FF00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1_MASK 0x00FF0000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2_MASK 0xFF000000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3_MASK 0x0000FF00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3_MASK 0x00FF0000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO__SHIFT 0x13
+#define PWRSEQ1_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED__SHIFT 0x14
+#define PWRSEQ1_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x15
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO_MASK 0x00080000L
+#define PWRSEQ1_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED_MASK 0x00100000L
+#define PWRSEQ1_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x00200000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN_MASK 0x80000000L
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV_MASK 0x0000007FL
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV_MASK 0x00007F00L
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE_MASK 0x00010000L
+#define PWRSEQ1_PWRSEQ_SPARE__PWRSEQ_SPARE__SHIFT 0x0
+#define PWRSEQ1_PWRSEQ_SPARE__PWRSEQ_SPARE_MASK 0xFFFFFFFFL
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
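+/*
+ * Illustrative sketch only (not part of the generated header): every field
+ * FOO of a register REG above comes as a REG__FOO__SHIFT / REG__FOO_MASK
+ * pair, and driver code reads or updates the field by masking and shifting.
+ * The helpers and the example register value below are hypothetical; the
+ * real driver uses its own register accessor macros.
+ *
+ * static inline uint32_t get_field(uint32_t val, uint32_t mask, uint32_t shift)
+ * {
+ *	return (val & mask) >> shift;
+ * }
+ *
+ * static inline uint32_t set_field(uint32_t val, uint32_t mask, uint32_t shift,
+ *				    uint32_t field)
+ * {
+ *	return (val & ~mask) | ((field << shift) & mask);
+ * }
+ *
+ * Example: extract PIC_WIDTH from a DSCC0_DSCC_PPS_CONFIG2 register value:
+ *
+ *	uint32_t width = get_field(reg_val,
+ *				   DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK,
+ *				   DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT);
+ */
+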
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DWB_ENABLE_CLK_CTRL__DWB_ENABLE__SHIFT 0x0
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_R_DWB_GATE_DIS__SHIFT 0x4
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_G_DWB_GATE_DIS__SHIFT 0x8
+#define DWB_ENABLE_CLK_CTRL__DWB_TEST_CLK_SEL__SHIFT 0xc
+#define DWB_ENABLE_CLK_CTRL__DWB_ENABLE_MASK 0x00000001L
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_R_DWB_GATE_DIS_MASK 0x00000010L
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_G_DWB_GATE_DIS_MASK 0x00000100L
+#define DWB_ENABLE_CLK_CTRL__DWB_TEST_CLK_SEL_MASK 0x00003000L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_FORCE__SHIFT 0x8
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_DIS__SHIFT 0xa
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_STATE__SHIFT 0xc
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_FORCE__SHIFT 0x10
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_DIS__SHIFT 0x12
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_STATE__SHIFT 0x14
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_FORCE_MASK 0x00000300L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_DIS_MASK 0x00000400L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_STATE_MASK 0x00003000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_DIS_MASK 0x00040000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_STATE_MASK 0x00300000L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN__SHIFT 0x0
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_RATE__SHIFT 0x4
+#define FC_MODE_CTRL__FC_WINDOW_CROP_EN__SHIFT 0x8
+#define FC_MODE_CTRL__FC_EYE_SELECTION__SHIFT 0xc
+#define FC_MODE_CTRL__FC_STEREO_EYE_POLARITY__SHIFT 0x10
+#define FC_MODE_CTRL__FC_NEW_CONTENT__SHIFT 0x14
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_CURRENT__SHIFT 0x1f
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_MASK 0x00000001L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_RATE_MASK 0x00000030L
+#define FC_MODE_CTRL__FC_WINDOW_CROP_EN_MASK 0x00000100L
+#define FC_MODE_CTRL__FC_EYE_SELECTION_MASK 0x00003000L
+#define FC_MODE_CTRL__FC_STEREO_EYE_POLARITY_MASK 0x00010000L
+#define FC_MODE_CTRL__FC_NEW_CONTENT_MASK 0x00100000L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_CURRENT_MASK 0x80000000L
+#define FC_FLOW_CTRL__FC_FIRST_PIXEL_DELAY_COUNT__SHIFT 0x0
+#define FC_FLOW_CTRL__FC_FIRST_PIXEL_DELAY_COUNT_MASK 0x00000FFFL
+#define FC_WINDOW_START__FC_WINDOW_START_X__SHIFT 0x0
+#define FC_WINDOW_START__FC_WINDOW_START_Y__SHIFT 0x10
+#define FC_WINDOW_START__FC_WINDOW_START_X_MASK 0x00001FFFL
+#define FC_WINDOW_START__FC_WINDOW_START_Y_MASK 0x1FFF0000L
+#define FC_WINDOW_SIZE__FC_WINDOW_WIDTH__SHIFT 0x0
+#define FC_WINDOW_SIZE__FC_WINDOW_HEIGHT__SHIFT 0x10
+#define FC_WINDOW_SIZE__FC_WINDOW_WIDTH_MASK 0x00000FFFL
+#define FC_WINDOW_SIZE__FC_WINDOW_HEIGHT_MASK 0x0FFF0000L
+#define FC_SOURCE_SIZE__FC_SOURCE_WIDTH__SHIFT 0x0
+#define FC_SOURCE_SIZE__FC_SOURCE_HEIGHT__SHIFT 0x10
+#define FC_SOURCE_SIZE__FC_SOURCE_WIDTH_MASK 0x00007FFFL
+#define FC_SOURCE_SIZE__FC_SOURCE_HEIGHT_MASK 0x7FFF0000L
+#define DWB_UPDATE_CTRL__DWB_UPDATE_LOCK__SHIFT 0x0
+#define DWB_UPDATE_CTRL__DWB_UPDATE_PENDING__SHIFT 0x4
+#define DWB_UPDATE_CTRL__DWB_UPDATE_LOCK_MASK 0x00000001L
+#define DWB_UPDATE_CTRL__DWB_UPDATE_PENDING_MASK 0x00000010L
+#define DWB_CRC_CTRL__DWB_CRC_EN__SHIFT 0x0
+#define DWB_CRC_CTRL__DWB_CRC_CONT_EN__SHIFT 0x4
+#define DWB_CRC_CTRL__DWB_CRC_SRC_SEL__SHIFT 0x8
+#define DWB_CRC_CTRL__DWB_CRC_EN_MASK 0x00000001L
+#define DWB_CRC_CTRL__DWB_CRC_CONT_EN_MASK 0x00000010L
+#define DWB_CRC_CTRL__DWB_CRC_SRC_SEL_MASK 0x00000300L
+#define DWB_CRC_MASK_R_G__DWB_CRC_RED_MASK__SHIFT 0x0
+#define DWB_CRC_MASK_R_G__DWB_CRC_GREEN_MASK__SHIFT 0x10
+#define DWB_CRC_MASK_R_G__DWB_CRC_RED_MASK_MASK 0x0000FFFFL
+#define DWB_CRC_MASK_R_G__DWB_CRC_GREEN_MASK_MASK 0xFFFF0000L
+#define DWB_CRC_MASK_B_A__DWB_CRC_BLUE_MASK__SHIFT 0x0
+#define DWB_CRC_MASK_B_A__DWB_CRC_A_MASK__SHIFT 0x10
+#define DWB_CRC_MASK_B_A__DWB_CRC_BLUE_MASK_MASK 0x0000FFFFL
+#define DWB_CRC_MASK_B_A__DWB_CRC_A_MASK_MASK 0xFFFF0000L
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_RED__SHIFT 0x0
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_GREEN__SHIFT 0x10
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_RED_MASK 0x0000FFFFL
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_GREEN_MASK 0xFFFF0000L
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_BLUE__SHIFT 0x0
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_A__SHIFT 0x10
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_BLUE_MASK 0x0000FFFFL
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_A_MASK 0xFFFF0000L
+#define DWB_OUT_CTRL__OUT_FORMAT__SHIFT 0x0
+#define DWB_OUT_CTRL__OUT_DENORM__SHIFT 0x4
+#define DWB_OUT_CTRL__OUT_MAX__SHIFT 0x8
+#define DWB_OUT_CTRL__OUT_MIN__SHIFT 0x14
+#define DWB_OUT_CTRL__OUT_FORMAT_MASK 0x00000003L
+#define DWB_OUT_CTRL__OUT_DENORM_MASK 0x00000030L
+#define DWB_OUT_CTRL__OUT_MAX_MASK 0x0003FF00L
+#define DWB_OUT_CTRL__OUT_MIN_MASK 0x3FF00000L
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__SHIFT 0x0
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__DWB_MMHUBBUB_BACKPRESSURE_CNT_EN_MASK 0x00000001L
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT__DWB_MMHUBBUB_MAX_BACKPRESSURE__SHIFT 0x0
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT__DWB_MMHUBBUB_MAX_BACKPRESSURE_MASK 0x0000FFFFL
+#define DWB_HOST_READ_CONTROL__DWB_HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DWB_HOST_READ_CONTROL__DWB_HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_FLAG__SHIFT 0x0
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_ACK__SHIFT 0x8
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_MASK__SHIFT 0xc
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_STATUS__SHIFT 0x10
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_TYPE__SHIFT 0x14
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_FLAG_MASK 0x00000001L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_ACK_MASK 0x00000100L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_MASK_MASK 0x00001000L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_STATUS_MASK 0x00010000L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_TYPE_MASK 0x00100000L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_TYPE__SHIFT 0x0
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_X_CNT__SHIFT 0x4
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_Y_CNT__SHIFT 0x10
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_TYPE_MASK 0x00000003L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_X_CNT_MASK 0x0000FFF0L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_Y_CNT_MASK 0x0FFF0000L
+#define DWB_SOFT_RESET__DWB_SOFT_RESET__SHIFT 0x0
+#define DWB_SOFT_RESET__DWB_SOFT_RESET_MASK 0x00000001L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define DWB_HDR_MULT_COEF__DWB_HDR_MULT_COEF__SHIFT 0x0
+#define DWB_HDR_MULT_COEF__DWB_HDR_MULT_COEF_MASK 0x0007FFFFL
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE__SHIFT 0x0
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x18
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_CURRENT_MASK 0x03000000L
+#define DWB_GAMUT_REMAP_COEF_FORMAT__DWB_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define DWB_GAMUT_REMAP_COEF_FORMAT__DWB_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C11__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C12__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C11_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C12_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C13__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C14__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C13_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C14_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C21__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C22__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C21_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C22_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C23__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C24__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C23_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C24_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C31__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C32__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C31_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C32_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C33__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C34__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C33_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C34_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C11__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C12__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C11_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C12_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C13__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C14__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C13_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C14_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C21__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C22__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C21_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C22_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C23__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C24__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C23_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C24_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C31__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C32__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C31_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C32_MASK 0xFFFF0000L
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C33__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C34__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C33_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C34_MASK 0xFFFF0000L
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE__SHIFT 0x0
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT__SHIFT 0x4
+#define DWB_OGAM_CONTROL__DWB_OGAM_PWL_DISABLE__SHIFT 0x8
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_CURRENT__SHIFT 0x18
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_CURRENT__SHIFT 0x1c
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_MASK 0x00000003L
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_MASK 0x00000010L
+#define DWB_OGAM_CONTROL__DWB_OGAM_PWL_DISABLE_MASK 0x00000100L
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_CURRENT_MASK 0x03000000L
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_CURRENT_MASK 0x10000000L
+#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX__SHIFT 0x0
+#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK 0x000001FFL
+#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT 0x0
+#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//DWB_OGAM_LUT_CONTROL
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT 0x8
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT 0x10
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000030L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK 0x00000100L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK 0x00001000L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK 0x00010000L
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define DWB_OGAM_RAMA_START_BASE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_BASE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_BASE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_END_CNTL1_B__DWB_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_B__DWB_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define DWB_OGAM_RAMA_END_CNTL1_G__DWB_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_G__DWB_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define DWB_OGAM_RAMA_END_CNTL1_R__DWB_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_R__DWB_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define DWB_OGAM_RAMA_OFFSET_B__DWB_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_B__DWB_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define DWB_OGAM_RAMA_OFFSET_G__DWB_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_G__DWB_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define DWB_OGAM_RAMA_OFFSET_R__DWB_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_R__DWB_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define DWB_OGAM_RAMB_START_BASE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_BASE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_BASE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_END_CNTL1_B__DWB_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_B__DWB_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define DWB_OGAM_RAMB_END_CNTL1_G__DWB_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_G__DWB_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define DWB_OGAM_RAMB_END_CNTL1_R__DWB_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_R__DWB_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define DWB_OGAM_RAMB_OFFSET_B__DWB_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_B__DWB_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define DWB_OGAM_RAMB_OFFSET_G__DWB_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_G__DWB_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define DWB_OGAM_RAMB_OFFSET_R__DWB_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_R__DWB_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ__SHIFT 0x0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ_MASK 0x00000001L
+#define DCHVM_CTRL1__DUMMY1__SHIFT 0x0
+#define DCHVM_CTRL1__DUMMY1_MASK 0xFFFFFFFFL
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS__SHIFT 0x1
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS__SHIFT 0x4
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS__SHIFT 0x5
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE__SHIFT 0x8
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE__SHIFT 0xa
+#define DCHVM_CLK_CTRL__HVM_FGCG_REP_DIS__SHIFT 0xc
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS_MASK 0x00000002L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS_MASK 0x00000010L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS_MASK 0x00000020L
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE_MASK 0x00000300L
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE_MASK 0x00000C00L
+#define DCHVM_CLK_CTRL__HVM_FGCG_REP_DIS_MASK 0x00001000L
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS__SHIFT 0x0
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ__SHIFT 0x2
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS__SHIFT 0x4
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS_MASK 0x00000001L
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ_MASK 0x0000000CL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS_MASK 0x00000030L
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ__SHIFT 0x0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS__SHIFT 0x1
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ_MASK 0x00000001L
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS_MASK 0x00000002L
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE__SHIFT 0x0
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE__SHIFT 0x1
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE_MASK 0x00000001L
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE_MASK 0x00000002L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+#define APG0_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG0_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG0_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG0_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+#define APG0_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG0_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG0_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG0_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define APG0_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG0_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG0_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG0_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define APG0_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+#define APG0_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG0_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG0_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG0_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+#define APG0_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG0_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG0_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG0_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+#define APG0_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG0_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+#define DME5_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME5_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME5_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME5_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME5_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME5_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME5_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME5_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME5_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME5_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG5_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG5_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG5_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0_MASK 0x0000FFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2_MASK 0x0000FFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3_MASK 0xFFFF0000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x4
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32_MASK 0x00000010L
+#define DP_LINK_ENC0_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE__SHIFT 0x0
+#define DP_LINK_ENC0_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE_MASK 0xFFFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__MODE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__NUM_LANES__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__MODE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__NUM_LANES_MASK 0x00000300L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__STATUS__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RESET_STATUS__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__CURRENT_MODE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__STATUS_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RESET_STATUS_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__CURRENT_MODE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING_MASK 0x00001000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING_MASK 0x00030000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE_MASK 0x00000003L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3__SHIFT 0x18
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3__SHIFT 0x1c
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0_MASK 0x00000070L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1_MASK 0x00000700L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1_MASK 0x00007000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2_MASK 0x00070000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2_MASK 0x00700000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3_MASK 0x07000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3_MASK 0x70000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH_MASK 0x000000FFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR__SHIFT 0x3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION__SHIFT 0x6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW__SHIFT 0x7
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR_MASK 0x00000008L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION_MASK 0x00000040L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW_MASK 0x00000080L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE__SHIFT 0xa
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE__SHIFT 0x12
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE__SHIFT 0x18
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE__SHIFT 0x1a
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL__SHIFT 0x1c
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE_MASK 0x00000003L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL_MASK 0x000000F0L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE_MASK 0x00000300L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE_MASK 0x00000400L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL_MASK 0x0000F000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE_MASK 0x00030000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE_MASK 0x00040000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL_MASK 0x00F00000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE_MASK 0x03000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE_MASK 0x04000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL_MASK 0xF0000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE__SHIFT 0x6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT__SHIFT 0x11
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT__SHIFT 0x15
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE_MASK 0x000000C0L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE_MASK 0x00003F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF_MASK 0x00010000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT_MASK 0x000E0000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS_MASK 0x00100000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT_MASK 0x00600000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS_MASK 0xFFFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE_MASK 0x00FFFF00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT_MASK 0xFFFFFFFFL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+#define APG1_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG1_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG1_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG1_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+#define APG1_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG1_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG1_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG1_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG1_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG1_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define APG1_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG1_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG1_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG1_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define APG1_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+#define APG1_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG1_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG1_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG1_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+#define APG1_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG1_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG1_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG1_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+#define APG1_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG1_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+#define DME6_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME6_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME6_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME6_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME6_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME6_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME6_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME6_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME6_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME6_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG6_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG6_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG6_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0_MASK 0x0000FFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2_MASK 0x0000FFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3_MASK 0xFFFF0000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x4
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32_MASK 0x00000010L
+#define DP_LINK_ENC1_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE__SHIFT 0x0
+#define DP_LINK_ENC1_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE_MASK 0xFFFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__MODE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__NUM_LANES__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__MODE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__NUM_LANES_MASK 0x00000300L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__STATUS__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RESET_STATUS__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__CURRENT_MODE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__STATUS_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RESET_STATUS_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__CURRENT_MODE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING_MASK 0x00001000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING_MASK 0x00030000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE_MASK 0x00000003L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X_MASK 0xFE000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT_MASK 0x00007F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3__SHIFT 0x18
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3__SHIFT 0x1c
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0_MASK 0x00000070L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1_MASK 0x00000700L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1_MASK 0x00007000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2_MASK 0x00070000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2_MASK 0x00700000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3_MASK 0x07000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3_MASK 0x70000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH_MASK 0x000000FFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM_MASK 0x00FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR__SHIFT 0x3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION__SHIFT 0x6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW__SHIFT 0x7
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR_MASK 0x00000008L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION_MASK 0x00000040L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW_MASK 0x00000080L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE__SHIFT 0xa
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE__SHIFT 0x12
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE__SHIFT 0x18
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE__SHIFT 0x1a
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL__SHIFT 0x1c
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE_MASK 0x00000003L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL_MASK 0x000000F0L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE_MASK 0x00000300L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE_MASK 0x00000400L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL_MASK 0x0000F000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE_MASK 0x00030000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE_MASK 0x00040000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL_MASK 0x00F00000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE_MASK 0x03000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE_MASK 0x04000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL_MASK 0xF0000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE__SHIFT 0x6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT__SHIFT 0x11
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT__SHIFT 0x15
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE_MASK 0x000000C0L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE_MASK 0x00003F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF_MASK 0x00010000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT_MASK 0x000E0000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS_MASK 0x00100000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT_MASK 0x00600000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS_MASK 0xFFFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE_MASK 0x00FFFF00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT_MASK 0xFFFFFFFFL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+#define APG2_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG2_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG2_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG2_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+#define APG2_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG2_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG2_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG2_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG2_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG2_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define APG2_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG2_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG2_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG2_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define APG2_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+#define APG2_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG2_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG2_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG2_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+#define APG2_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG2_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG2_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG2_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+#define APG2_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG2_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+#define DME7_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME7_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME7_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME7_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME7_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME7_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME7_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME7_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME7_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME7_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG7_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG7_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG7_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0_MASK 0x0000FFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2_MASK 0x0000FFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3_MASK 0xFFFF0000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+#define APG3_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG3_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG3_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG3_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+#define APG3_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG3_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG3_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG3_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG3_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG3_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define APG3_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG3_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG3_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG3_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define APG3_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+#define APG3_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG3_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG3_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG3_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+#define APG3_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG3_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG3_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG3_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+#define APG3_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG3_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+#define DME8_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME8_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME8_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME8_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME8_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME8_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME8_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME8_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME8_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME8_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG8_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG8_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG8_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT0_MASK 0x0000FFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT1_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT2_MASK 0x0000FFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT3_MASK 0xFFFF0000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL__SHIFT 0x4
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL_MASK 0x00000030L
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET__SHIFT 0x16
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET__SHIFT 0x17
+#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET_MASK 0x00400000L
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET_MASK 0x00800000L
+#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L
+#define MPC_CRC_CTRL__MPC_CRC_EN__SHIFT 0x0
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN__SHIFT 0x4
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE__SHIFT 0x8
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN__SHIFT 0xa
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL__SHIFT 0x18
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED__SHIFT 0x1e
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK__SHIFT 0x1f
+#define MPC_CRC_CTRL__MPC_CRC_EN_MASK 0x00000001L
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN_MASK 0x00000010L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE_MASK 0x00000300L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN_MASK 0x00000400L
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL_MASK 0x03000000L
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED_MASK 0x40000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK_MASK 0x80000000L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL__SHIFT 0x0
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL__SHIFT 0x4
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DWB_SEL__SHIFT 0x8
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK__SHIFT 0x10
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL_MASK 0x0000000FL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL_MASK 0x000000F0L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DWB_SEL_MASK 0x00000300L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK_MASK 0xFFFF0000L
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_A__SHIFT 0x0
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_R__SHIFT 0x10
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_A_MASK 0x0000FFFFL
+#define MPC_CRC_RESULT_AR__MPC_CRC_RESULT_R_MASK 0xFFFF0000L
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_G__SHIFT 0x0
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_B__SHIFT 0x10
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_G_MASK 0x0000FFFFL
+#define MPC_CRC_RESULT_GB__MPC_CRC_RESULT_B_MASK 0xFFFF0000L
+#define MPC_CRC_RESULT_C__MPC_CRC_RESULT_C__SHIFT 0x0
+#define MPC_CRC_RESULT_C__MPC_CRC_RESULT_C_MASK 0x0000FFFFL
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN__SHIFT 0x0
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN_MASK 0x00000001L
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_SURFACE_UPDATE_PENDING__SHIFT 0x0
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CONFIG_UPDATE_PENDING__SHIFT 0x1
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CURSOR_UPDATE_PENDING__SHIFT 0x2
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_SURFACE_UPDATE_PENDING__SHIFT 0x4
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CONFIG_UPDATE_PENDING__SHIFT 0x5
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CURSOR_UPDATE_PENDING__SHIFT 0x6
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_SURFACE_UPDATE_PENDING__SHIFT 0x8
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CONFIG_UPDATE_PENDING__SHIFT 0x9
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CURSOR_UPDATE_PENDING__SHIFT 0xa
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_SURFACE_UPDATE_PENDING__SHIFT 0xc
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CONFIG_UPDATE_PENDING__SHIFT 0xd
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CURSOR_UPDATE_PENDING__SHIFT 0xe
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_SURFACE_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CONFIG_UPDATE_PENDING_MASK 0x00000002L
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CURSOR_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_SURFACE_UPDATE_PENDING_MASK 0x00000010L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CONFIG_UPDATE_PENDING_MASK 0x00000020L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CURSOR_UPDATE_PENDING_MASK 0x00000040L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_SURFACE_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CONFIG_UPDATE_PENDING_MASK 0x00000200L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CURSOR_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_SURFACE_UPDATE_PENDING_MASK 0x00001000L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CONFIG_UPDATE_PENDING_MASK 0x00002000L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CURSOR_UPDATE_PENDING_MASK 0x00004000L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP0_CONFIG_UPDATE_PENDING__SHIFT 0x0
+#define MPC_PENDING_STATUS_MISC__OUT_OPP1_CONFIG_UPDATE_PENDING__SHIFT 0x1
+#define MPC_PENDING_STATUS_MISC__OUT_OPP2_CONFIG_UPDATE_PENDING__SHIFT 0x2
+#define MPC_PENDING_STATUS_MISC__OUT_OPP3_CONFIG_UPDATE_PENDING__SHIFT 0x3
+#define MPC_PENDING_STATUS_MISC__MPCC0_CONFIG_UPDATE_PENDING__SHIFT 0x8
+#define MPC_PENDING_STATUS_MISC__MPCC1_CONFIG_UPDATE_PENDING__SHIFT 0x9
+#define MPC_PENDING_STATUS_MISC__MPCC2_CONFIG_UPDATE_PENDING__SHIFT 0xa
+#define MPC_PENDING_STATUS_MISC__MPCC3_CONFIG_UPDATE_PENDING__SHIFT 0xb
+#define MPC_PENDING_STATUS_MISC__IN_DWB0_CONFIG_UPDATE_PENDING__SHIFT 0x10
+#define MPC_PENDING_STATUS_MISC__OUT_OPP0_CONFIG_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP1_CONFIG_UPDATE_PENDING_MASK 0x00000002L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP2_CONFIG_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP3_CONFIG_UPDATE_PENDING_MASK 0x00000008L
+#define MPC_PENDING_STATUS_MISC__MPCC0_CONFIG_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_PENDING_STATUS_MISC__MPCC1_CONFIG_UPDATE_PENDING_MASK 0x00000200L
+#define MPC_PENDING_STATUS_MISC__MPCC2_CONFIG_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_PENDING_STATUS_MISC__MPCC3_CONFIG_UPDATE_PENDING_MASK 0x00000800L
+#define MPC_PENDING_STATUS_MISC__IN_DWB0_CONFIG_UPDATE_PENDING_MASK 0x00010000L
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+#define MPC_DWB0_MUX__MPC_DWB0_MUX__SHIFT 0x0
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS__SHIFT 0x4
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_MASK 0x0000000FL
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS_MASK 0x000000F0L
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+#define MPC_OUT2_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT2_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+#define MPC_OUT3_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT3_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT__SHIFT 0x2
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT__SHIFT 0x3
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT_MASK 0x00000004L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT_MASK 0x00000008L
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+#define AFMT5_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT5_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT5_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT5_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT5_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT5_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+#define VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG9_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+#define VPG9_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG9_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+#define DME9_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME9_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME9_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME9_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME9_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME9_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME9_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME9_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME9_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME9_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_GATE_DIS__SHIFT 0x1
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_R_GATE_DIS__SHIFT 0x4
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_GATE_DIS__SHIFT 0x5
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_R_GATE_DIS__SHIFT 0x8
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_G_GATE_DIS__SHIFT 0x9
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_R_GATE_DIS__SHIFT 0xc
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_G_GATE_DIS__SHIFT 0xd
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_R_GATE_DIS__SHIFT 0x10
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_G_GATE_DIS__SHIFT 0x11
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_R_GATE_DIS__SHIFT 0x12
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_G_GATE_DIS__SHIFT 0x13
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_R_GATE_DIS__SHIFT 0x14
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_G_GATE_DIS__SHIFT 0x15
+#define HPO_TOP_CLOCK_CONTROL__HPO_TEST_CLK_SEL__SHIFT 0x18
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_GATE_DIS_MASK 0x00000002L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_R_GATE_DIS_MASK 0x00000010L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_GATE_DIS_MASK 0x00000020L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_R_GATE_DIS_MASK 0x00000100L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_G_GATE_DIS_MASK 0x00000200L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_R_GATE_DIS_MASK 0x00001000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_G_GATE_DIS_MASK 0x00002000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_R_GATE_DIS_MASK 0x00010000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_G_GATE_DIS_MASK 0x00020000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_R_GATE_DIS_MASK 0x00040000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_G_GATE_DIS_MASK 0x00080000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_R_GATE_DIS_MASK 0x00100000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_G_GATE_DIS_MASK 0x00200000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_TEST_CLK_SEL_MASK 0xFF000000L
+#define HPO_TOP_HW_CONTROL__HPO_IO_EN__SHIFT 0x0
+#define HPO_TOP_HW_CONTROL__HPO_IO_EN_MASK 0x00000001L
+#define DP_STREAM_MAPPER_CONTROL0__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL0__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DP_STREAM_MAPPER_CONTROL1__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL1__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DP_STREAM_MAPPER_CONTROL2__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL2__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DP_STREAM_MAPPER_CONTROL3__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL3__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+#define ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+#define ABM0_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+#define ABM0_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM0_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM0_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM0_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define ABM0_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM0_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+#define ABM0_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+#define ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+#define ABM1_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+#define ABM1_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM1_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM1_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM1_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define ABM1_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM1_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+#define ABM1_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+#define ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+#define ABM2_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+#define ABM2_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM2_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM2_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM2_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define ABM2_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM2_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+#define ABM2_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+#define ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+#define ABM3_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+#define ABM3_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM3_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+#define ABM3_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM3_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define ABM3_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM3_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+#define ABM3_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_HOLD__SHIFT 0x14
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_HOLD_MASK 0xFFF00000L
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL2__RBBMIF_TIMEOUT_DIS__SHIFT 0x0
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL2__RBBMIF_TIMEOUT_DIS_MASK 0x00000001L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x0
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x1
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x5
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_TIMEOUT_STATUS_READBACK__SHIFT 0x18
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_STATUS_CLEAR__SHIFT 0x1f
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000001L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000006L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_ADDR_MASK 0x007FFFE0L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_TIMEOUT_STATUS_READBACK_MASK 0x01000000L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_STATUS_CLEAR_MASK 0x80000000L
+#define AZCONTROLLER1_CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000L
+#define AZCONTROLLER1_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define AZCONTROLLER1_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define AZCONTROLLER1_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x01L
+#define AZCONTROLLER1_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x02L
+#define AZCONTROLLER1_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define AZCONTROLLER1_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x01L
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_MASK 0x0003L
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0x00F0L
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+#define AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000L
+#define AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0x00FFL
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x01L
+#define AZCONTROLLER1_RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x02L
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x04L
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x01L
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x04L
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_MASK 0x0003L
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0x00F0L
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0x0FFFFFFFL
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xF0000000L
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0000FFFFL
+#define AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xFFFFFFFFL
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x00000001L
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x00000002L
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x00000001L
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007EL
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+#define AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED__SHIFT 0x3
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED__SHIFT 0x8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED__SHIFT 0xc
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED_MASK 0x0001L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED_MASK 0x00F8L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED_MASK 0x0F00L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED_MASK 0xF000L
+#define MINOR_VERSION__MINOR_VERSION__SHIFT 0x0
+#define MINOR_VERSION__MINOR_VERSION_MASK 0xFFL
+#define MAJOR_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define MAJOR_VERSION__MAJOR_VERSION_MASK 0xFFL
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xFFFFL
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xFFFFL
+#define GLOBAL_CONTROL__CONTROLLER_RESET__SHIFT 0x0
+#define GLOBAL_CONTROL__FLUSH_CONTROL__SHIFT 0x1
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE__SHIFT 0x8
+#define GLOBAL_CONTROL__CONTROLLER_RESET_MASK 0x00000001L
+#define GLOBAL_CONTROL__FLUSH_CONTROL_MASK 0x00000002L
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE_MASK 0x00000100L
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG__SHIFT 0x0
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG_MASK 0x0001L
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS__SHIFT 0x0
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS_MASK 0x0001L
+#define GLOBAL_STATUS__FLUSH_STATUS__SHIFT 0x1
+#define GLOBAL_STATUS__FLUSH_STATUS_MASK 0x00000002L
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x0
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFFL
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x0
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFFL
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE__SHIFT 0x1
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE__SHIFT 0x2
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE__SHIFT 0x3
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE__SHIFT 0x4
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE__SHIFT 0x5
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE__SHIFT 0x6
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE__SHIFT 0x7
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE__SHIFT 0x8
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE__SHIFT 0x9
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE__SHIFT 0xa
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE__SHIFT 0xb
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE__SHIFT 0xc
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE__SHIFT 0xd
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE__SHIFT 0xe
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE__SHIFT 0xf
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE__SHIFT 0x1e
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE__SHIFT 0x1f
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE_MASK 0x00000001L
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE_MASK 0x00000002L
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE_MASK 0x00000004L
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE_MASK 0x00000008L
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE_MASK 0x00000010L
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE_MASK 0x00000020L
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE_MASK 0x00000040L
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE_MASK 0x00000080L
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE_MASK 0x00000100L
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE_MASK 0x00000200L
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE_MASK 0x00000400L
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE_MASK 0x00000800L
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE_MASK 0x00001000L
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE_MASK 0x00002000L
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE_MASK 0x00004000L
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE_MASK 0x00008000L
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE_MASK 0x40000000L
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE_MASK 0x80000000L
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS__SHIFT 0x0
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS__SHIFT 0x1
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS__SHIFT 0x2
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS__SHIFT 0x3
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS__SHIFT 0x4
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS__SHIFT 0x5
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS__SHIFT 0x6
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS__SHIFT 0x7
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS__SHIFT 0x8
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS__SHIFT 0x9
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS__SHIFT 0xa
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS__SHIFT 0xb
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS__SHIFT 0xc
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS__SHIFT 0xd
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS__SHIFT 0xe
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS__SHIFT 0xf
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS__SHIFT 0x1e
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS__SHIFT 0x1f
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS_MASK 0x00000001L
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS_MASK 0x00000002L
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS_MASK 0x00000004L
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS_MASK 0x00000008L
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS_MASK 0x00000010L
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS_MASK 0x00000020L
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS_MASK 0x00000040L
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS_MASK 0x00000080L
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS_MASK 0x00000100L
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS_MASK 0x00000200L
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS_MASK 0x00000400L
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS_MASK 0x00000800L
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS_MASK 0x00001000L
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS_MASK 0x00002000L
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS_MASK 0x00004000L
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS_MASK 0x00008000L
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS_MASK 0x40000000L
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS_MASK 0x80000000L
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER__SHIFT 0x0
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER_MASK 0xFFFFFFFFL
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION__SHIFT 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION__SHIFT 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION__SHIFT 0x3
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION__SHIFT 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION__SHIFT 0x5
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION__SHIFT 0x6
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION__SHIFT 0x7
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION__SHIFT 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION__SHIFT 0x9
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION__SHIFT 0xa
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION__SHIFT 0xb
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION__SHIFT 0xc
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION__SHIFT 0xd
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION__SHIFT 0xe
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION__SHIFT 0xf
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION_MASK 0x00000001L
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION_MASK 0x00000002L
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION_MASK 0x00000004L
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION_MASK 0x00000008L
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION_MASK 0x00000010L
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION_MASK 0x00000020L
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION_MASK 0x00000040L
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION_MASK 0x00000080L
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION_MASK 0x00000100L
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION_MASK 0x00000200L
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION_MASK 0x00000400L
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION_MASK 0x00000800L
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION_MASK 0x00001000L
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION_MASK 0x00002000L
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION_MASK 0x00004000L
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION_MASK 0x00008000L
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+#define AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xFFFFFFFFL
+
+
+#define AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xFFFFFFFFL
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL
+#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_ROOT_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_ROOT_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_ROOT_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_ROOT_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_ROOT_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_FE_ROOT_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_FE_ROOT_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_FE_ROOT_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_FE_ROOT_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_FE_ROOT_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_ROOT_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_ROOT_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_ROOT_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_ROOT_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_ROOT_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_FE_ROOT_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_FE_ROOT_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_FE_ROOT_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_FE_ROOT_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_FE_ROOT_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYASYMCLK_ROOT_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL2__PHYBSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL2__PHYCSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL2__PHYDSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL2__PHYESYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL2__PHYASYMCLK_ROOT_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYBSYMCLK_ROOT_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYCSYMCLK_ROOT_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYDSYMCLK_ROOT_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYESYMCLK_ROOT_GATE_DISABLE_MASK 0x10000000L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_SRC_SEL_MASK 0x00000700L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_SRC_SEL_MASK 0x00000700L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_EN__SHIFT 0x4
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_EN_MASK 0x00000010L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_SRC_SEL_MASK 0x00000700L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_EN__SHIFT 0x4
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_EN_MASK 0x00000010L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_SRC_SEL_MASK 0x00000700L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_EN__SHIFT 0x4
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_EN_MASK 0x00000010L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_SRC_SEL_MASK 0x00000700L
+#define DSCCLK_DTO_CTRL__DSCCLK0_EN__SHIFT 0x0
+#define DSCCLK_DTO_CTRL__DSCCLK1_EN__SHIFT 0x1
+#define DSCCLK_DTO_CTRL__DSCCLK2_EN__SHIFT 0x2
+#define DSCCLK_DTO_CTRL__DSCCLK3_EN__SHIFT 0x3
+#define DSCCLK_DTO_CTRL__DSCCLK4_EN__SHIFT 0x4
+#define DSCCLK_DTO_CTRL__DSCCLK5_EN__SHIFT 0x5
+#define DSCCLK_DTO_CTRL__DSCCLK0_EN_MASK 0x00000001L
+#define DSCCLK_DTO_CTRL__DSCCLK1_EN_MASK 0x00000002L
+#define DSCCLK_DTO_CTRL__DSCCLK2_EN_MASK 0x00000004L
+#define DSCCLK_DTO_CTRL__DSCCLK3_EN_MASK 0x00000008L
+#define DSCCLK_DTO_CTRL__DSCCLK4_EN_MASK 0x00000010L
+#define DSCCLK_DTO_CTRL__DSCCLK5_EN_MASK 0x00000020L
+#define DPPCLK_CTRL__DPPCLK0_EN__SHIFT 0x0
+#define DPPCLK_CTRL__DPPCLK1_EN__SHIFT 0x3
+#define DPPCLK_CTRL__DPPCLK2_EN__SHIFT 0x6
+#define DPPCLK_CTRL__DPPCLK3_EN__SHIFT 0x9
+#define DPPCLK_CTRL__DPPCLK0_EN_MASK 0x00000001L
+#define DPPCLK_CTRL__DPPCLK1_EN_MASK 0x00000008L
+#define DPPCLK_CTRL__DPPCLK2_EN_MASK 0x00000040L
+#define DPPCLK_CTRL__DPPCLK3_EN_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK0_ROOT_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK1_ROOT_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK2_ROOT_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK3_ROOT_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK0_ROOT_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK1_ROOT_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK2_ROOT_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK3_ROOT_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL6__HDMISTREAMCLK0_ROOT_GATE_DISABLE__SHIFT 0xf
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK0_ROOT_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK1_ROOT_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK2_ROOT_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK3_ROOT_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK0_ROOT_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK1_ROOT_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK2_ROOT_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK3_ROOT_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL6__HDMISTREAMCLK0_ROOT_GATE_DISABLE_MASK 0x00008000L
+#define SYMCLK_PSP_CNTL__SYMCLK_PSP_FORCE_ON__SHIFT 0x0
+#define SYMCLK_PSP_CNTL__SYMCLK_PSP_FORCE_ON_MASK 0x00000001L
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_EN__SHIFT 0x0
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_EN_MASK 0x00000001L
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_SRC_SEL_MASK 0x00000030L
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_EN__SHIFT 0x0
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_EN_MASK 0x00000001L
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_SRC_SEL_MASK 0x00000030L
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_EN__SHIFT 0x0
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_EN_MASK 0x00000001L
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_SRC_SEL_MASK 0x00000030L
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_EN__SHIFT 0x0
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_EN_MASK 0x00000001L
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_SRC_SEL_MASK 0x00000030L
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_EN__SHIFT 0x0
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_EN_MASK 0x00000001L
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_SRC_SEL_MASK 0x00000030L
+
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SEC_LVL__SHIFT 0x0
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SEC_LVL_MASK 0x00000007L
+
+
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_CORE_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_CORE_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000800L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_DEST__SHIFT 0x8
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_DEST__SHIFT 0x9
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_DEST_MASK 0x00000200L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+
+
+#define CC_DC_PIPE_DIS__DC_FULL_DIS__SHIFT 0xc
+#define CC_DC_PIPE_DIS__DC_FULL_DIS_MASK 0x00001000L
+#define DMU_CLK_CNTL__RIOMMU_CLK_SEL__SHIFT 0x8
+#define DMU_CLK_CNTL__RBBMIF_FGCG_REP_DIS__SHIFT 0xc
+#define DMU_CLK_CNTL__DMCUB_DMCUBCLK_SRC_SEL__SHIFT 0xd
+#define DMU_CLK_CNTL__DPREFCLK_ALLOW_DS_CLKSTOP__SHIFT 0x10
+#define DMU_CLK_CNTL__DISPCLK_ALLOW_DS_CLKSTOP__SHIFT 0x12
+#define DMU_CLK_CNTL__DPPCLK_ALLOW_DS_CLKSTOP__SHIFT 0x14
+#define DMU_CLK_CNTL__DTBCLK_ALLOW_DS_CLKSTOP__SHIFT 0x16
+#define DMU_CLK_CNTL__DCFCLK_ALLOW_DS_CLKSTOP__SHIFT 0x18
+#define DMU_CLK_CNTL__DPIACLK_ALLOW_DS_CLKSTOP__SHIFT 0x1a
+#define DMU_CLK_CNTL__LONO_FGCG_REP_DIS__SHIFT 0x1c
+#define DMU_CLK_CNTL__LONO_DISPCLK_GATE_DISABLE__SHIFT 0x1d
+#define DMU_CLK_CNTL__LONO_SOCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DMU_CLK_CNTL__LONO_DMCUBCLK_GATE_DISABLE__SHIFT 0x1f
+#define DMU_CLK_CNTL__RIOMMU_CLK_SEL_MASK 0x00000100L
+#define DMU_CLK_CNTL__RBBMIF_FGCG_REP_DIS_MASK 0x00001000L
+#define DMU_CLK_CNTL__DMCUB_DMCUBCLK_SRC_SEL_MASK 0x00006000L
+#define DMU_CLK_CNTL__DPREFCLK_ALLOW_DS_CLKSTOP_MASK 0x00030000L
+#define DMU_CLK_CNTL__DISPCLK_ALLOW_DS_CLKSTOP_MASK 0x000C0000L
+#define DMU_CLK_CNTL__DPPCLK_ALLOW_DS_CLKSTOP_MASK 0x00300000L
+#define DMU_CLK_CNTL__DTBCLK_ALLOW_DS_CLKSTOP_MASK 0x00C00000L
+#define DMU_CLK_CNTL__DCFCLK_ALLOW_DS_CLKSTOP_MASK 0x03000000L
+#define DMU_CLK_CNTL__DPIACLK_ALLOW_DS_CLKSTOP_MASK 0x0C000000L
+#define DMU_CLK_CNTL__LONO_FGCG_REP_DIS_MASK 0x10000000L
+#define DMU_CLK_CNTL__LONO_DISPCLK_GATE_DISABLE_MASK 0x20000000L
+#define DMU_CLK_CNTL__LONO_SOCCLK_GATE_DISABLE_MASK 0x40000000L
+#define DMU_CLK_CNTL__LONO_DMCUBCLK_GATE_DISABLE_MASK 0x80000000L
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_INT__SHIFT 0x0
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG__SHIFT 0x10
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_INT_MASK 0x00000001L
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_MASK 0xFFFF0000L
+#define ZSC_CNTL__LONO_PWR_DN__SHIFT 0x8
+#define ZSC_CNTL__LONO_PWR_DN_MASK 0x00000100L
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define ZPR_CLK_UNGATE_DELAY__ZPR_CLK_UNGATE_DELAY__SHIFT 0x0
+#define ZPR_CLK_UNGATE_DELAY__ZPR_CLK_UNGATE_DELAY_MASK 0x000000FFL
+
+
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN22_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN22_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN22_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN22_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN23_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN23_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN23_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN23_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN24_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN24_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN24_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN24_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+#define DOMAIN25_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN25_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN25_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN25_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+#define LONO_MEM_PWR_REQ_CNTL__LONO_MEM_PWR_REQ_DIS__SHIFT 0x0
+#define LONO_MEM_PWR_REQ_CNTL__LONO_MEM_PWR_REQ_DIS_MASK 0x00000001L
+
+
+#define DMCUB_INTERRUPT_STATUS__DMCUB_PWR_UP_TRIG_INT_STAT__SHIFT 0x15
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OTG_RESYNC_TRIG_INT_STAT__SHIFT 0x16
+#define DMCUB_INTERRUPT_STATUS__DMCUB_PWR_UP_TRIG_INT_STAT_MASK 0x00200000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OTG_RESYNC_TRIG_INT_STAT_MASK 0x00400000L
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL__SHIFT 0x0
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL_MASK 0x00000007L
+#define DMCUB_SCRATCH16__DMCUB_SCRATCH16__SHIFT 0x0
+#define DMCUB_SCRATCH16__DMCUB_SCRATCH16_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH17__DMCUB_SCRATCH17__SHIFT 0x0
+#define DMCUB_SCRATCH17__DMCUB_SCRATCH17_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH18__DMCUB_SCRATCH18__SHIFT 0x0
+#define DMCUB_SCRATCH18__DMCUB_SCRATCH18_MASK 0xFFFFFFFFL
+#define DMCUB_REGION3_TMR_AXI_SPACE__DMCUB_REGION3_TMR_AXI_SPACE__SHIFT 0x0
+#define DMCUB_REGION3_TMR_AXI_SPACE__DMCUB_REGION3_TMR_AXI_SPACE_MASK 0x07L
+#define DMCUB_SCRATCH19__DMCUB_SCRATCH19__SHIFT 0x0
+#define DMCUB_SCRATCH19__DMCUB_SCRATCH19_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH20__DMCUB_SCRATCH20__SHIFT 0x0
+#define DMCUB_SCRATCH20__DMCUB_SCRATCH20_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH21__DMCUB_SCRATCH21__SHIFT 0x0
+#define DMCUB_SCRATCH21__DMCUB_SCRATCH21_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH22__DMCUB_SCRATCH22__SHIFT 0x0
+#define DMCUB_SCRATCH22__DMCUB_SCRATCH22_MASK 0xFFFFFFFFL
+#define DMCUB_SCRATCH23__DMCUB_SCRATCH23__SHIFT 0x0
+#define DMCUB_SCRATCH23__DMCUB_SCRATCH23_MASK 0xFFFFFFFFL
+
+#define DWB_ENABLE_CLK_CTRL__DWB_FGCG_REP_DIS__SHIFT 0x18
+#define DWB_ENABLE_CLK_CTRL__DWB_FGCG_REP_DIS_MASK 0x01000000L
+
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_UCLK_PSTATE_CHANGE_DURATION_VBI__SHIFT 0x0
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_FCLK_PSTATE_CHANGE_DURATION_VBI__SHIFT 0x10
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_UCLK_PSTATE_CHANGE_DURATION_VBI_MASK 0x0000FFFFL
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_FCLK_PSTATE_CHANGE_DURATION_VBI_MASK 0xFFFF0000L
+
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_TYPE__SHIFT 0x1f
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_TYPE_MASK 0x80000000L
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK 0x001FFFFFL
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x07000000L
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_FGCG_REP_DIS__SHIFT 0x11
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_FGCG_REP_DIS_MASK 0x00020000L
+
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x10
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFF0000L
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x10
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFF0000L
+
+#define AZ_CLOCK_CNTL__AZ_GLOBAL_FGCG_REP_DIS__SHIFT 0x1
+#define AZ_CLOCK_CNTL__SCLK_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__SCLK_TURN_ON_DELAY__SHIFT 0x14
+#define AZ_CLOCK_CNTL__SCLK_TURN_OFF_DELAY__SHIFT 0x18
+#define AZ_CLOCK_CNTL__AZ_GLOBAL_FGCG_REP_DIS_MASK 0x00000002L
+#define AZ_CLOCK_CNTL__SCLK_GATE_DIS_MASK 0x00010000L
+#define AZ_CLOCK_CNTL__SCLK_TURN_ON_DELAY_MASK 0x00F00000L
+#define AZ_CLOCK_CNTL__SCLK_TURN_OFF_DELAY_MASK 0xFF000000L
+#define AZ_MEM_GLOBAL_PWR_REQ_CNTL__AZ_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define AZ_MEM_GLOBAL_PWR_REQ_CNTL__AZ_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0x9
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000200L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_CSTATE_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0x2
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__ENABLE_QOS_FORCE_PSTATE__SHIFT 0x7
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE__SHIFT 0xd
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DISABLE_HOSTVM_FORCE_DCFCLK_DEEP_SLEEP__SHIFT 0xf
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_CSTATE_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000004L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__ENABLE_QOS_FORCE_PSTATE_MASK 0x00000080L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE_MASK 0x00002000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DISABLE_HOSTVM_FORCE_DCFCLK_DEEP_SLEEP_MASK 0x00008000L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__USR_RETRAINING_REQUEST__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__ALLOW_USR_RETRAINING__SHIFT 0x1
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE__SHIFT 0x8
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE__SHIFT 0x9
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0xa
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE__SHIFT 0xb
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__USR_RETRAINING_REQUEST_MASK 0x00000001L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__ALLOW_USR_RETRAINING_MASK 0x00000002L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE_MASK 0x00000100L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE_MASK 0x00000200L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000400L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE_MASK 0x00000800L
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D_MASK 0x00003FFFL
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_USR_RETRAINING__SHIFT 0x2
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_USR_RETRAINING_MASK 0x00000004L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__PSTATE_CHANGE_TYPE__SHIFT 0x18
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__PSTATE_CHANGE_TYPE_MASK 0x01000000L
+#define DCHUBBUB_ARB_MALL_CNTL__GLOBAL_USE_MALL_FOR_SS__SHIFT 0x0
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_IN_USE__SHIFT 0x4
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define DCHUBBUB_ARB_MALL_CNTL__GLOBAL_USE_MALL_FOR_SS_MASK 0x00000001L
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_IN_USE_MASK 0x00000010L
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+
+
+#define DCHUBBUB_SDPIF_CFG0__DF_CSTATE_DISALLOW__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG0__DF_CSTATE_DISALLOW_MASK 0x00010000L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_MAX_NUM_OUTSTANDING__SHIFT 0x9
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_MAX_NUM_OUTSTANDING_MASK 0x00000200L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x0000F000L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE0_NOALLOC__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE1_NOALLOC__SHIFT 0x1
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE2_NOALLOC__SHIFT 0x2
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE3_NOALLOC__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE0_NOALLOC_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE1_NOALLOC_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE2_NOALLOC_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE3_NOALLOC_MASK 0x00000008L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x0000F000L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE0_DCCMETA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE1_DCCMETA_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE2_DCCMETA_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE3_DCCMETA_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE0_DCCMETA_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE1_DCCMETA_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE2_DCCMETA_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE3_DCCMETA_SEC_LVL_MASK 0x0000F000L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE0_CURSOR0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE1_CURSOR0_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE2_CURSOR0_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE3_CURSOR0_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE0_CURSOR0_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE1_CURSOR0_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE2_CURSOR0_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE3_CURSOR0_SEC_LVL_MASK 0x0000F000L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE0_GPUVM_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE1_GPUVM_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE2_GPUVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE3_GPUVM_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE0_GPUVM_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE1_GPUVM_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE2_GPUVM_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE3_GPUVM_SEC_LVL_MASK 0x0000F000L
+#define SDPIF_REQUEST_RATE_LIMIT__SDPIF_REQUEST_RATE_LIMIT__SHIFT 0x0
+#define SDPIF_REQUEST_RATE_LIMIT__SDPIF_REQUEST_RATE_LIMIT_MASK 0x00000FFFL
+
+
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_ACTIVE_ENTER_LATENCY__SHIFT 0x8
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_IDLE_ENTER_LATENCY__SHIFT 0xc
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_ACTIVE_ENTER_LATENCY_MASK 0x00000F00L
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_IDLE_ENTER_LATENCY_MASK 0x0000F000L
+
+#define HUBP0_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP0_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP0_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP0_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP0_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP0_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP0_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP0_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+#define HUBP0_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP0_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+#define HUBP0_HUBP_DEBUG_CTRL__HUBP_DBG_EN__SHIFT 0x0
+#define HUBP0_HUBP_DEBUG_CTRL__HUBP_DBG_HUBP_DCFCLK_G_DIS__SHIFT 0x4
+#define HUBP0_HUBP_DEBUG_CTRL__HUBP_DBG_EN_MASK 0x00000001L
+#define HUBP0_HUBP_DEBUG_CTRL__HUBP_DBG_HUBP_DCFCLK_G_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP0_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP0_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP0_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP0_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP0_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP0_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP0_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP0_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP0_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP0_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP0_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP0_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP0_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP0_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP0_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+
+
+#define HUBP1_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP1_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP1_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP1_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP1_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP1_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP1_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP1_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+#define HUBP1_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP1_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP1_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP1_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP1_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP1_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP1_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP1_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP1_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP1_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP1_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP1_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP1_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP1_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP1_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP1_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP1_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+
+
+#define HUBP2_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP2_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP2_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP2_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP2_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP2_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP2_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP2_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+#define HUBP2_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP2_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP2_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP2_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP2_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP2_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP2_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP2_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP2_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP2_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP2_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP2_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP2_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP2_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP2_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP2_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP2_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+
+
+#define HUBP3_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP3_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP3_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP3_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP3_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP3_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP3_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP3_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+#define HUBP3_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP3_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP3_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP3_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP3_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP3_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP3_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP3_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP3_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP3_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP3_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP3_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP3_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP3_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP3_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP3_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP3_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+
+#define DPP_TOP0_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP0_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+
+
+#define DPP_TOP1_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP1_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+
+
+#define DPP_TOP2_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP2_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+
+
+#define DPP_TOP3_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP3_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+
+
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+
+
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+
+
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+
+
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+
+
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
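The MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL definitions above follow the usual pattern of paired __SHIFT/_MASK macros per bit field. As a minimal illustrative sketch (not part of the patch), this is one way a driver could use such a pair for a read-modify-write of a single field; read_reg()/write_reg() and the register offset parameter are hypothetical stand-ins for the driver's real accessors and offset header, and the header defining these macros is assumed to be included.

#include <linux/types.h>

/* Hypothetical register accessors standing in for the driver's real ones. */
u32 read_reg(u32 offset);
void write_reg(u32 offset, u32 value);

/* Force the MCM shaper memory power state to 1 (illustration only). */
static void mpcc_mcm3_force_shaper_mem_pwr(u32 mem_pwr_ctrl_offset)
{
	u32 val = read_reg(mem_pwr_ctrl_offset);

	/* Clear the two-bit SHAPER_MEM_PWR_FORCE field, then set it to 1. */
	val &= ~MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK;
	val |= 1u << MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT;

	write_reg(mem_pwr_ctrl_offset, val);
}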
+#define OPP_TOP_CLK_CONTROL__OPP_FGCG_REP_DIS__SHIFT 0x18
+#define OPP_TOP_CLK_CONTROL__OPP_FGCG_REP_DIS_MASK 0x01000000L
+
+#define OTG0_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG0_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+#define OTG0_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG0_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+#define OTG0_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG0_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG0_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG0_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG0_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+
+
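The OTG0 readback-window fields above pack a 15-bit start and a 15-bit end coordinate into one 32-bit register. A hedged sketch (not part of the patch) of decoding the CRC0 window-A X register using the masks and shifts defined above; raw_value would come from a register read elsewhere in the driver.

#include <linux/types.h>

/* Unpack the CRC0 window-A X start/end readback coordinates (illustration only). */
static void otg0_crc0_windowa_x_decode(u32 raw_value, u32 *start, u32 *end)
{
	*start = (raw_value & OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK)
		 >> OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT;
	*end = (raw_value & OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK)
	       >> OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT;
}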
+#define OTG1_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG1_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+#define OTG1_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG1_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+#define OTG1_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG1_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG1_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG1_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG1_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+
+
+#define OTG2_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG2_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+#define OTG2_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG2_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+#define OTG2_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG2_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG2_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG2_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG2_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+
+
+#define OTG3_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG3_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+#define OTG3_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG3_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+#define OTG3_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG3_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG3_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG3_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+#define OTG3_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+
+#define OPTC_DLPC_CONTROL__OPTC_DLPC_SNAPSHOT_MUX__SHIFT 0x0
+#define OPTC_DLPC_CONTROL__OPTC_DLPC_SNAPSHOT_MUX_MASK 0x00000007L
+
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP0_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP0_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP0_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP0_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+#define DIG0_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG0_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG0_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG0_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG0_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG0_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG0_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+
+
+
+
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP1_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP1_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP1_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP1_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+#define DIG1_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG1_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG1_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG1_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG1_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG1_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG1_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+
+
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP2_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP2_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP2_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP2_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+#define DIG2_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG2_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG2_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG2_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG2_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG2_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG2_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+
+
+
+
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP3_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP3_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP3_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP3_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+#define DIG3_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG3_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG3_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG3_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG3_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG3_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG3_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+
+
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP4_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP4_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP4_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP4_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+#define DIG4_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG4_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG4_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG4_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG4_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG4_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG4_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+
+
+
+
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+
+
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+
+
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+
+
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+
+
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+
+
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_EN_MASK 0x00000008L
+
+
+#define DIO_DCN_STATUS__DCN_ACTIVE__SHIFT 0x0
+#define DIO_DCN_STATUS__DCN_ACTIVE_MASK 0x00000001L
+#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL__SHIFT 0x0
+#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS__SHIFT 0x9
+#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL__REFCLK_R_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL__REFCLK_G_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL__SOCCLK_G_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL__SYMCLK_FE_R_GATE_DIS__SHIFT 0xe
+#define DIO_CLK_CNTL__SYMCLK_FE_G_GATE_DIS__SHIFT 0xf
+#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT 0x10
+#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT 0x11
+#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT 0x1c
+#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK 0x0000007FL
+#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK 0x00000200L
+#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL__REFCLK_R_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL__REFCLK_G_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL__SOCCLK_G_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL__SYMCLK_FE_R_GATE_DIS_MASK 0x00004000L
+#define DIO_CLK_CNTL__SYMCLK_FE_G_GATE_DIS_MASK 0x00008000L
+#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK 0x00010000L
+#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK 0x00020000L
+#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK 0x00100000L
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK 0x00800000L
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK 0x10000000L
+
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DIO_STATUS__DIO_EN__SHIFT 0x0
+#define DIO_STATUS__DIO_EN_MASK 0x00000001L
+
+
+#define DIG0_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG0_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DIG1_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG1_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DIG2_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG2_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DIG3_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG3_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+#define DIG4_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG4_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+
+
+
+
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+#define INTERCEPT_STATE__DLPC_INTERCEPTB_STATE__SHIFT 0x2
+#define INTERCEPT_STATE__DLPC_INTERCEPTB_STATE_MASK 0x00000004L
+#define DCIO_SOFT_RESET__DLPC_SOFT_RESET__SHIFT 0x14
+#define DCIO_SOFT_RESET__DLPC_SOFT_RESET_MASK 0x00100000L
+
+
+#define DC_GPIO_DDCVGA_MASK__DDCVGA_INVERT_INPUT_POLARITY__SHIFT 0x4
+#define DC_GPIO_DDCVGA_MASK__DDCVGA_INVERT_INPUT_POLARITY_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICA_S0__SHIFT 0x0
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICB_S0__SHIFT 0x1
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICC_S0__SHIFT 0x2
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICD_S0__SHIFT 0x3
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICE_S0__SHIFT 0x4
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICF_S0__SHIFT 0x5
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICG_S0__SHIFT 0x6
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_CLK_S0__SHIFT 0x8
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_VSYNC_S0__SHIFT 0x9
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_A_S0__SHIFT 0xa
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_B_S0__SHIFT 0xb
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICA_S0_MASK 0x00000001L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICB_S0_MASK 0x00000002L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICC_S0_MASK 0x00000004L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICD_S0_MASK 0x00000008L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICE_S0_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICF_S0_MASK 0x00000020L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICG_S0_MASK 0x00000040L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_CLK_S0_MASK 0x00000100L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_VSYNC_S0_MASK 0x00000200L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_A_S0_MASK 0x00000400L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_B_S0_MASK 0x00000800L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICA_S1__SHIFT 0x0
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICB_S1__SHIFT 0x1
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICC_S1__SHIFT 0x2
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICD_S1__SHIFT 0x3
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICE_S1__SHIFT 0x4
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICF_S1__SHIFT 0x5
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICG_S1__SHIFT 0x6
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_CLK_S1__SHIFT 0x8
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_VSYNC_S1__SHIFT 0x9
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_A_S1__SHIFT 0xa
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_B_S1__SHIFT 0xb
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICA_S1_MASK 0x00000001L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICB_S1_MASK 0x00000002L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICC_S1_MASK 0x00000004L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICD_S1_MASK 0x00000008L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICE_S1_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICF_S1_MASK 0x00000020L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICG_S1_MASK 0x00000040L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_CLK_S1_MASK 0x00000100L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_VSYNC_S1_MASK 0x00000200L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_A_S1_MASK 0x00000400L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_B_S1_MASK 0x00000800L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICA_TXIMPSEL__SHIFT 0x0
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICB_TXIMPSEL__SHIFT 0x1
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICC_TXIMPSEL__SHIFT 0x2
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICD_TXIMPSEL__SHIFT 0x3
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICE_TXIMPSEL__SHIFT 0x4
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICF_TXIMPSEL__SHIFT 0x5
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICG_TXIMPSEL__SHIFT 0x6
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_CLK_TXIMPSEL__SHIFT 0x8
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_VSYNC_TXIMPSEL__SHIFT 0x9
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_A_TXIMPSEL__SHIFT 0xa
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_B_TXIMPSEL__SHIFT 0xb
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD1_TXIMPSEL__SHIFT 0xc
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD2_TXIMPSEL__SHIFT 0xd
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD3_TXIMPSEL__SHIFT 0xe
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD4_TXIMPSEL__SHIFT 0xf
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD5_TXIMPSEL__SHIFT 0x10
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD6_TXIMPSEL__SHIFT 0x11
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICA_TXIMPSEL_MASK 0x00000001L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICB_TXIMPSEL_MASK 0x00000002L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICC_TXIMPSEL_MASK 0x00000004L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICD_TXIMPSEL_MASK 0x00000008L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICE_TXIMPSEL_MASK 0x00000010L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICF_TXIMPSEL_MASK 0x00000020L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICG_TXIMPSEL_MASK 0x00000040L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_CLK_TXIMPSEL_MASK 0x00000100L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_VSYNC_TXIMPSEL_MASK 0x00000200L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_A_TXIMPSEL_MASK 0x00000400L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_B_TXIMPSEL_MASK 0x00000800L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD1_TXIMPSEL_MASK 0x00001000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD2_TXIMPSEL_MASK 0x00002000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD3_TXIMPSEL_MASK 0x00004000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD4_TXIMPSEL_MASK 0x00008000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD5_TXIMPSEL_MASK 0x00010000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD6_TXIMPSEL_MASK 0x00020000L
+
+
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL__SHIFT 0x1
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL__SHIFT 0x2
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1__SHIFT 0x14
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1__SHIFT 0x15
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1__SHIFT 0x16
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL_MASK 0x00000002L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL_MASK 0x00000004L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1_MASK 0x00100000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1_MASK 0x00200000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1_MASK 0x00400000L
+
+
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL__SHIFT 0x1
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL__SHIFT 0x2
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1__SHIFT 0x14
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1__SHIFT 0x15
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1__SHIFT 0x16
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL_MASK 0x00000002L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL_MASK 0x00000004L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1_MASK 0x00100000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1_MASK 0x00200000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1_MASK 0x00400000L
+
+
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+
+
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+
+
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+
+
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_ENABLE__SHIFT 0x0
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_SOFT_RESET__SHIFT 0x4
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_ENABLE_MASK 0x00000001L
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_SOFT_RESET_MASK 0x00000010L
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_ON_HDMICHARCLK__SHIFT 0x1
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_ON_HDMICHARCLK_MASK 0x00000002L
+
+
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE_COUNT__SHIFT 0x0
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_TRAINING_ENABLE__SHIFT 0x1
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_SCRAMBLER_DISABLE__SHIFT 0x2
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE0_TRAINING_PATTERN__SHIFT 0x10
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE1_TRAINING_PATTERN__SHIFT 0x14
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE2_TRAINING_PATTERN__SHIFT 0x18
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE3_TRAINING_PATTERN__SHIFT 0x1c
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE_COUNT_MASK 0x00000001L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_TRAINING_ENABLE_MASK 0x00000002L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_SCRAMBLER_DISABLE_MASK 0x00000004L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE0_TRAINING_PATTERN_MASK 0x000F0000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE1_TRAINING_PATTERN_MASK 0x00F00000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE2_TRAINING_PATTERN_MASK 0x0F000000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE3_TRAINING_PATTERN_MASK 0xF0000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE__SHIFT 0x0
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_THRESHOLD__SHIFT 0xc
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_CAL_EN__SHIFT 0x18
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_RC_COMPRESS_DISABLE__SHIFT 0x19
+#define HDMI_FRL_ENC_CONFIG2__HDMI_FRL_HDMISTREAMCLK_DB_SEL__SHIFT 0x1a
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_RESET__SHIFT 0x1c
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_EXCEED_STATUS__SHIFT 0x1d
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_METER_BUFFER_OVERFLOW_STATUS__SHIFT 0x1e
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_MASK 0x000001FFL
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_THRESHOLD_MASK 0x001FF000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_CAL_EN_MASK 0x01000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_RC_COMPRESS_DISABLE_MASK 0x02000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_FRL_HDMISTREAMCLK_DB_SEL_MASK 0x0C000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_RESET_MASK 0x10000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_EXCEED_STATUS_MASK 0x20000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_METER_BUFFER_OVERFLOW_STATUS_MASK 0x40000000L
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_MAX_METER_BUFFER_LEVEL__SHIFT 0x0
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_METER_BUFFER_MAX_LEVEL_RESET__SHIFT 0x1f
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_MAX_METER_BUFFER_LEVEL_MASK 0x0000007FL
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_METER_BUFFER_MAX_LEVEL_RESET_MASK 0x80000000L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_DIS__SHIFT 0x0
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_FORCE__SHIFT 0x1
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_STATE__SHIFT 0x4
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x8
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_DIS_MASK 0x00000001L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_FORCE_MASK 0x00000006L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_STATE_MASK 0x00000030L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000300L
+
+
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_HDMISTREAMCLK__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_HDMISTREAMCLK_MASK 0x00001000L
+#define HDMI_STREAM_ENC_INPUT_MUX_CONTROL__HDMI_STREAM_ENC_INPUT_MUX_SOURCE_SEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_INPUT_MUX_CONTROL__HDMI_STREAM_ENC_INPUT_MUX_SOURCE_SEL_MASK 0x00000007L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_PIXEL_ENCODING__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ODM_COMBINE_MODE__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_DSC_MODE__SHIFT 0x10
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_PIXEL_ENCODING_MASK 0x00000300L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ODM_COMBINE_MODE_MASK 0x00003000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_DSC_MODE_MASK 0x00030000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_START_LEVEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_CLOCK_SRC__SHIFT 0x5
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_PENDING__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_DISABLE__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_START_LEVEL_MASK 0x0000001FL
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_CLOCK_SRC_MASK 0x00000020L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_PENDING_MASK 0x00000100L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_DISABLE_MASK 0x00001000L
+
+
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+
+
+
+
+
+
+
+
+#define HDMI_TB_ENC_CONTROL__HDMI_TB_ENC_EN__SHIFT 0x0
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET__SHIFT 0x4
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_DONE__SHIFT 0x8
+#define HDMI_TB_ENC_CONTROL__HDMI_TB_ENC_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_MASK 0x00000010L
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_DONE_MASK 0x00000100L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x8
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_PIXEL_ENCODING__SHIFT 0x10
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DSC_MODE__SHIFT 0x18
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_DEPTH_MASK 0x00000300L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_PIXEL_ENCODING_MASK 0x00030000L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DSC_MODE_MASK 0x03000000L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_PACKETS_PER_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_ISLANDS_PER_LINE__SHIFT 0x8
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_OVERFLOW__SHIFT 0xc
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_TB_ENC_PACKET_ERROR_CLEAR__SHIFT 0x10
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_PACKETS_PER_LINE_MASK 0x0000001FL
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_ISLANDS_PER_LINE_MASK 0x00000300L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_OVERFLOW_MASK 0x00001000L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_TB_ENC_PACKET_ERROR_CLEAR_MASK 0x00010000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_LINE_REFERENCE__SHIFT 0x6
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_LINE_REFERENCE__SHIFT 0x9
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_REFERENCE__SHIFT 0xe
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_LINE_REFERENCE_MASK 0x00000040L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_LINE_REFERENCE_MASK 0x00000200L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_REFERENCE_MASK 0x00004000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ISRC_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ACP_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ISRC_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ACP_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE__SHIFT 0x0
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LOCK_EN__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LOCK_EN__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LOCK_EN__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LOCK_EN__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LOCK_EN__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LOCK_EN__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LOCK_EN__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LOCK_EN__SHIFT 0x1e
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LOCK_EN_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LOCK_EN_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LOCK_EN_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LOCK_EN_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LOCK_EN_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LOCK_EN_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LOCK_EN_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LOCK_EN_MASK 0x40000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LOCK_EN__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LOCK_EN__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LOCK_EN__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LOCK_EN__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LOCK_EN__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LOCK_EN__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LOCK_EN__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LOCK_EN_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LOCK_EN_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LOCK_EN_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LOCK_EN_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LOCK_EN_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LOCK_EN_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LOCK_EN_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_EMP_MASK 0x80000000L
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define HDMI_TB_ENC_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define HDMI_TB_ENC_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define HDMI_TB_ENC_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+#define HDMI_TB_ENC_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+#define HDMI_TB_ENC_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+#define HDMI_TB_ENC_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+#define HDMI_TB_ENC_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+#define HDMI_TB_ENC_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+#define HDMI_TB_ENC_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+#define HDMI_TB_ENC_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_EN__SHIFT 0x0
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_EN__SHIFT 0x1
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_MAX_MIN_LEVEL_RESET__SHIFT 0x4
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_LEVEL__SHIFT 0x8
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_LEVEL__SHIFT 0x18
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_EN_MASK 0x00000002L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_MAX_MIN_LEVEL_RESET_MASK 0x00000010L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_LEVEL_MASK 0x0000FF00L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_LEVEL_MASK 0x1F000000L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_DIS__SHIFT 0x0
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_FORCE__SHIFT 0x1
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_STATE__SHIFT 0x4
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x8
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_DIS_MASK 0x00000001L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_FORCE_MASK 0x00000006L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_STATE_MASK 0x00000030L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000300L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_ACTIVE__SHIFT 0x0
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_BLANK__SHIFT 0x10
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_ACTIVE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_BLANK_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_ACTIVE__SHIFT 0x0
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_BLANK__SHIFT 0x10
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_ACTIVE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_BLANK_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_EN__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_CONT_EN__SHIFT 0x1
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_TYPE__SHIFT 0x8
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_SRC_SEL__SHIFT 0xa
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_EN__SHIFT 0x10
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_MODE__SHIFT 0x11
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_CONT_EN_MASK 0x00000002L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_TYPE_MASK 0x00000300L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_SRC_SEL_MASK 0x00000C00L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_EN_MASK 0x00010000L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_MODE_MASK 0x00060000L
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE0__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE1__SHIFT 0x10
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE0_MASK 0x0000FFFFL
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE1_MASK 0xFFFF0000L
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_WHEN_AVMUTE__SHIFT 0x4
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_WHEN_AVMUTE_MASK 0x00000010L
+#define HDMI_TB_ENC_MODE__HDMI_BORROW_MODE__SHIFT 0x0
+#define HDMI_TB_ENC_MODE__HDMI_SKIP_FIRST_HBLANK__SHIFT 0x8
+#define HDMI_TB_ENC_MODE__HDMI_BORROW_MODE_MASK 0x00000003L
+#define HDMI_TB_ENC_MODE__HDMI_SKIP_FIRST_HBLANK_MASK 0x00000100L
+#define HDMI_TB_ENC_INPUT_FIFO_STATUS__INPUT_FIFO_ERROR__SHIFT 0x0
+#define HDMI_TB_ENC_INPUT_FIFO_STATUS__INPUT_FIFO_ERROR_MASK 0x00000001L
+#define HDMI_TB_ENC_CRC_RESULT_1__CRC_TRIBYTE2__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_RESULT_1__CRC_TRIBYTE2_MASK 0x0000FFFFL
+
+
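Aside (not part of the patch): the __SHIFT/_MASK pairs above are consumed by masking and shifting the register word. A minimal sketch in plain C, using hypothetical field_get()/field_set() helpers rather than the driver's own register accessors:

#include <stdint.h>

/* Extract the field described by a _MASK/__SHIFT pair from a register value. */
static inline uint32_t field_get(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Write a field value back into a register word without disturbing other bits. */
static inline uint32_t field_set(uint32_t reg_val, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg_val & ~mask) | ((val << shift) & mask);
}

/* e.g. set HDMI_GC_AVMUTE in a previously read HDMI_TB_ENC_GC_CONTROL value:
 * new_val = field_set(old_val,
 *                     HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_MASK,
 *                     HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE__SHIFT, 1);
 */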
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+
+
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+
+
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+
+
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+
+
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+
+
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+
+
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+
+
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD_MASK 0x00001000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED_MASK 0x00000100L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR_MASK 0x00000100L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT_MASK 0x0000FFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET__SHIFT 0x3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET_MASK 0x00000008L
+
+
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD_MASK 0x00001000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED_MASK 0x00000100L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR_MASK 0x00000100L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT_MASK 0x0000FFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT_MASK 0xFFFFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET__SHIFT 0x3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET_MASK 0x00000008L
+
+#define DLPC_ENABLE__DLPC_EN__SHIFT 0x0
+#define DLPC_ENABLE__PWRUP_TRIGGER_EN__SHIFT 0x4
+#define DLPC_ENABLE__PWRUP_TRIGGER_CLR__SHIFT 0x5
+#define DLPC_ENABLE__PWRUP_TRIGGER_STATUS__SHIFT 0x6
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_EN__SHIFT 0x8
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_CLR__SHIFT 0x9
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_STATUS__SHIFT 0xa
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_EN__SHIFT 0xc
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_CLR__SHIFT 0xd
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_STATUS__SHIFT 0xe
+#define DLPC_ENABLE__DLPC_EN_MASK 0x00000001L
+#define DLPC_ENABLE__PWRUP_TRIGGER_EN_MASK 0x00000010L
+#define DLPC_ENABLE__PWRUP_TRIGGER_CLR_MASK 0x00000020L
+#define DLPC_ENABLE__PWRUP_TRIGGER_STATUS_MASK 0x00000040L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_EN_MASK 0x00000100L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_CLR_MASK 0x00000200L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_STATUS_MASK 0x00000400L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_EN_MASK 0x00001000L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_CLR_MASK 0x00002000L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_STATUS_MASK 0x00004000L
+#define DLPC_CURRENT_COUNT__VALUE__SHIFT 0x0
+#define DLPC_CURRENT_COUNT__VALUE_MASK 0xFFFFFFFFL
+#define DLPC_OPTC_SNAPSHOT__VALUE__SHIFT 0x0
+#define DLPC_OPTC_SNAPSHOT__VALUE_MASK 0xFFFFFFFFL
+#define DLPC_PWRUP__VALUE__SHIFT 0x0
+#define DLPC_PWRUP__VALUE_MASK 0xFFFFFFFFL
+#define DLPC_OTG_RESYNC__VALUE__SHIFT 0x0
+#define DLPC_OTG_RESYNC__VALUE_MASK 0xFFFFFFFFL
+#define DLPC_DCN_ZSC_LONO_PWRUP__VALUE__SHIFT 0x0
+#define DLPC_DCN_ZSC_LONO_PWRUP__VALUE_MASK 0xFFFFFFFFL
+#define DLPC_SPARE__SPARE__SHIFT 0x0
+#define DLPC_SPARE__SPARE_MASK 0xFFFFFFFFL
+#define DLPC_COUNTER_INIT_VALUE__DLPC_COUNTER_INIT_VALUE__SHIFT 0x0
+#define DLPC_COUNTER_INIT_VALUE__DLPC_COUNTER_INIT_VALUE_MASK 0xFFFFFFFFL
+
+#define DPIA_MU_CLOCK_CTRL__DPIA_REFCLK_GATE_DIS__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL__DPIA_CIO_CLKS_GATE_DIS__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_REFCLK_R_GATE_DIS__SHIFT 0x8
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_ML_SSCLK_G_GATE_DIS__SHIFT 0xb
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXIN_SSCLK_G_GATE_DIS__SHIFT 0xc
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXOUT_SSCLK_G_GATE_DIS__SHIFT 0xd
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TMUCLK_G_GATE_DIS__SHIFT 0xe
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TEST_CLK_SEL__SHIFT 0x18
+#define DPIA_MU_CLOCK_CTRL__DPIA_REFCLK_GATE_DIS_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL__DPIA_CIO_CLKS_GATE_DIS_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_REFCLK_R_GATE_DIS_MASK 0x00000100L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_ML_SSCLK_G_GATE_DIS_MASK 0x00000800L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXIN_SSCLK_G_GATE_DIS_MASK 0x00001000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXOUT_SSCLK_G_GATE_DIS_MASK 0x00002000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TMUCLK_G_GATE_DIS_MASK 0x00004000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TEST_CLK_SEL_MASK 0xFF000000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_RESET_DONE_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_RESET_DONE_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_RESET_DONE_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_RESET_DONE_MASK 0x00000200L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+#define DPIA_MU_TPI_MAX_CREDIT_COUNT__DPIA_TPI_MAX_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_MAX_CREDIT_COUNT__DPIA_TPI_MAX_CREDIT_COUNT_MASK 0x0000003FL
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT0_INT_STATUS__SHIFT 0x0
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT1_INT_STATUS__SHIFT 0x3
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT2_INT_STATUS__SHIFT 0x6
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT3_INT_STATUS__SHIFT 0x9
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_MU_LOCAL_INT_STATUS__SHIFT 0x1f
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT0_INT_STATUS_MASK 0x00000007L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT1_INT_STATUS_MASK 0x00000038L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT2_INT_STATUS_MASK 0x000001C0L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT3_INT_STATUS_MASK 0x00000E00L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_MU_LOCAL_INT_STATUS_MASK 0x80000000L
+#define DPIA_MU_INTERRUPT_CTRL__DPIA_DCN_INT_EN__SHIFT 0x0
+#define DPIA_MU_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_MASK__SHIFT 0x18
+#define DPIA_MU_INTERRUPT_CTRL__DPIA_DCN_INT_EN_MASK 0x00000001L
+#define DPIA_MU_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_MASK_MASK 0x01000000L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_STATUS__SHIFT 0x0
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_OP__SHIFT 0x14
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__DPIA_RST_DONE_INT_STATUS__SHIFT 0x18
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_STATUS_MASK 0x00000001L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_ADDR_MASK 0x000FFFFCL
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_OP_MASK 0x00100000L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__DPIA_RST_DONE_INT_STATUS_MASK 0x01000000L
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__RBBMIF_TIMEOUT_INT_ACK__SHIFT 0x0
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__DPIA_RST_DONE_INT_ACK__SHIFT 0x1
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__RBBMIF_TIMEOUT_INT_ACK_MASK 0x00000001L
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__DPIA_RST_DONE_INT_ACK_MASK 0x00000002L
+#define DPIA_MU_MICROSECOND_REF_CTRL__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define DPIA_MU_MICROSECOND_REF_CTRL__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT0_HIDDEN_STATUS__SHIFT 0x0
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT1_HIDDEN_STATUS__SHIFT 0x1
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT2_HIDDEN_STATUS__SHIFT 0x2
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT3_HIDDEN_STATUS__SHIFT 0x3
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT0_HIDDEN_STATUS_MASK 0x00000001L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT1_HIDDEN_STATUS_MASK 0x00000002L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT2_HIDDEN_STATUS_MASK 0x00000004L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT3_HIDDEN_STATUS_MASK 0x00000008L
+#define DPIA_GLUE_CTRL__DPIA_IO_EN__SHIFT 0x0
+#define DPIA_GLUE_CTRL__DPIA_IO_EN_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL0__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL0__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL0__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL0__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL0__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL0__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL0__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL0__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL0__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL0__COUNT_LIMIT_MASK 0xFFFFF000L
+#define DPIA_PERF_COUNT_CONTROL1__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL1__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL1__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL1__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL1__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL1__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL1__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL1__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL1__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL1__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL1__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL1__COUNT_LIMIT_MASK 0xFFFFF000L
+#define DPIA_PERF_COUNT_CONTROL2__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL2__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL2__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL2__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL2__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL2__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL2__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL2__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL2__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL2__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL2__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL2__COUNT_LIMIT_MASK 0xFFFFF000L
+#define DPIA_PERF_COUNT_CONTROL3__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL3__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL3__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL3__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL3__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL3__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL3__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL3__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL3__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL3__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL3__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL3__COUNT_LIMIT_MASK 0xFFFFF000L
+#define DPIA_PERF_COUNT_CONTROL4__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL4__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL4__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL4__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL4__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL4__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL4__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL4__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL4__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL4__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL4__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL4__COUNT_LIMIT_MASK 0xFFFFF000L
+#define DPIA_PERF_COUNT_CONTROL5__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL5__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL5__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL5__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL5__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL5__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL5__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL5__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL5__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL5__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL5__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL5__COUNT_LIMIT_MASK 0xFFFFF000L
+#define DPIA_PERF_COUNT_INDEX__COUNTER_SELECT__SHIFT 0x0
+#define DPIA_PERF_COUNT_INDEX__MEAS_SELECT__SHIFT 0x4
+#define DPIA_PERF_COUNT_INDEX__COUNTER_SELECT_MASK 0x00000007L
+#define DPIA_PERF_COUNT_INDEX__MEAS_SELECT_MASK 0x00000070L
+#define DPIA_PERF_COUNT_DATA_LO__VALUE__SHIFT 0x0
+#define DPIA_PERF_COUNT_DATA_LO__VALUE_MASK 0xFFFFFFFFL
+#define DPIA_MU_SPARE__DPIA_MU_SPARE__SHIFT 0x0
+#define DPIA_MU_SPARE__DPIA_MU_SPARE_MASK 0xFFFFFFFFL
+
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__SUPPORTS_AI__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_INDEX_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__SUPPORTS_AI_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA__ACP_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA__ACP_DATA_MASK 0x000000FFL
+
+
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+#define AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+#endif
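
The FIELD__SHIFT / FIELD_MASK pairs in the header above follow the usual convention of these generated files: the mask selects the field's bits within the 32-bit register word and the shift moves the field down to bit 0. A minimal, self-contained sketch of how such a pair is typically consumed is shown below; the get_field()/set_field() helpers and the sample register value are illustrative assumptions, not code taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Field definition copied from the header above. */
#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT__SHIFT 0x8
#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT_MASK   0x00000300L

/* Extract a field from a register word. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a field inside a register word (read-modify-write style). */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;	/* pretend this was read from the hardware */

	reg = set_field(reg, DPIA_PERF_COUNT_CONTROL0__STAT_SELECT_MASK,
			DPIA_PERF_COUNT_CONTROL0__STAT_SELECT__SHIFT, 0x2);
	printf("reg = 0x%08x, STAT_SELECT = %u\n", reg,
	       get_field(reg, DPIA_PERF_COUNT_CONTROL0__STAT_SELECT_MASK,
			 DPIA_PERF_COUNT_CONTROL0__STAT_SELECT__SHIFT));
	return 0;
}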
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h
new file mode 100644
index 000000000000..9c16611af06b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_7_0_0_OFFSET_HEADER
+#define _hdp_7_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: hdp_hdpdec
+// base address: 0x3c80
+#define regHDP_MMHUB_TLVL 0x0008
+#define regHDP_MMHUB_TLVL_BASE_IDX 0
+#define regHDP_MMHUB_UNITID 0x0009
+#define regHDP_MMHUB_UNITID_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE 0x0040
+#define regHDP_NONSURFACE_BASE_BASE_IDX 0
+#define regHDP_NONSURFACE_INFO 0x0041
+#define regHDP_NONSURFACE_INFO_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE_HI 0x0042
+#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS 0x00c4
+#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS 0x00c5
+#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6
+#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7
+#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS 0x00c8
+#define regHDP_NONSURF_FLAGS_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS_CLR 0x00c9
+#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SW_SEMAPHORE 0x00cd
+#define regHDP_SW_SEMAPHORE_BASE_IDX 0
+#define regHDP_DEBUG0 0x00ce
+#define regHDP_DEBUG0_BASE_IDX 0
+#define regHDP_LAST_SURFACE_HIT 0x00d0
+#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0
+#define regHDP_OUTSTANDING_REQ 0x00d1
+#define regHDP_OUTSTANDING_REQ_BASE_IDX 0
+#define regHDP_HOST_PATH_CNTL 0x00d2
+#define regHDP_HOST_PATH_CNTL_BASE_IDX 0
+#define regHDP_MISC_CNTL 0x00d3
+#define regHDP_MISC_CNTL_BASE_IDX 0
+#define regHDP_MEM_POWER_CTRL 0x00d4
+#define regHDP_MEM_POWER_CTRL_BASE_IDX 0
+#define regHDP_CLK_CNTL 0x00d5
+#define regHDP_CLK_CNTL_BASE_IDX 0
+#define regHDP_MMHUB_CNTL 0x00d6
+#define regHDP_MMHUB_CNTL_BASE_IDX 0
+#define regHDP_XDP_BUSY_STS 0x00d7
+#define regHDP_XDP_BUSY_STS_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR 0x00d8
+#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR_CLR 0x00da
+#define regHDP_XDP_MMHUB_ERROR_CLR_BASE_IDX 0
+#define regHDP_VERSION 0x00db
+#define regHDP_VERSION_BASE_IDX 0
+#define regHDP_MEMIO_CNTL 0x00f6
+#define regHDP_MEMIO_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_ADDR 0x00f7
+#define regHDP_MEMIO_ADDR_BASE_IDX 0
+#define regHDP_MEMIO_STATUS 0x00f8
+#define regHDP_MEMIO_STATUS_BASE_IDX 0
+#define regHDP_MEMIO_WR_DATA 0x00f9
+#define regHDP_MEMIO_WR_DATA_BASE_IDX 0
+#define regHDP_MEMIO_RD_DATA 0x00fa
+#define regHDP_MEMIO_RD_DATA_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_FIRST 0x0100
+#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
+#define regHDP_XDP_D2H_FLUSH 0x0101
+#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0
+#define regHDP_XDP_D2H_BAR_UPDATE 0x0102
+#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_3 0x0103
+#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_4 0x0104
+#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_5 0x0105
+#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_6 0x0106
+#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_7 0x0107
+#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_8 0x0108
+#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_9 0x0109
+#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_10 0x010a
+#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_11 0x010b
+#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_12 0x010c
+#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_13 0x010d
+#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_14 0x010e
+#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_15 0x010f
+#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_16 0x0110
+#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_17 0x0111
+#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_18 0x0112
+#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_19 0x0113
+#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_20 0x0114
+#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_21 0x0115
+#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_22 0x0116
+#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_23 0x0117
+#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_24 0x0118
+#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_25 0x0119
+#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_26 0x011a
+#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_27 0x011b
+#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_28 0x011c
+#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_29 0x011d
+#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_30 0x011e
+#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_31 0x011f
+#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_32 0x0120
+#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_33 0x0121
+#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_34 0x0122
+#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_LAST 0x0123
+#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR_CFG 0x0124
+#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_OFFSET 0x0125
+#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR0 0x0126
+#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR1 0x0127
+#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR2 0x0128
+#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR3 0x0129
+#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR4 0x012a
+#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR5 0x012b
+#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR6 0x012c
+#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
+#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d
+#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_MC_CFG 0x012e
+#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HST_CFG 0x012f
+#define regHDP_XDP_HST_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_IPH_CFG 0x0131
+#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR0 0x0134
+#define regHDP_XDP_P2P_BAR0_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR1 0x0135
+#define regHDP_XDP_P2P_BAR1_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR2 0x0136
+#define regHDP_XDP_P2P_BAR2_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR3 0x0137
+#define regHDP_XDP_P2P_BAR3_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR4 0x0138
+#define regHDP_XDP_P2P_BAR4_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR5 0x0139
+#define regHDP_XDP_P2P_BAR5_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR6 0x013a
+#define regHDP_XDP_P2P_BAR6_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR7 0x013b
+#define regHDP_XDP_P2P_BAR7_BASE_IDX 0
+#define regHDP_XDP_FLUSH_ARMED_STS 0x013c
+#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
+#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d
+#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
+#define regHDP_XDP_STICKY 0x013f
+#define regHDP_XDP_STICKY_BASE_IDX 0
+#define regHDP_XDP_CHKN 0x0140
+#define regHDP_XDP_CHKN_BASE_IDX 0
+#define regHDP_XDP_BARS_ADDR_39_36 0x0144
+#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+
+#endif
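
The reg* offsets above come with *_BASE_IDX companions because the driver resolves each register against a per-block base-address table: the base of the address block (the "base address: 0x3c80" noted in the header) is looked up by index and the register offset is added to form the final dword address. Below is a rough, self-contained sketch of that lookup; the hdp_base[] table contents and the mmio_read32() stub are assumptions for illustration only, not values or functions taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Offsets copied from hdp_7_0_0_offset.h above. */
#define regHDP_MEM_POWER_CTRL          0x00d4
#define regHDP_MEM_POWER_CTRL_BASE_IDX 0

/* Hypothetical base-address table for the HDP block; a real driver
 * fills this from per-ASIC discovery data.  Index 0 matches the
 * "base address: 0x3c80" comment in the header. */
static const uint32_t hdp_base[] = { 0x3c80 };

/* Stand-in for a 32-bit MMIO read at a dword offset. */
static uint32_t mmio_read32(uint32_t dword_offset)
{
	printf("read dword offset 0x%04x\n", dword_offset);
	return 0;
}

/* Combine base-index lookup and register offset into one address. */
static uint32_t hdp_reg_offset(uint32_t base_idx, uint32_t reg)
{
	return hdp_base[base_idx] + reg;
}

int main(void)
{
	uint32_t val;

	val = mmio_read32(hdp_reg_offset(regHDP_MEM_POWER_CTRL_BASE_IDX,
					 regHDP_MEM_POWER_CTRL));
	(void)val;
	return 0;
}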
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h
new file mode 100644
index 000000000000..afb73c5a4018
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h
@@ -0,0 +1,735 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_7_0_0_SH_MASK_HEADER
+#define _hdp_7_0_0_SH_MASK_HEADER
+
+
+// addressBlock: hdp_hdpdec
+//HDP_MMHUB_TLVL
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L
+//HDP_MMHUB_UNITID
+#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
+#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
+#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
+#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
+//HDP_NONSURFACE_BASE
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
+//HDP_NONSURFACE_INFO
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
+#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
+#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
+//HDP_NONSURFACE_BASE_HI
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
+//HDP_SURFACE_WRITE_FLAGS
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L
+//HDP_SURFACE_WRITE_FLAGS_CLR
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS_CLR
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_NONSURF_FLAGS
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+//HDP_NONSURF_FLAGS_CLR
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_SW_SEMAPHORE
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
+//HDP_DEBUG0
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
+#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
+//HDP_LAST_SURFACE_HIT
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
+//HDP_OUTSTANDING_REQ
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
+//HDP_HOST_PATH_CNTL
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+//HDP_MISC_CNTL
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe
+#define HDP_MISC_CNTL__NACK_ENABLE__SHIFT 0x13
+#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE__SHIFT 0x14
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0x17
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L
+#define HDP_MISC_CNTL__NACK_ENABLE_MASK 0x00080000L
+#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE_MASK 0x00100000L
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00800000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
+//HDP_MEM_POWER_CTRL
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN__SHIFT 0x1
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN__SHIFT 0x2
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN__SHIFT 0x3
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK 0x00000004L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK 0x00000008L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L
+//HDP_CLK_CNTL
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
+#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
+#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//HDP_MMHUB_CNTL
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L
+//HDP_XDP_BUSY_STS
+#define HDP_XDP_BUSY_STS__BUSY_BITS_0__SHIFT 0x0
+#define HDP_XDP_BUSY_STS__BUSY_BITS_1__SHIFT 0x1
+#define HDP_XDP_BUSY_STS__BUSY_BITS_2__SHIFT 0x2
+#define HDP_XDP_BUSY_STS__BUSY_BITS_3__SHIFT 0x3
+#define HDP_XDP_BUSY_STS__BUSY_BITS_4__SHIFT 0x4
+#define HDP_XDP_BUSY_STS__BUSY_BITS_5__SHIFT 0x5
+#define HDP_XDP_BUSY_STS__BUSY_BITS_6__SHIFT 0x6
+#define HDP_XDP_BUSY_STS__BUSY_BITS_7__SHIFT 0x7
+#define HDP_XDP_BUSY_STS__BUSY_BITS_8__SHIFT 0x8
+#define HDP_XDP_BUSY_STS__BUSY_BITS_9__SHIFT 0x9
+#define HDP_XDP_BUSY_STS__BUSY_BITS_10__SHIFT 0xa
+#define HDP_XDP_BUSY_STS__BUSY_BITS_11__SHIFT 0xb
+#define HDP_XDP_BUSY_STS__BUSY_BITS_12__SHIFT 0xc
+#define HDP_XDP_BUSY_STS__BUSY_BITS_13__SHIFT 0xd
+#define HDP_XDP_BUSY_STS__BUSY_BITS_14__SHIFT 0xe
+#define HDP_XDP_BUSY_STS__BUSY_BITS_15__SHIFT 0xf
+#define HDP_XDP_BUSY_STS__BUSY_BITS_16__SHIFT 0x10
+#define HDP_XDP_BUSY_STS__BUSY_BITS_17__SHIFT 0x11
+#define HDP_XDP_BUSY_STS__BUSY_BITS_18__SHIFT 0x12
+#define HDP_XDP_BUSY_STS__BUSY_BITS_19__SHIFT 0x13
+#define HDP_XDP_BUSY_STS__BUSY_BITS_20__SHIFT 0x14
+#define HDP_XDP_BUSY_STS__BUSY_BITS_21__SHIFT 0x15
+#define HDP_XDP_BUSY_STS__BUSY_BITS_22__SHIFT 0x16
+#define HDP_XDP_BUSY_STS__BUSY_BITS_23__SHIFT 0x17
+#define HDP_XDP_BUSY_STS__Z_FENCE_BIT__SHIFT 0x18
+#define HDP_XDP_BUSY_STS__BUSY_BITS_0_MASK 0x00000001L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_1_MASK 0x00000002L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_2_MASK 0x00000004L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_3_MASK 0x00000008L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_4_MASK 0x00000010L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_5_MASK 0x00000020L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_6_MASK 0x00000040L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_7_MASK 0x00000080L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_8_MASK 0x00000100L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_9_MASK 0x00000200L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_10_MASK 0x00000400L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_11_MASK 0x00000800L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_12_MASK 0x00001000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_13_MASK 0x00002000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_14_MASK 0x00004000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_15_MASK 0x00008000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_16_MASK 0x00010000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_17_MASK 0x00020000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_18_MASK 0x00040000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_19_MASK 0x00080000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_20_MASK 0x00100000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_21_MASK 0x00200000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_22_MASK 0x00400000L
+#define HDP_XDP_BUSY_STS__BUSY_BITS_23_MASK 0x00800000L
+#define HDP_XDP_BUSY_STS__Z_FENCE_BIT_MASK 0x01000000L
+//HDP_XDP_MMHUB_ERROR
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
+//HDP_XDP_MMHUB_ERROR_CLR
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR__SHIFT 0x4
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR__SHIFT 0xc
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR__SHIFT 0x10
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR_MASK 0x00000010L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR_MASK 0x00001000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR_MASK 0x00010000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR_MASK 0x00800000L
+//HDP_VERSION
+#define HDP_VERSION__MINVER__SHIFT 0x0
+#define HDP_VERSION__MAJVER__SHIFT 0x8
+#define HDP_VERSION__REV__SHIFT 0x10
+#define HDP_VERSION__MINVER_MASK 0x000000FFL
+#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
+#define HDP_VERSION__REV_MASK 0x00FF0000L
+//HDP_MEMIO_CNTL
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
+#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
+#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
+#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
+//HDP_MEMIO_ADDR
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
+//HDP_MEMIO_STATUS
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+//HDP_MEMIO_WR_DATA
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
+//HDP_MEMIO_RD_DATA
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_FIRST
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_FLUSH
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+//HDP_XDP_D2H_BAR_UPDATE
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+//HDP_XDP_D2H_RSVD_3
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_4
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_5
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_6
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_7
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_8
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_9
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_10
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_11
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_12
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_13
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_14
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_15
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_16
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_17
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_18
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_19
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_20
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_21
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_22
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_23
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_24
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_25
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_26
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_27
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_28
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_29
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_30
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_31
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_32
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_33
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_34
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_LAST
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_P2P_BAR_CFG
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+//HDP_XDP_P2P_MBX_OFFSET
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
+//HDP_XDP_P2P_MBX_ADDR0
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR1
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR2
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR3
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR4
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR5
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR6
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_HDP_MBX_MC_CFG
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
+//HDP_XDP_HDP_MC_CFG
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
+//HDP_XDP_HST_CFG
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
+//HDP_XDP_HDP_IPH_CFG
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+//HDP_XDP_P2P_BAR0
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR1
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR2
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR3
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR4
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR5
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR6
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR7
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+//HDP_XDP_FLUSH_ARMED_STS
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
+//HDP_XDP_FLUSH_CNTR0_STS
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
+//HDP_XDP_STICKY
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
+//HDP_XDP_CHKN
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
+//HDP_XDP_BARS_ADDR_39_36
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
+//HDP_XDP_MC_VM_FB_LOCATION_BASE
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
+//HDP_XDP_GPU_IOV_VIOLATION_LOG
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x01F00000L
+//HDP_XDP_GPU_IOV_VIOLATION_LOG2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h
new file mode 100644
index 000000000000..c783b8ea4698
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_0_0_OFFSET_HEADER
+#define _lsdma_7_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: lsdma0_lsdma0dec
+// base address: 0x45000
+#define regLSDMA_UCODE_ADDR 0x0000
+#define regLSDMA_UCODE_ADDR_BASE_IDX 0
+#define regLSDMA_UCODE_DATA 0x0001
+#define regLSDMA_UCODE_DATA_BASE_IDX 0
+#define regLSDMA_ERROR_INJECT_CNTL 0x0004
+#define regLSDMA_ERROR_INJECT_CNTL_BASE_IDX 0
+#define regLSDMA_ERROR_INJECT_SELECT 0x0005
+#define regLSDMA_ERROR_INJECT_SELECT_BASE_IDX 0
+#define regLSDMA_CONTEXT_GROUP_BOUNDARY 0x001f
+#define regLSDMA_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define regLSDMA_RB_RPTR_FETCH_HI 0x0020
+#define regLSDMA_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regLSDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define regLSDMA_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regLSDMA_RB_RPTR_FETCH 0x0022
+#define regLSDMA_RB_RPTR_FETCH_BASE_IDX 0
+#define regLSDMA_IB_OFFSET_FETCH 0x0023
+#define regLSDMA_IB_OFFSET_FETCH_BASE_IDX 0
+#define regLSDMA_PROGRAM 0x0024
+#define regLSDMA_PROGRAM_BASE_IDX 0
+#define regLSDMA_STATUS_REG 0x0025
+#define regLSDMA_STATUS_REG_BASE_IDX 0
+#define regLSDMA_STATUS1_REG 0x0026
+#define regLSDMA_STATUS1_REG_BASE_IDX 0
+#define regLSDMA_RD_BURST_CNTL 0x0027
+#define regLSDMA_RD_BURST_CNTL_BASE_IDX 0
+#define regLSDMA_HBM_PAGE_CONFIG 0x0028
+#define regLSDMA_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regLSDMA_UCODE_CHECKSUM 0x0029
+#define regLSDMA_UCODE_CHECKSUM_BASE_IDX 0
+#define regLSDMA_FREEZE 0x002b
+#define regLSDMA_FREEZE_BASE_IDX 0
+#define regLSDMA_DCC_CNTL 0x002d
+#define regLSDMA_DCC_CNTL_BASE_IDX 0
+#define regLSDMA_POWER_GATING 0x002e
+#define regLSDMA_POWER_GATING_BASE_IDX 0
+#define regLSDMA_PGFSM_CONFIG 0x002f
+#define regLSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define regLSDMA_PGFSM_WRITE 0x0030
+#define regLSDMA_PGFSM_WRITE_BASE_IDX 0
+#define regLSDMA_PGFSM_READ 0x0031
+#define regLSDMA_PGFSM_READ_BASE_IDX 0
+#define regLSDMA_BA_THRESHOLD 0x0033
+#define regLSDMA_BA_THRESHOLD_BASE_IDX 0
+#define regLSDMA_ID 0x0034
+#define regLSDMA_ID_BASE_IDX 0
+#define regLSDMA_VERSION 0x0035
+#define regLSDMA_VERSION_BASE_IDX 0
+#define regLSDMA_EDC_COUNTER 0x0036
+#define regLSDMA_EDC_COUNTER_BASE_IDX 0
+#define regLSDMA_EDC_COUNTER2 0x0037
+#define regLSDMA_EDC_COUNTER2_BASE_IDX 0
+#define regLSDMA_STATUS2_REG 0x0038
+#define regLSDMA_STATUS2_REG_BASE_IDX 0
+#define regLSDMA_ATOMIC_CNTL 0x0039
+#define regLSDMA_ATOMIC_CNTL_BASE_IDX 0
+#define regLSDMA_ATOMIC_PREOP_LO 0x003a
+#define regLSDMA_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regLSDMA_ATOMIC_PREOP_HI 0x003b
+#define regLSDMA_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regLSDMA_UTCL1_CNTL 0x003c
+#define regLSDMA_UTCL1_CNTL_BASE_IDX 0
+#define regLSDMA_UTCL1_WATERMK 0x003d
+#define regLSDMA_UTCL1_WATERMK_BASE_IDX 0
+#define regLSDMA_UTCL1_RD_STATUS 0x003e
+#define regLSDMA_UTCL1_RD_STATUS_BASE_IDX 0
+#define regLSDMA_UTCL1_WR_STATUS 0x003f
+#define regLSDMA_UTCL1_WR_STATUS_BASE_IDX 0
+#define regLSDMA_UTCL1_INV0 0x0040
+#define regLSDMA_UTCL1_INV0_BASE_IDX 0
+#define regLSDMA_UTCL1_INV1 0x0041
+#define regLSDMA_UTCL1_INV1_BASE_IDX 0
+#define regLSDMA_UTCL1_INV2 0x0042
+#define regLSDMA_UTCL1_INV2_BASE_IDX 0
+#define regLSDMA_UTCL1_RD_XNACK0 0x0043
+#define regLSDMA_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regLSDMA_UTCL1_RD_XNACK1 0x0044
+#define regLSDMA_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regLSDMA_UTCL1_WR_XNACK0 0x0045
+#define regLSDMA_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regLSDMA_UTCL1_WR_XNACK1 0x0046
+#define regLSDMA_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regLSDMA_UTCL1_TIMEOUT 0x0047
+#define regLSDMA_UTCL1_TIMEOUT_BASE_IDX 0
+#define regLSDMA_UTCL1_PAGE 0x0048
+#define regLSDMA_UTCL1_PAGE_BASE_IDX 0
+#define regLSDMA_RELAX_ORDERING_LUT 0x004a
+#define regLSDMA_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regLSDMA_CHICKEN_BITS_2 0x004b
+#define regLSDMA_CHICKEN_BITS_2_BASE_IDX 0
+#define regLSDMA_STATUS3_REG 0x004c
+#define regLSDMA_STATUS3_REG_BASE_IDX 0
+#define regLSDMA_PHYSICAL_ADDR_LO 0x004d
+#define regLSDMA_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PHYSICAL_ADDR_HI 0x004e
+#define regLSDMA_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regLSDMA_ECC_CNTL 0x004f
+#define regLSDMA_ECC_CNTL_BASE_IDX 0
+#define regLSDMA_ERROR_LOG 0x0050
+#define regLSDMA_ERROR_LOG_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY0 0x0051
+#define regLSDMA_PUB_DUMMY0_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY1 0x0052
+#define regLSDMA_PUB_DUMMY1_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY2 0x0053
+#define regLSDMA_PUB_DUMMY2_BASE_IDX 0
+#define regLSDMA_PUB_DUMMY3 0x0054
+#define regLSDMA_PUB_DUMMY3_BASE_IDX 0
+#define regLSDMA_F32_COUNTER 0x0055
+#define regLSDMA_F32_COUNTER_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER0_CFG 0x0057
+#define regLSDMA_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER1_CFG 0x0058
+#define regLSDMA_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x0059
+#define regLSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regLSDMA_PERFCNT_MISC_CNTL 0x005a
+#define regLSDMA_PERFCNT_MISC_CNTL_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER_LO 0x005b
+#define regLSDMA_PERFCNT_PERFCOUNTER_LO_BASE_IDX 0
+#define regLSDMA_PERFCNT_PERFCOUNTER_HI 0x005c
+#define regLSDMA_PERFCNT_PERFCOUNTER_HI_BASE_IDX 0
+#define regLSDMA_CRD_CNTL 0x005d
+#define regLSDMA_CRD_CNTL_BASE_IDX 0
+#define regLSDMA_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regLSDMA_ULV_CNTL 0x005f
+#define regLSDMA_ULV_CNTL_BASE_IDX 0
+#define regLSDMA_EA_DBIT_ADDR_DATA 0x0060
+#define regLSDMA_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regLSDMA_EA_DBIT_ADDR_INDEX 0x0061
+#define regLSDMA_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regLSDMA_STATUS4_REG 0x0063
+#define regLSDMA_STATUS4_REG_BASE_IDX 0
+#define regLSDMA_CE_CTRL 0x0066
+#define regLSDMA_CE_CTRL_BASE_IDX 0
+#define regLSDMA_EXCEPTION_STATUS 0x0067
+#define regLSDMA_EXCEPTION_STATUS_BASE_IDX 0
+#define regLSDMA_INT_CNTL 0x0069
+#define regLSDMA_INT_CNTL_BASE_IDX 0
+#define regLSDMA_MEM_POWER_CTRL 0x006a
+#define regLSDMA_MEM_POWER_CTRL_BASE_IDX 0
+#define regLSDMA_CLK_CTRL 0x006b
+#define regLSDMA_CLK_CTRL_BASE_IDX 0
+#define regLSDMA_CNTL 0x006c
+#define regLSDMA_CNTL_BASE_IDX 0
+#define regLSDMA_CHICKEN_BITS 0x006d
+#define regLSDMA_CHICKEN_BITS_BASE_IDX 0
+#define regLSDMA_PIO_SRC_ADDR_LO 0x0070
+#define regLSDMA_PIO_SRC_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_SRC_ADDR_HI 0x0071
+#define regLSDMA_PIO_SRC_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_LO 0x0072
+#define regLSDMA_PIO_DST_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_HI 0x0073
+#define regLSDMA_PIO_DST_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_COMMAND 0x0074
+#define regLSDMA_PIO_COMMAND_BASE_IDX 0
+#define regLSDMA_PIO_CONSTFILL_DATA 0x0075
+#define regLSDMA_PIO_CONSTFILL_DATA_BASE_IDX 0
+#define regLSDMA_PIO_CONTROL 0x0076
+#define regLSDMA_PIO_CONTROL_BASE_IDX 0
+#define regLSDMA_PIO_STATUS 0x007a
+#define regLSDMA_PIO_STATUS_BASE_IDX 0
+#define regLSDMA_PF_PIO_STATUS 0x007b
+#define regLSDMA_PF_PIO_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_CNTL 0x0080
+#define regLSDMA_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_BASE 0x0081
+#define regLSDMA_QUEUE0_RB_BASE_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_BASE_HI 0x0082
+#define regLSDMA_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR 0x0083
+#define regLSDMA_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR_HI 0x0084
+#define regLSDMA_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR 0x0085
+#define regLSDMA_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_HI 0x0086
+#define regLSDMA_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_CNTL 0x0087
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x0088
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x0089
+#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_HI 0x008a
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_LO 0x008b
+#define regLSDMA_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_CNTL 0x008c
+#define regLSDMA_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_RPTR 0x008d
+#define regLSDMA_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_OFFSET 0x008e
+#define regLSDMA_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_BASE_LO 0x008f
+#define regLSDMA_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_BASE_HI 0x0090
+#define regLSDMA_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_SIZE 0x0091
+#define regLSDMA_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regLSDMA_QUEUE0_SKIP_CNTL 0x0092
+#define regLSDMA_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_CSA_ADDR_LO 0x0093
+#define regLSDMA_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE0_CSA_ADDR_HI 0x0094
+#define regLSDMA_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_AQL_CNTL 0x0095
+#define regLSDMA_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_MINOR_PTR_UPDATE 0x0096
+#define regLSDMA_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regLSDMA_QUEUE0_CNTL 0x0097
+#define regLSDMA_QUEUE0_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE0_RB_PREEMPT 0x0098
+#define regLSDMA_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE0_IB_SUB_REMAIN 0x0099
+#define regLSDMA_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regLSDMA_QUEUE0_PREEMPT 0x009a
+#define regLSDMA_QUEUE0_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE0_CONTEXT_STATUS 0x009b
+#define regLSDMA_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE0_STATUS 0x009c
+#define regLSDMA_QUEUE0_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE0_DOORBELL 0x009d
+#define regLSDMA_QUEUE0_DOORBELL_BASE_IDX 0
+#define regLSDMA_QUEUE0_DOORBELL_OFFSET 0x009e
+#define regLSDMA_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE0_DOORBELL_LOG 0x009f
+#define regLSDMA_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regLSDMA_QUEUE0_WATERMARK 0x00a0
+#define regLSDMA_QUEUE0_WATERMARK_BASE_IDX 0
+#define regLSDMA_QUEUE0_DUMMY0 0x00a1
+#define regLSDMA_QUEUE0_DUMMY0_BASE_IDX 0
+#define regLSDMA_QUEUE0_DUMMY1 0x00a2
+#define regLSDMA_QUEUE0_DUMMY1_BASE_IDX 0
+#define regLSDMA_QUEUE0_DUMMY2 0x00a3
+#define regLSDMA_QUEUE0_DUMMY2_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA0 0x00c0
+#define regLSDMA_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA1 0x00c1
+#define regLSDMA_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA2 0x00c2
+#define regLSDMA_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA3 0x00c3
+#define regLSDMA_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA4 0x00c4
+#define regLSDMA_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA5 0x00c5
+#define regLSDMA_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA6 0x00c6
+#define regLSDMA_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA7 0x00c7
+#define regLSDMA_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA8 0x00c8
+#define regLSDMA_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA9 0x00c9
+#define regLSDMA_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_DATA10 0x00ca
+#define regLSDMA_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regLSDMA_QUEUE0_MIDCMD_CNTL 0x00cb
+#define regLSDMA_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_CNTL 0x00d8
+#define regLSDMA_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_BASE 0x00d9
+#define regLSDMA_QUEUE1_RB_BASE_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_BASE_HI 0x00da
+#define regLSDMA_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR 0x00db
+#define regLSDMA_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR_HI 0x00dc
+#define regLSDMA_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR 0x00dd
+#define regLSDMA_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_HI 0x00de
+#define regLSDMA_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_CNTL 0x00df
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x00e0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x00e1
+#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_HI 0x00e2
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_LO 0x00e3
+#define regLSDMA_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_CNTL 0x00e4
+#define regLSDMA_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_RPTR 0x00e5
+#define regLSDMA_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_OFFSET 0x00e6
+#define regLSDMA_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_BASE_LO 0x00e7
+#define regLSDMA_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_BASE_HI 0x00e8
+#define regLSDMA_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_SIZE 0x00e9
+#define regLSDMA_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regLSDMA_QUEUE1_SKIP_CNTL 0x00ea
+#define regLSDMA_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_CSA_ADDR_LO 0x00eb
+#define regLSDMA_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regLSDMA_QUEUE1_CSA_ADDR_HI 0x00ec
+#define regLSDMA_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_AQL_CNTL 0x00ed
+#define regLSDMA_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_MINOR_PTR_UPDATE 0x00ee
+#define regLSDMA_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regLSDMA_QUEUE1_CNTL 0x00ef
+#define regLSDMA_QUEUE1_CNTL_BASE_IDX 0
+#define regLSDMA_QUEUE1_RB_PREEMPT 0x00f0
+#define regLSDMA_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE1_IB_SUB_REMAIN 0x00f1
+#define regLSDMA_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regLSDMA_QUEUE1_PREEMPT 0x00f2
+#define regLSDMA_QUEUE1_PREEMPT_BASE_IDX 0
+#define regLSDMA_QUEUE1_CONTEXT_STATUS 0x00f3
+#define regLSDMA_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE1_STATUS 0x00f4
+#define regLSDMA_QUEUE1_STATUS_BASE_IDX 0
+#define regLSDMA_QUEUE1_DOORBELL 0x00f5
+#define regLSDMA_QUEUE1_DOORBELL_BASE_IDX 0
+#define regLSDMA_QUEUE1_DOORBELL_OFFSET 0x00f6
+#define regLSDMA_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regLSDMA_QUEUE1_DOORBELL_LOG 0x00f7
+#define regLSDMA_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regLSDMA_QUEUE1_WATERMARK 0x00f8
+#define regLSDMA_QUEUE1_WATERMARK_BASE_IDX 0
+#define regLSDMA_QUEUE1_DUMMY0 0x00f9
+#define regLSDMA_QUEUE1_DUMMY0_BASE_IDX 0
+#define regLSDMA_QUEUE1_DUMMY1 0x00fa
+#define regLSDMA_QUEUE1_DUMMY1_BASE_IDX 0
+#define regLSDMA_QUEUE1_DUMMY2 0x00fb
+#define regLSDMA_QUEUE1_DUMMY2_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA0 0x0118
+#define regLSDMA_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA1 0x0119
+#define regLSDMA_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA2 0x011a
+#define regLSDMA_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA3 0x011b
+#define regLSDMA_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA4 0x011c
+#define regLSDMA_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA5 0x011d
+#define regLSDMA_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA6 0x011e
+#define regLSDMA_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA7 0x011f
+#define regLSDMA_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA8 0x0120
+#define regLSDMA_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA9 0x0121
+#define regLSDMA_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_DATA10 0x0122
+#define regLSDMA_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regLSDMA_QUEUE1_MIDCMD_CNTL 0x0123
+#define regLSDMA_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h
new file mode 100644
index 000000000000..644a5d066ab2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h
@@ -0,0 +1,1411 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_0_0_SH_MASK_HEADER
+#define _lsdma_7_0_0_SH_MASK_HEADER
+
+
+// addressBlock: lsdma0_lsdma0dec
+//LSDMA_UCODE_ADDR
+#define LSDMA_UCODE_ADDR__VALUE__SHIFT 0x0
+#define LSDMA_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//LSDMA_UCODE_DATA
+#define LSDMA_UCODE_DATA__VALUE__SHIFT 0x0
+#define LSDMA_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_ERROR_INJECT_CNTL
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_IRRITATION__SHIFT 0x0
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x1
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x2
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_MEMHUB_READ_POISON_INJECT__SHIFT 0x8
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_MEMHUB_ATOMIC_POISON_INJECT__SHIFT 0x9
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_IRRITATION_MASK 0x00000001L
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000002L
+#define LSDMA_ERROR_INJECT_CNTL__ENABLE_ERROR_INJECT_MASK 0x0000000CL
+//LSDMA_ERROR_INJECT_SELECT
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF0__SHIFT 0x0
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF1__SHIFT 0x1
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF2__SHIFT 0x2
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF3__SHIFT 0x3
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF4__SHIFT 0x4
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF5__SHIFT 0x5
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF6__SHIFT 0x6
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF7__SHIFT 0x7
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF8__SHIFT 0x8
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF9__SHIFT 0x9
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF10__SHIFT 0xa
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF11__SHIFT 0xb
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF12__SHIFT 0xc
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF13__SHIFT 0xd
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF14__SHIFT 0xe
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF15__SHIFT 0xf
+#define LSDMA_ERROR_INJECT_SELECT__UCODE_BUF__SHIFT 0x10
+#define LSDMA_ERROR_INJECT_SELECT__RB_CMD_BUF__SHIFT 0x11
+#define LSDMA_ERROR_INJECT_SELECT__IB_CMD_BUF__SHIFT 0x12
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RD_FIFO__SHIFT 0x13
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RDBST_FIFO__SHIFT 0x14
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_WR_FIFO__SHIFT 0x15
+#define LSDMA_ERROR_INJECT_SELECT__DATA_LUT_FIFO__SHIFT 0x16
+#define LSDMA_ERROR_INJECT_SELECT__SPLIT_DATA_FIFO__SHIFT 0x17
+#define LSDMA_ERROR_INJECT_SELECT__MC_WR_ADDR_FIFO__SHIFT 0x18
+#define LSDMA_ERROR_INJECT_SELECT__MC_RDRET_BUF__SHIFT 0x19
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF0_MASK 0x00000001L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF1_MASK 0x00000002L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF2_MASK 0x00000004L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF3_MASK 0x00000008L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF4_MASK 0x00000010L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF5_MASK 0x00000020L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF6_MASK 0x00000040L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF7_MASK 0x00000080L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF8_MASK 0x00000100L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF9_MASK 0x00000200L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF10_MASK 0x00000400L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF11_MASK 0x00000800L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF12_MASK 0x00001000L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF13_MASK 0x00002000L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF14_MASK 0x00004000L
+#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF15_MASK 0x00008000L
+#define LSDMA_ERROR_INJECT_SELECT__UCODE_BUF_MASK 0x00010000L
+#define LSDMA_ERROR_INJECT_SELECT__RB_CMD_BUF_MASK 0x00020000L
+#define LSDMA_ERROR_INJECT_SELECT__IB_CMD_BUF_MASK 0x00040000L
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RD_FIFO_MASK 0x00080000L
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RDBST_FIFO_MASK 0x00100000L
+#define LSDMA_ERROR_INJECT_SELECT__UTCL1_WR_FIFO_MASK 0x00200000L
+#define LSDMA_ERROR_INJECT_SELECT__DATA_LUT_FIFO_MASK 0x00400000L
+#define LSDMA_ERROR_INJECT_SELECT__SPLIT_DATA_FIFO_MASK 0x00800000L
+#define LSDMA_ERROR_INJECT_SELECT__MC_WR_ADDR_FIFO_MASK 0x01000000L
+#define LSDMA_ERROR_INJECT_SELECT__MC_RDRET_BUF_MASK 0x02000000L
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_ADDR__SHIFT 0x0
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_DATA__SHIFT 0x1
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_ADDR_MASK 0x00000001L
+#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_DATA_MASK 0x00000002L
+#define LSDMA_PUB_REG_TYPE3__LSDMA_CLK_CTRL__SHIFT 0xb
+#define LSDMA_PUB_REG_TYPE3__LSDMA_CLK_CTRL_MASK 0x00000800L
+//LSDMA_CONTEXT_GROUP_BOUNDARY
+#define LSDMA_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define LSDMA_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//LSDMA_RB_RPTR_FETCH_HI
+#define LSDMA_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define LSDMA_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_SEM_WAIT_FAIL_TIMER_CNTL
+#define LSDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define LSDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//LSDMA_RB_RPTR_FETCH
+#define LSDMA_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define LSDMA_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//LSDMA_IB_OFFSET_FETCH
+#define LSDMA_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define LSDMA_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//LSDMA_PROGRAM
+#define LSDMA_PROGRAM__STREAM__SHIFT 0x0
+#define LSDMA_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//LSDMA_STATUS_REG
+#define LSDMA_STATUS_REG__IDLE__SHIFT 0x0
+#define LSDMA_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define LSDMA_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define LSDMA_STATUS_REG__RB_FULL__SHIFT 0x3
+#define LSDMA_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define LSDMA_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define LSDMA_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define LSDMA_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define LSDMA_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define LSDMA_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define LSDMA_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define LSDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define LSDMA_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define LSDMA_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define LSDMA_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define LSDMA_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define LSDMA_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define LSDMA_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define LSDMA_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define LSDMA_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define LSDMA_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define LSDMA_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define LSDMA_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define LSDMA_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define LSDMA_STATUS_REG__Reserved__SHIFT 0x18
+#define LSDMA_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define LSDMA_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define LSDMA_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define LSDMA_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define LSDMA_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define LSDMA_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define LSDMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define LSDMA_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define LSDMA_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define LSDMA_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define LSDMA_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define LSDMA_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define LSDMA_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define LSDMA_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define LSDMA_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define LSDMA_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define LSDMA_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define LSDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define LSDMA_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define LSDMA_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define LSDMA_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define LSDMA_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define LSDMA_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define LSDMA_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define LSDMA_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define LSDMA_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define LSDMA_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define LSDMA_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define LSDMA_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define LSDMA_STATUS_REG__Reserved_MASK 0x01000000L
+#define LSDMA_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define LSDMA_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define LSDMA_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define LSDMA_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define LSDMA_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define LSDMA_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//LSDMA_STATUS1_REG
+#define LSDMA_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define LSDMA_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define LSDMA_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define LSDMA_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define LSDMA_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define LSDMA_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define LSDMA_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define LSDMA_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define LSDMA_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define LSDMA_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define LSDMA_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define LSDMA_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define LSDMA_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define LSDMA_STATUS1_REG__EX_START__SHIFT 0xd
+#define LSDMA_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define LSDMA_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define LSDMA_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define LSDMA_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define LSDMA_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define LSDMA_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define LSDMA_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define LSDMA_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define LSDMA_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define LSDMA_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define LSDMA_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define LSDMA_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define LSDMA_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define LSDMA_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define LSDMA_STATUS1_REG__EX_START_MASK 0x00002000L
+#define LSDMA_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define LSDMA_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+//LSDMA_RD_BURST_CNTL
+#define LSDMA_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define LSDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define LSDMA_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define LSDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//LSDMA_HBM_PAGE_CONFIG
+#define LSDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define LSDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//LSDMA_UCODE_CHECKSUM
+#define LSDMA_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define LSDMA_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//LSDMA_FREEZE
+#define LSDMA_FREEZE__PREEMPT__SHIFT 0x0
+#define LSDMA_FREEZE__FREEZE__SHIFT 0x4
+#define LSDMA_FREEZE__FROZEN__SHIFT 0x5
+#define LSDMA_FREEZE__F32_FREEZE__SHIFT 0x6
+#define LSDMA_FREEZE__PREEMPT_MASK 0x00000001L
+#define LSDMA_FREEZE__FREEZE_MASK 0x00000010L
+#define LSDMA_FREEZE__FROZEN_MASK 0x00000020L
+#define LSDMA_FREEZE__F32_FREEZE_MASK 0x00000040L
+//LSDMA_DCC_CNTL
+#define LSDMA_DCC_CNTL__DCC_FORCE_BYPASS__SHIFT 0x0
+#define LSDMA_DCC_CNTL__DCC_FORCE_BYPASS_MASK 0x00000001L
+//LSDMA_POWER_GATING
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_CONDITION__SHIFT 0x0
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_CONDITION__SHIFT 0x1
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_REQ__SHIFT 0x2
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_REQ__SHIFT 0x3
+#define LSDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_CONDITION_MASK 0x00000001L
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_CONDITION_MASK 0x00000002L
+#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_REQ_MASK 0x00000004L
+#define LSDMA_POWER_GATING__LSDMA_POWER_ON_REQ_MASK 0x00000008L
+#define LSDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//LSDMA_PGFSM_CONFIG
+#define LSDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define LSDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define LSDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define LSDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define LSDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define LSDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define LSDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define LSDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define LSDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define LSDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define LSDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define LSDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define LSDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define LSDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define LSDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define LSDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define LSDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define LSDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//LSDMA_PGFSM_WRITE
+#define LSDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define LSDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_PGFSM_READ
+#define LSDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define LSDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//LSDMA_BA_THRESHOLD
+#define LSDMA_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define LSDMA_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define LSDMA_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define LSDMA_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//LSDMA_ID
+#define LSDMA_ID__DEVICE_ID__SHIFT 0x0
+#define LSDMA_ID__DEVICE_ID_MASK 0x000000FFL
+//LSDMA_VERSION
+#define LSDMA_VERSION__MINVER__SHIFT 0x0
+#define LSDMA_VERSION__MAJVER__SHIFT 0x8
+#define LSDMA_VERSION__REV__SHIFT 0x10
+#define LSDMA_VERSION__MINVER_MASK 0x0000007FL
+#define LSDMA_VERSION__MAJVER_MASK 0x00007F00L
+#define LSDMA_VERSION__REV_MASK 0x003F0000L
+//LSDMA_EDC_COUNTER
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF0_SED__SHIFT 0x0
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF1_SED__SHIFT 0x2
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF2_SED__SHIFT 0x4
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF3_SED__SHIFT 0x6
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF4_SED__SHIFT 0x8
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF5_SED__SHIFT 0xa
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF6_SED__SHIFT 0xc
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF8_SED__SHIFT 0x10
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF9_SED__SHIFT 0x12
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF10_SED__SHIFT 0x14
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF11_SED__SHIFT 0x16
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF12_SED__SHIFT 0x18
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF13_SED__SHIFT 0x1a
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF14_SED__SHIFT 0x1c
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF15_SED__SHIFT 0x1e
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF0_SED_MASK 0x00000003L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF1_SED_MASK 0x0000000CL
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF2_SED_MASK 0x00000030L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF3_SED_MASK 0x000000C0L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF4_SED_MASK 0x00000300L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF5_SED_MASK 0x00000C00L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF6_SED_MASK 0x00003000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF7_SED_MASK 0x0000C000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF8_SED_MASK 0x00030000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF9_SED_MASK 0x000C0000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF10_SED_MASK 0x00300000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF11_SED_MASK 0x00C00000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF12_SED_MASK 0x03000000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF13_SED_MASK 0x0C000000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF14_SED_MASK 0x30000000L
+#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF15_SED_MASK 0xC0000000L
+//LSDMA_EDC_COUNTER2
+#define LSDMA_EDC_COUNTER2__LSDMA_UCODE_BUF_SED__SHIFT 0x0
+#define LSDMA_EDC_COUNTER2__LSDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define LSDMA_EDC_COUNTER2__LSDMA_IB_CMD_BUF_SED__SHIFT 0x4
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RD_FIFO_SED__SHIFT 0x6
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x8
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_WR_FIFO_SED__SHIFT 0xa
+#define LSDMA_EDC_COUNTER2__LSDMA_DATA_LUT_FIFO_SED__SHIFT 0xc
+#define LSDMA_EDC_COUNTER2__LSDMA_SPLIT_DATA_BUF_SED__SHIFT 0xe
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_RDRET_BUF_SED__SHIFT 0x12
+#define LSDMA_EDC_COUNTER2__LSDMA_UCODE_BUF_SED_MASK 0x00000003L
+#define LSDMA_EDC_COUNTER2__LSDMA_RB_CMD_BUF_SED_MASK 0x0000000CL
+#define LSDMA_EDC_COUNTER2__LSDMA_IB_CMD_BUF_SED_MASK 0x00000030L
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RD_FIFO_SED_MASK 0x000000C0L
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000300L
+#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_WR_FIFO_SED_MASK 0x00000C00L
+#define LSDMA_EDC_COUNTER2__LSDMA_DATA_LUT_FIFO_SED_MASK 0x00003000L
+#define LSDMA_EDC_COUNTER2__LSDMA_SPLIT_DATA_BUF_SED_MASK 0x0000C000L
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00030000L
+#define LSDMA_EDC_COUNTER2__LSDMA_MC_RDRET_BUF_SED_MASK 0x000C0000L
+//LSDMA_STATUS2_REG
+#define LSDMA_STATUS2_REG__ID__SHIFT 0x0
+#define LSDMA_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define LSDMA_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define LSDMA_STATUS2_REG__ID_MASK 0x00000007L
+#define LSDMA_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define LSDMA_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//LSDMA_ATOMIC_CNTL
+#define LSDMA_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define LSDMA_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+//LSDMA_ATOMIC_PREOP_LO
+#define LSDMA_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define LSDMA_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//LSDMA_ATOMIC_PREOP_HI
+#define LSDMA_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define LSDMA_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_CNTL
+#define LSDMA_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define LSDMA_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define LSDMA_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define LSDMA_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define LSDMA_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define LSDMA_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define LSDMA_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define LSDMA_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define LSDMA_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define LSDMA_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define LSDMA_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define LSDMA_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//LSDMA_UTCL1_WATERMK
+#define LSDMA_UTCL1_WATERMK__REQ_WATERMK__SHIFT 0x0
+#define LSDMA_UTCL1_WATERMK__REQ_DEPTH__SHIFT 0x3
+#define LSDMA_UTCL1_WATERMK__PAGE_WATERMK__SHIFT 0x5
+#define LSDMA_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x8
+#define LSDMA_UTCL1_WATERMK__RESERVED__SHIFT 0x10
+#define LSDMA_UTCL1_WATERMK__REQ_WATERMK_MASK 0x00000007L
+#define LSDMA_UTCL1_WATERMK__REQ_DEPTH_MASK 0x00000018L
+#define LSDMA_UTCL1_WATERMK__PAGE_WATERMK_MASK 0x000000E0L
+#define LSDMA_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x0000FF00L
+#define LSDMA_UTCL1_WATERMK__RESERVED_MASK 0xFFFF0000L
+//LSDMA_UTCL1_RD_STATUS
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define LSDMA_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define LSDMA_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define LSDMA_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define LSDMA_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define LSDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define LSDMA_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define LSDMA_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define LSDMA_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define LSDMA_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define LSDMA_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define LSDMA_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define LSDMA_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define LSDMA_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define LSDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define LSDMA_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define LSDMA_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define LSDMA_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define LSDMA_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//LSDMA_UTCL1_WR_STATUS
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define LSDMA_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define LSDMA_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define LSDMA_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define LSDMA_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define LSDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define LSDMA_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define LSDMA_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define LSDMA_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define LSDMA_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define LSDMA_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define LSDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define LSDMA_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//LSDMA_UTCL1_INV0
+#define LSDMA_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define LSDMA_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define LSDMA_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define LSDMA_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define LSDMA_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define LSDMA_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define LSDMA_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define LSDMA_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define LSDMA_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define LSDMA_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define LSDMA_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define LSDMA_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define LSDMA_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define LSDMA_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define LSDMA_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define LSDMA_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define LSDMA_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define LSDMA_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define LSDMA_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define LSDMA_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define LSDMA_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define LSDMA_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define LSDMA_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define LSDMA_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define LSDMA_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define LSDMA_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define LSDMA_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define LSDMA_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//LSDMA_UTCL1_INV1
+#define LSDMA_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define LSDMA_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_INV2
+#define LSDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define LSDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_RD_XNACK0
+#define LSDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define LSDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_RD_XNACK1
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define LSDMA_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define LSDMA_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define LSDMA_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//LSDMA_UTCL1_WR_XNACK0
+#define LSDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define LSDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_UTCL1_WR_XNACK1
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define LSDMA_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define LSDMA_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define LSDMA_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//LSDMA_UTCL1_TIMEOUT
+#define LSDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define LSDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define LSDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define LSDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//LSDMA_UTCL1_PAGE
+#define LSDMA_UTCL1_PAGE__INVALID_ADDR__SHIFT 0x0
+#define LSDMA_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define LSDMA_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define LSDMA_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define LSDMA_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define LSDMA_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define LSDMA_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define LSDMA_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define LSDMA_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//LSDMA_RELAX_ORDERING_LUT
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define LSDMA_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define LSDMA_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define LSDMA_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define LSDMA_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define LSDMA_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define LSDMA_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define LSDMA_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define LSDMA_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define LSDMA_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define LSDMA_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define LSDMA_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define LSDMA_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define LSDMA_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define LSDMA_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define LSDMA_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define LSDMA_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define LSDMA_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define LSDMA_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define LSDMA_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define LSDMA_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define LSDMA_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define LSDMA_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define LSDMA_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define LSDMA_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define LSDMA_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define LSDMA_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define LSDMA_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define LSDMA_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define LSDMA_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//LSDMA_CHICKEN_BITS_2
+#define LSDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define LSDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define LSDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define LSDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+//LSDMA_STATUS3_REG
+#define LSDMA_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define LSDMA_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define LSDMA_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define LSDMA_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define LSDMA_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define LSDMA_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define LSDMA_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define LSDMA_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define LSDMA_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define LSDMA_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//LSDMA_PHYSICAL_ADDR_LO
+#define LSDMA_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define LSDMA_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define LSDMA_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define LSDMA_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define LSDMA_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define LSDMA_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define LSDMA_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define LSDMA_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//LSDMA_PHYSICAL_ADDR_HI
+#define LSDMA_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//LSDMA_ECC_CNTL
+#define LSDMA_ECC_CNTL__ECC_DISABLE__SHIFT 0x0
+#define LSDMA_ECC_CNTL__ECC_DISABLE_MASK 0x00000001L
+//LSDMA_ERROR_LOG
+#define LSDMA_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define LSDMA_ERROR_LOG__STATUS__SHIFT 0x10
+#define LSDMA_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define LSDMA_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//LSDMA_PUB_DUMMY0
+#define LSDMA_PUB_DUMMY0__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY0__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_PUB_DUMMY1
+#define LSDMA_PUB_DUMMY1__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY1__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_PUB_DUMMY2
+#define LSDMA_PUB_DUMMY2__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY2__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_PUB_DUMMY3
+#define LSDMA_PUB_DUMMY3__DUMMY__SHIFT 0x0
+#define LSDMA_PUB_DUMMY3__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_F32_COUNTER
+#define LSDMA_F32_COUNTER__VALUE__SHIFT 0x0
+#define LSDMA_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_PERFCNT_PERFCOUNTER0_CFG
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//LSDMA_PERFCNT_PERFCOUNTER1_CFG
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//LSDMA_PERFCNT_MISC_CNTL
+#define LSDMA_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define LSDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT__SHIFT 0x10
+#define LSDMA_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+#define LSDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT_MASK 0x00010000L
+//LSDMA_PERFCNT_PERFCOUNTER_LO
+#define LSDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//LSDMA_PERFCNT_PERFCOUNTER_HI
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define LSDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//LSDMA_CRD_CNTL
+#define LSDMA_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define LSDMA_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define LSDMA_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define LSDMA_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define LSDMA_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//LSDMA_ULV_CNTL
+#define LSDMA_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define LSDMA_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define LSDMA_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define LSDMA_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define LSDMA_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define LSDMA_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//LSDMA_EA_DBIT_ADDR_DATA
+#define LSDMA_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define LSDMA_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//LSDMA_EA_DBIT_ADDR_INDEX
+#define LSDMA_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define LSDMA_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//LSDMA_STATUS4_REG
+#define LSDMA_STATUS4_REG__IDLE__SHIFT 0x0
+#define LSDMA_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define LSDMA_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define LSDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING__SHIFT 0x4
+#define LSDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING__SHIFT 0x5
+#define LSDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x6
+#define LSDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x7
+#define LSDMA_STATUS4_REG__REG_POLLING__SHIFT 0x8
+#define LSDMA_STATUS4_REG__MEM_POLLING__SHIFT 0x9
+#define LSDMA_STATUS4_REG__UTCL2_RD_XNACK__SHIFT 0xa
+#define LSDMA_STATUS4_REG__UTCL2_WR_XNACK__SHIFT 0xc
+#define LSDMA_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0xe
+#define LSDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x12
+#define LSDMA_STATUS4_REG__SRIOV_LSDMA_EXECUTING_CMD__SHIFT 0x13
+#define LSDMA_STATUS4_REG__IDLE_MASK 0x00000001L
+#define LSDMA_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define LSDMA_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define LSDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING_MASK 0x00000010L
+#define LSDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING_MASK 0x00000020L
+#define LSDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000040L
+#define LSDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000080L
+#define LSDMA_STATUS4_REG__REG_POLLING_MASK 0x00000100L
+#define LSDMA_STATUS4_REG__MEM_POLLING_MASK 0x00000200L
+#define LSDMA_STATUS4_REG__UTCL2_RD_XNACK_MASK 0x00000C00L
+#define LSDMA_STATUS4_REG__UTCL2_WR_XNACK_MASK 0x00003000L
+#define LSDMA_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x0003C000L
+#define LSDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00040000L
+#define LSDMA_STATUS4_REG__SRIOV_LSDMA_EXECUTING_CMD_MASK 0x00080000L
+//LSDMA_CE_CTRL
+#define LSDMA_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define LSDMA_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define LSDMA_CE_CTRL__RESERVED_7_5__SHIFT 0x5
+#define LSDMA_CE_CTRL__RESERVED__SHIFT 0x8
+#define LSDMA_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define LSDMA_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define LSDMA_CE_CTRL__RESERVED_7_5_MASK 0x000000E0L
+#define LSDMA_CE_CTRL__RESERVED_MASK 0xFFFFFF00L
+//LSDMA_EXCEPTION_STATUS
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_ECC__SHIFT 0x2
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_ECC__SHIFT 0x3
+#define LSDMA_EXCEPTION_STATUS__SRAM_ECC__SHIFT 0x6
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_GEN_ERR__SHIFT 0x8
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_GEN_ERR__SHIFT 0x9
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_GEN_ERR__SHIFT 0xa
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_GEN_ERR__SHIFT 0xb
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_GEN_ERR__SHIFT 0xd
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_PRT__SHIFT 0x10
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_PRT__SHIFT 0x11
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_PRT__SHIFT 0x12
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_PRT__SHIFT 0x13
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_PRT__SHIFT 0x15
+#define LSDMA_EXCEPTION_STATUS__INVALID_ADDR__SHIFT 0x18
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_ECC_MASK 0x00000004L
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_ECC_MASK 0x00000008L
+#define LSDMA_EXCEPTION_STATUS__SRAM_ECC_MASK 0x00000040L
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_GEN_ERR_MASK 0x00000100L
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_GEN_ERR_MASK 0x00000200L
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_GEN_ERR_MASK 0x00000400L
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_GEN_ERR_MASK 0x00000800L
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_GEN_ERR_MASK 0x00002000L
+#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_PRT_MASK 0x00010000L
+#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_PRT_MASK 0x00020000L
+#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_PRT_MASK 0x00040000L
+#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_PRT_MASK 0x00080000L
+#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_PRT_MASK 0x00200000L
+//LSDMA_INT_CNTL
+#define LSDMA_INT_CNTL__ATOMIC_RTN_DONE_INT_ENABLE__SHIFT 0x0
+#define LSDMA_INT_CNTL__TRAP_INT_ENABLE__SHIFT 0x1
+#define LSDMA_INT_CNTL__SRBM_WRITE_INT_ENABLE__SHIFT 0x2
+#define LSDMA_INT_CNTL__CTX_EMPTY_INT_ENABLE__SHIFT 0x3
+#define LSDMA_INT_CNTL__FROZEN_INT_ENABLE__SHIFT 0x4
+#define LSDMA_INT_CNTL__PREEMPT_INT_ENABLE__SHIFT 0x5
+#define LSDMA_INT_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x6
+#define LSDMA_INT_CNTL__ATOMIC_TIMEOUT_INT_ENABLE__SHIFT 0x7
+#define LSDMA_INT_CNTL__POLL_TIMEOUT_INT_ENABLE__SHIFT 0x8
+#define LSDMA_INT_CNTL__INVALID_ADDR_INT_ENABLE__SHIFT 0x9
+#define LSDMA_INT_CNTL__NACK_GEN_ERR_INT_ENABLE__SHIFT 0xa
+#define LSDMA_INT_CNTL__NACK_PRT_INT_ENABLE__SHIFT 0xb
+#define LSDMA_INT_CNTL__ECC_INT_ENABLE__SHIFT 0xc
+#define LSDMA_INT_CNTL__ATOMIC_RTN_DONE_INT_ENABLE_MASK 0x00000001L
+#define LSDMA_INT_CNTL__TRAP_INT_ENABLE_MASK 0x00000002L
+#define LSDMA_INT_CNTL__SRBM_WRITE_INT_ENABLE_MASK 0x00000004L
+#define LSDMA_INT_CNTL__CTX_EMPTY_INT_ENABLE_MASK 0x00000008L
+#define LSDMA_INT_CNTL__FROZEN_INT_ENABLE_MASK 0x00000010L
+#define LSDMA_INT_CNTL__PREEMPT_INT_ENABLE_MASK 0x00000020L
+#define LSDMA_INT_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x00000040L
+#define LSDMA_INT_CNTL__ATOMIC_TIMEOUT_INT_ENABLE_MASK 0x00000080L
+#define LSDMA_INT_CNTL__POLL_TIMEOUT_INT_ENABLE_MASK 0x00000100L
+#define LSDMA_INT_CNTL__NACK_GEN_ERR_INT_ENABLE_MASK 0x00000400L
+#define LSDMA_INT_CNTL__NACK_PRT_INT_ENABLE_MASK 0x00000800L
+#define LSDMA_INT_CNTL__ECC_INT_ENABLE_MASK 0x00001000L
+//LSDMA_MEM_POWER_CTRL
+#define LSDMA_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT 0x0
+#define LSDMA_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK 0x00000001L
+//LSDMA_CLK_CTRL
+#define LSDMA_CLK_CTRL__RESERVED__SHIFT 0x1
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define LSDMA_CLK_CTRL__RESERVED_MASK 0x00FFFFFEL
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define LSDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//LSDMA_CNTL
+#define LSDMA_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define LSDMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define LSDMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define LSDMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define LSDMA_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define LSDMA_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x6
+#define LSDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define LSDMA_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define LSDMA_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define LSDMA_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define LSDMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define LSDMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define LSDMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define LSDMA_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define LSDMA_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000040L
+#define LSDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define LSDMA_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+//LSDMA_CHICKEN_BITS
+#define LSDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define LSDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define LSDMA_CHICKEN_BITS__F32_MGCG_ENABLE__SHIFT 0x3
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define LSDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define LSDMA_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define LSDMA_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define LSDMA_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define LSDMA_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define LSDMA_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x18
+#define LSDMA_CHICKEN_BITS__DRAM_ECC_NACK_F32_RESET_ENABLE__SHIFT 0x19
+#define LSDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define LSDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define LSDMA_CHICKEN_BITS__F32_MGCG_ENABLE_MASK 0x00000008L
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define LSDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define LSDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define LSDMA_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define LSDMA_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define LSDMA_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define LSDMA_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+//LSDMA_PIO_SRC_ADDR_LO
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_SRC_ADDR_HI
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_LO
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_HI
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_COMMAND
+#define LSDMA_PIO_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define LSDMA_PIO_COMMAND__SRC_LOCATION__SHIFT 0x1a
+#define LSDMA_PIO_COMMAND__DST_LOCATION__SHIFT 0x1b
+#define LSDMA_PIO_COMMAND__SRC_ADDR_INC__SHIFT 0x1c
+#define LSDMA_PIO_COMMAND__DST_ADDR_INC__SHIFT 0x1d
+#define LSDMA_PIO_COMMAND__OVERLAP_DISABLE__SHIFT 0x1e
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL__SHIFT 0x1f
+#define LSDMA_PIO_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define LSDMA_PIO_COMMAND__SRC_LOCATION_MASK 0x04000000L
+#define LSDMA_PIO_COMMAND__DST_LOCATION_MASK 0x08000000L
+#define LSDMA_PIO_COMMAND__SRC_ADDR_INC_MASK 0x10000000L
+#define LSDMA_PIO_COMMAND__DST_ADDR_INC_MASK 0x20000000L
+#define LSDMA_PIO_COMMAND__OVERLAP_DISABLE_MASK 0x40000000L
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL_MASK 0x80000000L
+//LSDMA_PIO_CONSTFILL_DATA
+#define LSDMA_PIO_CONSTFILL_DATA__DATA__SHIFT 0x0
+#define LSDMA_PIO_CONSTFILL_DATA__DATA_MASK 0xFFFFFFFFL
+//LSDMA_PIO_CONTROL
+#define LSDMA_PIO_CONTROL__VMID__SHIFT 0x0
+#define LSDMA_PIO_CONTROL__DST_GPA__SHIFT 0x4
+#define LSDMA_PIO_CONTROL__DST_SYS__SHIFT 0x5
+#define LSDMA_PIO_CONTROL__DST_GCC__SHIFT 0x6
+#define LSDMA_PIO_CONTROL__DST_SNOOP__SHIFT 0x7
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT__SHIFT 0x8
+#define LSDMA_PIO_CONTROL__DST_COMP_EN__SHIFT 0xa
+#define LSDMA_PIO_CONTROL__SRC_GPA__SHIFT 0x14
+#define LSDMA_PIO_CONTROL__SRC_SYS__SHIFT 0x15
+#define LSDMA_PIO_CONTROL__SRC_SNOOP__SHIFT 0x17
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT__SHIFT 0x18
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN__SHIFT 0x1a
+#define LSDMA_PIO_CONTROL__VMID_MASK 0x0000000FL
+#define LSDMA_PIO_CONTROL__DST_GPA_MASK 0x00000010L
+#define LSDMA_PIO_CONTROL__DST_SYS_MASK 0x00000020L
+#define LSDMA_PIO_CONTROL__DST_GCC_MASK 0x00000040L
+#define LSDMA_PIO_CONTROL__DST_SNOOP_MASK 0x00000080L
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT_MASK 0x00000300L
+#define LSDMA_PIO_CONTROL__DST_COMP_EN_MASK 0x00000400L
+#define LSDMA_PIO_CONTROL__SRC_GPA_MASK 0x00100000L
+#define LSDMA_PIO_CONTROL__SRC_SYS_MASK 0x00200000L
+#define LSDMA_PIO_CONTROL__SRC_SNOOP_MASK 0x00800000L
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT_MASK 0x03000000L
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN_MASK 0x04000000L
+//LSDMA_PIO_STATUS
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0
+#define LSDMA_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0x8
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0x9
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xa
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xb
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_PIO_STATUS__PIO_IDLE__SHIFT 0x1f
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L
+#define LSDMA_PIO_STATUS__CMD_PROCESSING_MASK 0x000000F8L
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR_MASK 0x00000100L
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00000200L
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00000400L
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00000800L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_PIO_STATUS__PIO_IDLE_MASK 0x80000000L
+//LSDMA_PF_PIO_STATUS
+#define LSDMA_PF_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0
+#define LSDMA_PF_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3
+#define LSDMA_PF_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0x8
+#define LSDMA_PF_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0x9
+#define LSDMA_PF_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xa
+#define LSDMA_PF_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xb
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_PF_PIO_STATUS__PIO_IDLE__SHIFT 0x1f
+#define LSDMA_PF_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L
+#define LSDMA_PF_PIO_STATUS__CMD_PROCESSING_MASK 0x000000F8L
+#define LSDMA_PF_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00000200L
+#define LSDMA_PF_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00000400L
+#define LSDMA_PF_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00000800L
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L
+#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L
+#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_PF_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_PF_PIO_STATUS__PIO_IDLE_MASK 0x80000000L
+//LSDMA_QUEUE0_RB_CNTL
+#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define LSDMA_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define LSDMA_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define LSDMA_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define LSDMA_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//LSDMA_QUEUE0_RB_BASE
+#define LSDMA_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_BASE_HI
+#define LSDMA_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//LSDMA_QUEUE0_RB_RPTR
+#define LSDMA_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_RPTR_HI
+#define LSDMA_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR
+#define LSDMA_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR_HI
+#define LSDMA_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR_POLL_CNTL
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_RB_RPTR_ADDR_HI
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_RPTR_ADDR_LO
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_IB_CNTL
+#define LSDMA_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define LSDMA_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define LSDMA_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define LSDMA_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define LSDMA_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define LSDMA_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define LSDMA_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//LSDMA_QUEUE0_IB_RPTR
+#define LSDMA_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE0_IB_OFFSET
+#define LSDMA_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE0_IB_BASE_LO
+#define LSDMA_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define LSDMA_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//LSDMA_QUEUE0_IB_BASE_HI
+#define LSDMA_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_IB_SIZE
+#define LSDMA_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE0_SKIP_CNTL
+#define LSDMA_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//LSDMA_QUEUE0_CSA_ADDR_LO
+#define LSDMA_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_CSA_ADDR_HI
+#define LSDMA_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_RB_AQL_CNTL
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define LSDMA_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//LSDMA_QUEUE0_MINOR_PTR_UPDATE
+#define LSDMA_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//LSDMA_QUEUE0_CNTL
+#define LSDMA_QUEUE0_CNTL__QUANTUM__SHIFT 0x0
+#define LSDMA_QUEUE0_CNTL__QUANTUM_MASK 0x000000FFL
+//LSDMA_QUEUE0_RB_PREEMPT
+#define LSDMA_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define LSDMA_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//LSDMA_QUEUE0_IB_SUB_REMAIN
+#define LSDMA_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE0_PREEMPT
+#define LSDMA_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define LSDMA_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//LSDMA_QUEUE0_CONTEXT_STATUS
+#define LSDMA_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define LSDMA_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define LSDMA_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//LSDMA_QUEUE0_STATUS
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//LSDMA_QUEUE0_DOORBELL
+#define LSDMA_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define LSDMA_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define LSDMA_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define LSDMA_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//LSDMA_QUEUE0_DOORBELL_OFFSET
+#define LSDMA_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//LSDMA_QUEUE0_DOORBELL_LOG
+#define LSDMA_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define LSDMA_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define LSDMA_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define LSDMA_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE0_WATERMARK
+#define LSDMA_QUEUE0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define LSDMA_QUEUE0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define LSDMA_QUEUE0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define LSDMA_QUEUE0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//LSDMA_QUEUE0_DUMMY0
+#define LSDMA_QUEUE0_DUMMY0__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE0_DUMMY0__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_DUMMY1
+#define LSDMA_QUEUE0_DUMMY1__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE0_DUMMY1__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_DUMMY2
+#define LSDMA_QUEUE0_DUMMY2__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE0_DUMMY2__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA0
+#define LSDMA_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA1
+#define LSDMA_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA2
+#define LSDMA_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA3
+#define LSDMA_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA4
+#define LSDMA_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA5
+#define LSDMA_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA6
+#define LSDMA_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA7
+#define LSDMA_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA8
+#define LSDMA_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA9
+#define LSDMA_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_DATA10
+#define LSDMA_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE0_MIDCMD_CNTL
+#define LSDMA_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define LSDMA_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define LSDMA_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define LSDMA_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define LSDMA_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define LSDMA_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define LSDMA_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define LSDMA_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//LSDMA_QUEUE1_RB_CNTL
+#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define LSDMA_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define LSDMA_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define LSDMA_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define LSDMA_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//LSDMA_QUEUE1_RB_BASE
+#define LSDMA_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_BASE_HI
+#define LSDMA_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//LSDMA_QUEUE1_RB_RPTR
+#define LSDMA_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_RPTR_HI
+#define LSDMA_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR
+#define LSDMA_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR_HI
+#define LSDMA_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR_POLL_CNTL
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_RB_RPTR_ADDR_HI
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_RPTR_ADDR_LO
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_IB_CNTL
+#define LSDMA_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define LSDMA_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define LSDMA_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define LSDMA_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define LSDMA_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define LSDMA_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define LSDMA_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//LSDMA_QUEUE1_IB_RPTR
+#define LSDMA_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE1_IB_OFFSET
+#define LSDMA_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//LSDMA_QUEUE1_IB_BASE_LO
+#define LSDMA_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define LSDMA_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//LSDMA_QUEUE1_IB_BASE_HI
+#define LSDMA_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_IB_SIZE
+#define LSDMA_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE1_SKIP_CNTL
+#define LSDMA_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//LSDMA_QUEUE1_CSA_ADDR_LO
+#define LSDMA_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define LSDMA_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_CSA_ADDR_HI
+#define LSDMA_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define LSDMA_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_RB_AQL_CNTL
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define LSDMA_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define LSDMA_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//LSDMA_QUEUE1_MINOR_PTR_UPDATE
+#define LSDMA_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define LSDMA_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//LSDMA_QUEUE1_CNTL
+#define LSDMA_QUEUE1_CNTL__QUANTUM__SHIFT 0x0
+#define LSDMA_QUEUE1_CNTL__QUANTUM_MASK 0x000000FFL
+//LSDMA_QUEUE1_RB_PREEMPT
+#define LSDMA_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define LSDMA_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//LSDMA_QUEUE1_IB_SUB_REMAIN
+#define LSDMA_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define LSDMA_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//LSDMA_QUEUE1_PREEMPT
+#define LSDMA_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define LSDMA_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//LSDMA_QUEUE1_CONTEXT_STATUS
+#define LSDMA_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define LSDMA_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define LSDMA_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//LSDMA_QUEUE1_STATUS
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//LSDMA_QUEUE1_DOORBELL
+#define LSDMA_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define LSDMA_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define LSDMA_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define LSDMA_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//LSDMA_QUEUE1_DOORBELL_OFFSET
+#define LSDMA_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define LSDMA_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//LSDMA_QUEUE1_DOORBELL_LOG
+#define LSDMA_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define LSDMA_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define LSDMA_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define LSDMA_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//LSDMA_QUEUE1_WATERMARK
+#define LSDMA_QUEUE1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define LSDMA_QUEUE1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define LSDMA_QUEUE1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define LSDMA_QUEUE1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//LSDMA_QUEUE1_DUMMY0
+#define LSDMA_QUEUE1_DUMMY0__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE1_DUMMY0__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_DUMMY1
+#define LSDMA_QUEUE1_DUMMY1__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE1_DUMMY1__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_DUMMY2
+#define LSDMA_QUEUE1_DUMMY2__DUMMY__SHIFT 0x0
+#define LSDMA_QUEUE1_DUMMY2__DUMMY_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA0
+#define LSDMA_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA1
+#define LSDMA_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA2
+#define LSDMA_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA3
+#define LSDMA_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA4
+#define LSDMA_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA5
+#define LSDMA_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA6
+#define LSDMA_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA7
+#define LSDMA_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA8
+#define LSDMA_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA9
+#define LSDMA_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_DATA10
+#define LSDMA_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//LSDMA_QUEUE1_MIDCMD_CNTL
+#define LSDMA_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define LSDMA_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define LSDMA_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define LSDMA_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define LSDMA_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define LSDMA_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define LSDMA_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define LSDMA_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
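The shift/mask pairs above follow the usual two-macro-per-field convention: <REG>__<FIELD>__SHIFT gives the bit position and <REG>__<FIELD>_MASK the in-place bit mask. Below is a minimal standalone sketch of how such a pair is typically used to pack and extract a field; the FIELD_GET/FIELD_SET helpers are assumptions for illustration only, not macros provided by this header or by the driver, and the two LSDMA_QUEUE1_RB_CNTL defines are reproduced so the sketch compiles on its own.

/* Illustrative only: generic field helpers built on __SHIFT/_MASK pairs.
 * These helpers are assumptions for this sketch, not part of the header.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Reproduced from the header above so the sketch is self-contained. */
#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE_MASK   0x00000001L
#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE__SHIFT   0x1
#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE_MASK     0x0000003EL

int main(void)
{
	uint32_t rb_cntl = 0;

	/* Program a ring-buffer size order of 10 and enable the ring. */
	rb_cntl = FIELD_SET(rb_cntl, LSDMA_QUEUE1_RB_CNTL, RB_SIZE, 10);
	rb_cntl = FIELD_SET(rb_cntl, LSDMA_QUEUE1_RB_CNTL, RB_ENABLE, 1);

	printf("RB_CNTL = 0x%08x, RB_SIZE = %u\n", rb_cntl,
	       (unsigned)FIELD_GET(rb_cntl, LSDMA_QUEUE1_RB_CNTL, RB_SIZE));
	return 0;
}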
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h
new file mode 100644
index 000000000000..6a1b7b524809
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_14_0_2_OFFSET_HEADER
+#define _mp_14_0_2_OFFSET_HEADER
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+// base address: 0x0
+#define regMP1_SMN_C2PMSG_0 0x0040
+#define regMP1_SMN_C2PMSG_0_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_1 0x0041
+#define regMP1_SMN_C2PMSG_1_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_2 0x0042
+#define regMP1_SMN_C2PMSG_2_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_3 0x0043
+#define regMP1_SMN_C2PMSG_3_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_4 0x0044
+#define regMP1_SMN_C2PMSG_4_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_5 0x0045
+#define regMP1_SMN_C2PMSG_5_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_6 0x0046
+#define regMP1_SMN_C2PMSG_6_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_7 0x0047
+#define regMP1_SMN_C2PMSG_7_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_8 0x0048
+#define regMP1_SMN_C2PMSG_8_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_9 0x0049
+#define regMP1_SMN_C2PMSG_9_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_10 0x004a
+#define regMP1_SMN_C2PMSG_10_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_11 0x004b
+#define regMP1_SMN_C2PMSG_11_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_12 0x004c
+#define regMP1_SMN_C2PMSG_12_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_13 0x004d
+#define regMP1_SMN_C2PMSG_13_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_14 0x004e
+#define regMP1_SMN_C2PMSG_14_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_15 0x004f
+#define regMP1_SMN_C2PMSG_15_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_16 0x0050
+#define regMP1_SMN_C2PMSG_16_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_17 0x0051
+#define regMP1_SMN_C2PMSG_17_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_18 0x0052
+#define regMP1_SMN_C2PMSG_18_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_19 0x0053
+#define regMP1_SMN_C2PMSG_19_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_20 0x0054
+#define regMP1_SMN_C2PMSG_20_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_21 0x0055
+#define regMP1_SMN_C2PMSG_21_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_22 0x0056
+#define regMP1_SMN_C2PMSG_22_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_23 0x0057
+#define regMP1_SMN_C2PMSG_23_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_24 0x0058
+#define regMP1_SMN_C2PMSG_24_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_25 0x0059
+#define regMP1_SMN_C2PMSG_25_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_26 0x005a
+#define regMP1_SMN_C2PMSG_26_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_27 0x005b
+#define regMP1_SMN_C2PMSG_27_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_28 0x005c
+#define regMP1_SMN_C2PMSG_28_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_29 0x005d
+#define regMP1_SMN_C2PMSG_29_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_30 0x005e
+#define regMP1_SMN_C2PMSG_30_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_31 0x005f
+#define regMP1_SMN_C2PMSG_31_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_32 0x0060
+#define regMP1_SMN_C2PMSG_32_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_33 0x0061
+#define regMP1_SMN_C2PMSG_33_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_34 0x0062
+#define regMP1_SMN_C2PMSG_34_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_35 0x0063
+#define regMP1_SMN_C2PMSG_35_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_36 0x0064
+#define regMP1_SMN_C2PMSG_36_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_37 0x0065
+#define regMP1_SMN_C2PMSG_37_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_38 0x0066
+#define regMP1_SMN_C2PMSG_38_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_39 0x0067
+#define regMP1_SMN_C2PMSG_39_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_40 0x0068
+#define regMP1_SMN_C2PMSG_40_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_41 0x0069
+#define regMP1_SMN_C2PMSG_41_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_42 0x006a
+#define regMP1_SMN_C2PMSG_42_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_43 0x006b
+#define regMP1_SMN_C2PMSG_43_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_44 0x006c
+#define regMP1_SMN_C2PMSG_44_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_45 0x006d
+#define regMP1_SMN_C2PMSG_45_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_46 0x006e
+#define regMP1_SMN_C2PMSG_46_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_47 0x006f
+#define regMP1_SMN_C2PMSG_47_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_48 0x0070
+#define regMP1_SMN_C2PMSG_48_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_49 0x0071
+#define regMP1_SMN_C2PMSG_49_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_50 0x0072
+#define regMP1_SMN_C2PMSG_50_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_51 0x0073
+#define regMP1_SMN_C2PMSG_51_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_52 0x0074
+#define regMP1_SMN_C2PMSG_52_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_53 0x0075
+#define regMP1_SMN_C2PMSG_53_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_54 0x0076
+#define regMP1_SMN_C2PMSG_54_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_55 0x0077
+#define regMP1_SMN_C2PMSG_55_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_56 0x0078
+#define regMP1_SMN_C2PMSG_56_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_57 0x0079
+#define regMP1_SMN_C2PMSG_57_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_58 0x007a
+#define regMP1_SMN_C2PMSG_58_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_59 0x007b
+#define regMP1_SMN_C2PMSG_59_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_60 0x007c
+#define regMP1_SMN_C2PMSG_60_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_61 0x007d
+#define regMP1_SMN_C2PMSG_61_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_62 0x007e
+#define regMP1_SMN_C2PMSG_62_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_63 0x007f
+#define regMP1_SMN_C2PMSG_63_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_64 0x0080
+#define regMP1_SMN_C2PMSG_64_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_65 0x0081
+#define regMP1_SMN_C2PMSG_65_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_66 0x0082
+#define regMP1_SMN_C2PMSG_66_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_67 0x0083
+#define regMP1_SMN_C2PMSG_67_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_68 0x0084
+#define regMP1_SMN_C2PMSG_68_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_69 0x0085
+#define regMP1_SMN_C2PMSG_69_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_70 0x0086
+#define regMP1_SMN_C2PMSG_70_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_71 0x0087
+#define regMP1_SMN_C2PMSG_71_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_72 0x0088
+#define regMP1_SMN_C2PMSG_72_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_73 0x0089
+#define regMP1_SMN_C2PMSG_73_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_74 0x008a
+#define regMP1_SMN_C2PMSG_74_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_75 0x008b
+#define regMP1_SMN_C2PMSG_75_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_76 0x008c
+#define regMP1_SMN_C2PMSG_76_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_77 0x008d
+#define regMP1_SMN_C2PMSG_77_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_78 0x008e
+#define regMP1_SMN_C2PMSG_78_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_79 0x008f
+#define regMP1_SMN_C2PMSG_79_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_80 0x0090
+#define regMP1_SMN_C2PMSG_80_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_81 0x0091
+#define regMP1_SMN_C2PMSG_81_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_82 0x0092
+#define regMP1_SMN_C2PMSG_82_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_83 0x0093
+#define regMP1_SMN_C2PMSG_83_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_84 0x0094
+#define regMP1_SMN_C2PMSG_84_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_85 0x0095
+#define regMP1_SMN_C2PMSG_85_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_86 0x0096
+#define regMP1_SMN_C2PMSG_86_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_87 0x0097
+#define regMP1_SMN_C2PMSG_87_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_88 0x0098
+#define regMP1_SMN_C2PMSG_88_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_89 0x0099
+#define regMP1_SMN_C2PMSG_89_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_90 0x009a
+#define regMP1_SMN_C2PMSG_90_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_91 0x009b
+#define regMP1_SMN_C2PMSG_91_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_92 0x009c
+#define regMP1_SMN_C2PMSG_92_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_93 0x009d
+#define regMP1_SMN_C2PMSG_93_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_94 0x009e
+#define regMP1_SMN_C2PMSG_94_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_95 0x009f
+#define regMP1_SMN_C2PMSG_95_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_96 0x00a0
+#define regMP1_SMN_C2PMSG_96_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_97 0x00a1
+#define regMP1_SMN_C2PMSG_97_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_98 0x00a2
+#define regMP1_SMN_C2PMSG_98_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_99 0x00a3
+#define regMP1_SMN_C2PMSG_99_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_100 0x00a4
+#define regMP1_SMN_C2PMSG_100_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_101 0x00a5
+#define regMP1_SMN_C2PMSG_101_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_102 0x00a6
+#define regMP1_SMN_C2PMSG_102_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_103 0x00a7
+#define regMP1_SMN_C2PMSG_103_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_104 0x00a8
+#define regMP1_SMN_C2PMSG_104_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_105 0x00a9
+#define regMP1_SMN_C2PMSG_105_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_106 0x00aa
+#define regMP1_SMN_C2PMSG_106_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_107 0x00ab
+#define regMP1_SMN_C2PMSG_107_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_108 0x00ac
+#define regMP1_SMN_C2PMSG_108_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_109 0x00ad
+#define regMP1_SMN_C2PMSG_109_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_110 0x00ae
+#define regMP1_SMN_C2PMSG_110_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_111 0x00af
+#define regMP1_SMN_C2PMSG_111_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_112 0x00b0
+#define regMP1_SMN_C2PMSG_112_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_113 0x00b1
+#define regMP1_SMN_C2PMSG_113_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_114 0x00b2
+#define regMP1_SMN_C2PMSG_114_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_115 0x00b3
+#define regMP1_SMN_C2PMSG_115_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_116 0x00b4
+#define regMP1_SMN_C2PMSG_116_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_117 0x00b5
+#define regMP1_SMN_C2PMSG_117_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_118 0x00b6
+#define regMP1_SMN_C2PMSG_118_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_119 0x00b7
+#define regMP1_SMN_C2PMSG_119_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_120 0x00b8
+#define regMP1_SMN_C2PMSG_120_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_121 0x00b9
+#define regMP1_SMN_C2PMSG_121_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_122 0x00ba
+#define regMP1_SMN_C2PMSG_122_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_123 0x00bb
+#define regMP1_SMN_C2PMSG_123_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_124 0x00bc
+#define regMP1_SMN_C2PMSG_124_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_125 0x00bd
+#define regMP1_SMN_C2PMSG_125_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_126 0x00be
+#define regMP1_SMN_C2PMSG_126_BASE_IDX 1
+#define regMP1_SMN_C2PMSG_127 0x00bf
+#define regMP1_SMN_C2PMSG_127_BASE_IDX 1
+#define regMP1_SMN_IH_CREDIT 0x0140
+#define regMP1_SMN_IH_CREDIT_BASE_IDX 1
+#define regMP1_SMN_IH_SW_INT 0x0141
+#define regMP1_SMN_IH_SW_INT_BASE_IDX 1
+#define regMP1_SMN_IH_SW_INT_CTRL 0x0142
+#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 1
+#define regMP1_SMN_FPS_CNT 0x0143
+#define regMP1_SMN_FPS_CNT_BASE_IDX 1
+#define regMP1_SMN_PUB_CTRL 0x0144
+#define regMP1_SMN_PUB_CTRL_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH0 0x01c0
+#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH1 0x01c1
+#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH2 0x01c2
+#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH3 0x01c3
+#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH4 0x01c4
+#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH5 0x01c5
+#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH6 0x01c6
+#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH7 0x01c7
+#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH8 0x01c8
+#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH9 0x01c9
+#define regMP1_SMN_EXT_SCRATCH9_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH10 0x01ca
+#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH11 0x01cb
+#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH12 0x01cc
+#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH13 0x01cd
+#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH14 0x01ce
+#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH15 0x01cf
+#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH16 0x01d0
+#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH17 0x01d1
+#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH18 0x01d2
+#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH19 0x01d3
+#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH20 0x01d4
+#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH21 0x01d5
+#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH22 0x01d6
+#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH23 0x01d7
+#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH24 0x01d8
+#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH25 0x01d9
+#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH26 0x01da
+#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH27 0x01db
+#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH28 0x01dc
+#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH29 0x01dd
+#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH30 0x01de
+#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 1
+#define regMP1_SMN_EXT_SCRATCH31 0x01df
+#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 1
+
+
+// addressBlock: mp_SmuMpASP_SmnDec
+// base address: 0x0
+#define regMPASP_SMN_C2PMSG_32 0x0060
+#define regMPASP_SMN_C2PMSG_32_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_33 0x0061
+#define regMPASP_SMN_C2PMSG_33_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_34 0x0062
+#define regMPASP_SMN_C2PMSG_34_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_35 0x0063
+#define regMPASP_SMN_C2PMSG_35_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_36 0x0064
+#define regMPASP_SMN_C2PMSG_36_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_37 0x0065
+#define regMPASP_SMN_C2PMSG_37_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_38 0x0066
+#define regMPASP_SMN_C2PMSG_38_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_39 0x0067
+#define regMPASP_SMN_C2PMSG_39_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_60 0x007c
+#define regMPASP_SMN_C2PMSG_60_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_61 0x007d
+#define regMPASP_SMN_C2PMSG_61_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_62 0x007e
+#define regMPASP_SMN_C2PMSG_62_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_63 0x007f
+#define regMPASP_SMN_C2PMSG_63_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_64 0x0080
+#define regMPASP_SMN_C2PMSG_64_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_65 0x0081
+#define regMPASP_SMN_C2PMSG_65_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_66 0x0082
+#define regMPASP_SMN_C2PMSG_66_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_67 0x0083
+#define regMPASP_SMN_C2PMSG_67_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_68 0x0084
+#define regMPASP_SMN_C2PMSG_68_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_69 0x0085
+#define regMPASP_SMN_C2PMSG_69_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_70 0x0086
+#define regMPASP_SMN_C2PMSG_70_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_71 0x0087
+#define regMPASP_SMN_C2PMSG_71_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_72 0x0088
+#define regMPASP_SMN_C2PMSG_72_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_73 0x0089
+#define regMPASP_SMN_C2PMSG_73_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_74 0x008a
+#define regMPASP_SMN_C2PMSG_74_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_75 0x008b
+#define regMPASP_SMN_C2PMSG_75_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_76 0x008c
+#define regMPASP_SMN_C2PMSG_76_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_77 0x008d
+#define regMPASP_SMN_C2PMSG_77_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_78 0x008e
+#define regMPASP_SMN_C2PMSG_78_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_79 0x008f
+#define regMPASP_SMN_C2PMSG_79_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_80 0x0090
+#define regMPASP_SMN_C2PMSG_80_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_81 0x0091
+#define regMPASP_SMN_C2PMSG_81_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_82 0x0092
+#define regMPASP_SMN_C2PMSG_82_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_83 0x0093
+#define regMPASP_SMN_C2PMSG_83_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_84 0x0094
+#define regMPASP_SMN_C2PMSG_84_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_85 0x0095
+#define regMPASP_SMN_C2PMSG_85_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_86 0x0096
+#define regMPASP_SMN_C2PMSG_86_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_87 0x0097
+#define regMPASP_SMN_C2PMSG_87_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_88 0x0098
+#define regMPASP_SMN_C2PMSG_88_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_89 0x0099
+#define regMPASP_SMN_C2PMSG_89_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_100 0x00a4
+#define regMPASP_SMN_C2PMSG_100_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_101 0x00a5
+#define regMPASP_SMN_C2PMSG_101_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_102 0x00a6
+#define regMPASP_SMN_C2PMSG_102_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_103 0x00a7
+#define regMPASP_SMN_C2PMSG_103_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_109 0x00ad
+#define regMPASP_SMN_C2PMSG_109_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_115 0x00b3
+#define regMPASP_SMN_C2PMSG_115_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_116 0x00b4
+#define regMPASP_SMN_C2PMSG_116_BASE_IDX 0
+#define regMPASP_SMN_C2PMSG_119_BASE_IDX 0
+#define regMPASP_SMN_IH_CREDIT 0x0140
+#define regMPASP_SMN_IH_CREDIT_BASE_IDX 0
+#define regMPASP_SMN_IH_SW_INT 0x0141
+#define regMPASP_SMN_IH_SW_INT_BASE_IDX 0
+#define regMPASP_SMN_IH_SW_INT_CTRL 0x0142
+#define regMPASP_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: Mp1MmioPublic_SmuMp1Pub_CruDec
+// base address: 0x3b00000
+#define regMP1_CRU1_MP1_FIRMWARE_FLAGS 0x4009
+#define regMP1_CRU1_MP1_FIRMWARE_FLAGS_BASE_IDX 7
+
+
+#endif
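Each register in the block above comes as an offset/_BASE_IDX pair: the offset is relative to one of the IP block's address segments, and the BASE_IDX selects which segment. Below is a minimal standalone sketch of how such a pair might be resolved to a final register index; the segment-base table values and the helper function are assumptions for illustration, not values defined by this header or by the driver.

/* Illustrative only: resolving an offset/_BASE_IDX pair against a
 * hypothetical table of per-block segment bases.
 */
#include <stdint.h>
#include <stdio.h>

/* Reproduced from the header above so the sketch is self-contained. */
#define regMP1_SMN_C2PMSG_82          0x0092
#define regMP1_SMN_C2PMSG_82_BASE_IDX 1

/* Hypothetical segment bases for the MP block (placeholder values). */
static const uint32_t mp_base_segments[] = { 0x00016000, 0x00016200 };

static uint32_t mp_reg_index(uint32_t base_idx, uint32_t offset)
{
	/* Final register index = segment base selected by BASE_IDX + offset. */
	return mp_base_segments[base_idx] + offset;
}

int main(void)
{
	uint32_t idx = mp_reg_index(regMP1_SMN_C2PMSG_82_BASE_IDX,
				    regMP1_SMN_C2PMSG_82);

	printf("MP1_SMN_C2PMSG_82 register index: 0x%08x\n", idx);
	return 0;
}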
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h
new file mode 100644
index 000000000000..3ba269da1463
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_14_0_2_SH_MASK_HEADER
+#define _mp_14_0_2_SH_MASK_HEADER
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_0
+#define MP1_SMN_C2PMSG_0__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_1
+#define MP1_SMN_C2PMSG_1__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_2
+#define MP1_SMN_C2PMSG_2__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_3
+#define MP1_SMN_C2PMSG_3__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_4
+#define MP1_SMN_C2PMSG_4__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_5
+#define MP1_SMN_C2PMSG_5__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_6
+#define MP1_SMN_C2PMSG_6__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_7
+#define MP1_SMN_C2PMSG_7__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_8
+#define MP1_SMN_C2PMSG_8__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_9
+#define MP1_SMN_C2PMSG_9__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_10
+#define MP1_SMN_C2PMSG_10__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_11
+#define MP1_SMN_C2PMSG_11__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_12
+#define MP1_SMN_C2PMSG_12__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_13
+#define MP1_SMN_C2PMSG_13__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_14
+#define MP1_SMN_C2PMSG_14__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_15
+#define MP1_SMN_C2PMSG_15__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_16
+#define MP1_SMN_C2PMSG_16__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_17
+#define MP1_SMN_C2PMSG_17__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_18
+#define MP1_SMN_C2PMSG_18__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_19
+#define MP1_SMN_C2PMSG_19__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_20
+#define MP1_SMN_C2PMSG_20__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_21
+#define MP1_SMN_C2PMSG_21__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_22
+#define MP1_SMN_C2PMSG_22__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_23
+#define MP1_SMN_C2PMSG_23__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_24
+#define MP1_SMN_C2PMSG_24__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_25
+#define MP1_SMN_C2PMSG_25__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_26
+#define MP1_SMN_C2PMSG_26__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_27
+#define MP1_SMN_C2PMSG_27__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_28
+#define MP1_SMN_C2PMSG_28__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_29
+#define MP1_SMN_C2PMSG_29__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_30
+#define MP1_SMN_C2PMSG_30__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_31
+#define MP1_SMN_C2PMSG_31__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_104
+#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_105
+#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_106
+#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_107
+#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_108
+#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_109
+#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_110
+#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_111
+#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_112
+#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_113
+#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_114
+#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_115
+#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_116
+#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_117
+#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_118
+#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_119
+#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_120
+#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_121
+#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_122
+#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_123
+#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_124
+#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_125
+#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_126
+#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_127
+#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_PUB_CTRL
+#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0
+#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH8
+#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH9
+#define MP1_SMN_EXT_SCRATCH9__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH10
+#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH11
+#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH12
+#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH13
+#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH14
+#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH15
+#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH16
+#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH17
+#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH18
+#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH19
+#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH20
+#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH21
+#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH22
+#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH23
+#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH24
+#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH25
+#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH26
+#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH27
+#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH28
+#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH29
+#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH30
+#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH31
+#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: mp_SmuMpASP_SmnDec
+//MPASP_SMN_C2PMSG_32
+#define MPASP_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_33
+#define MPASP_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_34
+#define MPASP_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_35
+#define MPASP_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_36
+#define MPASP_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_37
+#define MPASP_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_38
+#define MPASP_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_39
+#define MPASP_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_60
+#define MPASP_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_61
+#define MPASP_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_62
+#define MPASP_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_63
+#define MPASP_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_64
+#define MPASP_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_65
+#define MPASP_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_66
+#define MPASP_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_67
+#define MPASP_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_68
+#define MPASP_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_69
+#define MPASP_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_70
+#define MPASP_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_71
+#define MPASP_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_72
+#define MPASP_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_73
+#define MPASP_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_74
+#define MPASP_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_75
+#define MPASP_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_76
+#define MPASP_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_77
+#define MPASP_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_78
+#define MPASP_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_79
+#define MPASP_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_80
+#define MPASP_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_81
+#define MPASP_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_82
+#define MPASP_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_83
+#define MPASP_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_84
+#define MPASP_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_85
+#define MPASP_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_86
+#define MPASP_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_87
+#define MPASP_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_88
+#define MPASP_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_89
+#define MPASP_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_100
+#define MPASP_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_101
+#define MPASP_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_102
+#define MPASP_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_103
+#define MPASP_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_109
+#define MPASP_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_115
+#define MPASP_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_C2PMSG_116
+#define MPASP_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MPASP_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MPASP_SMN_IH_CREDIT
+#define MPASP_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MPASP_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MPASP_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MPASP_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MPASP_SMN_IH_SW_INT
+#define MPASP_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MPASP_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MPASP_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MPASP_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MPASP_SMN_IH_SW_INT_CTRL
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MPASP_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: Mp1MmioPublic_SmuMp1Pub_CruDec
+//MP1_CRU1_MP1_FIRMWARE_FLAGS
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_CRU1_MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+
+
+#endif
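
Editorial note (not part of the patch): the `__SHIFT`/`_MASK` pairs in the header above follow the usual convention for these generated AMD register headers: the mask selects a field inside a 32-bit register and the shift moves that field down to bit 0. The standalone sketch below only illustrates that convention using the MP1 firmware-flags field defined above; the raw register value and the `FIELD_GET32`/`FIELD_SET32` helper macros are hypothetical stand-ins written for this example (similar in spirit to the driver's REG_GET_FIELD/REG_SET_FIELD helpers), not definitions added by this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Field definitions copied from the header added in this patch. */
    #define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
    #define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK   0x00000001L

    /* Hypothetical helpers: extract or update one field of a 32-bit register
     * value using the <FIELD>_MASK / <FIELD>__SHIFT naming scheme above. */
    #define FIELD_GET32(reg, field) \
            (((reg) & field##_MASK) >> field##__SHIFT)
    #define FIELD_SET32(reg, field, val) \
            (((reg) & ~field##_MASK) | \
             (((uint32_t)(val) << field##__SHIFT) & field##_MASK))

    int main(void)
    {
            /* Hypothetical raw register value; a real driver would read it via MMIO. */
            uint32_t flags = 0x00000000;

            flags = FIELD_SET32(flags, MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED, 1);
            printf("interrupts enabled: %lu\n",
                   (unsigned long)FIELD_GET32(flags,
                           MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED));
            return 0;
    }

The same pattern applies to every `CONTENT`, `CREDIT_VALUE`, and similar field defined in this file; only the mask width and shift change per field.
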
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_offset.h
new file mode 100644
index 000000000000..792c3edb633b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_offset.h
@@ -0,0 +1,11287 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _nbif_6_3_1_OFFSET_HEADER
+#define _nbif_6_3_1_OFFSET_HEADER
+
+
+// addressBlock: nbif_bif_cfg_dev0_rc_bifcfgdecp
+// base address: 0x0
+#define cfgIRQ_BRIDGE_CNTL 0x003e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST 0x0048
+#define cfgBIF_CFG_DEV0_EPF0_ADAPTER_ID_W 0x004c
+#define cfgBIF_CFG_DEV0_EPF0_PMI_CAP_LIST 0x0050
+#define cfgBIF_CFG_DEV0_EPF0_PMI_CAP 0x0052
+#define cfgBIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL 0x0054
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST 0x0110
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1 0x0114
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2 0x0118
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL 0x011c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS 0x011e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP 0x0120
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL 0x0124
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS 0x012a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP 0x012c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL 0x0130
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS 0x0136
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1 0x0144
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2 0x0148
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST 0x0200
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP 0x0204
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL 0x0208
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP 0x020c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL 0x0210
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP 0x0214
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL 0x0218
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP 0x021c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL 0x0220
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP 0x0224
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL 0x0228
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP 0x022c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL 0x0230
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT 0x0244
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA 0x0248
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP 0x024c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST 0x0250
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_CAP 0x0254
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR 0x0258
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS 0x025c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL 0x025e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3 0x0274
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS 0x0278
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST 0x02a0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ACS_CAP 0x02a4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL 0x02a6
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST 0x02d0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PASID_CAP 0x02d4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL 0x02d6
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST 0x0320
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x0324
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL 0x032e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST 0x0330
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP 0x0334
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL 0x0338
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_STATUS 0x033a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_INITIAL_VFS 0x033c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_TOTAL_VFS 0x033e
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_NUM_VFS 0x0340
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_FUNC_DEP_LINK 0x0342
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_FIRST_VF_OFFSET 0x0344
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_STRIDE 0x0346
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_DEVICE_ID 0x034a
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_0 0x0354
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_1 0x0358
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_2 0x035c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_3 0x0360
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_4 0x0364
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_5 0x0368
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST 0x0400
+#define cfgBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP 0x0404
+#define cfgBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS 0x0408
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST 0x0410
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP_16GT 0x0414
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL_16GT 0x0418
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS_16GT 0x041c
+#define cfgBIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420
+#define cfgBIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424
+#define cfgBIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428
+#define cfgBIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430
+#define cfgBIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431
+#define cfgBIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432
+#define cfgBIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433
+#define cfgBIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434
+#define cfgBIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435
+#define cfgBIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436
+#define cfgBIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437
+#define cfgBIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438
+#define cfgBIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439
+#define cfgBIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b
+#define cfgBIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d
+#define cfgBIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST 0x0450
+#define cfgBIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP 0x0454
+#define cfgBIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS 0x0456
+#define cfgBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL 0x0458
+#define cfgBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS 0x045a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL 0x045c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS 0x045e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL 0x0460
+#define cfgBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS 0x0462
+#define cfgBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL 0x0464
+#define cfgBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS 0x0466
+#define cfgBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL 0x0468
+#define cfgBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS 0x046a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL 0x046c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS 0x046e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL 0x0470
+#define cfgBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS 0x0472
+#define cfgBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL 0x0474
+#define cfgBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS 0x0476
+#define cfgBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL 0x0478
+#define cfgBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS 0x047a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL 0x047c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS 0x047e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL 0x0480
+#define cfgBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS 0x0482
+#define cfgBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL 0x0484
+#define cfgBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS 0x0486
+#define cfgBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL 0x0488
+#define cfgBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS 0x048a
+#define cfgBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL 0x048c
+#define cfgBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS 0x048e
+#define cfgBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL 0x0490
+#define cfgBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS 0x0492
+#define cfgBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL 0x0494
+#define cfgBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS 0x0496
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x04c0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CAP 0x04c4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL 0x04c8
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CAP 0x04cc
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL 0x04d0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CAP 0x04d4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL 0x04d8
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CAP 0x04dc
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL 0x04e0
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CAP 0x04e4
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL 0x04e8
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CAP 0x04ec
+#define cfgBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL 0x04f0
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CAP_32GT 0x0504
+#define cfgBIF_CFG_DEV0_EPF0_LINK_CNTL_32GT 0x0508
+#define cfgBIF_CFG_DEV0_EPF0_LINK_STATUS_32GT 0x050c
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf0_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF0_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF0_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF0_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF0_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF0_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF0_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF0_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF0_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf1_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF1_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF1_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF1_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF1_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF1_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF1_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF1_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF1_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf2_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF2_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF2_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF2_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF2_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF2_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF2_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF2_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF2_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf3_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF3_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF3_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF3_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF3_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF3_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF3_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF3_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF3_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf4_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF4_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF4_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF4_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF4_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF4_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF4_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF4_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF4_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf5_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF5_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF5_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF5_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF5_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF5_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF5_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF5_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF5_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf6_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF6_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF6_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF6_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF6_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF6_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF6_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF6_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF6_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf7_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF0_VF7_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF0_VF7_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF0_VF7_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF0_VF7_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF0_VF7_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF0_VF7_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF0_VF7_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF0_VF7_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL 0x032e
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf1_bifcfgdecp
+// base address: 0x0
+#define cfgBIF_CFG_DEV0_EPF1_VENDOR_ID 0x0000
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_ID 0x0002
+#define cfgBIF_CFG_DEV0_EPF1_COMMAND 0x0004
+#define cfgBIF_CFG_DEV0_EPF1_STATUS 0x0006
+#define cfgBIF_CFG_DEV0_EPF1_REVISION_ID 0x0008
+#define cfgBIF_CFG_DEV0_EPF1_PROG_INTERFACE 0x0009
+#define cfgBIF_CFG_DEV0_EPF1_SUB_CLASS 0x000a
+#define cfgBIF_CFG_DEV0_EPF1_BASE_CLASS 0x000b
+#define cfgBIF_CFG_DEV0_EPF1_CACHE_LINE 0x000c
+#define cfgBIF_CFG_DEV0_EPF1_LATENCY 0x000d
+#define cfgBIF_CFG_DEV0_EPF1_HEADER 0x000e
+#define cfgBIF_CFG_DEV0_EPF1_BIST 0x000f
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_1 0x0010
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_2 0x0014
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_3 0x0018
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_4 0x001c
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_5 0x0020
+#define cfgBIF_CFG_DEV0_EPF1_BASE_ADDR_6 0x0024
+#define cfgBIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR 0x0028
+#define cfgBIF_CFG_DEV0_EPF1_ADAPTER_ID 0x002c
+#define cfgBIF_CFG_DEV0_EPF1_ROM_BASE_ADDR 0x0030
+#define cfgBIF_CFG_DEV0_EPF1_CAP_PTR 0x0034
+#define cfgBIF_CFG_DEV0_EPF1_INTERRUPT_LINE 0x003c
+#define cfgBIF_CFG_DEV0_EPF1_INTERRUPT_PIN 0x003d
+#define cfgBIF_CFG_DEV0_EPF1_MIN_GRANT 0x003e
+#define cfgBIF_CFG_DEV0_EPF1_MAX_LATENCY 0x003f
+#define cfgBIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST 0x0048
+#define cfgBIF_CFG_DEV0_EPF1_ADAPTER_ID_W 0x004c
+#define cfgBIF_CFG_DEV0_EPF1_PMI_CAP_LIST 0x0050
+#define cfgBIF_CFG_DEV0_EPF1_PMI_CAP 0x0052
+#define cfgBIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL 0x0054
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CAP_LIST 0x0064
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CAP 0x0066
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CAP 0x0068
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CNTL 0x006c
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_STATUS 0x006e
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CAP 0x0070
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CNTL 0x0074
+#define cfgBIF_CFG_DEV0_EPF1_LINK_STATUS 0x0076
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CAP2 0x0088
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_CNTL2 0x008c
+#define cfgBIF_CFG_DEV0_EPF1_DEVICE_STATUS2 0x008e
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CAP2 0x0090
+#define cfgBIF_CFG_DEV0_EPF1_LINK_CNTL2 0x0094
+#define cfgBIF_CFG_DEV0_EPF1_LINK_STATUS2 0x0096
+#define cfgBIF_CFG_DEV0_EPF1_MSI_CAP_LIST 0x00a0
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_CNTL 0x00a2
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO 0x00a4
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI 0x00a8
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_DATA 0x00a8
+#define cfgBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA 0x00aa
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MASK 0x00ac
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64 0x00ac
+#define cfgBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64 0x00ae
+#define cfgBIF_CFG_DEV0_EPF1_MSI_MASK_64 0x00b0
+#define cfgBIF_CFG_DEV0_EPF1_MSI_PENDING 0x00b0
+#define cfgBIF_CFG_DEV0_EPF1_MSI_PENDING_64 0x00b4
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_CAP_LIST 0x00c0
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL 0x00c2
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_TABLE 0x00c4
+#define cfgBIF_CFG_DEV0_EPF1_MSIX_PBA 0x00c8
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR 0x0104
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1 0x0108
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2 0x010c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW1 0x0144
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW2 0x0148
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS 0x0154
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK 0x0158
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY 0x015c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS 0x0160
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK 0x0164
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL 0x0168
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0 0x016c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1 0x0170
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2 0x0174
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3 0x0178
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0 0x0188
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1 0x018c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2 0x0190
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3 0x0194
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST 0x0200
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP 0x0204
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL 0x0208
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP 0x020c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL 0x0210
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP 0x0214
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL 0x0218
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP 0x021c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL 0x0220
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP 0x0224
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL 0x0228
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP 0x022c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL 0x0230
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT 0x0244
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA 0x0248
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP 0x024c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST 0x0250
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_CAP 0x0254
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR 0x0258
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS 0x025c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL 0x025e
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST 0x0270
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3 0x0274
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_ERROR_STATUS 0x0278
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST 0x02a0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ACS_CAP 0x02a4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL 0x02a6
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST 0x02d0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PASID_CAP 0x02d4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL 0x02d6
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST 0x0320
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_LTR_CAP 0x0324
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST 0x0328
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ARI_CAP 0x032c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL 0x032e
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST 0x0330
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP 0x0334
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL 0x0338
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_STATUS 0x033a
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_INITIAL_VFS 0x033c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_TOTAL_VFS 0x033e
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_NUM_VFS 0x0340
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_FUNC_DEP_LINK 0x0342
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_FIRST_VF_OFFSET 0x0344
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_STRIDE 0x0346
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_DEVICE_ID 0x034a
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_0 0x0354
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_1 0x0358
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_2 0x035c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_3 0x0360
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_4 0x0364
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_5 0x0368
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x04c0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CAP 0x04c4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL 0x04c8
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CAP 0x04cc
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL 0x04d0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CAP 0x04d4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL 0x04d8
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CAP 0x04dc
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL 0x04e0
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CAP 0x04e4
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL 0x04e8
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CAP 0x04ec
+#define cfgBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL 0x04f0
+
+
+// addressBlock: nbif_bif_bx_pf_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_PF0_MM_INDEX 0x0000
+#define regBIF_BX_PF0_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_PF0_MM_DATA 0x0001
+#define regBIF_BX_PF0_MM_DATA_BASE_IDX 0
+#define regBIF_BX_PF0_MM_INDEX_HI 0x0006
+#define regBIF_BX_PF0_MM_INDEX_HI_BASE_IDX 0
+#define regBIF_BX_PF0_RSMU_INDEX 0x0000
+#define regBIF_BX_PF0_RSMU_INDEX_BASE_IDX 1
+#define regBIF_BX_PF0_RSMU_DATA 0x0001
+#define regBIF_BX_PF0_RSMU_DATA_BASE_IDX 1
+#define regBIF_BX_PF0_RSMU_INDEX_HI 0x0002
+#define regBIF_BX_PF0_RSMU_INDEX_HI_BASE_IDX 1
+
+
+// addressBlock: nbif_bif_bx_SYSDEC
+// base address: 0x0
+#define regBIF_BX0_PCIE_INDEX 0x000c
+#define regBIF_BX0_PCIE_INDEX_BASE_IDX 0
+#define regBIF_BX0_PCIE_DATA 0x000d
+#define regBIF_BX0_PCIE_DATA_BASE_IDX 0
+#define regBIF_BX0_PCIE_INDEX2 0x000e
+#define regBIF_BX0_PCIE_INDEX2_BASE_IDX 0
+#define regBIF_BX0_PCIE_DATA2 0x000f
+#define regBIF_BX0_PCIE_DATA2_BASE_IDX 0
+#define regBIF_BX0_PCIE_INDEX_HI 0x0010
+#define regBIF_BX0_PCIE_INDEX_HI_BASE_IDX 0
+#define regBIF_BX0_PCIE_INDEX2_HI 0x0011
+#define regBIF_BX0_PCIE_INDEX2_HI_BASE_IDX 0
+#define regBIF_BX0_SBIOS_SCRATCH_0 0x0034
+#define regBIF_BX0_SBIOS_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_1 0x0035
+#define regBIF_BX0_SBIOS_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_2 0x0036
+#define regBIF_BX0_SBIOS_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_3 0x0037
+#define regBIF_BX0_SBIOS_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_0 0x0038
+#define regBIF_BX0_BIOS_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_1 0x0039
+#define regBIF_BX0_BIOS_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_2 0x003a
+#define regBIF_BX0_BIOS_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_3 0x003b
+#define regBIF_BX0_BIOS_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_4 0x003c
+#define regBIF_BX0_BIOS_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_5 0x003d
+#define regBIF_BX0_BIOS_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_6 0x003e
+#define regBIF_BX0_BIOS_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_7 0x003f
+#define regBIF_BX0_BIOS_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_8 0x0040
+#define regBIF_BX0_BIOS_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_9 0x0041
+#define regBIF_BX0_BIOS_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_10 0x0042
+#define regBIF_BX0_BIOS_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_11 0x0043
+#define regBIF_BX0_BIOS_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_12 0x0044
+#define regBIF_BX0_BIOS_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_13 0x0045
+#define regBIF_BX0_BIOS_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_14 0x0046
+#define regBIF_BX0_BIOS_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_BIOS_SCRATCH_15 0x0047
+#define regBIF_BX0_BIOS_SCRATCH_15_BASE_IDX 1
+#define regBIF_BX0_BIF_RLC_INTR_CNTL 0x004c
+#define regBIF_BX0_BIF_RLC_INTR_CNTL_BASE_IDX 1
+#define regBIF_BX0_BIF_VCE_INTR_CNTL 0x004d
+#define regBIF_BX0_BIF_VCE_INTR_CNTL_BASE_IDX 1
+#define regBIF_BX0_BIF_UVD_INTR_CNTL 0x004e
+#define regBIF_BX0_BIF_UVD_INTR_CNTL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR0 0x006c
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR0_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0 0x006d
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR1 0x006e
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR1_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1 0x006f
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR2 0x0070
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR2_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2 0x0071
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR3 0x0072
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR3_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3 0x0073
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR4 0x0074
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR4_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4 0x0075
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR5 0x0076
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR5_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5 0x0077
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR6 0x0078
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR6_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6 0x0079
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR7 0x007a
+#define regBIF_BX0_GFX_MMIOREG_CAM_ADDR7_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7 0x007b
+#define regBIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_CNTL 0x007c
+#define regBIF_BX0_GFX_MMIOREG_CAM_CNTL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL 0x007d
+#define regBIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_ONE_CPL 0x007e
+#define regBIF_BX0_GFX_MMIOREG_CAM_ONE_CPL_BASE_IDX 1
+#define regBIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL 0x007f
+#define regBIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_0 0x0080
+#define regBIF_BX0_DRIVER_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_1 0x0081
+#define regBIF_BX0_DRIVER_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_2 0x0082
+#define regBIF_BX0_DRIVER_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_3 0x0083
+#define regBIF_BX0_DRIVER_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_4 0x0084
+#define regBIF_BX0_DRIVER_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_5 0x0085
+#define regBIF_BX0_DRIVER_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_6 0x0086
+#define regBIF_BX0_DRIVER_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_7 0x0087
+#define regBIF_BX0_DRIVER_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_8 0x0088
+#define regBIF_BX0_DRIVER_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_9 0x0089
+#define regBIF_BX0_DRIVER_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_10 0x008a
+#define regBIF_BX0_DRIVER_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_11 0x008b
+#define regBIF_BX0_DRIVER_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_12 0x008c
+#define regBIF_BX0_DRIVER_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_13 0x008d
+#define regBIF_BX0_DRIVER_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_14 0x008e
+#define regBIF_BX0_DRIVER_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_DRIVER_SCRATCH_15 0x008f
+#define regBIF_BX0_DRIVER_SCRATCH_15_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_0 0x0090
+#define regBIF_BX0_FW_SCRATCH_0_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_1 0x0091
+#define regBIF_BX0_FW_SCRATCH_1_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_2 0x0092
+#define regBIF_BX0_FW_SCRATCH_2_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_3 0x0093
+#define regBIF_BX0_FW_SCRATCH_3_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_4 0x0094
+#define regBIF_BX0_FW_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_5 0x0095
+#define regBIF_BX0_FW_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_6 0x0096
+#define regBIF_BX0_FW_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_7 0x0097
+#define regBIF_BX0_FW_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_8 0x0098
+#define regBIF_BX0_FW_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_9 0x0099
+#define regBIF_BX0_FW_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_10 0x009a
+#define regBIF_BX0_FW_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_11 0x009b
+#define regBIF_BX0_FW_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_12 0x009c
+#define regBIF_BX0_FW_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_13 0x009d
+#define regBIF_BX0_FW_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_14 0x009e
+#define regBIF_BX0_FW_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_FW_SCRATCH_15 0x009f
+#define regBIF_BX0_FW_SCRATCH_15_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_4 0x00a0
+#define regBIF_BX0_SBIOS_SCRATCH_4_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_5 0x00a1
+#define regBIF_BX0_SBIOS_SCRATCH_5_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_6 0x00a2
+#define regBIF_BX0_SBIOS_SCRATCH_6_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_7 0x00a3
+#define regBIF_BX0_SBIOS_SCRATCH_7_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_8 0x00a4
+#define regBIF_BX0_SBIOS_SCRATCH_8_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_9 0x00a5
+#define regBIF_BX0_SBIOS_SCRATCH_9_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_10 0x00a6
+#define regBIF_BX0_SBIOS_SCRATCH_10_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_11 0x00a7
+#define regBIF_BX0_SBIOS_SCRATCH_11_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_12 0x00a8
+#define regBIF_BX0_SBIOS_SCRATCH_12_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_13 0x00a9
+#define regBIF_BX0_SBIOS_SCRATCH_13_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_14 0x00aa
+#define regBIF_BX0_SBIOS_SCRATCH_14_BASE_IDX 1
+#define regBIF_BX0_SBIOS_SCRATCH_15 0x00ab
+#define regBIF_BX0_SBIOS_SCRATCH_15_BASE_IDX 1
+
+
+// addressBlock: nbif_rcc_dwn_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_DWN_DEV0_0_DN_PCIE_RESERVED 0x0060
+#define regRCC_DWN_DEV0_0_DN_PCIE_RESERVED_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_SCRATCH 0x0061
+#define regRCC_DWN_DEV0_0_DN_PCIE_SCRATCH_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_CNTL 0x0063
+#define regRCC_DWN_DEV0_0_DN_PCIE_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL 0x0064
+#define regRCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2 0x0065
+#define regRCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL 0x0066
+#define regRCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL 0x0067
+#define regRCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_F0 0x0068
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_F0_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC 0x0069
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC_BASE_IDX 2
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2 0x006a
+#define regRCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dwnp_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_DWNP_DEV0_0_PCIE_ERR_CNTL 0x006c
+#define regRCC_DWNP_DEV0_0_PCIE_ERR_CNTL_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIE_RX_CNTL 0x006d
+#define regRCC_DWNP_DEV0_0_PCIE_RX_CNTL_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL 0x006e
+#define regRCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIE_LC_CNTL2 0x006f
+#define regRCC_DWNP_DEV0_0_PCIE_LC_CNTL2_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_PCIEP_STRAP_MISC 0x0070
+#define regRCC_DWNP_DEV0_0_PCIEP_STRAP_MISC_BASE_IDX 2
+#define regRCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP 0x0071
+#define regRCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_ep_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_EP_DEV0_0_EP_PCIE_SCRATCH 0x0041
+#define regRCC_EP_DEV0_0_EP_PCIE_SCRATCH_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_CNTL 0x0043
+#define regRCC_EP_DEV0_0_EP_PCIE_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_CNTL 0x0044
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_STATUS 0x0045
+#define regRCC_EP_DEV0_0_EP_PCIE_INT_STATUS_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL2 0x0046
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_BUS_CNTL 0x0047
+#define regRCC_EP_DEV0_0_EP_PCIE_BUS_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_CFG_CNTL 0x0048
+#define regRCC_EP_DEV0_0_EP_PCIE_CFG_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x004a
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 0x004b
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 0x004c
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 0x004c
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 0x004c
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 0x004c
+#define regRCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC 0x004d
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2 0x004e
+#define regRCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP 0x0050
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR 0x0051
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL 0x0051
+#define regRCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x0051
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x0052
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x0053
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x0053
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x0053
+#define regRCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_PME_CONTROL 0x0053
+#define regRCC_EP_DEV0_0_EP_PCIE_PME_CONTROL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIEP_RESERVED 0x0054
+#define regRCC_EP_DEV0_0_EP_PCIEP_RESERVED_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_CNTL 0x0056
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID 0x0057
+#define regRCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_ERR_CNTL 0x0058
+#define regRCC_EP_DEV0_0_EP_PCIE_ERR_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL 0x0059
+#define regRCC_EP_DEV0_0_EP_PCIE_RX_CNTL_BASE_IDX 2
+#define regRCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL 0x005a
+#define regRCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_BIFDEC1
+// base address: 0x0
+#define regBIF_BX0_CC_BIF_BX_STRAP0 0x00e2
+#define regBIF_BX0_CC_BIF_BX_STRAP0_BASE_IDX 2
+#define regBIF_BX0_CC_BIF_BX_PINSTRAP0 0x00e4
+#define regBIF_BX0_CC_BIF_BX_PINSTRAP0_BASE_IDX 2
+#define regBIF_BX0_BIF_MM_INDACCESS_CNTL 0x00e6
+#define regBIF_BX0_BIF_MM_INDACCESS_CNTL_BASE_IDX 2
+#define regBIF_BX0_BUS_CNTL 0x00e7
+#define regBIF_BX0_BUS_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_SCRATCH0 0x00e8
+#define regBIF_BX0_BIF_SCRATCH0_BASE_IDX 2
+#define regBIF_BX0_BIF_SCRATCH1 0x00e9
+#define regBIF_BX0_BIF_SCRATCH1_BASE_IDX 2
+#define regBIF_BX0_BX_RESET_EN 0x00ed
+#define regBIF_BX0_BX_RESET_EN_BASE_IDX 2
+#define regBIF_BX0_MM_CFGREGS_CNTL 0x00ee
+#define regBIF_BX0_MM_CFGREGS_CNTL_BASE_IDX 2
+#define regBIF_BX0_BX_RESET_CNTL 0x00f0
+#define regBIF_BX0_BX_RESET_CNTL_BASE_IDX 2
+#define regBIF_BX0_INTERRUPT_CNTL 0x00f1
+#define regBIF_BX0_INTERRUPT_CNTL_BASE_IDX 2
+#define regBIF_BX0_INTERRUPT_CNTL2 0x00f2
+#define regBIF_BX0_INTERRUPT_CNTL2_BASE_IDX 2
+#define regBIF_BX0_CLKREQB_PAD_CNTL 0x00f8
+#define regBIF_BX0_CLKREQB_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_FEATURES_CONTROL_MISC 0x00fb
+#define regBIF_BX0_BIF_FEATURES_CONTROL_MISC_BASE_IDX 2
+#define regBIF_BX0_HDP_ATOMIC_CONTROL_MISC 0x00fc
+#define regBIF_BX0_HDP_ATOMIC_CONTROL_MISC_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_CNTL 0x00fd
+#define regBIF_BX0_BIF_DOORBELL_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_FB_EN 0x0100
+#define regBIF_BX0_BIF_FB_EN_BASE_IDX 2
+#define regBIF_BX0_BIF_INTR_CNTL 0x0101
+#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_MST_TRANS_PENDING_VF 0x0109
+#define regBIF_BX0_BIF_MST_TRANS_PENDING_VF_BASE_IDX 2
+#define regBIF_BX0_BIF_SLV_TRANS_PENDING_VF 0x010a
+#define regBIF_BX0_BIF_SLV_TRANS_PENDING_VF_BASE_IDX 2
+#define regBIF_BX0_BACO_CNTL 0x010b
+#define regBIF_BX0_BACO_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIME0 0x010c
+#define regBIF_BX0_BIF_BACO_EXIT_TIME0_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER1 0x010d
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER1_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER2 0x010e
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER2_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER3 0x010f
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER3_BASE_IDX 2
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER4 0x0110
+#define regBIF_BX0_BIF_BACO_EXIT_TIMER4_BASE_IDX 2
+#define regBIF_BX0_MEM_TYPE_CNTL 0x0111
+#define regBIF_BX0_MEM_TYPE_CNTL_BASE_IDX 2
+#define regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL 0x012d
+#define regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL 0x012e
+#define regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_CNTL 0x012f
+#define regBIF_BX0_BIF_RB_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_BASE 0x0130
+#define regBIF_BX0_BIF_RB_BASE_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_RPTR 0x0131
+#define regBIF_BX0_BIF_RB_RPTR_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_WPTR 0x0132
+#define regBIF_BX0_BIF_RB_WPTR_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_HI 0x0133
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_HI_BASE_IDX 2
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_LO 0x0134
+#define regBIF_BX0_BIF_RB_WPTR_ADDR_LO_BASE_IDX 2
+#define regBIF_BX0_MAILBOX_INDEX 0x0135
+#define regBIF_BX0_MAILBOX_INDEX_BASE_IDX 2
+#define regBIF_BX0_BIF_MP1_INTR_CTRL 0x0142
+#define regBIF_BX0_BIF_MP1_INTR_CTRL_BASE_IDX 2
+#define regBIF_BX0_BIF_PERSTB_PAD_CNTL 0x0145
+#define regBIF_BX0_BIF_PERSTB_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_PX_EN_PAD_CNTL 0x0146
+#define regBIF_BX0_BIF_PX_EN_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_REFPADKIN_PAD_CNTL 0x0147
+#define regBIF_BX0_BIF_REFPADKIN_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_CLKREQB_PAD_CNTL 0x0148
+#define regBIF_BX0_BIF_CLKREQB_PAD_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_PWRBRK_PAD_CNTL 0x0149
+#define regBIF_BX0_BIF_PWRBRK_PAD_CNTL_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_BIFDEC1
+// base address: 0x0
+#define regRCC_DEV0_0_RCC_ERR_INT_CNTL 0x0086
+#define regRCC_DEV0_0_RCC_ERR_INT_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BACO_CNTL_MISC 0x0087
+#define regRCC_DEV0_0_RCC_BACO_CNTL_MISC_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_RESET_EN 0x0088
+#define regRCC_DEV0_0_RCC_RESET_EN_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_VDM_SUPPORT 0x0089
+#define regRCC_DEV0_0_RCC_VDM_SUPPORT_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0 0x008a
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1 0x008b
+#define regRCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_GPUIOV_REGION 0x008c
+#define regRCC_DEV0_0_RCC_GPUIOV_REGION_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_GPU_HOSTVM_EN 0x008d
+#define regRCC_DEV0_0_RCC_GPU_HOSTVM_EN_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL 0x008e
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET 0x008f
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE 0x008f
+#define regRCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE0 0x00be
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE1 0x00bf
+#define regRCC_DEV0_0_RCC_PEER_REG_RANGE1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUS_CNTL 0x00c1
+#define regRCC_DEV0_0_RCC_BUS_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_CNTL 0x00c2
+#define regRCC_DEV0_0_RCC_CONFIG_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_F0_BASE 0x00c6
+#define regRCC_DEV0_0_RCC_CONFIG_F0_BASE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_APER_SIZE 0x00c7
+#define regRCC_DEV0_0_RCC_CONFIG_APER_SIZE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE 0x00c8
+#define regRCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_XDMA_LO 0x00c9
+#define regRCC_DEV0_0_RCC_XDMA_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_XDMA_HI 0x00ca
+#define regRCC_DEV0_0_RCC_XDMA_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_FEATURES_CONTROL_MISC 0x00cb
+#define regRCC_DEV0_0_RCC_FEATURES_CONTROL_MISC_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL1 0x00cc
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST0 0x00cd
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST1 0x00ce
+#define regRCC_DEV0_0_RCC_BUSNUM_LIST1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL2 0x00cf
+#define regRCC_DEV0_0_RCC_BUSNUM_CNTL2_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM 0x00d0
+#define regRCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_HOST_BUSNUM 0x00d1
+#define regRCC_DEV0_0_RCC_HOST_BUSNUM_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI 0x00d2
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO 0x00d3
+#define regRCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI 0x00d4
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO 0x00d5
+#define regRCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI 0x00d6
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO 0x00d7
+#define regRCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI 0x00d8
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO 0x00d9
+#define regRCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST0 0x00da
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST0_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST1 0x00db
+#define regRCC_DEV0_0_RCC_DEVFUNCNUM_LIST1_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_DEV0_LINK_CNTL 0x00dd
+#define regRCC_DEV0_0_RCC_DEV0_LINK_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_CMN_LINK_CNTL 0x00de
+#define regRCC_DEV0_0_RCC_CMN_LINK_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE 0x00df
+#define regRCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_LTR_LSWITCH_CNTL 0x00e0
+#define regRCC_DEV0_0_RCC_LTR_LSWITCH_CNTL_BASE_IDX 2
+#define regRCC_DEV0_0_RCC_MH_ARB_CNTL 0x00e1
+#define regRCC_DEV0_0_RCC_MH_ARB_CNTL_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_rcc_strap_BIFDEC1
+// base address: 0x0
+#define regRCC_STRAP0_RCC_BIF_STRAP0 0x0000
+#define regRCC_STRAP0_RCC_BIF_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP1 0x0001
+#define regRCC_STRAP0_RCC_BIF_STRAP1_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP2 0x0005
+#define regRCC_STRAP0_RCC_BIF_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP3 0x0006
+#define regRCC_STRAP0_RCC_BIF_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP4 0x0007
+#define regRCC_STRAP0_RCC_BIF_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP5 0x0008
+#define regRCC_STRAP0_RCC_BIF_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_BIF_STRAP6 0x0009
+#define regRCC_STRAP0_RCC_BIF_STRAP6_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP0 0x000d
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP1 0x000e
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP1_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP10 0x000f
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP10_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP11 0x0010
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP11_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP12 0x0011
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP12_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP13 0x0012
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP13_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP14 0x0013
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP14_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP2 0x0014
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP3 0x0015
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP4 0x0016
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP5 0x0017
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP6 0x0018
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP6_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP7 0x0019
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP7_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP8 0x001a
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP8_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP9 0x001b
+#define regRCC_STRAP0_RCC_DEV0_PORT_STRAP9_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0 0x001c
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP1 0x001d
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP1_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP13 0x001e
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP13_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP14 0x001f
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP14_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP15 0x0020
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP15_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP16 0x0021
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP16_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP17 0x0022
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP17_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP18 0x0023
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP18_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP2 0x0024
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP3 0x0026
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP4 0x0027
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP5 0x0028
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP8 0x0029
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP8_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP9 0x002a
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP9_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP0 0x002b
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP0_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP2 0x0036
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP2_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP20 0x0037
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP20_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP21 0x0038
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP21_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP3 0x0039
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP3_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP4 0x003a
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP4_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP5 0x003b
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP5_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP6 0x003c
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP6_BASE_IDX 2
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP7 0x003d
+#define regRCC_STRAP0_RCC_DEV0_EPF1_STRAP7_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_pf_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_PF0_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_PF0_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_PF0_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_PF0_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_PF0_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_PF0_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_PF0_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_PF0_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_PF0_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_PF0_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_PF0_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_PF0_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_gdc_GDCDEC
+// base address: 0x0
+#define regGDC0_SHUB_REGS_IF_CTL 0x0181
+#define regGDC0_SHUB_REGS_IF_CTL_BASE_IDX 2
+#define regGDC0_A2S_QUEUE_FIFO_ARB_CNTL 0x0182
+#define regGDC0_A2S_QUEUE_FIFO_ARB_CNTL_BASE_IDX 2
+#define regGDC0_NGDC_MGCG_CTRL 0x0187
+#define regGDC0_NGDC_MGCG_CTRL_BASE_IDX 2
+#define regGDC0_S2A_MISC_CNTL 0x0188
+#define regGDC0_S2A_MISC_CNTL_BASE_IDX 2
+#define regGDC0_NGDC_PG_MISC_CTRL 0x0190
+#define regGDC0_NGDC_PG_MISC_CTRL_BASE_IDX 2
+#define regGDC0_NGDC_PGMST_CTRL 0x0191
+#define regGDC0_NGDC_PGMST_CTRL_BASE_IDX 2
+#define regGDC0_NGDC_PGSLV_CTRL 0x0192
+#define regGDC0_NGDC_PGSLV_CTRL_BASE_IDX 2
+#define regGDC0_ATDMA_MISC_CNTL 0x01e1
+#define regGDC0_ATDMA_MISC_CNTL_BASE_IDX 2
+
+
+// addressBlock: nbif_gdc_s2a_GDCS2A_DEC
+// base address: 0x0
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL 0x01cb
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL 0x01cc
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL 0x01cd
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL 0x01ce
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL 0x01cf
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL 0x01d0
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL 0x01d1
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL 0x01d2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL 0x01d3
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL 0x01d4
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL 0x01d5
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL 0x01d6
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL 0x01d7
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL 0x01d8
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL 0x01d9
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL 0x01da
+#define regGDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL_BASE_IDX 2
+#define regGDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG 0x01db
+#define regGDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG_BASE_IDX 2
+#define regGDC_S2A0_NBIF_GFX_DOORBELL_STATUS 0x01dc
+#define regGDC_S2A0_NBIF_GFX_DOORBELL_STATUS_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_cfg_dev0_rc_bifcfgdecp
+// base address: 0x10100000
+#define regIRQ_BRIDGE_CNTL 0x000f
+#define regIRQ_BRIDGE_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_bifcfgdecp
+// base address: 0x10140000
+#define regBIF_CFG_DEV0_EPF0_VENDOR_ID 0x10000
+#define regBIF_CFG_DEV0_EPF0_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_ID 0x10000
+#define regBIF_CFG_DEV0_EPF0_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_COMMAND 0x10001
+#define regBIF_CFG_DEV0_EPF0_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_STATUS 0x10001
+#define regBIF_CFG_DEV0_EPF0_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_REVISION_ID 0x10002
+#define regBIF_CFG_DEV0_EPF0_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PROG_INTERFACE 0x10002
+#define regBIF_CFG_DEV0_EPF0_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_SUB_CLASS 0x10002
+#define regBIF_CFG_DEV0_EPF0_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_CLASS 0x10002
+#define regBIF_CFG_DEV0_EPF0_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_CACHE_LINE 0x10003
+#define regBIF_CFG_DEV0_EPF0_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LATENCY 0x10003
+#define regBIF_CFG_DEV0_EPF0_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_HEADER 0x10003
+#define regBIF_CFG_DEV0_EPF0_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BIST 0x10003
+#define regBIF_CFG_DEV0_EPF0_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_1 0x10004
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_2 0x10005
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_3 0x10006
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_4 0x10007
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_5 0x10008
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_6 0x10009
+#define regBIF_CFG_DEV0_EPF0_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR 0x1000a
+#define regBIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_ADAPTER_ID 0x1000b
+#define regBIF_CFG_DEV0_EPF0_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_ROM_BASE_ADDR 0x1000c
+#define regBIF_CFG_DEV0_EPF0_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_CAP_PTR 0x1000d
+#define regBIF_CFG_DEV0_EPF0_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_INTERRUPT_LINE 0x1000f
+#define regBIF_CFG_DEV0_EPF0_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_INTERRUPT_PIN 0x1000f
+#define regBIF_CFG_DEV0_EPF0_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MIN_GRANT 0x1000f
+#define regBIF_CFG_DEV0_EPF0_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MAX_LATENCY 0x1000f
+#define regBIF_CFG_DEV0_EPF0_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST 0x10012
+#define regBIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_ADAPTER_ID_W 0x10013
+#define regBIF_CFG_DEV0_EPF0_ADAPTER_ID_W_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PMI_CAP_LIST 0x10014
+#define regBIF_CFG_DEV0_EPF0_PMI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PMI_CAP 0x10014
+#define regBIF_CFG_DEV0_EPF0_PMI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL 0x10015
+#define regBIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_CAP_LIST 0x10019
+#define regBIF_CFG_DEV0_EPF0_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_CAP 0x10019
+#define regBIF_CFG_DEV0_EPF0_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CAP 0x1001a
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CNTL 0x1001b
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_STATUS 0x1001b
+#define regBIF_CFG_DEV0_EPF0_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP 0x1001c
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL 0x1001d
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS 0x1001d
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CAP2 0x10022
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x10023
+#define regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DEVICE_STATUS2 0x10023
+#define regBIF_CFG_DEV0_EPF0_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP2 0x10024
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL2 0x10025
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS2 0x10025
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_CAP_LIST 0x10028
+#define regBIF_CFG_DEV0_EPF0_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_CNTL 0x10028
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO 0x10029
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI 0x1002a
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_DATA 0x1002a
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA 0x1002a
+#define regBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MASK 0x1002b
+#define regBIF_CFG_DEV0_EPF0_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64 0x1002b
+#define regBIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64 0x1002b
+#define regBIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_MASK_64 0x1002c
+#define regBIF_CFG_DEV0_EPF0_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_PENDING 0x1002c
+#define regBIF_CFG_DEV0_EPF0_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSI_PENDING_64 0x1002d
+#define regBIF_CFG_DEV0_EPF0_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSIX_CAP_LIST 0x10030
+#define regBIF_CFG_DEV0_EPF0_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL 0x10030
+#define regBIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSIX_TABLE 0x10031
+#define regBIF_CFG_DEV0_EPF0_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MSIX_PBA 0x10032
+#define regBIF_CFG_DEV0_EPF0_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x10040
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR 0x10041
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1 0x10042
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2 0x10043
+#define regBIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST 0x10044
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1 0x10045
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2 0x10046
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL 0x10047
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS 0x10047
+#define regBIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP 0x10048
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL 0x10049
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS 0x1004a
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP 0x1004b
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL 0x1004c
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS 0x1004d
+#define regBIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x10050
+#define regBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1 0x10051
+#define regBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2 0x10052
+#define regBIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x10054
+#define regBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS 0x10055
+#define regBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK 0x10056
+#define regBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY 0x10057
+#define regBIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS 0x10058
+#define regBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK 0x10059
+#define regBIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL 0x1005a
+#define regBIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0 0x1005b
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1 0x1005c
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2 0x1005d
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3 0x1005e
+#define regBIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0 0x10062
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1 0x10063
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2 0x10064
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3 0x10065
+#define regBIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST 0x10080
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP 0x10081
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL 0x10082
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP 0x10083
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL 0x10084
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP 0x10085
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL 0x10086
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP 0x10087
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL 0x10088
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP 0x10089
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL 0x1008a
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP 0x1008b
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL 0x1008c
+#define regBIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x10090
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT 0x10091
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA 0x10092
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP 0x10093
+#define regBIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST 0x10094
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_CAP 0x10095
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR 0x10096
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS 0x10097
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL 0x10097
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x10098
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x10098
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x10098
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x10098
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x10099
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x10099
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x10099
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x10099
+#define regBIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST 0x1009c
+#define regBIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3 0x1009d
+#define regBIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS 0x1009e
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL 0x1009f
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL 0x1009f
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL 0x100a0
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL 0x100a0
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL 0x100a1
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL 0x100a1
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL 0x100a2
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL 0x100a2
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL 0x100a3
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL 0x100a3
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL 0x100a4
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL 0x100a4
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL 0x100a5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL 0x100a5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL 0x100a6
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL 0x100a6
+#define regBIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST 0x100a8
+#define regBIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ACS_CAP 0x100a9
+#define regBIF_CFG_DEV0_EPF0_PCIE_ACS_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL 0x100a9
+#define regBIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST 0x100b4
+#define regBIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PASID_CAP 0x100b5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PASID_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL 0x100b5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST 0x100c8
+#define regBIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x100c9
+#define regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST 0x100ca
+#define regBIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ARI_CAP 0x100cb
+#define regBIF_CFG_DEV0_EPF0_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL 0x100cb
+#define regBIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST 0x100cc
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP 0x100cd
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL 0x100ce
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_STATUS 0x100ce
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_INITIAL_VFS 0x100cf
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_INITIAL_VFS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_TOTAL_VFS 0x100cf
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_TOTAL_VFS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_NUM_VFS 0x100d0
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_NUM_VFS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_FUNC_DEP_LINK 0x100d0
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_FUNC_DEP_LINK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_FIRST_VF_OFFSET 0x100d1
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_FIRST_VF_OFFSET_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_STRIDE 0x100d1
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_STRIDE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_DEVICE_ID 0x100d2
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x100d3
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x100d4
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_SYSTEM_PAGE_SIZE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_0 0x100d5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_1 0x100d6
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_2 0x100d7
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_3 0x100d8
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_4 0x100d9
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_5 0x100da
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x100db
+#define regBIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST 0x10100
+#define regBIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP 0x10101
+#define regBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS 0x10102
+#define regBIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST 0x10104
+#define regBIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP_16GT 0x10105
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL_16GT 0x10106
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS_16GT 0x10107
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x10108
+#define regBIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x10109
+#define regBIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x1010a
+#define regBIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT 0x1010c
+#define regBIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT 0x1010d
+#define regBIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT 0x1010e
+#define regBIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT 0x1010f
+#define regBIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST 0x10114
+#define regBIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP 0x10115
+#define regBIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS 0x10115
+#define regBIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL 0x10116
+#define regBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS 0x10116
+#define regBIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL 0x10117
+#define regBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS 0x10117
+#define regBIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL 0x10118
+#define regBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS 0x10118
+#define regBIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL 0x10119
+#define regBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS 0x10119
+#define regBIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL 0x1011a
+#define regBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS 0x1011a
+#define regBIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL 0x1011b
+#define regBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS 0x1011b
+#define regBIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL 0x1011c
+#define regBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS 0x1011c
+#define regBIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL 0x1011d
+#define regBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS 0x1011d
+#define regBIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL 0x1011e
+#define regBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS 0x1011e
+#define regBIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL 0x1011f
+#define regBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS 0x1011f
+#define regBIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL 0x10120
+#define regBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS 0x10120
+#define regBIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL 0x10121
+#define regBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS 0x10121
+#define regBIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL 0x10122
+#define regBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS 0x10122
+#define regBIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL 0x10123
+#define regBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS 0x10123
+#define regBIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL 0x10124
+#define regBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS 0x10124
+#define regBIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL 0x10125
+#define regBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS 0x10125
+#define regBIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x10130
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CAP 0x10131
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL 0x10132
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CAP 0x10133
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL 0x10134
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CAP 0x10135
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL 0x10136
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CAP 0x10137
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL 0x10138
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CAP 0x10139
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL 0x1013a
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CAP 0x1013b
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL 0x1013c
+#define regBIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP_32GT 0x10141
+#define regBIF_CFG_DEV0_EPF0_LINK_CAP_32GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL_32GT 0x10142
+#define regBIF_CFG_DEV0_EPF0_LINK_CNTL_32GT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS_32GT 0x10143
+#define regBIF_CFG_DEV0_EPF0_LINK_STATUS_32GT_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf0_bifcfgdecp
+// base address: 0x10160000
+#define regBIF_CFG_DEV0_EPF0_VF0_VENDOR_ID 0x18000
+#define regBIF_CFG_DEV0_EPF0_VF0_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_ID 0x18000
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_COMMAND 0x18001
+#define regBIF_CFG_DEV0_EPF0_VF0_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_STATUS 0x18001
+#define regBIF_CFG_DEV0_EPF0_VF0_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_REVISION_ID 0x18002
+#define regBIF_CFG_DEV0_EPF0_VF0_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE 0x18002
+#define regBIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_SUB_CLASS 0x18002
+#define regBIF_CFG_DEV0_EPF0_VF0_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_CLASS 0x18002
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_CACHE_LINE 0x18003
+#define regBIF_CFG_DEV0_EPF0_VF0_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LATENCY 0x18003
+#define regBIF_CFG_DEV0_EPF0_VF0_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_HEADER 0x18003
+#define regBIF_CFG_DEV0_EPF0_VF0_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BIST 0x18003
+#define regBIF_CFG_DEV0_EPF0_VF0_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1 0x18004
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2 0x18005
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3 0x18006
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4 0x18007
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5 0x18008
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6 0x18009
+#define regBIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR 0x1800a
+#define regBIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID 0x1800b
+#define regBIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR 0x1800c
+#define regBIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_CAP_PTR 0x1800d
+#define regBIF_CFG_DEV0_EPF0_VF0_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE 0x1800f
+#define regBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN 0x1800f
+#define regBIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MIN_GRANT 0x1800f
+#define regBIF_CFG_DEV0_EPF0_VF0_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY 0x1800f
+#define regBIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST 0x18019
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP 0x18019
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP 0x1801a
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL 0x1801b
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS 0x1801b
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CAP 0x1801c
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL 0x1801d
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS 0x1801d
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2 0x18022
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2 0x18023
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2 0x18023
+#define regBIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CAP2 0x18024
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2 0x18025
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2 0x18025
+#define regBIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST 0x18028
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL 0x18028
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO 0x18029
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI 0x1802a
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA 0x1802a
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA 0x1802a
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MASK 0x1802b
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64 0x1802b
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64 0x1802b
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64 0x1802c
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING 0x1802c
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64 0x1802d
+#define regBIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST 0x18030
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL 0x18030
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE 0x18031
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_PBA 0x18032
+#define regBIF_CFG_DEV0_EPF0_VF0_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x18040
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR 0x18041
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1 0x18042
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2 0x18043
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x18054
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS 0x18055
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK 0x18056
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY 0x18057
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS 0x18058
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK 0x18059
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL 0x1805a
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0 0x1805b
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1 0x1805c
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2 0x1805d
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3 0x1805e
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0 0x18062
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1 0x18063
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2 0x18064
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3 0x18065
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST 0x180ca
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP 0x180cb
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL 0x180cb
+#define regBIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf1_bifcfgdecp
+// base address: 0x10161000
+#define regBIF_CFG_DEV0_EPF0_VF1_VENDOR_ID 0x18400
+#define regBIF_CFG_DEV0_EPF0_VF1_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_ID 0x18400
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_COMMAND 0x18401
+#define regBIF_CFG_DEV0_EPF0_VF1_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_STATUS 0x18401
+#define regBIF_CFG_DEV0_EPF0_VF1_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_REVISION_ID 0x18402
+#define regBIF_CFG_DEV0_EPF0_VF1_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE 0x18402
+#define regBIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_SUB_CLASS 0x18402
+#define regBIF_CFG_DEV0_EPF0_VF1_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_CLASS 0x18402
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_CACHE_LINE 0x18403
+#define regBIF_CFG_DEV0_EPF0_VF1_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LATENCY 0x18403
+#define regBIF_CFG_DEV0_EPF0_VF1_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_HEADER 0x18403
+#define regBIF_CFG_DEV0_EPF0_VF1_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BIST 0x18403
+#define regBIF_CFG_DEV0_EPF0_VF1_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1 0x18404
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2 0x18405
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3 0x18406
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4 0x18407
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5 0x18408
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6 0x18409
+#define regBIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR 0x1840a
+#define regBIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID 0x1840b
+#define regBIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR 0x1840c
+#define regBIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_CAP_PTR 0x1840d
+#define regBIF_CFG_DEV0_EPF0_VF1_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE 0x1840f
+#define regBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN 0x1840f
+#define regBIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MIN_GRANT 0x1840f
+#define regBIF_CFG_DEV0_EPF0_VF1_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY 0x1840f
+#define regBIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST 0x18419
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP 0x18419
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP 0x1841a
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL 0x1841b
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS 0x1841b
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CAP 0x1841c
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL 0x1841d
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS 0x1841d
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2 0x18422
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2 0x18423
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2 0x18423
+#define regBIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CAP2 0x18424
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2 0x18425
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2 0x18425
+#define regBIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST 0x18428
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL 0x18428
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO 0x18429
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI 0x1842a
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA 0x1842a
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA 0x1842a
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MASK 0x1842b
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64 0x1842b
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64 0x1842b
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64 0x1842c
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING 0x1842c
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64 0x1842d
+#define regBIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST 0x18430
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL 0x18430
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE 0x18431
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_PBA 0x18432
+#define regBIF_CFG_DEV0_EPF0_VF1_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x18440
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR 0x18441
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1 0x18442
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2 0x18443
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x18454
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS 0x18455
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK 0x18456
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY 0x18457
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS 0x18458
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK 0x18459
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL 0x1845a
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0 0x1845b
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1 0x1845c
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2 0x1845d
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3 0x1845e
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0 0x18462
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1 0x18463
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2 0x18464
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3 0x18465
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST 0x184ca
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP 0x184cb
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL 0x184cb
+#define regBIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf2_bifcfgdecp
+// base address: 0x10162000
+#define regBIF_CFG_DEV0_EPF0_VF2_VENDOR_ID 0x18800
+#define regBIF_CFG_DEV0_EPF0_VF2_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_ID 0x18800
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_COMMAND 0x18801
+#define regBIF_CFG_DEV0_EPF0_VF2_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_STATUS 0x18801
+#define regBIF_CFG_DEV0_EPF0_VF2_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_REVISION_ID 0x18802
+#define regBIF_CFG_DEV0_EPF0_VF2_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE 0x18802
+#define regBIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_SUB_CLASS 0x18802
+#define regBIF_CFG_DEV0_EPF0_VF2_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_CLASS 0x18802
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_CACHE_LINE 0x18803
+#define regBIF_CFG_DEV0_EPF0_VF2_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LATENCY 0x18803
+#define regBIF_CFG_DEV0_EPF0_VF2_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_HEADER 0x18803
+#define regBIF_CFG_DEV0_EPF0_VF2_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BIST 0x18803
+#define regBIF_CFG_DEV0_EPF0_VF2_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1 0x18804
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2 0x18805
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3 0x18806
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4 0x18807
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5 0x18808
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6 0x18809
+#define regBIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR 0x1880a
+#define regBIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID 0x1880b
+#define regBIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR 0x1880c
+#define regBIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_CAP_PTR 0x1880d
+#define regBIF_CFG_DEV0_EPF0_VF2_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE 0x1880f
+#define regBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN 0x1880f
+#define regBIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MIN_GRANT 0x1880f
+#define regBIF_CFG_DEV0_EPF0_VF2_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY 0x1880f
+#define regBIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST 0x18819
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP 0x18819
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP 0x1881a
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL 0x1881b
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS 0x1881b
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CAP 0x1881c
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL 0x1881d
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS 0x1881d
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2 0x18822
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2 0x18823
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2 0x18823
+#define regBIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CAP2 0x18824
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2 0x18825
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2 0x18825
+#define regBIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST 0x18828
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL 0x18828
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO 0x18829
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI 0x1882a
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA 0x1882a
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA 0x1882a
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MASK 0x1882b
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64 0x1882b
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64 0x1882b
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64 0x1882c
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING 0x1882c
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64 0x1882d
+#define regBIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST 0x18830
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL 0x18830
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE 0x18831
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_PBA 0x18832
+#define regBIF_CFG_DEV0_EPF0_VF2_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x18840
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR 0x18841
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1 0x18842
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2 0x18843
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x18854
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS 0x18855
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK 0x18856
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY 0x18857
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS 0x18858
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK 0x18859
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL 0x1885a
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0 0x1885b
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1 0x1885c
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2 0x1885d
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3 0x1885e
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0 0x18862
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1 0x18863
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2 0x18864
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3 0x18865
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST 0x188ca
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP 0x188cb
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL 0x188cb
+#define regBIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf3_bifcfgdecp
+// base address: 0x10163000
+#define regBIF_CFG_DEV0_EPF0_VF3_VENDOR_ID 0x18c00
+#define regBIF_CFG_DEV0_EPF0_VF3_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_ID 0x18c00
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_COMMAND 0x18c01
+#define regBIF_CFG_DEV0_EPF0_VF3_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_STATUS 0x18c01
+#define regBIF_CFG_DEV0_EPF0_VF3_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_REVISION_ID 0x18c02
+#define regBIF_CFG_DEV0_EPF0_VF3_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE 0x18c02
+#define regBIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_SUB_CLASS 0x18c02
+#define regBIF_CFG_DEV0_EPF0_VF3_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_CLASS 0x18c02
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_CACHE_LINE 0x18c03
+#define regBIF_CFG_DEV0_EPF0_VF3_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LATENCY 0x18c03
+#define regBIF_CFG_DEV0_EPF0_VF3_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_HEADER 0x18c03
+#define regBIF_CFG_DEV0_EPF0_VF3_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BIST 0x18c03
+#define regBIF_CFG_DEV0_EPF0_VF3_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1 0x18c04
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2 0x18c05
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3 0x18c06
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4 0x18c07
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5 0x18c08
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6 0x18c09
+#define regBIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR 0x18c0a
+#define regBIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID 0x18c0b
+#define regBIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR 0x18c0c
+#define regBIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_CAP_PTR 0x18c0d
+#define regBIF_CFG_DEV0_EPF0_VF3_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE 0x18c0f
+#define regBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN 0x18c0f
+#define regBIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MIN_GRANT 0x18c0f
+#define regBIF_CFG_DEV0_EPF0_VF3_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY 0x18c0f
+#define regBIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST 0x18c19
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP 0x18c19
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP 0x18c1a
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL 0x18c1b
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS 0x18c1b
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CAP 0x18c1c
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL 0x18c1d
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS 0x18c1d
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2 0x18c22
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2 0x18c23
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2 0x18c23
+#define regBIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CAP2 0x18c24
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2 0x18c25
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2 0x18c25
+#define regBIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST 0x18c28
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL 0x18c28
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO 0x18c29
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI 0x18c2a
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA 0x18c2a
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA 0x18c2a
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MASK 0x18c2b
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64 0x18c2b
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64 0x18c2b
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64 0x18c2c
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING 0x18c2c
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64 0x18c2d
+#define regBIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST 0x18c30
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL 0x18c30
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE 0x18c31
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_PBA 0x18c32
+#define regBIF_CFG_DEV0_EPF0_VF3_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x18c40
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR 0x18c41
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1 0x18c42
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2 0x18c43
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x18c54
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS 0x18c55
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK 0x18c56
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY 0x18c57
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS 0x18c58
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK 0x18c59
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL 0x18c5a
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0 0x18c5b
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1 0x18c5c
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2 0x18c5d
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3 0x18c5e
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0 0x18c62
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1 0x18c63
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2 0x18c64
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3 0x18c65
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST 0x18cca
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP 0x18ccb
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL 0x18ccb
+#define regBIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf4_bifcfgdecp
+// base address: 0x10164000
+#define regBIF_CFG_DEV0_EPF0_VF4_VENDOR_ID 0x19000
+#define regBIF_CFG_DEV0_EPF0_VF4_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_ID 0x19000
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_COMMAND 0x19001
+#define regBIF_CFG_DEV0_EPF0_VF4_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_STATUS 0x19001
+#define regBIF_CFG_DEV0_EPF0_VF4_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_REVISION_ID 0x19002
+#define regBIF_CFG_DEV0_EPF0_VF4_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE 0x19002
+#define regBIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_SUB_CLASS 0x19002
+#define regBIF_CFG_DEV0_EPF0_VF4_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_CLASS 0x19002
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_CACHE_LINE 0x19003
+#define regBIF_CFG_DEV0_EPF0_VF4_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LATENCY 0x19003
+#define regBIF_CFG_DEV0_EPF0_VF4_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_HEADER 0x19003
+#define regBIF_CFG_DEV0_EPF0_VF4_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BIST 0x19003
+#define regBIF_CFG_DEV0_EPF0_VF4_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1 0x19004
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2 0x19005
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3 0x19006
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4 0x19007
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5 0x19008
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6 0x19009
+#define regBIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR 0x1900a
+#define regBIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID 0x1900b
+#define regBIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR 0x1900c
+#define regBIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_CAP_PTR 0x1900d
+#define regBIF_CFG_DEV0_EPF0_VF4_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE 0x1900f
+#define regBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN 0x1900f
+#define regBIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MIN_GRANT 0x1900f
+#define regBIF_CFG_DEV0_EPF0_VF4_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY 0x1900f
+#define regBIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST 0x19019
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP 0x19019
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP 0x1901a
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL 0x1901b
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS 0x1901b
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CAP 0x1901c
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL 0x1901d
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS 0x1901d
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2 0x19022
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2 0x19023
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2 0x19023
+#define regBIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CAP2 0x19024
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2 0x19025
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2 0x19025
+#define regBIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST 0x19028
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL 0x19028
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO 0x19029
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI 0x1902a
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA 0x1902a
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA 0x1902a
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MASK 0x1902b
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64 0x1902b
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64 0x1902b
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64 0x1902c
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING 0x1902c
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64 0x1902d
+#define regBIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST 0x19030
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL 0x19030
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE 0x19031
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_PBA 0x19032
+#define regBIF_CFG_DEV0_EPF0_VF4_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x19040
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR 0x19041
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1 0x19042
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2 0x19043
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x19054
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS 0x19055
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK 0x19056
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY 0x19057
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS 0x19058
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK 0x19059
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL 0x1905a
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0 0x1905b
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1 0x1905c
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2 0x1905d
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3 0x1905e
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0 0x19062
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1 0x19063
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2 0x19064
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3 0x19065
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST 0x190ca
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP 0x190cb
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL 0x190cb
+#define regBIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf5_bifcfgdecp
+// base address: 0x10165000
+#define regBIF_CFG_DEV0_EPF0_VF5_VENDOR_ID 0x19400
+#define regBIF_CFG_DEV0_EPF0_VF5_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_ID 0x19400
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_COMMAND 0x19401
+#define regBIF_CFG_DEV0_EPF0_VF5_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_STATUS 0x19401
+#define regBIF_CFG_DEV0_EPF0_VF5_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_REVISION_ID 0x19402
+#define regBIF_CFG_DEV0_EPF0_VF5_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE 0x19402
+#define regBIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_SUB_CLASS 0x19402
+#define regBIF_CFG_DEV0_EPF0_VF5_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_CLASS 0x19402
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_CACHE_LINE 0x19403
+#define regBIF_CFG_DEV0_EPF0_VF5_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LATENCY 0x19403
+#define regBIF_CFG_DEV0_EPF0_VF5_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_HEADER 0x19403
+#define regBIF_CFG_DEV0_EPF0_VF5_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BIST 0x19403
+#define regBIF_CFG_DEV0_EPF0_VF5_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1 0x19404
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2 0x19405
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3 0x19406
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4 0x19407
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5 0x19408
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6 0x19409
+#define regBIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR 0x1940a
+#define regBIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID 0x1940b
+#define regBIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR 0x1940c
+#define regBIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_CAP_PTR 0x1940d
+#define regBIF_CFG_DEV0_EPF0_VF5_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE 0x1940f
+#define regBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN 0x1940f
+#define regBIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MIN_GRANT 0x1940f
+#define regBIF_CFG_DEV0_EPF0_VF5_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY 0x1940f
+#define regBIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST 0x19419
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP 0x19419
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP 0x1941a
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL 0x1941b
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS 0x1941b
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CAP 0x1941c
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL 0x1941d
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS 0x1941d
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2 0x19422
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2 0x19423
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2 0x19423
+#define regBIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CAP2 0x19424
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2 0x19425
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2 0x19425
+#define regBIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST 0x19428
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL 0x19428
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO 0x19429
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI 0x1942a
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA 0x1942a
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA 0x1942a
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MASK 0x1942b
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64 0x1942b
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64 0x1942b
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64 0x1942c
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING 0x1942c
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64 0x1942d
+#define regBIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST 0x19430
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL 0x19430
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE 0x19431
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_PBA 0x19432
+#define regBIF_CFG_DEV0_EPF0_VF5_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x19440
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR 0x19441
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1 0x19442
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2 0x19443
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x19454
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS 0x19455
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK 0x19456
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY 0x19457
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS 0x19458
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK 0x19459
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL 0x1945a
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0 0x1945b
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1 0x1945c
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2 0x1945d
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3 0x1945e
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0 0x19462
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1 0x19463
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2 0x19464
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3 0x19465
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST 0x194ca
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP 0x194cb
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL 0x194cb
+#define regBIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf6_bifcfgdecp
+// base address: 0x10166000
+#define regBIF_CFG_DEV0_EPF0_VF6_VENDOR_ID 0x19800
+#define regBIF_CFG_DEV0_EPF0_VF6_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_ID 0x19800
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_COMMAND 0x19801
+#define regBIF_CFG_DEV0_EPF0_VF6_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_STATUS 0x19801
+#define regBIF_CFG_DEV0_EPF0_VF6_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_REVISION_ID 0x19802
+#define regBIF_CFG_DEV0_EPF0_VF6_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE 0x19802
+#define regBIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_SUB_CLASS 0x19802
+#define regBIF_CFG_DEV0_EPF0_VF6_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_CLASS 0x19802
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_CACHE_LINE 0x19803
+#define regBIF_CFG_DEV0_EPF0_VF6_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LATENCY 0x19803
+#define regBIF_CFG_DEV0_EPF0_VF6_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_HEADER 0x19803
+#define regBIF_CFG_DEV0_EPF0_VF6_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BIST 0x19803
+#define regBIF_CFG_DEV0_EPF0_VF6_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1 0x19804
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2 0x19805
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3 0x19806
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4 0x19807
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5 0x19808
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6 0x19809
+#define regBIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR 0x1980a
+#define regBIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID 0x1980b
+#define regBIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR 0x1980c
+#define regBIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_CAP_PTR 0x1980d
+#define regBIF_CFG_DEV0_EPF0_VF6_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE 0x1980f
+#define regBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN 0x1980f
+#define regBIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MIN_GRANT 0x1980f
+#define regBIF_CFG_DEV0_EPF0_VF6_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY 0x1980f
+#define regBIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST 0x19819
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP 0x19819
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP 0x1981a
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL 0x1981b
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS 0x1981b
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CAP 0x1981c
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL 0x1981d
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS 0x1981d
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2 0x19822
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2 0x19823
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2 0x19823
+#define regBIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CAP2 0x19824
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2 0x19825
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2 0x19825
+#define regBIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST 0x19828
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL 0x19828
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO 0x19829
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI 0x1982a
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA 0x1982a
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA 0x1982a
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MASK 0x1982b
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64 0x1982b
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64 0x1982b
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64 0x1982c
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING 0x1982c
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64 0x1982d
+#define regBIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST 0x19830
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL 0x19830
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE 0x19831
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_PBA 0x19832
+#define regBIF_CFG_DEV0_EPF0_VF6_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x19840
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR 0x19841
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1 0x19842
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2 0x19843
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x19854
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS 0x19855
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK 0x19856
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY 0x19857
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS 0x19858
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK 0x19859
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL 0x1985a
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0 0x1985b
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1 0x1985c
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2 0x1985d
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3 0x1985e
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0 0x19862
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1 0x19863
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2 0x19864
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3 0x19865
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST 0x198ca
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP 0x198cb
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL 0x198cb
+#define regBIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf7_bifcfgdecp
+// base address: 0x10167000
+#define regBIF_CFG_DEV0_EPF0_VF7_VENDOR_ID 0x19c00
+#define regBIF_CFG_DEV0_EPF0_VF7_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_ID 0x19c00
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_COMMAND 0x19c01
+#define regBIF_CFG_DEV0_EPF0_VF7_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_STATUS 0x19c01
+#define regBIF_CFG_DEV0_EPF0_VF7_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_REVISION_ID 0x19c02
+#define regBIF_CFG_DEV0_EPF0_VF7_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE 0x19c02
+#define regBIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_SUB_CLASS 0x19c02
+#define regBIF_CFG_DEV0_EPF0_VF7_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_CLASS 0x19c02
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_CACHE_LINE 0x19c03
+#define regBIF_CFG_DEV0_EPF0_VF7_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LATENCY 0x19c03
+#define regBIF_CFG_DEV0_EPF0_VF7_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_HEADER 0x19c03
+#define regBIF_CFG_DEV0_EPF0_VF7_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BIST 0x19c03
+#define regBIF_CFG_DEV0_EPF0_VF7_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1 0x19c04
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2 0x19c05
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3 0x19c06
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4 0x19c07
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5 0x19c08
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6 0x19c09
+#define regBIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR 0x19c0a
+#define regBIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID 0x19c0b
+#define regBIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR 0x19c0c
+#define regBIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_CAP_PTR 0x19c0d
+#define regBIF_CFG_DEV0_EPF0_VF7_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE 0x19c0f
+#define regBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN 0x19c0f
+#define regBIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MIN_GRANT 0x19c0f
+#define regBIF_CFG_DEV0_EPF0_VF7_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY 0x19c0f
+#define regBIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST 0x19c19
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP 0x19c19
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP 0x19c1a
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL 0x19c1b
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS 0x19c1b
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CAP 0x19c1c
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL 0x19c1d
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS 0x19c1d
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2 0x19c22
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2 0x19c23
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2 0x19c23
+#define regBIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CAP2 0x19c24
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2 0x19c25
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2 0x19c25
+#define regBIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST 0x19c28
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL 0x19c28
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO 0x19c29
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI 0x19c2a
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA 0x19c2a
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA 0x19c2a
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MASK 0x19c2b
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64 0x19c2b
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64 0x19c2b
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64 0x19c2c
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING 0x19c2c
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64 0x19c2d
+#define regBIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST 0x19c30
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL 0x19c30
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE 0x19c31
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_PBA 0x19c32
+#define regBIF_CFG_DEV0_EPF0_VF7_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x19c40
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR 0x19c41
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1 0x19c42
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2 0x19c43
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x19c54
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS 0x19c55
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK 0x19c56
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY 0x19c57
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS 0x19c58
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK 0x19c59
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL 0x19c5a
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0 0x19c5b
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1 0x19c5c
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2 0x19c5d
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3 0x19c5e
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0 0x19c62
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1 0x19c63
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2 0x19c64
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3 0x19c65
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST 0x19cca
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP 0x19ccb
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL 0x19ccb
+#define regBIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf1_bifcfgdecp
+// base address: 0x10141000
+#define regBIF_CFG_DEV0_EPF1_VENDOR_ID 0x10400
+#define regBIF_CFG_DEV0_EPF1_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_ID 0x10400
+#define regBIF_CFG_DEV0_EPF1_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_COMMAND 0x10401
+#define regBIF_CFG_DEV0_EPF1_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_STATUS 0x10401
+#define regBIF_CFG_DEV0_EPF1_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_REVISION_ID 0x10402
+#define regBIF_CFG_DEV0_EPF1_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PROG_INTERFACE 0x10402
+#define regBIF_CFG_DEV0_EPF1_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_SUB_CLASS 0x10402
+#define regBIF_CFG_DEV0_EPF1_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_CLASS 0x10402
+#define regBIF_CFG_DEV0_EPF1_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_CACHE_LINE 0x10403
+#define regBIF_CFG_DEV0_EPF1_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LATENCY 0x10403
+#define regBIF_CFG_DEV0_EPF1_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_HEADER 0x10403
+#define regBIF_CFG_DEV0_EPF1_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BIST 0x10403
+#define regBIF_CFG_DEV0_EPF1_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_1 0x10404
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_2 0x10405
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_3 0x10406
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_4 0x10407
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_5 0x10408
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_6 0x10409
+#define regBIF_CFG_DEV0_EPF1_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR 0x1040a
+#define regBIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_ADAPTER_ID 0x1040b
+#define regBIF_CFG_DEV0_EPF1_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_ROM_BASE_ADDR 0x1040c
+#define regBIF_CFG_DEV0_EPF1_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_CAP_PTR 0x1040d
+#define regBIF_CFG_DEV0_EPF1_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_INTERRUPT_LINE 0x1040f
+#define regBIF_CFG_DEV0_EPF1_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_INTERRUPT_PIN 0x1040f
+#define regBIF_CFG_DEV0_EPF1_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MIN_GRANT 0x1040f
+#define regBIF_CFG_DEV0_EPF1_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MAX_LATENCY 0x1040f
+#define regBIF_CFG_DEV0_EPF1_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST 0x10412
+#define regBIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_ADAPTER_ID_W 0x10413
+#define regBIF_CFG_DEV0_EPF1_ADAPTER_ID_W_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PMI_CAP_LIST 0x10414
+#define regBIF_CFG_DEV0_EPF1_PMI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PMI_CAP 0x10414
+#define regBIF_CFG_DEV0_EPF1_PMI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL 0x10415
+#define regBIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_CAP_LIST 0x10419
+#define regBIF_CFG_DEV0_EPF1_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_CAP 0x10419
+#define regBIF_CFG_DEV0_EPF1_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CAP 0x1041a
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CNTL 0x1041b
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_STATUS 0x1041b
+#define regBIF_CFG_DEV0_EPF1_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LINK_CAP 0x1041c
+#define regBIF_CFG_DEV0_EPF1_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LINK_CNTL 0x1041d
+#define regBIF_CFG_DEV0_EPF1_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LINK_STATUS 0x1041d
+#define regBIF_CFG_DEV0_EPF1_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CAP2 0x10422
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CNTL2 0x10423
+#define regBIF_CFG_DEV0_EPF1_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_DEVICE_STATUS2 0x10423
+#define regBIF_CFG_DEV0_EPF1_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LINK_CAP2 0x10424
+#define regBIF_CFG_DEV0_EPF1_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LINK_CNTL2 0x10425
+#define regBIF_CFG_DEV0_EPF1_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_LINK_STATUS2 0x10425
+#define regBIF_CFG_DEV0_EPF1_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_CAP_LIST 0x10428
+#define regBIF_CFG_DEV0_EPF1_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_CNTL 0x10428
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO 0x10429
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI 0x1042a
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_DATA 0x1042a
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA 0x1042a
+#define regBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MASK 0x1042b
+#define regBIF_CFG_DEV0_EPF1_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64 0x1042b
+#define regBIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64 0x1042b
+#define regBIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_MASK_64 0x1042c
+#define regBIF_CFG_DEV0_EPF1_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_PENDING 0x1042c
+#define regBIF_CFG_DEV0_EPF1_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSI_PENDING_64 0x1042d
+#define regBIF_CFG_DEV0_EPF1_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSIX_CAP_LIST 0x10430
+#define regBIF_CFG_DEV0_EPF1_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL 0x10430
+#define regBIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSIX_TABLE 0x10431
+#define regBIF_CFG_DEV0_EPF1_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_MSIX_PBA 0x10432
+#define regBIF_CFG_DEV0_EPF1_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x10440
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR 0x10441
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1 0x10442
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2 0x10443
+#define regBIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x10450
+#define regBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW1 0x10451
+#define regBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW2 0x10452
+#define regBIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x10454
+#define regBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS 0x10455
+#define regBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK 0x10456
+#define regBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY 0x10457
+#define regBIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS 0x10458
+#define regBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK 0x10459
+#define regBIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL 0x1045a
+#define regBIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0 0x1045b
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1 0x1045c
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2 0x1045d
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3 0x1045e
+#define regBIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0 0x10462
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1 0x10463
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2 0x10464
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3 0x10465
+#define regBIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST 0x10480
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP 0x10481
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL 0x10482
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP 0x10483
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL 0x10484
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP 0x10485
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL 0x10486
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP 0x10487
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL 0x10488
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP 0x10489
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL 0x1048a
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP 0x1048b
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL 0x1048c
+#define regBIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x10490
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT 0x10491
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA 0x10492
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP 0x10493
+#define regBIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST 0x10494
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_CAP 0x10495
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR 0x10496
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS 0x10497
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL 0x10497
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x10498
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x10498
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x10498
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x10498
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x10499
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x10499
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x10499
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x10499
+#define regBIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST 0x1049c
+#define regBIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3 0x1049d
+#define regBIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_ERROR_STATUS 0x1049e
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_ERROR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL 0x1049f
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL 0x1049f
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL 0x104a0
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL 0x104a0
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL 0x104a1
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL 0x104a1
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL 0x104a2
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL 0x104a2
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL 0x104a3
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL 0x104a3
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL 0x104a4
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL 0x104a4
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL 0x104a5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL 0x104a5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL 0x104a6
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL 0x104a6
+#define regBIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST 0x104a8
+#define regBIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ACS_CAP 0x104a9
+#define regBIF_CFG_DEV0_EPF1_PCIE_ACS_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL 0x104a9
+#define regBIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST 0x104b4
+#define regBIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PASID_CAP 0x104b5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PASID_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL 0x104b5
+#define regBIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST 0x104c8
+#define regBIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_LTR_CAP 0x104c9
+#define regBIF_CFG_DEV0_EPF1_PCIE_LTR_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST 0x104ca
+#define regBIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ARI_CAP 0x104cb
+#define regBIF_CFG_DEV0_EPF1_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL 0x104cb
+#define regBIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST 0x104cc
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP 0x104cd
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL 0x104ce
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_STATUS 0x104ce
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_INITIAL_VFS 0x104cf
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_INITIAL_VFS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_TOTAL_VFS 0x104cf
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_TOTAL_VFS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_NUM_VFS 0x104d0
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_NUM_VFS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_FUNC_DEP_LINK 0x104d0
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_FUNC_DEP_LINK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_FIRST_VF_OFFSET 0x104d1
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_FIRST_VF_OFFSET_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_STRIDE 0x104d1
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_STRIDE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_DEVICE_ID 0x104d2
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x104d3
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_SUPPORTED_PAGE_SIZE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x104d4
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_SYSTEM_PAGE_SIZE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_0 0x104d5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_1 0x104d6
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_2 0x104d7
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_3 0x104d8
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_4 0x104d9
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_5 0x104da
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x104db
+#define regBIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x10530
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CAP 0x10531
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL 0x10532
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CAP 0x10533
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL 0x10534
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CAP 0x10535
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL 0x10536
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CAP 0x10537
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL 0x10538
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CAP 0x10539
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL 0x1053a
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CAP 0x1053b
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL 0x1053c
+#define regBIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf2_bifcfgdecp
+// base address: 0x10142000
+#define regBIF_CFG_DEV0_EPF2_VENDOR_ID 0x10800
+#define regBIF_CFG_DEV0_EPF2_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_ID 0x10800
+#define regBIF_CFG_DEV0_EPF2_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_COMMAND 0x10801
+#define regBIF_CFG_DEV0_EPF2_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_STATUS 0x10801
+#define regBIF_CFG_DEV0_EPF2_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_REVISION_ID 0x10802
+#define regBIF_CFG_DEV0_EPF2_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PROG_INTERFACE 0x10802
+#define regBIF_CFG_DEV0_EPF2_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_SUB_CLASS 0x10802
+#define regBIF_CFG_DEV0_EPF2_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_CLASS 0x10802
+#define regBIF_CFG_DEV0_EPF2_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_CACHE_LINE 0x10803
+#define regBIF_CFG_DEV0_EPF2_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LATENCY 0x10803
+#define regBIF_CFG_DEV0_EPF2_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_HEADER 0x10803
+#define regBIF_CFG_DEV0_EPF2_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BIST 0x10803
+#define regBIF_CFG_DEV0_EPF2_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_1 0x10804
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_2 0x10805
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_3 0x10806
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_4 0x10807
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_5 0x10808
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_6 0x10809
+#define regBIF_CFG_DEV0_EPF2_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_CARDBUS_CIS_PTR 0x1080a
+#define regBIF_CFG_DEV0_EPF2_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_ADAPTER_ID 0x1080b
+#define regBIF_CFG_DEV0_EPF2_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_ROM_BASE_ADDR 0x1080c
+#define regBIF_CFG_DEV0_EPF2_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_CAP_PTR 0x1080d
+#define regBIF_CFG_DEV0_EPF2_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_INTERRUPT_LINE 0x1080f
+#define regBIF_CFG_DEV0_EPF2_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_INTERRUPT_PIN 0x1080f
+#define regBIF_CFG_DEV0_EPF2_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MIN_GRANT 0x1080f
+#define regBIF_CFG_DEV0_EPF2_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MAX_LATENCY 0x1080f
+#define regBIF_CFG_DEV0_EPF2_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST 0x10812
+#define regBIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_ADAPTER_ID_W 0x10813
+#define regBIF_CFG_DEV0_EPF2_ADAPTER_ID_W_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PMI_CAP_LIST 0x10814
+#define regBIF_CFG_DEV0_EPF2_PMI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PMI_CAP 0x10814
+#define regBIF_CFG_DEV0_EPF2_PMI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL 0x10815
+#define regBIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_SBRN 0x10818
+#define regBIF_CFG_DEV0_EPF2_SBRN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_FLADJ 0x10818
+#define regBIF_CFG_DEV0_EPF2_FLADJ_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DBESL_DBESLD 0x10818
+#define regBIF_CFG_DEV0_EPF2_DBESL_DBESLD_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_CAP_LIST 0x10819
+#define regBIF_CFG_DEV0_EPF2_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_CAP 0x10819
+#define regBIF_CFG_DEV0_EPF2_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CAP 0x1081a
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CNTL 0x1081b
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_STATUS 0x1081b
+#define regBIF_CFG_DEV0_EPF2_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LINK_CAP 0x1081c
+#define regBIF_CFG_DEV0_EPF2_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LINK_CNTL 0x1081d
+#define regBIF_CFG_DEV0_EPF2_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LINK_STATUS 0x1081d
+#define regBIF_CFG_DEV0_EPF2_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CAP2 0x10822
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CNTL2 0x10823
+#define regBIF_CFG_DEV0_EPF2_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_DEVICE_STATUS2 0x10823
+#define regBIF_CFG_DEV0_EPF2_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LINK_CAP2 0x10824
+#define regBIF_CFG_DEV0_EPF2_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LINK_CNTL2 0x10825
+#define regBIF_CFG_DEV0_EPF2_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_LINK_STATUS2 0x10825
+#define regBIF_CFG_DEV0_EPF2_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_CAP_LIST 0x10828
+#define regBIF_CFG_DEV0_EPF2_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_CNTL 0x10828
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_LO 0x10829
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_HI 0x1082a
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_DATA 0x1082a
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA 0x1082a
+#define regBIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MASK 0x1082b
+#define regBIF_CFG_DEV0_EPF2_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_DATA_64 0x1082b
+#define regBIF_CFG_DEV0_EPF2_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA_64 0x1082b
+#define regBIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_MASK_64 0x1082c
+#define regBIF_CFG_DEV0_EPF2_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_PENDING 0x1082c
+#define regBIF_CFG_DEV0_EPF2_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSI_PENDING_64 0x1082d
+#define regBIF_CFG_DEV0_EPF2_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSIX_CAP_LIST 0x10830
+#define regBIF_CFG_DEV0_EPF2_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL 0x10830
+#define regBIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSIX_TABLE 0x10831
+#define regBIF_CFG_DEV0_EPF2_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_MSIX_PBA 0x10832
+#define regBIF_CFG_DEV0_EPF2_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x10840
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR 0x10841
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC1 0x10842
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC2 0x10843
+#define regBIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x10854
+#define regBIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS 0x10855
+#define regBIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK 0x10856
+#define regBIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY 0x10857
+#define regBIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS 0x10858
+#define regBIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK 0x10859
+#define regBIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL 0x1085a
+#define regBIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG0 0x1085b
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG1 0x1085c
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG2 0x1085d
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG3 0x1085e
+#define regBIF_CFG_DEV0_EPF2_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG0 0x10862
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG1 0x10863
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG2 0x10864
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG3 0x10865
+#define regBIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST 0x10880
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR1_CAP 0x10881
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR1_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL 0x10882
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR2_CAP 0x10883
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR2_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL 0x10884
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR3_CAP 0x10885
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR3_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL 0x10886
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR4_CAP 0x10887
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR4_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL 0x10888
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR5_CAP 0x10889
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR5_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL 0x1088a
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR6_CAP 0x1088b
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR6_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL 0x1088c
+#define regBIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x10890
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA_SELECT 0x10891
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA_SELECT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA 0x10892
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_CAP 0x10893
+#define regBIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST 0x10894
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_CAP 0x10895
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_LATENCY_INDICATOR 0x10896
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_LATENCY_INDICATOR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS 0x10897
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_CNTL 0x10897
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x10898
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x10898
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x10898
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x10898
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x10899
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x10899
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x10899
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x10899
+#define regBIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST 0x108a8
+#define regBIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ACS_CAP 0x108a9
+#define regBIF_CFG_DEV0_EPF2_PCIE_ACS_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL 0x108a9
+#define regBIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST 0x108b4
+#define regBIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PASID_CAP 0x108b5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PASID_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL 0x108b5
+#define regBIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST 0x108ca
+#define regBIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ARI_CAP 0x108cb
+#define regBIF_CFG_DEV0_EPF2_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL 0x108cb
+#define regBIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf3_bifcfgdecp
+// base address: 0x10143000
+#define regBIF_CFG_DEV0_EPF3_VENDOR_ID 0x10c00
+#define regBIF_CFG_DEV0_EPF3_VENDOR_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_ID 0x10c00
+#define regBIF_CFG_DEV0_EPF3_DEVICE_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_COMMAND 0x10c01
+#define regBIF_CFG_DEV0_EPF3_COMMAND_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_STATUS 0x10c01
+#define regBIF_CFG_DEV0_EPF3_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_REVISION_ID 0x10c02
+#define regBIF_CFG_DEV0_EPF3_REVISION_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PROG_INTERFACE 0x10c02
+#define regBIF_CFG_DEV0_EPF3_PROG_INTERFACE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_SUB_CLASS 0x10c02
+#define regBIF_CFG_DEV0_EPF3_SUB_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_CLASS 0x10c02
+#define regBIF_CFG_DEV0_EPF3_BASE_CLASS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_CACHE_LINE 0x10c03
+#define regBIF_CFG_DEV0_EPF3_CACHE_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LATENCY 0x10c03
+#define regBIF_CFG_DEV0_EPF3_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_HEADER 0x10c03
+#define regBIF_CFG_DEV0_EPF3_HEADER_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BIST 0x10c03
+#define regBIF_CFG_DEV0_EPF3_BIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_1 0x10c04
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_2 0x10c05
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_3 0x10c06
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_4 0x10c07
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_5 0x10c08
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_6 0x10c09
+#define regBIF_CFG_DEV0_EPF3_BASE_ADDR_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_CARDBUS_CIS_PTR 0x10c0a
+#define regBIF_CFG_DEV0_EPF3_CARDBUS_CIS_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_ADAPTER_ID 0x10c0b
+#define regBIF_CFG_DEV0_EPF3_ADAPTER_ID_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_ROM_BASE_ADDR 0x10c0c
+#define regBIF_CFG_DEV0_EPF3_ROM_BASE_ADDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_CAP_PTR 0x10c0d
+#define regBIF_CFG_DEV0_EPF3_CAP_PTR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_INTERRUPT_LINE 0x10c0f
+#define regBIF_CFG_DEV0_EPF3_INTERRUPT_LINE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_INTERRUPT_PIN 0x10c0f
+#define regBIF_CFG_DEV0_EPF3_INTERRUPT_PIN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MIN_GRANT 0x10c0f
+#define regBIF_CFG_DEV0_EPF3_MIN_GRANT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MAX_LATENCY 0x10c0f
+#define regBIF_CFG_DEV0_EPF3_MAX_LATENCY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST 0x10c12
+#define regBIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_ADAPTER_ID_W 0x10c13
+#define regBIF_CFG_DEV0_EPF3_ADAPTER_ID_W_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PMI_CAP_LIST 0x10c14
+#define regBIF_CFG_DEV0_EPF3_PMI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PMI_CAP 0x10c14
+#define regBIF_CFG_DEV0_EPF3_PMI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL 0x10c15
+#define regBIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_SBRN 0x10c18
+#define regBIF_CFG_DEV0_EPF3_SBRN_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_FLADJ 0x10c18
+#define regBIF_CFG_DEV0_EPF3_FLADJ_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DBESL_DBESLD 0x10c18
+#define regBIF_CFG_DEV0_EPF3_DBESL_DBESLD_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_CAP_LIST 0x10c19
+#define regBIF_CFG_DEV0_EPF3_PCIE_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_CAP 0x10c19
+#define regBIF_CFG_DEV0_EPF3_PCIE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CAP 0x10c1a
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CNTL 0x10c1b
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_STATUS 0x10c1b
+#define regBIF_CFG_DEV0_EPF3_DEVICE_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LINK_CAP 0x10c1c
+#define regBIF_CFG_DEV0_EPF3_LINK_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LINK_CNTL 0x10c1d
+#define regBIF_CFG_DEV0_EPF3_LINK_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LINK_STATUS 0x10c1d
+#define regBIF_CFG_DEV0_EPF3_LINK_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CAP2 0x10c22
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CNTL2 0x10c23
+#define regBIF_CFG_DEV0_EPF3_DEVICE_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_DEVICE_STATUS2 0x10c23
+#define regBIF_CFG_DEV0_EPF3_DEVICE_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LINK_CAP2 0x10c24
+#define regBIF_CFG_DEV0_EPF3_LINK_CAP2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LINK_CNTL2 0x10c25
+#define regBIF_CFG_DEV0_EPF3_LINK_CNTL2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_LINK_STATUS2 0x10c25
+#define regBIF_CFG_DEV0_EPF3_LINK_STATUS2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_CAP_LIST 0x10c28
+#define regBIF_CFG_DEV0_EPF3_MSI_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_CNTL 0x10c28
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_LO 0x10c29
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_LO_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_HI 0x10c2a
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_HI_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_DATA 0x10c2a
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA 0x10c2a
+#define regBIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MASK 0x10c2b
+#define regBIF_CFG_DEV0_EPF3_MSI_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_DATA_64 0x10c2b
+#define regBIF_CFG_DEV0_EPF3_MSI_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA_64 0x10c2b
+#define regBIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_MASK_64 0x10c2c
+#define regBIF_CFG_DEV0_EPF3_MSI_MASK_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_PENDING 0x10c2c
+#define regBIF_CFG_DEV0_EPF3_MSI_PENDING_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSI_PENDING_64 0x10c2d
+#define regBIF_CFG_DEV0_EPF3_MSI_PENDING_64_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSIX_CAP_LIST 0x10c30
+#define regBIF_CFG_DEV0_EPF3_MSIX_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL 0x10c30
+#define regBIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSIX_TABLE 0x10c31
+#define regBIF_CFG_DEV0_EPF3_MSIX_TABLE_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_MSIX_PBA 0x10c32
+#define regBIF_CFG_DEV0_EPF3_MSIX_PBA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x10c40
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR 0x10c41
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC1 0x10c42
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC2 0x10c43
+#define regBIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x10c54
+#define regBIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS 0x10c55
+#define regBIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK 0x10c56
+#define regBIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY 0x10c57
+#define regBIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS 0x10c58
+#define regBIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK 0x10c59
+#define regBIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL 0x10c5a
+#define regBIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG0 0x10c5b
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG1 0x10c5c
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG2 0x10c5d
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG3 0x10c5e
+#define regBIF_CFG_DEV0_EPF3_PCIE_HDR_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG0 0x10c62
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG1 0x10c63
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG2 0x10c64
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG3 0x10c65
+#define regBIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST 0x10c80
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR1_CAP 0x10c81
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR1_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL 0x10c82
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR2_CAP 0x10c83
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR2_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL 0x10c84
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR3_CAP 0x10c85
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR3_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL 0x10c86
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR4_CAP 0x10c87
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR4_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL 0x10c88
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR5_CAP 0x10c89
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR5_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL 0x10c8a
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR6_CAP 0x10c8b
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR6_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL 0x10c8c
+#define regBIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x10c90
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA_SELECT 0x10c91
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA_SELECT_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA 0x10c92
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_CAP 0x10c93
+#define regBIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST 0x10c94
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_CAP 0x10c95
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_LATENCY_INDICATOR 0x10c96
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_LATENCY_INDICATOR_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS 0x10c97
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_CNTL 0x10c97
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x10c98
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x10c98
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x10c98
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x10c98
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x10c99
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x10c99
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x10c99
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x10c99
+#define regBIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST 0x10ca8
+#define regBIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ACS_CAP 0x10ca9
+#define regBIF_CFG_DEV0_EPF3_PCIE_ACS_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL 0x10ca9
+#define regBIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST 0x10cb4
+#define regBIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PASID_CAP 0x10cb5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PASID_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL 0x10cb5
+#define regBIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST 0x10cca
+#define regBIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ARI_CAP 0x10ccb
+#define regBIF_CFG_DEV0_EPF3_PCIE_ARI_CAP_BASE_IDX 5
+#define regBIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL 0x10ccb
+#define regBIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_DEV0_1_RCC_VDM_SUPPORT 0xc440
+#define regRCC_DEV0_1_RCC_VDM_SUPPORT_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_BUS_CNTL 0xc441
+#define regRCC_DEV0_1_RCC_BUS_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_FEATURES_CONTROL_MISC 0xc442
+#define regRCC_DEV0_1_RCC_FEATURES_CONTROL_MISC_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_DEV0_LINK_CNTL 0xc443
+#define regRCC_DEV0_1_RCC_DEV0_LINK_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CMN_LINK_CNTL 0xc444
+#define regRCC_DEV0_1_RCC_CMN_LINK_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE 0xc445
+#define regRCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_LTR_LSWITCH_CNTL 0xc446
+#define regRCC_DEV0_1_RCC_LTR_LSWITCH_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_MH_ARB_CNTL 0xc447
+#define regRCC_DEV0_1_RCC_MH_ARB_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0 0xc448
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1 0xc449
+#define regRCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_ep_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_EP_DEV0_1_EP_PCIE_SCRATCH 0xc44c
+#define regRCC_EP_DEV0_1_EP_PCIE_SCRATCH_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_CNTL 0xc44e
+#define regRCC_EP_DEV0_1_EP_PCIE_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_CNTL 0xc44f
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_STATUS 0xc450
+#define regRCC_EP_DEV0_1_EP_PCIE_INT_STATUS_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL2 0xc451
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL2_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_BUS_CNTL 0xc452
+#define regRCC_EP_DEV0_1_EP_PCIE_BUS_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_CFG_CNTL 0xc453
+#define regRCC_EP_DEV0_1_EP_PCIE_CFG_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL 0xc454
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC 0xc455
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2 0xc456
+#define regRCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP 0xc457
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR 0xc458
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL 0xc458
+#define regRCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0xc458
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0xc459
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0xc45a
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0xc45a
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0xc45a
+#define regRCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_PME_CONTROL 0xc45c
+#define regRCC_EP_DEV0_1_EP_PCIE_PME_CONTROL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIEP_RESERVED 0xc45d
+#define regRCC_EP_DEV0_1_EP_PCIEP_RESERVED_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_CNTL 0xc45f
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID 0xc460
+#define regRCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_ERR_CNTL 0xc461
+#define regRCC_EP_DEV0_1_EP_PCIE_ERR_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL 0xc462
+#define regRCC_EP_DEV0_1_EP_PCIE_RX_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL 0xc463
+#define regRCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_dwn_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_DWN_DEV0_1_DN_PCIE_RESERVED 0xc468
+#define regRCC_DWN_DEV0_1_DN_PCIE_RESERVED_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_SCRATCH 0xc469
+#define regRCC_DWN_DEV0_1_DN_PCIE_SCRATCH_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_CNTL 0xc46b
+#define regRCC_DWN_DEV0_1_DN_PCIE_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL 0xc46c
+#define regRCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2 0xc46d
+#define regRCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL 0xc46e
+#define regRCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL 0xc46f
+#define regRCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_F0 0xc470
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_F0_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC 0xc471
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC_BASE_IDX 5
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2 0xc472
+#define regRCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_dwnp_dev0_RCCPORTDEC
+// base address: 0x10131000
+#define regRCC_DWNP_DEV0_1_PCIE_ERR_CNTL 0xc475
+#define regRCC_DWNP_DEV0_1_PCIE_ERR_CNTL_BASE_IDX 5
+#define regRCC_DWNP_DEV0_1_PCIE_RX_CNTL 0xc476
+#define regRCC_DWNP_DEV0_1_PCIE_RX_CNTL_BASE_IDX 5
+#define regRCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL 0xc477
+#define regRCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL_BASE_IDX 5
+#define regRCC_DWNP_DEV0_1_PCIE_LC_CNTL2 0xc478
+#define regRCC_DWNP_DEV0_1_PCIE_LC_CNTL2_BASE_IDX 5
+#define regRCC_DWNP_DEV0_1_PCIEP_STRAP_MISC 0xc479
+#define regRCC_DWNP_DEV0_1_PCIEP_STRAP_MISC_BASE_IDX 5
+#define regRCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP 0xc47a
+#define regRCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_pfc_amdgfx_RCCPFCDEC
+// base address: 0x10134000
+#define regRCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL 0xd040
+#define regRCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE 0xd041
+#define regRCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0 0xd042
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1 0xd043
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2 0xd044
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3 0xd045
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4 0xd046
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5 0xd047
+#define regRCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5_BASE_IDX 5
+#define regRCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL 0xd048
+#define regRCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_pfc_amdgfxaz_RCCPFCDEC
+// base address: 0x10134200
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL 0xd0c0
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE 0xd0c1
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0 0xd0c2
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1 0xd0c3
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2 0xd0c4
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3 0xd0c5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4 0xd0c6
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5 0xd0c7
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5_BASE_IDX 5
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL 0xd0c8
+#define regRCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_pciemsix_0_usb_MSIXTDEC
+// base address: 0x10178000
+#define regPCIEMSIX_VECT0_ADDR_LO 0x1e000
+#define regPCIEMSIX_VECT0_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT0_ADDR_HI 0x1e001
+#define regPCIEMSIX_VECT0_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT0_MSG_DATA 0x1e002
+#define regPCIEMSIX_VECT0_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT0_CONTROL 0x1e003
+#define regPCIEMSIX_VECT0_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT1_ADDR_LO 0x1e004
+#define regPCIEMSIX_VECT1_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT1_ADDR_HI 0x1e005
+#define regPCIEMSIX_VECT1_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT1_MSG_DATA 0x1e006
+#define regPCIEMSIX_VECT1_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT1_CONTROL 0x1e007
+#define regPCIEMSIX_VECT1_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT2_ADDR_LO 0x1e008
+#define regPCIEMSIX_VECT2_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT2_ADDR_HI 0x1e009
+#define regPCIEMSIX_VECT2_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT2_MSG_DATA 0x1e00a
+#define regPCIEMSIX_VECT2_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT2_CONTROL 0x1e00b
+#define regPCIEMSIX_VECT2_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT3_ADDR_LO 0x1e00c
+#define regPCIEMSIX_VECT3_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT3_ADDR_HI 0x1e00d
+#define regPCIEMSIX_VECT3_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT3_MSG_DATA 0x1e00e
+#define regPCIEMSIX_VECT3_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT3_CONTROL 0x1e00f
+#define regPCIEMSIX_VECT3_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT4_ADDR_LO 0x1e010
+#define regPCIEMSIX_VECT4_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT4_ADDR_HI 0x1e011
+#define regPCIEMSIX_VECT4_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT4_MSG_DATA 0x1e012
+#define regPCIEMSIX_VECT4_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT4_CONTROL 0x1e013
+#define regPCIEMSIX_VECT4_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT5_ADDR_LO 0x1e014
+#define regPCIEMSIX_VECT5_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT5_ADDR_HI 0x1e015
+#define regPCIEMSIX_VECT5_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT5_MSG_DATA 0x1e016
+#define regPCIEMSIX_VECT5_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT5_CONTROL 0x1e017
+#define regPCIEMSIX_VECT5_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT6_ADDR_LO 0x1e018
+#define regPCIEMSIX_VECT6_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT6_ADDR_HI 0x1e019
+#define regPCIEMSIX_VECT6_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT6_MSG_DATA 0x1e01a
+#define regPCIEMSIX_VECT6_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT6_CONTROL 0x1e01b
+#define regPCIEMSIX_VECT6_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT7_ADDR_LO 0x1e01c
+#define regPCIEMSIX_VECT7_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT7_ADDR_HI 0x1e01d
+#define regPCIEMSIX_VECT7_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT7_MSG_DATA 0x1e01e
+#define regPCIEMSIX_VECT7_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT7_CONTROL 0x1e01f
+#define regPCIEMSIX_VECT7_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT8_ADDR_LO 0x1e020
+#define regPCIEMSIX_VECT8_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT8_ADDR_HI 0x1e021
+#define regPCIEMSIX_VECT8_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT8_MSG_DATA 0x1e022
+#define regPCIEMSIX_VECT8_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT8_CONTROL 0x1e023
+#define regPCIEMSIX_VECT8_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT9_ADDR_LO 0x1e024
+#define regPCIEMSIX_VECT9_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT9_ADDR_HI 0x1e025
+#define regPCIEMSIX_VECT9_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT9_MSG_DATA 0x1e026
+#define regPCIEMSIX_VECT9_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT9_CONTROL 0x1e027
+#define regPCIEMSIX_VECT9_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT10_ADDR_LO 0x1e028
+#define regPCIEMSIX_VECT10_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT10_ADDR_HI 0x1e029
+#define regPCIEMSIX_VECT10_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT10_MSG_DATA 0x1e02a
+#define regPCIEMSIX_VECT10_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT10_CONTROL 0x1e02b
+#define regPCIEMSIX_VECT10_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT11_ADDR_LO 0x1e02c
+#define regPCIEMSIX_VECT11_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT11_ADDR_HI 0x1e02d
+#define regPCIEMSIX_VECT11_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT11_MSG_DATA 0x1e02e
+#define regPCIEMSIX_VECT11_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT11_CONTROL 0x1e02f
+#define regPCIEMSIX_VECT11_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT12_ADDR_LO 0x1e030
+#define regPCIEMSIX_VECT12_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT12_ADDR_HI 0x1e031
+#define regPCIEMSIX_VECT12_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT12_MSG_DATA 0x1e032
+#define regPCIEMSIX_VECT12_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT12_CONTROL 0x1e033
+#define regPCIEMSIX_VECT12_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT13_ADDR_LO 0x1e034
+#define regPCIEMSIX_VECT13_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT13_ADDR_HI 0x1e035
+#define regPCIEMSIX_VECT13_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT13_MSG_DATA 0x1e036
+#define regPCIEMSIX_VECT13_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT13_CONTROL 0x1e037
+#define regPCIEMSIX_VECT13_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT14_ADDR_LO 0x1e038
+#define regPCIEMSIX_VECT14_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT14_ADDR_HI 0x1e039
+#define regPCIEMSIX_VECT14_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT14_MSG_DATA 0x1e03a
+#define regPCIEMSIX_VECT14_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT14_CONTROL 0x1e03b
+#define regPCIEMSIX_VECT14_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT15_ADDR_LO 0x1e03c
+#define regPCIEMSIX_VECT15_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT15_ADDR_HI 0x1e03d
+#define regPCIEMSIX_VECT15_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT15_MSG_DATA 0x1e03e
+#define regPCIEMSIX_VECT15_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT15_CONTROL 0x1e03f
+#define regPCIEMSIX_VECT15_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT16_ADDR_LO 0x1e040
+#define regPCIEMSIX_VECT16_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT16_ADDR_HI 0x1e041
+#define regPCIEMSIX_VECT16_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT16_MSG_DATA 0x1e042
+#define regPCIEMSIX_VECT16_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT16_CONTROL 0x1e043
+#define regPCIEMSIX_VECT16_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT17_ADDR_LO 0x1e044
+#define regPCIEMSIX_VECT17_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT17_ADDR_HI 0x1e045
+#define regPCIEMSIX_VECT17_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT17_MSG_DATA 0x1e046
+#define regPCIEMSIX_VECT17_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT17_CONTROL 0x1e047
+#define regPCIEMSIX_VECT17_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT18_ADDR_LO 0x1e048
+#define regPCIEMSIX_VECT18_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT18_ADDR_HI 0x1e049
+#define regPCIEMSIX_VECT18_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT18_MSG_DATA 0x1e04a
+#define regPCIEMSIX_VECT18_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT18_CONTROL 0x1e04b
+#define regPCIEMSIX_VECT18_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT19_ADDR_LO 0x1e04c
+#define regPCIEMSIX_VECT19_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT19_ADDR_HI 0x1e04d
+#define regPCIEMSIX_VECT19_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT19_MSG_DATA 0x1e04e
+#define regPCIEMSIX_VECT19_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT19_CONTROL 0x1e04f
+#define regPCIEMSIX_VECT19_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT20_ADDR_LO 0x1e050
+#define regPCIEMSIX_VECT20_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT20_ADDR_HI 0x1e051
+#define regPCIEMSIX_VECT20_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT20_MSG_DATA 0x1e052
+#define regPCIEMSIX_VECT20_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT20_CONTROL 0x1e053
+#define regPCIEMSIX_VECT20_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT21_ADDR_LO 0x1e054
+#define regPCIEMSIX_VECT21_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT21_ADDR_HI 0x1e055
+#define regPCIEMSIX_VECT21_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT21_MSG_DATA 0x1e056
+#define regPCIEMSIX_VECT21_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT21_CONTROL 0x1e057
+#define regPCIEMSIX_VECT21_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT22_ADDR_LO 0x1e058
+#define regPCIEMSIX_VECT22_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT22_ADDR_HI 0x1e059
+#define regPCIEMSIX_VECT22_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT22_MSG_DATA 0x1e05a
+#define regPCIEMSIX_VECT22_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT22_CONTROL 0x1e05b
+#define regPCIEMSIX_VECT22_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT23_ADDR_LO 0x1e05c
+#define regPCIEMSIX_VECT23_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT23_ADDR_HI 0x1e05d
+#define regPCIEMSIX_VECT23_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT23_MSG_DATA 0x1e05e
+#define regPCIEMSIX_VECT23_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT23_CONTROL 0x1e05f
+#define regPCIEMSIX_VECT23_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT24_ADDR_LO 0x1e060
+#define regPCIEMSIX_VECT24_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT24_ADDR_HI 0x1e061
+#define regPCIEMSIX_VECT24_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT24_MSG_DATA 0x1e062
+#define regPCIEMSIX_VECT24_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT24_CONTROL 0x1e063
+#define regPCIEMSIX_VECT24_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT25_ADDR_LO 0x1e064
+#define regPCIEMSIX_VECT25_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT25_ADDR_HI 0x1e065
+#define regPCIEMSIX_VECT25_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT25_MSG_DATA 0x1e066
+#define regPCIEMSIX_VECT25_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT25_CONTROL 0x1e067
+#define regPCIEMSIX_VECT25_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT26_ADDR_LO 0x1e068
+#define regPCIEMSIX_VECT26_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT26_ADDR_HI 0x1e069
+#define regPCIEMSIX_VECT26_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT26_MSG_DATA 0x1e06a
+#define regPCIEMSIX_VECT26_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT26_CONTROL 0x1e06b
+#define regPCIEMSIX_VECT26_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT27_ADDR_LO 0x1e06c
+#define regPCIEMSIX_VECT27_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT27_ADDR_HI 0x1e06d
+#define regPCIEMSIX_VECT27_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT27_MSG_DATA 0x1e06e
+#define regPCIEMSIX_VECT27_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT27_CONTROL 0x1e06f
+#define regPCIEMSIX_VECT27_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT28_ADDR_LO 0x1e070
+#define regPCIEMSIX_VECT28_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT28_ADDR_HI 0x1e071
+#define regPCIEMSIX_VECT28_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT28_MSG_DATA 0x1e072
+#define regPCIEMSIX_VECT28_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT28_CONTROL 0x1e073
+#define regPCIEMSIX_VECT28_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT29_ADDR_LO 0x1e074
+#define regPCIEMSIX_VECT29_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT29_ADDR_HI 0x1e075
+#define regPCIEMSIX_VECT29_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT29_MSG_DATA 0x1e076
+#define regPCIEMSIX_VECT29_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT29_CONTROL 0x1e077
+#define regPCIEMSIX_VECT29_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT30_ADDR_LO 0x1e078
+#define regPCIEMSIX_VECT30_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT30_ADDR_HI 0x1e079
+#define regPCIEMSIX_VECT30_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT30_MSG_DATA 0x1e07a
+#define regPCIEMSIX_VECT30_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT30_CONTROL 0x1e07b
+#define regPCIEMSIX_VECT30_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT31_ADDR_LO 0x1e07c
+#define regPCIEMSIX_VECT31_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT31_ADDR_HI 0x1e07d
+#define regPCIEMSIX_VECT31_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT31_MSG_DATA 0x1e07e
+#define regPCIEMSIX_VECT31_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT31_CONTROL 0x1e07f
+#define regPCIEMSIX_VECT31_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT32_ADDR_LO 0x1e080
+#define regPCIEMSIX_VECT32_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT32_ADDR_HI 0x1e081
+#define regPCIEMSIX_VECT32_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT32_MSG_DATA 0x1e082
+#define regPCIEMSIX_VECT32_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT32_CONTROL 0x1e083
+#define regPCIEMSIX_VECT32_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT33_ADDR_LO 0x1e084
+#define regPCIEMSIX_VECT33_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT33_ADDR_HI 0x1e085
+#define regPCIEMSIX_VECT33_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT33_MSG_DATA 0x1e086
+#define regPCIEMSIX_VECT33_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT33_CONTROL 0x1e087
+#define regPCIEMSIX_VECT33_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT34_ADDR_LO 0x1e088
+#define regPCIEMSIX_VECT34_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT34_ADDR_HI 0x1e089
+#define regPCIEMSIX_VECT34_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT34_MSG_DATA 0x1e08a
+#define regPCIEMSIX_VECT34_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT34_CONTROL 0x1e08b
+#define regPCIEMSIX_VECT34_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT35_ADDR_LO 0x1e08c
+#define regPCIEMSIX_VECT35_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT35_ADDR_HI 0x1e08d
+#define regPCIEMSIX_VECT35_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT35_MSG_DATA 0x1e08e
+#define regPCIEMSIX_VECT35_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT35_CONTROL 0x1e08f
+#define regPCIEMSIX_VECT35_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT36_ADDR_LO 0x1e090
+#define regPCIEMSIX_VECT36_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT36_ADDR_HI 0x1e091
+#define regPCIEMSIX_VECT36_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT36_MSG_DATA 0x1e092
+#define regPCIEMSIX_VECT36_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT36_CONTROL 0x1e093
+#define regPCIEMSIX_VECT36_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT37_ADDR_LO 0x1e094
+#define regPCIEMSIX_VECT37_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT37_ADDR_HI 0x1e095
+#define regPCIEMSIX_VECT37_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT37_MSG_DATA 0x1e096
+#define regPCIEMSIX_VECT37_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT37_CONTROL 0x1e097
+#define regPCIEMSIX_VECT37_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT38_ADDR_LO 0x1e098
+#define regPCIEMSIX_VECT38_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT38_ADDR_HI 0x1e099
+#define regPCIEMSIX_VECT38_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT38_MSG_DATA 0x1e09a
+#define regPCIEMSIX_VECT38_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT38_CONTROL 0x1e09b
+#define regPCIEMSIX_VECT38_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT39_ADDR_LO 0x1e09c
+#define regPCIEMSIX_VECT39_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT39_ADDR_HI 0x1e09d
+#define regPCIEMSIX_VECT39_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT39_MSG_DATA 0x1e09e
+#define regPCIEMSIX_VECT39_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT39_CONTROL 0x1e09f
+#define regPCIEMSIX_VECT39_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT40_ADDR_LO 0x1e0a0
+#define regPCIEMSIX_VECT40_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT40_ADDR_HI 0x1e0a1
+#define regPCIEMSIX_VECT40_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT40_MSG_DATA 0x1e0a2
+#define regPCIEMSIX_VECT40_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT40_CONTROL 0x1e0a3
+#define regPCIEMSIX_VECT40_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT41_ADDR_LO 0x1e0a4
+#define regPCIEMSIX_VECT41_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT41_ADDR_HI 0x1e0a5
+#define regPCIEMSIX_VECT41_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT41_MSG_DATA 0x1e0a6
+#define regPCIEMSIX_VECT41_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT41_CONTROL 0x1e0a7
+#define regPCIEMSIX_VECT41_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT42_ADDR_LO 0x1e0a8
+#define regPCIEMSIX_VECT42_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT42_ADDR_HI 0x1e0a9
+#define regPCIEMSIX_VECT42_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT42_MSG_DATA 0x1e0aa
+#define regPCIEMSIX_VECT42_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT42_CONTROL 0x1e0ab
+#define regPCIEMSIX_VECT42_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT43_ADDR_LO 0x1e0ac
+#define regPCIEMSIX_VECT43_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT43_ADDR_HI 0x1e0ad
+#define regPCIEMSIX_VECT43_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT43_MSG_DATA 0x1e0ae
+#define regPCIEMSIX_VECT43_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT43_CONTROL 0x1e0af
+#define regPCIEMSIX_VECT43_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT44_ADDR_LO 0x1e0b0
+#define regPCIEMSIX_VECT44_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT44_ADDR_HI 0x1e0b1
+#define regPCIEMSIX_VECT44_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT44_MSG_DATA 0x1e0b2
+#define regPCIEMSIX_VECT44_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT44_CONTROL 0x1e0b3
+#define regPCIEMSIX_VECT44_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT45_ADDR_LO 0x1e0b4
+#define regPCIEMSIX_VECT45_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT45_ADDR_HI 0x1e0b5
+#define regPCIEMSIX_VECT45_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT45_MSG_DATA 0x1e0b6
+#define regPCIEMSIX_VECT45_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT45_CONTROL 0x1e0b7
+#define regPCIEMSIX_VECT45_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT46_ADDR_LO 0x1e0b8
+#define regPCIEMSIX_VECT46_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT46_ADDR_HI 0x1e0b9
+#define regPCIEMSIX_VECT46_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT46_MSG_DATA 0x1e0ba
+#define regPCIEMSIX_VECT46_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT46_CONTROL 0x1e0bb
+#define regPCIEMSIX_VECT46_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT47_ADDR_LO 0x1e0bc
+#define regPCIEMSIX_VECT47_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT47_ADDR_HI 0x1e0bd
+#define regPCIEMSIX_VECT47_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT47_MSG_DATA 0x1e0be
+#define regPCIEMSIX_VECT47_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT47_CONTROL 0x1e0bf
+#define regPCIEMSIX_VECT47_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT48_ADDR_LO 0x1e0c0
+#define regPCIEMSIX_VECT48_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT48_ADDR_HI 0x1e0c1
+#define regPCIEMSIX_VECT48_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT48_MSG_DATA 0x1e0c2
+#define regPCIEMSIX_VECT48_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT48_CONTROL 0x1e0c3
+#define regPCIEMSIX_VECT48_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT49_ADDR_LO 0x1e0c4
+#define regPCIEMSIX_VECT49_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT49_ADDR_HI 0x1e0c5
+#define regPCIEMSIX_VECT49_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT49_MSG_DATA 0x1e0c6
+#define regPCIEMSIX_VECT49_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT49_CONTROL 0x1e0c7
+#define regPCIEMSIX_VECT49_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT50_ADDR_LO 0x1e0c8
+#define regPCIEMSIX_VECT50_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT50_ADDR_HI 0x1e0c9
+#define regPCIEMSIX_VECT50_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT50_MSG_DATA 0x1e0ca
+#define regPCIEMSIX_VECT50_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT50_CONTROL 0x1e0cb
+#define regPCIEMSIX_VECT50_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT51_ADDR_LO 0x1e0cc
+#define regPCIEMSIX_VECT51_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT51_ADDR_HI 0x1e0cd
+#define regPCIEMSIX_VECT51_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT51_MSG_DATA 0x1e0ce
+#define regPCIEMSIX_VECT51_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT51_CONTROL 0x1e0cf
+#define regPCIEMSIX_VECT51_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT52_ADDR_LO 0x1e0d0
+#define regPCIEMSIX_VECT52_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT52_ADDR_HI 0x1e0d1
+#define regPCIEMSIX_VECT52_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT52_MSG_DATA 0x1e0d2
+#define regPCIEMSIX_VECT52_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT52_CONTROL 0x1e0d3
+#define regPCIEMSIX_VECT52_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT53_ADDR_LO 0x1e0d4
+#define regPCIEMSIX_VECT53_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT53_ADDR_HI 0x1e0d5
+#define regPCIEMSIX_VECT53_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT53_MSG_DATA 0x1e0d6
+#define regPCIEMSIX_VECT53_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT53_CONTROL 0x1e0d7
+#define regPCIEMSIX_VECT53_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT54_ADDR_LO 0x1e0d8
+#define regPCIEMSIX_VECT54_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT54_ADDR_HI 0x1e0d9
+#define regPCIEMSIX_VECT54_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT54_MSG_DATA 0x1e0da
+#define regPCIEMSIX_VECT54_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT54_CONTROL 0x1e0db
+#define regPCIEMSIX_VECT54_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT55_ADDR_LO 0x1e0dc
+#define regPCIEMSIX_VECT55_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT55_ADDR_HI 0x1e0dd
+#define regPCIEMSIX_VECT55_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT55_MSG_DATA 0x1e0de
+#define regPCIEMSIX_VECT55_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT55_CONTROL 0x1e0df
+#define regPCIEMSIX_VECT55_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT56_ADDR_LO 0x1e0e0
+#define regPCIEMSIX_VECT56_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT56_ADDR_HI 0x1e0e1
+#define regPCIEMSIX_VECT56_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT56_MSG_DATA 0x1e0e2
+#define regPCIEMSIX_VECT56_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT56_CONTROL 0x1e0e3
+#define regPCIEMSIX_VECT56_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT57_ADDR_LO 0x1e0e4
+#define regPCIEMSIX_VECT57_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT57_ADDR_HI 0x1e0e5
+#define regPCIEMSIX_VECT57_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT57_MSG_DATA 0x1e0e6
+#define regPCIEMSIX_VECT57_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT57_CONTROL 0x1e0e7
+#define regPCIEMSIX_VECT57_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT58_ADDR_LO 0x1e0e8
+#define regPCIEMSIX_VECT58_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT58_ADDR_HI 0x1e0e9
+#define regPCIEMSIX_VECT58_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT58_MSG_DATA 0x1e0ea
+#define regPCIEMSIX_VECT58_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT58_CONTROL 0x1e0eb
+#define regPCIEMSIX_VECT58_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT59_ADDR_LO 0x1e0ec
+#define regPCIEMSIX_VECT59_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT59_ADDR_HI 0x1e0ed
+#define regPCIEMSIX_VECT59_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT59_MSG_DATA 0x1e0ee
+#define regPCIEMSIX_VECT59_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT59_CONTROL 0x1e0ef
+#define regPCIEMSIX_VECT59_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT60_ADDR_LO 0x1e0f0
+#define regPCIEMSIX_VECT60_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT60_ADDR_HI 0x1e0f1
+#define regPCIEMSIX_VECT60_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT60_MSG_DATA 0x1e0f2
+#define regPCIEMSIX_VECT60_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT60_CONTROL 0x1e0f3
+#define regPCIEMSIX_VECT60_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT61_ADDR_LO 0x1e0f4
+#define regPCIEMSIX_VECT61_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT61_ADDR_HI 0x1e0f5
+#define regPCIEMSIX_VECT61_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT61_MSG_DATA 0x1e0f6
+#define regPCIEMSIX_VECT61_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT61_CONTROL 0x1e0f7
+#define regPCIEMSIX_VECT61_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT62_ADDR_LO 0x1e0f8
+#define regPCIEMSIX_VECT62_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT62_ADDR_HI 0x1e0f9
+#define regPCIEMSIX_VECT62_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT62_MSG_DATA 0x1e0fa
+#define regPCIEMSIX_VECT62_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT62_CONTROL 0x1e0fb
+#define regPCIEMSIX_VECT62_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT63_ADDR_LO 0x1e0fc
+#define regPCIEMSIX_VECT63_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT63_ADDR_HI 0x1e0fd
+#define regPCIEMSIX_VECT63_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT63_MSG_DATA 0x1e0fe
+#define regPCIEMSIX_VECT63_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT63_CONTROL 0x1e0ff
+#define regPCIEMSIX_VECT63_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT64_ADDR_LO 0x1e100
+#define regPCIEMSIX_VECT64_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT64_ADDR_HI 0x1e101
+#define regPCIEMSIX_VECT64_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT64_MSG_DATA 0x1e102
+#define regPCIEMSIX_VECT64_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT64_CONTROL 0x1e103
+#define regPCIEMSIX_VECT64_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT65_ADDR_LO 0x1e104
+#define regPCIEMSIX_VECT65_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT65_ADDR_HI 0x1e105
+#define regPCIEMSIX_VECT65_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT65_MSG_DATA 0x1e106
+#define regPCIEMSIX_VECT65_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT65_CONTROL 0x1e107
+#define regPCIEMSIX_VECT65_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT66_ADDR_LO 0x1e108
+#define regPCIEMSIX_VECT66_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT66_ADDR_HI 0x1e109
+#define regPCIEMSIX_VECT66_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT66_MSG_DATA 0x1e10a
+#define regPCIEMSIX_VECT66_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT66_CONTROL 0x1e10b
+#define regPCIEMSIX_VECT66_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT67_ADDR_LO 0x1e10c
+#define regPCIEMSIX_VECT67_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT67_ADDR_HI 0x1e10d
+#define regPCIEMSIX_VECT67_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT67_MSG_DATA 0x1e10e
+#define regPCIEMSIX_VECT67_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT67_CONTROL 0x1e10f
+#define regPCIEMSIX_VECT67_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT68_ADDR_LO 0x1e110
+#define regPCIEMSIX_VECT68_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT68_ADDR_HI 0x1e111
+#define regPCIEMSIX_VECT68_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT68_MSG_DATA 0x1e112
+#define regPCIEMSIX_VECT68_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT68_CONTROL 0x1e113
+#define regPCIEMSIX_VECT68_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT69_ADDR_LO 0x1e114
+#define regPCIEMSIX_VECT69_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT69_ADDR_HI 0x1e115
+#define regPCIEMSIX_VECT69_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT69_MSG_DATA 0x1e116
+#define regPCIEMSIX_VECT69_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT69_CONTROL 0x1e117
+#define regPCIEMSIX_VECT69_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT70_ADDR_LO 0x1e118
+#define regPCIEMSIX_VECT70_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT70_ADDR_HI 0x1e119
+#define regPCIEMSIX_VECT70_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT70_MSG_DATA 0x1e11a
+#define regPCIEMSIX_VECT70_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT70_CONTROL 0x1e11b
+#define regPCIEMSIX_VECT70_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT71_ADDR_LO 0x1e11c
+#define regPCIEMSIX_VECT71_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT71_ADDR_HI 0x1e11d
+#define regPCIEMSIX_VECT71_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT71_MSG_DATA 0x1e11e
+#define regPCIEMSIX_VECT71_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT71_CONTROL 0x1e11f
+#define regPCIEMSIX_VECT71_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT72_ADDR_LO 0x1e120
+#define regPCIEMSIX_VECT72_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT72_ADDR_HI 0x1e121
+#define regPCIEMSIX_VECT72_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT72_MSG_DATA 0x1e122
+#define regPCIEMSIX_VECT72_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT72_CONTROL 0x1e123
+#define regPCIEMSIX_VECT72_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT73_ADDR_LO 0x1e124
+#define regPCIEMSIX_VECT73_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT73_ADDR_HI 0x1e125
+#define regPCIEMSIX_VECT73_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT73_MSG_DATA 0x1e126
+#define regPCIEMSIX_VECT73_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT73_CONTROL 0x1e127
+#define regPCIEMSIX_VECT73_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT74_ADDR_LO 0x1e128
+#define regPCIEMSIX_VECT74_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT74_ADDR_HI 0x1e129
+#define regPCIEMSIX_VECT74_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT74_MSG_DATA 0x1e12a
+#define regPCIEMSIX_VECT74_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT74_CONTROL 0x1e12b
+#define regPCIEMSIX_VECT74_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT75_ADDR_LO 0x1e12c
+#define regPCIEMSIX_VECT75_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT75_ADDR_HI 0x1e12d
+#define regPCIEMSIX_VECT75_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT75_MSG_DATA 0x1e12e
+#define regPCIEMSIX_VECT75_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT75_CONTROL 0x1e12f
+#define regPCIEMSIX_VECT75_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT76_ADDR_LO 0x1e130
+#define regPCIEMSIX_VECT76_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT76_ADDR_HI 0x1e131
+#define regPCIEMSIX_VECT76_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT76_MSG_DATA 0x1e132
+#define regPCIEMSIX_VECT76_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT76_CONTROL 0x1e133
+#define regPCIEMSIX_VECT76_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT77_ADDR_LO 0x1e134
+#define regPCIEMSIX_VECT77_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT77_ADDR_HI 0x1e135
+#define regPCIEMSIX_VECT77_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT77_MSG_DATA 0x1e136
+#define regPCIEMSIX_VECT77_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT77_CONTROL 0x1e137
+#define regPCIEMSIX_VECT77_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT78_ADDR_LO 0x1e138
+#define regPCIEMSIX_VECT78_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT78_ADDR_HI 0x1e139
+#define regPCIEMSIX_VECT78_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT78_MSG_DATA 0x1e13a
+#define regPCIEMSIX_VECT78_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT78_CONTROL 0x1e13b
+#define regPCIEMSIX_VECT78_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT79_ADDR_LO 0x1e13c
+#define regPCIEMSIX_VECT79_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT79_ADDR_HI 0x1e13d
+#define regPCIEMSIX_VECT79_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT79_MSG_DATA 0x1e13e
+#define regPCIEMSIX_VECT79_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT79_CONTROL 0x1e13f
+#define regPCIEMSIX_VECT79_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT80_ADDR_LO 0x1e140
+#define regPCIEMSIX_VECT80_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT80_ADDR_HI 0x1e141
+#define regPCIEMSIX_VECT80_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT80_MSG_DATA 0x1e142
+#define regPCIEMSIX_VECT80_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT80_CONTROL 0x1e143
+#define regPCIEMSIX_VECT80_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT81_ADDR_LO 0x1e144
+#define regPCIEMSIX_VECT81_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT81_ADDR_HI 0x1e145
+#define regPCIEMSIX_VECT81_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT81_MSG_DATA 0x1e146
+#define regPCIEMSIX_VECT81_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT81_CONTROL 0x1e147
+#define regPCIEMSIX_VECT81_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT82_ADDR_LO 0x1e148
+#define regPCIEMSIX_VECT82_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT82_ADDR_HI 0x1e149
+#define regPCIEMSIX_VECT82_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT82_MSG_DATA 0x1e14a
+#define regPCIEMSIX_VECT82_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT82_CONTROL 0x1e14b
+#define regPCIEMSIX_VECT82_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT83_ADDR_LO 0x1e14c
+#define regPCIEMSIX_VECT83_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT83_ADDR_HI 0x1e14d
+#define regPCIEMSIX_VECT83_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT83_MSG_DATA 0x1e14e
+#define regPCIEMSIX_VECT83_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT83_CONTROL 0x1e14f
+#define regPCIEMSIX_VECT83_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT84_ADDR_LO 0x1e150
+#define regPCIEMSIX_VECT84_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT84_ADDR_HI 0x1e151
+#define regPCIEMSIX_VECT84_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT84_MSG_DATA 0x1e152
+#define regPCIEMSIX_VECT84_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT84_CONTROL 0x1e153
+#define regPCIEMSIX_VECT84_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT85_ADDR_LO 0x1e154
+#define regPCIEMSIX_VECT85_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT85_ADDR_HI 0x1e155
+#define regPCIEMSIX_VECT85_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT85_MSG_DATA 0x1e156
+#define regPCIEMSIX_VECT85_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT85_CONTROL 0x1e157
+#define regPCIEMSIX_VECT85_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT86_ADDR_LO 0x1e158
+#define regPCIEMSIX_VECT86_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT86_ADDR_HI 0x1e159
+#define regPCIEMSIX_VECT86_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT86_MSG_DATA 0x1e15a
+#define regPCIEMSIX_VECT86_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT86_CONTROL 0x1e15b
+#define regPCIEMSIX_VECT86_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT87_ADDR_LO 0x1e15c
+#define regPCIEMSIX_VECT87_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT87_ADDR_HI 0x1e15d
+#define regPCIEMSIX_VECT87_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT87_MSG_DATA 0x1e15e
+#define regPCIEMSIX_VECT87_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT87_CONTROL 0x1e15f
+#define regPCIEMSIX_VECT87_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT88_ADDR_LO 0x1e160
+#define regPCIEMSIX_VECT88_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT88_ADDR_HI 0x1e161
+#define regPCIEMSIX_VECT88_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT88_MSG_DATA 0x1e162
+#define regPCIEMSIX_VECT88_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT88_CONTROL 0x1e163
+#define regPCIEMSIX_VECT88_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT89_ADDR_LO 0x1e164
+#define regPCIEMSIX_VECT89_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT89_ADDR_HI 0x1e165
+#define regPCIEMSIX_VECT89_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT89_MSG_DATA 0x1e166
+#define regPCIEMSIX_VECT89_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT89_CONTROL 0x1e167
+#define regPCIEMSIX_VECT89_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT90_ADDR_LO 0x1e168
+#define regPCIEMSIX_VECT90_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT90_ADDR_HI 0x1e169
+#define regPCIEMSIX_VECT90_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT90_MSG_DATA 0x1e16a
+#define regPCIEMSIX_VECT90_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT90_CONTROL 0x1e16b
+#define regPCIEMSIX_VECT90_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT91_ADDR_LO 0x1e16c
+#define regPCIEMSIX_VECT91_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT91_ADDR_HI 0x1e16d
+#define regPCIEMSIX_VECT91_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT91_MSG_DATA 0x1e16e
+#define regPCIEMSIX_VECT91_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT91_CONTROL 0x1e16f
+#define regPCIEMSIX_VECT91_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT92_ADDR_LO 0x1e170
+#define regPCIEMSIX_VECT92_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT92_ADDR_HI 0x1e171
+#define regPCIEMSIX_VECT92_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT92_MSG_DATA 0x1e172
+#define regPCIEMSIX_VECT92_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT92_CONTROL 0x1e173
+#define regPCIEMSIX_VECT92_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT93_ADDR_LO 0x1e174
+#define regPCIEMSIX_VECT93_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT93_ADDR_HI 0x1e175
+#define regPCIEMSIX_VECT93_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT93_MSG_DATA 0x1e176
+#define regPCIEMSIX_VECT93_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT93_CONTROL 0x1e177
+#define regPCIEMSIX_VECT93_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT94_ADDR_LO 0x1e178
+#define regPCIEMSIX_VECT94_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT94_ADDR_HI 0x1e179
+#define regPCIEMSIX_VECT94_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT94_MSG_DATA 0x1e17a
+#define regPCIEMSIX_VECT94_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT94_CONTROL 0x1e17b
+#define regPCIEMSIX_VECT94_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT95_ADDR_LO 0x1e17c
+#define regPCIEMSIX_VECT95_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT95_ADDR_HI 0x1e17d
+#define regPCIEMSIX_VECT95_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT95_MSG_DATA 0x1e17e
+#define regPCIEMSIX_VECT95_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT95_CONTROL 0x1e17f
+#define regPCIEMSIX_VECT95_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT96_ADDR_LO 0x1e180
+#define regPCIEMSIX_VECT96_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT96_ADDR_HI 0x1e181
+#define regPCIEMSIX_VECT96_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT96_MSG_DATA 0x1e182
+#define regPCIEMSIX_VECT96_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT96_CONTROL 0x1e183
+#define regPCIEMSIX_VECT96_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT97_ADDR_LO 0x1e184
+#define regPCIEMSIX_VECT97_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT97_ADDR_HI 0x1e185
+#define regPCIEMSIX_VECT97_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT97_MSG_DATA 0x1e186
+#define regPCIEMSIX_VECT97_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT97_CONTROL 0x1e187
+#define regPCIEMSIX_VECT97_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT98_ADDR_LO 0x1e188
+#define regPCIEMSIX_VECT98_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT98_ADDR_HI 0x1e189
+#define regPCIEMSIX_VECT98_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT98_MSG_DATA 0x1e18a
+#define regPCIEMSIX_VECT98_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT98_CONTROL 0x1e18b
+#define regPCIEMSIX_VECT98_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT99_ADDR_LO 0x1e18c
+#define regPCIEMSIX_VECT99_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT99_ADDR_HI 0x1e18d
+#define regPCIEMSIX_VECT99_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT99_MSG_DATA 0x1e18e
+#define regPCIEMSIX_VECT99_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT99_CONTROL 0x1e18f
+#define regPCIEMSIX_VECT99_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT100_ADDR_LO 0x1e190
+#define regPCIEMSIX_VECT100_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT100_ADDR_HI 0x1e191
+#define regPCIEMSIX_VECT100_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT100_MSG_DATA 0x1e192
+#define regPCIEMSIX_VECT100_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT100_CONTROL 0x1e193
+#define regPCIEMSIX_VECT100_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT101_ADDR_LO 0x1e194
+#define regPCIEMSIX_VECT101_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT101_ADDR_HI 0x1e195
+#define regPCIEMSIX_VECT101_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT101_MSG_DATA 0x1e196
+#define regPCIEMSIX_VECT101_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT101_CONTROL 0x1e197
+#define regPCIEMSIX_VECT101_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT102_ADDR_LO 0x1e198
+#define regPCIEMSIX_VECT102_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT102_ADDR_HI 0x1e199
+#define regPCIEMSIX_VECT102_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT102_MSG_DATA 0x1e19a
+#define regPCIEMSIX_VECT102_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT102_CONTROL 0x1e19b
+#define regPCIEMSIX_VECT102_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT103_ADDR_LO 0x1e19c
+#define regPCIEMSIX_VECT103_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT103_ADDR_HI 0x1e19d
+#define regPCIEMSIX_VECT103_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT103_MSG_DATA 0x1e19e
+#define regPCIEMSIX_VECT103_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT103_CONTROL 0x1e19f
+#define regPCIEMSIX_VECT103_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT104_ADDR_LO 0x1e1a0
+#define regPCIEMSIX_VECT104_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT104_ADDR_HI 0x1e1a1
+#define regPCIEMSIX_VECT104_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT104_MSG_DATA 0x1e1a2
+#define regPCIEMSIX_VECT104_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT104_CONTROL 0x1e1a3
+#define regPCIEMSIX_VECT104_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT105_ADDR_LO 0x1e1a4
+#define regPCIEMSIX_VECT105_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT105_ADDR_HI 0x1e1a5
+#define regPCIEMSIX_VECT105_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT105_MSG_DATA 0x1e1a6
+#define regPCIEMSIX_VECT105_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT105_CONTROL 0x1e1a7
+#define regPCIEMSIX_VECT105_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT106_ADDR_LO 0x1e1a8
+#define regPCIEMSIX_VECT106_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT106_ADDR_HI 0x1e1a9
+#define regPCIEMSIX_VECT106_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT106_MSG_DATA 0x1e1aa
+#define regPCIEMSIX_VECT106_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT106_CONTROL 0x1e1ab
+#define regPCIEMSIX_VECT106_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT107_ADDR_LO 0x1e1ac
+#define regPCIEMSIX_VECT107_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT107_ADDR_HI 0x1e1ad
+#define regPCIEMSIX_VECT107_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT107_MSG_DATA 0x1e1ae
+#define regPCIEMSIX_VECT107_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT107_CONTROL 0x1e1af
+#define regPCIEMSIX_VECT107_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT108_ADDR_LO 0x1e1b0
+#define regPCIEMSIX_VECT108_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT108_ADDR_HI 0x1e1b1
+#define regPCIEMSIX_VECT108_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT108_MSG_DATA 0x1e1b2
+#define regPCIEMSIX_VECT108_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT108_CONTROL 0x1e1b3
+#define regPCIEMSIX_VECT108_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT109_ADDR_LO 0x1e1b4
+#define regPCIEMSIX_VECT109_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT109_ADDR_HI 0x1e1b5
+#define regPCIEMSIX_VECT109_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT109_MSG_DATA 0x1e1b6
+#define regPCIEMSIX_VECT109_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT109_CONTROL 0x1e1b7
+#define regPCIEMSIX_VECT109_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT110_ADDR_LO 0x1e1b8
+#define regPCIEMSIX_VECT110_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT110_ADDR_HI 0x1e1b9
+#define regPCIEMSIX_VECT110_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT110_MSG_DATA 0x1e1ba
+#define regPCIEMSIX_VECT110_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT110_CONTROL 0x1e1bb
+#define regPCIEMSIX_VECT110_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT111_ADDR_LO 0x1e1bc
+#define regPCIEMSIX_VECT111_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT111_ADDR_HI 0x1e1bd
+#define regPCIEMSIX_VECT111_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT111_MSG_DATA 0x1e1be
+#define regPCIEMSIX_VECT111_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT111_CONTROL 0x1e1bf
+#define regPCIEMSIX_VECT111_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT112_ADDR_LO 0x1e1c0
+#define regPCIEMSIX_VECT112_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT112_ADDR_HI 0x1e1c1
+#define regPCIEMSIX_VECT112_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT112_MSG_DATA 0x1e1c2
+#define regPCIEMSIX_VECT112_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT112_CONTROL 0x1e1c3
+#define regPCIEMSIX_VECT112_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT113_ADDR_LO 0x1e1c4
+#define regPCIEMSIX_VECT113_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT113_ADDR_HI 0x1e1c5
+#define regPCIEMSIX_VECT113_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT113_MSG_DATA 0x1e1c6
+#define regPCIEMSIX_VECT113_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT113_CONTROL 0x1e1c7
+#define regPCIEMSIX_VECT113_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT114_ADDR_LO 0x1e1c8
+#define regPCIEMSIX_VECT114_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT114_ADDR_HI 0x1e1c9
+#define regPCIEMSIX_VECT114_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT114_MSG_DATA 0x1e1ca
+#define regPCIEMSIX_VECT114_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT114_CONTROL 0x1e1cb
+#define regPCIEMSIX_VECT114_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT115_ADDR_LO 0x1e1cc
+#define regPCIEMSIX_VECT115_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT115_ADDR_HI 0x1e1cd
+#define regPCIEMSIX_VECT115_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT115_MSG_DATA 0x1e1ce
+#define regPCIEMSIX_VECT115_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT115_CONTROL 0x1e1cf
+#define regPCIEMSIX_VECT115_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT116_ADDR_LO 0x1e1d0
+#define regPCIEMSIX_VECT116_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT116_ADDR_HI 0x1e1d1
+#define regPCIEMSIX_VECT116_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT116_MSG_DATA 0x1e1d2
+#define regPCIEMSIX_VECT116_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT116_CONTROL 0x1e1d3
+#define regPCIEMSIX_VECT116_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT117_ADDR_LO 0x1e1d4
+#define regPCIEMSIX_VECT117_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT117_ADDR_HI 0x1e1d5
+#define regPCIEMSIX_VECT117_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT117_MSG_DATA 0x1e1d6
+#define regPCIEMSIX_VECT117_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT117_CONTROL 0x1e1d7
+#define regPCIEMSIX_VECT117_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT118_ADDR_LO 0x1e1d8
+#define regPCIEMSIX_VECT118_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT118_ADDR_HI 0x1e1d9
+#define regPCIEMSIX_VECT118_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT118_MSG_DATA 0x1e1da
+#define regPCIEMSIX_VECT118_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT118_CONTROL 0x1e1db
+#define regPCIEMSIX_VECT118_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT119_ADDR_LO 0x1e1dc
+#define regPCIEMSIX_VECT119_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT119_ADDR_HI 0x1e1dd
+#define regPCIEMSIX_VECT119_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT119_MSG_DATA 0x1e1de
+#define regPCIEMSIX_VECT119_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT119_CONTROL 0x1e1df
+#define regPCIEMSIX_VECT119_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT120_ADDR_LO 0x1e1e0
+#define regPCIEMSIX_VECT120_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT120_ADDR_HI 0x1e1e1
+#define regPCIEMSIX_VECT120_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT120_MSG_DATA 0x1e1e2
+#define regPCIEMSIX_VECT120_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT120_CONTROL 0x1e1e3
+#define regPCIEMSIX_VECT120_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT121_ADDR_LO 0x1e1e4
+#define regPCIEMSIX_VECT121_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT121_ADDR_HI 0x1e1e5
+#define regPCIEMSIX_VECT121_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT121_MSG_DATA 0x1e1e6
+#define regPCIEMSIX_VECT121_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT121_CONTROL 0x1e1e7
+#define regPCIEMSIX_VECT121_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT122_ADDR_LO 0x1e1e8
+#define regPCIEMSIX_VECT122_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT122_ADDR_HI 0x1e1e9
+#define regPCIEMSIX_VECT122_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT122_MSG_DATA 0x1e1ea
+#define regPCIEMSIX_VECT122_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT122_CONTROL 0x1e1eb
+#define regPCIEMSIX_VECT122_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT123_ADDR_LO 0x1e1ec
+#define regPCIEMSIX_VECT123_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT123_ADDR_HI 0x1e1ed
+#define regPCIEMSIX_VECT123_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT123_MSG_DATA 0x1e1ee
+#define regPCIEMSIX_VECT123_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT123_CONTROL 0x1e1ef
+#define regPCIEMSIX_VECT123_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT124_ADDR_LO 0x1e1f0
+#define regPCIEMSIX_VECT124_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT124_ADDR_HI 0x1e1f1
+#define regPCIEMSIX_VECT124_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT124_MSG_DATA 0x1e1f2
+#define regPCIEMSIX_VECT124_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT124_CONTROL 0x1e1f3
+#define regPCIEMSIX_VECT124_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT125_ADDR_LO 0x1e1f4
+#define regPCIEMSIX_VECT125_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT125_ADDR_HI 0x1e1f5
+#define regPCIEMSIX_VECT125_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT125_MSG_DATA 0x1e1f6
+#define regPCIEMSIX_VECT125_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT125_CONTROL 0x1e1f7
+#define regPCIEMSIX_VECT125_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT126_ADDR_LO 0x1e1f8
+#define regPCIEMSIX_VECT126_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT126_ADDR_HI 0x1e1f9
+#define regPCIEMSIX_VECT126_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT126_MSG_DATA 0x1e1fa
+#define regPCIEMSIX_VECT126_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT126_CONTROL 0x1e1fb
+#define regPCIEMSIX_VECT126_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT127_ADDR_LO 0x1e1fc
+#define regPCIEMSIX_VECT127_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT127_ADDR_HI 0x1e1fd
+#define regPCIEMSIX_VECT127_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT127_MSG_DATA 0x1e1fe
+#define regPCIEMSIX_VECT127_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT127_CONTROL 0x1e1ff
+#define regPCIEMSIX_VECT127_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT128_ADDR_LO 0x1e200
+#define regPCIEMSIX_VECT128_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT128_ADDR_HI 0x1e201
+#define regPCIEMSIX_VECT128_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT128_MSG_DATA 0x1e202
+#define regPCIEMSIX_VECT128_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT128_CONTROL 0x1e203
+#define regPCIEMSIX_VECT128_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT129_ADDR_LO 0x1e204
+#define regPCIEMSIX_VECT129_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT129_ADDR_HI 0x1e205
+#define regPCIEMSIX_VECT129_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT129_MSG_DATA 0x1e206
+#define regPCIEMSIX_VECT129_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT129_CONTROL 0x1e207
+#define regPCIEMSIX_VECT129_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT130_ADDR_LO 0x1e208
+#define regPCIEMSIX_VECT130_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT130_ADDR_HI 0x1e209
+#define regPCIEMSIX_VECT130_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT130_MSG_DATA 0x1e20a
+#define regPCIEMSIX_VECT130_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT130_CONTROL 0x1e20b
+#define regPCIEMSIX_VECT130_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT131_ADDR_LO 0x1e20c
+#define regPCIEMSIX_VECT131_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT131_ADDR_HI 0x1e20d
+#define regPCIEMSIX_VECT131_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT131_MSG_DATA 0x1e20e
+#define regPCIEMSIX_VECT131_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT131_CONTROL 0x1e20f
+#define regPCIEMSIX_VECT131_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT132_ADDR_LO 0x1e210
+#define regPCIEMSIX_VECT132_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT132_ADDR_HI 0x1e211
+#define regPCIEMSIX_VECT132_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT132_MSG_DATA 0x1e212
+#define regPCIEMSIX_VECT132_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT132_CONTROL 0x1e213
+#define regPCIEMSIX_VECT132_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT133_ADDR_LO 0x1e214
+#define regPCIEMSIX_VECT133_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT133_ADDR_HI 0x1e215
+#define regPCIEMSIX_VECT133_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT133_MSG_DATA 0x1e216
+#define regPCIEMSIX_VECT133_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT133_CONTROL 0x1e217
+#define regPCIEMSIX_VECT133_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT134_ADDR_LO 0x1e218
+#define regPCIEMSIX_VECT134_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT134_ADDR_HI 0x1e219
+#define regPCIEMSIX_VECT134_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT134_MSG_DATA 0x1e21a
+#define regPCIEMSIX_VECT134_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT134_CONTROL 0x1e21b
+#define regPCIEMSIX_VECT134_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT135_ADDR_LO 0x1e21c
+#define regPCIEMSIX_VECT135_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT135_ADDR_HI 0x1e21d
+#define regPCIEMSIX_VECT135_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT135_MSG_DATA 0x1e21e
+#define regPCIEMSIX_VECT135_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT135_CONTROL 0x1e21f
+#define regPCIEMSIX_VECT135_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT136_ADDR_LO 0x1e220
+#define regPCIEMSIX_VECT136_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT136_ADDR_HI 0x1e221
+#define regPCIEMSIX_VECT136_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT136_MSG_DATA 0x1e222
+#define regPCIEMSIX_VECT136_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT136_CONTROL 0x1e223
+#define regPCIEMSIX_VECT136_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT137_ADDR_LO 0x1e224
+#define regPCIEMSIX_VECT137_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT137_ADDR_HI 0x1e225
+#define regPCIEMSIX_VECT137_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT137_MSG_DATA 0x1e226
+#define regPCIEMSIX_VECT137_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT137_CONTROL 0x1e227
+#define regPCIEMSIX_VECT137_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT138_ADDR_LO 0x1e228
+#define regPCIEMSIX_VECT138_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT138_ADDR_HI 0x1e229
+#define regPCIEMSIX_VECT138_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT138_MSG_DATA 0x1e22a
+#define regPCIEMSIX_VECT138_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT138_CONTROL 0x1e22b
+#define regPCIEMSIX_VECT138_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT139_ADDR_LO 0x1e22c
+#define regPCIEMSIX_VECT139_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT139_ADDR_HI 0x1e22d
+#define regPCIEMSIX_VECT139_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT139_MSG_DATA 0x1e22e
+#define regPCIEMSIX_VECT139_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT139_CONTROL 0x1e22f
+#define regPCIEMSIX_VECT139_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT140_ADDR_LO 0x1e230
+#define regPCIEMSIX_VECT140_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT140_ADDR_HI 0x1e231
+#define regPCIEMSIX_VECT140_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT140_MSG_DATA 0x1e232
+#define regPCIEMSIX_VECT140_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT140_CONTROL 0x1e233
+#define regPCIEMSIX_VECT140_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT141_ADDR_LO 0x1e234
+#define regPCIEMSIX_VECT141_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT141_ADDR_HI 0x1e235
+#define regPCIEMSIX_VECT141_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT141_MSG_DATA 0x1e236
+#define regPCIEMSIX_VECT141_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT141_CONTROL 0x1e237
+#define regPCIEMSIX_VECT141_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT142_ADDR_LO 0x1e238
+#define regPCIEMSIX_VECT142_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT142_ADDR_HI 0x1e239
+#define regPCIEMSIX_VECT142_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT142_MSG_DATA 0x1e23a
+#define regPCIEMSIX_VECT142_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT142_CONTROL 0x1e23b
+#define regPCIEMSIX_VECT142_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT143_ADDR_LO 0x1e23c
+#define regPCIEMSIX_VECT143_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT143_ADDR_HI 0x1e23d
+#define regPCIEMSIX_VECT143_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT143_MSG_DATA 0x1e23e
+#define regPCIEMSIX_VECT143_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT143_CONTROL 0x1e23f
+#define regPCIEMSIX_VECT143_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT144_ADDR_LO 0x1e240
+#define regPCIEMSIX_VECT144_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT144_ADDR_HI 0x1e241
+#define regPCIEMSIX_VECT144_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT144_MSG_DATA 0x1e242
+#define regPCIEMSIX_VECT144_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT144_CONTROL 0x1e243
+#define regPCIEMSIX_VECT144_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT145_ADDR_LO 0x1e244
+#define regPCIEMSIX_VECT145_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT145_ADDR_HI 0x1e245
+#define regPCIEMSIX_VECT145_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT145_MSG_DATA 0x1e246
+#define regPCIEMSIX_VECT145_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT145_CONTROL 0x1e247
+#define regPCIEMSIX_VECT145_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT146_ADDR_LO 0x1e248
+#define regPCIEMSIX_VECT146_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT146_ADDR_HI 0x1e249
+#define regPCIEMSIX_VECT146_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT146_MSG_DATA 0x1e24a
+#define regPCIEMSIX_VECT146_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT146_CONTROL 0x1e24b
+#define regPCIEMSIX_VECT146_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT147_ADDR_LO 0x1e24c
+#define regPCIEMSIX_VECT147_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT147_ADDR_HI 0x1e24d
+#define regPCIEMSIX_VECT147_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT147_MSG_DATA 0x1e24e
+#define regPCIEMSIX_VECT147_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT147_CONTROL 0x1e24f
+#define regPCIEMSIX_VECT147_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT148_ADDR_LO 0x1e250
+#define regPCIEMSIX_VECT148_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT148_ADDR_HI 0x1e251
+#define regPCIEMSIX_VECT148_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT148_MSG_DATA 0x1e252
+#define regPCIEMSIX_VECT148_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT148_CONTROL 0x1e253
+#define regPCIEMSIX_VECT148_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT149_ADDR_LO 0x1e254
+#define regPCIEMSIX_VECT149_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT149_ADDR_HI 0x1e255
+#define regPCIEMSIX_VECT149_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT149_MSG_DATA 0x1e256
+#define regPCIEMSIX_VECT149_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT149_CONTROL 0x1e257
+#define regPCIEMSIX_VECT149_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT150_ADDR_LO 0x1e258
+#define regPCIEMSIX_VECT150_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT150_ADDR_HI 0x1e259
+#define regPCIEMSIX_VECT150_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT150_MSG_DATA 0x1e25a
+#define regPCIEMSIX_VECT150_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT150_CONTROL 0x1e25b
+#define regPCIEMSIX_VECT150_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT151_ADDR_LO 0x1e25c
+#define regPCIEMSIX_VECT151_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT151_ADDR_HI 0x1e25d
+#define regPCIEMSIX_VECT151_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT151_MSG_DATA 0x1e25e
+#define regPCIEMSIX_VECT151_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT151_CONTROL 0x1e25f
+#define regPCIEMSIX_VECT151_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT152_ADDR_LO 0x1e260
+#define regPCIEMSIX_VECT152_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT152_ADDR_HI 0x1e261
+#define regPCIEMSIX_VECT152_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT152_MSG_DATA 0x1e262
+#define regPCIEMSIX_VECT152_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT152_CONTROL 0x1e263
+#define regPCIEMSIX_VECT152_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT153_ADDR_LO 0x1e264
+#define regPCIEMSIX_VECT153_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT153_ADDR_HI 0x1e265
+#define regPCIEMSIX_VECT153_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT153_MSG_DATA 0x1e266
+#define regPCIEMSIX_VECT153_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT153_CONTROL 0x1e267
+#define regPCIEMSIX_VECT153_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT154_ADDR_LO 0x1e268
+#define regPCIEMSIX_VECT154_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT154_ADDR_HI 0x1e269
+#define regPCIEMSIX_VECT154_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT154_MSG_DATA 0x1e26a
+#define regPCIEMSIX_VECT154_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT154_CONTROL 0x1e26b
+#define regPCIEMSIX_VECT154_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT155_ADDR_LO 0x1e26c
+#define regPCIEMSIX_VECT155_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT155_ADDR_HI 0x1e26d
+#define regPCIEMSIX_VECT155_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT155_MSG_DATA 0x1e26e
+#define regPCIEMSIX_VECT155_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT155_CONTROL 0x1e26f
+#define regPCIEMSIX_VECT155_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT156_ADDR_LO 0x1e270
+#define regPCIEMSIX_VECT156_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT156_ADDR_HI 0x1e271
+#define regPCIEMSIX_VECT156_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT156_MSG_DATA 0x1e272
+#define regPCIEMSIX_VECT156_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT156_CONTROL 0x1e273
+#define regPCIEMSIX_VECT156_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT157_ADDR_LO 0x1e274
+#define regPCIEMSIX_VECT157_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT157_ADDR_HI 0x1e275
+#define regPCIEMSIX_VECT157_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT157_MSG_DATA 0x1e276
+#define regPCIEMSIX_VECT157_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT157_CONTROL 0x1e277
+#define regPCIEMSIX_VECT157_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT158_ADDR_LO 0x1e278
+#define regPCIEMSIX_VECT158_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT158_ADDR_HI 0x1e279
+#define regPCIEMSIX_VECT158_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT158_MSG_DATA 0x1e27a
+#define regPCIEMSIX_VECT158_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT158_CONTROL 0x1e27b
+#define regPCIEMSIX_VECT158_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT159_ADDR_LO 0x1e27c
+#define regPCIEMSIX_VECT159_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT159_ADDR_HI 0x1e27d
+#define regPCIEMSIX_VECT159_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT159_MSG_DATA 0x1e27e
+#define regPCIEMSIX_VECT159_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT159_CONTROL 0x1e27f
+#define regPCIEMSIX_VECT159_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT160_ADDR_LO 0x1e280
+#define regPCIEMSIX_VECT160_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT160_ADDR_HI 0x1e281
+#define regPCIEMSIX_VECT160_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT160_MSG_DATA 0x1e282
+#define regPCIEMSIX_VECT160_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT160_CONTROL 0x1e283
+#define regPCIEMSIX_VECT160_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT161_ADDR_LO 0x1e284
+#define regPCIEMSIX_VECT161_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT161_ADDR_HI 0x1e285
+#define regPCIEMSIX_VECT161_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT161_MSG_DATA 0x1e286
+#define regPCIEMSIX_VECT161_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT161_CONTROL 0x1e287
+#define regPCIEMSIX_VECT161_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT162_ADDR_LO 0x1e288
+#define regPCIEMSIX_VECT162_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT162_ADDR_HI 0x1e289
+#define regPCIEMSIX_VECT162_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT162_MSG_DATA 0x1e28a
+#define regPCIEMSIX_VECT162_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT162_CONTROL 0x1e28b
+#define regPCIEMSIX_VECT162_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT163_ADDR_LO 0x1e28c
+#define regPCIEMSIX_VECT163_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT163_ADDR_HI 0x1e28d
+#define regPCIEMSIX_VECT163_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT163_MSG_DATA 0x1e28e
+#define regPCIEMSIX_VECT163_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT163_CONTROL 0x1e28f
+#define regPCIEMSIX_VECT163_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT164_ADDR_LO 0x1e290
+#define regPCIEMSIX_VECT164_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT164_ADDR_HI 0x1e291
+#define regPCIEMSIX_VECT164_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT164_MSG_DATA 0x1e292
+#define regPCIEMSIX_VECT164_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT164_CONTROL 0x1e293
+#define regPCIEMSIX_VECT164_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT165_ADDR_LO 0x1e294
+#define regPCIEMSIX_VECT165_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT165_ADDR_HI 0x1e295
+#define regPCIEMSIX_VECT165_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT165_MSG_DATA 0x1e296
+#define regPCIEMSIX_VECT165_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT165_CONTROL 0x1e297
+#define regPCIEMSIX_VECT165_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT166_ADDR_LO 0x1e298
+#define regPCIEMSIX_VECT166_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT166_ADDR_HI 0x1e299
+#define regPCIEMSIX_VECT166_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT166_MSG_DATA 0x1e29a
+#define regPCIEMSIX_VECT166_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT166_CONTROL 0x1e29b
+#define regPCIEMSIX_VECT166_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT167_ADDR_LO 0x1e29c
+#define regPCIEMSIX_VECT167_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT167_ADDR_HI 0x1e29d
+#define regPCIEMSIX_VECT167_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT167_MSG_DATA 0x1e29e
+#define regPCIEMSIX_VECT167_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT167_CONTROL 0x1e29f
+#define regPCIEMSIX_VECT167_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT168_ADDR_LO 0x1e2a0
+#define regPCIEMSIX_VECT168_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT168_ADDR_HI 0x1e2a1
+#define regPCIEMSIX_VECT168_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT168_MSG_DATA 0x1e2a2
+#define regPCIEMSIX_VECT168_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT168_CONTROL 0x1e2a3
+#define regPCIEMSIX_VECT168_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT169_ADDR_LO 0x1e2a4
+#define regPCIEMSIX_VECT169_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT169_ADDR_HI 0x1e2a5
+#define regPCIEMSIX_VECT169_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT169_MSG_DATA 0x1e2a6
+#define regPCIEMSIX_VECT169_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT169_CONTROL 0x1e2a7
+#define regPCIEMSIX_VECT169_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT170_ADDR_LO 0x1e2a8
+#define regPCIEMSIX_VECT170_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT170_ADDR_HI 0x1e2a9
+#define regPCIEMSIX_VECT170_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT170_MSG_DATA 0x1e2aa
+#define regPCIEMSIX_VECT170_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT170_CONTROL 0x1e2ab
+#define regPCIEMSIX_VECT170_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT171_ADDR_LO 0x1e2ac
+#define regPCIEMSIX_VECT171_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT171_ADDR_HI 0x1e2ad
+#define regPCIEMSIX_VECT171_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT171_MSG_DATA 0x1e2ae
+#define regPCIEMSIX_VECT171_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT171_CONTROL 0x1e2af
+#define regPCIEMSIX_VECT171_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT172_ADDR_LO 0x1e2b0
+#define regPCIEMSIX_VECT172_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT172_ADDR_HI 0x1e2b1
+#define regPCIEMSIX_VECT172_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT172_MSG_DATA 0x1e2b2
+#define regPCIEMSIX_VECT172_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT172_CONTROL 0x1e2b3
+#define regPCIEMSIX_VECT172_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT173_ADDR_LO 0x1e2b4
+#define regPCIEMSIX_VECT173_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT173_ADDR_HI 0x1e2b5
+#define regPCIEMSIX_VECT173_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT173_MSG_DATA 0x1e2b6
+#define regPCIEMSIX_VECT173_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT173_CONTROL 0x1e2b7
+#define regPCIEMSIX_VECT173_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT174_ADDR_LO 0x1e2b8
+#define regPCIEMSIX_VECT174_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT174_ADDR_HI 0x1e2b9
+#define regPCIEMSIX_VECT174_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT174_MSG_DATA 0x1e2ba
+#define regPCIEMSIX_VECT174_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT174_CONTROL 0x1e2bb
+#define regPCIEMSIX_VECT174_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT175_ADDR_LO 0x1e2bc
+#define regPCIEMSIX_VECT175_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT175_ADDR_HI 0x1e2bd
+#define regPCIEMSIX_VECT175_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT175_MSG_DATA 0x1e2be
+#define regPCIEMSIX_VECT175_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT175_CONTROL 0x1e2bf
+#define regPCIEMSIX_VECT175_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT176_ADDR_LO 0x1e2c0
+#define regPCIEMSIX_VECT176_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT176_ADDR_HI 0x1e2c1
+#define regPCIEMSIX_VECT176_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT176_MSG_DATA 0x1e2c2
+#define regPCIEMSIX_VECT176_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT176_CONTROL 0x1e2c3
+#define regPCIEMSIX_VECT176_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT177_ADDR_LO 0x1e2c4
+#define regPCIEMSIX_VECT177_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT177_ADDR_HI 0x1e2c5
+#define regPCIEMSIX_VECT177_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT177_MSG_DATA 0x1e2c6
+#define regPCIEMSIX_VECT177_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT177_CONTROL 0x1e2c7
+#define regPCIEMSIX_VECT177_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT178_ADDR_LO 0x1e2c8
+#define regPCIEMSIX_VECT178_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT178_ADDR_HI 0x1e2c9
+#define regPCIEMSIX_VECT178_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT178_MSG_DATA 0x1e2ca
+#define regPCIEMSIX_VECT178_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT178_CONTROL 0x1e2cb
+#define regPCIEMSIX_VECT178_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT179_ADDR_LO 0x1e2cc
+#define regPCIEMSIX_VECT179_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT179_ADDR_HI 0x1e2cd
+#define regPCIEMSIX_VECT179_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT179_MSG_DATA 0x1e2ce
+#define regPCIEMSIX_VECT179_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT179_CONTROL 0x1e2cf
+#define regPCIEMSIX_VECT179_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT180_ADDR_LO 0x1e2d0
+#define regPCIEMSIX_VECT180_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT180_ADDR_HI 0x1e2d1
+#define regPCIEMSIX_VECT180_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT180_MSG_DATA 0x1e2d2
+#define regPCIEMSIX_VECT180_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT180_CONTROL 0x1e2d3
+#define regPCIEMSIX_VECT180_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT181_ADDR_LO 0x1e2d4
+#define regPCIEMSIX_VECT181_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT181_ADDR_HI 0x1e2d5
+#define regPCIEMSIX_VECT181_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT181_MSG_DATA 0x1e2d6
+#define regPCIEMSIX_VECT181_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT181_CONTROL 0x1e2d7
+#define regPCIEMSIX_VECT181_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT182_ADDR_LO 0x1e2d8
+#define regPCIEMSIX_VECT182_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT182_ADDR_HI 0x1e2d9
+#define regPCIEMSIX_VECT182_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT182_MSG_DATA 0x1e2da
+#define regPCIEMSIX_VECT182_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT182_CONTROL 0x1e2db
+#define regPCIEMSIX_VECT182_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT183_ADDR_LO 0x1e2dc
+#define regPCIEMSIX_VECT183_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT183_ADDR_HI 0x1e2dd
+#define regPCIEMSIX_VECT183_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT183_MSG_DATA 0x1e2de
+#define regPCIEMSIX_VECT183_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT183_CONTROL 0x1e2df
+#define regPCIEMSIX_VECT183_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT184_ADDR_LO 0x1e2e0
+#define regPCIEMSIX_VECT184_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT184_ADDR_HI 0x1e2e1
+#define regPCIEMSIX_VECT184_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT184_MSG_DATA 0x1e2e2
+#define regPCIEMSIX_VECT184_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT184_CONTROL 0x1e2e3
+#define regPCIEMSIX_VECT184_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT185_ADDR_LO 0x1e2e4
+#define regPCIEMSIX_VECT185_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT185_ADDR_HI 0x1e2e5
+#define regPCIEMSIX_VECT185_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT185_MSG_DATA 0x1e2e6
+#define regPCIEMSIX_VECT185_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT185_CONTROL 0x1e2e7
+#define regPCIEMSIX_VECT185_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT186_ADDR_LO 0x1e2e8
+#define regPCIEMSIX_VECT186_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT186_ADDR_HI 0x1e2e9
+#define regPCIEMSIX_VECT186_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT186_MSG_DATA 0x1e2ea
+#define regPCIEMSIX_VECT186_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT186_CONTROL 0x1e2eb
+#define regPCIEMSIX_VECT186_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT187_ADDR_LO 0x1e2ec
+#define regPCIEMSIX_VECT187_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT187_ADDR_HI 0x1e2ed
+#define regPCIEMSIX_VECT187_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT187_MSG_DATA 0x1e2ee
+#define regPCIEMSIX_VECT187_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT187_CONTROL 0x1e2ef
+#define regPCIEMSIX_VECT187_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT188_ADDR_LO 0x1e2f0
+#define regPCIEMSIX_VECT188_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT188_ADDR_HI 0x1e2f1
+#define regPCIEMSIX_VECT188_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT188_MSG_DATA 0x1e2f2
+#define regPCIEMSIX_VECT188_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT188_CONTROL 0x1e2f3
+#define regPCIEMSIX_VECT188_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT189_ADDR_LO 0x1e2f4
+#define regPCIEMSIX_VECT189_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT189_ADDR_HI 0x1e2f5
+#define regPCIEMSIX_VECT189_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT189_MSG_DATA 0x1e2f6
+#define regPCIEMSIX_VECT189_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT189_CONTROL 0x1e2f7
+#define regPCIEMSIX_VECT189_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT190_ADDR_LO 0x1e2f8
+#define regPCIEMSIX_VECT190_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT190_ADDR_HI 0x1e2f9
+#define regPCIEMSIX_VECT190_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT190_MSG_DATA 0x1e2fa
+#define regPCIEMSIX_VECT190_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT190_CONTROL 0x1e2fb
+#define regPCIEMSIX_VECT190_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT191_ADDR_LO 0x1e2fc
+#define regPCIEMSIX_VECT191_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT191_ADDR_HI 0x1e2fd
+#define regPCIEMSIX_VECT191_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT191_MSG_DATA 0x1e2fe
+#define regPCIEMSIX_VECT191_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT191_CONTROL 0x1e2ff
+#define regPCIEMSIX_VECT191_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT192_ADDR_LO 0x1e300
+#define regPCIEMSIX_VECT192_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT192_ADDR_HI 0x1e301
+#define regPCIEMSIX_VECT192_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT192_MSG_DATA 0x1e302
+#define regPCIEMSIX_VECT192_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT192_CONTROL 0x1e303
+#define regPCIEMSIX_VECT192_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT193_ADDR_LO 0x1e304
+#define regPCIEMSIX_VECT193_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT193_ADDR_HI 0x1e305
+#define regPCIEMSIX_VECT193_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT193_MSG_DATA 0x1e306
+#define regPCIEMSIX_VECT193_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT193_CONTROL 0x1e307
+#define regPCIEMSIX_VECT193_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT194_ADDR_LO 0x1e308
+#define regPCIEMSIX_VECT194_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT194_ADDR_HI 0x1e309
+#define regPCIEMSIX_VECT194_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT194_MSG_DATA 0x1e30a
+#define regPCIEMSIX_VECT194_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT194_CONTROL 0x1e30b
+#define regPCIEMSIX_VECT194_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT195_ADDR_LO 0x1e30c
+#define regPCIEMSIX_VECT195_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT195_ADDR_HI 0x1e30d
+#define regPCIEMSIX_VECT195_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT195_MSG_DATA 0x1e30e
+#define regPCIEMSIX_VECT195_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT195_CONTROL 0x1e30f
+#define regPCIEMSIX_VECT195_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT196_ADDR_LO 0x1e310
+#define regPCIEMSIX_VECT196_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT196_ADDR_HI 0x1e311
+#define regPCIEMSIX_VECT196_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT196_MSG_DATA 0x1e312
+#define regPCIEMSIX_VECT196_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT196_CONTROL 0x1e313
+#define regPCIEMSIX_VECT196_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT197_ADDR_LO 0x1e314
+#define regPCIEMSIX_VECT197_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT197_ADDR_HI 0x1e315
+#define regPCIEMSIX_VECT197_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT197_MSG_DATA 0x1e316
+#define regPCIEMSIX_VECT197_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT197_CONTROL 0x1e317
+#define regPCIEMSIX_VECT197_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT198_ADDR_LO 0x1e318
+#define regPCIEMSIX_VECT198_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT198_ADDR_HI 0x1e319
+#define regPCIEMSIX_VECT198_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT198_MSG_DATA 0x1e31a
+#define regPCIEMSIX_VECT198_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT198_CONTROL 0x1e31b
+#define regPCIEMSIX_VECT198_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT199_ADDR_LO 0x1e31c
+#define regPCIEMSIX_VECT199_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT199_ADDR_HI 0x1e31d
+#define regPCIEMSIX_VECT199_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT199_MSG_DATA 0x1e31e
+#define regPCIEMSIX_VECT199_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT199_CONTROL 0x1e31f
+#define regPCIEMSIX_VECT199_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT200_ADDR_LO 0x1e320
+#define regPCIEMSIX_VECT200_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT200_ADDR_HI 0x1e321
+#define regPCIEMSIX_VECT200_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT200_MSG_DATA 0x1e322
+#define regPCIEMSIX_VECT200_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT200_CONTROL 0x1e323
+#define regPCIEMSIX_VECT200_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT201_ADDR_LO 0x1e324
+#define regPCIEMSIX_VECT201_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT201_ADDR_HI 0x1e325
+#define regPCIEMSIX_VECT201_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT201_MSG_DATA 0x1e326
+#define regPCIEMSIX_VECT201_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT201_CONTROL 0x1e327
+#define regPCIEMSIX_VECT201_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT202_ADDR_LO 0x1e328
+#define regPCIEMSIX_VECT202_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT202_ADDR_HI 0x1e329
+#define regPCIEMSIX_VECT202_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT202_MSG_DATA 0x1e32a
+#define regPCIEMSIX_VECT202_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT202_CONTROL 0x1e32b
+#define regPCIEMSIX_VECT202_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT203_ADDR_LO 0x1e32c
+#define regPCIEMSIX_VECT203_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT203_ADDR_HI 0x1e32d
+#define regPCIEMSIX_VECT203_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT203_MSG_DATA 0x1e32e
+#define regPCIEMSIX_VECT203_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT203_CONTROL 0x1e32f
+#define regPCIEMSIX_VECT203_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT204_ADDR_LO 0x1e330
+#define regPCIEMSIX_VECT204_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT204_ADDR_HI 0x1e331
+#define regPCIEMSIX_VECT204_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT204_MSG_DATA 0x1e332
+#define regPCIEMSIX_VECT204_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT204_CONTROL 0x1e333
+#define regPCIEMSIX_VECT204_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT205_ADDR_LO 0x1e334
+#define regPCIEMSIX_VECT205_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT205_ADDR_HI 0x1e335
+#define regPCIEMSIX_VECT205_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT205_MSG_DATA 0x1e336
+#define regPCIEMSIX_VECT205_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT205_CONTROL 0x1e337
+#define regPCIEMSIX_VECT205_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT206_ADDR_LO 0x1e338
+#define regPCIEMSIX_VECT206_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT206_ADDR_HI 0x1e339
+#define regPCIEMSIX_VECT206_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT206_MSG_DATA 0x1e33a
+#define regPCIEMSIX_VECT206_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT206_CONTROL 0x1e33b
+#define regPCIEMSIX_VECT206_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT207_ADDR_LO 0x1e33c
+#define regPCIEMSIX_VECT207_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT207_ADDR_HI 0x1e33d
+#define regPCIEMSIX_VECT207_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT207_MSG_DATA 0x1e33e
+#define regPCIEMSIX_VECT207_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT207_CONTROL 0x1e33f
+#define regPCIEMSIX_VECT207_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT208_ADDR_LO 0x1e340
+#define regPCIEMSIX_VECT208_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT208_ADDR_HI 0x1e341
+#define regPCIEMSIX_VECT208_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT208_MSG_DATA 0x1e342
+#define regPCIEMSIX_VECT208_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT208_CONTROL 0x1e343
+#define regPCIEMSIX_VECT208_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT209_ADDR_LO 0x1e344
+#define regPCIEMSIX_VECT209_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT209_ADDR_HI 0x1e345
+#define regPCIEMSIX_VECT209_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT209_MSG_DATA 0x1e346
+#define regPCIEMSIX_VECT209_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT209_CONTROL 0x1e347
+#define regPCIEMSIX_VECT209_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT210_ADDR_LO 0x1e348
+#define regPCIEMSIX_VECT210_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT210_ADDR_HI 0x1e349
+#define regPCIEMSIX_VECT210_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT210_MSG_DATA 0x1e34a
+#define regPCIEMSIX_VECT210_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT210_CONTROL 0x1e34b
+#define regPCIEMSIX_VECT210_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT211_ADDR_LO 0x1e34c
+#define regPCIEMSIX_VECT211_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT211_ADDR_HI 0x1e34d
+#define regPCIEMSIX_VECT211_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT211_MSG_DATA 0x1e34e
+#define regPCIEMSIX_VECT211_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT211_CONTROL 0x1e34f
+#define regPCIEMSIX_VECT211_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT212_ADDR_LO 0x1e350
+#define regPCIEMSIX_VECT212_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT212_ADDR_HI 0x1e351
+#define regPCIEMSIX_VECT212_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT212_MSG_DATA 0x1e352
+#define regPCIEMSIX_VECT212_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT212_CONTROL 0x1e353
+#define regPCIEMSIX_VECT212_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT213_ADDR_LO 0x1e354
+#define regPCIEMSIX_VECT213_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT213_ADDR_HI 0x1e355
+#define regPCIEMSIX_VECT213_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT213_MSG_DATA 0x1e356
+#define regPCIEMSIX_VECT213_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT213_CONTROL 0x1e357
+#define regPCIEMSIX_VECT213_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT214_ADDR_LO 0x1e358
+#define regPCIEMSIX_VECT214_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT214_ADDR_HI 0x1e359
+#define regPCIEMSIX_VECT214_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT214_MSG_DATA 0x1e35a
+#define regPCIEMSIX_VECT214_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT214_CONTROL 0x1e35b
+#define regPCIEMSIX_VECT214_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT215_ADDR_LO 0x1e35c
+#define regPCIEMSIX_VECT215_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT215_ADDR_HI 0x1e35d
+#define regPCIEMSIX_VECT215_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT215_MSG_DATA 0x1e35e
+#define regPCIEMSIX_VECT215_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT215_CONTROL 0x1e35f
+#define regPCIEMSIX_VECT215_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT216_ADDR_LO 0x1e360
+#define regPCIEMSIX_VECT216_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT216_ADDR_HI 0x1e361
+#define regPCIEMSIX_VECT216_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT216_MSG_DATA 0x1e362
+#define regPCIEMSIX_VECT216_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT216_CONTROL 0x1e363
+#define regPCIEMSIX_VECT216_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT217_ADDR_LO 0x1e364
+#define regPCIEMSIX_VECT217_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT217_ADDR_HI 0x1e365
+#define regPCIEMSIX_VECT217_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT217_MSG_DATA 0x1e366
+#define regPCIEMSIX_VECT217_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT217_CONTROL 0x1e367
+#define regPCIEMSIX_VECT217_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT218_ADDR_LO 0x1e368
+#define regPCIEMSIX_VECT218_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT218_ADDR_HI 0x1e369
+#define regPCIEMSIX_VECT218_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT218_MSG_DATA 0x1e36a
+#define regPCIEMSIX_VECT218_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT218_CONTROL 0x1e36b
+#define regPCIEMSIX_VECT218_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT219_ADDR_LO 0x1e36c
+#define regPCIEMSIX_VECT219_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT219_ADDR_HI 0x1e36d
+#define regPCIEMSIX_VECT219_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT219_MSG_DATA 0x1e36e
+#define regPCIEMSIX_VECT219_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT219_CONTROL 0x1e36f
+#define regPCIEMSIX_VECT219_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT220_ADDR_LO 0x1e370
+#define regPCIEMSIX_VECT220_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT220_ADDR_HI 0x1e371
+#define regPCIEMSIX_VECT220_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT220_MSG_DATA 0x1e372
+#define regPCIEMSIX_VECT220_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT220_CONTROL 0x1e373
+#define regPCIEMSIX_VECT220_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT221_ADDR_LO 0x1e374
+#define regPCIEMSIX_VECT221_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT221_ADDR_HI 0x1e375
+#define regPCIEMSIX_VECT221_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT221_MSG_DATA 0x1e376
+#define regPCIEMSIX_VECT221_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT221_CONTROL 0x1e377
+#define regPCIEMSIX_VECT221_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT222_ADDR_LO 0x1e378
+#define regPCIEMSIX_VECT222_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT222_ADDR_HI 0x1e379
+#define regPCIEMSIX_VECT222_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT222_MSG_DATA 0x1e37a
+#define regPCIEMSIX_VECT222_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT222_CONTROL 0x1e37b
+#define regPCIEMSIX_VECT222_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT223_ADDR_LO 0x1e37c
+#define regPCIEMSIX_VECT223_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT223_ADDR_HI 0x1e37d
+#define regPCIEMSIX_VECT223_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT223_MSG_DATA 0x1e37e
+#define regPCIEMSIX_VECT223_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT223_CONTROL 0x1e37f
+#define regPCIEMSIX_VECT223_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT224_ADDR_LO 0x1e380
+#define regPCIEMSIX_VECT224_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT224_ADDR_HI 0x1e381
+#define regPCIEMSIX_VECT224_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT224_MSG_DATA 0x1e382
+#define regPCIEMSIX_VECT224_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT224_CONTROL 0x1e383
+#define regPCIEMSIX_VECT224_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT225_ADDR_LO 0x1e384
+#define regPCIEMSIX_VECT225_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT225_ADDR_HI 0x1e385
+#define regPCIEMSIX_VECT225_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT225_MSG_DATA 0x1e386
+#define regPCIEMSIX_VECT225_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT225_CONTROL 0x1e387
+#define regPCIEMSIX_VECT225_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT226_ADDR_LO 0x1e388
+#define regPCIEMSIX_VECT226_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT226_ADDR_HI 0x1e389
+#define regPCIEMSIX_VECT226_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT226_MSG_DATA 0x1e38a
+#define regPCIEMSIX_VECT226_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT226_CONTROL 0x1e38b
+#define regPCIEMSIX_VECT226_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT227_ADDR_LO 0x1e38c
+#define regPCIEMSIX_VECT227_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT227_ADDR_HI 0x1e38d
+#define regPCIEMSIX_VECT227_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT227_MSG_DATA 0x1e38e
+#define regPCIEMSIX_VECT227_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT227_CONTROL 0x1e38f
+#define regPCIEMSIX_VECT227_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT228_ADDR_LO 0x1e390
+#define regPCIEMSIX_VECT228_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT228_ADDR_HI 0x1e391
+#define regPCIEMSIX_VECT228_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT228_MSG_DATA 0x1e392
+#define regPCIEMSIX_VECT228_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT228_CONTROL 0x1e393
+#define regPCIEMSIX_VECT228_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT229_ADDR_LO 0x1e394
+#define regPCIEMSIX_VECT229_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT229_ADDR_HI 0x1e395
+#define regPCIEMSIX_VECT229_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT229_MSG_DATA 0x1e396
+#define regPCIEMSIX_VECT229_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT229_CONTROL 0x1e397
+#define regPCIEMSIX_VECT229_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT230_ADDR_LO 0x1e398
+#define regPCIEMSIX_VECT230_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT230_ADDR_HI 0x1e399
+#define regPCIEMSIX_VECT230_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT230_MSG_DATA 0x1e39a
+#define regPCIEMSIX_VECT230_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT230_CONTROL 0x1e39b
+#define regPCIEMSIX_VECT230_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT231_ADDR_LO 0x1e39c
+#define regPCIEMSIX_VECT231_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT231_ADDR_HI 0x1e39d
+#define regPCIEMSIX_VECT231_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT231_MSG_DATA 0x1e39e
+#define regPCIEMSIX_VECT231_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT231_CONTROL 0x1e39f
+#define regPCIEMSIX_VECT231_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT232_ADDR_LO 0x1e3a0
+#define regPCIEMSIX_VECT232_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT232_ADDR_HI 0x1e3a1
+#define regPCIEMSIX_VECT232_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT232_MSG_DATA 0x1e3a2
+#define regPCIEMSIX_VECT232_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT232_CONTROL 0x1e3a3
+#define regPCIEMSIX_VECT232_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT233_ADDR_LO 0x1e3a4
+#define regPCIEMSIX_VECT233_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT233_ADDR_HI 0x1e3a5
+#define regPCIEMSIX_VECT233_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT233_MSG_DATA 0x1e3a6
+#define regPCIEMSIX_VECT233_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT233_CONTROL 0x1e3a7
+#define regPCIEMSIX_VECT233_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT234_ADDR_LO 0x1e3a8
+#define regPCIEMSIX_VECT234_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT234_ADDR_HI 0x1e3a9
+#define regPCIEMSIX_VECT234_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT234_MSG_DATA 0x1e3aa
+#define regPCIEMSIX_VECT234_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT234_CONTROL 0x1e3ab
+#define regPCIEMSIX_VECT234_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT235_ADDR_LO 0x1e3ac
+#define regPCIEMSIX_VECT235_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT235_ADDR_HI 0x1e3ad
+#define regPCIEMSIX_VECT235_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT235_MSG_DATA 0x1e3ae
+#define regPCIEMSIX_VECT235_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT235_CONTROL 0x1e3af
+#define regPCIEMSIX_VECT235_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT236_ADDR_LO 0x1e3b0
+#define regPCIEMSIX_VECT236_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT236_ADDR_HI 0x1e3b1
+#define regPCIEMSIX_VECT236_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT236_MSG_DATA 0x1e3b2
+#define regPCIEMSIX_VECT236_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT236_CONTROL 0x1e3b3
+#define regPCIEMSIX_VECT236_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT237_ADDR_LO 0x1e3b4
+#define regPCIEMSIX_VECT237_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT237_ADDR_HI 0x1e3b5
+#define regPCIEMSIX_VECT237_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT237_MSG_DATA 0x1e3b6
+#define regPCIEMSIX_VECT237_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT237_CONTROL 0x1e3b7
+#define regPCIEMSIX_VECT237_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT238_ADDR_LO 0x1e3b8
+#define regPCIEMSIX_VECT238_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT238_ADDR_HI 0x1e3b9
+#define regPCIEMSIX_VECT238_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT238_MSG_DATA 0x1e3ba
+#define regPCIEMSIX_VECT238_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT238_CONTROL 0x1e3bb
+#define regPCIEMSIX_VECT238_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT239_ADDR_LO 0x1e3bc
+#define regPCIEMSIX_VECT239_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT239_ADDR_HI 0x1e3bd
+#define regPCIEMSIX_VECT239_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT239_MSG_DATA 0x1e3be
+#define regPCIEMSIX_VECT239_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT239_CONTROL 0x1e3bf
+#define regPCIEMSIX_VECT239_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT240_ADDR_LO 0x1e3c0
+#define regPCIEMSIX_VECT240_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT240_ADDR_HI 0x1e3c1
+#define regPCIEMSIX_VECT240_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT240_MSG_DATA 0x1e3c2
+#define regPCIEMSIX_VECT240_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT240_CONTROL 0x1e3c3
+#define regPCIEMSIX_VECT240_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT241_ADDR_LO 0x1e3c4
+#define regPCIEMSIX_VECT241_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT241_ADDR_HI 0x1e3c5
+#define regPCIEMSIX_VECT241_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT241_MSG_DATA 0x1e3c6
+#define regPCIEMSIX_VECT241_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT241_CONTROL 0x1e3c7
+#define regPCIEMSIX_VECT241_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT242_ADDR_LO 0x1e3c8
+#define regPCIEMSIX_VECT242_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT242_ADDR_HI 0x1e3c9
+#define regPCIEMSIX_VECT242_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT242_MSG_DATA 0x1e3ca
+#define regPCIEMSIX_VECT242_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT242_CONTROL 0x1e3cb
+#define regPCIEMSIX_VECT242_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT243_ADDR_LO 0x1e3cc
+#define regPCIEMSIX_VECT243_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT243_ADDR_HI 0x1e3cd
+#define regPCIEMSIX_VECT243_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT243_MSG_DATA 0x1e3ce
+#define regPCIEMSIX_VECT243_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT243_CONTROL 0x1e3cf
+#define regPCIEMSIX_VECT243_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT244_ADDR_LO 0x1e3d0
+#define regPCIEMSIX_VECT244_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT244_ADDR_HI 0x1e3d1
+#define regPCIEMSIX_VECT244_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT244_MSG_DATA 0x1e3d2
+#define regPCIEMSIX_VECT244_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT244_CONTROL 0x1e3d3
+#define regPCIEMSIX_VECT244_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT245_ADDR_LO 0x1e3d4
+#define regPCIEMSIX_VECT245_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT245_ADDR_HI 0x1e3d5
+#define regPCIEMSIX_VECT245_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT245_MSG_DATA 0x1e3d6
+#define regPCIEMSIX_VECT245_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT245_CONTROL 0x1e3d7
+#define regPCIEMSIX_VECT245_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT246_ADDR_LO 0x1e3d8
+#define regPCIEMSIX_VECT246_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT246_ADDR_HI 0x1e3d9
+#define regPCIEMSIX_VECT246_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT246_MSG_DATA 0x1e3da
+#define regPCIEMSIX_VECT246_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT246_CONTROL 0x1e3db
+#define regPCIEMSIX_VECT246_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT247_ADDR_LO 0x1e3dc
+#define regPCIEMSIX_VECT247_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT247_ADDR_HI 0x1e3dd
+#define regPCIEMSIX_VECT247_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT247_MSG_DATA 0x1e3de
+#define regPCIEMSIX_VECT247_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT247_CONTROL 0x1e3df
+#define regPCIEMSIX_VECT247_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT248_ADDR_LO 0x1e3e0
+#define regPCIEMSIX_VECT248_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT248_ADDR_HI 0x1e3e1
+#define regPCIEMSIX_VECT248_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT248_MSG_DATA 0x1e3e2
+#define regPCIEMSIX_VECT248_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT248_CONTROL 0x1e3e3
+#define regPCIEMSIX_VECT248_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT249_ADDR_LO 0x1e3e4
+#define regPCIEMSIX_VECT249_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT249_ADDR_HI 0x1e3e5
+#define regPCIEMSIX_VECT249_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT249_MSG_DATA 0x1e3e6
+#define regPCIEMSIX_VECT249_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT249_CONTROL 0x1e3e7
+#define regPCIEMSIX_VECT249_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT250_ADDR_LO 0x1e3e8
+#define regPCIEMSIX_VECT250_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT250_ADDR_HI 0x1e3e9
+#define regPCIEMSIX_VECT250_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT250_MSG_DATA 0x1e3ea
+#define regPCIEMSIX_VECT250_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT250_CONTROL 0x1e3eb
+#define regPCIEMSIX_VECT250_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT251_ADDR_LO 0x1e3ec
+#define regPCIEMSIX_VECT251_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT251_ADDR_HI 0x1e3ed
+#define regPCIEMSIX_VECT251_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT251_MSG_DATA 0x1e3ee
+#define regPCIEMSIX_VECT251_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT251_CONTROL 0x1e3ef
+#define regPCIEMSIX_VECT251_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT252_ADDR_LO 0x1e3f0
+#define regPCIEMSIX_VECT252_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT252_ADDR_HI 0x1e3f1
+#define regPCIEMSIX_VECT252_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT252_MSG_DATA 0x1e3f2
+#define regPCIEMSIX_VECT252_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT252_CONTROL 0x1e3f3
+#define regPCIEMSIX_VECT252_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT253_ADDR_LO 0x1e3f4
+#define regPCIEMSIX_VECT253_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT253_ADDR_HI 0x1e3f5
+#define regPCIEMSIX_VECT253_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT253_MSG_DATA 0x1e3f6
+#define regPCIEMSIX_VECT253_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT253_CONTROL 0x1e3f7
+#define regPCIEMSIX_VECT253_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT254_ADDR_LO 0x1e3f8
+#define regPCIEMSIX_VECT254_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT254_ADDR_HI 0x1e3f9
+#define regPCIEMSIX_VECT254_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT254_MSG_DATA 0x1e3fa
+#define regPCIEMSIX_VECT254_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT254_CONTROL 0x1e3fb
+#define regPCIEMSIX_VECT254_CONTROL_BASE_IDX 5
+#define regPCIEMSIX_VECT255_ADDR_LO 0x1e3fc
+#define regPCIEMSIX_VECT255_ADDR_LO_BASE_IDX 5
+#define regPCIEMSIX_VECT255_ADDR_HI 0x1e3fd
+#define regPCIEMSIX_VECT255_ADDR_HI_BASE_IDX 5
+#define regPCIEMSIX_VECT255_MSG_DATA 0x1e3fe
+#define regPCIEMSIX_VECT255_MSG_DATA_BASE_IDX 5
+#define regPCIEMSIX_VECT255_CONTROL 0x1e3ff
+#define regPCIEMSIX_VECT255_CONTROL_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_pfc_usb_RCCPFCDEC
+// base address: 0x10134400
+#define regRCC_PFC_USB_RCC_PFC_LTR_CNTL 0xd140
+#define regRCC_PFC_USB_RCC_PFC_LTR_CNTL_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_PME_RESTORE 0xd141
+#define regRCC_PFC_USB_RCC_PFC_PME_RESTORE_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0 0xd142
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_1 0xd143
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_1_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_2 0xd144
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_2_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_3 0xd145
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_3_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_4 0xd146
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_4_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_5 0xd147
+#define regRCC_PFC_USB_RCC_PFC_STICKY_RESTORE_5_BASE_IDX 5
+#define regRCC_PFC_USB_RCC_PFC_AUXPWR_CNTL 0xd148
+#define regRCC_PFC_USB_RCC_PFC_AUXPWR_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_pfc_pd_controller_RCCPFCDEC
+// base address: 0x10134600
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL 0xd1c0
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE 0xd1c1
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0 0xd1c2
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_1 0xd1c3
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_1_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_2 0xd1c4
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_2_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_3 0xd1c5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_3_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_4 0xd1c6
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_4_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_5 0xd1c7
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_5_BASE_IDX 5
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL 0xd1c8
+#define regRCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_pciemsix_0_usb_MSIXPDEC
+// base address: 0x10179000
+#define regPCIEMSIX_PBA_0 0x1e400
+#define regPCIEMSIX_PBA_0_BASE_IDX 5
+#define regPCIEMSIX_PBA_1 0x1e401
+#define regPCIEMSIX_PBA_1_BASE_IDX 5
+#define regPCIEMSIX_PBA_2 0x1e402
+#define regPCIEMSIX_PBA_2_BASE_IDX 5
+#define regPCIEMSIX_PBA_3 0x1e403
+#define regPCIEMSIX_PBA_3_BASE_IDX 5
+#define regPCIEMSIX_PBA_4 0x1e404
+#define regPCIEMSIX_PBA_4_BASE_IDX 5
+#define regPCIEMSIX_PBA_5 0x1e405
+#define regPCIEMSIX_PBA_5_BASE_IDX 5
+#define regPCIEMSIX_PBA_6 0x1e406
+#define regPCIEMSIX_PBA_6_BASE_IDX 5
+#define regPCIEMSIX_PBA_7 0x1e407
+#define regPCIEMSIX_PBA_7_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_shadow_reg_shadowdec
+// base address: 0x10130000
+#define regSHADOW_COMMAND 0xc001
+#define regSHADOW_COMMAND_BASE_IDX 5
+#define regSHADOW_BASE_ADDR_1 0xc004
+#define regSHADOW_BASE_ADDR_1_BASE_IDX 5
+#define regSHADOW_BASE_ADDR_2 0xc005
+#define regSHADOW_BASE_ADDR_2_BASE_IDX 5
+#define regSHADOW_IRQ_BRIDGE_CNTL 0xc00f
+#define regSHADOW_IRQ_BRIDGE_CNTL_BASE_IDX 5
+#define regSUC_INDEX 0xc038
+#define regSUC_INDEX_BASE_IDX 5
+#define regSUC_DATA 0xc039
+#define regSUC_DATA_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_swus_SUMDEC
+// base address: 0x1013b000
+#define regSUM_INDEX 0xec38
+#define regSUM_INDEX_BASE_IDX 5
+#define regSUM_DATA 0xec39
+#define regSUM_DATA_BASE_IDX 5
+#define regSUM_INDEX_HI 0xec3b
+#define regSUM_INDEX_HI_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_strap_rcc_strap_internal
+// base address: 0x10100000
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP0 0xc400
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP0_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP1 0xc401
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP1_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP2 0xc402
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP2_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP3 0xc403
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP3_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP4 0xc404
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP4_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP5 0xc405
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP5_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP6 0xc406
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP6_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP7 0xc407
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP7_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP8 0xc408
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP8_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP9 0xc409
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP9_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP10 0xc40a
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP10_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP11 0xc40b
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP11_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP12 0xc40c
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP12_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP13 0xc40d
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP13_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP14 0xc40e
+#define regRCC_STRAP1_RCC_DEV0_PORT_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP0 0xc480
+#define regRCC_DEV1_PORT_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP1 0xc481
+#define regRCC_DEV1_PORT_STRAP1_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP2 0xc482
+#define regRCC_DEV1_PORT_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP3 0xc483
+#define regRCC_DEV1_PORT_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP4 0xc484
+#define regRCC_DEV1_PORT_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP5 0xc485
+#define regRCC_DEV1_PORT_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP6 0xc486
+#define regRCC_DEV1_PORT_STRAP6_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP7 0xc487
+#define regRCC_DEV1_PORT_STRAP7_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP8 0xc488
+#define regRCC_DEV1_PORT_STRAP8_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP9 0xc489
+#define regRCC_DEV1_PORT_STRAP9_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP10 0xc48a
+#define regRCC_DEV1_PORT_STRAP10_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP11 0xc48b
+#define regRCC_DEV1_PORT_STRAP11_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP12 0xc48c
+#define regRCC_DEV1_PORT_STRAP12_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP13 0xc48d
+#define regRCC_DEV1_PORT_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_PORT_STRAP14 0xc48e
+#define regRCC_DEV1_PORT_STRAP14_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP0 0xc500
+#define regRCC_DEV2_PORT_STRAP0_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP1 0xc501
+#define regRCC_DEV2_PORT_STRAP1_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP2 0xc502
+#define regRCC_DEV2_PORT_STRAP2_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP3 0xc503
+#define regRCC_DEV2_PORT_STRAP3_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP4 0xc504
+#define regRCC_DEV2_PORT_STRAP4_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP5 0xc505
+#define regRCC_DEV2_PORT_STRAP5_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP6 0xc506
+#define regRCC_DEV2_PORT_STRAP6_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP7 0xc507
+#define regRCC_DEV2_PORT_STRAP7_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP8 0xc508
+#define regRCC_DEV2_PORT_STRAP8_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP9 0xc509
+#define regRCC_DEV2_PORT_STRAP9_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP10 0xc50a
+#define regRCC_DEV2_PORT_STRAP10_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP11 0xc50b
+#define regRCC_DEV2_PORT_STRAP11_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP12 0xc50c
+#define regRCC_DEV2_PORT_STRAP12_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP13 0xc50d
+#define regRCC_DEV2_PORT_STRAP13_BASE_IDX 5
+#define regRCC_DEV2_PORT_STRAP14 0xc50e
+#define regRCC_DEV2_PORT_STRAP14_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP0 0xc600
+#define regRCC_STRAP1_RCC_BIF_STRAP0_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP1 0xc601
+#define regRCC_STRAP1_RCC_BIF_STRAP1_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP2 0xc602
+#define regRCC_STRAP1_RCC_BIF_STRAP2_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP3 0xc603
+#define regRCC_STRAP1_RCC_BIF_STRAP3_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP4 0xc604
+#define regRCC_STRAP1_RCC_BIF_STRAP4_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP5 0xc605
+#define regRCC_STRAP1_RCC_BIF_STRAP5_BASE_IDX 5
+#define regRCC_STRAP1_RCC_BIF_STRAP6 0xc606
+#define regRCC_STRAP1_RCC_BIF_STRAP6_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP0 0xd000
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP0_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP1 0xd001
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP1_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP2 0xd002
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP2_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP3 0xd003
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP3_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP4 0xd004
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP4_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP5 0xd005
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP5_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP8 0xd008
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP8_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP9 0xd009
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP9_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP13 0xd00d
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP13_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP14 0xd00e
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP14_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP15 0xd00f
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP15_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP16 0xd010
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP16_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP17 0xd011
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP17_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP18 0xd012
+#define regRCC_STRAP1_RCC_DEV0_EPF0_STRAP18_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP0 0xd080
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP0_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP2 0xd082
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP2_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP3 0xd083
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP3_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP4 0xd084
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP4_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP5 0xd085
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP5_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP6 0xd086
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP6_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP7 0xd087
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP7_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP20 0xd094
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP20_BASE_IDX 5
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP21 0xd095
+#define regRCC_STRAP1_RCC_DEV0_EPF1_STRAP21_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP0 0xd100
+#define regRCC_DEV0_EPF2_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP2 0xd102
+#define regRCC_DEV0_EPF2_STRAP2_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP3 0xd103
+#define regRCC_DEV0_EPF2_STRAP3_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP4 0xd104
+#define regRCC_DEV0_EPF2_STRAP4_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP5 0xd105
+#define regRCC_DEV0_EPF2_STRAP5_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP6 0xd106
+#define regRCC_DEV0_EPF2_STRAP6_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP7 0xd107
+#define regRCC_DEV0_EPF2_STRAP7_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP10 0xd10a
+#define regRCC_DEV0_EPF2_STRAP10_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP11 0xd10b
+#define regRCC_DEV0_EPF2_STRAP11_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP12 0xd10c
+#define regRCC_DEV0_EPF2_STRAP12_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP13 0xd10d
+#define regRCC_DEV0_EPF2_STRAP13_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP14 0xd10e
+#define regRCC_DEV0_EPF2_STRAP14_BASE_IDX 5
+#define regRCC_DEV0_EPF2_STRAP20 0xd114
+#define regRCC_DEV0_EPF2_STRAP20_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP0 0xd180
+#define regRCC_DEV0_EPF3_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP2 0xd182
+#define regRCC_DEV0_EPF3_STRAP2_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP3 0xd183
+#define regRCC_DEV0_EPF3_STRAP3_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP4 0xd184
+#define regRCC_DEV0_EPF3_STRAP4_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP5 0xd185
+#define regRCC_DEV0_EPF3_STRAP5_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP6 0xd186
+#define regRCC_DEV0_EPF3_STRAP6_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP7 0xd187
+#define regRCC_DEV0_EPF3_STRAP7_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP10 0xd18a
+#define regRCC_DEV0_EPF3_STRAP10_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP11 0xd18b
+#define regRCC_DEV0_EPF3_STRAP11_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP12 0xd18c
+#define regRCC_DEV0_EPF3_STRAP12_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP13 0xd18d
+#define regRCC_DEV0_EPF3_STRAP13_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP14 0xd18e
+#define regRCC_DEV0_EPF3_STRAP14_BASE_IDX 5
+#define regRCC_DEV0_EPF3_STRAP20 0xd194
+#define regRCC_DEV0_EPF3_STRAP20_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP0 0xd200
+#define regRCC_DEV0_EPF4_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP2 0xd202
+#define regRCC_DEV0_EPF4_STRAP2_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP3 0xd203
+#define regRCC_DEV0_EPF4_STRAP3_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP4 0xd204
+#define regRCC_DEV0_EPF4_STRAP4_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP5 0xd205
+#define regRCC_DEV0_EPF4_STRAP5_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP6 0xd206
+#define regRCC_DEV0_EPF4_STRAP6_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP7 0xd207
+#define regRCC_DEV0_EPF4_STRAP7_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP13 0xd20d
+#define regRCC_DEV0_EPF4_STRAP13_BASE_IDX 5
+#define regRCC_DEV0_EPF4_STRAP14 0xd20e
+#define regRCC_DEV0_EPF4_STRAP14_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP0 0xd280
+#define regRCC_DEV0_EPF5_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP2 0xd282
+#define regRCC_DEV0_EPF5_STRAP2_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP3 0xd283
+#define regRCC_DEV0_EPF5_STRAP3_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP4 0xd284
+#define regRCC_DEV0_EPF5_STRAP4_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP5 0xd285
+#define regRCC_DEV0_EPF5_STRAP5_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP6 0xd286
+#define regRCC_DEV0_EPF5_STRAP6_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP7 0xd287
+#define regRCC_DEV0_EPF5_STRAP7_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP13 0xd28d
+#define regRCC_DEV0_EPF5_STRAP13_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP14 0xd28e
+#define regRCC_DEV0_EPF5_STRAP14_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP0 0xd300
+#define regRCC_DEV0_EPF6_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP2 0xd302
+#define regRCC_DEV0_EPF6_STRAP2_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP3 0xd303
+#define regRCC_DEV0_EPF6_STRAP3_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP4 0xd304
+#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP5 0xd305
+#define regRCC_DEV0_EPF6_STRAP5_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP6 0xd306
+#define regRCC_DEV0_EPF6_STRAP6_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP13 0xd30d
+#define regRCC_DEV0_EPF6_STRAP13_BASE_IDX 5
+#define regRCC_DEV0_EPF6_STRAP14 0xd30e
+#define regRCC_DEV0_EPF6_STRAP14_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP0 0xd380
+#define regRCC_DEV0_EPF7_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP2 0xd382
+#define regRCC_DEV0_EPF7_STRAP2_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP3 0xd383
+#define regRCC_DEV0_EPF7_STRAP3_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP4 0xd384
+#define regRCC_DEV0_EPF7_STRAP4_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP5 0xd385
+#define regRCC_DEV0_EPF7_STRAP5_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP6 0xd386
+#define regRCC_DEV0_EPF7_STRAP6_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP13 0xd38d
+#define regRCC_DEV0_EPF7_STRAP13_BASE_IDX 5
+#define regRCC_DEV0_EPF7_STRAP14 0xd38e
+#define regRCC_DEV0_EPF7_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP0 0xd400
+#define regRCC_DEV1_EPF0_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP2 0xd402
+#define regRCC_DEV1_EPF0_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP3 0xd403
+#define regRCC_DEV1_EPF0_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP4 0xd404
+#define regRCC_DEV1_EPF0_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP5 0xd405
+#define regRCC_DEV1_EPF0_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP6 0xd406
+#define regRCC_DEV1_EPF0_STRAP6_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP13 0xd40d
+#define regRCC_DEV1_EPF0_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_EPF0_STRAP14 0xd40e
+#define regRCC_DEV1_EPF0_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP0 0xd480
+#define regRCC_DEV1_EPF1_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP2 0xd482
+#define regRCC_DEV1_EPF1_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP3 0xd483
+#define regRCC_DEV1_EPF1_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP4 0xd484
+#define regRCC_DEV1_EPF1_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP5 0xd485
+#define regRCC_DEV1_EPF1_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP6 0xd486
+#define regRCC_DEV1_EPF1_STRAP6_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP13 0xd48d
+#define regRCC_DEV1_EPF1_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_EPF1_STRAP14 0xd48e
+#define regRCC_DEV1_EPF1_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP0 0xd500
+#define regRCC_DEV1_EPF2_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP2 0xd502
+#define regRCC_DEV1_EPF2_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP3 0xd503
+#define regRCC_DEV1_EPF2_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP4 0xd504
+#define regRCC_DEV1_EPF2_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP5 0xd505
+#define regRCC_DEV1_EPF2_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP13 0xd50d
+#define regRCC_DEV1_EPF2_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_EPF2_STRAP14 0xd50e
+#define regRCC_DEV1_EPF2_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP0 0xd580
+#define regRCC_DEV1_EPF3_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP2 0xd582
+#define regRCC_DEV1_EPF3_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP3 0xd583
+#define regRCC_DEV1_EPF3_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP4 0xd584
+#define regRCC_DEV1_EPF3_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP5 0xd585
+#define regRCC_DEV1_EPF3_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP13 0xd58d
+#define regRCC_DEV1_EPF3_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_EPF3_STRAP14 0xd58e
+#define regRCC_DEV1_EPF3_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP0 0xd600
+#define regRCC_DEV1_EPF4_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP2 0xd602
+#define regRCC_DEV1_EPF4_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP3 0xd603
+#define regRCC_DEV1_EPF4_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP4 0xd604
+#define regRCC_DEV1_EPF4_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP5 0xd605
+#define regRCC_DEV1_EPF4_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP13 0xd60d
+#define regRCC_DEV1_EPF4_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_EPF4_STRAP14 0xd60e
+#define regRCC_DEV1_EPF4_STRAP14_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP0 0xd680
+#define regRCC_DEV1_EPF5_STRAP0_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP2 0xd682
+#define regRCC_DEV1_EPF5_STRAP2_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP3 0xd683
+#define regRCC_DEV1_EPF5_STRAP3_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP4 0xd684
+#define regRCC_DEV1_EPF5_STRAP4_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP5 0xd685
+#define regRCC_DEV1_EPF5_STRAP5_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP13 0xd68d
+#define regRCC_DEV1_EPF5_STRAP13_BASE_IDX 5
+#define regRCC_DEV1_EPF5_STRAP14 0xd68e
+#define regRCC_DEV1_EPF5_STRAP14_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP0 0xd800
+#define regRCC_DEV2_EPF0_STRAP0_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP2 0xd802
+#define regRCC_DEV2_EPF0_STRAP2_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP3 0xd803
+#define regRCC_DEV2_EPF0_STRAP3_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP4 0xd804
+#define regRCC_DEV2_EPF0_STRAP4_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP5 0xd805
+#define regRCC_DEV2_EPF0_STRAP5_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP6 0xd806
+#define regRCC_DEV2_EPF0_STRAP6_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP7 0xd807
+#define regRCC_DEV2_EPF0_STRAP7_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP13 0xd80d
+#define regRCC_DEV2_EPF0_STRAP13_BASE_IDX 5
+#define regRCC_DEV2_EPF0_STRAP14 0xd80e
+#define regRCC_DEV2_EPF0_STRAP14_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP0 0xd880
+#define regRCC_DEV2_EPF1_STRAP0_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP2 0xd882
+#define regRCC_DEV2_EPF1_STRAP2_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP3 0xd883
+#define regRCC_DEV2_EPF1_STRAP3_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP4 0xd884
+#define regRCC_DEV2_EPF1_STRAP4_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP5 0xd885
+#define regRCC_DEV2_EPF1_STRAP5_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP6 0xd886
+#define regRCC_DEV2_EPF1_STRAP6_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP13 0xd88d
+#define regRCC_DEV2_EPF1_STRAP13_BASE_IDX 5
+#define regRCC_DEV2_EPF1_STRAP14 0xd88e
+#define regRCC_DEV2_EPF1_STRAP14_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP0 0xd900
+#define regRCC_DEV2_EPF2_STRAP0_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP2 0xd902
+#define regRCC_DEV2_EPF2_STRAP2_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP3 0xd903
+#define regRCC_DEV2_EPF2_STRAP3_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP4 0xd904
+#define regRCC_DEV2_EPF2_STRAP4_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP5 0xd905
+#define regRCC_DEV2_EPF2_STRAP5_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP6 0xd906
+#define regRCC_DEV2_EPF2_STRAP6_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP13 0xd90d
+#define regRCC_DEV2_EPF2_STRAP13_BASE_IDX 5
+#define regRCC_DEV2_EPF2_STRAP14 0xd90e
+#define regRCC_DEV2_EPF2_STRAP14_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_rst_bif_rst_regblk
+// base address: 0x10100000
+#define regHARD_RST_CTRL 0xe000
+#define regHARD_RST_CTRL_BASE_IDX 5
+#define regRSMU_SOFT_RST_CTRL 0xe001
+#define regRSMU_SOFT_RST_CTRL_BASE_IDX 5
+#define regSELF_SOFT_RST 0xe002
+#define regSELF_SOFT_RST_BASE_IDX 5
+#define regBIF_GFX_DRV_VPU_RST 0xe003
+#define regBIF_GFX_DRV_VPU_RST_BASE_IDX 5
+#define regBIF_RST_MISC_CTRL 0xe004
+#define regBIF_RST_MISC_CTRL_BASE_IDX 5
+#define regBIF_RST_MISC_CTRL2 0xe005
+#define regBIF_RST_MISC_CTRL2_BASE_IDX 5
+#define regBIF_RST_MISC_CTRL3 0xe006
+#define regBIF_RST_MISC_CTRL3_BASE_IDX 5
+#define regDEV0_PF0_FLR_RST_CTRL 0xe008
+#define regDEV0_PF0_FLR_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF1_FLR_RST_CTRL 0xe009
+#define regDEV0_PF1_FLR_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF2_FLR_RST_CTRL 0xe00a
+#define regDEV0_PF2_FLR_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF3_FLR_RST_CTRL 0xe00b
+#define regDEV0_PF3_FLR_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF4_FLR_RST_CTRL 0xe00c
+#define regDEV0_PF4_FLR_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF5_FLR_RST_CTRL 0xe00d
+#define regDEV0_PF5_FLR_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF6_FLR_RST_CTRL 0xe00e
+#define regDEV0_PF6_FLR_RST_CTRL_BASE_IDX 5
+#define regBIF_INST_RESET_INTR_STS 0xe010
+#define regBIF_INST_RESET_INTR_STS_BASE_IDX 5
+#define regBIF_PF_FLR_INTR_STS 0xe011
+#define regBIF_PF_FLR_INTR_STS_BASE_IDX 5
+#define regBIF_D3HOTD0_INTR_STS 0xe012
+#define regBIF_D3HOTD0_INTR_STS_BASE_IDX 5
+#define regBIF_POWER_INTR_STS 0xe014
+#define regBIF_POWER_INTR_STS_BASE_IDX 5
+#define regBIF_PF_DSTATE_INTR_STS 0xe015
+#define regBIF_PF_DSTATE_INTR_STS_BASE_IDX 5
+#define regSELF_SOFT_RST_2 0xe016
+#define regSELF_SOFT_RST_2_BASE_IDX 5
+#define regBIF_INST_RESET_INTR_MASK 0xe020
+#define regBIF_INST_RESET_INTR_MASK_BASE_IDX 5
+#define regBIF_PF_FLR_INTR_MASK 0xe021
+#define regBIF_PF_FLR_INTR_MASK_BASE_IDX 5
+#define regBIF_D3HOTD0_INTR_MASK 0xe022
+#define regBIF_D3HOTD0_INTR_MASK_BASE_IDX 5
+#define regBIF_POWER_INTR_MASK 0xe024
+#define regBIF_POWER_INTR_MASK_BASE_IDX 5
+#define regBIF_PF_DSTATE_INTR_MASK 0xe025
+#define regBIF_PF_DSTATE_INTR_MASK_BASE_IDX 5
+#define regBIF_PF_FLR_RST 0xe040
+#define regBIF_PF_FLR_RST_BASE_IDX 5
+#define regBIF_DEV0_PF0_DSTATE_VALUE 0xe050
+#define regBIF_DEV0_PF0_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_DEV0_PF1_DSTATE_VALUE 0xe051
+#define regBIF_DEV0_PF1_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_DEV0_PF2_DSTATE_VALUE 0xe052
+#define regBIF_DEV0_PF2_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_DEV0_PF3_DSTATE_VALUE 0xe053
+#define regBIF_DEV0_PF3_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_DEV0_PF4_DSTATE_VALUE 0xe054
+#define regBIF_DEV0_PF4_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_DEV0_PF5_DSTATE_VALUE 0xe055
+#define regBIF_DEV0_PF5_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_DEV0_PF6_DSTATE_VALUE 0xe056
+#define regBIF_DEV0_PF6_DSTATE_VALUE_BASE_IDX 5
+#define regDEV0_PF0_D3HOTD0_RST_CTRL 0xe078
+#define regDEV0_PF0_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF1_D3HOTD0_RST_CTRL 0xe079
+#define regDEV0_PF1_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF2_D3HOTD0_RST_CTRL 0xe07a
+#define regDEV0_PF2_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF3_D3HOTD0_RST_CTRL 0xe07b
+#define regDEV0_PF3_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF4_D3HOTD0_RST_CTRL 0xe07c
+#define regDEV0_PF4_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF5_D3HOTD0_RST_CTRL 0xe07d
+#define regDEV0_PF5_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regDEV0_PF6_D3HOTD0_RST_CTRL 0xe07e
+#define regDEV0_PF6_D3HOTD0_RST_CTRL_BASE_IDX 5
+#define regBIF_PORT0_DSTATE_VALUE 0xe230
+#define regBIF_PORT0_DSTATE_VALUE_BASE_IDX 5
+#define regBIF_USB_SHUB_RS_RESET_CNTL 0xe231
+#define regBIF_USB_SHUB_RS_RESET_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_misc_bif_misc_regblk
+// base address: 0x10100000
+#define regREGS_ROM_OFFSET_CTRL 0xcc23
+#define regREGS_ROM_OFFSET_CTRL_BASE_IDX 5
+#define regNBIF_STRAP_BIOS_CNTL 0xcc81
+#define regNBIF_STRAP_BIOS_CNTL_BASE_IDX 5
+#define regMISC_SCRATCH 0xe800
+#define regMISC_SCRATCH_BASE_IDX 5
+#define regINTR_LINE_POLARITY 0xe801
+#define regINTR_LINE_POLARITY_BASE_IDX 5
+#define regINTR_LINE_ENABLE 0xe802
+#define regINTR_LINE_ENABLE_BASE_IDX 5
+#define regOUTSTANDING_VC_ALLOC 0xe803
+#define regOUTSTANDING_VC_ALLOC_BASE_IDX 5
+#define regBIFC_MISC_CTRL0 0xe804
+#define regBIFC_MISC_CTRL0_BASE_IDX 5
+#define regBIFC_MISC_CTRL1 0xe805
+#define regBIFC_MISC_CTRL1_BASE_IDX 5
+#define regBIFC_BME_ERR_LOG_LB 0xe806
+#define regBIFC_BME_ERR_LOG_LB_BASE_IDX 5
+#define regBIFC_LC_TIMER_CTRL 0xe807
+#define regBIFC_LC_TIMER_CTRL_BASE_IDX 5
+#define regBIFC_RCCBIH_BME_ERR_LOG0 0xe808
+#define regBIFC_RCCBIH_BME_ERR_LOG0_BASE_IDX 5
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1 0xe80a
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1_BASE_IDX 5
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3 0xe80b
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3_BASE_IDX 5
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5 0xe80c
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5_BASE_IDX 5
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7 0xe80d
+#define regBIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7_BASE_IDX 5
+#define regBIFC_DMA_ATTR_CNTL2_DEV0 0xe81a
+#define regBIFC_DMA_ATTR_CNTL2_DEV0_BASE_IDX 5
+#define regBIFC_MISC_CTRL2 0xe822
+#define regBIFC_MISC_CTRL2_BASE_IDX 5
+#define regBME_DUMMY_CNTL_0 0xe825
+#define regBME_DUMMY_CNTL_0_BASE_IDX 5
+#define regBIFC_THT_CNTL 0xe827
+#define regBIFC_THT_CNTL_BASE_IDX 5
+#define regBIFC_HSTARB_CNTL 0xe828
+#define regBIFC_HSTARB_CNTL_BASE_IDX 5
+#define regBIFC_GSI_CNTL 0xe829
+#define regBIFC_GSI_CNTL_BASE_IDX 5
+#define regBIFC_PCIEFUNC_CNTL 0xe82a
+#define regBIFC_PCIEFUNC_CNTL_BASE_IDX 5
+#define regBIFC_PASID_CHECK_DIS 0xe82b
+#define regBIFC_PASID_CHECK_DIS_BASE_IDX 5
+#define regBIFC_SDP_CNTL_0 0xe82c
+#define regBIFC_SDP_CNTL_0_BASE_IDX 5
+#define regBIFC_SDP_CNTL_1 0xe82d
+#define regBIFC_SDP_CNTL_1_BASE_IDX 5
+#define regBIFC_PASID_STS 0xe82e
+#define regBIFC_PASID_STS_BASE_IDX 5
+#define regBIFC_ATHUB_ACT_CNTL 0xe82f
+#define regBIFC_ATHUB_ACT_CNTL_BASE_IDX 5
+#define regBIFC_PERF_CNTL_0 0xe830
+#define regBIFC_PERF_CNTL_0_BASE_IDX 5
+#define regBIFC_PERF_CNTL_1 0xe831
+#define regBIFC_PERF_CNTL_1_BASE_IDX 5
+#define regBIFC_PERF_CNT_MMIO_RD_L32BIT 0xe832
+#define regBIFC_PERF_CNT_MMIO_RD_L32BIT_BASE_IDX 5
+#define regBIFC_PERF_CNT_MMIO_WR_L32BIT 0xe833
+#define regBIFC_PERF_CNT_MMIO_WR_L32BIT_BASE_IDX 5
+#define regBIFC_PERF_CNT_DMA_RD_L32BIT 0xe834
+#define regBIFC_PERF_CNT_DMA_RD_L32BIT_BASE_IDX 5
+#define regBIFC_PERF_CNT_DMA_WR_L32BIT 0xe835
+#define regBIFC_PERF_CNT_DMA_WR_L32BIT_BASE_IDX 5
+#define regNBIF_REGIF_ERRSET_CTRL 0xe836
+#define regNBIF_REGIF_ERRSET_CTRL_BASE_IDX 5
+#define regBIFC_SDP_CNTL_2 0xe837
+#define regBIFC_SDP_CNTL_2_BASE_IDX 5
+#define regNBIF_PGMST_CTRL 0xe838
+#define regNBIF_PGMST_CTRL_BASE_IDX 5
+#define regNBIF_PGSLV_CTRL 0xe839
+#define regNBIF_PGSLV_CTRL_BASE_IDX 5
+#define regNBIF_PG_MISC_CTRL 0xe83a
+#define regNBIF_PG_MISC_CTRL_BASE_IDX 5
+#define regSMN_MST_EP_CNTL3 0xe83c
+#define regSMN_MST_EP_CNTL3_BASE_IDX 5
+#define regSMN_MST_EP_CNTL4 0xe83d
+#define regSMN_MST_EP_CNTL4_BASE_IDX 5
+#define regSMN_MST_CNTL1 0xe83e
+#define regSMN_MST_CNTL1_BASE_IDX 5
+#define regSMN_MST_EP_CNTL5 0xe83f
+#define regSMN_MST_EP_CNTL5_BASE_IDX 5
+#define regBIF_SELFRING_BUFFER_VID 0xe840
+#define regBIF_SELFRING_BUFFER_VID_BASE_IDX 5
+#define regBIF_SELFRING_VECTOR_CNTL 0xe841
+#define regBIF_SELFRING_VECTOR_CNTL_BASE_IDX 5
+#define regNBIF_STRAP_WRITE_CTRL 0xe845
+#define regNBIF_STRAP_WRITE_CTRL_BASE_IDX 5
+#define regNBIF_INTX_DSTATE_MISC_CNTL 0xe846
+#define regNBIF_INTX_DSTATE_MISC_CNTL_BASE_IDX 5
+#define regNBIF_PENDING_MISC_CNTL 0xe847
+#define regNBIF_PENDING_MISC_CNTL_BASE_IDX 5
+#define regBIF_GMI_WRR_WEIGHT 0xe848
+#define regBIF_GMI_WRR_WEIGHT_BASE_IDX 5
+#define regBIF_GMI_WRR_WEIGHT2 0xe849
+#define regBIF_GMI_WRR_WEIGHT2_BASE_IDX 5
+#define regBIF_GMI_WRR_WEIGHT3 0xe84a
+#define regBIF_GMI_WRR_WEIGHT3_BASE_IDX 5
+#define regNBIF_PWRBRK_REQUEST 0xe84c
+#define regNBIF_PWRBRK_REQUEST_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F0 0xe850
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F0_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F1 0xe851
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F1_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F2 0xe852
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F2_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F3 0xe853
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F3_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F4 0xe854
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F4_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F5 0xe855
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F5_BASE_IDX 5
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F6 0xe856
+#define regBIF_ATOMIC_ERR_LOG_DEV0_F6_BASE_IDX 5
+#define regBIF_DMA_MP4_ERR_LOG 0xe870
+#define regBIF_DMA_MP4_ERR_LOG_BASE_IDX 5
+#define regBIF_PASID_ERR_LOG 0xe871
+#define regBIF_PASID_ERR_LOG_BASE_IDX 5
+#define regBIF_PASID_ERR_CLR 0xe872
+#define regBIF_PASID_ERR_CLR_BASE_IDX 5
+#define regNBIF_VWIRE_CTRL 0xe880
+#define regNBIF_VWIRE_CTRL_BASE_IDX 5
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL 0xe881
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL_BASE_IDX 5
+#define regNBIF_SMN_VWR_VCHG_RST_CTRL0 0xe882
+#define regNBIF_SMN_VWR_VCHG_RST_CTRL0_BASE_IDX 5
+#define regNBIF_SMN_VWR_VCHG_TRIG 0xe884
+#define regNBIF_SMN_VWR_VCHG_TRIG_BASE_IDX 5
+#define regNBIF_SMN_VWR_WTRIG_CNTL 0xe885
+#define regNBIF_SMN_VWR_WTRIG_CNTL_BASE_IDX 5
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL_1 0xe886
+#define regNBIF_SMN_VWR_VCHG_DIS_CTRL_1_BASE_IDX 5
+#define regNBIF_MGCG_CTRL_LCLK 0xe887
+#define regNBIF_MGCG_CTRL_LCLK_BASE_IDX 5
+#define regNBIF_DS_CTRL_LCLK 0xe888
+#define regNBIF_DS_CTRL_LCLK_BASE_IDX 5
+#define regSMN_MST_CNTL0 0xe889
+#define regSMN_MST_CNTL0_BASE_IDX 5
+#define regSMN_MST_EP_CNTL1 0xe88a
+#define regSMN_MST_EP_CNTL1_BASE_IDX 5
+#define regSMN_MST_EP_CNTL2 0xe88b
+#define regSMN_MST_EP_CNTL2_BASE_IDX 5
+#define regNBIF_SDP_VWR_VCHG_DIS_CTRL 0xe88c
+#define regNBIF_SDP_VWR_VCHG_DIS_CTRL_BASE_IDX 5
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL0 0xe88d
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL0_BASE_IDX 5
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL1 0xe88e
+#define regNBIF_SDP_VWR_VCHG_RST_CTRL1_BASE_IDX 5
+#define regNBIF_SDP_VWR_VCHG_TRIG 0xe88f
+#define regNBIF_SDP_VWR_VCHG_TRIG_BASE_IDX 5
+#define regNBIF_SHUB_TODET_CTRL 0xe898
+#define regNBIF_SHUB_TODET_CTRL_BASE_IDX 5
+#define regNBIF_SHUB_TODET_CLIENT_CTRL 0xe899
+#define regNBIF_SHUB_TODET_CLIENT_CTRL_BASE_IDX 5
+#define regNBIF_SHUB_TODET_CLIENT_STATUS 0xe89a
+#define regNBIF_SHUB_TODET_CLIENT_STATUS_BASE_IDX 5
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL 0xe89b
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL_BASE_IDX 5
+#define regNBIF_SHUB_TODET_CLIENT_CTRL2 0xe89c
+#define regNBIF_SHUB_TODET_CLIENT_CTRL2_BASE_IDX 5
+#define regNBIF_SHUB_TODET_CLIENT_STATUS2 0xe89d
+#define regNBIF_SHUB_TODET_CLIENT_STATUS2_BASE_IDX 5
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL2 0xe89e
+#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL2_BASE_IDX 5
+#define regBIFC_BME_ERR_LOG_HB 0xe8ab
+#define regBIFC_BME_ERR_LOG_HB_BASE_IDX 5
+#define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC 0xe8c0
+#define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC_BASE_IDX 5
+#define regBIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC 0xe8c1
+#define regBIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC_BASE_IDX 5
+#define regBIFC_GMI_SDP_REQ_POOLCRED_ALLOC 0xe8c2
+#define regBIFC_GMI_SDP_REQ_POOLCRED_ALLOC_BASE_IDX 5
+#define regBIFC_GMI_SDP_DAT_POOLCRED_ALLOC 0xe8c3
+#define regBIFC_GMI_SDP_DAT_POOLCRED_ALLOC_BASE_IDX 5
+#define regBIFC_GMI_SST_RDRSP_POOLCRED_ALLOC 0xe8c4
+#define regBIFC_GMI_SST_RDRSP_POOLCRED_ALLOC_BASE_IDX 5
+#define regBIFC_GMI_SST_WRRSP_POOLCRED_ALLOC 0xe8c5
+#define regBIFC_GMI_SST_WRRSP_POOLCRED_ALLOC_BASE_IDX 5
+#define regDISCON_HYSTERESIS_HEAD_CTRL 0xe8c6
+#define regDISCON_HYSTERESIS_HEAD_CTRL_BASE_IDX 5
+#define regBIFC_EARLY_WAKEUP_CNTL 0xe8d2
+#define regBIFC_EARLY_WAKEUP_CNTL_BASE_IDX 5
+#define regBIFC_PERF_CNT_MMIO_RD_H16BIT 0xe8f0
+#define regBIFC_PERF_CNT_MMIO_RD_H16BIT_BASE_IDX 5
+#define regBIFC_PERF_CNT_MMIO_WR_H16BIT 0xe8f1
+#define regBIFC_PERF_CNT_MMIO_WR_H16BIT_BASE_IDX 5
+#define regBIFC_PERF_CNT_DMA_RD_H16BIT 0xe8f2
+#define regBIFC_PERF_CNT_DMA_RD_H16BIT_BASE_IDX 5
+#define regBIFC_PERF_CNT_DMA_WR_H16BIT 0xe8f3
+#define regBIFC_PERF_CNT_DMA_WR_H16BIT_BASE_IDX 5
+#define regNBIF_PERF_COM_COUNT_ENABLE 0xe8f4
+#define regNBIF_PERF_COM_COUNT_ENABLE_BASE_IDX 5
+#define regNBIF_BX_PERF_CNT_FSM 0xe8ff
+#define regNBIF_BX_PERF_CNT_FSM_BASE_IDX 5
+#define regNBIF_COM_COUNT_VALUE 0xe908
+#define regNBIF_COM_COUNT_VALUE_BASE_IDX 5
+#define regBIFC_A2S_SDP_PORT_CTRL 0xeb00
+#define regBIFC_A2S_SDP_PORT_CTRL_BASE_IDX 5
+#define regBIFC_A2S_CNTL_SW0 0xeb01
+#define regBIFC_A2S_CNTL_SW0_BASE_IDX 5
+#define regBIFC_A2S_MISC_CNTL 0xeb02
+#define regBIFC_A2S_MISC_CNTL_BASE_IDX 5
+#define regBIFC_A2S_TAG_ALLOC_0 0xeb03
+#define regBIFC_A2S_TAG_ALLOC_0_BASE_IDX 5
+#define regBIFC_A2S_TAG_ALLOC_1 0xeb04
+#define regBIFC_A2S_TAG_ALLOC_1_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_ras_bif_ras_regblk
+// base address: 0x10100000
+#define regBIFL_RAS_CENTRAL_CNTL 0xe400
+#define regBIFL_RAS_CENTRAL_CNTL_BASE_IDX 5
+#define regBIFL_RAS_CENTRAL_STATUS 0xe410
+#define regBIFL_RAS_CENTRAL_STATUS_BASE_IDX 5
+#define regBIFL_RAS_LEAF0_CTRL 0xe420
+#define regBIFL_RAS_LEAF0_CTRL_BASE_IDX 5
+#define regBIFL_RAS_LEAF1_CTRL 0xe421
+#define regBIFL_RAS_LEAF1_CTRL_BASE_IDX 5
+#define regBIFL_RAS_LEAF2_CTRL 0xe422
+#define regBIFL_RAS_LEAF2_CTRL_BASE_IDX 5
+#define regBIFL_RAS_LEAF3_CTRL 0xe423
+#define regBIFL_RAS_LEAF3_CTRL_BASE_IDX 5
+#define regBIFL_RAS_LEAF0_STATUS 0xe430
+#define regBIFL_RAS_LEAF0_STATUS_BASE_IDX 5
+#define regBIFL_RAS_LEAF1_STATUS 0xe431
+#define regBIFL_RAS_LEAF1_STATUS_BASE_IDX 5
+#define regBIFL_RAS_LEAF2_STATUS 0xe432
+#define regBIFL_RAS_LEAF2_STATUS_BASE_IDX 5
+#define regBIFL_RAS_LEAF3_STATUS 0xe433
+#define regBIFL_RAS_LEAF3_STATUS_BASE_IDX 5
+#define regBIFL_IOHUB_RAS_IH_CNTL 0xe7fe
+#define regBIFL_IOHUB_RAS_IH_CNTL_BASE_IDX 5
+#define regBIFL_RAS_VWR_FROM_IOHUB 0xe7ff
+#define regBIFL_RAS_VWR_FROM_IOHUB_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_dwn_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_DWN_DEV0_2_DN_PCIE_RESERVED 0x8d80
+#define regRCC_DWN_DEV0_2_DN_PCIE_RESERVED_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_SCRATCH 0x8d81
+#define regRCC_DWN_DEV0_2_DN_PCIE_SCRATCH_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_CNTL 0x8d83
+#define regRCC_DWN_DEV0_2_DN_PCIE_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL 0x8d84
+#define regRCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2 0x8d85
+#define regRCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL 0x8d86
+#define regRCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL 0x8d87
+#define regRCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_F0 0x8d88
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_F0_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC 0x8d89
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC_BASE_IDX 5
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2 0x8d8a
+#define regRCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_dwnp_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_DWNP_DEV0_2_PCIE_ERR_CNTL 0x8d8c
+#define regRCC_DWNP_DEV0_2_PCIE_ERR_CNTL_BASE_IDX 5
+#define regRCC_DWNP_DEV0_2_PCIE_RX_CNTL 0x8d8d
+#define regRCC_DWNP_DEV0_2_PCIE_RX_CNTL_BASE_IDX 5
+#define regRCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL 0x8d8e
+#define regRCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL_BASE_IDX 5
+#define regRCC_DWNP_DEV0_2_PCIE_LC_CNTL2 0x8d8f
+#define regRCC_DWNP_DEV0_2_PCIE_LC_CNTL2_BASE_IDX 5
+#define regRCC_DWNP_DEV0_2_PCIEP_STRAP_MISC 0x8d90
+#define regRCC_DWNP_DEV0_2_PCIEP_STRAP_MISC_BASE_IDX 5
+#define regRCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP 0x8d91
+#define regRCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_ep_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_EP_DEV0_2_EP_PCIE_SCRATCH 0x8d61
+#define regRCC_EP_DEV0_2_EP_PCIE_SCRATCH_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_CNTL 0x8d63
+#define regRCC_EP_DEV0_2_EP_PCIE_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_CNTL 0x8d64
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_STATUS 0x8d65
+#define regRCC_EP_DEV0_2_EP_PCIE_INT_STATUS_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL2 0x8d66
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL2_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_BUS_CNTL 0x8d67
+#define regRCC_EP_DEV0_2_EP_PCIE_BUS_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_CFG_CNTL 0x8d68
+#define regRCC_EP_DEV0_2_EP_PCIE_CFG_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL 0x8d6a
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 0x8d6b
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 0x8d6c
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 0x8d6c
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 0x8d6c
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 0x8d6c
+#define regRCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC 0x8d6d
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2 0x8d6e
+#define regRCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP 0x8d70
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR 0x8d71
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL 0x8d71
+#define regRCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x8d71
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x8d72
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x8d73
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x8d73
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 5
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x8d73
+#define regRCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_PME_CONTROL 0x8d73
+#define regRCC_EP_DEV0_2_EP_PCIE_PME_CONTROL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIEP_RESERVED 0x8d74
+#define regRCC_EP_DEV0_2_EP_PCIEP_RESERVED_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_CNTL 0x8d76
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID 0x8d77
+#define regRCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_ERR_CNTL 0x8d78
+#define regRCC_EP_DEV0_2_EP_PCIE_ERR_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL 0x8d79
+#define regRCC_EP_DEV0_2_EP_PCIE_RX_CNTL_BASE_IDX 5
+#define regRCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL 0x8d7a
+#define regRCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_dev0_BIFDEC1
+// base address: 0x10120000
+#define regRCC_DEV0_1_RCC_ERR_INT_CNTL 0x8da6
+#define regRCC_DEV0_1_RCC_ERR_INT_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_BACO_CNTL_MISC 0x8da7
+#define regRCC_DEV0_1_RCC_BACO_CNTL_MISC_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_RESET_EN 0x8da8
+#define regRCC_DEV0_1_RCC_RESET_EN_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_VDM_SUPPORT 0x8da9
+#define regRCC_DEV0_2_RCC_VDM_SUPPORT_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0 0x8daa
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1 0x8dab
+#define regRCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_GPUIOV_REGION 0x8dac
+#define regRCC_DEV0_1_RCC_GPUIOV_REGION_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_GPU_HOSTVM_EN 0x8dad
+#define regRCC_DEV0_1_RCC_GPU_HOSTVM_EN_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL 0x8dae
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET 0x8daf
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE 0x8daf
+#define regRCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE0 0x8dde
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE0_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE1 0x8ddf
+#define regRCC_DEV0_1_RCC_PEER_REG_RANGE1_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_BUS_CNTL 0x8de1
+#define regRCC_DEV0_2_RCC_BUS_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONFIG_CNTL 0x8de2
+#define regRCC_DEV0_1_RCC_CONFIG_CNTL_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONFIG_F0_BASE 0x8de6
+#define regRCC_DEV0_1_RCC_CONFIG_F0_BASE_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONFIG_APER_SIZE 0x8de7
+#define regRCC_DEV0_1_RCC_CONFIG_APER_SIZE_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE 0x8de8
+#define regRCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_XDMA_LO 0x8de9
+#define regRCC_DEV0_1_RCC_XDMA_LO_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_XDMA_HI 0x8dea
+#define regRCC_DEV0_1_RCC_XDMA_HI_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_FEATURES_CONTROL_MISC 0x8deb
+#define regRCC_DEV0_2_RCC_FEATURES_CONTROL_MISC_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL1 0x8dec
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL1_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST0 0x8ded
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST0_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST1 0x8dee
+#define regRCC_DEV0_1_RCC_BUSNUM_LIST1_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL2 0x8def
+#define regRCC_DEV0_1_RCC_BUSNUM_CNTL2_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM 0x8df0
+#define regRCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_HOST_BUSNUM 0x8df1
+#define regRCC_DEV0_1_RCC_HOST_BUSNUM_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI 0x8df2
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO 0x8df3
+#define regRCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI 0x8df4
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO 0x8df5
+#define regRCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI 0x8df6
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO 0x8df7
+#define regRCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI 0x8df8
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO 0x8df9
+#define regRCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST0 0x8dfa
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST0_BASE_IDX 5
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST1 0x8dfb
+#define regRCC_DEV0_1_RCC_DEVFUNCNUM_LIST1_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_DEV0_LINK_CNTL 0x8dfd
+#define regRCC_DEV0_2_RCC_DEV0_LINK_CNTL_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_CMN_LINK_CNTL 0x8dfe
+#define regRCC_DEV0_2_RCC_CMN_LINK_CNTL_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE 0x8dff
+#define regRCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_LTR_LSWITCH_CNTL 0x8e00
+#define regRCC_DEV0_2_RCC_LTR_LSWITCH_CNTL_BASE_IDX 5
+#define regRCC_DEV0_2_RCC_MH_ARB_CNTL 0x8e01
+#define regRCC_DEV0_2_RCC_MH_ARB_CNTL_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_bx_SYSDEC
+// base address: 0x10120000
+#define regBIF_BX1_PCIE_INDEX 0x800c
+#define regBIF_BX1_PCIE_INDEX_BASE_IDX 5
+#define regBIF_BX1_PCIE_DATA 0x800d
+#define regBIF_BX1_PCIE_DATA_BASE_IDX 5
+#define regBIF_BX1_PCIE_INDEX2 0x800e
+#define regBIF_BX1_PCIE_INDEX2_BASE_IDX 5
+#define regBIF_BX1_PCIE_DATA2 0x800f
+#define regBIF_BX1_PCIE_DATA2_BASE_IDX 5
+#define regBIF_BX1_PCIE_INDEX_HI 0x8010
+#define regBIF_BX1_PCIE_INDEX_HI_BASE_IDX 5
+#define regBIF_BX1_PCIE_INDEX2_HI 0x8011
+#define regBIF_BX1_PCIE_INDEX2_HI_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_0 0x8048
+#define regBIF_BX1_SBIOS_SCRATCH_0_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_1 0x8049
+#define regBIF_BX1_SBIOS_SCRATCH_1_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_2 0x804a
+#define regBIF_BX1_SBIOS_SCRATCH_2_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_3 0x804b
+#define regBIF_BX1_SBIOS_SCRATCH_3_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_0 0x804c
+#define regBIF_BX1_BIOS_SCRATCH_0_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_1 0x804d
+#define regBIF_BX1_BIOS_SCRATCH_1_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_2 0x804e
+#define regBIF_BX1_BIOS_SCRATCH_2_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_3 0x804f
+#define regBIF_BX1_BIOS_SCRATCH_3_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_4 0x8050
+#define regBIF_BX1_BIOS_SCRATCH_4_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_5 0x8051
+#define regBIF_BX1_BIOS_SCRATCH_5_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_6 0x8052
+#define regBIF_BX1_BIOS_SCRATCH_6_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_7 0x8053
+#define regBIF_BX1_BIOS_SCRATCH_7_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_8 0x8054
+#define regBIF_BX1_BIOS_SCRATCH_8_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_9 0x8055
+#define regBIF_BX1_BIOS_SCRATCH_9_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_10 0x8056
+#define regBIF_BX1_BIOS_SCRATCH_10_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_11 0x8057
+#define regBIF_BX1_BIOS_SCRATCH_11_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_12 0x8058
+#define regBIF_BX1_BIOS_SCRATCH_12_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_13 0x8059
+#define regBIF_BX1_BIOS_SCRATCH_13_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_14 0x805a
+#define regBIF_BX1_BIOS_SCRATCH_14_BASE_IDX 5
+#define regBIF_BX1_BIOS_SCRATCH_15 0x805b
+#define regBIF_BX1_BIOS_SCRATCH_15_BASE_IDX 5
+#define regBIF_BX1_BIF_RLC_INTR_CNTL 0x8060
+#define regBIF_BX1_BIF_RLC_INTR_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_VCE_INTR_CNTL 0x8061
+#define regBIF_BX1_BIF_VCE_INTR_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_UVD_INTR_CNTL 0x8062
+#define regBIF_BX1_BIF_UVD_INTR_CNTL_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR0 0x8080
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR0_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0 0x8081
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR1 0x8082
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR1_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1 0x8083
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR2 0x8084
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR2_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2 0x8085
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR3 0x8086
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR3_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3 0x8087
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR4 0x8088
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR4_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4 0x8089
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR5 0x808a
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR5_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5 0x808b
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR6 0x808c
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR6_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6 0x808d
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR7 0x808e
+#define regBIF_BX1_GFX_MMIOREG_CAM_ADDR7_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7 0x808f
+#define regBIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_CNTL 0x8090
+#define regBIF_BX1_GFX_MMIOREG_CAM_CNTL_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL 0x8091
+#define regBIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_ONE_CPL 0x8092
+#define regBIF_BX1_GFX_MMIOREG_CAM_ONE_CPL_BASE_IDX 5
+#define regBIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL 0x8093
+#define regBIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_0 0x8094
+#define regBIF_BX1_DRIVER_SCRATCH_0_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_1 0x8095
+#define regBIF_BX1_DRIVER_SCRATCH_1_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_2 0x8096
+#define regBIF_BX1_DRIVER_SCRATCH_2_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_3 0x8097
+#define regBIF_BX1_DRIVER_SCRATCH_3_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_4 0x8098
+#define regBIF_BX1_DRIVER_SCRATCH_4_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_5 0x8099
+#define regBIF_BX1_DRIVER_SCRATCH_5_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_6 0x809a
+#define regBIF_BX1_DRIVER_SCRATCH_6_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_7 0x809b
+#define regBIF_BX1_DRIVER_SCRATCH_7_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_8 0x809c
+#define regBIF_BX1_DRIVER_SCRATCH_8_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_9 0x809d
+#define regBIF_BX1_DRIVER_SCRATCH_9_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_10 0x809e
+#define regBIF_BX1_DRIVER_SCRATCH_10_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_11 0x809f
+#define regBIF_BX1_DRIVER_SCRATCH_11_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_12 0x80a0
+#define regBIF_BX1_DRIVER_SCRATCH_12_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_13 0x80a1
+#define regBIF_BX1_DRIVER_SCRATCH_13_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_14 0x80a2
+#define regBIF_BX1_DRIVER_SCRATCH_14_BASE_IDX 5
+#define regBIF_BX1_DRIVER_SCRATCH_15 0x80a3
+#define regBIF_BX1_DRIVER_SCRATCH_15_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_0 0x80a4
+#define regBIF_BX1_FW_SCRATCH_0_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_1 0x80a5
+#define regBIF_BX1_FW_SCRATCH_1_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_2 0x80a6
+#define regBIF_BX1_FW_SCRATCH_2_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_3 0x80a7
+#define regBIF_BX1_FW_SCRATCH_3_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_4 0x80a8
+#define regBIF_BX1_FW_SCRATCH_4_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_5 0x80a9
+#define regBIF_BX1_FW_SCRATCH_5_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_6 0x80aa
+#define regBIF_BX1_FW_SCRATCH_6_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_7 0x80ab
+#define regBIF_BX1_FW_SCRATCH_7_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_8 0x80ac
+#define regBIF_BX1_FW_SCRATCH_8_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_9 0x80ad
+#define regBIF_BX1_FW_SCRATCH_9_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_10 0x80ae
+#define regBIF_BX1_FW_SCRATCH_10_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_11 0x80af
+#define regBIF_BX1_FW_SCRATCH_11_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_12 0x80b0
+#define regBIF_BX1_FW_SCRATCH_12_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_13 0x80b1
+#define regBIF_BX1_FW_SCRATCH_13_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_14 0x80b2
+#define regBIF_BX1_FW_SCRATCH_14_BASE_IDX 5
+#define regBIF_BX1_FW_SCRATCH_15 0x80b3
+#define regBIF_BX1_FW_SCRATCH_15_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_4 0x80b4
+#define regBIF_BX1_SBIOS_SCRATCH_4_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_5 0x80b5
+#define regBIF_BX1_SBIOS_SCRATCH_5_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_6 0x80b6
+#define regBIF_BX1_SBIOS_SCRATCH_6_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_7 0x80b7
+#define regBIF_BX1_SBIOS_SCRATCH_7_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_8 0x80b8
+#define regBIF_BX1_SBIOS_SCRATCH_8_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_9 0x80b9
+#define regBIF_BX1_SBIOS_SCRATCH_9_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_10 0x80ba
+#define regBIF_BX1_SBIOS_SCRATCH_10_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_11 0x80bb
+#define regBIF_BX1_SBIOS_SCRATCH_11_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_12 0x80bc
+#define regBIF_BX1_SBIOS_SCRATCH_12_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_13 0x80bd
+#define regBIF_BX1_SBIOS_SCRATCH_13_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_14 0x80be
+#define regBIF_BX1_SBIOS_SCRATCH_14_BASE_IDX 5
+#define regBIF_BX1_SBIOS_SCRATCH_15 0x80bf
+#define regBIF_BX1_SBIOS_SCRATCH_15_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_bx_pf_SYSPFVFDEC
+// base address: 0x10120000
+#define regBIF_BX_PF1_MM_INDEX 0x8000
+#define regBIF_BX_PF1_MM_INDEX_BASE_IDX 5
+#define regBIF_BX_PF1_MM_DATA 0x8001
+#define regBIF_BX_PF1_MM_DATA_BASE_IDX 5
+#define regBIF_BX_PF1_MM_INDEX_HI 0x8006
+#define regBIF_BX_PF1_MM_INDEX_HI_BASE_IDX 5
+#define regBIF_BX_PF1_RSMU_INDEX 0x8014
+#define regBIF_BX_PF1_RSMU_INDEX_BASE_IDX 5
+#define regBIF_BX_PF1_RSMU_DATA 0x8015
+#define regBIF_BX_PF1_RSMU_DATA_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_bx_BIFDEC1
+// base address: 0x10120000
+#define regBIF_BX1_CC_BIF_BX_STRAP0 0x8e02
+#define regBIF_BX1_CC_BIF_BX_STRAP0_BASE_IDX 5
+#define regBIF_BX1_CC_BIF_BX_PINSTRAP0 0x8e04
+#define regBIF_BX1_CC_BIF_BX_PINSTRAP0_BASE_IDX 5
+#define regBIF_BX1_BIF_MM_INDACCESS_CNTL 0x8e06
+#define regBIF_BX1_BIF_MM_INDACCESS_CNTL_BASE_IDX 5
+#define regBIF_BX1_BUS_CNTL 0x8e07
+#define regBIF_BX1_BUS_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_SCRATCH0 0x8e08
+#define regBIF_BX1_BIF_SCRATCH0_BASE_IDX 5
+#define regBIF_BX1_BIF_SCRATCH1 0x8e09
+#define regBIF_BX1_BIF_SCRATCH1_BASE_IDX 5
+#define regBIF_BX1_BX_RESET_EN 0x8e0d
+#define regBIF_BX1_BX_RESET_EN_BASE_IDX 5
+#define regBIF_BX1_MM_CFGREGS_CNTL 0x8e0e
+#define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 5
+#define regBIF_BX1_BX_RESET_CNTL 0x8e10
+#define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 5
+#define regBIF_BX1_INTERRUPT_CNTL 0x8e11
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 5
+#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 5
+#define regBIF_BX1_CLKREQB_PAD_CNTL 0x8e18
+#define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x8e1b
+#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC_BASE_IDX 5
+#define regBIF_BX1_HDP_ATOMIC_CONTROL_MISC 0x8e1c
+#define regBIF_BX1_HDP_ATOMIC_CONTROL_MISC_BASE_IDX 5
+#define regBIF_BX1_BIF_DOORBELL_CNTL 0x8e1d
+#define regBIF_BX1_BIF_DOORBELL_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_DOORBELL_INT_CNTL 0x8e1e
+#define regBIF_BX1_BIF_DOORBELL_INT_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_FB_EN 0x8e20
+#define regBIF_BX1_BIF_FB_EN_BASE_IDX 5
+#define regBIF_BX1_BIF_INTR_CNTL 0x8e21
+#define regBIF_BX1_BIF_INTR_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_MST_TRANS_PENDING_VF 0x8e29
+#define regBIF_BX1_BIF_MST_TRANS_PENDING_VF_BASE_IDX 5
+#define regBIF_BX1_BIF_SLV_TRANS_PENDING_VF 0x8e2a
+#define regBIF_BX1_BIF_SLV_TRANS_PENDING_VF_BASE_IDX 5
+#define regBIF_BX1_BACO_CNTL 0x8e2b
+#define regBIF_BX1_BACO_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_BACO_EXIT_TIME0 0x8e2c
+#define regBIF_BX1_BIF_BACO_EXIT_TIME0_BASE_IDX 5
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER1 0x8e2d
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER1_BASE_IDX 5
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER2 0x8e2e
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER2_BASE_IDX 5
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER3 0x8e2f
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER3_BASE_IDX 5
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER4 0x8e30
+#define regBIF_BX1_BIF_BACO_EXIT_TIMER4_BASE_IDX 5
+#define regBIF_BX1_MEM_TYPE_CNTL 0x8e31
+#define regBIF_BX1_MEM_TYPE_CNTL_BASE_IDX 5
+#define regBIF_BX1_VF_REGWR_EN 0x8e44
+#define regBIF_BX1_VF_REGWR_EN_BASE_IDX 5
+#define regBIF_BX1_VF_DOORBELL_EN 0x8e45
+#define regBIF_BX1_VF_DOORBELL_EN_BASE_IDX 5
+#define regBIF_BX1_VF_FB_EN 0x8e46
+#define regBIF_BX1_VF_FB_EN_BASE_IDX 5
+#define regBIF_BX1_VF_REGWR_STATUS 0x8e47
+#define regBIF_BX1_VF_REGWR_STATUS_BASE_IDX 5
+#define regBIF_BX1_VF_DOORBELL_STATUS 0x8e48
+#define regBIF_BX1_VF_DOORBELL_STATUS_BASE_IDX 5
+#define regBIF_BX1_VF_FB_STATUS 0x8e49
+#define regBIF_BX1_VF_FB_STATUS_BASE_IDX 5
+#define regBIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL 0x8e4d
+#define regBIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL_BASE_IDX 5
+#define regBIF_BX1_REMAP_HDP_REG_FLUSH_CNTL 0x8e4e
+#define regBIF_BX1_REMAP_HDP_REG_FLUSH_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_RB_CNTL 0x8e4f
+#define regBIF_BX1_BIF_RB_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_RB_BASE 0x8e50
+#define regBIF_BX1_BIF_RB_BASE_BASE_IDX 5
+#define regBIF_BX1_BIF_RB_RPTR 0x8e51
+#define regBIF_BX1_BIF_RB_RPTR_BASE_IDX 5
+#define regBIF_BX1_BIF_RB_WPTR 0x8e52
+#define regBIF_BX1_BIF_RB_WPTR_BASE_IDX 5
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_HI 0x8e53
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_HI_BASE_IDX 5
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_LO 0x8e54
+#define regBIF_BX1_BIF_RB_WPTR_ADDR_LO_BASE_IDX 5
+#define regBIF_BX1_MAILBOX_INDEX 0x8e55
+#define regBIF_BX1_MAILBOX_INDEX_BASE_IDX 5
+#define regBIF_BX1_BIF_MP1_INTR_CTRL 0x8e62
+#define regBIF_BX1_BIF_MP1_INTR_CTRL_BASE_IDX 5
+#define regBIF_BX1_BIF_PERSTB_PAD_CNTL 0x8e65
+#define regBIF_BX1_BIF_PERSTB_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_PX_EN_PAD_CNTL 0x8e66
+#define regBIF_BX1_BIF_PX_EN_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_REFPADKIN_PAD_CNTL 0x8e67
+#define regBIF_BX1_BIF_REFPADKIN_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_CLKREQB_PAD_CNTL 0x8e68
+#define regBIF_BX1_BIF_CLKREQB_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_PWRBRK_PAD_CNTL 0x8e69
+#define regBIF_BX1_BIF_PWRBRK_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_WAKEB_PAD_CNTL 0x8e6d
+#define regBIF_BX1_BIF_WAKEB_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL 0x8e6e
+#define regBIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL_BASE_IDX 5
+#define regBIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL 0x8e70
+#define regBIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL_BASE_IDX 5
+#define regBIF_BX1_BIF_S5_MEM_POWER_CTRL0 0x8e71
+#define regBIF_BX1_BIF_S5_MEM_POWER_CTRL0_BASE_IDX 5
+#define regBIF_BX1_BIF_S5_MEM_POWER_CTRL1 0x8e72
+#define regBIF_BX1_BIF_S5_MEM_POWER_CTRL1_BASE_IDX 5
+#define regBIF_BX1_BIF_S5_DUMMY_REGS 0x8e73
+#define regBIF_BX1_BIF_S5_DUMMY_REGS_BASE_IDX 5
+
+
+// addressBlock: nbif_bif_bx_pf_BIFPFVFDEC1
+// base address: 0x10120000
+#define regBIF_BX_PF1_BIF_BME_STATUS 0x8e0b
+#define regBIF_BX_PF1_BIF_BME_STATUS_BASE_IDX 5
+#define regBIF_BX_PF1_BIF_ATOMIC_ERR_LOG 0x8e0c
+#define regBIF_BX_PF1_BIF_ATOMIC_ERR_LOG_BASE_IDX 5
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x8e13
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 5
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x8e14
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 5
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL 0x8e15
+#define regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 5
+#define regBIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL 0x8e16
+#define regBIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 5
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL 0x8e17
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 5
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x8e19
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 5
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x8e1a
+#define regBIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 5
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_REQ 0x8e26
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_REQ_BASE_IDX 5
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_DONE 0x8e27
+#define regBIF_BX_PF1_GPU_HDP_FLUSH_DONE_BASE_IDX 5
+#define regBIF_BX_PF1_BIF_TRANS_PENDING 0x8e28
+#define regBIF_BX_PF1_BIF_TRANS_PENDING_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0 0x8e56
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1 0x8e57
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2 0x8e58
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3 0x8e59
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0 0x8e5a
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1 0x8e5b
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2 0x8e5c
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3 0x8e5d
+#define regBIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_CONTROL 0x8e5e
+#define regBIF_BX_PF1_MAILBOX_CONTROL_BASE_IDX 5
+#define regBIF_BX_PF1_MAILBOX_INT_CNTL 0x8e5f
+#define regBIF_BX_PF1_MAILBOX_INT_CNTL_BASE_IDX 5
+#define regBIF_BX_PF1_BIF_VMHV_MAILBOX 0x8e60
+#define regBIF_BX_PF1_BIF_VMHV_MAILBOX_BASE_IDX 5
+
+
+// addressBlock: nbif_rcc_strap_BIFDEC1:1
+// base address: 0x10120000
+#define regRCC_STRAP2_RCC_BIF_STRAP0 0x8d20
+#define regRCC_STRAP2_RCC_BIF_STRAP0_BASE_IDX 5
+#define regRCC_STRAP2_RCC_BIF_STRAP1 0x8d21
+#define regRCC_STRAP2_RCC_BIF_STRAP1_BASE_IDX 5
+#define regRCC_STRAP2_RCC_BIF_STRAP2 0x8d25
+#define regRCC_STRAP2_RCC_BIF_STRAP2_BASE_IDX 5
+#define regRCC_STRAP2_RCC_BIF_STRAP3 0x8d26
+#define regRCC_STRAP2_RCC_BIF_STRAP3_BASE_IDX 5
+#define regRCC_STRAP2_RCC_BIF_STRAP4 0x8d27
+#define regRCC_STRAP2_RCC_BIF_STRAP4_BASE_IDX 5
+#define regRCC_STRAP2_RCC_BIF_STRAP5 0x8d28
+#define regRCC_STRAP2_RCC_BIF_STRAP5_BASE_IDX 5
+#define regRCC_STRAP2_RCC_BIF_STRAP6 0x8d29
+#define regRCC_STRAP2_RCC_BIF_STRAP6_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP0 0x8d2d
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP0_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP1 0x8d2e
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP1_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP10 0x8d2f
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP10_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP11 0x8d30
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP11_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP12 0x8d31
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP12_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP13 0x8d32
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP13_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP14 0x8d33
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP14_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP2 0x8d34
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP2_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP3 0x8d35
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP3_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP4 0x8d36
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP4_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP5 0x8d37
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP5_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP6 0x8d38
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP6_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP7 0x8d39
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP7_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP8 0x8d3a
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP8_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP9 0x8d3b
+#define regRCC_STRAP2_RCC_DEV0_PORT_STRAP9_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP0 0x8d3c
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP0_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP1 0x8d3d
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP1_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP13 0x8d3e
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP13_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP14 0x8d3f
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP14_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP15 0x8d40
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP15_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP16 0x8d41
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP16_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP17 0x8d42
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP17_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP18 0x8d43
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP18_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP2 0x8d44
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP2_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP3 0x8d46
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP3_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP4 0x8d47
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP4_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP5 0x8d48
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP5_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP8 0x8d49
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP8_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP9 0x8d4a
+#define regRCC_STRAP2_RCC_DEV0_EPF0_STRAP9_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP0 0x8d4b
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP0_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP2 0x8d56
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP2_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP20 0x8d57
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP20_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP21 0x8d58
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP21_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP3 0x8d59
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP3_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP4 0x8d5a
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP4_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP5 0x8d5b
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP5_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP6 0x8d5c
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP6_BASE_IDX 5
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP7 0x8d5d
+#define regRCC_STRAP2_RCC_DEV0_EPF1_STRAP7_BASE_IDX 5
+
+
+// addressBlock: nbif_gdc_dma_sion_SIONDEC
+// base address: 0x1400000
+#define regGDC_DMA_SION_CL0_RdRsp_BurstTarget_REG0 0x4f7400
+#define regGDC_DMA_SION_CL0_RdRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_RdRsp_BurstTarget_REG1 0x4f7401
+#define regGDC_DMA_SION_CL0_RdRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_RdRsp_TimeSlot_REG0 0x4f7402
+#define regGDC_DMA_SION_CL0_RdRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_RdRsp_TimeSlot_REG1 0x4f7403
+#define regGDC_DMA_SION_CL0_RdRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_WrRsp_BurstTarget_REG0 0x4f7404
+#define regGDC_DMA_SION_CL0_WrRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_WrRsp_BurstTarget_REG1 0x4f7405
+#define regGDC_DMA_SION_CL0_WrRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_WrRsp_TimeSlot_REG0 0x4f7406
+#define regGDC_DMA_SION_CL0_WrRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_WrRsp_TimeSlot_REG1 0x4f7407
+#define regGDC_DMA_SION_CL0_WrRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_Req_BurstTarget_REG0 0x4f7408
+#define regGDC_DMA_SION_CL0_Req_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_Req_BurstTarget_REG1 0x4f7409
+#define regGDC_DMA_SION_CL0_Req_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_Req_TimeSlot_REG0 0x4f740a
+#define regGDC_DMA_SION_CL0_Req_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_Req_TimeSlot_REG1 0x4f740b
+#define regGDC_DMA_SION_CL0_Req_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG0 0x4f740c
+#define regGDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG1 0x4f740d
+#define regGDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG0 0x4f740e
+#define regGDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG1 0x4f740f
+#define regGDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG0 0x4f7410
+#define regGDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG1 0x4f7411
+#define regGDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG0 0x4f7412
+#define regGDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG1 0x4f7413
+#define regGDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_RdRsp_BurstTarget_REG0 0x4f7414
+#define regGDC_DMA_SION_CL1_RdRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_RdRsp_BurstTarget_REG1 0x4f7415
+#define regGDC_DMA_SION_CL1_RdRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_RdRsp_TimeSlot_REG0 0x4f7416
+#define regGDC_DMA_SION_CL1_RdRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_RdRsp_TimeSlot_REG1 0x4f7417
+#define regGDC_DMA_SION_CL1_RdRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_WrRsp_BurstTarget_REG0 0x4f7418
+#define regGDC_DMA_SION_CL1_WrRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_WrRsp_BurstTarget_REG1 0x4f7419
+#define regGDC_DMA_SION_CL1_WrRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_WrRsp_TimeSlot_REG0 0x4f741a
+#define regGDC_DMA_SION_CL1_WrRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_WrRsp_TimeSlot_REG1 0x4f741b
+#define regGDC_DMA_SION_CL1_WrRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_Req_BurstTarget_REG0 0x4f741c
+#define regGDC_DMA_SION_CL1_Req_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_Req_BurstTarget_REG1 0x4f741d
+#define regGDC_DMA_SION_CL1_Req_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_Req_TimeSlot_REG0 0x4f741e
+#define regGDC_DMA_SION_CL1_Req_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_Req_TimeSlot_REG1 0x4f741f
+#define regGDC_DMA_SION_CL1_Req_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG0 0x4f7420
+#define regGDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG1 0x4f7421
+#define regGDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG0 0x4f7422
+#define regGDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG1 0x4f7423
+#define regGDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG0 0x4f7424
+#define regGDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG1 0x4f7425
+#define regGDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG0 0x4f7426
+#define regGDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG1 0x4f7427
+#define regGDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_RdRsp_BurstTarget_REG0 0x4f7428
+#define regGDC_DMA_SION_CL2_RdRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_RdRsp_BurstTarget_REG1 0x4f7429
+#define regGDC_DMA_SION_CL2_RdRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_RdRsp_TimeSlot_REG0 0x4f742a
+#define regGDC_DMA_SION_CL2_RdRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_RdRsp_TimeSlot_REG1 0x4f742b
+#define regGDC_DMA_SION_CL2_RdRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_WrRsp_BurstTarget_REG0 0x4f742c
+#define regGDC_DMA_SION_CL2_WrRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_WrRsp_BurstTarget_REG1 0x4f742d
+#define regGDC_DMA_SION_CL2_WrRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_WrRsp_TimeSlot_REG0 0x4f742e
+#define regGDC_DMA_SION_CL2_WrRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_WrRsp_TimeSlot_REG1 0x4f742f
+#define regGDC_DMA_SION_CL2_WrRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_Req_BurstTarget_REG0 0x4f7430
+#define regGDC_DMA_SION_CL2_Req_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_Req_BurstTarget_REG1 0x4f7431
+#define regGDC_DMA_SION_CL2_Req_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_Req_TimeSlot_REG0 0x4f7432
+#define regGDC_DMA_SION_CL2_Req_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_Req_TimeSlot_REG1 0x4f7433
+#define regGDC_DMA_SION_CL2_Req_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG0 0x4f7434
+#define regGDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG1 0x4f7435
+#define regGDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG0 0x4f7436
+#define regGDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG1 0x4f7437
+#define regGDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG0 0x4f7438
+#define regGDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG1 0x4f7439
+#define regGDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG0 0x4f743a
+#define regGDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG1 0x4f743b
+#define regGDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_RdRsp_BurstTarget_REG0 0x4f743c
+#define regGDC_DMA_SION_CL3_RdRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_RdRsp_BurstTarget_REG1 0x4f743d
+#define regGDC_DMA_SION_CL3_RdRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_RdRsp_TimeSlot_REG0 0x4f743e
+#define regGDC_DMA_SION_CL3_RdRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_RdRsp_TimeSlot_REG1 0x4f743f
+#define regGDC_DMA_SION_CL3_RdRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_WrRsp_BurstTarget_REG0 0x4f7440
+#define regGDC_DMA_SION_CL3_WrRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_WrRsp_BurstTarget_REG1 0x4f7441
+#define regGDC_DMA_SION_CL3_WrRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_WrRsp_TimeSlot_REG0 0x4f7442
+#define regGDC_DMA_SION_CL3_WrRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_WrRsp_TimeSlot_REG1 0x4f7443
+#define regGDC_DMA_SION_CL3_WrRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_Req_BurstTarget_REG0 0x4f7444
+#define regGDC_DMA_SION_CL3_Req_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_Req_BurstTarget_REG1 0x4f7445
+#define regGDC_DMA_SION_CL3_Req_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_Req_TimeSlot_REG0 0x4f7446
+#define regGDC_DMA_SION_CL3_Req_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_Req_TimeSlot_REG1 0x4f7447
+#define regGDC_DMA_SION_CL3_Req_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG0 0x4f7448
+#define regGDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG1 0x4f7449
+#define regGDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG0 0x4f744a
+#define regGDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG1 0x4f744b
+#define regGDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG0 0x4f744c
+#define regGDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG1 0x4f744d
+#define regGDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG0 0x4f744e
+#define regGDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG1 0x4f744f
+#define regGDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_DMA_SION_CNTL_REG0 0x4f7450
+#define regGDC_DMA_SION_CNTL_REG0_BASE_IDX 3
+#define regGDC_DMA_SION_CNTL_REG1 0x4f7451
+#define regGDC_DMA_SION_CNTL_REG1_BASE_IDX 3
+
+
+// addressBlock: nbif_gdc_hst_sion_SIONDEC
+// base address: 0x1400000
+#define regGDC_HST_SION_CL0_RdRsp_BurstTarget_REG0 0x4f7600
+#define regGDC_HST_SION_CL0_RdRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_RdRsp_BurstTarget_REG1 0x4f7601
+#define regGDC_HST_SION_CL0_RdRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_RdRsp_TimeSlot_REG0 0x4f7602
+#define regGDC_HST_SION_CL0_RdRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_RdRsp_TimeSlot_REG1 0x4f7603
+#define regGDC_HST_SION_CL0_RdRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_WrRsp_BurstTarget_REG0 0x4f7604
+#define regGDC_HST_SION_CL0_WrRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_WrRsp_BurstTarget_REG1 0x4f7605
+#define regGDC_HST_SION_CL0_WrRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_WrRsp_TimeSlot_REG0 0x4f7606
+#define regGDC_HST_SION_CL0_WrRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_WrRsp_TimeSlot_REG1 0x4f7607
+#define regGDC_HST_SION_CL0_WrRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_Req_BurstTarget_REG0 0x4f7608
+#define regGDC_HST_SION_CL0_Req_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_Req_BurstTarget_REG1 0x4f7609
+#define regGDC_HST_SION_CL0_Req_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_Req_TimeSlot_REG0 0x4f760a
+#define regGDC_HST_SION_CL0_Req_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_Req_TimeSlot_REG1 0x4f760b
+#define regGDC_HST_SION_CL0_Req_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG0 0x4f760c
+#define regGDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG1 0x4f760d
+#define regGDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_DataPoolCredit_Alloc_REG0 0x4f760e
+#define regGDC_HST_SION_CL0_DataPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_DataPoolCredit_Alloc_REG1 0x4f760f
+#define regGDC_HST_SION_CL0_DataPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG0 0x4f7610
+#define regGDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG1 0x4f7611
+#define regGDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG0 0x4f7612
+#define regGDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG1 0x4f7613
+#define regGDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_RdRsp_BurstTarget_REG0 0x4f7614
+#define regGDC_HST_SION_CL1_RdRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_RdRsp_BurstTarget_REG1 0x4f7615
+#define regGDC_HST_SION_CL1_RdRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_RdRsp_TimeSlot_REG0 0x4f7616
+#define regGDC_HST_SION_CL1_RdRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_RdRsp_TimeSlot_REG1 0x4f7617
+#define regGDC_HST_SION_CL1_RdRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_WrRsp_BurstTarget_REG0 0x4f7618
+#define regGDC_HST_SION_CL1_WrRsp_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_WrRsp_BurstTarget_REG1 0x4f7619
+#define regGDC_HST_SION_CL1_WrRsp_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_WrRsp_TimeSlot_REG0 0x4f761a
+#define regGDC_HST_SION_CL1_WrRsp_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_WrRsp_TimeSlot_REG1 0x4f761b
+#define regGDC_HST_SION_CL1_WrRsp_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_Req_BurstTarget_REG0 0x4f761c
+#define regGDC_HST_SION_CL1_Req_BurstTarget_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_Req_BurstTarget_REG1 0x4f761d
+#define regGDC_HST_SION_CL1_Req_BurstTarget_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_Req_TimeSlot_REG0 0x4f761e
+#define regGDC_HST_SION_CL1_Req_TimeSlot_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_Req_TimeSlot_REG1 0x4f761f
+#define regGDC_HST_SION_CL1_Req_TimeSlot_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG0 0x4f7620
+#define regGDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG1 0x4f7621
+#define regGDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_DataPoolCredit_Alloc_REG0 0x4f7622
+#define regGDC_HST_SION_CL1_DataPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_DataPoolCredit_Alloc_REG1 0x4f7623
+#define regGDC_HST_SION_CL1_DataPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG0 0x4f7624
+#define regGDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG1 0x4f7625
+#define regGDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG0 0x4f7626
+#define regGDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG1 0x4f7627
+#define regGDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG1_BASE_IDX 3
+#define regGDC_HST_SION_CNTL_REG0 0x4f7628
+#define regGDC_HST_SION_CNTL_REG0_BASE_IDX 3
+#define regGDC_HST_SION_CNTL_REG1 0x4f7629
+#define regGDC_HST_SION_CNTL_REG1_BASE_IDX 3
+
+
+// addressBlock: nbif_gdc_GDCDEC
+// base address: 0x1400000
+#define regGDC1_SHUB_REGS_IF_CTL 0x4f0aa1
+#define regGDC1_SHUB_REGS_IF_CTL_BASE_IDX 3
+#define regGDC1_A2S_QUEUE_FIFO_ARB_CNTL 0x4f0aa2
+#define regGDC1_A2S_QUEUE_FIFO_ARB_CNTL_BASE_IDX 3
+#define regGDC1_NGDC_MGCG_CTRL 0x4f0aa7
+#define regGDC1_NGDC_MGCG_CTRL_BASE_IDX 3
+#define regGDC1_S2A_MISC_CNTL 0x4f0aa8
+#define regGDC1_S2A_MISC_CNTL_BASE_IDX 3
+#define regGDC1_NGDC_EARLY_WAKEUP_CTRL 0x4f0aac
+#define regGDC1_NGDC_EARLY_WAKEUP_CTRL_BASE_IDX 3
+#define regGDC1_NGDC_PG_MISC_CTRL 0x4f0ab0
+#define regGDC1_NGDC_PG_MISC_CTRL_BASE_IDX 3
+#define regGDC1_NGDC_PGMST_CTRL 0x4f0ab1
+#define regGDC1_NGDC_PGMST_CTRL_BASE_IDX 3
+#define regGDC1_NGDC_PGSLV_CTRL 0x4f0ab2
+#define regGDC1_NGDC_PGSLV_CTRL_BASE_IDX 3
+#define regGDC1_ATDMA_MISC_CNTL 0x4f0b01
+#define regGDC1_ATDMA_MISC_CNTL_BASE_IDX 3
+
+
+// addressBlock: nbif_gdc_ras_gdc_ras_regblk
+// base address: 0x1400000
+#define regGDCSOC_ERR_RSP_CNTL 0x4f5c00
+#define regGDCSOC_ERR_RSP_CNTL_BASE_IDX 3
+#define regGDCSOC_RAS_CENTRAL_STATUS 0x4f5c10
+#define regGDCSOC_RAS_CENTRAL_STATUS_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF0_CTRL 0x4f5c20
+#define regGDCSOC_RAS_LEAF0_CTRL_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF1_CTRL 0x4f5c21
+#define regGDCSOC_RAS_LEAF1_CTRL_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF2_CTRL 0x4f5c22
+#define regGDCSOC_RAS_LEAF2_CTRL_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF3_CTRL 0x4f5c23
+#define regGDCSOC_RAS_LEAF3_CTRL_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF4_CTRL 0x4f5c24
+#define regGDCSOC_RAS_LEAF4_CTRL_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF2_MISC_CTRL 0x4f5c2e
+#define regGDCSOC_RAS_LEAF2_MISC_CTRL_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF2_MISC_CTRL2 0x4f5c2f
+#define regGDCSOC_RAS_LEAF2_MISC_CTRL2_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF0_STATUS 0x4f5c30
+#define regGDCSOC_RAS_LEAF0_STATUS_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF1_STATUS 0x4f5c31
+#define regGDCSOC_RAS_LEAF1_STATUS_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF2_STATUS 0x4f5c32
+#define regGDCSOC_RAS_LEAF2_STATUS_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF3_STATUS 0x4f5c33
+#define regGDCSOC_RAS_LEAF3_STATUS_BASE_IDX 3
+#define regGDCSOC_RAS_LEAF4_STATUS 0x4f5c34
+#define regGDCSOC_RAS_LEAF4_STATUS_BASE_IDX 3
+
+
+// addressBlock: nbif_gdc_rst_GDCRST_DEC
+// base address: 0x1400000
+#define regSHUB_PF_FLR_RST 0x4f7800
+#define regSHUB_PF_FLR_RST_BASE_IDX 3
+#define regSHUB_GFX_DRV_VPU_RST 0x4f7801
+#define regSHUB_GFX_DRV_VPU_RST_BASE_IDX 3
+#define regSHUB_LINK_RESET 0x4f7802
+#define regSHUB_LINK_RESET_BASE_IDX 3
+#define regSHUB_HARD_RST_CTRL 0x4f7810
+#define regSHUB_HARD_RST_CTRL_BASE_IDX 3
+#define regSHUB_SOFT_RST_CTRL 0x4f7811
+#define regSHUB_SOFT_RST_CTRL_BASE_IDX 3
+#define regSHUB_SDP_PORT_RST 0x4f7812
+#define regSHUB_SDP_PORT_RST_BASE_IDX 3
+#define regSHUB_RST_MISC_TRL 0x4f7813
+#define regSHUB_RST_MISC_TRL_BASE_IDX 3
+
+
+// addressBlock: nbif_gdc_s2a_GDCS2A_DEC
+// base address: 0x1400000
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL 0x4f0aeb
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL 0x4f0aec
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL 0x4f0aed
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL 0x4f0aee
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL 0x4f0aef
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL 0x4f0af0
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL 0x4f0af1
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL 0x4f0af2
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL 0x4f0af3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL 0x4f0af4
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL 0x4f0af5
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL 0x4f0af6
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL 0x4f0af7
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL 0x4f0af8
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL 0x4f0af9
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL 0x4f0afa
+#define regGDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL_BASE_IDX 3
+#define regGDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG 0x4f0afb
+#define regGDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG_BASE_IDX 3
+#define regGDC_S2A1_NBIF_GFX_DOORBELL_STATUS 0x4f0afc
+#define regGDC_S2A1_NBIF_GFX_DOORBELL_STATUS_BASE_IDX 3
+
+
+// addressBlock: nbif_gdc_a2s_GDCA2S_DEC
+// base address: 0x1400000
+#define regA2S_CNTL_SW0 0x4f0c40
+#define regA2S_CNTL_SW0_BASE_IDX 3
+#define regA2S_CNTL_SW1 0x4f0c41
+#define regA2S_CNTL_SW1_BASE_IDX 3
+#define regA2S_MISC_CNTL 0x4f0c72
+#define regA2S_MISC_CNTL_BASE_IDX 3
+#define regA2S_TAG_ALLOC_0 0x4f0c74
+#define regA2S_TAG_ALLOC_0_BASE_IDX 3
+#define regA2S_TAG_ALLOC_1 0x4f0c75
+#define regA2S_TAG_ALLOC_1_BASE_IDX 3
+
+
+// addressBlock: nbif_syshub_mmreg_syshubdirect
+// base address: 0x1400000
+#define regHST_CLK0_SW0_CL0_CNTL 0x4f3d40
+#define regHST_CLK0_SW0_CL0_CNTL_BASE_IDX 3
+#define regHST_CLK0_SW1_CL0_CNTL 0x4f3d60
+#define regHST_CLK0_SW1_CL0_CNTL_BASE_IDX 3
+#define regDMA_CLK0_SW0_CL0_CNTL 0x4f3e40
+#define regDMA_CLK0_SW0_CL0_CNTL_BASE_IDX 3
+#define regNIC400_1_ASIB_0_FN_MOD 0x4fbc42
+#define regNIC400_1_ASIB_0_FN_MOD_BASE_IDX 3
+#define regNIC400_1_IB_0_FN_MOD 0x4ff842
+#define regNIC400_1_IB_0_FN_MOD_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf0_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF0_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF0_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf0_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF0_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF0_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf0_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF0_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf1_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF1_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF1_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf1_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF1_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF1_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf1_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF1_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf2_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF2_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF2_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf2_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF2_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF2_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf2_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF2_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf3_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF3_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF3_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf3_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF3_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF3_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf3_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF3_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf4_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF4_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF4_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf4_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF4_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF4_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf4_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF4_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf5_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF5_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF5_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf5_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF5_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF5_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf5_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF5_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf6_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF6_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF6_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf6_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF6_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF6_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf6_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF6_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf7_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF7_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF7_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf7_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF7_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF7_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf7_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF7_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf8_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf8_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF8_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF8_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF8_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF8_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf8_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF8_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF8_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf8_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF8_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf9_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf9_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF9_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF9_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF9_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF9_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf9_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF9_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF9_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf9_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF9_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf10_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf10_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF10_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF10_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF10_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF10_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf10_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF10_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF10_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf10_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF10_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf11_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf11_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF11_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF11_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF11_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF11_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf11_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF11_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF11_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf11_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF11_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf12_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf12_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF12_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF12_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF12_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF12_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf12_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF12_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF12_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf12_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF12_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf13_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf13_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF13_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF13_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF13_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF13_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf13_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF13_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF13_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf13_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF13_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf14_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf14_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF14_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF14_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF14_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF14_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf14_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF14_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF14_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf14_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF14_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf15_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL 0x00f9
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL 0x00fa
+#define regBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf15_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF15_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF15_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF15_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF15_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf15_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF15_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF15_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf15_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF15_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf16_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF16_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF16_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf16_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF16_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF16_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF16_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF16_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF16_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF16_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf16_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF16_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF16_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF16_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF16_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF16_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF16_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF16_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF16_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf16_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF16_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf17_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF17_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF17_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf17_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF17_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF17_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF17_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF17_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF17_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF17_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf17_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF17_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF17_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF17_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF17_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF17_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF17_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF17_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF17_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf17_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF17_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf18_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF18_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF18_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf18_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF18_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF18_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF18_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF18_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF18_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF18_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf18_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF18_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF18_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF18_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF18_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF18_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF18_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF18_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF18_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf18_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF18_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf19_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF19_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF19_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf19_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF19_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF19_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF19_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF19_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF19_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF19_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf19_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF19_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF19_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF19_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF19_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF19_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF19_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF19_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF19_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf19_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF19_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf20_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF20_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF20_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf20_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF20_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF20_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF20_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF20_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF20_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF20_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf20_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF20_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF20_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF20_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF20_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF20_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF20_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF20_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF20_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf20_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF20_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf21_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF21_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF21_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf21_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF21_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF21_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF21_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF21_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF21_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF21_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf21_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF21_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF21_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF21_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF21_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF21_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF21_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF21_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF21_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf21_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF21_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf22_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF22_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF22_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf22_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF22_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF22_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF22_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF22_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF22_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF22_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf22_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF22_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF22_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF22_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF22_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF22_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF22_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF22_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF22_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf22_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF22_GFXMSIX_PBA_BASE_IDX 3
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf23_BIFPFVFDEC1
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS 0x00eb
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG 0x00ec
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3
+#define regBIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4
+#define regBIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5
+#define regBIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6
+#define regBIF_BX_DEV0_EPF0_VF23_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7
+#define regBIF_BX_DEV0_EPF0_VF23_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ 0x0106
+#define regBIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE 0x0107
+#define regBIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING 0x0108
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW0 0x0136
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW1 0x0137
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW2 0x0138
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW3 0x0139
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW0 0x013a
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW1 0x013b
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW2 0x013c
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW3 0x013d
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL 0x013e
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL 0x013f
+#define regBIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL_BASE_IDX 2
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX 0x0140
+#define regBIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX_BASE_IDX 2
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf23_SYSPFVFDEC
+// base address: 0x0
+#define regBIF_BX_DEV0_EPF0_VF23_MM_INDEX 0x0000
+#define regBIF_BX_DEV0_EPF0_VF23_MM_INDEX_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF23_MM_DATA 0x0001
+#define regBIF_BX_DEV0_EPF0_VF23_MM_DATA_BASE_IDX 0
+#define regBIF_BX_DEV0_EPF0_VF23_MM_INDEX_HI 0x0006
+#define regBIF_BX_DEV0_EPF0_VF23_MM_INDEX_HI_BASE_IDX 0
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf23_BIFPFVFDEC1
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF23_RCC_ERR_LOG 0x0085
+#define regRCC_DEV0_EPF0_VF23_RCC_ERR_LOG_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF23_RCC_DOORBELL_APER_EN 0x00c0
+#define regRCC_DEV0_EPF0_VF23_RCC_DOORBELL_APER_EN_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF23_RCC_CONFIG_MEMSIZE 0x00c3
+#define regRCC_DEV0_EPF0_VF23_RCC_CONFIG_MEMSIZE_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF23_RCC_CONFIG_RESERVED 0x00c4
+#define regRCC_DEV0_EPF0_VF23_RCC_CONFIG_RESERVED_BASE_IDX 2
+#define regRCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER 0x00c5
+#define regRCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf23_BIFDEC2
+// base address: 0x0
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_LO 0x0400
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_HI 0x0401
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_MSG_DATA 0x0402
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_CONTROL 0x0403
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_LO 0x0404
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_HI 0x0405
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_MSG_DATA 0x0406
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_CONTROL 0x0407
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_CONTROL 0x040b
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_LO 0x040c
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_LO_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_HI 0x040d
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_HI_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_MSG_DATA 0x040e
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_MSG_DATA_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_CONTROL 0x040f
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_CONTROL_BASE_IDX 3
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_PBA 0x0800
+#define regRCC_DEV0_EPF0_VF23_GFXMSIX_PBA_BASE_IDX 3
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_sh_mask.h
new file mode 100644
index 000000000000..490d04ebdf42
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_sh_mask.h
@@ -0,0 +1,32806 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _nbif_6_3_1_SH_MASK_HEADER
+#define _nbif_6_3_1_SH_MASK_HEADER
+
+
+// addressBlock: nbif_bif_cfg_dev0_rc_bifcfgdecp
+//IRQ_BRIDGE_CNTL
+#define IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0
+#define IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1
+#define IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2
+#define IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3
+#define IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4
+#define IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5
+#define IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6
+#define IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7
+#define IRQ_BRIDGE_CNTL__PRIMARY_DISCARD_TIMER__SHIFT 0x8
+#define IRQ_BRIDGE_CNTL__SECONDARY_DISCARD_TIMER__SHIFT 0x9
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_STATUS__SHIFT 0xa
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_SERR_ENABLE__SHIFT 0xb
+#define IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L
+#define IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L
+#define IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L
+#define IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L
+#define IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L
+#define IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L
+#define IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L
+#define IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L
+#define IRQ_BRIDGE_CNTL__PRIMARY_DISCARD_TIMER_MASK 0x0100L
+#define IRQ_BRIDGE_CNTL__SECONDARY_DISCARD_TIMER_MASK 0x0200L
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_STATUS_MASK 0x0400L
+#define IRQ_BRIDGE_CNTL__DISCARD_TIMER_SERR_ENABLE_MASK 0x0800L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_COMMAND
+#define BIF_CFG_DEV0_EPF0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_STATUS
+#define BIF_CFG_DEV0_EPF0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_LATENCY
+#define BIF_CFG_DEV0_EPF0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_HEADER
+#define BIF_CFG_DEV0_EPF0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_BIST
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF0_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PMI_CAP
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL
+//BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF0_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
+//BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_STATUS
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_INITIAL_VFS
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_TOTAL_VFS
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_NUM_VFS
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_FUNC_DEP_LINK
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_FIRST_VF_OFFSET
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_STRIDE
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_SYSTEM_PAGE_SIZE
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL
+#define BIF_CFG_DEV0_EPF0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_LINK_CAP_16GT
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_LINK_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L
+//BIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT
+#define BIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L
+//BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L
+//BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_LINK_CAP_32GT
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__EQ_BYPASS_TO_HIGHEST_RATE_SUPPORTED_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__NO_EQ_NEEDED_SUPPORTED_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE0_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE1_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_USAGE_MODE2_SUPPORTED_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_LINK_CAP_32GT__MODIFIED_TS_RESERVED_USAGE_MODES_MASK 0x0000F800L
+//BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__EQ_BYPASS_TO_HIGHEST_RATE_DIS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__NO_EQ_NEEDED_DIS_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_CNTL_32GT__MODIFIED_TS_USAGE_MODE_SEL_MASK 0x00000700L
+//BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_COMPLETE_32GT_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE1_SUCCESS_32GT_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE2_SUCCESS_32GT_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__EQUALIZATION_PHASE3_SUCCESS_32GT_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__LINK_EQUALIZATION_REQUEST_32GT_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__MODIFIED_TS_RECEIVED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__RECEIVED_ENHANCED_LINK_BEHAVIOR_CNTL_MASK 0x000000C0L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODING_ON_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__TRANSMITTER_PRECODE_REQUEST_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_LINK_STATUS_32GT__NO_EQ_NEEDED_RECEIVED_MASK 0x00000400L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf0_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF0_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF0_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF0_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_HEADER
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF0_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF0_BIST
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF0_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF0_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF0_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF0_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
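Editor's note: the defines in this generated header all follow one convention: each register field gets a `<REG>__<FIELD>__SHIFT` and a matching `<REG>__<FIELD>_MASK` value, so a driver can read or update a field with plain mask-and-shift arithmetic. The self-contained C sketch below illustrates that pattern only; the `GET_FIELD`/`SET_FIELD` helpers and the `vf0_command` value are hypothetical illustrations and are not part of this header or of the driver's actual accessor macros (the two register defines are copied from the block above so the sketch compiles on its own).

/* Illustrative sketch of the __SHIFT/_MASK convention used by the defines above.
 * Helper names are hypothetical, not taken from the kernel sources. */
#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, v) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(v) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Copied from the header block above so this example stands alone. */
#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
#define BIF_CFG_DEV0_EPF0_VF0_COMMAND__BUS_MASTER_EN_MASK 0x0004L

int main(void)
{
	uint32_t vf0_command = 0x0006; /* hypothetical VF0 COMMAND readback value */

	/* Extract the bus-master-enable bit (bit 2 of the VF COMMAND register). */
	printf("BUS_MASTER_EN = %u\n",
	       (unsigned)GET_FIELD(vf0_command, BIF_CFG_DEV0_EPF0_VF0_COMMAND,
				   BUS_MASTER_EN));

	/* Clear the same field using the matching mask/shift pair. */
	vf0_command = SET_FIELD(vf0_command, BIF_CFG_DEV0_EPF0_VF0_COMMAND,
				BUS_MASTER_EN, 0);
	return 0;
}
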
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf1_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF1_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF1_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF1_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF1_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF1_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF1_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF1_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF1_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_HEADER
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF1_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF1_BIST
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF1_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF1_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF1_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF1_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF1_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF1_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
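+// Illustrative sketch only, not part of the generated register header: the paired
+// __SHIFT/_MASK macros in the blocks above are conventionally consumed by masking a
+// raw 32-bit config value and shifting the field down. The helpers below are a
+// hypothetical example using the VF1 LINK_STATUS field macros defined earlier in this
+// block; the caller and the way the raw register value is obtained are assumptions.
+//
+// #include <linux/types.h>   /* for u32 in kernel code */
+//
+// /* Generic field extraction: isolate the field, then right-justify it. */
+// static inline u32 vf1_cfg_field(u32 reg, u32 mask, u32 shift)
+// {
+//	return (reg & mask) >> shift;
+// }
+//
+// /* Hypothetical decode of two VF1 LINK_STATUS fields from a raw value. */
+// static inline void vf1_decode_link_status(u32 reg, u32 *speed, u32 *width)
+// {
+//	*speed = vf1_cfg_field(reg,
+//			BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__CURRENT_LINK_SPEED_MASK,
+//			BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT);
+//	*width = vf1_cfg_field(reg,
+//			BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK,
+//			BIF_CFG_DEV0_EPF0_VF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT);
+// }
+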
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf2_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF2_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF2_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF2_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF2_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF2_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF2_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF2_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF2_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_HEADER
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF2_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF2_BIST
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF2_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF2_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF2_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF2_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF2_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF2_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF2_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF2_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf3_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF3_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF3_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF3_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF3_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF3_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF3_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF3_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF3_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_HEADER
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF3_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF3_BIST
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF3_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF3_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF3_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF3_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF3_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF3_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF3_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF3_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf4_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF4_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF4_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF4_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF4_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF4_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF4_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF4_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF4_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_HEADER
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF4_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF4_BIST
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF4_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF4_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF4_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF4_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF4_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF4_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF4_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
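+
+/* Illustrative sketch only (not part of the generated register header):
+ * the paired __SHIFT / _MASK macros above are conventionally combined to
+ * read or update a field inside a 32-bit configuration-space value. The
+ * helper names below are hypothetical and exist purely to show the pattern.
+ */
+#if 0
+/* Extract NEGOTIATED_LINK_WIDTH from a raw VF4 LINK_STATUS value. */
+static inline unsigned int vf4_negotiated_link_width(unsigned int link_status)
+{
+	return (link_status & BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK)
+		>> BIF_CFG_DEV0_EPF0_VF4_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT;
+}
+
+/* Replace the TARGET_LINK_SPEED field in a VF4 LINK_CNTL2 value. */
+static inline unsigned int vf4_set_target_link_speed(unsigned int link_cntl2, unsigned int speed)
+{
+	link_cntl2 &= ~BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED_MASK;
+	link_cntl2 |= (speed << BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT) &
+		      BIF_CFG_DEV0_EPF0_VF4_LINK_CNTL2__TARGET_LINK_SPEED_MASK;
+	return link_cntl2;
+}
+#endif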
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf5_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF5_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF5_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF5_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF5_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF5_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF5_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF5_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF5_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_HEADER
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF5_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF5_BIST
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF5_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF5_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF5_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF5_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF5_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF5_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF5_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF5_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf6_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF6_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF6_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF6_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF6_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF6_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF6_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF6_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF6_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_HEADER
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF6_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF6_BIST
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF6_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF6_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF6_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF6_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF6_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF6_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF6_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
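+/*
+ * Editorial usage note (illustrative only, not part of the generated
+ * register list): each register above is described by paired
+ * <FIELD>__SHIFT and <FIELD>_MASK macros. A minimal sketch, assuming a
+ * raw 32-bit value has already been read from the VF's config space,
+ * would extract a field as:
+ *
+ *   u32 val = ...; // hypothetical read of VF6 LINK_STATUS2
+ *   u32 crosslink =
+ *       (val & BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK)
+ *       >> BIF_CFG_DEV0_EPF0_VF6_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT;
+ *
+ * Drivers typically wrap this pattern in REG_GET_FIELD()/REG_SET_FIELD()
+ * style helpers rather than open-coding the shift and mask.
+ */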
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf0_vf7_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF7_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF7_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF7_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF7_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_REVISION_ID
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF7_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_SUB_CLASS
+#define BIF_CFG_DEV0_EPF0_VF7_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_CLASS
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_CACHE_LINE
+#define BIF_CFG_DEV0_EPF0_VF7_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF7_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_HEADER
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF0_VF7_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF7_BIST
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF0_VF7_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF0_VF7_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF0_VF7_CAP_PTR
+#define BIF_CFG_DEV0_EPF0_VF7_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_MIN_GRANT
+#define BIF_CFG_DEV0_EPF0_VF7_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF0_VF7_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MASK
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF0_VF7_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF7_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf1_bifcfgdecp
+//BIF_CFG_DEV0_EPF1_VENDOR_ID
+#define BIF_CFG_DEV0_EPF1_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_DEVICE_ID
+#define BIF_CFG_DEV0_EPF1_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_COMMAND
+#define BIF_CFG_DEV0_EPF1_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF1_STATUS
+#define BIF_CFG_DEV0_EPF1_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF1_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_REVISION_ID
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF1_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF1_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF1_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_SUB_CLASS
+#define BIF_CFG_DEV0_EPF1_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_BASE_CLASS
+#define BIF_CFG_DEV0_EPF1_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_CACHE_LINE
+#define BIF_CFG_DEV0_EPF1_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_LATENCY
+#define BIF_CFG_DEV0_EPF1_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_HEADER
+#define BIF_CFG_DEV0_EPF1_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF1_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF1_BIST
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF1_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF1_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF1_CAP_PTR
+#define BIF_CFG_DEV0_EPF1_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_MIN_GRANT
+#define BIF_CFG_DEV0_EPF1_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF1_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF1_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF1_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PMI_CAP
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PCIE_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF1_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF1_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF1_LINK_CAP
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_LINK_CNTL
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF1_LINK_STATUS
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF1_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF1_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_LINK_CAP2
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF1_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF1_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF1_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF1_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MASK
+#define BIF_CFG_DEV0_EPF1_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF1_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_PENDING
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF1_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF1_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF1_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_MSIX_PBA
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW1
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW2
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF1_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF1_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF1_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3
+#define BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF1_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_ERROR_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_TX_PRESET_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_TX_PRESET_MASK 0x0F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_8GT_RX_PRESET_HINT_MASK 0x7000L
+//BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF1_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
+//BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_STATUS
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_INITIAL_VFS
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_TOTAL_VFS
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_NUM_VFS
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_FUNC_DEP_LINK
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0xFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_FIRST_VF_OFFSET
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_STRIDE
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_DEVICE_ID
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_SUPPORTED_PAGE_SIZE
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_SYSTEM_PAGE_SIZE
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
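/*
 * Illustrative aside (editor's sketch, not part of the generated header or of
 * this patch): every register above carries a <FIELD>__SHIFT / <FIELD>_MASK
 * pair, and the conventional way to use them is the plain shift-and-mask
 * idiom shown below.  Only the two macros referenced are taken from the
 * definitions above; the helper names and the use of "unsigned int" as a
 * stand-in for a 32-bit register type are assumptions made for the example.
 */
static inline unsigned int epf1_vf_bar6_size_get(unsigned int cntl)
{
	/* Extract VF_BAR_SIZE from a BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL value. */
	return (cntl & BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK) >>
	       BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT;
}

static inline unsigned int epf1_vf_bar6_size_set(unsigned int cntl, unsigned int size)
{
	/* Clear the old VF_BAR_SIZE field, then insert the new value in place. */
	cntl &= ~BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK;
	cntl |= (size << BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT) &
		BIF_CFG_DEV0_EPF1_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK;
	return cntl;
}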
+
+
+// addressBlock: nbif_bif_bx_pf_SYSPFVFDEC
+//BIF_BX_PF0_MM_INDEX
+#define BIF_BX_PF0_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_PF0_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_PF0_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_PF0_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_PF0_MM_DATA
+#define BIF_BX_PF0_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MM_INDEX_HI
+#define BIF_BX_PF0_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_PF0_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_RSMU_INDEX
+#define BIF_BX_PF0_RSMU_INDEX__RSMU_INDEX__SHIFT 0x0
+#define BIF_BX_PF0_RSMU_INDEX__RSMU_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_RSMU_DATA
+#define BIF_BX_PF0_RSMU_DATA__RSMU_DATA__SHIFT 0x0
+#define BIF_BX_PF0_RSMU_DATA__RSMU_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_RSMU_INDEX_HI
+#define BIF_BX_PF0_RSMU_INDEX_HI__RSMU_INDEX_HI__SHIFT 0x0
+#define BIF_BX_PF0_RSMU_INDEX_HI__RSMU_INDEX_HI_MASK 0x000000FFL
+
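The block above defines the bitfield layout of the indirect MMIO access registers (MM_INDEX selects a dword offset and an aperture bit, MM_DATA carries the payload). As an editorial aside, not part of the generated header or of this patch, here is a minimal C sketch of how such __SHIFT/_MASK pairs are typically combined to build a register value; the helper name mm_index_pack() and the kernel types u32/bool from <linux/types.h> are assumptions for illustration only.

/* Pack a dword offset and aperture select into a BIF_BX_PF0_MM_INDEX value (sketch). */
static inline u32 mm_index_pack(u32 offset, bool high_aperture)
{
	u32 v = 0;

	/* MM_OFFSET occupies bits 30:0; mask after shifting to stay in range. */
	v |= (offset << BIF_BX_PF0_MM_INDEX__MM_OFFSET__SHIFT) &
	     BIF_BX_PF0_MM_INDEX__MM_OFFSET_MASK;
	/* MM_APER is the single top bit selecting the high aperture. */
	if (high_aperture)
		v |= BIF_BX_PF0_MM_INDEX__MM_APER_MASK;
	return v;
}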
+
+// addressBlock: nbif_bif_bx_SYSDEC
+//BIF_BX0_PCIE_INDEX
+#define BIF_BX0_PCIE_INDEX__PCIE_INDEX__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX__PCIE_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_DATA
+#define BIF_BX0_PCIE_DATA__PCIE_DATA__SHIFT 0x0
+#define BIF_BX0_PCIE_DATA__PCIE_DATA_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_INDEX2
+#define BIF_BX0_PCIE_INDEX2__PCIE_INDEX2__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX2__PCIE_INDEX2_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_DATA2
+#define BIF_BX0_PCIE_DATA2__PCIE_DATA2__SHIFT 0x0
+#define BIF_BX0_PCIE_DATA2__PCIE_DATA2_MASK 0xFFFFFFFFL
+//BIF_BX0_PCIE_INDEX_HI
+#define BIF_BX0_PCIE_INDEX_HI__PCIE_INDEX_HI__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX_HI__PCIE_INDEX_HI_MASK 0x000000FFL
+//BIF_BX0_PCIE_INDEX2_HI
+#define BIF_BX0_PCIE_INDEX2_HI__PCIE_INDEX2_HI__SHIFT 0x0
+#define BIF_BX0_PCIE_INDEX2_HI__PCIE_INDEX2_HI_MASK 0x000000FFL
+//BIF_BX0_SBIOS_SCRATCH_0
+#define BIF_BX0_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_1
+#define BIF_BX0_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_2
+#define BIF_BX0_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_3
+#define BIF_BX0_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_0
+#define BIF_BX0_BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_1
+#define BIF_BX0_BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_2
+#define BIF_BX0_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_3
+#define BIF_BX0_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_4
+#define BIF_BX0_BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_5
+#define BIF_BX0_BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_6
+#define BIF_BX0_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_7
+#define BIF_BX0_BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_8
+#define BIF_BX0_BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_9
+#define BIF_BX0_BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_10
+#define BIF_BX0_BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_11
+#define BIF_BX0_BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_12
+#define BIF_BX0_BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_13
+#define BIF_BX0_BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_14
+#define BIF_BX0_BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_BIOS_SCRATCH_15
+#define BIF_BX0_BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX0_BIF_RLC_INTR_CNTL
+//BIF_BX0_BIF_VCE_INTR_CNTL
+//BIF_BX0_BIF_UVD_INTR_CNTL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR1
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR2
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR3
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR4
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR5
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR6
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ADDR7
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7_MASK 0x000FFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_CNTL
+#define BIF_BX0_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE_MASK 0x000000FFL
+//BIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL
+#define BIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL_MASK 0xFFFFFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_ONE_CPL
+#define BIF_BX0_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL
+#define BIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL__SHIFT 0x0
+#define BIF_BX0_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_0
+#define BIF_BX0_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_1
+#define BIF_BX0_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_2
+#define BIF_BX0_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_3
+#define BIF_BX0_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_4
+#define BIF_BX0_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_5
+#define BIF_BX0_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_6
+#define BIF_BX0_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_7
+#define BIF_BX0_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_8
+#define BIF_BX0_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_9
+#define BIF_BX0_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_10
+#define BIF_BX0_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_11
+#define BIF_BX0_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_12
+#define BIF_BX0_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_13
+#define BIF_BX0_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_14
+#define BIF_BX0_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_DRIVER_SCRATCH_15
+#define BIF_BX0_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_0
+#define BIF_BX0_FW_SCRATCH_0__FW_SCRATCH_0__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_0__FW_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_1
+#define BIF_BX0_FW_SCRATCH_1__FW_SCRATCH_1__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_1__FW_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_2
+#define BIF_BX0_FW_SCRATCH_2__FW_SCRATCH_2__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_2__FW_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_3
+#define BIF_BX0_FW_SCRATCH_3__FW_SCRATCH_3__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_3__FW_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_4
+#define BIF_BX0_FW_SCRATCH_4__FW_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_4__FW_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_5
+#define BIF_BX0_FW_SCRATCH_5__FW_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_5__FW_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_6
+#define BIF_BX0_FW_SCRATCH_6__FW_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_6__FW_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_7
+#define BIF_BX0_FW_SCRATCH_7__FW_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_7__FW_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_8
+#define BIF_BX0_FW_SCRATCH_8__FW_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_8__FW_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_9
+#define BIF_BX0_FW_SCRATCH_9__FW_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_9__FW_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_10
+#define BIF_BX0_FW_SCRATCH_10__FW_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_10__FW_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_11
+#define BIF_BX0_FW_SCRATCH_11__FW_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_11__FW_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_12
+#define BIF_BX0_FW_SCRATCH_12__FW_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_12__FW_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_13
+#define BIF_BX0_FW_SCRATCH_13__FW_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_13__FW_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_14
+#define BIF_BX0_FW_SCRATCH_14__FW_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_14__FW_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_FW_SCRATCH_15
+#define BIF_BX0_FW_SCRATCH_15__FW_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_FW_SCRATCH_15__FW_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_4
+#define BIF_BX0_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_5
+#define BIF_BX0_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_6
+#define BIF_BX0_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_7
+#define BIF_BX0_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_8
+#define BIF_BX0_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_9
+#define BIF_BX0_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_10
+#define BIF_BX0_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_11
+#define BIF_BX0_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_12
+#define BIF_BX0_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_13
+#define BIF_BX0_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_14
+#define BIF_BX0_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX0_SBIOS_SCRATCH_15
+#define BIF_BX0_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX0_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dwn_dev0_BIFDEC1
+//RCC_DWN_DEV0_0_DN_PCIE_RESERVED
+#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_0_DN_PCIE_SCRATCH
+#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_0_DN_PCIE_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
+#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+//RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2
+#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+//RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
+//RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+//RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define RCC_DWN_DEV0_0_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+
+
+// addressBlock: nbif_rcc_dwnp_dev0_BIFDEC1
+//RCC_DWNP_DEV0_0_PCIE_ERR_CNTL
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR__SHIFT 0x12
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR__SHIFT 0x13
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR__SHIFT 0x14
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR_MASK 0x00040000L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR_MASK 0x00080000L
+#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR_MASK 0x00100000L
+//RCC_DWNP_DEV0_0_PCIE_RX_CNTL
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
+#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+//RCC_DWNP_DEV0_0_PCIE_LC_CNTL2
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_0_PCIEP_STRAP_MISC
+#define RCC_DWNP_DEV0_0_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
+#define RCC_DWNP_DEV0_0_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
+//RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP
+#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
+#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_ep_dev0_BIFDEC1
+//RCC_EP_DEV0_0_EP_PCIE_SCRATCH
+#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_0_EP_PCIE_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__MFIOV_GFX_F0_FLR_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__MFIOV_GFX_F0_FLR_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_EP_DEV0_0_EP_PCIE_INT_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+//RCC_EP_DEV0_0_EP_PCIE_INT_STATUS
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0_MASK 0x00000080L
+//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+//RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+//RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define RCC_EP_DEV0_0_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
+//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
+#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL
+#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
+//RCC_EP_DEV0_0_EP_PCIEP_RESERVED
+#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_0_EP_PCIE_TX_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
+#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
+//RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
+#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
+//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+
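As elsewhere in this header, every field in the endpoint block above is described by a __SHIFT/_MASK pair. A minimal sketch of reading one field back out of a register value, assuming only the macros defined above (the helper name is illustrative, not something introduced by the patch):

/* Return true if the Gen4 enable strap is set in an EP_PCIE_LC_SPEED_CNTL value (sketch). */
static inline bool ep_lc_gen4_strap_enabled(u32 lc_speed_cntl)
{
	/* Isolate the strap bit with its mask, then shift it down to bit 0. */
	return (lc_speed_cntl &
		RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK) >>
	       RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT;
}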
+
+// addressBlock: nbif_bif_bx_BIFDEC1
+//BIF_BX0_CC_BIF_BX_STRAP0
+#define BIF_BX0_CC_BIF_BX_STRAP0__STRAP_RESERVED__SHIFT 0x19
+#define BIF_BX0_CC_BIF_BX_STRAP0__STRAP_RESERVED_MASK 0xFE000000L
+//BIF_BX0_CC_BIF_BX_PINSTRAP0
+//BIF_BX0_BIF_MM_INDACCESS_CNTL
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__WRITE_DIS__SHIFT 0x0
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__WRITE_DIS_MASK 0x00000001L
+#define BIF_BX0_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x00000002L
+//BIF_BX0_BUS_CNTL
+#define BIF_BX0_BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x6
+#define BIF_BX0_BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x7
+#define BIF_BX0_BUS_CNTL__SET_AZ_TC__SHIFT 0xa
+#define BIF_BX0_BUS_CNTL__SET_MC_TC__SHIFT 0xd
+#define BIF_BX0_BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x10
+#define BIF_BX0_BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x11
+#define BIF_BX0_BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x12
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS__SHIFT 0x18
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS__SHIFT 0x19
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS__SHIFT 0x1a
+#define BIF_BX0_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS__SHIFT 0x1b
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS__SHIFT 0x1c
+#define BIF_BX0_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN__SHIFT 0x1d
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_WR_EN__SHIFT 0x1e
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_RD_EN__SHIFT 0x1f
+#define BIF_BX0_BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
+#define BIF_BX0_BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
+#define BIF_BX0_BUS_CNTL__SET_AZ_TC_MASK 0x00001C00L
+#define BIF_BX0_BUS_CNTL__SET_MC_TC_MASK 0x0000E000L
+#define BIF_BX0_BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
+#define BIF_BX0_BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
+#define BIF_BX0_BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS_MASK 0x01000000L
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS_MASK 0x02000000L
+#define BIF_BX0_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS_MASK 0x04000000L
+#define BIF_BX0_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS_MASK 0x08000000L
+#define BIF_BX0_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS_MASK 0x10000000L
+#define BIF_BX0_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN_MASK 0x20000000L
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_WR_EN_MASK 0x40000000L
+#define BIF_BX0_BUS_CNTL__VGAFB_ZERO_BE_RD_EN_MASK 0x80000000L
+//BIF_BX0_BIF_SCRATCH0
+#define BIF_BX0_BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x0
+#define BIF_BX0_BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xFFFFFFFFL
+//BIF_BX0_BIF_SCRATCH1
+#define BIF_BX0_BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x0
+#define BIF_BX0_BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xFFFFFFFFL
+//BIF_BX0_BX_RESET_EN
+#define BIF_BX0_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN__SHIFT 0x10
+#define BIF_BX0_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN_MASK 0x00010000L
+//BIF_BX0_MM_CFGREGS_CNTL
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x0
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL__SHIFT 0x6
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x1f
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL_MASK 0x000000C0L
+#define BIF_BX0_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x80000000L
+//BIF_BX0_BX_RESET_CNTL
+#define BIF_BX0_BX_RESET_CNTL__LINK_TRAIN_EN__SHIFT 0x0
+#define BIF_BX0_BX_RESET_CNTL__LINK_TRAIN_EN_MASK 0x00000001L
+//BIF_BX0_INTERRUPT_CNTL
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x0
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x1
+#define BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x3
+#define BIF_BX0_INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x4
+#define BIF_BX0_INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x8
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN__SHIFT 0xf
+#define BIF_BX0_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN__SHIFT 0x10
+#define BIF_BX0_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS__SHIFT 0x11
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN__SHIFT 0x12
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
+#define BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
+#define BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
+#define BIF_BX0_INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000F0L
+#define BIF_BX0_INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN_MASK 0x00008000L
+#define BIF_BX0_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN_MASK 0x00010000L
+#define BIF_BX0_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS_MASK 0x00020000L
+#define BIF_BX0_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN_MASK 0x00040000L
+//BIF_BX0_INTERRUPT_CNTL2
+#define BIF_BX0_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x0
+#define BIF_BX0_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xFFFFFFFFL
+//BIF_BX0_CLKREQB_PAD_CNTL
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x0
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x1
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x2
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x3
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x5
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x6
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x7
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x8
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x9
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0xa
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0xb
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0xc
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y__SHIFT 0xd
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
+#define BIF_BX0_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y_MASK 0x00002000L
+//BIF_BX0_BIF_FEATURES_CONTROL_MISC
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x0
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x1
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x2
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x3
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE__SHIFT 0xb
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN__SHIFT 0xc
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS__SHIFT 0xd
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS__SHIFT 0xe
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN__SHIFT 0xf
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT__SHIFT 0x10
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR__SHIFT 0x19
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE_MASK 0x00000800L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN_MASK 0x00001000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS_MASK 0x00002000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS_MASK 0x00004000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN_MASK 0x00008000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT_MASK 0x01FF0000L
+#define BIF_BX0_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR_MASK 0x02000000L
+//BIF_BX0_HDP_ATOMIC_CONTROL_MISC
+#define BIF_BX0_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT__SHIFT 0x0
+#define BIF_BX0_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT_MASK 0x000000FFL
+//BIF_BX0_BIF_DOORBELL_CNTL
+#define BIF_BX0_BIF_DOORBELL_CNTL__SELF_RING_DIS__SHIFT 0x0
+#define BIF_BX0_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS__SHIFT 0x3
+#define BIF_BX0_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT 0x4
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS__SHIFT 0x18
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0__SHIFT 0x19
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1__SHIFT 0x1a
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2__SHIFT 0x1b
+#define BIF_BX0_BIF_DOORBELL_CNTL__SELF_RING_DIS_MASK 0x00000001L
+#define BIF_BX0_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS_MASK 0x00000002L
+#define BIF_BX0_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS_MASK 0x00000008L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK 0x00000010L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS_MASK 0x01000000L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0_MASK 0x02000000L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1_MASK 0x04000000L
+#define BIF_BX0_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2_MASK 0x08000000L
+//BIF_BX0_BIF_DOORBELL_INT_CNTL
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS__SHIFT 0x0
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT 0x10
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x17
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE__SHIFT 0x19
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT 0x1a
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1c
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1d
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1e
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x1f
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK 0x00000001L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR_MASK 0x00010000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x00800000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE_MASK 0x02000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK 0x04000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x10000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x20000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x40000000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x80000000L
+//BIF_BX0_BIF_FB_EN
+#define BIF_BX0_BIF_FB_EN__FB_READ_EN__SHIFT 0x0
+#define BIF_BX0_BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
+#define BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
+#define BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
+//BIF_BX0_BIF_INTR_CNTL
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+//BIF_BX0_BIF_MST_TRANS_PENDING_VF
+#define BIF_BX0_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX0_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX0_BIF_SLV_TRANS_PENDING_VF
+#define BIF_BX0_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX0_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX0_BACO_CNTL
+#define BIF_BX0_BACO_CNTL__BACO_EN__SHIFT 0x0
+#define BIF_BX0_BACO_CNTL__BACO_DUMMY_EN__SHIFT 0x2
+#define BIF_BX0_BACO_CNTL__BACO_POWER_OFF__SHIFT 0x3
+#define BIF_BX0_BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT 0x5
+#define BIF_BX0_BACO_CNTL__BACO_RST_INTR_MASK__SHIFT 0x6
+#define BIF_BX0_BACO_CNTL__BACO_MODE__SHIFT 0x8
+#define BIF_BX0_BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x9
+#define BIF_BX0_BACO_CNTL__PWRGOOD_VDDSOC__SHIFT 0x10
+#define BIF_BX0_BACO_CNTL__BACO_AUTO_EXIT__SHIFT 0x1f
+#define BIF_BX0_BACO_CNTL__BACO_EN_MASK 0x00000001L
+#define BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK 0x00000004L
+#define BIF_BX0_BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
+#define BIF_BX0_BACO_CNTL__BACO_DSTATE_BYPASS_MASK 0x00000020L
+#define BIF_BX0_BACO_CNTL__BACO_RST_INTR_MASK_MASK 0x00000040L
+#define BIF_BX0_BACO_CNTL__BACO_MODE_MASK 0x00000100L
+#define BIF_BX0_BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000200L
+#define BIF_BX0_BACO_CNTL__PWRGOOD_VDDSOC_MASK 0x00010000L
+#define BIF_BX0_BACO_CNTL__BACO_AUTO_EXIT_MASK 0x80000000L
+//BIF_BX0_BIF_BACO_EXIT_TIME0
+#define BIF_BX0_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_BIF_BACO_EXIT_TIMER1
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN__SHIFT 0x18
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR__SHIFT 0x19
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS__SHIFT 0x1a
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH__SHIFT 0x1b
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW__SHIFT 0x1c
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL__SHIFT 0x1d
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS__SHIFT 0x1f
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER_MASK 0x000FFFFFL
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN_MASK 0x01000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR_MASK 0x02000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS_MASK 0x04000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH_MASK 0x08000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW_MASK 0x10000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL_MASK 0x60000000L
+#define BIF_BX0_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS_MASK 0x80000000L
+//BIF_BX0_BIF_BACO_EXIT_TIMER2
+#define BIF_BX0_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_BIF_BACO_EXIT_TIMER3
+#define BIF_BX0_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_BIF_BACO_EXIT_TIMER4
+#define BIF_BX0_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX0_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX0_MEM_TYPE_CNTL
+#define BIF_BX0_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3__SHIFT 0x0
+#define BIF_BX0_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3_MASK 0x00000001L
+//BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL
+#define BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL
+#define BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX0_BIF_RB_CNTL
+#define BIF_BX0_BIF_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define BIF_BX0_BIF_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x9
+#define BIF_BX0_BIF_RB_CNTL__BIF_RB_TRAN__SHIFT 0x11
+#define BIF_BX0_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL__SHIFT 0x19
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY__SHIFT 0x1a
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_ARB_MODE__SHIFT 0x1d
+#define BIF_BX0_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE__SHIFT 0x1e
+#define BIF_BX0_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define BIF_BX0_BIF_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define BIF_BX0_BIF_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003E00L
+#define BIF_BX0_BIF_RB_CNTL__BIF_RB_TRAN_MASK 0x00020000L
+#define BIF_BX0_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL_MASK 0x02000000L
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY_MASK 0x1C000000L
+#define BIF_BX0_BIF_RB_CNTL__RB_INTR_ARB_MODE_MASK 0x20000000L
+#define BIF_BX0_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE_MASK 0x40000000L
+#define BIF_BX0_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//BIF_BX0_BIF_RB_BASE
+#define BIF_BX0_BIF_RB_BASE__ADDR__SHIFT 0x0
+#define BIF_BX0_BIF_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//BIF_BX0_BIF_RB_RPTR
+#define BIF_BX0_BIF_RB_RPTR__OFFSET__SHIFT 0x2
+#define BIF_BX0_BIF_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX0_BIF_RB_WPTR
+#define BIF_BX0_BIF_RB_WPTR__BIF_RB_OVERFLOW__SHIFT 0x0
+#define BIF_BX0_BIF_RB_WPTR__OFFSET__SHIFT 0x2
+#define BIF_BX0_BIF_RB_WPTR__BIF_RB_OVERFLOW_MASK 0x00000001L
+#define BIF_BX0_BIF_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX0_BIF_RB_WPTR_ADDR_HI
+#define BIF_BX0_BIF_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define BIF_BX0_BIF_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000FFL
+//BIF_BX0_BIF_RB_WPTR_ADDR_LO
+#define BIF_BX0_BIF_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define BIF_BX0_BIF_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//BIF_BX0_MAILBOX_INDEX
+#define BIF_BX0_MAILBOX_INDEX__MAILBOX_INDEX__SHIFT 0x0
+#define BIF_BX0_MAILBOX_INDEX__MAILBOX_INDEX_MASK 0x0000001FL
+//BIF_BX0_BIF_MP1_INTR_CTRL
+#define BIF_BX0_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE__SHIFT 0x0
+#define BIF_BX0_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE_MASK 0x00000001L
+//BIF_BX0_BIF_PERSTB_PAD_CNTL
+#define BIF_BX0_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL_MASK 0x0000FFFFL
+//BIF_BX0_BIF_PX_EN_PAD_CNTL
+#define BIF_BX0_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL_MASK 0x00000FFFL
+//BIF_BX0_BIF_REFPADKIN_PAD_CNTL
+#define BIF_BX0_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL_MASK 0x000000FFL
+//BIF_BX0_BIF_CLKREQB_PAD_CNTL
+#define BIF_BX0_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_MASK 0x7FFFFFFFL
+//BIF_BX0_BIF_PWRBRK_PAD_CNTL
+#define BIF_BX0_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL__SHIFT 0x0
+#define BIF_BX0_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL_MASK 0x000000FFL
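(Editorial aside, not part of the patch: the SHIFT/MASK pairs in this generated AMD register header follow the usual convention of one bit-offset and one bit-mask macro per field. A minimal, self-contained C sketch of how such pairs are typically consumed, using the BIF_BX0_BIF_RB_CNTL definitions above; the set_field() helper and the chosen field values are hypothetical, and the in-kernel driver uses its own REG_SET_FIELD-style helpers built from the same macros.)

/* Sketch: composing a ring-buffer control word from SHIFT/MASK pairs. */
#include <stdint.h>
#include <stdio.h>

/* Values copied verbatim from the definitions above. */
#define BIF_BX0_BIF_RB_CNTL__RB_ENABLE__SHIFT             0x0
#define BIF_BX0_BIF_RB_CNTL__RB_SIZE__SHIFT               0x1
#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
#define BIF_BX0_BIF_RB_CNTL__RB_ENABLE_MASK               0x00000001L
#define BIF_BX0_BIF_RB_CNTL__RB_SIZE_MASK                 0x0000003EL
#define BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK   0x00000100L

/* Hypothetical helper: place 'val' into the field described by SHIFT/MASK. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t rb_cntl = 0;

	/* Example configuration only: enable the ring buffer, pick an
	 * arbitrary size encoding of 8, enable write-pointer writeback. */
	rb_cntl = set_field(rb_cntl, BIF_BX0_BIF_RB_CNTL__RB_ENABLE_MASK,
			    BIF_BX0_BIF_RB_CNTL__RB_ENABLE__SHIFT, 1);
	rb_cntl = set_field(rb_cntl, BIF_BX0_BIF_RB_CNTL__RB_SIZE_MASK,
			    BIF_BX0_BIF_RB_CNTL__RB_SIZE__SHIFT, 8);
	rb_cntl = set_field(rb_cntl, BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK,
			    BIF_BX0_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT, 1);

	printf("BIF_RB_CNTL = 0x%08x\n", rb_cntl); /* prints 0x00000111 */
	return 0;
}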
+
+
+// addressBlock: nbif_rcc_dev0_BIFDEC1
+//RCC_DEV0_0_RCC_ERR_INT_CNTL
+#define RCC_DEV0_0_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN_MASK 0x00000001L
+//RCC_DEV0_0_RCC_BACO_CNTL_MISC
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS__SHIFT 0x1
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS_MASK 0x00000002L
+//RCC_DEV0_0_RCC_RESET_EN
+#define RCC_DEV0_0_RCC_RESET_EN__DB_APER_RESET_EN__SHIFT 0xf
+#define RCC_DEV0_0_RCC_RESET_EN__DB_APER_RESET_EN_MASK 0x00008000L
+//RCC_DEV0_0_RCC_VDM_SUPPORT
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
+//RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
+//RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
+#define RCC_DEV0_0_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
+//RCC_DEV0_0_RCC_GPUIOV_REGION
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__LFB_REGION__SHIFT 0x0
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__MAX_REGION__SHIFT 0x4
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__LFB_REGION_MASK 0x0000000FL
+#define RCC_DEV0_0_RCC_GPUIOV_REGION__MAX_REGION_MASK 0x000000F0L
+//RCC_DEV0_0_RCC_GPU_HOSTVM_EN
+#define RCC_DEV0_0_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN_MASK 0x00000001L
+//RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN__SHIFT 0x1
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN_MASK 0x00000002L
+//RCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//RCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE_MASK 0xFFFFL
+//RCC_DEV0_0_RCC_PEER_REG_RANGE0
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE0__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_0_RCC_PEER_REG_RANGE1
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_0_RCC_PEER_REG_RANGE1__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_0_RCC_BUS_CNTL
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT__SHIFT 0xc
+#define RCC_DEV0_0_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC__SHIFT 0xd
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT_MASK 0x00001000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC_MASK 0x00002000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
+#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
+//RCC_DEV0_0_RCC_CONFIG_CNTL
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x2
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x3
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
+//RCC_DEV0_0_RCC_CONFIG_F0_BASE
+#define RCC_DEV0_0_RCC_CONFIG_F0_BASE__F0_BASE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_F0_BASE__F0_BASE_MASK 0xFFFFFFFFL
+//RCC_DEV0_0_RCC_CONFIG_APER_SIZE
+#define RCC_DEV0_0_RCC_CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_APER_SIZE__APER_SIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE
+#define RCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x07FFFFFFL
+//RCC_DEV0_0_RCC_XDMA_LO
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x0
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x7FFFFFFFL
+#define RCC_DEV0_0_RCC_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_XDMA_HI
+#define RCC_DEV0_0_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x0
+#define RCC_DEV0_0_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x7FFFFFFFL
+//RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
+#define RCC_DEV0_0_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
+//RCC_DEV0_0_RCC_BUSNUM_CNTL1
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL1__ID_MASK__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL1__ID_MASK_MASK 0x000000FFL
+//RCC_DEV0_0_RCC_BUSNUM_LIST0
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID0__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID1__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID2__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID3__SHIFT 0x18
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID0_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID1_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID2_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST0__ID3_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_BUSNUM_LIST1
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID4__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID5__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID6__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID7__SHIFT 0x18
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID4_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID5_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID6_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_BUSNUM_LIST1__ID7_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_BUSNUM_CNTL2
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x0
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x8
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x10
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x11
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
+//RCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM
+#define RCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
+//RCC_DEV0_0_RCC_HOST_BUSNUM
+#define RCC_DEV0_0_RCC_HOST_BUSNUM__HOST_ID__SHIFT 0x0
+#define RCC_DEV0_0_RCC_HOST_BUSNUM__HOST_ID_MASK 0x0000FFFFL
+//RCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_0_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
+//RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0__SHIFT 0x0
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1__SHIFT 0x8
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2__SHIFT 0x10
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3__SHIFT 0x18
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4__SHIFT 0x0
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5__SHIFT 0x8
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6__SHIFT 0x10
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7__SHIFT 0x18
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5_MASK 0x0000FF00L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6_MASK 0x00FF0000L
+#define RCC_DEV0_0_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7_MASK 0xFF000000L
+//RCC_DEV0_0_RCC_DEV0_LINK_CNTL
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT__SHIFT 0x0
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY__SHIFT 0x8
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS__SHIFT 0x10
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS__SHIFT 0x11
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY_MASK 0x00000100L
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS_MASK 0x00010000L
+#define RCC_DEV0_0_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS_MASK 0x00020000L
+//RCC_DEV0_0_RCC_CMN_LINK_CNTL
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
+#define RCC_DEV0_0_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
+//RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
+#define RCC_DEV0_0_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
+//RCC_DEV0_0_RCC_LTR_LSWITCH_CNTL
+#define RCC_DEV0_0_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
+//RCC_DEV0_0_RCC_MH_ARB_CNTL
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
+#define RCC_DEV0_0_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
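(Editorial aside, not part of the patch: the complementary read direction. A short sketch of decoding a multi-bit field with a SHIFT/MASK pair from the RCC_DEV0_0_RCC_BUS_CNTL block above; the get_field() helper and the raw register value are invented for illustration. The 128 << n byte interpretation is the standard PCIe Max_Payload_Size encoding, which the PRIV_MAX_PAYLOAD_SIZE field name suggests but the header itself does not state.)

/* Sketch: extracting a field with a SHIFT/MASK pair. */
#include <stdint.h>
#include <stdio.h>

/* Values copied verbatim from the definitions above. */
#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
#define RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK   0x0E000000L

/* Hypothetical helper: extract the field described by a SHIFT/MASK pair. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t bus_cntl = 0x04000000;	/* made-up raw register value */
	uint32_t mps = get_field(bus_cntl,
				 RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK,
				 RCC_DEV0_0_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT);

	/* PCIe encodes max payload as 128 << n bytes; here n = 2 -> 512. */
	printf("PRIV_MAX_PAYLOAD_SIZE = %u (%u bytes)\n", mps, 128u << mps);
	return 0;
}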
+
+
+// addressBlock: nbif_rcc_dev0_epf0_BIFDEC2
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_3__SHIFT 0x3
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+#define RCC_DEV0_EPF0_GFXMSIX_PBA__MSIX_PENDING_BITS_3_MASK 0x00000008L
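(Editorial aside, not part of the patch: the GFXMSIX_VECTn registers above mirror the standard PCIe MSI-X table entry layout, with a 64-bit message address split across ADDR_LO/ADDR_HI and a per-vector mask bit in the control word. A self-contained sketch of reassembling vector 0; the raw register contents are invented.)

/* Sketch: assembling the MSI-X message address for vector 0. */
#include <stdint.h>
#include <stdio.h>

/* Values copied verbatim from the definitions above. */
#define RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
#define RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK    0x00000001L

int main(void)
{
	/* Made-up raw register contents for illustration. */
	uint32_t addr_lo = 0xFEE00004;	/* bits [31:2] carry the low address */
	uint32_t addr_hi = 0x00000000;
	uint32_t control = 0x00000001;	/* vector currently masked */

	uint64_t msg_addr =
		((uint64_t)addr_hi << 32) |
		(addr_lo & RCC_DEV0_EPF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK);

	printf("MSI-X vector 0: address 0x%llx, %s\n",
	       (unsigned long long)msg_addr,
	       (control & RCC_DEV0_EPF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK) ?
	       "masked" : "unmasked");
	return 0;
}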
+
+
+// addressBlock: nbif_rcc_strap_BIFDEC1
+//RCC_STRAP0_RCC_BIF_STRAP0
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN__SHIFT 0x2
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN__SHIFT 0xb
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR__SHIFT 0xc
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR__SHIFT 0xd
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0xe
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0xf
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN__SHIFT 0x11
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_AUD_PIN__SHIFT 0x12
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN__SHIFT 0x14
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_GFX_IOBAR_DIS_AND_REGBAR_64BIT_EN__SHIFT 0x15
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GPUIOV_EN__SHIFT 0x16
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN3_DIS__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4__SHIFT 0x19
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_QUICKSIM_START__SHIFT 0x1a
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1b
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS__SHIFT 0x1c
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN__SHIFT 0x1d
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE__SHIFT 0x1e
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN_MASK 0x00000004L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN_MASK 0x00000078L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR_MASK 0x00000400L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN_MASK 0x00000800L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR_MASK 0x00001000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR_MASK 0x00002000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00004000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00008000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_MASK 0x00010000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN_MASK 0x00020000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_AUD_PIN_MASK 0x000C0000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN_MASK 0x00100000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_GFX_IOBAR_DIS_AND_REGBAR_64BIT_EN_MASK 0x00200000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GPUIOV_EN_MASK 0x00400000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN3_DIS_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4_MASK 0x02000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_QUICKSIM_START_MASK 0x04000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING_MASK 0x08000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS_MASK 0x10000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN_MASK 0x20000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE_MASK 0x40000000L
+#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN_MASK 0x80000000L
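(Editorial aside, not part of the patch: the strap registers above are mostly single-bit flags, so only the _MASK macro is needed to test them. A short sketch using a few RCC_BIF_STRAP0 bits; the strap readback value is invented.)

/* Sketch: testing individual strap bits in RCC_BIF_STRAP0. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values copied verbatim from the definitions above. */
#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK   0x00000001L
#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
#define RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GPUIOV_EN_MASK  0x00400000L

int main(void)
{
	uint32_t strap0 = 0x00400080;	/* made-up strap readback */

	bool gen4_disabled = strap0 & RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK;
	bool px_capable    = strap0 & RCC_STRAP0_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK;
	bool gpuiov_en     = strap0 & RCC_STRAP0_RCC_BIF_STRAP0__STRAP_GPUIOV_EN_MASK;

	printf("gen4 disabled: %d, PX capable: %d, GPU-IOV enabled: %d\n",
	       gen4_disabled, px_capable, gpuiov_en);
	return 0;
}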
+//RCC_STRAP0_RCC_BIF_STRAP1
+#define RCC_STRAP0_RCC_BIF_STRAP1__FUSESTRAP_VALID__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP1__ROMSTRAP_VALID__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP1__WRITE_DISABLE__SHIFT 0x2
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS__SHIFT 0x5
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x6
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_READY__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE__SHIFT 0xc
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_HWREV_LSB2__SHIFT 0xd
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWREV_LSB2__SHIFT 0xf
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY__SHIFT 0x11
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS__SHIFT 0x12
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN__SHIFT 0x13
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN__SHIFT 0x14
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGIN_EN__SHIFT 0x15
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN__SHIFT 0x16
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN__SHIFT 0x17
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN__SHIFT 0x19
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE__SHIFT 0x1a
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x1b
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN_EP__SHIFT 0x1d
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN__SHIFT 0x1e
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN_DN__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP1__FUSESTRAP_VALID_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP1__ROMSTRAP_VALID_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP1__WRITE_DISABLE_MASK 0x00000004L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN_MASK 0x00000008L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS_MASK 0x00000020L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00000040L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGINING_READY_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE_MASK 0x00000C00L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE_MASK 0x00001000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_HWREV_LSB2_MASK 0x00006000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_SWREV_LSB2_MASK 0x00018000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY_MASK 0x00020000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS_MASK 0x00040000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN_MASK 0x00080000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN_MASK 0x00100000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_MARGIN_EN_MASK 0x00200000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN_MASK 0x00400000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN_MASK 0x00800000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN_MASK 0x02000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE_MASK 0x04000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN_MASK 0x18000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_DLF_EN_EP_MASK 0x20000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN_MASK 0x40000000L
+#define RCC_STRAP0_RCC_BIF_STRAP1__STRAP_AP_EN_DN_MASK 0x80000000L
+//RCC_STRAP0_RCC_BIF_STRAP2
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWUS_SPT__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA__SHIFT 0x5
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA__SHIFT 0x6
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_USB_PD_FUNC_DIS__SHIFT 0xc
+#define RCC_STRAP0_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2__SHIFT 0xd
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS__SHIFT 0xe
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN__SHIFT 0xf
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWUS_SPT_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS_MASK 0x00000008L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA_MASK 0x00000020L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA_MASK 0x00000040L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN_MASK 0x00000C00L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_USB_PD_FUNC_DIS_MASK 0x00001000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2_MASK 0x00002000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN_MASK 0x00008000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS_MASK 0x80000000L
+//RCC_STRAP0_RCC_BIF_STRAP3
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_BIF_STRAP4
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_BIF_STRAP5
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN__SHIFT 0x10
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN__SHIFT 0x11
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS__SHIFT 0x12
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS__SHIFT 0x13
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS__SHIFT 0x14
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS__SHIFT 0x15
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE__SHIFT 0x16
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE__SHIFT 0x18
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x19
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1b
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER__SHIFT 0x1c
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_BIF_GFX_REG_APER_REMAPPING_EN__SHIFT 0x1f
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN_MASK 0x00010000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN_MASK 0x00020000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS_MASK 0x00040000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS_MASK 0x00080000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS_MASK 0x00100000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS_MASK 0x00200000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE_MASK 0x00C00000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE_MASK 0x01000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED_MASK 0x06000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ_MASK 0x08000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER_MASK 0x70000000L
+#define RCC_STRAP0_RCC_BIF_STRAP5__STRAP_BIF_GFX_REG_APER_REMAPPING_EN_MASK 0x80000000L
+//RCC_STRAP0_RCC_BIF_STRAP6
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GEN5_DIS__SHIFT 0x0
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5__SHIFT 0x1
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN__SHIFT 0x2
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_DOE_VERSION_SEL__SHIFT 0x3
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_S5_GFX_REGS_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_PRODUCTION_MODE__SHIFT 0x5
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_SPX_SUPPORT__SHIFT 0x6
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_TPX_SUPPORT__SHIFT 0x7
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS1_SUPPORT__SHIFT 0x8
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS3_SUPPORT__SHIFT 0x9
+#define RCC_STRAP0_RCC_BIF_STRAP6__RESERVED_BIF_STRAP6__SHIFT 0xa
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GEN5_DIS_MASK 0x00000001L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5_MASK 0x00000002L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN_MASK 0x00000004L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_DOE_VERSION_SEL_MASK 0x00000008L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_S5_GFX_REGS_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_PRODUCTION_MODE_MASK 0x00000020L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_SPX_SUPPORT_MASK 0x00000040L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_TPX_SUPPORT_MASK 0x00000080L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS1_SUPPORT_MASK 0x00000100L
+#define RCC_STRAP0_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS3_SUPPORT_MASK 0x00000200L
+#define RCC_STRAP0_RCC_BIF_STRAP6__RESERVED_BIF_STRAP6_MASK 0xFFFFFC00L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_NEGO_GLOBAL_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_NEGO_GLOBAL_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0_MASK 0x00E00000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0_MASK 0x0E000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK 0x70000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0__SHIFT 0x5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0_MASK 0x00000010L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0_MASK 0x00000020L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0_MASK 0x0007FFC0L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0_MASK 0x0FFF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0_MASK 0x00FFFFFFL
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0_MASK 0x000FFE00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0_MASK 0xFFF00000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0_MASK 0x00000010L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0__SHIFT 0x5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0__SHIFT 0xd
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0__SHIFT 0xf
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0_MASK 0x00000010L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0_MASK 0x00000020L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0_MASK 0x00000E00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0_MASK 0x00001000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0_MASK 0x00002000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0_MASK 0x00004000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0_MASK 0x00008000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0_MASK 0x00700000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0_MASK 0x03800000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0_MASK 0x1C000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0_MASK 0xE0000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0xb
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0_MASK 0x00000038L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0_MASK 0x00000600L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x00003800L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0_MASK 0x0003C000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x001C0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0_MASK 0x01E00000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0_MASK 0x06000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0_MASK 0x18000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0_MASK 0xFF000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_MSI_MAP_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0_MASK 0x00800000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0_MASK 0x02000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_MSI_MAP_EN_DEV0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0__SHIFT 0x1
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0__SHIFT 0x5
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0_MASK 0x00000002L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000010L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0_MASK 0x00000020L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x00000F00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0x0000F000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0_MASK 0x00030000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0_MASK 0x00E00000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x0F000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0xF0000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP7
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0_MASK 0x00000F00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0_MASK 0x0000F000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0_MASK 0x1F000000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0_MASK 0xE0000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0_MASK 0xFF000000L
+//RCC_STRAP0_RCC_DEV0_PORT_STRAP9
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP1
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0_MASK 0xFFFF0000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0_MASK 0x000000FFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0_MASK 0x0000FF00L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0_MASK 0x00FF0000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0_MASK 0xFF000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0_MASK 0x0000FFFFL
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0_MASK 0x00FFF000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0_MASK 0x01000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0_MASK 0x00FFF000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0_MASK 0x00001000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0_MASK 0x01FFE000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP2
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0__SHIFT 0x6
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0__SHIFT 0xf
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0_MASK 0x00000001L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0_MASK 0x00000040L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0_MASK 0x00003E00L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0_MASK 0x00004000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0_MASK 0x00008000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0_MASK 0x00800000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0_MASK 0x07000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP3
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0_MASK 0x00E00000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP4
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0_MASK 0x000003FFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0_MASK 0x0F800000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0_MASK 0x70000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP5
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_AUX_CURRENT_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_AUX_CURRENT_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0__SHIFT 0x3
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0__SHIFT 0x4
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00000007L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0_MASK 0x00000008L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0_MASK 0x00000070L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0_MASK 0x00001E00L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0_MASK 0x0000E000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00070000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0_MASK 0x00780000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0_MASK 0x03800000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0_MASK 0xC0000000L
+//RCC_STRAP0_RCC_DEV0_EPF0_STRAP9
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0_MASK 0x00C00000L
+#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0_MASK 0x0F000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1_MASK 0x000F0000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1_MASK 0x00F00000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP2
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1__SHIFT 0x7
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1__SHIFT 0x8
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1__SHIFT 0x9
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1__SHIFT 0xe
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_ATS_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_DSN_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1_MASK 0x00000080L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1_MASK 0x00000100L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1_MASK 0x00003E00L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1_MASK 0x00004000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_ATS_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_DSN_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1_MASK 0x07000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP20
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP21
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP3
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1__SHIFT 0x13
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1__SHIFT 0x1a
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1_MASK 0x00080000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1_MASK 0x01000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1_MASK 0x04000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1_MASK 0x08000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP4
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1_MASK 0x0F800000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1_MASK 0x70000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP5
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1_MASK 0x38000000L
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1_MASK 0x40000000L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP6
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_64BAR_EN_DEV0_F1__SHIFT 0x2
+#define RCC_STRAP0_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_64BAR_EN_DEV0_F1_MASK 0x00000004L
+//RCC_STRAP0_RCC_DEV0_EPF1_STRAP7
+
+
+// addressBlock: nbif_bif_bx_pf_BIFPFVFDEC1
+//BIF_BX_PF0_BIF_BME_STATUS
+#define BIF_BX_PF0_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_PF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_PF0_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_PF0_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_PF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF0_GPU_HDP_FLUSH_REQ
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF0_GPU_HDP_FLUSH_DONE
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF0_BIF_TRANS_PENDING
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF0_MAILBOX_CONTROL
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_PF0_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_PF0_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_PF0_MAILBOX_INT_CNTL
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_PF0_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_PF0_BIF_VMHV_MAILBOX
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_PF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_BIFPFVFDEC1
+//RCC_DEV0_EPF0_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_gdc_GDCDEC
+//GDC0_SHUB_REGS_IF_CTL
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS__SHIFT 0x1
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
+#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS_MASK 0x00000002L
+//GDC0_A2S_QUEUE_FIFO_ARB_CNTL
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_PRIORITY__SHIFT 0x0
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_PRIORITY__SHIFT 0xa
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_MODE__SHIFT 0x14
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_MODE__SHIFT 0x15
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_PRIORITY_MASK 0x000003FFL
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_PRIORITY_MASK 0x000FFC00L
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_MODE_MASK 0x00100000L
+#define GDC0_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_MODE_MASK 0x00200000L
+//GDC0_NGDC_MGCG_CTRL
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DBG_DIS__SHIFT 0xe
+#define GDC0_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN__SHIFT 0xf
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DBG_DIS_MASK 0x00004000L
+#define GDC0_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN_MASK 0x00008000L
+//GDC0_S2A_MISC_CNTL
+#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3
+#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8
+#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa
+#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc
+#define GDC0_S2A_MISC_CNTL__HDP_PERF_ENH_DIS__SHIFT 0xf
+#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10
+#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L
+#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L
+#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L
+#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L
+#define GDC0_S2A_MISC_CNTL__HDP_PERF_ENH_DIS_MASK 0x00008000L
+#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L
+//GDC0_NGDC_PG_MISC_CTRL
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY__SHIFT 0xa
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1__SHIFT 0xd
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS__SHIFT 0xe
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2__SHIFT 0x10
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS__SHIFT 0x18
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE__SHIFT 0x1f
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY_MASK 0x00000400L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1_MASK 0x00002000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS_MASK 0x00004000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2_MASK 0x00010000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS_MASK 0x3F000000L
+#define GDC0_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE_MASK 0x80000000L
+//GDC0_NGDC_PGMST_CTRL
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS__SHIFT 0x0
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN__SHIFT 0x8
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN__SHIFT 0xe
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN_MASK 0x00000100L
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define GDC0_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN_MASK 0x0000C000L
+//GDC0_NGDC_PGSLV_CTRL
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS__SHIFT 0x0
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS__SHIFT 0x5
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS__SHIFT 0xa
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS_MASK 0x0000001FL
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS_MASK 0x000003E0L
+#define GDC0_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS_MASK 0x00007C00L
+//GDC0_ATDMA_MISC_CNTL
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_ARB_MODE__SHIFT 0x0
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_MISC_CNTL_INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0x1
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_RDRSP_ARB_MODE__SHIFT 0x2
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_VC6_WEIGHT__SHIFT 0x8
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_VC0_WEIGHT__SHIFT 0x10
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_VC1_WEIGHT__SHIFT 0x18
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_ARB_MODE_MASK 0x00000001L
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_MISC_CNTL_INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000002L
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_RDRSP_ARB_MODE_MASK 0x0000000CL
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_VC6_WEIGHT_MASK 0x0000FF00L
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_VC0_WEIGHT_MASK 0x00FF0000L
+#define GDC0_ATDMA_MISC_CNTL__ATDMA_WRR_VC1_WEIGHT_MASK 0xFF000000L
+
+
+// addressBlock: nbif_gdc_s2a_GDCS2A_DEC
+//GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_ENABLE__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWID__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_DROP_EN__SHIFT 0x1b
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_ENABLE_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWID_MASK 0x0000003EL
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_DROP_EN_MASK 0x08000000L
+#define GDC_S2A0_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG
+#define GDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x0
+#define GDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBE_FENCE_INTR_ENABLE__SHIFT 0x1
+#define GDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00000001L
+#define GDC_S2A0_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBE_FENCE_INTR_ENABLE_MASK 0x00000002L
+//GDC_S2A0_NBIF_GFX_DOORBELL_STATUS
+#define GDC_S2A0_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT__SHIFT 0x0
+#define GDC_S2A0_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_EN__SHIFT 0x10
+#define GDC_S2A0_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_ST__SHIFT 0x18
+#define GDC_S2A0_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT_MASK 0x0000FFFFL
+#define GDC_S2A0_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_EN_MASK 0x00010000L
+#define GDC_S2A0_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_ST_MASK 0x01000000L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf2_bifcfgdecp
+//BIF_CFG_DEV0_EPF2_VENDOR_ID
+#define BIF_CFG_DEV0_EPF2_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_DEVICE_ID
+#define BIF_CFG_DEV0_EPF2_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_COMMAND
+#define BIF_CFG_DEV0_EPF2_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF2_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF2_STATUS
+#define BIF_CFG_DEV0_EPF2_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF2_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF2_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF2_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF2_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF2_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF2_REVISION_ID
+#define BIF_CFG_DEV0_EPF2_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF2_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF2_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF2_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_SUB_CLASS
+#define BIF_CFG_DEV0_EPF2_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_BASE_CLASS
+#define BIF_CFG_DEV0_EPF2_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_CACHE_LINE
+#define BIF_CFG_DEV0_EPF2_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_LATENCY
+#define BIF_CFG_DEV0_EPF2_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_HEADER
+#define BIF_CFG_DEV0_EPF2_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF2_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF2_BIST
+#define BIF_CFG_DEV0_EPF2_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF2_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF2_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF2_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF2_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF2_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF2_CAP_PTR
+#define BIF_CFG_DEV0_EPF2_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF2_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF2_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_MIN_GRANT
+#define BIF_CFG_DEV0_EPF2_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF2_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF2_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF2_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF2_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF2_PMI_CAP
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF2_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF2_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF2_SBRN
+#define BIF_CFG_DEV0_EPF2_SBRN__SBRN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_SBRN__SBRN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_FLADJ
+#define BIF_CFG_DEV0_EPF2_FLADJ__FLADJ__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_FLADJ__NFC__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_FLADJ__FLADJ_MASK 0x3FL
+#define BIF_CFG_DEV0_EPF2_FLADJ__NFC_MASK 0x40L
+//BIF_CFG_DEV0_EPF2_DBESL_DBESLD
+#define BIF_CFG_DEV0_EPF2_DBESL_DBESLD__DBESL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DBESL_DBESLD__DBESLD__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_DBESL_DBESLD__DBESL_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF2_DBESL_DBESLD__DBESLD_MASK 0xF0L
+//BIF_CFG_DEV0_EPF2_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF2_PCIE_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF2_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF2_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF2_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF2_LINK_CAP
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF2_LINK_CNTL
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF2_LINK_STATUS
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF2_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF2_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF2_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF2_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_LINK_CAP2
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF2_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF2_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF2_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF2_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF2_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF2_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF2_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_MASK
+#define BIF_CFG_DEV0_EPF2_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF2_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_PENDING
+#define BIF_CFG_DEV0_EPF2_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF2_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF2_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF2_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF2_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF2_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF2_MSIX_PBA
+#define BIF_CFG_DEV0_EPF2_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF2_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF2_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF2_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF2_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF2_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF2_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbif_bif_cfg_dev0_epf3_bifcfgdecp
+//BIF_CFG_DEV0_EPF3_VENDOR_ID
+#define BIF_CFG_DEV0_EPF3_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_DEVICE_ID
+#define BIF_CFG_DEV0_EPF3_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_COMMAND
+#define BIF_CFG_DEV0_EPF3_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF3_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF3_STATUS
+#define BIF_CFG_DEV0_EPF3_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF3_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF3_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF3_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF3_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF3_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF3_REVISION_ID
+#define BIF_CFG_DEV0_EPF3_REVISION_ID__MINOR_REV_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_REVISION_ID__MINOR_REV_ID_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF3_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L
+//BIF_CFG_DEV0_EPF3_PROG_INTERFACE
+#define BIF_CFG_DEV0_EPF3_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_SUB_CLASS
+#define BIF_CFG_DEV0_EPF3_SUB_CLASS__SUB_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_SUB_CLASS__SUB_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_BASE_CLASS
+#define BIF_CFG_DEV0_EPF3_BASE_CLASS__BASE_CLASS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_CLASS__BASE_CLASS_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_CACHE_LINE
+#define BIF_CFG_DEV0_EPF3_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_LATENCY
+#define BIF_CFG_DEV0_EPF3_LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_LATENCY__LATENCY_TIMER_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_HEADER
+#define BIF_CFG_DEV0_EPF3_HEADER__HEADER_TYPE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_HEADER__DEVICE_TYPE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_HEADER__HEADER_TYPE_MASK 0x7FL
+#define BIF_CFG_DEV0_EPF3_HEADER__DEVICE_TYPE_MASK 0x80L
+//BIF_CFG_DEV0_EPF3_BIST
+#define BIF_CFG_DEV0_EPF3_BIST__BIST_COMP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BIST__BIST_STRT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_BIST__BIST_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_BIST__BIST_COMP_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF3_BIST__BIST_STRT_MASK 0x40L
+#define BIF_CFG_DEV0_EPF3_BIST__BIST_CAP_MASK 0x80L
+//BIF_CFG_DEV0_EPF3_BASE_ADDR_1
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_BASE_ADDR_2
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_BASE_ADDR_3
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_BASE_ADDR_4
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_BASE_ADDR_5
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_BASE_ADDR_6
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_CARDBUS_CIS_PTR
+#define BIF_CFG_DEV0_EPF3_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_CARDBUS_CIS_PTR__CARDBUS_CIS_PTR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_ADAPTER_ID
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__ROM_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__ROM_VALIDATION_STATUS__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__ROM_ENABLE_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__ROM_VALIDATION_STATUS_MASK 0x0000000EL
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__ROM_VALIDATION_DETAILS_MASK 0x000000F0L
+#define BIF_CFG_DEV0_EPF3_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFF800L
+//BIF_CFG_DEV0_EPF3_CAP_PTR
+#define BIF_CFG_DEV0_EPF3_CAP_PTR__CAP_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_CAP_PTR__CAP_PTR_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_INTERRUPT_LINE
+#define BIF_CFG_DEV0_EPF3_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_INTERRUPT_PIN
+#define BIF_CFG_DEV0_EPF3_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_MIN_GRANT
+#define BIF_CFG_DEV0_EPF3_MIN_GRANT__MIN_GNT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MIN_GRANT__MIN_GNT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_MAX_LATENCY
+#define BIF_CFG_DEV0_EPF3_MAX_LATENCY__MAX_LAT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MAX_LATENCY__MAX_LAT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L
+#define BIF_CFG_DEV0_EPF3_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L
+//BIF_CFG_DEV0_EPF3_ADAPTER_ID_W
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PMI_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PMI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF3_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF3_PMI_CAP
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__PME_CLOCK__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__AUX_CURRENT__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__D1_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__D2_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__PME_SUPPORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__VERSION_MASK 0x0007L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__PME_CLOCK_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__AUX_CURRENT_MASK 0x01C0L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__D1_SUPPORT_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__D2_SUPPORT_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF3_PMI_CAP__PME_SUPPORT_MASK 0xF800L
+//BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF3_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF3_SBRN
+#define BIF_CFG_DEV0_EPF3_SBRN__SBRN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_SBRN__SBRN_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_FLADJ
+#define BIF_CFG_DEV0_EPF3_FLADJ__FLADJ__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_FLADJ__NFC__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_FLADJ__FLADJ_MASK 0x3FL
+#define BIF_CFG_DEV0_EPF3_FLADJ__NFC_MASK 0x40L
+//BIF_CFG_DEV0_EPF3_DBESL_DBESLD
+#define BIF_CFG_DEV0_EPF3_DBESL_DBESLD__DBESL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DBESL_DBESLD__DBESLD__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_DBESL_DBESLD__DBESL_MASK 0x0FL
+#define BIF_CFG_DEV0_EPF3_DBESL_DBESLD__DBESLD_MASK 0xF0L
+//BIF_CFG_DEV0_EPF3_PCIE_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF3_PCIE_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__VERSION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__VERSION_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L
+//BIF_CFG_DEV0_EPF3_DEVICE_CAP
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__ERR_COR_SUBCLASS_CAPABLE_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L
+//BIF_CFG_DEV0_EPF3_DEVICE_CNTL
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L
+//BIF_CFG_DEV0_EPF3_DEVICE_STATUS
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__CORR_ERR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__AUX_PWR__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__CORR_ERR_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__AUX_PWR_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L
+//BIF_CFG_DEV0_EPF3_LINK_CAP
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__PM_SUPPORT__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__PORT_NUMBER__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__LINK_SPEED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF3_LINK_CNTL
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__PM_CONTROL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__LINK_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__PM_CONTROL_MASK 0x0003L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__PTM_PROP_DELAY_ADAPT_INTER_B_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__LINK_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L
+//BIF_CFG_DEV0_EPF3_LINK_STATUS
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__LINK_TRAINING__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__DL_ACTIVE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__LINK_TRAINING_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__DL_ACTIVE_MASK 0x2000L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L
+//BIF_CFG_DEV0_EPF3_DEVICE_CAP2
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF3_DEVICE_CNTL2
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__LTR_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L
+#define BIF_CFG_DEV0_EPF3_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L
+//BIF_CFG_DEV0_EPF3_DEVICE_STATUS2
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS2__RESERVED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_LINK_CAP2
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x0000FE00L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x007F0000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF3_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L
+//BIF_CFG_DEV0_EPF3_LINK_CNTL2
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L
+#define BIF_CFG_DEV0_EPF3_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L
+//BIF_CFG_DEV0_EPF3_LINK_STATUS2
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L
+#define BIF_CFG_DEV0_EPF3_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L
+//BIF_CFG_DEV0_EPF3_MSI_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_MSI_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF3_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_CAP_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_CNTL__MSI_EXT_MSG_DATA_EN_MASK 0x0400L
+//BIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_LO
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//BIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_HI
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_MSG_DATA
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_DATA__MSI_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA
+#define BIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA__MSI_EXT_DATA__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA__MSI_EXT_DATA_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_MASK
+#define BIF_CFG_DEV0_EPF3_MSI_MASK__MSI_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA_64
+#define BIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_EXT_MSG_DATA_64__MSI_EXT_DATA_64_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_MASK_64
+#define BIF_CFG_DEV0_EPF3_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_PENDING
+#define BIF_CFG_DEV0_EPF3_MSI_PENDING__MSI_PENDING__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_MSI_PENDING_64
+#define BIF_CFG_DEV0_EPF3_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_MSIX_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL
+#define BIF_CFG_DEV0_EPF3_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL
+#define BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL
+#define BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L
+#define BIF_CFG_DEV0_EPF3_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L
+//BIF_CFG_DEV0_EPF3_MSIX_TABLE
+#define BIF_CFG_DEV0_EPF3_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF3_MSIX_PBA
+#define BIF_CFG_DEV0_EPF3_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L
+//BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC1
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC2
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L
+#define BIF_CFG_DEV0_EPF3_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L
+//BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L
+#define BIF_CFG_DEV0_EPF3_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L
+//BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L
+#define BIF_CFG_DEV0_EPF3_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L
+//BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG0
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG1
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG2
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG3
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG0
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG1
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG2
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG3
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL
+//BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR1_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR1_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR2_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR2_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR3_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR3_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR4_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR4_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR5_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR5_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR6_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0xFFFFFFF0L
+//BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x00000007L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x000000E0L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x00003F00L
+#define BIF_CFG_DEV0_EPF3_PCIE_BAR6_CNTL__BAR_SIZE_SUPPORTED_UPPER_MASK 0xFFFF0000L
+//BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA_SELECT
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L
+//BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_LATENCY_INDICATOR
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000FFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x001FL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_1
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_2
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_3
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_4
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_5
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_6
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_7
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__ENHANCED_CAPABILITY__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__ENHANCED_CAPABILITY_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__IO_REQUEST_BLOCKING_EN_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__DSP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0300L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__USP_MEMORY_TARGET_ACCESS_CNTL_MASK 0x0C00L
+#define BIF_CFG_DEV0_EPF3_PCIE_ACS_CNTL__UNCLAIMED_REQUEST_REDIRECT_CNTL_MASK 0x1000L
+//BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L
+//BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L
+//BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L
+//BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF3_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbif_rcc_dev0_RCCPORTDEC
+//RCC_DEV0_1_RCC_VDM_SUPPORT
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
+//RCC_DEV0_1_RCC_BUS_CNTL
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT__SHIFT 0xc
+#define RCC_DEV0_1_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC__SHIFT 0xd
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT_MASK 0x00001000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC_MASK 0x00002000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
+#define RCC_DEV0_1_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
+//RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
+#define RCC_DEV0_1_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
+//RCC_DEV0_1_RCC_DEV0_LINK_CNTL
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT__SHIFT 0x0
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY__SHIFT 0x8
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS__SHIFT 0x10
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS__SHIFT 0x11
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS_MASK 0x00020000L
+//RCC_DEV0_1_RCC_CMN_LINK_CNTL
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
+//RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
+//RCC_DEV0_1_RCC_LTR_LSWITCH_CNTL
+#define RCC_DEV0_1_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
+//RCC_DEV0_1_RCC_MH_ARB_CNTL
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
+//RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
+//RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
+#define RCC_DEV0_1_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
+
+
+// addressBlock: nbif_rcc_ep_dev0_RCCPORTDEC
+//RCC_EP_DEV0_1_EP_PCIE_SCRATCH
+#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_1_EP_PCIE_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__MFIOV_GFX_F0_FLR_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__MFIOV_GFX_F0_FLR_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_EP_DEV0_1_EP_PCIE_INT_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+//RCC_EP_DEV0_1_EP_PCIE_INT_STATUS
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0_MASK 0x00000080L
+//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+//RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+//RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
+//RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define RCC_EP_DEV0_1_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
+//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
+#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL
+#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
+//RCC_EP_DEV0_1_EP_PCIEP_RESERVED
+#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_1_EP_PCIE_TX_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
+#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
+//RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
+#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
+//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+
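For writes, the same mask/shift pairs support a read-modify-write that touches only one field while leaving the rest of the register intact. A minimal sketch, again with the two constants repeated and operating on a plain variable rather than real MMIO; the helper name and the starting value are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* copied from the generated header so the example is self-contained */
#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK   0x00000007L

/* clear the 3-bit LTR_PRIV_S_SHORT_VALUE field, then insert a new value */
static void set_ltr_s_short_value(uint32_t *reg, uint32_t value)
{
	uint32_t v = *reg;

	v &= ~(uint32_t)RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK;
	v |= (value << RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT) &
	     RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK;
	*reg = v;
}

int main(void)
{
	uint32_t reg = 0x00010000;	/* hypothetical current register value */

	set_ltr_s_short_value(&reg, 0x5);
	printf("reg = 0x%08lx\n", (unsigned long)reg);	/* prints 0x00010005 */
	return 0;
}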
+
+// addressBlock: nbif_rcc_dwn_dev0_RCCPORTDEC
+//RCC_DWN_DEV0_1_DN_PCIE_RESERVED
+#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_1_DN_PCIE_SCRATCH
+#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_1_DN_PCIE_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
+#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+//RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2
+#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+//RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
+//RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+//RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define RCC_DWN_DEV0_1_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+
+
+// addressBlock: nbif_rcc_dwnp_dev0_RCCPORTDEC
+//RCC_DWNP_DEV0_1_PCIE_ERR_CNTL
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR__SHIFT 0x12
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR__SHIFT 0x13
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR__SHIFT 0x14
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR_MASK 0x00040000L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR_MASK 0x00080000L
+#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR_MASK 0x00100000L
+//RCC_DWNP_DEV0_1_PCIE_RX_CNTL
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
+#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+//RCC_DWNP_DEV0_1_PCIE_LC_CNTL2
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_1_PCIEP_STRAP_MISC
+#define RCC_DWNP_DEV0_1_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
+#define RCC_DWNP_DEV0_1_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
+//RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP
+#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
+#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_pfc_amdgfx_RCCPFCDEC
+//RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
+#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
+//RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_SENT_FLAG__SHIFT 0x9
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
+#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_SENT_FLAG_MASK 0x00000200L
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
+#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
+
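Because these pairs are machine-generated, a cheap way to catch a mismatched entry is a build-time check that a field's mask really starts at its shift and has no bits below it. A minimal sketch with a hypothetical CHECK_FIELD macro built on C11 static_assert, repeating one SNOOP_LATENCY_SCALE pair from above:

#include <assert.h>

/* copied from the generated header so the example is self-contained */
#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK   0x00001C00L

/* illustrative check: mask's lowest set bit is at SHIFT and nothing sits below it */
#define CHECK_FIELD(reg, field) \
	static_assert((reg##__##field##_MASK >> reg##__##field##__SHIFT) & 1, \
		      #field ": mask does not start at its shift"); \
	static_assert((reg##__##field##_MASK & ((1UL << reg##__##field##__SHIFT) - 1)) == 0, \
		      #field ": mask has bits below its shift")

CHECK_FIELD(RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL, SNOOP_LATENCY_SCALE);

int main(void)
{
	return 0;	/* nothing to do at run time; the checks are compile-time */
}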
+
+// addressBlock: nbif_rcc_pfc_amdgfxaz_RCCPFCDEC
+//RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
+//RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_SENT_FLAG__SHIFT 0x9
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_SENT_FLAG_MASK 0x00000200L
+//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
+//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
+//RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
+#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
+
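The MSIXTDEC block that starts below lays out one four-dword table slot per vector (address low/high, message data, and a control word whose bit 0 is the per-vector mask), the same shape as a standard PCI MSI-X table entry. A minimal sketch of filling one slot under that assumption; the struct, the helper, and the example address/data are illustrative, not driver state:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* copied from the generated header so the example is self-contained */
#define PCIEMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK  0xFFFFFFFCL
#define PCIEMSIX_VECT0_CONTROL__MASK_BIT_MASK     0x00000001L

/* illustrative mirror of one MSI-X table slot: addr lo, addr hi, data, control */
struct msix_entry_regs {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t msg_data;
	uint32_t control;
};

static void fill_vector0(struct msix_entry_regs *e, uint64_t addr, uint32_t data)
{
	/* the message address is dword aligned, so bits [1:0] are not writable */
	e->addr_lo  = (uint32_t)(addr & PCIEMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK);
	e->addr_hi  = (uint32_t)(addr >> 32);
	e->msg_data = data;
	e->control |= PCIEMSIX_VECT0_CONTROL__MASK_BIT_MASK;	/* keep the vector masked while programming it */
}

int main(void)
{
	struct msix_entry_regs e = { 0 };

	fill_vector0(&e, 0x00000000fee00000ULL, 0x4021);	/* illustrative address/data */
	printf("addr 0x%08" PRIx32 ":%08" PRIx32 " data 0x%" PRIx32 " ctrl 0x%" PRIx32 "\n",
	       e.addr_hi, e.addr_lo, e.msg_data, e.control);
	return 0;
}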
+
+// addressBlock: nbif_pciemsix_0_usb_MSIXTDEC
+//PCIEMSIX_VECT0_ADDR_LO
+#define PCIEMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT0_ADDR_HI
+#define PCIEMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT0_MSG_DATA
+#define PCIEMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT0_CONTROL
+#define PCIEMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT1_ADDR_LO
+#define PCIEMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT1_ADDR_HI
+#define PCIEMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT1_MSG_DATA
+#define PCIEMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT1_CONTROL
+#define PCIEMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT2_ADDR_LO
+#define PCIEMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT2_ADDR_HI
+#define PCIEMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT2_MSG_DATA
+#define PCIEMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT2_CONTROL
+#define PCIEMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT3_ADDR_LO
+#define PCIEMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT3_ADDR_HI
+#define PCIEMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT3_MSG_DATA
+#define PCIEMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT3_CONTROL
+#define PCIEMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT4_ADDR_LO
+#define PCIEMSIX_VECT4_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT4_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT4_ADDR_HI
+#define PCIEMSIX_VECT4_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT4_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT4_MSG_DATA
+#define PCIEMSIX_VECT4_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT4_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT4_CONTROL
+#define PCIEMSIX_VECT4_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT4_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT5_ADDR_LO
+#define PCIEMSIX_VECT5_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT5_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT5_ADDR_HI
+#define PCIEMSIX_VECT5_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT5_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT5_MSG_DATA
+#define PCIEMSIX_VECT5_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT5_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT5_CONTROL
+#define PCIEMSIX_VECT5_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT5_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT6_ADDR_LO
+#define PCIEMSIX_VECT6_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT6_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT6_ADDR_HI
+#define PCIEMSIX_VECT6_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT6_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT6_MSG_DATA
+#define PCIEMSIX_VECT6_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT6_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT6_CONTROL
+#define PCIEMSIX_VECT6_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT6_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT7_ADDR_LO
+#define PCIEMSIX_VECT7_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT7_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT7_ADDR_HI
+#define PCIEMSIX_VECT7_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT7_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT7_MSG_DATA
+#define PCIEMSIX_VECT7_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT7_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT7_CONTROL
+#define PCIEMSIX_VECT7_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT7_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT8_ADDR_LO
+#define PCIEMSIX_VECT8_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT8_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT8_ADDR_HI
+#define PCIEMSIX_VECT8_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT8_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT8_MSG_DATA
+#define PCIEMSIX_VECT8_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT8_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT8_CONTROL
+#define PCIEMSIX_VECT8_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT8_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT9_ADDR_LO
+#define PCIEMSIX_VECT9_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT9_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT9_ADDR_HI
+#define PCIEMSIX_VECT9_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT9_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT9_MSG_DATA
+#define PCIEMSIX_VECT9_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT9_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT9_CONTROL
+#define PCIEMSIX_VECT9_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT9_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT10_ADDR_LO
+#define PCIEMSIX_VECT10_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT10_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT10_ADDR_HI
+#define PCIEMSIX_VECT10_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT10_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT10_MSG_DATA
+#define PCIEMSIX_VECT10_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT10_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT10_CONTROL
+#define PCIEMSIX_VECT10_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT10_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT11_ADDR_LO
+#define PCIEMSIX_VECT11_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT11_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT11_ADDR_HI
+#define PCIEMSIX_VECT11_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT11_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT11_MSG_DATA
+#define PCIEMSIX_VECT11_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT11_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT11_CONTROL
+#define PCIEMSIX_VECT11_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT11_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT12_ADDR_LO
+#define PCIEMSIX_VECT12_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT12_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT12_ADDR_HI
+#define PCIEMSIX_VECT12_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT12_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT12_MSG_DATA
+#define PCIEMSIX_VECT12_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT12_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT12_CONTROL
+#define PCIEMSIX_VECT12_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT12_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT13_ADDR_LO
+#define PCIEMSIX_VECT13_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT13_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT13_ADDR_HI
+#define PCIEMSIX_VECT13_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT13_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT13_MSG_DATA
+#define PCIEMSIX_VECT13_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT13_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT13_CONTROL
+#define PCIEMSIX_VECT13_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT13_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT14_ADDR_LO
+#define PCIEMSIX_VECT14_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT14_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT14_ADDR_HI
+#define PCIEMSIX_VECT14_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT14_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT14_MSG_DATA
+#define PCIEMSIX_VECT14_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT14_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT14_CONTROL
+#define PCIEMSIX_VECT14_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT14_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT15_ADDR_LO
+#define PCIEMSIX_VECT15_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT15_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT15_ADDR_HI
+#define PCIEMSIX_VECT15_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT15_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT15_MSG_DATA
+#define PCIEMSIX_VECT15_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT15_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT15_CONTROL
+#define PCIEMSIX_VECT15_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT15_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT16_ADDR_LO
+#define PCIEMSIX_VECT16_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT16_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT16_ADDR_HI
+#define PCIEMSIX_VECT16_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT16_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT16_MSG_DATA
+#define PCIEMSIX_VECT16_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT16_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT16_CONTROL
+#define PCIEMSIX_VECT16_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT16_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT17_ADDR_LO
+#define PCIEMSIX_VECT17_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT17_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT17_ADDR_HI
+#define PCIEMSIX_VECT17_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT17_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT17_MSG_DATA
+#define PCIEMSIX_VECT17_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT17_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT17_CONTROL
+#define PCIEMSIX_VECT17_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT17_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT18_ADDR_LO
+#define PCIEMSIX_VECT18_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT18_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT18_ADDR_HI
+#define PCIEMSIX_VECT18_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT18_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT18_MSG_DATA
+#define PCIEMSIX_VECT18_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT18_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT18_CONTROL
+#define PCIEMSIX_VECT18_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT18_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT19_ADDR_LO
+#define PCIEMSIX_VECT19_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT19_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT19_ADDR_HI
+#define PCIEMSIX_VECT19_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT19_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT19_MSG_DATA
+#define PCIEMSIX_VECT19_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT19_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT19_CONTROL
+#define PCIEMSIX_VECT19_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT19_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT20_ADDR_LO
+#define PCIEMSIX_VECT20_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT20_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT20_ADDR_HI
+#define PCIEMSIX_VECT20_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT20_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT20_MSG_DATA
+#define PCIEMSIX_VECT20_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT20_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT20_CONTROL
+#define PCIEMSIX_VECT20_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT20_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT21_ADDR_LO
+#define PCIEMSIX_VECT21_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT21_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT21_ADDR_HI
+#define PCIEMSIX_VECT21_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT21_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT21_MSG_DATA
+#define PCIEMSIX_VECT21_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT21_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT21_CONTROL
+#define PCIEMSIX_VECT21_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT21_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT22_ADDR_LO
+#define PCIEMSIX_VECT22_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT22_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT22_ADDR_HI
+#define PCIEMSIX_VECT22_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT22_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT22_MSG_DATA
+#define PCIEMSIX_VECT22_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT22_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT22_CONTROL
+#define PCIEMSIX_VECT22_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT22_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT23_ADDR_LO
+#define PCIEMSIX_VECT23_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT23_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT23_ADDR_HI
+#define PCIEMSIX_VECT23_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT23_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT23_MSG_DATA
+#define PCIEMSIX_VECT23_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT23_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT23_CONTROL
+#define PCIEMSIX_VECT23_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT23_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT24_ADDR_LO
+#define PCIEMSIX_VECT24_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT24_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT24_ADDR_HI
+#define PCIEMSIX_VECT24_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT24_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT24_MSG_DATA
+#define PCIEMSIX_VECT24_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT24_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT24_CONTROL
+#define PCIEMSIX_VECT24_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT24_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT25_ADDR_LO
+#define PCIEMSIX_VECT25_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT25_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT25_ADDR_HI
+#define PCIEMSIX_VECT25_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT25_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT25_MSG_DATA
+#define PCIEMSIX_VECT25_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT25_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT25_CONTROL
+#define PCIEMSIX_VECT25_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT25_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT26_ADDR_LO
+#define PCIEMSIX_VECT26_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT26_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT26_ADDR_HI
+#define PCIEMSIX_VECT26_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT26_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT26_MSG_DATA
+#define PCIEMSIX_VECT26_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT26_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT26_CONTROL
+#define PCIEMSIX_VECT26_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT26_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT27_ADDR_LO
+#define PCIEMSIX_VECT27_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT27_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT27_ADDR_HI
+#define PCIEMSIX_VECT27_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT27_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT27_MSG_DATA
+#define PCIEMSIX_VECT27_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT27_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT27_CONTROL
+#define PCIEMSIX_VECT27_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT27_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT28_ADDR_LO
+#define PCIEMSIX_VECT28_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT28_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT28_ADDR_HI
+#define PCIEMSIX_VECT28_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT28_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT28_MSG_DATA
+#define PCIEMSIX_VECT28_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT28_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT28_CONTROL
+#define PCIEMSIX_VECT28_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT28_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT29_ADDR_LO
+#define PCIEMSIX_VECT29_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT29_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT29_ADDR_HI
+#define PCIEMSIX_VECT29_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT29_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT29_MSG_DATA
+#define PCIEMSIX_VECT29_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT29_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT29_CONTROL
+#define PCIEMSIX_VECT29_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT29_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT30_ADDR_LO
+#define PCIEMSIX_VECT30_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT30_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT30_ADDR_HI
+#define PCIEMSIX_VECT30_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT30_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT30_MSG_DATA
+#define PCIEMSIX_VECT30_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT30_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT30_CONTROL
+#define PCIEMSIX_VECT30_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT30_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT31_ADDR_LO
+#define PCIEMSIX_VECT31_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT31_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT31_ADDR_HI
+#define PCIEMSIX_VECT31_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT31_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT31_MSG_DATA
+#define PCIEMSIX_VECT31_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT31_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT31_CONTROL
+#define PCIEMSIX_VECT31_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT31_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT32_ADDR_LO
+#define PCIEMSIX_VECT32_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT32_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT32_ADDR_HI
+#define PCIEMSIX_VECT32_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT32_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT32_MSG_DATA
+#define PCIEMSIX_VECT32_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT32_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT32_CONTROL
+#define PCIEMSIX_VECT32_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT32_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT33_ADDR_LO
+#define PCIEMSIX_VECT33_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT33_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT33_ADDR_HI
+#define PCIEMSIX_VECT33_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT33_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT33_MSG_DATA
+#define PCIEMSIX_VECT33_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT33_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT33_CONTROL
+#define PCIEMSIX_VECT33_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT33_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT34_ADDR_LO
+#define PCIEMSIX_VECT34_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT34_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT34_ADDR_HI
+#define PCIEMSIX_VECT34_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT34_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT34_MSG_DATA
+#define PCIEMSIX_VECT34_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT34_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT34_CONTROL
+#define PCIEMSIX_VECT34_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT34_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT35_ADDR_LO
+#define PCIEMSIX_VECT35_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT35_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT35_ADDR_HI
+#define PCIEMSIX_VECT35_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT35_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT35_MSG_DATA
+#define PCIEMSIX_VECT35_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT35_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT35_CONTROL
+#define PCIEMSIX_VECT35_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT35_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT36_ADDR_LO
+#define PCIEMSIX_VECT36_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT36_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT36_ADDR_HI
+#define PCIEMSIX_VECT36_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT36_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT36_MSG_DATA
+#define PCIEMSIX_VECT36_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT36_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT36_CONTROL
+#define PCIEMSIX_VECT36_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT36_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT37_ADDR_LO
+#define PCIEMSIX_VECT37_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT37_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT37_ADDR_HI
+#define PCIEMSIX_VECT37_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT37_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT37_MSG_DATA
+#define PCIEMSIX_VECT37_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT37_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT37_CONTROL
+#define PCIEMSIX_VECT37_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT37_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT38_ADDR_LO
+#define PCIEMSIX_VECT38_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT38_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT38_ADDR_HI
+#define PCIEMSIX_VECT38_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT38_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT38_MSG_DATA
+#define PCIEMSIX_VECT38_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT38_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT38_CONTROL
+#define PCIEMSIX_VECT38_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT38_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT39_ADDR_LO
+#define PCIEMSIX_VECT39_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT39_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT39_ADDR_HI
+#define PCIEMSIX_VECT39_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT39_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT39_MSG_DATA
+#define PCIEMSIX_VECT39_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT39_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT39_CONTROL
+#define PCIEMSIX_VECT39_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT39_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT40_ADDR_LO
+#define PCIEMSIX_VECT40_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT40_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT40_ADDR_HI
+#define PCIEMSIX_VECT40_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT40_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT40_MSG_DATA
+#define PCIEMSIX_VECT40_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT40_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT40_CONTROL
+#define PCIEMSIX_VECT40_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT40_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT41_ADDR_LO
+#define PCIEMSIX_VECT41_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT41_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT41_ADDR_HI
+#define PCIEMSIX_VECT41_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT41_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT41_MSG_DATA
+#define PCIEMSIX_VECT41_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT41_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT41_CONTROL
+#define PCIEMSIX_VECT41_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT41_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT42_ADDR_LO
+#define PCIEMSIX_VECT42_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT42_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT42_ADDR_HI
+#define PCIEMSIX_VECT42_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT42_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT42_MSG_DATA
+#define PCIEMSIX_VECT42_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT42_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT42_CONTROL
+#define PCIEMSIX_VECT42_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT42_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT43_ADDR_LO
+#define PCIEMSIX_VECT43_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT43_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT43_ADDR_HI
+#define PCIEMSIX_VECT43_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT43_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT43_MSG_DATA
+#define PCIEMSIX_VECT43_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT43_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT43_CONTROL
+#define PCIEMSIX_VECT43_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT43_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT44_ADDR_LO
+#define PCIEMSIX_VECT44_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT44_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT44_ADDR_HI
+#define PCIEMSIX_VECT44_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT44_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT44_MSG_DATA
+#define PCIEMSIX_VECT44_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT44_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT44_CONTROL
+#define PCIEMSIX_VECT44_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT44_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT45_ADDR_LO
+#define PCIEMSIX_VECT45_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT45_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT45_ADDR_HI
+#define PCIEMSIX_VECT45_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT45_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT45_MSG_DATA
+#define PCIEMSIX_VECT45_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT45_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT45_CONTROL
+#define PCIEMSIX_VECT45_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT45_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT46_ADDR_LO
+#define PCIEMSIX_VECT46_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT46_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT46_ADDR_HI
+#define PCIEMSIX_VECT46_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT46_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT46_MSG_DATA
+#define PCIEMSIX_VECT46_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT46_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT46_CONTROL
+#define PCIEMSIX_VECT46_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT46_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT47_ADDR_LO
+#define PCIEMSIX_VECT47_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT47_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT47_ADDR_HI
+#define PCIEMSIX_VECT47_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT47_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT47_MSG_DATA
+#define PCIEMSIX_VECT47_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT47_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT47_CONTROL
+#define PCIEMSIX_VECT47_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT47_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT48_ADDR_LO
+#define PCIEMSIX_VECT48_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT48_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT48_ADDR_HI
+#define PCIEMSIX_VECT48_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT48_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT48_MSG_DATA
+#define PCIEMSIX_VECT48_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT48_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT48_CONTROL
+#define PCIEMSIX_VECT48_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT48_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT49_ADDR_LO
+#define PCIEMSIX_VECT49_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT49_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT49_ADDR_HI
+#define PCIEMSIX_VECT49_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT49_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT49_MSG_DATA
+#define PCIEMSIX_VECT49_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT49_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT49_CONTROL
+#define PCIEMSIX_VECT49_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT49_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT50_ADDR_LO
+#define PCIEMSIX_VECT50_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT50_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT50_ADDR_HI
+#define PCIEMSIX_VECT50_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT50_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT50_MSG_DATA
+#define PCIEMSIX_VECT50_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT50_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT50_CONTROL
+#define PCIEMSIX_VECT50_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT50_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT51_ADDR_LO
+#define PCIEMSIX_VECT51_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT51_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT51_ADDR_HI
+#define PCIEMSIX_VECT51_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT51_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT51_MSG_DATA
+#define PCIEMSIX_VECT51_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT51_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT51_CONTROL
+#define PCIEMSIX_VECT51_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT51_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT52_ADDR_LO
+#define PCIEMSIX_VECT52_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT52_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT52_ADDR_HI
+#define PCIEMSIX_VECT52_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT52_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT52_MSG_DATA
+#define PCIEMSIX_VECT52_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT52_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT52_CONTROL
+#define PCIEMSIX_VECT52_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT52_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT53_ADDR_LO
+#define PCIEMSIX_VECT53_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT53_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT53_ADDR_HI
+#define PCIEMSIX_VECT53_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT53_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT53_MSG_DATA
+#define PCIEMSIX_VECT53_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT53_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT53_CONTROL
+#define PCIEMSIX_VECT53_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT53_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT54_ADDR_LO
+#define PCIEMSIX_VECT54_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT54_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT54_ADDR_HI
+#define PCIEMSIX_VECT54_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT54_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT54_MSG_DATA
+#define PCIEMSIX_VECT54_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT54_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT54_CONTROL
+#define PCIEMSIX_VECT54_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT54_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT55_ADDR_LO
+#define PCIEMSIX_VECT55_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT55_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT55_ADDR_HI
+#define PCIEMSIX_VECT55_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT55_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT55_MSG_DATA
+#define PCIEMSIX_VECT55_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT55_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT55_CONTROL
+#define PCIEMSIX_VECT55_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT55_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT56_ADDR_LO
+#define PCIEMSIX_VECT56_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT56_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT56_ADDR_HI
+#define PCIEMSIX_VECT56_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT56_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT56_MSG_DATA
+#define PCIEMSIX_VECT56_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT56_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT56_CONTROL
+#define PCIEMSIX_VECT56_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT56_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT57_ADDR_LO
+#define PCIEMSIX_VECT57_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT57_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT57_ADDR_HI
+#define PCIEMSIX_VECT57_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT57_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT57_MSG_DATA
+#define PCIEMSIX_VECT57_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT57_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT57_CONTROL
+#define PCIEMSIX_VECT57_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT57_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT58_ADDR_LO
+#define PCIEMSIX_VECT58_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT58_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT58_ADDR_HI
+#define PCIEMSIX_VECT58_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT58_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT58_MSG_DATA
+#define PCIEMSIX_VECT58_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT58_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT58_CONTROL
+#define PCIEMSIX_VECT58_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT58_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT59_ADDR_LO
+#define PCIEMSIX_VECT59_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT59_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT59_ADDR_HI
+#define PCIEMSIX_VECT59_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT59_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT59_MSG_DATA
+#define PCIEMSIX_VECT59_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT59_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT59_CONTROL
+#define PCIEMSIX_VECT59_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT59_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT60_ADDR_LO
+#define PCIEMSIX_VECT60_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT60_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT60_ADDR_HI
+#define PCIEMSIX_VECT60_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT60_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT60_MSG_DATA
+#define PCIEMSIX_VECT60_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT60_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT60_CONTROL
+#define PCIEMSIX_VECT60_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT60_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT61_ADDR_LO
+#define PCIEMSIX_VECT61_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT61_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT61_ADDR_HI
+#define PCIEMSIX_VECT61_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT61_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT61_MSG_DATA
+#define PCIEMSIX_VECT61_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT61_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT61_CONTROL
+#define PCIEMSIX_VECT61_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT61_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT62_ADDR_LO
+#define PCIEMSIX_VECT62_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT62_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT62_ADDR_HI
+#define PCIEMSIX_VECT62_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT62_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT62_MSG_DATA
+#define PCIEMSIX_VECT62_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT62_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT62_CONTROL
+#define PCIEMSIX_VECT62_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT62_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT63_ADDR_LO
+#define PCIEMSIX_VECT63_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT63_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT63_ADDR_HI
+#define PCIEMSIX_VECT63_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT63_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT63_MSG_DATA
+#define PCIEMSIX_VECT63_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT63_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT63_CONTROL
+#define PCIEMSIX_VECT63_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT63_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT64_ADDR_LO
+#define PCIEMSIX_VECT64_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT64_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT64_ADDR_HI
+#define PCIEMSIX_VECT64_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT64_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT64_MSG_DATA
+#define PCIEMSIX_VECT64_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT64_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT64_CONTROL
+#define PCIEMSIX_VECT64_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT64_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT65_ADDR_LO
+#define PCIEMSIX_VECT65_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT65_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT65_ADDR_HI
+#define PCIEMSIX_VECT65_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT65_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT65_MSG_DATA
+#define PCIEMSIX_VECT65_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT65_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT65_CONTROL
+#define PCIEMSIX_VECT65_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT65_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT66_ADDR_LO
+#define PCIEMSIX_VECT66_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT66_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT66_ADDR_HI
+#define PCIEMSIX_VECT66_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT66_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT66_MSG_DATA
+#define PCIEMSIX_VECT66_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT66_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT66_CONTROL
+#define PCIEMSIX_VECT66_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT66_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT67_ADDR_LO
+#define PCIEMSIX_VECT67_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT67_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT67_ADDR_HI
+#define PCIEMSIX_VECT67_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT67_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT67_MSG_DATA
+#define PCIEMSIX_VECT67_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT67_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT67_CONTROL
+#define PCIEMSIX_VECT67_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT67_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT68_ADDR_LO
+#define PCIEMSIX_VECT68_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT68_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT68_ADDR_HI
+#define PCIEMSIX_VECT68_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT68_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT68_MSG_DATA
+#define PCIEMSIX_VECT68_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT68_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT68_CONTROL
+#define PCIEMSIX_VECT68_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT68_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT69_ADDR_LO
+#define PCIEMSIX_VECT69_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT69_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT69_ADDR_HI
+#define PCIEMSIX_VECT69_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT69_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT69_MSG_DATA
+#define PCIEMSIX_VECT69_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT69_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT69_CONTROL
+#define PCIEMSIX_VECT69_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT69_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT70_ADDR_LO
+#define PCIEMSIX_VECT70_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT70_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT70_ADDR_HI
+#define PCIEMSIX_VECT70_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT70_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT70_MSG_DATA
+#define PCIEMSIX_VECT70_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT70_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT70_CONTROL
+#define PCIEMSIX_VECT70_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT70_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT71_ADDR_LO
+#define PCIEMSIX_VECT71_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT71_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT71_ADDR_HI
+#define PCIEMSIX_VECT71_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT71_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT71_MSG_DATA
+#define PCIEMSIX_VECT71_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT71_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT71_CONTROL
+#define PCIEMSIX_VECT71_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT71_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT72_ADDR_LO
+#define PCIEMSIX_VECT72_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT72_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT72_ADDR_HI
+#define PCIEMSIX_VECT72_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT72_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT72_MSG_DATA
+#define PCIEMSIX_VECT72_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT72_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT72_CONTROL
+#define PCIEMSIX_VECT72_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT72_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT73_ADDR_LO
+#define PCIEMSIX_VECT73_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT73_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT73_ADDR_HI
+#define PCIEMSIX_VECT73_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT73_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT73_MSG_DATA
+#define PCIEMSIX_VECT73_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT73_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT73_CONTROL
+#define PCIEMSIX_VECT73_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT73_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT74_ADDR_LO
+#define PCIEMSIX_VECT74_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT74_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT74_ADDR_HI
+#define PCIEMSIX_VECT74_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT74_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT74_MSG_DATA
+#define PCIEMSIX_VECT74_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT74_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT74_CONTROL
+#define PCIEMSIX_VECT74_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT74_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT75_ADDR_LO
+#define PCIEMSIX_VECT75_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT75_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT75_ADDR_HI
+#define PCIEMSIX_VECT75_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT75_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT75_MSG_DATA
+#define PCIEMSIX_VECT75_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT75_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT75_CONTROL
+#define PCIEMSIX_VECT75_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT75_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT76_ADDR_LO
+#define PCIEMSIX_VECT76_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT76_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT76_ADDR_HI
+#define PCIEMSIX_VECT76_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT76_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT76_MSG_DATA
+#define PCIEMSIX_VECT76_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT76_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT76_CONTROL
+#define PCIEMSIX_VECT76_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT76_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT77_ADDR_LO
+#define PCIEMSIX_VECT77_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT77_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT77_ADDR_HI
+#define PCIEMSIX_VECT77_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT77_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT77_MSG_DATA
+#define PCIEMSIX_VECT77_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT77_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT77_CONTROL
+#define PCIEMSIX_VECT77_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT77_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT78_ADDR_LO
+#define PCIEMSIX_VECT78_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT78_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT78_ADDR_HI
+#define PCIEMSIX_VECT78_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT78_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT78_MSG_DATA
+#define PCIEMSIX_VECT78_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT78_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT78_CONTROL
+#define PCIEMSIX_VECT78_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT78_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT79_ADDR_LO
+#define PCIEMSIX_VECT79_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT79_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT79_ADDR_HI
+#define PCIEMSIX_VECT79_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT79_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT79_MSG_DATA
+#define PCIEMSIX_VECT79_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT79_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT79_CONTROL
+#define PCIEMSIX_VECT79_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT79_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT80_ADDR_LO
+#define PCIEMSIX_VECT80_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT80_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT80_ADDR_HI
+#define PCIEMSIX_VECT80_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT80_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT80_MSG_DATA
+#define PCIEMSIX_VECT80_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT80_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT80_CONTROL
+#define PCIEMSIX_VECT80_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT80_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT81_ADDR_LO
+#define PCIEMSIX_VECT81_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT81_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT81_ADDR_HI
+#define PCIEMSIX_VECT81_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT81_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT81_MSG_DATA
+#define PCIEMSIX_VECT81_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT81_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT81_CONTROL
+#define PCIEMSIX_VECT81_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT81_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT82_ADDR_LO
+#define PCIEMSIX_VECT82_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT82_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT82_ADDR_HI
+#define PCIEMSIX_VECT82_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT82_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT82_MSG_DATA
+#define PCIEMSIX_VECT82_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT82_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT82_CONTROL
+#define PCIEMSIX_VECT82_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT82_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT83_ADDR_LO
+#define PCIEMSIX_VECT83_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT83_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT83_ADDR_HI
+#define PCIEMSIX_VECT83_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT83_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT83_MSG_DATA
+#define PCIEMSIX_VECT83_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT83_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT83_CONTROL
+#define PCIEMSIX_VECT83_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT83_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT84_ADDR_LO
+#define PCIEMSIX_VECT84_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT84_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT84_ADDR_HI
+#define PCIEMSIX_VECT84_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT84_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT84_MSG_DATA
+#define PCIEMSIX_VECT84_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT84_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT84_CONTROL
+#define PCIEMSIX_VECT84_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT84_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT85_ADDR_LO
+#define PCIEMSIX_VECT85_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT85_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT85_ADDR_HI
+#define PCIEMSIX_VECT85_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT85_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT85_MSG_DATA
+#define PCIEMSIX_VECT85_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT85_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT85_CONTROL
+#define PCIEMSIX_VECT85_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT85_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT86_ADDR_LO
+#define PCIEMSIX_VECT86_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT86_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT86_ADDR_HI
+#define PCIEMSIX_VECT86_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT86_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT86_MSG_DATA
+#define PCIEMSIX_VECT86_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT86_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT86_CONTROL
+#define PCIEMSIX_VECT86_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT86_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT87_ADDR_LO
+#define PCIEMSIX_VECT87_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT87_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT87_ADDR_HI
+#define PCIEMSIX_VECT87_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT87_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT87_MSG_DATA
+#define PCIEMSIX_VECT87_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT87_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT87_CONTROL
+#define PCIEMSIX_VECT87_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT87_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT88_ADDR_LO
+#define PCIEMSIX_VECT88_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT88_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT88_ADDR_HI
+#define PCIEMSIX_VECT88_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT88_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT88_MSG_DATA
+#define PCIEMSIX_VECT88_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT88_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT88_CONTROL
+#define PCIEMSIX_VECT88_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT88_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT89_ADDR_LO
+#define PCIEMSIX_VECT89_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT89_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT89_ADDR_HI
+#define PCIEMSIX_VECT89_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT89_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT89_MSG_DATA
+#define PCIEMSIX_VECT89_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT89_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT89_CONTROL
+#define PCIEMSIX_VECT89_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT89_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT90_ADDR_LO
+#define PCIEMSIX_VECT90_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT90_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT90_ADDR_HI
+#define PCIEMSIX_VECT90_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT90_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT90_MSG_DATA
+#define PCIEMSIX_VECT90_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT90_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT90_CONTROL
+#define PCIEMSIX_VECT90_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT90_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT91_ADDR_LO
+#define PCIEMSIX_VECT91_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT91_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT91_ADDR_HI
+#define PCIEMSIX_VECT91_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT91_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT91_MSG_DATA
+#define PCIEMSIX_VECT91_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT91_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT91_CONTROL
+#define PCIEMSIX_VECT91_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT91_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT92_ADDR_LO
+#define PCIEMSIX_VECT92_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT92_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT92_ADDR_HI
+#define PCIEMSIX_VECT92_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT92_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT92_MSG_DATA
+#define PCIEMSIX_VECT92_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT92_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT92_CONTROL
+#define PCIEMSIX_VECT92_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT92_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT93_ADDR_LO
+#define PCIEMSIX_VECT93_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT93_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT93_ADDR_HI
+#define PCIEMSIX_VECT93_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT93_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT93_MSG_DATA
+#define PCIEMSIX_VECT93_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT93_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT93_CONTROL
+#define PCIEMSIX_VECT93_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT93_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT94_ADDR_LO
+#define PCIEMSIX_VECT94_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT94_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT94_ADDR_HI
+#define PCIEMSIX_VECT94_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT94_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT94_MSG_DATA
+#define PCIEMSIX_VECT94_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT94_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT94_CONTROL
+#define PCIEMSIX_VECT94_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT94_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT95_ADDR_LO
+#define PCIEMSIX_VECT95_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT95_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT95_ADDR_HI
+#define PCIEMSIX_VECT95_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT95_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT95_MSG_DATA
+#define PCIEMSIX_VECT95_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT95_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT95_CONTROL
+#define PCIEMSIX_VECT95_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT95_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT96_ADDR_LO
+#define PCIEMSIX_VECT96_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT96_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT96_ADDR_HI
+#define PCIEMSIX_VECT96_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT96_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT96_MSG_DATA
+#define PCIEMSIX_VECT96_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT96_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT96_CONTROL
+#define PCIEMSIX_VECT96_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT96_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT97_ADDR_LO
+#define PCIEMSIX_VECT97_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT97_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT97_ADDR_HI
+#define PCIEMSIX_VECT97_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT97_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT97_MSG_DATA
+#define PCIEMSIX_VECT97_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT97_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT97_CONTROL
+#define PCIEMSIX_VECT97_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT97_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT98_ADDR_LO
+#define PCIEMSIX_VECT98_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT98_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT98_ADDR_HI
+#define PCIEMSIX_VECT98_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT98_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT98_MSG_DATA
+#define PCIEMSIX_VECT98_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT98_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT98_CONTROL
+#define PCIEMSIX_VECT98_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT98_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT99_ADDR_LO
+#define PCIEMSIX_VECT99_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT99_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT99_ADDR_HI
+#define PCIEMSIX_VECT99_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT99_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT99_MSG_DATA
+#define PCIEMSIX_VECT99_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT99_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT99_CONTROL
+#define PCIEMSIX_VECT99_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT99_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT100_ADDR_LO
+#define PCIEMSIX_VECT100_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT100_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT100_ADDR_HI
+#define PCIEMSIX_VECT100_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT100_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT100_MSG_DATA
+#define PCIEMSIX_VECT100_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT100_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT100_CONTROL
+#define PCIEMSIX_VECT100_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT100_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT101_ADDR_LO
+#define PCIEMSIX_VECT101_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT101_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT101_ADDR_HI
+#define PCIEMSIX_VECT101_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT101_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT101_MSG_DATA
+#define PCIEMSIX_VECT101_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT101_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT101_CONTROL
+#define PCIEMSIX_VECT101_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT101_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT102_ADDR_LO
+#define PCIEMSIX_VECT102_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT102_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT102_ADDR_HI
+#define PCIEMSIX_VECT102_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT102_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT102_MSG_DATA
+#define PCIEMSIX_VECT102_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT102_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT102_CONTROL
+#define PCIEMSIX_VECT102_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT102_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT103_ADDR_LO
+#define PCIEMSIX_VECT103_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT103_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT103_ADDR_HI
+#define PCIEMSIX_VECT103_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT103_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT103_MSG_DATA
+#define PCIEMSIX_VECT103_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT103_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT103_CONTROL
+#define PCIEMSIX_VECT103_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT103_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT104_ADDR_LO
+#define PCIEMSIX_VECT104_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT104_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT104_ADDR_HI
+#define PCIEMSIX_VECT104_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT104_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT104_MSG_DATA
+#define PCIEMSIX_VECT104_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT104_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT104_CONTROL
+#define PCIEMSIX_VECT104_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT104_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT105_ADDR_LO
+#define PCIEMSIX_VECT105_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT105_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT105_ADDR_HI
+#define PCIEMSIX_VECT105_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT105_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT105_MSG_DATA
+#define PCIEMSIX_VECT105_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT105_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT105_CONTROL
+#define PCIEMSIX_VECT105_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT105_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT106_ADDR_LO
+#define PCIEMSIX_VECT106_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT106_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT106_ADDR_HI
+#define PCIEMSIX_VECT106_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT106_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT106_MSG_DATA
+#define PCIEMSIX_VECT106_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT106_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT106_CONTROL
+#define PCIEMSIX_VECT106_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT106_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT107_ADDR_LO
+#define PCIEMSIX_VECT107_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT107_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT107_ADDR_HI
+#define PCIEMSIX_VECT107_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT107_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT107_MSG_DATA
+#define PCIEMSIX_VECT107_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT107_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT107_CONTROL
+#define PCIEMSIX_VECT107_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT107_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT108_ADDR_LO
+#define PCIEMSIX_VECT108_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT108_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT108_ADDR_HI
+#define PCIEMSIX_VECT108_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT108_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT108_MSG_DATA
+#define PCIEMSIX_VECT108_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT108_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT108_CONTROL
+#define PCIEMSIX_VECT108_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT108_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT109_ADDR_LO
+#define PCIEMSIX_VECT109_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT109_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT109_ADDR_HI
+#define PCIEMSIX_VECT109_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT109_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT109_MSG_DATA
+#define PCIEMSIX_VECT109_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT109_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT109_CONTROL
+#define PCIEMSIX_VECT109_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT109_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT110_ADDR_LO
+#define PCIEMSIX_VECT110_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT110_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT110_ADDR_HI
+#define PCIEMSIX_VECT110_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT110_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT110_MSG_DATA
+#define PCIEMSIX_VECT110_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT110_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT110_CONTROL
+#define PCIEMSIX_VECT110_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT110_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT111_ADDR_LO
+#define PCIEMSIX_VECT111_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT111_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT111_ADDR_HI
+#define PCIEMSIX_VECT111_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT111_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT111_MSG_DATA
+#define PCIEMSIX_VECT111_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT111_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT111_CONTROL
+#define PCIEMSIX_VECT111_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT111_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT112_ADDR_LO
+#define PCIEMSIX_VECT112_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT112_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT112_ADDR_HI
+#define PCIEMSIX_VECT112_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT112_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT112_MSG_DATA
+#define PCIEMSIX_VECT112_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT112_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT112_CONTROL
+#define PCIEMSIX_VECT112_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT112_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT113_ADDR_LO
+#define PCIEMSIX_VECT113_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT113_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT113_ADDR_HI
+#define PCIEMSIX_VECT113_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT113_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT113_MSG_DATA
+#define PCIEMSIX_VECT113_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT113_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT113_CONTROL
+#define PCIEMSIX_VECT113_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT113_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT114_ADDR_LO
+#define PCIEMSIX_VECT114_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT114_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT114_ADDR_HI
+#define PCIEMSIX_VECT114_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT114_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT114_MSG_DATA
+#define PCIEMSIX_VECT114_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT114_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT114_CONTROL
+#define PCIEMSIX_VECT114_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT114_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT115_ADDR_LO
+#define PCIEMSIX_VECT115_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT115_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT115_ADDR_HI
+#define PCIEMSIX_VECT115_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT115_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT115_MSG_DATA
+#define PCIEMSIX_VECT115_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT115_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT115_CONTROL
+#define PCIEMSIX_VECT115_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT115_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT116_ADDR_LO
+#define PCIEMSIX_VECT116_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT116_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT116_ADDR_HI
+#define PCIEMSIX_VECT116_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT116_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT116_MSG_DATA
+#define PCIEMSIX_VECT116_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT116_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT116_CONTROL
+#define PCIEMSIX_VECT116_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT116_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT117_ADDR_LO
+#define PCIEMSIX_VECT117_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT117_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT117_ADDR_HI
+#define PCIEMSIX_VECT117_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT117_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT117_MSG_DATA
+#define PCIEMSIX_VECT117_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT117_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT117_CONTROL
+#define PCIEMSIX_VECT117_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT117_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT118_ADDR_LO
+#define PCIEMSIX_VECT118_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT118_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT118_ADDR_HI
+#define PCIEMSIX_VECT118_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT118_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT118_MSG_DATA
+#define PCIEMSIX_VECT118_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT118_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT118_CONTROL
+#define PCIEMSIX_VECT118_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT118_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT119_ADDR_LO
+#define PCIEMSIX_VECT119_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT119_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT119_ADDR_HI
+#define PCIEMSIX_VECT119_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT119_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT119_MSG_DATA
+#define PCIEMSIX_VECT119_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT119_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT119_CONTROL
+#define PCIEMSIX_VECT119_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT119_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT120_ADDR_LO
+#define PCIEMSIX_VECT120_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT120_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT120_ADDR_HI
+#define PCIEMSIX_VECT120_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT120_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT120_MSG_DATA
+#define PCIEMSIX_VECT120_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT120_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT120_CONTROL
+#define PCIEMSIX_VECT120_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT120_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT121_ADDR_LO
+#define PCIEMSIX_VECT121_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT121_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT121_ADDR_HI
+#define PCIEMSIX_VECT121_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT121_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT121_MSG_DATA
+#define PCIEMSIX_VECT121_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT121_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT121_CONTROL
+#define PCIEMSIX_VECT121_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT121_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT122_ADDR_LO
+#define PCIEMSIX_VECT122_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT122_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT122_ADDR_HI
+#define PCIEMSIX_VECT122_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT122_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT122_MSG_DATA
+#define PCIEMSIX_VECT122_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT122_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT122_CONTROL
+#define PCIEMSIX_VECT122_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT122_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT123_ADDR_LO
+#define PCIEMSIX_VECT123_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT123_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT123_ADDR_HI
+#define PCIEMSIX_VECT123_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT123_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT123_MSG_DATA
+#define PCIEMSIX_VECT123_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT123_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT123_CONTROL
+#define PCIEMSIX_VECT123_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT123_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT124_ADDR_LO
+#define PCIEMSIX_VECT124_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT124_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT124_ADDR_HI
+#define PCIEMSIX_VECT124_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT124_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT124_MSG_DATA
+#define PCIEMSIX_VECT124_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT124_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT124_CONTROL
+#define PCIEMSIX_VECT124_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT124_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT125_ADDR_LO
+#define PCIEMSIX_VECT125_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT125_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT125_ADDR_HI
+#define PCIEMSIX_VECT125_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT125_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT125_MSG_DATA
+#define PCIEMSIX_VECT125_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT125_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT125_CONTROL
+#define PCIEMSIX_VECT125_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT125_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT126_ADDR_LO
+#define PCIEMSIX_VECT126_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT126_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT126_ADDR_HI
+#define PCIEMSIX_VECT126_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT126_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT126_MSG_DATA
+#define PCIEMSIX_VECT126_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT126_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT126_CONTROL
+#define PCIEMSIX_VECT126_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT126_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT127_ADDR_LO
+#define PCIEMSIX_VECT127_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT127_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT127_ADDR_HI
+#define PCIEMSIX_VECT127_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT127_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT127_MSG_DATA
+#define PCIEMSIX_VECT127_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT127_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT127_CONTROL
+#define PCIEMSIX_VECT127_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT127_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT128_ADDR_LO
+#define PCIEMSIX_VECT128_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT128_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT128_ADDR_HI
+#define PCIEMSIX_VECT128_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT128_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT128_MSG_DATA
+#define PCIEMSIX_VECT128_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT128_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT128_CONTROL
+#define PCIEMSIX_VECT128_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT128_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT129_ADDR_LO
+#define PCIEMSIX_VECT129_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT129_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT129_ADDR_HI
+#define PCIEMSIX_VECT129_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT129_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT129_MSG_DATA
+#define PCIEMSIX_VECT129_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT129_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT129_CONTROL
+#define PCIEMSIX_VECT129_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT129_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT130_ADDR_LO
+#define PCIEMSIX_VECT130_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT130_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT130_ADDR_HI
+#define PCIEMSIX_VECT130_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT130_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT130_MSG_DATA
+#define PCIEMSIX_VECT130_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT130_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT130_CONTROL
+#define PCIEMSIX_VECT130_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT130_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT131_ADDR_LO
+#define PCIEMSIX_VECT131_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT131_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT131_ADDR_HI
+#define PCIEMSIX_VECT131_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT131_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT131_MSG_DATA
+#define PCIEMSIX_VECT131_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT131_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT131_CONTROL
+#define PCIEMSIX_VECT131_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT131_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT132_ADDR_LO
+#define PCIEMSIX_VECT132_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT132_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT132_ADDR_HI
+#define PCIEMSIX_VECT132_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT132_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT132_MSG_DATA
+#define PCIEMSIX_VECT132_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT132_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT132_CONTROL
+#define PCIEMSIX_VECT132_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT132_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT133_ADDR_LO
+#define PCIEMSIX_VECT133_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT133_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT133_ADDR_HI
+#define PCIEMSIX_VECT133_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT133_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT133_MSG_DATA
+#define PCIEMSIX_VECT133_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT133_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT133_CONTROL
+#define PCIEMSIX_VECT133_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT133_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT134_ADDR_LO
+#define PCIEMSIX_VECT134_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT134_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT134_ADDR_HI
+#define PCIEMSIX_VECT134_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT134_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT134_MSG_DATA
+#define PCIEMSIX_VECT134_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT134_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT134_CONTROL
+#define PCIEMSIX_VECT134_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT134_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT135_ADDR_LO
+#define PCIEMSIX_VECT135_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT135_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT135_ADDR_HI
+#define PCIEMSIX_VECT135_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT135_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT135_MSG_DATA
+#define PCIEMSIX_VECT135_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT135_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT135_CONTROL
+#define PCIEMSIX_VECT135_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT135_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT136_ADDR_LO
+#define PCIEMSIX_VECT136_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT136_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT136_ADDR_HI
+#define PCIEMSIX_VECT136_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT136_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT136_MSG_DATA
+#define PCIEMSIX_VECT136_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT136_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT136_CONTROL
+#define PCIEMSIX_VECT136_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT136_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT137_ADDR_LO
+#define PCIEMSIX_VECT137_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT137_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT137_ADDR_HI
+#define PCIEMSIX_VECT137_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT137_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT137_MSG_DATA
+#define PCIEMSIX_VECT137_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT137_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT137_CONTROL
+#define PCIEMSIX_VECT137_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT137_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT138_ADDR_LO
+#define PCIEMSIX_VECT138_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT138_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT138_ADDR_HI
+#define PCIEMSIX_VECT138_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT138_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT138_MSG_DATA
+#define PCIEMSIX_VECT138_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT138_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT138_CONTROL
+#define PCIEMSIX_VECT138_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT138_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT139_ADDR_LO
+#define PCIEMSIX_VECT139_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT139_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT139_ADDR_HI
+#define PCIEMSIX_VECT139_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT139_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT139_MSG_DATA
+#define PCIEMSIX_VECT139_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT139_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT139_CONTROL
+#define PCIEMSIX_VECT139_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT139_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT140_ADDR_LO
+#define PCIEMSIX_VECT140_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT140_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT140_ADDR_HI
+#define PCIEMSIX_VECT140_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT140_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT140_MSG_DATA
+#define PCIEMSIX_VECT140_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT140_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT140_CONTROL
+#define PCIEMSIX_VECT140_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT140_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT141_ADDR_LO
+#define PCIEMSIX_VECT141_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT141_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT141_ADDR_HI
+#define PCIEMSIX_VECT141_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT141_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT141_MSG_DATA
+#define PCIEMSIX_VECT141_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT141_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT141_CONTROL
+#define PCIEMSIX_VECT141_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT141_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT142_ADDR_LO
+#define PCIEMSIX_VECT142_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT142_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT142_ADDR_HI
+#define PCIEMSIX_VECT142_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT142_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT142_MSG_DATA
+#define PCIEMSIX_VECT142_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT142_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT142_CONTROL
+#define PCIEMSIX_VECT142_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT142_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT143_ADDR_LO
+#define PCIEMSIX_VECT143_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT143_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT143_ADDR_HI
+#define PCIEMSIX_VECT143_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT143_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT143_MSG_DATA
+#define PCIEMSIX_VECT143_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT143_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT143_CONTROL
+#define PCIEMSIX_VECT143_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT143_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT144_ADDR_LO
+#define PCIEMSIX_VECT144_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT144_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT144_ADDR_HI
+#define PCIEMSIX_VECT144_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT144_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT144_MSG_DATA
+#define PCIEMSIX_VECT144_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT144_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT144_CONTROL
+#define PCIEMSIX_VECT144_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT144_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT145_ADDR_LO
+#define PCIEMSIX_VECT145_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT145_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT145_ADDR_HI
+#define PCIEMSIX_VECT145_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT145_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT145_MSG_DATA
+#define PCIEMSIX_VECT145_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT145_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT145_CONTROL
+#define PCIEMSIX_VECT145_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT145_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT146_ADDR_LO
+#define PCIEMSIX_VECT146_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT146_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT146_ADDR_HI
+#define PCIEMSIX_VECT146_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT146_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT146_MSG_DATA
+#define PCIEMSIX_VECT146_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT146_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT146_CONTROL
+#define PCIEMSIX_VECT146_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT146_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT147_ADDR_LO
+#define PCIEMSIX_VECT147_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT147_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT147_ADDR_HI
+#define PCIEMSIX_VECT147_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT147_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT147_MSG_DATA
+#define PCIEMSIX_VECT147_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT147_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT147_CONTROL
+#define PCIEMSIX_VECT147_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT147_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT148_ADDR_LO
+#define PCIEMSIX_VECT148_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT148_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT148_ADDR_HI
+#define PCIEMSIX_VECT148_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT148_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT148_MSG_DATA
+#define PCIEMSIX_VECT148_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT148_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT148_CONTROL
+#define PCIEMSIX_VECT148_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT148_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT149_ADDR_LO
+#define PCIEMSIX_VECT149_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT149_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT149_ADDR_HI
+#define PCIEMSIX_VECT149_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT149_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT149_MSG_DATA
+#define PCIEMSIX_VECT149_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT149_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT149_CONTROL
+#define PCIEMSIX_VECT149_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT149_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT150_ADDR_LO
+#define PCIEMSIX_VECT150_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT150_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT150_ADDR_HI
+#define PCIEMSIX_VECT150_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT150_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT150_MSG_DATA
+#define PCIEMSIX_VECT150_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT150_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT150_CONTROL
+#define PCIEMSIX_VECT150_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT150_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT151_ADDR_LO
+#define PCIEMSIX_VECT151_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT151_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT151_ADDR_HI
+#define PCIEMSIX_VECT151_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT151_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT151_MSG_DATA
+#define PCIEMSIX_VECT151_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT151_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT151_CONTROL
+#define PCIEMSIX_VECT151_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT151_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT152_ADDR_LO
+#define PCIEMSIX_VECT152_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT152_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT152_ADDR_HI
+#define PCIEMSIX_VECT152_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT152_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT152_MSG_DATA
+#define PCIEMSIX_VECT152_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT152_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT152_CONTROL
+#define PCIEMSIX_VECT152_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT152_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT153_ADDR_LO
+#define PCIEMSIX_VECT153_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT153_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT153_ADDR_HI
+#define PCIEMSIX_VECT153_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT153_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT153_MSG_DATA
+#define PCIEMSIX_VECT153_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT153_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT153_CONTROL
+#define PCIEMSIX_VECT153_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT153_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT154_ADDR_LO
+#define PCIEMSIX_VECT154_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT154_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT154_ADDR_HI
+#define PCIEMSIX_VECT154_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT154_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT154_MSG_DATA
+#define PCIEMSIX_VECT154_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT154_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT154_CONTROL
+#define PCIEMSIX_VECT154_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT154_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT155_ADDR_LO
+#define PCIEMSIX_VECT155_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT155_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT155_ADDR_HI
+#define PCIEMSIX_VECT155_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT155_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT155_MSG_DATA
+#define PCIEMSIX_VECT155_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT155_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT155_CONTROL
+#define PCIEMSIX_VECT155_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT155_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT156_ADDR_LO
+#define PCIEMSIX_VECT156_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT156_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT156_ADDR_HI
+#define PCIEMSIX_VECT156_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT156_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT156_MSG_DATA
+#define PCIEMSIX_VECT156_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT156_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT156_CONTROL
+#define PCIEMSIX_VECT156_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT156_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT157_ADDR_LO
+#define PCIEMSIX_VECT157_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT157_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT157_ADDR_HI
+#define PCIEMSIX_VECT157_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT157_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT157_MSG_DATA
+#define PCIEMSIX_VECT157_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT157_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT157_CONTROL
+#define PCIEMSIX_VECT157_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT157_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT158_ADDR_LO
+#define PCIEMSIX_VECT158_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT158_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT158_ADDR_HI
+#define PCIEMSIX_VECT158_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT158_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT158_MSG_DATA
+#define PCIEMSIX_VECT158_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT158_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT158_CONTROL
+#define PCIEMSIX_VECT158_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT158_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT159_ADDR_LO
+#define PCIEMSIX_VECT159_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT159_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT159_ADDR_HI
+#define PCIEMSIX_VECT159_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT159_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT159_MSG_DATA
+#define PCIEMSIX_VECT159_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT159_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT159_CONTROL
+#define PCIEMSIX_VECT159_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT159_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT160_ADDR_LO
+#define PCIEMSIX_VECT160_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT160_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT160_ADDR_HI
+#define PCIEMSIX_VECT160_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT160_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT160_MSG_DATA
+#define PCIEMSIX_VECT160_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT160_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT160_CONTROL
+#define PCIEMSIX_VECT160_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT160_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT161_ADDR_LO
+#define PCIEMSIX_VECT161_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT161_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT161_ADDR_HI
+#define PCIEMSIX_VECT161_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT161_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT161_MSG_DATA
+#define PCIEMSIX_VECT161_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT161_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT161_CONTROL
+#define PCIEMSIX_VECT161_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT161_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT162_ADDR_LO
+#define PCIEMSIX_VECT162_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT162_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT162_ADDR_HI
+#define PCIEMSIX_VECT162_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT162_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT162_MSG_DATA
+#define PCIEMSIX_VECT162_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT162_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT162_CONTROL
+#define PCIEMSIX_VECT162_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT162_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT163_ADDR_LO
+#define PCIEMSIX_VECT163_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT163_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT163_ADDR_HI
+#define PCIEMSIX_VECT163_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT163_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT163_MSG_DATA
+#define PCIEMSIX_VECT163_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT163_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT163_CONTROL
+#define PCIEMSIX_VECT163_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT163_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT164_ADDR_LO
+#define PCIEMSIX_VECT164_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT164_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT164_ADDR_HI
+#define PCIEMSIX_VECT164_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT164_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT164_MSG_DATA
+#define PCIEMSIX_VECT164_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT164_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT164_CONTROL
+#define PCIEMSIX_VECT164_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT164_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT165_ADDR_LO
+#define PCIEMSIX_VECT165_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT165_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT165_ADDR_HI
+#define PCIEMSIX_VECT165_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT165_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT165_MSG_DATA
+#define PCIEMSIX_VECT165_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT165_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT165_CONTROL
+#define PCIEMSIX_VECT165_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT165_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT166_ADDR_LO
+#define PCIEMSIX_VECT166_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT166_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT166_ADDR_HI
+#define PCIEMSIX_VECT166_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT166_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT166_MSG_DATA
+#define PCIEMSIX_VECT166_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT166_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT166_CONTROL
+#define PCIEMSIX_VECT166_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT166_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT167_ADDR_LO
+#define PCIEMSIX_VECT167_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT167_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT167_ADDR_HI
+#define PCIEMSIX_VECT167_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT167_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT167_MSG_DATA
+#define PCIEMSIX_VECT167_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT167_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT167_CONTROL
+#define PCIEMSIX_VECT167_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT167_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT168_ADDR_LO
+#define PCIEMSIX_VECT168_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT168_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT168_ADDR_HI
+#define PCIEMSIX_VECT168_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT168_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT168_MSG_DATA
+#define PCIEMSIX_VECT168_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT168_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT168_CONTROL
+#define PCIEMSIX_VECT168_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT168_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT169_ADDR_LO
+#define PCIEMSIX_VECT169_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT169_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT169_ADDR_HI
+#define PCIEMSIX_VECT169_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT169_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT169_MSG_DATA
+#define PCIEMSIX_VECT169_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT169_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT169_CONTROL
+#define PCIEMSIX_VECT169_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT169_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT170_ADDR_LO
+#define PCIEMSIX_VECT170_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT170_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT170_ADDR_HI
+#define PCIEMSIX_VECT170_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT170_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT170_MSG_DATA
+#define PCIEMSIX_VECT170_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT170_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT170_CONTROL
+#define PCIEMSIX_VECT170_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT170_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT171_ADDR_LO
+#define PCIEMSIX_VECT171_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT171_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT171_ADDR_HI
+#define PCIEMSIX_VECT171_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT171_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT171_MSG_DATA
+#define PCIEMSIX_VECT171_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT171_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT171_CONTROL
+#define PCIEMSIX_VECT171_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT171_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT172_ADDR_LO
+#define PCIEMSIX_VECT172_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT172_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT172_ADDR_HI
+#define PCIEMSIX_VECT172_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT172_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT172_MSG_DATA
+#define PCIEMSIX_VECT172_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT172_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT172_CONTROL
+#define PCIEMSIX_VECT172_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT172_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT173_ADDR_LO
+#define PCIEMSIX_VECT173_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT173_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT173_ADDR_HI
+#define PCIEMSIX_VECT173_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT173_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT173_MSG_DATA
+#define PCIEMSIX_VECT173_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT173_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT173_CONTROL
+#define PCIEMSIX_VECT173_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT173_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT174_ADDR_LO
+#define PCIEMSIX_VECT174_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT174_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT174_ADDR_HI
+#define PCIEMSIX_VECT174_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT174_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT174_MSG_DATA
+#define PCIEMSIX_VECT174_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT174_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT174_CONTROL
+#define PCIEMSIX_VECT174_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT174_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT175_ADDR_LO
+#define PCIEMSIX_VECT175_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT175_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT175_ADDR_HI
+#define PCIEMSIX_VECT175_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT175_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT175_MSG_DATA
+#define PCIEMSIX_VECT175_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT175_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT175_CONTROL
+#define PCIEMSIX_VECT175_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT175_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT176_ADDR_LO
+#define PCIEMSIX_VECT176_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT176_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT176_ADDR_HI
+#define PCIEMSIX_VECT176_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT176_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT176_MSG_DATA
+#define PCIEMSIX_VECT176_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT176_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT176_CONTROL
+#define PCIEMSIX_VECT176_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT176_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT177_ADDR_LO
+#define PCIEMSIX_VECT177_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT177_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT177_ADDR_HI
+#define PCIEMSIX_VECT177_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT177_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT177_MSG_DATA
+#define PCIEMSIX_VECT177_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT177_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT177_CONTROL
+#define PCIEMSIX_VECT177_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT177_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT178_ADDR_LO
+#define PCIEMSIX_VECT178_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT178_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT178_ADDR_HI
+#define PCIEMSIX_VECT178_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT178_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT178_MSG_DATA
+#define PCIEMSIX_VECT178_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT178_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT178_CONTROL
+#define PCIEMSIX_VECT178_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT178_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT179_ADDR_LO
+#define PCIEMSIX_VECT179_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT179_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT179_ADDR_HI
+#define PCIEMSIX_VECT179_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT179_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT179_MSG_DATA
+#define PCIEMSIX_VECT179_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT179_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT179_CONTROL
+#define PCIEMSIX_VECT179_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT179_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT180_ADDR_LO
+#define PCIEMSIX_VECT180_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT180_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT180_ADDR_HI
+#define PCIEMSIX_VECT180_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT180_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT180_MSG_DATA
+#define PCIEMSIX_VECT180_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT180_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT180_CONTROL
+#define PCIEMSIX_VECT180_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT180_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT181_ADDR_LO
+#define PCIEMSIX_VECT181_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT181_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT181_ADDR_HI
+#define PCIEMSIX_VECT181_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT181_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT181_MSG_DATA
+#define PCIEMSIX_VECT181_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT181_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT181_CONTROL
+#define PCIEMSIX_VECT181_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT181_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT182_ADDR_LO
+#define PCIEMSIX_VECT182_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT182_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT182_ADDR_HI
+#define PCIEMSIX_VECT182_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT182_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT182_MSG_DATA
+#define PCIEMSIX_VECT182_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT182_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT182_CONTROL
+#define PCIEMSIX_VECT182_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT182_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT183_ADDR_LO
+#define PCIEMSIX_VECT183_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT183_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT183_ADDR_HI
+#define PCIEMSIX_VECT183_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT183_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT183_MSG_DATA
+#define PCIEMSIX_VECT183_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT183_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT183_CONTROL
+#define PCIEMSIX_VECT183_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT183_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT184_ADDR_LO
+#define PCIEMSIX_VECT184_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT184_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT184_ADDR_HI
+#define PCIEMSIX_VECT184_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT184_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT184_MSG_DATA
+#define PCIEMSIX_VECT184_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT184_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT184_CONTROL
+#define PCIEMSIX_VECT184_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT184_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT185_ADDR_LO
+#define PCIEMSIX_VECT185_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT185_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT185_ADDR_HI
+#define PCIEMSIX_VECT185_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT185_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT185_MSG_DATA
+#define PCIEMSIX_VECT185_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT185_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT185_CONTROL
+#define PCIEMSIX_VECT185_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT185_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT186_ADDR_LO
+#define PCIEMSIX_VECT186_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT186_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT186_ADDR_HI
+#define PCIEMSIX_VECT186_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT186_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT186_MSG_DATA
+#define PCIEMSIX_VECT186_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT186_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT186_CONTROL
+#define PCIEMSIX_VECT186_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT186_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT187_ADDR_LO
+#define PCIEMSIX_VECT187_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT187_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT187_ADDR_HI
+#define PCIEMSIX_VECT187_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT187_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT187_MSG_DATA
+#define PCIEMSIX_VECT187_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT187_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT187_CONTROL
+#define PCIEMSIX_VECT187_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT187_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT188_ADDR_LO
+#define PCIEMSIX_VECT188_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT188_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT188_ADDR_HI
+#define PCIEMSIX_VECT188_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT188_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT188_MSG_DATA
+#define PCIEMSIX_VECT188_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT188_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT188_CONTROL
+#define PCIEMSIX_VECT188_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT188_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT189_ADDR_LO
+#define PCIEMSIX_VECT189_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT189_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT189_ADDR_HI
+#define PCIEMSIX_VECT189_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT189_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT189_MSG_DATA
+#define PCIEMSIX_VECT189_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT189_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT189_CONTROL
+#define PCIEMSIX_VECT189_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT189_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT190_ADDR_LO
+#define PCIEMSIX_VECT190_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT190_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT190_ADDR_HI
+#define PCIEMSIX_VECT190_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT190_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT190_MSG_DATA
+#define PCIEMSIX_VECT190_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT190_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT190_CONTROL
+#define PCIEMSIX_VECT190_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT190_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT191_ADDR_LO
+#define PCIEMSIX_VECT191_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT191_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT191_ADDR_HI
+#define PCIEMSIX_VECT191_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT191_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT191_MSG_DATA
+#define PCIEMSIX_VECT191_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT191_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT191_CONTROL
+#define PCIEMSIX_VECT191_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT191_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT192_ADDR_LO
+#define PCIEMSIX_VECT192_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT192_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT192_ADDR_HI
+#define PCIEMSIX_VECT192_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT192_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT192_MSG_DATA
+#define PCIEMSIX_VECT192_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT192_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT192_CONTROL
+#define PCIEMSIX_VECT192_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT192_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT193_ADDR_LO
+#define PCIEMSIX_VECT193_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT193_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT193_ADDR_HI
+#define PCIEMSIX_VECT193_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT193_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT193_MSG_DATA
+#define PCIEMSIX_VECT193_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT193_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT193_CONTROL
+#define PCIEMSIX_VECT193_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT193_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT194_ADDR_LO
+#define PCIEMSIX_VECT194_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT194_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT194_ADDR_HI
+#define PCIEMSIX_VECT194_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT194_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT194_MSG_DATA
+#define PCIEMSIX_VECT194_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT194_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT194_CONTROL
+#define PCIEMSIX_VECT194_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT194_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT195_ADDR_LO
+#define PCIEMSIX_VECT195_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT195_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT195_ADDR_HI
+#define PCIEMSIX_VECT195_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT195_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT195_MSG_DATA
+#define PCIEMSIX_VECT195_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT195_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT195_CONTROL
+#define PCIEMSIX_VECT195_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT195_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT196_ADDR_LO
+#define PCIEMSIX_VECT196_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT196_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT196_ADDR_HI
+#define PCIEMSIX_VECT196_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT196_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT196_MSG_DATA
+#define PCIEMSIX_VECT196_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT196_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT196_CONTROL
+#define PCIEMSIX_VECT196_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT196_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT197_ADDR_LO
+#define PCIEMSIX_VECT197_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT197_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT197_ADDR_HI
+#define PCIEMSIX_VECT197_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT197_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT197_MSG_DATA
+#define PCIEMSIX_VECT197_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT197_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT197_CONTROL
+#define PCIEMSIX_VECT197_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT197_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT198_ADDR_LO
+#define PCIEMSIX_VECT198_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT198_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT198_ADDR_HI
+#define PCIEMSIX_VECT198_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT198_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT198_MSG_DATA
+#define PCIEMSIX_VECT198_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT198_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT198_CONTROL
+#define PCIEMSIX_VECT198_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT198_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT199_ADDR_LO
+#define PCIEMSIX_VECT199_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT199_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT199_ADDR_HI
+#define PCIEMSIX_VECT199_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT199_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT199_MSG_DATA
+#define PCIEMSIX_VECT199_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT199_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT199_CONTROL
+#define PCIEMSIX_VECT199_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT199_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT200_ADDR_LO
+#define PCIEMSIX_VECT200_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT200_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT200_ADDR_HI
+#define PCIEMSIX_VECT200_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT200_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT200_MSG_DATA
+#define PCIEMSIX_VECT200_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT200_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT200_CONTROL
+#define PCIEMSIX_VECT200_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT200_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT201_ADDR_LO
+#define PCIEMSIX_VECT201_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT201_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT201_ADDR_HI
+#define PCIEMSIX_VECT201_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT201_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT201_MSG_DATA
+#define PCIEMSIX_VECT201_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT201_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT201_CONTROL
+#define PCIEMSIX_VECT201_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT201_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT202_ADDR_LO
+#define PCIEMSIX_VECT202_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT202_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT202_ADDR_HI
+#define PCIEMSIX_VECT202_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT202_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT202_MSG_DATA
+#define PCIEMSIX_VECT202_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT202_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT202_CONTROL
+#define PCIEMSIX_VECT202_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT202_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT203_ADDR_LO
+#define PCIEMSIX_VECT203_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT203_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT203_ADDR_HI
+#define PCIEMSIX_VECT203_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT203_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT203_MSG_DATA
+#define PCIEMSIX_VECT203_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT203_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT203_CONTROL
+#define PCIEMSIX_VECT203_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT203_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT204_ADDR_LO
+#define PCIEMSIX_VECT204_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT204_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT204_ADDR_HI
+#define PCIEMSIX_VECT204_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT204_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT204_MSG_DATA
+#define PCIEMSIX_VECT204_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT204_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT204_CONTROL
+#define PCIEMSIX_VECT204_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT204_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT205_ADDR_LO
+#define PCIEMSIX_VECT205_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT205_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT205_ADDR_HI
+#define PCIEMSIX_VECT205_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT205_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT205_MSG_DATA
+#define PCIEMSIX_VECT205_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT205_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT205_CONTROL
+#define PCIEMSIX_VECT205_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT205_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT206_ADDR_LO
+#define PCIEMSIX_VECT206_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT206_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT206_ADDR_HI
+#define PCIEMSIX_VECT206_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT206_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT206_MSG_DATA
+#define PCIEMSIX_VECT206_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT206_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT206_CONTROL
+#define PCIEMSIX_VECT206_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT206_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT207_ADDR_LO
+#define PCIEMSIX_VECT207_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT207_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT207_ADDR_HI
+#define PCIEMSIX_VECT207_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT207_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT207_MSG_DATA
+#define PCIEMSIX_VECT207_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT207_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT207_CONTROL
+#define PCIEMSIX_VECT207_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT207_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT208_ADDR_LO
+#define PCIEMSIX_VECT208_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT208_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT208_ADDR_HI
+#define PCIEMSIX_VECT208_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT208_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT208_MSG_DATA
+#define PCIEMSIX_VECT208_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT208_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT208_CONTROL
+#define PCIEMSIX_VECT208_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT208_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT209_ADDR_LO
+#define PCIEMSIX_VECT209_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT209_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT209_ADDR_HI
+#define PCIEMSIX_VECT209_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT209_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT209_MSG_DATA
+#define PCIEMSIX_VECT209_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT209_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT209_CONTROL
+#define PCIEMSIX_VECT209_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT209_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT210_ADDR_LO
+#define PCIEMSIX_VECT210_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT210_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT210_ADDR_HI
+#define PCIEMSIX_VECT210_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT210_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT210_MSG_DATA
+#define PCIEMSIX_VECT210_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT210_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT210_CONTROL
+#define PCIEMSIX_VECT210_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT210_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT211_ADDR_LO
+#define PCIEMSIX_VECT211_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT211_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT211_ADDR_HI
+#define PCIEMSIX_VECT211_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT211_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT211_MSG_DATA
+#define PCIEMSIX_VECT211_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT211_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT211_CONTROL
+#define PCIEMSIX_VECT211_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT211_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT212_ADDR_LO
+#define PCIEMSIX_VECT212_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT212_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT212_ADDR_HI
+#define PCIEMSIX_VECT212_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT212_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT212_MSG_DATA
+#define PCIEMSIX_VECT212_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT212_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT212_CONTROL
+#define PCIEMSIX_VECT212_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT212_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT213_ADDR_LO
+#define PCIEMSIX_VECT213_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT213_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT213_ADDR_HI
+#define PCIEMSIX_VECT213_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT213_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT213_MSG_DATA
+#define PCIEMSIX_VECT213_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT213_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT213_CONTROL
+#define PCIEMSIX_VECT213_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT213_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT214_ADDR_LO
+#define PCIEMSIX_VECT214_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT214_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT214_ADDR_HI
+#define PCIEMSIX_VECT214_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT214_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT214_MSG_DATA
+#define PCIEMSIX_VECT214_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT214_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT214_CONTROL
+#define PCIEMSIX_VECT214_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT214_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT215_ADDR_LO
+#define PCIEMSIX_VECT215_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT215_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT215_ADDR_HI
+#define PCIEMSIX_VECT215_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT215_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT215_MSG_DATA
+#define PCIEMSIX_VECT215_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT215_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT215_CONTROL
+#define PCIEMSIX_VECT215_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT215_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT216_ADDR_LO
+#define PCIEMSIX_VECT216_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT216_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT216_ADDR_HI
+#define PCIEMSIX_VECT216_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT216_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT216_MSG_DATA
+#define PCIEMSIX_VECT216_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT216_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT216_CONTROL
+#define PCIEMSIX_VECT216_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT216_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT217_ADDR_LO
+#define PCIEMSIX_VECT217_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT217_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT217_ADDR_HI
+#define PCIEMSIX_VECT217_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT217_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT217_MSG_DATA
+#define PCIEMSIX_VECT217_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT217_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT217_CONTROL
+#define PCIEMSIX_VECT217_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT217_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT218_ADDR_LO
+#define PCIEMSIX_VECT218_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT218_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT218_ADDR_HI
+#define PCIEMSIX_VECT218_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT218_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT218_MSG_DATA
+#define PCIEMSIX_VECT218_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT218_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT218_CONTROL
+#define PCIEMSIX_VECT218_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT218_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT219_ADDR_LO
+#define PCIEMSIX_VECT219_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT219_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT219_ADDR_HI
+#define PCIEMSIX_VECT219_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT219_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT219_MSG_DATA
+#define PCIEMSIX_VECT219_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT219_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT219_CONTROL
+#define PCIEMSIX_VECT219_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT219_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT220_ADDR_LO
+#define PCIEMSIX_VECT220_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT220_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT220_ADDR_HI
+#define PCIEMSIX_VECT220_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT220_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT220_MSG_DATA
+#define PCIEMSIX_VECT220_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT220_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT220_CONTROL
+#define PCIEMSIX_VECT220_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT220_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT221_ADDR_LO
+#define PCIEMSIX_VECT221_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT221_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT221_ADDR_HI
+#define PCIEMSIX_VECT221_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT221_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT221_MSG_DATA
+#define PCIEMSIX_VECT221_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT221_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT221_CONTROL
+#define PCIEMSIX_VECT221_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT221_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT222_ADDR_LO
+#define PCIEMSIX_VECT222_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT222_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT222_ADDR_HI
+#define PCIEMSIX_VECT222_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT222_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT222_MSG_DATA
+#define PCIEMSIX_VECT222_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT222_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT222_CONTROL
+#define PCIEMSIX_VECT222_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT222_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT223_ADDR_LO
+#define PCIEMSIX_VECT223_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT223_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT223_ADDR_HI
+#define PCIEMSIX_VECT223_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT223_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT223_MSG_DATA
+#define PCIEMSIX_VECT223_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT223_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT223_CONTROL
+#define PCIEMSIX_VECT223_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT223_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT224_ADDR_LO
+#define PCIEMSIX_VECT224_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT224_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT224_ADDR_HI
+#define PCIEMSIX_VECT224_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT224_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT224_MSG_DATA
+#define PCIEMSIX_VECT224_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT224_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT224_CONTROL
+#define PCIEMSIX_VECT224_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT224_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT225_ADDR_LO
+#define PCIEMSIX_VECT225_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT225_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT225_ADDR_HI
+#define PCIEMSIX_VECT225_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT225_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT225_MSG_DATA
+#define PCIEMSIX_VECT225_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT225_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT225_CONTROL
+#define PCIEMSIX_VECT225_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT225_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT226_ADDR_LO
+#define PCIEMSIX_VECT226_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT226_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT226_ADDR_HI
+#define PCIEMSIX_VECT226_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT226_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT226_MSG_DATA
+#define PCIEMSIX_VECT226_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT226_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT226_CONTROL
+#define PCIEMSIX_VECT226_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT226_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT227_ADDR_LO
+#define PCIEMSIX_VECT227_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT227_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT227_ADDR_HI
+#define PCIEMSIX_VECT227_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT227_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT227_MSG_DATA
+#define PCIEMSIX_VECT227_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT227_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT227_CONTROL
+#define PCIEMSIX_VECT227_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT227_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT228_ADDR_LO
+#define PCIEMSIX_VECT228_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT228_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT228_ADDR_HI
+#define PCIEMSIX_VECT228_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT228_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT228_MSG_DATA
+#define PCIEMSIX_VECT228_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT228_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT228_CONTROL
+#define PCIEMSIX_VECT228_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT228_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT229_ADDR_LO
+#define PCIEMSIX_VECT229_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT229_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT229_ADDR_HI
+#define PCIEMSIX_VECT229_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT229_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT229_MSG_DATA
+#define PCIEMSIX_VECT229_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT229_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT229_CONTROL
+#define PCIEMSIX_VECT229_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT229_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT230_ADDR_LO
+#define PCIEMSIX_VECT230_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT230_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT230_ADDR_HI
+#define PCIEMSIX_VECT230_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT230_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT230_MSG_DATA
+#define PCIEMSIX_VECT230_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT230_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT230_CONTROL
+#define PCIEMSIX_VECT230_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT230_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT231_ADDR_LO
+#define PCIEMSIX_VECT231_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT231_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT231_ADDR_HI
+#define PCIEMSIX_VECT231_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT231_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT231_MSG_DATA
+#define PCIEMSIX_VECT231_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT231_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT231_CONTROL
+#define PCIEMSIX_VECT231_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT231_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT232_ADDR_LO
+#define PCIEMSIX_VECT232_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT232_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT232_ADDR_HI
+#define PCIEMSIX_VECT232_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT232_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT232_MSG_DATA
+#define PCIEMSIX_VECT232_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT232_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT232_CONTROL
+#define PCIEMSIX_VECT232_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT232_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT233_ADDR_LO
+#define PCIEMSIX_VECT233_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT233_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT233_ADDR_HI
+#define PCIEMSIX_VECT233_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT233_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT233_MSG_DATA
+#define PCIEMSIX_VECT233_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT233_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT233_CONTROL
+#define PCIEMSIX_VECT233_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT233_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT234_ADDR_LO
+#define PCIEMSIX_VECT234_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT234_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT234_ADDR_HI
+#define PCIEMSIX_VECT234_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT234_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT234_MSG_DATA
+#define PCIEMSIX_VECT234_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT234_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT234_CONTROL
+#define PCIEMSIX_VECT234_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT234_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT235_ADDR_LO
+#define PCIEMSIX_VECT235_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT235_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT235_ADDR_HI
+#define PCIEMSIX_VECT235_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT235_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT235_MSG_DATA
+#define PCIEMSIX_VECT235_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT235_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT235_CONTROL
+#define PCIEMSIX_VECT235_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT235_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT236_ADDR_LO
+#define PCIEMSIX_VECT236_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT236_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT236_ADDR_HI
+#define PCIEMSIX_VECT236_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT236_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT236_MSG_DATA
+#define PCIEMSIX_VECT236_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT236_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT236_CONTROL
+#define PCIEMSIX_VECT236_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT236_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT237_ADDR_LO
+#define PCIEMSIX_VECT237_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT237_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT237_ADDR_HI
+#define PCIEMSIX_VECT237_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT237_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT237_MSG_DATA
+#define PCIEMSIX_VECT237_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT237_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT237_CONTROL
+#define PCIEMSIX_VECT237_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT237_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT238_ADDR_LO
+#define PCIEMSIX_VECT238_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT238_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT238_ADDR_HI
+#define PCIEMSIX_VECT238_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT238_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT238_MSG_DATA
+#define PCIEMSIX_VECT238_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT238_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT238_CONTROL
+#define PCIEMSIX_VECT238_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT238_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT239_ADDR_LO
+#define PCIEMSIX_VECT239_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT239_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT239_ADDR_HI
+#define PCIEMSIX_VECT239_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT239_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT239_MSG_DATA
+#define PCIEMSIX_VECT239_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT239_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT239_CONTROL
+#define PCIEMSIX_VECT239_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT239_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT240_ADDR_LO
+#define PCIEMSIX_VECT240_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT240_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT240_ADDR_HI
+#define PCIEMSIX_VECT240_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT240_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT240_MSG_DATA
+#define PCIEMSIX_VECT240_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT240_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT240_CONTROL
+#define PCIEMSIX_VECT240_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT240_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT241_ADDR_LO
+#define PCIEMSIX_VECT241_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT241_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT241_ADDR_HI
+#define PCIEMSIX_VECT241_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT241_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT241_MSG_DATA
+#define PCIEMSIX_VECT241_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT241_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT241_CONTROL
+#define PCIEMSIX_VECT241_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT241_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT242_ADDR_LO
+#define PCIEMSIX_VECT242_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT242_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT242_ADDR_HI
+#define PCIEMSIX_VECT242_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT242_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT242_MSG_DATA
+#define PCIEMSIX_VECT242_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT242_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT242_CONTROL
+#define PCIEMSIX_VECT242_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT242_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT243_ADDR_LO
+#define PCIEMSIX_VECT243_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT243_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT243_ADDR_HI
+#define PCIEMSIX_VECT243_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT243_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT243_MSG_DATA
+#define PCIEMSIX_VECT243_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT243_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT243_CONTROL
+#define PCIEMSIX_VECT243_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT243_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT244_ADDR_LO
+#define PCIEMSIX_VECT244_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT244_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT244_ADDR_HI
+#define PCIEMSIX_VECT244_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT244_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT244_MSG_DATA
+#define PCIEMSIX_VECT244_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT244_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT244_CONTROL
+#define PCIEMSIX_VECT244_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT244_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT245_ADDR_LO
+#define PCIEMSIX_VECT245_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT245_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT245_ADDR_HI
+#define PCIEMSIX_VECT245_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT245_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT245_MSG_DATA
+#define PCIEMSIX_VECT245_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT245_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT245_CONTROL
+#define PCIEMSIX_VECT245_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT245_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT246_ADDR_LO
+#define PCIEMSIX_VECT246_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT246_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT246_ADDR_HI
+#define PCIEMSIX_VECT246_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT246_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT246_MSG_DATA
+#define PCIEMSIX_VECT246_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT246_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT246_CONTROL
+#define PCIEMSIX_VECT246_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT246_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT247_ADDR_LO
+#define PCIEMSIX_VECT247_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT247_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT247_ADDR_HI
+#define PCIEMSIX_VECT247_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT247_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT247_MSG_DATA
+#define PCIEMSIX_VECT247_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT247_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT247_CONTROL
+#define PCIEMSIX_VECT247_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT247_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT248_ADDR_LO
+#define PCIEMSIX_VECT248_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT248_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT248_ADDR_HI
+#define PCIEMSIX_VECT248_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT248_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT248_MSG_DATA
+#define PCIEMSIX_VECT248_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT248_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT248_CONTROL
+#define PCIEMSIX_VECT248_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT248_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT249_ADDR_LO
+#define PCIEMSIX_VECT249_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT249_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT249_ADDR_HI
+#define PCIEMSIX_VECT249_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT249_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT249_MSG_DATA
+#define PCIEMSIX_VECT249_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT249_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT249_CONTROL
+#define PCIEMSIX_VECT249_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT249_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT250_ADDR_LO
+#define PCIEMSIX_VECT250_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT250_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT250_ADDR_HI
+#define PCIEMSIX_VECT250_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT250_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT250_MSG_DATA
+#define PCIEMSIX_VECT250_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT250_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT250_CONTROL
+#define PCIEMSIX_VECT250_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT250_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT251_ADDR_LO
+#define PCIEMSIX_VECT251_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT251_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT251_ADDR_HI
+#define PCIEMSIX_VECT251_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT251_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT251_MSG_DATA
+#define PCIEMSIX_VECT251_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT251_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT251_CONTROL
+#define PCIEMSIX_VECT251_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT251_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT252_ADDR_LO
+#define PCIEMSIX_VECT252_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT252_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT252_ADDR_HI
+#define PCIEMSIX_VECT252_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT252_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT252_MSG_DATA
+#define PCIEMSIX_VECT252_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT252_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT252_CONTROL
+#define PCIEMSIX_VECT252_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT252_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT253_ADDR_LO
+#define PCIEMSIX_VECT253_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT253_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT253_ADDR_HI
+#define PCIEMSIX_VECT253_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT253_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT253_MSG_DATA
+#define PCIEMSIX_VECT253_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT253_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT253_CONTROL
+#define PCIEMSIX_VECT253_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT253_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT254_ADDR_LO
+#define PCIEMSIX_VECT254_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT254_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT254_ADDR_HI
+#define PCIEMSIX_VECT254_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT254_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT254_MSG_DATA
+#define PCIEMSIX_VECT254_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT254_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT254_CONTROL
+#define PCIEMSIX_VECT254_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT254_CONTROL__MASK_BIT_MASK 0x00000001L
+//PCIEMSIX_VECT255_ADDR_LO
+#define PCIEMSIX_VECT255_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define PCIEMSIX_VECT255_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIEMSIX_VECT255_ADDR_HI
+#define PCIEMSIX_VECT255_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define PCIEMSIX_VECT255_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT255_MSG_DATA
+#define PCIEMSIX_VECT255_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define PCIEMSIX_VECT255_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//PCIEMSIX_VECT255_CONTROL
+#define PCIEMSIX_VECT255_CONTROL__MASK_BIT__SHIFT 0x0
+#define PCIEMSIX_VECT255_CONTROL__MASK_BIT_MASK 0x00000001L
+
+
+// addressBlock: nbif_rcc_pfc_usb_RCCPFCDEC
+//RCC_PFC_USB_RCC_PFC_LTR_CNTL
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
+#define RCC_PFC_USB_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
+//RCC_PFC_USB_RCC_PFC_PME_RESTORE
+#define RCC_PFC_USB_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
+#define RCC_PFC_USB_RCC_PFC_PME_RESTORE__PME_SENT_FLAG__SHIFT 0x9
+#define RCC_PFC_USB_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
+#define RCC_PFC_USB_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
+#define RCC_PFC_USB_RCC_PFC_PME_RESTORE__PME_SENT_FLAG_MASK 0x00000200L
+//RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
+//RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_1
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
+//RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_2
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
+//RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_3
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
+//RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_4
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
+//RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_5
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
+//RCC_PFC_USB_RCC_PFC_AUXPWR_CNTL
+#define RCC_PFC_USB_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
+#define RCC_PFC_USB_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
+#define RCC_PFC_USB_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
+#define RCC_PFC_USB_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
+
+
+// addressBlock: nbif_rcc_pfc_pd_controller_RCCPFCDEC
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE__PME_SENT_FLAG__SHIFT 0x9
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_PME_RESTORE__PME_SENT_FLAG_MASK 0x00000200L
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_1
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_2
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_3
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_4
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_5
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL
+//RCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L
+#define RCC_PFC_PD_CONTROLLER_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L
+
+
+// addressBlock: nbif_pciemsix_0_usb_MSIXPDEC
+//PCIEMSIX_PBA_0
+#define PCIEMSIX_PBA_0__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_0__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_1
+#define PCIEMSIX_PBA_1__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_1__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_2
+#define PCIEMSIX_PBA_2__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_2__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_3
+#define PCIEMSIX_PBA_3__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_3__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_4
+#define PCIEMSIX_PBA_4__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_4__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_5
+#define PCIEMSIX_PBA_5__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_5__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_6
+#define PCIEMSIX_PBA_6__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_6__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+//PCIEMSIX_PBA_7
+#define PCIEMSIX_PBA_7__MSIX_PENDING_BITS__SHIFT 0x0
+#define PCIEMSIX_PBA_7__MSIX_PENDING_BITS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_shadow_reg_shadowdec
+//SHADOW_COMMAND
+#define SHADOW_COMMAND__IOEN_UP__SHIFT 0x0
+#define SHADOW_COMMAND__MEMEN_UP__SHIFT 0x1
+#define SHADOW_COMMAND__IOEN_UP_MASK 0x0001L
+#define SHADOW_COMMAND__MEMEN_UP_MASK 0x0002L
+//SHADOW_BASE_ADDR_1
+#define SHADOW_BASE_ADDR_1__BAR1_UP__SHIFT 0x0
+#define SHADOW_BASE_ADDR_1__BAR1_UP_MASK 0xFFFFFFFFL
+//SHADOW_BASE_ADDR_2
+#define SHADOW_BASE_ADDR_2__BAR2_UP__SHIFT 0x0
+#define SHADOW_BASE_ADDR_2__BAR2_UP_MASK 0xFFFFFFFFL
+//SHADOW_IRQ_BRIDGE_CNTL
+#define SHADOW_IRQ_BRIDGE_CNTL__ISA_EN_UP__SHIFT 0x2
+#define SHADOW_IRQ_BRIDGE_CNTL__VGA_EN_UP__SHIFT 0x3
+#define SHADOW_IRQ_BRIDGE_CNTL__VGA_DEC_UP__SHIFT 0x4
+#define SHADOW_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_UP__SHIFT 0x6
+#define SHADOW_IRQ_BRIDGE_CNTL__ISA_EN_UP_MASK 0x0004L
+#define SHADOW_IRQ_BRIDGE_CNTL__VGA_EN_UP_MASK 0x0008L
+#define SHADOW_IRQ_BRIDGE_CNTL__VGA_DEC_UP_MASK 0x0010L
+#define SHADOW_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_UP_MASK 0x0040L
+//SUC_INDEX
+#define SUC_INDEX__SUC_INDEX__SHIFT 0x0
+#define SUC_INDEX__SUC_INDEX_MASK 0xFFFFFFFFL
+//SUC_DATA
+#define SUC_DATA__SUC_DATA__SHIFT 0x0
+#define SUC_DATA__SUC_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_bif_swus_SUMDEC
+//SUM_INDEX
+#define SUM_INDEX__SUM_INDEX__SHIFT 0x0
+#define SUM_INDEX__SUM_INDEX_MASK 0xFFFFFFFFL
+//SUM_DATA
+#define SUM_DATA__SUM_DATA__SHIFT 0x0
+#define SUM_DATA__SUM_DATA_MASK 0xFFFFFFFFL
+//SUM_INDEX_HI
+#define SUM_INDEX_HI__SUM_INDEX_HI__SHIFT 0x0
+#define SUM_INDEX_HI__SUM_INDEX_HI_MASK 0x000000FFL
+
+
+// addressBlock: nbif_rcc_strap_rcc_strap_internal
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_NEGO_GLOBAL_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_NEGO_GLOBAL_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0_MASK 0x00E00000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0_MASK 0x0E000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK 0x70000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0__SHIFT 0x5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0__SHIFT 0xd
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0__SHIFT 0xf
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0_MASK 0x00000010L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0_MASK 0x00000020L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0_MASK 0x00000E00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0_MASK 0x00001000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0_MASK 0x00002000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0_MASK 0x00004000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0_MASK 0x00008000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0_MASK 0x00700000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0_MASK 0x03800000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0_MASK 0x1C000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0_MASK 0xE0000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0xb
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0_MASK 0x00000038L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0_MASK 0x00000600L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x00003800L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0_MASK 0x0003C000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x001C0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0_MASK 0x01E00000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0_MASK 0x06000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0_MASK 0x18000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0_MASK 0xFF000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_MSI_MAP_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0_MASK 0x00800000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0_MASK 0x02000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_MSI_MAP_EN_DEV0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0__SHIFT 0x5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000010L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0_MASK 0x00000020L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x00000F00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0x0000F000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0_MASK 0x00030000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0_MASK 0x00E00000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x0F000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0xF0000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP7
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0_MASK 0x00000F00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0_MASK 0x0000F000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0_MASK 0x1F000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0_MASK 0xE0000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0_MASK 0xFF000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0__SHIFT 0x5
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0_MASK 0x00000010L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0_MASK 0x00000020L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0_MASK 0x0007FFC0L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP11
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0_MASK 0x0FFF0000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP12
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0_MASK 0x00FFFFFFL
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP13
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0_MASK 0x000FFE00L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0_MASK 0xFFF00000L
+//RCC_STRAP1_RCC_DEV0_PORT_STRAP14
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0_MASK 0x00000004L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0_MASK 0x00000010L
+//RCC_DEV1_PORT_STRAP0
+//RCC_DEV1_PORT_STRAP1
+//RCC_DEV1_PORT_STRAP2
+//RCC_DEV1_PORT_STRAP3
+//RCC_DEV1_PORT_STRAP4
+//RCC_DEV1_PORT_STRAP5
+//RCC_DEV1_PORT_STRAP6
+//RCC_DEV1_PORT_STRAP7
+//RCC_DEV1_PORT_STRAP8
+//RCC_DEV1_PORT_STRAP9
+//RCC_DEV1_PORT_STRAP10
+//RCC_DEV1_PORT_STRAP11
+//RCC_DEV1_PORT_STRAP12
+//RCC_DEV1_PORT_STRAP13
+//RCC_DEV1_PORT_STRAP14
+//RCC_DEV2_PORT_STRAP0
+//RCC_DEV2_PORT_STRAP1
+//RCC_DEV2_PORT_STRAP2
+//RCC_DEV2_PORT_STRAP3
+//RCC_DEV2_PORT_STRAP4
+//RCC_DEV2_PORT_STRAP5
+//RCC_DEV2_PORT_STRAP6
+//RCC_DEV2_PORT_STRAP7
+//RCC_DEV2_PORT_STRAP8
+//RCC_DEV2_PORT_STRAP9
+//RCC_DEV2_PORT_STRAP10
+//RCC_DEV2_PORT_STRAP11
+//RCC_DEV2_PORT_STRAP12
+//RCC_DEV2_PORT_STRAP13
+//RCC_DEV2_PORT_STRAP14
+//RCC_STRAP1_RCC_BIF_STRAP0
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN__SHIFT 0x2
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN__SHIFT 0xb
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR__SHIFT 0xc
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR__SHIFT 0xd
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0xe
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0xf
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN__SHIFT 0x11
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_AUD_PIN__SHIFT 0x12
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN__SHIFT 0x14
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_GFX_IOBAR_DIS_AND_REGBAR_64BIT_EN__SHIFT 0x15
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GPUIOV_EN__SHIFT 0x16
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN3_DIS__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4__SHIFT 0x19
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_QUICKSIM_START__SHIFT 0x1a
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1b
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS__SHIFT 0x1c
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN__SHIFT 0x1d
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE__SHIFT 0x1e
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN_MASK 0x00000004L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN_MASK 0x00000078L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR_MASK 0x00000400L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN_MASK 0x00000800L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR_MASK 0x00001000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR_MASK 0x00002000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00004000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00008000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_MASK 0x00010000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN_MASK 0x00020000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_AUD_PIN_MASK 0x000C0000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN_MASK 0x00100000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_GFX_IOBAR_DIS_AND_REGBAR_64BIT_EN_MASK 0x00200000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GPUIOV_EN_MASK 0x00400000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_GEN3_DIS_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4_MASK 0x02000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_QUICKSIM_START_MASK 0x04000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING_MASK 0x08000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS_MASK 0x10000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN_MASK 0x20000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE_MASK 0x40000000L
+#define RCC_STRAP1_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP1
+#define RCC_STRAP1_RCC_BIF_STRAP1__FUSESTRAP_VALID__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP1__ROMSTRAP_VALID__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP1__WRITE_DISABLE__SHIFT 0x2
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS__SHIFT 0x5
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x6
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_READY__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE__SHIFT 0xc
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_HWREV_LSB2__SHIFT 0xd
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWREV_LSB2__SHIFT 0xf
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY__SHIFT 0x11
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS__SHIFT 0x12
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN__SHIFT 0x13
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN__SHIFT 0x14
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGIN_EN__SHIFT 0x15
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN__SHIFT 0x16
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN__SHIFT 0x17
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN__SHIFT 0x19
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE__SHIFT 0x1a
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x1b
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN_EP__SHIFT 0x1d
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN__SHIFT 0x1e
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN_DN__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP1__FUSESTRAP_VALID_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP1__ROMSTRAP_VALID_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP1__WRITE_DISABLE_MASK 0x00000004L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN_MASK 0x00000008L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS_MASK 0x00000020L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00000040L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGINING_READY_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE_MASK 0x00000C00L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE_MASK 0x00001000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_HWREV_LSB2_MASK 0x00006000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_SWREV_LSB2_MASK 0x00018000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY_MASK 0x00020000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS_MASK 0x00040000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN_MASK 0x00080000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN_MASK 0x00100000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_MARGIN_EN_MASK 0x00200000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN_MASK 0x00400000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN_MASK 0x00800000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN_MASK 0x02000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE_MASK 0x04000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN_MASK 0x18000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_DLF_EN_EP_MASK 0x20000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN_MASK 0x40000000L
+#define RCC_STRAP1_RCC_BIF_STRAP1__STRAP_AP_EN_DN_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP2
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWUS_SPT__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA__SHIFT 0x5
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA__SHIFT 0x6
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_USB_PD_FUNC_DIS__SHIFT 0xc
+#define RCC_STRAP1_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2__SHIFT 0xd
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS__SHIFT 0xe
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN__SHIFT 0xf
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWUS_SPT_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS_MASK 0x00000008L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA_MASK 0x00000020L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA_MASK 0x00000040L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN_MASK 0x00000C00L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_USB_PD_FUNC_DIS_MASK 0x00001000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2_MASK 0x00002000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN_MASK 0x00008000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP3
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_BIF_STRAP4
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_BIF_STRAP5
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN__SHIFT 0x10
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN__SHIFT 0x11
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS__SHIFT 0x12
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS__SHIFT 0x13
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS__SHIFT 0x14
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS__SHIFT 0x15
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE__SHIFT 0x16
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE__SHIFT 0x18
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x19
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1b
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER__SHIFT 0x1c
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_BIF_GFX_REG_APER_REMAPPING_EN__SHIFT 0x1f
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN_MASK 0x00010000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN_MASK 0x00020000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS_MASK 0x00040000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS_MASK 0x00080000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS_MASK 0x00100000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS_MASK 0x00200000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE_MASK 0x00C00000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE_MASK 0x01000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED_MASK 0x06000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ_MASK 0x08000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER_MASK 0x70000000L
+#define RCC_STRAP1_RCC_BIF_STRAP5__STRAP_BIF_GFX_REG_APER_REMAPPING_EN_MASK 0x80000000L
+//RCC_STRAP1_RCC_BIF_STRAP6
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GEN5_DIS__SHIFT 0x0
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5__SHIFT 0x1
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN__SHIFT 0x2
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_DOE_VERSION_SEL__SHIFT 0x3
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_S5_GFX_REGS_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_PRODUCTION_MODE__SHIFT 0x5
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_SPX_SUPPORT__SHIFT 0x6
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_TPX_SUPPORT__SHIFT 0x7
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS1_SUPPORT__SHIFT 0x8
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS3_SUPPORT__SHIFT 0x9
+#define RCC_STRAP1_RCC_BIF_STRAP6__RESERVED_BIF_STRAP6__SHIFT 0xa
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GEN5_DIS_MASK 0x00000001L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5_MASK 0x00000002L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN_MASK 0x00000004L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_DOE_VERSION_SEL_MASK 0x00000008L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_S5_GFX_REGS_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_PRODUCTION_MODE_MASK 0x00000020L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_SPX_SUPPORT_MASK 0x00000040L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_TPX_SUPPORT_MASK 0x00000080L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS1_SUPPORT_MASK 0x00000100L
+#define RCC_STRAP1_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS3_SUPPORT_MASK 0x00000200L
+#define RCC_STRAP1_RCC_BIF_STRAP6__RESERVED_BIF_STRAP6_MASK 0xFFFFFC00L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP1
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0_MASK 0xFFFF0000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP2
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0__SHIFT 0x6
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0__SHIFT 0xf
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0_MASK 0x00000001L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0_MASK 0x00000040L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0_MASK 0x00003E00L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0_MASK 0x00004000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0_MASK 0x00008000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0_MASK 0x00800000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0_MASK 0x07000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP3
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0_MASK 0x00E00000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP4
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0_MASK 0x000003FFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0_MASK 0x0F800000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0_MASK 0x70000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP5
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_AUX_CURRENT_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_AUX_CURRENT_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0__SHIFT 0x3
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0__SHIFT 0x4
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00000007L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0_MASK 0x00000008L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0_MASK 0x00000070L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0_MASK 0x00001E00L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0_MASK 0x0000E000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00070000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0_MASK 0x00780000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0_MASK 0x03800000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0_MASK 0xC0000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP9
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0_MASK 0x00C00000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0_MASK 0x0F000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP13
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0_MASK 0x000000FFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0_MASK 0x0000FF00L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0_MASK 0x00FF0000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0_MASK 0xFF000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP14
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0_MASK 0x0000FFFFL
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP15
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0_MASK 0x00FFF000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0_MASK 0x01000000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP16
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0_MASK 0x00FFF000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP17
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0_MASK 0x00001000L
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0_MASK 0x01FFE000L
+//RCC_STRAP1_RCC_DEV0_EPF0_STRAP18
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1_MASK 0x000F0000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1_MASK 0x00F00000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP2
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1__SHIFT 0x7
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1__SHIFT 0x8
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1__SHIFT 0x9
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1__SHIFT 0xe
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_ATS_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_DSN_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1_MASK 0x00000080L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1_MASK 0x00000100L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1_MASK 0x00003E00L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1_MASK 0x00004000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_ATS_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_DSN_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1_MASK 0x07000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP3
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1__SHIFT 0x13
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1__SHIFT 0x1a
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1_MASK 0x00080000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1_MASK 0x01000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1_MASK 0x04000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1_MASK 0x08000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP4
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1_MASK 0x0F800000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1_MASK 0x70000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP5
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1_MASK 0x38000000L
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1_MASK 0x40000000L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP6
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_64BAR_EN_DEV0_F1__SHIFT 0x2
+#define RCC_STRAP1_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_64BAR_EN_DEV0_F1_MASK 0x00000004L
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP7
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP20
+//RCC_STRAP1_RCC_DEV0_EPF1_STRAP21
+//RCC_DEV0_EPF2_STRAP0
+#define RCC_DEV0_EPF2_STRAP0__STRAP_DEVICE_ID_DEV0_F2__SHIFT 0x0
+#define RCC_DEV0_EPF2_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F2__SHIFT 0x10
+#define RCC_DEV0_EPF2_STRAP0__STRAP_MINOR_REV_ID_DEV0_F2__SHIFT 0x14
+#define RCC_DEV0_EPF2_STRAP0__STRAP_FUNC_EN_DEV0_F2__SHIFT 0x1c
+#define RCC_DEV0_EPF2_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F2__SHIFT 0x1d
+#define RCC_DEV0_EPF2_STRAP0__STRAP_D1_SUPPORT_DEV0_F2__SHIFT 0x1e
+#define RCC_DEV0_EPF2_STRAP0__STRAP_D2_SUPPORT_DEV0_F2__SHIFT 0x1f
+#define RCC_DEV0_EPF2_STRAP0__STRAP_DEVICE_ID_DEV0_F2_MASK 0x0000FFFFL
+#define RCC_DEV0_EPF2_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F2_MASK 0x000F0000L
+#define RCC_DEV0_EPF2_STRAP0__STRAP_MINOR_REV_ID_DEV0_F2_MASK 0x00F00000L
+#define RCC_DEV0_EPF2_STRAP0__STRAP_FUNC_EN_DEV0_F2_MASK 0x10000000L
+#define RCC_DEV0_EPF2_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F2_MASK 0x20000000L
+#define RCC_DEV0_EPF2_STRAP0__STRAP_D1_SUPPORT_DEV0_F2_MASK 0x40000000L
+#define RCC_DEV0_EPF2_STRAP0__STRAP_D2_SUPPORT_DEV0_F2_MASK 0x80000000L
+//RCC_DEV0_EPF2_STRAP2
+#define RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2__SHIFT 0x7
+#define RCC_DEV0_EPF2_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F2__SHIFT 0x8
+#define RCC_DEV0_EPF2_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F2__SHIFT 0x9
+#define RCC_DEV0_EPF2_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F2__SHIFT 0xe
+#define RCC_DEV0_EPF2_STRAP2__STRAP_AER_EN_DEV0_F2__SHIFT 0x10
+#define RCC_DEV0_EPF2_STRAP2__STRAP_ACS_EN_DEV0_F2__SHIFT 0x11
+#define RCC_DEV0_EPF2_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F2__SHIFT 0x14
+#define RCC_DEV0_EPF2_STRAP2__STRAP_DPA_EN_DEV0_F2__SHIFT 0x15
+#define RCC_DEV0_EPF2_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F2__SHIFT 0x18
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_EN_DEV0_F2__SHIFT 0x1c
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F2__SHIFT 0x1d
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F2__SHIFT 0x1e
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F2__SHIFT 0x1f
+#define RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK 0x00000080L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F2_MASK 0x00000100L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F2_MASK 0x00003E00L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F2_MASK 0x00004000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_AER_EN_DEV0_F2_MASK 0x00010000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_ACS_EN_DEV0_F2_MASK 0x00020000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F2_MASK 0x00100000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_DPA_EN_DEV0_F2_MASK 0x00200000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F2_MASK 0x07000000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_EN_DEV0_F2_MASK 0x10000000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F2_MASK 0x20000000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F2_MASK 0x40000000L
+#define RCC_DEV0_EPF2_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F2_MASK 0x80000000L
+//RCC_DEV0_EPF2_STRAP3
+#define RCC_DEV0_EPF2_STRAP3__STRAP_SUBSYS_ID_DEV0_F2__SHIFT 0x0
+#define RCC_DEV0_EPF2_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F2__SHIFT 0x10
+#define RCC_DEV0_EPF2_STRAP3__STRAP_PWR_EN_DEV0_F2__SHIFT 0x11
+#define RCC_DEV0_EPF2_STRAP3__STRAP_MSI_EN_DEV0_F2__SHIFT 0x12
+#define RCC_DEV0_EPF2_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F2__SHIFT 0x13
+#define RCC_DEV0_EPF2_STRAP3__STRAP_MSIX_EN_DEV0_F2__SHIFT 0x14
+#define RCC_DEV0_EPF2_STRAP3__STRAP_PMC_DSI_DEV0_F2__SHIFT 0x18
+#define RCC_DEV0_EPF2_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F2__SHIFT 0x1a
+#define RCC_DEV0_EPF2_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F2__SHIFT 0x1b
+#define RCC_DEV0_EPF2_STRAP3__STRAP_CLK_PM_EN_DEV0_F2__SHIFT 0x1d
+#define RCC_DEV0_EPF2_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F2__SHIFT 0x1e
+#define RCC_DEV0_EPF2_STRAP3__STRAP_RTR_EN_DEV0_F2__SHIFT 0x1f
+#define RCC_DEV0_EPF2_STRAP3__STRAP_SUBSYS_ID_DEV0_F2_MASK 0x0000FFFFL
+#define RCC_DEV0_EPF2_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F2_MASK 0x00010000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_PWR_EN_DEV0_F2_MASK 0x00020000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_MSI_EN_DEV0_F2_MASK 0x00040000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F2_MASK 0x00080000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_MSIX_EN_DEV0_F2_MASK 0x00100000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_PMC_DSI_DEV0_F2_MASK 0x01000000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F2_MASK 0x04000000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F2_MASK 0x08000000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_CLK_PM_EN_DEV0_F2_MASK 0x20000000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F2_MASK 0x40000000L
+#define RCC_DEV0_EPF2_STRAP3__STRAP_RTR_EN_DEV0_F2_MASK 0x80000000L
+//RCC_DEV0_EPF2_STRAP4
+#define RCC_DEV0_EPF2_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F2__SHIFT 0x14
+#define RCC_DEV0_EPF2_STRAP4__STRAP_ATOMIC_EN_DEV0_F2__SHIFT 0x15
+#define RCC_DEV0_EPF2_STRAP4__STRAP_FLR_EN_DEV0_F2__SHIFT 0x16
+#define RCC_DEV0_EPF2_STRAP4__STRAP_PME_SUPPORT_DEV0_F2__SHIFT 0x17
+#define RCC_DEV0_EPF2_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F2__SHIFT 0x1c
+#define RCC_DEV0_EPF2_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F2__SHIFT 0x1f
+#define RCC_DEV0_EPF2_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F2_MASK 0x00100000L
+#define RCC_DEV0_EPF2_STRAP4__STRAP_ATOMIC_EN_DEV0_F2_MASK 0x00200000L
+#define RCC_DEV0_EPF2_STRAP4__STRAP_FLR_EN_DEV0_F2_MASK 0x00400000L
+#define RCC_DEV0_EPF2_STRAP4__STRAP_PME_SUPPORT_DEV0_F2_MASK 0x0F800000L
+#define RCC_DEV0_EPF2_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F2_MASK 0x70000000L
+#define RCC_DEV0_EPF2_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F2_MASK 0x80000000L
+//RCC_DEV0_EPF2_STRAP5
+#define RCC_DEV0_EPF2_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F2__SHIFT 0x0
+#define RCC_DEV0_EPF2_STRAP5__STRAP_USB_DBESEL_DEV0_F2__SHIFT 0x10
+#define RCC_DEV0_EPF2_STRAP5__STRAP_USB_DBESELD_DEV0_F2__SHIFT 0x14
+#define RCC_DEV0_EPF2_STRAP5__STRAP_AUX_CURRENT_DEV0_F2__SHIFT 0x1b
+#define RCC_DEV0_EPF2_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F2__SHIFT 0x1e
+#define RCC_DEV0_EPF2_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F2_MASK 0x0000FFFFL
+#define RCC_DEV0_EPF2_STRAP5__STRAP_USB_DBESEL_DEV0_F2_MASK 0x000F0000L
+#define RCC_DEV0_EPF2_STRAP5__STRAP_USB_DBESELD_DEV0_F2_MASK 0x00F00000L
+#define RCC_DEV0_EPF2_STRAP5__STRAP_AUX_CURRENT_DEV0_F2_MASK 0x38000000L
+#define RCC_DEV0_EPF2_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F2_MASK 0x40000000L
+//RCC_DEV0_EPF2_STRAP6
+#define RCC_DEV0_EPF2_STRAP6__STRAP_APER0_EN_DEV0_F2__SHIFT 0x0
+#define RCC_DEV0_EPF2_STRAP6__STRAP_APER0_EN_DEV0_F2_MASK 0x00000001L
+//RCC_DEV0_EPF2_STRAP7
+//RCC_DEV0_EPF2_STRAP10
+//RCC_DEV0_EPF2_STRAP11
+//RCC_DEV0_EPF2_STRAP12
+//RCC_DEV0_EPF2_STRAP13
+#define RCC_DEV0_EPF2_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F2__SHIFT 0x0
+#define RCC_DEV0_EPF2_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F2__SHIFT 0x8
+#define RCC_DEV0_EPF2_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F2__SHIFT 0x10
+#define RCC_DEV0_EPF2_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F2_MASK 0x000000FFL
+#define RCC_DEV0_EPF2_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F2_MASK 0x0000FF00L
+#define RCC_DEV0_EPF2_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F2_MASK 0x00FF0000L
+//RCC_DEV0_EPF2_STRAP14
+#define RCC_DEV0_EPF2_STRAP14__STRAP_VENDOR_ID_DEV0_F2__SHIFT 0x0
+#define RCC_DEV0_EPF2_STRAP14__STRAP_VENDOR_ID_DEV0_F2_MASK 0x0000FFFFL
+//RCC_DEV0_EPF2_STRAP20
+//RCC_DEV0_EPF3_STRAP0
+#define RCC_DEV0_EPF3_STRAP0__STRAP_DEVICE_ID_DEV0_F3__SHIFT 0x0
+#define RCC_DEV0_EPF3_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F3__SHIFT 0x10
+#define RCC_DEV0_EPF3_STRAP0__STRAP_MINOR_REV_ID_DEV0_F3__SHIFT 0x14
+#define RCC_DEV0_EPF3_STRAP0__STRAP_FUNC_EN_DEV0_F3__SHIFT 0x1c
+#define RCC_DEV0_EPF3_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F3__SHIFT 0x1d
+#define RCC_DEV0_EPF3_STRAP0__STRAP_D1_SUPPORT_DEV0_F3__SHIFT 0x1e
+#define RCC_DEV0_EPF3_STRAP0__STRAP_D2_SUPPORT_DEV0_F3__SHIFT 0x1f
+#define RCC_DEV0_EPF3_STRAP0__STRAP_DEVICE_ID_DEV0_F3_MASK 0x0000FFFFL
+#define RCC_DEV0_EPF3_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F3_MASK 0x000F0000L
+#define RCC_DEV0_EPF3_STRAP0__STRAP_MINOR_REV_ID_DEV0_F3_MASK 0x00F00000L
+#define RCC_DEV0_EPF3_STRAP0__STRAP_FUNC_EN_DEV0_F3_MASK 0x10000000L
+#define RCC_DEV0_EPF3_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F3_MASK 0x20000000L
+#define RCC_DEV0_EPF3_STRAP0__STRAP_D1_SUPPORT_DEV0_F3_MASK 0x40000000L
+#define RCC_DEV0_EPF3_STRAP0__STRAP_D2_SUPPORT_DEV0_F3_MASK 0x80000000L
+//RCC_DEV0_EPF3_STRAP2
+#define RCC_DEV0_EPF3_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F3__SHIFT 0x7
+#define RCC_DEV0_EPF3_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F3__SHIFT 0x8
+#define RCC_DEV0_EPF3_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F3__SHIFT 0x9
+#define RCC_DEV0_EPF3_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F3__SHIFT 0xe
+#define RCC_DEV0_EPF3_STRAP2__STRAP_AER_EN_DEV0_F3__SHIFT 0x10
+#define RCC_DEV0_EPF3_STRAP2__STRAP_ACS_EN_DEV0_F3__SHIFT 0x11
+#define RCC_DEV0_EPF3_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F3__SHIFT 0x14
+#define RCC_DEV0_EPF3_STRAP2__STRAP_DPA_EN_DEV0_F3__SHIFT 0x15
+#define RCC_DEV0_EPF3_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F3__SHIFT 0x18
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_EN_DEV0_F3__SHIFT 0x1c
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F3__SHIFT 0x1d
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F3__SHIFT 0x1e
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F3__SHIFT 0x1f
+#define RCC_DEV0_EPF3_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F3_MASK 0x00000080L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F3_MASK 0x00000100L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F3_MASK 0x00003E00L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F3_MASK 0x00004000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_AER_EN_DEV0_F3_MASK 0x00010000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_ACS_EN_DEV0_F3_MASK 0x00020000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F3_MASK 0x00100000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_DPA_EN_DEV0_F3_MASK 0x00200000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F3_MASK 0x07000000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_EN_DEV0_F3_MASK 0x10000000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F3_MASK 0x20000000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F3_MASK 0x40000000L
+#define RCC_DEV0_EPF3_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F3_MASK 0x80000000L
+//RCC_DEV0_EPF3_STRAP3
+#define RCC_DEV0_EPF3_STRAP3__STRAP_SUBSYS_ID_DEV0_F3__SHIFT 0x0
+#define RCC_DEV0_EPF3_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F3__SHIFT 0x10
+#define RCC_DEV0_EPF3_STRAP3__STRAP_PWR_EN_DEV0_F3__SHIFT 0x11
+#define RCC_DEV0_EPF3_STRAP3__STRAP_MSI_EN_DEV0_F3__SHIFT 0x12
+#define RCC_DEV0_EPF3_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F3__SHIFT 0x13
+#define RCC_DEV0_EPF3_STRAP3__STRAP_MSIX_EN_DEV0_F3__SHIFT 0x14
+#define RCC_DEV0_EPF3_STRAP3__STRAP_PMC_DSI_DEV0_F3__SHIFT 0x18
+#define RCC_DEV0_EPF3_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F3__SHIFT 0x1a
+#define RCC_DEV0_EPF3_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F3__SHIFT 0x1b
+#define RCC_DEV0_EPF3_STRAP3__STRAP_CLK_PM_EN_DEV0_F3__SHIFT 0x1d
+#define RCC_DEV0_EPF3_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F3__SHIFT 0x1e
+#define RCC_DEV0_EPF3_STRAP3__STRAP_RTR_EN_DEV0_F3__SHIFT 0x1f
+#define RCC_DEV0_EPF3_STRAP3__STRAP_SUBSYS_ID_DEV0_F3_MASK 0x0000FFFFL
+#define RCC_DEV0_EPF3_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F3_MASK 0x00010000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_PWR_EN_DEV0_F3_MASK 0x00020000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_MSI_EN_DEV0_F3_MASK 0x00040000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F3_MASK 0x00080000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_MSIX_EN_DEV0_F3_MASK 0x00100000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_PMC_DSI_DEV0_F3_MASK 0x01000000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F3_MASK 0x04000000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F3_MASK 0x08000000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_CLK_PM_EN_DEV0_F3_MASK 0x20000000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F3_MASK 0x40000000L
+#define RCC_DEV0_EPF3_STRAP3__STRAP_RTR_EN_DEV0_F3_MASK 0x80000000L
+//RCC_DEV0_EPF3_STRAP4
+#define RCC_DEV0_EPF3_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F3__SHIFT 0x14
+#define RCC_DEV0_EPF3_STRAP4__STRAP_ATOMIC_EN_DEV0_F3__SHIFT 0x15
+#define RCC_DEV0_EPF3_STRAP4__STRAP_FLR_EN_DEV0_F3__SHIFT 0x16
+#define RCC_DEV0_EPF3_STRAP4__STRAP_PME_SUPPORT_DEV0_F3__SHIFT 0x17
+#define RCC_DEV0_EPF3_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F3__SHIFT 0x1c
+#define RCC_DEV0_EPF3_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F3__SHIFT 0x1f
+#define RCC_DEV0_EPF3_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F3_MASK 0x00100000L
+#define RCC_DEV0_EPF3_STRAP4__STRAP_ATOMIC_EN_DEV0_F3_MASK 0x00200000L
+#define RCC_DEV0_EPF3_STRAP4__STRAP_FLR_EN_DEV0_F3_MASK 0x00400000L
+#define RCC_DEV0_EPF3_STRAP4__STRAP_PME_SUPPORT_DEV0_F3_MASK 0x0F800000L
+#define RCC_DEV0_EPF3_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F3_MASK 0x70000000L
+#define RCC_DEV0_EPF3_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F3_MASK 0x80000000L
+//RCC_DEV0_EPF3_STRAP5
+#define RCC_DEV0_EPF3_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F3__SHIFT 0x0
+#define RCC_DEV0_EPF3_STRAP5__STRAP_AUX_CURRENT_DEV0_F3__SHIFT 0x1b
+#define RCC_DEV0_EPF3_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F3__SHIFT 0x1e
+#define RCC_DEV0_EPF3_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F3_MASK 0x0000FFFFL
+#define RCC_DEV0_EPF3_STRAP5__STRAP_AUX_CURRENT_DEV0_F3_MASK 0x38000000L
+#define RCC_DEV0_EPF3_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F3_MASK 0x40000000L
+//RCC_DEV0_EPF3_STRAP6
+#define RCC_DEV0_EPF3_STRAP6__STRAP_APER0_EN_DEV0_F3__SHIFT 0x0
+#define RCC_DEV0_EPF3_STRAP6__STRAP_APER0_EN_DEV0_F3_MASK 0x00000001L
+//RCC_DEV0_EPF3_STRAP7
+//RCC_DEV0_EPF3_STRAP10
+//RCC_DEV0_EPF3_STRAP11
+//RCC_DEV0_EPF3_STRAP12
+//RCC_DEV0_EPF3_STRAP13
+#define RCC_DEV0_EPF3_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F3__SHIFT 0x0
+#define RCC_DEV0_EPF3_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F3__SHIFT 0x8
+#define RCC_DEV0_EPF3_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F3__SHIFT 0x10
+#define RCC_DEV0_EPF3_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F3_MASK 0x000000FFL
+#define RCC_DEV0_EPF3_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F3_MASK 0x0000FF00L
+#define RCC_DEV0_EPF3_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F3_MASK 0x00FF0000L
+//RCC_DEV0_EPF3_STRAP14
+#define RCC_DEV0_EPF3_STRAP14__STRAP_VENDOR_ID_DEV0_F3__SHIFT 0x0
+#define RCC_DEV0_EPF3_STRAP14__STRAP_VENDOR_ID_DEV0_F3_MASK 0x0000FFFFL
+//RCC_DEV0_EPF3_STRAP20
+//RCC_DEV0_EPF4_STRAP0
+#define RCC_DEV0_EPF4_STRAP0__STRAP_FUNC_EN_DEV0_F4__SHIFT 0x1c
+#define RCC_DEV0_EPF4_STRAP0__STRAP_D1_SUPPORT_DEV0_F4__SHIFT 0x1e
+#define RCC_DEV0_EPF4_STRAP0__STRAP_D2_SUPPORT_DEV0_F4__SHIFT 0x1f
+#define RCC_DEV0_EPF4_STRAP0__STRAP_FUNC_EN_DEV0_F4_MASK 0x10000000L
+#define RCC_DEV0_EPF4_STRAP0__STRAP_D1_SUPPORT_DEV0_F4_MASK 0x40000000L
+#define RCC_DEV0_EPF4_STRAP0__STRAP_D2_SUPPORT_DEV0_F4_MASK 0x80000000L
+//RCC_DEV0_EPF4_STRAP2
+#define RCC_DEV0_EPF4_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F4__SHIFT 0x9
+#define RCC_DEV0_EPF4_STRAP2__STRAP_AER_EN_DEV0_F4__SHIFT 0x10
+#define RCC_DEV0_EPF4_STRAP2__STRAP_ACS_EN_DEV0_F4__SHIFT 0x11
+#define RCC_DEV0_EPF4_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F4__SHIFT 0x14
+#define RCC_DEV0_EPF4_STRAP2__STRAP_DPA_EN_DEV0_F4__SHIFT 0x15
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_EN_DEV0_F4__SHIFT 0x1c
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F4__SHIFT 0x1d
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F4__SHIFT 0x1e
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F4__SHIFT 0x1f
+#define RCC_DEV0_EPF4_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F4_MASK 0x00003E00L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_AER_EN_DEV0_F4_MASK 0x00010000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_ACS_EN_DEV0_F4_MASK 0x00020000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F4_MASK 0x00100000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_DPA_EN_DEV0_F4_MASK 0x00200000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_EN_DEV0_F4_MASK 0x10000000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F4_MASK 0x20000000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F4_MASK 0x40000000L
+#define RCC_DEV0_EPF4_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F4_MASK 0x80000000L
+//RCC_DEV0_EPF4_STRAP3
+#define RCC_DEV0_EPF4_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F4__SHIFT 0x10
+#define RCC_DEV0_EPF4_STRAP3__STRAP_PWR_EN_DEV0_F4__SHIFT 0x11
+#define RCC_DEV0_EPF4_STRAP3__STRAP_PMC_DSI_DEV0_F4__SHIFT 0x18
+#define RCC_DEV0_EPF4_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F4__SHIFT 0x1a
+#define RCC_DEV0_EPF4_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F4__SHIFT 0x1b
+#define RCC_DEV0_EPF4_STRAP3__STRAP_CLK_PM_EN_DEV0_F4__SHIFT 0x1d
+#define RCC_DEV0_EPF4_STRAP3__STRAP_RTR_EN_DEV0_F4__SHIFT 0x1f
+#define RCC_DEV0_EPF4_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F4_MASK 0x00010000L
+#define RCC_DEV0_EPF4_STRAP3__STRAP_PWR_EN_DEV0_F4_MASK 0x00020000L
+#define RCC_DEV0_EPF4_STRAP3__STRAP_PMC_DSI_DEV0_F4_MASK 0x01000000L
+#define RCC_DEV0_EPF4_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F4_MASK 0x04000000L
+#define RCC_DEV0_EPF4_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F4_MASK 0x08000000L
+#define RCC_DEV0_EPF4_STRAP3__STRAP_CLK_PM_EN_DEV0_F4_MASK 0x20000000L
+#define RCC_DEV0_EPF4_STRAP3__STRAP_RTR_EN_DEV0_F4_MASK 0x80000000L
+//RCC_DEV0_EPF4_STRAP4
+#define RCC_DEV0_EPF4_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F4__SHIFT 0x1f
+#define RCC_DEV0_EPF4_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F4_MASK 0x80000000L
+//RCC_DEV0_EPF4_STRAP5
+#define RCC_DEV0_EPF4_STRAP5__STRAP_AUX_CURRENT_DEV0_F4__SHIFT 0x1b
+#define RCC_DEV0_EPF4_STRAP5__STRAP_AUX_CURRENT_DEV0_F4_MASK 0x38000000L
+//RCC_DEV0_EPF4_STRAP6
+//RCC_DEV0_EPF4_STRAP7
+//RCC_DEV0_EPF4_STRAP13
+//RCC_DEV0_EPF4_STRAP14
+//RCC_DEV0_EPF5_STRAP0
+#define RCC_DEV0_EPF5_STRAP0__STRAP_FUNC_EN_DEV0_F5__SHIFT 0x1c
+#define RCC_DEV0_EPF5_STRAP0__STRAP_D1_SUPPORT_DEV0_F5__SHIFT 0x1e
+#define RCC_DEV0_EPF5_STRAP0__STRAP_D2_SUPPORT_DEV0_F5__SHIFT 0x1f
+#define RCC_DEV0_EPF5_STRAP0__STRAP_FUNC_EN_DEV0_F5_MASK 0x10000000L
+#define RCC_DEV0_EPF5_STRAP0__STRAP_D1_SUPPORT_DEV0_F5_MASK 0x40000000L
+#define RCC_DEV0_EPF5_STRAP0__STRAP_D2_SUPPORT_DEV0_F5_MASK 0x80000000L
+//RCC_DEV0_EPF5_STRAP2
+#define RCC_DEV0_EPF5_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F5__SHIFT 0x9
+#define RCC_DEV0_EPF5_STRAP2__STRAP_AER_EN_DEV0_F5__SHIFT 0x10
+#define RCC_DEV0_EPF5_STRAP2__STRAP_ACS_EN_DEV0_F5__SHIFT 0x11
+#define RCC_DEV0_EPF5_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F5__SHIFT 0x14
+#define RCC_DEV0_EPF5_STRAP2__STRAP_DPA_EN_DEV0_F5__SHIFT 0x15
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_EN_DEV0_F5__SHIFT 0x1c
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F5__SHIFT 0x1d
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F5__SHIFT 0x1e
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F5__SHIFT 0x1f
+#define RCC_DEV0_EPF5_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F5_MASK 0x00003E00L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_AER_EN_DEV0_F5_MASK 0x00010000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_ACS_EN_DEV0_F5_MASK 0x00020000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F5_MASK 0x00100000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_DPA_EN_DEV0_F5_MASK 0x00200000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_EN_DEV0_F5_MASK 0x10000000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F5_MASK 0x20000000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F5_MASK 0x40000000L
+#define RCC_DEV0_EPF5_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F5_MASK 0x80000000L
+//RCC_DEV0_EPF5_STRAP3
+#define RCC_DEV0_EPF5_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F5__SHIFT 0x10
+#define RCC_DEV0_EPF5_STRAP3__STRAP_PWR_EN_DEV0_F5__SHIFT 0x11
+#define RCC_DEV0_EPF5_STRAP3__STRAP_PMC_DSI_DEV0_F5__SHIFT 0x18
+#define RCC_DEV0_EPF5_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F5__SHIFT 0x1a
+#define RCC_DEV0_EPF5_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F5__SHIFT 0x1b
+#define RCC_DEV0_EPF5_STRAP3__STRAP_CLK_PM_EN_DEV0_F5__SHIFT 0x1d
+#define RCC_DEV0_EPF5_STRAP3__STRAP_RTR_EN_DEV0_F5__SHIFT 0x1f
+#define RCC_DEV0_EPF5_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F5_MASK 0x00010000L
+#define RCC_DEV0_EPF5_STRAP3__STRAP_PWR_EN_DEV0_F5_MASK 0x00020000L
+#define RCC_DEV0_EPF5_STRAP3__STRAP_PMC_DSI_DEV0_F5_MASK 0x01000000L
+#define RCC_DEV0_EPF5_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F5_MASK 0x04000000L
+#define RCC_DEV0_EPF5_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F5_MASK 0x08000000L
+#define RCC_DEV0_EPF5_STRAP3__STRAP_CLK_PM_EN_DEV0_F5_MASK 0x20000000L
+#define RCC_DEV0_EPF5_STRAP3__STRAP_RTR_EN_DEV0_F5_MASK 0x80000000L
+//RCC_DEV0_EPF5_STRAP4
+#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5__SHIFT 0x1f
+#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5_MASK 0x80000000L
+//RCC_DEV0_EPF5_STRAP5
+#define RCC_DEV0_EPF5_STRAP5__STRAP_AUX_CURRENT_DEV0_F5__SHIFT 0x1b
+#define RCC_DEV0_EPF5_STRAP5__STRAP_AUX_CURRENT_DEV0_F5_MASK 0x38000000L
+//RCC_DEV0_EPF5_STRAP6
+//RCC_DEV0_EPF5_STRAP7
+//RCC_DEV0_EPF5_STRAP13
+//RCC_DEV0_EPF5_STRAP14
+//RCC_DEV0_EPF6_STRAP0
+#define RCC_DEV0_EPF6_STRAP0__STRAP_FUNC_EN_DEV0_F6__SHIFT 0x1c
+#define RCC_DEV0_EPF6_STRAP0__STRAP_D1_SUPPORT_DEV0_F6__SHIFT 0x1e
+#define RCC_DEV0_EPF6_STRAP0__STRAP_D2_SUPPORT_DEV0_F6__SHIFT 0x1f
+#define RCC_DEV0_EPF6_STRAP0__STRAP_FUNC_EN_DEV0_F6_MASK 0x10000000L
+#define RCC_DEV0_EPF6_STRAP0__STRAP_D1_SUPPORT_DEV0_F6_MASK 0x40000000L
+#define RCC_DEV0_EPF6_STRAP0__STRAP_D2_SUPPORT_DEV0_F6_MASK 0x80000000L
+//RCC_DEV0_EPF6_STRAP2
+#define RCC_DEV0_EPF6_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F6__SHIFT 0x9
+#define RCC_DEV0_EPF6_STRAP2__STRAP_AER_EN_DEV0_F6__SHIFT 0x10
+#define RCC_DEV0_EPF6_STRAP2__STRAP_ACS_EN_DEV0_F6__SHIFT 0x11
+#define RCC_DEV0_EPF6_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F6__SHIFT 0x14
+#define RCC_DEV0_EPF6_STRAP2__STRAP_DPA_EN_DEV0_F6__SHIFT 0x15
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_EN_DEV0_F6__SHIFT 0x1c
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F6__SHIFT 0x1d
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F6__SHIFT 0x1e
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F6__SHIFT 0x1f
+#define RCC_DEV0_EPF6_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F6_MASK 0x00003E00L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_AER_EN_DEV0_F6_MASK 0x00010000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_ACS_EN_DEV0_F6_MASK 0x00020000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F6_MASK 0x00100000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_DPA_EN_DEV0_F6_MASK 0x00200000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_EN_DEV0_F6_MASK 0x10000000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F6_MASK 0x20000000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F6_MASK 0x40000000L
+#define RCC_DEV0_EPF6_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F6_MASK 0x80000000L
+//RCC_DEV0_EPF6_STRAP3
+#define RCC_DEV0_EPF6_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F6__SHIFT 0x10
+#define RCC_DEV0_EPF6_STRAP3__STRAP_PWR_EN_DEV0_F6__SHIFT 0x11
+#define RCC_DEV0_EPF6_STRAP3__STRAP_PMC_DSI_DEV0_F6__SHIFT 0x18
+#define RCC_DEV0_EPF6_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F6__SHIFT 0x1a
+#define RCC_DEV0_EPF6_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F6__SHIFT 0x1b
+#define RCC_DEV0_EPF6_STRAP3__STRAP_CLK_PM_EN_DEV0_F6__SHIFT 0x1d
+#define RCC_DEV0_EPF6_STRAP3__STRAP_RTR_EN_DEV0_F6__SHIFT 0x1f
+#define RCC_DEV0_EPF6_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F6_MASK 0x00010000L
+#define RCC_DEV0_EPF6_STRAP3__STRAP_PWR_EN_DEV0_F6_MASK 0x00020000L
+#define RCC_DEV0_EPF6_STRAP3__STRAP_PMC_DSI_DEV0_F6_MASK 0x01000000L
+#define RCC_DEV0_EPF6_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F6_MASK 0x04000000L
+#define RCC_DEV0_EPF6_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F6_MASK 0x08000000L
+#define RCC_DEV0_EPF6_STRAP3__STRAP_CLK_PM_EN_DEV0_F6_MASK 0x20000000L
+#define RCC_DEV0_EPF6_STRAP3__STRAP_RTR_EN_DEV0_F6_MASK 0x80000000L
+//RCC_DEV0_EPF6_STRAP4
+#define RCC_DEV0_EPF6_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F6__SHIFT 0x1f
+#define RCC_DEV0_EPF6_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F6_MASK 0x80000000L
+//RCC_DEV0_EPF6_STRAP5
+#define RCC_DEV0_EPF6_STRAP5__STRAP_AUX_CURRENT_DEV0_F6__SHIFT 0x1b
+#define RCC_DEV0_EPF6_STRAP5__STRAP_AUX_CURRENT_DEV0_F6_MASK 0x38000000L
+
+
+// addressBlock: nbif_bif_rst_bif_rst_regblk
+//HARD_RST_CTRL
+#define HARD_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0
+#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1
+#define HARD_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2
+#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3
+#define HARD_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4
+#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5
+#define HARD_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6
+#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7
+#define HARD_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x9
+#define HARD_RST_CTRL__SION_AON_RESET_EN__SHIFT 0xa
+#define HARD_RST_CTRL__STRAP_RST_EN__SHIFT 0x17
+#define HARD_RST_CTRL__SWUS_SHADOW_RST_EN__SHIFT 0x1c
+#define HARD_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d
+#define HARD_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e
+#define HARD_RST_CTRL__CORE_RST_EN__SHIFT 0x1f
+#define HARD_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L
+#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L
+#define HARD_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L
+#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L
+#define HARD_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L
+#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L
+#define HARD_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L
+#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L
+#define HARD_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000200L
+#define HARD_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000400L
+#define HARD_RST_CTRL__STRAP_RST_EN_MASK 0x00800000L
+#define HARD_RST_CTRL__SWUS_SHADOW_RST_EN_MASK 0x10000000L
+#define HARD_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L
+#define HARD_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L
+#define HARD_RST_CTRL__CORE_RST_EN_MASK 0x80000000L
+//RSMU_SOFT_RST_CTRL
+#define RSMU_SOFT_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0
+#define RSMU_SOFT_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1
+#define RSMU_SOFT_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2
+#define RSMU_SOFT_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3
+#define RSMU_SOFT_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4
+#define RSMU_SOFT_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5
+#define RSMU_SOFT_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6
+#define RSMU_SOFT_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7
+#define RSMU_SOFT_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x9
+#define RSMU_SOFT_RST_CTRL__SION_AON_RESET_EN__SHIFT 0xa
+#define RSMU_SOFT_RST_CTRL__STRAP_RST_EN__SHIFT 0x17
+#define RSMU_SOFT_RST_CTRL__SWUS_SHADOW_RST_EN__SHIFT 0x1c
+#define RSMU_SOFT_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d
+#define RSMU_SOFT_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e
+#define RSMU_SOFT_RST_CTRL__CORE_RST_EN__SHIFT 0x1f
+#define RSMU_SOFT_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L
+#define RSMU_SOFT_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L
+#define RSMU_SOFT_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L
+#define RSMU_SOFT_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L
+#define RSMU_SOFT_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L
+#define RSMU_SOFT_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L
+#define RSMU_SOFT_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L
+#define RSMU_SOFT_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L
+#define RSMU_SOFT_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000200L
+#define RSMU_SOFT_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000400L
+#define RSMU_SOFT_RST_CTRL__STRAP_RST_EN_MASK 0x00800000L
+#define RSMU_SOFT_RST_CTRL__SWUS_SHADOW_RST_EN_MASK 0x10000000L
+#define RSMU_SOFT_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L
+#define RSMU_SOFT_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L
+#define RSMU_SOFT_RST_CTRL__CORE_RST_EN_MASK 0x80000000L
+//SELF_SOFT_RST
+#define SELF_SOFT_RST__DSPT0_CFG_RST__SHIFT 0x0
+#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST__SHIFT 0x1
+#define SELF_SOFT_RST__DSPT0_PRV_RST__SHIFT 0x2
+#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST__SHIFT 0x3
+#define SELF_SOFT_RST__EP0_CFG_RST__SHIFT 0x4
+#define SELF_SOFT_RST__EP0_CFG_STICKY_RST__SHIFT 0x5
+#define SELF_SOFT_RST__EP0_PRV_RST__SHIFT 0x6
+#define SELF_SOFT_RST__EP0_PRV_STICKY_RST__SHIFT 0x7
+#define SELF_SOFT_RST__HRPU_SDP_PORT_RST__SHIFT 0x18
+#define SELF_SOFT_RST__GSID_SDP_PORT_RST__SHIFT 0x19
+#define SELF_SOFT_RST__GMIU_SDP_PORT_RST__SHIFT 0x1a
+#define SELF_SOFT_RST__GMID_SDP_PORT_RST__SHIFT 0x1b
+#define SELF_SOFT_RST__SWUS_SHADOW_RST__SHIFT 0x1c
+#define SELF_SOFT_RST__CORE_STICKY_RST__SHIFT 0x1d
+#define SELF_SOFT_RST__RELOAD_STRAP__SHIFT 0x1e
+#define SELF_SOFT_RST__CORE_RST__SHIFT 0x1f
+#define SELF_SOFT_RST__DSPT0_CFG_RST_MASK 0x00000001L
+#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST_MASK 0x00000002L
+#define SELF_SOFT_RST__DSPT0_PRV_RST_MASK 0x00000004L
+#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST_MASK 0x00000008L
+#define SELF_SOFT_RST__EP0_CFG_RST_MASK 0x00000010L
+#define SELF_SOFT_RST__EP0_CFG_STICKY_RST_MASK 0x00000020L
+#define SELF_SOFT_RST__EP0_PRV_RST_MASK 0x00000040L
+#define SELF_SOFT_RST__EP0_PRV_STICKY_RST_MASK 0x00000080L
+#define SELF_SOFT_RST__HRPU_SDP_PORT_RST_MASK 0x01000000L
+#define SELF_SOFT_RST__GSID_SDP_PORT_RST_MASK 0x02000000L
+#define SELF_SOFT_RST__GMIU_SDP_PORT_RST_MASK 0x04000000L
+#define SELF_SOFT_RST__GMID_SDP_PORT_RST_MASK 0x08000000L
+#define SELF_SOFT_RST__SWUS_SHADOW_RST_MASK 0x10000000L
+#define SELF_SOFT_RST__CORE_STICKY_RST_MASK 0x20000000L
+#define SELF_SOFT_RST__RELOAD_STRAP_MASK 0x40000000L
+#define SELF_SOFT_RST__CORE_RST_MASK 0x80000000L
+//BIF_GFX_DRV_VPU_RST
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST__SHIFT 0x0
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST__SHIFT 0x1
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST__SHIFT 0x2
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST__SHIFT 0x3
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST__SHIFT 0x4
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST__SHIFT 0x5
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST__SHIFT 0x6
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST__SHIFT 0x7
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST_MASK 0x00000001L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST_MASK 0x00000002L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST_MASK 0x00000004L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST_MASK 0x00000008L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST_MASK 0x00000010L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST_MASK 0x00000020L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST_MASK 0x00000040L
+#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST_MASK 0x00000080L
+//BIF_RST_MISC_CTRL
+#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB__SHIFT 0x0
+#define BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT 0x2
+#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK__SHIFT 0x4
+#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR__SHIFT 0x5
+#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR__SHIFT 0x6
+#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN__SHIFT 0x8
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE__SHIFT 0x9
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT__SHIFT 0xa
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL__SHIFT 0xd
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL__SHIFT 0xf
+#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR__SHIFT 0x11
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS__SHIFT 0x17
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS__SHIFT 0x18
+#define BIF_RST_MISC_CTRL__HARD_RST_CTRL_SWUS_SHADOW_PRV_RST_EN__SHIFT 0x1a
+#define BIF_RST_MISC_CTRL__RSMU_SOFT_RST_CTRL_SWUS_SHADOW_PRV_RST_EN__SHIFT 0x1b
+#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB_MASK 0x00000001L
+#define BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK 0x0000000CL
+#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK_MASK 0x00000010L
+#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR_MASK 0x00000020L
+#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR_MASK 0x00000040L
+#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN_MASK 0x00000100L
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE_MASK 0x00000200L
+#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT_MASK 0x00001C00L
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL_MASK 0x00006000L
+#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL_MASK 0x00018000L
+#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR_MASK 0x000E0000L
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS_MASK 0x00800000L
+#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS_MASK 0x03000000L
+#define BIF_RST_MISC_CTRL__HARD_RST_CTRL_SWUS_SHADOW_PRV_RST_EN_MASK 0x04000000L
+#define BIF_RST_MISC_CTRL__RSMU_SOFT_RST_CTRL_SWUS_SHADOW_PRV_RST_EN_MASK 0x08000000L
+//BIF_RST_MISC_CTRL2
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_PROTECT__SHIFT 0x0
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_PROTECT__SHIFT 0x1
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_PROTECT__SHIFT 0x2
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT__SHIFT 0xf
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE__SHIFT 0x10
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE__SHIFT 0x11
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE__SHIFT 0x12
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT_DIS__SHIFT 0x1e
+#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE__SHIFT 0x1f
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_PROTECT_MASK 0x00000001L
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_PROTECT_MASK 0x00000002L
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_PROTECT_MASK 0x00000004L
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT_MASK 0x00008000L
+#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE_MASK 0x00010000L
+#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE_MASK 0x00020000L
+#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE_MASK 0x00040000L
+#define BIF_RST_MISC_CTRL2__ALL_RST_PROTECT_DIS_MASK 0x40000000L
+#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE_MASK 0x80000000L
+//BIF_RST_MISC_CTRL3
+#define BIF_RST_MISC_CTRL3__TIMER_SCALE__SHIFT 0x0
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT__SHIFT 0x4
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE__SHIFT 0x6
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_HARD__SHIFT 0x7
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SOFT__SHIFT 0xa
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SELF__SHIFT 0xd
+#define BIF_RST_MISC_CTRL3__RSMU_SOFT_RST_CYCLE__SHIFT 0x10
+#define BIF_RST_MISC_CTRL3__TIMER_SCALE_MASK 0x0000000FL
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT_MASK 0x00000030L
+#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE_MASK 0x00000040L
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_HARD_MASK 0x00000380L
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SOFT_MASK 0x00001C00L
+#define BIF_RST_MISC_CTRL3__RELOAD_STRAP_DELAY_SELF_MASK 0x0000E000L
+#define BIF_RST_MISC_CTRL3__RSMU_SOFT_RST_CYCLE_MASK 0x00FF0000L
+//DEV0_PF0_FLR_RST_CTRL
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN__SHIFT 0x5
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN__SHIFT 0x6
+#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN__SHIFT 0x7
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN__SHIFT 0x8
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN__SHIFT 0x9
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN__SHIFT 0xa
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN__SHIFT 0xb
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN__SHIFT 0xc
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN__SHIFT 0xd
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN__SHIFT 0xe
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN__SHIFT 0xf
+#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN__SHIFT 0x10
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN__SHIFT 0x1f
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN_MASK 0x00000020L
+#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN_MASK 0x00000040L
+#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN_MASK 0x00000080L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN_MASK 0x00000100L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN_MASK 0x00000200L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN_MASK 0x00000400L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN_MASK 0x00000800L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN_MASK 0x00001000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN_MASK 0x00002000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN_MASK 0x00004000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN_MASK 0x00008000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN_MASK 0x00010000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN_MASK 0x80000000L
+//DEV0_PF1_FLR_RST_CTRL
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF1_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF1_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+//DEV0_PF2_FLR_RST_CTRL
+#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF2_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF2_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF2_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF2_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF2_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF2_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+//DEV0_PF3_FLR_RST_CTRL
+#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF3_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF3_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF3_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF3_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF3_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF3_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+//DEV0_PF4_FLR_RST_CTRL
+#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF4_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF4_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF4_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF4_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF4_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF4_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+//DEV0_PF5_FLR_RST_CTRL
+#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF5_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF5_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF5_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF5_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF5_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF5_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+//DEV0_PF6_FLR_RST_CTRL
+#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11
+#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12
+#define DEV0_PF6_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17
+#define DEV0_PF6_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19
+#define DEV0_PF6_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN__SHIFT 0x1b
+#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L
+#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L
+#define DEV0_PF6_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L
+#define DEV0_PF6_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L
+#define DEV0_PF6_FLR_RST_CTRL__VF_VF_CFG_FLR_EXC_EN_MASK 0x08000000L
+//BIF_INST_RESET_INTR_STS
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS__SHIFT 0x0
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS__SHIFT 0x1
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS__SHIFT 0x2
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS__SHIFT 0x3
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS__SHIFT 0x4
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS_MASK 0x00000001L
+#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS_MASK 0x00000002L
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS_MASK 0x00000004L
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS_MASK 0x00000008L
+#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS_MASK 0x00000010L
+//BIF_PF_FLR_INTR_STS
+#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS__SHIFT 0x0
+#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS__SHIFT 0x1
+#define BIF_PF_FLR_INTR_STS__DEV0_PF2_FLR_INTR_STS__SHIFT 0x2
+#define BIF_PF_FLR_INTR_STS__DEV0_PF3_FLR_INTR_STS__SHIFT 0x3
+#define BIF_PF_FLR_INTR_STS__DEV0_PF4_FLR_INTR_STS__SHIFT 0x4
+#define BIF_PF_FLR_INTR_STS__DEV0_PF5_FLR_INTR_STS__SHIFT 0x5
+#define BIF_PF_FLR_INTR_STS__DEV0_PF6_FLR_INTR_STS__SHIFT 0x6
+#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS_MASK 0x00000001L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS_MASK 0x00000002L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF2_FLR_INTR_STS_MASK 0x00000004L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF3_FLR_INTR_STS_MASK 0x00000008L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF4_FLR_INTR_STS_MASK 0x00000010L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF5_FLR_INTR_STS_MASK 0x00000020L
+#define BIF_PF_FLR_INTR_STS__DEV0_PF6_FLR_INTR_STS_MASK 0x00000040L
+//BIF_D3HOTD0_INTR_STS
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS__SHIFT 0x0
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS__SHIFT 0x1
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF2_D3HOTD0_INTR_STS__SHIFT 0x2
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF3_D3HOTD0_INTR_STS__SHIFT 0x3
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF4_D3HOTD0_INTR_STS__SHIFT 0x4
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF5_D3HOTD0_INTR_STS__SHIFT 0x5
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF6_D3HOTD0_INTR_STS__SHIFT 0x6
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS_MASK 0x00000001L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS_MASK 0x00000002L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF2_D3HOTD0_INTR_STS_MASK 0x00000004L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF3_D3HOTD0_INTR_STS_MASK 0x00000008L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF4_D3HOTD0_INTR_STS_MASK 0x00000010L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF5_D3HOTD0_INTR_STS_MASK 0x00000020L
+#define BIF_D3HOTD0_INTR_STS__DEV0_PF6_D3HOTD0_INTR_STS_MASK 0x00000040L
+//BIF_POWER_INTR_STS
+#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS__SHIFT 0x0
+#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS__SHIFT 0x10
+#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS_MASK 0x00000001L
+#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS_MASK 0x00010000L
+//BIF_PF_DSTATE_INTR_STS
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS__SHIFT 0x0
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS__SHIFT 0x1
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS__SHIFT 0x2
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS__SHIFT 0x3
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS__SHIFT 0x4
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS__SHIFT 0x5
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS__SHIFT 0x6
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS__SHIFT 0x7
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS_MASK 0x00000001L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS_MASK 0x00000002L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS_MASK 0x00000004L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS_MASK 0x00000008L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS_MASK 0x00000010L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS_MASK 0x00000020L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS_MASK 0x00000040L
+#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS_MASK 0x00000080L
+//SELF_SOFT_RST_2
+#define SELF_SOFT_RST_2__DSPT3_CFG_RST__SHIFT 0x0
+#define SELF_SOFT_RST_2__DSPT3_CFG_STICKY_RST__SHIFT 0x1
+#define SELF_SOFT_RST_2__DSPT3_PRV_RST__SHIFT 0x2
+#define SELF_SOFT_RST_2__DSPT3_PRV_STICKY_RST__SHIFT 0x3
+#define SELF_SOFT_RST_2__EP3_CFG_RST__SHIFT 0x4
+#define SELF_SOFT_RST_2__EP3_CFG_STICKY_RST__SHIFT 0x5
+#define SELF_SOFT_RST_2__EP3_PRV_RST__SHIFT 0x6
+#define SELF_SOFT_RST_2__EP3_PRV_STICKY_RST__SHIFT 0x7
+#define SELF_SOFT_RST_2__GMISP0_SDP_PORT_RST__SHIFT 0x18
+#define SELF_SOFT_RST_2__STRAP_RST__SHIFT 0x19
+#define SELF_SOFT_RST_2__SWUS_SHADOW_PRV_RST__SHIFT 0x1a
+#define SELF_SOFT_RST_2__NBIF_S5_RST__SHIFT 0x1e
+#define SELF_SOFT_RST_2__NBIF_S5_CDC_RST__SHIFT 0x1f
+#define SELF_SOFT_RST_2__DSPT3_CFG_RST_MASK 0x00000001L
+#define SELF_SOFT_RST_2__DSPT3_CFG_STICKY_RST_MASK 0x00000002L
+#define SELF_SOFT_RST_2__DSPT3_PRV_RST_MASK 0x00000004L
+#define SELF_SOFT_RST_2__DSPT3_PRV_STICKY_RST_MASK 0x00000008L
+#define SELF_SOFT_RST_2__EP3_CFG_RST_MASK 0x00000010L
+#define SELF_SOFT_RST_2__EP3_CFG_STICKY_RST_MASK 0x00000020L
+#define SELF_SOFT_RST_2__EP3_PRV_RST_MASK 0x00000040L
+#define SELF_SOFT_RST_2__EP3_PRV_STICKY_RST_MASK 0x00000080L
+#define SELF_SOFT_RST_2__GMISP0_SDP_PORT_RST_MASK 0x01000000L
+#define SELF_SOFT_RST_2__STRAP_RST_MASK 0x02000000L
+#define SELF_SOFT_RST_2__SWUS_SHADOW_PRV_RST_MASK 0x04000000L
+#define SELF_SOFT_RST_2__NBIF_S5_RST_MASK 0x40000000L
+#define SELF_SOFT_RST_2__NBIF_S5_CDC_RST_MASK 0x80000000L
+//BIF_INST_RESET_INTR_MASK
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK__SHIFT 0x0
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK__SHIFT 0x1
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK__SHIFT 0x2
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK__SHIFT 0x3
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK__SHIFT 0x4
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK_MASK 0x00000001L
+#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK_MASK 0x00000002L
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK_MASK 0x00000004L
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK_MASK 0x00000008L
+#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK_MASK 0x00000010L
+//BIF_PF_FLR_INTR_MASK
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK__SHIFT 0x0
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK__SHIFT 0x1
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF2_FLR_INTR_MASK__SHIFT 0x2
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF3_FLR_INTR_MASK__SHIFT 0x3
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF4_FLR_INTR_MASK__SHIFT 0x4
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF5_FLR_INTR_MASK__SHIFT 0x5
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF6_FLR_INTR_MASK__SHIFT 0x6
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK_MASK 0x00000001L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK_MASK 0x00000002L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF2_FLR_INTR_MASK_MASK 0x00000004L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF3_FLR_INTR_MASK_MASK 0x00000008L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF4_FLR_INTR_MASK_MASK 0x00000010L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF5_FLR_INTR_MASK_MASK 0x00000020L
+#define BIF_PF_FLR_INTR_MASK__DEV0_PF6_FLR_INTR_MASK_MASK 0x00000040L
+//BIF_D3HOTD0_INTR_MASK
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK__SHIFT 0x0
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK__SHIFT 0x1
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF2_D3HOTD0_INTR_MASK__SHIFT 0x2
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF3_D3HOTD0_INTR_MASK__SHIFT 0x3
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF4_D3HOTD0_INTR_MASK__SHIFT 0x4
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF5_D3HOTD0_INTR_MASK__SHIFT 0x5
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF6_D3HOTD0_INTR_MASK__SHIFT 0x6
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK_MASK 0x00000001L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK_MASK 0x00000002L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF2_D3HOTD0_INTR_MASK_MASK 0x00000004L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF3_D3HOTD0_INTR_MASK_MASK 0x00000008L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF4_D3HOTD0_INTR_MASK_MASK 0x00000010L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF5_D3HOTD0_INTR_MASK_MASK 0x00000020L
+#define BIF_D3HOTD0_INTR_MASK__DEV0_PF6_D3HOTD0_INTR_MASK_MASK 0x00000040L
+//BIF_POWER_INTR_MASK
+#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK__SHIFT 0x0
+#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK__SHIFT 0x10
+#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK_MASK 0x00000001L
+#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK_MASK 0x00010000L
+//BIF_PF_DSTATE_INTR_MASK
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK__SHIFT 0x0
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK__SHIFT 0x1
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK__SHIFT 0x2
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK__SHIFT 0x3
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK__SHIFT 0x4
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK__SHIFT 0x5
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK__SHIFT 0x6
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK__SHIFT 0x7
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK_MASK 0x00000001L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK_MASK 0x00000002L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK_MASK 0x00000004L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK_MASK 0x00000008L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK_MASK 0x00000010L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK_MASK 0x00000020L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK_MASK 0x00000040L
+#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK_MASK 0x00000080L
+//BIF_PF_FLR_RST
+#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0
+#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1
+#define BIF_PF_FLR_RST__DEV0_PF2_FLR_RST__SHIFT 0x2
+#define BIF_PF_FLR_RST__DEV0_PF3_FLR_RST__SHIFT 0x3
+#define BIF_PF_FLR_RST__DEV0_PF4_FLR_RST__SHIFT 0x4
+#define BIF_PF_FLR_RST__DEV0_PF5_FLR_RST__SHIFT 0x5
+#define BIF_PF_FLR_RST__DEV0_PF6_FLR_RST__SHIFT 0x6
+#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L
+#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L
+#define BIF_PF_FLR_RST__DEV0_PF2_FLR_RST_MASK 0x00000004L
+#define BIF_PF_FLR_RST__DEV0_PF3_FLR_RST_MASK 0x00000008L
+#define BIF_PF_FLR_RST__DEV0_PF4_FLR_RST_MASK 0x00000010L
+#define BIF_PF_FLR_RST__DEV0_PF5_FLR_RST_MASK 0x00000020L
+#define BIF_PF_FLR_RST__DEV0_PF6_FLR_RST_MASK 0x00000040L
+//BIF_DEV0_PF0_DSTATE_VALUE
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF1_DSTATE_VALUE
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF2_DSTATE_VALUE
+#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF3_DSTATE_VALUE
+#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF4_DSTATE_VALUE
+#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF5_DSTATE_VALUE
+#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_DEV0_PF6_DSTATE_VALUE
+#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2
+#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L
+#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_ACK_VALUE_MASK 0x00030000L
+//DEV0_PF0_D3HOTD0_RST_CTRL
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF1_D3HOTD0_RST_CTRL
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF2_D3HOTD0_RST_CTRL
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF3_D3HOTD0_RST_CTRL
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF4_D3HOTD0_RST_CTRL
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF5_D3HOTD0_RST_CTRL
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//DEV0_PF6_D3HOTD0_RST_CTRL
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L
+#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L
+//BIF_PORT0_DSTATE_VALUE
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE__SHIFT 0x0
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE__SHIFT 0x10
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE_MASK 0x00000003L
+#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE_MASK 0x00030000L
+//BIF_USB_SHUB_RS_RESET_CNTL
+#define BIF_USB_SHUB_RS_RESET_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define BIF_USB_SHUB_RS_RESET_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define BIF_USB_SHUB_RS_RESET_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define BIF_USB_SHUB_RS_RESET_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
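
For readers unfamiliar with these auto-generated headers: each register field gets a `__SHIFT`/`_MASK` pair, and drivers consume them with an ordinary read-modify-write. The standalone sketch below is not part of this patch; get_field()/set_field() are illustrative helpers rather than an existing amdgpu API, shown here only to make the intended use of the pairs above concrete (using the BIF_RST_MISC_CTRL DRV_RST_MODE field as the example).

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the block above (2-bit field at bits 3:2). */
#define BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT 0x2
#define BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK   0x0000000CL

/* Extract a field: mask it out of the register value, then shift it down. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Insert a field: clear its bits, then OR in the shifted (and re-masked) value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* Program DRV_RST_MODE = 2 without touching any other field. */
	reg = set_field(reg, BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK,
			BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT, 0x2);
	printf("DRV_RST_MODE = %u (raw reg = 0x%08x)\n",
	       get_field(reg, BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK,
			 BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT),
	       reg);
	return 0;
}

In the driver itself the same pattern is wrapped around the hardware register accessors instead of a local variable; the macro layout in this header exists precisely so that such generic helpers can be reused for every field.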
+
+
+// addressBlock: nbif_bif_misc_bif_misc_regblk
+//REGS_ROM_OFFSET_CTRL
+#define REGS_ROM_OFFSET_CTRL__ROM_OFFSET__SHIFT 0x0
+#define REGS_ROM_OFFSET_CTRL__ROM_OFFSET_MASK 0x7FL
+//NBIF_STRAP_BIOS_CNTL
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_BIOS_EN__SHIFT 0x0
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_PCIE_ID_BIOS_EN__SHIFT 0x1
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_BIOS_EN_MASK 0x00000001L
+#define NBIF_STRAP_BIOS_CNTL__NBIF_STRAP_PCIE_ID_BIOS_EN_MASK 0x00000002L
+//MISC_SCRATCH
+#define MISC_SCRATCH__MISC_SCRATCH0__SHIFT 0x0
+#define MISC_SCRATCH__MISC_SCRATCH0_MASK 0xFFFFFFFFL
+//INTR_LINE_POLARITY
+#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0__SHIFT 0x0
+#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0_MASK 0x000000FFL
+//INTR_LINE_ENABLE
+#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0__SHIFT 0x0
+#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0_MASK 0x000000FFL
+//OUTSTANDING_VC_ALLOC
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC__SHIFT 0x0
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC__SHIFT 0x2
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC__SHIFT 0x4
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC__SHIFT 0x6
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC__SHIFT 0x8
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC__SHIFT 0xa
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC__SHIFT 0xc
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC__SHIFT 0xe
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD__SHIFT 0x10
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC__SHIFT 0x18
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC__SHIFT 0x1a
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD__SHIFT 0x1c
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC_MASK 0x00000003L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC_MASK 0x0000000CL
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC_MASK 0x00000030L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC_MASK 0x000000C0L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC_MASK 0x00000300L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC_MASK 0x00000C00L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC_MASK 0x00003000L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC_MASK 0x0000C000L
+#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD_MASK 0x000F0000L
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC_MASK 0x03000000L
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC_MASK 0x0C000000L
+#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD_MASK 0xF0000000L
+//BIFC_MISC_CTRL0
+#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN__SHIFT 0x0
+#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN__SHIFT 0x1
+#define BIFC_MISC_CTRL0__REG_ACTIVE_VLINK_L0_EN__SHIFT 0x3
+#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS__SHIFT 0x4
+#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE__SHIFT 0x8
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_P__SHIFT 0x9
+#define BIFC_MISC_CTRL0__GSI_SST_ARB_CHAIN_LOCK__SHIFT 0xa
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN__SHIFT 0xb
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS__SHIFT 0xc
+#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS__SHIFT 0xd
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_NP__SHIFT 0xe
+#define BIFC_MISC_CTRL0__HRP_CHAIN_DISABLE__SHIFT 0xf
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS__SHIFT 0x10
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL__SHIFT 0x11
+#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW__SHIFT 0x12
+#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH__SHIFT 0x13
+#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO__SHIFT 0x14
+#define BIFC_MISC_CTRL0__HST_FLUSH_DEFER_EN__SHIFT 0x15
+#define BIFC_MISC_CTRL0__HST_FLUSH_CLR_LOCK_EN__SHIFT 0x16
+#define BIFC_MISC_CTRL0__STFETCH_BLOCK_IN_RST__SHIFT 0x17
+#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS__SHIFT 0x18
+#define BIFC_MISC_CTRL0__ATS_MSG_BLOCK_IN_RST__SHIFT 0x19
+#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS__SHIFT 0x1a
+#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE__SHIFT 0x1b
+#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE__SHIFT 0x1c
+#define BIFC_MISC_CTRL0__DMA_ALL_RST_PROTECT_STS_SEL__SHIFT 0x1d
+#define BIFC_MISC_CTRL0__HDP_P2P_DIRECT_ADD_ADJUST__SHIFT 0x1e
+#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION__SHIFT 0x1f
+#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN_MASK 0x00000001L
+#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN_MASK 0x00000006L
+#define BIFC_MISC_CTRL0__REG_ACTIVE_VLINK_L0_EN_MASK 0x00000008L
+#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS_MASK 0x000000F0L
+#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE_MASK 0x00000100L
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_P_MASK 0x00000200L
+#define BIFC_MISC_CTRL0__GSI_SST_ARB_CHAIN_LOCK_MASK 0x00000400L
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN_MASK 0x00000800L
+#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS_MASK 0x00001000L
+#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS_MASK 0x00002000L
+#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_NP_MASK 0x00004000L
+#define BIFC_MISC_CTRL0__HRP_CHAIN_DISABLE_MASK 0x00008000L
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS_MASK 0x00010000L
+#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL_MASK 0x00020000L
+#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW_MASK 0x00040000L
+#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH_MASK 0x00080000L
+#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO_MASK 0x00100000L
+#define BIFC_MISC_CTRL0__HST_FLUSH_DEFER_EN_MASK 0x00200000L
+#define BIFC_MISC_CTRL0__HST_FLUSH_CLR_LOCK_EN_MASK 0x00400000L
+#define BIFC_MISC_CTRL0__STFETCH_BLOCK_IN_RST_MASK 0x00800000L
+#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS_MASK 0x01000000L
+#define BIFC_MISC_CTRL0__ATS_MSG_BLOCK_IN_RST_MASK 0x02000000L
+#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS_MASK 0x04000000L
+#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE_MASK 0x08000000L
+#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE_MASK 0x10000000L
+#define BIFC_MISC_CTRL0__DMA_ALL_RST_PROTECT_STS_SEL_MASK 0x20000000L
+#define BIFC_MISC_CTRL0__HDP_P2P_DIRECT_ADD_ADJUST_MASK 0x40000000L
+#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION_MASK 0x80000000L
+//BIFC_MISC_CTRL1
+#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT__SHIFT 0x0
+#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT__SHIFT 0x1
+#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT__SHIFT 0x2
+#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT__SHIFT 0x3
+#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS__SHIFT 0x4
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR__SHIFT 0x5
+#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS__SHIFT 0x6
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP__SHIFT 0x7
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS__SHIFT 0x8
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS__SHIFT 0xa
+#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ__SHIFT 0xc
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE__SHIFT 0xd
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE__SHIFT 0xe
+#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1__SHIFT 0xf
+#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS__SHIFT 0x10
+#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS__SHIFT 0x11
+#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS__SHIFT 0x12
+#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS__SHIFT 0x13
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR__SHIFT 0x14
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE__SHIFT 0x15
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE_VALUE__SHIFT 0x16
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_FOR_AERLOG__SHIFT 0x17
+#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK__SHIFT 0x18
+#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK__SHIFT 0x19
+#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK__SHIFT 0x1a
+#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK__SHIFT 0x1b
+#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT__SHIFT 0x1c
+#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN__SHIFT 0x1d
+#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL__SHIFT 0x1e
+#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT_MASK 0x00000001L
+#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT_MASK 0x00000002L
+#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT_MASK 0x00000004L
+#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT_MASK 0x00000008L
+#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS_MASK 0x00000010L
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR_MASK 0x00000020L
+#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS_MASK 0x00000040L
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP_MASK 0x00000080L
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS_MASK 0x00000300L
+#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS_MASK 0x00000C00L
+#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ_MASK 0x00001000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_MASK 0x00002000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE_MASK 0x00004000L
+#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1_MASK 0x00008000L
+#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS_MASK 0x00010000L
+#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS_MASK 0x00020000L
+#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS_MASK 0x00040000L
+#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS_MASK 0x00080000L
+#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR_MASK 0x00100000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE_MASK 0x00200000L
+#define BIFC_MISC_CTRL1__DMAWRREQ_HSTWRRSP_ORDER_FORCE_VALUE_MASK 0x00400000L
+#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_FOR_AERLOG_MASK 0x00800000L
+#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK_MASK 0x01000000L
+#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK_MASK 0x02000000L
+#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK_MASK 0x04000000L
+#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK_MASK 0x08000000L
+#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT_MASK 0x10000000L
+#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN_MASK 0x20000000L
+#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL_MASK 0xC0000000L
+//BIFC_BME_ERR_LOG_LB
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x0
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x1
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F2__SHIFT 0x2
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F3__SHIFT 0x3
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F4__SHIFT 0x4
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F5__SHIFT 0x5
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F6__SHIFT 0x6
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x10
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x11
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F2__SHIFT 0x12
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F3__SHIFT 0x13
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F4__SHIFT 0x14
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F5__SHIFT 0x15
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F6__SHIFT 0x16
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F0_MASK 0x00000001L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F1_MASK 0x00000002L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F2_MASK 0x00000004L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F3_MASK 0x00000008L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F4_MASK 0x00000010L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F5_MASK 0x00000020L
+#define BIFC_BME_ERR_LOG_LB__DMA_ON_BME_LOW_DEV0_F6_MASK 0x00000040L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F0_MASK 0x00010000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F1_MASK 0x00020000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F2_MASK 0x00040000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F3_MASK 0x00080000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F4_MASK 0x00100000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F5_MASK 0x00200000L
+#define BIFC_BME_ERR_LOG_LB__CLEAR_DMA_ON_BME_LOW_DEV0_F6_MASK 0x00400000L
+//BIFC_LC_TIMER_CTRL
+#define BIFC_LC_TIMER_CTRL__ASPM_IDLE_TIMER_SCALE__SHIFT 0x0
+#define BIFC_LC_TIMER_CTRL__L1_EXIT_TIMER_SCALE__SHIFT 0x10
+#define BIFC_LC_TIMER_CTRL__ASPM_IDLE_TIMER_SCALE_MASK 0x0000FFFFL
+#define BIFC_LC_TIMER_CTRL__L1_EXIT_TIMER_SCALE_MASK 0xFFFF0000L
+//BIFC_RCCBIH_BME_ERR_LOG0
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x0
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x1
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F2__SHIFT 0x2
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F3__SHIFT 0x3
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F4__SHIFT 0x4
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F5__SHIFT 0x5
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F6__SHIFT 0x6
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x10
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x11
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F2__SHIFT 0x12
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F3__SHIFT 0x13
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F4__SHIFT 0x14
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F5__SHIFT 0x15
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F6__SHIFT 0x16
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00000001L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00000002L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F2_MASK 0x00000004L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F3_MASK 0x00000008L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F4_MASK 0x00000010L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F5_MASK 0x00000020L
+#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F6_MASK 0x00000040L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00010000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00020000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F2_MASK 0x00040000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F3_MASK 0x00080000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F4_MASK 0x00100000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F5_MASK 0x00200000L
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F6_MASK 0x00400000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1_MASK 0xC0000000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3_MASK 0xC0000000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5_MASK 0xC0000000L
+//BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6__SHIFT 0x0
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6__SHIFT 0x2
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6__SHIFT 0x4
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6__SHIFT 0x6
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6__SHIFT 0x8
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6__SHIFT 0xa
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6__SHIFT 0xc
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6__SHIFT 0xe
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7__SHIFT 0x10
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7__SHIFT 0x12
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7__SHIFT 0x14
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7__SHIFT 0x16
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7__SHIFT 0x18
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7__SHIFT 0x1a
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7__SHIFT 0x1c
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7__SHIFT 0x1e
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6_MASK 0x00000003L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6_MASK 0x0000000CL
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6_MASK 0x00000030L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6_MASK 0x000000C0L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6_MASK 0x00000300L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6_MASK 0x00000C00L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6_MASK 0x00003000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6_MASK 0x0000C000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7_MASK 0x00030000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7_MASK 0x000C0000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7_MASK 0x00300000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7_MASK 0x00C00000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7_MASK 0x03000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7_MASK 0x0C000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7_MASK 0x30000000L
+#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7_MASK 0xC0000000L
+//BIFC_DMA_ATTR_CNTL2_DEV0
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0__SHIFT 0x0
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1__SHIFT 0x4
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2__SHIFT 0x8
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3__SHIFT 0xc
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4__SHIFT 0x10
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5__SHIFT 0x14
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6__SHIFT 0x18
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7__SHIFT 0x1c
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0_MASK 0x00000001L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1_MASK 0x00000010L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2_MASK 0x00000100L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3_MASK 0x00001000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4_MASK 0x00010000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5_MASK 0x00100000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6_MASK 0x01000000L
+#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7_MASK 0x10000000L
+//BIFC_MISC_CTRL2
+#define BIFC_MISC_CTRL2__DIH_INTR_STFETCH_BLOCK_IN_LINKDOWN__SHIFT 0x0
+#define BIFC_MISC_CTRL2__SLOW_GMI_UPS_RSP_CRED_REL_EN__SHIFT 0x1
+#define BIFC_MISC_CTRL2__SLFR_IGNORE_DATAERR_EN__SHIFT 0x10
+#define BIFC_MISC_CTRL2__DATAERR_OVERRIDE_SLFR_BYTEEN_EN__SHIFT 0x11
+#define BIFC_MISC_CTRL2__PH_SUPPORT__SHIFT 0x12
+#define BIFC_MISC_CTRL2__GMI_FAIL_REQ_RTS_MASK__SHIFT 0x16
+#define BIFC_MISC_CTRL2__NBIF_AERRPT_BACKPERSURE_EN__SHIFT 0x17
+#define BIFC_MISC_CTRL2__DIH_INTR_STFETCH_BLOCK_IN_LINKDOWN_MASK 0x00000001L
+#define BIFC_MISC_CTRL2__SLOW_GMI_UPS_RSP_CRED_REL_EN_MASK 0x00000002L
+#define BIFC_MISC_CTRL2__SLFR_IGNORE_DATAERR_EN_MASK 0x00010000L
+#define BIFC_MISC_CTRL2__DATAERR_OVERRIDE_SLFR_BYTEEN_EN_MASK 0x00020000L
+#define BIFC_MISC_CTRL2__PH_SUPPORT_MASK 0x003C0000L
+#define BIFC_MISC_CTRL2__GMI_FAIL_REQ_RTS_MASK_MASK 0x00400000L
+#define BIFC_MISC_CTRL2__NBIF_AERRPT_BACKPERSURE_EN_MASK 0x00800000L
+//BME_DUMMY_CNTL_0
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0__SHIFT 0x0
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1__SHIFT 0x2
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2__SHIFT 0x4
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3__SHIFT 0x6
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4__SHIFT 0x8
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5__SHIFT 0xa
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6__SHIFT 0xc
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7__SHIFT 0xe
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0_MASK 0x00000003L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1_MASK 0x0000000CL
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2_MASK 0x00000030L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3_MASK 0x000000C0L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4_MASK 0x00000300L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5_MASK 0x00000C00L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6_MASK 0x00003000L
+#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7_MASK 0x0000C000L
+//BIFC_THT_CNTL
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0__SHIFT 0x0
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0__SHIFT 0x4
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1__SHIFT 0x8
+#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN__SHIFT 0x10
+#define BIFC_THT_CNTL__THT_NTB_VC0_APER0_ADSC_PUSH_DIS__SHIFT 0x18
+#define BIFC_THT_CNTL__THT_NTB_VC0_OTHAPER_ADSC_PUSH_DIS__SHIFT 0x19
+#define BIFC_THT_CNTL__THT_NTB_VC1_APER0_ADSC_PUSH_DIS__SHIFT 0x1a
+#define BIFC_THT_CNTL__THT_NTB_VC1_OTHAPER_ADSC_PUSH_DIS__SHIFT 0x1b
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0_MASK 0x0000000FL
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0_MASK 0x000000F0L
+#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1_MASK 0x00000F00L
+#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN_MASK 0x00010000L
+#define BIFC_THT_CNTL__THT_NTB_VC0_APER0_ADSC_PUSH_DIS_MASK 0x01000000L
+#define BIFC_THT_CNTL__THT_NTB_VC0_OTHAPER_ADSC_PUSH_DIS_MASK 0x02000000L
+#define BIFC_THT_CNTL__THT_NTB_VC1_APER0_ADSC_PUSH_DIS_MASK 0x04000000L
+#define BIFC_THT_CNTL__THT_NTB_VC1_OTHAPER_ADSC_PUSH_DIS_MASK 0x08000000L
+//BIFC_HSTARB_CNTL
+#define BIFC_HSTARB_CNTL__SLVARB_MODE__SHIFT 0x0
+#define BIFC_HSTARB_CNTL__CFG_BLOCK_P_EN__SHIFT 0x8
+#define BIFC_HSTARB_CNTL__SLVARB_MODE_MASK 0x00000003L
+#define BIFC_HSTARB_CNTL__CFG_BLOCK_P_EN_MASK 0x00000100L
+//BIFC_GSI_CNTL
+#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE__SHIFT 0x0
+#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE__SHIFT 0x2
+#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN__SHIFT 0x6
+#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN__SHIFT 0x7
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN__SHIFT 0x8
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN__SHIFT 0x9
+#define BIFC_GSI_CNTL__GSI_CPL_SST_EP_CAUSE_UR_EN__SHIFT 0xa
+#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE__SHIFT 0xb
+#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE__SHIFT 0xd
+#define BIFC_GSI_CNTL__GSI_CPL_SST_ATOMIC_EP_CAUSE_UR_EN__SHIFT 0xf
+#define BIFC_GSI_CNTL__GSI_SMN_PARITY_CHK_BE_MSK__SHIFT 0x10
+#define BIFC_GSI_CNTL__GSI_SMN_BURST_EN__SHIFT 0x11
+#define BIFC_GSI_CNTL__GSI_SMN_256B_SPLIT_64B_EN__SHIFT 0x12
+#define BIFC_GSI_CNTL__SMN_PP_PIPE_ENABLE__SHIFT 0x1b
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_FBFLUSH__SHIFT 0x1c
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPFLUSH__SHIFT 0x1d
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPRD__SHIFT 0x1e
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_ALL__SHIFT 0x1f
+#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE_MASK 0x00000003L
+#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE_MASK 0x0000003CL
+#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN_MASK 0x00000040L
+#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN_MASK 0x00000080L
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN_MASK 0x00000100L
+#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN_MASK 0x00000200L
+#define BIFC_GSI_CNTL__GSI_CPL_SST_EP_CAUSE_UR_EN_MASK 0x00000400L
+#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE_MASK 0x00001800L
+#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE_MASK 0x00006000L
+#define BIFC_GSI_CNTL__GSI_CPL_SST_ATOMIC_EP_CAUSE_UR_EN_MASK 0x00008000L
+#define BIFC_GSI_CNTL__GSI_SMN_PARITY_CHK_BE_MSK_MASK 0x00010000L
+#define BIFC_GSI_CNTL__GSI_SMN_BURST_EN_MASK 0x00020000L
+#define BIFC_GSI_CNTL__GSI_SMN_256B_SPLIT_64B_EN_MASK 0x00040000L
+#define BIFC_GSI_CNTL__SMN_PP_PIPE_ENABLE_MASK 0x08000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_FBFLUSH_MASK 0x10000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPFLUSH_MASK 0x20000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_HDPRD_MASK 0x40000000L
+#define BIFC_GSI_CNTL__HDP_FB_UPLIMIT_COUNT_ALL_MASK 0x80000000L
+//BIFC_PCIEFUNC_CNTL
+#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC__SHIFT 0x0
+#define BIFC_PCIEFUNC_CNTL__MP1SYSHUBDATA_DRAM_IS_PCIEFUNC__SHIFT 0x10
+#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC_MASK 0x0000FFFFL
+#define BIFC_PCIEFUNC_CNTL__MP1SYSHUBDATA_DRAM_IS_PCIEFUNC_MASK 0x00010000L
+//BIFC_PASID_CHECK_DIS
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0__SHIFT 0x0
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1__SHIFT 0x1
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F2__SHIFT 0x2
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F3__SHIFT 0x3
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F4__SHIFT 0x4
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F5__SHIFT 0x5
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F6__SHIFT 0x6
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0_MASK 0x00000001L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1_MASK 0x00000002L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F2_MASK 0x00000004L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F3_MASK 0x00000008L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F4_MASK 0x00000010L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F5_MASK 0x00000020L
+#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F6_MASK 0x00000040L
+//BIFC_SDP_CNTL_0
+#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS__SHIFT 0x0
+#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS__SHIFT 0x8
+#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS__SHIFT 0x10
+#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS__SHIFT 0x18
+#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
+#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS_MASK 0x0000FF00L
+#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS_MASK 0x00FF0000L
+#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS_MASK 0xFF000000L
+//BIFC_SDP_CNTL_1
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS__SHIFT 0x0
+#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS__SHIFT 0x1
+#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS__SHIFT 0x2
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS__SHIFT 0x3
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x4
+#define BIFC_SDP_CNTL_1__NP_KEEP_GOING_STALL_P__SHIFT 0x5
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x7
+#define BIFC_SDP_CNTL_1__ATOMIC_STALL_BY_RDWR_EN__SHIFT 0x8
+#define BIFC_SDP_CNTL_1__POOL_CREDIT_ALLOC_OVERRIDE_DYNAMIC__SHIFT 0x9
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS_MASK 0x00000001L
+#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS_MASK 0x00000002L
+#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS_MASK 0x00000004L
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS_MASK 0x00000008L
+#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000010L
+#define BIFC_SDP_CNTL_1__NP_KEEP_GOING_STALL_P_MASK 0x00000020L
+#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000080L
+#define BIFC_SDP_CNTL_1__ATOMIC_STALL_BY_RDWR_EN_MASK 0x00000100L
+#define BIFC_SDP_CNTL_1__POOL_CREDIT_ALLOC_OVERRIDE_DYNAMIC_MASK 0x00000200L
+//BIFC_PASID_STS
+#define BIFC_PASID_STS__PASID_STS__SHIFT 0x0
+#define BIFC_PASID_STS__PASID_STS_MASK 0x0000000FL
+//BIFC_ATHUB_ACT_CNTL
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE__SHIFT 0x0
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_SLFR_DATAERR_RSP_STS_TYPE__SHIFT 0x3
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_REQ_DROP_DIS__SHIFT 0x8
+#define BIFC_ATHUB_ACT_CNTL__GSI_ATHUB_ACT_FLUSH_TRIGGER__SHIFT 0x9
+#define BIFC_ATHUB_ACT_CNTL__GMI_ATHUB_ACT_FLUSH_TRIGGER__SHIFT 0xa
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_SST_PP_REQ_DROP_EN__SHIFT 0xb
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE_MASK 0x00000007L
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_SLFR_DATAERR_RSP_STS_TYPE_MASK 0x00000038L
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_REQ_DROP_DIS_MASK 0x00000100L
+#define BIFC_ATHUB_ACT_CNTL__GSI_ATHUB_ACT_FLUSH_TRIGGER_MASK 0x00000200L
+#define BIFC_ATHUB_ACT_CNTL__GMI_ATHUB_ACT_FLUSH_TRIGGER_MASK 0x00000400L
+#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_SST_PP_REQ_DROP_EN_MASK 0x00000800L
+//BIFC_PERF_CNTL_0
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN__SHIFT 0x0
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN__SHIFT 0x1
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET__SHIFT 0x8
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET__SHIFT 0x9
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT 0x10
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL__SHIFT 0x18
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN_MASK 0x00000001L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN_MASK 0x00000002L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET_MASK 0x00000100L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET_MASK 0x00000200L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK 0x007F0000L
+#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL_MASK 0x7F000000L
+//BIFC_PERF_CNTL_1
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN__SHIFT 0x0
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN__SHIFT 0x1
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET__SHIFT 0x4
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET__SHIFT 0x5
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL__SHIFT 0x8
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL__SHIFT 0x10
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN_MASK 0x00000001L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN_MASK 0x00000002L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET_MASK 0x00000010L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET_MASK 0x00000020L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL_MASK 0x0000FF00L
+#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL_MASK 0x01FF0000L
+//BIFC_PERF_CNT_MMIO_RD_L32BIT
+#define BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_RD_L32BIT__PERF_CNT_MMIO_RD_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//BIFC_PERF_CNT_MMIO_WR_L32BIT
+#define BIFC_PERF_CNT_MMIO_WR_L32BIT__PERF_CNT_MMIO_WR_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_WR_L32BIT__PERF_CNT_MMIO_WR_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//BIFC_PERF_CNT_DMA_RD_L32BIT
+#define BIFC_PERF_CNT_DMA_RD_L32BIT__PERF_CNT_DMA_RD_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_RD_L32BIT__PERF_CNT_DMA_RD_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//BIFC_PERF_CNT_DMA_WR_L32BIT
+#define BIFC_PERF_CNT_DMA_WR_L32BIT__PERF_CNT_DMA_WR_VALUE_L32BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_WR_L32BIT__PERF_CNT_DMA_WR_VALUE_L32BIT_MASK 0xFFFFFFFFL
+//NBIF_REGIF_ERRSET_CTRL
+#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
+#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
+//BIFC_SDP_CNTL_2
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS__SHIFT 0x0
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS_H__SHIFT 0x8
+#define BIFC_SDP_CNTL_2__HRP_SDP_DISCON_HYSTERESIS_H__SHIFT 0x10
+#define BIFC_SDP_CNTL_2__GSI_SDP_DISCON_HYSTERESIS_H__SHIFT 0x18
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS_MASK 0x000000FFL
+#define BIFC_SDP_CNTL_2__SDP_SION_DISCON_HYSTERESIS_H_MASK 0x00000F00L
+#define BIFC_SDP_CNTL_2__HRP_SDP_DISCON_HYSTERESIS_H_MASK 0x000F0000L
+#define BIFC_SDP_CNTL_2__GSI_SDP_DISCON_HYSTERESIS_H_MASK 0x0F000000L
+//NBIF_PGMST_CTRL
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_HYSTERESIS__SHIFT 0x0
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_EN__SHIFT 0x8
+#define NBIF_PGMST_CTRL__NBIF_CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define NBIF_PGMST_CTRL__NBIF_CFG_FW_PG_EXIT_EN__SHIFT 0xe
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define NBIF_PGMST_CTRL__NBIF_CFG_PG_EN_MASK 0x00000100L
+#define NBIF_PGMST_CTRL__NBIF_CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define NBIF_PGMST_CTRL__NBIF_CFG_FW_PG_EXIT_EN_MASK 0x0000C000L
+//NBIF_PGSLV_CTRL
+#define NBIF_PGSLV_CTRL__NBIF_CFG_IDLE_HYSTERESIS__SHIFT 0x0
+#define NBIF_PGSLV_CTRL__NBIF_CFG_IDLE_HYSTERESIS_MASK 0x0000001FL
+//NBIF_PG_MISC_CTRL
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_0_IDLE_HYSTERESIS__SHIFT 0x0
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_1_IDLE_HYSTERESIS__SHIFT 0x5
+#define NBIF_PG_MISC_CTRL__NBIF_PG_ENDP_D3_ONLY__SHIFT 0xa
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM1__SHIFT 0xd
+#define NBIF_PG_MISC_CTRL__NBIF_PG_DS_ALLOW_DIS__SHIFT 0xe
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM2__SHIFT 0x10
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_REFCLK_CYCLE_FOR_200NS__SHIFT 0x18
+#define NBIF_PG_MISC_CTRL__NBIF_PG_PCIE_NBIF_LD_MASK__SHIFT 0x1e
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_PG_EXIT_OVERRIDE__SHIFT 0x1f
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_0_IDLE_HYSTERESIS_MASK 0x0000001FL
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_SHUBCLK_1_IDLE_HYSTERESIS_MASK 0x000003E0L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_ENDP_D3_ONLY_MASK 0x00000400L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM1_MASK 0x00002000L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_DS_ALLOW_DIS_MASK 0x00004000L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_CLK_PERM2_MASK 0x00010000L
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_REFCLK_CYCLE_FOR_200NS_MASK 0x3F000000L
+#define NBIF_PG_MISC_CTRL__NBIF_PG_PCIE_NBIF_LD_MASK_MASK 0x40000000L
+#define NBIF_PG_MISC_CTRL__NBIF_CFG_PG_EXIT_OVERRIDE_MASK 0x80000000L
+//SMN_MST_EP_CNTL3
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7_MASK 0x00000080L
+//SMN_MST_EP_CNTL4
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7_MASK 0x00000080L
+//SMN_MST_CNTL1
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS__SHIFT 0x0
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0__SHIFT 0x10
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS_MASK 0x00000001L
+#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0_MASK 0x00010000L
+//SMN_MST_EP_CNTL5
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7_MASK 0x00000080L
+//BIF_SELFRING_BUFFER_VID
+#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID__SHIFT 0x0
+#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID__SHIFT 0x8
+#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID__SHIFT 0x10
+#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID_MASK 0x000000FFL
+#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID_MASK 0x0000FF00L
+#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID_MASK 0x00FF0000L
+//BIF_SELFRING_VECTOR_CNTL
+#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS__SHIFT 0x0
+#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM__SHIFT 0x1
+#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS_MASK 0x00000001L
+#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM_MASK 0x00000002L
+//NBIF_STRAP_WRITE_CTRL
+#define NBIF_STRAP_WRITE_CTRL__NBIF_STRAP_WRITE_ONCE_ENABLE__SHIFT 0x0
+#define NBIF_STRAP_WRITE_CTRL__NBIF_STRAP_WRITE_ONCE_ENABLE_MASK 0x00000001L
+//NBIF_INTX_DSTATE_MISC_CNTL
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP__SHIFT 0x0
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN__SHIFT 0x1
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS__SHIFT 0x2
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP__SHIFT 0x3
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN__SHIFT 0x4
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP__SHIFT 0x5
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN__SHIFT 0x6
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS__SHIFT 0x7
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP_MASK 0x00000001L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN_MASK 0x00000002L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS_MASK 0x00000004L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP_MASK 0x00000008L
+#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN_MASK 0x00000010L
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP_MASK 0x00000020L
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN_MASK 0x00000040L
+#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS_MASK 0x00000080L
+//NBIF_PENDING_MISC_CNTL
+#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS__SHIFT 0x0
+#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS__SHIFT 0x1
+#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS_MASK 0x00000001L
+#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS_MASK 0x00000002L
+//BIF_GMI_WRR_WEIGHT
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_COUNTER_MODE__SHIFT 0x1d
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_MODE__SHIFT 0x1e
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_SIZE_MODE__SHIFT 0x1f
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_COUNTER_MODE_MASK 0x20000000L
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_MODE_MASK 0x40000000L
+#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_LRG_SIZE_MODE_MASK 0x80000000L
+//BIF_GMI_WRR_WEIGHT2
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT__SHIFT 0x0
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT__SHIFT 0x8
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT__SHIFT 0x10
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT__SHIFT 0x18
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT_MASK 0x000000FFL
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT_MASK 0x0000FF00L
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT_MASK 0x00FF0000L
+#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT_MASK 0xFF000000L
+//BIF_GMI_WRR_WEIGHT3
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT__SHIFT 0x0
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT__SHIFT 0x8
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT__SHIFT 0x10
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT__SHIFT 0x18
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT_MASK 0x000000FFL
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT_MASK 0x0000FF00L
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT_MASK 0x00FF0000L
+#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT_MASK 0xFF000000L
+//NBIF_PWRBRK_REQUEST
+#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST__SHIFT 0x0
+#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST_MASK 0x00000001L
+//BIF_ATOMIC_ERR_LOG_DEV0_F0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_OPCODE_DEV0_F2__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_REQEN_LOW_DEV0_F2__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_LENGTH_DEV0_F2__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_NR_DEV0_F2__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_OPCODE_DEV0_F2__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F2__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_LENGTH_DEV0_F2__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_NR_DEV0_F2__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_OPCODE_DEV0_F2_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_REQEN_LOW_DEV0_F2_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_LENGTH_DEV0_F2_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_NR_DEV0_F2_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_OPCODE_DEV0_F2_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F2_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_LENGTH_DEV0_F2_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_NR_DEV0_F2_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_OPCODE_DEV0_F3__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_REQEN_LOW_DEV0_F3__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_LENGTH_DEV0_F3__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_NR_DEV0_F3__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_OPCODE_DEV0_F3__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F3__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_LENGTH_DEV0_F3__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_NR_DEV0_F3__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_OPCODE_DEV0_F3_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_REQEN_LOW_DEV0_F3_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_LENGTH_DEV0_F3_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_NR_DEV0_F3_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_OPCODE_DEV0_F3_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F3_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_LENGTH_DEV0_F3_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_NR_DEV0_F3_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F4
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_OPCODE_DEV0_F4__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_REQEN_LOW_DEV0_F4__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_LENGTH_DEV0_F4__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_NR_DEV0_F4__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_OPCODE_DEV0_F4__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F4__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_LENGTH_DEV0_F4__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_NR_DEV0_F4__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_OPCODE_DEV0_F4_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_REQEN_LOW_DEV0_F4_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_LENGTH_DEV0_F4_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_NR_DEV0_F4_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_OPCODE_DEV0_F4_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F4_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_LENGTH_DEV0_F4_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_NR_DEV0_F4_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F5
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_OPCODE_DEV0_F5__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_REQEN_LOW_DEV0_F5__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_LENGTH_DEV0_F5__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_NR_DEV0_F5__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_OPCODE_DEV0_F5__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F5__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_LENGTH_DEV0_F5__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_NR_DEV0_F5__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_OPCODE_DEV0_F5_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_REQEN_LOW_DEV0_F5_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_LENGTH_DEV0_F5_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_NR_DEV0_F5_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_OPCODE_DEV0_F5_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F5_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_LENGTH_DEV0_F5_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_NR_DEV0_F5_MASK 0x00080000L
+//BIF_ATOMIC_ERR_LOG_DEV0_F6
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_OPCODE_DEV0_F6__SHIFT 0x0
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_REQEN_LOW_DEV0_F6__SHIFT 0x1
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_LENGTH_DEV0_F6__SHIFT 0x2
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_NR_DEV0_F6__SHIFT 0x3
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_OPCODE_DEV0_F6__SHIFT 0x10
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F6__SHIFT 0x11
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_LENGTH_DEV0_F6__SHIFT 0x12
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_NR_DEV0_F6__SHIFT 0x13
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_OPCODE_DEV0_F6_MASK 0x00000001L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_REQEN_LOW_DEV0_F6_MASK 0x00000002L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_LENGTH_DEV0_F6_MASK 0x00000004L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_NR_DEV0_F6_MASK 0x00000008L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_OPCODE_DEV0_F6_MASK 0x00010000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F6_MASK 0x00020000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_LENGTH_DEV0_F6_MASK 0x00040000L
+#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_NR_DEV0_F6_MASK 0x00080000L
+//BIF_DMA_MP4_ERR_LOG
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x0
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x1
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x10
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x11
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR_MASK 0x00000001L
+#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00000002L
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR_MASK 0x00010000L
+#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00020000L
+//BIF_PASID_ERR_LOG
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0__SHIFT 0x0
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1__SHIFT 0x1
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F2__SHIFT 0x2
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F3__SHIFT 0x3
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F4__SHIFT 0x4
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F5__SHIFT 0x5
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F6__SHIFT 0x6
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0_MASK 0x00000001L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1_MASK 0x00000002L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F2_MASK 0x00000004L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F3_MASK 0x00000008L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F4_MASK 0x00000010L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F5_MASK 0x00000020L
+#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F6_MASK 0x00000040L
+//BIF_PASID_ERR_CLR
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0__SHIFT 0x0
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1__SHIFT 0x1
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F2__SHIFT 0x2
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F3__SHIFT 0x3
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F4__SHIFT 0x4
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F5__SHIFT 0x5
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F6__SHIFT 0x6
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0_MASK 0x00000001L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1_MASK 0x00000002L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F2_MASK 0x00000004L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F3_MASK 0x00000008L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F4_MASK 0x00000010L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F5_MASK 0x00000020L
+#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F6_MASK 0x00000040L
+//NBIF_VWIRE_CTRL
+#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS__SHIFT 0x0
+#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT__SHIFT 0x4
+#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED__SHIFT 0x8
+#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS__SHIFT 0x10
+#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT__SHIFT 0x14
+#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL__SHIFT 0x1a
+#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS_MASK 0x00000001L
+#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT_MASK 0x000000F0L
+#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED_MASK 0x00000100L
+#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS_MASK 0x00010000L
+#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT_MASK 0x00F00000L
+#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL_MASK 0x0C000000L
+//NBIF_SMN_VWR_VCHG_DIS_CTRL
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET7_DIS__SHIFT 0x7
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET8_DIS__SHIFT 0x8
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET9_DIS__SHIFT 0x9
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS_MASK 0x00000040L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET7_DIS_MASK 0x00000080L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET8_DIS_MASK 0x00000100L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET9_DIS_MASK 0x00000200L
+//NBIF_SMN_VWR_VCHG_RST_CTRL0
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET7_RST_DEF_REV__SHIFT 0x7
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET8_RST_DEF_REV__SHIFT 0x8
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET9_RST_DEF_REV__SHIFT 0x9
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV_MASK 0x00000040L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET7_RST_DEF_REV_MASK 0x00000080L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET8_RST_DEF_REV_MASK 0x00000100L
+#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET9_RST_DEF_REV_MASK 0x00000200L
+//NBIF_SMN_VWR_VCHG_TRIG
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET7_TRIG__SHIFT 0x7
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET8_TRIG__SHIFT 0x8
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET9_TRIG__SHIFT 0x9
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG_MASK 0x00000040L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET7_TRIG_MASK 0x00000080L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET8_TRIG_MASK 0x00000100L
+#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET9_TRIG_MASK 0x00000200L
+//NBIF_SMN_VWR_WTRIG_CNTL
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS__SHIFT 0x0
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS__SHIFT 0x1
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS__SHIFT 0x2
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS__SHIFT 0x3
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS__SHIFT 0x4
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS__SHIFT 0x5
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS__SHIFT 0x6
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET7_DIS__SHIFT 0x7
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET8_DIS__SHIFT 0x8
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET9_DIS__SHIFT 0x9
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS_MASK 0x00000001L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS_MASK 0x00000002L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS_MASK 0x00000004L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS_MASK 0x00000008L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS_MASK 0x00000010L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS_MASK 0x00000020L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS_MASK 0x00000040L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET7_DIS_MASK 0x00000080L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET8_DIS_MASK 0x00000100L
+#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET9_DIS_MASK 0x00000200L
+//NBIF_SMN_VWR_VCHG_DIS_CTRL_1
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV__SHIFT 0x0
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV__SHIFT 0x1
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV__SHIFT 0x2
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV__SHIFT 0x3
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV__SHIFT 0x4
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV__SHIFT 0x5
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV__SHIFT 0x6
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET7_DIFFDET_DEF_REV__SHIFT 0x7
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET8_DIFFDET_DEF_REV__SHIFT 0x8
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET9_DIFFDET_DEF_REV__SHIFT 0x9
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV_MASK 0x00000001L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV_MASK 0x00000002L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV_MASK 0x00000004L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV_MASK 0x00000008L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV_MASK 0x00000010L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV_MASK 0x00000020L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV_MASK 0x00000040L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET7_DIFFDET_DEF_REV_MASK 0x00000080L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET8_DIFFDET_DEF_REV_MASK 0x00000100L
+#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET9_DIFFDET_DEF_REV_MASK 0x00000200L
+//NBIF_MGCG_CTRL_LCLK
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK__SHIFT 0x0
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK__SHIFT 0x1
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK__SHIFT 0x2
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK__SHIFT 0xa
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK__SHIFT 0xb
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK__SHIFT 0xc
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK__SHIFT 0xd
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DBG_DIS_LCLK__SHIFT 0xe
+#define NBIF_MGCG_CTRL_LCLK__NBIF_SRAM_FGCG_EN_LCLK__SHIFT 0xf
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK 0x00000001L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK_MASK 0x00000002L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK_MASK 0x000003FCL
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK_MASK 0x00000400L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK_MASK 0x00000800L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK_MASK 0x00002000L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DBG_DIS_LCLK_MASK 0x00004000L
+#define NBIF_MGCG_CTRL_LCLK__NBIF_SRAM_FGCG_EN_LCLK_MASK 0x00008000L
+//NBIF_DS_CTRL_LCLK
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN__SHIFT 0x0
+#define NBIF_DS_CTRL_LCLK__ATHUB_LCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1
+#define NBIF_DS_CTRL_LCLK__USB_LCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x2
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER__SHIFT 0x10
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN_MASK 0x00000001L
+#define NBIF_DS_CTRL_LCLK__ATHUB_LCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000002L
+#define NBIF_DS_CTRL_LCLK__USB_LCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000004L
+#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER_MASK 0xFFFF0000L
+//SMN_MST_CNTL0
+#define SMN_MST_CNTL0__SMN_ARB_MODE__SHIFT 0x0
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS__SHIFT 0x8
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS__SHIFT 0x9
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS__SHIFT 0xa
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS__SHIFT 0xb
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0__SHIFT 0x10
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0__SHIFT 0x14
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0__SHIFT 0x18
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0__SHIFT 0x1c
+#define SMN_MST_CNTL0__SMN_ARB_MODE_MASK 0x00000003L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS_MASK 0x00000100L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS_MASK 0x00000200L
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS_MASK 0x00000400L
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS_MASK 0x00000800L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0_MASK 0x00010000L
+#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0_MASK 0x00100000L
+#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0_MASK 0x01000000L
+#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0_MASK 0x10000000L
+//SMN_MST_EP_CNTL1
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7_MASK 0x00000080L
+//SMN_MST_EP_CNTL2
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0__SHIFT 0x0
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1__SHIFT 0x1
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2__SHIFT 0x2
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3__SHIFT 0x3
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4__SHIFT 0x4
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5__SHIFT 0x5
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6__SHIFT 0x6
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7__SHIFT 0x7
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0_MASK 0x00000001L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1_MASK 0x00000002L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2_MASK 0x00000004L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3_MASK 0x00000008L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4_MASK 0x00000010L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5_MASK 0x00000020L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6_MASK 0x00000040L
+#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7_MASK 0x00000080L
+//NBIF_SDP_VWR_VCHG_DIS_CTRL
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS_MASK 0x01000000L
+//NBIF_SDP_VWR_VCHG_RST_CTRL0
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN_MASK 0x01000000L
+//NBIF_SDP_VWR_VCHG_RST_CTRL1
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL_MASK 0x01000000L
+//NBIF_SDP_VWR_VCHG_TRIG
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG__SHIFT 0x0
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG__SHIFT 0x1
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG__SHIFT 0x2
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG__SHIFT 0x3
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG__SHIFT 0x4
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG__SHIFT 0x5
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG__SHIFT 0x6
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG__SHIFT 0x7
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG__SHIFT 0x18
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG_MASK 0x00000001L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG_MASK 0x00000002L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG_MASK 0x00000004L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG_MASK 0x00000008L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG_MASK 0x00000010L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG_MASK 0x00000020L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG_MASK 0x00000040L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG_MASK 0x00000080L
+#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG_MASK 0x01000000L
+//NBIF_SHUB_TODET_CTRL
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_EN__SHIFT 0x0
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_AER_LOG_EN__SHIFT 0x1
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_TIMER_UNIT__SHIFT 0x8
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TIMEOUT_COUNT__SHIFT 0x10
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_EN_MASK 0x00000001L
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_AER_LOG_EN_MASK 0x00000002L
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TODET_TIMER_UNIT_MASK 0x00000700L
+#define NBIF_SHUB_TODET_CTRL__NBIF_SHUB_TIMEOUT_COUNT_MASK 0xFFFF0000L
+//NBIF_SHUB_TODET_CLIENT_CTRL
+#define NBIF_SHUB_TODET_CLIENT_CTRL__NBIF_SHUB_TODET_SLVERR_EN__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_CTRL__NBIF_SHUB_TODET_SLVERR_EN_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_CLIENT_STATUS
+#define NBIF_SHUB_TODET_CLIENT_STATUS__NBIF_SHUB_TODET_CLIENT_STATUS__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_STATUS__NBIF_SHUB_TODET_CLIENT_STATUS_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_SYNCFLOOD_CTRL
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL__NBIF_SHUB_TODET_SYNCFLOOD_EN__SHIFT 0x0
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL__NBIF_SHUB_TODET_SYNCFLOOD_EN_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_CLIENT_CTRL2
+#define NBIF_SHUB_TODET_CLIENT_CTRL2__NBIF_SHUB_TODET_SLVERR_EN2__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_CTRL2__NBIF_SHUB_TODET_SLVERR_EN2_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_CLIENT_STATUS2
+#define NBIF_SHUB_TODET_CLIENT_STATUS2__NBIF_SHUB_TODET_CLIENT_STATUS2__SHIFT 0x0
+#define NBIF_SHUB_TODET_CLIENT_STATUS2__NBIF_SHUB_TODET_CLIENT_STATUS2_MASK 0xFFFFFFFFL
+//NBIF_SHUB_TODET_SYNCFLOOD_CTRL2
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL2__NBIF_SHUB_TODET_SYNCFLOOD_EN2__SHIFT 0x0
+#define NBIF_SHUB_TODET_SYNCFLOOD_CTRL2__NBIF_SHUB_TODET_SYNCFLOOD_EN2_MASK 0xFFFFFFFFL
+//BIFC_BME_ERR_LOG_HB
+//BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+//BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+//BIFC_GMI_SDP_REQ_POOLCRED_ALLOC
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC2_ALLOC__SHIFT 0x8
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC3_ALLOC__SHIFT 0xc
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC4_ALLOC__SHIFT 0x10
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC5_ALLOC__SHIFT 0x14
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC6_ALLOC__SHIFT 0x18
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC7_ALLOC__SHIFT 0x1c
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC2_ALLOC_MASK 0x00000F00L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC3_ALLOC_MASK 0x0000F000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC4_ALLOC_MASK 0x000F0000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC5_ALLOC_MASK 0x00F00000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC6_ALLOC_MASK 0x0F000000L
+#define BIFC_GMI_SDP_REQ_POOLCRED_ALLOC__VC7_ALLOC_MASK 0xF0000000L
+//BIFC_GMI_SDP_DAT_POOLCRED_ALLOC
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC2_ALLOC__SHIFT 0x8
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC3_ALLOC__SHIFT 0xc
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC4_ALLOC__SHIFT 0x10
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC5_ALLOC__SHIFT 0x14
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC6_ALLOC__SHIFT 0x18
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC7_ALLOC__SHIFT 0x1c
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC2_ALLOC_MASK 0x00000F00L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC3_ALLOC_MASK 0x0000F000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC4_ALLOC_MASK 0x000F0000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC5_ALLOC_MASK 0x00F00000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC6_ALLOC_MASK 0x0F000000L
+#define BIFC_GMI_SDP_DAT_POOLCRED_ALLOC__VC7_ALLOC_MASK 0xF0000000L
+//BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC2_ALLOC__SHIFT 0x8
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC3_ALLOC__SHIFT 0xc
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC4_ALLOC__SHIFT 0x10
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC5_ALLOC__SHIFT 0x14
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC6_ALLOC__SHIFT 0x18
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC7_ALLOC__SHIFT 0x1c
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC2_ALLOC_MASK 0x00000F00L
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC3_ALLOC_MASK 0x0000F000L
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC4_ALLOC_MASK 0x000F0000L
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC5_ALLOC_MASK 0x00F00000L
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC6_ALLOC_MASK 0x0F000000L
+#define BIFC_GMI_SST_RDRSP_POOLCRED_ALLOC__VC7_ALLOC_MASK 0xF0000000L
+//BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC0_ALLOC__SHIFT 0x0
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC1_ALLOC__SHIFT 0x4
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC2_ALLOC__SHIFT 0x8
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC3_ALLOC__SHIFT 0xc
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC4_ALLOC__SHIFT 0x10
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC5_ALLOC__SHIFT 0x14
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC6_ALLOC__SHIFT 0x18
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC7_ALLOC__SHIFT 0x1c
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC0_ALLOC_MASK 0x0000000FL
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC1_ALLOC_MASK 0x000000F0L
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC2_ALLOC_MASK 0x00000F00L
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC3_ALLOC_MASK 0x0000F000L
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC4_ALLOC_MASK 0x000F0000L
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC5_ALLOC_MASK 0x00F00000L
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC6_ALLOC_MASK 0x0F000000L
+#define BIFC_GMI_SST_WRRSP_POOLCRED_ALLOC__VC7_ALLOC_MASK 0xF0000000L
+//DISCON_HYSTERESIS_HEAD_CTRL
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_DNS_SDP_DISCON_HYSTERESIS_H__SHIFT 0x0
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_UPS_SDP_DISCON_HYSTERESIS_H__SHIFT 0x8
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_DNS_SDP_DISCON_HYSTERESIS_H_MASK 0x0000000FL
+#define DISCON_HYSTERESIS_HEAD_CTRL__GMI_UPS_SDP_DISCON_HYSTERESIS_H_MASK 0x00000F00L
+//BIFC_EARLY_WAKEUP_CNTL
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_ACTIVE__SHIFT 0x0
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_DS_EXIT__SHIFT 0x1
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_ALLOW_AER_ACTIVE__SHIFT 0x2
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_ACTIVE_MASK 0x00000001L
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_BY_CLIENT_DS_EXIT_MASK 0x00000002L
+#define BIFC_EARLY_WAKEUP_CNTL__NBIF_EARLY_WAKEUP_ALLOW_AER_ACTIVE_MASK 0x00000004L
+//BIFC_PERF_CNT_MMIO_RD_H16BIT
+#define BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_RD_H16BIT__PERF_CNT_MMIO_RD_VALUE_H16BIT_MASK 0x0000FFFFL
+//BIFC_PERF_CNT_MMIO_WR_H16BIT
+#define BIFC_PERF_CNT_MMIO_WR_H16BIT__PERF_CNT_MMIO_WR_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_MMIO_WR_H16BIT__PERF_CNT_MMIO_WR_VALUE_H16BIT_MASK 0x0000FFFFL
+//BIFC_PERF_CNT_DMA_RD_H16BIT
+#define BIFC_PERF_CNT_DMA_RD_H16BIT__PERF_CNT_DMA_RD_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_RD_H16BIT__PERF_CNT_DMA_RD_VALUE_H16BIT_MASK 0x0000FFFFL
+//BIFC_PERF_CNT_DMA_WR_H16BIT
+#define BIFC_PERF_CNT_DMA_WR_H16BIT__PERF_CNT_DMA_WR_VALUE_H16BIT__SHIFT 0x0
+#define BIFC_PERF_CNT_DMA_WR_H16BIT__PERF_CNT_DMA_WR_VALUE_H16BIT_MASK 0x0000FFFFL
+//NBIF_PERF_COM_COUNT_ENABLE
+#define NBIF_PERF_COM_COUNT_ENABLE__NBIF_COM_COUNT_ENABLE__SHIFT 0x0
+#define NBIF_PERF_COM_COUNT_ENABLE__START_COUNT_NOPULS__SHIFT 0x3
+#define NBIF_PERF_COM_COUNT_ENABLE__LEGACY_OUT_REALTIME_SEL__SHIFT 0x4
+#define NBIF_PERF_COM_COUNT_ENABLE__NBIF_COM_COUNT_ENABLE_MASK 0x00000001L
+#define NBIF_PERF_COM_COUNT_ENABLE__START_COUNT_NOPULS_MASK 0x00000008L
+#define NBIF_PERF_COM_COUNT_ENABLE__LEGACY_OUT_REALTIME_SEL_MASK 0x00000010L
+//NBIF_BX_PERF_CNT_FSM
+#define NBIF_BX_PERF_CNT_FSM__BX_GLOBAL_SHADOW_TGL_DELAY_COUNT__SHIFT 0x0
+#define NBIF_BX_PERF_CNT_FSM__BX_GLOBAL_PERF_RESET_TGL_DELAY_COUNT__SHIFT 0x4
+#define NBIF_BX_PERF_CNT_FSM__BX_GLOBAL_PERF_RESET_TGL_DELAY_EN__SHIFT 0x8
+#define NBIF_BX_PERF_CNT_FSM__BX_PRE_FLD_GLOBAL_SHADOW_WR__SHIFT 0x9
+#define NBIF_BX_PERF_CNT_FSM__BX_PERF_CNT_DONE__SHIFT 0xa
+#define NBIF_BX_PERF_CNT_FSM__BX_GLOBAL_SHADOW_TGL_DELAY_COUNT_MASK 0x0000000FL
+#define NBIF_BX_PERF_CNT_FSM__BX_GLOBAL_PERF_RESET_TGL_DELAY_COUNT_MASK 0x000000F0L
+#define NBIF_BX_PERF_CNT_FSM__BX_GLOBAL_PERF_RESET_TGL_DELAY_EN_MASK 0x00000100L
+#define NBIF_BX_PERF_CNT_FSM__BX_PRE_FLD_GLOBAL_SHADOW_WR_MASK 0x00000200L
+#define NBIF_BX_PERF_CNT_FSM__BX_PERF_CNT_DONE_MASK 0x00000400L
+//NBIF_COM_COUNT_VALUE
+#define NBIF_COM_COUNT_VALUE__NBIF_COM_COUNT_VALUE__SHIFT 0x0
+#define NBIF_COM_COUNT_VALUE__NBIF_COM_COUNT_VALUE_MASK 0xFFFFFFFFL
+//BIFC_A2S_SDP_PORT_CTRL
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_H__SHIFT 0x8
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_DIS__SHIFT 0xc
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_MASK 0x000000FFL
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_H_MASK 0x00000F00L
+#define BIFC_A2S_SDP_PORT_CTRL__SDP_DISCON_DIS_MASK 0x00001000L
+//BIFC_A2S_CNTL_SW0
+#define BIFC_A2S_CNTL_SW0__RDRSP_ERRMAP__SHIFT 0x0
+#define BIFC_A2S_CNTL_SW0__RDRSP_SEL_MODE__SHIFT 0x2
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_ENABLE__SHIFT 0x5
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_VALUE__SHIFT 0x6
+#define BIFC_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define BIFC_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define BIFC_A2S_CNTL_SW0__WRR_RD_WEIGHT__SHIFT 0x10
+#define BIFC_A2S_CNTL_SW0__WRR_WR_WEIGHT__SHIFT 0x18
+#define BIFC_A2S_CNTL_SW0__RDRSP_ERRMAP_MASK 0x00000003L
+#define BIFC_A2S_CNTL_SW0__RDRSP_SEL_MODE_MASK 0x0000001CL
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_ENABLE_MASK 0x00000020L
+#define BIFC_A2S_CNTL_SW0__STATIC_VC_VALUE_MASK 0x000001C0L
+#define BIFC_A2S_CNTL_SW0__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define BIFC_A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define BIFC_A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define BIFC_A2S_CNTL_SW0__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define BIFC_A2S_CNTL_SW0__WRR_WR_WEIGHT_MASK 0xFF000000L
+//BIFC_A2S_MISC_CNTL
+#define BIFC_A2S_MISC_CNTL__BLKLVL_FOR_MSG__SHIFT 0x0
+#define BIFC_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS__SHIFT 0x2
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE__SHIFT 0x3
+#define BIFC_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN__SHIFT 0x4
+#define BIFC_A2S_MISC_CNTL__RSP_REORDER_DIS__SHIFT 0x5
+#define BIFC_A2S_MISC_CNTL__WRRSP_ACCUM_SEL__SHIFT 0x6
+#define BIFC_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x7
+#define BIFC_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS__SHIFT 0x8
+#define BIFC_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY__SHIFT 0x9
+#define BIFC_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0xa
+#define BIFC_A2S_MISC_CNTL__WR_TAG_SET_MIN__SHIFT 0x10
+#define BIFC_A2S_MISC_CNTL__RD_TAG_SET_MIN__SHIFT 0x15
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_MODE__SHIFT 0x1a
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE__SHIFT 0x1b
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_EN__SHIFT 0x1c
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_VALUE__SHIFT 0x1d
+#define BIFC_A2S_MISC_CNTL__BLKLVL_FOR_MSG_MASK 0x00000003L
+#define BIFC_A2S_MISC_CNTL__RESERVE_2_CRED_FOR_NPWR_REQ_DIS_MASK 0x00000004L
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_SIZE_MODE_MASK 0x00000008L
+#define BIFC_A2S_MISC_CNTL__FORCE_RSP_REORDER_EN_MASK 0x00000010L
+#define BIFC_A2S_MISC_CNTL__RSP_REORDER_DIS_MASK 0x00000020L
+#define BIFC_A2S_MISC_CNTL__WRRSP_ACCUM_SEL_MASK 0x00000040L
+#define BIFC_A2S_MISC_CNTL__WRRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000080L
+#define BIFC_A2S_MISC_CNTL__RDRSP_TAGFIFO_CONT_RD_DIS_MASK 0x00000100L
+#define BIFC_A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY_MASK 0x00000200L
+#define BIFC_A2S_MISC_CNTL__INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000400L
+#define BIFC_A2S_MISC_CNTL__WR_TAG_SET_MIN_MASK 0x001F0000L
+#define BIFC_A2S_MISC_CNTL__RD_TAG_SET_MIN_MASK 0x03E00000L
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_MODE_MASK 0x04000000L
+#define BIFC_A2S_MISC_CNTL__WRR_LRG_COUNTER_MODE_MASK 0x08000000L
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_EN_MASK 0x10000000L
+#define BIFC_A2S_MISC_CNTL__BYPASS_OVERRIDE_VALUE_MASK 0x20000000L
+//BIFC_A2S_TAG_ALLOC_0
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR__SHIFT 0x0
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD__SHIFT 0x8
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR__SHIFT 0x10
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR_MASK 0x000000FFL
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD_MASK 0x0000FF00L
+#define BIFC_A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR_MASK 0x00FF0000L
+//BIFC_A2S_TAG_ALLOC_1
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR__SHIFT 0x0
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR__SHIFT 0x10
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD__SHIFT 0x18
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR_MASK 0x000000FFL
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR_MASK 0x00FF0000L
+#define BIFC_A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD_MASK 0xFF000000L
+
+
+// addressBlock: nbif_bif_ras_bif_ras_regblk
+//BIFL_RAS_CENTRAL_CNTL
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_HST_STALL_DIS__SHIFT 0x1b
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_STALL_DIS__SHIFT 0x1c
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_DIS__SHIFT 0x1d
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_INTR_DIS__SHIFT 0x1e
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_LINKDIS_TRIG_EGRESS_STALL_DIS__SHIFT 0x1f
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_HST_STALL_DIS_MASK 0x08000000L
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_STALL_DIS_MASK 0x10000000L
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_DIS_MASK 0x20000000L
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_INTR_DIS_MASK 0x40000000L
+#define BIFL_RAS_CENTRAL_CNTL__BIFL_LINKDIS_TRIG_EGRESS_STALL_DIS_MASK 0x80000000L
+//BIFL_RAS_CENTRAL_STATUS
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_EgStall_det__SHIFT 0x0
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_ErrEvent_det__SHIFT 0x1
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_EgStall_det__SHIFT 0x2
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_ErrEvent_det__SHIFT 0x3
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_ErrEvent_Recv__SHIFT 0x1d
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_Intr_Recv__SHIFT 0x1e
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_LinkDis_Recv__SHIFT 0x1f
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_EgStall_det_MASK 0x00000001L
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_ErrEvent_det_MASK 0x00000002L
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_EgStall_det_MASK 0x00000004L
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_ErrEvent_det_MASK 0x00000008L
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_ErrEvent_Recv_MASK 0x20000000L
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_Intr_Recv_MASK 0x40000000L
+#define BIFL_RAS_CENTRAL_STATUS__BIFL_LinkDis_Recv_MASK 0x80000000L
+//BIFL_RAS_LEAF0_CTRL
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
+#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
+#define BIFL_RAS_LEAF0_CTRL__POISON_STALL_EN__SHIFT 0x2
+#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
+#define BIFL_RAS_LEAF0_CTRL__PARITY_STALL_EN__SHIFT 0x4
+#define BIFL_RAS_LEAF0_CTRL__RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define BIFL_RAS_LEAF0_CTRL__RCVERREVENT_STALL_EN__SHIFT 0x6
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
+#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
+#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
+#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define BIFL_RAS_LEAF0_CTRL__TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define BIFL_RAS_LEAF0_CTRL__RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define BIFL_RAS_LEAF0_CTRL__UCP_EN__SHIFT 0x15
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
+#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
+#define BIFL_RAS_LEAF0_CTRL__POISON_STALL_EN_MASK 0x00000004L
+#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
+#define BIFL_RAS_LEAF0_CTRL__PARITY_STALL_EN_MASK 0x00000010L
+#define BIFL_RAS_LEAF0_CTRL__RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define BIFL_RAS_LEAF0_CTRL__RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
+#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define BIFL_RAS_LEAF0_CTRL__TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define BIFL_RAS_LEAF0_CTRL__RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define BIFL_RAS_LEAF0_CTRL__UCP_EN_MASK 0x00200000L
+//BIFL_RAS_LEAF1_CTRL
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
+#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
+#define BIFL_RAS_LEAF1_CTRL__POISON_STALL_EN__SHIFT 0x2
+#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
+#define BIFL_RAS_LEAF1_CTRL__PARITY_STALL_EN__SHIFT 0x4
+#define BIFL_RAS_LEAF1_CTRL__RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define BIFL_RAS_LEAF1_CTRL__RCVERREVENT_STALL_EN__SHIFT 0x6
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
+#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
+#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
+#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define BIFL_RAS_LEAF1_CTRL__TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define BIFL_RAS_LEAF1_CTRL__RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define BIFL_RAS_LEAF1_CTRL__UCP_EN__SHIFT 0x15
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
+#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
+#define BIFL_RAS_LEAF1_CTRL__POISON_STALL_EN_MASK 0x00000004L
+#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
+#define BIFL_RAS_LEAF1_CTRL__PARITY_STALL_EN_MASK 0x00000010L
+#define BIFL_RAS_LEAF1_CTRL__RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define BIFL_RAS_LEAF1_CTRL__RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
+#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define BIFL_RAS_LEAF1_CTRL__TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define BIFL_RAS_LEAF1_CTRL__RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define BIFL_RAS_LEAF1_CTRL__UCP_EN_MASK 0x00200000L
+//BIFL_RAS_LEAF2_CTRL
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
+#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
+#define BIFL_RAS_LEAF2_CTRL__POISON_STALL_EN__SHIFT 0x2
+#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
+#define BIFL_RAS_LEAF2_CTRL__PARITY_STALL_EN__SHIFT 0x4
+#define BIFL_RAS_LEAF2_CTRL__RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define BIFL_RAS_LEAF2_CTRL__RCVERREVENT_STALL_EN__SHIFT 0x6
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
+#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
+#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
+#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define BIFL_RAS_LEAF2_CTRL__TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define BIFL_RAS_LEAF2_CTRL__RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define BIFL_RAS_LEAF2_CTRL__UCP_EN__SHIFT 0x15
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
+#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
+#define BIFL_RAS_LEAF2_CTRL__POISON_STALL_EN_MASK 0x00000004L
+#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
+#define BIFL_RAS_LEAF2_CTRL__PARITY_STALL_EN_MASK 0x00000010L
+#define BIFL_RAS_LEAF2_CTRL__RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define BIFL_RAS_LEAF2_CTRL__RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
+#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define BIFL_RAS_LEAF2_CTRL__TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define BIFL_RAS_LEAF2_CTRL__RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define BIFL_RAS_LEAF2_CTRL__UCP_EN_MASK 0x00200000L
+//BIFL_RAS_LEAF3_CTRL
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0
+#define BIFL_RAS_LEAF3_CTRL__POISON_ERREVENT_EN__SHIFT 0x1
+#define BIFL_RAS_LEAF3_CTRL__POISON_STALL_EN__SHIFT 0x2
+#define BIFL_RAS_LEAF3_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3
+#define BIFL_RAS_LEAF3_CTRL__PARITY_STALL_EN__SHIFT 0x4
+#define BIFL_RAS_LEAF3_CTRL__RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define BIFL_RAS_LEAF3_CTRL__RCVERREVENT_STALL_EN__SHIFT 0x6
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8
+#define BIFL_RAS_LEAF3_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa
+#define BIFL_RAS_LEAF3_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
+#define BIFL_RAS_LEAF3_CTRL__PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define BIFL_RAS_LEAF3_CTRL__POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define BIFL_RAS_LEAF3_CTRL__TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define BIFL_RAS_LEAF3_CTRL__RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define BIFL_RAS_LEAF3_CTRL__UCP_EN__SHIFT 0x15
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L
+#define BIFL_RAS_LEAF3_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L
+#define BIFL_RAS_LEAF3_CTRL__POISON_STALL_EN_MASK 0x00000004L
+#define BIFL_RAS_LEAF3_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L
+#define BIFL_RAS_LEAF3_CTRL__PARITY_STALL_EN_MASK 0x00000010L
+#define BIFL_RAS_LEAF3_CTRL__RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define BIFL_RAS_LEAF3_CTRL__RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define BIFL_RAS_LEAF3_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define BIFL_RAS_LEAF3_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define BIFL_RAS_LEAF3_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
+#define BIFL_RAS_LEAF3_CTRL__PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define BIFL_RAS_LEAF3_CTRL__POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define BIFL_RAS_LEAF3_CTRL__TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define BIFL_RAS_LEAF3_CTRL__RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define BIFL_RAS_LEAF3_CTRL__UCP_EN_MASK 0x00200000L
+//BIFL_RAS_LEAF0_STATUS
+#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_RECV__SHIFT 0x0
+#define BIFL_RAS_LEAF0_STATUS__POISON_ERR_DET__SHIFT 0x1
+#define BIFL_RAS_LEAF0_STATUS__PARITY_ERR_DET__SHIFT 0x2
+#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
+#define BIFL_RAS_LEAF0_STATUS__POISON_ERR_DET_MASK 0x00000002L
+#define BIFL_RAS_LEAF0_STATUS__PARITY_ERR_DET_MASK 0x00000004L
+#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//BIFL_RAS_LEAF1_STATUS
+#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_RECV__SHIFT 0x0
+#define BIFL_RAS_LEAF1_STATUS__POISON_ERR_DET__SHIFT 0x1
+#define BIFL_RAS_LEAF1_STATUS__PARITY_ERR_DET__SHIFT 0x2
+#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
+#define BIFL_RAS_LEAF1_STATUS__POISON_ERR_DET_MASK 0x00000002L
+#define BIFL_RAS_LEAF1_STATUS__PARITY_ERR_DET_MASK 0x00000004L
+#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//BIFL_RAS_LEAF2_STATUS
+#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_RECV__SHIFT 0x0
+#define BIFL_RAS_LEAF2_STATUS__POISON_ERR_DET__SHIFT 0x1
+#define BIFL_RAS_LEAF2_STATUS__PARITY_ERR_DET__SHIFT 0x2
+#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
+#define BIFL_RAS_LEAF2_STATUS__POISON_ERR_DET_MASK 0x00000002L
+#define BIFL_RAS_LEAF2_STATUS__PARITY_ERR_DET_MASK 0x00000004L
+#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//BIFL_RAS_LEAF3_STATUS
+#define BIFL_RAS_LEAF3_STATUS__ERR_EVENT_RECV__SHIFT 0x0
+#define BIFL_RAS_LEAF3_STATUS__POISON_ERR_DET__SHIFT 0x1
+#define BIFL_RAS_LEAF3_STATUS__PARITY_ERR_DET__SHIFT 0x2
+#define BIFL_RAS_LEAF3_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define BIFL_RAS_LEAF3_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define BIFL_RAS_LEAF3_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define BIFL_RAS_LEAF3_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define BIFL_RAS_LEAF3_STATUS__ERR_EVENT_RECV_MASK 0x00000001L
+#define BIFL_RAS_LEAF3_STATUS__POISON_ERR_DET_MASK 0x00000002L
+#define BIFL_RAS_LEAF3_STATUS__PARITY_ERR_DET_MASK 0x00000004L
+#define BIFL_RAS_LEAF3_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define BIFL_RAS_LEAF3_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define BIFL_RAS_LEAF3_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define BIFL_RAS_LEAF3_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//BIFL_IOHUB_RAS_IH_CNTL
+#define BIFL_IOHUB_RAS_IH_CNTL__BIFL_RAS_IH_INTR_EN__SHIFT 0x0
+#define BIFL_IOHUB_RAS_IH_CNTL__BIFL_RAS_IH_INTR_EN_MASK 0x00000001L
+//BIFL_RAS_VWR_FROM_IOHUB
+#define BIFL_RAS_VWR_FROM_IOHUB__BIFL_RAS_IH_INTR_TRIG__SHIFT 0x0
+#define BIFL_RAS_VWR_FROM_IOHUB__BIFL_RAS_IH_INTR_TRIG_MASK 0x00000001L
+
+
+// addressBlock: nbif_rcc_dwn_dev0_BIFDEC1
+//RCC_DWN_DEV0_2_DN_PCIE_RESERVED
+#define RCC_DWN_DEV0_2_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_2_DN_PCIE_SCRATCH
+#define RCC_DWN_DEV0_2_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_DWN_DEV0_2_DN_PCIE_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L
+#define RCC_DWN_DEV0_2_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define RCC_DWN_DEV0_2_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+//RCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2
+#define RCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define RCC_DWN_DEV0_2_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+//RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define RCC_DWN_DEV0_2_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L
+//RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_DWN_DEV0_2_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+//RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define RCC_DWN_DEV0_2_DN_PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+
+
+// addressBlock: nbif_rcc_dwnp_dev0_BIFDEC1
+//RCC_DWNP_DEV0_2_PCIE_ERR_CNTL
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR__SHIFT 0x12
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR__SHIFT 0x13
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR__SHIFT 0x14
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__ERR_CORR_RCVD_CLR_MASK 0x00040000L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__NONFATAL_ERR_RCVD_CLR_MASK 0x00080000L
+#define RCC_DWNP_DEV0_2_PCIE_ERR_CNTL__FATAL_ERR_RCVD_CLR_MASK 0x00100000L
+//RCC_DWNP_DEV0_2_PCIE_RX_CNTL
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L
+#define RCC_DWNP_DEV0_2_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_DWNP_DEV0_2_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+//RCC_DWNP_DEV0_2_PCIE_LC_CNTL2
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__DL_STATE_CHANGED_NOTIFICATION_DIS_MASK 0x00000001L
+#define RCC_DWNP_DEV0_2_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+//RCC_DWNP_DEV0_2_PCIEP_STRAP_MISC
+#define RCC_DWNP_DEV0_2_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa
+#define RCC_DWNP_DEV0_2_PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L
+//RCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP
+#define RCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0
+#define RCC_DWNP_DEV0_2_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_ep_dev0_BIFDEC1
+//RCC_EP_DEV0_2_EP_PCIE_SCRATCH
+#define RCC_EP_DEV0_2_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_2_EP_PCIE_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__MFIOV_GFX_F0_FLR_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__MFIOV_GFX_F0_FLR_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define RCC_EP_DEV0_2_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+//RCC_EP_DEV0_2_EP_PCIE_INT_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+//RCC_EP_DEV0_2_EP_PCIE_INT_STATUS
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define RCC_EP_DEV0_2_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_F0_MASK 0x00000080L
+//RCC_EP_DEV0_2_EP_PCIE_RX_CNTL2
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+//RCC_EP_DEV0_2_EP_PCIE_BUS_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+//RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L
+#define RCC_EP_DEV0_2_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN5_HIDDEN_REG_MASK 0x00000010L
+//RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_1_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+//RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define RCC_EP_DEV0_2_EP_PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+//RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L
+//RCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL
+//RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL
+#define RCC_EP_DEV0_2_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0
+#define RCC_EP_DEV0_2_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL
+//RCC_EP_DEV0_2_EP_PCIE_PME_CONTROL
+#define RCC_EP_DEV0_2_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL
+//RCC_EP_DEV0_2_EP_PCIEP_RESERVED
+#define RCC_EP_DEV0_2_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL
+//RCC_EP_DEV0_2_EP_PCIE_TX_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L
+#define RCC_EP_DEV0_2_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L
+//RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L
+#define RCC_EP_DEV0_2_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L
+//RCC_EP_DEV0_2_EP_PCIE_RX_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define RCC_EP_DEV0_2_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+//RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define RCC_EP_DEV0_2_EP_PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+
+
+// addressBlock: nbif_rcc_dev0_BIFDEC1
+//RCC_DEV0_1_RCC_ERR_INT_CNTL
+#define RCC_DEV0_1_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN_MASK 0x00000001L
+//RCC_DEV0_1_RCC_BACO_CNTL_MISC
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS__SHIFT 0x1
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS_MASK 0x00000002L
+//RCC_DEV0_1_RCC_RESET_EN
+#define RCC_DEV0_1_RCC_RESET_EN__DB_APER_RESET_EN__SHIFT 0xf
+#define RCC_DEV0_1_RCC_RESET_EN__DB_APER_RESET_EN_MASK 0x00008000L
+//RCC_DEV0_2_RCC_VDM_SUPPORT
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L
+//RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L
+//RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L
+#define RCC_DEV0_2_RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L
+//RCC_DEV0_1_RCC_GPUIOV_REGION
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__LFB_REGION__SHIFT 0x0
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__MAX_REGION__SHIFT 0x4
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__LFB_REGION_MASK 0x0000000FL
+#define RCC_DEV0_1_RCC_GPUIOV_REGION__MAX_REGION_MASK 0x000000F0L
+//RCC_DEV0_1_RCC_GPU_HOSTVM_EN
+#define RCC_DEV0_1_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_GPU_HOSTVM_EN__GPU_HOSTVM_EN_MASK 0x00000001L
+//RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN__SHIFT 0x1
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__RCC_CONSOLE_IOV_MODE_ENABLE_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_MODE_CNTL__MULTIOS_IH_SUPPORT_EN_MASK 0x00000002L
+//RCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_FIRST_VF_OFFSET__CONSOLE_IOV_FIRST_VF_OFFSET_MASK 0xFFFFL
+//RCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONSOLE_IOV_VF_STRIDE__CONSOLE_IOV_VF_STRIDE_MASK 0xFFFFL
+//RCC_DEV0_1_RCC_PEER_REG_RANGE0
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE0__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_1_RCC_PEER_REG_RANGE1
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__START_ADDR__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__END_ADDR__SHIFT 0x10
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__START_ADDR_MASK 0x0000FFFFL
+#define RCC_DEV0_1_RCC_PEER_REG_RANGE1__END_ADDR_MASK 0xFFFF0000L
+//RCC_DEV0_2_RCC_BUS_CNTL
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8
+#define RCC_DEV0_2_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT__SHIFT 0xc
+#define RCC_DEV0_2_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC__SHIFT 0xd
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L
+#define RCC_DEV0_2_RCC_BUS_CNTL__ROOT_ERR_LOG_ON_EVENT_MASK 0x00001000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__HOST_CPL_POISONED_LOG_IN_RC_MASK 0x00002000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L
+#define RCC_DEV0_2_RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L
+//RCC_DEV0_1_RCC_CONFIG_CNTL
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x2
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x3
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
+#define RCC_DEV0_1_RCC_CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
+//RCC_DEV0_1_RCC_CONFIG_F0_BASE
+#define RCC_DEV0_1_RCC_CONFIG_F0_BASE__F0_BASE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_F0_BASE__F0_BASE_MASK 0xFFFFFFFFL
+//RCC_DEV0_1_RCC_CONFIG_APER_SIZE
+#define RCC_DEV0_1_RCC_CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_APER_SIZE__APER_SIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE
+#define RCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x07FFFFFFL
+//RCC_DEV0_1_RCC_XDMA_LO
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x0
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x7FFFFFFFL
+#define RCC_DEV0_1_RCC_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_XDMA_HI
+#define RCC_DEV0_1_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x0
+#define RCC_DEV0_1_RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x7FFFFFFFL
+//RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L
+#define RCC_DEV0_2_RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L
+//RCC_DEV0_1_RCC_BUSNUM_CNTL1
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL1__ID_MASK__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL1__ID_MASK_MASK 0x000000FFL
+//RCC_DEV0_1_RCC_BUSNUM_LIST0
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID0__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID1__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID2__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID3__SHIFT 0x18
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID0_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID1_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID2_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST0__ID3_MASK 0xFF000000L
+//RCC_DEV0_1_RCC_BUSNUM_LIST1
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID4__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID5__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID6__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID7__SHIFT 0x18
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID4_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID5_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID6_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_BUSNUM_LIST1__ID7_MASK 0xFF000000L
+//RCC_DEV0_1_RCC_BUSNUM_CNTL2
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x0
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x8
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x10
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x11
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
+#define RCC_DEV0_1_RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
+//RCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM
+#define RCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x0
+#define RCC_DEV0_1_RCC_CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
+//RCC_DEV0_1_RCC_HOST_BUSNUM
+#define RCC_DEV0_1_RCC_HOST_BUSNUM__HOST_ID__SHIFT 0x0
+#define RCC_DEV0_1_RCC_HOST_BUSNUM__HOST_ID_MASK 0x0000FFFFL
+//RCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000FFFFFL
+//RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x0
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x1f
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000FFFFFL
+#define RCC_DEV0_1_RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
+//RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0__SHIFT 0x0
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1__SHIFT 0x8
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2__SHIFT 0x10
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3__SHIFT 0x18
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID0_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID1_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID2_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST0__DEVFUNC_ID3_MASK 0xFF000000L
+//RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4__SHIFT 0x0
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5__SHIFT 0x8
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6__SHIFT 0x10
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7__SHIFT 0x18
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID4_MASK 0x000000FFL
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID5_MASK 0x0000FF00L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID6_MASK 0x00FF0000L
+#define RCC_DEV0_1_RCC_DEVFUNCNUM_LIST1__DEVFUNC_ID7_MASK 0xFF000000L
+//RCC_DEV0_2_RCC_DEV0_LINK_CNTL
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT__SHIFT 0x0
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY__SHIFT 0x8
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS__SHIFT 0x10
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS__SHIFT 0x11
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_EXIT_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__LINK_DOWN_ENTRY_MASK 0x00000100L
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_SRB_RST_TLS_DIS_MASK 0x00010000L
+#define RCC_DEV0_2_RCC_DEV0_LINK_CNTL__SWUS_LDN_RST_TLS_DIS_MASK 0x00020000L
+//RCC_DEV0_2_RCC_CMN_LINK_CNTL
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L
+#define RCC_DEV0_2_RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L
+//RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL
+#define RCC_DEV0_2_RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L
+//RCC_DEV0_2_RCC_LTR_LSWITCH_CNTL
+#define RCC_DEV0_2_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0
+#define RCC_DEV0_2_RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL
+//RCC_DEV0_2_RCC_MH_ARB_CNTL
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L
+#define RCC_DEV0_2_RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL
+
+
+// addressBlock: nbif_bif_bx_SYSDEC
+//BIF_BX1_PCIE_INDEX
+#define BIF_BX1_PCIE_INDEX__PCIE_INDEX__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX__PCIE_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_DATA
+#define BIF_BX1_PCIE_DATA__PCIE_DATA__SHIFT 0x0
+#define BIF_BX1_PCIE_DATA__PCIE_DATA_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_INDEX2
+#define BIF_BX1_PCIE_INDEX2__PCIE_INDEX2__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX2__PCIE_INDEX2_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_DATA2
+#define BIF_BX1_PCIE_DATA2__PCIE_DATA2__SHIFT 0x0
+#define BIF_BX1_PCIE_DATA2__PCIE_DATA2_MASK 0xFFFFFFFFL
+//BIF_BX1_PCIE_INDEX_HI
+#define BIF_BX1_PCIE_INDEX_HI__PCIE_INDEX_HI__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX_HI__PCIE_INDEX_HI_MASK 0x000000FFL
+//BIF_BX1_PCIE_INDEX2_HI
+#define BIF_BX1_PCIE_INDEX2_HI__PCIE_INDEX2_HI__SHIFT 0x0
+#define BIF_BX1_PCIE_INDEX2_HI__PCIE_INDEX2_HI_MASK 0x000000FFL
+//BIF_BX1_SBIOS_SCRATCH_0
+#define BIF_BX1_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_0__SBIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_1
+#define BIF_BX1_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_1__SBIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_2
+#define BIF_BX1_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_2__SBIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_3
+#define BIF_BX1_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_3__SBIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_0
+#define BIF_BX1_BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_1
+#define BIF_BX1_BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_2
+#define BIF_BX1_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_3
+#define BIF_BX1_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_4
+#define BIF_BX1_BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_5
+#define BIF_BX1_BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_6
+#define BIF_BX1_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_7
+#define BIF_BX1_BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_8
+#define BIF_BX1_BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_9
+#define BIF_BX1_BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_10
+#define BIF_BX1_BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_11
+#define BIF_BX1_BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_12
+#define BIF_BX1_BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_13
+#define BIF_BX1_BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_14
+#define BIF_BX1_BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_BIOS_SCRATCH_15
+#define BIF_BX1_BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_RLC_INTR_CNTL
+//BIF_BX1_BIF_VCE_INTR_CNTL
+//BIF_BX1_BIF_UVD_INTR_CNTL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR1
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR2
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR3
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR4
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR5
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR6
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ADDR7
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7_MASK 0x000FFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_CNTL
+#define BIF_BX1_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_CNTL__CAM_ENABLE_MASK 0x000000FFL
+//BIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL
+#define BIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL_MASK 0xFFFFFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_ONE_CPL
+#define BIF_BX1_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL
+#define BIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL__SHIFT 0x0
+#define BIF_BX1_GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_0
+#define BIF_BX1_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_0__DRIVER_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_1
+#define BIF_BX1_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_1__DRIVER_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_2
+#define BIF_BX1_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_2__DRIVER_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_3
+#define BIF_BX1_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_3__DRIVER_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_4
+#define BIF_BX1_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_4__DRIVER_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_5
+#define BIF_BX1_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_5__DRIVER_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_6
+#define BIF_BX1_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_6__DRIVER_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_7
+#define BIF_BX1_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_7__DRIVER_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_8
+#define BIF_BX1_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_8__DRIVER_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_9
+#define BIF_BX1_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_9__DRIVER_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_10
+#define BIF_BX1_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_10__DRIVER_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_11
+#define BIF_BX1_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_11__DRIVER_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_12
+#define BIF_BX1_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_12__DRIVER_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_13
+#define BIF_BX1_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_13__DRIVER_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_14
+#define BIF_BX1_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_14__DRIVER_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_DRIVER_SCRATCH_15
+#define BIF_BX1_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_DRIVER_SCRATCH_15__DRIVER_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_0
+#define BIF_BX1_FW_SCRATCH_0__FW_SCRATCH_0__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_0__FW_SCRATCH_0_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_1
+#define BIF_BX1_FW_SCRATCH_1__FW_SCRATCH_1__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_1__FW_SCRATCH_1_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_2
+#define BIF_BX1_FW_SCRATCH_2__FW_SCRATCH_2__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_2__FW_SCRATCH_2_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_3
+#define BIF_BX1_FW_SCRATCH_3__FW_SCRATCH_3__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_3__FW_SCRATCH_3_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_4
+#define BIF_BX1_FW_SCRATCH_4__FW_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_4__FW_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_5
+#define BIF_BX1_FW_SCRATCH_5__FW_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_5__FW_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_6
+#define BIF_BX1_FW_SCRATCH_6__FW_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_6__FW_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_7
+#define BIF_BX1_FW_SCRATCH_7__FW_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_7__FW_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_8
+#define BIF_BX1_FW_SCRATCH_8__FW_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_8__FW_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_9
+#define BIF_BX1_FW_SCRATCH_9__FW_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_9__FW_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_10
+#define BIF_BX1_FW_SCRATCH_10__FW_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_10__FW_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_11
+#define BIF_BX1_FW_SCRATCH_11__FW_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_11__FW_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_12
+#define BIF_BX1_FW_SCRATCH_12__FW_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_12__FW_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_13
+#define BIF_BX1_FW_SCRATCH_13__FW_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_13__FW_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_14
+#define BIF_BX1_FW_SCRATCH_14__FW_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_14__FW_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_FW_SCRATCH_15
+#define BIF_BX1_FW_SCRATCH_15__FW_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_FW_SCRATCH_15__FW_SCRATCH_15_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_4
+#define BIF_BX1_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_4__SBIOS_SCRATCH_4_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_5
+#define BIF_BX1_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_5__SBIOS_SCRATCH_5_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_6
+#define BIF_BX1_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_6__SBIOS_SCRATCH_6_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_7
+#define BIF_BX1_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_7__SBIOS_SCRATCH_7_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_8
+#define BIF_BX1_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_8__SBIOS_SCRATCH_8_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_9
+#define BIF_BX1_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_9__SBIOS_SCRATCH_9_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_10
+#define BIF_BX1_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_10__SBIOS_SCRATCH_10_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_11
+#define BIF_BX1_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_11__SBIOS_SCRATCH_11_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_12
+#define BIF_BX1_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_12__SBIOS_SCRATCH_12_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_13
+#define BIF_BX1_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_13__SBIOS_SCRATCH_13_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_14
+#define BIF_BX1_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_14__SBIOS_SCRATCH_14_MASK 0xFFFFFFFFL
+//BIF_BX1_SBIOS_SCRATCH_15
+#define BIF_BX1_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15__SHIFT 0x0
+#define BIF_BX1_SBIOS_SCRATCH_15__SBIOS_SCRATCH_15_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_bif_bx_pf_SYSPFVFDEC
+//BIF_BX_PF1_MM_INDEX
+#define BIF_BX_PF1_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_PF1_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_PF1_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_PF1_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_PF1_MM_DATA
+#define BIF_BX_PF1_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MM_INDEX_HI
+#define BIF_BX_PF1_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_PF1_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_RSMU_INDEX
+#define BIF_BX_PF1_RSMU_INDEX__RSMU_INDEX__SHIFT 0x0
+#define BIF_BX_PF1_RSMU_INDEX__RSMU_INDEX_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_RSMU_DATA
+#define BIF_BX_PF1_RSMU_DATA__RSMU_DATA__SHIFT 0x0
+#define BIF_BX_PF1_RSMU_DATA__RSMU_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_bif_bx_BIFDEC1
+//BIF_BX1_CC_BIF_BX_STRAP0
+#define BIF_BX1_CC_BIF_BX_STRAP0__STRAP_RESERVED__SHIFT 0x19
+#define BIF_BX1_CC_BIF_BX_STRAP0__STRAP_RESERVED_MASK 0xFE000000L
+//BIF_BX1_CC_BIF_BX_PINSTRAP0
+//BIF_BX1_BIF_MM_INDACCESS_CNTL
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__WRITE_DIS__SHIFT 0x0
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__WRITE_DIS_MASK 0x00000001L
+#define BIF_BX1_BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x00000002L
+//BIF_BX1_BUS_CNTL
+#define BIF_BX1_BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x6
+#define BIF_BX1_BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x7
+#define BIF_BX1_BUS_CNTL__SET_AZ_TC__SHIFT 0xa
+#define BIF_BX1_BUS_CNTL__SET_MC_TC__SHIFT 0xd
+#define BIF_BX1_BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x10
+#define BIF_BX1_BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x11
+#define BIF_BX1_BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x12
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS__SHIFT 0x18
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS__SHIFT 0x19
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS__SHIFT 0x1a
+#define BIF_BX1_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS__SHIFT 0x1b
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS__SHIFT 0x1c
+#define BIF_BX1_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN__SHIFT 0x1d
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_WR_EN__SHIFT 0x1e
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_RD_EN__SHIFT 0x1f
+#define BIF_BX1_BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
+#define BIF_BX1_BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
+#define BIF_BX1_BUS_CNTL__SET_AZ_TC_MASK 0x00001C00L
+#define BIF_BX1_BUS_CNTL__SET_MC_TC_MASK 0x0000E000L
+#define BIF_BX1_BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
+#define BIF_BX1_BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
+#define BIF_BX1_BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_DOORBELL_DIS_MASK 0x01000000L
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS_MASK 0x02000000L
+#define BIF_BX1_BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS_MASK 0x04000000L
+#define BIF_BX1_BUS_CNTL__MMDAT_RD_HDP_TRIGGER_HDP_FB_FLUSH_DIS_MASK 0x08000000L
+#define BIF_BX1_BUS_CNTL__HDP_FB_FLUSH_STALL_MMDAT_RD_HDP_DIS_MASK 0x10000000L
+#define BIF_BX1_BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN_MASK 0x20000000L
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_WR_EN_MASK 0x40000000L
+#define BIF_BX1_BUS_CNTL__VGAFB_ZERO_BE_RD_EN_MASK 0x80000000L
+//BIF_BX1_BIF_SCRATCH0
+#define BIF_BX1_BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x0
+#define BIF_BX1_BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_SCRATCH1
+#define BIF_BX1_BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x0
+#define BIF_BX1_BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xFFFFFFFFL
+//BIF_BX1_BX_RESET_EN
+#define BIF_BX1_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN__SHIFT 0x10
+#define BIF_BX1_BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN_MASK 0x00010000L
+//BIF_BX1_MM_CFGREGS_CNTL
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x0
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL__SHIFT 0x6
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x1f
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_CFG_DEV_SEL_MASK 0x000000C0L
+#define BIF_BX1_MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x80000000L
+//BIF_BX1_BX_RESET_CNTL
+#define BIF_BX1_BX_RESET_CNTL__LINK_TRAIN_EN__SHIFT 0x0
+#define BIF_BX1_BX_RESET_CNTL__LINK_TRAIN_EN_MASK 0x00000001L
+//BIF_BX1_INTERRUPT_CNTL
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x0
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x1
+#define BIF_BX1_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x3
+#define BIF_BX1_INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x4
+#define BIF_BX1_INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x8
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN__SHIFT 0xf
+#define BIF_BX1_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN__SHIFT 0x10
+#define BIF_BX1_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS__SHIFT 0x11
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN__SHIFT 0x12
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
+#define BIF_BX1_INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
+#define BIF_BX1_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
+#define BIF_BX1_INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000F0L
+#define BIF_BX1_INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN_MASK 0x00008000L
+#define BIF_BX1_INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN_MASK 0x00010000L
+#define BIF_BX1_INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS_MASK 0x00020000L
+#define BIF_BX1_INTERRUPT_CNTL__BIF_RB_REQ_RELAX_ORDER_EN_MASK 0x00040000L
+//BIF_BX1_INTERRUPT_CNTL2
+#define BIF_BX1_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x0
+#define BIF_BX1_INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xFFFFFFFFL
+//BIF_BX1_CLKREQB_PAD_CNTL
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x0
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x1
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x2
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x3
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x5
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x6
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x7
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x8
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x9
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0xa
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0xb
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0xc
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y__SHIFT 0xd
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
+#define BIF_BX1_CLKREQB_PAD_CNTL__CLKREQB_PAD_Y_MASK 0x00002000L
+//BIF_BX1_BIF_FEATURES_CONTROL_MISC
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x0
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x1
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x2
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x3
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE__SHIFT 0xb
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN__SHIFT 0xc
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS__SHIFT 0xd
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS__SHIFT 0xe
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN__SHIFT 0xf
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT__SHIFT 0x10
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR__SHIFT 0x19
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_MSI_VEC_NOT_ENABLED_MODE_MASK 0x00000800L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN_MASK 0x00001000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS_MASK 0x00002000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__ATOMIC_ONLY_WRITE_DIS_MASK 0x00004000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN_MASK 0x00008000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__HDP_NP_OSTD_LIMIT_MASK 0x01FF0000L
+#define BIF_BX1_BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR_MASK 0x02000000L
+//BIF_BX1_HDP_ATOMIC_CONTROL_MISC
+#define BIF_BX1_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT__SHIFT 0x0
+#define BIF_BX1_HDP_ATOMIC_CONTROL_MISC__HDP_NP_ATOMIC_OSTD_LIMIT_MASK 0x000000FFL
+//BIF_BX1_BIF_DOORBELL_CNTL
+#define BIF_BX1_BIF_DOORBELL_CNTL__SELF_RING_DIS__SHIFT 0x0
+#define BIF_BX1_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS__SHIFT 0x1
+#define BIF_BX1_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN__SHIFT 0x2
+#define BIF_BX1_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS__SHIFT 0x3
+#define BIF_BX1_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT 0x4
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS__SHIFT 0x18
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0__SHIFT 0x19
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1__SHIFT 0x1a
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2__SHIFT 0x1b
+#define BIF_BX1_BIF_DOORBELL_CNTL__SELF_RING_DIS_MASK 0x00000001L
+#define BIF_BX1_BIF_DOORBELL_CNTL__TRANS_CHECK_DIS_MASK 0x00000002L
+#define BIF_BX1_BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN_MASK 0x00000004L
+#define BIF_BX1_BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS_MASK 0x00000008L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK 0x00000010L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS_MASK 0x01000000L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0_MASK 0x02000000L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1_MASK 0x04000000L
+#define BIF_BX1_BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2_MASK 0x08000000L
+//BIF_BX1_BIF_DOORBELL_INT_CNTL
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS__SHIFT 0x0
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT 0x10
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x17
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE__SHIFT 0x19
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT 0x1a
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1c
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1d
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE__SHIFT 0x1e
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE__SHIFT 0x1f
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK 0x00000001L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR_MASK 0x00010000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x00800000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE_MASK 0x02000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK 0x04000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_DB_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x10000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_IOH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x20000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__SET_ATH_RAS_INTR_STATUS_WHEN_RB_ENABLE_MASK 0x40000000L
+#define BIF_BX1_BIF_DOORBELL_INT_CNTL__TIMEOUT_ERR_EVENT_INTERRUPT_ENABLE_MASK 0x80000000L
+//BIF_BX1_BIF_FB_EN
+#define BIF_BX1_BIF_FB_EN__FB_READ_EN__SHIFT 0x0
+#define BIF_BX1_BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
+#define BIF_BX1_BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
+#define BIF_BX1_BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
+//BIF_BX1_BIF_INTR_CNTL
+#define BIF_BX1_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX1_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+//BIF_BX1_BIF_MST_TRANS_PENDING_VF
+#define BIF_BX1_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX1_BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX1_BIF_SLV_TRANS_PENDING_VF
+#define BIF_BX1_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX1_BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING_MASK 0x7FFFFFFFL
+//BIF_BX1_BACO_CNTL
+#define BIF_BX1_BACO_CNTL__BACO_EN__SHIFT 0x0
+#define BIF_BX1_BACO_CNTL__BACO_DUMMY_EN__SHIFT 0x2
+#define BIF_BX1_BACO_CNTL__BACO_POWER_OFF__SHIFT 0x3
+#define BIF_BX1_BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT 0x5
+#define BIF_BX1_BACO_CNTL__BACO_RST_INTR_MASK__SHIFT 0x6
+#define BIF_BX1_BACO_CNTL__BACO_MODE__SHIFT 0x8
+#define BIF_BX1_BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x9
+#define BIF_BX1_BACO_CNTL__PWRGOOD_VDDSOC__SHIFT 0x10
+#define BIF_BX1_BACO_CNTL__BACO_AUTO_EXIT__SHIFT 0x1f
+#define BIF_BX1_BACO_CNTL__BACO_EN_MASK 0x00000001L
+#define BIF_BX1_BACO_CNTL__BACO_DUMMY_EN_MASK 0x00000004L
+#define BIF_BX1_BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
+#define BIF_BX1_BACO_CNTL__BACO_DSTATE_BYPASS_MASK 0x00000020L
+#define BIF_BX1_BACO_CNTL__BACO_RST_INTR_MASK_MASK 0x00000040L
+#define BIF_BX1_BACO_CNTL__BACO_MODE_MASK 0x00000100L
+#define BIF_BX1_BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000200L
+#define BIF_BX1_BACO_CNTL__PWRGOOD_VDDSOC_MASK 0x00010000L
+#define BIF_BX1_BACO_CNTL__BACO_AUTO_EXIT_MASK 0x80000000L
+//BIF_BX1_BIF_BACO_EXIT_TIME0
+#define BIF_BX1_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_BIF_BACO_EXIT_TIMER1
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN__SHIFT 0x18
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR__SHIFT 0x19
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS__SHIFT 0x1a
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH__SHIFT 0x1b
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW__SHIFT 0x1c
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL__SHIFT 0x1d
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS__SHIFT 0x1f
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER_MASK 0x000FFFFFL
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN_MASK 0x01000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR_MASK 0x02000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS_MASK 0x04000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH_MASK 0x08000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW_MASK 0x10000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL_MASK 0x60000000L
+#define BIF_BX1_BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS_MASK 0x80000000L
+//BIF_BX1_BIF_BACO_EXIT_TIMER2
+#define BIF_BX1_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_BIF_BACO_EXIT_TIMER3
+#define BIF_BX1_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_BIF_BACO_EXIT_TIMER4
+#define BIF_BX1_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER__SHIFT 0x0
+#define BIF_BX1_BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER_MASK 0x000FFFFFL
+//BIF_BX1_MEM_TYPE_CNTL
+#define BIF_BX1_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3__SHIFT 0x0
+#define BIF_BX1_MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3_MASK 0x00000001L
+//BIF_BX1_VF_REGWR_EN
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF0__SHIFT 0x0
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF1__SHIFT 0x1
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF2__SHIFT 0x2
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF3__SHIFT 0x3
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF4__SHIFT 0x4
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF5__SHIFT 0x5
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF6__SHIFT 0x6
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF7__SHIFT 0x7
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF8__SHIFT 0x8
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF9__SHIFT 0x9
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF10__SHIFT 0xa
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF11__SHIFT 0xb
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF12__SHIFT 0xc
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF13__SHIFT 0xd
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF14__SHIFT 0xe
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF15__SHIFT 0xf
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF16__SHIFT 0x10
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF17__SHIFT 0x11
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF18__SHIFT 0x12
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF19__SHIFT 0x13
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF20__SHIFT 0x14
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF21__SHIFT 0x15
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF22__SHIFT 0x16
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF23__SHIFT 0x17
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF24__SHIFT 0x18
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF25__SHIFT 0x19
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_REGWR_EN__VF_REGWR_EN_VF30_MASK 0x40000000L
+//BIF_BX1_VF_DOORBELL_EN
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF0__SHIFT 0x0
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF1__SHIFT 0x1
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF2__SHIFT 0x2
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF3__SHIFT 0x3
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF4__SHIFT 0x4
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF5__SHIFT 0x5
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF6__SHIFT 0x6
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF7__SHIFT 0x7
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF8__SHIFT 0x8
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF9__SHIFT 0x9
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF10__SHIFT 0xa
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF11__SHIFT 0xb
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF12__SHIFT 0xc
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF13__SHIFT 0xd
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF14__SHIFT 0xe
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF15__SHIFT 0xf
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF16__SHIFT 0x10
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF17__SHIFT 0x11
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF18__SHIFT 0x12
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF19__SHIFT 0x13
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF20__SHIFT 0x14
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF21__SHIFT 0x15
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF22__SHIFT 0x16
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF23__SHIFT 0x17
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF24__SHIFT 0x18
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF25__SHIFT 0x19
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_RD_LOG_DIS__SHIFT 0x1f
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_EN_VF30_MASK 0x40000000L
+#define BIF_BX1_VF_DOORBELL_EN__VF_DOORBELL_RD_LOG_DIS_MASK 0x80000000L
+//BIF_BX1_VF_FB_EN
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF0__SHIFT 0x0
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF1__SHIFT 0x1
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF2__SHIFT 0x2
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF3__SHIFT 0x3
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF4__SHIFT 0x4
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF5__SHIFT 0x5
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF6__SHIFT 0x6
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF7__SHIFT 0x7
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF8__SHIFT 0x8
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF9__SHIFT 0x9
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF10__SHIFT 0xa
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF11__SHIFT 0xb
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF12__SHIFT 0xc
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF13__SHIFT 0xd
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF14__SHIFT 0xe
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF15__SHIFT 0xf
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF16__SHIFT 0x10
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF17__SHIFT 0x11
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF18__SHIFT 0x12
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF19__SHIFT 0x13
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF20__SHIFT 0x14
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF21__SHIFT 0x15
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF22__SHIFT 0x16
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF23__SHIFT 0x17
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF24__SHIFT 0x18
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF25__SHIFT 0x19
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_FB_EN__VF_FB_EN_VF30_MASK 0x40000000L
+//BIF_BX1_VF_REGWR_STATUS
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF0__SHIFT 0x0
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF1__SHIFT 0x1
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF2__SHIFT 0x2
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF3__SHIFT 0x3
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF4__SHIFT 0x4
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF5__SHIFT 0x5
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF6__SHIFT 0x6
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF7__SHIFT 0x7
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF8__SHIFT 0x8
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF9__SHIFT 0x9
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF10__SHIFT 0xa
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF11__SHIFT 0xb
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF12__SHIFT 0xc
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF13__SHIFT 0xd
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF14__SHIFT 0xe
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF15__SHIFT 0xf
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF16__SHIFT 0x10
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF17__SHIFT 0x11
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF18__SHIFT 0x12
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF19__SHIFT 0x13
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF20__SHIFT 0x14
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF21__SHIFT 0x15
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF22__SHIFT 0x16
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF23__SHIFT 0x17
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF24__SHIFT 0x18
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF25__SHIFT 0x19
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_REGWR_STATUS__VF_REGWR_STATUS_VF30_MASK 0x40000000L
+//BIF_BX1_VF_DOORBELL_STATUS
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF0__SHIFT 0x0
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF1__SHIFT 0x1
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF2__SHIFT 0x2
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF3__SHIFT 0x3
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF4__SHIFT 0x4
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF5__SHIFT 0x5
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF6__SHIFT 0x6
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF7__SHIFT 0x7
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF8__SHIFT 0x8
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF9__SHIFT 0x9
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF10__SHIFT 0xa
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF11__SHIFT 0xb
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF12__SHIFT 0xc
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF13__SHIFT 0xd
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF14__SHIFT 0xe
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF15__SHIFT 0xf
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF16__SHIFT 0x10
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF17__SHIFT 0x11
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF18__SHIFT 0x12
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF19__SHIFT 0x13
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF20__SHIFT 0x14
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF21__SHIFT 0x15
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF22__SHIFT 0x16
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF23__SHIFT 0x17
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF24__SHIFT 0x18
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF25__SHIFT 0x19
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_VF30_MASK 0x40000000L
+//BIF_BX1_VF_FB_STATUS
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF0__SHIFT 0x0
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF1__SHIFT 0x1
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF2__SHIFT 0x2
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF3__SHIFT 0x3
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF4__SHIFT 0x4
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF5__SHIFT 0x5
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF6__SHIFT 0x6
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF7__SHIFT 0x7
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF8__SHIFT 0x8
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF9__SHIFT 0x9
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF10__SHIFT 0xa
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF11__SHIFT 0xb
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF12__SHIFT 0xc
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF13__SHIFT 0xd
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF14__SHIFT 0xe
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF15__SHIFT 0xf
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF16__SHIFT 0x10
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF17__SHIFT 0x11
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF18__SHIFT 0x12
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF19__SHIFT 0x13
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF20__SHIFT 0x14
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF21__SHIFT 0x15
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF22__SHIFT 0x16
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF23__SHIFT 0x17
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF24__SHIFT 0x18
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF25__SHIFT 0x19
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF26__SHIFT 0x1a
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF27__SHIFT 0x1b
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF28__SHIFT 0x1c
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF29__SHIFT 0x1d
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF30__SHIFT 0x1e
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF0_MASK 0x00000001L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF1_MASK 0x00000002L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF2_MASK 0x00000004L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF3_MASK 0x00000008L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF4_MASK 0x00000010L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF5_MASK 0x00000020L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF6_MASK 0x00000040L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF7_MASK 0x00000080L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF8_MASK 0x00000100L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF9_MASK 0x00000200L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF10_MASK 0x00000400L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF11_MASK 0x00000800L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF12_MASK 0x00001000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF13_MASK 0x00002000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF14_MASK 0x00004000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF15_MASK 0x00008000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF16_MASK 0x00010000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF17_MASK 0x00020000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF18_MASK 0x00040000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF19_MASK 0x00080000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF20_MASK 0x00100000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF21_MASK 0x00200000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF22_MASK 0x00400000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF23_MASK 0x00800000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF24_MASK 0x01000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF25_MASK 0x02000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF26_MASK 0x04000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF27_MASK 0x08000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF28_MASK 0x10000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF29_MASK 0x20000000L
+#define BIF_BX1_VF_FB_STATUS__VF_FB_STATUS_VF30_MASK 0x40000000L
+//BIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL
+#define BIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX1_REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX1_REMAP_HDP_REG_FLUSH_CNTL
+#define BIF_BX1_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS__SHIFT 0x2
+#define BIF_BX1_REMAP_HDP_REG_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL
+//BIF_BX1_BIF_RB_CNTL
+#define BIF_BX1_BIF_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define BIF_BX1_BIF_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x9
+#define BIF_BX1_BIF_RB_CNTL__BIF_RB_TRAN__SHIFT 0x11
+#define BIF_BX1_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL__SHIFT 0x19
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY__SHIFT 0x1a
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_ARB_MODE__SHIFT 0x1d
+#define BIF_BX1_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE__SHIFT 0x1e
+#define BIF_BX1_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define BIF_BX1_BIF_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define BIF_BX1_BIF_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define BIF_BX1_BIF_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003E00L
+#define BIF_BX1_BIF_RB_CNTL__BIF_RB_TRAN_MASK 0x00020000L
+#define BIF_BX1_BIF_RB_CNTL__DIS_PROTECT_WHEN_RB_FULL_MASK 0x02000000L
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_FIX_PRIORITY_MASK 0x1C000000L
+#define BIF_BX1_BIF_RB_CNTL__RB_INTR_ARB_MODE_MASK 0x20000000L
+#define BIF_BX1_BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE_MASK 0x40000000L
+#define BIF_BX1_BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//BIF_BX1_BIF_RB_BASE
+#define BIF_BX1_BIF_RB_BASE__ADDR__SHIFT 0x0
+#define BIF_BX1_BIF_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_RB_RPTR
+#define BIF_BX1_BIF_RB_RPTR__OFFSET__SHIFT 0x2
+#define BIF_BX1_BIF_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX1_BIF_RB_WPTR
+#define BIF_BX1_BIF_RB_WPTR__BIF_RB_OVERFLOW__SHIFT 0x0
+#define BIF_BX1_BIF_RB_WPTR__OFFSET__SHIFT 0x2
+#define BIF_BX1_BIF_RB_WPTR__BIF_RB_OVERFLOW_MASK 0x00000001L
+#define BIF_BX1_BIF_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+//BIF_BX1_BIF_RB_WPTR_ADDR_HI
+#define BIF_BX1_BIF_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define BIF_BX1_BIF_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000FFL
+//BIF_BX1_BIF_RB_WPTR_ADDR_LO
+#define BIF_BX1_BIF_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define BIF_BX1_BIF_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//BIF_BX1_MAILBOX_INDEX
+#define BIF_BX1_MAILBOX_INDEX__MAILBOX_INDEX__SHIFT 0x0
+#define BIF_BX1_MAILBOX_INDEX__MAILBOX_INDEX_MASK 0x0000001FL
+//BIF_BX1_BIF_MP1_INTR_CTRL
+#define BIF_BX1_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE__SHIFT 0x0
+#define BIF_BX1_BIF_MP1_INTR_CTRL__BACO_EXIT_DONE_MASK 0x00000001L
+//BIF_BX1_BIF_PERSTB_PAD_CNTL
+#define BIF_BX1_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL_MASK 0x0000FFFFL
+//BIF_BX1_BIF_PX_EN_PAD_CNTL
+#define BIF_BX1_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL_MASK 0x00000FFFL
+//BIF_BX1_BIF_REFPADKIN_PAD_CNTL
+#define BIF_BX1_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL_MASK 0x000000FFL
+//BIF_BX1_BIF_CLKREQB_PAD_CNTL
+#define BIF_BX1_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_MASK 0x7FFFFFFFL
+//BIF_BX1_BIF_PWRBRK_PAD_CNTL
+#define BIF_BX1_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL__SHIFT 0x0
+#define BIF_BX1_BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL_MASK 0x000000FFL
+//BIF_BX1_BIF_WAKEB_PAD_CNTL
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_ITXIMPSEL__SHIFT 0x0
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_ICTFEN__SHIFT 0x1
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IPD__SHIFT 0x2
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IPU__SHIFT 0x3
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IRXEN__SHIFT 0x4
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL0__SHIFT 0x5
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL1__SHIFT 0x6
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_RESERVED__SHIFT 0x7
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_ITXIMPSEL_MASK 0x00000001L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_ICTFEN_MASK 0x00000002L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IPD_MASK 0x00000004L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IPU_MASK 0x00000008L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IRXEN_MASK 0x00000010L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL0_MASK 0x00000020L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL1_MASK 0x00000040L
+#define BIF_BX1_BIF_WAKEB_PAD_CNTL__GPIO33_RESERVED_MASK 0x00000080L
+//BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IPD__SHIFT 0x0
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IPU__SHIFT 0x1
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IRXEN__SHIFT 0x2
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IRXSEL0__SHIFT 0x3
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IRXSEL1__SHIFT 0x4
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_ITXIMPSEL__SHIFT 0x5
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IPD_MASK 0x00000001L
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IPU_MASK 0x00000002L
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IRXEN_MASK 0x00000004L
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IRXSEL0_MASK 0x00000008L
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_IRXSEL1_MASK 0x00000010L
+#define BIF_BX1_BIF_VAUX_PRESENT_PAD_CNTL__GPIO_ITXIMPSEL_MASK 0x00000020L
+//BIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL
+#define BIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL__PCIE_PAR_SAVE_VALID__SHIFT 0x0
+#define BIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL__PCIE_PAR_SAVE_SCRATCH__SHIFT 0x1
+#define BIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL__PCIE_PAR_SAVE_VALID_MASK 0x00000001L
+#define BIF_BX1_PCIE_PAR_SAVE_RESTORE_CNTL__PCIE_PAR_SAVE_SCRATCH_MASK 0xFFFFFFFEL
+//BIF_BX1_BIF_S5_MEM_POWER_CTRL0
+#define BIF_BX1_BIF_S5_MEM_POWER_CTRL0__MEM_POWER_CTRL_S5_31_0__SHIFT 0x0
+#define BIF_BX1_BIF_S5_MEM_POWER_CTRL0__MEM_POWER_CTRL_S5_31_0_MASK 0xFFFFFFFFL
+//BIF_BX1_BIF_S5_MEM_POWER_CTRL1
+#define BIF_BX1_BIF_S5_MEM_POWER_CTRL1__MEM_POWER_CTRL_S5_41_32__SHIFT 0x0
+#define BIF_BX1_BIF_S5_MEM_POWER_CTRL1__MEM_POWER_CTRL_SEL__SHIFT 0xa
+#define BIF_BX1_BIF_S5_MEM_POWER_CTRL1__MEM_POWER_CTRL_S5_41_32_MASK 0x000003FFL
+#define BIF_BX1_BIF_S5_MEM_POWER_CTRL1__MEM_POWER_CTRL_SEL_MASK 0x00000400L
+//BIF_BX1_BIF_S5_DUMMY_REGS
+#define BIF_BX1_BIF_S5_DUMMY_REGS__BIF_S5_DUMMY_REGS__SHIFT 0x0
+#define BIF_BX1_BIF_S5_DUMMY_REGS__BIF_S5_DUMMY_REGS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_bif_bx_pf_BIFPFVFDEC1
+//BIF_BX_PF1_BIF_BME_STATUS
+#define BIF_BX_PF1_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_PF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_PF1_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_PF1_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_PF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_PF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_PF1_GPU_HDP_FLUSH_REQ
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF1_GPU_HDP_FLUSH_DONE
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_PF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_PF1_BIF_TRANS_PENDING
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_PF1_MAILBOX_CONTROL
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_PF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_PF1_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_PF1_MAILBOX_INT_CNTL
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_PF1_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_PF1_BIF_VMHV_MAILBOX
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_PF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_rcc_strap_BIFDEC1:1
+//RCC_STRAP2_RCC_BIF_STRAP0
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN4_DIS__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN__SHIFT 0x2
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN__SHIFT 0xb
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR__SHIFT 0xc
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR__SHIFT 0xd
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0xe
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0xf
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN__SHIFT 0x11
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_AUD_PIN__SHIFT 0x12
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN__SHIFT 0x14
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_GFX_IOBAR_DIS_AND_REGBAR_64BIT_EN__SHIFT 0x15
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GPUIOV_EN__SHIFT 0x16
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN3_DIS__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4__SHIFT 0x19
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_QUICKSIM_START__SHIFT 0x1a
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1b
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS__SHIFT 0x1c
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN__SHIFT 0x1d
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE__SHIFT 0x1e
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN4_DIS_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_EXPANSION_ROM_VALIDATION_SUPPORT_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_VGA_DIS_PIN_MASK 0x00000004L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MEM_AP_SIZE_PIN_MASK 0x00000078L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN3_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_MSI_FIRST_BE_FULL_PAYLOAD_EN_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NBIF_IGNORE_ERR_INFLR_MASK 0x00000400L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_PME_SUPPORT_COMPLIANCE_EN_MASK 0x00000800L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_EP_ERR_MASK 0x00001000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MSG_ERR_MASK 0x00002000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00004000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00008000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_MASK 0x00010000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_RX_IGNORE_TC_ERR_DN_MASK 0x00020000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_AUD_PIN_MASK 0x000C0000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIOS_ROM_EN_PIN_MASK 0x00100000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_GFX_IOBAR_DIS_AND_REGBAR_64BIT_EN_MASK 0x00200000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GPUIOV_EN_MASK 0x00400000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_GEN3_DIS_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIF_KILL_GEN4_MASK 0x02000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_QUICKSIM_START_MASK 0x04000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_NO_RO_ENABLED_P2P_PASSING_MASK 0x08000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_IGNORE_LOCAL_PREFIX_UR_SWUS_MASK 0x10000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_CFG0_RD_VF_BUSNUM_CHK_EN_MASK 0x20000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_BIGAPU_MODE_MASK 0x40000000L
+#define RCC_STRAP2_RCC_BIF_STRAP0__STRAP_LINK_DOWN_RESET_EN_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP1
+#define RCC_STRAP2_RCC_BIF_STRAP1__FUSESTRAP_VALID__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP1__ROMSTRAP_VALID__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP1__WRITE_DISABLE__SHIFT 0x2
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS__SHIFT 0x5
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x6
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_READY__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE__SHIFT 0xc
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_HWREV_LSB2__SHIFT 0xd
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWREV_LSB2__SHIFT 0xf
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY__SHIFT 0x11
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS__SHIFT 0x12
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN__SHIFT 0x13
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN__SHIFT 0x14
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGIN_EN__SHIFT 0x15
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN__SHIFT 0x16
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN__SHIFT 0x17
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN__SHIFT 0x19
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE__SHIFT 0x1a
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x1b
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN_EP__SHIFT 0x1d
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN__SHIFT 0x1e
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN_DN__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP1__FUSESTRAP_VALID_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP1__ROMSTRAP_VALID_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP1__WRITE_DISABLE_MASK 0x00000004L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_ECRC_INTERMEDIATE_CHK_EN_MASK 0x00000008L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_IGNORE_E2E_PREFIX_UR_SWUS_MASK 0x00000020L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00000040L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGINING_READY_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_EN_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_64BAR_EN_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_AP_SIZE_MASK 0x00000C00L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWUS_APER_PREFETCHABLE_MASK 0x00001000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_HWREV_LSB2_MASK 0x00006000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_SWREV_LSB2_MASK 0x00018000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_LINK_RST_CFG_ONLY_MASK 0x00020000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_IOV_LKRST_DIS_MASK 0x00040000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN_MASK 0x00080000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_PHY_16GT_EN_MASK 0x00100000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_MARGIN_EN_MASK 0x00200000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_PSN_UR_RPT_EN_MASK 0x00400000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_BIF_SLOT_POWER_SUPPORT_EN_MASK 0x00800000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_REGS_ACCESS_DIS_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_S5_MMREG_WR_POSTED_EN_MASK 0x02000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GFX_FUNC_LTR_MODE_MASK 0x04000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_GSI_SMN_POSTWR_MULTI_EN_MASK 0x18000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_DLF_EN_EP_MASK 0x20000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN_MASK 0x40000000L
+#define RCC_STRAP2_RCC_BIF_STRAP1__STRAP_AP_EN_DN_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP2
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWUS_SPT__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA__SHIFT 0x5
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA__SHIFT 0x6
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_USB_PD_FUNC_DIS__SHIFT 0xc
+#define RCC_STRAP2_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2__SHIFT 0xd
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS__SHIFT 0xe
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN__SHIFT 0xf
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PCIESWUS_INDEX_APER_RANGE_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWUS_SPT_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUC_IND_ACCESS_DIS_MASK 0x00000008L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SUM_IND_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ENDP_LINKDOWN_DROP_DMA_MASK 0x00000020L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWITCH_LINKDOWN_DROP_DMA_MASK 0x00000040L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_SWUS_SEC_LVL_OVRD_EN_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GMI_DNS_SDP_CLKREQ_TOGGLE_DIS_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_ACS_MSKSEV_EP_HIDE_DIS_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_CFG_PG_FW_INTERLOCK_EXIT_EN_MASK 0x00000C00L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_USB_PD_FUNC_DIS_MASK 0x00001000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__RESERVED_BIF_STRAP2_MASK 0x00002000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_GFXAZ_POWERSTATE_INTERLOCK_EN_MASK 0x00008000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_CYCLE_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_PWRBRK_DEGLITCH_BYPASS_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP2__STRAP_VLINK_PMETO_LDN_EXIT_BY_LNKRST_DIS_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP3
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_BIF_STRAP4
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L0S_EXIT_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_BIF_STRAP4__STRAP_VLINK_L1_EXIT_TIMER_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_BIF_STRAP5
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN__SHIFT 0x10
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN__SHIFT 0x11
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS__SHIFT 0x12
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS__SHIFT 0x13
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS__SHIFT 0x14
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS__SHIFT 0x15
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE__SHIFT 0x16
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE__SHIFT 0x18
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x19
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1b
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER__SHIFT 0x1c
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_BIF_GFX_REG_APER_REMAPPING_EN__SHIFT 0x1f
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_LDN_EN_MASK 0x00010000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ON_SWUS_SECRST_EN_MASK 0x00020000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_VLINK_ENTER_COMPLIANCE_DIS_MASK 0x00040000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_IGNORE_PSN_ON_VDM1_DIS_MASK 0x00080000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERR_STATUS_MASK_EN_UPS_MASK 0x00100000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_REG_PROTECTION_DIS_MASK 0x00200000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_SMN_ERRRSP_DATA_FORCE_MASK 0x00C00000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_INTERMEDIATERSP_DATA_ALLF_DATA_FORCE_MASK 0x01000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_SUPPORTED_MASK 0x06000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_EMER_POWER_REDUCTION_INIT_REQ_MASK 0x08000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_PWRBRK_STATUS_TIMER_MASK 0x70000000L
+#define RCC_STRAP2_RCC_BIF_STRAP5__STRAP_BIF_GFX_REG_APER_REMAPPING_EN_MASK 0x80000000L
+//RCC_STRAP2_RCC_BIF_STRAP6
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GEN5_DIS__SHIFT 0x0
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5__SHIFT 0x1
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN__SHIFT 0x2
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_DOE_VERSION_SEL__SHIFT 0x3
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_S5_GFX_REGS_ACCESS_DIS__SHIFT 0x4
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_PRODUCTION_MODE__SHIFT 0x5
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_SPX_SUPPORT__SHIFT 0x6
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_TPX_SUPPORT__SHIFT 0x7
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS1_SUPPORT__SHIFT 0x8
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS3_SUPPORT__SHIFT 0x9
+#define RCC_STRAP2_RCC_BIF_STRAP6__RESERVED_BIF_STRAP6__SHIFT 0xa
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GEN5_DIS_MASK 0x00000001L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_BIF_KILL_GEN5_MASK 0x00000002L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_PHY_32GT_EN_MASK 0x00000004L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_DOE_VERSION_SEL_MASK 0x00000008L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_S5_GFX_REGS_ACCESS_DIS_MASK 0x00000010L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_PRODUCTION_MODE_MASK 0x00000020L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_SPX_SUPPORT_MASK 0x00000040L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_GFX_PARTITION_CAP_TPX_SUPPORT_MASK 0x00000080L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS1_SUPPORT_MASK 0x00000100L
+#define RCC_STRAP2_RCC_BIF_STRAP6__STRAP_MEM_PARTITION_CAP_NPS3_SUPPORT_MASK 0x00000200L
+#define RCC_STRAP2_RCC_BIF_STRAP6__RESERVED_BIF_STRAP6_MASK 0xFFFFFC00L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_NEGO_GLOBAL_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_DEVICE_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ARI_EN_DN_DEV0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_ACS_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_AER_EN_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_CPL_ABORT_ERR_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_NEGO_GLOBAL_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_INTERRUPT_PIN_DN_DEV0_MASK 0x00E00000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_IGNORE_E2E_PREFIX_UR_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_PAYLOAD_SUPPORT_DN_DEV0_MASK 0x0E000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK 0x70000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_EPF0_DUMMY_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_ID_DN_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP1__STRAP_SUBSYS_VEN_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0__SHIFT 0x5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_NO_EQ_NEED_SUPPORTED_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE1_SUPPORTED_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFID_TS_USAGE_MODE2_SUPPORTED_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODEING_ON_DEV0_MASK 0x00000010L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_TRANSMITTER_PRECODE_REQUEST_DEV0_MASK 0x00000020L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP10__STRAP_MODIFIED_TS_INFOR1_DEV0_MASK 0x0007FFC0L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_MODIFIED_TS_VENDOR_ID_DEV0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_RESET_TIME_DN_DEV0_MASK 0x0FFF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_VALID_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_RTR_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP11__STRAP_SDPVW_REG_UPDATE_EN_DEV0_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP12__STRAP_MODIFIED_TS_INFOR2_DEV0_MASK 0x00FFFFFFL
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_COUNT_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_SELECTIVE_ENABLE_SUPPORTED_DEV0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_ALTERNATE_PROTOCOL_DETAILS_DEV0_MASK 0x000FFE00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP13__STRAP_RTR_D3HOTD0_TIME_DN_DEV0_MASK 0xFFF00000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_CTO_LOGGING_SUPPORT_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ACS_ENH_CAPABILITY_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_COMMAND_COMPLETED_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_ERR_COR_SUBCLASS_CAPABLE_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP14__STRAP_DOE_EN_UP_DEV0_MASK 0x00000010L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0__SHIFT 0x5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0__SHIFT 0xd
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0__SHIFT 0xf
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DE_EMPHASIS_SEL_DN_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_DSN_EN_DN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_E2E_PREFIX_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECN1P1_EN_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_CHECK_EN_DEV0_MASK 0x00000010L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ECRC_GEN_EN_DEV0_MASK 0x00000020L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_ERR_REPORTING_DIS_DEV0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_FMT_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXTENDED_TAG_ECN_EN_DEV0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_EXT_VC_COUNT_DN_DEV0_MASK 0x00000E00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_FIRST_RCVD_ERR_LOG_DN_DEV0_MASK 0x00001000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_POISONED_ADVISORY_NONFATAL_DN_DEV0_MASK 0x00002000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_COMPLIANCE_DEV0_MASK 0x00004000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN2_EN_DEV0_MASK 0x00008000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN3_COMPLIANCE_DEV0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_GEN4_COMPLIANCE_DEV0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_ACCEPTABLE_LATENCY_DEV0_MASK 0x00700000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L0S_EXIT_LATENCY_DEV0_MASK 0x03800000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_ACCEPTABLE_LATENCY_DEV0_MASK 0x1C000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP2__STRAP_L1_EXIT_LATENCY_DEV0_MASK 0xE0000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0xb
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LINK_BW_NOTIFICATION_CAP_DN_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_LTR_EN_DN_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MAX_PAYLOAD_SUPPORT_DEV0_MASK 0x00000038L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSI_EN_DN_DEV0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_MSTCPL_TIMEOUT_EN_DEV0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_NO_SOFT_RESET_DN_DEV0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_OBFF_SUPPORTED_DEV0_MASK 0x00000600L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x00003800L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_DOWNSTREAM_PORT_TX_PRESET_DEV0_MASK 0x0003C000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_RX_PRESET_HINT_DEV0_MASK 0x001C0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PCIE_LANE_EQUALIZATION_CNTL_UPSTREAM_PORT_TX_PRESET_DEV0_MASK 0x01E00000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DEV0_MASK 0x06000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PM_SUPPORT_DN_DEV0_MASK 0x18000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_ATOMIC_EN_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP3__STRAP_PMC_DSI_DN_DEV0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_0_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_1_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_2_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP4__STRAP_PWR_BUDGET_DATA_8T0_3_DEV0_MASK 0xFF000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0__SHIFT 0x19
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_MSI_MAP_EN_DEV0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_4_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_DATA_8T0_5_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_PWR_BUDGET_SYSTEM_ALLOCATED_DEV0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_64BIT_EN_DN_DEV0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ATOMIC_ROUTING_EN_DEV0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_VC_EN_DN_DEV0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DEV0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_TwoVC_EN_DN_DEV0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_LOCAL_DLF_SUPPORTED_DEV0_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_SOURCE_VALIDATION_DN_DEV0_MASK 0x00800000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_TRANSLATION_BLOCKING_DN_DEV0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_REQUEST_REDIRECT_DN_DEV0_MASK 0x02000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_COMPLETION_REDIRECT_DN_DEV0_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_UPSTREAM_FORWARDING_DN_DEV0_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_P2P_EGRESS_CONTROL_DN_DEV0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_ACS_DIRECT_TRANSLATED_P2P_DN_DEV0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_MSI_MAP_EN_DEV0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP5__STRAP_SSID_EN_DEV0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0__SHIFT 0x1
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0__SHIFT 0x5
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_CFG_CRS_EN_DEV0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_SMN_ERR_STATUS_MASK_EN_DNS_DEV0_MASK 0x00000002L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_INTERNAL_ERR_EN_DEV0_MASK 0x00000004L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM1_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_RTM2_PRESENCE_DET_SUPPORT_DEV0_MASK 0x00000010L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_COMPLETER_SUPPORTED_DEV0_MASK 0x00000020L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_VF_10BIT_TAG_REQUESTER_SUPPORTED_DEV0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x00000F00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_16GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0x0000F000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TPH_CPLR_SUPPORTED_DN_DEV0_MASK 0x00030000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_MSI_EXT_MSG_DATA_CAP_DN_DEV0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_NO_COMMAND_COMPLETED_SUPPORTED_DEV0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_GEN5_COMPLIANCE_DEV0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_TARGET_LINK_SPEED_DEV0_MASK 0x00E00000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_DSP_TX_PRESET_DEV0_MASK 0x0F000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP6__STRAP_PCIE_32GT_LANE_EQUALIZATION_CNTL_USP_TX_PRESET_DEV0_MASK 0xF0000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP7
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_PORT_NUMBER_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MAJOR_REV_ID_DN_DEV0_MASK 0x00000F00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_MINOR_REV_ID_DN_DEV0_MASK 0x0000F000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_RP_BUSNUM_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_DEVNUM_DEV0_MASK 0x1F000000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP7__STRAP_DN_FUNCID_DEV0_MASK 0xE0000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_6_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_7_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_8_DEV0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP8__STRAP_PWR_BUDGET_DATA_8T0_9_DEV0_MASK 0xFF000000L
+//RCC_STRAP2_RCC_DEV0_PORT_STRAP9
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_a_DEV0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_PWR_BUDGET_DATA_8T0_b_DEV0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_PORT_STRAP9__STRAP_VENDOR_ID_DN_DEV0_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP1
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_VF_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP1__STRAP_SRIOV_SUPPORTED_PAGE_SIZE_DEV0_F0_MASK 0xFFFF0000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_PIF_DEV0_F0_MASK 0x000000FFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_SUB_DEV0_F0_MASK 0x0000FF00L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_CLASS_CODE_BASE_DEV0_F0_MASK 0x00FF0000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP13__STRAP_SRIOV_TOTAL_VFS_DEV0_F0_MASK 0xFF000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP14__STRAP_VENDOR_ID_DEV0_F0_MASK 0x0000FFFFL
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_DLUP_TIME_DEV0_F0_MASK 0x00FFF000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP15__STRAP_RTR_VALID_DEV0_F0_MASK 0x01000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_FLR_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP16__STRAP_RTR_D3HOTD0_TIME_DEV0_F0_MASK 0x00FFF000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0__SHIFT 0xc
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_RESET_TIME_DEV0_F0_MASK 0x00000FFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_VALID_DEV0_F0_MASK 0x00001000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP17__STRAP_RTR_VF_FLR_TIME_DEV0_F0_MASK 0x01FFE000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP18__STRAP_RTR_VF_D3HOTD0_TIME_DEV0_F0_MASK 0x00000FFFL
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP2
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0__SHIFT 0x6
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0__SHIFT 0xf
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_SRIOV_EN_DEV0_F0_MASK 0x00000001L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_64BAR_DIS_DEV0_F0_MASK 0x00000040L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F0_MASK 0x00003E00L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F0_MASK 0x00004000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ARI_EN_DEV0_F0_MASK 0x00008000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_AER_EN_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ACS_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_ATS_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DPA_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_DSN_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_VC_EN_DEV0_F0_MASK 0x00800000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F0_MASK 0x07000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PAGE_REQ_EN_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP3
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SUBSYS_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F0_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PWR_EN_DEV0_F0_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_MSIX_TABLE_BIR_DEV0_F0_MASK 0x00E00000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_PMC_DSI_DEV0_F0_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F0_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_VF_RESIZE_BAR_EN_DEV0_F0_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_CLK_PM_EN_DEV0_F0_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F0_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP3__STRAP_RTR_EN_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP4
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F0__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_RESERVED_STRAP4_DEV0_F0_MASK 0x000003FFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_DOE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_ATOMIC_EN_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_FLR_EN_DEV0_F0_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_PME_SUPPORT_DEV0_F0_MASK 0x0F800000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F0_MASK 0x70000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP5
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_AUX_CURRENT_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_AUX_CURRENT_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F0_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0__SHIFT 0x3
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0__SHIFT 0x4
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0__SHIFT 0xd
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00000007L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_DOORBELL_BAR_DIS_DEV0_F0_MASK 0x00000008L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_ROM_AP_SIZE_DEV0_F0_MASK 0x00000070L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_IO_BAR_DIS_DEV0_F0_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_LFB_ERRMSG_EN_DEV0_F0_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_MEM_AP_SIZE_DEV0_F0_MASK 0x00001E00L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_REG_AP_SIZE_DEV0_F0_MASK 0x0000E000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_DOORBELL_APER_SIZE_DEV0_F0_MASK 0x00070000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MEM_AP_SIZE_DEV0_F0_MASK 0x00780000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_REG_AP_SIZE_DEV0_F0_MASK 0x03800000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VGA_DIS_DEV0_F0_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_VF_MSI_MULTI_CAP_DEV0_F0_MASK 0x38000000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP8__STRAP_SRIOV_VF_MAPPING_MODE_DEV0_F0_MASK 0xC0000000L
+//RCC_STRAP2_RCC_DEV0_EPF0_STRAP9
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_OUTSTAND_PAGE_REQ_CAP_DEV0_F0_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_BAR_COMPLIANCE_EN_DEV0_F0_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_NBIF_ROM_BAR_DIS_CHICKEN_DEV0_F0_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_VF_REG_PROT_DIS_DEV0_F0_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_ALWAYS_ON_DEV0_F0_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_FB_CPL_TYPE_SEL_DEV0_F0_MASK 0x00C00000L
+#define RCC_STRAP2_RCC_DEV0_EPF0_STRAP9__STRAP_GPUIOV_VSEC_REV_DEV0_F0_MASK 0x0F000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_DEVICE_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F1_MASK 0x000F0000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_MINOR_REV_ID_DEV0_F1_MASK 0x00F00000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_FUNC_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D1_SUPPORT_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP0__STRAP_D2_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP2
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1__SHIFT 0x7
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1__SHIFT 0x8
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1__SHIFT 0x9
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1__SHIFT 0xe
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_ATS_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_DSN_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F1_MASK 0x00000080L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_RESIZE_BAR_EN_DEV0_F1_MASK 0x00000100L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MAX_PASID_WIDTH_DEV0_F1_MASK 0x00003E00L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_PERVECTOR_MASK_CAP_DEV0_F1_MASK 0x00004000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_AER_EN_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_ACS_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_ATS_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_CPL_ABORT_ERR_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_DPA_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_DSN_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_MSI_MULTI_CAP_DEV0_F1_MASK 0x07000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EN_DEV0_F1_MASK 0x10000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_EXE_PERMISSION_SUPPORTED_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP2__STRAP_PASID_PRIV_MODE_SUPPORTED_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP20
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP21
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP3
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1__SHIFT 0x10
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1__SHIFT 0x11
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1__SHIFT 0x12
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1__SHIFT 0x13
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1__SHIFT 0x18
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1__SHIFT 0x1a
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1__SHIFT 0x1d
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SUBSYS_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_POISONED_ADVISORY_NONFATAL_DEV0_F1_MASK 0x00010000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PWR_EN_DEV0_F1_MASK 0x00020000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_EN_DEV0_F1_MASK 0x00040000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSI_CLR_PENDING_EN_DEV0_F1_MASK 0x00080000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_MSIX_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_PMC_DSI_DEV0_F1_MASK 0x01000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_ALL_MSI_EVENT_SUPPORT_EN_DEV0_F1_MASK 0x04000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_SMN_ERR_STATUS_MASK_EN_EP_DEV0_F1_MASK 0x08000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_CLK_PM_EN_DEV0_F1_MASK 0x20000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_TRUE_PM_STATUS_EN_DEV0_F1_MASK 0x40000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP3__STRAP_RTR_EN_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP4
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1__SHIFT 0x14
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1__SHIFT 0x15
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1__SHIFT 0x16
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1__SHIFT 0x17
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1__SHIFT 0x1c
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1__SHIFT 0x1f
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F1_MASK 0x00100000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_ATOMIC_EN_DEV0_F1_MASK 0x00200000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_FLR_EN_DEV0_F1_MASK 0x00400000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_PME_SUPPORT_DEV0_F1_MASK 0x0F800000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F1_MASK 0x70000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F1_MASK 0x80000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP5
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1__SHIFT 0x0
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1__SHIFT 0x1b
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1__SHIFT 0x1e
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_SUBSYS_VEN_ID_DEV0_F1_MASK 0x0000FFFFL
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_AUX_CURRENT_DEV0_F1_MASK 0x38000000L
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP5__STRAP_MSI_EXT_MSG_DATA_CAP_DEV0_F1_MASK 0x40000000L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP6
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_64BAR_EN_DEV0_F1__SHIFT 0x2
+#define RCC_STRAP2_RCC_DEV0_EPF1_STRAP6__STRAP_APER0_64BAR_EN_DEV0_F1_MASK 0x00000004L
+//RCC_STRAP2_RCC_DEV0_EPF1_STRAP7
+
+
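Editor's note: every STRAP register above follows the same convention as the rest of this generated header, i.e. each field gets a `__SHIFT` constant giving its bit position and a `_MASK` constant covering its bit range, and callers combine the two to extract or update the field inside the 32-bit register value. The sketch below illustrates that pattern only; the helper names and the example function are editorial assumptions, the snippet is not part of the patch, and obtaining the raw register value (via whatever MMIO or SMN accessor the driver provides) is outside its scope.

#include <stdint.h>

/* Illustrative sketch only: generic field extract/insert built on the
 * FIELD__SHIFT / FIELD_MASK pairs defined in this header. */
static inline uint32_t strap_get_field(uint32_t regval, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (regval & mask) >> shift;
}

static inline uint32_t strap_set_field(uint32_t regval, uint32_t mask, uint32_t shift, uint32_t val)
{
	/* Clear the field, then insert the new value within the mask. */
	return (regval & ~mask) | ((val << shift) & mask);
}

/* Example (hypothetical helper): pull the max-link-width field out of an
 * already-read RCC_STRAP2_RCC_DEV0_PORT_STRAP0 value. */
static inline uint32_t port_strap0_max_link_width(uint32_t strap0)
{
	return strap_get_field(strap0,
		RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0_MASK,
		RCC_STRAP2_RCC_DEV0_PORT_STRAP0__STRAP_MAX_LINK_WIDTH_SUPPORT_DEV0__SHIFT);
}

The same pattern applies unchanged to the GDC_DMA_SION_* definitions that follow; only the field widths differ (most of those fields span a full 32-bit word, so their masks are 0xFFFFFFFF and their shifts are 0).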
+// addressBlock: nbif_gdc_dma_sion_SIONDEC
+//GDC_DMA_SION_CL0_RdRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_RdRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_RdRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_RdRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_WrRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_WrRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_WrRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_WrRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_Req_BurstTarget_REG0
+#define GDC_DMA_SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_Req_BurstTarget_REG1
+#define GDC_DMA_SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_Req_TimeSlot_REG0
+#define GDC_DMA_SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_Req_TimeSlot_REG1
+#define GDC_DMA_SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_RdRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_RdRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_RdRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_RdRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_WrRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_WrRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_WrRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_WrRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_Req_BurstTarget_REG0
+#define GDC_DMA_SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_Req_BurstTarget_REG1
+#define GDC_DMA_SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_Req_TimeSlot_REG0
+#define GDC_DMA_SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_Req_TimeSlot_REG1
+#define GDC_DMA_SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_RdRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL2_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_RdRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL2_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_RdRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL2_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_RdRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL2_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_WrRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL2_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_WrRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL2_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_WrRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL2_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_WrRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL2_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_Req_BurstTarget_REG0
+#define GDC_DMA_SION_CL2_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_Req_BurstTarget_REG1
+#define GDC_DMA_SION_CL2_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_Req_TimeSlot_REG0
+#define GDC_DMA_SION_CL2_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_Req_TimeSlot_REG1
+#define GDC_DMA_SION_CL2_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL2_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_RdRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL3_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_RdRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL3_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_RdRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL3_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_RdRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL3_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_WrRsp_BurstTarget_REG0
+#define GDC_DMA_SION_CL3_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_WrRsp_BurstTarget_REG1
+#define GDC_DMA_SION_CL3_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_WrRsp_TimeSlot_REG0
+#define GDC_DMA_SION_CL3_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_WrRsp_TimeSlot_REG1
+#define GDC_DMA_SION_CL3_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_Req_BurstTarget_REG0
+#define GDC_DMA_SION_CL3_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_Req_BurstTarget_REG1
+#define GDC_DMA_SION_CL3_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_Req_TimeSlot_REG0
+#define GDC_DMA_SION_CL3_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_Req_TimeSlot_REG1
+#define GDC_DMA_SION_CL3_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG0
+#define GDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG1
+#define GDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_DMA_SION_CL3_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_DMA_SION_CNTL_REG0
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0__SHIFT 0x0
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1__SHIFT 0x1
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2__SHIFT 0x2
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3__SHIFT 0x3
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4__SHIFT 0x4
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5__SHIFT 0x5
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6__SHIFT 0x6
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7__SHIFT 0x7
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8__SHIFT 0x8
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9__SHIFT 0x9
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0__SHIFT 0xa
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1__SHIFT 0xb
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2__SHIFT 0xc
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3__SHIFT 0xd
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4__SHIFT 0xe
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5__SHIFT 0xf
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6__SHIFT 0x10
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7__SHIFT 0x11
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8__SHIFT 0x12
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9__SHIFT 0x13
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0_MASK 0x00000001L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1_MASK 0x00000002L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2_MASK 0x00000004L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3_MASK 0x00000008L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4_MASK 0x00000010L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5_MASK 0x00000020L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6_MASK 0x00000040L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7_MASK 0x00000080L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8_MASK 0x00000100L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9_MASK 0x00000200L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0_MASK 0x00000400L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1_MASK 0x00000800L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2_MASK 0x00001000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3_MASK 0x00002000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4_MASK 0x00004000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5_MASK 0x00008000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6_MASK 0x00010000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7_MASK 0x00020000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8_MASK 0x00040000L
+#define GDC_DMA_SION_CNTL_REG0__GDC_DMA_SION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9_MASK 0x00080000L
+//GDC_DMA_SION_CNTL_REG1
+#define GDC_DMA_SION_CNTL_REG1__GDC_DMA_SION_LIVELOCK_WATCHDOG_THRESHOLD__SHIFT 0x0
+#define GDC_DMA_SION_CNTL_REG1__GDC_DMA_SION_CG_OFF_HYSTERESIS__SHIFT 0x8
+#define GDC_DMA_SION_CNTL_REG1__GDC_DMA_SION_LIVELOCK_WATCHDOG_THRESHOLD_MASK 0x000000FFL
+#define GDC_DMA_SION_CNTL_REG1__GDC_DMA_SION_CG_OFF_HYSTERESIS_MASK 0x0000FF00L
+
+
+// addressBlock: nbif_gdc_hst_sion_SIONDEC
+//GDC_HST_SION_CL0_RdRsp_BurstTarget_REG0
+#define GDC_HST_SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_RdRsp_BurstTarget_REG1
+#define GDC_HST_SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_RdRsp_TimeSlot_REG0
+#define GDC_HST_SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_RdRsp_TimeSlot_REG1
+#define GDC_HST_SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_WrRsp_BurstTarget_REG0
+#define GDC_HST_SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_WrRsp_BurstTarget_REG1
+#define GDC_HST_SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_WrRsp_TimeSlot_REG0
+#define GDC_HST_SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_WrRsp_TimeSlot_REG1
+#define GDC_HST_SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_Req_BurstTarget_REG0
+#define GDC_HST_SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_Req_BurstTarget_REG1
+#define GDC_HST_SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_Req_TimeSlot_REG0
+#define GDC_HST_SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_Req_TimeSlot_REG1
+#define GDC_HST_SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_DataPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_DataPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_RdRsp_BurstTarget_REG0
+#define GDC_HST_SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_RdRsp_BurstTarget_REG1
+#define GDC_HST_SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_RdRsp_TimeSlot_REG0
+#define GDC_HST_SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_RdRsp_TimeSlot_REG1
+#define GDC_HST_SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_WrRsp_BurstTarget_REG0
+#define GDC_HST_SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_WrRsp_BurstTarget_REG1
+#define GDC_HST_SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_WrRsp_TimeSlot_REG0
+#define GDC_HST_SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_WrRsp_TimeSlot_REG1
+#define GDC_HST_SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_Req_BurstTarget_REG0
+#define GDC_HST_SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_Req_BurstTarget_REG1
+#define GDC_HST_SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_Req_TimeSlot_REG0
+#define GDC_HST_SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_Req_TimeSlot_REG1
+#define GDC_HST_SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_DataPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_DataPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG0
+#define GDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0
+#define GDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG1
+#define GDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0
+#define GDC_HST_SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL
+//GDC_HST_SION_CNTL_REG0
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0__SHIFT 0x0
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1__SHIFT 0x1
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2__SHIFT 0x2
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3__SHIFT 0x3
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4__SHIFT 0x4
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5__SHIFT 0x5
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6__SHIFT 0x6
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7__SHIFT 0x7
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8__SHIFT 0x8
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9__SHIFT 0x9
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0__SHIFT 0xa
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1__SHIFT 0xb
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2__SHIFT 0xc
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3__SHIFT 0xd
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4__SHIFT 0xe
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5__SHIFT 0xf
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6__SHIFT 0x10
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7__SHIFT 0x11
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8__SHIFT 0x12
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9__SHIFT 0x13
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0_MASK 0x00000001L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1_MASK 0x00000002L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2_MASK 0x00000004L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3_MASK 0x00000008L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4_MASK 0x00000010L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5_MASK 0x00000020L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6_MASK 0x00000040L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7_MASK 0x00000080L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8_MASK 0x00000100L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9_MASK 0x00000200L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0_MASK 0x00000400L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1_MASK 0x00000800L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2_MASK 0x00001000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3_MASK 0x00002000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4_MASK 0x00004000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5_MASK 0x00008000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6_MASK 0x00010000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7_MASK 0x00020000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8_MASK 0x00040000L
+#define GDC_HST_SION_CNTL_REG0__GDC_HSTSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9_MASK 0x00080000L
+//GDC_HST_SION_CNTL_REG1
+#define GDC_HST_SION_CNTL_REG1__GDC_HSTSION_LIVELOCK_WATCHDOG_THRESHOLD__SHIFT 0x0
+#define GDC_HST_SION_CNTL_REG1__GDC_HSTSION_CG_OFF_HYSTERESIS__SHIFT 0x8
+#define GDC_HST_SION_CNTL_REG1__GDC_HSTSION_LIVE_ACTIVE__SHIFT 0x10
+#define GDC_HST_SION_CNTL_REG1__GDC_HSTSION_LIVELOCK_WATCHDOG_THRESHOLD_MASK 0x000000FFL
+#define GDC_HST_SION_CNTL_REG1__GDC_HSTSION_CG_OFF_HYSTERESIS_MASK 0x0000FF00L
+#define GDC_HST_SION_CNTL_REG1__GDC_HSTSION_LIVE_ACTIVE_MASK 0xFFFF0000L
+
+
+// addressBlock: nbif_gdc_GDCDEC
+//GDC1_SHUB_REGS_IF_CTL
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS__SHIFT 0x1
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L
+#define GDC1_SHUB_REGS_IF_CTL__SHUB_REGS_VF_PROTECTION_DIS_MASK 0x00000002L
+//GDC1_A2S_QUEUE_FIFO_ARB_CNTL
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_PRIORITY__SHIFT 0x0
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_PRIORITY__SHIFT 0xa
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_MODE__SHIFT 0x14
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_MODE__SHIFT 0x15
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_PRIORITY_MASK 0x000003FFL
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_PRIORITY_MASK 0x000FFC00L
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__WR_QUEUE_FIFO_POP_ARB_MODE_MASK 0x00100000L
+#define GDC1_A2S_QUEUE_FIFO_ARB_CNTL__RD_QUEUE_FIFO_POP_ARB_MODE_MASK 0x00200000L
+//GDC1_NGDC_MGCG_CTRL
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_DBG_DIS__SHIFT 0xe
+#define GDC1_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN__SHIFT 0xf
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_MGCG_DBG_DIS_MASK 0x00004000L
+#define GDC1_NGDC_MGCG_CTRL__NGDC_SRAM_FGCG_EN_MASK 0x00008000L
+//GDC1_S2A_MISC_CNTL
+#define GDC1_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3
+#define GDC1_S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8
+#define GDC1_S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa
+#define GDC1_S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc
+#define GDC1_S2A_MISC_CNTL__HDP_PERF_ENH_DIS__SHIFT 0xf
+#define GDC1_S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10
+#define GDC1_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L
+#define GDC1_S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L
+#define GDC1_S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L
+#define GDC1_S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L
+#define GDC1_S2A_MISC_CNTL__HDP_PERF_ENH_DIS_MASK 0x00008000L
+#define GDC1_S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L
+//GDC1_NGDC_EARLY_WAKEUP_CTRL
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_ACTIVE__SHIFT 0x0
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_DS_EXIT__SHIFT 0x1
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_ALLOW_AER_ACTIVE__SHIFT 0x2
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_ACTIVE_MASK 0x00000001L
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_BY_CLIENT_DS_EXIT_MASK 0x00000002L
+#define GDC1_NGDC_EARLY_WAKEUP_CTRL__NGDC_EARLY_WAKEUP_ALLOW_AER_ACTIVE_MASK 0x00000004L
+//GDC1_NGDC_PG_MISC_CTRL
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY__SHIFT 0xa
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1__SHIFT 0xd
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS__SHIFT 0xe
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2__SHIFT 0x10
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS__SHIFT 0x18
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE__SHIFT 0x1f
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_ENDP_D3_ONLY_MASK 0x00000400L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM1_MASK 0x00002000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_DS_ALLOW_DIS_MASK 0x00004000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_PG_CLK_PERM2_MASK 0x00010000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_REFCLK_CYCLE_FOR_200NS_MASK 0x3F000000L
+#define GDC1_NGDC_PG_MISC_CTRL__NGDC_CFG_PG_EXIT_OVERRIDE_MASK 0x80000000L
+//GDC1_NGDC_PGMST_CTRL
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS__SHIFT 0x0
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN__SHIFT 0x8
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN__SHIFT 0xe
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_PG_EN_MASK 0x00000100L
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define GDC1_NGDC_PGMST_CTRL__NGDC_CFG_FW_PG_EXIT_EN_MASK 0x0000C000L
+//GDC1_NGDC_PGSLV_CTRL
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS__SHIFT 0x0
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS__SHIFT 0x5
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS__SHIFT 0xa
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_0_IDLE_HYSTERESIS_MASK 0x0000001FL
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_SHUBCLK_1_IDLE_HYSTERESIS_MASK 0x000003E0L
+#define GDC1_NGDC_PGSLV_CTRL__NGDC_CFG_GDCCLK_IDLE_HYSTERESIS_MASK 0x00007C00L
+//GDC1_ATDMA_MISC_CNTL
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_ARB_MODE__SHIFT 0x0
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_MISC_CNTL_INSERT_RD_ON_2ND_WDAT_EN__SHIFT 0x1
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_RDRSP_ARB_MODE__SHIFT 0x2
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_VC6_WEIGHT__SHIFT 0x8
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_VC0_WEIGHT__SHIFT 0x10
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_VC1_WEIGHT__SHIFT 0x18
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_ARB_MODE_MASK 0x00000001L
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_MISC_CNTL_INSERT_RD_ON_2ND_WDAT_EN_MASK 0x00000002L
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_RDRSP_ARB_MODE_MASK 0x0000000CL
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_VC6_WEIGHT_MASK 0x0000FF00L
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_VC0_WEIGHT_MASK 0x00FF0000L
+#define GDC1_ATDMA_MISC_CNTL__ATDMA_WRR_VC1_WEIGHT_MASK 0xFF000000L
+
+
+// addressBlock: nbif_gdc_ras_gdc_ras_regblk
+//GDCSOC_ERR_RSP_CNTL
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_BYPASS__SHIFT 0x0
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_ACCUM_SEL__SHIFT 0x1
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_FORCE_EN__SHIFT 0x2
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_FORCE_DATA__SHIFT 0x3
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_STATUS_ACCUM_EN__SHIFT 0x4
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_DATASTATUS_ACCUM_EN__SHIFT 0x5
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_BYPASS_MASK 0x00000001L
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_ACCUM_SEL_MASK 0x00000002L
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_FORCE_EN_MASK 0x00000004L
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_FORCE_DATA_MASK 0x00000008L
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_STATUS_ACCUM_EN_MASK 0x00000010L
+#define GDCSOC_ERR_RSP_CNTL__GDCSOC_RDRSP_DATASTATUS_ACCUM_EN_MASK 0x00000020L
+//GDCSOC_RAS_CENTRAL_STATUS
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_EgStall_det__SHIFT 0x0
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_ErrEvent_det__SHIFT 0x1
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_EgStall_det__SHIFT 0x2
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_ErrEvent_det__SHIFT 0x3
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_EgStall_det_MASK 0x00000001L
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_ErrEvent_det_MASK 0x00000002L
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_EgStall_det_MASK 0x00000004L
+#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_ErrEvent_det_MASK 0x00000008L
+//GDCSOC_RAS_LEAF0_CTRL
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_STALL_EN__SHIFT 0x2
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_STALL_EN__SHIFT 0x4
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_RCVERREVENT_STALL_EN__SHIFT 0x6
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_UCP_EN__SHIFT 0x15
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_STALL_EN_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_STALL_EN_MASK 0x00000010L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_UCP_EN_MASK 0x00200000L
+//GDCSOC_RAS_LEAF1_CTRL
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_STALL_EN__SHIFT 0x2
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_STALL_EN__SHIFT 0x4
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_RCVERREVENT_STALL_EN__SHIFT 0x6
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_UCP_EN__SHIFT 0x15
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_STALL_EN_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_STALL_EN_MASK 0x00000010L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_UCP_EN_MASK 0x00200000L
+//GDCSOC_RAS_LEAF2_CTRL
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_STALL_EN__SHIFT 0x2
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_STALL_EN__SHIFT 0x4
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_RCVERREVENT_STALL_EN__SHIFT 0x6
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_INTR_EN__SHIFT 0x10
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_UCP_EN__SHIFT 0x15
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_STALL_EN_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_STALL_EN_MASK 0x00000010L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_UCP_EN_MASK 0x00200000L
+//GDCSOC_RAS_LEAF3_CTRL
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_STALL_EN__SHIFT 0x2
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_STALL_EN__SHIFT 0x4
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_RCVERREVENT_STALL_EN__SHIFT 0x6
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_UCP_EN__SHIFT 0x15
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_STALL_EN_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_STALL_EN_MASK 0x00000010L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_UCP_EN_MASK 0x00200000L
+//GDCSOC_RAS_LEAF4_CTRL
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_EN__SHIFT 0x1
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_STALL_EN__SHIFT 0x2
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_STALL_EN__SHIFT 0x4
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_RCVERREVENT_ERREVENT_EN__SHIFT 0x5
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_RCVERREVENT_STALL_EN__SHIFT 0x6
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_LOG_MCA__SHIFT 0x11
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_LOG_MCA__SHIFT 0x12
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_TIMEOUT_ERREVENT_LOG_MCA__SHIFT 0x13
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_RCVERREVENT_ERREVENT_LOG_MCA__SHIFT 0x14
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_UCP_EN__SHIFT 0x15
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_STALL_EN_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_STALL_EN_MASK 0x00000010L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_RCVERREVENT_ERREVENT_EN_MASK 0x00000020L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_RCVERREVENT_STALL_EN_MASK 0x00000040L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_LOG_MCA_MASK 0x00020000L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_LOG_MCA_MASK 0x00040000L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_TIMEOUT_ERREVENT_LOG_MCA_MASK 0x00080000L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_RCVERREVENT_ERREVENT_LOG_MCA_MASK 0x00100000L
+#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_UCP_EN_MASK 0x00200000L
+//GDCSOC_RAS_LEAF2_MISC_CTRL
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_DROP_EN__SHIFT 0x0
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_DROP_EN__SHIFT 0x1
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS__SHIFT 0x8
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS__SHIFT 0x9
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_ATHUB_RAS_ACTION_DIS_DMA_REQ__SHIFT 0xb
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_ATHUB_RAS_ACTION_EN_DMA_REQ_CHAIN_CHK__SHIFT 0xc
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_ATHUB_RAS_ACTION_DIS_DMA_RSP__SHIFT 0xd
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_UNITID_EN__SHIFT 0x10
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_TAG_CONST_EN__SHIFT 0x11
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_DROP_EN_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_DROP_EN_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_ATHUB_RAS_ACTION_DIS_DMA_REQ_MASK 0x00000800L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_ATHUB_RAS_ACTION_EN_DMA_REQ_CHAIN_CHK_MASK 0x00001000L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_ATHUB_RAS_ACTION_DIS_DMA_RSP_MASK 0x00002000L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_UNITID_EN_MASK 0x00010000L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_TAG_CONST_EN_MASK 0x00020000L
+//GDCSOC_RAS_LEAF2_MISC_CTRL2
+#define GDCSOC_RAS_LEAF2_MISC_CTRL2__GDCSOC_RAS_LEAF2_MISC_CTRL2_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_UNITID__SHIFT 0x0
+#define GDCSOC_RAS_LEAF2_MISC_CTRL2__GDCSOC_RAS_LEAF2_MISC_CTRL2_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_TAG__SHIFT 0xb
+#define GDCSOC_RAS_LEAF2_MISC_CTRL2__GDCSOC_RAS_LEAF2_MISC_CTRL2_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_TAG_OFFSET__SHIFT 0x15
+#define GDCSOC_RAS_LEAF2_MISC_CTRL2__GDCSOC_RAS_LEAF2_MISC_CTRL2_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_UNITID_MASK 0x000007FFL
+#define GDCSOC_RAS_LEAF2_MISC_CTRL2__GDCSOC_RAS_LEAF2_MISC_CTRL2_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_TAG_MASK 0x001FF800L
+#define GDCSOC_RAS_LEAF2_MISC_CTRL2__GDCSOC_RAS_LEAF2_MISC_CTRL2_ERR_EVENT_RAS_ATHUB_DUMMYCHAIN_REQ_TAG_OFFSET_MASK 0x7FE00000L
+//GDCSOC_RAS_LEAF0_STATUS
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_RECV__SHIFT 0x0
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_POISON_ERR_DET__SHIFT 0x1
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_PARITY_ERR_DET__SHIFT 0x2
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_POISON_ERR_DET_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_PARITY_ERR_DET_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//GDCSOC_RAS_LEAF1_STATUS
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_RECV__SHIFT 0x0
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_POISON_ERR_DET__SHIFT 0x1
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_PARITY_ERR_DET__SHIFT 0x2
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_POISON_ERR_DET_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_PARITY_ERR_DET_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//GDCSOC_RAS_LEAF2_STATUS
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_RECV__SHIFT 0x0
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_POISON_ERR_DET__SHIFT 0x1
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_PARITY_ERR_DET__SHIFT 0x2
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_POISON_ERR_DET_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_PARITY_ERR_DET_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//GDCSOC_RAS_LEAF3_STATUS
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_RECV__SHIFT 0x0
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_POISON_ERR_DET__SHIFT 0x1
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_PARITY_ERR_DET__SHIFT 0x2
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_POISON_ERR_DET_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_PARITY_ERR_DET_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+//GDCSOC_RAS_LEAF4_STATUS
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_RECV__SHIFT 0x0
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_POISON_ERR_DET__SHIFT 0x1
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_PARITY_ERR_DET__SHIFT 0x2
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_RECV_MASK 0x00000001L
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_POISON_ERR_DET_MASK 0x00000002L
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_PARITY_ERR_DET_MASK 0x00000004L
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L
+#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L
+
+
+// addressBlock: nbif_gdc_rst_GDCRST_DEC
+//SHUB_PF_FLR_RST
+#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0
+#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1
+#define SHUB_PF_FLR_RST__DEV0_PF2_FLR_RST__SHIFT 0x2
+#define SHUB_PF_FLR_RST__DEV0_PF3_FLR_RST__SHIFT 0x3
+#define SHUB_PF_FLR_RST__DEV0_PF4_FLR_RST__SHIFT 0x4
+#define SHUB_PF_FLR_RST__DEV0_PF5_FLR_RST__SHIFT 0x5
+#define SHUB_PF_FLR_RST__DEV0_PF6_FLR_RST__SHIFT 0x6
+#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L
+#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L
+#define SHUB_PF_FLR_RST__DEV0_PF2_FLR_RST_MASK 0x00000004L
+#define SHUB_PF_FLR_RST__DEV0_PF3_FLR_RST_MASK 0x00000008L
+#define SHUB_PF_FLR_RST__DEV0_PF4_FLR_RST_MASK 0x00000010L
+#define SHUB_PF_FLR_RST__DEV0_PF5_FLR_RST_MASK 0x00000020L
+#define SHUB_PF_FLR_RST__DEV0_PF6_FLR_RST_MASK 0x00000040L
+//SHUB_GFX_DRV_VPU_RST
+#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST__SHIFT 0x0
+#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST_MASK 0x00000001L
+//SHUB_LINK_RESET
+#define SHUB_LINK_RESET__LINK_P0_RESET__SHIFT 0x0
+#define SHUB_LINK_RESET__LINK_P1_RESET__SHIFT 0x1
+#define SHUB_LINK_RESET__LINK_P2_RESET__SHIFT 0x2
+#define SHUB_LINK_RESET__LINK_P3_RESET__SHIFT 0x3
+#define SHUB_LINK_RESET__LINK_P0_RESET_MASK 0x00000001L
+#define SHUB_LINK_RESET__LINK_P1_RESET_MASK 0x00000002L
+#define SHUB_LINK_RESET__LINK_P2_RESET_MASK 0x00000004L
+#define SHUB_LINK_RESET__LINK_P3_RESET_MASK 0x00000008L
+//SHUB_HARD_RST_CTRL
+#define SHUB_HARD_RST_CTRL__COR_RESET_EN__SHIFT 0x0
+#define SHUB_HARD_RST_CTRL__REG_RESET_EN__SHIFT 0x1
+#define SHUB_HARD_RST_CTRL__STY_RESET_EN__SHIFT 0x2
+#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3
+#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4
+#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5
+#define SHUB_HARD_RST_CTRL__COR_RESET_EN_MASK 0x00000001L
+#define SHUB_HARD_RST_CTRL__REG_RESET_EN_MASK 0x00000002L
+#define SHUB_HARD_RST_CTRL__STY_RESET_EN_MASK 0x00000004L
+#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L
+#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L
+#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L
+//SHUB_SOFT_RST_CTRL
+#define SHUB_SOFT_RST_CTRL__COR_RESET_EN__SHIFT 0x0
+#define SHUB_SOFT_RST_CTRL__REG_RESET_EN__SHIFT 0x1
+#define SHUB_SOFT_RST_CTRL__STY_RESET_EN__SHIFT 0x2
+#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3
+#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4
+#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5
+#define SHUB_SOFT_RST_CTRL__COR_RESET_EN_MASK 0x00000001L
+#define SHUB_SOFT_RST_CTRL__REG_RESET_EN_MASK 0x00000002L
+#define SHUB_SOFT_RST_CTRL__STY_RESET_EN_MASK 0x00000004L
+#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L
+#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L
+#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L
+//SHUB_SDP_PORT_RST
+#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST__SHIFT 0x0
+#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST__SHIFT 0x1
+#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST__SHIFT 0x2
+#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST__SHIFT 0x3
+#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST__SHIFT 0x4
+#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST__SHIFT 0x6
+#define SHUB_SDP_PORT_RST__GDC_HST_SDP_PORT_RST__SHIFT 0x7
+#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST__SHIFT 0x8
+#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST__SHIFT 0x9
+#define SHUB_SDP_PORT_RST__MPDMATF_DMA_SDP_PORT_RST__SHIFT 0xa
+#define SHUB_SDP_PORT_RST__MPDMAPM_DMA_SDP_PORT_RST__SHIFT 0xb
+#define SHUB_SDP_PORT_RST__MPDMATF_HST_SDP_PORT_RST__SHIFT 0xc
+#define SHUB_SDP_PORT_RST__SHUB_CNDI_HST_SDP_PORT_RST__SHIFT 0xd
+#define SHUB_SDP_PORT_RST__SION_AON_RST__SHIFT 0x18
+#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST_MASK 0x00000001L
+#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST_MASK 0x00000002L
+#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST_MASK 0x00000004L
+#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST_MASK 0x00000008L
+#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST_MASK 0x00000010L
+#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST_MASK 0x00000040L
+#define SHUB_SDP_PORT_RST__GDC_HST_SDP_PORT_RST_MASK 0x00000080L
+#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST_MASK 0x00000100L
+#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST_MASK 0x00000200L
+#define SHUB_SDP_PORT_RST__MPDMATF_DMA_SDP_PORT_RST_MASK 0x00000400L
+#define SHUB_SDP_PORT_RST__MPDMAPM_DMA_SDP_PORT_RST_MASK 0x00000800L
+#define SHUB_SDP_PORT_RST__MPDMATF_HST_SDP_PORT_RST_MASK 0x00001000L
+#define SHUB_SDP_PORT_RST__SHUB_CNDI_HST_SDP_PORT_RST_MASK 0x00002000L
+#define SHUB_SDP_PORT_RST__SION_AON_RST_MASK 0x01000000L
+//SHUB_RST_MISC_TRL
+#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_ATOMIC__SHIFT 0x0
+#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_CYCLE__SHIFT 0x10
+#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_ATOMIC_MASK 0x00000001L
+#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_CYCLE_MASK 0x00FF0000L
+
+
+// addressBlock: nbif_gdc_s2a_GDCS2A_DEC
+//GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_0_CTRL__S2A_DOORBELL_PORT0_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_1_CTRL__S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_2_CTRL__S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_3_CTRL__S2A_DOORBELL_PORT3_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_4_CTRL__S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_5_CTRL__S2A_DOORBELL_PORT5_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_6_CTRL__S2A_DOORBELL_PORT6_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_7_CTRL__S2A_DOORBELL_PORT7_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_8_CTRL__S2A_DOORBELL_PORT8_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_9_CTRL__S2A_DOORBELL_PORT9_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_10_CTRL__S2A_DOORBELL_PORT10_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_11_CTRL__S2A_DOORBELL_PORT11_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_12_CTRL__S2A_DOORBELL_PORT12_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_13_CTRL__S2A_DOORBELL_PORT13_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_14_CTRL__S2A_DOORBELL_PORT14_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_ENABLE__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWID__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_FENCE_ENABLE__SHIFT 0x6
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_OFFSET__SHIFT 0x7
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_SIZE__SHIFT 0x11
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_64BIT_SUPPORT_DIS__SHIFT 0x19
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_NEED_DEDUCT_RANGE_OFFSET__SHIFT 0x1a
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_DROP_EN__SHIFT 0x1b
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWADDR_31_28_VALUE__SHIFT 0x1c
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_ENABLE_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWID_MASK 0x0000003EL
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_FENCE_ENABLE_MASK 0x00000040L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_OFFSET_MASK 0x0001FF80L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_RANGE_SIZE_MASK 0x01FE0000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_64BIT_SUPPORT_DIS_MASK 0x02000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_NEED_DEDUCT_RANGE_OFFSET_MASK 0x04000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_DROP_EN_MASK 0x08000000L
+#define GDC_S2A1_S2A_DOORBELL_ENTRY_15_CTRL__S2A_DOORBELL_PORT15_AWADDR_31_28_VALUE_MASK 0xF0000000L
+//GDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG
+#define GDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x0
+#define GDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBE_FENCE_INTR_ENABLE__SHIFT 0x1
+#define GDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00000001L
+#define GDC_S2A1_S2A_DOORBELL_COMMON_CTRL_REG__S2A_DOORBE_FENCE_INTR_ENABLE_MASK 0x00000002L
+//GDC_S2A1_NBIF_GFX_DOORBELL_STATUS
+#define GDC_S2A1_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT__SHIFT 0x0
+#define GDC_S2A1_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_EN__SHIFT 0x10
+#define GDC_S2A1_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_ST__SHIFT 0x18
+#define GDC_S2A1_NBIF_GFX_DOORBELL_STATUS__NBIF_GFX_DOORBELL_SENT_MASK 0x0000FFFFL
+#define GDC_S2A1_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_EN_MASK 0x00010000L
+#define GDC_S2A1_NBIF_GFX_DOORBELL_STATUS__S2A_DOORBELL_ALL_CLR_ST_MASK 0x01000000L
+
+
+// addressBlock: nbif_gdc_a2s_GDCA2S_DEC
+//A2S_CNTL_SW0
+#define A2S_CNTL_SW0__STATIC_VC_ENABLE__SHIFT 0x0
+#define A2S_CNTL_SW0__STATIC_VC_VALUE__SHIFT 0x1
+#define A2S_CNTL_SW0__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define A2S_CNTL_SW0__WRR_RD_WEIGHT__SHIFT 0x10
+#define A2S_CNTL_SW0__WRR_WR_WEIGHT__SHIFT 0x18
+#define A2S_CNTL_SW0__STATIC_VC_ENABLE_MASK 0x00000001L
+#define A2S_CNTL_SW0__STATIC_VC_VALUE_MASK 0x0000000EL
+#define A2S_CNTL_SW0__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define A2S_CNTL_SW0__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define A2S_CNTL_SW0__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define A2S_CNTL_SW0__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define A2S_CNTL_SW0__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define A2S_CNTL_SW0__WRR_WR_WEIGHT_MASK 0xFF000000L
+//A2S_CNTL_SW1
+#define A2S_CNTL_SW1__STATIC_VC_ENABLE__SHIFT 0x0
+#define A2S_CNTL_SW1__STATIC_VC_VALUE__SHIFT 0x1
+#define A2S_CNTL_SW1__SDP_WR_CHAIN_DIS__SHIFT 0x9
+#define A2S_CNTL_SW1__WR_TAG_FOR_CHAIN_ENABLE__SHIFT 0xa
+#define A2S_CNTL_SW1__WR_TAG_FOR_NONCHAIN_ENABLE__SHIFT 0xb
+#define A2S_CNTL_SW1__SDP_DYNAMIC_VC_WR_CHAIN_DIS__SHIFT 0xc
+#define A2S_CNTL_SW1__WRR_RD_WEIGHT__SHIFT 0x10
+#define A2S_CNTL_SW1__WRR_WR_WEIGHT__SHIFT 0x18
+#define A2S_CNTL_SW1__STATIC_VC_ENABLE_MASK 0x00000001L
+#define A2S_CNTL_SW1__STATIC_VC_VALUE_MASK 0x0000000EL
+#define A2S_CNTL_SW1__SDP_WR_CHAIN_DIS_MASK 0x00000200L
+#define A2S_CNTL_SW1__WR_TAG_FOR_CHAIN_ENABLE_MASK 0x00000400L
+#define A2S_CNTL_SW1__WR_TAG_FOR_NONCHAIN_ENABLE_MASK 0x00000800L
+#define A2S_CNTL_SW1__SDP_DYNAMIC_VC_WR_CHAIN_DIS_MASK 0x00001000L
+#define A2S_CNTL_SW1__WRR_RD_WEIGHT_MASK 0x00FF0000L
+#define A2S_CNTL_SW1__WRR_WR_WEIGHT_MASK 0xFF000000L
+//A2S_MISC_CNTL
+#define A2S_MISC_CNTL__BLKLVL_FOR_MSG__SHIFT 0x0
+#define A2S_MISC_CNTL__WRRSP_ACCUM_SEL__SHIFT 0x6
+#define A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY__SHIFT 0x9
+#define A2S_MISC_CNTL__BLKLVL_FOR_MSG_MASK 0x00000003L
+#define A2S_MISC_CNTL__WRRSP_ACCUM_SEL_MASK 0x00000040L
+#define A2S_MISC_CNTL__RDRSP_STS_DATSTS_PRIORITY_MASK 0x00000200L
+//A2S_TAG_ALLOC_0
+#define A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR__SHIFT 0x0
+#define A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD__SHIFT 0x8
+#define A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR__SHIFT 0x10
+#define A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_WR_MASK 0x000000FFL
+#define A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC0_RD_MASK 0x0000FF00L
+#define A2S_TAG_ALLOC_0__TAG_ALLOC_FOR_VC1_WR_MASK 0x00FF0000L
+//A2S_TAG_ALLOC_1
+#define A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR__SHIFT 0x0
+#define A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR__SHIFT 0x10
+#define A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD__SHIFT 0x18
+#define A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC3_WR_MASK 0x000000FFL
+#define A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_WR_MASK 0x00FF0000L
+#define A2S_TAG_ALLOC_1__TAG_ALLOC_FOR_VC7_RD_MASK 0xFF000000L
+
+
+// addressBlock: nbif_syshub_mmreg_syshubdirect
+//HST_CLK0_SW0_CL0_CNTL
+#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//HST_CLK0_SW1_CL0_CNTL
+#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//DMA_CLK0_SW0_CL0_CNTL
+#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0
+#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1
+#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L
+#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L
+//NIC400_1_ASIB_0_FN_MOD
+#define NIC400_1_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_1_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_1_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_1_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L
+//NIC400_1_IB_0_FN_MOD
+#define NIC400_1_IB_0_FN_MOD__read_iss_override__SHIFT 0x0
+#define NIC400_1_IB_0_FN_MOD__write_iss_override__SHIFT 0x1
+#define NIC400_1_IB_0_FN_MOD__read_iss_override_MASK 0x00000001L
+#define NIC400_1_IB_0_FN_MOD__write_iss_override_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf0_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF0_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF0_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf0_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF0_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf0_BIFDEC2
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF0_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf1_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF1_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF1_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf1_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF1_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf1_BIFDEC2
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF1_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf2_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF2_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF2_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf2_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF2_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf2_BIFDEC2
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF2_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf3_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF3_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF3_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf3_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF3_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf3_BIFDEC2
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF3_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf4_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF4_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF4_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf4_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF4_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf4_BIFDEC2
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF4_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf5_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF5_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF5_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf5_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF5_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf5_BIFDEC2
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF5_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf6_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF6_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF6_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf6_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF6_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf6_BIFDEC2
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF6_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf7_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF7_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF7_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf7_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF7_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf7_BIFDEC2
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF7_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf8_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf8_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF8_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF8_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF8_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf8_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF8_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf8_BIFDEC2
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF8_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf9_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf9_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF9_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF9_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF9_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf9_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF9_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf9_BIFDEC2
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF9_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf10_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
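The paired __SHIFT/_MASK values above (for example the VF10 MAILBOX_CONTROL fields) are conventionally combined to extract or update a single bit field within a 32-bit register. A minimal illustrative sketch, not part of the generated header: the field_get()/field_set() helpers and the mapped mailbox_control pointer are assumptions for illustration only; a real driver would go through its own register accessors.

#include <stdint.h>

static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Acknowledge a pending VF10 mailbox message, if one is flagged as valid. */
static void vf10_mailbox_ack(volatile uint32_t *mailbox_control)
{
	uint32_t ctl = *mailbox_control;

	if (field_get(ctl,
		      BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID_MASK,
		      BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT)) {
		ctl = field_set(ctl,
				BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK_MASK,
				BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT,
				1);
		*mailbox_control = ctl;
	}
}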
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf10_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF10_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF10_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF10_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
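The MM_INDEX / MM_DATA fields above describe the usual index/data pair for indirect register access: the target offset is programmed into MM_INDEX (with MM_APER selecting the aperture and MM_INDEX_HI extending the offset), after which the value is transferred through MM_DATA. A minimal sketch under stated assumptions: `mmio` is a hypothetically mapped register BAR, and the two byte offsets are placeholders standing in for the real values from the matching *_offset.h header, which this file does not contain.

#include <stdint.h>

/* Hypothetical byte offsets; the real values live in the *_offset.h header. */
#define VF10_MM_INDEX_OFFSET 0x0
#define VF10_MM_DATA_OFFSET  0x4

static uint32_t vf10_indirect_rreg(volatile uint8_t *mmio, uint32_t reg_offset)
{
	uint32_t index = (reg_offset << BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET__SHIFT) &
			 BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET_MASK;

	/* Program the target offset, then read the value back through MM_DATA. */
	*(volatile uint32_t *)(mmio + VF10_MM_INDEX_OFFSET) = index;
	return *(volatile uint32_t *)(mmio + VF10_MM_DATA_OFFSET);
}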
+
+// addressBlock: nbif_rcc_dev0_epf0_vf10_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF10_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf10_BIFDEC2
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF10_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
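The GFXMSIX_VECT*_ADDR_LO/HI, MSG_DATA, and CONTROL fields above follow the standard MSI-X vector-table layout (message address, message data, and a per-vector mask bit). A hedged sketch only, assuming a hypothetical mapped pointer to the four consecutive DWORDs of vector 0; in practice the PCI core programs these entries, so this is purely illustrative.

#include <stdint.h>

struct gfx_msix_vector {	/* layout implied by the VECT0 fields above */
	uint32_t addr_lo;	/* MSG_ADDR_LO, bits [31:2] */
	uint32_t addr_hi;	/* MSG_ADDR_HI */
	uint32_t msg_data;	/* MSG_DATA */
	uint32_t control;	/* MASK_BIT in bit 0 */
};

static void gfx_msix_program_vect0(volatile struct gfx_msix_vector *v,
				   uint64_t msg_addr, uint32_t msg_data)
{
	v->addr_lo = (uint32_t)msg_addr &
		     RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK;
	v->addr_hi = (uint32_t)(msg_addr >> 32);
	v->msg_data = msg_data;
	/* Clear MASK_BIT to unmask the vector. */
	v->control &= ~RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK;
}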
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf11_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf11_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF11_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF11_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF11_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf11_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF11_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf11_BIFDEC2
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF11_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf12_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf12_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF12_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF12_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF12_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf12_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF12_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf12_BIFDEC2
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF12_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf13_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf13_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF13_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF13_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF13_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf13_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF13_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf13_BIFDEC2
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF13_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf14_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf14_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF14_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF14_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF14_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf14_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF14_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf14_BIFDEC2
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF14_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf15_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_ONLY_CNTL__HDP_MEM_FLUSH_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL
+#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_INVALIDATE_ONLY_CNTL__HDP_MEM_INVALIDATE_ONLY_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf15_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF15_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF15_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF15_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf15_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF15_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf15_BIFDEC2
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF15_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf16_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF16_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF16_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF16_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF16_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF16_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF16_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF16_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf16_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF16_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF16_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF16_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF16_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF16_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF16_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF16_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF16_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF16_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf16_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF16_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF16_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF16_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF16_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF16_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF16_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF16_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF16_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF16_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF16_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf16_BIFDEC2
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF16_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF16_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf17_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF17_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF17_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF17_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF17_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF17_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF17_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF17_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf17_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF17_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF17_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF17_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF17_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF17_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF17_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF17_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF17_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF17_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf17_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF17_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF17_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF17_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF17_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF17_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF17_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF17_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF17_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF17_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF17_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf17_BIFDEC2
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF17_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF17_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf18_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF18_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF18_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF18_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF18_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF18_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF18_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF18_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf18_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF18_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF18_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF18_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF18_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF18_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF18_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF18_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF18_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF18_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf18_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF18_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF18_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF18_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF18_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF18_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF18_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF18_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF18_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF18_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF18_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf18_BIFDEC2
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF18_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF18_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf19_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF19_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF19_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF19_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF19_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF19_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF19_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF19_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf19_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF19_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF19_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF19_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF19_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF19_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF19_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF19_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF19_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF19_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf19_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF19_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF19_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF19_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF19_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF19_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF19_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF19_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF19_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF19_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF19_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf19_BIFDEC2
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF19_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF19_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
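/*
 * Illustrative sketch only (not part of the generated header or this commit):
 * each register field above is described by a SHIFT/MASK pair, and a field is
 * conventionally decoded by masking the raw 32-bit register value and shifting
 * it down. The helper name and the "gfxmsix_pba" parameter are hypothetical;
 * only the macro names come from the definitions above.
 */
static inline unsigned int vf19_gfxmsix_pending_bit0(unsigned int gfxmsix_pba)
{
	/* isolate MSIX_PENDING_BITS_0 (bit 0) of the VF19 GFXMSIX_PBA value */
	return (gfxmsix_pba & RCC_DEV0_EPF0_VF19_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK)
			>> RCC_DEV0_EPF0_VF19_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT;
}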
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf20_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF20_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF20_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF20_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF20_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF20_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF20_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF20_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf20_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF20_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF20_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF20_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF20_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF20_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF20_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF20_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF20_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF20_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf20_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF20_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF20_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF20_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF20_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF20_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF20_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF20_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF20_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF20_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF20_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf20_BIFDEC2
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF20_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF20_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
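Illustrative note (not part of the generated header): each register above is described by paired __SHIFT/_MASK macros, and drivers normally combine the two to extract or update a single field in a 32-bit register value. The minimal sketch below shows that pattern using the MAILBOX_CONTROL RCV_MSG_VALID field values defined above; reg_read32() is a hypothetical stand-in for a driver's MMIO read helper, not an API from this patch.

/*
 * Sketch: reading one bit-field with a SHIFT/MASK pair.
 * The macro values mirror BIF_BX_DEV0_EPF0_VF20_MAILBOX_CONTROL above.
 */
#include <stdint.h>
#include <stdio.h>

#define MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
#define MAILBOX_CONTROL__RCV_MSG_VALID_MASK   0x00000100L

/* Hypothetical MMIO accessor; here it just pretends RCV_MSG_VALID is set. */
static uint32_t reg_read32(void)
{
	return 0x00000100;
}

int main(void)
{
	uint32_t reg = reg_read32();
	/* Mask selects the field's bits, shift right-aligns the value. */
	uint32_t rcv_valid = (reg & MAILBOX_CONTROL__RCV_MSG_VALID_MASK) >>
			     MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT;

	printf("RCV_MSG_VALID = %u\n", rcv_valid);
	return 0;
}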
+// addressBlock: nbif_bif_bx_dev0_epf0_vf21_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF21_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF21_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF21_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF21_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF21_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF21_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF21_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf21_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF21_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF21_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF21_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF21_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF21_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF21_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF21_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF21_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF21_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf21_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF21_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF21_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF21_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF21_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF21_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF21_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF21_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF21_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF21_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF21_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf21_BIFDEC2
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF21_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF21_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf22_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF22_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF22_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF22_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF22_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF22_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF22_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF22_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf22_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF22_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF22_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF22_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF22_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF22_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF22_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF22_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF22_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF22_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf22_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF22_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF22_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF22_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF22_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF22_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF22_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF22_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF22_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF22_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF22_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf22_BIFDEC2
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF22_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF22_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf23_BIFPFVFDEC1
+//BIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS
+#define BIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L
+//BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L
+//BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_HIGH
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_LOW
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF23_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L
+//BIF_BX_DEV0_EPF0_VF23_HDP_REG_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF23_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF23_HDP_MEM_COHERENCY_FLUSH_CNTL
+#define BIF_BX_DEV0_EPF0_VF23_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+//BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_REQ__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG0__SHIFT 0xc
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG1__SHIFT 0xd
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG2__SHIFT 0xe
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG3__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG4__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG5__SHIFT 0x11
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG6__SHIFT 0x12
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG7__SHIFT 0x13
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG8__SHIFT 0x14
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG9__SHIFT 0x15
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG10__SHIFT 0x16
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG11__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG12__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG13__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG14__SHIFT 0x1a
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG15__SHIFT 0x1b
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG16__SHIFT 0x1c
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG17__SHIFT 0x1d
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG18__SHIFT 0x1e
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG19__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG9_MASK 0x00200000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG10_MASK 0x00400000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG11_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG12_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG13_MASK 0x02000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG14_MASK 0x04000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG15_MASK 0x08000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG16_MASK 0x10000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG17_MASK 0x20000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG18_MASK 0x40000000L
+#define BIF_BX_DEV0_EPF0_VF23_GPU_HDP_FLUSH_DONE__RSVD_ENG19_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING
+#define BIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW1
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW2
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW3
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW1
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW2
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW3
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L
+//BIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L
+//BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L
+#define BIF_BX_DEV0_EPF0_VF23_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L
+
+
+// addressBlock: nbif_bif_bx_dev0_epf0_vf23_SYSPFVFDEC
+//BIF_BX_DEV0_EPF0_VF23_MM_INDEX
+#define BIF_BX_DEV0_EPF0_VF23_MM_INDEX__MM_OFFSET__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MM_INDEX__MM_APER__SHIFT 0x1f
+#define BIF_BX_DEV0_EPF0_VF23_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL
+#define BIF_BX_DEV0_EPF0_VF23_MM_INDEX__MM_APER_MASK 0x80000000L
+//BIF_BX_DEV0_EPF0_VF23_MM_DATA
+#define BIF_BX_DEV0_EPF0_VF23_MM_DATA__MM_DATA__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL
+//BIF_BX_DEV0_EPF0_VF23_MM_INDEX_HI
+#define BIF_BX_DEV0_EPF0_VF23_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
+#define BIF_BX_DEV0_EPF0_VF23_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf23_BIFPFVFDEC1
+//RCC_DEV0_EPF0_VF23_RCC_ERR_LOG
+#define RCC_DEV0_EPF0_VF23_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF23_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF23_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L
+//RCC_DEV0_EPF0_VF23_RCC_DOORBELL_APER_EN
+#define RCC_DEV0_EPF0_VF23_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF23_RCC_CONFIG_MEMSIZE
+#define RCC_DEV0_EPF0_VF23_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_RCC_CONFIG_RESERVED
+#define RCC_DEV0_EPF0_VF23_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER
+#define RCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f
+#define RCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF23_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L
+
+
+// addressBlock: nbif_rcc_dev0_epf0_vf23_BIFDEC2
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_LO
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_HI
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_MSG_DATA
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_CONTROL
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_LO
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_HI
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_MSG_DATA
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_CONTROL
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_LO
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_HI
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_MSG_DATA
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_CONTROL
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_LO
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_HI
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_MSG_DATA
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_MSG_DATA__MSG_DATA__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL
+//RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_CONTROL
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_CONTROL__MASK_BIT__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_VECT3_CONTROL__MASK_BIT_MASK 0x00000001L
+//RCC_DEV0_EPF0_VF23_GFXMSIX_PBA
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
+#define RCC_DEV0_EPF0_VF23_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index 6f80bfa7e41a..5ebe4cb40f9d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -8900,6 +8900,8 @@
#define regGDC0_BIF_IH_DOORBELL_RANGE_BASE_IDX 3
#define regGDC0_BIF_VCN0_DOORBELL_RANGE 0x4f0af3
#define regGDC0_BIF_VCN0_DOORBELL_RANGE_BASE_IDX 3
+#define regGDC0_BIF_VPE1_DOORBELL_RANGE 0x4f0af4
+#define regGDC0_BIF_VPE1_DOORBELL_RANGE_BASE_IDX 3
#define regGDC0_BIF_RLC_DOORBELL_RANGE 0x4f0af5
#define regGDC0_BIF_RLC_DOORBELL_RANGE_BASE_IDX 3
#define regGDC0_BIF_SDMA2_DOORBELL_RANGE 0x4f0af6
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
index e0c28c29ddb0..a22481e7bcdb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
@@ -38896,13 +38896,5 @@
#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
-//PCIE_PERF_CNTL_TXCLK3
-#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0
-#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL
-
-//PCIE_PERF_CNTL_TXCLK7
-#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL__SHIFT 0x0
-#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL_MASK 0x000000FFL
-
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h
new file mode 100644
index 000000000000..45a961ef74ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_7_0_0_OFFSET_HEADER
+#define _osssys_7_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define regIH_VMID_0_LUT 0x0000
+#define regIH_VMID_0_LUT_BASE_IDX 0
+#define regIH_VMID_1_LUT 0x0001
+#define regIH_VMID_1_LUT_BASE_IDX 0
+#define regIH_VMID_2_LUT 0x0002
+#define regIH_VMID_2_LUT_BASE_IDX 0
+#define regIH_VMID_3_LUT 0x0003
+#define regIH_VMID_3_LUT_BASE_IDX 0
+#define regIH_VMID_4_LUT 0x0004
+#define regIH_VMID_4_LUT_BASE_IDX 0
+#define regIH_VMID_5_LUT 0x0005
+#define regIH_VMID_5_LUT_BASE_IDX 0
+#define regIH_VMID_6_LUT 0x0006
+#define regIH_VMID_6_LUT_BASE_IDX 0
+#define regIH_VMID_7_LUT 0x0007
+#define regIH_VMID_7_LUT_BASE_IDX 0
+#define regIH_VMID_8_LUT 0x0008
+#define regIH_VMID_8_LUT_BASE_IDX 0
+#define regIH_VMID_9_LUT 0x0009
+#define regIH_VMID_9_LUT_BASE_IDX 0
+#define regIH_VMID_10_LUT 0x000a
+#define regIH_VMID_10_LUT_BASE_IDX 0
+#define regIH_VMID_11_LUT 0x000b
+#define regIH_VMID_11_LUT_BASE_IDX 0
+#define regIH_VMID_12_LUT 0x000c
+#define regIH_VMID_12_LUT_BASE_IDX 0
+#define regIH_VMID_13_LUT 0x000d
+#define regIH_VMID_13_LUT_BASE_IDX 0
+#define regIH_VMID_14_LUT 0x000e
+#define regIH_VMID_14_LUT_BASE_IDX 0
+#define regIH_VMID_15_LUT 0x000f
+#define regIH_VMID_15_LUT_BASE_IDX 0
+#define regIH_VMID_0_LUT_MM 0x0010
+#define regIH_VMID_0_LUT_MM_BASE_IDX 0
+#define regIH_VMID_1_LUT_MM 0x0011
+#define regIH_VMID_1_LUT_MM_BASE_IDX 0
+#define regIH_VMID_2_LUT_MM 0x0012
+#define regIH_VMID_2_LUT_MM_BASE_IDX 0
+#define regIH_VMID_3_LUT_MM 0x0013
+#define regIH_VMID_3_LUT_MM_BASE_IDX 0
+#define regIH_VMID_4_LUT_MM 0x0014
+#define regIH_VMID_4_LUT_MM_BASE_IDX 0
+#define regIH_VMID_5_LUT_MM 0x0015
+#define regIH_VMID_5_LUT_MM_BASE_IDX 0
+#define regIH_VMID_6_LUT_MM 0x0016
+#define regIH_VMID_6_LUT_MM_BASE_IDX 0
+#define regIH_VMID_7_LUT_MM 0x0017
+#define regIH_VMID_7_LUT_MM_BASE_IDX 0
+#define regIH_VMID_8_LUT_MM 0x0018
+#define regIH_VMID_8_LUT_MM_BASE_IDX 0
+#define regIH_VMID_9_LUT_MM 0x0019
+#define regIH_VMID_9_LUT_MM_BASE_IDX 0
+#define regIH_VMID_10_LUT_MM 0x001a
+#define regIH_VMID_10_LUT_MM_BASE_IDX 0
+#define regIH_VMID_11_LUT_MM 0x001b
+#define regIH_VMID_11_LUT_MM_BASE_IDX 0
+#define regIH_VMID_12_LUT_MM 0x001c
+#define regIH_VMID_12_LUT_MM_BASE_IDX 0
+#define regIH_VMID_13_LUT_MM 0x001d
+#define regIH_VMID_13_LUT_MM_BASE_IDX 0
+#define regIH_VMID_14_LUT_MM 0x001e
+#define regIH_VMID_14_LUT_MM_BASE_IDX 0
+#define regIH_VMID_15_LUT_MM 0x001f
+#define regIH_VMID_15_LUT_MM_BASE_IDX 0
+#define regIH_COOKIE_0 0x0020
+#define regIH_COOKIE_0_BASE_IDX 0
+#define regIH_COOKIE_1 0x0021
+#define regIH_COOKIE_1_BASE_IDX 0
+#define regIH_COOKIE_2 0x0022
+#define regIH_COOKIE_2_BASE_IDX 0
+#define regIH_COOKIE_3 0x0023
+#define regIH_COOKIE_3_BASE_IDX 0
+#define regIH_COOKIE_4 0x0024
+#define regIH_COOKIE_4_BASE_IDX 0
+#define regIH_COOKIE_5 0x0025
+#define regIH_COOKIE_5_BASE_IDX 0
+#define regIH_COOKIE_6 0x0026
+#define regIH_COOKIE_6_BASE_IDX 0
+#define regIH_COOKIE_7 0x0027
+#define regIH_COOKIE_7_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART0 0x003f
+#define regIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define regIH_RB_CNTL 0x0080
+#define regIH_RB_CNTL_BASE_IDX 0
+#define regIH_RB_RPTR 0x0081
+#define regIH_RB_RPTR_BASE_IDX 0
+#define regIH_RB_WPTR 0x0082
+#define regIH_RB_WPTR_BASE_IDX 0
+#define regIH_RB_BASE 0x0083
+#define regIH_RB_BASE_BASE_IDX 0
+#define regIH_RB_BASE_HI 0x0084
+#define regIH_RB_BASE_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_HI 0x0085
+#define regIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_LO 0x0086
+#define regIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define regIH_DOORBELL_RPTR 0x0087
+#define regIH_DOORBELL_RPTR_BASE_IDX 0
+#define regIH_DOORBELL_RETRY_CAM 0x0088
+#define regIH_DOORBELL_RETRY_CAM_BASE_IDX 0
+#define regIH_RB_CNTL_RING1 0x008c
+#define regIH_RB_CNTL_RING1_BASE_IDX 0
+#define regIH_RB_RPTR_RING1 0x008d
+#define regIH_RB_RPTR_RING1_BASE_IDX 0
+#define regIH_RB_WPTR_RING1 0x008e
+#define regIH_RB_WPTR_RING1_BASE_IDX 0
+#define regIH_RB_BASE_RING1 0x008f
+#define regIH_RB_BASE_RING1_BASE_IDX 0
+#define regIH_RB_BASE_HI_RING1 0x0090
+#define regIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define regIH_DOORBELL_RPTR_RING1 0x0093
+#define regIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define regIH_RETRY_CAM_ACK 0x00a4
+#define regIH_RETRY_CAM_ACK_BASE_IDX 0
+#define regIH_VERSION 0x00a5
+#define regIH_VERSION_BASE_IDX 0
+#define regIH_CNTL 0x00a8
+#define regIH_CNTL_BASE_IDX 0
+#define regIH_CLK_CTRL 0x00a9
+#define regIH_CLK_CTRL_BASE_IDX 0
+#define regIH_STORM_CLIENT_LIST_CNTL 0x00aa
+#define regIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define regIH_LIMIT_INT_RATE_CNTL 0x00ab
+#define regIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define regIH_RETRY_INT_CAM_CNTL 0x00ac
+#define regIH_RETRY_INT_CAM_CNTL_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL 0x00ad
+#define regIH_MEM_POWER_CTRL_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL2 0x00ae
+#define regIH_MEM_POWER_CTRL2_BASE_IDX 0
+#define regIH_CNTL2 0x00c1
+#define regIH_CNTL2_BASE_IDX 0
+#define regIH_STATUS 0x00c2
+#define regIH_STATUS_BASE_IDX 0
+#define regIH_PERFMON_CNTL 0x00c3
+#define regIH_PERFMON_CNTL_BASE_IDX 0
+#define regIH_PERFCOUNTER0_RESULT 0x00c4
+#define regIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define regIH_PERFCOUNTER1_RESULT 0x00c5
+#define regIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define regIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define regIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define regIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define regIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define regIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define regIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_FCN_ID 0x00cc
+#define regIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define regIH_VF_RB_STATUS 0x00ce
+#define regIH_VF_RB_STATUS_BASE_IDX 0
+#define regIH_VF_RB_STATUS2 0x00cf
+#define regIH_VF_RB_STATUS2_BASE_IDX 0
+#define regIH_VF_RB1_STATUS 0x00d0
+#define regIH_VF_RB1_STATUS_BASE_IDX 0
+#define regIH_VF_RB1_STATUS2 0x00d1
+#define regIH_VF_RB1_STATUS2_BASE_IDX 0
+#define regIH_RB_STATUS 0x00d4
+#define regIH_RB_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_CNTL 0x00d5
+#define regIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define regIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define regIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define regIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_STATUS 0x00d9
+#define regIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLAGS 0x00dc
+#define regIH_INT_FLAGS_BASE_IDX 0
+#define regIH_SCRATCH 0x00e0
+#define regIH_SCRATCH_BASE_IDX 0
+#define regIH_CLIENT_CREDIT_ERROR 0x00e1
+#define regIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define regIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG2 0x00e3
+#define regIH_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regIH_COOKIE_REC_VIOLATION_LOG 0x00e4
+#define regIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define regIH_CREDIT_STATUS 0x00e5
+#define regIH_CREDIT_STATUS_BASE_IDX 0
+#define regIH_MMHUB_ERROR 0x00e6
+#define regIH_MMHUB_ERROR_BASE_IDX 0
+#define regIH_VF_RB_STATUS3 0x00ea
+#define regIH_VF_RB_STATUS3_BASE_IDX 0
+#define regIH_VF_RB_STATUS4 0x00eb
+#define regIH_VF_RB_STATUS4_BASE_IDX 0
+#define regIH_VF_RB1_STATUS3 0x00ec
+#define regIH_VF_RB1_STATUS3_BASE_IDX 0
+#define regIH_MSI_STORM_CTRL 0x00f1
+#define regIH_MSI_STORM_CTRL_BASE_IDX 0
+#define regIH_MSI_STORM_CLIENT_INDEX 0x00f2
+#define regIH_MSI_STORM_CLIENT_INDEX_BASE_IDX 0
+#define regIH_MSI_STORM_CLIENT_DATA 0x00f3
+#define regIH_MSI_STORM_CLIENT_DATA_BASE_IDX 0
+#define regIH_LAST_INT_INFO0 0x00f9
+#define regIH_LAST_INT_INFO0_BASE_IDX 0
+#define regIH_LAST_INT_INFO1 0x00fa
+#define regIH_LAST_INT_INFO1_BASE_IDX 0
+#define regIH_LAST_INT_INFO2 0x00fb
+#define regIH_LAST_INT_INFO2_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART2 0x00ff
+#define regIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define regSEM_MAILBOX 0x010a
+#define regSEM_MAILBOX_BASE_IDX 0
+#define regSEM_MAILBOX_CLEAR 0x010b
+#define regSEM_MAILBOX_CLEAR_BASE_IDX 0
+#define regSEM_REGISTER_LAST_PART2 0x017f
+#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define regIH_ACTIVE_FCN_ID 0x0180
+#define regIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define regIH_VIRT_RESET_REQ 0x0181
+#define regIH_VIRT_RESET_REQ_BASE_IDX 0
+#define regIH_CLIENT_CFG 0x0182
+#define regIH_CLIENT_CFG_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_INDEX 0x0183
+#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA 0x0184
+#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_INDEX 0x0185
+#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA 0x0186
+#define regIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA2 0x0187
+#define regIH_CLIENT_CFG_DATA2_BASE_IDX 0
+#define regIH_CID_REMAP_INDEX 0x0188
+#define regIH_CID_REMAP_INDEX_BASE_IDX 0
+#define regIH_CID_REMAP_DATA 0x0189
+#define regIH_CID_REMAP_DATA_BASE_IDX 0
+#define regIH_CHICKEN 0x018a
+#define regIH_CHICKEN_BASE_IDX 0
+#define regIH_INT_DROP_CNTL 0x018c
+#define regIH_INT_DROP_CNTL_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE0 0x018d
+#define regIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE1 0x018e
+#define regIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK0 0x018f
+#define regIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK1 0x0190
+#define regIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define regIH_MMHUB_CNTL 0x01a7
+#define regIH_MMHUB_CNTL_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART1 0x01a8
+#define regIH_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h
new file mode 100644
index 000000000000..a29607bc0db5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h
@@ -0,0 +1,1029 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_7_0_0_SH_MASK_HEADER
+#define _osssys_7_0_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_DOORBELL_RETRY_CAM
+#define IH_DOORBELL_RETRY_CAM__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RETRY_CAM__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RETRY_CAM__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RETRY_CAM__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RETRY_CAM_ACK
+#define IH_RETRY_CAM_ACK__INDEX__SHIFT 0x0
+#define IH_RETRY_CAM_ACK__INDEX_MASK 0x000003FFL
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x17
+#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE__SHIFT 0x18
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE_MASK 0x00800000L
+#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE_MASK 0x01000000L
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_RETRY_INT_CAM_CNTL
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8
+#define IH_RETRY_INT_CAM_CNTL__ENABLE__SHIFT 0x10
+#define IH_RETRY_INT_CAM_CNTL__MM_BACK_PRESSURE_ENABLE__SHIFT 0x11
+#define IH_RETRY_INT_CAM_CNTL__GC_BACK_PRESSURE_ENABLE__SHIFT 0x12
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00003F00L
+#define IH_RETRY_INT_CAM_CNTL__ENABLE_MASK 0x00010000L
+#define IH_RETRY_INT_CAM_CNTL__MM_BACK_PRESSURE_ENABLE_MASK 0x00020000L
+#define IH_RETRY_INT_CAM_CNTL__GC_BACK_PRESSURE_ENABLE_MASK 0x00040000L
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN__SHIFT 0x11
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN__SHIFT 0x12
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN__SHIFT 0x13
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN_MASK 0x00020000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN_MASK 0x00040000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN_MASK 0x00080000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L
+//IH_MEM_POWER_CTRL2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__RETRY_INT_CAM_IDLE__SHIFT 0x13
+#define IH_STATUS__ZSTATES_FENCE__SHIFT 0x14
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED__SHIFT 0x15
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED__SHIFT 0x16
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED__SHIFT 0x17
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+#define IH_STATUS__RETRY_INT_CAM_IDLE_MASK 0x00080000L
+#define IH_STATUS__ZSTATES_FENCE_MASK 0x00100000L
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED_MASK 0x00200000L
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED_MASK 0x00400000L
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED_MASK 0x00800000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x00000FFCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0FFC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN__SHIFT 0x7
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN_MASK 0x00000080L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x7
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001FL
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000080L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x00FFFFFFL
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x00FFFFFFL
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x00FFFFFFL
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x00FFFFFFL
+//IH_RB_STATUS
+#define IH_RB_STATUS__RB_FULL__SHIFT 0x0
+#define IH_RB_STATUS__RB_FULL_DRAIN__SHIFT 0x1
+#define IH_RB_STATUS__RB_OVERFLOW__SHIFT 0x2
+#define IH_RB_STATUS__RB1_FULL__SHIFT 0x4
+#define IH_RB_STATUS__RB1_FULL_DRAIN__SHIFT 0x5
+#define IH_RB_STATUS__RB1_OVERFLOW__SHIFT 0x6
+#define IH_RB_STATUS__RB_FULL_MASK 0x00000001L
+#define IH_RB_STATUS__RB_FULL_DRAIN_MASK 0x00000002L
+#define IH_RB_STATUS__RB_OVERFLOW_MASK 0x00000004L
+#define IH_RB_STATUS__RB1_FULL_MASK 0x00000010L
+#define IH_RB_STATUS__RB1_FULL_DRAIN_MASK 0x00000020L
+#define IH_RB_STATUS__RB1_OVERFLOW_MASK 0x00000040L
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x00FFFFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x00FFFFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1d
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x1F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x20000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x16
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x17
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00400000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00800000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x1F000000L
+//IH_GPU_IOV_VIOLATION_LOG2
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x8
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID__SHIFT 0x1a
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x0000FF00L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0x03FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID_MASK 0x3C000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_VF_RB_STATUS3
+#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF_MASK 0x00FFFFFFL
+//IH_VF_RB_STATUS4
+#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF_MASK 0x00FFFFFFL
+//IH_VF_RB1_STATUS3
+#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF_MASK 0x00FFFFFFL
+//IH_MSI_STORM_CTRL
+#define IH_MSI_STORM_CTRL__DELAY__SHIFT 0x0
+#define IH_MSI_STORM_CTRL__DELAY_MASK 0x00000FFFL
+//IH_MSI_STORM_CLIENT_INDEX
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX__SHIFT 0x0
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX_MASK 0x00000007L
+//IH_MSI_STORM_CLIENT_DATA
+#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE__SHIFT 0x11
+#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID__SHIFT 0x1f
+#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
+#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE_MASK 0x00020000L
+#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x17
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x001F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00800000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CLEAR
+#define SEM_MAILBOX_CLEAR__CLEAR__SHIFT 0x0
+#define SEM_MAILBOX_CLEAR__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CLEAR__CLEAR_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CLEAR__RESERVED_MASK 0xFFFF0000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x5
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000001FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFE0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x00FFFFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL
+//IH_RING1_CLIENT_CFG_INDEX
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
+//IH_RING1_CLIENT_CFG_DATA
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE__SHIFT 0x19
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE_MASK 0x02000000L
+//IH_CLIENT_CFG_DATA2
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR_MASK 0xFFFFFFFFL
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x18
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0003FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0xFF000000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__DBGU_TRIGGER_ENABLE__SHIFT 0x1
+#define IH_CHICKEN__CROSS_TRIGGER_ENABLE__SHIFT 0x2
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__REG_FIREWALL_ENABLE__SHIFT 0x5
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__DBGU_TRIGGER_ENABLE_MASK 0x00000002L
+#define IH_CHICKEN__CROSS_TRIGGER_ENABLE_MASK 0x00000004L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+#define IH_CHICKEN__REG_FIREWALL_ENABLE_MASK 0x00000020L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x001F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x001F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000F00L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x0000F000L
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_offset.h
new file mode 100644
index 000000000000..d725cc063955
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_offset.h
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _pcie_6_1_0_OFFSET_HEADER
+#define _pcie_6_1_0_OFFSET_HEADER
+
+
+// addressBlock: pcie_container_pcs0_pcie_lcu_pcie_pcs_prime_pcie_master_x1_xx16_pcs_prime_dir
+// base address: 0x11a08000
+#define regDXIO_HWDID 0x2270800
+#define regDXIO_HWDID_BASE_IDX 0
+#define regDXIO_LINKAGE_LANEGRP 0x2270802
+#define regDXIO_LINKAGE_LANEGRP_BASE_IDX 0
+#define regDXIO_LINKAGE_KPDMX 0x2270803
+#define regDXIO_LINKAGE_KPDMX_BASE_IDX 0
+#define regDXIO_LINKAGE_KPMX 0x2270804
+#define regDXIO_LINKAGE_KPFIFO 0x2270805
+#define regDXIO_LINKAGE_KPNP 0x2270806
+#define regMAC_CAPABILITIES1 0x2270814
+#define regMAC_CAPABILITIES1_BASE_IDX 0
+#define regMAC_CAPABILITIES2 0x2270815
+#define regMAC_CAPABILITIES2_BASE_IDX 0
+
+
+// addressBlock: pcie_container_pcie0_pswuscfg0_cfgdecp
+// base address: 0x1a300000
+#define regCOMMAND 0x0001
+#define regCOMMAND_BASE_IDX 1
+#define regSTATUS 0x0001
+#define regSTATUS_BASE_IDX 1
+#define regLATENCY 0x0003
+#define regLATENCY_BASE_IDX 1
+#define regHEADER 0x0003
+#define regHEADER_BASE_IDX 1
+#define regPCIE_LANE_ERROR_STATUS 0x009e
+#define regPCIE_LANE_ERROR_STATUS_BASE_IDX 1
+#define regPCIE_LANE_0_EQUALIZATION_CNTL 0x009f
+#define regPCIE_LANE_0_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_1_EQUALIZATION_CNTL 0x009f
+#define regPCIE_LANE_1_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_2_EQUALIZATION_CNTL 0x00a0
+#define regPCIE_LANE_2_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_3_EQUALIZATION_CNTL 0x00a0
+#define regPCIE_LANE_3_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_4_EQUALIZATION_CNTL 0x00a1
+#define regPCIE_LANE_4_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_5_EQUALIZATION_CNTL 0x00a1
+#define regPCIE_LANE_5_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_6_EQUALIZATION_CNTL 0x00a2
+#define regPCIE_LANE_6_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_7_EQUALIZATION_CNTL 0x00a2
+#define regPCIE_LANE_7_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_8_EQUALIZATION_CNTL 0x00a3
+#define regPCIE_LANE_8_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_9_EQUALIZATION_CNTL 0x00a3
+#define regPCIE_LANE_9_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_10_EQUALIZATION_CNTL 0x00a4
+#define regPCIE_LANE_10_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_11_EQUALIZATION_CNTL 0x00a4
+#define regPCIE_LANE_11_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_12_EQUALIZATION_CNTL 0x00a5
+#define regPCIE_LANE_12_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_13_EQUALIZATION_CNTL 0x00a5
+#define regPCIE_LANE_13_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_14_EQUALIZATION_CNTL 0x00a6
+#define regPCIE_LANE_14_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LANE_15_EQUALIZATION_CNTL 0x00a6
+#define regPCIE_LANE_15_EQUALIZATION_CNTL_BASE_IDX 1
+#define regPCIE_LTR_ENH_CAP_LIST 0x00c8
+#define regPCIE_LTR_ENH_CAP_LIST_BASE_IDX 1
+#define regPCIE_LTR_CAP 0x00c9
+#define regPCIE_LTR_CAP_BASE_IDX 1
+#define regPCIE_L1_PM_SUB_CAP_LIST 0x00dc
+#define regPCIE_L1_PM_SUB_CAP_LIST_BASE_IDX 1
+#define regPCIE_L1_PM_SUB_CAP 0x00dd
+#define regPCIE_L1_PM_SUB_CAP_BASE_IDX 1
+#define regPCIE_L1_PM_SUB_CNTL 0x00de
+#define regPCIE_L1_PM_SUB_CNTL_BASE_IDX 1
+#define regPCIE_L1_PM_SUB_CNTL2 0x00df
+#define regPCIE_L1_PM_SUB_CNTL2_BASE_IDX 1
+#define regPCIE_MARGINING_ENH_CAP_LIST 0x0110
+#define regPCIE_MARGINING_ENH_CAP_LIST_BASE_IDX 1
+
+
+// addressBlock: pcie_container_pcie0_pswusp0_pciedir_p
+// base address: 0x1a340000
+#define regPCIEP_RESERVED 0x10000
+#define regPCIEP_RESERVED_BASE_IDX 1
+#define regPCIEP_SCRATCH 0x10001
+#define regPCIEP_SCRATCH_BASE_IDX 1
+#define regPCIEP_PORT_CNTL 0x10010
+#define regPCIEP_PORT_CNTL_BASE_IDX 1
+#define regPCIE_TX_REQUESTER_ID 0x10021
+#define regPCIE_TX_REQUESTER_ID_BASE_IDX 1
+#define regPCIE_P_PORT_LANE_STATUS 0x10050
+#define regPCIE_P_PORT_LANE_STATUS_BASE_IDX 1
+#define regPCIE_ERR_CNTL 0x1006a
+#define regPCIE_ERR_CNTL_BASE_IDX 1
+#define regPCIE_RX_CNTL 0x10070
+#define regPCIE_RX_CNTL_BASE_IDX 1
+#define regPCIE_RX_EXPECTED_SEQNUM 0x10071
+#define regPCIE_RX_EXPECTED_SEQNUM_BASE_IDX 1
+#define regPCIE_RX_VENDOR_SPECIFIC 0x10072
+#define regPCIE_RX_VENDOR_SPECIFIC_BASE_IDX 1
+#define regPCIE_RX_CNTL3 0x10074
+#define regPCIE_RX_CNTL3_BASE_IDX 1
+#define regPCIE_RX_CREDITS_ALLOCATED_P 0x10080
+#define regPCIE_RX_CREDITS_ALLOCATED_P_BASE_IDX 1
+#define regPCIE_RX_CREDITS_ALLOCATED_NP 0x10081
+#define regPCIE_RX_CREDITS_ALLOCATED_NP_BASE_IDX 1
+#define regPCIE_RX_CREDITS_ALLOCATED_CPL 0x10082
+#define regPCIE_RX_CREDITS_ALLOCATED_CPL_BASE_IDX 1
+#define regPCIEP_ERROR_INJECT_PHYSICAL 0x10083
+#define regPCIEP_ERROR_INJECT_PHYSICAL_BASE_IDX 1
+#define regPCIEP_ERROR_INJECT_TRANSACTION 0x10084
+#define regPCIEP_ERROR_INJECT_TRANSACTION_BASE_IDX 1
+#define regPCIEP_NAK_COUNTER 0x10086
+#define regPCIEP_NAK_COUNTER_BASE_IDX 1
+#define regPCIE_LC_CNTL 0x100a0
+#define regPCIE_LC_CNTL_BASE_IDX 1
+#define regPCIE_LC_TRAINING_CNTL 0x100a1
+#define regPCIE_LC_TRAINING_CNTL_BASE_IDX 1
+#define regPCIE_LC_LINK_WIDTH_CNTL 0x100a2
+#define regPCIE_LC_LINK_WIDTH_CNTL_BASE_IDX 1
+#define regPCIE_LC_N_FTS_CNTL 0x100a3
+#define regPCIE_LC_N_FTS_CNTL_BASE_IDX 1
+#define regPCIE_LC_SPEED_CNTL 0x100a4
+#define regPCIE_LC_SPEED_CNTL_BASE_IDX 1
+#define regPCIE_LC_STATE0 0x100a5
+#define regPCIE_LC_STATE0_BASE_IDX 1
+#define regPCIE_LC_STATE1 0x100a6
+#define regPCIE_LC_STATE1_BASE_IDX 1
+#define regPCIE_LC_STATE2 0x100a7
+#define regPCIE_LC_STATE2_BASE_IDX 1
+#define regPCIE_LC_STATE3 0x100a8
+#define regPCIE_LC_STATE3_BASE_IDX 1
+#define regPCIE_LC_STATE4 0x100a9
+#define regPCIE_LC_STATE4_BASE_IDX 1
+#define regPCIE_LC_STATE5 0x100aa
+#define regPCIE_LC_STATE5_BASE_IDX 1
+#define regPCIE_LC_LINK_MANAGEMENT_CNTL2 0x100ab
+#define regPCIE_LC_LINK_MANAGEMENT_CNTL2_BASE_IDX 1
+#define regPCIE_LC_CNTL2 0x100b1
+#define regPCIE_LC_CNTL2_BASE_IDX 1
+#define regPCIE_LC_BW_CHANGE_CNTL 0x100b2
+#define regPCIE_LC_BW_CHANGE_CNTL_BASE_IDX 1
+#define regPCIE_LC_CDR_CNTL 0x100b3
+#define regPCIE_LC_CDR_CNTL_BASE_IDX 1
+#define regPCIE_LC_LANE_CNTL 0x100b4
+#define regPCIE_LC_LANE_CNTL_BASE_IDX 1
+#define regPCIE_LC_CNTL3 0x100b5
+#define regPCIE_LC_CNTL3_BASE_IDX 1
+#define regPCIE_LC_CNTL4 0x100b6
+#define regPCIE_LC_CNTL4_BASE_IDX 1
+#define regPCIE_LC_CNTL5 0x100b7
+#define regPCIE_LC_CNTL5_BASE_IDX 1
+#define regPCIE_LC_FORCE_COEFF 0x100b8
+#define regPCIE_LC_FORCE_COEFF_BASE_IDX 1
+#define regPCIE_LC_BEST_EQ_SETTINGS 0x100b9
+#define regPCIE_LC_BEST_EQ_SETTINGS_BASE_IDX 1
+#define regPCIE_LC_FORCE_EQ_REQ_COEFF 0x100ba
+#define regPCIE_LC_FORCE_EQ_REQ_COEFF_BASE_IDX 1
+#define regPCIE_LC_CNTL6 0x100bb
+#define regPCIE_LC_CNTL6_BASE_IDX 1
+#define regPCIE_LC_CNTL7 0x100bc
+#define regPCIE_LC_CNTL7_BASE_IDX 1
+#define regPCIE_LC_LINK_MANAGEMENT_STATUS 0x100bd
+#define regPCIE_LC_LINK_MANAGEMENT_STATUS_BASE_IDX 1
+#define regPCIE_LC_LINK_MANAGEMENT_MASK 0x100be
+#define regPCIE_LC_LINK_MANAGEMENT_MASK_BASE_IDX 1
+#define regPCIE_LC_LINK_MANAGEMENT_CNTL 0x100bf
+#define regPCIE_LC_LINK_MANAGEMENT_CNTL_BASE_IDX 1
+#define regPCIEP_STRAP_LC 0x100c0
+#define regPCIEP_STRAP_LC_BASE_IDX 1
+#define regPCIEP_STRAP_MISC 0x100c1
+#define regPCIEP_STRAP_MISC_BASE_IDX 1
+#define regPCIEP_STRAP_LC2 0x100c2
+#define regPCIEP_STRAP_LC2_BASE_IDX 1
+#define regPCIE_LC_L1_PM_SUBSTATE 0x100c6
+#define regPCIE_LC_L1_PM_SUBSTATE_BASE_IDX 1
+#define regPCIE_LC_L1_PM_SUBSTATE2 0x100c7
+#define regPCIE_LC_L1_PM_SUBSTATE2_BASE_IDX 1
+#define regPCIE_LC_L1_PM_SUBSTATE3 0x100c8
+#define regPCIE_LC_L1_PM_SUBSTATE3_BASE_IDX 1
+#define regPCIE_LC_L1_PM_SUBSTATE4 0x100c9
+#define regPCIE_LC_L1_PM_SUBSTATE4_BASE_IDX 1
+#define regPCIE_LC_L1_PM_SUBSTATE5 0x100ca
+#define regPCIE_LC_L1_PM_SUBSTATE5_BASE_IDX 1
+#define regPCIEP_BCH_ECC_CNTL 0x100d0
+#define regPCIEP_BCH_ECC_CNTL_BASE_IDX 1
+#define regPCIE_LC_CNTL8 0x100dd
+#define regPCIE_LC_CNTL8_BASE_IDX 1
+#define regPCIE_LC_CNTL9 0x100de
+#define regPCIE_LC_CNTL9_BASE_IDX 1
+#define regPCIE_LC_FORCE_COEFF2 0x100df
+#define regPCIE_LC_FORCE_COEFF2_BASE_IDX 1
+#define regPCIE_LC_FORCE_EQ_REQ_COEFF2 0x100e0
+#define regPCIE_LC_FORCE_EQ_REQ_COEFF2_BASE_IDX 1
+#define regPCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES 0x100e2
+#define regPCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES_BASE_IDX 1
+#define regPCIE_LC_CNTL10 0x100e3
+#define regPCIE_LC_CNTL10_BASE_IDX 1
+#define regPCIE_LC_EQ_CNTL_8GT 0x100e4
+#define regPCIE_LC_EQ_CNTL_8GT_BASE_IDX 1
+#define regPCIE_LC_EQ_CNTL_16GT 0x100e5
+#define regPCIE_LC_EQ_CNTL_16GT_BASE_IDX 1
+#define regPCIE_LC_SAVE_RESTORE_1 0x100e6
+#define regPCIE_LC_SAVE_RESTORE_1_BASE_IDX 1
+#define regPCIE_LC_SAVE_RESTORE_2 0x100e7
+#define regPCIE_LC_SAVE_RESTORE_2_BASE_IDX 1
+#define regPCIE_LC_SAVE_RESTORE_3 0x100e8
+#define regPCIE_LC_SAVE_RESTORE_3_BASE_IDX 1
+#define regPCIE_LC_EQ_CNTL_32GT 0x10100
+#define regPCIE_LC_EQ_CNTL_32GT_BASE_IDX 1
+#define regPCIE_LC_PRESET_MASK_CNTL 0x10101
+#define regPCIE_LC_PRESET_MASK_CNTL_BASE_IDX 1
+#define regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL 0x10102
+#define regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL_BASE_IDX 1
+#define regPCIE_LC_CNTL11 0x10103
+#define regPCIE_LC_CNTL11_BASE_IDX 1
+#define regPCIE_LC_CNTL12 0x10104
+#define regPCIE_LC_CNTL12_BASE_IDX 1
+#define regPCIE_LC_SPEED_CNTL2 0x10105
+#define regPCIE_LC_SPEED_CNTL2_BASE_IDX 1
+#define regPCIE_LC_FORCE_COEFF3 0x10106
+#define regPCIE_LC_FORCE_COEFF3_BASE_IDX 1
+#define regPCIE_LC_FORCE_EQ_REQ_COEFF3 0x10107
+#define regPCIE_LC_FORCE_EQ_REQ_COEFF3_BASE_IDX 1
+#define regPCIE_LC_LINK_MANAGEMENT_CNTL3 0x10108
+#define regPCIE_LC_LINK_MANAGEMENT_CNTL3_BASE_IDX 1
+#define regPCIE_LC_Z10_IDLE_CNTL 0x1010f
+#define regPCIE_LC_Z10_IDLE_CNTL_BASE_IDX 1
+#define regPCIE_LC_TRANMIT_FIFO_CDC_CNTL 0x1011a
+#define regPCIE_LC_TRANMIT_FIFO_CDC_CNTL_BASE_IDX 1
+#define regPCIE_LC_CNTL13 0x1011c
+#define regPCIE_LC_CNTL13_BASE_IDX 1
+#define regPCIE_LC_SWDS_CNTL 0x1011d
+#define regPCIE_LC_SWDS_CNTL_BASE_IDX 1
+#define regPCIE_TX_SEQ 0x10188
+#define regPCIE_TX_SEQ_BASE_IDX 1
+#define regPCIE_TX_REPLAY 0x10189
+#define regPCIE_TX_REPLAY_BASE_IDX 1
+#define regPCIE_TX_ACK_LATENCY_LIMIT 0x1018c
+#define regPCIE_TX_ACK_LATENCY_LIMIT_BASE_IDX 1
+#define regPCIE_TX_CREDITS_FCU_THRESHOLD 0x10190
+#define regPCIE_TX_CREDITS_FCU_THRESHOLD_BASE_IDX 1
+#define regPCIE_TX_VENDOR_SPECIFIC 0x10194
+#define regPCIE_TX_VENDOR_SPECIFIC_BASE_IDX 1
+#define regPCIE_TX_NOP_DLLP 0x10195
+#define regPCIE_TX_NOP_DLLP_BASE_IDX 1
+#define regPCIE_TX_REQUEST_NUM_CNTL 0x10198
+#define regPCIE_TX_REQUEST_NUM_CNTL_BASE_IDX 1
+#define regPCIE_TX_CREDITS_ADVT_P 0x101a0
+#define regPCIE_TX_CREDITS_ADVT_P_BASE_IDX 1
+#define regPCIE_TX_CREDITS_ADVT_NP 0x101a1
+#define regPCIE_TX_CREDITS_ADVT_NP_BASE_IDX 1
+#define regPCIE_TX_CREDITS_ADVT_CPL 0x101a2
+#define regPCIE_TX_CREDITS_ADVT_CPL_BASE_IDX 1
+#define regPCIE_TX_CREDITS_INIT_P 0x101a3
+#define regPCIE_TX_CREDITS_INIT_P_BASE_IDX 1
+#define regPCIE_TX_CREDITS_INIT_NP 0x101a4
+#define regPCIE_TX_CREDITS_INIT_NP_BASE_IDX 1
+#define regPCIE_TX_CREDITS_INIT_CPL 0x101a5
+#define regPCIE_TX_CREDITS_INIT_CPL_BASE_IDX 1
+#define regPCIE_TX_CREDITS_STATUS 0x101a6
+#define regPCIE_TX_CREDITS_STATUS_BASE_IDX 1
+#define regPCIE_FC_P 0x101a8
+#define regPCIE_FC_P_BASE_IDX 1
+#define regPCIE_FC_NP 0x101a9
+#define regPCIE_FC_NP_BASE_IDX 1
+#define regPCIE_FC_CPL 0x101aa
+#define regPCIE_FC_CPL_BASE_IDX 1
+#define regPCIE_FC_P_VC1 0x101ab
+#define regPCIE_FC_P_VC1_BASE_IDX 1
+#define regPCIE_FC_NP_VC1 0x101ac
+#define regPCIE_FC_NP_VC1_BASE_IDX 1
+#define regPCIE_FC_CPL_VC1 0x101ad
+#define regPCIE_FC_CPL_VC1_BASE_IDX 1
+
+
+// addressBlock: pcie_container_pcie0_pciedir
+// base address: 0x1a380000
+#define regPCIE_RESERVED 0x20000
+#define regPCIE_RESERVED_BASE_IDX 1
+#define regPCIE_SCRATCH 0x20001
+#define regPCIE_SCRATCH_BASE_IDX 1
+#define regPCIE_RX_NUM_NAK 0x2000e
+#define regPCIE_RX_NUM_NAK_BASE_IDX 1
+#define regPCIE_RX_NUM_NAK_GENERATED 0x2000f
+#define regPCIE_RX_NUM_NAK_GENERATED_BASE_IDX 1
+#define regPCIE_CNTL 0x20010
+#define regPCIE_CNTL_BASE_IDX 1
+#define regPCIE_CONFIG_CNTL 0x20011
+#define regPCIE_CONFIG_CNTL_BASE_IDX 1
+#define regPCIE_DEBUG_CNTL 0x20012
+#define regPCIE_DEBUG_CNTL_BASE_IDX 1
+#define regPCIE_RX_CNTL5 0x20018
+#define regPCIE_RX_CNTL5_BASE_IDX 1
+#define regPCIE_RX_CNTL4 0x20019
+#define regPCIE_RX_CNTL4_BASE_IDX 1
+#define regPCIE_COMMON_AER_MASK 0x2001a
+#define regPCIE_COMMON_AER_MASK_BASE_IDX 1
+#define regPCIE_CNTL2 0x2001c
+#define regPCIE_CNTL2_BASE_IDX 1
+#define regPCIE_RX_CNTL2 0x2001d
+#define regPCIE_RX_CNTL2_BASE_IDX 1
+#define regPCIE_CI_CNTL 0x20020
+#define regPCIE_CI_CNTL_BASE_IDX 1
+#define regPCIE_BUS_CNTL 0x20021
+#define regPCIE_BUS_CNTL_BASE_IDX 1
+#define regPCIE_LC_STATE6 0x20022
+#define regPCIE_LC_STATE6_BASE_IDX 1
+#define regPCIE_LC_STATE7 0x20023
+#define regPCIE_LC_STATE7_BASE_IDX 1
+#define regPCIE_LC_STATE8 0x20024
+#define regPCIE_LC_STATE8_BASE_IDX 1
+#define regPCIE_LC_STATE9 0x20025
+#define regPCIE_LC_STATE9_BASE_IDX 1
+#define regPCIE_LC_STATE10 0x20026
+#define regPCIE_LC_STATE10_BASE_IDX 1
+#define regPCIE_LC_STATE11 0x20027
+#define regPCIE_LC_STATE11_BASE_IDX 1
+#define regPCIE_LC_STATUS1 0x20028
+#define regPCIE_LC_STATUS1_BASE_IDX 1
+#define regPCIE_LC_STATUS2 0x20029
+#define regPCIE_LC_STATUS2_BASE_IDX 1
+#define regPCIE_WPR_CNTL 0x20030
+#define regPCIE_WPR_CNTL_BASE_IDX 1
+#define regPCIE_RX_LAST_TLP0 0x20031
+#define regPCIE_RX_LAST_TLP0_BASE_IDX 1
+#define regPCIE_RX_LAST_TLP1 0x20032
+#define regPCIE_RX_LAST_TLP1_BASE_IDX 1
+#define regPCIE_RX_LAST_TLP2 0x20033
+#define regPCIE_RX_LAST_TLP2_BASE_IDX 1
+#define regPCIE_RX_LAST_TLP3 0x20034
+#define regPCIE_RX_LAST_TLP3_BASE_IDX 1
+#define regPCIE_I2C_REG_ADDR_EXPAND 0x2003a
+#define regPCIE_I2C_REG_ADDR_EXPAND_BASE_IDX 1
+#define regPCIE_I2C_REG_DATA 0x2003b
+#define regPCIE_I2C_REG_DATA_BASE_IDX 1
+#define regPCIE_CFG_CNTL 0x2003c
+#define regPCIE_CFG_CNTL_BASE_IDX 1
+#define regPCIE_LC_PM_CNTL 0x2003d
+#define regPCIE_LC_PM_CNTL_BASE_IDX 1
+#define regPCIE_LC_PM_CNTL2 0x2003e
+#define regPCIE_LC_PM_CNTL2_BASE_IDX 1
+#define regPCIE_LC_STRAP_BUFF_CNTL 0x2003f
+#define regPCIE_LC_STRAP_BUFF_CNTL_BASE_IDX 1
+#define regPCIE_P_CNTL 0x20040
+#define regPCIE_P_CNTL_BASE_IDX 1
+#define regPCIE_P_BUF_STATUS 0x20041
+#define regPCIE_P_BUF_STATUS_BASE_IDX 1
+#define regPCIE_P_DECODER_STATUS 0x20042
+#define regPCIE_P_DECODER_STATUS_BASE_IDX 1
+#define regPCIE_P_MISC_STATUS 0x20043
+#define regPCIE_P_MISC_STATUS_BASE_IDX 1
+#define regPCIE_P_RCV_L0S_FTS_DET 0x20050
+#define regPCIE_P_RCV_L0S_FTS_DET_BASE_IDX 1
+#define regPCIE_RX_AD 0x20062
+#define regPCIE_RX_AD_BASE_IDX 1
+#define regPCIE_SDP_CTRL 0x20063
+#define regPCIE_SDP_CTRL_BASE_IDX 1
+#define regPCIE_SDP_SWUS_SLV_ATTR_CTRL 0x20065
+#define regPCIE_SDP_SWUS_SLV_ATTR_CTRL_BASE_IDX 1
+#define regPCIE_SDP_CTRL2 0x20068
+#define regPCIE_SDP_CTRL2_BASE_IDX 1
+#define regPCIE_PERF_COUNT_CNTL 0x20080
+#define regPCIE_PERF_COUNT_CNTL_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK1 0x20081
+#define regPCIE_PERF_CNTL_TXCLK1_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK1 0x20082
+#define regPCIE_PERF_COUNT0_TXCLK1_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK1 0x20083
+#define regPCIE_PERF_COUNT1_TXCLK1_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK2 0x20084
+#define regPCIE_PERF_CNTL_TXCLK2_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK2 0x20085
+#define regPCIE_PERF_COUNT0_TXCLK2_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK2 0x20086
+#define regPCIE_PERF_COUNT1_TXCLK2_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK3 0x20087
+#define regPCIE_PERF_CNTL_TXCLK3_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK3 0x20088
+#define regPCIE_PERF_COUNT0_TXCLK3_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK3 0x20089
+#define regPCIE_PERF_COUNT1_TXCLK3_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK4 0x2008a
+#define regPCIE_PERF_CNTL_TXCLK4_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK4 0x2008b
+#define regPCIE_PERF_COUNT0_TXCLK4_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK4 0x2008c
+#define regPCIE_PERF_COUNT1_TXCLK4_BASE_IDX 1
+#define regPCIE_PERF_CNTL_EVENT_LC_PORT_SEL 0x20093
+#define regPCIE_PERF_CNTL_EVENT_LC_PORT_SEL_BASE_IDX 1
+#define regPCIE_PERF_CNTL_EVENT_CI_PORT_SEL 0x20094
+#define regPCIE_PERF_CNTL_EVENT_CI_PORT_SEL_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK5 0x20096
+#define regPCIE_PERF_CNTL_TXCLK5_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK5 0x20097
+#define regPCIE_PERF_COUNT0_TXCLK5_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK5 0x20098
+#define regPCIE_PERF_COUNT1_TXCLK5_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK6 0x20099
+#define regPCIE_PERF_CNTL_TXCLK6_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK6 0x2009a
+#define regPCIE_PERF_COUNT0_TXCLK6_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK6 0x2009b
+#define regPCIE_PERF_COUNT1_TXCLK6_BASE_IDX 1
+#define regPCIE_STRAP_F0 0x200b0
+#define regPCIE_STRAP_F0_BASE_IDX 1
+#define regPCIE_STRAP_MISC 0x200c0
+#define regPCIE_STRAP_MISC_BASE_IDX 1
+#define regPCIE_STRAP_MISC2 0x200c1
+#define regPCIE_STRAP_MISC2_BASE_IDX 1
+#define regPCIE_STRAP_PI 0x200c2
+#define regPCIE_STRAP_PI_BASE_IDX 1
+#define regPCIE_STRAP_I2C_BD 0x200c4
+#define regPCIE_STRAP_I2C_BD_BASE_IDX 1
+#define regPCIE_PRBS_CLR 0x200c8
+#define regPCIE_PRBS_CLR_BASE_IDX 1
+#define regPCIE_PRBS_STATUS1 0x200c9
+#define regPCIE_PRBS_STATUS1_BASE_IDX 1
+#define regPCIE_PRBS_STATUS2 0x200ca
+#define regPCIE_PRBS_STATUS2_BASE_IDX 1
+#define regPCIE_PRBS_FREERUN 0x200cb
+#define regPCIE_PRBS_FREERUN_BASE_IDX 1
+#define regPCIE_PRBS_MISC 0x200cc
+#define regPCIE_PRBS_MISC_BASE_IDX 1
+#define regPCIE_PRBS_USER_PATTERN 0x200cd
+#define regPCIE_PRBS_USER_PATTERN_BASE_IDX 1
+#define regPCIE_PRBS_LO_BITCNT 0x200ce
+#define regPCIE_PRBS_LO_BITCNT_BASE_IDX 1
+#define regPCIE_PRBS_HI_BITCNT 0x200cf
+#define regPCIE_PRBS_HI_BITCNT_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_0 0x200d0
+#define regPCIE_PRBS_ERRCNT_0_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_1 0x200d1
+#define regPCIE_PRBS_ERRCNT_1_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_2 0x200d2
+#define regPCIE_PRBS_ERRCNT_2_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_3 0x200d3
+#define regPCIE_PRBS_ERRCNT_3_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_4 0x200d4
+#define regPCIE_PRBS_ERRCNT_4_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_5 0x200d5
+#define regPCIE_PRBS_ERRCNT_5_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_6 0x200d6
+#define regPCIE_PRBS_ERRCNT_6_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_7 0x200d7
+#define regPCIE_PRBS_ERRCNT_7_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_8 0x200d8
+#define regPCIE_PRBS_ERRCNT_8_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_9 0x200d9
+#define regPCIE_PRBS_ERRCNT_9_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_10 0x200da
+#define regPCIE_PRBS_ERRCNT_10_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_11 0x200db
+#define regPCIE_PRBS_ERRCNT_11_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_12 0x200dc
+#define regPCIE_PRBS_ERRCNT_12_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_13 0x200dd
+#define regPCIE_PRBS_ERRCNT_13_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_14 0x200de
+#define regPCIE_PRBS_ERRCNT_14_BASE_IDX 1
+#define regPCIE_PRBS_ERRCNT_15 0x200df
+#define regPCIE_PRBS_ERRCNT_15_BASE_IDX 1
+#define regSWRST_COMMAND_STATUS 0x20100
+#define regSWRST_COMMAND_STATUS_BASE_IDX 1
+#define regSWRST_GENERAL_CONTROL 0x20101
+#define regSWRST_GENERAL_CONTROL_BASE_IDX 1
+#define regSWRST_COMMAND_0 0x20102
+#define regSWRST_COMMAND_0_BASE_IDX 1
+#define regSWRST_COMMAND_1 0x20103
+#define regSWRST_COMMAND_1_BASE_IDX 1
+#define regSWRST_CONTROL_0 0x20104
+#define regSWRST_CONTROL_0_BASE_IDX 1
+#define regSWRST_CONTROL_1 0x20105
+#define regSWRST_CONTROL_1_BASE_IDX 1
+#define regSWRST_CONTROL_2 0x20106
+#define regSWRST_CONTROL_2_BASE_IDX 1
+#define regSWRST_CONTROL_3 0x20107
+#define regSWRST_CONTROL_3_BASE_IDX 1
+#define regSWRST_CONTROL_4 0x20108
+#define regSWRST_CONTROL_4_BASE_IDX 1
+#define regSWRST_CONTROL_5 0x20109
+#define regSWRST_CONTROL_5_BASE_IDX 1
+#define regSWRST_CONTROL_6 0x2010a
+#define regSWRST_CONTROL_6_BASE_IDX 1
+#define regSWRST_EP_COMMAND_0 0x2010b
+#define regSWRST_EP_COMMAND_0_BASE_IDX 1
+#define regSWRST_EP_CONTROL_0 0x2010c
+#define regSWRST_EP_CONTROL_0_BASE_IDX 1
+#define regCPM_CONTROL 0x20118
+#define regCPM_CONTROL_BASE_IDX 1
+#define regCPM_SPLIT_CONTROL 0x20119
+#define regCPM_SPLIT_CONTROL_BASE_IDX 1
+#define regCPM_CONTROL_EXT 0x2011a
+#define regCPM_CONTROL_EXT_BASE_IDX 1
+#define regCLKREQB_PAD_CNTL 0x2011b
+#define regCLKREQB_PAD_CNTL_BASE_IDX 1
+#define regSMN_APERTURE_ID_A 0x2011d
+#define regSMN_APERTURE_ID_A_BASE_IDX 1
+#define regSMN_APERTURE_ID_B 0x2011e
+#define regSMN_APERTURE_ID_B_BASE_IDX 1
+#define regLNCNT_CONTROL 0x20125
+#define regLNCNT_CONTROL_BASE_IDX 1
+#define regSMU_INT_PIN_SHARING_PORT_INDICATOR 0x2012f
+#define regSMU_INT_PIN_SHARING_PORT_INDICATOR_BASE_IDX 1
+#define regPCIE_PGMST_CNTL 0x20130
+#define regPCIE_PGMST_CNTL_BASE_IDX 1
+#define regPCIE_PGSLV_CNTL 0x20131
+#define regPCIE_PGSLV_CNTL_BASE_IDX 1
+#define regLC_CPM_CONTROL_0 0x20133
+#define regLC_CPM_CONTROL_0_BASE_IDX 1
+#define regLC_CPM_CONTROL_1 0x20134
+#define regLC_CPM_CONTROL_1_BASE_IDX 1
+#define regPCIE_RXMARGIN_CONTROL_CAPABILITIES 0x20135
+#define regPCIE_RXMARGIN_CONTROL_CAPABILITIES_BASE_IDX 1
+#define regPCIE_RXMARGIN_1_SETTINGS 0x20136
+#define regPCIE_RXMARGIN_1_SETTINGS_BASE_IDX 1
+#define regPCIE_RXMARGIN_2_SETTINGS 0x20137
+#define regPCIE_RXMARGIN_2_SETTINGS_BASE_IDX 1
+#define regPCIE_LC_DEBUG_CNTL 0x20139
+#define regPCIE_LC_DEBUG_CNTL_BASE_IDX 1
+#define regSMU_INT_PIN_SHARING_PORT_INDICATOR_TWO 0x2013a
+#define regSMU_INT_PIN_SHARING_PORT_INDICATOR_TWO_BASE_IDX 1
+#define regPCIE_LC_DESKEW_CNTL 0x20150
+#define regPCIE_LC_DESKEW_CNTL_BASE_IDX 1
+#define regPCIE_TX_LAST_TLP0 0x20180
+#define regPCIE_TX_LAST_TLP0_BASE_IDX 1
+#define regPCIE_TX_LAST_TLP1 0x20181
+#define regPCIE_TX_LAST_TLP1_BASE_IDX 1
+#define regPCIE_TX_LAST_TLP2 0x20182
+#define regPCIE_TX_LAST_TLP2_BASE_IDX 1
+#define regPCIE_TX_LAST_TLP3 0x20183
+#define regPCIE_TX_LAST_TLP3_BASE_IDX 1
+#define regPCIE_TX_TRACKING_ADDR_LO 0x20184
+#define regPCIE_TX_TRACKING_ADDR_LO_BASE_IDX 1
+#define regPCIE_TX_TRACKING_ADDR_HI 0x20185
+#define regPCIE_TX_TRACKING_ADDR_HI_BASE_IDX 1
+#define regPCIE_TX_TRACKING_CTRL_STATUS 0x20186
+#define regPCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 1
+#define regPCIE_TX_CTRL_4 0x2018b
+#define regPCIE_TX_CTRL_4_BASE_IDX 1
+#define regPCIE_TX_STATUS 0x20194
+#define regPCIE_TX_STATUS_BASE_IDX 1
+#define regPCIE_TX_F0_ATTR_CNTL 0x2019c
+#define regPCIE_TX_F0_ATTR_CNTL_BASE_IDX 1
+#define regPCIE_TX_SWUS_ATTR_CNTL 0x2019d
+#define regPCIE_TX_SWUS_ATTR_CNTL_BASE_IDX 1
+#define regPCIE_BW_BY_UNITID 0x201c0
+#define regPCIE_BW_BY_UNITID_BASE_IDX 1
+#define regPCIE_MST_CTRL_1 0x201c4
+#define regPCIE_MST_CTRL_1_BASE_IDX 1
+#define regPCIE_HIP_REG0 0x201e0
+#define regPCIE_HIP_REG0_BASE_IDX 1
+#define regPCIE_HIP_REG1 0x201e1
+#define regPCIE_HIP_REG1_BASE_IDX 1
+#define regPCIE_HIP_REG2 0x201e2
+#define regPCIE_HIP_REG2_BASE_IDX 1
+#define regPCIE_HIP_REG3 0x201e3
+#define regPCIE_HIP_REG3_BASE_IDX 1
+#define regPCIE_HIP_REG4 0x201e4
+#define regPCIE_HIP_REG4_BASE_IDX 1
+#define regPCIE_HIP_REG5 0x201e5
+#define regPCIE_HIP_REG5_BASE_IDX 1
+#define regPCIE_HIP_REG6 0x201e6
+#define regPCIE_HIP_REG6_BASE_IDX 1
+#define regPCIE_HIP_REG7 0x201e7
+#define regPCIE_HIP_REG7_BASE_IDX 1
+#define regPCIE_HIP_REG8 0x201e8
+#define regPCIE_HIP_REG8_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK7 0x20222
+#define regPCIE_PERF_CNTL_TXCLK7_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK7 0x20223
+#define regPCIE_PERF_COUNT0_TXCLK7_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK7 0x20224
+#define regPCIE_PERF_COUNT1_TXCLK7_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK8 0x20225
+#define regPCIE_PERF_CNTL_TXCLK8_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK8 0x20226
+#define regPCIE_PERF_COUNT0_TXCLK8_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK8 0x20227
+#define regPCIE_PERF_COUNT1_TXCLK8_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK9 0x20228
+#define regPCIE_PERF_CNTL_TXCLK9_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK9 0x20229
+#define regPCIE_PERF_COUNT0_TXCLK9_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK9 0x2022a
+#define regPCIE_PERF_COUNT1_TXCLK9_BASE_IDX 1
+#define regPCIE_PERF_CNTL_TXCLK10 0x2022b
+#define regPCIE_PERF_CNTL_TXCLK10_BASE_IDX 1
+#define regPCIE_PERF_COUNT0_TXCLK10 0x2022c
+#define regPCIE_PERF_COUNT0_TXCLK10_BASE_IDX 1
+#define regPCIE_PERF_COUNT1_TXCLK10 0x2022d
+#define regPCIE_PERF_COUNT1_TXCLK10_BASE_IDX 1
+#define regPCIE_LANE_ERROR_COUNTERS_0 0x2025e
+#define regPCIE_LANE_ERROR_COUNTERS_0_BASE_IDX 1
+#define regPCIE_LANE_ERROR_COUNTERS_1 0x2025f
+#define regPCIE_LANE_ERROR_COUNTERS_1_BASE_IDX 1
+#define regPCIE_LANE_ERROR_COUNTERS_2 0x20260
+#define regPCIE_LANE_ERROR_COUNTERS_2_BASE_IDX 1
+#define regPCIE_LANE_ERROR_COUNTERS_3 0x20261
+#define regPCIE_LANE_ERROR_COUNTERS_3_BASE_IDX 1
+#define regSMU_PCIE_FENCED1_REG 0x20ffe
+#define regSMU_PCIE_FENCED1_REG_BASE_IDX 1
+#define regSMU_PCIE_FENCED2_REG 0x20fff
+#define regSMU_PCIE_FENCED2_REG_BASE_IDX 1
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_sh_mask.h
new file mode 100644
index 000000000000..6f09d00dff9d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_sh_mask.h
@@ -0,0 +1,4250 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _pcie_6_1_0_SH_MASK_HEADER
+#define _pcie_6_1_0_SH_MASK_HEADER
+
+
+// addressBlock: pcie_container_pcs0_pcie_lcu_pcie_pcs_prime_pcie_master_x1_xx16_pcs_prime_dir
+//DXIO_HWDID
+#define DXIO_HWDID__Hardware_Revision__SHIFT 0x0
+#define DXIO_HWDID__Hardware_Minor_Version_Number__SHIFT 0x6
+#define DXIO_HWDID__Hardware_Major_Version_Number__SHIFT 0xd
+#define DXIO_HWDID__Hardware_Revision_MASK 0x0000003FL
+#define DXIO_HWDID__Hardware_Minor_Version_Number_MASK 0x00001FC0L
+#define DXIO_HWDID__Hardware_Major_Version_Number_MASK 0x000FE000L
+//DXIO_LINKAGE_LANEGRP
+#define DXIO_LINKAGE_LANEGRP__Lane_Group_Indirect_Accesses__SHIFT 0x0
+#define DXIO_LINKAGE_LANEGRP__Lane_Group_Aperture_Size__SHIFT 0x2
+#define DXIO_LINKAGE_LANEGRP__Index_Offset__SHIFT 0x6
+#define DXIO_LINKAGE_LANEGRP__Presence__SHIFT 0x14
+#define DXIO_LINKAGE_LANEGRP__Lane_Group_Indirect_Accesses_MASK 0x00000001L
+#define DXIO_LINKAGE_LANEGRP__Lane_Group_Aperture_Size_MASK 0x0000003CL
+#define DXIO_LINKAGE_LANEGRP__Index_Offset_MASK 0x000FFFC0L
+#define DXIO_LINKAGE_LANEGRP__Presence_MASK 0x0FF00000L
+//DXIO_LINKAGE_KPDMX
+#define DXIO_LINKAGE_KPDMX__Overlay__SHIFT 0x1
+#define DXIO_LINKAGE_KPDMX__Base_Offset__SHIFT 0x6
+#define DXIO_LINKAGE_KPDMX__Presence__SHIFT 0x14
+#define DXIO_LINKAGE_KPDMX__Overlay_MASK 0x00000002L
+#define DXIO_LINKAGE_KPDMX__Base_Offset_MASK 0x000FFFC0L
+#define DXIO_LINKAGE_KPDMX__Presence_MASK 0x0FF00000L
+//DXIO_LINKAGE_KPMX
+//DXIO_LINKAGE_KPFIFO
+//DXIO_LINKAGE_KPNP
+//MAC_CAPABILITIES1
+#define MAC_CAPABILITIES1__Number_of_Lanes__SHIFT 0x0
+#define MAC_CAPABILITIES1__Number_of_Engines__SHIFT 0x8
+#define MAC_CAPABILITIES1__Number_of_Lanes_MASK 0x0000003FL
+#define MAC_CAPABILITIES1__Number_of_Engines_MASK 0x00003F00L
+//MAC_CAPABILITIES2
+#define MAC_CAPABILITIES2__reserved__SHIFT 0x0
+#define MAC_CAPABILITIES2__reserved_MASK 0x00000001L
+
+
+// addressBlock: pcie_container_pcie0_pswuscfg0_cfgdecp
+//COMMAND
+#define COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define COMMAND__AD_STEPPING__SHIFT 0x7
+#define COMMAND__SERR_EN__SHIFT 0x8
+#define COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define COMMAND__INT_DIS__SHIFT 0xa
+#define COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define COMMAND__AD_STEPPING_MASK 0x0080L
+#define COMMAND__SERR_EN_MASK 0x0100L
+#define COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define COMMAND__INT_DIS_MASK 0x0400L
+//STATUS
+#define STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define STATUS__INT_STATUS__SHIFT 0x3
+#define STATUS__CAP_LIST__SHIFT 0x4
+#define STATUS__PCI_66_CAP__SHIFT 0x5
+#define STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define STATUS__INT_STATUS_MASK 0x0008L
+#define STATUS__CAP_LIST_MASK 0x0010L
+#define STATUS__PCI_66_CAP_MASK 0x0020L
+#define STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L
+#define STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L
+#define STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L
+#define STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L
+//LATENCY
+#define LATENCY__LATENCY_TIMER__SHIFT 0x0
+#define LATENCY__LATENCY_TIMER_MASK 0xFFL
+//HEADER
+#define HEADER__HEADER_TYPE__SHIFT 0x0
+#define HEADER__DEVICE_TYPE__SHIFT 0x7
+#define HEADER__HEADER_TYPE_MASK 0x7FL
+#define HEADER__DEVICE_TYPE_MASK 0x80L
+//PCIE_LANE_ERROR_STATUS
+#define PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0
+#define PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL
+//PCIE_LANE_0_EQUALIZATION_CNTL
+#define PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_1_EQUALIZATION_CNTL
+#define PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_2_EQUALIZATION_CNTL
+#define PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_3_EQUALIZATION_CNTL
+#define PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_4_EQUALIZATION_CNTL
+#define PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_5_EQUALIZATION_CNTL
+#define PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_6_EQUALIZATION_CNTL
+#define PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_7_EQUALIZATION_CNTL
+#define PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_8_EQUALIZATION_CNTL
+#define PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_9_EQUALIZATION_CNTL
+#define PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_10_EQUALIZATION_CNTL
+#define PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_11_EQUALIZATION_CNTL
+#define PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_12_EQUALIZATION_CNTL
+#define PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_13_EQUALIZATION_CNTL
+#define PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_14_EQUALIZATION_CNTL
+#define PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LANE_15_EQUALIZATION_CNTL
+#define PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0
+#define PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4
+#define PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8
+#define PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc
+#define PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL
+#define PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L
+#define PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L
+#define PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L
+//PCIE_LTR_ENH_CAP_LIST
+#define PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//PCIE_LTR_CAP
+#define PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0
+#define PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa
+#define PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10
+#define PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a
+#define PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL
+#define PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L
+#define PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L
+#define PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L
+//PCIE_L1_PM_SUB_CAP_LIST
+#define PCIE_L1_PM_SUB_CAP_LIST__CAP_ID__SHIFT 0x0
+#define PCIE_L1_PM_SUB_CAP_LIST__CAP_VER__SHIFT 0x10
+#define PCIE_L1_PM_SUB_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define PCIE_L1_PM_SUB_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define PCIE_L1_PM_SUB_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define PCIE_L1_PM_SUB_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+//PCIE_L1_PM_SUB_CAP
+#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_2_SUPPORTED__SHIFT 0x0
+#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_1_SUPPORTED__SHIFT 0x1
+#define PCIE_L1_PM_SUB_CAP__ASPM_L1_2_SUPPORTED__SHIFT 0x2
+#define PCIE_L1_PM_SUB_CAP__ASPM_L1_1_SUPPORTED__SHIFT 0x3
+#define PCIE_L1_PM_SUB_CAP__L1_PM_SUB_SUPPORTED__SHIFT 0x4
+#define PCIE_L1_PM_SUB_CAP__LINK_ACTIVATION_SUPPORTED__SHIFT 0x5
+#define PCIE_L1_PM_SUB_CAP__PORT_CM_RESTORE_TIME__SHIFT 0x8
+#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_SCALE__SHIFT 0x10
+#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_VALUE__SHIFT 0x13
+#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_2_SUPPORTED_MASK 0x00000001L
+#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_1_SUPPORTED_MASK 0x00000002L
+#define PCIE_L1_PM_SUB_CAP__ASPM_L1_2_SUPPORTED_MASK 0x00000004L
+#define PCIE_L1_PM_SUB_CAP__ASPM_L1_1_SUPPORTED_MASK 0x00000008L
+#define PCIE_L1_PM_SUB_CAP__L1_PM_SUB_SUPPORTED_MASK 0x00000010L
+#define PCIE_L1_PM_SUB_CAP__LINK_ACTIVATION_SUPPORTED_MASK 0x00000020L
+#define PCIE_L1_PM_SUB_CAP__PORT_CM_RESTORE_TIME_MASK 0x0000FF00L
+#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_SCALE_MASK 0x00030000L
+#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_VALUE_MASK 0x00F80000L
+//PCIE_L1_PM_SUB_CNTL
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN__SHIFT 0x0
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN__SHIFT 0x1
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN__SHIFT 0x2
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN__SHIFT 0x3
+#define PCIE_L1_PM_SUB_CNTL__LINK_ACTIVATION_INTERRUPT_EN__SHIFT 0x4
+#define PCIE_L1_PM_SUB_CNTL__LINK_ACTIVATION_CNTL__SHIFT 0x5
+#define PCIE_L1_PM_SUB_CNTL__COMMON_MODE_RESTORE_TIME__SHIFT 0x8
+#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_VALUE__SHIFT 0x10
+#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_SCALE__SHIFT 0x1d
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
+#define PCIE_L1_PM_SUB_CNTL__LINK_ACTIVATION_INTERRUPT_EN_MASK 0x00000010L
+#define PCIE_L1_PM_SUB_CNTL__LINK_ACTIVATION_CNTL_MASK 0x00000020L
+#define PCIE_L1_PM_SUB_CNTL__COMMON_MODE_RESTORE_TIME_MASK 0x0000FF00L
+#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_VALUE_MASK 0x03FF0000L
+#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_SCALE_MASK 0xE0000000L
+//PCIE_L1_PM_SUB_CNTL2
+#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_SCALE__SHIFT 0x0
+#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_VALUE__SHIFT 0x3
+#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_SCALE_MASK 0x00000003L
+#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_VALUE_MASK 0x000000F8L
+//PCIE_MARGINING_ENH_CAP_LIST
+#define PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0
+#define PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10
+#define PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14
+#define PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL
+#define PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L
+#define PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L
+
+
+// addressBlock: pcie_container_pcie0_pswusp0_pciedir_p
+//PCIEP_RESERVED
+#define PCIEP_RESERVED__RESERVED__SHIFT 0x0
+#define PCIEP_RESERVED__RESERVED_MASK 0xFFFFFFFFL
+//PCIEP_SCRATCH
+#define PCIEP_SCRATCH__PCIEP_SCRATCH__SHIFT 0x0
+#define PCIEP_SCRATCH__PCIEP_SCRATCH_MASK 0xFFFFFFFFL
+//PCIEP_PORT_CNTL
+#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN__SHIFT 0x0
+#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE__SHIFT 0x1
+#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN__SHIFT 0x2
+#define PCIEP_PORT_CNTL__NATIVE_PME_EN__SHIFT 0x3
+#define PCIEP_PORT_CNTL__PWR_FAULT_EN__SHIFT 0x4
+#define PCIEP_PORT_CNTL__PMI_BM_DIS__SHIFT 0x5
+#define PCIEP_PORT_CNTL__PME_EN_HW_DEBUG__SHIFT 0x6
+#define PCIEP_PORT_CNTL__PME_MODE_HW_DEBUG__SHIFT 0x7
+#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S__SHIFT 0x8
+#define PCIEP_PORT_CNTL__CI_PRIV_MAX_CPL_PAYLOAD_SIZE__SHIFT 0x12
+#define PCIEP_PORT_CNTL__CI_SLV_RSP_POISONED_UR_MODE__SHIFT 0x18
+#define PCIEP_PORT_CNTL__CI_MAX_CPL_PAYLOAD_SIZE_MODE__SHIFT 0x1a
+#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN_MASK 0x00000001L
+#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE_MASK 0x00000002L
+#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN_MASK 0x00000004L
+#define PCIEP_PORT_CNTL__NATIVE_PME_EN_MASK 0x00000008L
+#define PCIEP_PORT_CNTL__PWR_FAULT_EN_MASK 0x00000010L
+#define PCIEP_PORT_CNTL__PMI_BM_DIS_MASK 0x00000020L
+#define PCIEP_PORT_CNTL__PME_EN_HW_DEBUG_MASK 0x00000040L
+#define PCIEP_PORT_CNTL__PME_MODE_HW_DEBUG_MASK 0x00000080L
+#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S_MASK 0x0003FF00L
+#define PCIEP_PORT_CNTL__CI_PRIV_MAX_CPL_PAYLOAD_SIZE_MASK 0x001C0000L
+#define PCIEP_PORT_CNTL__CI_SLV_RSP_POISONED_UR_MODE_MASK 0x03000000L
+#define PCIEP_PORT_CNTL__CI_MAX_CPL_PAYLOAD_SIZE_MODE_MASK 0x0C000000L
+//PCIE_TX_REQUESTER_ID
+#define PCIE_TX_REQUESTER_ID__TX_SWUS_REQUESTER_ID_FUNCTION__SHIFT 0x10
+#define PCIE_TX_REQUESTER_ID__TX_SWUS_REQUESTER_ID_DEVICE__SHIFT 0x13
+#define PCIE_TX_REQUESTER_ID__TX_SWUS_REQUESTER_ID_BUS__SHIFT 0x18
+#define PCIE_TX_REQUESTER_ID__TX_SWUS_REQUESTER_ID_FUNCTION_MASK 0x00070000L
+#define PCIE_TX_REQUESTER_ID__TX_SWUS_REQUESTER_ID_DEVICE_MASK 0x00F80000L
+#define PCIE_TX_REQUESTER_ID__TX_SWUS_REQUESTER_ID_BUS_MASK 0xFF000000L
+//PCIE_P_PORT_LANE_STATUS
+#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL__SHIFT 0x0
+#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH__SHIFT 0x1
+#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL_MASK 0x00000001L
+#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH_MASK 0x0000007EL
+//PCIE_ERR_CNTL
+#define PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0
+#define PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG__SHIFT 0x1
+#define PCIE_ERR_CNTL__RX_DROP_ECRC_FAILURES__SHIFT 0x2
+#define PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR__SHIFT 0x5
+#define PCIE_ERR_CNTL__RX_GENERATE_POIS_TLP__SHIFT 0x6
+#define PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR__SHIFT 0x7
+#define PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_RCV_ERR__SHIFT 0xc
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_REPLAY_NUM_ROLLOVER__SHIFT 0xd
+#define PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS__SHIFT 0xe
+#define PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS__SHIFT 0xf
+#define PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET__SHIFT 0x10
+#define PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11
+#define PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_BAD_DLLP__SHIFT 0x13
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_BAD_TLP__SHIFT 0x14
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_INTERNAL_ERR__SHIFT 0x15
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_REPLAY_TIMER_TIMEOUT__SHIFT 0x16
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_CORR_INT_ERR__SHIFT 0x17
+#define PCIE_ERR_CNTL__PRIV_SURP_DIS_VEC__SHIFT 0x18
+#define PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG_MASK 0x00000002L
+#define PCIE_ERR_CNTL__RX_DROP_ECRC_FAILURES_MASK 0x00000004L
+#define PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR_MASK 0x00000020L
+#define PCIE_ERR_CNTL__RX_GENERATE_POIS_TLP_MASK 0x00000040L
+#define PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR_MASK 0x00000080L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_RCV_ERR_MASK 0x00001000L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_REPLAY_NUM_ROLLOVER_MASK 0x00002000L
+#define PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS_MASK 0x00004000L
+#define PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS_MASK 0x00008000L
+#define PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET_MASK 0x00010000L
+#define PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L
+#define PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_BAD_DLLP_MASK 0x00080000L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_BAD_TLP_MASK 0x00100000L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_INTERNAL_ERR_MASK 0x00200000L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_REPLAY_TIMER_TIMEOUT_MASK 0x00400000L
+#define PCIE_ERR_CNTL__AER_PRIV_MASK_CORR_INT_ERR_MASK 0x00800000L
+#define PCIE_ERR_CNTL__PRIV_SURP_DIS_VEC_MASK 0xFF000000L
+//PCIE_RX_CNTL
+#define PCIE_RX_CNTL__RX_IGNORE_IO_ERR__SHIFT 0x0
+#define PCIE_RX_CNTL__RX_IGNORE_BE_ERR__SHIFT 0x1
+#define PCIE_RX_CNTL__RX_IGNORE_MSG_ERR__SHIFT 0x2
+#define PCIE_RX_CNTL__RX_IGNORE_CRC_ERR__SHIFT 0x3
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_ERR__SHIFT 0x4
+#define PCIE_RX_CNTL__RX_IGNORE_CPL_ERR__SHIFT 0x5
+#define PCIE_RX_CNTL__RX_IGNORE_EP_ERR__SHIFT 0x6
+#define PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR__SHIFT 0x7
+#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8
+#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_UR__SHIFT 0xa
+#define PCIE_RX_CNTL__RX_IGNORE_IO_UR__SHIFT 0xb
+#define PCIE_RX_CNTL__RX_IGNORE_AT_ERR__SHIFT 0xc
+#define PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL__SHIFT 0xd
+#define PCIE_RX_CNTL__RX_GEN_ONE_NAK__SHIFT 0xe
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_L23_MODE__SHIFT 0xf
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT__SHIFT 0x10
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE__SHIFT 0x13
+#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14
+#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15
+#define PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16
+#define PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR__SHIFT 0x17
+#define PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18
+#define PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19
+#define PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a
+#define PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b
+#define PCIE_RX_CNTL__CTO_MASK_PRIV__SHIFT 0x1c
+#define PCIE_RX_CNTL__RX_SWAP_RTRC_TO_BFRC_ENABLE__SHIFT 0x1d
+#define PCIE_RX_CNTL__DPC_PRIV_TRIGGER_ON_SURPDN_EN__SHIFT 0x1e
+#define PCIE_RX_CNTL__DPC_PRIV_TRIGGER_3_EN__SHIFT 0x1f
+#define PCIE_RX_CNTL__RX_IGNORE_IO_ERR_MASK 0x00000001L
+#define PCIE_RX_CNTL__RX_IGNORE_BE_ERR_MASK 0x00000002L
+#define PCIE_RX_CNTL__RX_IGNORE_MSG_ERR_MASK 0x00000004L
+#define PCIE_RX_CNTL__RX_IGNORE_CRC_ERR_MASK 0x00000008L
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_ERR_MASK 0x00000010L
+#define PCIE_RX_CNTL__RX_IGNORE_CPL_ERR_MASK 0x00000020L
+#define PCIE_RX_CNTL__RX_IGNORE_EP_ERR_MASK 0x00000040L
+#define PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR_MASK 0x00000080L
+#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_UR_MASK 0x00000400L
+#define PCIE_RX_CNTL__RX_IGNORE_IO_UR_MASK 0x00000800L
+#define PCIE_RX_CNTL__RX_IGNORE_AT_ERR_MASK 0x00001000L
+#define PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL_MASK 0x00002000L
+#define PCIE_RX_CNTL__RX_GEN_ONE_NAK_MASK 0x00004000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_L23_MODE_MASK 0x00008000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MASK 0x00070000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE_MASK 0x00080000L
+#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR_MASK 0x00800000L
+#define PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L
+#define PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L
+#define PCIE_RX_CNTL__CTO_MASK_PRIV_MASK 0x10000000L
+#define PCIE_RX_CNTL__RX_SWAP_RTRC_TO_BFRC_ENABLE_MASK 0x20000000L
+#define PCIE_RX_CNTL__DPC_PRIV_TRIGGER_ON_SURPDN_EN_MASK 0x40000000L
+#define PCIE_RX_CNTL__DPC_PRIV_TRIGGER_3_EN_MASK 0x80000000L
+//PCIE_RX_EXPECTED_SEQNUM
+#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM__SHIFT 0x0
+#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM_MASK 0x00000FFFL
+//PCIE_RX_VENDOR_SPECIFIC
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA__SHIFT 0x0
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS__SHIFT 0x18
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA_MASK 0x00FFFFFFL
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS_MASK 0x01000000L
+//PCIE_RX_CNTL3
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR__SHIFT 0x0
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR__SHIFT 0x1
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR__SHIFT 0x2
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR__SHIFT 0x3
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR__SHIFT 0x4
+#define PCIE_RX_CNTL3__RX_ENH_ATOMIC_EN__SHIFT 0x8
+#define PCIE_RX_CNTL3__RX_INGRESS_POISONED_BLOCKING_EN__SHIFT 0x9
+#define PCIE_RX_CNTL3__RX_SWAP_RTRC_TO_BFRC_HDR_ONLY_ENABLE__SHIFT 0xa
+#define PCIE_RX_CNTL3__RX_PRIV_POISON_EGRESS_BLOCK_EN__SHIFT 0xb
+#define PCIE_RX_CNTL3__RX_PH_SUPPRESS_MASK__SHIFT 0xc
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR_MASK 0x00000001L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR_MASK 0x00000002L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR_MASK 0x00000004L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR_MASK 0x00000008L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR_MASK 0x00000010L
+#define PCIE_RX_CNTL3__RX_ENH_ATOMIC_EN_MASK 0x00000100L
+#define PCIE_RX_CNTL3__RX_INGRESS_POISONED_BLOCKING_EN_MASK 0x00000200L
+#define PCIE_RX_CNTL3__RX_SWAP_RTRC_TO_BFRC_HDR_ONLY_ENABLE_MASK 0x00000400L
+#define PCIE_RX_CNTL3__RX_PRIV_POISON_EGRESS_BLOCK_EN_MASK 0x00000800L
+#define PCIE_RX_CNTL3__RX_PH_SUPPRESS_MASK_MASK 0x0000F000L
+//PCIE_RX_CREDITS_ALLOCATED_P
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD__SHIFT 0x0
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH__SHIFT 0x10
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD_MASK 0x00000FFFL
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH_MASK 0x00FF0000L
+//PCIE_RX_CREDITS_ALLOCATED_NP
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD__SHIFT 0x0
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH__SHIFT 0x10
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD_MASK 0x00000FFFL
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH_MASK 0x00FF0000L
+//PCIE_RX_CREDITS_ALLOCATED_CPL
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD__SHIFT 0x0
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH__SHIFT 0x10
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD_MASK 0x00000FFFL
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH_MASK 0x00FF0000L
+//PCIEP_ERROR_INJECT_PHYSICAL
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LANE_ERR__SHIFT 0x0
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_FRAMING_ERR__SHIFT 0x2
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_PARITY_IN_SKP__SHIFT 0x4
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_LFSR_IN_SKP__SHIFT 0x6
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_UFLOW__SHIFT 0x8
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_OFLOW__SHIFT 0xa
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_DESKEW_ERR__SHIFT 0xc
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DISPARITY_ERR__SHIFT 0xe
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DECODE_ERR__SHIFT 0x10
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_SKP_OS_ERROR__SHIFT 0x12
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_INV_OS_IDENTIFIER__SHIFT 0x14
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_SYNC_HEADER__SHIFT 0x16
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LANE_ERR_MASK 0x00000003L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_FRAMING_ERR_MASK 0x0000000CL
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_PARITY_IN_SKP_MASK 0x00000030L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_LFSR_IN_SKP_MASK 0x000000C0L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_UFLOW_MASK 0x00000300L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_OFLOW_MASK 0x00000C00L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_DESKEW_ERR_MASK 0x00003000L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DISPARITY_ERR_MASK 0x0000C000L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DECODE_ERR_MASK 0x00030000L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_SKP_OS_ERROR_MASK 0x000C0000L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_INV_OS_IDENTIFIER_MASK 0x00300000L
+#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_SYNC_HEADER_MASK 0x00C00000L
+//PCIEP_ERROR_INJECT_TRANSACTION
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_FLOW_CTL_ERR__SHIFT 0x0
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_REPLAY_NUM_ROLLOVER__SHIFT 0x2
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_DLLP__SHIFT 0x4
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_TLP__SHIFT 0x6
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNSUPPORTED_REQ__SHIFT 0x8
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_ECRC_ERROR__SHIFT 0xa
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_MALFORMED_TLP__SHIFT 0xc
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNEXPECTED_CMPLT__SHIFT 0xe
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETER_ABORT__SHIFT 0x10
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETION_TIMEOUT__SHIFT 0x12
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_FLOW_CTL_ERR_MASK 0x00000003L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_REPLAY_NUM_ROLLOVER_MASK 0x0000000CL
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_DLLP_MASK 0x00000030L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_TLP_MASK 0x000000C0L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNSUPPORTED_REQ_MASK 0x00000300L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_ECRC_ERROR_MASK 0x00000C00L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_MALFORMED_TLP_MASK 0x00003000L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNEXPECTED_CMPLT_MASK 0x0000C000L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETER_ABORT_MASK 0x00030000L
+#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETION_TIMEOUT_MASK 0x000C0000L
+//PCIEP_NAK_COUNTER
+#define PCIEP_NAK_COUNTER__RX_NUM_NAK_RECEIVED_PORT__SHIFT 0x0
+#define PCIEP_NAK_COUNTER__RX_NUM_NAK_GENERATED_PORT__SHIFT 0x10
+#define PCIEP_NAK_COUNTER__RX_NUM_NAK_RECEIVED_PORT_MASK 0x0000FFFFL
+#define PCIEP_NAK_COUNTER__RX_NUM_NAK_GENERATED_PORT_MASK 0xFFFF0000L
+//PCIE_LC_CNTL
+#define PCIE_LC_CNTL__LC_ADVANCE_SPEED_COMPL_ON_EVERY_COMPL_ENTRY__SHIFT 0x0
+#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0__SHIFT 0x1
+#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN__SHIFT 0x2
+#define PCIE_LC_CNTL__LC_RESET_LINK__SHIFT 0x3
+#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE__SHIFT 0x4
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT 0x8
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT 0xc
+#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT 0x10
+#define PCIE_LC_CNTL__LC_INC_N_FTS_EN__SHIFT 0x11
+#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23__SHIFT 0x12
+#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC__SHIFT 0x14
+#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS__SHIFT 0x15
+#define PCIE_LC_CNTL__LC_WAKE_FROM_L23__SHIFT 0x16
+#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK__SHIFT 0x17
+#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS__SHIFT 0x18
+#define PCIE_LC_CNTL__LC_DELAY_COUNT__SHIFT 0x19
+#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT__SHIFT 0x1b
+#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT__SHIFT 0x1c
+#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE__SHIFT 0x1d
+#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN__SHIFT 0x1e
+#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE__SHIFT 0x1f
+#define PCIE_LC_CNTL__LC_ADVANCE_SPEED_COMPL_ON_EVERY_COMPL_ENTRY_MASK 0x00000001L
+#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0_MASK 0x00000002L
+#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN_MASK 0x00000004L
+#define PCIE_LC_CNTL__LC_RESET_LINK_MASK 0x00000008L
+#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE_MASK 0x000000F0L
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK 0x00000F00L
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK 0x0000F000L
+#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK 0x00010000L
+#define PCIE_LC_CNTL__LC_INC_N_FTS_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23_MASK 0x000C0000L
+#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC_MASK 0x00100000L
+#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS_MASK 0x00200000L
+#define PCIE_LC_CNTL__LC_WAKE_FROM_L23_MASK 0x00400000L
+#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK_MASK 0x00800000L
+#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS_MASK 0x01000000L
+#define PCIE_LC_CNTL__LC_DELAY_COUNT_MASK 0x06000000L
+#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT_MASK 0x08000000L
+#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK 0x10000000L
+#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE_MASK 0x20000000L
+#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN_MASK 0x40000000L
+#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE_MASK 0x80000000L
+//PCIE_LC_TRAINING_CNTL
+#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL__SHIFT 0x0
+#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE__SHIFT 0x4
+#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1__SHIFT 0x5
+#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN__SHIFT 0x6
+#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN__SHIFT 0x7
+#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE__SHIFT 0x8
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED__SHIFT 0xb
+#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN__SHIFT 0xc
+#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH__SHIFT 0xd
+#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_SETS_IN_RCFG__SHIFT 0xe
+#define PCIE_LC_TRAINING_CNTL__LC_HOT_RESET_QUICK_EXIT_EN__SHIFT 0xf
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP__SHIFT 0x10
+#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF__SHIFT 0x11
+#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF__SHIFT 0x12
+#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN__SHIFT 0x13
+#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW__SHIFT 0x14
+#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN__SHIFT 0x15
+#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL__SHIFT 0x16
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED__SHIFT 0x18
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST__SHIFT 0x19
+#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER__SHIFT 0x1a
+#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT__SHIFT 0x1b
+#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL__SHIFT 0x1c
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME__SHIFT 0x1d
+#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL_MASK 0x0000000FL
+#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE_MASK 0x00000010L
+#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1_MASK 0x00000020L
+#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN_MASK 0x00000040L
+#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN_MASK 0x00000080L
+#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE_MASK 0x00000700L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED_MASK 0x00000800L
+#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN_MASK 0x00001000L
+#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK 0x00002000L
+#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_SETS_IN_RCFG_MASK 0x00004000L
+#define PCIE_LC_TRAINING_CNTL__LC_HOT_RESET_QUICK_EXIT_EN_MASK 0x00008000L
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP_MASK 0x00010000L
+#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF_MASK 0x00020000L
+#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF_MASK 0x00040000L
+#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN_MASK 0x00080000L
+#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW_MASK 0x00100000L
+#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN_MASK 0x00200000L
+#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL_MASK 0x00C00000L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED_MASK 0x01000000L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST_MASK 0x02000000L
+#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER_MASK 0x04000000L
+#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT_MASK 0x08000000L
+#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL_MASK 0x10000000L
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME_MASK 0xE0000000L
+//PCIE_LC_LINK_WIDTH_CNTL
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT 0x0
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE__SHIFT 0x7
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW__SHIFT 0x8
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT__SHIFT 0x9
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN__SHIFT 0xa
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN__SHIFT 0xb
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT__SHIFT 0xc
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS__SHIFT 0xd
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS__SHIFT 0xe
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL__SHIFT 0xf
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB__SHIFT 0x10
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN__SHIFT 0x11
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN__SHIFT 0x12
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN__SHIFT 0x13
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE__SHIFT 0x14
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT 0x15
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_ALIGN_REVERSE_XMIT__SHIFT 0x17
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_MULT_REVERSE_ATTEMP_EN__SHIFT 0x18
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RESET_TSX_CNT_IN_RCONFIG_EN__SHIFT 0x19
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_L_IDLE_IN_R_IDLE__SHIFT 0x1a
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_NON_EI_ON_RXL0S_EXIT__SHIFT 0x1b
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_HOLD_EI_FOR_RSPEED_CMD_CHANGE__SHIFT 0x1c
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXL0S_ON_SHORT_EI__SHIFT 0x1d
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_TURN_OFF_UNUSED_LANES__SHIFT 0x1e
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXSTANDBY_STATUS__SHIFT 0x1f
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK 0x00000007L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK 0x00000080L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK 0x00000100L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK 0x00000200L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK 0x00000400L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN_MASK 0x00000800L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK 0x00001000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK 0x00002000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS_MASK 0x00004000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL_MASK 0x00008000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB_MASK 0x00010000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK 0x00020000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN_MASK 0x00040000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN_MASK 0x00080000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE_MASK 0x00100000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK 0x00600000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_ALIGN_REVERSE_XMIT_MASK 0x00800000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_MULT_REVERSE_ATTEMP_EN_MASK 0x01000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RESET_TSX_CNT_IN_RCONFIG_EN_MASK 0x02000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_L_IDLE_IN_R_IDLE_MASK 0x04000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_NON_EI_ON_RXL0S_EXIT_MASK 0x08000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_HOLD_EI_FOR_RSPEED_CMD_CHANGE_MASK 0x10000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXL0S_ON_SHORT_EI_MASK 0x20000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_TURN_OFF_UNUSED_LANES_MASK 0x40000000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXSTANDBY_STATUS_MASK 0x80000000L
+//PCIE_LC_N_FTS_CNTL
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT 0x0
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN__SHIFT 0x8
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY__SHIFT 0x9
+#define PCIE_LC_N_FTS_CNTL__LC_N_EIE_SEL__SHIFT 0xa
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_8GT_CNTL__SHIFT 0xc
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_16GT_CNTL__SHIFT 0xd
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_32GT_CNTL__SHIFT 0xe
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT__SHIFT 0x10
+#define PCIE_LC_N_FTS_CNTL__LC_N_FTS__SHIFT 0x18
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK 0x000000FFL
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK 0x00000100L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY_MASK 0x00000200L
+#define PCIE_LC_N_FTS_CNTL__LC_N_EIE_SEL_MASK 0x00000400L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_8GT_CNTL_MASK 0x00001000L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_16GT_CNTL_MASK 0x00002000L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_32GT_CNTL_MASK 0x00004000L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT_MASK 0x00FF0000L
+#define PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK 0xFF000000L
+//PCIE_LC_SPEED_CNTL
+#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0
+#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
+#define PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2
+#define PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP__SHIFT 0x3
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
+#define PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED__SHIFT 0x8
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN__SHIFT 0xb
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE__SHIFT 0xc
+#define PCIE_LC_SPEED_CNTL__LC_COMP_PATTERN_MAX_SPEED__SHIFT 0x10
+#define PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE__SHIFT 0x15
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2__SHIFT 0x16
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2__SHIFT 0x17
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3__SHIFT 0x18
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3__SHIFT 0x19
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN4__SHIFT 0x1a
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN4__SHIFT 0x1b
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN5__SHIFT 0x1c
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN5__SHIFT 0x1d
+#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L
+#define PCIE_LC_SPEED_CNTL__LC_GEN5_EN_STRAP_MASK 0x00000008L
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0x000000E0L
+#define PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED_MASK 0x00000700L
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN_MASK 0x00000800L
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK 0x00007000L
+#define PCIE_LC_SPEED_CNTL__LC_COMP_PATTERN_MAX_SPEED_MASK 0x00070000L
+#define PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE_MASK 0x00200000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2_MASK 0x00400000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2_MASK 0x00800000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3_MASK 0x01000000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3_MASK 0x02000000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN4_MASK 0x04000000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN4_MASK 0x08000000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN5_MASK 0x10000000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN5_MASK 0x20000000L
+//PCIE_LC_STATE0
+#define PCIE_LC_STATE0__LC_CURRENT_STATE__SHIFT 0x0
+#define PCIE_LC_STATE0__LC_PREV_STATE1__SHIFT 0x8
+#define PCIE_LC_STATE0__LC_PREV_STATE2__SHIFT 0x10
+#define PCIE_LC_STATE0__LC_PREV_STATE3__SHIFT 0x18
+#define PCIE_LC_STATE0__LC_CURRENT_STATE_MASK 0x0000003FL
+#define PCIE_LC_STATE0__LC_PREV_STATE1_MASK 0x00003F00L
+#define PCIE_LC_STATE0__LC_PREV_STATE2_MASK 0x003F0000L
+#define PCIE_LC_STATE0__LC_PREV_STATE3_MASK 0x3F000000L
+//PCIE_LC_STATE1
+#define PCIE_LC_STATE1__LC_PREV_STATE4__SHIFT 0x0
+#define PCIE_LC_STATE1__LC_PREV_STATE5__SHIFT 0x8
+#define PCIE_LC_STATE1__LC_PREV_STATE6__SHIFT 0x10
+#define PCIE_LC_STATE1__LC_PREV_STATE7__SHIFT 0x18
+#define PCIE_LC_STATE1__LC_PREV_STATE4_MASK 0x0000003FL
+#define PCIE_LC_STATE1__LC_PREV_STATE5_MASK 0x00003F00L
+#define PCIE_LC_STATE1__LC_PREV_STATE6_MASK 0x003F0000L
+#define PCIE_LC_STATE1__LC_PREV_STATE7_MASK 0x3F000000L
+//PCIE_LC_STATE2
+#define PCIE_LC_STATE2__LC_PREV_STATE8__SHIFT 0x0
+#define PCIE_LC_STATE2__LC_PREV_STATE9__SHIFT 0x8
+#define PCIE_LC_STATE2__LC_PREV_STATE10__SHIFT 0x10
+#define PCIE_LC_STATE2__LC_PREV_STATE11__SHIFT 0x18
+#define PCIE_LC_STATE2__LC_PREV_STATE8_MASK 0x0000003FL
+#define PCIE_LC_STATE2__LC_PREV_STATE9_MASK 0x00003F00L
+#define PCIE_LC_STATE2__LC_PREV_STATE10_MASK 0x003F0000L
+#define PCIE_LC_STATE2__LC_PREV_STATE11_MASK 0x3F000000L
+//PCIE_LC_STATE3
+#define PCIE_LC_STATE3__LC_PREV_STATE12__SHIFT 0x0
+#define PCIE_LC_STATE3__LC_PREV_STATE13__SHIFT 0x8
+#define PCIE_LC_STATE3__LC_PREV_STATE14__SHIFT 0x10
+#define PCIE_LC_STATE3__LC_PREV_STATE15__SHIFT 0x18
+#define PCIE_LC_STATE3__LC_PREV_STATE12_MASK 0x0000003FL
+#define PCIE_LC_STATE3__LC_PREV_STATE13_MASK 0x00003F00L
+#define PCIE_LC_STATE3__LC_PREV_STATE14_MASK 0x003F0000L
+#define PCIE_LC_STATE3__LC_PREV_STATE15_MASK 0x3F000000L
+//PCIE_LC_STATE4
+#define PCIE_LC_STATE4__LC_PREV_STATE16__SHIFT 0x0
+#define PCIE_LC_STATE4__LC_PREV_STATE17__SHIFT 0x8
+#define PCIE_LC_STATE4__LC_PREV_STATE18__SHIFT 0x10
+#define PCIE_LC_STATE4__LC_PREV_STATE19__SHIFT 0x18
+#define PCIE_LC_STATE4__LC_PREV_STATE16_MASK 0x0000003FL
+#define PCIE_LC_STATE4__LC_PREV_STATE17_MASK 0x00003F00L
+#define PCIE_LC_STATE4__LC_PREV_STATE18_MASK 0x003F0000L
+#define PCIE_LC_STATE4__LC_PREV_STATE19_MASK 0x3F000000L
+//PCIE_LC_STATE5
+#define PCIE_LC_STATE5__LC_PREV_STATE20__SHIFT 0x0
+#define PCIE_LC_STATE5__LC_PREV_STATE21__SHIFT 0x8
+#define PCIE_LC_STATE5__LC_PREV_STATE22__SHIFT 0x10
+#define PCIE_LC_STATE5__LC_PREV_STATE23__SHIFT 0x18
+#define PCIE_LC_STATE5__LC_PREV_STATE20_MASK 0x0000003FL
+#define PCIE_LC_STATE5__LC_PREV_STATE21_MASK 0x00003F00L
+#define PCIE_LC_STATE5__LC_PREV_STATE22_MASK 0x003F0000L
+#define PCIE_LC_STATE5__LC_PREV_STATE23_MASK 0x3F000000L
+//PCIE_LC_LINK_MANAGEMENT_CNTL2
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__LOW_BW_HINT__SHIFT 0x0
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__HIGH_BW_HINT__SHIFT 0x1
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_COUNT__SHIFT 0x2
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_MODE__SHIFT 0x5
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_TX_EN__SHIFT 0x6
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_RX_EN__SHIFT 0x7
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD__SHIFT 0x10
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD__SHIFT 0x14
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G2__SHIFT 0x18
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G2__SHIFT 0x1c
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__LOW_BW_HINT_MASK 0x00000001L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__HIGH_BW_HINT_MASK 0x00000002L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_COUNT_MASK 0x0000001CL
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_MODE_MASK 0x00000020L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_TX_EN_MASK 0x00000040L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__BW_HINT_RX_EN_MASK 0x00000080L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_MASK 0x000F0000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_MASK 0x00F00000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G2_MASK 0x0F000000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G2_MASK 0xF0000000L
+//PCIE_LC_CNTL2
+#define PCIE_LC_CNTL2__LC_TIMED_OUT_STATE__SHIFT 0x0
+#define PCIE_LC_CNTL2__LC_STATE_TIMED_OUT__SHIFT 0x6
+#define PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION__SHIFT 0x7
+#define PCIE_LC_CNTL2__LC_MORE_TS2_EN__SHIFT 0x8
+#define PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS__SHIFT 0x9
+#define PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN__SHIFT 0xa
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE__SHIFT 0xb
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN__SHIFT 0xc
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE__SHIFT 0xd
+#define PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE__SHIFT 0xe
+#define PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET__SHIFT 0x10
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1__SHIFT 0x11
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23__SHIFT 0x12
+#define PCIE_LC_CNTL2__LC_CONSECUTIVE_EIOS_RESET_EN__SHIFT 0x13
+#define PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0__SHIFT 0x14
+#define PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS__SHIFT 0x15
+#define PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD__SHIFT 0x16
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG__SHIFT 0x17
+#define PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES__SHIFT 0x19
+#define PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS__SHIFT 0x1a
+#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b
+#define PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE__SHIFT 0x1c
+#define PCIE_LC_CNTL2__LC_TEST_TIMER_SEL__SHIFT 0x1d
+#define PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI__SHIFT 0x1f
+#define PCIE_LC_CNTL2__LC_TIMED_OUT_STATE_MASK 0x0000003FL
+#define PCIE_LC_CNTL2__LC_STATE_TIMED_OUT_MASK 0x00000040L
+#define PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION_MASK 0x00000080L
+#define PCIE_LC_CNTL2__LC_MORE_TS2_EN_MASK 0x00000100L
+#define PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS_MASK 0x00000200L
+#define PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_MASK 0x00000800L
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN_MASK 0x00001000L
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE_MASK 0x00002000L
+#define PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE_MASK 0x0000C000L
+#define PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET_MASK 0x00010000L
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK 0x00020000L
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK 0x00040000L
+#define PCIE_LC_CNTL2__LC_CONSECUTIVE_EIOS_RESET_EN_MASK 0x00080000L
+#define PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0_MASK 0x00100000L
+#define PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK 0x00200000L
+#define PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD_MASK 0x00400000L
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG_MASK 0x01800000L
+#define PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES_MASK 0x02000000L
+#define PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS_MASK 0x04000000L
+#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+#define PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE_MASK 0x10000000L
+#define PCIE_LC_CNTL2__LC_TEST_TIMER_SEL_MASK 0x60000000L
+#define PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI_MASK 0x80000000L
+//PCIE_LC_BW_CHANGE_CNTL
+#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN__SHIFT 0x0
+#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE__SHIFT 0x1
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE__SHIFT 0x2
+#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE__SHIFT 0x3
+#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE__SHIFT 0x4
+#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG__SHIFT 0x5
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE__SHIFT 0x6
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE__SHIFT 0x7
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER__SHIFT 0x8
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED__SHIFT 0x9
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE__SHIFT 0xa
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SPEED_NEG_UNSUCCESSFUL__SHIFT 0xb
+#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN_MASK 0x00000001L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE_MASK 0x00000002L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE_MASK 0x00000004L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE_MASK 0x00000008L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE_MASK 0x00000010L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG_MASK 0x00000020L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE_MASK 0x00000040L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE_MASK 0x00000080L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER_MASK 0x00000100L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED_MASK 0x00000200L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE_MASK 0x00000400L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SPEED_NEG_UNSUCCESSFUL_MASK 0x00000800L
+//PCIE_LC_CDR_CNTL
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF__SHIFT 0x0
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS__SHIFT 0xc
+#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE__SHIFT 0x18
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF_MASK 0x00000FFFL
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS_MASK 0x00FFF000L
+#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE_MASK 0x03000000L
+//PCIE_LC_LANE_CNTL
+#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES__SHIFT 0x0
+#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES_MASK 0x0000FFFFL
+//PCIE_LC_CNTL3
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS__SHIFT 0x0
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL__SHIFT 0x1
+#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS__SHIFT 0x3
+#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT__SHIFT 0x4
+#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN__SHIFT 0x5
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x6
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x8
+#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT__SHIFT 0x9
+#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN__SHIFT 0xa
+#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE__SHIFT 0xb
+#define PCIE_LC_CNTL3__LC_LINK_DOWN_SPD_CHG_EN__SHIFT 0xc
+#define PCIE_LC_CNTL3__LC_CLR_DELAY_DLLP_WHEN_NO_AUTO_EQ__SHIFT 0xd
+#define PCIE_LC_CNTL3__LC_MULT_AUTO_SPD_CHG_ON_LAST_RATE__SHIFT 0xe
+#define PCIE_LC_CNTL3__LC_RST_FAILING_SPD_CHANGE_CNT_ON_SUCCESS_EN__SHIFT 0xf
+#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN__SHIFT 0x10
+#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN__SHIFT 0x11
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN__SHIFT 0x12
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL__SHIFT 0x13
+#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN__SHIFT 0x15
+#define PCIE_LC_CNTL3__LC_POWERDOWN_P0_WAIT_FOR_REFCLKACK_ON_L1_EXIT__SHIFT 0x16
+#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK__SHIFT 0x17
+#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL__SHIFT 0x18
+#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL__SHIFT 0x1a
+#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY__SHIFT 0x1e
+#define PCIE_LC_CNTL3__LC_AUTO_RECOVERY_DIS__SHIFT 0x1f
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_MASK 0x00000001L
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL_MASK 0x00000006L
+#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS_MASK 0x00000008L
+#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT_MASK 0x00000010L
+#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN_MASK 0x00000020L
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x000000C0L
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000100L
+#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT_MASK 0x00000200L
+#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE_MASK 0x00000800L
+#define PCIE_LC_CNTL3__LC_LINK_DOWN_SPD_CHG_EN_MASK 0x00001000L
+#define PCIE_LC_CNTL3__LC_CLR_DELAY_DLLP_WHEN_NO_AUTO_EQ_MASK 0x00002000L
+#define PCIE_LC_CNTL3__LC_MULT_AUTO_SPD_CHG_ON_LAST_RATE_MASK 0x00004000L
+#define PCIE_LC_CNTL3__LC_RST_FAILING_SPD_CHANGE_CNT_ON_SUCCESS_EN_MASK 0x00008000L
+#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN_MASK 0x00010000L
+#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN_MASK 0x00040000L
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL_MASK 0x00180000L
+#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN_MASK 0x00200000L
+#define PCIE_LC_CNTL3__LC_POWERDOWN_P0_WAIT_FOR_REFCLKACK_ON_L1_EXIT_MASK 0x00400000L
+#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK 0x00800000L
+#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL_MASK 0x03000000L
+#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL_MASK 0x3C000000L
+#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK 0x40000000L
+#define PCIE_LC_CNTL3__LC_AUTO_RECOVERY_DIS_MASK 0x80000000L
+//PCIE_LC_CNTL4
+#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR__SHIFT 0x0
+#define PCIE_LC_CNTL4__LC_DIS_CONTIG_END_SET_CHECK__SHIFT 0x2
+#define PCIE_LC_CNTL4__LC_DIS_ASPM_L1_IN_SPEED_CHANGE__SHIFT 0x3
+#define PCIE_LC_CNTL4__LC_L1_POWERDOWN__SHIFT 0x4
+#define PCIE_LC_CNTL4__LC_P2_ENTRY__SHIFT 0x5
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS__SHIFT 0x6
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MODE__SHIFT 0x7
+#define PCIE_LC_CNTL4__LC_IGNORE_PARITY__SHIFT 0x8
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_COEFF_IN_RLOCK_EN__SHIFT 0x9
+#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK__SHIFT 0xa
+#define PCIE_LC_CNTL4__LC_DEFER_SKIP_FOR_EIEOS_EN__SHIFT 0xb
+#define PCIE_LC_CNTL4__LC_SEND_EIEOS_IN_RCFG__SHIFT 0xc
+#define PCIE_LC_CNTL4__LC_SET_QUIESCE__SHIFT 0xd
+#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD__SHIFT 0xe
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_TWO_EIEOS_SEQUENCE__SHIFT 0xf
+#define PCIE_LC_CNTL4__LC_GO_TO_RECOVERY_ANY_UNEXPECTED_EIOS__SHIFT 0x10
+#define PCIE_LC_CNTL4__LC_DONT_CHECK_EQTS_IN_RCFG__SHIFT 0x11
+#define PCIE_LC_CNTL4__LC_DELAY_COEFF_UPDATE_DIS__SHIFT 0x12
+#define PCIE_LC_CNTL4__LC_DYNAMIC_INACTIVE_TS_SELECT__SHIFT 0x13
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_EIEOS_IN_RLOCK__SHIFT 0x15
+#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS__SHIFT 0x16
+#define PCIE_LC_CNTL4__LC_TX_SWING__SHIFT 0x17
+#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE__SHIFT 0x18
+#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN__SHIFT 0x19
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK__SHIFT 0x1a
+#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR_MASK 0x00000003L
+#define PCIE_LC_CNTL4__LC_DIS_CONTIG_END_SET_CHECK_MASK 0x00000004L
+#define PCIE_LC_CNTL4__LC_DIS_ASPM_L1_IN_SPEED_CHANGE_MASK 0x00000008L
+#define PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK 0x00000010L
+#define PCIE_LC_CNTL4__LC_P2_ENTRY_MASK 0x00000020L
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MASK 0x00000040L
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MODE_MASK 0x00000080L
+#define PCIE_LC_CNTL4__LC_IGNORE_PARITY_MASK 0x00000100L
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_COEFF_IN_RLOCK_EN_MASK 0x00000200L
+#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK_MASK 0x00000400L
+#define PCIE_LC_CNTL4__LC_DEFER_SKIP_FOR_EIEOS_EN_MASK 0x00000800L
+#define PCIE_LC_CNTL4__LC_SEND_EIEOS_IN_RCFG_MASK 0x00001000L
+#define PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK 0x00002000L
+#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD_MASK 0x00004000L
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_TWO_EIEOS_SEQUENCE_MASK 0x00008000L
+#define PCIE_LC_CNTL4__LC_GO_TO_RECOVERY_ANY_UNEXPECTED_EIOS_MASK 0x00010000L
+#define PCIE_LC_CNTL4__LC_DONT_CHECK_EQTS_IN_RCFG_MASK 0x00020000L
+#define PCIE_LC_CNTL4__LC_DELAY_COEFF_UPDATE_DIS_MASK 0x00040000L
+#define PCIE_LC_CNTL4__LC_DYNAMIC_INACTIVE_TS_SELECT_MASK 0x00180000L
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_EIEOS_IN_RLOCK_MASK 0x00200000L
+#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS_MASK 0x00400000L
+#define PCIE_LC_CNTL4__LC_TX_SWING_MASK 0x00800000L
+#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE_MASK 0x01000000L
+#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN_MASK 0x02000000L
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK_MASK 0xFC000000L
+//PCIE_LC_CNTL5
+#define PCIE_LC_CNTL5__LC_LOCAL_EQ_SETTINGS_RATE__SHIFT 0x0
+#define PCIE_LC_CNTL5__LC_LOCAL_PRESET__SHIFT 0x2
+#define PCIE_LC_CNTL5__LC_LOCAL_PRE_CURSOR__SHIFT 0x6
+#define PCIE_LC_CNTL5__LC_LOCAL_CURSOR__SHIFT 0xa
+#define PCIE_LC_CNTL5__LC_LOCAL_POST_CURSOR__SHIFT 0x10
+#define PCIE_LC_CNTL5__LC_LOCAL_USE_PRESET__SHIFT 0x15
+#define PCIE_LC_CNTL5__LC_SAFE_RECOVER_CNTL__SHIFT 0x16
+#define PCIE_LC_CNTL5__LC_DSC_EQ_FS_LF_INVALID_TO_PRESETS__SHIFT 0x18
+#define PCIE_LC_CNTL5__LC_TX_SWING_OVERRIDE__SHIFT 0x19
+#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS__SHIFT 0x1a
+#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_TEST__SHIFT 0x1b
+#define PCIE_LC_CNTL5__LC_WAIT_IN_DETECT__SHIFT 0x1c
+#define PCIE_LC_CNTL5__LC_HOLD_TRAINING_MODE__SHIFT 0x1d
+#define PCIE_LC_CNTL5__LC_LOCAL_EQ_SETTINGS_RATE_MASK 0x00000003L
+#define PCIE_LC_CNTL5__LC_LOCAL_PRESET_MASK 0x0000003CL
+#define PCIE_LC_CNTL5__LC_LOCAL_PRE_CURSOR_MASK 0x000003C0L
+#define PCIE_LC_CNTL5__LC_LOCAL_CURSOR_MASK 0x0000FC00L
+#define PCIE_LC_CNTL5__LC_LOCAL_POST_CURSOR_MASK 0x001F0000L
+#define PCIE_LC_CNTL5__LC_LOCAL_USE_PRESET_MASK 0x00200000L
+#define PCIE_LC_CNTL5__LC_SAFE_RECOVER_CNTL_MASK 0x00C00000L
+#define PCIE_LC_CNTL5__LC_DSC_EQ_FS_LF_INVALID_TO_PRESETS_MASK 0x01000000L
+#define PCIE_LC_CNTL5__LC_TX_SWING_OVERRIDE_MASK 0x02000000L
+#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_MASK 0x04000000L
+#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_TEST_MASK 0x08000000L
+#define PCIE_LC_CNTL5__LC_WAIT_IN_DETECT_MASK 0x10000000L
+#define PCIE_LC_CNTL5__LC_HOLD_TRAINING_MODE_MASK 0xE0000000L
+//PCIE_LC_FORCE_COEFF
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_8GT__SHIFT 0x0
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_8GT__SHIFT 0x1
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_8GT__SHIFT 0x7
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_8GT__SHIFT 0xd
+#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_8GT__SHIFT 0x13
+#define PCIE_LC_FORCE_COEFF__LC_PRESET_10_EN__SHIFT 0x14
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_8GT_MASK 0x00000001L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_8GT_MASK 0x0000007EL
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_8GT_MASK 0x00001F80L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_8GT_MASK 0x0007E000L
+#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_8GT_MASK 0x00080000L
+#define PCIE_LC_FORCE_COEFF__LC_PRESET_10_EN_MASK 0x00100000L
+//PCIE_LC_BEST_EQ_SETTINGS
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET__SHIFT 0x0
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR__SHIFT 0x4
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR__SHIFT 0xa
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR__SHIFT 0x10
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM__SHIFT 0x16
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_SETTINGS_RATE__SHIFT 0x1e
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET_MASK 0x0000000FL
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR_MASK 0x000003F0L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR_MASK 0x0000FC00L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR_MASK 0x003F0000L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM_MASK 0x3FC00000L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_SETTINGS_RATE_MASK 0xC0000000L
+//PCIE_LC_FORCE_EQ_REQ_COEFF
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_8GT__SHIFT 0x0
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_8GT__SHIFT 0x1
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_8GT__SHIFT 0x7
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_8GT__SHIFT 0xd
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_8GT__SHIFT 0x13
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_8GT__SHIFT 0x19
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_8GT_MASK 0x00000001L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_8GT_MASK 0x0000007EL
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_8GT_MASK 0x00001F80L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_8GT_MASK 0x0007E000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_8GT_MASK 0x01F80000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_8GT_MASK 0x7E000000L
+//PCIE_LC_CNTL6
+#define PCIE_LC_CNTL6__LC_SPC_MODE_2P5GT__SHIFT 0x0
+#define PCIE_LC_CNTL6__LC_SPC_MODE_5GT__SHIFT 0x2
+#define PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT 0x4
+#define PCIE_LC_CNTL6__LC_SPC_MODE_16GT__SHIFT 0x6
+#define PCIE_LC_CNTL6__LC_SPC_MODE_32GT__SHIFT 0x8
+#define PCIE_LC_CNTL6__LC_SRIS_EN__SHIFT 0xc
+#define PCIE_LC_CNTL6__LC_SRNS_SKIP_IN_SRIS__SHIFT 0xd
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_EN__SHIFT 0x14
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_FACTOR__SHIFT 0x15
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_MODE__SHIFT 0x17
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_OUT_OF_RANGE__SHIFT 0x19
+#define PCIE_LC_CNTL6__LC_OVERRIDE_RETIMER_PRESENCE_EN__SHIFT 0x1a
+#define PCIE_LC_CNTL6__LC_OVERRIDE_RETIMER_PRESENCE__SHIFT 0x1b
+#define PCIE_LC_CNTL6__LC_IGNORE_RETIMER_PRESENCE__SHIFT 0x1d
+#define PCIE_LC_CNTL6__LC_RETIMER_PRESENCE__SHIFT 0x1e
+#define PCIE_LC_CNTL6__LC_SPC_MODE_2P5GT_MASK 0x00000003L
+#define PCIE_LC_CNTL6__LC_SPC_MODE_5GT_MASK 0x0000000CL
+#define PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK 0x00000030L
+#define PCIE_LC_CNTL6__LC_SPC_MODE_16GT_MASK 0x000000C0L
+#define PCIE_LC_CNTL6__LC_SPC_MODE_32GT_MASK 0x00000300L
+#define PCIE_LC_CNTL6__LC_SRIS_EN_MASK 0x00001000L
+#define PCIE_LC_CNTL6__LC_SRNS_SKIP_IN_SRIS_MASK 0x0003E000L
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_EN_MASK 0x00100000L
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_FACTOR_MASK 0x00600000L
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_MODE_MASK 0x01800000L
+#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_OUT_OF_RANGE_MASK 0x02000000L
+#define PCIE_LC_CNTL6__LC_OVERRIDE_RETIMER_PRESENCE_EN_MASK 0x04000000L
+#define PCIE_LC_CNTL6__LC_OVERRIDE_RETIMER_PRESENCE_MASK 0x18000000L
+#define PCIE_LC_CNTL6__LC_IGNORE_RETIMER_PRESENCE_MASK 0x20000000L
+#define PCIE_LC_CNTL6__LC_RETIMER_PRESENCE_MASK 0xC0000000L
+//PCIE_LC_CNTL7
+#define PCIE_LC_CNTL7__LC_EXPECTED_TS2_CFG_COMPLETE__SHIFT 0x0
+#define PCIE_LC_CNTL7__LC_IGNORE_NON_CONTIG_SETS_IN_RCFG__SHIFT 0x1
+#define PCIE_LC_CNTL7__LC_ROBUST_TRAINING_BIT_CHK_EN__SHIFT 0x2
+#define PCIE_LC_CNTL7__LC_RESET_TS_COUNT_ON_EI__SHIFT 0x3
+#define PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN__SHIFT 0x4
+#define PCIE_LC_CNTL7__LC_CLEAR_REVERSE_ATTEMPT_IN_L0__SHIFT 0x5
+#define PCIE_LC_CNTL7__LC_LOCK_REVERSAL__SHIFT 0x6
+#define PCIE_LC_CNTL7__LC_FORCE_RX_EQ_IN_PROGRESS__SHIFT 0x7
+#define PCIE_LC_CNTL7__LC_EVER_IDLE_TO_RLOCK__SHIFT 0x8
+#define PCIE_LC_CNTL7__LC_RXEQEVAL_AFTER_TIMEOUT_EN__SHIFT 0x9
+#define PCIE_LC_CNTL7__LC_WAIT_FOR_LANES_IN_CONFIG__SHIFT 0xa
+#define PCIE_LC_CNTL7__LC_REQ_COEFFS_FOR_TXMARGIN_EN__SHIFT 0xb
+#define PCIE_LC_CNTL7__LC_ESM_WAIT_FOR_PLL_INIT_DONE_L1__SHIFT 0xc
+#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_INTERVAL__SHIFT 0xd
+#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_MODE__SHIFT 0x15
+#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_UPCONFIG_EN__SHIFT 0x16
+#define PCIE_LC_CNTL7__LC_LINK_MANAGEMENT_EN__SHIFT 0x17
+#define PCIE_LC_CNTL7__LC_AUTO_REJECT_AFTER_TIMEOUT__SHIFT 0x18
+#define PCIE_LC_CNTL7__LC_ESM_RATES__SHIFT 0x19
+#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_STATE__SHIFT 0x1b
+#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_DONE__SHIFT 0x1c
+#define PCIE_LC_CNTL7__LC_ESM_REDO_INIT__SHIFT 0x1d
+#define PCIE_LC_CNTL7__LC_MULTIPORT_ESM__SHIFT 0x1e
+#define PCIE_LC_CNTL7__LC_ESM_ENTRY_MODE__SHIFT 0x1f
+#define PCIE_LC_CNTL7__LC_EXPECTED_TS2_CFG_COMPLETE_MASK 0x00000001L
+#define PCIE_LC_CNTL7__LC_IGNORE_NON_CONTIG_SETS_IN_RCFG_MASK 0x00000002L
+#define PCIE_LC_CNTL7__LC_ROBUST_TRAINING_BIT_CHK_EN_MASK 0x00000004L
+#define PCIE_LC_CNTL7__LC_RESET_TS_COUNT_ON_EI_MASK 0x00000008L
+#define PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK 0x00000010L
+#define PCIE_LC_CNTL7__LC_CLEAR_REVERSE_ATTEMPT_IN_L0_MASK 0x00000020L
+#define PCIE_LC_CNTL7__LC_LOCK_REVERSAL_MASK 0x00000040L
+#define PCIE_LC_CNTL7__LC_FORCE_RX_EQ_IN_PROGRESS_MASK 0x00000080L
+#define PCIE_LC_CNTL7__LC_EVER_IDLE_TO_RLOCK_MASK 0x00000100L
+#define PCIE_LC_CNTL7__LC_RXEQEVAL_AFTER_TIMEOUT_EN_MASK 0x00000200L
+#define PCIE_LC_CNTL7__LC_WAIT_FOR_LANES_IN_CONFIG_MASK 0x00000400L
+#define PCIE_LC_CNTL7__LC_REQ_COEFFS_FOR_TXMARGIN_EN_MASK 0x00000800L
+#define PCIE_LC_CNTL7__LC_ESM_WAIT_FOR_PLL_INIT_DONE_L1_MASK 0x00001000L
+#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_INTERVAL_MASK 0x001FE000L
+#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_MODE_MASK 0x00200000L
+#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_UPCONFIG_EN_MASK 0x00400000L
+#define PCIE_LC_CNTL7__LC_LINK_MANAGEMENT_EN_MASK 0x00800000L
+#define PCIE_LC_CNTL7__LC_AUTO_REJECT_AFTER_TIMEOUT_MASK 0x01000000L
+#define PCIE_LC_CNTL7__LC_ESM_RATES_MASK 0x06000000L
+#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_STATE_MASK 0x08000000L
+#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_DONE_MASK 0x10000000L
+#define PCIE_LC_CNTL7__LC_ESM_REDO_INIT_MASK 0x20000000L
+#define PCIE_LC_CNTL7__LC_MULTIPORT_ESM_MASK 0x40000000L
+#define PCIE_LC_CNTL7__LC_ESM_ENTRY_MODE_MASK 0x80000000L
+//PCIE_LC_LINK_MANAGEMENT_STATUS
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_SPEED_UPDATE__SHIFT 0x0
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x1
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_SPEED_SUPPORT_UPDATE__SHIFT 0x2
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_WIDTH_UPDATE__SHIFT 0x3
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_WIDTH_CHANGE_ATTEMPT_FAILED__SHIFT 0x4
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_WIDTH_SUPPORT_UPDATE__SHIFT 0x5
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__POWER_DOWN_COMMAND_COMPLETE__SHIFT 0x6
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__BANDWIDTH_UPDATE__SHIFT 0x7
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_POWER_STATE_CHANGE__SHIFT 0x8
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_HINT__SHIFT 0x9
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__EQUALIZATION_REQUEST__SHIFT 0xa
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_ESM_REQUEST__SHIFT 0xb
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LOW_SPEED_REQD_IMMEDIATE__SHIFT 0xc
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__ESTABLISH_ESM_PLL_SETTINGS__SHIFT 0xd
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__TRAINING_SET_MESSAGE_RCVD__SHIFT 0xf
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__SAVE_RESTORE_EQ_SETTINGS_CHANGED__SHIFT 0x10
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__SAVE_RESTORE_RE_RESTORE_NEEDED__SHIFT 0x11
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_REQUIRES_HIGHER_SPEED__SHIFT 0x12
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_REQUIRES_WIDER_LINK_WIDTH__SHIFT 0x13
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__SAFE_RECOVER_SW_EVENT__SHIFT 0x14
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__FAAE_EQUALIZATION_ENTERED__SHIFT 0x15
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__FAAE_EVALUATION_READY__SHIFT 0x16
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__RETRAIN_TARGET_LINK_SPEED_CHANGE_LIMITED_BY_EQ__SHIFT 0x17
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_MONITOR__SHIFT 0x1b
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__ARBMUX_GEN_SUB_EVENTS__SHIFT 0x1c
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_SPEED_UPDATE_MASK 0x00000001L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000002L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK 0x00000004L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_WIDTH_UPDATE_MASK 0x00000008L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK 0x00000010L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK 0x00000020L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__POWER_DOWN_COMMAND_COMPLETE_MASK 0x00000040L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__BANDWIDTH_UPDATE_MASK 0x00000080L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_POWER_STATE_CHANGE_MASK 0x00000100L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_HINT_MASK 0x00000200L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__EQUALIZATION_REQUEST_MASK 0x00000400L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_ESM_REQUEST_MASK 0x00000800L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LOW_SPEED_REQD_IMMEDIATE_MASK 0x00001000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__ESTABLISH_ESM_PLL_SETTINGS_MASK 0x00002000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__TRAINING_SET_MESSAGE_RCVD_MASK 0x00008000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__SAVE_RESTORE_EQ_SETTINGS_CHANGED_MASK 0x00010000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__SAVE_RESTORE_RE_RESTORE_NEEDED_MASK 0x00020000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_REQUIRES_HIGHER_SPEED_MASK 0x00040000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__LINK_PARTNER_REQUIRES_WIDER_LINK_WIDTH_MASK 0x00080000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__SAFE_RECOVER_SW_EVENT_MASK 0x00100000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__FAAE_EQUALIZATION_ENTERED_MASK 0x00200000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__FAAE_EVALUATION_READY_MASK 0x00400000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__RETRAIN_TARGET_LINK_SPEED_CHANGE_LIMITED_BY_EQ_MASK 0x00800000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_MONITOR_MASK 0x08000000L
+#define PCIE_LC_LINK_MANAGEMENT_STATUS__ARBMUX_GEN_SUB_EVENTS_MASK 0x10000000L
+//PCIE_LC_LINK_MANAGEMENT_MASK
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_SPEED_UPDATE_MASK__SHIFT 0x0
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK__SHIFT 0x1
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK__SHIFT 0x2
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_WIDTH_UPDATE_MASK__SHIFT 0x3
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK__SHIFT 0x4
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK__SHIFT 0x5
+#define PCIE_LC_LINK_MANAGEMENT_MASK__POWER_DOWN_COMMAND_COMPLETE_MASK__SHIFT 0x6
+#define PCIE_LC_LINK_MANAGEMENT_MASK__BANDWIDTH_UPDATE_MASK__SHIFT 0x7
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_POWER_STATE_CHANGE_MASK__SHIFT 0x8
+#define PCIE_LC_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_HINT_MASK__SHIFT 0x9
+#define PCIE_LC_LINK_MANAGEMENT_MASK__EQUALIZATION_REQUEST_MASK__SHIFT 0xa
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_ESM_REQUEST_MASK__SHIFT 0xb
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LOW_SPEED_REQD_IMMEDIATE_MASK__SHIFT 0xc
+#define PCIE_LC_LINK_MANAGEMENT_MASK__ESTABLISH_ESM_PLL_SETTINGS_MASK__SHIFT 0xd
+#define PCIE_LC_LINK_MANAGEMENT_MASK__TRAINING_SET_MESSAGE_RCVD_MASK__SHIFT 0xf
+#define PCIE_LC_LINK_MANAGEMENT_MASK__SAVE_RESTORE_EQ_SETTINGS_CHANGED_MASK__SHIFT 0x10
+#define PCIE_LC_LINK_MANAGEMENT_MASK__SAVE_RESTORE_RE_RESTORE_NEEDED_MASK__SHIFT 0x11
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_REQUIRES_HIGHER_SPEED_MASK__SHIFT 0x12
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_REQUIRES_WIDER_LINK_WIDTH_MASK__SHIFT 0x13
+#define PCIE_LC_LINK_MANAGEMENT_MASK__SAFE_RECOVER_SW_EVENT_MASK__SHIFT 0x14
+#define PCIE_LC_LINK_MANAGEMENT_MASK__FAAE_EQUALIZATION_ENTERED_MASK__SHIFT 0x15
+#define PCIE_LC_LINK_MANAGEMENT_MASK__FAAE_EVALUATION_READY_MASK__SHIFT 0x16
+#define PCIE_LC_LINK_MANAGEMENT_MASK__RETRAIN_TARGET_LINK_SPEED_CHANGE_LIMITED_BY_EQ_MASK__SHIFT 0x17
+#define PCIE_LC_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_MONITOR_MASK__SHIFT 0x1b
+#define PCIE_LC_LINK_MANAGEMENT_MASK__ARBMUX_GEN_SUB_EVENTS_MASK__SHIFT 0x1c
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_SPEED_UPDATE_MASK_MASK 0x00000001L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK_MASK 0x00000002L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK_MASK 0x00000004L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_WIDTH_UPDATE_MASK_MASK 0x00000008L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK_MASK 0x00000010L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK_MASK 0x00000020L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__POWER_DOWN_COMMAND_COMPLETE_MASK_MASK 0x00000040L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__BANDWIDTH_UPDATE_MASK_MASK 0x00000080L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_POWER_STATE_CHANGE_MASK_MASK 0x00000100L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_HINT_MASK_MASK 0x00000200L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__EQUALIZATION_REQUEST_MASK_MASK 0x00000400L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_ESM_REQUEST_MASK_MASK 0x00000800L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LOW_SPEED_REQD_IMMEDIATE_MASK_MASK 0x00001000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__ESTABLISH_ESM_PLL_SETTINGS_MASK_MASK 0x00002000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__TRAINING_SET_MESSAGE_RCVD_MASK_MASK 0x00008000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__SAVE_RESTORE_EQ_SETTINGS_CHANGED_MASK_MASK 0x00010000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__SAVE_RESTORE_RE_RESTORE_NEEDED_MASK_MASK 0x00020000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_REQUIRES_HIGHER_SPEED_MASK_MASK 0x00040000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__LINK_PARTNER_REQUIRES_WIDER_LINK_WIDTH_MASK_MASK 0x00080000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__SAFE_RECOVER_SW_EVENT_MASK_MASK 0x00100000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__FAAE_EQUALIZATION_ENTERED_MASK_MASK 0x00200000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__FAAE_EVALUATION_READY_MASK_MASK 0x00400000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__RETRAIN_TARGET_LINK_SPEED_CHANGE_LIMITED_BY_EQ_MASK_MASK 0x00800000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_MONITOR_MASK_MASK 0x08000000L
+#define PCIE_LC_LINK_MANAGEMENT_MASK__ARBMUX_GEN_SUB_EVENTS_MASK_MASK 0x10000000L
+//PCIE_LC_LINK_MANAGEMENT_CNTL
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__FAR_END_WIDTH_SUPPORT__SHIFT 0x0
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE__SHIFT 0x3
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK__SHIFT 0x8
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_DOWN_POWER_STATE_MASK__SHIFT 0xc
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_UP__SHIFT 0xd
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__PORT_POWERED_DOWN__SHIFT 0xe
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__SPC_MODE__SHIFT 0xf
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__CLOCK_RATE__SHIFT 0x11
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__QUIESCE_RCVD__SHIFT 0x14
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__QUIESCE_SENT__SHIFT 0x15
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__REQ_EQ_RCVD__SHIFT 0x16
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__REQ_EQ_SENT__SHIFT 0x17
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_8GT__SHIFT 0x18
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_16GT__SHIFT 0x19
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_32GT__SHIFT 0x1a
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__FAR_END_WIDTH_SUPPORT_MASK 0x00000007L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK 0x000000F8L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK_MASK 0x00000F00L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_DOWN_POWER_STATE_MASK_MASK 0x00001000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__LINK_UP_MASK 0x00002000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__PORT_POWERED_DOWN_MASK 0x00004000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__SPC_MODE_MASK 0x00018000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__CLOCK_RATE_MASK 0x000E0000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__QUIESCE_RCVD_MASK 0x00100000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__QUIESCE_SENT_MASK 0x00200000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__REQ_EQ_RCVD_MASK 0x00400000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__REQ_EQ_SENT_MASK 0x00800000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_8GT_MASK 0x01000000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_16GT_MASK 0x02000000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_32GT_MASK 0x04000000L
+//PCIEP_STRAP_LC
+#define PCIEP_STRAP_LC__STRAP_FTS_yTSx_COUNT__SHIFT 0x0
+#define PCIEP_STRAP_LC__STRAP_LONG_yTSx_COUNT__SHIFT 0x2
+#define PCIEP_STRAP_LC__STRAP_MED_yTSx_COUNT__SHIFT 0x4
+#define PCIEP_STRAP_LC__STRAP_SHORT_yTSx_COUNT__SHIFT 0x6
+#define PCIEP_STRAP_LC__STRAP_SKIP_INTERVAL__SHIFT 0x8
+#define PCIEP_STRAP_LC__STRAP_BYPASS_RCVR_DET__SHIFT 0xb
+#define PCIEP_STRAP_LC__STRAP_COMPLIANCE_DIS__SHIFT 0xc
+#define PCIEP_STRAP_LC__STRAP_FORCE_COMPLIANCE__SHIFT 0xd
+#define PCIEP_STRAP_LC__STRAP_REVERSE_LC_LANES__SHIFT 0xe
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_DIS__SHIFT 0xf
+#define PCIEP_STRAP_LC__STRAP_LANE_NEGOTIATION__SHIFT 0x10
+#define PCIEP_STRAP_LC__STRAP_MARGINING_USES_SOFTWARE__SHIFT 0x13
+#define PCIEP_STRAP_LC__STRAP_RTM1_PRESENCE_DET_SUPP__SHIFT 0x14
+#define PCIEP_STRAP_LC__STRAP_RTM2_PRESENCE_DET_SUPP__SHIFT 0x15
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_16GT_DIS__SHIFT 0x16
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_32GT_DIS__SHIFT 0x17
+#define PCIEP_STRAP_LC__STRAP_LC_TRANSMIT_MUX_PAD_SMALL_SKID_ENTRIES_PCLK_CHANGE__SHIFT 0x1e
+#define PCIEP_STRAP_LC__STRAP_LC_TRANSMIT_MUX_PAD_SMALL_SKID_ENTRIES__SHIFT 0x1f
+#define PCIEP_STRAP_LC__STRAP_FTS_yTSx_COUNT_MASK 0x00000003L
+#define PCIEP_STRAP_LC__STRAP_LONG_yTSx_COUNT_MASK 0x0000000CL
+#define PCIEP_STRAP_LC__STRAP_MED_yTSx_COUNT_MASK 0x00000030L
+#define PCIEP_STRAP_LC__STRAP_SHORT_yTSx_COUNT_MASK 0x000000C0L
+#define PCIEP_STRAP_LC__STRAP_SKIP_INTERVAL_MASK 0x00000700L
+#define PCIEP_STRAP_LC__STRAP_BYPASS_RCVR_DET_MASK 0x00000800L
+#define PCIEP_STRAP_LC__STRAP_COMPLIANCE_DIS_MASK 0x00001000L
+#define PCIEP_STRAP_LC__STRAP_FORCE_COMPLIANCE_MASK 0x00002000L
+#define PCIEP_STRAP_LC__STRAP_REVERSE_LC_LANES_MASK 0x00004000L
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_DIS_MASK 0x00008000L
+#define PCIEP_STRAP_LC__STRAP_LANE_NEGOTIATION_MASK 0x00070000L
+#define PCIEP_STRAP_LC__STRAP_MARGINING_USES_SOFTWARE_MASK 0x00080000L
+#define PCIEP_STRAP_LC__STRAP_RTM1_PRESENCE_DET_SUPP_MASK 0x00100000L
+#define PCIEP_STRAP_LC__STRAP_RTM2_PRESENCE_DET_SUPP_MASK 0x00200000L
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_16GT_DIS_MASK 0x00400000L
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_32GT_DIS_MASK 0x00800000L
+#define PCIEP_STRAP_LC__STRAP_LC_TRANSMIT_MUX_PAD_SMALL_SKID_ENTRIES_PCLK_CHANGE_MASK 0x40000000L
+#define PCIEP_STRAP_LC__STRAP_LC_TRANSMIT_MUX_PAD_SMALL_SKID_ENTRIES_MASK 0x80000000L
+//PCIEP_STRAP_MISC
+#define PCIEP_STRAP_MISC__STRAP_REVERSE_LANES__SHIFT 0x0
+#define PCIEP_STRAP_MISC__STRAP_E2E_PREFIX_EN__SHIFT 0x1
+#define PCIEP_STRAP_MISC__STRAP_EXTENDED_FMT_SUPPORTED__SHIFT 0x2
+#define PCIEP_STRAP_MISC__STRAP_OBFF_SUPPORTED__SHIFT 0x3
+#define PCIEP_STRAP_MISC__STRAP_LTR_SUPPORTED__SHIFT 0x5
+#define PCIEP_STRAP_MISC__STRAP_CCIX_EN__SHIFT 0x6
+#define PCIEP_STRAP_MISC__STRAP_CCIX_OPT_TLP_FMT_SUPPORT__SHIFT 0x7
+#define PCIEP_STRAP_MISC__STRAP_AP_EN__SHIFT 0x8
+#define PCIEP_STRAP_MISC__STRAP_REVERSE_LANES_MASK 0x00000001L
+#define PCIEP_STRAP_MISC__STRAP_E2E_PREFIX_EN_MASK 0x00000002L
+#define PCIEP_STRAP_MISC__STRAP_EXTENDED_FMT_SUPPORTED_MASK 0x00000004L
+#define PCIEP_STRAP_MISC__STRAP_OBFF_SUPPORTED_MASK 0x00000018L
+#define PCIEP_STRAP_MISC__STRAP_LTR_SUPPORTED_MASK 0x00000020L
+#define PCIEP_STRAP_MISC__STRAP_CCIX_EN_MASK 0x00000040L
+#define PCIEP_STRAP_MISC__STRAP_CCIX_OPT_TLP_FMT_SUPPORT_MASK 0x00000080L
+#define PCIEP_STRAP_MISC__STRAP_AP_EN_MASK 0x00000100L
+//PCIEP_STRAP_LC2
+#define PCIEP_STRAP_LC2__STRAP_ESM_MODE_SUPPORTED__SHIFT 0x0
+#define PCIEP_STRAP_LC2__STRAP_ESM_PHY_REACH_LEN_CAP__SHIFT 0x1
+#define PCIEP_STRAP_LC2__STRAP_ESM_RECAL_NEEDED__SHIFT 0x3
+#define PCIEP_STRAP_LC2__STRAP_ESM_CALIB_TIME__SHIFT 0x4
+#define PCIEP_STRAP_LC2__STRAP_ESM_QUICK_EQ_TIMEOUT__SHIFT 0x7
+#define PCIEP_STRAP_LC2__STRAP_ESM_MODE_SUPPORTED_MASK 0x00000001L
+#define PCIEP_STRAP_LC2__STRAP_ESM_PHY_REACH_LEN_CAP_MASK 0x00000006L
+#define PCIEP_STRAP_LC2__STRAP_ESM_RECAL_NEEDED_MASK 0x00000008L
+#define PCIEP_STRAP_LC2__STRAP_ESM_CALIB_TIME_MASK 0x00000070L
+#define PCIEP_STRAP_LC2__STRAP_ESM_QUICK_EQ_TIMEOUT_MASK 0x00000380L
+//PCIE_LC_L1_PM_SUBSTATE
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN__SHIFT 0x0
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE__SHIFT 0x1
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE__SHIFT 0x2
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE__SHIFT 0x3
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE__SHIFT 0x4
+#define PCIE_LC_L1_PM_SUBSTATE__LC_CLKREQ_FILTER_EN__SHIFT 0x5
+#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_SCALE__SHIFT 0x6
+#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_VALUE__SHIFT 0x8
+#define PCIE_LC_L1_PM_SUBSTATE__T_POWER_ON_FCH_COPY_EN__SHIFT 0xd
+#define PCIE_LC_L1_PM_SUBSTATE__T_POWER_ON_FCH_COPY_TRIGGER__SHIFT 0xe
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_BLOCK_EXIT_PG_COMMIT__SHIFT 0xf
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_1_POWERDOWN__SHIFT 0x10
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_POWERDOWN__SHIFT 0x14
+#define PCIE_LC_L1_PM_SUBSTATE__LC_DEFER_L1_2_EXIT__SHIFT 0x17
+#define PCIE_LC_L1_PM_SUBSTATE__LC_WAKE_FROM_ASPM_L1_ON_PM_CONTROL_CLEAR__SHIFT 0x1a
+#define PCIE_LC_L1_PM_SUBSTATE__LC_FORCE_L1_PG_EXIT_ON_REG_WRITE__SHIFT 0x1b
+#define PCIE_LC_L1_PM_SUBSTATE__LC_QUICK_L1_1_ABORT_IN_L1__SHIFT 0x1c
+#define PCIE_LC_L1_PM_SUBSTATE__LC_QUICK_L1_2_ABORT_IN_L1__SHIFT 0x1d
+#define PCIE_LC_L1_PM_SUBSTATE__LC_AUX_COUNT_REFCLK_INCREMENT_EN__SHIFT 0x1e
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_CLKREQ_FILTER_EN_MASK 0x00000020L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_SCALE_MASK 0x000000C0L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_VALUE_MASK 0x00001F00L
+#define PCIE_LC_L1_PM_SUBSTATE__T_POWER_ON_FCH_COPY_EN_MASK 0x00002000L
+#define PCIE_LC_L1_PM_SUBSTATE__T_POWER_ON_FCH_COPY_TRIGGER_MASK 0x00004000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_BLOCK_EXIT_PG_COMMIT_MASK 0x00008000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_1_POWERDOWN_MASK 0x00070000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_POWERDOWN_MASK 0x00700000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_DEFER_L1_2_EXIT_MASK 0x03800000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_WAKE_FROM_ASPM_L1_ON_PM_CONTROL_CLEAR_MASK 0x04000000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_FORCE_L1_PG_EXIT_ON_REG_WRITE_MASK 0x08000000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_QUICK_L1_1_ABORT_IN_L1_MASK 0x10000000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_QUICK_L1_2_ABORT_IN_L1_MASK 0x20000000L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_AUX_COUNT_REFCLK_INCREMENT_EN_MASK 0x40000000L
+//PCIE_LC_L1_PM_SUBSTATE2
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_CM_RESTORE_TIME__SHIFT 0x0
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_SCALE__SHIFT 0x8
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_ABORT_L1_ENTRY_RX_ERROR__SHIFT 0xd
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_AUX_COUNT_REFCLK_INCREMENT_INTERNAL_POWERDOWN__SHIFT 0xe
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_AUX_COUNT_REFCLK_INCREMENT_INTERNAL_P2_EDGE__SHIFT 0xf
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_VALUE__SHIFT 0x10
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_DELAY_POWERDOWN_P2_L1_2_EXIT__SHIFT 0x1b
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_DELAY_REFCLK_L1_2_T_POWERON__SHIFT 0x1c
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_IGNORE_RX_ELEC_IDLE_IN_L1_2__SHIFT 0x1d
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_SKIP_L1_2_POWERDOWN_IN_ABORTED_ENTRY__SHIFT 0x1e
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_BLOCK_NEAREND_L1_2_WAKEUP__SHIFT 0x1f
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_CM_RESTORE_TIME_MASK 0x000000FFL
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_SCALE_MASK 0x00000700L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_ABORT_L1_ENTRY_RX_ERROR_MASK 0x00002000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_AUX_COUNT_REFCLK_INCREMENT_INTERNAL_POWERDOWN_MASK 0x00004000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_AUX_COUNT_REFCLK_INCREMENT_INTERNAL_P2_EDGE_MASK 0x00008000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_VALUE_MASK 0x03FF0000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_DELAY_POWERDOWN_P2_L1_2_EXIT_MASK 0x08000000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_DELAY_REFCLK_L1_2_T_POWERON_MASK 0x10000000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_IGNORE_RX_ELEC_IDLE_IN_L1_2_MASK 0x20000000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_SKIP_L1_2_POWERDOWN_IN_ABORTED_ENTRY_MASK 0x40000000L
+#define PCIE_LC_L1_PM_SUBSTATE2__LC_BLOCK_NEAREND_L1_2_WAKEUP_MASK 0x80000000L
+//PCIE_LC_L1_PM_SUBSTATE3
+#define PCIE_LC_L1_PM_SUBSTATE3__T_POWER_ON_FCH_TARGET_ADDRESS_LO__SHIFT 0x0
+#define PCIE_LC_L1_PM_SUBSTATE3__T_POWER_ON_FCH_TARGET_ADDRESS_LO_MASK 0xFFFFFFFFL
+//PCIE_LC_L1_PM_SUBSTATE4
+#define PCIE_LC_L1_PM_SUBSTATE4__T_POWER_ON_FCH_TARGET_ADDRESS_HI__SHIFT 0x0
+#define PCIE_LC_L1_PM_SUBSTATE4__T_POWER_ON_FCH_TARGET_ADDRESS_HI_MASK 0xFFFFFFFFL
+//PCIE_LC_L1_PM_SUBSTATE5
+#define PCIE_LC_L1_PM_SUBSTATE5__T_POWER_ON_FCH_L12_CLKREQ_DELAY__SHIFT 0x0
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_RECOVERY_WAIT_FOR_ASPM_NAK_ABORT_TIMER__SHIFT 0x8
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_ABORT_L1_2_ENTRY_CLKREQ_PULSE__SHIFT 0x12
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DSC_WAKE_FROM_L1_FOR_L23__SHIFT 0x13
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DSC_CLEAR_L23_CONDITIONS_MODE__SHIFT 0x14
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_NBIF_ASPM_INPUT_MODE__SHIFT 0x15
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_BLOCK_NEW_GO_TO_PM_EN__SHIFT 0x16
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_EXTEND_LC_TX_CLEAR_PM_REQS__SHIFT 0x17
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_TX_PM_SIGNALS_IGNORE_HANDSHAKE__SHIFT 0x18
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_ASPM_L1_ABORT_ALWAYS_ASSERT_STOP_SENDING_PKTS__SHIFT 0x19
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_EXTEND_L1L2_ENTRY_SIGNALING_ON_ABORT__SHIFT 0x1a
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_BLOCK_EI_L1_REFCLK_OFF__SHIFT 0x1b
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_WAKE_L0S_FOR_ASPM_NAK__SHIFT 0x1c
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DONT_BLOCK_RECOVERY_ASPM_NAK_PIPE_STOPPED__SHIFT 0x1d
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DONT_GEN_L1_NAK_WHEN_PIPE_STOPPED__SHIFT 0x1e
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_IGNORE_ALL_RX_ELEC_IDLE_IN_L1SS__SHIFT 0x1f
+#define PCIE_LC_L1_PM_SUBSTATE5__T_POWER_ON_FCH_L12_CLKREQ_DELAY_MASK 0x000000FFL
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_RECOVERY_WAIT_FOR_ASPM_NAK_ABORT_TIMER_MASK 0x00000300L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_ABORT_L1_2_ENTRY_CLKREQ_PULSE_MASK 0x00040000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DSC_WAKE_FROM_L1_FOR_L23_MASK 0x00080000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DSC_CLEAR_L23_CONDITIONS_MODE_MASK 0x00100000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_NBIF_ASPM_INPUT_MODE_MASK 0x00200000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_BLOCK_NEW_GO_TO_PM_EN_MASK 0x00400000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_EXTEND_LC_TX_CLEAR_PM_REQS_MASK 0x00800000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_TX_PM_SIGNALS_IGNORE_HANDSHAKE_MASK 0x01000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_ASPM_L1_ABORT_ALWAYS_ASSERT_STOP_SENDING_PKTS_MASK 0x02000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_EXTEND_L1L2_ENTRY_SIGNALING_ON_ABORT_MASK 0x04000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_BLOCK_EI_L1_REFCLK_OFF_MASK 0x08000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_WAKE_L0S_FOR_ASPM_NAK_MASK 0x10000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DONT_BLOCK_RECOVERY_ASPM_NAK_PIPE_STOPPED_MASK 0x20000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_DONT_GEN_L1_NAK_WHEN_PIPE_STOPPED_MASK 0x40000000L
+#define PCIE_LC_L1_PM_SUBSTATE5__LC_IGNORE_ALL_RX_ELEC_IDLE_IN_L1SS_MASK 0x80000000L
+//PCIEP_BCH_ECC_CNTL
+#define PCIEP_BCH_ECC_CNTL__STRAP_BCH_ECC_EN__SHIFT 0x0
+#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_THRESHOLD__SHIFT 0x8
+#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_STATUS__SHIFT 0x10
+#define PCIEP_BCH_ECC_CNTL__STRAP_BCH_ECC_EN_MASK 0x00000001L
+#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_THRESHOLD_MASK 0x0000FF00L
+#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_STATUS_MASK 0xFFFF0000L
+//PCIE_LC_CNTL8
+#define PCIE_LC_CNTL8__LC_FOM_TIME__SHIFT 0x0
+#define PCIE_LC_CNTL8__LC_EX_SEARCH_TRAVERSAL_MODE__SHIFT 0x2
+#define PCIE_LC_CNTL8__LC_LOCK_IN_EQ_RESPONSE__SHIFT 0x3
+#define PCIE_LC_CNTL8__LC_ESM_RATE0_TIMER_FACTOR__SHIFT 0x4
+#define PCIE_LC_CNTL8__LC_ESM_RATE1_TIMER_FACTOR__SHIFT 0x6
+#define PCIE_LC_CNTL8__LC_ESM_RATE2_TIMER_FACTOR__SHIFT 0x8
+#define PCIE_LC_CNTL8__LC_USC_ACCEPTABLE_PRESETS__SHIFT 0xa
+#define PCIE_LC_CNTL8__LC_FORCE_LOOPBACK_EQ_ON__SHIFT 0x14
+#define PCIE_LC_CNTL8__LC_LOOPBACK_EQ_IN_PROGRESS__SHIFT 0x15
+#define PCIE_LC_CNTL8__LC_FORCE_LOOPBACK_EQ_TRANSMIT_MOD_COMP_PATTERN__SHIFT 0x16
+#define PCIE_LC_CNTL8__LC_LOOPBACK_EQ_TRANSMIT_MOD_COMP_PATTERN__SHIFT 0x17
+#define PCIE_LC_CNTL8__LC_FORCE_LOOPBACK_EQ_LANE_UNDER_TEST__SHIFT 0x18
+#define PCIE_LC_CNTL8__LC_LOOPBACK_EQ_LANE_UNDER_TEST__SHIFT 0x1c
+#define PCIE_LC_CNTL8__LC_FOM_TIME_MASK 0x00000003L
+#define PCIE_LC_CNTL8__LC_EX_SEARCH_TRAVERSAL_MODE_MASK 0x00000004L
+#define PCIE_LC_CNTL8__LC_LOCK_IN_EQ_RESPONSE_MASK 0x00000008L
+#define PCIE_LC_CNTL8__LC_ESM_RATE0_TIMER_FACTOR_MASK 0x00000030L
+#define PCIE_LC_CNTL8__LC_ESM_RATE1_TIMER_FACTOR_MASK 0x000000C0L
+#define PCIE_LC_CNTL8__LC_ESM_RATE2_TIMER_FACTOR_MASK 0x00000300L
+#define PCIE_LC_CNTL8__LC_USC_ACCEPTABLE_PRESETS_MASK 0x000FFC00L
+#define PCIE_LC_CNTL8__LC_FORCE_LOOPBACK_EQ_ON_MASK 0x00100000L
+#define PCIE_LC_CNTL8__LC_LOOPBACK_EQ_IN_PROGRESS_MASK 0x00200000L
+#define PCIE_LC_CNTL8__LC_FORCE_LOOPBACK_EQ_TRANSMIT_MOD_COMP_PATTERN_MASK 0x00400000L
+#define PCIE_LC_CNTL8__LC_LOOPBACK_EQ_TRANSMIT_MOD_COMP_PATTERN_MASK 0x00800000L
+#define PCIE_LC_CNTL8__LC_FORCE_LOOPBACK_EQ_LANE_UNDER_TEST_MASK 0x0F000000L
+#define PCIE_LC_CNTL8__LC_LOOPBACK_EQ_LANE_UNDER_TEST_MASK 0xF0000000L
+//PCIE_LC_CNTL9
+#define PCIE_LC_CNTL9__LC_RESET_RCVR_DETECTED_ALL_ARCS__SHIFT 0x0
+#define PCIE_LC_CNTL9__LC_LOOPBACK_WAIT_FOR_ALL_ACTIVE_LANES__SHIFT 0x1
+#define PCIE_LC_CNTL9__LC_CHECK_EC_GEN3_LOOPBACK_ACTIVE__SHIFT 0x2
+#define PCIE_LC_CNTL9__LC_LOOPBACK_EQ_ARC_EN__SHIFT 0x3
+#define PCIE_LC_CNTL9__LC_LOOPBACK_EQ_TRANSMIT_MOD_COMP_PATTERN_EN__SHIFT 0x4
+#define PCIE_LC_CNTL9__LC_ENFORCE_SINGLE_L1_SUBSTATE_CLK_PDWN_ASSERTION_EN__SHIFT 0x5
+#define PCIE_LC_CNTL9__LC_EXT_ASPM_L12_COMMONMODE_COUNT_METHOD__SHIFT 0x6
+#define PCIE_LC_CNTL9__LC_ALT_RX_EQ_IN_PROGRESS_EN__SHIFT 0x7
+#define PCIE_LC_CNTL9__LC_USE_LONG_SERIAL_QUICKSIM_TIMEOUTS__SHIFT 0x8
+#define PCIE_LC_CNTL9__LC_ALLOW_DLLPS_OTHER_SIDE_REMOVE_SPEED__SHIFT 0x9
+#define PCIE_LC_CNTL9__LC_DELAY_POLL_COMP_SPD_CHG_AFTER_TXMARGIN__SHIFT 0xa
+#define PCIE_LC_CNTL9__LC_RESET_SKP_SELECT_16GT_ON_TRAINING_BIT__SHIFT 0xb
+#define PCIE_LC_CNTL9__LC_TRAINING_BITS_REQUIRED__SHIFT 0xc
+#define PCIE_LC_CNTL9__LC_REPEAT_RXEQEVAL_AFTER_TIMEOUT__SHIFT 0xe
+#define PCIE_LC_CNTL9__LC_CPM_IDLE_REFCLKREQ_CHECK__SHIFT 0xf
+#define PCIE_LC_CNTL9__LC_REFCLK_OFF_NO_RCVR_LANES__SHIFT 0x10
+#define PCIE_LC_CNTL9__LC_REFCLKREQ_IN_HOLD_TRAINING__SHIFT 0x12
+#define PCIE_LC_CNTL9__LC_DEASSERT_REFCLKREQ_IN_NON_SS_L1__SHIFT 0x13
+#define PCIE_LC_CNTL9__LC_HOLD_REFCLKREQ_UNTIL_L1SS_POWERDOWN__SHIFT 0x14
+#define PCIE_LC_CNTL9__LC_CLKGATE_WAIT_FOR_REFCLKACK__SHIFT 0x15
+#define PCIE_LC_CNTL9__LC_DYN_LANES_L1_SS_POWERDOWN__SHIFT 0x16
+#define PCIE_LC_CNTL9__LC_USE_OLD_PHYSTATUS_FOR_POWERDOWN_INACTIVE__SHIFT 0x17
+#define PCIE_LC_CNTL9__LC_BLOCK_L0s_FOR_POWERDOWN_CHANGE__SHIFT 0x18
+#define PCIE_LC_CNTL9__LC_RECOVERY_WAIT_FOR_ASPM_NAK__SHIFT 0x19
+#define PCIE_LC_CNTL9__LC_WAIT_FOR_NONPAD_LINK_NUM_LANE0__SHIFT 0x1a
+#define PCIE_LC_CNTL9__LC_CLR_LINK_LANE_NUM_ON_NO_TSX_LANE__SHIFT 0x1b
+#define PCIE_LC_CNTL9__LC_USE_NEW_EQ_SYMBOL_6_EN__SHIFT 0x1c
+#define PCIE_LC_CNTL9__LC_DEC_FAILED_SPEED_CHANGE_COUNT_ABORT_BYPASS_TO_HIGH_RATE__SHIFT 0x1d
+#define PCIE_LC_CNTL9__LC_CONFIG_WAIT_FOR_EIEOS__SHIFT 0x1e
+#define PCIE_LC_CNTL9__LC_HOLD_TLP_TO_XMIT_PULSE_IN_L1__SHIFT 0x1f
+#define PCIE_LC_CNTL9__LC_RESET_RCVR_DETECTED_ALL_ARCS_MASK 0x00000001L
+#define PCIE_LC_CNTL9__LC_LOOPBACK_WAIT_FOR_ALL_ACTIVE_LANES_MASK 0x00000002L
+#define PCIE_LC_CNTL9__LC_CHECK_EC_GEN3_LOOPBACK_ACTIVE_MASK 0x00000004L
+#define PCIE_LC_CNTL9__LC_LOOPBACK_EQ_ARC_EN_MASK 0x00000008L
+#define PCIE_LC_CNTL9__LC_LOOPBACK_EQ_TRANSMIT_MOD_COMP_PATTERN_EN_MASK 0x00000010L
+#define PCIE_LC_CNTL9__LC_ENFORCE_SINGLE_L1_SUBSTATE_CLK_PDWN_ASSERTION_EN_MASK 0x00000020L
+#define PCIE_LC_CNTL9__LC_EXT_ASPM_L12_COMMONMODE_COUNT_METHOD_MASK 0x00000040L
+#define PCIE_LC_CNTL9__LC_ALT_RX_EQ_IN_PROGRESS_EN_MASK 0x00000080L
+#define PCIE_LC_CNTL9__LC_USE_LONG_SERIAL_QUICKSIM_TIMEOUTS_MASK 0x00000100L
+#define PCIE_LC_CNTL9__LC_ALLOW_DLLPS_OTHER_SIDE_REMOVE_SPEED_MASK 0x00000200L
+#define PCIE_LC_CNTL9__LC_DELAY_POLL_COMP_SPD_CHG_AFTER_TXMARGIN_MASK 0x00000400L
+#define PCIE_LC_CNTL9__LC_RESET_SKP_SELECT_16GT_ON_TRAINING_BIT_MASK 0x00000800L
+#define PCIE_LC_CNTL9__LC_TRAINING_BITS_REQUIRED_MASK 0x00003000L
+#define PCIE_LC_CNTL9__LC_REPEAT_RXEQEVAL_AFTER_TIMEOUT_MASK 0x00004000L
+#define PCIE_LC_CNTL9__LC_CPM_IDLE_REFCLKREQ_CHECK_MASK 0x00008000L
+#define PCIE_LC_CNTL9__LC_REFCLK_OFF_NO_RCVR_LANES_MASK 0x00010000L
+#define PCIE_LC_CNTL9__LC_REFCLKREQ_IN_HOLD_TRAINING_MASK 0x00040000L
+#define PCIE_LC_CNTL9__LC_DEASSERT_REFCLKREQ_IN_NON_SS_L1_MASK 0x00080000L
+#define PCIE_LC_CNTL9__LC_HOLD_REFCLKREQ_UNTIL_L1SS_POWERDOWN_MASK 0x00100000L
+#define PCIE_LC_CNTL9__LC_CLKGATE_WAIT_FOR_REFCLKACK_MASK 0x00200000L
+#define PCIE_LC_CNTL9__LC_DYN_LANES_L1_SS_POWERDOWN_MASK 0x00400000L
+#define PCIE_LC_CNTL9__LC_USE_OLD_PHYSTATUS_FOR_POWERDOWN_INACTIVE_MASK 0x00800000L
+#define PCIE_LC_CNTL9__LC_BLOCK_L0s_FOR_POWERDOWN_CHANGE_MASK 0x01000000L
+#define PCIE_LC_CNTL9__LC_RECOVERY_WAIT_FOR_ASPM_NAK_MASK 0x02000000L
+#define PCIE_LC_CNTL9__LC_WAIT_FOR_NONPAD_LINK_NUM_LANE0_MASK 0x04000000L
+#define PCIE_LC_CNTL9__LC_CLR_LINK_LANE_NUM_ON_NO_TSX_LANE_MASK 0x08000000L
+#define PCIE_LC_CNTL9__LC_USE_NEW_EQ_SYMBOL_6_EN_MASK 0x10000000L
+#define PCIE_LC_CNTL9__LC_DEC_FAILED_SPEED_CHANGE_COUNT_ABORT_BYPASS_TO_HIGH_RATE_MASK 0x20000000L
+#define PCIE_LC_CNTL9__LC_CONFIG_WAIT_FOR_EIEOS_MASK 0x40000000L
+#define PCIE_LC_CNTL9__LC_HOLD_TLP_TO_XMIT_PULSE_IN_L1_MASK 0x80000000L
+//PCIE_LC_FORCE_COEFF2
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_COEFF_16GT__SHIFT 0x0
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_PRE_CURSOR_16GT__SHIFT 0x1
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_CURSOR_16GT__SHIFT 0x7
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_POST_CURSOR_16GT__SHIFT 0xd
+#define PCIE_LC_FORCE_COEFF2__LC_3X3_COEFF_SEARCH_EN_16GT__SHIFT 0x13
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_COEFF_16GT_MASK 0x00000001L
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_PRE_CURSOR_16GT_MASK 0x0000007EL
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_CURSOR_16GT_MASK 0x00001F80L
+#define PCIE_LC_FORCE_COEFF2__LC_FORCE_POST_CURSOR_16GT_MASK 0x0007E000L
+#define PCIE_LC_FORCE_COEFF2__LC_3X3_COEFF_SEARCH_EN_16GT_MASK 0x00080000L
+//PCIE_LC_FORCE_EQ_REQ_COEFF2
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_16GT__SHIFT 0x0
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_PRE_CURSOR_REQ_16GT__SHIFT 0x1
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_CURSOR_REQ_16GT__SHIFT 0x7
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_POST_CURSOR_REQ_16GT__SHIFT 0xd
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FS_OTHER_END_16GT__SHIFT 0x13
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_LF_OTHER_END_16GT__SHIFT 0x19
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_16GT_MASK 0x00000001L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_PRE_CURSOR_REQ_16GT_MASK 0x0000007EL
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_CURSOR_REQ_16GT_MASK 0x00001F80L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_POST_CURSOR_REQ_16GT_MASK 0x0007E000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FS_OTHER_END_16GT_MASK 0x01F80000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_LF_OTHER_END_16GT_MASK 0x7E000000L
+//PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_TRANSMIT_MUX_OUTPUT_GATING__SHIFT 0x0
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_SYMBOL_MUX_OUTPUT_GATING__SHIFT 0x1
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_LC_PKT_GEN_DYN_CLK_GATING__SHIFT 0x2
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_TRANSMIT_MUX_DYN_CLK_GATING__SHIFT 0x3
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_LTSSM_DYN_CLK_GATING__SHIFT 0x4
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_LANE_ORDER_OUTPUT_GATING__SHIFT 0x5
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_SYMBOL_MUX_GATING__SHIFT 0x14
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_LC_PKT_GEN_GATING__SHIFT 0x15
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_VLSM_PCIE_GATING__SHIFT 0x16
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_VLSM_CNLI_GATING__SHIFT 0x17
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_TXARB_GATING__SHIFT 0x18
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_SRB_GATING__SHIFT 0x19
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_RXDEMUX_GATING__SHIFT 0x1a
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_INACTIMER_GATING__SHIFT 0x1b
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_COLLECT_GATING__SHIFT 0x1c
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_LTSSM_GATING__SHIFT 0x1d
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_COEFF_GATING__SHIFT 0x1e
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_ALMPH_GATING__SHIFT 0x1f
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_TRANSMIT_MUX_OUTPUT_GATING_MASK 0x00000001L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_SYMBOL_MUX_OUTPUT_GATING_MASK 0x00000002L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_LC_PKT_GEN_DYN_CLK_GATING_MASK 0x00000004L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_TRANSMIT_MUX_DYN_CLK_GATING_MASK 0x00000008L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_LTSSM_DYN_CLK_GATING_MASK 0x00000010L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_LANE_ORDER_OUTPUT_GATING_MASK 0x00000020L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_SYMBOL_MUX_GATING_MASK 0x00100000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_LC_PKT_GEN_GATING_MASK 0x00200000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_VLSM_PCIE_GATING_MASK 0x00400000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_VLSM_CNLI_GATING_MASK 0x00800000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_TXARB_GATING_MASK 0x01000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_SRB_GATING_MASK 0x02000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_RXDEMUX_GATING_MASK 0x04000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_INACTIMER_GATING_MASK 0x08000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_COLLECT_GATING_MASK 0x10000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_LTSSM_GATING_MASK 0x20000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_COEFF_GATING_MASK 0x40000000L
+#define PCIE_LC_FINE_GRAIN_CLK_GATE_OVERRIDES__LC_DISABLE_DEBUG_BUS_ALMPH_GATING_MASK 0x80000000L
+//PCIE_LC_CNTL10
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_EN__SHIFT 0x0
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_MODE__SHIFT 0x1
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_PORT__SHIFT 0x2
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_RATE__SHIFT 0x3
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_VALUE__SHIFT 0x5
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_LANE__SHIFT 0x9
+#define PCIE_LC_CNTL10__LC_USE_PENDING_FOM_SKIP_SECOND_RXEQEVAL__SHIFT 0xd
+#define PCIE_LC_CNTL10__LC_DEFER_HOLD_TRAINING_GETLOCALPRESET__SHIFT 0xe
+#define PCIE_LC_CNTL10__LC_TIEOFF_PORTS_IGNORE_PHYSTATUS__SHIFT 0xf
+#define PCIE_LC_CNTL10__LC_CLEAR_CNTL_SKP_SELECT_DATASTREAM_EXIT__SHIFT 0x10
+#define PCIE_LC_CNTL10__LC_DEASSERT_REFCLKREQ_IN_L23__SHIFT 0x11
+#define PCIE_LC_CNTL10__LC_RELEASE_CLKREQ_IN_L23__SHIFT 0x12
+#define PCIE_LC_CNTL10__LC_RELEASE_CLKREQ_IN_NON_SS_L1__SHIFT 0x13
+#define PCIE_LC_CNTL10__LC_NO_SKIP_P0__SHIFT 0x14
+#define PCIE_LC_CNTL10__LC_DSC_L1_RXSTANDBY_WAIT_FOR_EIOS__SHIFT 0x15
+#define PCIE_LC_CNTL10__LC_LINK_DIS_DONT_WAIT_FOR_EIOS__SHIFT 0x16
+#define PCIE_LC_CNTL10__LC_LSLD_EN__SHIFT 0x17
+#define PCIE_LC_CNTL10__LC_LSLD_RATE_REQD__SHIFT 0x18
+#define PCIE_LC_CNTL10__LC_LSLD_MODE__SHIFT 0x1a
+#define PCIE_LC_CNTL10__LC_LSLD_DONE__SHIFT 0x1b
+#define PCIE_LC_CNTL10__LC_LSLD_TLS_ADVERTISED__SHIFT 0x1c
+#define PCIE_LC_CNTL10__LC_LSLD_CURRENT_RATE__SHIFT 0x1e
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_EN_MASK 0x00000001L
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_MODE_MASK 0x00000002L
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_PORT_MASK 0x00000004L
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_RATE_MASK 0x00000018L
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_VALUE_MASK 0x000001E0L
+#define PCIE_LC_CNTL10__LC_DEFAULT_PRESET_OVERRIDE_LANE_MASK 0x00001E00L
+#define PCIE_LC_CNTL10__LC_USE_PENDING_FOM_SKIP_SECOND_RXEQEVAL_MASK 0x00002000L
+#define PCIE_LC_CNTL10__LC_DEFER_HOLD_TRAINING_GETLOCALPRESET_MASK 0x00004000L
+#define PCIE_LC_CNTL10__LC_TIEOFF_PORTS_IGNORE_PHYSTATUS_MASK 0x00008000L
+#define PCIE_LC_CNTL10__LC_CLEAR_CNTL_SKP_SELECT_DATASTREAM_EXIT_MASK 0x00010000L
+#define PCIE_LC_CNTL10__LC_DEASSERT_REFCLKREQ_IN_L23_MASK 0x00020000L
+#define PCIE_LC_CNTL10__LC_RELEASE_CLKREQ_IN_L23_MASK 0x00040000L
+#define PCIE_LC_CNTL10__LC_RELEASE_CLKREQ_IN_NON_SS_L1_MASK 0x00080000L
+#define PCIE_LC_CNTL10__LC_NO_SKIP_P0_MASK 0x00100000L
+#define PCIE_LC_CNTL10__LC_DSC_L1_RXSTANDBY_WAIT_FOR_EIOS_MASK 0x00200000L
+#define PCIE_LC_CNTL10__LC_LINK_DIS_DONT_WAIT_FOR_EIOS_MASK 0x00400000L
+#define PCIE_LC_CNTL10__LC_LSLD_EN_MASK 0x00800000L
+#define PCIE_LC_CNTL10__LC_LSLD_RATE_REQD_MASK 0x03000000L
+#define PCIE_LC_CNTL10__LC_LSLD_MODE_MASK 0x04000000L
+#define PCIE_LC_CNTL10__LC_LSLD_DONE_MASK 0x08000000L
+#define PCIE_LC_CNTL10__LC_LSLD_TLS_ADVERTISED_MASK 0x30000000L
+#define PCIE_LC_CNTL10__LC_LSLD_CURRENT_RATE_MASK 0xC0000000L
+//PCIE_LC_EQ_CNTL_8GT
+#define PCIE_LC_EQ_CNTL_8GT__LC_BYPASS_EQ_8GT__SHIFT 0x0
+#define PCIE_LC_EQ_CNTL_8GT__LC_REDO_EQ_8GT__SHIFT 0x1
+#define PCIE_LC_EQ_CNTL_8GT__LC_EQ_SEARCH_MODE_8GT__SHIFT 0x2
+#define PCIE_LC_EQ_CNTL_8GT__LC_ENH_PRESET_SEARCH_SEL_8GT__SHIFT 0x4
+#define PCIE_LC_EQ_CNTL_8GT__LC_USC_EQ_NOT_REQD_8GT__SHIFT 0x6
+#define PCIE_LC_EQ_CNTL_8GT__LC_USC_GO_TO_EQ_8GT__SHIFT 0x7
+#define PCIE_LC_EQ_CNTL_8GT__LC_UNEXPECTED_COEFFS_RCVD_8GT__SHIFT 0x8
+#define PCIE_LC_EQ_CNTL_8GT__LC_BYPASS_EQ_REQ_PHASE_8GT__SHIFT 0x9
+#define PCIE_LC_EQ_CNTL_8GT__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_8GT__SHIFT 0xa
+#define PCIE_LC_EQ_CNTL_8GT__LC_FORCE_PRESET_VALUE_8GT__SHIFT 0xb
+#define PCIE_LC_EQ_CNTL_8GT__LC_SAFE_EQ_SEARCH_8GT__SHIFT 0xf
+#define PCIE_LC_EQ_CNTL_8GT__LC_8GT_EQ_REDO_EN__SHIFT 0x10
+#define PCIE_LC_EQ_CNTL_8GT__LC_DSC_ACCEPT_8GT_EQ_REDO__SHIFT 0x11
+#define PCIE_LC_EQ_CNTL_8GT__LC_USC_HW_8GT_EQ_REDO_EN__SHIFT 0x12
+#define PCIE_LC_EQ_CNTL_8GT__LC_ALWAYS_PERFORM_GEN3_PRESET_CONVERSION__SHIFT 0x13
+#define PCIE_LC_EQ_CNTL_8GT__LC_BYPASS_EQ_8GT_MASK 0x00000001L
+#define PCIE_LC_EQ_CNTL_8GT__LC_REDO_EQ_8GT_MASK 0x00000002L
+#define PCIE_LC_EQ_CNTL_8GT__LC_EQ_SEARCH_MODE_8GT_MASK 0x0000000CL
+#define PCIE_LC_EQ_CNTL_8GT__LC_ENH_PRESET_SEARCH_SEL_8GT_MASK 0x00000030L
+#define PCIE_LC_EQ_CNTL_8GT__LC_USC_EQ_NOT_REQD_8GT_MASK 0x00000040L
+#define PCIE_LC_EQ_CNTL_8GT__LC_USC_GO_TO_EQ_8GT_MASK 0x00000080L
+#define PCIE_LC_EQ_CNTL_8GT__LC_UNEXPECTED_COEFFS_RCVD_8GT_MASK 0x00000100L
+#define PCIE_LC_EQ_CNTL_8GT__LC_BYPASS_EQ_REQ_PHASE_8GT_MASK 0x00000200L
+#define PCIE_LC_EQ_CNTL_8GT__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_8GT_MASK 0x00000400L
+#define PCIE_LC_EQ_CNTL_8GT__LC_FORCE_PRESET_VALUE_8GT_MASK 0x00007800L
+#define PCIE_LC_EQ_CNTL_8GT__LC_SAFE_EQ_SEARCH_8GT_MASK 0x00008000L
+#define PCIE_LC_EQ_CNTL_8GT__LC_8GT_EQ_REDO_EN_MASK 0x00010000L
+#define PCIE_LC_EQ_CNTL_8GT__LC_DSC_ACCEPT_8GT_EQ_REDO_MASK 0x00020000L
+#define PCIE_LC_EQ_CNTL_8GT__LC_USC_HW_8GT_EQ_REDO_EN_MASK 0x00040000L
+#define PCIE_LC_EQ_CNTL_8GT__LC_ALWAYS_PERFORM_GEN3_PRESET_CONVERSION_MASK 0x00080000L
+//PCIE_LC_EQ_CNTL_16GT
+#define PCIE_LC_EQ_CNTL_16GT__LC_BYPASS_EQ_16GT__SHIFT 0x0
+#define PCIE_LC_EQ_CNTL_16GT__LC_REDO_EQ_16GT__SHIFT 0x1
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQ_SEARCH_MODE_16GT__SHIFT 0x2
+#define PCIE_LC_EQ_CNTL_16GT__LC_ENH_PRESET_SEARCH_SEL_16GT__SHIFT 0x4
+#define PCIE_LC_EQ_CNTL_16GT__LC_USC_EQ_NOT_REQD_16GT__SHIFT 0x6
+#define PCIE_LC_EQ_CNTL_16GT__LC_USC_GO_TO_EQ_16GT__SHIFT 0x7
+#define PCIE_LC_EQ_CNTL_16GT__LC_UNEXPECTED_COEFFS_RCVD_16GT__SHIFT 0x8
+#define PCIE_LC_EQ_CNTL_16GT__LC_BYPASS_EQ_REQ_PHASE_16GT__SHIFT 0x9
+#define PCIE_LC_EQ_CNTL_16GT__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_16GT__SHIFT 0xa
+#define PCIE_LC_EQ_CNTL_16GT__LC_FORCE_PRESET_VALUE_16GT__SHIFT 0xb
+#define PCIE_LC_EQ_CNTL_16GT__LC_SAFE_EQ_SEARCH_16GT__SHIFT 0xf
+#define PCIE_LC_EQ_CNTL_16GT__LC_16GT_EQ_REDO_EN__SHIFT 0x10
+#define PCIE_LC_EQ_CNTL_16GT__LC_BYPASS_EQ_PRESET_16GT__SHIFT 0x11
+#define PCIE_LC_EQ_CNTL_16GT__LC_DSC_ACCEPT_16GT_EQ_REDO__SHIFT 0x15
+#define PCIE_LC_EQ_CNTL_16GT__LC_USC_HW_16GT_EQ_REDO_EN__SHIFT 0x16
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_EN_16GT__SHIFT 0x17
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_16GT__SHIFT 0x18
+#define PCIE_LC_EQ_CNTL_16GT__LC_USE_EQTS2_PRESET_16GT__SHIFT 0x1c
+#define PCIE_LC_EQ_CNTL_16GT__LC_ALWAYS_PERFORM_GEN4_PRESET_CONVERSION__SHIFT 0x1d
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_REDO_EN_16GT__SHIFT 0x1e
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_REDO_MODE_16GT__SHIFT 0x1f
+#define PCIE_LC_EQ_CNTL_16GT__LC_BYPASS_EQ_16GT_MASK 0x00000001L
+#define PCIE_LC_EQ_CNTL_16GT__LC_REDO_EQ_16GT_MASK 0x00000002L
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQ_SEARCH_MODE_16GT_MASK 0x0000000CL
+#define PCIE_LC_EQ_CNTL_16GT__LC_ENH_PRESET_SEARCH_SEL_16GT_MASK 0x00000030L
+#define PCIE_LC_EQ_CNTL_16GT__LC_USC_EQ_NOT_REQD_16GT_MASK 0x00000040L
+#define PCIE_LC_EQ_CNTL_16GT__LC_USC_GO_TO_EQ_16GT_MASK 0x00000080L
+#define PCIE_LC_EQ_CNTL_16GT__LC_UNEXPECTED_COEFFS_RCVD_16GT_MASK 0x00000100L
+#define PCIE_LC_EQ_CNTL_16GT__LC_BYPASS_EQ_REQ_PHASE_16GT_MASK 0x00000200L
+#define PCIE_LC_EQ_CNTL_16GT__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_16GT_MASK 0x00000400L
+#define PCIE_LC_EQ_CNTL_16GT__LC_FORCE_PRESET_VALUE_16GT_MASK 0x00007800L
+#define PCIE_LC_EQ_CNTL_16GT__LC_SAFE_EQ_SEARCH_16GT_MASK 0x00008000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_16GT_EQ_REDO_EN_MASK 0x00010000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_BYPASS_EQ_PRESET_16GT_MASK 0x001E0000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_DSC_ACCEPT_16GT_EQ_REDO_MASK 0x00200000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_USC_HW_16GT_EQ_REDO_EN_MASK 0x00400000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_EN_16GT_MASK 0x00800000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_16GT_MASK 0x0F000000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_USE_EQTS2_PRESET_16GT_MASK 0x10000000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_ALWAYS_PERFORM_GEN4_PRESET_CONVERSION_MASK 0x20000000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_REDO_EN_16GT_MASK 0x40000000L
+#define PCIE_LC_EQ_CNTL_16GT__LC_EQTS2_PRESET_REDO_MODE_16GT_MASK 0x80000000L
+//PCIE_LC_SAVE_RESTORE_1
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_EN__SHIFT 0x0
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_DIRECTION__SHIFT 0x1
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_INDEX__SHIFT 0x2
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_ACKNOWLEDGE__SHIFT 0xa
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_DONE__SHIFT 0xb
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_FAST_RESTORE_EN__SHIFT 0xc
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_EQ_SETTINGS_RESTORED__SHIFT 0xd
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_SPEEDS__SHIFT 0xe
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_DATA_LO__SHIFT 0x10
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_EN_MASK 0x00000001L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_DIRECTION_MASK 0x00000002L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_INDEX_MASK 0x000003FCL
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_ACKNOWLEDGE_MASK 0x00000400L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_DONE_MASK 0x00000800L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_FAST_RESTORE_EN_MASK 0x00001000L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_EQ_SETTINGS_RESTORED_MASK 0x00002000L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_SPEEDS_MASK 0x0000C000L
+#define PCIE_LC_SAVE_RESTORE_1__LC_SAVE_RESTORE_DATA_LO_MASK 0xFFFF0000L
+//PCIE_LC_SAVE_RESTORE_2
+#define PCIE_LC_SAVE_RESTORE_2__LC_SAVE_RESTORE_DATA_HI__SHIFT 0x0
+#define PCIE_LC_SAVE_RESTORE_2__LC_SAVE_RESTORE_DATA_HI_MASK 0xFFFFFFFFL
+//PCIE_LC_SAVE_RESTORE_3
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_FORCE_NEAR_END_EN__SHIFT 0x0
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_FAST_RESTORE_NEGOTIATION_MODE__SHIFT 0x1
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_FAST_RESTORE_ABORT_MODE__SHIFT 0x2
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_OVERRIDE_EN__SHIFT 0x3
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_OVERRIDE_ACTIVE__SHIFT 0x4
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_ENABLE_L0_ABORT_EN__SHIFT 0x5
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_WAIT_MODE__SHIFT 0x6
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_FORCE_NEAR_END_EN_MASK 0x00000001L
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_FAST_RESTORE_NEGOTIATION_MODE_MASK 0x00000002L
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_FAST_RESTORE_ABORT_MODE_MASK 0x00000004L
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_OVERRIDE_EN_MASK 0x00000008L
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_OVERRIDE_ACTIVE_MASK 0x00000010L
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_ENABLE_L0_ABORT_EN_MASK 0x00000020L
+#define PCIE_LC_SAVE_RESTORE_3__LC_SAVE_RESTORE_WAIT_MODE_MASK 0x00000040L
+//PCIE_LC_EQ_CNTL_32GT
+#define PCIE_LC_EQ_CNTL_32GT__LC_BYPASS_EQ_32GT__SHIFT 0x0
+#define PCIE_LC_EQ_CNTL_32GT__LC_REDO_EQ_32GT__SHIFT 0x1
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQ_SEARCH_MODE_32GT__SHIFT 0x2
+#define PCIE_LC_EQ_CNTL_32GT__LC_ENH_PRESET_SEARCH_SEL_32GT__SHIFT 0x4
+#define PCIE_LC_EQ_CNTL_32GT__LC_USC_EQ_NOT_REQD_32GT__SHIFT 0x6
+#define PCIE_LC_EQ_CNTL_32GT__LC_USC_GO_TO_EQ_32GT__SHIFT 0x7
+#define PCIE_LC_EQ_CNTL_32GT__LC_UNEXPECTED_COEFFS_RCVD_32GT__SHIFT 0x8
+#define PCIE_LC_EQ_CNTL_32GT__LC_BYPASS_EQ_REQ_PHASE_32GT__SHIFT 0x9
+#define PCIE_LC_EQ_CNTL_32GT__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_32GT__SHIFT 0xa
+#define PCIE_LC_EQ_CNTL_32GT__LC_FORCE_PRESET_VALUE_32GT__SHIFT 0xb
+#define PCIE_LC_EQ_CNTL_32GT__LC_SAFE_EQ_SEARCH_32GT__SHIFT 0xf
+#define PCIE_LC_EQ_CNTL_32GT__LC_32GT_EQ_REDO_EN__SHIFT 0x10
+#define PCIE_LC_EQ_CNTL_32GT__LC_BYPASS_EQ_PRESET_32GT__SHIFT 0x11
+#define PCIE_LC_EQ_CNTL_32GT__LC_DSC_ACCEPT_32GT_EQ_REDO__SHIFT 0x15
+#define PCIE_LC_EQ_CNTL_32GT__LC_USC_HW_32GT_EQ_REDO_EN__SHIFT 0x16
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_EN_32GT__SHIFT 0x17
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_32GT__SHIFT 0x18
+#define PCIE_LC_EQ_CNTL_32GT__LC_USE_EQTS2_PRESET_32GT__SHIFT 0x1c
+#define PCIE_LC_EQ_CNTL_32GT__LC_ALWAYS_PERFORM_GEN5_PRESET_CONVERSION__SHIFT 0x1d
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_REDO_EN_32GT__SHIFT 0x1e
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_REDO_MODE_32GT__SHIFT 0x1f
+#define PCIE_LC_EQ_CNTL_32GT__LC_BYPASS_EQ_32GT_MASK 0x00000001L
+#define PCIE_LC_EQ_CNTL_32GT__LC_REDO_EQ_32GT_MASK 0x00000002L
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQ_SEARCH_MODE_32GT_MASK 0x0000000CL
+#define PCIE_LC_EQ_CNTL_32GT__LC_ENH_PRESET_SEARCH_SEL_32GT_MASK 0x00000030L
+#define PCIE_LC_EQ_CNTL_32GT__LC_USC_EQ_NOT_REQD_32GT_MASK 0x00000040L
+#define PCIE_LC_EQ_CNTL_32GT__LC_USC_GO_TO_EQ_32GT_MASK 0x00000080L
+#define PCIE_LC_EQ_CNTL_32GT__LC_UNEXPECTED_COEFFS_RCVD_32GT_MASK 0x00000100L
+#define PCIE_LC_EQ_CNTL_32GT__LC_BYPASS_EQ_REQ_PHASE_32GT_MASK 0x00000200L
+#define PCIE_LC_EQ_CNTL_32GT__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_32GT_MASK 0x00000400L
+#define PCIE_LC_EQ_CNTL_32GT__LC_FORCE_PRESET_VALUE_32GT_MASK 0x00007800L
+#define PCIE_LC_EQ_CNTL_32GT__LC_SAFE_EQ_SEARCH_32GT_MASK 0x00008000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_32GT_EQ_REDO_EN_MASK 0x00010000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_BYPASS_EQ_PRESET_32GT_MASK 0x001E0000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_DSC_ACCEPT_32GT_EQ_REDO_MASK 0x00200000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_USC_HW_32GT_EQ_REDO_EN_MASK 0x00400000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_EN_32GT_MASK 0x00800000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_32GT_MASK 0x0F000000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_USE_EQTS2_PRESET_32GT_MASK 0x10000000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_ALWAYS_PERFORM_GEN5_PRESET_CONVERSION_MASK 0x20000000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_REDO_EN_32GT_MASK 0x40000000L
+#define PCIE_LC_EQ_CNTL_32GT__LC_EQTS2_PRESET_REDO_MODE_32GT_MASK 0x80000000L
+//PCIE_LC_PRESET_MASK_CNTL
+#define PCIE_LC_PRESET_MASK_CNTL__LC_PRESET_MASK_8GT__SHIFT 0x0
+#define PCIE_LC_PRESET_MASK_CNTL__LC_PRESET_MASK_16GT__SHIFT 0xa
+#define PCIE_LC_PRESET_MASK_CNTL__LC_PRESET_MASK_32GT__SHIFT 0x14
+#define PCIE_LC_PRESET_MASK_CNTL__LC_PRESET_MASK_8GT_MASK 0x000003FFL
+#define PCIE_LC_PRESET_MASK_CNTL__LC_PRESET_MASK_16GT_MASK 0x000FFC00L
+#define PCIE_LC_PRESET_MASK_CNTL__LC_PRESET_MASK_32GT_MASK 0x3FF00000L
+//PCIE_LC_RXRECOVER_RXSTANDBY_CNTL
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_EN__SHIFT 0x0
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_TIMEOUT__SHIFT 0x1
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_USE_SEPARATE_RXRECOVER_TIMER__SHIFT 0x8
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_IN_POLL_ACTIVE_EN__SHIFT 0x9
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_IN_CONFIG_EN__SHIFT 0xa
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_LOOPBACK_RXEQEVAL_EN__SHIFT 0xb
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXEQEVAL_AFTER_BYPASSED_EQ_EN__SHIFT 0xc
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_FIRST_EQ_PHASE_RXEQEVAL_EN__SHIFT 0xd
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_FINAL_COEFF_TRACK_RX_MODE__SHIFT 0xe
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN__SHIFT 0x10
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_CLEAR_RXSTANDBY_ON_RATE_UPDATE_ONLY__SHIFT 0x11
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXSTANDBY_ON_SPEED_CHANGE_ONLY_EN__SHIFT 0x12
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_FOR_RXRECOVER_IN_RECOVERY_LOCK__SHIFT 0x13
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_FOR_RXRECOVER_IN_POLL_ACTIVE__SHIFT 0x14
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_FOR_RXRECOVER_IN_CONFIG__SHIFT 0x15
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_HOLD_RXSTANDBY_UNTIL_EI_EXIT_IN_POLL_ACTIVE_EN__SHIFT 0x16
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXEQEVAL_WAIT_FOR_RXSTANDBY__SHIFT 0x17
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXSTANDBY_INACTIVE_LINK_CHECK_EN__SHIFT 0x18
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ALWAYS_ASSERT_RXSTANDBY_DETECT__SHIFT 0x19
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ALWAYS_ASSERT_RXSTANDBY_POLL_COMP_SPD__SHIFT 0x1a
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_POLL_COMP_ENTRY__SHIFT 0x1b
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_DEFER_RXSTANDBY_POLL_ACTIVE__SHIFT 0x1c
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_DEASSERT_RX_EQ_IN_PROGRESS_MODE__SHIFT 0x1f
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_EN_MASK 0x00000001L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_TIMEOUT_MASK 0x000000FEL
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_USE_SEPARATE_RXRECOVER_TIMER_MASK 0x00000100L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_IN_POLL_ACTIVE_EN_MASK 0x00000200L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXRECOVER_IN_CONFIG_EN_MASK 0x00000400L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_LOOPBACK_RXEQEVAL_EN_MASK 0x00000800L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXEQEVAL_AFTER_BYPASSED_EQ_EN_MASK 0x00001000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_FIRST_EQ_PHASE_RXEQEVAL_EN_MASK 0x00002000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_FINAL_COEFF_TRACK_RX_MODE_MASK 0x00004000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK 0x00010000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_CLEAR_RXSTANDBY_ON_RATE_UPDATE_ONLY_MASK 0x00020000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXSTANDBY_ON_SPEED_CHANGE_ONLY_EN_MASK 0x00040000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_FOR_RXRECOVER_IN_RECOVERY_LOCK_MASK 0x00080000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_FOR_RXRECOVER_IN_POLL_ACTIVE_MASK 0x00100000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_FOR_RXRECOVER_IN_CONFIG_MASK 0x00200000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_HOLD_RXSTANDBY_UNTIL_EI_EXIT_IN_POLL_ACTIVE_EN_MASK 0x00400000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXEQEVAL_WAIT_FOR_RXSTANDBY_MASK 0x00800000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RXSTANDBY_INACTIVE_LINK_CHECK_EN_MASK 0x01000000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ALWAYS_ASSERT_RXSTANDBY_DETECT_MASK 0x02000000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ALWAYS_ASSERT_RXSTANDBY_POLL_COMP_SPD_MASK 0x04000000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_ASSERT_RXSTANDBY_POLL_COMP_ENTRY_MASK 0x08000000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_DEFER_RXSTANDBY_POLL_ACTIVE_MASK 0x70000000L
+#define PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_DEASSERT_RX_EQ_IN_PROGRESS_MODE_MASK 0x80000000L
+//PCIE_LC_CNTL11
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_SUPPORT__SHIFT 0x0
+#define PCIE_LC_CNTL11__LC_ADVERTISE_EQ_TO_HIGH_RATE_SUPPORT__SHIFT 0x1
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_RESERVED__SHIFT 0x2
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_RCVD__SHIFT 0x3
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_NEGOTIATED__SHIFT 0x4
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_FAILURE__SHIFT 0x5
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_SUPPORT__SHIFT 0x8
+#define PCIE_LC_CNTL11__LC_ADVERTISE_NO_EQ_NEEDED_SUPPORT__SHIFT 0x9
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_RESERVED__SHIFT 0xa
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_RCVD__SHIFT 0xb
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_NEGOTIATED__SHIFT 0xc
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_FAILURE__SHIFT 0xd
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_PRESET_SEL__SHIFT 0xe
+#define PCIE_LC_CNTL11__LC_ENHANCED_LINK_BEHAVIOR_CNTL_SENT__SHIFT 0xf
+#define PCIE_LC_CNTL11__LC_ENHANCED_LINK_BEHAVIOR_CNTL_RCVD__SHIFT 0x11
+#define PCIE_LC_CNTL11__LC_DISABLE_TRAINING_BIT_ARCH_IND__SHIFT 0x13
+#define PCIE_LC_CNTL11__LC_SET_TRANSMITTER_PRECODE_REQUEST__SHIFT 0x18
+#define PCIE_LC_CNTL11__LC_TRANSMITTER_PRECODE_REQUEST_RCVD__SHIFT 0x19
+#define PCIE_LC_CNTL11__LC_TRANSMITTER_PRECODE_ON__SHIFT 0x1a
+#define PCIE_LC_CNTL11__LC_TRANSMITTER_PRECODE_ON_RCVD__SHIFT 0x1b
+#define PCIE_LC_CNTL11__LC_LAST_TRANSMITTER_PRECODE_REQUEST__SHIFT 0x1c
+#define PCIE_LC_CNTL11__LC_CHECK_TS1_EC_ON_EQ_EXIT__SHIFT 0x1d
+#define PCIE_LC_CNTL11__LC_DELAY_ALL_RCVD_TS1_VALID_LINK_LANE_FLP__SHIFT 0x1f
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_SUPPORT_MASK 0x00000001L
+#define PCIE_LC_CNTL11__LC_ADVERTISE_EQ_TO_HIGH_RATE_SUPPORT_MASK 0x00000002L
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_RESERVED_MASK 0x00000004L
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_RCVD_MASK 0x00000008L
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_NEGOTIATED_MASK 0x00000010L
+#define PCIE_LC_CNTL11__LC_BYPASS_EQ_TO_HIGH_RATE_FAILURE_MASK 0x00000020L
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_SUPPORT_MASK 0x00000100L
+#define PCIE_LC_CNTL11__LC_ADVERTISE_NO_EQ_NEEDED_SUPPORT_MASK 0x00000200L
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_RESERVED_MASK 0x00000400L
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_RCVD_MASK 0x00000800L
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_NEGOTIATED_MASK 0x00001000L
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_FAILURE_MASK 0x00002000L
+#define PCIE_LC_CNTL11__LC_NO_EQ_NEEDED_PRESET_SEL_MASK 0x00004000L
+#define PCIE_LC_CNTL11__LC_ENHANCED_LINK_BEHAVIOR_CNTL_SENT_MASK 0x00018000L
+#define PCIE_LC_CNTL11__LC_ENHANCED_LINK_BEHAVIOR_CNTL_RCVD_MASK 0x00060000L
+#define PCIE_LC_CNTL11__LC_DISABLE_TRAINING_BIT_ARCH_IND_MASK 0x00F80000L
+#define PCIE_LC_CNTL11__LC_SET_TRANSMITTER_PRECODE_REQUEST_MASK 0x01000000L
+#define PCIE_LC_CNTL11__LC_TRANSMITTER_PRECODE_REQUEST_RCVD_MASK 0x02000000L
+#define PCIE_LC_CNTL11__LC_TRANSMITTER_PRECODE_ON_MASK 0x04000000L
+#define PCIE_LC_CNTL11__LC_TRANSMITTER_PRECODE_ON_RCVD_MASK 0x08000000L
+#define PCIE_LC_CNTL11__LC_LAST_TRANSMITTER_PRECODE_REQUEST_MASK 0x10000000L
+#define PCIE_LC_CNTL11__LC_CHECK_TS1_EC_ON_EQ_EXIT_MASK 0x20000000L
+#define PCIE_LC_CNTL11__LC_DELAY_ALL_RCVD_TS1_VALID_LINK_LANE_FLP_MASK 0x80000000L
+//PCIE_LC_CNTL12
+#define PCIE_LC_CNTL12__LC_DELAY_CLEAR_LANE_OFF_AFTER_LOOPBACK_SPD_CHG__SHIFT 0x0
+#define PCIE_LC_CNTL12__LC_DELAY_CLEAR_LANE_OFF_AFTER_LINKDIS_SPD_CHG__SHIFT 0x1
+#define PCIE_LC_CNTL12__LC_DETECT_PD_WAIT_FOR_REFCLKACK_OFF_LANES__SHIFT 0x2
+#define PCIE_LC_CNTL12__LC_DETECT_PD_HOLDTRAINING_WAIT_FOR_LANES_ON__SHIFT 0x3
+#define PCIE_LC_CNTL12__LC_ENSURE_TURN_OFF_DONE_LINKDIS__SHIFT 0x4
+#define PCIE_LC_CNTL12__LC_DELAY_PHASE1__SHIFT 0x5
+#define PCIE_LC_CNTL12__LC_BLOCKALIGN_IN_L1_ENTRY__SHIFT 0x8
+#define PCIE_LC_CNTL12__LC_USE_LEGACY_RXSB1_SPDCHG_ELECIDLE__SHIFT 0x9
+#define PCIE_LC_CNTL12__LC_LOCK_REVERSAL_EARLY_CONFIG_COMPLETE__SHIFT 0xa
+#define PCIE_LC_CNTL12__LC_LOCK_REVERSAL_IMMEDIATE_CONFIG_COMPLETE__SHIFT 0xb
+#define PCIE_LC_CNTL12__LC_USE_LOOPBACK_INACTIVE_LANES__SHIFT 0xc
+#define PCIE_LC_CNTL12__LC_LOOPBACK_TEST_MODE_RCVRDET__SHIFT 0xd
+#define PCIE_LC_CNTL12__LC_LOOPBACK_EQ_LOCK_REVERSAL__SHIFT 0xe
+#define PCIE_LC_CNTL12__LC_SKIP_LOCALPRESET_OFF_LANES__SHIFT 0xf
+#define PCIE_LC_CNTL12__LC_LIVE_DESKEW_MASK_EN__SHIFT 0x10
+#define PCIE_LC_CNTL12__LC_LIVE_DESKEW_8B10B_EN__SHIFT 0x11
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_DATA_UNLOCK__SHIFT 0x12
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_RX_RECOVER__SHIFT 0x13
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_RX_ADAPT__SHIFT 0x14
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_SW_INIT__SHIFT 0x15
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_SW_EVENT_SEL__SHIFT 0x16
+#define PCIE_LC_CNTL12__LC_DEFER_SKIP_INTERVAL_MODE__SHIFT 0x18
+#define PCIE_LC_CNTL12__LC_RECOVERY_EQ_WAIT_FOR_PIPE_STOPPED__SHIFT 0x19
+#define PCIE_LC_CNTL12__LC_HOLD_TX_STOP_SENDING_PKTS_REPLAY_RETRAIN__SHIFT 0x1a
+#define PCIE_LC_CNTL12__LC_RESET_TSX_CNT_ON_SAFERECOVER__SHIFT 0x1b
+#define PCIE_LC_CNTL12__LC_DSC_INITIATE_EQUALIZATION_OS_BOUNDARY__SHIFT 0x1c
+#define PCIE_LC_CNTL12__LC_EQ_REQ_PHASE_WAIT_FOR_FINAL_TS1__SHIFT 0x1d
+#define PCIE_LC_CNTL12__LC_RESET_TSX_CNT_ON_RXEQEVAL__SHIFT 0x1e
+#define PCIE_LC_CNTL12__LC_TRACK_RX_WAIT_FOR_TS1__SHIFT 0x1f
+#define PCIE_LC_CNTL12__LC_DELAY_CLEAR_LANE_OFF_AFTER_LOOPBACK_SPD_CHG_MASK 0x00000001L
+#define PCIE_LC_CNTL12__LC_DELAY_CLEAR_LANE_OFF_AFTER_LINKDIS_SPD_CHG_MASK 0x00000002L
+#define PCIE_LC_CNTL12__LC_DETECT_PD_WAIT_FOR_REFCLKACK_OFF_LANES_MASK 0x00000004L
+#define PCIE_LC_CNTL12__LC_DETECT_PD_HOLDTRAINING_WAIT_FOR_LANES_ON_MASK 0x00000008L
+#define PCIE_LC_CNTL12__LC_ENSURE_TURN_OFF_DONE_LINKDIS_MASK 0x00000010L
+#define PCIE_LC_CNTL12__LC_DELAY_PHASE1_MASK 0x000000E0L
+#define PCIE_LC_CNTL12__LC_BLOCKALIGN_IN_L1_ENTRY_MASK 0x00000100L
+#define PCIE_LC_CNTL12__LC_USE_LEGACY_RXSB1_SPDCHG_ELECIDLE_MASK 0x00000200L
+#define PCIE_LC_CNTL12__LC_LOCK_REVERSAL_EARLY_CONFIG_COMPLETE_MASK 0x00000400L
+#define PCIE_LC_CNTL12__LC_LOCK_REVERSAL_IMMEDIATE_CONFIG_COMPLETE_MASK 0x00000800L
+#define PCIE_LC_CNTL12__LC_USE_LOOPBACK_INACTIVE_LANES_MASK 0x00001000L
+#define PCIE_LC_CNTL12__LC_LOOPBACK_TEST_MODE_RCVRDET_MASK 0x00002000L
+#define PCIE_LC_CNTL12__LC_LOOPBACK_EQ_LOCK_REVERSAL_MASK 0x00004000L
+#define PCIE_LC_CNTL12__LC_SKIP_LOCALPRESET_OFF_LANES_MASK 0x00008000L
+#define PCIE_LC_CNTL12__LC_LIVE_DESKEW_MASK_EN_MASK 0x00010000L
+#define PCIE_LC_CNTL12__LC_LIVE_DESKEW_8B10B_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_DATA_UNLOCK_MASK 0x00040000L
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_RX_RECOVER_MASK 0x00080000L
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_RX_ADAPT_MASK 0x00100000L
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_SW_INIT_MASK 0x00200000L
+#define PCIE_LC_CNTL12__LC_SAFE_RECOVER_SW_EVENT_SEL_MASK 0x00C00000L
+#define PCIE_LC_CNTL12__LC_DEFER_SKIP_INTERVAL_MODE_MASK 0x01000000L
+#define PCIE_LC_CNTL12__LC_RECOVERY_EQ_WAIT_FOR_PIPE_STOPPED_MASK 0x02000000L
+#define PCIE_LC_CNTL12__LC_HOLD_TX_STOP_SENDING_PKTS_REPLAY_RETRAIN_MASK 0x04000000L
+#define PCIE_LC_CNTL12__LC_RESET_TSX_CNT_ON_SAFERECOVER_MASK 0x08000000L
+#define PCIE_LC_CNTL12__LC_DSC_INITIATE_EQUALIZATION_OS_BOUNDARY_MASK 0x10000000L
+#define PCIE_LC_CNTL12__LC_EQ_REQ_PHASE_WAIT_FOR_FINAL_TS1_MASK 0x20000000L
+#define PCIE_LC_CNTL12__LC_RESET_TSX_CNT_ON_RXEQEVAL_MASK 0x40000000L
+#define PCIE_LC_CNTL12__LC_TRACK_RX_WAIT_FOR_TS1_MASK 0x80000000L
+//PCIE_LC_SPEED_CNTL2
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_EN_SW_SPEED_CHANGE__SHIFT 0x0
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_DIS_SW_SPEED_CHANGE__SHIFT 0x1
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_EN_HW_SPEED_CHANGE__SHIFT 0x2
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_DIS_HW_SPEED_CHANGE__SHIFT 0x3
+#define PCIE_LC_SPEED_CNTL2__LC_INIT_SPEED_NEG_IN_L0s_EN__SHIFT 0x4
+#define PCIE_LC_SPEED_CNTL2__LC_INIT_SPEED_NEG_IN_L1_EN__SHIFT 0x5
+#define PCIE_LC_SPEED_CNTL2__LC_INITIATE_LINK_SPEED_CHANGE__SHIFT 0x6
+#define PCIE_LC_SPEED_CNTL2__LC_SPEED_CHANGE_STATUS__SHIFT 0x7
+#define PCIE_LC_SPEED_CNTL2__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x8
+#define PCIE_LC_SPEED_CNTL2__LC_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0xa
+#define PCIE_LC_SPEED_CNTL2__LC_CLR_FAILED_SPD_CHANGE_CNT__SHIFT 0xb
+#define PCIE_LC_SPEED_CNTL2__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN__SHIFT 0xc
+#define PCIE_LC_SPEED_CNTL2__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS__SHIFT 0xd
+#define PCIE_LC_SPEED_CNTL2__LC_1_OR_MORE_TS2_SPEED_ARC_EN__SHIFT 0xe
+#define PCIE_LC_SPEED_CNTL2__LC_ABORT_AUTO_EQ_AFTER_FAILED_EQ__SHIFT 0xf
+#define PCIE_LC_SPEED_CNTL2__LC_ENFORCE_CORRECT_SPEED_FOR_EQ__SHIFT 0x10
+#define PCIE_LC_SPEED_CNTL2__LC_ENFORCE_SOFTWARE_PERFORM_EQ__SHIFT 0x11
+#define PCIE_LC_SPEED_CNTL2__LC_SEND_EQ_TS2_IF_OTHER_SIDE_EVER_ADVERTISED_SPEED__SHIFT 0x12
+#define PCIE_LC_SPEED_CNTL2__LC_ENFORCE_SINGLE_EQ_PER_RECOVERY__SHIFT 0x13
+#define PCIE_LC_SPEED_CNTL2__LC_USE_LEGACY_CLEAR_DELAY_DLLPs__SHIFT 0x14
+#define PCIE_LC_SPEED_CNTL2__LC_DEFER_RETRAIN_LINK_UNTIL_EXIT_RECOVERY__SHIFT 0x15
+#define PCIE_LC_SPEED_CNTL2__LC_ABORT_AUTO_EQ_ON_FAIL_SPEED_CHANGE_LIMIT__SHIFT 0x16
+#define PCIE_LC_SPEED_CNTL2__LC_DEFER_PRIVATE_SPEED_CHANGE_UNTIL_EXIT_RECOVERY__SHIFT 0x17
+#define PCIE_LC_SPEED_CNTL2__LC_DONT_UPDATE_GEN_SUPPORT_MID_RECOVERY__SHIFT 0x19
+#define PCIE_LC_SPEED_CNTL2__LC_ALLOW_SET_INITIATE_SPEED_CHANGE_IN_RECOVERY_LOCK__SHIFT 0x1a
+#define PCIE_LC_SPEED_CNTL2__LC_ENABLE_DATA_STREAM_EMERGENCY_EXIT__SHIFT 0x1b
+#define PCIE_LC_SPEED_CNTL2__LC_LOCK_TARGET_LINK_SPEED_IN_RECOVERY__SHIFT 0x1c
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_EN_SW_SPEED_CHANGE_MASK 0x00000001L
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK 0x00000002L
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_EN_HW_SPEED_CHANGE_MASK 0x00000004L
+#define PCIE_LC_SPEED_CNTL2__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK 0x00000008L
+#define PCIE_LC_SPEED_CNTL2__LC_INIT_SPEED_NEG_IN_L0s_EN_MASK 0x00000010L
+#define PCIE_LC_SPEED_CNTL2__LC_INIT_SPEED_NEG_IN_L1_EN_MASK 0x00000020L
+#define PCIE_LC_SPEED_CNTL2__LC_INITIATE_LINK_SPEED_CHANGE_MASK 0x00000040L
+#define PCIE_LC_SPEED_CNTL2__LC_SPEED_CHANGE_STATUS_MASK 0x00000080L
+#define PCIE_LC_SPEED_CNTL2__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x00000300L
+#define PCIE_LC_SPEED_CNTL2__LC_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000400L
+#define PCIE_LC_SPEED_CNTL2__LC_CLR_FAILED_SPD_CHANGE_CNT_MASK 0x00000800L
+#define PCIE_LC_SPEED_CNTL2__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN_MASK 0x00001000L
+#define PCIE_LC_SPEED_CNTL2__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS_MASK 0x00002000L
+#define PCIE_LC_SPEED_CNTL2__LC_1_OR_MORE_TS2_SPEED_ARC_EN_MASK 0x00004000L
+#define PCIE_LC_SPEED_CNTL2__LC_ABORT_AUTO_EQ_AFTER_FAILED_EQ_MASK 0x00008000L
+#define PCIE_LC_SPEED_CNTL2__LC_ENFORCE_CORRECT_SPEED_FOR_EQ_MASK 0x00010000L
+#define PCIE_LC_SPEED_CNTL2__LC_ENFORCE_SOFTWARE_PERFORM_EQ_MASK 0x00020000L
+#define PCIE_LC_SPEED_CNTL2__LC_SEND_EQ_TS2_IF_OTHER_SIDE_EVER_ADVERTISED_SPEED_MASK 0x00040000L
+#define PCIE_LC_SPEED_CNTL2__LC_ENFORCE_SINGLE_EQ_PER_RECOVERY_MASK 0x00080000L
+#define PCIE_LC_SPEED_CNTL2__LC_USE_LEGACY_CLEAR_DELAY_DLLPs_MASK 0x00100000L
+#define PCIE_LC_SPEED_CNTL2__LC_DEFER_RETRAIN_LINK_UNTIL_EXIT_RECOVERY_MASK 0x00200000L
+#define PCIE_LC_SPEED_CNTL2__LC_ABORT_AUTO_EQ_ON_FAIL_SPEED_CHANGE_LIMIT_MASK 0x00400000L
+#define PCIE_LC_SPEED_CNTL2__LC_DEFER_PRIVATE_SPEED_CHANGE_UNTIL_EXIT_RECOVERY_MASK 0x01800000L
+#define PCIE_LC_SPEED_CNTL2__LC_DONT_UPDATE_GEN_SUPPORT_MID_RECOVERY_MASK 0x02000000L
+#define PCIE_LC_SPEED_CNTL2__LC_ALLOW_SET_INITIATE_SPEED_CHANGE_IN_RECOVERY_LOCK_MASK 0x04000000L
+#define PCIE_LC_SPEED_CNTL2__LC_ENABLE_DATA_STREAM_EMERGENCY_EXIT_MASK 0x08000000L
+#define PCIE_LC_SPEED_CNTL2__LC_LOCK_TARGET_LINK_SPEED_IN_RECOVERY_MASK 0x10000000L
+//PCIE_LC_FORCE_COEFF3
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_COEFF_32GT__SHIFT 0x0
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_PRE_CURSOR_32GT__SHIFT 0x1
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_CURSOR_32GT__SHIFT 0x7
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_POST_CURSOR_32GT__SHIFT 0xd
+#define PCIE_LC_FORCE_COEFF3__LC_3X3_COEFF_SEARCH_EN_32GT__SHIFT 0x13
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_COEFF_32GT_MASK 0x00000001L
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_PRE_CURSOR_32GT_MASK 0x0000007EL
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_CURSOR_32GT_MASK 0x00001F80L
+#define PCIE_LC_FORCE_COEFF3__LC_FORCE_POST_CURSOR_32GT_MASK 0x0007E000L
+#define PCIE_LC_FORCE_COEFF3__LC_3X3_COEFF_SEARCH_EN_32GT_MASK 0x00080000L
+//PCIE_LC_FORCE_EQ_REQ_COEFF3
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_32GT__SHIFT 0x0
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_PRE_CURSOR_REQ_32GT__SHIFT 0x1
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_CURSOR_REQ_32GT__SHIFT 0x7
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_POST_CURSOR_REQ_32GT__SHIFT 0xd
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FS_OTHER_END_32GT__SHIFT 0x13
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_LF_OTHER_END_32GT__SHIFT 0x19
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_32GT_MASK 0x00000001L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_PRE_CURSOR_REQ_32GT_MASK 0x0000007EL
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_CURSOR_REQ_32GT_MASK 0x00001F80L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FORCE_POST_CURSOR_REQ_32GT_MASK 0x0007E000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_FS_OTHER_END_32GT_MASK 0x01F80000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF3__LC_LF_OTHER_END_32GT_MASK 0x7E000000L
+//PCIE_LC_LINK_MANAGEMENT_CNTL3
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LOW_BW_THRESHOLD_G3__SHIFT 0x0
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__HIGH_BW_THRESHOLD_G3__SHIFT 0x4
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LOW_BW_THRESHOLD_G4__SHIFT 0x8
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__HIGH_BW_THRESHOLD_G4__SHIFT 0xc
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LOW_BW_THRESHOLD_G5__SHIFT 0x10
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__HIGH_BW_THRESHOLD_G5__SHIFT 0x14
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LC_NEG_LANE_OFF_ARC_OLD__SHIFT 0x18
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LOW_BW_THRESHOLD_G3_MASK 0x0000000FL
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__HIGH_BW_THRESHOLD_G3_MASK 0x000000F0L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LOW_BW_THRESHOLD_G4_MASK 0x00000F00L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__HIGH_BW_THRESHOLD_G4_MASK 0x0000F000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LOW_BW_THRESHOLD_G5_MASK 0x000F0000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__HIGH_BW_THRESHOLD_G5_MASK 0x00F00000L
+#define PCIE_LC_LINK_MANAGEMENT_CNTL3__LC_NEG_LANE_OFF_ARC_OLD_MASK 0x01000000L
+//PCIE_LC_Z10_IDLE_CNTL
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_FORCE_NON_IDLE__SHIFT 0x0
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_FORCE_ALWAYS_IDLE__SHIFT 0x1
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_WAIT_FOR_REFCLKACK_IDLE_L12__SHIFT 0x2
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_WAIT_FOR_REFCLKACK_IDLE_DETECT__SHIFT 0x3
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_WAIT_FOR_REFCLKACK_IDLE_L23__SHIFT 0x4
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_REPORT_IDLE_IN_L12_EN__SHIFT 0x5
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS__SHIFT 0x1c
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_DETECT_HOLDTRAINING__SHIFT 0x1d
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_L1_2__SHIFT 0x1e
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_L23__SHIFT 0x1f
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_FORCE_NON_IDLE_MASK 0x00000001L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_FORCE_ALWAYS_IDLE_MASK 0x00000002L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_WAIT_FOR_REFCLKACK_IDLE_L12_MASK 0x00000004L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_WAIT_FOR_REFCLKACK_IDLE_DETECT_MASK 0x00000008L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_WAIT_FOR_REFCLKACK_IDLE_L23_MASK 0x00000010L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_REPORT_IDLE_IN_L12_EN_MASK 0x00000020L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_MASK 0x10000000L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_DETECT_HOLDTRAINING_MASK 0x20000000L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_L1_2_MASK 0x40000000L
+#define PCIE_LC_Z10_IDLE_CNTL__LC_Z10_IDLE_STATUS_L23_MASK 0x80000000L
+//PCIE_LC_TRANMIT_FIFO_CDC_CNTL
+#define PCIE_LC_TRANMIT_FIFO_CDC_CNTL__LC_TFIFO_CDC_HIDE_EN__SHIFT 0x0
+#define PCIE_LC_TRANMIT_FIFO_CDC_CNTL__LC_TFIFO_CDC_HIDE_EARLY_RELEASE__SHIFT 0x1
+#define PCIE_LC_TRANMIT_FIFO_CDC_CNTL__LC_TFIFO_CDC_HIDE_DELAY__SHIFT 0x2
+#define PCIE_LC_TRANMIT_FIFO_CDC_CNTL__LC_TFIFO_CDC_HIDE_EN_MASK 0x00000001L
+#define PCIE_LC_TRANMIT_FIFO_CDC_CNTL__LC_TFIFO_CDC_HIDE_EARLY_RELEASE_MASK 0x00000002L
+#define PCIE_LC_TRANMIT_FIFO_CDC_CNTL__LC_TFIFO_CDC_HIDE_DELAY_MASK 0x000000FCL
+//PCIE_LC_CNTL13
+#define PCIE_LC_CNTL13__LC_CLEAR_PERFORMING_SCHEDULED_RXEQEVAL__SHIFT 0x0
+#define PCIE_LC_CNTL13__LC_SEND_EXTRA_SKIP_GEN3_CXL_SHB__SHIFT 0x1
+#define PCIE_LC_CNTL13__LC_SPEED_CHANGE_COUNT_MODE__SHIFT 0x2
+#define PCIE_LC_CNTL13__LC_ASSERT_RXSTANDBY_EARLIER_RECOVERY_SPEED__SHIFT 0x3
+#define PCIE_LC_CNTL13__LC_ALL_LANES_LOOPBACK_CHECK_MODE__SHIFT 0x4
+#define PCIE_LC_CNTL13__LC_DSC_PM_WAIT_FOR_FC_INIT__SHIFT 0x5
+#define PCIE_LC_CNTL13__LC_REQUIRE_RCV_SPEED_SUPPORT_IN_CONFIG_COMPLETE__SHIFT 0x6
+#define PCIE_LC_CNTL13__LC_SEND_CNTL_SKIP_IN_RIDLE_NO_DATASTREAM__SHIFT 0x7
+#define PCIE_LC_CNTL13__LC_RESET_SKIP_INTERVAL_ON_IDLE_CNTL_SKIP__SHIFT 0x8
+#define PCIE_LC_CNTL13__LC_EXTRA_WAIT_IN_DETECT__SHIFT 0x9
+#define PCIE_LC_CNTL13__LC_INFERRED_EI_FAILED_SPEED_MODE__SHIFT 0xc
+#define PCIE_LC_CNTL13__LC_FIRST_EQ_PHASE_RXEQEVAL_DELAY_MODE__SHIFT 0xd
+#define PCIE_LC_CNTL13__LC_TRACK_RX_WAIT_AUXCOUNT_RESET__SHIFT 0xf
+#define PCIE_LC_CNTL13__LC_BLOCK_NAK_GEN_ASPM_TIMEOUT_USC__SHIFT 0x10
+#define PCIE_LC_CNTL13__LC_L23_POWERDOWN_TARGET__SHIFT 0x11
+#define PCIE_LC_CNTL13__LC_ADVERTISE_MAX_SPEED_LINKUP_ZERO_CONFIG_COMPLETE__SHIFT 0x14
+#define PCIE_LC_CNTL13__LC_HR_WAIT_DETECT_EN__SHIFT 0x1d
+#define PCIE_LC_CNTL13__LC_LD_WAIT_DETECT_EN__SHIFT 0x1e
+#define PCIE_LC_CNTL13__LC_HR_LD_WAIT_DETECT_ACTIVE__SHIFT 0x1f
+#define PCIE_LC_CNTL13__LC_CLEAR_PERFORMING_SCHEDULED_RXEQEVAL_MASK 0x00000001L
+#define PCIE_LC_CNTL13__LC_SEND_EXTRA_SKIP_GEN3_CXL_SHB_MASK 0x00000002L
+#define PCIE_LC_CNTL13__LC_SPEED_CHANGE_COUNT_MODE_MASK 0x00000004L
+#define PCIE_LC_CNTL13__LC_ASSERT_RXSTANDBY_EARLIER_RECOVERY_SPEED_MASK 0x00000008L
+#define PCIE_LC_CNTL13__LC_ALL_LANES_LOOPBACK_CHECK_MODE_MASK 0x00000010L
+#define PCIE_LC_CNTL13__LC_DSC_PM_WAIT_FOR_FC_INIT_MASK 0x00000020L
+#define PCIE_LC_CNTL13__LC_REQUIRE_RCV_SPEED_SUPPORT_IN_CONFIG_COMPLETE_MASK 0x00000040L
+#define PCIE_LC_CNTL13__LC_SEND_CNTL_SKIP_IN_RIDLE_NO_DATASTREAM_MASK 0x00000080L
+#define PCIE_LC_CNTL13__LC_RESET_SKIP_INTERVAL_ON_IDLE_CNTL_SKIP_MASK 0x00000100L
+#define PCIE_LC_CNTL13__LC_EXTRA_WAIT_IN_DETECT_MASK 0x00000E00L
+#define PCIE_LC_CNTL13__LC_INFERRED_EI_FAILED_SPEED_MODE_MASK 0x00001000L
+#define PCIE_LC_CNTL13__LC_FIRST_EQ_PHASE_RXEQEVAL_DELAY_MODE_MASK 0x00006000L
+#define PCIE_LC_CNTL13__LC_TRACK_RX_WAIT_AUXCOUNT_RESET_MASK 0x00008000L
+#define PCIE_LC_CNTL13__LC_BLOCK_NAK_GEN_ASPM_TIMEOUT_USC_MASK 0x00010000L
+#define PCIE_LC_CNTL13__LC_L23_POWERDOWN_TARGET_MASK 0x000E0000L
+#define PCIE_LC_CNTL13__LC_ADVERTISE_MAX_SPEED_LINKUP_ZERO_CONFIG_COMPLETE_MASK 0x00100000L
+#define PCIE_LC_CNTL13__LC_HR_WAIT_DETECT_EN_MASK 0x20000000L
+#define PCIE_LC_CNTL13__LC_LD_WAIT_DETECT_EN_MASK 0x40000000L
+#define PCIE_LC_CNTL13__LC_HR_LD_WAIT_DETECT_ACTIVE_MASK 0x80000000L
+//PCIE_LC_SWDS_CNTL
+#define PCIE_LC_SWDS_CNTL__LC_SECONDARY_BUS_RESET_EXT_DISABLE__SHIFT 0x0
+#define PCIE_LC_SWDS_CNTL__LC_DSC_START_L23_IGNORE_LC_STATE__SHIFT 0x1
+#define PCIE_LC_SWDS_CNTL__LC_WAKE_FROM_L23_SWDS__SHIFT 0x2
+#define PCIE_LC_SWDS_CNTL__LC_SWITCH_US_WAKEUP_DS_L23_DISABLE__SHIFT 0x3
+#define PCIE_LC_SWDS_CNTL__LC_SWITCH_US_WAKEUP_DS_L1_DISABLE__SHIFT 0x4
+#define PCIE_LC_SWDS_CNTL__LC_SWITCH_DS_WAKEUP_US_L1_DISABLE__SHIFT 0x5
+#define PCIE_LC_SWDS_CNTL__LC_SECONDARY_BUS_RESET_EXT_DISABLE_MASK 0x00000001L
+#define PCIE_LC_SWDS_CNTL__LC_DSC_START_L23_IGNORE_LC_STATE_MASK 0x00000002L
+#define PCIE_LC_SWDS_CNTL__LC_WAKE_FROM_L23_SWDS_MASK 0x00000004L
+#define PCIE_LC_SWDS_CNTL__LC_SWITCH_US_WAKEUP_DS_L23_DISABLE_MASK 0x00000008L
+#define PCIE_LC_SWDS_CNTL__LC_SWITCH_US_WAKEUP_DS_L1_DISABLE_MASK 0x00000010L
+#define PCIE_LC_SWDS_CNTL__LC_SWITCH_DS_WAKEUP_US_L1_DISABLE_MASK 0x00000020L
+//PCIE_TX_SEQ
+#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ__SHIFT 0x0
+#define PCIE_TX_SEQ__TX_ACKD_SEQ__SHIFT 0x10
+#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ_MASK 0x00000FFFL
+#define PCIE_TX_SEQ__TX_ACKD_SEQ_MASK 0x0FFF0000L
+//PCIE_TX_REPLAY
+#define PCIE_TX_REPLAY__TX_REPLAY_NUM__SHIFT 0x0
+#define PCIE_TX_REPLAY__TX_REPLAY_ROLLOVER_EN__SHIFT 0x5
+#define PCIE_TX_REPLAY__TX_REPLAY_STALL__SHIFT 0xa
+#define PCIE_TX_REPLAY__TX_REPLAY_DISABLE__SHIFT 0xb
+#define PCIE_TX_REPLAY__TX_REPLAY_ALL__SHIFT 0xc
+#define PCIE_TX_REPLAY__TX_REPLAY_FORCE_WRSCH_ACK__SHIFT 0xd
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_DIS__SHIFT 0xe
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE__SHIFT 0xf
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER__SHIFT 0x10
+#define PCIE_TX_REPLAY__TX_REPLAY_NUM_MASK 0x0000001FL
+#define PCIE_TX_REPLAY__TX_REPLAY_ROLLOVER_EN_MASK 0x00000020L
+#define PCIE_TX_REPLAY__TX_REPLAY_STALL_MASK 0x00000400L
+#define PCIE_TX_REPLAY__TX_REPLAY_DISABLE_MASK 0x00000800L
+#define PCIE_TX_REPLAY__TX_REPLAY_ALL_MASK 0x00001000L
+#define PCIE_TX_REPLAY__TX_REPLAY_FORCE_WRSCH_ACK_MASK 0x00002000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_DIS_MASK 0x00004000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE_MASK 0x00008000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_MASK 0xFFFF0000L
+//PCIE_TX_ACK_LATENCY_LIMIT
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT__SHIFT 0x0
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE__SHIFT 0xc
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_FC_ARB_ENABLE__SHIFT 0xd
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_SCALE__SHIFT 0x14
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_ADJUSTMENT__SHIFT 0x18
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_MASK 0x00000FFFL
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE_MASK 0x00001000L
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_FC_ARB_ENABLE_MASK 0x00002000L
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_SCALE_MASK 0x00F00000L
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_ADJUSTMENT_MASK 0xFF000000L
+//PCIE_TX_CREDITS_FCU_THRESHOLD
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0__SHIFT 0x0
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0__SHIFT 0x4
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0__SHIFT 0x8
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1__SHIFT 0x10
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1__SHIFT 0x14
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1__SHIFT 0x18
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0_MASK 0x00000007L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0_MASK 0x00000070L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0_MASK 0x00000700L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1_MASK 0x00070000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1_MASK 0x00700000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1_MASK 0x07000000L
+//PCIE_TX_VENDOR_SPECIFIC
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA__SHIFT 0x0
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_SEND__SHIFT 0x18
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA_MASK 0x00FFFFFFL
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_SEND_MASK 0x01000000L
+//PCIE_TX_NOP_DLLP
+#define PCIE_TX_NOP_DLLP__TX_NOP_DATA__SHIFT 0x0
+#define PCIE_TX_NOP_DLLP__TX_NOP_SEND__SHIFT 0x18
+#define PCIE_TX_NOP_DLLP__TX_NOP_DATA_MASK 0x00FFFFFFL
+#define PCIE_TX_NOP_DLLP__TX_NOP_SEND_MASK 0x01000000L
+//PCIE_TX_REQUEST_NUM_CNTL
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP__SHIFT 0x18
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN__SHIFT 0x1e
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN__SHIFT 0x1f
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_MASK 0x3F000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN_MASK 0x40000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN_MASK 0x80000000L
+//PCIE_TX_CREDITS_ADVT_P
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD__SHIFT 0x0
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH__SHIFT 0x10
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD_MASK 0x00003FFFL
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH_MASK 0x03FF0000L
+//PCIE_TX_CREDITS_ADVT_NP
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD__SHIFT 0x0
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH__SHIFT 0x10
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD_MASK 0x00003FFFL
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH_MASK 0x03FF0000L
+//PCIE_TX_CREDITS_ADVT_CPL
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD__SHIFT 0x0
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH__SHIFT 0x10
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD_MASK 0x00003FFFL
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH_MASK 0x03FF0000L
+//PCIE_TX_CREDITS_INIT_P
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD__SHIFT 0x0
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH__SHIFT 0x10
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD_MASK 0x00000FFFL
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH_MASK 0x00FF0000L
+//PCIE_TX_CREDITS_INIT_NP
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD__SHIFT 0x0
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH__SHIFT 0x10
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD_MASK 0x00000FFFL
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH_MASK 0x00FF0000L
+//PCIE_TX_CREDITS_INIT_CPL
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD__SHIFT 0x0
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH__SHIFT 0x10
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD_MASK 0x00000FFFL
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH_MASK 0x00FF0000L
+//PCIE_TX_CREDITS_STATUS
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD__SHIFT 0x0
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH__SHIFT 0x1
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD__SHIFT 0x2
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH__SHIFT 0x3
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD__SHIFT 0x4
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH__SHIFT 0x5
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD__SHIFT 0x10
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH__SHIFT 0x11
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD__SHIFT 0x12
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH__SHIFT 0x13
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD__SHIFT 0x14
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH__SHIFT 0x15
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD_MASK 0x00000001L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH_MASK 0x00000002L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD_MASK 0x00000004L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH_MASK 0x00000008L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD_MASK 0x00000010L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH_MASK 0x00000020L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD_MASK 0x00010000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH_MASK 0x00020000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD_MASK 0x00040000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH_MASK 0x00080000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD_MASK 0x00100000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH_MASK 0x00200000L
+//PCIE_FC_P
+#define PCIE_FC_P__PD_CREDITS__SHIFT 0x0
+#define PCIE_FC_P__PH_CREDITS__SHIFT 0x10
+#define PCIE_FC_P__PD_CREDITS_MASK 0x0000FFFFL
+#define PCIE_FC_P__PH_CREDITS_MASK 0x0FFF0000L
+//PCIE_FC_NP
+#define PCIE_FC_NP__NPD_CREDITS__SHIFT 0x0
+#define PCIE_FC_NP__NPH_CREDITS__SHIFT 0x10
+#define PCIE_FC_NP__NPD_CREDITS_MASK 0x0000FFFFL
+#define PCIE_FC_NP__NPH_CREDITS_MASK 0x0FFF0000L
+//PCIE_FC_CPL
+#define PCIE_FC_CPL__CPLD_CREDITS__SHIFT 0x0
+#define PCIE_FC_CPL__CPLH_CREDITS__SHIFT 0x10
+#define PCIE_FC_CPL__CPLD_CREDITS_MASK 0x0000FFFFL
+#define PCIE_FC_CPL__CPLH_CREDITS_MASK 0x0FFF0000L
+//PCIE_FC_P_VC1
+#define PCIE_FC_P_VC1__ADVT_FC_VC1_PD_CREDITS__SHIFT 0x0
+#define PCIE_FC_P_VC1__ADVT_FC_VC1_PH_CREDITS__SHIFT 0x10
+#define PCIE_FC_P_VC1__ADVT_FC_VC1_PD_CREDITS_MASK 0x0000FFFFL
+#define PCIE_FC_P_VC1__ADVT_FC_VC1_PH_CREDITS_MASK 0x0FFF0000L
+//PCIE_FC_NP_VC1
+#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPD_CREDITS__SHIFT 0x0
+#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPH_CREDITS__SHIFT 0x10
+#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPD_CREDITS_MASK 0x0000FFFFL
+#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPH_CREDITS_MASK 0x0FFF0000L
+//PCIE_FC_CPL_VC1
+#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLD_CREDITS__SHIFT 0x0
+#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLH_CREDITS__SHIFT 0x10
+#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLD_CREDITS_MASK 0x0000FFFFL
+#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLH_CREDITS_MASK 0x0FFF0000L
+
+
+// addressBlock: pcie_container_pcie0_pciedir
+//PCIE_RESERVED
+#define PCIE_RESERVED__RESERVED__SHIFT 0x0
+#define PCIE_RESERVED__RESERVED_MASK 0xFFFFFFFFL
+//PCIE_SCRATCH
+#define PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0
+#define PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL
+//PCIE_RX_NUM_NAK
+#define PCIE_RX_NUM_NAK__RX_NUM_NAK__SHIFT 0x0
+#define PCIE_RX_NUM_NAK__RX_NUM_NAK_MASK 0xFFFFFFFFL
+//PCIE_RX_NUM_NAK_GENERATED
+#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED__SHIFT 0x0
+#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED_MASK 0xFFFFFFFFL
+//PCIE_CNTL
+#define PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0
+#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL__SHIFT 0x1
+#define PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7
+#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8
+#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE__SHIFT 0x9
+#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE__SHIFT 0xa
+#define PCIE_CNTL__RX_RCB_ATS_UC_DIS__SHIFT 0xf
+#define PCIE_CNTL__RX_RCB_REORDER_EN__SHIFT 0x10
+#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS__SHIFT 0x11
+#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS__SHIFT 0x12
+#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE__SHIFT 0x13
+#define PCIE_CNTL__RX_RCB_WRONG_PREFIX_DIS__SHIFT 0x14
+#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS__SHIFT 0x15
+#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS__SHIFT 0x16
+#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS__SHIFT 0x17
+#define PCIE_CNTL__TX_CPL_DEBUG__SHIFT 0x18
+#define PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e
+#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN__SHIFT 0x1f
+#define PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL_MASK 0x0000000EL
+#define PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE_MASK 0x00000200L
+#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE_MASK 0x00001C00L
+#define PCIE_CNTL__RX_RCB_ATS_UC_DIS_MASK 0x00008000L
+#define PCIE_CNTL__RX_RCB_REORDER_EN_MASK 0x00010000L
+#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS_MASK 0x00020000L
+#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS_MASK 0x00040000L
+#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE_MASK 0x00080000L
+#define PCIE_CNTL__RX_RCB_WRONG_PREFIX_DIS_MASK 0x00100000L
+#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS_MASK 0x00200000L
+#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS_MASK 0x00400000L
+#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS_MASK 0x00800000L
+#define PCIE_CNTL__TX_CPL_DEBUG_MASK 0x3F000000L
+#define PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L
+#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN_MASK 0x80000000L
+//PCIE_CONFIG_CNTL
+#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT 0x0
+#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK 0x0000000FL
+//PCIE_DEBUG_CNTL
+#define PCIE_DEBUG_CNTL__DEBUG_PORT_EN__SHIFT 0x0
+#define PCIE_DEBUG_CNTL__DEBUG_SELECT__SHIFT 0x10
+#define PCIE_DEBUG_CNTL__DEBUG_PORT_EN_MASK 0x0000FFFFL
+#define PCIE_DEBUG_CNTL__DEBUG_SELECT_MASK 0x00010000L
+//PCIE_RX_CNTL5
+#define PCIE_RX_CNTL5__RX_SB_ARB_MODE__SHIFT 0x0
+#define PCIE_RX_CNTL5__RX_SB_ARB_LOWER_LIMIT__SHIFT 0x8
+#define PCIE_RX_CNTL5__RX_SB_ARB_UPPER_LIMIT__SHIFT 0x10
+#define PCIE_RX_CNTL5__RX_SB_ARB_MODE_MASK 0x00000003L
+#define PCIE_RX_CNTL5__RX_SB_ARB_LOWER_LIMIT_MASK 0x00003F00L
+#define PCIE_RX_CNTL5__RX_SB_ARB_UPPER_LIMIT_MASK 0x003F0000L
+//PCIE_RX_CNTL4
+#define PCIE_RX_CNTL4__RX_ENH_ATOMIC_UR_TPH_DIS__SHIFT 0x0
+#define PCIE_RX_CNTL4__RX_ENH_ATOMIC_UR_OPTYPE4_DIS__SHIFT 0x1
+#define PCIE_RX_CNTL4__RX_ENH_ATOMIC_UR_OPTYPE1_E_F_DIS__SHIFT 0x2
+#define PCIE_RX_CNTL4__CI_ATS_RO_DIS__SHIFT 0x3
+#define PCIE_RX_CNTL4__RX_7BIT_ST_TAG_EN__SHIFT 0x4
+#define PCIE_RX_CNTL4__DEGSIPCI61_953_DIS__SHIFT 0x5
+#define PCIE_RX_CNTL4__RX_RCB_CPL_TIMEOUT_FAIR_DIS__SHIFT 0x7
+#define PCIE_RX_CNTL4__RX_CTO_CPL_REFCLK_SPEED__SHIFT 0x8
+#define PCIE_RX_CNTL4__RX_OVERFLOW_PRIV_MASK__SHIFT 0xa
+#define PCIE_RX_CNTL4__RX_PD_OVERFLOW_FIX_DISABLE__SHIFT 0x10
+#define PCIE_RX_CNTL4__RX_NAK_COUNTER_MODE__SHIFT 0x11
+#define PCIE_RX_CNTL4__RX_SF_FILTERING_END_FROM_DLLP_DIS__SHIFT 0x12
+#define PCIE_RX_CNTL4__RX_SRAM_PIPEMEB_FIX_DIS__SHIFT 0x13
+#define PCIE_RX_CNTL4__SMCA_OOB_CRASH_DUMP_CLR_ALL_DIS__SHIFT 0x14
+#define PCIE_RX_CNTL4__RXECC_RPT_ANY_ERR_TO_TX_DIS__SHIFT 0x15
+#define PCIE_RX_CNTL4__MCA_INTREQ_STABLE_WRDATA_DIS__SHIFT 0x16
+#define PCIE_RX_CNTL4__RX_ENH_ATOMIC_UR_TPH_DIS_MASK 0x00000001L
+#define PCIE_RX_CNTL4__RX_ENH_ATOMIC_UR_OPTYPE4_DIS_MASK 0x00000002L
+#define PCIE_RX_CNTL4__RX_ENH_ATOMIC_UR_OPTYPE1_E_F_DIS_MASK 0x00000004L
+#define PCIE_RX_CNTL4__CI_ATS_RO_DIS_MASK 0x00000008L
+#define PCIE_RX_CNTL4__RX_7BIT_ST_TAG_EN_MASK 0x00000010L
+#define PCIE_RX_CNTL4__DEGSIPCI61_953_DIS_MASK 0x00000020L
+#define PCIE_RX_CNTL4__RX_RCB_CPL_TIMEOUT_FAIR_DIS_MASK 0x00000080L
+#define PCIE_RX_CNTL4__RX_CTO_CPL_REFCLK_SPEED_MASK 0x00000300L
+#define PCIE_RX_CNTL4__RX_OVERFLOW_PRIV_MASK_MASK 0x0000FC00L
+#define PCIE_RX_CNTL4__RX_PD_OVERFLOW_FIX_DISABLE_MASK 0x00010000L
+#define PCIE_RX_CNTL4__RX_NAK_COUNTER_MODE_MASK 0x00020000L
+#define PCIE_RX_CNTL4__RX_SF_FILTERING_END_FROM_DLLP_DIS_MASK 0x00040000L
+#define PCIE_RX_CNTL4__RX_SRAM_PIPEMEB_FIX_DIS_MASK 0x00080000L
+#define PCIE_RX_CNTL4__SMCA_OOB_CRASH_DUMP_CLR_ALL_DIS_MASK 0x00100000L
+#define PCIE_RX_CNTL4__RXECC_RPT_ANY_ERR_TO_TX_DIS_MASK 0x00200000L
+#define PCIE_RX_CNTL4__MCA_INTREQ_STABLE_WRDATA_DIS_MASK 0x00400000L
+//PCIE_COMMON_AER_MASK
+#define PCIE_COMMON_AER_MASK__PRIV_SURP_DIS_VEC__SHIFT 0x0
+#define PCIE_COMMON_AER_MASK__ERR_ROOT_ERR_STATUS_REPORTS_SFW_DIS__SHIFT 0x8
+#define PCIE_COMMON_AER_MASK__ERR_CTO_NONFATAL_MODE__SHIFT 0x9
+#define PCIE_COMMON_AER_MASK__IGNORE_BADTLP_IN_LINKDOWN_EN__SHIFT 0x10
+#define PCIE_COMMON_AER_MASK__IGNORE_BADDLLP_IN_LINKDOWN_EN__SHIFT 0x11
+#define PCIE_COMMON_AER_MASK__PRIV_SURP_DIS_VEC_MASK 0x000000FFL
+#define PCIE_COMMON_AER_MASK__ERR_ROOT_ERR_STATUS_REPORTS_SFW_DIS_MASK 0x00000100L
+#define PCIE_COMMON_AER_MASK__ERR_CTO_NONFATAL_MODE_MASK 0x00000200L
+#define PCIE_COMMON_AER_MASK__IGNORE_BADTLP_IN_LINKDOWN_EN_MASK 0x00010000L
+#define PCIE_COMMON_AER_MASK__IGNORE_BADDLLP_IN_LINKDOWN_EN_MASK 0x00020000L
+//PCIE_CNTL2
+#define PCIE_CNTL2__RCB_LS_EN__SHIFT 0x0
+#define PCIE_CNTL2__MST_CPL_LS_EN__SHIFT 0x1
+#define PCIE_CNTL2__SLVAER_LS_EN__SHIFT 0x2
+#define PCIE_CNTL2__SLV_MEM_LS_EN__SHIFT 0x10
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN__SHIFT 0x11
+#define PCIE_CNTL2__SLV_MEM_SD_EN__SHIFT 0x14
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN__SHIFT 0x15
+#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING__SHIFT 0x18
+#define PCIE_CNTL2__SLV_MEM_DS_EN__SHIFT 0x1d
+#define PCIE_CNTL2__RCB_LS_EN_MASK 0x00000001L
+#define PCIE_CNTL2__MST_CPL_LS_EN_MASK 0x00000002L
+#define PCIE_CNTL2__SLVAER_LS_EN_MASK 0x00000004L
+#define PCIE_CNTL2__SLV_MEM_LS_EN_MASK 0x00010000L
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN_MASK 0x00020000L
+#define PCIE_CNTL2__SLV_MEM_SD_EN_MASK 0x00100000L
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN_MASK 0x00200000L
+#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING_MASK 0x1F000000L
+#define PCIE_CNTL2__SLV_MEM_DS_EN_MASK 0x20000000L
+//PCIE_RX_CNTL2
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR__SHIFT 0x1
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR__SHIFT 0x2
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR__SHIFT 0x3
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR__SHIFT 0x4
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR__SHIFT 0x5
+#define PCIE_RX_CNTL2__MCA_CLKGATE_DIS__SHIFT 0x6
+#define PCIE_RX_CNTL2__MCA_ERREVENT_INHIBIT_LOG_DIS__SHIFT 0x7
+#define PCIE_RX_CNTL2__RX_RCB_LATENCY_EN__SHIFT 0x8
+#define PCIE_RX_CNTL2__RX_RCB_LATENCY_SCALE__SHIFT 0x9
+#define PCIE_RX_CNTL2__SLVCPL_MEM_LS_EN__SHIFT 0xc
+#define PCIE_RX_CNTL2__SLVCPL_MEM_SD_EN__SHIFT 0xd
+#define PCIE_RX_CNTL2__SLVCPL_MEM_DS_EN__SHIFT 0xe
+#define PCIE_RX_CNTL2__SLV_SDP_PARITY_ERR__SHIFT 0xf
+#define PCIE_RX_CNTL2__RX_RCB_LATENCY_MAX_COUNT__SHIFT 0x10
+#define PCIE_RX_CNTL2__MCA_USE_SWRESET_DIS__SHIFT 0x1a
+#define PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c
+#define PCIE_RX_CNTL2__MCA_FATAL_CONTAINMENT_DIS__SHIFT 0x1f
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR_MASK 0x00000002L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR_MASK 0x00000004L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR_MASK 0x00000008L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR_MASK 0x00000010L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR_MASK 0x00000020L
+#define PCIE_RX_CNTL2__MCA_CLKGATE_DIS_MASK 0x00000040L
+#define PCIE_RX_CNTL2__MCA_ERREVENT_INHIBIT_LOG_DIS_MASK 0x00000080L
+#define PCIE_RX_CNTL2__RX_RCB_LATENCY_EN_MASK 0x00000100L
+#define PCIE_RX_CNTL2__RX_RCB_LATENCY_SCALE_MASK 0x00000E00L
+#define PCIE_RX_CNTL2__SLVCPL_MEM_LS_EN_MASK 0x00001000L
+#define PCIE_RX_CNTL2__SLVCPL_MEM_SD_EN_MASK 0x00002000L
+#define PCIE_RX_CNTL2__SLVCPL_MEM_DS_EN_MASK 0x00004000L
+#define PCIE_RX_CNTL2__SLV_SDP_PARITY_ERR_MASK 0x00008000L
+#define PCIE_RX_CNTL2__RX_RCB_LATENCY_MAX_COUNT_MASK 0x03FF0000L
+#define PCIE_RX_CNTL2__MCA_USE_SWRESET_DIS_MASK 0x0C000000L
+#define PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L
+#define PCIE_RX_CNTL2__MCA_FATAL_CONTAINMENT_DIS_MASK 0x80000000L
+//PCIE_CI_CNTL
+#define PCIE_CI_CNTL__CI_SLV_SDP_CHAIN_DIS__SHIFT 0x0
+#define PCIE_CI_CNTL__CI_SLV_VC0_CREDIT_CHECK_MODE__SHIFT 0x1
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_OVERSUBSCRIBE_MODE__SHIFT 0x3
+#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE__SHIFT 0x6
+#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS__SHIFT 0x8
+#define PCIE_CI_CNTL__CI_SLV_SDP_MEM_WR_FULL_DIS__SHIFT 0x9
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS__SHIFT 0xa
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE__SHIFT 0xb
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR__SHIFT 0xc
+#define PCIE_CI_CNTL__CI_SLV_SDP_ERR_DATA_ON_POISONED_DIS__SHIFT 0x10
+#define PCIE_CI_CNTL__CI_SLV_SDP_CONNECT_EN__SHIFT 0x11
+#define PCIE_CI_CNTL__CI_SLV_SDP_MODE__SHIFT 0x12
+#define PCIE_CI_CNTL__CI_SLV_FATALOUT_LATCH_DIS__SHIFT 0x14
+#define PCIE_CI_CNTL__TX_PGMEM_CTRL_PGATE_DIS__SHIFT 0x15
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_UR_EN__SHIFT 0x16
+#define PCIE_CI_CNTL__RX_RCB_RC_DPC_EXCEPTION_EN__SHIFT 0x17
+#define PCIE_CI_CNTL__RX_RCB_RC_DPC_CPL_CTL_EN__SHIFT 0x18
+#define PCIE_CI_CNTL__RX_RCB_CTO_IGNORE_ON_SFI_CAM_DIS__SHIFT 0x19
+#define PCIE_CI_CNTL__RX_RCB_SWUS_NTB_CTO_TO_UR_EN__SHIFT 0x1a
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_CA_EN__SHIFT 0x1b
+#define PCIE_CI_CNTL__RX_RCB_SWUS_NTB_CTO_TO_CA_EN__SHIFT 0x1c
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_SC_IN_LINK_DOWN_EN__SHIFT 0x1d
+#define PCIE_CI_CNTL__SLV_ARB_LINKWIDTH_WEIGHTED_RROBIN_EN__SHIFT 0x1e
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_IGNORE_ERR_IN_LINK_DOWN_EN__SHIFT 0x1f
+#define PCIE_CI_CNTL__CI_SLV_SDP_CHAIN_DIS_MASK 0x00000001L
+#define PCIE_CI_CNTL__CI_SLV_VC0_CREDIT_CHECK_MODE_MASK 0x00000002L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_OVERSUBSCRIBE_MODE_MASK 0x00000038L
+#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE_MASK 0x000000C0L
+#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS_MASK 0x00000100L
+#define PCIE_CI_CNTL__CI_SLV_SDP_MEM_WR_FULL_DIS_MASK 0x00000200L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS_MASK 0x00000400L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE_MASK 0x00000800L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR_MASK 0x00001000L
+#define PCIE_CI_CNTL__CI_SLV_SDP_ERR_DATA_ON_POISONED_DIS_MASK 0x00010000L
+#define PCIE_CI_CNTL__CI_SLV_SDP_CONNECT_EN_MASK 0x00020000L
+#define PCIE_CI_CNTL__CI_SLV_SDP_MODE_MASK 0x000C0000L
+#define PCIE_CI_CNTL__CI_SLV_FATALOUT_LATCH_DIS_MASK 0x00100000L
+#define PCIE_CI_CNTL__TX_PGMEM_CTRL_PGATE_DIS_MASK 0x00200000L
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_UR_EN_MASK 0x00400000L
+#define PCIE_CI_CNTL__RX_RCB_RC_DPC_EXCEPTION_EN_MASK 0x00800000L
+#define PCIE_CI_CNTL__RX_RCB_RC_DPC_CPL_CTL_EN_MASK 0x01000000L
+#define PCIE_CI_CNTL__RX_RCB_CTO_IGNORE_ON_SFI_CAM_DIS_MASK 0x02000000L
+#define PCIE_CI_CNTL__RX_RCB_SWUS_NTB_CTO_TO_UR_EN_MASK 0x04000000L
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_CA_EN_MASK 0x08000000L
+#define PCIE_CI_CNTL__RX_RCB_SWUS_NTB_CTO_TO_CA_EN_MASK 0x10000000L
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_SC_IN_LINK_DOWN_EN_MASK 0x20000000L
+#define PCIE_CI_CNTL__SLV_ARB_LINKWIDTH_WEIGHTED_RROBIN_EN_MASK 0x40000000L
+#define PCIE_CI_CNTL__RX_RCB_RC_CTO_IGNORE_ERR_IN_LINK_DOWN_EN_MASK 0x80000000L
+//PCIE_BUS_CNTL
+#define PCIE_BUS_CNTL__PMI_INT_DIS__SHIFT 0x6
+#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7
+#define PCIE_BUS_CNTL__TRUE_PM_STATUS_EN__SHIFT 0xc
+#define PCIE_BUS_CNTL__PMI_INT_DIS_MASK 0x00000040L
+#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define PCIE_BUS_CNTL__TRUE_PM_STATUS_EN_MASK 0x00001000L
+//PCIE_LC_STATE6
+#define PCIE_LC_STATE6__LC_PREV_STATE24__SHIFT 0x0
+#define PCIE_LC_STATE6__LC_PREV_STATE25__SHIFT 0x8
+#define PCIE_LC_STATE6__LC_PREV_STATE26__SHIFT 0x10
+#define PCIE_LC_STATE6__LC_PREV_STATE27__SHIFT 0x18
+#define PCIE_LC_STATE6__LC_PREV_STATE24_MASK 0x0000003FL
+#define PCIE_LC_STATE6__LC_PREV_STATE25_MASK 0x00003F00L
+#define PCIE_LC_STATE6__LC_PREV_STATE26_MASK 0x003F0000L
+#define PCIE_LC_STATE6__LC_PREV_STATE27_MASK 0x3F000000L
+//PCIE_LC_STATE7
+#define PCIE_LC_STATE7__LC_PREV_STATE28__SHIFT 0x0
+#define PCIE_LC_STATE7__LC_PREV_STATE29__SHIFT 0x8
+#define PCIE_LC_STATE7__LC_PREV_STATE30__SHIFT 0x10
+#define PCIE_LC_STATE7__LC_PREV_STATE31__SHIFT 0x18
+#define PCIE_LC_STATE7__LC_PREV_STATE28_MASK 0x0000003FL
+#define PCIE_LC_STATE7__LC_PREV_STATE29_MASK 0x00003F00L
+#define PCIE_LC_STATE7__LC_PREV_STATE30_MASK 0x003F0000L
+#define PCIE_LC_STATE7__LC_PREV_STATE31_MASK 0x3F000000L
+//PCIE_LC_STATE8
+#define PCIE_LC_STATE8__LC_PREV_STATE32__SHIFT 0x0
+#define PCIE_LC_STATE8__LC_PREV_STATE33__SHIFT 0x8
+#define PCIE_LC_STATE8__LC_PREV_STATE34__SHIFT 0x10
+#define PCIE_LC_STATE8__LC_PREV_STATE35__SHIFT 0x18
+#define PCIE_LC_STATE8__LC_PREV_STATE32_MASK 0x0000003FL
+#define PCIE_LC_STATE8__LC_PREV_STATE33_MASK 0x00003F00L
+#define PCIE_LC_STATE8__LC_PREV_STATE34_MASK 0x003F0000L
+#define PCIE_LC_STATE8__LC_PREV_STATE35_MASK 0x3F000000L
+//PCIE_LC_STATE9
+#define PCIE_LC_STATE9__LC_PREV_STATE36__SHIFT 0x0
+#define PCIE_LC_STATE9__LC_PREV_STATE37__SHIFT 0x8
+#define PCIE_LC_STATE9__LC_PREV_STATE38__SHIFT 0x10
+#define PCIE_LC_STATE9__LC_PREV_STATE39__SHIFT 0x18
+#define PCIE_LC_STATE9__LC_PREV_STATE36_MASK 0x0000003FL
+#define PCIE_LC_STATE9__LC_PREV_STATE37_MASK 0x00003F00L
+#define PCIE_LC_STATE9__LC_PREV_STATE38_MASK 0x003F0000L
+#define PCIE_LC_STATE9__LC_PREV_STATE39_MASK 0x3F000000L
+//PCIE_LC_STATE10
+#define PCIE_LC_STATE10__LC_PREV_STATE40__SHIFT 0x0
+#define PCIE_LC_STATE10__LC_PREV_STATE41__SHIFT 0x8
+#define PCIE_LC_STATE10__LC_PREV_STATE42__SHIFT 0x10
+#define PCIE_LC_STATE10__LC_PREV_STATE43__SHIFT 0x18
+#define PCIE_LC_STATE10__LC_PREV_STATE40_MASK 0x0000003FL
+#define PCIE_LC_STATE10__LC_PREV_STATE41_MASK 0x00003F00L
+#define PCIE_LC_STATE10__LC_PREV_STATE42_MASK 0x003F0000L
+#define PCIE_LC_STATE10__LC_PREV_STATE43_MASK 0x3F000000L
+//PCIE_LC_STATE11
+#define PCIE_LC_STATE11__LC_PREV_STATE44__SHIFT 0x0
+#define PCIE_LC_STATE11__LC_PREV_STATE45__SHIFT 0x8
+#define PCIE_LC_STATE11__LC_PREV_STATE46__SHIFT 0x10
+#define PCIE_LC_STATE11__LC_PREV_STATE47__SHIFT 0x18
+#define PCIE_LC_STATE11__LC_PREV_STATE44_MASK 0x0000003FL
+#define PCIE_LC_STATE11__LC_PREV_STATE45_MASK 0x00003F00L
+#define PCIE_LC_STATE11__LC_PREV_STATE46_MASK 0x003F0000L
+#define PCIE_LC_STATE11__LC_PREV_STATE47_MASK 0x3F000000L
+//PCIE_LC_STATUS1
+#define PCIE_LC_STATUS1__LC_REVERSE_RCVR__SHIFT 0x0
+#define PCIE_LC_STATUS1__LC_REVERSE_XMIT__SHIFT 0x1
+#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT 0x2
+#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT 0x5
+#define PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK 0x00000001L
+#define PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK 0x00000002L
+#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK 0x0000001CL
+#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK 0x000000E0L
+//PCIE_LC_STATUS2
+#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES__SHIFT 0x0
+#define PCIE_LC_STATUS2__LC_TURN_ON_LANE__SHIFT 0x10
+#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES_MASK 0x0000FFFFL
+#define PCIE_LC_STATUS2__LC_TURN_ON_LANE_MASK 0xFFFF0000L
+//PCIE_WPR_CNTL
+#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN__SHIFT 0x0
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN__SHIFT 0x1
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN__SHIFT 0x2
+#define PCIE_WPR_CNTL__WPR_RESET_COR_EN__SHIFT 0x3
+#define PCIE_WPR_CNTL__WPR_RESET_REG_EN__SHIFT 0x4
+#define PCIE_WPR_CNTL__WPR_RESET_STY_EN__SHIFT 0x5
+#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN__SHIFT 0x6
+#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN_MASK 0x00000001L
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN_MASK 0x00000002L
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN_MASK 0x00000004L
+#define PCIE_WPR_CNTL__WPR_RESET_COR_EN_MASK 0x00000008L
+#define PCIE_WPR_CNTL__WPR_RESET_REG_EN_MASK 0x00000010L
+#define PCIE_WPR_CNTL__WPR_RESET_STY_EN_MASK 0x00000020L
+#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN_MASK 0x00000040L
+//PCIE_RX_LAST_TLP0
+#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0__SHIFT 0x0
+#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0_MASK 0xFFFFFFFFL
+//PCIE_RX_LAST_TLP1
+#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1__SHIFT 0x0
+#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1_MASK 0xFFFFFFFFL
+//PCIE_RX_LAST_TLP2
+#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2__SHIFT 0x0
+#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2_MASK 0xFFFFFFFFL
+//PCIE_RX_LAST_TLP3
+#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3__SHIFT 0x0
+#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3_MASK 0xFFFFFFFFL
+//PCIE_I2C_REG_ADDR_EXPAND
+#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR__SHIFT 0x0
+#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR_MASK 0x0001FFFFL
+//PCIE_I2C_REG_DATA
+#define PCIE_I2C_REG_DATA__I2C_REG_DATA__SHIFT 0x0
+#define PCIE_I2C_REG_DATA__I2C_REG_DATA_MASK 0xFFFFFFFFL
+//PCIE_CFG_CNTL
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+//PCIE_LC_PM_CNTL
+#define PCIE_LC_PM_CNTL__LC_PORT_0_CLKREQB_MAP__SHIFT 0x0
+#define PCIE_LC_PM_CNTL__LC_PORT_1_CLKREQB_MAP__SHIFT 0x4
+#define PCIE_LC_PM_CNTL__LC_PORT_2_CLKREQB_MAP__SHIFT 0x8
+#define PCIE_LC_PM_CNTL__LC_PORT_3_CLKREQB_MAP__SHIFT 0xc
+#define PCIE_LC_PM_CNTL__LC_PORT_4_CLKREQB_MAP__SHIFT 0x10
+#define PCIE_LC_PM_CNTL__LC_PORT_5_CLKREQB_MAP__SHIFT 0x14
+#define PCIE_LC_PM_CNTL__LC_PORT_6_CLKREQB_MAP__SHIFT 0x18
+#define PCIE_LC_PM_CNTL__LC_PORT_7_CLKREQB_MAP__SHIFT 0x1c
+#define PCIE_LC_PM_CNTL__LC_PORT_0_CLKREQB_MAP_MASK 0x0000000FL
+#define PCIE_LC_PM_CNTL__LC_PORT_1_CLKREQB_MAP_MASK 0x000000F0L
+#define PCIE_LC_PM_CNTL__LC_PORT_2_CLKREQB_MAP_MASK 0x00000F00L
+#define PCIE_LC_PM_CNTL__LC_PORT_3_CLKREQB_MAP_MASK 0x0000F000L
+#define PCIE_LC_PM_CNTL__LC_PORT_4_CLKREQB_MAP_MASK 0x000F0000L
+#define PCIE_LC_PM_CNTL__LC_PORT_5_CLKREQB_MAP_MASK 0x00F00000L
+#define PCIE_LC_PM_CNTL__LC_PORT_6_CLKREQB_MAP_MASK 0x0F000000L
+#define PCIE_LC_PM_CNTL__LC_PORT_7_CLKREQB_MAP_MASK 0xF0000000L
+//PCIE_LC_PM_CNTL2
+#define PCIE_LC_PM_CNTL2__LC_PORT_8_CLKREQB_MAP__SHIFT 0x0
+#define PCIE_LC_PM_CNTL2__LC_PORT_8_CLKREQB_MAP_MASK 0x0000000FL
+//PCIE_LC_STRAP_BUFF_CNTL
+#define PCIE_LC_STRAP_BUFF_CNTL__LC_STRAP_BUFF_WRITE_ON_CHANGE__SHIFT 0x0
+#define PCIE_LC_STRAP_BUFF_CNTL__LC_STRAP_BUFF_WRITE_ON_CHANGE_MASK 0x00000001L
+//PCIE_P_CNTL
+#define PCIE_P_CNTL__P_PWRDN_EN__SHIFT 0x0
+#define PCIE_P_CNTL__P_SYMALIGN_MODE__SHIFT 0x1
+#define PCIE_P_CNTL__P_SYMALIGN_HW_DEBUG__SHIFT 0x2
+#define PCIE_P_CNTL__P_ELASTDESKEW_HW_DEBUG__SHIFT 0x3
+#define PCIE_P_CNTL__P_IGNORE_CRC_ERR__SHIFT 0x4
+#define PCIE_P_CNTL__P_IGNORE_LEN_ERR__SHIFT 0x5
+#define PCIE_P_CNTL__P_IGNORE_EDB_ERR__SHIFT 0x6
+#define PCIE_P_CNTL__P_IGNORE_IDL_ERR__SHIFT 0x7
+#define PCIE_P_CNTL__P_IGNORE_TOK_ERR__SHIFT 0x8
+#define PCIE_P_CNTL__P_DESKEW_EMPTYMODE__SHIFT 0x9
+#define PCIE_P_CNTL__P_DESKEW_SKP_RMV__SHIFT 0xa
+#define PCIE_P_CNTL__LC_FREQ_ADJ_RESET_ACK_EN__SHIFT 0xb
+#define PCIE_P_CNTL__P_BLK_LOCK_MODE__SHIFT 0xc
+#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK__SHIFT 0xd
+#define PCIE_P_CNTL__P_ELEC_IDLE_MODE__SHIFT 0xe
+#define PCIE_P_CNTL__LC_TIEOFF_LANES_IGNORE_REFCLKACK__SHIFT 0x10
+#define PCIE_P_CNTL__ASSERT_DVALID_ON_EI_TRANS__SHIFT 0x11
+#define PCIE_P_CNTL__LC_PCLK_2GHZ_MAPPING__SHIFT 0x12
+#define PCIE_P_CNTL__MASTER_PLL_LANE_NUM__SHIFT 0x13
+#define PCIE_P_CNTL__MASTER_PLL_LANE_REFCLKREQ_EN__SHIFT 0x17
+#define PCIE_P_CNTL__REFCLKREQ_WAIT_FOR_MASTER_PLL__SHIFT 0x18
+#define PCIE_P_CNTL__LC_FILTER_SKP_FROM_L_IDLE__SHIFT 0x19
+#define PCIE_P_CNTL__P_IGNORE_CXL_EDS_LOCATION__SHIFT 0x1a
+#define PCIE_P_CNTL__P_IGNORE_CXL_UNEXPECTED_PID__SHIFT 0x1b
+#define PCIE_P_CNTL__LC_RESET_TRACK_TSX_COUNTER_NO_DATA_VLD__SHIFT 0x1c
+#define PCIE_P_CNTL__LC_MISSING_COM_RESET_SET_TRACK__SHIFT 0x1f
+#define PCIE_P_CNTL__P_PWRDN_EN_MASK 0x00000001L
+#define PCIE_P_CNTL__P_SYMALIGN_MODE_MASK 0x00000002L
+#define PCIE_P_CNTL__P_SYMALIGN_HW_DEBUG_MASK 0x00000004L
+#define PCIE_P_CNTL__P_ELASTDESKEW_HW_DEBUG_MASK 0x00000008L
+#define PCIE_P_CNTL__P_IGNORE_CRC_ERR_MASK 0x00000010L
+#define PCIE_P_CNTL__P_IGNORE_LEN_ERR_MASK 0x00000020L
+#define PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK 0x00000040L
+#define PCIE_P_CNTL__P_IGNORE_IDL_ERR_MASK 0x00000080L
+#define PCIE_P_CNTL__P_IGNORE_TOK_ERR_MASK 0x00000100L
+#define PCIE_P_CNTL__P_DESKEW_EMPTYMODE_MASK 0x00000200L
+#define PCIE_P_CNTL__P_DESKEW_SKP_RMV_MASK 0x00000400L
+#define PCIE_P_CNTL__LC_FREQ_ADJ_RESET_ACK_EN_MASK 0x00000800L
+#define PCIE_P_CNTL__P_BLK_LOCK_MODE_MASK 0x00001000L
+#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK_MASK 0x00002000L
+#define PCIE_P_CNTL__P_ELEC_IDLE_MODE_MASK 0x0000C000L
+#define PCIE_P_CNTL__LC_TIEOFF_LANES_IGNORE_REFCLKACK_MASK 0x00010000L
+#define PCIE_P_CNTL__ASSERT_DVALID_ON_EI_TRANS_MASK 0x00020000L
+#define PCIE_P_CNTL__LC_PCLK_2GHZ_MAPPING_MASK 0x00040000L
+#define PCIE_P_CNTL__MASTER_PLL_LANE_NUM_MASK 0x00780000L
+#define PCIE_P_CNTL__MASTER_PLL_LANE_REFCLKREQ_EN_MASK 0x00800000L
+#define PCIE_P_CNTL__REFCLKREQ_WAIT_FOR_MASTER_PLL_MASK 0x01000000L
+#define PCIE_P_CNTL__LC_FILTER_SKP_FROM_L_IDLE_MASK 0x02000000L
+#define PCIE_P_CNTL__P_IGNORE_CXL_EDS_LOCATION_MASK 0x04000000L
+#define PCIE_P_CNTL__P_IGNORE_CXL_UNEXPECTED_PID_MASK 0x08000000L
+#define PCIE_P_CNTL__LC_RESET_TRACK_TSX_COUNTER_NO_DATA_VLD_MASK 0x70000000L
+#define PCIE_P_CNTL__LC_MISSING_COM_RESET_SET_TRACK_MASK 0x80000000L
+//PCIE_P_BUF_STATUS
+#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR__SHIFT 0x0
+#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR__SHIFT 0x10
+#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR_MASK 0x0000FFFFL
+#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR_MASK 0xFFFF0000L
+//PCIE_P_DECODER_STATUS
+#define PCIE_P_DECODER_STATUS__P_DECODE_ERR__SHIFT 0x0
+#define PCIE_P_DECODER_STATUS__P_DECODE_ERR_MASK 0x0000FFFFL
+//PCIE_P_MISC_STATUS
+#define PCIE_P_MISC_STATUS__P_DESKEW_ERR__SHIFT 0x0
+#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR__SHIFT 0x10
+#define PCIE_P_MISC_STATUS__P_DESKEW_ERR_MASK 0x000001FFL
+#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR_MASK 0xFFFF0000L
+//PCIE_P_RCV_L0S_FTS_DET
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN__SHIFT 0x0
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX__SHIFT 0x8
+#define PCIE_P_RCV_L0S_FTS_DET__LC_TXPHYSTATUS_DONT_BLOCK_ARM_L1_L23_EN__SHIFT 0x10
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN_MASK 0x000000FFL
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX_MASK 0x0000FF00L
+#define PCIE_P_RCV_L0S_FTS_DET__LC_TXPHYSTATUS_DONT_BLOCK_ARM_L1_L23_EN_MASK 0x00010000L
+//PCIE_RX_AD
+#define PCIE_RX_AD__RX_SWUS_DROP_PME_TO__SHIFT 0x0
+#define PCIE_RX_AD__RX_SWUS_DROP_UNLOCK__SHIFT 0x1
+#define PCIE_RX_AD__RX_SWUS_UR_VDM0__SHIFT 0x2
+#define PCIE_RX_AD__RX_SWUS_DROP_VDM0__SHIFT 0x3
+#define PCIE_RX_AD__RX_SWUS_DROP_VDM1__SHIFT 0x4
+#define PCIE_RX_AD__RX_SWUS_UR_MSG_PREFIX_DIS__SHIFT 0x5
+#define PCIE_RX_AD__RX_RC_DROP_VDM0__SHIFT 0x8
+#define PCIE_RX_AD__RX_RC_UR_VDM0__SHIFT 0x9
+#define PCIE_RX_AD__RX_RC_DROP_VDM1__SHIFT 0xa
+#define PCIE_RX_AD__RX_RC_UR_SSPL_MSG__SHIFT 0xb
+#define PCIE_RX_AD__RX_RC_UR_BFRC_MSG__SHIFT 0xc
+#define PCIE_RX_AD__RX_RC_DROP_PME_TO_ACK__SHIFT 0xd
+#define PCIE_RX_AD__RX_RC_UR_ECRC_DIS__SHIFT 0xe
+#define PCIE_RX_AD__RX_RC_DROP_CPL_ECRC_FAILURE__SHIFT 0xf
+#define PCIE_RX_AD__RX_SB_DROP_LTAR_VDM_EN__SHIFT 0x10
+#define PCIE_RX_AD__RX_RC_UR_POIS_ATOP__SHIFT 0x11
+#define PCIE_RX_AD__RX_RC_LARGE_VDM_BFRC_EN__SHIFT 0x12
+#define PCIE_RX_AD__RC_IGNORE_ACS_ERR_ON_DRS_DIS__SHIFT 0x13
+#define PCIE_RX_AD__RX_SWUS_IGNORE_ROUTING_ON_VDM_EN__SHIFT 0x14
+#define PCIE_RX_AD__RX_SWUS_DROP_PME_TO_MASK 0x00000001L
+#define PCIE_RX_AD__RX_SWUS_DROP_UNLOCK_MASK 0x00000002L
+#define PCIE_RX_AD__RX_SWUS_UR_VDM0_MASK 0x00000004L
+#define PCIE_RX_AD__RX_SWUS_DROP_VDM0_MASK 0x00000008L
+#define PCIE_RX_AD__RX_SWUS_DROP_VDM1_MASK 0x00000010L
+#define PCIE_RX_AD__RX_SWUS_UR_MSG_PREFIX_DIS_MASK 0x00000020L
+#define PCIE_RX_AD__RX_RC_DROP_VDM0_MASK 0x00000100L
+#define PCIE_RX_AD__RX_RC_UR_VDM0_MASK 0x00000200L
+#define PCIE_RX_AD__RX_RC_DROP_VDM1_MASK 0x00000400L
+#define PCIE_RX_AD__RX_RC_UR_SSPL_MSG_MASK 0x00000800L
+#define PCIE_RX_AD__RX_RC_UR_BFRC_MSG_MASK 0x00001000L
+#define PCIE_RX_AD__RX_RC_DROP_PME_TO_ACK_MASK 0x00002000L
+#define PCIE_RX_AD__RX_RC_UR_ECRC_DIS_MASK 0x00004000L
+#define PCIE_RX_AD__RX_RC_DROP_CPL_ECRC_FAILURE_MASK 0x00008000L
+#define PCIE_RX_AD__RX_SB_DROP_LTAR_VDM_EN_MASK 0x00010000L
+#define PCIE_RX_AD__RX_RC_UR_POIS_ATOP_MASK 0x00020000L
+#define PCIE_RX_AD__RX_RC_LARGE_VDM_BFRC_EN_MASK 0x00040000L
+#define PCIE_RX_AD__RC_IGNORE_ACS_ERR_ON_DRS_DIS_MASK 0x00080000L
+#define PCIE_RX_AD__RX_SWUS_IGNORE_ROUTING_ON_VDM_EN_MASK 0x00100000L
+//PCIE_SDP_CTRL
+#define PCIE_SDP_CTRL__SDP_UNIT_ID__SHIFT 0x0
+#define PCIE_SDP_CTRL__CI_SLV_REQR_FULL_DISCONNECT_EN__SHIFT 0x4
+#define PCIE_SDP_CTRL__CI_SLV_REQR_PART_DISCONNECT_EN__SHIFT 0x5
+#define PCIE_SDP_CTRL__CI_SLAVE_TAG_STEALING_DIS__SHIFT 0x9
+#define PCIE_SDP_CTRL__SLAVE_PREFIX_PRELOAD_DIS__SHIFT 0xa
+#define PCIE_SDP_CTRL__CI_DISABLE_LTR_DROPPING__SHIFT 0xb
+#define PCIE_SDP_CTRL__RX_SWUS_SIDEBAND_CPLHDR_DIS__SHIFT 0xc
+#define PCIE_SDP_CTRL__CI_SWUS_RCVD_ERR_HANDLING_DIS__SHIFT 0xf
+#define PCIE_SDP_CTRL__EARLY_HW_WAKE_UP_EN__SHIFT 0x10
+#define PCIE_SDP_CTRL__SLV_SDP_DISCONNECT_WHEN_IN_L1_EN__SHIFT 0x11
+#define PCIE_SDP_CTRL__BLOCK_SLV_SDP_DISCONNECT_WHEN_EARLY_HW_WAKE_UP_EN__SHIFT 0x12
+#define PCIE_SDP_CTRL__CI_SLV_SDP_PARITY_CHECK_EN__SHIFT 0x13
+#define PCIE_SDP_CTRL__MCA_ERR_SEVERITY_0__SHIFT 0x14
+#define PCIE_SDP_CTRL__CI_SLV_GEN_ERREVENT_EN__SHIFT 0x17
+#define PCIE_SDP_CTRL__CI_VIRTUAL_WIRE_MODE__SHIFT 0x19
+#define PCIE_SDP_CTRL__SDP_UNIT_ID_LOWER__SHIFT 0x1a
+#define PCIE_SDP_CTRL__CI_SDP_RECONFIG_EN__SHIFT 0x1d
+#define PCIE_SDP_CTRL__CI_VIRTUAL_WIRE_BIT46_EN__SHIFT 0x1e
+#define PCIE_SDP_CTRL__SDP_UNIT_ID_MASK 0x0000000FL
+#define PCIE_SDP_CTRL__CI_SLV_REQR_FULL_DISCONNECT_EN_MASK 0x00000010L
+#define PCIE_SDP_CTRL__CI_SLV_REQR_PART_DISCONNECT_EN_MASK 0x00000020L
+#define PCIE_SDP_CTRL__CI_SLAVE_TAG_STEALING_DIS_MASK 0x00000200L
+#define PCIE_SDP_CTRL__SLAVE_PREFIX_PRELOAD_DIS_MASK 0x00000400L
+#define PCIE_SDP_CTRL__CI_DISABLE_LTR_DROPPING_MASK 0x00000800L
+#define PCIE_SDP_CTRL__RX_SWUS_SIDEBAND_CPLHDR_DIS_MASK 0x00001000L
+#define PCIE_SDP_CTRL__CI_SWUS_RCVD_ERR_HANDLING_DIS_MASK 0x00008000L
+#define PCIE_SDP_CTRL__EARLY_HW_WAKE_UP_EN_MASK 0x00010000L
+#define PCIE_SDP_CTRL__SLV_SDP_DISCONNECT_WHEN_IN_L1_EN_MASK 0x00020000L
+#define PCIE_SDP_CTRL__BLOCK_SLV_SDP_DISCONNECT_WHEN_EARLY_HW_WAKE_UP_EN_MASK 0x00040000L
+#define PCIE_SDP_CTRL__CI_SLV_SDP_PARITY_CHECK_EN_MASK 0x00080000L
+#define PCIE_SDP_CTRL__MCA_ERR_SEVERITY_0_MASK 0x00700000L
+#define PCIE_SDP_CTRL__CI_SLV_GEN_ERREVENT_EN_MASK 0x00800000L
+#define PCIE_SDP_CTRL__CI_VIRTUAL_WIRE_MODE_MASK 0x02000000L
+#define PCIE_SDP_CTRL__SDP_UNIT_ID_LOWER_MASK 0x1C000000L
+#define PCIE_SDP_CTRL__CI_SDP_RECONFIG_EN_MASK 0x20000000L
+#define PCIE_SDP_CTRL__CI_VIRTUAL_WIRE_BIT46_EN_MASK 0x40000000L
+//PCIE_SDP_SWUS_SLV_ATTR_CTRL
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMWR__SHIFT 0x0
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMRD__SHIFT 0x2
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_ATOMIC__SHIFT 0x4
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMWR__SHIFT 0x6
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMRD__SHIFT 0x8
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_ATOMIC__SHIFT 0xa
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMWR__SHIFT 0xc
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMRD__SHIFT 0xe
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_ATOMIC__SHIFT 0x10
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMWR_MASK 0x00000003L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMRD_MASK 0x0000000CL
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_ATOMIC_MASK 0x00000030L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMWR_MASK 0x000000C0L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMRD_MASK 0x00000300L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_ATOMIC_MASK 0x00000C00L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMWR_MASK 0x00003000L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMRD_MASK 0x0000C000L
+#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_ATOMIC_MASK 0x00030000L
+//PCIE_SDP_CTRL2
+#define PCIE_SDP_CTRL2__CI_VIRTUAL_WIRE_DIS__SHIFT 0x0
+#define PCIE_SDP_CTRL2__CI_SLV_SDP_INIT_CREDIT_WRRSP_VC0__SHIFT 0x8
+#define PCIE_SDP_CTRL2__CI_SLV_SDP_INIT_CREDIT_WRRSP_VC1__SHIFT 0x10
+#define PCIE_SDP_CTRL2__CI_SLV_SDP_INIT_CREDIT_RDRSP_VC0__SHIFT 0x18
+#define PCIE_SDP_CTRL2__CI_VIRTUAL_WIRE_DIS_MASK 0x00000001L
+#define PCIE_SDP_CTRL2__CI_SLV_SDP_INIT_CREDIT_WRRSP_VC0_MASK 0x0000FF00L
+#define PCIE_SDP_CTRL2__CI_SLV_SDP_INIT_CREDIT_WRRSP_VC1_MASK 0x00FF0000L
+#define PCIE_SDP_CTRL2__CI_SLV_SDP_INIT_CREDIT_RDRSP_VC0_MASK 0xFF000000L
+//PCIE_PERF_COUNT_CNTL
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN__SHIFT 0x0
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR__SHIFT 0x1
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET__SHIFT 0x2
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_LCLK_STATUS__SHIFT 0x1f
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN_MASK 0x00000001L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_MASK 0x00000002L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET_MASK 0x00000004L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_LCLK_STATUS_MASK 0x80000000L
+//PCIE_PERF_CNTL_TXCLK1
+#define PCIE_PERF_CNTL_TXCLK1__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK1__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK1__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK1__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK1__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK1__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK1__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK1__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK1
+#define PCIE_PERF_COUNT0_TXCLK1__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK1__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK1
+#define PCIE_PERF_COUNT1_TXCLK1__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK1__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK2
+#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK2
+#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK2
+#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK3
+#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK3__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK3__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK3__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK3
+#define PCIE_PERF_COUNT0_TXCLK3__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK3__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK3
+#define PCIE_PERF_COUNT1_TXCLK3__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK3__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK4
+#define PCIE_PERF_CNTL_TXCLK4__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK4__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK4__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK4__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK4__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK4
+#define PCIE_PERF_COUNT0_TXCLK4__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK4__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK4
+#define PCIE_PERF_COUNT1_TXCLK4__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK4__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_EVENT_LC_PORT_SEL
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF0_PORT_SEL_TXCLK1__SHIFT 0x0
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF1_PORT_SEL_TXCLK1__SHIFT 0x4
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF0_PORT_SEL_TXCLK2__SHIFT 0x8
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF1_PORT_SEL_TXCLK2__SHIFT 0xc
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF0_PORT_SEL_TXCLK1_MASK 0x0000000FL
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF1_PORT_SEL_TXCLK1_MASK 0x000000F0L
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF0_PORT_SEL_TXCLK2_MASK 0x00000F00L
+#define PCIE_PERF_CNTL_EVENT_LC_PORT_SEL__PERF1_PORT_SEL_TXCLK2_MASK 0x0000F000L
+//PCIE_PERF_CNTL_EVENT_CI_PORT_SEL
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_TXCLK3__SHIFT 0x0
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_TXCLK3__SHIFT 0x4
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_TXCLK4__SHIFT 0x8
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_TXCLK4__SHIFT 0xc
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_LCLK1__SHIFT 0x10
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_LCLK1__SHIFT 0x14
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_LCLK2__SHIFT 0x18
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_LCLK2__SHIFT 0x1c
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_TXCLK3_MASK 0x0000000FL
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_TXCLK3_MASK 0x000000F0L
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_TXCLK4_MASK 0x00000F00L
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_TXCLK4_MASK 0x0000F000L
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_LCLK1_MASK 0x000F0000L
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_LCLK1_MASK 0x00F00000L
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF0_PORT_SEL_LCLK2_MASK 0x0F000000L
+#define PCIE_PERF_CNTL_EVENT_CI_PORT_SEL__PERF1_PORT_SEL_LCLK2_MASK 0xF0000000L
+//PCIE_PERF_CNTL_TXCLK5
+#define PCIE_PERF_CNTL_TXCLK5__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK5__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK5__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK5__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK5__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK5__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK5__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK5__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK5
+#define PCIE_PERF_COUNT0_TXCLK5__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK5__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK5
+#define PCIE_PERF_COUNT1_TXCLK5__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK5__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK6
+#define PCIE_PERF_CNTL_TXCLK6__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK6__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK6__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK6__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK6__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK6__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK6__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK6__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK6
+#define PCIE_PERF_COUNT0_TXCLK6__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK6__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK6
+#define PCIE_PERF_COUNT1_TXCLK6__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK6__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_STRAP_F0
+#define PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x0
+#define PCIE_STRAP_F0__STRAP_F0_LEGACY_DEVICE_TYPE_EN__SHIFT 0x1
+#define PCIE_STRAP_F0__STRAP_F0_MSI_EN__SHIFT 0x2
+#define PCIE_STRAP_F0__STRAP_F0_VC_EN__SHIFT 0x3
+#define PCIE_STRAP_F0__STRAP_F0_DSN_EN__SHIFT 0x4
+#define PCIE_STRAP_F0__STRAP_F0_AER_EN__SHIFT 0x5
+#define PCIE_STRAP_F0__STRAP_F0_ACS_EN__SHIFT 0x6
+#define PCIE_STRAP_F0__STRAP_F0_BAR_EN__SHIFT 0x7
+#define PCIE_STRAP_F0__STRAP_F0_PWR_EN__SHIFT 0x8
+#define PCIE_STRAP_F0__STRAP_F0_DPA_EN__SHIFT 0x9
+#define PCIE_STRAP_F0__STRAP_F0_ATS_EN__SHIFT 0xa
+#define PCIE_STRAP_F0__STRAP_F0_PAGE_REQ_EN__SHIFT 0xb
+#define PCIE_STRAP_F0__STRAP_F0_PASID_EN__SHIFT 0xc
+#define PCIE_STRAP_F0__STRAP_F0_ECRC_CHECK_EN__SHIFT 0xd
+#define PCIE_STRAP_F0__STRAP_F0_ECRC_GEN_EN__SHIFT 0xe
+#define PCIE_STRAP_F0__STRAP_F0_CPL_ABORT_ERR_EN__SHIFT 0xf
+#define PCIE_STRAP_F0__STRAP_F0_POISONED_ADVISORY_NONFATAL__SHIFT 0x10
+#define PCIE_STRAP_F0__STRAP_F0_MC_EN__SHIFT 0x11
+#define PCIE_STRAP_F0__STRAP_F0_ATOMIC_EN__SHIFT 0x12
+#define PCIE_STRAP_F0__STRAP_F0_ATOMIC_64BIT_EN__SHIFT 0x13
+#define PCIE_STRAP_F0__STRAP_F0_ATOMIC_ROUTING_EN__SHIFT 0x14
+#define PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP__SHIFT 0x15
+#define PCIE_STRAP_F0__STRAP_F0_VFn_MSI_MULTI_CAP__SHIFT 0x18
+#define PCIE_STRAP_F0__STRAP_F0_MSI_PERVECTOR_MASK_CAP__SHIFT 0x1b
+#define PCIE_STRAP_F0__STRAP_F0_NO_RO_ENABLED_P2P_PASSING__SHIFT 0x1c
+#define PCIE_STRAP_F0__STRAP_SWUS_ARI_EN__SHIFT 0x1d
+#define PCIE_STRAP_F0__STRAP_F0_SRIOV_EN__SHIFT 0x1e
+#define PCIE_STRAP_F0__STRAP_F0_MSI_MAP_EN__SHIFT 0x1f
+#define PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define PCIE_STRAP_F0__STRAP_F0_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F0__STRAP_F0_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F0__STRAP_F0_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F0__STRAP_F0_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F0__STRAP_F0_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F0__STRAP_F0_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F0__STRAP_F0_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F0__STRAP_F0_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F0__STRAP_F0_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F0__STRAP_F0_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F0__STRAP_F0_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F0__STRAP_F0_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F0__STRAP_F0_ECRC_CHECK_EN_MASK 0x00002000L
+#define PCIE_STRAP_F0__STRAP_F0_ECRC_GEN_EN_MASK 0x00004000L
+#define PCIE_STRAP_F0__STRAP_F0_CPL_ABORT_ERR_EN_MASK 0x00008000L
+#define PCIE_STRAP_F0__STRAP_F0_POISONED_ADVISORY_NONFATAL_MASK 0x00010000L
+#define PCIE_STRAP_F0__STRAP_F0_MC_EN_MASK 0x00020000L
+#define PCIE_STRAP_F0__STRAP_F0_ATOMIC_EN_MASK 0x00040000L
+#define PCIE_STRAP_F0__STRAP_F0_ATOMIC_64BIT_EN_MASK 0x00080000L
+#define PCIE_STRAP_F0__STRAP_F0_ATOMIC_ROUTING_EN_MASK 0x00100000L
+#define PCIE_STRAP_F0__STRAP_F0_MSI_MULTI_CAP_MASK 0x00E00000L
+#define PCIE_STRAP_F0__STRAP_F0_VFn_MSI_MULTI_CAP_MASK 0x07000000L
+#define PCIE_STRAP_F0__STRAP_F0_MSI_PERVECTOR_MASK_CAP_MASK 0x08000000L
+#define PCIE_STRAP_F0__STRAP_F0_NO_RO_ENABLED_P2P_PASSING_MASK 0x10000000L
+#define PCIE_STRAP_F0__STRAP_SWUS_ARI_EN_MASK 0x20000000L
+#define PCIE_STRAP_F0__STRAP_F0_SRIOV_EN_MASK 0x40000000L
+#define PCIE_STRAP_F0__STRAP_F0_MSI_MAP_EN_MASK 0x80000000L
+//PCIE_STRAP_MISC
+#define PCIE_STRAP_MISC__STRAP_DLF_EN__SHIFT 0x0
+#define PCIE_STRAP_MISC__STRAP_16GT_EN__SHIFT 0x1
+#define PCIE_STRAP_MISC__STRAP_MARGINING_EN__SHIFT 0x2
+#define PCIE_STRAP_MISC__STRAP_NPEM_EN__SHIFT 0x3
+#define PCIE_STRAP_MISC__STRAP_32GT_EN__SHIFT 0x5
+#define PCIE_STRAP_MISC__STRAP_DOE_EN__SHIFT 0x7
+#define PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x18
+#define PCIE_STRAP_MISC__STRAP_EXT_VC_COUNT__SHIFT 0x1a
+#define PCIE_STRAP_MISC__STRAP_REVERSE_ALL__SHIFT 0x1c
+#define PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x1d
+#define PCIE_STRAP_MISC__STRAP_INTERNAL_ERR_EN__SHIFT 0x1f
+#define PCIE_STRAP_MISC__STRAP_DLF_EN_MASK 0x00000001L
+#define PCIE_STRAP_MISC__STRAP_16GT_EN_MASK 0x00000002L
+#define PCIE_STRAP_MISC__STRAP_MARGINING_EN_MASK 0x00000004L
+#define PCIE_STRAP_MISC__STRAP_NPEM_EN_MASK 0x00000008L
+#define PCIE_STRAP_MISC__STRAP_32GT_EN_MASK 0x00000020L
+#define PCIE_STRAP_MISC__STRAP_DOE_EN_MASK 0x00000080L
+#define PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define PCIE_STRAP_MISC__STRAP_EXT_VC_COUNT_MASK 0x04000000L
+#define PCIE_STRAP_MISC__STRAP_REVERSE_ALL_MASK 0x10000000L
+#define PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+#define PCIE_STRAP_MISC__STRAP_INTERNAL_ERR_EN_MASK 0x80000000L
+//PCIE_STRAP_MISC2
+#define PCIE_STRAP_MISC2__STRAP_LINK_BW_NOTIFICATION_CAP_EN__SHIFT 0x0
+#define PCIE_STRAP_MISC2__STRAP_GEN2_COMPLIANCE__SHIFT 0x1
+#define PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x2
+#define PCIE_STRAP_MISC2__STRAP_GEN3_COMPLIANCE__SHIFT 0x3
+#define PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED__SHIFT 0x4
+#define PCIE_STRAP_MISC2__STRAP_GEN4_COMPLIANCE__SHIFT 0x5
+#define PCIE_STRAP_MISC2__STRAP_GEN5_COMPLIANCE__SHIFT 0x7
+#define PCIE_STRAP_MISC2__STRAP_LINK_BW_NOTIFICATION_CAP_EN_MASK 0x00000001L
+#define PCIE_STRAP_MISC2__STRAP_GEN2_COMPLIANCE_MASK 0x00000002L
+#define PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+#define PCIE_STRAP_MISC2__STRAP_GEN3_COMPLIANCE_MASK 0x00000008L
+#define PCIE_STRAP_MISC2__STRAP_TPH_SUPPORTED_MASK 0x00000010L
+#define PCIE_STRAP_MISC2__STRAP_GEN4_COMPLIANCE_MASK 0x00000020L
+#define PCIE_STRAP_MISC2__STRAP_GEN5_COMPLIANCE_MASK 0x00000080L
+//PCIE_STRAP_PI
+#define PCIE_STRAP_PI__STRAP_QUICKSIM_START__SHIFT 0x0
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_PATTERN__SHIFT 0x1c
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_MODE__SHIFT 0x1d
+#define PCIE_STRAP_PI__LC_PCLK_SWITCH_WAIT_FOR_TX_HANDSHAKE__SHIFT 0x1e
+#define PCIE_STRAP_PI__LC_AUX_INC_REFCLK_NO_DIVIDE__SHIFT 0x1f
+#define PCIE_STRAP_PI__STRAP_QUICKSIM_START_MASK 0x00000001L
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_PATTERN_MASK 0x10000000L
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_MODE_MASK 0x20000000L
+#define PCIE_STRAP_PI__LC_PCLK_SWITCH_WAIT_FOR_TX_HANDSHAKE_MASK 0x40000000L
+#define PCIE_STRAP_PI__LC_AUX_INC_REFCLK_NO_DIVIDE_MASK 0x80000000L
+//PCIE_STRAP_I2C_BD
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_I2C_SLV_ADR__SHIFT 0x0
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_DBG_I2C_EN__SHIFT 0x7
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_I2C_SLV_ADR_MASK 0x0000007FL
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_DBG_I2C_EN_MASK 0x00000080L
+//PCIE_PRBS_CLR
+#define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x0
+#define PCIE_PRBS_CLR__PRBS_CHECKER_DEBUG_BUS_SELECT__SHIFT 0x10
+#define PCIE_PRBS_CLR__PRBS_POLARITY_EN__SHIFT 0x18
+#define PCIE_PRBS_CLR__PRBS_CLR_MASK 0x0000FFFFL
+#define PCIE_PRBS_CLR__PRBS_CHECKER_DEBUG_BUS_SELECT_MASK 0x000F0000L
+#define PCIE_PRBS_CLR__PRBS_POLARITY_EN_MASK 0x01000000L
+//PCIE_PRBS_STATUS1
+#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT__SHIFT 0x0
+#define PCIE_PRBS_STATUS1__PRBS_LOCKED__SHIFT 0x10
+#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT_MASK 0x0000FFFFL
+#define PCIE_PRBS_STATUS1__PRBS_LOCKED_MASK 0xFFFF0000L
+//PCIE_PRBS_STATUS2
+#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE__SHIFT 0x0
+#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE_MASK 0x0000FFFFL
+//PCIE_PRBS_FREERUN
+#define PCIE_PRBS_FREERUN__PRBS_FREERUN__SHIFT 0x0
+#define PCIE_PRBS_FREERUN__PRBS_FREERUN_MASK 0x0000FFFFL
+//PCIE_PRBS_MISC
+#define PCIE_PRBS_MISC__PRBS_EN__SHIFT 0x0
+#define PCIE_PRBS_MISC__PRBS_TEST_MODE__SHIFT 0x1
+#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE__SHIFT 0x4
+#define PCIE_PRBS_MISC__PRBS_8BIT_SEL__SHIFT 0x5
+#define PCIE_PRBS_MISC__PRBS_COMMA_NUM__SHIFT 0x6
+#define PCIE_PRBS_MISC__PRBS_LOCK_CNT__SHIFT 0x8
+#define PCIE_PRBS_MISC__PRBS_DATA_RATE__SHIFT 0xe
+#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK__SHIFT 0x10
+#define PCIE_PRBS_MISC__PRBS_EN_MASK 0x00000001L
+#define PCIE_PRBS_MISC__PRBS_TEST_MODE_MASK 0x0000000EL
+#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE_MASK 0x00000010L
+#define PCIE_PRBS_MISC__PRBS_8BIT_SEL_MASK 0x00000020L
+#define PCIE_PRBS_MISC__PRBS_COMMA_NUM_MASK 0x000000C0L
+#define PCIE_PRBS_MISC__PRBS_LOCK_CNT_MASK 0x00001F00L
+#define PCIE_PRBS_MISC__PRBS_DATA_RATE_MASK 0x0000C000L
+#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK_MASK 0xFFFF0000L
+//PCIE_PRBS_USER_PATTERN
+#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN__SHIFT 0x0
+#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN_MASK 0x3FFFFFFFL
+//PCIE_PRBS_LO_BITCNT
+#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT__SHIFT 0x0
+#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT_MASK 0xFFFFFFFFL
+//PCIE_PRBS_HI_BITCNT
+#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT__SHIFT 0x0
+#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT_MASK 0x000000FFL
+//PCIE_PRBS_ERRCNT_0
+#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_1
+#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_2
+#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_3
+#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_4
+#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_5
+#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_6
+#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_7
+#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_8
+#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_9
+#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_10
+#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_11
+#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_12
+#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_13
+#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_14
+#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14_MASK 0xFFFFFFFFL
+//PCIE_PRBS_ERRCNT_15
+#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15__SHIFT 0x0
+#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15_MASK 0xFFFFFFFFL
+//SWRST_COMMAND_STATUS
+#define SWRST_COMMAND_STATUS__RECONFIGURE__SHIFT 0x0
+#define SWRST_COMMAND_STATUS__ATOMIC_RESET__SHIFT 0x1
+#define SWRST_COMMAND_STATUS__RESET_COMPLETE__SHIFT 0x10
+#define SWRST_COMMAND_STATUS__WAIT_STATE__SHIFT 0x11
+#define SWRST_COMMAND_STATUS__PERST_ASRT__SHIFT 0x12
+#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET__SHIFT 0x18
+#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_CFG_ONLY__SHIFT 0x19
+#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_PHY_CALIB__SHIFT 0x1a
+#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET__SHIFT 0x1b
+#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_CFG_ONLY__SHIFT 0x1c
+#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_HOT_RESET__SHIFT 0x1d
+#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DISABLE__SHIFT 0x1e
+#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DOWN__SHIFT 0x1f
+#define SWRST_COMMAND_STATUS__RECONFIGURE_MASK 0x00000001L
+#define SWRST_COMMAND_STATUS__ATOMIC_RESET_MASK 0x00000002L
+#define SWRST_COMMAND_STATUS__RESET_COMPLETE_MASK 0x00010000L
+#define SWRST_COMMAND_STATUS__WAIT_STATE_MASK 0x00020000L
+#define SWRST_COMMAND_STATUS__PERST_ASRT_MASK 0x00040000L
+#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_MASK 0x01000000L
+#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_CFG_ONLY_MASK 0x02000000L
+#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_PHY_CALIB_MASK 0x04000000L
+#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_MASK 0x08000000L
+#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_CFG_ONLY_MASK 0x10000000L
+#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_HOT_RESET_MASK 0x20000000L
+#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DISABLE_MASK 0x40000000L
+#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DOWN_MASK 0x80000000L
+//SWRST_GENERAL_CONTROL
+#define SWRST_GENERAL_CONTROL__RECONFIGURE_EN__SHIFT 0x0
+#define SWRST_GENERAL_CONTROL__ATOMIC_RESET_EN__SHIFT 0x1
+#define SWRST_GENERAL_CONTROL__RESET_PERIOD__SHIFT 0x2
+#define SWRST_GENERAL_CONTROL__WAIT_LINKUP__SHIFT 0x8
+#define SWRST_GENERAL_CONTROL__FORCE_REGIDLE__SHIFT 0x9
+#define SWRST_GENERAL_CONTROL__BLOCK_ON_IDLE__SHIFT 0xa
+#define SWRST_GENERAL_CONTROL__CONFIG_XFER_MODE__SHIFT 0xc
+#define SWRST_GENERAL_CONTROL__MP1_PCIE_CROSSFIRE_LOCKDOWN_EN__SHIFT 0x18
+#define SWRST_GENERAL_CONTROL__IGNORE_SDP_RESET__SHIFT 0x19
+#define SWRST_GENERAL_CONTROL__WAIT_FOR_SDP_CREDITS__SHIFT 0x1a
+#define SWRST_GENERAL_CONTROL__RECONFIGURE_EN_MASK 0x00000001L
+#define SWRST_GENERAL_CONTROL__ATOMIC_RESET_EN_MASK 0x00000002L
+#define SWRST_GENERAL_CONTROL__RESET_PERIOD_MASK 0x0000001CL
+#define SWRST_GENERAL_CONTROL__WAIT_LINKUP_MASK 0x00000100L
+#define SWRST_GENERAL_CONTROL__FORCE_REGIDLE_MASK 0x00000200L
+#define SWRST_GENERAL_CONTROL__BLOCK_ON_IDLE_MASK 0x00000400L
+#define SWRST_GENERAL_CONTROL__CONFIG_XFER_MODE_MASK 0x00001000L
+#define SWRST_GENERAL_CONTROL__MP1_PCIE_CROSSFIRE_LOCKDOWN_EN_MASK 0x01000000L
+#define SWRST_GENERAL_CONTROL__IGNORE_SDP_RESET_MASK 0x02000000L
+#define SWRST_GENERAL_CONTROL__WAIT_FOR_SDP_CREDITS_MASK 0x04000000L
+//SWRST_COMMAND_0
+#define SWRST_COMMAND_0__PORT0_COR_RESET__SHIFT 0x0
+#define SWRST_COMMAND_0__PORT0_CFG_RESET__SHIFT 0x8
+#define SWRST_COMMAND_0__PORT1_CFG_RESET__SHIFT 0x9
+#define SWRST_COMMAND_0__PORT2_CFG_RESET__SHIFT 0xa
+#define SWRST_COMMAND_0__PORT3_CFG_RESET__SHIFT 0xb
+#define SWRST_COMMAND_0__PORT4_CFG_RESET__SHIFT 0xc
+#define SWRST_COMMAND_0__PORT5_CFG_RESET__SHIFT 0xd
+#define SWRST_COMMAND_0__PORT6_CFG_RESET__SHIFT 0xe
+#define SWRST_COMMAND_0__PORT7_CFG_RESET__SHIFT 0xf
+#define SWRST_COMMAND_0__PORT8_CFG_RESET__SHIFT 0x10
+#define SWRST_COMMAND_0__BIF0_GLOBAL_RESET__SHIFT 0x18
+#define SWRST_COMMAND_0__BIF0_CALIB_RESET__SHIFT 0x19
+#define SWRST_COMMAND_0__BIF0_CORE_RESET__SHIFT 0x1a
+#define SWRST_COMMAND_0__BIF0_REGISTER_RESET__SHIFT 0x1b
+#define SWRST_COMMAND_0__BIF0_PHY_RESET__SHIFT 0x1c
+#define SWRST_COMMAND_0__BIF0_STICKY_RESET__SHIFT 0x1d
+#define SWRST_COMMAND_0__BIF0_CONFIG_RESET__SHIFT 0x1e
+#define SWRST_COMMAND_0__BIF0_SDP_CREDIT_RESET__SHIFT 0x1f
+#define SWRST_COMMAND_0__PORT0_COR_RESET_MASK 0x00000001L
+#define SWRST_COMMAND_0__PORT0_CFG_RESET_MASK 0x00000100L
+#define SWRST_COMMAND_0__PORT1_CFG_RESET_MASK 0x00000200L
+#define SWRST_COMMAND_0__PORT2_CFG_RESET_MASK 0x00000400L
+#define SWRST_COMMAND_0__PORT3_CFG_RESET_MASK 0x00000800L
+#define SWRST_COMMAND_0__PORT4_CFG_RESET_MASK 0x00001000L
+#define SWRST_COMMAND_0__PORT5_CFG_RESET_MASK 0x00002000L
+#define SWRST_COMMAND_0__PORT6_CFG_RESET_MASK 0x00004000L
+#define SWRST_COMMAND_0__PORT7_CFG_RESET_MASK 0x00008000L
+#define SWRST_COMMAND_0__PORT8_CFG_RESET_MASK 0x00010000L
+#define SWRST_COMMAND_0__BIF0_GLOBAL_RESET_MASK 0x01000000L
+#define SWRST_COMMAND_0__BIF0_CALIB_RESET_MASK 0x02000000L
+#define SWRST_COMMAND_0__BIF0_CORE_RESET_MASK 0x04000000L
+#define SWRST_COMMAND_0__BIF0_REGISTER_RESET_MASK 0x08000000L
+#define SWRST_COMMAND_0__BIF0_PHY_RESET_MASK 0x10000000L
+#define SWRST_COMMAND_0__BIF0_STICKY_RESET_MASK 0x20000000L
+#define SWRST_COMMAND_0__BIF0_CONFIG_RESET_MASK 0x40000000L
+#define SWRST_COMMAND_0__BIF0_SDP_CREDIT_RESET_MASK 0x80000000L
+//SWRST_COMMAND_1
+#define SWRST_COMMAND_1__RESETPCS0__SHIFT 0x0
+#define SWRST_COMMAND_1__RESETPCS1__SHIFT 0x1
+#define SWRST_COMMAND_1__RESETPCS2__SHIFT 0x2
+#define SWRST_COMMAND_1__RESETPCS3__SHIFT 0x3
+#define SWRST_COMMAND_1__RESETPCS4__SHIFT 0x4
+#define SWRST_COMMAND_1__RESETPCS5__SHIFT 0x5
+#define SWRST_COMMAND_1__RESETPCS6__SHIFT 0x6
+#define SWRST_COMMAND_1__RESETPCS7__SHIFT 0x7
+#define SWRST_COMMAND_1__RESETPCS8__SHIFT 0x8
+#define SWRST_COMMAND_1__RESETPCS9__SHIFT 0x9
+#define SWRST_COMMAND_1__RESETPCS10__SHIFT 0xa
+#define SWRST_COMMAND_1__RESETPCS11__SHIFT 0xb
+#define SWRST_COMMAND_1__RESETPCS12__SHIFT 0xc
+#define SWRST_COMMAND_1__RESETPCS13__SHIFT 0xd
+#define SWRST_COMMAND_1__RESETPCS14__SHIFT 0xe
+#define SWRST_COMMAND_1__RESETPCS15__SHIFT 0xf
+#define SWRST_COMMAND_1__SWITCHCLK__SHIFT 0x15
+#define SWRST_COMMAND_1__RESETAXIMST__SHIFT 0x16
+#define SWRST_COMMAND_1__RESETAXISLV__SHIFT 0x17
+#define SWRST_COMMAND_1__RESETAXIINT__SHIFT 0x18
+#define SWRST_COMMAND_1__RESETPCFG__SHIFT 0x19
+#define SWRST_COMMAND_1__RESETLNCT__SHIFT 0x1a
+#define SWRST_COMMAND_1__RESETMNTR__SHIFT 0x1b
+#define SWRST_COMMAND_1__RESETHLTR__SHIFT 0x1c
+#define SWRST_COMMAND_1__RESETCPM__SHIFT 0x1d
+#define SWRST_COMMAND_1__RESETPHY0__SHIFT 0x1e
+#define SWRST_COMMAND_1__TOGGLESTRAP__SHIFT 0x1f
+#define SWRST_COMMAND_1__RESETPCS0_MASK 0x00000001L
+#define SWRST_COMMAND_1__RESETPCS1_MASK 0x00000002L
+#define SWRST_COMMAND_1__RESETPCS2_MASK 0x00000004L
+#define SWRST_COMMAND_1__RESETPCS3_MASK 0x00000008L
+#define SWRST_COMMAND_1__RESETPCS4_MASK 0x00000010L
+#define SWRST_COMMAND_1__RESETPCS5_MASK 0x00000020L
+#define SWRST_COMMAND_1__RESETPCS6_MASK 0x00000040L
+#define SWRST_COMMAND_1__RESETPCS7_MASK 0x00000080L
+#define SWRST_COMMAND_1__RESETPCS8_MASK 0x00000100L
+#define SWRST_COMMAND_1__RESETPCS9_MASK 0x00000200L
+#define SWRST_COMMAND_1__RESETPCS10_MASK 0x00000400L
+#define SWRST_COMMAND_1__RESETPCS11_MASK 0x00000800L
+#define SWRST_COMMAND_1__RESETPCS12_MASK 0x00001000L
+#define SWRST_COMMAND_1__RESETPCS13_MASK 0x00002000L
+#define SWRST_COMMAND_1__RESETPCS14_MASK 0x00004000L
+#define SWRST_COMMAND_1__RESETPCS15_MASK 0x00008000L
+#define SWRST_COMMAND_1__SWITCHCLK_MASK 0x00200000L
+#define SWRST_COMMAND_1__RESETAXIMST_MASK 0x00400000L
+#define SWRST_COMMAND_1__RESETAXISLV_MASK 0x00800000L
+#define SWRST_COMMAND_1__RESETAXIINT_MASK 0x01000000L
+#define SWRST_COMMAND_1__RESETPCFG_MASK 0x02000000L
+#define SWRST_COMMAND_1__RESETLNCT_MASK 0x04000000L
+#define SWRST_COMMAND_1__RESETMNTR_MASK 0x08000000L
+#define SWRST_COMMAND_1__RESETHLTR_MASK 0x10000000L
+#define SWRST_COMMAND_1__RESETCPM_MASK 0x20000000L
+#define SWRST_COMMAND_1__RESETPHY0_MASK 0x40000000L
+#define SWRST_COMMAND_1__TOGGLESTRAP_MASK 0x80000000L
+//SWRST_CONTROL_0
+#define SWRST_CONTROL_0__PORT0_COR_RCEN__SHIFT 0x0
+#define SWRST_CONTROL_0__PORT0_CFG_RCEN__SHIFT 0x8
+#define SWRST_CONTROL_0__PORT1_CFG_RCEN__SHIFT 0x9
+#define SWRST_CONTROL_0__PORT2_CFG_RCEN__SHIFT 0xa
+#define SWRST_CONTROL_0__PORT3_CFG_RCEN__SHIFT 0xb
+#define SWRST_CONTROL_0__PORT4_CFG_RCEN__SHIFT 0xc
+#define SWRST_CONTROL_0__PORT5_CFG_RCEN__SHIFT 0xd
+#define SWRST_CONTROL_0__PORT6_CFG_RCEN__SHIFT 0xe
+#define SWRST_CONTROL_0__PORT7_CFG_RCEN__SHIFT 0xf
+#define SWRST_CONTROL_0__PORT8_CFG_RCEN__SHIFT 0x10
+#define SWRST_CONTROL_0__BIF0_GLOBAL_RESETRCEN__SHIFT 0x18
+#define SWRST_CONTROL_0__BIF0_CALIB_RESETRCEN__SHIFT 0x19
+#define SWRST_CONTROL_0__BIF0_CORE_RESETRCEN__SHIFT 0x1a
+#define SWRST_CONTROL_0__BIF0_REGISTER_RESETRCEN__SHIFT 0x1b
+#define SWRST_CONTROL_0__BIF0_PHY_RESETRCEN__SHIFT 0x1c
+#define SWRST_CONTROL_0__BIF0_STICKY_RESETRCEN__SHIFT 0x1d
+#define SWRST_CONTROL_0__BIF0_CONFIG_RESETRCEN__SHIFT 0x1e
+#define SWRST_CONTROL_0__BIF0_SDP_CREDIT_RESETRCEN__SHIFT 0x1f
+#define SWRST_CONTROL_0__PORT0_COR_RCEN_MASK 0x00000001L
+#define SWRST_CONTROL_0__PORT0_CFG_RCEN_MASK 0x00000100L
+#define SWRST_CONTROL_0__PORT1_CFG_RCEN_MASK 0x00000200L
+#define SWRST_CONTROL_0__PORT2_CFG_RCEN_MASK 0x00000400L
+#define SWRST_CONTROL_0__PORT3_CFG_RCEN_MASK 0x00000800L
+#define SWRST_CONTROL_0__PORT4_CFG_RCEN_MASK 0x00001000L
+#define SWRST_CONTROL_0__PORT5_CFG_RCEN_MASK 0x00002000L
+#define SWRST_CONTROL_0__PORT6_CFG_RCEN_MASK 0x00004000L
+#define SWRST_CONTROL_0__PORT7_CFG_RCEN_MASK 0x00008000L
+#define SWRST_CONTROL_0__PORT8_CFG_RCEN_MASK 0x00010000L
+#define SWRST_CONTROL_0__BIF0_GLOBAL_RESETRCEN_MASK 0x01000000L
+#define SWRST_CONTROL_0__BIF0_CALIB_RESETRCEN_MASK 0x02000000L
+#define SWRST_CONTROL_0__BIF0_CORE_RESETRCEN_MASK 0x04000000L
+#define SWRST_CONTROL_0__BIF0_REGISTER_RESETRCEN_MASK 0x08000000L
+#define SWRST_CONTROL_0__BIF0_PHY_RESETRCEN_MASK 0x10000000L
+#define SWRST_CONTROL_0__BIF0_STICKY_RESETRCEN_MASK 0x20000000L
+#define SWRST_CONTROL_0__BIF0_CONFIG_RESETRCEN_MASK 0x40000000L
+#define SWRST_CONTROL_0__BIF0_SDP_CREDIT_RESETRCEN_MASK 0x80000000L
+//SWRST_CONTROL_1
+#define SWRST_CONTROL_1__PCSRESET0_RCEN__SHIFT 0x0
+#define SWRST_CONTROL_1__PCSRESET1_RCEN__SHIFT 0x1
+#define SWRST_CONTROL_1__PCSRESET2_RCEN__SHIFT 0x2
+#define SWRST_CONTROL_1__PCSRESET3_RCEN__SHIFT 0x3
+#define SWRST_CONTROL_1__PCSRESET4_RCEN__SHIFT 0x4
+#define SWRST_CONTROL_1__PCSRESET5_RCEN__SHIFT 0x5
+#define SWRST_CONTROL_1__PCSRESET6_RCEN__SHIFT 0x6
+#define SWRST_CONTROL_1__PCSRESET7_RCEN__SHIFT 0x7
+#define SWRST_CONTROL_1__PCSRESET8_RCEN__SHIFT 0x8
+#define SWRST_CONTROL_1__PCSRESET9_RCEN__SHIFT 0x9
+#define SWRST_CONTROL_1__PCSRESET10_RCEN__SHIFT 0xa
+#define SWRST_CONTROL_1__PCSRESET11_RCEN__SHIFT 0xb
+#define SWRST_CONTROL_1__PCSRESET12_RCEN__SHIFT 0xc
+#define SWRST_CONTROL_1__PCSRESET13_RCEN__SHIFT 0xd
+#define SWRST_CONTROL_1__PCSRESET14_RCEN__SHIFT 0xe
+#define SWRST_CONTROL_1__PCSRESET15_RCEN__SHIFT 0xf
+#define SWRST_CONTROL_1__SWITCHCLK_RCEN__SHIFT 0x15
+#define SWRST_CONTROL_1__RESETAXIMST_RCEN__SHIFT 0x16
+#define SWRST_CONTROL_1__RESETAXISLV_RCEN__SHIFT 0x17
+#define SWRST_CONTROL_1__RESETAXIINT_RCEN__SHIFT 0x18
+#define SWRST_CONTROL_1__RESETPCFG_RCEN__SHIFT 0x19
+#define SWRST_CONTROL_1__RESETLNCT_RCEN__SHIFT 0x1a
+#define SWRST_CONTROL_1__RESETMNTR_RCEN__SHIFT 0x1b
+#define SWRST_CONTROL_1__RESETHLTR_RCEN__SHIFT 0x1c
+#define SWRST_CONTROL_1__RESETCPM_RCEN__SHIFT 0x1d
+#define SWRST_CONTROL_1__RESETPHY0_RCEN__SHIFT 0x1e
+#define SWRST_CONTROL_1__STRAPVLD_RCEN__SHIFT 0x1f
+#define SWRST_CONTROL_1__PCSRESET0_RCEN_MASK 0x00000001L
+#define SWRST_CONTROL_1__PCSRESET1_RCEN_MASK 0x00000002L
+#define SWRST_CONTROL_1__PCSRESET2_RCEN_MASK 0x00000004L
+#define SWRST_CONTROL_1__PCSRESET3_RCEN_MASK 0x00000008L
+#define SWRST_CONTROL_1__PCSRESET4_RCEN_MASK 0x00000010L
+#define SWRST_CONTROL_1__PCSRESET5_RCEN_MASK 0x00000020L
+#define SWRST_CONTROL_1__PCSRESET6_RCEN_MASK 0x00000040L
+#define SWRST_CONTROL_1__PCSRESET7_RCEN_MASK 0x00000080L
+#define SWRST_CONTROL_1__PCSRESET8_RCEN_MASK 0x00000100L
+#define SWRST_CONTROL_1__PCSRESET9_RCEN_MASK 0x00000200L
+#define SWRST_CONTROL_1__PCSRESET10_RCEN_MASK 0x00000400L
+#define SWRST_CONTROL_1__PCSRESET11_RCEN_MASK 0x00000800L
+#define SWRST_CONTROL_1__PCSRESET12_RCEN_MASK 0x00001000L
+#define SWRST_CONTROL_1__PCSRESET13_RCEN_MASK 0x00002000L
+#define SWRST_CONTROL_1__PCSRESET14_RCEN_MASK 0x00004000L
+#define SWRST_CONTROL_1__PCSRESET15_RCEN_MASK 0x00008000L
+#define SWRST_CONTROL_1__SWITCHCLK_RCEN_MASK 0x00200000L
+#define SWRST_CONTROL_1__RESETAXIMST_RCEN_MASK 0x00400000L
+#define SWRST_CONTROL_1__RESETAXISLV_RCEN_MASK 0x00800000L
+#define SWRST_CONTROL_1__RESETAXIINT_RCEN_MASK 0x01000000L
+#define SWRST_CONTROL_1__RESETPCFG_RCEN_MASK 0x02000000L
+#define SWRST_CONTROL_1__RESETLNCT_RCEN_MASK 0x04000000L
+#define SWRST_CONTROL_1__RESETMNTR_RCEN_MASK 0x08000000L
+#define SWRST_CONTROL_1__RESETHLTR_RCEN_MASK 0x10000000L
+#define SWRST_CONTROL_1__RESETCPM_RCEN_MASK 0x20000000L
+#define SWRST_CONTROL_1__RESETPHY0_RCEN_MASK 0x40000000L
+#define SWRST_CONTROL_1__STRAPVLD_RCEN_MASK 0x80000000L
+//SWRST_CONTROL_2
+#define SWRST_CONTROL_2__PORT0_COR_ATEN__SHIFT 0x0
+#define SWRST_CONTROL_2__PORT0_CFG_ATEN__SHIFT 0x8
+#define SWRST_CONTROL_2__PORT1_CFG_ATEN__SHIFT 0x9
+#define SWRST_CONTROL_2__PORT2_CFG_ATEN__SHIFT 0xa
+#define SWRST_CONTROL_2__PORT3_CFG_ATEN__SHIFT 0xb
+#define SWRST_CONTROL_2__PORT4_CFG_ATEN__SHIFT 0xc
+#define SWRST_CONTROL_2__PORT5_CFG_ATEN__SHIFT 0xd
+#define SWRST_CONTROL_2__PORT6_CFG_ATEN__SHIFT 0xe
+#define SWRST_CONTROL_2__PORT7_CFG_ATEN__SHIFT 0xf
+#define SWRST_CONTROL_2__PORT8_CFG_ATEN__SHIFT 0x10
+#define SWRST_CONTROL_2__BIF0_GLOBAL_RESETATEN__SHIFT 0x18
+#define SWRST_CONTROL_2__BIF0_CALIB_RESETATEN__SHIFT 0x19
+#define SWRST_CONTROL_2__BIF0_CORE_RESETATEN__SHIFT 0x1a
+#define SWRST_CONTROL_2__BIF0_REGISTER_RESETATEN__SHIFT 0x1b
+#define SWRST_CONTROL_2__BIF0_PHY_RESETATEN__SHIFT 0x1c
+#define SWRST_CONTROL_2__BIF0_STICKY_RESETATEN__SHIFT 0x1d
+#define SWRST_CONTROL_2__BIF0_CONFIG_RESETATEN__SHIFT 0x1e
+#define SWRST_CONTROL_2__BIF0_SDP_CREDIT_RESETATEN__SHIFT 0x1f
+#define SWRST_CONTROL_2__PORT0_COR_ATEN_MASK 0x00000001L
+#define SWRST_CONTROL_2__PORT0_CFG_ATEN_MASK 0x00000100L
+#define SWRST_CONTROL_2__PORT1_CFG_ATEN_MASK 0x00000200L
+#define SWRST_CONTROL_2__PORT2_CFG_ATEN_MASK 0x00000400L
+#define SWRST_CONTROL_2__PORT3_CFG_ATEN_MASK 0x00000800L
+#define SWRST_CONTROL_2__PORT4_CFG_ATEN_MASK 0x00001000L
+#define SWRST_CONTROL_2__PORT5_CFG_ATEN_MASK 0x00002000L
+#define SWRST_CONTROL_2__PORT6_CFG_ATEN_MASK 0x00004000L
+#define SWRST_CONTROL_2__PORT7_CFG_ATEN_MASK 0x00008000L
+#define SWRST_CONTROL_2__PORT8_CFG_ATEN_MASK 0x00010000L
+#define SWRST_CONTROL_2__BIF0_GLOBAL_RESETATEN_MASK 0x01000000L
+#define SWRST_CONTROL_2__BIF0_CALIB_RESETATEN_MASK 0x02000000L
+#define SWRST_CONTROL_2__BIF0_CORE_RESETATEN_MASK 0x04000000L
+#define SWRST_CONTROL_2__BIF0_REGISTER_RESETATEN_MASK 0x08000000L
+#define SWRST_CONTROL_2__BIF0_PHY_RESETATEN_MASK 0x10000000L
+#define SWRST_CONTROL_2__BIF0_STICKY_RESETATEN_MASK 0x20000000L
+#define SWRST_CONTROL_2__BIF0_CONFIG_RESETATEN_MASK 0x40000000L
+#define SWRST_CONTROL_2__BIF0_SDP_CREDIT_RESETATEN_MASK 0x80000000L
+//SWRST_CONTROL_3
+#define SWRST_CONTROL_3__PCSRESET0_ATEN__SHIFT 0x0
+#define SWRST_CONTROL_3__PCSRESET1_ATEN__SHIFT 0x1
+#define SWRST_CONTROL_3__PCSRESET2_ATEN__SHIFT 0x2
+#define SWRST_CONTROL_3__PCSRESET3_ATEN__SHIFT 0x3
+#define SWRST_CONTROL_3__PCSRESET4_ATEN__SHIFT 0x4
+#define SWRST_CONTROL_3__PCSRESET5_ATEN__SHIFT 0x5
+#define SWRST_CONTROL_3__PCSRESET6_ATEN__SHIFT 0x6
+#define SWRST_CONTROL_3__PCSRESET7_ATEN__SHIFT 0x7
+#define SWRST_CONTROL_3__PCSRESET8_ATEN__SHIFT 0x8
+#define SWRST_CONTROL_3__PCSRESET9_ATEN__SHIFT 0x9
+#define SWRST_CONTROL_3__PCSRESET10_ATEN__SHIFT 0xa
+#define SWRST_CONTROL_3__PCSRESET11_ATEN__SHIFT 0xb
+#define SWRST_CONTROL_3__PCSRESET12_ATEN__SHIFT 0xc
+#define SWRST_CONTROL_3__PCSRESET13_ATEN__SHIFT 0xd
+#define SWRST_CONTROL_3__PCSRESET14_ATEN__SHIFT 0xe
+#define SWRST_CONTROL_3__PCSRESET15_ATEN__SHIFT 0xf
+#define SWRST_CONTROL_3__SWITCHCLK_ATEN__SHIFT 0x15
+#define SWRST_CONTROL_3__RESETAXIMST_ATEN__SHIFT 0x16
+#define SWRST_CONTROL_3__RESETAXISLV_ATEN__SHIFT 0x17
+#define SWRST_CONTROL_3__RESETAXIINT_ATEN__SHIFT 0x18
+#define SWRST_CONTROL_3__RESETPCFG_ATEN__SHIFT 0x19
+#define SWRST_CONTROL_3__RESETLNCT_ATEN__SHIFT 0x1a
+#define SWRST_CONTROL_3__RESETMNTR_ATEN__SHIFT 0x1b
+#define SWRST_CONTROL_3__RESETHLTR_ATEN__SHIFT 0x1c
+#define SWRST_CONTROL_3__RESETCPM_ATEN__SHIFT 0x1d
+#define SWRST_CONTROL_3__RESETPHY0_ATEN__SHIFT 0x1e
+#define SWRST_CONTROL_3__STRAPVLD_ATEN__SHIFT 0x1f
+#define SWRST_CONTROL_3__PCSRESET0_ATEN_MASK 0x00000001L
+#define SWRST_CONTROL_3__PCSRESET1_ATEN_MASK 0x00000002L
+#define SWRST_CONTROL_3__PCSRESET2_ATEN_MASK 0x00000004L
+#define SWRST_CONTROL_3__PCSRESET3_ATEN_MASK 0x00000008L
+#define SWRST_CONTROL_3__PCSRESET4_ATEN_MASK 0x00000010L
+#define SWRST_CONTROL_3__PCSRESET5_ATEN_MASK 0x00000020L
+#define SWRST_CONTROL_3__PCSRESET6_ATEN_MASK 0x00000040L
+#define SWRST_CONTROL_3__PCSRESET7_ATEN_MASK 0x00000080L
+#define SWRST_CONTROL_3__PCSRESET8_ATEN_MASK 0x00000100L
+#define SWRST_CONTROL_3__PCSRESET9_ATEN_MASK 0x00000200L
+#define SWRST_CONTROL_3__PCSRESET10_ATEN_MASK 0x00000400L
+#define SWRST_CONTROL_3__PCSRESET11_ATEN_MASK 0x00000800L
+#define SWRST_CONTROL_3__PCSRESET12_ATEN_MASK 0x00001000L
+#define SWRST_CONTROL_3__PCSRESET13_ATEN_MASK 0x00002000L
+#define SWRST_CONTROL_3__PCSRESET14_ATEN_MASK 0x00004000L
+#define SWRST_CONTROL_3__PCSRESET15_ATEN_MASK 0x00008000L
+#define SWRST_CONTROL_3__SWITCHCLK_ATEN_MASK 0x00200000L
+#define SWRST_CONTROL_3__RESETAXIMST_ATEN_MASK 0x00400000L
+#define SWRST_CONTROL_3__RESETAXISLV_ATEN_MASK 0x00800000L
+#define SWRST_CONTROL_3__RESETAXIINT_ATEN_MASK 0x01000000L
+#define SWRST_CONTROL_3__RESETPCFG_ATEN_MASK 0x02000000L
+#define SWRST_CONTROL_3__RESETLNCT_ATEN_MASK 0x04000000L
+#define SWRST_CONTROL_3__RESETMNTR_ATEN_MASK 0x08000000L
+#define SWRST_CONTROL_3__RESETHLTR_ATEN_MASK 0x10000000L
+#define SWRST_CONTROL_3__RESETCPM_ATEN_MASK 0x20000000L
+#define SWRST_CONTROL_3__RESETPHY0_ATEN_MASK 0x40000000L
+#define SWRST_CONTROL_3__STRAPVLD_ATEN_MASK 0x80000000L
+//SWRST_CONTROL_4
+#define SWRST_CONTROL_4__PORT0_COR_WREN__SHIFT 0x0
+#define SWRST_CONTROL_4__PORT0_CFG_WREN__SHIFT 0x8
+#define SWRST_CONTROL_4__PORT1_CFG_WREN__SHIFT 0x9
+#define SWRST_CONTROL_4__PORT2_CFG_WREN__SHIFT 0xa
+#define SWRST_CONTROL_4__PORT3_CFG_WREN__SHIFT 0xb
+#define SWRST_CONTROL_4__PORT4_CFG_WREN__SHIFT 0xc
+#define SWRST_CONTROL_4__PORT5_CFG_WREN__SHIFT 0xd
+#define SWRST_CONTROL_4__PORT6_CFG_WREN__SHIFT 0xe
+#define SWRST_CONTROL_4__PORT7_CFG_WREN__SHIFT 0xf
+#define SWRST_CONTROL_4__PORT8_CFG_WREN__SHIFT 0x10
+#define SWRST_CONTROL_4__BIF0_GLOBAL_WRRESETEN__SHIFT 0x18
+#define SWRST_CONTROL_4__BIF0_CALIB_WRRESETEN__SHIFT 0x19
+#define SWRST_CONTROL_4__BIF0_CORE_WRRESETEN__SHIFT 0x1a
+#define SWRST_CONTROL_4__BIF0_REGISTER_WRRESETEN__SHIFT 0x1b
+#define SWRST_CONTROL_4__BIF0_PHY_WRRESETEN__SHIFT 0x1c
+#define SWRST_CONTROL_4__BIF0_STICKY_WRRESETEN__SHIFT 0x1d
+#define SWRST_CONTROL_4__BIF0_CONFIG_WRRESETEN__SHIFT 0x1e
+#define SWRST_CONTROL_4__BIF0_SDP_CREDIT_WRRESETEN__SHIFT 0x1f
+#define SWRST_CONTROL_4__PORT0_COR_WREN_MASK 0x00000001L
+#define SWRST_CONTROL_4__PORT0_CFG_WREN_MASK 0x00000100L
+#define SWRST_CONTROL_4__PORT1_CFG_WREN_MASK 0x00000200L
+#define SWRST_CONTROL_4__PORT2_CFG_WREN_MASK 0x00000400L
+#define SWRST_CONTROL_4__PORT3_CFG_WREN_MASK 0x00000800L
+#define SWRST_CONTROL_4__PORT4_CFG_WREN_MASK 0x00001000L
+#define SWRST_CONTROL_4__PORT5_CFG_WREN_MASK 0x00002000L
+#define SWRST_CONTROL_4__PORT6_CFG_WREN_MASK 0x00004000L
+#define SWRST_CONTROL_4__PORT7_CFG_WREN_MASK 0x00008000L
+#define SWRST_CONTROL_4__PORT8_CFG_WREN_MASK 0x00010000L
+#define SWRST_CONTROL_4__BIF0_GLOBAL_WRRESETEN_MASK 0x01000000L
+#define SWRST_CONTROL_4__BIF0_CALIB_WRRESETEN_MASK 0x02000000L
+#define SWRST_CONTROL_4__BIF0_CORE_WRRESETEN_MASK 0x04000000L
+#define SWRST_CONTROL_4__BIF0_REGISTER_WRRESETEN_MASK 0x08000000L
+#define SWRST_CONTROL_4__BIF0_PHY_WRRESETEN_MASK 0x10000000L
+#define SWRST_CONTROL_4__BIF0_STICKY_WRRESETEN_MASK 0x20000000L
+#define SWRST_CONTROL_4__BIF0_CONFIG_WRRESETEN_MASK 0x40000000L
+#define SWRST_CONTROL_4__BIF0_SDP_CREDIT_WRRESETEN_MASK 0x80000000L
+//SWRST_CONTROL_5
+#define SWRST_CONTROL_5__PCSRESET0_WREN__SHIFT 0x0
+#define SWRST_CONTROL_5__PCSRESET1_WREN__SHIFT 0x1
+#define SWRST_CONTROL_5__PCSRESET2_WREN__SHIFT 0x2
+#define SWRST_CONTROL_5__PCSRESET3_WREN__SHIFT 0x3
+#define SWRST_CONTROL_5__PCSRESET4_WREN__SHIFT 0x4
+#define SWRST_CONTROL_5__PCSRESET5_WREN__SHIFT 0x5
+#define SWRST_CONTROL_5__PCSRESET6_WREN__SHIFT 0x6
+#define SWRST_CONTROL_5__PCSRESET7_WREN__SHIFT 0x7
+#define SWRST_CONTROL_5__PCSRESET8_WREN__SHIFT 0x8
+#define SWRST_CONTROL_5__PCSRESET9_WREN__SHIFT 0x9
+#define SWRST_CONTROL_5__PCSRESET10_WREN__SHIFT 0xa
+#define SWRST_CONTROL_5__PCSRESET11_WREN__SHIFT 0xb
+#define SWRST_CONTROL_5__PCSRESET12_WREN__SHIFT 0xc
+#define SWRST_CONTROL_5__PCSRESET13_WREN__SHIFT 0xd
+#define SWRST_CONTROL_5__PCSRESET14_WREN__SHIFT 0xe
+#define SWRST_CONTROL_5__PCSRESET15_WREN__SHIFT 0xf
+#define SWRST_CONTROL_5__WRSWITCHCLK_EN__SHIFT 0x15
+#define SWRST_CONTROL_5__WRRESETAXIMST_EN__SHIFT 0x16
+#define SWRST_CONTROL_5__WRRESETAXISLV_EN__SHIFT 0x17
+#define SWRST_CONTROL_5__WRRESETAXIINT_EN__SHIFT 0x18
+#define SWRST_CONTROL_5__WRRESETPCFG_EN__SHIFT 0x19
+#define SWRST_CONTROL_5__WRRESETLNCT_EN__SHIFT 0x1a
+#define SWRST_CONTROL_5__WRRESETMNTR_EN__SHIFT 0x1b
+#define SWRST_CONTROL_5__WRRESETHLTR_EN__SHIFT 0x1c
+#define SWRST_CONTROL_5__WRRESETCPM_EN__SHIFT 0x1d
+#define SWRST_CONTROL_5__WRRESETPHY0_EN__SHIFT 0x1e
+#define SWRST_CONTROL_5__WRSTRAPVLD_EN__SHIFT 0x1f
+#define SWRST_CONTROL_5__PCSRESET0_WREN_MASK 0x00000001L
+#define SWRST_CONTROL_5__PCSRESET1_WREN_MASK 0x00000002L
+#define SWRST_CONTROL_5__PCSRESET2_WREN_MASK 0x00000004L
+#define SWRST_CONTROL_5__PCSRESET3_WREN_MASK 0x00000008L
+#define SWRST_CONTROL_5__PCSRESET4_WREN_MASK 0x00000010L
+#define SWRST_CONTROL_5__PCSRESET5_WREN_MASK 0x00000020L
+#define SWRST_CONTROL_5__PCSRESET6_WREN_MASK 0x00000040L
+#define SWRST_CONTROL_5__PCSRESET7_WREN_MASK 0x00000080L
+#define SWRST_CONTROL_5__PCSRESET8_WREN_MASK 0x00000100L
+#define SWRST_CONTROL_5__PCSRESET9_WREN_MASK 0x00000200L
+#define SWRST_CONTROL_5__PCSRESET10_WREN_MASK 0x00000400L
+#define SWRST_CONTROL_5__PCSRESET11_WREN_MASK 0x00000800L
+#define SWRST_CONTROL_5__PCSRESET12_WREN_MASK 0x00001000L
+#define SWRST_CONTROL_5__PCSRESET13_WREN_MASK 0x00002000L
+#define SWRST_CONTROL_5__PCSRESET14_WREN_MASK 0x00004000L
+#define SWRST_CONTROL_5__PCSRESET15_WREN_MASK 0x00008000L
+#define SWRST_CONTROL_5__WRSWITCHCLK_EN_MASK 0x00200000L
+#define SWRST_CONTROL_5__WRRESETAXIMST_EN_MASK 0x00400000L
+#define SWRST_CONTROL_5__WRRESETAXISLV_EN_MASK 0x00800000L
+#define SWRST_CONTROL_5__WRRESETAXIINT_EN_MASK 0x01000000L
+#define SWRST_CONTROL_5__WRRESETPCFG_EN_MASK 0x02000000L
+#define SWRST_CONTROL_5__WRRESETLNCT_EN_MASK 0x04000000L
+#define SWRST_CONTROL_5__WRRESETMNTR_EN_MASK 0x08000000L
+#define SWRST_CONTROL_5__WRRESETHLTR_EN_MASK 0x10000000L
+#define SWRST_CONTROL_5__WRRESETCPM_EN_MASK 0x20000000L
+#define SWRST_CONTROL_5__WRRESETPHY0_EN_MASK 0x40000000L
+#define SWRST_CONTROL_5__WRSTRAPVLD_EN_MASK 0x80000000L
+//SWRST_CONTROL_6
+#define SWRST_CONTROL_6__HOLD_TRAINING_A__SHIFT 0x0
+#define SWRST_CONTROL_6__HOLD_TRAINING_B__SHIFT 0x1
+#define SWRST_CONTROL_6__HOLD_TRAINING_C__SHIFT 0x2
+#define SWRST_CONTROL_6__HOLD_TRAINING_D__SHIFT 0x3
+#define SWRST_CONTROL_6__HOLD_TRAINING_E__SHIFT 0x4
+#define SWRST_CONTROL_6__HOLD_TRAINING_F__SHIFT 0x5
+#define SWRST_CONTROL_6__HOLD_TRAINING_G__SHIFT 0x6
+#define SWRST_CONTROL_6__HOLD_TRAINING_H__SHIFT 0x7
+#define SWRST_CONTROL_6__HOLD_TRAINING_I__SHIFT 0x8
+#define SWRST_CONTROL_6__HOLD_TRAINING_J__SHIFT 0x9
+#define SWRST_CONTROL_6__HOLD_TRAINING_K__SHIFT 0xa
+#define SWRST_CONTROL_6__HOLD_TRAINING_A_MASK 0x00000001L
+#define SWRST_CONTROL_6__HOLD_TRAINING_B_MASK 0x00000002L
+#define SWRST_CONTROL_6__HOLD_TRAINING_C_MASK 0x00000004L
+#define SWRST_CONTROL_6__HOLD_TRAINING_D_MASK 0x00000008L
+#define SWRST_CONTROL_6__HOLD_TRAINING_E_MASK 0x00000010L
+#define SWRST_CONTROL_6__HOLD_TRAINING_F_MASK 0x00000020L
+#define SWRST_CONTROL_6__HOLD_TRAINING_G_MASK 0x00000040L
+#define SWRST_CONTROL_6__HOLD_TRAINING_H_MASK 0x00000080L
+#define SWRST_CONTROL_6__HOLD_TRAINING_I_MASK 0x00000100L
+#define SWRST_CONTROL_6__HOLD_TRAINING_J_MASK 0x00000200L
+#define SWRST_CONTROL_6__HOLD_TRAINING_K_MASK 0x00000400L
+//SWRST_EP_COMMAND_0
+#define SWRST_EP_COMMAND_0__EP_CFG_RESET_ONLY__SHIFT 0x0
+#define SWRST_EP_COMMAND_0__EP_HOT_RESET__SHIFT 0x8
+#define SWRST_EP_COMMAND_0__EP_LNKDWN_RESET__SHIFT 0x9
+#define SWRST_EP_COMMAND_0__EP_LNKDIS_RESET__SHIFT 0xa
+#define SWRST_EP_COMMAND_0__EP_CFG_RESET_ONLY_MASK 0x00000001L
+#define SWRST_EP_COMMAND_0__EP_HOT_RESET_MASK 0x00000100L
+#define SWRST_EP_COMMAND_0__EP_LNKDWN_RESET_MASK 0x00000200L
+#define SWRST_EP_COMMAND_0__EP_LNKDIS_RESET_MASK 0x00000400L
+//SWRST_EP_CONTROL_0
+#define SWRST_EP_CONTROL_0__EP_CFG_RESET_ONLY_EN__SHIFT 0x0
+#define SWRST_EP_CONTROL_0__EP_HOT_RESET_EN__SHIFT 0x8
+#define SWRST_EP_CONTROL_0__EP_LNKDWN_RESET_EN__SHIFT 0x9
+#define SWRST_EP_CONTROL_0__EP_LNKDIS_RESET_EN__SHIFT 0xa
+#define SWRST_EP_CONTROL_0__EP_CFG_RESET_ONLY_EN_MASK 0x00000001L
+#define SWRST_EP_CONTROL_0__EP_HOT_RESET_EN_MASK 0x00000100L
+#define SWRST_EP_CONTROL_0__EP_LNKDWN_RESET_EN_MASK 0x00000200L
+#define SWRST_EP_CONTROL_0__EP_LNKDIS_RESET_EN_MASK 0x00000400L
+//CPM_CONTROL
+#define CPM_CONTROL__LCLK_DYN_GATE_ENABLE__SHIFT 0x0
+#define CPM_CONTROL__TXCLK_DYN_GATE_ENABLE__SHIFT 0x1
+#define CPM_CONTROL__L1_PWR_GATE_ENABLE__SHIFT 0x2
+#define CPM_CONTROL__L1_1_PWR_GATE_ENABLE__SHIFT 0x3
+#define CPM_CONTROL__L1_2_PWR_GATE_ENABLE__SHIFT 0x4
+#define CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE__SHIFT 0x5
+#define CPM_CONTROL__TXCLK_REGS_GATE_ENABLE__SHIFT 0x6
+#define CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE__SHIFT 0x7
+#define CPM_CONTROL__REFCLK_REGS_GATE_ENABLE__SHIFT 0x8
+#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY__SHIFT 0x9
+#define CPM_CONTROL__TXCLK_DYN_GATE_LATENCY__SHIFT 0xb
+#define CPM_CONTROL__REFCLKREQ_REFCLKACK_LOOPBACK_ENABLE__SHIFT 0xd
+#define CPM_CONTROL__TXCLK_REGS_GATE_LATENCY__SHIFT 0xe
+#define CPM_CONTROL__REFCLK_REGS_GATE_LATENCY__SHIFT 0xf
+#define CPM_CONTROL__LCLK_GATE_TXCLK_FREE__SHIFT 0x10
+#define CPM_CONTROL__RCVR_DET_CLK_ENABLE__SHIFT 0x11
+#define CPM_CONTROL__FAST_TXCLK_LATENCY__SHIFT 0x12
+#define CPM_CONTROL__IGNORE_REGS_IDLE_IN_PG__SHIFT 0x15
+#define CPM_CONTROL__REFCLK_XSTCLK_ENABLE__SHIFT 0x16
+#define CPM_CONTROL__REFCLK_XSTCLK_LATENCY__SHIFT 0x17
+#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE__SHIFT 0x18
+#define CPM_CONTROL__LCLK_GATE_ALLOW_IN_L1__SHIFT 0x19
+#define CPM_CONTROL__PG_EARLY_WAKE_ENABLE__SHIFT 0x1a
+#define CPM_CONTROL__PCIE_CORE_IDLE__SHIFT 0x1b
+#define CPM_CONTROL__PCIE_LINK_IDLE__SHIFT 0x1c
+#define CPM_CONTROL__PCIE_BUFFER_EMPTY__SHIFT 0x1d
+#define CPM_CONTROL__REGS_IDLE_TO_PG_LATENCY__SHIFT 0x1e
+#define CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK 0x00000001L
+#define CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK 0x00000002L
+#define CPM_CONTROL__L1_PWR_GATE_ENABLE_MASK 0x00000004L
+#define CPM_CONTROL__L1_1_PWR_GATE_ENABLE_MASK 0x00000008L
+#define CPM_CONTROL__L1_2_PWR_GATE_ENABLE_MASK 0x00000010L
+#define CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK 0x00000020L
+#define CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK 0x00000040L
+#define CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK 0x00000080L
+#define CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK 0x00000100L
+#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY_MASK 0x00000600L
+#define CPM_CONTROL__TXCLK_DYN_GATE_LATENCY_MASK 0x00001800L
+#define CPM_CONTROL__REFCLKREQ_REFCLKACK_LOOPBACK_ENABLE_MASK 0x00002000L
+#define CPM_CONTROL__TXCLK_REGS_GATE_LATENCY_MASK 0x00004000L
+#define CPM_CONTROL__REFCLK_REGS_GATE_LATENCY_MASK 0x00008000L
+#define CPM_CONTROL__LCLK_GATE_TXCLK_FREE_MASK 0x00010000L
+#define CPM_CONTROL__RCVR_DET_CLK_ENABLE_MASK 0x00020000L
+#define CPM_CONTROL__FAST_TXCLK_LATENCY_MASK 0x001C0000L
+#define CPM_CONTROL__IGNORE_REGS_IDLE_IN_PG_MASK 0x00200000L
+#define CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK 0x00400000L
+#define CPM_CONTROL__REFCLK_XSTCLK_LATENCY_MASK 0x00800000L
+#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
+#define CPM_CONTROL__LCLK_GATE_ALLOW_IN_L1_MASK 0x02000000L
+#define CPM_CONTROL__PG_EARLY_WAKE_ENABLE_MASK 0x04000000L
+#define CPM_CONTROL__PCIE_CORE_IDLE_MASK 0x08000000L
+#define CPM_CONTROL__PCIE_LINK_IDLE_MASK 0x10000000L
+#define CPM_CONTROL__PCIE_BUFFER_EMPTY_MASK 0x20000000L
+#define CPM_CONTROL__REGS_IDLE_TO_PG_LATENCY_MASK 0xC0000000L
+//CPM_SPLIT_CONTROL
+#define CPM_SPLIT_CONTROL__TXCLK_CCIX_DYN_GATE_ENABLE__SHIFT 0x0
+#define CPM_SPLIT_CONTROL__TXCLK_CCIX_DYN_GATE_ENABLE_MASK 0x00000001L
+//CPM_CONTROL_EXT
+#define CPM_CONTROL_EXT__PWRDOWN_EI_MASK_DISABLE__SHIFT 0x0
+#define CPM_CONTROL_EXT__DELAY_HOLD_TRAINING_ENABLE__SHIFT 0x1
+#define CPM_CONTROL_EXT__LCLK_DS_MODE__SHIFT 0x2
+#define CPM_CONTROL_EXT__LCLK_DS_ENABLE__SHIFT 0x4
+#define CPM_CONTROL_EXT__PG_STATE__SHIFT 0x5
+#define CPM_CONTROL_EXT__HOTPLUG_ALLOW_LCLK_GATING_EN__SHIFT 0x8
+#define CPM_CONTROL_EXT__RESPOND_SDP_CONNECT_WHEN_ALLPORT_UNPLUG_IN_PG__SHIFT 0x9
+#define CPM_CONTROL_EXT__EI_MASK_OFF_AT_PWRDOWN__SHIFT 0xa
+#define CPM_CONTROL_EXT__EI_MASK_OFF_ALL_TIME__SHIFT 0xb
+#define CPM_CONTROL_EXT__EI_DEASSERT_CAPTURE_TIMER__SHIFT 0xc
+#define CPM_CONTROL_EXT__EI_ASSERT_CAPTURE_TIMER__SHIFT 0xe
+#define CPM_CONTROL_EXT__PWRDOWN_EI_MASK_DISABLE_MASK 0x00000001L
+#define CPM_CONTROL_EXT__DELAY_HOLD_TRAINING_ENABLE_MASK 0x00000002L
+#define CPM_CONTROL_EXT__LCLK_DS_MODE_MASK 0x0000000CL
+#define CPM_CONTROL_EXT__LCLK_DS_ENABLE_MASK 0x00000010L
+#define CPM_CONTROL_EXT__PG_STATE_MASK 0x000000E0L
+#define CPM_CONTROL_EXT__HOTPLUG_ALLOW_LCLK_GATING_EN_MASK 0x00000100L
+#define CPM_CONTROL_EXT__RESPOND_SDP_CONNECT_WHEN_ALLPORT_UNPLUG_IN_PG_MASK 0x00000200L
+#define CPM_CONTROL_EXT__EI_MASK_OFF_AT_PWRDOWN_MASK 0x00000400L
+#define CPM_CONTROL_EXT__EI_MASK_OFF_ALL_TIME_MASK 0x00000800L
+#define CPM_CONTROL_EXT__EI_DEASSERT_CAPTURE_TIMER_MASK 0x00003000L
+#define CPM_CONTROL_EXT__EI_ASSERT_CAPTURE_TIMER_MASK 0x0000C000L
+//CLKREQB_PAD_CNTL
+#define CLKREQB_PAD_CNTL__CSel0p9__SHIFT 0x0
+#define CLKREQB_PAD_CNTL__CSel1p1__SHIFT 0x1
+#define CLKREQB_PAD_CNTL__RSel0p9__SHIFT 0x2
+#define CLKREQB_PAD_CNTL__RSel1p1__SHIFT 0x3
+#define CLKREQB_PAD_CNTL__mai2cfmp2_NG__SHIFT 0x4
+#define CLKREQB_PAD_CNTL__mai2cfmp2_ResBiasEn0__SHIFT 0x8
+#define CLKREQB_PAD_CNTL__mai2cfmp2_CompSel0__SHIFT 0x9
+#define CLKREQB_PAD_CNTL__mai2cfmp2_I2cRxSel0__SHIFT 0xa
+#define CLKREQB_PAD_CNTL__mai2cfmp2_PdEn0__SHIFT 0xb
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcEn0__SHIFT 0xc
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcSel0__SHIFT 0xd
+#define CLKREQB_PAD_CNTL__mai2cfmp2_FallSlewSel0__SHIFT 0xe
+#define CLKREQB_PAD_CNTL__mai2cfmp2_BiasCrtEn0__SHIFT 0xf
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Slewn0__SHIFT 0x10
+#define CLKREQB_PAD_CNTL__mai2cfmp2_TstTermEn0__SHIFT 0x11
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Spare0__SHIFT 0x12
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Spare1__SHIFT 0x13
+#define CLKREQB_PAD_CNTL__mai2cfmp2_ResBiasEn1__SHIFT 0x14
+#define CLKREQB_PAD_CNTL__mai2cfmp2_CompSel1__SHIFT 0x15
+#define CLKREQB_PAD_CNTL__mai2cfmp2_I2cRxSel1__SHIFT 0x16
+#define CLKREQB_PAD_CNTL__mai2cfmp2_PdEn1__SHIFT 0x17
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcEn1__SHIFT 0x18
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcSel1__SHIFT 0x19
+#define CLKREQB_PAD_CNTL__mai2cfmp2_FallSlewSel1__SHIFT 0x1a
+#define CLKREQB_PAD_CNTL__mai2cfmp2_BiasCrtEn1__SHIFT 0x1b
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Slewn1__SHIFT 0x1c
+#define CLKREQB_PAD_CNTL__mai2cfmp2_TstTermEn1__SHIFT 0x1d
+#define CLKREQB_PAD_CNTL__mai2cfmp_reserved__SHIFT 0x1e
+#define CLKREQB_PAD_CNTL__CSel0p9_MASK 0x00000001L
+#define CLKREQB_PAD_CNTL__CSel1p1_MASK 0x00000002L
+#define CLKREQB_PAD_CNTL__RSel0p9_MASK 0x00000004L
+#define CLKREQB_PAD_CNTL__RSel1p1_MASK 0x00000008L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_NG_MASK 0x000000F0L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_ResBiasEn0_MASK 0x00000100L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_CompSel0_MASK 0x00000200L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_I2cRxSel0_MASK 0x00000400L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_PdEn0_MASK 0x00000800L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcEn0_MASK 0x00001000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcSel0_MASK 0x00002000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_FallSlewSel0_MASK 0x00004000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_BiasCrtEn0_MASK 0x00008000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Slewn0_MASK 0x00010000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_TstTermEn0_MASK 0x00020000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Spare0_MASK 0x00040000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Spare1_MASK 0x00080000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_ResBiasEn1_MASK 0x00100000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_CompSel1_MASK 0x00200000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_I2cRxSel1_MASK 0x00400000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_PdEn1_MASK 0x00800000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcEn1_MASK 0x01000000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_SpikeRcSel1_MASK 0x02000000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_FallSlewSel1_MASK 0x04000000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_BiasCrtEn1_MASK 0x08000000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_Slewn1_MASK 0x10000000L
+#define CLKREQB_PAD_CNTL__mai2cfmp2_TstTermEn1_MASK 0x20000000L
+#define CLKREQB_PAD_CNTL__mai2cfmp_reserved_MASK 0xC0000000L
+//SMN_APERTURE_ID_A
+#define SMN_APERTURE_ID_A__SMU_APERTURE_ID__SHIFT 0x0
+#define SMN_APERTURE_ID_A__SMU_APERTURE_ID_MASK 0x00000FFFL
+//SMN_APERTURE_ID_B
+#define SMN_APERTURE_ID_B__IOHUB_APERTURE_ID__SHIFT 0x0
+#define SMN_APERTURE_ID_B__NBIF_APERTURE_ID__SHIFT 0xc
+#define SMN_APERTURE_ID_B__IOHUB_APERTURE_ID_MASK 0x00000FFFL
+#define SMN_APERTURE_ID_B__NBIF_APERTURE_ID_MASK 0x00FFF000L
+//LNCNT_CONTROL
+#define LNCNT_CONTROL__CFG_LNC_BW_CNT_EN__SHIFT 0x0
+#define LNCNT_CONTROL__CFG_LNC_CMN_CNT_EN__SHIFT 0x1
+#define LNCNT_CONTROL__CFG_LNC_BW_QUAN_THRD__SHIFT 0x2
+#define LNCNT_CONTROL__CFG_LNC_CMN_QUAN_THRD__SHIFT 0x5
+#define LNCNT_CONTROL__CFG_LNC_BW_CNT_EN_MASK 0x00000001L
+#define LNCNT_CONTROL__CFG_LNC_CMN_CNT_EN_MASK 0x00000002L
+#define LNCNT_CONTROL__CFG_LNC_BW_QUAN_THRD_MASK 0x0000001CL
+#define LNCNT_CONTROL__CFG_LNC_CMN_QUAN_THRD_MASK 0x000000E0L
+//SMU_INT_PIN_SHARING_PORT_INDICATOR
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LINK_MANAGEMENT_INT_STATUS__SHIFT 0x0
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LTR_INT_STATUS__SHIFT 0x10
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LINK_MANAGEMENT_INT_STATUS_MASK 0x0000FFFFL
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LTR_INT_STATUS_MASK 0xFFFF0000L
+//PCIE_PGMST_CNTL
+#define PCIE_PGMST_CNTL__CFG_PG_HYSTERESIS__SHIFT 0x0
+#define PCIE_PGMST_CNTL__CFG_PG_EN__SHIFT 0x8
+#define PCIE_PGMST_CNTL__CFG_IDLENESS_COUNT_EN__SHIFT 0xa
+#define PCIE_PGMST_CNTL__CFG_FW_PG_EXIT_CNTL__SHIFT 0xe
+#define PCIE_PGMST_CNTL__PG_EXIT_TIMER__SHIFT 0x10
+#define PCIE_PGMST_CNTL__CFG_PG_HYSTERESIS_MASK 0x000000FFL
+#define PCIE_PGMST_CNTL__CFG_PG_EN_MASK 0x00000100L
+#define PCIE_PGMST_CNTL__CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L
+#define PCIE_PGMST_CNTL__CFG_FW_PG_EXIT_CNTL_MASK 0x0000C000L
+#define PCIE_PGMST_CNTL__PG_EXIT_TIMER_MASK 0x00FF0000L
+//PCIE_PGSLV_CNTL
+#define PCIE_PGSLV_CNTL__CFG_IDLE_HYSTERESIS__SHIFT 0x0
+#define PCIE_PGSLV_CNTL__CFG_IDLE_HYSTERESIS_MASK 0x0000001FL
+//LC_CPM_CONTROL_0
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_A_GATE_ENABLE__SHIFT 0x0
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_B_GATE_ENABLE__SHIFT 0x1
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_C_GATE_ENABLE__SHIFT 0x2
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_D_GATE_ENABLE__SHIFT 0x3
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_E_GATE_ENABLE__SHIFT 0x4
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_F_GATE_ENABLE__SHIFT 0x5
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_G_GATE_ENABLE__SHIFT 0x6
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_H_GATE_ENABLE__SHIFT 0x7
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_I_GATE_ENABLE__SHIFT 0x8
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_J_GATE_ENABLE__SHIFT 0x9
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_K_GATE_ENABLE__SHIFT 0xa
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_L_GATE_ENABLE__SHIFT 0xb
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_M_GATE_ENABLE__SHIFT 0xc
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_N_GATE_ENABLE__SHIFT 0xd
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_O_GATE_ENABLE__SHIFT 0xe
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_P_GATE_ENABLE__SHIFT 0xf
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_A_GATE_ENABLE__SHIFT 0x10
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_B_GATE_ENABLE__SHIFT 0x11
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_C_GATE_ENABLE__SHIFT 0x12
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_D_GATE_ENABLE__SHIFT 0x13
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_E_GATE_ENABLE__SHIFT 0x14
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_F_GATE_ENABLE__SHIFT 0x15
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_G_GATE_ENABLE__SHIFT 0x16
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_H_GATE_ENABLE__SHIFT 0x17
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_I_GATE_ENABLE__SHIFT 0x18
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_J_GATE_ENABLE__SHIFT 0x19
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_K_GATE_ENABLE__SHIFT 0x1a
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_L_GATE_ENABLE__SHIFT 0x1b
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_M_GATE_ENABLE__SHIFT 0x1c
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_N_GATE_ENABLE__SHIFT 0x1d
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_O_GATE_ENABLE__SHIFT 0x1e
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_P_GATE_ENABLE__SHIFT 0x1f
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_A_GATE_ENABLE_MASK 0x00000001L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_B_GATE_ENABLE_MASK 0x00000002L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_C_GATE_ENABLE_MASK 0x00000004L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_D_GATE_ENABLE_MASK 0x00000008L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_E_GATE_ENABLE_MASK 0x00000010L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_F_GATE_ENABLE_MASK 0x00000020L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_G_GATE_ENABLE_MASK 0x00000040L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_H_GATE_ENABLE_MASK 0x00000080L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_I_GATE_ENABLE_MASK 0x00000100L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_J_GATE_ENABLE_MASK 0x00000200L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_K_GATE_ENABLE_MASK 0x00000400L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_L_GATE_ENABLE_MASK 0x00000800L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_M_GATE_ENABLE_MASK 0x00001000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_N_GATE_ENABLE_MASK 0x00002000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_O_GATE_ENABLE_MASK 0x00004000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_PORT_P_GATE_ENABLE_MASK 0x00008000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_A_GATE_ENABLE_MASK 0x00010000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_B_GATE_ENABLE_MASK 0x00020000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_C_GATE_ENABLE_MASK 0x00040000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_D_GATE_ENABLE_MASK 0x00080000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_E_GATE_ENABLE_MASK 0x00100000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_F_GATE_ENABLE_MASK 0x00200000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_G_GATE_ENABLE_MASK 0x00400000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_H_GATE_ENABLE_MASK 0x00800000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_I_GATE_ENABLE_MASK 0x01000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_J_GATE_ENABLE_MASK 0x02000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_K_GATE_ENABLE_MASK 0x04000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_L_GATE_ENABLE_MASK 0x08000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_M_GATE_ENABLE_MASK 0x10000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_N_GATE_ENABLE_MASK 0x20000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_O_GATE_ENABLE_MASK 0x40000000L
+#define LC_CPM_CONTROL_0__TXCLK_DYN_TR_PORT_P_GATE_ENABLE_MASK 0x80000000L
+//LC_CPM_CONTROL_1
+#define LC_CPM_CONTROL_1__TXCLK_DYN_PORT_GATE_LATENCY__SHIFT 0x0
+#define LC_CPM_CONTROL_1__RCVR_DET_EN_HANDSHAKE_DIS__SHIFT 0x8
+#define LC_CPM_CONTROL_1__TXCLK_PI_CLK_EN_ALL_LANES_GATE_ENABLE__SHIFT 0xf
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_A_GATE_ENABLE__SHIFT 0x10
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_B_GATE_ENABLE__SHIFT 0x11
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_C_GATE_ENABLE__SHIFT 0x12
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_D_GATE_ENABLE__SHIFT 0x13
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_E_GATE_ENABLE__SHIFT 0x14
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_F_GATE_ENABLE__SHIFT 0x15
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_G_GATE_ENABLE__SHIFT 0x16
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_H_GATE_ENABLE__SHIFT 0x17
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_I_GATE_ENABLE__SHIFT 0x18
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_J_GATE_ENABLE__SHIFT 0x19
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_K_GATE_ENABLE__SHIFT 0x1a
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_L_GATE_ENABLE__SHIFT 0x1b
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_M_GATE_ENABLE__SHIFT 0x1c
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_N_GATE_ENABLE__SHIFT 0x1d
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_O_GATE_ENABLE__SHIFT 0x1e
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_P_GATE_ENABLE__SHIFT 0x1f
+#define LC_CPM_CONTROL_1__TXCLK_DYN_PORT_GATE_LATENCY_MASK 0x00000007L
+#define LC_CPM_CONTROL_1__RCVR_DET_EN_HANDSHAKE_DIS_MASK 0x00000100L
+#define LC_CPM_CONTROL_1__TXCLK_PI_CLK_EN_ALL_LANES_GATE_ENABLE_MASK 0x00008000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_A_GATE_ENABLE_MASK 0x00010000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_B_GATE_ENABLE_MASK 0x00020000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_C_GATE_ENABLE_MASK 0x00040000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_D_GATE_ENABLE_MASK 0x00080000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_E_GATE_ENABLE_MASK 0x00100000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_F_GATE_ENABLE_MASK 0x00200000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_G_GATE_ENABLE_MASK 0x00400000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_H_GATE_ENABLE_MASK 0x00800000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_I_GATE_ENABLE_MASK 0x01000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_J_GATE_ENABLE_MASK 0x02000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_K_GATE_ENABLE_MASK 0x04000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_L_GATE_ENABLE_MASK 0x08000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_M_GATE_ENABLE_MASK 0x10000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_N_GATE_ENABLE_MASK 0x20000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_O_GATE_ENABLE_MASK 0x40000000L
+#define LC_CPM_CONTROL_1__TXCLK_RXP_CLK_EN_PORT_P_GATE_ENABLE_MASK 0x80000000L
+//PCIE_RXMARGIN_CONTROL_CAPABILITIES
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_VOLTAGESUPPORTED__SHIFT 0x0
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_INDUPDOWNVOLTAGE__SHIFT 0x1
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_INDLEFTRIGHTTIMING__SHIFT 0x2
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_SAMPLEREPORTINGMETHOD__SHIFT 0x3
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_INDERRORSAMPLER__SHIFT 0x4
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_VOLTAGESUPPORTED_MASK 0x00000001L
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_INDUPDOWNVOLTAGE_MASK 0x00000002L
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_INDLEFTRIGHTTIMING_MASK 0x00000004L
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_SAMPLEREPORTINGMETHOD_MASK 0x00000008L
+#define PCIE_RXMARGIN_CONTROL_CAPABILITIES__M_INDERRORSAMPLER_MASK 0x00000010L
+//PCIE_RXMARGIN_1_SETTINGS
+#define PCIE_RXMARGIN_1_SETTINGS__M_NUMVOLTAGESTEPS__SHIFT 0x0
+#define PCIE_RXMARGIN_1_SETTINGS__M_NUMTIMINGSTEPS__SHIFT 0x7
+#define PCIE_RXMARGIN_1_SETTINGS__M_MAXTIMINGOFFSET__SHIFT 0xd
+#define PCIE_RXMARGIN_1_SETTINGS__M_MAXVOLTAGEOFFSET__SHIFT 0x14
+#define PCIE_RXMARGIN_1_SETTINGS__M_NUMVOLTAGESTEPS_MASK 0x0000007FL
+#define PCIE_RXMARGIN_1_SETTINGS__M_NUMTIMINGSTEPS_MASK 0x00001F80L
+#define PCIE_RXMARGIN_1_SETTINGS__M_MAXTIMINGOFFSET_MASK 0x000FE000L
+#define PCIE_RXMARGIN_1_SETTINGS__M_MAXVOLTAGEOFFSET_MASK 0x07F00000L
+//PCIE_RXMARGIN_2_SETTINGS
+#define PCIE_RXMARGIN_2_SETTINGS__M_SAMPLINGRATEVOLTAGE__SHIFT 0x0
+#define PCIE_RXMARGIN_2_SETTINGS__M_SAMPLINGRATETIMING__SHIFT 0x6
+#define PCIE_RXMARGIN_2_SETTINGS__M_SAMPLECOUNT__SHIFT 0xc
+#define PCIE_RXMARGIN_2_SETTINGS__M_MAXLANES__SHIFT 0x13
+#define PCIE_RXMARGIN_2_SETTINGS__M_ERROR_COUNT_LIMIT__SHIFT 0x18
+#define PCIE_RXMARGIN_2_SETTINGS__ENABLE_PRECODING__SHIFT 0x1e
+#define PCIE_RXMARGIN_2_SETTINGS__M_SAMPLINGRATEVOLTAGE_MASK 0x0000003FL
+#define PCIE_RXMARGIN_2_SETTINGS__M_SAMPLINGRATETIMING_MASK 0x00000FC0L
+#define PCIE_RXMARGIN_2_SETTINGS__M_SAMPLECOUNT_MASK 0x0007F000L
+#define PCIE_RXMARGIN_2_SETTINGS__M_MAXLANES_MASK 0x00F80000L
+#define PCIE_RXMARGIN_2_SETTINGS__M_ERROR_COUNT_LIMIT_MASK 0x3F000000L
+#define PCIE_RXMARGIN_2_SETTINGS__ENABLE_PRECODING_MASK 0x40000000L
+//PCIE_LC_DEBUG_CNTL
+#define PCIE_LC_DEBUG_CNTL__TX_SKID_DEBUG_PORT__SHIFT 0x0
+#define PCIE_LC_DEBUG_CNTL__DEBUG_LANE_EN__SHIFT 0x10
+#define PCIE_LC_DEBUG_CNTL__TX_SKID_DEBUG_PORT_MASK 0x0000000FL
+#define PCIE_LC_DEBUG_CNTL__DEBUG_LANE_EN_MASK 0xFFFF0000L
+//SMU_INT_PIN_SHARING_PORT_INDICATOR_TWO
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR_TWO__DPC_INT_STATUS__SHIFT 0x0
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR_TWO__PD_INT_STATUS__SHIFT 0x10
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR_TWO__DPC_INT_STATUS_MASK 0x0000FFFFL
+#define SMU_INT_PIN_SHARING_PORT_INDICATOR_TWO__PD_INT_STATUS_MASK 0xFFFF0000L
+//PCIE_LC_DESKEW_CNTL
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_TRIGGER_CNT_LIMIT__SHIFT 0x0
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_ADD_LANE_EVENT_LIMIT__SHIFT 0x4
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_RMV_LANE_EVENT_LIMIT__SHIFT 0x8
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_BLOCK_TSX_EIEOS__SHIFT 0xc
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_TRIGGER_CNT_LIMIT_MASK 0x0000000FL
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_ADD_LANE_EVENT_LIMIT_MASK 0x000000F0L
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_RMV_LANE_EVENT_LIMIT_MASK 0x00000F00L
+#define PCIE_LC_DESKEW_CNTL__LC_LIVE_DESKEW_BLOCK_TSX_EIEOS_MASK 0x00001000L
+//PCIE_TX_LAST_TLP0
+#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0__SHIFT 0x0
+#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0_MASK 0xFFFFFFFFL
+//PCIE_TX_LAST_TLP1
+#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1__SHIFT 0x0
+#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1_MASK 0xFFFFFFFFL
+//PCIE_TX_LAST_TLP2
+#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2__SHIFT 0x0
+#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2_MASK 0xFFFFFFFFL
+//PCIE_TX_LAST_TLP3
+#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3__SHIFT 0x0
+#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3_MASK 0xFFFFFFFFL
+//PCIE_TX_TRACKING_ADDR_LO
+#define PCIE_TX_TRACKING_ADDR_LO__TX_TRACKING_ADDR_LO__SHIFT 0x2
+#define PCIE_TX_TRACKING_ADDR_LO__TX_TRACKING_ADDR_LO_MASK 0xFFFFFFFCL
+//PCIE_TX_TRACKING_ADDR_HI
+#define PCIE_TX_TRACKING_ADDR_HI__TX_TRACKING_ADDR_HI__SHIFT 0x0
+#define PCIE_TX_TRACKING_ADDR_HI__TX_TRACKING_ADDR_HI_MASK 0xFFFFFFFFL
+//PCIE_TX_TRACKING_CTRL_STATUS
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_ENABLE__SHIFT 0x0
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT__SHIFT 0x1
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID__SHIFT 0x8
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID__SHIFT 0xf
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_ENABLE_MASK 0x00000001L
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
+#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//PCIE_TX_CTRL_4
+#define PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
+#define PCIE_TX_CTRL_4__TX_FC_STALL_DIS__SHIFT 0x7
+#define PCIE_TX_CTRL_4__TX_FC_STALL_FREQ__SHIFT 0x8
+#define PCIE_TX_CTRL_4__TX_FC_STALL_TIMER__SHIFT 0xc
+#define PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
+#define PCIE_TX_CTRL_4__TX_FC_STALL_DIS_MASK 0x00000080L
+#define PCIE_TX_CTRL_4__TX_FC_STALL_FREQ_MASK 0x00000F00L
+#define PCIE_TX_CTRL_4__TX_FC_STALL_TIMER_MASK 0x0000F000L
+//PCIE_TX_STATUS
+#define PCIE_TX_STATUS__TX_MST_MEM_READY__SHIFT 0x0
+#define PCIE_TX_STATUS__CI_MST_REQ_IDLE__SHIFT 0x1
+#define PCIE_TX_STATUS__CI_NO_PENDING_MST_MRD__SHIFT 0x2
+#define PCIE_TX_STATUS__CI_MST_WRRSP_IDLE__SHIFT 0x3
+#define PCIE_TX_STATUS__CI_SLV_RDRSP_IDLE__SHIFT 0x4
+#define PCIE_TX_STATUS__CI_MST_TX_IDLE__SHIFT 0x5
+#define PCIE_TX_STATUS__CI_SLV_CLKREQ_IDLE__SHIFT 0x6
+#define PCIE_TX_STATUS__CI_MST_CLKREQ_IDLE__SHIFT 0x7
+#define PCIE_TX_STATUS__TX_P_HDR_EMPTY__SHIFT 0x8
+#define PCIE_TX_STATUS__TX_NP_HDR_EMPTY__SHIFT 0x9
+#define PCIE_TX_STATUS__TX_P_DAT_EMPTY__SHIFT 0xa
+#define PCIE_TX_STATUS__TX_NP_DAT_EMPTY__SHIFT 0xb
+#define PCIE_TX_STATUS__CI_P_HDR_NO_FREE_CREDITS__SHIFT 0xc
+#define PCIE_TX_STATUS__CI_NP_HDR_NO_FREE_CREDITS__SHIFT 0xd
+#define PCIE_TX_STATUS__CI_P_DAT_NO_FREE_CREDITS__SHIFT 0xe
+#define PCIE_TX_STATUS__CI_NP_DAT_NO_FREE_CREDITS__SHIFT 0xf
+#define PCIE_TX_STATUS__TX_MST_MEM_READY_MASK 0x00000001L
+#define PCIE_TX_STATUS__CI_MST_REQ_IDLE_MASK 0x00000002L
+#define PCIE_TX_STATUS__CI_NO_PENDING_MST_MRD_MASK 0x00000004L
+#define PCIE_TX_STATUS__CI_MST_WRRSP_IDLE_MASK 0x00000008L
+#define PCIE_TX_STATUS__CI_SLV_RDRSP_IDLE_MASK 0x00000010L
+#define PCIE_TX_STATUS__CI_MST_TX_IDLE_MASK 0x00000020L
+#define PCIE_TX_STATUS__CI_SLV_CLKREQ_IDLE_MASK 0x00000040L
+#define PCIE_TX_STATUS__CI_MST_CLKREQ_IDLE_MASK 0x00000080L
+#define PCIE_TX_STATUS__TX_P_HDR_EMPTY_MASK 0x00000100L
+#define PCIE_TX_STATUS__TX_NP_HDR_EMPTY_MASK 0x00000200L
+#define PCIE_TX_STATUS__TX_P_DAT_EMPTY_MASK 0x00000400L
+#define PCIE_TX_STATUS__TX_NP_DAT_EMPTY_MASK 0x00000800L
+#define PCIE_TX_STATUS__CI_P_HDR_NO_FREE_CREDITS_MASK 0x00001000L
+#define PCIE_TX_STATUS__CI_NP_HDR_NO_FREE_CREDITS_MASK 0x00002000L
+#define PCIE_TX_STATUS__CI_P_DAT_NO_FREE_CREDITS_MASK 0x00004000L
+#define PCIE_TX_STATUS__CI_NP_DAT_NO_FREE_CREDITS_MASK 0x00008000L
+//PCIE_TX_F0_ATTR_CNTL
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_P__SHIFT 0x0
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_NP__SHIFT 0x2
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_CPL__SHIFT 0x4
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_P__SHIFT 0x6
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_NP__SHIFT 0x8
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_P__SHIFT 0xa
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_NP__SHIFT 0xc
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_P_MASK 0x00000003L
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_NP_MASK 0x0000000CL
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_CPL_MASK 0x00000030L
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_P_MASK 0x000000C0L
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_NP_MASK 0x00000300L
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_P_MASK 0x00000C00L
+#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_NP_MASK 0x00003000L
+//PCIE_TX_SWUS_ATTR_CNTL
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_P__SHIFT 0x0
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_NP__SHIFT 0x2
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_CPL__SHIFT 0x4
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_P__SHIFT 0x6
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_NP__SHIFT 0x8
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_P__SHIFT 0xa
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_NP__SHIFT 0xc
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_P_MASK 0x00000003L
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_NP_MASK 0x0000000CL
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_CPL_MASK 0x00000030L
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_P_MASK 0x000000C0L
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_NP_MASK 0x00000300L
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_P_MASK 0x00000C00L
+#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_NP_MASK 0x00003000L
+//PCIE_BW_BY_UNITID
+#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_EN__SHIFT 0x0
+#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID__SHIFT 0x8
+#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_EN_MASK 0x00000001L
+#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_MASK 0x00007F00L
+//PCIE_MST_CTRL_1
+#define PCIE_MST_CTRL_1__MST_PDAT_CREDITS_ADVT__SHIFT 0x0
+#define PCIE_MST_CTRL_1__MST_PDAT_CREDITS_OVERRIDE_EN__SHIFT 0x8
+#define PCIE_MST_CTRL_1__MST_PHDR_CREDITS_PENDING_RESET_DIS__SHIFT 0x9
+#define PCIE_MST_CTRL_1__CI_MSTSDP_ORIG_DISC_FIX_DIS__SHIFT 0xa
+#define PCIE_MST_CTRL_1__MST_SDP_CONNECT_EN__SHIFT 0xb
+#define PCIE_MST_CTRL_1__MST_SDP_MODE__SHIFT 0xc
+#define PCIE_MST_CTRL_1__MST_SDP_CREDITS_LIVE_OVERRIDE_DIS__SHIFT 0xe
+#define PCIE_MST_CTRL_1__MST_PHDR_CREDITS_OVERRIDE_EN__SHIFT 0xf
+#define PCIE_MST_CTRL_1__MST_PHDR_CREDITS_ADVT__SHIFT 0x10
+#define PCIE_MST_CTRL_1__MST_IDLE_HYSTERESIS__SHIFT 0x18
+#define PCIE_MST_CTRL_1__MST_PDAT_CREDITS_ADVT_MASK 0x000000FFL
+#define PCIE_MST_CTRL_1__MST_PDAT_CREDITS_OVERRIDE_EN_MASK 0x00000100L
+#define PCIE_MST_CTRL_1__MST_PHDR_CREDITS_PENDING_RESET_DIS_MASK 0x00000200L
+#define PCIE_MST_CTRL_1__CI_MSTSDP_ORIG_DISC_FIX_DIS_MASK 0x00000400L
+#define PCIE_MST_CTRL_1__MST_SDP_CONNECT_EN_MASK 0x00000800L
+#define PCIE_MST_CTRL_1__MST_SDP_MODE_MASK 0x00003000L
+#define PCIE_MST_CTRL_1__MST_SDP_CREDITS_LIVE_OVERRIDE_DIS_MASK 0x00004000L
+#define PCIE_MST_CTRL_1__MST_PHDR_CREDITS_OVERRIDE_EN_MASK 0x00008000L
+#define PCIE_MST_CTRL_1__MST_PHDR_CREDITS_ADVT_MASK 0x00FF0000L
+#define PCIE_MST_CTRL_1__MST_IDLE_HYSTERESIS_MASK 0xFF000000L
+//PCIE_HIP_REG0
+#define PCIE_HIP_REG0__CI_HIP_APT0_BASE_HI__SHIFT 0x0
+#define PCIE_HIP_REG0__CI_HIP_APT0_ENABLE__SHIFT 0x18
+#define PCIE_HIP_REG0__CI_HIP_APT0_PASID_MODE__SHIFT 0x19
+#define PCIE_HIP_REG0__CI_HIP_APT0_REQAT_MODE__SHIFT 0x1a
+#define PCIE_HIP_REG0__CI_HIP_APT0_REQIO_MODE__SHIFT 0x1d
+#define PCIE_HIP_REG0__CI_HIP_APT0_BASE_HI_MASK 0x000FFFFFL
+#define PCIE_HIP_REG0__CI_HIP_APT0_ENABLE_MASK 0x01000000L
+#define PCIE_HIP_REG0__CI_HIP_APT0_PASID_MODE_MASK 0x02000000L
+#define PCIE_HIP_REG0__CI_HIP_APT0_REQAT_MODE_MASK 0x1C000000L
+#define PCIE_HIP_REG0__CI_HIP_APT0_REQIO_MODE_MASK 0x60000000L
+//PCIE_HIP_REG1
+#define PCIE_HIP_REG1__CI_HIP_APT0_BASE_LO__SHIFT 0x0
+#define PCIE_HIP_REG1__CI_HIP_APT0_BASE_LO_MASK 0xFFFFFFFFL
+//PCIE_HIP_REG2
+#define PCIE_HIP_REG2__CI_HIP_APT0_LIMIT_HI__SHIFT 0x0
+#define PCIE_HIP_REG2__CI_HIP_APT0_LIMIT_HI_MASK 0x000FFFFFL
+//PCIE_HIP_REG3
+#define PCIE_HIP_REG3__CI_HIP_APT0_LIMIT_LO__SHIFT 0x0
+#define PCIE_HIP_REG3__CI_HIP_APT0_LIMIT_LO_MASK 0xFFFFFFFFL
+//PCIE_HIP_REG4
+#define PCIE_HIP_REG4__CI_HIP_APT1_BASE_HI__SHIFT 0x0
+#define PCIE_HIP_REG4__CI_HIP_APT1_ENABLE__SHIFT 0x18
+#define PCIE_HIP_REG4__CI_HIP_APT1_PASID_MODE__SHIFT 0x19
+#define PCIE_HIP_REG4__CI_HIP_APT1_REQAT_MODE__SHIFT 0x1a
+#define PCIE_HIP_REG4__CI_HIP_APT1_REQIO_MODE__SHIFT 0x1d
+#define PCIE_HIP_REG4__CI_HIP_APT1_BASE_HI_MASK 0x000FFFFFL
+#define PCIE_HIP_REG4__CI_HIP_APT1_ENABLE_MASK 0x01000000L
+#define PCIE_HIP_REG4__CI_HIP_APT1_PASID_MODE_MASK 0x02000000L
+#define PCIE_HIP_REG4__CI_HIP_APT1_REQAT_MODE_MASK 0x1C000000L
+#define PCIE_HIP_REG4__CI_HIP_APT1_REQIO_MODE_MASK 0x60000000L
+//PCIE_HIP_REG5
+#define PCIE_HIP_REG5__CI_HIP_APT1_BASE_LO__SHIFT 0x0
+#define PCIE_HIP_REG5__CI_HIP_APT1_BASE_LO_MASK 0xFFFFFFFFL
+//PCIE_HIP_REG6
+#define PCIE_HIP_REG6__CI_HIP_APT1_LIMIT_HI__SHIFT 0x0
+#define PCIE_HIP_REG6__CI_HIP_APT1_LIMIT_HI_MASK 0x000FFFFFL
+//PCIE_HIP_REG7
+#define PCIE_HIP_REG7__CI_HIP_APT1_LIMIT_LO__SHIFT 0x0
+#define PCIE_HIP_REG7__CI_HIP_APT1_LIMIT_LO_MASK 0xFFFFFFFFL
+//PCIE_HIP_REG8
+#define PCIE_HIP_REG8__CI_HIP_MASK__SHIFT 0x0
+#define PCIE_HIP_REG8__CI_HIP_MASK_MASK 0x000FFFFFL
+//PCIE_PERF_CNTL_TXCLK7
+#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK7__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK7__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK7__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK7__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK7__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK7__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK7
+#define PCIE_PERF_COUNT0_TXCLK7__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK7__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK7
+#define PCIE_PERF_COUNT1_TXCLK7__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK7__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK8
+#define PCIE_PERF_CNTL_TXCLK8__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK8__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK8__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK8__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK8__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK8__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK8__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK8__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK8
+#define PCIE_PERF_COUNT0_TXCLK8__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK8__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK8
+#define PCIE_PERF_COUNT1_TXCLK8__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK8__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK9
+#define PCIE_PERF_CNTL_TXCLK9__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK9__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK9__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK9__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK9__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK9__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK9__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK9__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK9
+#define PCIE_PERF_COUNT0_TXCLK9__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK9__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK9
+#define PCIE_PERF_COUNT1_TXCLK9__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK9__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_PERF_CNTL_TXCLK10
+#define PCIE_PERF_CNTL_TXCLK10__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK10__EVENT1_SEL__SHIFT 0x8
+#define PCIE_PERF_CNTL_TXCLK10__COUNTER0_FULL__SHIFT 0x10
+#define PCIE_PERF_CNTL_TXCLK10__COUNTER1_FULL__SHIFT 0x11
+#define PCIE_PERF_CNTL_TXCLK10__EVENT0_SEL_MASK 0x000000FFL
+#define PCIE_PERF_CNTL_TXCLK10__EVENT1_SEL_MASK 0x0000FF00L
+#define PCIE_PERF_CNTL_TXCLK10__COUNTER0_FULL_MASK 0x00010000L
+#define PCIE_PERF_CNTL_TXCLK10__COUNTER1_FULL_MASK 0x00020000L
+//PCIE_PERF_COUNT0_TXCLK10
+#define PCIE_PERF_COUNT0_TXCLK10__COUNTER0__SHIFT 0x0
+#define PCIE_PERF_COUNT0_TXCLK10__COUNTER0_MASK 0xFFFFFFFFL
+//PCIE_PERF_COUNT1_TXCLK10
+#define PCIE_PERF_COUNT1_TXCLK10__COUNTER1__SHIFT 0x0
+#define PCIE_PERF_COUNT1_TXCLK10__COUNTER1_MASK 0xFFFFFFFFL
+//PCIE_LANE_ERROR_COUNTERS_0
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE0_ERROR_COUNTER__SHIFT 0x0
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE1_ERROR_COUNTER__SHIFT 0x8
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE2_ERROR_COUNTER__SHIFT 0x10
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE3_ERROR_COUNTER__SHIFT 0x18
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE0_ERROR_COUNTER_MASK 0x000000FFL
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE1_ERROR_COUNTER_MASK 0x0000FF00L
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE2_ERROR_COUNTER_MASK 0x00FF0000L
+#define PCIE_LANE_ERROR_COUNTERS_0__LANE3_ERROR_COUNTER_MASK 0xFF000000L
+//PCIE_LANE_ERROR_COUNTERS_1
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE4_ERROR_COUNTER__SHIFT 0x0
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE5_ERROR_COUNTER__SHIFT 0x8
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE6_ERROR_COUNTER__SHIFT 0x10
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE7_ERROR_COUNTER__SHIFT 0x18
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE4_ERROR_COUNTER_MASK 0x000000FFL
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE5_ERROR_COUNTER_MASK 0x0000FF00L
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE6_ERROR_COUNTER_MASK 0x00FF0000L
+#define PCIE_LANE_ERROR_COUNTERS_1__LANE7_ERROR_COUNTER_MASK 0xFF000000L
+//PCIE_LANE_ERROR_COUNTERS_2
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE8_ERROR_COUNTER__SHIFT 0x0
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE9_ERROR_COUNTER__SHIFT 0x8
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE10_ERROR_COUNTER__SHIFT 0x10
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE11_ERROR_COUNTER__SHIFT 0x18
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE8_ERROR_COUNTER_MASK 0x000000FFL
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE9_ERROR_COUNTER_MASK 0x0000FF00L
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE10_ERROR_COUNTER_MASK 0x00FF0000L
+#define PCIE_LANE_ERROR_COUNTERS_2__LANE11_ERROR_COUNTER_MASK 0xFF000000L
+//PCIE_LANE_ERROR_COUNTERS_3
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE12_ERROR_COUNTER__SHIFT 0x0
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE13_ERROR_COUNTER__SHIFT 0x8
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE14_ERROR_COUNTER__SHIFT 0x10
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE15_ERROR_COUNTER__SHIFT 0x18
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE12_ERROR_COUNTER_MASK 0x000000FFL
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE13_ERROR_COUNTER_MASK 0x0000FF00L
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE14_ERROR_COUNTER_MASK 0x00FF0000L
+#define PCIE_LANE_ERROR_COUNTERS_3__LANE15_ERROR_COUNTER_MASK 0xFF000000L
+//SMU_PCIE_FENCED1_REG
+#define SMU_PCIE_FENCED1_REG__MP0_PCIE_CROSSFIRE_LOCKDOWN_EN__SHIFT 0x0
+#define SMU_PCIE_FENCED1_REG__MP0_PCIE_CROSSFIRE_LOCKDOWN_EN_MASK 0x00000001L
+//SMU_PCIE_FENCED2_REG
+#define SMU_PCIE_FENCED2_REG__MP0_PCIE_OVERCLOCKING_EN__SHIFT 0x0
+#define SMU_PCIE_FENCED2_REG__MP0_PCIE_OVERCLOCKING_EN_MASK 0x00000001L
+
+
+#endif
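The __SHIFT/_MASK pairs defined in the header above are not normally read on their own; they feed generic field-access helpers. Below is a minimal, self-contained C sketch of that get/modify pattern using the CPM_CONTROL LCLK_DYN_GATE_LATENCY field from this hunk. FIELD_GET and FIELD_SET are illustrative local helpers written for this sketch, not names taken from the amdgpu sources.

/*
 * Standalone sketch: using a __SHIFT/_MASK pair to read and update a
 * bitfield in a 32-bit register value.  The two #defines are copied from
 * the header above; the helpers are local illustrations only.
 */
#include <stdint.h>
#include <stdio.h>

#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY__SHIFT 0x9
#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY_MASK   0x00000600L

#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, fval) \
	(((val) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t cpm_control = 0x00000201;	/* example register read-back */

	/* extract the 2-bit LCLK_DYN_GATE_LATENCY field (bits 10:9) */
	uint32_t latency = FIELD_GET(cpm_control, CPM_CONTROL, LCLK_DYN_GATE_LATENCY);

	/* write the field back with a new value, leaving the other bits intact */
	cpm_control = FIELD_SET(cpm_control, CPM_CONTROL, LCLK_DYN_GATE_LATENCY, 3);

	printf("latency=%u new=0x%08x\n", (unsigned)latency, (unsigned)cpm_control);
	return 0;
}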
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
new file mode 100644
index 000000000000..14574112c469
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -0,0 +1,1672 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_5_0_0_OFFSET_HEADER
+#define _vcn_5_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: uvd_uvddec
+// base address: 0x1fc00
+#define regUVD_TOP_CTRL 0x0100
+#define regUVD_TOP_CTRL_BASE_IDX 1
+#define regUVD_CGC_GATE 0x0101
+#define regUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_CGC_CTRL 0x0102
+#define regUVD_CGC_CTRL_BASE_IDX 1
+#define regAVM_SUVD_CGC_GATE 0x0104
+#define regAVM_SUVD_CGC_GATE_BASE_IDX 1
+#define regEFC_SUVD_CGC_GATE 0x0104
+#define regEFC_SUVD_CGC_GATE_BASE_IDX 1
+#define regENT_SUVD_CGC_GATE 0x0104
+#define regENT_SUVD_CGC_GATE_BASE_IDX 1
+#define regIME_SUVD_CGC_GATE 0x0104
+#define regIME_SUVD_CGC_GATE_BASE_IDX 1
+#define regPPU_SUVD_CGC_GATE 0x0104
+#define regPPU_SUVD_CGC_GATE_BASE_IDX 1
+#define regSAOE_SUVD_CGC_GATE 0x0104
+#define regSAOE_SUVD_CGC_GATE_BASE_IDX 1
+#define regSCM_SUVD_CGC_GATE 0x0104
+#define regSCM_SUVD_CGC_GATE_BASE_IDX 1
+#define regSDB_SUVD_CGC_GATE 0x0104
+#define regSDB_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_GATE 0x0104
+#define regSIT0_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_GATE 0x0104
+#define regSIT1_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_GATE 0x0104
+#define regSIT2_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT_SUVD_CGC_GATE 0x0104
+#define regSIT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSMPA_SUVD_CGC_GATE 0x0104
+#define regSMPA_SUVD_CGC_GATE_BASE_IDX 1
+#define regSMP_SUVD_CGC_GATE 0x0104
+#define regSMP_SUVD_CGC_GATE_BASE_IDX 1
+#define regSRE_SUVD_CGC_GATE 0x0104
+#define regSRE_SUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_SUVD_CGC_GATE 0x0104
+#define regUVD_SUVD_CGC_GATE_BASE_IDX 1
+#define regAVM_SUVD_CGC_GATE2 0x0105
+#define regAVM_SUVD_CGC_GATE2_BASE_IDX 1
+#define regDBR_SUVD_CGC_GATE2 0x0105
+#define regDBR_SUVD_CGC_GATE2_BASE_IDX 1
+#define regENT_SUVD_CGC_GATE2 0x0105
+#define regENT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regIME_SUVD_CGC_GATE2 0x0105
+#define regIME_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSAOE_SUVD_CGC_GATE2 0x0105
+#define regSAOE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSDB_SUVD_CGC_GATE2 0x0105
+#define regSDB_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_GATE2 0x0105
+#define regSIT0_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_GATE2 0x0105
+#define regSIT1_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_GATE2 0x0105
+#define regSIT2_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT_SUVD_CGC_GATE2 0x0105
+#define regSIT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSMPA_SUVD_CGC_GATE2 0x0105
+#define regSMPA_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSMP_SUVD_CGC_GATE2 0x0105
+#define regSMP_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSRE_SUVD_CGC_GATE2 0x0105
+#define regSRE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regUVD_SUVD_CGC_GATE2 0x0105
+#define regUVD_SUVD_CGC_GATE2_BASE_IDX 1
+#define regAVM_SUVD_CGC_CTRL 0x0106
+#define regAVM_SUVD_CGC_CTRL_BASE_IDX 1
+#define regDBR_SUVD_CGC_CTRL 0x0106
+#define regDBR_SUVD_CGC_CTRL_BASE_IDX 1
+#define regEFC_SUVD_CGC_CTRL 0x0106
+#define regEFC_SUVD_CGC_CTRL_BASE_IDX 1
+#define regENT_SUVD_CGC_CTRL 0x0106
+#define regENT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regIME_SUVD_CGC_CTRL 0x0106
+#define regIME_SUVD_CGC_CTRL_BASE_IDX 1
+#define regPPU_SUVD_CGC_CTRL 0x0106
+#define regPPU_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSAOE_SUVD_CGC_CTRL 0x0106
+#define regSAOE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSCM_SUVD_CGC_CTRL 0x0106
+#define regSCM_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSDB_SUVD_CGC_CTRL 0x0106
+#define regSDB_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_CTRL 0x0106
+#define regSIT0_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_CTRL 0x0106
+#define regSIT1_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_CTRL 0x0106
+#define regSIT2_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT_SUVD_CGC_CTRL 0x0106
+#define regSIT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSMPA_SUVD_CGC_CTRL 0x0106
+#define regSMPA_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSMP_SUVD_CGC_CTRL 0x0106
+#define regSMP_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSRE_SUVD_CGC_CTRL 0x0106
+#define regSRE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_SUVD_CGC_CTRL 0x0106
+#define regUVD_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_CGC_CTRL3 0x010a
+#define regUVD_CGC_CTRL3_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_DATA0 0x0110
+#define regUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_DATA1 0x0111
+#define regUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define regUVD_GPCOM_SYS_CMD 0x0112
+#define regUVD_GPCOM_SYS_CMD_BASE_IDX 1
+#define regUVD_GPCOM_SYS_DATA0 0x0113
+#define regUVD_GPCOM_SYS_DATA0_BASE_IDX 1
+#define regUVD_GPCOM_SYS_DATA1 0x0114
+#define regUVD_GPCOM_SYS_DATA1_BASE_IDX 1
+#define regUVD_VCPU_INT_EN 0x0115
+#define regUVD_VCPU_INT_EN_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS 0x0116
+#define regUVD_VCPU_INT_STATUS_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK 0x0117
+#define regUVD_VCPU_INT_ACK_BASE_IDX 1
+#define regUVD_VCPU_INT_ROUTE 0x0118
+#define regUVD_VCPU_INT_ROUTE_BASE_IDX 1
+#define regUVD_DRV_FW_MSG 0x0119
+#define regUVD_DRV_FW_MSG_BASE_IDX 1
+#define regUVD_FW_DRV_MSG_ACK 0x011a
+#define regUVD_FW_DRV_MSG_ACK_BASE_IDX 1
+#define regUVD_SUVD_INT_EN 0x011b
+#define regUVD_SUVD_INT_EN_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS 0x011c
+#define regUVD_SUVD_INT_STATUS_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK 0x011d
+#define regUVD_SUVD_INT_ACK_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_EN 0x011e
+#define regUVD_ENC_VCPU_INT_EN_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_STATUS 0x011f
+#define regUVD_ENC_VCPU_INT_STATUS_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_ACK 0x0120
+#define regUVD_ENC_VCPU_INT_ACK_BASE_IDX 1
+#define regUVD_MASTINT_EN 0x0121
+#define regUVD_MASTINT_EN_BASE_IDX 1
+#define regUVD_SYS_INT_EN 0x0122
+#define regUVD_SYS_INT_EN_BASE_IDX 1
+#define regUVD_SYS_INT_STATUS 0x0123
+#define regUVD_SYS_INT_STATUS_BASE_IDX 1
+#define regUVD_SYS_INT_ACK 0x0124
+#define regUVD_SYS_INT_ACK_BASE_IDX 1
+#define regUVD_JOB_DONE 0x0125
+#define regUVD_JOB_DONE_BASE_IDX 1
+#define regUVD_CBUF_ID 0x0126
+#define regUVD_CBUF_ID_BASE_IDX 1
+#define regUVD_CONTEXT_ID 0x0127
+#define regUVD_CONTEXT_ID_BASE_IDX 1
+#define regUVD_CONTEXT_ID2 0x0128
+#define regUVD_CONTEXT_ID2_BASE_IDX 1
+#define regUVD_NO_OP 0x0129
+#define regUVD_NO_OP_BASE_IDX 1
+#define regUVD_RB_BASE_LO 0x012a
+#define regUVD_RB_BASE_LO_BASE_IDX 1
+#define regUVD_RB_BASE_HI 0x012b
+#define regUVD_RB_BASE_HI_BASE_IDX 1
+#define regUVD_RB_SIZE 0x012c
+#define regUVD_RB_SIZE_BASE_IDX 1
+#define regUVD_RB_BASE_LO2 0x012f
+#define regUVD_RB_BASE_LO2_BASE_IDX 1
+#define regUVD_RB_BASE_HI2 0x0130
+#define regUVD_RB_BASE_HI2_BASE_IDX 1
+#define regUVD_RB_SIZE2 0x0131
+#define regUVD_RB_SIZE2_BASE_IDX 1
+#define regUVD_RB_BASE_LO3 0x0134
+#define regUVD_RB_BASE_LO3_BASE_IDX 1
+#define regUVD_RB_BASE_HI3 0x0135
+#define regUVD_RB_BASE_HI3_BASE_IDX 1
+#define regUVD_RB_SIZE3 0x0136
+#define regUVD_RB_SIZE3_BASE_IDX 1
+#define regUVD_RB_BASE_LO4 0x0139
+#define regUVD_RB_BASE_LO4_BASE_IDX 1
+#define regUVD_RB_BASE_HI4 0x013a
+#define regUVD_RB_BASE_HI4_BASE_IDX 1
+#define regUVD_RB_SIZE4 0x013b
+#define regUVD_RB_SIZE4_BASE_IDX 1
+#define regUVD_OUT_RB_BASE_LO 0x013e
+#define regUVD_OUT_RB_BASE_LO_BASE_IDX 1
+#define regUVD_OUT_RB_BASE_HI 0x013f
+#define regUVD_OUT_RB_BASE_HI_BASE_IDX 1
+#define regUVD_OUT_RB_SIZE 0x0140
+#define regUVD_OUT_RB_SIZE_BASE_IDX 1
+#define regUVD_IOV_ACTIVE_FCN_ID 0x0143
+#define regUVD_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regUVD_IOV_MAILBOX 0x0144
+#define regUVD_IOV_MAILBOX_BASE_IDX 1
+#define regUVD_IOV_MAILBOX_RESP 0x0145
+#define regUVD_IOV_MAILBOX_RESP_BASE_IDX 1
+#define regUVD_RB_ARB_CTRL 0x0146
+#define regUVD_RB_ARB_CTRL_BASE_IDX 1
+#define regUVD_CTX_INDEX 0x0147
+#define regUVD_CTX_INDEX_BASE_IDX 1
+#define regUVD_CTX_DATA 0x0148
+#define regUVD_CTX_DATA_BASE_IDX 1
+#define regUVD_CXW_WR 0x0149
+#define regUVD_CXW_WR_BASE_IDX 1
+#define regUVD_CXW_WR_INT_ID 0x014a
+#define regUVD_CXW_WR_INT_ID_BASE_IDX 1
+#define regUVD_CXW_WR_INT_CTX_ID 0x014b
+#define regUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1
+#define regUVD_CXW_INT_ID 0x014c
+#define regUVD_CXW_INT_ID_BASE_IDX 1
+#define regUVD_MPEG2_ERROR 0x014d
+#define regUVD_MPEG2_ERROR_BASE_IDX 1
+#define regUVD_YBASE 0x0150
+#define regUVD_YBASE_BASE_IDX 1
+#define regUVD_UVBASE 0x0151
+#define regUVD_UVBASE_BASE_IDX 1
+#define regUVD_PITCH 0x0152
+#define regUVD_PITCH_BASE_IDX 1
+#define regUVD_WIDTH 0x0153
+#define regUVD_WIDTH_BASE_IDX 1
+#define regUVD_HEIGHT 0x0154
+#define regUVD_HEIGHT_BASE_IDX 1
+#define regUVD_PICCOUNT 0x0155
+#define regUVD_PICCOUNT_BASE_IDX 1
+#define regUVD_MPRD_INITIAL_XY 0x0156
+#define regUVD_MPRD_INITIAL_XY_BASE_IDX 1
+#define regUVD_MPEG2_CTRL 0x0157
+#define regUVD_MPEG2_CTRL_BASE_IDX 1
+#define regUVD_MB_CTL_BUF_BASE 0x0158
+#define regUVD_MB_CTL_BUF_BASE_BASE_IDX 1
+#define regUVD_PIC_CTL_BUF_BASE 0x0159
+#define regUVD_PIC_CTL_BUF_BASE_BASE_IDX 1
+#define regUVD_DXVA_BUF_SIZE 0x015a
+#define regUVD_DXVA_BUF_SIZE_BASE_IDX 1
+#define regUVD_SCRATCH_NP 0x015b
+#define regUVD_SCRATCH_NP_BASE_IDX 1
+#define regUVD_CLK_SWT_HANDSHAKE 0x015c
+#define regUVD_CLK_SWT_HANDSHAKE_BASE_IDX 1
+#define regUVD_GP_SCRATCH0 0x015e
+#define regUVD_GP_SCRATCH0_BASE_IDX 1
+#define regUVD_GP_SCRATCH1 0x015f
+#define regUVD_GP_SCRATCH1_BASE_IDX 1
+#define regUVD_GP_SCRATCH2 0x0160
+#define regUVD_GP_SCRATCH2_BASE_IDX 1
+#define regUVD_GP_SCRATCH3 0x0161
+#define regUVD_GP_SCRATCH3_BASE_IDX 1
+#define regUVD_GP_SCRATCH4 0x0162
+#define regUVD_GP_SCRATCH4_BASE_IDX 1
+#define regUVD_GP_SCRATCH5 0x0163
+#define regUVD_GP_SCRATCH5_BASE_IDX 1
+#define regUVD_GP_SCRATCH6 0x0164
+#define regUVD_GP_SCRATCH6_BASE_IDX 1
+#define regUVD_GP_SCRATCH7 0x0165
+#define regUVD_GP_SCRATCH7_BASE_IDX 1
+#define regUVD_GP_SCRATCH8 0x0166
+#define regUVD_GP_SCRATCH8_BASE_IDX 1
+#define regUVD_GP_SCRATCH9 0x0167
+#define regUVD_GP_SCRATCH9_BASE_IDX 1
+#define regUVD_GP_SCRATCH10 0x0168
+#define regUVD_GP_SCRATCH10_BASE_IDX 1
+#define regUVD_GP_SCRATCH11 0x0169
+#define regUVD_GP_SCRATCH11_BASE_IDX 1
+#define regUVD_GP_SCRATCH12 0x016a
+#define regUVD_GP_SCRATCH12_BASE_IDX 1
+#define regUVD_GP_SCRATCH13 0x016b
+#define regUVD_GP_SCRATCH13_BASE_IDX 1
+#define regUVD_GP_SCRATCH14 0x016c
+#define regUVD_GP_SCRATCH14_BASE_IDX 1
+#define regUVD_GP_SCRATCH15 0x016d
+#define regUVD_GP_SCRATCH15_BASE_IDX 1
+#define regUVD_GP_SCRATCH16 0x016e
+#define regUVD_GP_SCRATCH16_BASE_IDX 1
+#define regUVD_GP_SCRATCH17 0x016f
+#define regUVD_GP_SCRATCH17_BASE_IDX 1
+#define regUVD_GP_SCRATCH18 0x0170
+#define regUVD_GP_SCRATCH18_BASE_IDX 1
+#define regUVD_GP_SCRATCH19 0x0171
+#define regUVD_GP_SCRATCH19_BASE_IDX 1
+#define regUVD_GP_SCRATCH20 0x0172
+#define regUVD_GP_SCRATCH20_BASE_IDX 1
+#define regUVD_GP_SCRATCH21 0x0173
+#define regUVD_GP_SCRATCH21_BASE_IDX 1
+#define regUVD_GP_SCRATCH22 0x0174
+#define regUVD_GP_SCRATCH22_BASE_IDX 1
+#define regUVD_GP_SCRATCH23 0x0175
+#define regUVD_GP_SCRATCH23_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_LO 0x0176
+#define regUVD_AUDIO_RB_BASE_LO_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_HI 0x0177
+#define regUVD_AUDIO_RB_BASE_HI_BASE_IDX 1
+#define regUVD_AUDIO_RB_SIZE 0x0178
+#define regUVD_AUDIO_RB_SIZE_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS2 0x017b
+#define regUVD_VCPU_INT_STATUS2_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK2 0x017c
+#define regUVD_VCPU_INT_ACK2_BASE_IDX 1
+#define regUVD_VCPU_INT_EN2 0x017d
+#define regUVD_VCPU_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS2 0x017e
+#define regUVD_SUVD_CGC_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS2 0x0180
+#define regUVD_SUVD_INT_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_EN2 0x0181
+#define regUVD_SUVD_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK2 0x0182
+#define regUVD_SUVD_INT_ACK2_BASE_IDX 1
+#define regUVD_STATUS 0x0183
+#define regUVD_STATUS_BASE_IDX 1
+#define regUVD_ENC_PIPE_BUSY 0x0184
+#define regUVD_ENC_PIPE_BUSY_BASE_IDX 1
+#define regUVD_FW_POWER_STATUS 0x0185
+#define regUVD_FW_POWER_STATUS_BASE_IDX 1
+#define regUVD_CNTL 0x0186
+#define regUVD_CNTL_BASE_IDX 1
+#define regUVD_SOFT_RESET 0x0187
+#define regUVD_SOFT_RESET_BASE_IDX 1
+#define regUVD_SOFT_RESET2 0x0188
+#define regUVD_SOFT_RESET2_BASE_IDX 1
+#define regUVD_MMSCH_SOFT_RESET 0x0189
+#define regUVD_MMSCH_SOFT_RESET_BASE_IDX 1
+#define regUVD_WIG_CTRL 0x018a
+#define regUVD_WIG_CTRL_BASE_IDX 1
+#define regUVD_CGC_STATUS 0x018c
+#define regUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_CGC_UDEC_STATUS 0x018e
+#define regUVD_CGC_UDEC_STATUS_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS 0x0190
+#define regUVD_SUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_CMD 0x0192
+#define regUVD_GPCOM_VCPU_CMD_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_cdefe_cdefe_broadcast_dec0
+// base address: 0x1fc00
+#define regCDEFE_SUVD_CGC_GATE 0x0104
+#define regCDEFE_SUVD_CGC_GATE_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_GATE2 0x0105
+#define regCDEFE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_CTRL 0x0106
+#define regCDEFE_SUVD_CGC_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_ecpudec
+// base address: 0x1ff00
+#define regUVD_VCPU_CACHE_OFFSET0 0x01c0
+#define regUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE0 0x01c1
+#define regUVD_VCPU_CACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET1 0x01c2
+#define regUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE1 0x01c3
+#define regUVD_VCPU_CACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET2 0x01c4
+#define regUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE2 0x01c5
+#define regUVD_VCPU_CACHE_SIZE2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET3 0x01c6
+#define regUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE3 0x01c7
+#define regUVD_VCPU_CACHE_SIZE3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET4 0x01c8
+#define regUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE4 0x01c9
+#define regUVD_VCPU_CACHE_SIZE4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET5 0x01ca
+#define regUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE5 0x01cb
+#define regUVD_VCPU_CACHE_SIZE5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET6 0x01cc
+#define regUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE6 0x01cd
+#define regUVD_VCPU_CACHE_SIZE6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET7 0x01ce
+#define regUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE7 0x01cf
+#define regUVD_VCPU_CACHE_SIZE7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET8 0x01d0
+#define regUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE8 0x01d1
+#define regUVD_VCPU_CACHE_SIZE8_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET0 0x01d2
+#define regUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE0 0x01d3
+#define regUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET1 0x01d4
+#define regUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE1 0x01d5
+#define regUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CNTL 0x01d6
+#define regUVD_VCPU_CNTL_BASE_IDX 1
+#define regUVD_VCPU_PRID 0x01d7
+#define regUVD_VCPU_PRID_BASE_IDX 1
+#define regUVD_VCPU_TRCE 0x01d8
+#define regUVD_VCPU_TRCE_BASE_IDX 1
+#define regUVD_VCPU_TRCE_RD 0x01d9
+#define regUVD_VCPU_TRCE_RD_BASE_IDX 1
+#define regUVD_VCPU_IND_INDEX 0x01db
+#define regUVD_VCPU_IND_INDEX_BASE_IDX 1
+#define regUVD_VCPU_IND_DATA 0x01dc
+#define regUVD_VCPU_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: uvd_lmi_adpdec
+// base address: 0x20290
+#define regUVD_LMI_RE_64BIT_BAR_LOW 0x02af
+#define regUVD_LMI_RE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RE_64BIT_BAR_HIGH 0x02b0
+#define regUVD_LMI_RE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_LOW 0x02b1
+#define regUVD_LMI_IT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_HIGH 0x02b2
+#define regUVD_LMI_IT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_LOW 0x02b3
+#define regUVD_LMI_MP_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_HIGH 0x02b4
+#define regUVD_LMI_MP_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_LOW 0x02b5
+#define regUVD_LMI_CM_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_HIGH 0x02b6
+#define regUVD_LMI_CM_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_LOW 0x02b7
+#define regUVD_LMI_DB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_HIGH 0x02b8
+#define regUVD_LMI_DB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_LOW 0x02b9
+#define regUVD_LMI_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH 0x02ba
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW 0x02bb
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH 0x02bc
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW 0x02bd
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH 0x02be
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW 0x02bf
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH 0x02c0
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW 0x02c1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH 0x02c2
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x02c5
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x02c6
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x02c7
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x02c8
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW 0x02c9
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH 0x02ca
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x02cb
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x02cc
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x02cd
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x02ce
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x02cf
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x02d0
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_LOW 0x02d1
+#define regUVD_LMI_CENC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH 0x02d2
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_LOW 0x02d3
+#define regUVD_LMI_SRE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH 0x02d4
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW 0x02d5
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH 0x02d6
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW 0x02d7
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH 0x02d8
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW 0x02d9
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH 0x02da
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW 0x02dd
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH 0x02de
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW 0x02df
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH 0x02e0
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW 0x02e1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH 0x02e2
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW 0x02e3
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH 0x02e4
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW 0x02e5
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH 0x02e6
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW 0x02e7
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH 0x02e8
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW 0x02e9
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH 0x02ea
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW 0x02eb
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH 0x02ec
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW 0x02ed
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH 0x02ee
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW 0x02ef
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH 0x02f0
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW 0x02f1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH 0x02f2
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x02fb
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x02fc
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x02fd
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x02fe
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x02ff
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x0300
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x0301
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x0302
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0303
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0304
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x0305
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x0306
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x0307
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x0308
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x0309
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x030a
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW 0x030b
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH 0x030c
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW 0x030d
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH 0x030e
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH 0x030f
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW 0x0318
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH 0x0319
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW 0x031a
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH 0x031b
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW 0x031c
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH 0x031d
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW 0x031e
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH 0x031f
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_ADP_ATOMIC_CONFIG 0x0321
+#define regUVD_ADP_ATOMIC_CONFIG_BASE_IDX 1
+#define regUVD_LMI_ARB_CTRL2 0x0322
+#define regUVD_LMI_ARB_CTRL2_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x0327
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI 0x0328
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_LAT_CTRL 0x0329
+#define regUVD_LMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_LMI_LAT_CNTR 0x032a
+#define regUVD_LMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_AVG_LAT_CNTR 0x032b
+#define regUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_SPH 0x032c
+#define regUVD_LMI_SPH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMID 0x032d
+#define regUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_LMI_CTRL2 0x032e
+#define regUVD_LMI_CTRL2_BASE_IDX 1
+#define regUVD_LMI_URGENT_CTRL 0x032f
+#define regUVD_LMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_LMI_CTRL 0x0330
+#define regUVD_LMI_CTRL_BASE_IDX 1
+#define regUVD_LMI_STATUS 0x0331
+#define regUVD_LMI_STATUS_BASE_IDX 1
+#define regUVD_LMI_PERFMON_CTRL 0x0334
+#define regUVD_LMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_LO 0x0335
+#define regUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_HI 0x0336
+#define regUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_LMI_ADP_SWAP_CNTL 0x0337
+#define regUVD_LMI_ADP_SWAP_CNTL_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_VMID 0x0338
+#define regUVD_LMI_RBC_RB_VMID_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_VMID 0x0339
+#define regUVD_LMI_RBC_IB_VMID_BASE_IDX 1
+#define regUVD_LMI_MC_CREDITS 0x033a
+#define regUVD_LMI_MC_CREDITS_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_INDEX 0x033e
+#define regUVD_LMI_ADP_IND_INDEX_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_DATA 0x033f
+#define regUVD_LMI_ADP_IND_DATA_BASE_IDX 1
+#define regUVD_LMI_ADP_PF_EN 0x0340
+#define regUVD_LMI_ADP_PF_EN_BASE_IDX 1
+#define regUVD_LMI_PREF_CTRL 0x0342
+#define regUVD_LMI_PREF_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg0_jpegnpdec
+// base address: 0x20f00
+#define regUVD_JPEG_CNTL 0x05c0
+#define regUVD_JPEG_CNTL_BASE_IDX 1
+#define regUVD_JPEG_RB_BASE 0x05c1
+#define regUVD_JPEG_RB_BASE_BASE_IDX 1
+#define regUVD_JPEG_RB_WPTR 0x05c2
+#define regUVD_JPEG_RB_WPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_RPTR 0x05c3
+#define regUVD_JPEG_RB_RPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_SIZE 0x05c4
+#define regUVD_JPEG_RB_SIZE_BASE_IDX 1
+#define regUVD_JPEG_DEC_CNT 0x05c5
+#define regUVD_JPEG_DEC_CNT_BASE_IDX 1
+#define regUVD_JPEG_SPS_INFO 0x05c6
+#define regUVD_JPEG_SPS_INFO_BASE_IDX 1
+#define regUVD_JPEG_SPS1_INFO 0x05c7
+#define regUVD_JPEG_SPS1_INFO_BASE_IDX 1
+#define regUVD_JPEG_RE_TIMER 0x05c8
+#define regUVD_JPEG_RE_TIMER_BASE_IDX 1
+#define regUVD_JPEG_DEC_SCRATCH0 0x05c9
+#define regUVD_JPEG_DEC_SCRATCH0_BASE_IDX 1
+#define regUVD_JPEG_INT_EN 0x05ca
+#define regUVD_JPEG_INT_EN_BASE_IDX 1
+#define regUVD_JPEG_INT_STAT 0x05cb
+#define regUVD_JPEG_INT_STAT_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL0 0x05cc
+#define regUVD_JPEG_TIER_CNTL0_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL1 0x05cd
+#define regUVD_JPEG_TIER_CNTL1_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL2 0x05ce
+#define regUVD_JPEG_TIER_CNTL2_BASE_IDX 1
+#define regUVD_JPEG_TIER_STATUS 0x05cf
+#define regUVD_JPEG_TIER_STATUS_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg_sclk0_jpegnpsclkdec
+// base address: 0x21000
+#define regUVD_JPEG_OUTBUF_CNTL 0x0600
+#define regUVD_JPEG_OUTBUF_CNTL_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_WPTR 0x0601
+#define regUVD_JPEG_OUTBUF_WPTR_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_RPTR 0x0602
+#define regUVD_JPEG_OUTBUF_RPTR_BASE_IDX 1
+#define regUVD_JPEG_PITCH 0x0603
+#define regUVD_JPEG_PITCH_BASE_IDX 1
+#define regUVD_JPEG_UV_PITCH 0x0604
+#define regUVD_JPEG_UV_PITCH_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE 0x0605
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE 0x0606
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX8_ADDR_CONFIG 0x0607
+#define regJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE 0x0608
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE 0x0609
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX10_ADDR_CONFIG 0x060a
+#define regJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_ADDR_MODE 0x060b
+#define regJPEG_DEC_ADDR_MODE_BASE_IDX 1
+#define regUVD_JPEG_OUTPUT_XY 0x060c
+#define regUVD_JPEG_OUTPUT_XY_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_CMD 0x060d
+#define regUVD_JPEG_GPCOM_CMD_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA0 0x060e
+#define regUVD_JPEG_GPCOM_DATA0_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA1 0x060f
+#define regUVD_JPEG_GPCOM_DATA1_BASE_IDX 1
+#define regUVD_JPEG_SCRATCH1 0x0610
+#define regUVD_JPEG_SCRATCH1_BASE_IDX 1
+#define regUVD_JPEG_DEC_SOFT_RST 0x0611
+#define regUVD_JPEG_DEC_SOFT_RST_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jrbc0_uvd_jrbc_dec
+// base address: 0x21100
+#define regUVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC_RB_CNTL 0x0641
+#define regUVD_JRBC_RB_CNTL_BASE_IDX 1
+#define regUVD_JRBC_IB_SIZE 0x0642
+#define regUVD_JRBC_IB_SIZE_BASE_IDX 1
+#define regUVD_JRBC_URGENT_CNTL 0x0643
+#define regUVD_JRBC_URGENT_CNTL_BASE_IDX 1
+#define regUVD_JRBC_RB_REF_DATA 0x0644
+#define regUVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define regUVD_JRBC_RB_COND_RD_TIMER 0x0645
+#define regUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC_SOFT_RESET 0x0648
+#define regUVD_JRBC_SOFT_RESET_BASE_IDX 1
+#define regUVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC_RB_BUF_STATUS 0x064b
+#define regUVD_JRBC_RB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC_IB_BUF_STATUS 0x064c
+#define regUVD_JRBC_IB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC_IB_SIZE_UPDATE 0x064d
+#define regUVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define regUVD_JRBC_IB_COND_RD_TIMER 0x064e
+#define regUVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC_IB_REF_DATA 0x064f
+#define regUVD_JRBC_IB_REF_DATA_BASE_IDX 1
+#define regUVD_JPEG_PREEMPT_CMD 0x0650
+#define regUVD_JPEG_PREEMPT_CMD_BASE_IDX 1
+#define regUVD_JPEG_PREEMPT_FENCE_DATA0 0x0651
+#define regUVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 1
+#define regUVD_JPEG_PREEMPT_FENCE_DATA1 0x0652
+#define regUVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 1
+#define regUVD_JRBC_RB_SIZE 0x0653
+#define regUVD_JRBC_RB_SIZE_BASE_IDX 1
+#define regUVD_JRBC_SCRATCH0 0x0654
+#define regUVD_JRBC_SCRATCH0_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jmi0_uvd_jmi_dec
+// base address: 0x21180
+#define regUVD_JPEG_DEC_PF_CTRL 0x0660
+#define regUVD_JPEG_DEC_PF_CTRL_BASE_IDX 1
+#define regUVD_LMI_JRBC_CTRL 0x0661
+#define regUVD_LMI_JRBC_CTRL_BASE_IDX 1
+#define regUVD_LMI_JPEG_CTRL 0x0662
+#define regUVD_LMI_JPEG_CTRL_BASE_IDX 1
+#define regJPEG_LMI_DROP 0x0663
+#define regJPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_VMID 0x0664
+#define regUVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_VMID 0x0665
+#define regUVD_LMI_JRBC_RB_VMID_BASE_IDX 1
+#define regUVD_LMI_JPEG_VMID 0x0666
+#define regUVD_LMI_JPEG_VMID_BASE_IDX 1
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0667
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0668
+#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0669
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x066a
+#define regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x066b
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x066c
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JPEG_PREEMPT_VMID 0x066d
+#define regUVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 1
+#define regUVD_JMI_DEC_SWAP_CNTL 0x066e
+#define regUVD_JMI_DEC_SWAP_CNTL_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_CNTL 0x066f
+#define regUVD_JMI_ATOMIC_CNTL_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0670
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0671
+#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0672
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0673
+#define regUVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0674
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0675
+#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0676
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0677
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0678
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679
+#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI_ATOMIC_CNTL2 0x067d
+#define regUVD_JMI_ATOMIC_CNTL2_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jmi_common_dec
+// base address: 0x21300
+#define regUVD_JADP_MCIF_URGENT_CTRL 0x06c1
+#define regUVD_JADP_MCIF_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_URGENT_CTRL 0x06c2
+#define regUVD_JMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_CTRL 0x06c3
+#define regUVD_JMI_CTRL_BASE_IDX 1
+#define regJPEG_MEMCHECK_CLAMPING_CNTL 0x06c4
+#define regJPEG_MEMCHECK_CLAMPING_CNTL_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR 0x06c5
+#define regJPEG_MEMCHECK_SAFE_ADDR_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT 0x06c6
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT_BASE_IDX 1
+#define regUVD_JMI_LAT_CTRL 0x06c7
+#define regUVD_JMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_JMI_LAT_CNTR 0x06c8
+#define regUVD_JMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_AVG_LAT_CNTR 0x06c9
+#define regUVD_JMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_PERFMON_CTRL 0x06ca
+#define regUVD_JMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_LO 0x06cb
+#define regUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_HI 0x06cc
+#define regUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_JMI_CLEAN_STATUS 0x06cd
+#define regUVD_JMI_CLEAN_STATUS_BASE_IDX 1
+#define regUVD_JMI_CNTL 0x06ce
+#define regUVD_JMI_CNTL_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg_common_dec
+// base address: 0x21400
+#define regJPEG_SOFT_RESET_STATUS 0x0700
+#define regJPEG_SOFT_RESET_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_EN 0x0701
+#define regJPEG_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_SYS_INT_EN1 0x0702
+#define regJPEG_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS 0x0703
+#define regJPEG_SYS_INT_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS1 0x0704
+#define regJPEG_SYS_INT_STATUS1_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK 0x0705
+#define regJPEG_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK1 0x0706
+#define regJPEG_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN 0x0707
+#define regJPEG_MEMCHECK_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN1 0x0708
+#define regJPEG_MEMCHECK_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT 0x0709
+#define regJPEG_MEMCHECK_SYS_INT_STAT_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT1 0x070a
+#define regJPEG_MEMCHECK_SYS_INT_STAT1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT2 0x070b
+#define regJPEG_MEMCHECK_SYS_INT_STAT2_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK 0x070c
+#define regJPEG_MEMCHECK_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK1 0x070d
+#define regJPEG_MEMCHECK_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK2 0x070e
+#define regJPEG_MEMCHECK_SYS_INT_ACK2_BASE_IDX 1
+#define regJPEG_MASTINT_EN 0x070f
+#define regJPEG_MASTINT_EN_BASE_IDX 1
+#define regJPEG_IH_CTRL 0x0710
+#define regJPEG_IH_CTRL_BASE_IDX 1
+#define regJRBBM_ARB_CTRL 0x0712
+#define regJRBBM_ARB_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_jpeg_common_sclk_dec
+// base address: 0x21480
+#define regJPEG_CGC_GATE 0x0720
+#define regJPEG_CGC_GATE_BASE_IDX 1
+#define regJPEG_CGC_CTRL 0x0721
+#define regJPEG_CGC_CTRL_BASE_IDX 1
+#define regJPEG_CGC_STATUS 0x0722
+#define regJPEG_CGC_STATUS_BASE_IDX 1
+#define regJPEG_COMN_CGC_MEM_CTRL 0x0723
+#define regJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_DEC_CGC_MEM_CTRL 0x0724
+#define regJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_ENC_CGC_MEM_CTRL 0x0726
+#define regJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_PERF_BANK_CONF 0x0727
+#define regJPEG_PERF_BANK_CONF_BASE_IDX 1
+#define regJPEG_PERF_BANK_EVENT_SEL 0x0728
+#define regJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT0 0x0729
+#define regJPEG_PERF_BANK_COUNT0_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT1 0x072a
+#define regJPEG_PERF_BANK_COUNT1_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT2 0x072b
+#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT3 0x072c
+#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1
+
+
+// addressBlock: uvd_uvd_pg_dec
+// base address: 0x1f800
+#define regUVD_IPX_DLDO_CONFIG 0x0000
+#define regUVD_IPX_DLDO_CONFIG_BASE_IDX 1
+#define regUVD_IPX_DLDO_STATUS 0x0001
+#define regUVD_IPX_DLDO_STATUS_BASE_IDX 1
+#define regUVD_POWER_STATUS 0x0002
+#define regUVD_POWER_STATUS_BASE_IDX 1
+#define regUVD_JPEG_POWER_STATUS 0x0003
+#define regUVD_JPEG_POWER_STATUS_BASE_IDX 1
+#define regUVD_MC_DJPEG_RD_SPACE 0x0007
+#define regUVD_MC_DJPEG_RD_SPACE_BASE_IDX 1
+#define regUVD_MC_DJPEG_WR_SPACE 0x0008
+#define regUVD_MC_DJPEG_WR_SPACE_BASE_IDX 1
+#define regUVD_PG_IND_INDEX 0x000c
+#define regUVD_PG_IND_INDEX_BASE_IDX 1
+#define regUVD_PG_IND_DATA 0x000e
+#define regUVD_PG_IND_DATA_BASE_IDX 1
+#define regCC_UVD_HARVESTING 0x000f
+#define regCC_UVD_HARVESTING_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL 0x0011
+#define regUVD_DPG_LMA_CTL_BASE_IDX 1
+#define regUVD_DPG_LMA_DATA 0x0012
+#define regUVD_DPG_LMA_DATA_BASE_IDX 1
+#define regUVD_DPG_LMA_MASK 0x0013
+#define regUVD_DPG_LMA_MASK_BASE_IDX 1
+#define regUVD_DPG_PAUSE 0x0014
+#define regUVD_DPG_PAUSE_BASE_IDX 1
+#define regUVD_SCRATCH1 0x0015
+#define regUVD_SCRATCH1_BASE_IDX 1
+#define regUVD_SCRATCH2 0x0016
+#define regUVD_SCRATCH2_BASE_IDX 1
+#define regUVD_SCRATCH3 0x0017
+#define regUVD_SCRATCH3_BASE_IDX 1
+#define regUVD_SCRATCH4 0x0018
+#define regUVD_SCRATCH4_BASE_IDX 1
+#define regUVD_SCRATCH5 0x0019
+#define regUVD_SCRATCH5_BASE_IDX 1
+#define regUVD_SCRATCH6 0x001a
+#define regUVD_SCRATCH6_BASE_IDX 1
+#define regUVD_SCRATCH7 0x001b
+#define regUVD_SCRATCH7_BASE_IDX 1
+#define regUVD_SCRATCH8 0x001c
+#define regUVD_SCRATCH8_BASE_IDX 1
+#define regUVD_SCRATCH9 0x001d
+#define regUVD_SCRATCH9_BASE_IDX 1
+#define regUVD_SCRATCH10 0x001e
+#define regUVD_SCRATCH10_BASE_IDX 1
+#define regUVD_SCRATCH11 0x001f
+#define regUVD_SCRATCH11_BASE_IDX 1
+#define regUVD_SCRATCH12 0x0020
+#define regUVD_SCRATCH12_BASE_IDX 1
+#define regUVD_SCRATCH13 0x0021
+#define regUVD_SCRATCH13_BASE_IDX 1
+#define regUVD_SCRATCH14 0x0022
+#define regUVD_SCRATCH14_BASE_IDX 1
+#define regUVD_FREE_COUNTER_REG 0x0023
+#define regUVD_FREE_COUNTER_REG_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0024
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0025
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_DPG_VCPU_CACHE_OFFSET0 0x0026
+#define regUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID 0x0027
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_REG_FILTER_EN 0x0028
+#define regUVD_REG_FILTER_EN_BASE_IDX 1
+#define regUVD_SECURITY_REG_VIO_REPORT 0x0029
+#define regUVD_SECURITY_REG_VIO_REPORT_BASE_IDX 1
+#define regUVD_FW_VERSION 0x002a
+#define regUVD_FW_VERSION_BASE_IDX 1
+#define regUVD_PF_STATUS 0x002c
+#define regUVD_PF_STATUS_BASE_IDX 1
+#define regUVD_DPG_CLK_EN_VCPU_REPORT 0x002e
+#define regUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO 0x002f
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI 0x0030
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO 0x0031
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI 0x0032
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR 0x0033
+#define regCC_UVD_VCPU_ERR_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO 0x0034
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI 0x0035
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_SPACE 0x003d
+#define regUVD_LMI_MMSCH_NC_SPACE_BASE_IDX 1
+#define regUVD_LMI_ATOMIC_SPACE 0x003e
+#define regUVD_LMI_ATOMIC_SPACE_BASE_IDX 1
+#define regUVD_GFX8_ADDR_CONFIG 0x0041
+#define regUVD_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GFX10_ADDR_CONFIG 0x0042
+#define regUVD_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GPCNT2_CNTL 0x0043
+#define regUVD_GPCNT2_CNTL_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_LOWER 0x0044
+#define regUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_LOWER 0x0045
+#define regUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_UPPER 0x0046
+#define regUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_UPPER 0x0047
+#define regUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_CNTL 0x0048
+#define regUVD_GPCNT3_CNTL_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_LOWER 0x0049
+#define regUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_LOWER 0x004a
+#define regUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_UPPER 0x004b
+#define regUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_UPPER 0x004c
+#define regUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1
+#define regUVD_VCLK_DS_CNTL 0x004d
+#define regUVD_VCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_DCLK_DS_CNTL 0x004e
+#define regUVD_DCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_TSC_LOWER 0x004f
+#define regUVD_TSC_LOWER_BASE_IDX 1
+#define regUVD_TSC_UPPER 0x0050
+#define regUVD_TSC_UPPER_BASE_IDX 1
+#define regVCN_FEATURES 0x0051
+#define regVCN_FEATURES_BASE_IDX 1
+#define regUVD_GPUIOV_STATUS 0x0055
+#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_SCRATCH15 0x005c
+#define regUVD_SCRATCH15_BASE_IDX 1
+#define regUVD_VERSION 0x005d
+#define regUVD_VERSION_BASE_IDX 1
+#define regVCN_UMSCH_CNTL 0x005e
+#define regVCN_UMSCH_CNTL_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL 0x0068
+#define regVCN_JPEG_DB_CTRL_BASE_IDX 1
+#define regVCN_RB1_DB_CTRL 0x0072
+#define regVCN_RB1_DB_CTRL_BASE_IDX 1
+#define regVCN_RB2_DB_CTRL 0x0073
+#define regVCN_RB2_DB_CTRL_BASE_IDX 1
+#define regVCN_RB3_DB_CTRL 0x0074
+#define regVCN_RB3_DB_CTRL_BASE_IDX 1
+#define regVCN_RB4_DB_CTRL 0x0075
+#define regVCN_RB4_DB_CTRL_BASE_IDX 1
+#define regVCN_UMSCH_RB_DB_CTRL 0x0076
+#define regVCN_UMSCH_RB_DB_CTRL_BASE_IDX 1
+#define regVCN_RB_DB_CTRL 0x0077
+#define regVCN_RB_DB_CTRL_BASE_IDX 1
+#define regVCN_AGDB_CTRL0 0x0079
+#define regVCN_AGDB_CTRL0_BASE_IDX 1
+#define regVCN_AGDB_CTRL1 0x007a
+#define regVCN_AGDB_CTRL1_BASE_IDX 1
+#define regVCN_AGDB_CTRL2 0x007b
+#define regVCN_AGDB_CTRL2_BASE_IDX 1
+#define regVCN_AGDB_CTRL3 0x007c
+#define regVCN_AGDB_CTRL3_BASE_IDX 1
+#define regVCN_AGDB_CTRL4 0x007d
+#define regVCN_AGDB_CTRL4_BASE_IDX 1
+#define regVCN_AGDB_CTRL5 0x007e
+#define regVCN_AGDB_CTRL5_BASE_IDX 1
+#define regVCN_AGDB_MASK0 0x007f
+#define regVCN_AGDB_MASK0_BASE_IDX 1
+#define regVCN_AGDB_MASK1 0x0080
+#define regVCN_AGDB_MASK1_BASE_IDX 1
+#define regVCN_AGDB_MASK2 0x0081
+#define regVCN_AGDB_MASK2_BASE_IDX 1
+#define regVCN_AGDB_MASK3 0x0082
+#define regVCN_AGDB_MASK3_BASE_IDX 1
+#define regVCN_AGDB_MASK4 0x0083
+#define regVCN_AGDB_MASK4_BASE_IDX 1
+#define regVCN_AGDB_MASK5 0x0084
+#define regVCN_AGDB_MASK5_BASE_IDX 1
+#define regVCN_RB_ENABLE 0x0085
+#define regVCN_RB_ENABLE_BASE_IDX 1
+#define regVCN_RB_WPTR_CTRL 0x0086
+#define regVCN_RB_WPTR_CTRL_BASE_IDX 1
+#define regUVD_RB_RPTR 0x00ac
+#define regUVD_RB_RPTR_BASE_IDX 1
+#define regUVD_RB_WPTR 0x00ad
+#define regUVD_RB_WPTR_BASE_IDX 1
+#define regUVD_RB_RPTR2 0x00ae
+#define regUVD_RB_RPTR2_BASE_IDX 1
+#define regUVD_RB_WPTR2 0x00af
+#define regUVD_RB_WPTR2_BASE_IDX 1
+#define regUVD_RB_RPTR3 0x00b0
+#define regUVD_RB_RPTR3_BASE_IDX 1
+#define regUVD_RB_WPTR3 0x00b1
+#define regUVD_RB_WPTR3_BASE_IDX 1
+#define regUVD_RB_RPTR4 0x00b2
+#define regUVD_RB_RPTR4_BASE_IDX 1
+#define regUVD_RB_WPTR4 0x00b3
+#define regUVD_RB_WPTR4_BASE_IDX 1
+#define regUVD_OUT_RB_RPTR 0x00b4
+#define regUVD_OUT_RB_RPTR_BASE_IDX 1
+#define regUVD_OUT_RB_WPTR 0x00b5
+#define regUVD_OUT_RB_WPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_RPTR 0x00b6
+#define regUVD_AUDIO_RB_RPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_WPTR 0x00b7
+#define regUVD_AUDIO_RB_WPTR_BASE_IDX 1
+#define regUVD_RBC_RB_RPTR 0x00b8
+#define regUVD_RBC_RB_RPTR_BASE_IDX 1
+#define regUVD_RBC_RB_WPTR 0x00b9
+#define regUVD_RBC_RB_WPTR_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL2 0x00bb
+#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_umsch_dec
+// base address: 0x21500
+#define regVCN_UMSCH_MES_CNTL 0x0740
+#define regVCN_UMSCH_MES_CNTL_BASE_IDX 1
+#define regUMSCH_CTL 0x0741
+#define regUMSCH_CTL_BASE_IDX 1
+#define regUMSCH_CTL2 0x0742
+#define regUMSCH_CTL2_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR0 0x0743
+#define regVCN_UMSCH_AGDB_WPTR0_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR1 0x0744
+#define regVCN_UMSCH_AGDB_WPTR1_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR2 0x0745
+#define regVCN_UMSCH_AGDB_WPTR2_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR3 0x0746
+#define regVCN_UMSCH_AGDB_WPTR3_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR4 0x0747
+#define regVCN_UMSCH_AGDB_WPTR4_BASE_IDX 1
+#define regVCN_UMSCH_AGDB_WPTR5 0x0748
+#define regVCN_UMSCH_AGDB_WPTR5_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX0 0x0749
+#define regVCN_UMSCH_MAILBOX0_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP0 0x074a
+#define regVCN_UMSCH_MAILBOX_RESP0_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX1 0x074b
+#define regVCN_UMSCH_MAILBOX1_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP1 0x074c
+#define regVCN_UMSCH_MAILBOX_RESP1_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX2 0x074d
+#define regVCN_UMSCH_MAILBOX2_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP2 0x074e
+#define regVCN_UMSCH_MAILBOX_RESP2_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX3 0x074f
+#define regVCN_UMSCH_MAILBOX3_BASE_IDX 1
+#define regVCN_UMSCH_MAILBOX_RESP3 0x0750
+#define regVCN_UMSCH_MAILBOX_RESP3_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER0 0x0751
+#define regVCN_UMSCH_SPARE_REGISTER0_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER1 0x0752
+#define regVCN_UMSCH_SPARE_REGISTER1_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER2 0x0753
+#define regVCN_UMSCH_SPARE_REGISTER2_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER3 0x0754
+#define regVCN_UMSCH_SPARE_REGISTER3_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER4 0x0755
+#define regVCN_UMSCH_SPARE_REGISTER4_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER5 0x0756
+#define regVCN_UMSCH_SPARE_REGISTER5_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER6 0x0757
+#define regVCN_UMSCH_SPARE_REGISTER6_BASE_IDX 1
+#define regVCN_UMSCH_SPARE_REGISTER7 0x0758
+#define regVCN_UMSCH_SPARE_REGISTER7_BASE_IDX 1
+#define regVCN_UMSCH_MES_UTCL1_CNTL 0x0759
+#define regVCN_UMSCH_MES_UTCL1_CNTL_BASE_IDX 1
+#define regVCN_UMSCH_MES_BUSY 0x075a
+#define regVCN_UMSCH_MES_BUSY_BASE_IDX 1
+#define regVCN_UMSCH_RB_BASE_LO 0x075b
+#define regVCN_UMSCH_RB_BASE_LO_BASE_IDX 1
+#define regVCN_UMSCH_RB_BASE_HI 0x075c
+#define regVCN_UMSCH_RB_BASE_HI_BASE_IDX 1
+#define regVCN_UMSCH_RB_SIZE 0x075d
+#define regVCN_UMSCH_RB_SIZE_BASE_IDX 1
+#define regVCN_UMSCH_RB_RPTR 0x075e
+#define regVCN_UMSCH_RB_RPTR_BASE_IDX 1
+#define regVCN_UMSCH_RB_WPTR 0x075f
+#define regVCN_UMSCH_RB_WPTR_BASE_IDX 1
+#define regVCN_UMSCH_MASTINT_EN 0x0760
+#define regVCN_UMSCH_MASTINT_EN_BASE_IDX 1
+#define regVCN_UMSCH_IH_CTRL 0x0761
+#define regVCN_UMSCH_IH_CTRL_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_EN 0x0762
+#define regVCN_UMSCH_SYS_INT_EN_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_STATUS 0x0763
+#define regVCN_UMSCH_SYS_INT_STATUS_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_ACK 0x0764
+#define regVCN_UMSCH_SYS_INT_ACK_BASE_IDX 1
+#define regVCN_UMSCH_SYS_INT_SRC 0x0765
+#define regVCN_UMSCH_SYS_INT_SRC_BASE_IDX 1
+#define regVCN_UMSCH_IH_CTX_CTRL 0x0766
+#define regVCN_UMSCH_IH_CTX_CTRL_BASE_IDX 1
+#define regUVD_UMSCH_FORCE 0x076b
+#define regUVD_UMSCH_FORCE_BASE_IDX 1
+#define regUMSCH_MES_RESET_CTRL 0x0770
+#define regUMSCH_MES_RESET_CTRL_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_cprs64dec
+// base address: 0x21600
+#define regVCN_MES_PRGRM_CNTR_START 0x0780
+#define regVCN_MES_PRGRM_CNTR_START_BASE_IDX 1
+#define regVCN_MES_INTR_ROUTINE_START 0x0781
+#define regVCN_MES_INTR_ROUTINE_START_BASE_IDX 1
+#define regVCN_MES_MTVEC_LO 0x0781
+#define regVCN_MES_MTVEC_LO_BASE_IDX 1
+#define regVCN_MES_INTR_ROUTINE_START_HI 0x0782
+#define regVCN_MES_INTR_ROUTINE_START_HI_BASE_IDX 1
+#define regVCN_MES_MTVEC_HI 0x0782
+#define regVCN_MES_MTVEC_HI_BASE_IDX 1
+#define regVCN_MES_CNTL 0x0787
+#define regVCN_MES_CNTL_BASE_IDX 1
+#define regVCN_MES_PIPE_PRIORITY_CNTS 0x0788
+#define regVCN_MES_PIPE_PRIORITY_CNTS_BASE_IDX 1
+#define regVCN_MES_PIPE0_PRIORITY 0x0789
+#define regVCN_MES_PIPE0_PRIORITY_BASE_IDX 1
+#define regVCN_MES_PIPE1_PRIORITY 0x078a
+#define regVCN_MES_PIPE1_PRIORITY_BASE_IDX 1
+#define regVCN_MES_PIPE2_PRIORITY 0x078b
+#define regVCN_MES_PIPE2_PRIORITY_BASE_IDX 1
+#define regVCN_MES_PIPE3_PRIORITY 0x078c
+#define regVCN_MES_PIPE3_PRIORITY_BASE_IDX 1
+#define regVCN_MES_HEADER_DUMP 0x078d
+#define regVCN_MES_HEADER_DUMP_BASE_IDX 1
+#define regVCN_MES_MIE_LO 0x078e
+#define regVCN_MES_MIE_LO_BASE_IDX 1
+#define regVCN_MES_MIE_HI 0x078f
+#define regVCN_MES_MIE_HI_BASE_IDX 1
+#define regVCN_MES_INTERRUPT 0x0790
+#define regVCN_MES_INTERRUPT_BASE_IDX 1
+#define regVCN_MES_SCRATCH_INDEX 0x0791
+#define regVCN_MES_SCRATCH_INDEX_BASE_IDX 1
+#define regVCN_MES_SCRATCH_DATA 0x0792
+#define regVCN_MES_SCRATCH_DATA_BASE_IDX 1
+#define regVCN_MES_INSTR_PNTR 0x0793
+#define regVCN_MES_INSTR_PNTR_BASE_IDX 1
+#define regVCN_MES_MSCRATCH_HI 0x0794
+#define regVCN_MES_MSCRATCH_HI_BASE_IDX 1
+#define regVCN_MES_MSCRATCH_LO 0x0795
+#define regVCN_MES_MSCRATCH_LO_BASE_IDX 1
+#define regVCN_MES_MSTATUS_LO 0x0796
+#define regVCN_MES_MSTATUS_LO_BASE_IDX 1
+#define regVCN_MES_MSTATUS_HI 0x0797
+#define regVCN_MES_MSTATUS_HI_BASE_IDX 1
+#define regVCN_MES_MEPC_LO 0x0798
+#define regVCN_MES_MEPC_LO_BASE_IDX 1
+#define regVCN_MES_MEPC_HI 0x0799
+#define regVCN_MES_MEPC_HI_BASE_IDX 1
+#define regVCN_MES_MCAUSE_LO 0x079a
+#define regVCN_MES_MCAUSE_LO_BASE_IDX 1
+#define regVCN_MES_MCAUSE_HI 0x079b
+#define regVCN_MES_MCAUSE_HI_BASE_IDX 1
+#define regVCN_MES_MBADADDR_LO 0x079c
+#define regVCN_MES_MBADADDR_LO_BASE_IDX 1
+#define regVCN_MES_MBADADDR_HI 0x079d
+#define regVCN_MES_MBADADDR_HI_BASE_IDX 1
+#define regVCN_MES_MIP_LO 0x079e
+#define regVCN_MES_MIP_LO_BASE_IDX 1
+#define regVCN_MES_MIP_HI 0x079f
+#define regVCN_MES_MIP_HI_BASE_IDX 1
+#define regVCN_MES_IC_OP_CNTL 0x07a0
+#define regVCN_MES_IC_OP_CNTL_BASE_IDX 1
+#define regVCN_MES_MCYCLE_LO 0x07a6
+#define regVCN_MES_MCYCLE_LO_BASE_IDX 1
+#define regVCN_MES_MCYCLE_HI 0x07a7
+#define regVCN_MES_MCYCLE_HI_BASE_IDX 1
+#define regVCN_MES_MTIME_LO 0x07a8
+#define regVCN_MES_MTIME_LO_BASE_IDX 1
+#define regVCN_MES_MTIME_HI 0x07a9
+#define regVCN_MES_MTIME_HI_BASE_IDX 1
+#define regVCN_MES_MINSTRET_LO 0x07aa
+#define regVCN_MES_MINSTRET_LO_BASE_IDX 1
+#define regVCN_MES_MINSTRET_HI 0x07ab
+#define regVCN_MES_MINSTRET_HI_BASE_IDX 1
+#define regVCN_MES_MISA_LO 0x07ac
+#define regVCN_MES_MISA_LO_BASE_IDX 1
+#define regVCN_MES_MISA_HI 0x07ad
+#define regVCN_MES_MISA_HI_BASE_IDX 1
+#define regVCN_MES_MVENDORID_LO 0x07ae
+#define regVCN_MES_MVENDORID_LO_BASE_IDX 1
+#define regVCN_MES_MVENDORID_HI 0x07af
+#define regVCN_MES_MVENDORID_HI_BASE_IDX 1
+#define regVCN_MES_MARCHID_LO 0x07b0
+#define regVCN_MES_MARCHID_LO_BASE_IDX 1
+#define regVCN_MES_MARCHID_HI 0x07b1
+#define regVCN_MES_MARCHID_HI_BASE_IDX 1
+#define regVCN_MES_MIMPID_LO 0x07b2
+#define regVCN_MES_MIMPID_LO_BASE_IDX 1
+#define regVCN_MES_MIMPID_HI 0x07b3
+#define regVCN_MES_MIMPID_HI_BASE_IDX 1
+#define regVCN_MES_MHARTID_LO 0x07b4
+#define regVCN_MES_MHARTID_LO_BASE_IDX 1
+#define regVCN_MES_MHARTID_HI 0x07b5
+#define regVCN_MES_MHARTID_HI_BASE_IDX 1
+#define regVCN_MES_DC_BASE_CNTL 0x07b6
+#define regVCN_MES_DC_BASE_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_OP_CNTL 0x07b7
+#define regVCN_MES_DC_OP_CNTL_BASE_IDX 1
+#define regVCN_MES_MTIMECMP_LO 0x07b8
+#define regVCN_MES_MTIMECMP_LO_BASE_IDX 1
+#define regVCN_MES_MTIMECMP_HI 0x07b9
+#define regVCN_MES_MTIMECMP_HI_BASE_IDX 1
+#define regVCN_MES_GP0_LO 0x07c3
+#define regVCN_MES_GP0_LO_BASE_IDX 1
+#define regVCN_MES_GP0_HI 0x07c4
+#define regVCN_MES_GP0_HI_BASE_IDX 1
+#define regVCN_MES_GP1_LO 0x07c5
+#define regVCN_MES_GP1_LO_BASE_IDX 1
+#define regVCN_MES_GP1_HI 0x07c6
+#define regVCN_MES_GP1_HI_BASE_IDX 1
+#define regVCN_MES_GP2_LO 0x07c7
+#define regVCN_MES_GP2_LO_BASE_IDX 1
+#define regVCN_MES_GP2_HI 0x07c8
+#define regVCN_MES_GP2_HI_BASE_IDX 1
+#define regVCN_MES_GP3_LO 0x07c9
+#define regVCN_MES_GP3_LO_BASE_IDX 1
+#define regVCN_MES_GP3_HI 0x07ca
+#define regVCN_MES_GP3_HI_BASE_IDX 1
+#define regVCN_MES_GP4_LO 0x07cb
+#define regVCN_MES_GP4_LO_BASE_IDX 1
+#define regVCN_MES_GP4_HI 0x07cc
+#define regVCN_MES_GP4_HI_BASE_IDX 1
+#define regVCN_MES_GP5_LO 0x07cd
+#define regVCN_MES_GP5_LO_BASE_IDX 1
+#define regVCN_MES_GP5_HI 0x07ce
+#define regVCN_MES_GP5_HI_BASE_IDX 1
+#define regVCN_MES_GP6_LO 0x07cf
+#define regVCN_MES_GP6_LO_BASE_IDX 1
+#define regVCN_MES_GP6_HI 0x07d0
+#define regVCN_MES_GP6_HI_BASE_IDX 1
+#define regVCN_MES_GP7_LO 0x07d1
+#define regVCN_MES_GP7_LO_BASE_IDX 1
+#define regVCN_MES_GP7_HI 0x07d2
+#define regVCN_MES_GP7_HI_BASE_IDX 1
+#define regVCN_MES_GP8_LO 0x07d3
+#define regVCN_MES_GP8_LO_BASE_IDX 1
+#define regVCN_MES_GP8_HI 0x07d4
+#define regVCN_MES_GP8_HI_BASE_IDX 1
+#define regVCN_MES_GP9_LO 0x07d5
+#define regVCN_MES_GP9_LO_BASE_IDX 1
+#define regVCN_MES_GP9_HI 0x07d6
+#define regVCN_MES_GP9_HI_BASE_IDX 1
+#define regVCN_MES_DM_INDEX_ADDR 0x0800
+#define regVCN_MES_DM_INDEX_ADDR_BASE_IDX 1
+#define regVCN_MES_DM_INDEX_DATA 0x0801
+#define regVCN_MES_DM_INDEX_DATA_BASE_IDX 1
+#define regVCN_MES_LOCAL_BASE0_LO 0x0803
+#define regVCN_MES_LOCAL_BASE0_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_BASE0_HI 0x0804
+#define regVCN_MES_LOCAL_BASE0_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_MASK0_LO 0x0805
+#define regVCN_MES_LOCAL_MASK0_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_MASK0_HI 0x0806
+#define regVCN_MES_LOCAL_MASK0_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_APERTURE 0x0807
+#define regVCN_MES_LOCAL_APERTURE_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_BASE_LO 0x0808
+#define regVCN_MES_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_BASE_HI 0x0809
+#define regVCN_MES_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_MASK_LO 0x080a
+#define regVCN_MES_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_MASK_HI 0x080b
+#define regVCN_MES_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regVCN_MES_LOCAL_INSTR_APERTURE 0x080c
+#define regVCN_MES_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regVCN_MES_LOCAL_SCRATCH_APERTURE 0x080d
+#define regVCN_MES_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regVCN_MES_LOCAL_SCRATCH_BASE_LO 0x080e
+#define regVCN_MES_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regVCN_MES_LOCAL_SCRATCH_BASE_HI 0x080f
+#define regVCN_MES_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regVCN_MES_PERFCOUNT_CNTL 0x0819
+#define regVCN_MES_PERFCOUNT_CNTL_BASE_IDX 1
+#define regVCN_MES_PENDING_INTERRUPT 0x081a
+#define regVCN_MES_PENDING_INTERRUPT_BASE_IDX 1
+#define regVCN_MES_PRGRM_CNTR_START_HI 0x081d
+#define regVCN_MES_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_16 0x081f
+#define regVCN_MES_INTERRUPT_DATA_16_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_17 0x0820
+#define regVCN_MES_INTERRUPT_DATA_17_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_18 0x0821
+#define regVCN_MES_INTERRUPT_DATA_18_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_19 0x0822
+#define regVCN_MES_INTERRUPT_DATA_19_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_20 0x0823
+#define regVCN_MES_INTERRUPT_DATA_20_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_21 0x0824
+#define regVCN_MES_INTERRUPT_DATA_21_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_22 0x0825
+#define regVCN_MES_INTERRUPT_DATA_22_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_23 0x0826
+#define regVCN_MES_INTERRUPT_DATA_23_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_24 0x0827
+#define regVCN_MES_INTERRUPT_DATA_24_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_25 0x0828
+#define regVCN_MES_INTERRUPT_DATA_25_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_26 0x0829
+#define regVCN_MES_INTERRUPT_DATA_26_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_27 0x082a
+#define regVCN_MES_INTERRUPT_DATA_27_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_28 0x082b
+#define regVCN_MES_INTERRUPT_DATA_28_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_29 0x082c
+#define regVCN_MES_INTERRUPT_DATA_29_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_30 0x082d
+#define regVCN_MES_INTERRUPT_DATA_30_BASE_IDX 1
+#define regVCN_MES_INTERRUPT_DATA_31 0x082e
+#define regVCN_MES_INTERRUPT_DATA_31_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE0_BASE 0x082f
+#define regVCN_MES_DC_APERTURE0_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE0_MASK 0x0830
+#define regVCN_MES_DC_APERTURE0_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE0_CNTL 0x0831
+#define regVCN_MES_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE1_BASE 0x0832
+#define regVCN_MES_DC_APERTURE1_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE1_MASK 0x0833
+#define regVCN_MES_DC_APERTURE1_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE1_CNTL 0x0834
+#define regVCN_MES_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE2_BASE 0x0835
+#define regVCN_MES_DC_APERTURE2_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE2_MASK 0x0836
+#define regVCN_MES_DC_APERTURE2_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE2_CNTL 0x0837
+#define regVCN_MES_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE3_BASE 0x0838
+#define regVCN_MES_DC_APERTURE3_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE3_MASK 0x0839
+#define regVCN_MES_DC_APERTURE3_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE3_CNTL 0x083a
+#define regVCN_MES_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE4_BASE 0x083b
+#define regVCN_MES_DC_APERTURE4_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE4_MASK 0x083c
+#define regVCN_MES_DC_APERTURE4_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE4_CNTL 0x083d
+#define regVCN_MES_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE5_BASE 0x083e
+#define regVCN_MES_DC_APERTURE5_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE5_MASK 0x083f
+#define regVCN_MES_DC_APERTURE5_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE5_CNTL 0x0840
+#define regVCN_MES_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE6_BASE 0x0841
+#define regVCN_MES_DC_APERTURE6_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE6_MASK 0x0842
+#define regVCN_MES_DC_APERTURE6_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE6_CNTL 0x0843
+#define regVCN_MES_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE7_BASE 0x0844
+#define regVCN_MES_DC_APERTURE7_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE7_MASK 0x0845
+#define regVCN_MES_DC_APERTURE7_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE7_CNTL 0x0846
+#define regVCN_MES_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE8_BASE 0x0847
+#define regVCN_MES_DC_APERTURE8_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE8_MASK 0x0848
+#define regVCN_MES_DC_APERTURE8_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE8_CNTL 0x0849
+#define regVCN_MES_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE9_BASE 0x084a
+#define regVCN_MES_DC_APERTURE9_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE9_MASK 0x084b
+#define regVCN_MES_DC_APERTURE9_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE9_CNTL 0x084c
+#define regVCN_MES_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE10_BASE 0x084d
+#define regVCN_MES_DC_APERTURE10_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE10_MASK 0x084e
+#define regVCN_MES_DC_APERTURE10_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE10_CNTL 0x084f
+#define regVCN_MES_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE11_BASE 0x0850
+#define regVCN_MES_DC_APERTURE11_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE11_MASK 0x0851
+#define regVCN_MES_DC_APERTURE11_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE11_CNTL 0x0852
+#define regVCN_MES_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE12_BASE 0x0853
+#define regVCN_MES_DC_APERTURE12_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE12_MASK 0x0854
+#define regVCN_MES_DC_APERTURE12_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE12_CNTL 0x0855
+#define regVCN_MES_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE13_BASE 0x0856
+#define regVCN_MES_DC_APERTURE13_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE13_MASK 0x0857
+#define regVCN_MES_DC_APERTURE13_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE13_CNTL 0x0858
+#define regVCN_MES_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE14_BASE 0x0859
+#define regVCN_MES_DC_APERTURE14_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE14_MASK 0x085a
+#define regVCN_MES_DC_APERTURE14_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE14_CNTL 0x085b
+#define regVCN_MES_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE15_BASE 0x085c
+#define regVCN_MES_DC_APERTURE15_BASE_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE15_MASK 0x085d
+#define regVCN_MES_DC_APERTURE15_MASK_BASE_IDX 1
+#define regVCN_MES_DC_APERTURE15_CNTL 0x085e
+#define regVCN_MES_DC_APERTURE15_CNTL_BASE_IDX 1
+
+
+// addressBlock: uvd_vcn_hypdec
+// base address: 0x21a00
+#define regVCN_MES_IC_BASE_LO 0x08d0
+#define regVCN_MES_IC_BASE_LO_BASE_IDX 1
+#define regVCN_MES_MIBASE_LO 0x08d0
+#define regVCN_MES_MIBASE_LO_BASE_IDX 1
+#define regVCN_MES_IC_BASE_HI 0x08d1
+#define regVCN_MES_IC_BASE_HI_BASE_IDX 1
+#define regVCN_MES_MIBASE_HI 0x08d1
+#define regVCN_MES_MIBASE_HI_BASE_IDX 1
+#define regVCN_MES_IC_BASE_CNTL 0x08d2
+#define regVCN_MES_IC_BASE_CNTL_BASE_IDX 1
+#define regVCN_MES_DC_BASE_LO 0x08d4
+#define regVCN_MES_DC_BASE_LO_BASE_IDX 1
+#define regVCN_MES_MDBASE_LO 0x08d4
+#define regVCN_MES_MDBASE_LO_BASE_IDX 1
+#define regVCN_MES_DC_BASE_HI 0x08d5
+#define regVCN_MES_DC_BASE_HI_BASE_IDX 1
+#define regVCN_MES_MDBASE_HI 0x08d5
+#define regVCN_MES_MDBASE_HI_BASE_IDX 1
+#define regVCN_MES_MIBOUND_LO 0x08db
+#define regVCN_MES_MIBOUND_LO_BASE_IDX 1
+#define regVCN_MES_MIBOUND_HI 0x08dc
+#define regVCN_MES_MIBOUND_HI_BASE_IDX 1
+#define regVCN_MES_MDBOUND_LO 0x08dd
+#define regVCN_MES_MDBOUND_LO_BASE_IDX 1
+#define regVCN_MES_MDBOUND_HI 0x08de
+#define regVCN_MES_MDBOUND_HI_BASE_IDX 1
+
+
+// addressBlock: uvd_slmi_adpdec
+// base address: 0x21c00
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x0900
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x0901
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x0902
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0903
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0904
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0905
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0906
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0907
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0908
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0909
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x090a
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x090b
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x090c
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x090d
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x090e
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x090f
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_VMID 0x0910
+#define regUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1
+#define regUVD_LMI_MMSCH_CTRL 0x0911
+#define regUVD_LMI_MMSCH_CTRL_BASE_IDX 1
+#define regUVD_MMSCH_LMI_STATUS 0x0912
+#define regUVD_MMSCH_LMI_STATUS_BASE_IDX 1
+#define regUMSCH_IOV_ACTIVE_FCN_ID 0x0920
+#define regUMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regUVD_UMSCH_LMI_STATUS 0x0923
+#define regUVD_UMSCH_LMI_STATUS_BASE_IDX 1
+
+
+// addressBlock: uvdctxind
+// base address: 0x0
+#define ixUVD_CGC_MEM_CTRL 0x0000
+#define ixUVD_CGC_CTRL2 0x0001
+#define ixUVD_CGC_MEM_DS_CTRL 0x0002
+#define ixUVD_CGC_MEM_SD_CTRL 0x0003
+#define ixUVD_SW_SCRATCH_00 0x0004
+#define ixUVD_SW_SCRATCH_01 0x0005
+#define ixUVD_SW_SCRATCH_02 0x0006
+#define ixUVD_SW_SCRATCH_03 0x0007
+#define ixUVD_SW_SCRATCH_04 0x0008
+#define ixUVD_SW_SCRATCH_05 0x0009
+#define ixUVD_SW_SCRATCH_06 0x000a
+#define ixUVD_SW_SCRATCH_07 0x000b
+#define ixUVD_SW_SCRATCH_08 0x000c
+#define ixUVD_SW_SCRATCH_09 0x000d
+#define ixUVD_SW_SCRATCH_10 0x000e
+#define ixUVD_SW_SCRATCH_11 0x000f
+#define ixUVD_SW_SCRATCH_12 0x0010
+#define ixUVD_SW_SCRATCH_13 0x0011
+#define ixUVD_SW_SCRATCH_14 0x0012
+#define ixUVD_SW_SCRATCH_15 0x0013
+#define ixUVD_IH_SEM_CTRL 0x001e
+
+
+// addressBlock: lmi_adp_indirect
+// base address: 0x0
+#define ixUVD_LMI_CRC0 0x0000
+#define ixUVD_LMI_CRC1 0x0001
+#define ixUVD_LMI_CRC2 0x0002
+#define ixUVD_LMI_CRC3 0x0003
+#define ixUVD_LMI_CRC10 0x000a
+#define ixUVD_LMI_CRC11 0x000b
+#define ixUVD_LMI_CRC12 0x000c
+#define ixUVD_LMI_CRC13 0x000d
+#define ixUVD_LMI_CRC14 0x000e
+#define ixUVD_LMI_CRC15 0x000f
+#define ixUVD_LMI_SWAP_CNTL2 0x0029
+#define ixUVD_MEMCHECK_SYS_INT_EN 0x0134
+#define ixUVD_MEMCHECK_SYS_INT_STAT 0x0135
+#define ixUVD_MEMCHECK_SYS_INT_ACK 0x0136
+#define ixUVD_MEMCHECK_VCPU_INT_EN 0x0137
+#define ixUVD_MEMCHECK_VCPU_INT_STAT 0x0138
+#define ixUVD_MEMCHECK_VCPU_INT_ACK 0x0139
+#define ixUVD_MEMCHECK2_SYS_INT_STAT 0x0140
+#define ixUVD_MEMCHECK2_SYS_INT_ACK 0x0141
+#define ixUVD_MEMCHECK2_VCPU_INT_STAT 0x0142
+#define ixUVD_MEMCHECK2_VCPU_INT_ACK 0x0143
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
new file mode 100644
index 000000000000..5c119a6b87fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -0,0 +1,7627 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_5_0_0_SH_MASK_HEADER
+#define _vcn_5_0_0_SH_MASK_HEADER
+
+
+// addressBlock: uvd_uvddec
+//UVD_TOP_CTRL
+#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0
+#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4
+#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL
+#define UVD_TOP_CTRL__STD_VERSION_MASK 0x00000010L
+//UVD_CGC_GATE
+#define UVD_CGC_GATE__SYS__SHIFT 0x0
+#define UVD_CGC_GATE__UDEC__SHIFT 0x1
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x2
+#define UVD_CGC_GATE__REGS__SHIFT 0x3
+#define UVD_CGC_GATE__RBC__SHIFT 0x4
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6
+#define UVD_CGC_GATE__IDCT__SHIFT 0x7
+#define UVD_CGC_GATE__MPRD__SHIFT 0x8
+#define UVD_CGC_GATE__MPC__SHIFT 0x9
+#define UVD_CGC_GATE__LBSI__SHIFT 0xa
+#define UVD_CGC_GATE__LRBBM__SHIFT 0xb
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10
+#define UVD_CGC_GATE__WCB__SHIFT 0x11
+#define UVD_CGC_GATE__VCPU__SHIFT 0x12
+#define UVD_CGC_GATE__MMSCH__SHIFT 0x14
+#define UVD_CGC_GATE__LCM0__SHIFT 0x15
+#define UVD_CGC_GATE__LCM1__SHIFT 0x16
+#define UVD_CGC_GATE__MIF__SHIFT 0x17
+#define UVD_CGC_GATE__VREG__SHIFT 0x18
+#define UVD_CGC_GATE__PE__SHIFT 0x19
+#define UVD_CGC_GATE__PPU__SHIFT 0x1a
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L
+#define UVD_CGC_GATE__LCM0_MASK 0x00200000L
+#define UVD_CGC_GATE__LCM1_MASK 0x00400000L
+#define UVD_CGC_GATE__MIF_MASK 0x00800000L
+#define UVD_CGC_GATE__VREG_MASK 0x01000000L
+#define UVD_CGC_GATE__PE_MASK 0x02000000L
+#define UVD_CGC_GATE__PPU_MASK 0x04000000L
+//UVD_CGC_CTRL
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d
+#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL
+#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L
+#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
+#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
+#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
+#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
+#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
+#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
+#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
+#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
+#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
+#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
+#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
+#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
+#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
+#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
+#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L
+//AVM_SUVD_CGC_GATE
+#define AVM_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define AVM_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define AVM_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define AVM_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define AVM_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define AVM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define AVM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define AVM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define AVM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define AVM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define AVM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define AVM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define AVM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define AVM_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define AVM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define AVM_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define AVM_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define AVM_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define AVM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define AVM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define AVM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define AVM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define AVM_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define AVM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define AVM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define AVM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define AVM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define AVM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define AVM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define AVM_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define AVM_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define AVM_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define AVM_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define AVM_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define AVM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define AVM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define AVM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define AVM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define AVM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define AVM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define AVM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define AVM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define AVM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define AVM_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define AVM_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define AVM_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define AVM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define AVM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define AVM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define AVM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define AVM_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define AVM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define AVM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define AVM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define AVM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define AVM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define AVM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//EFC_SUVD_CGC_GATE
+#define EFC_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define EFC_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define EFC_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define EFC_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define EFC_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define EFC_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define EFC_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define EFC_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define EFC_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define EFC_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define EFC_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define EFC_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define EFC_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define EFC_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define EFC_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define EFC_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define EFC_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define EFC_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define EFC_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define EFC_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define EFC_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define EFC_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define EFC_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define EFC_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define EFC_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define EFC_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define EFC_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define EFC_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define EFC_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define EFC_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define EFC_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define EFC_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define EFC_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define EFC_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define EFC_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define EFC_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define EFC_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define EFC_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define EFC_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define EFC_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define EFC_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define EFC_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define EFC_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define EFC_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define EFC_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define EFC_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define EFC_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define EFC_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define EFC_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define EFC_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define EFC_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define EFC_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define EFC_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define EFC_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define EFC_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define EFC_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define EFC_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//ENT_SUVD_CGC_GATE
+#define ENT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define ENT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define ENT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define ENT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define ENT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define ENT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define ENT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define ENT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define ENT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define ENT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define ENT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define ENT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define ENT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define ENT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define ENT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define ENT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define ENT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define ENT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define ENT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define ENT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define ENT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define ENT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define ENT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define ENT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define ENT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define ENT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define ENT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define ENT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define ENT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define ENT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define ENT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define ENT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define ENT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define ENT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define ENT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define ENT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define ENT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define ENT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define ENT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define ENT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define ENT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define ENT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define ENT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define ENT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define ENT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define ENT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define ENT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define ENT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define ENT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define ENT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define ENT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define ENT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define ENT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define ENT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define ENT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define ENT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define ENT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//IME_SUVD_CGC_GATE
+#define IME_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define IME_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define IME_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define IME_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define IME_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define IME_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define IME_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define IME_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define IME_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define IME_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define IME_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define IME_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define IME_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define IME_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define IME_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define IME_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define IME_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define IME_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define IME_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define IME_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define IME_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define IME_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define IME_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define IME_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define IME_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define IME_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define IME_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define IME_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define IME_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define IME_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define IME_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define IME_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define IME_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define IME_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define IME_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define IME_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define IME_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define IME_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define IME_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define IME_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define IME_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define IME_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define IME_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define IME_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define IME_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define IME_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define IME_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define IME_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define IME_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define IME_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define IME_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define IME_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define IME_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define IME_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define IME_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define IME_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define IME_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define IME_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define IME_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//PPU_SUVD_CGC_GATE
+#define PPU_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define PPU_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define PPU_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define PPU_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define PPU_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define PPU_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define PPU_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define PPU_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define PPU_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define PPU_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define PPU_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define PPU_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define PPU_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define PPU_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define PPU_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define PPU_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define PPU_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define PPU_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define PPU_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define PPU_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define PPU_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define PPU_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define PPU_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define PPU_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define PPU_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define PPU_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define PPU_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define PPU_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define PPU_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define PPU_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define PPU_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define PPU_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define PPU_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define PPU_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define PPU_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define PPU_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define PPU_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define PPU_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define PPU_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define PPU_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define PPU_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define PPU_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define PPU_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define PPU_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define PPU_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define PPU_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define PPU_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define PPU_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define PPU_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define PPU_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define PPU_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define PPU_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define PPU_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define PPU_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define PPU_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define PPU_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define PPU_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SAOE_SUVD_CGC_GATE
+#define SAOE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SAOE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SAOE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SAOE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SAOE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SAOE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SAOE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SAOE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SAOE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SAOE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SAOE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SAOE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SAOE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SAOE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SAOE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SAOE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SAOE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SAOE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SAOE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SAOE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SAOE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SAOE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SAOE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SAOE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SAOE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SAOE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SAOE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SAOE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SAOE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SAOE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SAOE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SAOE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SAOE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SAOE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SAOE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SAOE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SAOE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SAOE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SAOE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SAOE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SAOE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SAOE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SAOE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SAOE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SAOE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SAOE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SAOE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SAOE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SAOE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SAOE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SAOE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SAOE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SAOE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SAOE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SAOE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SAOE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SCM_SUVD_CGC_GATE
+#define SCM_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SCM_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SCM_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SCM_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SCM_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SCM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SCM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SCM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SCM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SCM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SCM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SCM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SCM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SCM_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SCM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SCM_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SCM_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SCM_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SCM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SCM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SCM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SCM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SCM_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SCM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SCM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SCM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SCM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SCM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SCM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SCM_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SCM_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SCM_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SCM_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SCM_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SCM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SCM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SCM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SCM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SCM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SCM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SCM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SCM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SCM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SCM_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SCM_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SCM_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SCM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SCM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SCM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SCM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SCM_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SCM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SCM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SCM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SCM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SCM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SCM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SDB_SUVD_CGC_GATE
+#define SDB_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SDB_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SDB_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SDB_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SDB_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SDB_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SDB_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SDB_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SDB_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SDB_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SDB_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SDB_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SDB_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SDB_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SDB_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SDB_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SDB_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SDB_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SDB_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SDB_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SDB_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SDB_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SDB_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SDB_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SDB_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SDB_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SDB_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SDB_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SDB_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SDB_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SDB_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SDB_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SDB_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SDB_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SDB_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SDB_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SDB_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SDB_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SDB_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SDB_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SDB_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SDB_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SDB_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SDB_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SDB_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SDB_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SDB_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SDB_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SDB_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SDB_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SDB_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SDB_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SDB_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SDB_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SDB_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SDB_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SDB_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT0_NXT_SUVD_CGC_GATE
+#define SIT0_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT0_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT0_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT0_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT0_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT0_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT0_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT0_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT0_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT0_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT0_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT0_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT1_NXT_SUVD_CGC_GATE
+#define SIT1_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT1_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT1_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT1_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT1_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT1_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT1_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT1_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT1_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT1_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT1_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT1_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT2_NXT_SUVD_CGC_GATE
+#define SIT2_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT2_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT2_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT2_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT2_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT2_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT2_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT2_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT2_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT2_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT2_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT2_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT_SUVD_CGC_GATE
+#define SIT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SMPA_SUVD_CGC_GATE
+#define SMPA_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SMPA_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SMPA_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SMPA_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SMPA_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SMPA_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SMPA_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SMPA_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SMPA_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SMPA_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SMPA_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SMPA_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SMPA_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SMPA_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SMPA_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SMPA_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SMPA_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SMPA_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SMPA_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SMPA_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SMPA_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SMPA_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SMPA_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SMPA_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SMPA_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SMPA_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SMPA_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SMPA_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SMPA_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SMPA_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SMPA_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SMPA_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SMPA_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SMPA_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SMPA_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SMPA_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SMPA_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SMPA_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SMPA_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SMPA_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SMPA_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SMPA_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SMPA_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SMPA_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SMPA_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SMPA_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SMPA_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SMPA_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SMPA_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SMPA_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SMPA_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SMPA_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SMPA_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SMPA_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SMPA_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SMPA_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SMP_SUVD_CGC_GATE
+#define SMP_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SMP_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SMP_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SMP_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SMP_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SMP_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SMP_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SMP_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SMP_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SMP_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SMP_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SMP_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SMP_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SMP_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SMP_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SMP_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SMP_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SMP_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SMP_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SMP_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SMP_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SMP_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SMP_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SMP_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SMP_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SMP_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SMP_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SMP_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SMP_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SMP_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SMP_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SMP_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SMP_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SMP_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SMP_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SMP_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SMP_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SMP_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SMP_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SMP_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SMP_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SMP_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SMP_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SMP_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SMP_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SMP_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SMP_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SMP_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SMP_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SMP_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SMP_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SMP_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SMP_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SMP_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SMP_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SMP_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SMP_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SRE_SUVD_CGC_GATE
+#define SRE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SRE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SRE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SRE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SRE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SRE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SRE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SRE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SRE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SRE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SRE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SRE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SRE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SRE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SRE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SRE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SRE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SRE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SRE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SRE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SRE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SRE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SRE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SRE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SRE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SRE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SRE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SRE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SRE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SRE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SRE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SRE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SRE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SRE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SRE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SRE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SRE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SRE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SRE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SRE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SRE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SRE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SRE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SRE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SRE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SRE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SRE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SRE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SRE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SRE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SRE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SRE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SRE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SRE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SRE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SRE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SRE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//UVD_SUVD_CGC_GATE
+#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//AVM_SUVD_CGC_GATE2
+#define AVM_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define AVM_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define AVM_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define AVM_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define AVM_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define AVM_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define AVM_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define AVM_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define AVM_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define AVM_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define AVM_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define AVM_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define AVM_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define AVM_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define AVM_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define AVM_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//DBR_SUVD_CGC_GATE2
+#define DBR_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define DBR_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define DBR_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define DBR_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define DBR_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define DBR_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define DBR_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define DBR_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define DBR_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define DBR_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define DBR_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define DBR_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define DBR_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define DBR_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define DBR_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define DBR_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//ENT_SUVD_CGC_GATE2
+#define ENT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define ENT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define ENT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define ENT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define ENT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define ENT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define ENT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define ENT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define ENT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define ENT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define ENT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define ENT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define ENT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define ENT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define ENT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define ENT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//IME_SUVD_CGC_GATE2
+#define IME_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define IME_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define IME_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define IME_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define IME_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define IME_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define IME_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define IME_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define IME_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define IME_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define IME_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define IME_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define IME_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define IME_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define IME_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define IME_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SAOE_SUVD_CGC_GATE2
+#define SAOE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SAOE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SAOE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SAOE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SAOE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SAOE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SAOE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SAOE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SAOE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SAOE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SAOE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SAOE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SAOE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SAOE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SAOE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SAOE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SDB_SUVD_CGC_GATE2
+#define SDB_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SDB_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SDB_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SDB_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SDB_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SDB_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SDB_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SDB_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SDB_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SDB_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SDB_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SDB_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SDB_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SDB_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SDB_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SDB_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT0_NXT_SUVD_CGC_GATE2
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT1_NXT_SUVD_CGC_GATE2
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT2_NXT_SUVD_CGC_GATE2
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT_SUVD_CGC_GATE2
+#define SIT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SMPA_SUVD_CGC_GATE2
+#define SMPA_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SMPA_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SMPA_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SMPA_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SMPA_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SMPA_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SMPA_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SMPA_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SMPA_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SMPA_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SMPA_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SMPA_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SMPA_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SMPA_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SMPA_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SMPA_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SMP_SUVD_CGC_GATE2
+#define SMP_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SMP_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SMP_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SMP_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SMP_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SMP_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SMP_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SMP_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SMP_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SMP_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SMP_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SMP_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SMP_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SMP_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SMP_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SMP_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SRE_SUVD_CGC_GATE2
+#define SRE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SRE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SRE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SRE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SRE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SRE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SRE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SRE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SRE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SRE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SRE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SRE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SRE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SRE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SRE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SRE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//UVD_SUVD_CGC_GATE2
+#define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//AVM_SUVD_CGC_CTRL
+#define AVM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define AVM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define AVM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define AVM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define AVM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define AVM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define AVM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define AVM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define AVM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define AVM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define AVM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define AVM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define AVM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define AVM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define AVM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define AVM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define AVM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define AVM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define AVM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define AVM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define AVM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define AVM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define AVM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define AVM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define AVM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define AVM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define AVM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define AVM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define AVM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define AVM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define AVM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define AVM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define AVM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define AVM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define AVM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define AVM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define AVM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define AVM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define AVM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define AVM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//DBR_SUVD_CGC_CTRL
+#define DBR_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define DBR_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define DBR_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define DBR_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define DBR_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define DBR_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define DBR_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define DBR_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define DBR_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define DBR_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define DBR_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define DBR_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define DBR_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define DBR_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define DBR_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define DBR_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define DBR_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define DBR_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define DBR_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define DBR_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define DBR_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define DBR_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define DBR_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define DBR_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define DBR_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define DBR_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define DBR_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define DBR_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define DBR_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define DBR_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define DBR_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define DBR_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define DBR_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define DBR_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define DBR_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define DBR_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define DBR_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define DBR_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define DBR_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define DBR_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//EFC_SUVD_CGC_CTRL
+#define EFC_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define EFC_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define EFC_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define EFC_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define EFC_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define EFC_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define EFC_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define EFC_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define EFC_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define EFC_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define EFC_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define EFC_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define EFC_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define EFC_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define EFC_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define EFC_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define EFC_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define EFC_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define EFC_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define EFC_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define EFC_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define EFC_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define EFC_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define EFC_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define EFC_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define EFC_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define EFC_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define EFC_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define EFC_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define EFC_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define EFC_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define EFC_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define EFC_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define EFC_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define EFC_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define EFC_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define EFC_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define EFC_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define EFC_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define EFC_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//ENT_SUVD_CGC_CTRL
+#define ENT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define ENT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define ENT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define ENT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define ENT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define ENT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define ENT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define ENT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define ENT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define ENT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define ENT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define ENT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define ENT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define ENT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define ENT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define ENT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define ENT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define ENT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define ENT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define ENT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define ENT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define ENT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define ENT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define ENT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define ENT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define ENT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define ENT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define ENT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define ENT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define ENT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define ENT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define ENT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define ENT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define ENT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define ENT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define ENT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define ENT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define ENT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//IME_SUVD_CGC_CTRL
+#define IME_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define IME_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define IME_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define IME_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define IME_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define IME_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define IME_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define IME_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define IME_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define IME_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define IME_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define IME_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define IME_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define IME_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define IME_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define IME_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define IME_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define IME_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define IME_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define IME_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define IME_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define IME_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define IME_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define IME_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define IME_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define IME_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define IME_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define IME_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define IME_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define IME_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define IME_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define IME_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define IME_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define IME_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define IME_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define IME_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define IME_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define IME_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define IME_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define IME_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//PPU_SUVD_CGC_CTRL
+#define PPU_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define PPU_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define PPU_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define PPU_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define PPU_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define PPU_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define PPU_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define PPU_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define PPU_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define PPU_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define PPU_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define PPU_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define PPU_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define PPU_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define PPU_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define PPU_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define PPU_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define PPU_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define PPU_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define PPU_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define PPU_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define PPU_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define PPU_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define PPU_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define PPU_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define PPU_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define PPU_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define PPU_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define PPU_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define PPU_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define PPU_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define PPU_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define PPU_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define PPU_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define PPU_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define PPU_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define PPU_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define PPU_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define PPU_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define PPU_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SAOE_SUVD_CGC_CTRL
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SAOE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SAOE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SAOE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SAOE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SAOE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SAOE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SAOE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SAOE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SAOE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SAOE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SAOE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SAOE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SAOE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SAOE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SAOE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SAOE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SAOE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SAOE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SAOE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SAOE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SAOE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SAOE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SAOE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SAOE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SAOE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SAOE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SAOE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SAOE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SCM_SUVD_CGC_CTRL
+#define SCM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SCM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SCM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SCM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SCM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SCM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SCM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SCM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SCM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SCM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SCM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SCM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SCM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SCM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SCM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SCM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SCM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SCM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SCM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SCM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SCM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SCM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SCM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SCM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SCM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SCM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SCM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SCM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SCM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SCM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SCM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SCM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SCM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SCM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SCM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SCM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SCM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SCM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SCM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SCM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SDB_SUVD_CGC_CTRL
+#define SDB_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SDB_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SDB_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SDB_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SDB_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SDB_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SDB_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SDB_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SDB_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SDB_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SDB_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SDB_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SDB_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SDB_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SDB_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SDB_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SDB_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SDB_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SDB_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SDB_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SDB_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SDB_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SDB_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SDB_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SDB_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SDB_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SDB_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SDB_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SDB_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SDB_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SDB_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SDB_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SDB_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SDB_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SDB_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SDB_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SDB_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SDB_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SDB_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SDB_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT0_NXT_SUVD_CGC_CTRL
+#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT1_NXT_SUVD_CGC_CTRL
+#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT2_NXT_SUVD_CGC_CTRL
+#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT_SUVD_CGC_CTRL
+#define SIT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SMPA_SUVD_CGC_CTRL
+#define SMPA_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SMPA_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SMPA_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SMPA_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SMPA_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SMPA_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SMPA_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SMPA_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SMPA_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SMPA_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SMPA_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SMPA_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SMPA_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SMPA_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SMPA_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SMPA_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SMPA_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SMPA_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SMPA_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SMPA_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SMPA_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SMPA_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SMPA_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SMPA_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SMPA_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SMPA_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SMPA_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SMPA_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SMPA_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SMPA_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SMP_SUVD_CGC_CTRL
+#define SMP_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SMP_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SMP_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SMP_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SMP_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SMP_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SMP_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SMP_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SMP_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SMP_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SMP_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SMP_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SMP_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SMP_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SMP_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SMP_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SMP_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SMP_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SMP_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SMP_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SMP_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SMP_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SMP_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SMP_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SMP_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SMP_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SMP_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SMP_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SMP_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SMP_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SMP_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SMP_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SMP_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SMP_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SMP_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SMP_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SMP_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SMP_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SMP_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SMP_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SRE_SUVD_CGC_CTRL
+#define SRE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SRE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SRE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SRE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SRE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SRE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SRE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SRE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SRE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SRE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SRE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SRE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SRE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SRE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SRE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SRE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SRE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SRE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SRE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SRE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SRE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SRE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SRE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SRE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SRE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SRE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SRE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SRE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SRE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SRE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SRE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SRE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SRE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SRE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SRE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SRE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SRE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SRE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SRE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SRE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_SUVD_CGC_CTRL
+#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_CGC_CTRL3
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY__SHIFT 0x0
+#define UVD_CGC_CTRL3__LCM0_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL3__LCM1_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL3__MIF_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL3__VREG_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL3__PE_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL3__PPU_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY_MASK 0x000000FFL
+#define UVD_CGC_CTRL3__LCM0_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL3__LCM1_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL3__MIF_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL3__VREG_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL3__PE_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL3__PPU_MODE_MASK 0x00010000L
+//UVD_GPCOM_VCPU_DATA0
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_VCPU_DATA1
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_CMD
+#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L
+//UVD_GPCOM_SYS_DATA0
+#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_DATA1
+#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_VCPU_INT_EN
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa
+#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_VCPU_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10
+#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11
+#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a
+#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L
+#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_VCPU_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L
+#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L
+#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_VCPU_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L
+#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L
+//UVD_VCPU_INT_STATUS
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT__SHIFT 0x3
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT__SHIFT 0x4
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT__SHIFT 0x5
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT__SHIFT 0x7
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT__SHIFT 0x9
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT__SHIFT 0xa
+#define UVD_VCPU_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_VCPU_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_VCPU_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT__SHIFT 0x10
+#define UVD_VCPU_INT_STATUS__JOB_START_INT__SHIFT 0x11
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT__SHIFT 0x12
+#define UVD_VCPU_INT_STATUS__GPCOM_INT__SHIFT 0x14
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_VCPU_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_VCPU_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_VCPU_INT_STATUS__AVM_INT__SHIFT 0x1a
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_VCPU_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT__SHIFT 0x1e
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT__SHIFT 0x1f
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT_MASK 0x00000008L
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT_MASK 0x00000010L
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT_MASK 0x00000020L
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT_MASK 0x00000080L
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT_MASK 0x00000200L
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT_MASK 0x00000400L
+#define UVD_VCPU_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_VCPU_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_VCPU_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT_MASK 0x00010000L
+#define UVD_VCPU_INT_STATUS__JOB_START_INT_MASK 0x00020000L
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT_MASK 0x00040000L
+#define UVD_VCPU_INT_STATUS__GPCOM_INT_MASK 0x00100000L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_VCPU_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_VCPU_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_VCPU_INT_STATUS__AVM_INT_MASK 0x04000000L
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_VCPU_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT_MASK 0x40000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT_MASK 0x80000000L
+//UVD_VCPU_INT_ACK
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa
+#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_VCPU_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10
+#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L
+#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_VCPU_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L
+#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L
+//UVD_VCPU_INT_ROUTE
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L
+//UVD_DRV_FW_MSG
+#define UVD_DRV_FW_MSG__MSG__SHIFT 0x0
+#define UVD_DRV_FW_MSG__MSG_MASK 0xFFFFFFFFL
+//UVD_FW_DRV_MSG_ACK
+#define UVD_FW_DRV_MSG_ACK__ACK__SHIFT 0x0
+#define UVD_FW_DRV_MSG_ACK__ACK_MASK 0x00000001L
+//UVD_SUVD_INT_EN
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN__SHIFT 0xc
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN__SHIFT 0x11
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN__SHIFT 0x12
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN__SHIFT 0x17
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN__SHIFT 0x18
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN__SHIFT 0x1d
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN__SHIFT 0x1e
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN_MASK 0x00000800L
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN_MASK 0x0001F000L
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN_MASK 0x00020000L
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN_MASK 0x007C0000L
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN_MASK 0x00800000L
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN_MASK 0x1F000000L
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN_MASK 0x20000000L
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN_MASK 0x40000000L
+//UVD_SUVD_INT_STATUS
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT__SHIFT 0xc
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT__SHIFT 0x11
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT__SHIFT 0x12
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT__SHIFT 0x17
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT__SHIFT 0x18
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT__SHIFT 0x1d
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT__SHIFT 0x1e
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT_MASK 0x00000800L
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT_MASK 0x0001F000L
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT_MASK 0x00020000L
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT_MASK 0x007C0000L
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT_MASK 0x00800000L
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT_MASK 0x1F000000L
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT_MASK 0x20000000L
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT_MASK 0x40000000L
+//UVD_SUVD_INT_ACK
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK__SHIFT 0xc
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK__SHIFT 0x11
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK__SHIFT 0x12
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK__SHIFT 0x17
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK__SHIFT 0x18
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK__SHIFT 0x1d
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK__SHIFT 0x1e
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK_MASK 0x00000800L
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK_MASK 0x0001F000L
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK_MASK 0x00020000L
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK_MASK 0x007C0000L
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK_MASK 0x00800000L
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK_MASK 0x1F000000L
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK_MASK 0x20000000L
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK_MASK 0x40000000L
+//UVD_ENC_VCPU_INT_EN
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_STATUS
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_ACK
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L
+//UVD_MASTINT_EN
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//UVD_SYS_INT_EN
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_SYS_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_SYS_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L
+//UVD_SYS_INT_STATUS
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_SYS_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10
+#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_SYS_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L
+#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L
+//UVD_SYS_INT_ACK
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_SYS_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_SYS_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L
+//UVD_JOB_DONE
+#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0
+#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L
+//UVD_CBUF_ID
+#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0
+#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID2
+#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
+#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL
+//UVD_NO_OP
+#define UVD_NO_OP__NO_OP__SHIFT 0x0
+#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL
+//UVD_RB_BASE_LO
+#define UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI
+#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE
+#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO2
+#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI2
+#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE2
+#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO3
+#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI3
+#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE3
+#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO4
+#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI4
+#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE4
+#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L
+//UVD_OUT_RB_BASE_LO
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_OUT_RB_BASE_HI
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_OUT_RB_SIZE
+#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_IOV_ACTIVE_FCN_ID
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000003FL
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//UVD_IOV_MAILBOX
+#define UVD_IOV_MAILBOX__MAILBOX__SHIFT 0x0
+#define UVD_IOV_MAILBOX__MAILBOX_MASK 0xFFFFFFFFL
+//UVD_IOV_MAILBOX_RESP
+#define UVD_IOV_MAILBOX_RESP__RESP__SHIFT 0x0
+#define UVD_IOV_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+//UVD_RB_ARB_CTRL
+#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1
+#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2
+#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3
+#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4
+#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN__SHIFT 0x9
+#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L
+#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L
+#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L
+#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L
+#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 0x00000020L
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN_MASK 0x00000200L
+//UVD_CTX_INDEX
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x0
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL
+//UVD_CTX_DATA
+#define UVD_CTX_DATA__DATA__SHIFT 0x0
+#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_CXW_WR
+#define UVD_CXW_WR__DAT__SHIFT 0x0
+#define UVD_CXW_WR__STAT__SHIFT 0x1f
+#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL
+#define UVD_CXW_WR__STAT_MASK 0x80000000L
+//UVD_CXW_WR_INT_ID
+#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL
+//UVD_CXW_WR_INT_CTX_ID
+#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL
+//UVD_CXW_INT_ID
+#define UVD_CXW_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL
+//UVD_MPEG2_ERROR
+#define UVD_MPEG2_ERROR__STATUS__SHIFT 0x0
+#define UVD_MPEG2_ERROR__STATUS_MASK 0xFFFFFFFFL
+//UVD_YBASE
+#define UVD_YBASE__DUM__SHIFT 0x0
+#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_UVBASE
+#define UVD_UVBASE__DUM__SHIFT 0x0
+#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_PITCH
+#define UVD_PITCH__DUM__SHIFT 0x0
+#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL
+//UVD_WIDTH
+#define UVD_WIDTH__DUM__SHIFT 0x0
+#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL
+//UVD_HEIGHT
+#define UVD_HEIGHT__DUM__SHIFT 0x0
+#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL
+//UVD_PICCOUNT
+#define UVD_PICCOUNT__DUM__SHIFT 0x0
+#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL
+//UVD_MPRD_INITIAL_XY
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X__SHIFT 0x0
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y__SHIFT 0x10
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X_MASK 0x00000FFFL
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y_MASK 0x0FFF0000L
+//UVD_MPEG2_CTRL
+#define UVD_MPEG2_CTRL__EN__SHIFT 0x0
+#define UVD_MPEG2_CTRL__TRICK_MODE__SHIFT 0x1
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB__SHIFT 0x10
+#define UVD_MPEG2_CTRL__EN_MASK 0x00000001L
+#define UVD_MPEG2_CTRL__TRICK_MODE_MASK 0x00000002L
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB_MASK 0xFFFF0000L
+//UVD_MB_CTL_BUF_BASE
+#define UVD_MB_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_MB_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_PIC_CTL_BUF_BASE
+#define UVD_PIC_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_PIC_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_DXVA_BUF_SIZE
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE__SHIFT 0x0
+#define UVD_DXVA_BUF_SIZE__MB_SIZE__SHIFT 0x10
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE_MASK 0x0000FFFFL
+#define UVD_DXVA_BUF_SIZE__MB_SIZE_MASK 0xFFFF0000L
+//UVD_SCRATCH_NP
+#define UVD_SCRATCH_NP__DATA__SHIFT 0x0
+#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL
+//UVD_CLK_SWT_HANDSHAKE
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE__SHIFT 0x0
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT__SHIFT 0x8
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE_MASK 0x00000003L
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT_MASK 0x00000300L
+//UVD_GP_SCRATCH0
+#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH1
+#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH2
+#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH3
+#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH4
+#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH5
+#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH6
+#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH7
+#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH8
+#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH9
+#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH10
+#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH11
+#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH12
+#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH13
+#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH14
+#define UVD_GP_SCRATCH14__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH15
+#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH16
+#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH17
+#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH18
+#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH19
+#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH20
+#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH21
+#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH22
+#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH23
+#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_BASE_LO
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_AUDIO_RB_BASE_HI
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_SIZE
+#define UVD_AUDIO_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_AUDIO_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_VCPU_INT_STATUS2
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT_MASK 0x00000001L
+//UVD_VCPU_INT_ACK2
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK_MASK 0x00000001L
+//UVD_VCPU_INT_EN2
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN_MASK 0x00000001L
+//UVD_SUVD_CGC_STATUS2
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK_MASK 0x20000000L
+//UVD_SUVD_INT_STATUS2
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L
+//UVD_SUVD_INT_EN2
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L
+//UVD_SUVD_INT_ACK2
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L
+//UVD_STATUS
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x0
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1
+#define UVD_STATUS__FILL_0__SHIFT 0x8
+#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10
+#define UVD_STATUS__DRM_BUSY__SHIFT 0x11
+#define UVD_STATUS__FILL_1__SHIFT 0x12
+#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL
+#define UVD_STATUS__FILL_0_MASK 0x0000FF00L
+#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L
+#define UVD_STATUS__DRM_BUSY_MASK 0x00020000L
+#define UVD_STATUS__FILL_1_MASK 0x7FFC0000L
+#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L
+//UVD_ENC_PIPE_BUSY
+#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY__SHIFT 0xc
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY__SHIFT 0xd
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY__SHIFT 0xe
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY__SHIFT 0xf
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY__SHIFT 0x1f
+#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY_MASK 0x00001000L
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY_MASK 0x00002000L
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY_MASK 0x00004000L
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY_MASK 0x00008000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY_MASK 0x80000000L
+//UVD_FW_POWER_STATUS
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF__SHIFT 0x0
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF__SHIFT 0x1
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF__SHIFT 0x2
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF__SHIFT 0x3
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF__SHIFT 0x4
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF__SHIFT 0x5
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF__SHIFT 0x6
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF__SHIFT 0x7
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF__SHIFT 0x8
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF__SHIFT 0x9
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF__SHIFT 0xa
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF_MASK 0x00000001L
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF_MASK 0x00000002L
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF_MASK 0x00000004L
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF_MASK 0x00000008L
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF_MASK 0x00000010L
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF_MASK 0x00000020L
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF_MASK 0x00000040L
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF_MASK 0x00000080L
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF_MASK 0x00000100L
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF_MASK 0x00000200L
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF_MASK 0x00000400L
+//UVD_CNTL
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11
+#define UVD_CNTL__SUVD_EN__SHIFT 0x13
+#define UVD_CNTL__CABAC_MB_ACC__SHIFT 0x1c
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS__SHIFT 0x1f
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L
+#define UVD_CNTL__SUVD_EN_MASK 0x00080000L
+#define UVD_CNTL__CABAC_MB_ACC_MASK 0x10000000L
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS_MASK 0x80000000L
+//UVD_SOFT_RESET
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8
+#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L
+//UVD_SOFT_RESET2
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_MMSCH_SOFT_RESET
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L
+//UVD_WIG_CTRL
+#define UVD_WIG_CTRL__AVM_SOFT_RESET__SHIFT 0x0
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET__SHIFT 0x1
+#define UVD_WIG_CTRL__WIG_SOFT_RESET__SHIFT 0x2
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON__SHIFT 0x3
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON__SHIFT 0x4
+#define UVD_WIG_CTRL__AVM_SOFT_RESET_MASK 0x00000001L
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_WIG_CTRL__WIG_SOFT_RESET_MASK 0x00000004L
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON_MASK 0x00000008L
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON_MASK 0x00000010L
+//UVD_CGC_STATUS
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a
+#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b
+#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d
+#define UVD_CGC_STATUS__LRBBM_DCLK__SHIFT 0x1e
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L
+#define UVD_CGC_STATUS__LRBBM_DCLK_MASK 0x40000000L
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L
+//UVD_CGC_UDEC_STATUS
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+//UVD_SUVD_CGC_STATUS
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe
+#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10
+#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L
+#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L
+#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L
+//UVD_GPCOM_VCPU_CMD
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
+
+
+// addressBlock: uvd_vcn_cdefe_cdefe_broadcast_dec0
+//CDEFE_SUVD_CGC_GATE
+#define CDEFE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define CDEFE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define CDEFE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define CDEFE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define CDEFE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define CDEFE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define CDEFE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define CDEFE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define CDEFE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define CDEFE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define CDEFE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define CDEFE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define CDEFE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define CDEFE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define CDEFE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define CDEFE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define CDEFE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define CDEFE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define CDEFE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define CDEFE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define CDEFE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define CDEFE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define CDEFE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define CDEFE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define CDEFE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define CDEFE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define CDEFE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define CDEFE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define CDEFE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define CDEFE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define CDEFE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define CDEFE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define CDEFE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define CDEFE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define CDEFE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define CDEFE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define CDEFE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define CDEFE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define CDEFE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define CDEFE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define CDEFE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define CDEFE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define CDEFE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define CDEFE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define CDEFE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define CDEFE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//CDEFE_SUVD_CGC_GATE2
+#define CDEFE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define CDEFE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define CDEFE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define CDEFE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define CDEFE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define CDEFE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define CDEFE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define CDEFE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define CDEFE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//CDEFE_SUVD_CGC_CTRL
+#define CDEFE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define CDEFE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define CDEFE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define CDEFE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define CDEFE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define CDEFE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define CDEFE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define CDEFE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define CDEFE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define CDEFE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+
+
+// addressBlock: uvd_ecpudec
+//UVD_VCPU_CACHE_OFFSET0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET1
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE1
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET2
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE2
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET3
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE3
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET4
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE4
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET5
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE5
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET6
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE6
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET7
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE7
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET8
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE8
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET1
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE1
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CNTL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x4
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb
+#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0xd
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c
+#define UVD_VCPU_CNTL__RUNSTALL__SHIFT 0x1d
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST__SHIFT 0x1e
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST__SHIFT 0x1f
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000E000L
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L
+#define UVD_VCPU_CNTL__RUNSTALL_MASK 0x20000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST_MASK 0x40000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST_MASK 0x80000000L
+//UVD_VCPU_PRID
+#define UVD_VCPU_PRID__PRID__SHIFT 0x0
+#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL
+//UVD_VCPU_TRCE
+#define UVD_VCPU_TRCE__PC__SHIFT 0x0
+#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL
+//UVD_VCPU_TRCE_RD
+#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0
+#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL
+//UVD_VCPU_IND_INDEX
+#define UVD_VCPU_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_VCPU_IND_INDEX__INDEX_MASK 0x000001FFL
+//UVD_VCPU_IND_DATA
+#define UVD_VCPU_IND_DATA__DATA__SHIFT 0x0
+#define UVD_VCPU_IND_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_lmi_adpdec
+//UVD_LMI_RE_64BIT_BAR_LOW
+#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RE_64BIT_BAR_HIGH
+#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_IT_64BIT_BAR_LOW
+#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_IT_64BIT_BAR_HIGH
+#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MP_64BIT_BAR_LOW
+#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MP_64BIT_BAR_HIGH
+#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_CM_64BIT_BAR_LOW
+#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_CM_64BIT_BAR_HIGH
+#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_DB_64BIT_BAR_LOW
+#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_DB_64BIT_BAR_HIGH
+#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_DBW_64BIT_BAR_LOW
+#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_IDCT_64BIT_BAR_LOW
+#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_IDCT_64BIT_BAR_HIGH
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S0_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S0_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S1_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S1_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_LOW
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_HIGH
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_CENC_64BIT_BAR_LOW
+#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_CENC_64BIT_BAR_HIGH
+#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SRE_64BIT_BAR_LOW
+#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_SRE_64BIT_BAR_HIGH
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SPH_64BIT_BAR_HIGH
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_ADP_ATOMIC_CONFIG
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE__SHIFT 0x0
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE__SHIFT 0x4
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE__SHIFT 0x8
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE__SHIFT 0xc
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG__SHIFT 0x10
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE_MASK 0x0000000FL
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE_MASK 0x000000F0L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE_MASK 0x00000F00L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE_MASK 0x0000F000L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG_MASK 0x000F0000L
+//UVD_LMI_ARB_CTRL2
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L
+//UVD_LMI_VCPU_CACHE_VMIDS_MULTI
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L
+//UVD_LMI_VCPU_NC_VMIDS_MULTI
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L
+//UVD_LMI_LAT_CTRL
+#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_LMI_LAT_CNTR
+#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_LMI_AVG_LAT_CNTR
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_LMI_SPH
+#define UVD_LMI_SPH__ADDR__SHIFT 0x0
+#define UVD_LMI_SPH__STS__SHIFT 0x1c
+#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e
+#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f
+#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL
+#define UVD_LMI_SPH__STS_MASK 0x30000000L
+#define UVD_LMI_SPH__STS_VALID_MASK 0x40000000L
+#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L
+//UVD_LMI_VCPU_CACHE_VMID
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_LMI_CTRL2
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3
+#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19
+#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a
+#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L
+#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L
+#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L
+//UVD_LMI_URGENT_CTRL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L
+//UVD_LMI_CTRL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x14
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b
+#define UVD_LMI_CTRL__MC_BLK_RST__SHIFT 0x1c
+#define UVD_LMI_CTRL__UMC_BLK_RST__SHIFT 0x1d
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L
+#define UVD_LMI_CTRL__MC_BLK_RST_MASK 0x10000000L
+#define UVD_LMI_CTRL__UMC_BLK_RST_MASK 0x20000000L
+//UVD_LMI_STATUS
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15
+#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 0x16
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L
+//UVD_LMI_PERFMON_CTRL
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_LMI_PERFMON_COUNT_LO
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_LMI_PERFMON_COUNT_HI
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_LMI_ADP_SWAP_CNTL
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x6
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x8
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP__SHIFT 0xa
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x10
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x12
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP__SHIFT 0x14
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x18
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x1c
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x1e
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000C0L
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000C00L
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000C000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000C0000L
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP_MASK 0x00300000L
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L
+//UVD_LMI_RBC_RB_VMID
+#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL
+//UVD_LMI_RBC_IB_VMID
+#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL
+//UVD_LMI_MC_CREDITS
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L
+//UVD_LMI_ADP_IND_INDEX
+#define UVD_LMI_ADP_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_LMI_ADP_IND_INDEX__INDEX_MASK 0x00001FFFL
+//UVD_LMI_ADP_IND_DATA
+#define UVD_LMI_ADP_IND_DATA__DATA__SHIFT 0x0
+#define UVD_LMI_ADP_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_LMI_ADP_PF_EN
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN__SHIFT 0x0
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN__SHIFT 0x1
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN__SHIFT 0x2
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN_MASK 0x00000001L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN_MASK 0x00000002L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN_MASK 0x00000004L
+//UVD_LMI_PREF_CTRL
+#define UVD_LMI_PREF_CTRL__PREF_RST__SHIFT 0x0
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS__SHIFT 0x1
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB__SHIFT 0x2
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE__SHIFT 0x3
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE__SHIFT 0x4
+#define UVD_LMI_PREF_CTRL__PREF_SIZE__SHIFT 0x13
+#define UVD_LMI_PREF_CTRL__PREF_RST_MASK 0x00000001L
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS_MASK 0x00000002L
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB_MASK 0x00000004L
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE_MASK 0x00000008L
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE_MASK 0x00000070L
+#define UVD_LMI_PREF_CTRL__PREF_SIZE_MASK 0xFFF80000L
+
+
+// addressBlock: uvd_uvd_jpeg0_jpegnpdec
+//UVD_JPEG_CNTL
+#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1
+#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2
+#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8
+#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L
+#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L
+#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L
+//UVD_JPEG_RB_BASE
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0
+#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL
+#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L
+//UVD_JPEG_RB_WPTR
+#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_RPTR
+#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_SIZE
+#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L
+//UVD_JPEG_DEC_CNT
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT__SHIFT 0x0
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT_MASK 0xFFFFFFFFL
+//UVD_JPEG_SPS_INFO
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_SPS1_INFO
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC__SHIFT 0x0
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT__SHIFT 0x3
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422__SHIFT 0x4
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC_MASK 0x00000007L
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT_MASK 0x00000008L
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422_MASK 0x00000010L
+//UVD_JPEG_RE_TIMER
+#define UVD_JPEG_RE_TIMER__TIMER_OUT__SHIFT 0x0
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN__SHIFT 0x10
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_MASK 0x000000FFL
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN_MASK 0x00010000L
+//UVD_JPEG_DEC_SCRATCH0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_INT_EN
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7
+#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8
+#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9
+#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd
+#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L
+#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L
+#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L
+#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L
+#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L
+//UVD_JPEG_INT_STAT
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9
+#define UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+//UVD_JPEG_TIER_CNTL0
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC__SHIFT 0x8
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC__SHIFT 0xb
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC__SHIFT 0x11
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC__SHIFT 0x14
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC__SHIFT 0x17
+#define UVD_JPEG_TIER_CNTL0__Y_TQ__SHIFT 0x1a
+#define UVD_JPEG_TIER_CNTL0__U_TQ__SHIFT 0x1c
+#define UVD_JPEG_TIER_CNTL0__V_TQ__SHIFT 0x1e
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL_MASK 0x00000003L
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID_MASK 0x000000C0L
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC_MASK 0x00000700L
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC_MASK 0x00003800L
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC_MASK 0x0001C000L
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC_MASK 0x000E0000L
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC_MASK 0x00700000L
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC_MASK 0x03800000L
+#define UVD_JPEG_TIER_CNTL0__Y_TQ_MASK 0x0C000000L
+#define UVD_JPEG_TIER_CNTL0__U_TQ_MASK 0x30000000L
+#define UVD_JPEG_TIER_CNTL0__V_TQ_MASK 0xC0000000L
+//UVD_JPEG_TIER_CNTL1
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_CNTL2
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE__SHIFT 0x1
+#define UVD_JPEG_TIER_CNTL2__TQ__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL2__TH__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL2__TC__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL2__TD__SHIFT 0x7
+#define UVD_JPEG_TIER_CNTL2__TA__SHIFT 0xa
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL_MASK 0x00000001L
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE_MASK 0x00000002L
+#define UVD_JPEG_TIER_CNTL2__TQ_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL2__TH_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL2__TC_MASK 0x00000040L
+#define UVD_JPEG_TIER_CNTL2__TD_MASK 0x00000380L
+#define UVD_JPEG_TIER_CNTL2__TA_MASK 0x00001C00L
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN_MASK 0x00004000L
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_STATUS
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE__SHIFT 0x0
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE__SHIFT 0x1
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE_MASK 0x00000001L
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE_MASK 0x00000002L
+
+
+// addressBlock: uvd_uvd_jpeg_sclk0_jpegnpsclkdec
+//UVD_JPEG_OUTBUF_CNTL
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN__SHIFT 0x2
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX__SHIFT 0x6
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT__SHIFT 0x7
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER__SHIFT 0x9
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK__SHIFT 0x10
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT_MASK 0x00000003L
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN_MASK 0x00000004L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX_MASK 0x00000040L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT_MASK 0x00000180L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER_MASK 0x00001E00L
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK_MASK 0x00010000L
+//UVD_JPEG_OUTBUF_WPTR
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_OUTBUF_RPTR
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_UV_GFX8_TILING_SURFACE
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_GFX8_ADDR_CONFIG
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//JPEG_DEC_Y_GFX10_TILING_SURFACE
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_UV_GFX10_TILING_SURFACE
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_GFX10_ADDR_CONFIG
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_DEC_ADDR_MODE
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_OUTPUT_XY
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X__SHIFT 0x0
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y__SHIFT 0x10
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X_MASK 0x00003FFFL
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y_MASK 0x3FFF0000L
+//UVD_JPEG_GPCOM_CMD
+#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_GPCOM_DATA0
+#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_GPCOM_DATA1
+#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_SCRATCH1
+#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+//UVD_JPEG_DEC_SOFT_RST
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
+
+// addressBlock: uvd_uvd_jrbc0_uvd_jrbc_dec
+//UVD_JRBC_RB_WPTR
+#define UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_CNTL
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC_IB_SIZE
+#define UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_SOFT_RESET
+#define UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC_STATUS
+#define UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC_RB_RPTR
+#define UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_CMD
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_SIZE
+#define UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC_SCRATCH0
+#define UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_uvd_jmi0_uvd_jmi_dec
+//UVD_JPEG_DEC_PF_CTRL
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_LMI_JRBC_CTRL
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_LMI_JPEG_CTRL
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//JPEG_LMI_DROP
+#define JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+//UVD_LMI_JRBC_IB_VMID
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JRBC_RB_VMID
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_VMID
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: uvd_uvd_jmi_common_dec
+//UVD_JADP_MCIF_URGENT_CTRL
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK__SHIFT 0x0
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK__SHIFT 0x6
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER__SHIFT 0xb
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP__SHIFT 0x11
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP__SHIFT 0x15
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN__SHIFT 0x19
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN__SHIFT 0x1a
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK_MASK 0x0000003FL
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK_MASK 0x000007C0L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER_MASK 0x0001F800L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP_MASK 0x001E0000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP_MASK 0x01E00000L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN_MASK 0x02000000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN_MASK 0x04000000L
+//UVD_JMI_URGENT_CTRL
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x4
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x10
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0x14
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x000000F0L
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00010000L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00F00000L
+//UVD_JMI_CTRL
+#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0
+#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10
+#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L
+#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L
+//JPEG_MEMCHECK_CLAMPING_CNTL
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN_MASK 0x00000001L
+//JPEG_MEMCHECK_SAFE_ADDR
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR_MASK 0xFFFFFFFFL
+//JPEG_MEMCHECK_SAFE_ADDR_64BIT
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT_MASK 0xFFFFFFFFL
+//UVD_JMI_LAT_CTRL
+#define UVD_JMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_JMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_JMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_JMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_JMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_JMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_JMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_JMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_JMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_JMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_JMI_LAT_CNTR
+#define UVD_JMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_JMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_JMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_JMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_JMI_AVG_LAT_CNTR
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_JMI_PERFMON_CTRL
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_JMI_PERFMON_COUNT_LO
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_JMI_PERFMON_COUNT_HI
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_JMI_CLEAN_STATUS
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN__SHIFT 0x0
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW__SHIFT 0x1
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW__SHIFT 0x3
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING__SHIFT 0x4
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN__SHIFT 0x8
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN__SHIFT 0x10
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_MASK 0x00000001L
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW_MASK 0x00000002L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW_MASK 0x00000008L
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING_MASK 0x00000010L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN_MASK 0x00000100L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN_MASK 0x00010000L
+//UVD_JMI_CNTL
+#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8
+#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L
+
+
+// addressBlock: uvd_uvd_jpeg_common_dec
+//JPEG_SOFT_RESET_STATUS
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS__SHIFT 0x0
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS__SHIFT 0x8
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x11
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x12
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x18
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS_MASK 0x00000001L
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS_MASK 0x00000100L
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00020000L
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00040000L
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x01000000L
+//JPEG_SYS_INT_EN
+#define JPEG_SYS_INT_EN__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_EN__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_EN__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_EN__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_EN1
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_EN1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_EN1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_STATUS
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_STATUS1
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_ACK
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_ACK__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_ACK1
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_MEMCHECK_SYS_INT_EN
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_EN1
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN_MASK 0x00000020L
+//JPEG_MEMCHECK_SYS_INT_STAT
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_STAT1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_STAT2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MEMCHECK_SYS_INT_ACK
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_ACK1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_ACK2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MASTINT_EN
+#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//JPEG_IH_CTRL
+#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3
+#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define JPEG_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//JRBBM_ARB_CTRL
+#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x1
+#define JRBBM_ARB_CTRL__DJRBC0_DROP__SHIFT 0x2
+#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000002L
+#define JRBBM_ARB_CTRL__DJRBC0_DROP_MASK 0x00000004L
+
+
+// addressBlock: uvd_uvd_jpeg_common_sclk_dec
+//JPEG_CGC_GATE
+#define JPEG_CGC_GATE__JPEG0_DEC__SHIFT 0x0
+#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x8
+#define JPEG_CGC_GATE__JMCIF__SHIFT 0x9
+#define JPEG_CGC_GATE__JRBBM__SHIFT 0xa
+#define JPEG_CGC_GATE__JPEG0_DEC_MASK 0x00000001L
+#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000100L
+#define JPEG_CGC_GATE__JMCIF_MASK 0x00000200L
+#define JPEG_CGC_GATE__JRBBM_MASK 0x00000400L
+//JPEG_CGC_CTRL
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5
+#define JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT 0x10
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x18
+#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x19
+#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x1a
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x00001FE0L
+#define JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK 0x00010000L
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x01000000L
+#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x02000000L
+#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x04000000L
+//JPEG_CGC_STATUS
+#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE__SHIFT 0x0
+#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE__SHIFT 0x1
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x10
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x11
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x12
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x13
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x14
+#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE_MASK 0x00000001L
+#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE_MASK 0x00000002L
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00010000L
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00020000L
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00040000L
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00080000L
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00100000L
+//JPEG_COMN_CGC_MEM_CTRL
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN__SHIFT 0x3
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN_MASK 0x00000008L
+//JPEG_DEC_CGC_MEM_CTRL
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN__SHIFT 0x0
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN__SHIFT 0x1
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN__SHIFT 0x2
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN__SHIFT 0x3
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN_MASK 0x00000001L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN_MASK 0x00000002L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN_MASK 0x00000004L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN_MASK 0x00000008L
+//JPEG_ENC_CGC_MEM_CTRL
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN__SHIFT 0x3
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN_MASK 0x00000008L
+//JPEG_PERF_BANK_CONF
+#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0
+#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8
+#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10
+#define JPEG_PERF_BANK_CONF__CORE_SEL__SHIFT 0x15
+#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL
+#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L
+#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L
+#define JPEG_PERF_BANK_CONF__CORE_SEL_MASK 0x00E00000L
+//JPEG_PERF_BANK_EVENT_SEL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L
+//JPEG_PERF_BANK_COUNT0
+#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT1
+#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT2
+#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT3
+#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_uvd_pg_dec
+//UVD_IPX_DLDO_CONFIG
+#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT 0x2
+#define UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT 0x4
+#define UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT 0x6
+#define UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT 0x8
+#define UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT 0xa
+#define UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT 0xc
+#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG_MASK 0x0000000CL
+#define UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG_MASK 0x00000030L
+#define UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG_MASK 0x000000C0L
+#define UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG_MASK 0x00000300L
+#define UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG_MASK 0x00000C00L
+#define UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG_MASK 0x00003000L
+//UVD_IPX_DLDO_STATUS
+#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT 0x1
+#define UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT 0x2
+#define UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT 0x3
+#define UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT 0x4
+#define UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT 0x5
+#define UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT 0x6
+#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK 0x00000002L
+#define UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK 0x00000004L
+#define UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK 0x00000008L
+#define UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK 0x00000010L
+#define UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK 0x00000020L
+#define UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK 0x00000040L
+//UVD_POWER_STATUS
+#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0
+#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2
+#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4
+#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f
+#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000001L
+#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L
+#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L
+#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L
+//UVD_JPEG_POWER_STATUS
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L
+//UVD_MC_DJPEG_RD_SPACE
+#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE__SHIFT 0x0
+#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE_MASK 0x0003FFFFL
+//UVD_MC_DJPEG_WR_SPACE
+#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE__SHIFT 0x0
+#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE_MASK 0x0003FFFFL
+//UVD_PG_IND_INDEX
+#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL
+//UVD_PG_IND_DATA
+#define UVD_PG_IND_DATA__DATA__SHIFT 0x0
+#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//CC_UVD_HARVESTING
+#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0
+#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L
+#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
+//UVD_DPG_LMA_CTL
+#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0
+#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2
+#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0xe
+#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L
+#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFFC000L
+//UVD_DPG_LMA_DATA
+#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0
+#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL
+//UVD_DPG_LMA_MASK
+#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0
+#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL
+//UVD_DPG_PAUSE
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L
+//UVD_SCRATCH1
+#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0
+#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH2
+#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0
+#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH3
+#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0
+#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH4
+#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0
+#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH5
+#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0
+#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH6
+#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0
+#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH7
+#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0
+#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH8
+#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0
+#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH9
+#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0
+#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH10
+#define UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0
+#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH11
+#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0
+#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH12
+#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0
+#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH13
+#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0
+#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH14
+#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0
+#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL
+//UVD_FREE_COUNTER_REG
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_DPG_VCPU_CACHE_OFFSET0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_VMID
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_REG_FILTER_EN
+#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN__SHIFT 0x0
+#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV__SHIFT 0x1
+#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN__SHIFT 0x2
+#define UVD_REG_FILTER_EN__JPEG_PRIV_EN__SHIFT 0x3
+#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN_MASK 0x00000001L
+#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV_MASK 0x00000002L
+#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN_MASK 0x00000004L
+#define UVD_REG_FILTER_EN__JPEG_PRIV_EN_MASK 0x00000008L
+//UVD_SECURITY_REG_VIO_REPORT
+#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO__SHIFT 0x0
+#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO__SHIFT 0x1
+#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO__SHIFT 0x2
+#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO__SHIFT 0x3
+#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO__SHIFT 0x4
+#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO__SHIFT 0x5
+#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO_MASK 0x00000001L
+#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO_MASK 0x00000002L
+#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO_MASK 0x00000004L
+#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO_MASK 0x00000008L
+#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO_MASK 0x00000010L
+#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO_MASK 0x00000020L
+//UVD_FW_VERSION
+#define UVD_FW_VERSION__FW_VERSION__SHIFT 0x0
+#define UVD_FW_VERSION__FW_VERSION_MASK 0xFFFFFFFFL
+//UVD_PF_STATUS
+#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0
+#define UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7
+#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8
+#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12
+#define UVD_PF_STATUS__JPEG2_PF_OCCURED__SHIFT 0x13
+#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED__SHIFT 0x14
+#define UVD_PF_STATUS__JPEG2_PF_CLEAR__SHIFT 0x15
+#define UVD_PF_STATUS__ENCODER5_PF_OCCURED__SHIFT 0x16
+#define UVD_PF_STATUS__ENCODER5_PF_CLEAR__SHIFT 0x17
+#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L
+#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L
+#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L
+#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L
+#define UVD_PF_STATUS__JPEG2_PF_OCCURED_MASK 0x00080000L
+#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED_MASK 0x00100000L
+#define UVD_PF_STATUS__JPEG2_PF_CLEAR_MASK 0x00200000L
+#define UVD_PF_STATUS__ENCODER5_PF_OCCURED_MASK 0x00400000L
+#define UVD_PF_STATUS__ENCODER5_PF_CLEAR_MASK 0x00800000L
+//UVD_DPG_CLK_EN_VCPU_REPORT
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL
+//CC_UVD_VCPU_ERR_DETECT_BOT_LO
+#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO__SHIFT 0xc
+#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO_MASK 0xFFFFF000L
+//CC_UVD_VCPU_ERR_DETECT_BOT_HI
+#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI_MASK 0x0000FFFFL
+//CC_UVD_VCPU_ERR_DETECT_TOP_LO
+#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO__SHIFT 0xc
+#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO_MASK 0xFFFFF000L
+//CC_UVD_VCPU_ERR_DETECT_TOP_HI
+#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI_MASK 0x0000FFFFL
+//CC_UVD_VCPU_ERR
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS__SHIFT 0x0
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR__SHIFT 0x1
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN__SHIFT 0x2
+#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS__SHIFT 0x3
+#define CC_UVD_VCPU_ERR__RESET_ON_FAULT__SHIFT 0x4
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS_MASK 0x00000001L
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR_MASK 0x00000002L
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN_MASK 0x00000004L
+#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS_MASK 0x00000008L
+#define CC_UVD_VCPU_ERR__RESET_ON_FAULT_MASK 0x00000010L
+//CC_UVD_VCPU_ERR_INST_ADDR_LO
+#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO_MASK 0xFFFFFFFFL
+//CC_UVD_VCPU_ERR_INST_ADDR_HI
+#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI_MASK 0x0000FFFFL
+//UVD_LMI_MMSCH_NC_SPACE
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE__SHIFT 0x3
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE__SHIFT 0x6
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE__SHIFT 0x9
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE__SHIFT 0xf
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE__SHIFT 0x12
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE__SHIFT 0x15
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE_MASK 0x00000007L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE_MASK 0x00000038L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE_MASK 0x000001C0L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE_MASK 0x00000E00L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE_MASK 0x00007000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE_MASK 0x00038000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE_MASK 0x001C0000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE_MASK 0x00E00000L
+//UVD_LMI_ATOMIC_SPACE
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE__SHIFT 0x0
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE__SHIFT 0x3
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE__SHIFT 0x6
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE__SHIFT 0x9
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE_MASK 0x00000007L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE_MASK 0x00000038L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE_MASK 0x000001C0L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE_MASK 0x00000E00L
+//UVD_GFX8_ADDR_CONFIG
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//UVD_GFX10_ADDR_CONFIG
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//UVD_GPCNT2_CNTL
+#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT2_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L
+//UVD_GPCNT2_TARGET_LOWER
+#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_STATUS_LOWER
+#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_TARGET_UPPER
+#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT2_STATUS_UPPER
+#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_GPCNT3_CNTL
+#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT3_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3
+#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa
+#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L
+#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L
+#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L
+//UVD_GPCNT3_TARGET_LOWER
+#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_STATUS_LOWER
+#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_TARGET_UPPER
+#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT3_STATUS_UPPER
+#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_VCLK_DS_CNTL
+#define UVD_VCLK_DS_CNTL__VCLK_DS_EN__SHIFT 0x0
+#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS__SHIFT 0x4
+#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT__SHIFT 0x10
+#define UVD_VCLK_DS_CNTL__VCLK_DS_EN_MASK 0x00000001L
+#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS_MASK 0x00000010L
+#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L
+//UVD_DCLK_DS_CNTL
+#define UVD_DCLK_DS_CNTL__DCLK_DS_EN__SHIFT 0x0
+#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS__SHIFT 0x4
+#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT__SHIFT 0x10
+#define UVD_DCLK_DS_CNTL__DCLK_DS_EN_MASK 0x00000001L
+#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS_MASK 0x00000010L
+#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L
+//UVD_TSC_LOWER
+#define UVD_TSC_LOWER__COUNT__SHIFT 0x0
+#define UVD_TSC_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_TSC_UPPER
+#define UVD_TSC_UPPER__COUNT__SHIFT 0x0
+#define UVD_TSC_UPPER__COUNT_MASK 0x00FFFFFFL
+//VCN_FEATURES
+#define VCN_FEATURES__HAS_VIDEO_DEC__SHIFT 0x0
+#define VCN_FEATURES__HAS_VIDEO_ENC__SHIFT 0x1
+#define VCN_FEATURES__HAS_MJPEG_DEC__SHIFT 0x2
+#define VCN_FEATURES__HAS_MJPEG_ENC__SHIFT 0x3
+#define VCN_FEATURES__HAS_VIDEO_VIRT__SHIFT 0x4
+#define VCN_FEATURES__HAS_H264_LEGACY_DEC__SHIFT 0x5
+#define VCN_FEATURES__HAS_UDEC_DEC__SHIFT 0x6
+#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7
+#define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8
+#define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9
+#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa
+#define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb
+#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc
+#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd
+#define VCN_FEATURES__HAS_AV1_ENC__SHIFT 0xe
+#define VCN_FEATURES__INSTANCE_ID__SHIFT 0x1c
+#define VCN_FEATURES__HAS_VIDEO_DEC_MASK 0x00000001L
+#define VCN_FEATURES__HAS_VIDEO_ENC_MASK 0x00000002L
+#define VCN_FEATURES__HAS_MJPEG_DEC_MASK 0x00000004L
+#define VCN_FEATURES__HAS_MJPEG_ENC_MASK 0x00000008L
+#define VCN_FEATURES__HAS_VIDEO_VIRT_MASK 0x00000010L
+#define VCN_FEATURES__HAS_H264_LEGACY_DEC_MASK 0x00000020L
+#define VCN_FEATURES__HAS_UDEC_DEC_MASK 0x00000040L
+#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L
+#define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L
+#define VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L
+#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L
+#define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L
+#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L
+#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L
+#define VCN_FEATURES__HAS_AV1_ENC_MASK 0x00004000L
+#define VCN_FEATURES__INSTANCE_ID_MASK 0xF0000000L
+//UVD_GPUIOV_STATUS
+#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
+#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
+//UVD_SCRATCH15
+#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
+#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
+//UVD_VERSION
+#define UVD_VERSION__VARIANT_TYPE__SHIFT 0x0
+#define UVD_VERSION__MINOR_VERSION__SHIFT 0x8
+#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10
+#define UVD_VERSION__INSTANCE_ID__SHIFT 0x1c
+#define UVD_VERSION__VARIANT_TYPE_MASK 0x000000FFL
+#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L
+#define UVD_VERSION__INSTANCE_ID_MASK 0xF0000000L
+//VCN_UMSCH_CNTL
+#define VCN_UMSCH_CNTL__umsch_fw_en__SHIFT 0x0
+#define VCN_UMSCH_CNTL__umsch_fw_en_MASK 0x00000001L
+//VCN_JPEG_DB_CTRL
+#define VCN_JPEG_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB1_DB_CTRL
+#define VCN_RB1_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB1_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB1_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB1_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB1_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB1_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB2_DB_CTRL
+#define VCN_RB2_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB2_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB2_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB2_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB2_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB2_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB3_DB_CTRL
+#define VCN_RB3_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB3_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB3_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB3_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB3_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB3_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB4_DB_CTRL
+#define VCN_RB4_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB4_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB4_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB4_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB4_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB4_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_UMSCH_RB_DB_CTRL
+#define VCN_UMSCH_RB_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_UMSCH_RB_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_UMSCH_RB_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_UMSCH_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_UMSCH_RB_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_UMSCH_RB_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB_DB_CTRL
+#define VCN_RB_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL0
+#define VCN_AGDB_CTRL0__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL0__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL0__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL0__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL0__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL0__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL1
+#define VCN_AGDB_CTRL1__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL1__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL1__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL1__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL1__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL1__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL2
+#define VCN_AGDB_CTRL2__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL2__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL2__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL2__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL2__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL2__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL3
+#define VCN_AGDB_CTRL3__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL3__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL3__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL3__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL3__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL3__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL4
+#define VCN_AGDB_CTRL4__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL4__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL4__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL4__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL4__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL4__HIT_MASK 0x80000000L
+//VCN_AGDB_CTRL5
+#define VCN_AGDB_CTRL5__OFFSET__SHIFT 0x2
+#define VCN_AGDB_CTRL5__EN__SHIFT 0x1e
+#define VCN_AGDB_CTRL5__HIT__SHIFT 0x1f
+#define VCN_AGDB_CTRL5__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_AGDB_CTRL5__EN_MASK 0x40000000L
+#define VCN_AGDB_CTRL5__HIT_MASK 0x80000000L
+//VCN_AGDB_MASK0
+#define VCN_AGDB_MASK0__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK0__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK1
+#define VCN_AGDB_MASK1__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK1__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK2
+#define VCN_AGDB_MASK2__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK2__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK3
+#define VCN_AGDB_MASK3__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK3__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK4
+#define VCN_AGDB_MASK4__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK4__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK5
+#define VCN_AGDB_MASK5__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK5__MASK_MASK 0x0FFFFFFCL
+//VCN_RB_ENABLE
+#define VCN_RB_ENABLE__RB_EN__SHIFT 0x0
+#define VCN_RB_ENABLE__JPEG_RB_EN__SHIFT 0x1
+#define VCN_RB_ENABLE__RB1_EN__SHIFT 0x2
+#define VCN_RB_ENABLE__RB2_EN__SHIFT 0x3
+#define VCN_RB_ENABLE__RB3_EN__SHIFT 0x4
+#define VCN_RB_ENABLE__RB4_EN__SHIFT 0x5
+#define VCN_RB_ENABLE__UMSCH_RB_EN__SHIFT 0x6
+#define VCN_RB_ENABLE__EJPEG_RB_EN__SHIFT 0x7
+#define VCN_RB_ENABLE__AUDIO_RB_EN__SHIFT 0x8
+#define VCN_RB_ENABLE__RB_EN_MASK 0x00000001L
+#define VCN_RB_ENABLE__JPEG_RB_EN_MASK 0x00000002L
+#define VCN_RB_ENABLE__RB1_EN_MASK 0x00000004L
+#define VCN_RB_ENABLE__RB2_EN_MASK 0x00000008L
+#define VCN_RB_ENABLE__RB3_EN_MASK 0x00000010L
+#define VCN_RB_ENABLE__RB4_EN_MASK 0x00000020L
+#define VCN_RB_ENABLE__UMSCH_RB_EN_MASK 0x00000040L
+#define VCN_RB_ENABLE__EJPEG_RB_EN_MASK 0x00000080L
+#define VCN_RB_ENABLE__AUDIO_RB_EN_MASK 0x00000100L
+//VCN_RB_WPTR_CTRL
+#define VCN_RB_WPTR_CTRL__RB_CS_EN__SHIFT 0x0
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN__SHIFT 0x1
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN__SHIFT 0x2
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN__SHIFT 0x3
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN__SHIFT 0x4
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN__SHIFT 0x5
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN__SHIFT 0x6
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN__SHIFT 0x7
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN__SHIFT 0x8
+#define VCN_RB_WPTR_CTRL__RB_CS_EN_MASK 0x00000001L
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN_MASK 0x00000002L
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN_MASK 0x00000004L
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN_MASK 0x00000008L
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN_MASK 0x00000010L
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN_MASK 0x00000020L
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN_MASK 0x00000040L
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN_MASK 0x00000080L
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN_MASK 0x00000100L
+//UVD_RB_RPTR
+#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR
+#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR2
+#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR2
+#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR3
+#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR3
+#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR4
+#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR4
+#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_RPTR
+#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_WPTR
+#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_RPTR
+#define UVD_AUDIO_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_WPTR
+#define UVD_AUDIO_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_RPTR
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_WPTR
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_DPG_LMA_CTL2
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL__SHIFT 0x0
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR__SHIFT 0x2
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR__SHIFT 0x9
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR_MASK 0x000001FCL
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+
+
+// addressBlock: uvd_vcn_umsch_dec
+//VCN_UMSCH_MES_CNTL
+#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
+#define VCN_UMSCH_MES_CNTL__PerfPipeSel__SHIFT 0x2
+#define VCN_UMSCH_MES_CNTL__RamClkGatingDisable__SHIFT 0x4
+#define VCN_UMSCH_MES_CNTL__InterruptChickenBit__SHIFT 0x5
+#define VCN_UMSCH_MES_CNTL__CpTcOneCycleWrDis__SHIFT 0x6
+#define VCN_UMSCH_MES_CNTL__PIPE_ID_MASK 0x00000003L
+#define VCN_UMSCH_MES_CNTL__PerfPipeSel_MASK 0x0000000CL
+#define VCN_UMSCH_MES_CNTL__RamClkGatingDisable_MASK 0x00000010L
+#define VCN_UMSCH_MES_CNTL__InterruptChickenBit_MASK 0x00000020L
+#define VCN_UMSCH_MES_CNTL__CpTcOneCycleWrDis_MASK 0x00000040L
+//UMSCH_CTL
+#define UMSCH_CTL__P_RESET__SHIFT 0x0
+#define UMSCH_CTL__UTCL2_CLIENT_ID__SHIFT 0x1
+#define UMSCH_CTL__UMSCH_BUSY__SHIFT 0xa
+#define UMSCH_CTL__IllegalRegReadAckLatency__SHIFT 0xd
+#define UMSCH_CTL__P_RESET_MASK 0x00000001L
+#define UMSCH_CTL__UTCL2_CLIENT_ID_MASK 0x000003FEL
+#define UMSCH_CTL__UMSCH_BUSY_MASK 0x00000400L
+#define UMSCH_CTL__IllegalRegReadAckLatency_MASK 0x0000E000L
+//UMSCH_CTL2
+#define UMSCH_CTL2__Spare__SHIFT 0x0
+#define UMSCH_CTL2__Spare_MASK 0xFFFFFFFFL
+//VCN_UMSCH_AGDB_WPTR0
+#define VCN_UMSCH_AGDB_WPTR0__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR0__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR1
+#define VCN_UMSCH_AGDB_WPTR1__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR1__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR2
+#define VCN_UMSCH_AGDB_WPTR2__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR2__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR3
+#define VCN_UMSCH_AGDB_WPTR3__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR3__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR4
+#define VCN_UMSCH_AGDB_WPTR4__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR4__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR5
+#define VCN_UMSCH_AGDB_WPTR5__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR5__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_MAILBOX0
+#define VCN_UMSCH_MAILBOX0__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP0
+#define VCN_UMSCH_MAILBOX_RESP0__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX1
+#define VCN_UMSCH_MAILBOX1__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP1
+#define VCN_UMSCH_MAILBOX_RESP1__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX2
+#define VCN_UMSCH_MAILBOX2__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP2
+#define VCN_UMSCH_MAILBOX_RESP2__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX3
+#define VCN_UMSCH_MAILBOX3__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP3
+#define VCN_UMSCH_MAILBOX_RESP3__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER0
+#define VCN_UMSCH_SPARE_REGISTER0__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER1
+#define VCN_UMSCH_SPARE_REGISTER1__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER2
+#define VCN_UMSCH_SPARE_REGISTER2__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER3
+#define VCN_UMSCH_SPARE_REGISTER3__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER4
+#define VCN_UMSCH_SPARE_REGISTER4__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER4__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER5
+#define VCN_UMSCH_SPARE_REGISTER5__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER5__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER6
+#define VCN_UMSCH_SPARE_REGISTER6__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER6__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER7
+#define VCN_UMSCH_SPARE_REGISTER7__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER7__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MES_UTCL1_CNTL
+#define VCN_UMSCH_MES_UTCL1_CNTL__REDO_LATENCY__SHIFT 0x0
+#define VCN_UMSCH_MES_UTCL1_CNTL__ForceSnoop__SHIFT 0x14
+#define VCN_UMSCH_MES_UTCL1_CNTL__FragLimitMode__SHIFT 0x15
+#define VCN_UMSCH_MES_UTCL1_CNTL__DropMode__SHIFT 0x16
+#define VCN_UMSCH_MES_UTCL1_CNTL__Invalidate__SHIFT 0x17
+#define VCN_UMSCH_MES_UTCL1_CNTL__REDO_LATENCY_MASK 0x000FFFFFL
+#define VCN_UMSCH_MES_UTCL1_CNTL__ForceSnoop_MASK 0x00100000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__FragLimitMode_MASK 0x00200000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__DropMode_MASK 0x00400000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__Invalidate_MASK 0x00800000L
+//VCN_UMSCH_MES_BUSY
+#define VCN_UMSCH_MES_BUSY__MesScratchRamBusy__SHIFT 0x0
+#define VCN_UMSCH_MES_BUSY__MesInstrCacheBusy__SHIFT 0x1
+#define VCN_UMSCH_MES_BUSY__MesDataCacheBusy__SHIFT 0x2
+#define VCN_UMSCH_MES_BUSY__MesBusy__SHIFT 0x3
+#define VCN_UMSCH_MES_BUSY__MesLoadBusy__SHIFT 0x4
+#define VCN_UMSCH_MES_BUSY__MesMutexBusy__SHIFT 0x5
+#define VCN_UMSCH_MES_BUSY__MesThreadBusy__SHIFT 0x6
+#define VCN_UMSCH_MES_BUSY__MesMessageBusy__SHIFT 0x8
+#define VCN_UMSCH_MES_BUSY__MesTcBusy__SHIFT 0xa
+#define VCN_UMSCH_MES_BUSY__MesDmaPending__SHIFT 0xc
+#define VCN_UMSCH_MES_BUSY__MesScratchRamBusy_MASK 0x00000001L
+#define VCN_UMSCH_MES_BUSY__MesInstrCacheBusy_MASK 0x00000002L
+#define VCN_UMSCH_MES_BUSY__MesDataCacheBusy_MASK 0x00000004L
+#define VCN_UMSCH_MES_BUSY__MesBusy_MASK 0x00000008L
+#define VCN_UMSCH_MES_BUSY__MesLoadBusy_MASK 0x00000010L
+#define VCN_UMSCH_MES_BUSY__MesMutexBusy_MASK 0x00000020L
+#define VCN_UMSCH_MES_BUSY__MesThreadBusy_MASK 0x000000C0L
+#define VCN_UMSCH_MES_BUSY__MesMessageBusy_MASK 0x00000300L
+#define VCN_UMSCH_MES_BUSY__MesTcBusy_MASK 0x00000C00L
+#define VCN_UMSCH_MES_BUSY__MesDmaPending_MASK 0x00003000L
+//VCN_UMSCH_RB_BASE_LO
+#define VCN_UMSCH_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define VCN_UMSCH_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//VCN_UMSCH_RB_BASE_HI
+#define VCN_UMSCH_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define VCN_UMSCH_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//VCN_UMSCH_RB_SIZE
+#define VCN_UMSCH_RB_SIZE__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_SIZE__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_RB_RPTR
+#define VCN_UMSCH_RB_RPTR__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_RPTR__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_RB_WPTR
+#define VCN_UMSCH_RB_WPTR__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_WPTR__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_MASTINT_EN
+#define VCN_UMSCH_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define VCN_UMSCH_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define VCN_UMSCH_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define VCN_UMSCH_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define VCN_UMSCH_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define VCN_UMSCH_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//VCN_UMSCH_IH_CTRL
+#define VCN_UMSCH_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define VCN_UMSCH_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define VCN_UMSCH_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define VCN_UMSCH_IH_CTRL__IH_VMID__SHIFT 0x3
+#define VCN_UMSCH_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define VCN_UMSCH_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define VCN_UMSCH_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define VCN_UMSCH_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define VCN_UMSCH_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define VCN_UMSCH_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define VCN_UMSCH_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define VCN_UMSCH_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//VCN_UMSCH_SYS_INT_EN
+#define VCN_UMSCH_SYS_INT_EN__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_EN__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_EN__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_EN__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_EN__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_EN__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_EN__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_EN__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_EN__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_EN__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_EN__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_EN__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_EN__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_EN__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_EN__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_EN__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_STATUS
+#define VCN_UMSCH_SYS_INT_STATUS__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_STATUS__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_STATUS__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_STATUS__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_STATUS__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_STATUS__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_STATUS__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_STATUS__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_STATUS__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_STATUS__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_STATUS__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_STATUS__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_STATUS__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_STATUS__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_STATUS__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_STATUS__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_ACK
+#define VCN_UMSCH_SYS_INT_ACK__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_ACK__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_ACK__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_ACK__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_ACK__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_ACK__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_ACK__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_ACK__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_ACK__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_ACK__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_ACK__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_ACK__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_ACK__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_ACK__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_ACK__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_ACK__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_SRC
+#define VCN_UMSCH_SYS_INT_SRC__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_SRC__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_SRC__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_SRC__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_SRC__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_SRC__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_SRC__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_SRC__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_SRC__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_SRC__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_SRC__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_SRC__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_SRC__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_SRC__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_SRC__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_SRC__INT7_MASK 0x00000080L
+//VCN_UMSCH_IH_CTX_CTRL
+#define VCN_UMSCH_IH_CTX_CTRL__IH_CTX_ID__SHIFT 0x0
+#define VCN_UMSCH_IH_CTX_CTRL__IH_CTX_ID_MASK 0x0FFFFFFFL
+//UVD_UMSCH_FORCE
+#define UVD_UMSCH_FORCE__IC_FORCE_GPUVM__SHIFT 0x0
+#define UVD_UMSCH_FORCE__DC_FORCE_GPUVM__SHIFT 0x1
+#define UVD_UMSCH_FORCE__FORCE_DROP_DISABLE__SHIFT 0x2
+#define UVD_UMSCH_FORCE__FORCE_DROP_INT_DISABLE__SHIFT 0x3
+#define UVD_UMSCH_FORCE__BYPASS_UTCL2_ATC_AUTO_RESP__SHIFT 0x4
+#define UVD_UMSCH_FORCE__IC_FORCE_GPUVM_MASK 0x00000001L
+#define UVD_UMSCH_FORCE__DC_FORCE_GPUVM_MASK 0x00000002L
+#define UVD_UMSCH_FORCE__FORCE_DROP_DISABLE_MASK 0x00000004L
+#define UVD_UMSCH_FORCE__FORCE_DROP_INT_DISABLE_MASK 0x00000008L
+#define UVD_UMSCH_FORCE__BYPASS_UTCL2_ATC_AUTO_RESP_MASK 0x00000010L
+//UMSCH_MES_RESET_CTRL
+#define UMSCH_MES_RESET_CTRL__MES_CORE_SOFT_RESET__SHIFT 0x0
+#define UMSCH_MES_RESET_CTRL__MES_CORE_SOFT_RESET_MASK 0x00000001L
+
+
+// addressBlock: uvd_vcn_cprs64dec
+//VCN_MES_PRGRM_CNTR_START
+#define VCN_MES_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define VCN_MES_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//VCN_MES_INTR_ROUTINE_START
+#define VCN_MES_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define VCN_MES_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//VCN_MES_MTVEC_LO
+#define VCN_MES_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_INTR_ROUTINE_START_HI
+#define VCN_MES_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define VCN_MES_INTR_ROUTINE_START_HI__IR_START_MASK 0xFFFFFFFFL
+//VCN_MES_MTVEC_HI
+#define VCN_MES_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_CNTL
+#define VCN_MES_CNTL__MES_INVALIDATE_ICACHE__SHIFT 0x4
+#define VCN_MES_CNTL__MES_PIPE0_RESET__SHIFT 0x10
+#define VCN_MES_CNTL__MES_PIPE1_RESET__SHIFT 0x11
+#define VCN_MES_CNTL__MES_PIPE2_RESET__SHIFT 0x12
+#define VCN_MES_CNTL__MES_PIPE3_RESET__SHIFT 0x13
+#define VCN_MES_CNTL__MES_PIPE0_ACTIVE__SHIFT 0x1a
+#define VCN_MES_CNTL__MES_PIPE1_ACTIVE__SHIFT 0x1b
+#define VCN_MES_CNTL__MES_PIPE2_ACTIVE__SHIFT 0x1c
+#define VCN_MES_CNTL__MES_PIPE3_ACTIVE__SHIFT 0x1d
+#define VCN_MES_CNTL__MES_HALT__SHIFT 0x1e
+#define VCN_MES_CNTL__MES_STEP__SHIFT 0x1f
+#define VCN_MES_CNTL__MES_INVALIDATE_ICACHE_MASK 0x00000010L
+#define VCN_MES_CNTL__MES_PIPE0_RESET_MASK 0x00010000L
+#define VCN_MES_CNTL__MES_PIPE1_RESET_MASK 0x00020000L
+#define VCN_MES_CNTL__MES_PIPE2_RESET_MASK 0x00040000L
+#define VCN_MES_CNTL__MES_PIPE3_RESET_MASK 0x00080000L
+#define VCN_MES_CNTL__MES_PIPE0_ACTIVE_MASK 0x04000000L
+#define VCN_MES_CNTL__MES_PIPE1_ACTIVE_MASK 0x08000000L
+#define VCN_MES_CNTL__MES_PIPE2_ACTIVE_MASK 0x10000000L
+#define VCN_MES_CNTL__MES_PIPE3_ACTIVE_MASK 0x20000000L
+#define VCN_MES_CNTL__MES_HALT_MASK 0x40000000L
+#define VCN_MES_CNTL__MES_STEP_MASK 0x80000000L
+//VCN_MES_PIPE_PRIORITY_CNTS
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//VCN_MES_PIPE0_PRIORITY
+#define VCN_MES_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE1_PRIORITY
+#define VCN_MES_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE2_PRIORITY
+#define VCN_MES_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE3_PRIORITY
+#define VCN_MES_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_HEADER_DUMP
+#define VCN_MES_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define VCN_MES_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//VCN_MES_MIE_LO
+#define VCN_MES_MIE_LO__MES_INT__SHIFT 0x0
+#define VCN_MES_MIE_LO__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_MIE_HI
+#define VCN_MES_MIE_HI__MES_INT__SHIFT 0x0
+#define VCN_MES_MIE_HI__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT
+#define VCN_MES_INTERRUPT__MES_INT__SHIFT 0x0
+#define VCN_MES_INTERRUPT__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_SCRATCH_INDEX
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//VCN_MES_SCRATCH_DATA
+#define VCN_MES_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define VCN_MES_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INSTR_PNTR
+#define VCN_MES_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define VCN_MES_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//VCN_MES_MSCRATCH_HI
+#define VCN_MES_MSCRATCH_HI__DATA__SHIFT 0x0
+#define VCN_MES_MSCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_MSCRATCH_LO
+#define VCN_MES_MSCRATCH_LO__DATA__SHIFT 0x0
+#define VCN_MES_MSCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_MSTATUS_LO
+#define VCN_MES_MSTATUS_LO__STATUS_LO__SHIFT 0x0
+#define VCN_MES_MSTATUS_LO__STATUS_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MSTATUS_HI
+#define VCN_MES_MSTATUS_HI__STATUS_HI__SHIFT 0x0
+#define VCN_MES_MSTATUS_HI__STATUS_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MEPC_LO
+#define VCN_MES_MEPC_LO__MEPC_LO__SHIFT 0x0
+#define VCN_MES_MEPC_LO__MEPC_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MEPC_HI
+#define VCN_MES_MEPC_HI__MEPC_HI__SHIFT 0x0
+#define VCN_MES_MEPC_HI__MEPC_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MCAUSE_LO
+#define VCN_MES_MCAUSE_LO__CAUSE_LO__SHIFT 0x0
+#define VCN_MES_MCAUSE_LO__CAUSE_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MCAUSE_HI
+#define VCN_MES_MCAUSE_HI__CAUSE_HI__SHIFT 0x0
+#define VCN_MES_MCAUSE_HI__CAUSE_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MBADADDR_LO
+#define VCN_MES_MBADADDR_LO__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MBADADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MBADADDR_HI
+#define VCN_MES_MBADADDR_HI__ADDR_HI__SHIFT 0x0
+#define VCN_MES_MBADADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MIP_LO
+#define VCN_MES_MIP_LO__MIP_LO__SHIFT 0x0
+#define VCN_MES_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIP_HI
+#define VCN_MES_MIP_HI__MIP_HI__SHIFT 0x0
+#define VCN_MES_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//VCN_MES_IC_OP_CNTL
+#define VCN_MES_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define VCN_MES_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define VCN_MES_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define VCN_MES_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define VCN_MES_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define VCN_MES_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//VCN_MES_MCYCLE_LO
+#define VCN_MES_MCYCLE_LO__CYCLE_LO__SHIFT 0x0
+#define VCN_MES_MCYCLE_LO__CYCLE_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MCYCLE_HI
+#define VCN_MES_MCYCLE_HI__CYCLE_HI__SHIFT 0x0
+#define VCN_MES_MCYCLE_HI__CYCLE_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MTIME_LO
+#define VCN_MES_MTIME_LO__TIME_LO__SHIFT 0x0
+#define VCN_MES_MTIME_LO__TIME_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MTIME_HI
+#define VCN_MES_MTIME_HI__TIME_HI__SHIFT 0x0
+#define VCN_MES_MTIME_HI__TIME_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MINSTRET_LO
+#define VCN_MES_MINSTRET_LO__INSTRET_LO__SHIFT 0x0
+#define VCN_MES_MINSTRET_LO__INSTRET_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MINSTRET_HI
+#define VCN_MES_MINSTRET_HI__INSTRET_HI__SHIFT 0x0
+#define VCN_MES_MINSTRET_HI__INSTRET_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MISA_LO
+#define VCN_MES_MISA_LO__MISA_LO__SHIFT 0x0
+#define VCN_MES_MISA_LO__MISA_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MISA_HI
+#define VCN_MES_MISA_HI__MISA_HI__SHIFT 0x0
+#define VCN_MES_MISA_HI__MISA_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MVENDORID_LO
+#define VCN_MES_MVENDORID_LO__MVENDORID_LO__SHIFT 0x0
+#define VCN_MES_MVENDORID_LO__MVENDORID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MVENDORID_HI
+#define VCN_MES_MVENDORID_HI__MVENDORID_HI__SHIFT 0x0
+#define VCN_MES_MVENDORID_HI__MVENDORID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MARCHID_LO
+#define VCN_MES_MARCHID_LO__MARCHID_LO__SHIFT 0x0
+#define VCN_MES_MARCHID_LO__MARCHID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MARCHID_HI
+#define VCN_MES_MARCHID_HI__MARCHID_HI__SHIFT 0x0
+#define VCN_MES_MARCHID_HI__MARCHID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MIMPID_LO
+#define VCN_MES_MIMPID_LO__MIMPID_LO__SHIFT 0x0
+#define VCN_MES_MIMPID_LO__MIMPID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIMPID_HI
+#define VCN_MES_MIMPID_HI__MIMPID_HI__SHIFT 0x0
+#define VCN_MES_MIMPID_HI__MIMPID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MHARTID_LO
+#define VCN_MES_MHARTID_LO__MHARTID_LO__SHIFT 0x0
+#define VCN_MES_MHARTID_LO__MHARTID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MHARTID_HI
+#define VCN_MES_MHARTID_HI__MHARTID_HI__SHIFT 0x0
+#define VCN_MES_MHARTID_HI__MHARTID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_DC_BASE_CNTL
+#define VCN_MES_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define VCN_MES_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//VCN_MES_DC_OP_CNTL
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define VCN_MES_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define VCN_MES_DC_OP_CNTL__DEPRECATED__SHIFT 0x3
+#define VCN_MES_DC_OP_CNTL__DEPRACATED__SHIFT 0x4
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define VCN_MES_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+#define VCN_MES_DC_OP_CNTL__DEPRECATED_MASK 0x00000008L
+#define VCN_MES_DC_OP_CNTL__DEPRACATED_MASK 0x00000010L
+//VCN_MES_MTIMECMP_LO
+#define VCN_MES_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define VCN_MES_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MTIMECMP_HI
+#define VCN_MES_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define VCN_MES_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP0_LO
+#define VCN_MES_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define VCN_MES_GP0_LO__DATA__SHIFT 0x1
+#define VCN_MES_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define VCN_MES_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//VCN_MES_GP0_HI
+#define VCN_MES_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define VCN_MES_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//VCN_MES_GP1_LO
+#define VCN_MES_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define VCN_MES_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP1_HI
+#define VCN_MES_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define VCN_MES_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP2_LO
+#define VCN_MES_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define VCN_MES_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP2_HI
+#define VCN_MES_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define VCN_MES_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP3_LO
+#define VCN_MES_GP3_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP3_HI
+#define VCN_MES_GP3_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP4_LO
+#define VCN_MES_GP4_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP4_HI
+#define VCN_MES_GP4_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP5_LO
+#define VCN_MES_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define VCN_MES_GP5_LO__DATA__SHIFT 0x1
+#define VCN_MES_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define VCN_MES_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//VCN_MES_GP5_HI
+#define VCN_MES_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define VCN_MES_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//VCN_MES_GP6_LO
+#define VCN_MES_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define VCN_MES_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP6_HI
+#define VCN_MES_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define VCN_MES_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP7_LO
+#define VCN_MES_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define VCN_MES_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_GP7_HI
+#define VCN_MES_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define VCN_MES_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_GP8_LO
+#define VCN_MES_GP8_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP8_HI
+#define VCN_MES_GP8_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP9_LO
+#define VCN_MES_GP9_LO__DATA__SHIFT 0x0
+#define VCN_MES_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_GP9_HI
+#define VCN_MES_GP9_HI__DATA__SHIFT 0x0
+#define VCN_MES_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_DM_INDEX_ADDR
+#define VCN_MES_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define VCN_MES_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//VCN_MES_DM_INDEX_DATA
+#define VCN_MES_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define VCN_MES_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_LOCAL_BASE0_LO
+#define VCN_MES_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_BASE0_HI
+#define VCN_MES_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_MASK0_LO
+#define VCN_MES_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_MASK0_HI
+#define VCN_MES_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_APERTURE
+#define VCN_MES_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define VCN_MES_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//VCN_MES_LOCAL_INSTR_BASE_LO
+#define VCN_MES_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_INSTR_BASE_HI
+#define VCN_MES_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_INSTR_MASK_LO
+#define VCN_MES_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_INSTR_MASK_HI
+#define VCN_MES_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//VCN_MES_LOCAL_INSTR_APERTURE
+#define VCN_MES_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define VCN_MES_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//VCN_MES_LOCAL_SCRATCH_APERTURE
+#define VCN_MES_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define VCN_MES_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//VCN_MES_LOCAL_SCRATCH_BASE_LO
+#define VCN_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define VCN_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_LOCAL_SCRATCH_BASE_HI
+#define VCN_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define VCN_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_PERFCOUNT_CNTL
+#define VCN_MES_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define VCN_MES_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//VCN_MES_PENDING_INTERRUPT
+#define VCN_MES_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define VCN_MES_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//VCN_MES_PRGRM_CNTR_START_HI
+#define VCN_MES_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define VCN_MES_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//VCN_MES_INTERRUPT_DATA_16
+#define VCN_MES_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_17
+#define VCN_MES_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_18
+#define VCN_MES_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_19
+#define VCN_MES_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_20
+#define VCN_MES_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_21
+#define VCN_MES_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_22
+#define VCN_MES_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_23
+#define VCN_MES_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_24
+#define VCN_MES_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_25
+#define VCN_MES_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_26
+#define VCN_MES_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_27
+#define VCN_MES_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_28
+#define VCN_MES_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_29
+#define VCN_MES_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_30
+#define VCN_MES_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT_DATA_31
+#define VCN_MES_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define VCN_MES_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE0_BASE
+#define VCN_MES_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE0_MASK
+#define VCN_MES_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE0_CNTL
+#define VCN_MES_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE1_BASE
+#define VCN_MES_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE1_MASK
+#define VCN_MES_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE1_CNTL
+#define VCN_MES_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE2_BASE
+#define VCN_MES_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE2_MASK
+#define VCN_MES_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE2_CNTL
+#define VCN_MES_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE3_BASE
+#define VCN_MES_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE3_MASK
+#define VCN_MES_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE3_CNTL
+#define VCN_MES_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE4_BASE
+#define VCN_MES_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE4_MASK
+#define VCN_MES_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE4_CNTL
+#define VCN_MES_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE5_BASE
+#define VCN_MES_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE5_MASK
+#define VCN_MES_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE5_CNTL
+#define VCN_MES_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE6_BASE
+#define VCN_MES_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE6_MASK
+#define VCN_MES_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE6_CNTL
+#define VCN_MES_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE7_BASE
+#define VCN_MES_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE7_MASK
+#define VCN_MES_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE7_CNTL
+#define VCN_MES_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE8_BASE
+#define VCN_MES_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE8_MASK
+#define VCN_MES_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE8_CNTL
+#define VCN_MES_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE9_BASE
+#define VCN_MES_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE9_MASK
+#define VCN_MES_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE9_CNTL
+#define VCN_MES_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE10_BASE
+#define VCN_MES_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE10_MASK
+#define VCN_MES_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE10_CNTL
+#define VCN_MES_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE11_BASE
+#define VCN_MES_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE11_MASK
+#define VCN_MES_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE11_CNTL
+#define VCN_MES_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE12_BASE
+#define VCN_MES_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE12_MASK
+#define VCN_MES_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE12_CNTL
+#define VCN_MES_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE13_BASE
+#define VCN_MES_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE13_MASK
+#define VCN_MES_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE13_CNTL
+#define VCN_MES_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE14_BASE
+#define VCN_MES_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE14_MASK
+#define VCN_MES_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE14_CNTL
+#define VCN_MES_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//VCN_MES_DC_APERTURE15_BASE
+#define VCN_MES_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define VCN_MES_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE15_MASK
+#define VCN_MES_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define VCN_MES_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//VCN_MES_DC_APERTURE15_CNTL
+#define VCN_MES_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define VCN_MES_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+
+
+// addressBlock: uvd_vcn_hypdec
+//VCN_MES_IC_BASE_LO
+#define VCN_MES_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define VCN_MES_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//VCN_MES_MIBASE_LO
+#define VCN_MES_MIBASE_LO__IC_BASE_LO__SHIFT 0xc
+#define VCN_MES_MIBASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//VCN_MES_IC_BASE_HI
+#define VCN_MES_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define VCN_MES_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_MIBASE_HI
+#define VCN_MES_MIBASE_HI__IC_BASE_HI__SHIFT 0x0
+#define VCN_MES_MIBASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_IC_BASE_CNTL
+#define VCN_MES_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define VCN_MES_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define VCN_MES_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define VCN_MES_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//VCN_MES_DC_BASE_LO
+#define VCN_MES_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define VCN_MES_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_MDBASE_LO
+#define VCN_MES_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define VCN_MES_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//VCN_MES_DC_BASE_HI
+#define VCN_MES_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define VCN_MES_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_MDBASE_HI
+#define VCN_MES_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define VCN_MES_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//VCN_MES_MIBOUND_LO
+#define VCN_MES_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define VCN_MES_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIBOUND_HI
+#define VCN_MES_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define VCN_MES_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MDBOUND_LO
+#define VCN_MES_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define VCN_MES_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MDBOUND_HI
+#define VCN_MES_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define VCN_MES_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_slmi_adpdec
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC_VMID
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L
+//UVD_LMI_MMSCH_CTRL
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1
+#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH__SHIFT 0x2
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L
+#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH_MASK 0x00000004L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L
+//UVD_MMSCH_LMI_STATUS
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT__SHIFT 0x0
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0x1
+#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN__SHIFT 0x4
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS__SHIFT 0x8
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE__SHIFT 0xc
+#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN__SHIFT 0xd
+#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN__SHIFT 0xe
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT_MASK 0x00000001L
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00000002L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN_MASK 0x000000F0L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS_MASK 0x00000700L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE_MASK 0x00001000L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN_MASK 0x00002000L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN_MASK 0x00004000L
+//UMSCH_IOV_ACTIVE_FCN_ID
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000003FL
+#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L
+//UVD_UMSCH_LMI_STATUS
+#define UVD_UMSCH_LMI_STATUS__UMSCHIC_RD_CLEAN__SHIFT 0x0
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_RD_CLEAN__SHIFT 0x1
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_WR_CLEAN__SHIFT 0x2
+#define UVD_UMSCH_LMI_STATUS__UMSCHIC_RD_CLEAN_MASK 0x00000001L
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_RD_CLEAN_MASK 0x00000002L
+#define UVD_UMSCH_LMI_STATUS__UMSCHDC_WR_CLEAN_MASK 0x00000004L
+
+
+// addressBlock: uvdctxind
+//UVD_CGC_MEM_CTRL
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x0
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x1
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x2
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x3
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x4
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x5
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x6
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x7
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x8
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x9
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0xa
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0xc
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0xd
+#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN__SHIFT 0xe
+#define UVD_CGC_MEM_CTRL__MPC1_LS_EN__SHIFT 0xf
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_CTRL__MPC1_LS_EN_MASK 0x00008000L
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L
+//UVD_CGC_CTRL2
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x0
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x1
+#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x2
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L
+#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001CL
+//UVD_CGC_MEM_DS_CTRL
+#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN__SHIFT 0x0
+#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN__SHIFT 0x1
+#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN__SHIFT 0x2
+#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN__SHIFT 0x3
+#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN__SHIFT 0x4
+#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN__SHIFT 0x5
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN__SHIFT 0x6
+#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN__SHIFT 0x7
+#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN__SHIFT 0x8
+#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN__SHIFT 0x9
+#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN__SHIFT 0xa
+#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN__SHIFT 0xc
+#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN__SHIFT 0xd
+#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN__SHIFT 0xe
+#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN__SHIFT 0xf
+#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN_MASK 0x00008000L
+//UVD_CGC_MEM_SD_CTRL
+#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN__SHIFT 0x0
+#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN__SHIFT 0x1
+#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN__SHIFT 0x2
+#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN__SHIFT 0x3
+#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN__SHIFT 0x4
+#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN__SHIFT 0x5
+#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN__SHIFT 0x6
+#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN__SHIFT 0x7
+#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN__SHIFT 0x8
+#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN__SHIFT 0x9
+#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN__SHIFT 0xa
+#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN__SHIFT 0xc
+#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN__SHIFT 0xd
+#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN__SHIFT 0xe
+#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN__SHIFT 0xf
+#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN_MASK 0x00008000L
+//UVD_SW_SCRATCH_00
+#define UVD_SW_SCRATCH_00__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_00__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_01
+#define UVD_SW_SCRATCH_01__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_01__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_02
+#define UVD_SW_SCRATCH_02__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_02__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_03
+#define UVD_SW_SCRATCH_03__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_03__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_04
+#define UVD_SW_SCRATCH_04__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_04__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_05
+#define UVD_SW_SCRATCH_05__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_05__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_06
+#define UVD_SW_SCRATCH_06__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_06__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_07
+#define UVD_SW_SCRATCH_07__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_07__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_08
+#define UVD_SW_SCRATCH_08__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_08__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_09
+#define UVD_SW_SCRATCH_09__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_09__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_10
+#define UVD_SW_SCRATCH_10__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_10__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_11
+#define UVD_SW_SCRATCH_11__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_11__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_12
+#define UVD_SW_SCRATCH_12__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_12__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_13
+#define UVD_SW_SCRATCH_13__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_13__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_14
+#define UVD_SW_SCRATCH_14__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_14__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_15
+#define UVD_SW_SCRATCH_15__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_15__DATA_MASK 0xFFFFFFFFL
+//UVD_IH_SEM_CTRL
+#define UVD_IH_SEM_CTRL__IH_STALL_EN__SHIFT 0x0
+#define UVD_IH_SEM_CTRL__SEM_STALL_EN__SHIFT 0x1
+#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN__SHIFT 0x3
+#define UVD_IH_SEM_CTRL__IH_VMID__SHIFT 0x4
+#define UVD_IH_SEM_CTRL__IH_USER_DATA__SHIFT 0x8
+#define UVD_IH_SEM_CTRL__IH_RINGID__SHIFT 0x14
+#define UVD_IH_SEM_CTRL__IH_STALL_EN_MASK 0x00000001L
+#define UVD_IH_SEM_CTRL__SEM_STALL_EN_MASK 0x00000002L
+#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN_MASK 0x00000008L
+#define UVD_IH_SEM_CTRL__IH_VMID_MASK 0x000000F0L
+#define UVD_IH_SEM_CTRL__IH_USER_DATA_MASK 0x000FFF00L
+#define UVD_IH_SEM_CTRL__IH_RINGID_MASK 0x0FF00000L
+//UVD_MISC_FEATURE_CTL
+#define UVD_MISC_FEATURE_CTL__ROW_PREEMPT_EN__SHIFT 0x0
+#define UVD_MISC_FEATURE_CTL__PREEMPT_BLOCKIF_DIS_EN__SHIFT 0x1
+#define UVD_MISC_FEATURE_CTL__ROW_PREEMPT_EN_MASK 0x00000001L
+#define UVD_MISC_FEATURE_CTL__PREEMPT_BLOCKIF_DIS_EN_MASK 0x00000002L
+
+
+// addressBlock: lmi_adp_indirect
+//UVD_LMI_CRC0
+#define UVD_LMI_CRC0__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC1
+#define UVD_LMI_CRC1__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC2
+#define UVD_LMI_CRC2__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC3
+#define UVD_LMI_CRC3__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC10
+#define UVD_LMI_CRC10__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC10__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC11
+#define UVD_LMI_CRC11__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC11__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC12
+#define UVD_LMI_CRC12__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC12__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC13
+#define UVD_LMI_CRC13__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC13__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC14
+#define UVD_LMI_CRC14__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC14__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC15
+#define UVD_LMI_CRC15__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC15__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_SWAP_CNTL2
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x0
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x2
+#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x4
+#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000CL
+#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP_MASK 0x00000FF0L
+#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP_MASK 0x0000C000L
+//UVD_MEMCHECK_SYS_INT_EN
+#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x1b
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1c
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1d
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN_MASK 0x08000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN_MASK 0x10000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN_MASK 0x20000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN_MASK 0x80000000L
+//UVD_MEMCHECK_SYS_INT_STAT
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR__SHIFT 0xe
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK_SYS_INT_ACK
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK__SHIFT 0xe
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK_VCPU_INT_EN
+#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1a
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1b
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1c
+#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN__SHIFT 0x1d
+#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN_MASK 0x04000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN_MASK 0x08000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x10000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN_MASK 0x20000000L
+//UVD_MEMCHECK_VCPU_INT_STAT
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR__SHIFT 0xe
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK_VCPU_INT_ACK
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK__SHIFT 0xe
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK2_SYS_INT_STAT
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x1a
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x1b
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x1c
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x1d
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x04000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x08000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x10000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x20000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK2_SYS_INT_ACK
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x1a
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x1b
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x1c
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x1d
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x04000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x08000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x10000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x20000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK2_VCPU_INT_STAT
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR__SHIFT 0x1a
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR__SHIFT 0x1b
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR_MASK 0x04000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR_MASK 0x08000000L
+//UVD_MEMCHECK2_VCPU_INT_ACK
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK__SHIFT 0x1a
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK__SHIFT 0x1b
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK_MASK 0x04000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK_MASK 0x08000000L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
index e8fae5c77514..2bfd6d0ff050 100644
--- a/drivers/gpu/drm/amd/include/atom-bits.h
+++ b/drivers/gpu/drm/amd/include/atom-bits.h
@@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr)
#define CU8(ptr) get_u8(ctx->bios, (ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
- return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+ return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
}
#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
#define CU16(ptr) get_u16(ctx->bios, (ptr))
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index fa7d6ced786f..af3eebb4c9bc 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -610,6 +610,38 @@ struct atom_firmware_info_v3_4 {
uint32_t reserved[2];
};
+struct atom_firmware_info_v3_5 {
+ struct atom_common_table_header table_header;
+ uint32_t firmware_revision;
+ uint32_t bootup_clk_reserved[2];
+ uint32_t firmware_capability; // enum atombios_firmware_capability
+ uint32_t fw_protect_region_size_in_kb; /* FW allocates a write-protect region at the top of FB. */
+ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
+ uint32_t bootup_voltage_reserved[2];
+ uint8_t mem_module_id;
+ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
+ uint8_t hw_blt_mode; //0:HW_BLT_DMA_PIO_MODE; 1:HW_BLT_LITE_SDMA_MODE; 2:HW_BLT_PCI_IO_MODE
+ uint8_t reserved1;
+ uint32_t mc_baseaddr_high;
+ uint32_t mc_baseaddr_low;
+ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
+ uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id
+ uint8_t board_i2c_feature_slave_addr;
+ uint8_t ras_rom_i2c_slave_addr;
+ uint32_t bootup_voltage_reserved1;
+ uint32_t zfb_reserved;
+ // if pplib_pptable_id != 0, pplib gets the powerplay table from the driver instead of from the VBIOS
+ uint32_t pplib_pptable_id;
+ uint32_t hw_voltage_reserved[3];
+ uint32_t maco_pwrlimit_mw; // bomaco mode power limit, in milliwatts
+ uint32_t usb_pwrlimit_mw; // power limit when USB is enabled, in milliwatts
+ uint32_t fw_reserved_size_in_kb; // extra FW size reserved by VBIOS, in KB
+ uint32_t pspbl_init_reserved[3];
+ uint32_t spi_rom_size; // GPU spi rom size
+ uint16_t support_dev_in_objinfo;
+ uint16_t disp_phy_tunning_size;
+ uint32_t reserved[16];
+};
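The new atom_firmware_info_v3_5 keeps the common table header up front, so a consumer is expected to gate on the format/content revision before touching v3.5-only fields. A hedged sketch of that revision check; the header mirror below is an assumption, not quoted from atomfirmware.h:

#include <stdint.h>

/* Minimal mirror of the ATOM common table header used for the check; treat
 * this field layout as an assumption rather than a quote of the real header. */
struct common_table_header_view {
	uint16_t structuresize;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

/* A v3.5 consumer would gate on the revision pair before reading new fields. */
static int is_fw_info_v3_5(const struct common_table_header_view *hdr)
{
	return hdr->format_revision == 3 && hdr->content_revision == 5;
}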
/*
***************************************************************************
Data Table lcd_info structure
diff --git a/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h b/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
index 26044cb285d2..48542ea6882a 100644
--- a/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
@@ -26,13 +26,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 60a6536ff656..f40b6a03fe63 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -149,27 +149,26 @@ struct cgs_ops {
struct cgs_os_ops; /* To be defined in OS-specific CGS header */
-struct cgs_device
-{
+struct cgs_device {
const struct cgs_ops *ops;
/* to be embedded at the start of driver private structure */
};
/* Convenience macros that make CGS indirect function calls look like
* normal function calls */
-#define CGS_CALL(func,dev,...) \
+#define CGS_CALL(func, dev, ...) \
(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
-#define CGS_OS_CALL(func,dev,...) \
+#define CGS_OS_CALL(func, dev, ...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_read_register(dev,offset) \
- CGS_CALL(read_register,dev,offset)
-#define cgs_write_register(dev,offset,value) \
- CGS_CALL(write_register,dev,offset,value)
-#define cgs_read_ind_register(dev,space,index) \
- CGS_CALL(read_ind_register,dev,space,index)
-#define cgs_write_ind_register(dev,space,index,value) \
- CGS_CALL(write_ind_register,dev,space,index,value)
+#define cgs_read_register(dev, offset) \
+ CGS_CALL(read_register, dev, offset)
+#define cgs_write_register(dev, offset, value) \
+ CGS_CALL(write_register, dev, offset, value)
+#define cgs_read_ind_register(dev, space, index) \
+ CGS_CALL(read_ind_register, dev, space, index)
+#define cgs_write_ind_register(dev, space, index, value) \
+ CGS_CALL(write_ind_register, dev, space, index, value)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
index ce79e5de8ce3..1a73296a9a74 100644
--- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
} __maybe_unused;
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h b/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
index f84996a73de9..53cb4296df88 100644
--- a/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 1d93a0c574c9..acd1cef61b7c 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -27,7 +27,7 @@
#define PP_MAX_CLOCK_LEVELS 16
-enum amd_pp_display_config_type{
+enum amd_pp_display_config_type {
AMD_PP_DisplayConfigType_None = 0,
AMD_PP_DisplayConfigType_DP54 ,
AMD_PP_DisplayConfigType_DP432 ,
@@ -36,8 +36,8 @@ enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_DP243,
AMD_PP_DisplayConfigType_DP216,
AMD_PP_DisplayConfigType_DP162,
- AMD_PP_DisplayConfigType_HDMI6G ,
- AMD_PP_DisplayConfigType_HDMI297 ,
+ AMD_PP_DisplayConfigType_HDMI6G,
+ AMD_PP_DisplayConfigType_HDMI297,
AMD_PP_DisplayConfigType_HDMI162,
AMD_PP_DisplayConfigType_LVDS,
AMD_PP_DisplayConfigType_DVI,
@@ -45,8 +45,7 @@ enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_VGA
};
-struct single_display_configuration
-{
+struct single_display_configuration {
uint32_t controller_index;
uint32_t controller_id;
uint32_t signal_type;
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index edcb85560ced..32054ecf0b87 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -244,8 +244,7 @@ enum pp_df_cstate {
* @PP_PWR_LIMIT_DEFAULT: Default Power Limit
* @PP_PWR_LIMIT_MAX: Maximum Power Limit
*/
-enum pp_power_limit_level
-{
+enum pp_power_limit_level {
PP_PWR_LIMIT_MIN = -1,
PP_PWR_LIMIT_CURRENT,
PP_PWR_LIMIT_DEFAULT,
@@ -260,8 +259,7 @@ enum pp_power_limit_level
* @PP_PWR_TYPE_FAST: manages the ~10 ms moving average of APU power,
* where supported.
*/
-enum pp_power_type
-{
+enum pp_power_type {
PP_PWR_TYPE_SUSTAINED,
PP_PWR_TYPE_FAST,
};
diff --git a/drivers/gpu/drm/amd/include/navi12_ip_offset.h b/drivers/gpu/drm/amd/include/navi12_ip_offset.h
index d8fc00478b6a..e94d80ec8d92 100644
--- a/drivers/gpu/drm/amd/include/navi12_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/navi12_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/navi14_ip_offset.h b/drivers/gpu/drm/amd/include/navi14_ip_offset.h
index c39ef651adc6..508011288dea 100644
--- a/drivers/gpu/drm/amd/include/navi14_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/navi14_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 5aac8d545bdc..2e8e6c9875f6 100644
--- a/drivers/gpu/drm/amd/include/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -491,7 +491,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[];
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -501,7 +501,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -658,7 +658,7 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_SAMU_Table
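The pptable.h hunks above replace one-element placeholder arrays with C99 flexible array members, so sizeof() now covers only the fixed part and the trailing entries are reached through the stored count and entry size. A generic sketch of how such a structure is allocated (illustrative names, not the driver's allocator):

#include <stdint.h>
#include <stdlib.h>

/* Generic illustration of the flexible-array layout the patch switches to. */
struct entry_array {
	uint8_t  num_entries;
	uint8_t  entry_size;
	uint32_t entries[];         /* flexible array member: excluded from sizeof */
};

static struct entry_array *alloc_entry_array(uint8_t n)
{
	/* one allocation covering the fixed header plus n trailing entries */
	struct entry_array *a = malloc(sizeof(*a) + n * sizeof(a->entries[0]));

	if (a) {
		a->num_entries = n;
		a->entry_size = sizeof(a->entries[0]);
	}
	return a;
}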
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 7dff85c81e5a..fa023cfdf72d 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h b/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
index b07bc2dd895d..054790470800 100644
--- a/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
@@ -25,13 +25,11 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/v10_structs.h b/drivers/gpu/drm/amd/include/v10_structs.h
index c0e98a98a641..58002a83d1df 100644
--- a/drivers/gpu/drm/amd/include/v10_structs.h
+++ b/drivers/gpu/drm/amd/include/v10_structs.h
@@ -24,8 +24,7 @@
#ifndef V10_STRUCTS_H_
#define V10_STRUCTS_H_
-struct v10_gfx_mqd
-{
+struct v10_gfx_mqd {
uint32_t reserved_0; // offset: 0 (0x0)
uint32_t reserved_1; // offset: 1 (0x1)
uint32_t reserved_2; // offset: 2 (0x2)
diff --git a/drivers/gpu/drm/amd/include/vangogh_ip_offset.h b/drivers/gpu/drm/amd/include/vangogh_ip_offset.h
index 691073ed780e..695d7d04dfa6 100644
--- a/drivers/gpu/drm/amd/include/vangogh_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vangogh_ip_offset.h
@@ -28,13 +28,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/vega10_ip_offset.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
index 3a22a5d16919..1e1ca69f21f7 100644
--- a/drivers/gpu/drm/amd/include/vega10_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
@@ -24,13 +24,11 @@
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
index 1deb68f3d334..92cf2d9e767f 100644
--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -25,139 +25,137 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
-static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
+static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCE_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
+static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
+static const struct IP_BASE SDMA1_BASE = { { { { 0x00001860, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
+static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
{ { 0, 0x00009000, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
/* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files */
-static const struct IP_BASE VCE_BASE ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCE_BASE = { { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE XDMA_BASE ={ { { { 0x00003400, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6627ee07d52d..f84bfed50681 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -693,6 +693,21 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
return ret;
}
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_send_rma_reason(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
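The new amdgpu_dpm_send_rma_reason() follows the same wrapper shape as the neighbouring dpm helpers: reject unsupported paths, serialize on adev->pm.mutex, delegate to the SMU layer. A self-contained sketch of that guard/lock/delegate shape with hypothetical names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical device context mirroring the wrapper above: bail out when the
 * backend is absent, then serialize the delegated call. */
struct my_dev {
	int has_backend;              /* stand-in for is_support_sw_smu() */
	pthread_mutex_t lock;         /* stand-in for adev->pm.mutex */
};

static int my_backend_op(struct my_dev *dev)
{
	(void)dev;
	return 0;                     /* pretend the firmware call succeeded */
}

static int my_send_event(struct my_dev *dev)
{
	int ret;

	if (!dev->has_backend)
		return -EOPNOTSUPP;   /* feature not present on this path */

	pthread_mutex_lock(&dev->lock);
	ret = my_backend_op(dev);
	pthread_mutex_unlock(&dev->lock);

	return ret;
}

int main(void)
{
	struct my_dev dev = { 1, PTHREAD_MUTEX_INITIALIZER };

	printf("%d\n", my_send_event(&dev));
	return 0;
}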
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 087d57850304..f09b9d49297e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2034,6 +2034,63 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
return 0;
}
+static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+ *states = ATTR_STATE_SUPPORTED;
+
+ if (!amdgpu_dpm_is_overdrive_supported(adev)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+ if (gc_ver == IP_VERSION(9, 4, 3)) {
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ if (!(attr->flags & mask))
+ *states = ATTR_STATE_UNSUPPORTED;
+
+ return 0;
+}
+
+static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ uint32_t gc_ver;
+
+ *states = ATTR_STATE_SUPPORTED;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+ /* dcefclk node is not available on gfx 11.0.3 sriov */
+ if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
+ gc_ver < IP_VERSION(9, 0, 0) ||
+ !amdgpu_device_has_display_hardware(adev))
+ *states = ATTR_STATE_UNSUPPORTED;
+
+ /* SMU MP1 does not support dcefclk level setting; in addition,
+ * setting should not be allowed from a VF unless in one-VF mode.
+ */
+ if (gc_ver >= IP_VERSION(10, 0, 0) ||
+ (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+
+ return 0;
+}
+
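The two callbacks added above move per-attribute visibility and permission policy out of default_attr_update() and into hooks attached to the attribute table below via .attr_update. A generic sketch of that optional-hook pattern; all types and names here are hypothetical:

#include <stddef.h>

/* Each table entry may carry its own policy hook; a NULL hook falls back to
 * the shared default, mirroring how .attr_update overrides default_attr_update. */
struct attr_entry {
	const char *name;
	int (*update)(const struct attr_entry *e, int *supported);
};

static int default_update(const struct attr_entry *e, int *supported)
{
	(void)e;
	*supported = 1;
	return 0;
}

static int resolve_supported(const struct attr_entry *e, int *supported)
{
	return e->update ? e->update(e, supported) : default_update(e, supported);
}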
/* Following items will be read out to indicate current plpd policy:
* - -1: none
* - 0: disallow
@@ -2113,12 +2170,14 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_dcefclk_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
+ .attr_update = pp_od_clk_voltage_attr_update),
AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
@@ -2156,17 +2215,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
if (gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (gc_ver < IP_VERSION(9, 0, 0) ||
- !amdgpu_device_has_display_hardware(adev))
- *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
if (mp1_ver < IP_VERSION(10, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
- *states = ATTR_STATE_UNSUPPORTED;
- if (amdgpu_dpm_is_overdrive_supported(adev))
- *states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if ((adev->flags & AMD_IS_APU &&
gc_ver != IP_VERSION(9, 4, 3)) ||
@@ -2174,7 +2225,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
- if (adev->flags & AMD_IS_APU)
+ if (adev->flags & AMD_IS_APU ||
+ !adev->asic_funcs->get_pcie_usage)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(unique_id)) {
switch (gc_ver) {
@@ -2280,14 +2332,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
break;
}
- if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- /* SMU MP1 does not support dcefclk level setting */
- if (gc_ver >= IP_VERSION(10, 0, 0)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
- }
-
/* setting should not be allowed from VF if not in one VF mode */
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
dev_attr->attr.mode &= ~S_IWUGO;
@@ -2558,6 +2602,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err, ret;
+ u32 pwm_mode;
int value;
if (amdgpu_in_reset(adev))
@@ -2569,13 +2614,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (err)
return err;
+ if (value == 0)
+ pwm_mode = AMD_FAN_CTRL_NONE;
+ else if (value == 1)
+ pwm_mode = AMD_FAN_CTRL_MANUAL;
+ else if (value == 2)
+ pwm_mode = AMD_FAN_CTRL_AUTO;
+ else
+ return -EINVAL;
+
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
- ret = amdgpu_dpm_set_fan_control_mode(adev, value);
+ ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
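The pwm1_enable fix above stops forwarding the raw sysfs value and instead maps it onto the fan-control modes, rejecting anything outside the usual hwmon convention (0 no control, 1 manual, 2 automatic). A minimal sketch of that translation; the enum values below are assumptions for the sketch, not the driver's definitions:

#include <errno.h>

/* Assumed mode values for the sketch only. */
enum fan_ctrl_mode_example {
	FAN_CTRL_NONE_EXAMPLE,
	FAN_CTRL_MANUAL_EXAMPLE,
	FAN_CTRL_AUTO_EXAMPLE,
};

static int pwm1_enable_to_mode(int value, enum fan_ctrl_mode_example *mode)
{
	switch (value) {
	case 0: *mode = FAN_CTRL_NONE_EXAMPLE;   return 0;
	case 1: *mode = FAN_CTRL_MANUAL_EXAMPLE; return 0;
	case 2: *mode = FAN_CTRL_AUTO_EXAMPLE;   return 0;
	default: return -EINVAL;   /* reject anything else, as the patch now does */
	}
}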
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 3047ffe7f244..621200e0823f 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -450,6 +450,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev);
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index df4f20293c16..eb4da3666e05 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 //TODO ?
+ si_dpm_powergate_uvd(adev, true);
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index f503e61faa60..b1b4c09c3467 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -226,7 +226,7 @@ int atomctrl_set_engine_dram_timings_rv770(
return amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
- (uint32_t *)&engine_clock_parameters);
+ (uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters));
}
/*
@@ -297,7 +297,7 @@ int atomctrl_get_memory_pll_dividers_si(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- (uint32_t *)&mpll_parameters);
+ (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
if (0 == result) {
mpll_param->mpll_fb_divider.clk_frac =
@@ -345,7 +345,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- (uint32_t *)&mpll_parameters);
+ (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
if (!result)
mpll_param->mpll_post_divider =
@@ -366,7 +366,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- (uint32_t *)&mpll_parameters);
+ (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
/* VEGAM's mpll takes some time to finish computing */
udelay(10);
@@ -396,7 +396,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_parameters);
+ (uint32_t *)&pll_parameters, sizeof(pll_parameters));
if (0 == result) {
dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -420,7 +420,7 @@ int atomctrl_get_engine_pll_dividers_vi(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_patameters);
+ (uint32_t *)&pll_patameters, sizeof(pll_patameters));
if (0 == result) {
dividers->pll_post_divider =
@@ -457,7 +457,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_patameters);
+ (uint32_t *)&pll_patameters, sizeof(pll_patameters));
if (0 == result) {
dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -490,7 +490,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- (uint32_t *)&pll_patameters);
+ (uint32_t *)&pll_patameters, sizeof(pll_patameters));
if (0 == result) {
dividers->pll_post_divider =
@@ -773,7 +773,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -794,7 +794,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -814,7 +814,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -835,7 +835,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -857,7 +857,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -878,7 +878,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -909,7 +909,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
if (result)
return result;
@@ -1134,7 +1134,7 @@ int atomctrl_get_voltage_evv_on_sclk(
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- (uint32_t *)&get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
*voltage = result ? 0 :
le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
@@ -1179,7 +1179,7 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- (uint32_t *)&get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
if (0 != result)
return result;
@@ -1359,7 +1359,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&efuse_param);
+ (uint32_t *)&efuse_param, sizeof(efuse_param));
*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
return result;
@@ -1380,7 +1380,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
- (uint32_t *)&memory_clock_parameters);
+ (uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters));
return result;
}
@@ -1399,7 +1399,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- (uint32_t *)&get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
*voltage = result ? 0 :
le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
@@ -1526,7 +1526,7 @@ int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual
result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, SetVoltage),
- (uint32_t *)voltage_parameters);
+ (uint32_t *)voltage_parameters, sizeof(*voltage_parameters));
*virtual_voltage_id = voltage_parameters->usVoltageLevel;
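Every atomctrl call above now passes sizeof() of its parameter block alongside the buffer, so amdgpu_atom_execute_table() can bound what it writes back. A generic sketch of why the extra size argument matters (hypothetical executor, not the BIOS interpreter itself):

#include <stdint.h>
#include <string.h>

/* Hypothetical executor: with the buffer size available, result data copied
 * back to the caller can be clamped so it never overruns the parameter block. */
static int exec_table_example(const uint8_t *result, size_t result_len,
			      uint32_t *params, size_t params_size)
{
	size_t n = result_len < params_size ? result_len : params_size;

	memcpy(params, result, n);
	return 0;
}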
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index a47a47238e2b..82d540334318 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
@@ -258,7 +258,7 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
if (amdgpu_atom_execute_table(
- adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+ adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters, sizeof(pll_parameters)))
return -EINVAL;
pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
@@ -505,7 +505,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
if (amdgpu_atom_execute_table(
- adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+ adev->mode_info.atom_context, ix, (uint32_t *)&parameters, sizeof(parameters)))
return -EINVAL;
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 0ad947df777a..246b211b1e85 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -712,6 +712,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
smu_v13_0_7_set_ppt_funcs(smu);
break;
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
smu_v14_0_0_set_ppt_funcs(smu);
break;
default:
@@ -751,6 +752,7 @@ static int smu_early_init(void *handle)
static int smu_set_default_dpm_table(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int vcn_gate, jpeg_gate;
@@ -759,25 +761,34 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- vcn_gate = atomic_read(&power_gate->vcn_gated);
- jpeg_gate = atomic_read(&power_gate->jpeg_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+ vcn_gate = atomic_read(&power_gate->vcn_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+ jpeg_gate = atomic_read(&power_gate->jpeg_gated);
- ret = smu_dpm_set_vcn_enable(smu, true);
- if (ret)
- return ret;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ ret = smu_dpm_set_vcn_enable(smu, true);
+ if (ret)
+ return ret;
+ }
- ret = smu_dpm_set_jpeg_enable(smu, true);
- if (ret)
- goto err_out;
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ ret = smu_dpm_set_jpeg_enable(smu, true);
+ if (ret)
+ goto err_out;
+ }
ret = smu->ppt_funcs->set_default_dpm_table(smu);
if (ret)
dev_err(smu->adev->dev,
"Failed to setup default dpm clock tables!\n");
- smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+ smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
- smu_dpm_set_vcn_enable(smu, !vcn_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+ smu_dpm_set_vcn_enable(smu, !vcn_gate);
+
return ret;
}
@@ -1885,6 +1896,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
return 0;
default:
break;
@@ -3669,3 +3681,13 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
return ret;
}
+
+int smu_send_rma_reason(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
+ ret = smu->ppt_funcs->send_rma_reason(smu);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 66e84defd0b6..a870bdd49a4e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1342,6 +1342,11 @@ struct pptable_funcs {
int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
/**
+ * @send_rma_reason: message the RMA reason event to SMU.
+ */
+ int (*send_rma_reason)(struct smu_context *smu);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1588,5 +1593,6 @@ int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
+int smu_send_rma_reason(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 509e3cd483fb..86758051cb93 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -91,7 +91,8 @@
#define PPSMC_MSG_QueryValidMcaCeCount 0x3A
#define PPSMC_MSG_McaBankCeDumpDW 0x3B
#define PPSMC_MSG_SelectPLPDMode 0x40
-#define PPSMC_Message_Count 0x41
+#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
+#define PPSMC_Message_Count 0x44
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 953a767613b1..a941fdbf78b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -261,7 +261,8 @@
__SMU_DUMMY_MAP(SetSoftMaxVpe), \
__SMU_DUMMY_MAP(SetSoftMinVpe), \
__SMU_DUMMY_MAP(GetMetricsVersion), \
- __SMU_DUMMY_MAP(EnableUCLKShadow),
+ __SMU_DUMMY_MAP(EnableUCLKShadow), \
+ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 4cd43bbec910..1d96eb274d72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -2273,8 +2272,8 @@ static uint16_t arcturus_get_current_pcie_link_speed(struct smu_context *smu)
/* TODO: confirm this on real target */
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
- if ((esm_ctrl >> 15) & 0x1FFFF)
- return (uint16_t)(((esm_ctrl >> 8) & 0x3F) + 128);
+ if ((esm_ctrl >> 15) & 0x1)
+ return (uint16_t)(((esm_ctrl >> 8) & 0x7F) + 128);
return smu_v11_0_get_current_pcie_link_speed(smu);
}
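The ESM_CTRL change above (repeated for aldebaran and smu_v13_0_6 later in this patch) narrows the test to a single bit at position 15 and widens the speed field to seven bits. A sketch of the decode implied by the new masks; the field meaning is inferred from the patch, not from a register specification:

#include <stdint.h>

/* Field decode implied by the new masks: an enable bit at position 15 and a
 * seven-bit speed code in bits [14:8], reported with an offset of 128. */
static int decode_esm_link_speed(uint32_t esm_ctrl, uint16_t *speed)
{
	if (!((esm_ctrl >> 15) & 0x1))
		return -1;                           /* ESM not engaged */

	*speed = (uint16_t)(((esm_ctrl >> 8) & 0x7F) + 128);
	return 0;
}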
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 8d1d29ffb0f1..ed189a3878eb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 21fc033528fa..e2ad2b972ab0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index c7bfa68bf00f..f6545093bfc1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -514,7 +514,7 @@ static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
+ (uint32_t *)&input, sizeof(input));
if (ret)
return -EINVAL;
@@ -1432,24 +1432,24 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
/* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) {
- case 0x3:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
- case 0x4:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
- case 0x7:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
/*
* Increment the throttle interrupt counter
*/
@@ -1462,6 +1462,10 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
schedule_work(&smu->throttling_logging_work);
break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
}
}
}
@@ -1504,7 +1508,7 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
- 0xfe,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
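The magic numbers replaced above suggest the values behind the new names: 0xfe for the SMU-to-driver source id and 0x3/0x4/0x7 for the context ids, with 0x8/0x9 appearing in the smu_v13_0 path later in this patch. The definitions below are read off those substitutions, not quoted from the header:

/* Assumed values, read off the magic numbers this patch replaces. */
#define SMU_IH_INTERRUPT_ID_TO_DRIVER                  0xfe
#define SMU_IH_INTERRUPT_CONTEXT_ID_AC                 0x3
#define SMU_IH_INTERRUPT_CONTEXT_ID_DC                 0x4
#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL       0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY       0x9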
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2ff6deedef95..da1f43999d09 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -451,7 +451,7 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
#ifdef CONFIG_X86
/* AMD x86 APU only */
- smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+ smu->cpu_core_num = topology_num_cores_per_package();
#else
smu->cpu_core_num = 4;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 5e408a195860..ed15f5a0fd11 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -301,7 +301,7 @@ static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
+ (uint32_t *)&input, sizeof(input));
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index dd9bcbd630a1..f41ac6465f2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1682,8 +1682,8 @@ static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
/* TODO: confirm this on real target */
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
- if ((esm_ctrl >> 15) & 0x1FFFF)
- return (((esm_ctrl >> 8) & 0x3F) + 128);
+ if ((esm_ctrl >> 15) & 0x1)
+ return (((esm_ctrl >> 8) & 0x7F) + 128);
return smu_v13_0_get_current_pcie_link_speed(smu);
}
@@ -1746,10 +1746,12 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_fan_speed = 0;
- gpu_metrics->pcie_link_width =
- smu_v13_0_get_current_pcie_link_width(smu);
- gpu_metrics->pcie_link_speed =
- aldebaran_get_current_pcie_link_speed(smu);
+ if (!amdgpu_sriov_vf(smu->adev)) {
+ gpu_metrics->pcie_link_width =
+ smu_v13_0_get_current_pcie_link_width(smu);
+ gpu_metrics->pcie_link_speed =
+ aldebaran_get_current_pcie_link_speed(smu);
+ }
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index c486182ff275..48170bb5112e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1369,24 +1369,24 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
/* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) {
- case 0x3:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
adev->pm.ac_power = true;
break;
- case 0x4:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
adev->pm.ac_power = false;
break;
- case 0x7:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
/*
* Increment the throttle interrupt counter
*/
@@ -1399,7 +1399,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
schedule_work(&smu->throttling_logging_work);
break;
- case 0x8:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
high = smu->thermal_range.software_shutdown_temp +
smu->thermal_range.software_shutdown_temp_offset;
high = min_t(typeof(high),
@@ -1416,7 +1416,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
break;
- case 0x9:
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
high = min_t(typeof(high),
SMU_THERMAL_MAXIMUM_ALERT_TEMP,
smu->thermal_range.software_shutdown_temp);
@@ -1429,6 +1429,10 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
}
}
}
@@ -1473,7 +1477,7 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
- 0xfe,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a9954ffc02c5..9b80f18ea6c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 7e1941cf1796..3957af057d54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -45,6 +45,7 @@
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
@@ -171,6 +172,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+ MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
};
// clang-format on
@@ -1438,7 +1440,10 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
entry->src_data[1]);
schedule_work(&smu->throttling_logging_work);
}
-
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
break;
}
}
@@ -1574,6 +1579,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
+ struct smu_13_0_dpm_table *uclk_table =
+ &dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int ret;
@@ -1589,17 +1596,27 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return 0;
case AMD_DPM_FORCED_LEVEL_AUTO:
- if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
- (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
- return 0;
+ if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) ||
+ (gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) {
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, gfx_table->min, gfx_table->max);
+ if (ret)
+ return ret;
- ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
- smu, gfx_table->min, gfx_table->max);
- if (ret)
- return ret;
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ }
+
+ if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
+ /* Min UCLK is not expected to be changed */
+ ret = smu_v13_0_set_soft_freq_limited_range(
+ smu, SMU_UCLK, 0, uclk_table->max);
+ if (ret)
+ return ret;
+ pstate_table->uclk_pstate.curr.max = uclk_table->max;
+ }
+ pstate_table->uclk_pstate.custom.max = 0;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
return 0;
@@ -1622,7 +1639,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t max_clk;
int ret = 0;
- if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
+ clk_type != SMU_UCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
@@ -1632,18 +1650,31 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
if (min >= max) {
dev_err(smu->adev->dev,
- "Minimum GFX clk should be less than the maximum allowed clock\n");
+ "Minimum clk should be less than the maximum allowed clock\n");
return -EINVAL;
}
- if ((min == pstate_table->gfxclk_pstate.curr.min) &&
- (max == pstate_table->gfxclk_pstate.curr.max))
- return 0;
+ if (clk_type == SMU_GFXCLK) {
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
- ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
- if (!ret) {
- pstate_table->gfxclk_pstate.curr.min = min;
- pstate_table->gfxclk_pstate.curr.max = max;
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, min, max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+
+ if (clk_type == SMU_UCLK) {
+ if (max == pstate_table->uclk_pstate.curr.max)
+ return 0;
+ /* Only max clock limiting is allowed for UCLK */
+ ret = smu_v13_0_set_soft_freq_limited_range(
+ smu, SMU_UCLK, 0, max);
+ if (!ret)
+ pstate_table->uclk_pstate.curr.max = max;
}
return ret;
@@ -1736,6 +1767,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu,
+ SMU_FEATURE_DPM_UCLK_BIT)) {
+ dev_warn(smu->adev->dev,
+ "UCLK_LIMITS setting not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (input[0] == 0) {
+ dev_info(smu->adev->dev,
+ "Setting min UCLK level is not supported");
+ return -EINVAL;
+ } else if (input[0] == 1) {
+ if (input[1] > dpm_context->dpm_tables.uclk_table.max) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.uclk_table.max);
+ pstate_table->uclk_pstate.custom.max =
+ pstate_table->uclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->uclk_pstate.custom.max = input[1];
+ }
+ break;
+
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
dev_err(smu->adev->dev,
@@ -1746,8 +1811,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return smu_v13_0_6_set_soft_freq_limited_range(
+ ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk);
+
+ if (ret)
+ return ret;
+
+ min_clk = dpm_context->dpm_tables.uclk_table.min;
+ max_clk = dpm_context->dpm_tables.uclk_table.max;
+ ret = smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_UCLK, min_clk, max_clk);
+ if (ret)
+ return ret;
+ pstate_table->uclk_pstate.custom.max = 0;
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1767,8 +1843,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = pstate_table->gfxclk_pstate.custom.min;
max_clk = pstate_table->gfxclk_pstate.custom.max;
- return smu_v13_0_6_set_soft_freq_limited_range(
+ ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk);
+
+ if (ret)
+ return ret;
+
+ if (!pstate_table->uclk_pstate.custom.max)
+ return 0;
+
+ min_clk = pstate_table->uclk_pstate.curr.min;
+ max_clk = pstate_table->uclk_pstate.custom.max;
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_UCLK, min_clk, max_clk);
}
break;
default:
@@ -2060,8 +2147,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
/* TODO: confirm this on real target */
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
- if ((esm_ctrl >> 15) & 0x1FFFF)
- return (((esm_ctrl >> 8) & 0x3F) + 128);
+ if ((esm_ctrl >> 15) & 0x1)
+ return (((esm_ctrl >> 8) & 0x7F) + 128);
speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
@@ -2141,14 +2228,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
- link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
- if (link_width_level > MAX_LINK_WIDTH)
- link_width_level = 0;
-
- gpu_metrics->pcie_link_width =
- DECODE_LANE_WIDTH(link_width_level);
- gpu_metrics->pcie_link_speed =
- smu_v13_0_6_get_current_pcie_link_speed(smu);
+ if (!amdgpu_sriov_vf(adev)) {
+ link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
+ if (link_width_level > MAX_LINK_WIDTH)
+ link_width_level = 0;
+
+ gpu_metrics->pcie_link_width =
+ DECODE_LANE_WIDTH(link_width_level);
+ gpu_metrics->pcie_link_speed =
+ smu_v13_0_6_get_current_pcie_link_speed(smu);
+ }
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
@@ -2219,8 +2308,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
SMU_RESET_MODE_2);
- /* This is similar to FLR, wait till max FLR timeout */
- msleep(100);
+ /* Reset takes a bit longer, wait for 200ms. */
+ msleep(200);
dev_dbg(smu->adev->dev, "restore config space...\n");
/* Restore the config space saved during init */
@@ -2376,6 +2465,24 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ /* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
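+ /* 0x00555a00 == (85 << 16) | (90 << 8) | 0, i.e. the 85.90.0 cutoff mentioned above */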
+ if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to send BadPageThreshold event to SMU\n",
+ __func__);
+
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -2547,18 +2654,22 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
uint64_t status0;
+ uint32_t ext_error_code;
+ uint32_t odecc_err_cnt;
status0 = entry->regs[MCA_REG_IDX_STATUS];
+ ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status0);
+ odecc_err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
*count = 0;
return 0;
}
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
- *count = 1;
- else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
- *count = 1;
+ if (umc_v12_0_is_deferred_error(adev, status0) ||
+ umc_v12_0_is_uncorrectable_error(adev, status0) ||
+ umc_v12_0_is_correctable_error(adev, status0))
+ *count = (ext_error_code == 0) ? odecc_err_cnt : 1;
return 0;
}
@@ -2857,6 +2968,143 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
.mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
};
+static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_v13_0_6_mca_set_debug_mode(smu, enable);
+}
+
+static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count)
+{
+ uint32_t msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ msg = SMU_MSG_QueryValidMcaCount;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ msg = SMU_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, msg, count);
+ if (ret) {
+ *count = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
+ enum aca_error_type type, u32 *count)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ case ACA_ERROR_TYPE_CE:
+ ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+ int idx, int offset, u32 *val)
+{
+ uint32_t msg, param;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ msg = SMU_MSG_McaBankDumpDW;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ msg = SMU_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
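+ /* bank index in the upper 16 bits, dword-aligned byte offset in the lower 16 bits */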
+ param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
+
+ return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
+}
+
+static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+ int idx, int offset, u32 *val, int count)
+{
+ int ret, i;
+
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type,
+ int idx, int reg_idx, u64 *val)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ u32 data[2] = {0, 0};
+ int ret;
+
+ if (!val || reg_idx >= ACA_REG_IDX_COUNT)
+ return -EINVAL;
+
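+ /* each 64-bit bank register is read as two dword dumps: low word at reg_idx * 8, high word at reg_idx * 8 + 4 */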
+ ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ *val = (u64)data[1] << 32 | data[0];
+
+ dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
+ type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+
+ return 0;
+}
+
+static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
+ enum aca_error_type type, int idx, struct aca_bank *bank)
+{
+ int i, ret, count;
+
+ count = min_t(int, 16, ARRAY_SIZE(bank->regs));
+ for (i = 0; i < count; i++) {
+ ret = aca_bank_read_reg(adev, type, idx, i, &bank->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
+ .max_ue_bank_count = 12,
+ .max_ce_bank_count = 12,
+ .set_debug_mode = aca_smu_set_debug_mode,
+ .get_valid_aca_count = aca_smu_get_valid_aca_count,
+ .get_valid_aca_bank = aca_smu_get_valid_aca_bank,
+};
+
static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
enum pp_xgmi_plpd_mode mode)
{
@@ -2895,13 +3143,6 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
return ret;
}
-static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
- void *table)
-{
- /* Support ecc info by default */
- return 0;
-}
-
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2956,7 +3197,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
- .get_ecc_info = smu_v13_0_6_get_ecc_info,
+ .send_rma_reason = smu_v13_0_6_send_rma_reason,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -2969,4 +3210,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
+ amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 0ffdb58af74e..3dc7b60cb075 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 4894f7ee737b..b06a3cc43305 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -53,6 +53,8 @@
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
+
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -229,10 +231,12 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
case IP_VERSION(14, 0, 0):
- if ((smu->smc_fw_version < 0x5d3a00))
- dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
+ case IP_VERSION(14, 0, 1):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
amdgpu_ip_version(adev, MP1_HWIP, 0));
@@ -736,6 +740,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -892,7 +897,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu)
// TODO: THM related
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
- 0xfe,
+ SMU_IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
if (ret)
return ret;
@@ -1630,11 +1635,16 @@ int smu_v14_0_baco_exit(struct smu_context *smu)
int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
uint16_t index;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
+ }
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
- /* Param 1 to tell PMFW to enable GFXOFF feature */
- return smu_cmn_send_msg_without_waiting(smu, index, 1);
+ return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
}
int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 47fdbae4adfc..9310c4758e38 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -261,7 +261,10 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->MpipuclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = metrics->GfxActivity / 100;
+ if ((smu->smc_fw_version > 0x5d4600))
+ *value = metrics->GfxActivity;
+ else
+ *value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->VcnActivity / 100;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 00cd615bbcdc..b8dbd4e25348 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -378,8 +378,15 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
res = __smu_cmn_reg2errno(smu, reg);
if (res != 0)
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
- if (read_arg)
+ if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
+ readval: 0x%08x\n",
+ smu_get_message_name(smu, msg), index, param, reg, *read_arg);
+ } else {
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
+ smu_get_message_name(smu, msg), index, param, reg);
+ }
Out:
if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
amdgpu_device_halt(adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index cc590e27d88a..81bfce1406e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -30,6 +30,16 @@
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
+#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x2
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x3
+#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x4
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
+#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8be235144f6d..b5518ff97165 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -604,10 +604,10 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
* ADV75xx helpers
*/
-static struct edid *adv7511_get_edid(struct adv7511 *adv7511,
- struct drm_connector *connector)
+static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
+ struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
@@ -621,31 +621,44 @@ static struct edid *adv7511_get_edid(struct adv7511 *adv7511,
edid_i2c_addr);
}
- edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+ drm_edid = drm_edid_read_custom(connector, adv7511_get_edid_block, adv7511);
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
+ if (drm_edid) {
+ /*
+ * FIXME: The CEC physical address should be set using
+ * cec_s_phys_addr(adap,
+ * connector->display_info.source_physical_address, false) from
+ * a path that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
+ drm_detect_hdmi_monitor(edid));
- return edid;
+ cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
+ } else {
+ cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
+ }
+
+ return drm_edid;
}
static int adv7511_get_modes(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
unsigned int count;
- edid = adv7511_get_edid(adv7511, connector);
+ drm_edid = adv7511_edid_read(adv7511, connector);
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return count;
}
@@ -953,12 +966,12 @@ static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge
return adv7511_detect(adv, NULL);
}
-static struct edid *adv7511_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_get_edid(adv, connector);
+ return adv7511_edid_read(adv, connector);
}
static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
@@ -977,7 +990,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
- .get_edid = adv7511_bridge_get_edid,
+ .edid_read = adv7511_bridge_edid_read,
.hpd_notify = adv7511_bridge_hpd_notify,
};
@@ -1277,17 +1290,6 @@ static int adv7511_probe(struct i2c_client *i2c)
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
- if (i2c->irq) {
- init_waitqueue_head(&adv7511->wq);
-
- ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
- adv7511_irq_handler,
- IRQF_ONESHOT, dev_name(dev),
- adv7511);
- if (ret)
- goto err_unregister_cec;
- }
-
adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
@@ -1311,6 +1313,17 @@ static int adv7511_probe(struct i2c_client *i2c)
adv7511_audio_init(dev, adv7511);
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_unregister_audio;
+ }
+
if (adv7511->info->has_dsi) {
ret = adv7533_attach_dsi(adv7511);
if (ret)
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 29d91493b101..9d96d28d6fe8 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1784,24 +1784,14 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
return ret;
}
-static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
+static const struct drm_edid *anx7625_edid_read(struct anx7625_data *ctx)
{
struct device *dev = ctx->dev;
struct s_edid_data *p_edid = &ctx->slimport_edid_p;
int edid_num;
- u8 *edid;
- edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
- if (!edid) {
- DRM_DEV_ERROR(dev, "Fail to allocate buffer\n");
- return NULL;
- }
-
- if (ctx->slimport_edid_p.edid_block_num > 0) {
- memcpy(edid, ctx->slimport_edid_p.edid_raw_data,
- FOUR_BLOCK_SIZE);
- return (struct edid *)edid;
- }
+ if (ctx->slimport_edid_p.edid_block_num > 0)
+ goto out;
pm_runtime_get_sync(dev);
_anx7625_hpd_polling(ctx, 5000 * 100);
@@ -1810,14 +1800,14 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
if (edid_num < 1) {
DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
- kfree(edid);
return NULL;
}
p_edid->edid_block_num = edid_num;
- memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE);
- return (struct edid *)edid;
+out:
+ return drm_edid_alloc(ctx->slimport_edid_p.edid_raw_data,
+ FOUR_BLOCK_SIZE);
}
static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
@@ -2492,15 +2482,15 @@ anx7625_bridge_detect(struct drm_bridge *bridge)
return anx7625_sink_detect(ctx);
}
-static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
- return anx7625_get_edid(ctx);
+ return anx7625_edid_read(ctx);
}
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
@@ -2515,7 +2505,7 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
- .get_edid = anx7625_bridge_get_edid,
+ .edid_read = anx7625_bridge_edid_read,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index bb55f697a181..6886db2d9e00 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
kfree(adev);
}
-static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
+ auxiliary_device_uninit(_adev);
}
/**
- * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * devm_drm_dp_hpd_bridge_alloc - allocate an HPD DisplayPort bridge
* @parent: device instance providing this bridge
* @np: device node pointer corresponding to this bridge instance
*
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
* DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
* able to send the HPD events.
*
- * Return: device instance that will handle created bridge or an error code
- * encoded into the pointer.
+ * Return: bridge auxiliary device pointer or an error pointer
*/
-struct device *drm_dp_hpd_bridge_register(struct device *parent,
- struct device_node *np)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
{
struct auxiliary_device *adev;
int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
kfree(adev);
return ERR_PTR(ret);
}
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+ if (ret)
return ERR_PTR(ret);
- }
- ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+ return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+ auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register an HPD DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Return: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ int ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register an HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle the created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+ if (IS_ERR(adev))
+ return ERR_CAST(adev);
+
+ ret = devm_drm_dp_hpd_bridge_add(parent, adev);
if (ret)
return ERR_PTR(ret);
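As a reading aid only (not part of the patch): with the alloc/add split exported above, a consumer driver would presumably call the two helpers back to back, doing any setup that must precede bus registration in between. The probe function name, its arguments, and the placement below are hypothetical; only devm_drm_dp_hpd_bridge_alloc() and devm_drm_dp_hpd_bridge_add() are taken from this hunk.

/* Hypothetical caller sketch; assumes the usual driver-core and DRM bridge headers. */
static int example_dp_probe(struct device *dev, struct device_node *dp_node)
{
	struct auxiliary_device *adev;

	/* Allocate the HPD bridge auxiliary device; it is not yet visible on the bus. */
	adev = devm_drm_dp_hpd_bridge_alloc(dev, dp_node);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	/* ... stash adev, set up anything that must exist before the bridge goes live ... */

	/* Make it visible; removal is handled by the devm action registered on add. */
	return devm_drm_dp_hpd_bridge_add(dev, adev);
}

The pre-existing drm_dp_hpd_bridge_register() wrapper shown immediately above does exactly this pairing for callers that do not need the intermediate step.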
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 7d470527455b..e226acc5c15e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1505,33 +1505,35 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
mhdp->link_up = false;
}
-static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
- struct drm_connector *connector)
+static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp,
+ struct drm_connector *connector)
{
if (!mhdp->plugged)
return NULL;
- return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
+ return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp);
}
static int cdns_mhdp_get_modes(struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num_modes;
if (!mhdp->plugged)
return 0;
- edid = cdns_mhdp_get_edid(mhdp, connector);
- if (!edid) {
+ drm_edid = cdns_mhdp_edid_read(mhdp, connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid) {
dev_err(mhdp->dev, "Failed to read EDID\n");
return 0;
}
- drm_connector_update_edid_property(connector, edid);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
/*
* HACK: Warn about unsupported display formats until we deal
@@ -2220,12 +2222,12 @@ static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *brid
return cdns_mhdp_detect(mhdp);
}
-static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
- return cdns_mhdp_get_edid(mhdp, connector);
+ return cdns_mhdp_edid_read(mhdp, connector);
}
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
@@ -2239,7 +2241,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
.atomic_reset = cdns_mhdp_bridge_atomic_reset,
.atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
.detect = cdns_mhdp_bridge_detect,
- .get_edid = cdns_mhdp_bridge_get_edid,
+ .edid_read = cdns_mhdp_bridge_edid_read,
.hpd_enable = cdns_mhdp_bridge_hpd_enable,
.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index 483c28c7fc99..c83486cf6b15 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -230,14 +230,14 @@ static const struct drm_connector_funcs ch7033_connector_funcs = {
static int ch7033_connector_get_modes(struct drm_connector *connector)
{
struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
- edid = drm_bridge_get_edid(priv->next_bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = drm_bridge_edid_read(priv->next_bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (drm_edid) {
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
} else {
ret = drm_add_modes_noedid(connector, 1920, 1080);
drm_set_preferred_mode(connector, 1024, 768);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 08bd5695ddae..ab8e00baf3f1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -81,12 +81,12 @@ display_connector_detect(struct drm_bridge *bridge)
}
}
-static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct display_connector *conn = to_display_connector(bridge);
- return drm_get_edid(connector, conn->bridge.ddc);
+ return drm_edid_read_ddc(connector, conn->bridge.ddc);
}
/*
@@ -172,7 +172,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
.detect = display_connector_detect,
- .get_edid = display_connector_get_edid,
+ .edid_read = display_connector_edid_read,
.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 5a4f3d58501e..5965e8027529 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -3,6 +3,24 @@ if ARCH_MXC || COMPILE_TEST
config DRM_IMX_LDB_HELPER
tristate
+config DRM_IMX8MP_DW_HDMI_BRIDGE
+ tristate "Freescale i.MX8MP HDMI-TX bridge support"
+ depends on OF
+ depends on COMMON_CLK
+ select DRM_DW_HDMI
+ select DRM_IMX8MP_HDMI_PVI
+ select PHY_FSL_SAMSUNG_HDMI_PHY
+ help
+ Choose this to enable support for the internal HDMI encoder found
+ on the i.MX8MP SoC.
+
+config DRM_IMX8MP_HDMI_PVI
+ tristate "Freescale i.MX8MP HDMI PVI bridge support"
+ depends on OF
+ help
+ Choose this to enable support for the internal HDMI TX Parallel
+ Video Interface found on the Freescale i.MX8MP SoC.
+
config DRM_IMX8QM_LDB
tristate "Freescale i.MX8QM LVDS display bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
index 2b0c2e44aa1b..edb0a7b71b30 100644
--- a/drivers/gpu/drm/bridge/imx/Makefile
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
+obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
+obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_COMBINER) += imx8qxp-pixel-combiner.o
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
new file mode 100644
index 000000000000..f2a09c879e3d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define HTX_PVI_CTRL 0x0
+#define PVI_CTRL_OP_VSYNC_POL BIT(18)
+#define PVI_CTRL_OP_HSYNC_POL BIT(17)
+#define PVI_CTRL_OP_DE_POL BIT(16)
+#define PVI_CTRL_INP_VSYNC_POL BIT(14)
+#define PVI_CTRL_INP_HSYNC_POL BIT(13)
+#define PVI_CTRL_INP_DE_POL BIT(12)
+#define PVI_CTRL_MODE_MASK GENMASK(2, 1)
+#define PVI_CTRL_MODE_LCDIF 2
+#define PVI_CTRL_EN BIT(0)
+
+struct imx8mp_hdmi_pvi {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct drm_bridge *next_bridge;
+ void __iomem *regs;
+};
+
+static inline struct imx8mp_hdmi_pvi *
+to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct imx8mp_hdmi_pvi, bridge);
+}
+
+static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ bridge, flags);
+}
+
+static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+ struct drm_connector_state *conn_state;
+ const struct drm_display_mode *mode;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ u32 bus_flags = 0, val;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ if (WARN_ON(pm_runtime_resume_and_get(pvi->dev)))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ val = FIELD_PREP(PVI_CTRL_MODE_MASK, PVI_CTRL_MODE_LCDIF) | PVI_CTRL_EN;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= PVI_CTRL_OP_VSYNC_POL | PVI_CTRL_INP_VSYNC_POL;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= PVI_CTRL_OP_HSYNC_POL | PVI_CTRL_INP_HSYNC_POL;
+
+ if (pvi->next_bridge->timings)
+ bus_flags = pvi->next_bridge->timings->input_bus_flags;
+ else if (bridge_state)
+ bus_flags = bridge_state->input_bus_cfg.flags;
+
+ if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ val |= PVI_CTRL_OP_DE_POL | PVI_CTRL_INP_DE_POL;
+
+ writel(val, pvi->regs + HTX_PVI_CTRL);
+}
+
+static void imx8mp_hdmi_pvi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+
+ writel(0x0, pvi->regs + HTX_PVI_CTRL);
+
+ pm_runtime_put(pvi->dev);
+}
+
+static u32 *
+imx8mp_hdmi_pvi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
+ struct drm_bridge *next_bridge = pvi->next_bridge;
+ struct drm_bridge_state *next_state;
+
+ if (!next_bridge->funcs->atomic_get_input_bus_fmts)
+ return NULL;
+
+ next_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ return next_bridge->funcs->atomic_get_input_bus_fmts(next_bridge,
+ next_state,
+ crtc_state,
+ conn_state,
+ output_fmt,
+ num_input_fmts);
+}
+
+static const struct drm_bridge_funcs imx_hdmi_pvi_bridge_funcs = {
+ .attach = imx8mp_hdmi_pvi_bridge_attach,
+ .atomic_enable = imx8mp_hdmi_pvi_bridge_enable,
+ .atomic_disable = imx8mp_hdmi_pvi_bridge_disable,
+ .atomic_get_input_bus_fmts = imx8mp_hdmi_pvi_bridge_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
+{
+ struct device_node *remote;
+ struct imx8mp_hdmi_pvi *pvi;
+
+ pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
+ if (!pvi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pvi);
+ pvi->dev = &pdev->dev;
+
+ pvi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pvi->regs))
+ return PTR_ERR(pvi->regs);
+
+ /* Get the next bridge in the pipeline. */
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+ if (!remote)
+ return -EINVAL;
+
+ pvi->next_bridge = of_drm_find_bridge(remote);
+ of_node_put(remote);
+
+ if (!pvi->next_bridge)
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "could not find next bridge\n");
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* Register the bridge. */
+ pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
+ pvi->bridge.of_node = pdev->dev.of_node;
+ pvi->bridge.timings = pvi->next_bridge->timings;
+
+ drm_bridge_add(&pvi->bridge);
+
+ return 0;
+}
+
+static int imx8mp_hdmi_pvi_remove(struct platform_device *pdev)
+{
+ struct imx8mp_hdmi_pvi *pvi = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&pvi->bridge);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx8mp_hdmi_pvi_match[] = {
+ {
+ .compatible = "fsl,imx8mp-hdmi-pvi",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match);
+
+static struct platform_driver imx8mp_hdmi_pvi_driver = {
+ .probe = imx8mp_hdmi_pvi_probe,
+ .remove = imx8mp_hdmi_pvi_remove,
+ .driver = {
+ .name = "imx-hdmi-pvi",
+ .of_match_table = imx8mp_hdmi_pvi_match,
+ },
+};
+module_platform_driver(imx8mp_hdmi_pvi_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI TX Parallel Video Interface bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
new file mode 100644
index 000000000000..89fc432ac611
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_modes.h>
+
+struct imx8mp_hdmi {
+ struct dw_hdmi_plat_data plat_data;
+ struct dw_hdmi *dw_hdmi;
+ struct clk *pixclk;
+};
+
+static enum drm_mode_status
+imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct imx8mp_hdmi *hdmi = (struct imx8mp_hdmi *)data;
+
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ if (clk_round_rate(hdmi->pixclk, mode->clock * 1000) !=
+ mode->clock * 1000)
+ return MODE_CLOCK_RANGE;
+
+ /* We don't support double-clocked or interlaced modes */
+ if ((mode->flags & DRM_MODE_FLAG_DBLCLK) ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static int imx8mp_hdmi_phy_init(struct dw_hdmi *dw_hdmi, void *data,
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ return 0;
+}
+
+static void imx8mp_hdmi_phy_disable(struct dw_hdmi *dw_hdmi, void *data)
+{
+}
+
+static void im8mp_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
+{
+ /*
+ * Just release PHY core from reset, all other power management is done
+ * by the PHY driver.
+ */
+ dw_hdmi_phy_gen1_reset(hdmi);
+
+ dw_hdmi_phy_setup_hpd(hdmi, data);
+}
+
+static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = {
+ .init = imx8mp_hdmi_phy_init,
+ .disable = imx8mp_hdmi_phy_disable,
+ .setup_hpd = im8mp_hdmi_phy_setup_hpd,
+ .read_hpd = dw_hdmi_phy_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+};
+
+static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_hdmi_plat_data *plat_data;
+ struct imx8mp_hdmi *hdmi;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ plat_data = &hdmi->plat_data;
+
+ hdmi->pixclk = devm_clk_get(dev, "pix");
+ if (IS_ERR(hdmi->pixclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->pixclk),
+ "Unable to get pixel clock\n");
+
+ plat_data->mode_valid = imx8mp_hdmi_mode_valid;
+ plat_data->phy_ops = &imx8mp_hdmi_phy_ops;
+ plat_data->phy_name = "SAMSUNG HDMI TX PHY";
+ plat_data->priv_data = hdmi;
+ plat_data->phy_force_vendor = true;
+
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi->dw_hdmi))
+ return PTR_ERR(hdmi->dw_hdmi);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+}
+
+static int imx8mp_dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ dw_hdmi_remove(hdmi->dw_hdmi);
+
+ return 0;
+}
+
+static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev)
+{
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_resume(hdmi->dw_hdmi);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx8mp_dw_hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend,
+ imx8mp_dw_hdmi_pm_resume)
+};
+
+static const struct of_device_id imx8mp_dw_hdmi_of_table[] = {
+ { .compatible = "fsl,imx8mp-hdmi-tx" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table);
+
+static struct platform_driver imx8mp_dw_hdmi_platform_driver = {
+ .probe = imx8mp_dw_hdmi_probe,
+ .remove = imx8mp_dw_hdmi_remove,
+ .driver = {
+ .name = "imx8mp-dw-hdmi-tx",
+ .of_match_table = imx8mp_dw_hdmi_of_table,
+ .pm = &imx8mp_dw_hdmi_pm_ops,
+ },
+};
+
+module_platform_driver(imx8mp_dw_hdmi_platform_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI encoder driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 2f300f5ca051..27334173e911 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -458,7 +458,7 @@ struct it6505 {
/* it6505 driver hold option */
bool enable_drv_hold;
- struct edid *cached_edid;
+ const struct drm_edid *cached_edid;
};
struct it6505_step_train_para {
@@ -2240,11 +2240,13 @@ static void it6505_link_training_work(struct work_struct *work)
ret = it6505_link_start_auto_train(it6505);
DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d",
ret ? "pass" : "failed", it6505->auto_train_retry);
- it6505->auto_train_retry--;
if (ret) {
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
it6505_link_train_ok(it6505);
return;
+ } else {
+ it6505->auto_train_retry--;
}
it6505_dump(it6505);
@@ -2261,7 +2263,7 @@ static void it6505_plugged_status_to_codec(struct it6505 *it6505)
static void it6505_remove_edid(struct it6505 *it6505)
{
- kfree(it6505->cached_edid);
+ drm_edid_free(it6505->cached_edid);
it6505->cached_edid = NULL;
}
@@ -3032,15 +3034,16 @@ it6505_bridge_detect(struct drm_bridge *bridge)
return it6505_detect(it6505);
}
-static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *it6505_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
if (!it6505->cached_edid) {
- it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
- it6505);
+ it6505->cached_edid = drm_edid_read_custom(connector,
+ it6505_get_edid_block,
+ it6505);
if (!it6505->cached_edid) {
DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
@@ -3048,7 +3051,7 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
}
}
- return drm_edid_duplicate(it6505->cached_edid);
+ return drm_edid_dup(it6505->cached_edid);
}
static const struct drm_bridge_funcs it6505_bridge_funcs = {
@@ -3063,7 +3066,7 @@ static const struct drm_bridge_funcs it6505_bridge_funcs = {
.atomic_pre_enable = it6505_bridge_atomic_pre_enable,
.atomic_post_disable = it6505_bridge_atomic_post_disable,
.detect = it6505_bridge_detect,
- .get_edid = it6505_bridge_get_edid,
+ .edid_read = it6505_bridge_edid_read,
};
static __maybe_unused int it6505_bridge_resume(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 1cf3fb1f13dc..1c3433b5e366 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -874,33 +874,33 @@ static void it66121_bridge_hpd_disable(struct drm_bridge *bridge)
dev_err(ctx->dev, "failed to disable HPD IRQ\n");
}
-static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *it66121_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
mutex_lock(&ctx->lock);
ret = it66121_preamble_ddc(ctx);
if (ret) {
- edid = NULL;
+ drm_edid = NULL;
goto out_unlock;
}
ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
IT66121_DDC_HEADER_EDID);
if (ret) {
- edid = NULL;
+ drm_edid = NULL;
goto out_unlock;
}
- edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+ drm_edid = drm_edid_read_custom(connector, it66121_get_edid_block, ctx);
out_unlock:
mutex_unlock(&ctx->lock);
- return edid;
+ return drm_edid;
}
static const struct drm_bridge_funcs it66121_bridge_funcs = {
@@ -916,7 +916,7 @@ static const struct drm_bridge_funcs it66121_bridge_funcs = {
.mode_set = it66121_bridge_mode_set,
.mode_valid = it66121_bridge_mode_valid,
.detect = it66121_bridge_detect,
- .get_edid = it66121_bridge_get_edid,
+ .edid_read = it66121_bridge_edid_read,
.hpd_enable = it66121_bridge_hpd_enable,
.hpd_disable = it66121_bridge_hpd_disable,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 273157428c82..e7c4bef74aa4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -440,16 +440,16 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret = -1;
int num = 0;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- edid = drm_bridge_get_edid(lt->hdmi_port, connector);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- num = drm_add_edid_modes(connector, edid);
+ drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (drm_edid) {
+ num = drm_edid_connector_add_modes(connector);
} else {
return ret;
}
@@ -459,7 +459,7 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
if (ret)
num = ret;
- kfree(edid);
+ drm_edid_free(drm_edid);
return num;
}
@@ -620,8 +620,8 @@ lt8912_bridge_detect(struct drm_bridge *bridge)
return lt8912_check_cable_status(lt);
}
-static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *lt8912_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
@@ -630,7 +630,7 @@ static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
* given to the hdmi connector node.
*/
if (lt->hdmi_port->ops & DRM_BRIDGE_OP_EDID)
- return drm_bridge_get_edid(lt->hdmi_port, connector);
+ return drm_bridge_edid_read(lt->hdmi_port, connector);
dev_warn(lt->dev, "The connected bridge does not supports DRM_BRIDGE_OP_EDID\n");
return NULL;
@@ -642,7 +642,7 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.mode_set = lt8912_bridge_mode_set,
.enable = lt8912_bridge_enable,
.detect = lt8912_bridge_detect,
- .get_edid = lt8912_bridge_get_edid,
+ .edid_read = lt8912_bridge_edid_read,
};
static int lt8912_bridge_resume(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 9663601ce098..a9c7e2b07ea1 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -18,6 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -846,13 +847,13 @@ lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
lt9611_sleep_setup(lt9611);
}
-static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *lt9611_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
lt9611_power_on(lt9611);
- return drm_do_get_edid(connector, lt9611_get_edid_block, lt9611);
+ return drm_edid_read_custom(connector, lt9611_get_edid_block, lt9611);
}
static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge)
@@ -892,7 +893,7 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
.detect = lt9611_bridge_detect,
- .get_edid = lt9611_bridge_get_edid,
+ .edid_read = lt9611_bridge_edid_read,
.hpd_enable = lt9611_bridge_hpd_enable,
.atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index e971b75e90ad..bcf8bccd86d6 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -294,12 +295,12 @@ static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
unsigned int count;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
- edid = drm_bridge_get_edid(&lt9611uxc->bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return count;
}
@@ -494,8 +495,8 @@ static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, siz
return 0;
};
-static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *lt9611uxc_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
int ret;
@@ -509,7 +510,7 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
return NULL;
}
- return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
+ return drm_edid_read_custom(connector, lt9611uxc_get_edid_block, lt9611uxc);
}
static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = {
@@ -517,7 +518,7 @@ static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = {
.mode_valid = lt9611uxc_bridge_mode_valid,
.mode_set = lt9611uxc_bridge_mode_set,
.detect = lt9611uxc_bridge_detect,
- .get_edid = lt9611uxc_bridge_get_edid,
+ .edid_read = lt9611uxc_bridge_edid_read,
};
static int lt9611uxc_parse_dt(struct device *dev,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e93083bbec9d..4480523244e4 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -91,26 +91,26 @@ static int stdp2690_read_block(void *context, u8 *buf, unsigned int block, size_
return 0;
}
-static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *ge_b850v3_lvds_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct i2c_client *client;
client = ge_b850v3_lvds_ptr->stdp2690_i2c;
- return drm_do_get_edid(connector, stdp2690_read_block, client);
+ return drm_edid_read_custom(connector, stdp2690_read_block, client);
}
static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num_modes;
- edid = ge_b850v3_lvds_get_edid(&ge_b850v3_lvds_ptr->bridge, connector);
+ drm_edid = ge_b850v3_lvds_edid_read(&ge_b850v3_lvds_ptr->bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid_connector_update(connector, drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return num_modes;
}
@@ -226,7 +226,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
static const struct drm_bridge_funcs ge_b850v3_lvds_funcs = {
.attach = ge_b850v3_lvds_attach,
.detect = ge_b850v3_lvds_bridge_detect,
- .get_edid = ge_b850v3_lvds_get_edid,
+ .edid_read = ge_b850v3_lvds_edid_read,
};
static int ge_b850v3_lvds_init(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7c0076e49953..ed93fd4c3265 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -154,10 +154,11 @@ static void ptn3460_disable(struct drm_bridge *bridge)
}
-static struct edid *ptn3460_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *ptn3460_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
+ const struct drm_edid *drm_edid = NULL;
bool power_off;
u8 *edid;
int ret;
@@ -175,27 +176,28 @@ static struct edid *ptn3460_get_edid(struct drm_bridge *bridge,
EDID_LENGTH);
if (ret) {
kfree(edid);
- edid = NULL;
goto out;
}
+ drm_edid = drm_edid_alloc(edid, EDID_LENGTH);
+
out:
if (power_off)
ptn3460_disable(&ptn_bridge->bridge);
- return (struct edid *)edid;
+ return drm_edid;
}
static int ptn3460_connector_get_modes(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num_modes;
- edid = ptn3460_get_edid(&ptn_bridge->bridge, connector);
- drm_connector_update_edid_property(connector, edid);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = ptn3460_edid_read(&ptn_bridge->bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return num_modes;
}
@@ -254,7 +256,7 @@ static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.pre_enable = ptn3460_pre_enable,
.disable = ptn3460_disable,
.attach = ptn3460_bridge_attach,
- .get_edid = ptn3460_get_edid,
+ .edid_read = ptn3460_edid_read,
};
static int ptn3460_probe(struct i2c_client *client)
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 63a1a0c88be4..95fedc68b0ae 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -96,6 +96,7 @@
#define DSIM_MFLUSH_VS BIT(29)
/* This flag is valid only for exynos3250/3472/5260/5430 */
#define DSIM_CLKLANE_STOP BIT(30)
+#define DSIM_NON_CONTINUOUS_CLKLANE BIT(31)
/* DSIM_ESCMODE */
#define DSIM_TX_TRIGGER_RST BIT(4)
@@ -945,8 +946,12 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* power consumption.
*/
if (driver_data->has_clklane_stop &&
- dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+ reg |= DSIM_NON_CONTINUOUS_CLKLANE;
+
reg |= DSIM_CLKLANE_STOP;
+ }
samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
lanes_mask = BIT(dsi->lanes) - 1;
@@ -1498,6 +1503,7 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
+ samsung_dsim_set_display_enable(dsi, false);
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1506,8 +1512,6 @@ static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- samsung_dsim_set_display_enable(dsi, false);
-
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put_sync(dsi->dev);
}
@@ -1992,11 +1996,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
else
dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high;
- if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host)
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host) {
ret = dsi->plat_data->host_ops->register_host(dsi);
-
- if (ret)
- goto err_disable_runtime;
+ if (ret)
+ goto err_disable_runtime;
+ }
return 0;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 4560ae9cbce1..8f84e98249c7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -278,39 +278,35 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static struct edid *sii902x_get_edid(struct sii902x *sii902x,
- struct drm_connector *connector)
+static const struct drm_edid *sii902x_edid_read(struct sii902x *sii902x,
+ struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
mutex_lock(&sii902x->mutex);
- edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
- if (edid) {
- if (drm_detect_hdmi_monitor(edid))
- sii902x->sink_is_hdmi = true;
- else
- sii902x->sink_is_hdmi = false;
- }
+ drm_edid = drm_edid_read_ddc(connector, sii902x->i2cmux->adapter[0]);
mutex_unlock(&sii902x->mutex);
- return edid;
+ return drm_edid;
}
static int sii902x_get_modes(struct drm_connector *connector)
{
struct sii902x *sii902x = connector_to_sii902x(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int num = 0;
- edid = sii902x_get_edid(sii902x, connector);
- drm_connector_update_edid_property(connector, edid);
- if (edid) {
- num = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = sii902x_edid_read(sii902x, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (drm_edid) {
+ num = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
}
+ sii902x->sink_is_hdmi = connector->display_info.is_hdmi;
+
return num;
}
@@ -465,12 +461,12 @@ static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge
return sii902x_detect(sii902x);
}
-static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *sii902x_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
- return sii902x_get_edid(sii902x, connector);
+ return sii902x_edid_read(sii902x, connector);
}
static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
@@ -514,7 +510,7 @@ static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.disable = sii902x_bridge_disable,
.enable = sii902x_bridge_enable,
.detect = sii902x_bridge_detect,
- .get_edid = sii902x_bridge_get_edid,
+ .edid_read = sii902x_bridge_edid_read,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index cbe8e778d7c7..5813a2c4fc5e 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -51,18 +51,20 @@ drm_connector_to_simple_bridge(struct drm_connector *connector)
static int simple_bridge_get_modes(struct drm_connector *connector)
{
struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
if (sbridge->next_bridge->ops & DRM_BRIDGE_OP_EDID) {
- edid = drm_bridge_get_edid(sbridge->next_bridge, connector);
- if (!edid)
+ drm_edid = drm_bridge_edid_read(sbridge->next_bridge, connector);
+ if (!drm_edid)
DRM_INFO("EDID read failed. Fallback to standard modes\n");
} else {
- edid = NULL;
+ drm_edid = NULL;
}
- if (!edid) {
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid) {
/*
* In case we cannot retrieve the EDIDs (missing or broken DDC
* bus from the next bridge), fallback on the XGA standards and
@@ -73,9 +75,8 @@ static int simple_bridge_get_modes(struct drm_connector *connector)
return ret;
}
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index aca5bb0866f8..cceb5aab6c83 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -2453,27 +2454,35 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
return result;
}
-static struct edid *dw_hdmi_get_edid(struct dw_hdmi *hdmi,
- struct drm_connector *connector)
+static const struct drm_edid *dw_hdmi_edid_read(struct dw_hdmi *hdmi,
+ struct drm_connector *connector)
{
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
if (!hdmi->ddc)
return NULL;
- edid = drm_get_edid(connector, hdmi->ddc);
- if (!edid) {
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
+ if (!drm_edid) {
dev_dbg(hdmi->dev, "failed to get edid\n");
return NULL;
}
+ /*
+ * FIXME: This should use connector->display_info.is_hdmi and
+ * connector->display_info.has_audio from a path that has read the EDID
+ * and called drm_edid_connector_update().
+ */
+ edid = drm_edid_raw(drm_edid);
+
dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
edid->width_cm, edid->height_cm);
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- return edid;
+ return drm_edid;
}
/* -----------------------------------------------------------------------------
@@ -2492,17 +2501,16 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
- edid = dw_hdmi_get_edid(hdmi, connector);
- if (!edid)
- return 0;
+ drm_edid = dw_hdmi_edid_read(hdmi, connector);
- drm_connector_update_edid_property(connector, edid);
- cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid_connector_update(connector, drm_edid);
+ cec_notifier_set_phys_addr(hdmi->cec_notifier,
+ connector->display_info.source_physical_address);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return ret;
}
@@ -2979,12 +2987,12 @@ static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge
return dw_hdmi_detect(hdmi);
}
-static struct edid *dw_hdmi_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *dw_hdmi_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct dw_hdmi *hdmi = bridge->driver_private;
- return dw_hdmi_get_edid(hdmi, connector);
+ return dw_hdmi_edid_read(hdmi, connector);
}
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
@@ -3001,7 +3009,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.mode_set = dw_hdmi_bridge_mode_set,
.mode_valid = dw_hdmi_bridge_mode_valid,
.detect = dw_hdmi_bridge_detect,
- .get_edid = dw_hdmi_bridge_get_edid,
+ .edid_read = dw_hdmi_bridge_edid_read,
};
/* -----------------------------------------------------------------------------
@@ -3541,6 +3549,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.interlace_allowed = true;
hdmi->bridge.ddc = hdmi->ddc;
hdmi->bridge.of_node = pdev->dev.of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
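
Illustration only, not part of the patch: once drm_edid_connector_update() has run, the parsed sink capabilities are available in connector->display_info, which is what the sii902x and dw-hdmi hunks above switch to instead of re-parsing the raw EDID. A minimal sketch; struct my_hdmi is hypothetical.

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <media/cec-notifier.h>

/* Hypothetical driver context, for illustration only. */
struct my_hdmi {
	bool sink_is_hdmi;
	struct cec_notifier *cec_notifier;
};

static void my_hdmi_update_sink_caps(struct my_hdmi *hdmi,
				     struct drm_connector *connector,
				     const struct drm_edid *drm_edid)
{
	drm_edid_connector_update(connector, drm_edid);

	/* Valid only after the connector EDID has been updated above. */
	hdmi->sink_is_hdmi = connector->display_info.is_hdmi;
	cec_notifier_set_phys_addr(hdmi->cec_notifier,
				   connector->display_info.source_physical_address);
}
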
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 615cc8f950d7..166f9a3e9622 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -41,8 +41,24 @@
/* Registers */
+/* DSI D-PHY Layer registers */
+#define D0W_DPHYCONTTX 0x0004
+#define CLW_DPHYCONTTX 0x0020
+#define D0W_DPHYCONTRX 0x0024
+#define D1W_DPHYCONTRX 0x0028
+#define D2W_DPHYCONTRX 0x002c
+#define D3W_DPHYCONTRX 0x0030
+#define COM_DPHYCONTRX 0x0038
+#define CLW_CNTRL 0x0040
+#define D0W_CNTRL 0x0044
+#define D1W_CNTRL 0x0048
+#define D2W_CNTRL 0x004c
+#define D3W_CNTRL 0x0050
+#define TESTMODE_CNTRL 0x0054
+
/* PPI layer registers */
#define PPI_STARTPPI 0x0104 /* START control bit */
+#define PPI_BUSYPPI 0x0108 /* PPI busy status */
#define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */
#define LPX_PERIOD 3
#define PPI_LANEENABLE 0x0134
@@ -59,6 +75,7 @@
/* DSI layer registers */
#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */
+#define DSI_BUSYDSI 0x0208 /* DSI busy status */
#define DSI_LANEENABLE 0x0210 /* Enables each lane */
#define DSI_RX_START BIT(0)
@@ -69,6 +86,20 @@
#define LANEENABLE_L2EN BIT(1)
#define LANEENABLE_L3EN BIT(2)
+#define DSI_LANESTATUS0 0x0214 /* DSI lane status 0 */
+#define DSI_LANESTATUS1 0x0218 /* DSI lane status 1 */
+#define DSI_INTSTATUS 0x0220 /* Interrupt Status */
+#define DSI_INTMASK 0x0224 /* Interrupt Mask */
+#define DSI_INTCLR 0x0228 /* Interrupt Clear */
+#define DSI_LPTXTO 0x0230 /* LPTX Time Out Counter */
+
+/* DSI General Registers */
+#define DSIERRCNT 0x0300 /* DSI Error Count Register */
+
+/* DSI Application Layer Registers */
+#define APLCTRL 0x0400 /* Application layer Control Register */
+#define RDPKTLN 0x0404 /* DSI Read packet Length Register */
+
/* Display Parallel Input Interface */
#define DPIPXLFMT 0x0440
#define VS_POL_ACTIVE_LOW (1 << 10)
@@ -114,35 +145,39 @@
#define VFUEN BIT(0) /* Video Frame Timing Upload */
/* System */
-#define TC_IDREG 0x0500
-#define SYSSTAT 0x0508
-#define SYSCTRL 0x0510
-#define DP0_AUDSRC_NO_INPUT (0 << 3)
-#define DP0_AUDSRC_I2S_RX (1 << 3)
-#define DP0_VIDSRC_NO_INPUT (0 << 0)
-#define DP0_VIDSRC_DSI_RX (1 << 0)
-#define DP0_VIDSRC_DPI_RX (2 << 0)
-#define DP0_VIDSRC_COLOR_BAR (3 << 0)
-#define SYSRSTENB 0x050c
+#define TC_IDREG 0x0500 /* Chip ID and Revision ID */
+#define SYSBOOT 0x0504 /* System BootStrap Status Register */
+#define SYSSTAT 0x0508 /* System Status Register */
+#define SYSRSTENB 0x050c /* System Reset/Enable Register */
#define ENBI2C (1 << 0)
#define ENBLCD0 (1 << 2)
#define ENBBM (1 << 3)
#define ENBDSIRX (1 << 4)
#define ENBREG (1 << 5)
#define ENBHDCP (1 << 8)
-#define GPIOM 0x0540
-#define GPIOC 0x0544
-#define GPIOO 0x0548
-#define GPIOI 0x054c
-#define INTCTL_G 0x0560
-#define INTSTS_G 0x0564
+#define SYSCTRL 0x0510 /* System Control Register */
+#define DP0_AUDSRC_NO_INPUT (0 << 3)
+#define DP0_AUDSRC_I2S_RX (1 << 3)
+#define DP0_VIDSRC_NO_INPUT (0 << 0)
+#define DP0_VIDSRC_DSI_RX (1 << 0)
+#define DP0_VIDSRC_DPI_RX (2 << 0)
+#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+#define GPIOM 0x0540 /* GPIO Mode Control Register */
+#define GPIOC 0x0544 /* GPIO Direction Control Register */
+#define GPIOO 0x0548 /* GPIO Output Register */
+#define GPIOI 0x054c /* GPIO Input Register */
+#define INTCTL_G 0x0560 /* General Interrupts Control Register */
+#define INTSTS_G 0x0564 /* General Interrupts Status Register */
#define INT_SYSERR BIT(16)
#define INT_GPIO_H(x) (1 << (x == 0 ? 2 : 10))
#define INT_GPIO_LC(x) (1 << (x == 0 ? 3 : 11))
-#define INT_GP0_LCNT 0x0584
-#define INT_GP1_LCNT 0x0588
+#define TEST_INT_C 0x0570 /* Test Interrupts Control Register */
+#define TEST_INT_S 0x0574 /* Test Interrupts Status Register */
+
+#define INT_GP0_LCNT 0x0584 /* Interrupt GPIO0 Low Count Value Register */
+#define INT_GP1_LCNT 0x0588 /* Interrupt GPIO1 Low Count Value Register */
/* Control */
#define DP0CTL 0x0600
@@ -152,9 +187,12 @@
#define DP_EN BIT(0) /* Enable DPTX function */
/* Clocks */
-#define DP0_VIDMNGEN0 0x0610
-#define DP0_VIDMNGEN1 0x0614
-#define DP0_VMNGENSTATUS 0x0618
+#define DP0_VIDMNGEN0 0x0610 /* DP0 Video Force M Value Register */
+#define DP0_VIDMNGEN1 0x0614 /* DP0 Video Force N Value Register */
+#define DP0_VMNGENSTATUS 0x0618 /* DP0 Video Current M Value Register */
+#define DP0_AUDMNGEN0 0x0628 /* DP0 Audio Force M Value Register */
+#define DP0_AUDMNGEN1 0x062c /* DP0 Audio Force N Value Register */
+#define DP0_AMNGENSTATUS 0x0630 /* DP0 Audio Current M Value Register */
/* Main Channel */
#define DP0_SECSAMPLE 0x0640
@@ -224,8 +262,22 @@
#define DP0_SNKLTCHGREQ 0x06d4
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
-
-#define DP1_SRCCTRL 0x07a0
+#define DP0_TPATDAT0 0x06e8 /* DP0 Test Pattern bits 29 to 0 */
+#define DP0_TPATDAT1 0x06ec /* DP0 Test Pattern bits 59 to 30 */
+#define DP0_TPATDAT2 0x06f0 /* DP0 Test Pattern bits 89 to 60 */
+#define DP0_TPATDAT3 0x06f4 /* DP0 Test Pattern bits 119 to 90 */
+
+#define AUDCFG0 0x0700 /* DP0 Audio Config0 Register */
+#define AUDCFG1 0x0704 /* DP0 Audio Config1 Register */
+#define AUDIFDATA0 0x0708 /* DP0 Audio Info Frame Bytes 3 to 0 */
+#define AUDIFDATA1 0x070c /* DP0 Audio Info Frame Bytes 7 to 4 */
+#define AUDIFDATA2 0x0710 /* DP0 Audio Info Frame Bytes 11 to 8 */
+#define AUDIFDATA3 0x0714 /* DP0 Audio Info Frame Bytes 15 to 12 */
+#define AUDIFDATA4 0x0718 /* DP0 Audio Info Frame Bytes 19 to 16 */
+#define AUDIFDATA5 0x071c /* DP0 Audio Info Frame Bytes 23 to 20 */
+#define AUDIFDATA6 0x0720 /* DP0 Audio Info Frame Bytes 27 to 24 */
+
+#define DP1_SRCCTRL 0x07a0 /* DP1 Control Register */
/* PHY */
#define DP_PHY_CTRL 0x0800
@@ -238,6 +290,25 @@
#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+#define DP_PHY_CFG_WR 0x0810 /* DP PHY Configuration Test Write Register */
+#define DP_PHY_CFG_RD 0x0814 /* DP PHY Configuration Test Read Register */
+#define DP0_AUX_PHY_CTRL 0x0820 /* DP0 AUX PHY Control Register */
+#define DP0_MAIN_PHY_DBG 0x0840 /* DP0 Main PHY Test Debug Register */
+
+/* I2S */
+#define I2SCFG 0x0880 /* I2S Audio Config 0 Register */
+#define I2SCH0STAT0 0x0888 /* I2S Audio Channel 0 Status Bytes 3 to 0 */
+#define I2SCH0STAT1 0x088c /* I2S Audio Channel 0 Status Bytes 7 to 4 */
+#define I2SCH0STAT2 0x0890 /* I2S Audio Channel 0 Status Bytes 11 to 8 */
+#define I2SCH0STAT3 0x0894 /* I2S Audio Channel 0 Status Bytes 15 to 12 */
+#define I2SCH0STAT4 0x0898 /* I2S Audio Channel 0 Status Bytes 19 to 16 */
+#define I2SCH0STAT5 0x089c /* I2S Audio Channel 0 Status Bytes 23 to 20 */
+#define I2SCH1STAT0 0x08a0 /* I2S Audio Channel 1 Status Bytes 3 to 0 */
+#define I2SCH1STAT1 0x08a4 /* I2S Audio Channel 1 Status Bytes 7 to 4 */
+#define I2SCH1STAT2 0x08a8 /* I2S Audio Channel 1 Status Bytes 11 to 8 */
+#define I2SCH1STAT3 0x08ac /* I2S Audio Channel 1 Status Bytes 15 to 12 */
+#define I2SCH1STAT4 0x08b0 /* I2S Audio Channel 1 Status Bytes 19 to 16 */
+#define I2SCH1STAT5 0x08b4 /* I2S Audio Channel 1 Status Bytes 23 to 20 */
/* PLL */
#define DP0_PLLCTRL 0x0900
@@ -546,9 +617,14 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
continue;
for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
for (div = 1; div <= 16; div++) {
- u32 clk;
+ u32 clk, iclk;
u64 tmp;
+ /* PCLK PLL input unit clock ... 6..40 MHz */
+ iclk = refclk / (div * ext_div[i_pre]);
+ if (iclk < 6000000 || iclk > 40000000)
+ continue;
+
tmp = pixelclock * ext_div[i_pre] *
ext_div[i_post] * div;
do_div(tmp, refclk);
@@ -1575,19 +1651,19 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&tc->mode, mode);
}
-static struct edid *tc_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
- return drm_get_edid(connector, &tc->aux.ddc);
+ return drm_edid_read_ddc(connector, &tc->aux.ddc);
}
static int tc_connector_get_modes(struct drm_connector *connector)
{
struct tc_data *tc = connector_to_tc(connector);
int num_modes;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
ret = tc_get_display_props(tc);
@@ -1602,9 +1678,10 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
- edid = tc_get_edid(&tc->bridge, connector);
- num_modes = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ drm_edid = tc_edid_read(&tc->bridge, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return num_modes;
}
@@ -1773,7 +1850,7 @@ static const struct drm_bridge_funcs tc_edp_bridge_funcs = {
.atomic_enable = tc_edp_bridge_atomic_enable,
.atomic_disable = tc_edp_bridge_atomic_disable,
.detect = tc_bridge_detect,
- .get_edid = tc_get_edid,
+ .edid_read = tc_edid_read,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
@@ -1833,16 +1910,16 @@ static bool tc_readable_reg(struct device *dev, unsigned int reg)
case 0x1f4:
/* DSI Protocol Layer */
case DSI_STARTDSI:
- case 0x208:
+ case DSI_BUSYDSI:
case DSI_LANEENABLE:
- case 0x214:
- case 0x218:
- case 0x220:
+ case DSI_LANESTATUS0:
+ case DSI_LANESTATUS1:
+ case DSI_INTSTATUS:
case 0x224:
case 0x228:
case 0x230:
/* DSI General */
- case 0x300:
+ case DSIERRCNT:
/* DSI Application Layer */
case 0x400:
case 0x404:
@@ -1978,13 +2055,20 @@ static bool tc_readable_reg(struct device *dev, unsigned int reg)
}
static const struct regmap_range tc_volatile_ranges[] = {
+ regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI),
+ regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI),
+ regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS),
+ regmap_reg_range(DSIERRCNT, DSIERRCNT),
+ regmap_reg_range(VFUEN0, VFUEN0),
+ regmap_reg_range(SYSSTAT, SYSSTAT),
+ regmap_reg_range(GPIOI, GPIOI),
+ regmap_reg_range(INTSTS_G, INTSTS_G),
+ regmap_reg_range(DP0_VMNGENSTATUS, DP0_VMNGENSTATUS),
+ regmap_reg_range(DP0_AMNGENSTATUS, DP0_AMNGENSTATUS),
regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
- regmap_reg_range(VFUEN0, VFUEN0),
- regmap_reg_range(INTSTS_G, INTSTS_G),
- regmap_reg_range(GPIOI, GPIOI),
};
static const struct regmap_access_table tc_volatile_table = {
@@ -1992,12 +2076,28 @@ static const struct regmap_access_table tc_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
};
-static bool tc_writeable_reg(struct device *dev, unsigned int reg)
-{
- return (reg != TC_IDREG) &&
- (reg != DP0_LTSTAT) &&
- (reg != DP0_SNKLTCHGREQ);
-}
+static const struct regmap_range tc_precious_ranges[] = {
+ regmap_reg_range(SYSSTAT, SYSSTAT),
+};
+
+static const struct regmap_access_table tc_precious_table = {
+ .yes_ranges = tc_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tc_precious_ranges),
+};
+
+static const struct regmap_range tc_non_writeable_ranges[] = {
+ regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI),
+ regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI),
+ regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS),
+ regmap_reg_range(TC_IDREG, SYSSTAT),
+ regmap_reg_range(GPIOI, GPIOI),
+ regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
+};
+
+static const struct regmap_access_table tc_writeable_table = {
+ .no_ranges = tc_non_writeable_ranges,
+ .n_no_ranges = ARRAY_SIZE(tc_non_writeable_ranges),
+};
static const struct regmap_config tc_regmap_config = {
.name = "tc358767",
@@ -2008,7 +2108,8 @@ static const struct regmap_config tc_regmap_config = {
.cache_type = REGCACHE_MAPLE,
.readable_reg = tc_readable_reg,
.volatile_table = &tc_volatile_table,
- .writeable_reg = tc_writeable_reg,
+ .precious_table = &tc_precious_table,
+ .wr_table = &tc_writeable_table,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
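
Illustration only, not part of the patch: a minimal sketch of describing register access rules with regmap_access_table entries (volatile, precious, writeable) instead of per-register callbacks, as the tc358767 hunks above do. The register offsets here are hypothetical placeholders.

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range my_volatile_ranges[] = {
	regmap_reg_range(0x0108, 0x0108),	/* e.g. a busy/status register */
	regmap_reg_range(0x0560, 0x0564),	/* e.g. interrupt control/status */
};

static const struct regmap_access_table my_volatile_table = {
	.yes_ranges = my_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(my_volatile_ranges),
};

static const struct regmap_range my_precious_ranges[] = {
	regmap_reg_range(0x0508, 0x0508),	/* e.g. a read-to-clear status register */
};

static const struct regmap_access_table my_precious_table = {
	.yes_ranges = my_precious_ranges,
	.n_yes_ranges = ARRAY_SIZE(my_precious_ranges),
};

static const struct regmap_range my_non_writeable_ranges[] = {
	regmap_reg_range(0x0500, 0x0508),	/* e.g. read-only ID/status block */
};

static const struct regmap_access_table my_writeable_table = {
	/* .no_ranges: everything is writeable except the listed ranges */
	.no_ranges = my_non_writeable_ranges,
	.n_no_ranges = ARRAY_SIZE(my_non_writeable_ranges),
};

static const struct regmap_config my_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.cache_type = REGCACHE_MAPLE,
	.volatile_table = &my_volatile_table,
	.precious_table = &my_precious_table,
	.wr_table = &my_writeable_table,
};
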
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 62cc3893dca5..84698a0b27a8 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -197,7 +197,7 @@ struct ti_sn65dsi86 {
DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
#if defined(CONFIG_PWM)
- struct pwm_chip pchip;
+ struct pwm_chip *pchip;
bool pwm_enabled;
atomic_t pwm_pin_busy;
#endif
@@ -1207,19 +1207,19 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
: connector_status_disconnected;
}
-static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
- return drm_get_edid(connector, &pdata->aux.ddc);
+ return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
.mode_valid = ti_sn_bridge_mode_valid,
- .get_edid = ti_sn_bridge_get_edid,
+ .edid_read = ti_sn_bridge_edid_read,
.detect = ti_sn_bridge_detect,
.atomic_pre_enable = ti_sn_bridge_atomic_pre_enable,
.atomic_enable = ti_sn_bridge_atomic_enable,
@@ -1374,7 +1374,7 @@ static void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata)
static struct ti_sn65dsi86 *pwm_chip_to_ti_sn_bridge(struct pwm_chip *chip)
{
- return container_of(chip, struct ti_sn65dsi86, pchip);
+ return pwmchip_get_drvdata(chip);
}
static int ti_sn_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -1415,7 +1415,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (!pdata->pwm_enabled) {
- ret = pm_runtime_resume_and_get(chip->dev);
+ ret = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (ret < 0)
return ret;
}
@@ -1431,7 +1431,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX),
SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX));
if (ret) {
- dev_err(chip->dev, "failed to mux in PWM function\n");
+ dev_err(pwmchip_parent(chip), "failed to mux in PWM function\n");
goto out;
}
}
@@ -1507,7 +1507,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div);
if (ret) {
- dev_err(chip->dev, "failed to update PWM_PRE_DIV\n");
+ dev_err(pwmchip_parent(chip), "failed to update PWM_PRE_DIV\n");
goto out;
}
@@ -1519,7 +1519,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED);
ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv);
if (ret) {
- dev_err(chip->dev, "failed to update PWM_EN/PWM_INV\n");
+ dev_err(pwmchip_parent(chip), "failed to update PWM_EN/PWM_INV\n");
goto out;
}
@@ -1527,7 +1527,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
out:
if (!pdata->pwm_enabled)
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return ret;
}
@@ -1585,24 +1585,28 @@ static const struct pwm_ops ti_sn_pwm_ops = {
static int ti_sn_pwm_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
+ struct pwm_chip *chip;
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
- pdata->pchip.dev = &adev->dev;
- pdata->pchip.ops = &ti_sn_pwm_ops;
- pdata->pchip.npwm = 1;
- pdata->pchip.of_xlate = of_pwm_single_xlate;
- pdata->pchip.of_pwm_n_cells = 1;
+ pdata->pchip = chip = devm_pwmchip_alloc(&adev->dev, 1, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ pwmchip_set_drvdata(chip, pdata);
+
+ chip->ops = &ti_sn_pwm_ops;
+ chip->of_xlate = of_pwm_single_xlate;
devm_pm_runtime_enable(&adev->dev);
- return pwmchip_add(&pdata->pchip);
+ return pwmchip_add(chip);
}
static void ti_sn_pwm_remove(struct auxiliary_device *adev)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
- pwmchip_remove(&pdata->pchip);
+ pwmchip_remove(pdata->pchip);
if (pdata->pwm_enabled)
pm_runtime_put_sync(&adev->dev);
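
Illustration only, not part of the patch: a minimal sketch of the devm_pwmchip_alloc() probe pattern the ti-sn65dsi86 hunks above convert to, where the PWM core owns the struct pwm_chip, driver state is attached with pwmchip_set_drvdata(), and callbacks reach the parent device through pwmchip_parent(). struct my_drvdata and my_pwm_ops are hypothetical, with the callbacks elided.

#include <linux/err.h>
#include <linux/pwm.h>

struct my_drvdata { int dummy; };	/* hypothetical driver state */

static const struct pwm_ops my_pwm_ops;	/* .apply etc. elided in this sketch */

static int my_pwm_probe(struct device *dev, struct my_drvdata *ddata)
{
	struct pwm_chip *chip;

	/* The PWM core allocates the chip: one channel, no extra private area. */
	chip = devm_pwmchip_alloc(dev, 1, 0);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/*
	 * Callbacks retrieve state via pwmchip_get_drvdata(chip) and reach the
	 * device via pwmchip_parent(chip) instead of the removed chip->dev.
	 */
	pwmchip_set_drvdata(chip, ddata);
	chip->ops = &my_pwm_ops;

	return pwmchip_add(chip);
}
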
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 28848a8eb42e..c7bef5c23927 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -50,18 +50,20 @@ drm_connector_to_tfp410(struct drm_connector *connector)
static int tfp410_get_modes(struct drm_connector *connector)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int ret;
if (dvi->next_bridge->ops & DRM_BRIDGE_OP_EDID) {
- edid = drm_bridge_get_edid(dvi->next_bridge, connector);
- if (!edid)
+ drm_edid = drm_bridge_edid_read(dvi->next_bridge, connector);
+ if (!drm_edid)
DRM_INFO("EDID read failed. Fallback to standard modes\n");
} else {
- edid = NULL;
+ drm_edid = NULL;
}
- if (!edid) {
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid) {
/*
* No EDID, fallback on the XGA standard modes and prefer a mode
* pretty much anything can handle.
@@ -71,11 +73,9 @@ static int tfp410_get_modes(struct drm_connector *connector)
return ret;
}
- drm_connector_update_edid_property(connector, edid);
-
- ret = drm_add_edid_modes(connector, edid);
+ ret = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return ret;
}
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index f73f3471e94e..106f2d40d222 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -26,6 +26,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb"
elif [[ "$KERNEL_ARCH" = "arm" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DEBIAN_ARCH="armhf"
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index dac92cc2777c..084e3ff8e3f4 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,6 +1,6 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha edfbf74df1d4d6ce54ffe24566108be0e1a98c3d
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 9d162de9a05155e1c4041857a5848842749164cf
UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
TARGET_BRANCH: drm-next
@@ -25,7 +25,9 @@ variables:
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
# default kernel for rootfs before injecting the current kernel tree
- KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/v6.4.12-for-mesa-ci-f6b4ad45f48d
+ KERNEL_REPO: "gfx-ci/linux"
+ KERNEL_TAG: "v6.6.4-for-mesa-ci-e4f4c500f7fb"
+ KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
LAVA_TAGS: subset-1-gfx
LAVA_JOB_PRIORITY: 30
@@ -133,6 +135,11 @@ stages:
- if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
+.never-post-merge-rules:
+ rules:
+ - if: *is-post-merge
+ when: never
+
# Rule to filter for only scheduled pipelines.
.scheduled_pipeline-rules:
rules:
@@ -150,6 +157,7 @@ stages:
.build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
# Run automatically once all dependency jobs have passed
- when: on_success
@@ -157,6 +165,7 @@ stages:
.container+build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
- when: manual
.ci-deqp-artifacts:
@@ -175,6 +184,7 @@ stages:
.container-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
# Run pipeline by default in the main project if any CI pipeline
# configuration files were changed, to ensure docker images are up to date
- if: *is-post-merge
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 2c9a1838e728..0857773e5c5f 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -82,20 +82,35 @@
tags:
- $RUNNER_TAG
-msm:sc7180:
+.msm-sc7180:
extends:
- .lava-igt:arm64
stage: msm
- parallel: 4
variables:
DRIVER_NAME: msm
- DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
- DTB: sc7180-trogdor-lazor-limozeen-nots-r5
BOOT_METHOD: depthcharge
KERNEL_IMAGE_TYPE: ""
- GPU_VERSION: sc7180
+
+msm:sc7180-trogdor-lazor-limozeen:
+ extends:
+ - .msm-sc7180
+ parallel: 4
+ variables:
+ DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
+ DTB: sc7180-trogdor-lazor-limozeen-nots-r5
+ GPU_VERSION: ${DEVICE_TYPE}
RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-lazor-limozeen
+msm:sc7180-trogdor-kingoftown:
+ extends:
+ - .msm-sc7180
+ parallel: 6
+ variables:
+ DEVICE_TYPE: sc7180-trogdor-kingoftown
+ DTB: sc7180-trogdor-kingoftown
+ GPU_VERSION: ${DEVICE_TYPE}
+ RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-kingoftown
+
msm:apq8016:
extends:
- .baremetal-igt-arm64
@@ -104,7 +119,10 @@ msm:apq8016:
DRIVER_NAME: msm
BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc-usb-host.dtb
GPU_VERSION: apq8016
- BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ # disabling unused clocks conflicts with the MDSS runtime PM trying to
+ # disable those clocks and causes boot to fail.
+ # Reproducer: DRM_MSM=y, DRM_I2C_ADV7511=m
+ BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- ./install/bare-metal/fastboot.sh
@@ -324,6 +342,7 @@ virtio_gpu:none:
GPU_VERSION: none
extends:
- .test-gl
+ - .test-rules
tags:
- kvm
script:
diff --git a/drivers/gpu/drm/ci/testlist.txt b/drivers/gpu/drm/ci/testlist.txt
index f82cd90372f4..3377f002f8c5 100644
--- a/drivers/gpu/drm/ci/testlist.txt
+++ b/drivers/gpu/drm/ci/testlist.txt
@@ -100,7 +100,7 @@ kms_atomic@plane-invalid-params-fence
kms_atomic@crtc-invalid-params
kms_atomic@crtc-invalid-params-fence
kms_atomic@atomic-invalid-params
-kms_atomic@atomic_plane_damage
+kms_atomic@atomic-plane-damage
kms_atomic_interruptible@legacy-setmode
kms_atomic_interruptible@atomic-setmode
kms_atomic_interruptible@legacy-dpms
@@ -321,726 +321,726 @@ kms_bw@linear-tiling-7-displays-3840x2160p
kms_bw@linear-tiling-8-displays-1920x1080p
kms_bw@linear-tiling-8-displays-2560x1440p
kms_bw@linear-tiling-8-displays-3840x2160p
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-A-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-A-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-A-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-A-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-A-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-B-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-B-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-B-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-B-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-B-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-C-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-C-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-C-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-C-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-C-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-D-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-D-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-D-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-D-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-D-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-E-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-E-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-E-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-E-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-E-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-F-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-F-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-F-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-F-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-F-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-G-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-G-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-G-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-G-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-G-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-H-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-H-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-H-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-H-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-H-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-A-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-A-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-A-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-A-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-A-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-B-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-B-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-B-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-B-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-B-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-C-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-C-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-C-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-C-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-C-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-D-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-D-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-D-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-D-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-D-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-E-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-E-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-E-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-E-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-E-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-F-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-F-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-F-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-F-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-F-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-G-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-G-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-G-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-G-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-G-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-H-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-H-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-H-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-H-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-H-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
kms_cdclk@plane-scaling
kms_cdclk@mode-transition
kms_cdclk@mode-transition-all-outputs
@@ -1061,21 +1061,14 @@ kms_color@deep-color
kms_color@invalid-gamma-lut-sizes
kms_color@invalid-degamma-lut-sizes
kms_color@invalid-ctm-matrix-sizes
-kms_concurrent@pipe-A
-kms_concurrent@pipe-B
-kms_concurrent@pipe-C
-kms_concurrent@pipe-D
-kms_concurrent@pipe-E
-kms_concurrent@pipe-F
-kms_concurrent@pipe-G
-kms_concurrent@pipe-H
+kms_concurrent@multi-plane-atomic-lowres
kms_content_protection@legacy
kms_content_protection@atomic
kms_content_protection@atomic-dpms
-kms_content_protection@LIC
+kms_content_protection@lic
kms_content_protection@type1
-kms_content_protection@mei_interface
-kms_content_protection@content_type_change
+kms_content_protection@mei-interface
+kms_content_protection@content-type-change
kms_content_protection@uevent
kms_content_protection@srm
kms_content_protection@dp-mst-type-0
@@ -1218,8 +1211,8 @@ kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions-varying-size
kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions-varying-size
kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions-varying-size
kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions-varying-size
-kms_dither@FB-8BPC-Vs-Panel-6BPC
-kms_dither@FB-8BPC-Vs-Panel-8BPC
+kms_dither@fb-8bpc-vs-panel-6bpc
+kms_dither@fb-8bpc-vs-panel-8bpc
kms_dp_aux_dev
kms_tiled_display@basic-test-pattern
kms_tiled_display@basic-test-pattern-with-chamelium
@@ -2351,7 +2344,6 @@ kms_frontbuffer_tracking@psrdrrs-shrfb-scaledprimary
kms_frontbuffer_tracking@fbcpsrdrrs-indfb-scaledprimary
kms_frontbuffer_tracking@fbcpsrdrrs-shrfb-scaledprimary
kms_frontbuffer_tracking@fbc-modesetfrombusy
-kms_frontbuffer_tracking@fbc-badstride
kms_frontbuffer_tracking@fbc-stridechange
kms_frontbuffer_tracking@fbc-tiling-linear
kms_frontbuffer_tracking@fbc-tiling-y
@@ -2361,7 +2353,6 @@ kms_frontbuffer_tracking@psr-modesetfrombusy
kms_frontbuffer_tracking@psr-slowdraw
kms_frontbuffer_tracking@psr-suspend
kms_frontbuffer_tracking@fbcpsr-modesetfrombusy
-kms_frontbuffer_tracking@fbcpsr-badstride
kms_frontbuffer_tracking@fbcpsr-stridechange
kms_frontbuffer_tracking@fbcpsr-tiling-linear
kms_frontbuffer_tracking@fbcpsr-tiling-y
@@ -2372,7 +2363,6 @@ kms_frontbuffer_tracking@drrs-modesetfrombusy
kms_frontbuffer_tracking@drrs-slowdraw
kms_frontbuffer_tracking@drrs-suspend
kms_frontbuffer_tracking@fbcdrrs-modesetfrombusy
-kms_frontbuffer_tracking@fbcdrrs-badstride
kms_frontbuffer_tracking@fbcdrrs-stridechange
kms_frontbuffer_tracking@fbcdrrs-tiling-linear
kms_frontbuffer_tracking@fbcdrrs-tiling-y
@@ -2383,7 +2373,6 @@ kms_frontbuffer_tracking@psrdrrs-modesetfrombusy
kms_frontbuffer_tracking@psrdrrs-slowdraw
kms_frontbuffer_tracking@psrdrrs-suspend
kms_frontbuffer_tracking@fbcpsrdrrs-modesetfrombusy
-kms_frontbuffer_tracking@fbcpsrdrrs-badstride
kms_frontbuffer_tracking@fbcpsrdrrs-stridechange
kms_frontbuffer_tracking@fbcpsrdrrs-tiling-linear
kms_frontbuffer_tracking@fbcpsrdrrs-tiling-y
@@ -2456,7 +2445,7 @@ kms_plane@plane-position-hole-dpms
kms_plane@plane-panning-top-left
kms_plane@plane-panning-bottom-right
kms_plane@plane-panning-bottom-right-suspend
-kms_plane@invalid-pixel-format-settings
+kms_plane@planar-pixel-format-settings
kms_plane_alpha_blend@alpha-basic
kms_plane_alpha_blend@alpha-7efc
kms_plane_alpha_blend@coverage-7efc
@@ -2479,24 +2468,24 @@ kms_plane_multiple@tiling-x
kms_plane_multiple@tiling-y
kms_plane_multiple@tiling-yf
kms_plane_multiple@tiling-4
-kms_plane_scaling@plane-upscale-with-pixel-format-20x20
-kms_plane_scaling@plane-upscale-with-pixel-format-factor-0-25
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75
-kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling
-kms_plane_scaling@plane-upscale-with-rotation-20x20
-kms_plane_scaling@plane-upscale-with-rotation-factor-0-25
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-25
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-5
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-75
-kms_plane_scaling@plane-scaler-with-rotation-unity-scaling
-kms_plane_scaling@plane-upscale-with-modifiers-20x20
-kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-5
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-75
-kms_plane_scaling@plane-scaler-with-modifiers-unity-scaling
+kms_plane_scaling@plane-upscale-20x20-with-pixel-format
+kms_plane_scaling@plane-upscale-factor-0-25-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-25-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-5-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-75-with-pixel-format
+kms_plane_scaling@plane-scaler-unity-scaling-with-pixel-format
+kms_plane_scaling@plane-upscale-20x20-with-rotation
+kms_plane_scaling@plane-upscale-factor-0-25-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-25-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-5-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-75-with-rotation
+kms_plane_scaling@plane-scaler-unity-scaling-with-rotation
+kms_plane_scaling@plane-upscale-20x20-with-modifiers
+kms_plane_scaling@plane-upscale-factor-0-25-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-25-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-5-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-75-with-modifiers
+kms_plane_scaling@plane-scaler-unity-scaling-with-modifiers
kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation
kms_plane_scaling@plane-scaler-with-clipping-clamping-modifiers
@@ -2551,48 +2540,69 @@ kms_properties@invalid-properties-legacy
kms_properties@invalid-properties-atomic
kms_properties@get_properties-sanity-atomic
kms_properties@get_properties-sanity-non-atomic
-kms_psr@basic
-kms_psr@no_drrs
-kms_psr@primary_page_flip
-kms_psr@primary_mmap_gtt
-kms_psr@primary_mmap_cpu
-kms_psr@primary_blt
-kms_psr@primary_render
-kms_psr@sprite_mmap_gtt
-kms_psr@cursor_mmap_gtt
-kms_psr@sprite_mmap_cpu
-kms_psr@cursor_mmap_cpu
-kms_psr@sprite_blt
-kms_psr@cursor_blt
-kms_psr@sprite_render
-kms_psr@cursor_render
-kms_psr@sprite_plane_move
-kms_psr@cursor_plane_move
-kms_psr@sprite_plane_onoff
-kms_psr@cursor_plane_onoff
-kms_psr@dpms
-kms_psr@suspend
-kms_psr@psr2_basic
-kms_psr@psr2_no_drrs
-kms_psr@psr2_primary_page_flip
-kms_psr@psr2_primary_mmap_gtt
-kms_psr@psr2_primary_mmap_cpu
-kms_psr@psr2_primary_blt
-kms_psr@psr2_primary_render
-kms_psr@psr2_sprite_mmap_gtt
-kms_psr@psr2_cursor_mmap_gtt
-kms_psr@psr2_sprite_mmap_cpu
-kms_psr@psr2_cursor_mmap_cpu
-kms_psr@psr2_sprite_blt
-kms_psr@psr2_cursor_blt
-kms_psr@psr2_sprite_render
-kms_psr@psr2_cursor_render
-kms_psr@psr2_sprite_plane_move
-kms_psr@psr2_cursor_plane_move
-kms_psr@psr2_sprite_plane_onoff
-kms_psr@psr2_cursor_plane_onoff
-kms_psr@psr2_dpms
-kms_psr@psr2_suspend
+kms_psr@pr-basic
+kms_psr@pr-no-drrs
+kms_psr@pr-primary-page-flip
+kms_psr@pr-primary-mmap-gtt
+kms_psr@pr-primary-mmap-cpu
+kms_psr@pr-primary-blt
+kms_psr@pr-primary-render
+kms_psr@pr-sprite-mmap-gtt
+kms_psr@pr-cursor-mmap-gtt
+kms_psr@pr-sprite-mmap-cpu
+kms_psr@pr-cursor-mmap-cpu
+kms_psr@pr-sprite-blt
+kms_psr@pr-cursor-blt
+kms_psr@pr-sprite-render
+kms_psr@pr-cursor-render
+kms_psr@pr-sprite-plane-move
+kms_psr@pr-cursor-plane-move
+kms_psr@pr-sprite-plane-onoff
+kms_psr@pr-cursor-plane-onoff
+kms_psr@pr-dpms
+kms_psr@pr-suspend
+kms_psr@psr-basic
+kms_psr@psr-no-drrs
+kms_psr@psr-primary-page-flip
+kms_psr@psr-primary-mmap-gtt
+kms_psr@psr-primary-mmap-cpu
+kms_psr@psr-primary-blt
+kms_psr@psr-primary-render
+kms_psr@psr-sprite-mmap-gtt
+kms_psr@psr-cursor-mmap-gtt
+kms_psr@psr-sprite-mmap-cpu
+kms_psr@psr-cursor-mmap-cpu
+kms_psr@psr-sprite-blt
+kms_psr@psr-cursor-blt
+kms_psr@psr-sprite-render
+kms_psr@psr-cursor-render
+kms_psr@psr-sprite-plane-move
+kms_psr@psr-cursor-plane-move
+kms_psr@psr-sprite-plane-onoff
+kms_psr@psr-cursor-plane-onoff
+kms_psr@psr-dpms
+kms_psr@psr-suspend
+kms_psr@psr2-basic
+kms_psr@psr2-no-drrs
+kms_psr@psr2-primary-page-flip
+kms_psr@psr2-primary-mmap-gtt
+kms_psr@psr2-primary-mmap-cpu
+kms_psr@psr2-primary-blt
+kms_psr@psr2-primary-render
+kms_psr@psr2-sprite-mmap-gtt
+kms_psr@psr2-cursor-mmap-gtt
+kms_psr@psr2-sprite-mmap-cpu
+kms_psr@psr2-cursor-mmap-cpu
+kms_psr@psr2-sprite-blt
+kms_psr@psr2-cursor-blt
+kms_psr@psr2-sprite-render
+kms_psr@psr2-cursor-render
+kms_psr@psr2-sprite-plane-move
+kms_psr@psr2-cursor-plane-move
+kms_psr@psr2-sprite-plane-onoff
+kms_psr@psr2-cursor-plane-onoff
+kms_psr@psr2-dpms
+kms_psr@psr2-suspend
kms_psr2_sf@primary-plane-update-sf-dmg-area
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb
kms_psr2_sf@overlay-plane-update-sf-dmg-area
@@ -2643,13 +2653,13 @@ kms_scaling_modes@scaling-mode-full
kms_scaling_modes@scaling-mode-center
kms_scaling_modes@scaling-mode-full-aspect
kms_scaling_modes@scaling-mode-none
-kms_selftest@drm_cmdline
-kms_selftest@drm_damage
-kms_selftest@drm_dp_mst
+kms_selftest@drm_cmdline_parser
+kms_selftest@drm_damage_helper
+kms_selftest@drm_dp_mst_helper
kms_selftest@drm_format_helper
kms_selftest@drm_format
-kms_selftest@framebuffer
-kms_selftest@drm_plane
+kms_selftest@drm_framebuffer
+kms_selftest@drm_plane_helper
kms_setmode@basic
kms_setmode@basic-clone-single-crtc
kms_setmode@invalid-clone-single-crtc
@@ -2658,248 +2668,38 @@ kms_setmode@clone-exclusive-crtc
kms_setmode@invalid-clone-single-crtc-stealing
kms_sysfs_edid_timing
kms_tv_load_detect@load-detect
-kms_universal_plane@universal-plane-pipe-A-functional
-kms_universal_plane@universal-plane-pipe-A-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-A
-kms_universal_plane@cursor-fb-leak-pipe-A
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-A
-kms_universal_plane@universal-plane-pipe-B-functional
-kms_universal_plane@universal-plane-pipe-B-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-B
-kms_universal_plane@cursor-fb-leak-pipe-B
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-B
-kms_universal_plane@universal-plane-pipe-C-functional
-kms_universal_plane@universal-plane-pipe-C-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-C
-kms_universal_plane@cursor-fb-leak-pipe-C
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-C
-kms_universal_plane@universal-plane-pipe-D-functional
-kms_universal_plane@universal-plane-pipe-D-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-D
-kms_universal_plane@cursor-fb-leak-pipe-D
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-D
-kms_universal_plane@universal-plane-pipe-E-functional
-kms_universal_plane@universal-plane-pipe-E-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-E
-kms_universal_plane@cursor-fb-leak-pipe-E
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-E
-kms_universal_plane@universal-plane-pipe-F-functional
-kms_universal_plane@universal-plane-pipe-F-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-F
-kms_universal_plane@cursor-fb-leak-pipe-F
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-F
-kms_universal_plane@universal-plane-pipe-G-functional
-kms_universal_plane@universal-plane-pipe-G-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-G
-kms_universal_plane@cursor-fb-leak-pipe-G
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-G
-kms_universal_plane@universal-plane-pipe-H-functional
-kms_universal_plane@universal-plane-pipe-H-sanity
-kms_universal_plane@disable-primary-vs-flip-pipe-H
-kms_universal_plane@cursor-fb-leak-pipe-H
-kms_universal_plane@universal-plane-pageflip-windowed-pipe-H
+kms_universal_plane@universal-plane-functional
+kms_universal_plane@universal-plane-sanity
+kms_universal_plane@disable-primary-vs-flip
+kms_universal_plane@cursor-fb-leak
+kms_universal_plane@universal-plane-pageflip-windowed
kms_vblank@invalid
kms_vblank@crtc-id
-kms_vblank@pipe-A-accuracy-idle
-kms_vblank@pipe-A-query-idle
-kms_vblank@pipe-A-query-idle-hang
-kms_vblank@pipe-A-query-forked
-kms_vblank@pipe-A-query-forked-hang
-kms_vblank@pipe-A-query-busy
-kms_vblank@pipe-A-query-busy-hang
-kms_vblank@pipe-A-query-forked-busy
-kms_vblank@pipe-A-query-forked-busy-hang
-kms_vblank@pipe-A-wait-idle
-kms_vblank@pipe-A-wait-idle-hang
-kms_vblank@pipe-A-wait-forked
-kms_vblank@pipe-A-wait-forked-hang
-kms_vblank@pipe-A-wait-busy
-kms_vblank@pipe-A-wait-busy-hang
-kms_vblank@pipe-A-wait-forked-busy
-kms_vblank@pipe-A-wait-forked-busy-hang
-kms_vblank@pipe-A-ts-continuation-idle
-kms_vblank@pipe-A-ts-continuation-idle-hang
-kms_vblank@pipe-A-ts-continuation-dpms-rpm
-kms_vblank@pipe-A-ts-continuation-dpms-suspend
-kms_vblank@pipe-A-ts-continuation-suspend
-kms_vblank@pipe-A-ts-continuation-modeset
-kms_vblank@pipe-A-ts-continuation-modeset-hang
-kms_vblank@pipe-A-ts-continuation-modeset-rpm
-kms_vblank@pipe-B-accuracy-idle
-kms_vblank@pipe-B-query-idle
-kms_vblank@pipe-B-query-idle-hang
-kms_vblank@pipe-B-query-forked
-kms_vblank@pipe-B-query-forked-hang
-kms_vblank@pipe-B-query-busy
-kms_vblank@pipe-B-query-busy-hang
-kms_vblank@pipe-B-query-forked-busy
-kms_vblank@pipe-B-query-forked-busy-hang
-kms_vblank@pipe-B-wait-idle
-kms_vblank@pipe-B-wait-idle-hang
-kms_vblank@pipe-B-wait-forked
-kms_vblank@pipe-B-wait-forked-hang
-kms_vblank@pipe-B-wait-busy
-kms_vblank@pipe-B-wait-busy-hang
-kms_vblank@pipe-B-wait-forked-busy
-kms_vblank@pipe-B-wait-forked-busy-hang
-kms_vblank@pipe-B-ts-continuation-idle
-kms_vblank@pipe-B-ts-continuation-idle-hang
-kms_vblank@pipe-B-ts-continuation-dpms-rpm
-kms_vblank@pipe-B-ts-continuation-dpms-suspend
-kms_vblank@pipe-B-ts-continuation-suspend
-kms_vblank@pipe-B-ts-continuation-modeset
-kms_vblank@pipe-B-ts-continuation-modeset-hang
-kms_vblank@pipe-B-ts-continuation-modeset-rpm
-kms_vblank@pipe-C-accuracy-idle
-kms_vblank@pipe-C-query-idle
-kms_vblank@pipe-C-query-idle-hang
-kms_vblank@pipe-C-query-forked
-kms_vblank@pipe-C-query-forked-hang
-kms_vblank@pipe-C-query-busy
-kms_vblank@pipe-C-query-busy-hang
-kms_vblank@pipe-C-query-forked-busy
-kms_vblank@pipe-C-query-forked-busy-hang
-kms_vblank@pipe-C-wait-idle
-kms_vblank@pipe-C-wait-idle-hang
-kms_vblank@pipe-C-wait-forked
-kms_vblank@pipe-C-wait-forked-hang
-kms_vblank@pipe-C-wait-busy
-kms_vblank@pipe-C-wait-busy-hang
-kms_vblank@pipe-C-wait-forked-busy
-kms_vblank@pipe-C-wait-forked-busy-hang
-kms_vblank@pipe-C-ts-continuation-idle
-kms_vblank@pipe-C-ts-continuation-idle-hang
-kms_vblank@pipe-C-ts-continuation-dpms-rpm
-kms_vblank@pipe-C-ts-continuation-dpms-suspend
-kms_vblank@pipe-C-ts-continuation-suspend
-kms_vblank@pipe-C-ts-continuation-modeset
-kms_vblank@pipe-C-ts-continuation-modeset-hang
-kms_vblank@pipe-C-ts-continuation-modeset-rpm
-kms_vblank@pipe-D-accuracy-idle
-kms_vblank@pipe-D-query-idle
-kms_vblank@pipe-D-query-idle-hang
-kms_vblank@pipe-D-query-forked
-kms_vblank@pipe-D-query-forked-hang
-kms_vblank@pipe-D-query-busy
-kms_vblank@pipe-D-query-busy-hang
-kms_vblank@pipe-D-query-forked-busy
-kms_vblank@pipe-D-query-forked-busy-hang
-kms_vblank@pipe-D-wait-idle
-kms_vblank@pipe-D-wait-idle-hang
-kms_vblank@pipe-D-wait-forked
-kms_vblank@pipe-D-wait-forked-hang
-kms_vblank@pipe-D-wait-busy
-kms_vblank@pipe-D-wait-busy-hang
-kms_vblank@pipe-D-wait-forked-busy
-kms_vblank@pipe-D-wait-forked-busy-hang
-kms_vblank@pipe-D-ts-continuation-idle
-kms_vblank@pipe-D-ts-continuation-idle-hang
-kms_vblank@pipe-D-ts-continuation-dpms-rpm
-kms_vblank@pipe-D-ts-continuation-dpms-suspend
-kms_vblank@pipe-D-ts-continuation-suspend
-kms_vblank@pipe-D-ts-continuation-modeset
-kms_vblank@pipe-D-ts-continuation-modeset-hang
-kms_vblank@pipe-D-ts-continuation-modeset-rpm
-kms_vblank@pipe-E-accuracy-idle
-kms_vblank@pipe-E-query-idle
-kms_vblank@pipe-E-query-idle-hang
-kms_vblank@pipe-E-query-forked
-kms_vblank@pipe-E-query-forked-hang
-kms_vblank@pipe-E-query-busy
-kms_vblank@pipe-E-query-busy-hang
-kms_vblank@pipe-E-query-forked-busy
-kms_vblank@pipe-E-query-forked-busy-hang
-kms_vblank@pipe-E-wait-idle
-kms_vblank@pipe-E-wait-idle-hang
-kms_vblank@pipe-E-wait-forked
-kms_vblank@pipe-E-wait-forked-hang
-kms_vblank@pipe-E-wait-busy
-kms_vblank@pipe-E-wait-busy-hang
-kms_vblank@pipe-E-wait-forked-busy
-kms_vblank@pipe-E-wait-forked-busy-hang
-kms_vblank@pipe-E-ts-continuation-idle
-kms_vblank@pipe-E-ts-continuation-idle-hang
-kms_vblank@pipe-E-ts-continuation-dpms-rpm
-kms_vblank@pipe-E-ts-continuation-dpms-suspend
-kms_vblank@pipe-E-ts-continuation-suspend
-kms_vblank@pipe-E-ts-continuation-modeset
-kms_vblank@pipe-E-ts-continuation-modeset-hang
-kms_vblank@pipe-E-ts-continuation-modeset-rpm
-kms_vblank@pipe-F-accuracy-idle
-kms_vblank@pipe-F-query-idle
-kms_vblank@pipe-F-query-idle-hang
-kms_vblank@pipe-F-query-forked
-kms_vblank@pipe-F-query-forked-hang
-kms_vblank@pipe-F-query-busy
-kms_vblank@pipe-F-query-busy-hang
-kms_vblank@pipe-F-query-forked-busy
-kms_vblank@pipe-F-query-forked-busy-hang
-kms_vblank@pipe-F-wait-idle
-kms_vblank@pipe-F-wait-idle-hang
-kms_vblank@pipe-F-wait-forked
-kms_vblank@pipe-F-wait-forked-hang
-kms_vblank@pipe-F-wait-busy
-kms_vblank@pipe-F-wait-busy-hang
-kms_vblank@pipe-F-wait-forked-busy
-kms_vblank@pipe-F-wait-forked-busy-hang
-kms_vblank@pipe-F-ts-continuation-idle
-kms_vblank@pipe-F-ts-continuation-idle-hang
-kms_vblank@pipe-F-ts-continuation-dpms-rpm
-kms_vblank@pipe-F-ts-continuation-dpms-suspend
-kms_vblank@pipe-F-ts-continuation-suspend
-kms_vblank@pipe-F-ts-continuation-modeset
-kms_vblank@pipe-F-ts-continuation-modeset-hang
-kms_vblank@pipe-F-ts-continuation-modeset-rpm
-kms_vblank@pipe-G-accuracy-idle
-kms_vblank@pipe-G-query-idle
-kms_vblank@pipe-G-query-idle-hang
-kms_vblank@pipe-G-query-forked
-kms_vblank@pipe-G-query-forked-hang
-kms_vblank@pipe-G-query-busy
-kms_vblank@pipe-G-query-busy-hang
-kms_vblank@pipe-G-query-forked-busy
-kms_vblank@pipe-G-query-forked-busy-hang
-kms_vblank@pipe-G-wait-idle
-kms_vblank@pipe-G-wait-idle-hang
-kms_vblank@pipe-G-wait-forked
-kms_vblank@pipe-G-wait-forked-hang
-kms_vblank@pipe-G-wait-busy
-kms_vblank@pipe-G-wait-busy-hang
-kms_vblank@pipe-G-wait-forked-busy
-kms_vblank@pipe-G-wait-forked-busy-hang
-kms_vblank@pipe-G-ts-continuation-idle
-kms_vblank@pipe-G-ts-continuation-idle-hang
-kms_vblank@pipe-G-ts-continuation-dpms-rpm
-kms_vblank@pipe-G-ts-continuation-dpms-suspend
-kms_vblank@pipe-G-ts-continuation-suspend
-kms_vblank@pipe-G-ts-continuation-modeset
-kms_vblank@pipe-G-ts-continuation-modeset-hang
-kms_vblank@pipe-G-ts-continuation-modeset-rpm
-kms_vblank@pipe-H-accuracy-idle
-kms_vblank@pipe-H-query-idle
-kms_vblank@pipe-H-query-idle-hang
-kms_vblank@pipe-H-query-forked
-kms_vblank@pipe-H-query-forked-hang
-kms_vblank@pipe-H-query-busy
-kms_vblank@pipe-H-query-busy-hang
-kms_vblank@pipe-H-query-forked-busy
-kms_vblank@pipe-H-query-forked-busy-hang
-kms_vblank@pipe-H-wait-idle
-kms_vblank@pipe-H-wait-idle-hang
-kms_vblank@pipe-H-wait-forked
-kms_vblank@pipe-H-wait-forked-hang
-kms_vblank@pipe-H-wait-busy
-kms_vblank@pipe-H-wait-busy-hang
-kms_vblank@pipe-H-wait-forked-busy
-kms_vblank@pipe-H-wait-forked-busy-hang
-kms_vblank@pipe-H-ts-continuation-idle
-kms_vblank@pipe-H-ts-continuation-idle-hang
-kms_vblank@pipe-H-ts-continuation-dpms-rpm
-kms_vblank@pipe-H-ts-continuation-dpms-suspend
-kms_vblank@pipe-H-ts-continuation-suspend
-kms_vblank@pipe-H-ts-continuation-modeset
-kms_vblank@pipe-H-ts-continuation-modeset-hang
-kms_vblank@pipe-H-ts-continuation-modeset-rpm
+kms_vblank@accuracy-idle
+kms_vblank@query-idle
+kms_vblank@query-idle-hang
+kms_vblank@query-forked
+kms_vblank@query-forked-hang
+kms_vblank@query-busy
+kms_vblank@query-busy-hang
+kms_vblank@query-forked-busy
+kms_vblank@query-forked-busy-hang
+kms_vblank@wait-idle
+kms_vblank@wait-idle-hang
+kms_vblank@wait-forked
+kms_vblank@wait-forked-hang
+kms_vblank@wait-busy
+kms_vblank@wait-busy-hang
+kms_vblank@wait-forked-busy
+kms_vblank@wait-forked-busy-hang
+kms_vblank@ts-continuation-idle
+kms_vblank@ts-continuation-idle-hang
+kms_vblank@ts-continuation-dpms-rpm
+kms_vblank@ts-continuation-dpms-suspend
+kms_vblank@ts-continuation-suspend
+kms_vblank@ts-continuation-modeset
+kms_vblank@ts-continuation-modeset-hang
+kms_vblank@ts-continuation-modeset-rpm
kms_vrr@flip-basic
kms_vrr@flip-dpms
kms_vrr@flip-suspend
@@ -2910,3 +2710,52 @@ kms_writeback@writeback-invalid-parameters
kms_writeback@writeback-fb-id
kms_writeback@writeback-check-output
prime_mmap_kms@buffer-sharing
+msm_shrink@copy-gpu-sanitycheck-8
+msm_shrink@copy-gpu-sanitycheck-32
+msm_shrink@copy-gpu-8
+msm_shrink@copy-gpu-32
+msm_shrink@copy-gpu-madvise-8
+msm_shrink@copy-gpu-madvise-32
+msm_shrink@copy-gpu-oom-8
+msm_shrink@copy-gpu-oom-32
+msm_shrink@copy-mmap-sanitycheck-8
+msm_shrink@copy-mmap-sanitycheck-32
+msm_shrink@copy-mmap-8
+msm_shrink@copy-mmap-32
+msm_shrink@copy-mmap-madvise-8
+msm_shrink@copy-mmap-madvise-32
+msm_shrink@copy-mmap-oom-8
+msm_shrink@copy-mmap-oom-32
+msm_shrink@copy-mmap-dmabuf-sanitycheck-8
+msm_shrink@copy-mmap-dmabuf-sanitycheck-32
+msm_shrink@copy-mmap-dmabuf-8
+msm_shrink@copy-mmap-dmabuf-32
+msm_shrink@copy-mmap-dmabuf-madvise-8
+msm_shrink@copy-mmap-dmabuf-madvise-32
+msm_shrink@copy-mmap-dmabuf-oom-8
+msm_shrink@copy-mmap-dmabuf-oom-32
+msm_mapping@ring
+msm_mapping@sqefw
+msm_mapping@shadow
+msm_submitoverhead@submitbench-10-bos
+msm_submitoverhead@submitbench-10-bos-no-implicit-sync
+msm_submitoverhead@submitbench-100-bos
+msm_submitoverhead@submitbench-100-bos-no-implicit-sync
+msm_submitoverhead@submitbench-250-bos
+msm_submitoverhead@submitbench-250-bos-no-implicit-sync
+msm_submitoverhead@submitbench-500-bos
+msm_submitoverhead@submitbench-500-bos-no-implicit-sync
+msm_submitoverhead@submitbench-1000-bos
+msm_submitoverhead@submitbench-1000-bos-no-implicit-sync
+msm_recovery@hangcheck
+msm_recovery@gpu-fault
+msm_recovery@gpu-fault-parallel
+msm_recovery@iova-fault
+msm_submit@empty-submit
+msm_submit@invalid-queue-submit
+msm_submit@invalid-flags-submit
+msm_submit@invalid-in-fence-submit
+msm_submit@invalid-duplicate-bo-submit
+msm_submit@invalid-cmd-idx-submit
+msm_submit@invalid-cmd-type-submit
+msm_submit@valid-submit
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index d39d254c935e..44a5c62dedad 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -6,8 +6,6 @@ kms_cursor_legacy@all-pipes-single-bo,Fail
kms_cursor_legacy@all-pipes-single-move,Fail
kms_cursor_legacy@all-pipes-torture-bo,Fail
kms_cursor_legacy@all-pipes-torture-move,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
kms_cursor_legacy@pipe-A-forked-bo,Fail
kms_cursor_legacy@pipe-A-forked-move,Fail
kms_cursor_legacy@pipe-A-single-bo,Fail
@@ -18,3 +16,4 @@ kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_selftest@drm_format,Timeout
kms_selftest@drm_format_helper,Timeout
+msm_mapping@ring,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
index 2cd49e8ee47f..88a1fc0a3b0d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -1,4 +1,2 @@
kms_3d,Fail
kms_addfb_basic@addfb25-bad-modifier,Fail
-kms_force_connector_basic@force-edid,Fail
-kms_hdmi_inject@inject-4k,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
deleted file mode 100644
index f71166a57731..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-kms_color@ctm-0-25,Fail
-kms_color@ctm-0-50,Fail
-kms_color@ctm-0-75,Fail
-kms_color@ctm-blue-to-red,Fail
-kms_color@ctm-green-to-red,Fail
-kms_color@ctm-negative,Fail
-kms_color@ctm-red-to-blue,Fail
-kms_color@ctm-signed,Fail
-kms_cursor_legacy@cursor-vs-flip-toggle,Fail
-kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions,Crash
-kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning-vs-hang,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@coverage-7efc,Fail
-kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_alpha_blend@pipe-A-alpha-7efc,Fail
-kms_plane_alpha_blend@pipe-A-coverage-7efc,Fail
-kms_plane_alpha_blend@pipe-A-coverage-vs-premult-vs-constant,Fail
-kms_plane_alpha_blend@pipe-B-alpha-7efc,Fail
-kms_plane_alpha_blend@pipe-B-alpha-basic,Fail
-kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
-kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
-kms_plane_alpha_blend@pipe-B-constant-alpha-mid,Fail
-kms_plane_alpha_blend@pipe-B-coverage-7efc,Fail
-kms_plane_alpha_blend@pipe-B-coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
-kms_universal_plane@disable-primary-vs-flip-pipe-b,Fail
-kms_universal_plane@universal-plane-pipe-B-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
deleted file mode 100644
index 04730044ed12..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-kms_color@ctm-0-25
-kms_color@ctm-0-50
-kms_color@ctm-0-75
-kms_color@ctm-blue-to-red
-kms_color@ctm-green-to-red
-kms_color@ctm-negative
-kms_color@ctm-red-to-blue
-kms_color@ctm-signed
-kms_flip@flip-vs-modeset-vs-hang
-kms_flip@flip-vs-panning-vs-hang
-kms_plane@pixel-format
-kms_plane@pixel-format-source-clamping
-kms_plane@plane-position-covered
-kms_plane@plane-position-hole
-kms_plane@plane-position-hole-dpms
-kms_writeback@writeback-fb-id
-kms_writeback@writeback-invalid-parameters
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
deleted file mode 100644
index e59a2fddfde0..000000000000
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# Suspend to RAM seems to be broken on this machine
-.*suspend.*
-
-# Test incorrectly assumes that CTM support implies gamma/degamma
-# LUT support. None of the subtests handle the case of only having
-# CTM support
-#kms_color.*
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
new file mode 100644
index 000000000000..f0576aa629e8
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -0,0 +1,18 @@
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-blue-to-red,Fail
+kms_color@ctm-green-to-red,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
new file mode 100644
index 000000000000..327039f70252
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -0,0 +1,2 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
new file mode 100644
index 000000000000..f0576aa629e8
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -0,0 +1,18 @@
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-blue-to-red,Fail
+kms_color@ctm-green-to-red,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
new file mode 100644
index 000000000000..327039f70252
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -0,0 +1,2 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index c55baa2d18c1..e9043a00383e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -15,7 +15,7 @@ kms_color@pipe-A-ctm-max,Fail
kms_color@pipe-A-ctm-negative,Fail
kms_color@pipe-A-ctm-red-to-blue,Fail
kms_color@pipe-A-legacy-gamma,Fail
-kms_cursor_legacy@basic-flip-after-cursor-legacy,Fail
+kms_cursor_legacy@basic-flip-after-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-after-cursor-varying-size,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
@@ -29,9 +29,6 @@ kms_cursor_legacy@flip-vs-cursor-atomic,Fail
kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions,Fail
-kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size,Fail
-kms_cursor_legacy@short-flip-after-cursor-toggle,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 16d205c04cbb..8a492f01eaa4 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -1,12 +1,22 @@
-kms_cursor_legacy@basic-flip-after-cursor-atomic
-kms_cursor_legacy@basic-flip-before-cursor-varying-size
-kms_cursor_legacy@cursorA-vs-flipA-toggle
-kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+# Board Name: msm:sdm845
+# Bug Report: https://lore.kernel.org/dri-devel/46287831-edfa-78e8-6055-d7a08831c445@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gd2af13d9f
+# Linux Version: 6.7.0-rc3
+
+# Reported by deqp-runner
+kms_cursor_legacy@basic-flip-after-cursor-legacy
kms_cursor_legacy@flip-vs-cursor-toggle
kms_cursor_legacy@flip-vs-cursor-varying-size
+kms_cursor_legacy@short-flip-after-cursor-toggle
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
-kms_cursor_legacy@short-flip-before-cursor-toggle
-kms_flip@flip-vs-modeset-vs-hang
-kms_flip@flip-vs-panning-vs-hang
-kms_plane@pixel-format
-kms_plane@pixel-format-source-clamping
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
+msm_shrink@copy-gpu-32
+msm_shrink@copy-gpu-oom-32
+
+# The tests below show inconsistent results across multiple runs,
+# alternating between Pass and Fail.
+kms_cursor_legacy@basic-flip-before-cursor-varying-size
+kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 42675f1c6d76..618e3a3a7277 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -1,2 +1,7 @@
# Hangs machine
-kms_bw.* \ No newline at end of file
+kms_bw.*
+
+# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
+# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
+# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
+msm_mapping@*
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 09712b88a5b8..c0f56888c328 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER
help
DRM display helpers for DisplayPort.
+config DRM_DISPLAY_DP_TUNNEL
+ bool
+ select DRM_DISPLAY_DP_HELPER
+ help
+ Enable support for DisplayPort tunnels. This allows drivers to use
+ DP tunnel features like the Bandwidth Allocation mode to maximize the
+ BW utilization for display streams on Thunderbolt links.
+
+config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+ bool "Enable debugging the DP tunnel state"
+ depends on REF_TRACKER
+ depends on DRM_DISPLAY_DP_TUNNEL
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debugging the DP tunnel manager's state, including the
+ consistency of all managed tunnels' reference counting and the state of
+ streams contained in tunnels.
+
+ If in doubt, say "N".
+
config DRM_DISPLAY_HDCP_HELPER
bool
depends on DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 17ac4a1006a8..7ca61333c669 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_helper.o \
drm_dp_mst_topology.o \
drm_dsc_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
+ drm_dp_tunnel.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 8a165be1a821..5afc26be9d2a 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -127,7 +127,7 @@ static void dp_aux_ep_shutdown(struct device *dev)
aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev));
}
-static struct bus_type dp_aux_bus_type = {
+static const struct bus_type dp_aux_bus_type = {
.name = "dp-aux",
.match = dp_aux_ep_match,
.probe = dp_aux_ep_probe,
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index b1ca3a1100da..266826eac4a7 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -533,6 +533,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
mutex_lock(&aux->hw_mutex);
/*
+ * If the device attached to the aux bus is powered down then there's
+ * no reason to attempt a transfer. Error out immediately.
+ */
+ if (aux->powered_down) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
* aux i2c transactions but real world devices this wasn't
@@ -600,6 +609,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
EXPORT_SYMBOL(drm_dp_dpcd_probe);
/**
+ * drm_dp_dpcd_set_powered() - Set whether the DP device is powered
+ * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
+ * and the function will be a no-op.
+ * @powered: true if powered; false if not
+ *
+ * If the endpoint device on the DP AUX bus is known to be powered down
+ * then this function can be called to make future transfers fail immediately
+ * instead of needing to time out.
+ *
+ * If this function is never called then a device defaults to being powered.
+ */
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
+{
+ if (!aux)
+ return;
+
+ mutex_lock(&aux->hw_mutex);
+ aux->powered_down = !powered;
+ mutex_unlock(&aux->hw_mutex);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
+
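For context on how the new flag is meant to be used, here is a minimal sketch (not part of this patch; the panel suspend/resume hooks and the DP_SET_POWER write are assumptions of the example): a driver that cuts power to its sink marks the AUX channel as unpowered so later DPCD accesses fail fast instead of timing out.

#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

/* Hypothetical suspend path of a panel driver that owns an AUX channel. */
static void example_panel_suspend(struct drm_dp_aux *aux)
{
	/* Ask the sink to enter a low power state while it is still reachable. */
	drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D3);

	/* ... board specific panel power off happens here ... */

	/*
	 * Tell the AUX core the device is unpowered: further DPCD and AUX
	 * i2c transfers now fail with -EBUSY instead of timing out.
	 */
	drm_dp_dpcd_set_powered(aux, false);
}

static void example_panel_resume(struct drm_dp_aux *aux)
{
	/* ... board specific panel power on happens here ... */

	drm_dp_dpcd_set_powered(aux, true);	/* transfers may flow again */
}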
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -1858,6 +1890,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
struct drm_dp_aux_msg msg;
int err = 0;
+ if (aux->powered_down)
+ return -EBUSY;
+
dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
memset(&msg, 0, sizeof(msg));
@@ -2897,26 +2932,120 @@ static const char *dp_content_type_get_name(enum dp_content_type content_type)
}
}
-void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
- const struct drm_dp_vsc_sdp *vsc)
+void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
{
-#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
- DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+ drm_printf(p, "DP SDP: VSC, revision %u, length %u\n",
vsc->revision, vsc->length);
- DP_SDP_LOG(" pixelformat: %s\n",
+ drm_printf(p, " pixelformat: %s\n",
dp_pixelformat_get_name(vsc->pixelformat));
- DP_SDP_LOG(" colorimetry: %s\n",
+ drm_printf(p, " colorimetry: %s\n",
dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
- DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
- DP_SDP_LOG(" dynamic range: %s\n",
+ drm_printf(p, " bpc: %u\n", vsc->bpc);
+ drm_printf(p, " dynamic range: %s\n",
dp_dynamic_range_get_name(vsc->dynamic_range));
- DP_SDP_LOG(" content type: %s\n",
+ drm_printf(p, " content type: %s\n",
dp_content_type_get_name(vsc->content_type));
-#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
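With the drm_printer based signature the caller now chooses where the dump goes. A minimal sketch (the wrapper function and its arguments are assumptions of the example), using the drm_dbg_printer() helper that this series switches other callers to:

#include <drm/drm_print.h>
#include <drm/display/drm_dp_helper.h>

/* Dump a VSC SDP into the KMS debug log of a given device. */
static void example_log_vsc_sdp(struct drm_device *drm,
				const struct drm_dp_vsc_sdp *vsc)
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS, "VSC SDP");

	drm_dp_vsc_sdp_log(&p, vsc);
}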
/**
+ * drm_dp_vsc_sdp_supported() - check if vsc sdp is supported
+ * @aux: DisplayPort AUX channel
+ * @dpcd: DisplayPort configuration data
+ *
+ * Returns true if the VSC SDP is supported, false otherwise.
+ */
+bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rx_feature;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
+ return false;
+ }
+
+ return (rx_feature & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED);
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_supported);
+
+/**
+ * drm_dp_vsc_sdp_pack() - pack a given vsc sdp into generic dp_sdp
+ * @vsc: VSC SDP initialized according to its purpose, as defined in
+ * Tables 2-118 to 2-120 of the DP 1.4a specification
+ * @sdp: valid handle to the generic dp_sdp which will be packed
+ *
+ * Returns length of sdp on success and error code on failure
+ */
+ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ memset(sdp, 0, sizeof(struct dp_sdp));
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
+ */
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+
+ if (vsc->revision == 0x6) {
+ sdp->db[0] = 1;
+ sdp->db[3] = 1;
+ }
+
+ /*
+	 * Revisions 0x5 and 0x7 support the Pixel Encoding/Colorimetry
+	 * Format as per the DP 1.4a and DP 2.0 specifications, respectively.
+ */
+ if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
+ goto out;
+
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
+
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
+ break;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
+ break;
+ case 10:
+ sdp->db[17] = 0x2;
+ break;
+ case 12:
+ sdp->db[17] = 0x3;
+ break;
+ case 16:
+ sdp->db[17] = 0x4;
+ break;
+ default:
+ WARN(1, "Missing case %d\n", vsc->bpc);
+ return -EINVAL;
+ }
+
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
+
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
+
+out:
+ return length;
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_pack);
+
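A minimal sketch of how a source driver could combine the two new helpers (the DPCD cache argument, the chosen field values and the final hand-off to hardware are assumptions of the example, not part of this patch):

#include <linux/errno.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

static int example_send_vsc_sdp(struct drm_dp_aux *aux,
				const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	/* Revision 0x5 / length 0x13: Pixel Encoding/Colorimetry format VSC. */
	struct drm_dp_vsc_sdp vsc = {
		.sdp_type = DP_SDP_VSC,
		.revision = 0x5,
		.length = 0x13,
		.pixelformat = DP_PIXELFORMAT_RGB,
		.colorimetry = DP_COLORIMETRY_DEFAULT,
		.bpc = 8,
		.dynamic_range = DP_DYNAMIC_RANGE_VESA,
		.content_type = DP_CONTENT_TYPE_NOT_DEFINED,
	};
	struct dp_sdp sdp;
	ssize_t len;

	if (!drm_dp_vsc_sdp_supported(aux, dpcd))
		return -EOPNOTSUPP;

	len = drm_dp_vsc_sdp_pack(&vsc, &sdp);
	if (len < 0)
		return len;

	/* Hand the packed sdp (len bytes) to the HW specific infoframe write. */
	return 0;
}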
+/**
* drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
@@ -4058,3 +4187,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
return 800000;
}
EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
+
+/**
+ * drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink
+ * @max_link_rate: max DPRX link rate in 10kbps units
+ * @max_lanes: max DPRX lane count
+ *
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * Note that protocol layers above the DPRX link level considered here can
+ * further limit the maximum data rate. Such layers are the MST topology (with
+ * limits on the link between the source and first branch device as well as on
+ * the whole MST path until the DPRX link) and (Thunderbolt) DP tunnels -
+ * which in turn can encapsulate an MST link with its own limit - with each
+ * SST or MST encapsulated tunnel sharing the BW of a tunnel group.
+ *
+ * Returns the maximum data rate in kBps units.
+ */
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes)
+{
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+
+ return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes,
+ ch_coding_efficiency),
+ 1000000 * 8);
+}
+EXPORT_SYMBOL(drm_dp_max_dprx_data_rate);
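As a quick sanity check of the returned units (not part of the patch, just a worked example of the formula above):

	/*
	 * Worked example - HBR3 x4 with 8b/10b channel coding (800000 ppm
	 * efficiency per drm_dp_bw_channel_coding_efficiency()):
	 *
	 *   810000 (10 kbps units) * 10 * 4 lanes = 32,400,000 kbps raw rate
	 *   * 800000 / 1000000                    = 25,920,000 kbps payload
	 *   / 8                                   =  3,240,000 kBps returned
	 */
	int max_payload_rate = drm_dp_max_dprx_data_rate(810000, 4); /* 3240000 */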
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index f7c6b60629c2..03d528209426 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -1306,7 +1306,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
@@ -1593,10 +1594,11 @@ topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
}
static void
-__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+__dump_topology_ref_history(struct drm_device *drm,
+ struct drm_dp_mst_topology_ref_history *history,
void *ptr, const char *type_str)
{
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
int i;
@@ -1638,15 +1640,15 @@ out:
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
- __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
- "MSTB");
+ __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
+ mstb, "MSTB");
}
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
- __dump_topology_ref_history(&port->topology_ref_history, port,
- "Port");
+ __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
+ port, "Port");
}
static __always_inline void
@@ -2824,7 +2826,9 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
if (ret) {
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev,
+ DRM_UT_DP,
+ DBG_PREFIX);
drm_printf(&p, "sideband msg failed to send\n");
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
@@ -2869,7 +2873,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
new file mode 100644
index 000000000000..120e0de674c1
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/ref_tracker.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
+
+#define to_group(__private_obj) \
+ container_of(__private_obj, struct drm_dp_tunnel_group, base)
+
+#define to_group_state(__private_state) \
+ container_of(__private_state, struct drm_dp_tunnel_group_state, base)
+
+#define is_dp_tunnel_private_obj(__obj) \
+ ((__obj)->funcs == &tunnel_group_funcs)
+
+#define for_each_new_group_in_state(__state, __new_group_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs; \
+ (__i)++) \
+ for_each_if ((__state)->private_objs[__i].ptr && \
+ is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+ ((__new_group_state) = \
+ to_group_state((__state)->private_objs[__i].new_state), 1))
+
+#define for_each_old_group_in_state(__state, __old_group_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs; \
+ (__i)++) \
+ for_each_if ((__state)->private_objs[__i].ptr && \
+ is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+ ((__old_group_state) = \
+ to_group_state((__state)->private_objs[__i].old_state), 1))
+
+#define for_each_tunnel_in_group(__group, __tunnel) \
+ list_for_each_entry(__tunnel, &(__group)->tunnels, node)
+
+#define for_each_tunnel_state(__group_state, __tunnel_state) \
+ list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
+
+#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
+ list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
+ &(__group_state)->tunnel_states, node)
+
+#define kbytes_to_mbits(__kbytes) \
+ DIV_ROUND_UP((__kbytes) * 8, 1000)
+
+#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
+
+#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
+ drm_##__level##__type((__tunnel)->group->mgr->dev, \
+ "[DPTUN %s][%s] " __fmt, \
+ drm_dp_tunnel_name(__tunnel), \
+ (__tunnel)->aux->name, ## \
+ __VA_ARGS__)
+
+#define tun_dbg(__tunnel, __fmt, ...) \
+ __tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
+
+#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
+ if (__err) \
+ __tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
+ ## __VA_ARGS__, ERR_PTR(__err)); \
+ else \
+ __tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
+ ## __VA_ARGS__); \
+} while (0)
+
+#define tun_dbg_atomic(__tunnel, __fmt, ...) \
+ __tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
+
+#define tun_grp_dbg(__group, __fmt, ...) \
+ drm_dbg_kms((__group)->mgr->dev, \
+ "[DPTUN %s] " __fmt, \
+ drm_dp_tunnel_group_name(__group), ## \
+ __VA_ARGS__)
+
+#define DP_TUNNELING_BASE DP_TUNNELING_OUI
+
+#define __DPTUN_REG_RANGE(__start, __size) \
+ GENMASK_ULL((__start) + (__size) - 1, (__start))
+
+#define DPTUN_REG_RANGE(__addr, __size) \
+ __DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
+
+#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
+
+#define DPTUN_INFO_REG_MASK ( \
+ DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
+ DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
+ DPTUN_REG(DP_TUNNELING_HW_REV) | \
+ DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
+ DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
+ DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
+ DPTUN_REG(DP_IN_ADAPTER_INFO) | \
+ DPTUN_REG(DP_USB4_DRIVER_ID) | \
+ DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
+ DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
+ DPTUN_REG(DP_BW_GRANULARITY) | \
+ DPTUN_REG(DP_ESTIMATED_BW) | \
+ DPTUN_REG(DP_ALLOCATED_BW) | \
+ DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
+ DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
+ DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
+
+static const DECLARE_BITMAP(dptun_info_regs, 64) = {
+ DPTUN_INFO_REG_MASK & -1UL,
+#if BITS_PER_LONG == 32
+ DPTUN_INFO_REG_MASK >> 32,
+#endif
+};
+
+struct drm_dp_tunnel_regs {
+ u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
+};
+
+struct drm_dp_tunnel_group;
+
+struct drm_dp_tunnel {
+ struct drm_dp_tunnel_group *group;
+
+ struct list_head node;
+
+ struct kref kref;
+ struct ref_tracker *tracker;
+ struct drm_dp_aux *aux;
+ char name[8];
+
+ int bw_granularity;
+ int estimated_bw;
+ int allocated_bw;
+
+ int max_dprx_rate;
+ u8 max_dprx_lane_count;
+
+ u8 adapter_id;
+
+ bool bw_alloc_supported:1;
+ bool bw_alloc_enabled:1;
+ bool has_io_error:1;
+ bool destroyed:1;
+};
+
+struct drm_dp_tunnel_group_state;
+
+struct drm_dp_tunnel_state {
+ struct drm_dp_tunnel_group_state *group_state;
+
+ struct drm_dp_tunnel_ref tunnel_ref;
+
+ struct list_head node;
+
+ u32 stream_mask;
+ int *stream_bw;
+};
+
+struct drm_dp_tunnel_group_state {
+ struct drm_private_state base;
+
+ struct list_head tunnel_states;
+};
+
+struct drm_dp_tunnel_group {
+ struct drm_private_obj base;
+ struct drm_dp_tunnel_mgr *mgr;
+
+ struct list_head tunnels;
+
+ /* available BW including the allocated_bw of all tunnels in the group */
+ int available_bw;
+
+ u8 drv_group_id;
+ char name[8];
+
+ bool active:1;
+};
+
+struct drm_dp_tunnel_mgr {
+ struct drm_device *dev;
+
+ int group_count;
+ struct drm_dp_tunnel_group *groups;
+ wait_queue_head_t bw_req_queue;
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+ struct ref_tracker_dir ref_tracker;
+#endif
+};
+
+/*
+ * The following helpers provide a way to read out the tunneling DPCD
+ * registers with a minimal amount of AUX transfers (1 transfer per contiguous
+ * range, as permitted by the 16 byte per transfer AUX limit), not accessing
+ * other registers to avoid any read side-effects.
+ */
+static int next_reg_area(int *offset)
+{
+ *offset = find_next_bit(dptun_info_regs, 64, *offset);
+
+ return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
+}
+
+#define tunnel_reg_ptr(__regs, __address) ({ \
+ WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
+ &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
+})
+
+static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
+{
+ int offset = 0;
+ int len;
+
+ while ((len = next_reg_area(&offset))) {
+ int address = DP_TUNNELING_BASE + offset;
+
+ if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ return -EIO;
+
+ offset += len;
+ }
+
+ return 0;
+}
+
+static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
+{
+ return *tunnel_reg_ptr(regs, address);
+}
+
+static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
+ u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
+
+ if (!group_id)
+ return 0;
+
+ return (drv_id << DP_GROUP_ID_BITS) | group_id;
+}
+
+/* Return granularity in kB/s units */
+static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
+{
+ int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
+
+ if (gr > 2)
+ return -1;
+
+ return (250000 << gr) / 8;
+}
+
+static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
+
+ return drm_dp_bw_code_to_link_rate(bw_code);
+}
+
+static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
+{
+ return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
+ DP_TUNNELING_MAX_LANE_COUNT_MASK;
+}
+
+static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
+
+ if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
+ return false;
+
+ return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
+ DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
+}
+
+static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
+{
+ return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
+ DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
+}
+
+static u8 tunnel_group_drv_id(u8 drv_group_id)
+{
+ return drv_group_id >> DP_GROUP_ID_BITS;
+}
+
+static u8 tunnel_group_id(u8 drv_group_id)
+{
+ return drv_group_id & DP_GROUP_ID_MASK;
+}
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->name;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_name);
+
+static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
+{
+ return group->name;
+}
+
+static struct drm_dp_tunnel_group *
+lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
+{
+ struct drm_dp_tunnel_group *group = NULL;
+ int i;
+
+ for (i = 0; i < mgr->group_count; i++) {
+ /*
+		 * A tunnel group with a 0 group ID shouldn't have more than
+		 * one tunnel.
+ */
+ if (tunnel_group_id(drv_group_id) &&
+ mgr->groups[i].drv_group_id == drv_group_id)
+ return &mgr->groups[i];
+
+ if (!group && !mgr->groups[i].active)
+ group = &mgr->groups[i];
+ }
+
+ if (!group) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Can't allocate more tunnel groups\n");
+ return NULL;
+ }
+
+ group->drv_group_id = drv_group_id;
+ group->active = true;
+
+ /*
+ * The group name format here and elsewhere: Driver-ID:Group-ID:*
+ * (* standing for all DP-Adapters/tunnels in the group).
+ */
+ snprintf(group->name, sizeof(group->name), "%d:%d:*",
+ tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+ tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
+
+ return group;
+}
+
+static void free_group(struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel_mgr *mgr = group->mgr;
+
+ if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
+ return;
+
+ group->drv_group_id = 0;
+ group->available_bw = -1;
+ group->active = false;
+}
+
+static struct drm_dp_tunnel *
+tunnel_get(struct drm_dp_tunnel *tunnel)
+{
+ kref_get(&tunnel->kref);
+
+ return tunnel;
+}
+
+static void free_tunnel(struct kref *kref)
+{
+ struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ struct drm_dp_tunnel_group *group = tunnel->group;
+
+ list_del(&tunnel->node);
+ if (list_empty(&group->tunnels))
+ free_group(group);
+
+ kfree(tunnel);
+}
+
+static void tunnel_put(struct drm_dp_tunnel *tunnel)
+{
+ kref_put(&tunnel->kref, free_tunnel);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
+ tracker, GFP_KERNEL);
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ ref_tracker_free(&tunnel->group->mgr->ref_tracker,
+ tracker);
+}
+#else
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+}
+#endif
+
+/**
+ * drm_dp_tunnel_get - Get a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Get a reference for @tunnel, along with a debug tracker to help locate
+ * the source of a reference leak, double reference put or similar issue.
+ *
+ * The reference must be dropped after use by calling drm_dp_tunnel_put(),
+ * passing @tunnel and the *@tracker returned from here.
+ *
+ * Returns @tunnel - as a convenience - along with *@tracker.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ track_tunnel_ref(tunnel, tracker);
+
+ return tunnel_get(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get);
+
+/**
+ * drm_dp_tunnel_put - Put a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Put a reference for @tunnel along with its debug *@tracker, which
+ * was obtained with drm_dp_tunnel_get().
+ */
+void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ untrack_tunnel_ref(tunnel, tracker);
+
+ tunnel_put(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_put);
+
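A minimal sketch of the get/put pairing (the encoder state structure is an assumption of the example; it only illustrates that each holder of a tunnel pointer keeps the tracker next to it and uses it for the matching put):

#include <linux/ref_tracker.h>
#include <drm/display/drm_dp_tunnel.h>

struct example_encoder_state {
	struct drm_dp_tunnel *tunnel;
	struct ref_tracker *tunnel_tracker;
};

/* Duplicate the (non-NULL) tunnel pointer: the copy takes its own tracked reference. */
static void example_state_copy(struct example_encoder_state *new_state,
			       const struct example_encoder_state *old_state)
{
	new_state->tunnel = drm_dp_tunnel_get(old_state->tunnel,
					      &new_state->tunnel_tracker);
}

/* ... and drops it, with the matching tracker, once the state is freed. */
static void example_state_free(struct example_encoder_state *state)
{
	drm_dp_tunnel_put(state->tunnel, &state->tunnel_tracker);
	state->tunnel = NULL;
}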
+static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
+ u8 drv_group_id,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group *group;
+
+ group = lookup_or_alloc_group(mgr, drv_group_id);
+ if (!group)
+ return false;
+
+ tunnel->group = group;
+ list_add(&tunnel->node, &group->tunnels);
+
+ return true;
+}
+
+static struct drm_dp_tunnel *
+create_tunnel(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux,
+ const struct drm_dp_tunnel_regs *regs)
+{
+ u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+ struct drm_dp_tunnel *tunnel;
+
+ tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel)
+ return NULL;
+
+ INIT_LIST_HEAD(&tunnel->node);
+
+ kref_init(&tunnel->kref);
+
+ tunnel->aux = aux;
+
+ tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
+
+ snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
+ tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+ tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
+ tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
+
+ tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
+ tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
+ tunnel->bw_granularity;
+ /*
+ * An initial allocated BW of 0 indicates an undefined state: the
+ * actual allocation is determined by the TBT CM, usually following a
+ * legacy allocation policy (based on the max DPRX caps). From the
+ * driver's POV the state becomes defined only after the first
+ * allocation request.
+ */
+ if (!tunnel->allocated_bw)
+ tunnel->allocated_bw = -1;
+
+ tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
+ tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
+
+ if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
+ kfree(tunnel);
+
+ return NULL;
+ }
+
+ track_tunnel_ref(tunnel, &tunnel->tracker);
+
+ return tunnel;
+}
+
+static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
+{
+ untrack_tunnel_ref(tunnel, &tunnel->tracker);
+ tunnel_put(tunnel);
+}
+
+/**
+ * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Set the IO error flag for @tunnel. Drivers can call this function upon
+ * detecting a failure that affects the tunnel functionality, for instance
+ * after a DP AUX transfer failure on the port @tunnel is connected to.
+ *
+ * This disables further management of @tunnel, including any related
+ * AUX accesses for tunneling DPCD registers, returning an error to the
+ * initiators of these accesses. The driver is expected to drop this tunnel
+ * and - optionally - recreate it.
+ */
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
+{
+ tunnel->has_io_error = true;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
+
+#define SKIP_DPRX_CAPS_CHECK BIT(0)
+#define ALLOW_ALLOCATED_BW_CHANGE BIT(1)
+static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
+ const struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+ bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
+ bool ret = true;
+
+ if (!tunnel_reg_bw_alloc_supported(regs)) {
+ if (tunnel_group_id(drv_group_id)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: A non-zero group ID is only allowed with BWA support\n");
+ ret = false;
+ }
+
+ if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: BW is allocated without BWA support\n");
+ ret = false;
+ }
+
+ return ret;
+ }
+
+ if (!tunnel_group_id(drv_group_id)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: BWA support requires a non-zero group ID\n");
+ ret = false;
+ }
+
+ if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Invalid DPRX lane count: %d\n",
+ tunnel_reg_max_dprx_lane_count(regs));
+
+ ret = false;
+ }
+
+ if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: DPRX rate is 0\n");
+
+ ret = false;
+ }
+
+ if (tunnel_reg_bw_granularity(regs) < 0) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Invalid BW granularity\n");
+
+ ret = false;
+ }
+
+ if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
+ tunnel_reg_bw_granularity(regs)),
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
+ tunnel_reg_bw_granularity(regs)));
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return max(tunnel->allocated_bw, 0);
+}
+
+static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
+ const struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
+ bool ret = true;
+
+ if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
+ tun_dbg(tunnel,
+ "BW alloc support has changed %s -> %s\n",
+ str_yes_no(tunnel->bw_alloc_supported),
+ str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
+
+ ret = false;
+ }
+
+ if (tunnel->group->drv_group_id != new_drv_group_id) {
+ tun_dbg(tunnel,
+ "Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
+ tunnel_group_drv_id(tunnel->group->drv_group_id),
+ tunnel_group_id(tunnel->group->drv_group_id),
+ tunnel_group_drv_id(new_drv_group_id),
+ tunnel_group_id(new_drv_group_id));
+
+ ret = false;
+ }
+
+ if (!tunnel->bw_alloc_supported)
+ return ret;
+
+ if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
+ tun_dbg(tunnel,
+ "BW granularity has changed: %d -> %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel->bw_granularity),
+ DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
+
+ ret = false;
+ }
+
+ /*
+ * On some devices at least the BW alloc mode enabled status is always
+ * reported as 0, so skip checking that here.
+ */
+
+ if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
+ tunnel_allocated_bw(tunnel) !=
+ tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
+ tun_dbg(tunnel,
+ "Allocated BW has changed: %d -> %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel->allocated_bw),
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+static int
+read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ int err;
+
+ err = read_tunnel_regs(tunnel->aux, regs);
+ if (err < 0) {
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return err;
+ }
+
+ if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
+ return -EINVAL;
+
+ if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
+{
+ bool changed = false;
+
+ if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
+ tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
+ changed = true;
+ }
+
+ if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
+ tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
+ changed = true;
+ }
+
+ return changed;
+}
+
+static int dev_id_len(const u8 *dev_id, int max_len)
+{
+ while (max_len && dev_id[max_len - 1] == '\0')
+ max_len--;
+
+ return max_len;
+}
+
+static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
+{
+ int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
+ tunnel->max_dprx_lane_count);
+
+ /*
+ * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
+ * an allocation of max_dprx_bw. A BW request above this rounded-up
+ * value will fail.
+ */
+ return min(roundup(max_dprx_bw, tunnel->bw_granularity),
+ MAX_DP_REQUEST_BW * tunnel->bw_granularity);
+}
+
+static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
+}
+
+/**
+ * drm_dp_tunnel_detect - Detect DP tunnel on the link
+ * @mgr: Tunnel manager
+ * @aux: DP AUX on which the tunnel will be detected
+ *
+ * Detect if there is any DP tunnel on the link and add it to the tunnel
+ * group's tunnel list.
+ *
+ * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
+ * failure.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux)
+{
+ struct drm_dp_tunnel_regs regs;
+ struct drm_dp_tunnel *tunnel;
+ int err;
+
+ err = read_tunnel_regs(aux, &regs);
+ if (err)
+ return ERR_PTR(err);
+
+ if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+ DP_TUNNELING_SUPPORT))
+ return ERR_PTR(-ENODEV);
+
+ /* The DPRX caps are valid only after enabling BW alloc mode. */
+ if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
+ return ERR_PTR(-EINVAL);
+
+ tunnel = create_tunnel(mgr, aux, &regs);
+ if (!tunnel)
+ return ERR_PTR(-ENOMEM);
+
+ tun_dbg(tunnel,
+ "OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
+ DP_TUNNELING_OUI_BYTES,
+ tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
+ dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
+ tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
+ (tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
+ DP_TUNNELING_HW_REV_MAJOR_SHIFT,
+ (tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
+ DP_TUNNELING_HW_REV_MINOR_SHIFT,
+ tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
+ tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
+ str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+ DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
+ str_yes_no(tunnel->bw_alloc_supported),
+ str_yes_no(tunnel->bw_alloc_enabled));
+
+ return tunnel;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_detect);
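+
+/*
+ * Illustrative usage sketch, not part of this patch: a driver would probe
+ * for a tunnel once the sink's DPCD is accessible, treating -ENODEV as the
+ * absence of a tunnel. The mgr and aux variables are assumptions.
+ *
+ *	struct drm_dp_tunnel *tunnel;
+ *
+ *	tunnel = drm_dp_tunnel_detect(mgr, aux);
+ *	if (IS_ERR(tunnel) && PTR_ERR(tunnel) != -ENODEV)
+ *		return PTR_ERR(tunnel);
+ */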
+
+/**
+ * drm_dp_tunnel_destroy - Destroy tunnel object
+ * @tunnel: Tunnel object
+ *
+ * Remove the tunnel from the tunnel topology and destroy it.
+ *
+ * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
+ */
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+ if (!tunnel)
+ return 0;
+
+ if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
+ return -ENODEV;
+
+ tun_dbg(tunnel, "destroying\n");
+
+ tunnel->destroyed = true;
+ destroy_tunnel(tunnel);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_destroy);
+
+static int check_tunnel(const struct drm_dp_tunnel *tunnel)
+{
+ if (tunnel->destroyed)
+ return -ENODEV;
+
+ if (tunnel->has_io_error)
+ return -EIO;
+
+ return 0;
+}
+
+static int group_allocated_bw(struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel *tunnel;
+ int group_allocated_bw = 0;
+
+ for_each_tunnel_in_group(group, tunnel) {
+ if (check_tunnel(tunnel) == 0 &&
+ tunnel->bw_alloc_enabled)
+ group_allocated_bw += tunnel_allocated_bw(tunnel);
+ }
+
+ return group_allocated_bw;
+}
+
+/*
+ * The estimated BW reported by the TBT Connection Manager for each tunnel in
+ * a group includes the BW already allocated for the given tunnel and the
+ * unallocated BW which is free to be used by any tunnel in the group.
+ */
+static int group_free_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
+}
+
+static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return group_allocated_bw(tunnel->group) +
+ group_free_bw(tunnel);
+}
+
+static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
+ const struct drm_dp_tunnel_regs *regs)
+{
+ struct drm_dp_tunnel *tunnel_iter;
+ int group_available_bw;
+ bool changed;
+
+ tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
+
+ if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
+ return 0;
+
+ for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
+ int err;
+
+ if (tunnel_iter == tunnel)
+ continue;
+
+ if (check_tunnel(tunnel_iter) != 0 ||
+ !tunnel_iter->bw_alloc_enabled)
+ continue;
+
+ err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
+ if (err) {
+ tun_dbg(tunnel_iter,
+ "Probe failed, assume disconnected (err %pe)\n",
+ ERR_PTR(err));
+ drm_dp_tunnel_set_io_error(tunnel_iter);
+ }
+ }
+
+ group_available_bw = calc_group_available_bw(tunnel);
+
+ tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
+ DPTUN_BW_ARG(tunnel->group->available_bw),
+ DPTUN_BW_ARG(group_available_bw));
+
+ changed = tunnel->group->available_bw != group_available_bw;
+
+ tunnel->group->available_bw = group_available_bw;
+
+ return changed ? 1 : 0;
+}
+
+static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
+{
+ u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ goto out_err;
+
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ goto out_err;
+
+ tunnel->bw_alloc_enabled = enable;
+
+ return 0;
+
+out_err:
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_regs regs;
+ int err;
+
+ err = check_tunnel(tunnel);
+ if (err)
+ return err;
+
+ if (!tunnel->bw_alloc_supported)
+ return -EOPNOTSUPP;
+
+ if (!tunnel_group_id(tunnel->group->drv_group_id))
+ return -EINVAL;
+
+ err = set_bw_alloc_mode(tunnel, true);
+ if (err)
+ goto out;
+
+ /*
+	 * After a BWA disable/re-enable sequence the allocated BW can either
+	 * stay at its last requested value or, for instance after system
+	 * suspend/resume, the TBT CM can reset the allocation back to the
+	 * amount allocated in the legacy/non-BWA mode. Accordingly allow the
+	 * allocation to change with respect to the last SW state.
+ */
+ err = read_and_verify_tunnel_regs(tunnel, &regs,
+ ALLOW_ALLOCATED_BW_CHANGE);
+ if (err) {
+ set_bw_alloc_mode(tunnel, false);
+
+ goto out;
+ }
+
+ if (!tunnel->max_dprx_rate)
+ update_dprx_caps(tunnel, &regs);
+
+ if (tunnel->group->available_bw == -1) {
+ err = update_group_available_bw(tunnel, &regs);
+ if (err > 0)
+ err = 0;
+ }
+out:
+ tun_dbg_stat(tunnel, err,
+ "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s",
+ tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
+ DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+ DPTUN_BW_ARG(tunnel->group->available_bw));
+
+ return err;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);
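+
+/*
+ * Illustrative sketch, not part of this patch: after detecting a tunnel a
+ * driver may try to enable the BW allocation mode and keep using the tunnel
+ * without it if the mode isn't supported.
+ *
+ *	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
+ *	if (err && err != -EOPNOTSUPP)
+ *		return err;
+ */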
+
+/**
+ * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Disable the DP tunnel BW allocation mode on @tunnel.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ int err;
+
+ err = check_tunnel(tunnel);
+ if (err)
+ return err;
+
+ tunnel->allocated_bw = -1;
+
+ err = set_bw_alloc_mode(tunnel, false);
+
+ tun_dbg_stat(tunnel, err, "Disabling BW alloc mode");
+
+ return err;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);
+
+/**
+ * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
+ * @tunnel: Tunnel object
+ *
+ * Query if the BW allocation mode is enabled for @tunnel.
+ *
+ * Returns %true if the BW allocation mode is enabled for @tunnel.
+ */
+bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel && tunnel->bw_alloc_enabled;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);
+
+static int clear_bw_req_state(struct drm_dp_aux *aux)
+{
+ u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
+
+ if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
+{
+ u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
+ u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
+ u8 val;
+ int err;
+
+ if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ return -EIO;
+
+ *status_changed = val & status_change_mask;
+
+ val &= bw_req_mask;
+
+ if (!val)
+ return -EAGAIN;
+
+ err = clear_bw_req_state(aux);
+ if (err < 0)
+ return err;
+
+ return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC;
+}
+
+static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+ struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr;
+ int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ long timeout;
+ int err;
+
+ if (bw < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw)
+ return 0;
+
+ /* Atomic check should prevent the following. */
+ if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = clear_bw_req_state(tunnel->aux);
+ if (err)
+ goto out;
+
+ if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ timeout = msecs_to_jiffies(3000);
+ add_wait_queue(&mgr->bw_req_queue, &wait);
+
+ for (;;) {
+ bool status_changed;
+
+ err = bw_req_complete(tunnel->aux, &status_changed);
+ if (err != -EAGAIN)
+ break;
+
+ if (status_changed) {
+ struct drm_dp_tunnel_regs regs;
+
+ err = read_and_verify_tunnel_regs(tunnel, &regs,
+ ALLOW_ALLOCATED_BW_CHANGE);
+ if (err)
+ break;
+ }
+
+ if (!timeout) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
+	}
+
+ remove_wait_queue(&mgr->bw_req_queue, &wait);
+
+ if (err)
+ goto out;
+
+ tunnel->allocated_bw = request_bw * tunnel->bw_granularity;
+
+out:
+ tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
+ DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
+ DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
+ DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+ DPTUN_BW_ARG(tunnel->group->available_bw));
+
+ if (err == -EIO)
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return err;
+}
+
+/**
+ * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
+ * @tunnel: Tunnel object
+ * @bw: BW in kB/s units
+ *
+ * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
+ * calling this function for the same tunnel setting @bw to 0.
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+ int err;
+
+ err = check_tunnel(tunnel);
+ if (err)
+ return err;
+
+ return allocate_tunnel_bw(tunnel, bw);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
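+
+/*
+ * Illustrative sketch, not part of this patch: a driver would request the BW
+ * needed by the enabled streams after a modeset and release it by requesting
+ * 0 once the tunnel carries no streams. required_bw is an assumption standing
+ * in for the driver's own computed value.
+ *
+ *	err = drm_dp_tunnel_alloc_bw(tunnel, required_bw);
+ *	...
+ *	err = drm_dp_tunnel_alloc_bw(tunnel, 0);
+ */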
+
+/**
+ * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Get the current BW allocated for @tunnel. After the tunnel is created /
+ * resumed and the BW allocation mode is enabled for it, the allocation
+ * becomes determined only after the first allocation request by the driver
+ * calling drm_dp_tunnel_alloc_bw().
+ *
+ * Return the BW allocated for the tunnel, or -1 if the allocation is
+ * undetermined.
+ */
+int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->allocated_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);
+
+/*
+ * Return 0 if the status hasn't changed, 1 if the status has changed, a
+ * negative error code in case of an I/O failure.
+ */
+static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
+{
+ u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ goto out_err;
+
+ val &= mask;
+
+ if (val) {
+ if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ goto out_err;
+
+ return 1;
+ }
+
+ if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
+ return 0;
+
+ /*
+ * Check for estimated BW changes explicitly to account for lost
+ * BW change notifications.
+ */
+ if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ goto out_err;
+
+ if (val * tunnel->bw_granularity != tunnel->estimated_bw)
+ return 1;
+
+ return 0;
+
+out_err:
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
+ * @tunnel: Tunnel object
+ *
+ * Update the SW state of @tunnel with the HW state.
+ *
+ * Returns 0 if the state has not changed, 1 if it has changed and was updated
+ * successfully, and a negative error code otherwise.
+ */
+int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_regs regs;
+ bool changed = false;
+ int ret;
+
+ ret = check_tunnel(tunnel);
+ if (ret < 0)
+ return ret;
+
+ ret = check_and_clear_status_change(tunnel);
+ if (ret < 0)
+ goto out;
+
+ if (!ret)
+ return 0;
+
+ ret = read_and_verify_tunnel_regs(tunnel, &regs, 0);
+ if (ret)
+ goto out;
+
+ if (update_dprx_caps(tunnel, &regs))
+ changed = true;
+
+ ret = update_group_available_bw(tunnel, &regs);
+ if (ret == 1)
+ changed = true;
+
+out:
+ tun_dbg_stat(tunnel, ret < 0 ? ret : 0,
+ "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
+ str_yes_no(changed),
+ tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
+ DPTUN_BW_ARG(tunnel->allocated_bw),
+ DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
+ DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+ DPTUN_BW_ARG(tunnel->group->available_bw));
+
+ if (ret < 0)
+ return ret;
+
+ if (changed)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_update_state);
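+
+/*
+ * Illustrative sketch, not part of this patch: a driver would typically call
+ * this from its hotplug handling and, if the state changed, schedule an
+ * atomic commit so the new BW limits get re-checked.
+ *
+ *	ret = drm_dp_tunnel_update_state(tunnel);
+ *	if (ret > 0)
+ *		schedule a re-check of the tunnel BW limits
+ */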
+
+/**
+ * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
+ * @mgr: Tunnel manager
+ * @aux: DP AUX on which the tunnel IRQ occurred
+ *
+ * Handle any pending DP tunnel IRQs, waking up waiters for a completion
+ * event.
+ *
+ * Returns 1 if the state of the tunnel has changed which requires calling
+ * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
+ * 0 otherwise.
+ */
+int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
+{
+ u8 val;
+
+ if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ return -EIO;
+
+ if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
+ wake_up_all(&mgr->bw_req_queue);
+
+ if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED))
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);
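+
+/*
+ * Illustrative sketch, not part of this patch: a driver's HPD IRQ handler
+ * could pass on tunneling IRQs as below, deferring the actual state update
+ * via drm_dp_tunnel_update_state() to its hotplug work. need_hotplug is an
+ * assumption.
+ *
+ *	ret = drm_dp_tunnel_handle_irq(mgr, aux);
+ *	if (ret == 1)
+ *		need_hotplug = true;
+ */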
+
+/**
+ * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
+ * @tunnel: Tunnel object
+ *
+ * The function is used to query the maximum link rate of the DPRX connected
+ * to @tunnel. Note that this rate will not be limited by the BW limit of the
+ * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
+ * registers.
+ *
+ * Returns the maximum link rate in 10 kbit/s units.
+ */
+int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->max_dprx_rate;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);
+
+/**
+ * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
+ * @tunnel: Tunnel object
+ *
+ * The function is used to query the maximum lane count of the DPRX connected
+ * to @tunnel. Note that this lane count will not be limited by the BW limit of
+ * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
+ * registers.
+ *
+ * Returns the maximum lane count.
+ */
+int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->max_dprx_lane_count;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
+
+/**
+ * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
+ * @tunnel: Tunnel object
+ *
+ * This function is used to query the estimated total available BW of the
+ * tunnel. This includes the currently allocated and free BW for all the
+ * tunnels in @tunnel's group. The available BW is valid only after the BW
+ * allocation mode has been enabled for the tunnel and its state got updated
+ * calling drm_dp_tunnel_update_state().
+ *
+ * Returns the @tunnel group's estimated total available bandwidth in kB/s
+ * units, or -1 if the available BW isn't valid (the BW allocation mode is
+ * not enabled or the tunnel's state hasn't been updated).
+ */
+int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->group->available_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_available_bw);
+
+static struct drm_dp_tunnel_group_state *
+drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ return (struct drm_dp_tunnel_group_state *)
+ drm_atomic_get_private_obj_state(state,
+ &tunnel->group->base);
+}
+
+static struct drm_dp_tunnel_state *
+add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ tun_dbg_atomic(tunnel,
+ "Adding state for tunnel %p to group state %p\n",
+ tunnel, group_state);
+
+ tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL);
+ if (!tunnel_state)
+ return NULL;
+
+ tunnel_state->group_state = group_state;
+
+ drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref);
+
+ INIT_LIST_HEAD(&tunnel_state->node);
+ list_add(&tunnel_state->node, &group_state->tunnel_states);
+
+ return tunnel_state;
+}
+
+static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
+{
+ tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel,
+ "Freeing state for tunnel %p\n",
+ tunnel_state->tunnel_ref.tunnel);
+
+ list_del(&tunnel_state->node);
+
+ kfree(tunnel_state->stream_bw);
+ drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref);
+
+ kfree(tunnel_state);
+}
+
+static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+ struct drm_dp_tunnel_state *tunnel_state_tmp;
+
+ for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp)
+ free_tunnel_state(tunnel_state);
+
+ kfree(group_state);
+}
+
+static struct drm_dp_tunnel_state *
+get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ for_each_tunnel_state(group_state, tunnel_state)
+ if (tunnel_state->tunnel_ref.tunnel == tunnel)
+ return tunnel_state;
+
+ return NULL;
+}
+
+static struct drm_dp_tunnel_state *
+get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ tunnel_state = get_tunnel_state(group_state, tunnel);
+ if (tunnel_state)
+ return tunnel_state;
+
+ return add_tunnel_state(group_state, tunnel);
+}
+
+static struct drm_private_state *
+tunnel_group_duplicate_state(struct drm_private_obj *obj)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+ if (!group_state)
+ return NULL;
+
+ INIT_LIST_HEAD(&group_state->tunnel_states);
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base);
+
+ for_each_tunnel_state(to_group_state(obj->state), tunnel_state) {
+ struct drm_dp_tunnel_state *new_tunnel_state;
+
+ new_tunnel_state = get_or_add_tunnel_state(group_state,
+ tunnel_state->tunnel_ref.tunnel);
+ if (!new_tunnel_state)
+ goto out_free_state;
+
+ new_tunnel_state->stream_mask = tunnel_state->stream_mask;
+ new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw,
+ sizeof(*tunnel_state->stream_bw) *
+ hweight32(tunnel_state->stream_mask),
+ GFP_KERNEL);
+
+ if (!new_tunnel_state->stream_bw)
+ goto out_free_state;
+ }
+
+ return &group_state->base;
+
+out_free_state:
+ free_group_state(group_state);
+
+ return NULL;
+}
+
+static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
+{
+ free_group_state(to_group_state(state));
+}
+
+static const struct drm_private_state_funcs tunnel_group_funcs = {
+ .atomic_duplicate_state = tunnel_group_duplicate_state,
+ .atomic_destroy_state = tunnel_group_destroy_state,
+};
+
+/**
+ * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the new atomic state for @tunnel, duplicating it from the old tunnel
+ * state if not yet allocated.
+ *
+ * Return the state or an ERR_PTR() error on failure.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+ if (IS_ERR(group_state))
+ return ERR_CAST(group_state);
+
+ tunnel_state = get_or_add_tunnel_state(group_state, tunnel);
+ if (!tunnel_state)
+ return ERR_PTR(-ENOMEM);
+
+ return tunnel_state;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);
+
+/**
+ * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the old atomic state for @tunnel.
+ *
+ * Return the old state or NULL if the tunnel's atomic state is not in @state.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group_state *old_group_state;
+ int i;
+
+ for_each_old_group_in_state(state, old_group_state, i)
+ if (to_group(old_group_state->base.obj) == tunnel->group)
+ return get_tunnel_state(old_group_state, tunnel);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);
+
+/**
+ * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the new atomic state for @tunnel.
+ *
+ * Return the new state or NULL if the tunnel's atomic state is not in @state.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group_state *new_group_state;
+ int i;
+
+ for_each_new_group_in_state(state, new_group_state, i)
+ if (to_group(new_group_state->base.obj) == tunnel->group)
+ return get_tunnel_state(new_group_state, tunnel);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);
+
+static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+
+ group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+ if (!group_state)
+ return false;
+
+ INIT_LIST_HEAD(&group_state->tunnel_states);
+
+ group->mgr = mgr;
+ group->available_bw = -1;
+ INIT_LIST_HEAD(&group->tunnels);
+
+ drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
+ &tunnel_group_funcs);
+
+ return true;
+}
+
+static void cleanup_group(struct drm_dp_tunnel_group *group)
+{
+ drm_atomic_private_obj_fini(&group->base);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
+{
+ const struct drm_dp_tunnel_state *tunnel_state;
+ u32 stream_mask = 0;
+
+ for_each_tunnel_state(group_state, tunnel_state) {
+ drm_WARN(to_group(group_state->base.obj)->mgr->dev,
+ tunnel_state->stream_mask & stream_mask,
+ "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n",
+ tunnel_state->tunnel_ref.tunnel->name,
+ tunnel_state->stream_mask,
+ stream_mask);
+
+ stream_mask |= tunnel_state->stream_mask;
+ }
+}
+#else
+static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
+{
+}
+#endif
+
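+/*
+ * Note (not part of the original patch): stream_id_to_idx() below maps a
+ * stream ID to its index in the compacted stream_bw[] array. For instance
+ * with stream_mask 0b1011, stream IDs 0, 1 and 3 map to indices 0, 1 and 2.
+ */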
+static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
+{
+ return hweight32(stream_mask & (BIT(stream_id) - 1));
+}
+
+static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
+ unsigned long old_mask, unsigned long new_mask)
+{
+ unsigned long move_mask = old_mask & new_mask;
+ int *new_bws = NULL;
+ int id;
+
+ WARN_ON(!new_mask);
+
+ if (old_mask == new_mask)
+ return 0;
+
+ new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL);
+ if (!new_bws)
+ return -ENOMEM;
+
+ for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask))
+ new_bws[stream_id_to_idx(new_mask, id)] =
+ tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)];
+
+ kfree(tunnel_state->stream_bw);
+ tunnel_state->stream_bw = new_bws;
+ tunnel_state->stream_mask = new_mask;
+
+ return 0;
+}
+
+static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
+ u8 stream_id, int bw)
+{
+ int err;
+
+ err = resize_bw_array(tunnel_state,
+ tunnel_state->stream_mask,
+ tunnel_state->stream_mask | BIT(stream_id));
+ if (err)
+ return err;
+
+ tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw;
+
+ return 0;
+}
+
+static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
+ u8 stream_id)
+{
+ if (!(tunnel_state->stream_mask & ~BIT(stream_id))) {
+ free_tunnel_state(tunnel_state);
+ return 0;
+ }
+
+ return resize_bw_array(tunnel_state,
+ tunnel_state->stream_mask,
+ tunnel_state->stream_mask & ~BIT(stream_id));
+}
+
+/**
+ * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
+ * @state: Atomic state
+ * @tunnel: DP tunnel containing the stream
+ * @stream_id: Stream ID
+ * @bw: BW of the stream
+ *
+ * Set a DP tunnel stream's required BW in the atomic state.
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ u8 stream_id, int bw)
+{
+ struct drm_dp_tunnel_group_state *new_group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+ int err;
+
+ if (drm_WARN_ON(tunnel->group->mgr->dev,
+ stream_id > BITS_PER_TYPE(tunnel_state->stream_mask)))
+ return -EINVAL;
+
+ tun_dbg(tunnel,
+ "Setting %d Mb/s for stream %d\n",
+ DPTUN_BW_ARG(bw), stream_id);
+
+ new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+ if (IS_ERR(new_group_state))
+ return PTR_ERR(new_group_state);
+
+ if (bw == 0) {
+ tunnel_state = get_tunnel_state(new_group_state, tunnel);
+ if (!tunnel_state)
+ return 0;
+
+ return clear_stream_bw(tunnel_state, stream_id);
+ }
+
+ tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel);
+ if (drm_WARN_ON(state->dev, !tunnel_state))
+ return -EINVAL;
+
+ err = set_stream_bw(tunnel_state, stream_id, bw);
+ if (err)
+ return err;
+
+ check_unique_stream_ids(new_group_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);
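+
+/*
+ * Illustrative sketch, not part of this patch: during atomic check a driver
+ * would set the BW of each stream routed through the tunnel and clear it by
+ * passing 0 for streams getting disabled. stream_id, enable and stream_bw
+ * stand in for the driver's own bookkeeping and are assumptions.
+ *
+ *	err = drm_dp_tunnel_atomic_set_stream_bw(state, tunnel, stream_id,
+ *						 enable ? stream_bw : 0);
+ */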
+
+/**
+ * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
+ * @tunnel_state: Atomic state of the queried tunnel
+ *
+ * Calculate the BW required by a tunnel adding up the required BW of all
+ * the streams in the tunnel.
+ *
+ * Return the total BW required by the tunnel.
+ */
+int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
+{
+ int tunnel_bw = 0;
+ int i;
+
+ if (!tunnel_state || !tunnel_state->stream_mask)
+ return 0;
+
+ for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
+ tunnel_bw += tunnel_state->stream_bw[i];
+
+ return tunnel_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);
+
+/**
+ * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
+ * @state: Atomic state
+ * @tunnel: Tunnel object
+ * @stream_mask: Mask of streams in @tunnel's group
+ *
+ * Get the mask of all the stream IDs in the tunnel group of @tunnel.
+ *
+ * Return 0 in case of success - with the stream IDs in @stream_mask - or a
+ * negative error code in case of failure.
+ */
+int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel,
+ u32 *stream_mask)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+ if (IS_ERR(group_state))
+ return PTR_ERR(group_state);
+
+ *stream_mask = 0;
+ for_each_tunnel_state(group_state, tunnel_state)
+ *stream_mask |= tunnel_state->stream_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);
+
+static int
+drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
+ u32 *failed_stream_mask)
+{
+ struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
+ struct drm_dp_tunnel_state *new_tunnel_state;
+ u32 group_stream_mask = 0;
+ int group_bw = 0;
+
+ for_each_tunnel_state(new_group_state, new_tunnel_state) {
+ struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
+ int max_dprx_bw = get_max_dprx_bw(tunnel);
+ int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
+
+ tun_dbg(tunnel,
+ "%sRequired %d/%d Mb/s total for tunnel.\n",
+ tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
+ DPTUN_BW_ARG(tunnel_bw),
+ DPTUN_BW_ARG(max_dprx_bw));
+
+ if (tunnel_bw > max_dprx_bw) {
+ *failed_stream_mask = new_tunnel_state->stream_mask;
+ return -ENOSPC;
+ }
+
+ group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
+ max_dprx_bw);
+ group_stream_mask |= new_tunnel_state->stream_mask;
+ }
+
+ tun_grp_dbg(group,
+ "%sRequired %d/%d Mb/s total for tunnel group.\n",
+ group_bw > group->available_bw ? "Not enough BW: " : "",
+ DPTUN_BW_ARG(group_bw),
+ DPTUN_BW_ARG(group->available_bw));
+
+ if (group_bw > group->available_bw) {
+ *failed_stream_mask = group_stream_mask;
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
+ * @state: Atomic state
+ * @failed_stream_mask: Mask of stream IDs with a BW limit failure
+ *
+ * Check the required BW of each DP tunnel in @state against both the DPRX BW
+ * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
+ * stream IDs in @failed_stream_mask once a check fails. The mask will contain
+ * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
+ * all the streams in a tunnel group (in case a group BW limit check failed).
+ *
+ * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
+ * check failed - with @failed_stream_mask containing the streams failing the
+ * check - or a negative error code otherwise.
+ */
+int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+ u32 *failed_stream_mask)
+{
+ struct drm_dp_tunnel_group_state *new_group_state;
+ int i;
+
+ for_each_new_group_in_state(state, new_group_state, i) {
+ int ret;
+
+ ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
+ failed_stream_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);
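+
+/*
+ * Illustrative sketch, not part of this patch: this check would run once all
+ * stream BWs have been set in the atomic state; how the failing streams are
+ * handled (for instance by retrying with a lower link BW) is up to the
+ * driver.
+ *
+ *	u32 failed_streams;
+ *
+ *	ret = drm_dp_tunnel_atomic_check_stream_bws(state, &failed_streams);
+ *	if (ret == -ENOSPC)
+ *		reduce the BW of the streams in failed_streams and re-check
+ */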
+
+static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
+{
+ int i;
+
+ for (i = 0; i < mgr->group_count; i++) {
+ cleanup_group(&mgr->groups[i]);
+ drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
+ }
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+ ref_tracker_dir_exit(&mgr->ref_tracker);
+#endif
+
+ kfree(mgr->groups);
+ kfree(mgr);
+}
+
+/**
+ * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
+ * @dev: DRM device object
+ * @max_group_count: Maximum number of tunnel groups
+ *
+ * Creates a DP tunnel manager for @dev.
+ *
+ * Returns a pointer to the tunnel manager if created successfully or NULL in
+ * case of an error.
+ */
+struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+{
+ struct drm_dp_tunnel_mgr *mgr;
+ int i;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ mgr->dev = dev;
+ init_waitqueue_head(&mgr->bw_req_queue);
+
+ mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL);
+ if (!mgr->groups) {
+ kfree(mgr);
+
+ return NULL;
+ }
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+#endif
+
+ for (i = 0; i < max_group_count; i++) {
+ if (!init_group(mgr, &mgr->groups[i])) {
+ destroy_mgr(mgr);
+
+ return NULL;
+ }
+
+ mgr->group_count++;
+ }
+
+ return mgr;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);
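+
+/*
+ * Illustrative sketch, not part of this patch: the manager is created once
+ * per device and destroyed again with drm_dp_tunnel_mgr_destroy() on driver
+ * removal. The group count of 4 below is an arbitrary example value.
+ *
+ *	mgr = drm_dp_tunnel_mgr_create(dev, 4);
+ *	if (!mgr)
+ *		return -ENOMEM;
+ */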
+
+/**
+ * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager
+ * @mgr: Tunnel manager object
+ *
+ * Destroy the tunnel manager.
+ */
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)
+{
+ destroy_mgr(mgr);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index cee3188adf3d..521a71c61b16 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -27,8 +27,9 @@
#include <linux/mutex.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_debugfs.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
@@ -1207,26 +1208,26 @@ int drm_bridge_get_modes(struct drm_bridge *bridge,
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
/**
- * drm_bridge_get_edid - get the EDID data of the connected display
+ * drm_bridge_edid_read - read the EDID data of the connected display
* @bridge: bridge control structure
* @connector: the connector to read EDID for
*
* If the bridge supports output EDID retrieval, as reported by the
- * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
- * get the EDID and return it. Otherwise return NULL.
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
+ * the EDID and return it. Otherwise return NULL.
*
* RETURNS:
* The retrieved EDID on success, or NULL otherwise.
*/
-struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
return NULL;
- return bridge->funcs->get_edid(bridge, connector);
+ return bridge->funcs->edid_read(bridge, connector);
}
-EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
/**
* drm_bridge_hpd_enable - enable hot plug detection for the bridge
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 3acd67021ec6..982552c9f92c 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -239,27 +239,27 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
struct drm_bridge *bridge)
{
enum drm_connector_status status;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int n;
status = drm_bridge_connector_detect(connector, false);
if (status != connector_status_connected)
goto no_edid;
- edid = drm_bridge_get_edid(bridge, connector);
- if (!drm_edid_is_valid(edid)) {
- kfree(edid);
+ drm_edid = drm_bridge_edid_read(bridge, connector);
+ if (!drm_edid_valid(drm_edid)) {
+ drm_edid_free(drm_edid);
goto no_edid;
}
- drm_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
+ n = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return n;
no_edid:
- drm_connector_update_edid_property(connector, NULL);
+ drm_edid_connector_update(connector, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index c1a99bf4dffd..5ebdd6f8f36e 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
+ u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
+ if (block_start < start || block_end > end) {
+ u64 adjusted_start = max(block_start, start);
+ u64 adjusted_end = min(block_end, end);
+
+ if (round_down(adjusted_end + 1, req_size) <=
+ round_up(adjusted_start, req_size))
+ continue;
+ }
+
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
@@ -538,13 +548,13 @@ static int __alloc_range(struct drm_buddy *mm,
list_add(&block->left->tmp_link, dfs);
} while (1);
- list_splice_tail(&allocated, blocks);
-
if (total_allocated < size) {
err = -ENOSPC;
goto err_free;
}
+ list_splice_tail(&allocated, blocks);
+
return 0;
err_undo:
@@ -761,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return -EINVAL;
/* Actual range allocation */
- if (start + size == end)
+ if (start + size == end) {
+ if (!IS_ALIGNED(start | end, min_block_size))
+ return -EINVAL;
+
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+ }
original_size = size;
original_min_size = min_block_size;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 65f9f66933bb..82c665d3e74b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -61,13 +61,13 @@
* to one or more &drm_encoder, which are then each connected to one
* &drm_connector.
*
- * To create a CRTC, a KMS drivers allocates and zeroes an instances of
+ * To create a CRTC, a KMS driver allocates and zeroes an instance of
* &struct drm_crtc (possibly as part of a larger structure) and registers it
* with a call to drm_crtc_init_with_planes().
*
- * The CRTC is also the entry point for legacy modeset operations, see
- * &drm_crtc_funcs.set_config, legacy plane operations, see
- * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
+ * The CRTC is also the entry point for legacy modeset operations (see
+ * &drm_crtc_funcs.set_config), legacy plane operations (see
+ * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2), and other legacy
* operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
* features are controlled through &drm_property and
* &drm_mode_config_funcs.atomic_check.
@@ -107,18 +107,6 @@ int drm_crtc_force_disable(struct drm_crtc *crtc)
return drm_mode_set_config_internal(&set);
}
-static unsigned int drm_num_crtcs(struct drm_device *dev)
-{
- unsigned int num = 0;
- struct drm_crtc *tmp;
-
- drm_for_each_crtc(tmp, dev) {
- num++;
- }
-
- return num;
-}
-
int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
@@ -278,8 +266,7 @@ static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *
if (name) {
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
} else {
- crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
- drm_num_crtcs(dev));
+ crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", config->num_crtc);
}
if (!crtc->name) {
drm_mode_object_unregister(dev, &crtc->base);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f4715a67e340..08fcefd804bc 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -45,8 +45,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#if defined(CONFIG_DEBUG_FS)
-
/***************************************************
* Initialization, etc.
**************************************************/
@@ -647,5 +645,3 @@ void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
debugfs_remove_recursive(encoder->debugfs_entry);
encoder->debugfs_entry = NULL;
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 69c68804023f..923c4423151c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3611,7 +3611,8 @@ static bool mode_in_range(const struct drm_display_mode *mode,
if (!mode_in_vsync_range(mode, edid, t))
return false;
- if ((max_clock = range_pixel_clock(edid, t)))
+ max_clock = range_pixel_clock(edid, t);
+ if (max_clock)
if (mode->clock > max_clock)
return false;
@@ -6990,28 +6991,6 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
-/**
- * drm_set_preferred_mode - Sets the preferred mode of a connector
- * @connector: connector whose mode list should be processed
- * @hpref: horizontal resolution of preferred mode
- * @vpref: vertical resolution of preferred mode
- *
- * Marks a mode as preferred if it matches the resolution specified by @hpref
- * and @vpref.
- */
-void drm_set_preferred_mode(struct drm_connector *connector,
- int hpref, int vpref)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->probed_modes, head) {
- if (mode->hdisplay == hpref &&
- mode->vdisplay == vpref)
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- }
-}
-EXPORT_SYMBOL(drm_set_preferred_mode);
-
static bool is_hdmi2_sink(const struct drm_connector *connector)
{
/*
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 60fcb80bce61..d1c7e8298702 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -20,162 +20,28 @@
static char edid_firmware[PATH_MAX];
module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
-MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
- "from built-in data or /lib/firmware instead. ");
-
-#define GENERIC_EDIDS 6
-static const char * const generic_edid_name[GENERIC_EDIDS] = {
- "edid/800x600.bin",
- "edid/1024x768.bin",
- "edid/1280x1024.bin",
- "edid/1600x1200.bin",
- "edid/1680x1050.bin",
- "edid/1920x1080.bin",
-};
-
-static const u8 generic_edid[GENERIC_EDIDS][128] = {
- {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
- 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
- 0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
- 0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
- 0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
- 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
- 0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
- 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
- 0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
- },
- {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
- 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
- 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
- 0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
- 0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
- 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
- 0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
- 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
- 0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
- },
- {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
- 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
- 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
- 0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
- 0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
- 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
- 0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
- 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
- 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
- },
- {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
- 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
- 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
- 0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
- 0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
- 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
- 0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
- 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
- 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
- },
- {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
- 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
- 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
- 0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
- 0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
- 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
- 0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
- 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
- 0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
- },
- {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
- 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
- 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
- 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
- 0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
- 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
- 0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
- 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
- 0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
- },
-};
+MODULE_PARM_DESC(edid_firmware,
+ "Do not probe monitor, use specified EDID blob from /lib/firmware instead.");
static const struct drm_edid *edid_load(struct drm_connector *connector, const char *name)
{
const struct firmware *fw = NULL;
- const u8 *fwdata;
const struct drm_edid *drm_edid;
- int fwsize, builtin;
-
- builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
- if (builtin >= 0) {
- fwdata = generic_edid[builtin];
- fwsize = sizeof(generic_edid[builtin]);
- } else {
- int err;
-
- err = request_firmware(&fw, name, connector->dev->dev);
- if (err) {
- drm_err(connector->dev,
- "[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
- connector->base.id, connector->name,
- name, err);
- return ERR_PTR(err);
- }
-
- fwdata = fw->data;
- fwsize = fw->size;
+ int err;
+
+ err = request_firmware(&fw, name, connector->dev->dev);
+ if (err) {
+ drm_err(connector->dev,
+ "[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
+ connector->base.id, connector->name,
+ name, err);
+ return ERR_PTR(err);
}
- drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded %s firmware EDID \"%s\"\n",
- connector->base.id, connector->name,
- builtin >= 0 ? "built-in" : "external", name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded external firmware EDID \"%s\"\n",
+ connector->base.id, connector->name, name);
- drm_edid = drm_edid_alloc(fwdata, fwsize);
+ drm_edid = drm_edid_alloc(fw->data, fw->size);
if (!drm_edid_valid(drm_edid)) {
drm_err(connector->dev, "Invalid firmware EDID \"%s\"\n", name);
drm_edid_free(drm_edid);
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 48ee851b61d9..2da094bdf8a4 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -76,7 +76,7 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
* If nr is non-zero then it is used as the initial objects table size.
* In either case, the table will grow (be re-allocated) on demand.
*/
-void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
+void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned nr)
{
if (!nr)
nr = PAGE_SIZE / sizeof(void *);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 8c87287c3e16..638ffa4444f5 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -913,7 +913,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_PURGEABLE;
}
- if (obj->handle_count > 1) {
+ if (drm_gem_object_is_shared_for_memory_stats(obj)) {
status.shared += obj->size;
} else {
status.private += obj->size;
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b67eafa55715..1ac284a9e8ee 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -147,7 +147,6 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
- gbo->placement.busy_placement = gbo->placements;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
gbo->placements[c].mem_type = TTM_PL_VRAM;
@@ -160,7 +159,6 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
gbo->placement.num_placement = c;
- gbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
gbo->placements[i].fpfn = 0;
@@ -260,8 +258,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
}
/**
- * drm_gem_vram_offset() - \
- Returns a GEM VRAM object's offset in video memory
+ * drm_gem_vram_offset() - Returns a GEM VRAM object's offset in video memory
* @gbo: the GEM VRAM object
*
* This function returns the buffer object's offset in the device's video
@@ -470,14 +467,15 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
EXPORT_SYMBOL(drm_gem_vram_vunmap);
/**
- * drm_gem_vram_fill_create_dumb() - \
- Helper for implementing &struct drm_driver.dumb_create
+ * drm_gem_vram_fill_create_dumb() - Helper for implementing
+ * &struct drm_driver.dumb_create
+ *
* @file: the DRM file
* @dev: the DRM device
* @pg_align: the buffer's alignment in multiples of the page size
* @pitch_align: the scanline's alignment in powers of 2
- * @args: the arguments as provided to \
- &struct drm_driver.dumb_create
+ * @args: the arguments as provided to
+ * &struct drm_driver.dumb_create
*
* This helper function fills &struct drm_mode_create_dumb, which is used
* by &struct drm_driver.dumb_create. Implementations of this interface
@@ -575,8 +573,7 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
*/
/**
- * drm_gem_vram_object_free() - \
- Implements &struct drm_gem_object_funcs.free
+ * drm_gem_vram_object_free() - Implements &struct drm_gem_object_funcs.free
* @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
*/
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
@@ -591,12 +588,11 @@ static void drm_gem_vram_object_free(struct drm_gem_object *gem)
*/
/**
- * drm_gem_vram_driver_dumb_create() - \
- Implements &struct drm_driver.dumb_create
+ * drm_gem_vram_driver_dumb_create() - Implements &struct drm_driver.dumb_create
* @file: the DRM file
* @dev: the DRM device
- * @args: the arguments as provided to \
- &struct drm_driver.dumb_create
+ * @args: the arguments as provided to
+ * &struct drm_driver.dumb_create
*
* This function requires the driver to use @drm_device.vram_mm for its
* instance of VRAM MM.
@@ -639,8 +635,8 @@ static void __drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
}
/**
- * drm_gem_vram_plane_helper_prepare_fb() - \
- * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * drm_gem_vram_plane_helper_prepare_fb() - Implements &struct
+ * drm_plane_helper_funcs.prepare_fb
* @plane: a DRM plane
* @new_state: the plane's new state
*
@@ -690,8 +686,8 @@ err_drm_gem_vram_unpin:
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
/**
- * drm_gem_vram_plane_helper_cleanup_fb() - \
- * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * drm_gem_vram_plane_helper_cleanup_fb() - Implements &struct
+ * drm_plane_helper_funcs.cleanup_fb
* @plane: a DRM plane
* @old_state: the plane's old state
*
@@ -717,8 +713,8 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_simple_display_pipe_prepare_fb() - \
- * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements &struct
+ * drm_simple_display_pipe_funcs.prepare_fb
* @pipe: a simple display pipe
* @new_state: the plane's new state
*
@@ -739,8 +735,8 @@ int drm_gem_vram_simple_display_pipe_prepare_fb(
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
/**
- * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
- * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements &struct
+ * drm_simple_display_pipe_funcs.cleanup_fb
* @pipe: a simple display pipe
* @old_state: the plane's old state
*
@@ -761,8 +757,7 @@ EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - \
- Implements &struct drm_gem_object_funcs.pin
+ * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
* @gem: The GEM object to pin
*
* Returns:
@@ -785,8 +780,7 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
}
/**
- * drm_gem_vram_object_unpin() - \
- Implements &struct drm_gem_object_funcs.unpin
+ * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
* @gem: The GEM object to unpin
*/
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 129e2b91dbfe..e6b5b06de148 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -229,7 +229,7 @@ typedef struct drm_update_draw32 {
unsigned int num;
/* 64-bit version has a 32-bit pad here */
u64 data; /**< Pointer */
-} __attribute__((packed)) drm_update_draw32_t;
+} __packed drm_update_draw32_t;
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -296,7 +296,7 @@ typedef struct drm_mode_fb_cmd232 {
u32 pitches[4];
u32 offsets[4];
u64 modifier[4];
-} __attribute__((packed)) drm_mode_fb_cmd232_t;
+} __packed drm_mode_fb_cmd232_t;
static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
unsigned long arg)
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index bcd111404b12..7646f67bda4e 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -177,6 +177,45 @@ int __drmm_add_action_or_reset(struct drm_device *dev,
EXPORT_SYMBOL(__drmm_add_action_or_reset);
/**
+ * drmm_release_action - release a managed action from a &drm_device
+ * @dev: DRM device
+ * @action: function which would be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * This function calls the @action previously added by drmm_add_action()
+ * immediately.
+ * The @action is removed from the list of cleanup actions for @dev,
+ * which means that it won't be called in the final drm_dev_put().
+ */
+void drmm_release_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data)
+{
+ struct drmres *dr_match = NULL, *dr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
+ if (dr->node.release == action) {
+ if (!data || (data && *(void **)dr->data == data)) {
+ dr_match = dr;
+ del_dr(dev, dr_match);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ if (WARN_ON(!dr_match))
+ return;
+
+ action(dev, data);
+
+ free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_release_action);
+
+/**
* drmm_kmalloc - &drm_device managed kmalloc()
* @dev: DRM device
* @size: size of the memory allocation
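
A usage sketch for the new drmm_release_action() helper: a driver registers a managed cleanup with drmm_add_action() and later runs it early, before the final drm_dev_put(). struct my_device, my_hw_init() and my_hw_fini() are illustrative names only:

#include <linux/io.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct my_device {
        struct drm_device drm;
        void __iomem *mmio;
};

static void my_hw_fini(struct drm_device *drm, void *data)
{
        struct my_device *mydev = data;

        /* undo whatever the init path programmed */
        writel(0, mydev->mmio);
}

static int my_hw_init(struct my_device *mydev)
{
        writel(1, mydev->mmio);

        /* normally run automatically in the final drm_dev_put() */
        return drmm_add_action(&mydev->drm, my_hw_fini, mydev);
}

static void my_early_teardown(struct my_device *mydev)
{
        /* run my_hw_fini() now and drop it from the managed release list */
        drmm_release_action(&mydev->drm, my_hw_fini, mydev);
}
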
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 843a6dbda93a..ef6e416522f8 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -89,7 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.restore = pm_generic_restore,
};
-static struct bus_type mipi_dsi_bus_type = {
+static const struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
.uevent = mipi_dsi_uevent,
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8525ef851540..48fd2d67f352 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -544,7 +544,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- struct drm_printer p = drm_debug_printer("[leaked fb]");
+ struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 893f52ee4926..c4f88c3a93b7 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2752,3 +2752,25 @@ bool drm_mode_is_420(const struct drm_display_info *display,
drm_mode_is_420_also(display, mode);
}
EXPORT_SYMBOL(drm_mode_is_420);
+
+/**
+ * drm_set_preferred_mode - Sets the preferred mode of a connector
+ * @connector: connector whose mode list should be processed
+ * @hpref: horizontal resolution of preferred mode
+ * @vpref: vertical resolution of preferred mode
+ *
+ * Marks a mode as preferred if it matches the resolution specified by @hpref
+ * and @vpref.
+ */
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay == hpref &&
+ mode->vdisplay == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
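
drm_set_preferred_mode() walks the connector's probed_modes list, so it is meant to be called from a .get_modes() hook after the modes have been added. A minimal sketch for a hypothetical EDID-less connector, using the existing drm_add_modes_noedid() helper (the header placement of the declarations is assumed here):

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>

static int my_connector_get_modes(struct drm_connector *connector)
{
        int count;

        /* populate connector->probed_modes with the standard modes */
        count = drm_add_modes_noedid(connector, 1920, 1080);

        /* mark 1024x768 as the preferred one among them */
        drm_set_preferred_mode(connector, 1024, 768);

        return count;
}
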
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index f858dfedf2cf..2c582020cb42 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (!dev)
return 0;
+ /*
+ * Don't disable polling if it was never initialized
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
- drm_kms_helper_poll_disable(dev);
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
- drm_kms_helper_poll_enable(dev);
+ /*
+ * Don't enable polling if it was never initialized
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
+
return PTR_ERR(state);
}
@@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
dev->mode_config.suspend_state = NULL;
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
- drm_kms_helper_poll_enable(dev);
+ /*
+ * Don't enable polling if it is not initialized
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
return ret;
}
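
The checks above make drm_mode_config_helper_suspend()/_resume() usable by drivers that never initialized output polling. For reference, the usual way these helpers end up in a driver's PM ops; the my_* names and the assumption that drvdata points at the drm_device are illustrative only:

#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_drv.h>
#include <drm/drm_modeset_helper.h>

static int my_pm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        /* disables polling (if initialized), suspends fbdev and the atomic state */
        return drm_mode_config_helper_suspend(drm);
}

static int my_pm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        /* restores the atomic state, resumes fbdev, re-enables polling if initialized */
        return drm_mode_config_helper_resume(drm);
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_pm_suspend, my_pm_resume);
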
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 918065982db4..7694b85e75e3 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -91,7 +91,7 @@ static noinline depot_stack_handle_t __drm_stack_depot_save(void)
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
- struct drm_printer p = drm_debug_printer("drm_modeset_lock");
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_KMS, "drm_modeset_lock");
unsigned long *entries;
unsigned int nr_entries;
char *buf;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 3d92f66e550c..aa93129c3397 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
+ .width = 1080,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -279,6 +285,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /* GPD Win Mini */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01")
+ },
+ .driver_data = (void *)&lcd1080x1920_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 5b93c11895bb..699b7dbffd7b 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -182,16 +182,35 @@ void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_info);
-void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
+void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
{
- /* pr_debug callsite decorations are unhelpful here */
- printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
+ const struct drm_device *drm = p->arg;
+ const struct device *dev = drm ? drm->dev : NULL;
+ enum drm_debug_category category = p->category;
+ const char *prefix = p->prefix ?: "";
+ const char *prefix_pad = p->prefix ? " " : "";
+
+ if (!__drm_debug_enabled(category))
+ return;
+
+ /* Note: __builtin_return_address(0) is useless here. */
+ if (dev)
+ dev_printk(KERN_DEBUG, dev, "[" DRM_NAME "]%s%s %pV",
+ prefix_pad, prefix, vaf);
+ else
+ printk(KERN_DEBUG "[" DRM_NAME "]%s%s %pV",
+ prefix_pad, prefix, vaf);
}
-EXPORT_SYMBOL(__drm_printfn_debug);
+EXPORT_SYMBOL(__drm_printfn_dbg);
void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
{
- pr_err("*ERROR* %s %pV", p->prefix, vaf);
+ struct drm_device *drm = p->arg;
+
+ if (p->prefix)
+ drm_err(drm, "%s %pV", p->prefix, vaf);
+ else
+ drm_err(drm, "%pV", vaf);
}
EXPORT_SYMBOL(__drm_printfn_err);
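
With __drm_printfn_debug() replaced by the device-aware __drm_printfn_dbg(), callers obtain the printer through drm_dbg_printer(), as the drm_mode_config and modeset-lock hunks above already do. A minimal sketch of the pattern, with my_dump_hw_state() as a hypothetical driver function:

#include <drm/drm_print.h>

static void my_dump_hw_state(struct drm_device *drm, int active_pipes)
{
        /* output is tagged with the device and dropped unless DRM_UT_KMS is enabled */
        struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS, "hw state");

        drm_printf(&p, "active pipes: %d\n", active_pipes);
}
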
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 3f479483d7d8..4d60cc810b57 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev)
* Drivers can call this helper from their device resume implementation. It is
* not an error to call this even when output polling isn't enabled.
*
+ * If device polling was never initialized before, this call will trigger a
+ * warning and return.
+ *
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only called from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
- dev->mode_config.poll_running)
+ if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) ||
+ !drm_kms_helper_poll || dev->mode_config.poll_running)
return;
if (drm_kms_helper_enable_hpd(dev) ||
@@ -619,8 +622,12 @@ retry:
0);
}
- /* Re-enable polling in case the global poll config changed. */
- drm_kms_helper_poll_enable(dev);
+ /*
+ * Re-enable polling in case the global poll config changed but polling
+ * is still initialized.
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -760,9 +767,11 @@ static void output_poll_execute(struct work_struct *work)
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
- if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
- drm_kms_helper_disable_hpd(dev);
- dev->mode_config.poll_running = false;
+ if (!drm_kms_helper_poll) {
+ if (dev->mode_config.poll_running) {
+ drm_kms_helper_disable_hpd(dev);
+ dev->mode_config.poll_running = false;
+ }
goto out;
}
@@ -871,12 +880,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
* not an error to call this even when output polling isn't enabled or already
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
+ * If, however, polling was never initialized, this call will trigger a
+ * warning and return.
+ *
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only called from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
+ if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled))
+ return;
+
if (dev->mode_config.poll_running)
drm_kms_helper_disable_hpd(dev);
@@ -1101,42 +1116,6 @@ enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_helper_mode_valid_fixed);
/**
- * drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID
- * property from the connector's
- * DDC channel
- * @connector: The connector
- *
- * Returns:
- * The number of detected display modes.
- *
- * Uses a connector's DDC channel to retrieve EDID data and update the
- * connector's EDID property and display modes. Drivers can use this
- * function to implement struct &drm_connector_helper_funcs.get_modes
- * for connectors with a DDC channel.
- */
-int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector)
-{
- struct edid *edid;
- int count = 0;
-
- if (!connector->ddc)
- return 0;
-
- edid = drm_get_edid(connector, connector->ddc);
-
- // clears property if EDID is NULL
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc);
-
-/**
* drm_connector_helper_get_modes_fixed - Duplicates a display mode for a connector
* @connector: the connector
* @fixed_mode: the display hardware's mode
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 84101baeecc6..a0e94217b511 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -441,6 +441,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
int ret;
+ if (flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ return -EINVAL;
+
if (!syncobj)
return -ENOENT;
@@ -1040,8 +1043,11 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ might_sleep();
lockdep_assert_none_held_once();
+ }
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
if (points == NULL)
@@ -1109,7 +1115,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fallthough and try a 0 timeout wait!
*/
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
@@ -1416,10 +1423,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence)
+ return;
+
ret = dma_fence_chain_find_seqno(&fence, entry->point);
- if (ret != 0 || !fence) {
+ if (ret != 0) {
+ /* The given seqno has not been submitted yet. */
dma_fence_put(fence);
return;
+ } else if (!fence) {
+ /* If dma_fence_chain_find_seqno returns 0 but sets the fence
+ * to NULL, it implies that the given seqno is signaled and a
+ * later seqno has already been submitted. Assign a stub fence
+ * so that the eventfd still gets signaled below.
+ */
+ fence = dma_fence_get_stub();
}
list_del_init(&entry->node);
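
The syncobj changes above teach the wait path to honour DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, i.e. to wait for a timeline point's fence to be submitted rather than signaled. A hedged userspace sketch built on libdrm's drmSyncobjTimelineWait() wrapper, assuming a libdrm that exposes the flag; wait_point_available() is an illustrative helper:

#include <stdint.h>
#include <xf86drm.h>

/* Block until the fence for @point has materialized (not until it signals). */
static int wait_point_available(int fd, uint32_t syncobj, uint64_t point,
                                int64_t timeout_nsec)
{
        uint32_t first_signaled;

        return drmSyncobjTimelineWait(fd, &syncobj, &point, 1, timeout_nsec,
                                      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
                                      &first_signaled);
}
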
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index b106e8b288ad..9bf47327f436 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -54,6 +54,7 @@ static const struct {
ST(0x1480, 8),
ST(0x1500, 8),
ST(0x1520, 8),
+ ST(0x1540, 8),
ST(0x1608, 1),
ST(0x1610, 1),
ST(0x1658, 1),
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6228ce603248..6500f3999c5f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -29,6 +29,17 @@
* DRM operations:
*/
+static struct device_node *etnaviv_of_first_available_node(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "vivante,gc") {
+ if (of_device_is_available(np))
+ return np;
+ }
+
+ return NULL;
+}
static void load_gpu(struct drm_device *dev)
{
@@ -79,7 +90,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
drm_sched_entity_init(&ctx->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
- }
+ }
}
file->driver_priv = ctx;
@@ -233,11 +244,11 @@ static int show_each_gpu(struct seq_file *m, void *arg)
}
static struct drm_info_list etnaviv_debugfs_list[] = {
- {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
- {"gem", show_unlocked, 0, etnaviv_gem_show},
- { "mm", show_unlocked, 0, etnaviv_mm_show },
- {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
- {"ring", show_each_gpu, 0, etnaviv_ring_show},
+ {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
+ {"gem", show_unlocked, 0, etnaviv_gem_show},
+ { "mm", show_unlocked, 0, etnaviv_mm_show },
+ {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
+ {"ring", show_each_gpu, 0, etnaviv_ring_show},
};
static void etnaviv_debugfs_init(struct drm_minor *minor)
@@ -494,7 +505,7 @@ static const struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 3,
+ .minor = 4,
};
/*
@@ -597,9 +608,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!of_device_is_available(core_node))
continue;
- if (!first_node)
- first_node = core_node;
-
drm_of_component_match_add(&pdev->dev, &match,
component_compare_of, core_node);
}
@@ -634,8 +642,11 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
* device as the GPU we found. This assumes that all Vivante
* GPUs in the system share the same DMA constraints.
*/
- if (first_node)
+ first_node = etnaviv_of_first_available_node();
+ if (first_node) {
of_dma_configure(&pdev->dev, first_node, true);
+ of_node_put(first_node);
+ }
return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
@@ -653,11 +664,43 @@ static struct platform_driver etnaviv_platform_driver = {
},
};
+static int etnaviv_create_platform_device(const char *name,
+ struct platform_device **ppdev)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
+ if (!pdev)
+ return -ENOMEM;
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ return ret;
+ }
+
+ *ppdev = pdev;
+
+ return 0;
+}
+
+static void etnaviv_destroy_platform_device(struct platform_device **ppdev)
+{
+ struct platform_device *pdev = *ppdev;
+
+ if (!pdev)
+ return;
+
+ platform_device_unregister(pdev);
+
+ *ppdev = NULL;
+}
+
static struct platform_device *etnaviv_drm;
static int __init etnaviv_init(void)
{
- struct platform_device *pdev;
int ret;
struct device_node *np;
@@ -675,27 +718,13 @@ static int __init etnaviv_init(void)
* If the DT contains at least one available GPU device, instantiate
* the DRM platform device.
*/
- for_each_compatible_node(np, NULL, "vivante,gc") {
- if (!of_device_is_available(np))
- continue;
-
- pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
- if (!pdev) {
- ret = -ENOMEM;
- of_node_put(np);
- goto unregister_platform_driver;
- }
+ np = etnaviv_of_first_available_node();
+ if (np) {
+ of_node_put(np);
- ret = platform_device_add(pdev);
- if (ret) {
- platform_device_put(pdev);
- of_node_put(np);
+ ret = etnaviv_create_platform_device("etnaviv", &etnaviv_drm);
+ if (ret)
goto unregister_platform_driver;
- }
-
- etnaviv_drm = pdev;
- of_node_put(np);
- break;
}
return 0;
@@ -710,7 +739,7 @@ module_init(etnaviv_init);
static void __exit etnaviv_exit(void)
{
- platform_device_unregister(etnaviv_drm);
+ etnaviv_destroy_platform_device(&etnaviv_drm);
platform_driver_unregister(&etnaviv_platform_driver);
platform_driver_unregister(&etnaviv_gpu_driver);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index b5f73502e3dd..71a6d2b1c80f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -100,11 +100,10 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
if (!etnaviv_obj->sgt) {
struct drm_device *dev = etnaviv_obj->base.dev;
- int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+ unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt;
- sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
- etnaviv_obj->pages, npages);
+ sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages);
if (IS_ERR(sgt)) {
dev_err(dev->dev, "failed to allocate sgt: %ld\n",
PTR_ERR(sgt));
@@ -542,7 +541,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.vm_ops = &vm_ops,
};
-static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
struct etnaviv_gem_object *etnaviv_obj;
@@ -591,8 +590,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
size = PAGE_ALIGN(size);
- ret = etnaviv_gem_new_impl(dev, size, flags,
- &etnaviv_gem_shmem_ops, &obj);
+ ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
if (ret)
goto fail;
@@ -627,7 +625,7 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct drm_gem_object *obj;
int ret;
- ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
+ ret = etnaviv_gem_new_impl(dev, flags, ops, &obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 9b8445d2a128..734412aae94d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -164,6 +164,26 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
+ case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
+ *value = gpu->identity.nn_core_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
+ *value = gpu->identity.nn_mad_per_core;
+ break;
+
+ case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
+ *value = gpu->identity.tp_core_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
+ *value = gpu->identity.on_chip_sram_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
+ *value = gpu->identity.axi_sram_size;
+ break;
+
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
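
The new ETNAVIV_PARAM_GPU_* values are read through the existing GET_PARAM ioctl. A hedged userspace sketch, assuming a uapi etnaviv_drm.h that already carries the new constants and that the NPU is reachable via the 3D pipe of the render node; query_nn_core_count() is an illustrative helper:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/etnaviv_drm.h>

static int query_nn_core_count(int fd, uint64_t *value)
{
        struct drm_etnaviv_param req;
        int ret;

        memset(&req, 0, sizeof(req));
        req.pipe = ETNA_PIPE_3D;
        req.param = ETNAVIV_PARAM_GPU_NN_CORE_COUNT;

        ret = drmIoctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req);
        if (ret)
                return ret;

        *value = req.value;
        return 0;
}
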
@@ -513,8 +533,19 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
- /* enable clock */
unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ u32 pulse_eater = 0x01590880;
+
+ /* disable clock gating */
+ gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, 0x0);
+
+ /* disable pulse eater */
+ pulse_eater |= BIT(17);
+ gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+ pulse_eater |= BIT(0);
+ gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+
+ /* enable clock */
control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
etnaviv_gpu_load_clock(gpu, control);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 197e0037732e..7d5e9158e13c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -54,6 +54,18 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
+ /* Number of MAD units per Neural Network core. */
+ u32 nn_mad_per_core;
+
+ /* Number of Tensor Processing cores. */
+ u32 tp_core_count;
+
+ /* Size in bytes of the SRAM inside the NPU. */
+ u32 on_chip_sram_size;
+
+ /* Size in bytes of the SRAM across the AXI bus. */
+ u32 axi_sram_size;
+
/* Size of the vertex cache. */
u32 vertex_cache_size;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 67201242438b..d8e7334de8ce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -17,6 +17,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
+ .nn_mad_per_core = 0,
+ .tp_core_count = 0,
+ .on_chip_sram_size = 0,
+ .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -48,6 +52,11 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
+ .nn_core_count = 0,
+ .nn_mad_per_core = 0,
+ .tp_core_count = 0,
+ .on_chip_sram_size = 0,
+ .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -80,6 +89,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
+ .nn_mad_per_core = 0,
+ .tp_core_count = 0,
+ .on_chip_sram_size = 0,
+ .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -112,6 +125,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
+ .nn_mad_per_core = 0,
+ .tp_core_count = 0,
+ .on_chip_sram_size = 0,
+ .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -143,6 +160,11 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
+ .nn_core_count = 0,
+ .nn_mad_per_core = 0,
+ .tp_core_count = 0,
+ .on_chip_sram_size = 0,
+ .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -175,6 +197,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
+ .nn_mad_per_core = 0,
+ .tp_core_count = 0,
+ .on_chip_sram_size = 0,
+ .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -207,6 +233,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
+ .nn_mad_per_core = 64,
+ .tp_core_count = 4,
+ .on_chip_sram_size = 524288,
+ .axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -239,6 +269,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
+ .nn_mad_per_core = 64,
+ .tp_core_count = 3,
+ .on_chip_sram_size = 262144,
+ .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -265,6 +299,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
{
struct etnaviv_chip_identity *ident = &gpu->identity;
+ const u32 product_id = ident->product_id;
+ const u32 customer_id = ident->customer_id;
+ const u32 eco_id = ident->eco_id;
int i;
for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
@@ -278,6 +315,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
etnaviv_chip_identities[i].eco_id == ~0U)) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
+
+ /* Restore some id values as ~0U aka 'don't care' might have been used. */
+ ident->product_id = product_id;
+ ident->customer_id = customer_id;
+ ident->eco_id = eco_id;
+
return true;
}
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 4fa72567183a..1661d589bf3e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -70,7 +70,7 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
}
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, int prot)
{
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i;
@@ -314,7 +314,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
goto unlock;
mapping->iova = node->start;
- ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
+ ret = etnaviv_iommu_map(context, node->start, sgt,
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index bafdfe49c1d8..dc9dea664a28 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -511,7 +511,7 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
domain->id = domain->iter;
domain->nr_signals = dom->nr_signals;
- strncpy(domain->name, dom->name, sizeof(domain->name));
+ strscpy_pad(domain->name, dom->name, sizeof(domain->name));
domain->iter++;
if (domain->iter == nr_domains)
@@ -540,7 +540,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
sig = &dom->signal[signal->iter];
signal->id = signal->iter;
- strncpy(signal->name, sig->name, sizeof(signal->name));
+ strscpy_pad(signal->name, sig->name, sizeof(signal->name));
signal->iter++;
if (signal->iter == dom->nr_signals)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index f957552c6c50..207aa3f660b0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -18,7 +18,6 @@
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
-#include <drm/drm_edid.h>
#include <drm/drm_framebuffer.h>
struct hibmc_connector {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 8c6d2ea2a472..94e2c573a7af 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 3089029abba4..5932024f8f95 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -155,6 +155,20 @@ config DRM_I915_PXP
protected session and manage the status of the alive software session,
as well as its life cycle.
+config DRM_I915_DP_TUNNEL
+ bool "Enable DP tunnel support"
+ depends on DRM_I915
+ depends on USB4
+ select DRM_DISPLAY_DP_TUNNEL
+ default y
+ help
+ Choose this option to detect DP tunnels and enable the Bandwidth
+ Allocation mode for such tunnels. This lets all displays sharing the
+ link bandwidth, for instance on a Thunderbolt link, use the maximum
+ resolution that the link bandwidth allows.
+
+ If in doubt, say "Y".
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 5b7162076850..bc18e2d9ea05 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -28,6 +28,7 @@ config DRM_I915_DEBUG
select STACKDEPOT
select STACKTRACE
select DRM_DP_AUX_CHARDEV
+ select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c13f14edb508..3ef6ed41e62b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -369,6 +369,9 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
+i915-$(CONFIG_DRM_I915_DP_TUNNEL) += \
+ display/intel_dp_tunnel.o
+
i915-y += \
i915_perf.o
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 0589994dde11..d0c3880d7f80 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -205,7 +205,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
const char *str;
u8 val;
- priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return false;
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 6d948520e9a6..2e8e85da5a40 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -216,7 +216,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
u8 vendor, device;
char *name, *devid;
- ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+ ch7xxx = kzalloc(sizeof(*ch7xxx), GFP_KERNEL);
if (ch7xxx == NULL)
return false;
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index f43d8c610d3f..eef72bb3b767 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -267,7 +267,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
u16 temp;
int i;
- priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return false;
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index a724a8755673..1df212fb000e 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -476,7 +476,7 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
struct ns2501_priv *ns;
unsigned char ch;
- ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (ns == NULL)
return false;
@@ -551,7 +551,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *adjusted_mode)
{
const struct ns2501_configuration *conf;
- struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct ns2501_priv *ns = dvo->dev_priv;
int mode_idx, i;
DRM_DEBUG_KMS
@@ -655,7 +655,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
- struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct ns2501_priv *ns = dvo->dev_priv;
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 4acc8ce29c0b..6c461024c8e3 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -141,7 +141,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
struct sil164_priv *sil;
unsigned char ch;
- sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+ sil = kzalloc(sizeof(*sil), GFP_KERNEL);
if (sil == NULL)
return false;
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 009d65b0f3e9..0939e097f4f9 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -173,7 +173,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
struct tfp410_priv *tfp;
int id;
- tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+ tfp = kzalloc(sizeof(*tfp), GFP_KERNEL);
if (tfp == NULL)
return false;
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 91f2bc405cba..0279c8aabdd1 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -1060,3 +1060,33 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->fb = intel_fb;
}
+
+bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 base;
+
+ if (!plane_state->uapi.visible)
+ return false;
+
+ base = intel_plane_ggtt_offset(plane_state);
+
+ /*
+ * We may have moved the surface to a different
+ * part of ggtt, make the plane aware of that.
+ */
+ if (plane_config->base == base)
+ return false;
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ intel_de_write(dev_priv, DSPSURF(i9xx_plane), base);
+ else
+ intel_de_write(dev_priv, DSPADDR(i9xx_plane), base);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index b3d724a144cb..0ca12d1e6839 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -26,6 +26,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
+bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config);
#else
static inline unsigned int i965_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -46,6 +48,11 @@ static inline void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
}
+static inline bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config)
+{
+ return false;
+}
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 11ca9572e8b3..628e7192ebc9 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -70,26 +70,25 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
- bool is_ddr3,
- int fsb,
- int mem)
+static const struct cxsr_latency *intel_get_cxsr_latency(struct drm_i915_private *i915)
{
- const struct cxsr_latency *latency;
int i;
- if (fsb == 0 || mem == 0)
+ if (i915->fsb_freq == 0 || i915->mem_freq == 0)
return NULL;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
+ const struct cxsr_latency *latency = &cxsr_latency_table[i];
+ bool is_desktop = !IS_MOBILE(i915);
+
if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
+ i915->is_ddr3 == latency->is_ddr3 &&
+ i915->fsb_freq == latency->fsb_freq &&
+ i915->mem_freq == latency->mem_freq)
return latency;
}
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ drm_dbg_kms(&i915->drm, "Unknown FSB/MEM found, disable CxSR\n");
return NULL;
}
@@ -525,6 +524,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
+ * @i915: the device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -542,7 +542,8 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(int pixel_rate,
+static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+ int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
unsigned int latency_ns)
@@ -559,10 +560,10 @@ static unsigned int intel_calculate_wm(int pixel_rate,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
@@ -634,10 +635,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
u32 reg;
unsigned int wm;
- latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(dev_priv);
if (!latency) {
drm_dbg_kms(&dev_priv->drm,
"Unknown FSB/MEM found, disable CxSR\n");
@@ -653,7 +651,8 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
+ wm = intel_calculate_wm(dev_priv, pixel_rate,
+ &pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
@@ -663,20 +662,23 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
+ wm = intel_calculate_wm(dev_priv, pixel_rate,
+ &pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
+ wm = intel_calculate_wm(dev_priv, pixel_rate,
+ &pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
+ wm = intel_calculate_wm(dev_priv, pixel_rate,
+ &pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
@@ -2124,7 +2126,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2151,7 +2153,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2245,7 +2247,7 @@ static void i845_update_wm(struct drm_i915_private *dev_priv)
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
&i845_wm_info,
i845_get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
@@ -2531,7 +2533,8 @@ static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
-static bool ilk_validate_wm_level(int level,
+static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+ int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
{
@@ -2554,14 +2557,17 @@ static bool ilk_validate_wm_level(int level,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
- level, result->pri_val, max->pri);
+ drm_dbg_kms(&i915->drm,
+ "Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
- level, result->spr_val, max->spr);
+ drm_dbg_kms(&i915->drm,
+ "Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
- level, result->cur_val, max->cur);
+ drm_dbg_kms(&i915->drm,
+ "Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
result->pri_val = min_t(u32, result->pri_val, max->pri);
result->spr_val = min_t(u32, result->spr_val, max->spr);
@@ -2761,7 +2767,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2776,7 +2782,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+ if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2845,7 +2851,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(level, &max, wm)) {
+ if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@@ -2976,7 +2982,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(level, max, wm))
+ else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -4016,10 +4022,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv)
g4x_setup_wm_latency(dev_priv);
dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
+ if (!intel_get_cxsr_latency(dev_priv)) {
drm_info(&dev_priv->drm,
"failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index ec0d5168b503..2bb270f82932 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -29,6 +29,7 @@
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
+#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
@@ -38,6 +39,7 @@
#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_dp_tunnel.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
@@ -258,6 +260,10 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
if (crtc_state->post_csc_lut)
drm_property_blob_get(crtc_state->post_csc_lut);
+ if (crtc_state->dp_tunnel_ref.tunnel)
+ drm_dp_tunnel_ref_get(crtc_state->dp_tunnel_ref.tunnel,
+ &crtc_state->dp_tunnel_ref);
+
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
crtc_state->update_lrr = false;
@@ -309,6 +315,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
+ if (crtc_state->dp_tunnel_ref.tunnel)
+ drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
kfree(crtc_state);
}
@@ -344,6 +352,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
/* state->internal not reset on purpose */
state->dpll_set = state->modeset = false;
+
+ intel_dp_tunnel_atomic_cleanup_inherited_state(state);
}
struct intel_crtc_state *
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 06c2455bdd78..76d77d5a0409 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -217,6 +217,9 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
int width, height;
unsigned int rel_data_rate;
+ if (plane->id == PLANE_CURSOR)
+ return 0;
+
if (!plane_state->uapi.visible)
return 0;
@@ -244,9 +247,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rel_data_rate = width * height * fb->format->cpp[color_plane];
- if (plane->id == PLANE_CURSOR)
- return rel_data_rate;
-
return intel_adjusted_rate(&plane_state->uapi.src,
&plane_state->uapi.dst,
rel_data_rate);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 3f3cd944a1c5..1946d7fb3c2e 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -1465,7 +1465,7 @@ static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int
if (controller == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) < PCH_MTP)
+ INTEL_PCH_TYPE(i915) <= PCH_ADP)
return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index aa169b0055e9..fe52c06271ef 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1759,7 +1759,8 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Find the sequence block and size for the given panel. */
static const u8 *
-find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+find_panel_sequence_block(struct drm_i915_private *i915,
+ const struct bdb_mipi_sequence *sequence,
u16 panel_id, u32 *seq_size)
{
u32 total = get_blocksize(sequence);
@@ -1776,7 +1777,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
if (index + header_size > total) {
- DRM_ERROR("Invalid sequence block (header)\n");
+ drm_err(&i915->drm, "Invalid sequence block (header)\n");
return NULL;
}
@@ -1789,7 +1790,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
index += header_size;
if (index + current_size > total) {
- DRM_ERROR("Invalid sequence block\n");
+ drm_err(&i915->drm, "Invalid sequence block\n");
return NULL;
}
@@ -1801,12 +1802,13 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
index += current_size;
}
- DRM_ERROR("Sequence block detected but no valid configuration\n");
+ drm_err(&i915->drm, "Sequence block detected but no valid configuration\n");
return NULL;
}
-static int goto_next_sequence(const u8 *data, int index, int total)
+static int goto_next_sequence(struct drm_i915_private *i915,
+ const u8 *data, int index, int total)
{
u16 len;
@@ -1836,7 +1838,7 @@ static int goto_next_sequence(const u8 *data, int index, int total)
len = *(data + index + 6) + 7;
break;
default:
- DRM_ERROR("Unknown operation byte\n");
+ drm_err(&i915->drm, "Unknown operation byte\n");
return 0;
}
}
@@ -1844,7 +1846,8 @@ static int goto_next_sequence(const u8 *data, int index, int total)
return 0;
}
-static int goto_next_sequence_v3(const u8 *data, int index, int total)
+static int goto_next_sequence_v3(struct drm_i915_private *i915,
+ const u8 *data, int index, int total)
{
int seq_end;
u16 len;
@@ -1855,7 +1858,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* checking on the structure.
*/
if (total < 5) {
- DRM_ERROR("Too small sequence size\n");
+ drm_err(&i915->drm, "Too small sequence size\n");
return 0;
}
@@ -1872,7 +1875,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
seq_end = index + size_of_sequence;
if (seq_end > total) {
- DRM_ERROR("Invalid sequence size\n");
+ drm_err(&i915->drm, "Invalid sequence size\n");
return 0;
}
@@ -1882,7 +1885,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
- DRM_ERROR("Invalid element structure\n");
+ drm_err(&i915->drm, "Invalid element structure\n");
return 0;
}
return index;
@@ -1904,8 +1907,8 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
case MIPI_SEQ_ELEM_PMIC:
break;
default:
- DRM_ERROR("Unknown operation byte %u\n",
- operation_byte);
+ drm_err(&i915->drm, "Unknown operation byte %u\n",
+ operation_byte);
break;
}
}
@@ -2030,7 +2033,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
sequence->version);
- seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+ seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size);
if (!seq_data)
return;
@@ -2058,9 +2061,9 @@ parse_mipi_sequence(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
- index = goto_next_sequence_v3(data, index, seq_size);
+ index = goto_next_sequence_v3(i915, data, index, seq_size);
else
- index = goto_next_sequence(data, index, seq_size);
+ index = goto_next_sequence(i915, data, index, seq_size);
if (!index) {
drm_err(&i915->drm, "Invalid sequence %u\n",
seq_id);
@@ -2135,12 +2138,13 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
-static u8 translate_iboost(u8 val)
+static u8 translate_iboost(struct drm_i915_private *i915, u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
if (val >= ARRAY_SIZE(mapping)) {
- DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+ drm_dbg_kms(&i915->drm,
+ "Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
return 0;
}
return mapping[val];
@@ -2204,8 +2208,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
if (IS_DGFX(i915))
return vbt_pin;
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) ||
- IS_ALDERLAKE_P(i915)) {
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -2898,12 +2901,14 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
/**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
+ * @i915: the device
* @buf: pointer to a buffer to validate
* @size: size of the buffer
*
* Returns true on valid VBT.
*/
-bool intel_bios_is_valid_vbt(const void *buf, size_t size)
+bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+ const void *buf, size_t size)
{
const struct vbt_header *vbt = buf;
const struct bdb_header *bdb;
@@ -2912,17 +2917,17 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
if (sizeof(struct vbt_header) > size) {
- DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ drm_dbg_kms(&i915->drm, "VBT header incomplete\n");
return false;
}
if (memcmp(vbt->signature, "$VBT", 4)) {
- DRM_DEBUG_DRIVER("VBT invalid signature\n");
+ drm_dbg_kms(&i915->drm, "VBT invalid signature\n");
return false;
}
if (vbt->vbt_size > size) {
- DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n");
return false;
}
@@ -2932,13 +2937,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
vbt->bdb_offset,
sizeof(struct bdb_header),
size)) {
- DRM_DEBUG_DRIVER("BDB header incomplete\n");
+ drm_dbg_kms(&i915->drm, "BDB header incomplete\n");
return false;
}
bdb = get_bdb_header(vbt);
if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
- DRM_DEBUG_DRIVER("BDB incomplete\n");
+ drm_dbg_kms(&i915->drm, "BDB incomplete\n");
return false;
}
@@ -2990,7 +2995,7 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
for (count = 0; count < vbt_size; count += 4)
*(vbt + store++) = intel_spi_read(&i915->uncore, found + count);
- if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
goto err_free_vbt;
drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
@@ -3047,7 +3052,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
memcpy_fromio(vbt, p, vbt_size);
- if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
goto err_free_vbt;
pci_unmap_rom(pdev, oprom);
@@ -3074,7 +3079,7 @@ err_unmap_oprom:
*/
void intel_bios_init(struct drm_i915_private *i915)
{
- const struct vbt_header *vbt = i915->display.opregion.vbt;
+ const struct vbt_header *vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
@@ -3089,6 +3094,8 @@ void intel_bios_init(struct drm_i915_private *i915)
init_vbt_defaults(i915);
+ vbt = intel_opregion_get_vbt(i915, NULL);
+
/*
* If the OpRegion does not have VBT, look in SPI flash through MMIO or
* PCI mapping
@@ -3306,7 +3313,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (i915->display.opregion.vbt)
+ if (intel_opregion_get_vbt(i915, NULL))
return true;
}
@@ -3397,6 +3404,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
struct dsc_compression_parameters_entry *dsc,
int dsc_max_bpc)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int bpc = 8;
@@ -3410,8 +3418,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
else if (dsc->support_8bpc && dsc_max_bpc >= 8)
bpc = 8;
else
- DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n",
- dsc_max_bpc);
+ drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DCS\n",
+ dsc_max_bpc);
crtc_state->pipe_bpp = bpc * 3;
@@ -3431,16 +3439,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
} else {
/* FIXME */
if (!(dsc->slices_per_line & BIT(0)))
- DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+ drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n");
crtc_state->dsc.slice_count = 1;
}
if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
crtc_state->dsc.slice_count != 0)
- DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
- crtc_state->hw.adjusted_mode.crtc_hdisplay,
- crtc_state->dsc.slice_count);
+ drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ crtc_state->hw.adjusted_mode.crtc_hdisplay,
+ crtc_state->dsc.slice_count);
/*
* The VBT rc_buffer_block_size and rc_buffer_size definitions
@@ -3596,7 +3604,7 @@ int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
- return translate_iboost(devdata->child.dp_iboost_level);
+ return translate_iboost(devdata->i915, devdata->child.dp_iboost_level);
}
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
@@ -3604,7 +3612,7 @@ int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
- return translate_iboost(devdata->child.hdmi_iboost_level);
+ return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level);
}
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
@@ -3657,3 +3665,30 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915,
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
func(i915, devdata);
}
+
+static int intel_bios_vbt_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ const void *vbt;
+ size_t vbt_size;
+
+ /*
+ * FIXME: The VBT might originate from places other than the OpRegion, in
+ * which case this would be incorrect.
+ */
+ vbt = intel_opregion_get_vbt(i915, &vbt_size);
+ if (vbt)
+ seq_write(m, vbt, vbt_size);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
+
+void intel_bios_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
+ i915, &intel_bios_vbt_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 49e24b7cf675..06a51be4afd8 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -242,17 +242,15 @@ void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+ const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
-bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
-bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
@@ -283,4 +281,6 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915,
void (*func)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata));
+void intel_bios_debugfs_register(struct drm_i915_private *i915);
+
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c985ebb6831a..ed89b86ea625 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -63,6 +63,16 @@
* DMC will not change the active CDCLK frequency however, so that part
* will still be performed by the driver directly.
*
+ * Several methods exist to change the CDCLK frequency; which ones are
+ * supported depends on the platform:
+ *
+ * - Full PLL disable + re-enable with new VCO frequency. Pipes must be inactive.
+ * - CD2X divider update. Single pipe can be active as the divider update
+ * can be synchronized with the pipe's start of vblank.
+ * - Crawl the PLL smoothly to the new VCO frequency. Pipes can be active.
+ * - Squash waveform update. Pipes can be active.
+ * - Crawl and squash can also be done back to back. Pipes can be active.
+ *
* RAWCLK is a fixed frequency clock, often used by various auxiliary
* blocks such as AUX CH or backlight PWM. Hence the only thing we
* really need to know about RAWCLK is its frequency so that various
@@ -1227,186 +1237,199 @@ struct intel_cdclk_vals {
u32 cdclk;
u16 refclk;
u16 waveform;
- u8 divider; /* CD2X divider * 2 */
u8 ratio;
};
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
- { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
- { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
- { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
- { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ { .refclk = 19200, .cdclk = 144000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .ratio = 65 },
{}
};
static const struct intel_cdclk_vals glk_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
- { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
- { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 79200, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .ratio = 33 },
{}
};
static const struct intel_cdclk_vals icl_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
- { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
- { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
- { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals rkl_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
- { .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
- { .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
- { .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
- { .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
- { .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },
-
- { .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
- { .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
- { .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
- { .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
- { .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
- { .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },
-
- { .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
- { .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
- { .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
- { .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
- { .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
- { .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 36 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 40 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 64 },
+ { .refclk = 19200, .cdclk = 326400, .ratio = 136 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 116 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 136 },
+
+ { .refclk = 24000, .cdclk = 180000, .ratio = 30 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 32 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 52 },
+ { .refclk = 24000, .cdclk = 324000, .ratio = 108 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 92 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 108 },
+
+ { .refclk = 38400, .cdclk = 172800, .ratio = 18 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 68 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 68 },
{}
};
static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24400, .cdclk = 648000, .ratio = 54 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals adlp_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
- { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
- { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
- { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
- { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals rplu_cdclk_table[] = {
- { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
- { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
- { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
- { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
- { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
- { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
- { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
- { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
- { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
- { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
- { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
- { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 19200, .cdclk = 172800, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 480000, .ratio = 50 },
+ { .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 480000, .ratio = 40 },
+ { .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
- { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
- { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
- { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
- { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
- { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
- { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
- { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
- { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
- { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
- { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 163200, .ratio = 34, .waveform = 0x8888 },
+ { .refclk = 38400, .cdclk = 204000, .ratio = 34, .waveform = 0x9248 },
+ { .refclk = 38400, .cdclk = 244800, .ratio = 34, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 285600, .ratio = 34, .waveform = 0xa54a },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 34, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 367200, .ratio = 34, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 408000, .ratio = 34, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 448800, .ratio = 34, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 489600, .ratio = 34, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 530400, .ratio = 34, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
{}
};
static const struct intel_cdclk_vals mtl_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 },
- { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0x0000 },
{}
};
static const struct intel_cdclk_vals lnl_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 153600, .divider = 2, .ratio = 16, .waveform = 0xaaaa },
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
- { .refclk = 38400, .cdclk = 211200, .divider = 2, .ratio = 16, .waveform = 0xdbb6 },
- { .refclk = 38400, .cdclk = 230400, .divider = 2, .ratio = 16, .waveform = 0xeeee },
- { .refclk = 38400, .cdclk = 249600, .divider = 2, .ratio = 16, .waveform = 0xf7de },
- { .refclk = 38400, .cdclk = 268800, .divider = 2, .ratio = 16, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 288000, .divider = 2, .ratio = 16, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0xffff },
- { .refclk = 38400, .cdclk = 330000, .divider = 2, .ratio = 25, .waveform = 0xdbb6 },
- { .refclk = 38400, .cdclk = 360000, .divider = 2, .ratio = 25, .waveform = 0xeeee },
- { .refclk = 38400, .cdclk = 390000, .divider = 2, .ratio = 25, .waveform = 0xf7de },
- { .refclk = 38400, .cdclk = 420000, .divider = 2, .ratio = 25, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 450000, .divider = 2, .ratio = 25, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0xffff },
- { .refclk = 38400, .cdclk = 487200, .divider = 2, .ratio = 29, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 522000, .divider = 2, .ratio = 29, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0xffff },
- { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
- { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 268800, .ratio = 16, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 330000, .ratio = 25, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 360000, .ratio = 25, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 390000, .ratio = 25, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 420000, .ratio = 25, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 450000, .ratio = 25, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 487200, .ratio = 29, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 522000, .ratio = 29, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
{}
};
+static const int cdclk_squash_len = 16;
+
+static int cdclk_squash_divider(u16 waveform)
+{
+ return hweight16(waveform ?: 0xffff);
+}
+
+static int cdclk_divider(int cdclk, int vco, u16 waveform)
+{
+ /* 2 * cd2x divider */
+ return DIV_ROUND_CLOSEST(vco * cdclk_squash_divider(waveform),
+ cdclk * cdclk_squash_len);
+}
+
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
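
With the .divider field dropped from the tables above, the effective divider is recovered from vco, cdclk and the squash waveform by the new cdclk_divider() helper. A minimal standalone sketch of that arithmetic, with the kernel macros reimplemented in plain C and the inputs taken from the mtl_cdclk_table entry for 172800 kHz shown earlier:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static const int cdclk_squash_len = 16;

/* effective divider, i.e. 2 * cd2x divider, recovered from a table entry */
static int cdclk_divider(int cdclk, int vco, unsigned short waveform)
{
	int squash = __builtin_popcount(waveform ? waveform : 0xffff);

	return DIV_ROUND_CLOSEST(vco * squash, cdclk * cdclk_squash_len);
}

int main(void)
{
	/* mtl 172800 kHz entry: refclk 38400, ratio 16, waveform 0xad5a */
	int vco = 38400 * 16;	/* 614400 kHz */

	/* 614400 * 9 / (172800 * 16) = 2, i.e. a CD2X divider of 1 */
	printf("2 * cd2x divider = %d\n", cdclk_divider(172800, vco, 0xad5a));
	return 0;
}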
@@ -1745,10 +1768,10 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
- int cdclk, int vco)
+ int cdclk, int vco, u16 waveform)
{
/* cdclk = vco / 2 / div{1,1.5,2,4} */
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ switch (cdclk_divider(cdclk, vco, waveform)) {
default:
drm_WARN_ON(&dev_priv->drm,
cdclk != dev_priv->display.cdclk.hw.bypass);
@@ -1765,7 +1788,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
}
}
-static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
+static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
int cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
@@ -1827,20 +1850,13 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
-static const int cdclk_squash_len = 16;
-
-static int cdclk_squash_divider(u16 waveform)
-{
- return hweight16(waveform ?: 0xffff);
-}
-
static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
const struct intel_cdclk_config *old_cdclk_config,
const struct intel_cdclk_config *new_cdclk_config,
struct intel_cdclk_config *mid_cdclk_config)
{
u16 old_waveform, new_waveform, mid_waveform;
- int div = 2;
+ int old_div, new_div, mid_div;
/* Return if PLL is in an unknown state, force a complete disable and re-enable. */
if (cdclk_pll_is_unknown(old_cdclk_config->vco))
@@ -1859,6 +1875,18 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
old_waveform == new_waveform)
return false;
+ old_div = cdclk_divider(old_cdclk_config->cdclk,
+ old_cdclk_config->vco, old_waveform);
+ new_div = cdclk_divider(new_cdclk_config->cdclk,
+ new_cdclk_config->vco, new_waveform);
+
+ /*
+ * Should not happen currently. We might need more midpoint
+ * transitions if we need to also change the cd2x divider.
+ */
+ if (drm_WARN_ON(&i915->drm, old_div != new_div))
+ return false;
+
*mid_cdclk_config = *new_cdclk_config;
/*
@@ -1871,15 +1899,17 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) {
mid_cdclk_config->vco = old_cdclk_config->vco;
+ mid_div = old_div;
mid_waveform = new_waveform;
} else {
mid_cdclk_config->vco = new_cdclk_config->vco;
+ mid_div = new_div;
mid_waveform = old_waveform;
}
mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
mid_cdclk_config->vco,
- cdclk_squash_len * div);
+ cdclk_squash_len * mid_div);
/* make sure the mid clock came out sane */
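
The midpoint picked by cdclk_compute_crawl_and_squash_midpoint() can be sanity-checked outside the driver. The sketch below assumes a transition between the MTL 172800 kHz entry (vco 614400 kHz, waveform 0xad5a) and the 480000 kHz entry (vco 960000 kHz, full waveform); both recover the same 2*cd2x divider of 2, and the computed midpoint (vco 614400 kHz, cdclk 307200 kHz) is itself a legal table entry. This is an illustration of the math only, not driver code:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static const int cdclk_squash_len = 16;

static int squash_div(unsigned short waveform)
{
	return __builtin_popcount(waveform ? waveform : 0xffff);
}

int main(void)
{
	int old_vco = 614400, new_vco = 960000;	/* kHz */
	unsigned short old_wf = 0xad5a, new_wf = 0xffff;
	int div = 2;	/* 2 * cd2x divider, identical for both endpoints */
	int mid_vco, mid_cdclk;
	unsigned short mid_wf;

	/* mirror the selection above: squash first when the divider grows */
	if (squash_div(new_wf) > squash_div(old_wf)) {
		mid_vco = old_vco;
		mid_wf = new_wf;
	} else {
		mid_vco = new_vco;
		mid_wf = old_wf;
	}

	mid_cdclk = DIV_ROUND_CLOSEST(squash_div(mid_wf) * mid_vco,
				      cdclk_squash_len * div);

	/* prints: midpoint vco 614400 kHz, cdclk 307200 kHz */
	printf("midpoint vco %d kHz, cdclk %d kHz\n", mid_vco, mid_cdclk);
	return 0;
}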
@@ -1901,15 +1931,43 @@ static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.hw.vco > 0;
}
+static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
+ u16 waveform;
+ u32 val;
+
+ waveform = cdclk_squash_waveform(i915, cdclk);
+
+ val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) |
+ bxt_cdclk_cd2x_pipe(i915, pipe);
+
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if ((IS_GEMINILAKE(i915) || IS_BROXTON(i915)) &&
+ cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+ if (DISPLAY_VER(i915) >= 20)
+ val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
+ else
+ val |= skl_cdclk_decimal(cdclk);
+
+ return val;
+}
+
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- int unsquashed_cdclk;
u16 waveform;
- u32 val;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1926,29 +1984,10 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
waveform = cdclk_squash_waveform(dev_priv, cdclk);
- unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
- cdclk_squash_divider(waveform));
-
if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
- val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
- bxt_cdclk_cd2x_pipe(dev_priv, pipe);
-
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 20)
- val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
- else
- val |= skl_cdclk_decimal(cdclk);
-
- intel_de_write(dev_priv, CDCLK_CTL, val);
+ intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
if (pipe != INVALID_PIPE)
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
@@ -2039,7 +2078,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- int cdclk, clock, vco;
+ int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
@@ -2048,20 +2087,6 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
- * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
- * so sanitize this register.
- */
- cdctl = intel_de_read(dev_priv, CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
-
/* Make sure this is a legal cdclk value for the platform */
cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
if (cdclk != dev_priv->display.cdclk.hw.cdclk)
@@ -2072,24 +2097,21 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
if (vco != dev_priv->display.cdclk.hw.vco)
goto sanitize;
- expected = skl_cdclk_decimal(cdclk);
-
- /* Figure out what CD2X divider we should be using for this cdclk */
- if (HAS_CDCLK_SQUASH(dev_priv))
- clock = dev_priv->display.cdclk.hw.vco / 2;
- else
- clock = dev_priv->display.cdclk.hw.cdclk;
-
- expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
- dev_priv->display.cdclk.hw.vco);
+ /*
+ * Some BIOS versions leave an incorrect decimal frequency value and
+ * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+ expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE);
/*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
+ * Let's ignore the pipe field, since BIOS could have configured the
+ * dividers both synching to an active pipe, or asynchronously
+ * (PIPE_NONE).
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->display.cdclk.hw.cdclk >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
+ expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
if (cdctl == expected)
/* All well; nothing to sanitize */
@@ -3467,15 +3489,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- freq = dg1_rawclk(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ freq = dg1_rawclk(dev_priv);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index c5092b7e87d5..ca7112b32cb3 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -2111,7 +2111,8 @@ static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state)
return DISPLAY_INFO(i915)->color.degamma_lut_size;
}
-static int check_lut_size(const struct drm_property_blob *lut, int expected)
+static int check_lut_size(struct drm_i915_private *i915,
+ const struct drm_property_blob *lut, int expected)
{
int len;
@@ -2120,8 +2121,8 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
len = drm_color_lut_size(lut);
if (len != expected) {
- DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
- len, expected);
+ drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n",
+ len, expected);
return -EINVAL;
}
@@ -2146,8 +2147,8 @@ static int _check_luts(const struct intel_crtc_state *crtc_state,
degamma_length = intel_degamma_lut_size(crtc_state);
gamma_length = intel_gamma_lut_size(crtc_state);
- if (check_lut_size(degamma_lut, degamma_length) ||
- check_lut_size(gamma_lut, gamma_length))
+ if (check_lut_size(i915, degamma_lut, degamma_length) ||
+ check_lut_size(i915, gamma_lut, gamma_length))
return -EINVAL;
if (drm_color_lut_check(degamma_lut, degamma_tests) ||
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index abaacea5c2cc..93479db0f89f 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -42,6 +42,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -846,6 +847,9 @@ intel_crt_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(dev_priv))
+ return connector->status;
+
if (dev_priv->display.params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
@@ -929,6 +933,9 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct i2c_adapter *ddc;
int ret;
+ if (!intel_display_driver_check_access(dev_priv))
+ return drm_edid_connector_add_modes(connector);
+
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
@@ -1069,6 +1076,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
+ intel_connector->base.polled = intel_connector->polled;
if (HAS_DDI(dev_priv)) {
assert_port_valid(dev_priv, PORT_E);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 8a84a31c7b48..25593f6aae7d 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -461,70 +461,6 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
-static int intel_mode_vblank_start(const struct drm_display_mode *mode)
-{
- int vblank_start = mode->crtc_vblank_start;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
-
- return vblank_start;
-}
-
-static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- int *min, int *max, int *vblank_start)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *crtc_state;
- const struct drm_display_mode *adjusted_mode;
-
- /*
- * During fastsets/etc. the transcoder is still
- * running with the old timings at this point.
- *
- * TODO: maybe just use the active timings here?
- */
- if (intel_crtc_needs_modeset(new_crtc_state))
- crtc_state = new_crtc_state;
- else
- crtc_state = old_crtc_state;
-
- adjusted_mode = &crtc_state->hw.adjusted_mode;
-
- if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
- /* timing changes should happen with VRR disabled */
- drm_WARN_ON(state->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
- new_crtc_state->update_m_n || new_crtc_state->update_lrr);
-
- if (intel_vrr_is_push_sent(crtc_state))
- *vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- else
- *vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- } else {
- *vblank_start = intel_mode_vblank_start(adjusted_mode);
- }
-
- /* FIXME needs to be calibrated sensibly */
- *min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
- VBLANK_EVASION_TIME_US);
- *max = *vblank_start - 1;
-
- /*
- * M/N and TRANS_VTOTAL are double buffered on the transcoder's
- * undelayed vblank, so with seamless M/N and LRR we must evade
- * both vblanks.
- *
- * DSB execution waits for the transcoder's undelayed vblank,
- * hence we must kick off the commit before that.
- */
- if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
- *min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
-}
-
/**
* intel_pipe_update_start() - start update of a set of display registers
* @state: the atomic state
@@ -542,14 +478,12 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- long timeout = msecs_to_jiffies_timeout(1);
- int scanline, min, max, vblank_start;
- wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
- bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
- DEFINE_WAIT(wait);
+ struct intel_vblank_evade_ctx evade;
+ int scanline;
intel_psr_lock(new_crtc_state);
@@ -566,9 +500,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
- intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
- if (min <= 0 || max <= 0)
- goto irq_disable;
+ intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
@@ -582,58 +514,14 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
local_irq_disable();
- crtc->debug.min_vbl = min;
- crtc->debug.max_vbl = max;
+ crtc->debug.min_vbl = evade.min;
+ crtc->debug.max_vbl = evade.max;
trace_intel_pipe_update_start(crtc);
- for (;;) {
- /*
- * prepare_to_wait() has a memory barrier, which guarantees
- * other CPUs can see the task state update by the time we
- * read the scanline.
- */
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-
- scanline = intel_get_crtc_scanline(crtc);
- if (scanline < min || scanline > max)
- break;
-
- if (!timeout) {
- drm_err(&dev_priv->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
- break;
- }
-
- local_irq_enable();
-
- timeout = schedule_timeout(timeout);
-
- local_irq_disable();
- }
-
- finish_wait(wq, &wait);
+ scanline = intel_vblank_evade(&evade);
drm_crtc_vblank_put(&crtc->base);
- /*
- * On VLV/CHV DSI the scanline counter would appear to
- * increment approx. 1/3 of a scanline before start of vblank.
- * The registers still get latched at start of vblank however.
- * This means we must not write any registers on the first
- * line of vblank (since not the whole line is actually in
- * vblank). And unfortunately we can't use the interrupt to
- * wait here since it will fire too soon. We could use the
- * frame start interrupt instead since it will fire after the
- * critical scanline, but that would require more changes
- * in the interrupt code. So for now we'll just do the nasty
- * thing and poll for the bad scanline to pass us by.
- *
- * FIXME figure out if BXT+ DSI suffers from this as well
- */
- while (need_vlv_dsi_wa && scanline == vblank_start)
- scanline = intel_get_crtc_scanline(crtc);
-
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
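
intel_pipe_update_start() now delegates the scanline window setup and the wait loop to intel_vblank_evade_init()/intel_vblank_evade(). As a rough standalone illustration of the window being evaded, assuming a 1080p timing and a 100 us evasion budget (both values are assumptions here, not taken from this patch):

#include <stdio.h>

#define VBLANK_EVASION_TIME_US 100

/* rounds up, like intel_usecs_to_scanlines() */
static int usecs_to_scanlines(int crtc_clock_khz, int crtc_htotal, int usecs)
{
	return (usecs * crtc_clock_khz + 1000 * crtc_htotal - 1) /
	       (1000 * crtc_htotal);
}

int main(void)
{
	/* assumed 1920x1080@60 timing: pixel clock 148500 kHz, htotal 2200 */
	int clock = 148500, htotal = 2200;
	int vblank_start = 1080;	/* crtc_vblank_start for this mode */

	int min = vblank_start - usecs_to_scanlines(clock, htotal,
						    VBLANK_EVASION_TIME_US);
	int max = vblank_start - 1;

	/* the arming writes are only started outside scanlines [min, max] */
	printf("evade window: scanlines %d..%d\n", min, max);	/* 1073..1079 */
	return 0;
}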
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 49fd100ec98a..4bcf446c75f4 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -55,10 +55,9 @@ static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
const struct drm_dp_vsc_sdp *vsc)
{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
- drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
+ drm_dp_vsc_sdp_log(&p, vsc);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 926e2de00eb5..f8b33999d43f 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -22,6 +22,7 @@
#include "intel_frontbuffer.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_vblank.h"
#include "skl_watermark.h"
#include "gem/i915_gem_object.h"
@@ -47,12 +48,23 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
return base + plane_state->view.color_plane[0].offset;
}
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool early_tpt)
{
int x = plane_state->uapi.dst.x1;
int y = plane_state->uapi.dst.y1;
u32 pos = 0;
+ /*
+ * Formula from Bspec:
+ * MAX(-1 * <Cursor vertical size from CUR_CTL based on cursor mode
+ * select setting> + 1, CUR_POS Y Position - Update region Y position)
+ */
+ if (early_tpt)
+ y = max(-1 * drm_rect_height(&plane_state->uapi.dst) + 1,
+ y - crtc_state->psr2_su_area.y1);
+
if (x < 0) {
pos |= CURSOR_POS_X_SIGN;
x = -x;
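
A quick numeric check of the early transport clamp above, with made-up values (64-pixel-tall cursor positioned at y = 100, selective update region starting at y = 120); only the arithmetic is shown, none of these numbers come from the patch:

#include <stdio.h>

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

int main(void)
{
	int cursor_h = 64;		/* drm_rect_height() of the cursor dst */
	int cur_pos_y = 100;		/* CUR_POS Y position */
	int su_region_y1 = 120;		/* psr2_su_area.y1 */

	/* clamp to at most (cursor height - 1) lines above the update region */
	int early_tpt_y = max_int(-cursor_h + 1, cur_pos_y - su_region_y1);

	printf("CURPOS_ERLY_TPT y = %d\n", early_tpt_y);	/* -20 */
	return 0;
}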
@@ -274,7 +286,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
+ pos = intel_cursor_position(crtc_state, plane_state, false);
}
/* On these chipsets we can only modify the base/size/stride
@@ -503,17 +515,24 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) {
+ if (crtc_state->enable_psr2_su_region_et) {
+ u32 val = intel_cursor_position(crtc_state, plane_state,
+ true);
+ intel_de_write_fw(dev_priv, CURPOS_ERLY_TPT(pipe), val);
+ }
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
- else
+ } else {
i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ }
}
/* TODO: split into noarm+arm pair */
@@ -536,7 +555,7 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
+ pos = intel_cursor_position(crtc_state, plane_state, false);
}
/*
@@ -647,12 +666,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
{
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
+ struct intel_vblank_evade_ctx evade;
int ret;
/*
@@ -745,13 +766,25 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- /*
- * Technically we should do a vblank evasion here to make
- * sure all the cursor registers update on the same frame.
- * For now just make sure the register writes happen as
- * quickly as possible to minimize the race window.
- */
- local_irq_disable();
+ intel_vblank_evade_init(crtc_state, crtc_state, &evade);
+
+ intel_psr_lock(crtc_state);
+
+ if (!drm_WARN_ON(&i915->drm, drm_crtc_vblank_get(&crtc->base))) {
+ /*
+ * TODO: maybe check if we're still in PSR
+ * and skip the vblank evasion entirely?
+ */
+ intel_psr_wait_for_idle_locked(crtc_state);
+
+ local_irq_disable();
+
+ intel_vblank_evade(&evade);
+
+ drm_crtc_vblank_put(&crtc->base);
+ } else {
+ local_irq_disable();
+ }
if (new_plane_state->uapi.visible) {
intel_plane_update_noarm(plane, crtc_state, new_plane_state);
@@ -762,6 +795,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
local_irq_enable();
+ intel_psr_unlock(crtc_state);
+
intel_plane_unpin_fb(old_plane_state);
out_free:
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 6b25e195232f..64e0f820a789 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -78,7 +78,7 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
intel_de_rmw(i915,
- XELPDP_PORT_MSGBUS_TIMER(encoder->port, lane),
+ XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane),
XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
XELPDP_PORT_MSGBUS_TIMER_VAL);
}
@@ -117,7 +117,7 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
enum port port, int lane)
{
- intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+ intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
@@ -125,10 +125,10 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
{
enum phy phy = intel_port_to_phy(i915, port);
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
@@ -144,7 +144,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
enum phy phy = intel_port_to_phy(i915, port);
if (__intel_de_wait_for_register(i915,
- XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+ XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_MSGBUS_TIMEOUT_FAST_US,
@@ -152,7 +152,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
- if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(port, lane)) &
+ if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) &
XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
drm_dbg_kms(&i915->drm,
"PHY %c Hardware did not detect a timeout\n",
@@ -186,7 +186,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
int ack;
u32 val;
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -195,7 +195,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
return -ETIMEDOUT;
}
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
@@ -253,7 +253,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
int ack;
u32 val;
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -262,14 +262,14 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
return -ETIMEDOUT;
}
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
(committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -282,7 +282,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
- } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
+ } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
@@ -848,10 +848,10 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
.clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
- 0x4800, /* tx cfg1 */
+ 0xe800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
- .cmn = {0x0500, /* cmn cfg0*/
+ .cmn = {0x0700, /* cmn cfg0*/
0x0005, /* cmn cfg1 */
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
@@ -1641,7 +1641,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
.clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
+ 0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@@ -1649,8 +1649,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
- .mpllb = { 0x209c, /* mpllb cfg0 */
- 0x7d10, /* mpllb cfg1 */
+ .mpllb = { 0x309c, /* mpllb cfg0 */
+ 0x2110, /* mpllb cfg1 */
0xca06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@@ -1666,7 +1666,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
.clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
+ 0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@@ -1674,8 +1674,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
- .mpllb = { 0x009c, /* mpllb cfg0 */
- 0x7d08, /* mpllb cfg1 */
+ .mpllb = { 0x109c, /* mpllb cfg0 */
+ 0x2108, /* mpllb cfg1 */
0xca06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@@ -1691,7 +1691,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
.clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
+ 0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@@ -1699,8 +1699,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
- .mpllb = { 0x00d0, /* mpllb cfg0 */
- 0x7d08, /* mpllb cfg1 */
+ .mpllb = { 0x10d0, /* mpllb cfg0 */
+ 0x2108, /* mpllb cfg1 */
0x4a06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@@ -1716,7 +1716,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
.clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
+ 0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@@ -1725,7 +1725,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
0x0000, /* cmn cfg3 */
},
.mpllb = { 0x1104, /* mpllb cfg0 */
- 0x7d08, /* mpllb cfg1 */
+ 0x2108, /* mpllb cfg1 */
0x0a06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@@ -1741,7 +1741,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
.clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
+ 0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@@ -1749,8 +1749,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
- .mpllb = { 0x0138, /* mpllb cfg0 */
- 0x7d08, /* mpllb cfg1 */
+ .mpllb = { 0x1138, /* mpllb cfg0 */
+ 0x2108, /* mpllb cfg1 */
0x5486, /* mpllb cfg2 */
0xfe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@@ -2096,13 +2096,54 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
return intel_c20pll_calc_state(crtc_state, encoder);
}
-static bool intel_c20_use_mplla(u32 clock)
+static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
{
- /* 10G and 20G rates use MPLLA */
- if (clock == 1000000 || clock == 2000000)
- return true;
+ return state->tx[0] & C20_PHY_USE_MPLLB;
+}
- return false;
+static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state)
+{
+ unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
+ unsigned int multiplier, refclk = 38400;
+ unsigned int tx_clk_div;
+ unsigned int ref_clk_mpllb_div;
+ unsigned int fb_clk_div4_en;
+ unsigned int ref, vco;
+ unsigned int tx_rate_mult;
+ unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
+
+ if (intel_c20phy_use_mpllb(pll_state)) {
+ tx_rate_mult = 1;
+ frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
+ frac_quot = pll_state->mpllb[8];
+ frac_rem = pll_state->mpllb[9];
+ frac_den = pll_state->mpllb[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
+ fb_clk_div4_en = 0;
+ } else {
+ tx_rate_mult = 2;
+ frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
+ frac_quot = pll_state->mplla[8];
+ frac_rem = pll_state->mplla[9];
+ frac_den = pll_state->mplla[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
+ fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
+ }
+
+ if (frac_en)
+ frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
+ else
+ frac = 0;
+
+ ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
+
+ return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
}
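
The port-clock recovery just added folds the fractional feedback divider and the post-dividers into one expression. Below is a plain-C restatement of that arithmetic with made-up field values (multiplier, dividers and shifts chosen only to make the steps concrete; they do not correspond to any real C20 PHY table entry, and the closest-rounding of the kernel helpers is simplified to plain division):

#include <stdio.h>

int main(void)
{
	const unsigned long long refclk = 38400;	/* kHz */

	/* hypothetical decoded MPLLB fields */
	unsigned long long multiplier = 500, frac = 0;
	unsigned int fb_clk_div4_en = 0, ref_clk_mpllb_div = 1;
	unsigned int tx_rate_mult = 1, tx_clk_div = 1, tx_rate = 0;
	unsigned long long ref, vco, port_clock;

	/* ref = refclk * 2^(1 + fb_clk_div4_en) / 2^ref_clk_mpllb_div */
	ref = refclk * (1ULL << (1 + fb_clk_div4_en)) >> ref_clk_mpllb_div;

	/* vco = ref * (multiplier / 4 + frac / 2^17) / 10, done as one >> 17 */
	vco = (ref * ((multiplier << 15) + frac) >> 17) / 10;

	port_clock = vco << tx_rate_mult >> tx_clk_div >> tx_rate;

	printf("vco %llu kHz, port clock %llu kHz\n", vco, port_clock);
	return 0;
}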
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
@@ -2138,7 +2179,7 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
PHY_C20_A_CMN_CNTX_CFG(i));
}
- if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
+ if (intel_c20phy_use_mpllb(pll_state)) {
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
@@ -2160,6 +2201,8 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
}
}
+ pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state);
+
intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -2174,12 +2217,12 @@ void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
- if (intel_c20_use_mplla(hw_state->clock)) {
- for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
- drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
- } else {
+ if (intel_c20phy_use_mpllb(hw_state)) {
for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
+ drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
}
}
@@ -2326,27 +2369,27 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
}
/* 3.3 mpllb or mplla configuration */
- if (intel_c20_use_mplla(clock)) {
- for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
+ if (intel_c20phy_use_mpllb(pll_state)) {
+ for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_A_MPLLA_CNTX_CFG(i),
- pll_state->mplla[i]);
+ PHY_C20_A_MPLLB_CNTX_CFG(i),
+ pll_state->mpllb[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_B_MPLLA_CNTX_CFG(i),
- pll_state->mplla[i]);
+ PHY_C20_B_MPLLB_CNTX_CFG(i),
+ pll_state->mpllb[i]);
}
} else {
- for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
+ for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_A_MPLLB_CNTX_CFG(i),
- pll_state->mpllb[i]);
+ PHY_C20_A_MPLLA_CNTX_CFG(i),
+ pll_state->mplla[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
- PHY_C20_B_MPLLB_CNTX_CFG(i),
- pll_state->mpllb[i]);
+ PHY_C20_B_MPLLA_CNTX_CFG(i),
+ pll_state->mplla[i]);
}
}
@@ -2408,51 +2451,6 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
return tmpclk;
}
-static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
-{
- unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
- unsigned int multiplier, refclk = 38400;
- unsigned int tx_clk_div;
- unsigned int ref_clk_mpllb_div;
- unsigned int fb_clk_div4_en;
- unsigned int ref, vco;
- unsigned int tx_rate_mult;
- unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
-
- if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
- tx_rate_mult = 1;
- frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
- frac_quot = pll_state->mpllb[8];
- frac_rem = pll_state->mpllb[9];
- frac_den = pll_state->mpllb[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
- fb_clk_div4_en = 0;
- } else {
- tx_rate_mult = 2;
- frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
- frac_quot = pll_state->mplla[8];
- frac_rem = pll_state->mplla[9];
- frac_den = pll_state->mplla[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
- fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
- }
-
- if (frac_en)
- frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
- else
- frac = 0;
-
- ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
- vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
-
- return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
-}
-
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool lane_reversal)
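The port-clock derivation in the block removed above reduces to: ref = refclk * 2^(1 + fb_clk_div4_en) / 2^ref_clk_mpllb_div, vco = ref * (multiplier / 4 + frac / 2^17) / 10, and port clock = vco * 2^tx_rate_mult / (2^tx_clk_div * 2^tx_rate), with tx_rate_mult = 1 on the MPLLB path and 2 on the MPLLA path. A minimal stand-alone sketch of that arithmetic, using purely illustrative field values rather than any real PHY table entry, could look like this:

/*
 * Stand-alone sketch of the C20 port-clock arithmetic removed above.
 * The field values are illustrative only; div_round_closest_u64()
 * stands in for the kernel's DIV_ROUND_CLOSEST()/mul_u32_u32() helpers.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Hypothetical MPLLB-path values, not a real PHY table entry. */
	unsigned int refclk = 38400;		/* kHz */
	unsigned int multiplier = 184;
	unsigned int frac = 0;			/* fractional part disabled */
	unsigned int ref_clk_mpllb_div = 0;
	unsigned int fb_clk_div4_en = 0;
	unsigned int tx_rate_mult = 1, tx_clk_div = 0, tx_rate = 0;

	unsigned int ref = div_round_closest_u64(
		(uint64_t)refclk * (1 << (1 + fb_clk_div4_en)),
		1 << ref_clk_mpllb_div);
	unsigned int vco = div_round_closest_u64(
		((uint64_t)ref * ((multiplier << (17 - 2)) + frac)) >> 17, 10);

	printf("port clock = %u kHz\n",
	       (vco << tx_rate_mult) >> tx_clk_div >> tx_rate);
	return 0;
}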
@@ -2460,7 +2458,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 val = 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ XELPDP_PORT_REVERSAL,
lane_reversal ? XELPDP_PORT_REVERSAL : 0);
if (lane_reversal)
@@ -2481,7 +2480,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
else
val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
@@ -2514,15 +2513,16 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
u8 lane_mask, u8 state)
{
enum phy phy = intel_port_to_phy(i915, port);
+ i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_de_rmw(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
intel_cx0_get_powerdown_state(lane_mask, state));
/* Wait for pending transactions. */
for_each_cx0_lane_in_mask(lane_mask, lane)
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -2531,12 +2531,12 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_bus_reset(i915, port, lane);
}
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_de_rmw(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+ if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
@@ -2545,10 +2545,10 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
{
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
@@ -2593,27 +2593,27 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
lane_phy_current_status, lane_phy_current_status,
XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2624,9 +2624,10 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
CX0_P2_STATE_RESET);
intel_cx0_setup_powerdown(i915, port);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0);
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status,
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ lane_phy_current_status,
XELPDP_PORT_RESET_END_TIMEOUT))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
@@ -2761,12 +2762,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 9. Set PORT_CLOCK_CTL register PCLK PLL Request
* LN<Lane for maxPCLK> to "1" to enable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2786,7 +2787,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 clock;
- u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
@@ -2839,11 +2840,11 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
*/
val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
- val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
/*
* 3. Follow the Display Voltage Frequency Switching - Sequence
@@ -2854,10 +2855,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
*/
val |= XELPDP_TBT_CLOCK_REQUEST;
- intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val);
+ intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_ACK,
XELPDP_TBT_CLOCK_ACK,
100, 0, NULL))
@@ -2909,7 +2910,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
* 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
* to "0" to disable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
@@ -2919,7 +2920,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
@@ -2932,9 +2933,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
*/
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK, 0);
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -2953,11 +2954,11 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -2970,7 +2971,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@@ -2997,7 +2998,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* TODO: Determine the PLL type from the SW state, once MTL PLL
* handling is done via the standard shared DPLL framework.
*/
- u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
@@ -3016,6 +3017,9 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
int i;
+ if (intel_crtc_needs_fastset(state))
+ return;
+
for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
u8 expected = mpllb_sw_state->pll[i];
@@ -3067,10 +3071,15 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
- bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
- bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
+ bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
+ bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int i;
+ I915_STATE_WARN(i915, mpll_hw_state->clock != mpll_sw_state->clock,
+ "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ mpll_sw_state->clock, mpll_hw_state->clock);
+
I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
"[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
crtc->base.base.id, crtc->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index adf8f4ce0d49..bdd0c8c4ef97 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -7,16 +7,39 @@
#define __INTEL_CX0_PHY_REGS_H__
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
+
+/*
+ * Wrapper macro to convert from port number to the index used in some of the
+ * registers. For Display version 20 and above it converts the port number to a
+ * single range, starting with the TC offsets. When used together with
+ * _PICK_EVEN_2RANGES(idx, PORT_TC1, ...), this single range will be the second
+ * range. Example:
+ *
+ * PORT_TC1 -> PORT_TC1
+ * PORT_TC2 -> PORT_TC2
+ * PORT_TC3 -> PORT_TC3
+ * PORT_TC4 -> PORT_TC4
+ * PORT_A -> PORT_TC4 + 1
+ * PORT_B -> PORT_TC4 + 2
+ * ...
+ */
+#define __xe2lpd_port_idx(port) \
+ (port >= PORT_TC1 ? port : PORT_TC4 + 1 + port - PORT_A)
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A 0x64040
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B 0x64140
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1 0x16F240
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2 0x16F440
-#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_M2P_MSGBUS_CTL(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_M2P_MSGBUS_CTL(i915__, port, lane) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_M2P_MSGBUS_CTL(__xe2lpd_port_idx(port), lane) : \
+ _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
#define XELPDP_PORT_M2P_TRANSACTION_PENDING REG_BIT(31)
#define XELPDP_PORT_M2P_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
@@ -27,11 +50,16 @@
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
#define XELPDP_PORT_M2P_ADDRESS_MASK REG_GENMASK(11, 0)
#define XELPDP_PORT_M2P_ADDRESS(val) REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
-#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+
+#define _XELPDP_PORT_P2M_MSGBUS_STATUS(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4 + 8)
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(i915__, port, lane) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_P2M_MSGBUS_STATUS(__xe2lpd_port_idx(port), lane) : \
+ _XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane))
#define XELPDP_PORT_P2M_RESPONSE_READY REG_BIT(31)
#define XELPDP_PORT_P2M_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
#define XELPDP_PORT_P2M_COMMAND_READ_ACK 0x4
@@ -54,11 +82,15 @@
#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104
#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1 0x16F200
#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2 0x16F400
-#define XELPDP_PORT_BUF_CTL1(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL1(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2))
+#define XELPDP_PORT_BUF_CTL1(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_BUF_CTL1(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_BUF_CTL1(port))
#define XELPDP_PORT_BUF_D2D_LINK_ENABLE REG_BIT(29)
#define XELPDP_PORT_BUF_D2D_LINK_STATE REG_BIT(28)
#define XELPDP_PORT_BUF_SOC_PHY_READY REG_BIT(24)
@@ -75,12 +107,15 @@
#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1)
#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
-#define XELPDP_PORT_BUF_CTL2(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL2(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 4)
-
+#define XELPDP_PORT_BUF_CTL2(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_BUF_CTL2(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
@@ -95,11 +130,15 @@
#define XELPDP_POWER_STATE_READY_MASK REG_GENMASK(7, 4)
#define XELPDP_POWER_STATE_READY(val) REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
-#define XELPDP_PORT_BUF_CTL3(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL3(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 8)
+#define XELPDP_PORT_BUF_CTL3(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_BUF_CTL3(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_BUF_CTL3(port))
#define XELPDP_PLL_LANE_STAGGERING_DELAY_MASK REG_GENMASK(15, 8)
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
@@ -114,11 +153,15 @@
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1 0x16f258
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2 0x16f458
-#define XELPDP_PORT_MSGBUS_TIMER(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_MSGBUS_TIMER(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_A, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_B, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_MSGBUS_TIMER(i915__, port, lane) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_MSGBUS_TIMER(__xe2lpd_port_idx(port), lane) : \
+ _XELPDP_PORT_MSGBUS_TIMER(port, lane))
#define XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT REG_BIT(31)
#define XELPDP_PORT_MSGBUS_TIMER_VAL_MASK REG_GENMASK(23, 0)
#define XELPDP_PORT_MSGBUS_TIMER_VAL REG_FIELD_PREP(XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, 0xa000)
@@ -127,11 +170,15 @@
#define _XELPDP_PORT_CLOCK_CTL_B 0x641E0
#define _XELPDP_PORT_CLOCK_CTL_USBC1 0x16F260
#define _XELPDP_PORT_CLOCK_CTL_USBC2 0x16F460
-#define XELPDP_PORT_CLOCK_CTL(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_CLOCK_CTL(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_CLOCK_CTL_A, \
_XELPDP_PORT_CLOCK_CTL_B, \
_XELPDP_PORT_CLOCK_CTL_USBC1, \
_XELPDP_PORT_CLOCK_CTL_USBC2))
+#define XELPDP_PORT_CLOCK_CTL(i915__, port) \
+ (DISPLAY_VER(i915__) >= 20 ? \
+ _XELPDP_PORT_CLOCK_CTL(__xe2lpd_port_idx(port)) : \
+ _XELPDP_PORT_CLOCK_CTL(port))
#define XELPDP_LANE_PCLK_PLL_REQUEST(lane) REG_BIT(31 - ((lane) * 4))
#define XELPDP_LANE_PCLK_PLL_ACK(lane) REG_BIT(30 - ((lane) * 4))
#define XELPDP_LANE_PCLK_REFCLK_REQUEST(lane) REG_BIT(29 - ((lane) * 4))
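The wrapper pattern added throughout this header keeps the existing register picking for pre-Xe2LPD platforms and, for display version 20 and above, first remaps the port enum into a single TC-first range via __xe2lpd_port_idx(). A stand-alone illustration of that remapping, with hypothetical enum values standing in for i915's enum port (only the relative ordering of the ports matters here), might be:

/*
 * Illustration of the __xe2lpd_port_idx() remapping used by the
 * DISPLAY_VER >= 20 register wrappers above.  The enum values are
 * hypothetical stand-ins for i915's enum port.
 */
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };

#define __xe2lpd_port_idx(port) \
	((port) >= PORT_TC1 ? (port) : PORT_TC4 + 1 + (port) - PORT_A)

int main(void)
{
	/* TC ports keep their own index; PORT_A/PORT_B land after PORT_TC4. */
	printf("PORT_TC1 -> %d\n", __xe2lpd_port_idx(PORT_TC1)); /* == PORT_TC1 */
	printf("PORT_A   -> %d\n", __xe2lpd_port_idx(PORT_A));   /* == PORT_TC4 + 1 */
	printf("PORT_B   -> %d\n", __xe2lpd_port_idx(PORT_B));   /* == PORT_TC4 + 2 */
	return 0;
}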
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 12a29363e5df..c587a8efeafc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -54,6 +54,7 @@
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
#include "intel_fdi.h"
@@ -178,7 +179,7 @@ static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
int ret;
/* FIXME: find out why Bspec's 100us timeout is too short */
- ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) &
+ ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)) &
XELPDP_PORT_BUF_PHY_IDLE), 10000);
if (ret)
drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
@@ -226,7 +227,9 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
}
if (DISPLAY_VER(dev_priv) >= 14)
- ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE),
+ ret = _wait_for(!(intel_de_read(dev_priv,
+ XELPDP_PORT_BUF_CTL1(dev_priv, port)) &
+ XELPDP_PORT_BUF_PHY_IDLE),
timeout_us, 10, 10);
else
ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
@@ -2429,13 +2432,22 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ i915_reg_t reg;
+ u32 set_bits, wait_bits;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0,
- XELPDP_PORT_BUF_D2D_LINK_ENABLE);
+ if (DISPLAY_VER(dev_priv) >= 20) {
+ reg = DDI_BUF_CTL(port);
+ set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+ wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
+ } else {
+ reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
+ wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
+ }
- if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
- XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n",
+ intel_de_rmw(dev_priv, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
+ drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
@@ -2448,7 +2460,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 val;
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
val &= ~XELPDP_PORT_WIDTH_MASK;
val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
@@ -2461,7 +2473,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
val |= XELPDP_PORT_REVERSAL;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+ intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
@@ -2472,7 +2484,7 @@ static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2898,13 +2910,22 @@ mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ i915_reg_t reg;
+ u32 clr_bits, wait_bits;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
- XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0);
+ if (DISPLAY_VER(dev_priv) >= 20) {
+ reg = DDI_BUF_CTL(port);
+ clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+ wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
+ } else {
+ reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
+ wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
+ }
- if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
- XELPDP_PORT_BUF_D2D_LINK_STATE), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n",
+ intel_de_rmw(dev_priv, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
+ drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3038,7 +3059,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
/* De-select Thunderbolt */
if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port),
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3319,10 +3340,13 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(lane_count);
+
+ if (DISPLAY_VER(dev_priv) >= 20)
+ buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
} else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -3543,6 +3567,9 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
@@ -3941,11 +3968,11 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
+ intel_psr_get_config(encoder, pipe_config);
+
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
- intel_psr_get_config(encoder, pipe_config);
-
intel_audio_codec_get_config(encoder, pipe_config);
}
@@ -4124,7 +4151,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
- if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
+ if (intel_encoder_is_dp(encoder))
intel_dp_sync_state(encoder, crtc_state);
}
@@ -5117,6 +5144,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete;
encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete;
+ dig_port->lock = intel_tc_port_lock;
+ dig_port->unlock = intel_tc_port_unlock;
+
if (intel_tc_port_init(dig_port, is_legacy) < 0)
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b10aad15a63d..ab2f52d21bad 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -33,6 +33,7 @@
#include <linux/string_helpers.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
@@ -73,6 +74,7 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
@@ -104,6 +106,7 @@
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
@@ -2477,7 +2480,7 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
bw_overhead);
- u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
+ u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);
/*
* Windows/BIOS uses fixed M/N values always. Follow suit.
@@ -2706,6 +2709,15 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ width = drm_rect_width(&crtc_state->psr2_su_area);
+ height = drm_rect_height(&crtc_state->psr2_su_area);
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
+ PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -4480,6 +4492,8 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
saved_state->crc_enabled = slave_crtc_state->crc_enabled;
intel_crtc_free_hw_state(slave_crtc_state);
+ if (slave_crtc_state->dp_tunnel_ref.tunnel)
+ drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref);
memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
kfree(saved_state);
@@ -4495,6 +4509,10 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
&master_crtc_state->hw.adjusted_mode);
slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
+ if (master_crtc_state->dp_tunnel_ref.tunnel)
+ drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel,
+ &slave_crtc_state->dp_tunnel_ref);
+
copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
@@ -4523,6 +4541,8 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
/* free the old crtc_state->hw members */
intel_crtc_free_hw_state(crtc_state);
+ intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
+
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
@@ -4764,7 +4784,11 @@ static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
- return memcmp(a, b, sizeof(*a)) == 0;
+ return a->pixelformat == b->pixelformat &&
+ a->colorimetry == b->colorimetry &&
+ a->bpc == b->bpc &&
+ a->dynamic_range == b->dynamic_range &&
+ a->content_type == b->content_type;
}
static bool
@@ -4799,28 +4823,27 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
}
static void
-pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
+pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
bool fastset, const char *name,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
+ struct drm_printer p;
+
if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
- drm_dbg_kms(&dev_priv->drm,
- "fastset requirement not met in %s dp sdp\n", name);
- drm_dbg_kms(&dev_priv->drm, "expected:\n");
- drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg_kms(&dev_priv->drm, "found:\n");
- drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
} else {
- drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
- drm_err(&dev_priv->drm, "expected:\n");
- drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err(&dev_priv->drm, "found:\n");
- drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
+ p = drm_err_printer(&i915->drm, NULL);
+
+ drm_printf(&p, "mismatch in %s dp sdp\n", name);
}
+
+ drm_printf(&p, "expected:\n");
+ drm_dp_vsc_sdp_log(&p, a);
+ drm_printf(&p, "found:\n");
+ drm_dp_vsc_sdp_log(&p, b);
}
/* Returns the length up to and including the last differing byte */
@@ -4838,10 +4861,12 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
}
static void
-pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
- bool fastset, const char *name,
+pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
+ const char *name,
const u8 *a, const u8 *b, size_t len)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
@@ -4850,7 +4875,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
len = memcmp_diff_len(a, b, len);
drm_dbg_kms(&dev_priv->drm,
- "fastset requirement not met in %s buffer\n", name);
+ "[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
+ crtc->base.base.id, crtc->base.name, name);
print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
16, 0, a, len, false);
print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
@@ -4859,7 +4885,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
/* only dump up to the last difference */
len = memcmp_diff_len(a, b, len);
- drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
+ drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
+ crtc->base.base.id, crtc->base.name, name);
print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
16, 0, a, len, false);
print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
@@ -4890,18 +4917,34 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
va_end(args);
}
-static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+static void
+pipe_config_pll_mismatch(bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
+ const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
{
- /* Enable fastboot by default on Skylake and newer */
- if (DISPLAY_VER(dev_priv) >= 9)
- return true;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* Enable fastboot by default on VLV and CHV */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return true;
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
- /* Disabled by default on all others */
- return false;
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] fastset requirement not met in %s\n",
+ crtc->base.base.id, crtc->base.name, name);
+ drm_dbg_kms(&i915->drm, "expected:\n");
+ intel_dpll_dump_hw_state(i915, a);
+ drm_dbg_kms(&i915->drm, "found:\n");
+ intel_dpll_dump_hw_state(i915, b);
+ } else {
+ drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
+ crtc->base.base.id, crtc->base.name, name);
+ drm_err(&i915->drm, "expected:\n");
+ intel_dpll_dump_hw_state(i915, a);
+ drm_err(&i915->drm, "found:\n");
+ intel_dpll_dump_hw_state(i915, b);
+ }
}
bool
@@ -4912,14 +4955,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
- bool fixup_inherited = fastset &&
- current_config->inherited && !pipe_config->inherited;
-
- if (fixup_inherited && !fastboot_enabled(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "initial modeset and fastboot not set\n");
- ret = false;
- }
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
@@ -4999,7 +5034,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-#define PIPE_CONF_CHECK_TIMINGS(name) do { \
+#define PIPE_CONF_CHECK_PLL(name) do { \
+ if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
+ &pipe_config->name)) { \
+ pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
+ &current_config->name, \
+ &pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
@@ -5045,8 +5090,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} while (0)
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
- if (!current_config->has_psr && !pipe_config->has_psr && \
- !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
+ if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
&current_config->infoframes.name, \
@@ -5059,7 +5103,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
- pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
current_config->name, \
pipe_config->name, \
(len)); \
@@ -5199,53 +5243,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(csc);
PIPE_CONF_CHECK_CSC(output_csc);
-
- if (current_config->active_planes) {
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_psr2);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_I(dc3co_exitline);
- }
}
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->display.dpll.mgr) {
+ if (dev_priv->display.dpll.mgr)
PIPE_CONF_CHECK_P(shared_dpll);
- PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
- PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
- PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
- PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
- PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
- PIPE_CONF_CHECK_X(dpll_hw_state.spll);
- PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
- PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
- PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
- PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
- PIPE_CONF_CHECK_X(dpll_hw_state.div0);
- PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
- PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
- PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
- PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
- PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
- }
+ /* FIXME convert everything over to the dpll_mgr */
+ if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
+ PIPE_CONF_CHECK_PLL(dpll_hw_state);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@@ -5368,6 +5375,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
if (ret)
return ret;
+ ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
if (ret)
return ret;
@@ -6255,12 +6266,11 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_link_bw_limits new_limits;
struct intel_link_bw_limits old_limits;
int ret;
- intel_link_bw_init_limits(i915, &new_limits);
+ intel_link_bw_init_limits(state, &new_limits);
old_limits = new_limits;
while (true) {
@@ -6307,6 +6317,9 @@ int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
+ if (!intel_display_driver_check_access(dev_priv))
+ return -ENODEV;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
/*
@@ -7068,6 +7081,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+ intel_atomic_global_state_wait_for_dependencies(state);
/*
* During full modesets we write a lot of registers, wait
@@ -7109,6 +7123,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_commit_modeset_disables(state);
+ intel_dp_tunnel_atomic_alloc_bw(state);
+
/* FIXME: Eventually get rid of our crtc->config pointer */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
crtc->config = new_crtc_state;
@@ -7244,6 +7260,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
+ intel_atomic_global_state_commit_done(state);
if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
@@ -7294,6 +7311,38 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
+static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
+{
+ int ret;
+
+ ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_global_state_setup_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int intel_atomic_swap_state(struct intel_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_swap_state(&state->base, true);
+ if (ret)
+ return ret;
+
+ intel_atomic_swap_global_state(state);
+
+ intel_shared_dpll_swap_state(state);
+
+ intel_atomic_track_fbs(state);
+
+ return 0;
+}
+
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock)
{
@@ -7339,11 +7388,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
return ret;
}
- ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
- if (!ret)
- ret = drm_atomic_helper_swap_state(&state->base, true);
+ ret = intel_atomic_setup_commit(state, nonblock);
if (!ret)
- intel_atomic_swap_global_state(state);
+ ret = intel_atomic_swap_state(state);
if (ret) {
struct intel_crtc_state *new_crtc_state;
@@ -7357,8 +7404,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- intel_shared_dpll_swap_state(state);
- intel_atomic_track_fbs(state);
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
@@ -7811,6 +7856,7 @@ static const struct intel_display_funcs skl_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = skl_commit_modeset_enables,
.get_initial_plane_config = skl_get_initial_plane_config,
+ .fixup_initial_plane_config = skl_fixup_initial_plane_config,
};
static const struct intel_display_funcs ddi_display_funcs = {
@@ -7819,6 +7865,7 @@ static const struct intel_display_funcs ddi_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs pch_split_display_funcs = {
@@ -7827,6 +7874,7 @@ static const struct intel_display_funcs pch_split_display_funcs = {
.crtc_disable = ilk_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs vlv_display_funcs = {
@@ -7835,6 +7883,7 @@ static const struct intel_display_funcs vlv_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs i9xx_display_funcs = {
@@ -7843,6 +7892,7 @@ static const struct intel_display_funcs i9xx_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
+ .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
/**
@@ -8051,8 +8101,9 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
/* Kill all the work that may have been queued by hpd. */
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->modeset_retry_work.func)
- cancel_work_sync(&connector->modeset_retry_work);
+ if (connector->modeset_retry_work.func &&
+ cancel_work_sync(&connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
if (connector->hdcp.shim) {
cancel_delayed_work_sync(&connector->hdcp.check_work);
cancel_work_sync(&connector->hdcp.prop_work);
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 47297ed85822..2167dbee5eea 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -28,6 +28,8 @@
#include "intel_opregion.h"
#include "intel_wm_types.h"
+struct task_struct;
+
struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
@@ -47,6 +49,7 @@ struct intel_fbdev;
struct intel_fdi_funcs;
struct intel_hotplug_funcs;
struct intel_initial_plane_config;
+struct intel_opregion;
struct intel_overlay;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
@@ -64,6 +67,8 @@ struct intel_display_funcs {
struct intel_crtc_state *);
void (*get_initial_plane_config)(struct intel_crtc *,
struct intel_initial_plane_config *);
+ bool (*fixup_initial_plane_config)(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config);
void (*crtc_enable)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*crtc_disable)(struct intel_atomic_state *state,
@@ -172,6 +177,12 @@ struct intel_hotplug {
struct work_struct poll_init_work;
bool poll_enabled;
+ /*
+ * Queuing of hotplug_work, reenable_work and poll_init_work is
+ * enabled. Protected by drm_i915_private::irq_lock.
+ */
+ bool detection_work_enabled;
+
unsigned int hpd_storm_threshold;
/* Whether or not to count short HPD IRQs in HPD storms */
u8 hpd_short_storm_enabled;
@@ -299,6 +310,11 @@ struct intel_display {
} funcs;
struct {
+ bool any_task_allowed;
+ struct task_struct *allowed_task;
+ } access;
+
+ struct {
/* backlight registers and fields in struct intel_panel */
struct mutex lock;
} backlight;
@@ -508,12 +524,13 @@ struct intel_display {
} wq;
/* Grouping using named structs. Keep sorted. */
+ struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
struct intel_dpll dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
- struct intel_opregion opregion;
+ struct intel_opregion *opregion;
struct intel_overlay *overlay;
struct intel_display_params params;
struct intel_vbt_data vbt;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d951edb36687..b99c024b0934 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -86,28 +86,6 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_opregion(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_opregion *opregion = &i915->display.opregion;
-
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
-
- return 0;
-}
-
-static int i915_vbt(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_opregion *opregion = &i915->display.opregion;
-
- if (opregion->vbt)
- seq_write(m, opregion->vbt, opregion->vbt_size);
-
- return 0;
-}
-
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -210,7 +188,8 @@ static void intel_panel_info(struct seq_file *m,
}
static void intel_hdcp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
+ struct intel_connector *intel_connector,
+ bool remote_req)
{
bool hdcp_cap, hdcp2_cap;
@@ -219,8 +198,14 @@ static void intel_hdcp_info(struct seq_file *m,
goto out;
}
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
+ if (remote_req) {
+ intel_hdcp_get_remote_capability(intel_connector,
+ &hdcp_cap,
+ &hdcp2_cap);
+ } else {
+ hdcp_cap = intel_hdcp_get_capability(intel_connector);
+ hdcp2_cap = intel_hdcp2_get_capability(intel_connector);
+ }
if (hdcp_cap)
seq_puts(m, "HDCP1.4 ");
@@ -307,7 +292,11 @@ static void intel_connector_info(struct seq_file *m,
}
seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
+ if (intel_encoder_is_mst(encoder)) {
+ intel_hdcp_info(m, intel_connector, true);
+ seq_puts(m, "\tMST Hub HDCP version: ");
+ }
+ intel_hdcp_info(m, intel_connector, false);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
@@ -1066,8 +1055,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_sr_status", i915_sr_status, 0},
- {"i915_opregion", i915_opregion, 0},
- {"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
@@ -1105,10 +1092,12 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+ intel_bios_debugfs_register(i915);
intel_cdclk_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
+ intel_opregion_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
intel_display_debugfs_params(i915);
@@ -1153,7 +1142,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
connector->base.base.id);
- intel_hdcp_info(m, connector);
+ intel_hdcp_info(m, connector, false);
out:
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -1413,6 +1402,20 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
+static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct drm_crtc *crtc;
+
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc)
+ return -ENODEV;
+
+ seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
+
+ return 0;
+}
+
static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@@ -1434,6 +1437,30 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
return len;
}
+static ssize_t i915_bigjoiner_enable_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct drm_crtc *crtc;
+ bool bigjoiner_en = 0;
+ int ret;
+
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc)
+ return -ENODEV;
+
+ ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
+ if (ret < 0)
+ return ret;
+
+ connector->force_bigjoiner_enable = bigjoiner_en;
+ *offp += len;
+
+ return len;
+}
+
static int i915_dsc_output_format_open(struct inode *inode,
struct file *file)
{
@@ -1527,6 +1554,8 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
.write = i915_dsc_fractional_bpp_write
};
+DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
+
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1608,6 +1637,13 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
connector, &i915_dsc_fractional_bpp_fops);
}
+ if (DISPLAY_VER(i915) >= 11 &&
+ (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
+ connector, &i915_bigjoiner_enable_fops);
+ }
+
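For reference, the new knob surfaces per connector as i915_bigjoiner_force_enable in the connector's debugfs directory (by analogy with the existing per-object files, presumably somewhere like /sys/kernel/debug/dri/0/<connector>/ — the exact path is an assumption here): writing "1" or "0" sets connector->force_bigjoiner_enable and reading the file reports the current value, matching the show/store pair above; both paths return -ENODEV while the connector is disconnected or has no CRTC.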
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index b7e68eb62452..f35718748555 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 0b522c6a8d6f..c02d79b50006 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1012,7 +1012,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
goto display_fused_off;
}
- if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 9df9097a0255..87dd07e0d138 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -35,6 +35,7 @@
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
+#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
@@ -45,6 +46,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
+#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
@@ -276,12 +278,144 @@ cleanup_bios:
return ret;
}
+static void set_display_access(struct drm_i915_private *i915,
+ bool any_task_allowed,
+ struct task_struct *allowed_task)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ int err;
+
+ intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
+ err = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ if (err)
+ continue;
+
+ i915->display.access.any_task_allowed = any_task_allowed;
+ i915->display.access.allowed_task = allowed_task;
+ }
+
+ drm_WARN_ON(&i915->drm, err);
+}
+
+/**
+ * intel_display_driver_enable_user_access - Enable display HW access for all threads
+ * @i915: i915 device instance
+ *
+ * Enable the display HW access for all threads. Examples for such accesses
+ * are modeset commits and connector probing.
+ *
+ * This function should be called during driver loading and system resume once
+ * all the HW initialization steps are done.
+ */
+void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
+{
+ set_display_access(i915, true, NULL);
+
+ intel_hpd_enable_detection_work(i915);
+}
+
+/**
+ * intel_display_driver_disable_user_access - Disable display HW access for user threads
+ * @i915: i915 device instance
+ *
+ * Disable the display HW access for user threads. Examples for such accesses
+ * are modeset commits and connector probing. For the current thread the
+ * access is still enabled, which should only perform HW init/deinit
+ * programming (as the initial modeset during driver loading or the disabling
+ * modeset during driver unloading and system suspend/shutdown). This function
+ * should be followed by calling either intel_display_driver_enable_user_access()
+ * after completing the HW init programming or
+ * intel_display_driver_suspend_access() after completing the HW deinit
+ * programming.
+ *
+ * This function should be called during driver loading/unloading and system
+ * suspend/shutdown before starting the HW init/deinit programming.
+ */
+void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
+{
+ intel_hpd_disable_detection_work(i915);
+
+ set_display_access(i915, false, current);
+}
+
+/**
+ * intel_display_driver_suspend_access - Suspend display HW access for all threads
+ * @i915: i915 device instance
+ *
+ * Disable the display HW access for all threads. Examples for such accesses
+ * are modeset commits and connector probing. This call should be either
+ * followed by calling intel_display_driver_resume_access(), or the driver
+ * should be unloaded/shutdown.
+ *
+ * This function should be called during driver unloading and system
+ * suspend/shutdown after completing the HW deinit programming.
+ */
+void intel_display_driver_suspend_access(struct drm_i915_private *i915)
+{
+ set_display_access(i915, false, NULL);
+}
+
+/**
+ * intel_display_driver_resume_access - Resume display HW access for the resume thread
+ * @i915: i915 device instance
+ *
+ * Enable the display HW access for the current resume thread, keeping the
+ * access disabled for all other (user) threads. Examples for such accesses
+ * are modeset commits and connector probing. The resume thread should only
+ * perform HW init programming (as the restoring modeset). This function
+ * should be followed by calling intel_display_driver_enable_user_access(),
+ * after completing the HW init programming steps.
+ *
+ * This function should be called during system resume before starting the HW
+ * init steps.
+ */
+void intel_display_driver_resume_access(struct drm_i915_private *i915)
+{
+ set_display_access(i915, false, current);
+}
+
+/**
+ * intel_display_driver_check_access - Check if the current thread has display HW access
+ * @i915: i915 device instance
+ *
+ * Check whether the current thread has display HW access and print a debug
+ * message if it doesn't. Such accesses are modeset commits and connector
+ * probing. If the function returns %false any HW access should be prevented.
+ *
+ * Returns %true if the current thread has display HW access, %false
+ * otherwise.
+ */
+bool intel_display_driver_check_access(struct drm_i915_private *i915)
+{
+ char comm[TASK_COMM_LEN];
+ char current_task[TASK_COMM_LEN + 16];
+ char allowed_task[TASK_COMM_LEN + 16] = "none";
+
+ if (i915->display.access.any_task_allowed ||
+ i915->display.access.allowed_task == current)
+ return true;
+
+ snprintf(current_task, sizeof(current_task), "%s[%d]",
+ get_task_comm(comm, current),
+ task_pid_vnr(current));
+
+ if (i915->display.access.allowed_task)
+ snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
+ get_task_comm(comm, i915->display.access.allowed_task),
+ task_pid_vnr(i915->display.access.allowed_task));
+
+ drm_dbg_kms(&i915->drm,
+ "Reject display access from task %s (allowed to %s)\n",
+ current_task, allowed_task);
+
+ return false;
+}
+
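/*
 * Illustrative sketch, not part of this patch: how the access helpers above
 * are expected to be sequenced around driver load, suspend and resume, and
 * how a user-thread entry point consults intel_display_driver_check_access().
 * The my_*() callers below are hypothetical.
 */
static void my_load_display(struct drm_i915_private *i915)
{
	intel_display_driver_disable_user_access(i915);	/* only this thread may program HW */
	/* ... HW init programming, initial modeset ... */
	intel_display_driver_enable_user_access(i915);	/* open HW access to all threads */
}

static void my_suspend_display(struct drm_i915_private *i915)
{
	intel_display_driver_disable_user_access(i915);
	/* ... disabling modeset, HW deinit programming ... */
	intel_display_driver_suspend_access(i915);	/* no thread may program HW */
}

static void my_resume_display(struct drm_i915_private *i915)
{
	intel_display_driver_resume_access(i915);	/* only the resume thread */
	/* ... HW init programming, restoring modeset ... */
	intel_display_driver_enable_user_access(i915);
}

static int my_connector_detect(struct drm_i915_private *i915,
			       struct drm_connector *connector)
{
	/* user-thread paths like detect/probe bail out while access is restricted */
	if (!intel_display_driver_check_access(i915))
		return connector->status;
	/* ... HW access ... */
	return connector_status_connected;
}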
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
if (!HAS_DISPLAY(i915))
@@ -301,10 +435,8 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
for_each_pipe(i915, pipe) {
ret = intel_crtc_init(i915, pipe);
- if (ret) {
- intel_mode_config_cleanup(i915);
- return ret;
- }
+ if (ret)
+ goto err_mode_config;
}
intel_plane_possible_crtcs_init(i915);
@@ -315,8 +447,6 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_display_driver_init_hw(i915);
intel_dpll_update_ref_clks(i915);
- intel_hdcp_component_init(i915);
-
if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
@@ -326,16 +456,18 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_vga_disable(i915);
intel_setup_outputs(i915);
+ ret = intel_dp_tunnel_mgr_init(i915);
+ if (ret)
+ goto err_hdcp;
+
+ intel_display_driver_disable_user_access(i915);
+
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
- for_each_intel_crtc(dev, crtc) {
- if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
- continue;
- intel_crtc_initial_plane_config(crtc);
- }
+ intel_initial_plane_config(i915);
/*
* Make sure hardware watermarks really match the state we read out.
@@ -346,6 +478,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
ilk_wm_sanitize(i915);
return 0;
+
+err_hdcp:
+ intel_hdcp_component_fini(i915);
+err_mode_config:
+ intel_mode_config_cleanup(i915);
+
+ return ret;
}
/* part #3: call after gem init */
@@ -357,6 +496,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
return 0;
/*
+ * This will bind stuff into ggtt, so it needs to be done after
+ * the BIOS fb takeover and whatever other magic ggtt reservations
+ * happen during gem/ggtt init.
+ */
+ intel_hdcp_component_init(i915);
+
+ /*
* Force all active planes to recompute their states. So that on
* mode_setcrtc after probe, all the intel_plane_state variables
* are already calculated and there is no assert_plane warnings
@@ -374,7 +520,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
- intel_hpd_poll_disable(i915);
skl_watermark_ipc_init(i915);
@@ -383,7 +528,8 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
void intel_display_driver_register(struct drm_i915_private *i915)
{
- struct drm_printer p = drm_debug_printer("i915 display info:");
+ struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
+ "i915 display info:");
if (!HAS_DISPLAY(i915))
return;
@@ -394,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
+ intel_display_driver_enable_user_access(i915);
+
intel_display_debugfs_register(i915);
/*
@@ -412,6 +560,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(&i915->drm);
+ intel_hpd_poll_disable(i915);
intel_display_device_info_print(DISPLAY_INFO(i915),
DISPLAY_RUNTIME_INFO(i915), &p);
@@ -440,6 +589,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ intel_display_driver_suspend_access(i915);
+
/*
* Due to the hpd irq storm handling, the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -458,6 +609,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_mode_config_cleanup(i915);
+ intel_dp_tunnel_mgr_cleanup(i915);
+
intel_overlay_cleanup(i915);
intel_gmbus_teardown(i915);
@@ -486,14 +639,17 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
return;
intel_fbdev_unregister(i915);
- intel_audio_deinit(i915);
-
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), flush
* the hotplug events.
*/
drm_kms_helper_poll_fini(&i915->drm);
+
+ intel_display_driver_disable_user_access(i915);
+
+ intel_audio_deinit(i915);
+
drm_atomic_helper_shutdown(&i915->drm);
acpi_video_unregister();
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h
index c276a58ee329..42cc4af6d3fd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.h
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.h
@@ -32,5 +32,11 @@ int __intel_display_driver_resume(struct drm_i915_private *i915,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
+void intel_display_driver_enable_user_access(struct drm_i915_private *i915);
+void intel_display_driver_disable_user_access(struct drm_i915_private *i915);
+void intel_display_driver_suspend_access(struct drm_i915_private *i915);
+void intel_display_driver_resume_access(struct drm_i915_private *i915);
+bool intel_display_driver_check_access(struct drm_i915_private *i915);
+
#endif /* __INTEL_DISPLAY_DRIVER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index a7d8f3fc98de..f846c5b108b5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -266,12 +266,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
intel_uncore_posting_read(&dev_priv->uncore, reg);
}
-static bool i915_has_asle(struct drm_i915_private *dev_priv)
+static bool i915_has_asle(struct drm_i915_private *i915)
{
- if (!dev_priv->display.opregion.asle)
+ if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return intel_opregion_asle_present(i915);
}
/**
@@ -986,7 +986,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);
+ drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
@@ -1587,7 +1587,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
struct intel_uncore *uncore = &i915->uncore;
u32 display_mask, extra_mask;
- if (GRAPHICS_VER(i915) >= 7) {
+ if (DISPLAY_VER(i915) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 47cd6bb04366..06900ff307b2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
- return intel_port_to_phy(i915, dig_port->base.port);
+ /*
+ * FIXME should we care about the (VBT defined) dig_port->aux_ch
+ * relationship or should this be purely defined by the hardware layout?
+ * Currently if the port doesn't appear in the VBT, or if it's declared
+ * as HDMI-only and routed to a combo PHY, the encoder either won't be
+ * present at all or it will not have an aux_ch assigned.
+ */
+ return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
}
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12)
+ /* FIXME this is a mess */
+ if (phy != PHY_NONE)
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
0, ICL_LANE_ENABLE_AUX);
@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
+ /* FIXME this is a mess */
+ if (phy != PHY_NONE)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ ICL_LANE_ENABLE_AUX, 0);
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 3fdd8a517983..e67cd5b02e84 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -33,6 +33,7 @@
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
#include <drm/display/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
@@ -327,7 +328,6 @@ struct intel_vbt_panel_data {
struct edp_power_seq pps;
u8 drrs_msa_timing_delay;
bool low_vswing;
- bool initialized;
bool hobl;
} edp;
@@ -499,15 +499,15 @@ struct intel_hdcp_shim {
struct intel_connector *connector);
/* Detects panel's hdcp capability. This is optional for HDMI. */
- int (*hdcp_capable)(struct intel_digital_port *dig_port,
- bool *hdcp_capable);
+ int (*hdcp_get_capability)(struct intel_digital_port *dig_port,
+ bool *hdcp_capable);
/* HDCP adaptation(DP/HDMI) required on the port */
enum hdcp_wired_protocol protocol;
/* Detects whether sink is HDCP2.2 capable */
- int (*hdcp_2_2_capable)(struct intel_connector *connector,
- bool *capable);
+ int (*hdcp_2_2_get_capability)(struct intel_connector *connector,
+ bool *capable);
/* Write HDCP2.2 messages */
int (*write_2_2_msg)(struct intel_connector *connector,
@@ -532,6 +532,10 @@ struct intel_hdcp_shim {
/* HDCP2.2 Link Integrity Check */
int (*check_2_2_link)(struct intel_digital_port *dig_port,
struct intel_connector *connector);
+
+ /* HDCP remote sink cap */
+ int (*get_remote_hdcp_capability)(struct intel_connector *connector,
+ bool *hdcp_capable, bool *hdcp2_capable);
};
struct intel_hdcp {
@@ -609,6 +613,13 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+ /*
+ * Optional hook called during init/resume to sync any state
+ * stored in the connector (e.g. DSC state) w.r.t. the HW state.
+ */
+ void (*sync_state)(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
+
/* Panel info for eDP and LVDS */
struct intel_panel panel;
@@ -626,6 +637,8 @@ struct intel_connector {
struct intel_dp *mst_port;
+ bool force_bigjoiner_enable;
+
struct {
struct drm_dp_aux *dsc_decompression_aux;
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
@@ -677,6 +690,8 @@ struct intel_atomic_state {
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
+ struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels;
+
/*
* Current watermarks can't be trusted during hardware readout, so
* don't bother calculating intermediate watermarks.
@@ -780,6 +795,8 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
+ struct intel_memory_region *mem;
+ resource_size_t phys_base;
struct i915_vma *vma;
unsigned int tiling;
int size;
@@ -1213,12 +1230,12 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
+ bool enable_psr2_su_region_et;
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
- struct drm_dp_vsc_sdp psr_vsc;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1372,6 +1389,9 @@ struct intel_crtc_state {
struct drm_dsc_config config;
} dsc;
+ /* DP tunnel used for BW allocation. */
+ struct drm_dp_tunnel_ref dp_tunnel_ref;
+
/* HSW+ linetime watermarks */
u16 linetime;
u16 ips_linetime;
@@ -1402,6 +1422,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
+ struct drm_rect psr2_su_area;
+
/* Variable Refresh Rate state */
struct {
bool enable, in_range;
@@ -1682,13 +1704,14 @@ struct intel_psr {
/* Mutex for PSR state of the transcoder */
struct mutex lock;
-#define I915_PSR_DEBUG_MODE_MASK 0x0f
-#define I915_PSR_DEBUG_DEFAULT 0x00
-#define I915_PSR_DEBUG_DISABLE 0x01
-#define I915_PSR_DEBUG_ENABLE 0x02
-#define I915_PSR_DEBUG_FORCE_PSR1 0x03
-#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
-#define I915_PSR_DEBUG_IRQ 0x10
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
+#define I915_PSR_DEBUG_IRQ 0x10
+#define I915_PSR_DEBUG_SU_REGION_ET_DISABLE 0x20
u32 debug;
bool sink_support;
@@ -1702,14 +1725,20 @@ struct intel_psr {
unsigned int busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
- bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
- u8 io_wake_lines;
- u8 fast_wake_lines;
+
+ struct {
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
+ /* LNL and beyond */
+ u8 check_entry_lines;
+ } alpm_parameters;
+
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@@ -1773,6 +1802,9 @@ struct intel_dp {
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
+ struct drm_dp_tunnel *tunnel;
+ bool tunnel_suspended:1;
+
/* mst connector list */
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;
@@ -1833,6 +1865,8 @@ struct intel_dp {
/* When we last wrote the OUI for eDP */
unsigned long last_oui_write;
+
+ bool colorimetry_support;
};
enum lspcon_vendor {
@@ -1890,6 +1924,9 @@ struct intel_digital_port {
u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
bool (*connected)(struct intel_encoder *encoder);
+
+ void (*lock)(struct intel_digital_port *dig_port);
+ void (*unlock)(struct intel_digital_port *dig_port);
};
struct intel_dp_mst_encoder {
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index b70502586ab9..835781624482 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1158,7 +1158,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
str_yes_no(intel_dmc_has_payload(i915)));
seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
- str_yes_no(GRAPHICS_VER(i915) >= 12));
+ str_yes_no(DISPLAY_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ae647d03af25..f0c3ed37b350 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -36,6 +36,7 @@
#include <asm/byteorder.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -56,12 +57,14 @@
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_fifo_underrun.h"
@@ -151,6 +154,22 @@ int intel_dp_link_symbol_clock(int rate)
return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
+static int max_dprx_rate(struct intel_dp *intel_dp)
+{
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+
+ return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+}
+
+static int max_dprx_lane_count(struct intel_dp *intel_dp)
+{
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);
+
+ return drm_dp_max_lane_count(intel_dp->dpcd);
+}
+
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
intel_dp->sink_rates[0] = 162000;
@@ -179,7 +198,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
/*
* Sink rates for 8b/10b.
*/
- max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ max_rate = max_dprx_rate(intel_dp);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
max_rate = min(max_rate, max_lttpr_rate);
@@ -258,7 +277,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
- intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);
switch (intel_dp->max_sink_lane_count) {
case 1:
@@ -308,7 +327,7 @@ static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
}
/* Theoretical max between source and sink */
-static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
+int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
@@ -325,7 +344,7 @@ static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
}
/* Theoretical max between source and sink */
-static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
+int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dp_max_source_lane_count(dig_port);
@@ -382,50 +401,27 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
1000000 * 16 * 8);
}
-/*
- * Given a link rate and lanes, get the data bandwidth.
- *
- * Data bandwidth is the actual payload rate, which depends on the data
- * bandwidth efficiency and the link rate.
+/**
+ * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
+ * @intel_dp: Intel DP object
+ * @max_dprx_rate: Maximum data rate of the DPRX
+ * @max_dprx_lanes: Maximum lane count of the DPRX
*
- * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
- * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
- * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
- * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
- * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
- * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
+ * Calculate the maximum data rate for the provided link parameters taking into
+ * account any BW limitations by a DP tunnel attached to @intel_dp.
*
- * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
- * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
- * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
- * does not match the symbol clock, the port clock (not even if you think in
- * terms of a byte clock), nor the data bandwidth. It only matches the link bit
- * rate in units of 10000 bps.
+ * Returns the maximum data rate in kBps units.
*/
-int
-intel_dp_max_data_rate(int max_link_rate, int max_lanes)
+int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
+ int max_dprx_rate, int max_dprx_lanes)
{
- int ch_coding_efficiency =
- drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
- int max_link_rate_kbps = max_link_rate * 10;
+ int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);
- /*
- * UHBR rates always use 128b/132b channel encoding, and have
- * 97.71% data bandwidth efficiency. Consider max_link_rate the
- * link bit rate in units of 10000 bps.
- */
- /*
- * Lower than UHBR rates always use 8b/10b channel encoding, and have
- * 80% data bandwidth efficiency for SST non-FEC. However, this turns
- * out to be a nop by coincidence:
- *
- * int max_link_rate_kbps = max_link_rate * 10;
- * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
- * max_link_rate = max_link_rate_kbps / 8;
- */
- return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
- ch_coding_efficiency),
- 1000000 * 8);
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ max_rate = min(max_rate,
+ drm_dp_tunnel_available_bw(intel_dp->tunnel));
+
+ return max_rate;
}
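/*
 * Worked example (illustrative numbers, not from the patch): with 8b/10b
 * channel coding, SST and no FEC, the data bandwidth efficiency is 80%, so
 * for an 8.1 Gbps (HBR3, 810000 in the driver's kHz-style units) link with
 * 4 lanes the unconstrained DPRX data rate is
 *
 *	810000 * 4 * 0.8 = 2592000 kBps
 *
 * which is what drm_dp_max_dprx_data_rate(810000, 4) is assumed to return
 * here. If BW allocation is enabled on an attached DP tunnel and the TBT CM
 * has granted, say, only 2000000 kBps, intel_dp_max_link_data_rate() clamps
 * the result to that tunnel BW instead.
 */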
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -657,7 +653,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int mode_rate, max_rate;
mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
- max_rate = intel_dp_max_data_rate(link_rate, lane_count);
+ max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
if (mode_rate > max_rate)
return false;
@@ -1204,11 +1200,13 @@ bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
if (!intel_dp_can_bigjoiner(intel_dp))
return false;
- return clock > i915->max_dotclk_freq || hdisplay > 5120;
+ return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
+ connector->force_bigjoiner_enable;
}
static enum drm_mode_status
@@ -1259,7 +1257,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);
+
mode_rate = intel_dp_link_required(target_clock,
intel_dp_mode_min_output_bpp(connector, mode));
@@ -1609,8 +1608,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- link_avail = intel_dp_max_data_rate(link_rate,
- lane_count);
+ link_avail = intel_dp_max_link_data_rate(intel_dp,
+ link_rate,
+ lane_count);
+
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
@@ -2386,6 +2387,17 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits);
}
+int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int bpp = crtc_state->dsc.compression_enable ?
+ to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
+ crtc_state->pipe_bpp;
+
+ return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
+}
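/*
 * Illustrative numbers, assuming intel_dp_link_required() computes roughly
 * pixel_clock * bpp / 8 in kBps: for a 533250 kHz mode at 24 bpp the
 * uncompressed requirement is 533250 * 24 / 8 = 1599750 kBps, while with DSC
 * enabled at a compressed bpp of 12 the same helper yields
 * 533250 * 12 / 8 = 799875 kBps. This is the value the consolidated debug
 * message in intel_dp_compute_link_config() now reports alongside
 * intel_dp_max_link_data_rate().
 */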
+
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2453,31 +2465,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- if (pipe_config->dsc.compression_enable) {
- drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
-
- drm_dbg_kms(&i915->drm,
- "DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
- } else {
- drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+ intel_dp_config_required_rate(pipe_config),
+ intel_dp_max_link_data_rate(intel_dp,
+ pipe_config->port_clock,
+ pipe_config->lane_count));
- drm_dbg_kms(&i915->drm,
- "DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
- }
return 0;
}
@@ -2619,58 +2616,38 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+ struct drm_dp_vsc_sdp *vsc;
- /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
- if (crtc_state->has_psr)
+ if ((!intel_dp->colorimetry_support ||
+ !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
+ !crtc_state->has_psr)
return;
- if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
- return;
+ vsc = &crtc_state->infoframes.vsc;
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
vsc->sdp_type = DP_SDP_VSC;
- intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
- &crtc_state->infoframes.vsc);
-}
-
-void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state,
- struct drm_dp_vsc_sdp *vsc)
-{
- vsc->sdp_type = DP_SDP_VSC;
- if (crtc_state->has_psr2) {
- if (intel_dp->psr.colorimetry_support &&
- intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
- /* [PSR2, +Colorimetry] */
- intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
- vsc);
- } else {
- /*
- * [PSR2, -Colorimetry]
- * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
- * 3D stereo + PSR/PSR2 + Y-coordinate.
- */
- vsc->revision = 0x4;
- vsc->length = 0xe;
- }
+ /* Needs colorimetry */
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else if (crtc_state->has_psr2) {
+ /*
+ * [PSR2 without colorimetry]
+ * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
+ * 3D stereo + PSR/PSR2 + Y-coordinate.
+ */
+ vsc->revision = 0x4;
+ vsc->length = 0xe;
} else if (crtc_state->has_panel_replay) {
- if (intel_dp->psr.colorimetry_support &&
- intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
- /* [Panel Replay with colorimetry info] */
- intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
- vsc);
- } else {
- /*
- * [Panel Replay without colorimetry info]
- * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
- * VSC SDP supporting 3D stereo + Panel Replay.
- */
- vsc->revision = 0x6;
- vsc->length = 0x10;
- }
+ /*
+ * [Panel Replay without colorimetry info]
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo + Panel Replay.
+ */
+ vsc->revision = 0x6;
+ vsc->length = 0x10;
} else {
/*
* [PSR1]
@@ -2859,12 +2836,47 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
intel_dp_is_uhbr(pipe_config);
}
+void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_connector_get(&connector->base);
+ if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
+}
+
+void
+intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *connector;
+ struct intel_digital_connector_state *conn_state;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int i;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
+
+ return;
+ }
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (!conn_state->base.crtc)
+ continue;
+
+ if (connector->mst_port == intel_dp)
+ intel_dp_queue_modeset_retry_work(connector);
+ }
+}
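/*
 * Sketch of a hypothetical caller: a link-training failure path that has
 * already dropped to fallback link parameters can ask userspace to reprobe
 * every connector driven by the failing link, covering both SST and MST:
 *
 *	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
 *
 * Each affected connector gets a reference taken before its retry work is
 * queued; intel_dp_modeset_retry_work_fn() sends the hotplug uevent and then
 * drops that reference.
 */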
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
@@ -2965,7 +2977,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
- return 0;
+ return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
+ pipe_config);
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -3301,18 +3314,21 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (!crtc_state)
- return;
+ bool dpcd_updated = false;
/*
* Don't clobber DPCD if it's already been read out during output
* setup (eDP) or detect.
*/
- if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
intel_dp_get_dpcd(intel_dp);
+ dpcd_updated = true;
+ }
- intel_dp_reset_max_link_params(intel_dp);
+ intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
+
+ if (crtc_state)
+ intel_dp_reset_max_link_params(intel_dp);
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -3348,13 +3364,6 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
fastset = false;
}
- if (CAN_PSR(intel_dp)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
- encoder->base.base.id, encoder->base.name);
- crtc_state->uapi.mode_changed = true;
- fastset = false;
- }
-
return fastset;
}
@@ -3985,6 +3994,13 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp)
&intel_dp->desc);
}
+void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
+{
+ intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_max_sink_lane_count(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+}
+
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
@@ -4001,9 +4017,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
- intel_dp_set_sink_rates(intel_dp);
- intel_dp_set_max_sink_lane_count(intel_dp);
- intel_dp_set_common_rates(intel_dp);
+ intel_dp_update_sink_caps(intel_dp);
}
if (intel_dp_has_sink_count(intel_dp)) {
@@ -4113,73 +4127,6 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
-static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
- struct dp_sdp *sdp, size_t size)
-{
- size_t length = sizeof(struct dp_sdp);
-
- if (size < length)
- return -ENOSPC;
-
- memset(sdp, 0, size);
-
- /*
- * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
- * VSC SDP Header Bytes
- */
- sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
- sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
- sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
- sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
-
- if (vsc->revision == 0x6) {
- sdp->db[0] = 1;
- sdp->db[3] = 1;
- }
-
- /*
- * Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry
- * Format as per DP 1.4a spec and DP 2.0 respectively.
- */
- if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
- goto out;
-
- /* VSC SDP Payload for DB16 through DB18 */
- /* Pixel Encoding and Colorimetry Formats */
- sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
- sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
-
- switch (vsc->bpc) {
- case 6:
- /* 6bpc: 0x0 */
- break;
- case 8:
- sdp->db[17] = 0x1; /* DB17[3:0] */
- break;
- case 10:
- sdp->db[17] = 0x2;
- break;
- case 12:
- sdp->db[17] = 0x3;
- break;
- case 16:
- sdp->db[17] = 0x4;
- break;
- default:
- MISSING_CASE(vsc->bpc);
- break;
- }
- /* Dynamic Range and Component Bit Depth */
- if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
- sdp->db[17] |= 0x80; /* DB17[7] */
-
- /* Content Type */
- sdp->db[18] = vsc->content_type & 0x7;
-
-out:
- return length;
-}
-
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
const struct hdmi_drm_infoframe *drm_infoframe,
@@ -4272,8 +4219,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
switch (type) {
case DP_SDP_VSC:
- len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
- sizeof(sdp));
+ len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
@@ -4291,24 +4237,6 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
-void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_dp_vsc_sdp *vsc)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct dp_sdp sdp = {};
- ssize_t len;
-
- len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
-
- if (drm_WARN_ON(&dev_priv->drm, len < 0))
- return;
-
- dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
- &sdp, len);
-}
-
void intel_dp_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -4335,9 +4263,7 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable)
return;
- /* When PSR is enabled, VSC SDP is handled by PSR routine */
- if (!crtc_state->has_psr)
- intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
@@ -4468,10 +4394,6 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct dp_sdp sdp = {};
int ret;
- /* When PSR is enabled, VSC SDP is handled by PSR routine */
- if (crtc_state->has_psr)
- return;
-
if ((crtc_state->infoframes.enable &
intel_hdmi_infoframe_enable(type)) == 0)
return;
@@ -4682,31 +4604,36 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe = crtc->pipe;
u32 pattern_val;
switch (data->phy_pattern) {
- case DP_PHY_TEST_PATTERN_NONE:
+ case DP_LINK_QUAL_PATTERN_DISABLE:
drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_LINK_TRAIN_NORMAL);
break;
- case DP_PHY_TEST_PATTERN_D10_2:
+ case DP_LINK_QUAL_PATTERN_D10_2:
drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
break;
- case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ case DP_LINK_QUAL_PATTERN_ERROR_RATE:
drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_SCRAMBLED_0);
break;
- case DP_PHY_TEST_PATTERN_PRBS7:
+ case DP_LINK_QUAL_PATTERN_PRBS7:
drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
break;
- case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
/*
* FIXME: Ideally pattern should come from DPCD 0x250. As
* current firmware of DPR-100 could not set it, so hardcoding
@@ -4724,7 +4651,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_CUSTOM80);
break;
- case DP_PHY_TEST_PATTERN_CP2520:
+ case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
/*
* FIXME: Ideally pattern should come from DPCD 0x24A. As
* current firmware of DPR-100 could not set it, so hardcoding
@@ -4736,8 +4663,19 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
pattern_val);
break;
+ case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
+ if (DISPLAY_VER(dev_priv) < 10) {
+ drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
+ break;
+ }
+ drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
+ break;
default:
- WARN(1, "Invalid Phy Test Pattern\n");
+ drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
}
}
@@ -4902,13 +4840,15 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
* - %true if pending interrupts were serviced (or no interrupts were
* pending) w/o detecting an error condition.
* - %false if an error condition - like AUX failure or a loss of link - is
- * detected, which needs servicing from the hotplug work.
+ * detected, or another condition - like a DP tunnel BW state change - needs
+ * servicing from the hotplug work.
*/
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool link_ok = true;
+ bool reprobe_needed = false;
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
@@ -4935,6 +4875,13 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
+ if (esi[3] & DP_TUNNELING_IRQ) {
+ if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ &intel_dp->aux))
+ reprobe_needed = true;
+ ack[3] |= DP_TUNNELING_IRQ;
+ }
+
if (!memchr_inv(ack, 0, sizeof(ack)))
break;
@@ -4945,7 +4892,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
}
- return link_ok;
+ return link_ok && !reprobe_needed;
}
static void
@@ -5072,9 +5019,10 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
if (!crtc_state->hw.active)
continue;
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
- continue;
+ if (conn_state->commit)
+ drm_WARN_ON(&i915->drm,
+ !wait_for_completion_timeout(&conn_state->commit->hw_done,
+ msecs_to_jiffies(5000)));
*pipe_mask |= BIT(crtc->pipe);
}
@@ -5304,23 +5252,32 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
-static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool reprobe_needed = false;
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
- return;
+ return false;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
- return;
+ return false;
+
+ if ((val & DP_TUNNELING_IRQ) &&
+ drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ &intel_dp->aux))
+ reprobe_needed = true;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
- return;
+ return reprobe_needed;
if (val & HDMI_LINK_STATUS_CHANGED)
intel_dp_handle_hdmi_link_status_change(intel_dp);
+
+ return reprobe_needed;
}
/*
@@ -5341,6 +5298,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 old_sink_count = intel_dp->sink_count;
+ bool reprobe_needed = false;
bool ret;
/*
@@ -5363,7 +5321,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
}
intel_dp_check_device_service_irq(intel_dp);
- intel_dp_check_link_service_irq(intel_dp);
+ reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -5390,10 +5348,10 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
* FIXME get rid of the ad-hoc phy test modeset code
* and properly incorporate it into the normal modeset.
*/
- return false;
+ reprobe_needed = true;
}
- return true;
+ return !reprobe_needed;
}
/* XXX this is probably wrong for multiple downstream ports */
@@ -5456,8 +5414,24 @@ edp_detect(struct intel_dp *intel_dp)
return connector_status_connected;
}
+void intel_digital_port_lock(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (dig_port->lock)
+ dig_port->lock(dig_port);
+}
+
+void intel_digital_port_unlock(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (dig_port->unlock)
+ dig_port->unlock(dig_port);
+}
+
/*
- * intel_digital_port_connected - is the specified port connected?
+ * intel_digital_port_connected_locked - is the specified port connected?
* @encoder: intel_encoder
*
* In cases where there's a connector physically connected but it can't be used
@@ -5465,21 +5439,44 @@ edp_detect(struct intel_dp *intel_dp)
* pretty much treat the port as disconnected. This is relevant for type-C
* (starting on ICL) where there's ownership involved.
*
+ * The caller must hold the lock acquired by calling intel_digital_port_lock()
+ * when calling this function.
+ *
* Return %true if port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct intel_encoder *encoder)
+bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
bool is_connected = false;
intel_wakeref_t wakeref;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- is_connected = dig_port->connected(encoder);
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
+
+ do {
+ is_connected = dig_port->connected(encoder);
+ if (is_connected || is_glitch_free)
+ break;
+ usleep_range(10, 30);
+ } while (time_before(jiffies, wait_expires));
+ }
return is_connected;
}
+bool intel_digital_port_connected(struct intel_encoder *encoder)
+{
+ bool ret;
+
+ intel_digital_port_lock(encoder);
+ ret = intel_digital_port_connected_locked(encoder);
+ intel_digital_port_unlock(encoder);
+
+ return ret;
+}
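/*
 * Sketch of a hypothetical caller: when several operations must see a
 * consistent connected state, hold the port lock explicitly and use the
 * _locked variant, as the AUX transfer path in this patch does.
 */
static int my_probe_sink(struct intel_encoder *encoder)
{
	int ret = 0;

	intel_digital_port_lock(encoder);
	if (!intel_digital_port_connected_locked(encoder)) {
		/* abort on a disconnected port, avoiding long AUX timeouts */
		ret = -ENXIO;
		goto out_unlock;
	}
	/* ... talk to the sink; the connected state can't change under us ... */
out_unlock:
	intel_digital_port_unlock(encoder);
	return ret;
}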
+
static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
@@ -5664,6 +5661,7 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
+ int ret;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5673,6 +5671,9 @@ intel_dp_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(dev_priv))
+ return connector->status;
+
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
@@ -5696,9 +5697,21 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp->is_mst);
}
+ intel_dp_tunnel_disconnect(intel_dp);
+
goto out;
}
+ ret = intel_dp_tunnel_detect(intel_dp, ctx);
+ if (ret == -EDEADLK)
+ return ret;
+
+ if (ret == 1)
+ intel_connector->base.epoch_counter++;
+
+ if (!intel_dp_is_edp(intel_dp))
+ intel_psr_init_dpcd(intel_dp);
+
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
intel_dp_configure_mst(intel_dp);
@@ -5729,8 +5742,6 @@ intel_dp_detect(struct drm_connector *connector,
* with an IRQ_HPD, so force a link status check.
*/
if (!intel_dp_is_edp(intel_dp)) {
- int ret;
-
ret = intel_dp_retrain_link(encoder, ctx);
if (ret)
return ret;
@@ -5773,6 +5784,10 @@ intel_dp_force(struct drm_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+
+ if (!intel_display_driver_check_access(dev_priv))
+ return;
+
intel_dp_unset_edid(intel_dp);
if (connector->status != connector_status_connected)
@@ -5859,6 +5874,19 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (crtc_state && crtc_state->dsc.compression_enable) {
+ drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ connector->dp.dsc_decompression_enabled = true;
+ } else {
+ connector->dp.dsc_decompression_enabled = false;
+ }
+}
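/*
 * Assumed wiring, not shown in this hunk: a DP connector points its optional
 * ->sync_state hook at the helper above during connector init, e.g.
 *
 *	intel_connector->sync_state = intel_dp_connector_sync_state;
 *
 * and the modeset setup/readout code is expected to call it with the read-out
 * crtc_state, or with NULL when the connector is inactive, which the helper
 * handles by clearing dsc_decompression_enabled.
 */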
+
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
@@ -5866,6 +5894,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
intel_dp_mst_encoder_cleanup(dig_port);
+ intel_dp_tunnel_destroy(intel_dp);
+
intel_pps_vdd_off_sync(intel_dp);
/*
@@ -5882,6 +5912,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
intel_pps_vdd_off_sync(intel_dp);
+
+ intel_dp_tunnel_suspend(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
@@ -6019,6 +6051,15 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
+ if (!intel_connector_needs_modeset(state, conn))
+ return 0;
+
+ ret = intel_dp_tunnel_atomic_check_state(state,
+ intel_dp,
+ intel_conn);
+ if (ret)
+ return ret;
+
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
@@ -6026,9 +6067,6 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(dev_priv) < 9)
return 0;
- if (!intel_connector_needs_modeset(state, conn))
- return 0;
-
if (conn->has_tile) {
ret = intel_modeset_tile_group(state, conn->tile_group->id);
if (ret)
@@ -6057,7 +6095,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
spin_unlock_irq(&i915->irq_lock);
if (need_work)
- queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
+ intel_hpd_schedule_detection(i915);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6085,6 +6123,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
@@ -6107,6 +6146,17 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
dig_port->base.base.name,
long_hpd ? "long" : "short");
+ /*
+ * TBT DP tunnels require the GFX driver to read out the DPRX caps in
+ * response to long HPD pulses. The DP hotplug handler does that;
+ * however, it may be blocked by another connector's/encoder's hotplug
+ * handler. Since the TBT CM may not complete the DP tunnel BW request
+ * for that other connector/encoder until this encoder's DPRX caps are
+ * read out, perform a dummy read here.
+ */
+ if (long_hpd)
+ intel_dp_read_dprx_caps(intel_dp, dpcd);
+
if (long_hpd) {
intel_dp->reset_link_params = true;
return IRQ_NONE;
@@ -6427,6 +6477,14 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
drm_kms_helper_connector_hotplug_event(connector);
+
+ drm_connector_put(connector);
+}
+
+void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
+{
+ INIT_WORK(&connector->modeset_retry_work,
+ intel_dp_modeset_retry_work_fn);
}
bool
@@ -6443,8 +6501,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
int type;
/* Initialize the work for modeset in case of link train failure */
- INIT_WORK(&intel_connector->modeset_retry_work,
- intel_dp_modeset_retry_work_fn);
+ intel_dp_init_modeset_retry_work(intel_connector);
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
@@ -6500,6 +6557,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
connector->interlace_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_connector->base.polled = intel_connector->polled;
intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -6530,6 +6588,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
+ intel_dp->colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 05db46b111f2..c540d3a73fe7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -43,8 +43,16 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_dp_min_bpp(enum intel_output_format output_format);
+void intel_dp_init_modeset_retry_work(struct intel_connector *connector);
+void intel_dp_queue_modeset_retry_work(struct intel_connector *connector);
+void
+intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
@@ -94,7 +102,11 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
+int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+int intel_dp_max_common_rate(struct intel_dp *intel_dp);
+int intel_dp_max_common_lane_count(struct intel_dp *intel_dp);
+void intel_dp_update_sink_caps(struct intel_dp *intel_dp);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
@@ -105,24 +117,21 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
-int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
+int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
+ int max_dprx_rate, int max_dprx_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state,
- struct drm_dp_vsc_sdp *vsc);
-void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_dp_vsc_sdp *vsc);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type);
+void intel_digital_port_lock(struct intel_encoder *encoder);
+void intel_digital_port_unlock(struct intel_encoder *encoder);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 2e2af71bcd5a..4f4a0e3b3114 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -9,6 +9,7 @@
#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
@@ -228,9 +229,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
@@ -245,18 +245,16 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- if (is_tc_port) {
- intel_tc_port_lock(dig_port);
- /*
- * Abort transfers on a disconnected port as required by
- * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
- * timeouts that would otherwise happen.
- * TODO: abort the transfer on non-TC ports as well.
- */
- if (!intel_tc_port_connected_locked(&dig_port->base)) {
- ret = -ENXIO;
- goto out_unlock;
- }
+ intel_digital_port_lock(encoder);
+ /*
+ * Abort transfers on a disconnected port as required by
+ * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
+ * timeouts that would otherwise happen.
+ */
+ if (!intel_dp_is_edp(intel_dp) &&
+ !intel_digital_port_connected_locked(&dig_port->base)) {
+ ret = -ENXIO;
+ goto out_unlock;
}
aux_domain = intel_aux_power_domain(dig_port);
@@ -423,8 +421,7 @@ out:
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
- if (is_tc_port)
- intel_tc_port_unlock(dig_port);
+ intel_digital_port_unlock(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 3a595cd433d4..b98a87883fef 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -36,8 +36,10 @@ static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
}
}
-static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_connector *connector,
+ int timeout)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
long ret;
#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
@@ -45,7 +47,8 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
msecs_to_jiffies(timeout));
if (!ret)
- DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
+ drm_dbg_kms(connector->base.dev,
+ "Timedout at waiting for CP_IRQ\n");
}
static
@@ -122,13 +125,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
+int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux,
+ struct drm_i915_private *i915,
u8 *bcaps)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -143,10 +146,11 @@ static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
if (ret)
return ret;
@@ -265,13 +269,14 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
- bool *hdcp_capable)
+int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port,
+ bool *hdcp_capable)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
if (ret)
return ret;
@@ -330,23 +335,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
-static struct drm_dp_aux *
-intel_dp_hdcp_get_aux(struct intel_connector *connector)
-{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
-
- if (intel_encoder_is_mst(connector->encoder))
- return &connector->port->aux;
- else
- return &dig_port->dp.aux;
-}
-
static int
intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
ret = drm_dp_dpcd_read(aux,
@@ -387,7 +382,8 @@ int hdcp2_detect_msg_availability(struct intel_connector *connector,
*msg_ready = true;
break;
default:
- DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+ drm_err(connector->base.dev,
+ "Unidentified msg_id: %d\n", msg_id);
return -EINVAL;
}
@@ -399,7 +395,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
bool msg_ready = false;
@@ -421,7 +419,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
* As we want to check the msg availability at timeout, Ignoring
* the timeout at wait for CP_IRQ.
*/
- intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ intel_dp_hdcp_wait_for_cp_irq(connector, timeout);
ret = hdcp2_detect_msg_availability(connector, msg_id,
&msg_ready);
if (!msg_ready)
@@ -454,8 +452,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
- struct drm_dp_aux *aux;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -463,8 +462,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
/* No msg_id in DP HDCP2.2 msgs */
bytes_to_write = size - 1;
byte++;
@@ -490,7 +487,8 @@ static
ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
u32 *dev_cnt, u8 *byte)
{
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
u8 *rx_info = byte;
@@ -515,8 +513,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_dp_aux *aux;
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -530,8 +529,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
return -EINVAL;
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
if (ret < 0)
return ret;
@@ -561,13 +558,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
/* Entire msg read timeout since initiate of msg read */
if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
- if (intel_encoder_is_mst(connector->encoder))
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout *
- connector->port->parent->num_ports);
- else
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout);
+ msg_end = ktime_add_ms(ktime_get_raw(),
+ hdcp2_msg_data->msg_read_timeout);
}
ret = drm_dp_dpcd_read(aux, offset,
@@ -648,25 +640,69 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp2_capable(struct intel_connector *connector,
- bool *capable)
+int _intel_dp_hdcp2_get_capability(struct drm_dp_aux *aux,
+ bool *capable)
{
- struct drm_dp_aux *aux;
u8 rx_caps[3];
+ int ret, i;
+
+ *capable = false;
+
+ /*
+ * Some HDCP monitors do not report their HDCP capability correctly on
+ * the first rx_caps read and usually need an extra read before they
+ * do. Read rx_caps up to three times before declaring the monitor
+ * not capable of HDCP 2.2.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_read(aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) {
+ *capable = true;
+ break;
+ }
+ }
+
+ return 0;
+}
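
A standalone sketch of the bounded-retry pattern used in _intel_dp_hdcp2_get_capability() above; read_caps() and the 0x02 capability bit are hypothetical stand-ins for the DPCD read, not the real DPCD layout:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 3

/* Hypothetical stand-in for the rx_caps DPCD read. */
static int read_caps(int attempt, unsigned char *caps)
{
	caps[0] = (attempt >= 1) ? 0x02 : 0x00;	/* reported correctly only from the 2nd read */
	return 0;
}

static bool sink_is_capable(void)
{
	unsigned char caps[1];

	for (int i = 0; i < MAX_TRIES; i++) {
		if (read_caps(i, caps))
			return false;	/* I/O error: treat as not capable in this sketch */
		if (caps[0] & 0x02)
			return true;	/* capability bit finally reported */
	}
	return false;
}

int main(void)
{
	printf("capable: %d\n", sink_is_capable());	/* prints 1 */
	return 0;
}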
+
+static
+int intel_dp_hdcp2_get_capability(struct intel_connector *connector,
+ bool *capable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
+
+ return _intel_dp_hdcp2_get_capability(aux, capable);
+}
+
+static
+int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
+ bool *hdcp_capable,
+ bool *hdcp2_capable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = &connector->port->aux;
+ u8 bcaps;
int ret;
- aux = intel_dp_hdcp_get_aux(connector);
+ if (!intel_encoder_is_mst(connector->encoder))
+ return -EINVAL;
- *capable = false;
- ret = drm_dp_dpcd_read(aux,
- DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
- rx_caps, HDCP_2_2_RXCAPS_LEN);
- if (ret != HDCP_2_2_RXCAPS_LEN)
- return ret >= 0 ? -EIO : ret;
+ ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
+ if (ret)
+ return ret;
- if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
- HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
- *capable = true;
+ *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
return 0;
}
@@ -682,12 +718,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
- .hdcp_capable = intel_dp_hdcp_capable,
+ .hdcp_get_capability = intel_dp_hdcp_get_capability,
.write_2_2_msg = intel_dp_hdcp2_write_msg,
.read_2_2_msg = intel_dp_hdcp2_read_msg,
.config_stream_type = intel_dp_hdcp2_config_stream_type,
.check_2_2_link = intel_dp_hdcp2_check_link,
- .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability,
.protocol = HDCP_PROTOCOL_DP,
};
@@ -812,13 +848,14 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.stream_encryption = intel_dp_mst_hdcp_stream_encryption,
.check_link = intel_dp_hdcp_check_link,
- .hdcp_capable = intel_dp_hdcp_capable,
+ .hdcp_get_capability = intel_dp_hdcp_get_capability,
.write_2_2_msg = intel_dp_hdcp2_write_msg,
.read_2_2_msg = intel_dp_hdcp2_read_msg,
.config_stream_type = intel_dp_hdcp2_config_stream_type,
.stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
.check_2_2_link = intel_dp_mst_hdcp2_check_link,
- .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability,
+ .get_remote_hdcp_capability = intel_dp_hdcp_get_remote_capability,
.protocol = HDCP_PROTOCOL_DP,
};
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 1abfafbbfa75..fb84ca98bb7a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -162,6 +162,28 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
return lttpr_count;
}
+int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (intel_dp_is_edp(intel_dp))
+ return 0;
+
+ /*
+ * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+ * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+ */
+ if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
+ if (drm_dp_dpcd_probe(&intel_dp->aux,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
+ return -EIO;
+
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
+ return -EIO;
+
+ return 0;
+}
+
/**
* intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
* @intel_dp: Intel DP struct
@@ -192,12 +214,10 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp) &&
(DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
- if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
- return -EIO;
-
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
- return -EIO;
+ if (err != 0)
+ return err;
lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
}
@@ -1075,7 +1095,6 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1093,7 +1112,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
}
/* Schedule a Hotplug Uevent to userspace to start modeset */
- queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
+ intel_dp_queue_modeset_retry_work(intel_connector);
}
/* Perform the link training on all LTTPRs and the DPRX on a link. */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 2c8f2775891b..19836a8a4f90 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -11,6 +11,7 @@
struct intel_crtc_state;
struct intel_dp;
+int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8a9432335030..53aec023ce92 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -37,10 +37,12 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -522,6 +524,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
const struct intel_connector *connector =
@@ -618,7 +621,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- return 0;
+ return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
+ pipe_config);
}
/*
@@ -875,6 +879,14 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
+ if (intel_connector_needs_modeset(state, connector)) {
+ ret = intel_dp_tunnel_atomic_check_state(state,
+ intel_connector->mst_port,
+ intel_connector);
+ if (ret)
+ return ret;
+ }
+
return drm_dp_atomic_release_time_slots(&state->base,
&intel_connector->mst_port->mst_mgr,
intel_connector->port);
@@ -1196,6 +1208,7 @@ static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
struct intel_dp *intel_dp = intel_connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
@@ -1203,6 +1216,9 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
+ if (!intel_display_driver_check_access(i915))
+ return drm_edid_connector_add_modes(connector);
+
drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, drm_edid);
@@ -1294,7 +1310,8 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ max_rate = intel_dp_max_link_data_rate(intel_dp,
+ max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
ret = drm_modeset_lock(&mgr->base.lock, ctx);
@@ -1410,6 +1427,9 @@ intel_dp_mst_detect(struct drm_connector *connector,
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
intel_connector->port);
}
@@ -1534,10 +1554,13 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
return NULL;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
+ intel_dp_init_modeset_retry_work(intel_connector);
+
intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
intel_connector->dp.dsc_hblank_expansion_quirk =
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
new file mode 100644
index 000000000000..75d76f91ecbd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include <drm/display/drm_dp_tunnel.h>
+
+#include "intel_atomic.h"
+#include "intel_display_limits.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
+#include "intel_link_bw.h"
+
+struct intel_dp_tunnel_inherited_state {
+ struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
+};
+
+/**
+ * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
+ * @intel_dp: DP port object the tunnel is connected to
+ *
+ * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
+ * should be called after detecting a sink-disconnect event from the port.
+ */
+void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
+{
+ drm_dp_tunnel_destroy(intel_dp->tunnel);
+ intel_dp->tunnel = NULL;
+}
+
+/**
+ * intel_dp_tunnel_destroy - Destroy a DP tunnel
+ * @intel_dp: DP port object the tunnel is connected to
+ *
+ * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
+ * allocation mode on the tunnel. This should be called while destroying the
+ * port.
+ */
+void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
+{
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
+
+ intel_dp_tunnel_disconnect(intel_dp);
+}
+
+static int kbytes_to_mbits(int kbytes)
+{
+ return DIV_ROUND_UP(kbytes * 8, 1000);
+}
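
A quick standalone check of the kB/s to Mb/s conversion above (round-up division); DIV_ROUND_UP is redefined locally only so the snippet compiles outside the kernel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int kbytes_to_mbits(int kbytes)
{
	return DIV_ROUND_UP(kbytes * 8, 1000);
}

int main(void)
{
	printf("%d\n", kbytes_to_mbits(998));	/* 7984 kb/s rounds up to 8 Mb/s */
	printf("%d\n", kbytes_to_mbits(1000));	/* exactly 8 Mb/s */
	return 0;
}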
+
+static int get_current_link_bw(struct intel_dp *intel_dp,
+ bool *below_dprx_bw)
+{
+ int rate = intel_dp_max_common_rate(intel_dp);
+ int lane_count = intel_dp_max_common_lane_count(intel_dp);
+ int bw;
+
+ bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
+ *below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);
+
+ return bw;
+}
+
+static int update_tunnel_state(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ bool old_bw_below_dprx;
+ bool new_bw_below_dprx;
+ int old_bw;
+ int new_bw;
+ int ret;
+
+ old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);
+
+ ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ encoder->base.base.id, encoder->base.name,
+ ERR_PTR(ret));
+
+ return ret;
+ }
+
+ if (ret == 0 ||
+ !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
+ return 0;
+
+ intel_dp_update_sink_caps(intel_dp);
+
+ new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);
+
+ /* Suppress the notification if the mode list can't change due to bw. */
+ if (old_bw_below_dprx == new_bw_below_dprx &&
+ !new_bw_below_dprx)
+ return 0;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ encoder->base.base.id, encoder->base.name,
+ kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));
+
+ return 1;
+}
+
+/*
+ * Allocate the BW for a tunnel on a DP connector/port if the connector/port
+ * was already active when detecting the tunnel. The allocated BW must be
+ * freed by the next atomic modeset, which stores the tunnel in
+ * intel_atomic_state::inherited_dp_tunnels and calls
+ * intel_dp_tunnel_atomic_free_bw().
+ */
+static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_crtc *crtc;
+ int tunnel_bw = 0;
+ int err;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int stream_bw = intel_dp_config_required_rate(crtc_state);
+
+ tunnel_bw += stream_bw;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ encoder->base.base.id, encoder->base.name,
+ crtc->base.base.id, crtc->base.name,
+ crtc->pipe,
+ kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
+ }
+
+ err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
+ if (err) {
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ encoder->base.base.id, encoder->base.name,
+ ERR_PTR(err));
+
+ return err;
+ }
+
+ return update_tunnel_state(intel_dp);
+}
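
The loop above sums the per-stream BW of every pipe in the mask before allocating the total on the tunnel in one go. A standalone sketch of the same mask walk, with made-up numbers (not i915 code):

#include <stdio.h>

#define MAX_PIPES 4

/* Made-up per-pipe stream BW values (kB/s). */
static const int stream_bw[MAX_PIPES] = { 300, 0, 450, 0 };

static int total_bw_for_pipes(unsigned int pipe_mask)
{
	int total = 0;

	for (int pipe = 0; pipe < MAX_PIPES; pipe++)
		if (pipe_mask & (1u << pipe))
			total += stream_bw[pipe];

	return total;
}

int main(void)
{
	printf("%d\n", total_bw_for_pipes(0x5));	/* pipes 0 and 2: 750 */
	return 0;
}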
+
+static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ u8 pipe_mask;
+ int err;
+
+ err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
+ if (err)
+ return err;
+
+ return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
+}
+
+static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_dp_tunnel *tunnel;
+ int ret;
+
+ tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
+ &intel_dp->aux);
+ if (IS_ERR(tunnel))
+ return PTR_ERR(tunnel);
+
+ intel_dp->tunnel = tunnel;
+
+ ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
+ if (ret) {
+ if (ret == -EOPNOTSUPP)
+ return 0;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ encoder->base.base.id, encoder->base.name,
+ ERR_PTR(ret));
+
+ /* Keep the tunnel with BWA disabled */
+ return 0;
+ }
+
+ ret = allocate_initial_tunnel_bw(intel_dp, ctx);
+ if (ret < 0)
+ intel_dp_tunnel_destroy(intel_dp);
+
+ return ret;
+}
+
+/**
+ * intel_dp_tunnel_detect - Detect a DP tunnel on a port
+ * @intel_dp: DP port object
+ * @ctx: lock context acquired by the connector detection handler
+ *
+ * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
+ * on it if supported and allocating the BW required on an already active port.
+ * The BW allocated this way must be freed by the next atomic modeset calling
+ * intel_dp_tunnel_atomic_free_bw().
+ *
+ * If @intel_dp already has a tunnel detected on it, update the tunnel's state
+ * wrt. its support for BW allocation mode and the available BW via the
+ * tunnel. If the tunnel's state change requires this - for instance the
+ * tunnel's group ID has changed - the tunnel will be dropped and recreated.
+ *
+ * Return 0 in case of success - after any detected tunnel has been added to
+ * @intel_dp - or 1 in case the BW on an already existing tunnel has changed
+ * in a way that requires notifying user space.
+ */
+int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+ int ret;
+
+ if (intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (intel_dp->tunnel) {
+ ret = update_tunnel_state(intel_dp);
+ if (ret >= 0)
+ return ret;
+
+ /* Try to recreate the tunnel after an update error. */
+ intel_dp_tunnel_destroy(intel_dp);
+ }
+
+ return detect_new_tunnel(intel_dp, ctx);
+}
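
A standalone sketch of how a caller might act on the return convention documented above (< 0 error, 0 success, 1 notify user space); detect_tunnel() is a stand-in, not the i915 function:

#include <stdio.h>

static int detect_tunnel(void)
{
	return 1;	/* pretend an existing tunnel's BW changed */
}

int main(void)
{
	int ret = detect_tunnel();

	if (ret < 0)
		printf("detection failed: %d\n", ret);
	else if (ret == 1)
		printf("BW changed, notify user space (hotplug uevent)\n");
	else
		printf("tunnel state up to date\n");
	return 0;
}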
+
+/**
+ * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
+ * @intel_dp: DP port object
+ *
+ * Query whether a DP tunnel is connected on @intel_dp and has the BW
+ * allocation mode enabled on it.
+ *
+ * Returns %true if the BW allocation mode is enabled on @intel_dp.
+ */
+bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
+{
+ return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
+}
+
+/**
+ * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
+ * @intel_dp: DP port object
+ *
+ * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
+ */
+void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return;
+
+ drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+
+ drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
+
+ intel_dp->tunnel_suspended = true;
+}
+
+/**
+ * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
+ * @intel_dp: DP port object
+ * @crtc_state: CRTC state
+ * @dpcd_updated: the DPCD DPRX capabilities got updated during resume
+ *
+ * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
+ */
+void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool dpcd_updated)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 pipe_mask;
+ int err = 0;
+
+ if (!intel_dp->tunnel_suspended)
+ return;
+
+ intel_dp->tunnel_suspended = false;
+
+ drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+
+ /*
+ * The TBT Connection Manager requires the GFX driver to read out
+ * the sink's DPRX caps to be able to service any BW requests later.
+ * During resume the caps cached in @intel_dp before suspend must not
+ * be overridden, so only do a dummy read here, unless the
+ * capabilities were already updated during resume.
+ */
+ if (!dpcd_updated) {
+ err = intel_dp_read_dprx_caps(intel_dp, dpcd);
+
+ if (err) {
+ drm_dp_tunnel_set_io_error(intel_dp->tunnel);
+ goto out_err;
+ }
+ }
+
+ err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
+ if (err)
+ goto out_err;
+
+ pipe_mask = 0;
+ if (crtc_state) {
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /* TODO: Add support for MST */
+ pipe_mask |= BIT(crtc->pipe);
+ }
+
+ err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
+ if (err < 0)
+ goto out_err;
+
+ return;
+
+out_err:
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ ERR_PTR(err));
+}
+
+static struct drm_dp_tunnel *
+get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ if (!state->inherited_dp_tunnels)
+ return NULL;
+
+ return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
+}
+
+static int
+add_inherited_tunnel(struct intel_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct drm_dp_tunnel *old_tunnel;
+
+ old_tunnel = get_inherited_tunnel(state, crtc);
+ if (old_tunnel) {
+ drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+ return 0;
+ }
+
+ if (!state->inherited_dp_tunnels) {
+ state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
+ GFP_KERNEL);
+ if (!state->inherited_dp_tunnels)
+ return -ENOMEM;
+ }
+
+ drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);
+
+ return 0;
+}
+
+static int check_inherited_tunnel_state(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ const struct intel_digital_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->base.connector);
+ struct intel_crtc *old_crtc;
+ const struct intel_crtc_state *old_crtc_state;
+
+ /*
+ * If a BWA tunnel gets detected only after the corresponding
+ * connector was already enabled without a BWA tunnel, or with a
+ * different BWA tunnel (which was removed meanwhile), the old CRTC
+ * state won't contain the state of the current tunnel. The tunnel
+ * still has reserved BW which needs to be released, so add the state
+ * for such inherited tunnels separately, only to this atomic state.
+ */
+ if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return 0;
+
+ if (!old_conn_state->base.crtc)
+ return 0;
+
+ old_crtc = to_intel_crtc(old_conn_state->base.crtc);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);
+
+ if (!old_crtc_state->hw.active ||
+ old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
+ return 0;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ old_crtc->base.base.id, old_crtc->base.name,
+ intel_dp->tunnel);
+
+ return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
+}
+
+/**
+ * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
+ * @state: Atomic state
+ *
+ * Free the inherited DP tunnel state in @state.
+ */
+void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
+{
+ enum pipe pipe;
+
+ if (!state->inherited_dp_tunnels)
+ return;
+
+ for_each_pipe(to_i915(state->base.dev), pipe)
+ if (state->inherited_dp_tunnels->ref[pipe].tunnel)
+ drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
+
+ kfree(state->inherited_dp_tunnels);
+ state->inherited_dp_tunnels = NULL;
+}
+
+static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ u32 pipe_mask;
+ int err;
+
+ err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
+ tunnel, &pipe_mask);
+ if (err)
+ return err;
+
+ drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
+
+ return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
+}
+
+/**
+ * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
+ * @state: Atomic state
+ * @crtc: CRTC to add the tunnel state for
+ *
+ * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
+ * via a DP tunnel.
+ *
+ * Return 0 in case of success, a negative error code otherwise.
+ */
+int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_dp_tunnel_state *tunnel_state;
+ struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;
+
+ if (!tunnel)
+ return 0;
+
+ tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
+ if (IS_ERR(tunnel_state))
+ return PTR_ERR(tunnel_state);
+
+ return 0;
+}
+
+static int check_group_state(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ struct intel_connector *connector,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->dp_tunnel_ref.tunnel)
+ return 0;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ crtc->base.base.id, crtc->base.name,
+ crtc_state->dp_tunnel_ref.tunnel);
+
+ return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
+}
+
+/**
+ * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
+ * @state: Atomic state
+ * @intel_dp: DP port object
+ * @connector: connector using @intel_dp
+ *
+ * Check and add the DP tunnel atomic state for @intel_dp/@connector to
+ * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
+ * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
+ * DP tunnel.
+ *
+ * Returns 0 in case of success, or a negative error code otherwise.
+ */
+int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ struct intel_connector *connector)
+{
+ const struct intel_digital_connector_state *old_conn_state =
+ intel_atomic_get_old_connector_state(state, connector);
+ const struct intel_digital_connector_state *new_conn_state =
+ intel_atomic_get_new_connector_state(state, connector);
+ int err;
+
+ if (old_conn_state->base.crtc) {
+ err = check_group_state(state, intel_dp, connector,
+ to_intel_crtc(old_conn_state->base.crtc));
+ if (err)
+ return err;
+ }
+
+ if (new_conn_state->base.crtc &&
+ new_conn_state->base.crtc != old_conn_state->base.crtc) {
+ err = check_group_state(state, intel_dp, connector,
+ to_intel_crtc(new_conn_state->base.crtc));
+ if (err)
+ return err;
+ }
+
+ return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
+}
+
+/**
+ * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
+ * @state: Atomic state
+ * @intel_dp: DP object
+ * @connector: connector using @intel_dp
+ * @crtc_state: state of CRTC of the given DP tunnel stream
+ *
+ * Compute the BW required by the CRTC (aka DP tunnel stream), storing this BW
+ * in the DP tunnel state containing the stream in @state. Before re-computing
+ * the BW requirement in @crtc_state, the old requirement computed by this
+ * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int required_rate = intel_dp_config_required_rate(crtc_state);
+ int ret;
+
+ if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return 0;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
+ drm_dp_tunnel_name(intel_dp->tunnel),
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ crtc->base.base.id, crtc->base.name,
+ crtc->pipe,
+ kbytes_to_mbits(required_rate));
+
+ ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
+ crtc->pipe, required_rate);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_tunnel_ref_get(intel_dp->tunnel,
+ &crtc_state->dp_tunnel_ref);
+
+ return 0;
+}
+
+/**
+ * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
+ * @state: Atomic state
+ * @crtc_state: state of CRTC of the given DP tunnel stream
+ *
+ * Clear any DP tunnel stream BW requirement set by
+ * intel_dp_tunnel_atomic_compute_stream_bw().
+ */
+void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->dp_tunnel_ref.tunnel)
+ return;
+
+ drm_dp_tunnel_atomic_set_stream_bw(&state->base,
+ crtc_state->dp_tunnel_ref.tunnel,
+ crtc->pipe, 0);
+ drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
+}
+
+/**
+ * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
+ * @state: intel atomic state
+ * @limits: link BW limits
+ *
+ * Check the link configuration for all DP tunnels in @state. If the
+ * configuration is invalid @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ * - 0 if the configuration is valid
+ * - %-EAGAIN, if the configuration is invalid and @limits got updated
+ * with fallback values with which the configuration of all CRTCs in
+ * @state must be recomputed
+ * - Other negative error, if the configuration is invalid without a
+ * fallback possibility, or the check failed for another reason
+ */
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits)
+{
+ u32 failed_stream_mask;
+ int err;
+
+ err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
+ &failed_stream_mask);
+ if (err != -ENOSPC)
+ return err;
+
+ err = intel_link_bw_reduce_bpp(state, limits,
+ failed_stream_mask, "DP tunnel link BW");
+
+ return err ? : -EAGAIN;
+}
+
+static void atomic_decrease_bw(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ const struct drm_dp_tunnel_state *new_tunnel_state;
+ struct drm_dp_tunnel *tunnel;
+ int old_bw;
+ int new_bw;
+
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ tunnel = get_inherited_tunnel(state, crtc);
+ if (!tunnel)
+ tunnel = old_crtc_state->dp_tunnel_ref.tunnel;
+
+ if (!tunnel)
+ continue;
+
+ old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);
+
+ new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+ new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
+
+ if (new_bw >= old_bw)
+ continue;
+
+ drm_dp_tunnel_alloc_bw(tunnel, new_bw);
+ }
+}
+
+static void queue_retry_work(struct intel_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_encoder *encoder;
+
+ encoder = intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (!intel_digital_port_connected(encoder))
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
+ drm_dp_tunnel_name(tunnel),
+ encoder->base.base.id,
+ encoder->base.name);
+
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+}
+
+static void atomic_increase_bw(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ const struct intel_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ struct drm_dp_tunnel_state *tunnel_state;
+ struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
+ int bw;
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (!tunnel)
+ continue;
+
+ tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+
+ bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);
+
+ if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
+ queue_retry_work(state, tunnel, crtc_state);
+ }
+}
+
+/**
+ * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
+ * @state: Atomic state
+ *
+ * Allocate the required BW for all tunnels in @state.
+ */
+void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
+{
+ atomic_decrease_bw(state);
+ atomic_increase_bw(state);
+}
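
A standalone sketch of the decrease-then-increase ordering above: all shrinking streams release BW first, so the shared pool is never transiently over-committed while other streams grow (numbers are made up; not i915 code):

#include <stdio.h>

#define N 2

static int alloc_bw[N] = { 600, 200 };		/* currently allocated per stream */
static const int wanted[N] = { 300, 500 };	/* new requirement per stream */

static void realloc_bw(void)
{
	/* Phase 1: apply all decreases first. */
	for (int i = 0; i < N; i++)
		if (wanted[i] < alloc_bw[i])
			alloc_bw[i] = wanted[i];

	/* Phase 2: apply the increases using the freed-up headroom. */
	for (int i = 0; i < N; i++)
		if (wanted[i] > alloc_bw[i])
			alloc_bw[i] = wanted[i];
}

int main(void)
{
	realloc_bw();
	printf("%d %d\n", alloc_bw[0], alloc_bw[1]);	/* 300 500 */
	return 0;
}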
+
+/**
+ * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
+ * @i915: i915 device object
+ *
+ * Initialize the DP tunnel manager. The tunnel manager will support the
+ * detection/management of DP tunnels on all DP connectors, so the function
+ * must be called after all these connectors have been registered already.
+ *
+ * Return 0 in case of success, a negative error code otherwise.
+ */
+int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+{
+ struct drm_dp_tunnel_mgr *tunnel_mgr;
+ struct drm_connector_list_iter connector_list_iter;
+ struct intel_connector *connector;
+ int dp_connectors = 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
+ for_each_intel_connector_iter(connector, &connector_list_iter) {
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ dp_connectors++;
+ }
+ drm_connector_list_iter_end(&connector_list_iter);
+
+ tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
+ if (IS_ERR(tunnel_mgr))
+ return PTR_ERR(tunnel_mgr);
+
+ i915->display.dp_tunnel_mgr = tunnel_mgr;
+
+ return 0;
+}
+
+/**
+ * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
+ * @i915: i915 device object
+ *
+ * Clean up the DP tunnel manager state.
+ */
+void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
+{
+ drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
+ i915->display.dp_tunnel_mgr = NULL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
new file mode 100644
index 000000000000..08b2cba84af2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_TUNNEL_H__
+#define __INTEL_DP_TUNNEL_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_connector_state;
+struct drm_modeset_acquire_ctx;
+
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+struct intel_link_bw_limits;
+
+#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
+
+int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp);
+void intel_dp_tunnel_destroy(struct intel_dp *intel_dp);
+void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool dpcd_updated);
+void intel_dp_tunnel_suspend(struct intel_dp *intel_dp);
+
+bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp);
+
+void
+intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state);
+
+int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ struct intel_crtc_state *crtc_state);
+void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state);
+
+int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits);
+int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ struct intel_connector *connector);
+
+void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state);
+
+int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
+void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
+
+#else
+
+static inline int
+intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) {}
+static inline void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) {}
+static inline void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool dpcd_updated) {}
+static inline void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) {}
+
+static inline bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
+{
+ return false;
+}
+
+static inline void
+intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) {}
+
+static inline int
+intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ return 0;
+}
+
+static inline void
+intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state) {}
+
+static inline int
+intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return 0;
+}
+
+static inline int
+intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits)
+{
+ return 0;
+}
+
+static inline int
+intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
+ struct intel_dp *intel_dp,
+ struct intel_connector *connector)
+{
+ return 0;
+}
+
+static inline void
+intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) {}
+
+static inline int
+intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+{
+ return 0;
+}
+
+static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {}
+
+#endif /* CONFIG_DRM_I915_DP_TUNNEL */
+
+#endif /* __INTEL_DP_TUNNEL_H__ */
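
The #else branch of this header provides static inline no-op stubs so callers build unchanged when the tunnel support is compiled out. A minimal standalone sketch of that pattern (CONFIG_EXAMPLE_FEATURE is a hypothetical switch, not a real Kconfig option):

#include <stdio.h>

#ifdef CONFIG_EXAMPLE_FEATURE
int feature_init(void);			/* real implementation elsewhere */
#else
static inline int feature_init(void)	/* compiled out: cheap no-op stub */
{
	return 0;
}
#endif

int main(void)
{
	printf("init: %d\n", feature_init());	/* prints 0 when compiled out */
	return 0;
}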
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index ef57dad1a9cb..ff480f171f75 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -109,6 +109,8 @@ struct intel_dpll_mgr {
void (*update_ref_clks)(struct drm_i915_private *i915);
void (*dump_hw_state)(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *hw_state);
+ bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b);
};
static void
@@ -644,6 +646,15 @@ static void ibx_dump_hw_state(struct drm_i915_private *i915,
hw_state->fp1);
}
+static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
+{
+ return a->dpll == b->dpll &&
+ a->dpll_md == b->dpll_md &&
+ a->fp0 == b->fp0 &&
+ a->fp1 == b->fp1;
+}
+
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
@@ -662,6 +673,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
.get_dplls = ibx_get_dpll,
.put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
+ .compare_hw_state = ibx_compare_hw_state,
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
@@ -1220,6 +1232,13 @@ static void hsw_dump_hw_state(struct drm_i915_private *i915,
hw_state->wrpll, hw_state->spll);
}
+static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
+{
+ return a->wrpll == b->wrpll &&
+ a->spll == b->spll;
+}
+
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
@@ -1263,11 +1282,11 @@ static const struct dpll_info hsw_plls[] = {
{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{}
};
@@ -1278,6 +1297,7 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
.put_dplls = intel_put_dpll,
.update_ref_clks = hsw_update_dpll_ref_clks,
.dump_hw_state = hsw_dump_hw_state,
+ .compare_hw_state = hsw_compare_hw_state,
};
struct skl_dpll_regs {
@@ -1929,6 +1949,14 @@ static void skl_dump_hw_state(struct drm_i915_private *i915,
hw_state->cfgcr2);
}
+static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
+{
+ return a->ctrl1 == b->ctrl1 &&
+ a->cfgcr1 == b->cfgcr1 &&
+ a->cfgcr2 == b->cfgcr2;
+}
+
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
@@ -1945,7 +1973,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
static const struct dpll_info skl_plls[] = {
{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
- .flags = INTEL_DPLL_ALWAYS_ON, },
+ .always_on = true, },
{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
@@ -1959,6 +1987,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
.put_dplls = intel_put_dpll,
.update_ref_clks = skl_update_dpll_ref_clks,
.dump_hw_state = skl_dump_hw_state,
+ .compare_hw_state = skl_compare_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
@@ -2392,6 +2421,21 @@ static void bxt_dump_hw_state(struct drm_i915_private *i915,
hw_state->pcsdw12);
}
+static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
+{
+ return a->ebb0 == b->ebb0 &&
+ a->ebb4 == b->ebb4 &&
+ a->pll0 == b->pll0 &&
+ a->pll1 == b->pll1 &&
+ a->pll2 == b->pll2 &&
+ a->pll3 == b->pll3 &&
+ a->pll6 == b->pll6 &&
+ a->pll8 == b->pll8 &&
+ a->pll10 == b->pll10 &&
+ a->pcsdw12 == b->pcsdw12;
+}
+
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
@@ -2413,6 +2457,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.put_dplls = intel_put_dpll,
.update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
+ .compare_hw_state = bxt_compare_hw_state,
};
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
@@ -3308,6 +3353,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct skl_wrpll_params pll_params = {};
@@ -3326,7 +3373,11 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
- icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+ else
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
&port_dpll->hw_state);
@@ -3999,6 +4050,25 @@ static void icl_dump_hw_state(struct drm_i915_private *i915,
hw_state->mg_pll_tdc_coldst_bias);
}
+static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
+{
+ /* FIXME split combo vs. mg more thoroughly */
+ return a->cfgcr0 == b->cfgcr0 &&
+ a->cfgcr1 == b->cfgcr1 &&
+ a->div0 == b->div0 &&
+ a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
+ a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
+ a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
+ a->mg_pll_div0 == b->mg_pll_div0 &&
+ a->mg_pll_div1 == b->mg_pll_div1 &&
+ a->mg_pll_lf == b->mg_pll_lf &&
+ a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
+ a->mg_pll_ssc == b->mg_pll_ssc &&
+ a->mg_pll_bias == b->mg_pll_bias &&
+ a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
+}
+
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
@@ -4023,7 +4093,8 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .is_alt_port_dpll = true, },
{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@@ -4039,6 +4110,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info ehl_plls[] = {
@@ -4056,6 +4128,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
@@ -4068,7 +4141,8 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
static const struct dpll_info tgl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@@ -4086,6 +4160,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info rkl_plls[] = {
@@ -4102,6 +4177,7 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info dg1_plls[] = {
@@ -4119,6 +4195,7 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info adls_plls[] = {
@@ -4136,12 +4213,14 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info adlp_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@@ -4157,6 +4236,7 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
+ .compare_hw_state = icl_compare_hw_state,
};
/**
@@ -4449,13 +4529,31 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ ibx_dump_hw_state(i915, hw_state);
+ }
+}
+
+/**
+ * intel_dpll_compare_hw_state - compare the two states
+ * @i915: i915 drm device
+ * @a: first DPLL hw state
+ * @b: second DPLL hw state
+ *
+ * Compare DPLL hw states @a and @b.
+ *
+ * Returns: true if the states are equal, false if they differ
+ */
+bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+ const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b)
+{
+ if (i915->display.dpll.mgr) {
+ return i915->display.dpll.mgr->compare_hw_state(a, b);
+ } else {
+ /* fallback for platforms that don't use the shared dpll
+ * infrastructure
+ */
+ return ibx_compare_hw_state(a, b);
}
}
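
For reference, a minimal sketch of how a verifier might consume the new hook instead of a raw memcmp() (hypothetical caller, mirroring the readout pattern in verify_single_dpll_state() above; not part of this patch):

	struct intel_dpll_hw_state tmp = {};

	if (intel_dpll_get_hw_state(i915, pll, &tmp) &&
	    !intel_dpll_compare_hw_state(i915, &pll->state.hw_state, &tmp))
		drm_dbg_kms(&i915->drm, "%s: sw/hw DPLL state mismatch\n",
			    pll->info->name);
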
@@ -4465,31 +4563,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_dpll_hw_state dpll_hw_state;
+ struct intel_dpll_hw_state dpll_hw_state = {};
u8 pipe_mask;
bool active;
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
-
- drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
-
active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
- if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
+ if (!pll->info->always_on) {
I915_STATE_WARN(i915, !pll->on && pll->active_mask,
- "pll in active use but not on in sw tracking\n");
+ "%s: pll in active use but not on in sw tracking\n",
+ pll->info->name);
I915_STATE_WARN(i915, pll->on && !pll->active_mask,
- "pll is on but not used by any active pipe\n");
+ "%s: pll is on but not used by any active pipe\n",
+ pll->info->name);
I915_STATE_WARN(i915, pll->on != active,
- "pll on state mismatch (expected %i, found %i)\n",
- pll->on, active);
+ "%s: pll on state mismatch (expected %i, found %i)\n",
+ pll->info->name, pll->on, active);
}
if (!crtc) {
I915_STATE_WARN(i915,
pll->active_mask & ~pll->state.pipe_mask,
- "more active pll users than references: 0x%x vs 0x%x\n",
- pll->active_mask, pll->state.pipe_mask);
+ "%s: more active pll users than references: 0x%x vs 0x%x\n",
+ pll->info->name, pll->active_mask, pll->state.pipe_mask);
return;
}
@@ -4498,21 +4594,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
if (new_crtc_state->hw.active)
I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
+ "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
+ "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
- pipe_mask, pll->state.pipe_mask);
+ "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pll->info->name, pipe_mask, pll->state.pipe_mask);
I915_STATE_WARN(i915,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
+ "%s: pll hw state mismatch\n",
+ pll->info->name);
+}
+
+static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
+ const struct intel_shared_dpll *new_pll)
+{
+ return old_pll && new_pll && old_pll != new_pll &&
+ (old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
@@ -4534,11 +4638,15 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
- "pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->state.pipe_mask);
+ "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
+
+ /* TC ports have both MG/TC and TBT PLL referenced simultaneously */
+ I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
+ new_crtc_state->shared_dpll) &&
+ pll->state.pipe_mask & pipe_mask,
+ "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2e7ea0d8d3ff..cc0e1386309d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -276,15 +276,21 @@ struct dpll_info {
*/
enum intel_display_power_domain power_domain;
-#define INTEL_DPLL_ALWAYS_ON (1 << 0)
/**
- * @flags:
+ * @always_on:
*
- * INTEL_DPLL_ALWAYS_ON
- * Inform the state checker that the DPLL is kept enabled even if
- * not in use by any CRTC.
+ * Inform the state checker that the DPLL is kept enabled even if
+ * not in use by any CRTC.
*/
- u32 flags;
+ bool always_on;
+
+ /**
+ * @is_alt_port_dpll:
+ *
+ * Inform the state checker that the DPLL can be used as a fallback
+ * (for TC->TBT fallback).
+ */
+ bool is_alt_port_dpll;
};
/**
@@ -372,6 +378,9 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915);
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *hw_state);
+bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+ const struct intel_dpll_hw_state *a,
+ const struct intel_dpll_hw_state *b);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
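
With the bit-flag field replaced by plain bools, a platform PLL table sets these properties directly. An illustrative (hypothetical) entry pair, following the .is_alt_port_dpll usage in tgl_plls/adlp_plls above:

	static const struct dpll_info example_plls[] = {
		/* hypothetical: a PLL the hardware keeps running even with no CRTC using it */
		{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0,
		  .always_on = true, },
		/* TBT PLL also serves as the TC->TBT fallback, so flag it for the state checker */
		{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
		  .is_alt_port_dpll = true, },
		{}
	};
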
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 6282ec0fc9b4..169ef38ff188 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -299,6 +299,7 @@ void intel_drrs_crtc_init(struct intel_crtc *crtc)
static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state;
int ret;
@@ -310,6 +311,11 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
+ seq_printf(m, "DRRS capable: %s\n",
+ str_yes_no(crtc_state->has_drrs ||
+ HAS_DOUBLE_BUFFERED_M_N(i915) ||
+ intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 482c28b5c2de..d62e050185e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -325,7 +325,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- unsigned int latency = skl_watermark_max_latency(i915);
+ unsigned int latency = skl_watermark_max_latency(i915, 0);
int vblank_start;
if (crtc_state->vrr.enable) {
@@ -453,6 +453,10 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
if (!HAS_DSB(i915))
return NULL;
+ /* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
+ if (!IS_ENABLED(I915))
+ return NULL;
+
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 083390e5e442..e99c94edfaae 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -57,9 +57,6 @@ struct intel_dsi {
u16 phys; /* ICL DSI */
};
- /* if true, use HS mode, otherwise LP */
- bool hs;
-
/* virtual channel */
int channel;
@@ -93,7 +90,6 @@ struct intel_dsi {
bool bgr_enabled;
u8 pixel_overlap;
- u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 9111e9d46486..c076da75b066 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,11 +30,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
@@ -328,14 +330,21 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->base.status;
+
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *_connector)
{
struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
+ if (!intel_display_driver_check_access(i915))
+ return drm_edid_connector_add_modes(&connector->base);
+
/*
* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
@@ -536,6 +545,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS)
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->base.polled = connector->polled;
drm_connector_init_with_ddc(&i915->drm, &connector->base,
&intel_dvo_connector_funcs,
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index f7e98e1c6470..af7b04539b93 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -54,12 +54,6 @@ struct intel_dvo_dev_ops {
struct i2c_adapter *i2cbus);
/*
- * Called to allow the output a chance to create properties after the
- * RandR objects have been created.
- */
- void (*create_resources)(struct intel_dvo_device *dvo);
-
- /*
* Turn on/off output.
*
* Because none of our dvo drivers support intermediate power levels,
@@ -80,16 +74,6 @@ struct intel_dvo_dev_ops {
struct drm_display_mode *mode);
/*
- * Callback for preparing mode changes on an output
- */
- void (*prepare)(struct intel_dvo_device *dvo);
-
- /*
- * Callback for committing mode changes on an output
- */
- void (*commit)(struct intel_dvo_device *dvo);
-
- /*
* Callback for setting up a video mode after fixups have been made.
*
* This is only called while the output is disabled. The dpms callback
@@ -112,15 +96,6 @@ struct intel_dvo_dev_ops {
bool (*get_hw_state)(struct intel_dvo_device *dev);
/**
- * Query the device for the modes it provides.
- *
- * This function may also update MonInfo, mm_width, and mm_height.
- *
- * \return singly-linked list of modes or NULL if no modes found.
- */
- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
-
- /**
* Clean up driver-specific bits of the output
*/
void (*destroy) (struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 0c0144eaa8fa..3ea6470d6d92 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1849,9 +1849,10 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
fb->modifier, rotation);
if (stride > max_stride) {
- DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
- fb->base.id, stride,
- plane->base.base.id, plane->base.name, max_stride);
+ drm_dbg_kms(plane->base.dev,
+ "[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
+ fb->base.id, stride,
+ plane->base.base.id, plane->base.name, max_stride);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index f17a1afb4929..b453fcbd67da 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1087,18 +1087,7 @@ static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_4_TILED:
- case I915_FORMAT_MOD_X_TILED:
- return true;
- default:
- return false;
- }
+ return true;
}
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 717c3a3237c4..0665f943f65f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -78,8 +78,9 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
- (unsigned long)(mem->io_start +
- i915_gem_object_get_dma_address(obj, 0));
+ (unsigned long)(mem->io.start +
+ i915_gem_object_get_dma_address(obj, 0) -
+ mem->region.start);
info->fix.smem_len = obj->base.size;
} else {
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index e8e8be54143b..cbcd1e91b7be 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -10,12 +10,55 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+struct intel_global_commit {
+ struct kref ref;
+ struct completion done;
+};
+
+static struct intel_global_commit *commit_new(void)
+{
+ struct intel_global_commit *commit;
+
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (!commit)
+ return NULL;
+
+ init_completion(&commit->done);
+ kref_init(&commit->ref);
+
+ return commit;
+}
+
+static void __commit_free(struct kref *kref)
+{
+ struct intel_global_commit *commit =
+ container_of(kref, typeof(*commit), ref);
+
+ kfree(commit);
+}
+
+static struct intel_global_commit *commit_get(struct intel_global_commit *commit)
+{
+ if (commit)
+ kref_get(&commit->ref);
+
+ return commit;
+}
+
+static void commit_put(struct intel_global_commit *commit)
+{
+ if (commit)
+ kref_put(&commit->ref, __commit_free);
+}
+
static void __intel_atomic_global_state_free(struct kref *kref)
{
struct intel_global_state *obj_state =
container_of(kref, struct intel_global_state, ref);
struct intel_global_obj *obj = obj_state->obj;
+ commit_put(obj_state->commit);
+
obj->funcs->atomic_destroy_state(obj, obj_state);
}
@@ -127,6 +170,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
obj_state->obj = obj;
obj_state->changed = false;
+ obj_state->serialized = false;
+ obj_state->commit = NULL;
kref_init(&obj_state->ref);
@@ -239,19 +284,13 @@ int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
- struct intel_atomic_state *state = obj_state->state;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
+ int ret;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
+ ret = intel_atomic_lock_global_state(obj_state);
+ if (ret)
+ return ret;
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- obj_state->changed = true;
+ obj_state->serialized = true;
return 0;
}
@@ -267,3 +306,79 @@ intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
return false;
return true;
}
+
+int
+intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
+{
+ const struct intel_global_state *old_obj_state;
+ struct intel_global_state *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+ new_obj_state, i) {
+ struct intel_global_commit *commit = NULL;
+
+ if (new_obj_state->serialized) {
+ /*
+ * New commit which is going to be completed
+ * after the hardware reprogramming is done.
+ */
+ commit = commit_new();
+ if (!commit)
+ return -ENOMEM;
+ } else if (new_obj_state->changed) {
+ /*
+ * We're going to swap to this state, so carry the
+ * previous commit along, in case it's not yet done.
+ */
+ commit = commit_get(old_obj_state->commit);
+ }
+
+ new_obj_state->commit = commit;
+ }
+
+ return 0;
+}
+
+int
+intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_global_state *old_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_old_global_obj_in_state(state, obj, old_obj_state, i) {
+ struct intel_global_commit *commit = old_obj_state->commit;
+ long ret;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
+ if (ret == 0) {
+ drm_err(&i915->drm, "global state timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+intel_atomic_global_state_commit_done(struct intel_atomic_state *state)
+{
+ const struct intel_global_state *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
+ struct intel_global_commit *commit = new_obj_state->commit;
+
+ if (!new_obj_state->serialized)
+ continue;
+
+ complete_all(&commit->done);
+ }
+}
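
The commit object ties a kref to a completion: a serialized global state allocates a fresh one, a merely-changed state inherits the previous one, and the completion is signalled once the hardware has been reprogrammed. A rough sketch of the expected ordering in the atomic commit path (hypothetical call sites; the actual wiring lives outside this hunk):

	int ret;

	/* hypothetical ordering, assuming something like intel_atomic_commit_tail() */
	ret = intel_atomic_global_state_setup_commit(state);
	if (ret)
		return ret;

	/* block until earlier serialized commits touching the same objects finish */
	intel_atomic_global_state_wait_for_dependencies(state);

	/* ... reprogram the hardware for this state ... */

	/* wake any later commit waiting on this state's global objects */
	intel_atomic_global_state_commit_done(state);
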
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index 5477de8f0b30..6506a8e32972 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -37,11 +37,11 @@ struct intel_global_obj {
(__i)++) \
for_each_if(obj)
-#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_global_objs && \
((obj) = (__state)->global_objs[__i].ptr, \
- (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
(__i)++) \
for_each_if(obj)
@@ -54,11 +54,14 @@ struct intel_global_obj {
(__i)++) \
for_each_if(obj)
+struct intel_global_commit;
+
struct intel_global_state {
struct intel_global_obj *obj;
struct intel_atomic_state *state;
+ struct intel_global_commit *commit;
struct kref ref;
- bool changed;
+ bool changed, serialized;
};
struct __intel_global_objs_state {
@@ -87,6 +90,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state);
int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+int intel_atomic_global_state_setup_commit(struct intel_atomic_state *state);
+void intel_atomic_global_state_commit_done(struct intel_atomic_state *state);
+int intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state);
+
bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index e9e4dcf345f9..d3e03ed5b79c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -155,7 +155,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
const struct gmbus_pin *pins;
size_t size;
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL) {
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
@@ -164,9 +164,6 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
- pins = gmbus_pins_mtp;
- size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 39b3f7c0c77c..9edac27bab26 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -30,7 +30,7 @@
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
-static int intel_conn_to_vcpi(struct drm_atomic_state *state,
+static int intel_conn_to_vcpi(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct drm_dp_mst_topology_mgr *mgr;
@@ -43,7 +43,7 @@ static int intel_conn_to_vcpi(struct drm_atomic_state *state,
return 0;
mgr = connector->port->mgr;
- drm_modeset_lock(&mgr->base.lock, state->acquire_ctx);
+ drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
if (drm_WARN_ON(mgr->dev, !payload))
@@ -68,19 +68,51 @@ out:
* DP MST topology. Though it is not compulsory, security fw should change its
* policy to mark different content_types for different streams.
*/
-static void
-intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+static int
+intel_hdcp_required_content_stream(struct intel_atomic_state *state,
+ struct intel_digital_port *dig_port)
{
+ struct drm_connector_list_iter conn_iter;
+ struct intel_digital_port *conn_dig_port;
+ struct intel_connector *connector;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
bool enforce_type0 = false;
int k;
if (dig_port->hdcp_auth_status)
- return;
+ return 0;
+
+ data->k = 0;
if (!dig_port->hdcp_mst_type1_capable)
enforce_type0 = true;
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->base.status == connector_status_disconnected)
+ continue;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+ continue;
+
+ conn_dig_port = intel_attached_dig_port(connector);
+ if (conn_dig_port != dig_port)
+ continue;
+
+ data->streams[data->k].stream_id =
+ intel_conn_to_vcpi(state, connector);
+ data->k++;
+
+ /* if there is only one active stream */
+ if (dig_port->dp.active_mst_links <= 1)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ return -EINVAL;
+
/*
* Apply common protection level across all streams in DP MST Topology.
* Use highest supported content type for all streams in DP MST Topology.
@@ -88,19 +120,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
for (k = 0; k < data->k; k++)
data->streams[k].stream_type =
enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+ return 0;
}
-static void intel_hdcp_prepare_streams(struct intel_connector *connector)
+static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
+ struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
- if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
- data->streams[0].stream_type = hdcp->content_type;
- } else {
- intel_hdcp_required_content_stream(dig_port);
- }
+ if (intel_encoder_is_mst(intel_attached_encoder(connector)))
+ return intel_hdcp_required_content_stream(state, dig_port);
+
+ data->k = 1;
+ data->streams[0].stream_id = 0;
+ data->streams[0].stream_type = hdcp->content_type;
+
+ return 0;
}
static
@@ -140,7 +178,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
}
/* Is HDCP1.4 capable on Platform and Sink */
-bool intel_hdcp_capable(struct intel_connector *connector)
+bool intel_hdcp_get_capability(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
@@ -150,8 +188,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
if (!shim)
return capable;
- if (shim->hdcp_capable) {
- shim->hdcp_capable(dig_port, &capable);
+ if (shim->hdcp_get_capability) {
+ shim->hdcp_get_capability(dig_port, &capable);
} else {
if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
capable = true;
@@ -160,12 +198,14 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
}
-/* Is HDCP2.2 capable on Platform and Sink */
-bool intel_hdcp2_capable(struct intel_connector *connector)
+/*
+ * Check if the source has all the building blocks ready to make
+ * HDCP 2.2 work
+ */
+static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- bool capable = false;
/* I915 support for HDCP2.2 */
if (!hdcp->hdcp2_supported)
@@ -185,12 +225,40 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
}
mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ return true;
+}
+
+/* Is HDCP2.2 capable on Platform and Sink */
+bool intel_hdcp2_get_capability(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool capable = false;
+
+ if (!intel_hdcp2_prerequisite(connector))
+ return false;
+
/* Sink's capability for HDCP2.2 */
- hdcp->shim->hdcp_2_2_capable(connector, &capable);
+ hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
return capable;
}
+void intel_hdcp_get_remote_capability(struct intel_connector *connector,
+ bool *hdcp_capable,
+ bool *hdcp2_capable)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim->get_remote_hdcp_capability)
+ return;
+
+ hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
+ hdcp2_capable);
+
+ if (!intel_hdcp2_prerequisite(connector))
+ *hdcp2_capable = false;
+}
+
static bool intel_hdcp_in_use(struct drm_i915_private *i915,
enum transcoder cpu_transcoder, enum port port)
{
@@ -347,7 +415,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
default:
drm_err(&i915->drm, "Unknown transcoder %d\n",
cpu_transcoder);
- return -EINVAL;
+ return 0;
}
}
@@ -364,7 +432,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
drm_err(&i915->drm, "Unknown port %d\n", port);
- return -EINVAL;
+ return 0;
}
}
@@ -726,8 +794,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
* whether the display supports HDCP before we write An. For HDMI
* displays, this is not necessary.
*/
- if (shim->hdcp_capable) {
- ret = shim->hdcp_capable(dig_port, &hdcp_capable);
+ if (shim->hdcp_get_capability) {
+ ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
if (ret)
return ret;
if (!hdcp_capable) {
@@ -853,8 +921,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (shim->stream_encryption) {
ret = shim->stream_encryption(connector, true);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
@@ -878,14 +946,14 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
u32 repeater_ctl;
int ret;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
+ connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_encryption) {
ret = hdcp->shim->stream_encryption(connector, false);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
@@ -929,8 +997,8 @@ static int intel_hdcp1_enable(struct intel_connector *connector)
struct intel_hdcp *hdcp = &connector->hdcp;
int i, ret, tries = 3;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
+ connector->base.base.id, connector->base.name);
if (!hdcp_key_loadable(i915)) {
drm_err(&i915->drm, "HDCP key Load is not possible\n");
@@ -1027,8 +1095,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
if (drm_WARN_ON(&i915->drm,
!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
drm_err(&i915->drm,
- "%s:%d HDCP link stopped encryption,%x\n",
- connector->base.name, connector->base.base.id,
+ "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
+ connector->base.base.id, connector->base.name,
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
ret = -ENXIO;
intel_hdcp_update_value(connector,
@@ -1046,8 +1114,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
}
drm_dbg_kms(&i915->drm,
- "[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
+ "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
+ connector->base.base.id, connector->base.name);
ret = _intel_hdcp_disable(connector);
if (ret) {
@@ -1058,15 +1126,9 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- ret = intel_hdcp1_enable(connector);
- if (ret) {
- drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_DESIRED,
- true);
- goto out;
- }
-
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
@@ -1633,6 +1695,12 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
+ if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
+ drm_dbg_kms(&i915->drm,
+ "HDCP1.x or 2.0 Legacy Device Downstream\n");
+ return -EINVAL;
+ }
+
/* Converting and Storing the seq_num_v to local variable as DWORD */
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
@@ -1731,8 +1799,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS)) {
- drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
+ connector->base.base.id, connector->base.name);
ret = -EPERM;
goto link_recover;
}
@@ -1740,8 +1808,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, true);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
@@ -1865,7 +1933,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
return ret;
}
-static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
+ struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -1874,7 +1943,13 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
- intel_hdcp_prepare_streams(connector);
+ ret = intel_hdcp_prepare_streams(state, connector);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Prepare stream failed.(%d)\n",
+ ret);
+ break;
+ }
ret = hdcp2_propagate_stream_management_info(connector);
if (ret) {
@@ -1919,25 +1994,26 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
return ret;
}
-static int _intel_hdcp2_enable(struct intel_connector *connector)
+static int _intel_hdcp2_enable(struct intel_atomic_state *state,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.base.id, connector->base.name,
hdcp->content_type);
- ret = hdcp2_authenticate_and_encrypt(connector);
+ ret = hdcp2_authenticate_and_encrypt(state, connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
hdcp->content_type, ret);
return ret;
}
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
+ connector->base.base.id, connector->base.name,
hdcp->content_type);
hdcp->hdcp2_encrypted = true;
@@ -1953,14 +2029,14 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
+ connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, false);
if (ret) {
- drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
@@ -2032,45 +2108,24 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(&i915->drm,
"HDCP2.2 Downstream topology change\n");
- ret = hdcp2_authenticate_repeater_topology(connector);
- if (!ret) {
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED,
- true);
- goto out;
- }
- drm_dbg_kms(&i915->drm,
- "[%s:%d] Repeater topology auth failed.(%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
} else {
drm_dbg_kms(&i915->drm,
- "[%s:%d] HDCP2.2 link failed, retrying auth\n",
- connector->base.name, connector->base.base.id);
+ "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
+ connector->base.base.id, connector->base.name);
}
ret = _intel_hdcp2_disable(connector, true);
if (ret) {
drm_err(&i915->drm,
- "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id, ret);
+ "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.base.id, connector->base.name, ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
goto out;
}
- ret = _intel_hdcp2_enable(connector);
- if (ret) {
- drm_dbg_kms(&i915->drm,
- "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_DESIRED,
- true);
- goto out;
- }
-
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
@@ -2278,52 +2333,6 @@ int intel_hdcp_init(struct intel_connector *connector,
return 0;
}
-static int
-intel_hdcp_set_streams(struct intel_digital_port *dig_port,
- struct intel_atomic_state *state)
-{
- struct drm_connector_list_iter conn_iter;
- struct intel_digital_port *conn_dig_port;
- struct intel_connector *connector;
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
-
- if (!intel_encoder_is_mst(&dig_port->base)) {
- data->k = 1;
- data->streams[0].stream_id = 0;
- return 0;
- }
-
- data->k = 0;
-
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->base.status == connector_status_disconnected)
- continue;
-
- if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
- continue;
-
- conn_dig_port = intel_attached_dig_port(connector);
- if (conn_dig_port != dig_port)
- continue;
-
- data->streams[data->k].stream_id =
- intel_conn_to_vcpi(&state->base, connector);
- data->k++;
-
- /* if there is only one active stream */
- if (dig_port->dp.active_mst_links <= 1)
- break;
- }
- drm_connector_list_iter_end(&conn_iter);
-
- if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
- return -EINVAL;
-
- return 0;
-}
-
static int _intel_hdcp_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -2341,8 +2350,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
return -ENOENT;
if (!connector->encoder) {
- drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n",
- connector->base.name, connector->base.base.id);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
+ connector->base.base.id, connector->base.name);
return -ENODEV;
}
@@ -2368,25 +2377,18 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
* Considering that HDCP2.2 is more secure than HDCP1.4, if the setup
* is capable of HDCP2.2, it is preferred to use HDCP2.2.
*/
- if (intel_hdcp2_capable(connector)) {
- ret = intel_hdcp_set_streams(dig_port, state);
- if (!ret) {
- ret = _intel_hdcp2_enable(connector);
- if (!ret)
- check_link_interval =
- DRM_HDCP2_CHECK_PERIOD_MS;
- } else {
- drm_dbg_kms(&i915->drm,
- "Set content streams failed: (%d)\n",
- ret);
- }
+ if (intel_hdcp2_get_capability(connector)) {
+ ret = _intel_hdcp2_enable(state, connector);
+ if (!ret)
+ check_link_interval =
+ DRM_HDCP2_CHECK_PERIOD_MS;
}
/*
* When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
* be attempted.
*/
- if (ret && intel_hdcp_capable(connector) &&
+ if (ret && intel_hdcp_get_capability(connector) &&
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = intel_hdcp1_enable(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index a9c784fd9ba5..477f2d2bb120 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -38,8 +38,11 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *i915, enum port port);
-bool intel_hdcp_capable(struct intel_connector *connector);
-bool intel_hdcp2_capable(struct intel_connector *connector);
+bool intel_hdcp_get_capability(struct intel_connector *connector);
+bool intel_hdcp2_get_capability(struct intel_connector *connector);
+void intel_hdcp_get_remote_capability(struct intel_connector *connector,
+ bool *hdcp_capable,
+ bool *hdcp2_capable);
void intel_hdcp_component_init(struct drm_i915_private *i915);
void intel_hdcp_component_fini(struct drm_i915_private *i915);
void intel_hdcp_cleanup(struct intel_connector *connector);
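
Alongside the capability-helper renames, the new remote-capability query reports both HDCP versions in one call. A hedged sketch of a possible caller, e.g. from a connector probe path (hypothetical; not part of this patch):

	bool hdcp_capable = false, hdcp2_capable = false;

	intel_hdcp_get_remote_capability(connector, &hdcp_capable, &hdcp2_capable);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] remote HDCP1.4: %s, HDCP2.2: %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(hdcp_capable), str_yes_no(hdcp2_capable));
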
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 18117b789b16..302bff75b06c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -65,7 +65,7 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
goto out_unmap;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto out_unmap;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
index 8023c85c7fa0..a568a457e532 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -8,6 +8,8 @@
#include "intel_display_reg_defs.h"
+#define TRANS_HDCP(__i915) (DISPLAY_VER(__i915) >= 12)
+
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
@@ -82,7 +84,7 @@
#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
_TRANSB_HDCP_CONF)
#define HDCP_CONF(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_CONF(trans) : \
PORT_HDCP_CONF(port))
@@ -95,7 +97,7 @@
_TRANSA_HDCP_ANINIT, \
_TRANSB_HDCP_ANINIT)
#define HDCP_ANINIT(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANINIT(trans) : \
PORT_HDCP_ANINIT(port))
@@ -105,7 +107,7 @@
#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
_TRANSB_HDCP_ANLO)
#define HDCP_ANLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANLO(trans) : \
PORT_HDCP_ANLO(port))
@@ -115,7 +117,7 @@
#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
_TRANSB_HDCP_ANHI)
#define HDCP_ANHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANHI(trans) : \
PORT_HDCP_ANHI(port))
@@ -126,7 +128,7 @@
_TRANSA_HDCP_BKSVLO, \
_TRANSB_HDCP_BKSVLO)
#define HDCP_BKSVLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_BKSVLO(trans) : \
PORT_HDCP_BKSVLO(port))
@@ -137,7 +139,7 @@
_TRANSA_HDCP_BKSVHI, \
_TRANSB_HDCP_BKSVHI)
#define HDCP_BKSVHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_BKSVHI(trans) : \
PORT_HDCP_BKSVHI(port))
@@ -148,7 +150,7 @@
_TRANSA_HDCP_RPRIME, \
_TRANSB_HDCP_RPRIME)
#define HDCP_RPRIME(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_RPRIME(trans) : \
PORT_HDCP_RPRIME(port))
@@ -159,7 +161,7 @@
_TRANSA_HDCP_STATUS, \
_TRANSB_HDCP_STATUS)
#define HDCP_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_STATUS(trans) : \
PORT_HDCP_STATUS(port))
@@ -200,7 +202,7 @@
#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
#define AUTH_CLR_KEYS REG_BIT(18)
#define HDCP2_AUTH(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH(trans) : \
PORT_HDCP2_AUTH(port))
@@ -211,7 +213,7 @@
_TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
#define HDCP2_CTL(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_CTL(trans) : \
PORT_HDCP2_CTL(port))
@@ -225,7 +227,7 @@
#define LINK_AUTH_STATUS REG_BIT(21)
#define LINK_ENCRYPTION_STATUS REG_BIT(20)
#define HDCP2_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
@@ -247,7 +249,7 @@
#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
#define STREAM_TYPE_STATUS REG_BIT(30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
@@ -263,7 +265,7 @@
_TRANSB_HDCP2_AUTH_STREAM)
#define AUTH_STREAM_TYPE REG_BIT(31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
+ (TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
PORT_HDCP2_AUTH_STREAM(port))
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 39e4f5f7c817..90d2236fede3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -49,6 +49,7 @@
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
@@ -523,10 +524,12 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
0);
/* Wa_14013475917 */
- if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)
- return;
+ if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC))
+ val |= hsw_infoframe_enable(type);
+
+ if (type == DP_SDP_VSC)
+ val |= VSC_DIP_HW_DATA_SW_HEA;
- val |= hsw_infoframe_enable(type);
intel_de_write(dev_priv, ctl_reg, val);
intel_de_posting_read(dev_priv, ctl_reg);
}
@@ -1729,8 +1732,8 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
}
static
-int intel_hdmi_hdcp2_capable(struct intel_connector *connector,
- bool *capable)
+int intel_hdmi_hdcp2_get_capability(struct intel_connector *connector,
+ bool *capable)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
u8 hdcp2_version;
@@ -1759,7 +1762,7 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_2_2_msg = intel_hdmi_hdcp2_write_msg,
.read_2_2_msg = intel_hdmi_hdcp2_read_msg,
.check_2_2_link = intel_hdmi_hdcp2_check_link,
- .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+ .hdcp_2_2_get_capability = intel_hdmi_hdcp2_get_capability,
.protocol = HDCP_PROTOCOL_HDMI,
};
@@ -2503,6 +2506,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(dev_priv))
+ return connector->status;
+
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (DISPLAY_VER(dev_priv) >= 11 &&
@@ -2531,6 +2537,9 @@ intel_hdmi_force(struct drm_connector *connector)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (!intel_display_driver_check_access(i915))
+ return;
+
intel_hdmi_unset_edid(connector);
if (connector->status != connector_status_connected)
@@ -3015,6 +3024,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_connector->base.polled = intel_connector->polled;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 0c0700c6ec66..d9ec349f3c8c 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -177,6 +177,46 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
+static bool detection_work_enabled(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ return i915->display.hotplug.detection_work_enabled;
+}
+
+static bool
+mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (!detection_work_enabled(i915))
+ return false;
+
+ return mod_delayed_work(i915->unordered_wq, work, delay);
+}
+
+static bool
+queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (!detection_work_enabled(i915))
+ return false;
+
+ return queue_delayed_work(i915->unordered_wq, work, delay);
+}
+
+static bool
+queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+{
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (!detection_work_enabled(i915))
+ return false;
+
+ return queue_work(i915->unordered_wq, work);
+}
+
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
@@ -213,9 +253,9 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.reenable_work,
- msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+ mod_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.reenable_work,
+ msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -348,9 +388,9 @@ static void i915_digport_work_func(struct work_struct *work)
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.hotplug_work, 0);
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -467,11 +507,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.retry_bits |= retry;
- spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.hotplug_work,
- msecs_to_jiffies(HPD_RETRY_DELAY));
+ mod_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.hotplug_work,
+ msecs_to_jiffies(HPD_RETRY_DELAY));
+ spin_unlock_irq(&dev_priv->irq_lock);
}
}
@@ -590,7 +630,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*/
if (storm_detected)
intel_hpd_irq_setup(dev_priv);
- spin_unlock(&dev_priv->irq_lock);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -601,8 +640,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(dev_priv,
+ &dev_priv->display.hotplug.hotplug_work, 0);
+
+ spin_unlock(&dev_priv->irq_lock);
}
/**
@@ -710,6 +751,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
cancel_work(&dev_priv->display.hotplug.poll_init_work);
}
+ spin_lock_irq(&dev_priv->irq_lock);
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -718,6 +761,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ continue;
+
connector->base.polled = connector->polled;
if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
@@ -726,6 +772,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
if (enabled)
drm_kms_helper_poll_reschedule(&dev_priv->drm);
@@ -774,8 +822,10 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.poll_init_work);
+ spin_lock_irq(&dev_priv->irq_lock);
+ queue_detection_work(dev_priv,
+ &dev_priv->display.hotplug.poll_init_work);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@@ -803,8 +853,11 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.hotplug.poll_init_work);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ queue_detection_work(dev_priv,
+ &dev_priv->display.hotplug.poll_init_work);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
void intel_hpd_init_early(struct drm_i915_private *i915)
@@ -826,6 +879,20 @@ void intel_hpd_init_early(struct drm_i915_private *i915)
i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}
+static bool cancel_all_detection_work(struct drm_i915_private *i915)
+{
+ bool was_pending = false;
+
+ if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ was_pending = true;
+ if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ was_pending = true;
+ if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ was_pending = true;
+
+ return was_pending;
+}
+
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
@@ -841,9 +908,13 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
- cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
- cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
- cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
+
+ /*
+ * All other work triggered by hotplug events should be canceled by
+ * now.
+ */
+ if (cancel_all_detection_work(dev_priv))
+ drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@@ -873,6 +944,62 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
spin_unlock_irq(&dev_priv->irq_lock);
}
+static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+{
+ bool queue_work = false;
+ enum hpd_pin pin;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (i915->display.hotplug.event_bits ||
+ i915->display.hotplug.retry_bits)
+ queue_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (i915->display.hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_work = true;
+ break;
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(i915->display.hotplug.stats[pin].state);
+ }
+ }
+
+ if (queue_work)
+ queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+}
+
+void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->irq_lock);
+ i915->display.hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(i915);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->irq_lock);
+ i915->display.hotplug.detection_work_enabled = false;
+ spin_unlock_irq(&i915->irq_lock);
+
+ cancel_all_detection_work(i915);
+}
+
+bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&i915->irq_lock, flags);
+ ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irqrestore(&i915->irq_lock, flags);
+
+ return ret;
+}
+
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 424ae5dbf5a0..a17253ddec83 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -30,4 +30,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
+void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
+bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+
#endif /* __INTEL_HOTPLUG_H__ */
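
These declarations gate all hotplug detection work behind a single switch so callers can park it while the hardware is inaccessible and flush missed events afterwards. A hedged sketch of the intended call pattern (hypothetical caller, e.g. a suspend/resume or reset path; not part of this patch):

	/* stop queuing and flush any pending detection work */
	intel_hpd_disable_detection_work(i915);

	/* ... display hardware reset / power transition ... */

	/* re-enable and requeue work for any HPD IRQs missed in between */
	intel_hpd_enable_detection_work(i915);

	/* or, to force a one-off detection pass from some other path: */
	if (!intel_hpd_schedule_detection(i915))
		drm_dbg_kms(&i915->drm, "hotplug detection work not queued\n");
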
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 04f62f27ad74..76076509f771 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -163,12 +163,10 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
(!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
return;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_LNL)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
- hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
@@ -1139,7 +1137,7 @@ static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
xe2lpd_sde_hpd_irq_setup(i915);
- else if (INTEL_PCH_TYPE(i915) >= PCH_MTP)
+ else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
mtp_hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index 9c6d35a405a1..dfd7d5e23f3f 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -6,26 +6,41 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
/**
* intel_link_bw_init_limits - initialize BW limits
- * @i915: device instance
+ * @state: Atomic state
* @limits: link BW limits
*
* Initialize @limits.
*/
-void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_bw_limits *limits)
+void intel_link_bw_init_limits(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe;
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
- for_each_pipe(i915, pipe)
- limits->max_bpp_x16[pipe] = INT_MAX;
+ for_each_pipe(i915, pipe) {
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ intel_crtc_for_pipe(i915, pipe));
+
+ if (state->base.duplicated && crtc_state) {
+ limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
+ if (crtc_state->fec_enable)
+ limits->force_fec_pipes |= BIT(pipe);
+ } else {
+ limits->max_bpp_x16[pipe] = INT_MAX;
+ }
+ }
}
/**
@@ -149,6 +164,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
if (ret)
return ret;
+ ret = intel_dp_tunnel_atomic_check_link(state, limits);
+ if (ret)
+ return ret;
+
ret = intel_fdi_atomic_check_link(state, limits);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index 2cf57307cc24..6b0ccfff59da 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -22,7 +22,7 @@ struct intel_link_bw_limits {
int max_bpp_x16[I915_MAX_PIPES];
};
-void intel_link_bw_init_limits(struct drm_i915_private *i915,
+void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits);
int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 94eece7f63be..caeca3a8442c 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -318,12 +318,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->dsc.compression_enable) {
- drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
- connector->dp.dsc_decompression_enabled = true;
- } else {
- connector->dp.dsc_decompression_enabled = false;
- }
conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
}
}
@@ -775,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
+ struct intel_crtc_state *crtc_state = NULL;
+
if (connector->get_hw_state(connector)) {
- struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
connector->base.dpms = DRM_MODE_DPMS_ON;
@@ -802,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ if (connector->sync_state)
+ connector->sync_state(connector, crtc_state);
+
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 1ce785db6a5e..fcbb083318a7 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -250,11 +250,36 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
+#define OPREGION_SIZE (8 * 1024)
+
+struct intel_opregion {
+ struct drm_i915_private *i915;
+
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ struct opregion_asle_ext *asle_ext;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ struct work_struct asle_work;
+ struct notifier_block acpi_notifier;
+};
+
static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = i915->display.opregion.swsci;
+ struct intel_opregion *opregion = i915->display.opregion;
+ struct opregion_swsci *swsci;
u32 main_function, sub_function;
+ if (!opregion)
+ return -ENODEV;
+
+ swsci = opregion->swsci;
if (!swsci)
return -ENODEV;
@@ -265,11 +290,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((i915->display.opregion.swsci_sbcb_sub_functions &
+ if ((opregion->swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((i915->display.opregion.swsci_gbda_sub_functions &
+ if ((opregion->swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
@@ -280,7 +305,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
- struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
+ struct opregion_swsci *swsci;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 scic, dslp;
u16 swsci_val;
@@ -290,6 +315,8 @@ static int swsci(struct drm_i915_private *dev_priv,
if (ret)
return ret;
+ swsci = dev_priv->display.opregion->swsci;
+
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
if (!dslp) {
@@ -462,7 +489,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->display.opregion.asle;
+ struct opregion_asle *asle = dev_priv->display.opregion->asle;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@@ -584,9 +611,8 @@ static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
- struct drm_i915_private *dev_priv =
- container_of(opregion, struct drm_i915_private, display.opregion);
- struct opregion_asle *asle = dev_priv->display.opregion.asle;
+ struct drm_i915_private *dev_priv = opregion->i915;
+ struct opregion_asle *asle = opregion->asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -632,11 +658,17 @@ static void asle_work(struct work_struct *work)
asle->aslc = aslc_stat;
}
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+bool intel_opregion_asle_present(struct drm_i915_private *i915)
+{
+ return i915->display.opregion && i915->display.opregion->asle;
+}
+
+void intel_opregion_asle_intr(struct drm_i915_private *i915)
{
- if (dev_priv->display.opregion.asle)
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.opregion.asle_work);
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (opregion && opregion->asle)
+ queue_work(i915->unordered_wq, &opregion->asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -692,7 +724,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -731,7 +763,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -761,7 +793,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -839,7 +871,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion = dev_priv->display.opregion;
const struct firmware *fw = NULL;
const char *name = dev_priv->display.params.vbt_firmware;
int ret;
@@ -855,7 +887,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
return ret;
}
- if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
+ if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (opregion->vbt_firmware) {
drm_dbg_kms(&dev_priv->drm,
@@ -879,7 +911,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_opregion *opregion;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
@@ -902,11 +934,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
return -ENOTSUPP;
}
+ opregion = kzalloc(sizeof(*opregion), GFP_KERNEL);
+ if (!opregion)
+ return -ENOMEM;
+
+ opregion->i915 = dev_priv;
+ dev_priv->display.opregion = opregion;
+
INIT_WORK(&opregion->asle_work, asle_work);
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
- if (!base)
- return -ENOMEM;
+ if (!base) {
+ err = -ENOMEM;
+ goto err_memremap;
+ }
memcpy(buf, base, sizeof(buf));
@@ -916,7 +957,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
goto err_out;
}
opregion->header = base;
- opregion->lid_state = base + ACPI_CLID;
drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
opregion->header->over.major,
@@ -994,7 +1034,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
- if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
drm_dbg_kms(&dev_priv->drm,
"Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
@@ -1019,7 +1059,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt_size = (mboxes & MBOX_ASLE_EXT) ?
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
- if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
drm_dbg_kms(&dev_priv->drm,
"Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
@@ -1034,6 +1074,10 @@ out:
err_out:
memunmap(base);
+err_memremap:
+ kfree(opregion);
+ dev_priv->display.opregion = NULL;
+
return err;
}
@@ -1106,12 +1150,12 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
const struct drm_edid *drm_edid;
const void *edid;
int len;
- if (!opregion->asle_ext)
+ if (!opregion || !opregion->asle_ext)
return NULL;
edid = opregion->asle_ext->bddc;
@@ -1132,10 +1176,28 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
+const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+{
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (!opregion || !opregion->vbt)
+ return NULL;
+
+ if (size)
+ *size = opregion->vbt_size;
+
+ return opregion->vbt;
+}
+
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
- struct opregion_header *header = opregion->header;
+ struct intel_opregion *opregion = i915->display.opregion;
+ struct opregion_header *header;
+
+ if (!opregion)
+ return false;
+
+ header = opregion->header;
if (!header || header->over.major < 2 ||
(header->over.major == 2 && header->over.minor < 3))
@@ -1146,9 +1208,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
void intel_opregion_register(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
if (opregion->acpi) {
@@ -1162,7 +1224,7 @@ void intel_opregion_register(struct drm_i915_private *i915)
static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
if (opregion->acpi) {
intel_didl_outputs(i915);
@@ -1188,9 +1250,9 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915)
void intel_opregion_resume(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
if (HAS_DISPLAY(i915))
@@ -1201,12 +1263,12 @@ void intel_opregion_resume(struct drm_i915_private *i915)
static void intel_opregion_suspend_display(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
- cancel_work_sync(&i915->display.opregion.asle_work);
+ cancel_work_sync(&opregion->asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
@@ -1214,9 +1276,9 @@ static void intel_opregion_suspend_display(struct drm_i915_private *i915)
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
intel_opregion_notify_adapter(i915, state);
@@ -1227,11 +1289,11 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
void intel_opregion_unregister(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
intel_opregion_suspend(i915, PCI_D1);
- if (!opregion->header)
+ if (!opregion)
return;
if (opregion->acpi_notifier.notifier_call) {
@@ -1242,26 +1304,36 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
void intel_opregion_cleanup(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->display.opregion;
+ struct intel_opregion *opregion = i915->display.opregion;
- if (!opregion->header)
+ if (!opregion)
return;
- /* just clear all opregion memory pointers now */
memunmap(opregion->header);
- if (opregion->rvda) {
+ if (opregion->rvda)
memunmap(opregion->rvda);
- opregion->rvda = NULL;
- }
- if (opregion->vbt_firmware) {
- kfree(opregion->vbt_firmware);
- opregion->vbt_firmware = NULL;
- }
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
- opregion->asle_ext = NULL;
- opregion->vbt = NULL;
- opregion->lid_state = NULL;
+ kfree(opregion->vbt_firmware);
+ kfree(opregion);
+ i915->display.opregion = NULL;
+}
+
+static int intel_opregion_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (opregion)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_opregion);
+
+void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
+ i915, &intel_opregion_fops);
}
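With the opregion struct now allocated at setup time instead of being embedded in the device structure, every consumer first checks the pointer and teardown frees the allocation. A rough sketch of that allocate/guard/teardown lifetime with placeholder names, not the real i915 entry points; the actual setup publishes the pointer before mapping and unwinds it on the err_memremap path, so only the net lifetime is shown here:

    #include <linux/io.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct opregion_ctx {
            void *header;           /* memremap()ed ACPI OpRegion */
            void *vbt_firmware;     /* optional kmemdup()ed override */
    };

    struct dev_ctx {
            struct opregion_ctx *opregion;
    };

    static int opregion_setup(struct dev_ctx *dev, resource_size_t asls, size_t size)
    {
            struct opregion_ctx *op;

            op = kzalloc(sizeof(*op), GFP_KERNEL);
            if (!op)
                    return -ENOMEM;

            op->header = memremap(asls, size, MEMREMAP_WB);
            if (!op->header) {
                    kfree(op);
                    return -ENOMEM;
            }

            dev->opregion = op;     /* users NULL-check this pointer */
            return 0;
    }

    static void opregion_cleanup(struct dev_ctx *dev)
    {
            struct opregion_ctx *op = dev->opregion;

            if (!op)                /* setup failed or never ran */
                    return;

            memunmap(op->header);
            kfree(op->vbt_firmware);
            kfree(op);
            dev->opregion = NULL;
    }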
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index fd2ea8ef0fa2..0bec224f711f 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -25,38 +25,13 @@
#ifndef _INTEL_OPREGION_H_
#define _INTEL_OPREGION_H_
-#include <linux/workqueue.h>
#include <linux/pci.h>
+#include <linux/types.h>
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-struct opregion_asle_ext;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- u32 swsci_gbda_sub_functions;
- u32 swsci_sbcb_sub_functions;
- struct opregion_asle *asle;
- struct opregion_asle_ext *asle_ext;
- void *rvda;
- void *vbt_firmware;
- const void *vbt;
- u32 vbt_size;
- u32 *lid_state;
- struct work_struct asle_work;
- struct notifier_block acpi_notifier;
-};
-
-#define OPREGION_SIZE (8 * 1024)
-
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
@@ -69,6 +44,7 @@ void intel_opregion_resume(struct drm_i915_private *dev_priv);
void intel_opregion_suspend(struct drm_i915_private *dev_priv,
pci_power_t state);
+bool intel_opregion_asle_present(struct drm_i915_private *i915);
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
@@ -77,8 +53,12 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
+const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
+
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+void intel_opregion_debugfs_register(struct drm_i915_private *i915);
+
#else /* CONFIG_ACPI*/
static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
@@ -107,6 +87,11 @@ static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
{
}
+static inline bool intel_opregion_asle_present(struct drm_i915_private *i915)
+{
+ return false;
+}
+
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
@@ -134,11 +119,21 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline const void *
+intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+{
+ return NULL;
+}
+
static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
return false;
}
+static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+{
+}
+
#endif /* CONFIG_ACPI */
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 0d8e5320a4f8..073ea3166c36 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -37,6 +37,7 @@
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_lvds_regs.h"
@@ -683,6 +684,9 @@ intel_panel_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index a55c09cbd0e4..ada1792df5b3 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -3,9 +3,11 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -13,20 +15,21 @@
#include "intel_plane_initial.h"
static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
+intel_reuse_initial_plane_obj(struct intel_crtc *this,
+ const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
+ struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
+ const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@@ -34,7 +37,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
*vma = plane_state->ggtt_vma;
return true;
@@ -44,12 +47,100 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
return false;
}
+static bool
+initial_plane_phys_lmem(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct intel_memory_region *mem;
+ dma_addr_t dma_addr;
+ gen8_pte_t pte;
+ u32 base;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+
+ gte += base / I915_GTT_PAGE_SIZE;
+
+ pte = ioread64(gte);
+ if (!(pte & GEN12_GGTT_PTE_LM)) {
+ drm_err(&i915->drm,
+ "Initial plane programming missing PTE_LM bit\n");
+ return false;
+ }
+
+ dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
+
+ if (IS_DGFX(i915))
+ mem = i915->mm.regions[INTEL_REGION_LMEM_0];
+ else
+ mem = i915->mm.stolen_region;
+ if (!mem) {
+ drm_dbg_kms(&i915->drm,
+ "Initial plane memory region not initialized\n");
+ return false;
+ }
+
+ /*
+ * On lmem we don't currently expect this to
+ * ever be placed in the stolen portion.
+ */
+ if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
+ drm_err(&i915->drm,
+ "Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
+ &dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
+ return false;
+ }
+
+ drm_dbg(&i915->drm,
+ "Using dma_addr=%pa, based on initial plane programming\n",
+ &dma_addr);
+
+ plane_config->phys_base = dma_addr - mem->region.start;
+ plane_config->mem = mem;
+
+ return true;
+}
+
+static bool
+initial_plane_phys_smem(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct intel_memory_region *mem;
+ u32 base;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+
+ mem = i915->mm.stolen_region;
+ if (!mem) {
+ drm_dbg_kms(&i915->drm,
+ "Initial plane memory region not initialized\n");
+ return false;
+ }
+
+ /* FIXME get and validate the dma_addr from the PTE */
+ plane_config->phys_base = base;
+ plane_config->mem = mem;
+
+ return true;
+}
+
+static bool
+initial_plane_phys(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return initial_plane_phys_lmem(i915, plane_config);
+ else
+ return initial_plane_phys_smem(i915, plane_config);
+}
+
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
+ struct drm_mm_node orig_mm = {};
struct i915_vma *vma;
resource_size_t phys_base;
u32 base, size;
@@ -58,45 +149,13 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- if (IS_DGFX(i915)) {
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
- gen8_pte_t pte;
-
- gte += base / I915_GTT_PAGE_SIZE;
-
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
- drm_err(&i915->drm,
- "Initial plane programming missing PTE_LM bit\n");
- return NULL;
- }
-
- phys_base = pte & I915_GTT_PAGE_MASK;
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
-
- /*
- * We don't currently expect this to ever be placed in the
- * stolen portion.
- */
- if (phys_base >= resource_size(&mem->region)) {
- drm_err(&i915->drm,
- "Initial plane programming using invalid range, phys_base=%pa\n",
- &phys_base);
- return NULL;
- }
-
- drm_dbg(&i915->drm,
- "Using phys_base=%pa, based on initial plane programming\n",
- &phys_base);
- } else {
- phys_base = base;
- mem = i915->mm.stolen_region;
- }
-
- if (!mem)
+ if (!initial_plane_phys(i915, plane_config))
return NULL;
+ phys_base = plane_config->phys_base;
+ mem = plane_config->mem;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
mem->min_page_size);
size -= base;
@@ -108,14 +167,19 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->dsm.usable_size)
+ size * 2 > i915->dsm.usable_size) {
+ drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
+ }
obj = i915_gem_object_create_region_at(mem, phys_base, size,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
+ mem->region.name);
return NULL;
+ }
/*
* Mark it WT ahead of time to avoid changing the
@@ -139,23 +203,66 @@ initial_plane_vma(struct drm_i915_private *i915,
goto err_obj;
}
+ /*
+ * MTL GOP likes to place the framebuffer high up in ggtt,
+ * which can cause problems for ggtt_reserve_guc_top().
+ * Try to pin it to a low ggtt address instead to avoid that.
+ */
+ base = 0;
+
+ if (base != plane_config->base) {
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ int ret;
+
+ /*
+ * Make sure the original and new locations
+ * can't overlap. That would corrupt the original
+ * PTEs which are still being used for scanout.
+ */
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm,
+ size, plane_config->base,
+ I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
+ if (ret)
+ goto err_obj;
+ }
+
vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err_obj;
+retry:
pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
- if (HAS_GMCH(i915))
+ if (!i915_gem_object_is_lmem(obj))
pinctl |= PIN_MAPPABLE;
- if (i915_vma_pin(vma, 0, 0, pinctl))
+ if (i915_vma_pin(vma, 0, 0, pinctl)) {
+ if (drm_mm_node_allocated(&orig_mm)) {
+ drm_mm_remove_node(&orig_mm);
+ /*
+ * Try again, but this time pin
+ * it to its original location.
+ */
+ base = plane_config->base;
+ goto retry;
+ }
goto err_obj;
+ }
if (i915_gem_object_is_tiled(obj) &&
!i915_vma_is_map_and_fenceable(vma))
goto err_obj;
+ if (drm_mm_node_allocated(&orig_mm))
+ drm_mm_remove_node(&orig_mm);
+
+ drm_dbg_kms(&i915->drm,
+ "Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
+ i915_ggtt_offset(vma), plane_config->base);
+
return vma;
err_obj:
+ if (drm_mm_node_allocated(&orig_mm))
+ drm_mm_remove_node(&orig_mm);
i915_gem_object_put(obj);
return NULL;
}
@@ -210,10 +317,11 @@ err_vma:
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+ struct intel_initial_plane_config plane_configs[])
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -239,7 +347,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
- if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ if (intel_reuse_initial_plane_obj(crtc, plane_configs, &fb, &vma))
goto valid_fb;
/*
@@ -302,25 +410,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+void intel_initial_plane_config(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_initial_plane_config plane_config = {};
+ struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
+ if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+ continue;
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
- plane_config_fini(&plane_config);
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, plane_configs);
+
+ if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ plane_config_fini(plane_config);
+ }
}
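initial_plane_phys_lmem() recovers the DMA address of the GOP-programmed framebuffer by reading the GGTT PTE back and validating it against the expected memory region. A stripped-down sketch of that decode; the bit position and mask below are illustrative placeholders, not the exact GEN12 PTE layout:

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* Placeholder PTE layout: local-memory flag plus a page-aligned address field. */
    #define PTE_LM          BIT_ULL(1)
    #define PTE_ADDR_MASK   GENMASK_ULL(45, 12)

    /* Returns 0 and fills *dma_addr on success, -EINVAL otherwise. */
    static int decode_initial_fb_pte(u64 __iomem *gsm, u32 ggtt_offset,
                                     u32 page_size, u64 *dma_addr)
    {
            u64 pte = readq(gsm + ggtt_offset / page_size);

            if (!(pte & PTE_LM))    /* the BIOS fb must live in local memory */
                    return -EINVAL;

            *dma_addr = pte & PTE_ADDR_MASK;
            return 0;
    }

The later pinning step then reserves the framebuffer's original GGTT range with i915_gem_gtt_reserve() before trying to pin at offset 0, so the relocated mapping can never overlap the PTEs still feeding scanout; if the low pin fails, the reservation is dropped and the original offset is retried.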
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
index c7e35ab3182b..64ab95239cd4 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.h
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
-struct intel_crtc;
+struct drm_i915_private;
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
+void intel_initial_plane_config(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index a8fa3a20990e..2d65a538f83e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -366,7 +366,7 @@ static bool intel_pps_is_valid(struct intel_dp *intel_dp)
if (intel_dp->pps.pps_idx == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) < PCH_MTP)
+ INTEL_PCH_TYPE(i915) <= PCH_ADP)
return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 57bbf3e3af92..6927785fd6ff 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -173,6 +173,12 @@
* irrelevant for normal operation.
*/
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -528,7 +534,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp_get_sink_sync_latency(intel_dp);
if (DISPLAY_VER(i915) >= 9 &&
- intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
+ intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
bool alpm = intel_dp_get_alpm_status(intel_dp);
@@ -560,11 +566,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->psr_dpcd[0])
_psr_init_dpcd(intel_dp);
- if (intel_dp->psr.sink_psr2_support) {
- intel_dp->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
+ if (intel_dp->psr.sink_psr2_support)
intel_dp_get_su_granularity(intel_dp);
- }
}
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
@@ -604,6 +607,18 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_ctl);
}
+static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (DISPLAY_VER(i915) >= 20 &&
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
+ !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
+ return true;
+
+ return false;
+}
+
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -619,6 +634,8 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ if (psr2_su_region_et_valid(intel_dp))
+ dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
} else {
if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
@@ -762,8 +779,8 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
- return intel_dp->psr.io_wake_lines < 9 &&
- intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
+ return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
+ intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
@@ -800,6 +817,7 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
@@ -841,17 +859,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
- tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[psr->alpm_parameters.io_wake_lines -
+ TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
- tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(dev_priv) >= 12) {
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
- val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
- val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -869,6 +888,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
}
+ if (psr2_su_region_et_valid(intel_dp))
+ val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
+
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
@@ -1031,6 +1053,9 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (psr2_su_region_et_valid(intel_dp))
+ crtc_state->enable_psr2_su_region_et = true;
+
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -1101,10 +1126,34 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
-static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int check_entry_lines;
+
+ if (DISPLAY_VER(i915) < 20)
+ return true;
+
+ /* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
+ check_entry_lines = 2 +
+ intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
+
+ if (check_entry_lines > 15)
+ return false;
+
+ if (i915->display.params.psr_safest_params)
+ check_entry_lines = 15;
+
+ intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
+
+ return true;
+}
+
+static bool _compute_alpm_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
u8 max_wake_lines;
@@ -1115,6 +1164,8 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
* it is not enough -> use 45 us.
*/
fast_wake_time = 45;
+
+ /* TODO: Check how we can use ALPM_CTL fast wake extended field */
max_wake_lines = 12;
} else {
io_wake_time = 50;
@@ -1131,12 +1182,15 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
fast_wake_lines > max_wake_lines)
return false;
+ if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
+ return false;
+
if (i915->display.params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+ intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@@ -1268,7 +1322,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ if (!_compute_alpm_params(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, Unable to use long enough wake times\n");
return false;
@@ -1377,10 +1431,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
-
- crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
- intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1504,6 +1554,21 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
wa_16013835468_bit_get(intel_dp), 0);
}
+static void lnl_alpm_configure(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (DISPLAY_VER(dev_priv) < 20)
+ return;
+
+ intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
+ ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
+ ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
+}
+
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1569,6 +1634,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
+ lnl_alpm_configure(intel_dp);
+
/*
* Wa_16013835468
* Wa_14015648006
@@ -1634,7 +1701,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- struct intel_encoder *encoder = &dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
@@ -1662,7 +1728,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
@@ -1951,7 +2016,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
- struct drm_rect *clip, bool full_update)
+ bool full_update)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1966,17 +2031,21 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
goto exit;
}
- if (clip->y1 == -1)
+ if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
- drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ crtc_state->psr2_su_area.y1 % 4 ||
+ crtc_state->psr2_su_area.y2 % 4);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
+ crtc_state->psr2_su_area.y1 / 4 + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
+ crtc_state->psr2_su_area.y2 / 4 + 1);
}
exit:
crtc_state->psr2_man_track_ctl = val;
@@ -2002,8 +2071,7 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
-static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
- struct drm_rect *pipe_clip)
+static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
@@ -2016,9 +2084,32 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
else
y_alignment = crtc_state->su_y_granularity;
- pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
- if (pipe_clip->y2 % y_alignment)
- pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
+ crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
+ if (crtc_state->psr2_su_area.y2 % y_alignment)
+ crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
+ y_alignment) + 1) * y_alignment;
+}
+
+/*
+ * When early transport is in use we need to extend SU area to cover
+ * cursor fully when cursor is in SU area.
+ */
+static void
+intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *cursor_state)
+{
+ struct drm_rect inter;
+
+ if (!crtc_state->enable_psr2_su_region_et ||
+ !cursor_state->uapi.visible)
+ return;
+
+ inter = crtc_state->psr2_su_area;
+ if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
+ return;
+
+ clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
+ &crtc_state->pipe_src);
}
/*
@@ -2061,8 +2152,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
- struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane_state *new_plane_state, *old_plane_state,
+ *cursor_plane_state = NULL;
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -2075,6 +2166,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
goto skip_sel_fetch_set_loop;
}
+ crtc_state->psr2_su_area.x1 = 0;
+ crtc_state->psr2_su_area.y1 = -1;
+ crtc_state->psr2_su_area.x2 = INT_MAX;
+ crtc_state->psr2_su_area.y2 = -1;
+
/*
* Calculate minimal selective fetch area of each plane and calculate
* the pipe damaged area.
@@ -2109,14 +2205,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area,
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
}
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area,
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
}
continue;
@@ -2124,7 +2220,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
/* If alpha changed mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area,
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
continue;
}
@@ -2141,7 +2237,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
- clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
+ clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
+
+ /*
+ * The cursor plane's new state is stored so the SU area can be
+ * adjusted to cover the cursor fully.
+ */
+ if (plane->id == PLANE_CURSOR)
+ cursor_plane_state = new_plane_state;
}
/*
@@ -2150,7 +2253,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
* should identify cases where this happens and fix the area
* calculation for those.
*/
- if (pipe_clip.y1 == -1) {
+ if (crtc_state->psr2_su_area.y1 == -1) {
drm_info_once(&dev_priv->drm,
"Selective fetch area calculation failed in pipe %c\n",
pipe_name(crtc->pipe));
@@ -2164,13 +2267,17 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
crtc_state->splitter.enable)
- pipe_clip.y1 = 0;
+ crtc_state->psr2_su_area.y1 = 0;
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
return ret;
- intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
+ /* Adjust the SU area to cover the cursor fully as needed */
+ if (cursor_plane_state)
+ intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
+
+ intel_psr2_sel_fetch_pipe_alignment(crtc_state);
/*
* Now that we have the pipe damaged area check if it intersect with
@@ -2185,7 +2292,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
!new_plane_state->uapi.visible)
continue;
- inter = pipe_clip;
+ inter = crtc_state->psr2_su_area;
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
sel_fetch_area->y1 = -1;
@@ -2230,7 +2337,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
- psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
+ psr2_man_trk_ctl_calc(crtc_state, full_update);
return 0;
}
@@ -2776,9 +2883,6 @@ void intel_psr_init(struct intel_dp *intel_dp)
if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
return;
- if (!intel_dp_is_edp(intel_dp))
- intel_psr_init_dpcd(intel_dp);
-
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms have a instance of PSR registers per transcoder but
@@ -2799,6 +2903,9 @@ void intel_psr_init(struct intel_dp *intel_dp)
else
intel_dp->psr.source_support = true;
+ /* Disable early transport for now */
+ intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
+
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
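_lnl_compute_alpm_params() sizes the ALPM entry check as 2 + ceil(5 us / tline) and rejects anything above 15 lines. A quick worked example of that arithmetic, assuming 1080p-class timings rather than any particular panel:

    /*
     * Integer form of ceil(us / tline), mirroring the driver's
     * intel_usecs_to_scanlines(): DIV_ROUND_UP(us * clock_khz, 1000 * htotal).
     */
    static unsigned int usecs_to_scanlines(unsigned int usecs,
                                           unsigned int clock_khz,
                                           unsigned int htotal)
    {
            return (usecs * clock_khz + 1000 * htotal - 1) / (1000 * htotal);
    }

    /*
     * Assumed example timings: 148500 kHz pixel clock, htotal 2200.
     * usecs_to_scanlines(5, 148500, 2200) == 1, so
     * check_entry_lines = 2 + 1 = 3, comfortably under the 15-line cap
     * (psr_safest_params would force the full 15 instead).
     */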
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 143e0595c097..cde781df84d5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,12 +21,6 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
-#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
- (intel_dp)->psr.source_support)
-
-#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
- (intel_dp)->psr.source_panel_replay_support)
-
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
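The early-transport handling added in intel_psr.c grows the selective-update area whenever the cursor overlaps it, so the cursor is always fetched whole. A reduced sketch of that rectangle logic built on the DRM rect helpers; the wrapper name is illustrative, and the real code also clips the result against the pipe source size:

    #include <linux/minmax.h>
    #include <drm/drm_rect.h>

    /*
     * Grow @su_area vertically so it fully covers @cursor, but only when
     * early transport is in use and the two rectangles overlap at all.
     */
    static void extend_su_area_for_cursor(struct drm_rect *su_area,
                                          const struct drm_rect *cursor,
                                          bool early_transport)
    {
            struct drm_rect inter = *su_area;

            if (!early_transport)
                    return;

            if (!drm_rect_intersect(&inter, cursor))
                    return;         /* cursor is completely outside the SU area */

            su_area->y1 = min(su_area->y1, cursor->y1);
            su_area->y2 = max(su_area->y2, cursor->y2);
    }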
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index efe4306b37e0..8427a736f639 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -159,6 +159,7 @@
#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 0)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 1)
+#define LNL_EDP_PSR2_SU_REGION_ET_ENABLE REG_BIT(27)
#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
#define EDP_MAX_SU_DISABLE_TIME_MASK REG_GENMASK(24, 20)
@@ -245,6 +246,11 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
+/* PSR2 Early transport */
+#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
+
+#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A)
+
#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
#define _SEL_FETCH_PLANE_BASE_3_A 0x708D0
@@ -290,4 +296,61 @@
_SEL_FETCH_PLANE_OFFSET_1_A - \
_SEL_FETCH_PLANE_BASE_1_A)
+#define _ALPM_CTL_A 0x60950
+#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A)
+#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
+#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30)
+#define ALPM_CTL_LOBF_ENABLE REG_BIT(29)
+#define ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE REG_BIT(28)
+#define ALPM_CTL_KEEP_FEC_ENABLE_FOR_AUX_WAKE_SLEEP REG_BIT(27)
+#define ALPM_CTL_RESTORE_OCCURED REG_BIT(26)
+#define ALPM_CTL_RESTORE_TO_SLEEP REG_BIT(25)
+#define ALPM_CTL_RESTORE_TO_DEEP_SLEEP REG_BIT(24)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(23, 21)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 0)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_128_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 1)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_256_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 2)
+#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_512_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 3)
+#define ALPM_CTL_AUX_WAKE_SLEEP_HOLD_ENABLE REG_BIT(20)
+#define ALPM_CTL_ALPM_ENTRY_CHECK_MASK REG_GENMASK(19, 16)
+#define ALPM_CTL_ALPM_ENTRY_CHECK(val) REG_FIELD_PREP(ALPM_CTL_ALPM_ENTRY_CHECK_MASK, val)
+#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK REG_GENMASK(13, 8)
+#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
+#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
+#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
+#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
+
+#define _ALPM_CTL2_A 0x60954
+#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A)
+#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24)
+#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val)
+#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16)
+#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION(val) REG_FIELD_PREP(ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK, val)
+#define ALPM_CTL2_NUMBER_OF_LTTPR_MASK REG_GENMASK(15, 12)
+#define ALPM_CTL2_NUMBER_OF_LTTPR(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_OF_LTTPR_MASK, val)
+#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(10, 8)
+#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME(val) REG_FIELD_PREP(ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK, val)
+#define ALPM_CTL2_FEC_DECODE_EN_POSITION_AFTER_WAKE_SR REG_BIT(4)
+#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK REG_GENMASK(2, 0)
+#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val)
+
+#define _PORT_ALPM_CTL_A 0x16fa2c
+#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A)
+#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val)
+#define PORT_ALPM_CTL_SILENCE_PERIOD_MASK REG_GENMASK(7, 0)
+#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val)
+
+#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
+#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
+#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
+
#endif /* __INTEL_PSR_REGS_H__ */
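ALPM_CTL_EXTENDED_FAST_WAKE_TIME() subtracts the 5-line minimum before packing the value into bits 13:8. A quick sanity check of that encoding, as plain arithmetic rather than driver code:

    /* FIELD_PREP-style packing: the biased value lands in bits 13:8. */
    #define FAST_WAKE_MIN_LINES     5
    #define FAST_WAKE_SHIFT         8

    static inline unsigned int fast_wake_field(unsigned int lines)
    {
            return (lines - FAST_WAKE_MIN_LINES) << FAST_WAKE_SHIFT;
    }

    /* fast_wake_field(7) == 0x200: the raw value 2 placed in bits 13:8. */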
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index acc6b6804105..5f9e748adc89 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -44,6 +44,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -251,6 +252,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct i2c_msg msgs[] = {
{
.addr = intel_sdvo->slave_addr,
@@ -270,7 +272,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
- DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ drm_dbg_kms(&i915->drm, "i2c transfer returned %d\n", ret);
return false;
}
@@ -436,7 +438,8 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
- DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
+ drm_dbg_kms(&dev_priv->drm, "%s: W: %02X %s\n", SDVO_NAME(intel_sdvo),
+ cmd, buffer);
}
static const char * const cmd_status_names[] = {
@@ -461,6 +464,7 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 *buf, status;
struct i2c_msg *msgs;
int i, ret = true;
@@ -510,13 +514,13 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
else
ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ drm_dbg_kms(&i915->drm, "I2c transfer returned %d\n", ret);
ret = false;
goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
- DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ drm_dbg_kms(&i915->drm, "I2c transfer returned %d/%d\n", ret, i+3);
ret = false;
}
@@ -603,12 +607,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
- DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
+ drm_dbg_kms(&dev_priv->drm, "%s: R: %s\n",
+ SDVO_NAME(intel_sdvo), buffer);
return true;
log_fail:
- DRM_DEBUG_KMS("%s: R: ... failed %s\n",
- SDVO_NAME(intel_sdvo), buffer);
+ drm_dbg_kms(&dev_priv->drm, "%s: R: ... failed %s\n",
+ SDVO_NAME(intel_sdvo), buffer);
return false;
}
@@ -757,7 +762,7 @@ static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
}
static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_dtd *dtd)
+ struct intel_sdvo_dtd *dtd)
{
return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
@@ -925,8 +930,8 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPP_ENCODE,
- &encode, sizeof(encode));
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
}
static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -1003,6 +1008,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned int if_index, u8 tx_rate,
const u8 *data, unsigned int length)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tmp[8];
int i;
@@ -1015,8 +1021,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
- if_index, length, hbuf_size);
+ drm_dbg_kms(&i915->drm,
+ "writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
if (hbuf_size < length)
return false;
@@ -1041,6 +1048,7 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
unsigned int if_index,
u8 *data, unsigned int length)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tx_rate, av_split;
int i;
@@ -1070,8 +1078,9 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
- if_index, length, hbuf_size);
+ drm_dbg_kms(&i915->drm,
+ "reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
hbuf_size = min_t(unsigned int, length, hbuf_size);
@@ -1150,6 +1159,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@@ -1161,7 +1171,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
sdvo_data, sizeof(sdvo_data));
if (len < 0) {
- DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+ drm_dbg_kms(&i915->drm, "failed to read AVI infoframe\n");
return;
} else if (len == 0) {
return;
@@ -1172,13 +1182,14 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
if (ret) {
- DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+ drm_dbg_kms(&i915->drm, "Failed to unpack AVI infoframe\n");
return;
}
if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
- DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
- frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+ drm_dbg_kms(&i915->drm,
+ "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
}
static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
@@ -1209,7 +1220,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_tv_format format;
u32 format_map;
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@@ -1347,6 +1358,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
@@ -1359,7 +1371,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
- DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+ drm_dbg_kms(&i915->drm, "forcing bpc to 8 for SDVO\n");
/* FIXME: Don't increase pipe_bpp */
pipe_config->pipe_bpp = 8*3;
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -1438,7 +1450,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad AVI infoframe\n");
+ drm_dbg_kms(&i915->drm, "bad AVI infoframe\n");
return -EINVAL;
}
@@ -1915,8 +1927,8 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
*/
if (success && !input1) {
drm_dbg_kms(&dev_priv->drm,
- "First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
+ "First %s output reported failure to sync\n",
+ SDVO_NAME(intel_sdvo));
}
if (0)
@@ -1975,37 +1987,38 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
return false;
- DRM_DEBUG_KMS("SDVO capabilities:\n"
- " vendor_id: %d\n"
- " device_id: %d\n"
- " device_rev_id: %d\n"
- " sdvo_version_major: %d\n"
- " sdvo_version_minor: %d\n"
- " sdvo_num_inputs: %d\n"
- " smooth_scaling: %d\n"
- " sharp_scaling: %d\n"
- " up_scaling: %d\n"
- " down_scaling: %d\n"
- " stall_support: %d\n"
- " output_flags: %d\n",
- caps->vendor_id,
- caps->device_id,
- caps->device_rev_id,
- caps->sdvo_version_major,
- caps->sdvo_version_minor,
- caps->sdvo_num_inputs,
- caps->smooth_scaling,
- caps->sharp_scaling,
- caps->up_scaling,
- caps->down_scaling,
- caps->stall_support,
- caps->output_flags);
+ drm_dbg_kms(&i915->drm, "SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_num_inputs: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_num_inputs,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
return true;
}
@@ -2037,7 +2050,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &hotplug, sizeof(hotplug)))
+ &hotplug, sizeof(hotplug)))
return 0;
return hotplug;
@@ -2120,8 +2133,9 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
bool monitor_is_digital = drm_edid_is_digital(drm_edid);
bool connector_is_digital = !!IS_DIGITAL(sdvo);
- DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
- connector_is_digital, monitor_is_digital);
+ drm_dbg_kms(sdvo->base.base.dev,
+ "connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
return connector_is_digital == monitor_is_digital;
}
@@ -2134,12 +2148,15 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret;
u16 response;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo_connector->output_flag))
return connector_status_unknown;
@@ -2149,9 +2166,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
&response, 2))
return connector_status_unknown;
- DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
- response & 0xff, response >> 8,
- intel_sdvo_connector->output_flag);
+ drm_dbg_kms(&i915->drm, "SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
if (response == 0)
return connector_status_disconnected;
@@ -2185,11 +2202,15 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
int num_modes = 0;
const struct drm_edid *drm_edid;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (!intel_display_driver_check_access(i915))
+ return drm_edid_connector_add_modes(connector);
/* set the bus switch and get the modes */
drm_edid = intel_sdvo_get_edid(connector);
@@ -2283,6 +2304,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
const struct drm_connector_state *conn_state = connector->state;
@@ -2291,14 +2313,17 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
int num_modes = 0;
int i;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (!intel_display_driver_check_access(i915))
+ return 0;
/*
* Read the list of supported input resolutions for the selected TV
* format.
*/
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
@@ -2363,7 +2388,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
int i;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
- if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+ if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return 0;
@@ -2419,7 +2444,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
- state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+ state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
if (state->crtc) {
struct drm_crtc_state *crtc_state =
@@ -2779,10 +2804,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising DVI type 0x%x\n", type);
+ drm_dbg_kms(&i915->drm, "initialising DVI type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2793,7 +2819,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
if (intel_sdvo_get_hotplug_support(intel_sdvo) &
- intel_sdvo_connector->output_flag) {
+ intel_sdvo_connector->output_flag) {
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/*
* Some SDVO devices have one-shot hotplug interrupts.
@@ -2805,6 +2831,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
+ intel_connector->base.polled = intel_connector->polled;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2827,12 +2854,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising TV type 0x%x\n", type);
+ drm_dbg_kms(&i915->drm, "initialising TV type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2866,12 +2894,13 @@ err:
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising analog type 0x%x\n", type);
+ drm_dbg_kms(&i915->drm, "initialising analog type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2880,6 +2909,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ intel_connector->base.polled = intel_connector->polled;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
@@ -2902,7 +2932,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising LVDS type 0x%x\n", type);
+ drm_dbg_kms(&i915->drm, "initialising LVDS type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2986,6 +3016,7 @@ static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
static const u16 probe_order[] = {
SDVO_OUTPUT_TMDS0,
SDVO_OUTPUT_TMDS1,
@@ -3004,8 +3035,9 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags);
if (flags == 0) {
- DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%04x)\n",
- SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
+ drm_dbg_kms(&i915->drm,
+ "%s: Unknown SDVO output type (0x%04x)\n",
+ SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
return false;
}
@@ -3067,8 +3099,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->tv_format =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", intel_sdvo_connector->format_supported_num);
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
if (!intel_sdvo_connector->tv_format)
return false;
@@ -3076,7 +3108,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
drm_property_add_enum(intel_sdvo_connector->tv_format, i,
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+ intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
@@ -3094,8 +3126,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
state_assignment = response; \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, 0); \
- DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
- data_value[0], data_value[1], response); \
+ drm_dbg_kms(dev, #name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
} \
} while (0)
@@ -3106,6 +3138,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
struct drm_connector_state *conn_state = connector->state;
@@ -3142,10 +3175,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
return false;
drm_object_attach_property(&connector->base,
- intel_sdvo_connector->right, 0);
- DRM_DEBUG_KMS("h_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
+ intel_sdvo_connector->right, 0);
+ drm_dbg_kms(&i915->drm, "h_overscan: max %d, default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
if (enhancements.overscan_v) {
@@ -3164,7 +3196,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_vscan = data_value[0];
intel_sdvo_connector->top =
drm_property_create_range(dev, 0,
- "top_margin", 0, data_value[0]);
+ "top_margin", 0, data_value[0]);
if (!intel_sdvo_connector->top)
return false;
@@ -3173,15 +3205,14 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->bottom =
drm_property_create_range(dev, 0,
- "bottom_margin", 0, data_value[0]);
+ "bottom_margin", 0, data_value[0]);
if (!intel_sdvo_connector->bottom)
return false;
drm_object_attach_property(&connector->base,
- intel_sdvo_connector->bottom, 0);
- DRM_DEBUG_KMS("v_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
+ intel_sdvo_connector->bottom, 0);
+ drm_dbg_kms(&i915->drm, "v_overscan: max %d, default %d, current %d\n",
+ data_value[0], data_value[1], response);
}
ENHANCEMENT(&sdvo_state->tv, hpos, HPOS);
@@ -3209,7 +3240,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl, 0);
- DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ drm_dbg_kms(&i915->drm, "dot crawl: current %d\n", response);
}
return true;
@@ -3234,6 +3265,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector)
{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
union {
struct intel_sdvo_enhancements_reply reply;
u16 response;
@@ -3245,7 +3277,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
&enhancements, sizeof(enhancements)) ||
enhancements.response == 0) {
- DRM_DEBUG_KMS("No enhancement is supported\n");
+ drm_dbg_kms(&i915->drm, "No enhancement is supported\n");
return true;
}
@@ -3465,23 +3497,23 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
goto err_output;
drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
- "clock range %dMHz - %dMHz, "
- "num inputs: %d, "
- "output 1: %c, output 2: %c\n",
- SDVO_NAME(intel_sdvo),
- intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
- intel_sdvo->caps.device_rev_id,
- intel_sdvo->pixel_clock_min / 1000,
- intel_sdvo->pixel_clock_max / 1000,
- intel_sdvo->caps.sdvo_num_inputs,
- /* check currently supported outputs */
- intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
- SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
- SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
- intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
- SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
+ "clock range %dMHz - %dMHz, "
+ "num inputs: %d, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ intel_sdvo->caps.sdvo_num_inputs,
+ /* check currently supported outputs */
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
+ SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
+ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
+ SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
return true;
err_output:
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index dcf05e00e505..6b374d481cd9 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -122,6 +122,15 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
+bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
+}
+
/*
 * The display power domains used for TC ports depend on the
* platform and TC mode (legacy, DP-alt, TBT):
@@ -986,10 +995,11 @@ xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
@@ -1012,16 +1022,17 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val = intel_de_read(i915, reg);
if (enable)
val |= XELPDP_TCSS_POWER_REQUEST;
else
val &= ~XELPDP_TCSS_POWER_REQUEST;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+ intel_de_write(i915, reg, val);
}
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
@@ -1055,26 +1066,28 @@ static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val = intel_de_read(i915, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+ intel_de_write(i915, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
@@ -1590,7 +1603,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
-bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
+bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -1605,19 +1618,6 @@ bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
return tc_phy_hpd_live_status(tc) & mask;
}
-bool intel_tc_port_connected(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_tc_port *tc = to_tc_port(dig_port);
- bool is_connected;
-
- mutex_lock(&tc->lock);
- is_connected = intel_tc_port_connected_locked(encoder);
- mutex_unlock(&tc->lock);
-
- return is_connected;
-}
-
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
bool ret;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 80a61e52850e..26c4265368c1 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -15,9 +15,9 @@ struct intel_encoder;
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
-bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d4386cb3569e..2b77d399f1a1 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -40,6 +40,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -949,7 +950,7 @@ intel_disable_tv(struct intel_atomic_state *state,
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
- int format = conn_state->tv.mode;
+ int format = conn_state->tv.legacy_mode;
return &tv_modes[format];
}
@@ -1327,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
* the active portion. Hence following this formula seems
 * more trouble than it's worth.
*
- * if (GRAPHICS_VER(dev_priv) == 4) {
+ * if (DISPLAY_VER(dev_priv) == 4) {
* num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
* den = tv_mode->clock;
* } else {
@@ -1704,7 +1705,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
- connector->state->tv.mode = i;
+ connector->state->tv.legacy_mode = i;
}
static int
@@ -1723,6 +1724,9 @@ intel_tv_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
+ if (!intel_display_driver_check_access(i915))
+ return connector->status;
+
if (force) {
struct drm_atomic_state *state;
@@ -1859,7 +1863,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
- if (old_state->tv.mode != new_state->tv.mode ||
+ if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
@@ -1896,7 +1900,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
conn_state->tv.margins.right = 46;
conn_state->tv.margins.bottom = 37;
- conn_state->tv.mode = 0;
+ conn_state->tv.legacy_mode = 0;
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@@ -1910,7 +1914,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
drm_object_attach_property(&connector->base,
i915->drm.mode_config.legacy_tv_mode_property,
- conn_state->tv.mode);
+ conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
conn_state->tv.margins.left);
@@ -1990,6 +1994,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ intel_connector->base.polled = intel_connector->polled;
drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index fe256bf7b485..baf7354cb6e2 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
@@ -581,3 +582,132 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
intel_vblank_section_exit(i915);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
+
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
+
+void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state,
+ struct intel_vblank_evade_ctx *evade)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state;
+ const struct drm_display_mode *adjusted_mode;
+
+ evade->crtc = crtc;
+
+ evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
+
+ /*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ *
+ * TODO: maybe just use the active timings here?
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ crtc_state = new_crtc_state;
+ else
+ crtc_state = old_crtc_state;
+
+ adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ /* timing changes should happen with VRR disabled */
+ drm_WARN_ON(crtc->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
+ new_crtc_state->update_m_n || new_crtc_state->update_lrr);
+
+ if (intel_vrr_is_push_sent(crtc_state))
+ evade->vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ else
+ evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ } else {
+ evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
+ }
+
+ /* FIXME needs to be calibrated sensibly */
+ evade->min = evade->vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+ VBLANK_EVASION_TIME_US);
+ evade->max = evade->vblank_start - 1;
+
+ /*
+ * M/N and TRANS_VTOTAL are double buffered on the transcoder's
+ * undelayed vblank, so with seamless M/N and LRR we must evade
+ * both vblanks.
+ *
+ * DSB execution waits for the transcoder's undelayed vblank,
+ * hence we must kick off the commit before that.
+ */
+ if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
+ evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+}
+
+/* must be called with vblank interrupt already enabled! */
+int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
+{
+ struct intel_crtc *crtc = evade->crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ long timeout = msecs_to_jiffies_timeout(1);
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ DEFINE_WAIT(wait);
+ int scanline;
+
+ if (evade->min <= 0 || evade->max <= 0)
+ return 0;
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < evade->min || scanline > evade->max)
+ break;
+
+ if (!timeout) {
+ drm_err(&i915->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (evade->need_vlv_dsi_wa && scanline == evade->vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
+ return scanline;
+}
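
A hedged caller sketch for the new evasion helpers, assuming the usual intel_atomic state accessors; it is not part of this patch and simplifies the surrounding commit machinery:

/*
 * Hedged caller sketch, not part of this patch: how the two helpers are
 * meant to pair around the critical register-update section. Error
 * handling and the commit machinery are simplified.
 */
static void example_pipe_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_vblank_evade_ctx evade;

	intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);

	/* vblank irq must already be enabled, e.g. via drm_crtc_vblank_get() */
	local_irq_disable();

	/* poll until the scanline is outside the [min, max] evasion window */
	intel_vblank_evade(&evade);

	/* ... write the double-buffered pipe/plane registers here ... */

	local_irq_enable();
}
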
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 17636f140c71..ec6c3da3eeac 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -13,6 +13,18 @@ struct drm_crtc;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_vblank_evade_ctx {
+ struct intel_crtc *crtc;
+ int min, max, vblank_start;
+ bool need_vlv_dsi_wa;
+};
+
+void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state,
+ struct intel_vblank_evade_ctx *evade);
+/* must be called with vblank interrupt already enabled! */
+int intel_vblank_evade(struct intel_vblank_evade_ctx *evade);
+
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 511dc1544854..e941e2e4fd14 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -948,6 +948,11 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(dev_priv) == 13)
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
+ if (GRAPHICS_VER(dev_priv) >= 20 &&
+ fb->modifier == I915_FORMAT_MOD_4_TILED) {
+ plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ }
+
return plane_ctl;
}
@@ -2624,3 +2629,31 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
error:
kfree(intel_fb);
}
+
+bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = crtc->pipe;
+ u32 base;
+
+ if (!plane_state->uapi.visible)
+ return false;
+
+ base = intel_plane_ggtt_offset(plane_state);
+
+ /*
+ * We may have moved the surface to a different
+ * part of ggtt, make the plane aware of that.
+ */
+ if (plane_config->base == base)
+ return false;
+
+ intel_de_write(i915, PLANE_SURF(pipe, plane_id), base);
+
+ return true;
+}
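
A hypothetical caller sketch for the new fixup helper; apart from skl_fixup_initial_plane_config() itself, the names here are assumptions rather than code from this patch:

/*
 * Hypothetical caller, not part of this patch. The boolean return only
 * reports whether PLANE_SURF had to be rewritten, so a caller might
 * simply log that fact.
 */
static void example_readout_fixup(struct intel_crtc *crtc,
				  const struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (skl_fixup_initial_plane_config(crtc, plane_config))
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] initial plane surface address fixed up\n",
			    crtc->base.base.id, crtc->base.name);
}
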
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
index be64c201f9b3..e92e00c01b29 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -22,6 +22,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
void skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
+bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
+ const struct intel_initial_plane_config *plane_config);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 56588d6e24ae..c6b9be80d83c 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -23,6 +23,12 @@
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+/* It is expected that DSB can do posted writes to every register in
+ * the pipe and planes within 100us. For the flip queue use case, the
+ * recommended DSB execution time is 100us + one SAGV block time.
+ */
+#define DSB_EXE_TIME 100
+
static void skl_sagv_disable(struct drm_i915_private *i915);
/* Stores plane specific WM parameters */
@@ -443,12 +449,35 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+ * normal (ie. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
if (intel_crtc_can_enable_sagv(new_crtc_state))
new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
else
@@ -478,21 +507,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case)
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_can_enable_sagv(i915, new_bw_state);
- }
-
return 0;
}
@@ -1367,7 +1381,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
u64 data_rate = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+ if (plane_id == PLANE_CURSOR)
continue;
data_rate += crtc_state->rel_data_rate[plane_id];
@@ -1514,12 +1528,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
return 0;
/* Allocate fixed number of blocks for cursor. */
- if (DISPLAY_VER(i915) < 20) {
- cursor_size = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= cursor_size;
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - cursor_size, alloc->end);
- }
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
iter.data_rate = skl_total_relative_data_rate(crtc_state);
@@ -1533,7 +1545,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) {
+ if (plane_id == PLANE_CURSOR) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1581,7 +1593,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+ if (plane_id == PLANE_CURSOR)
continue;
if (DISPLAY_VER(i915) < 11 &&
@@ -2898,12 +2910,51 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return 0;
}
+/*
+ * If Fixed Refresh Rate:
+ * Program DEEP PKG_C_LATENCY Pkg C with the highest valid latency from
+ * watermark level 1 and above. If watermark level 1 is
+ * invalid, program it with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = DSB execution time
+ * If Variable Refresh Rate:
+ * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = 0
+ */
+static void
+skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
+{
+ u32 max_latency = 0;
+ u32 clear = 0, val = 0;
+ u32 added_wake_time = 0;
+
+ if (DISPLAY_VER(i915) < 20)
+ return;
+
+ if (vrr_enabled) {
+ max_latency = LNL_PKG_C_LATENCY_MASK;
+ added_wake_time = 0;
+ } else {
+ max_latency = skl_watermark_max_latency(i915, 1);
+ if (max_latency == 0)
+ max_latency = LNL_PKG_C_LATENCY_MASK;
+ added_wake_time = DSB_EXE_TIME +
+ i915->display.sagv.block_time_us;
+ }
+
+ clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+ val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
+ val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+
+ intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
+}
+
static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
+ bool vrr_enabled = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@@ -2928,8 +2979,13 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
+
+ if (new_crtc_state->vrr.enable)
+ vrr_enabled = true;
}
+ skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
+
skl_print_wm_changes(state);
return 0;
@@ -3725,11 +3781,11 @@ void skl_watermark_debugfs_register(struct drm_i915_private *i915)
&intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915)
+unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
unsigned int latency = skl_wm_latency(i915, level, NULL);
if (latency)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index fb0da36fd3ec..e3d1d74a7b17 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -46,8 +46,8 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
void skl_watermark_debugfs_register(struct drm_i915_private *i915);
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915);
-
+unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+ int initial_wm_level);
void skl_wm_init(struct drm_i915_private *i915);
struct intel_dbuf_state {
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
index 628c5920ad49..20b30c9a6613 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -157,4 +157,8 @@
#define MTL_LATENCY_SAGV _MMIO(0x4578c)
#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0)
+#define LNL_PKG_C_LATENCY _MMIO(0x46460)
+#define LNL_ADDED_WAKE_TIME_MASK REG_GENMASK(28, 16)
+#define LNL_PKG_C_LATENCY_MASK REG_GENMASK(12, 0)
+
#endif /* __SKL_WATERMARK_REGS_H__ */
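
A standalone sketch of the clear/set read-modify-write that skl_program_dpkgc_latency() performs on LNL_PKG_C_LATENCY, using the mask layout defined above and made-up latency numbers:

/*
 * Standalone sketch of the clear/set rmw described in the dpkgc comment.
 * The masks mirror LNL_PKG_C_LATENCY_MASK (bits 12:0) and
 * LNL_ADDED_WAKE_TIME_MASK (bits 28:16); the 20us latency and 30us SAGV
 * block time are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PKG_C_LATENCY_MASK	0x00001fffu	/* bits 12:0 */
#define EX_ADDED_WAKE_TIME_MASK	0x1fff0000u	/* bits 28:16 */

static uint32_t example_rmw(uint32_t reg, uint32_t clear, uint32_t set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	uint32_t reg = 0xffffffffu;	/* pretend both fields were all 1's */
	uint32_t latency = 20;		/* highest valid WM latency, in us */
	uint32_t wake = 100 + 30;	/* DSB_EXE_TIME + SAGV block time */
	uint32_t set = latency | (wake << 16);

	reg = example_rmw(reg, EX_PKG_C_LATENCY_MASK | EX_ADDED_WAKE_TIME_MASK, set);
	printf("reg = 0x%08x\n", reg);	/* reg = 0xe082e014 */
	return 0;
}
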
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 555022c0652c..d3a771afb083 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2160,12 +2160,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
#ifdef CONFIG_MMU_NOTIFIER
if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
- read_lock(&eb->i915->mm.notifier_lock);
-
- /*
- * count is always at least 1, otherwise __EXEC_USERPTR_USED
- * could not have been set
- */
for (i = 0; i < count; i++) {
struct eb_vma *ev = &eb->vma[i];
struct drm_i915_gem_object *obj = ev->vma->obj;
@@ -2177,8 +2171,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (err)
break;
}
-
- read_unlock(&eb->i915->mm.notifier_lock);
}
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 0d812f4d787d..3b27218aabe2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -28,6 +28,13 @@ void i915_gem_suspend(struct drm_i915_private *i915)
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
+ /*
+ * On rare occasions, we've observed that fence completion triggers
+ * free_engines asynchronously via call_rcu(). Ensure those are done.
+ * This path is only called on suspend, so it's an acceptable cost.
+ */
+ rcu_barrier();
+
flush_workqueue(i915->wq);
/*
@@ -160,6 +167,9 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
+ /* Like i915_gem_suspend, flush tasks staged from fence triggers */
+ rcu_barrier();
+
for_each_gt(gt, i915, i)
intel_gt_suspend_late(gt);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index a4fb577eceb4..b09b74a2448b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
return ERR_PTR(-EINVAL);
if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
- offset + size > mem->io_size &&
+ offset + size > resource_size(&mem->io) &&
!i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
return ERR_PTR(-ENOSPC);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 8c88075eeab2..ad6dd7f3259b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */
mem->region.end = i915->dsm.reserved.start - 1;
- mem->io_size = min(mem->io_size, resource_size(&mem->region));
+ mem->io = DEFINE_RES_MEM(mem->io.start,
+ min(resource_size(&mem->io),
+ resource_size(&mem->region)));
i915->dsm.usable_size = resource_size(&mem->region);
@@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
* With discrete devices, where we lack a mappable aperture there is no
* possible way to ever access this memory on the CPU side.
*/
- if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
+ if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
!(flags & I915_BO_ALLOC_GPU_ONLY))
return -ENOSPC;
@@ -826,7 +828,6 @@ static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
static int init_stolen_lmem(struct intel_memory_region *mem)
{
- struct drm_i915_private *i915 = mem->i915;
int err;
if (GEM_WARN_ON(resource_size(&mem->region) == 0))
@@ -838,14 +839,10 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
return 0;
}
- if (mem->io_size &&
- !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
+ if (resource_size(&mem->io) &&
+ !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
goto err_cleanup;
- drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
- &mem->io_start);
- drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
-
return 0;
err_cleanup:
@@ -855,7 +852,7 @@ err_cleanup:
static int release_stolen_lmem(struct intel_memory_region *mem)
{
- if (mem->io_size)
+ if (resource_size(&mem->io))
io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
return 0;
@@ -938,13 +935,17 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
} else {
/* Use DSM base address instead for stolen memory */
- dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
+ dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
- if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+ if (i915_direct_stolen_access(i915)) {
+ drm_dbg(&i915->drm, "Using direct DSM access\n");
+ io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
+ io_size = dsm_size;
+ } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 9227f8146a58..e6f177183c0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -65,8 +65,6 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement i915_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
/**
@@ -144,45 +142,41 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
place->fpfn = offset >> PAGE_SHIFT;
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
- } else if (mr->io_size && mr->io_size < mr->total) {
+ } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
- WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
- place->lpfn = mr->io_size >> PAGE_SHIFT;
+ WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
+ place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
}
}
}
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
- struct ttm_place *requested,
- struct ttm_place *busy,
+ struct ttm_place *places,
struct ttm_placement *placement)
{
unsigned int num_allowed = obj->mm.n_placements;
unsigned int flags = obj->flags;
unsigned int i;
- placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, obj->bo_offset,
+ obj->mm.region, &places[0], obj->bo_offset,
obj->base.size, flags);
+ places[0].flags |= TTM_PL_FLAG_DESIRED;
/* Cache this on object? */
- placement->num_busy_placement = num_allowed;
- for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
- obj->bo_offset, obj->base.size, flags);
-
- if (num_allowed == 0) {
- *busy = *requested;
- placement->num_busy_placement = 1;
+ for (i = 0; i < num_allowed; ++i) {
+ i915_ttm_place_from_region(obj->mm.placements[i],
+ &places[i + 1], obj->bo_offset,
+ obj->base.size, flags);
+ places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
}
- placement->placement = requested;
- placement->busy_placement = busy;
+ placement->num_placement = num_allowed + 1;
+ placement->placement = places;
}
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
@@ -789,7 +783,8 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = fetch_and_zero(&placement->num_busy_placement);
+ real_num_busy = placement->num_placement;
+ placement->num_placement = 1;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
@@ -805,7 +800,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_busy_placement = real_num_busy;
+ placement->num_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
@@ -839,7 +834,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
- struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+ struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
struct ttm_placement placement;
/* restricted by sg_alloc_table */
@@ -849,7 +844,7 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
- i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+ i915_ttm_placement_from_obj(obj, places, &placement);
return __i915_ttm_get_pages(obj, &placement);
}
@@ -879,9 +874,7 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
obj->base.size, flags);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
ret = __i915_ttm_get_pages(obj, &placement);
if (ret)
@@ -1090,7 +1083,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
- if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
+ if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;
@@ -1101,8 +1094,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
if (err) {
- drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n",
- ERR_PTR(err));
+ drm_dbg_ratelimited(dev,
+ "Unable to make resource CPU accessible(err = %pe)\n",
+ ERR_PTR(err));
dma_resv_unlock(bo->base.resv);
ret = VM_FAULT_SIGBUS;
goto out_rpm;
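
An illustrative condensation of the placement model the i915_gem_ttm.c hunks above switch to, assuming only the documented ttm_bo_validate() behaviour; this is a sketch, not code from the patch:

/*
 * Illustrative sketch: one placement array, entry 0 marked
 * TTM_PL_FLAG_DESIRED and the remaining allowed regions marked
 * TTM_PL_FLAG_FALLBACK. The "no eviction first" pass then just shrinks
 * num_placement to 1 before retrying with the full list.
 */
static int example_validate_two_pass(struct ttm_buffer_object *bo,
				     struct ttm_placement *placement,
				     struct ttm_operation_ctx *ctx)
{
	unsigned int full = placement->num_placement;
	int ret;

	placement->num_placement = 1;		/* DESIRED entry only */
	ret = ttm_bo_validate(bo, placement, ctx);
	if (!ret)
		return 0;

	placement->num_placement = full;	/* now allow FALLBACK entries */
	return ttm_bo_validate(bo, placement, ctx);
}
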
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1d3ebdf4069b..61abfb505766 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -42,7 +42,6 @@
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"
#ifdef CONFIG_MMU_NOTIFIER
@@ -61,36 +60,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- long r;
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- write_lock(&i915->mm.notifier_lock);
-
mmu_interval_set_seq(mni, cur_seq);
-
- write_unlock(&i915->mm.notifier_lock);
-
- /*
- * We don't wait when the process is exiting. This is valid
- * because the object will be cleaned up anyway.
- *
- * This is also temporarily required as a hack, because we
- * cannot currently force non-consistent batch buffers to preempt
- * and reschedule by waiting on it, hanging processes on exit.
- */
- if (current->flags & PF_EXITING)
- return true;
-
- /* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
-
return true;
}
@@ -379,6 +349,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
GEM_WARN_ON(obj->userptr.page_ref);
+ if (!obj->userptr.notifier.mm)
+ return;
+
mmu_interval_notifier_remove(&obj->userptr.notifier);
obj->userptr.notifier.mm = NULL;
}
@@ -580,15 +553,3 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
#endif
}
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
-{
-#ifdef CONFIG_MMU_NOTIFIER
- rwlock_init(&dev_priv->mm.notifier_lock);
-#endif
-
- return 0;
-}
-
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
-{
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h
deleted file mode 100644
index 8dadb2f8436d..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __I915_GEM_USERPTR_H__
-#define __I915_GEM_USERPTR_H__
-
-struct drm_i915_private;
-
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-
-#endif /* __I915_GEM_USERPTR_H__ */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 2c51a2c452fc..99a9ade73956 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
int err;
total = 0;
- size = mr->io_size;
+ size = resource_size(&mr->io);
do {
struct drm_i915_gem_object *obj;
@@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg)
struct intel_memory_region *mixed[] = { mr, system };
struct intel_memory_region *single[] = { mr };
struct ttm_resource_manager *man = mr->region_private;
- resource_size_t saved_io_size;
+ struct resource saved_io;
int err;
if (mr->private)
continue;
- if (!mr->io_size)
+ if (!resource_size(&mr->io))
continue;
/*
* For testing purposes let's force small BAR, if not already
* present.
*/
- saved_io_size = mr->io_size;
- if (mr->io_size == mr->total) {
- resource_size_t io_size = mr->io_size;
+ saved_io = mr->io;
+ if (resource_size(&mr->io) == mr->total) {
+ resource_size_t io_size = resource_size(&mr->io);
io_size = rounddown_pow_of_two(io_size >> 1);
if (io_size < PAGE_SIZE)
continue;
- mr->io_size = io_size;
+ mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
i915_ttm_buddy_man_force_visible_size(man,
io_size >> PAGE_SHIFT);
}
@@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg)
IGT_MMAP_MIGRATE_FAIL_GPU |
IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
- mr->io_size = saved_io_size;
+ mr->io = saved_io;
i915_ttm_buddy_man_force_visible_size(man,
- mr->io_size >> PAGE_SHIFT);
+ resource_size(&mr->io) >> PAGE_SHIFT);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 86a04afff64b..e1bf13e3d307 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) ||
+ if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
IS_DG2(rq->i915)) {
u32 *cs;
@@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
flags |= PIPE_CONTROL_FLUSH_L3;
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
/* dummy PIPE_CONTROL + depth flush */
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 40687806d22a..1ade568ffbfa 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1190,7 +1190,8 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
num = ARRAY_SIZE(xelpmp_regs);
}
} else {
- if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 1a8e2b7db013..8d4bb95f8424 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -96,7 +96,8 @@ static void heartbeat_commit(struct i915_request *rq,
static void show_heartbeat(const struct i915_request *rq,
struct intel_engine_cs *engine)
{
- struct drm_printer p = drm_debug_printer("heartbeat");
+ struct drm_printer p =
+ drm_dbg_printer(&engine->i915->drm, DRM_UT_DRIVER, "heartbeat");
if (!rq) {
intel_engine_dump(engine, &p,
@@ -290,6 +291,9 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
heartbeat_commit(rq, &attr);
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+ /* Ensure the forced pulse gets a full period to execute */
+ next_heartbeat(engine);
+
return 0;
}
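The drm_debug_printer() to drm_dbg_printer()/drm_err_printer() conversions repeated in these hunks all pass the owning drm_device so output is tagged per device. A short illustrative sketch of the new form (the demo functions are not from the patch):

#include <drm/drm_print.h>

static void demo_dump(struct drm_device *drm)
{
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DRIVER, "demo");

	/* Emitted as driver debug output, tagged with the device. */
	drm_printf(&p, "engine state: %s\n", "idle");
}

static void demo_error(struct drm_device *drm)
{
	struct drm_printer p = drm_err_printer(drm, "demo");

	drm_printf(&p, "no heartbeat pulse?\n");
}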
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 21a7e3191c18..ec1cbe229f0e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -24,6 +24,7 @@
#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
+#include "i915_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
@@ -1152,13 +1153,20 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
u32 pte_flags;
int ret;
GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+
+ if (i915_direct_stolen_access(i915)) {
+ drm_dbg(&i915->drm, "Using direct GSM access\n");
+ phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
+ } else {
+ phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+ }
if (needs_wc_ggtt_mapping(i915))
ggtt->gsm = ioremap_wc(phys_addr, size);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index f0dea54880af..c0b202223940 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -176,27 +176,13 @@ static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id)
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
-static u8 get_rc6_mask(struct intel_gt *gt)
-{
- u8 mask = 0;
-
- if (HAS_RC6(gt->i915))
- mask |= BIT(0);
- if (HAS_RC6p(gt->i915))
- mask |= BIT(1);
- if (HAS_RC6pp(gt->i915))
- mask |= BIT(2);
-
- return mask;
-}
-
static ssize_t rc6_enable_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+ return sysfs_emit(buff, "%x\n", gt->rc6.enabled);
}
static ssize_t rc6_enable_dev_show(struct device *dev,
@@ -205,7 +191,7 @@ static ssize_t rc6_enable_dev_show(struct device *dev,
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name);
- return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+ return sysfs_emit(buff, "%x\n", gt->rc6.enabled);
}
static u32 __rc6_residency_ms_show(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 86f73fe558ca..7811a8c9da06 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -24,7 +24,8 @@
bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{
/* Wa_13010847436 & Wa_14019519902 */
- return MEDIA_VER_FULL(i915) == IP_VER(13, 0);
+ return !i915_direct_stolen_access(i915) &&
+ MEDIA_VER_FULL(i915) == IP_VER(13, 0);
}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 353f93baaca0..25c1023eb5f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -495,7 +495,7 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915,
memset(table, 0, sizeof(struct drm_i915_mocs_table));
table->unused_entries_index = I915_MOCS_PTE;
- if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) {
table->size = ARRAY_SIZE(mtl_mocs_table);
table->table = mtl_mocs_table;
table->n_entries = MTL_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 7090e4be29cb..8f4b3c8af09c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -123,7 +123,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* temporary wa and should be removed after fixing real cause
* of forcewake timeouts.
*/
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
pg_enable =
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f8512aee58a8..51bb27e10a4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem)
int ret;
if (!io_mapping_init_wc(&mem->iomap,
- mem->io_start,
- mem->io_size))
+ mem->io.start,
+ resource_size(&mem->io)))
return -EIO;
ret = intel_region_ttm_init(mem);
@@ -240,7 +240,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size -= tile_stolen;
} else {
/* Stolen starts from GSMBASE without CCS */
- lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
+ lmem_size = intel_uncore_read64(&i915->uncore, GEN6_GSMBASE);
}
i915_resize_lmem_bar(i915, lmem_size);
@@ -273,14 +273,6 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (err)
goto err_region_put;
- drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
- drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
- &mem->io_start);
- drm_info(&i915->drm, "Local memory IO size: %pa\n",
- &mem->io_size);
- drm_info(&i915->drm, "Local memory available: %pa\n",
- &lmem_size);
-
if (io_size < lmem_size)
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
(u64)io_size >> 20);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 6801f8b95c53..c8e9aa41fdea 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1015,7 +1015,8 @@ void intel_gt_set_wedged(struct intel_gt *gt)
mutex_lock(&gt->reset.mutex);
if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer(__func__);
+ struct drm_printer p = drm_dbg_printer(&gt->i915->drm,
+ DRM_UT_DRIVER, __func__);
struct intel_engine_cs *engine;
enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 3eacbc50caf8..d67d44611c28 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -789,8 +789,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
dg2_ctx_gt_tuning_init(engine, wal);
- if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ /*
+ * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
+ * gen12_emit_indirect_ctx_rcs() rather than here on some early
+ * steppings.
+ */
+ if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}
@@ -820,6 +825,9 @@ static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_18019271663 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+ /* Wa_14019877138 */
+ wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
@@ -908,7 +916,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
@@ -1233,7 +1241,8 @@ static void __set_mcr_steering(struct i915_wa_list *wal,
static void debug_dump_steering(struct intel_gt *gt)
{
- struct drm_printer p = drm_debug_printer("MCR Steering:");
+ struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,
+ "MCR Steering:");
if (drm_debug_enabled(DRM_UT_DRIVER))
intel_gt_mcr_report_steering(&p, gt, false);
@@ -1643,7 +1652,7 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* Wa_14018778641 / Wa_18018781329 */
+ /* Wa_14018575942 / Wa_18018781329 */
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1710,7 +1719,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
@@ -1743,7 +1752,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
return;
}
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
else if (IS_PONTEVECCHIO(i915))
pvc_gt_workarounds_init(gt, wal);
@@ -2216,7 +2225,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
if (engine->gt->type == GT_MEDIA)
; /* none yet */
- else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
@@ -2828,7 +2837,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
/*
@@ -2881,7 +2890,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
}
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 47070cba7eb1..12eca750f7d0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -285,7 +285,8 @@ out_engine:
intel_engine_pm_flush(engine);
if (intel_engine_pm_is_awake(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
+ struct drm_printer p = drm_dbg_printer(&engine->i915->drm,
+ DRM_UT_DRIVER, __func__);
intel_engine_dump(engine, &p,
"%s is still awake:%d after idle-barriers\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index bc441ce7b380..ef014df4c4fc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -122,9 +122,9 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
if (engine_sync_barrier(engine)) {
- struct drm_printer m = drm_err_printer("pulse");
+ struct drm_printer m = drm_err_printer(&engine->i915->drm, "pulse");
- pr_err("%s: no heartbeat pulse?\n", engine->name);
+ drm_printf(&m, "%s: no heartbeat pulse?\n", engine->name);
intel_engine_dump(engine, &m, "%s", engine->name);
err = -ETIME;
@@ -136,10 +136,10 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
pulse_unlock_wait(p); /* synchronize with the retirement callback */
if (!i915_active_is_idle(&p->active)) {
- struct drm_printer m = drm_err_printer("pulse");
+ struct drm_printer m = drm_err_printer(&engine->i915->drm, "pulse");
- pr_err("%s: heartbeat pulse did not flush idle tasks\n",
- engine->name);
+ drm_printf(&m, "%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
i915_active_print(&p->active, &m);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index a7189c2d660c..1aa1446c8fb0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -62,12 +62,12 @@ int live_rc6_manual(void *arg)
dt = ktime_get();
rc0_power = librapl_energy_uJ();
- msleep(250);
+ msleep(1000);
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
- pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+ pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
(res[1] - res[0]) >> 10);
err = -EINVAL;
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 00b872b6380b..3941f2d6fa47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
* of pages. To succeed with both allocations, especially in case of Small
* BAR, try to allocate no more than quarter of mappable memory.
*/
- if (mr && size > mr->io_size / 4)
- size = mr->io_size / 4;
+ if (mr && size > resource_size(&mr->io) / 4)
+ size = resource_size(&mr->io) / 4;
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 813cc888e6fa..be70c46604b4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -206,8 +206,6 @@ struct intel_guc {
u32 ads_golden_ctxt_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
- /** @ads_engine_usage_size: size of engine usage in the ADS */
- u32 ads_engine_usage_size;
/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
struct i915_vma *lrc_desc_pool_v69;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 63724e17829a..f7372f736a77 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -377,8 +377,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
CCS_MASK(engine->gt))
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
+ /*
+ * some of the WA registers are MCR registers. As it is safe to
+ * use MCR form for non-MCR registers, for code simplicity, all
+ * WA registers are added with MCR form.
+ */
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);
+ ret |= GUC_MCR_REG_ADD(gt, regset, wa->mcr_reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
@@ -394,13 +399,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
if (GRAPHICS_VER(engine->i915) >= 12) {
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false);
- ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL0)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL1)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL2)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL3)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL4)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL5)), false);
+ ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL6)), false);
}
return ret ? -1 : 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 0f79cb658518..52332bb14339 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -184,7 +184,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
* in the seconds range. However, there is a limit on how long an
* individual wait_for() can wait. So wrap it in a loop.
*/
- before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps);
+ before_freq = intel_rps_read_actual_frequency(&gt->rps);
before = ktime_get();
for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) {
ret = wait_for(guc_load_done(uncore, &status, &success), 1000);
@@ -192,7 +192,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
break;
guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n",
- count, intel_rps_read_actual_frequency(&uncore->gt->rps), status,
+ count, intel_rps_read_actual_frequency(&gt->rps), status,
REG_FIELD_GET(GS_BOOTROM_MASK, status),
REG_FIELD_GET(GS_UKERNEL_MASK, status));
}
@@ -204,7 +204,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n",
- status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret);
+ status, delta_ms, intel_rps_read_actual_frequency(&gt->rps), ret);
guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
bootrom, ukernel,
@@ -254,11 +254,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
+ intel_rps_read_actual_frequency(&gt->rps), before_freq,
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
+ delta_ms, intel_rps_read_actual_frequency(&gt->rps),
before_freq, status, count, ret);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a259f1118c5a..f3dcae4b9d45 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -236,6 +236,13 @@ set_context_destroyed(struct intel_context *ce)
ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
}
+static inline void
+clr_context_destroyed(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED;
+}
+
static inline bool context_pending_disable(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
@@ -613,6 +620,8 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
u32 g2h_len_dw,
bool loop)
{
+ int ret;
+
/*
* We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
* so we don't handle the case where we don't get a reply because we
@@ -623,7 +632,11 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);
- return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+ ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+ if (ret)
+ atomic_dec(&guc->outstanding_submission_g2h);
+
+ return ret;
}
int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
@@ -1362,7 +1375,45 @@ static void guc_enable_busyness_worker(struct intel_guc *guc)
static void guc_cancel_busyness_worker(struct intel_guc *guc)
{
- cancel_delayed_work_sync(&guc->timestamp.work);
+ /*
+ * There are many different call stacks that can get here. Some of them
+ * hold the reset mutex. The busyness worker also attempts to acquire the
+ * reset mutex. Synchronously flushing a worker thread requires acquiring
+ * the worker mutex. Lockdep sees this as a conflict. It thinks that the
+ * flush can deadlock because it holds the worker mutex while waiting for
+ * the reset mutex, but another thread is holding the reset mutex and might
+ * attempt to use other worker functions.
+ *
+ * In practice, this scenario does not exist because the busyness worker
+ * does not block waiting for the reset mutex. It does a try-lock on it and
+ * immediately exits if the lock is already held. Unfortunately, the mutex
+ * in question (I915_RESET_BACKOFF) is an i915 construct whose lockdep
+ * annotation does not go so far as to express that this 'might lock' is really
+ * a 'does not need to lock'. So one option would be to add more complex lockdep
+ * annotations to ignore the issue (if at all possible). A simpler option is to
+ * just not flush synchronously when a reset is in progress. Given that the worker
+ * will just early exit and re-schedule itself anyway, there is no advantage
+ * to running it immediately.
+ *
+ * If a reset is not in progress, then the synchronous flush may be required.
+ * As noted many call stacks lead here, some during suspend and driver unload
+ * which do require a synchronous flush to make sure the worker is stopped
+ * before memory is freed.
+ *
+ * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
+ * every possible call stack is unfeasible. It would be too intrusive to many
+ * areas that really don't care about the GuC backend. However, there is the
+ * 'reset_in_progress' flag available, so just use that.
+ *
+ * And note that in the case of a reset occurring during driver unload
+ * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set)
+ * is fine because there is another cancel in _finish (when the reset flag is
+ * not).
+ */
+ if (guc_to_gt(guc)->uc.reset_in_progress)
+ cancel_delayed_work(&guc->timestamp.work);
+ else
+ cancel_delayed_work_sync(&guc->timestamp.work);
}
static void __reset_guc_busyness_stats(struct intel_guc *guc)
@@ -1613,6 +1664,11 @@ static void guc_flush_submissions(struct intel_guc *guc)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+void intel_guc_submission_flush_work(struct intel_guc *guc)
+{
+ flush_work(&guc->submission_state.destroyed_worker);
+}
+
static void guc_flush_destroyed_contexts(struct intel_guc *guc);
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
@@ -1948,8 +2004,16 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
+ /*
+ * Ensure the busyness worker gets cancelled even on a fatal wedge.
+ * Note that reset_prepare is not allowed to because it confuses lockdep.
+ */
+ if (guc_submission_initialized(guc))
+ guc_cancel_busyness_worker(guc);
+
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
+ !intel_guc_is_fw_running(guc) ||
intel_gt_is_wedged(guc_to_gt(guc)))) {
return;
}
@@ -3283,12 +3347,13 @@ static void guc_context_close(struct intel_context *ce)
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
-static inline void guc_lrc_desc_unpin(struct intel_context *ce)
+static inline int guc_lrc_desc_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
struct intel_gt *gt = guc_to_gt(guc);
unsigned long flags;
bool disabled;
+ int ret;
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
@@ -3299,18 +3364,41 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags);
disabled = submission_disabled(guc);
if (likely(!disabled)) {
+ /*
+ * Take a gt-pm ref and change context state to be destroyed.
+ * NOTE: a G2H IRQ that comes after will put this gt-pm ref back
+ */
__intel_gt_pm_get(gt);
set_context_destroyed(ce);
clr_context_registered(ce);
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
if (unlikely(disabled)) {
release_guc_id(guc, ce);
__guc_context_destroy(ce);
- return;
+ return 0;
}
- deregister_context(ce, ce->guc_id.id);
+ /*
+ * GuC is active, let's destroy this context, but at this point we can still be racing
+ * with suspend, so we undo everything if the H2G fails in deregister_context so
+ * that GuC reset will find this context during clean up.
+ */
+ ret = deregister_context(ce, ce->guc_id.id);
+ if (ret) {
+ spin_lock(&ce->guc_state.lock);
+ set_context_registered(ce);
+ clr_context_destroyed(ce);
+ spin_unlock(&ce->guc_state.lock);
+ /*
+ * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
+ * the wakeref immediately, but per its usage spec it must be called after the unlock.
+ */
+ intel_wakeref_put_async(&gt->wakeref);
+ }
+
+ return ret;
}
static void __guc_context_destroy(struct intel_context *ce)
@@ -3378,7 +3466,22 @@ static void deregister_destroyed_contexts(struct intel_guc *guc)
if (!ce)
break;
- guc_lrc_desc_unpin(ce);
+ if (guc_lrc_desc_unpin(ce)) {
+ /*
+ * This means GuC's CT link severed mid-way which could happen
+ * in suspend-resume corner cases. In this case, put the
+ * context back into the destroyed_contexts list which will
+ * get picked up on the next context deregistration event or
+ * purged in a GuC sanitization event (reset/unload/wedged/...).
+ */
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ list_add_tail(&ce->destroyed_link,
+ &guc->submission_state.destroyed_contexts);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+ /* Bail now since the list might never be emptied if h2gs fail */
+ break;
+ }
+
}
}
@@ -3389,6 +3492,17 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
+ /*
+ * In rare cases we can get here via async context-free fence-signals that
+ * come very late in suspend flow or very early in resume flows. In these
+ * cases, GuC won't be ready but just skipping it here is fine as these
+ * pending-destroy-contexts get destroyed totally at GuC reset time at the
+ * end of suspend, or this worker can be picked up later on the next
+ * context destruction trigger after resume completes.
+ */
+ if (!intel_guc_is_ready(guc))
+ return;
+
with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
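The busyness-worker change above reduces to picking the asynchronous or synchronous cancel primitive depending on whether a reset is in flight. A generic sketch of that pattern, with an illustrative state struct standing in for the GuC one:

#include <linux/workqueue.h>

struct demo_state {
	struct delayed_work worker;
	bool reset_in_progress;	/* stand-in for uc.reset_in_progress */
};

static void demo_cancel_worker(struct demo_state *s)
{
	if (s->reset_in_progress)
		/* Reset path: skip the flush; the worker early-exits anyway. */
		cancel_delayed_work(&s->worker);
	else
		/* Suspend/unload path: worker must be stopped before teardown. */
		cancel_delayed_work_sync(&s->worker);
}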
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c57b29cdb1a6..b6df75622d3b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -38,6 +38,8 @@ int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
bool interruptible,
long timeout);
+void intel_guc_submission_flush_work(struct intel_guc *guc);
+
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
{
return guc->submission_supported;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ba9e07fc2b57..0945b177d5f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_rps.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
@@ -447,17 +448,68 @@ static const char *auth_mode_string(struct intel_huc *huc,
return partial ? "clear media" : "all workloads";
}
+/*
+ * Use a longer timeout for debug builds so that problems can be detected
+ * and analysed. But a shorter timeout for releases so that user's don't
+ * wait forever to find out there is a problem. Note that the only reason
+ * an end user should hit the timeout is in case of extreme thermal throttling.
+ * And a system that is that hot during boot is probably dead anyway!
+ */
+#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define HUC_LOAD_RETRY_LIMIT 20
+#else
+#define HUC_LOAD_RETRY_LIMIT 3
+#endif
+
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- int ret;
+ struct intel_uncore *uncore = gt->uncore;
+ ktime_t before, after, delta;
+ int ret, count;
+ u64 delta_ms;
+ u32 before_freq;
- ret = __intel_wait_for_register(gt->uncore,
- huc->status[type].reg,
- huc->status[type].mask,
- huc->status[type].value,
- 2, 50, NULL);
+ /*
+ * The KMD requests maximum frequency during driver load, however thermal
+ * throttling can force the frequency down to minimum (although the board
+ * really should never get that hot in real life!). IFWI issues have been
+ * seen to cause sporadic failures to grant the higher frequency. And at
+ * minimum frequency, the authentication time can be in the seconds range.
+ * Note that there is a limit on how long an individual wait_for() can wait.
+ * So wrap it in a loop.
+ */
+ before_freq = intel_rps_read_actual_frequency(&gt->rps);
+ before = ktime_get();
+ for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
+ ret = __intel_wait_for_register(gt->uncore,
+ huc->status[type].reg,
+ huc->status[type].mask,
+ huc->status[type].value,
+ 2, 1000, NULL);
+ if (!ret)
+ break;
+
+ huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
+ count, intel_rps_read_actual_frequency(&gt->rps),
+ huc->status[type].reg.reg);
+ }
+ after = ktime_get();
+ delta = ktime_sub(after, before);
+ delta_ms = ktime_to_ms(delta);
+
+ if (delta_ms > 50) {
+ huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
+ delta_ms, huc->status[type].reg.reg, count, ret);
+ huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
+ intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
+ } else {
+ huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, intel_rps_read_actual_frequency(&gt->rps),
+ before_freq, huc->status[type].reg.reg, count, ret);
+ }
/* mark the load process as complete even if the wait failed */
delayed_huc_load_complete(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 3872d309ed31..6dfe5d9456c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -640,7 +640,7 @@ void intel_uc_reset_finish(struct intel_uc *uc)
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
- if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_reset_finish(guc);
}
@@ -690,6 +690,8 @@ void intel_uc_suspend(struct intel_uc *uc)
return;
}
+ intel_guc_submission_flush_work(guc);
+
with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
err = intel_guc_suspend(guc);
if (err)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 4eff44194439..fa6503900c84 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -152,17 +152,6 @@ struct intel_vgpu_cursor_plane_format {
u32 y_hot; /* in pixels */
};
-struct intel_vgpu_pipe_format {
- struct intel_vgpu_primary_plane_format primary;
- struct intel_vgpu_sprite_plane_format sprite;
- struct intel_vgpu_cursor_plane_format cursor;
- enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
-};
-
-struct intel_vgpu_fb_format {
- struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
-};
-
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane);
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 4cb183e06e95..cb50700e6cc9 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -93,8 +93,6 @@ struct intel_gvt_gtt_gma_ops {
struct intel_gvt_gtt {
const struct intel_gvt_gtt_pte_ops *pte_ops;
const struct intel_gvt_gtt_gma_ops *gma_ops;
- int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
- void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct mutex ppgtt_mm_lock;
@@ -210,7 +208,6 @@ struct intel_vgpu_scratch_pt {
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
- unsigned long active_ppgtt_mm_bitmap;
struct list_head ppgtt_mm_list_head;
struct radix_tree_root spt_tree;
struct list_head oos_page_list_head;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index c57aba09091f..2c95aeef4e41 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -89,7 +89,6 @@ struct intel_vgpu_gm {
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
- u32 base;
u32 size;
};
@@ -119,7 +118,6 @@ struct intel_vgpu_irq {
};
struct intel_vgpu_opregion {
- bool mapped;
void *va;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
};
@@ -223,7 +221,6 @@ struct intel_vgpu {
struct vfio_region *region;
int num_regions;
- struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
/*
@@ -256,7 +253,6 @@ struct intel_gvt_fence {
/* Special MMIO blocks. */
struct gvt_mmio_block {
- unsigned int device;
i915_reg_t offset;
unsigned int size;
gvt_mmio_func read;
@@ -444,7 +440,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define vgpu_hidden_gmadr_end(vgpu) \
(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
-#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
/* ring context size i.e. the first 0x50 dwords*/
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index c8e7dfc9f791..336d079c4207 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -40,7 +40,6 @@ struct intel_gvt_irq_info {
char *name;
i915_reg_t reg_base;
enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
- unsigned long warned;
int group;
DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
bool has_upstream_irq;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index e60ad476fe60..cd214be98668 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -177,7 +177,6 @@ enum intel_gvt_irq_type {
/* per-event information */
struct intel_gvt_event_info {
int bit; /* map to register bit */
- int policy; /* forwarding policy */
struct intel_gvt_irq_info *info; /* register info */
gvt_event_virt_handler_t v_handler; /* for v_event */
};
@@ -188,7 +187,6 @@ struct intel_gvt_irq {
struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
- DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
struct intel_gvt_irq_map *irq_map;
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faf21be724c3..4f74d867fe1a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -574,7 +574,7 @@ int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
ret = intel_vgpu_register_reg(vgpu,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
- &intel_vgpu_regops_opregion, OPREGION_SIZE,
+ &intel_vgpu_regops_opregion, INTEL_GVT_OPREGION_SIZE,
VFIO_REGION_INFO_FLAG_READ, base);
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index bba154e38705..32ebacb078e8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -62,10 +62,8 @@ typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
struct intel_gvt_mmio_info {
u32 offset;
u64 ro_mask;
- u32 device;
gvt_mmio_func read;
gvt_mmio_func write;
- u32 addr_range;
struct hlist_node node;
};
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 1f391b3da2cc..cd94993278b6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -104,10 +104,8 @@ struct intel_vgpu_workload {
/* execlist context information */
struct execlist_ctx_descriptor_format ctx_desc;
- struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
unsigned long guest_rb_head;
- bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index db99c2ef66db..990eaa029d9c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -147,7 +147,7 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
- if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) {
switch (obj->pat_index) {
case 0: return " WB";
case 1: return " WT";
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c7d7c3b7ecc6..9ee902d5b72c 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -681,7 +681,8 @@ i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
- struct drm_printer p = drm_debug_printer("i915 device info:");
+ struct drm_printer p = drm_dbg_printer(&dev_priv->drm, DRM_UT_DRIVER,
+ "device info:");
struct intel_gt *gt;
unsigned int i;
@@ -1003,8 +1004,10 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(i915);
+ intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
+ intel_display_driver_disable_user_access(i915);
drm_atomic_helper_shutdown(&i915->drm);
}
@@ -1014,6 +1017,9 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable_interrupts(i915);
intel_hpd_cancel_work(i915);
+ if (HAS_DISPLAY(i915))
+ intel_display_driver_suspend_access(i915);
+
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
@@ -1080,8 +1086,11 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+ if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
+ intel_display_driver_disable_user_access(dev_priv);
+ }
pci_save_state(pdev);
@@ -1092,6 +1101,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
+ if (HAS_DISPLAY(dev_priv))
+ intel_display_driver_suspend_access(dev_priv);
+
intel_suspend_encoders(dev_priv);
/* Must be called before GGTT is suspended. */
@@ -1103,8 +1115,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_suspend(dev_priv, opregion_target_state);
- intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
-
dev_priv->suspend_count++;
intel_dmc_suspend(dev_priv);
@@ -1243,15 +1253,21 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_init_hw(dev_priv);
intel_clock_gating_init(dev_priv);
+
+ if (HAS_DISPLAY(dev_priv))
+ intel_display_driver_resume_access(dev_priv);
+
intel_hpd_init(dev_priv);
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(dev_priv);
intel_display_driver_resume(dev_priv);
- intel_hpd_poll_disable(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(dev_priv)) {
+ intel_display_driver_enable_user_access(dev_priv);
drm_kms_helper_poll_enable(dev);
+ }
+ intel_hpd_poll_disable(dev_priv);
intel_opregion_resume(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index fa6852713bee..f58682505491 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -53,7 +53,7 @@ obj_meminfo(struct drm_i915_gem_object *obj,
obj->mm.region->id : INTEL_REGION_SMEM;
const u64 sz = obj->base.size;
- if (obj->base.handle_count > 1)
+ if (drm_gem_object_is_shared_for_memory_stats(&obj->base))
stats[id].shared += sz;
else
stats[id].private += sz;
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index a439dd789936..2e7a50d16a88 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -24,8 +24,6 @@ struct drm_printer;
struct i915_drm_client {
struct kref kref;
- unsigned int id;
-
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 861567362abd..e81b3b2858ac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -165,14 +165,6 @@ struct i915_gem_mm {
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
-#ifdef CONFIG_MMU_NOTIFIER
- /**
- * notifier_lock for mmu notifiers, memory may not be allocated
- * while holding this lock.
- */
- rwlock_t notifier_lock;
-#endif
-
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
u32 shrink_count;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 92758b6b41f0..1391c01d7663 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -48,7 +48,6 @@
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
-#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -1165,10 +1164,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
- ret = i915_gem_init_userptr(dev_priv);
- if (ret)
- return ret;
-
for_each_gt(gt, dev_priv, i) {
intel_uc_fetch_firmwares(&gt->uc);
intel_wopcm_init(&gt->wopcm);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d04660b60046..a0b784ebaddd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
dma_addr_t offset = dma - mem->region.start;
void __iomem *s;
- if (offset + PAGE_SIZE > mem->io_size) {
+ if (offset + PAGE_SIZE > resource_size(&mem->io)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2d695818f006..bd9d812b1afa 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
struct intel_gt *gt = to_gt(i915);
/* Wa_18013179988 */
- if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
intel_wakeref_t wakeref;
u32 reg, shift;
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 46445248d193..39fb6ce4a7ef 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -288,7 +288,6 @@ struct i915_perf_stream {
struct i915_vma *vma;
u8 *vaddr;
u32 last_ctx_id;
- int size_exponent;
/**
* @oa_buffer.ptr_lock: Locks reads and writes to all
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 00871ef99792..3baa2f54a86e 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.probed_size = mr->total;
if (mr->type == INTEL_MEMORY_LOCAL)
- info.probed_cpu_visible_size = mr->io_size;
+ info.probed_cpu_visible_size = resource_size(&mr->io);
else
info.probed_cpu_visible_size = mr->total;
@@ -551,6 +551,38 @@ static int query_hwconfig_blob(struct drm_i915_private *i915,
return hwconfig->size;
}
+static int
+query_guc_submission_version(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query)
+{
+ struct drm_i915_query_guc_submission_version __user *query_ptr =
+ u64_to_user_ptr(query->data_ptr);
+ struct drm_i915_query_guc_submission_version ver;
+ struct intel_guc *guc = &to_gt(i915)->uc.guc;
+ const size_t size = sizeof(ver);
+ int ret;
+
+ if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ return -ENODEV;
+
+ ret = copy_query_item(&ver, size, size, query);
+ if (ret != 0)
+ return ret;
+
+ if (ver.branch || ver.major || ver.minor || ver.patch)
+ return -EINVAL;
+
+ ver.branch = 0;
+ ver.major = guc->submission_version.major;
+ ver.minor = guc->submission_version.minor;
+ ver.patch = guc->submission_version.patch;
+
+ if (copy_to_user(query_ptr, &ver, size))
+ return -EFAULT;
+
+ return 0;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
@@ -559,6 +591,7 @@ static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
query_memregion_info,
query_hwconfig_blob,
query_geometry_subslices,
+ query_guc_submission_version,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
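From userspace the new item follows the usual i915 query convention: zero the output struct, point a query item at it and issue DRM_IOCTL_I915_QUERY. A sketch assuming the uAPI name of the item is DRM_I915_QUERY_GUC_SUBMISSION_VERSION (the struct layout matches the kernel side above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int demo_query_guc_version(int fd)
{
	struct drm_i915_query_guc_submission_version ver = {0}; /* must be zeroed */
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION, /* assumed name */
		.length = sizeof(ver),
		.data_ptr = (uintptr_t)&ver,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length < 0)
		return -1;

	printf("GuC submission interface %u.%u.%u\n", ver.major, ver.minor, ver.patch);
	return 0;
}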
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 27dc903f0553..e00557e1a57f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3059,6 +3059,7 @@
#define MCURSOR_MODE_64_ARGB_AX (0x20 | MCURSOR_MODE_64_32B_AX)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
+#define _CURAPOS_ERLY_TPT 0x7008c
#define CURSOR_POS_Y_SIGN REG_BIT(31)
#define CURSOR_POS_Y_MASK REG_GENMASK(30, 16)
#define CURSOR_POS_Y(y) REG_FIELD_PREP(CURSOR_POS_Y_MASK, (y))
@@ -3087,6 +3088,7 @@
#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
+#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT)
#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A)
@@ -5412,6 +5414,9 @@
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
+#define MTL_PCODE_STOLEN_ACCESS _MMIO(0x138914)
+#define STOLEN_ACCESS_ALLOWED 0x1
+
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
@@ -5652,6 +5657,10 @@ enum skl_power_gate {
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
@@ -5684,6 +5693,8 @@ enum skl_power_gate {
/* Known as DDI_CTL_DE in MTL+ */
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1 << 31)
+#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf << 24)
#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
@@ -6314,9 +6325,10 @@ enum skl_power_gate {
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
-#define GEN12_GSMBASE _MMIO(0x108100)
-#define GEN12_DSMBASE _MMIO(0x1080C0)
-#define GEN12_BDSM_MASK REG_GENMASK64(63, 20)
+#define GEN6_GSMBASE _MMIO(0x108100)
+#define GEN6_DSMBASE _MMIO(0x1080C0)
+#define GEN6_BDSM_MASK REG_GENMASK64(31, 20)
+#define GEN11_BDSM_MASK REG_GENMASK64(63, 20)
#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
#define SGSI_SIDECLK_DIS REG_BIT(17)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f59081066a19..519e096c607c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -52,7 +52,6 @@
struct execute_cb {
struct irq_work work;
struct i915_sw_fence *fence;
- struct i915_request *signal;
};
static struct kmem_cache *slab_requests;
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 60404dbb2e9f..df6437c37373 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -75,13 +75,10 @@ struct i915_syncmap {
unsigned int height;
unsigned int bitmap;
struct i915_syncmap *parent;
- /*
- * Following this header is an array of either seqno or child pointers:
- * union {
- * u32 seqno[KSYNCMAP];
- * struct i915_syncmap *child[KSYNCMAP];
- * };
- */
+ union {
+ DECLARE_FLEX_ARRAY(u32, seqno);
+ DECLARE_FLEX_ARRAY(struct i915_syncmap *, child);
+ };
};
/**
@@ -99,13 +96,13 @@ void i915_syncmap_init(struct i915_syncmap **root)
static inline u32 *__sync_seqno(struct i915_syncmap *p)
{
GEM_BUG_ON(p->height);
- return (u32 *)(p + 1);
+ return p->seqno;
}
static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p)
{
GEM_BUG_ON(!p->height);
- return (struct i915_syncmap **)(p + 1);
+ return p->child;
}
static inline unsigned int
@@ -200,7 +197,7 @@ __sync_alloc_leaf(struct i915_syncmap *parent, u64 id)
{
struct i915_syncmap *p;
- p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL);
+ p = kmalloc(struct_size(p, seqno, KSYNCMAP), GFP_KERNEL);
if (unlikely(!p))
return NULL;
@@ -282,7 +279,7 @@ static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)
unsigned int above;
/* Insert a join above the current layer */
- next = kzalloc(sizeof(*next) + KSYNCMAP * sizeof(next),
+ next = kzalloc(struct_size(next, child, KSYNCMAP),
GFP_KERNEL);
if (unlikely(!next))
return -ENOMEM;
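The syncmap rework is the standard flexible-array conversion: storage that used to live past the end of the struct is declared in a union of flexible arrays, so allocations can use the overflow-checked struct_size() helper. A generic sketch of the idiom (the demo type is not the i915 one):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>

#define DEMO_FANOUT 16

struct demo_node {
	unsigned int height;
	union {
		DECLARE_FLEX_ARRAY(u32, seqno);		       /* leaves */
		DECLARE_FLEX_ARRAY(struct demo_node *, child); /* interior nodes */
	};
};

static struct demo_node *demo_alloc_leaf(void)
{
	struct demo_node *p;

	/* sizeof(*p) + DEMO_FANOUT * sizeof(p->seqno[0]), with overflow checking */
	p = kmalloc(struct_size(p, seqno, DEMO_FANOUT), GFP_KERNEL);
	return p;
}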
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 29fd02bf5ea8..6f9e7b354b54 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include "i915_drv.h"
+#include "i915_reg.h"
#include "i915_utils.h"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
@@ -125,3 +126,19 @@ bool i915_vtd_active(struct drm_i915_private *i915)
/* Running as a guest, we assume the host is enforcing VT'd */
return i915_run_as_guest();
}
+
+bool i915_direct_stolen_access(struct drm_i915_private *i915)
+{
+ /*
+ * Wa_22018444074
+ *
+ * Access via BAR can hang MTL, go directly to GSM/DSM,
+ * except for VM guests which won't have access to it.
+ *
+ * Normally this would not work but on MTL the system firmware
+ * should have relaxed the access permissions sufficiently.
+ * 0x138914==0x1 indicates that the firmware has done its job.
+ */
+ return IS_METEORLAKE(i915) && !i915_run_as_guest() &&
+ intel_uncore_read(&i915->uncore, MTL_PCODE_STOLEN_ACCESS) == STOLEN_ACCESS_ALLOWED;
+}
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index f98577967b7f..b45ef0560611 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -391,4 +391,6 @@ static inline bool i915_run_as_guest(void)
bool i915_vtd_active(struct drm_i915_private *i915);
+bool i915_direct_stolen_access(struct drm_i915_private *i915);
+
#endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 64472b7f0e77..559de74d0b11 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -290,7 +290,6 @@ struct i915_vma {
struct list_head obj_link; /* Link in the object's VMA list */
struct rb_node obj_node;
- struct hlist_node obj_hash;
/** This vma's place in the eviction list */
struct list_head evict_link;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 60a03340bbd4..52d998e5c21a 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
if (memchr_inv(result, value, sizeof(result))) {
dev_err(mem->i915->drm.dev,
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
- &mem->region, &mem->io_start, &offset, caller,
+ &mem->region, &mem->io.start, &offset, caller,
value, result[0], result[1], result[2]);
return -EINVAL;
}
@@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem,
int err;
int i;
- va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
+ va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
if (!va) {
dev_err(mem->i915->drm.dev,
"Failed to ioremap memory region [%pa + %pa] for %ps\n",
- &mem->io_start, &offset, caller);
+ &mem->io.start, &offset, caller);
return -EFAULT;
}
@@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem,
resource_size_t last, page;
int err;
- if (mem->io_size < PAGE_SIZE)
+ if (resource_size(&mem->io) < PAGE_SIZE)
return 0;
- last = mem->io_size - PAGE_SIZE;
+ last = resource_size(&mem->io) - PAGE_SIZE;
/*
* Quick test to check read/write access to the iomap (backing store).
@@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
int err = 0;
- if (!mem->io_start)
+ if (!mem->io.start)
return 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
@@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->i915 = i915;
mem->region = DEFINE_RES_MEM(start, size);
- mem->io_start = io_start;
- mem->io_size = io_size;
+ mem->io = DEFINE_RES_MEM(io_start, io_size);
mem->min_page_size = min_page_size;
mem->ops = ops;
mem->total = size;
@@ -373,6 +372,24 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
i915->mm.regions[i] = mem;
}
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *mem = i915->mm.regions[i];
+ u64 region_size, io_size;
+
+ if (!mem)
+ continue;
+
+ region_size = resource_size(&mem->region) >> 20;
+ io_size = resource_size(&mem->io) >> 20;
+
+ if (resource_size(&mem->io))
+ drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
+ mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
+ else
+ drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
+ mem->id, mem->name, region_size, &mem->region);
+ }
+
return 0;
out_cleanup:
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 9ba36454e51b..8c927e303c4a 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -50,8 +50,6 @@ enum intel_region_id {
for_each_if((mr) = (i915)->mm.regions[id])
struct intel_memory_region_ops {
- unsigned int flags;
-
int (*init)(struct intel_memory_region *mem);
int (*release)(struct intel_memory_region *mem);
@@ -71,8 +69,7 @@ struct intel_memory_region {
struct io_mapping iomap;
struct resource region;
- resource_size_t io_start;
- resource_size_t io_size;
+ struct resource io;
resource_size_t min_page_size;
resource_size_t total;
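The intel_memory_region hunks fold the separate io_start/io_size fields into a single struct resource. A minimal sketch of that pattern, using DEFINE_RES_MEM() and resource_size() as the diff does (names and values here are illustrative):
#include <linux/ioport.h>
#include <linux/sizes.h>

struct demo_region {
	struct resource io;	/* replaces io_start + io_size */
};

static void demo_region_set_io(struct demo_region *mem,
			       resource_size_t io_start,
			       resource_size_t io_size)
{
	mem->io = DEFINE_RES_MEM(io_start, io_size);
}

static bool demo_region_io_too_small(const struct demo_region *mem)
{
	/* resource_size() == end - start + 1, i.e. the old io_size */
	return resource_size(&mem->io) < SZ_4K;
}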
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index bf6097e7433d..04525d92bec5 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region),
- mem->io_size,
+ resource_size(&mem->io),
mem->min_page_size, PAGE_SIZE);
if (ret)
return ret;
@@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
goto out;
}
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
- } else if (mem->io_size && mem->io_size < mem->total) {
+ } else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place.fpfn = 0;
- if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
+ if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
ret = -E2BIG;
goto out;
}
- place.lpfn = mem->io_size >> PAGE_SHIFT;
+ place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
}
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 860b51b56a92..d4e844128826 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,10 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
+ if ((ignore_usecount &&
+ pm_runtime_get_if_active(rpm->kdev) <= 0) ||
+ (!ignore_usecount &&
+ pm_runtime_get_if_in_use(rpm->kdev) <= 0))
return 0;
}
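The runtime-PM hunk splits the old two-argument call into the single-argument pm_runtime_get_if_active()/pm_runtime_get_if_in_use() pair seen above. A minimal sketch of picking between them; the wrapper below is hypothetical.
#include <linux/pm_runtime.h>

/* Returns true with a usage count held; <= 0 means not in the wanted state. */
static bool demo_get_if_powered(struct device *dev, bool ignore_usecount)
{
	int ret;

	if (ignore_usecount)
		ret = pm_runtime_get_if_active(dev);	/* active, usecount ignored */
	else
		ret = pm_runtime_get_if_in_use(dev);	/* active and in use */

	return ret > 0;
}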
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index dfefad5a5fec..76400e9c40f0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1800,7 +1800,10 @@ static const struct intel_forcewake_range __mtl_fw_ranges[] = {
GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
0x24000 - 0x2407f: always on
0x24080 - 0x2ffff: reserved */
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x40000, 0x1901ef, 0),
+ GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
+ /* FIXME: WA to wake GT while triggering H2G */
};
/*
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index b61fe850e924..0d89d70b9c36 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -156,9 +156,9 @@ static int live_active_wait(void *arg)
__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(active->retired)) {
- struct drm_printer p = drm_err_printer(__func__);
+ struct drm_printer p = drm_err_printer(&i915->drm, __func__);
- pr_err("i915_active not retired after waiting!\n");
+ drm_printf(&p, "i915_active not retired after waiting!\n");
i915_active_print(&active->base, &p);
err = -EINVAL;
@@ -189,9 +189,9 @@ static int live_active_retire(void *arg)
err = -EIO;
if (!READ_ONCE(active->retired)) {
- struct drm_printer p = drm_err_printer(__func__);
+ struct drm_printer p = drm_err_printer(&i915->drm, __func__);
- pr_err("i915_active not retired after flushing!\n");
+ drm_printf(&p, "i915_active not retired after flushing!\n");
i915_active_print(&active->base, &p);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index d985d9bae2e8..ae6070b5bf07 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block);
- if (start < mr->io_size)
- total += min_t(u64, end, mr->io_size) - start;
+ if (start < resource_size(&mr->io))
+ total += min_t(u64, end, resource_size(&mr->io)) - start;
}
return total;
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 2990dd4d4a0d..e14ac0ab1314 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/jiffies.h>
+
//#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
@@ -12,7 +14,7 @@
#define REDUCED_TIMESLICE 5
#define REDUCED_PREEMPT 10
-#define WAIT_FOR_RESET_TIME 10000
+#define WAIT_FOR_RESET_TIME_MS 10000
struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
{
@@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq)
{
long ret;
- ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+ ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS));
if (ret < 0)
return ret;
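The selftest hunk makes the millisecond unit explicit in the constant name and converts it with msecs_to_jiffies() before the jiffies-based wait. A small sketch of the same conversion against a generic completion; the helper name is illustrative.
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define DEMO_RESET_TIMEOUT_MS	10000

static int demo_wait_for_reset(struct completion *done)
{
	/* wait_for_completion_timeout() wants jiffies, not milliseconds. */
	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(DEMO_RESET_TIMEOUT_MS)))
		return -ETIMEDOUT;

	return 0;
}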
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index 240beafb38ed..3cad6dac06b0 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -140,11 +140,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
- case INTEL_PCH_MTP_DEVICE_ID_TYPE:
- case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
- return PCH_MTP;
default:
return PCH_NONE;
}
@@ -173,9 +168,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_METEORLAKE(dev_priv))
- id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
- else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
@@ -225,6 +218,13 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 20) {
dev_priv->pch_type = PCH_LNL;
return;
+ } else if (IS_METEORLAKE(dev_priv)) {
+ /*
+ * Both north display and south display are on the SoC die.
+ * The real PCH is uninvolved in display.
+ */
+ dev_priv->pch_type = PCH_MTL;
+ return;
} else if (IS_DG2(dev_priv)) {
dev_priv->pch_type = PCH_DG2;
return;
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
index 1b03ea60a7a8..89e89ede265d 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ b/drivers/gpu/drm/i915/soc/intel_pch.h
@@ -25,11 +25,11 @@ enum intel_pch {
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
- PCH_MTP, /* Meteor Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
PCH_DG2,
+ PCH_MTL,
PCH_LNL,
};
@@ -59,16 +59,12 @@ enum intel_pch {
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
-#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
-#define HAS_PCH_LNL(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LNL)
-#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
index c9b54bb2692d..803e3fcdb50f 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
@@ -42,14 +42,13 @@ int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
{
struct dcss_blkctl *blkctl;
- blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL);
+ blkctl = devm_kzalloc(dcss->dev, sizeof(*blkctl), GFP_KERNEL);
if (!blkctl)
return -ENOMEM;
- blkctl->base_reg = ioremap(blkctl_base, SZ_4K);
+ blkctl->base_reg = devm_ioremap(dcss->dev, blkctl_base, SZ_4K);
if (!blkctl->base_reg) {
dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
- kfree(blkctl);
return -ENOMEM;
}
@@ -60,11 +59,3 @@ int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
return 0;
}
-
-void dcss_blkctl_exit(struct dcss_blkctl *blkctl)
-{
- if (blkctl->base_reg)
- iounmap(blkctl->base_reg);
-
- kfree(blkctl);
-}
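The DCSS hunks in this series move the sub-module init paths to device-managed allocations, which is why the explicit *_exit()/iounmap()/kfree() teardown can simply be deleted. A minimal sketch of the devm pattern (struct and function names are illustrative):
#include <linux/device.h>
#include <linux/io.h>
#include <linux/slab.h>

struct demo_block {
	void __iomem *base;
};

static int demo_block_init(struct device *dev, unsigned long base, size_t len)
{
	struct demo_block *blk;

	/* Freed automatically when the device is unbound. */
	blk = devm_kzalloc(dev, sizeof(*blk), GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* Unmapped automatically as well, so no exit helper is needed. */
	blk->base = devm_ioremap(dev, base, len);
	if (!blk->base)
		return -ENOMEM;

	dev_set_drvdata(dev, blk);
	return 0;
}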
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
index 3a84cb3209c4..e41d5c2a3ea4 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
@@ -202,7 +202,7 @@ int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
struct dcss_ctxld *ctxld;
int ret;
- ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
+ ctxld = devm_kzalloc(dcss->dev, sizeof(*ctxld), GFP_KERNEL);
if (!ctxld)
return -ENOMEM;
@@ -217,7 +217,7 @@ int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
goto err;
}
- ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
+ ctxld->ctxld_reg = devm_ioremap(dcss->dev, ctxld_base, SZ_4K);
if (!ctxld->ctxld_reg) {
dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
ret = -ENOMEM;
@@ -226,18 +226,14 @@ int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
if (ret)
- goto err_irq;
+ goto err;
dcss_ctxld_hw_cfg(ctxld);
return 0;
-err_irq:
- iounmap(ctxld->ctxld_reg);
-
err:
dcss_ctxld_free_ctx(ctxld);
- kfree(ctxld);
return ret;
}
@@ -246,11 +242,7 @@ void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
free_irq(ctxld->irq, ctxld);
- if (ctxld->ctxld_reg)
- iounmap(ctxld->ctxld_reg);
-
dcss_ctxld_free_ctx(ctxld);
- kfree(ctxld);
}
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index 4f3af0dfb344..7fd0c4c14205 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -109,8 +109,6 @@ dtg_err:
dcss_ctxld_exit(dcss->ctxld);
ctxld_err:
- dcss_blkctl_exit(dcss->blkctl);
-
dcss_clocks_disable(dcss);
return ret;
@@ -124,7 +122,6 @@ static void dcss_submodules_stop(struct dcss_dev *dcss)
dcss_ss_exit(dcss->ss);
dcss_dtg_exit(dcss->dtg);
dcss_ctxld_exit(dcss->ctxld);
- dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
}
@@ -183,7 +180,12 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
return ERR_PTR(-EINVAL);
}
- dcss = kzalloc(sizeof(*dcss), GFP_KERNEL);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res), "dcss")) {
+ dev_err(dev, "cannot request memory region\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ dcss = devm_kzalloc(dev, sizeof(*dcss), GFP_KERNEL);
if (!dcss)
return ERR_PTR(-ENOMEM);
@@ -194,7 +196,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_clks_init(dcss);
if (ret) {
dev_err(dev, "clocks initialization failed\n");
- goto err;
+ return ERR_PTR(ret);
}
dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
@@ -226,9 +228,6 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
clks_err:
dcss_clks_release(dcss);
-err:
- kfree(dcss);
-
return ERR_PTR(ret);
}
@@ -246,8 +245,6 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_submodules_stop(dcss);
dcss_clks_release(dcss);
-
- kfree(dcss);
}
static int dcss_dev_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
index f27b87c09599..b032e873d227 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -104,7 +104,6 @@ extern const struct dev_pm_ops dcss_dev_pm_ops;
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
-void dcss_blkctl_exit(struct dcss_blkctl *blkctl);
/* CTXLD */
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dpr.c b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
index df9dab949bf2..072eb209249f 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dpr.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
@@ -135,7 +135,7 @@ static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
ch->base_ofs = dpr_base + i * 0x1000;
- ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
+ ch->base_reg = devm_ioremap(dpr->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
i);
@@ -155,7 +155,7 @@ int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
{
struct dcss_dpr *dpr;
- dpr = kzalloc(sizeof(*dpr), GFP_KERNEL);
+ dpr = devm_kzalloc(dcss->dev, sizeof(*dpr), GFP_KERNEL);
if (!dpr)
return -ENOMEM;
@@ -164,18 +164,8 @@ int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
dpr->ctxld = dcss->ctxld;
dpr->ctx_id = CTX_SB_HP;
- if (dcss_dpr_ch_init_all(dpr, dpr_base)) {
- int i;
-
- for (i = 0; i < 3; i++) {
- if (dpr->ch[i].base_reg)
- iounmap(dpr->ch[i].base_reg);
- }
-
- kfree(dpr);
-
+ if (dcss_dpr_ch_init_all(dpr, dpr_base))
return -ENOMEM;
- }
return 0;
}
@@ -189,12 +179,7 @@ void dcss_dpr_exit(struct dcss_dpr *dpr)
struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
-
- if (ch->base_reg)
- iounmap(ch->base_reg);
}
-
- kfree(dpr);
}
static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index ad5f29ea8f6a..d881f5a34760 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -51,15 +51,13 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
of_node_put(remote);
- mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL);
+ mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
mdrv->dcss = dcss_dev_create(dev, hdmi_output);
- if (IS_ERR(mdrv->dcss)) {
- err = PTR_ERR(mdrv->dcss);
- goto err;
- }
+ if (IS_ERR(mdrv->dcss))
+ return PTR_ERR(mdrv->dcss);
dev_set_drvdata(dev, mdrv);
@@ -75,8 +73,6 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
-err:
- kfree(mdrv);
return err;
}
@@ -86,8 +82,6 @@ static void dcss_drv_platform_remove(struct platform_device *pdev)
dcss_kms_detach(mdrv->kms);
dcss_dev_destroy(mdrv->dcss);
-
- kfree(mdrv);
}
static void dcss_drv_platform_shutdown(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
index 30de00540f63..2968f5d5bd41 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dtg.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
@@ -152,7 +152,7 @@ int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
int ret = 0;
struct dcss_dtg *dtg;
- dtg = kzalloc(sizeof(*dtg), GFP_KERNEL);
+ dtg = devm_kzalloc(dcss->dev, sizeof(*dtg), GFP_KERNEL);
if (!dtg)
return -ENOMEM;
@@ -160,11 +160,10 @@ int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
dtg->dev = dcss->dev;
dtg->ctxld = dcss->ctxld;
- dtg->base_reg = ioremap(dtg_base, SZ_4K);
+ dtg->base_reg = devm_ioremap(dtg->dev, dtg_base, SZ_4K);
if (!dtg->base_reg) {
- dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
- ret = -ENOMEM;
- goto err_ioremap;
+ dev_err(dtg->dev, "dtg: unable to remap dtg base\n");
+ return -ENOMEM;
}
dtg->base_ofs = dtg_base;
@@ -175,17 +174,7 @@ int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
- ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
- if (ret)
- goto err_irq;
-
- return 0;
-
-err_irq:
- iounmap(dtg->base_reg);
-
-err_ioremap:
- kfree(dtg);
+ ret = dcss_dtg_irq_config(dtg, to_platform_device(dtg->dev));
return ret;
}
@@ -193,11 +182,6 @@ err_ioremap:
void dcss_dtg_exit(struct dcss_dtg *dtg)
{
free_irq(dtg->ctxld_kick_irq, dtg);
-
- if (dtg->base_reg)
- iounmap(dtg->base_reg);
-
- kfree(dtg);
}
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
index 47852b9dd5ea..825728c356ff 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -302,7 +302,7 @@ static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
ch->base_ofs = scaler_base + i * 0x400;
- ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
+ ch->base_reg = devm_ioremap(scl->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(scl->dev, "scaler: unable to remap ch base\n");
return -ENOMEM;
@@ -318,7 +318,7 @@ int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
{
struct dcss_scaler *scaler;
- scaler = kzalloc(sizeof(*scaler), GFP_KERNEL);
+ scaler = devm_kzalloc(dcss->dev, sizeof(*scaler), GFP_KERNEL);
if (!scaler)
return -ENOMEM;
@@ -327,18 +327,8 @@ int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
scaler->ctxld = dcss->ctxld;
scaler->ctx_id = CTX_SB_HP;
- if (dcss_scaler_ch_init_all(scaler, scaler_base)) {
- int i;
-
- for (i = 0; i < 3; i++) {
- if (scaler->ch[i].base_reg)
- iounmap(scaler->ch[i].base_reg);
- }
-
- kfree(scaler);
-
+ if (dcss_scaler_ch_init_all(scaler, scaler_base))
return -ENOMEM;
- }
return 0;
}
@@ -351,12 +341,7 @@ void dcss_scaler_exit(struct dcss_scaler *scl)
struct dcss_scaler_ch *ch = &scl->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
-
- if (ch->base_reg)
- iounmap(ch->base_reg);
}
-
- kfree(scl);
}
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ss.c b/drivers/gpu/drm/imx/dcss/dcss-ss.c
index 8ddf08da911b..0df81866fb7b 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-ss.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-ss.c
@@ -83,7 +83,7 @@ int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
{
struct dcss_ss *ss;
- ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+ ss = devm_kzalloc(dcss->dev, sizeof(*ss), GFP_KERNEL);
if (!ss)
return -ENOMEM;
@@ -91,10 +91,9 @@ int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
ss->dev = dcss->dev;
ss->ctxld = dcss->ctxld;
- ss->base_reg = ioremap(ss_base, SZ_4K);
+ ss->base_reg = devm_ioremap(ss->dev, ss_base, SZ_4K);
if (!ss->base_reg) {
- dev_err(dcss->dev, "ss: unable to remap ss base\n");
- kfree(ss);
+ dev_err(ss->dev, "ss: unable to remap ss base\n");
return -ENOMEM;
}
@@ -108,11 +107,6 @@ void dcss_ss_exit(struct dcss_ss *ss)
{
/* stop SS */
dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
-
- if (ss->base_reg)
- iounmap(ss->base_reg);
-
- kfree(ss);
}
void dcss_ss_subsam_set(struct dcss_ss *ss)
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 53840ab054c7..71d70194fcbd 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -655,7 +655,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
for (i = 0; i < 4; i++) {
char clkname[16];
- sprintf(clkname, "di%d_sel", i);
+ snprintf(clkname, sizeof(clkname), "di%d_sel", i);
imx_ldb->clk_sel[i] = devm_clk_get(imx_ldb->dev, clkname);
if (IS_ERR(imx_ldb->clk_sel[i])) {
ret = PTR_ERR(imx_ldb->clk_sel[i]);
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index b440e0cdc057..3db117c5edd9 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -11,7 +11,6 @@ config DRM_INGENIC
select DRM_GEM_DMA_HELPER
select REGMAP
select REGMAP_MMIO
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Ingenic SoCs.
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 8389f2d7d021..0e668fc1e0f9 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -19,7 +19,7 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
kref_init(&ctx->refcnt);
for (i = 0; i < lima_pipe_num; i++) {
- err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
+ err = lima_sched_context_init(dev->pipe + i, ctx->context + i);
if (err)
goto err_out0;
}
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
index 74e2be09090f..5b1063ce968b 100644
--- a/drivers/gpu/drm/lima/lima_ctx.h
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -13,7 +13,6 @@ struct lima_ctx {
struct kref refcnt;
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
- atomic_t guilty;
/* debug info */
char pname[TASK_COMM_LEN];
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4f9736e5f929..7ea244d876ca 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
} else {
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
if (!bo->base.sgt) {
- sg_free_table(&sgt);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_out0;
}
}
ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
- if (ret) {
- sg_free_table(&sgt);
- kfree(bo->base.sgt);
- bo->base.sgt = NULL;
- return ret;
- }
+ if (ret)
+ goto err_out1;
*bo->base.sgt = sgt;
if (vm) {
ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
if (ret)
- return ret;
+ goto err_out2;
}
bo->heap_size = new_size;
return 0;
+
+err_out2:
+ dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+err_out1:
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+err_out0:
+ sg_free_table(&sgt);
+ return ret;
}
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
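The lima_heap_alloc() hunk above collapses duplicated failure handling into a single goto unwind ladder, releasing resources in reverse order of acquisition. A minimal sketch of that idiom with hypothetical resources:
#include <linux/slab.h>

static int demo_setup(void **a, void **b)
{
	int ret;

	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kzalloc(64, GFP_KERNEL);
	if (!*b) {
		ret = -ENOMEM;
		goto err_free_a;	/* undo only what already succeeded */
	}

	return 0;

err_free_a:
	kfree(*a);
	*a = NULL;
	return ret;
}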
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index 8dd501b7a3d0..6b354e2fb61d 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -34,11 +34,11 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
if (state & LIMA_GP_IRQ_MASK_ERROR) {
if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
- dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
- status);
+ dev_dbg(dev->dev, "%s out of heap irq status=%x\n",
+ lima_ip_name(ip), status);
} else {
- dev_err(dev->dev, "gp error irq state=%x status=%x\n",
- state, status);
+ dev_err(dev->dev, "%s error irq state=%x status=%x\n",
+ lima_ip_name(ip), state, status);
if (task)
task->recoverable = false;
}
@@ -89,7 +89,8 @@ static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
v & LIMA_GP_IRQ_RESET_COMPLETED,
0, 100);
if (err) {
- dev_err(dev->dev, "gp soft reset time out\n");
+ dev_err(dev->dev, "%s soft reset time out\n",
+ lima_ip_name(ip));
return err;
}
@@ -166,6 +167,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
gp_write(LIMA_GP_CMD, cmd);
}
+static int lima_gp_bus_stop_poll(struct lima_ip *ip)
+{
+ return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
+}
+
static int lima_gp_hard_reset_poll(struct lima_ip *ip)
{
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
@@ -179,16 +185,30 @@ static int lima_gp_hard_reset(struct lima_ip *ip)
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
gp_write(LIMA_GP_INT_MASK, 0);
+
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
+ ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
+ return ret;
+ }
gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
if (ret) {
- dev_err(dev->dev, "gp hard reset timeout\n");
+ dev_err(dev->dev, "%s hard reset timeout\n", lima_ip_name(ip));
return ret;
}
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+
+ /*
+ * if there was an async soft reset queued,
+ * don't wait for it in the next job
+ */
+ ip->data.async_reset = false;
+
return 0;
}
@@ -201,8 +221,9 @@ static void lima_gp_task_error(struct lima_sched_pipe *pipe)
{
struct lima_ip *ip = pipe->processor[0];
- dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
- gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));
+ dev_err(ip->dev->dev, "%s task error int_state=%x status=%x\n",
+ lima_ip_name(ip), gp_read(LIMA_GP_INT_STAT),
+ gp_read(LIMA_GP_STATUS));
lima_gp_hard_reset(ip);
}
@@ -305,7 +326,7 @@ int lima_gp_init(struct lima_ip *ip)
err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "gp %s fail to request irq\n",
+ dev_err(dev->dev, "%s fail to request irq\n",
lima_ip_name(ip));
return err;
}
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
index c4080a02957b..184106ce55f8 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.c
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -21,7 +21,8 @@ static int lima_l2_cache_wait_idle(struct lima_ip *ip)
!(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
0, 1000);
if (err) {
- dev_err(dev->dev, "l2 cache wait command timeout\n");
+ dev_err(dev->dev, "%s wait command timeout\n",
+ lima_ip_name(ip));
return err;
}
return 0;
@@ -83,7 +84,8 @@ int lima_l2_cache_init(struct lima_ip *ip)
spin_lock_init(&ip->data.lock);
size = l2_cache_read(LIMA_L2_CACHE_SIZE);
- dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ dev_info(dev->dev, "%s %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ lima_ip_name(ip),
1 << (((size >> 16) & 0xff) - 10),
1 << ((size >> 8) & 0xff),
1 << (size & 0xff),
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index a1ae6c252dc2..e18317c5ca8c 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -22,7 +22,8 @@
cond, 0, 100); \
if (__ret) \
dev_err(dev->dev, \
- "mmu command %x timeout\n", cmd); \
+ "%s command %x timeout\n", \
+ lima_ip_name(ip), cmd); \
__ret; \
})
@@ -40,14 +41,13 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
if (status & LIMA_MMU_INT_PAGE_FAULT) {
u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);
- dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
- fault, LIMA_MMU_STATUS_BUS_ID(status),
- status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
- lima_ip_name(ip));
+ dev_err(dev->dev, "%s page fault at 0x%x from bus id %d of type %s\n",
+ lima_ip_name(ip), fault, LIMA_MMU_STATUS_BUS_ID(status),
+ status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read");
}
if (status & LIMA_MMU_INT_READ_BUS_ERROR)
- dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s irq bus error\n", lima_ip_name(ip));
/* mask all interrupts before resume */
mmu_write(LIMA_MMU_INT_MASK, 0);
@@ -102,14 +102,14 @@ int lima_mmu_init(struct lima_ip *ip)
mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
- dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s dte write test fail\n", lima_ip_name(ip));
return -EIO;
}
err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip));
return err;
}
@@ -152,7 +152,7 @@ void lima_mmu_page_fault_resume(struct lima_ip *ip)
u32 v;
if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
- dev_info(dev->dev, "mmu resume\n");
+ dev_info(dev->dev, "%s resume\n", lima_ip_name(ip));
mmu_write(LIMA_MMU_INT_MASK, 0);
mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
index e397e1146e96..113cb9b215cd 100644
--- a/drivers/gpu/drm/lima/lima_pmu.c
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -21,7 +21,8 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
v, v & LIMA_PMU_INT_CMD_MASK,
100, 100000);
if (err) {
- dev_err(dev->dev, "timeout wait pmu cmd\n");
+ dev_err(dev->dev, "%s timeout wait pmu cmd\n",
+ lima_ip_name(ip));
return err;
}
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index a5c95bed08c0..d0d2db0ef1ce 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -26,8 +26,8 @@ static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
if (state & LIMA_PP_IRQ_MASK_ERROR) {
u32 status = pp_read(LIMA_PP_STATUS);
- dev_err(dev->dev, "pp error irq state=%x status=%x\n",
- state, status);
+ dev_err(dev->dev, "%s error irq state=%x status=%x\n",
+ lima_ip_name(ip), state, status);
pipe->error = true;
@@ -125,7 +125,7 @@ static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
if (ret) {
- dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
+ dev_err(dev->dev, "%s reset time out\n", lima_ip_name(ip));
return ret;
}
@@ -168,6 +168,11 @@ static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
}
}
+static int lima_pp_bus_stop_poll(struct lima_ip *ip)
+{
+ return !!(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_BUS_STOPPED);
+}
+
static int lima_pp_hard_reset_poll(struct lima_ip *ip)
{
pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
@@ -181,16 +186,31 @@ static int lima_pp_hard_reset(struct lima_ip *ip)
pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
pp_write(LIMA_PP_INT_MASK, 0);
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_STOP_BUS);
+ ret = lima_poll_timeout(ip, lima_pp_bus_stop_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
+ return ret;
+ }
+
pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
if (ret) {
- dev_err(dev->dev, "pp hard reset timeout\n");
+ dev_err(dev->dev, "%s hard reset timeout\n", lima_ip_name(ip));
return ret;
}
pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+
+ /*
+ * if there was an async soft reset queued,
+ * don't wait for it in the next job
+ */
+ ip->data.async_reset = false;
+
return 0;
}
@@ -254,7 +274,7 @@ int lima_pp_init(struct lima_ip *ip)
err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "pp %s fail to request irq\n",
+ dev_err(dev->dev, "%s fail to request irq\n",
lima_ip_name(ip));
return err;
}
@@ -289,7 +309,7 @@ int lima_pp_bcast_init(struct lima_ip *ip)
err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
- dev_err(dev->dev, "pp %s fail to request irq\n",
+ dev_err(dev->dev, "%s fail to request irq\n",
lima_ip_name(ip));
return err;
}
@@ -403,8 +423,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
for (i = 0; i < pipe->num_processor; i++) {
struct lima_ip *ip = pipe->processor[i];
- dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
- i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));
+ dev_err(ip->dev->dev, "%s task error %d int_state=%x status=%x\n",
+ lima_ip_name(ip), i, pp_read(LIMA_PP_INT_STATUS),
+ pp_read(LIMA_PP_STATUS));
lima_pp_hard_reset(ip);
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index c3bf8cda8498..00b19adfc888 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+#include <linux/hardirq.h>
#include <linux/iosys-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
@@ -153,13 +154,12 @@ void lima_sched_task_fini(struct lima_sched_task *task)
}
int lima_sched_context_init(struct lima_sched_pipe *pipe,
- struct lima_sched_context *context,
- atomic_t *guilty)
+ struct lima_sched_context *context)
{
struct drm_gpu_scheduler *sched = &pipe->base;
return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, guilty);
+ &sched, 1, NULL);
}
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
@@ -401,9 +401,35 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
struct lima_device *ldev = pipe->ldev;
+ struct lima_ip *ip = pipe->processor[0];
+ int i;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (dma_fence_is_signaled(task->fence)) {
+ DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+
+ /*
+ * Lima IRQ handler may take a long time to process an interrupt
+ * if there is another IRQ handler hogging the processing.
+ * In order to catch such cases and not report spurious Lima job
+ * timeouts, synchronize the IRQ handler and re-check the fence
+ * status.
+ */
+ for (i = 0; i < pipe->num_processor; i++)
+ synchronize_irq(pipe->processor[i]->irq);
+
+ if (dma_fence_is_signaled(task->fence)) {
+ DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
if (!pipe->error)
- DRM_ERROR("lima job timeout\n");
+ DRM_ERROR("%s job timeout\n", lima_ip_name(ip));
drm_sched_stop(&pipe->base, &task->base);
@@ -417,8 +443,6 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
if (pipe->bcast_mmu)
lima_mmu_page_fault_resume(pipe->bcast_mmu);
else {
- int i;
-
for (i = 0; i < pipe->num_mmu; i++)
lima_mmu_page_fault_resume(pipe->mmu[i]);
}
@@ -481,7 +505,7 @@ static void lima_sched_recover_work(struct work_struct *work)
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
- lima_sched_timeout_ms : 500;
+ lima_sched_timeout_ms : 10000;
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 6a11764d87b3..6bd4f3b70109 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -91,8 +91,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
- struct lima_sched_context *context,
- atomic_t *guilty);
+ struct lima_sched_context *context);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index 89ccc0c43169..d8ff60b46abe 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -184,7 +184,7 @@ static int lsdc_get_dedicated_vram(struct lsdc_device *ldev,
drm_info(ddev, "Dedicated vram start: 0x%llx, size: %uMiB\n",
(u64)base, (u32)(size >> 20));
- return 0;
+ return (size > SZ_1M) ? 0 : -ENODEV;
}
static struct lsdc_device *
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index bf79dc55afa4..465f622ac05d 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -54,7 +54,6 @@ static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
pflags |= TTM_PL_FLAG_TOPDOWN;
lbo->placement.placement = lbo->placements;
- lbo->placement.busy_placement = lbo->placements;
if (domain & LSDC_GEM_DOMAIN_VRAM) {
lbo->placements[c].mem_type = TTM_PL_VRAM;
@@ -77,7 +76,6 @@ static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
}
lbo->placement.num_placement = c;
- lbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
lbo->placements[i].fpfn = 0;
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index 4f3d68e11bc1..907460b69d4f 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -11,7 +11,6 @@ config DRM_MCDE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the ST-Ericsson MCDE
Multi-Channel Display Engine.
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 74fa56339383..90e64467ea8f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -73,6 +73,8 @@ void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int
struct cmdq_pkt *cmdq_pkt);
void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt);
void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt);
+enum drm_mode_status mtk_merge_mode_valid(struct device *dev,
+ const struct drm_display_mode *mode);
void mtk_ovl_bgclr_in_on(struct device *dev);
void mtk_ovl_bgclr_in_off(struct device *dev);
@@ -131,6 +133,8 @@ unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev);
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev);
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev);
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev);
+enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
+ const struct drm_display_mode *mode);
void mtk_rdma_bypass_shadow(struct device *dev);
int mtk_rdma_clk_enable(struct device *dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 22f768d923d5..32a29924bd54 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -222,6 +222,71 @@ void mtk_merge_clk_disable(struct device *dev)
clk_disable_unprepare(priv->clk);
}
+enum drm_mode_status mtk_merge_mode_valid(struct device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct mtk_disp_merge *priv = dev_get_drvdata(dev);
+ unsigned long rate;
+
+ rate = clk_get_rate(priv->clk);
+
+ /* Convert to kHz and round the number */
+ rate = (rate + 500) / 1000;
+
+ if (rate && mode->clock > rate) {
+ dev_dbg(dev, "invalid clock: %d (>%lu)\n", mode->clock, rate);
+ return MODE_CLOCK_HIGH;
+ }
+
+ /*
+ * Measure the bandwidth requirement of hardware prefetch (per frame)
+ *
+ * let N = prefetch buffer size in lines
+ * (ex. N=3, then prefetch buffer size = 3 lines)
+ *
+ * prefetch size = htotal * N (pixels)
+ * time per line = 1 / fps / vtotal (seconds)
+ * duration = vbp * time per line
+ * = vbp / fps / vtotal
+ *
+ * data rate = prefetch size / duration
+ * = htotal * N / (vbp / fps / vtotal)
+ * = htotal * vtotal * fps * N / vbp
+ * = clk * N / vbp (pixels per second)
+ *
+ * Say 4K60 (CEA-861) is the maximum mode supported by the SoC
+ * data rate = 594000K * N / 72 = 8250 (standard)
+ * (remove K * N due to the same unit)
+ *
+ * For 2560x1440@144 (clk=583600K, vbp=17):
+ * data rate = 583600 / 17 ~= 34329 > 8250 (NG)
+ *
+ * For 2560x1440@120 (clk=497760K, vbp=77):
+ * data rate = 497760 / 77 ~= 6464 < 8250 (OK)
+ *
+ * A non-standard 4K60 timing (clk=521280K, vbp=54)
+ * data rate = 521280 / 54 ~= 9653 > 8250 (NG)
+ *
+ * Bandwidth requirement of hardware prefetch increases significantly
+ * when the VBP decreases (more than 4x in this example).
+ *
+ * The proposed formula is only one way to estimate whether our SoC
+ * supports the mode setting. The basic idea behind it is just to check
+ * if the data rate requirement is too high (directly proportional to
+ * pixel clock, inversely proportional to vbp). Please adjust the
+ * function if it doesn't fit your situation in the future.
+ */
+ rate = mode->clock / (mode->vtotal - mode->vsync_end);
+
+ if (rate > 8250) {
+ dev_dbg(dev, "invalid rate: %lu (>8250): " DRM_MODE_FMT "\n",
+ rate, DRM_MODE_ARG(mode));
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
static int mtk_disp_merge_bind(struct device *dev, struct device *master,
void *data)
{
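The prefetch comment above bounds a mode by data rate = clock / vbp, with vbp = vtotal - vsync_end. A worked instance of that check, reusing the non-standard 4K60 numbers quoted in the comment (the helper name is illustrative):
/* Values from the comment: clk = 521280 kHz, vbp = 54 lines. */
static bool demo_prefetch_rate_ok(unsigned long clock_khz, unsigned long vbp)
{
	/* data rate in the comment's normalized units: clk / vbp */
	return clock_khz / vbp <= 8250;
}

/* demo_prefetch_rate_ok(521280, 54) is false: 521280 / 54 ~= 9653 > 8250. */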
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 12a37f740bf4..034d31824d4d 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -30,6 +30,7 @@ enum mtk_ovl_adaptor_comp_type {
OVL_ADAPTOR_TYPE_ETHDR,
OVL_ADAPTOR_TYPE_MDP_RDMA,
OVL_ADAPTOR_TYPE_MERGE,
+ OVL_ADAPTOR_TYPE_PADDING,
OVL_ADAPTOR_TYPE_NUM,
};
@@ -47,6 +48,14 @@ enum mtk_ovl_adaptor_comp_id {
OVL_ADAPTOR_MERGE1,
OVL_ADAPTOR_MERGE2,
OVL_ADAPTOR_MERGE3,
+ OVL_ADAPTOR_PADDING0,
+ OVL_ADAPTOR_PADDING1,
+ OVL_ADAPTOR_PADDING2,
+ OVL_ADAPTOR_PADDING3,
+ OVL_ADAPTOR_PADDING4,
+ OVL_ADAPTOR_PADDING5,
+ OVL_ADAPTOR_PADDING6,
+ OVL_ADAPTOR_PADDING7,
OVL_ADAPTOR_ID_MAX
};
@@ -67,6 +76,7 @@ static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = {
[OVL_ADAPTOR_TYPE_ETHDR] = "ethdr",
[OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma",
[OVL_ADAPTOR_TYPE_MERGE] = "merge",
+ [OVL_ADAPTOR_TYPE_PADDING] = "padding",
};
static const struct mtk_ddp_comp_funcs ethdr = {
@@ -79,6 +89,14 @@ static const struct mtk_ddp_comp_funcs ethdr = {
static const struct mtk_ddp_comp_funcs merge = {
.clk_enable = mtk_merge_clk_enable,
.clk_disable = mtk_merge_clk_disable,
+ .mode_valid = mtk_merge_mode_valid,
+};
+
+static const struct mtk_ddp_comp_funcs padding = {
+ .clk_enable = mtk_padding_clk_enable,
+ .clk_disable = mtk_padding_clk_disable,
+ .start = mtk_padding_start,
+ .stop = mtk_padding_stop,
};
static const struct mtk_ddp_comp_funcs rdma = {
@@ -102,6 +120,14 @@ static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = {
[OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge },
[OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge },
[OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge },
+ [OVL_ADAPTOR_PADDING0] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING0, 0, &padding },
+ [OVL_ADAPTOR_PADDING1] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING1, 1, &padding },
+ [OVL_ADAPTOR_PADDING2] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING2, 2, &padding },
+ [OVL_ADAPTOR_PADDING3] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING3, 3, &padding },
+ [OVL_ADAPTOR_PADDING4] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING4, 4, &padding },
+ [OVL_ADAPTOR_PADDING5] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING5, 5, &padding },
+ [OVL_ADAPTOR_PADDING6] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING6, 6, &padding },
+ [OVL_ADAPTOR_PADDING7] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING7, 7, &padding },
};
void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx,
@@ -317,6 +343,22 @@ void mtk_ovl_adaptor_clk_disable(struct device *dev)
}
}
+enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
+ const struct drm_display_mode *mode)
+
+{
+ int i;
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ dev = ovl_adaptor->ovl_adaptor_comp[i];
+ if (!dev || !comp_matches[i].funcs->mode_valid)
+ continue;
+ return comp_matches[i].funcs->mode_valid(dev, mode);
+ }
+ return MODE_OK;
+}
+
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev)
{
return MTK_OVL_ADAPTOR_LAYER_NUM;
@@ -437,6 +479,7 @@ static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
}
static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
+ { .compatible = "mediatek,mt8188-disp-padding", .data = (void *)OVL_ADAPTOR_TYPE_PADDING },
{ .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR },
{ .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE },
{ .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA },
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 2136a596efa1..0ba72102636a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2042,12 +2042,12 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
return ret;
}
-static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
bool enabled = mtk_dp->enabled;
- struct edid *new_edid = NULL;
+ const struct drm_edid *drm_edid;
struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
if (!enabled) {
@@ -2055,7 +2055,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
mtk_dp_aux_panel_poweron(mtk_dp, true);
}
- new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
+ drm_edid = drm_edid_read_ddc(connector, &mtk_dp->aux.ddc);
/*
* Parse capability here to let atomic_get_input_bus_fmts and
@@ -2063,17 +2063,26 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
*/
if (mtk_dp_parse_capabilities(mtk_dp)) {
drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
- kfree(new_edid);
- new_edid = NULL;
+ drm_edid_free(drm_edid);
+ drm_edid = NULL;
}
- if (new_edid) {
+ if (drm_edid) {
+ /*
+ * FIXME: get rid of drm_edid_raw()
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
struct cea_sad *sads;
- audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
+ audio_caps->sad_count = drm_edid_to_sad(edid, &sads);
kfree(sads);
- audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ /*
+ * FIXME: This should use connector->display_info.has_audio from
+ * a path that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ audio_caps->detect_monitor = drm_detect_monitor_audio(edid);
}
if (!enabled) {
@@ -2081,7 +2090,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
- return new_edid;
+ return drm_edid;
}
static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
@@ -2433,7 +2442,7 @@ static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
.atomic_enable = mtk_dp_bridge_atomic_enable,
.atomic_disable = mtk_dp_bridge_atomic_disable,
.mode_valid = mtk_dp_bridge_mode_valid,
- .get_edid = mtk_dp_get_edid,
+ .edid_read = mtk_dp_edid_read,
.detect = mtk_dp_bdg_detect,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index c729af3b9822..a04499c4f9ca 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -95,11 +95,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
- drm_crtc_vblank_put(crtc);
- mtk_crtc->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ if (mtk_crtc->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+ drm_crtc_vblank_put(crtc);
+ mtk_crtc->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
}
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
@@ -213,6 +215,22 @@ static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
kfree(to_mtk_crtc_state(state));
}
+static enum drm_mode_status
+mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ enum drm_mode_status status = MODE_OK;
+ int i;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode);
+ if (status != MODE_OK)
+ break;
+ }
+ return status;
+}
+
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -831,6 +849,7 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
.mode_fixup = mtk_drm_crtc_mode_fixup,
.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
+ .mode_valid = mtk_drm_crtc_mode_valid,
.atomic_begin = mtk_drm_crtc_atomic_begin,
.atomic_flush = mtk_drm_crtc_atomic_flush,
.atomic_enable = mtk_drm_crtc_atomic_enable,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index a9b5a21cde2d..a515e96cfefc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -418,6 +418,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.remove = mtk_ovl_adaptor_remove_comp,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
+ .mode_valid = mtk_ovl_adaptor_mode_valid,
};
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 15b2eafff438..93d79a1366e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -12,6 +12,8 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
+#include <drm/drm_modes.h>
+
struct device;
struct device_node;
struct drm_crtc;
@@ -85,6 +87,7 @@ struct mtk_ddp_comp_funcs {
void (*add)(struct device *dev, struct mtk_mutex *mutex);
void (*remove)(struct device *dev, struct mtk_mutex *mutex);
unsigned int (*encoder_index)(struct device *dev);
+ enum drm_mode_status (*mode_valid)(struct device *dev, const struct drm_display_mode *mode);
};
struct mtk_ddp_comp {
@@ -126,6 +129,15 @@ static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp)
comp->funcs->clk_disable(comp->dev);
}
+static inline
+enum drm_mode_status mtk_ddp_comp_mode_valid(struct mtk_ddp_comp *comp,
+ const struct drm_display_mode *mode)
+{
+ if (comp && comp->funcs && comp->funcs->mode_valid)
+ return comp->funcs->mode_valid(comp->dev, mode);
+ return MODE_OK;
+}
+
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
unsigned int vrefresh, unsigned int bpc,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 14a1e0157cc4..74832c213092 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -293,7 +293,7 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
.main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
.conn_routes = mt8188_mtk_ddp_main_routes,
.num_conn_routes = ARRAY_SIZE(mt8188_mtk_ddp_main_routes),
- .mmsys_dev_num = 1,
+ .mmsys_dev_num = 2,
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
@@ -334,6 +334,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt8186_mmsys_driver_data},
{ .compatible = "mediatek,mt8188-vdosys0",
.data = &mt8188_vdosys0_driver_data},
+ { .compatible = "mediatek,mt8188-vdosys1",
+ .data = &mt8195_vdosys1_driver_data},
{ .compatible = "mediatek,mt8192-mmsys",
.data = &mt8192_mmsys_driver_data},
{ .compatible = "mediatek,mt8195-mmsys",
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index a2fdfc8ddb15..9501f4019199 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
@@ -12,6 +13,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/units.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@@ -58,28 +60,31 @@
#define DSI_TXRX_CTRL 0x18
#define VC_NUM BIT(1)
-#define LANE_NUM (0xf << 2)
+#define LANE_NUM GENMASK(5, 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define TE_FREERUN BIT(8)
#define EXT_TE_EN BIT(9)
#define EXT_TE_EDGE BIT(10)
-#define MAX_RTN_SIZE (0xf << 12)
+#define MAX_RTN_SIZE GENMASK(15, 12)
#define HSTX_CKLP_EN BIT(16)
#define DSI_PSCTRL 0x1c
-#define DSI_PS_WC 0x3fff
-#define DSI_PS_SEL (3 << 16)
-#define PACKED_PS_16BIT_RGB565 (0 << 16)
-#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
-#define PACKED_PS_18BIT_RGB666 (2 << 16)
-#define PACKED_PS_24BIT_RGB888 (3 << 16)
+#define DSI_PS_WC GENMASK(13, 0)
+#define DSI_PS_SEL GENMASK(17, 16)
+#define PACKED_PS_16BIT_RGB565 0
+#define PACKED_PS_18BIT_RGB666 1
+#define LOOSELY_PS_24BIT_RGB666 2
+#define PACKED_PS_24BIT_RGB888 3
#define DSI_VSA_NL 0x20
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
+#define VACT_NL GENMASK(14, 0)
#define DSI_SIZE_CON 0x38
+#define DSI_HEIGHT GENMASK(30, 16)
+#define DSI_WIDTH GENMASK(14, 0)
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@@ -109,26 +114,27 @@
#define LD0_WAKEUP_EN BIT(2)
#define DSI_PHY_TIMECON0 0x110
-#define LPX (0xff << 0)
-#define HS_PREP (0xff << 8)
-#define HS_ZERO (0xff << 16)
-#define HS_TRAIL (0xff << 24)
+#define LPX GENMASK(7, 0)
+#define HS_PREP GENMASK(15, 8)
+#define HS_ZERO GENMASK(23, 16)
+#define HS_TRAIL GENMASK(31, 24)
#define DSI_PHY_TIMECON1 0x114
-#define TA_GO (0xff << 0)
-#define TA_SURE (0xff << 8)
-#define TA_GET (0xff << 16)
-#define DA_HS_EXIT (0xff << 24)
+#define TA_GO GENMASK(7, 0)
+#define TA_SURE GENMASK(15, 8)
+#define TA_GET GENMASK(23, 16)
+#define DA_HS_EXIT GENMASK(31, 24)
#define DSI_PHY_TIMECON2 0x118
-#define CONT_DET (0xff << 0)
-#define CLK_ZERO (0xff << 16)
-#define CLK_TRAIL (0xff << 24)
+#define CONT_DET GENMASK(7, 0)
+#define DA_HS_SYNC GENMASK(15, 8)
+#define CLK_ZERO GENMASK(23, 16)
+#define CLK_TRAIL GENMASK(31, 24)
#define DSI_PHY_TIMECON3 0x11c
-#define CLK_HS_PREP (0xff << 0)
-#define CLK_HS_POST (0xff << 8)
-#define CLK_HS_EXIT (0xff << 16)
+#define CLK_HS_PREP GENMASK(7, 0)
+#define CLK_HS_POST GENMASK(15, 8)
+#define CLK_HS_EXIT GENMASK(23, 16)
#define DSI_VM_CMD_CON 0x130
#define VM_CMD_EN BIT(0)
@@ -138,13 +144,14 @@
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
-#define CONFIG (0xff << 0)
+/* CMDQ related bits */
+#define CONFIG GENMASK(7, 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
#define BTA BIT(2)
-#define DATA_ID (0xff << 8)
-#define DATA_0 (0xff << 16)
-#define DATA_1 (0xff << 24)
+#define DATA_ID GENMASK(15, 8)
+#define DATA_0 GENMASK(23, 16)
+#define DATA_1 GENMASK(31, 24)
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
@@ -232,7 +239,7 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
+ u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
@@ -252,14 +259,23 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
timing->clk_hs_zero = timing->clk_hs_trail * 4;
timing->clk_hs_exit = 2 * timing->clk_hs_trail;
- timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
- timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
- timcon1 = timing->ta_go | timing->ta_sure << 8 |
- timing->ta_get << 16 | timing->da_hs_exit << 24;
- timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
- timing->clk_hs_trail << 24;
- timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
- timing->clk_hs_exit << 16;
+ timcon0 = FIELD_PREP(LPX, timing->lpx) |
+ FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
+ FIELD_PREP(HS_ZERO, timing->da_hs_zero) |
+ FIELD_PREP(HS_TRAIL, timing->da_hs_trail);
+
+ timcon1 = FIELD_PREP(TA_GO, timing->ta_go) |
+ FIELD_PREP(TA_SURE, timing->ta_sure) |
+ FIELD_PREP(TA_GET, timing->ta_get) |
+ FIELD_PREP(DA_HS_EXIT, timing->da_hs_exit);
+
+ timcon2 = FIELD_PREP(DA_HS_SYNC, 1) |
+ FIELD_PREP(CLK_ZERO, timing->clk_hs_zero) |
+ FIELD_PREP(CLK_TRAIL, timing->clk_hs_trail);
+
+ timcon3 = FIELD_PREP(CLK_HS_PREP, timing->clk_hs_prepare) |
+ FIELD_PREP(CLK_HS_POST, timing->clk_hs_post) |
+ FIELD_PREP(CLK_HS_EXIT, timing->clk_hs_exit);
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -350,101 +366,63 @@ static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
-static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
-{
- struct videomode *vm = &dsi->vm;
- u32 dsi_buf_bpp, ps_wc;
- u32 ps_bpp_mode;
-
- if (dsi->format == MIPI_DSI_FMT_RGB565)
- dsi_buf_bpp = 2;
- else
- dsi_buf_bpp = 3;
-
- ps_wc = vm->hactive * dsi_buf_bpp;
- ps_bpp_mode = ps_wc;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
- break;
- case MIPI_DSI_FMT_RGB666:
- ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
- break;
- case MIPI_DSI_FMT_RGB565:
- ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
- break;
- }
-
- writel(vm->vactive, dsi->regs + DSI_VACT_NL);
- writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
- writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
-}
-
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
- u32 tmp_reg;
+ u32 regval, tmp_reg = 0;
+ u8 i;
- switch (dsi->lanes) {
- case 1:
- tmp_reg = 1 << 2;
- break;
- case 2:
- tmp_reg = 3 << 2;
- break;
- case 3:
- tmp_reg = 7 << 2;
- break;
- case 4:
- tmp_reg = 0xf << 2;
- break;
- default:
- tmp_reg = 0xf << 2;
- break;
- }
+ /* Number of DSI lanes (max 4 lanes); each bit enables one DSI lane. */
+ for (i = 0; i < dsi->lanes; i++)
+ tmp_reg |= BIT(i);
+
+ regval = FIELD_PREP(LANE_NUM, tmp_reg);
if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
- tmp_reg |= HSTX_CKLP_EN;
+ regval |= HSTX_CKLP_EN;
if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
- tmp_reg |= DIS_EOT;
+ regval |= DIS_EOT;
- writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+ writel(regval, dsi->regs + DSI_TXRX_CTRL);
}
-static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
+static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact)
{
- u32 dsi_tmp_buf_bpp;
- u32 tmp_reg;
+ u32 dsi_buf_bpp, ps_val, ps_wc, vact_nl;
+
+ if (dsi->format == MIPI_DSI_FMT_RGB565)
+ dsi_buf_bpp = 2;
+ else
+ dsi_buf_bpp = 3;
+
+ /* Word count */
+ ps_wc = FIELD_PREP(DSI_PS_WC, dsi->vm.hactive * dsi_buf_bpp);
+ ps_val = ps_wc;
+ /* Pixel Stream type */
switch (dsi->format) {
+ default:
+ fallthrough;
case MIPI_DSI_FMT_RGB888:
- tmp_reg = PACKED_PS_24BIT_RGB888;
- dsi_tmp_buf_bpp = 3;
+ ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_24BIT_RGB888);
break;
case MIPI_DSI_FMT_RGB666:
- tmp_reg = LOOSELY_PS_18BIT_RGB666;
- dsi_tmp_buf_bpp = 3;
+ ps_val |= FIELD_PREP(DSI_PS_SEL, LOOSELY_PS_24BIT_RGB666);
break;
case MIPI_DSI_FMT_RGB666_PACKED:
- tmp_reg = PACKED_PS_18BIT_RGB666;
- dsi_tmp_buf_bpp = 3;
+ ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_18BIT_RGB666);
break;
case MIPI_DSI_FMT_RGB565:
- tmp_reg = PACKED_PS_16BIT_RGB565;
- dsi_tmp_buf_bpp = 2;
- break;
- default:
- tmp_reg = PACKED_PS_24BIT_RGB888;
- dsi_tmp_buf_bpp = 3;
+ ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_16BIT_RGB565);
break;
}
- tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
- writel(tmp_reg, dsi->regs + DSI_PSCTRL);
+ if (config_vact) {
+ vact_nl = FIELD_PREP(VACT_NL, dsi->vm.vactive);
+ writel(vact_nl, dsi->regs + DSI_VACT_NL);
+ writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
+ }
+ writel(ps_val, dsi->regs + DSI_PSCTRL);
}
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
@@ -471,7 +449,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
if (dsi->driver_data->has_size_ctl)
- writel(vm->vactive << 16 | vm->hactive,
+ writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
+ FIELD_PREP(DSI_WIDTH, vm->hactive),
dsi->regs + DSI_SIZE_CON);
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
@@ -520,7 +499,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
- mtk_dsi_ps_control(dsi);
+ mtk_dsi_ps_control(dsi, false);
}
static void mtk_dsi_start(struct mtk_dsi *dsi)
@@ -619,19 +598,12 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
if (++dsi->refcount != 1)
return 0;
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB565:
- bit_per_pixel = 16;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- bit_per_pixel = 18;
- break;
- case MIPI_DSI_FMT_RGB666:
- case MIPI_DSI_FMT_RGB888:
- default:
- bit_per_pixel = 24;
- break;
+ ret = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ if (ret < 0) {
+ dev_err(dev, "Unknown MIPI DSI format %d\n", dsi->format);
+ return ret;
}
+ bit_per_pixel = ret;
dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
dsi->lanes);
@@ -665,7 +637,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
- mtk_dsi_ps_control_vact(dsi);
+ mtk_dsi_ps_control(dsi, true);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
mtk_dsi_set_interrupt_enable(dsi);
@@ -814,12 +786,11 @@ mtk_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
- u32 bpp;
+ int bpp;
- if (dsi->format == MIPI_DSI_FMT_RGB565)
- bpp = 16;
- else
- bpp = 24;
+ bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ if (bpp < 0)
+ return MODE_ERROR;
if (mode->clock * bpp / dsi->lanes > 1500000)
return MODE_CLOCK_HIGH;
@@ -1135,67 +1106,47 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (!dsi)
return -ENOMEM;
- dsi->host.ops = &mtk_dsi_ops;
- dsi->host.dev = dev;
- ret = mipi_dsi_host_register(&dsi->host);
- if (ret < 0) {
- dev_err(dev, "failed to register DSI host: %d\n", ret);
- return ret;
- }
-
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
- if (IS_ERR(dsi->engine_clk)) {
- ret = PTR_ERR(dsi->engine_clk);
+ if (IS_ERR(dsi->engine_clk))
+ return dev_err_probe(dev, PTR_ERR(dsi->engine_clk),
+ "Failed to get engine clock\n");
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
- goto err_unregister_host;
- }
dsi->digital_clk = devm_clk_get(dev, "digital");
- if (IS_ERR(dsi->digital_clk)) {
- ret = PTR_ERR(dsi->digital_clk);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get digital clock: %d\n", ret);
- goto err_unregister_host;
- }
+ if (IS_ERR(dsi->digital_clk))
+ return dev_err_probe(dev, PTR_ERR(dsi->digital_clk),
+ "Failed to get digital clock\n");
dsi->hs_clk = devm_clk_get(dev, "hs");
- if (IS_ERR(dsi->hs_clk)) {
- ret = PTR_ERR(dsi->hs_clk);
- dev_err(dev, "Failed to get hs clock: %d\n", ret);
- goto err_unregister_host;
- }
+ if (IS_ERR(dsi->hs_clk))
+ return dev_err_probe(dev, PTR_ERR(dsi->hs_clk), "Failed to get hs clock\n");
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(dev, regs);
- if (IS_ERR(dsi->regs)) {
- ret = PTR_ERR(dsi->regs);
- dev_err(dev, "Failed to ioremap memory: %d\n", ret);
- goto err_unregister_host;
- }
+ if (IS_ERR(dsi->regs))
+ return dev_err_probe(dev, PTR_ERR(dsi->regs), "Failed to ioremap memory\n");
dsi->phy = devm_phy_get(dev, "dphy");
- if (IS_ERR(dsi->phy)) {
- ret = PTR_ERR(dsi->phy);
- dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
- goto err_unregister_host;
- }
+ if (IS_ERR(dsi->phy))
+ return dev_err_probe(dev, PTR_ERR(dsi->phy), "Failed to get MIPI-DPHY\n");
irq_num = platform_get_irq(pdev, 0);
- if (irq_num < 0) {
- ret = irq_num;
- goto err_unregister_host;
- }
+ if (irq_num < 0)
+ return irq_num;
+
+ dsi->host.ops = &mtk_dsi_ops;
+ dsi->host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register DSI host\n");
ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
if (ret) {
- dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
- goto err_unregister_host;
+ mipi_dsi_host_unregister(&dsi->host);
+ return dev_err_probe(&pdev->dev, ret, "Failed to request DSI irq\n");
}
init_waitqueue_head(&dsi->irq_wait_queue);
@@ -1207,10 +1158,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
return 0;
-
-err_unregister_host:
- mipi_dsi_host_unregister(&dsi->host);
- return ret;
}
static void mtk_dsi_remove(struct platform_device *pdev)
@@ -1249,17 +1196,12 @@ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
};
static const struct of_device_id mtk_dsi_of_match[] = {
- { .compatible = "mediatek,mt2701-dsi",
- .data = &mt2701_dsi_driver_data },
- { .compatible = "mediatek,mt8173-dsi",
- .data = &mt8173_dsi_driver_data },
- { .compatible = "mediatek,mt8183-dsi",
- .data = &mt8183_dsi_driver_data },
- { .compatible = "mediatek,mt8186-dsi",
- .data = &mt8186_dsi_driver_data },
- { .compatible = "mediatek,mt8188-dsi",
- .data = &mt8188_dsi_driver_data },
- { },
+ { .compatible = "mediatek,mt2701-dsi", .data = &mt2701_dsi_driver_data },
+ { .compatible = "mediatek,mt8173-dsi", .data = &mt8173_dsi_driver_data },
+ { .compatible = "mediatek,mt8183-dsi", .data = &mt8183_dsi_driver_data },
+ { .compatible = "mediatek,mt8186-dsi", .data = &mt8186_dsi_driver_data },
+ { .compatible = "mediatek,mt8188-dsi", .data = &mt8188_dsi_driver_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);
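Most of the mtk_dsi.c churn above is a mechanical move from open-coded shifts to GENMASK()/FIELD_PREP(). Below is a minimal, self-contained userspace sketch of the same packing scheme, using GCC/Clang-style stand-ins that only approximate the kernel's <linux/bits.h> and <linux/bitfield.h> helpers; the timing values are made up:

#include <stdint.h>
#include <stdio.h>

/* stand-ins approximating the kernel macros, adequate for 32-bit masks */
#define GENMASK(h, l)		((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define LPX		GENMASK(7, 0)
#define HS_PREP		GENMASK(15, 8)
#define HS_ZERO		GENMASK(23, 16)
#define HS_TRAIL	GENMASK(31, 24)

int main(void)
{
	/* pack four 8-bit PHY timing fields into one register word */
	uint32_t timcon0 = FIELD_PREP(LPX, 0x06) |
			   FIELD_PREP(HS_PREP, 0x07) |
			   FIELD_PREP(HS_ZERO, 0x0a) |
			   FIELD_PREP(HS_TRAIL, 0x08);

	printf("DSI_PHY_TIMECON0 = 0x%08x\n", timcon0); /* prints 0x080a0706 */
	return 0;
}

The appeal of the conversion is that each field's position lives only in its mask definition, so a register layout change touches one #define instead of every shift expression that uses it.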
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 86133bf16326..c6bdc565e4a9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1265,19 +1265,27 @@ static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridg
return mtk_hdmi_detect(hdmi);
}
-static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
if (!hdmi->ddc_adpt)
return NULL;
- edid = drm_get_edid(connector, hdmi->ddc_adpt);
- if (!edid)
- return NULL;
- hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
- return edid;
+ drm_edid = drm_edid_read_ddc(connector, hdmi->ddc_adpt);
+ if (drm_edid) {
+ /*
+ * FIXME: This should use !connector->display_info.has_audio (or
+ * !connector->display_info.is_hdmi) from a path that has read
+ * the EDID and called drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
+
+ hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
+ }
+
+ return drm_edid;
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
@@ -1417,7 +1425,7 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
.atomic_enable = mtk_hdmi_bridge_atomic_enable,
.detect = mtk_hdmi_bridge_detect,
- .get_edid = mtk_hdmi_bridge_get_edid,
+ .edid_read = mtk_hdmi_bridge_edid_read,
};
static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index cb674966e9ac..17a5cca007e2 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -312,7 +312,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
/* Encoder Initialization */
- ret = meson_encoder_cvbs_init(priv);
+ ret = meson_encoder_cvbs_probe(priv);
if (ret)
goto exit_afbcd;
@@ -326,12 +326,12 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
}
- ret = meson_encoder_hdmi_init(priv);
+ ret = meson_encoder_hdmi_probe(priv);
if (ret)
goto exit_afbcd;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- ret = meson_encoder_dsi_init(priv);
+ ret = meson_encoder_dsi_probe(priv);
if (ret)
goto exit_afbcd;
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 3f73b211fa8e..d1191de855d9 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -219,7 +219,7 @@ static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
};
-int meson_encoder_cvbs_init(struct meson_drm *priv)
+int meson_encoder_cvbs_probe(struct meson_drm *priv)
{
struct drm_device *drm = priv->drm;
struct meson_encoder_cvbs *meson_encoder_cvbs;
@@ -240,10 +240,9 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
- if (!meson_encoder_cvbs->next_bridge) {
- dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
- return -EPROBE_DEFER;
- }
+ if (!meson_encoder_cvbs->next_bridge)
+ return dev_err_probe(priv->dev, -EPROBE_DEFER,
+ "Failed to find CVBS Connector bridge\n");
/* CVBS Encoder Bridge */
meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
@@ -259,10 +258,9 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder,
DRM_MODE_ENCODER_TVDAC);
- if (ret) {
- dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to init CVBS encoder\n");
meson_encoder_cvbs->encoder.possible_crtcs = BIT(0);
@@ -276,10 +274,10 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
/* Initialize & attach Bridge Connector */
connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder);
- if (IS_ERR(connector)) {
- dev_err(priv->dev, "Unable to create CVBS bridge connector\n");
- return PTR_ERR(connector);
- }
+ if (IS_ERR(connector))
+ return dev_err_probe(priv->dev, PTR_ERR(connector),
+ "Unable to create CVBS bridge connector\n");
+
drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
@@ -294,6 +292,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}
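Both the mtk_dsi and meson hunks replace the log-then-goto pattern with dev_err_probe(), which prints the message (demoted to debug level for -EPROBE_DEFER, so deferred probes stop spamming the log) and returns the error code in a single statement. A hedged sketch of the idiom in isolation; foo_probe() and the "engine" clock name are placeholders, not part of the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	clk = devm_clk_get(dev, "engine");
	if (IS_ERR(clk))
		/* logs the failure and hands back PTR_ERR(clk) in one go */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get engine clock\n");

	return 0;
}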
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
index 09710fec3c66..7b7bc85c03f7 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
@@ -24,7 +24,7 @@ struct meson_cvbs_mode {
/* Modes supported by the CVBS output */
extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
-int meson_encoder_cvbs_init(struct meson_drm *priv);
+int meson_encoder_cvbs_probe(struct meson_drm *priv);
void meson_encoder_cvbs_remove(struct meson_drm *priv);
#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3f93c70488ca..7816902f5907 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -100,7 +100,7 @@ static const struct drm_bridge_funcs meson_encoder_dsi_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
};
-int meson_encoder_dsi_init(struct meson_drm *priv)
+int meson_encoder_dsi_probe(struct meson_drm *priv)
{
struct meson_encoder_dsi *meson_encoder_dsi;
struct device_node *remote;
@@ -118,10 +118,9 @@ int meson_encoder_dsi_init(struct meson_drm *priv)
}
meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote);
- if (!meson_encoder_dsi->next_bridge) {
- dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n");
- return -EPROBE_DEFER;
- }
+ if (!meson_encoder_dsi->next_bridge)
+ return dev_err_probe(priv->dev, -EPROBE_DEFER,
+ "Failed to find DSI transceiver bridge\n");
/* DSI Encoder Bridge */
meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
@@ -135,19 +134,17 @@ int meson_encoder_dsi_init(struct meson_drm *priv)
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder,
DRM_MODE_ENCODER_DSI);
- if (ret) {
- dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to init DSI encoder\n");
meson_encoder_dsi->encoder.possible_crtcs = BIT(0);
/* Attach DSI Encoder Bridge to Encoder */
ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0);
- if (ret) {
- dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to attach bridge\n");
/*
* We should have now in place:
@@ -168,6 +165,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
- drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.h b/drivers/gpu/drm/meson/meson_encoder_dsi.h
index 9277d7015193..85d5b61805f2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.h
@@ -7,7 +7,7 @@
#ifndef __MESON_ENCODER_DSI_H
#define __MESON_ENCODER_DSI_H
-int meson_encoder_dsi_init(struct meson_drm *priv);
+int meson_encoder_dsi_probe(struct meson_drm *priv);
void meson_encoder_dsi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_DSI_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 25ea76558690..0593a1cde906 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -323,19 +323,31 @@ static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- struct edid *edid;
if (!encoder_hdmi->cec_notifier)
return;
if (status == connector_status_connected) {
- edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector);
- if (!edid)
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
+
+ drm_edid = drm_bridge_edid_read(encoder_hdmi->next_bridge,
+ encoder_hdmi->connector);
+ if (!drm_edid)
return;
+ /*
+ * FIXME: The CEC physical address should be set using
+ * cec_notifier_set_phys_addr(encoder_hdmi->cec_notifier,
+ * connector->display_info.source_physical_address) from a path
+ * that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ edid = drm_edid_raw(drm_edid);
+
cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
- kfree(edid);
+ drm_edid_free(drm_edid);
} else
cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
}
@@ -354,7 +366,7 @@ static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
};
-int meson_encoder_hdmi_init(struct meson_drm *priv)
+int meson_encoder_hdmi_probe(struct meson_drm *priv)
{
struct meson_encoder_hdmi *meson_encoder_hdmi;
struct platform_device *pdev;
@@ -374,8 +386,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
if (!meson_encoder_hdmi->next_bridge) {
- dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
- ret = -EPROBE_DEFER;
+ ret = dev_err_probe(priv->dev, -EPROBE_DEFER,
+ "Failed to find HDMI transceiver bridge\n");
goto err_put_node;
}
@@ -393,7 +405,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
- dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
+ dev_err_probe(priv->dev, ret, "Failed to init HDMI encoder\n");
goto err_put_node;
}
@@ -403,7 +415,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
- dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
+ dev_err_probe(priv->dev, ret, "Failed to attach bridge\n");
goto err_put_node;
}
@@ -411,8 +423,9 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm,
&meson_encoder_hdmi->encoder);
if (IS_ERR(meson_encoder_hdmi->connector)) {
- dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
- ret = PTR_ERR(meson_encoder_hdmi->connector);
+ ret = dev_err_probe(priv->dev,
+ PTR_ERR(meson_encoder_hdmi->connector),
+ "Unable to create HDMI bridge connector\n");
goto err_put_node;
}
drm_connector_attach_encoder(meson_encoder_hdmi->connector,
@@ -474,6 +487,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
index a6cd38eb5f71..fd5485875db8 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
@@ -7,7 +7,7 @@
#ifndef __MESON_ENCODER_HDMI_H
#define __MESON_ENCODER_HDMI_H
-int meson_encoder_hdmi_init(struct meson_drm *priv);
+int meson_encoder_hdmi_probe(struct meson_drm *priv);
void meson_encoder_hdmi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_HDMI_H */
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b28c5e4828f4..5e4d48df4854 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -11,3 +11,15 @@ config DRM_MGAG200
MGA G200 desktop chips and the server variants. It requires 0.3.0
of the modesetting userspace driver, and a version of mga driver
that will fail on KMS enabled devices.
+
+config DRM_MGAG200_IOBURST_WORKAROUND
+ bool "Disable buffer caching"
+ depends on DRM_MGAG200 && PREEMPT_RT && X86
+ help
+ Enable a workaround to avoid I/O bursts within the mgag200 driver at
+ the expense of overall display performance.
+ It restores the pre-v5.10 behavior by mapping the framebuffer in system
+ RAM as write-combining and flushing the cache after each write.
+ This is only useful on x86_64 if you want to run processes with
+ deterministic latency.
+ If unsure, say N.
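As a hedged sketch only, the two mechanisms this option toggles look roughly like the mgag200 hunks that follow: keep the shmem shadow buffer write-combined and flush the touched cache lines after each damage copy. Everything here except drm_clflush_virt_range(), kzalloc() and the map_wc flag is an illustrative placeholder:

#include <linux/slab.h>
#include <drm/drm_cache.h>
#include <drm/drm_gem_shmem_helper.h>

/* allocation side: request a write-combined CPU mapping for the shadow
 * buffer (size is filled in by the GEM core, so it is unused here) */
static struct drm_gem_object *wc_create_object(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	shmem->map_wc = true;
	return &shmem->base;
}

/* update side: flush the CPU cache for the range that was just written;
 * per the Kconfig text above this trades display throughput for lower
 * worst-case latency */
static void wc_flush_damage(void *vaddr, unsigned long len)
{
	drm_clflush_virt_range(vaddr, len);
}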
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 2fb18b782b05..573dbe256aa8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -84,6 +84,20 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
+#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND)
+static struct drm_gem_object *mgag200_create_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem;
+
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return NULL;
+
+ shmem->map_wc = true;
+ return &shmem->base;
+}
+#endif
+
/*
* DRM driver
*/
@@ -99,6 +113,9 @@ static const struct drm_driver mgag200_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
+#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND)
+ .gem_create_object = mgag200_create_object,
+#endif
DRM_GEM_SHMEM_DRIVER_OPS,
};
@@ -146,14 +163,13 @@ int mgag200_device_preinit(struct mga_device *mdev)
}
mdev->vram_res = res;
- /* Don't fail on errors, but performance might be reduced. */
- devm_arch_io_reserve_memtype_wc(dev->dev, res->start, resource_size(res));
- devm_arch_phys_wc_add(dev->dev, res->start, resource_size(res));
-
- mdev->vram = devm_ioremap(dev->dev, res->start, resource_size(res));
+ mdev->vram = devm_ioremap_wc(dev->dev, res->start, resource_size(res));
if (!mdev->vram)
return -ENOMEM;
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_phys_wc_add(dev->dev, res->start, resource_size(res));
+
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 0f0d59938c3a..e17cb4c5f774 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,14 +13,15 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_cache.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
@@ -436,6 +437,13 @@ static void mgag200_handle_damage(struct mga_device *mdev, const struct iosys_ma
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, vmap, fb, clip);
+
+ /* Flushing the cache greatly improves latency on x86_64 */
+#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND)
+ if (!vmap->is_iomem)
+ drm_clflush_virt_range(vmap->vaddr + clip->y1 * fb->pitches[0],
+ drm_rect_height(clip) * fb->pitches[0]);
+#endif
}
/*
@@ -717,17 +725,23 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct mga_device *mdev = to_mga_device(connector->dev);
- int ret;
+ const struct drm_edid *drm_edid;
+ int count;
/*
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
mutex_lock(&mdev->rmmio_lock);
- ret = drm_connector_helper_get_modes_from_ddc(connector);
+
+ drm_edid = drm_edid_read(connector);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+
mutex_unlock(&mdev->rmmio_lock);
- return ret;
+ return count;
}
/*
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index b1173128b5b9..b21ae2880c71 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -127,9 +127,8 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_drm.o \
dp/dp_link.o \
dp/dp_panel.o \
- dp/dp_parser.o \
- dp/dp_power.o \
- dp/dp_audio.o
+ dp/dp_audio.o \
+ dp/dp_utils.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index f87a1312f580..23141cbcea97 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2023 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
+
+Copyright (C) 2013-2024 by the following authors:
+- Rob Clark <robdclark@gmail.com> Rob Clark
+- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
enum a2xx_rb_dither_type {
DITHER_PIXEL = 0,
@@ -1442,16 +1447,18 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
- return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
- return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
}
-static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define REG_A2XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0))
static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
@@ -1661,7 +1668,8 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
- return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@@ -1675,7 +1683,8 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
@@ -2654,7 +2663,8 @@ static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
{
- return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
}
#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
@@ -3027,7 +3037,8 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
- return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
#define A2XX_SQ_TEX_0_TILED 0x80000000
@@ -3061,7 +3072,8 @@ static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
{
- return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
}
#define REG_A2XX_SQ_TEX_2 0x00000002
@@ -3229,8 +3241,11 @@ static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
{
- return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
}
+#ifdef __cplusplus
+#endif
#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 237b564445be..5edd740ad3bb 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84323 bytes, from Wed Aug 23 10:39:39 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
+
+Copyright (C) 2013-2024 by the following authors:
+- Rob Clark <robdclark@gmail.com> Rob Clark
+- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
enum a3xx_tile_mode {
LINEAR = 0,
@@ -612,6 +617,7 @@ enum a3xx_tex_msaa {
#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
+
#define REG_A3XX_RBBM_HW_VERSION 0x00000000
#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
@@ -672,13 +678,9 @@ enum a3xx_tex_msaa {
#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
-
#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
-
#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
-
#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
-
#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
@@ -912,7 +914,7 @@ enum a3xx_tex_msaa {
#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
-static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+#define REG_A3XX_CP_PROTECT(i0) (0x00000460 + 0x1*(i0))
static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
@@ -1167,7 +1169,8 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
{
- return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
@@ -1218,7 +1221,7 @@ static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
}
-static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+#define REG_A3XX_RB_MRT(i0) (0x000020c4 + 0x4*(i0))
static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
@@ -1267,7 +1270,8 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
{
- return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
}
static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
@@ -1275,7 +1279,8 @@ static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6
#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
{
- return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
}
static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
@@ -1407,7 +1412,8 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
- return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+ assert(!(val & 0x3fff));
+ return (((val >> 14)) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
@@ -1415,7 +1421,8 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
{
- return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
}
#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
@@ -1423,7 +1430,8 @@ static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
{
- return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
}
#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
@@ -1491,7 +1499,8 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
@@ -1499,7 +1508,8 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
#define A3XX_RB_DEPTH_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
{
- return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
+ assert(!(val & 0x7));
+ return (((val >> 3)) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
}
#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
@@ -1562,7 +1572,8 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11
static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
{
- return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
}
#define REG_A3XX_RB_STENCIL_PITCH 0x00002107
@@ -1570,7 +1581,8 @@ static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
#define A3XX_RB_STENCIL_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val)
{
- return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
+ assert(!(val & 0x7));
+ return (((val >> 3)) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
}
#define REG_A3XX_RB_STENCILREFMASK 0x00002108
@@ -1877,7 +1889,7 @@ static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
}
-static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+#define REG_A3XX_HLSQ_CL_GLOBAL_WORK(i0) (0x0000220b + 0x2*(i0))
static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
@@ -1889,7 +1901,7 @@ static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return
#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
-static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP(i0) (0x00002215 + 0x1*(i0))
static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
@@ -1965,7 +1977,7 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
-static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define REG_A3XX_VFD_FETCH(i0) (0x00002246 + 0x2*(i0))
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
@@ -1997,7 +2009,7 @@ static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
-static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define REG_A3XX_VFD_DECODE(i0) (0x00002266 + 0x1*(i0))
static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
@@ -2084,7 +2096,7 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
}
-static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define REG_A3XX_VPC_VARYING_INTERP(i0) (0x00002282 + 0x1*(i0))
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
@@ -2184,7 +2196,7 @@ static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
}
-static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+#define REG_A3XX_VPC_VARYING_PS_REPL(i0) (0x00002286 + 0x1*(i0))
static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
@@ -2392,7 +2404,7 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
}
-static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define REG_A3XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
@@ -2422,7 +2434,7 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define REG_A3XX_SP_VS_VPC_DST(i0) (0x000022d0 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
@@ -2477,7 +2489,8 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
{
- return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x7f));
+ return (((val >> 7)) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
}
#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
@@ -2503,7 +2516,8 @@ static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
{
- return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
}
#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
@@ -2641,7 +2655,8 @@ static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
{
- return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
}
#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
@@ -2665,7 +2680,7 @@ static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
}
-static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define REG_A3XX_SP_FS_MRT(i0) (0x000022f0 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
@@ -2678,7 +2693,7 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
-static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define REG_A3XX_SP_FS_IMAGE_OUTPUT(i0) (0x000022f4 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
@@ -2821,18 +2836,20 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
- return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
- return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
-static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define REG_A3XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0))
static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
@@ -2887,7 +2904,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
-static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+#define REG_A3XX_GRAS_CL_USER_PLANE(i0) (0x00000ca0 + 0x4*(i0))
static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
@@ -3228,7 +3245,8 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
{
- return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
}
#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
@@ -3240,8 +3258,11 @@ static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
{
- return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
}
+#ifdef __cplusplus
+#endif
#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index c86b377f6f0d..5273dc849838 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -134,6 +134,13 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Set up AOOO: */
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ } else if (adreno_is_a305b(adreno_gpu)) {
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x00181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x00181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000018);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000018);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303);
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
} else if (adreno_is_a306(adreno_gpu)) {
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
@@ -230,7 +237,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
- if (adreno_is_a306(adreno_gpu))
+ if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
@@ -333,7 +340,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
- } else if (adreno_is_a330(adreno_gpu)) {
+ } else if (adreno_is_a330(adreno_gpu) || adreno_is_a305b(adreno_gpu)) {
/* NOTE: this (value taken from the downstream android driver)
* includes some bits outside of the known bitfields. But
* A330 has this "MERCIU queue" thing too, which might
@@ -559,7 +566,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
/* if needed, allocate gmem: */
- if (adreno_is_a330(adreno_gpu)) {
+ if (adreno_is_a330(adreno_gpu) || adreno_is_a305b(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
adreno_gpu, &a3xx_gpu->ocmem);
if (ret)
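
The a3xx_hw_init() hunks above extend the chained adreno_is_*() checks so A305B gets its own VBIF limits, then shares the A306 clock-gating value and the A330 CP-threshold and GMEM/OCMEM paths. Purely as an illustration of what that per-variant VBIF bring-up amounts to, here is a table-driven sketch using the register values from the hunk; the struct, table, and helper names are hypothetical and assume the driver's existing gpu_write() and ARRAY_SIZE() helpers:

/* Illustrative only: the same A305B VBIF writes as above, expressed as a
 * table walk instead of back-to-back gpu_write() calls. */
struct vbif_setting { uint32_t reg, val; };

static const struct vbif_setting a305b_vbif[] = {
	{ REG_A3XX_VBIF_IN_RD_LIM_CONF0,     0x00181818 },
	{ REG_A3XX_VBIF_IN_WR_LIM_CONF0,     0x00181818 },
	{ REG_A3XX_VBIF_OUT_RD_LIM_CONF0,    0x00000018 },
	{ REG_A3XX_VBIF_OUT_WR_LIM_CONF0,    0x00000018 },
	{ REG_A3XX_VBIF_DDR_OUT_MAX_BURST,   0x00000303 },
	{ REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003     },
};

static void a305b_setup_vbif(struct msm_gpu *gpu)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(a305b_vbif); i++)
		gpu_write(gpu, a305b_vbif[i].reg, a305b_vbif[i].val);
}
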
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index ff5f1e98a5fc..103a416a787f 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
+
+Copyright (C) 2013-2024 by the following authors:
+- Rob Clark <robdclark@gmail.com> Rob Clark
+- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
@@ -846,6 +851,7 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
{
return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
}
+
#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
@@ -870,6 +876,7 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
+
#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
@@ -923,13 +930,15 @@ static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
}
#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
@@ -968,7 +977,7 @@ static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000
#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000
-static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+#define REG_A4XX_RB_MRT(i0) (0x000020a4 + 0x5*(i0))
static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
@@ -1018,7 +1027,8 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
{
- return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
}
static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
@@ -1217,7 +1227,8 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
{
- return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
}
#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
@@ -1293,7 +1304,8 @@ static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
- return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+ assert(!(val & 0x3fff));
+ return (((val >> 14)) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
@@ -1301,7 +1313,8 @@ static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5
static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
}
#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
@@ -1309,7 +1322,8 @@ static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
}
#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
@@ -1387,7 +1401,8 @@ static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format va
#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
@@ -1395,7 +1410,8 @@ static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
#define A4XX_RB_DEPTH_PITCH__SHIFT 0
static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
}
#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
@@ -1403,7 +1419,8 @@ static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
}
#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
@@ -1468,7 +1485,8 @@ static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
{
- return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
}
#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
@@ -1476,7 +1494,8 @@ static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
#define A4XX_RB_STENCIL_PITCH__SHIFT 0
static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
{
- return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
}
#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
@@ -1534,7 +1553,7 @@ static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
}
-static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+#define REG_A4XX_RB_VPORT_Z_CLAMP(i0) (0x00002120 + 0x2*(i0))
static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; }
@@ -1544,19 +1563,19 @@ static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x000
#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL_TP(i0) (0x00000004 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL2_TP(i0) (0x00000008 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_HYST_TP(i0) (0x0000000c + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_DELAY_TP(i0) (0x00000010 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
@@ -2008,35 +2027,35 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL_SP(i0) (0x00000068 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL2_SP(i0) (0x0000006c + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_HYST_SP(i0) (0x00000070 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_DELAY_SP(i0) (0x00000074 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL_RB(i0) (0x00000078 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL2_RB(i0) (0x0000007c + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i0) (0x00000082 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
-static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i0) (0x00000086 + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
@@ -2052,7 +2071,7 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { r
#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
-static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+#define REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i0) (0x0000008e + 0x1*(i0))
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
@@ -2192,7 +2211,7 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define REG_A4XX_CP_PROTECT(i0) (0x00000240 + 0x1*(i0))
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
@@ -2207,18 +2226,8 @@ static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
{
return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
}
-#define A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
-#define A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
-static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
-{
- return ((val) << A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
-}
-#define A4XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
-#define A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
-static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
-{
- return ((val) << A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_READ__MASK;
-}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -2254,7 +2263,7 @@ static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
-static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+#define REG_A4XX_CP_SCRATCH(i0) (0x00000578 + 0x1*(i0))
static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -2364,7 +2373,7 @@ static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
}
-static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define REG_A4XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
@@ -2392,7 +2401,7 @@ static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define REG_A4XX_SP_VS_VPC_DST(i0) (0x000022d8 + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -2532,7 +2541,7 @@ static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val)
return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK;
}
-static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define REG_A4XX_SP_FS_MRT(i0) (0x000022f1 + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
@@ -2636,7 +2645,7 @@ static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
}
-static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define REG_A4XX_SP_DS_OUT(i0) (0x0000231b + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
@@ -2664,7 +2673,7 @@ static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define REG_A4XX_SP_DS_VPC_DST(i0) (0x0000232c + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -2734,7 +2743,7 @@ static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
}
-static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define REG_A4XX_SP_GS_OUT(i0) (0x00002342 + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
@@ -2762,7 +2771,7 @@ static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define REG_A4XX_SP_GS_VPC_DST(i0) (0x00002353 + 0x1*(i0))
static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -2862,11 +2871,11 @@ static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
}
-static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+#define REG_A4XX_VPC_VARYING_INTERP(i0) (0x00002142 + 0x1*(i0))
static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
-static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+#define REG_A4XX_VPC_VARYING_PS_REPL(i0) (0x0000214a + 0x1*(i0))
static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
@@ -2877,13 +2886,15 @@ static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
- return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
- return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
@@ -2892,7 +2903,7 @@ static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
-static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define REG_A4XX_VSC_PIPE_CONFIG(i0) (0x00000c08 + 0x1*(i0))
static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
@@ -2920,11 +2931,11 @@ static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define REG_A4XX_VSC_PIPE_DATA_ADDRESS(i0) (0x00000c10 + 0x1*(i0))
static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
-static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+#define REG_A4XX_VSC_PIPE_DATA_LENGTH(i0) (0x00000c18 + 0x1*(i0))
static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
@@ -3028,7 +3039,7 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
-static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define REG_A4XX_VFD_FETCH(i0) (0x0000220a + 0x4*(i0))
static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
@@ -3064,7 +3075,7 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val)
return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK;
}
-static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define REG_A4XX_VFD_DECODE(i0) (0x0000228a + 0x1*(i0))
static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
@@ -4262,7 +4273,8 @@ static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
{
- return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
}
#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000
#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18
@@ -4276,13 +4288,15 @@ static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val)
#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0
static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val)
{
- return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK;
}
#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0
#define A4XX_TEX_CONST_4_BASE__SHIFT 5
static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
{
- return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
}
#define REG_A4XX_TEX_CONST_5 0x00000005
@@ -4296,7 +4310,8 @@ static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
#define A4XX_SSBO_0_0_BASE__SHIFT 5
static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
{
- return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
}
#define REG_A4XX_SSBO_0_1 0x00000001
@@ -4312,7 +4327,8 @@ static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
}
#define REG_A4XX_SSBO_0_3 0x00000003
@@ -4357,5 +4373,7 @@ static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
}
+#ifdef __cplusplus
+#endif
#endif /* A4XX_XML */
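
The A4XX/A5XX hunks above also replace single-bit fields (CP_PROTECT trap bits, the A5XX RBBM_STATUS busy bits) with plain bit defines: call sites OR the constant in directly rather than calling a pack helper with a 0/1 argument. A small usage sketch, assuming a translation unit that includes this header and that the BASE_ADDR/MASK_LEN pack helpers shown earlier in the hunk are available; 'addr' is a hypothetical local:

/* Build a CP protect entry with the new-style trap bits. */
static uint32_t build_protect_entry(uint32_t addr)
{
	return A4XX_CP_PROTECT_REG_BASE_ADDR(addr) |
	       A4XX_CP_PROTECT_REG_MASK_LEN(4) |
	       A4XX_CP_PROTECT_REG_TRAP_WRITE |   /* was ..._TRAP_WRITE(1) */
	       A4XX_CP_PROTECT_REG_TRAP_READ;     /* was ..._TRAP_READ(1) */
}
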
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 03b7ee592b11..d66306c14986 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2023 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 151693 bytes, from Wed Aug 23 10:39:39 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
+
+Copyright (C) 2013-2024 by the following authors:
+- Rob Clark <robdclark@gmail.com> Rob Clark
+- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
enum a5xx_color_fmt {
RB5_A8_UNORM = 2,
@@ -907,12 +912,14 @@ enum a5xx_tex_type {
#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+
#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+
#define REG_A5XX_CP_RB_BASE 0x00000800
#define REG_A5XX_CP_RB_BASE_HI 0x00000801
@@ -1031,11 +1038,11 @@ enum a5xx_tex_type {
#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
-static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+#define REG_A5XX_CP_SCRATCH(i0) (0x00000b78 + 0x1*(i0))
static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
-static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define REG_A5XX_CP_PROTECT(i0) (0x00000880 + 0x1*(i0))
static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
@@ -1050,18 +1057,8 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
{
return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
}
-#define A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
-#define A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
-static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
-{
- return ((val) << A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
-}
-#define A5XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
-#define A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
-static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
-{
- return ((val) << A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_READ__MASK;
-}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
@@ -1833,192 +1830,37 @@ static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
#define REG_A5XX_RBBM_STATUS 0x000004f5
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK 0x80000000
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT 31
-static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK;
-}
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK 0x40000000
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT 30
-static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK;
-}
-#define A5XX_RBBM_STATUS_HLSQ_BUSY__MASK 0x20000000
-#define A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT 29
-static inline uint32_t A5XX_RBBM_STATUS_HLSQ_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT) & A5XX_RBBM_STATUS_HLSQ_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_VSC_BUSY__MASK 0x10000000
-#define A5XX_RBBM_STATUS_VSC_BUSY__SHIFT 28
-static inline uint32_t A5XX_RBBM_STATUS_VSC_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_VSC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VSC_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_TPL1_BUSY__MASK 0x08000000
-#define A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT 27
-static inline uint32_t A5XX_RBBM_STATUS_TPL1_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT) & A5XX_RBBM_STATUS_TPL1_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_SP_BUSY__MASK 0x04000000
-#define A5XX_RBBM_STATUS_SP_BUSY__SHIFT 26
-static inline uint32_t A5XX_RBBM_STATUS_SP_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_SP_BUSY__SHIFT) & A5XX_RBBM_STATUS_SP_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_UCHE_BUSY__MASK 0x02000000
-#define A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT 25
-static inline uint32_t A5XX_RBBM_STATUS_UCHE_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_UCHE_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_VPC_BUSY__MASK 0x01000000
-#define A5XX_RBBM_STATUS_VPC_BUSY__SHIFT 24
-static inline uint32_t A5XX_RBBM_STATUS_VPC_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_VPC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VPC_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_VFDP_BUSY__MASK 0x00800000
-#define A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT 23
-static inline uint32_t A5XX_RBBM_STATUS_VFDP_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFDP_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_VFD_BUSY__MASK 0x00400000
-#define A5XX_RBBM_STATUS_VFD_BUSY__SHIFT 22
-static inline uint32_t A5XX_RBBM_STATUS_VFD_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_VFD_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFD_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_TESS_BUSY__MASK 0x00200000
-#define A5XX_RBBM_STATUS_TESS_BUSY__SHIFT 21
-static inline uint32_t A5XX_RBBM_STATUS_TESS_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_TESS_BUSY__SHIFT) & A5XX_RBBM_STATUS_TESS_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK 0x00100000
-#define A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT 20
-static inline uint32_t A5XX_RBBM_STATUS_PC_VSD_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK 0x00080000
-#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT 19
-static inline uint32_t A5XX_RBBM_STATUS_PC_DCALL_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK 0x00040000
-#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT 18
-static inline uint32_t A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_DCOM_BUSY__MASK 0x00020000
-#define A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT 17
-static inline uint32_t A5XX_RBBM_STATUS_DCOM_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT) & A5XX_RBBM_STATUS_DCOM_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_COM_BUSY__MASK 0x00010000
-#define A5XX_RBBM_STATUS_COM_BUSY__SHIFT 16
-static inline uint32_t A5XX_RBBM_STATUS_COM_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_COM_BUSY__SHIFT) & A5XX_RBBM_STATUS_COM_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_LRZ_BUZY__MASK 0x00008000
-#define A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT 15
-static inline uint32_t A5XX_RBBM_STATUS_LRZ_BUZY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT) & A5XX_RBBM_STATUS_LRZ_BUZY__MASK;
-}
-#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK 0x00004000
-#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT 14
-static inline uint32_t A5XX_RBBM_STATUS_A2D_DSP_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT) & A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK 0x00002000
-#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT 13
-static inline uint32_t A5XX_RBBM_STATUS_CCUFCHE_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_RB_BUSY__MASK 0x00001000
-#define A5XX_RBBM_STATUS_RB_BUSY__SHIFT 12
-static inline uint32_t A5XX_RBBM_STATUS_RB_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_RB_BUSY__SHIFT) & A5XX_RBBM_STATUS_RB_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_RAS_BUSY__MASK 0x00000800
-#define A5XX_RBBM_STATUS_RAS_BUSY__SHIFT 11
-static inline uint32_t A5XX_RBBM_STATUS_RAS_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_RAS_BUSY__SHIFT) & A5XX_RBBM_STATUS_RAS_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_TSE_BUSY__MASK 0x00000400
-#define A5XX_RBBM_STATUS_TSE_BUSY__SHIFT 10
-static inline uint32_t A5XX_RBBM_STATUS_TSE_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_TSE_BUSY__SHIFT) & A5XX_RBBM_STATUS_TSE_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_VBIF_BUSY__MASK 0x00000200
-#define A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT 9
-static inline uint32_t A5XX_RBBM_STATUS_VBIF_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT) & A5XX_RBBM_STATUS_VBIF_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK 0x00000100
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT 8
-static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK;
-}
-#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK 0x00000080
-#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT 7
-static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK;
-}
-#define A5XX_RBBM_STATUS_CP_BUSY__MASK 0x00000040
-#define A5XX_RBBM_STATUS_CP_BUSY__SHIFT 6
-static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK 0x00000020
-#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT 5
-static inline uint32_t A5XX_RBBM_STATUS_GPMU_MASTER_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK 0x00000010
-#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT 4
-static inline uint32_t A5XX_RBBM_STATUS_CP_CRASH_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK 0x00000008
-#define A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT 3
-static inline uint32_t A5XX_RBBM_STATUS_CP_ETS_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK 0x00000004
-#define A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT 2
-static inline uint32_t A5XX_RBBM_STATUS_CP_PFP_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK;
-}
-#define A5XX_RBBM_STATUS_CP_ME_BUSY__MASK 0x00000002
-#define A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT 1
-static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
-{
- return ((val) << A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ME_BUSY__MASK;
-}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
#define REG_A5XX_RBBM_STATUS3 0x00000530
@@ -2113,13 +1955,15 @@ static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
- return ((val >> 5) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001fe00
#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT 9
static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
- return ((val >> 5) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
#define REG_A5XX_VSC_SIZE_ADDRESS_LO 0x00000bc3
@@ -2130,7 +1974,7 @@ static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
#define REG_A5XX_UNKNOWN_0BC6 0x00000bc6
-static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+#define REG_A5XX_VSC_PIPE_CONFIG(i0) (0x00000bd0 + 0x1*(i0))
static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
@@ -2158,13 +2002,13 @@ static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+#define REG_A5XX_VSC_PIPE_DATA_ADDRESS(i0) (0x00000be0 + 0x2*(i0))
static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; }
-static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH(i0) (0x00000c00 + 0x1*(i0))
static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
@@ -2594,36 +2438,6 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
-#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
-
-#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
-
-#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
-
-#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
-#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
-
-#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
-#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
-
-#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
-
-#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
-
-#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
-
-#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
-
-#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
-
-#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
-
-#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
-
-#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
-
-#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
-
#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
@@ -2748,10 +2562,42 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+#define REG_A5XX_GPMU_GPMU_SP_CLOCK_CONTROL 0x0000a880
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
@@ -3112,7 +2958,8 @@ static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0
static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
{
- return ((val >> 5) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
}
#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
@@ -3124,13 +2971,15 @@ static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
#define A5XX_RB_CNTL_WIDTH__SHIFT 0
static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
{
- return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
}
#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
{
- return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
}
#define A5XX_RB_CNTL_BYPASS 0x00020000
@@ -3248,7 +3097,7 @@ static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
}
-static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define REG_A5XX_RB_MRT(i0) (0x0000e150 + 0x7*(i0))
static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
@@ -3337,7 +3186,8 @@ static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 +
#define A5XX_RB_MRT_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
}
static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
@@ -3345,7 +3195,8 @@ static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e
#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
}
static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
@@ -3527,7 +3378,8 @@ static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_fo
#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
}
#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
@@ -3535,7 +3387,8 @@ static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
@@ -3603,7 +3456,8 @@ static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define A5XX_RB_STENCIL_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
}
#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
@@ -3611,7 +3465,8 @@ static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
@@ -3722,7 +3577,8 @@ static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
}
#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
@@ -3730,7 +3586,8 @@ static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
@@ -3757,7 +3614,7 @@ static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
-static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+#define REG_A5XX_RB_MRT_FLAG_BUFFER(i0) (0x0000e243 + 0x4*(i0))
static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
@@ -3768,7 +3625,8 @@ static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0
#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
}
static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
@@ -3776,7 +3634,8 @@ static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { re
#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
@@ -3788,7 +3647,8 @@ static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
}
#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
@@ -3796,7 +3656,8 @@ static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267
@@ -3812,11 +3673,11 @@ static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
}
#define A5XX_VPC_CNTL_0_VARYING 0x00000800
-static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+#define REG_A5XX_VPC_VARYING_INTERP(i0) (0x0000e282 + 0x1*(i0))
static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
-static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+#define REG_A5XX_VPC_VARYING_PS_REPL(i0) (0x0000e28a + 0x1*(i0))
static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
@@ -3824,7 +3685,7 @@ static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
#define REG_A5XX_UNKNOWN_E293 0x0000e293
-static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+#define REG_A5XX_VPC_VAR(i0) (0x0000e294 + 0x1*(i0))
static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
@@ -3890,7 +3751,8 @@ static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val)
#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2
static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val)
{
- return ((val >> 2) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
}
#define A5XX_VPC_SO_PROG_A_EN 0x00000800
#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
@@ -3903,11 +3765,12 @@ static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val)
#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14
static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val)
{
- return ((val >> 2) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
}
#define A5XX_VPC_SO_PROG_B_EN 0x00800000
-static inline uint32_t REG_A5XX_VPC_SO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+#define REG_A5XX_VPC_SO(i0) (0x0000e2a7 + 0x7*(i0))
static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
@@ -4066,7 +3929,7 @@ static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
-static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+#define REG_A5XX_VFD_FETCH(i0) (0x0000e40a + 0x4*(i0))
static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
@@ -4076,7 +3939,7 @@ static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c
static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
-static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define REG_A5XX_VFD_DECODE(i0) (0x0000e48a + 0x2*(i0))
static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
@@ -4103,7 +3966,7 @@ static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
-static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define REG_A5XX_VFD_DEST_CNTL(i0) (0x0000e4ca + 0x1*(i0))
static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
@@ -4254,7 +4117,7 @@ static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
}
-static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define REG_A5XX_SP_VS_OUT(i0) (0x0000e593 + 0x1*(i0))
static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
@@ -4282,7 +4145,7 @@ static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define REG_A5XX_SP_VS_VPC_DST(i0) (0x0000e5a3 + 0x1*(i0))
static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -4316,6 +4179,39 @@ static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+#define REG_A5XX_SP_VS_PVT_MEM_PARAM 0x0000e5ae
+#define A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
+#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
+{
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
+}
+#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A5XX_SP_VS_PVT_MEM_ADDR 0x0000e5af
+
+#define REG_A5XX_SP_VS_PVT_MEM_SIZE 0x0000e5b1
+#define A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+
#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
#define A5XX_SP_FS_CTRL_REG0_BUFFER 0x00000004
#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008
@@ -4351,6 +4247,39 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+#define REG_A5XX_SP_FS_PVT_MEM_PARAM 0x0000e5c5
+#define A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
+#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
+{
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
+}
+#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A5XX_SP_FS_PVT_MEM_ADDR 0x0000e5c6
+
+#define REG_A5XX_SP_FS_PVT_MEM_SIZE 0x0000e5c8
+#define A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+
#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
@@ -4381,7 +4310,7 @@ static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
}
-static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define REG_A5XX_SP_FS_OUTPUT(i0) (0x0000e5cb + 0x1*(i0))
static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
@@ -4392,7 +4321,7 @@ static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
}
#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
-static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define REG_A5XX_SP_FS_MRT(i0) (0x0000e5d3 + 0x1*(i0))
static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
@@ -4442,6 +4371,39 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+#define REG_A5XX_SP_CS_PVT_MEM_PARAM 0x0000e5f5
+#define A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
+#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
+{
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
+}
+#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A5XX_SP_CS_PVT_MEM_ADDR 0x0000e5f6
+
+#define REG_A5XX_SP_CS_PVT_MEM_SIZE 0x0000e5f8
+#define A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+
#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
#define A5XX_SP_HS_CTRL_REG0_BUFFER 0x00000004
#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
@@ -4477,6 +4439,39 @@ static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+#define REG_A5XX_SP_HS_PVT_MEM_PARAM 0x0000e605
+#define A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
+#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
+{
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
+}
+#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A5XX_SP_HS_PVT_MEM_ADDR 0x0000e606
+
+#define REG_A5XX_SP_HS_PVT_MEM_SIZE 0x0000e608
+#define A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+
#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
#define A5XX_SP_DS_CTRL_REG0_BUFFER 0x00000004
#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
@@ -4512,6 +4507,39 @@ static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
+#define REG_A5XX_SP_DS_PVT_MEM_PARAM 0x0000e62e
+#define A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
+#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
+{
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
+}
+#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A5XX_SP_DS_PVT_MEM_ADDR 0x0000e62f
+
+#define REG_A5XX_SP_DS_PVT_MEM_SIZE 0x0000e631
+#define A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+
#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
#define A5XX_SP_GS_CTRL_REG0_BUFFER 0x00000004
#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
@@ -4547,6 +4575,39 @@ static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d
+#define REG_A5XX_SP_GS_PVT_MEM_PARAM 0x0000e65e
+#define A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00
+#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val)
+{
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK;
+}
+#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A5XX_SP_GS_PVT_MEM_ADDR 0x0000e65f
+
+#define REG_A5XX_SP_GS_PVT_MEM_SIZE 0x0000e661
+#define A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+
#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -5061,13 +5122,15 @@ static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
}
#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK 0xffff0000
#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT 16
static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_2D_DST_INFO 0x00002110
@@ -5101,13 +5164,15 @@ static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
}
#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK 0xffff0000
#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT 16
static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
}
#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
@@ -5119,7 +5184,8 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
}
#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
@@ -5131,7 +5197,8 @@ static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
{
- return ((val >> 6) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
}
#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
@@ -5357,13 +5424,15 @@ static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
}
#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
{
- return ((val >> 12) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
}
#define A5XX_TEX_CONST_3_TILE_ALL 0x08000000
#define A5XX_TEX_CONST_3_FLAG 0x10000000
@@ -5373,7 +5442,8 @@ static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
{
- return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
}
#define REG_A5XX_TEX_CONST_5 0x00000005
@@ -5407,7 +5477,8 @@ static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
{
- return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
}
#define REG_A5XX_SSBO_0_1 0x00000001
@@ -5423,7 +5494,8 @@ static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
}
#define REG_A5XX_SSBO_0_3 0x00000003
@@ -5494,5 +5566,7 @@ static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
}
+#ifdef __cplusplus
+#endif
#endif /* A5XX_XML */
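
/*
 * Editorial sketch, not part of the patch: how the regenerated a5xx field
 * helpers are typically composed by driver code once a5xx.xml.h (and
 * <stdint.h>) are included.  The pitch helpers now assert that the byte
 * pitch is already aligned to the field's granularity (64 bytes here)
 * before shifting it into place, so a bad value trips BUG_ON() in kernel
 * builds instead of being silently truncated.  pack_2d_dst_size() is a
 * hypothetical wrapper used only for illustration.
 */
static inline uint32_t pack_2d_dst_size(uint32_t pitch_bytes,
					uint32_t array_pitch_bytes)
{
	/* Both inputs must be multiples of 64, or the asserts fire. */
	return A5XX_RB_2D_DST_SIZE_PITCH(pitch_bytes) |
	       A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(array_pitch_bytes);
}
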
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 863b5e3b0e67..92e23bf2458d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2023 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 243381 bytes, from Sat Feb 24 09:06:40 2024)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85856 bytes, from Fri Feb 23 13:07:00 2024)
+
+Copyright (C) 2013-2024 by the following authors:
+- Rob Clark <robdclark@gmail.com> Rob Clark
+- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
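/*
 * Editorial note, not part of the patch: with the preamble above, the
 * alignment asserts in the generated field helpers become BUG_ON() checks
 * when the header is built in-kernel, and plain libc assert() when the same
 * header is compiled into userspace tooling.  __struct_cast() lets generated
 * pack helpers spell a compound literal that both C and C++ front ends
 * accept; the struct name below is hypothetical and only shows the expansion.
 *
 *   C:    __struct_cast(example_pair){ .reg = r, .value = v }
 *           expands to (struct example_pair){ .reg = r, .value = v }
 *   C++:  expands to { .reg = r, .value = v } (a braced-init-list)
 */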
enum a6xx_tile_mode {
TILE6_LINEAR = 0,
@@ -246,6 +251,85 @@ enum a6xx_shader_id {
A6XX_HLSQ_INST_RAM_1 = 115,
};
+enum a7xx_statetype_id {
+ A7XX_TP0_NCTX_REG = 0,
+ A7XX_TP0_CTX0_3D_CVS_REG = 1,
+ A7XX_TP0_CTX0_3D_CPS_REG = 2,
+ A7XX_TP0_CTX1_3D_CVS_REG = 3,
+ A7XX_TP0_CTX1_3D_CPS_REG = 4,
+ A7XX_TP0_CTX2_3D_CPS_REG = 5,
+ A7XX_TP0_CTX3_3D_CPS_REG = 6,
+ A7XX_TP0_TMO_DATA = 9,
+ A7XX_TP0_SMO_DATA = 10,
+ A7XX_TP0_MIPMAP_BASE_DATA = 11,
+ A7XX_SP_NCTX_REG = 32,
+ A7XX_SP_CTX0_3D_CVS_REG = 33,
+ A7XX_SP_CTX0_3D_CPS_REG = 34,
+ A7XX_SP_CTX1_3D_CVS_REG = 35,
+ A7XX_SP_CTX1_3D_CPS_REG = 36,
+ A7XX_SP_CTX2_3D_CPS_REG = 37,
+ A7XX_SP_CTX3_3D_CPS_REG = 38,
+ A7XX_SP_INST_DATA = 39,
+ A7XX_SP_INST_DATA_1 = 40,
+ A7XX_SP_LB_0_DATA = 41,
+ A7XX_SP_LB_1_DATA = 42,
+ A7XX_SP_LB_2_DATA = 43,
+ A7XX_SP_LB_3_DATA = 44,
+ A7XX_SP_LB_4_DATA = 45,
+ A7XX_SP_LB_5_DATA = 46,
+ A7XX_SP_LB_6_DATA = 47,
+ A7XX_SP_LB_7_DATA = 48,
+ A7XX_SP_CB_RAM = 49,
+ A7XX_SP_LB_13_DATA = 50,
+ A7XX_SP_LB_14_DATA = 51,
+ A7XX_SP_INST_TAG = 52,
+ A7XX_SP_INST_DATA_2 = 53,
+ A7XX_SP_TMO_TAG = 54,
+ A7XX_SP_SMO_TAG = 55,
+ A7XX_SP_STATE_DATA = 56,
+ A7XX_SP_HWAVE_RAM = 57,
+ A7XX_SP_L0_INST_BUF = 58,
+ A7XX_SP_LB_8_DATA = 59,
+ A7XX_SP_LB_9_DATA = 60,
+ A7XX_SP_LB_10_DATA = 61,
+ A7XX_SP_LB_11_DATA = 62,
+ A7XX_SP_LB_12_DATA = 63,
+ A7XX_HLSQ_DATAPATH_DSTR_META = 64,
+ A7XX_HLSQ_L2STC_TAG_RAM = 67,
+ A7XX_HLSQ_L2STC_INFO_CMD = 68,
+ A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG = 69,
+ A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG = 70,
+ A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM = 71,
+ A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM = 72,
+ A7XX_HLSQ_CHUNK_CVS_RAM = 73,
+ A7XX_HLSQ_CHUNK_CPS_RAM = 74,
+ A7XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
+ A7XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
+ A7XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
+ A7XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
+ A7XX_HLSQ_CVS_MISC_RAM = 79,
+ A7XX_HLSQ_CPS_MISC_RAM = 80,
+ A7XX_HLSQ_CPS_MISC_RAM_1 = 81,
+ A7XX_HLSQ_INST_RAM = 82,
+ A7XX_HLSQ_GFX_CVS_CONST_RAM = 83,
+ A7XX_HLSQ_GFX_CPS_CONST_RAM = 84,
+ A7XX_HLSQ_CVS_MISC_RAM_TAG = 85,
+ A7XX_HLSQ_CPS_MISC_RAM_TAG = 86,
+ A7XX_HLSQ_INST_RAM_TAG = 87,
+ A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
+ A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
+ A7XX_HLSQ_GFX_LOCAL_MISC_RAM = 90,
+ A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG = 91,
+ A7XX_HLSQ_INST_RAM_1 = 92,
+ A7XX_HLSQ_STPROC_META = 93,
+ A7XX_HLSQ_BV_BE_META = 94,
+ A7XX_HLSQ_INST_RAM_2 = 95,
+ A7XX_HLSQ_DATAPATH_META = 96,
+ A7XX_HLSQ_FRONTEND_META = 97,
+ A7XX_HLSQ_INDIRECT_META = 98,
+ A7XX_HLSQ_BACKEND_META = 99,
+};
+
enum a6xx_debugbus_id {
A6XX_DBGBUS_CP = 1,
A6XX_DBGBUS_RBBM = 2,
@@ -305,6 +389,140 @@ enum a6xx_debugbus_id {
A6XX_DBGBUS_SPTP_5 = 93,
};
+enum a7xx_state_location {
+ A7XX_HLSQ_STATE = 0,
+ A7XX_HLSQ_DP = 1,
+ A7XX_SP_TOP = 2,
+ A7XX_USPTP = 3,
+};
+
+enum a7xx_pipe {
+ A7XX_PIPE_NONE = 0,
+ A7XX_PIPE_BR = 1,
+ A7XX_PIPE_BV = 2,
+ A7XX_PIPE_LPAC = 3,
+};
+
+enum a7xx_cluster {
+ A7XX_CLUSTER_NONE = 0,
+ A7XX_CLUSTER_FE = 1,
+ A7XX_CLUSTER_SP_VS = 2,
+ A7XX_CLUSTER_PC_VS = 3,
+ A7XX_CLUSTER_GRAS = 4,
+ A7XX_CLUSTER_SP_PS = 5,
+ A7XX_CLUSTER_VPC_PS = 6,
+ A7XX_CLUSTER_PS = 7,
+};
+
+enum a7xx_debugbus_id {
+ A7XX_DBGBUS_CP_0_0 = 1,
+ A7XX_DBGBUS_CP_0_1 = 2,
+ A7XX_DBGBUS_RBBM = 3,
+ A7XX_DBGBUS_GBIF_GX = 5,
+ A7XX_DBGBUS_GBIF_CX = 6,
+ A7XX_DBGBUS_HLSQ = 7,
+ A7XX_DBGBUS_UCHE_0 = 9,
+ A7XX_DBGBUS_UCHE_1 = 10,
+ A7XX_DBGBUS_TESS_BR = 13,
+ A7XX_DBGBUS_TESS_BV = 14,
+ A7XX_DBGBUS_PC_BR = 17,
+ A7XX_DBGBUS_PC_BV = 18,
+ A7XX_DBGBUS_VFDP_BR = 21,
+ A7XX_DBGBUS_VFDP_BV = 22,
+ A7XX_DBGBUS_VPC_BR = 25,
+ A7XX_DBGBUS_VPC_BV = 26,
+ A7XX_DBGBUS_TSE_BR = 29,
+ A7XX_DBGBUS_TSE_BV = 30,
+ A7XX_DBGBUS_RAS_BR = 33,
+ A7XX_DBGBUS_RAS_BV = 34,
+ A7XX_DBGBUS_VSC = 37,
+ A7XX_DBGBUS_COM_0 = 39,
+ A7XX_DBGBUS_LRZ_BR = 43,
+ A7XX_DBGBUS_LRZ_BV = 44,
+ A7XX_DBGBUS_UFC_0 = 47,
+ A7XX_DBGBUS_UFC_1 = 48,
+ A7XX_DBGBUS_GMU_GX = 55,
+ A7XX_DBGBUS_DBGC = 59,
+ A7XX_DBGBUS_CX = 60,
+ A7XX_DBGBUS_GMU_CX = 61,
+ A7XX_DBGBUS_GPC_BR = 62,
+ A7XX_DBGBUS_GPC_BV = 63,
+ A7XX_DBGBUS_LARC = 66,
+ A7XX_DBGBUS_HLSQ_SPTP = 68,
+ A7XX_DBGBUS_RB_0 = 70,
+ A7XX_DBGBUS_RB_1 = 71,
+ A7XX_DBGBUS_RB_2 = 72,
+ A7XX_DBGBUS_RB_3 = 73,
+ A7XX_DBGBUS_RB_4 = 74,
+ A7XX_DBGBUS_RB_5 = 75,
+ A7XX_DBGBUS_UCHE_WRAPPER = 102,
+ A7XX_DBGBUS_CCU_0 = 106,
+ A7XX_DBGBUS_CCU_1 = 107,
+ A7XX_DBGBUS_CCU_2 = 108,
+ A7XX_DBGBUS_CCU_3 = 109,
+ A7XX_DBGBUS_CCU_4 = 110,
+ A7XX_DBGBUS_CCU_5 = 111,
+ A7XX_DBGBUS_VFD_BR_0 = 138,
+ A7XX_DBGBUS_VFD_BR_1 = 139,
+ A7XX_DBGBUS_VFD_BR_2 = 140,
+ A7XX_DBGBUS_VFD_BR_3 = 141,
+ A7XX_DBGBUS_VFD_BR_4 = 142,
+ A7XX_DBGBUS_VFD_BR_5 = 143,
+ A7XX_DBGBUS_VFD_BR_6 = 144,
+ A7XX_DBGBUS_VFD_BR_7 = 145,
+ A7XX_DBGBUS_VFD_BV_0 = 202,
+ A7XX_DBGBUS_VFD_BV_1 = 203,
+ A7XX_DBGBUS_VFD_BV_2 = 204,
+ A7XX_DBGBUS_VFD_BV_3 = 205,
+ A7XX_DBGBUS_USP_0 = 234,
+ A7XX_DBGBUS_USP_1 = 235,
+ A7XX_DBGBUS_USP_2 = 236,
+ A7XX_DBGBUS_USP_3 = 237,
+ A7XX_DBGBUS_USP_4 = 238,
+ A7XX_DBGBUS_USP_5 = 239,
+ A7XX_DBGBUS_TP_0 = 266,
+ A7XX_DBGBUS_TP_1 = 267,
+ A7XX_DBGBUS_TP_2 = 268,
+ A7XX_DBGBUS_TP_3 = 269,
+ A7XX_DBGBUS_TP_4 = 270,
+ A7XX_DBGBUS_TP_5 = 271,
+ A7XX_DBGBUS_TP_6 = 272,
+ A7XX_DBGBUS_TP_7 = 273,
+ A7XX_DBGBUS_TP_8 = 274,
+ A7XX_DBGBUS_TP_9 = 275,
+ A7XX_DBGBUS_TP_10 = 276,
+ A7XX_DBGBUS_TP_11 = 277,
+ A7XX_DBGBUS_USPTP_0 = 330,
+ A7XX_DBGBUS_USPTP_1 = 331,
+ A7XX_DBGBUS_USPTP_2 = 332,
+ A7XX_DBGBUS_USPTP_3 = 333,
+ A7XX_DBGBUS_USPTP_4 = 334,
+ A7XX_DBGBUS_USPTP_5 = 335,
+ A7XX_DBGBUS_USPTP_6 = 336,
+ A7XX_DBGBUS_USPTP_7 = 337,
+ A7XX_DBGBUS_USPTP_8 = 338,
+ A7XX_DBGBUS_USPTP_9 = 339,
+ A7XX_DBGBUS_USPTP_10 = 340,
+ A7XX_DBGBUS_USPTP_11 = 341,
+ A7XX_DBGBUS_CCHE_0 = 396,
+ A7XX_DBGBUS_CCHE_1 = 397,
+ A7XX_DBGBUS_CCHE_2 = 398,
+ A7XX_DBGBUS_VPC_DSTR_0 = 408,
+ A7XX_DBGBUS_VPC_DSTR_1 = 409,
+ A7XX_DBGBUS_VPC_DSTR_2 = 410,
+ A7XX_DBGBUS_HLSQ_DP_STR_0 = 411,
+ A7XX_DBGBUS_HLSQ_DP_STR_1 = 412,
+ A7XX_DBGBUS_HLSQ_DP_STR_2 = 413,
+ A7XX_DBGBUS_HLSQ_DP_STR_3 = 414,
+ A7XX_DBGBUS_HLSQ_DP_STR_4 = 415,
+ A7XX_DBGBUS_HLSQ_DP_STR_5 = 416,
+ A7XX_DBGBUS_UFC_DSTR_0 = 443,
+ A7XX_DBGBUS_UFC_DSTR_1 = 444,
+ A7XX_DBGBUS_UFC_DSTR_2 = 445,
+ A7XX_DBGBUS_CGC_SUBCORE = 446,
+ A7XX_DBGBUS_CGC_CORE = 447,
+};
+
enum a6xx_cp_perfcounter_select {
PERF_CP_ALWAYS_COUNT = 0,
PERF_CP_BUSY_GFX_CORE_IDLE = 1,
@@ -914,6 +1132,19 @@ enum a6xx_ztest_mode {
A6XX_INVALID_ZTEST = 3,
};
+enum a6xx_tess_spacing {
+ TESS_EQUAL = 0,
+ TESS_FRACTIONAL_ODD = 2,
+ TESS_FRACTIONAL_EVEN = 3,
+};
+
+enum a6xx_tess_output {
+ TESS_POINTS = 0,
+ TESS_LINES = 1,
+ TESS_CW_TRIS = 2,
+ TESS_CCW_TRIS = 3,
+};
+
enum a6xx_sequenced_thread_dist {
DIST_SCREEN_COORD = 0,
DIST_ALL_TO_RB0 = 1,
@@ -967,17 +1198,25 @@ enum a6xx_rotation {
ROTATE_VFLIP = 5,
};
-enum a6xx_tess_spacing {
- TESS_EQUAL = 0,
- TESS_FRACTIONAL_ODD = 2,
- TESS_FRACTIONAL_EVEN = 3,
+enum a6xx_ccu_cache_size {
+ CCU_CACHE_SIZE_FULL = 0,
+ CCU_CACHE_SIZE_HALF = 1,
+ CCU_CACHE_SIZE_QUARTER = 2,
+ CCU_CACHE_SIZE_EIGHTH = 3,
};
-enum a6xx_tess_output {
- TESS_POINTS = 0,
- TESS_LINES = 1,
- TESS_CW_TRIS = 2,
- TESS_CCW_TRIS = 3,
+enum a6xx_varying_interp_mode {
+ INTERP_SMOOTH = 0,
+ INTERP_FLAT = 1,
+ INTERP_ZERO = 2,
+ INTERP_ONE = 3,
+};
+
+enum a6xx_varying_ps_repl_mode {
+ PS_REPL_NONE = 0,
+ PS_REPL_S = 1,
+ PS_REPL_T = 2,
+ PS_REPL_ONE_MINUS_T = 3,
};
enum a6xx_threadsize {
@@ -991,9 +1230,17 @@ enum a6xx_bindless_descriptor_size {
};
enum a6xx_isam_mode {
+ ISAMMODE_CL = 1,
ISAMMODE_GL = 2,
};
+enum a7xx_cs_yalign {
+ CS_YALIGN_1 = 8,
+ CS_YALIGN_2 = 4,
+ CS_YALIGN_4 = 2,
+ CS_YALIGN_8 = 1,
+};
+
enum a6xx_tex_filter {
A6XX_TEX_NEAREST = 0,
A6XX_TEX_LINEAR = 1,
@@ -1069,6 +1316,7 @@ enum a6xx_tex_type {
#define A6XX_RBBM_INT_0_MASK_TSBWRITEERROR 0x10000000
#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
@@ -1086,6 +1334,7 @@ enum a6xx_tex_type {
#define A6XX_CP_INT_CP_HW_FAULT_ERROR_BV 0x00008000
#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_BV 0x00010000
#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_BV 0x00020000
+
#define REG_A6XX_CP_RB_BASE 0x00000800
#define REG_A6XX_CP_RB_CNTL 0x00000802
@@ -1104,7 +1353,6 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_HW_FAULT 0x00000821
#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
-
#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
#define REG_A6XX_CP_STATUS_1 0x00000825
@@ -1128,25 +1376,29 @@ enum a6xx_tex_type {
#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT 0
static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_MRB_START(uint32_t val)
{
- return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK;
}
#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK 0x0000ff00
#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT 8
static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_VSD_START(uint32_t val)
{
- return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK;
}
#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000
#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16
static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
{
- return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
}
#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000
#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24
static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
{
- return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
}
#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
@@ -1154,13 +1406,15 @@ static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0
static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
{
- return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
}
#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000
#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16
static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
{
- return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
}
#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
@@ -1176,11 +1430,11 @@ static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
#define A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN 0x00000002
#define A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN 0x00000001
-static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+#define REG_A6XX_CP_SCRATCH(i0) (0x00000883 + 0x1*(i0))
static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
-static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define REG_A6XX_CP_PROTECT(i0) (0x00000850 + 0x1*(i0))
static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
@@ -1209,9 +1463,9 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A7XX_CP_CONTEXT_SWITCH_LEVEL_STATUS 0x000008ab
-static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008d0 + 0x1*i0; }
+#define REG_A6XX_CP_PERFCTR_CP_SEL(i0) (0x000008d0 + 0x1*(i0))
-static inline uint32_t REG_A7XX_CP_BV_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008e0 + 0x1*i0; }
+#define REG_A7XX_CP_BV_PERFCTR_CP_SEL(i0) (0x000008e0 + 0x1*(i0))
#define REG_A6XX_CP_CRASH_SCRIPT_BASE 0x00000900
@@ -1405,8 +1659,48 @@ static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val)
#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+#define REG_A7XX_CP_APERTURE_CNTL_HOST 0x00000a00
+#define A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK 0x00003000
+#define A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT 12
+static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_PIPE(enum a7xx_pipe val)
+{
+ return ((val) << A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK;
+}
+#define A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK 0x00000700
+#define A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT 8
+static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_CLUSTER(enum a7xx_cluster val)
+{
+ return ((val) << A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK;
+}
+#define A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK 0x00000030
+#define A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT 4
+static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_CONTEXT(uint32_t val)
+{
+ return ((val) << A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK;
+}
+
#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+#define REG_A7XX_CP_APERTURE_CNTL_CD 0x00000a03
+#define A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK 0x00003000
+#define A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT 12
+static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_PIPE(enum a7xx_pipe val)
+{
+ return ((val) << A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK;
+}
+#define A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK 0x00000700
+#define A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT 8
+static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_CLUSTER(enum a7xx_cluster val)
+{
+ return ((val) << A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK;
+}
+#define A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK 0x00000030
+#define A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT 4
+static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_CONTEXT(uint32_t val)
+{
+ return ((val) << A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK;
+}
+
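/*
 * Editorial sketch, not part of the patch: the new A7XX aperture helpers are
 * meant to be OR'd together into a single CP_APERTURE_CNTL_* write that
 * selects which pipe/cluster/context a following register access targets
 * (the enums are the a7xx_pipe/a7xx_cluster values added earlier in this
 * header).  a7xx_select_aperture() is a hypothetical wrapper for
 * illustration only.
 */
static inline uint32_t a7xx_select_aperture(enum a7xx_pipe pipe,
					    enum a7xx_cluster cluster,
					    uint32_t context)
{
	return A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe) |
	       A7XX_CP_APERTURE_CNTL_HOST_CLUSTER(cluster) |
	       A7XX_CP_APERTURE_CNTL_HOST_CONTEXT(context);
}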
#define REG_A7XX_CP_BV_PROTECT_STATUS 0x00000a61
#define REG_A7XX_CP_BV_HW_FAULT 0x00000a64
@@ -1472,7 +1766,6 @@ static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val)
#define REG_A6XX_RBBM_GPR0_CNTL 0x00000018
#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
-
#define REG_A6XX_RBBM_STATUS 0x00000210
#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
@@ -1520,93 +1813,93 @@ static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val)
#define REG_A7XX_RBBM_CLOCK_MODE_BV_GPC 0x00000288
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000400 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_CP(i0) (0x00000400 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000041c + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_RBBM(i0) (0x0000041c + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000424 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_PC(i0) (0x00000424 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000434 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_VFD(i0) (0x00000434 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000444 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_HLSQ(i0) (0x00000444 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000450 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_VPC(i0) (0x00000450 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000045c + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_CCU(i0) (0x0000045c + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000466 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_TSE(i0) (0x00000466 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000046e + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_RAS(i0) (0x0000046e + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000476 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_UCHE(i0) (0x00000476 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000048e + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_TP(i0) (0x0000048e + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000004a6 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_SP(i0) (0x000004a6 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000004d6 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_RB(i0) (0x000004d6 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000004e6 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_VSC(i0) (0x000004e6 + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000004ea + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_LRZ(i0) (0x000004ea + 0x2*(i0))
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000004f2 + 0x2*i0; }
+#define REG_A6XX_RBBM_PERFCTR_CMP(i0) (0x000004f2 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000300 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_CP(i0) (0x00000300 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000031c + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_RBBM(i0) (0x0000031c + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000324 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_PC(i0) (0x00000324 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000334 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_VFD(i0) (0x00000334 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000344 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_HLSQ(i0) (0x00000344 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000350 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_VPC(i0) (0x00000350 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000035c + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_CCU(i0) (0x0000035c + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000366 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_TSE(i0) (0x00000366 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000036e + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_RAS(i0) (0x0000036e + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000376 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_UCHE(i0) (0x00000376 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000038e + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_TP(i0) (0x0000038e + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000003a6 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_SP(i0) (0x000003a6 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000003d6 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_RB(i0) (0x000003d6 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000003e6 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_VSC(i0) (0x000003e6 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000003ea + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_LRZ(i0) (0x000003ea + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000003f2 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_CMP(i0) (0x000003f2 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_UFC(uint32_t i0) { return 0x000003fa + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_UFC(i0) (0x000003fa + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR2_HLSQ(uint32_t i0) { return 0x00000410 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR2_HLSQ(i0) (0x00000410 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR2_CP(uint32_t i0) { return 0x0000041c + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR2_CP(i0) (0x0000041c + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR2_SP(uint32_t i0) { return 0x0000042a + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR2_SP(i0) (0x0000042a + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR2_TP(uint32_t i0) { return 0x00000442 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR2_TP(i0) (0x00000442 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR2_UFC(uint32_t i0) { return 0x0000044e + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR2_UFC(i0) (0x0000044e + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_PC(uint32_t i0) { return 0x00000460 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_BV_PC(i0) (0x00000460 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_VFD(uint32_t i0) { return 0x00000470 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_BV_VFD(i0) (0x00000470 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_VPC(uint32_t i0) { return 0x00000480 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_BV_VPC(i0) (0x00000480 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_TSE(uint32_t i0) { return 0x0000048c + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_BV_TSE(i0) (0x0000048c + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_RAS(uint32_t i0) { return 0x00000494 + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_BV_RAS(i0) (0x00000494 + 0x2*(i0))
-static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_LRZ(uint32_t i0) { return 0x0000049c + 0x2*i0; }
+#define REG_A7XX_RBBM_PERFCTR_BV_LRZ(i0) (0x0000049c + 0x2*(i0))
#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
@@ -1622,7 +1915,7 @@ static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_LRZ(uint32_t i0) { return 0x0000
#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
-static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00000507 + 0x1*i0; }
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL(i0) (0x00000507 + 0x1*(i0))
#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
@@ -1710,9 +2003,7 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
-
#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
-
#define REG_A7XX_RBBM_INT_2_MASK 0x0000003a
#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
@@ -1725,6 +2016,8 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+#define REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL 0x000000ad
+
#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
@@ -1939,12 +2232,37 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d
+#define REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD 0x0000011e
+
+#define REG_A7XX_RBBM_CGC_P2S_TRIG_CMD 0x0000011f
+
#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120
#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121
#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+#define REG_A7XX_RBBM_CGC_P2S_STATUS 0x00000122
+#define A7XX_RBBM_CGC_P2S_STATUS_TXDONE 0x00000001
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_FCHE 0x00000123
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_FCHE 0x00000124
+
+#define REG_A6XX_RBBM_CLOCK_HYST_FCHE 0x00000125
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_MHUB 0x00000126
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_MHUB 0x00000127
+
+#define REG_A6XX_RBBM_CLOCK_HYST_MHUB 0x00000128
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GLC 0x00000129
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GLC 0x0000012a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GLC 0x0000012b
+
#define REG_A7XX_RBBM_CLOCK_HYST2_VFD 0x0000012f
#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff
@@ -2117,7 +2435,10 @@ static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
-static inline uint32_t REG_A6XX_VSC_PERFCTR_VSC_SEL(uint32_t i0) { return 0x00000cd8 + 0x1*i0; }
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL(i0) (0x00000cd8 + 0x1*(i0))
+
+#define REG_A7XX_VSC_UNKNOWN_0CD8 0x00000cd8
+#define A7XX_VSC_UNKNOWN_0CD8_BINNING 0x00000001
#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
@@ -2149,7 +2470,7 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
}
-static inline uint32_t REG_A6XX_UCHE_PERFCTR_UCHE_SEL(uint32_t i0) { return 0x00000e1c + 0x1*i0; }
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL(i0) (0x00000e1c + 0x1*(i0))
#define REG_A6XX_UCHE_GBIF_GX_CONFIG 0x00000e3a
@@ -2291,13 +2612,15 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
- return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
- return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
@@ -2316,7 +2639,7 @@ static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
}
-static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define REG_A6XX_VSC_PIPE_CONFIG(i0) (0x00000c10 + 0x1*(i0))
static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
@@ -2356,18 +2679,22 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37
-static inline uint32_t REG_A6XX_VSC_STATE(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+#define REG_A6XX_VSC_STATE(i0) (0x00000c38 + 0x1*(i0))
static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
-static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+#define REG_A6XX_VSC_PRIM_STRM_SIZE(i0) (0x00000c58 + 0x1*(i0))
static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
-static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+#define REG_A6XX_VSC_DRAW_STRM_SIZE(i0) (0x00000c78 + 0x1*(i0))
static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+#define REG_A7XX_UCHE_UNKNOWN_0E10 0x00000e10
+
+#define REG_A7XX_UCHE_UNKNOWN_0E11 0x00000e11
+
#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
#define REG_A6XX_GRAS_CL_CNTL 0x00008000
@@ -2437,6 +2764,8 @@ static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
{
return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
}
+#define A6XX_GRAS_CNTL_UNK10 0x00000400
+#define A6XX_GRAS_CNTL_UNK11 0x00000800
#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff
@@ -2452,7 +2781,19 @@ static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
}
-static inline uint32_t REG_A6XX_GRAS_CL_VPORT(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+#define REG_A7XX_GRAS_UNKNOWN_8007 0x00008007
+
+#define REG_A7XX_GRAS_UNKNOWN_8008 0x00008008
+
+#define REG_A7XX_GRAS_UNKNOWN_8009 0x00008009
+
+#define REG_A7XX_GRAS_UNKNOWN_800A 0x0000800a
+
+#define REG_A7XX_GRAS_UNKNOWN_800B 0x0000800b
+
+#define REG_A7XX_GRAS_UNKNOWN_800C 0x0000800c
+
+#define REG_A6XX_GRAS_CL_VPORT(i0) (0x00008010 + 0x6*(i0))
static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
@@ -2502,7 +2843,7 @@ static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
}
-static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+#define REG_A6XX_GRAS_CL_Z_CLAMP(i0) (0x00008070 + 0x2*(i0))
static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff
@@ -2531,12 +2872,7 @@ static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
}
#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
-#define A6XX_GRAS_SU_CNTL_UNK12__MASK 0x00001000
-#define A6XX_GRAS_SU_CNTL_UNK12__SHIFT 12
-static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
-}
+#define A6XX_GRAS_SU_CNTL_UNK12 0x00001000
#define A6XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
#define A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
static inline uint32_t A6XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
@@ -2549,13 +2885,14 @@ static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
{
return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
}
-#define A6XX_GRAS_SU_CNTL_UNK17 0x00020000
-#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00040000
-#define A6XX_GRAS_SU_CNTL_UNK19__MASK 0x00780000
-#define A6XX_GRAS_SU_CNTL_UNK19__SHIFT 19
-static inline uint32_t A6XX_GRAS_SU_CNTL_UNK19(uint32_t val)
+#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00020000
+#define A6XX_GRAS_SU_CNTL_RENDERTARGETINDEXINCR 0x00040000
+#define A6XX_GRAS_SU_CNTL_VIEWPORTINDEXINCR 0x00080000
+#define A6XX_GRAS_SU_CNTL_UNK20__MASK 0x00700000
+#define A6XX_GRAS_SU_CNTL_UNK20__SHIFT 20
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK20(uint32_t val)
{
- return ((val) << A6XX_GRAS_SU_CNTL_UNK19__SHIFT) & A6XX_GRAS_SU_CNTL_UNK19__MASK;
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK20__SHIFT) & A6XX_GRAS_SU_CNTL_UNK20__MASK;
}
#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
@@ -2619,12 +2956,7 @@ static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_dep
{
return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
}
-#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000008
-#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
-static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
-{
- return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK;
-}
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3 0x00000008
#define REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x00008099
#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
@@ -2703,13 +3035,15 @@ static inline uint32_t A6XX_GRAS_SC_CNTL_ROTATION(uint32_t val)
#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
}
#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00
#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
}
#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT 18
@@ -2730,12 +3064,7 @@ static inline uint32_t A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t va
{
return ((val) << A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
}
-#define A6XX_GRAS_BIN_CONTROL_UNK27__MASK 0x08000000
-#define A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT 27
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK27(uint32_t val)
-{
- return ((val) << A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK27__MASK;
-}
+#define A6XX_GRAS_BIN_CONTROL_UNK27 0x08000000
#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
@@ -2744,18 +3073,8 @@ static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples va
{
return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
}
-#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
-#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT 2
-static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK2(uint32_t val)
-{
- return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK;
-}
-#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
-#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT 3
-static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK3(uint32_t val)
-{
- return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK;
-}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2 0x00000004
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3 0x00000008
#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
@@ -2775,49 +3094,49 @@ static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples v
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
}
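/*
 * Illustrative note, not part of the generated header: the programmable
 * sample locations are now packed as 4-bit fixed point with 1/16-pixel
 * granularity instead of being truncated to whole pixels, e.g. (the sample
 * offsets are hypothetical):
 *
 *   A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(0.4375f)  packs (int32_t)(0.4375 * 16) == 7 into bits [3:0]
 *   A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(0.0625f)  packs (int32_t)(0.0625 * 16) == 1 into bits [11:8]
 *
 * Negative offsets rely on two's-complement truncation of the int32_t cast;
 * whether the hardware treats each nibble as signed is an assumption here,
 * not something stated by the patch.
 */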
#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6
@@ -2825,54 +3144,56 @@ static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
}
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
}
+#define REG_A7XX_GRAS_UNKNOWN_80A7 0x000080a7
+
#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
-static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR(i0) (0x000080b0 + 0x2*(i0))
static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff
@@ -2902,7 +3223,7 @@ static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
}
-static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(i0) (0x000080d0 + 0x2*(i0))
static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff
@@ -2960,6 +3281,18 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
}
+#define REG_A7XX_GRAS_UNKNOWN_80F4 0x000080f4
+
+#define REG_A7XX_GRAS_UNKNOWN_80F5 0x000080f5
+
+#define REG_A7XX_GRAS_UNKNOWN_80F6 0x000080f6
+
+#define REG_A7XX_GRAS_UNKNOWN_80F8 0x000080f8
+
+#define REG_A7XX_GRAS_UNKNOWN_80F9 0x000080f9
+
+#define REG_A7XX_GRAS_UNKNOWN_80FA 0x000080fa
+
#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
@@ -2975,6 +3308,12 @@ static inline uint32_t A6XX_GRAS_LRZ_CNTL_DIR(enum a6xx_lrz_dir_status val)
}
#define A6XX_GRAS_LRZ_CNTL_DIR_WRITE 0x00000100
#define A6XX_GRAS_LRZ_CNTL_DISABLE_ON_WRONG_DIR 0x00000200
+#define A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK 0x00003800
+#define A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT 11
+static inline uint32_t A6XX_GRAS_LRZ_CNTL_Z_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT) & A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK;
+}
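/*
 * Illustrative note, not part of the generated header: with the new Z_FUNC
 * field the LRZ test can be programmed with an adreno_compare_func value,
 * e.g. a hypothetical write-enabled greater-or-equal setup:
 *
 *   A6XX_GRAS_LRZ_CNTL_ENABLE |
 *   A6XX_GRAS_LRZ_CNTL_LRZ_WRITE |
 *   A6XX_GRAS_LRZ_CNTL_Z_FUNC(FUNC_GEQUAL)
 *
 * FUNC_GEQUAL is assumed to come from enum adreno_compare_func in the common
 * adreno headers; the register bits themselves are exactly the ones defined
 * above.
 */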
#define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101
#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001
@@ -2994,34 +3333,24 @@ static inline uint32_t A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT(enum a6xx_forma
}
#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103
-#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_GRAS_LRZ_BUFFER_BASE(uint32_t val)
-{
- return ((val) << A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_BUFFER_BASE__MASK;
-}
#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff
#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
{
- return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
}
#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00
#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10
static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106
-#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val)
-{
- return ((val) << A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK;
-}
#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109
#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
@@ -3046,8 +3375,24 @@ static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL(uint32_t val)
return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK;
}
+#define REG_A7XX_GRAS_UNKNOWN_810B 0x0000810b
+
#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
+#define REG_A7XX_GRAS_LRZ_CLEAR_DEPTH_F32 0x00008111
+#define A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__MASK 0xffffffff
+#define A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__SHIFT 0
+static inline uint32_t A7XX_GRAS_LRZ_CLEAR_DEPTH_F32(float val)
+{
+ return ((fui(val)) << A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__SHIFT) & A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__MASK;
+}
+
+#define REG_A7XX_GRAS_UNKNOWN_8113 0x00008113
+
+#define REG_A7XX_GRAS_UNKNOWN_8120 0x00008120
+
+#define REG_A7XX_GRAS_UNKNOWN_8121 0x00008121
+
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0
@@ -3095,14 +3440,39 @@ static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode
{
return ((val) << A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK;
}
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK30 0x40000000
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+#define A6XX_GRAS_2D_SRC_TL_X__MASK 0x01ffff00
+#define A6XX_GRAS_2D_SRC_TL_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_X(int32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X__MASK;
+}
#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+#define A6XX_GRAS_2D_SRC_BR_X__MASK 0x01ffff00
+#define A6XX_GRAS_2D_SRC_BR_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_X(int32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X__MASK;
+}
#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+#define A6XX_GRAS_2D_SRC_TL_Y__MASK 0x01ffff00
+#define A6XX_GRAS_2D_SRC_TL_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y(int32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y__MASK;
+}
#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+#define A6XX_GRAS_2D_SRC_BR_Y__MASK 0x01ffff00
+#define A6XX_GRAS_2D_SRC_BR_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y(int32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y__MASK;
+}
#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff
@@ -3174,24 +3544,26 @@ static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
#define REG_A7XX_GRAS_NC_MODE_CNTL 0x00008602
-static inline uint32_t REG_A6XX_GRAS_PERFCTR_TSE_SEL(uint32_t i0) { return 0x00008610 + 0x1*i0; }
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL(i0) (0x00008610 + 0x1*(i0))
-static inline uint32_t REG_A6XX_GRAS_PERFCTR_RAS_SEL(uint32_t i0) { return 0x00008614 + 0x1*i0; }
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL(i0) (0x00008614 + 0x1*(i0))
-static inline uint32_t REG_A6XX_GRAS_PERFCTR_LRZ_SEL(uint32_t i0) { return 0x00008618 + 0x1*i0; }
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL(i0) (0x00008618 + 0x1*(i0))
#define REG_A6XX_RB_BIN_CONTROL 0x00008800
#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
}
#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
}
#define A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
#define A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18
@@ -3213,6 +3585,35 @@ static inline uint32_t A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
return ((val) << A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
}
+#define REG_A7XX_RB_BIN_CONTROL 0x00008800
+#define A7XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A7XX_RB_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A7XX_RB_BIN_CONTROL_BINW(uint32_t val)
+{
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A7XX_RB_BIN_CONTROL_BINW__SHIFT) & A7XX_RB_BIN_CONTROL_BINW__MASK;
+}
+#define A7XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A7XX_RB_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A7XX_RB_BIN_CONTROL_BINH(uint32_t val)
+{
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A7XX_RB_BIN_CONTROL_BINH__SHIFT) & A7XX_RB_BIN_CONTROL_BINH__MASK;
+}
+#define A7XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
+#define A7XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18
+static inline uint32_t A7XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A7XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A7XX_RB_BIN_CONTROL_RENDER_MODE__MASK;
+}
+#define A7XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
+#define A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
+#define A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
+static inline uint32_t A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
+{
+ return ((val) << A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
+}
+
#define REG_A6XX_RB_RENDER_CNTL 0x00008801
#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000038
#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT 3
@@ -3250,6 +3651,27 @@ static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
}
+#define REG_A7XX_RB_RENDER_CNTL 0x00008801
+#define A7XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040
+#define A7XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100
+#define A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8
+static inline uint32_t A7XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK;
+}
+#define A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600
+#define A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9
+static inline uint32_t A7XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
+{
+ return ((val) << A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK;
+}
+#define A7XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800
+#define A7XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000
+
+#define REG_A7XX_GRAS_SU_RENDER_CNTL 0x00008116
+#define A7XX_GRAS_SU_RENDER_CNTL_BINNING 0x00000080
+
#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -3257,18 +3679,8 @@ static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
{
return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
}
-#define A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
-#define A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT 2
-static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK2(uint32_t val)
-{
- return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK;
-}
-#define A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
-#define A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT 3
-static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK3(uint32_t val)
-{
- return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK;
-}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2 0x00000004
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3 0x00000008
#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
@@ -3288,49 +3700,49 @@ static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
}
#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806
@@ -3338,49 +3750,49 @@ static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
}
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
}
#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
@@ -3514,7 +3926,7 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dithe
{
return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
}
-#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00001000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
{
@@ -3542,6 +3954,8 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe
#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
+#define REG_A7XX_RB_UNKNOWN_8812 0x00008812
+
#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
@@ -3556,7 +3970,7 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe
#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
-static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define REG_A6XX_RB_MRT(i0) (0x00008820 + 0x8*(i0))
static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
@@ -3626,12 +4040,7 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode
{
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
}
-#define A6XX_RB_MRT_BUF_INFO_UNK10__MASK 0x00000400
-#define A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT 10
-static inline uint32_t A6XX_RB_MRT_BUF_INFO_UNK10(uint32_t val)
-{
- return ((val) << A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT) & A6XX_RB_MRT_BUF_INFO_UNK10__MASK;
-}
+#define A6XX_RB_MRT_BUF_INFO_UNK10 0x00000400
#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3639,37 +4048,49 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
+static inline uint32_t REG_A7XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A7XX_RB_MRT_BUF_INFO_UNK10 0x00000400
+#define A7XX_RB_MRT_BUF_INFO_LOSSLESSCOMPEN 0x00000800
+#define A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+
static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
-#define A6XX_RB_MRT_PITCH__MASK 0x0000ffff
+#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
#define A6XX_RB_MRT_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
}
static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
-#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0x1fffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
}
static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
-#define A6XX_RB_MRT_BASE__MASK 0xffffffff
-#define A6XX_RB_MRT_BASE__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_BASE(uint32_t val)
-{
- return ((val) << A6XX_RB_MRT_BASE__SHIFT) & A6XX_RB_MRT_BASE__MASK;
-}
static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
-#define A6XX_RB_MRT_BASE_GMEM__MASK 0xfffff000
-#define A6XX_RB_MRT_BASE_GMEM__SHIFT 12
-static inline uint32_t A6XX_RB_MRT_BASE_GMEM(uint32_t val)
-{
- return ((val >> 12) << A6XX_RB_MRT_BASE_GMEM__SHIFT) & A6XX_RB_MRT_BASE_GMEM__MASK;
-}
#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
@@ -3757,6 +4178,9 @@ static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
#define A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080
+#define REG_A6XX_GRAS_SU_DEPTH_CNTL 0x00008114
+#define A6XX_GRAS_SU_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
+
#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
@@ -3771,12 +4195,34 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
}
+#define REG_A7XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018
+#define A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
+#define A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK 0x00000060
+#define A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT 5
+static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK;
+}
+#define A7XX_RB_DEPTH_BUFFER_INFO_LOSSLESSCOMPEN 0x00000080
+
#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff
#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
}
#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
@@ -3784,24 +4230,13 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875
-#define A6XX_RB_DEPTH_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE(uint32_t val)
-{
- return ((val) << A6XX_RB_DEPTH_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE__MASK;
-}
#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
-#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK 0xfffff000
-#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT 12
-static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE_GMEM(uint32_t val)
-{
- return ((val >> 12) << A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK;
-}
#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878
#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff
@@ -3872,16 +4307,30 @@ static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
}
+#define REG_A6XX_GRAS_SU_STENCIL_CNTL 0x00008115
+#define A6XX_GRAS_SU_STENCIL_CNTL_STENCIL_ENABLE 0x00000001
+
#define REG_A6XX_RB_STENCIL_INFO 0x00008881
#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002
+#define REG_A7XX_RB_STENCIL_INFO 0x00008881
+#define A7XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A7XX_RB_STENCIL_INFO_UNK1 0x00000002
+#define A7XX_RB_STENCIL_INFO_TILEMODE__MASK 0x0000000c
+#define A7XX_RB_STENCIL_INFO_TILEMODE__SHIFT 2
+static inline uint32_t A7XX_RB_STENCIL_INFO_TILEMODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A7XX_RB_STENCIL_INFO_TILEMODE__SHIFT) & A7XX_RB_STENCIL_INFO_TILEMODE__MASK;
+}
+
#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff
#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
}
#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
@@ -3889,24 +4338,13 @@ static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884
-#define A6XX_RB_STENCIL_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE(uint32_t val)
-{
- return ((val) << A6XX_RB_STENCIL_BUFFER_BASE__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE__MASK;
-}
#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
-#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK 0xfffff000
-#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT 12
-static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE_GMEM(uint32_t val)
-{
- return ((val >> 12) << A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK;
-}
#define REG_A6XX_RB_STENCILREF 0x00008887
#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
@@ -3971,6 +4409,8 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A6XX_RB_LRZ_CNTL 0x00008898
#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+#define REG_A7XX_RB_UNKNOWN_8899 0x00008899
+
#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0
#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff
#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0
@@ -4034,13 +4474,15 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
}
#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00
#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
}
#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
@@ -4066,12 +4508,6 @@ static inline uint32_t A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES(enum a3xx_msaa_sample
}
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
-#define A6XX_RB_BLIT_BASE_GMEM__MASK 0xfffff000
-#define A6XX_RB_BLIT_BASE_GMEM__SHIFT 12
-static inline uint32_t A6XX_RB_BLIT_BASE_GMEM(uint32_t val)
-{
- return ((val >> 12) << A6XX_RB_BLIT_BASE_GMEM__SHIFT) & A6XX_RB_BLIT_BASE_GMEM__MASK;
-}
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
@@ -4102,19 +4538,14 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000
#define REG_A6XX_RB_BLIT_DST 0x000088d8
-#define A6XX_RB_BLIT_DST__MASK 0xffffffff
-#define A6XX_RB_BLIT_DST__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK;
-}
#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff
#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
}
#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
@@ -4122,29 +4553,26 @@ static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc
-#define A6XX_RB_BLIT_FLAG_DST__MASK 0xffffffff
-#define A6XX_RB_BLIT_FLAG_DST__SHIFT 0
-static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val)
-{
- return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK;
-}
#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de
#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff
#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
}
#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800
#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11
static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 7) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
+ assert(!(val & 0x7f));
+ return (((val >> 7)) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
@@ -4179,46 +4607,82 @@ static inline uint32_t A6XX_RB_BLIT_INFO_BUFFER_ID(uint32_t val)
return ((val) << A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT) & A6XX_RB_BLIT_INFO_BUFFER_ID__MASK;
}
-#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+#define REG_A7XX_RB_UNKNOWN_88E4 0x000088e4
+#define A7XX_RB_UNKNOWN_88E4_UNK0 0x00000001
-#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1
-#define A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_BASE(uint32_t val)
+#define REG_A7XX_RB_CCU_CNTL2 0x000088e5
+#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__MASK 0x00000001
+#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__SHIFT 0
+static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI(uint32_t val)
+{
+ return ((val) << A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__MASK;
+}
+#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__MASK 0x00000004
+#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__SHIFT 2
+static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI(uint32_t val)
{
- return ((val) << A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK;
+ return ((val) << A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__MASK;
+}
+#define A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__MASK 0x00000c00
+#define A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__SHIFT 10
+static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE(enum a6xx_ccu_cache_size val)
+{
+ return ((val) << A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__MASK;
+}
+#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__MASK 0x001ff000
+#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__SHIFT 12
+static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_OFFSET(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__MASK;
+}
+#define A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__MASK 0x00600000
+#define A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__SHIFT 21
+static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE(enum a6xx_ccu_cache_size val)
+{
+ return ((val) << A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__MASK;
+}
+#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET__MASK 0xff800000
+#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET__SHIFT 23
+static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_OFFSET(uint32_t val)
+{
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A7XX_RB_CCU_CNTL2_COLOR_OFFSET__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_OFFSET__MASK;
}
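/*
 * Illustrative sketch, not part of the generated header: the CCU offsets are
 * byte offsets into GMEM and must be 4 KiB aligned (hence the asserts above).
 * The *_OFFSET_HI bits appear to carry the offset bit that does not fit in
 * the 9-bit low field; routing bit 21 of the byte offset to DEPTH_OFFSET_HI
 * as shown below is an assumption, not something stated by the patch.
 */
static inline uint32_t example_rb_ccu_cntl2_depth(uint32_t gmem_depth_offset)
{
	/* DEPTH_OFFSET keeps bits [20:12], DEPTH_OFFSET_HI the bit above them */
	return A7XX_RB_CCU_CNTL2_DEPTH_OFFSET(gmem_depth_offset) |
	       A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI(gmem_depth_offset >> 21);
}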
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1
+
#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3
#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
}
#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800
#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 7) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+ assert(!(val & 0x7f));
+ return (((val >> 7)) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4
+#define REG_A7XX_RB_UNKNOWN_88F5 0x000088f5
+
#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900
-#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_BASE(uint32_t val)
-{
- return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK;
-}
#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f
#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
}
#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700
#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8
@@ -4230,40 +4694,31 @@ static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val)
#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 7) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+ assert(!(val & 0x7f));
+ return (((val >> 7)) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
-static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+#define REG_A6XX_RB_MRT_FLAG_BUFFER(i0) (0x00008903 + 0x3*(i0))
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
-#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK 0xffffffff
-#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t val)
-{
- return ((val) << A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK;
-}
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
}
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+ assert(!(val & 0x7f));
+ return (((val >> 7)) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927
-#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK 0xffffffff
-#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT 0
-static inline uint32_t A6XX_RB_SAMPLE_COUNT_ADDR(uint32_t val)
-{
- return ((val) << A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT) & A6XX_RB_SAMPLE_COUNT_ADDR__MASK;
-}
#define REG_A6XX_RB_UNKNOWN_8A00 0x00008a00
@@ -4320,6 +4775,7 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode va
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK;
}
+#define A6XX_RB_2D_BLIT_CNTL_UNK30 0x40000000
#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01
@@ -4366,75 +4822,49 @@ static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val)
#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000
#define REG_A6XX_RB_2D_DST 0x00008c18
-#define A6XX_RB_2D_DST__MASK 0xffffffff
-#define A6XX_RB_2D_DST__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_DST__SHIFT) & A6XX_RB_2D_DST__MASK;
-}
#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a
#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff
#define A6XX_RB_2D_DST_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
}
#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b
-#define A6XX_RB_2D_DST_PLANE1__MASK 0xffffffff
-#define A6XX_RB_2D_DST_PLANE1__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_PLANE1(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_DST_PLANE1__SHIFT) & A6XX_RB_2D_DST_PLANE1__MASK;
-}
#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d
#define A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff
#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
}
#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e
-#define A6XX_RB_2D_DST_PLANE2__MASK 0xffffffff
-#define A6XX_RB_2D_DST_PLANE2__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK;
-}
#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20
-#define A6XX_RB_2D_DST_FLAGS__MASK 0xffffffff
-#define A6XX_RB_2D_DST_FLAGS__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_FLAGS(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_DST_FLAGS__SHIFT) & A6XX_RB_2D_DST_FLAGS__MASK;
-}
#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22
#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff
#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
}
#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23
-#define A6XX_RB_2D_DST_FLAGS_PLANE__MASK 0xffffffff
-#define A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE(uint32_t val)
-{
- return ((val) << A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE__MASK;
-}
#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25
#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff
#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
}
#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
@@ -4451,7 +4881,10 @@ static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+#define REG_A7XX_RB_UNKNOWN_8E06 0x00008e06
+
#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+#define A6XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE 0x00000001
#define A6XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004
#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK 0x00000080
#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT 7
@@ -4465,20 +4898,37 @@ static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI(uint32_t val)
{
return ((val) << A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK;
}
+#define A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK 0x00000c00
+#define A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT 10
+static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE(enum a6xx_ccu_cache_size val)
+{
+ return ((val) << A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK;
+}
#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000
#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12
static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val)
{
- return ((val >> 12) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK;
+}
+#define A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK 0x00600000
+#define A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT 21
+static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE(enum a6xx_ccu_cache_size val)
+{
+ return ((val) << A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK;
}
-#define A6XX_RB_CCU_CNTL_GMEM 0x00400000
#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000
#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23
static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val)
{
- return ((val >> 12) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK;
}
+#define REG_A7XX_RB_CCU_CNTL 0x00008e07
+#define A7XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE 0x00000001
+#define A7XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004
+
#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001
#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
@@ -4503,15 +4953,17 @@ static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
}
-static inline uint32_t REG_A6XX_RB_PERFCTR_RB_SEL(uint32_t i0) { return 0x00008e10 + 0x1*i0; }
+#define REG_A7XX_RB_UNKNOWN_8E09 0x00008e09
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL(i0) (0x00008e10 + 0x1*(i0))
-static inline uint32_t REG_A6XX_RB_PERFCTR_CCU_SEL(uint32_t i0) { return 0x00008e18 + 0x1*i0; }
+#define REG_A6XX_RB_PERFCTR_CCU_SEL(i0) (0x00008e18 + 0x1*(i0))
#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28
-static inline uint32_t REG_A6XX_RB_PERFCTR_CMP_SEL(uint32_t i0) { return 0x00008e2c + 0x1*i0; }
+#define REG_A6XX_RB_PERFCTR_CMP_SEL(i0) (0x00008e2c + 0x1*(i0))
-static inline uint32_t REG_A7XX_RB_PERFCTR_UFC_SEL(uint32_t i0) { return 0x00008e30 + 0x1*i0; }
+#define REG_A7XX_RB_PERFCTR_UFC_SEL(i0) (0x00008e30 + 0x1*(i0))
#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b
@@ -4520,12 +4972,8 @@ static inline uint32_t REG_A7XX_RB_PERFCTR_UFC_SEL(uint32_t i0) { return 0x00008
#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51
-#define A6XX_RB_UNKNOWN_8E51__MASK 0xffffffff
-#define A6XX_RB_UNKNOWN_8E51__SHIFT 0
-static inline uint32_t A6XX_RB_UNKNOWN_8E51(uint32_t val)
-{
- return ((val) << A6XX_RB_UNKNOWN_8E51__SHIFT) & A6XX_RB_UNKNOWN_8E51__MASK;
-}
+
+#define REG_A7XX_RB_UNKNOWN_8E79 0x00008e79
#define REG_A6XX_VPC_GS_PARAM 0x00009100
#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK 0x000000ff
@@ -4595,6 +5043,66 @@ static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
}
+#define REG_A6XX_VPC_VS_CLIP_CNTL_V2 0x00009311
+#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_CLIP_CNTL_V2 0x00009312
+#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_CLIP_CNTL_V2 0x00009313
+#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK;
+}
+
#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104
#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0
@@ -4608,6 +5116,12 @@ static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val)
{
return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK;
}
+#define A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__MASK;
+}
#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105
#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
@@ -4622,6 +5136,12 @@ static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val)
{
return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK;
}
+#define A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__MASK;
+}
#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106
#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
@@ -4636,6 +5156,72 @@ static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
{
return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK;
}
+#define A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__MASK;
+}
+
+#define REG_A6XX_VPC_VS_LAYER_CNTL_V2 0x00009314
+#define A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__MASK;
+}
+#define A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__MASK;
+}
+#define A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_LAYER_CNTL_V2 0x00009315
+#define A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__MASK;
+}
+#define A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__MASK;
+}
+#define A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_LAYER_CNTL_V2 0x00009316
+#define A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__MASK;
+}
+#define A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__MASK;
+}
+#define A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__MASK;
+}
#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001
@@ -4649,11 +5235,51 @@ static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK;
}
-static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+#define REG_A7XX_VPC_PRIMITIVE_CNTL_0 0x00009109
+#define A7XX_VPC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A7XX_VPC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+#define A7XX_VPC_PRIMITIVE_CNTL_0_D3D_VERTEX_ORDERING 0x00000004
+#define A7XX_VPC_PRIMITIVE_CNTL_0_UNK3 0x00000008
+
+#define REG_A7XX_VPC_PRIMITIVE_CNTL_5 0x0000910a
+#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
+#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0
+static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
+}
+#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00
+#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10
+static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
+}
+#define A7XX_VPC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000
+#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000
+#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16
+static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
+}
+#define A7XX_VPC_PRIMITIVE_CNTL_5_UNK18 0x00040000
+
+#define REG_A7XX_VPC_MULTIVIEW_MASK 0x0000910b
+
+#define REG_A7XX_VPC_MULTIVIEW_CNTL 0x0000910c
+#define A7XX_VPC_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A7XX_VPC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A7XX_VPC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A7XX_VPC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A7XX_VPC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A7XX_VPC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A7XX_VPC_MULTIVIEW_CNTL_VIEWS__MASK;
+}
+
+#define REG_A6XX_VPC_VARYING_INTERP(i0) (0x00009200 + 0x1*(i0))
static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
-static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+#define REG_A6XX_VPC_VARYING_PS_REPL(i0) (0x00009208 + 0x1*(i0))
static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
@@ -4661,7 +5287,7 @@ static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
-static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+#define REG_A6XX_VPC_VAR(i0) (0x00009212 + 0x1*(i0))
static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
@@ -4685,7 +5311,8 @@ static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
{
- return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
}
#define A6XX_VPC_SO_PROG_A_EN 0x00000800
#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
@@ -4698,59 +5325,24 @@ static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
{
- return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
}
#define A6XX_VPC_SO_PROG_B_EN 0x00800000
#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218
-#define A6XX_VPC_SO_STREAM_COUNTS__MASK 0xffffffff
-#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_STREAM_COUNTS(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_STREAM_COUNTS__SHIFT) & A6XX_VPC_SO_STREAM_COUNTS__MASK;
-}
-static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+#define REG_A6XX_VPC_SO(i0) (0x0000921a + 0x7*(i0))
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; }
-#define A6XX_VPC_SO_BUFFER_BASE__MASK 0xffffffff
-#define A6XX_VPC_SO_BUFFER_BASE__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK;
-}
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
-#define A6XX_VPC_SO_BUFFER_SIZE__MASK 0xfffffffc
-#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT 2
-static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val)
-{
- return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK;
-}
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_STRIDE(uint32_t i0) { return 0x0000921d + 0x7*i0; }
-#define A6XX_VPC_SO_BUFFER_STRIDE__MASK 0x000003ff
-#define A6XX_VPC_SO_BUFFER_STRIDE__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_BUFFER_STRIDE(uint32_t val)
-{
- return ((val >> 2) << A6XX_VPC_SO_BUFFER_STRIDE__SHIFT) & A6XX_VPC_SO_BUFFER_STRIDE__MASK;
-}
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
-#define A6XX_VPC_SO_BUFFER_OFFSET__MASK 0xfffffffc
-#define A6XX_VPC_SO_BUFFER_OFFSET__SHIFT 2
-static inline uint32_t A6XX_VPC_SO_BUFFER_OFFSET(uint32_t val)
-{
- return ((val >> 2) << A6XX_VPC_SO_BUFFER_OFFSET__SHIFT) & A6XX_VPC_SO_BUFFER_OFFSET__MASK;
-}
static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; }
-#define A6XX_VPC_SO_FLUSH_BASE__MASK 0xffffffff
-#define A6XX_VPC_SO_FLUSH_BASE__SHIFT 0
-static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val)
-{
- return ((val) << A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK;
-}
#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236
#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001
@@ -4891,6 +5483,38 @@ static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
#define REG_A6XX_VPC_SO_DISABLE 0x00009306
#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001
+#define REG_A7XX_VPC_POLYGON_MODE2 0x00009307
+#define A7XX_VPC_POLYGON_MODE2_MODE__MASK 0x00000003
+#define A7XX_VPC_POLYGON_MODE2_MODE__SHIFT 0
+static inline uint32_t A7XX_VPC_POLYGON_MODE2_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A7XX_VPC_POLYGON_MODE2_MODE__SHIFT) & A7XX_VPC_POLYGON_MODE2_MODE__MASK;
+}
+
+#define REG_A7XX_VPC_ATTR_BUF_SIZE_GMEM 0x00009308
+#define A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK 0xffffffff
+#define A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT 0
+static inline uint32_t A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM(uint32_t val)
+{
+ return ((val) << A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT) & A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK;
+}
+
+#define REG_A7XX_VPC_ATTR_BUF_BASE_GMEM 0x00009309
+#define A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__MASK 0xffffffff
+#define A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__SHIFT 0
+static inline uint32_t A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM(uint32_t val)
+{
+ return ((val) << A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__SHIFT) & A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__MASK;
+}
+
+#define REG_A7XX_PC_ATTR_BUF_SIZE_GMEM 0x00009b09
+#define A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK 0xffffffff
+#define A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT 0
+static inline uint32_t A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM(uint32_t val)
+{
+ return ((val) << A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT) & A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK;
+}
+
#define REG_A6XX_VPC_DBG_ECO_CNTL 0x00009600
#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
@@ -4899,9 +5523,9 @@ static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603
-static inline uint32_t REG_A6XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x00009604 + 0x1*i0; }
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL(i0) (0x00009604 + 0x1*(i0))
-static inline uint32_t REG_A7XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x0000960b + 0x1*i0; }
+#define REG_A7XX_VPC_PERFCTR_VPC_SEL(i0) (0x0000960b + 0x1*(i0))
#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800
@@ -4912,12 +5536,7 @@ static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val)
{
return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK;
}
-#define A6XX_PC_HS_INPUT_SIZE_UNK13__MASK 0x00002000
-#define A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT 13
-static inline uint32_t A6XX_PC_HS_INPUT_SIZE_UNK13(uint32_t val)
-{
- return ((val) << A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT) & A6XX_PC_HS_INPUT_SIZE_UNK13__MASK;
-}
+#define A6XX_PC_HS_INPUT_SIZE_UNK13 0x00002000
#define REG_A6XX_PC_TESS_CNTL 0x00009802
#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003
@@ -4939,7 +5558,8 @@ static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val)
#define REG_A6XX_PC_POWER_CNTL 0x00009805
-#define REG_A6XX_PC_PRIMID_PASSTHRU 0x00009806
+#define REG_A6XX_PC_PS_CNTL 0x00009806
+#define A6XX_PC_PS_CNTL_PRIMITIVEIDEN 0x00000001
#define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808
#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
@@ -4992,6 +5612,14 @@ static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
}
+#define REG_A7XX_PC_POLYGON_MODE 0x00009809
+#define A7XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A7XX_PC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A7XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A7XX_PC_POLYGON_MODE_MODE__SHIFT) & A7XX_PC_POLYGON_MODE_MODE__MASK;
+}
+
#define REG_A6XX_PC_RASTER_CNTL 0x00009980
#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0
@@ -5001,10 +5629,28 @@ static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val)
}
#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004
+#define REG_A7XX_PC_RASTER_CNTL 0x00009107
+#define A7XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
+#define A7XX_PC_RASTER_CNTL_STREAM__SHIFT 0
+static inline uint32_t A7XX_PC_RASTER_CNTL_STREAM(uint32_t val)
+{
+ return ((val) << A7XX_PC_RASTER_CNTL_STREAM__SHIFT) & A7XX_PC_RASTER_CNTL_STREAM__MASK;
+}
+#define A7XX_PC_RASTER_CNTL_DISCARD 0x00000004
+
+#define REG_A7XX_PC_RASTER_CNTL_V2 0x00009317
+#define A7XX_PC_RASTER_CNTL_V2_STREAM__MASK 0x00000003
+#define A7XX_PC_RASTER_CNTL_V2_STREAM__SHIFT 0
+static inline uint32_t A7XX_PC_RASTER_CNTL_V2_STREAM(uint32_t val)
+{
+ return ((val) << A7XX_PC_RASTER_CNTL_V2_STREAM__SHIFT) & A7XX_PC_RASTER_CNTL_V2_STREAM__MASK;
+}
+#define A7XX_PC_RASTER_CNTL_V2_DISCARD 0x00000004
+
#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
-#define A6XX_PC_PRIMITIVE_CNTL_0_TESS_UPPER_LEFT_DOMAIN_ORIGIN 0x00000004
+#define A6XX_PC_PRIMITIVE_CNTL_0_D3D_VERTEX_ORDERING 0x00000004
#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008
#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01
@@ -5024,6 +5670,7 @@ static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val)
{
return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK;
}
+#define A6XX_PC_VS_OUT_CNTL_SHADINGRATE 0x01000000
#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02
#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
@@ -5042,6 +5689,7 @@ static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val)
{
return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK;
}
+#define A6XX_PC_GS_OUT_CNTL_SHADINGRATE 0x01000000
#define REG_A6XX_PC_HS_OUT_CNTL 0x00009b03
#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
@@ -5060,6 +5708,7 @@ static inline uint32_t A6XX_PC_HS_OUT_CNTL_CLIP_MASK(uint32_t val)
{
return ((val) << A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK;
}
+#define A6XX_PC_HS_OUT_CNTL_SHADINGRATE 0x01000000
#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04
#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
@@ -5078,6 +5727,7 @@ static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val)
{
return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK;
}
+#define A6XX_PC_DS_OUT_CNTL_SHADINGRATE 0x01000000
#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05
#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
@@ -5099,12 +5749,7 @@ static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output
{
return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
}
-#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK 0x00040000
-#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT 18
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_UNK18(uint32_t val)
-{
- return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK;
-}
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18 0x00040000
#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06
#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff
@@ -5151,12 +5796,8 @@ static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07
#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08
-#define A6XX_PC_TESSFACTOR_ADDR__MASK 0xffffffff
-#define A6XX_PC_TESSFACTOR_ADDR__SHIFT 0
-static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val)
-{
- return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK;
-}
+
+#define REG_A7XX_PC_TESSFACTOR_ADDR 0x00009810
#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b
#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
@@ -5217,27 +5858,17 @@ static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
}
#define REG_A6XX_PC_BIN_PRIM_STRM 0x00009e12
-#define A6XX_PC_BIN_PRIM_STRM__MASK 0xffffffff
-#define A6XX_PC_BIN_PRIM_STRM__SHIFT 0
-static inline uint32_t A6XX_PC_BIN_PRIM_STRM(uint32_t val)
-{
- return ((val) << A6XX_PC_BIN_PRIM_STRM__SHIFT) & A6XX_PC_BIN_PRIM_STRM__MASK;
-}
#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14
-#define A6XX_PC_BIN_DRAW_STRM__MASK 0xffffffff
-#define A6XX_PC_BIN_DRAW_STRM__SHIFT 0
-static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val)
-{
- return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK;
-}
#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c
#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001
-static inline uint32_t REG_A6XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e34 + 0x1*i0; }
+#define REG_A7XX_PC_UNKNOWN_9E24 0x00009e24
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL(i0) (0x00009e34 + 0x1*(i0))
-static inline uint32_t REG_A7XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e42 + 0x1*i0; }
+#define REG_A7XX_PC_PERFCTR_PC_SEL(i0) (0x00009e42 + 0x1*(i0))
#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
@@ -5344,7 +5975,7 @@ static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val)
}
#define REG_A6XX_VFD_CONTROL_6 0x0000a006
-#define A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU 0x00000001
+#define A6XX_VFD_CONTROL_6_PRIMID4PSEN 0x00000001
#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
#define A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK 0x00000007
@@ -5372,21 +6003,15 @@ static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val)
#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
-static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+#define REG_A6XX_VFD_FETCH(i0) (0x0000a010 + 0x4*(i0))
static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
-#define A6XX_VFD_FETCH_BASE__MASK 0xffffffff
-#define A6XX_VFD_FETCH_BASE__SHIFT 0
-static inline uint32_t A6XX_VFD_FETCH_BASE(uint32_t val)
-{
- return ((val) << A6XX_VFD_FETCH_BASE__SHIFT) & A6XX_VFD_FETCH_BASE__MASK;
-}
static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
-static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define REG_A6XX_VFD_DECODE(i0) (0x0000a090 + 0x2*(i0))
static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
@@ -5419,7 +6044,7 @@ static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
-static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define REG_A6XX_VFD_DEST_CNTL(i0) (0x0000a0d0 + 0x1*(i0))
static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
@@ -5437,15 +6062,15 @@ static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
#define REG_A6XX_VFD_POWER_CNTL 0x0000a0f8
+#define REG_A7XX_VFD_UNKNOWN_A600 0x0000a600
+
#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
-static inline uint32_t REG_A6XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL(i0) (0x0000a610 + 0x1*(i0))
-static inline uint32_t REG_A7XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+#define REG_A7XX_VFD_PERFCTR_VFD_SEL(i0) (0x0000a610 + 0x1*(i0))
#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
-#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
-#define A6XX_SP_VS_CTRL_REG0_EARLYPREAMBLE 0x00200000
#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
@@ -5471,6 +6096,8 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
}
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_EARLYPREAMBLE 0x00200000
#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
@@ -5488,7 +6115,7 @@ static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
}
-static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define REG_A6XX_SP_VS_OUT(i0) (0x0000a803 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
@@ -5516,7 +6143,7 @@ static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define REG_A6XX_SP_VS_VPC_DST(i0) (0x0000a813 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -5547,19 +6174,14 @@ static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b
#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c
-#define A6XX_SP_VS_OBJ_START__MASK 0xffffffff
-#define A6XX_SP_VS_OBJ_START__SHIFT 0
-static inline uint32_t A6XX_SP_VS_OBJ_START(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_OBJ_START__SHIFT) & A6XX_SP_VS_OBJ_START__MASK;
-}
#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e
#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
- return ((val >> 9) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}
#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
@@ -5569,19 +6191,14 @@ static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t va
}
#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f
-#define A6XX_SP_VS_PVT_MEM_ADDR__MASK 0xffffffff
-#define A6XX_SP_VS_PVT_MEM_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_VS_PVT_MEM_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_VS_PVT_MEM_ADDR__MASK;
-}
#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821
#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
- return ((val >> 12) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}
#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
@@ -5619,11 +6236,13 @@ static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
{
- return ((val >> 11) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
}
+#define REG_A7XX_SP_VS_VGPR_CONFIG 0x0000a82d
+
#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
-#define A6XX_SP_HS_CTRL_REG0_EARLYPREAMBLE 0x00100000
#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
@@ -5649,6 +6268,7 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
}
+#define A6XX_SP_HS_CTRL_REG0_EARLYPREAMBLE 0x00100000
#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831
@@ -5657,19 +6277,14 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833
#define REG_A6XX_SP_HS_OBJ_START 0x0000a834
-#define A6XX_SP_HS_OBJ_START__MASK 0xffffffff
-#define A6XX_SP_HS_OBJ_START__SHIFT 0
-static inline uint32_t A6XX_SP_HS_OBJ_START(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_OBJ_START__SHIFT) & A6XX_SP_HS_OBJ_START__MASK;
-}
#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836
#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
- return ((val >> 9) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}
#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
@@ -5679,19 +6294,14 @@ static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t va
}
#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837
-#define A6XX_SP_HS_PVT_MEM_ADDR__MASK 0xffffffff
-#define A6XX_SP_HS_PVT_MEM_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_HS_PVT_MEM_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_HS_PVT_MEM_ADDR__MASK;
-}
#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839
#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
- return ((val >> 12) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}
#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
@@ -5729,11 +6339,13 @@ static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
{
- return ((val >> 11) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
}
+#define REG_A7XX_SP_HS_VGPR_CONFIG 0x0000a82f
+
#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
-#define A6XX_SP_DS_CTRL_REG0_EARLYPREAMBLE 0x00100000
#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
@@ -5759,6 +6371,7 @@ static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
}
+#define A6XX_SP_DS_CTRL_REG0_EARLYPREAMBLE 0x00100000
#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841
@@ -5776,7 +6389,7 @@ static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
}
-static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+#define REG_A6XX_SP_DS_OUT(i0) (0x0000a843 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff
@@ -5804,7 +6417,7 @@ static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A6XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+#define REG_A6XX_SP_DS_VPC_DST(i0) (0x0000a853 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -5835,19 +6448,14 @@ static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b
#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c
-#define A6XX_SP_DS_OBJ_START__MASK 0xffffffff
-#define A6XX_SP_DS_OBJ_START__SHIFT 0
-static inline uint32_t A6XX_SP_DS_OBJ_START(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_OBJ_START__SHIFT) & A6XX_SP_DS_OBJ_START__MASK;
-}
#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e
#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
- return ((val >> 9) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}
#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
@@ -5857,19 +6465,14 @@ static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t va
}
#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f
-#define A6XX_SP_DS_PVT_MEM_ADDR__MASK 0xffffffff
-#define A6XX_SP_DS_PVT_MEM_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_DS_PVT_MEM_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_DS_PVT_MEM_ADDR__MASK;
-}
#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861
#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
- return ((val >> 12) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}
#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
@@ -5907,11 +6510,13 @@ static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
{
- return ((val >> 11) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
}
+#define REG_A7XX_SP_DS_VGPR_CONFIG 0x0000a868
+
#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
-#define A6XX_SP_GS_CTRL_REG0_EARLYPREAMBLE 0x00100000
#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
@@ -5937,6 +6542,7 @@ static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
}
+#define A6XX_SP_GS_CTRL_REG0_EARLYPREAMBLE 0x00100000
#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
@@ -5956,7 +6562,7 @@ static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
}
-static inline uint32_t REG_A6XX_SP_GS_OUT(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+#define REG_A6XX_SP_GS_OUT(i0) (0x0000a874 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff
@@ -5984,7 +6590,7 @@ static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
}
-static inline uint32_t REG_A6XX_SP_GS_VPC_DST(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+#define REG_A6XX_SP_GS_VPC_DST(i0) (0x0000a884 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
@@ -6015,19 +6621,14 @@ static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c
#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d
-#define A6XX_SP_GS_OBJ_START__MASK 0xffffffff
-#define A6XX_SP_GS_OBJ_START__SHIFT 0
-static inline uint32_t A6XX_SP_GS_OBJ_START(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_OBJ_START__SHIFT) & A6XX_SP_GS_OBJ_START__MASK;
-}
#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f
#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
- return ((val >> 9) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}
#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
@@ -6037,19 +6638,14 @@ static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t va
}
#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890
-#define A6XX_SP_GS_PVT_MEM_ADDR__MASK 0xffffffff
-#define A6XX_SP_GS_PVT_MEM_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_GS_PVT_MEM_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_GS_PVT_MEM_ADDR__MASK;
-}
#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892
#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
- return ((val >> 12) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}
#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
@@ -6087,89 +6683,29 @@ static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
{
- return ((val >> 11) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
}
+#define REG_A7XX_SP_GS_VGPR_CONFIG 0x0000a899
+
#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0
-#define A6XX_SP_VS_TEX_SAMP__MASK 0xffffffff
-#define A6XX_SP_VS_TEX_SAMP__SHIFT 0
-static inline uint32_t A6XX_SP_VS_TEX_SAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_TEX_SAMP__SHIFT) & A6XX_SP_VS_TEX_SAMP__MASK;
-}
#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2
-#define A6XX_SP_HS_TEX_SAMP__MASK 0xffffffff
-#define A6XX_SP_HS_TEX_SAMP__SHIFT 0
-static inline uint32_t A6XX_SP_HS_TEX_SAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_TEX_SAMP__SHIFT) & A6XX_SP_HS_TEX_SAMP__MASK;
-}
#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4
-#define A6XX_SP_DS_TEX_SAMP__MASK 0xffffffff
-#define A6XX_SP_DS_TEX_SAMP__SHIFT 0
-static inline uint32_t A6XX_SP_DS_TEX_SAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_TEX_SAMP__SHIFT) & A6XX_SP_DS_TEX_SAMP__MASK;
-}
#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6
-#define A6XX_SP_GS_TEX_SAMP__MASK 0xffffffff
-#define A6XX_SP_GS_TEX_SAMP__SHIFT 0
-static inline uint32_t A6XX_SP_GS_TEX_SAMP(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_TEX_SAMP__SHIFT) & A6XX_SP_GS_TEX_SAMP__MASK;
-}
#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8
-#define A6XX_SP_VS_TEX_CONST__MASK 0xffffffff
-#define A6XX_SP_VS_TEX_CONST__SHIFT 0
-static inline uint32_t A6XX_SP_VS_TEX_CONST(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_TEX_CONST__SHIFT) & A6XX_SP_VS_TEX_CONST__MASK;
-}
#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa
-#define A6XX_SP_HS_TEX_CONST__MASK 0xffffffff
-#define A6XX_SP_HS_TEX_CONST__SHIFT 0
-static inline uint32_t A6XX_SP_HS_TEX_CONST(uint32_t val)
-{
- return ((val) << A6XX_SP_HS_TEX_CONST__SHIFT) & A6XX_SP_HS_TEX_CONST__MASK;
-}
#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac
-#define A6XX_SP_DS_TEX_CONST__MASK 0xffffffff
-#define A6XX_SP_DS_TEX_CONST__SHIFT 0
-static inline uint32_t A6XX_SP_DS_TEX_CONST(uint32_t val)
-{
- return ((val) << A6XX_SP_DS_TEX_CONST__SHIFT) & A6XX_SP_DS_TEX_CONST__MASK;
-}
#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae
-#define A6XX_SP_GS_TEX_CONST__MASK 0xffffffff
-#define A6XX_SP_GS_TEX_CONST__SHIFT 0
-static inline uint32_t A6XX_SP_GS_TEX_CONST(uint32_t val)
-{
- return ((val) << A6XX_SP_GS_TEX_CONST__SHIFT) & A6XX_SP_GS_TEX_CONST__MASK;
-}
#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
-#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
-#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
-#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
-#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
-#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_FS_CTRL_REG0_UNK27 0x08000000
-#define A6XX_SP_FS_CTRL_REG0_EARLYPREAMBLE 0x10000000
-#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
@@ -6195,25 +6731,35 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
}
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_LODPIXMASK 0x00800000
+#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
+#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27 0x08000000
+#define A6XX_SP_FS_CTRL_REG0_EARLYPREAMBLE 0x10000000
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982
#define REG_A6XX_SP_FS_OBJ_START 0x0000a983
-#define A6XX_SP_FS_OBJ_START__MASK 0xffffffff
-#define A6XX_SP_FS_OBJ_START__SHIFT 0
-static inline uint32_t A6XX_SP_FS_OBJ_START(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_OBJ_START__SHIFT) & A6XX_SP_FS_OBJ_START__MASK;
-}
#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985
#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
- return ((val >> 9) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}
#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
@@ -6223,19 +6769,14 @@ static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t va
}
#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986
-#define A6XX_SP_FS_PVT_MEM_ADDR__MASK 0xffffffff
-#define A6XX_SP_FS_PVT_MEM_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_FS_PVT_MEM_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_FS_PVT_MEM_ADDR__MASK;
-}
#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988
#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
- return ((val >> 12) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}
#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
@@ -6339,7 +6880,7 @@ static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
}
-static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define REG_A6XX_SP_FS_OUTPUT(i0) (0x0000a98e + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
@@ -6350,7 +6891,7 @@ static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
}
#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
-static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define REG_A6XX_SP_FS_MRT(i0) (0x0000a996 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
@@ -6371,16 +6912,22 @@ static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
}
#define A6XX_SP_FS_PREFETCH_CNTL_IJ_WRITE_DISABLE 0x00000008
-#define A6XX_SP_FS_PREFETCH_CNTL_UNK4 0x00000010
+#define A6XX_SP_FS_PREFETCH_CNTL_ENDOFQUAD 0x00000010
#define A6XX_SP_FS_PREFETCH_CNTL_WRITE_COLOR_TO_OUTPUT 0x00000020
-#define A6XX_SP_FS_PREFETCH_CNTL_UNK6__MASK 0x00007fc0
-#define A6XX_SP_FS_PREFETCH_CNTL_UNK6__SHIFT 6
-static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK6(uint32_t val)
+#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__MASK 0x00007fc0
+#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__SHIFT 6
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__MASK 0x01ff0000
+#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__SHIFT 16
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD(uint32_t val)
{
- return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK6__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK6__MASK;
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__MASK;
}
-static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define REG_A6XX_SP_FS_PREFETCH(i0) (0x0000a99f + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
@@ -6423,7 +6970,49 @@ static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd va
return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
}
-static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+#define REG_A7XX_SP_FS_PREFETCH(i0) (0x0000a99f + 0x1*(i0))
+
+static inline uint32_t REG_A7XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define A7XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
+#define A7XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
+static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
+{
+ return ((val) << A7XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_SRC__MASK;
+}
+#define A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000380
+#define A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
+static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A7XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x00001c00
+#define A7XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 10
+static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A7XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
+}
+#define A7XX_SP_FS_PREFETCH_CMD_DST__MASK 0x0007e000
+#define A7XX_SP_FS_PREFETCH_CMD_DST__SHIFT 13
+static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
+{
+ return ((val) << A7XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_DST__MASK;
+}
+#define A7XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x00780000
+#define A7XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 19
+static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
+{
+ return ((val) << A7XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
+}
+#define A7XX_SP_FS_PREFETCH_CMD_HALF 0x00800000
+#define A7XX_SP_FS_PREFETCH_CMD_BINDLESS 0x02000000
+#define A7XX_SP_FS_PREFETCH_CMD_CMD__MASK 0x3c000000
+#define A7XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 26
+static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val)
+{
+ return ((val) << A7XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_CMD__MASK;
+}
+
+#define REG_A6XX_SP_FS_BINDLESS_PREFETCH(i0) (0x0000a9a3 + 0x1*(i0))
static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff
@@ -6448,20 +7037,11 @@ static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
{
- return ((val >> 11) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
}
#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
-#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
-{
- return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
-#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
-#define A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE 0x00800000
-#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
@@ -6487,6 +7067,16 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
{
return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
}
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE 0x00800000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f
@@ -6503,19 +7093,14 @@ static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val)
#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3
#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4
-#define A6XX_SP_CS_OBJ_START__MASK 0xffffffff
-#define A6XX_SP_CS_OBJ_START__SHIFT 0
-static inline uint32_t A6XX_SP_CS_OBJ_START(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_OBJ_START__SHIFT) & A6XX_SP_CS_OBJ_START__MASK;
-}
#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6
#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
{
- return ((val >> 9) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+ assert(!(val & 0x1ff));
+ return (((val >> 9)) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
}
#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
@@ -6525,19 +7110,14 @@ static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t va
}
#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7
-#define A6XX_SP_CS_PVT_MEM_ADDR__MASK 0xffffffff
-#define A6XX_SP_CS_PVT_MEM_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_CS_PVT_MEM_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_CS_PVT_MEM_ADDR__MASK;
-}
#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9
#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
- return ((val >> 12) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}
#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
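
A minimal, self-contained sketch of the packing pattern the regenerated helpers above follow: the field stores its value at a coarse granularity, and the helper now asserts that the caller respects that granularity instead of silently truncating with "val >> N". A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE below is reproduced from the hunk above; the main() harness is purely illustrative and not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK		0x0003ffff
#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT		0
static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
{
	/* size must be a 4 KiB multiple */
	assert(!(val & 0xfff));
	return (((val >> 12)) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
}

int main(void)
{
	/* 128 KiB of private memory packs as 128 KiB / 4 KiB = 0x20. */
	printf("0x%08x\n", A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(128 * 1024));
	/* An unaligned size would now trip the assert instead of being
	 * silently rounded down by the old "val >> 12" form. */
	return 0;
}
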
@@ -6575,9 +7155,14 @@ static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
{
- return ((val >> 11) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+ assert(!(val & 0x7ff));
+ return (((val >> 11)) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
}
+#define REG_A7XX_SP_CS_UNKNOWN_A9BE 0x0000a9be
+
+#define REG_A7XX_SP_CS_VGPR_CONFIG 0x0000a9c5
+
#define REG_A6XX_SP_CS_CNTL_0 0x0000a9c2
#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT 0
@@ -6620,39 +7205,31 @@ static inline uint32_t A6XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
}
#define A6XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
-#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
-#define A6XX_SP_FS_TEX_SAMP__MASK 0xffffffff
-#define A6XX_SP_FS_TEX_SAMP__SHIFT 0
-static inline uint32_t A6XX_SP_FS_TEX_SAMP(uint32_t val)
+#define REG_A7XX_SP_CS_CNTL_1 0x0000a9c3
+#define A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
{
- return ((val) << A6XX_SP_FS_TEX_SAMP__SHIFT) & A6XX_SP_FS_TEX_SAMP__MASK;
+ return ((val) << A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
}
-
-#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
-#define A6XX_SP_CS_TEX_SAMP__MASK 0xffffffff
-#define A6XX_SP_CS_TEX_SAMP__SHIFT 0
-static inline uint32_t A6XX_SP_CS_TEX_SAMP(uint32_t val)
+#define A7XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000100
+#define A7XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 8
+static inline uint32_t A7XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
{
- return ((val) << A6XX_SP_CS_TEX_SAMP__SHIFT) & A6XX_SP_CS_TEX_SAMP__MASK;
+ return ((val) << A7XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A7XX_SP_CS_CNTL_1_THREADSIZE__MASK;
}
+#define A7XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000200
+#define A7XX_SP_CS_CNTL_1_UNK15 0x00008000
+
+#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
+
+#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4
-#define A6XX_SP_FS_TEX_CONST__MASK 0xffffffff
-#define A6XX_SP_FS_TEX_CONST__SHIFT 0
-static inline uint32_t A6XX_SP_FS_TEX_CONST(uint32_t val)
-{
- return ((val) << A6XX_SP_FS_TEX_CONST__SHIFT) & A6XX_SP_FS_TEX_CONST__MASK;
-}
#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6
-#define A6XX_SP_CS_TEX_CONST__MASK 0xffffffff
-#define A6XX_SP_CS_TEX_CONST__SHIFT 0
-static inline uint32_t A6XX_SP_CS_TEX_CONST(uint32_t val)
-{
- return ((val) << A6XX_SP_CS_TEX_CONST__SHIFT) & A6XX_SP_CS_TEX_CONST__MASK;
-}
-static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+#define REG_A6XX_SP_CS_BINDLESS_BASE(i0) (0x0000a9e8 + 0x2*(i0))
static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
@@ -6661,23 +7238,92 @@ static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_b
{
return ((val) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
}
-#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
{
- return ((val >> 2) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
}
-#define REG_A6XX_SP_CS_IBO 0x0000a9f2
-#define A6XX_SP_CS_IBO__MASK 0xffffffff
-#define A6XX_SP_CS_IBO__SHIFT 0
-static inline uint32_t A6XX_SP_CS_IBO(uint32_t val)
+#define REG_A7XX_SP_CS_BINDLESS_BASE(i0) (0x0000a9e8 + 0x2*(i0))
+
+static inline uint32_t REG_A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
+#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
+static inline uint32_t A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
+{
+ return ((val) << A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
+}
+#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
+#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
+static inline uint32_t A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
{
- return ((val) << A6XX_SP_CS_IBO__SHIFT) & A6XX_SP_CS_IBO__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
}
+#define REG_A6XX_SP_CS_IBO 0x0000a9f2
+
#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
+#define REG_A7XX_SP_FS_VGPR_CONFIG 0x0000aa01
+
+#define REG_A7XX_SP_PS_ALIASED_COMPONENTS_CONTROL 0x0000aa02
+#define A7XX_SP_PS_ALIASED_COMPONENTS_CONTROL_ENABLED 0x00000001
+
+#define REG_A7XX_SP_PS_ALIASED_COMPONENTS 0x0000aa03
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT0__MASK 0x0000000f
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT0__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT0__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT1__MASK 0x000000f0
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT1__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT1__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT2__MASK 0x00000f00
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT2__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT2__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT3__MASK 0x0000f000
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT3__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT3__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT4__MASK 0x000f0000
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT4__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT4__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT5__MASK 0x00f00000
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT5__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT5__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT6__MASK 0x0f000000
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT6__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT6__MASK;
+}
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT7__MASK 0xf0000000
+#define A7XX_SP_PS_ALIASED_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT7__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_AAF2 0x0000aaf2
+
#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00
#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001
#define A6XX_SP_MODE_CONTROL_ISAMMODE__MASK 0x00000006
@@ -6688,6 +7334,10 @@ static inline uint32_t A6XX_SP_MODE_CONTROL_ISAMMODE(enum a6xx_isam_mode val)
}
#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008
+#define REG_A7XX_SP_UNKNOWN_AB01 0x0000ab01
+
+#define REG_A7XX_SP_UNKNOWN_AB02 0x0000ab02
+
#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002
@@ -6715,7 +7365,7 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
-static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+#define REG_A6XX_SP_BINDLESS_BASE(i0) (0x0000ab10 + 0x2*(i0))
static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
@@ -6724,23 +7374,37 @@ static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bind
{
return ((val) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
}
-#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
{
- return ((val >> 2) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
}
-#define REG_A6XX_SP_IBO 0x0000ab1a
-#define A6XX_SP_IBO__MASK 0xffffffff
-#define A6XX_SP_IBO__SHIFT 0
-static inline uint32_t A6XX_SP_IBO(uint32_t val)
+#define REG_A7XX_SP_BINDLESS_BASE(i0) (0x0000ab0a + 0x2*(i0))
+
+static inline uint32_t REG_A7XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab0a + 0x2*i0; }
+#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
+#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
+static inline uint32_t A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
{
- return ((val) << A6XX_SP_IBO__SHIFT) & A6XX_SP_IBO__MASK;
+ return ((val) << A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
+}
+#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
+#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
+static inline uint32_t A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
+{
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
}
+#define REG_A6XX_SP_IBO 0x0000ab1a
+
#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
+#define REG_A7XX_SP_UNKNOWN_AB22 0x0000ab22
+
#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0
#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001
#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002
@@ -6759,6 +7423,24 @@ static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
}
+#define REG_A7XX_SP_2D_DST_FORMAT 0x0000a9bf
+#define A7XX_SP_2D_DST_FORMAT_NORM 0x00000001
+#define A7XX_SP_2D_DST_FORMAT_SINT 0x00000002
+#define A7XX_SP_2D_DST_FORMAT_UINT 0x00000004
+#define A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
+#define A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
+static inline uint32_t A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
+}
+#define A7XX_SP_2D_DST_FORMAT_SRGB 0x00000800
+#define A7XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
+#define A7XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
+static inline uint32_t A7XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
+{
+ return ((val) << A7XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A7XX_SP_2D_DST_FORMAT_MASK__MASK;
+}
+
#define REG_A6XX_SP_DBG_ECO_CNTL 0x0000ae00
#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
@@ -6770,6 +7452,14 @@ static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04
#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008
+#define REG_A7XX_SP_UNKNOWN_AE06 0x0000ae06
+
+#define REG_A7XX_SP_UNKNOWN_AE08 0x0000ae08
+
+#define REG_A7XX_SP_UNKNOWN_AE09 0x0000ae09
+
+#define REG_A7XX_SP_UNKNOWN_AE0A 0x0000ae0a
+
#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f
#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001
#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002
@@ -6778,23 +7468,57 @@ static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010
#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020
-static inline uint32_t REG_A6XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae10 + 0x1*i0; }
+#define REG_A6XX_SP_PERFCTR_SP_SEL(i0) (0x0000ae10 + 0x1*(i0))
+
+#define REG_A7XX_SP_PERFCTR_HLSQ_SEL(i0) (0x0000ae60 + 0x1*(i0))
-static inline uint32_t REG_A7XX_SP_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000ae60 + 0x1*i0; }
+#define REG_A7XX_SP_UNKNOWN_AE6A 0x0000ae6a
+
+#define REG_A7XX_SP_UNKNOWN_AE6B 0x0000ae6b
+
+#define REG_A7XX_SP_UNKNOWN_AE6C 0x0000ae6c
#define REG_A7XX_SP_READ_SEL 0x0000ae6d
+#define A7XX_SP_READ_SEL_LOCATION__MASK 0x000c0000
+#define A7XX_SP_READ_SEL_LOCATION__SHIFT 18
+static inline uint32_t A7XX_SP_READ_SEL_LOCATION(enum a7xx_state_location val)
+{
+ return ((val) << A7XX_SP_READ_SEL_LOCATION__SHIFT) & A7XX_SP_READ_SEL_LOCATION__MASK;
+}
+#define A7XX_SP_READ_SEL_PIPE__MASK 0x00030000
+#define A7XX_SP_READ_SEL_PIPE__SHIFT 16
+static inline uint32_t A7XX_SP_READ_SEL_PIPE(enum a7xx_pipe val)
+{
+ return ((val) << A7XX_SP_READ_SEL_PIPE__SHIFT) & A7XX_SP_READ_SEL_PIPE__MASK;
+}
+#define A7XX_SP_READ_SEL_STATETYPE__MASK 0x0000ff00
+#define A7XX_SP_READ_SEL_STATETYPE__SHIFT 8
+static inline uint32_t A7XX_SP_READ_SEL_STATETYPE(enum a7xx_statetype_id val)
+{
+ return ((val) << A7XX_SP_READ_SEL_STATETYPE__SHIFT) & A7XX_SP_READ_SEL_STATETYPE__MASK;
+}
+#define A7XX_SP_READ_SEL_USPTP__MASK 0x000000f0
+#define A7XX_SP_READ_SEL_USPTP__SHIFT 4
+static inline uint32_t A7XX_SP_READ_SEL_USPTP(uint32_t val)
+{
+ return ((val) << A7XX_SP_READ_SEL_USPTP__SHIFT) & A7XX_SP_READ_SEL_USPTP__MASK;
+}
+#define A7XX_SP_READ_SEL_SPTP__MASK 0x0000000f
+#define A7XX_SP_READ_SEL_SPTP__SHIFT 0
+static inline uint32_t A7XX_SP_READ_SEL_SPTP(uint32_t val)
+{
+ return ((val) << A7XX_SP_READ_SEL_SPTP__SHIFT) & A7XX_SP_READ_SEL_SPTP__MASK;
+}
+
+#define REG_A7XX_SP_DBG_CNTL 0x0000ae71
-static inline uint32_t REG_A7XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae80 + 0x1*i0; }
+#define REG_A7XX_SP_UNKNOWN_AE73 0x0000ae73
+
+#define REG_A7XX_SP_PERFCTR_SP_SEL(i0) (0x0000ae80 + 0x1*(i0))
#define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
-#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
-#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK;
-}
#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
@@ -6828,12 +7552,6 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples
#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
-#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
-#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
-static inline uint32_t A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
-{
- return ((val) << A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK;
-}
#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
@@ -6844,49 +7562,49 @@ static inline uint32_t A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
}
#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306
@@ -6894,49 +7612,49 @@ static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
}
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
{
- return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+ return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
}
#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
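
A quick illustration of the sample-location change above: each SAMPLE_n_X/Y field is four bits wide and, with the corrected scaling, holds the offset in 1/16 units, so the helper multiplies by 16.0 rather than 1.0 (which truncated every fractional offset to zero). A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y is reproduced from the hunk above; the main() harness is purely illustrative and not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK		0x000000f0
#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT		4
static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
{
	return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
}

int main(void)
{
	/* An offset of +0.25 encodes as 0.25 * 16 = 4, i.e. 0x40 in bits 7:4. */
	printf("0x%08x\n", A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(0.25f));
	return 0;
}
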
@@ -6967,6 +7685,8 @@ static inline uint32_t A6XX_SP_TP_MODE_CNTL_UNK3(uint32_t val)
return ((val) << A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT) & A6XX_SP_TP_MODE_CNTL_UNK3__MASK;
}
+#define REG_A7XX_SP_UNKNOWN_B310 0x0000b310
+
#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -7024,12 +7744,6 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
}
#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
-#define A6XX_SP_PS_2D_SRC__MASK 0xffffffff
-#define A6XX_SP_PS_2D_SRC__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC__SHIFT) & A6XX_SP_PS_2D_SRC__MASK;
-}
#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
@@ -7042,47 +7756,129 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
}
-#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
-#define A6XX_SP_PS_2D_SRC_PLANE1__MASK 0xffffffff
-#define A6XX_SP_PS_2D_SRC_PLANE1__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE1(uint32_t val)
+#define REG_A7XX_SP_PS_2D_SRC_INFO 0x0000b2c0
+#define A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
- return ((val) << A6XX_SP_PS_2D_SRC_PLANE1__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE1__MASK;
+ return ((val) << A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A7XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A7XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
+#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
+#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
+}
+#define A7XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
+#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
+#define A7XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
+static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
+}
+#define A7XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
+
+#define REG_A7XX_SP_PS_2D_SRC_SIZE 0x0000b2c1
+#define A7XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A7XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A7XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A7XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A7XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A7XX_SP_PS_2D_SRC 0x0000b2c2
+
+#define REG_A7XX_SP_PS_2D_SRC_PITCH 0x0000b2c4
+#define A7XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
+#define A7XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
+static inline uint32_t A7XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A7XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
+}
+#define A7XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
+#define A7XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A7XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A7XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7
#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
}
#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8
-#define A6XX_SP_PS_2D_SRC_PLANE2__MASK 0xffffffff
-#define A6XX_SP_PS_2D_SRC_PLANE2__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE2(uint32_t val)
+
+#define REG_A7XX_SP_PS_2D_SRC_PLANE1 0x0000b2c5
+
+#define REG_A7XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b2c7
+#define A7XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
+#define A7XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
+static inline uint32_t A7XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
{
- return ((val) << A6XX_SP_PS_2D_SRC_PLANE2__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE2__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A7XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
}
+#define REG_A7XX_SP_PS_2D_SRC_PLANE2 0x0000b2c8
+
#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
-#define A6XX_SP_PS_2D_SRC_FLAGS__MASK 0xffffffff
-#define A6XX_SP_PS_2D_SRC_FLAGS__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS(uint32_t val)
-{
- return ((val) << A6XX_SP_PS_2D_SRC_FLAGS__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS__MASK;
-}
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A7XX_SP_PS_2D_SRC_FLAGS 0x0000b2ca
+
+#define REG_A7XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b2cc
+#define A7XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
+#define A7XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A7XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A7XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
}
#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd
@@ -7107,6 +7903,44 @@ static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
}
+#define REG_A7XX_SP_PS_UNKNOWN_B4CD 0x0000b2cd
+
+#define REG_A7XX_SP_PS_UNKNOWN_B4CE 0x0000b2ce
+
+#define REG_A7XX_SP_PS_UNKNOWN_B4CF 0x0000b2cf
+
+#define REG_A7XX_SP_PS_UNKNOWN_B4D0 0x0000b2d0
+
+#define REG_A7XX_SP_PS_2D_WINDOW_OFFSET 0x0000b2d1
+#define A7XX_SP_PS_2D_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A7XX_SP_PS_2D_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A7XX_SP_PS_2D_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_2D_WINDOW_OFFSET_X__SHIFT) & A7XX_SP_PS_2D_WINDOW_OFFSET_X__MASK;
+}
+#define A7XX_SP_PS_2D_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A7XX_SP_PS_2D_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A7XX_SP_PS_2D_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A7XX_SP_PS_2D_WINDOW_OFFSET_Y__SHIFT) & A7XX_SP_PS_2D_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A7XX_SP_PS_UNKNOWN_B2D2 0x0000b2d2
+
+#define REG_A7XX_SP_WINDOW_OFFSET 0x0000ab21
+#define A7XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A7XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A7XX_SP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A7XX_SP_WINDOW_OFFSET_X__SHIFT) & A7XX_SP_WINDOW_OFFSET_X__MASK;
+}
+#define A7XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A7XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A7XX_SP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A7XX_SP_WINDOW_OFFSET_Y__SHIFT) & A7XX_SP_WINDOW_OFFSET_Y__MASK;
+}
+
#define REG_A6XX_TPL1_DBG_ECO_CNTL 0x0000b600
#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
@@ -7147,53 +7981,126 @@ static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val)
#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
-static inline uint32_t REG_A6XX_TPL1_PERFCTR_TP_SEL(uint32_t i0) { return 0x0000b610 + 0x1*i0; }
+#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL(i0) (0x0000b610 + 0x1*(i0))
#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
}
#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100
+#define A6XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
}
#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100
+#define A6XX_HLSQ_HS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
}
#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100
+#define A6XX_HLSQ_DS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
}
#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100
+#define A6XX_HLSQ_GS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
+#define REG_A7XX_HLSQ_VS_CNTL 0x0000a827
+#define A7XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A7XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A7XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
+#define A7XX_HLSQ_VS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
-#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
-#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK 0xffffffff
-#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT 0
-static inline uint32_t A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR(uint32_t val)
+#define REG_A7XX_HLSQ_HS_CNTL 0x0000a83f
+#define A7XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A7XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A7XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val) << A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
}
+#define A7XX_HLSQ_HS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_HS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
+
+#define REG_A7XX_HLSQ_DS_CNTL 0x0000a867
+#define A7XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A7XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A7XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+#define A7XX_HLSQ_DS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_DS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
+
+#define REG_A7XX_HLSQ_GS_CNTL 0x0000a898
+#define A7XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A7XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A7XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
+#define A7XX_HLSQ_GS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_GS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
+
+#define REG_A7XX_HLSQ_FS_UNKNOWN_A9AA 0x0000a9aa
+#define A7XX_HLSQ_FS_UNKNOWN_A9AA_CONSTS_LOAD_DISABLE 0x00000001
+
+#define REG_A7XX_HLSQ_UNKNOWN_A9AC 0x0000a9ac
+
+#define REG_A7XX_HLSQ_UNKNOWN_A9AD 0x0000a9ad
+
+#define REG_A7XX_HLSQ_UNKNOWN_A9AE 0x0000a9ae
+#define A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__MASK 0x000000ff
+#define A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__SHIFT 0
+static inline uint32_t A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__SHIFT) & A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__MASK;
+}
+#define A7XX_HLSQ_UNKNOWN_A9AE_UNK8 0x00000100
+#define A7XX_HLSQ_UNKNOWN_A9AE_UNK9 0x00000200
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823
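
The HLSQ_*_CNTL helpers above all share one encoding: CONSTLEN is stored in units of four, so the regenerated form asserts the value is a multiple of four instead of dropping the low bits, and the update also exposes the READ_IMM_SHARED_CONSTS bit next to ENABLED. A rough sketch with the A6XX_HLSQ_VS_CNTL_* definitions reproduced from the hunk above; the main() harness is purely illustrative and not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK			0x000000ff
#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT			0
static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
{
	assert(!(val & 0x3));
	return (((val >> 2)) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
}
#define A6XX_HLSQ_VS_CNTL_ENABLED				0x00000100
#define A6XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS		0x00000200

int main(void)
{
	/* A constlen of 128 packs as 128 / 4 = 0x20; OR in the flag bits. */
	uint32_t reg = A6XX_HLSQ_VS_CNTL_CONSTLEN(128) | A6XX_HLSQ_VS_CNTL_ENABLED;
	printf("0x%08x\n", reg);	/* 0x00000120 */
	return 0;
}
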
@@ -7215,8 +8122,12 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981
#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
-
-#define REG_A7XX_HLSQ_CONTROL_1_REG 0x0000a9c7
+#define A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x00000007
+#define A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
@@ -7244,32 +8155,6 @@ static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
return ((val) << A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
}
-#define REG_A7XX_HLSQ_CONTROL_2_REG 0x0000a9c8
-#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
-#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
-#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
-}
-#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
-#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
-static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
-}
-
#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
@@ -7296,32 +8181,6 @@ static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
}
-#define REG_A7XX_HLSQ_CONTROL_3_REG 0x0000a9c9
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
-}
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
-}
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
-}
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
-#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
-static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
-{
- return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
-}
-
#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
@@ -7348,6 +8207,106 @@ static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
}
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
+{
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100
+#define A6XX_HLSQ_CS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
+
+#define REG_A7XX_HLSQ_FS_CNTL_0 0x0000a9c6
+#define A7XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001
+#define A7XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0
+static inline uint32_t A7XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A7XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A7XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK;
+}
+#define A7XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002
+#define A7XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc
+#define A7XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2
+static inline uint32_t A7XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A7XX_HLSQ_FS_CNTL_0_UNK2__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_1_REG 0x0000a9c7
+#define A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x00000007
+#define A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_2_REG 0x0000a9c8
+#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
+#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_3_REG 0x0000a9c9
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
#define REG_A7XX_HLSQ_CONTROL_4_REG 0x0000a9ca
#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
@@ -7374,20 +8333,6 @@ static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
return ((val) << A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
}
-#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
-#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
-#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
-}
-#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
-#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
-static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
-{
- return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
-}
-
#define REG_A7XX_HLSQ_CONTROL_5_REG 0x0000a9cb
#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
@@ -7402,14 +8347,16 @@ static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t va
return ((val) << A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
}
-#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
-#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
-#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
+#define REG_A7XX_HLSQ_CS_CNTL 0x0000a9cd
+#define A7XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A7XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val >> 2) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
}
-#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_CS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_CS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
@@ -7533,19 +8480,136 @@ static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
-#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
+#define REG_A7XX_HLSQ_CS_NDRANGE_0 0x0000a9d4
+#define A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
-#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
-#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK 0xffffffff
-#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT 0
-static inline uint32_t A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR(uint32_t val)
+#define REG_A7XX_HLSQ_CS_NDRANGE_1 0x0000a9d5
+#define A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A7XX_HLSQ_CS_NDRANGE_2 0x0000a9d6
+#define A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A7XX_HLSQ_CS_NDRANGE_3 0x0000a9d7
+#define A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A7XX_HLSQ_CS_NDRANGE_4 0x0000a9d8
+#define A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A7XX_HLSQ_CS_NDRANGE_5 0x0000a9d9
+#define A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A7XX_HLSQ_CS_NDRANGE_6 0x0000a9da
+#define A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
{
- return ((val) << A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK;
+ return ((val) << A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
}
+#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_X 0x0000a9dc
+
+#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000a9dd
+
+#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000a9de
+
+#define REG_A7XX_HLSQ_CS_CNTL_1 0x0000a9db
+#define A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A7XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A7XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A7XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A7XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A7XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A7XX_HLSQ_CS_CNTL_1_UNK11 0x00000800
+#define A7XX_HLSQ_CS_CNTL_1_UNK22 0x00400000
+#define A7XX_HLSQ_CS_CNTL_1_UNK26 0x04000000
+#define A7XX_HLSQ_CS_CNTL_1_YALIGN__MASK 0x78000000
+#define A7XX_HLSQ_CS_CNTL_1_YALIGN__SHIFT 27
+static inline uint32_t A7XX_HLSQ_CS_CNTL_1_YALIGN(enum a7xx_cs_yalign val)
+{
+ return ((val) << A7XX_HLSQ_CS_CNTL_1_YALIGN__SHIFT) & A7XX_HLSQ_CS_CNTL_1_YALIGN__MASK;
+}
+
+#define REG_A7XX_HLSQ_CS_LOCAL_SIZE 0x0000a9df
+#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__MASK 0x00000ffc
+#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__SHIFT 2
+static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__MASK;
+}
+#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__MASK 0x003ff000
+#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__SHIFT 12
+static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__MASK;
+}
+#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__MASK 0xffc00000
+#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
+
#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3
-static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+#define REG_A6XX_HLSQ_CS_BINDLESS_BASE(i0) (0x0000b9c0 + 0x2*(i0))
static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
@@ -7554,11 +8618,12 @@ static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx
{
return ((val) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
}
-#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
{
- return ((val >> 2) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
}
#define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0
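
The regenerated helpers above all follow one packing idiom: each field builder shifts its value into position and masks it, so a full register word is produced by OR'ing the builders together, and fields with an implied alignment (such as the 64-bit bindless descriptor base just above) now assert that the low bits are zero before discarding them. A minimal sketch, assuming hypothetical local_x/local_y/local_z and desc_iova inputs that are not part of this patch:

/* Sketch only (not from the patch): shows the OR-composition and the
 * alignment-assert behaviour of the generated field builders above.
 * All inputs are hypothetical. */
static inline void pack_cs_regs_example(uint32_t local_x, uint32_t local_y,
					uint32_t local_z, uint64_t desc_iova,
					uint32_t *local_size, uint32_t *desc_base)
{
	/* Each builder masks and shifts its own field; OR them into one word. */
	*local_size = A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX(local_x) |
		      A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY(local_y) |
		      A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ(local_z);

	/* The descriptor base must be 4-byte aligned; the assert() added in
	 * this patch fires before the low bits would be dropped. */
	*desc_base = A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(desc_iova);
}
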
@@ -7625,19 +8690,56 @@ static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
}
+#define REG_A7XX_HLSQ_INVALIDATE_CMD 0x0000ab1f
+#define A7XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001
+#define A7XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002
+#define A7XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004
+#define A7XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008
+#define A7XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010
+#define A7XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020
+#define A7XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040
+#define A7XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080
+#define A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x0001fe00
+#define A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9
+static inline uint32_t A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
+}
+#define A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x01fe0000
+#define A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 17
+static inline uint32_t A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
+}
+
#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
{
- return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
}
#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100
+#define A6XX_HLSQ_FS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
+
+#define REG_A7XX_HLSQ_FS_CNTL 0x0000ab03
+#define A7XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A7XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A7XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A7XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+#define A7XX_HLSQ_FS_CNTL_ENABLED 0x00000100
+#define A7XX_HLSQ_FS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200
+
+#define REG_A7XX_HLSQ_SHARED_CONSTS_IMM(i0) (0x0000ab40 + 0x1*(i0))
#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11
#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001
-static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+#define REG_A6XX_HLSQ_BINDLESS_BASE(i0) (0x0000bb20 + 0x2*(i0))
static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
@@ -7646,11 +8748,12 @@ static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bi
{
return ((val) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
}
-#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc
#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
-static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val)
{
- return ((val >> 2) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
}
#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80
@@ -7677,12 +8780,18 @@ static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08
-static inline uint32_t REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000be10 + 0x1*i0; }
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(i0) (0x0000be10 + 0x1*(i0))
#define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
#define REG_A7XX_SP_AHB_READ_APERTURE 0x0000c000
+#define REG_A7XX_SP_UNKNOWN_0CE2 0x00000ce2
+
+#define REG_A7XX_SP_UNKNOWN_0CE4 0x00000ce4
+
+#define REG_A7XX_SP_UNKNOWN_0CE6 0x00000ce6
+
#define REG_A6XX_CP_EVENT_START 0x0000d600
#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff
#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0
@@ -7907,17 +9016,19 @@ static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
}
#define REG_A6XX_TEX_CONST_3 0x00000003
-#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x007fffff
#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
}
#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
{
- return ((val >> 12) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+ assert(!(val & 0xfff));
+ return (((val >> 12)) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
}
#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000
#define A6XX_TEX_CONST_3_FLAG 0x10000000
@@ -7927,7 +9038,8 @@ static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
{
- return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
}
#define REG_A6XX_TEX_CONST_5 0x00000005
@@ -7963,7 +9075,8 @@ static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val)
#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
{
- return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+ assert(!(val & 0x1f));
+ return (((val >> 5)) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
}
#define REG_A6XX_TEX_CONST_8 0x00000008
@@ -7979,7 +9092,8 @@ static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 4) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+ assert(!(val & 0xf));
+ return (((val >> 4)) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
}
#define REG_A6XX_TEX_CONST_10 0x0000000a
@@ -7987,7 +9101,8 @@ static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0
static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val)
{
- return ((val >> 6) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
+ assert(!(val & 0x3f));
+ return (((val >> 6)) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
}
#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00
#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8
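
The TEX_CONST changes above follow the same theme: pitch and base fields that carry an implied alignment now assert it instead of silently truncating. A short illustrative sketch; the inputs are hypothetical and only the field builders themselves come from this patch:

/* Sketch only: array_pitch and base_lo are assumed to be 4 KiB- and
 * 32-byte-aligned respectively, as the new assert()s require. */
static inline void pack_tex_const_example(uint32_t array_pitch, uint32_t base_lo,
					  uint32_t *dw3, uint32_t *dw4)
{
	*dw3 = A6XX_TEX_CONST_3_ARRAY_PITCH(array_pitch);	/* assert(!(val & 0xfff)) */
	*dw4 = A6XX_TEX_CONST_4_BASE_LO(base_lo);		/* assert(!(val & 0x1f)) */
}
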
@@ -8262,4 +9377,2482 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A7XX_CX_MISC_TCM_RET_CNTL 0x00000039
+#ifdef __cplusplus
+template<chip CHIP> constexpr inline uint16_t CMD_REGS[] = {};
+template<chip CHIP> constexpr inline uint16_t RP_BLIT_REGS[] = {};
+template<> constexpr inline uint16_t CMD_REGS<A6XX>[] = {
+ 0xc03,
+ 0xc04,
+ 0xc30,
+ 0xc31,
+ 0xc32,
+ 0xc33,
+ 0xc34,
+ 0xc35,
+ 0xc36,
+ 0xc37,
+ 0xe12,
+ 0xe17,
+ 0xe19,
+ 0x8099,
+ 0x80af,
+ 0x810a,
+ 0x8110,
+ 0x8600,
+ 0x880e,
+ 0x8811,
+ 0x8818,
+ 0x8819,
+ 0x881a,
+ 0x881b,
+ 0x881c,
+ 0x881d,
+ 0x881e,
+ 0x8864,
+ 0x8891,
+ 0x88f0,
+ 0x8927,
+ 0x8928,
+ 0x8e01,
+ 0x8e04,
+ 0x8e07,
+ 0x9210,
+ 0x9211,
+ 0x9218,
+ 0x9219,
+ 0x921a,
+ 0x921b,
+ 0x921c,
+ 0x921d,
+ 0x921e,
+ 0x921f,
+ 0x9220,
+ 0x9221,
+ 0x9222,
+ 0x9223,
+ 0x9224,
+ 0x9225,
+ 0x9226,
+ 0x9227,
+ 0x9228,
+ 0x9229,
+ 0x922a,
+ 0x922b,
+ 0x922c,
+ 0x922d,
+ 0x922e,
+ 0x922f,
+ 0x9230,
+ 0x9231,
+ 0x9232,
+ 0x9233,
+ 0x9234,
+ 0x9235,
+ 0x9236,
+ 0x9300,
+ 0x9600,
+ 0x9601,
+ 0x9602,
+ 0x9e08,
+ 0x9e09,
+ 0x9e72,
+ 0xa007,
+ 0xa009,
+ 0xa8a0,
+ 0xa8a1,
+ 0xa8a2,
+ 0xa8a3,
+ 0xa8a4,
+ 0xa8a5,
+ 0xa8a6,
+ 0xa8a7,
+ 0xa8a8,
+ 0xa8a9,
+ 0xa8aa,
+ 0xa8ab,
+ 0xa8ac,
+ 0xa8ad,
+ 0xa8ae,
+ 0xa8af,
+ 0xa9a8,
+ 0xa9b0,
+ 0xa9b1,
+ 0xa9b2,
+ 0xa9b3,
+ 0xa9b4,
+ 0xa9b5,
+ 0xa9b6,
+ 0xa9b7,
+ 0xa9b8,
+ 0xa9b9,
+ 0xa9ba,
+ 0xa9bb,
+ 0xa9bc,
+ 0xa9bd,
+ 0xa9c2,
+ 0xa9c3,
+ 0xa9e2,
+ 0xa9e3,
+ 0xa9e6,
+ 0xa9e7,
+ 0xa9e8,
+ 0xa9e9,
+ 0xa9ea,
+ 0xa9eb,
+ 0xa9ec,
+ 0xa9ed,
+ 0xa9ee,
+ 0xa9ef,
+ 0xa9f0,
+ 0xa9f1,
+ 0xaaf2,
+ 0xab1a,
+ 0xab1b,
+ 0xab20,
+ 0xae00,
+ 0xae03,
+ 0xae04,
+ 0xae0f,
+ 0xb180,
+ 0xb181,
+ 0xb182,
+ 0xb183,
+ 0xb302,
+ 0xb303,
+ 0xb309,
+ 0xb600,
+ 0xb602,
+ 0xb605,
+ 0xb987,
+ 0xb9d0,
+ 0xbb08,
+ 0xbb11,
+ 0xbb20,
+ 0xbb21,
+ 0xbb22,
+ 0xbb23,
+ 0xbb24,
+ 0xbb25,
+ 0xbb26,
+ 0xbb27,
+ 0xbb28,
+ 0xbb29,
+ 0xbe00,
+ 0xbe01,
+ 0xbe04,
+};
+template<> constexpr inline uint16_t CMD_REGS<A7XX>[] = {
+ 0xc03,
+ 0xc04,
+ 0xc30,
+ 0xc31,
+ 0xc32,
+ 0xc33,
+ 0xc34,
+ 0xc35,
+ 0xc36,
+ 0xc37,
+ 0xce2,
+ 0xce3,
+ 0xce4,
+ 0xce5,
+ 0xce6,
+ 0xce7,
+ 0xe10,
+ 0xe11,
+ 0xe12,
+ 0xe17,
+ 0xe19,
+ 0x8008,
+ 0x8009,
+ 0x800a,
+ 0x800b,
+ 0x800c,
+ 0x8099,
+ 0x80a7,
+ 0x80af,
+ 0x80f4,
+ 0x80f5,
+ 0x80f5,
+ 0x80f6,
+ 0x80f6,
+ 0x80f7,
+ 0x80f8,
+ 0x80f9,
+ 0x80f9,
+ 0x80fa,
+ 0x80fa,
+ 0x80fb,
+ 0x810a,
+ 0x810b,
+ 0x8110,
+ 0x8120,
+ 0x8121,
+ 0x8600,
+ 0x880e,
+ 0x8811,
+ 0x8818,
+ 0x8819,
+ 0x881a,
+ 0x881b,
+ 0x881c,
+ 0x881d,
+ 0x881e,
+ 0x8864,
+ 0x8891,
+ 0x8899,
+ 0x88e5,
+ 0x88f0,
+ 0x8927,
+ 0x8928,
+ 0x8e01,
+ 0x8e04,
+ 0x8e06,
+ 0x8e07,
+ 0x8e09,
+ 0x8e79,
+ 0x9218,
+ 0x9219,
+ 0x921a,
+ 0x921b,
+ 0x921c,
+ 0x921d,
+ 0x921e,
+ 0x921f,
+ 0x9220,
+ 0x9221,
+ 0x9222,
+ 0x9223,
+ 0x9224,
+ 0x9225,
+ 0x9226,
+ 0x9227,
+ 0x9228,
+ 0x9229,
+ 0x922a,
+ 0x922b,
+ 0x922c,
+ 0x922d,
+ 0x922e,
+ 0x922f,
+ 0x9230,
+ 0x9231,
+ 0x9232,
+ 0x9233,
+ 0x9234,
+ 0x9235,
+ 0x9236,
+ 0x9300,
+ 0x9600,
+ 0x9601,
+ 0x9602,
+ 0x9810,
+ 0x9811,
+ 0x9e24,
+ 0x9e72,
+ 0xa007,
+ 0xa009,
+ 0xa600,
+ 0xa82d,
+ 0xa82f,
+ 0xa868,
+ 0xa899,
+ 0xa8a0,
+ 0xa8a1,
+ 0xa8a2,
+ 0xa8a3,
+ 0xa8a4,
+ 0xa8a5,
+ 0xa8a6,
+ 0xa8a7,
+ 0xa8a8,
+ 0xa8a9,
+ 0xa8aa,
+ 0xa8ab,
+ 0xa8ac,
+ 0xa8ad,
+ 0xa8ae,
+ 0xa8af,
+ 0xa9a8,
+ 0xa9ac,
+ 0xa9ad,
+ 0xa9b0,
+ 0xa9b1,
+ 0xa9b2,
+ 0xa9b3,
+ 0xa9b4,
+ 0xa9b5,
+ 0xa9b6,
+ 0xa9b7,
+ 0xa9b8,
+ 0xa9b9,
+ 0xa9ba,
+ 0xa9bb,
+ 0xa9bc,
+ 0xa9bd,
+ 0xa9be,
+ 0xa9c2,
+ 0xa9c3,
+ 0xa9c5,
+ 0xa9cd,
+ 0xa9df,
+ 0xa9e2,
+ 0xa9e3,
+ 0xa9e6,
+ 0xa9e7,
+ 0xa9e8,
+ 0xa9e9,
+ 0xa9ea,
+ 0xa9eb,
+ 0xa9ec,
+ 0xa9ed,
+ 0xa9ee,
+ 0xa9ef,
+ 0xa9f0,
+ 0xa9f1,
+ 0xa9f2,
+ 0xa9f3,
+ 0xa9f4,
+ 0xa9f5,
+ 0xa9f6,
+ 0xa9f7,
+ 0xaa01,
+ 0xaa02,
+ 0xaa03,
+ 0xaaf2,
+ 0xab01,
+ 0xab02,
+ 0xab1a,
+ 0xab1b,
+ 0xab1f,
+ 0xab20,
+ 0xab22,
+ 0xae00,
+ 0xae03,
+ 0xae04,
+ 0xae06,
+ 0xae08,
+ 0xae09,
+ 0xae0a,
+ 0xae0f,
+ 0xae6a,
+ 0xae6b,
+ 0xae6c,
+ 0xae73,
+ 0xb180,
+ 0xb181,
+ 0xb182,
+ 0xb183,
+ 0xb302,
+ 0xb303,
+ 0xb309,
+ 0xb310,
+ 0xb600,
+ 0xb602,
+ 0xb608,
+ 0xb609,
+ 0xb60a,
+ 0xb60b,
+ 0xb60c,
+};
+template<> constexpr inline uint16_t RP_BLIT_REGS<A6XX>[] = {
+ 0xc02,
+ 0xc06,
+ 0xc10,
+ 0xc11,
+ 0xc12,
+ 0xc13,
+ 0xc14,
+ 0xc15,
+ 0xc16,
+ 0xc17,
+ 0xc18,
+ 0xc19,
+ 0xc1a,
+ 0xc1b,
+ 0xc1c,
+ 0xc1d,
+ 0xc1e,
+ 0xc1f,
+ 0xc20,
+ 0xc21,
+ 0xc22,
+ 0xc23,
+ 0xc24,
+ 0xc25,
+ 0xc26,
+ 0xc27,
+ 0xc28,
+ 0xc29,
+ 0xc2a,
+ 0xc2b,
+ 0xc2c,
+ 0xc2d,
+ 0xc2e,
+ 0xc2f,
+ 0xc38,
+ 0xc39,
+ 0xc3a,
+ 0xc3b,
+ 0xc3c,
+ 0xc3d,
+ 0xc3e,
+ 0xc3f,
+ 0xc40,
+ 0xc41,
+ 0xc42,
+ 0xc43,
+ 0xc44,
+ 0xc45,
+ 0xc46,
+ 0xc47,
+ 0xc48,
+ 0xc49,
+ 0xc4a,
+ 0xc4b,
+ 0xc4c,
+ 0xc4d,
+ 0xc4e,
+ 0xc4f,
+ 0xc50,
+ 0xc51,
+ 0xc52,
+ 0xc53,
+ 0xc54,
+ 0xc55,
+ 0xc56,
+ 0xc57,
+ 0xc58,
+ 0xc59,
+ 0xc5a,
+ 0xc5b,
+ 0xc5c,
+ 0xc5d,
+ 0xc5e,
+ 0xc5f,
+ 0xc60,
+ 0xc61,
+ 0xc62,
+ 0xc63,
+ 0xc64,
+ 0xc65,
+ 0xc66,
+ 0xc67,
+ 0xc68,
+ 0xc69,
+ 0xc6a,
+ 0xc6b,
+ 0xc6c,
+ 0xc6d,
+ 0xc6e,
+ 0xc6f,
+ 0xc70,
+ 0xc71,
+ 0xc72,
+ 0xc73,
+ 0xc74,
+ 0xc75,
+ 0xc76,
+ 0xc77,
+ 0xc78,
+ 0xc79,
+ 0xc7a,
+ 0xc7b,
+ 0xc7c,
+ 0xc7d,
+ 0xc7e,
+ 0xc7f,
+ 0xc80,
+ 0xc81,
+ 0xc82,
+ 0xc83,
+ 0xc84,
+ 0xc85,
+ 0xc86,
+ 0xc87,
+ 0xc88,
+ 0xc89,
+ 0xc8a,
+ 0xc8b,
+ 0xc8c,
+ 0xc8d,
+ 0xc8e,
+ 0xc8f,
+ 0xc90,
+ 0xc91,
+ 0xc92,
+ 0xc93,
+ 0xc94,
+ 0xc95,
+ 0xc96,
+ 0xc97,
+ 0x8000,
+ 0x8001,
+ 0x8002,
+ 0x8003,
+ 0x8004,
+ 0x8005,
+ 0x8006,
+ 0x8010,
+ 0x8011,
+ 0x8012,
+ 0x8013,
+ 0x8014,
+ 0x8015,
+ 0x8016,
+ 0x8017,
+ 0x8018,
+ 0x8019,
+ 0x801a,
+ 0x801b,
+ 0x801c,
+ 0x801d,
+ 0x801e,
+ 0x801f,
+ 0x8020,
+ 0x8021,
+ 0x8022,
+ 0x8023,
+ 0x8024,
+ 0x8025,
+ 0x8026,
+ 0x8027,
+ 0x8028,
+ 0x8029,
+ 0x802a,
+ 0x802b,
+ 0x802c,
+ 0x802d,
+ 0x802e,
+ 0x802f,
+ 0x8030,
+ 0x8031,
+ 0x8032,
+ 0x8033,
+ 0x8034,
+ 0x8035,
+ 0x8036,
+ 0x8037,
+ 0x8038,
+ 0x8039,
+ 0x803a,
+ 0x803b,
+ 0x803c,
+ 0x803d,
+ 0x803e,
+ 0x803f,
+ 0x8040,
+ 0x8041,
+ 0x8042,
+ 0x8043,
+ 0x8044,
+ 0x8045,
+ 0x8046,
+ 0x8047,
+ 0x8048,
+ 0x8049,
+ 0x804a,
+ 0x804b,
+ 0x804c,
+ 0x804d,
+ 0x804e,
+ 0x804f,
+ 0x8050,
+ 0x8051,
+ 0x8052,
+ 0x8053,
+ 0x8054,
+ 0x8055,
+ 0x8056,
+ 0x8057,
+ 0x8058,
+ 0x8059,
+ 0x805a,
+ 0x805b,
+ 0x805c,
+ 0x805d,
+ 0x805e,
+ 0x805f,
+ 0x8060,
+ 0x8061,
+ 0x8062,
+ 0x8063,
+ 0x8064,
+ 0x8065,
+ 0x8066,
+ 0x8067,
+ 0x8068,
+ 0x8069,
+ 0x806a,
+ 0x806b,
+ 0x806c,
+ 0x806d,
+ 0x806e,
+ 0x806f,
+ 0x8070,
+ 0x8071,
+ 0x8072,
+ 0x8073,
+ 0x8074,
+ 0x8075,
+ 0x8076,
+ 0x8077,
+ 0x8078,
+ 0x8079,
+ 0x807a,
+ 0x807b,
+ 0x807c,
+ 0x807d,
+ 0x807e,
+ 0x807f,
+ 0x8080,
+ 0x8081,
+ 0x8082,
+ 0x8083,
+ 0x8084,
+ 0x8085,
+ 0x8086,
+ 0x8087,
+ 0x8088,
+ 0x8089,
+ 0x808a,
+ 0x808b,
+ 0x808c,
+ 0x808d,
+ 0x808e,
+ 0x808f,
+ 0x8090,
+ 0x8091,
+ 0x8092,
+ 0x8094,
+ 0x8095,
+ 0x8096,
+ 0x8097,
+ 0x8098,
+ 0x809b,
+ 0x809c,
+ 0x809d,
+ 0x80a0,
+ 0x80a1,
+ 0x80a2,
+ 0x80a3,
+ 0x80a4,
+ 0x80a5,
+ 0x80a6,
+ 0x80b0,
+ 0x80b1,
+ 0x80b2,
+ 0x80b3,
+ 0x80b4,
+ 0x80b5,
+ 0x80b6,
+ 0x80b7,
+ 0x80b8,
+ 0x80b9,
+ 0x80ba,
+ 0x80bb,
+ 0x80bc,
+ 0x80bd,
+ 0x80be,
+ 0x80bf,
+ 0x80c0,
+ 0x80c1,
+ 0x80c2,
+ 0x80c3,
+ 0x80c4,
+ 0x80c5,
+ 0x80c6,
+ 0x80c7,
+ 0x80c8,
+ 0x80c9,
+ 0x80ca,
+ 0x80cb,
+ 0x80cc,
+ 0x80cd,
+ 0x80ce,
+ 0x80cf,
+ 0x80d0,
+ 0x80d1,
+ 0x80d2,
+ 0x80d3,
+ 0x80d4,
+ 0x80d5,
+ 0x80d6,
+ 0x80d7,
+ 0x80d8,
+ 0x80d9,
+ 0x80da,
+ 0x80db,
+ 0x80dc,
+ 0x80dd,
+ 0x80de,
+ 0x80df,
+ 0x80e0,
+ 0x80e1,
+ 0x80e2,
+ 0x80e3,
+ 0x80e4,
+ 0x80e5,
+ 0x80e6,
+ 0x80e7,
+ 0x80e8,
+ 0x80e9,
+ 0x80ea,
+ 0x80eb,
+ 0x80ec,
+ 0x80ed,
+ 0x80ee,
+ 0x80ef,
+ 0x80f0,
+ 0x80f1,
+ 0x8100,
+ 0x8101,
+ 0x8102,
+ 0x8103,
+ 0x8104,
+ 0x8105,
+ 0x8106,
+ 0x8107,
+ 0x8109,
+ 0x8114,
+ 0x8115,
+ 0x8400,
+ 0x8401,
+ 0x8402,
+ 0x8403,
+ 0x8404,
+ 0x8405,
+ 0x8406,
+ 0x840a,
+ 0x840b,
+ 0x8800,
+ 0x8801,
+ 0x8802,
+ 0x8803,
+ 0x8804,
+ 0x8805,
+ 0x8806,
+ 0x8809,
+ 0x880a,
+ 0x880b,
+ 0x880c,
+ 0x880d,
+ 0x880f,
+ 0x8810,
+ 0x8820,
+ 0x8821,
+ 0x8822,
+ 0x8823,
+ 0x8824,
+ 0x8825,
+ 0x8826,
+ 0x8827,
+ 0x8828,
+ 0x8829,
+ 0x882a,
+ 0x882b,
+ 0x882c,
+ 0x882d,
+ 0x882e,
+ 0x882f,
+ 0x8830,
+ 0x8831,
+ 0x8832,
+ 0x8833,
+ 0x8834,
+ 0x8835,
+ 0x8836,
+ 0x8837,
+ 0x8838,
+ 0x8839,
+ 0x883a,
+ 0x883b,
+ 0x883c,
+ 0x883d,
+ 0x883e,
+ 0x883f,
+ 0x8840,
+ 0x8841,
+ 0x8842,
+ 0x8843,
+ 0x8844,
+ 0x8845,
+ 0x8846,
+ 0x8847,
+ 0x8848,
+ 0x8849,
+ 0x884a,
+ 0x884b,
+ 0x884c,
+ 0x884d,
+ 0x884e,
+ 0x884f,
+ 0x8850,
+ 0x8851,
+ 0x8852,
+ 0x8853,
+ 0x8854,
+ 0x8855,
+ 0x8856,
+ 0x8857,
+ 0x8858,
+ 0x8859,
+ 0x885a,
+ 0x885b,
+ 0x885c,
+ 0x885d,
+ 0x885e,
+ 0x885f,
+ 0x8860,
+ 0x8861,
+ 0x8862,
+ 0x8863,
+ 0x8865,
+ 0x8870,
+ 0x8871,
+ 0x8872,
+ 0x8873,
+ 0x8874,
+ 0x8875,
+ 0x8876,
+ 0x8877,
+ 0x8878,
+ 0x8879,
+ 0x8880,
+ 0x8881,
+ 0x8882,
+ 0x8883,
+ 0x8884,
+ 0x8885,
+ 0x8886,
+ 0x8887,
+ 0x8888,
+ 0x8889,
+ 0x8890,
+ 0x8898,
+ 0x88c0,
+ 0x88c1,
+ 0x88d0,
+ 0x88d1,
+ 0x88d2,
+ 0x88d3,
+ 0x88d4,
+ 0x88d5,
+ 0x88d6,
+ 0x88d7,
+ 0x88d8,
+ 0x88d9,
+ 0x88da,
+ 0x88db,
+ 0x88dc,
+ 0x88dd,
+ 0x88de,
+ 0x88df,
+ 0x88e0,
+ 0x88e1,
+ 0x88e2,
+ 0x88e3,
+ 0x8900,
+ 0x8901,
+ 0x8902,
+ 0x8903,
+ 0x8904,
+ 0x8905,
+ 0x8906,
+ 0x8907,
+ 0x8908,
+ 0x8909,
+ 0x890a,
+ 0x890b,
+ 0x890c,
+ 0x890d,
+ 0x890e,
+ 0x890f,
+ 0x8910,
+ 0x8911,
+ 0x8912,
+ 0x8913,
+ 0x8914,
+ 0x8915,
+ 0x8916,
+ 0x8917,
+ 0x8918,
+ 0x8919,
+ 0x891a,
+ 0x8a00,
+ 0x8a10,
+ 0x8a20,
+ 0x8a30,
+ 0x8c00,
+ 0x8c01,
+ 0x8c17,
+ 0x8c18,
+ 0x8c19,
+ 0x8c1a,
+ 0x8c1b,
+ 0x8c1c,
+ 0x8c1d,
+ 0x8c1e,
+ 0x8c1f,
+ 0x8c20,
+ 0x8c21,
+ 0x8c22,
+ 0x8c23,
+ 0x8c24,
+ 0x8c25,
+ 0x8c2c,
+ 0x8c2d,
+ 0x8c2e,
+ 0x8c2f,
+ 0x9100,
+ 0x9101,
+ 0x9102,
+ 0x9103,
+ 0x9104,
+ 0x9105,
+ 0x9106,
+ 0x9107,
+ 0x9108,
+ 0x9200,
+ 0x9201,
+ 0x9202,
+ 0x9203,
+ 0x9204,
+ 0x9205,
+ 0x9206,
+ 0x9207,
+ 0x9208,
+ 0x9209,
+ 0x920a,
+ 0x920b,
+ 0x920c,
+ 0x920d,
+ 0x920e,
+ 0x920f,
+ 0x9212,
+ 0x9213,
+ 0x9214,
+ 0x9215,
+ 0x9216,
+ 0x9217,
+ 0x9301,
+ 0x9302,
+ 0x9303,
+ 0x9304,
+ 0x9305,
+ 0x9306,
+ 0x9311,
+ 0x9312,
+ 0x9313,
+ 0x9314,
+ 0x9315,
+ 0x9316,
+ 0x9800,
+ 0x9801,
+ 0x9802,
+ 0x9803,
+ 0x9804,
+ 0x9805,
+ 0x9806,
+ 0x9808,
+ 0x9980,
+ 0x9981,
+ 0x9b00,
+ 0x9b01,
+ 0x9b02,
+ 0x9b03,
+ 0x9b04,
+ 0x9b05,
+ 0x9b06,
+ 0x9b07,
+ 0x9b08,
+ 0xa000,
+ 0xa001,
+ 0xa002,
+ 0xa003,
+ 0xa004,
+ 0xa005,
+ 0xa006,
+ 0xa008,
+ 0xa00e,
+ 0xa00f,
+ 0xa010,
+ 0xa011,
+ 0xa012,
+ 0xa013,
+ 0xa014,
+ 0xa015,
+ 0xa016,
+ 0xa017,
+ 0xa018,
+ 0xa019,
+ 0xa01a,
+ 0xa01b,
+ 0xa01c,
+ 0xa01d,
+ 0xa01e,
+ 0xa01f,
+ 0xa020,
+ 0xa021,
+ 0xa022,
+ 0xa023,
+ 0xa024,
+ 0xa025,
+ 0xa026,
+ 0xa027,
+ 0xa028,
+ 0xa029,
+ 0xa02a,
+ 0xa02b,
+ 0xa02c,
+ 0xa02d,
+ 0xa02e,
+ 0xa02f,
+ 0xa030,
+ 0xa031,
+ 0xa032,
+ 0xa033,
+ 0xa034,
+ 0xa035,
+ 0xa036,
+ 0xa037,
+ 0xa038,
+ 0xa039,
+ 0xa03a,
+ 0xa03b,
+ 0xa03c,
+ 0xa03d,
+ 0xa03e,
+ 0xa03f,
+ 0xa040,
+ 0xa041,
+ 0xa042,
+ 0xa043,
+ 0xa044,
+ 0xa045,
+ 0xa046,
+ 0xa047,
+ 0xa048,
+ 0xa049,
+ 0xa04a,
+ 0xa04b,
+ 0xa04c,
+ 0xa04d,
+ 0xa04e,
+ 0xa04f,
+ 0xa050,
+ 0xa051,
+ 0xa052,
+ 0xa053,
+ 0xa054,
+ 0xa055,
+ 0xa056,
+ 0xa057,
+ 0xa058,
+ 0xa059,
+ 0xa05a,
+ 0xa05b,
+ 0xa05c,
+ 0xa05d,
+ 0xa05e,
+ 0xa05f,
+ 0xa060,
+ 0xa061,
+ 0xa062,
+ 0xa063,
+ 0xa064,
+ 0xa065,
+ 0xa066,
+ 0xa067,
+ 0xa068,
+ 0xa069,
+ 0xa06a,
+ 0xa06b,
+ 0xa06c,
+ 0xa06d,
+ 0xa06e,
+ 0xa06f,
+ 0xa070,
+ 0xa071,
+ 0xa072,
+ 0xa073,
+ 0xa074,
+ 0xa075,
+ 0xa076,
+ 0xa077,
+ 0xa078,
+ 0xa079,
+ 0xa07a,
+ 0xa07b,
+ 0xa07c,
+ 0xa07d,
+ 0xa07e,
+ 0xa07f,
+ 0xa080,
+ 0xa081,
+ 0xa082,
+ 0xa083,
+ 0xa084,
+ 0xa085,
+ 0xa086,
+ 0xa087,
+ 0xa088,
+ 0xa089,
+ 0xa08a,
+ 0xa08b,
+ 0xa08c,
+ 0xa08d,
+ 0xa08e,
+ 0xa08f,
+ 0xa090,
+ 0xa091,
+ 0xa092,
+ 0xa093,
+ 0xa094,
+ 0xa095,
+ 0xa096,
+ 0xa097,
+ 0xa098,
+ 0xa099,
+ 0xa09a,
+ 0xa09b,
+ 0xa09c,
+ 0xa09d,
+ 0xa09e,
+ 0xa09f,
+ 0xa0a0,
+ 0xa0a1,
+ 0xa0a2,
+ 0xa0a3,
+ 0xa0a4,
+ 0xa0a5,
+ 0xa0a6,
+ 0xa0a7,
+ 0xa0a8,
+ 0xa0a9,
+ 0xa0aa,
+ 0xa0ab,
+ 0xa0ac,
+ 0xa0ad,
+ 0xa0ae,
+ 0xa0af,
+ 0xa0b0,
+ 0xa0b1,
+ 0xa0b2,
+ 0xa0b3,
+ 0xa0b4,
+ 0xa0b5,
+ 0xa0b6,
+ 0xa0b7,
+ 0xa0b8,
+ 0xa0b9,
+ 0xa0ba,
+ 0xa0bb,
+ 0xa0bc,
+ 0xa0bd,
+ 0xa0be,
+ 0xa0bf,
+ 0xa0c0,
+ 0xa0c1,
+ 0xa0c2,
+ 0xa0c3,
+ 0xa0c4,
+ 0xa0c5,
+ 0xa0c6,
+ 0xa0c7,
+ 0xa0c8,
+ 0xa0c9,
+ 0xa0ca,
+ 0xa0cb,
+ 0xa0cc,
+ 0xa0cd,
+ 0xa0ce,
+ 0xa0cf,
+ 0xa0d0,
+ 0xa0d1,
+ 0xa0d2,
+ 0xa0d3,
+ 0xa0d4,
+ 0xa0d5,
+ 0xa0d6,
+ 0xa0d7,
+ 0xa0d8,
+ 0xa0d9,
+ 0xa0da,
+ 0xa0db,
+ 0xa0dc,
+ 0xa0dd,
+ 0xa0de,
+ 0xa0df,
+ 0xa0e0,
+ 0xa0e1,
+ 0xa0e2,
+ 0xa0e3,
+ 0xa0e4,
+ 0xa0e5,
+ 0xa0e6,
+ 0xa0e7,
+ 0xa0e8,
+ 0xa0e9,
+ 0xa0ea,
+ 0xa0eb,
+ 0xa0ec,
+ 0xa0ed,
+ 0xa0ee,
+ 0xa0ef,
+ 0xa0f8,
+ 0xa800,
+ 0xa802,
+ 0xa803,
+ 0xa804,
+ 0xa805,
+ 0xa806,
+ 0xa807,
+ 0xa808,
+ 0xa809,
+ 0xa80a,
+ 0xa80b,
+ 0xa80c,
+ 0xa80d,
+ 0xa80e,
+ 0xa80f,
+ 0xa810,
+ 0xa811,
+ 0xa812,
+ 0xa813,
+ 0xa814,
+ 0xa815,
+ 0xa816,
+ 0xa817,
+ 0xa818,
+ 0xa819,
+ 0xa81a,
+ 0xa81b,
+ 0xa81c,
+ 0xa81d,
+ 0xa81e,
+ 0xa81f,
+ 0xa820,
+ 0xa821,
+ 0xa822,
+ 0xa823,
+ 0xa824,
+ 0xa825,
+ 0xa830,
+ 0xa831,
+ 0xa832,
+ 0xa833,
+ 0xa834,
+ 0xa835,
+ 0xa836,
+ 0xa837,
+ 0xa838,
+ 0xa839,
+ 0xa83a,
+ 0xa83b,
+ 0xa83c,
+ 0xa83d,
+ 0xa840,
+ 0xa842,
+ 0xa843,
+ 0xa844,
+ 0xa845,
+ 0xa846,
+ 0xa847,
+ 0xa848,
+ 0xa849,
+ 0xa84a,
+ 0xa84b,
+ 0xa84c,
+ 0xa84d,
+ 0xa84e,
+ 0xa84f,
+ 0xa850,
+ 0xa851,
+ 0xa852,
+ 0xa853,
+ 0xa854,
+ 0xa855,
+ 0xa856,
+ 0xa857,
+ 0xa858,
+ 0xa859,
+ 0xa85a,
+ 0xa85b,
+ 0xa85c,
+ 0xa85d,
+ 0xa85e,
+ 0xa85f,
+ 0xa860,
+ 0xa861,
+ 0xa862,
+ 0xa863,
+ 0xa864,
+ 0xa865,
+ 0xa870,
+ 0xa871,
+ 0xa872,
+ 0xa873,
+ 0xa874,
+ 0xa875,
+ 0xa876,
+ 0xa877,
+ 0xa878,
+ 0xa879,
+ 0xa87a,
+ 0xa87b,
+ 0xa87c,
+ 0xa87d,
+ 0xa87e,
+ 0xa87f,
+ 0xa880,
+ 0xa881,
+ 0xa882,
+ 0xa883,
+ 0xa884,
+ 0xa885,
+ 0xa886,
+ 0xa887,
+ 0xa888,
+ 0xa889,
+ 0xa88a,
+ 0xa88b,
+ 0xa88c,
+ 0xa88d,
+ 0xa88e,
+ 0xa88f,
+ 0xa890,
+ 0xa891,
+ 0xa892,
+ 0xa893,
+ 0xa894,
+ 0xa895,
+ 0xa896,
+ 0xa980,
+ 0xa982,
+ 0xa983,
+ 0xa984,
+ 0xa985,
+ 0xa986,
+ 0xa987,
+ 0xa988,
+ 0xa989,
+ 0xa98a,
+ 0xa98b,
+ 0xa98c,
+ 0xa98d,
+ 0xa98e,
+ 0xa98f,
+ 0xa990,
+ 0xa991,
+ 0xa992,
+ 0xa993,
+ 0xa994,
+ 0xa995,
+ 0xa996,
+ 0xa997,
+ 0xa998,
+ 0xa999,
+ 0xa99a,
+ 0xa99b,
+ 0xa99c,
+ 0xa99d,
+ 0xa99e,
+ 0xa99f,
+ 0xa9a0,
+ 0xa9a1,
+ 0xa9a2,
+ 0xa9a3,
+ 0xa9a4,
+ 0xa9a5,
+ 0xa9a6,
+ 0xa9a7,
+ 0xa9a9,
+ 0xa9e0,
+ 0xa9e1,
+ 0xa9e4,
+ 0xa9e5,
+ 0xab00,
+ 0xab04,
+ 0xab05,
+ 0xab10,
+ 0xab11,
+ 0xab12,
+ 0xab13,
+ 0xab14,
+ 0xab15,
+ 0xab16,
+ 0xab17,
+ 0xab18,
+ 0xab19,
+ 0xacc0,
+ 0xb300,
+ 0xb301,
+ 0xb304,
+ 0xb305,
+ 0xb306,
+ 0xb307,
+ 0xb4c0,
+ 0xb4c1,
+ 0xb4c2,
+ 0xb4c3,
+ 0xb4c4,
+ 0xb4ca,
+ 0xb4cb,
+ 0xb4cc,
+ 0xb4d1,
+ 0xb800,
+ 0xb801,
+ 0xb802,
+ 0xb803,
+ 0xb980,
+ 0xb982,
+ 0xb983,
+ 0xb984,
+ 0xb985,
+ 0xb986,
+ 0xb990,
+ 0xb991,
+ 0xb992,
+ 0xb993,
+ 0xb994,
+ 0xb995,
+ 0xb996,
+ 0xb997,
+ 0xb998,
+ 0xb999,
+ 0xb99a,
+ 0xb99b,
+ 0xb9c0,
+ 0xb9c1,
+ 0xb9c2,
+ 0xb9c3,
+ 0xb9c4,
+ 0xb9c5,
+ 0xb9c6,
+ 0xb9c7,
+ 0xb9c8,
+ 0xb9c9,
+ 0xbb10,
+};
+template<> constexpr inline uint16_t RP_BLIT_REGS<A7XX>[] = {
+ 0xc02,
+ 0xc06,
+ 0xc10,
+ 0xc11,
+ 0xc12,
+ 0xc13,
+ 0xc14,
+ 0xc15,
+ 0xc16,
+ 0xc17,
+ 0xc18,
+ 0xc19,
+ 0xc1a,
+ 0xc1b,
+ 0xc1c,
+ 0xc1d,
+ 0xc1e,
+ 0xc1f,
+ 0xc20,
+ 0xc21,
+ 0xc22,
+ 0xc23,
+ 0xc24,
+ 0xc25,
+ 0xc26,
+ 0xc27,
+ 0xc28,
+ 0xc29,
+ 0xc2a,
+ 0xc2b,
+ 0xc2c,
+ 0xc2d,
+ 0xc2e,
+ 0xc2f,
+ 0xc38,
+ 0xc39,
+ 0xc3a,
+ 0xc3b,
+ 0xc3c,
+ 0xc3d,
+ 0xc3e,
+ 0xc3f,
+ 0xc40,
+ 0xc41,
+ 0xc42,
+ 0xc43,
+ 0xc44,
+ 0xc45,
+ 0xc46,
+ 0xc47,
+ 0xc48,
+ 0xc49,
+ 0xc4a,
+ 0xc4b,
+ 0xc4c,
+ 0xc4d,
+ 0xc4e,
+ 0xc4f,
+ 0xc50,
+ 0xc51,
+ 0xc52,
+ 0xc53,
+ 0xc54,
+ 0xc55,
+ 0xc56,
+ 0xc57,
+ 0x8000,
+ 0x8001,
+ 0x8002,
+ 0x8003,
+ 0x8004,
+ 0x8005,
+ 0x8006,
+ 0x8007,
+ 0x8010,
+ 0x8011,
+ 0x8012,
+ 0x8013,
+ 0x8014,
+ 0x8015,
+ 0x8016,
+ 0x8017,
+ 0x8018,
+ 0x8019,
+ 0x801a,
+ 0x801b,
+ 0x801c,
+ 0x801d,
+ 0x801e,
+ 0x801f,
+ 0x8020,
+ 0x8021,
+ 0x8022,
+ 0x8023,
+ 0x8024,
+ 0x8025,
+ 0x8026,
+ 0x8027,
+ 0x8028,
+ 0x8029,
+ 0x802a,
+ 0x802b,
+ 0x802c,
+ 0x802d,
+ 0x802e,
+ 0x802f,
+ 0x8030,
+ 0x8031,
+ 0x8032,
+ 0x8033,
+ 0x8034,
+ 0x8035,
+ 0x8036,
+ 0x8037,
+ 0x8038,
+ 0x8039,
+ 0x803a,
+ 0x803b,
+ 0x803c,
+ 0x803d,
+ 0x803e,
+ 0x803f,
+ 0x8040,
+ 0x8041,
+ 0x8042,
+ 0x8043,
+ 0x8044,
+ 0x8045,
+ 0x8046,
+ 0x8047,
+ 0x8048,
+ 0x8049,
+ 0x804a,
+ 0x804b,
+ 0x804c,
+ 0x804d,
+ 0x804e,
+ 0x804f,
+ 0x8050,
+ 0x8051,
+ 0x8052,
+ 0x8053,
+ 0x8054,
+ 0x8055,
+ 0x8056,
+ 0x8057,
+ 0x8058,
+ 0x8059,
+ 0x805a,
+ 0x805b,
+ 0x805c,
+ 0x805d,
+ 0x805e,
+ 0x805f,
+ 0x8060,
+ 0x8061,
+ 0x8062,
+ 0x8063,
+ 0x8064,
+ 0x8065,
+ 0x8066,
+ 0x8067,
+ 0x8068,
+ 0x8069,
+ 0x806a,
+ 0x806b,
+ 0x806c,
+ 0x806d,
+ 0x806e,
+ 0x806f,
+ 0x8070,
+ 0x8071,
+ 0x8072,
+ 0x8073,
+ 0x8074,
+ 0x8075,
+ 0x8076,
+ 0x8077,
+ 0x8078,
+ 0x8079,
+ 0x807a,
+ 0x807b,
+ 0x807c,
+ 0x807d,
+ 0x807e,
+ 0x807f,
+ 0x8080,
+ 0x8081,
+ 0x8082,
+ 0x8083,
+ 0x8084,
+ 0x8085,
+ 0x8086,
+ 0x8087,
+ 0x8088,
+ 0x8089,
+ 0x808a,
+ 0x808b,
+ 0x808c,
+ 0x808d,
+ 0x808e,
+ 0x808f,
+ 0x8090,
+ 0x8091,
+ 0x8092,
+ 0x8094,
+ 0x8095,
+ 0x8096,
+ 0x8097,
+ 0x8098,
+ 0x809b,
+ 0x809c,
+ 0x809d,
+ 0x80a0,
+ 0x80a1,
+ 0x80a2,
+ 0x80a3,
+ 0x80a4,
+ 0x80a5,
+ 0x80a6,
+ 0x80b0,
+ 0x80b1,
+ 0x80b2,
+ 0x80b3,
+ 0x80b4,
+ 0x80b5,
+ 0x80b6,
+ 0x80b7,
+ 0x80b8,
+ 0x80b9,
+ 0x80ba,
+ 0x80bb,
+ 0x80bc,
+ 0x80bd,
+ 0x80be,
+ 0x80bf,
+ 0x80c0,
+ 0x80c1,
+ 0x80c2,
+ 0x80c3,
+ 0x80c4,
+ 0x80c5,
+ 0x80c6,
+ 0x80c7,
+ 0x80c8,
+ 0x80c9,
+ 0x80ca,
+ 0x80cb,
+ 0x80cc,
+ 0x80cd,
+ 0x80ce,
+ 0x80cf,
+ 0x80d0,
+ 0x80d1,
+ 0x80d2,
+ 0x80d3,
+ 0x80d4,
+ 0x80d5,
+ 0x80d6,
+ 0x80d7,
+ 0x80d8,
+ 0x80d9,
+ 0x80da,
+ 0x80db,
+ 0x80dc,
+ 0x80dd,
+ 0x80de,
+ 0x80df,
+ 0x80e0,
+ 0x80e1,
+ 0x80e2,
+ 0x80e3,
+ 0x80e4,
+ 0x80e5,
+ 0x80e6,
+ 0x80e7,
+ 0x80e8,
+ 0x80e9,
+ 0x80ea,
+ 0x80eb,
+ 0x80ec,
+ 0x80ed,
+ 0x80ee,
+ 0x80ef,
+ 0x80f0,
+ 0x80f1,
+ 0x8100,
+ 0x8101,
+ 0x8102,
+ 0x8103,
+ 0x8104,
+ 0x8105,
+ 0x8106,
+ 0x8107,
+ 0x8109,
+ 0x8113,
+ 0x8114,
+ 0x8115,
+ 0x8116,
+ 0x8400,
+ 0x8401,
+ 0x8402,
+ 0x8403,
+ 0x8404,
+ 0x8405,
+ 0x8406,
+ 0x840a,
+ 0x840b,
+ 0x8800,
+ 0x8801,
+ 0x8802,
+ 0x8803,
+ 0x8804,
+ 0x8805,
+ 0x8806,
+ 0x8809,
+ 0x880a,
+ 0x880b,
+ 0x880c,
+ 0x880d,
+ 0x880f,
+ 0x8810,
+ 0x8812,
+ 0x8820,
+ 0x8821,
+ 0x8822,
+ 0x8823,
+ 0x8824,
+ 0x8825,
+ 0x8826,
+ 0x8827,
+ 0x8828,
+ 0x8829,
+ 0x882a,
+ 0x882b,
+ 0x882c,
+ 0x882d,
+ 0x882e,
+ 0x882f,
+ 0x8830,
+ 0x8831,
+ 0x8832,
+ 0x8833,
+ 0x8834,
+ 0x8835,
+ 0x8836,
+ 0x8837,
+ 0x8838,
+ 0x8839,
+ 0x883a,
+ 0x883b,
+ 0x883c,
+ 0x883d,
+ 0x883e,
+ 0x883f,
+ 0x8840,
+ 0x8841,
+ 0x8842,
+ 0x8843,
+ 0x8844,
+ 0x8845,
+ 0x8846,
+ 0x8847,
+ 0x8848,
+ 0x8849,
+ 0x884a,
+ 0x884b,
+ 0x884c,
+ 0x884d,
+ 0x884e,
+ 0x884f,
+ 0x8850,
+ 0x8851,
+ 0x8852,
+ 0x8853,
+ 0x8854,
+ 0x8855,
+ 0x8856,
+ 0x8857,
+ 0x8858,
+ 0x8859,
+ 0x885a,
+ 0x885b,
+ 0x885c,
+ 0x885d,
+ 0x885e,
+ 0x885f,
+ 0x8860,
+ 0x8861,
+ 0x8862,
+ 0x8863,
+ 0x8865,
+ 0x8870,
+ 0x8871,
+ 0x8872,
+ 0x8873,
+ 0x8874,
+ 0x8875,
+ 0x8876,
+ 0x8877,
+ 0x8878,
+ 0x8879,
+ 0x8880,
+ 0x8881,
+ 0x8882,
+ 0x8883,
+ 0x8884,
+ 0x8885,
+ 0x8886,
+ 0x8887,
+ 0x8888,
+ 0x8889,
+ 0x8890,
+ 0x8898,
+ 0x88c0,
+ 0x88c1,
+ 0x88d0,
+ 0x88d1,
+ 0x88d2,
+ 0x88d3,
+ 0x88d4,
+ 0x88d5,
+ 0x88d6,
+ 0x88d7,
+ 0x88d8,
+ 0x88d9,
+ 0x88da,
+ 0x88db,
+ 0x88dc,
+ 0x88dd,
+ 0x88de,
+ 0x88df,
+ 0x88e0,
+ 0x88e1,
+ 0x88e2,
+ 0x88e3,
+ 0x8900,
+ 0x8901,
+ 0x8902,
+ 0x8903,
+ 0x8904,
+ 0x8905,
+ 0x8906,
+ 0x8907,
+ 0x8908,
+ 0x8909,
+ 0x890a,
+ 0x890b,
+ 0x890c,
+ 0x890d,
+ 0x890e,
+ 0x890f,
+ 0x8910,
+ 0x8911,
+ 0x8912,
+ 0x8913,
+ 0x8914,
+ 0x8915,
+ 0x8916,
+ 0x8917,
+ 0x8918,
+ 0x8919,
+ 0x891a,
+ 0x8c00,
+ 0x8c01,
+ 0x8c17,
+ 0x8c18,
+ 0x8c19,
+ 0x8c1a,
+ 0x8c1b,
+ 0x8c1c,
+ 0x8c1d,
+ 0x8c1e,
+ 0x8c1f,
+ 0x8c20,
+ 0x8c21,
+ 0x8c22,
+ 0x8c23,
+ 0x8c24,
+ 0x8c25,
+ 0x8c2c,
+ 0x8c2d,
+ 0x8c2e,
+ 0x8c2f,
+ 0x9101,
+ 0x9102,
+ 0x9103,
+ 0x9104,
+ 0x9105,
+ 0x9106,
+ 0x9107,
+ 0x9108,
+ 0x9109,
+ 0x910a,
+ 0x910b,
+ 0x910c,
+ 0x9200,
+ 0x9201,
+ 0x9202,
+ 0x9203,
+ 0x9204,
+ 0x9205,
+ 0x9206,
+ 0x9207,
+ 0x9208,
+ 0x9209,
+ 0x920a,
+ 0x920b,
+ 0x920c,
+ 0x920d,
+ 0x920e,
+ 0x920f,
+ 0x9212,
+ 0x9213,
+ 0x9214,
+ 0x9215,
+ 0x9216,
+ 0x9217,
+ 0x9301,
+ 0x9302,
+ 0x9303,
+ 0x9304,
+ 0x9305,
+ 0x9306,
+ 0x9307,
+ 0x9308,
+ 0x9309,
+ 0x9311,
+ 0x9312,
+ 0x9313,
+ 0x9314,
+ 0x9315,
+ 0x9316,
+ 0x9317,
+ 0x9800,
+ 0x9801,
+ 0x9802,
+ 0x9803,
+ 0x9804,
+ 0x9805,
+ 0x9806,
+ 0x9808,
+ 0x9809,
+ 0x9b00,
+ 0x9b01,
+ 0x9b02,
+ 0x9b03,
+ 0x9b04,
+ 0x9b05,
+ 0x9b07,
+ 0x9b08,
+ 0x9b09,
+ 0xa000,
+ 0xa001,
+ 0xa002,
+ 0xa003,
+ 0xa004,
+ 0xa005,
+ 0xa006,
+ 0xa008,
+ 0xa00e,
+ 0xa00f,
+ 0xa010,
+ 0xa011,
+ 0xa012,
+ 0xa013,
+ 0xa014,
+ 0xa015,
+ 0xa016,
+ 0xa017,
+ 0xa018,
+ 0xa019,
+ 0xa01a,
+ 0xa01b,
+ 0xa01c,
+ 0xa01d,
+ 0xa01e,
+ 0xa01f,
+ 0xa020,
+ 0xa021,
+ 0xa022,
+ 0xa023,
+ 0xa024,
+ 0xa025,
+ 0xa026,
+ 0xa027,
+ 0xa028,
+ 0xa029,
+ 0xa02a,
+ 0xa02b,
+ 0xa02c,
+ 0xa02d,
+ 0xa02e,
+ 0xa02f,
+ 0xa030,
+ 0xa031,
+ 0xa032,
+ 0xa033,
+ 0xa034,
+ 0xa035,
+ 0xa036,
+ 0xa037,
+ 0xa038,
+ 0xa039,
+ 0xa03a,
+ 0xa03b,
+ 0xa03c,
+ 0xa03d,
+ 0xa03e,
+ 0xa03f,
+ 0xa040,
+ 0xa041,
+ 0xa042,
+ 0xa043,
+ 0xa044,
+ 0xa045,
+ 0xa046,
+ 0xa047,
+ 0xa048,
+ 0xa049,
+ 0xa04a,
+ 0xa04b,
+ 0xa04c,
+ 0xa04d,
+ 0xa04e,
+ 0xa04f,
+ 0xa050,
+ 0xa051,
+ 0xa052,
+ 0xa053,
+ 0xa054,
+ 0xa055,
+ 0xa056,
+ 0xa057,
+ 0xa058,
+ 0xa059,
+ 0xa05a,
+ 0xa05b,
+ 0xa05c,
+ 0xa05d,
+ 0xa05e,
+ 0xa05f,
+ 0xa060,
+ 0xa061,
+ 0xa062,
+ 0xa063,
+ 0xa064,
+ 0xa065,
+ 0xa066,
+ 0xa067,
+ 0xa068,
+ 0xa069,
+ 0xa06a,
+ 0xa06b,
+ 0xa06c,
+ 0xa06d,
+ 0xa06e,
+ 0xa06f,
+ 0xa070,
+ 0xa071,
+ 0xa072,
+ 0xa073,
+ 0xa074,
+ 0xa075,
+ 0xa076,
+ 0xa077,
+ 0xa078,
+ 0xa079,
+ 0xa07a,
+ 0xa07b,
+ 0xa07c,
+ 0xa07d,
+ 0xa07e,
+ 0xa07f,
+ 0xa080,
+ 0xa081,
+ 0xa082,
+ 0xa083,
+ 0xa084,
+ 0xa085,
+ 0xa086,
+ 0xa087,
+ 0xa088,
+ 0xa089,
+ 0xa08a,
+ 0xa08b,
+ 0xa08c,
+ 0xa08d,
+ 0xa08e,
+ 0xa08f,
+ 0xa090,
+ 0xa091,
+ 0xa092,
+ 0xa093,
+ 0xa094,
+ 0xa095,
+ 0xa096,
+ 0xa097,
+ 0xa098,
+ 0xa099,
+ 0xa09a,
+ 0xa09b,
+ 0xa09c,
+ 0xa09d,
+ 0xa09e,
+ 0xa09f,
+ 0xa0a0,
+ 0xa0a1,
+ 0xa0a2,
+ 0xa0a3,
+ 0xa0a4,
+ 0xa0a5,
+ 0xa0a6,
+ 0xa0a7,
+ 0xa0a8,
+ 0xa0a9,
+ 0xa0aa,
+ 0xa0ab,
+ 0xa0ac,
+ 0xa0ad,
+ 0xa0ae,
+ 0xa0af,
+ 0xa0b0,
+ 0xa0b1,
+ 0xa0b2,
+ 0xa0b3,
+ 0xa0b4,
+ 0xa0b5,
+ 0xa0b6,
+ 0xa0b7,
+ 0xa0b8,
+ 0xa0b9,
+ 0xa0ba,
+ 0xa0bb,
+ 0xa0bc,
+ 0xa0bd,
+ 0xa0be,
+ 0xa0bf,
+ 0xa0c0,
+ 0xa0c1,
+ 0xa0c2,
+ 0xa0c3,
+ 0xa0c4,
+ 0xa0c5,
+ 0xa0c6,
+ 0xa0c7,
+ 0xa0c8,
+ 0xa0c9,
+ 0xa0ca,
+ 0xa0cb,
+ 0xa0cc,
+ 0xa0cd,
+ 0xa0ce,
+ 0xa0cf,
+ 0xa0d0,
+ 0xa0d1,
+ 0xa0d2,
+ 0xa0d3,
+ 0xa0d4,
+ 0xa0d5,
+ 0xa0d6,
+ 0xa0d7,
+ 0xa0d8,
+ 0xa0d9,
+ 0xa0da,
+ 0xa0db,
+ 0xa0dc,
+ 0xa0dd,
+ 0xa0de,
+ 0xa0df,
+ 0xa0e0,
+ 0xa0e1,
+ 0xa0e2,
+ 0xa0e3,
+ 0xa0e4,
+ 0xa0e5,
+ 0xa0e6,
+ 0xa0e7,
+ 0xa0e8,
+ 0xa0e9,
+ 0xa0ea,
+ 0xa0eb,
+ 0xa0ec,
+ 0xa0ed,
+ 0xa0ee,
+ 0xa0ef,
+ 0xa0f8,
+ 0xa800,
+ 0xa802,
+ 0xa803,
+ 0xa804,
+ 0xa805,
+ 0xa806,
+ 0xa807,
+ 0xa808,
+ 0xa809,
+ 0xa80a,
+ 0xa80b,
+ 0xa80c,
+ 0xa80d,
+ 0xa80e,
+ 0xa80f,
+ 0xa810,
+ 0xa811,
+ 0xa812,
+ 0xa813,
+ 0xa814,
+ 0xa815,
+ 0xa816,
+ 0xa817,
+ 0xa818,
+ 0xa819,
+ 0xa81a,
+ 0xa81b,
+ 0xa81c,
+ 0xa81d,
+ 0xa81e,
+ 0xa81f,
+ 0xa820,
+ 0xa821,
+ 0xa822,
+ 0xa823,
+ 0xa824,
+ 0xa825,
+ 0xa827,
+ 0xa830,
+ 0xa831,
+ 0xa832,
+ 0xa833,
+ 0xa834,
+ 0xa835,
+ 0xa836,
+ 0xa837,
+ 0xa838,
+ 0xa839,
+ 0xa83a,
+ 0xa83b,
+ 0xa83c,
+ 0xa83d,
+ 0xa83f,
+ 0xa840,
+ 0xa842,
+ 0xa843,
+ 0xa844,
+ 0xa845,
+ 0xa846,
+ 0xa847,
+ 0xa848,
+ 0xa849,
+ 0xa84a,
+ 0xa84b,
+ 0xa84c,
+ 0xa84d,
+ 0xa84e,
+ 0xa84f,
+ 0xa850,
+ 0xa851,
+ 0xa852,
+ 0xa853,
+ 0xa854,
+ 0xa855,
+ 0xa856,
+ 0xa857,
+ 0xa858,
+ 0xa859,
+ 0xa85a,
+ 0xa85b,
+ 0xa85c,
+ 0xa85d,
+ 0xa85e,
+ 0xa85f,
+ 0xa860,
+ 0xa861,
+ 0xa862,
+ 0xa863,
+ 0xa864,
+ 0xa865,
+ 0xa867,
+ 0xa870,
+ 0xa871,
+ 0xa872,
+ 0xa873,
+ 0xa874,
+ 0xa875,
+ 0xa876,
+ 0xa877,
+ 0xa878,
+ 0xa879,
+ 0xa87a,
+ 0xa87b,
+ 0xa87c,
+ 0xa87d,
+ 0xa87e,
+ 0xa87f,
+ 0xa880,
+ 0xa881,
+ 0xa882,
+ 0xa883,
+ 0xa884,
+ 0xa885,
+ 0xa886,
+ 0xa887,
+ 0xa888,
+ 0xa889,
+ 0xa88a,
+ 0xa88b,
+ 0xa88c,
+ 0xa88d,
+ 0xa88e,
+ 0xa88f,
+ 0xa890,
+ 0xa891,
+ 0xa892,
+ 0xa893,
+ 0xa894,
+ 0xa895,
+ 0xa896,
+ 0xa898,
+ 0xa980,
+ 0xa982,
+ 0xa983,
+ 0xa984,
+ 0xa985,
+ 0xa986,
+ 0xa987,
+ 0xa988,
+ 0xa989,
+ 0xa98a,
+ 0xa98b,
+ 0xa98c,
+ 0xa98d,
+ 0xa98e,
+ 0xa98f,
+ 0xa990,
+ 0xa991,
+ 0xa992,
+ 0xa993,
+ 0xa994,
+ 0xa995,
+ 0xa996,
+ 0xa997,
+ 0xa998,
+ 0xa999,
+ 0xa99a,
+ 0xa99b,
+ 0xa99c,
+ 0xa99d,
+ 0xa99e,
+ 0xa99f,
+ 0xa9a0,
+ 0xa9a1,
+ 0xa9a2,
+ 0xa9a3,
+ 0xa9a4,
+ 0xa9a5,
+ 0xa9a6,
+ 0xa9a7,
+ 0xa9a9,
+ 0xa9aa,
+ 0xa9ae,
+ 0xa9bf,
+ 0xa9c6,
+ 0xa9c7,
+ 0xa9c8,
+ 0xa9c9,
+ 0xa9ca,
+ 0xa9cb,
+ 0xa9d4,
+ 0xa9d5,
+ 0xa9d6,
+ 0xa9d7,
+ 0xa9d8,
+ 0xa9d9,
+ 0xa9da,
+ 0xa9db,
+ 0xa9dc,
+ 0xa9dd,
+ 0xa9de,
+ 0xa9e0,
+ 0xa9e1,
+ 0xa9e4,
+ 0xa9e5,
+ 0xab00,
+ 0xab03,
+ 0xab04,
+ 0xab05,
+ 0xab0a,
+ 0xab0b,
+ 0xab0c,
+ 0xab0d,
+ 0xab0e,
+ 0xab0f,
+ 0xab10,
+ 0xab11,
+ 0xab12,
+ 0xab13,
+ 0xab14,
+ 0xab15,
+ 0xab16,
+ 0xab17,
+ 0xab18,
+ 0xab19,
+ 0xab21,
+ 0xb2c0,
+ 0xb2c2,
+ 0xb2c3,
+ 0xb2ca,
+ 0xb2cb,
+ 0xb2cc,
+ 0xb2d2,
+ 0xb300,
+ 0xb301,
+ 0xb304,
+ 0xb305,
+ 0xb306,
+ 0xb307,
+};
+#endif
+
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 8c4900444b2c..8bea8ef26f77 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -223,7 +223,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
* note: downstream saves the value in poweroff and restores it here
*/
if (adreno_is_a7xx(adreno_gpu))
- gmu_write(gmu, REG_A6XX_GMU_GENERAL_9, 0);
+ gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
else
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
@@ -842,6 +842,8 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
*/
if (adreno_is_a740(adreno_gpu))
chipid_min = 2;
+ else if (adreno_is_a750(adreno_gpu))
+ chipid_min = 9;
else
return -EINVAL;
@@ -863,8 +865,8 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
}
if (adreno_is_a7xx(adreno_gpu)) {
- gmu_write(gmu, REG_A6XX_GMU_GENERAL_10, chipid);
- gmu_write(gmu, REG_A6XX_GMU_GENERAL_8,
+ gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
+ gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
(gmu->log.iova & GENMASK(31, 12)) |
((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
} else {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 5b66efafc901..9d7f93929367 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -3,28 +3,19 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2023 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11820 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
+
+Copyright (C) 2013-2024 by the following authors:
+- Rob Clark <robdclark@gmail.com> Rob Clark
+- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,112 +36,42 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
+
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
+#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
+#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
-#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK 0x00800000
-#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT 23
-static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val)
-{
- return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK;
-}
-#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK 0x40000000
-#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30
-static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val)
-{
- return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK;
-}
-#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK 0x00400000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT 22
-static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK;
-}
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT 30
-static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK;
-}
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT 30
-static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK;
-}
-#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK 0x00800000
-#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT 23
-static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK;
-}
-#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK 0x80000000
-#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT 31
-static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK;
-}
-#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK 0x80000000
-#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT 31
-static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK;
-}
-#define A6XX_GMU_OOB_GPU_SET_MASK__MASK 0x00040000
-#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18
-static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
-}
-#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK 0x04000000
-#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT 26
-static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK;
-}
-#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK 0x04000000
-#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT 26
-static inline uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK;
-}
-#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK 0x00020000
-#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT 17
-static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK;
-}
-#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK 0x02000000
-#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT 25
-static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK;
-}
-#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK 0x02000000
-#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT 25
-static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val)
-{
- return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK;
-}
#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
-#define A6XX_HFI_IRQ_DSGQ_MASK__MASK 0x00000002
-#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT 1
-static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val)
-{
- return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK;
-}
-#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK 0x00000004
-#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT 2
-static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val)
-{
- return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK;
-}
-#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK 0x00800000
-#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT 23
-static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val)
-{
- return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK;
-}
+#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
@@ -163,7 +84,9 @@ static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
{
return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
}
+
#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+
#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
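
The OOB *_SET/_CHECK/_CLEAR definitions earlier in this hunk are now plain single-bit masks rather than one-bit field builders, which matches how the out-of-band handshake uses them: raise a request bit, poll for the matching acknowledge bit, then clear it. A hedged sketch of that flow; the interrupt register names, the gmu_* helpers and the poll timings are assumptions about the surrounding GMU driver, not part of this hunk:

/* Sketch only, assuming the usual HOST2GMU/GMU2HOST interrupt registers
 * and the gmu_write()/gmu_poll_timeout() helpers from the GMU driver. */
static int example_gpu_oob_request(struct a6xx_gmu *gmu)
{
	u32 ack;

	/* Raise the request bit... */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, A6XX_GMU_OOB_GPU_SET_MASK);

	/* ...wait for the GMU to acknowledge with the CHECK bit... */
	if (gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, ack,
			     ack & A6XX_GMU_OOB_GPU_CHECK_MASK, 100, 10000))
		return -ETIMEDOUT;

	/* ...then acknowledge the ack by writing the CLEAR bit. */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, A6XX_GMU_OOB_GPU_CLEAR_MASK);

	return 0;
}
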
@@ -356,15 +279,19 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+#define REG_A6XX_GMU_GENERAL_0 0x000051c5
+
#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+#define REG_A6XX_GMU_GENERAL_6 0x000051cb
+
#define REG_A6XX_GMU_GENERAL_7 0x000051cc
-#define REG_A6XX_GMU_GENERAL_8 0x000051cd
+#define REG_A7XX_GMU_GENERAL_8 0x000051cd
-#define REG_A6XX_GMU_GENERAL_9 0x000051ce
+#define REG_A7XX_GMU_GENERAL_9 0x000051ce
-#define REG_A6XX_GMU_GENERAL_10 0x000051cf
+#define REG_A7XX_GMU_GENERAL_10 0x000051cf
#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
@@ -489,5 +416,7 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
+#ifdef __cplusplus
+#endif
#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c9c55e2ea584..0674aca0f8a3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -837,6 +837,65 @@ const struct adreno_reglist a690_hwcg[] = {
{}
};
+const struct adreno_reglist a702_hwcg[] = {
+ { REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081 },
+ { REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002 },
+ { REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+ { REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002 },
+ { REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+ { REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_FCHE, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_FCHE, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_FCHE, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_GLC, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GLC, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GLC, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_MHUB, 0x00000002 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_MHUB, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_MHUB, 0x00000000 },
+ {}
+};
+
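
Like the other hwcg tables in this file, a702_hwcg is a zero-terminated array of {offset, value} pairs; the empty {} entry is the sentinel that the programming loop shown further down in this patch keys off. A minimal sketch of such a consumer, using only gpu_write() and the reglist fields that appear in this patch; the helper name is illustrative:

/* Sketch only: walk a zero-terminated adreno_reglist and either program
 * the listed values (enable) or write zeroes (disable). */
static void example_apply_reglist(struct msm_gpu *gpu,
				  const struct adreno_reglist *list, bool state)
{
	const struct adreno_reglist *reg;

	/* The trailing {} entry has .offset == 0, which ends the walk. */
	for (reg = list; reg->offset; reg++)
		gpu_write(gpu, reg->offset, state ? reg->value : 0);
}
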
const struct adreno_reglist a730_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022222 },
@@ -961,13 +1020,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
unsigned int i;
u32 val, clock_cntl_on, cgc_mode;
- if (!adreno_gpu->info->hwcg)
+ if (!(adreno_gpu->info->hwcg || adreno_is_a7xx(adreno_gpu)))
return;
if (adreno_is_a630(adreno_gpu))
clock_cntl_on = 0x8aa8aa02;
else if (adreno_is_a610(adreno_gpu))
clock_cntl_on = 0xaaa8aa82;
+ else if (adreno_is_a702(adreno_gpu))
+ clock_cntl_on = 0xaaaaaa82;
else
clock_cntl_on = 0x8aa8aa82;
@@ -982,6 +1043,25 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
state ? 0x5555 : 0);
}
+ if (!adreno_gpu->info->hwcg) {
+ gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
+ gpu_write(gpu, REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD, state ? 1 : 0);
+
+ if (state) {
+ gpu_write(gpu, REG_A7XX_RBBM_CGC_P2S_TRIG_CMD, 1);
+
+ if (gpu_poll_timeout(gpu, REG_A7XX_RBBM_CGC_P2S_STATUS, val,
+ val & A7XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) {
+ dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n");
+ return;
+ }
+
+ gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 0);
+ }
+
+ return;
+ }
+
val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
/* Don't re-program the registers if they are already correct */
@@ -989,14 +1069,14 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
- if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
+ if (!adreno_is_a610_family(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
- if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
+ if (!adreno_is_a610_family(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
@@ -1224,7 +1304,7 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
const u32 *regs = a6xx_protect;
unsigned i, count, count_max;
- if (adreno_is_a650(adreno_gpu)) {
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a702(adreno_gpu)) {
regs = a650_protect;
count = ARRAY_SIZE(a650_protect);
count_max = 48;
@@ -1239,7 +1319,9 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
count = ARRAY_SIZE(a660_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
- } else if (adreno_is_a730(adreno_gpu) || adreno_is_a740(adreno_gpu)) {
+ } else if (adreno_is_a730(adreno_gpu) ||
+ adreno_is_a740(adreno_gpu) ||
+ adreno_is_a750(adreno_gpu)) {
regs = a730_protect;
count = ARRAY_SIZE(a730_protect);
count_max = 48;
@@ -1292,9 +1374,8 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.ubwc_mode = 1;
}
- /* a618 is using the hw default values */
if (adreno_is_a618(gpu))
- return;
+ gpu->ubwc_config.highest_bank_bit = 14;
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
@@ -1320,6 +1401,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
}
+
+ if (adreno_is_a702(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.min_acc_len = 1;
+ gpu->ubwc_config.ubwc_mode = 2;
+ }
}
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
@@ -1453,7 +1540,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
return false;
/* A7xx is safe! */
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu))
return true;
/*
@@ -1671,7 +1758,7 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
- if (adreno_is_a610(adreno_gpu) ||
+ if (adreno_is_a610_family(adreno_gpu) ||
adreno_is_a640_family(adreno_gpu) ||
adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
@@ -1705,6 +1792,7 @@ static int hw_init(struct msm_gpu *gpu)
}
if (!(adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a702(adreno_gpu) ||
adreno_is_a730(adreno_gpu))) {
gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;
@@ -1725,7 +1813,7 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
- } else if (adreno_is_a610(adreno_gpu)) {
+ } else if (adreno_is_a610_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
} else if (!adreno_is_a7xx(adreno_gpu)) {
@@ -1740,13 +1828,18 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
+ } else if (adreno_is_a702(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 64);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 63);
} else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values,
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
*/
- if (adreno_is_a690(adreno_gpu))
+ if (adreno_is_a702(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x0000c000);
+ else if (adreno_is_a690(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
@@ -1786,7 +1879,7 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff);
else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
- else if (adreno_is_a610(adreno_gpu))
+ else if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
else
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
@@ -1822,6 +1915,9 @@ static int hw_init(struct msm_gpu *gpu)
else
gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
+ } else if (adreno_is_a702(adreno_gpu)) {
+ /* Something to do with the HLSQ cluster */
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(24));
}
if (adreno_is_a690(adreno_gpu))
@@ -2043,13 +2139,19 @@ static void a6xx_recover(struct msm_gpu *gpu)
static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
static const char *uche_clients[7] = {
"VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
};
u32 val;
- if (mid < 1 || mid > 3)
- return "UNKNOWN";
+ if (adreno_is_a7xx(adreno_gpu)) {
+ if (mid != 1 && mid != 2 && mid != 3 && mid != 8)
+ return "UNKNOWN";
+ } else {
+ if (mid < 1 || mid > 3)
+ return "UNKNOWN";
+ }
/*
* The source of the data depends on the mid ID read from FSYNR1.
@@ -2057,26 +2159,95 @@ static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
*/
val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
- /* mid = 3 is most precise and refers to only one block per client */
- if (mid == 3)
- return uche_clients[val & 7];
+ if (adreno_is_a7xx(adreno_gpu)) {
+ /* Bit 3 for mid=3 indicates BR or BV */
+ static const char *uche_clients_a7xx[16] = {
+ "BR_VFD", "BR_SP", "BR_VSC", "BR_VPC",
+ "BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP",
+ "BV_VFD", "BV_SP", "BV_VSC", "BV_VPC",
+ "BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP",
+ };
+
+ /* LPAC has the same clients as BR and BV, but because it is
+ * compute-only some of them do not exist and there are holes
+ * in the array.
+ */
+ static const char *uche_clients_lpac_a7xx[8] = {
+ "-", "LPAC_SP", "-", "-",
+ "LPAC_HLSQ", "-", "-", "LPAC_TP",
+ };
+
+ val &= GENMASK(6, 0);
+
+ /* mid=3 refers to BR or BV */
+ if (mid == 3) {
+ if (val < ARRAY_SIZE(uche_clients_a7xx))
+ return uche_clients_a7xx[val];
+ else
+ return "UCHE";
+ }
+
+ /* mid=8 refers to LPAC */
+ if (mid == 8) {
+ if (val < ARRAY_SIZE(uche_clients_lpac_a7xx))
+ return uche_clients_lpac_a7xx[val];
+ else
+ return "UCHE_LPAC";
+ }
+
+ /* mid=2 is a catchall for everything else in LPAC */
+ if (mid == 2)
+ return "UCHE_LPAC";
+
+ /* mid=1 is a catchall for everything else in BR/BV */
+ return "UCHE";
+ } else if (adreno_is_a660_family(adreno_gpu)) {
+ static const char *uche_clients_a660[8] = {
+ "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ", "TP",
+ };
- /* For mid=2 the source is TP or VFD except when the client id is 0 */
- if (mid == 2)
- return ((val & 7) == 0) ? "TP" : "TP|VFD";
+ static const char *uche_clients_a660_not[8] = {
+ "not VFD", "not SP", "not VSC", "not VPC",
+ "not HLSQ", "not PC", "not LRZ", "not TP",
+ };
- /* For mid=1 just return "UCHE" as a catchall for everything else */
- return "UCHE";
+ val &= GENMASK(6, 0);
+
+ if (mid == 3 && val < ARRAY_SIZE(uche_clients_a660))
+ return uche_clients_a660[val];
+
+ if (mid == 1 && val < ARRAY_SIZE(uche_clients_a660_not))
+ return uche_clients_a660_not[val];
+
+ return "UCHE";
+ } else {
+ /* mid = 3 is most precise and refers to only one block per client */
+ if (mid == 3)
+ return uche_clients[val & 7];
+
+ /* For mid=2 the source is TP or VFD except when the client id is 0 */
+ if (mid == 2)
+ return ((val & 7) == 0) ? "TP" : "TP|VFD";
+
+ /* For mid=1 just return "UCHE" as a catchall for everything else */
+ return "UCHE";
+ }
}
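
The a7xx branch above narrows REG_A6XX_UCHE_CLIENT_PF to its low seven bits and uses the mid value from FSYNR1 to pick a lookup table. A minimal standalone sketch of that decode, assuming the same table layout as the patch (the LPAC per-client table is collapsed to its catch-all here):

#include <stdio.h>
#include <stdint.h>

/* Illustrative copy of the a7xx BR/BV client table from the patch. */
static const char *clients_a7xx[16] = {
	"BR_VFD", "BR_SP", "BR_VSC", "BR_VPC",
	"BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP",
	"BV_VFD", "BV_SP", "BV_VSC", "BV_VPC",
	"BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP",
};

/* mid comes from FSYNR1, val from UCHE_CLIENT_PF (low 7 bits only). */
static const char *decode_a7xx_uche_client(uint32_t mid, uint32_t val)
{
	val &= 0x7f;

	if (mid == 3)	/* precise BR/BV client */
		return val < 16 ? clients_a7xx[val] : "UCHE";
	if (mid == 8)	/* LPAC; the patch indexes a per-client LPAC table here */
		return "UCHE_LPAC";
	if (mid == 2)	/* catch-all for everything else in LPAC */
		return "UCHE_LPAC";
	return "UCHE";	/* mid == 1, catch-all for everything else in BR/BV */
}

int main(void)
{
	printf("%s\n", decode_a7xx_uche_client(3, 0x0b));	/* prints BV_VPC */
	return 0;
}
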
static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
if (id == 0)
return "CP";
else if (id == 4)
return "CCU";
else if (id == 6)
return "CDP Prefetch";
+ else if (id == 7)
+ return "GMU";
+ else if (id == 5 && adreno_is_a7xx(adreno_gpu))
+ return "Flag cache";
return a6xx_uche_fault_block(gpu, id);
}
@@ -2427,7 +2598,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate : a6xx_llc_activate(a6xx_gpu);
+ adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
return ret;
}
@@ -2880,7 +3051,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
/* gpu->info only gets assigned in adreno_gpu_init() */
is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
- config->info->family == ADRENO_7XX_GEN2;
+ config->info->family == ADRENO_7XX_GEN2 ||
+ config->info->family == ADRENO_7XX_GEN3;
a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 91a564a24dbe..1f5245fc2cdc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -8,6 +8,17 @@
#include "a6xx_gpu_state.h"
#include "a6xx_gmu.xml.h"
+/* Ignore diagnostics about register tables that we aren't using yet. We don't
+ * want to modify these headers too much from their original source.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
+#include "adreno_gen7_0_0_snapshot.h"
+#include "adreno_gen7_2_0_snapshot.h"
+
+#pragma GCC diagnostic pop
+
struct a6xx_gpu_state_obj {
const void *handle;
u32 *data;
@@ -322,12 +333,98 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
}
+static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ int nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+ (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+ if (adreno_is_a650_family(to_adreno_gpu(gpu)))
+ nr_debugbus_blocks += ARRAY_SIZE(a650_debugbus_blocks);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+ /*
+ * GBIF has the same debugbus interface as the other GPU blocks and uses
+ * exactly the same ID as VBIF, so fall back to the default path when the
+ * GPU uses GBIF.
+ */
+ if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_get_debugbus_block(gpu, a6xx_state,
+ &a6xx_gbif_debugbus_block,
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus += 1;
+ }
+
+ if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
+ for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a650_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+ }
+ }
+}
+
+static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int debugbus_blocks_count, total_debugbus_blocks;
+ const u32 *debugbus_blocks;
+ int i;
+
+ if (adreno_is_a730(adreno_gpu)) {
+ debugbus_blocks = gen7_0_0_debugbus_blocks;
+ debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
+ } else {
+ BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ debugbus_blocks = gen7_2_0_debugbus_blocks;
+ debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
+ }
+
+ total_debugbus_blocks = debugbus_blocks_count +
+ ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, total_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ for (i = 0; i < debugbus_blocks_count; i++) {
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state, &a7xx_debugbus_blocks[debugbus_blocks[i]],
+ &a6xx_state->debugbus[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(a7xx_gbif_debugbus_blocks); i++) {
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state, &a7xx_gbif_debugbus_blocks[i],
+ &a6xx_state->debugbus[i + debugbus_blocks_count]);
+ }
+ }
+}
+
static void a6xx_get_debugbus(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct resource *res;
void __iomem *cxdbg = NULL;
- int nr_debugbus_blocks;
/* Set up the GX debug bus */
@@ -382,51 +479,14 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
- nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
- (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
-
- if (adreno_is_a650_family(to_adreno_gpu(gpu)))
- nr_debugbus_blocks += ARRAY_SIZE(a650_debugbus_blocks);
-
- a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
- sizeof(*a6xx_state->debugbus));
-
- if (a6xx_state->debugbus) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
- a6xx_get_debugbus_block(gpu,
- a6xx_state,
- &a6xx_debugbus_blocks[i],
- &a6xx_state->debugbus[i]);
-
- a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
-
- /*
- * GBIF has same debugbus as of other GPU blocks, fall back to
- * default path if GPU uses GBIF, also GBIF uses exactly same
- * ID as of VBIF.
- */
- if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
- a6xx_get_debugbus_block(gpu, a6xx_state,
- &a6xx_gbif_debugbus_block,
- &a6xx_state->debugbus[i]);
-
- a6xx_state->nr_debugbus += 1;
- }
-
-
- if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
- for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++)
- a6xx_get_debugbus_block(gpu,
- a6xx_state,
- &a650_debugbus_blocks[i],
- &a6xx_state->debugbus[i]);
- }
+ if (adreno_is_a7xx(adreno_gpu)) {
+ a7xx_get_debugbus_blocks(gpu, a6xx_state);
+ } else {
+ a6xx_get_debugbus_blocks(gpu, a6xx_state);
}
/* Dump the VBIF debugbus on applicable targets */
- if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ if (!a6xx_has_gbif(adreno_gpu)) {
a6xx_state->vbif_debugbus =
state_kcalloc(a6xx_state, 1,
sizeof(*a6xx_state->vbif_debugbus));
@@ -437,22 +497,34 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
}
if (cxdbg) {
+ unsigned nr_cx_debugbus_blocks;
+ const struct a6xx_debugbus_block *cx_debugbus_blocks;
+
+ if (adreno_is_a7xx(adreno_gpu)) {
+ BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
+ cx_debugbus_blocks = a7xx_cx_debugbus_blocks;
+ nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks);
+ } else {
+ cx_debugbus_blocks = a6xx_cx_debugbus_blocks;
+ nr_cx_debugbus_blocks = ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
a6xx_state->cx_debugbus =
state_kcalloc(a6xx_state,
- ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ nr_cx_debugbus_blocks,
sizeof(*a6xx_state->cx_debugbus));
if (a6xx_state->cx_debugbus) {
int i;
- for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ for (i = 0; i < nr_cx_debugbus_blocks; i++)
a6xx_get_cx_debugbus_block(cxdbg,
a6xx_state,
- &a6xx_cx_debugbus_blocks[i],
+ &cx_debugbus_blocks[i],
&a6xx_state->cx_debugbus[i]);
a6xx_state->nr_cx_debugbus =
- ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ nr_cx_debugbus_blocks;
}
iounmap(cxdbg);
@@ -508,6 +580,48 @@ static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
datasize);
}
+static void a7xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct gen7_sptp_cluster_registers *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A7XX_SP_READ_SEL,
+ A7XX_SP_READ_SEL_LOCATION(dbgahb->location_id) |
+ A7XX_SP_READ_SEL_PIPE(dbgahb->pipe_id) |
+ A7XX_SP_READ_SEL_STATETYPE(dbgahb->statetype));
+
+ for (i = 0; dbgahb->regs[i] != UINT_MAX; i += 2) {
+ int count = RANGE(dbgahb->regs, i);
+ u32 offset = REG_A7XX_SP_AHB_READ_APERTURE +
+ dbgahb->regs[i] - dbgahb->regbase;
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
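
The a7xx crashdumper helpers above all follow the same pattern: walk a UINT_MAX-terminated list of inclusive (first, last) register pairs, emit one crashdumper read per pair, and advance the output cursor by the pair's length. A rough standalone sketch of that accounting, assuming RANGE() expands to the inclusive count of a pair; the pair values are hypothetical and the real CRASHDUMP_* emitters are not modeled:

#include <stdio.h>
#include <stdint.h>

#define END UINT32_MAX

/* Inclusive length of the pair starting at regs[i], mirroring the assumed RANGE(). */
static uint32_t range(const uint32_t *regs, int i)
{
	return regs[i + 1] - regs[i] + 1;
}

int main(void)
{
	/* Hypothetical pair list: 0x100..0x103 and 0x200..0x200. */
	const uint32_t regs[] = { 0x100, 0x103, 0x200, 0x200, END, END };
	uint64_t out = 0;	/* stands in for dumper->iova + A6XX_CD_DATA_OFFSET */
	uint32_t regcount = 0;

	for (int i = 0; regs[i] != END; i += 2) {
		uint32_t count = range(regs, i);

		/* here the kernel emits CRASHDUMP_READ(in, regs[i], count, out) */
		out += count * sizeof(uint32_t);
		regcount += count;
	}

	/* datasize, which must stay under A6XX_CD_DATA_SIZE in the kernel */
	printf("regcount=%u datasize=%llu bytes\n",
	       (unsigned)regcount, (unsigned long long)out);
	return 0;
}
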
+
static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
@@ -529,6 +643,39 @@ static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
&a6xx_state->dbgahb_clusters[i], dumper);
}
+static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+ const struct gen7_sptp_cluster_registers *dbgahb_clusters;
+ unsigned dbgahb_clusters_size;
+
+ if (adreno_is_a730(adreno_gpu)) {
+ dbgahb_clusters = gen7_0_0_sptp_clusters;
+ dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters);
+ } else {
+ BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ dbgahb_clusters = gen7_2_0_sptp_clusters;
+ dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters);
+ }
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ dbgahb_clusters_size,
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = dbgahb_clusters_size;
+
+ for (i = 0; i < dbgahb_clusters_size; i++)
+ a7xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
/* Read a data cluster from the CP aperture with the crashdumper */
static void a6xx_get_cluster(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -590,6 +737,51 @@ static void a6xx_get_cluster(struct msm_gpu *gpu,
datasize);
}
+static void a7xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct gen7_cluster_registers *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel)
+ in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
+
+ in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
+ A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
+ A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
+ A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
+
+ for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
+ int count = RANGE(cluster->regs, i);
+
+ in += CRASHDUMP_READ(in, cluster->regs[i],
+ count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
static void a6xx_get_clusters(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
@@ -609,6 +801,37 @@ static void a6xx_get_clusters(struct msm_gpu *gpu,
&a6xx_state->clusters[i], dumper);
}
+static void a7xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+ const struct gen7_cluster_registers *clusters;
+ unsigned clusters_size;
+
+ if (adreno_is_a730(adreno_gpu)) {
+ clusters = gen7_0_0_clusters;
+ clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
+ } else {
+ BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ clusters = gen7_2_0_clusters;
+ clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
+ }
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ clusters_size, sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = clusters_size;
+
+ for (i = 0; i < clusters_size; i++)
+ a7xx_get_cluster(gpu, a6xx_state, &clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
static void a6xx_get_shader_block(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -617,6 +840,7 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
int i;
@@ -629,6 +853,8 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+
+ out += block->size * sizeof(u32);
}
CRASHDUMP_FINI(in);
@@ -641,6 +867,56 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
datasize);
}
+static void a7xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct gen7_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize = block->size * block->num_sps * block->num_usptps * sizeof(u32);
+ int i, j;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (adreno_is_a730(adreno_gpu)) {
+ gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3);
+ }
+
+ for (i = 0; i < block->num_sps; i++) {
+ for (j = 0; j < block->num_usptps; j++) {
+ in += CRASHDUMP_WRITE(in, REG_A7XX_SP_READ_SEL,
+ A7XX_SP_READ_SEL_LOCATION(block->location) |
+ A7XX_SP_READ_SEL_PIPE(block->pipeid) |
+ A7XX_SP_READ_SEL_STATETYPE(block->statetype) |
+ A7XX_SP_READ_SEL_USPTP(j) |
+ A7XX_SP_READ_SEL_SPTP(i));
+
+ in += CRASHDUMP_READ(in, REG_A7XX_SP_AHB_READ_APERTURE,
+ block->size, out);
+
+ out += block->size * sizeof(u32);
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ goto out;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+
+out:
+ if (adreno_is_a730(adreno_gpu)) {
+ gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0);
+ }
+}
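
The per-block dump above is two nested loops, so the capture size grows with both dimensions. A quick check of the arithmetic with an illustrative block (the real sizes come from the gen7_x_x_shader_blocks tables in the snapshot headers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical shader block: 0x200 dwords per USPTP, 4 SPs, 2 USPTPs each. */
	uint32_t size = 0x200, num_sps = 4, num_usptps = 2;
	size_t datasize = (size_t)size * num_sps * num_usptps * sizeof(uint32_t);

	/* One SP_READ_SEL write plus one aperture read per (sp, usptp) pair. */
	printf("reads=%u, datasize=%zu bytes\n",
	       (unsigned)(num_sps * num_usptps), datasize);
	return 0;
}
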
+
static void a6xx_get_shaders(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
@@ -660,6 +936,37 @@ static void a6xx_get_shaders(struct msm_gpu *gpu,
&a6xx_state->shaders[i], dumper);
}
+static void a7xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct gen7_shader_block *shader_blocks;
+ unsigned num_shader_blocks;
+ int i;
+
+ if (adreno_is_a730(adreno_gpu)) {
+ shader_blocks = gen7_0_0_shader_blocks;
+ num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
+ } else {
+ BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ shader_blocks = gen7_2_0_shader_blocks;
+ num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
+ }
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ num_shader_blocks, sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = num_shader_blocks;
+
+ for (i = 0; i < num_shader_blocks; i++)
+ a7xx_get_shader_block(gpu, a6xx_state, &shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
/* Read registers from behind the HLSQ aperture with the crashdumper */
static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -741,6 +1048,44 @@ static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
regcount * sizeof(u32));
}
+static void a7xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct gen7_reg_list *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->sel)
+ in += CRASHDUMP_WRITE(in, regs->sel->cd_reg, regs->sel->val);
+
+ for (i = 0; regs->regs[i] != UINT_MAX; i += 2) {
+ u32 count = RANGE(regs->regs, i);
+
+ in += CRASHDUMP_READ(in, regs->regs[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs->regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
/* Read a block of registers via AHB */
static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -772,6 +1117,41 @@ static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
}
}
+static void a7xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const u32 *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; regs[i] != UINT_MAX; i += 2)
+ regcount += RANGE(regs, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; regs[i] != UINT_MAX; i += 2) {
+ u32 count = RANGE(regs, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu, regs[i] + j);
+ }
+}
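
Unlike the crashdumper paths, a7xx_get_ahb_gpu_registers() sizes the buffer in a first pass over the pair list and then fills it with plain AHB reads. A compressed sketch of that two-pass shape, with the GPU read stubbed out (the register list is the small AHB list this patch drops from the header, and the returned values are placeholders):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define END UINT32_MAX

/* Placeholder for gpu_read(); real hardware values are obviously not modeled. */
static uint32_t gpu_read_stub(uint32_t reg)
{
	return reg ^ 0xa5a5a5a5;
}

int main(void)
{
	/* RBBM_STATUS and RBBM_STATUS2-3, as in the removed a7xx_ahb_registers[]. */
	const uint32_t regs[] = { 0x210, 0x210, 0x212, 0x213, END, END };
	size_t regcount = 0, index = 0;

	/* Pass 1: size the allocation. */
	for (int i = 0; regs[i] != END; i += 2)
		regcount += regs[i + 1] - regs[i] + 1;

	uint32_t *data = calloc(regcount, sizeof(*data));
	if (!data)
		return 1;

	/* Pass 2: read every register in every inclusive range. */
	for (int i = 0; regs[i] != END; i += 2)
		for (uint32_t r = regs[i]; r <= regs[i + 1]; r++)
			data[index++] = gpu_read_stub(r);

	printf("captured %zu registers\n", regcount);
	free(data);
	return 0;
}
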
+
+static void a7xx_get_ahb_gpu_reglist(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct gen7_reg_list *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ if (regs->sel)
+ gpu_write(gpu, regs->sel->host_reg, regs->sel->val);
+
+ a7xx_get_ahb_gpu_registers(gpu, a6xx_state, regs->regs, obj);
+}
+
/* Read a block of GMU registers */
static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -902,20 +1282,11 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state->nr_registers = count;
- if (adreno_is_a7xx(adreno_gpu))
- a6xx_get_ahb_gpu_registers(gpu,
- a6xx_state, &a7xx_ahb_reglist,
- &a6xx_state->registers[index++]);
- else
- a6xx_get_ahb_gpu_registers(gpu,
- a6xx_state, &a6xx_ahb_reglist,
- &a6xx_state->registers[index++]);
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist,
+ &a6xx_state->registers[index++]);
- if (adreno_is_a7xx(adreno_gpu))
- a6xx_get_ahb_gpu_registers(gpu,
- a6xx_state, &a7xx_gbif_reglist,
- &a6xx_state->registers[index++]);
- else if (a6xx_has_gbif(adreno_gpu))
+ if (a6xx_has_gbif(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_gbif_reglist,
&a6xx_state->registers[index++]);
@@ -951,6 +1322,80 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
dumper);
}
+#define A7XX_PRE_CRASHDUMPER_SIZE 1
+#define A7XX_POST_CRASHDUMPER_SIZE 1
+static void a7xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count;
+ int index = 0;
+ const u32 *pre_crashdumper_regs;
+ const struct gen7_reg_list *reglist;
+
+ if (adreno_is_a730(adreno_gpu)) {
+ reglist = gen7_0_0_reg_list;
+ pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
+ } else {
+ BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ reglist = gen7_2_0_reg_list;
+ pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
+ }
+
+ count = A7XX_PRE_CRASHDUMPER_SIZE + A7XX_POST_CRASHDUMPER_SIZE;
+
+ /* The downstream reglist contains registers in other memory regions
+ * (cx_misc/cx_mem and cx_dbgc) and we need to plumb through their
+ * offsets and map them to read them on the CPU. For now only read the
+ * first region which is the main one.
+ */
+ if (dumper) {
+ for (i = 0; reglist[i].regs; i++)
+ count++;
+ } else {
+ count++;
+ }
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ a7xx_get_ahb_gpu_registers(gpu, a6xx_state, pre_crashdumper_regs,
+ &a6xx_state->registers[index++]);
+
+ if (!dumper) {
+ a7xx_get_ahb_gpu_reglist(gpu,
+ a6xx_state, &reglist[0],
+ &a6xx_state->registers[index++]);
+ return;
+ }
+
+ for (i = 0; reglist[i].regs; i++)
+ a7xx_get_crashdumper_registers(gpu,
+ a6xx_state, &reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
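
The slot accounting above is easy to misread: one entry is reserved for the pre-crashdumper block, one per reglist entry when the crashdumper is available (or a single AHB fallback entry when it is not), and one trailing entry that a7xx_get_post_crashdumper_registers() fills into registers[nr_registers - 1] below. A tiny sketch of that bookkeeping with a hypothetical reglist length:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int nr_reglists = 3;	/* hypothetical length of a gen7_x_x_reg_list */
	bool have_dumper = true;

	int count = 1 /* pre-crashdumper */ + 1 /* post-crashdumper */;
	count += have_dumper ? nr_reglists : 1;

	/* index walks slots 0..count-2; the last slot is kept for the post dump */
	printf("allocate %d register objects\n", count);
	return 0;
}
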
+
+static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs;
+
+ BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
+ regs = gen7_0_0_post_crashdumper_registers;
+
+ a7xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, regs,
+ &a6xx_state->registers[a6xx_state->nr_registers - 1]);
+}
+
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
{
/* The value at [16:31] is in 4dword units. Convert it to dwords */
@@ -1045,8 +1490,10 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i, indexed_count, mempool_count;
+ BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
mempool_count = ARRAY_SIZE(a7xx_cp_bv_mempool_indexed);
@@ -1068,8 +1515,8 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
/* Get the contents of the CP_BV mempool */
for (i = 0; i < mempool_count; i++)
- a6xx_get_indexed_regs(gpu, a6xx_state, a7xx_cp_bv_mempool_indexed,
- &a6xx_state->indexed_regs[indexed_count - 1 + i]);
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_cp_bv_mempool_indexed[i],
+ &a6xx_state->indexed_regs[indexed_count + i]);
gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(2), 0);
gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, BIT(2), 0);
@@ -1109,13 +1556,10 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
return &a6xx_state->base;
/* Get the banks of indexed registers */
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a7xx(adreno_gpu))
a7xx_get_indexed_registers(gpu, a6xx_state);
- /* Further codeflow is untested on A7xx. */
- return &a6xx_state->base;
- }
-
- a6xx_get_indexed_registers(gpu, a6xx_state);
+ else
+ a6xx_get_indexed_registers(gpu, a6xx_state);
/*
* Try to initialize the crashdumper, if we are not dumping state
@@ -1128,14 +1572,28 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
dumper = &_dumper;
}
- a6xx_get_registers(gpu, a6xx_state, dumper);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ a7xx_get_registers(gpu, a6xx_state, dumper);
- if (dumper) {
- a6xx_get_shaders(gpu, a6xx_state, dumper);
- a6xx_get_clusters(gpu, a6xx_state, dumper);
- a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
+ if (dumper) {
+ a7xx_get_shaders(gpu, a6xx_state, dumper);
+ a7xx_get_clusters(gpu, a6xx_state, dumper);
+ a7xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
+
+ msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ }
+
+ a7xx_get_post_crashdumper_registers(gpu, a6xx_state);
+ } else {
+ a6xx_get_registers(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ if (dumper) {
+ a6xx_get_shaders(gpu, a6xx_state, dumper);
+ a6xx_get_clusters(gpu, a6xx_state, dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
+
+ msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ }
}
if (snapshot_debugbus)
@@ -1203,6 +1661,35 @@ static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
}
}
+static void a7xx_show_registers_indented(const u32 *registers, u32 *data,
+ struct drm_printer *p, unsigned indent)
+{
+ int i, index = 0;
+
+ for (i = 0; registers[i] != UINT_MAX; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ int k;
+
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ for (k = 0; k < indent; k++)
+ drm_printf(p, " ");
+ drm_printf(p, "- { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
+static void a7xx_show_registers(const u32 *registers, u32 *data, struct drm_printer *p)
+{
+ a7xx_show_registers_indented(registers, data, p, 1);
+}
+
static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
{
char out[ASCII85_BUFSZ];
@@ -1258,6 +1745,36 @@ static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
}
}
+static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct gen7_shader_block *block = obj->handle;
+ int i, j;
+ u32 *data = obj->data;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
+ print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
+
+ for (i = 0; i < block->num_sps; i++) {
+ drm_printf(p, " - sp: %d\n", i);
+
+ for (j = 0; j < block->num_usptps; j++) {
+ drm_printf(p, " - usptp: %d\n", j);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2, data);
+
+ data += block->size;
+ }
+ }
+}
+
static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
struct drm_printer *p)
{
@@ -1308,6 +1825,34 @@ static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
}
}
+static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct gen7_sptp_cluster_registers *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
+ print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
+ drm_printf(p, " - context: %d\n", dbgahb->context_id);
+ a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
+ }
+}
+
+static void a7xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct gen7_cluster_registers *cluster = obj->handle;
+
+ if (cluster) {
+ int context = (cluster->context_id == STATE_FORCE_CTXT_1) ? 1 : 0;
+
+ print_name(p, " - pipe: ", a7xx_pipe_names[cluster->pipe_id]);
+ print_name(p, " - cluster-name: ", a7xx_cluster_names[cluster->cluster_id]);
+ drm_printf(p, " - context: %d\n", context);
+ a7xx_show_registers_indented(cluster->regs, obj->data, p, 4);
+ }
+}
+
static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
struct drm_printer *p)
{
@@ -1369,6 +1914,7 @@ static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu_state *a6xx_state = container_of(state,
struct a6xx_gpu_state, base);
int i;
@@ -1421,12 +1967,17 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_puts(p, "registers:\n");
for (i = 0; i < a6xx_state->nr_registers; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
- const struct a6xx_registers *regs = obj->handle;
if (!obj->handle)
continue;
- a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ a7xx_show_registers(obj->handle, obj->data, p);
+ } else {
+ const struct a6xx_registers *regs = obj->handle;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
}
drm_puts(p, "registers-gmu:\n");
@@ -1445,15 +1996,27 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
drm_puts(p, "shader-blocks:\n");
- for (i = 0; i < a6xx_state->nr_shaders; i++)
- a6xx_show_shader(&a6xx_state->shaders[i], p);
+ for (i = 0; i < a6xx_state->nr_shaders; i++) {
+ if (adreno_is_a7xx(adreno_gpu))
+ a7xx_show_shader(&a6xx_state->shaders[i], p);
+ else
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+ }
drm_puts(p, "clusters:\n");
- for (i = 0; i < a6xx_state->nr_clusters; i++)
- a6xx_show_cluster(&a6xx_state->clusters[i], p);
+ for (i = 0; i < a6xx_state->nr_clusters; i++) {
+ if (adreno_is_a7xx(adreno_gpu))
+ a7xx_show_cluster(&a6xx_state->clusters[i], p);
+ else
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+ }
- for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
- a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++) {
+ if (adreno_is_a7xx(adreno_gpu))
+ a7xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+ else
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+ }
drm_puts(p, "debugbus:\n");
a6xx_show_debugbus(a6xx_state, p);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 9560fc1b858a..5ddd32063bcc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef _A6XX_CRASH_DUMP_H_
#define _A6XX_CRASH_DUMP_H_
@@ -51,6 +54,7 @@ static const u32 a6xx_pc_vs_cluster[] = {
#define CLUSTER_SP_PS 4
#define CLUSTER_PS 5
#define CLUSTER_VPC_PS 6
+#define CLUSTER_NONE 7
#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
{ .id = _id, .name = #_id,\
@@ -337,27 +341,6 @@ static const struct a6xx_registers a6xx_vbif_reglist =
static const struct a6xx_registers a6xx_gbif_reglist =
REGS(a6xx_gbif_registers, 0, 0);
-static const u32 a7xx_ahb_registers[] = {
- /* RBBM_STATUS */
- 0x210, 0x210,
- /* RBBM_STATUS2-3 */
- 0x212, 0x213,
-};
-
-static const u32 a7xx_gbif_registers[] = {
- 0x3c00, 0x3c0b,
- 0x3c40, 0x3c42,
- 0x3c45, 0x3c47,
- 0x3c49, 0x3c4a,
- 0x3cc0, 0x3cd1,
-};
-
-static const struct a6xx_registers a7xx_ahb_reglist=
- REGS(a7xx_ahb_registers, 0, 0);
-
-static const struct a6xx_registers a7xx_gbif_reglist =
- REGS(a7xx_gbif_registers, 0, 0);
-
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -534,4 +517,288 @@ static const struct a6xx_debugbus_block a650_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_SPTP_5, 0x100),
};
+static const struct a6xx_debugbus_block a7xx_gbif_debugbus_blocks[] = {
+ DEBUGBUS(A7XX_DBGBUS_GBIF_CX, 0x100),
+ DEBUGBUS(A7XX_DBGBUS_GBIF_GX, 0x100),
+};
+
+static const struct a6xx_debugbus_block a7xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A7XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A7XX_DBGBUS_CX, 0x100),
+ DEBUGBUS(A7XX_DBGBUS_GBIF_CX, 0x100),
+};
+
+#define STATE_NON_CONTEXT 0
+#define STATE_TOGGLE_CTXT 1
+#define STATE_FORCE_CTXT_0 2
+#define STATE_FORCE_CTXT_1 3
+
+struct gen7_sel_reg {
+ unsigned int host_reg;
+ unsigned int cd_reg;
+ unsigned int val;
+};
+
+struct gen7_cluster_registers {
+ /* cluster_id: Cluster identifier */
+ int cluster_id;
+ /* pipe_id: Pipe Identifier */
+ int pipe_id;
+ /* context_id: one of STATE_ that identifies the context to dump */
+ int context_id;
+ /* regs: Pointer to an array of register pairs */
+ const u32 *regs;
+ /* sel: Pointer to a selector register to write before reading */
+ const struct gen7_sel_reg *sel;
+};
+
+struct gen7_sptp_cluster_registers {
+ /* cluster_id: Cluster identifier */
+ enum a7xx_cluster cluster_id;
+ /* statetype: SP block state type for the cluster */
+ enum a7xx_statetype_id statetype;
+ /* pipe_id: Pipe identifier */
+ enum a7xx_pipe pipe_id;
+ /* context_id: Context identifier */
+ int context_id;
+ /* location_id: Location identifier */
+ enum a7xx_state_location location_id;
+ /* regs: Pointer to the list of register pairs to read */
+ const u32 *regs;
+ /* regbase: Dword offset of the register block in the GPU register space */
+ unsigned int regbase;
+};
+
+struct gen7_shader_block {
+ /* statetype: Type identifier for the block */
+ u32 statetype;
+ /* size: Size of the block (in dwords) */
+ u32 size;
+ /* num_sps: The number of SPs to dump */
+ u32 num_sps;
+ /* num_usptps: The number of USPTPs to dump */
+ u32 num_usptps;
+ /* pipe_id: Pipe identifier for the block data */
+ u32 pipeid;
+ /* location: Location identifier for the block data */
+ u32 location;
+};
+
+struct gen7_reg_list {
+ const u32 *regs;
+ const struct gen7_sel_reg *sel;
+};
+
+/* adreno_gen7_x_y_snapshot.h defines which debugbus blocks a given family has, but the
+ * list of debugbus blocks is global on a7xx.
+ */
+
+#define A7XX_DEBUGBUS(_id, _count) [_id] = { .id = _id, .name = #_id, .count = _count },
+static const struct a6xx_debugbus_block a7xx_debugbus_blocks[] = {
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CP_0_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CP_0_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RBBM, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_GBIF_GX, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_GBIF_CX, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UCHE_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UCHE_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TESS_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TESS_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_PC_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_PC_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFDP_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFDP_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VPC_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VPC_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TSE_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TSE_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RAS_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RAS_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VSC, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_COM_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_LRZ_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_LRZ_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UFC_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UFC_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_GMU_GX, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_DBGC, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CX, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_GMU_CX, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_GPC_BR, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_GPC_BV, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_LARC, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_SPTP, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RB_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RB_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RB_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RB_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RB_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_RB_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UCHE_WRAPPER, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCU_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCU_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCU_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCU_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCU_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCU_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_6, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BR_7, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BV_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BV_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BV_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VFD_BV_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USP_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USP_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USP_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USP_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USP_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USP_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_6, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_7, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_8, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_9, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_10, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_TP_11, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_6, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_7, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_8, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_9, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_10, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_USPTP_11, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCHE_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCHE_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CCHE_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VPC_DSTR_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VPC_DSTR_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_VPC_DSTR_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_DP_STR_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_DP_STR_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_DP_STR_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_DP_STR_3, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_DP_STR_4, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_HLSQ_DP_STR_5, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UFC_DSTR_0, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UFC_DSTR_1, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_UFC_DSTR_2, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CGC_SUBCORE, 0x100)
+ A7XX_DEBUGBUS(A7XX_DBGBUS_CGC_CORE, 0x100)
+};
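
Because of the designated initializers above, the global table is indexed directly by enum value, and the per-family lists in the snapshot headers are just arrays of those enum values; a7xx_get_debugbus_blocks() then does a double index, a7xx_debugbus_blocks[debugbus_blocks[i]]. A self-contained sketch of that indirection with made-up enum values:

#include <stdio.h>

enum dbgbus_id { DBGBUS_CP_0_0 = 0, DBGBUS_RBBM = 2, DBGBUS_MAX };

struct debugbus_block {
	enum dbgbus_id id;
	const char *name;
	unsigned int count;
};

#define DEBUGBUS(_id, _count) [_id] = { .id = _id, .name = #_id, .count = _count },

/* Global table indexed by enum value, like a7xx_debugbus_blocks[]. */
static const struct debugbus_block blocks[DBGBUS_MAX] = {
	DEBUGBUS(DBGBUS_CP_0_0, 0x100)
	DEBUGBUS(DBGBUS_RBBM, 0x100)
};

/* Per-family list of enum values, like gen7_0_0_debugbus_blocks[]. */
static const enum dbgbus_id family_list[] = { DBGBUS_RBBM, DBGBUS_CP_0_0 };

int main(void)
{
	for (size_t i = 0; i < sizeof(family_list) / sizeof(family_list[0]); i++) {
		const struct debugbus_block *b = &blocks[family_list[i]];

		printf("%s: 0x%x dwords\n", b->name, b->count);
	}
	return 0;
}
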
+
+#define A7XX_NAME(enumval) [enumval] = #enumval
+static const char *a7xx_statetype_names[] = {
+ A7XX_NAME(A7XX_TP0_NCTX_REG),
+ A7XX_NAME(A7XX_TP0_CTX0_3D_CVS_REG),
+ A7XX_NAME(A7XX_TP0_CTX0_3D_CPS_REG),
+ A7XX_NAME(A7XX_TP0_CTX1_3D_CVS_REG),
+ A7XX_NAME(A7XX_TP0_CTX1_3D_CPS_REG),
+ A7XX_NAME(A7XX_TP0_CTX2_3D_CPS_REG),
+ A7XX_NAME(A7XX_TP0_CTX3_3D_CPS_REG),
+ A7XX_NAME(A7XX_TP0_TMO_DATA),
+ A7XX_NAME(A7XX_TP0_SMO_DATA),
+ A7XX_NAME(A7XX_TP0_MIPMAP_BASE_DATA),
+ A7XX_NAME(A7XX_SP_NCTX_REG),
+ A7XX_NAME(A7XX_SP_CTX0_3D_CVS_REG),
+ A7XX_NAME(A7XX_SP_CTX0_3D_CPS_REG),
+ A7XX_NAME(A7XX_SP_CTX1_3D_CVS_REG),
+ A7XX_NAME(A7XX_SP_CTX1_3D_CPS_REG),
+ A7XX_NAME(A7XX_SP_CTX2_3D_CPS_REG),
+ A7XX_NAME(A7XX_SP_CTX3_3D_CPS_REG),
+ A7XX_NAME(A7XX_SP_INST_DATA),
+ A7XX_NAME(A7XX_SP_INST_DATA_1),
+ A7XX_NAME(A7XX_SP_LB_0_DATA),
+ A7XX_NAME(A7XX_SP_LB_1_DATA),
+ A7XX_NAME(A7XX_SP_LB_2_DATA),
+ A7XX_NAME(A7XX_SP_LB_3_DATA),
+ A7XX_NAME(A7XX_SP_LB_4_DATA),
+ A7XX_NAME(A7XX_SP_LB_5_DATA),
+ A7XX_NAME(A7XX_SP_LB_6_DATA),
+ A7XX_NAME(A7XX_SP_LB_7_DATA),
+ A7XX_NAME(A7XX_SP_CB_RAM),
+ A7XX_NAME(A7XX_SP_LB_13_DATA),
+ A7XX_NAME(A7XX_SP_LB_14_DATA),
+ A7XX_NAME(A7XX_SP_INST_TAG),
+ A7XX_NAME(A7XX_SP_INST_DATA_2),
+ A7XX_NAME(A7XX_SP_TMO_TAG),
+ A7XX_NAME(A7XX_SP_SMO_TAG),
+ A7XX_NAME(A7XX_SP_STATE_DATA),
+ A7XX_NAME(A7XX_SP_HWAVE_RAM),
+ A7XX_NAME(A7XX_SP_L0_INST_BUF),
+ A7XX_NAME(A7XX_SP_LB_8_DATA),
+ A7XX_NAME(A7XX_SP_LB_9_DATA),
+ A7XX_NAME(A7XX_SP_LB_10_DATA),
+ A7XX_NAME(A7XX_SP_LB_11_DATA),
+ A7XX_NAME(A7XX_SP_LB_12_DATA),
+ A7XX_NAME(A7XX_HLSQ_DATAPATH_DSTR_META),
+ A7XX_NAME(A7XX_HLSQ_L2STC_TAG_RAM),
+ A7XX_NAME(A7XX_HLSQ_L2STC_INFO_CMD),
+ A7XX_NAME(A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM),
+ A7XX_NAME(A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM),
+ A7XX_NAME(A7XX_HLSQ_CHUNK_CVS_RAM),
+ A7XX_NAME(A7XX_HLSQ_CHUNK_CPS_RAM),
+ A7XX_NAME(A7XX_HLSQ_CHUNK_CVS_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_CHUNK_CPS_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_ICB_CVS_CB_BASE_TAG),
+ A7XX_NAME(A7XX_HLSQ_ICB_CPS_CB_BASE_TAG),
+ A7XX_NAME(A7XX_HLSQ_CVS_MISC_RAM),
+ A7XX_NAME(A7XX_HLSQ_CPS_MISC_RAM),
+ A7XX_NAME(A7XX_HLSQ_CPS_MISC_RAM_1),
+ A7XX_NAME(A7XX_HLSQ_INST_RAM),
+ A7XX_NAME(A7XX_HLSQ_GFX_CVS_CONST_RAM),
+ A7XX_NAME(A7XX_HLSQ_GFX_CPS_CONST_RAM),
+ A7XX_NAME(A7XX_HLSQ_CVS_MISC_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_CPS_MISC_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_INST_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_GFX_LOCAL_MISC_RAM),
+ A7XX_NAME(A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG),
+ A7XX_NAME(A7XX_HLSQ_INST_RAM_1),
+ A7XX_NAME(A7XX_HLSQ_STPROC_META),
+ A7XX_NAME(A7XX_HLSQ_BV_BE_META),
+ A7XX_NAME(A7XX_HLSQ_INST_RAM_2),
+ A7XX_NAME(A7XX_HLSQ_DATAPATH_META),
+ A7XX_NAME(A7XX_HLSQ_FRONTEND_META),
+ A7XX_NAME(A7XX_HLSQ_INDIRECT_META),
+ A7XX_NAME(A7XX_HLSQ_BACKEND_META),
+};
+
+static const char *a7xx_pipe_names[] = {
+ A7XX_NAME(A7XX_PIPE_NONE),
+ A7XX_NAME(A7XX_PIPE_BR),
+ A7XX_NAME(A7XX_PIPE_BV),
+ A7XX_NAME(A7XX_PIPE_LPAC),
+};
+
+static const char *a7xx_cluster_names[] = {
+ A7XX_NAME(A7XX_CLUSTER_NONE),
+ A7XX_NAME(A7XX_CLUSTER_FE),
+ A7XX_NAME(A7XX_CLUSTER_SP_VS),
+ A7XX_NAME(A7XX_CLUSTER_PC_VS),
+ A7XX_NAME(A7XX_CLUSTER_GRAS),
+ A7XX_NAME(A7XX_CLUSTER_SP_PS),
+ A7XX_NAME(A7XX_CLUSTER_VPC_PS),
+ A7XX_NAME(A7XX_CLUSTER_PS),
+};
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 51c320a2e5c0..fbc27930e550 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -3,50 +3,27 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2023 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
enum chip {
A2XX = 2,
@@ -141,11 +118,13 @@ enum a3xx_rop_code {
ROP_COPY_INVERTED = 3,
ROP_AND_REVERSE = 4,
ROP_INVERT = 5,
+ ROP_XOR = 6,
ROP_NAND = 7,
ROP_AND = 8,
ROP_EQUIV = 9,
ROP_NOOP = 10,
ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
ROP_OR_REVERSE = 13,
ROP_OR = 14,
ROP_SET = 15,
@@ -258,7 +237,8 @@ static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
{
- return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
}
#define REG_AXXX_CP_RB_RPTR 0x000001c4
@@ -471,174 +451,34 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
#define REG_AXXX_CP_STAT 0x0000047f
-#define AXXX_CP_STAT_CP_BUSY__MASK 0x80000000
-#define AXXX_CP_STAT_CP_BUSY__SHIFT 31
-static inline uint32_t AXXX_CP_STAT_CP_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CP_BUSY__SHIFT) & AXXX_CP_STAT_CP_BUSY__MASK;
-}
-#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK 0x40000000
-#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT 30
-static inline uint32_t AXXX_CP_STAT_VS_EVENT_FIFO_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK;
-}
-#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK 0x20000000
-#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT 29
-static inline uint32_t AXXX_CP_STAT_PS_EVENT_FIFO_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK 0x10000000
-#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT 28
-static inline uint32_t AXXX_CP_STAT_CF_EVENT_FIFO_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK 0x08000000
-#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT 27
-static inline uint32_t AXXX_CP_STAT_RB_EVENT_FIFO_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK;
-}
-#define AXXX_CP_STAT_ME_BUSY__MASK 0x04000000
-#define AXXX_CP_STAT_ME_BUSY__SHIFT 26
-static inline uint32_t AXXX_CP_STAT_ME_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_ME_BUSY__SHIFT) & AXXX_CP_STAT_ME_BUSY__MASK;
-}
-#define AXXX_CP_STAT_MIU_WR_C_BUSY__MASK 0x02000000
-#define AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT 25
-static inline uint32_t AXXX_CP_STAT_MIU_WR_C_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT) & AXXX_CP_STAT_MIU_WR_C_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CP_3D_BUSY__MASK 0x00800000
-#define AXXX_CP_STAT_CP_3D_BUSY__SHIFT 23
-static inline uint32_t AXXX_CP_STAT_CP_3D_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CP_3D_BUSY__SHIFT) & AXXX_CP_STAT_CP_3D_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CP_NRT_BUSY__MASK 0x00400000
-#define AXXX_CP_STAT_CP_NRT_BUSY__SHIFT 22
-static inline uint32_t AXXX_CP_STAT_CP_NRT_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CP_NRT_BUSY__SHIFT) & AXXX_CP_STAT_CP_NRT_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK 0x00200000
-#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT 21
-static inline uint32_t AXXX_CP_STAT_RBIU_SCRATCH_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RCIU_ME_BUSY__MASK 0x00100000
-#define AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT 20
-static inline uint32_t AXXX_CP_STAT_RCIU_ME_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_ME_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RCIU_PFP_BUSY__MASK 0x00080000
-#define AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT 19
-static inline uint32_t AXXX_CP_STAT_RCIU_PFP_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_PFP_BUSY__MASK;
-}
-#define AXXX_CP_STAT_MEQ_RING_BUSY__MASK 0x00040000
-#define AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT 18
-static inline uint32_t AXXX_CP_STAT_MEQ_RING_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT) & AXXX_CP_STAT_MEQ_RING_BUSY__MASK;
-}
-#define AXXX_CP_STAT_PFP_BUSY__MASK 0x00020000
-#define AXXX_CP_STAT_PFP_BUSY__SHIFT 17
-static inline uint32_t AXXX_CP_STAT_PFP_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_PFP_BUSY__SHIFT) & AXXX_CP_STAT_PFP_BUSY__MASK;
-}
-#define AXXX_CP_STAT_ST_QUEUE_BUSY__MASK 0x00010000
-#define AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT 16
-static inline uint32_t AXXX_CP_STAT_ST_QUEUE_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_ST_QUEUE_BUSY__MASK;
-}
-#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK 0x00002000
-#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT 13
-static inline uint32_t AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK;
-}
-#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK 0x00001000
-#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT 12
-static inline uint32_t AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RING_QUEUE_BUSY__MASK 0x00000800
-#define AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT 11
-static inline uint32_t AXXX_CP_STAT_RING_QUEUE_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_RING_QUEUE_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CSF_BUSY__MASK 0x00000400
-#define AXXX_CP_STAT_CSF_BUSY__SHIFT 10
-static inline uint32_t AXXX_CP_STAT_CSF_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CSF_BUSY__SHIFT) & AXXX_CP_STAT_CSF_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CSF_ST_BUSY__MASK 0x00000200
-#define AXXX_CP_STAT_CSF_ST_BUSY__SHIFT 9
-static inline uint32_t AXXX_CP_STAT_CSF_ST_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CSF_ST_BUSY__SHIFT) & AXXX_CP_STAT_CSF_ST_BUSY__MASK;
-}
-#define AXXX_CP_STAT_EVENT_BUSY__MASK 0x00000100
-#define AXXX_CP_STAT_EVENT_BUSY__SHIFT 8
-static inline uint32_t AXXX_CP_STAT_EVENT_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_EVENT_BUSY__SHIFT) & AXXX_CP_STAT_EVENT_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK 0x00000080
-#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT 7
-static inline uint32_t AXXX_CP_STAT_CSF_INDIRECT2_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK 0x00000040
-#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT 6
-static inline uint32_t AXXX_CP_STAT_CSF_INDIRECTS_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK;
-}
-#define AXXX_CP_STAT_CSF_RING_BUSY__MASK 0x00000020
-#define AXXX_CP_STAT_CSF_RING_BUSY__SHIFT 5
-static inline uint32_t AXXX_CP_STAT_CSF_RING_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_CSF_RING_BUSY__SHIFT) & AXXX_CP_STAT_CSF_RING_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RCIU_BUSY__MASK 0x00000010
-#define AXXX_CP_STAT_RCIU_BUSY__SHIFT 4
-static inline uint32_t AXXX_CP_STAT_RCIU_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RCIU_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_BUSY__MASK;
-}
-#define AXXX_CP_STAT_RBIU_BUSY__MASK 0x00000008
-#define AXXX_CP_STAT_RBIU_BUSY__SHIFT 3
-static inline uint32_t AXXX_CP_STAT_RBIU_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_RBIU_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_BUSY__MASK;
-}
-#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK 0x00000004
-#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT 2
-static inline uint32_t AXXX_CP_STAT_MIU_RD_RETURN_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK;
-}
-#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK 0x00000002
-#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT 1
-static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val)
-{
- return ((val) << AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK;
-}
+#define AXXX_CP_STAT_CP_BUSY 0x80000000
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY 0x40000000
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY 0x20000000
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY 0x10000000
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY 0x08000000
+#define AXXX_CP_STAT_ME_BUSY 0x04000000
+#define AXXX_CP_STAT_MIU_WR_C_BUSY 0x02000000
+#define AXXX_CP_STAT_CP_3D_BUSY 0x00800000
+#define AXXX_CP_STAT_CP_NRT_BUSY 0x00400000
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY 0x00200000
+#define AXXX_CP_STAT_RCIU_ME_BUSY 0x00100000
+#define AXXX_CP_STAT_RCIU_PFP_BUSY 0x00080000
+#define AXXX_CP_STAT_MEQ_RING_BUSY 0x00040000
+#define AXXX_CP_STAT_PFP_BUSY 0x00020000
+#define AXXX_CP_STAT_ST_QUEUE_BUSY 0x00010000
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY 0x00002000
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY 0x00001000
+#define AXXX_CP_STAT_RING_QUEUE_BUSY 0x00000800
+#define AXXX_CP_STAT_CSF_BUSY 0x00000400
+#define AXXX_CP_STAT_CSF_ST_BUSY 0x00000200
+#define AXXX_CP_STAT_EVENT_BUSY 0x00000100
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY 0x00000080
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY 0x00000040
+#define AXXX_CP_STAT_CSF_RING_BUSY 0x00000020
+#define AXXX_CP_STAT_RCIU_BUSY 0x00000010
+#define AXXX_CP_STAT_RBIU_BUSY 0x00000008
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY 0x00000004
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY 0x00000002
#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001
#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
@@ -693,5 +533,7 @@ static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val)
#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
+#ifdef __cplusplus
+#endif
#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 2ce7d7b1690d..c3703a51287b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -55,10 +55,17 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
- .chip_ids = ADRENO_CHIP_IDS(
- 0x03000512,
- 0x03000520
- ),
+ .chip_ids = ADRENO_CHIP_IDS(0x03000512),
+ .family = ADRENO_3XX,
+ .fw = {
+ [ADRENO_FW_PM4] = "a330_pm4.fw",
+ [ADRENO_FW_PFP] = "a330_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x03000520),
.family = ADRENO_3XX,
.revn = 305,
.fw = {
@@ -294,6 +301,27 @@ static const struct adreno_info gpulist[] = {
{ 127, 4 },
),
}, {
+ .machine = "qcom,sm7150",
+ .chip_ids = ADRENO_CHIP_IDS(0x06010800),
+ .family = ADRENO_6XX_GEN1,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mbn",
+ .hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 128, 1 },
+ { 146, 2 },
+ { 167, 3 },
+ { 172, 4 },
+ ),
+ }, {
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
.revn = 618,
@@ -493,6 +521,24 @@ static const struct adreno_info gpulist[] = {
.hwcg = a690_hwcg,
.address_space_size = SZ_16G,
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x07000200),
+ .family = ADRENO_6XX_GEN1, /* NOT a mistake! */
+ .fw = {
+ [ADRENO_FW_SQE] = "a702_sqe.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a702_zap.mbn",
+ .hwcg = a702_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 236, 1 },
+ { 178, 2 },
+ { 142, 3 },
+ ),
+ }, {
.chip_ids = ADRENO_CHIP_IDS(0x07030001),
.family = ADRENO_7XX_GEN1,
.fw = {
@@ -522,6 +568,20 @@ static const struct adreno_info gpulist[] = {
.zapfw = "a740_zap.mdt",
.hwcg = a740_hwcg,
.address_space_size = SZ_16G,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
+ .family = ADRENO_7XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen70900_sqe.fw",
+ [ADRENO_FW_GMU] = "gmu_gen70900.bin",
+ },
+ .gmem = 3 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "gen70900_zap.mbn",
+ .address_space_size = SZ_16G,
},
};
@@ -539,6 +599,7 @@ MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
+MODULE_FIRMWARE("qcom/a615_zap.mbn");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
new file mode 100644
index 000000000000..cb66ece6606b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
@@ -0,0 +1,928 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __ADRENO_GEN7_0_0_SNAPSHOT_H
+#define __ADRENO_GEN7_0_0_SNAPSHOT_H
+
+#include "a6xx_gpu_state.h"
+
+static const u32 gen7_0_0_debugbus_blocks[] = {
+ A7XX_DBGBUS_CP_0_0,
+ A7XX_DBGBUS_CP_0_1,
+ A7XX_DBGBUS_RBBM,
+ A7XX_DBGBUS_HLSQ,
+ A7XX_DBGBUS_UCHE_0,
+ A7XX_DBGBUS_TESS_BR,
+ A7XX_DBGBUS_TESS_BV,
+ A7XX_DBGBUS_PC_BR,
+ A7XX_DBGBUS_PC_BV,
+ A7XX_DBGBUS_VFDP_BR,
+ A7XX_DBGBUS_VFDP_BV,
+ A7XX_DBGBUS_VPC_BR,
+ A7XX_DBGBUS_VPC_BV,
+ A7XX_DBGBUS_TSE_BR,
+ A7XX_DBGBUS_TSE_BV,
+ A7XX_DBGBUS_RAS_BR,
+ A7XX_DBGBUS_RAS_BV,
+ A7XX_DBGBUS_VSC,
+ A7XX_DBGBUS_COM_0,
+ A7XX_DBGBUS_LRZ_BR,
+ A7XX_DBGBUS_LRZ_BV,
+ A7XX_DBGBUS_UFC_0,
+ A7XX_DBGBUS_UFC_1,
+ A7XX_DBGBUS_GMU_GX,
+ A7XX_DBGBUS_DBGC,
+ A7XX_DBGBUS_GPC_BR,
+ A7XX_DBGBUS_GPC_BV,
+ A7XX_DBGBUS_LARC,
+ A7XX_DBGBUS_HLSQ_SPTP,
+ A7XX_DBGBUS_RB_0,
+ A7XX_DBGBUS_RB_1,
+ A7XX_DBGBUS_RB_2,
+ A7XX_DBGBUS_RB_3,
+ A7XX_DBGBUS_UCHE_WRAPPER,
+ A7XX_DBGBUS_CCU_0,
+ A7XX_DBGBUS_CCU_1,
+ A7XX_DBGBUS_CCU_2,
+ A7XX_DBGBUS_CCU_3,
+ A7XX_DBGBUS_VFD_BR_0,
+ A7XX_DBGBUS_VFD_BR_1,
+ A7XX_DBGBUS_VFD_BR_2,
+ A7XX_DBGBUS_VFD_BR_3,
+ A7XX_DBGBUS_VFD_BR_4,
+ A7XX_DBGBUS_VFD_BR_5,
+ A7XX_DBGBUS_VFD_BR_6,
+ A7XX_DBGBUS_VFD_BR_7,
+ A7XX_DBGBUS_VFD_BV_0,
+ A7XX_DBGBUS_VFD_BV_1,
+ A7XX_DBGBUS_VFD_BV_2,
+ A7XX_DBGBUS_VFD_BV_3,
+ A7XX_DBGBUS_USP_0,
+ A7XX_DBGBUS_USP_1,
+ A7XX_DBGBUS_USP_2,
+ A7XX_DBGBUS_USP_3,
+ A7XX_DBGBUS_TP_0,
+ A7XX_DBGBUS_TP_1,
+ A7XX_DBGBUS_TP_2,
+ A7XX_DBGBUS_TP_3,
+ A7XX_DBGBUS_TP_4,
+ A7XX_DBGBUS_TP_5,
+ A7XX_DBGBUS_TP_6,
+ A7XX_DBGBUS_TP_7,
+ A7XX_DBGBUS_USPTP_0,
+ A7XX_DBGBUS_USPTP_1,
+ A7XX_DBGBUS_USPTP_2,
+ A7XX_DBGBUS_USPTP_3,
+ A7XX_DBGBUS_USPTP_4,
+ A7XX_DBGBUS_USPTP_5,
+ A7XX_DBGBUS_USPTP_6,
+ A7XX_DBGBUS_USPTP_7,
+};
+
+static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
+ {A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_1, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_0_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_1_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_2_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_3_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_4_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_5_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_6_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_7_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_CB_RAM, 0x390, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_TAG, 0x90, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_2, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_TMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_SMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_STATE_DATA, 0x40, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_8_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_9_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_10_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_11_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_12_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+};
+
+static const u32 gen7_0_0_pre_crashdumper_gpu_registers[] = {
+ 0x00210, 0x00210, 0x00212, 0x00213, 0x03c00, 0x03c0b, 0x03c40, 0x03c42,
+ 0x03c45, 0x03c47, 0x03c49, 0x03c4a, 0x03cc0, 0x03cd1,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_pre_crashdumper_gpu_registers), 8));
+
+static const u32 gen7_0_0_post_crashdumper_registers[] = {
+ 0x00535, 0x00535, 0x0f400, 0x0f400, 0x0f800, 0x0f803, 0x0fc00, 0x0fc01,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_post_crashdumper_registers), 8));
+
+static const u32 gen7_0_0_gpu_registers[] = {
+ 0x00000, 0x00000, 0x00002, 0x00002, 0x00011, 0x00012, 0x00016, 0x0001b,
+ 0x0001f, 0x00032, 0x00038, 0x0003c, 0x00042, 0x00042, 0x00044, 0x00044,
+ 0x00047, 0x00047, 0x00049, 0x0004a, 0x0004c, 0x0004c, 0x00050, 0x00050,
+ 0x00056, 0x00056, 0x00073, 0x00075, 0x000ad, 0x000ae, 0x000b0, 0x000b0,
+ 0x000b4, 0x000b4, 0x000b8, 0x000b8, 0x000bc, 0x000bc, 0x000c0, 0x000c0,
+ 0x000c4, 0x000c4, 0x000c8, 0x000c8, 0x000cc, 0x000cc, 0x000d0, 0x000d0,
+ 0x000d4, 0x000d4, 0x000d8, 0x000d8, 0x000dc, 0x000dc, 0x000e0, 0x000e0,
+ 0x000e4, 0x000e4, 0x000e8, 0x000e8, 0x000ec, 0x000ec, 0x000f0, 0x000f0,
+ 0x000f4, 0x000f4, 0x000f8, 0x000f8, 0x00100, 0x00100, 0x00104, 0x0010b,
+ 0x0010f, 0x0011d, 0x0012f, 0x0012f, 0x00200, 0x0020d, 0x00211, 0x00211,
+ 0x00215, 0x00243, 0x00260, 0x00268, 0x00272, 0x00274, 0x00281, 0x0028d,
+ 0x00300, 0x00401, 0x00410, 0x00451, 0x00460, 0x004a3, 0x004c0, 0x004d1,
+ 0x00500, 0x00500, 0x00507, 0x0050b, 0x0050f, 0x0050f, 0x00511, 0x00511,
+ 0x00533, 0x00534, 0x00536, 0x00536, 0x00540, 0x00555, 0x00564, 0x00567,
+ 0x00574, 0x00577, 0x005fb, 0x005ff, 0x00800, 0x00808, 0x00810, 0x00813,
+ 0x00820, 0x00821, 0x00823, 0x00827, 0x00830, 0x00834, 0x0083f, 0x00841,
+ 0x00843, 0x00847, 0x0084f, 0x00886, 0x008a0, 0x008ab, 0x008c0, 0x008c0,
+ 0x008c4, 0x008c5, 0x008d0, 0x008dd, 0x008e0, 0x008e6, 0x008f0, 0x008f3,
+ 0x00900, 0x00903, 0x00908, 0x00911, 0x00928, 0x0093e, 0x00942, 0x0094d,
+ 0x00980, 0x00984, 0x0098d, 0x0098f, 0x009b0, 0x009b4, 0x009c2, 0x009c9,
+ 0x009ce, 0x009d7, 0x009e0, 0x009e7, 0x00a00, 0x00a00, 0x00a02, 0x00a03,
+ 0x00a10, 0x00a4f, 0x00a61, 0x00a9f, 0x00ad0, 0x00adb, 0x00b00, 0x00b31,
+ 0x00b35, 0x00b3c, 0x00b40, 0x00b40, 0x00c00, 0x00c00, 0x00c02, 0x00c04,
+ 0x00c06, 0x00c06, 0x00c10, 0x00cd9, 0x00ce0, 0x00d0c, 0x00df0, 0x00df4,
+ 0x00e01, 0x00e02, 0x00e07, 0x00e0e, 0x00e10, 0x00e13, 0x00e17, 0x00e19,
+ 0x00e1b, 0x00e2b, 0x00e30, 0x00e32, 0x00e38, 0x00e3c,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_gpu_registers), 8));
+
+static const u32 gen7_0_0_gmu_registers[] = {
+ 0x10001, 0x10001, 0x10003, 0x10003, 0x10401, 0x10401, 0x10403, 0x10403,
+ 0x10801, 0x10801, 0x10803, 0x10803, 0x10c01, 0x10c01, 0x10c03, 0x10c03,
+ 0x11001, 0x11001, 0x11003, 0x11003, 0x11401, 0x11401, 0x11403, 0x11403,
+ 0x11801, 0x11801, 0x11803, 0x11803, 0x11c01, 0x11c01, 0x11c03, 0x11c03,
+ 0x1f400, 0x1f40d, 0x1f40f, 0x1f411, 0x1f500, 0x1f500, 0x1f507, 0x1f507,
+ 0x1f509, 0x1f50b, 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c,
+ 0x1f80f, 0x1f80f, 0x1f811, 0x1f811, 0x1f813, 0x1f817, 0x1f819, 0x1f81c,
+ 0x1f824, 0x1f82a, 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f860, 0x1f860,
+ 0x1f870, 0x1f879, 0x1f87f, 0x1f87f, 0x1f888, 0x1f889, 0x1f8a0, 0x1f8a2,
+ 0x1f8a4, 0x1f8af, 0x1f8c0, 0x1f8c1, 0x1f8c3, 0x1f8c4, 0x1f8d0, 0x1f8d0,
+ 0x1f8ec, 0x1f8ec, 0x1f8f0, 0x1f8f1, 0x1f910, 0x1f914, 0x1f920, 0x1f921,
+ 0x1f924, 0x1f925, 0x1f928, 0x1f929, 0x1f92c, 0x1f92d, 0x1f940, 0x1f940,
+ 0x1f942, 0x1f944, 0x1f948, 0x1f94a, 0x1f94f, 0x1f951, 0x1f958, 0x1f95a,
+ 0x1f95d, 0x1f95d, 0x1f962, 0x1f962, 0x1f964, 0x1f96b, 0x1f970, 0x1f979,
+ 0x1f980, 0x1f981, 0x1f984, 0x1f986, 0x1f992, 0x1f993, 0x1f996, 0x1f99e,
+ 0x1f9c0, 0x1f9c0, 0x1f9c5, 0x1f9d4, 0x1f9f0, 0x1f9f1, 0x1f9f8, 0x1f9fa,
+ 0x1fa00, 0x1fa03, 0x20000, 0x20005, 0x20008, 0x2000c, 0x20010, 0x20012,
+ 0x20018, 0x20018, 0x20020, 0x20023, 0x20030, 0x20031, 0x23801, 0x23801,
+ 0x23803, 0x23803, 0x23805, 0x23805, 0x23807, 0x23807, 0x23809, 0x23809,
+ 0x2380b, 0x2380b, 0x2380d, 0x2380d, 0x2380f, 0x2380f, 0x23811, 0x23811,
+ 0x23813, 0x23813, 0x23815, 0x23815, 0x23817, 0x23817, 0x23819, 0x23819,
+ 0x2381b, 0x2381b, 0x2381d, 0x2381d, 0x2381f, 0x23820, 0x23822, 0x23822,
+ 0x23824, 0x23824, 0x23826, 0x23826, 0x23828, 0x23828, 0x2382a, 0x2382a,
+ 0x2382c, 0x2382c, 0x2382e, 0x2382e, 0x23830, 0x23830, 0x23832, 0x23832,
+ 0x23834, 0x23834, 0x23836, 0x23836, 0x23838, 0x23838, 0x2383a, 0x2383a,
+ 0x2383c, 0x2383c, 0x2383e, 0x2383e, 0x23840, 0x23847, 0x23b00, 0x23b01,
+ 0x23b03, 0x23b03, 0x23b05, 0x23b0e, 0x23b10, 0x23b13, 0x23b15, 0x23b16,
+ 0x23b20, 0x23b20, 0x23b28, 0x23b28, 0x23b30, 0x23b30,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_gmu_registers), 8));
+
+static const u32 gen7_0_0_gmugx_registers[] = {
+ 0x1a400, 0x1a41f, 0x1a440, 0x1a45f, 0x1a480, 0x1a49f, 0x1a4c0, 0x1a4df,
+ 0x1a500, 0x1a51f, 0x1a540, 0x1a55f, 0x1a580, 0x1a59f, 0x1a5c0, 0x1a5df,
+ 0x1a780, 0x1a781, 0x1a783, 0x1a785, 0x1a787, 0x1a789, 0x1a78b, 0x1a78d,
+ 0x1a78f, 0x1a791, 0x1a793, 0x1a795, 0x1a797, 0x1a799, 0x1a79b, 0x1a79b,
+ 0x1a7c0, 0x1a7c1, 0x1a7c4, 0x1a7c5, 0x1a7c8, 0x1a7c9, 0x1a7cc, 0x1a7cd,
+ 0x1a7d0, 0x1a7d1, 0x1a7d4, 0x1a7d5, 0x1a7d8, 0x1a7d9, 0x1a7fc, 0x1a7fd,
+ 0x1a800, 0x1a802, 0x1a804, 0x1a804, 0x1a816, 0x1a816, 0x1a81e, 0x1a81e,
+ 0x1a826, 0x1a826, 0x1a82e, 0x1a82e, 0x1a836, 0x1a836, 0x1a83e, 0x1a83e,
+ 0x1a846, 0x1a846, 0x1a860, 0x1a862, 0x1a864, 0x1a867, 0x1a870, 0x1a870,
+ 0x1a883, 0x1a884, 0x1a8c0, 0x1a8c2, 0x1a8c4, 0x1a8c7, 0x1a8d0, 0x1a8d3,
+ 0x1a900, 0x1a92b, 0x1a940, 0x1a940,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_gmugx_registers), 8));
+
+static const u32 gen7_0_0_noncontext_pipe_br_registers[] = {
+ 0x00887, 0x0088c, 0x08600, 0x08600, 0x08602, 0x08602, 0x08610, 0x0861b,
+ 0x08620, 0x08620, 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640,
+ 0x09600, 0x09600, 0x09602, 0x09603, 0x0960a, 0x09616, 0x09624, 0x0963a,
+ 0x09640, 0x09640, 0x09e00, 0x09e00, 0x09e02, 0x09e07, 0x09e0a, 0x09e16,
+ 0x09e19, 0x09e19, 0x09e1c, 0x09e1c, 0x09e20, 0x09e25, 0x09e30, 0x09e31,
+ 0x09e40, 0x09e51, 0x09e64, 0x09e64, 0x09e70, 0x09e72, 0x09e78, 0x09e79,
+ 0x09e80, 0x09fff, 0x0a600, 0x0a600, 0x0a603, 0x0a603, 0x0a610, 0x0a61f,
+ 0x0a630, 0x0a631, 0x0a638, 0x0a638,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_pipe_br_registers), 8));
+
+static const u32 gen7_0_0_noncontext_pipe_bv_registers[] = {
+ 0x00887, 0x0088c, 0x08600, 0x08600, 0x08602, 0x08602, 0x08610, 0x0861b,
+ 0x08620, 0x08620, 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640,
+ 0x09600, 0x09600, 0x09602, 0x09603, 0x0960a, 0x09616, 0x09624, 0x0963a,
+ 0x09640, 0x09640, 0x09e00, 0x09e00, 0x09e02, 0x09e07, 0x09e0a, 0x09e16,
+ 0x09e19, 0x09e19, 0x09e1c, 0x09e1c, 0x09e20, 0x09e25, 0x09e30, 0x09e31,
+ 0x09e40, 0x09e51, 0x09e64, 0x09e64, 0x09e70, 0x09e72, 0x09e78, 0x09e79,
+ 0x09e80, 0x09fff, 0x0a600, 0x0a600, 0x0a603, 0x0a603, 0x0a610, 0x0a61f,
+ 0x0a630, 0x0a631, 0x0a638, 0x0a638,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_pipe_bv_registers), 8));
+
+static const u32 gen7_0_0_noncontext_pipe_lpac_registers[] = {
+ 0x00887, 0x0088c, 0x00f80, 0x00f80,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_pipe_lpac_registers), 8));
+
+static const u32 gen7_0_0_noncontext_rb_rac_pipe_br_registers[] = {
+ 0x08e10, 0x08e1c, 0x08e20, 0x08e25, 0x08e51, 0x08e5a,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_rb_rac_pipe_br_registers), 8));
+
+static const u32 gen7_0_0_noncontext_rb_rbp_pipe_br_registers[] = {
+ 0x08e01, 0x08e01, 0x08e04, 0x08e04, 0x08e06, 0x08e09, 0x08e0c, 0x08e0c,
+ 0x08e28, 0x08e28, 0x08e2c, 0x08e35, 0x08e3b, 0x08e3f, 0x08e50, 0x08e50,
+ 0x08e5b, 0x08e5d, 0x08e5f, 0x08e5f, 0x08e61, 0x08e61, 0x08e63, 0x08e65,
+ 0x08e68, 0x08e68, 0x08e70, 0x08e79, 0x08e80, 0x08e8f,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_rb_rbp_pipe_br_registers), 8));
+
+/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = {
+ 0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
+ 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
+ 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08110, 0x08120, 0x0813f,
+ 0x08400, 0x08406, 0x0840a, 0x0840b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_br_registers), 8));
+
+/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = {
+ 0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
+ 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
+ 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08110, 0x08120, 0x0813f,
+ 0x08400, 0x08406, 0x0840a, 0x0840b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_bv_registers), 8));
+
+/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = {
+ 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
+ 0x09b00, 0x09b08,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_br_registers), 8));
+
+/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = {
+ 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
+ 0x09b00, 0x09b08,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_bv_registers), 8));
+
+/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = {
+ 0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811,
+ 0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829,
+ 0x0882b, 0x0882e, 0x08831, 0x08831, 0x08833, 0x08836, 0x08839, 0x08839,
+ 0x0883b, 0x0883e, 0x08841, 0x08841, 0x08843, 0x08846, 0x08849, 0x08849,
+ 0x0884b, 0x0884e, 0x08851, 0x08851, 0x08853, 0x08856, 0x08859, 0x08859,
+ 0x0885b, 0x0885e, 0x08860, 0x08864, 0x08870, 0x08870, 0x08873, 0x08876,
+ 0x08878, 0x08879, 0x08882, 0x08885, 0x08887, 0x08889, 0x08891, 0x08891,
+ 0x08898, 0x08898, 0x088c0, 0x088c1, 0x088e5, 0x088e5, 0x088f4, 0x088f5,
+ 0x08a00, 0x08a05, 0x08a10, 0x08a15, 0x08a20, 0x08a25, 0x08a30, 0x08a35,
+ 0x08c00, 0x08c01, 0x08c18, 0x08c1f, 0x08c26, 0x08c34,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rac_cluster_ps_pipe_br_registers), 8));
+
+/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = {
+ 0x08800, 0x08801, 0x08803, 0x08803, 0x0880b, 0x0880d, 0x08812, 0x08812,
+ 0x08820, 0x08820, 0x08822, 0x08822, 0x08827, 0x08828, 0x0882a, 0x0882a,
+ 0x0882f, 0x08830, 0x08832, 0x08832, 0x08837, 0x08838, 0x0883a, 0x0883a,
+ 0x0883f, 0x08840, 0x08842, 0x08842, 0x08847, 0x08848, 0x0884a, 0x0884a,
+ 0x0884f, 0x08850, 0x08852, 0x08852, 0x08857, 0x08858, 0x0885a, 0x0885a,
+ 0x0885f, 0x0885f, 0x08865, 0x08865, 0x08871, 0x08872, 0x08877, 0x08877,
+ 0x08880, 0x08881, 0x08886, 0x08886, 0x08890, 0x08890, 0x088d0, 0x088e4,
+ 0x088e8, 0x088ea, 0x088f0, 0x088f0, 0x08900, 0x0891a, 0x08927, 0x08928,
+ 0x08c17, 0x08c17, 0x08c20, 0x08c25,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = {
+ 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7,
+ 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba,
+ 0x0a9bc, 0x0a9bc, 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e0, 0x0a9fc,
+ 0x0aa00, 0x0aa00, 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab03,
+ 0x0ab05, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = {
+ 0x0a9b0, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
+ 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc,
+ 0x0aa00, 0x0aa00, 0x0aa31, 0x0aa31, 0x0ab00, 0x0ab01,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_DP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers[] = {
+ 0x0a9b1, 0x0a9b1, 0x0a9c6, 0x0a9cb, 0x0a9d4, 0x0a9df,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_DP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers[] = {
+ 0x0a9b1, 0x0a9b1, 0x0a9d4, 0x0a9df,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = {
+ 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8,
+ 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5,
+ 0x0a9ba, 0x0a9bc, 0x0a9e0, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0ab00, 0x0ab00,
+ 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = {
+ 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9e2, 0x0a9e3,
+ 0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0ab00, 0x0ab00,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: uSPTP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = {
+ 0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae,
+ 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3,
+ 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf,
+ 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: uSPTP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = {
+ 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3,
+ 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31, 0x0ab00, 0x0ab01,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
+ 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a,
+ 0x0a83c, 0x0a83c, 0x0a83f, 0x0a840, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862,
+ 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a88c, 0x0a88e,
+ 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898, 0x0a89a, 0x0a89d,
+ 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab03, 0x0ab05, 0x0ab05,
+ 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
+ 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a,
+ 0x0a83c, 0x0a83c, 0x0a83f, 0x0a840, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862,
+ 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a88c, 0x0a88e,
+ 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898, 0x0a89a, 0x0a89d,
+ 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b,
+ 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831,
+ 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d,
+ 0x0a862, 0x0a864, 0x0a870, 0x0a871, 0x0a88d, 0x0a88e, 0x0a893, 0x0a895,
+ 0x0a8a0, 0x0a8af, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05,
+ 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: SP_TOP */
+static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831,
+ 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d,
+ 0x0a862, 0x0a864, 0x0a870, 0x0a871, 0x0a88d, 0x0a88e, 0x0a893, 0x0a895,
+ 0x0a8a0, 0x0a8af, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b,
+ 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: uSPTP */
+static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = {
+ 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833,
+ 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867,
+ 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a898, 0x0a8c0, 0x0a8c3,
+ 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: uSPTP */
+static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = {
+ 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833,
+ 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867,
+ 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a898, 0x0a8c0, 0x0a8c3,
+ 0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers), 8));
+
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = {
+ 0x0b180, 0x0b183, 0x0b190, 0x0b195, 0x0b2c0, 0x0b2d5, 0x0b300, 0x0b307,
+ 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers[] = {
+ 0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: SP_TOP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers[] = {
+ 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers), 8));
+
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: uSPTP */
+static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers[] = {
+ 0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers), 8));
+
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers[] = {
+ 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers), 8));
+
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC */
+static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = {
+ 0x0b180, 0x0b181, 0x0b300, 0x0b301, 0x0b307, 0x0b307, 0x0b309, 0x0b309,
+ 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers), 8));
+
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers[] = {
+ 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers), 8));
+
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers[] = {
+ 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers), 8));
+
+/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_vfd_cluster_fe_pipe_br_registers[] = {
+ 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_br_registers), 8));
+
+/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_vfd_cluster_fe_pipe_bv_registers[] = {
+ 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_bv_registers), 8));
+
+/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_vpc_cluster_fe_pipe_br_registers[] = {
+ 0x09300, 0x09307,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_br_registers), 8));
+
+/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_vpc_cluster_fe_pipe_bv_registers[] = {
+ 0x09300, 0x09307,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_bv_registers), 8));
+
+/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers[] = {
+ 0x09101, 0x0910c, 0x09300, 0x09307,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers), 8));
+
+/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers[] = {
+ 0x09101, 0x0910c, 0x09300, 0x09307,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers), 8));
+
+/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers[] = {
+ 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers), 8));
+
+/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BV */
+static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers[] = {
+ 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers), 8));
+
+/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers[] = {
+ 0x0ae52, 0x0ae52, 0x0ae60, 0x0ae67, 0x0ae69, 0x0ae73,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = {
+ 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c,
+ 0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3f,
+ 0x0ae50, 0x0ae52, 0x0ae80, 0x0aea3,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_sp_top_registers), 8));
+
+/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: uSPTP */
+static const u32 gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = {
+ 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c,
+ 0x0ae0f, 0x0ae0f, 0x0ae30, 0x0ae32, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3b,
+ 0x0ae3e, 0x0ae3f, 0x0ae50, 0x0ae52,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_usptp_registers), 8));
+
+/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */
+static const u32 gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = {
+ 0x0af88, 0x0af8a,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8));
+
+/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */
+static const u32 gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers[] = {
+ 0x0af80, 0x0af84,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers), 8));
+
+/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: uSPTP */
+static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
+ 0x0af80, 0x0af84, 0x0af90, 0x0af92,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
+
+/* Block: TPL1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+ 0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
+ 0x0b60f, 0x0b621, 0x0b630, 0x0b633,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
+
+/* Block: TPL1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = {
+ 0x0b780, 0x0b780,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_lpac_registers), 8));
+
+static const struct gen7_sel_reg gen7_0_0_rb_rac_sel = {
+ .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .val = 0x0,
+};
+
+static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
+ .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .val = 0x9,
+};
+
+static struct gen7_cluster_registers gen7_0_0_clusters[] = {
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_0_0_noncontext_pipe_br_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ gen7_0_0_noncontext_pipe_bv_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ gen7_0_0_noncontext_pipe_lpac_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_0_0_noncontext_rb_rac_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_0_0_noncontext_rb_rbp_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_gras_cluster_gras_pipe_br_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_gras_cluster_gras_pipe_bv_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_gras_cluster_gras_pipe_br_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_gras_cluster_gras_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_pc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_pc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
+};
+
+static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
+};
+
+static const u32 gen7_0_0_rscc_registers[] = {
+ 0x14000, 0x14036, 0x14040, 0x14042, 0x14080, 0x14084, 0x14089, 0x1408c,
+ 0x14091, 0x14094, 0x14099, 0x1409c, 0x140a1, 0x140a4, 0x140a9, 0x140ac,
+ 0x14100, 0x14102, 0x14114, 0x14119, 0x14124, 0x1412e, 0x14140, 0x14143,
+ 0x14180, 0x14197, 0x14340, 0x14342, 0x14344, 0x14347, 0x1434c, 0x14373,
+ 0x143ec, 0x143ef, 0x143f4, 0x1441b, 0x14494, 0x14497, 0x1449c, 0x144c3,
+ 0x1453c, 0x1453f, 0x14544, 0x1456b, 0x145e4, 0x145e7, 0x145ec, 0x14613,
+ 0x1468c, 0x1468f, 0x14694, 0x146bb, 0x14734, 0x14737, 0x1473c, 0x14763,
+ 0x147dc, 0x147df, 0x147e4, 0x1480b, 0x14884, 0x14887, 0x1488c, 0x148b3,
+ 0x1492c, 0x1492f, 0x14934, 0x1495b, 0x14f51, 0x14f54,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_rscc_registers), 8));
+
+static const u32 gen7_0_0_cpr_registers[] = {
+ 0x26800, 0x26805, 0x26808, 0x2680c, 0x26814, 0x26814, 0x2681c, 0x2681c,
+ 0x26820, 0x26838, 0x26840, 0x26840, 0x26848, 0x26848, 0x26850, 0x26850,
+ 0x26880, 0x26898, 0x26980, 0x269b0, 0x269c0, 0x269c8, 0x269e0, 0x269ee,
+ 0x269fb, 0x269ff, 0x26a02, 0x26a07, 0x26a09, 0x26a0b, 0x26a10, 0x26b0f,
+ 0x27440, 0x27441, 0x27444, 0x27444, 0x27480, 0x274a2, 0x274ac, 0x274ac,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_cpr_registers), 8));
+
+static const u32 gen7_0_0_gpucc_registers[] = {
+ 0x24000, 0x2400e, 0x24400, 0x2440e, 0x24800, 0x24805, 0x24c00, 0x24cff,
+ 0x25800, 0x25804, 0x25c00, 0x25c04, 0x26000, 0x26004, 0x26400, 0x26405,
+ 0x26414, 0x2641d, 0x2642a, 0x26430, 0x26432, 0x26432, 0x26441, 0x26455,
+ 0x26466, 0x26468, 0x26478, 0x2647a, 0x26489, 0x2648a, 0x2649c, 0x2649e,
+ 0x264a0, 0x264a3, 0x264b3, 0x264b5, 0x264c5, 0x264c7, 0x264d6, 0x264d8,
+ 0x264e8, 0x264e9, 0x264f9, 0x264fc, 0x2650b, 0x2650c, 0x2651c, 0x2651e,
+ 0x26540, 0x26570, 0x26600, 0x26616, 0x26620, 0x2662d,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_gpucc_registers), 8));
+
+static const u32 gen7_0_0_cx_misc_registers[] = {
+ 0x27800, 0x27800, 0x27810, 0x27814, 0x27820, 0x27824, 0x27832, 0x27857,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_cx_misc_registers), 8));
+
+static const u32 gen7_0_0_dpm_registers[] = {
+ 0x1aa00, 0x1aa06, 0x1aa09, 0x1aa0a, 0x1aa0c, 0x1aa0d, 0x1aa0f, 0x1aa12,
+ 0x1aa14, 0x1aa47, 0x1aa50, 0x1aa51,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
+
+static struct gen7_reg_list gen7_0_0_reg_list[] = {
+ { gen7_0_0_gpu_registers, NULL },
+ { gen7_0_0_cx_misc_registers, NULL },
+ { gen7_0_0_dpm_registers, NULL },
+ { NULL, NULL },
+};
+
+static const u32 *gen7_0_0_external_core_regs[] = {
+ gen7_0_0_gpucc_registers,
+ gen7_0_0_cpr_registers,
+};
+#endif /* __ADRENO_GEN7_0_0_SNAPSHOT_H */
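The register lists in this header are (first, last) offset pairs terminated by a UINT_MAX, UINT_MAX sentinel, which is why each array carries a static_assert that its size is a multiple of 8 bytes. A minimal sketch of walking such a list, assuming a caller-supplied read callback (not defined in this header):

static void demo_walk_ranges(const u32 *ranges,
                             void (*read_reg)(u32 offset, void *priv),
                             void *priv)
{
        int i;

        /* Each pair is an inclusive [first, last] register range. */
        for (i = 0; ranges[i] != UINT_MAX; i += 2) {
                u32 offset;

                for (offset = ranges[i]; offset <= ranges[i + 1]; offset++)
                        read_reg(offset, priv);
        }
}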
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
new file mode 100644
index 000000000000..6f8ad50f32ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
@@ -0,0 +1,753 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __ADRENO_GEN7_2_0_SNAPSHOT_H
+#define __ADRENO_GEN7_2_0_SNAPSHOT_H
+
+#include "a6xx_gpu_state.h"
+
+static const u32 gen7_2_0_debugbus_blocks[] = {
+ A7XX_DBGBUS_CP_0_0,
+ A7XX_DBGBUS_CP_0_1,
+ A7XX_DBGBUS_RBBM,
+ A7XX_DBGBUS_HLSQ,
+ A7XX_DBGBUS_UCHE_0,
+ A7XX_DBGBUS_UCHE_1,
+ A7XX_DBGBUS_TESS_BR,
+ A7XX_DBGBUS_TESS_BV,
+ A7XX_DBGBUS_PC_BR,
+ A7XX_DBGBUS_PC_BV,
+ A7XX_DBGBUS_VFDP_BR,
+ A7XX_DBGBUS_VFDP_BV,
+ A7XX_DBGBUS_VPC_BR,
+ A7XX_DBGBUS_VPC_BV,
+ A7XX_DBGBUS_TSE_BR,
+ A7XX_DBGBUS_TSE_BV,
+ A7XX_DBGBUS_RAS_BR,
+ A7XX_DBGBUS_RAS_BV,
+ A7XX_DBGBUS_VSC,
+ A7XX_DBGBUS_COM_0,
+ A7XX_DBGBUS_LRZ_BR,
+ A7XX_DBGBUS_LRZ_BV,
+ A7XX_DBGBUS_UFC_0,
+ A7XX_DBGBUS_UFC_1,
+ A7XX_DBGBUS_GMU_GX,
+ A7XX_DBGBUS_DBGC,
+ A7XX_DBGBUS_GPC_BR,
+ A7XX_DBGBUS_GPC_BV,
+ A7XX_DBGBUS_LARC,
+ A7XX_DBGBUS_HLSQ_SPTP,
+ A7XX_DBGBUS_RB_0,
+ A7XX_DBGBUS_RB_1,
+ A7XX_DBGBUS_RB_2,
+ A7XX_DBGBUS_RB_3,
+ A7XX_DBGBUS_RB_4,
+ A7XX_DBGBUS_RB_5,
+ A7XX_DBGBUS_UCHE_WRAPPER,
+ A7XX_DBGBUS_CCU_0,
+ A7XX_DBGBUS_CCU_1,
+ A7XX_DBGBUS_CCU_2,
+ A7XX_DBGBUS_CCU_3,
+ A7XX_DBGBUS_CCU_4,
+ A7XX_DBGBUS_CCU_5,
+ A7XX_DBGBUS_VFD_BR_0,
+ A7XX_DBGBUS_VFD_BR_1,
+ A7XX_DBGBUS_VFD_BR_2,
+ A7XX_DBGBUS_VFD_BR_3,
+ A7XX_DBGBUS_VFD_BR_4,
+ A7XX_DBGBUS_VFD_BR_5,
+ A7XX_DBGBUS_VFD_BV_0,
+ A7XX_DBGBUS_VFD_BV_1,
+ A7XX_DBGBUS_USP_0,
+ A7XX_DBGBUS_USP_1,
+ A7XX_DBGBUS_USP_2,
+ A7XX_DBGBUS_USP_3,
+ A7XX_DBGBUS_USP_4,
+ A7XX_DBGBUS_USP_5,
+ A7XX_DBGBUS_TP_0,
+ A7XX_DBGBUS_TP_1,
+ A7XX_DBGBUS_TP_2,
+ A7XX_DBGBUS_TP_3,
+ A7XX_DBGBUS_TP_4,
+ A7XX_DBGBUS_TP_5,
+ A7XX_DBGBUS_TP_6,
+ A7XX_DBGBUS_TP_7,
+ A7XX_DBGBUS_TP_8,
+ A7XX_DBGBUS_TP_9,
+ A7XX_DBGBUS_TP_10,
+ A7XX_DBGBUS_TP_11,
+ A7XX_DBGBUS_USPTP_0,
+ A7XX_DBGBUS_USPTP_1,
+ A7XX_DBGBUS_USPTP_2,
+ A7XX_DBGBUS_USPTP_3,
+ A7XX_DBGBUS_USPTP_4,
+ A7XX_DBGBUS_USPTP_5,
+ A7XX_DBGBUS_USPTP_6,
+ A7XX_DBGBUS_USPTP_7,
+ A7XX_DBGBUS_USPTP_8,
+ A7XX_DBGBUS_USPTP_9,
+ A7XX_DBGBUS_USPTP_10,
+ A7XX_DBGBUS_USPTP_11,
+ A7XX_DBGBUS_CCHE_0,
+ A7XX_DBGBUS_CCHE_1,
+ A7XX_DBGBUS_CCHE_2,
+};
+
+static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
+ {A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_1, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_0_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_1_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_2_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_3_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_4_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_5_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_6_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_7_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_CB_RAM, 0x390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_13_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_14_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_TAG, 0xc0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_2, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_TMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_SMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_STATE_DATA, 0x40, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_8_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_9_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_10_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_11_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_12_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+};
+
+static const u32 gen7_2_0_gpu_registers[] = {
+ 0x00000, 0x00000, 0x00002, 0x00002, 0x00011, 0x00012, 0x00016, 0x0001b,
+ 0x0001f, 0x00032, 0x00038, 0x0003c, 0x00042, 0x00042, 0x00044, 0x00044,
+ 0x00047, 0x00047, 0x00049, 0x0004a, 0x0004c, 0x0004c, 0x00050, 0x00050,
+ 0x00056, 0x00056, 0x00073, 0x0007d, 0x000ad, 0x000ae, 0x000b0, 0x000b0,
+ 0x000b4, 0x000b4, 0x000b8, 0x000b8, 0x000bc, 0x000bc, 0x000c0, 0x000c0,
+ 0x000c4, 0x000c4, 0x000c8, 0x000c8, 0x000cc, 0x000cc, 0x000d0, 0x000d0,
+ 0x000d4, 0x000d4, 0x000d8, 0x000d8, 0x000dc, 0x000dc, 0x000e0, 0x000e0,
+ 0x000e4, 0x000e4, 0x000e8, 0x000e8, 0x000ec, 0x000ec, 0x000f0, 0x000f0,
+ 0x000f4, 0x000f4, 0x000f8, 0x000f8, 0x00100, 0x00100, 0x00104, 0x0010c,
+ 0x0010f, 0x0011d, 0x0012f, 0x0012f, 0x00200, 0x0020d, 0x00211, 0x00211,
+ 0x00215, 0x00253, 0x00260, 0x00270, 0x00272, 0x00274, 0x00281, 0x0028d,
+ 0x00300, 0x00401, 0x00410, 0x00451, 0x00460, 0x004a3, 0x004c0, 0x004d1,
+ 0x00500, 0x00500, 0x00507, 0x0050b, 0x0050f, 0x0050f, 0x00511, 0x00511,
+ 0x00533, 0x00536, 0x00540, 0x00555, 0x00564, 0x00567, 0x00574, 0x00577,
+ 0x00584, 0x0059b, 0x005fb, 0x005ff, 0x00800, 0x00808, 0x00810, 0x00813,
+ 0x00820, 0x00821, 0x00823, 0x00827, 0x00830, 0x00834, 0x0083f, 0x00841,
+ 0x00843, 0x00847, 0x0084f, 0x00886, 0x008a0, 0x008ab, 0x008c0, 0x008c0,
+ 0x008c4, 0x008c6, 0x008d0, 0x008dd, 0x008e0, 0x008e6, 0x008f0, 0x008f3,
+ 0x00900, 0x00903, 0x00908, 0x00911, 0x00928, 0x0093e, 0x00942, 0x0094d,
+ 0x00980, 0x00984, 0x0098d, 0x0098f, 0x009b0, 0x009b4, 0x009c2, 0x009c9,
+ 0x009ce, 0x009d7, 0x009e0, 0x009e7, 0x00a00, 0x00a00, 0x00a02, 0x00a03,
+ 0x00a10, 0x00a4f, 0x00a61, 0x00a9f, 0x00ad0, 0x00adb, 0x00b00, 0x00b31,
+ 0x00b35, 0x00b3c, 0x00b40, 0x00b40, 0x00c00, 0x00c00, 0x00c02, 0x00c04,
+ 0x00c06, 0x00c06, 0x00c10, 0x00cd9, 0x00ce0, 0x00d0c, 0x00df0, 0x00df4,
+ 0x00e01, 0x00e02, 0x00e07, 0x00e0e, 0x00e10, 0x00e13, 0x00e17, 0x00e19,
+ 0x00e1b, 0x00e2b, 0x00e30, 0x00e32, 0x00e38, 0x00e3c, 0x00e40, 0x00e4b,
+ 0x0ec00, 0x0ec01, 0x0ec05, 0x0ec05, 0x0ec07, 0x0ec07, 0x0ec0a, 0x0ec0a,
+ 0x0ec12, 0x0ec12, 0x0ec26, 0x0ec28, 0x0ec2b, 0x0ec2d, 0x0ec2f, 0x0ec2f,
+ 0x0ec40, 0x0ec41, 0x0ec45, 0x0ec45, 0x0ec47, 0x0ec47, 0x0ec4a, 0x0ec4a,
+ 0x0ec52, 0x0ec52, 0x0ec66, 0x0ec68, 0x0ec6b, 0x0ec6d, 0x0ec6f, 0x0ec6f,
+ 0x0ec80, 0x0ec81, 0x0ec85, 0x0ec85, 0x0ec87, 0x0ec87, 0x0ec8a, 0x0ec8a,
+ 0x0ec92, 0x0ec92, 0x0eca6, 0x0eca8, 0x0ecab, 0x0ecad, 0x0ecaf, 0x0ecaf,
+ 0x0ecc0, 0x0ecc1, 0x0ecc5, 0x0ecc5, 0x0ecc7, 0x0ecc7, 0x0ecca, 0x0ecca,
+ 0x0ecd2, 0x0ecd2, 0x0ece6, 0x0ece8, 0x0eceb, 0x0eced, 0x0ecef, 0x0ecef,
+ 0x0ed00, 0x0ed01, 0x0ed05, 0x0ed05, 0x0ed07, 0x0ed07, 0x0ed0a, 0x0ed0a,
+ 0x0ed12, 0x0ed12, 0x0ed26, 0x0ed28, 0x0ed2b, 0x0ed2d, 0x0ed2f, 0x0ed2f,
+ 0x0ed40, 0x0ed41, 0x0ed45, 0x0ed45, 0x0ed47, 0x0ed47, 0x0ed4a, 0x0ed4a,
+ 0x0ed52, 0x0ed52, 0x0ed66, 0x0ed68, 0x0ed6b, 0x0ed6d, 0x0ed6f, 0x0ed6f,
+ 0x0ed80, 0x0ed81, 0x0ed85, 0x0ed85, 0x0ed87, 0x0ed87, 0x0ed8a, 0x0ed8a,
+ 0x0ed92, 0x0ed92, 0x0eda6, 0x0eda8, 0x0edab, 0x0edad, 0x0edaf, 0x0edaf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_gpu_registers), 8));
+
+static const u32 gen7_2_0_gmu_registers[] = {
+ 0x10001, 0x10001, 0x10003, 0x10003, 0x10401, 0x10401, 0x10403, 0x10403,
+ 0x10801, 0x10801, 0x10803, 0x10803, 0x10c01, 0x10c01, 0x10c03, 0x10c03,
+ 0x11001, 0x11001, 0x11003, 0x11003, 0x11401, 0x11401, 0x11403, 0x11403,
+ 0x11801, 0x11801, 0x11803, 0x11803, 0x11c01, 0x11c01, 0x11c03, 0x11c03,
+ 0x1a79b, 0x1a79b, 0x1a7ac, 0x1a7b9, 0x1a7dc, 0x1a7dd, 0x1a7e0, 0x1a7e1,
+ 0x1a803, 0x1a803, 0x1a805, 0x1a806, 0x1a84e, 0x1a84e, 0x1a856, 0x1a856,
+ 0x1f400, 0x1f40d, 0x1f40f, 0x1f411, 0x1f500, 0x1f500, 0x1f507, 0x1f507,
+ 0x1f509, 0x1f50b, 0x1f700, 0x1f701, 0x1f704, 0x1f706, 0x1f708, 0x1f709,
+ 0x1f70c, 0x1f70d, 0x1f710, 0x1f711, 0x1f713, 0x1f716, 0x1f720, 0x1f724,
+ 0x1f729, 0x1f729, 0x1f730, 0x1f747, 0x1f760, 0x1f761, 0x1f764, 0x1f76b,
+ 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c, 0x1f80f, 0x1f80f,
+ 0x1f811, 0x1f811, 0x1f813, 0x1f817, 0x1f819, 0x1f81c, 0x1f824, 0x1f82a,
+ 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f860, 0x1f860, 0x1f862, 0x1f864,
+ 0x1f868, 0x1f868, 0x1f870, 0x1f879, 0x1f87f, 0x1f87f, 0x1f888, 0x1f889,
+ 0x1f8a0, 0x1f8a2, 0x1f890, 0x1f892, 0x1f894, 0x1f896, 0x1f8a4, 0x1f8af,
+ 0x1f8b8, 0x1f8b9, 0x1f8c0, 0x1f8c1, 0x1f8c3, 0x1f8c4, 0x1f8d0, 0x1f8d0,
+ 0x1f8ec, 0x1f8ec, 0x1f8f0, 0x1f8f1, 0x1f910, 0x1f917, 0x1f920, 0x1f921,
+ 0x1f924, 0x1f925, 0x1f928, 0x1f929, 0x1f92c, 0x1f92d, 0x1f940, 0x1f940,
+ 0x1f942, 0x1f944, 0x1f948, 0x1f94a, 0x1f94f, 0x1f951, 0x1f954, 0x1f955,
+ 0x1f958, 0x1f95a, 0x1f95d, 0x1f95d, 0x1f962, 0x1f96b, 0x1f970, 0x1f979,
+ 0x1f97c, 0x1f97c, 0x1f980, 0x1f981, 0x1f984, 0x1f986, 0x1f992, 0x1f993,
+ 0x1f996, 0x1f99e, 0x1f9c0, 0x1f9c0, 0x1f9c5, 0x1f9d4, 0x1f9f0, 0x1f9f1,
+ 0x1f9f8, 0x1f9fa, 0x1f9fc, 0x1f9fc, 0x1fa00, 0x1fa03, 0x20000, 0x20012,
+ 0x20018, 0x20018, 0x2001a, 0x2001a, 0x20020, 0x20024, 0x20030, 0x20031,
+ 0x20034, 0x20036, 0x23801, 0x23801, 0x23803, 0x23803, 0x23805, 0x23805,
+ 0x23807, 0x23807, 0x23809, 0x23809, 0x2380b, 0x2380b, 0x2380d, 0x2380d,
+ 0x2380f, 0x2380f, 0x23811, 0x23811, 0x23813, 0x23813, 0x23815, 0x23815,
+ 0x23817, 0x23817, 0x23819, 0x23819, 0x2381b, 0x2381b, 0x2381d, 0x2381d,
+ 0x2381f, 0x23820, 0x23822, 0x23822, 0x23824, 0x23824, 0x23826, 0x23826,
+ 0x23828, 0x23828, 0x2382a, 0x2382a, 0x2382c, 0x2382c, 0x2382e, 0x2382e,
+ 0x23830, 0x23830, 0x23832, 0x23832, 0x23834, 0x23834, 0x23836, 0x23836,
+ 0x23838, 0x23838, 0x2383a, 0x2383a, 0x2383c, 0x2383c, 0x2383e, 0x2383e,
+ 0x23840, 0x23847, 0x23b00, 0x23b01, 0x23b03, 0x23b03, 0x23b05, 0x23b0e,
+ 0x23b10, 0x23b13, 0x23b15, 0x23b16, 0x23b20, 0x23b20, 0x23b28, 0x23b28,
+ 0x23b30, 0x23b30,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_gmu_registers), 8));
+
+static const u32 gen7_2_0_gmugx_registers[] = {
+ 0x1a400, 0x1a41f, 0x1a440, 0x1a45f, 0x1a480, 0x1a49f, 0x1a4c0, 0x1a4df,
+ 0x1a500, 0x1a51f, 0x1a540, 0x1a55f, 0x1a580, 0x1a59f, 0x1a5c0, 0x1a5df,
+ 0x1a600, 0x1a61f, 0x1a640, 0x1a65f, 0x1a780, 0x1a781, 0x1a783, 0x1a785,
+ 0x1a787, 0x1a789, 0x1a78b, 0x1a78d, 0x1a78f, 0x1a791, 0x1a793, 0x1a795,
+ 0x1a797, 0x1a799, 0x1a79c, 0x1a79d, 0x1a79f, 0x1a79f, 0x1a7a0, 0x1a7a1,
+ 0x1a7a3, 0x1a7a3, 0x1a7a8, 0x1a7ab, 0x1a7c0, 0x1a7c1, 0x1a7c4, 0x1a7c5,
+ 0x1a7c8, 0x1a7c9, 0x1a7cc, 0x1a7cd, 0x1a7d0, 0x1a7d1, 0x1a7d4, 0x1a7d5,
+ 0x1a7d8, 0x1a7d9, 0x1a7fc, 0x1a7fd, 0x1a800, 0x1a802, 0x1a804, 0x1a804,
+ 0x1a816, 0x1a816, 0x1a81e, 0x1a81e, 0x1a826, 0x1a826, 0x1a82e, 0x1a82e,
+ 0x1a836, 0x1a836, 0x1a83e, 0x1a83e, 0x1a846, 0x1a846, 0x1a860, 0x1a862,
+ 0x1a864, 0x1a867, 0x1a870, 0x1a870, 0x1a883, 0x1a884, 0x1a8c0, 0x1a8c2,
+ 0x1a8c4, 0x1a8c7, 0x1a8d0, 0x1a8d3, 0x1a900, 0x1a92b, 0x1a940, 0x1a940,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_gmugx_registers), 8));
+
+static const u32 gen7_2_0_noncontext_pipe_br_registers[] = {
+ 0x00887, 0x0088c, 0x08600, 0x08600, 0x08602, 0x08602, 0x08610, 0x0861b,
+ 0x08620, 0x08620, 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640,
+ 0x09600, 0x09600, 0x09602, 0x09603, 0x0960a, 0x09616, 0x09624, 0x0963a,
+ 0x09640, 0x09640, 0x09e00, 0x09e00, 0x09e02, 0x09e07, 0x09e0a, 0x09e16,
+ 0x09e19, 0x09e19, 0x09e1c, 0x09e1c, 0x09e20, 0x09e25, 0x09e30, 0x09e31,
+ 0x09e40, 0x09e51, 0x09e64, 0x09e64, 0x09e70, 0x09e72, 0x09e78, 0x09e79,
+ 0x09e80, 0x09fff, 0x0a600, 0x0a600, 0x0a603, 0x0a603, 0x0a610, 0x0a61f,
+ 0x0a630, 0x0a631, 0x0a638, 0x0a63c,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_noncontext_pipe_br_registers), 8));
+
+static const u32 gen7_2_0_noncontext_pipe_bv_registers[] = {
+ 0x00887, 0x0088c, 0x08600, 0x08600, 0x08602, 0x08602, 0x08610, 0x0861b,
+ 0x08620, 0x08620, 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640,
+ 0x09600, 0x09600, 0x09602, 0x09603, 0x0960a, 0x09616, 0x09624, 0x0963a,
+ 0x09640, 0x09640, 0x09e00, 0x09e00, 0x09e02, 0x09e07, 0x09e0a, 0x09e16,
+ 0x09e19, 0x09e19, 0x09e1c, 0x09e1c, 0x09e20, 0x09e25, 0x09e30, 0x09e31,
+ 0x09e40, 0x09e51, 0x09e64, 0x09e64, 0x09e70, 0x09e72, 0x09e78, 0x09e79,
+ 0x09e80, 0x09fff, 0x0a600, 0x0a600, 0x0a603, 0x0a603, 0x0a610, 0x0a61f,
+ 0x0a630, 0x0a631, 0x0a638, 0x0a63c,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_noncontext_pipe_bv_registers), 8));
+
+static const u32 gen7_2_0_noncontext_rb_rac_pipe_br_registers[] = {
+ 0x08e10, 0x08e1c, 0x08e20, 0x08e25, 0x08e51, 0x08e5a, 0x08ea0, 0x08ea3,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_noncontext_rb_rac_pipe_br_registers), 8));
+
+static const u32 gen7_2_0_noncontext_rb_rbp_pipe_br_registers[] = {
+ 0x08e01, 0x08e01, 0x08e04, 0x08e04, 0x08e06, 0x08e09, 0x08e0c, 0x08e0c,
+ 0x08e28, 0x08e28, 0x08e2c, 0x08e35, 0x08e3b, 0x08e40, 0x08e50, 0x08e50,
+ 0x08e5b, 0x08e5d, 0x08e5f, 0x08e5f, 0x08e61, 0x08e61, 0x08e63, 0x08e66,
+ 0x08e68, 0x08e69, 0x08e70, 0x08e79, 0x08e80, 0x08e8f,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_noncontext_rb_rbp_pipe_br_registers), 8));
+
+static const u32 gen7_2_0_gras_cluster_gras_pipe_br_registers[] = {
+ 0x08000, 0x0800c, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
+ 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
+ 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08113, 0x08120, 0x0813f,
+ 0x08400, 0x08406, 0x0840a, 0x0840b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_gras_cluster_gras_pipe_br_registers), 8));
+
+static const u32 gen7_2_0_gras_cluster_gras_pipe_bv_registers[] = {
+ 0x08000, 0x0800c, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
+ 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
+ 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08113, 0x08120, 0x0813f,
+ 0x08400, 0x08406, 0x0840a, 0x0840b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_gras_cluster_gras_pipe_bv_registers), 8));
+
+static const u32 gen7_2_0_rb_rac_cluster_ps_pipe_br_registers[] = {
+ 0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811,
+ 0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829,
+ 0x0882b, 0x0882e, 0x08831, 0x08831, 0x08833, 0x08836, 0x08839, 0x08839,
+ 0x0883b, 0x0883e, 0x08841, 0x08841, 0x08843, 0x08846, 0x08849, 0x08849,
+ 0x0884b, 0x0884e, 0x08851, 0x08851, 0x08853, 0x08856, 0x08859, 0x08859,
+ 0x0885b, 0x0885e, 0x08860, 0x08864, 0x08870, 0x08870, 0x08873, 0x08876,
+ 0x08878, 0x08879, 0x08882, 0x08885, 0x08887, 0x08889, 0x08891, 0x08891,
+ 0x08898, 0x08899, 0x088c0, 0x088c1, 0x088e5, 0x088e5, 0x088f4, 0x088f5,
+ 0x08a00, 0x08a05, 0x08a10, 0x08a15, 0x08a20, 0x08a25, 0x08a30, 0x08a35,
+ 0x08c00, 0x08c01, 0x08c18, 0x08c1f, 0x08c26, 0x08c34,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_rb_rac_cluster_ps_pipe_br_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = {
+ 0x0a980, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7, 0x0a9aa, 0x0a9aa,
+ 0x0a9ae, 0x0a9b0, 0x0a9b2, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
+ 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e0, 0x0a9fc, 0x0aa00, 0x0aa00,
+ 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab03, 0x0ab05, 0x0ab05,
+ 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = {
+ 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8,
+ 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5,
+ 0x0a9ba, 0x0a9bc, 0x0a9c5, 0x0a9c5, 0x0a9e0, 0x0a9f9, 0x0aa00, 0x0aa01,
+ 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05, 0x0ab0a, 0x0ab1b,
+ 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = {
+ 0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae,
+ 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3,
+ 0x0a9c5, 0x0a9c5, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa01, 0x0aa01,
+ 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22,
+ 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = {
+ 0x0a9b0, 0x0a9b0, 0x0a9b2, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
+ 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc,
+ 0x0aa00, 0x0aa00, 0x0aa31, 0x0aa31, 0x0ab00, 0x0ab01,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = {
+ 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9c5, 0x0a9c5,
+ 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0ab00, 0x0ab00,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = {
+ 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3,
+ 0x0a9c5, 0x0a9c5, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31,
+ 0x0ab00, 0x0ab01,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = {
+ 0x0a800, 0x0a801, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
+ 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a832, 0x0a835, 0x0a83a, 0x0a83a,
+ 0x0a83c, 0x0a83c, 0x0a83f, 0x0a841, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862,
+ 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a872, 0x0a872,
+ 0x0a88c, 0x0a88e, 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898,
+ 0x0a89a, 0x0a89d, 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab03,
+ 0x0ab05, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840,
+ 0x0a85c, 0x0a85d, 0x0a862, 0x0a864, 0x0a868, 0x0a868, 0x0a870, 0x0a871,
+ 0x0a88d, 0x0a88e, 0x0a893, 0x0a895, 0x0a899, 0x0a899, 0x0a8a0, 0x0a8af,
+ 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05, 0x0ab0a, 0x0ab1b,
+ 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = {
+ 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861,
+ 0x0a863, 0x0a868, 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a899,
+ 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = {
+ 0x0a800, 0x0a801, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
+ 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a832, 0x0a835, 0x0a83a, 0x0a83a,
+ 0x0a83c, 0x0a83c, 0x0a83f, 0x0a841, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862,
+ 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a872, 0x0a872,
+ 0x0a88c, 0x0a88e, 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898,
+ 0x0a89a, 0x0a89d, 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab02,
+ 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = {
+ 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840,
+ 0x0a85c, 0x0a85d, 0x0a862, 0x0a864, 0x0a868, 0x0a868, 0x0a870, 0x0a871,
+ 0x0a88d, 0x0a88e, 0x0a893, 0x0a895, 0x0a899, 0x0a899, 0x0a8a0, 0x0a8af,
+ 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers), 8));
+
+static const u32 gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = {
+ 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a82d, 0x0a82d,
+ 0x0a82f, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861,
+ 0x0a863, 0x0a868, 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a899,
+ 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers), 8));
+
+static const u32 gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = {
+ 0x0af88, 0x0af8b,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8));
+
+static const struct gen7_sel_reg gen7_2_0_rb_rac_sel = {
+ .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .val = 0x0,
+};
+
+static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
+ .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .val = 0x9,
+};
+
+static struct gen7_cluster_registers gen7_2_0_clusters[] = {
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_2_0_noncontext_pipe_br_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ gen7_2_0_noncontext_pipe_bv_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ gen7_0_0_noncontext_pipe_lpac_registers, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_2_0_noncontext_rb_rac_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
+ { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ gen7_2_0_noncontext_rb_rbp_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_2_0_gras_cluster_gras_pipe_br_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_2_0_gras_cluster_gras_pipe_bv_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_2_0_gras_cluster_gras_pipe_br_registers, },
+ { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_2_0_gras_cluster_gras_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_pc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_pc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
+ { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
+ { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
+ { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
+};
+
+static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 },
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
+};
+
+static const u32 gen7_2_0_dbgc_registers[] = {
+ 0x005ff, 0x0061c, 0x0061e, 0x00634, 0x00640, 0x0065e, 0x00679, 0x0067e,
+ 0x00699, 0x00699, 0x0069b, 0x0069e, 0x006a0, 0x006a3, 0x006c0, 0x006c1,
+ 0x18400, 0x1841c, 0x1841e, 0x18434, 0x18440, 0x1845c, 0x18479, 0x1847c,
+ 0x18580, 0x18581,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_dbgc_registers), 8));
+
+static const u32 gen7_2_0_rscc_registers[] = {
+ 0x14000, 0x14036, 0x14040, 0x14047, 0x14080, 0x14084, 0x14089, 0x1408c,
+ 0x14091, 0x14094, 0x14099, 0x1409c, 0x140a1, 0x140a4, 0x140a9, 0x140ac,
+ 0x14100, 0x14104, 0x14114, 0x14119, 0x14124, 0x14132, 0x14154, 0x1416b,
+ 0x14340, 0x14342, 0x14344, 0x1437c, 0x143f0, 0x143f8, 0x143fa, 0x143fe,
+ 0x14400, 0x14404, 0x14406, 0x1440a, 0x1440c, 0x14410, 0x14412, 0x14416,
+ 0x14418, 0x1441c, 0x1441e, 0x14422, 0x14424, 0x14424, 0x14498, 0x144a0,
+ 0x144a2, 0x144a6, 0x144a8, 0x144ac, 0x144ae, 0x144b2, 0x144b4, 0x144b8,
+ 0x144ba, 0x144be, 0x144c0, 0x144c4, 0x144c6, 0x144ca, 0x144cc, 0x144cc,
+ 0x14540, 0x14548, 0x1454a, 0x1454e, 0x14550, 0x14554, 0x14556, 0x1455a,
+ 0x1455c, 0x14560, 0x14562, 0x14566, 0x14568, 0x1456c, 0x1456e, 0x14572,
+ 0x14574, 0x14574, 0x145e8, 0x145f0, 0x145f2, 0x145f6, 0x145f8, 0x145fc,
+ 0x145fe, 0x14602, 0x14604, 0x14608, 0x1460a, 0x1460e, 0x14610, 0x14614,
+ 0x14616, 0x1461a, 0x1461c, 0x1461c, 0x14690, 0x14698, 0x1469a, 0x1469e,
+ 0x146a0, 0x146a4, 0x146a6, 0x146aa, 0x146ac, 0x146b0, 0x146b2, 0x146b6,
+ 0x146b8, 0x146bc, 0x146be, 0x146c2, 0x146c4, 0x146c4, 0x14738, 0x14740,
+ 0x14742, 0x14746, 0x14748, 0x1474c, 0x1474e, 0x14752, 0x14754, 0x14758,
+ 0x1475a, 0x1475e, 0x14760, 0x14764, 0x14766, 0x1476a, 0x1476c, 0x1476c,
+ 0x147e0, 0x147e8, 0x147ea, 0x147ee, 0x147f0, 0x147f4, 0x147f6, 0x147fa,
+ 0x147fc, 0x14800, 0x14802, 0x14806, 0x14808, 0x1480c, 0x1480e, 0x14812,
+ 0x14814, 0x14814, 0x14888, 0x14890, 0x14892, 0x14896, 0x14898, 0x1489c,
+ 0x1489e, 0x148a2, 0x148a4, 0x148a8, 0x148aa, 0x148ae, 0x148b0, 0x148b4,
+ 0x148b6, 0x148ba, 0x148bc, 0x148bc, 0x14930, 0x14938, 0x1493a, 0x1493e,
+ 0x14940, 0x14944, 0x14946, 0x1494a, 0x1494c, 0x14950, 0x14952, 0x14956,
+ 0x14958, 0x1495c, 0x1495e, 0x14962, 0x14964, 0x14964,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_rscc_registers), 8));
+
+static const u32 gen7_2_0_cpr_registers[] = {
+ 0x26800, 0x26805, 0x26808, 0x2680c, 0x26814, 0x26814, 0x2681c, 0x2681c,
+ 0x26820, 0x26838, 0x26840, 0x26840, 0x26848, 0x26848, 0x26850, 0x26850,
+ 0x26880, 0x2689e, 0x26980, 0x269b0, 0x269c0, 0x269c8, 0x269e0, 0x269ee,
+ 0x269fb, 0x269ff, 0x26a02, 0x26a07, 0x26a09, 0x26a0b, 0x26a10, 0x26b0f,
+ 0x27440, 0x27441, 0x27444, 0x27444, 0x27480, 0x274a2, 0x274ac, 0x274ad,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_cpr_registers), 8));
+
+static const u32 gen7_2_0_dpm_lkg_registers[] = {
+ 0x21c00, 0x21c00, 0x21c08, 0x21c09, 0x21c0e, 0x21c0f, 0x21c4f, 0x21c50,
+ 0x21c52, 0x21c52, 0x21c54, 0x21c56, 0x21c58, 0x21c5a, 0x21c5c, 0x21c60,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_lkg_registers), 8));
+
+static const u32 gen7_2_0_gpucc_registers[] = {
+ 0x24000, 0x2400f, 0x24400, 0x2440f, 0x24800, 0x24805, 0x24c00, 0x24cff,
+ 0x25400, 0x25404, 0x25800, 0x25804, 0x25c00, 0x25c04, 0x26000, 0x26004,
+ 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430, 0x26432, 0x26433,
+ 0x26441, 0x2644b, 0x2644d, 0x26457, 0x26466, 0x26468, 0x26478, 0x2647a,
+ 0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a4, 0x264c5, 0x264c7,
+ 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc, 0x2651c, 0x2651e,
+ 0x26540, 0x26576, 0x26600, 0x26616, 0x26620, 0x2662d, 0x26630, 0x26631,
+ 0x26635, 0x26635, 0x26637, 0x26637, 0x2663a, 0x2663a, 0x26642, 0x26642,
+ 0x26656, 0x26658, 0x2665b, 0x2665d, 0x2665f, 0x26662,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_gpucc_registers), 8));
+
+static const u32 gen7_2_0_cx_misc_registers[] = {
+ 0x27800, 0x27800, 0x27810, 0x27814, 0x27820, 0x27824, 0x27832, 0x27857,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_cx_misc_registers), 8));
+
+static const u32 gen7_2_0_dpm_registers[] = {
+ 0x1aa00, 0x1aa06, 0x1aa09, 0x1aa0a, 0x1aa0c, 0x1aa0d, 0x1aa0f, 0x1aa12,
+ 0x1aa14, 0x1aa47, 0x1aa50, 0x1aa51,
+ UINT_MAX, UINT_MAX,
+};
+static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
+
+static struct gen7_reg_list gen7_2_0_reg_list[] = {
+ { gen7_2_0_gpu_registers, NULL },
+ { gen7_2_0_cx_misc_registers, NULL },
+ { gen7_2_0_dpm_registers, NULL },
+ { gen7_2_0_dbgc_registers, NULL },
+ { NULL, NULL },
+};
+
+static const u32 *gen7_2_0_external_core_regs[] = {
+ gen7_2_0_gpucc_registers,
+ gen7_2_0_cpr_registers,
+ gen7_2_0_dpm_lkg_registers,
+};
+#endif /*_ADRENO_GEN7_2_0_SNAPSHOT_H */
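The register tables above all share one layout: a flat array of (first, last) register pairs, inclusive on both ends, terminated by a UINT_MAX/UINT_MAX sentinel; the static_assert after each table simply checks that its size is a multiple of 8 bytes, i.e. an even number of u32 entries. As a minimal sketch (not part of this patch, using a hypothetical helper name), a consumer sizing a snapshot buffer could walk such a table like this:

static size_t gen7_count_range_dwords(const u32 *ranges)
{
	size_t count = 0;
	int i;

	/* Each pair describes an inclusive [first, last] register range. */
	for (i = 0; ranges[i] != UINT_MAX; i += 2)
		count += ranges[i + 1] - ranges[i] + 1;

	return count;
}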
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index bc14df96feb0..77526892eb8c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -48,6 +48,7 @@ enum adreno_family {
ADRENO_6XX_GEN4, /* a660 family */
ADRENO_7XX_GEN1, /* a730 family */
ADRENO_7XX_GEN2, /* a740 family */
+ ADRENO_7XX_GEN3, /* a750 family */
};
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
@@ -77,7 +78,7 @@ struct adreno_reglist {
};
extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
-extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a730_hwcg[], a740_hwcg[];
+extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a702_hwcg[], a730_hwcg[], a740_hwcg[];
struct adreno_speedbin {
uint16_t fuse;
@@ -256,6 +257,11 @@ static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 305);
}
+static inline bool adreno_is_a305b(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x03000512;
+}
+
static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
{
/* yes, 307, because a305c is 306 */
@@ -382,6 +388,20 @@ static inline int adreno_is_a690(const struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x06090000;
}
+static inline int adreno_is_a702(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x07000200;
+}
+
+static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+
+ /* TODO: A612 */
+ return adreno_is_a610(gpu) || adreno_is_a702(gpu);
+}
+
/* check for a615, a616, a618, a619 or any a630 derivatives */
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
@@ -423,12 +443,17 @@ static inline int adreno_is_a740(struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x43050a01;
}
-/* Placeholder to make future diffs smaller */
+static inline int adreno_is_a750(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x43051401;
+}
+
static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
return false;
- return gpu->info->family == ADRENO_7XX_GEN2;
+ return gpu->info->family == ADRENO_7XX_GEN2 ||
+ gpu->info->family == ADRENO_7XX_GEN3;
}
static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
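In the helpers above, the a750 (chip id 0x43051401) is reported as ADRENO_7XX_GEN3 but still matches adreno_is_a740_family(), so existing a740-family code paths pick it up automatically. A minimal usage sketch (hypothetical caller, not taken from this patch) would check the more specific helper first:

	if (adreno_is_a750(gpu)) {
		/* a750 (ADRENO_7XX_GEN3) specific setup */
	} else if (adreno_is_a740_family(gpu)) {
		/* path shared with the rest of the a740 family */
	}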
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 8a4a2d161a29..7067376e25e1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -3,50 +3,28 @@
/* Autogenerated file, DO NOT EDIT manually!
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
-
-Copyright (C) 2013-2023 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85856 bytes, from Fri Feb 23 13:07:00 2024)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
*/
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#define assert(x) BUG_ON(!(x))
+#else
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+#define __struct_cast(X)
+#else
+#define __struct_cast(X) (struct X)
+#endif
enum vgt_event_type {
VS_DEALLOC = 0,
@@ -94,12 +72,14 @@ enum vgt_event_type {
LRZ_FLUSH = 38,
BLIT_OP_FILL_2D = 39,
BLIT_OP_COPY_2D = 40,
+ UNK_40 = 40,
BLIT_OP_SCALE_2D = 42,
CONTEXT_DONE_2D = 43,
UNK_2C = 44,
UNK_2D = 45,
CACHE_INVALIDATE = 49,
LABEL = 63,
+ DUMMY_EVENT = 1,
CCU_INVALIDATE_DEPTH = 24,
CCU_INVALIDATE_COLOR = 25,
CCU_RESOLVE_CLEAN = 26,
@@ -192,7 +172,7 @@ enum pc_di_vis_cull_mode {
};
enum adreno_pm4_packet_type {
- CP_TYPE0_PKT = 0,
+ CP_TYPE0_PKT = 0x00000000,
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
@@ -224,6 +204,7 @@ enum adreno_pm4_type3_packets {
CP_COND_WRITE = 69,
CP_COND_WRITE5 = 69,
CP_EVENT_WRITE = 70,
+ CP_EVENT_WRITE7 = 70,
CP_EVENT_WRITE_SHD = 88,
CP_EVENT_WRITE_CFL = 89,
CP_EVENT_WRITE_ZPD = 91,
@@ -318,6 +299,7 @@ enum adreno_pm4_type3_packets {
CP_WAIT_TWO_REGS = 112,
CP_MEMCPY = 117,
CP_SET_BIN_DATA5_OFFSET = 46,
+ CP_SET_UNK_BIN_DATA = 45,
CP_CONTEXT_SWITCH = 84,
CP_SET_CTXSWITCH_IB = 85,
CP_REG_WRITE = 109,
@@ -325,13 +307,16 @@ enum adreno_pm4_type3_packets {
CP_END_BIN = 81,
CP_PREEMPT_DISABLE = 108,
CP_WAIT_TIMESTAMP = 20,
+ CP_GLOBAL_TIMESTAMP = 21,
+ CP_LOCAL_TIMESTAMP = 22,
CP_THREAD_CONTROL = 23,
+ CP_RESOURCE_LIST = 24,
+ CP_BV_BR_COUNT_OPS = 27,
+ CP_MODIFY_TIMESTAMP = 28,
CP_CONTEXT_REG_BUNCH2 = 93,
- CP_UNK15 = 21,
- CP_UNK16 = 22,
- CP_UNK18 = 24,
- CP_UNK1B = 27,
- CP_UNK49 = 73,
+ CP_MEM_TO_SCRATCH_MEM = 73,
+ CP_FIXED_STRIDE_DRAW_TABLE = 127,
+ CP_RESET_CONTEXT_STATE = 31,
};
enum adreno_state_block {
@@ -456,6 +441,13 @@ enum cp_cond_function {
WRITE_GT = 6,
};
+enum poll_memory_type {
+ POLL_REGISTER = 0,
+ POLL_MEMORY = 1,
+ POLL_SCRATCH = 2,
+ POLL_ON_CHIP = 3,
+};
+
enum render_mode_cmd {
BYPASS = 1,
BINNING = 2,
@@ -465,6 +457,19 @@ enum render_mode_cmd {
END2D = 8,
};
+enum event_write_src {
+ EV_WRITE_USER_32B = 0,
+ EV_WRITE_USER_64B = 1,
+ EV_WRITE_TIMESTAMP_SUM = 2,
+ EV_WRITE_ALWAYSON = 3,
+ EV_WRITE_REGS_CONTENT = 4,
+};
+
+enum event_write_dst {
+ EV_DST_RAM = 0,
+ EV_DST_ONCHIP = 1,
+};
+
enum cp_blit_cmd {
BLIT_OP_FILL = 0,
BLIT_OP_COPY = 1,
@@ -492,12 +497,31 @@ enum pseudo_reg {
SECURE_SAVE_ADDR = 2,
NON_PRIV_SAVE_ADDR = 3,
COUNTER = 4,
+ DRAW_STRM_ADDRESS = 8,
+ DRAW_STRM_SIZE_ADDRESS = 9,
+ PRIM_STRM_ADDRESS = 10,
+ UNK_STRM_ADDRESS = 11,
+ UNK_STRM_SIZE_ADDRESS = 12,
+ BINDLESS_BASE_0_ADDR = 16,
+ BINDLESS_BASE_1_ADDR = 17,
+ BINDLESS_BASE_2_ADDR = 18,
+ BINDLESS_BASE_3_ADDR = 19,
+ BINDLESS_BASE_4_ADDR = 20,
+ BINDLESS_BASE_5_ADDR = 21,
+ BINDLESS_BASE_6_ADDR = 22,
+};
+
+enum source_type {
+ SOURCE_REG = 0,
+ SOURCE_SCRATCH_MEM = 1,
};
enum compare_mode {
PRED_TEST = 1,
REG_COMPARE = 2,
RENDER_MODE = 3,
+ REG_COMPARE_IMM = 4,
+ THREAD_MODE = 5,
};
enum ctxswitch_ib {
@@ -514,6 +538,30 @@ enum reg_tracker {
TRACK_LRZ = 8,
};
+enum ts_wait_value_src {
+ TS_WAIT_GE_32B = 0,
+ TS_WAIT_GE_64B = 1,
+ TS_WAIT_GE_TIMESTAMP_SUM = 2,
+};
+
+enum ts_wait_type {
+ TS_WAIT_RAM = 0,
+ TS_WAIT_ONCHIP = 1,
+};
+
+enum pipe_count_op {
+ PIPE_CLEAR_BV_BR = 1,
+ PIPE_SET_BR_OFFSET = 2,
+ PIPE_BR_WAIT_FOR_BV = 3,
+ PIPE_BV_WAIT_FOR_BR = 4,
+};
+
+enum timestamp_op {
+ MODIFY_TIMESTAMP_CLEAR = 0,
+ MODIFY_TIMESTAMP_ADD_GLOBAL = 1,
+ MODIFY_TIMESTAMP_ADD_LOCAL = 2,
+};
+
enum cp_thread {
CP_SET_THREAD_BR = 1,
CP_SET_THREAD_BV = 2,
@@ -557,7 +605,8 @@ static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
{
- return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
#define REG_CP_LOAD_STATE4_0 0x00000000
@@ -597,7 +646,8 @@ static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val)
#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2
static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val)
{
- return ((val >> 2) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK;
}
#define REG_CP_LOAD_STATE4_2 0x00000002
@@ -645,7 +695,8 @@ static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
{
- return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+ assert(!(val & 0x3));
+ return (((val >> 2)) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
}
#define REG_CP_LOAD_STATE6_2 0x00000002
@@ -834,37 +885,36 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK;
}
-
-#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
-#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
+#define REG_A5XX_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
+ return ((val) << A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
}
-#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
-#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
+#define REG_A5XX_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
+ return ((val) << A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
}
-#define REG_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004
+#define REG_A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004
-#define REG_CP_DRAW_INDX_OFFSET_6 0x00000006
-#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
+#define REG_A5XX_CP_DRAW_INDX_OFFSET_6 0x00000006
+#define A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
+ return ((val) << A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
}
#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint64_t val)
{
return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
}
@@ -911,7 +961,6 @@ static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type v
#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000
#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000
-
#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
@@ -920,7 +969,6 @@ static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
}
-
#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001
#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff
#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0
@@ -973,7 +1021,6 @@ static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_t
#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000
#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 0x00020000
-
#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
@@ -998,7 +1045,6 @@ static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
}
-
#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
@@ -1093,37 +1139,93 @@ static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002
+#define REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
+#define REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005
-#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005
+#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX 0x00000003
+#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES 0x00000005
-#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDEXED 0x00000003
+#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006
-#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDEXED 0x00000005
+#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000008
-#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDEXED 0x00000006
+#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
-#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDEXED 0x00000008
+#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT 0x00000005
+#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000007
-#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT 0x00000003
+#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX 0x00000003
-#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT 0x00000005
+#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES 0x00000005
-#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT 0x00000007
+#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006
+#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT 0x00000008
-#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDIRECT_INDEXED 0x00000003
+#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE 0x0000000a
-#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDIRECT_INDEXED 0x00000005
+#define REG_CP_DRAW_AUTO_0 0x00000000
+#define CP_DRAW_AUTO_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_AUTO_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT) & CP_DRAW_AUTO_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_AUTO_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_AUTO_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT) & CP_DRAW_AUTO_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_AUTO_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_AUTO_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_AUTO_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_AUTO_0_VIS_CULL__SHIFT) & CP_DRAW_AUTO_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_AUTO_0_INDEX_SIZE__MASK 0x00000c00
+#define CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t CP_DRAW_AUTO_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT) & CP_DRAW_AUTO_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_AUTO_0_PATCH_TYPE__MASK 0x00003000
+#define CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t CP_DRAW_AUTO_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT) & CP_DRAW_AUTO_0_PATCH_TYPE__MASK;
+}
+#define CP_DRAW_AUTO_0_GS_ENABLE 0x00010000
+#define CP_DRAW_AUTO_0_TESS_ENABLE 0x00020000
-#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT_INDEXED 0x00000006
+#define REG_CP_DRAW_AUTO_1 0x00000001
+#define CP_DRAW_AUTO_1_NUM_INSTANCES__MASK 0xffffffff
+#define CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT 0
+static inline uint32_t CP_DRAW_AUTO_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT) & CP_DRAW_AUTO_1_NUM_INSTANCES__MASK;
+}
-#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT_INDEXED 0x00000008
+#define REG_CP_DRAW_AUTO_NUM_VERTICES_BASE 0x00000002
-#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT_INDEXED 0x0000000a
+#define REG_CP_DRAW_AUTO_4 0x00000004
+#define CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK 0xffffffff
+#define CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT 0
+static inline uint32_t CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET(uint32_t val)
+{
+ return ((val) << CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT) & CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK;
+}
+
+#define REG_CP_DRAW_AUTO_5 0x00000005
+#define CP_DRAW_AUTO_5_STRIDE__MASK 0xffffffff
+#define CP_DRAW_AUTO_5_STRIDE__SHIFT 0
+static inline uint32_t CP_DRAW_AUTO_5_STRIDE(uint32_t val)
+{
+ return ((val) << CP_DRAW_AUTO_5_STRIDE__SHIFT) & CP_DRAW_AUTO_5_STRIDE__MASK;
+}
#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000
#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001
@@ -1147,7 +1249,7 @@ static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val)
#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001
-static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define REG_CP_SET_DRAW_STATE_(i0) (0x00000000 + 0x3*(i0))
static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
@@ -1693,8 +1795,12 @@ static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
}
#define CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008
-#define CP_COND_WRITE5_0_POLL_MEMORY 0x00000010
-#define CP_COND_WRITE5_0_POLL_SCRATCH 0x00000020
+#define CP_COND_WRITE5_0_POLL__MASK 0x00000030
+#define CP_COND_WRITE5_0_POLL__SHIFT 4
+static inline uint32_t CP_COND_WRITE5_0_POLL(enum poll_memory_type val)
+{
+ return ((val) << CP_COND_WRITE5_0_POLL__SHIFT) & CP_COND_WRITE5_0_POLL__MASK;
+}
#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100
#define REG_CP_COND_WRITE5_1 0x00000001
@@ -1793,8 +1899,12 @@ static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val)
return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK;
}
#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008
-#define CP_WAIT_REG_MEM_0_POLL_MEMORY 0x00000010
-#define CP_WAIT_REG_MEM_0_POLL_SCRATCH 0x00000020
+#define CP_WAIT_REG_MEM_0_POLL__MASK 0x00000030
+#define CP_WAIT_REG_MEM_0_POLL__SHIFT 4
+static inline uint32_t CP_WAIT_REG_MEM_0_POLL(enum poll_memory_type val)
+{
+ return ((val) << CP_WAIT_REG_MEM_0_POLL__SHIFT) & CP_WAIT_REG_MEM_0_POLL__MASK;
+}
#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100
#define REG_CP_WAIT_REG_MEM_1 0x00000001
@@ -1960,14 +2070,14 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
-#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
-{
- return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
-}
#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
+}
#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
@@ -2033,6 +2143,90 @@ static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
#define REG_CP_EVENT_WRITE_3 0x00000003
+#define REG_CP_EVENT_WRITE7_0 0x00000000
+#define CP_EVENT_WRITE7_0_EVENT__MASK 0x000000ff
+#define CP_EVENT_WRITE7_0_EVENT__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE7_0_EVENT(enum vgt_event_type val)
+{
+ return ((val) << CP_EVENT_WRITE7_0_EVENT__SHIFT) & CP_EVENT_WRITE7_0_EVENT__MASK;
+}
+#define CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT 0x00001000
+#define CP_EVENT_WRITE7_0_SAMPLE_COUNT_END_OFFSET 0x00002000
+#define CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT_DIFF 0x00004000
+#define CP_EVENT_WRITE7_0_INC_BV_COUNT 0x00010000
+#define CP_EVENT_WRITE7_0_INC_BR_COUNT 0x00020000
+#define CP_EVENT_WRITE7_0_CLEAR_RENDER_RESOURCE 0x00040000
+#define CP_EVENT_WRITE7_0_CLEAR_LRZ_RESOURCE 0x00080000
+#define CP_EVENT_WRITE7_0_WRITE_SRC__MASK 0x00700000
+#define CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT 20
+static inline uint32_t CP_EVENT_WRITE7_0_WRITE_SRC(enum event_write_src val)
+{
+ return ((val) << CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT) & CP_EVENT_WRITE7_0_WRITE_SRC__MASK;
+}
+#define CP_EVENT_WRITE7_0_WRITE_DST__MASK 0x01000000
+#define CP_EVENT_WRITE7_0_WRITE_DST__SHIFT 24
+static inline uint32_t CP_EVENT_WRITE7_0_WRITE_DST(enum event_write_dst val)
+{
+ return ((val) << CP_EVENT_WRITE7_0_WRITE_DST__SHIFT) & CP_EVENT_WRITE7_0_WRITE_DST__MASK;
+}
+#define CP_EVENT_WRITE7_0_WRITE_ENABLED 0x08000000
+
+#define REG_EV_DST_RAM_CP_EVENT_WRITE7_1 0x00000001
+#define EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__MASK 0xffffffff
+#define EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__MASK;
+}
+
+#define REG_EV_DST_RAM_CP_EVENT_WRITE7_2 0x00000002
+#define EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__MASK 0xffffffff
+#define EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__MASK;
+}
+
+#define REG_EV_DST_RAM_CP_EVENT_WRITE7_3 0x00000003
+#define EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK 0xffffffff
+#define EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT 0
+static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0(uint32_t val)
+{
+ return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK;
+}
+
+#define REG_EV_DST_RAM_CP_EVENT_WRITE7_4 0x00000004
+#define EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK 0xffffffff
+#define EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT 0
+static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1(uint32_t val)
+{
+ return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK;
+}
+
+#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_1 0x00000001
+#define EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK 0xffffffff
+#define EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT 0
+static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0(uint32_t val)
+{
+ return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK;
+}
+
+#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_3 0x00000003
+#define EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK 0xffffffff
+#define EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT 0
+static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0(uint32_t val)
+{
+ return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK;
+}
+
+#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_4 0x00000004
+#define EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK 0xffffffff
+#define EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT 0
+static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1(uint32_t val)
+{
+ return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK;
+}
+
#define REG_CP_BLIT_0 0x00000000
#define CP_BLIT_0_OP__MASK 0x0000000f
#define CP_BLIT_0_OP__SHIFT 0
@@ -2125,7 +2319,6 @@ static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
-
#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
@@ -2154,7 +2347,6 @@ static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
}
-
#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
@@ -2205,10 +2397,10 @@ static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_marker val)
return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK;
}
-static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define REG_A6XX_CP_SET_PSEUDO_REG_(i0) (0x00000000 + 0x3*(i0))
static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
-#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x000007ff
#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
{
@@ -2238,6 +2430,18 @@ static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val)
{
return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK;
}
+#define A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK 0x0003ffff
+#define A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT 0
+static inline uint32_t A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT) & A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK;
+}
+#define A6XX_CP_REG_TEST_0_SOURCE__MASK 0x00040000
+#define A6XX_CP_REG_TEST_0_SOURCE__SHIFT 18
+static inline uint32_t A6XX_CP_REG_TEST_0_SOURCE(enum source_type val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_SOURCE__SHIFT) & A6XX_CP_REG_TEST_0_SOURCE__MASK;
+}
#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20
static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val)
@@ -2270,9 +2474,14 @@ static inline uint32_t CP_COND_REG_EXEC_0_PRED_BIT(uint32_t val)
{
return ((val) << CP_COND_REG_EXEC_0_PRED_BIT__SHIFT) & CP_COND_REG_EXEC_0_PRED_BIT__MASK;
}
+#define CP_COND_REG_EXEC_0_SKIP_WAIT_FOR_ME 0x00800000
+#define CP_COND_REG_EXEC_0_ONCHIP_MEM 0x01000000
#define CP_COND_REG_EXEC_0_BINNING 0x02000000
#define CP_COND_REG_EXEC_0_GMEM 0x04000000
#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000
+#define CP_COND_REG_EXEC_0_BV 0x02000000
+#define CP_COND_REG_EXEC_0_BR 0x04000000
+#define CP_COND_REG_EXEC_0_LPAC 0x08000000
#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000
#define CP_COND_REG_EXEC_0_MODE__SHIFT 28
static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
@@ -2280,12 +2489,53 @@ static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK;
}
-#define REG_CP_COND_REG_EXEC_1 0x00000001
-#define CP_COND_REG_EXEC_1_DWORDS__MASK 0xffffffff
-#define CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
-static inline uint32_t CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+#define REG_PRED_TEST_CP_COND_REG_EXEC_1 0x00000001
+#define PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff
+#define PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
+static inline uint32_t PRED_TEST_CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+ return ((val) << PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_REG_COMPARE_CP_COND_REG_EXEC_1 0x00000001
+#define REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK 0x0003ffff
+#define REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT 0
+static inline uint32_t REG_COMPARE_CP_COND_REG_EXEC_1_REG1(uint32_t val)
{
- return ((val) << CP_COND_REG_EXEC_1_DWORDS__SHIFT) & CP_COND_REG_EXEC_1_DWORDS__MASK;
+ return ((val) << REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT) & REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK;
+}
+#define REG_COMPARE_CP_COND_REG_EXEC_1_ONCHIP_MEM 0x01000000
+
+#define REG_RENDER_MODE_CP_COND_REG_EXEC_1 0x00000001
+#define RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff
+#define RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
+static inline uint32_t RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+ return ((val) << RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_REG_COMPARE_IMM_CP_COND_REG_EXEC_1 0x00000001
+#define REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK 0xffffffff
+#define REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT 0
+static inline uint32_t REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM(uint32_t val)
+{
+ return ((val) << REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT) & REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK;
+}
+
+#define REG_THREAD_MODE_CP_COND_REG_EXEC_1 0x00000001
+#define THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff
+#define THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
+static inline uint32_t THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+ return ((val) << THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_CP_COND_REG_EXEC_2 0x00000002
+#define CP_COND_REG_EXEC_2_DWORDS__MASK 0x00ffffff
+#define CP_COND_REG_EXEC_2_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_2_DWORDS(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_2_DWORDS__SHIFT) & CP_COND_REG_EXEC_2_DWORDS__MASK;
}
#define REG_CP_COND_EXEC_0 0x00000000
@@ -2425,10 +2675,88 @@ static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
#define REG_CP_START_BIN_BODY_DWORDS 0x00000004
#define REG_CP_WAIT_TIMESTAMP_0 0x00000000
+#define CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK 0x00000003
+#define CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT 0
+static inline uint32_t CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC(enum ts_wait_value_src val)
+{
+ return ((val) << CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT) & CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK;
+}
+#define CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK 0x00000010
+#define CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT 4
+static inline uint32_t CP_WAIT_TIMESTAMP_0_WAIT_DST(enum ts_wait_type val)
+{
+ return ((val) << CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT) & CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK;
+}
+
+#define REG_TS_WAIT_RAM_CP_WAIT_TIMESTAMP_ADDR 0x00000001
-#define REG_CP_WAIT_TIMESTAMP_ADDR 0x00000001
+#define REG_TS_WAIT_ONCHIP_CP_WAIT_TIMESTAMP_ONCHIP_ADDR_0 0x00000001
-#define REG_CP_WAIT_TIMESTAMP_TIMESTAMP 0x00000003
+#define REG_CP_WAIT_TIMESTAMP_SRC_0 0x00000003
+
+#define REG_CP_WAIT_TIMESTAMP_SRC_1 0x00000004
+
+#define REG_CP_BV_BR_COUNT_OPS_0 0x00000000
+#define CP_BV_BR_COUNT_OPS_0_OP__MASK 0x0000000f
+#define CP_BV_BR_COUNT_OPS_0_OP__SHIFT 0
+static inline uint32_t CP_BV_BR_COUNT_OPS_0_OP(enum pipe_count_op val)
+{
+ return ((val) << CP_BV_BR_COUNT_OPS_0_OP__SHIFT) & CP_BV_BR_COUNT_OPS_0_OP__MASK;
+}
+
+#define REG_CP_BV_BR_COUNT_OPS_1 0x00000001
+#define CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK 0x0000ffff
+#define CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT 0
+static inline uint32_t CP_BV_BR_COUNT_OPS_1_BR_OFFSET(uint32_t val)
+{
+ return ((val) << CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT) & CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK;
+}
+
+#define REG_CP_MODIFY_TIMESTAMP_0 0x00000000
+#define CP_MODIFY_TIMESTAMP_0_ADD__MASK 0x000000ff
+#define CP_MODIFY_TIMESTAMP_0_ADD__SHIFT 0
+static inline uint32_t CP_MODIFY_TIMESTAMP_0_ADD(uint32_t val)
+{
+ return ((val) << CP_MODIFY_TIMESTAMP_0_ADD__SHIFT) & CP_MODIFY_TIMESTAMP_0_ADD__MASK;
+}
+#define CP_MODIFY_TIMESTAMP_0_OP__MASK 0xf0000000
+#define CP_MODIFY_TIMESTAMP_0_OP__SHIFT 28
+static inline uint32_t CP_MODIFY_TIMESTAMP_0_OP(enum timestamp_op val)
+{
+ return ((val) << CP_MODIFY_TIMESTAMP_0_OP__SHIFT) & CP_MODIFY_TIMESTAMP_0_OP__MASK;
+}
+
+#define REG_CP_MEM_TO_SCRATCH_MEM_0 0x00000000
+#define CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK 0x0000003f
+#define CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT 0
+static inline uint32_t CP_MEM_TO_SCRATCH_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT) & CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK;
+}
+
+#define REG_CP_MEM_TO_SCRATCH_MEM_1 0x00000001
+#define CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK 0x0000003f
+#define CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT 0
+static inline uint32_t CP_MEM_TO_SCRATCH_MEM_1_OFFSET(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT) & CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK;
+}
+
+#define REG_CP_MEM_TO_SCRATCH_MEM_2 0x00000002
+#define CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK 0xffffffff
+#define CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_SCRATCH_MEM_2_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT) & CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_SCRATCH_MEM_3 0x00000003
+#define CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_SCRATCH_MEM_3_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT) & CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK;
+}
#define REG_CP_THREAD_CONTROL_0 0x00000000
#define CP_THREAD_CONTROL_0_THREAD__MASK 0x00000003
@@ -2440,5 +2768,36 @@ static inline uint32_t CP_THREAD_CONTROL_0_THREAD(enum cp_thread val)
#define CP_THREAD_CONTROL_0_CONCURRENT_BIN_DISABLE 0x08000000
#define CP_THREAD_CONTROL_0_SYNC_THREADS 0x80000000
+#define REG_CP_FIXED_STRIDE_DRAW_TABLE_IB_BASE 0x00000000
+
+#define REG_CP_FIXED_STRIDE_DRAW_TABLE_2 0x00000002
+#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK 0x00000fff
+#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT 0
+static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE(uint32_t val)
+{
+ return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK;
+}
+#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK 0xfff00000
+#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT 20
+static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE(uint32_t val)
+{
+ return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK;
+}
+
+#define REG_CP_FIXED_STRIDE_DRAW_TABLE_3 0x00000003
+#define CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK 0xffffffff
+#define CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT 0
+static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT(uint32_t val)
+{
+ return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK;
+}
+
+#define REG_CP_RESET_CONTEXT_STATE_0 0x00000000
+#define CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS 0x00000001
+#define CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE 0x00000002
+#define CP_RESET_CONTEXT_STATE_0_CLEAR_GLOBAL_LOCAL_TS 0x00000004
+
+#ifdef __cplusplus
+#endif
#endif /* ADRENO_PM4_XML */
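The generated helpers in the header above all follow one pattern: each FIELD(val) inline shifts a value into position and masks it to the field width, so a packed command dword is built by OR-ing the individual field helpers together. A minimal, stand-alone sketch of that usage (not part of the patch; the helpers are copied verbatim from the header and the field values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Field helpers copied from the generated header above. */
#define CP_DRAW_AUTO_1_NUM_INSTANCES__MASK		0xffffffff
#define CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT		0
static inline uint32_t CP_DRAW_AUTO_1_NUM_INSTANCES(uint32_t val)
{
	return ((val) << CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT) & CP_DRAW_AUTO_1_NUM_INSTANCES__MASK;
}

#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK	0x00000fff
#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT	0
static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE(uint32_t val)
{
	return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK;
}

#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK	0xfff00000
#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT	20
static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE(uint32_t val)
{
	return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK;
}

int main(void)
{
	/* Pack two fields of CP_FIXED_STRIDE_DRAW_TABLE dword 2 into one value. */
	uint32_t dw2 = CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE(16) |
		       CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE(4);
	/* A full-width field (mask 0xffffffff, shift 0) passes the value through. */
	uint32_t dw1 = CP_DRAW_AUTO_1_NUM_INSTANCES(100);

	printf("dw2 = 0x%08x, dw1 = 0x%08x\n", dw2, dw1);
	return 0;
}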
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
new file mode 100644
index 000000000000..424815e7fb7d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023. Linaro Inc. All rights reserved.
+ */
+
+#ifndef _DPU_3_2_SDM660_H
+#define _DPU_3_2_SDM660_H
+
+static const struct dpu_caps sdm660_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sdm660_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x458,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg sdm660_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_sspp_cfg sdm660_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sdm660_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_pingpong_cfg sdm660_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
+};
+
+static const struct dpu_dsc_cfg sdm660_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_dspp_cfg sdm660_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg sdm660_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sdm660_perf_data = {
+ .max_bw_low = 6600000,
+ .max_bw_high = 6600000,
+ .min_core_ib = 3100000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 25,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 200,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sdm660_mdss_ver = {
+ .core_major_ver = 3,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_sdm660_cfg = {
+ .mdss_ver = &sdm660_mdss_ver,
+ .caps = &sdm660_dpu_caps,
+ .mdp = &sdm660_mdp,
+ .ctl_count = ARRAY_SIZE(sdm660_ctl),
+ .ctl = sdm660_ctl,
+ .sspp_count = ARRAY_SIZE(sdm660_sspp),
+ .sspp = sdm660_sspp,
+ .mixer_count = ARRAY_SIZE(sdm660_lm),
+ .mixer = sdm660_lm,
+ .dspp_count = ARRAY_SIZE(sdm660_dspp),
+ .dspp = sdm660_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm660_pp),
+ .pingpong = sdm660_pp,
+ .dsc_count = ARRAY_SIZE(sdm660_dsc),
+ .dsc = sdm660_dsc,
+ .intf_count = ARRAY_SIZE(sdm660_intf),
+ .intf = sdm660_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .perf = &sdm660_perf_data,
+};
+
+#endif
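The perf table above ends with clk_inefficiency_factor = 200 and bw_inefficiency_factor = 120; as with the other DPU catalogs, these read as percentages (2.0x and 1.2x) that the core-perf code multiplies into the raw clock and bandwidth estimates. A stand-alone arithmetic sketch of that scaling, assuming the percent interpretation — the helper and the sample numbers below are illustrative, not the driver's actual code:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: scale a raw estimate by an inefficiency factor
 * expressed in percent, mirroring how the sdm660 catalog values
 * (clk_inefficiency_factor = 200, bw_inefficiency_factor = 120) are
 * assumed to be applied by the DPU core-perf path.
 */
static uint64_t apply_inefficiency(uint64_t raw, uint32_t factor_pct)
{
	return raw * factor_pct / 100;
}

int main(void)
{
	uint64_t raw_bw_kbps = 1000000;	/* made-up raw bandwidth estimate */
	uint64_t raw_clk_khz = 200000;	/* made-up raw clock estimate */

	printf("bw  with 120%% factor: %llu kBps\n",
	       (unsigned long long)apply_inefficiency(raw_bw_kbps, 120));
	printf("clk with 200%% factor: %llu kHz\n",
	       (unsigned long long)apply_inefficiency(raw_clk_khz, 200));
	return 0;
}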
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
new file mode 100644
index 000000000000..df01227fc364
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023. Linaro Inc. All rights reserved.
+ */
+
+#ifndef _DPU_3_3_SDM630_H
+#define _DPU_3_3_SDM630_H
+
+static const struct dpu_caps sdm630_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sdm630_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x458,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg sdm630_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_sspp_cfg sdm630_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sdm630_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_2,
+ },
+};
+
+static const struct dpu_pingpong_cfg sdm630_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ },
+};
+
+static const struct dpu_dspp_cfg sdm630_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg sdm630_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sdm630_perf_data = {
+ .max_bw_low = 4100000,
+ .max_bw_high = 4100000,
+ .min_core_ib = 3200000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 25,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 200,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sdm630_mdss_ver = {
+ .core_major_ver = 3,
+ .core_minor_ver = 3,
+};
+
+const struct dpu_mdss_cfg dpu_sdm630_cfg = {
+ .mdss_ver = &sdm630_mdss_ver,
+ .caps = &sdm630_dpu_caps,
+ .mdp = &sdm630_mdp,
+ .ctl_count = ARRAY_SIZE(sdm630_ctl),
+ .ctl = sdm630_ctl,
+ .sspp_count = ARRAY_SIZE(sdm630_sspp),
+ .sspp = sdm630_sspp,
+ .mixer_count = ARRAY_SIZE(sdm630_lm),
+ .mixer = sdm630_lm,
+ .dspp_count = ARRAY_SIZE(sdm630_dspp),
+ .dspp = sdm630_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm630_pp),
+ .pingpong = sdm630_pp,
+ .intf_count = ARRAY_SIZE(sdm630_intf),
+ .intf = sdm630_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .perf = &sdm630_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
new file mode 100644
index 000000000000..9a9f7092c526
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_9_2_X1E80100_H
+#define _DPU_9_2_X1E80100_H
+
+static const struct dpu_caps x1e80100_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg x1e80100_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg x1e80100_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg x1e80100_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg x1e80100_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg x1e80100_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg x1e80100_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg x1e80100_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address but have
+ * their own distinct sub-block addresses.
+ */
+static const struct dpu_dsc_cfg x1e80100_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg x1e80100_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg x1e80100_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_3,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ },
+};
+
+static const struct dpu_perf_cfg x1e80100_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version x1e80100_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_x1e80100_cfg = {
+ .mdss_ver = &x1e80100_mdss_ver,
+ .caps = &x1e80100_dpu_caps,
+ .mdp = &x1e80100_mdp,
+ .ctl_count = ARRAY_SIZE(x1e80100_ctl),
+ .ctl = x1e80100_ctl,
+ .sspp_count = ARRAY_SIZE(x1e80100_sspp),
+ .sspp = x1e80100_sspp,
+ .mixer_count = ARRAY_SIZE(x1e80100_lm),
+ .mixer = x1e80100_lm,
+ .dspp_count = ARRAY_SIZE(x1e80100_dspp),
+ .dspp = x1e80100_dspp,
+ .pingpong_count = ARRAY_SIZE(x1e80100_pp),
+ .pingpong = x1e80100_pp,
+ .dsc_count = ARRAY_SIZE(x1e80100_dsc),
+ .dsc = x1e80100_dsc,
+ .merge_3d_count = ARRAY_SIZE(x1e80100_merge_3d),
+ .merge_3d = x1e80100_merge_3d,
+ .wb_count = ARRAY_SIZE(x1e80100_wb),
+ .wb = x1e80100_wb,
+ .intf_count = ARRAY_SIZE(x1e80100_intf),
+ .intf = x1e80100_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &x1e80100_perf_data,
+};
+
+#endif
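All three catalog headers added above follow the same convention: one const array per block type (CTL, SSPP, mixer, pingpong, ...) plus an ARRAY_SIZE-derived count stored next to the pointer in the top-level dpu_mdss_cfg, so consumers iterate by count rather than relying on a sentinel entry. A simplified, self-contained sketch of that pointer-plus-count pattern — the struct and field names here are stand-ins, not the real dpu_* types:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for a catalog block descriptor (e.g. a CTL or INTF entry). */
struct blk_cfg {
	const char *name;
	unsigned int base;
};

/* Stand-in for the top-level cfg: pointer + count pairs, like dpu_mdss_cfg. */
struct soc_cfg {
	const struct blk_cfg *ctl;
	size_t ctl_count;
};

static const struct blk_cfg demo_ctl[] = {
	{ .name = "ctl_0", .base = 0x15000 },
	{ .name = "ctl_1", .base = 0x16000 },
};

static const struct soc_cfg demo_cfg = {
	.ctl = demo_ctl,
	.ctl_count = ARRAY_SIZE(demo_ctl),	/* count stays in sync with the array */
};

int main(void)
{
	/* Consumers walk the array by count, never past ctl_count entries. */
	for (size_t i = 0; i < demo_cfg.ctl_count; i++)
		printf("%s @ 0x%x\n", demo_cfg.ctl[i].name, demo_cfg.ctl[i].base);
	return 0;
}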
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 6a4b489d44e5..9a14d2232e4a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -126,6 +126,8 @@ enum dpu_enc_rc_states {
* @base: drm_encoder base class for registration with DRM
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enabled: True if the encoder is active, protected by enc_lock
+ * @commit_done_timedout: True if there has been a timeout on commit after
+ * enabling the encoder.
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
@@ -172,6 +174,7 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
bool enabled;
+ bool commit_done_timedout;
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
@@ -218,12 +221,66 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
+u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *drm_enc;
+ struct dpu_encoder_virt *dpu_enc;
+ struct drm_display_info *info;
+ struct drm_display_mode *mode;
+
+ drm_enc = phys_enc->parent;
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ info = &dpu_enc->connector->display_info;
+ mode = &phys_enc->cached_mode;
+
+ if (drm_mode_is_420_only(info, mode))
+ return DRM_FORMAT_YUV420;
+
+ return DRM_FORMAT_RGB888;
+}
+
+bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *drm_enc;
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_display_info *disp_info;
+ struct msm_drm_private *priv;
+ struct drm_display_mode *mode;
+
+ drm_enc = phys_enc->parent;
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+ priv = drm_enc->dev->dev_private;
+ mode = &phys_enc->cached_mode;
+
+ return phys_enc->hw_intf->cap->type == INTF_DP &&
+ msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
+}
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
+ const struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv = drm_enc->dev->dev_private;
+ const struct msm_display_info *disp_info;
+ int index;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+ index = disp_info->h_tile_instance[0];
+
+ if (disp_info->intf_type == INTF_DP)
+ return msm_dp_wide_bus_available(priv->dp[index]);
+ else if (disp_info->intf_type == INTF_DSI)
+ return msm_dsi_wide_bus_enabled(priv->dsi[index]);
+
+ return false;
+}
+
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
+{
const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- return dpu_enc->wide_bus_en;
+ return dpu_enc->dsc ? true : false;
}
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
@@ -581,10 +638,10 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_kms *dpu_kms;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
+ struct msm_display_info *disp_info;
struct dpu_global_state *global_state;
struct drm_framebuffer *fb;
struct drm_dsc_config *dsc;
- int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
@@ -597,6 +654,7 @@ static int dpu_encoder_virt_atomic_check(
DPU_DEBUG_ENC(dpu_enc, "\n");
priv = drm_enc->dev->dev_private;
+ disp_info = &dpu_enc->disp_info;
dpu_kms = to_dpu_kms(priv->kms);
adj_mode = &crtc_state->adjusted_mode;
global_state = dpu_kms_get_global_state(crtc_state->state);
@@ -605,40 +663,29 @@ static int dpu_encoder_virt_atomic_check(
trace_dpu_enc_atomic_check(DRMID(drm_enc));
- /* perform atomic check on the first physical encoder (master) */
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys->ops.atomic_check)
- ret = phys->ops.atomic_check(phys, crtc_state,
- conn_state);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc,
- "mode unsupported, phys idx %d\n", i);
- return ret;
- }
- }
-
dsc = dpu_encoder_get_dsc_config(drm_enc);
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
/*
- * Use CDM only for writeback at the moment as other interfaces cannot handle it.
- * if writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
+ * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
+ * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
* earlier.
*/
- if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
fb = conn_state->writeback_job->fb;
if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
topology.needs_cdm = true;
- if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
+ } else if (disp_info->intf_type == INTF_DP) {
+ if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
+ topology.needs_cdm = true;
}
+ if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
+ crtc_state->mode_changed = true;
+ else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
+ crtc_state->mode_changed = true;
/*
* Release and Allocate resources on every modeset
* Dont allocate when active is false.
@@ -714,7 +761,7 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
}
}
-static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
int i;
@@ -726,18 +773,35 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys->ops.irq_control)
- phys->ops.irq_control(phys, enable);
+ phys->ops.irq_enable(phys);
+ }
+}
+
+static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
}
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ phys->ops.irq_disable(phys);
+ }
}
-static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
- bool enable)
+static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
@@ -747,28 +811,42 @@ static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+ trace_dpu_enc_rc_enable(DRMID(drm_enc));
if (!dpu_enc->cur_master) {
DPU_ERROR("encoder master not set\n");
return;
}
- if (enable) {
- /* enable DPU core clks */
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
- /* enable all the irq */
- _dpu_encoder_irq_control(drm_enc, true);
+ /* enable all the irq */
+ _dpu_encoder_irq_enable(drm_enc);
+}
- } else {
- /* disable all the irq */
- _dpu_encoder_irq_control(drm_enc, false);
+static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
- /* disable DPU core clks */
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ trace_dpu_enc_rc_disable(DRMID(drm_enc));
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
}
+ /* disable all the irq */
+ _dpu_encoder_irq_disable(drm_enc);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
@@ -824,9 +902,9 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
}
if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
- _dpu_encoder_irq_control(drm_enc, true);
+ _dpu_encoder_irq_enable(drm_enc);
else
- _dpu_encoder_resource_control_helper(drm_enc, true);
+ _dpu_encoder_resource_enable(drm_enc);
dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
@@ -879,7 +957,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
if (is_vid_mode &&
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
- _dpu_encoder_irq_control(drm_enc, true);
+ _dpu_encoder_irq_enable(drm_enc);
}
/* skip if is already OFF or IDLE, resources are off already */
else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
@@ -921,7 +999,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
* and in IDLE state the resources are already disabled
*/
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
- _dpu_encoder_resource_control_helper(drm_enc, false);
+ _dpu_encoder_resource_disable(drm_enc);
dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
@@ -954,9 +1032,9 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
}
if (is_vid_mode)
- _dpu_encoder_irq_control(drm_enc, false);
+ _dpu_encoder_irq_disable(drm_enc);
else
- _dpu_encoder_resource_control_helper(drm_enc, false);
+ _dpu_encoder_resource_disable(drm_enc);
dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
@@ -1079,7 +1157,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->dsc_mask = dsc_mask;
- if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ if ((dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) ||
+ dpu_enc->disp_info.intf_type == INTF_DP) {
struct dpu_hw_blk *hw_cdm = NULL;
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
@@ -1121,8 +1200,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
phys->cached_mode = crtc_state->adjusted_mode;
- if (phys->ops.atomic_mode_set)
- phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
}
}
@@ -1188,26 +1265,20 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct drm_display_mode *cur_mode = NULL;
- struct msm_drm_private *priv = drm_enc->dev->dev_private;
- struct msm_display_info *disp_info;
- int index;
dpu_enc = to_dpu_encoder_virt(drm_enc);
- disp_info = &dpu_enc->disp_info;
- index = disp_info->h_tile_instance[0];
-
dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
- if (disp_info->intf_type == INTF_DP)
- dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
- else if (disp_info->intf_type == INTF_DSI)
- dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);
-
mutex_lock(&dpu_enc->enc_lock);
+
+ dpu_enc->commit_done_timedout = false;
+
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+ dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);
+
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
@@ -1261,7 +1332,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
trace_dpu_enc_disable(DRMID(drm_enc));
/* wait for idle */
- dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+ dpu_encoder_wait_for_tx_complete(drm_enc);
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
@@ -1853,7 +1924,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ if (dpu_encoder_use_dsc_merge(enc_master->parent))
+ dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
@@ -2110,6 +2183,84 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
+ const struct dpu_format *dpu_fmt,
+ u32 output_type)
+{
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg *cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ cdm_cfg = &phys_enc->cdm_cfg;
+ hw_pp = phys_enc->hw_pp;
+ hw_cdm = phys_enc->hw_cdm;
+
+ if (!hw_cdm)
+ return;
+
+ if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
+ DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
+ dpu_fmt->base.pixel_format);
+ if (hw_cdm->ops.bind_pingpong_blk)
+ hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);
+
+ return;
+ }
+
+ memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));
+
+ cdm_cfg->output_width = phys_enc->cached_mode.hdisplay;
+ cdm_cfg->output_height = phys_enc->cached_mode.vdisplay;
+ cdm_cfg->output_fmt = dpu_fmt;
+ cdm_cfg->output_type = output_type;
+ cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+ cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;
+
+ /* enable 10 bit logic */
+ switch (cdm_cfg->output_fmt->chroma_sample) {
+ case DPU_CHROMA_RGB:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case DPU_CHROMA_H2V1:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case DPU_CHROMA_420:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+ break;
+ case DPU_CHROMA_H1V2:
+ default:
+ DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
+ DRMID(phys_enc->parent));
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ }
+
+ DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+ DRMID(phys_enc->parent), cdm_cfg->output_width,
+ cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
+ cdm_cfg->output_type, cdm_cfg->output_bit_depth,
+ cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);
+
+ if (hw_cdm->ops.enable) {
+ cdm_cfg->pp_id = hw_pp->idx;
+ ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
+ DRMID(phys_enc->parent), ret);
+ return;
+ }
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
@@ -2379,10 +2530,18 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
return &dpu_enc->base;
}
-int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
- enum msm_event_wait event)
+/**
+ * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
+ * @drm_enc: encoder pointer
+ *
+ * Wait for hardware to have flushed the current pending changes to hardware at
+ * a vblank or CTL_START. Physical encoders will map this differently depending
+ * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
{
- int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
int i, ret = 0;
@@ -2396,23 +2555,51 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- switch (event) {
- case MSM_ENC_COMMIT_DONE:
- fn_wait = phys->ops.wait_for_commit_done;
- break;
- case MSM_ENC_TX_COMPLETE:
- fn_wait = phys->ops.wait_for_tx_complete;
- break;
- default:
- DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
- event);
- return -EINVAL;
+ if (phys->ops.wait_for_commit_done) {
+ DPU_ATRACE_BEGIN("wait_for_commit_done");
+ ret = phys->ops.wait_for_commit_done(phys);
+ DPU_ATRACE_END("wait_for_commit_done");
+ if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
+ dpu_enc->commit_done_timedout = true;
+ msm_disp_snapshot_state(drm_enc->dev);
+ }
+ if (ret)
+ return ret;
}
+ }
+
+ return ret;
+}
+
+/**
+ * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
+ * @drm_enc: encoder pointer
+ *
+ * Wait for the hardware to transfer all the pixels to the panel. Physical
+ * encoders will map this differently depending on the type: vid mode -> vsync_irq,
+ * cmd mode -> pp_done.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (fn_wait) {
- DPU_ATRACE_BEGIN("wait_for_completion_event");
- ret = fn_wait(phys);
- DPU_ATRACE_END("wait_for_completion_event");
+ if (phys->ops.wait_for_tx_complete) {
+ DPU_ATRACE_BEGIN("wait_for_tx_complete");
+ ret = phys->ops.wait_for_tx_complete(phys);
+ DPU_ATRACE_END("wait_for_tx_complete");
if (ret)
return ret;
}
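
Illustrative aside, not part of the patch: a minimal, self-contained C sketch of the pattern dpu_encoder_wait_for_commit_done() follows above -- walk every physical encoder, and take a device snapshot only on the first -ETIMEDOUT by latching a one-shot commit_done_timedout flag. All names below (fake_phys, fake_virt_enc, fake_snapshot) are invented stand-ins, not the DPU driver's API.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_phys {
	int (*wait_for_commit_done)(struct fake_phys *phys);
};

struct fake_virt_enc {
	struct fake_phys *phys_encs[4];
	int num_phys_encs;
	bool commit_done_timedout;
};

/* Stand-in for msm_disp_snapshot_state(). */
static void fake_snapshot(void)
{
	printf("snapshot taken (only once per failure window)\n");
}

static int fake_wait_for_commit_done(struct fake_virt_enc *enc)
{
	int i, ret = 0;

	for (i = 0; i < enc->num_phys_encs; i++) {
		struct fake_phys *phys = enc->phys_encs[i];

		if (!phys->wait_for_commit_done)
			continue;

		ret = phys->wait_for_commit_done(phys);
		if (ret == -ETIMEDOUT && !enc->commit_done_timedout) {
			/* Latch the flag so repeated timeouts do not re-dump state. */
			enc->commit_done_timedout = true;
			fake_snapshot();
		}
		if (ret)
			return ret;
	}

	return ret;
}

static int stub_wait_timeout(struct fake_phys *phys)
{
	(void)phys;
	return -ETIMEDOUT;
}

int main(void)
{
	struct fake_phys phys = { .wait_for_commit_done = stub_wait_timeout };
	struct fake_virt_enc enc = { .phys_encs = { &phys }, .num_phys_encs = 1 };

	fake_wait_for_commit_done(&enc);
	fake_wait_for_commit_done(&enc);	/* times out again, no second snapshot */
	return 0;
}
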
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 4c05fd5e9ed1..76be77e30954 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -93,25 +93,9 @@ void dpu_encoder_kickoff(struct drm_encoder *encoder);
*/
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
-/**
- * dpu_encoder_wait_for_event - Waits for encoder events
- * @encoder: encoder pointer
- * @event: event to wait for
- * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
- * frames to hardware at a vblank or ctl_start
- * Encoders will map this differently depending on the
- * panel type.
- * vid mode -> vsync_irq
- * cmd mode -> ctl_start
- * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
- * the panel. Encoders will map this differently
- * depending on the panel type.
- * vid mode -> vsync_irq
- * cmd mode -> pp_done
- * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
- */
-int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
- enum msm_event_wait event);
+int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+
+int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder);
/*
* dpu_encoder_get_intf_mode - get interface mode of the given encoder
@@ -156,9 +140,20 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
*/
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+/**
+ * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
/**
+ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
+ * for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
+
+/**
* dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
* in virtual encoder that can collect CRC values
* @drm_enc: Pointer to previously created drm encoder structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 993f26343331..98d1b64a43e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -69,11 +69,8 @@ struct dpu_encoder_phys;
* @is_master: Whether this phys_enc is the current master
* encoder. Can be switched at enable time. Based
* on split_role and current mode (CMD/VID).
- * @atomic_mode_set: DRM Call. Set a DRM mode.
- * This likely caches the mode, for use at enable.
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
- * @atomic_check: DRM Call. Atomic check new DRM state.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
@@ -85,7 +82,8 @@ struct dpu_encoder_phys;
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @irq_enable: Handler to enable all the encoder IRQs
+ * @irq_disable: Handler to disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
* @restore: Restore all the encoder configs.
@@ -95,14 +93,8 @@ struct dpu_encoder_phys;
struct dpu_encoder_phys_ops {
void (*prepare_commit)(struct dpu_encoder_phys *encoder);
bool (*is_master)(struct dpu_encoder_phys *encoder);
- void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state);
void (*enable)(struct dpu_encoder_phys *encoder);
void (*disable)(struct dpu_encoder_phys *encoder);
- int (*atomic_check)(struct dpu_encoder_phys *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
@@ -110,7 +102,8 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
- void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*irq_enable)(struct dpu_encoder_phys *phys);
+ void (*irq_disable)(struct dpu_encoder_phys *phys);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
@@ -154,6 +147,7 @@ enum dpu_intr_idx {
* @hw_wb: Hardware interface to the wb registers
* @hw_cdm: Hardware interface to the CDM registers
* @dpu_kms: Pointer to the dpu_kms top level
+ * @cdm_cfg: CDM block config needed to store WB/DP block's CDM configuration
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @vblank_ctl_lock: Vblank ctl mutex lock to protect vblank_refcount
* @enabled: Whether the encoder has enabled and running a mode
@@ -184,6 +178,7 @@ struct dpu_encoder_phys {
struct dpu_hw_wb *hw_wb;
struct dpu_hw_cdm *hw_cdm;
struct dpu_kms *dpu_kms;
+ struct dpu_hw_cdm_cfg cdm_cfg;
struct drm_display_mode cached_mode;
struct mutex vblank_ctl_lock;
enum dpu_enc_split_role split_role;
@@ -213,7 +208,6 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
* @wbirq_refcount: Reference count of writeback interrupt
* @wb_done_timeout_cnt: number of wb done irq timeout errors
* @wb_cfg: writeback block config to store fb related details
- * @cdm_cfg: cdm block config needed to store writeback block's CDM configuration
* @wb_conn: backpointer to writeback connector
* @wb_job: backpointer to current writeback job
* @dest: dpu buffer layout for current writeback output buffer
@@ -223,7 +217,6 @@ struct dpu_encoder_phys_wb {
atomic_t wbirq_refcount;
int wb_done_timeout_cnt;
struct dpu_hw_wb_cfg wb_cfg;
- struct dpu_hw_cdm_cfg cdm_cfg;
struct drm_writeback_connector *wb_conn;
struct drm_writeback_job *wb_job;
struct dpu_hw_fmt_layout dest;
@@ -342,6 +335,19 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
/**
+ * dpu_encoder_get_drm_fmt - return DRM fourcc format
+ * @phys_enc: Pointer to physical encoder structure
+ */
+u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_needs_periph_flush - return true if physical encoder requires
+ * peripheral flush
+ * @phys_enc: Pointer to physical encoder structure
+ */
+bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc);
+
+/**
* dpu_encoder_helper_split_config - split display configuration helper function
* This helper function may be used by physical encoders to configure
* the split display related registers.
@@ -382,6 +388,15 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
/**
+ * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
+ * @phys_enc: Pointer to physical encoder
+ * @dpu_fmt: Format the CDM block is configured to output
+ * @output_type: HDMI/WB
+ */
+void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
+ const struct dpu_format *dpu_fmt,
+ u32 output_type);
+
+/**
* dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
* @drm_enc: Pointer to drm encoder structure
* @phys_enc: Pointer to physical encoder
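
Another illustrative sketch, not driver code: the shape of the irq_control split declared in this header -- one bool-taking callback becomes a pair of intent-named callbacks, so callers state what they mean instead of passing a flag. demo_phys and demo_phys_ops are invented for the example.

#include <stdio.h>

struct demo_phys;

struct demo_phys_ops {
	void (*irq_enable)(struct demo_phys *phys);
	void (*irq_disable)(struct demo_phys *phys);
};

struct demo_phys {
	const struct demo_phys_ops *ops;
};

static void demo_irq_enable(struct demo_phys *phys)
{
	(void)phys;
	printf("register irq callbacks\n");
}

static void demo_irq_disable(struct demo_phys *phys)
{
	(void)phys;
	printf("unregister irq callbacks\n");
}

static const struct demo_phys_ops demo_ops = {
	.irq_enable = demo_irq_enable,
	.irq_disable = demo_irq_disable,
};

int main(void)
{
	struct demo_phys phys = { .ops = &demo_ops };

	/* Each side can now carry its own setup/teardown logic. */
	phys.ops->irq_enable(&phys);
	phys.ops->irq_disable(&phys);
	return 0;
}
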
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a301e2833177..fc1d5736d7fc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -142,23 +142,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
-static void dpu_encoder_phys_cmd_atomic_mode_set(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
-
- phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
-
- if (phys_enc->has_intf_te)
- phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
- else
- phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
-
- phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
-}
-
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
@@ -291,40 +274,54 @@ end:
return ret;
}
-static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
- bool enable)
+static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
{
- trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0,
- enable, phys_enc->vblank_refcount);
+ trace_dpu_enc_phys_cmd_irq_enable(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->vblank_refcount);
- if (enable) {
- dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_PINGPONG],
- dpu_encoder_phys_cmd_pp_tx_done_irq,
- phys_enc);
+ phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
+ phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
+
+ if (phys_enc->has_intf_te)
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
+ else
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
+
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ phys_enc);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_cmd_underrun_irq,
+ phys_enc);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN],
- dpu_encoder_phys_cmd_underrun_irq,
- phys_enc);
- dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
-
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_CTL_START],
- dpu_encoder_phys_cmd_ctl_start_irq,
- phys_enc);
- } else {
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_CTL_START]);
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc)
+{
+ trace_dpu_enc_phys_cmd_irq_disable(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->vblank_refcount);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN]);
- dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_PINGPONG]);
- }
+ phys_enc->irq[INTR_IDX_CTL_START]);
+
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]);
+
+ phys_enc->irq[INTR_IDX_CTL_START] = 0;
+ phys_enc->irq[INTR_IDX_PINGPONG] = 0;
+ phys_enc->irq[INTR_IDX_RDPTR] = 0;
}
static void dpu_encoder_phys_cmd_tearcheck_config(
@@ -704,7 +701,6 @@ static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_cmd_is_master;
- ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
@@ -713,7 +709,8 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
- ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->irq_enable = dpu_encoder_phys_cmd_irq_enable;
+ ops->irq_disable = dpu_encoder_phys_cmd_irq_disable;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
@@ -742,6 +739,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
+
cmd_enc->stream_sel = 0;
if (!phys_enc->hw_intf) {
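
A hedged, stand-alone sketch of the command-mode change above: IRQ indices that depend on per-commit hardware blocks (CTL, PINGPONG) are latched in irq_enable() and cleared again in irq_disable(), while anything known at init time (the underrun IRQ) is assigned once in the init path. Everything named demo_* below is invented.

#include <stdio.h>

enum { IRQ_CTL_START, IRQ_PINGPONG, IRQ_RDPTR, IRQ_MAX };

struct demo_enc {
	int hw_ctl_start_irq;	/* known only once a CTL block is assigned */
	int hw_pp_done_irq;	/* known only once a PINGPONG is assigned */
	int hw_pp_rdptr_irq;
	int irq[IRQ_MAX];
};

static void demo_irq_enable(struct demo_enc *enc)
{
	/* Latch the block-dependent indices now that the blocks are known. */
	enc->irq[IRQ_CTL_START] = enc->hw_ctl_start_irq;
	enc->irq[IRQ_PINGPONG] = enc->hw_pp_done_irq;
	enc->irq[IRQ_RDPTR] = enc->hw_pp_rdptr_irq;
	printf("registered irqs %d %d %d\n",
	       enc->irq[IRQ_CTL_START], enc->irq[IRQ_PINGPONG], enc->irq[IRQ_RDPTR]);
}

static void demo_irq_disable(struct demo_enc *enc)
{
	/* Clear them so a stale index is never used after the blocks are released. */
	enc->irq[IRQ_CTL_START] = 0;
	enc->irq[IRQ_PINGPONG] = 0;
	enc->irq[IRQ_RDPTR] = 0;
}

int main(void)
{
	struct demo_enc enc = {
		.hw_ctl_start_irq = 11,
		.hw_pp_done_irq = 8,
		.hw_pp_rdptr_irq = 9,
	};

	demo_irq_enable(&enc);
	demo_irq_disable(&enc);
	return 0;
}
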
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index d0f56c5c4cce..d9e7dbf0499c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -102,6 +102,7 @@ static void drm_mode_to_intf_timing_params(
}
timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
/*
	 * for DP, divide the horizontal parameters by 2 when
@@ -235,7 +236,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct drm_display_mode mode;
struct dpu_hw_intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
- u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ u32 fmt_fourcc;
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
@@ -254,17 +255,21 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
- if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO || fmt_fourcc == DRM_FORMAT_YUV420) {
mode.hdisplay >>= 1;
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
+ mode.hskew >>= 1;
DPU_DEBUG_VIDENC(phys_enc,
- "split_role %d, halve horizontal %d %d %d %d\n",
+ "split_role %d, halve horizontal %d %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
- mode.hsync_start, mode.hsync_end);
+ mode.hsync_start, mode.hsync_end,
+ mode.hskew);
}
drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
@@ -272,6 +277,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
fmt = dpu_get_dpu_format(fmt_fourcc);
DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+ if (phys_enc->hw_cdm)
+ intf_cfg.cdm = phys_enc->hw_cdm->idx;
intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
@@ -349,16 +356,6 @@ static bool dpu_encoder_phys_vid_needs_single_flush(
return phys_enc->split_role != ENC_ROLE_SOLO;
}
-static void dpu_encoder_phys_vid_atomic_mode_set(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
-
- phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
-}
-
static int dpu_encoder_phys_vid_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
@@ -412,8 +409,12 @@ end:
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
+ const struct dpu_format *fmt;
+ u32 fmt_fourcc;
ctl = phys_enc->hw_ctl;
+ fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
+ fmt = dpu_get_dpu_format(fmt_fourcc);
DPU_DEBUG_VIDENC(phys_enc, "\n");
@@ -422,6 +423,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
+ dpu_encoder_helper_phys_setup_cdm(phys_enc, fmt, CDM_CDWN_OUTPUT_HDMI);
+
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
/*
@@ -437,6 +440,16 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
+ if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm)
+ ctl->ops.update_pending_flush_cdm(ctl, phys_enc->hw_cdm->idx);
+
+ /*
+ * Peripheral flush must be updated whenever flushing SDP packets is needed.
+ * SDP packets are required for any YUV format (YUV420, YUV422, YUV444).
+ */
+ if (ctl->ops.update_pending_flush_periph && dpu_encoder_needs_periph_flush(phys_enc))
+ ctl->ops.update_pending_flush_periph(ctl, phys_enc->hw_intf->idx);
+
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
"update pending flush ctl %d intf %d\n",
@@ -489,7 +502,7 @@ static int dpu_encoder_phys_vid_wait_for_commit_done(
(hw_ctl->ops.get_flush_register(hw_ctl) == 0),
msecs_to_jiffies(50));
if (ret <= 0) {
- DPU_ERROR("vblank timeout\n");
+ DPU_ERROR("vblank timeout: %x\n", hw_ctl->ops.get_flush_register(hw_ctl));
return -ETIMEDOUT;
}
@@ -615,30 +628,33 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
}
}
-static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
- bool enable)
+static void dpu_encoder_phys_vid_irq_enable(struct dpu_encoder_phys *phys_enc)
{
int ret;
- trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
- phys_enc->hw_intf->idx - INTF_0,
- enable,
- phys_enc->vblank_refcount);
+ trace_dpu_enc_phys_vid_irq_enable(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ phys_enc->vblank_refcount);
- if (enable) {
- ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
- if (WARN_ON(ret))
- return;
-
- dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN],
- dpu_encoder_phys_vid_underrun_irq,
- phys_enc);
- } else {
- dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN]);
- }
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (WARN_ON(ret))
+ return;
+
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_vid_underrun_irq,
+ phys_enc);
+}
+
+static void dpu_encoder_phys_vid_irq_disable(struct dpu_encoder_phys *phys_enc)
+{
+ trace_dpu_enc_phys_vid_irq_disable(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ phys_enc->vblank_refcount);
+
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
}
static int dpu_encoder_phys_vid_get_line_count(
@@ -683,13 +699,13 @@ static int dpu_encoder_phys_vid_get_frame_count(
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
- ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
- ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->irq_enable = dpu_encoder_phys_vid_irq_enable;
+ ops->irq_disable = dpu_encoder_phys_vid_irq_disable;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
@@ -721,6 +737,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);
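
Stand-alone sketch of the horizontal-halving step in dpu_encoder_phys_vid_setup_timing_engine() above, which now also fires when the output format is DRM_FORMAT_YUV420; demo_mode is a stand-in for drm_display_mode and the timing numbers in main() are arbitrary.

#include <stdbool.h>
#include <stdio.h>

struct demo_mode {
	int hdisplay, htotal, hsync_start, hsync_end, hskew;
};

static void halve_horizontal(struct demo_mode *m)
{
	m->hdisplay >>= 1;
	m->htotal >>= 1;
	m->hsync_start >>= 1;
	m->hsync_end >>= 1;
	m->hskew >>= 1;
}

int main(void)
{
	struct demo_mode m = { 3840, 4400, 4016, 4104, 0 };
	bool solo = true, yuv420 = true;

	/* Mirrors: split_role != ENC_ROLE_SOLO || fmt_fourcc == DRM_FORMAT_YUV420 */
	if (!solo || yuv420)
		halve_horizontal(&m);

	printf("programmed: %d %d %d %d %d\n",
	       m.hdisplay, m.htotal, m.hsync_start, m.hsync_end, m.hskew);
	return 0;
}
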
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4cd2d9e3131a..1924a2b28e53 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -265,149 +265,6 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
}
/**
- * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
- * This API does not handle DPU_CHROMA_H1V2.
- * @phys_enc:Pointer to physical encoder
- */
-static void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_hw_cdm *hw_cdm;
- struct dpu_hw_cdm_cfg *cdm_cfg;
- struct dpu_hw_pingpong *hw_pp;
- struct dpu_encoder_phys_wb *wb_enc;
- const struct msm_format *format;
- const struct dpu_format *dpu_fmt;
- struct drm_writeback_job *wb_job;
- int ret;
-
- if (!phys_enc)
- return;
-
- wb_enc = to_dpu_encoder_phys_wb(phys_enc);
- cdm_cfg = &wb_enc->cdm_cfg;
- hw_pp = phys_enc->hw_pp;
- hw_cdm = phys_enc->hw_cdm;
- wb_job = wb_enc->wb_job;
-
- format = msm_framebuffer_format(wb_enc->wb_job->fb);
- dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier);
-
- if (!hw_cdm)
- return;
-
- if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
- DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
- dpu_fmt->base.pixel_format);
- if (hw_cdm->ops.bind_pingpong_blk)
- hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);
-
- return;
- }
-
- memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));
-
- cdm_cfg->output_width = wb_job->fb->width;
- cdm_cfg->output_height = wb_job->fb->height;
- cdm_cfg->output_fmt = dpu_fmt;
- cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
- cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
- CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
- cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;
-
- /* enable 10 bit logic */
- switch (cdm_cfg->output_fmt->chroma_sample) {
- case DPU_CHROMA_RGB:
- cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- case DPU_CHROMA_H2V1:
- cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- case DPU_CHROMA_420:
- cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
- break;
- case DPU_CHROMA_H1V2:
- default:
- DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
- DRMID(phys_enc->parent));
- cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- }
-
- DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
- DRMID(phys_enc->parent), cdm_cfg->output_width,
- cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
- cdm_cfg->output_type, cdm_cfg->output_bit_depth,
- cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);
-
- if (hw_cdm->ops.enable) {
- cdm_cfg->pp_id = hw_pp->idx;
- ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
- if (ret < 0) {
- DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
- DRMID(phys_enc->parent), ret);
- return;
- }
- }
-}
-
-/**
- * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
- * @phys_enc: Pointer to physical encoder
- * @crtc_state: Pointer to CRTC atomic state
- * @conn_state: Pointer to connector atomic state
- */
-static int dpu_encoder_phys_wb_atomic_check(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct drm_framebuffer *fb;
- const struct drm_display_mode *mode = &crtc_state->mode;
-
- DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
- phys_enc->hw_wb->idx, mode->name, mode->hdisplay, mode->vdisplay);
-
- if (!conn_state || !conn_state->connector) {
- DPU_ERROR("invalid connector state\n");
- return -EINVAL;
- } else if (conn_state->connector->status !=
- connector_status_connected) {
- DPU_ERROR("connector not connected %d\n",
- conn_state->connector->status);
- return -EINVAL;
- }
-
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
- return 0;
-
- fb = conn_state->writeback_job->fb;
-
- DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
- fb->width, fb->height);
-
- if (fb->width != mode->hdisplay) {
- DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
- mode->hdisplay);
- return -EINVAL;
- } else if (fb->height != mode->vdisplay) {
- DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
- mode->vdisplay);
- return -EINVAL;
- } else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
- DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
- fb->width, phys_enc->hw_wb->caps->maxlinewidth);
- return -EINVAL;
- }
-
- return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
-}
-
-
-/**
* _dpu_encoder_phys_wb_update_flush - flush hardware update
* @phys_enc: Pointer to physical encoder
*/
@@ -462,6 +319,14 @@ static void dpu_encoder_phys_wb_setup(
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct drm_display_mode mode = phys_enc->cached_mode;
struct drm_framebuffer *fb = NULL;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct drm_writeback_job *wb_job;
+ const struct msm_format *format;
+ const struct dpu_format *dpu_fmt;
+
+ wb_job = wb_enc->wb_job;
+ format = msm_framebuffer_format(wb_enc->wb_job->fb);
+ dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier);
DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode.name,
@@ -475,7 +340,7 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
- dpu_encoder_helper_phys_setup_cdm(phys_enc);
+ dpu_encoder_helper_phys_setup_cdm(phys_enc, dpu_fmt, CDM_CDWN_OUTPUT_WB);
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
@@ -511,31 +376,32 @@ static void dpu_encoder_phys_wb_done_irq(void *arg)
}
/**
- * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * dpu_encoder_phys_wb_irq_enable - irq control of WB
* @phys: Pointer to physical encoder
- * @enable: indicates enable or disable interrupts
*/
-static void dpu_encoder_phys_wb_irq_ctrl(
- struct dpu_encoder_phys *phys, bool enable)
+static void dpu_encoder_phys_wb_irq_enable(struct dpu_encoder_phys *phys)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
- if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ if (atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
dpu_core_irq_register_callback(phys->dpu_kms,
- phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
- else if (!enable &&
- atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
- dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+ phys->irq[INTR_IDX_WB_DONE],
+ dpu_encoder_phys_wb_done_irq,
+ phys);
}
-static void dpu_encoder_phys_wb_atomic_mode_set(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+/**
+ * dpu_encoder_phys_wb_irq_disable - disable the writeback done irq
+ * @phys: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_irq_disable(struct dpu_encoder_phys *phys)
{
- phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
+ if (atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
}
static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
@@ -774,10 +640,8 @@ static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phy
static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_wb_is_master;
- ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
- ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
@@ -785,7 +649,8 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
ops->trigger_start = dpu_encoder_helper_trigger_start;
ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
- ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+ ops->irq_enable = dpu_encoder_phys_wb_irq_enable;
+ ops->irq_disable = dpu_encoder_phys_wb_irq_disable;
ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
}
@@ -820,6 +685,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_WB_LINE;
+ phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
atomic_set(&wb_enc->wbirq_refcount, 0);
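
The writeback irq_enable/irq_disable callbacks above keep the refcounted register-on-first-user / unregister-on-last-user behaviour; here is a hedged userspace rendering of that pattern using C11 atomics, where the printf calls stand in for dpu_core_irq_register_callback() and its unregister counterpart.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wbirq_refcount;

static void wb_irq_enable(void)
{
	/* Register the callback only on the 0 -> 1 transition. */
	if (atomic_fetch_add(&wbirq_refcount, 1) + 1 == 1)
		printf("register WB_DONE callback\n");
}

static void wb_irq_disable(void)
{
	/* Unregister only when the last user goes away. */
	if (atomic_fetch_sub(&wbirq_refcount, 1) - 1 == 0)
		printf("unregister WB_DONE callback\n");
}

int main(void)
{
	wb_irq_enable();
	wb_irq_enable();	/* no second registration */
	wb_irq_disable();
	wb_irq_disable();	/* last user: unregister */
	return 0;
}
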
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 54e8717403a0..f2b6eac7601d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -680,6 +680,8 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
*************************************************************/
#include "catalog/dpu_3_0_msm8998.h"
+#include "catalog/dpu_3_2_sdm660.h"
+#include "catalog/dpu_3_3_sdm630.h"
#include "catalog/dpu_4_0_sdm845.h"
#include "catalog/dpu_4_1_sdm670.h"
@@ -703,4 +705,6 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_9_0_sm8550.h"
+#include "catalog/dpu_9_2_x1e80100.h"
+
#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index ba82ef4560a6..d1aef778340b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -832,6 +832,8 @@ struct dpu_mdss_cfg {
};
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
extern const struct dpu_mdss_cfg dpu_sdm670_cfg;
extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
@@ -849,5 +851,6 @@ extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
+extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index e9cdc7934a49..9016b3ade6bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -186,7 +186,7 @@ static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm)
dpu_hw_cdm_setup_cdwn(ctx, cdm);
if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ if (fmt->chroma_sample == DPU_CHROMA_H1V2)
return -EINVAL; /*unsupported format */
opmode = CDM_HDMI_PACK_OP_MODE_EN;
opmode |= (fmt->chroma_sample << 1);
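
Hedged sketch of the chroma-sample handling touched above: H1V2 stays unsupported (the HDMI packing path now correctly rejects it), while the remaining sampling modes select the horizontal/vertical downscaler types. The enum values below are illustrative, not the hardware encoding.

#include <stdio.h>

enum chroma { CHROMA_RGB, CHROMA_H2V1, CHROMA_420, CHROMA_H1V2 };
enum cdwn { CDWN_DISABLE, CDWN_COSITE, CDWN_OFFSITE };

static int pick_cdwn(enum chroma cs, enum cdwn *h, enum cdwn *v)
{
	switch (cs) {
	case CHROMA_RGB:
		*h = CDWN_DISABLE;  *v = CDWN_DISABLE;  return 0;
	case CHROMA_H2V1:
		*h = CDWN_COSITE;   *v = CDWN_DISABLE;  return 0;
	case CHROMA_420:
		*h = CDWN_COSITE;   *v = CDWN_OFFSITE;  return 0;
	case CHROMA_H1V2:
	default:
		return -1;	/* unsupported, mirrors the -EINVAL in the HDMI path */
	}
}

int main(void)
{
	enum cdwn h, v;

	if (pick_cdwn(CHROMA_420, &h, &v) == 0)
		printf("420: h=%d v=%d\n", h, v);
	if (pick_cdwn(CHROMA_H1V2, &h, &v) < 0)
		printf("H1V2 rejected\n");
	return 0;
}
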
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index e76565c3e6a4..a06f69d0b257 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -39,6 +39,7 @@
#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
+#define CTL_PERIPH_FLUSH 0x128
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
@@ -49,6 +50,7 @@
#define MERGE_3D_IDX 23
#define DSC_IDX 22
#define CDM_IDX 26
+#define PERIPH_IDX 30
#define INTF_IDX 31
#define WB_IDX 16
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
@@ -151,6 +153,10 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
}
+ if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
+ ctx->pending_periph_flush_mask);
+
if (ctx->pending_flush_mask & BIT(DSC_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
ctx->pending_dsc_flush_mask);
@@ -311,6 +317,13 @@ static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(INTF_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
+ ctx->pending_flush_mask |= BIT(PERIPH_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
enum dpu_merge_3d merge_3d)
{
@@ -680,6 +693,10 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
+
+ ops->update_pending_flush_periph =
+ dpu_hw_ctl_update_pending_flush_periph_v1;
+
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
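
A small, self-contained model of the new peripheral flush plumbing: the per-interface bit is accumulated in pending_periph_flush_mask and PERIPH_IDX is set in the top-level pending_flush_mask, so the trigger-flush path later writes CTL_PERIPH_FLUSH. PERIPH_IDX matches the diff above; demo_ctl and the INTF_0 base value are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define PERIPH_IDX	30
#define INTF_0		0	/* placeholder base, not the driver's enum value */

struct demo_ctl {
	uint32_t pending_flush_mask;
	uint32_t pending_periph_flush_mask;
};

static void update_pending_flush_periph(struct demo_ctl *ctx, int intf)
{
	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
}

int main(void)
{
	struct demo_ctl ctl = { 0, 0 };

	update_pending_flush_periph(&ctl, 1);	/* e.g. the second interface */
	printf("flush_mask=%#x periph_mask=%#x\n",
	       ctl.pending_flush_mask, ctl.pending_periph_flush_mask);
	return 0;
}
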
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index ff85b5ee0acf..ef56280bea93 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -123,6 +123,15 @@ struct dpu_hw_ctl_ops {
enum dpu_intf blk);
/**
+ * OR in the given flushbits to the cached pending_(periph_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : interface block index
+ */
+ void (*update_pending_flush_periph)(struct dpu_hw_ctl *ctx,
+ enum dpu_intf blk);
+
+ /**
* OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -264,6 +273,7 @@ struct dpu_hw_ctl {
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_wb_flush_mask;
+ u32 pending_periph_flush_mask;
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
u32 pending_dsc_flush_mask;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 6bba531d6dc4..965692ef7892 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -163,13 +163,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- /*
- * DATA_HCTL_EN controls data timing which can be different from
- * video timing. It is recommended to enable it for all cases, except
- * if compression is enabled in 1 pixel per clock mode
- */
if (p->wide_bus_en)
- intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
data_width = p->width;
@@ -229,6 +224,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (!(p->compression_en && !p->wide_bus_en))
+ intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN;
+
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
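
The DATA_HCTL_EN comment above boils down to a single predicate; this hedged truth-table sketch shows it: the bit stays recommended-on except when compression runs at one pixel per clock (no widebus). Plain booleans stand in for the register bits.

#include <stdbool.h>
#include <stdio.h>

static bool data_hctl_en(bool compression_en, bool wide_bus_en)
{
	return !(compression_en && !wide_bus_en);
}

int main(void)
{
	printf("no DSC, 1ppc : %d\n", data_hctl_en(false, false));	/* 1 */
	printf("no DSC, wide : %d\n", data_hctl_en(false, true));	/* 1 */
	printf("DSC,    1ppc : %d\n", data_hctl_en(true,  false));	/* 0 */
	printf("DSC,    wide : %d\n", data_hctl_en(true,  true));	/* 1 */
	return 0;
}
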
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 0bd57a32144a..6f4c87244f94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -33,6 +33,7 @@ struct dpu_hw_intf_timing_params {
u32 hsync_skew;
bool wide_bus_en;
+ bool compression_en;
};
struct dpu_hw_intf_prog_fetch {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 723cc1d82143..a1f5d7c4ab91 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -317,11 +317,6 @@ struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
struct msm_drm_private *priv = s->dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_private_state *priv_state;
- int ret;
-
- ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
- if (ret)
- return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s,
&dpu_kms->global_state);
@@ -362,8 +357,6 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
struct dpu_global_state *state;
- drm_modeset_lock_init(&dpu_kms->global_state_lock);
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
@@ -374,6 +367,11 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
return 0;
}
+static void dpu_kms_global_obj_fini(struct dpu_kms *dpu_kms)
+{
+ drm_atomic_private_obj_fini(&dpu_kms->global_state);
+}
+
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
@@ -478,7 +476,7 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
* mode panels. This may be a no-op for command mode panels.
*/
trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
- ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ ret = dpu_encoder_wait_for_commit_done(encoder);
if (ret && ret != -EWOULDBLOCK) {
DPU_ERROR("wait for commit done returned %d\n", ret);
break;
@@ -565,6 +563,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
+ bool yuv_supported;
int rc;
int i;
@@ -583,7 +582,8 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return PTR_ERR(encoder);
}
- rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
+ yuv_supported = !!dpu_kms->catalog->cdm;
+ rc = msm_dp_modeset_init(priv->dp[i], dev, encoder, yuv_supported);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
return rc;
@@ -630,23 +630,26 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev,
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
+ const enum dpu_wb wb_idx = WB_2;
+ u32 maxlinewidth;
int rc;
memset(&info, 0, sizeof(info));
info.num_of_h_tiles = 1;
/* use only WB idx 2 instance for DPU */
- info.h_tile_instance[0] = WB_2;
+ info.h_tile_instance[0] = wb_idx;
info.intf_type = INTF_WB;
+ maxlinewidth = dpu_rm_get_wb(&dpu_kms->rm, info.h_tile_instance[0])->caps->maxlinewidth;
+
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
- rc = dpu_writeback_init(dev, encoder, wb_formats,
- n_formats);
+ rc = dpu_writeback_init(dev, encoder, wb_formats, n_formats, maxlinewidth);
if (rc) {
DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
return rc;
@@ -801,6 +804,8 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_kms->hw_vbif[i] = NULL;
}
+ dpu_kms_global_obj_fini(dpu_kms);
+
dpu_kms->catalog = NULL;
dpu_kms->hw_mdp = NULL;
@@ -1197,6 +1202,78 @@ static int dpu_kms_init(struct drm_device *ddev)
return 0;
}
+static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
+{
+ struct platform_device *pdev = dpu_kms->pdev;
+ struct platform_device *mdss_dev;
+ int ret;
+
+ if (!dev_is_platform(dpu_kms->pdev->dev.parent))
+ return -EINVAL;
+
+ mdss_dev = to_platform_device(dpu_kms->pdev->dev.parent);
+
+ dpu_kms->mmio = msm_ioremap(pdev, "mdp_phys");
+ if (IS_ERR(dpu_kms->mmio)) {
+ ret = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", ret);
+ dpu_kms->mmio = NULL;
+ return ret;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
+ dpu_kms->pdev,
+ "vbif_phys");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", ret);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ return ret;
+ }
+
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_mdss(mdss_dev,
+ dpu_kms->pdev,
+ "vbif_nrt_phys");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ return 0;
+}
+
+static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
+{
+ struct platform_device *pdev = dpu_kms->pdev;
+ int ret;
+
+ dpu_kms->mmio = msm_ioremap(pdev, "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ ret = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", ret);
+ dpu_kms->mmio = NULL;
+ return ret;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", ret);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ return ret;
+ }
+
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ return 0;
+}
+
static int dpu_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1204,6 +1281,9 @@ static int dpu_dev_probe(struct platform_device *pdev)
int irq;
int ret = 0;
+ if (!msm_disp_drv_should_bind(&pdev->dev, true))
+ return -ENODEV;
+
dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
@@ -1230,28 +1310,12 @@ static int dpu_dev_probe(struct platform_device *pdev)
dpu_kms->base.irq = irq;
- dpu_kms->mmio = msm_ioremap(pdev, "mdp");
- if (IS_ERR(dpu_kms->mmio)) {
- ret = PTR_ERR(dpu_kms->mmio);
- DPU_ERROR("mdp register memory map failed: %d\n", ret);
- dpu_kms->mmio = NULL;
- return ret;
- }
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
-
- dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
- if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
- ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
- DPU_ERROR("vbif register memory map failed: %d\n", ret);
- dpu_kms->vbif[VBIF_RT] = NULL;
+ if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,mdp5"))
+ ret = dpu_kms_mmap_mdp5(dpu_kms);
+ else
+ ret = dpu_kms_mmap_dpu(dpu_kms);
+ if (ret)
return ret;
- }
-
- dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
- if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
- dpu_kms->vbif[VBIF_NRT] = NULL;
- DPU_DEBUG("VBIF NRT is not defined");
- }
ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
if (ret)
@@ -1318,6 +1382,8 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
+ { .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
@@ -1334,6 +1400,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
+ { .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
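
Hedged sketch of the probe-time dispatch introduced above: the register-mapping helper is chosen from the compatible string -- MDP5-era resource names via the parent MDSS device versus the native DPU names. strstr() here stands in for of_device_is_compatible(), and the printf bodies for the msm_ioremap* calls.

#include <stdio.h>
#include <string.h>

static int mmap_mdp5(void) { printf("map mdp_phys/vbif_phys via parent MDSS\n"); return 0; }
static int mmap_dpu(void)  { printf("map mdp/vbif resources directly\n"); return 0; }

static int probe_mmap(const char *compatible)
{
	if (strstr(compatible, "-mdp5"))
		return mmap_mdp5();
	return mmap_dpu();
}

int main(void)
{
	probe_mmap("qcom,sdm660-mdp5");
	probe_mmap("qcom,sm8650-dpu");
	return 0;
}
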
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index d1207f4ec3ae..b5db3fc76ca6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -85,7 +85,6 @@ struct dpu_kms {
* Global private object state, Do not access directly, use
* dpu_kms_global_get_state()
*/
- struct drm_modeset_lock global_state_lock;
struct drm_private_obj global_state;
struct dpu_rm rm;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 724537ab776d..cb5ce3c62a22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -409,29 +409,153 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
-static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
- struct dpu_global_state *global_state,
- struct drm_encoder *enc,
- const struct msm_display_topology *top)
+static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
+ int start,
+ uint32_t enc_id)
{
- int num_dsc = top->num_dsc;
int i;
- /* check if DSC required are allocated or not */
- for (i = 0; i < num_dsc; i++) {
- if (!rm->dsc_blks[i]) {
- DPU_ERROR("DSC %d does not exist\n", i);
- return -EIO;
+ for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
+ if (global_state->pingpong_to_enc_id[i] == enc_id)
+ return i;
+ }
+
+ return -ENAVAIL;
+}
+
+static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
+{
+ /*
+ * DSC with even index must be used with the PINGPONG with even index
+ * DSC with odd index must be used with the PINGPONG with odd index
+ */
+ if ((dsc_idx & 0x01) != (pp_idx & 0x01))
+ return -ENAVAIL;
+
+ return 0;
+}
+
+static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = 0;
+ int pp_idx = 0;
+ int dsc_idx;
+ int ret;
+
+ for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
+ num_dsc < top->num_dsc; dsc_idx++) {
+ if (!rm->dsc_blks[dsc_idx])
+ continue;
+
+ if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
+ continue;
+
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ if (pp_idx < 0)
+ return -ENAVAIL;
+
+ ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
+ if (ret)
+ return -ENAVAIL;
+
+ global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ num_dsc++;
+ pp_idx++;
+ }
+
+ if (num_dsc < top->num_dsc) {
+ DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
+ num_dsc, top->num_dsc);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = 0;
+ int dsc_idx, pp_idx = 0;
+ int ret;
+
+ /* only start from even dsc index */
+ for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
+ num_dsc < top->num_dsc; dsc_idx += 2) {
+ if (!rm->dsc_blks[dsc_idx] ||
+ !rm->dsc_blks[dsc_idx + 1])
+ continue;
+
+		/* consecutive DSC indices to be paired */
+ if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
+ reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
+ continue;
+
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ if (pp_idx < 0)
+ return -ENAVAIL;
+
+ ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
+ if (ret) {
+ pp_idx = 0;
+ continue;
}
- if (global_state->dsc_to_enc_id[i]) {
- DPU_ERROR("DSC %d is already allocated\n", i);
- return -EIO;
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
+ if (pp_idx < 0)
+ return -ENAVAIL;
+
+ ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
+ if (ret) {
+ pp_idx = 0;
+ continue;
}
+
+ global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
+ num_dsc += 2;
+ pp_idx++; /* start for next pair */
}
- for (i = 0; i < num_dsc; i++)
- global_state->dsc_to_enc_id[i] = enc->base.id;
+ if (num_dsc < top->num_dsc) {
+ DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
+ num_dsc, top->num_dsc);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ const struct msm_display_topology *top)
+{
+ uint32_t enc_id = enc->base.id;
+
+ if (!top->num_dsc || !top->num_intf)
+ return 0;
+
+ /*
+ * Facts:
+ * 1) no pingpong split (two layer mixers shared one pingpong)
+ * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
+ * 3) even PINGPONG connects to even DSC
+ * 4) odd PINGPONG connects to odd DSC
+ * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
+ * +--> pp_idx_1 --> dsc_idx_1
+ */
+
+ /* num_dsc should be either 1, 2 or 4 */
+ if (top->num_dsc > top->num_intf) /* merge mode */
+ return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
+ else
+ return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
return 0;
}
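
A userspace model of the DSC reservation rules listed in the comment above: walk the DSC blocks, pair an encoder's PINGPONGs with DSCs of matching index parity, and fail if not enough fit. The arrays and ownership encoding are invented; only the parity rule and the walk order mirror the non-merge allocation path in the diff.

#include <stdint.h>
#include <stdio.h>

#define NUM_BLKS 4

static uint32_t pp_to_enc[NUM_BLKS];	/* PINGPONGs already reserved per encoder */
static uint32_t dsc_to_enc[NUM_BLKS];	/* DSC ownership being decided here */

static int next_pp(uint32_t enc_id, int start)
{
	for (int i = start; i < NUM_BLKS; i++)
		if (pp_to_enc[i] == enc_id)
			return i;
	return -1;
}

static int parity_ok(int dsc_idx, int pp_idx)
{
	/* even DSC feeds even PINGPONG, odd feeds odd */
	return (dsc_idx & 1) == (pp_idx & 1);
}

static int alloc_dsc(uint32_t enc_id, int num_dsc)
{
	int got = 0, pp = 0;

	for (int dsc = 0; dsc < NUM_BLKS && got < num_dsc; dsc++) {
		if (dsc_to_enc[dsc])		/* already reserved */
			continue;
		pp = next_pp(enc_id, pp);
		if (pp < 0 || !parity_ok(dsc, pp))
			return -1;
		dsc_to_enc[dsc] = enc_id;
		got++;
		pp++;
	}
	return got == num_dsc ? 0 : -1;
}

int main(void)
{
	pp_to_enc[0] = pp_to_enc[1] = 42;	/* encoder 42 owns PP 0 and 1 */

	if (alloc_dsc(42, 2) == 0)
		printf("DSC 0+1 -> encoder 42\n");
	return 0;
}
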
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 35d03b121a0b..bd92fb2979aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -273,6 +273,14 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_rc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_rc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -342,10 +350,6 @@ DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
TP_printk("id=%u, enable=%s",
__entry->drm_id, __entry->enable ? "true" : "false")
);
-DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
- TP_PROTO(uint32_t drm_id, bool enable),
- TP_ARGS(drm_id, enable)
-);
DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
TP_PROTO(uint32_t drm_id, bool enable),
TP_ARGS(drm_id, enable)
@@ -514,24 +518,41 @@ TRACE_EVENT(dpu_enc_wait_event_timeout,
__entry->expected_time, __entry->atomic_cnt)
);
-TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
- TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+TRACE_EVENT(dpu_enc_phys_cmd_irq_enable,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp,
int refcnt),
- TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_ARGS(drm_id, pp, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, refcnt=%d", __entry->drm_id,
+ __entry->pp,
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_disable,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp,
+ int refcnt),
+ TP_ARGS(drm_id, pp, refcnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_pingpong, pp )
- __field( bool, enable )
__field( int, refcnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->pp = pp;
- __entry->enable = enable;
__entry->refcnt = refcnt;
),
- TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
- __entry->pp, __entry->enable ? "true" : "false",
+ TP_printk("id=%u, pp=%d, refcnt=%d", __entry->drm_id,
+ __entry->pp,
__entry->refcnt)
);
@@ -592,24 +613,41 @@ TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
);
-TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
- TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+TRACE_EVENT(dpu_enc_phys_vid_irq_enable,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
int refcnt),
- TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_ARGS(drm_id, intf_idx, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d refcnt=%d", __entry->drm_id,
+ __entry->intf_idx,
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_disable,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, refcnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intf, intf_idx )
- __field( bool, enable )
__field( int, refcnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->intf_idx = intf_idx;
- __entry->enable = enable;
__entry->refcnt = refcnt;
),
- TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
- __entry->intf_idx, __entry->enable ? "true" : "false",
+ TP_printk("id=%u, intf_idx=%d refcnt=%d", __entry->drm_id,
+ __entry->intf_idx,
		  __entry->refcnt)
);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 2a5a68366582..16f144cbc0c9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_edid.h>
+#include <drm/drm_framebuffer.h>
#include "dpu_writeback.h"
@@ -24,6 +25,61 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
dev->mode_config.max_height);
}
+static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(wb_conn);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+
+ DPU_DEBUG("[atomic_check:%d]\n", connector->base.id);
+
+ if (!conn_state || !conn_state->connector) {
+ DPU_ERROR("invalid connector state\n");
+ return -EINVAL;
+ } else if (conn_state->connector->status != connector_status_connected) {
+ DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
+ return -EINVAL;
+ }
+
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ mode = &crtc_state->mode;
+
+ fb = conn_state->writeback_job->fb;
+
+ DPU_DEBUG("[fb_id:%u][fb:%u,%u][mode:\"%s\":%ux%u]\n", fb->base.id, fb->width, fb->height,
+ mode->name, mode->hdisplay, mode->vdisplay);
+
+ if (fb->width != mode->hdisplay) {
+ DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width, mode->hdisplay);
+ return -EINVAL;
+ } else if (fb->height != mode->vdisplay) {
+ DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height, mode->vdisplay);
+ return -EINVAL;
+ } else if (fb->width > dpu_wb_conn->maxlinewidth) {
+ DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+ fb->width, dpu_wb_conn->maxlinewidth);
+ return -EINVAL;
+ }
+
+ return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
+}
+
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -59,12 +115,13 @@ static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
.get_modes = dpu_wb_conn_get_modes,
+ .atomic_check = dpu_wb_conn_atomic_check,
.prepare_writeback_job = dpu_wb_conn_prepare_job,
.cleanup_writeback_job = dpu_wb_conn_cleanup_job,
};
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
- const u32 *format_list, u32 num_formats)
+ const u32 *format_list, u32 num_formats, u32 maxlinewidth)
{
struct dpu_wb_connector *dpu_wb_conn;
int rc = 0;
@@ -73,6 +130,8 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
if (!dpu_wb_conn)
return -ENOMEM;
+ dpu_wb_conn->maxlinewidth = maxlinewidth;
+
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
/* DPU initializes the encoder and sets it up completely for writeback
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
index 5a75ea916101..4b11cca8014c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
@@ -18,6 +18,7 @@
struct dpu_wb_connector {
struct drm_writeback_connector base;
struct drm_encoder *wb_enc;
+ u32 maxlinewidth;
};
static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
@@ -26,6 +27,6 @@ static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_conne
}
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
- const u32 *format_list, u32 num_formats);
+ const u32 *format_list, u32 num_formats, u32 maxlinewidth);
#endif /*_DPU_WRITEBACK_H */
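
For illustration only (this is not part of the patch), here is a minimal userspace sketch of the framebuffer check that the new dpu_wb_conn_atomic_check() performs before accepting a writeback job; the struct names below are hypothetical stand-ins for the DRM framebuffer and display-mode fields:

#include <stdio.h>

/* hypothetical stand-ins for the drm_framebuffer / drm_display_mode fields */
struct fb_info { unsigned int width, height; };
struct mode_info { unsigned int hdisplay, vdisplay; };

/* mirrors the rule in dpu_wb_conn_atomic_check(): the writeback fb must match
 * the CRTC mode exactly and must not exceed the connector's maxlinewidth */
static int wb_fb_is_valid(const struct fb_info *fb,
			  const struct mode_info *mode,
			  unsigned int maxlinewidth)
{
	if (fb->width != mode->hdisplay || fb->height != mode->vdisplay)
		return 0;
	return fb->width <= maxlinewidth;
}

int main(void)
{
	struct fb_info fb = { 1920, 1080 };
	struct mode_info mode = { 1920, 1080 };

	printf("fb accepted: %d\n", wb_fb_is_valid(&fb, &mode, 2560));
	return 0;
}
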
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index a640af22eafc..e5662412db9b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -158,46 +158,4 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
mdp5_cmd_enc->enabled = true;
}
-
-int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder)
-{
- struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
- struct mdp5_kms *mdp5_kms;
- struct device *dev;
- int intf_num;
- u32 data = 0;
-
- if (!encoder || !slave_encoder)
- return -EINVAL;
-
- mdp5_kms = get_kms(encoder);
- intf_num = mdp5_cmd_enc->intf->num;
-
- /* Switch slave encoder's trigger MUX, to use the master's
- * start signal for the slave encoder
- */
- if (intf_num == 1)
- data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
- else if (intf_num == 2)
- data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
- else
- return -EINVAL;
-
- /* Smart Panel, Sync mode */
- data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
-
- dev = &mdp5_kms->pdev->dev;
-
- /* Make sure clocks are on when connectors calling this function. */
- pm_runtime_get_sync(dev);
- mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
-
- mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
- MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
- mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
- pm_runtime_put_sync(dev);
-
- return 0;
-}
#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 8db97083e14d..eaba3b2d73b5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -263,48 +263,6 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
-int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder)
-{
- struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
- struct mdp5_kms *mdp5_kms;
- struct device *dev;
- int intf_num;
- u32 data = 0;
-
- if (!encoder || !slave_encoder)
- return -EINVAL;
-
- mdp5_kms = get_kms(encoder);
- intf_num = mdp5_encoder->intf->num;
-
- /* Switch slave encoder's TimingGen Sync mode,
- * to use the master's enable signal for the slave encoder.
- */
- if (intf_num == 1)
- data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
- else if (intf_num == 2)
- data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
- else
- return -EINVAL;
-
- dev = &mdp5_kms->pdev->dev;
- /* Make sure clocks are on when connectors calling this function. */
- pm_runtime_get_sync(dev);
-
- /* Dumb Panel, Sync mode */
- mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
- mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
- mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
-
- mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
-
- pm_runtime_put_sync(dev);
-
- return 0;
-}
-
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 43443a435d59..b40ed3a847c8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
@@ -31,8 +31,6 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
if (dumpstate && __ratelimit(&rs)) {
struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
drm_state_dump(mdp5_kms->dev, &p);
- if (mdp5_kms->smp)
- mdp5_smp_dump(mdp5_kms->smp, &p);
}
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 0827634664ae..a874fd95cc20 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -84,11 +84,6 @@ struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct drm_private_state *priv_state;
- int ret;
-
- ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
- if (ret)
- return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
if (IS_ERR(priv_state))
@@ -119,17 +114,25 @@ static void mdp5_global_destroy_state(struct drm_private_obj *obj,
kfree(mdp5_state);
}
+static void mdp5_global_print_state(struct drm_printer *p,
+ const struct drm_private_state *state)
+{
+ struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+
+ if (mdp5_state->mdp5_kms->smp)
+ mdp5_smp_dump(mdp5_state->mdp5_kms->smp, p, mdp5_state);
+}
+
static const struct drm_private_state_funcs mdp5_global_state_funcs = {
.atomic_duplicate_state = mdp5_global_duplicate_state,
.atomic_destroy_state = mdp5_global_destroy_state,
+ .atomic_print_state = mdp5_global_print_state,
};
static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
struct mdp5_global_state *state;
- drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
@@ -190,19 +193,6 @@ static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}
-static int mdp5_set_split_display(struct msm_kms *kms,
- struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder,
- bool is_cmd_mode)
-{
- if (is_cmd_mode)
- return mdp5_cmd_encoder_set_split_display(encoder,
- slave_encoder);
- else
- return mdp5_vid_encoder_set_split_display(encoder,
- slave_encoder);
-}
-
static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
static void mdp5_kms_destroy(struct msm_kms *kms)
@@ -219,39 +209,6 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
mdp5_destroy(mdp5_kms);
}
-#ifdef CONFIG_DEBUG_FS
-static int smp_show(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct drm_printer p = drm_seq_file_printer(m);
-
- if (!mdp5_kms->smp) {
- drm_printf(&p, "no SMP pool\n");
- return 0;
- }
-
- mdp5_smp_dump(mdp5_kms->smp, &p);
-
- return 0;
-}
-
-static struct drm_info_list mdp5_debugfs_list[] = {
- {"smp", smp_show },
-};
-
-static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
-{
- drm_debugfs_create_files(mdp5_debugfs_list,
- ARRAY_SIZE(mdp5_debugfs_list),
- minor->debugfs_root, minor);
-
- return 0;
-}
-#endif
-
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp5_hw_init,
@@ -268,11 +225,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.wait_flush = mdp5_wait_flush,
.complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
- .set_split_display = mdp5_set_split_display,
.destroy = mdp5_kms_destroy,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = mdp5_kms_debugfs_init,
-#endif
},
.set_irqmask = mdp5_set_irqmask,
};
@@ -620,7 +573,6 @@ static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
pm_runtime_disable(&mdp5_kms->pdev->dev);
drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
- drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -866,6 +818,9 @@ static int mdp5_dev_probe(struct platform_device *pdev)
DBG("");
+ if (!msm_disp_drv_should_bind(&pdev->dev, false))
+ return -ENODEV;
+
mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms)
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 29bf11f08601..fac9f05aa639 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -40,7 +40,6 @@ struct mdp5_kms {
* Global private object state, Do not access directly, use
* mdp5_global_get_state()
*/
- struct drm_modeset_lock glob_state_lock;
struct drm_private_obj glob_state;
struct mdp5_smp *smp;
@@ -291,8 +290,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
-int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder);
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
@@ -303,8 +300,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
void mdp5_cmd_encoder_disable(struct drm_encoder *encoder);
void mdp5_cmd_encoder_enable(struct drm_encoder *encoder);
-int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder);
#else
static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -317,11 +312,6 @@ static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
}
-static inline int mdp5_cmd_encoder_set_split_display(
- struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
-{
- return -EINVAL;
-}
#endif
#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 8b59562e29e2..b4bebb425d22 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -325,22 +325,17 @@ void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state
state->released = 0;
}
-void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
+ struct mdp5_global_state *global_state)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct mdp5_hw_pipe_state *hwpstate;
struct mdp5_smp_state *state;
- struct mdp5_global_state *global_state;
int total = 0, i, j;
drm_printf(p, "name\tinuse\tplane\n");
drm_printf(p, "----\t-----\t-----\n");
- if (drm_can_sleep())
- drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
-
- global_state = mdp5_get_existing_global_state(mdp5_kms);
-
/* grab these *after* we hold the state_lock */
hwpstate = &global_state->hwpipe;
state = &global_state->smp;
@@ -365,9 +360,6 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
bitmap_weight(state->state, smp->blk_cnt));
-
- if (drm_can_sleep())
- drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index d8b6a11413d9..21732ed485be 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -69,7 +69,9 @@ struct mdp5_smp;
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
const struct mdp5_smp_block *cfg);
-void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
+struct mdp5_global_state;
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
+ struct mdp5_global_state *global_state);
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
const struct mdp_format *format,
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 4a2e479723a8..7634e4b74208 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -15,13 +15,7 @@
#include "dp_audio.h"
#include "dp_panel.h"
#include "dp_display.h"
-
-#define HEADER_BYTE_2_BIT 0
-#define PARITY_BYTE_2_BIT 8
-#define HEADER_BYTE_1_BIT 16
-#define PARITY_BYTE_1_BIT 24
-#define HEADER_BYTE_3_BIT 16
-#define PARITY_BYTE_3_BIT 24
+#include "dp_utils.h"
struct dp_audio_private {
struct platform_device *audio_pdev;
@@ -36,71 +30,6 @@ struct dp_audio_private {
struct dp_audio dp_audio;
};
-static u8 dp_audio_get_g0_value(u8 data)
-{
- u8 c[4];
- u8 g[4];
- u8 ret_data = 0;
- u8 i;
-
- for (i = 0; i < 4; i++)
- c[i] = (data >> i) & 0x01;
-
- g[0] = c[3];
- g[1] = c[0] ^ c[3];
- g[2] = c[1];
- g[3] = c[2];
-
- for (i = 0; i < 4; i++)
- ret_data = ((g[i] & 0x01) << i) | ret_data;
-
- return ret_data;
-}
-
-static u8 dp_audio_get_g1_value(u8 data)
-{
- u8 c[4];
- u8 g[4];
- u8 ret_data = 0;
- u8 i;
-
- for (i = 0; i < 4; i++)
- c[i] = (data >> i) & 0x01;
-
- g[0] = c[0] ^ c[3];
- g[1] = c[0] ^ c[1] ^ c[3];
- g[2] = c[1] ^ c[2];
- g[3] = c[2] ^ c[3];
-
- for (i = 0; i < 4; i++)
- ret_data = ((g[i] & 0x01) << i) | ret_data;
-
- return ret_data;
-}
-
-static u8 dp_audio_calculate_parity(u32 data)
-{
- u8 x0 = 0;
- u8 x1 = 0;
- u8 ci = 0;
- u8 iData = 0;
- u8 i = 0;
- u8 parity_byte;
- u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
-
- for (i = 0; i < num_byte; i++) {
- iData = (data >> i*4) & 0xF;
-
- ci = iData ^ x1;
- x1 = x0 ^ dp_audio_get_g1_value(ci);
- x0 = dp_audio_get_g0_value(ci);
- }
-
- parity_byte = x1 | (x0 << 4);
-
- return parity_byte;
-}
-
static u32 dp_audio_get_header(struct dp_catalog *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
@@ -134,7 +63,7 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -147,7 +76,7 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -162,7 +91,7 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -184,7 +113,7 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -198,7 +127,7 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -212,7 +141,7 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -233,7 +162,7 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -247,7 +176,7 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -261,7 +190,7 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -282,7 +211,7 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -296,7 +225,7 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -310,7 +239,7 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -331,7 +260,7 @@ static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
@@ -345,7 +274,7 @@ static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
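
For reference (not part of the patch), the SDP header parity that dp_audio.c now obtains from dp_utils_calculate_parity() can be reproduced standalone, assuming the shared helper keeps the algorithm of the dp_audio_calculate_parity() code removed above:

#include <stdio.h>
#include <stdint.h>

/* nibble-wise ECC used for DP SDP header parity, same math as the removed helpers */
static uint8_t get_g0(uint8_t c)
{
	uint8_t b0 = c & 1, b1 = (c >> 1) & 1, b2 = (c >> 2) & 1, b3 = (c >> 3) & 1;

	return (uint8_t)(b3 | ((b0 ^ b3) << 1) | (b1 << 2) | (b2 << 3));
}

static uint8_t get_g1(uint8_t c)
{
	uint8_t b0 = c & 1, b1 = (c >> 1) & 1, b2 = (c >> 2) & 1, b3 = (c >> 3) & 1;

	return (uint8_t)((b0 ^ b3) | ((b0 ^ b1 ^ b3) << 1) |
			 ((b1 ^ b2) << 2) | ((b2 ^ b3) << 3));
}

static uint8_t calculate_parity(uint32_t data)
{
	uint8_t x0 = 0, x1 = 0, ci;
	int i, num_nibbles = (data & 0xFF00) ? 8 : 2;

	for (i = 0; i < num_nibbles; i++) {
		ci = ((data >> (i * 4)) & 0xF) ^ x1;
		x1 = x0 ^ get_g1(ci);
		x0 = get_g0(ci);
	}
	return (uint8_t)(x1 | (x0 << 4));
}

int main(void)
{
	/* header byte 0x02, as written by dp_audio_stream_sdp() */
	printf("parity(0x02) = 0x%02x\n", calculate_parity(0x02));
	return 0;
}
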
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 03f4951c49f4..adbd5a367395 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -4,6 +4,7 @@
*/
#include <linux/delay.h>
+#include <linux/phy/phy.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
@@ -23,6 +24,8 @@ struct dp_aux_private {
struct device *dev;
struct dp_catalog *catalog;
+ struct phy *phy;
+
struct mutex mutex;
struct completion comp;
@@ -336,7 +339,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
if (aux->native) {
aux->retry_cnt++;
if (!(aux->retry_cnt % MAX_AUX_RETRIES))
- dp_catalog_aux_update_cfg(aux->catalog);
+ phy_calibrate(aux->phy);
}
/* reset aux if link is in connected state */
if (dp_catalog_link_is_connected(aux->catalog))
@@ -439,7 +442,7 @@ void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
- dp_catalog_aux_update_cfg(aux->catalog);
+ phy_calibrate(aux->phy);
dp_catalog_aux_reset(aux->catalog);
}
@@ -517,6 +520,7 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
}
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+ struct phy *phy,
bool is_edp)
{
struct dp_aux_private *aux;
@@ -537,6 +541,7 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
aux->dev = dev;
aux->catalog = catalog;
+ aux->phy = phy;
aux->retry_cnt = 0;
/*
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 511305da4f66..f47d591c1f54 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -16,7 +16,9 @@ void dp_aux_init(struct drm_dp_aux *dp_aux);
void dp_aux_deinit(struct drm_dp_aux *dp_aux);
void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+struct phy;
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+ struct phy *phy,
bool is_edp);
void dp_aux_put(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 5142aeb705a4..3e7c84cdef47 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -7,8 +7,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-dp.h>
+#include <linux/platform_device.h>
#include <linux/rational.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
@@ -55,10 +54,31 @@
(PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \
PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK)
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
+struct dss_io_region {
+ size_t len;
+ void __iomem *base;
+};
+
+struct dss_io_data {
+ struct dss_io_region ahb;
+ struct dss_io_region aux;
+ struct dss_io_region link;
+ struct dss_io_region p0;
+};
+
struct dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
- struct dp_io *io;
+ struct dss_io_data io;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct dp_catalog dp_catalog;
u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
@@ -68,7 +88,7 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- struct dss_io_data *dss = &catalog->io->dp_controller;
+ struct dss_io_data *dss = &catalog->io;
msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
@@ -78,7 +98,7 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
{
- return readl_relaxed(catalog->io->dp_controller.aux.base + offset);
+ return readl_relaxed(catalog->io.aux.base + offset);
}
static inline void dp_write_aux(struct dp_catalog_private *catalog,
@@ -88,12 +108,12 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog,
* To make sure aux reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.aux.base + offset);
+ writel(data, catalog->io.aux.base + offset);
}
static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset)
{
- return readl_relaxed(catalog->io->dp_controller.ahb.base + offset);
+ return readl_relaxed(catalog->io.ahb.base + offset);
}
static inline void dp_write_ahb(struct dp_catalog_private *catalog,
@@ -103,7 +123,7 @@ static inline void dp_write_ahb(struct dp_catalog_private *catalog,
* To make sure phy reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.ahb.base + offset);
+ writel(data, catalog->io.ahb.base + offset);
}
static inline void dp_write_p0(struct dp_catalog_private *catalog,
@@ -113,7 +133,7 @@ static inline void dp_write_p0(struct dp_catalog_private *catalog,
* To make sure interface reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.p0.base + offset);
+ writel(data, catalog->io.p0.base + offset);
}
static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
@@ -123,12 +143,12 @@ static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
* To make sure interface reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- return readl_relaxed(catalog->io->dp_controller.p0.base + offset);
+ return readl_relaxed(catalog->io.p0.base + offset);
}
static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
{
- return readl_relaxed(catalog->io->dp_controller.link.base + offset);
+ return readl_relaxed(catalog->io.link.base + offset);
}
static inline void dp_write_link(struct dp_catalog_private *catalog,
@@ -138,7 +158,7 @@ static inline void dp_write_link(struct dp_catalog_private *catalog,
* To make sure link reg writes happens before any other operation,
* this function uses writel() instread of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.link.base + offset);
+ writel(data, catalog->io.link.base + offset);
}
/* aux related catalog functions */
@@ -243,16 +263,6 @@ void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
-void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
-{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
- struct dp_io *dp_io = catalog->io;
- struct phy *phy = dp_io->phy;
-
- phy_calibrate(phy);
-}
-
int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
{
u32 state;
@@ -260,7 +270,7 @@ int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
struct dp_catalog_private, dp_catalog);
/* poll for hpd connected status every 2ms and timeout after 500ms */
- return readl_poll_timeout(catalog->io->dp_controller.aux.base +
+ return readl_poll_timeout(catalog->io.aux.base +
REG_DP_DP_HPD_INT_STATUS,
state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
2000, 500000);
@@ -288,7 +298,7 @@ void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- struct dss_io_data *io = &catalog->io->dp_controller;
+ struct dss_io_data *io = &catalog->io;
pr_info("AHB regs\n");
dump_regs(io->ahb.base, io->ahb.len);
@@ -440,9 +450,26 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
}
+void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog)
+{
+ u32 mainlink_ctrl, hw_revision;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+
+ hw_revision = dp_catalog_hw_revision(dp_catalog);
+ if (hw_revision >= DP_HW_VERSION_1_2)
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
+ else
+ mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
+
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+}
+
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
u32 rate, u32 stream_rate_khz,
- bool fixed_nvid)
+ bool fixed_nvid, bool is_ycbcr_420)
{
u32 pixel_m, pixel_n;
u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
@@ -485,6 +512,9 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
nvid = temp;
}
+ if (is_ycbcr_420)
+ mvid /= 2;
+
if (link_rate_hbr2 == rate)
nvid *= 2;
@@ -512,7 +542,7 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
/* Poll for mainlink ready status */
- ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +
+ ret = readx_poll_timeout(readl, catalog->io.link.base +
REG_DP_MAINLINK_READY,
data, data & bit,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
@@ -575,7 +605,7 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
struct dp_catalog_private, dp_catalog);
/* Poll for mainlink ready status */
- ret = readl_poll_timeout(catalog->io->dp_controller.link.base +
+ ret = readl_poll_timeout(catalog->io.link.base +
REG_DP_MAINLINK_READY,
data, data & DP_MAINLINK_READY_FOR_VIDEO,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
@@ -765,25 +795,6 @@ void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
}
-int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog,
- u8 v_level, u8 p_level)
-{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
- struct dp_io *dp_io = catalog->io;
- struct phy *phy = dp_io->phy;
- struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
-
- /* TODO: Update for all lanes instead of just first one */
- opts_dp->voltage[0] = v_level;
- opts_dp->pre[0] = p_level;
- opts_dp->set_voltages = 1;
- phy_configure(phy, &dp_io->phy_opts);
- opts_dp->set_voltages = 0;
-
- return 0;
-}
-
void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
u32 pattern)
{
@@ -898,6 +909,99 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
return 0;
}
+static void dp_catalog_panel_send_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp)
+{
+ struct dp_catalog_private *catalog;
+ u32 header[2];
+ u32 val;
+ int i;
+
+ catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+
+ dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
+
+ dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
+ dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
+
+ for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
+ val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
+ (vsc_sdp->db[i + 3] << 24));
+ dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
+ }
+}
+
+static void dp_catalog_panel_update_sdp(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 hw_revision;
+
+ catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+
+ hw_revision = dp_catalog_hw_revision(dp_catalog);
+ if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) {
+ dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
+ dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
+ }
+}
+
+void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp)
+{
+ struct dp_catalog_private *catalog;
+ u32 cfg, cfg2, misc;
+
+ catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+
+ cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ misc = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+
+ cfg |= GEN0_SDP_EN;
+ dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 |= GENERIC0_SDPSIZE_VALID;
+ dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
+
+ dp_catalog_panel_send_vsc_sdp(dp_catalog, vsc_sdp);
+
+ /* indicates presence of VSC (BIT(6) of MISC1) */
+ misc |= DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
+
+ dp_catalog_panel_update_sdp(dp_catalog);
+}
+
+void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 cfg, cfg2, misc;
+
+ catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+
+ cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ misc = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+
+ cfg &= ~GEN0_SDP_EN;
+ dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
+
+ cfg2 &= ~GENERIC0_SDPSIZE_VALID;
+ dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
+
+ /* switch back to MSA */
+ misc &= ~DP_MISC1_VSC_SDP;
+
+ drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n");
+
+ pr_debug("misc settings = 0x%x\n", misc);
+ dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
+
+ dp_catalog_panel_update_sdp(dp_catalog);
+}
+
void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
struct drm_display_mode *drm_mode)
{
@@ -976,21 +1080,84 @@ void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
}
-struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
- struct dp_catalog_private *catalog;
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
+
+ return base;
+}
+
+static int dp_catalog_get_io(struct dp_catalog_private *catalog)
+{
+ struct platform_device *pdev = to_platform_device(catalog->dev);
+ struct dss_io_data *dss = &catalog->io;
+
+ dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
+ if (IS_ERR(dss->ahb.base))
+ return PTR_ERR(dss->ahb.base);
- if (!io) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-EINVAL);
+ dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
+ if (IS_ERR(dss->aux.base)) {
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (PTR_ERR(dss->aux.base) == -EINVAL) {
+ if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ dss->ahb.len = DP_DEFAULT_AHB_SIZE;
+ dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
+ dss->aux.len = DP_DEFAULT_AUX_SIZE;
+ dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
+ dss->link.len = DP_DEFAULT_LINK_SIZE;
+ dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
+ dss->p0.len = DP_DEFAULT_P0_SIZE;
+ } else {
+ DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
+ return PTR_ERR(dss->aux.base);
+ }
+ } else {
+ dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
+ if (IS_ERR(dss->link.base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
+ return PTR_ERR(dss->link.base);
+ }
+
+ dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
+ if (IS_ERR(dss->p0.base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
+ return PTR_ERR(dss->p0.base);
+ }
}
+ return 0;
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev)
+{
+ struct dp_catalog_private *catalog;
+ int ret;
+
catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
if (!catalog)
return ERR_PTR(-ENOMEM);
catalog->dev = dev;
- catalog->io = io;
+
+ ret = dp_catalog_get_io(catalog);
+ if (ret)
+ return ERR_PTR(ret);
return &catalog->dp_catalog;
}
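
The comment in dp_catalog_get_io() describes the fallback for device trees that still provide a single legacy register block. As a rough userspace sketch of that carve-up (assuming only the DP_DEFAULT_* values introduced above; the base address below is a hypothetical example, the real one comes from the platform resource):

#include <stdio.h>

/* offsets/sizes mirror the DP_DEFAULT_* macros added in this patch */
struct sub_region { const char *name; unsigned int offset, size; };

int main(void)
{
	static const struct sub_region layout[] = {
		{ "ahb",  0x0000, 0x0200 },
		{ "aux",  0x0200, 0x0200 },
		{ "link", 0x0400, 0x0C00 },
		{ "p0",   0x1000, 0x0400 },
	};
	unsigned long legacy_base = 0xae90000; /* example address only */
	unsigned int i;

	for (i = 0; i < sizeof(layout) / sizeof(layout[0]); i++)
		printf("%-4s base 0x%lx len 0x%04x\n", layout[i].name,
		       legacy_base + layout[i].offset, layout[i].size);
	return 0;
}
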
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 38786e855b51..75ec290127c7 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -8,7 +8,7 @@
#include <drm/drm_modes.h>
-#include "dp_parser.h"
+#include "dp_utils.h"
#include "disp/msm_disp_snapshot.h"
/* interrupts */
@@ -30,6 +30,9 @@
#define DP_AUX_CFG_MAX_VALUE_CNT 3
+#define DP_HW_VERSION_1_0 0x10000000
+#define DP_HW_VERSION_1_2 0x10020000
+
/* PHY AUX config registers */
enum dp_phy_aux_config_type {
PHY_AUX_CFG0,
@@ -84,7 +87,6 @@ int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog);
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
@@ -94,9 +96,10 @@ void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
- u32 stream_rate_khz, bool fixed_nvid);
+ u32 stream_rate_khz, bool fixed_nvid, bool is_ycbcr_420);
int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
@@ -111,8 +114,6 @@ void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter);
u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
-int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
- u8 p_level);
int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
@@ -124,12 +125,14 @@ u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
/* DP Panel APIs */
int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
+void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp);
+void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog);
void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
struct drm_display_mode *drm_mode);
void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
-struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+struct dp_catalog *dp_catalog_get(struct device *dev);
/* DP Audio APIs */
void dp_catalog_audio_get_header(struct dp_catalog *catalog);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index fb588fde298a..c4dda1faef67 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -76,13 +76,27 @@ struct dp_ctrl_private {
struct drm_dp_aux *aux;
struct dp_panel *panel;
struct dp_link *link;
- struct dp_power *power;
- struct dp_parser *parser;
struct dp_catalog *catalog;
+ struct phy *phy;
+
+ unsigned int num_core_clks;
+ struct clk_bulk_data *core_clks;
+
+ unsigned int num_link_clks;
+ struct clk_bulk_data *link_clks;
+
+ struct clk *pixel_clk;
+
+ union phy_configure_opts phy_opts;
+
struct completion idle_comp;
struct completion psr_op_comp;
struct completion video_comp;
+
+ bool core_clks_on;
+ bool link_clks_on;
+ bool stream_clks_on;
};
static int dp_aux_link_configure(struct drm_dp_aux *aux,
@@ -128,6 +142,9 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
+ if (ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ config |= DP_CONFIGURATION_CTRL_RGB_YUV; /* YUV420 */
+
/* Scrambler reset enable */
if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
@@ -162,6 +179,7 @@ static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
dp_catalog_ctrl_lane_mapping(ctrl->catalog);
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ dp_catalog_setup_peripheral_flush(ctrl->catalog);
dp_ctrl_config_ctrl(ctrl);
@@ -952,7 +970,7 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
in.hporch = drm_mode->htotal - drm_mode->hdisplay;
in.nlanes = ctrl->link->link_params.num_lanes;
in.bpp = ctrl->panel->dp_mode.bpp;
- in.pixel_enc = 444;
+ in.pixel_enc = ctrl->panel->dp_mode.out_fmt_is_yuv_420 ? 420 : 444;
in.dsc_en = 0;
in.async_en = 0;
in.fec_en = 0;
@@ -1001,6 +1019,21 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
return ret;
}
+static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl,
+ u8 v_level, u8 p_level)
+{
+ union phy_configure_opts *phy_opts = &ctrl->phy_opts;
+
+ /* TODO: Update for all lanes instead of just first one */
+ phy_opts->dp.voltage[0] = v_level;
+ phy_opts->dp.pre[0] = p_level;
+ phy_opts->dp.set_voltages = 1;
+ phy_configure(ctrl->phy, phy_opts);
+ phy_opts->dp.set_voltages = 0;
+
+ return 0;
+}
+
static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
{
struct dp_link *link = ctrl->link;
@@ -1013,7 +1046,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev,
"voltage level: %d emphasis level: %d\n",
voltage_swing_level, pre_emphasis_level);
- ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+ ret = dp_ctrl_set_vx_px(ctrl,
voltage_swing_level, pre_emphasis_level);
if (ret)
@@ -1312,44 +1345,115 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
return ret;
}
-static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
- enum dp_pm_type module, char *name, unsigned long rate)
+int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl)
{
- u32 num = ctrl->parser->mp[module].num_clk;
- struct clk_bulk_data *cfg = ctrl->parser->mp[module].clocks;
+ struct dp_ctrl_private *ctrl;
+ int ret = 0;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- while (num && strcmp(cfg->id, name)) {
- num--;
- cfg++;
+ if (ctrl->core_clks_on) {
+ drm_dbg_dp(ctrl->drm_dev, "core clks already enabled\n");
+ return 0;
}
- drm_dbg_dp(ctrl->drm_dev, "setting rate=%lu on clk=%s\n",
- rate, name);
+ ret = clk_bulk_prepare_enable(ctrl->num_core_clks, ctrl->core_clks);
+ if (ret)
+ return ret;
- if (num)
- clk_set_rate(cfg->clk, rate);
- else
- DRM_ERROR("%s clock doesn't exit to set rate %lu\n",
- name, rate);
+ ctrl->core_clks_on = true;
+
+ drm_dbg_dp(ctrl->drm_dev, "enable core clocks \n");
+ drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
+ ctrl->stream_clks_on ? "on" : "off",
+ ctrl->link_clks_on ? "on" : "off",
+ ctrl->core_clks_on ? "on" : "off");
+
+ return 0;
+}
+
+void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ clk_bulk_disable_unprepare(ctrl->num_core_clks, ctrl->core_clks);
+
+ ctrl->core_clks_on = false;
+
+ drm_dbg_dp(ctrl->drm_dev, "disable core clocks \n");
+ drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
+ ctrl->stream_clks_on ? "on" : "off",
+ ctrl->link_clks_on ? "on" : "off",
+ ctrl->core_clks_on ? "on" : "off");
+}
+
+static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ int ret = 0;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ if (ctrl->link_clks_on) {
+ drm_dbg_dp(ctrl->drm_dev, "links clks already enabled\n");
+ return 0;
+ }
+
+ if (!ctrl->core_clks_on) {
+ drm_dbg_dp(ctrl->drm_dev, "Enable core clks before link clks\n");
+
+ dp_ctrl_core_clk_enable(dp_ctrl);
+ }
+
+ ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks);
+ if (ret)
+ return ret;
+
+ ctrl->link_clks_on = true;
+
+ drm_dbg_dp(ctrl->drm_dev, "enable link clocks\n");
+ drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
+ ctrl->stream_clks_on ? "on" : "off",
+ ctrl->link_clks_on ? "on" : "off",
+ ctrl->core_clks_on ? "on" : "off");
+
+ return 0;
+}
+
+static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ clk_bulk_disable_unprepare(ctrl->num_link_clks, ctrl->link_clks);
+
+ ctrl->link_clks_on = false;
+
+ drm_dbg_dp(ctrl->drm_dev, "disabled link clocks\n");
+ drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
+ ctrl->stream_clks_on ? "on" : "off",
+ ctrl->link_clks_on ? "on" : "off",
+ ctrl->core_clks_on ? "on" : "off");
}
static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
{
int ret = 0;
- struct dp_io *dp_io = &ctrl->parser->io;
- struct phy *phy = dp_io->phy;
- struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+ struct phy *phy = ctrl->phy;
const u8 *dpcd = ctrl->panel->dpcd;
- opts_dp->lanes = ctrl->link->link_params.num_lanes;
- opts_dp->link_rate = ctrl->link->link_params.rate / 100;
- opts_dp->ssc = drm_dp_max_downspread(dpcd);
+ ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
+ ctrl->phy_opts.dp.link_rate = ctrl->link->link_params.rate / 100;
+ ctrl->phy_opts.dp.ssc = drm_dp_max_downspread(dpcd);
- phy_configure(phy, &dp_io->phy_opts);
+ phy_configure(phy, &ctrl->phy_opts);
phy_power_on(phy);
dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
- ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
+ ret = dp_ctrl_link_clk_enable(&ctrl->dp_ctrl);
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
@@ -1436,12 +1540,10 @@ void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter)
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
- struct dp_io *dp_io;
struct phy *phy;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_io = &ctrl->parser->io;
- phy = dp_io->phy;
+ phy = ctrl->phy;
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
@@ -1453,12 +1555,10 @@ void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
- struct dp_io *dp_io;
struct phy *phy;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_io = &ctrl->parser->io;
- phy = dp_io->phy;
+ phy = ctrl->phy;
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy);
@@ -1483,25 +1583,21 @@ static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
{
+ struct phy *phy = ctrl->phy;
int ret = 0;
- struct dp_io *dp_io = &ctrl->parser->io;
- struct phy *phy = dp_io->phy;
- struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- opts_dp->lanes = ctrl->link->link_params.num_lanes;
- phy_configure(phy, &dp_io->phy_opts);
+ ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
+ phy_configure(phy, &ctrl->phy_opts);
/*
* Disable and re-enable the mainlink clock since the
* link clock might have been adjusted as part of the
* link maintenance.
*/
dev_pm_opp_set_rate(ctrl->dev, 0);
- ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
- if (ret) {
- DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
- return ret;
- }
+
+ dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+
phy_power_off(phy);
/* hw recommended delay before re-enabling clocks */
msleep(20);
@@ -1517,22 +1613,16 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
{
- struct dp_io *dp_io;
struct phy *phy;
- int ret;
- dp_io = &ctrl->parser->io;
- phy = dp_io->phy;
+ phy = ctrl->phy;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
dp_catalog_ctrl_reset(ctrl->catalog);
dev_pm_opp_set_rate(ctrl->dev, 0);
- ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
- if (ret) {
- DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
- }
+ dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
phy_power_off(phy);
@@ -1576,7 +1666,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested);
- if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+ if (dp_ctrl_set_vx_px(ctrl,
ctrl->link->phy_params.v_level,
ctrl->link->phy_params.p_level)) {
DRM_ERROR("Failed to set v/p levels\n");
@@ -1636,11 +1726,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
- if (ret) {
- DRM_ERROR("failed to disable DP controller\n");
- return ret;
- }
+ dp_ctrl_off(&ctrl->dp_ctrl);
ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
if (ret) {
@@ -1649,14 +1735,23 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
}
pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
- dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
-
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000);
if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret);
return ret;
}
+ if (ctrl->stream_clks_on) {
+ drm_dbg_dp(ctrl->drm_dev, "pixel clks already enabled\n");
+ } else {
+ ret = clk_prepare_enable(ctrl->pixel_clk);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+ ctrl->stream_clks_on = true;
+ }
+
dp_ctrl_send_phy_test_pattern(ctrl);
return 0;
@@ -1747,7 +1842,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
rate = ctrl->panel->link_info.rate;
pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
- dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
+ dp_ctrl_core_clk_enable(&ctrl->dp_ctrl);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
@@ -1758,6 +1853,8 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
+ if (ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ pixel_rate >>= 1;
}
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
@@ -1873,14 +1970,18 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
- if (dp_ctrl->wide_bus_en)
+ if (dp_ctrl->wide_bus_en || ctrl->panel->dp_mode.out_fmt_is_yuv_420)
pixel_rate >>= 1;
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, pixel_rate);
- if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
+ drm_dbg_dp(ctrl->drm_dev,
+ "core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+ ctrl->core_clks_on, ctrl->link_clks_on, ctrl->stream_clks_on);
+
+ if (!ctrl->link_clks_on) { /* link clk is off */
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
@@ -1888,14 +1989,23 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
}
}
- dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
-
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000);
if (ret) {
- DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
+ DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret);
goto end;
}
+ if (ctrl->stream_clks_on) {
+ drm_dbg_dp(ctrl->drm_dev, "pixel clks already enabled\n");
+ } else {
+ ret = clk_prepare_enable(ctrl->pixel_clk);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ goto end;
+ }
+ ctrl->stream_clks_on = true;
+ }
+
if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
dp_ctrl_link_retrain(ctrl);
@@ -1912,7 +2022,8 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
- pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl));
+ pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl),
+ ctrl->panel->dp_mode.out_fmt_is_yuv_420);
dp_ctrl_setup_tr_unit(ctrl);
@@ -1930,36 +2041,28 @@ end:
return ret;
}
-int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
+void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
- struct dp_io *dp_io;
struct phy *phy;
- int ret;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_io = &ctrl->parser->io;
- phy = dp_io->phy;
+ phy = ctrl->phy;
+
+ dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
/* set dongle to D3 (power off) mode */
dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- if (dp_power_clk_status(ctrl->power, DP_STREAM_PM)) {
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
- if (ret) {
- DRM_ERROR("Failed to disable pclk. ret=%d\n", ret);
- return ret;
- }
+ if (ctrl->stream_clks_on) {
+ clk_disable_unprepare(ctrl->pixel_clk);
+ ctrl->stream_clks_on = false;
}
dev_pm_opp_set_rate(ctrl->dev, 0);
- ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
- if (ret) {
- DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
- return ret;
- }
+ dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
phy_power_off(phy);
@@ -1969,26 +2072,19 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
- return ret;
}
-int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
+void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
- struct dp_io *dp_io;
struct phy *phy;
- int ret;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_io = &ctrl->parser->io;
- phy = dp_io->phy;
+ phy = ctrl->phy;
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
- if (ret) {
- DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
- }
+ dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
@@ -1997,43 +2093,33 @@ int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
DRM_DEBUG_DP("After, phy=%p init_count=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
-
- return ret;
}
-int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
- struct dp_io *dp_io;
struct phy *phy;
- int ret = 0;
-
- if (!dp_ctrl)
- return -EINVAL;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- dp_io = &ctrl->parser->io;
- phy = dp_io->phy;
+ phy = ctrl->phy;
+
+ dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
dp_catalog_ctrl_reset(ctrl->catalog);
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
- if (ret)
- DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
+ if (ctrl->stream_clks_on) {
+ clk_disable_unprepare(ctrl->pixel_clk);
+ ctrl->stream_clks_on = false;
+ }
dev_pm_opp_set_rate(ctrl->dev, 0);
- ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
- if (ret) {
- DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
- }
+ dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
phy_power_off(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
-
- return ret;
}
irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
@@ -2081,10 +2167,60 @@ irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
return ret;
}
+static const char *core_clks[] = {
+ "core_iface",
+ "core_aux",
+};
+
+static const char *ctrl_clks[] = {
+ "ctrl_link",
+ "ctrl_link_iface",
+};
+
+static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct device *dev;
+ int i, rc;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dev = ctrl->dev;
+
+ ctrl->num_core_clks = ARRAY_SIZE(core_clks);
+ ctrl->core_clks = devm_kcalloc(dev, ctrl->num_core_clks, sizeof(*ctrl->core_clks), GFP_KERNEL);
+ if (!ctrl->core_clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ctrl->num_core_clks; i++)
+ ctrl->core_clks[i].id = core_clks[i];
+
+ rc = devm_clk_bulk_get(dev, ctrl->num_core_clks, ctrl->core_clks);
+ if (rc)
+ return rc;
+
+ ctrl->num_link_clks = ARRAY_SIZE(ctrl_clks);
+ ctrl->link_clks = devm_kcalloc(dev, ctrl->num_link_clks, sizeof(*ctrl->link_clks), GFP_KERNEL);
+ if (!ctrl->link_clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ctrl->num_link_clks; i++)
+ ctrl->link_clks[i].id = ctrl_clks[i];
+
+ rc = devm_clk_bulk_get(dev, ctrl->num_link_clks, ctrl->link_clks);
+ if (rc)
+ return rc;
+
+ ctrl->pixel_clk = devm_clk_get(dev, "stream_pixel");
+ if (IS_ERR(ctrl->pixel_clk))
+ return PTR_ERR(ctrl->pixel_clk);
+
+ return 0;
+}
+
struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_panel *panel, struct drm_dp_aux *aux,
- struct dp_power *power, struct dp_catalog *catalog,
- struct dp_parser *parser)
+ struct dp_catalog *catalog,
+ struct phy *phy)
{
struct dp_ctrl_private *ctrl;
int ret;
@@ -2118,13 +2254,18 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
init_completion(&ctrl->video_comp);
/* in parameters */
- ctrl->parser = parser;
ctrl->panel = panel;
- ctrl->power = power;
ctrl->aux = aux;
ctrl->link = link;
ctrl->catalog = catalog;
ctrl->dev = dev;
+ ctrl->phy = phy;
+
+ ret = dp_ctrl_clk_init(&ctrl->dp_ctrl);
+ if (ret) {
+ dev_err(dev, "failed to init clocks\n");
+ return ERR_PTR(ret);
+ }
return &ctrl->dp_ctrl;
}
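Note: dp_ctrl now owns its clocks directly through the clk_bulk API instead of going through dp_power. The matching enable path for the link clocks is not visible in this excerpt; a minimal sketch under that assumption, using the link_clks/num_link_clks/link_clks_on fields filled in by dp_ctrl_clk_init() (the helper name is hypothetical):

	/* Sketch: bring up the bulk link clocks and record the state checked above. */
	static int example_link_clk_enable(struct dp_ctrl_private *ctrl)
	{
		int ret;

		ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks);
		if (ret)
			return ret;

		ctrl->link_clks_on = true;
		return 0;
	}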
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b2c27d3532bf..fa014cee7e21 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -9,8 +9,6 @@
#include "dp_aux.h"
#include "dp_panel.h"
#include "dp_link.h"
-#include "dp_parser.h"
-#include "dp_power.h"
#include "dp_catalog.h"
struct dp_ctrl {
@@ -18,18 +16,20 @@ struct dp_ctrl {
bool wide_bus_en;
};
+struct phy;
+
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
-int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_off(struct dp_ctrl *dp_ctrl);
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_panel *panel, struct drm_dp_aux *aux,
- struct dp_power *power, struct dp_catalog *catalog,
- struct dp_parser *parser);
+ struct dp_catalog *catalog,
+ struct phy *phy);
void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
@@ -39,4 +39,7 @@ void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable);
void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl);
+
#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 6c281dc095b9..eca5a02f9003 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -9,7 +9,6 @@
#include <drm/drm_connector.h>
#include <drm/drm_file.h>
-#include "dp_parser.h"
#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_ctrl.h"
@@ -101,7 +100,7 @@ static int dp_test_data_show(struct seq_file *m, void *data)
seq_printf(m, "vdisplay: %d\n",
debug->link->test_video.test_v_height);
seq_printf(m, "bpc: %u\n",
- dp_link_bit_depth_to_bpc(bpc));
+ dp_link_bit_depth_to_bpp(bpc) / 3);
} else {
seq_puts(m, "0");
}
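Note: with dp_link_bit_depth_to_bpc() removed (see the dp_link.h hunk below), debugfs derives bpc from the remaining bpp helper; dividing by the three color components gives the same value the dropped helper returned, e.g.:

	/* Equivalence check, illustrative only. */
	u32 bpp = dp_link_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_8);	/* 24 */
	u32 bpc = bpp / 3;						/* 8 */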
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d37d599aec27..c4cb82af5c2f 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -9,19 +9,19 @@
#include <linux/debugfs.h>
#include <linux/component.h>
#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
#include <linux/delay.h>
#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/drm_edid.h>
#include "msm_drv.h"
#include "msm_kms.h"
-#include "dp_parser.h"
-#include "dp_power.h"
+#include "dp_ctrl.h"
#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
-#include "dp_ctrl.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_audio.h"
@@ -88,8 +88,6 @@ struct dp_display_private {
struct drm_device *drm_dev;
struct dentry *root;
- struct dp_parser *parser;
- struct dp_power *power;
struct dp_catalog *catalog;
struct drm_dp_aux *aux;
struct dp_link *link;
@@ -113,7 +111,7 @@ struct dp_display_private {
struct dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
- bool wide_bus_en;
+ bool wide_bus_supported;
struct dp_audio *audio;
};
@@ -122,7 +120,7 @@ struct msm_dp_desc {
phys_addr_t io_start;
unsigned int id;
unsigned int connector_type;
- bool wide_bus_en;
+ bool wide_bus_supported;
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
@@ -131,8 +129,8 @@ static const struct msm_dp_desc sc7180_dp_descs[] = {
};
static const struct msm_dp_desc sc7280_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
{}
};
@@ -144,22 +142,22 @@ static const struct msm_dp_desc sc8180x_dp_descs[] = {
};
static const struct msm_dp_desc sc8280xp_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
+ { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true },
{}
};
static const struct msm_dp_desc sc8280xp_edp_descs[] = {
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
- { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
- { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
- { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
+ { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
+ { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true },
{}
};
@@ -329,10 +327,26 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ struct drm_connector *connector;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ connector = dp->dp_display.connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- struct drm_bridge *bridge = dp->dp_display.bridge;
+ if ((hpd && dp->dp_display.link_ready) ||
+ (!hpd && !dp->dp_display.link_ready)) {
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+ (hpd ? "on" : "off"));
+ return 0;
+ }
/* reset video pattern flag on disconnect */
if (!hpd) {
@@ -348,7 +362,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
- drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
+ dp_display_send_hpd_event(&dp->dp_display);
return 0;
}
@@ -358,12 +372,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
int rc = 0;
struct edid *edid;
- dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
- dp->panel->max_dp_link_rate = dp->parser->max_dp_link_rate;
-
- drm_dbg_dp(dp->drm_dev, "max_lanes=%d max_link_rate=%d\n",
- dp->panel->max_dp_lanes, dp->panel->max_dp_link_rate);
-
rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
if (rc)
goto end;
@@ -383,8 +391,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp->audio_supported = drm_detect_monitor_audio(edid);
dp_panel_handle_sink_request(dp->panel);
- dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
-
/*
* set sink to normal operation mode -- D0
* before dpcd read
@@ -434,7 +440,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- dp_power_init(dp->power);
+ dp_ctrl_core_clk_enable(dp->ctrl);
dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
dp_aux_init(dp->aux);
dp->core_initialized = true;
@@ -448,7 +454,7 @@ static void dp_display_host_deinit(struct dp_display_private *dp)
dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
dp_aux_deinit(dp->aux);
- dp_power_deinit(dp->power);
+ dp_ctrl_core_clk_disable(dp->ctrl);
dp->core_initialized = false;
}
@@ -714,16 +720,13 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
struct dp_panel_in panel_in = {
.dev = dev,
};
+ struct phy *phy;
- dp->parser = dp_parser_get(dp->dp_display.pdev);
- if (IS_ERR(dp->parser)) {
- rc = PTR_ERR(dp->parser);
- DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
- dp->parser = NULL;
- goto error;
- }
+ phy = devm_phy_get(dev, "dp");
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
- dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+ dp->catalog = dp_catalog_get(dev);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
@@ -731,15 +734,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error;
}
- dp->power = dp_power_get(dev, dp->parser);
- if (IS_ERR(dp->power)) {
- rc = PTR_ERR(dp->power);
- DRM_ERROR("failed to initialize power, rc = %d\n", rc);
- dp->power = NULL;
- goto error;
- }
-
- dp->aux = dp_aux_get(dev, dp->catalog, dp->dp_display.is_edp);
+ dp->aux = dp_aux_get(dev, dp->catalog,
+ phy,
+ dp->dp_display.is_edp);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -768,7 +765,8 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
}
dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
- dp->power, dp->catalog, dp->parser);
+ dp->catalog,
+ phy);
if (IS_ERR(dp->ctrl)) {
rc = PTR_ERR(dp->ctrl);
DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
@@ -784,10 +782,6 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_ctrl;
}
- /* populate wide_bus_en to differernt layers */
- dp->ctrl->wide_bus_en = dp->wide_bus_en;
- dp->catalog->wide_bus_en = dp->wide_bus_en;
-
return rc;
error_ctrl:
@@ -808,6 +802,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
dp->panel->dp_mode.bpp = mode->bpp;
dp->panel->dp_mode.capabilities = mode->capabilities;
+ dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420;
dp_panel_init_panel_info(dp->panel);
return 0;
}
@@ -936,6 +931,10 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
dp_display = container_of(dp, struct dp_display_private, dp_display);
link_info = &dp_display->panel->link_info;
+ if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
+ dp_display->panel->vsc_sdp_supported)
+ mode_pclk_khz /= 2;
+
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
@@ -1210,16 +1209,25 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde
return NULL;
}
-static int dp_display_get_next_bridge(struct msm_dp *dp);
-
static int dp_display_probe_tail(struct device *dev)
{
struct msm_dp *dp = dev_get_drvdata(dev);
int ret;
- ret = dp_display_get_next_bridge(dp);
- if (ret)
- return ret;
+ /*
+ * External bridges are mandatory for eDP interfaces: one has to
+ * provide at least an eDP panel (which gets wrapped into panel-bridge).
+ *
+ * For DisplayPort interfaces external bridges are optional, so
+ * silently ignore an error if one is not present (-ENODEV).
+ */
+ dp->next_bridge = devm_drm_of_get_bridge(&dp->pdev->dev, dp->pdev->dev.of_node, 1, 0);
+ if (IS_ERR(dp->next_bridge)) {
+ ret = PTR_ERR(dp->next_bridge);
+ dp->next_bridge = NULL;
+ if (dp->is_edp || ret != -ENODEV)
+ return ret;
+ }
ret = component_add(dev, &dp_display_comp_ops);
if (ret)
@@ -1256,7 +1264,7 @@ static int dp_display_probe(struct platform_device *pdev)
dp->name = "drm_dp";
dp->id = desc->id;
dp->dp_display.connector_type = desc->connector_type;
- dp->wide_bus_en = desc->wide_bus_en;
+ dp->wide_bus_supported = desc->wide_bus_supported;
dp->dp_display.is_edp =
(dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
@@ -1266,18 +1274,6 @@ static int dp_display_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- rc = dp->parser->parse(dp->parser);
- if (rc) {
- DRM_ERROR("device tree parsing failed\n");
- goto err;
- }
-
- rc = dp_power_client_init(dp->power);
- if (rc) {
- DRM_ERROR("Power client create failed\n");
- goto err;
- }
-
/* setup event q */
mutex_init(&dp->event_mutex);
init_waitqueue_head(&dp->event_q);
@@ -1396,13 +1392,34 @@ void __exit msm_dp_unregister(void)
platform_driver_unregister(&dp_display_driver);
}
+bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
+ const struct drm_display_mode *mode)
+{
+ struct dp_display_private *dp;
+ const struct drm_display_info *info;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+ info = &dp_display->connector->display_info;
+
+ return dp->panel->vsc_sdp_supported && drm_mode_is_420_only(info, mode);
+}
+
+bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
+ const struct drm_display_mode *mode)
+{
+ return msm_dp_is_yuv_420_enabled(dp_display, mode);
+}
+
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
struct dp_display_private *dp;
dp = container_of(dp_display, struct dp_display_private, dp_display);
- return dp->wide_bus_en;
+ if (dp->dp_mode.out_fmt_is_yuv_420)
+ return false;
+
+ return dp->wide_bus_supported;
}
void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp)
@@ -1424,32 +1441,8 @@ void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, boo
}
}
-static int dp_display_get_next_bridge(struct msm_dp *dp)
-{
- int rc;
- struct dp_display_private *dp_priv;
-
- dp_priv = container_of(dp, struct dp_display_private, dp_display);
-
- /*
- * External bridges are mandatory for eDP interfaces: one has to
- * provide at least an eDP panel (which gets wrapped into panel-bridge).
- *
- * For DisplayPort interfaces external bridges are optional, so
- * silently ignore an error if one is not present (-ENODEV).
- */
- rc = devm_dp_parser_find_next_bridge(&dp->pdev->dev, dp_priv->parser);
- if (!dp->is_edp && rc == -ENODEV)
- return 0;
-
- if (!rc)
- dp->next_bridge = dp_priv->parser->next_bridge;
-
- return rc;
-}
-
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
- struct drm_encoder *encoder)
+ struct drm_encoder *encoder, bool yuv_supported)
{
struct dp_display_private *dp_priv;
int ret;
@@ -1465,7 +1458,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
return ret;
}
- dp_display->connector = dp_drm_connector_init(dp_display, encoder);
+ dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported);
if (IS_ERR(dp_display->connector)) {
ret = PTR_ERR(dp_display->connector);
DRM_DEV_ERROR(dev->dev,
@@ -1595,8 +1588,10 @@ void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = dp_bridge->dp_display;
struct dp_display_private *dp_display;
+ struct dp_panel *dp_panel;
dp_display = container_of(dp, struct dp_display_private, dp_display);
+ dp_panel = dp_display->panel;
memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
@@ -1615,6 +1610,16 @@ void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
dp_display->dp_mode.h_active_low =
!!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+
+ dp_display->dp_mode.out_fmt_is_yuv_420 =
+ drm_mode_is_420_only(&dp->connector->display_info, adjusted_mode) &&
+ dp_panel->vsc_sdp_supported;
+
+ /* populate wide_bus_support to different layers */
+ dp_display->ctrl->wide_bus_en =
+ dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported;
+ dp_display->catalog->wide_bus_en =
+ dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported;
}
void dp_bridge_hpd_enable(struct drm_bridge *bridge)
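Note: wide bus and YUV 4:2:0 are mutually exclusive here: dp_bridge_mode_set() clears wide_bus_en for 4:2:0-only modes, and msm_dp_wide_bus_available() reports the same per-mode decision. A condensed sketch of the rule, using the fields introduced above (the helper itself is illustrative):

	/* Sketch: wide bus is active only when supported and the output is not YUV 4:2:0. */
	static bool example_wide_bus_active(const struct dp_display_private *dp)
	{
		return dp->wide_bus_supported && !dp->dp_mode.out_fmt_is_yuv_420;
	}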
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 102f3507d824..234dada88687 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -10,6 +10,8 @@
#include <sound/hdmi-codec.h>
#include "disp/msm_disp_snapshot.h"
+#define DP_MAX_PIXEL_CLK_KHZ 675000
+
struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
@@ -28,7 +30,6 @@ struct msm_dp {
bool wide_bus_en;
- u32 max_dp_lanes;
struct dp_audio *dp_audio;
bool psr_supported;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 46e6889037e8..a819a4ff76a9 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -353,7 +353,8 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
}
/* connector initialization */
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder)
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
+ bool yuv_supported)
{
struct drm_connector *connector = NULL;
@@ -364,6 +365,9 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct dr
if (!dp_display->is_edp)
drm_connector_attach_dp_subconnector_property(connector);
+ if (yuv_supported)
+ connector->ycbcr_420_allowed = true;
+
drm_connector_attach_encoder(connector, encoder);
return connector;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index b3d684db2383..45e57ac25a4d 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -19,7 +19,8 @@ struct msm_dp_bridge {
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder);
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
+ bool yuv_supported);
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 9dd4dd926530..83da170bc56b 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -112,29 +112,6 @@ static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
}
}
-/**
- * dp_test_bit_depth_to_bpc() - convert test bit depth to bpc
- * @tbd: test bit depth
- *
- * Returns the bits per comp (bpc) to be used corresponding to the
- * bit depth value. This function assumes that bit depth has
- * already been validated.
- */
-static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
-{
- switch (tbd) {
- case DP_TEST_BIT_DEPTH_6:
- return 6;
- case DP_TEST_BIT_DEPTH_8:
- return 8;
- case DP_TEST_BIT_DEPTH_10:
- return 10;
- case DP_TEST_BIT_DEPTH_UNKNOWN:
- default:
- return 0;
- }
-}
-
void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link);
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
int dp_link_process_request(struct dp_link *dp_link);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 127f6af995cd..8e7069453952 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -4,11 +4,16 @@
*/
#include "dp_panel.h"
+#include "dp_utils.h"
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
+#define DP_MAX_NUM_DP_LANES 4
+#define DP_LINK_RATE_HBR2 540000 /* kbytes */
+
struct dp_panel_private {
struct device *dev;
struct drm_device *drm_dev;
@@ -53,6 +58,7 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
if (rc)
return rc;
+ dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd);
link_info = &dp_panel->link_info;
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
@@ -138,6 +144,9 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n",
+ dp_panel->max_dp_lanes, dp_panel->max_dp_link_rate);
+
rc = dp_panel_read_dpcd(dp_panel);
if (rc) {
DRM_ERROR("read dpcd failed %d\n", rc);
@@ -280,6 +289,53 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
}
+static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
+{
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+ struct dp_display_mode *dp_mode;
+ struct drm_dp_vsc_sdp vsc_sdp_data;
+ struct dp_sdp vsc_sdp;
+ ssize_t len;
+
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+ dp_mode = &dp_panel->dp_mode;
+
+ memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data));
+
+ /* VSC SDP header as per table 2-118 of DP 1.4 specification */
+ vsc_sdp_data.sdp_type = DP_SDP_VSC;
+ vsc_sdp_data.revision = 0x05;
+ vsc_sdp_data.length = 0x13;
+
+ /* VSC SDP Payload for DB16 */
+ vsc_sdp_data.pixelformat = DP_PIXELFORMAT_YUV420;
+ vsc_sdp_data.colorimetry = DP_COLORIMETRY_DEFAULT;
+
+ /* VSC SDP Payload for DB17 */
+ vsc_sdp_data.bpc = dp_mode->bpp / 3;
+ vsc_sdp_data.dynamic_range = DP_DYNAMIC_RANGE_CTA;
+
+ /* VSC SDP Payload for DB18 */
+ vsc_sdp_data.content_type = DP_CONTENT_TYPE_GRAPHICS;
+
+ len = drm_dp_vsc_sdp_pack(&vsc_sdp_data, &vsc_sdp);
+ if (len < 0) {
+ DRM_ERROR("unable to pack vsc sdp\n");
+ return len;
+ }
+
+ dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
+
+ return 0;
+}
+
void dp_panel_dump_regs(struct dp_panel *dp_panel)
{
struct dp_catalog *catalog;
@@ -343,6 +399,10 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
catalog->dp_active = data;
dp_catalog_panel_timing_cfg(catalog);
+
+ if (dp_panel->dp_mode.out_fmt_is_yuv_420)
+ dp_panel_setup_vsc_sdp_yuv_420(dp_panel);
+
panel->panel_on = true;
return 0;
@@ -386,10 +446,65 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
return 0;
}
+static u32 dp_panel_link_frequencies(struct device_node *of_node)
+{
+ struct device_node *endpoint;
+ u64 frequency = 0;
+ int cnt;
+
+ endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
+ if (!endpoint)
+ return 0;
+
+ cnt = of_property_count_u64_elems(endpoint, "link-frequencies");
+
+ if (cnt > 0)
+ of_property_read_u64_index(endpoint, "link-frequencies",
+ cnt - 1, &frequency);
+ of_node_put(endpoint);
+
+ do_div(frequency,
+ 10 * /* from symbol rate to link rate */
+ 1000); /* kbytes */
+
+ return frequency;
+}
+
+static int dp_panel_parse_dt(struct dp_panel *dp_panel)
+{
+ struct dp_panel_private *panel;
+ struct device_node *of_node;
+ int cnt;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ of_node = panel->dev->of_node;
+
+ /*
+ * data-lanes is the property of dp_out endpoint
+ */
+ cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
+ if (cnt < 0) {
+ /* legacy code, data-lanes is the property of mdss_dp node */
+ cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
+ }
+
+ if (cnt > 0)
+ dp_panel->max_dp_lanes = cnt;
+ else
+ dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
+
+ dp_panel->max_dp_link_rate = dp_panel_link_frequencies(of_node);
+ if (!dp_panel->max_dp_link_rate)
+ dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2;
+
+ return 0;
+}
+
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
{
struct dp_panel_private *panel;
struct dp_panel *dp_panel;
+ int ret;
if (!in->dev || !in->catalog || !in->aux || !in->link) {
DRM_ERROR("invalid input\n");
@@ -408,6 +523,10 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
dp_panel = &panel->dp_panel;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ ret = dp_panel_parse_dt(dp_panel);
+ if (ret)
+ return ERR_PTR(ret);
+
return dp_panel;
}
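Note: dp_panel now parses data-lanes and link-frequencies itself, taking over from the deleted dp_parser. The do_div() above converts the highest advertised per-lane bit rate (in Hz) into the driver's kHz link-rate unit; a worked example with an assumed HBR2 device tree entry:

	/* Illustration only: dp_out endpoint advertising HBR2 (5.4 Gbps per lane). */
	u64 freq = 5400000000ULL;	/* last "link-frequencies" entry, in Hz */
	do_div(freq, 10 * 1000);	/* bits -> symbols (/10), Hz -> kHz (/1000): 540000 == DP_LINK_RATE_HBR2 */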
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index a0dfc579c5f9..e843f5062d1f 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -19,6 +19,7 @@ struct dp_display_mode {
u32 bpp;
u32 h_active_low;
u32 v_active_low;
+ bool out_fmt_is_yuv_420;
};
struct dp_panel_in {
@@ -45,6 +46,7 @@ struct dp_panel {
struct dp_display_mode dp_mode;
struct dp_panel_psr psr_cap;
bool video_test;
+ bool vsc_sdp_supported;
u32 vic;
u32 max_dp_lanes;
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
deleted file mode 100644
index 7032dcc8842b..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ /dev/null
@@ -1,327 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/of_gpio.h>
-#include <linux/phy/phy.h>
-
-#include <drm/drm_of.h>
-#include <drm/drm_print.h>
-#include <drm/drm_bridge.h>
-
-#include "dp_parser.h"
-#include "dp_reg.h"
-
-#define DP_DEFAULT_AHB_OFFSET 0x0000
-#define DP_DEFAULT_AHB_SIZE 0x0200
-#define DP_DEFAULT_AUX_OFFSET 0x0200
-#define DP_DEFAULT_AUX_SIZE 0x0200
-#define DP_DEFAULT_LINK_OFFSET 0x0400
-#define DP_DEFAULT_LINK_SIZE 0x0C00
-#define DP_DEFAULT_P0_OFFSET 0x1000
-#define DP_DEFAULT_P0_SIZE 0x0400
-
-static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
-{
- struct resource *res;
- void __iomem *base;
-
- base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
- if (!IS_ERR(base))
- *len = resource_size(res);
-
- return base;
-}
-
-static int dp_parser_ctrl_res(struct dp_parser *parser)
-{
- struct platform_device *pdev = parser->pdev;
- struct dp_io *io = &parser->io;
- struct dss_io_data *dss = &io->dp_controller;
-
- dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
- if (IS_ERR(dss->ahb.base))
- return PTR_ERR(dss->ahb.base);
-
- dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
- if (IS_ERR(dss->aux.base)) {
- /*
- * The initial binding had a single reg, but in order to
- * support variation in the sub-region sizes this was split.
- * dp_ioremap() will fail with -EINVAL here if only a single
- * reg is specified, so fill in the sub-region offsets and
- * lengths based on this single region.
- */
- if (PTR_ERR(dss->aux.base) == -EINVAL) {
- if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
- DRM_ERROR("legacy memory region not large enough\n");
- return -EINVAL;
- }
-
- dss->ahb.len = DP_DEFAULT_AHB_SIZE;
- dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
- dss->aux.len = DP_DEFAULT_AUX_SIZE;
- dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
- dss->link.len = DP_DEFAULT_LINK_SIZE;
- dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
- dss->p0.len = DP_DEFAULT_P0_SIZE;
- } else {
- DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
- return PTR_ERR(dss->aux.base);
- }
- } else {
- dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
- if (IS_ERR(dss->link.base)) {
- DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
- return PTR_ERR(dss->link.base);
- }
-
- dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
- if (IS_ERR(dss->p0.base)) {
- DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
- return PTR_ERR(dss->p0.base);
- }
- }
-
- io->phy = devm_phy_get(&pdev->dev, "dp");
- if (IS_ERR(io->phy))
- return PTR_ERR(io->phy);
-
- return 0;
-}
-
-static u32 dp_parser_link_frequencies(struct device_node *of_node)
-{
- struct device_node *endpoint;
- u64 frequency = 0;
- int cnt;
-
- endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
- if (!endpoint)
- return 0;
-
- cnt = of_property_count_u64_elems(endpoint, "link-frequencies");
-
- if (cnt > 0)
- of_property_read_u64_index(endpoint, "link-frequencies",
- cnt - 1, &frequency);
- of_node_put(endpoint);
-
- do_div(frequency,
- 10 * /* from symbol rate to link rate */
- 1000); /* kbytes */
-
- return frequency;
-}
-
-static int dp_parser_misc(struct dp_parser *parser)
-{
- struct device_node *of_node = parser->pdev->dev.of_node;
- int cnt;
-
- /*
- * data-lanes is the property of dp_out endpoint
- */
- cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
- if (cnt < 0) {
- /* legacy code, data-lanes is the property of mdss_dp node */
- cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
- }
-
- if (cnt > 0)
- parser->max_dp_lanes = cnt;
- else
- parser->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
-
- parser->max_dp_link_rate = dp_parser_link_frequencies(of_node);
- if (!parser->max_dp_link_rate)
- parser->max_dp_link_rate = DP_LINK_RATE_HBR2;
-
- return 0;
-}
-
-static inline bool dp_parser_check_prefix(const char *clk_prefix,
- const char *clk_name)
-{
- return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
-}
-
-static int dp_parser_init_clk_data(struct dp_parser *parser)
-{
- int num_clk, i, rc;
- int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
- const char *clk_name;
- struct device *dev = &parser->pdev->dev;
- struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
- struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
- struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
-
- num_clk = of_property_count_strings(dev->of_node, "clock-names");
- if (num_clk <= 0) {
- DRM_ERROR("no clocks are defined\n");
- return -EINVAL;
- }
-
- for (i = 0; i < num_clk; i++) {
- rc = of_property_read_string_index(dev->of_node,
- "clock-names", i, &clk_name);
- if (rc < 0)
- return rc;
-
- if (dp_parser_check_prefix("core", clk_name))
- core_clk_count++;
-
- if (dp_parser_check_prefix("ctrl", clk_name))
- ctrl_clk_count++;
-
- if (dp_parser_check_prefix("stream", clk_name))
- stream_clk_count++;
- }
-
- /* Initialize the CORE power module */
- if (core_clk_count == 0) {
- DRM_ERROR("no core clocks are defined\n");
- return -EINVAL;
- }
-
- core_power->num_clk = core_clk_count;
- core_power->clocks = devm_kcalloc(dev,
- core_power->num_clk, sizeof(struct clk_bulk_data),
- GFP_KERNEL);
- if (!core_power->clocks)
- return -ENOMEM;
-
- /* Initialize the CTRL power module */
- if (ctrl_clk_count == 0) {
- DRM_ERROR("no ctrl clocks are defined\n");
- return -EINVAL;
- }
-
- ctrl_power->num_clk = ctrl_clk_count;
- ctrl_power->clocks = devm_kcalloc(dev,
- ctrl_power->num_clk, sizeof(struct clk_bulk_data),
- GFP_KERNEL);
- if (!ctrl_power->clocks) {
- ctrl_power->num_clk = 0;
- return -ENOMEM;
- }
-
- /* Initialize the STREAM power module */
- if (stream_clk_count == 0) {
- DRM_ERROR("no stream (pixel) clocks are defined\n");
- return -EINVAL;
- }
-
- stream_power->num_clk = stream_clk_count;
- stream_power->clocks = devm_kcalloc(dev,
- stream_power->num_clk, sizeof(struct clk_bulk_data),
- GFP_KERNEL);
- if (!stream_power->clocks) {
- stream_power->num_clk = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int dp_parser_clock(struct dp_parser *parser)
-{
- int rc = 0, i = 0;
- int num_clk = 0;
- int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
- int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
- const char *clk_name;
- struct device *dev = &parser->pdev->dev;
- struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
- struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
- struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
-
- rc = dp_parser_init_clk_data(parser);
- if (rc) {
- DRM_ERROR("failed to initialize power data %d\n", rc);
- return -EINVAL;
- }
-
- core_clk_count = core_power->num_clk;
- ctrl_clk_count = ctrl_power->num_clk;
- stream_clk_count = stream_power->num_clk;
-
- num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
-
- for (i = 0; i < num_clk; i++) {
- rc = of_property_read_string_index(dev->of_node, "clock-names",
- i, &clk_name);
- if (rc) {
- DRM_ERROR("error reading clock-names %d\n", rc);
- return rc;
- }
- if (dp_parser_check_prefix("core", clk_name) &&
- core_clk_index < core_clk_count) {
- core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- core_clk_index++;
- } else if (dp_parser_check_prefix("stream", clk_name) &&
- stream_clk_index < stream_clk_count) {
- stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- stream_clk_index++;
- } else if (dp_parser_check_prefix("ctrl", clk_name) &&
- ctrl_clk_index < ctrl_clk_count) {
- ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- ctrl_clk_index++;
- }
- }
-
- return 0;
-}
-
-int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser)
-{
- struct platform_device *pdev = parser->pdev;
- struct drm_bridge *bridge;
-
- bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
-
- parser->next_bridge = bridge;
-
- return 0;
-}
-
-static int dp_parser_parse(struct dp_parser *parser)
-{
- int rc = 0;
-
- if (!parser) {
- DRM_ERROR("invalid input\n");
- return -EINVAL;
- }
-
- rc = dp_parser_ctrl_res(parser);
- if (rc)
- return rc;
-
- rc = dp_parser_misc(parser);
- if (rc)
- return rc;
-
- rc = dp_parser_clock(parser);
- if (rc)
- return rc;
-
- return 0;
-}
-
-struct dp_parser *dp_parser_get(struct platform_device *pdev)
-{
- struct dp_parser *parser;
-
- parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
- if (!parser)
- return ERR_PTR(-ENOMEM);
-
- parser->parse = dp_parser_parse;
- parser->pdev = pdev;
-
- return parser;
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
deleted file mode 100644
index 1f068626d445..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_PARSER_H_
-#define _DP_PARSER_H_
-
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-dp.h>
-
-#include "msm_drv.h"
-
-#define DP_LABEL "MDSS DP DISPLAY"
-#define DP_MAX_PIXEL_CLK_KHZ 675000
-#define DP_MAX_NUM_DP_LANES 4
-#define DP_LINK_RATE_HBR2 540000 /* kbytes */
-
-enum dp_pm_type {
- DP_CORE_PM,
- DP_CTRL_PM,
- DP_STREAM_PM,
- DP_PHY_PM,
- DP_MAX_PM
-};
-
-struct dss_io_region {
- size_t len;
- void __iomem *base;
-};
-
-struct dss_io_data {
- struct dss_io_region ahb;
- struct dss_io_region aux;
- struct dss_io_region link;
- struct dss_io_region p0;
-};
-
-static inline const char *dp_parser_pm_name(enum dp_pm_type module)
-{
- switch (module) {
- case DP_CORE_PM: return "DP_CORE_PM";
- case DP_CTRL_PM: return "DP_CTRL_PM";
- case DP_STREAM_PM: return "DP_STREAM_PM";
- case DP_PHY_PM: return "DP_PHY_PM";
- default: return "???";
- }
-}
-
-/**
- * struct dp_display_data - display related device tree data.
- *
- * @ctrl_node: referece to controller device
- * @phy_node: reference to phy device
- * @is_active: is the controller currently active
- * @name: name of the display
- * @display_type: type of the display
- */
-struct dp_display_data {
- struct device_node *ctrl_node;
- struct device_node *phy_node;
- bool is_active;
- const char *name;
- const char *display_type;
-};
-
-/**
- * struct dp_ctrl_resource - controller's IO related data
- *
- * @dp_controller: Display Port controller mapped memory address
- * @phy_io: phy's mapped memory address
- */
-struct dp_io {
- struct dss_io_data dp_controller;
- struct phy *phy;
- union phy_configure_opts phy_opts;
-};
-
-/**
- * struct dp_pinctrl - DP's pin control
- *
- * @pin: pin-controller's instance
- * @state_active: active state pin control
- * @state_hpd_active: hpd active state pin control
- * @state_suspend: suspend state pin control
- */
-struct dp_pinctrl {
- struct pinctrl *pin;
- struct pinctrl_state *state_active;
- struct pinctrl_state *state_hpd_active;
- struct pinctrl_state *state_suspend;
-};
-
-/* Regulators for DP devices */
-struct dp_reg_entry {
- char name[32];
- int enable_load;
- int disable_load;
-};
-
-struct dss_module_power {
- unsigned int num_clk;
- struct clk_bulk_data *clocks;
-};
-
-/**
- * struct dp_parser - DP parser's data exposed to clients
- *
- * @pdev: platform data of the client
- * @mp: gpio, regulator and clock related data
- * @pinctrl: pin-control related data
- * @disp_data: controller's display related data
- * @parse: function to be called by client to parse device tree.
- */
-struct dp_parser {
- struct platform_device *pdev;
- struct dss_module_power mp[DP_MAX_PM];
- struct dp_pinctrl pinctrl;
- struct dp_io io;
- struct dp_display_data disp_data;
- u32 max_dp_lanes;
- u32 max_dp_link_rate;
- struct drm_bridge *next_bridge;
-
- int (*parse)(struct dp_parser *parser);
-};
-
-/**
- * dp_parser_get() - get the DP's device tree parser module
- *
- * @pdev: platform data of the client
- * return: pointer to dp_parser structure.
- *
- * This function provides client capability to parse the
- * device tree and populate the data structures. The data
- * related to clock, regulators, pin-control and other
- * can be parsed using this module.
- */
-struct dp_parser *dp_parser_get(struct platform_device *pdev);
-
-/**
- * devm_dp_parser_find_next_bridge() - find an additional bridge to DP
- *
- * @dev: device to tie bridge lifetime to
- * @parser: dp_parser data from client
- *
- * This function is used to find any additional bridge attached to
- * the DP controller. The eDP interface requires a panel bridge.
- *
- * Return: 0 if able to get the bridge, otherwise negative errno for failure.
- */
-int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser);
-
-#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
deleted file mode 100644
index c4843dd69f47..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pm_opp.h>
-#include "dp_power.h"
-#include "msm_drv.h"
-
-struct dp_power_private {
- struct dp_parser *parser;
- struct device *dev;
- struct drm_device *drm_dev;
- struct clk *link_clk_src;
- struct clk *pixel_provider;
- struct clk *link_provider;
-
- struct dp_power dp_power;
-};
-
-static int dp_power_clk_init(struct dp_power_private *power)
-{
- int rc = 0;
- struct dss_module_power *core, *ctrl, *stream;
- struct device *dev = power->dev;
-
- core = &power->parser->mp[DP_CORE_PM];
- ctrl = &power->parser->mp[DP_CTRL_PM];
- stream = &power->parser->mp[DP_STREAM_PM];
-
- rc = devm_clk_bulk_get(dev, core->num_clk, core->clocks);
- if (rc)
- return rc;
-
- rc = devm_clk_bulk_get(dev, ctrl->num_clk, ctrl->clocks);
- if (rc)
- return -ENODEV;
-
- rc = devm_clk_bulk_get(dev, stream->num_clk, stream->clocks);
- if (rc)
- return -ENODEV;
-
- return 0;
-}
-
-int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
-{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- drm_dbg_dp(power->drm_dev,
- "core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
- dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
-
- if (pm_type == DP_CORE_PM)
- return dp_power->core_clks_on;
-
- if (pm_type == DP_CTRL_PM)
- return dp_power->link_clks_on;
-
- if (pm_type == DP_STREAM_PM)
- return dp_power->stream_clks_on;
-
- return 0;
-}
-
-int dp_power_clk_enable(struct dp_power *dp_power,
- enum dp_pm_type pm_type, bool enable)
-{
- int rc = 0;
- struct dp_power_private *power;
- struct dss_module_power *mp;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
- pm_type != DP_STREAM_PM) {
- DRM_ERROR("unsupported power module: %s\n",
- dp_parser_pm_name(pm_type));
- return -EINVAL;
- }
-
- if (enable) {
- if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
- drm_dbg_dp(power->drm_dev,
- "core clks already enabled\n");
- return 0;
- }
-
- if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
- drm_dbg_dp(power->drm_dev,
- "links clks already enabled\n");
- return 0;
- }
-
- if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
- drm_dbg_dp(power->drm_dev,
- "pixel clks already enabled\n");
- return 0;
- }
-
- if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
- drm_dbg_dp(power->drm_dev,
- "Enable core clks before link clks\n");
- mp = &power->parser->mp[DP_CORE_PM];
-
- rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
- if (rc)
- return rc;
-
- dp_power->core_clks_on = true;
- }
- }
-
- mp = &power->parser->mp[pm_type];
- if (enable) {
- rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
- if (rc)
- return rc;
- } else {
- clk_bulk_disable_unprepare(mp->num_clk, mp->clocks);
- }
-
- if (pm_type == DP_CORE_PM)
- dp_power->core_clks_on = enable;
- else if (pm_type == DP_STREAM_PM)
- dp_power->stream_clks_on = enable;
- else
- dp_power->link_clks_on = enable;
-
- drm_dbg_dp(power->drm_dev, "%s clocks for %s\n",
- enable ? "enable" : "disable",
- dp_parser_pm_name(pm_type));
- drm_dbg_dp(power->drm_dev,
- "strem_clks:%s link_clks:%s core_clks:%s\n",
- dp_power->stream_clks_on ? "on" : "off",
- dp_power->link_clks_on ? "on" : "off",
- dp_power->core_clks_on ? "on" : "off");
-
- return 0;
-}
-
-int dp_power_client_init(struct dp_power *dp_power)
-{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- return dp_power_clk_init(power);
-}
-
-int dp_power_init(struct dp_power *dp_power)
-{
- return dp_power_clk_enable(dp_power, DP_CORE_PM, true);
-}
-
-int dp_power_deinit(struct dp_power *dp_power)
-{
- return dp_power_clk_enable(dp_power, DP_CORE_PM, false);
-}
-
-struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
-{
- struct dp_power_private *power;
- struct dp_power *dp_power;
-
- power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
- if (!power)
- return ERR_PTR(-ENOMEM);
-
- power->parser = parser;
- power->dev = dev;
-
- dp_power = &power->dp_power;
-
- return dp_power;
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
deleted file mode 100644
index 55ada51edb57..000000000000
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DP_POWER_H_
-#define _DP_POWER_H_
-
-#include "dp_parser.h"
-
-/**
- * sruct dp_power - DisplayPort's power related data
- *
- * @init: initializes the regulators/core clocks/GPIOs/pinctrl
- * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
- * @clk_enable: enable/disable the DP clocks
- * @set_pixel_clk_parent: set the parent of DP pixel clock
- */
-struct dp_power {
- bool core_clks_on;
- bool link_clks_on;
- bool stream_clks_on;
-};
-
-/**
- * dp_power_init() - enable power supplies for display controller
- *
- * @power: instance of power module
- * return: 0 if success or error if failure.
- *
- * This API will turn on the regulators and configures gpio's
- * aux/hpd.
- */
-int dp_power_init(struct dp_power *power);
-
-/**
- * dp_power_deinit() - turn off regulators and gpios.
- *
- * @power: instance of power module
- * return: 0 for success
- *
- * This API turns off power and regulators.
- */
-int dp_power_deinit(struct dp_power *power);
-
-/**
- * dp_power_clk_status() - display controller clocks status
- *
- * @power: instance of power module
- * @pm_type: type of pm, core/ctrl/phy
- * return: status of power clocks
- *
- * This API return status of DP clocks
- */
-
-int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
-
-/**
- * dp_power_clk_enable() - enable display controller clocks
- *
- * @power: instance of power module
- * @pm_type: type of pm, core/ctrl/phy
- * @enable: enables or disables
- * return: pointer to allocated power module data
- *
- * This API will call setrate and enable for DP clocks
- */
-
-int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
- bool enable);
-
-/**
- * dp_power_client_init() - initialize clock and regulator modules
- *
- * @power: instance of power module
- * return: 0 for success, error for failure.
- *
- * This API will configure the DisplayPort's clocks and regulator
- * modules.
- */
-int dp_power_client_init(struct dp_power *power);
-
-/**
- * dp_power_get() - configure and get the DisplayPort power module data
- *
- * @parser: instance of parser module
- * return: pointer to allocated power module data
- *
- * This API will configure the DisplayPort's power module and provides
- * methods to be called by the client to configure the power related
- * modules.
- */
-struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser);
-
-#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 78785ed4b40c..3835c7f5cb98 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -6,6 +6,9 @@
#ifndef _DP_REG_H_
#define _DP_REG_H_
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/* DP_TX Registers */
#define REG_DP_HW_VERSION (0x00000000)
@@ -102,6 +105,9 @@
#define DP_MAINLINK_CTRL_ENABLE (0x00000001)
#define DP_MAINLINK_CTRL_RESET (0x00000002)
#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010)
+#define DP_MAINLINK_CTRL_FLUSH_MODE_MASK GENMASK(24, 23)
+#define DP_MAINLINK_FLUSH_MODE_UPDATE_SDP FIELD_PREP(DP_MAINLINK_CTRL_FLUSH_MODE_MASK, 1)
+#define DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE FIELD_PREP(DP_MAINLINK_CTRL_FLUSH_MODE_MASK, 3)
#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000)
#define REG_DP_STATE_CTRL (0x00000004)
@@ -142,6 +148,7 @@
#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001)
#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+#define DP_MISC1_VSC_SDP (0x00004000)
#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
@@ -204,9 +211,11 @@
#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214)
#define MMSS_DP_SDP_CFG (0x00000228)
+#define GEN0_SDP_EN (0x00020000)
#define MMSS_DP_SDP_CFG2 (0x0000022C)
#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230)
#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234)
+#define GENERIC0_SDPSIZE_VALID (0x00010000)
#define MMSS_DP_AUDIO_STREAM_0 (0x00000240)
#define MMSS_DP_AUDIO_STREAM_1 (0x00000244)
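Note: the new flush-mode definitions use the bitfield.h helpers instead of hand-shifted constants; FIELD_PREP() places the value inside the two-bit mask, so the macros resolve to fixed register values:

	/* Illustration: both flush modes expand at compile time. */
	u32 sdp_flush    = DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;		/* FIELD_PREP(GENMASK(24, 23), 1) == 0x00800000 */
	u32 periph_flush = DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;	/* FIELD_PREP(GENMASK(24, 23), 3) == 0x01800000 */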
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c
new file mode 100644
index 000000000000..da9207caf72d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_utils.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+
+#include "dp_utils.h"
+
+#define DP_SDP_HEADER_SIZE 8
+
+u8 dp_utils_get_g0_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[3];
+ g[1] = c[0] ^ c[3];
+ g[2] = c[1];
+ g[3] = c[2];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+u8 dp_utils_get_g1_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[0] ^ c[3];
+ g[1] = c[0] ^ c[1] ^ c[3];
+ g[2] = c[1] ^ c[2];
+ g[3] = c[2] ^ c[3];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+u8 dp_utils_calculate_parity(u32 data)
+{
+ u8 x0 = 0;
+ u8 x1 = 0;
+ u8 ci = 0;
+ u8 iData = 0;
+ u8 i = 0;
+ u8 parity_byte;
+ u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+ for (i = 0; i < num_byte; i++) {
+ iData = (data >> i * 4) & 0xF;
+
+ ci = iData ^ x1;
+ x1 = x0 ^ dp_utils_get_g1_value(ci);
+ x0 = dp_utils_get_g0_value(ci);
+ }
+
+ parity_byte = x1 | (x0 << 4);
+
+ return parity_byte;
+}
+
+ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
+{
+ size_t length;
+
+ length = sizeof(header_buff);
+ if (length < DP_SDP_HEADER_SIZE)
+ return -ENOSPC;
+
+ header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
+ FIELD_PREP(PARITY_0_MASK, dp_utils_calculate_parity(sdp_header->HB0)) |
+ FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
+ FIELD_PREP(PARITY_1_MASK, dp_utils_calculate_parity(sdp_header->HB1));
+
+ header_buff[1] = FIELD_PREP(HEADER_2_MASK, sdp_header->HB2) |
+ FIELD_PREP(PARITY_2_MASK, dp_utils_calculate_parity(sdp_header->HB2)) |
+ FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
+ FIELD_PREP(PARITY_3_MASK, dp_utils_calculate_parity(sdp_header->HB3));
+
+ return length;
+}
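Note: dp_utils_pack_sdp_header() packs the four header bytes plus their computed parity into two 32-bit words, so callers must pass a buffer of at least two u32 entries. A minimal usage sketch, assuming a VSC SDP header like the one built in dp_panel.c (the initializer values mirror that code; the snippet itself is illustrative):

	/* Sketch: stage a VSC SDP header for programming into the catalog registers. */
	struct dp_sdp_header hdr = {
		.HB0 = 0x00,		/* secondary-data packet ID */
		.HB1 = DP_SDP_VSC,	/* packet type */
		.HB2 = 0x05,		/* revision */
		.HB3 = 0x13,		/* number of valid data bytes */
	};
	u32 header_buff[2];
	ssize_t len = dp_utils_pack_sdp_header(&hdr, header_buff);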
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h
new file mode 100644
index 000000000000..7c056d9798dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_utils.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_UTILS_H_
+#define _DP_UTILS_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <drm/display/drm_dp_helper.h>
+
+#define HEADER_BYTE_0_BIT 0
+#define PARITY_BYTE_0_BIT 8
+#define HEADER_BYTE_1_BIT 16
+#define PARITY_BYTE_1_BIT 24
+#define HEADER_BYTE_2_BIT 0
+#define PARITY_BYTE_2_BIT 8
+#define HEADER_BYTE_3_BIT 16
+#define PARITY_BYTE_3_BIT 24
+
+#define HEADER_0_MASK GENMASK(7, 0)
+#define PARITY_0_MASK GENMASK(15, 8)
+#define HEADER_1_MASK GENMASK(23, 16)
+#define PARITY_1_MASK GENMASK(31, 24)
+#define HEADER_2_MASK GENMASK(7, 0)
+#define PARITY_2_MASK GENMASK(15, 8)
+#define HEADER_3_MASK GENMASK(23, 16)
+#define PARITY_3_MASK GENMASK(31, 24)
+
+u8 dp_utils_get_g0_value(u8 data);
+u8 dp_utils_get_g1_value(u8 data);
+u8 dp_utils_calculate_parity(u32 data);
+ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
+
+#endif /* _DP_UTILS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index c6bd7bf15605..37c4c07005fe 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -216,6 +216,7 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
+ struct drm_bridge *bridge;
int ret;
msm_dsi->dev = dev;
@@ -235,15 +236,14 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
}
- msm_dsi->encoder = encoder;
-
- ret = msm_dsi_manager_bridge_init(msm_dsi);
- if (ret) {
+ bridge = msm_dsi_manager_bridge_init(msm_dsi, encoder);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
return ret;
}
- ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id, bridge);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 28379b1af63f..2ad9a842c678 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -35,41 +35,25 @@ struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
- /* internal dsi bridge attached to MDP interface */
- struct drm_bridge *bridge;
-
struct mipi_dsi_host *host;
struct msm_dsi_phy *phy;
- /*
- * external_bridge connected to dsi bridge output
- */
- struct drm_bridge *external_bridge;
-
struct device *phy_dev;
bool phy_enabled;
- /* the encoder we are hooked to (outside of dsi block) */
- struct drm_encoder *encoder;
-
int id;
};
/* dsi manager */
-int msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi);
-int msm_dsi_manager_ext_bridge_init(u8 id);
+struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi,
+ struct drm_encoder *encoder);
+int msm_dsi_manager_ext_bridge_init(u8 id, struct drm_bridge *int_bridge);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
void msm_dsi_manager_tpg_enable(void);
-/* msm dsi */
-static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
-{
- return msm_dsi->external_bridge;
-}
-
/* dsi host */
struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index deeecdfd6c4e..9d86a6aca6f2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -183,16 +183,6 @@ struct msm_dsi_host {
int irq;
};
-static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
-{
- switch (fmt) {
- case MIPI_DSI_FMT_RGB565: return 16;
- case MIPI_DSI_FMT_RGB666_PACKED: return 18;
- case MIPI_DSI_FMT_RGB666:
- case MIPI_DSI_FMT_RGB888:
- default: return 24;
- }
-}
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
@@ -529,6 +519,25 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->byte_clk);
}
+/**
+ * dsi_adjust_pclk_for_compression() - Adjust the pclk rate for compression case
+ * @mode: The selected mode for the DSI output
+ * @dsc: DRM DSC configuration for this DSI output
+ *
+ * Adjust the pclk rate by calculating a new hdisplay proportional to
+ * the compression ratio such that:
+ * new_hdisplay = old_hdisplay * compressed_bpp / uncompressed_bpp
+ *
+ * Porches do not need to be adjusted:
+ * - For VIDEO mode they are not compressed by DSC and are passed as is.
+ * - For CMD mode there are no actual porches. Instead these fields
+ * currently represent the overhead of the image data transfer. As such, they
+ * are calculated for the final mode parameters (after compression) and
+ * are not to be adjusted either.
+ *
+ * FIXME: Reconsider this if/when CMD mode handling is rewritten to use
+ * transfer time and data overhead as a starting point of the calculations.
+ */
static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
const struct drm_dsc_config *dsc)
{
@@ -567,7 +576,7 @@ unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_d
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
u8 lanes = msm_host->lanes;
- u32 bpp = dsi_get_bpp(msm_host->format);
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(msm_host->format);
unsigned long pclk_rate = dsi_get_pclk_rate(mode, msm_host->dsc, is_bonded_dsi);
unsigned long pclk_bpp;
@@ -610,7 +619,7 @@ int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
- u32 bpp = dsi_get_bpp(msm_host->format);
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(msm_host->format);
unsigned int esc_mhz, esc_div;
unsigned long byte_mhz;
@@ -951,8 +960,18 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (ret)
return;
- /* Divide the display by 3 but keep back/font porch and
- * pulse width same
+ /*
+ * DPU sends 3 bytes per pclk cycle to DSI. If widebus is
+ * enabled, bus width is extended to 6 bytes.
+ *
+ * Calculate the number of pclks needed to transmit one line of
+ * the compressed data.
+ *
+ * The back/front porch and pulse width are kept intact. For
+ * VIDEO mode they represent timing parameters rather than
+ * actual data transfer, see the documentation for
+ * dsi_adjust_pclk_for_compression(). For CMD mode they are
+ * unused anyway.
*/
h_total -= hdisplay;
if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
@@ -993,7 +1012,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
/* image data and 1 byte write_memory_start cmd */
if (!msm_host->dsc)
- wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ wc = hdisplay * mipi_dsi_pixel_format_to_bpp(msm_host->format) / 8 + 1;
else
/*
* When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
@@ -1413,7 +1432,7 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
{
int len, ret;
int bllp_len = msm_host->mode->hdisplay *
- dsi_get_bpp(msm_host->format) / 8;
+ mipi_dsi_pixel_format_to_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host, msg);
if (len < 0) {
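
The bytes-per-pclk comment in dsi_timing_setup() above reduces to a pclks-per-line calculation; the following is a hedged sketch of that idea only, with the byte widths and helper name treated as assumptions rather than the driver's exact code.

/*
 * Sketch: DPU pushes 3 bytes of pixel data per pclk cycle, or 6 with
 * widebus enabled, so one compressed line needs its byte length divided
 * by that bus width, rounded up.
 */
static u32 example_pclks_per_line(u32 compressed_bytes_per_line, bool wide_bus)
{
	u32 bytes_per_pclk = wide_bus ? 6 : 3;

	return DIV_ROUND_UP(compressed_bytes_per_line, bytes_per_pclk);
}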
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 896f369fdd53..af2a287cb3bd 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -198,36 +198,6 @@ static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
return dsi_bridge->id;
}
-static void msm_dsi_manager_set_split_display(u8 id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
- struct msm_drm_private *priv = msm_dsi->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- struct msm_dsi *master_dsi, *slave_dsi;
-
- if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
- master_dsi = other_dsi;
- slave_dsi = msm_dsi;
- } else {
- master_dsi = msm_dsi;
- slave_dsi = other_dsi;
- }
-
- if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
- return;
-
- /*
- * Set split display info to kms once bonded DSI panel is connected to
- * both hosts.
- */
- if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
- kms->funcs->set_split_display(kms, master_dsi->encoder,
- slave_dsi->encoder,
- msm_dsi_is_cmd_mode(msm_dsi));
- }
-}
-
static int dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
@@ -305,8 +275,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
int ret;
DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
@@ -364,9 +332,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
/*
* Do nothing with the host if it is slave-DSI in case of bonded DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
@@ -466,55 +431,48 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
};
/* initialize bridge */
-int msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi)
+struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi,
+ struct drm_encoder *encoder)
{
- struct drm_bridge *bridge = NULL;
+ struct drm_bridge *bridge;
struct dsi_bridge *dsi_bridge;
- struct drm_encoder *encoder;
int ret;
dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
sizeof(*dsi_bridge), GFP_KERNEL);
if (!dsi_bridge)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
dsi_bridge->id = msm_dsi->id;
- encoder = msm_dsi->encoder;
-
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
- return ret;
+ return ERR_PTR(ret);
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
- return ret;
-
- msm_dsi->bridge = bridge;
+ return ERR_PTR(ret);
- return 0;
+ return bridge;
}
-int msm_dsi_manager_ext_bridge_init(u8 id)
+int msm_dsi_manager_ext_bridge_init(u8 id, struct drm_bridge *int_bridge)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
struct drm_encoder *encoder;
- struct drm_bridge *int_bridge, *ext_bridge;
+ struct drm_bridge *ext_bridge;
int ret;
- int_bridge = msm_dsi->bridge;
ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
msm_dsi->pdev->dev.of_node, 1, 0);
if (IS_ERR(ext_bridge))
return PTR_ERR(ext_bridge);
- msm_dsi->external_bridge = ext_bridge;
-
- encoder = msm_dsi->encoder;
+ encoder = int_bridge->encoder;
/*
* Try first to create the bridge without it creating its own
@@ -546,9 +504,6 @@ int msm_dsi_manager_ext_bridge_init(u8 id)
return ret;
}
- /* The pipeline is ready, ping encoders if necessary */
- msm_dsi_manager_set_split_display(id);
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f5e01471b0b0..4a5b5112227f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -236,24 +236,33 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
}
-static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
uint32_t hdmi_ctrl;
hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
- edid = drm_get_edid(connector, hdmi->i2c);
+ drm_edid = drm_edid_read_ddc(connector, hdmi->i2c);
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+ if (drm_edid) {
+ /*
+ * FIXME: This should use connector->display_info.is_hdmi from a
+ * path that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
- return edid;
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+ }
+
+ return drm_edid;
}
static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
@@ -290,12 +299,12 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
}
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
- .pre_enable = msm_hdmi_bridge_pre_enable,
- .post_disable = msm_hdmi_bridge_post_disable,
- .mode_set = msm_hdmi_bridge_mode_set,
- .mode_valid = msm_hdmi_bridge_mode_valid,
- .get_edid = msm_hdmi_bridge_get_edid,
- .detect = msm_hdmi_bridge_detect,
+ .pre_enable = msm_hdmi_bridge_pre_enable,
+ .post_disable = msm_hdmi_bridge_post_disable,
+ .mode_set = msm_hdmi_bridge_mode_set,
+ .mode_valid = msm_hdmi_bridge_mode_valid,
+ .edid_read = msm_hdmi_bridge_edid_read,
+ .detect = msm_hdmi_bridge_detect,
};
static void
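
For completeness, a hedged sketch of the consumer-side pattern that the struct drm_edid conversion above enables; this is the generic drm_edid flow, not this driver's code, and the function name and bridge/connector pairing are placeholders.

/* Sketch of a typical .get_modes path built on the struct drm_edid API. */
static int example_connector_get_modes(struct drm_connector *connector,
				       struct drm_bridge *bridge)
{
	const struct drm_edid *drm_edid;
	int count;

	drm_edid = drm_bridge_edid_read(bridge, connector);
	/* Updates display_info (including is_hdmi) and the EDID property. */
	drm_edid_connector_update(connector, drm_edid);
	count = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);

	return count;
}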
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 50b65ffc24b1..97790faffd23 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -969,6 +969,39 @@ static int add_components_mdp(struct device *master_dev,
return 0;
}
+#if !IS_REACHABLE(CONFIG_DRM_MSM_MDP5) || !IS_REACHABLE(CONFIG_DRM_MSM_DPU)
+bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
+{
+ /* If just a single driver is enabled, use it no matter what */
+ return true;
+}
+#else
+
+static bool prefer_mdp5 = true;
+MODULE_PARM_DESC(prefer_mdp5, "Select whether MDP5 or DPU driver should be preferred");
+module_param(prefer_mdp5, bool, 0444);
+
+/* list all platforms supported by both mdp5 and dpu drivers */
+static const char *const msm_mdp5_dpu_migration[] = {
+ "qcom,sdm630-mdp5",
+ "qcom,sdm660-mdp5",
+ NULL,
+};
+
+bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
+{
+ /* If it is not an MDP5 device, do not try MDP5 driver */
+ if (!of_device_is_compatible(dev->of_node, "qcom,mdp5"))
+ return dpu_driver;
+
+ /* If it is not in the migration list, use MDP5 */
+ if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration))
+ return !dpu_driver;
+
+ return prefer_mdp5 ? !dpu_driver : dpu_driver;
+}
+#endif
+
/*
* We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
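
A short usage sketch of msm_disp_drv_should_bind() added above; the probe function name is a placeholder, not one of the drivers' real entry points.

/* Hypothetical probe-time check in the DPU driver (dpu_driver = true). */
static int example_dpu_probe(struct platform_device *pdev)
{
	if (!msm_disp_drv_should_bind(&pdev->dev, true))
		return -ENODEV;	/* let the MDP5 driver claim this device */

	/* ... normal DPU probe continues here ... */
	return 0;
}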
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 16a7cbc0b7dd..65f213660452 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -75,16 +75,6 @@ enum msm_dsi_controller {
#define MAX_H_TILES_PER_DISPLAY 2
/**
- * enum msm_event_wait - type of HW events to wait for
- * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
- * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
- */
-enum msm_event_wait {
- MSM_ENC_COMMIT_DONE = 0,
- MSM_ENC_TX_COMPLETE,
-};
-
-/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
* @num_intf: number of interfaces the panel is mounted on
@@ -385,9 +375,12 @@ static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
- struct drm_encoder *encoder);
+ struct drm_encoder *encoder, bool yuv_supported);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
-
+bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
+ const struct drm_display_mode *mode);
+bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
+ const struct drm_display_mode *mode);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
#else
@@ -400,7 +393,8 @@ static inline void __exit msm_dp_unregister(void)
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
struct drm_device *dev,
- struct drm_encoder *encoder)
+ struct drm_encoder *encoder,
+ bool yuv_supported)
{
return -EINVAL;
}
@@ -409,6 +403,18 @@ static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm
{
}
+static inline bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
+ const struct drm_display_mode *mode)
+{
+ return false;
+}
+
+static inline bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
+ const struct drm_display_mode *mode)
+{
+ return false;
+}
+
static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
return false;
@@ -476,6 +482,9 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
+void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
+ struct platform_device *dev,
+ const char *name);
struct icc_path *msm_icc_get(struct device *dev, const char *name);
@@ -560,5 +569,6 @@ int msm_drv_probe(struct device *dev,
struct msm_kms *kms);
void msm_kms_shutdown(struct platform_device *pdev);
+bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index 59d2788c4510..afedd61c3e28 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -50,6 +50,19 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
return clk;
}
+void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
+ struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(mdss_pdev, IORESOURCE_MEM, name);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+
+ return devm_ioremap_resource(&pdev->dev, res);
+}
+
static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
bool quiet, phys_addr_t *psize)
{
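
A brief usage sketch of msm_ioremap_mdss(); the calling context and the "vbif_nrt" region name are assumptions for illustration only.

/* Hypothetical child-device probe mapping a region owned by the parent MDSS. */
static int example_child_probe(struct platform_device *pdev)
{
	struct platform_device *mdss_pdev = to_platform_device(pdev->dev.parent);
	void __iomem *base;

	base = msm_ioremap_mdss(mdss_pdev, pdev, "vbif_nrt");
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base ... */
	return 0;
}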
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 44aa435d68ce..0641f6111b93 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -105,10 +105,6 @@ struct msm_kms_funcs {
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
- int (*set_split_display)(struct msm_kms *kms,
- struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder,
- bool is_cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 35423d10aafa..fab6ad4e5107 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
@@ -213,6 +214,49 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
}
}
+#define MDSS_HW_MAJ_MIN GENMASK(31, 16)
+
+#define MDSS_HW_MSM8996 0x1007
+#define MDSS_HW_MSM8937 0x100e
+#define MDSS_HW_MSM8953 0x1010
+#define MDSS_HW_MSM8998 0x3000
+#define MDSS_HW_SDM660 0x3002
+#define MDSS_HW_SDM630 0x3003
+
+/*
+ * MDP5 platforms use generic qcom,mdp5 compat string, so we have to generate this data
+ */
+static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_mdss *mdss)
+{
+ struct msm_mdss_data *data;
+ u32 hw_rev;
+
+ data = devm_kzalloc(mdss->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ hw_rev = readl_relaxed(mdss->mmio + HW_REV);
+ hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);
+
+ if (hw_rev == MDSS_HW_MSM8996 ||
+ hw_rev == MDSS_HW_MSM8937 ||
+ hw_rev == MDSS_HW_MSM8953 ||
+ hw_rev == MDSS_HW_MSM8998 ||
+ hw_rev == MDSS_HW_SDM660 ||
+ hw_rev == MDSS_HW_SDM630) {
+ data->ubwc_dec_version = UBWC_1_0;
+ data->ubwc_enc_version = UBWC_1_0;
+ }
+
+ if (hw_rev == MDSS_HW_MSM8996 ||
+ hw_rev == MDSS_HW_MSM8998)
+ data->highest_bank_bit = 2;
+ else
+ data->highest_bank_bit = 1;
+
+ return data;
+}
+
const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
{
struct msm_mdss *mdss;
@@ -222,6 +266,13 @@ const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
mdss = dev_get_drvdata(dev);
+ /*
+ * We could not do this at probe time, since the HW revision register was
+ * not yet readable. Fill in the data structure now for the MDP5 platforms.
+ */
+ if (!mdss->mdss_data && mdss->is_mdp5)
+ mdss->mdss_data = msm_mdss_generate_mdp5_mdss_data(mdss);
+
return mdss->mdss_data;
}
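
A small sketch of the major/minor decode used in msm_mdss_generate_mdp5_mdss_data() above; the raw register value in the comment is an assumed example.

/*
 * Illustration only: with an assumed HW_REV readout of 0x30020000,
 * FIELD_GET(GENMASK(31, 16), ...) extracts 0x3002, i.e. MDSS_HW_SDM660.
 */
static bool example_is_sdm660(u32 hw_rev_raw)
{
	return FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev_raw) == MDSS_HW_SDM660;
}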
@@ -636,6 +687,18 @@ static const struct msm_mdss_data sm8550_data = {
.macrotile_mode = 1,
.reg_bus_bw = 57000,
};
+
+static const struct msm_mdss_data x1e80100_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = 6,
+ .ubwc_static = 1,
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
+ /* TODO: Add reg_bus_bw with real value */
+};
+
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
@@ -656,6 +719,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
{ .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
+ { .compatible = "qcom,x1e80100-mdss", .data = &x1e80100_data},
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 18de2f17e249..ea10bf81582e 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -167,7 +167,11 @@ static int lcdif_load(struct drm_device *drm)
return ret;
/* Modeset init */
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret) {
+ dev_err(drm->dev, "Failed to initialize mode config\n");
+ return ret;
+ }
ret = lcdif_kms_init(lcdif);
if (ret < 0) {
@@ -227,7 +231,6 @@ static void lcdif_unload(struct drm_device *drm)
drm_crtc_vblank_off(&lcdif->crtc);
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index b483ef48216a..cb5ce4e81fc7 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -249,7 +249,11 @@ static int mxsfb_load(struct drm_device *drm,
pm_runtime_enable(drm->dev);
/* Modeset init */
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret) {
+ dev_err(drm->dev, "Failed to initialize mode config\n");
+ goto err_vblank;
+ }
ret = mxsfb_kms_init(mxsfb);
if (ret < 0) {
@@ -312,7 +316,6 @@ err_vblank:
static void mxsfb_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
pm_runtime_get_sync(drm->dev);
mxsfb_irq_uninstall(drm);
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1e6aaf95ff7c..ceef470c9fbf 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
help
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
+
+config DRM_NOUVEAU_GSP_DEFAULT
+ bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to use the GSP codepaths by default on
+ Turing and Ampere GPUs.
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a34917b048f9..4310ad71870b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -449,7 +449,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
}
-/**
+/*
* Sets up registers for the given mode/adjusted_mode pair.
*
* The clocks, CRTCs and outputs attached to this CRTC must be off.
@@ -625,7 +625,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
return ret;
}
-/**
+/*
* Sets up registers for the given mode/adjusted_mode pair.
*
* The clocks, CRTCs and outputs attached to this CRTC must be off.
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8d37a694b772..0c3d88ad0b0e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -28,6 +28,7 @@
#include "wndw.h"
#include "handles.h"
+#include <linux/backlight.h>
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 5f490fbf1877..83355dbc15ee 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -32,6 +32,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 0d9fc741a719..932c9fd0b2d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -11,6 +11,7 @@ struct nvkm_client {
u32 debug;
struct rb_root objroot;
+ spinlock_t obj_lock;
void *data;
int (*event)(u64 token, void *argv, u32 argc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d1bb8151a1df..80f74ee0fc78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -199,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
+ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -263,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
+ case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ break;
+ case NOUVEAU_GETPARAM_VRAM_USED: {
+ struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+ break;
+ }
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 00cc7d1abaa3..56dcd25db1ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -405,27 +405,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
-{
- *n = 0;
-
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
- pl[*n].mem_type = TTM_PL_VRAM;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_GART) {
- pl[*n].mem_type = TTM_PL_TT;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
- pl[*n].mem_type = TTM_PL_SYSTEM;
- pl[(*n)++].flags = 0;
- }
-}
-
-static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
@@ -452,10 +431,6 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
nvbo->placements[i].fpfn = fpfn;
nvbo->placements[i].lpfn = lpfn;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = fpfn;
- nvbo->busy_placements[i].lpfn = lpfn;
- }
}
}
@@ -463,15 +438,32 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
- struct ttm_placement *pl = &nvbo->placement;
+ unsigned int *n = &nvbo->placement.num_placement;
+ struct ttm_place *pl = nvbo->placements;
- pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement, domain);
+ domain |= busy;
- pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- domain | busy);
+ *n = 0;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+ pl[*n].mem_type = TTM_PL_VRAM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+ pl[*n].mem_type = TTM_PL_TT;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+ pl[*n].mem_type = TTM_PL_SYSTEM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ nvbo->placement.placement = nvbo->placements;
set_placement_range(nvbo, domain);
}
@@ -1314,11 +1306,6 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placements[i].lpfn = mappable;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = 0;
- nvbo->busy_placements[i].lpfn = mappable;
- }
-
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
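
For context, a hedged sketch of the single-list placement pattern that the nouveau_bo changes above move to: fallback candidates are flagged with TTM_PL_FLAG_FALLBACK instead of living in a separate busy_placements array. The helper name and the VRAM/GTT choice here are illustrative, not nouveau's exact code.

/* Sketch: prefer VRAM, but let TTM fall back to GTT under memory pressure. */
static void example_fill_placements(struct ttm_place *pl, unsigned int *n)
{
	*n = 0;

	pl[*n].mem_type = TTM_PL_VRAM;
	pl[*n].flags = 0;			/* preferred placement */
	(*n)++;

	pl[*n].mem_type = TTM_PL_TT;
	pl[*n].flags = TTM_PL_FLAG_FALLBACK;	/* only used if VRAM fails */
	(*n)++;
}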
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 70c551921a9e..e9dfab6a8156 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -15,7 +15,6 @@ struct nouveau_bo {
struct ttm_placement placement;
u32 valid_domains;
struct ttm_place placements[3];
- struct ttm_place busy_placements[3];
bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a2df4918340c..0608cabed058 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -35,7 +35,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_util.h>
@@ -44,6 +43,7 @@
struct nvkm_i2c_port;
struct dcb_output;
+struct edid;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 49c2bcbef129..5a887d67dc0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -764,7 +764,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
if (unlikely(nouveau_cli_uvmm(cli)))
- return -ENOSYS;
+ return nouveau_abi16_put(abi16, -ENOSYS);
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index adf01ca9e035..2af3615c5205 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file mga_ioc32.c
*
* 32-bit ioctl compatibility routines for the MGA DRM.
@@ -38,7 +38,7 @@
#include "nouveau_ioctl.h"
-/**
+/*
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 5e4565c5011a..b4da82ddbb6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -112,7 +112,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
- unsigned target, cmd, priority;
+ unsigned target, cmd;
unsigned long addr, end;
struct mm_struct *mm;
@@ -136,9 +136,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
}
- priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
- priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
-
/* FIXME support CPU target ie all target value < GPU_VRAM */
target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
target &= NOUVEAU_SVM_BIND_TARGET_MASK;
@@ -926,15 +923,14 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
unsigned long addr, u64 *pfns, unsigned long npages)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
- int ret;
args->p.addr = addr;
args->p.size = npages << PAGE_SHIFT;
mutex_lock(&svmm->mutex);
- ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
- struct_size(args, p.phys, npages), NULL);
+ nvif_object_ioctl(&svmm->vmm->vmm.object, args,
+ struct_size(args, p.phys, npages), NULL);
mutex_unlock(&svmm->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 5d3190c05250..6daeb7f0b09b 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -452,13 +452,12 @@ nvif_outp_edid_get(struct nvif_outp *outp, u8 **pedid)
if (ret)
goto done;
- *pedid = kmalloc(args->size, GFP_KERNEL);
+ *pedid = kmemdup(args->data, args->size, GFP_KERNEL);
if (!*pedid) {
ret = -ENOMEM;
goto done;
}
- memcpy(*pedid, args->data, args->size);
ret = args->size;
done:
kfree(args);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index ebdeb8eb9e77..c55662937ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -180,6 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
client->device = device;
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
+ spin_lock_init(&client->obj_lock);
client->event = event;
INIT_LIST_HEAD(&client->umem);
spin_lock_init(&client->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 7c554c14e884..aea3ba72027a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
const struct nvkm_object_func *func)
{
struct nvkm_object *object;
+ unsigned long flags;
if (handle) {
+ spin_lock_irqsave(&client->obj_lock, flags);
struct rb_node *node = client->objroot.rb_node;
while (node) {
object = rb_entry(node, typeof(*object), node);
@@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
else
if (handle > object->object)
node = node->rb_right;
- else
+ else {
+ spin_unlock_irqrestore(&client->obj_lock, flags);
goto done;
+ }
}
+ spin_unlock_irqrestore(&client->obj_lock, flags);
return ERR_PTR(-ENOENT);
} else {
object = &client->object;
@@ -57,30 +62,39 @@ done:
void
nvkm_object_remove(struct nvkm_object *object)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&object->client->obj_lock, flags);
if (!RB_EMPTY_NODE(&object->node))
rb_erase(&object->node, &object->client->objroot);
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
}
bool
nvkm_object_insert(struct nvkm_object *object)
{
- struct rb_node **ptr = &object->client->objroot.rb_node;
+ struct rb_node **ptr;
struct rb_node *parent = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&object->client->obj_lock, flags);
+ ptr = &object->client->objroot.rb_node;
while (*ptr) {
struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
parent = *ptr;
- if (object->object < this->object)
+ if (object->object < this->object) {
ptr = &parent->rb_left;
- else
- if (object->object > this->object)
+ } else if (object->object > this->object) {
ptr = &parent->rb_right;
- else
+ } else {
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
return false;
+ }
}
rb_link_node(&object->node, parent, ptr);
rb_insert_color(&object->node, &object->client->objroot);
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c494a1ff2d57..986e8d547c94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1040,7 +1040,7 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
}
}
-/**
+/*
* Wait until GR goes idle. GR is considered idle if it is disabled by the
* MC (0x200) register, or GR is not busy and a context switch is not in
* progress.
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
index f36a359d4531..bd104a030243 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -218,7 +218,7 @@ nvkm_acr_lsfw_load_sig_image_desc_v2(struct nvkm_subdev *subdev,
const struct firmware *hsbl;
const struct nvfw_ls_hsbl_bin_hdr *hdr;
const struct nvfw_ls_hsbl_hdr *hshdr;
- u32 loc, sig, cnt, *meta;
+ u32 sig, cnt, *meta;
ret = nvkm_firmware_load_name(subdev, path, "hs_bl_sig", ver, &hsbl);
if (ret)
@@ -227,7 +227,6 @@ nvkm_acr_lsfw_load_sig_image_desc_v2(struct nvkm_subdev *subdev,
hdr = nvfw_ls_hsbl_bin_hdr(subdev, hsbl->data);
hshdr = nvfw_ls_hsbl_hdr(subdev, hsbl->data + hdr->header_offset);
meta = (u32 *)(hsbl->data + hshdr->meta_data_offset);
- loc = *(u32 *)(hsbl->data + hshdr->patch_loc);
sig = *(u32 *)(hsbl->data + hshdr->patch_sig);
cnt = *(u32 *)(hsbl->data + hshdr->num_sig);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
index 4135690326f4..3a30bea30e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
@@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
rm->flush = r535_bar_flush;
ret = gf100_bar_new_(rm, device, type, inst, &bar);
- *pbar = bar;
if (ret) {
- if (!bar)
- kfree(rm);
+ kfree(rm);
return ret;
}
+ *pbar = bar;
bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index 142079403864..b54f044c4483 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -575,7 +575,7 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
* init opcode handlers
*****************************************************************************/
-/**
+/*
* init_reserved - stub for various unknown/unused single-byte opcodes
*
*/
@@ -602,7 +602,7 @@ init_reserved(struct nvbios_init *init)
init->offset += length;
}
-/**
+/*
* INIT_DONE - opcode 0x71
*
*/
@@ -613,7 +613,7 @@ init_done(struct nvbios_init *init)
init->offset = 0x0000;
}
-/**
+/*
* INIT_IO_RESTRICT_PROG - opcode 0x32
*
*/
@@ -650,7 +650,7 @@ init_io_restrict_prog(struct nvbios_init *init)
trace("}]\n");
}
-/**
+/*
* INIT_REPEAT - opcode 0x33
*
*/
@@ -676,7 +676,7 @@ init_repeat(struct nvbios_init *init)
init->repeat = repeat;
}
-/**
+/*
* INIT_IO_RESTRICT_PLL - opcode 0x34
*
*/
@@ -716,7 +716,7 @@ init_io_restrict_pll(struct nvbios_init *init)
trace("}]\n");
}
-/**
+/*
* INIT_END_REPEAT - opcode 0x36
*
*/
@@ -732,7 +732,7 @@ init_end_repeat(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_COPY - opcode 0x37
*
*/
@@ -759,7 +759,7 @@ init_copy(struct nvbios_init *init)
init_wrvgai(init, port, index, data);
}
-/**
+/*
* INIT_NOT - opcode 0x38
*
*/
@@ -771,7 +771,7 @@ init_not(struct nvbios_init *init)
init_exec_inv(init);
}
-/**
+/*
* INIT_IO_FLAG_CONDITION - opcode 0x39
*
*/
@@ -788,7 +788,7 @@ init_io_flag_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_GENERIC_CONDITION - opcode 0x3a
*
*/
@@ -840,7 +840,7 @@ init_generic_condition(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_IO_MASK_OR - opcode 0x3b
*
*/
@@ -859,7 +859,7 @@ init_io_mask_or(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, index, data &= ~(1 << or));
}
-/**
+/*
* INIT_IO_OR - opcode 0x3c
*
*/
@@ -878,7 +878,7 @@ init_io_or(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, index, data | (1 << or));
}
-/**
+/*
* INIT_ANDN_REG - opcode 0x47
*
*/
@@ -895,7 +895,7 @@ init_andn_reg(struct nvbios_init *init)
init_mask(init, reg, mask, 0);
}
-/**
+/*
* INIT_OR_REG - opcode 0x48
*
*/
@@ -912,7 +912,7 @@ init_or_reg(struct nvbios_init *init)
init_mask(init, reg, 0, mask);
}
-/**
+/*
* INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
*
*/
@@ -942,7 +942,7 @@ init_idx_addr_latched(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_IO_RESTRICT_PLL2 - opcode 0x4a
*
*/
@@ -977,7 +977,7 @@ init_io_restrict_pll2(struct nvbios_init *init)
trace("}]\n");
}
-/**
+/*
* INIT_PLL2 - opcode 0x4b
*
*/
@@ -994,7 +994,7 @@ init_pll2(struct nvbios_init *init)
init_prog_pll(init, reg, freq);
}
-/**
+/*
* INIT_I2C_BYTE - opcode 0x4c
*
*/
@@ -1025,7 +1025,7 @@ init_i2c_byte(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_ZM_I2C_BYTE - opcode 0x4d
*
*/
@@ -1051,7 +1051,7 @@ init_zm_i2c_byte(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_ZM_I2C - opcode 0x4e
*
*/
@@ -1085,7 +1085,7 @@ init_zm_i2c(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_TMDS - opcode 0x4f
*
*/
@@ -1111,7 +1111,7 @@ init_tmds(struct nvbios_init *init)
init_wr32(init, reg + 0, addr);
}
-/**
+/*
* INIT_ZM_TMDS_GROUP - opcode 0x50
*
*/
@@ -1138,7 +1138,7 @@ init_zm_tmds_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
*
*/
@@ -1168,7 +1168,7 @@ init_cr_idx_adr_latch(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, addr0, save0);
}
-/**
+/*
* INIT_CR - opcode 0x52
*
*/
@@ -1188,7 +1188,7 @@ init_cr(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, addr, val | data);
}
-/**
+/*
* INIT_ZM_CR - opcode 0x53
*
*/
@@ -1205,7 +1205,7 @@ init_zm_cr(struct nvbios_init *init)
init_wrvgai(init, 0x03d4, addr, data);
}
-/**
+/*
* INIT_ZM_CR_GROUP - opcode 0x54
*
*/
@@ -1229,7 +1229,7 @@ init_zm_cr_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_CONDITION_TIME - opcode 0x56
*
*/
@@ -1256,7 +1256,7 @@ init_condition_time(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_LTIME - opcode 0x57
*
*/
@@ -1273,7 +1273,7 @@ init_ltime(struct nvbios_init *init)
mdelay(msec);
}
-/**
+/*
* INIT_ZM_REG_SEQUENCE - opcode 0x58
*
*/
@@ -1298,7 +1298,7 @@ init_zm_reg_sequence(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_PLL_INDIRECT - opcode 0x59
*
*/
@@ -1317,7 +1317,7 @@ init_pll_indirect(struct nvbios_init *init)
init_prog_pll(init, reg, freq);
}
-/**
+/*
* INIT_ZM_REG_INDIRECT - opcode 0x5a
*
*/
@@ -1336,7 +1336,7 @@ init_zm_reg_indirect(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_SUB_DIRECT - opcode 0x5b
*
*/
@@ -1362,7 +1362,7 @@ init_sub_direct(struct nvbios_init *init)
init->offset += 3;
}
-/**
+/*
* INIT_JUMP - opcode 0x5c
*
*/
@@ -1380,7 +1380,7 @@ init_jump(struct nvbios_init *init)
init->offset += 3;
}
-/**
+/*
* INIT_I2C_IF - opcode 0x5e
*
*/
@@ -1407,7 +1407,7 @@ init_i2c_if(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_COPY_NV_REG - opcode 0x5f
*
*/
@@ -1433,7 +1433,7 @@ init_copy_nv_reg(struct nvbios_init *init)
init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
}
-/**
+/*
* INIT_ZM_INDEX_IO - opcode 0x62
*
*/
@@ -1451,7 +1451,7 @@ init_zm_index_io(struct nvbios_init *init)
init_wrvgai(init, port, index, data);
}
-/**
+/*
* INIT_COMPUTE_MEM - opcode 0x63
*
*/
@@ -1469,7 +1469,7 @@ init_compute_mem(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_RESET - opcode 0x65
*
*/
@@ -1496,7 +1496,7 @@ init_reset(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_CONFIGURE_MEM - opcode 0x66
*
*/
@@ -1555,7 +1555,7 @@ init_configure_mem(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_CONFIGURE_CLK - opcode 0x67
*
*/
@@ -1589,7 +1589,7 @@ init_configure_clk(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_CONFIGURE_PREINIT - opcode 0x68
*
*/
@@ -1615,7 +1615,7 @@ init_configure_preinit(struct nvbios_init *init)
init_exec_force(init, false);
}
-/**
+/*
* INIT_IO - opcode 0x69
*
*/
@@ -1655,7 +1655,7 @@ init_io(struct nvbios_init *init)
init_wrport(init, port, data | value);
}
-/**
+/*
* INIT_SUB - opcode 0x6b
*
*/
@@ -1682,7 +1682,7 @@ init_sub(struct nvbios_init *init)
init->offset += 2;
}
-/**
+/*
* INIT_RAM_CONDITION - opcode 0x6d
*
*/
@@ -1701,7 +1701,7 @@ init_ram_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_NV_REG - opcode 0x6e
*
*/
@@ -1719,7 +1719,7 @@ init_nv_reg(struct nvbios_init *init)
init_mask(init, reg, ~mask, data);
}
-/**
+/*
* INIT_MACRO - opcode 0x6f
*
*/
@@ -1743,7 +1743,7 @@ init_macro(struct nvbios_init *init)
init->offset += 2;
}
-/**
+/*
* INIT_RESUME - opcode 0x72
*
*/
@@ -1755,7 +1755,7 @@ init_resume(struct nvbios_init *init)
init_exec_set(init, true);
}
-/**
+/*
* INIT_STRAP_CONDITION - opcode 0x73
*
*/
@@ -1773,7 +1773,7 @@ init_strap_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_TIME - opcode 0x74
*
*/
@@ -1794,7 +1794,7 @@ init_time(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_CONDITION - opcode 0x75
*
*/
@@ -1811,7 +1811,7 @@ init_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_IO_CONDITION - opcode 0x76
*
*/
@@ -1828,7 +1828,7 @@ init_io_condition(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_ZM_REG16 - opcode 0x77
*
*/
@@ -1845,7 +1845,7 @@ init_zm_reg16(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_INDEX_IO - opcode 0x78
*
*/
@@ -1867,7 +1867,7 @@ init_index_io(struct nvbios_init *init)
init_wrvgai(init, port, index, data | value);
}
-/**
+/*
* INIT_PLL - opcode 0x79
*
*/
@@ -1884,7 +1884,7 @@ init_pll(struct nvbios_init *init)
init_prog_pll(init, reg, freq);
}
-/**
+/*
* INIT_ZM_REG - opcode 0x7a
*
*/
@@ -1904,7 +1904,7 @@ init_zm_reg(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
 * INIT_RAM_RESTRICT_PLL - opcode 0x87
*
*/
@@ -1934,7 +1934,7 @@ init_ram_restrict_pll(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_RESET_BEGUN - opcode 0x8c
*
*/
@@ -1945,7 +1945,7 @@ init_reset_begun(struct nvbios_init *init)
init->offset += 1;
}
-/**
+/*
* INIT_RESET_END - opcode 0x8d
*
*/
@@ -1956,7 +1956,7 @@ init_reset_end(struct nvbios_init *init)
init->offset += 1;
}
-/**
+/*
* INIT_GPIO - opcode 0x8e
*
*/
@@ -1972,7 +1972,7 @@ init_gpio(struct nvbios_init *init)
nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
}
-/**
+/*
* INIT_RAM_RESTRICT_ZM_GROUP - opcode 0x8f
*
*/
@@ -2010,7 +2010,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_COPY_ZM_REG - opcode 0x90
*
*/
@@ -2027,7 +2027,7 @@ init_copy_zm_reg(struct nvbios_init *init)
init_wr32(init, dreg, init_rd32(init, sreg));
}
-/**
+/*
* INIT_ZM_REG_GROUP - opcode 0x91
*
*/
@@ -2049,7 +2049,7 @@ init_zm_reg_group(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_XLAT - opcode 0x96
*
*/
@@ -2077,7 +2077,7 @@ init_xlat(struct nvbios_init *init)
init_mask(init, daddr, ~dmask, data);
}
-/**
+/*
* INIT_ZM_MASK_ADD - opcode 0x97
*
*/
@@ -2098,7 +2098,7 @@ init_zm_mask_add(struct nvbios_init *init)
init_wr32(init, addr, data);
}
-/**
+/*
* INIT_AUXCH - opcode 0x98
*
*/
@@ -2122,7 +2122,7 @@ init_auxch(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_AUXCH - opcode 0x99
*
*/
@@ -2144,7 +2144,7 @@ init_zm_auxch(struct nvbios_init *init)
}
}
-/**
+/*
* INIT_I2C_LONG_IF - opcode 0x9a
*
*/
@@ -2183,7 +2183,7 @@ init_i2c_long_if(struct nvbios_init *init)
init_exec_set(init, false);
}
-/**
+/*
* INIT_GPIO_NE - opcode 0xa9
*
*/
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 19188683c8fc..8c2bf1c16f2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index a41735ab6068..a73a5b589790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1054,8 +1054,6 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
/* Release the DMA buffers that were needed only for boot and init */
nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
nvkm_gsp_mem_dtor(gsp, &gsp->libos);
- nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
- nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
return ret;
}
@@ -2163,6 +2161,8 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
r535_gsp_dtor_fws(gsp);
+ nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+ nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
@@ -2312,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
int ret;
+ bool enable_gsp = fwif->enable;
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
return -EINVAL;
if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 8c2faa964511..ccac88da8864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -45,7 +45,7 @@ static const struct cvb_coef gk20a_cvb_coef[] = {
/* 852 */ { 1608418, -21643, -269, 0, 763, -48},
};
-/**
+/*
* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
*/
static inline int
@@ -58,7 +58,7 @@ gk20a_volt_get_cvb_voltage(int speedo, int s_scale, const struct cvb_coef *coef)
return mv;
}
-/**
+/*
* cvb_t_mv =
* ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
* ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a26b77d99d52..9b8747d83ee8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -436,11 +436,11 @@ static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
-static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *hdmi4_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct edid *edid = NULL;
+ const struct drm_edid *drm_edid = NULL;
unsigned int cec_addr;
bool need_enable;
int r;
@@ -461,13 +461,21 @@ static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
if (r)
goto done;
- edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
+ drm_edid = drm_edid_read_custom(connector, hdmi4_core_ddc_read, &hdmi->core);
done:
hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
- if (edid && edid->extensions) {
+ if (drm_edid) {
+ /*
+ * FIXME: The CEC physical address should be set using
+ * hdmi4_cec_set_phys_addr(&hdmi->core,
+ * connector->display_info.source_physical_address) from a path
+ * that has read the EDID and called
+ * drm_edid_connector_update().
+ */
+ const struct edid *edid = drm_edid_raw(drm_edid);
unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
@@ -480,7 +488,7 @@ done:
if (need_enable)
hdmi4_core_disable(&hdmi->core);
- return edid;
+ return drm_edid;
}
static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
@@ -492,7 +500,7 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
.atomic_enable = hdmi4_bridge_enable,
.atomic_disable = hdmi4_bridge_disable,
.hpd_notify = hdmi4_bridge_hpd_notify,
- .get_edid = hdmi4_bridge_get_edid,
+ .edid_read = hdmi4_bridge_edid_read,
};
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index e6611c683857..c7ae2235ae99 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -425,11 +425,11 @@ static void hdmi5_bridge_disable(struct drm_bridge *bridge,
mutex_unlock(&hdmi->lock);
}
-static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *hdmi5_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct edid *edid;
+ const struct drm_edid *drm_edid;
bool need_enable;
int idlemode;
int r;
@@ -452,7 +452,7 @@ static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
hdmi5_core_ddc_init(&hdmi->core);
- edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
+ drm_edid = drm_edid_read_custom(connector, hdmi5_core_ddc_read, &hdmi->core);
hdmi5_core_ddc_uninit(&hdmi->core);
@@ -464,7 +464,7 @@ static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
if (need_enable)
hdmi_core_disable(hdmi);
- return (struct edid *)edid;
+ return drm_edid;
}
static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
@@ -475,7 +475,7 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = hdmi5_bridge_enable,
.atomic_disable = hdmi5_bridge_disable,
- .get_edid = hdmi5_bridge_get_edid,
+ .edid_read = hdmi5_bridge_edid_read,
};
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 8f3783742208..d037b3b8b999 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -67,61 +67,25 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
-config DRM_PANEL_BOE_TV101WUM_NL6
- tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+config DRM_PANEL_BOE_TH101MB31UIG002_28A
+ tristate "Boe TH101MB31UIG002-28A panel"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
help
- Say Y here if you want to support for BOE TV101WUM and AUO KD101N80
- 45NA WUXGA PANEL DSI Video Mode panel
+ Say Y here if you want to enable support for Boe
+ TH101MB31UIG002-28A TFT-LCD modules. The panel has a 800x1280
+ resolution and uses 24 bit RGB per pixel. It provides a MIPI DSI
+ interface to the host and has a built-in LED backlight.
-config DRM_PANEL_DSI_CM
- tristate "Generic DSI command mode panels"
+config DRM_PANEL_BOE_TV101WUM_NL6
+ tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
help
- DRM panel driver for DSI command mode panels with support for
- embedded and external backlights.
-
-config DRM_PANEL_LVDS
- tristate "Generic LVDS panel driver"
- depends on OF
- depends on BACKLIGHT_CLASS_DEVICE
- select VIDEOMODE_HELPERS
- help
- This driver supports LVDS panels that don't require device-specific
- handling of power supplies or control signals. It implements automatic
- backlight handling if the panel is attached to a backlight controller.
-
-config DRM_PANEL_SIMPLE
- tristate "support for simple panels (other than eDP ones)"
- depends on OF
- depends on BACKLIGHT_CLASS_DEVICE
- depends on PM
- select VIDEOMODE_HELPERS
- help
- DRM panel driver for dumb non-eDP panels that need at most a regulator
- and a GPIO to be powered up. Optionally a backlight can be attached so
- that it can be automatically turned off when the panel goes into a
- low power state.
-
-config DRM_PANEL_EDP
- tristate "support for simple Embedded DisplayPort panels"
- depends on OF
- depends on BACKLIGHT_CLASS_DEVICE
- depends on PM
- select VIDEOMODE_HELPERS
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
- select DRM_KMS_HELPER
- help
- DRM panel driver for dumb eDP panels that need at most a regulator and
- a GPIO to be powered up. Optionally a backlight can be attached so
- that it can be automatically turned off when the panel goes into a
- low power state.
+ Say Y here if you want to enable support for BOE TV101WUM and AUO KD101N80
+ 45NA WUXGA DSI video mode panels.
config DRM_PANEL_EBBG_FT8719
tristate "EBBG FT8719 panel driver"
@@ -162,6 +126,35 @@ config DRM_PANEL_FEIYANG_FY07024DI26A30D
Say Y if you want to enable support for panels based on the
Feiyang FY07024DI26A30-D MIPI-DSI interface.
+config DRM_PANEL_DSI_CM
+ tristate "Generic DSI command mode panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ DRM panel driver for DSI command mode panels with support for
+ embedded and external backlights.
+
+config DRM_PANEL_LVDS
+ tristate "Generic LVDS panel driver"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ This driver supports LVDS panels that don't require device-specific
+ handling of power supplies or control signals. It implements automatic
+ backlight handling if the panel is attached to a backlight controller.
+
+config DRM_PANEL_HIMAX_HX83112A
+ tristate "Himax HX83112A-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want to enable support for Himax HX83112A-based
+ display panels, such as the one found in the Fairphone 4 smartphone.
+
config DRM_PANEL_HIMAX_HX8394
tristate "HIMAX HX8394 MIPI-DSI LCD panels"
depends on OF
@@ -251,17 +244,6 @@ config DRM_PANEL_JADARD_JD9365DA_H3
WXGA MIPI DSI panel. The panel support TFT dot matrix LCD with
800RGBx1280 dots at maximum.
-config DRM_PANEL_JDI_LT070ME05000
- tristate "JDI LT070ME05000 WUXGA DSI panel"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Say Y here if you want to enable support for JDI DSI video mode
- panel as found in Google Nexus 7 (2013) devices.
- The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
- 24 bit per pixel.
-
config DRM_PANEL_JDI_LPM102A188A
tristate "JDI LPM102A188A DSI panel"
depends on OF && GPIOLIB
@@ -273,6 +255,17 @@ config DRM_PANEL_JDI_LPM102A188A
The panel has a 2560×1800 resolution. It provides a MIPI DSI interface
to the host.
+config DRM_PANEL_JDI_LT070ME05000
+ tristate "JDI LT070ME05000 WUXGA DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for JDI DSI video mode
+ panel as found in Google Nexus 7 (2013) devices.
+ The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
+ 24 bit per pixel.
+
config DRM_PANEL_JDI_R63452
tristate "JDI R63452 Full HD DSI panel"
depends on OF
@@ -326,12 +319,6 @@ config DRM_PANEL_LEADTEK_LTK500HD1829
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
-config DRM_PANEL_SAMSUNG_LD9040
- tristate "Samsung LD9040 RGB/SPI panel"
- depends on OF && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select VIDEOMODE_HELPERS
-
config DRM_PANEL_LG_LB035Q02
tristate "LG LB035Q024573 RGB panel"
depends on GPIOLIB && OF && SPI
@@ -359,6 +346,17 @@ config DRM_PANEL_MAGNACHIP_D53E6EA8966
with the Magnachip D53E6EA8966 panel IC. This panel receives
video data via DSI but commands via 9-bit SPI using DBI.
+config DRM_PANEL_MANTIX_MLAF057WE51
+ tristate "Mantix MLAF057WE51-X MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Mantix
+ MLAF057WE51-X MIPI DSI panel as e.g. used in the Librem 5. It
+ has a resolution of 720x1440 pixels, a built in backlight and touch
+ controller.
+
config DRM_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 RGB panel"
depends on GPIOLIB && OF && SPI
@@ -438,6 +436,16 @@ config DRM_PANEL_NOVATEK_NT36672A
around the Novatek NT36672A display controller, such as some
Tianma panels used in a few Xiaomi Poco F1 mobile phones.
+config DRM_PANEL_NOVATEK_NT36672E
+ tristate "Novatek NT36672E DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Novatek NT36672E DSI Video Mode
+ LCD panel module. The panel has a resolution of 1080x2408 and uses 24 bit
+ RGB per pixel.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -447,17 +455,6 @@ config DRM_PANEL_NOVATEK_NT39016
Say Y here if you want to enable support for the panels built
around the Novatek NT39016 display controller.
-config DRM_PANEL_MANTIX_MLAF057WE51
- tristate "Mantix MLAF057WE51-X MIPI-DSI LCD panel"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Say Y here if you want to enable support for the Mantix
- MLAF057WE51-X MIPI DSI panel as e.g. used in the Librem 5. It
- has a resolution of 720x1440 pixels, a built in backlight and touch
- controller.
-
config DRM_PANEL_OLIMEX_LCD_OLINUXINO
tristate "Olimex LCD-OLinuXino panel"
depends on OF
@@ -554,6 +551,12 @@ config DRM_PANEL_RONBO_RB070D30
Say Y here if you want to enable support for Ronbo Electronics
RB070D30 1024x600 DSI panel.
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+ tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+ depends on OF
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_ATNA33XC20
tristate "Samsung ATNA33XC20 eDP panel"
depends on OF
@@ -577,6 +580,12 @@ config DRM_PANEL_SAMSUNG_DB7430
DB7430 DPI display controller used in such devices as the
LMS397KF04 480x800 DPI panel.
+config DRM_PANEL_SAMSUNG_LD9040
+ tristate "Samsung LD9040 RGB/SPI panel"
+ depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
@@ -642,12 +651,6 @@ config DRM_PANEL_SAMSUNG_S6E63M0_DSI
Say Y here if you want to be able to access the Samsung
S6E63M0 panel using DSI.
-config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
- tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
- depends on OF
- select DRM_MIPI_DSI
- select VIDEOMODE_HELPERS
-
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
@@ -746,15 +749,6 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
-config DRM_PANEL_SYNAPTICS_R63353
- tristate "Synaptics R63353-based panels"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Say Y if you want to enable support for panels based on the
- Synaptics R63353 controller.
-
config DRM_PANEL_SONY_ACX565AKM
tristate "Sony ACX565AKM panel"
depends on GPIOLIB && OF && SPI
@@ -794,6 +788,43 @@ config DRM_PANEL_STARTEK_KD070FHFID015
with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to
the host, a built-in LED backlight and touch controller.
+config DRM_PANEL_EDP
+ tristate "support for simple Embedded DisplayPort panels"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_DP_AUX_BUS
+ select DRM_KMS_HELPER
+ help
+ DRM panel driver for dumb eDP panels that need at most a regulator and
+ a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_SIMPLE
+ tristate "support for simple panels (other than eDP ones)"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ help
+ DRM panel driver for dumb non-eDP panels that need at most a regulator
+ and a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_SYNAPTICS_R63353
+ tristate "Synaptics R63353-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Synaptics R63353 controller.
+
config DRM_PANEL_TDO_TL070WSH30
tristate "TDO TL070WSH30 DSI panel"
depends on OF
@@ -837,6 +868,17 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_R66451
+ tristate "Visionox R66451"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Visionox
+ R66451 1080x2340 AMOLED DSI panel.
+
config DRM_PANEL_VISIONOX_RM69299
tristate "Visionox RM69299"
depends on OF
@@ -854,17 +896,6 @@ config DRM_PANEL_VISIONOX_VTDR6130
Say Y here if you want to enable support for Visionox
VTDR6130 1080x2400 AMOLED DSI panel.
-config DRM_PANEL_VISIONOX_R66451
- tristate "Visionox R66451"
- depends on OF
- depends on DRM_MIPI_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HELPER
- help
- Say Y here if you want to enable support for Visionox
- R66451 1080x2340 AMOLED DSI panel.
-
config DRM_PANEL_WIDECHIPS_WS2401
tristate "Widechips WS2401 DPI panel driver"
depends on SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d94a644d0a6c..f156d7fa0bcc 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.
obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
@@ -41,6 +43,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35560) += panel-novatek-nt35560.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index 11b64acbe8a9..e225840b0d67 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -854,26 +854,20 @@ static int panel_add(struct panel_info *pinfo)
pinfo->pp18_gpio = devm_gpiod_get(dev, "pp18", GPIOD_OUT_HIGH);
if (IS_ERR(pinfo->pp18_gpio)) {
- ret = PTR_ERR(pinfo->pp18_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get pp18 gpio: %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(pinfo->pp18_gpio),
+ "failed to get pp18 gpio\n");
}
pinfo->pp33_gpio = devm_gpiod_get(dev, "pp33", GPIOD_OUT_HIGH);
if (IS_ERR(pinfo->pp33_gpio)) {
- ret = PTR_ERR(pinfo->pp33_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get pp33 gpio: %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(pinfo->pp33_gpio),
+ "failed to get pp33 gpio\n");
}
pinfo->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(pinfo->enable_gpio)) {
- ret = PTR_ERR(pinfo->enable_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get enable gpio: %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(pinfo->enable_gpio),
+ "failed to get enable gpio\n");
}
drm_panel_init(&pinfo->base, dev, &panel_funcs,
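Both this hunk and the leadtek-ltk050h3146w one further below switch to the dev_err_probe() helper. A minimal sketch of the idiom, with a hypothetical resource name and assuming the usual devm_gpiod_get() signature: dev_err_probe() logs nothing for -EPROBE_DEFER (it records the deferral reason instead) and returns the error code, so the old three-line pattern collapses into a single return.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_get_enable_gpio(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);

        /* dev_err_probe() returns the error, so no local ret juggling. */
        if (IS_ERR(gpio))
                return dev_err_probe(dev, PTR_ERR(gpio),
                                     "failed to get enable gpio\n");

        *out = gpio;
        return 0;
}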
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
new file mode 100644
index 000000000000..763e9f8342d3
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Alexander Warnecke <awarnecke002@hotmail.com>
+ * Copyright (c) 2023 Manuel Traut <manut@mecka.net>
+ * Copyright (c) 2023 Dang Huynh <danct12@riseup.net>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct boe_th101mb31ig002 {
+ struct drm_panel panel;
+
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *power;
+ struct gpio_desc *enable;
+ struct gpio_desc *reset;
+
+ enum drm_panel_orientation orientation;
+};
+
+static void boe_th101mb31ig002_reset(struct boe_th101mb31ig002 *ctx)
+{
+ gpiod_direction_output(ctx->reset, 0);
+ usleep_range(10, 100);
+ gpiod_direction_output(ctx->reset, 1);
+ usleep_range(10, 100);
+ gpiod_direction_output(ctx->reset, 0);
+ usleep_range(5000, 6000);
+}
+
+static int boe_th101mb31ig002_enable(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ mipi_dsi_dcs_write_seq(dsi, 0xE0, 0xAB, 0xBA);
+ mipi_dsi_dcs_write_seq(dsi, 0xE1, 0xBA, 0xAB);
+ mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x10, 0x01, 0x47, 0xFF);
+ mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xB6, 0xB0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x7C, 0x65, 0x55, 0x49, 0x46, 0x36,
+ 0x3B, 0x24, 0x3D, 0x3C, 0x3D, 0x5C, 0x4C,
+ 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7C,
+ 0x65, 0x55, 0x49, 0x46, 0x36, 0x3B, 0x24,
+ 0x3D, 0x3C, 0x3D, 0x5C, 0x4C, 0x55, 0x47,
+ 0x46, 0x39, 0x26, 0x06);
+ mipi_dsi_dcs_write_seq(dsi, 0x00, 0xFF, 0x87, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0F,
+ 0x00, 0x00, 0xC1);
+ mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x54, 0x94, 0x02, 0x85, 0x9F, 0x00,
+ 0x7F, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xFF, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC3, 0x86, 0x46, 0x05, 0x05, 0x1C, 0x1C,
+ 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
+ 0x0F, 0x0F, 0x0D, 0x0D, 0x13, 0x13, 0x11,
+ 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC4, 0x07, 0x07, 0x04, 0x04, 0x1C, 0x1C,
+ 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
+ 0x0E, 0x0E, 0x0C, 0x0C, 0x12, 0x12, 0x10,
+ 0x10, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xC6, 0x2A, 0x2A);
+ mipi_dsi_dcs_write_seq(dsi, 0xC8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq(dsi, 0xCA, 0xCB, 0x43);
+ mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x0E, 0x4B, 0x4B, 0x20, 0x19, 0x6B,
+ 0x06, 0xB3);
+ mipi_dsi_dcs_write_seq(dsi, 0xD2, 0xE3, 0x2B, 0x38, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x00, 0x01, 0x00, 0x0E, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xE6, 0x80, 0x01, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF);
+ mipi_dsi_dcs_write_seq(dsi, 0xF0, 0x12, 0x03, 0x20, 0x00, 0xFF);
+ mipi_dsi_dcs_write_seq(dsi, 0xF3, 0x00);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set panel on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_th101mb31ig002_disable(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ dev_err(dev, "Failed to set panel off: %d\n", ret);
+
+ msleep(120);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+
+ return 0;
+}
+
+static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ gpiod_set_value_cansleep(ctx->enable, 0);
+ regulator_disable(ctx->power);
+
+ return 0;
+}
+
+static int boe_th101mb31ig002_prepare(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_enable(ctx->power);
+ if (ret) {
+ dev_err(dev, "Failed to enable power supply: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ctx->enable, 1);
+ msleep(50);
+ boe_th101mb31ig002_reset(ctx);
+ boe_th101mb31ig002_enable(panel);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_th101mb31ig002_default_mode = {
+ .clock = 73500,
+ .hdisplay = 800,
+ .hsync_start = 800 + 64,
+ .hsync_end = 800 + 64 + 16,
+ .htotal = 800 + 64 + 16 + 64,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 2,
+ .vsync_end = 1280 + 2 + 4,
+ .vtotal = 1280 + 2 + 4 + 12,
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int boe_th101mb31ig002_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev,
+ &boe_th101mb31ig002_default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "Failed to add mode %ux%u@%u\n",
+ boe_th101mb31ig002_default_mode.hdisplay,
+ boe_th101mb31ig002_default_mode.vdisplay,
+ drm_mode_vrefresh(&boe_th101mb31ig002_default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static enum drm_panel_orientation
+boe_th101mb31ig002_get_orientation(struct drm_panel *panel)
+{
+ struct boe_th101mb31ig002 *ctx = container_of(panel,
+ struct boe_th101mb31ig002,
+ panel);
+
+ return ctx->orientation;
+}
+
+static const struct drm_panel_funcs boe_th101mb31ig002_funcs = {
+ .prepare = boe_th101mb31ig002_prepare,
+ .unprepare = boe_th101mb31ig002_unprepare,
+ .disable = boe_th101mb31ig002_disable,
+ .get_modes = boe_th101mb31ig002_get_modes,
+ .get_orientation = boe_th101mb31ig002_get_orientation,
+};
+
+static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct boe_th101mb31ig002 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_MODE_LPM;
+
+ ctx->power = devm_regulator_get(&dsi->dev, "power");
+ if (IS_ERR(ctx->power))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
+ "Failed to get power regulator\n");
+
+ ctx->enable = devm_gpiod_get(&dsi->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->enable))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->enable),
+ "Failed to get enable GPIO\n");
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
+ "Failed to get reset GPIO\n");
+
+ ret = of_drm_get_panel_orientation(dsi->dev.of_node,
+ &ctx->orientation);
+ if (ret)
+ return dev_err_probe(&dsi->dev, ret,
+ "Failed to get orientation\n");
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(&dsi->dev, ret,
+ "Failed to attach panel to DSI host\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void boe_th101mb31ig002_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_th101mb31ig002 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_th101mb31ig002_of_match[] = {
+ { .compatible = "boe,th101mb31ig002-28a", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_th101mb31ig002_of_match);
+
+static struct mipi_dsi_driver boe_th101mb31ig002_driver = {
+ .driver = {
+ .name = "boe-th101mb31ig002-28a",
+ .of_match_table = boe_th101mb31ig002_of_match,
+ },
+ .probe = boe_th101mb31ig002_dsi_probe,
+ .remove = boe_th101mb31ig002_dsi_remove,
+};
+module_mipi_dsi_driver(boe_th101mb31ig002_driver);
+
+MODULE_AUTHOR("Alexander Warnecke <awarnecke002@hotmail.com>");
+MODULE_DESCRIPTION("BOE TH101MB31IG002-28A MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
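Every drm_panel_funcs callback in the new driver above recovers its state with an open-coded three-line container_of(). A hypothetical helper, not part of the patch but the same idiom the himax-hx83112a driver added elsewhere in this diff uses, would shorten those casts:

static inline struct boe_th101mb31ig002 *
to_boe_th101mb31ig002(struct drm_panel *panel)
{
        /* The drm_panel is embedded in the driver struct, so walk back to it. */
        return container_of(panel, struct boe_th101mb31ig002, panel);
}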
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index c4c0f08e9202..0ffe8f8c01de 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1768,11 +1768,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
- .clock = 162850,
+ .clock = 162680,
.hdisplay = 1200,
- .hsync_start = 1200 + 50,
- .hsync_end = 1200 + 50 + 20,
- .htotal = 1200 + 50 + 20 + 50,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 20,
+ .htotal = 1200 + 60 + 20 + 40,
.vdisplay = 1920,
.vsync_start = 1920 + 116,
.vsync_end = 1920 + 116 + 8,
@@ -1871,6 +1871,8 @@ static int boe_panel_add(struct boe_panel *boe)
gpiod_set_value(boe->enable_gpio, 0);
+ boe->base.prepare_prev_first = true;
+
drm_panel_init(&boe->base, dev, &boe_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index a0b6f69b916f..d58f90bc48fb 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -71,6 +71,21 @@ struct panel_delay {
unsigned int hpd_absent;
/**
+ * @powered_on_to_enable: Time between panel powered on and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when the panel was powered on and when enable may begin.
+ *
+ * This is (T3+T4+T5+T6+T8)-min on eDP timing diagrams, i.e. the time
+ * from when the power supply is enabled until we can turn the
+ * backlight on and see valid data.
+ *
+ * This doesn't normally need to be set if timings are already met by
+ * prepare_to_enable or enable.
+ */
+ unsigned int powered_on_to_enable;
+
+ /**
* @prepare_to_enable: Time between prepare and enable.
*
* The minimum time, in milliseconds, that needs to have passed
@@ -216,6 +231,7 @@ struct panel_edp {
bool prepared;
ktime_t prepared_time;
+ ktime_t powered_on_time;
ktime_t unprepared_time;
const struct panel_desc *desc;
@@ -397,6 +413,7 @@ static int panel_edp_suspend(struct device *dev)
{
struct panel_edp *p = dev_get_drvdata(dev);
+ drm_dp_dpcd_set_powered(p->aux, false);
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
p->unprepared_time = ktime_get_boottime();
@@ -413,8 +430,7 @@ static int panel_edp_unprepare(struct drm_panel *panel)
if (!p->prepared)
return 0;
- pm_runtime_mark_last_busy(panel->dev);
- ret = pm_runtime_put_autosuspend(panel->dev);
+ ret = pm_runtime_put_sync_suspend(panel->dev);
if (ret < 0)
return ret;
p->prepared = false;
@@ -454,6 +470,9 @@ static int panel_edp_prepare_once(struct panel_edp *p)
}
gpiod_set_value_cansleep(p->enable_gpio, 1);
+ drm_dp_dpcd_set_powered(p->aux, true);
+
+ p->powered_on_time = ktime_get_boottime();
delay = p->desc->delay.hpd_reliable;
if (p->no_hpd)
@@ -490,6 +509,7 @@ static int panel_edp_prepare_once(struct panel_edp *p)
return 0;
error:
+ drm_dp_dpcd_set_powered(p->aux, false);
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
p->unprepared_time = ktime_get_boottime();
@@ -579,6 +599,8 @@ static int panel_edp_enable(struct drm_panel *panel)
panel_edp_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+ panel_edp_wait(p->powered_on_time, p->desc->delay.powered_on_to_enable);
+
p->enabled = true;
return 0;
@@ -983,19 +1005,6 @@ static const struct panel_desc auo_b101ean01 = {
},
};
-static const struct drm_display_mode auo_b116xa3_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -1837,6 +1846,13 @@ static const struct panel_delay delay_200_500_p2e80 = {
.prepare_to_enable = 80,
};
+static const struct panel_delay delay_200_500_e50_p2e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .prepare_to_enable = 80,
+};
+
static const struct panel_delay delay_200_500_p2e100 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1874,6 +1890,13 @@ static const struct panel_delay delay_200_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200_d200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 200,
+};
+
static const struct panel_delay delay_200_500_e200_d10 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1887,6 +1910,13 @@ static const struct panel_delay delay_200_150_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e50_po2e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .powered_on_to_enable = 200,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1912,7 +1942,9 @@ static const struct panel_delay delay_200_150_e200 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x105c, &delay_200_500_e50, "B116XTN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x125c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
@@ -1921,56 +1953,91 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
- EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
- &auo_b116xa3_mode),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
- EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1",
- &auo_b116xa3_mode),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0607, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0608, &delay_200_500_e50, "NT116WHM-N11"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0668, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x068f, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x06e5, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0705, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0715, &delay_200_150_e200, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0717, &delay_200_500_e50_po2e200, "NV133FHM-N42"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0731, &delay_200_500_e80, "NT116WHM-N42"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0741, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0744, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x074c, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0751, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0754, &delay_200_500_e50_po2e200, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0771, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0797, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d3, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f6, &delay_200_500_e200, "NT140FHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f8, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0813, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0827, &delay_200_500_e50_p2e80, "NT140WHM-N44 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0843, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x08b2, &delay_200_500_e200, "NT140WHM-N49"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0848, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0849, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x096e, &delay_200_500_e50_po2e200, "NV116WHM-T07 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0993, &delay_200_500_e80, "NV116WHM-T14 V8.0"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ad, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1138, &innolux_n116bca_ea1.delay, "N116BCA-EA1-RC4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1141, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1145, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x114a, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
+
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
@@ -1979,11 +2046,25 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x044f, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x1118, &delay_200_500_e50, "KD116N29-30NK-A005"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0000, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x048d, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0497, &delay_200_500_e200_d200, "LP116WH7-SPB1"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x052c, &delay_200_500_e200_d200, "LP133WF2-SPL7"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0537, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x054a, &delay_200_500_e200_d200, "LP116WH8-SPC1"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0567, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"),
+
EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
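The new powered_on_to_enable delay above is measured from the powered_on_time timestamp taken in panel_edp_prepare_once(), and panel_edp_enable() then waits only for whatever part of the minimum has not already elapsed. A rough approximation of that ktime-based wait (hypothetical helper name; the driver's own panel_edp_wait() may differ in detail):

#include <linux/delay.h>
#include <linux/ktime.h>

/* Sleep until at least min_ms milliseconds have passed since @start. */
static void example_wait_since(ktime_t start, unsigned int min_ms)
{
        s64 elapsed_ms = ktime_ms_delta(ktime_get_boottime(), start);

        if (elapsed_ms < min_ms)
                msleep(min_ms - elapsed_ms);
}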
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
new file mode 100644
index 000000000000..466c27012abf
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2024 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer specific DSI commands */
+#define HX83112A_SETPOWER1 0xb1
+#define HX83112A_SETDISP 0xb2
+#define HX83112A_SETDRV 0xb4
+#define HX83112A_SETEXTC 0xb9
+#define HX83112A_SETBANK 0xbd
+#define HX83112A_SETPTBA 0xbf
+#define HX83112A_SETDGCLUT 0xc1
+#define HX83112A_SETTCON 0xc7
+#define HX83112A_SETCLOCK 0xcb
+#define HX83112A_SETPANEL 0xcc
+#define HX83112A_SETPOWER2 0xd2
+#define HX83112A_SETGIP0 0xd3
+#define HX83112A_SETGIP1 0xd5
+#define HX83112A_SETGIP2 0xd6
+#define HX83112A_SETGIP3 0xd8
+#define HX83112A_SETTP1 0xe7
+#define HX83112A_UNKNOWN1 0xe9
+
+struct hx83112a_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[3];
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct hx83112a_panel *to_hx83112a_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx83112a_panel, panel);
+}
+
+static void hx83112a_reset(struct hx83112a_panel *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(50);
+}
+
+static int hx83112a_on(struct hx83112a_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETEXTC, 0x83, 0x11, 0x2a);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER1,
+ 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDISP,
+ 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19,
+ 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV,
+ 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0,
+ 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07,
+ 0x12, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV,
+ 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00,
+ 0x53);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTCON,
+ 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPANEL, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER2, 0x2b, 0x2b);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08,
+ 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07,
+ 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10,
+ 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31,
+ 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00,
+ 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0,
+ 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP1,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
+ 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03,
+ 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31,
+ 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP2,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
+ 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00,
+ 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31,
+ 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e,
+ 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff,
+ 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
+ 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
+ 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50,
+ 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05,
+ 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
+ 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
+ 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc3);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETCLOCK, 0xd1, 0xd6);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc6);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPTBA, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(150);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(50);
+
+ return 0;
+}
+
+static int hx83112a_disable(struct drm_panel *panel)
+{
+ struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int hx83112a_prepare(struct drm_panel *panel)
+{
+ struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ hx83112a_reset(ctx);
+
+ ret = hx83112a_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx83112a_unprepare(struct drm_panel *panel)
+{
+ struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode hx83112a_mode = {
+ .clock = (1080 + 28 + 8 + 8) * (2340 + 27 + 5 + 5) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 8,
+ .htotal = 1080 + 28 + 8 + 8,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 27,
+ .vsync_end = 2340 + 27 + 5,
+ .vtotal = 2340 + 27 + 5 + 5,
+ .width_mm = 67,
+ .height_mm = 145,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int hx83112a_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &hx83112a_mode);
+}
+
+static const struct drm_panel_funcs hx83112a_panel_funcs = {
+ .prepare = hx83112a_prepare,
+ .unprepare = hx83112a_unprepare,
+ .disable = hx83112a_disable,
+ .get_modes = hx83112a_get_modes,
+};
+
+static int hx83112a_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx83112a_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd1";
+ ctx->supplies[1].supply = "vsn";
+ ctx->supplies[2].supply = "vsp";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hx83112a_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx83112a_panel *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx83112a_of_match[] = {
+ { .compatible = "djn,9a-3r063-1102b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx83112a_of_match);
+
+static struct mipi_dsi_driver hx83112a_driver = {
+ .probe = hx83112a_probe,
+ .remove = hx83112a_remove,
+ .driver = {
+ .name = "panel-himax-hx83112a",
+ .of_match_table = hx83112a_of_match,
+ },
+};
+module_mipi_dsi_driver(hx83112a_driver);
+
+MODULE_DESCRIPTION("DRM driver for hx83112a-equipped DSI panels");
+MODULE_LICENSE("GPL");
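For reference, the inline .clock expression in hx83112a_mode above works out as plain arithmetic (not additional driver code):

/*
 * htotal = 1080 + 28 + 8 + 8 = 1124
 * vtotal = 2340 + 27 + 5 + 5 = 2377
 * clock  = 1124 * 2377 * 60 / 1000 = 160304 kHz (about 160.3 MHz),
 * i.e. the pixel clock required for a 60 Hz refresh at this timing.
 */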
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 30919c872ac8..9d87cc1a357e 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -646,26 +646,17 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
return -EINVAL;
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset gpio\n");
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "cannot get reset gpio\n");
ctx->vci = devm_regulator_get(dev, "vci");
- if (IS_ERR(ctx->vci)) {
- ret = PTR_ERR(ctx->vci);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request vci regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->vci))
+ return dev_err_probe(dev, PTR_ERR(ctx->vci), "Failed to request vci regulator\n");
ctx->iovcc = devm_regulator_get(dev, "iovcc");
- if (IS_ERR(ctx->iovcc)) {
- ret = PTR_ERR(ctx->iovcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
mipi_dsi_set_drvdata(dsi, ctx);
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 39e408c9f762..a4c9a5cb9811 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
@@ -21,25 +22,224 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct ltk500hd1829_cmd {
+ char cmd;
+ char data;
+};
+
+struct ltk500hd1829_desc {
+ const struct drm_display_mode *mode;
+ const struct ltk500hd1829_cmd *init;
+ unsigned int num_init;
+};
+
struct ltk500hd1829 {
struct device *dev;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
struct regulator *vcc;
struct regulator *iovcc;
+ const struct ltk500hd1829_desc *panel_desc;
bool prepared;
};
-struct ltk500hd1829_cmd {
- char cmd;
- char data;
+static const struct ltk500hd1829_cmd ltk101b4029w_init[] = {
+ /* Page0 */
+ { 0xE0, 0x00 },
+ /* PASSWORD */
+ { 0xE1, 0x93 },
+ { 0xE2, 0x65 },
+ { 0xE3, 0xF8 },
+ { 0x80, 0x03 }, /* 0X03:4-LANE; 0X02:3-LANE; 0X01:2-LANE */
+ /* Page1 */
+ { 0xE0, 0x01 },
+ /* Set VCOM */
+ { 0x00, 0x00 },
+ { 0x01, 0x6F },
+ /* Set Gamma Power, VGMP,VGMN,VGSP,VGSN */
+ { 0x17, 0x00 },
+ { 0x18, 0xAF }, /* 4.3V */
+ { 0x19, 0x01 }, /* 0.3V */
+ { 0x1A, 0x00 },
+ { 0x1B, 0xAF }, /* 4.3V */
+ { 0x1C, 0x01 }, /* 0.3V */
+ /* Set Gate Power */
+ { 0x1F, 0x3E }, /* VGH_R = 15V */
+ { 0x20, 0x28 }, /* VGL_R = -12V */
+ { 0x21, 0x28 }, /* VGL_R2 = -12V */
+ { 0x22, 0x7E },
+ /* SETPANEL */
+ { 0x35, 0x26 },
+ { 0x37, 0x09 },
+ /* SET RGBCYC */
+ { 0x38, 0x04 },
+ { 0x39, 0x00 },
+ { 0x3A, 0x01 },
+ { 0x3C, 0x7C },
+ { 0x3D, 0xFF },
+ { 0x3E, 0xFF },
+ { 0x3F, 0x7F },
+ /* Set TCON */
+ { 0x40, 0x06 }, /* RSO = 800 RGB */
+ { 0x41, 0xA0 }, /* LN = 640->1280 line */
+ { 0x42, 0x81 },
+ { 0x43, 0x08 }, /* VFP = 8 */
+ { 0x44, 0x0B }, /* VBP = 12 */
+ { 0x45, 0x28 }, /* HBP = 40 */
+ /* power voltage */
+ { 0x55, 0x0F }, /* DCDCM = 0001, JD PWR_IC */
+ { 0x57, 0x69 },
+ { 0x59, 0x0A }, /* VCL = -2.9V */
+ { 0x5A, 0x28 }, /* VGH = 15V */
+ { 0x5B, 0x14 }, /* VGL = -11V */
+ /* Gamma */
+ { 0x5D, 0x7C },
+ { 0x5E, 0x65 },
+ { 0x5F, 0x55 },
+ { 0x60, 0x47 },
+ { 0x61, 0x43 },
+ { 0x62, 0x32 },
+ { 0x63, 0x34 },
+ { 0x64, 0x1C },
+ { 0x65, 0x33 },
+ { 0x66, 0x31 },
+ { 0x67, 0x30 },
+ { 0x68, 0x4E },
+ { 0x69, 0x3C },
+ { 0x6A, 0x44 },
+ { 0x6B, 0x35 },
+ { 0x6C, 0x31 },
+ { 0x6D, 0x23 },
+ { 0x6E, 0x11 },
+ { 0x6F, 0x00 },
+ { 0x70, 0x7C },
+ { 0x71, 0x65 },
+ { 0x72, 0x55 },
+ { 0x73, 0x47 },
+ { 0x74, 0x43 },
+ { 0x75, 0x32 },
+ { 0x76, 0x34 },
+ { 0x77, 0x1C },
+ { 0x78, 0x33 },
+ { 0x79, 0x31 },
+ { 0x7A, 0x30 },
+ { 0x7B, 0x4E },
+ { 0x7C, 0x3C },
+ { 0x7D, 0x44 },
+ { 0x7E, 0x35 },
+ { 0x7F, 0x31 },
+ { 0x80, 0x23 },
+ { 0x81, 0x11 },
+ { 0x82, 0x00 },
+ /* Page2, for GIP */
+ { 0xE0, 0x02 },
+ /* GIP_L Pin mapping */
+ { 0x00, 0x1E },
+ { 0x01, 0x1E },
+ { 0x02, 0x41 },
+ { 0x03, 0x41 },
+ { 0x04, 0x43 },
+ { 0x05, 0x43 },
+ { 0x06, 0x1F },
+ { 0x07, 0x1F },
+ { 0x08, 0x35 },
+ { 0x09, 0x1F },
+ { 0x0A, 0x15 },
+ { 0x0B, 0x15 },
+ { 0x0C, 0x1F },
+ { 0x0D, 0x47 },
+ { 0x0E, 0x47 },
+ { 0x0F, 0x45 },
+ { 0x10, 0x45 },
+ { 0x11, 0x4B },
+ { 0x12, 0x4B },
+ { 0x13, 0x49 },
+ { 0x14, 0x49 },
+ { 0x15, 0x1F },
+ /* GIP_R Pin mapping */
+ { 0x16, 0x1E },
+ { 0x17, 0x1E },
+ { 0x18, 0x40 },
+ { 0x19, 0x40 },
+ { 0x1A, 0x42 },
+ { 0x1B, 0x42 },
+ { 0x1C, 0x1F },
+ { 0x1D, 0x1F },
+ { 0x1E, 0x35 },
+ { 0x1F, 0x1F },
+ { 0x20, 0x15 },
+ { 0x21, 0x15 },
+ { 0x22, 0x1f },
+ { 0x23, 0x46 },
+ { 0x24, 0x46 },
+ { 0x25, 0x44 },
+ { 0x26, 0x44 },
+ { 0x27, 0x4A },
+ { 0x28, 0x4A },
+ { 0x29, 0x48 },
+ { 0x2A, 0x48 },
+ { 0x2B, 0x1F },
+ /* GIP Timing */
+ { 0x58, 0x40 },
+ { 0x5B, 0x30 },
+ { 0x5C, 0x03 },
+ { 0x5D, 0x30 },
+ { 0x5E, 0x01 },
+ { 0x5F, 0x02 },
+ { 0x63, 0x14 },
+ { 0x64, 0x6A },
+ { 0x67, 0x73 },
+ { 0x68, 0x05 },
+ { 0x69, 0x14 },
+ { 0x6A, 0x6A },
+ { 0x6B, 0x08 },
+ { 0x6C, 0x00 },
+ { 0x6D, 0x00 },
+ { 0x6E, 0x00 },
+ { 0x6F, 0x88 },
+ { 0x77, 0xDD },
+ { 0x79, 0x0E },
+ { 0x7A, 0x03 },
+ { 0x7D, 0x14 },
+ { 0x7E, 0x6A },
+ /* Page4 */
+ { 0xE0, 0x04 },
+ { 0x09, 0x11 },
+ { 0x0E, 0x48 },
+ { 0x2B, 0x2B },
+ { 0x2D, 0x03 },
+ { 0x2E, 0x44 },
+ /* Page0 */
+ { 0xE0, 0x00 },
+ { 0xE6, 0x02 },
+ { 0xE7, 0x0C },
+};
+
+static const struct drm_display_mode ltk101b4029w_mode = {
+ .hdisplay = 800,
+ .hsync_start = 800 + 18,
+ .hsync_end = 800 + 18 + 18,
+ .htotal = 800 + 18 + 18 + 18,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 24,
+ .vsync_end = 1280 + 24 + 4,
+ .vtotal = 1280 + 24 + 4 + 8,
+ .clock = 67330,
+ .width_mm = 136,
+ .height_mm = 218,
+};
+
+static const struct ltk500hd1829_desc ltk101b4029w_data = {
+ .mode = &ltk101b4029w_mode,
+ .init = ltk101b4029w_init,
+ .num_init = ARRAY_SIZE(ltk101b4029w_init),
};
/*
* There is no description in the Reference Manual about these commands.
* We received them from the vendor, so just use them as is.
*/
-static const struct ltk500hd1829_cmd init_code[] = {
+static const struct ltk500hd1829_cmd ltk500hd1829_init[] = {
{ 0xE0, 0x00 },
{ 0xE1, 0x93 },
{ 0xE2, 0x65 },
@@ -260,6 +460,26 @@ static const struct ltk500hd1829_cmd init_code[] = {
{ 0x35, 0x00 },
};
+static const struct drm_display_mode ltk500hd1829_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 50,
+ .hsync_end = 720 + 50 + 50,
+ .htotal = 720 + 50 + 50 + 50,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 12,
+ .clock = 69217,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk500hd1829_desc ltk500hd1829_data = {
+ .mode = &ltk500hd1829_mode,
+ .init = ltk500hd1829_init,
+ .num_init = ARRAY_SIZE(ltk500hd1829_init),
+};
+
static inline
struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel)
{
@@ -324,8 +544,8 @@ static int ltk500hd1829_prepare(struct drm_panel *panel)
/* tRT: >= 5ms */
usleep_range(5000, 6000);
- for (i = 0; i < ARRAY_SIZE(init_code); i++) {
- ret = mipi_dsi_generic_write(dsi, &init_code[i],
+ for (i = 0; i < ctx->panel_desc->num_init; i++) {
+ ret = mipi_dsi_generic_write(dsi, &ctx->panel_desc->init[i],
sizeof(struct ltk500hd1829_cmd));
if (ret < 0) {
dev_err(panel->dev, "failed to write init cmds: %d\n", ret);
@@ -359,31 +579,17 @@ disable_vcc:
return ret;
}
-static const struct drm_display_mode default_mode = {
- .hdisplay = 720,
- .hsync_start = 720 + 50,
- .hsync_end = 720 + 50 + 50,
- .htotal = 720 + 50 + 50 + 50,
- .vdisplay = 1280,
- .vsync_start = 1280 + 30,
- .vsync_end = 1280 + 30 + 4,
- .vtotal = 1280 + 30 + 4 + 12,
- .clock = 69217,
- .width_mm = 62,
- .height_mm = 110,
-};
-
static int ltk500hd1829_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
if (!mode) {
dev_err(ctx->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ ctx->panel_desc->mode->hdisplay, ctx->panel_desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->panel_desc->mode));
return -ENOMEM;
}
@@ -413,6 +619,10 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
+ ctx->panel_desc = of_device_get_match_data(dev);
+ if (!ctx->panel_desc)
+ return -EINVAL;
+
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio\n");
@@ -492,7 +702,14 @@ static void ltk500hd1829_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id ltk500hd1829_of_match[] = {
- { .compatible = "leadtek,ltk500hd1829", },
+ {
+ .compatible = "leadtek,ltk101b4029w",
+ .data = &ltk101b4029w_data,
+ },
+ {
+ .compatible = "leadtek,ltk500hd1829",
+ .data = &ltk500hd1829_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match);
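The leadtek-ltk500hd1829 changes turn the driver into a multi-panel one: each compatible entry now carries a struct ltk500hd1829_desc as OF match data, and probe() looks it up instead of relying on a single hard-coded mode. A stripped-down sketch of that pattern with hypothetical names:

#include <linux/errno.h>
#include <linux/of_device.h>

#include <drm/drm_modes.h>

struct example_desc {
        const struct drm_display_mode *mode;    /* per-compatible timing */
};

static int example_probe(struct device *dev)
{
        /* Returns the .data pointer of the matching of_device_id entry. */
        const struct example_desc *desc = of_device_get_match_data(dev);

        if (!desc)
                return -EINVAL;

        /* ... use desc->mode (and any per-panel init table) from here on ... */
        return 0;
}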
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 83a9cf53d269..d3bfdfc9cff6 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -36,6 +36,9 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#define NT35510_CMD_CORRECT_GAMMA BIT(0)
+#define NT35510_CMD_CONTROL_DISPLAY BIT(1)
+
#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
#define MCS_CMD_READ_ID1 0xDA
#define MCS_CMD_READ_ID2 0xDB
@@ -112,18 +115,33 @@
/* AVDD and AVEE setting 3 bytes */
#define NT35510_P1_AVDD_LEN 3
#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VCL_LEN 3
#define NT35510_P1_VGH_LEN 3
#define NT35510_P1_VGL_LEN 3
#define NT35510_P1_VGP_LEN 3
#define NT35510_P1_VGN_LEN 3
+#define NT35510_P1_VCMOFF_LEN 2
/* BT1CTR thru BT5CTR setting 3 bytes */
#define NT35510_P1_BT1CTR_LEN 3
#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT3CTR_LEN 3
#define NT35510_P1_BT4CTR_LEN 3
#define NT35510_P1_BT5CTR_LEN 3
/* 52 gamma parameters times two per color: positive and negative */
#define NT35510_P1_GAMMA_LEN 52
+#define NT35510_WRCTRLD_BCTRL BIT(5)
+#define NT35510_WRCTRLD_A BIT(4)
+#define NT35510_WRCTRLD_DD BIT(3)
+#define NT35510_WRCTRLD_BL BIT(2)
+#define NT35510_WRCTRLD_DB BIT(1)
+#define NT35510_WRCTRLD_G BIT(0)
+
+#define NT35510_WRCABC_OFF 0
+#define NT35510_WRCABC_UI_MODE 1
+#define NT35510_WRCABC_STILL_MODE 2
+#define NT35510_WRCABC_MOVING_MODE 3
+
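/*
 * A sketch of how the WRCTRLD bits defined above are typically used (an
 * illustrative assumption, since the code that consumes them is outside
 * this hunk): they are OR-ed together and sent with the standard DCS
 * command MIPI_DCS_WRITE_CONTROL_DISPLAY from <video/mipi_display.h>:
 *
 *	u8 ctrl = NT35510_WRCTRLD_BCTRL | NT35510_WRCTRLD_DD |
 *		  NT35510_WRCTRLD_BL;
 *
 *	mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, &ctrl, 1);
 */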
/**
* struct nt35510_config - the display-specific NT35510 configuration
*
@@ -172,6 +190,14 @@ struct nt35510_config {
*/
const struct drm_display_mode mode;
/**
+ * @mode_flags: DSI operation mode related flags
+ */
+ unsigned long mode_flags;
+ /**
+ * @cmds: enable DSI commands
+ */
+ u32 cmds;
+ /**
* @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
* in 0.1V steps the default is 0x05 which means 6.0V
*/
@@ -221,6 +247,25 @@ struct nt35510_config {
*/
u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
/**
+	 * @vcl: setting for VCL ranging from 0x00 = -2.5V to 0x03 = -4.0V
+	 * in 0.5V steps, the default is 0x00 which means -2.5V
+ */
+ u8 vcl[NT35510_P1_VCL_LEN];
+ /**
+ * @bt3ctr: setting for boost power control for the VCL step-up
+ * circuit (3)
+	 * bits 0..2 in the lower nibble control CLCK, the booster clock
+	 * frequency, the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble control BTCL, the boosting
+ * amplification for the step-up circuit.
+ * 0 = Disable
+ * 1 = -0.5 x VDDB
+ * 2 = -1 x VDDB
+ * 3 = -2 x VDDB
+ * The defaults are 4 and 2 yielding 0x24
+ */
+ u8 bt3ctr[NT35510_P1_BT3CTR_LEN];
+ /**
* @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
* in 1V steps, the default is 0x08 which means 15V
*/
@@ -274,6 +319,113 @@ struct nt35510_config {
*/
u8 vgn[NT35510_P1_VGN_LEN];
/**
+ * @vcmoff: setting the DC VCOM offset voltage
+ * The first byte contains bit 8 of VCM in bit 0 and VCMOFFSEL in bit 4.
+ * The second byte contains bits 0..7 of VCM.
+	 * VCMOFFSEL selects the common voltage offset mode:
+	 * VCMOFFSEL 0x00 = VCOM .. 0x01 = Gamma.
+	 * The default is 0x00.
+	 * VCM is the VCOM output voltage (VCMOFFSEL = 0) or the internal register
+	 * offset for gamma voltage (VCMOFFSEL = 1).
+	 * VCM 0x00 = 0V/0 .. 0x118 = 3.5V/280 in steps of 12.5mV per step.
+ * The default is 0x00 = 0V/0.
+ */
+ u8 vcmoff[NT35510_P1_VCMOFF_LEN];
+ /**
+ * @dopctr: setting optional control for display
+	 * ERR bits 0..1 in the first byte are the ERR pin output signal setting.
+ * 0 = Disable, ERR pin output low
+ * 1 = ERR pin output CRC error only
+ * 2 = ERR pin output ECC error only
+ * 3 = ERR pin output CRC and ECC error
+ * The default is 0.
+ * N565 bit 2 in the first byte is the 16-bit/pixel format selection.
+ * 0 = R[4:0] + G[5:3] & G[2:0] + B[4:0]
+ * 1 = G[2:0] + R[4:0] & B[4:0] + G[5:3]
+ * The default is 0.
+	 * DIS_EoTP_HS bit 3 in the first byte controls "DSI protocol violation"
+	 * error reporting.
+	 * 0 = report the error
+	 * 1 = do not report the error
+ * DSIM bit 4 in the first byte is the video mode data type enable
+ * 0 = Video mode data type disable
+ * 1 = Video mode data type enable
+ * The default is 0.
+	 * DSIG bit 5 in the first byte is the generic r/w data type enable
+ * 0 = Generic r/w disable
+ * 1 = Generic r/w enable
+ * The default is 0.
+ * DSITE bit 6 in the first byte is TE line enable
+ * 0 = TE line is disabled
+ * 1 = TE line is enabled
+ * The default is 0.
+	 * RAMKP bit 7 in the first byte selects whether the frame memory is
+	 * kept or lost in sleep-in mode
+	 * 0 = contents are lost in sleep-in
+	 * 1 = contents are kept in sleep-in
+ * The default is 0.
+ * CRL bit 1 in the second byte is the source driver data shift
+	 * direction selection. This bit is XORed with bit RSMX of the
+	 * 3600h command.
+	 * 0 (RSMX = 0) = S1 -> S1440
+	 * 0 (RSMX = 1) = S1440 -> S1
+	 * 1 (RSMX = 0) = S1440 -> S1
+	 * 1 (RSMX = 1) = S1 -> S1440
+ * The default is 0.
+ * CTB bit 2 in the second byte is the vertical scanning direction
+	 * selection for gate control signals. This bit is XORed with
+	 * bit ML of the 3600h command.
+ * 0 (ML = 0) = Forward (top -> bottom)
+ * 0 (ML = 1) = Reverse (bottom -> top)
+ * 1 (ML = 0) = Reverse (bottom -> top)
+ * 1 (ML = 1) = Forward (top -> bottom)
+ * The default is 0.
+ * CRGB bit 3 in the second byte is RGB-BGR order selection. This
+	 * bit is XORed with bit RGB of the 3600h command.
+ * 0 (RGB = 0) = RGB/Normal
+ * 0 (RGB = 1) = BGR/RB swap
+ * 1 (RGB = 0) = BGR/RB swap
+ * 1 (RGB = 1) = RGB/Normal
+ * The default is 0.
+ * TE_PWR_SEL bit 4 in the second byte is the TE output voltage
+ * level selection (only valid when DSTB_SEL = 0 or DSTB_SEL = 1,
+ * VSEL = High and VDDI = 1.665~3.3V).
+ * 0 = TE output voltage level is VDDI
+ * 1 = TE output voltage level is VDDA
+ * The default is 0.
+ */
+ u8 dopctr[NT35510_P0_DOPCTR_LEN];
+ /**
+ * @madctl: Memory data access control
+	 * RSMY bit 0 is flip vertical. Flips the display image top to bottom.
+ * RSMX bit 1 is flip horizontal. Flips the display image left to right.
+ * MH bit 2 is the horizontal refresh order.
+ * RGB bit 3 is the RGB-BGR order.
+ * 0 = RGB color sequence
+ * 1 = BGR color sequence
+ * ML bit 4 is the vertical refresh order.
+ * MV bit 5 is the row/column exchange.
+ * MX bit 6 is the column address order.
+ * MY bit 7 is the row address order.
+ */
+ u8 madctl;
+ /**
+ * @sdhdtctr: source output data hold time
+ * 0x00..0x3F = 0..31.5us in steps of 0.5us
+ * The default is 0x05 = 2.5us.
+ */
+ u8 sdhdtctr;
+ /**
+ * @gseqctr: EQ control for gate signals
+ * GFEQ_XX[3:0]: time setting of EQ step for falling edge in steps
+ * of 0.5us.
+ * The default is 0x07 = 3.5us
+ * GREQ_XX[7:4]: time setting of EQ step for rising edge in steps
+ * of 0.5us.
+ * The default is 0x07 = 3.5us
+ */
+ u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+ /**
* @sdeqctr: Source driver control settings, first byte is
* 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
* mode 2 uses three steps meaning EQS3 is not used in mode
@@ -343,6 +495,43 @@ struct nt35510_config {
* @gamma_corr_neg_b: Blue gamma correction parameters, negative
*/
u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+ /**
+ * @wrdisbv: write display brightness
+	 * 0x00 means the lowest brightness and 0xff means the highest
+	 * brightness.
+ * The default is 0x00.
+ */
+ u8 wrdisbv;
+ /**
+ * @wrctrld: write control display
+ * G bit 0 selects gamma curve: 0 = Manual, 1 = Automatic
+ * DB bit 1 selects display brightness: 0 = Manual, 1 = Automatic
+ * BL bit 2 controls backlight control: 0 = Off, 1 = On
+ * DD bit 3 controls display dimming: 0 = Off, 1 = On
+ * A bit 4 controls LABC block: 0 = Off, 1 = On
+ * BCTRL bit 5 controls brightness block: 0 = Off, 1 = On
+ */
+ u8 wrctrld;
+ /**
+ * @wrcabc: write content adaptive brightness control
+	 * Four different modes of content adaptive image functionality
+	 * are available:
+ * 0: Off
+ * 1: User Interface Image (UI-Mode)
+ * 2: Still Picture Image (Still-Mode)
+ * 3: Moving Picture Image (Moving-Mode)
+ * The default is 0
+ */
+ u8 wrcabc;
+ /**
+ * @wrcabcmb: write CABC minimum brightness
+ * Set the minimum brightness value of the display for CABC
+ * function.
+	 * 0x00 means the lowest brightness for CABC and 0xff means the
+	 * highest brightness for CABC.
+ * The default is 0x00.
+ */
+ u8 wrcabcmb;
};
/**
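As a quick sanity check on the @wrctrld layout documented above: the control byte is simply an OR of the NT35510_WRCTRLD_* bits added earlier in this patch. A minimal sketch (illustrative only, not part of the patch), using the value the Frida configuration below programs:

	/* Illustrative only: composing the WRCTRLD byte from the bit macros above. */
	u8 wrctrld = NT35510_WRCTRLD_BCTRL |	/* brightness control block on, bit 5 */
		     NT35510_WRCTRLD_DD |	/* display dimming on, bit 3 */
		     NT35510_WRCTRLD_BL;	/* backlight control on, bit 2 */
	/* 0x20 | 0x08 | 0x04 == 0x2c, written with MIPI_DCS_WRITE_CONTROL_DISPLAY in prepare() */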
@@ -486,6 +675,16 @@ static int nt35510_setup_power(struct nt35510 *nt)
nt->conf->bt2ctr);
if (ret)
return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCL,
+ NT35510_P1_VCL_LEN,
+ nt->conf->vcl);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT3CTR,
+ NT35510_P1_BT3CTR_LEN,
+ nt->conf->bt3ctr);
+ if (ret)
+ return ret;
ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
NT35510_P1_VGH_LEN,
nt->conf->vgh);
@@ -522,6 +721,12 @@ static int nt35510_setup_power(struct nt35510 *nt)
if (ret)
return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF,
+ NT35510_P1_VCMOFF_LEN,
+ nt->conf->vcmoff);
+ if (ret)
+ return ret;
+
/* Typically 10 ms */
usleep_range(10000, 20000);
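For reference, the SETVCMOFF payload sent above can be decoded using the 12.5mV-per-step granularity documented for @vcmoff. A minimal sketch (illustrative only; the helper name is invented for this example and is not part of the driver):

	/* Convert a two-byte @vcmoff value to millivolts (helper name made up
	 * for illustration): bit 8 of VCM sits in byte 0 bit 0, bits 0..7 in
	 * byte 1, 12.5mV per step.
	 */
	static unsigned int nt35510_vcm_to_mv(const u8 vcmoff[2])
	{
		unsigned int code = ((vcmoff[0] & 0x01) << 8) | vcmoff[1];

		return code * 25 / 2;	/* 12.5mV per step, kept in integer math */
	}

For example, the Frida configuration below uses { 0x00, 0x50 }: code 0x050 = 80, and 80 * 12.5mV = 1000mV, which matches its "VCM = -1V" comment.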
@@ -536,46 +741,28 @@ static int nt35510_setup_display(struct nt35510 *nt)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
const struct nt35510_config *conf = nt->conf;
- u8 dopctr[NT35510_P0_DOPCTR_LEN];
- u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
- /* FIXME: set up any rotation (assume none for now) */
- u8 addr_mode = NT35510_ROTATE_0_SETTING;
- u8 val;
int ret;
- /* Enable TE, EoTP and RGB pixel format */
- dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
- NT35510_DOPCTR_0_N565;
- dopctr[1] = NT35510_DOPCTR_1_CTB;
ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
NT35510_P0_DOPCTR_LEN,
- dopctr);
+ conf->dopctr);
if (ret)
return ret;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
- sizeof(addr_mode));
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &conf->madctl,
+ sizeof(conf->madctl));
if (ret < 0)
return ret;
- /*
- * Source data hold time, default 0x05 = 2.5us
- * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
- * 0x0A = 5us
- */
- val = 0x0A;
- ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
- sizeof(val));
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &conf->sdhdtctr,
+ sizeof(conf->sdhdtctr));
if (ret < 0)
return ret;
- /* EQ control for gate signals, 0x00 = 0 us */
- gseqctr[0] = 0x00;
- gseqctr[1] = 0x00;
ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
NT35510_P0_GSEQCTR_LEN,
- gseqctr);
+ conf->gseqctr);
if (ret)
return ret;
@@ -719,36 +906,38 @@ static int nt35510_power_on(struct nt35510 *nt)
if (ret)
return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_pos_r);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_pos_g);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_pos_b);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_neg_r);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_neg_g);
- if (ret)
- return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
- NT35510_P1_GAMMA_LEN,
- nt->conf->gamma_corr_neg_b);
- if (ret)
- return ret;
+ if (nt->conf->cmds & NT35510_CMD_CORRECT_GAMMA) {
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_b);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_b);
+ if (ret)
+ return ret;
+ }
/* Set up stuff in manufacturer control, page 0 */
ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
@@ -827,6 +1016,26 @@ static int nt35510_prepare(struct drm_panel *panel)
/* Up to 120 ms */
usleep_range(120000, 150000);
+ if (nt->conf->cmds & NT35510_CMD_CONTROL_DISPLAY) {
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &nt->conf->wrctrld,
+ sizeof(nt->conf->wrctrld));
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE,
+ &nt->conf->wrcabc,
+ sizeof(nt->conf->wrcabc));
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS,
+ &nt->conf->wrcabcmb,
+ sizeof(nt->conf->wrcabcmb));
+ if (ret < 0)
+ return ret;
+ }
+
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret) {
dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
@@ -896,7 +1105,6 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
*/
dsi->hs_rate = 349440000;
dsi->lp_rate = 9600000;
- dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
/*
* Every new incarnation of this display must have a unique
@@ -908,6 +1116,8 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return -ENODEV;
}
+ dsi->mode_flags = nt->conf->mode_flags;
+
nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
@@ -923,7 +1133,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(nt->reset_gpio)) {
dev_err(dev, "error getting RESET GPIO\n");
return PTR_ERR(nt->reset_gpio);
@@ -952,7 +1162,10 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(bl);
}
bl->props.max_brightness = 255;
- bl->props.brightness = 255;
+ if (nt->conf->cmds & NT35510_CMD_CONTROL_DISPLAY)
+ bl->props.brightness = nt->conf->wrdisbv;
+ else
+ bl->props.brightness = 255;
bl->props.power = FB_BLANK_POWERDOWN;
nt->panel.backlight = bl;
}
@@ -1030,6 +1243,8 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
.flags = 0,
},
+ .mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .cmds = NT35510_CMD_CORRECT_GAMMA,
/* 0x09: AVDD = 5.6V */
.avdd = { 0x09, 0x09, 0x09 },
/* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
@@ -1038,6 +1253,10 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.avee = { 0x09, 0x09, 0x09 },
/* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
.bt2ctr = { 0x24, 0x24, 0x24 },
+ /* VBCLA: -2.5V, VBCLB: -2.5V, VBCLC: -2.5V */
+ .vcl = { 0x00, 0x00, 0x00 },
+ /* 0x24: CLCK = Hsync/2, BTN = -1 x VDDB */
+ .bt3ctr = { 0x24, 0x24, 0x24 },
/* 0x05 = 12V */
.vgh = { 0x05, 0x05, 0x05 },
/* 0x24: NCKA = Hsync/2, VGH = 2 x AVDD - AVEE */
@@ -1050,6 +1269,16 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.vgp = { 0x00, 0xA3, 0x00 },
/* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
.vgn = { 0x00, 0xA3, 0x00 },
+ /* VCMOFFSEL = VCOM voltage offset mode, VCM = 0V */
+ .vcmoff = { 0x00, 0x00 },
+ /* Enable TE, EoTP and RGB pixel format */
+ .dopctr = { NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+ NT35510_DOPCTR_0_N565, NT35510_DOPCTR_1_CTB },
+ .madctl = NT35510_ROTATE_0_SETTING,
+ /* 0x0A: SDT = 5 us */
+ .sdhdtctr = 0x0A,
+ /* EQ control for gate signals, 0x00 = 0 us */
+ .gseqctr = { 0x00, 0x00 },
/* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
.sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
/* SDVPCTR: Normal operation off color during v porch */
@@ -1073,8 +1302,89 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
};
+static const struct nt35510_config nt35510_frida_frd400b25025 = {
+ .width_mm = 52,
+ .height_mm = 86,
+ .mode = {
+ .clock = 23000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 34, /* HFP = 34 */
+ .hsync_end = 480 + 34 + 2, /* HSync = 2 */
+ .htotal = 480 + 34 + 2 + 34, /* HBP = 34 */
+ .vdisplay = 800,
+ .vsync_start = 800 + 15, /* VFP = 15 */
+ .vsync_end = 800 + 15 + 12, /* VSync = 12 */
+ .vtotal = 800 + 15 + 12 + 15, /* VBP = 15 */
+ .flags = 0,
+ },
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM,
+ .cmds = NT35510_CMD_CONTROL_DISPLAY,
+ /* 0x03: AVDD = 6.2V */
+ .avdd = { 0x03, 0x03, 0x03 },
+ /* 0x46: PCK = 2 x Hsync, BTP = 2.5 x VDDB */
+ .bt1ctr = { 0x46, 0x46, 0x46 },
+ /* 0x03: AVEE = -6.2V */
+ .avee = { 0x03, 0x03, 0x03 },
+ /* 0x36: PCK = 2 x Hsync, BTP = 2 x VDDB */
+ .bt2ctr = { 0x36, 0x36, 0x36 },
+ /* VBCLA: -2.5V, VBCLB: -2.5V, VBCLC: -3.5V */
+ .vcl = { 0x00, 0x00, 0x02 },
+ /* 0x26: CLCK = 2 x Hsync, BTN = -1 x VDDB */
+ .bt3ctr = { 0x26, 0x26, 0x26 },
+ /* 0x09 = 16V */
+ .vgh = { 0x09, 0x09, 0x09 },
+ /* 0x36: HCK = 2 x Hsync, VGH = 2 x AVDD - AVEE */
+ .bt4ctr = { 0x36, 0x36, 0x36 },
+ /* 0x08 = -10V */
+ .vgl = { 0x08, 0x08, 0x08 },
+ /* 0x26: LCK = 2 x Hsync, VGL = AVDD + VCL - AVDD */
+ .bt5ctr = { 0x26, 0x26, 0x26 },
+ /* VGMP: 0x080 = 4.6V, VGSP = 0V */
+ .vgp = { 0x00, 0x80, 0x00 },
+	/* VGMN: 0x080 = 4.6V, VGSN = 0V */
+ .vgn = { 0x00, 0x80, 0x00 },
+ /* VCMOFFSEL = VCOM voltage offset mode, VCM = -1V */
+ .vcmoff = { 0x00, 0x50 },
+ .dopctr = { NT35510_DOPCTR_0_RAMKP | NT35510_DOPCTR_0_DSITE |
+ NT35510_DOPCTR_0_DSIG | NT35510_DOPCTR_0_DSIM |
+ NT35510_DOPCTR_0_EOTP | NT35510_DOPCTR_0_N565, 0 },
+ .madctl = NT35510_ROTATE_180_SETTING,
+ /* 0x03: SDT = 1.5 us */
+ .sdhdtctr = 0x03,
+ /* EQ control for gate signals, 0x00 = 0 us */
+ .gseqctr = { 0x00, 0x00 },
+ /* SDEQCTR: source driver EQ mode 2, 1 us rise time on each step */
+ .sdeqctr = { 0x01, 0x02, 0x02, 0x02 },
+ /* SDVPCTR: Normal operation off color during v porch */
+ .sdvpctr = 0x01,
+ /* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+ .t1 = 0x0184,
+ /* VBP: vertical back porch toward the panel */
+ .vbp = 0x1C,
+ /* VFP: vertical front porch toward the panel */
+ .vfp = 0x1C,
+	/* PSEL: divide the 23MHz pixel clock by 1 (no clock downscaling) */
+ .psel = 0,
+ /* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+ .dpmctr12 = { 0x03, 0x00, 0x00, },
+ /* write display brightness */
+ .wrdisbv = 0x7f,
+ /* write control display */
+ .wrctrld = NT35510_WRCTRLD_BCTRL | NT35510_WRCTRLD_DD |
+ NT35510_WRCTRLD_BL,
+ /* write content adaptive brightness control */
+ .wrcabc = NT35510_WRCABC_STILL_MODE,
+ /* write CABC minimum brightness */
+ .wrcabcmb = 0xff,
+};
+
static const struct of_device_id nt35510_of_match[] = {
{
+ .compatible = "frida,frd400b25025",
+ .data = &nt35510_frida_frd400b25025,
+ },
+ {
.compatible = "hydis,hva40wv1",
.data = &nt35510_hydis_hva40wv1,
},
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index a189ce236328..18bd2ee71201 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -933,8 +933,7 @@ static int j606f_boe_init_sequence(struct panel_info *pinfo)
static const struct drm_display_mode elish_boe_modes[] = {
{
- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
- .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000,
+ .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 120 / 1000,
.hdisplay = 1600,
.hsync_start = 1600 + 60,
.hsync_end = 1600 + 60 + 8,
@@ -948,8 +947,7 @@ static const struct drm_display_mode elish_boe_modes[] = {
static const struct drm_display_mode elish_csot_modes[] = {
{
- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
- .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000,
+ .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 120 / 1000,
.hdisplay = 1600,
.hsync_start = 1600 + 200,
.hsync_end = 1600 + 200 + 40,
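For reviewers, the restored 120 Hz clocks above follow directly from the htotal/vtotal products; a back-of-the-envelope check (not part of the patch):

	/* elish BOE:  (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 120 / 1000
	 *           = 1728 * 2758 * 120 / 1000 = 571898 kHz
	 * elish CSOT: (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 120 / 1000
	 *           = 1892 * 2758 * 120 / 1000 = 626176 kHz
	 */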
@@ -1270,6 +1268,8 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ pinfo->panel.prepare_prev_first = true;
+
if (pinfo->desc->has_dcs_backlight) {
pinfo->panel.backlight = nt36523_create_backlight(dsi);
if (IS_ERR(pinfo->panel.backlight))
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
new file mode 100644
index 000000000000..cb7406d74466
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+ "vddi",
+ "avdd",
+ "avee",
+};
+
+static const unsigned long regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000,
+};
+
+static const unsigned long regulator_disable_loads[] = {
+ 80,
+ 100,
+ 100,
+};
+
+struct panel_desc {
+ const struct drm_display_mode *display_mode;
+ u32 width_mm;
+ u32 height_mm;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+ const char *panel_name;
+ int (*init_sequence)(struct mipi_dsi_device *dsi);
+};
+
+struct nt36672e_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[3];
+ const struct panel_desc *desc;
+};
+
+static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt36672e_panel, panel);
+}
+
+static int nt36672e_1080x2408_60hz_init(struct mipi_dsi_device *dsi)
+{
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x89, 0x28, 0x00, 0x08, 0x00, 0xaa, 0x02,
+ 0x0e, 0x00, 0x2b, 0x00, 0x07, 0x0d, 0xb7, 0x0c, 0xb7);
+
+ mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x1b, 0xa0);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x01, 0x66);
+ mipi_dsi_dcs_write_seq(dsi, 0x06, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x07, 0x38);
+ mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, 0x69, 0x91);
+ mipi_dsi_dcs_write_seq(dsi, 0x95, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0x96, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf5, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xf6, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x64);
+ mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x54);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x01, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x03, 0x0c);
+ mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq(dsi, 0x08, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, 0x09, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x2d);
+ mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x2c);
+ mipi_dsi_dcs_write_seq(dsi, 0x11, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, 0x12, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x13, 0x15);
+ mipi_dsi_dcs_write_seq(dsi, 0x15, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x16, 0x16);
+ mipi_dsi_dcs_write_seq(dsi, 0x17, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1d);
+ mipi_dsi_dcs_write_seq(dsi, 0x20, 0x2f);
+ mipi_dsi_dcs_write_seq(dsi, 0x21, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2d);
+ mipi_dsi_dcs_write_seq(dsi, 0x23, 0x2c);
+ mipi_dsi_dcs_write_seq(dsi, 0x29, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x15);
+ mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x30, 0x16);
+ mipi_dsi_dcs_write_seq(dsi, 0x31, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, 0x32, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x34, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0x35, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x36, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x4f, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x53, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x71, 0x30);
+ mipi_dsi_dcs_write_seq(dsi, 0x79, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x7a, 0x82);
+ mipi_dsi_dcs_write_seq(dsi, 0x7b, 0x8f);
+ mipi_dsi_dcs_write_seq(dsi, 0x7d, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x80, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x81, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x82, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0x85, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x86, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x87, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x90, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0x92, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0x93, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x94, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x95, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x9c, 0xf4);
+ mipi_dsi_dcs_write_seq(dsi, 0x9d, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xa0, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xa2, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xa3, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xa4, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xa5, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xc0);
+ mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xd9, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xe9, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x18, 0x22);
+ mipi_dsi_dcs_write_seq(dsi, 0x19, 0xe4);
+ mipi_dsi_dcs_write_seq(dsi, 0x21, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x66, 0xd8);
+ mipi_dsi_dcs_write_seq(dsi, 0x68, 0x50);
+ mipi_dsi_dcs_write_seq(dsi, 0x69, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0x6b, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x0d);
+ mipi_dsi_dcs_write_seq(dsi, 0x6e, 0x48);
+ mipi_dsi_dcs_write_seq(dsi, 0x72, 0x41);
+ mipi_dsi_dcs_write_seq(dsi, 0x73, 0x4a);
+ mipi_dsi_dcs_write_seq(dsi, 0x74, 0xd0);
+ mipi_dsi_dcs_write_seq(dsi, 0x77, 0x62);
+ mipi_dsi_dcs_write_seq(dsi, 0x79, 0x7e);
+ mipi_dsi_dcs_write_seq(dsi, 0x7d, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x15);
+ mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x4d);
+ mipi_dsi_dcs_write_seq(dsi, 0xcf, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xef, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x84);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x81, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x83, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x85, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x86, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x87, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x88, 0x05);
+ mipi_dsi_dcs_write_seq(dsi, 0x8a, 0x1a);
+ mipi_dsi_dcs_write_seq(dsi, 0x8b, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x8c, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x42);
+ mipi_dsi_dcs_write_seq(dsi, 0x8f, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x90, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x91, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0x9a, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0x9b, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x9c, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x9d, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x9e, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x01, 0x68);
+ mipi_dsi_dcs_write_seq(dsi, 0x20, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, 0x21, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0x25, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, 0x26, 0x94);
+ mipi_dsi_dcs_write_seq(dsi, 0x6e, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x70, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x71, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x72, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x75, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x76, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x77, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x7d, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x67);
+ mipi_dsi_dcs_write_seq(dsi, 0x80, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0x82, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x83, 0x67);
+ mipi_dsi_dcs_write_seq(dsi, 0x88, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x89, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xa5, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xa6, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0xa7, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xd3);
+ mipi_dsi_dcs_write_seq(dsi, 0xeb, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0xec, 0x28);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x00, 0x91);
+ mipi_dsi_dcs_write_seq(dsi, 0x03, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0x07, 0x50);
+ mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x70);
+ mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x11, 0xe0);
+ mipi_dsi_dcs_write_seq(dsi, 0x15, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x16, 0xa4);
+ mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x78);
+ mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0x20, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0x28, 0xfd);
+ mipi_dsi_dcs_write_seq(dsi, 0x29, 0x12);
+ mipi_dsi_dcs_write_seq(dsi, 0x2a, 0xe1);
+ mipi_dsi_dcs_write_seq(dsi, 0x2d, 0x0a);
+ mipi_dsi_dcs_write_seq(dsi, 0x30, 0x49);
+ mipi_dsi_dcs_write_seq(dsi, 0x33, 0x96);
+ mipi_dsi_dcs_write_seq(dsi, 0x34, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, 0x35, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x36, 0xde);
+ mipi_dsi_dcs_write_seq(dsi, 0x37, 0xf9);
+ mipi_dsi_dcs_write_seq(dsi, 0x38, 0x45);
+ mipi_dsi_dcs_write_seq(dsi, 0x39, 0xd9);
+ mipi_dsi_dcs_write_seq(dsi, 0x3a, 0x49);
+ mipi_dsi_dcs_write_seq(dsi, 0x4a, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, 0x7a, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x7b, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0x7f, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, 0x83, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0xa4);
+ mipi_dsi_dcs_write_seq(dsi, 0x87, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x88, 0x78);
+ mipi_dsi_dcs_write_seq(dsi, 0x89, 0x23);
+ mipi_dsi_dcs_write_seq(dsi, 0x8b, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x8c, 0x7d);
+ mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x7d);
+ mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x7d);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
+ 0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
+ mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
+ 0x7a, 0x01, 0xa9, 0x01, 0xf2, 0x02, 0x2d, 0x02, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x64, 0x02, 0xa3, 0x02, 0xca, 0x03,
+ 0x00, 0x03, 0x1e, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x71, 0x00, 0x90, 0x00, 0xa7, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xe2, 0x01, 0x1a, 0x01, 0x43, 0x01,
+ 0x83, 0x01, 0xb2, 0x01, 0xfa, 0x02, 0x34, 0x02, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x6b, 0x02, 0xa8, 0x02, 0xd0, 0x03,
+ 0x03, 0x03, 0x21, 0x03, 0x4d, 0x03, 0x5b, 0x03, 0x6b);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x7e, 0x03, 0x94, 0x03, 0xac, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x72, 0x00, 0x92, 0x00, 0xa8, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xe2, 0x01, 0x18, 0x01, 0x42, 0x01,
+ 0x81, 0x01, 0xaf, 0x01, 0xf5, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x68, 0x02, 0xa6, 0x02, 0xcd, 0x03,
+ 0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
+ 0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
+ mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
+ 0x7a, 0x01, 0xa9, 0x01, 0xf2, 0x02, 0x2d, 0x02, 0x2e);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x64, 0x02, 0xa3, 0x02, 0xca, 0x03,
+ 0x00, 0x03, 0x1e, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x71, 0x00, 0x90, 0x00, 0xa7, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xe2, 0x01, 0x1a, 0x01, 0x43, 0x01,
+ 0x83, 0x01, 0xb2, 0x01, 0xfa, 0x02, 0x34, 0x02, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x6b, 0x02, 0xa8, 0x02, 0xd0, 0x03,
+ 0x03, 0x03, 0x21, 0x03, 0x4d, 0x03, 0x5b, 0x03, 0x6b);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x7e, 0x03, 0x94, 0x03, 0xac, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x51, 0x00,
+ 0x72, 0x00, 0x92, 0x00, 0xa8, 0x00, 0xbf, 0x00, 0xd1);
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xe2, 0x01, 0x18, 0x01, 0x42, 0x01,
+ 0x81, 0x01, 0xaf, 0x01, 0xf5, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x68, 0x02, 0xa6, 0x02, 0xcd, 0x03,
+ 0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
+ 0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2c);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x62, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0x6b, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x36);
+ mipi_dsi_dcs_write_seq(dsi, 0x53, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x54, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x55, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x56, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x58, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0x59, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
+
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x51, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, 0x53, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, 0x55, 0x01);
+
+ return 0;
+}
+
+static int nt36672e_power_on(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_enable_loads[i]);
+ if (ret) {
+ dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
+ ctx->supplies[i].supply, ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "regulator bulk enable failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * The reset sequence of the nt36672e panel requires the panel to be taken
+	 * out of reset for 10ms, then held in reset for 10ms, and then released again.
+ */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int nt36672e_power_off(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+ int i;
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_disable_loads[i]);
+ if (ret) {
+ dev_err(&dsi->dev, "regulator set load failed for supply %s: %d\n",
+ ctx->supplies[i].supply, ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ dev_err(&dsi->dev, "regulator bulk disable failed: %d\n", ret);
+
+ return ret;
+}
+
+static int nt36672e_on(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ const struct panel_desc *desc = ctx->desc;
+ int ret = 0;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ if (desc->init_sequence) {
+ ret = desc->init_sequence(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "panel init sequence failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(100);
+
+ return 0;
+}
+
+static int nt36672e_off(struct nt36672e_panel *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(60);
+
+ return 0;
+}
+
+static int nt36672e_panel_prepare(struct drm_panel *panel)
+{
+ struct nt36672e_panel *ctx = to_nt36672e_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+
+ ret = nt36672e_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = nt36672e_on(ctx);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to initialize panel: %d\n", ret);
+ if (nt36672e_power_off(ctx))
+ dev_err(&dsi->dev, "power off failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt36672e_panel_unprepare(struct drm_panel *panel)
+{
+ struct nt36672e_panel *ctx = to_nt36672e_panel(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ int ret = 0;
+
+ ret = nt36672e_off(ctx);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to un-initialize panel: %d\n", ret);
+
+ ret = nt36672e_power_off(ctx);
+ if (ret < 0)
+ dev_err(&dsi->dev, "power off failed: %d\n", ret);
+
+ return 0;
+}
+
+static const struct drm_display_mode nt36672e_1080x2408_60hz = {
+ .name = "1080x2408",
+ .clock = 181690,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 76,
+ .hsync_end = 1080 + 76 + 12,
+ .htotal = 1080 + 76 + 12 + 56,
+ .vdisplay = 2408,
+ .vsync_start = 2408 + 46,
+ .vsync_end = 2408 + 46 + 10,
+ .vtotal = 2408 + 46 + 10 + 10,
+ .flags = 0,
+};
+
+static const struct panel_desc nt36672e_panel_desc = {
+ .display_mode = &nt36672e_1080x2408_60hz,
+ .width_mm = 74,
+ .height_mm = 131,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .panel_name = "nt36672e fhd plus panel",
+ .init_sequence = nt36672e_1080x2408_60hz_init,
+};
+
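The "60hz" in the mode and init-sequence names above is consistent with the mode numbers; a quick check (not part of the patch):

	/* nt36672e_1080x2408_60hz:
	 * htotal = 1080 + 76 + 12 + 56 = 1224, vtotal = 2408 + 46 + 10 + 10 = 2474,
	 * 1224 * 2474 * 60 / 1000 = 181690 kHz, matching .clock above (~60 Hz)
	 */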
+static int nt36672e_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct nt36672e_panel *ctx = to_nt36672e_panel(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->display_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = ctx->desc->width_mm;
+ connector->display_info.height_mm = ctx->desc->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs nt36672e_drm_funcs = {
+ .prepare = nt36672e_panel_prepare,
+ .unprepare = nt36672e_panel_unprepare,
+ .get_modes = nt36672e_panel_get_modes,
+};
+
+static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt36672e_panel *ctx;
+ int i, ret = 0;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->desc = of_device_get_match_data(dev);
+ if (!ctx->desc) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = ctx->desc->lanes;
+ dsi->format = ctx->desc->format;
+ dsi->mode_flags = ctx->desc->mode_flags;
+
+ drm_panel_init(&ctx->panel, dev, &nt36672e_drm_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ ctx->panel.prepare_prev_first = true;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+ return ret;
+}
+
+static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id nt36672e_of_match[] = {
+ {
+ .compatible = "novatek,nt36672e",
+ .data = &nt36672e_panel_desc,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt36672e_of_match);
+
+static struct mipi_dsi_driver nt36672e_panel_driver = {
+ .driver = {
+ .name = "panel-novatek-nt36672e",
+ .of_match_table = nt36672e_of_match,
+ },
+ .probe = nt36672e_panel_probe,
+ .remove = nt36672e_panel_remove,
+};
+module_mipi_dsi_driver(nt36672e_panel_driver);
+
+MODULE_AUTHOR("Ritesh Kumar <quic_riteshk@quicinc.com>");
+MODULE_DESCRIPTION("Novatek NT36672E DSI Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 5703f4712d96..76c2a8f6718c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -72,6 +72,7 @@ static int atana33xc20_suspend(struct device *dev)
if (p->el3_was_on)
atana33xc20_wait(p->el_on3_off_time, 150);
+ drm_dp_dpcd_set_powered(p->aux, false);
ret = regulator_disable(p->supply);
if (ret)
return ret;
@@ -93,6 +94,7 @@ static int atana33xc20_resume(struct device *dev)
ret = regulator_enable(p->supply);
if (ret)
return ret;
+ drm_dp_dpcd_set_powered(p->aux, true);
p->powered_on_time = ktime_get_boottime();
if (p->no_hpd) {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index d493ee735c73..20e3df1c59d4 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1367,6 +1367,23 @@ static const struct drm_display_mode boe_bp101wx1_100_mode = {
.vtotal = 800 + 6 + 8 + 2,
};
+static const struct panel_desc boe_bp082wx1_100 = {
+ .modes = &boe_bp101wx1_100_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 177,
+ .height = 110,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct panel_desc boe_bp101wx1_100 = {
.modes = &boe_bp101wx1_100_mode,
.num_modes = 1,
@@ -1980,6 +1997,33 @@ static const struct panel_desc edt_etml0700y5dha = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing edt_etml1010g3dra_timing = {
+ .pixelclock = { 66300000, 72400000, 78900000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 12, 72, 132 },
+ .hback_porch = { 86, 86, 86 },
+ .hsync_len = { 2, 2, 2 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 15, 49 },
+ .vback_porch = { 21, 21, 21 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc edt_etml1010g3dra = {
+ .timings = &edt_etml1010g3dra_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode edt_etmv570g2dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -2754,21 +2798,21 @@ static const struct panel_desc lemaker_bl035_rgb_002 = {
.bus_flags = DRM_BUS_FLAG_DE_LOW,
};
-static const struct drm_display_mode lg_lb070wv8_mode = {
- .clock = 33246,
- .hdisplay = 800,
- .hsync_start = 800 + 88,
- .hsync_end = 800 + 88 + 80,
- .htotal = 800 + 88 + 80 + 88,
- .vdisplay = 480,
- .vsync_start = 480 + 10,
- .vsync_end = 480 + 10 + 25,
- .vtotal = 480 + 10 + 25 + 10,
+static const struct display_timing lg_lb070wv8_timing = {
+ .pixelclock = { 31950000, 33260000, 34600000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 88, 88, 88 },
+ .hback_porch = { 88, 88, 88 },
+ .hsync_len = { 80, 80, 80 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 10, 10, 10 },
+ .vsync_len = { 25, 25, 25 },
};
static const struct panel_desc lg_lb070wv8 = {
- .modes = &lg_lb070wv8_mode,
- .num_modes = 1,
+ .timings = &lg_lb070wv8_timing,
+ .num_timings = 1,
.bpc = 8,
.size = {
.width = 151,
@@ -3516,14 +3560,15 @@ static const struct display_timing rocktech_rk043fn48h_timing = {
.pixelclock = { 6000000, 9000000, 12000000 },
.hactive = { 480, 480, 480 },
.hback_porch = { 8, 43, 43 },
- .hfront_porch = { 2, 8, 8 },
+ .hfront_porch = { 2, 8, 10 },
.hsync_len = { 1, 1, 1 },
.vactive = { 272, 272, 272 },
- .vback_porch = { 2, 12, 12 },
+ .vback_porch = { 2, 12, 26 },
.vfront_porch = { 1, 4, 4 },
.vsync_len = { 1, 10, 10 },
.flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
};
static const struct panel_desc rocktech_rk043fn48h = {
@@ -4346,6 +4391,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,bp082wx1-100",
+ .data = &boe_bp082wx1_100,
+ }, {
.compatible = "boe,bp101wx1-100",
.data = &boe_bp101wx1_100,
}, {
@@ -4424,6 +4472,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etml0700y5dha",
.data = &edt_etml0700y5dha,
}, {
+ .compatible = "edt,etml1010g3dra",
+ .data = &edt_etml1010g3dra,
+ }, {
.compatible = "edt,etmv570g2dhu",
.data = &edt_etmv570g2dhu,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index b55bafd1a8be..a3e142f156d5 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -62,6 +62,7 @@ struct st7703 {
struct dentry *debugfs;
const struct st7703_panel_desc *desc;
+ enum drm_panel_orientation orientation;
};
struct st7703_panel_desc {
@@ -521,6 +522,96 @@ static const struct st7703_panel_desc rgb30panel_desc = {
.init_sequence = rgb30panel_init_sequence,
};
+static int rgb10max3_panel_init_sequence(struct st7703 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* Init sequence extracted from Powkiddy RGB10MAX3 BSP kernel. */
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETAPID, 0x00, 0x00, 0x00, 0xda,
+ 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0xc8, 0x02, 0x30);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x10, 0x10, 0x28,
+ 0x28, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x78, 0x78);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25, 0x22, 0xf0,
+ 0x63);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x33, 0x81, 0x05, 0xf9,
+ 0x0e, 0x0e, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x44, 0x25, 0x00, 0x90, 0x0a, 0x00,
+ 0x00, 0x01, 0x4f, 0x01, 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x47);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50,
+ 0x00, 0x00, 0x12, 0x70, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x25, 0x00, 0x32,
+ 0x32, 0x77, 0xe1, 0xff, 0xff, 0xcc, 0xcc, 0x77,
+ 0x77);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETECO, 0x82, 0x00, 0xbf, 0xff,
+ 0x00, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETIO, 0xb8, 0x00, 0x0a, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCABC, 0x10, 0x40, 0x1e,
+ 0x02);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x04, 0x07,
+ 0x2a, 0x39, 0x3f, 0x36, 0x31, 0x06, 0x0b, 0x0e,
+ 0x12, 0x14, 0x12, 0x13, 0x0f, 0x17, 0x00, 0x04,
+ 0x07, 0x2a, 0x39, 0x3f, 0x36, 0x31, 0x06, 0x0b,
+ 0x0e, 0x12, 0x14, 0x12, 0x13, 0x0f, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x03, 0x03, 0x03, 0x03,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff, 0x80,
+ 0xc0, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0xc8, 0x10, 0x08, 0x00,
+ 0x00, 0x41, 0xf8, 0x12, 0x31, 0x23, 0x37, 0x86,
+ 0x11, 0xc8, 0x37, 0x2a, 0x00, 0x00, 0x0c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
+ 0x88, 0x20, 0x46, 0x02, 0x88, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0xff, 0x88, 0x31, 0x57, 0x13, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x00, 0x1a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8f, 0x13, 0x31, 0x75, 0x88, 0x88, 0x88, 0x88,
+ 0x88, 0x88, 0xf8, 0x8f, 0x02, 0x20, 0x64, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0xf8, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_EF, 0xff, 0xff, 0x01);
+
+ return 0;
+}
+
+static const struct drm_display_mode rgb10max3_panel_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 10,
+ .htotal = 720 + 40 + 10 + 40,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 4,
+ .vtotal = 1280 + 16 + 4 + 14,
+ .clock = 63800,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 62,
+ .height_mm = 109,
+};
+
+static const struct st7703_panel_desc rgb10max3_panel_desc = {
+ .mode = &rgb10max3_panel_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = rgb10max3_panel_init_sequence,
+};
+
static int st7703_enable(struct drm_panel *panel)
{
struct st7703 *ctx = panel_to_st7703(panel);
@@ -653,12 +744,20 @@ static int st7703_get_modes(struct drm_panel *panel,
return 1;
}
+static enum drm_panel_orientation st7703_get_orientation(struct drm_panel *panel)
+{
+ struct st7703 *st7703 = panel_to_st7703(panel);
+
+ return st7703->orientation;
+}
+
static const struct drm_panel_funcs st7703_drm_funcs = {
.disable = st7703_disable,
.unprepare = st7703_unprepare,
.prepare = st7703_prepare,
.enable = st7703_enable,
.get_modes = st7703_get_modes,
+ .get_orientation = st7703_get_orientation,
};
static int allpixelson_set(void *data, u64 val)
@@ -727,6 +826,10 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
"Failed to request iovcc regulator\n");
+ ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation);
+ if (ret < 0)
+ return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
+
drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -784,6 +887,7 @@ static void st7703_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id st7703_of_match[] = {
{ .compatible = "anbernic,rg353v-panel-v2", .data = &rg353v2_desc },
+ { .compatible = "powkiddy,rgb10max3-panel", .data = &rgb10max3_panel_desc },
{ .compatible = "powkiddy,rgb30-panel", .data = &rgb30panel_desc },
{ .compatible = "rocktech,jh057n00900", .data = &jh057n00900_panel_desc },
{ .compatible = "xingbangda,xbd599", .data = &xbd599_desc },
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index fbb73464de33..493f2a6076f8 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -322,6 +322,7 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ctx->panel.prepare_prev_first = true;
drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index a23407b9f6fb..540099253e1b 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -287,6 +287,7 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ctx->panel.prepare_prev_first = true;
drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index ad24cdf1d992..20fe1d2c0aaf 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -9,7 +9,6 @@ config DRM_PL111
select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the PL111 CLCD controller.
If M is selected the module will be called pl111_drm.
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 06a58dad5f5c..1e46b0a6e478 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -66,7 +66,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
pflag |= TTM_PL_FLAG_TOPDOWN;
qbo->placement.placement = qbo->placements;
- qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM) {
qbo->placements[c].mem_type = TTM_PL_VRAM;
qbo->placements[c++].flags = pflag;
@@ -86,7 +85,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
qbo->placements[c++].flags = 0;
}
qbo->placement.num_placement = c;
- qbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
qbo->placements[i].fpfn = 0;
qbo->placements[i].lpfn = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 1a82629bce3f..765a144cea14 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -60,9 +60,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
qbo = to_qxl_bo(bo);
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
index e8fae5c77514..2bfd6d0ff050 100644
--- a/drivers/gpu/drm/radeon/atom-bits.h
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr)
#define CU8(ptr) get_u8(ctx->bios, (ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
- return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+ return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
}
#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
#define CU16(ptr) get_u16(ctx->bios, (ptr))
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ceb6d772ef94..5bc3e6b41c34 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -60,6 +60,7 @@
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
+ int ps_size, ws_size;
int ps_shift;
uint16_t start;
unsigned last_jump;
@@ -68,8 +69,8 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
static uint32_t atom_arg_mask[8] = {
0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
@@ -221,7 +222,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
- val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ if (idx < ctx->ps_size)
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ else
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -259,7 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
val = gctx->reg_block;
break;
default:
- val = ctx->ws[idx];
+ if (idx < ctx->ws_size)
+ val = ctx->ws[idx];
+ else
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
}
break;
case ATOM_ARG_ID:
@@ -494,6 +501,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
idx = U8(*ptr);
(*ptr)++;
DEBUG("PS[0x%02X]", idx);
+ if (idx >= ctx->ps_size) {
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
+ return;
+ }
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
@@ -526,6 +537,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
gctx->reg_block = val;
break;
default:
+ if (idx >= ctx->ws_size) {
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
+ return;
+ }
ctx->ws[idx] = val;
}
break;
@@ -623,7 +638,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
if (r) {
ctx->abort = true;
}
@@ -1152,7 +1167,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1174,12 +1189,16 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
- if (ws)
+ if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
- else
+ ectx.ws_size = ws;
+ } else {
ectx.ws = NULL;
+ ectx.ws_size = 0;
+ }
debug_depth++;
while (1) {
@@ -1212,7 +1231,7 @@ free:
return ret;
}
-int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
@@ -1228,16 +1247,16 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
- r = atom_execute_table_locked(ctx, index, params);
+ r = atom_execute_table_locked(ctx, index, params, params_size);
mutex_unlock(&ctx->mutex);
return r;
}
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
mutex_lock(&ctx->scratch_mutex);
- r = atom_execute_table_scratch_unlocked(ctx, index, params);
+ r = atom_execute_table_scratch_unlocked(ctx, index, params, params_size);
mutex_unlock(&ctx->scratch_mutex);
return r;
}
@@ -1335,7 +1354,7 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
if (ret)
return ret;
@@ -1343,7 +1362,7 @@ int atom_asic_init(struct atom_context *ctx)
if (rdev->family < CHIP_R600) {
if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
- atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+ atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps, 16);
}
return ret;
}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 5de0563b63d2..5bf06c0bd6ff 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -145,8 +145,8 @@ struct atom_context {
extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
-int atom_execute_table(struct atom_context *, int, uint32_t *);
-int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
+int atom_execute_table(struct atom_context *, int, uint32_t *, int);
+int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *, int);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
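The atom.c and atom.h hunks above thread a caller-supplied parameter-space size (and the allocated workspace size) through the ATOM interpreter so that every PS/WS read and write is range-checked before the array is touched; the atombios_*.c hunks below simply pass sizeof(args) at each call site. A minimal standalone sketch of the same guard pattern, assuming illustrative names rather than the kernel code:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct exec_ctx {
    uint32_t *ps;   /* parameter space handed in by the caller  */
    int ps_size;    /* number of valid uint32_t entries in ps[] */
};

/* Read ps[idx] only if the index is inside the caller-declared size. */
static uint32_t get_ps(const struct exec_ctx *ctx, int idx)
{
    if (idx < ctx->ps_size)
        return ctx->ps[idx];

    fprintf(stderr, "PS index out of range: %d >= %d\n", idx, ctx->ps_size);
    return 0;
}

/* Write ps[idx] with the same guard; out-of-range stores are dropped. */
static void put_ps(struct exec_ctx *ctx, int idx, uint32_t val)
{
    if (idx >= ctx->ps_size) {
        fprintf(stderr, "PS index out of range: %d >= %d\n", idx, ctx->ps_size);
        return;
    }
    ctx->ps[idx] = val;
}

int main(void)
{
    uint32_t params[4] = { 1, 2, 3, 4 };
    struct exec_ctx ctx = { params, 4 };

    put_ps(&ctx, 2, 42);
    printf("%" PRIu32 "\n", get_ps(&ctx, 2));   /* 42 */
    printf("%" PRIu32 "\n", get_ps(&ctx, 16));  /* rejected, returns 0 */
    return 0;
}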
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ade13173921b..9b3a3a9d60e2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -77,7 +77,7 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_scaler_setup(struct drm_crtc *crtc)
@@ -157,7 +157,7 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
break;
}
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if ((is_tv || is_cv)
&& rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
atom_rv515_force_tv_scaler(rdev, radeon_crtc);
@@ -178,7 +178,7 @@ static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = lock;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
@@ -194,7 +194,7 @@ static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
@@ -210,7 +210,7 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static const u32 vga_control_regs[6] =
@@ -242,7 +242,7 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if (ASIC_IS_DCE8(rdev))
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
@@ -261,7 +261,7 @@ static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
args.ucDispPipeId = radeon_crtc->crtc_id;
args.ucEnable = state;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -343,7 +343,7 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_crtc_set_timing(struct drm_crtc *crtc,
@@ -389,7 +389,7 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
@@ -546,7 +546,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
args.lvds_ss.ucEnable = enable;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union adjust_pixel_clock {
@@ -692,7 +692,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
@@ -725,7 +725,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucExtTransmitterID = 0;
atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -809,7 +809,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
@@ -949,7 +949,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 009333645438..fca8b08535a5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -112,7 +112,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
- atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*ack = args.v1.ucReplyStatus;
@@ -354,7 +354,7 @@ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
args.ucLaneNum = lane_num;
args.ucStatus = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return args.ucStatus;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 6e537c5bd295..2bff0d9e20f5 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -119,12 +119,12 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
if (dig->backlight_level == 0) {
args.ucAction = ATOM_LCD_BLOFF;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
} else {
args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -389,7 +389,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
}
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -445,7 +445,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -546,7 +546,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union lvds_encoder_control {
@@ -664,7 +664,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int
@@ -979,7 +979,7 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -1361,7 +1361,7 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void
@@ -1397,7 +1397,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
args.v1.ucAction = action;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1519,7 +1519,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void
@@ -1554,7 +1554,7 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
args.ucEnable = ATOM_ENABLE;
args.ucCRTC = radeon_crtc->crtc_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
WREG32(reg, temp);
}
@@ -1618,10 +1618,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
WREG32(RADEON_BIOS_3_SCRATCH, reg);
} else
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (rdev->mode_info.bl_encoder) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -1629,7 +1629,7 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
} else {
args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
}
break;
@@ -1637,10 +1637,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLOFF;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
break;
}
@@ -1983,7 +1983,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* update scratch regs with new routing */
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
@@ -2311,7 +2311,7 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return true;
} else
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index ab4d21072191..730f0b25312b 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -78,7 +78,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 4e64ed38c439..70931b04bbac 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -53,8 +53,7 @@
extern int ni_mc_load_microcode(struct radeon_device *rdev);
//********* BARTS **************//
-static const u32 barts_cgcg_cgls_default[] =
-{
+static const u32 barts_cgcg_cgls_default[] = {
/* Register, Value, Mask bits */
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -107,8 +106,7 @@ static const u32 barts_cgcg_cgls_default[] =
};
#define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32))
-static const u32 barts_cgcg_cgls_disable[] =
-{
+static const u32 barts_cgcg_cgls_disable[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -162,8 +160,7 @@ static const u32 barts_cgcg_cgls_disable[] =
};
#define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32))
-static const u32 barts_cgcg_cgls_enable[] =
-{
+static const u32 barts_cgcg_cgls_enable[] = {
/* 0x0000c124, 0x84180000, 0x00180000, */
0x00000644, 0x000f7892, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
@@ -217,8 +214,7 @@ static const u32 barts_cgcg_cgls_enable[] =
};
#define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32))
-static const u32 barts_mgcg_default[] =
-{
+static const u32 barts_mgcg_default[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00600100, 0xffffffff,
@@ -366,8 +362,7 @@ static const u32 barts_mgcg_default[] =
};
#define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32))
-static const u32 barts_mgcg_disable[] =
-{
+static const u32 barts_mgcg_disable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
@@ -381,8 +376,7 @@ static const u32 barts_mgcg_disable[] =
};
#define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32))
-static const u32 barts_mgcg_enable[] =
-{
+static const u32 barts_mgcg_enable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -397,8 +391,7 @@ static const u32 barts_mgcg_enable[] =
#define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32))
//********* CAICOS **************//
-static const u32 caicos_cgcg_cgls_default[] =
-{
+static const u32 caicos_cgcg_cgls_default[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -450,8 +443,7 @@ static const u32 caicos_cgcg_cgls_default[] =
};
#define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32))
-static const u32 caicos_cgcg_cgls_disable[] =
-{
+static const u32 caicos_cgcg_cgls_disable[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -505,8 +497,7 @@ static const u32 caicos_cgcg_cgls_disable[] =
};
#define CAICOS_CGCG_CGLS_DISABLE_LENGTH sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32))
-static const u32 caicos_cgcg_cgls_enable[] =
-{
+static const u32 caicos_cgcg_cgls_enable[] = {
/* 0x0000c124, 0x84180000, 0x00180000, */
0x00000644, 0x000f7892, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
@@ -560,8 +551,7 @@ static const u32 caicos_cgcg_cgls_enable[] =
};
#define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32))
-static const u32 caicos_mgcg_default[] =
-{
+static const u32 caicos_mgcg_default[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00600100, 0xffffffff,
@@ -640,8 +630,7 @@ static const u32 caicos_mgcg_default[] =
};
#define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32))
-static const u32 caicos_mgcg_disable[] =
-{
+static const u32 caicos_mgcg_disable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
@@ -655,8 +644,7 @@ static const u32 caicos_mgcg_disable[] =
};
#define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32))
-static const u32 caicos_mgcg_enable[] =
-{
+static const u32 caicos_mgcg_enable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -671,8 +659,7 @@ static const u32 caicos_mgcg_enable[] =
#define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32))
//********* TURKS **************//
-static const u32 turks_cgcg_cgls_default[] =
-{
+static const u32 turks_cgcg_cgls_default[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -724,8 +711,7 @@ static const u32 turks_cgcg_cgls_default[] =
};
#define TURKS_CGCG_CGLS_DEFAULT_LENGTH sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32))
-static const u32 turks_cgcg_cgls_disable[] =
-{
+static const u32 turks_cgcg_cgls_disable[] = {
0x000008f8, 0x00000010, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
0x000008f8, 0x00000011, 0xffffffff,
@@ -779,8 +765,7 @@ static const u32 turks_cgcg_cgls_disable[] =
};
#define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32))
-static const u32 turks_cgcg_cgls_enable[] =
-{
+static const u32 turks_cgcg_cgls_enable[] = {
/* 0x0000c124, 0x84180000, 0x00180000, */
0x00000644, 0x000f7892, 0x001f4080,
0x000008f8, 0x00000010, 0xffffffff,
@@ -835,8 +820,7 @@ static const u32 turks_cgcg_cgls_enable[] =
#define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32))
// These are the sequences for turks_mgcg_shls
-static const u32 turks_mgcg_default[] =
-{
+static const u32 turks_mgcg_default[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x00005448, 0x00000100, 0xffffffff,
0x000055e4, 0x00600100, 0xffffffff,
@@ -935,8 +919,7 @@ static const u32 turks_mgcg_default[] =
};
#define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32))
-static const u32 turks_mgcg_disable[] =
-{
+static const u32 turks_mgcg_disable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0xffffffff, 0xffffffff,
@@ -950,8 +933,7 @@ static const u32 turks_mgcg_disable[] =
};
#define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32))
-static const u32 turks_mgcg_enable[] =
-{
+static const u32 turks_mgcg_enable[] = {
0x0000802c, 0xc0000000, 0xffffffff,
0x000008f8, 0x00000000, 0xffffffff,
0x000008fc, 0x00000000, 0xffffffff,
@@ -972,8 +954,7 @@ static const u32 turks_mgcg_enable[] =
//********* BARTS **************//
-static const u32 barts_sysls_default[] =
-{
+static const u32 barts_sysls_default[] = {
/* Register, Value, Mask bits */
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
@@ -993,8 +974,7 @@ static const u32 barts_sysls_default[] =
};
#define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32))
-static const u32 barts_sysls_disable[] =
-{
+static const u32 barts_sysls_disable[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x00041401, 0xffffffff,
@@ -1013,8 +993,7 @@ static const u32 barts_sysls_disable[] =
};
#define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32))
-static const u32 barts_sysls_enable[] =
-{
+static const u32 barts_sysls_enable[] = {
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1034,8 +1013,7 @@ static const u32 barts_sysls_enable[] =
#define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32))
//********* CAICOS **************//
-static const u32 caicos_sysls_default[] =
-{
+static const u32 caicos_sysls_default[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1053,8 +1031,7 @@ static const u32 caicos_sysls_default[] =
};
#define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32))
-static const u32 caicos_sysls_disable[] =
-{
+static const u32 caicos_sysls_disable[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x00041401, 0xffffffff,
@@ -1072,8 +1049,7 @@ static const u32 caicos_sysls_disable[] =
};
#define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32))
-static const u32 caicos_sysls_enable[] =
-{
+static const u32 caicos_sysls_enable[] = {
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1092,8 +1068,7 @@ static const u32 caicos_sysls_enable[] =
#define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32))
//********* TURKS **************//
-static const u32 turks_sysls_default[] =
-{
+static const u32 turks_sysls_default[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1112,8 +1087,7 @@ static const u32 turks_sysls_default[] =
};
#define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32))
-static const u32 turks_sysls_disable[] =
-{
+static const u32 turks_sysls_disable[] = {
0x000055e8, 0x00000000, 0xffffffff,
0x0000d0bc, 0x00000000, 0xffffffff,
0x000015c0, 0x00041401, 0xffffffff,
@@ -1132,8 +1106,7 @@ static const u32 turks_sysls_disable[] =
};
#define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32))
-static const u32 turks_sysls_enable[] =
-{
+static const u32 turks_sysls_enable[] = {
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
0x000015c0, 0x000c1401, 0xffffffff,
@@ -1154,8 +1127,7 @@ static const u32 turks_sysls_enable[] =
#endif
-u32 btc_valid_sclk[40] =
-{
+u32 btc_valid_sclk[40] = {
5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000,
55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000,
105000, 110000, 11500, 120000, 125000, 130000, 135000, 140000, 145000, 150000,
@@ -1194,7 +1166,7 @@ void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_t
if ((table == NULL) || (table->count == 0))
return;
- for (i= 0; i < table->count; i++) {
+ for (i = 0; i < table->count; i++) {
if (clock <= table->entries[i].clk) {
if (*voltage < table->entries[i].v)
*voltage = (u16)((table->entries[i].v < max_voltage) ?
@@ -1441,7 +1413,7 @@ void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
u32 i, length = count * 3;
u32 tmp;
- for (i = 0; i < length; i+=3) {
+ for (i = 0; i < length; i += 3) {
tmp = RREG32(sequence[i]);
tmp &= ~sequence[i+2];
tmp |= sequence[i+1] & sequence[i+2];
@@ -2003,7 +1975,7 @@ static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
for (i = 0; i < table->num_entries; i++) {
eg_table->mc_reg_table_entry[i].mclk_max =
table->mc_reg_table_entry[i].mclk_max;
- for(j = 0; j < table->last; j++)
+ for (j = 0; j < table->last; j++)
eg_table->mc_reg_table_entry[i].mc_data[j] =
table->mc_reg_table_entry[i].mc_data[j];
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index b8f4dac68d85..abe9d65cc460 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -46,36 +46,31 @@
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
-static const struct ci_pt_defaults defaults_hawaii_xt =
-{
+static const struct ci_pt_defaults defaults_hawaii_xt = {
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_hawaii_pro =
-{
+static const struct ci_pt_defaults defaults_hawaii_pro = {
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_bonaire_xt =
-{
+static const struct ci_pt_defaults defaults_bonaire_xt = {
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-static const struct ci_pt_defaults defaults_saturn_xt =
-{
+static const struct ci_pt_defaults defaults_saturn_xt = {
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_config_reg didt_config_ci[] =
-{
+static const struct ci_pt_config_reg didt_config_ci[] = {
{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
@@ -1216,7 +1211,7 @@ static void ci_thermal_initialize(struct radeon_device *rdev)
if (rdev->pm.fan_pulses_per_revolution) {
tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
+ tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
WREG32_SMC(CG_TACH_CTRL, tmp);
}
@@ -3333,7 +3328,7 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
}
static void ci_reset_single_dpm_table(struct radeon_device *rdev,
- struct ci_single_dpm_table* dpm_table,
+ struct ci_single_dpm_table *dpm_table,
u32 count)
{
u32 i;
@@ -3343,7 +3338,7 @@ static void ci_reset_single_dpm_table(struct radeon_device *rdev,
dpm_table->dpm_levels[i].enabled = false;
}
-static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
u32 index, u32 pcie_gen, u32 pcie_lanes)
{
dpm_table->dpm_levels[index].value = pcie_gen;
@@ -3503,7 +3498,7 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
u32 i;
int ret = -EINVAL;
- for(i = 0; i < table->count; i++) {
+ for (i = 0; i < table->count; i++) {
if (value == table->dpm_levels[i].value) {
*boot_level = i;
ret = 0;
@@ -4304,7 +4299,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
for (i = 0, j = table->last; i < table->last; i++) {
if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
- switch(table->mc_reg_address[i].s1 << 2) {
+ switch (table->mc_reg_address[i].s1 << 2) {
case MC_SEQ_MISC1:
temp_reg = RREG32(MC_PMG_CMD_EMRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
@@ -4369,7 +4364,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
bool result = true;
- switch(in_reg) {
+ switch (in_reg) {
case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
@@ -4508,7 +4503,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
for (i = 0; i < table->last; i++) {
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
- switch(table->mc_reg_address[i].s1 >> 2) {
+ switch (table->mc_reg_address[i].s1 >> 2) {
case MC_SEQ_MISC1:
for (k = 0; k < table->num_entries; k++) {
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
@@ -4683,7 +4678,7 @@ static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
struct ci_power_info *pi = ci_get_pi(rdev);
u32 i = 0;
- for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
+ for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
break;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index ac12db5f2cf7..74b95c200222 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -87,8 +87,7 @@ struct ci_mc_reg_table {
SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
-struct ci_ulv_parm
-{
+struct ci_ulv_parm {
bool supported;
u32 cg_ulv_parameter;
u32 volt_change_delay;
@@ -113,8 +112,7 @@ struct ci_dpm_level_enable_mask {
u32 pcie_dpm_enable_mask;
};
-struct ci_vbios_boot_state
-{
+struct ci_vbios_boot_state {
u16 mvdd_bootup_value;
u16 vddc_bootup_value;
u16 vddci_bootup_value;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 10be30366c2b..b5e96a8fc2c1 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9592,28 +9592,18 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (bridge_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(root,
- PCI_EXP_LNKCTL2,
- tmp16);
-
- pcie_capability_read_word(rdev->pdev,
- PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (gpu_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(rdev->pdev,
- PCI_EXP_LNKCTL2,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ bridge_cfg2 |
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ gpu_cfg2 |
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -9627,15 +9617,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+ tmp16 = 0;
if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
- pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
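The cik.c hunk above folds each open-coded read/mask/merge/write sequence on PCI_EXP_LNKCTL2 into a single pcie_capability_clear_and_set_word() call, which does the whole read-modify-write in one helper. A standalone sketch of the same clear-and-set idiom on a plain 16-bit register file, using hypothetical names rather than the kernel PCI API:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical register file standing in for PCIe capability space. */
static uint16_t regs[8];

static uint16_t reg_read(int pos)              { return regs[pos]; }
static void     reg_write(int pos, uint16_t v) { regs[pos] = v; }

/* One helper does the read-modify-write: clear the given bits, then set. */
static void reg_clear_and_set(int pos, uint16_t clear, uint16_t set)
{
    uint16_t v = reg_read(pos);

    v &= ~clear;
    v |= set;
    reg_write(pos, v);
}

#define LNKCTL2      3          /* pretend offset                    */
#define ENTER_COMP   (1u << 4)  /* illustrative bit definitions only */
#define TX_MARGIN    (7u << 7)

int main(void)
{
    regs[LNKCTL2] = 0x0391;

    /* Equivalent of the old four-step sequence, now a single call. */
    reg_clear_and_set(LNKCTL2, ENTER_COMP | TX_MARGIN, ENTER_COMP);

    printf("0x%04x\n", (unsigned)regs[LNKCTL2]);
    return 0;
}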
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index 4774e04c4da6..7693fb6624a3 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -23,8 +23,7 @@
#include "clearstate_defs.h"
-static const u32 SECT_CONTEXT_def_1[] =
-{
+static const u32 SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -514,8 +513,7 @@ static const u32 SECT_CONTEXT_def_1[] =
0x00000000, // CB_BLEND6_CONTROL
0x00000000, // CB_BLEND7_CONTROL
};
-static const u32 SECT_CONTEXT_def_2[] =
-{
+static const u32 SECT_CONTEXT_def_2[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
@@ -523,8 +521,7 @@ static const u32 SECT_CONTEXT_def_2[] =
0x00000000, // VGT_DMA_BASE_HI
0x00000000, // VGT_DMA_BASE
};
-static const u32 SECT_CONTEXT_def_3[] =
-{
+static const u32 SECT_CONTEXT_def_3[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
index c1b6c22dbed7..0045d42aa27c 100644
--- a/drivers/gpu/drm/radeon/clearstate_ci.h
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -23,8 +23,7 @@
#include "clearstate_defs.h"
-static const unsigned int ci_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int ci_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a424b86008b8..c634dc28e6c3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2514,8 +2514,7 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static const unsigned ni_dig_offsets[] =
-{
+static const unsigned ni_dig_offsets[] = {
NI_DIG0_REGISTER_OFFSET,
NI_DIG1_REGISTER_OFFSET,
NI_DIG2_REGISTER_OFFSET,
@@ -2524,8 +2523,7 @@ static const unsigned ni_dig_offsets[] =
NI_DIG5_REGISTER_OFFSET
};
-static const unsigned ni_tx_offsets[] =
-{
+static const unsigned ni_tx_offsets[] = {
NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
@@ -2534,8 +2532,7 @@ static const unsigned ni_tx_offsets[] =
NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
};
-static const unsigned evergreen_dp_offsets[] =
-{
+static const unsigned evergreen_dp_offsets[] = {
EVERGREEN_DP0_REGISTER_OFFSET,
EVERGREEN_DP1_REGISTER_OFFSET,
EVERGREEN_DP2_REGISTER_OFFSET,
@@ -2544,8 +2541,7 @@ static const unsigned evergreen_dp_offsets[] =
EVERGREEN_DP5_REGISTER_OFFSET
};
-static const unsigned evergreen_disp_int_status[] =
-{
+static const unsigned evergreen_disp_int_status[] = {
DISP_INTERRUPT_STATUS,
DISP_INTERRUPT_STATUS_CONTINUE,
DISP_INTERRUPT_STATUS_CONTINUE2,
@@ -2643,7 +2639,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
return;
}
- stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
evergreen_dp_offsets[dig_fe], stream_ctrl);
@@ -2655,7 +2651,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
evergreen_dp_offsets[dig_fe]);
}
- if (counter >= 32 )
+ if (counter >= 32)
DRM_ERROR("counter exceeds %d\n", counter);
fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
@@ -2716,7 +2712,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
/*for now we do it this manually*/
/**/
if (ASIC_IS_DCE5(rdev) &&
- evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
evergreen_blank_dp_output(rdev, dig_fe);
/*we could remove 6 lines below*/
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@@ -3597,7 +3593,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
- sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 0de79f3a7e3f..1fe6e0d883c7 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -33,8 +33,8 @@
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index b436badf9efa..3ff9fda54aa3 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -265,8 +265,8 @@
#define NI_DIG_BE_CNTL 0x7140
-# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
-# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
#define NI_DIG_BE_EN_CNTL 0x7144
# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
@@ -284,7 +284,7 @@
#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
-# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
#define EVERGREEN_DP_STEER_FIFO 0x7310
# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
@@ -302,8 +302,8 @@
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
/*DCIO_UNIPHY block*/
-#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
-#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 3a03ba37d043..b34d54b567b7 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -29,8 +29,7 @@
#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-struct SMC_Evergreen_MCRegisterAddress
-{
+struct SMC_Evergreen_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
@@ -38,15 +37,13 @@ struct SMC_Evergreen_MCRegisterAddress
typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-struct SMC_Evergreen_MCRegisterSet
-{
+struct SMC_Evergreen_MCRegisterSet {
uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-struct SMC_Evergreen_MCRegisters
-{
+struct SMC_Evergreen_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index f7735da07feb..55dbf450bd9c 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -64,8 +64,7 @@ extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
u32 block, bool enable);
-static const struct kv_pt_config_reg didt_config_kv[] =
-{
+static const struct kv_pt_config_reg didt_config_kv[] = {
{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
@@ -931,9 +930,9 @@ static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
pi->graphics_level[i].ClkBypassCntl = 2;
else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
+ else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
+ else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
pi->graphics_level[i].ClkBypassCntl = 8;
else
pi->graphics_level[i].ClkBypassCntl = 0;
@@ -1577,7 +1576,7 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
if ((new_ps->levels[0].sclk -
table->entries[pi->highest_valid].sclk_frequency) >
(table->entries[pi->lowest_valid].sclk_frequency -
- new_ps->levels[new_ps->num_levels -1].sclk))
+ new_ps->levels[new_ps->num_levels - 1].sclk))
pi->highest_valid = pi->lowest_valid;
else
pi->lowest_valid = pi->highest_valid;
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index c0a59527e7b8..65831cca6730 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -189,7 +189,7 @@ int kv_copy_bytes_to_smc(struct radeon_device *rdev,
if (ret)
return ret;
- original_data= RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(SMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 927e5f42e97d..77aee99e473a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -66,8 +66,7 @@ void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
-static const u32 tn_rlc_save_restore_register_list[] =
-{
+static const u32 tn_rlc_save_restore_register_list[] = {
0x98fc,
0x98f0,
0x9834,
@@ -216,8 +215,7 @@ MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
-static const u32 cayman_golden_registers2[] =
-{
+static const u32 cayman_golden_registers2[] = {
0x3e5c, 0xffffffff, 0x00000000,
0x3e48, 0xffffffff, 0x00000000,
0x3e4c, 0xffffffff, 0x00000000,
@@ -226,8 +224,7 @@ static const u32 cayman_golden_registers2[] =
0x3e60, 0xffffffff, 0x00000000
};
-static const u32 cayman_golden_registers[] =
-{
+static const u32 cayman_golden_registers[] = {
0x5eb4, 0xffffffff, 0x00000002,
0x5e78, 0x8f311ff1, 0x001000f0,
0x3f90, 0xffff0000, 0xff000000,
@@ -267,16 +264,14 @@ static const u32 cayman_golden_registers[] =
0x8974, 0xffffffff, 0x00000000
};
-static const u32 dvst_golden_registers2[] =
-{
+static const u32 dvst_golden_registers2[] = {
0x8f8, 0xffffffff, 0,
0x8fc, 0x00380000, 0,
0x8f8, 0xffffffff, 1,
0x8fc, 0x0e000000, 0
};
-static const u32 dvst_golden_registers[] =
-{
+static const u32 dvst_golden_registers[] = {
0x690, 0x3fff3fff, 0x20c00033,
0x918c, 0x0fff0fff, 0x00010006,
0x91a8, 0x0fff0fff, 0x00010006,
@@ -333,8 +328,7 @@ static const u32 dvst_golden_registers[] =
0x8974, 0xffffffff, 0x00000000
};
-static const u32 scrapper_golden_registers[] =
-{
+static const u32 scrapper_golden_registers[] = {
0x690, 0x3fff3fff, 0x20c00033,
0x918c, 0x0fff0fff, 0x00010006,
0x918c, 0x0fff0fff, 0x00010006,
@@ -624,7 +618,7 @@ static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
int ni_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
- u32 mem_type, running, blackout = 0;
+ u32 mem_type, running;
u32 *io_mc_regs;
int i, ucode_size, regs_size;
@@ -659,11 +653,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
- if (running) {
- blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
- WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
- }
-
/* reset the engine and set to writable */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -689,9 +678,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
break;
udelay(1);
}
-
- if (running)
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -754,7 +740,8 @@ int ni_init_microcode(struct radeon_device *rdev)
rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
mc_req_size = 0;
break;
- default: BUG();
+ default:
+ BUG();
}
DRM_INFO("Loading %s Microcode\n", chip_name);
@@ -813,7 +800,7 @@ int ni_init_microcode(struct radeon_device *rdev)
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
+ rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
}
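In the ni_mc_load_microcode() hunk above, the enclosing test already requires running == 0, so the inner if (running) blackout save/restore could never execute; the patch removes that dead branch along with the now-unused blackout local. A trivial standalone illustration of the same unreachable-branch cleanup, with hypothetical names:

#include <stdio.h>

/* Before: a branch guarded by a condition the outer test already rules out. */
static void load_before(int running)
{
    if (running == 0) {
        if (running)                      /* never true here */
            printf("save blackout state\n");
        printf("program microcode\n");
    }
}

/* After: the unreachable branch and its bookkeeping are simply removed. */
static void load_after(int running)
{
    if (running == 0)
        printf("program microcode\n");
}

int main(void)
{
    load_before(0);
    load_after(0);
    return 0;
}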
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 3e1c1a392fb7..e08559c44a5c 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3103,9 +3103,6 @@ static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
u32 smc_leakage, max_leakage = 0;
u32 scaling_factor;
- if (!leakage_table)
- return -EINVAL;
-
table_size = leakage_table->count;
if (eg_pi->vddc_voltage_table.count != table_size)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.h b/drivers/gpu/drm/radeon/ni_dpm.h
index 74e301936906..4e3e7303e035 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.h
+++ b/drivers/gpu/drm/radeon/ni_dpm.h
@@ -59,8 +59,7 @@ struct ni_mc_reg_table {
#define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2
-enum ni_dc_cac_level
-{
+enum ni_dc_cac_level {
NISLANDS_DCCAC_LEVEL_0 = 0,
NISLANDS_DCCAC_LEVEL_1,
NISLANDS_DCCAC_LEVEL_2,
@@ -72,8 +71,7 @@ enum ni_dc_cac_level
NISLANDS_DCCAC_MAX_LEVELS
};
-struct ni_leakage_coeffients
-{
+struct ni_leakage_coeffients {
u32 at;
u32 bt;
u32 av;
@@ -83,8 +81,7 @@ struct ni_leakage_coeffients
u32 t_ref;
};
-struct ni_cac_data
-{
+struct ni_cac_data {
struct ni_leakage_coeffients leakage_coefficients;
u32 i_leakage;
s32 leakage_minimum_temperature;
@@ -100,8 +97,7 @@ struct ni_cac_data
u8 lts_truncate_n;
};
-struct ni_cac_weights
-{
+struct ni_cac_weights {
u32 weight_tcp_sig0;
u32 weight_tcp_sig1;
u32 weight_ta_sig;
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
index 42f3bab0f9ee..097893c38915 100644
--- a/drivers/gpu/drm/radeon/nislands_smc.h
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -27,8 +27,7 @@
#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-struct PP_NIslands_Dpm2PerfLevel
-{
+struct PP_NIslands_Dpm2PerfLevel {
uint8_t MaxPS;
uint8_t TgtAct;
uint8_t MaxPS_StepInc;
@@ -44,8 +43,7 @@ struct PP_NIslands_Dpm2PerfLevel
typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-struct PP_NIslands_DPM2Parameters
-{
+struct PP_NIslands_DPM2Parameters {
uint32_t TDPLimit;
uint32_t NearTDPLimit;
uint32_t SafePowerLimit;
@@ -53,8 +51,7 @@ struct PP_NIslands_DPM2Parameters
};
typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-struct NISLANDS_SMC_SCLK_VALUE
-{
+struct NISLANDS_SMC_SCLK_VALUE {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -66,8 +63,7 @@ struct NISLANDS_SMC_SCLK_VALUE
typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-struct NISLANDS_SMC_MCLK_VALUE
-{
+struct NISLANDS_SMC_MCLK_VALUE {
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
@@ -84,8 +80,7 @@ struct NISLANDS_SMC_MCLK_VALUE
typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
+struct NISLANDS_SMC_VOLTAGE_VALUE {
uint16_t value;
uint8_t index;
uint8_t padding;
@@ -93,8 +88,7 @@ struct NISLANDS_SMC_VOLTAGE_VALUE
typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL {
uint8_t arbValue;
uint8_t ACIndex;
uint8_t displayWatermark;
@@ -132,8 +126,7 @@ struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-struct NISLANDS_SMC_SWSTATE
-{
+struct NISLANDS_SMC_SWSTATE {
uint8_t flags;
uint8_t levelCount;
uint8_t padding2;
@@ -156,8 +149,7 @@ struct NISLANDS_SMC_SWSTATE_SINGLE {
#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
+struct NISLANDS_SMC_VOLTAGEMASKTABLE {
uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
};
@@ -166,8 +158,7 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
#define NISLANDS_MAX_NO_VREG_STEPS 32
-struct NISLANDS_SMC_STATETABLE
-{
+struct NISLANDS_SMC_STATETABLE {
uint8_t thermalProtectType;
uint8_t systemFlags;
uint8_t maxVDDCIndexInPPTable;
@@ -203,8 +194,7 @@ typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
#define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
#define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4
-struct SMC_NISLANDS_MC_TPP_CAC_TABLE
-{
+struct SMC_NISLANDS_MC_TPP_CAC_TABLE {
uint32_t tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
uint32_t cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
};
@@ -212,8 +202,7 @@ struct SMC_NISLANDS_MC_TPP_CAC_TABLE
typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;
-struct PP_NIslands_CACTABLES
-{
+struct PP_NIslands_CACTABLES {
uint32_t cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
uint32_t cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
@@ -257,8 +246,7 @@ typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES;
#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-struct SMC_NIslands_MCRegisterAddress
-{
+struct SMC_NIslands_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
@@ -266,15 +254,13 @@ struct SMC_NIslands_MCRegisterAddress
typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-struct SMC_NIslands_MCRegisterSet
-{
+struct SMC_NIslands_MCRegisterSet {
uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-struct SMC_NIslands_MCRegisters
-{
+struct SMC_NIslands_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
@@ -283,8 +269,7 @@ struct SMC_NIslands_MCRegisters
typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-struct SMC_NIslands_MCArbDramTimingRegisterSet
-{
+struct SMC_NIslands_MCArbDramTimingRegisterSet {
uint32_t mc_arb_dram_timing;
uint32_t mc_arb_dram_timing2;
uint8_t mc_arb_rfsh_rate;
@@ -293,8 +278,7 @@ struct SMC_NIslands_MCArbDramTimingRegisterSet
typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet;
-struct SMC_NIslands_MCArbDramTimingRegisters
-{
+struct SMC_NIslands_MCArbDramTimingRegisters {
uint8_t arb_current;
uint8_t reserved[3];
SMC_NIslands_MCArbDramTimingRegisterSet data[20];
@@ -302,8 +286,7 @@ struct SMC_NIslands_MCArbDramTimingRegisters
typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters;
-struct SMC_NISLANDS_SPLL_DIV_TABLE
-{
+struct SMC_NISLANDS_SPLL_DIV_TABLE {
uint32_t freq[256];
uint32_t ss[256];
};
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cfeca2694d5f..86b8b770af19 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1327,7 +1327,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
return -EINVAL;
}
track->num_arrays = c;
- for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ for (i = 0; i < (c - 1); i += 2, idx += 3) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 9d341cff63ee..d776f929d5c3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -825,7 +825,7 @@
# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
-# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
+# define R300_TX_MIN_FILTER_MASK ((15 << 11) | (3 << 13))
# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a17b95eec65f..b5e97d95a19f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -99,8 +99,7 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 9d2bcb9551e6..64980a61d38a 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -28,8 +28,7 @@
#include "r600_dpm.h"
#include "atom.h"
-const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
-{
+const u32 r600_utc[R600_PM_NUMBER_OF_TC] = {
R600_UTC_DFLT_00,
R600_UTC_DFLT_01,
R600_UTC_DFLT_02,
@@ -47,8 +46,7 @@ const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
R600_UTC_DFLT_14,
};
-const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
-{
+const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = {
R600_DTC_DFLT_00,
R600_DTC_DFLT_01,
R600_DTC_DFLT_02,
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 6e4d22ed2a00..5c2513c84c48 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -119,8 +119,7 @@ enum r600_display_watermark {
R600_DISPLAY_WATERMARK_HIGH = 1,
};
-enum r600_display_gap
-{
+enum r600_display_gap {
R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
R600_PM_DISPLAY_GAP_VBLANK = 1,
R600_PM_DISPLAY_GAP_WATERMARK = 2,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3d3d2109dfeb..3e5ff17e3caf 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1355,14 +1355,12 @@ struct radeon_dpm_thermal {
bool high_to_low;
};
-enum radeon_clk_action
-{
+enum radeon_clk_action {
RADEON_SCLK_UP = 1,
RADEON_SCLK_DOWN
};
-struct radeon_blacklist_clocks
-{
+struct radeon_blacklist_clocks {
u32 sclk;
u32 mclk;
enum radeon_clk_action action;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 802b5af19261..b5a0109b2e2c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2400,10 +2400,10 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RS880:
rdev->asic = &rs780_asic;
/* 760G/780V/880V don't have UVD */
- if ((rdev->pdev->device == 0x9616)||
- (rdev->pdev->device == 0x9611)||
- (rdev->pdev->device == 0x9613)||
- (rdev->pdev->device == 0x9711)||
+ if ((rdev->pdev->device == 0x9616) ||
+ (rdev->pdev->device == 0x9611) ||
+ (rdev->pdev->device == 0x9613) ||
+ (rdev->pdev->device == 0x9711) ||
(rdev->pdev->device == 0x9713))
rdev->has_uvd = false;
else
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 3596ea4a8b60..bb1f0a3371ab 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2852,7 +2852,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
args.v1.ucAction = clock_type;
args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v1.ucPostDiv;
dividers->fb_div = args.v1.ucFbDiv;
@@ -2866,7 +2866,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
args.v2.ucAction = clock_type;
args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v2.ucPostDiv;
dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
@@ -2881,7 +2881,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -2901,7 +2901,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -2920,7 +2920,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -2931,7 +2931,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -2972,7 +2972,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -3005,7 +3005,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
@@ -3013,7 +3013,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
GET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return le32_to_cpu(args.ulReturnEngineClock);
}
@@ -3022,7 +3022,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
GET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return le32_to_cpu(args.ulReturnMemoryClock);
}
@@ -3034,7 +3034,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_set_memory_clock(struct radeon_device *rdev,
@@ -3048,7 +3048,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
@@ -3067,7 +3067,7 @@ void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
@@ -3078,7 +3078,7 @@ void radeon_atom_update_memory_dll(struct radeon_device *rdev,
args = cpu_to_le32(mem_clock); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -3090,7 +3090,7 @@ void radeon_atom_set_ac_timing(struct radeon_device *rdev,
args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union set_voltage {
@@ -3134,7 +3134,7 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
return;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
@@ -3155,7 +3155,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -3164,7 +3164,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
@@ -3200,7 +3200,7 @@ int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
args.v3.usVoltageLevel = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
break;
@@ -3327,7 +3327,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
@@ -3353,7 +3353,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
@@ -3361,7 +3361,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*gpio_value = le32_to_cpu(*(u32 *)&args.v2);
break;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 595354e3ce0b..f557535c1d7b 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -61,19 +61,23 @@ struct atpx_mux {
u16 mux;
} __packed;
-bool radeon_has_atpx(void) {
+bool radeon_has_atpx(void)
+{
return radeon_atpx_priv.atpx_detected;
}
-bool radeon_has_atpx_dgpu_power_cntl(void) {
+bool radeon_has_atpx_dgpu_power_cntl(void)
+{
return radeon_atpx_priv.atpx.functions.power_cntl;
}
-bool radeon_is_atpx_hybrid(void) {
+bool radeon_is_atpx_hybrid(void)
+{
return radeon_atpx_priv.atpx.is_hybrid;
}
-bool radeon_atpx_dgpu_req_power_for_displays(void) {
+bool radeon_atpx_dgpu_req_power_for_displays(void)
+{
return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 91b58fbc2be7..74753bb26d33 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -37,15 +37,14 @@
void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
u8 enable_mask);
-struct r600_audio_pin* r600_audio_get_pin(struct radeon_device *rdev);
-struct r600_audio_pin* dce6_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
-static const u32 pin_offsets[7] =
-{
+static const u32 pin_offsets[7] = {
(0x5e00 - 0x5e00),
(0x5e18 - 0x5e00),
(0x5e30 - 0x5e00),
@@ -361,7 +360,7 @@ static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
}
-struct r600_audio_pin* radeon_audio_get_pin(struct drm_encoder *encoder)
+struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -528,7 +527,7 @@ static void radeon_audio_calc_cts(unsigned int clock, int *CTS, int *N, int freq
*N, *CTS, freq);
}
-static const struct radeon_hdmi_acr* radeon_audio_acr(unsigned int clock)
+static const struct radeon_hdmi_acr *radeon_audio_acr(unsigned int clock)
{
static struct radeon_hdmi_acr res;
u8 i;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index dacaaa007051..a073dadd0638 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -34,8 +34,7 @@ struct cea_sad;
#define WREG32_ENDPOINT(block, reg, v) \
radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
-struct radeon_audio_basic_funcs
-{
+struct radeon_audio_basic_funcs {
u32 (*endpoint_rreg)(struct radeon_device *rdev, u32 offset, u32 reg);
void (*endpoint_wreg)(struct radeon_device *rdev,
u32 offset, u32 reg, u32 v);
@@ -43,8 +42,7 @@ struct radeon_audio_basic_funcs
struct r600_audio_pin *pin, u8 enable_mask);
};
-struct radeon_audio_funcs
-{
+struct radeon_audio_funcs {
void (*select_pin)(struct drm_encoder *encoder);
struct r600_audio_pin* (*get_pin)(struct radeon_device *rdev);
void (*write_latency_fields)(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 59c4db13d90a..546381a5c918 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -603,8 +603,7 @@ struct atom_memory_info {
#define MAX_AC_TIMING_ENTRIES 16
-struct atom_memory_clock_range_table
-{
+struct atom_memory_clock_range_table {
u8 num_entries;
u8 rsv[3];
u32 mclk[MAX_AC_TIMING_ENTRIES];
@@ -632,14 +631,12 @@ struct atom_mc_reg_table {
#define MAX_VOLTAGE_ENTRIES 32
-struct atom_voltage_table_entry
-{
+struct atom_voltage_table_entry {
u16 value;
u32 smio_low;
};
-struct atom_voltage_table
-{
+struct atom_voltage_table {
u32 count;
u32 mask_low;
u32 phase_delay;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 10c0fbd9d2b4..a955f8a2f7fe 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -78,7 +78,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
u32 c = 0, i;
rbo->placement.placement = rbo->placements;
- rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM) {
/* Try placing BOs which don't need CPU access outside of the
* CPU accessible part of VRAM
@@ -114,7 +113,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
}
rbo->placement.num_placement = c;
- rbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b73fd9ab0252..4482c8c5f5ce 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -587,7 +587,7 @@ static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
- if(!rdev->asic->dpm.fan_ctrl_set_mode)
+ if (!rdev->asic->dpm.fan_ctrl_set_mode)
return -EINVAL;
err = kstrtoint(buf, 10, &value);
@@ -789,7 +789,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* Skip vddc attribute if get_current_vddc is not implemented */
- if(attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
+ if (attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
!rdev->asic->dpm.get_current_vddc)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index de4e6d78f1e1..2078b0000e22 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -92,9 +92,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
if (!radeon_ttm_bo_is_radeon_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
rbo = container_of(bo, struct radeon_bo, tbo);
@@ -114,15 +112,11 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
*/
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT);
- rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
if (rbo->placements[i].fpfn < fpfn)
rbo->placements[i].fpfn = fpfn;
- } else {
- rbo->placement.busy_placement =
- &rbo->placements[i];
- rbo->placement.num_busy_placement = 1;
+ rbo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
}
}
} else
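The radeon_object.c and radeon_ttm.c hunks above (and the radeon_uvd.c hunk that follows) drop the separate busy-placement bookkeeping: the driver now keeps a single placement list, and in the eviction path the first VRAM entry is tagged TTM_PL_FLAG_DESIRED instead of populating a second list. A minimal sketch of the single-list style; the field names and flag come from the hunks, the function name and its behaviour claims are illustrative assumptions, not the driver's exact code:

/* Illustrative only; everything except the struct fields and
 * TTM_PL_FLAG_DESIRED is hypothetical. */
static void sketch_single_list_placement(struct radeon_bo *rbo,
					 unsigned int count)
{
	rbo->placement.placement = rbo->placements;	/* single list */
	rbo->placement.num_placement = count;		/* no busy copy */
	/* Tag the first entry as preferred; presumably TTM can skip it
	 * under memory pressure and use the remaining entries instead. */
	rbo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
}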
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a2cda184b2b2..058a1c8451b2 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -324,7 +324,6 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placement.num_placement++;
- rbo->placement.num_busy_placement++;
}
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 922a29e58880..d7f552d441ab 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -86,7 +86,7 @@ int rs400_gart_init(struct radeon_device *rdev)
return 0;
}
/* Check gart size */
- switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ switch (rdev->mc.gtt_size / (1024 * 1024)) {
case 32:
case 64:
case 128:
@@ -116,7 +116,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
/* Check gart size */
- switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ switch (rdev->mc.gtt_size / (1024 * 1024)) {
case 32:
size_reg = RS480_VA_SIZE_32MB;
break;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 8cf87a0a2b2a..5c162778899b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -54,8 +54,7 @@
static void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 76260fdfbaa7..79709d26d983 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -42,8 +42,7 @@
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.h b/drivers/gpu/drm/radeon/rv6xx_dpm.h
index 8035d53ebea6..020c0dc8361d 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.h
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.h
@@ -28,8 +28,7 @@
#include "r600_dpm.h"
/* Represents a single SCLK step. */
-struct rv6xx_sclk_stepping
-{
+struct rv6xx_sclk_stepping {
u32 vco_frequency;
u32 post_divider;
};
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index ef2f1a048cfe..e3e1f6833f12 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -1010,7 +1010,7 @@ int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
- (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) {
+ (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low)) {
voltage->index = MVDD_LOW_INDEX;
voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
} else {
@@ -1260,7 +1260,7 @@ static int rv770_construct_vddc_table(struct radeon_device *rdev)
pi->vddc_mask_low = gpio_mask;
if (i > 0) {
if ((pi->vddc_table[i].low_smio !=
- pi->vddc_table[i - 1].low_smio ) ||
+ pi->vddc_table[i - 1].low_smio) ||
(pi->vddc_table[i].high_smio !=
pi->vddc_table[i - 1].high_smio))
vddc_index++;
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index 3b2c963c4880..d8e8f70135f2 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -31,8 +31,7 @@
#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-struct RV770_SMC_SCLK_VALUE
-{
+struct RV770_SMC_SCLK_VALUE {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -43,8 +42,7 @@ struct RV770_SMC_SCLK_VALUE
typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-struct RV770_SMC_MCLK_VALUE
-{
+struct RV770_SMC_MCLK_VALUE {
uint32_t vMPLL_AD_FUNC_CNTL;
uint32_t vMPLL_AD_FUNC_CNTL_2;
uint32_t vMPLL_DQ_FUNC_CNTL;
@@ -59,8 +57,7 @@ struct RV770_SMC_MCLK_VALUE
typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-struct RV730_SMC_MCLK_VALUE
-{
+struct RV730_SMC_MCLK_VALUE {
uint32_t vMCLK_PWRMGT_CNTL;
uint32_t vDLL_CNTL;
uint32_t vMPLL_FUNC_CNTL;
@@ -73,8 +70,7 @@ struct RV730_SMC_MCLK_VALUE
typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-struct RV770_SMC_VOLTAGE_VALUE
-{
+struct RV770_SMC_VOLTAGE_VALUE {
uint16_t value;
uint8_t index;
uint8_t padding;
@@ -82,16 +78,14 @@ struct RV770_SMC_VOLTAGE_VALUE
typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-union RV7XX_SMC_MCLK_VALUE
-{
+union RV7XX_SMC_MCLK_VALUE {
RV770_SMC_MCLK_VALUE mclk770;
RV730_SMC_MCLK_VALUE mclk730;
};
typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
+struct RV770_SMC_HW_PERFORMANCE_LEVEL {
uint8_t arbValue;
union{
uint8_t seqValue;
@@ -126,8 +120,7 @@ struct RV770_SMC_HW_PERFORMANCE_LEVEL
typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-struct RV770_SMC_SWSTATE
-{
+struct RV770_SMC_SWSTATE {
uint8_t flags;
uint8_t padding1;
uint8_t padding2;
@@ -142,8 +135,7 @@ typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
#define RV770_SMC_VOLTAGEMASK_VDDCI 2
#define RV770_SMC_VOLTAGEMASK_MAX 4
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
+struct RV770_SMC_VOLTAGEMASKTABLE {
uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
};
@@ -152,8 +144,7 @@ typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
#define MAX_NO_VREG_STEPS 32
-struct RV770_SMC_STATETABLE
-{
+struct RV770_SMC_STATETABLE {
uint8_t thermalProtectType;
uint8_t systemFlags;
uint8_t maxVDDCIndexInPPTable;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 85e9cba49cec..15759c8ca5b7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -138,8 +138,7 @@ static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
-static const u32 crtc_offsets[] =
-{
+static const u32 crtc_offsets[] = {
EVERGREEN_CRTC0_REGISTER_OFFSET,
EVERGREEN_CRTC1_REGISTER_OFFSET,
EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -148,8 +147,7 @@ static const u32 crtc_offsets[] =
EVERGREEN_CRTC5_REGISTER_OFFSET
};
-static const u32 si_disp_int_status[] =
-{
+static const u32 si_disp_int_status[] = {
DISP_INTERRUPT_STATUS,
DISP_INTERRUPT_STATUS_CONTINUE,
DISP_INTERRUPT_STATUS_CONTINUE2,
@@ -162,8 +160,7 @@ static const u32 si_disp_int_status[] =
#define DC_HPDx_INT_CONTROL(x) (DC_HPD1_INT_CONTROL + (x * 0xc))
#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS + (x * 0xc))
-static const u32 verde_rlc_save_restore_register_list[] =
-{
+static const u32 verde_rlc_save_restore_register_list[] = {
(0x8000 << 16) | (0x98f4 >> 2),
0x00000000,
(0x8040 << 16) | (0x98f4 >> 2),
@@ -384,8 +381,7 @@ static const u32 verde_rlc_save_restore_register_list[] =
0x00000000
};
-static const u32 tahiti_golden_rlc_registers[] =
-{
+static const u32 tahiti_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x00601005,
0xc47c, 0xffffffff, 0x10104040,
0xc488, 0xffffffff, 0x0100000a,
@@ -394,8 +390,7 @@ static const u32 tahiti_golden_rlc_registers[] =
0xf4a8, 0xffffffff, 0x00000000
};
-static const u32 tahiti_golden_registers[] =
-{
+static const u32 tahiti_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -429,13 +424,11 @@ static const u32 tahiti_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 tahiti_golden_registers2[] =
-{
+static const u32 tahiti_golden_registers2[] = {
0xc64, 0x00000001, 0x00000001
};
-static const u32 pitcairn_golden_rlc_registers[] =
-{
+static const u32 pitcairn_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x00601004,
0xc47c, 0xffffffff, 0x10102020,
0xc488, 0xffffffff, 0x01000020,
@@ -443,8 +436,7 @@ static const u32 pitcairn_golden_rlc_registers[] =
0xc30c, 0xffffffff, 0x800000a4
};
-static const u32 pitcairn_golden_registers[] =
-{
+static const u32 pitcairn_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -474,8 +466,7 @@ static const u32 pitcairn_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 verde_golden_rlc_registers[] =
-{
+static const u32 verde_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x033f1005,
0xc47c, 0xffffffff, 0x10808020,
0xc488, 0xffffffff, 0x00800008,
@@ -483,8 +474,7 @@ static const u32 verde_golden_rlc_registers[] =
0xc30c, 0xffffffff, 0x80010014
};
-static const u32 verde_golden_registers[] =
-{
+static const u32 verde_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -539,8 +529,7 @@ static const u32 verde_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 oland_golden_rlc_registers[] =
-{
+static const u32 oland_golden_rlc_registers[] = {
0xc424, 0xffffffff, 0x00601005,
0xc47c, 0xffffffff, 0x10104040,
0xc488, 0xffffffff, 0x0100000a,
@@ -548,8 +537,7 @@ static const u32 oland_golden_rlc_registers[] =
0xc30c, 0xffffffff, 0x800000f4
};
-static const u32 oland_golden_registers[] =
-{
+static const u32 oland_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -579,8 +567,7 @@ static const u32 oland_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 hainan_golden_registers[] =
-{
+static const u32 hainan_golden_registers[] = {
0x9a10, 0x00010000, 0x00018208,
0x9830, 0xffffffff, 0x00000000,
0x9834, 0xf00fffff, 0x00000400,
@@ -608,13 +595,11 @@ static const u32 hainan_golden_registers[] =
0x15c0, 0x000c0fc0, 0x000c0400
};
-static const u32 hainan_golden_registers2[] =
-{
+static const u32 hainan_golden_registers2[] = {
0x98f8, 0xffffffff, 0x02010001
};
-static const u32 tahiti_mgcg_cgcg_init[] =
-{
+static const u32 tahiti_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -743,8 +728,7 @@ static const u32 tahiti_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 pitcairn_mgcg_cgcg_init[] =
-{
+static const u32 pitcairn_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -841,8 +825,7 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 verde_mgcg_cgcg_init[] =
-{
+static const u32 verde_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -941,8 +924,7 @@ static const u32 verde_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 oland_mgcg_cgcg_init[] =
-{
+static const u32 oland_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -1021,8 +1003,7 @@ static const u32 oland_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static const u32 hainan_mgcg_cgcg_init[] =
-{
+static const u32 hainan_mgcg_cgcg_init[] = {
0xc400, 0xffffffff, 0xfffffffc,
0x802c, 0xffffffff, 0xe0000000,
0x9a60, 0xffffffff, 0x00000100,
@@ -1098,8 +1079,7 @@ static const u32 hainan_mgcg_cgcg_init[] =
0xd8c0, 0xfffffff0, 0x00000100
};
-static u32 verde_pg_init[] =
-{
+static u32 verde_pg_init[] = {
0x353c, 0xffffffff, 0x40000,
0x3538, 0xffffffff, 0x200010ff,
0x353c, 0xffffffff, 0x0,
@@ -1768,7 +1748,8 @@ static int si_init_microcode(struct radeon_device *rdev)
mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
break;
- default: BUG();
+ default:
+ BUG();
}
/* this memory configuration requires special firmware */
@@ -7193,28 +7174,18 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (bridge_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(root,
- PCI_EXP_LNKCTL2,
- tmp16);
-
- pcie_capability_read_word(rdev->pdev,
- PCI_EXP_LNKCTL2,
- &tmp16);
- tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN);
- tmp16 |= (gpu_cfg2 &
- (PCI_EXP_LNKCTL2_ENTER_COMP |
- PCI_EXP_LNKCTL2_TX_MARGIN));
- pcie_capability_write_word(rdev->pdev,
- PCI_EXP_LNKCTL2,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -7228,15 +7199,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+ tmp16 = 0;
if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
- pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
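The si_pcie_gen3_enable() hunks replace two open-coded read/mask/OR/write sequences on PCI_EXP_LNKCTL2 with pcie_capability_clear_and_set_word(), and the target-link-speed update now builds the new TLS value from zero and hands PCI_EXP_LNKCTL2_TLS to the helper as the clear mask instead of reading the register first. A minimal sketch of what one helper call stands in for, assuming it performs a plain read-modify-write; the function name below is made up, the helper itself lives in the PCI core:

/* Rough equivalent of one pcie_capability_clear_and_set_word() call;
 * sketch only. */
static void sketch_lnkctl2_rmw(struct pci_dev *dev, u16 clear, u16 set)
{
	u16 val;

	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &val);
	val &= ~clear;
	val |= set;
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, val);
}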
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index fbf968e3f6d7..9deb91970d4d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -46,8 +46,7 @@
#define SCLK_MIN_DEEPSLEEP_FREQ 1350
-static const struct si_cac_config_reg cac_weights_tahiti[] =
-{
+static const struct si_cac_config_reg cac_weights_tahiti[] = {
{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
@@ -111,8 +110,7 @@ static const struct si_cac_config_reg cac_weights_tahiti[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_tahiti[] =
-{
+static const struct si_cac_config_reg lcac_tahiti[] = {
{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
@@ -203,13 +201,11 @@ static const struct si_cac_config_reg lcac_tahiti[] =
};
-static const struct si_cac_config_reg cac_override_tahiti[] =
-{
+static const struct si_cac_config_reg cac_override_tahiti[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_tahiti =
-{
+static const struct si_powertune_data powertune_data_tahiti = {
((1 << 16) | 27027),
6,
0,
@@ -239,8 +235,7 @@ static const struct si_powertune_data powertune_data_tahiti =
true
};
-static const struct si_dte_data dte_data_tahiti =
-{
+static const struct si_dte_data dte_data_tahiti = {
{ 1159409, 0, 0, 0, 0 },
{ 777, 0, 0, 0, 0 },
2,
@@ -257,8 +252,7 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-static const struct si_dte_data dte_data_tahiti_pro =
-{
+static const struct si_dte_data dte_data_tahiti_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -275,8 +269,7 @@ static const struct si_dte_data dte_data_tahiti_pro =
true
};
-static const struct si_dte_data dte_data_new_zealand =
-{
+static const struct si_dte_data dte_data_new_zealand = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
0x5,
@@ -293,8 +286,7 @@ static const struct si_dte_data dte_data_new_zealand =
true
};
-static const struct si_dte_data dte_data_aruba_pro =
-{
+static const struct si_dte_data dte_data_aruba_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -311,8 +303,7 @@ static const struct si_dte_data dte_data_aruba_pro =
true
};
-static const struct si_dte_data dte_data_malta =
-{
+static const struct si_dte_data dte_data_malta = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -329,8 +320,7 @@ static const struct si_dte_data dte_data_malta =
true
};
-static struct si_cac_config_reg cac_weights_pitcairn[] =
-{
+static struct si_cac_config_reg cac_weights_pitcairn[] = {
{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
@@ -394,8 +384,7 @@ static struct si_cac_config_reg cac_weights_pitcairn[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_pitcairn[] =
-{
+static const struct si_cac_config_reg lcac_pitcairn[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -485,13 +474,11 @@ static const struct si_cac_config_reg lcac_pitcairn[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_override_pitcairn[] =
-{
+static const struct si_cac_config_reg cac_override_pitcairn[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_pitcairn =
-{
+static const struct si_powertune_data powertune_data_pitcairn = {
((1 << 16) | 27027),
5,
0,
@@ -521,8 +508,7 @@ static const struct si_powertune_data powertune_data_pitcairn =
true
};
-static const struct si_dte_data dte_data_pitcairn =
-{
+static const struct si_dte_data dte_data_pitcairn = {
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
@@ -539,8 +525,7 @@ static const struct si_dte_data dte_data_pitcairn =
false
};
-static const struct si_dte_data dte_data_curacao_xt =
-{
+static const struct si_dte_data dte_data_curacao_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -557,8 +542,7 @@ static const struct si_dte_data dte_data_curacao_xt =
true
};
-static const struct si_dte_data dte_data_curacao_pro =
-{
+static const struct si_dte_data dte_data_curacao_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -575,8 +559,7 @@ static const struct si_dte_data dte_data_curacao_pro =
true
};
-static const struct si_dte_data dte_data_neptune_xt =
-{
+static const struct si_dte_data dte_data_neptune_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -593,8 +576,7 @@ static const struct si_dte_data dte_data_neptune_xt =
true
};
-static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -658,8 +640,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -723,8 +704,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_heathrow[] =
-{
+static const struct si_cac_config_reg cac_weights_heathrow[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -788,8 +768,7 @@ static const struct si_cac_config_reg cac_weights_heathrow[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -853,8 +832,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_cape_verde[] =
-{
+static const struct si_cac_config_reg cac_weights_cape_verde[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -918,8 +896,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_cape_verde[] =
-{
+static const struct si_cac_config_reg lcac_cape_verde[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -977,13 +954,11 @@ static const struct si_cac_config_reg lcac_cape_verde[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_override_cape_verde[] =
-{
+static const struct si_cac_config_reg cac_override_cape_verde[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_cape_verde =
-{
+static const struct si_powertune_data powertune_data_cape_verde = {
((1 << 16) | 0x6993),
5,
0,
@@ -1013,8 +988,7 @@ static const struct si_powertune_data powertune_data_cape_verde =
true
};
-static const struct si_dte_data dte_data_cape_verde =
-{
+static const struct si_dte_data dte_data_cape_verde = {
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
@@ -1031,8 +1005,7 @@ static const struct si_dte_data dte_data_cape_verde =
false
};
-static const struct si_dte_data dte_data_venus_xtx =
-{
+static const struct si_dte_data dte_data_venus_xtx = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
5,
@@ -1049,8 +1022,7 @@ static const struct si_dte_data dte_data_venus_xtx =
true
};
-static const struct si_dte_data dte_data_venus_xt =
-{
+static const struct si_dte_data dte_data_venus_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
5,
@@ -1067,8 +1039,7 @@ static const struct si_dte_data dte_data_venus_xt =
true
};
-static const struct si_dte_data dte_data_venus_pro =
-{
+static const struct si_dte_data dte_data_venus_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
5,
@@ -1085,8 +1056,7 @@ static const struct si_dte_data dte_data_venus_pro =
true
};
-static struct si_cac_config_reg cac_weights_oland[] =
-{
+static struct si_cac_config_reg cac_weights_oland[] = {
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -1150,8 +1120,7 @@ static struct si_cac_config_reg cac_weights_oland[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_mars_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_mars_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1215,8 +1184,7 @@ static const struct si_cac_config_reg cac_weights_mars_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_mars_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_mars_xt[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1280,8 +1248,7 @@ static const struct si_cac_config_reg cac_weights_mars_xt[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_oland_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_oland_pro[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1345,8 +1312,7 @@ static const struct si_cac_config_reg cac_weights_oland_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_weights_oland_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_oland_xt[] = {
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1410,8 +1376,7 @@ static const struct si_cac_config_reg cac_weights_oland_xt[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_oland[] =
-{
+static const struct si_cac_config_reg lcac_oland[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -1457,8 +1422,7 @@ static const struct si_cac_config_reg lcac_oland[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg lcac_mars_pro[] =
-{
+static const struct si_cac_config_reg lcac_mars_pro[] = {
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -1504,13 +1468,11 @@ static const struct si_cac_config_reg lcac_mars_pro[] =
{ 0xFFFFFFFF }
};
-static const struct si_cac_config_reg cac_override_oland[] =
-{
+static const struct si_cac_config_reg cac_override_oland[] = {
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_oland =
-{
+static const struct si_powertune_data powertune_data_oland = {
((1 << 16) | 0x6993),
5,
0,
@@ -1540,8 +1502,7 @@ static const struct si_powertune_data powertune_data_oland =
true
};
-static const struct si_powertune_data powertune_data_mars_pro =
-{
+static const struct si_powertune_data powertune_data_mars_pro = {
((1 << 16) | 0x6993),
5,
0,
@@ -1571,8 +1532,7 @@ static const struct si_powertune_data powertune_data_mars_pro =
true
};
-static const struct si_dte_data dte_data_oland =
-{
+static const struct si_dte_data dte_data_oland = {
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
@@ -1589,8 +1549,7 @@ static const struct si_dte_data dte_data_oland =
false
};
-static const struct si_dte_data dte_data_mars_pro =
-{
+static const struct si_dte_data dte_data_mars_pro = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -1607,8 +1566,7 @@ static const struct si_dte_data dte_data_mars_pro =
true
};
-static const struct si_dte_data dte_data_sun_xt =
-{
+static const struct si_dte_data dte_data_sun_xt = {
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
@@ -1626,8 +1584,7 @@ static const struct si_dte_data dte_data_sun_xt =
};
-static const struct si_cac_config_reg cac_weights_hainan[] =
-{
+static const struct si_cac_config_reg cac_weights_hainan[] = {
{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
@@ -1691,8 +1648,7 @@ static const struct si_cac_config_reg cac_weights_hainan[] =
{ 0xFFFFFFFF }
};
-static const struct si_powertune_data powertune_data_hainan =
-{
+static const struct si_powertune_data powertune_data_hainan = {
((1 << 16) | 0x6993),
5,
0,
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index aa857906ef93..4887edebd348 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -26,15 +26,13 @@
#include "ni_dpm.h"
#include "sislands_smc.h"
-enum si_cac_config_reg_type
-{
+enum si_cac_config_reg_type {
SISLANDS_CACCONFIG_MMR = 0,
SISLANDS_CACCONFIG_CGIND,
SISLANDS_CACCONFIG_MAX
};
-struct si_cac_config_reg
-{
+struct si_cac_config_reg {
u32 offset;
u32 mask;
u32 shift;
@@ -42,8 +40,7 @@ struct si_cac_config_reg
enum si_cac_config_reg_type type;
};
-struct si_powertune_data
-{
+struct si_powertune_data {
u32 cac_window;
u32 l2_lta_window_size_default;
u8 lts_truncate_default;
@@ -56,8 +53,7 @@ struct si_powertune_data
bool enable_powertune_by_default;
};
-struct si_dyn_powertune_data
-{
+struct si_dyn_powertune_data {
u32 cac_leakage;
s32 leakage_minimum_temperature;
u32 wintime;
@@ -68,8 +64,7 @@ struct si_dyn_powertune_data
bool disable_uvd_powertune;
};
-struct si_dte_data
-{
+struct si_dte_data {
u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
u32 k;
@@ -122,8 +117,7 @@ struct si_mc_reg_table {
#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
-struct si_leakage_voltage_entry
-{
+struct si_leakage_voltage_entry {
u16 voltage;
u16 leakage_index;
};
@@ -131,8 +125,7 @@ struct si_leakage_voltage_entry
#define SISLANDS_LEAKAGE_INDEX0 0xff01
#define SISLANDS_MAX_LEAKAGE_COUNT 4
-struct si_leakage_voltage
-{
+struct si_leakage_voltage {
u16 count;
struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
};
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
index 75a380a15292..985d720dbc0d 100644
--- a/drivers/gpu/drm/radeon/smu7.h
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -82,8 +82,7 @@
#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
-struct SMU7_PIDController
-{
+struct SMU7_PIDController {
uint32_t Ki;
int32_t LFWindupUL;
int32_t LFWindupLL;
@@ -117,8 +116,7 @@ typedef struct SMU7_PIDController SMU7_PIDController;
#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
-struct SMU7_Firmware_Header
-{
+struct SMU7_Firmware_Header {
uint32_t Digest[5];
uint32_t Version;
uint32_t HeaderSize;
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
index 0b0b404ff091..1f63cbbd6515 100644
--- a/drivers/gpu/drm/radeon/smu7_discrete.h
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -35,8 +35,7 @@
#define SMU7_NUM_GPU_TES 1
#define SMU7_NUM_NON_TES 2
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerP;
uint32_t FeatureEnables;
@@ -89,8 +88,7 @@ struct SMU7_SoftRegisters
typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
-struct SMU7_Discrete_VoltageLevel
-{
+struct SMU7_Discrete_VoltageLevel {
uint16_t Voltage;
uint16_t StdVoltageHiSidd;
uint16_t StdVoltageLoSidd;
@@ -100,8 +98,7 @@ struct SMU7_Discrete_VoltageLevel
typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
-struct SMU7_Discrete_GraphicsLevel
-{
+struct SMU7_Discrete_GraphicsLevel {
uint32_t Flags;
uint32_t MinVddc;
uint32_t MinVddcPhases;
@@ -131,8 +128,7 @@ struct SMU7_Discrete_GraphicsLevel
typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
-struct SMU7_Discrete_ACPILevel
-{
+struct SMU7_Discrete_ACPILevel {
uint32_t Flags;
uint32_t MinVddc;
uint32_t MinVddcPhases;
@@ -153,8 +149,7 @@ struct SMU7_Discrete_ACPILevel
typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
-struct SMU7_Discrete_Ulv
-{
+struct SMU7_Discrete_Ulv {
uint32_t CcPwrDynRm;
uint32_t CcPwrDynRm1;
uint16_t VddcOffset;
@@ -165,8 +160,7 @@ struct SMU7_Discrete_Ulv
typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
-struct SMU7_Discrete_MemoryLevel
-{
+struct SMU7_Discrete_MemoryLevel {
uint32_t MinVddc;
uint32_t MinVddcPhases;
uint32_t MinVddci;
@@ -206,8 +200,7 @@ struct SMU7_Discrete_MemoryLevel
typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
-struct SMU7_Discrete_LinkLevel
-{
+struct SMU7_Discrete_LinkLevel {
uint8_t PcieGenSpeed;
uint8_t PcieLaneCount;
uint8_t EnabledForActivity;
@@ -220,8 +213,7 @@ struct SMU7_Discrete_LinkLevel
typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
-struct SMU7_Discrete_MCArbDramTimingTableEntry
-{
+struct SMU7_Discrete_MCArbDramTimingTableEntry {
uint32_t McArbDramTiming;
uint32_t McArbDramTiming2;
uint8_t McArbBurstTime;
@@ -230,15 +222,13 @@ struct SMU7_Discrete_MCArbDramTimingTableEntry
typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
-struct SMU7_Discrete_MCArbDramTimingTable
-{
+struct SMU7_Discrete_MCArbDramTimingTable {
SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
};
typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
-struct SMU7_Discrete_UvdLevel
-{
+struct SMU7_Discrete_UvdLevel {
uint32_t VclkFrequency;
uint32_t DclkFrequency;
uint16_t MinVddc;
@@ -250,8 +240,7 @@ struct SMU7_Discrete_UvdLevel
typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
-struct SMU7_Discrete_ExtClkLevel
-{
+struct SMU7_Discrete_ExtClkLevel {
uint32_t Frequency;
uint16_t MinVoltage;
uint8_t MinPhases;
@@ -260,8 +249,7 @@ struct SMU7_Discrete_ExtClkLevel
typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
-struct SMU7_Discrete_StateInfo
-{
+struct SMU7_Discrete_StateInfo {
uint32_t SclkFrequency;
uint32_t MclkFrequency;
uint32_t VclkFrequency;
@@ -285,8 +273,7 @@ struct SMU7_Discrete_StateInfo
typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
-struct SMU7_Discrete_DpmTable
-{
+struct SMU7_Discrete_DpmTable {
SMU7_PIDController GraphicsPIDController;
SMU7_PIDController MemoryPIDController;
SMU7_PIDController LinkPIDController;
@@ -406,23 +393,20 @@ typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
-struct SMU7_Discrete_MCRegisterAddress
-{
+struct SMU7_Discrete_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
-struct SMU7_Discrete_MCRegisterSet
-{
+struct SMU7_Discrete_MCRegisterSet {
uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
-struct SMU7_Discrete_MCRegisters
-{
+struct SMU7_Discrete_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
@@ -431,8 +415,7 @@ struct SMU7_Discrete_MCRegisters
typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
-struct SMU7_Discrete_FanTable
-{
+struct SMU7_Discrete_FanTable {
uint16_t FdoMode;
int16_t TempMin;
int16_t TempMed;
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
index 78ada9ffd508..e130f52fe8d6 100644
--- a/drivers/gpu/drm/radeon/smu7_fusion.h
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -36,8 +36,7 @@
#define SMU7_NUM_NON_TES 2
// All 'soft registers' should be uint32_t.
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerP;
uint32_t FeatureEnables;
@@ -80,8 +79,7 @@ struct SMU7_SoftRegisters
typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
-struct SMU7_Fusion_GraphicsLevel
-{
+struct SMU7_Fusion_GraphicsLevel {
uint32_t MinVddNb;
uint32_t SclkFrequency;
@@ -111,8 +109,7 @@ struct SMU7_Fusion_GraphicsLevel
typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
-struct SMU7_Fusion_GIOLevel
-{
+struct SMU7_Fusion_GIOLevel {
uint8_t EnabledForActivity;
uint8_t LclkDid;
uint8_t Vid;
@@ -137,8 +134,7 @@ struct SMU7_Fusion_GIOLevel
typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
// UVD VCLK/DCLK state (level) definition.
-struct SMU7_Fusion_UvdLevel
-{
+struct SMU7_Fusion_UvdLevel {
uint32_t VclkFrequency;
uint32_t DclkFrequency;
uint16_t MinVddNb;
@@ -155,8 +151,7 @@ struct SMU7_Fusion_UvdLevel
typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
// Clocks for other external blocks (VCE, ACP, SAMU).
-struct SMU7_Fusion_ExtClkLevel
-{
+struct SMU7_Fusion_ExtClkLevel {
uint32_t Frequency;
uint16_t MinVoltage;
uint8_t Divider;
@@ -166,8 +161,7 @@ struct SMU7_Fusion_ExtClkLevel
};
typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
-struct SMU7_Fusion_ACPILevel
-{
+struct SMU7_Fusion_ACPILevel {
uint32_t Flags;
uint32_t MinVddNb;
uint32_t SclkFrequency;
@@ -181,8 +175,7 @@ struct SMU7_Fusion_ACPILevel
typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
-struct SMU7_Fusion_NbDpm
-{
+struct SMU7_Fusion_NbDpm {
uint8_t DpmXNbPsHi;
uint8_t DpmXNbPsLo;
uint8_t Dpm0PgNbPsHi;
@@ -197,8 +190,7 @@ struct SMU7_Fusion_NbDpm
typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
-struct SMU7_Fusion_StateInfo
-{
+struct SMU7_Fusion_StateInfo {
uint32_t SclkFrequency;
uint32_t LclkFrequency;
uint32_t VclkFrequency;
@@ -214,8 +206,7 @@ struct SMU7_Fusion_StateInfo
typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
-struct SMU7_Fusion_DpmTable
-{
+struct SMU7_Fusion_DpmTable {
uint32_t SystemFlags;
SMU7_PIDController GraphicsPIDController;
@@ -230,12 +221,12 @@ struct SMU7_Fusion_DpmTable
uint8_t SamuLevelCount;
uint16_t FpsHighT;
- SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_GraphicsLevel GraphicsLevel[SMU__NUM_SCLK_DPM_STATE];
SMU7_Fusion_ACPILevel ACPILevel;
- SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
- SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
- SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
- SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Fusion_UvdLevel UvdLevel[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel[SMU7_MAX_LEVELS_SAMU];
uint8_t UvdBootLevel;
uint8_t VceBootLevel;
@@ -266,10 +257,9 @@ struct SMU7_Fusion_DpmTable
};
-struct SMU7_Fusion_GIODpmTable
-{
+struct SMU7_Fusion_GIODpmTable {
- SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+ SMU7_Fusion_GIOLevel GIOLevel[SMU7_MAX_LEVELS_GIO];
SMU7_PIDController GioPIDController;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index d49c145db437..21d27e6235f3 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -33,8 +33,7 @@
#define SUMO_MINIMUM_ENGINE_CLOCK 800
#define BOOST_DPM_LEVEL 7
-static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
-{
+static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = {
SUMO_UTC_DFLT_00,
SUMO_UTC_DFLT_01,
SUMO_UTC_DFLT_02,
@@ -52,8 +51,7 @@ static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
SUMO_UTC_DFLT_14,
};
-static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
-{
+static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = {
SUMO_DTC_DFLT_00,
SUMO_DTC_DFLT_01,
SUMO_DTC_DFLT_02,
@@ -109,11 +107,11 @@ static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
local1 = RREG32(CG_CGTT_LOCAL_1);
if (enable) {
- WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
- WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
+ WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
} else {
- WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
- WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
+ WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
}
}
@@ -702,9 +700,9 @@ static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
u32 nbps1_new = 0;
if (old_ps != NULL)
- nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
+ nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
- nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
+ nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
if (nbps1_old == 0 && nbps1_new == 1)
sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index ef1cc7bad20a..b9a2c7ccc881 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -39,8 +39,7 @@
#ifndef TRINITY_MGCG_SEQUENCE
#define TRINITY_MGCG_SEQUENCE 100
-static const u32 trinity_mgcg_shls_default[] =
-{
+static const u32 trinity_mgcg_shls_default[] = {
/* Register, Value, Mask */
0x0000802c, 0xc0000000, 0xffffffff,
0x00003fc4, 0xc0000000, 0xffffffff,
@@ -122,8 +121,7 @@ static const u32 trinity_mgcg_shls_default[] =
#ifndef TRINITY_SYSLS_SEQUENCE
#define TRINITY_SYSLS_SEQUENCE 100
-static const u32 trinity_sysls_disable[] =
-{
+static const u32 trinity_sysls_disable[] = {
/* Register, Value, Mask */
0x0000d0c0, 0x00000000, 0xffffffff,
0x0000d8c0, 0x00000000, 0xffffffff,
@@ -146,8 +144,7 @@ static const u32 trinity_sysls_disable[] =
0x00006dfc, 0x0000007f, 0xffffffff
};
-static const u32 trinity_sysls_enable[] =
-{
+static const u32 trinity_sysls_enable[] = {
/* Register, Value, Mask */
0x000055e8, 0x00000001, 0xffffffff,
0x0000d0bc, 0x00000100, 0xffffffff,
@@ -169,8 +166,7 @@ static const u32 trinity_sysls_enable[] =
};
#endif
-static const u32 trinity_override_mgpg_sequences[] =
-{
+static const u32 trinity_override_mgpg_sequences[] = {
/* Register, Value */
0x00000200, 0xE030032C,
0x00000204, 0x00000FFF,
@@ -366,9 +362,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
local1 = RREG32_CG(CG_CGTT_LOCAL_1);
WREG32_CG(CG_CGTT_LOCAL_0,
- (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+ (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
WREG32_CG(CG_CGTT_LOCAL_1,
- (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE);
} else {
@@ -378,9 +374,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
local1 = RREG32_CG(CG_CGTT_LOCAL_1);
WREG32_CG(CG_CGTT_LOCAL_0,
- CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+ CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
WREG32_CG(CG_CGTT_LOCAL_1,
- CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+ CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
}
}
@@ -1434,7 +1430,7 @@ static void trinity_adjust_uvd_state(struct radeon_device *rdev,
if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) {
high_index = trinity_get_uvd_clock_index(rdev, rps);
- switch(high_index) {
+ switch (high_index) {
case 3:
case 2:
low_index = 1;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index c261657750ca..431e2b68d21e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -64,8 +64,7 @@ struct trinity_ps {
#define TRINITY_NUM_NBPSTATES 4
-struct trinity_uvd_clock_table_entry
-{
+struct trinity_uvd_clock_table_entry {
u32 vclk;
u32 dclk;
u8 vclk_did;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 58557c2263a7..5684639d20a6 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -142,7 +142,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
- WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));
+ WREG32(UVD_FW_START, *((uint32_t *)rdev->uvd.cpu_addr));
return 0;
}
diff --git a/drivers/gpu/drm/renesas/Kconfig b/drivers/gpu/drm/renesas/Kconfig
index 3777dad17f81..21862a8ef710 100644
--- a/drivers/gpu/drm/renesas/Kconfig
+++ b/drivers/gpu/drm/renesas/Kconfig
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
source "drivers/gpu/drm/renesas/rcar-du/Kconfig"
+source "drivers/gpu/drm/renesas/rz-du/Kconfig"
source "drivers/gpu/drm/renesas/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/renesas/Makefile b/drivers/gpu/drm/renesas/Makefile
index ec0e89e7a592..b8d8bc53967f 100644
--- a/drivers/gpu/drm/renesas/Makefile
+++ b/drivers/gpu/drm/renesas/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += rcar-du/
+obj-y += rz-du/
obj-$(CONFIG_DRM_SHMOBILE) += shmobile/
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
new file mode 100644
index 000000000000..5f0db2c5fee6
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config DRM_RZG2L_DU
+ tristate "DRM Support for RZ/G2L Display Unit"
+ depends on ARCH_RZG2L || COMPILE_TEST
+ depends on DRM && OF
+ depends on VIDEO_RENESAS_VSP1
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ help
+	  Choose this option if you have an RZ/G2L family SoC.
+ If M is selected the module will be called rzg2l-du-drm.
diff --git a/drivers/gpu/drm/renesas/rz-du/Makefile b/drivers/gpu/drm/renesas/rz-du/Makefile
new file mode 100644
index 000000000000..663b82a2577f
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+rzg2l-du-drm-y := rzg2l_du_crtc.o \
+ rzg2l_du_drv.o \
+ rzg2l_du_encoder.o \
+		   rzg2l_du_kms.o
+
+rzg2l-du-drm-$(CONFIG_VIDEO_RENESAS_VSP1) += rzg2l_du_vsp.o
+obj-$(CONFIG_DRM_RZG2L_DU) += rzg2l-du-drm.o
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
new file mode 100644
index 000000000000..6e7aac6219be
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit CRTCs
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_crtc.c
+ */
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "rzg2l_du_crtc.h"
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_encoder.h"
+#include "rzg2l_du_kms.h"
+#include "rzg2l_du_vsp.h"
+
+#define DU_MCR0 0x00
+#define DU_MCR0_DI_EN BIT(8)
+
+#define DU_DITR0 0x10
+#define DU_DITR0_DEMD_HIGH (BIT(8) | BIT(9))
+#define DU_DITR0_VSPOL BIT(16)
+#define DU_DITR0_HSPOL BIT(17)
+
+#define DU_DITR1 0x14
+#define DU_DITR1_VSA(x) ((x) << 0)
+#define DU_DITR1_VACTIVE(x) ((x) << 16)
+
+#define DU_DITR2 0x18
+#define DU_DITR2_VBP(x) ((x) << 0)
+#define DU_DITR2_VFP(x) ((x) << 16)
+
+#define DU_DITR3 0x1c
+#define DU_DITR3_HSA(x) ((x) << 0)
+#define DU_DITR3_HACTIVE(x) ((x) << 16)
+
+#define DU_DITR4 0x20
+#define DU_DITR4_HBP(x) ((x) << 0)
+#define DU_DITR4_HFP(x) ((x) << 16)
+
+#define DU_MCR1 0x40
+#define DU_MCR1_PB_AUTOCLR BIT(16)
+
+#define DU_PBCR0 0x4c
+#define DU_PBCR0_PB_DEP(x) ((x) << 0)
+
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
+static void rzg2l_du_crtc_set_display_timing(struct rzg2l_du_crtc *rcrtc)
+{
+ const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
+ unsigned long mode_clock = mode->clock * 1000;
+ u32 ditr0, ditr1, ditr2, ditr3, ditr4, pbcr0;
+ struct rzg2l_du_device *rcdu = rcrtc->dev;
+
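+	/* Enable the video (dot) clock and set it to the mode's pixel clock rate. */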
+ clk_prepare_enable(rcrtc->rzg2l_clocks.dclk);
+ clk_set_rate(rcrtc->rzg2l_clocks.dclk, mode_clock);
+
+ ditr0 = (DU_DITR0_DEMD_HIGH
+ | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DU_DITR0_VSPOL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DU_DITR0_HSPOL : 0));
+
+ ditr1 = DU_DITR1_VSA(mode->vsync_end - mode->vsync_start)
+ | DU_DITR1_VACTIVE(mode->vdisplay);
+
+ ditr2 = DU_DITR2_VBP(mode->vtotal - mode->vsync_end)
+ | DU_DITR2_VFP(mode->vsync_start - mode->vdisplay);
+
+ ditr3 = DU_DITR3_HSA(mode->hsync_end - mode->hsync_start)
+ | DU_DITR3_HACTIVE(mode->hdisplay);
+
+ ditr4 = DU_DITR4_HBP(mode->htotal - mode->hsync_end)
+ | DU_DITR4_HFP(mode->hsync_start - mode->hdisplay);
+
+ pbcr0 = DU_PBCR0_PB_DEP(0x1f);
+
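+	/* Write the computed timing values and the PB depth to the hardware. */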
+ writel(ditr0, rcdu->mmio + DU_DITR0);
+ writel(ditr1, rcdu->mmio + DU_DITR1);
+ writel(ditr2, rcdu->mmio + DU_DITR2);
+ writel(ditr3, rcdu->mmio + DU_DITR3);
+ writel(ditr4, rcdu->mmio + DU_DITR4);
+ writel(pbcr0, rcdu->mmio + DU_PBCR0);
+
+ /* Enable auto clear */
+ writel(DU_MCR1_PB_AUTOCLR, rcdu->mmio + DU_MCR1);
+}
+
+/* -----------------------------------------------------------------------------
+ * Page Flip
+ */
+
+void rzg2l_du_crtc_finish_page_flip(struct rzg2l_du_crtc *rcrtc)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = rcrtc->event;
+ rcrtc->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (!event)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(&rcrtc->crtc, event);
+ wake_up(&rcrtc->flip_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_crtc_vblank_put(&rcrtc->crtc);
+}
+
+static bool rzg2l_du_crtc_page_flip_pending(struct rzg2l_du_crtc *rcrtc)
+{
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+ bool pending;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = rcrtc->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
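+/* Wait up to 50 ms for a pending page flip and force-complete it on timeout. */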
+static void rzg2l_du_crtc_wait_page_flip(struct rzg2l_du_crtc *rcrtc)
+{
+ struct rzg2l_du_device *rcdu = rcrtc->dev;
+
+ if (wait_event_timeout(rcrtc->flip_wait,
+ !rzg2l_du_crtc_page_flip_pending(rcrtc),
+ msecs_to_jiffies(50)))
+ return;
+
+ dev_warn(rcdu->dev, "page flip timeout\n");
+
+ rzg2l_du_crtc_finish_page_flip(rcrtc);
+}
+
+/* -----------------------------------------------------------------------------
+ * Start/Stop and Suspend/Resume
+ */
+
+static void rzg2l_du_crtc_setup(struct rzg2l_du_crtc *rcrtc)
+{
+ /* Configure display timings and output routing */
+ rzg2l_du_crtc_set_display_timing(rcrtc);
+
+ /* Enable the VSP compositor. */
+ rzg2l_du_vsp_enable(rcrtc);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(&rcrtc->crtc);
+}
+
+static int rzg2l_du_crtc_get(struct rzg2l_du_crtc *rcrtc)
+{
+ int ret;
+
+ /*
+ * Guard against double-get, as the function is called from both the
+ * .atomic_enable() and .atomic_flush() handlers.
+ */
+ if (rcrtc->initialized)
+ return 0;
+
+ ret = clk_prepare_enable(rcrtc->rzg2l_clocks.aclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(rcrtc->rzg2l_clocks.pclk);
+ if (ret < 0)
+ goto error_bus_clock;
+
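+	/* Release the DU from reset once the AXI and peripheral clocks are running. */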
+ ret = reset_control_deassert(rcrtc->rstc);
+ if (ret < 0)
+ goto error_peri_clock;
+
+ rzg2l_du_crtc_setup(rcrtc);
+ rcrtc->initialized = true;
+
+ return 0;
+
+error_peri_clock:
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.pclk);
+error_bus_clock:
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.aclk);
+ return ret;
+}
+
+static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
+{
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.dclk);
+ reset_control_assert(rcrtc->rstc);
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.pclk);
+ clk_disable_unprepare(rcrtc->rzg2l_clocks.aclk);
+
+ rcrtc->initialized = false;
+}
+
+static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
+{
+ struct rzg2l_du_device *rcdu = rcrtc->dev;
+
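+	/* The DI_EN bit in MCR0 enables or disables the display interface. */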
+ writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
+}
+
+static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
+{
+ rzg2l_du_start_stop(rcrtc, true);
+}
+
+static void rzg2l_du_crtc_stop(struct rzg2l_du_crtc *rcrtc)
+{
+ struct drm_crtc *crtc = &rcrtc->crtc;
+
+ /*
+ * Disable vertical blanking interrupt reporting. We first need to wait
+ * for page flip completion before stopping the CRTC as userspace
+ * expects page flips to eventually complete.
+ */
+ rzg2l_du_crtc_wait_page_flip(rcrtc);
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable the VSP compositor. */
+ rzg2l_du_vsp_disable(rcrtc);
+
+ rzg2l_du_start_stop(rcrtc, false);
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
+ */
+
+static void rzg2l_du_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rzg2l_du_crtc_get(rcrtc);
+
+ rzg2l_du_crtc_start(rcrtc);
+}
+
+static void rzg2l_du_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rzg2l_du_crtc_stop(rcrtc);
+ rzg2l_du_crtc_put(rcrtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void rzg2l_du_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+ struct drm_device *dev = rcrtc->crtc.dev;
+ unsigned long flags;
+
+ WARN_ON(!crtc->state->enable);
+
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ rcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
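+	/* Push the updated plane configuration to the VSP compositor. */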
+ rzg2l_du_vsp_atomic_flush(rcrtc);
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+ .atomic_flush = rzg2l_du_crtc_atomic_flush,
+ .atomic_enable = rzg2l_du_crtc_atomic_enable,
+ .atomic_disable = rzg2l_du_crtc_atomic_disable,
+};
+
+static struct drm_crtc_state *
+rzg2l_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc_state *state;
+ struct rzg2l_du_crtc_state *copy;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = to_rzg2l_crtc_state(crtc->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);
+
+ return &copy->state;
+}
+
+static void rzg2l_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_rzg2l_crtc_state(state));
+}
+
+static void rzg2l_du_crtc_reset(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc_state *state;
+
+ if (crtc->state) {
+ rzg2l_du_crtc_atomic_destroy_state(crtc, crtc->state);
+ crtc->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &state->state);
+}
+
+static int rzg2l_du_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rcrtc->vblank_enable = true;
+
+ return 0;
+}
+
+static void rzg2l_du_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc);
+
+ rcrtc->vblank_enable = false;
+}
+
+static const struct drm_crtc_funcs crtc_funcs_rz = {
+ .reset = rzg2l_du_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = rzg2l_du_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = rzg2l_du_crtc_atomic_destroy_state,
+ .enable_vblank = rzg2l_du_crtc_enable_vblank,
+ .disable_vblank = rzg2l_du_crtc_disable_vblank,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int rzg2l_du_crtc_create(struct rzg2l_du_device *rcdu)
+{
+ struct rzg2l_du_crtc *rcrtc = &rcdu->crtcs[0];
+ struct drm_crtc *crtc = &rcrtc->crtc;
+ struct drm_plane *primary;
+ int ret;
+
+ rcrtc->rstc = devm_reset_control_get_shared(rcdu->dev, NULL);
+ if (IS_ERR(rcrtc->rstc)) {
+ dev_err(rcdu->dev, "can't get cpg reset\n");
+ return PTR_ERR(rcrtc->rstc);
+ }
+
+ rcrtc->rzg2l_clocks.aclk = devm_clk_get(rcdu->dev, "aclk");
+ if (IS_ERR(rcrtc->rzg2l_clocks.aclk)) {
+ dev_err(rcdu->dev, "no axi clock for DU\n");
+ return PTR_ERR(rcrtc->rzg2l_clocks.aclk);
+ }
+
+ rcrtc->rzg2l_clocks.pclk = devm_clk_get(rcdu->dev, "pclk");
+ if (IS_ERR(rcrtc->rzg2l_clocks.pclk)) {
+ dev_err(rcdu->dev, "no peripheral clock for DU\n");
+ return PTR_ERR(rcrtc->rzg2l_clocks.pclk);
+ }
+
+ rcrtc->rzg2l_clocks.dclk = devm_clk_get(rcdu->dev, "vclk");
+ if (IS_ERR(rcrtc->rzg2l_clocks.dclk)) {
+ dev_err(rcdu->dev, "no video clock for DU\n");
+ return PTR_ERR(rcrtc->rzg2l_clocks.dclk);
+ }
+
+ init_waitqueue_head(&rcrtc->flip_wait);
+ rcrtc->dev = rcdu;
+
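+	/* The primary plane is provided by the VSP compositor. */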
+ primary = rzg2l_du_vsp_get_drm_plane(rcrtc, rcrtc->vsp_pipe);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drmm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
+ &crtc_funcs_rz, NULL);
+ if (ret < 0)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h
new file mode 100644
index 000000000000..cbba38acc377
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit CRTCs
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_crtc.h
+ */
+
+#ifndef __RZG2L_DU_CRTC_H__
+#define __RZG2L_DU_CRTC_H__
+
+#include <linux/container_of.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_writeback.h>
+
+#include <media/vsp1.h>
+
+struct clk;
+struct reset_control;
+struct rzg2l_du_vsp;
+struct rzg2l_du_format_info;
+
+/**
+ * struct rzg2l_du_crtc - the CRTC, representing a DU superposition processor
+ * @crtc: base DRM CRTC
+ * @dev: the DU device
+ * @initialized: whether the CRTC has been initialized and clocks enabled
+ * @vblank_enable: whether vblank events are enabled on this CRTC
+ * @event: event to post when the pending page flip completes
+ * @flip_wait: wait queue used to signal page flip completion
+ * @vsp: VSP feeding video to this CRTC
+ * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
+ * @sources: CRC sources supported by the CRTC
+ * @sources_count: number of entries in @sources
+ * @rstc: reset controller
+ * @rzg2l_clocks: the bus, main and video clock
+ */
+struct rzg2l_du_crtc {
+ struct drm_crtc crtc;
+
+ struct rzg2l_du_device *dev;
+ bool initialized;
+
+ bool vblank_enable;
+ struct drm_pending_vblank_event *event;
+ wait_queue_head_t flip_wait;
+
+ struct rzg2l_du_vsp *vsp;
+ unsigned int vsp_pipe;
+
+ const char *const *sources;
+ unsigned int sources_count;
+
+ struct reset_control *rstc;
+ struct {
+ struct clk *aclk;
+ struct clk *pclk;
+ struct clk *dclk;
+ } rzg2l_clocks;
+};
+
+static inline struct rzg2l_du_crtc *to_rzg2l_crtc(struct drm_crtc *c)
+{
+ return container_of(c, struct rzg2l_du_crtc, crtc);
+}
+
+/**
+ * struct rzg2l_du_crtc_state - Driver-specific CRTC state
+ * @state: base DRM CRTC state
+ * @outputs: bitmask of the outputs (enum rzg2l_du_output) driven by this CRTC
+ */
+struct rzg2l_du_crtc_state {
+ struct drm_crtc_state state;
+ unsigned int outputs;
+};
+
+static inline struct rzg2l_du_crtc_state *to_rzg2l_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct rzg2l_du_crtc_state, state);
+}
+
+int rzg2l_du_crtc_create(struct rzg2l_du_device *rcdu);
+
+void rzg2l_du_crtc_finish_page_flip(struct rzg2l_du_crtc *rcrtc);
+
+#endif /* __RZG2L_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
new file mode 100644
index 000000000000..470d34da1d6c
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit DRM driver
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_drv.c
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_kms.h"
+
+/* -----------------------------------------------------------------------------
+ * Device Information
+ */
+
+static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DSI0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ [RZG2L_DU_OUTPUT_DPAD0] = {
+ .possible_outputs = BIT(0),
+ .port = 1,
+ }
+ }
+};
+
+static const struct of_device_id rzg2l_du_of_table[] = {
+ { .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rzg2l_du_of_table);
+
+const char *rzg2l_du_output_name(enum rzg2l_du_output output)
+{
+ static const char * const names[] = {
+ [RZG2L_DU_OUTPUT_DSI0] = "DSI0",
+ [RZG2L_DU_OUTPUT_DPAD0] = "DPAD0"
+ };
+
+ if (output >= ARRAY_SIZE(names))
+ return "UNKNOWN";
+
+ return names[output];
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
+
+static const struct drm_driver rzg2l_du_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .dumb_create = rzg2l_du_dumb_create,
+ .fops = &rzg2l_du_fops,
+ .name = "rzg2l-du",
+ .desc = "Renesas RZ/G2L Display Unit",
+ .date = "20230410",
+ .major = 1,
+ .minor = 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static void rzg2l_du_remove(struct platform_device *pdev)
+{
+ struct rzg2l_du_device *rcdu = platform_get_drvdata(pdev);
+ struct drm_device *ddev = &rcdu->ddev;
+
+ drm_dev_unregister(ddev);
+ drm_atomic_helper_shutdown(ddev);
+
+ drm_kms_helper_poll_fini(ddev);
+}
+
+static void rzg2l_du_shutdown(struct platform_device *pdev)
+{
+ struct rzg2l_du_device *rcdu = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&rcdu->ddev);
+}
+
+static int rzg2l_du_probe(struct platform_device *pdev)
+{
+ struct rzg2l_du_device *rcdu;
+ int ret;
+
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ /* Allocate and initialize the RZ/G2L device structure. */
+ rcdu = devm_drm_dev_alloc(&pdev->dev, &rzg2l_du_driver,
+ struct rzg2l_du_device, ddev);
+ if (IS_ERR(rcdu))
+ return PTR_ERR(rcdu);
+
+ rcdu->dev = &pdev->dev;
+ rcdu->info = of_device_get_match_data(rcdu->dev);
+
+ platform_set_drvdata(pdev, rcdu);
+
+ /* I/O resources */
+ rcdu->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
+
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ /* DRM/KMS objects */
+ ret = rzg2l_du_modeset_init(rcdu);
+ if (ret < 0) {
+ /*
+ * Don't use dev_err_probe(), as it would overwrite the probe
+ * deferral reason recorded in rzg2l_du_modeset_init().
+ */
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to initialize DRM/KMS (%d)\n", ret);
+ goto error;
+ }
+
+ /*
+ * Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ ret = drm_dev_register(&rcdu->ddev, 0);
+ if (ret)
+ goto error;
+
+ drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
+
+ drm_fbdev_generic_setup(&rcdu->ddev, 32);
+
+ return 0;
+
+error:
+ drm_kms_helper_poll_fini(&rcdu->ddev);
+ return ret;
+}
+
+static struct platform_driver rzg2l_du_platform_driver = {
+ .probe = rzg2l_du_probe,
+ .remove_new = rzg2l_du_remove,
+ .shutdown = rzg2l_du_shutdown,
+ .driver = {
+ .name = "rzg2l-du",
+ .of_match_table = rzg2l_du_of_table,
+ },
+};
+
+module_platform_driver(rzg2l_du_platform_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L Display Unit DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h
new file mode 100644
index 000000000000..58806c2a8f2b
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit DRM driver
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_drv.h
+ */
+
+#ifndef __RZG2L_DU_DRV_H__
+#define __RZG2L_DU_DRV_H__
+
+#include <linux/kernel.h>
+
+#include <drm/drm_device.h>
+
+#include "rzg2l_du_crtc.h"
+#include "rzg2l_du_vsp.h"
+
+struct device;
+struct drm_property;
+
+enum rzg2l_du_output {
+ RZG2L_DU_OUTPUT_DSI0,
+ RZG2L_DU_OUTPUT_DPAD0,
+ RZG2L_DU_OUTPUT_MAX,
+};
+
+/*
+ * struct rzg2l_du_output_routing - Output routing specification
+ * @possible_outputs: bitmask of possible outputs
+ * @port: device tree port number corresponding to this output route
+ *
+ * The DU has two possible outputs (DPAD0, DSI0). The output routing data
+ * specify the valid SoC outputs, which CRTC can drive each output, and the
+ * type of in-SoC encoder for the output.
+ */
+struct rzg2l_du_output_routing {
+ unsigned int possible_outputs;
+ unsigned int port;
+};
+
+/*
+ * struct rzg2l_du_device_info - DU model-specific information
+ * @channels_mask: bit mask of available DU channels
+ * @routes: array of CRTC to output routes, indexed by output (RZG2L_DU_OUTPUT_*)
+ */
+struct rzg2l_du_device_info {
+ unsigned int channels_mask;
+ struct rzg2l_du_output_routing routes[RZG2L_DU_OUTPUT_MAX];
+};
+
+#define RZG2L_DU_MAX_CRTCS 1
+#define RZG2L_DU_MAX_VSPS 1
+#define RZG2L_DU_MAX_DSI 1
+
+struct rzg2l_du_device {
+ struct device *dev;
+ const struct rzg2l_du_device_info *info;
+
+ void __iomem *mmio;
+
+ struct drm_device ddev;
+
+ struct rzg2l_du_crtc crtcs[RZG2L_DU_MAX_CRTCS];
+ unsigned int num_crtcs;
+
+ struct rzg2l_du_vsp vsps[RZG2L_DU_MAX_VSPS];
+};
+
+static inline struct rzg2l_du_device *to_rzg2l_du_device(struct drm_device *dev)
+{
+ return container_of(dev, struct rzg2l_du_device, ddev);
+}
+
+const char *rzg2l_du_output_name(enum rzg2l_du_output output);
+
+#endif /* __RZG2L_DU_DRV_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
new file mode 100644
index 000000000000..339cbaaea0b5
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit Encoder
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_encoder.c
+ */
+
+#include <linux/export.h>
+#include <linux/of.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_panel.h>
+
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_encoder.h"
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
+};
+
+int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
+ enum rzg2l_du_output output,
+ struct device_node *enc_node)
+{
+ struct rzg2l_du_encoder *renc;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+ int ret;
+
+ /* Locate the DRM bridge from the DT node. */
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
+ enc_node, rzg2l_du_output_name(output));
+
+ renc = drmm_encoder_alloc(&rcdu->ddev, struct rzg2l_du_encoder, base,
+ &rzg2l_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+ NULL);
+ if (IS_ERR(renc))
+ return PTR_ERR(renc);
+
+ renc->output = output;
+
+ /* Attach the bridge to the encoder. */
+ ret = drm_bridge_attach(&renc->base, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(rcdu->dev,
+ "failed to attach bridge %pOF for output %s (%d)\n",
+ bridge->of_node, rzg2l_du_output_name(output), ret);
+ return ret;
+ }
+
+ /* Create the connector for the chain of bridges. */
+ connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base);
+ if (IS_ERR(connector)) {
+ dev_err(rcdu->dev,
+			"failed to create connector for output %s (%ld)\n",
+ rzg2l_du_output_name(output), PTR_ERR(connector));
+ return PTR_ERR(connector);
+ }
+
+ return drm_connector_attach_encoder(connector, &renc->base);
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h
new file mode 100644
index 000000000000..3e430c1f6132
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit Encoder
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_encoder.h
+ */
+
+#ifndef __RZG2L_DU_ENCODER_H__
+#define __RZG2L_DU_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+#include <linux/container_of.h>
+
+struct rzg2l_du_device;
+
+struct rzg2l_du_encoder {
+ struct drm_encoder base;
+ enum rzg2l_du_output output;
+};
+
+static inline struct rzg2l_du_encoder *to_rzg2l_encoder(struct drm_encoder *e)
+{
+ return container_of(e, struct rzg2l_du_encoder, base);
+}
+
+int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
+ enum rzg2l_du_output output,
+ struct device_node *enc_node);
+
+#endif /* __RZG2L_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
new file mode 100644
index 000000000000..07b312b6f81e
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit Mode Setting
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_kms.c
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "rzg2l_du_crtc.h"
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_encoder.h"
+#include "rzg2l_du_kms.h"
+#include "rzg2l_du_vsp.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
+ {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB888,
+ .v4l2 = V4L2_PIX_FMT_BGR24,
+ .bpp = 24,
+ .planes = 1,
+ .hsub = 1,
+ }
+};
+
+const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rzg2l_du_format_infos); ++i) {
+ if (rzg2l_du_format_infos[i].fourcc == fourcc)
+ return &rzg2l_du_format_infos[i];
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int align = 16 * args->bpp / 8;
+
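+	/* Align the pitch to a multiple of 16 pixels, expressed in bytes. */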
+ args->pitch = roundup(min_pitch, align);
+
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
+}
+
+static struct drm_framebuffer *
+rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct rzg2l_du_format_info *format;
+ unsigned int max_pitch;
+
+ format = rzg2l_du_format_info(mode_cmd->pixel_format);
+ if (!format) {
+ dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
+ &mode_cmd->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+	 * On RZ/G2L the memory interface is handled by the VSP, which limits
+	 * the pitch to 65535 bytes.
+ */
+ max_pitch = 65535;
+ if (mode_cmd->pitches[0] > max_pitch) {
+ dev_dbg(dev->dev, "invalid pitch value %u\n",
+ mode_cmd->pitches[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static const struct drm_mode_config_helper_funcs rzg2l_du_mode_config_helper = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static const struct drm_mode_config_funcs rzg2l_du_mode_config_funcs = {
+ .fb_create = rzg2l_du_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int rzg2l_du_encoders_init_one(struct rzg2l_du_device *rcdu,
+ enum rzg2l_du_output output,
+ struct of_endpoint *ep)
+{
+ struct device_node *entity;
+ int ret;
+
+ /* Locate the connected entity and initialize the encoder. */
+ entity = of_graph_get_remote_port_parent(ep->local_node);
+ if (!entity) {
+ dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n",
+ ep->local_node);
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(entity)) {
+ dev_dbg(rcdu->dev,
+ "connected entity %pOF is disabled, skipping\n",
+ entity);
+ of_node_put(entity);
+ return -ENODEV;
+ }
+
+ ret = rzg2l_du_encoder_init(rcdu, output, entity);
+ if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
+ dev_warn(rcdu->dev,
+ "failed to initialize encoder %pOF on output %s (%d), skipping\n",
+ entity, rzg2l_du_output_name(output), ret);
+
+ of_node_put(entity);
+
+ return ret;
+}
+
+static int rzg2l_du_encoders_init(struct rzg2l_du_device *rcdu)
+{
+ struct device_node *np = rcdu->dev->of_node;
+ struct device_node *ep_node;
+ unsigned int num_encoders = 0;
+
+ /*
+ * Iterate over the endpoints and create one encoder for each output
+ * pipeline.
+ */
+ for_each_endpoint_of_node(np, ep_node) {
+ enum rzg2l_du_output output;
+ struct of_endpoint ep;
+ unsigned int i;
+ int ret;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret < 0) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /* Find the output route corresponding to the port number. */
+ for (i = 0; i < RZG2L_DU_OUTPUT_MAX; ++i) {
+ if (rcdu->info->routes[i].port == ep.port) {
+ output = i;
+ break;
+ }
+ }
+
+ if (i == RZG2L_DU_OUTPUT_MAX) {
+ dev_warn(rcdu->dev,
+				 "port %u references a nonexistent output, skipping\n",
+ ep.port);
+ continue;
+ }
+
+ /* Process the output pipeline. */
+ ret = rzg2l_du_encoders_init_one(rcdu, output, &ep);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ continue;
+ }
+
+ num_encoders++;
+ }
+
+ return num_encoders;
+}
+
+static int rzg2l_du_vsps_init(struct rzg2l_du_device *rcdu)
+{
+ const struct device_node *np = rcdu->dev->of_node;
+ const char *vsps_prop_name = "renesas,vsps";
+ struct of_phandle_args args;
+ struct {
+ struct device_node *np;
+ unsigned int crtcs_mask;
+ } vsps[RZG2L_DU_MAX_VSPS] = { { NULL, }, };
+ unsigned int vsps_count = 0;
+ unsigned int cells;
+ unsigned int i;
+ int ret;
+
+ /*
+ * First parse the DT vsps property to populate the list of VSPs. Each
+ * entry contains a pointer to the VSP DT node and a bitmask of the
+ * connected DU CRTCs.
+ */
+ ret = of_property_count_u32_elems(np, vsps_prop_name);
+ cells = ret / rcdu->num_crtcs - 1;
+ if (cells != 1)
+ return -EINVAL;
+
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ unsigned int j;
+
+ ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
+ cells, i, &args);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Add the VSP to the list or update the corresponding existing
+ * entry if the VSP has already been added.
+ */
+ for (j = 0; j < vsps_count; ++j) {
+ if (vsps[j].np == args.np)
+ break;
+ }
+
+ if (j < vsps_count)
+ of_node_put(args.np);
+ else
+ vsps[vsps_count++].np = args.np;
+
+ vsps[j].crtcs_mask |= BIT(i);
+
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'renesas,vsps' specifier isn't present,
+ * default to 0.
+ */
+ rcdu->crtcs[i].vsp = &rcdu->vsps[j];
+ rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
+ }
+
+ /*
+ * Then initialize all the VSPs from the node pointers and CRTCs bitmask
+ * computed previously.
+ */
+ for (i = 0; i < vsps_count; ++i) {
+ struct rzg2l_du_vsp *vsp = &rcdu->vsps[i];
+
+ vsp->index = i;
+ vsp->dev = rcdu;
+
+ ret = rzg2l_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
+ if (ret)
+ goto done;
+ }
+
+done:
+ for (i = 0; i < ARRAY_SIZE(vsps); ++i)
+ of_node_put(vsps[i].np);
+
+ return ret;
+}
+
+int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
+{
+ struct drm_device *dev = &rcdu->ddev;
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.normalize_zpos = true;
+ dev->mode_config.funcs = &rzg2l_du_mode_config_funcs;
+ dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
+
+ /*
+	 * The RZ/G2L DU uses the VSP1 for memory access and is limited to a
+	 * maximum frame size of 1920x1080.
+ */
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 1080;
+
+ rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
+
+ /*
+ * Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, rcdu->num_crtcs);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the compositors. */
+ ret = rzg2l_du_vsps_init(rcdu);
+ if (ret < 0)
+ return ret;
+
+ /* Create the CRTCs. */
+ ret = rzg2l_du_crtc_create(rcdu);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the encoders. */
+ ret = rzg2l_du_encoders_init(rcdu);
+ if (ret < 0)
+ return dev_err_probe(rcdu->dev, ret,
+ "failed to initialize encoders\n");
+
+ if (ret == 0) {
+ dev_err(rcdu->dev, "error: no encoder could be initialized\n");
+ return -EINVAL;
+ }
+
+ num_encoders = ret;
+
+ /*
+ * Set the possible CRTCs and possible clones. There's always at least
+	 * one way for all encoders to clone each other, so set all bits in the
+	 * possible clones field.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder);
+ const struct rzg2l_du_output_routing *route =
+ &rcdu->info->routes[renc->output];
+
+ encoder->possible_crtcs = route->possible_outputs;
+ encoder->possible_clones = (1 << num_encoders) - 1;
+ }
+
+ drm_mode_config_reset(dev);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
new file mode 100644
index 000000000000..876e97cfbf45
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit Mode Setting
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_kms.h
+ */
+
+#ifndef __RZG2L_DU_KMS_H__
+#define __RZG2L_DU_KMS_H__
+
+#include <linux/types.h>
+
+struct dma_buf_attachment;
+struct drm_file;
+struct drm_device;
+struct drm_gem_object;
+struct drm_mode_create_dumb;
+struct rzg2l_du_device;
+struct sg_table;
+
+struct rzg2l_du_format_info {
+ u32 fourcc;
+ u32 v4l2;
+ unsigned int bpp;
+ unsigned int planes;
+ unsigned int hsub;
+};
+
+const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc);
+
+int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu);
+
+int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+struct drm_gem_object *
+rzg2l_du_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+#endif /* __RZG2L_DU_KMS_H__ */
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
new file mode 100644
index 000000000000..0ae6331d6430
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RZ/G2L Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_vsp.c
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <media/vsp1.h>
+
+#include "rzg2l_du_drv.h"
+#include "rzg2l_du_kms.h"
+#include "rzg2l_du_vsp.h"
+
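+/* Frame completion callback registered with the VSP1 driver. */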
+static void rzg2l_du_vsp_complete(void *private, unsigned int status, u32 crc)
+{
+ struct rzg2l_du_crtc *crtc = private;
+
+ if (crtc->vblank_enable)
+ drm_crtc_handle_vblank(&crtc->crtc);
+
+ if (status & VSP1_DU_STATUS_COMPLETE)
+ rzg2l_du_crtc_finish_page_flip(crtc);
+
+ drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
+}
+
+void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc)
+{
+ const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
+ struct vsp1_du_lif_config cfg = {
+ .width = mode->hdisplay,
+ .height = mode->vdisplay,
+ .interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE,
+ .callback = rzg2l_du_vsp_complete,
+ .callback_data = crtc,
+ };
+
+ vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
+}
+
+void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc)
+{
+ vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL);
+}
+
+void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc)
+{
+ struct vsp1_du_atomic_pipe_config cfg = { { 0, } };
+ struct rzg2l_du_crtc_state *state;
+
+ state = to_rzg2l_crtc_state(crtc->crtc.state);
+
+ vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
+}
+
+struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc,
+ unsigned int pipe_index)
+{
+ struct rzg2l_du_device *rcdu = crtc->vsp->dev;
+ struct drm_plane *plane = NULL;
+
+ drm_for_each_plane(plane, &rcdu->ddev) {
+ struct rzg2l_du_vsp_plane *vsp_plane = to_rzg2l_vsp_plane(plane);
+
+		if (vsp_plane->index == pipe_index)
+			return plane;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static const u32 rzg2l_du_vsp_formats[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+};
+
+static void rzg2l_du_vsp_plane_setup(struct rzg2l_du_vsp_plane *plane)
+{
+ struct rzg2l_du_vsp_plane_state *state =
+ to_rzg2l_vsp_plane_state(plane->plane.state);
+ struct rzg2l_du_crtc *crtc = to_rzg2l_crtc(state->state.crtc);
+ struct drm_framebuffer *fb = plane->plane.state->fb;
+ const struct rzg2l_du_format_info *format;
+ struct vsp1_du_atomic_config cfg = {
+ .pixelformat = 0,
+ .pitch = fb->pitches[0],
+ .alpha = state->state.alpha >> 8,
+ .zpos = state->state.zpos,
+ };
+ u32 fourcc = state->format->fourcc;
+ unsigned int i;
+
+ cfg.src.left = state->state.src.x1 >> 16;
+ cfg.src.top = state->state.src.y1 >> 16;
+ cfg.src.width = drm_rect_width(&state->state.src) >> 16;
+ cfg.src.height = drm_rect_height(&state->state.src) >> 16;
+
+ cfg.dst.left = state->state.dst.x1;
+ cfg.dst.top = state->state.dst.y1;
+ cfg.dst.width = drm_rect_width(&state->state.dst);
+ cfg.dst.height = drm_rect_height(&state->state.dst);
+
+ for (i = 0; i < state->format->planes; ++i) {
+ struct drm_gem_dma_object *gem;
+
+ gem = drm_fb_dma_get_gem_obj(fb, i);
+ cfg.mem[i] = gem->dma_addr + fb->offsets[i];
+ }
+
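+	/*
+	 * When per-pixel alpha blending is disabled, switch to the X variant
+	 * of the format so the alpha channel is ignored.
+	 */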
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+ }
+ }
+
+ format = rzg2l_du_format_info(fourcc);
+ cfg.pixelformat = format->v4l2;
+
+ cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
+
+ vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
+ plane->index, &cfg);
+}
+
+static int __rzg2l_du_vsp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ const struct rzg2l_du_format_info **format)
+{
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+		 * updated by drm_atomic_helper_check_plane_state(), so set it
+		 * manually here.
+ */
+ state->visible = false;
+ *format = NULL;
+ return 0;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret < 0)
+ return ret;
+
+ if (!state->visible) {
+ *format = NULL;
+ return 0;
+ }
+
+ *format = rzg2l_du_format_info(state->fb->format->format);
+
+ return 0;
+}
+
+static int rzg2l_du_vsp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct rzg2l_du_vsp_plane_state *rstate = to_rzg2l_vsp_plane_state(new_plane_state);
+
+ return __rzg2l_du_vsp_plane_atomic_check(plane, new_plane_state, &rstate->format);
+}
+
+static void rzg2l_du_vsp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct rzg2l_du_vsp_plane *rplane = to_rzg2l_vsp_plane(plane);
+ struct rzg2l_du_crtc *crtc = to_rzg2l_crtc(old_state->crtc);
+
+ if (new_state->visible)
+ rzg2l_du_vsp_plane_setup(rplane);
+ else if (old_state->crtc)
+ vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
+ rplane->index, NULL);
+}
+
+static const struct drm_plane_helper_funcs rzg2l_du_vsp_plane_helper_funcs = {
+ .atomic_check = rzg2l_du_vsp_plane_atomic_check,
+ .atomic_update = rzg2l_du_vsp_plane_atomic_update,
+};
+
+static struct drm_plane_state *
+rzg2l_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct rzg2l_du_vsp_plane_state *copy;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+
+ return &copy->state;
+}
+
+static void rzg2l_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_rzg2l_vsp_plane_state(state));
+}
+
+static void rzg2l_du_vsp_plane_reset(struct drm_plane *plane)
+{
+ struct rzg2l_du_vsp_plane_state *state;
+
+ if (plane->state) {
+ rzg2l_du_vsp_plane_atomic_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &state->state);
+}
+
+static const struct drm_plane_funcs rzg2l_du_vsp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = rzg2l_du_vsp_plane_reset,
+ .atomic_duplicate_state = rzg2l_du_vsp_plane_atomic_duplicate_state,
+ .atomic_destroy_state = rzg2l_du_vsp_plane_atomic_destroy_state,
+};
+
+static void rzg2l_du_vsp_cleanup(struct drm_device *dev, void *res)
+{
+ struct rzg2l_du_vsp *vsp = res;
+
+ put_device(vsp->vsp);
+}
+
+int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs)
+{
+ struct rzg2l_du_device *rcdu = vsp->dev;
+ struct platform_device *pdev;
+ unsigned int num_crtcs = hweight32(crtcs);
+ unsigned int num_planes = 2;
+ unsigned int i;
+ int ret;
+
+ /* Find the VSP device and initialize it. */
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENXIO;
+
+ vsp->vsp = &pdev->dev;
+
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rzg2l_du_vsp_cleanup, vsp);
+ if (ret < 0)
+ return ret;
+
+ ret = vsp1_du_init(vsp->vsp);
+ if (ret < 0)
+ return ret;
+
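+	/* Create one primary plane per CRTC and expose the rest as overlays. */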
+ for (i = 0; i < num_planes; ++i) {
+ enum drm_plane_type type = i < num_crtcs
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct rzg2l_du_vsp_plane *plane;
+
+ plane = drmm_universal_plane_alloc(&rcdu->ddev, struct rzg2l_du_vsp_plane,
+ plane, crtcs, &rzg2l_du_vsp_plane_funcs,
+ rzg2l_du_vsp_formats,
+ ARRAY_SIZE(rzg2l_du_vsp_formats),
+ NULL, type, NULL);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ plane->vsp = vsp;
+ plane->index = i;
+
+ drm_plane_helper_add(&plane->plane,
+ &rzg2l_du_vsp_plane_helper_funcs);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h
new file mode 100644
index 000000000000..322eb80dcbaf
--- /dev/null
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RZ/G2L Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ *
+ * Based on rcar_du_vsp.h
+ */
+
+#ifndef __RZG2L_DU_VSP_H__
+#define __RZG2L_DU_VSP_H__
+
+#include <drm/drm_plane.h>
+#include <linux/container_of.h>
+#include <linux/scatterlist.h>
+
+struct device;
+struct drm_framebuffer;
+struct rzg2l_du_device;
+struct rzg2l_du_format_info;
+struct rzg2l_du_vsp;
+
+struct rzg2l_du_vsp_plane {
+ struct drm_plane plane;
+ struct rzg2l_du_vsp *vsp;
+ unsigned int index;
+};
+
+struct rzg2l_du_vsp {
+ unsigned int index;
+ struct device *vsp;
+ struct rzg2l_du_device *dev;
+};
+
+static inline struct rzg2l_du_vsp_plane *to_rzg2l_vsp_plane(struct drm_plane *p)
+{
+ return container_of(p, struct rzg2l_du_vsp_plane, plane);
+}
+
+/**
+ * struct rzg2l_du_vsp_plane_state - Driver-specific plane state
+ * @state: base DRM plane state
+ * @format: information about the pixel format used by the plane
+ */
+struct rzg2l_du_vsp_plane_state {
+ struct drm_plane_state state;
+
+ const struct rzg2l_du_format_info *format;
+};
+
+static inline struct rzg2l_du_vsp_plane_state *
+to_rzg2l_vsp_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct rzg2l_du_vsp_plane_state, state);
+}
+
+#if IS_ENABLED(CONFIG_VIDEO_RENESAS_VSP1)
+int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs);
+void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc);
+void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc);
+void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc);
+struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc,
+ unsigned int pipe_index);
+#else
+static inline int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs)
+{
+ return -ENXIO;
+}
+
+static inline void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc) { }
+static inline void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc) { }
+static inline void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc) { }
+static inline struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc,
+ unsigned int pipe_index)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+#endif /* __RZG2L_DU_VSP_H__ */
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index bd08d57486fe..7069a3d4d581 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -343,6 +343,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
return ret;
}
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder,
+ dev->of_node, 0, 0);
+
dp->plat_data.encoder = &dp->encoder.encoder;
ret = analogix_dp_bind(dp->adp, drm_dev);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index e6fbe040ccf6..1d2261643743 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,12 +10,12 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
-#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -26,12 +26,17 @@
#include "inno_hdmi.h"
-struct hdmi_data_info {
- int vic;
- bool sink_has_audio;
- unsigned int enc_in_format;
- unsigned int enc_out_format;
- unsigned int colorimetry;
+#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+
+struct inno_hdmi_phy_config {
+ unsigned long pixelclock;
+ u8 pre_emphasis;
+ u8 voltage_level_control;
+};
+
+struct inno_hdmi_variant {
+ struct inno_hdmi_phy_config *phy_configs;
+ struct inno_hdmi_phy_config *default_phy_config;
};
struct inno_hdmi_i2c {
@@ -46,10 +51,9 @@ struct inno_hdmi_i2c {
struct inno_hdmi {
struct device *dev;
- struct drm_device *drm_dev;
- int irq;
struct clk *pclk;
+ struct clk *refclk;
void __iomem *regs;
struct drm_connector connector;
@@ -58,10 +62,14 @@ struct inno_hdmi {
struct inno_hdmi_i2c *i2c;
struct i2c_adapter *ddc;
- unsigned int tmds_rate;
+ const struct inno_hdmi_variant *variant;
+};
- struct hdmi_data_info hdmi_data;
- struct drm_display_mode previous_mode;
+struct inno_hdmi_connector_state {
+ struct drm_connector_state base;
+ unsigned int enc_out_format;
+ unsigned int colorimetry;
+ bool rgb_limited_range;
};
static struct inno_hdmi *encoder_to_inno_hdmi(struct drm_encoder *encoder)
@@ -76,10 +84,10 @@ static struct inno_hdmi *connector_to_inno_hdmi(struct drm_connector *connector)
return container_of(connector, struct inno_hdmi, connector);
}
+#define to_inno_hdmi_conn_state(conn_state) \
+ container_of_const(conn_state, struct inno_hdmi_connector_state, base)
+
enum {
- CSC_ITU601_16_235_TO_RGB_0_255_8BIT,
- CSC_ITU601_0_255_TO_RGB_0_255_8BIT,
- CSC_ITU709_16_235_TO_RGB_0_255_8BIT,
CSC_RGB_0_255_TO_ITU601_16_235_8BIT,
CSC_RGB_0_255_TO_ITU709_16_235_8BIT,
CSC_RGB_0_255_TO_RGB_16_235_8BIT,
@@ -87,40 +95,6 @@ enum {
static const char coeff_csc[][24] = {
/*
- * YUV2RGB:601 SD mode(Y[16:235], UV[16:240], RGB[0:255]):
- * R = 1.164*Y + 1.596*V - 204
- * G = 1.164*Y - 0.391*U - 0.813*V + 154
- * B = 1.164*Y + 2.018*U - 258
- */
- {
- 0x04, 0xa7, 0x00, 0x00, 0x06, 0x62, 0x02, 0xcc,
- 0x04, 0xa7, 0x11, 0x90, 0x13, 0x40, 0x00, 0x9a,
- 0x04, 0xa7, 0x08, 0x12, 0x00, 0x00, 0x03, 0x02
- },
- /*
- * YUV2RGB:601 SD mode(YUV[0:255],RGB[0:255]):
- * R = Y + 1.402*V - 248
- * G = Y - 0.344*U - 0.714*V + 135
- * B = Y + 1.772*U - 227
- */
- {
- 0x04, 0x00, 0x00, 0x00, 0x05, 0x9b, 0x02, 0xf8,
- 0x04, 0x00, 0x11, 0x60, 0x12, 0xdb, 0x00, 0x87,
- 0x04, 0x00, 0x07, 0x16, 0x00, 0x00, 0x02, 0xe3
- },
- /*
- * YUV2RGB:709 HD mode(Y[16:235],UV[16:240],RGB[0:255]):
- * R = 1.164*Y + 1.793*V - 248
- * G = 1.164*Y - 0.213*U - 0.534*V + 77
- * B = 1.164*Y + 2.115*U - 289
- */
- {
- 0x04, 0xa7, 0x00, 0x00, 0x07, 0x2c, 0x02, 0xf8,
- 0x04, 0xa7, 0x10, 0xda, 0x12, 0x22, 0x00, 0x4d,
- 0x04, 0xa7, 0x08, 0x74, 0x00, 0x00, 0x03, 0x21
- },
-
- /*
* RGB2YUV:601 SD mode:
* Cb = -0.291G - 0.148R + 0.439B + 128
* Y = 0.504G + 0.257R + 0.098B + 16
@@ -155,6 +129,36 @@ static const char coeff_csc[][24] = {
},
};
+static struct inno_hdmi_phy_config rk3036_hdmi_phy_configs[] = {
+ { 74250000, 0x3f, 0xbb },
+ { 165000000, 0x6f, 0xbb },
+ { ~0UL, 0x00, 0x00 }
+};
+
+static struct inno_hdmi_phy_config rk3128_hdmi_phy_configs[] = {
+ { 74250000, 0x3f, 0xaa },
+ { 165000000, 0x5f, 0xaa },
+ { ~0UL, 0x00, 0x00 }
+};
+
+static int inno_hdmi_find_phy_config(struct inno_hdmi *hdmi,
+ unsigned long pixelclk)
+{
+ const struct inno_hdmi_phy_config *phy_configs =
+ hdmi->variant->phy_configs;
+ int i;
+
+ for (i = 0; phy_configs[i].pixelclock != ~0UL; i++) {
+ if (pixelclk <= phy_configs[i].pixelclock)
+ return i;
+ }
+
+ DRM_DEV_DEBUG(hdmi->dev, "No phy configuration for pixelclock %lu\n",
+ pixelclk);
+
+ return -EINVAL;
+}
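Not part of the patch: a standalone sketch of the sentinel-terminated lookup that inno_hdmi_find_phy_config() performs, with table values mirroring the rk3036 entries above; the type and helper names below are illustrative only.
#include <errno.h>
#include <limits.h>
struct phy_cfg {
	unsigned long pixelclock;	/* highest pixel clock this entry covers */
	unsigned char pre_emphasis;
	unsigned char voltage_level_control;
};
/* Ascending table terminated by a ~0UL (ULONG_MAX) sentinel, as above. */
static const struct phy_cfg cfgs[] = {
	{  74250000UL, 0x3f, 0xbb },
	{ 165000000UL, 0x6f, 0xbb },
	{ ULONG_MAX,   0x00, 0x00 },
};
/* Index of the first entry covering @pixelclk, or -EINVAL (e.g. for 297 MHz). */
static int find_cfg(unsigned long pixelclk)
{
	int i;
	for (i = 0; cfgs[i].pixelclock != ULONG_MAX; i++)
		if (pixelclk <= cfgs[i].pixelclock)
			return i;
	return -EINVAL;
}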
+
static inline u8 hdmi_readb(struct inno_hdmi *hdmi, u16 offset)
{
return readl_relaxed(hdmi->regs + (offset) * 0x04);
@@ -174,11 +178,11 @@ static inline void hdmi_modb(struct inno_hdmi *hdmi, u16 offset,
hdmi_writeb(hdmi, offset, temp);
}
-static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi)
+static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi, unsigned long long rate)
{
- int ddc_bus_freq;
+ unsigned long long ddc_bus_freq = rate >> 2;
- ddc_bus_freq = (hdmi->tmds_rate >> 2) / HDMI_SCL_RATE;
+ do_div(ddc_bus_freq, HDMI_SCL_RATE);
hdmi_writeb(hdmi, DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
hdmi_writeb(hdmi, DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
@@ -196,38 +200,44 @@ static void inno_hdmi_sys_power(struct inno_hdmi *hdmi, bool enable)
hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_OFF);
}
-static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
+static void inno_hdmi_standby(struct inno_hdmi *hdmi)
{
- switch (mode) {
- case NORMAL:
- inno_hdmi_sys_power(hdmi, false);
+ inno_hdmi_sys_power(hdmi, false);
- hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x6f);
- hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0xbb);
+ hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+}
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
- hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
- hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
+static void inno_hdmi_power_up(struct inno_hdmi *hdmi,
+ unsigned long mpixelclock)
+{
+ struct inno_hdmi_phy_config *phy_config;
+ int ret = inno_hdmi_find_phy_config(hdmi, mpixelclock);
- inno_hdmi_sys_power(hdmi, true);
- break;
+ if (ret < 0) {
+ phy_config = hdmi->variant->default_phy_config;
+ DRM_DEV_ERROR(hdmi->dev,
+ "Using default phy configuration for TMDS rate %lu\n",
+ mpixelclock);
+ } else {
+ phy_config = &hdmi->variant->phy_configs[ret];
+ }
- case LOWER_PWR:
- inno_hdmi_sys_power(hdmi, false);
- hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
- hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+ inno_hdmi_sys_power(hdmi, false);
- break;
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, phy_config->pre_emphasis);
+ hdmi_writeb(hdmi, HDMI_PHY_DRIVER, phy_config->voltage_level_control);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
+ hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
+ hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
- default:
- DRM_DEV_ERROR(hdmi->dev, "Unknown power mode %d\n", mode);
- }
-}
+ inno_hdmi_sys_power(hdmi, true);
+}
static void inno_hdmi_reset(struct inno_hdmi *hdmi)
{
@@ -244,75 +254,96 @@ static void inno_hdmi_reset(struct inno_hdmi *hdmi)
val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
- inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+ inno_hdmi_standby(hdmi);
}
-static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi, int setup_rc,
- union hdmi_infoframe *frame, u32 frame_index,
- u32 mask, u32 disable, u32 enable)
+static void inno_hdmi_disable_frame(struct inno_hdmi *hdmi,
+ enum hdmi_infoframe_type type)
{
- if (mask)
- hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, disable);
+ struct drm_connector *connector = &hdmi->connector;
- hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, frame_index);
-
- if (setup_rc >= 0) {
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
- ssize_t rc, i;
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(connector->dev,
+ "Unsupported infoframe type: %u\n", type);
+ return;
+ }
- rc = hdmi_infoframe_pack(frame, packed_frame,
- sizeof(packed_frame));
- if (rc < 0)
- return rc;
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, INFOFRAME_AVI);
+}
- for (i = 0; i < rc; i++)
- hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
- packed_frame[i]);
+static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi,
+ union hdmi_infoframe *frame, enum hdmi_infoframe_type type)
+{
+ struct drm_connector *connector = &hdmi->connector;
+ u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+ ssize_t rc, i;
- if (mask)
- hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, enable);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(connector->dev,
+ "Unsupported infoframe type: %u\n", type);
+ return 0;
}
- return setup_rc;
-}
+ inno_hdmi_disable_frame(hdmi, type);
-static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int rc;
+ rc = hdmi_infoframe_pack(frame, packed_frame,
+ sizeof(packed_frame));
+ if (rc < 0)
+ return rc;
- rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- &hdmi->connector,
- mode);
+ for (i = 0; i < rc; i++)
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
+ packed_frame[i]);
- return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI,
- m_PACKET_VSI_EN, v_PACKET_VSI_EN(0), v_PACKET_VSI_EN(1));
+ return 0;
}
static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
+ struct drm_connector *connector = &hdmi->connector;
+ struct drm_connector_state *conn_state = connector->state;
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(conn_state);
union hdmi_infoframe frame;
int rc;
rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
&hdmi->connector,
mode);
+ if (rc) {
+ inno_hdmi_disable_frame(hdmi, HDMI_INFOFRAME_TYPE_AVI);
+ return rc;
+ }
- if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV444)
frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+ else if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV422)
frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_AVI, 0, 0, 0);
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ connector, mode,
+ inno_conn_state->rgb_limited_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ return inno_hdmi_upload_frame(hdmi, &frame, HDMI_INFOFRAME_TYPE_AVI);
}
static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
{
- struct hdmi_data_info *data = &hdmi->hdmi_data;
+ struct drm_connector *connector = &hdmi->connector;
+ struct drm_connector_state *conn_state = connector->state;
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(conn_state);
int c0_c2_change = 0;
int csc_enable = 0;
int csc_mode = 0;
@@ -330,9 +361,14 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
v_VIDEO_INPUT_CSP(0);
hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL2, value);
- if (data->enc_in_format == data->enc_out_format) {
- if ((data->enc_in_format == HDMI_COLORSPACE_RGB) ||
- (data->enc_in_format >= HDMI_COLORSPACE_YUV444)) {
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_RGB) {
+ if (inno_conn_state->rgb_limited_range) {
+ csc_mode = CSC_RGB_0_255_TO_RGB_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+
+ } else {
value = v_SOF_DISABLE | v_COLOR_DEPTH_NOT_INDICATED(1);
hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
@@ -342,35 +378,21 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
v_VIDEO_C0_C2_SWAP(C0_C2_CHANGE_DISABLE));
return 0;
}
- }
-
- if (data->colorimetry == HDMI_COLORIMETRY_ITU_601) {
- if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
- (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
- csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
- auto_csc = AUTO_CSC_DISABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_ENABLE;
- } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
- (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
- csc_mode = CSC_ITU601_16_235_TO_RGB_0_255_8BIT;
- auto_csc = AUTO_CSC_ENABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_DISABLE;
- }
} else {
- if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
- (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
- csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
- auto_csc = AUTO_CSC_DISABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_ENABLE;
- } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
- (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
- csc_mode = CSC_ITU709_16_235_TO_RGB_0_255_8BIT;
- auto_csc = AUTO_CSC_ENABLE;
- c0_c2_change = C0_C2_CHANGE_DISABLE;
- csc_enable = v_CSC_DISABLE;
+ if (inno_conn_state->colorimetry == HDMI_COLORIMETRY_ITU_601) {
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV444) {
+ csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ }
+ } else {
+ if (inno_conn_state->enc_out_format == HDMI_COLORSPACE_YUV444) {
+ csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ }
}
}
@@ -411,7 +433,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
- value = mode->hsync_start - mode->hdisplay;
+ value = mode->htotal - mode->hsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
@@ -426,7 +448,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
value = mode->vtotal - mode->vdisplay;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
- value = mode->vsync_start - mode->vdisplay;
+ value = mode->vtotal - mode->vsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
value = mode->vsync_end - mode->vsync_start;
@@ -443,19 +465,7 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct drm_display_info *display = &hdmi->connector.display_info;
-
- hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
-
- hdmi->hdmi_data.enc_in_format = HDMI_COLORSPACE_RGB;
- hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
-
- if ((hdmi->hdmi_data.vic == 6) || (hdmi->hdmi_data.vic == 7) ||
- (hdmi->hdmi_data.vic == 21) || (hdmi->hdmi_data.vic == 22) ||
- (hdmi->hdmi_data.vic == 2) || (hdmi->hdmi_data.vic == 3) ||
- (hdmi->hdmi_data.vic == 17) || (hdmi->hdmi_data.vic == 18))
- hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
- else
- hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ unsigned long mpixelclock = mode->clock * 1000;
/* Mute video and audio output */
hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
@@ -469,10 +479,8 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
inno_hdmi_config_video_csc(hdmi);
- if (display->is_hdmi) {
+ if (display->is_hdmi)
inno_hdmi_config_video_avi(hdmi, mode);
- inno_hdmi_config_video_vsi(hdmi, mode);
- }
/*
* When the IP controller has been configured to an accurate video
@@ -480,47 +488,73 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
* DCLK_LCDC, so we need to init the TMDS rate to mode pixel
* clock rate, and reconfigure the DDC clock.
*/
- hdmi->tmds_rate = mode->clock * 1000;
- inno_hdmi_i2c_init(hdmi);
+ inno_hdmi_i2c_init(hdmi, mpixelclock);
/* Unmute video and audio output */
hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0));
+ inno_hdmi_power_up(hdmi, mpixelclock);
+
return 0;
}
-static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static enum drm_mode_status inno_hdmi_display_mode_valid(struct inno_hdmi *hdmi,
+ struct drm_display_mode *mode)
{
- struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ unsigned long mpixelclk, max_tolerance;
+ long rounded_refclk;
- inno_hdmi_setup(hdmi, adj_mode);
+ /* No support for double-clock modes */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_BAD;
- /* Store the display mode for plugin/DPMS poweron events */
- drm_mode_copy(&hdmi->previous_mode, adj_mode);
-}
+ mpixelclk = mode->clock * 1000;
-static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ if (mpixelclk < INNO_HDMI_MIN_TMDS_CLOCK)
+ return MODE_CLOCK_LOW;
+
+ if (inno_hdmi_find_phy_config(hdmi, mpixelclk) < 0)
+ return MODE_CLOCK_HIGH;
- inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+ if (hdmi->refclk) {
+ rounded_refclk = clk_round_rate(hdmi->refclk, mpixelclk);
+ if (rounded_refclk < 0)
+ return MODE_BAD;
+
+ /* VESA DMT standard specifies +/- 0.5% max tolerance */
+ max_tolerance = mpixelclk / 200;
+ if (abs_diff((unsigned long)rounded_refclk, mpixelclk) > max_tolerance)
+ return MODE_NOCLOCK;
+ }
+
+ return MODE_OK;
}
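The refclk test above rejects modes whose pixel clock the reference clock cannot reach within the 0.5% VESA tolerance (max_tolerance = mpixelclk / 200). A worked example, not part of the patch:
#include <stdio.h>
int main(void)
{
	unsigned long pixclk = 148500000UL;	/* 148.5 MHz mode */
	unsigned long rounded = 148000000UL;	/* closest rate the clock can give */
	unsigned long tol = pixclk / 200;	/* 742500 Hz = 0.5% */
	unsigned long diff = pixclk > rounded ? pixclk - rounded : rounded - pixclk;
	/* 500 kHz deviation is within tolerance, so this prints MODE_OK */
	printf("deviation %lu Hz, tolerance %lu Hz -> %s\n",
	       diff, tol, diff > tol ? "MODE_NOCLOCK" : "MODE_OK");
	return 0;
}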
-static void inno_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void inno_hdmi_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
- inno_hdmi_set_pwr_mode(hdmi, LOWER_PWR);
+ conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ inno_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
}
-static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void inno_hdmi_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- return true;
+ struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+
+ inno_hdmi_standby(hdmi);
}
static int
@@ -529,19 +563,35 @@ inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct inno_hdmi *hdmi = encoder_to_inno_hdmi(encoder);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ u8 vic = drm_match_cea_mode(mode);
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(conn_state);
s->output_mode = ROCKCHIP_OUT_MODE_P888;
s->output_type = DRM_MODE_CONNECTOR_HDMIA;
- return 0;
+ if (vic == 6 || vic == 7 ||
+ vic == 21 || vic == 22 ||
+ vic == 2 || vic == 3 ||
+ vic == 17 || vic == 18)
+ inno_conn_state->colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ inno_conn_state->colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+ inno_conn_state->enc_out_format = HDMI_COLORSPACE_RGB;
+ inno_conn_state->rgb_limited_range =
+ drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED;
+
+ return inno_hdmi_display_mode_valid(hdmi,
+ &crtc_state->adjusted_mode) == MODE_OK ? 0 : -EINVAL;
}
static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
- .enable = inno_hdmi_encoder_enable,
- .disable = inno_hdmi_encoder_disable,
- .mode_fixup = inno_hdmi_encoder_mode_fixup,
- .mode_set = inno_hdmi_encoder_mode_set,
- .atomic_check = inno_hdmi_encoder_atomic_check,
+ .atomic_check = inno_hdmi_encoder_atomic_check,
+ .atomic_enable = inno_hdmi_encoder_enable,
+ .atomic_disable = inno_hdmi_encoder_disable,
};
static enum drm_connector_status
@@ -564,7 +614,6 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, hdmi->ddc);
if (edid) {
- hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -577,14 +626,9 @@ static enum drm_mode_status
inno_hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- return MODE_OK;
-}
+ struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
-static int
-inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- return drm_helper_probe_single_connector_modes(connector, 1920, 1080);
+ return inno_hdmi_display_mode_valid(hdmi, mode);
}
static void inno_hdmi_connector_destroy(struct drm_connector *connector)
@@ -593,13 +637,64 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
+static void
+inno_hdmi_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct inno_hdmi_connector_state *inno_conn_state =
+ to_inno_hdmi_conn_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(&inno_conn_state->base);
+ kfree(inno_conn_state);
+}
+
+static void inno_hdmi_connector_reset(struct drm_connector *connector)
+{
+ struct inno_hdmi_connector_state *inno_conn_state;
+
+ if (connector->state) {
+ inno_hdmi_connector_destroy_state(connector, connector->state);
+ connector->state = NULL;
+ }
+
+ inno_conn_state = kzalloc(sizeof(*inno_conn_state), GFP_KERNEL);
+ if (!inno_conn_state)
+ return;
+
+ __drm_atomic_helper_connector_reset(connector, &inno_conn_state->base);
+
+ inno_conn_state->colorimetry = HDMI_COLORIMETRY_ITU_709;
+ inno_conn_state->enc_out_format = HDMI_COLORSPACE_RGB;
+ inno_conn_state->rgb_limited_range = false;
+}
+
+static struct drm_connector_state *
+inno_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct inno_hdmi_connector_state *inno_conn_state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ inno_conn_state = kmemdup(to_inno_hdmi_conn_state(connector->state),
+ sizeof(*inno_conn_state), GFP_KERNEL);
+
+ if (!inno_conn_state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &inno_conn_state->base);
+
+ return &inno_conn_state->base;
+}
+
static const struct drm_connector_funcs inno_hdmi_connector_funcs = {
- .fill_modes = inno_hdmi_probe_single_connector_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.detect = inno_hdmi_connector_detect,
.destroy = inno_hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = inno_hdmi_connector_reset,
+ .atomic_duplicate_state = inno_hdmi_connector_duplicate_state,
+ .atomic_destroy_state = inno_hdmi_connector_destroy_state,
};
static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
@@ -819,6 +914,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct inno_hdmi *hdmi;
+ const struct inno_hdmi_variant *variant;
int irq;
int ret;
@@ -827,7 +923,12 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return -ENOMEM;
hdmi->dev = dev;
- hdmi->drm_dev = drm;
+
+ variant = of_device_get_match_data(hdmi->dev);
+ if (!variant)
+ return -EINVAL;
+
+ hdmi->variant = variant;
hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
@@ -846,6 +947,20 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
+ hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
+ if (IS_ERR(hdmi->refclk)) {
+ DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI reference clock\n");
+ ret = PTR_ERR(hdmi->refclk);
+ goto err_disable_pclk;
+ }
+
+ ret = clk_prepare_enable(hdmi->refclk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev,
+ "Cannot enable HDMI reference clock: %d\n", ret);
+ goto err_disable_pclk;
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
@@ -862,13 +977,16 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
}
/*
- * When IP controller haven't configured to an accurate video
- * timing, then the TMDS clock source would be switched to
- * PCLK_HDMI, so we need to init the TMDS rate to PCLK rate,
- * and reconfigure the DDC clock.
+ * When the controller isn't configured with an accurate video
+ * timing and no reference clock is available, the TMDS clock
+ * source is switched to PCLK_HDMI, so we need to initialize
+ * the TMDS rate to the PCLK rate and reconfigure the DDC clock.
*/
- hdmi->tmds_rate = clk_get_rate(hdmi->pclk);
- inno_hdmi_i2c_init(hdmi);
+ if (hdmi->refclk)
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk));
+ else
+ inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk));
ret = inno_hdmi_register(drm, hdmi);
if (ret)
@@ -892,6 +1010,8 @@ err_cleanup_hdmi:
err_put_adapter:
i2c_put_adapter(hdmi->ddc);
err_disable_clk:
+ clk_disable_unprepare(hdmi->refclk);
+err_disable_pclk:
clk_disable_unprepare(hdmi->pclk);
return ret;
}
@@ -905,6 +1025,7 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master,
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
i2c_put_adapter(hdmi->ddc);
+ clk_disable_unprepare(hdmi->refclk);
clk_disable_unprepare(hdmi->pclk);
}
@@ -923,8 +1044,22 @@ static void inno_hdmi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &inno_hdmi_ops);
}
+static const struct inno_hdmi_variant rk3036_inno_hdmi_variant = {
+ .phy_configs = rk3036_hdmi_phy_configs,
+ .default_phy_config = &rk3036_hdmi_phy_configs[1],
+};
+
+static const struct inno_hdmi_variant rk3128_inno_hdmi_variant = {
+ .phy_configs = rk3128_hdmi_phy_configs,
+ .default_phy_config = &rk3128_hdmi_phy_configs[1],
+};
+
static const struct of_device_id inno_hdmi_dt_ids[] = {
{ .compatible = "rockchip,rk3036-inno-hdmi",
+ .data = &rk3036_inno_hdmi_variant,
+ },
+ { .compatible = "rockchip,rk3128-inno-hdmi",
+ .data = &rk3128_inno_hdmi_variant,
},
{},
};
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
index 93245b55f967..a7edf3559e60 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -10,11 +10,6 @@
#define DDC_SEGMENT_ADDR 0x30
-enum PWR_MODE {
- NORMAL,
- LOWER_PWR,
-};
-
#define HDMI_SCL_RATE (100*1000)
#define DDC_BUS_FREQ_L 0x4b
#define DDC_BUS_FREQ_H 0x4c
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 59341654ec32..77b76cff1adb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -576,8 +576,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = -EINVAL;
goto err_put_port;
} else if (ret) {
- DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
- ret = -EPROBE_DEFER;
+ dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
goto err_put_port;
}
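dev_err_probe() logs at error level for real failures, demotes -EPROBE_DEFER to a debug message while recording it as the deferral reason, and returns the error it was given. The hunk above keeps the earlier ret untouched; an equivalent, commonly seen form (sketch only, not part of the patch) assigns the return value directly:
	} else if (ret) {
		ret = dev_err_probe(dev, ret,
				    "failed to find panel and bridge node\n");
		goto err_put_port;
	}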
if (lvds->panel)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index c51ca82320cb..b9ee02061d5b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -227,11 +227,22 @@ static const struct vop_win_data rk3126_vop_win_data[] = {
.type = DRM_PLANE_TYPE_CURSOR },
};
+static const struct vop_output rk3126_output = {
+ .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+ .hdmi_pin_pol = VOP_REG(RK3126_INT_SCALER, 0x7, 4),
+ .hdmi_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 22),
+ .hdmi_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 23),
+ .rgb_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 24),
+ .rgb_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 25),
+ .mipi_en = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 28),
+ .mipi_dclk_pol = VOP_REG(RK3036_AXI_BUS_CTRL, 0x1, 29),
+};
+
static const struct vop_data rk3126_vop = {
.intr = &rk3036_intr,
.common = &rk3036_common,
.modeset = &rk3036_modeset,
- .output = &rk3036_output,
+ .output = &rk3126_output,
.win = rk3126_vop_win_data,
.win_size = ARRAY_SIZE(rk3126_vop_win_data),
.max_output = { 1920, 1080 },
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index 406e981c75bd..fbf1bcc68625 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -872,6 +872,9 @@
/* rk3036 register definition end */
/* rk3126 register definition */
+#define RK3126_INT_SCALER 0x0c
+
+/* win1 register */
#define RK3126_WIN1_MST 0x4c
#define RK3126_WIN1_DSP_INFO 0x50
#define RK3126_WIN1_DSP_ST 0x54
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 06cedfe4b486..0f35f009b9d3 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -33,9 +33,7 @@ static struct kmem_cache *sched_fence_slab;
static int __init drm_sched_fence_slab_init(void)
{
- sched_fence_slab = kmem_cache_create(
- "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ sched_fence_slab = KMEM_CACHE(drm_sched_fence, SLAB_HWCACHE_ALIGN);
if (!sched_fence_slab)
return -ENOMEM;
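KMEM_CACHE() derives the cache name, object size and alignment from the struct type, so the conversion above only adds natural alignment of the objects on top of the previous call. Simplified expansion (paraphrased from include/linux/slab.h):
	sched_fence_slab = kmem_cache_create("drm_sched_fence",
					     sizeof(struct drm_sched_fence),
					     __alignof__(struct drm_sched_fence),
					     SLAB_HWCACHE_ALIGN, NULL);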
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index d442b893275b..7e90c9f95611 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1251,7 +1251,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name, struct device *dev)
{
- int i, ret;
+ int i;
sched->ops = ops;
sched->credit_limit = credit_limit;
@@ -1287,11 +1287,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->own_submit_wq = true;
}
- ret = -ENOMEM;
+
sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
- goto Out_free;
+ goto Out_check_own;
sched->num_rqs = num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
@@ -1316,13 +1316,14 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
Out_unroll:
for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
-Out_free:
+
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+Out_check_own:
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
- return ret;
+ return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 84e035a7ab3f..84bfde31d172 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -142,6 +142,11 @@ static const struct of_device_id ssd130x_of_match[] = {
.compatible = "solomon,ssd1327",
.data = &ssd130x_variants[SSD1327_ID],
},
+ /* ssd133x family */
+ {
+ .compatible = "solomon,ssd1331",
+ .data = &ssd130x_variants[SSD1331_ID],
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
@@ -166,6 +171,8 @@ static const struct spi_device_id ssd130x_spi_table[] = {
{ "ssd1322", SSD1322_ID },
{ "ssd1325", SSD1325_ID },
{ "ssd1327", SSD1327_ID },
+ /* ssd133x family */
+ { "ssd1331", SSD1331_ID },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 3d0e093a7e6e..ebd943b9e357 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -119,6 +119,26 @@
#define SSD130X_SET_VCOMH_VOLTAGE 0xbe
#define SSD132X_SET_FUNCTION_SELECT_B 0xd5
+/* ssd133x commands */
+#define SSD133X_SET_COL_RANGE 0x15
+#define SSD133X_SET_ROW_RANGE 0x75
+#define SSD133X_CONTRAST_A 0x81
+#define SSD133X_CONTRAST_B 0x82
+#define SSD133X_CONTRAST_C 0x83
+#define SSD133X_SET_MASTER_CURRENT 0x87
+#define SSD132X_SET_PRECHARGE_A 0x8a
+#define SSD132X_SET_PRECHARGE_B 0x8b
+#define SSD132X_SET_PRECHARGE_C 0x8c
+#define SSD133X_SET_DISPLAY_START 0xa1
+#define SSD133X_SET_DISPLAY_OFFSET 0xa2
+#define SSD133X_SET_DISPLAY_NORMAL 0xa4
+#define SSD133X_SET_MASTER_CONFIG 0xad
+#define SSD133X_POWER_SAVE_MODE 0xb0
+#define SSD133X_PHASES_PERIOD 0xb1
+#define SSD133X_SET_CLOCK_FREQ 0xb3
+#define SSD133X_SET_PRECHARGE_VOLTAGE 0xbb
+#define SSD133X_SET_VCOMH_VOLTAGE 0xbe
+
#define MAX_CONTRAST 255
const struct ssd130x_deviceinfo ssd130x_variants[] = {
@@ -180,6 +200,12 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
.default_width = 128,
.default_height = 128,
.family_id = SSD132X_FAMILY,
+ },
+ /* ssd133x family */
+ [SSD1331_ID] = {
+ .default_width = 96,
+ .default_height = 64,
+ .family_id = SSD133X_FAMILY,
}
};
EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
@@ -589,6 +615,117 @@ static int ssd132x_init(struct ssd130x_device *ssd130x)
return 0;
}
+static int ssd133x_init(struct ssd130x_device *ssd130x)
+{
+ int ret;
+
+ /* Set color A contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_CONTRAST_A, 0x91);
+ if (ret < 0)
+ return ret;
+
+ /* Set color B contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_CONTRAST_B, 0x50);
+ if (ret < 0)
+ return ret;
+
+ /* Set color C contrast */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_CONTRAST_C, 0x7d);
+ if (ret < 0)
+ return ret;
+
+ /* Set master current */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_MASTER_CURRENT, 0x06);
+ if (ret < 0)
+ return ret;
+
+ /* Set column start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_COL_RANGE, 0x00, ssd130x->width - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set row start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_ROW_RANGE, 0x00, ssd130x->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Horizontal Address Increment
+ * Normal order SA,SB,SC (e.g. RGB)
+ * COM Split Odd Even
+ * 256 color format
+ */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD13XX_SET_SEG_REMAP, 0x20);
+ if (ret < 0)
+ return ret;
+
+ /* Set display start and offset */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_DISPLAY_START, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_DISPLAY_OFFSET, 0x00);
+ if (ret < 0)
+ return ret;
+
+ /* Set display mode normal */
+ ret = ssd130x_write_cmd(ssd130x, 1, SSD133X_SET_DISPLAY_NORMAL);
+ if (ret < 0)
+ return ret;
+
+ /* Set multiplex ratio value */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD13XX_SET_MULTIPLEX_RATIO, ssd130x->height - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set master configuration */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_MASTER_CONFIG, 0x8e);
+ if (ret < 0)
+ return ret;
+
+ /* Set power mode */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_POWER_SAVE_MODE, 0x0b);
+ if (ret < 0)
+ return ret;
+
+ /* Set Phase 1 and 2 period */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_PHASES_PERIOD, 0x31);
+ if (ret < 0)
+ return ret;
+
+ /* Set clock divider */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_CLOCK_FREQ, 0xf0);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge A */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD132X_SET_PRECHARGE_A, 0x64);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge B */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD132X_SET_PRECHARGE_B, 0x78);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge C */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD132X_SET_PRECHARGE_C, 0x64);
+ if (ret < 0)
+ return ret;
+
+ /* Set pre-charge level */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_PRECHARGE_VOLTAGE, 0x3a);
+ if (ret < 0)
+ return ret;
+
+ /* Set VCOMH voltage */
+ ret = ssd130x_write_cmd(ssd130x, 2, SSD133X_SET_VCOMH_VOLTAGE, 0x3e);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
struct drm_rect *rect, u8 *buf,
u8 *data_array)
@@ -753,6 +890,47 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x,
return ret;
}
+static int ssd133x_update_rect(struct ssd130x_device *ssd130x,
+ struct drm_rect *rect, u8 *data_array,
+ unsigned int pitch)
+{
+ unsigned int x = rect->x1;
+ unsigned int y = rect->y1;
+ unsigned int columns = drm_rect_width(rect);
+ unsigned int rows = drm_rect_height(rect);
+ int ret;
+
+ /*
+ * The screen is divided into Segment and Common outputs, where
+ * COM0 to COM[N - 1] are the rows and SEG0 to SEG[M - 1] are
+ * the columns.
+ *
+ * Each Segment has a 8-bit pixel and each Common output has a
+ * row of pixels. When using the (default) horizontal address
+ * increment mode, each byte of data sent to the controller addresses
+ * a Segment (e.g. SEG0).
+ *
+ * When using the 256 color depth format, each pixel contains 3
+ * sub-pixels for color A, B and C. These have 3 bits, 3 bits and
+ * 2 bits respectively.
+ */
+
+ /* Set column start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_COL_RANGE, x, columns - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set row start and end */
+ ret = ssd130x_write_cmd(ssd130x, 3, SSD133X_SET_ROW_RANGE, y, rows - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Write out update in one go since horizontal addressing mode is used */
+ ret = ssd130x_write_data(ssd130x, data_array, pitch * rows);
+
+ return ret;
+}
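The 256-colour format described in the comment above is RGB332: one byte per pixel with 3 bits of red, 3 bits of green and 2 bits of blue. A small illustrative helper, not part of the driver, showing the bit reduction that drm_fb_xrgb8888_to_rgb332() performs for the blit path further below:
#include <stdint.h>
#include <stdio.h>
/* Keep the top 3/3/2 bits of the 8-bit R/G/B channels. */
static uint8_t pack_rgb332(uint8_t r, uint8_t g, uint8_t b)
{
	return (uint8_t)((r & 0xe0) | ((g & 0xe0) >> 3) | (b >> 6));
}
int main(void)
{
	/* Pure red 0xff0000 -> 0xe0, mid grey 0x808080 -> 0x92 */
	printf("0x%02x 0x%02x\n", pack_rgb332(0xff, 0x00, 0x00),
	       pack_rgb332(0x80, 0x80, 0x80));
	return 0;
}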
+
static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
{
unsigned int pages = DIV_ROUND_UP(ssd130x->height, SSD130X_PAGE_HEIGHT);
@@ -805,6 +983,22 @@ static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
ssd130x_write_data(ssd130x, data_array, columns * height);
}
+static void ssd133x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
+{
+ const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_RGB332);
+ unsigned int pitch;
+
+ if (!fi)
+ return;
+
+ pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+
+ memset(data_array, 0, pitch * ssd130x->height);
+
+ /* Write out update in one go since horizontal addressing mode is used */
+ ssd130x_write_data(ssd130x, data_array, pitch * ssd130x->height);
+}
+
static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect,
@@ -866,6 +1060,36 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
return ret;
}
+static int ssd133x_fb_blit_rect(struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
+ struct drm_rect *rect, u8 *data_array,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+ const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_RGB332);
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+ int ret = 0;
+
+ if (!fi)
+ return -EINVAL;
+
+ dst_pitch = drm_format_info_min_pitch(fi, 0, drm_rect_width(rect));
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ iosys_map_set_vaddr(&dst, data_array);
+ drm_fb_xrgb8888_to_rgb332(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+ ssd133x_update_rect(ssd130x, rect, data_array, dst_pitch);
+
+ return ret;
+}
+
static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -964,6 +1188,29 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
return 0;
}
+static int ssd133x_primary_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
+
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!plane_state->visible)
+ return 0;
+
+ return 0;
+}
+
static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1034,6 +1281,39 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
drm_dev_exit(idx);
}
+static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ struct ssd130x_crtc_state *ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *drm = plane->dev;
+ struct drm_rect dst_clip;
+ struct drm_rect damage;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ ssd133x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
+ ssd130x_crtc_state->data_array,
+ &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+}
+
static void ssd130x_primary_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1082,6 +1362,30 @@ static void ssd132x_primary_plane_atomic_disable(struct drm_plane *plane,
drm_dev_exit(idx);
}
+static void ssd133x_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = plane->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ struct ssd130x_crtc_state *ssd130x_crtc_state;
+ int idx;
+
+ if (!plane_state->crtc)
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ ssd133x_clear_screen(ssd130x, ssd130x_crtc_state->data_array);
+
+ drm_dev_exit(idx);
+}
+
/* Called during init to allocate the plane's atomic state. */
static void ssd130x_primary_plane_reset(struct drm_plane *plane)
{
@@ -1144,6 +1448,12 @@ static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs[]
.atomic_check = ssd132x_primary_plane_atomic_check,
.atomic_update = ssd132x_primary_plane_atomic_update,
.atomic_disable = ssd132x_primary_plane_atomic_disable,
+ },
+ [SSD133X_FAMILY] = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = ssd133x_primary_plane_atomic_check,
+ .atomic_update = ssd133x_primary_plane_atomic_update,
+ .atomic_disable = ssd133x_primary_plane_atomic_disable,
}
};
@@ -1214,6 +1524,33 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = crtc->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(crtc_state);
+ const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_RGB332);
+ unsigned int pitch;
+ int ret;
+
+ if (!fi)
+ return -EINVAL;
+
+ ret = drm_crtc_helper_atomic_check(crtc, state);
+ if (ret)
+ return ret;
+
+ pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+
+ ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL);
+ if (!ssd130x_state->data_array)
+ return -ENOMEM;
+
+ return 0;
+}
+
/* Called during init to allocate the CRTC's atomic state. */
static void ssd130x_crtc_reset(struct drm_crtc *crtc)
{
@@ -1275,6 +1612,10 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs[] = {
.mode_valid = ssd130x_crtc_mode_valid,
.atomic_check = ssd132x_crtc_atomic_check,
},
+ [SSD133X_FAMILY] = {
+ .mode_valid = ssd130x_crtc_mode_valid,
+ .atomic_check = ssd133x_crtc_atomic_check,
+ },
};
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
@@ -1337,6 +1678,31 @@ power_off:
ssd130x_power_off(ssd130x);
}
+static void ssd133x_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ int ret;
+
+ ret = ssd130x_power_on(ssd130x);
+ if (ret)
+ return;
+
+ ret = ssd133x_init(ssd130x);
+ if (ret)
+ goto power_off;
+
+ ssd130x_write_cmd(ssd130x, 1, SSD13XX_DISPLAY_ON);
+
+ backlight_enable(ssd130x->bl_dev);
+
+ return;
+
+power_off:
+ ssd130x_power_off(ssd130x);
+}
+
static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
@@ -1358,6 +1724,10 @@ static const struct drm_encoder_helper_funcs ssd130x_encoder_helper_funcs[] = {
[SSD132X_FAMILY] = {
.atomic_enable = ssd132x_encoder_atomic_enable,
.atomic_disable = ssd130x_encoder_atomic_disable,
+ },
+ [SSD133X_FAMILY] = {
+ .atomic_enable = ssd133x_encoder_atomic_enable,
+ .atomic_disable = ssd130x_encoder_atomic_disable,
}
};
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index 075c5c3ee75a..a4554018bb2a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -25,7 +25,8 @@
enum ssd130x_family_ids {
SSD130X_FAMILY,
- SSD132X_FAMILY
+ SSD132X_FAMILY,
+ SSD133X_FAMILY
};
enum ssd130x_variants {
@@ -39,6 +40,8 @@ enum ssd130x_variants {
SSD1322_ID,
SSD1325_ID,
SSD1327_ID,
+ /* ssd133x family */
+ SSD1331_ID,
NR_SSD130X_VARIANTS
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 152375f3de2e..69001a3dc0df 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -30,19 +31,11 @@
#include "sun4i_drv.h"
#include "sun4i_hdmi.h"
-static inline struct sun4i_hdmi *
-drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct sun4i_hdmi,
- encoder);
-}
+#define drm_encoder_to_sun4i_hdmi(e) \
+ container_of_const(e, struct sun4i_hdmi, encoder)
-static inline struct sun4i_hdmi *
-drm_connector_to_sun4i_hdmi(struct drm_connector *connector)
-{
- return container_of(connector, struct sun4i_hdmi,
- connector);
-}
+#define drm_connector_to_sun4i_hdmi(c) \
+ container_of_const(c, struct sun4i_hdmi, connector)
static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi,
struct drm_display_mode *mode)
@@ -70,19 +63,8 @@ static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi,
return 0;
}
-static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct drm_display_mode *mode = &crtc_state->mode;
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- return -EINVAL;
-
- return 0;
-}
-
-static void sun4i_hdmi_disable(struct drm_encoder *encoder)
+static void sun4i_hdmi_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
u32 val;
@@ -96,37 +78,17 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
clk_disable_unprepare(hdmi->tmds_clk);
}
-static void sun4i_hdmi_enable(struct drm_encoder *encoder)
+static void sun4i_hdmi_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
struct drm_display_info *display = &hdmi->connector.display_info;
+ unsigned int x, y;
u32 val = 0;
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
- clk_prepare_enable(hdmi->tmds_clk);
-
- sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
- val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
- val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
- writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0));
-
- val = SUN4I_HDMI_VID_CTRL_ENABLE;
- if (display->is_hdmi)
- val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE;
-
- writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
-}
-
-static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- unsigned int x, y;
- u32 val;
-
clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
@@ -178,34 +140,76 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
val |= SUN4I_HDMI_VID_TIMING_POL_VSYNC;
writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
+
+ clk_prepare_enable(hdmi->tmds_clk);
+
+ sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
+ val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
+ val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
+ writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0));
+
+ val = SUN4I_HDMI_VID_CTRL_ENABLE;
+ if (display->is_hdmi)
+ val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE;
+
+ writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
}
-static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
- const struct drm_display_mode *mode)
+static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
+ .atomic_disable = sun4i_hdmi_disable,
+ .atomic_enable = sun4i_hdmi_enable,
+};
+
+static enum drm_mode_status
+sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long clock)
{
- struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- unsigned long rate = mode->clock * 1000;
- unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+ const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
+ unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
long rounded_rate;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_BAD;
+
/* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
- if (rate > 165000000)
+ if (clock > 165000000)
return MODE_CLOCK_HIGH;
- rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+
+ rounded_rate = clk_round_rate(hdmi->tmds_clk, clock);
if (rounded_rate > 0 &&
- max_t(unsigned long, rounded_rate, rate) -
- min_t(unsigned long, rounded_rate, rate) < diff)
+ max_t(unsigned long, rounded_rate, clock) -
+ min_t(unsigned long, rounded_rate, clock) < diff)
return MODE_OK;
+
return MODE_NOCLOCK;
}
-static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
- .atomic_check = sun4i_hdmi_atomic_check,
- .disable = sun4i_hdmi_disable,
- .enable = sun4i_hdmi_enable,
- .mode_set = sun4i_hdmi_mode_set,
- .mode_valid = sun4i_hdmi_mode_valid,
-};
+static int sun4i_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = conn_state->crtc;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ enum drm_mode_status status;
+
+ status = sun4i_hdmi_connector_clock_valid(connector, mode,
+ mode->clock * 1000);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum drm_mode_status
+sun4i_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return sun4i_hdmi_connector_clock_valid(connector, mode,
+ mode->clock * 1000);
+}
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
@@ -251,6 +255,8 @@ static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev)
}
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
+ .atomic_check = sun4i_hdmi_connector_atomic_check,
+ .mode_valid = sun4i_hdmi_connector_mode_valid,
.get_modes = sun4i_hdmi_get_modes,
};
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index ef02d530f78d..ae12d001a04b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -522,7 +522,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- return err;
+ goto err_pm_disable;
}
disable_irq(dpaux->irq);
@@ -542,7 +542,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
*/
err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
if (err < 0)
- return err;
+ goto err_pm_disable;
#ifdef CONFIG_GENERIC_PINCONF
dpaux->desc.name = dev_name(&pdev->dev);
@@ -555,7 +555,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
if (IS_ERR(dpaux->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
- return PTR_ERR(dpaux->pinctrl);
+ err = PTR_ERR(dpaux->pinctrl);
+ goto err_pm_disable;
}
#endif
/* enable and clear all interrupts */
@@ -571,10 +572,15 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
if (err < 0) {
dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
- return err;
+ goto err_pm_disable;
}
return 0;
+
+err_pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return err;
}
static void tegra_dpaux_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index a73cff7a3070..03d1c76aec2d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1243,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
- if (err < 0)
- goto hub;
+ /*
+ * Only take over from a potential firmware framebuffer if any CRTCs
+ * have been registered. This must not be a fatal error because there
+ * are other accelerators that are exposed via this driver.
+ *
+ * Another case where this happens is on Tegra234 where the display
+ * hardware is no longer part of the host1x complex, so this driver
+ * will not expose any modesetting features.
+ */
+ if (drm->mode_config.num_crtc > 0) {
+ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ if (err < 0)
+ goto hub;
+ } else {
+ /*
+ * Indicate to userspace that this doesn't expose any display
+ * capabilities.
+ */
+ drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ }
err = drm_dev_register(drm, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ccb5d74fa227..682011166a8f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -13,7 +13,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
@@ -26,6 +25,7 @@
/* XXX move to include/uapi/drm/drm_fourcc.h? */
#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
+struct edid;
struct reset_control;
struct tegra_drm {
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index fbfe92a816d4..db606e151afc 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1544,9 +1544,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
if (np) {
struct platform_device *gangster = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!gangster)
+ return -EPROBE_DEFER;
dsi->slave = platform_get_drvdata(gangster);
- of_node_put(np);
if (!dsi->slave) {
put_device(&gangster->dev);
@@ -1594,44 +1596,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (!pdev->dev.pm_domain) {
dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (IS_ERR(dsi->rst)) {
+ err = PTR_ERR(dsi->rst);
+ goto remove;
+ }
}
dsi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dsi->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
- "cannot get DSI clock\n");
+ if (IS_ERR(dsi->clk)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
+ "cannot get DSI clock\n");
+ goto remove;
+ }
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
- if (IS_ERR(dsi->clk_lp))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
- "cannot get low-power clock\n");
+ if (IS_ERR(dsi->clk_lp)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
+ "cannot get low-power clock\n");
+ goto remove;
+ }
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(dsi->clk_parent))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
- "cannot get parent clock\n");
+ if (IS_ERR(dsi->clk_parent)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
+ "cannot get parent clock\n");
+ goto remove;
+ }
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
- if (IS_ERR(dsi->vdd))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
- "cannot get VDD supply\n");
+ if (IS_ERR(dsi->vdd)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
+ "cannot get VDD supply\n");
+ goto remove;
+ }
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- return err;
+ goto remove;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs))
- return PTR_ERR(dsi->regs);
+ if (IS_ERR(dsi->regs)) {
+ err = PTR_ERR(dsi->regs);
+ goto remove;
+ }
dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
- if (IS_ERR(dsi->mipi))
- return PTR_ERR(dsi->mipi);
+ if (IS_ERR(dsi->mipi)) {
+ err = PTR_ERR(dsi->mipi);
+ goto remove;
+ }
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1659,9 +1675,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return 0;
unregister:
+ pm_runtime_disable(&pdev->dev);
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
+remove:
+ tegra_output_remove(&dsi->output);
return err;
}
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index a719af1dc9a5..46170753699d 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -159,6 +159,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
if (gem->size < size) {
err = -EINVAL;
+ drm_gem_object_put(gem);
goto unreference;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index a1fcee665023..09987e372e3e 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -24,6 +24,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
@@ -1856,12 +1857,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ err = PTR_ERR(hdmi->regs);
+ goto remove;
+ }
err = platform_get_irq(pdev, 0);
if (err < 0)
- return err;
+ goto remove;
hdmi->irq = err;
@@ -1870,18 +1873,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
hdmi->irq, err);
- return err;
+ goto remove;
}
platform_set_drvdata(pdev, hdmi);
err = devm_pm_runtime_enable(&pdev->dev);
if (err)
- return err;
+ goto remove;
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- return err;
+ goto remove;
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
@@ -1891,10 +1894,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto remove;
}
return 0;
+
+remove:
+ tegra_output_remove(&hdmi->output);
+ return err;
}
static void tegra_hdmi_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index dc2dcb5ca1c8..4da3c3d1abbc 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
@@ -142,8 +143,10 @@ int tegra_output_probe(struct tegra_output *output)
GPIOD_IN,
"HDMI hotplug detect");
if (IS_ERR(output->hpd_gpio)) {
- if (PTR_ERR(output->hpd_gpio) != -ENOENT)
- return PTR_ERR(output->hpd_gpio);
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
+ err = PTR_ERR(output->hpd_gpio);
+ goto put_i2c;
+ }
output->hpd_gpio = NULL;
}
@@ -152,7 +155,7 @@ int tegra_output_probe(struct tegra_output *output)
err = gpiod_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
- return err;
+ goto put_i2c;
}
output->hpd_irq = err;
@@ -165,7 +168,7 @@ int tegra_output_probe(struct tegra_output *output)
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
- return err;
+ goto put_i2c;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
@@ -179,6 +182,12 @@ int tegra_output_probe(struct tegra_output *output)
}
return 0;
+
+put_i2c:
+ if (output->ddc)
+ i2c_put_adapter(output->ddc);
+
+ return err;
}
void tegra_output_remove(struct tegra_output *output)
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index fc66bbd913b2..1e8ec50b759e 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -225,26 +225,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->clk = devm_clk_get(dc->dev, NULL);
if (IS_ERR(rgb->clk)) {
dev_err(dc->dev, "failed to get clock\n");
- return PTR_ERR(rgb->clk);
+ err = PTR_ERR(rgb->clk);
+ goto remove;
}
rgb->clk_parent = devm_clk_get(dc->dev, "parent");
if (IS_ERR(rgb->clk_parent)) {
dev_err(dc->dev, "failed to get parent clock\n");
- return PTR_ERR(rgb->clk_parent);
+ err = PTR_ERR(rgb->clk_parent);
+ goto remove;
}
err = clk_set_parent(rgb->clk, rgb->clk_parent);
if (err < 0) {
dev_err(dc->dev, "failed to set parent clock: %d\n", err);
- return err;
+ goto remove;
}
rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
if (IS_ERR(rgb->pll_d_out0)) {
err = PTR_ERR(rgb->pll_d_out0);
dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
- return err;
+ goto remove;
}
if (dc->soc->has_pll_d2_out0) {
@@ -252,13 +254,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
if (IS_ERR(rgb->pll_d2_out0)) {
err = PTR_ERR(rgb->pll_d2_out0);
dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
- return err;
+ goto put_pll;
}
}
dc->rgb = &rgb->output;
return 0;
+
+put_pll:
+ clk_put(rgb->pll_d_out0);
+remove:
+ tegra_output_remove(&rgb->output);
+ return err;
}
void tegra_dc_rgb_remove(struct tegra_dc *dc)
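The Tegra probe changes above all apply the same unwinding pattern: once tegra_output_probe() (or a later acquisition such as pll_d_out0) has succeeded, subsequent failures jump to a label that releases it instead of returning directly, so a failed probe no longer leaks the output or its DDC adapter. A minimal sketch of that goto-based cleanup shape, with hypothetical foo_* names standing in for the driver specifics:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	struct foo {
		struct clk *clk;	/* placeholder driver state */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo *foo;
		int err;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		err = foo_output_probe(foo);		/* first acquisition (hypothetical) */
		if (err < 0)
			return err;			/* nothing to unwind yet */

		foo->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(foo->clk)) {
			err = PTR_ERR(foo->clk);
			goto remove;			/* undo foo_output_probe() */
		}

		err = foo_register(foo);		/* later step (hypothetical) */
		if (err < 0)
			goto remove;

		return 0;

	remove:
		foo_output_remove(foo);			/* hypothetical counterpart */
		return err;
	}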
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 83341576630d..bad3b8fcc726 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -20,6 +20,7 @@
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index fee6bec757d1..e48863a44556 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -14,14 +14,220 @@
#include "../lib/drm_random.h"
+static unsigned int random_seed;
+
static inline u64 get_size(int order, u64 chunk_size)
{
return (1 << order) * chunk_size;
}
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
+{
+ u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ DRM_RND_STATE(prng, random_seed);
+ unsigned int i, count, *order;
+ struct drm_buddy mm;
+ LIST_HEAD(allocated);
+
+ bias_size = SZ_1M;
+ ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+ ps = max(SZ_4K, ps);
+ mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
+
+ kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ count = mm_size / bias_size;
+ order = drm_random_order(count, &prng);
+ KUNIT_EXPECT_TRUE(test, order);
+
+ /*
+ * Idea is to split the address space into uniform bias ranges, and then
+ * in some random order allocate within each bias, using various
+ * patterns within. This should detect if allocations leak out from a
+ * given bias, for example.
+ */
+
+ for (i = 0; i < count; i++) {
+ LIST_HEAD(tmp);
+ u32 size;
+
+ bias_start = order[i] * bias_size;
+ bias_end = bias_start + bias_size;
+ bias_rem = bias_size;
+
+ /* internal round_up too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, bias_size,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+
+ /* size too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size + ps, ps);
+
+ /* bias range too small for size */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end, bias_size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end, bias_size, ps);
+
+ /* bias misaligned */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end - ps,
+ bias_size >> 1, bias_size >> 1,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+ /* single big page */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* single page with internal round_up */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, ps, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, ps, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* random size within */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ if (size)
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ bias_rem -= size;
+ /* too big for current avail */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_rem + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_rem + ps, ps);
+
+ if (bias_rem) {
+ /* random fill of the remainder */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ size = max(size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ /*
+ * Intentionally allow some space to be left
+ * unallocated, and ideally not always on the bias
+ * boundaries.
+ */
+ drm_buddy_free_list(&mm, &tmp);
+ } else {
+ list_splice_tail(&tmp, &allocated);
+ }
+ }
+
+ kfree(order);
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+
+ /*
+ * Something more free-form. Idea is to pick a random starting bias
+ * range within the address space and then start filling it up. Also
+ * randomly grow the bias range in both directions as we go along. This
+ * should give us bias start/end which is not always uniform like above,
+ * and in some cases will require the allocator to jump over already
+ * allocated nodes in the middle of the address space.
+ */
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ do {
+ u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ bias_rem -= size;
+
+ /*
+ * Try to randomly grow the bias range in both directions, or
+ * only one, or perhaps don't grow at all.
+ */
+ do {
+ u32 old_bias_start = bias_start;
+ u32 old_bias_end = bias_end;
+
+ if (bias_start)
+ bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+ if (bias_end != mm_size)
+ bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+ bias_rem += old_bias_start - bias_start;
+ bias_rem += bias_end - old_bias_end;
+ } while (!bias_rem && (bias_start || bias_end != mm_size));
+ } while (bias_rem);
+
+ KUNIT_ASSERT_EQ(test, bias_start, 0);
+ KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+ ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc passed with bias(%x-%x), size=%u\n",
+ bias_start, bias_end, ps);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
- u64 mm_size, ps = SZ_4K, i, n_pages, total;
+ const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
+ unsigned long i, n_pages, total;
struct drm_buddy_block *block;
struct drm_buddy mm;
LIST_HEAD(left);
@@ -29,8 +235,6 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
LIST_HEAD(right);
LIST_HEAD(allocated);
- mm_size = 16 * 3 * SZ_4K;
-
KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
/*
@@ -56,30 +260,30 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test,
drm_buddy_alloc_blocks(&mm, 0, mm_size,
ps, ps, list, 0),
- "buddy_alloc hit an error size=%d\n",
+ "buddy_alloc hit an error size=%lu\n",
ps);
} while (++i < n_pages);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
- "buddy_alloc didn't error size=%d\n", 3 * ps);
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
drm_buddy_free_list(&mm, &middle);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
- "buddy_alloc didn't error size=%llu\n", 3 * ps);
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
2 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
- "buddy_alloc didn't error size=%llu\n", 2 * ps);
+ "buddy_alloc didn't error size=%lu\n", 2 * ps);
drm_buddy_free_list(&mm, &right);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
- "buddy_alloc didn't error size=%llu\n", 3 * ps);
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
/*
* At this point we should have enough contiguous space for 2 blocks,
* however they are never buddies (since we freed middle and right) so
@@ -88,13 +292,13 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
2 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
- "buddy_alloc hit an error size=%d\n", 2 * ps);
+ "buddy_alloc hit an error size=%lu\n", 2 * ps);
drm_buddy_free_list(&mm, &left);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
3 * ps, ps, &allocated,
DRM_BUDDY_CONTIGUOUS_ALLOCATION),
- "buddy_alloc hit an error size=%d\n", 3 * ps);
+ "buddy_alloc hit an error size=%lu\n", 3 * ps);
total = 0;
list_for_each_entry(block, &allocated, link)
@@ -363,17 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
+static int drm_buddy_suite_init(struct kunit_suite *suite)
+{
+ while (!random_seed)
+ random_seed = get_random_u32();
+
+ kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+ random_seed);
+
+ return 0;
+}
+
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
+ .suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};
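The new drm_test_buddy_alloc_range_bias() case above stresses range-restricted allocations against the buddy allocator. For orientation, the call pattern it is built around looks roughly like the sketch below; the concrete sizes here are illustrative, whereas the test derives them from a seeded PRNG:

	#include <drm/drm_buddy.h>
	#include <linux/sizes.h>

	static int example_bias_alloc(void)
	{
		struct drm_buddy mm;
		LIST_HEAD(blocks);
		int err;

		/* 8M address space managed in 4K chunks */
		err = drm_buddy_init(&mm, SZ_8M, SZ_4K);
		if (err)
			return err;

		/* ask for 1M, but only from the [2M, 3M) window */
		err = drm_buddy_alloc_blocks(&mm, SZ_2M, SZ_2M + SZ_1M,
					     SZ_1M, SZ_4K, &blocks,
					     DRM_BUDDY_RANGE_ALLOCATION);

		drm_buddy_free_list(&mm, &blocks);
		drm_buddy_fini(&mm);
		return err;
	}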
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index c66aa2dc8d9d..44f82ed2a958 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -3,10 +3,175 @@
* Kunit test for drm_modes functions
*/
+#include <linux/i2c.h>
+
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>
+struct drm_connector_init_priv {
+ struct drm_device drm;
+ struct drm_connector connector;
+ struct i2c_adapter ddc;
+};
+
+static const struct drm_connector_funcs dummy_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int dummy_ddc_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ return num;
+}
+
+static u32 dummy_ddc_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm dummy_ddc_algorithm = {
+ .master_xfer = dummy_ddc_xfer,
+ .functionality = dummy_ddc_func,
+};
+
+static void i2c_del_adapter_wrapper(void *ptr)
+{
+ struct i2c_adapter *adap = ptr;
+
+ i2c_del_adapter(adap);
+}
+
+static int drm_test_connector_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_connector_init_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ strscpy(priv->ddc.name, "dummy-connector-ddc", sizeof(priv->ddc.name));
+ priv->ddc.owner = THIS_MODULE;
+ priv->ddc.algo = &dummy_ddc_algorithm;
+ priv->ddc.dev.parent = dev;
+
+ ret = i2c_add_adapter(&priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = kunit_add_action_or_reset(test, i2c_del_adapter_wrapper, &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ test->priv = priv;
+ return 0;
+}
+
+/*
+ * Test that the registration of a bog standard connector works as
+ * expected and doesn't report any error.
+ */
+static void drm_test_drmm_connector_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ ret = drmm_connector_init(&priv->drm, &priv->connector,
+ &dummy_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the registration of a connector without a DDC adapter
+ * doesn't report any error.
+ */
+static void drm_test_drmm_connector_init_null_ddc(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ ret = drmm_connector_init(&priv->drm, &priv->connector,
+ &dummy_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the registration of a connector succeeds for all possible
+ * connector types.
+ */
+static void drm_test_drmm_connector_init_type_valid(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ int ret;
+
+ ret = drmm_connector_init(&priv->drm, &priv->connector,
+ &dummy_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static const unsigned int drm_connector_init_type_valid_tests[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_DVIA,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_LVDS,
+ DRM_MODE_CONNECTOR_Component,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ DRM_MODE_CONNECTOR_HDMIA,
+ DRM_MODE_CONNECTOR_HDMIB,
+ DRM_MODE_CONNECTOR_TV,
+ DRM_MODE_CONNECTOR_eDP,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ DRM_MODE_CONNECTOR_DSI,
+ DRM_MODE_CONNECTOR_DPI,
+ DRM_MODE_CONNECTOR_WRITEBACK,
+ DRM_MODE_CONNECTOR_SPI,
+ DRM_MODE_CONNECTOR_USB,
+};
+
+static void drm_connector_init_type_desc(const unsigned int *type, char *desc)
+{
+ sprintf(desc, "%s", drm_get_connector_type_name(*type));
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_init_type_valid,
+ drm_connector_init_type_valid_tests,
+ drm_connector_init_type_desc);
+
+static struct kunit_case drmm_connector_init_tests[] = {
+ KUNIT_CASE(drm_test_drmm_connector_init),
+ KUNIT_CASE(drm_test_drmm_connector_init_null_ddc),
+ KUNIT_CASE_PARAM(drm_test_drmm_connector_init_type_valid,
+ drm_connector_init_type_valid_gen_params),
+ { }
+};
+
+static struct kunit_suite drmm_connector_init_test_suite = {
+ .name = "drmm_connector_init",
+ .init = drm_test_connector_init,
+ .test_cases = drmm_connector_init_tests,
+};
+
struct drm_get_tv_mode_from_name_test {
const char *name;
enum drm_connector_tv_mode expected_mode;
@@ -70,7 +235,10 @@ static struct kunit_suite drm_get_tv_mode_from_name_test_suite = {
.test_cases = drm_get_tv_mode_from_name_tests,
};
-kunit_test_suite(drm_get_tv_mode_from_name_test_suite);
+kunit_test_suites(
+ &drmm_connector_init_test_suite,
+ &drm_get_tv_mode_from_name_test_suite
+);
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index ca4f8e4c5d5d..d5317d13d3fc 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
@@ -14,6 +16,8 @@
#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
static const struct drm_mode_config_funcs drm_mode_config_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
/**
@@ -161,5 +165,151 @@ drm_kunit_helper_atomic_state_alloc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_atomic_state_alloc);
+static const uint32_t default_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t default_plane_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const struct drm_plane_helper_funcs default_plane_helper_funcs = {
+};
+
+static const struct drm_plane_funcs default_plane_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .reset = drm_atomic_helper_plane_reset,
+};
+
+/**
+ * drm_kunit_helper_create_primary_plane - Creates a mock primary plane for a KUnit test
+ * @test: The test context object
+ * @drm: The device to alloc the plane for
+ * @funcs: Callbacks for the new plane. Optional.
+ * @helper_funcs: Helpers callbacks for the new plane. Optional.
+ * @formats: array of supported formats (DRM_FORMAT\_\*). Optional.
+ * @num_formats: number of elements in @formats
+ * @modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID. Optional.
+ *
+ * This allocates and initializes a mock struct &drm_plane meant to be
+ * part of a mock device for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * @funcs will default to the default helpers implementations.
+ * @helper_funcs will default to an empty implementation. @formats will
+ * default to XRGB8888 only. @modifiers will default to a linear
+ * modifier only.
+ *
+ * Returns:
+ * A pointer to the new plane, or an ERR_PTR() otherwise.
+ */
+struct drm_plane *
+drm_kunit_helper_create_primary_plane(struct kunit *test,
+ struct drm_device *drm,
+ const struct drm_plane_funcs *funcs,
+ const struct drm_plane_helper_funcs *helper_funcs,
+ const uint32_t *formats,
+ unsigned int num_formats,
+ const uint64_t *modifiers)
+{
+ struct drm_plane *plane;
+
+ if (!funcs)
+ funcs = &default_plane_funcs;
+
+ if (!helper_funcs)
+ helper_funcs = &default_plane_helper_funcs;
+
+ if (!formats || !num_formats) {
+ formats = default_plane_formats;
+ num_formats = ARRAY_SIZE(default_plane_formats);
+ }
+
+ if (!modifiers)
+ modifiers = default_plane_modifiers;
+
+ plane = __drmm_universal_plane_alloc(drm,
+ sizeof(struct drm_plane), 0,
+ 0,
+ funcs,
+ formats,
+ num_formats,
+ modifiers,

+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane);
+
+ drm_plane_helper_add(plane, helper_funcs);
+
+ return plane;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_create_primary_plane);
+
+static const struct drm_crtc_helper_funcs default_crtc_helper_funcs = {
+};
+
+static const struct drm_crtc_funcs default_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .reset = drm_atomic_helper_crtc_reset,
+};
+
+/**
+ * drm_kunit_helper_create_crtc - Creates a mock CRTC for a KUnit test
+ * @test: The test context object
+ * @drm: The device to alloc the plane for
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC. Optional.
+ * @funcs: Callbacks for the new plane. Optional.
+ * @helper_funcs: Helpers callbacks for the new plane. Optional.
+ *
+ * This allocates and initializes a mock struct &drm_crtc meant to be
+ * part of a mock device for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * @funcs will default to the default helpers implementations.
+ * @helper_funcs will default to an empty implementation.
+ *
+ * Returns:
+ * A pointer to the new CRTC, or an ERR_PTR() otherwise.
+ */
+struct drm_crtc *
+drm_kunit_helper_create_crtc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const struct drm_crtc_helper_funcs *helper_funcs)
+{
+ struct drm_crtc *crtc;
+ int ret;
+
+ if (!funcs)
+ funcs = &default_crtc_funcs;
+
+ if (!helper_funcs)
+ helper_funcs = &default_crtc_helper_funcs;
+
+ crtc = drmm_kzalloc(drm, sizeof(*crtc), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, crtc);
+
+ ret = drmm_crtc_init_with_planes(drm, crtc,
+ primary,
+ cursor,
+ funcs,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_crtc_helper_add(crtc, helper_funcs);
+
+ return crtc;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_LICENSE("GPL");
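Driver KUnit tests that want a minimal mock pipeline can lean on the two helpers added above and pass NULL/0 to pick up the built-in defaults. A hedged usage sketch; the drm pointer is assumed to have been stashed by a suite .init via drm_kunit_helper_alloc_drm_device():

	static void example_mock_pipeline(struct kunit *test)
	{
		struct drm_device *drm = test->priv;	/* assumption: stored by .init */
		struct drm_plane *plane;
		struct drm_crtc *crtc;

		/* XRGB8888-only primary plane, default funcs and helpers */
		plane = drm_kunit_helper_create_primary_plane(test, drm,
							      NULL, NULL,
							      NULL, 0, NULL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane);

		/* CRTC bound to that plane, no cursor, default funcs */
		crtc = drm_kunit_helper_create_crtc(test, drm, plane, NULL,
						    NULL, NULL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
	}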
diff --git a/drivers/gpu/drm/tests/drm_managed_test.c b/drivers/gpu/drm/tests/drm_managed_test.c
index 1652dca11d30..76eb273c9b36 100644
--- a/drivers/gpu/drm/tests/drm_managed_test.c
+++ b/drivers/gpu/drm/tests/drm_managed_test.c
@@ -12,6 +12,7 @@
#define TEST_TIMEOUT_MS 100
struct managed_test_priv {
+ struct drm_device *drm;
bool action_done;
wait_queue_head_t action_wq;
};
@@ -24,44 +25,88 @@ static void drm_action(struct drm_device *drm, void *ptr)
wake_up_interruptible(&priv->action_wq);
}
-static void drm_test_managed_run_action(struct kunit *test)
+/*
+ * The test verifies that the release action is called when
+ * drmm_release_action is called.
+ */
+static void drm_test_managed_release_action(struct kunit *test)
{
- struct managed_test_priv *priv;
- struct drm_device *drm;
- struct device *dev;
+ struct managed_test_priv *priv = test->priv;
int ret;
- priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
- init_waitqueue_head(&priv->action_wq);
+ ret = drmm_add_action_or_reset(priv->drm, drm_action, priv);
+ KUNIT_EXPECT_EQ(test, ret, 0);
- dev = drm_kunit_helper_alloc_device(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ ret = drm_dev_register(priv->drm, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drmm_release_action(priv->drm, drm_action, priv);
+ ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
+ msecs_to_jiffies(TEST_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+
+ drm_dev_unregister(priv->drm);
+ drm_kunit_helper_free_device(test, priv->drm->dev);
+}
- drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+/*
+ * The test verifies that the release action is called automatically when the
+ * device is released.
+ */
+static void drm_test_managed_run_action(struct kunit *test)
+{
+ struct managed_test_priv *priv = test->priv;
+ int ret;
- ret = drmm_add_action_or_reset(drm, drm_action, priv);
+ ret = drmm_add_action_or_reset(priv->drm, drm_action, priv);
KUNIT_EXPECT_EQ(test, ret, 0);
- ret = drm_dev_register(drm, 0);
+ ret = drm_dev_register(priv->drm, 0);
KUNIT_ASSERT_EQ(test, ret, 0);
- drm_dev_unregister(drm);
- drm_kunit_helper_free_device(test, dev);
+ drm_dev_unregister(priv->drm);
+ drm_kunit_helper_free_device(test, priv->drm->dev);
ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
msecs_to_jiffies(TEST_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
}
+static int drm_managed_test_init(struct kunit *test)
+{
+ struct managed_test_priv *priv;
+ struct device *dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ init_waitqueue_head(&priv->action_wq);
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /*
+ * DRM device can't be embedded in priv, since priv->action_done needs
+ * to remain allocated beyond both parent device and drm_device
+ * lifetime.
+ */
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ test->priv = priv;
+
+ return 0;
+}
+
static struct kunit_case drm_managed_tests[] = {
+ KUNIT_CASE(drm_test_managed_release_action),
KUNIT_CASE(drm_test_managed_run_action),
{}
};
static struct kunit_suite drm_managed_test_suite = {
- .name = "drm-test-managed",
+ .name = "drm_managed",
+ .init = drm_managed_test_init,
.test_cases = drm_managed_tests
};
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 1eb0c304f960..8497d9990b96 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -157,7 +157,7 @@ static void drm_test_mm_init(struct kunit *test)
/* After creation, it should all be one massive hole */
if (!assert_one_hole(test, &mm, 0, size)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm not one hole on creation");
goto out;
}
@@ -171,14 +171,14 @@ static void drm_test_mm_init(struct kunit *test)
/* After filling the range entirely, there should be no holes */
if (!assert_no_holes(test, &mm)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm has holes when filled");
goto out;
}
/* And then after emptying it again, the massive hole should be back */
drm_mm_remove_node(&tmp);
if (!assert_one_hole(test, &mm, 0, size)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm does not have single hole after emptying");
goto out;
}
@@ -188,7 +188,7 @@ out:
static void drm_test_mm_debug(struct kunit *test)
{
- struct drm_printer p = drm_debug_printer(test->name);
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, test->name);
struct drm_mm mm;
struct drm_mm_node nodes[2];
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 5f838980c7a1..94f8e3178df5 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -265,6 +265,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
reinit_completion(&tcrtc->framedone_completion);
+ /*
+ * If a layer is left enabled when the videoport is disabled, and the
+ * vid pipeline that was used for the layer is taken into use on
+ * another videoport, the DSS will report sync lost issues. Disable all
+ * the layers here as a work-around.
+ */
+ for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+ false);
+
dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index e1c0ef0c3894..68fed531f6a7 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -213,7 +213,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
- drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+ drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0,
num_planes - 1);
ret = drm_plane_create_color_properties(&tplane->plane,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 23bf16f596f6..cd5eefa06060 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -182,9 +182,6 @@ static void tilcdc_fini(struct drm_device *dev)
if (priv->clk)
clk_put(priv->clk);
- if (priv->mmio)
- iounmap(priv->mmio);
-
if (priv->wq)
destroy_workqueue(priv->wq);
@@ -201,7 +198,6 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct tilcdc_drm_private *priv;
- struct resource *res;
u32 bpp = 0;
int ret;
@@ -226,17 +222,10 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get memory resource\n");
- ret = -EINVAL;
- goto init_failed;
- }
-
- priv->mmio = ioremap(res->start, resource_size(res));
- if (!priv->mmio) {
- dev_err(dev, "failed to ioremap\n");
- ret = -ENOMEM;
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmio)) {
+ dev_err(dev, "failed to request / ioremap\n");
+ ret = PTR_ERR(priv->mmio);
goto init_failed;
}
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
index ec87c4fc1ad5..468535f7eed2 100644
--- a/drivers/gpu/drm/ttm/tests/Makefile
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -3,4 +3,7 @@
obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
ttm_device_test.o \
ttm_pool_test.o \
+ ttm_resource_test.o \
+ ttm_tt_test.o \
+ ttm_bo_test.o \
ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
new file mode 100644
index 000000000000..1f8a4f8adc92
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/dma-resv.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_8K
+
+struct ttm_bo_test_case {
+ const char *description;
+ bool interruptible;
+ bool no_wait;
+};
+
+static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
+ {
+ .description = "Cannot be interrupted and sleeps",
+ .interruptible = false,
+ .no_wait = false,
+ },
+ {
+ .description = "Cannot be interrupted, locks straight away",
+ .interruptible = false,
+ .no_wait = true,
+ },
+ {
+ .description = "Can be interrupted, sleeps",
+ .interruptible = true,
+ .no_wait = false,
+ },
+};
+
+static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);
+
+static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
+{
+ const struct ttm_bo_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ dma_resv_unlock(bo->base.resv);
+}
+
+static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ /* Let's lock it beforehand */
+ dma_resv_lock(bo->base.resv, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+}
+
+static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+
+ ww_acquire_fini(&ctx);
+}
+
+static void ttm_bo_reserve_double_resv(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ dma_resv_unlock(bo->base.resv);
+ ww_acquire_fini(&ctx);
+
+ KUNIT_ASSERT_EQ(test, err, -EALREADY);
+}
+
+/*
+ * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
+ * a deadlock by manipulating the sequence number of the context that holds
+ * dma_resv lock of bo2 so the other context is "wounded" and has to back off
+ * (indicated by -EDEADLK). The subtest checks if ttm_bo_reserve() properly
+ * propagates that error.
+ */
+static void ttm_bo_reserve_deadlock(struct kunit *test)
+{
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ww_acquire_ctx ctx1, ctx2;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ ww_acquire_init(&ctx1, &reservation_ww_class);
+ mutex_lock(&bo2->base.resv->lock.base);
+
+ /* The deadlock will be caught by WW mutex, don't warn about it */
+ lock_release(&bo2->base.resv->lock.base.dep_map, 1);
+
+ bo2->base.resv->lock.ctx = &ctx2;
+ ctx2 = ctx1;
+ ctx2.stamp--; /* Make the context holding the lock younger */
+
+ err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, -EDEADLK);
+
+ dma_resv_unlock(bo1->base.resv);
+ ww_acquire_fini(&ctx1);
+}
+
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+struct signal_timer {
+ struct timer_list timer;
+ struct ww_acquire_ctx *ctx;
+};
+
+static void signal_for_ttm_bo_reserve(struct timer_list *t)
+{
+ struct signal_timer *s_timer = from_timer(s_timer, t, timer);
+ struct task_struct *task = s_timer->ctx->task;
+
+ do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
+}
+
+static int threaded_ttm_bo_reserve(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct signal_timer s_timer;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = true;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ /* Prepare a signal that will interrupt the reservation attempt */
+ timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
+ s_timer.ctx = &ctx;
+
+ mod_timer(&s_timer.timer, msecs_to_jiffies(100));
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ timer_delete_sync(&s_timer.timer);
+ destroy_timer_on_stack(&s_timer.timer);
+
+ ww_acquire_fini(&ctx);
+
+ return err;
+}
+
+static void ttm_bo_reserve_interrupted(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");
+
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create ttm bo reserve task\n");
+
+ /* Take a lock so the threaded reserve has to wait */
+ mutex_lock(&bo->base.resv->lock.base);
+
+ wake_up_process(task);
+ msleep(20);
+ err = kthread_stop(task);
+
+ mutex_unlock(&bo->base.resv->lock.base);
+
+ KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
+}
+#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */
+
+static void ttm_bo_unreserve_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->priority = bo_prio;
+
+ err = ttm_resource_alloc(bo, place, &res1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ bo->resource = res1;
+
+ /* Add a dummy resource to populate LRU */
+ ttm_resource_alloc(bo, place, &res2);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unreserve(bo);
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+
+ ttm_resource_free(bo, &res2);
+ ttm_resource_free(bo, &res1);
+}
+
+static void ttm_bo_unreserve_pinned(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+
+ err = ttm_resource_alloc(bo, place, &res1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res1;
+
+ /* Add a dummy resource to the pinned list */
+ err = ttm_resource_alloc(bo, place, &res2);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+
+ ttm_bo_unreserve(bo);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+
+ ttm_resource_free(bo, &res1);
+ ttm_resource_free(bo, &res2);
+}
+
+static void ttm_bo_unreserve_bulk(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ttm_resource *res1, *res2;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ dma_resv_lock(bo1->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
+ dma_resv_unlock(bo1->base.resv);
+
+ err = ttm_resource_alloc(bo1, place, &res1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo1->resource = res1;
+
+ dma_resv_lock(bo2->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
+ dma_resv_unlock(bo2->base.resv);
+
+ err = ttm_resource_alloc(bo2, place, &res2);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo2->resource = res2;
+
+ ttm_bo_reserve(bo1, false, false, NULL);
+ ttm_bo_unreserve(bo1);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+ KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);
+
+ ttm_resource_free(bo1, &res1);
+ ttm_resource_free(bo2, &res2);
+}
+
+static void ttm_bo_put_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_device;
+
+ err = ttm_resource_alloc(bo, place, &res);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_put(bo);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "kunit-ttm-bo-put";
+}
+
+static const struct dma_fence_ops mock_fence_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static void ttm_bo_put_shared_resv(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct dma_resv *external_resv;
+ struct dma_fence *fence;
+ /* A dummy DMA fence lock */
+ spinlock_t fence_lock;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ external_resv = kunit_kzalloc(test, sizeof(*external_resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, external_resv);
+
+ dma_resv_init(external_resv);
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ spin_lock_init(&fence_lock);
+ dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);
+
+ dma_resv_lock(external_resv, NULL);
+ dma_resv_reserve_fences(external_resv, 1);
+ dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_unlock(external_resv);
+
+ dma_fence_signal(fence);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_device;
+ bo->base.resv = external_resv;
+
+ ttm_bo_put(bo);
+}
+
+static void ttm_bo_pin_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ unsigned int no_pins = 3;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ for (int i = 0; i < no_pins; i++) {
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
+}
+
+static void ttm_bo_pin_unpin_resource(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_resource_alloc(bo, place, &res);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 0);
+
+ ttm_resource_free(bo, &res);
+}
+
+static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_resource_alloc(bo, place, &res);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+
+ /* Multiple pins */
+ ttm_bo_pin(bo);
+ ttm_bo_pin(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_resource_free(bo, &res);
+}
+
+static struct kunit_case ttm_bo_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
+ ttm_bo_reserve_gen_params),
+ KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
+ KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
+ KUNIT_CASE(ttm_bo_reserve_double_resv),
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+ KUNIT_CASE(ttm_bo_reserve_interrupted),
+#endif
+ KUNIT_CASE(ttm_bo_reserve_deadlock),
+ KUNIT_CASE(ttm_bo_unreserve_basic),
+ KUNIT_CASE(ttm_bo_unreserve_pinned),
+ KUNIT_CASE(ttm_bo_unreserve_bulk),
+ KUNIT_CASE(ttm_bo_put_basic),
+ KUNIT_CASE(ttm_bo_put_shared_resv),
+ KUNIT_CASE(ttm_bo_pin_basic),
+ KUNIT_CASE(ttm_bo_pin_unpin_resource),
+ KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
+ {}
+};
+
+static struct kunit_suite ttm_bo_test_suite = {
+ .name = "ttm_bo",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_test_suite);
+
+MODULE_LICENSE("GPL");
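The reservation cases above all revolve around the same locking handshake. A compressed sketch of a ticketed reserve/unreserve cycle of the kind the suite checks, with error handling trimmed to the return codes the tests assert on:

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	/* interruptible = false, no_wait = false: sleep until bo is ours */
	err = ttm_bo_reserve(bo, false, false, &ctx);
	if (err == -EDEADLK) {
		/* wounded by an older ticket: back off and retry later */
	} else if (!err) {
		/* ... operate on bo->resource under the reservation ... */
		ttm_bo_unreserve(bo);	/* also repositions the BO on its LRU */
	}

	ww_acquire_fini(&ctx);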
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index 81661d8827aa..7b7c1fa805fc 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -2,9 +2,33 @@
/*
* Copyright © 2023 Intel Corporation
*/
+#include <drm/ttm/ttm_tt.h>
+
#include "ttm_kunit_helpers.h"
+static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
+
+ return tt;
+}
+
+static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ kfree(ttm);
+}
+
+static void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+}
+
struct ttm_device_funcs ttm_dev_funcs = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
@@ -29,19 +53,41 @@ struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size)
{
- struct drm_gem_object gem_obj = { .size = size };
+ struct drm_gem_object gem_obj = { };
struct ttm_buffer_object *bo;
+ int err;
bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, bo);
bo->base = gem_obj;
+ err = drm_gem_object_init(devs->drm, &bo->base, size);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
bo->bdev = devs->ttm_dev;
+ bo->destroy = dummy_ttm_bo_destroy;
+
+ kref_init(&bo->kref);
return bo;
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test,
+ uint32_t mem_type, uint32_t flags)
+{
+ struct ttm_place *place;
+
+ place = kunit_kzalloc(test, sizeof(*place), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, place);
+
+ place->mem_type = mem_type;
+ place->flags = flags;
+
+ return place;
+}
+EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
+
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
struct ttm_test_devices *devs;
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
index e261e3660d0b..2f51c833a536 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>
@@ -28,6 +29,8 @@ int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test,
+ uint32_t mem_type, uint32_t flags);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index cceaa18d4e46..0a3fede84da9 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -78,10 +78,9 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
struct ttm_test_devices *devs = priv->devs;
struct ttm_pool *pool;
struct ttm_tt *tt;
- unsigned long order = __fls(size / PAGE_SIZE);
int err;
- tt = ttm_tt_kunit_init(test, order, caching, size);
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
new file mode 100644
index 000000000000..029e1f094bb0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define RES_SIZE SZ_4K
+#define TTM_PRIV_DUMMY_REG (TTM_NUM_MEM_TYPES - 1)
+
+struct ttm_resource_test_case {
+ const char *description;
+ uint32_t mem_type;
+ uint32_t flags;
+};
+
+struct ttm_resource_test_priv {
+ struct ttm_test_devices *devs;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+};
+
+static const struct ttm_resource_manager_func ttm_resource_manager_mock_funcs = { };
+
+static int ttm_resource_test_init(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_all(test);
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_resource_test_fini(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static void ttm_init_test_mocks(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ uint32_t mem_type, uint32_t flags)
+{
+ size_t size = RES_SIZE;
+
+ /* Make sure we have what we need for a good BO mock */
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev);
+
+ priv->bo = ttm_bo_kunit_init(test, priv->devs, size);
+ priv->place = ttm_place_kunit_init(test, mem_type, flags);
+}
+
+static void ttm_init_test_manager(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ uint32_t mem_type)
+{
+ struct ttm_device *ttm_dev = priv->devs->ttm_dev;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ man->use_tt = false;
+ man->func = &ttm_resource_manager_mock_funcs;
+
+ ttm_resource_manager_init(man, ttm_dev, size);
+ ttm_set_driver_manager(ttm_dev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+}
+
+static const struct ttm_resource_test_case ttm_resource_cases[] = {
+ {
+ .description = "Init resource in TTM_PL_SYSTEM",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "Init resource in TTM_PL_VRAM",
+ .mem_type = TTM_PL_VRAM,
+ },
+ {
+ .description = "Init resource in a private placement",
+ .mem_type = TTM_PRIV_DUMMY_REG,
+ },
+ {
+ .description = "Init resource in TTM_PL_SYSTEM, set placement flags",
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_TOPDOWN,
+ },
+};
+
+static void ttm_resource_case_desc(const struct ttm_resource_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_resource, ttm_resource_cases, ttm_resource_case_desc);
+
+static void ttm_resource_init_basic(struct kunit *test)
+{
+ const struct ttm_resource_test_case *params = test->param_value;
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ uint64_t expected_usage;
+
+ ttm_init_test_mocks(test, priv, params->mem_type, params->flags);
+ bo = priv->bo;
+ place = priv->place;
+
+ if (params->mem_type > TTM_PL_SYSTEM)
+ ttm_init_test_manager(test, priv, params->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+ expected_usage = man->usage + RES_SIZE;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+
+ ttm_resource_init(bo, place, res);
+
+ KUNIT_ASSERT_EQ(test, res->start, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, place->mem_type);
+ KUNIT_ASSERT_EQ(test, res->placement, place->flags);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ KUNIT_ASSERT_NULL(test, res->bus.addr);
+ KUNIT_ASSERT_EQ(test, res->bus.offset, 0);
+ KUNIT_ASSERT_FALSE(test, res->bus.is_iomem);
+ KUNIT_ASSERT_EQ(test, res->bus.caching, ttm_cached);
+ KUNIT_ASSERT_EQ(test, man->usage, expected_usage);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_init_pinned(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ ttm_resource_init(bo, place, res);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned));
+
+ ttm_bo_unpin(bo);
+ ttm_resource_fini(man, res);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+}
+
+static void ttm_resource_fini_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_init(bo, place, res);
+ ttm_resource_fini(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static void ttm_resource_manager_init_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_resource_manager_init(man, priv->devs->ttm_dev, size);
+
+ KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev);
+ KUNIT_ASSERT_EQ(test, man->size, size);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ KUNIT_ASSERT_NULL(test, man->move);
+ KUNIT_ASSERT_NOT_NULL(test, &man->move_lock);
+
+ for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
+}
+
+static void ttm_resource_manager_usage_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ uint64_t actual_usage;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, TTM_PL_FLAG_TOPDOWN);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ ttm_resource_init(bo, place, res);
+ actual_usage = ttm_resource_manager_usage(man);
+
+ KUNIT_ASSERT_EQ(test, actual_usage, RES_SIZE);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_manager_set_used_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_TRUE(test, man->use_type);
+
+ ttm_resource_manager_set_used(man, false);
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+}
+
+static void ttm_sys_man_alloc_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ int ret;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ ret = man->func->alloc(man, bo, place, &res);
+
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, mem_type);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_sys_man_free_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_alloc(bo, place, &res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ man->func->free(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static struct kunit_case ttm_resource_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_resource_init_basic, ttm_resource_gen_params),
+ KUNIT_CASE(ttm_resource_init_pinned),
+ KUNIT_CASE(ttm_resource_fini_basic),
+ KUNIT_CASE(ttm_resource_manager_init_basic),
+ KUNIT_CASE(ttm_resource_manager_usage_basic),
+ KUNIT_CASE(ttm_resource_manager_set_used_basic),
+ KUNIT_CASE(ttm_sys_man_alloc_basic),
+ KUNIT_CASE(ttm_sys_man_free_basic),
+ {}
+};
+
+static struct kunit_suite ttm_resource_test_suite = {
+ .name = "ttm_resource",
+ .init = ttm_resource_test_init,
+ .exit = ttm_resource_test_fini,
+ .test_cases = ttm_resource_test_cases,
+};
+
+kunit_test_suites(&ttm_resource_test_suite);
+
+MODULE_LICENSE("GPL");
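
Both KUnit suites in this series follow the same parameterised-test pattern: KUNIT_ARRAY_PARAM() generates a *_gen_params helper that KUNIT_CASE_PARAM() then consumes. A minimal stand-alone suite using that pattern might look like the sketch below; the demo_* names are invented for illustration and are not part of this series.

// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <linux/string.h>

struct demo_case {
	const char *description;
	int in;
	int expected;
};

static const struct demo_case demo_cases[] = {
	{ .description = "doubles one",  .in = 1, .expected = 2 },
	{ .description = "doubles zero", .in = 0, .expected = 0 },
};

static void demo_case_desc(const struct demo_case *c, char *desc)
{
	strscpy(desc, c->description, KUNIT_PARAM_DESC_SIZE);
}

/* Emits demo_gen_params(), referenced by KUNIT_CASE_PARAM() below */
KUNIT_ARRAY_PARAM(demo, demo_cases, demo_case_desc);

static void demo_double(struct kunit *test)
{
	const struct demo_case *params = test->param_value;

	KUNIT_EXPECT_EQ(test, params->in * 2, params->expected);
}

static struct kunit_case demo_test_cases[] = {
	KUNIT_CASE_PARAM(demo_double, demo_gen_params),
	{}
};

static struct kunit_suite demo_test_suite = {
	.name = "demo",
	.test_cases = demo_test_cases,
};

kunit_test_suites(&demo_test_suite);

MODULE_LICENSE("GPL");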
diff --git a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
new file mode 100644
index 000000000000..fd4502c18de6
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/shmem_fs.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_4K
+
+struct ttm_tt_test_case {
+ const char *description;
+ uint32_t size;
+ uint32_t extra_pages_num;
+};
+
+static int ttm_tt_test_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = ttm_test_devices_all(test);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+ test->priv = priv;
+
+ return 0;
+}
+
+static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
+ {
+ .description = "Page-aligned size",
+ .size = SZ_4K,
+ },
+ {
+ .description = "Extra pages requested",
+ .size = SZ_4K,
+ .extra_pages_num = 1,
+ },
+};
+
+static void ttm_tt_init_case_desc(const struct ttm_tt_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_cases,
+ ttm_tt_init_case_desc);
+
+static void ttm_tt_init_basic(struct kunit *test)
+{
+ const struct ttm_tt_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ uint32_t page_flags = TTM_TT_FLAG_ZERO_ALLOC;
+ enum ttm_caching caching = ttm_cached;
+ uint32_t extra_pages = params->extra_pages_num;
+ int num_pages = params->size >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, params->size);
+
+ err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);
+
+ KUNIT_ASSERT_EQ(test, tt->page_flags, page_flags);
+ KUNIT_ASSERT_EQ(test, tt->caching, caching);
+
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_init_misaligned(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ uint32_t size = SZ_8K;
+ int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size);
+
+ /* Make the object size misaligned */
+ bo->base.size += 1;
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);
+}
+
+static void ttm_tt_fini_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->pages);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->pages);
+}
+
+static void ttm_tt_fini_sg(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->dma_address);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+}
+
+static void ttm_tt_fini_shmem(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct file *shmem;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ shmem = shmem_file_setup("ttm swap", BO_SIZE, 0);
+ tt->swap_storage = shmem;
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_create_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_device;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+
+ /* Free manually, as it was allocated outside of KUnit */
+ kfree(bo->ttm);
+}
+
+static void ttm_tt_create_invalid_bo_type(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo->type = ttm_bo_type_sg + 1;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+ KUNIT_EXPECT_NULL(test, bo->ttm);
+}
+
+static void ttm_tt_create_ttm_exists(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->ttm = tt;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ /* Expect to keep the previous TTM */
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_PTR_EQ(test, tt, bo->ttm);
+}
+
+static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ return NULL;
+}
+
+static struct ttm_device_funcs ttm_dev_empty_funcs = {
+ .ttm_tt_create = ttm_tt_null_create,
+};
+
+static void ttm_tt_create_failed(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ /* Update ttm_device_funcs so we don't alloc ttm_tt */
+ devs->ttm_dev->funcs = &ttm_dev_empty_funcs;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -ENOMEM);
+}
+
+static void ttm_tt_destroy_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+
+ ttm_tt_destroy(devs->ttm_dev, bo->ttm);
+}
+
+static struct kunit_case ttm_tt_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
+ KUNIT_CASE(ttm_tt_init_misaligned),
+ KUNIT_CASE(ttm_tt_fini_basic),
+ KUNIT_CASE(ttm_tt_fini_sg),
+ KUNIT_CASE(ttm_tt_fini_shmem),
+ KUNIT_CASE(ttm_tt_create_basic),
+ KUNIT_CASE(ttm_tt_create_invalid_bo_type),
+ KUNIT_CASE(ttm_tt_create_ttm_exists),
+ KUNIT_CASE(ttm_tt_create_failed),
+ KUNIT_CASE(ttm_tt_destroy_basic),
+ {}
+};
+
+static struct kunit_suite ttm_tt_test_suite = {
+ .name = "ttm_tt",
+ .init = ttm_tt_test_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_tt_test_cases,
+};
+
+kunit_test_suites(&ttm_tt_test_suite);
+
+MODULE_LICENSE("GPL");
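
The ttm_tt_create()/ttm_tt_destroy() tests above drive the hooks behind bo->bdev->funcs. For orientation, a typical driver-side pair of hooks, of the kind the KUnit mock device stands in for, might look roughly like this sketch; the mydrv_* names are placeholders, not code from this series.

#include <linux/slab.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
					  uint32_t page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;	/* ttm_tt_create() turns this into -ENOMEM */

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}

static void mydrv_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_device_funcs mydrv_ttm_funcs = {
	.ttm_tt_create = mydrv_ttm_tt_create,
	.ttm_tt_destroy = mydrv_ttm_tt_destroy,
};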
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index edf10618fe2b..96a724e8f3ff 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -49,7 +49,7 @@
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct drm_printer p = drm_debug_printer(TTM_PFX);
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
struct ttm_resource_manager *man;
int i, mem_type;
@@ -410,8 +410,8 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *hop_mem;
int ret;
- hop_placement.num_placement = hop_placement.num_busy_placement = 1;
- hop_placement.placement = hop_placement.busy_placement = hop;
+ hop_placement.num_placement = 1;
+ hop_placement.placement = hop;
/* find space in the bounce domain */
ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
@@ -440,10 +440,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
placement.num_placement = 0;
- placement.num_busy_placement = 0;
bdev->funcs->evict_flags(bo, &placement);
- if (!placement.num_placement && !placement.num_busy_placement) {
+ if (!placement.num_placement) {
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -770,7 +769,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
* This function may sleep while waiting for space to become available.
* Returns:
* -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * -ENOSPC: Could not allocate space for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
@@ -791,6 +790,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ if (place->flags & TTM_PL_FLAG_FALLBACK)
+ continue;
+
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
@@ -813,10 +815,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- for (i = 0; i < placement->num_busy_placement; ++i) {
- const struct ttm_place *place = &placement->busy_placement[i];
+ for (i = 0; i < placement->num_placement; ++i) {
+ const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ if (place->flags & TTM_PL_FLAG_DESIRED)
+ continue;
+
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
@@ -830,7 +835,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
goto error;
}
- ret = -ENOMEM;
+ ret = -ENOSPC;
if (!type_found) {
pr_err(TTM_PFX "No compatible memory type found\n");
ret = -EINVAL;
@@ -904,11 +909,11 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Remove the backing store if no placement is given.
*/
- if (!placement->num_placement && !placement->num_busy_placement)
+ if (!placement->num_placement)
return ttm_bo_pipeline_gutting(bo);
/* Check whether we need to move buffer. */
- if (bo->resource && ttm_resource_compat(bo->resource, placement))
+ if (bo->resource && ttm_resource_compatible(bo->resource, placement))
return 0;
/* Moving of pinned BOs is forbidden */
@@ -916,6 +921,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
return -EINVAL;
ret = ttm_bo_move_buffer(bo, placement, ctx);
+ /* For backward compatibility with userspace */
+ if (ret == -ENOSPC)
+ return -ENOMEM;
if (ret)
return ret;
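
With busy_placement folded into the single placement list, a caller of ttm_bo_validate() only fills one array and marks entries as desired or fallback. A hedged driver-side sketch (mydrv_* is a made-up name, error handling trimmed) might look like this:

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int mydrv_bo_pin_in_vram(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,	/* no separate busy list any more */
	};
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_bo_validate(bo, &placement, &ctx);
	if (ret)	/* an internal -ENOSPC is reported as -ENOMEM here */
		return ret;

	ttm_bo_pin(bo);
	return 0;
}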
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fd9fd3d15101..0b3f4267130c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -294,7 +294,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
enum ttm_caching caching;
man = ttm_manager_type(bo->bdev, res->mem_type);
- caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+ if (man->use_tt) {
+ caching = bo->ttm->caching;
+ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+ } else {
+ caching = res->bus.caching;
+ }
return ttm_prot_from_caching(caching, tmp);
}
@@ -337,6 +343,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
.no_wait_gpu = false
};
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
pgprot_t prot;
int ret;
@@ -346,7 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if (num_pages == 1 && ttm->caching == ttm_cached) {
+ if (num_pages == 1 && ttm->caching == ttm_cached &&
+ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index b62f420a9f96..112438d965ff 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
- struct page **pages = tt->pages;
+ struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 46ff9c75bb12..fb14f7716cf8 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -30,6 +30,8 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/drm_util.h>
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -240,6 +242,7 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
spin_unlock(&bo->bdev->lru_lock);
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
@@ -288,37 +291,15 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
}
/**
- * ttm_resource_compatible - test for compatibility
- *
- * @bdev: TTM device structure
- * @res: The resource to test
- * @place: The placement to test
- * @size: How many bytes the new allocation needs.
+ * ttm_resource_compatible - check if resource is compatible with placement
*
- * Test if @res compatible with @place and @size.
+ * @res: the resource to check
+ * @placement: the placement to check against
*
- * Returns true if the res placement compatible with @place and @size.
+ * Returns true if the placement is compatible.
*/
-bool ttm_resource_compatible(struct ttm_device *bdev,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- struct ttm_resource_manager *man;
-
- if (!res || !place)
- return false;
-
- man = ttm_manager_type(bdev, res->mem_type);
- if (!man->func->compatible)
- return true;
-
- return man->func->compatible(man, res, place, size);
-}
-
-static bool ttm_resource_places_compat(struct ttm_resource *res,
- const struct ttm_place *places,
- unsigned num_placement)
+bool ttm_resource_compatible(struct ttm_resource *res,
+ struct ttm_placement *placement)
{
struct ttm_buffer_object *bo = res->bo;
struct ttm_device *bdev = bo->bdev;
@@ -327,44 +308,25 @@ static bool ttm_resource_places_compat(struct ttm_resource *res,
if (res->placement & TTM_PL_FLAG_TEMPORARY)
return false;
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
+ for (i = 0; i < placement->num_placement; i++) {
+ const struct ttm_place *place = &placement->placement[i];
+ struct ttm_resource_manager *man;
- if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
+ if (res->mem_type != place->mem_type)
+ continue;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (man->func->compatible &&
+ !man->func->compatible(man, res, place, bo->base.size))
continue;
- if ((res->mem_type == heap->mem_type) &&
- (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ if ((!(place->flags & TTM_PL_FLAG_CONTIGUOUS) ||
(res->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
return false;
}
-/**
- * ttm_resource_compat - check if resource is compatible with placement
- *
- * @res: the resource to check
- * @placement: the placement to check against
- *
- * Returns true if the placement is compatible.
- */
-bool ttm_resource_compat(struct ttm_resource *res,
- struct ttm_placement *placement)
-{
- if (ttm_resource_places_compat(res, placement->placement,
- placement->num_placement))
- return true;
-
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_resource_places_compat(res, placement->busy_placement,
- placement->num_busy_placement))
- return true;
-
- return false;
-}
-
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e0a77671edd6..578a7c37f00b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,11 +31,14 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/cc_platform.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv);
@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
+ /*
+ * When using dma_alloc_coherent with memory encryption the
+ * mapped TT pages need to be decrypted or otherwise the drivers
+ * will end up sending encrypted mem to the gpu.
+ */
+ if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info(ddev, "TT memory decryption enabled.");
+ }
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
@@ -91,6 +104,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
/*
* Allocates storage for pointers to the pages that back the ttm.
@@ -129,6 +143,7 @@ void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index 11e865be81c6..5121fed571a5 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -9,7 +9,6 @@ config DRM_TVE200
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
- select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Faraday TV Encoder
TVE200 Controller.
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 1bdfac8beafd..a07ede668cc1 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -40,7 +40,7 @@ void v3d_free_object(struct drm_gem_object *obj)
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated--;
- v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
+ v3d->bo_stats.pages_allocated -= obj->size >> V3D_MMU_PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
spin_lock(&v3d->mm_lock);
@@ -109,8 +109,8 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
* lifetime of the BO.
*/
ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
- obj->size >> PAGE_SHIFT,
- GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+ obj->size >> V3D_MMU_PAGE_SHIFT,
+ GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0);
spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
@@ -118,7 +118,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
/* Track stats for /debug/dri/n/bo_stats. */
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated++;
- v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+ v3d->bo_stats.pages_allocated += obj->size >> V3D_MMU_PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
v3d_mmu_insert_ptes(bo);
@@ -201,7 +201,7 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- args->offset = bo->node.start << PAGE_SHIFT;
+ args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put(&bo->base.base);
@@ -246,7 +246,7 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
}
bo = to_v3d_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
+ args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;
drm_gem_object_put(gem_obj);
return 0;
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 94eafcecc65b..19e3ee7ac897 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -219,7 +219,7 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
seq_printf(m, "allocated bos: %d\n",
v3d->bo_stats.num_allocated);
seq_printf(m, "allocated bo size (kb): %ld\n",
- (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10));
+ (long)v3d->bo_stats.pages_allocated << (V3D_MMU_PAGE_SHIFT - 10));
mutex_unlock(&v3d->bo_lock);
return 0;
@@ -260,11 +260,26 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
return 0;
}
+static int v3d_debugfs_mm(struct seq_file *m, void *unused)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+
+ spin_lock(&v3d->mm_lock);
+ drm_mm_print(&v3d->mm, &p);
+ spin_unlock(&v3d->mm_lock);
+
+ return 0;
+}
+
static const struct drm_debugfs_info v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
{"measure_clock", v3d_measure_clock, 0},
{"bo_stats", v3d_debugfs_bo_stats, 0},
+ {"v3d_mm", v3d_debugfs_mm, 0},
};
void
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 3c7d58866570..1950c723dde1 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,6 +19,8 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
+#define V3D_MMU_PAGE_SHIFT 12
+
#define V3D_MAX_QUEUES (V3D_CPU + 1)
static inline char *v3d_queue_to_string(enum v3d_queue queue)
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index afc76390a197..2e04f6cb661e 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -70,7 +70,7 @@ v3d_overflow_mem_work(struct work_struct *work)
list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
- V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
+ V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT);
V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 5a453532901f..14f3af40d6f6 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -21,8 +21,6 @@
#include "v3d_drv.h"
#include "v3d_regs.h"
-#define V3D_MMU_PAGE_SHIFT 12
-
/* Note: All PTEs for the 1MB superpage must be filled with the
* superpage bit set.
*/
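
The PAGE_SHIFT to V3D_MMU_PAGE_SHIFT conversions above account for the V3D MMU always working in 4 KiB pages, while the CPU PAGE_SIZE may be larger. A small sketch of the arithmetic (the helper name is invented for illustration):

#include <linux/types.h>

#define V3D_MMU_PAGE_SHIFT 12

/*
 * Number of 4 KiB MMU pages backing a BO, independent of CPU page size.
 * For example, a 64 KiB BO is 16 MMU pages even on a 16 KiB-page kernel,
 * where obj->size >> PAGE_SHIFT would only report 4.
 */
static inline unsigned long v3d_bo_mmu_pages(size_t obj_size)
{
	return obj_size >> V3D_MMU_PAGE_SHIFT;
}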
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index f05e2c95a60d..34f807ed1c31 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -35,6 +35,7 @@
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 00e713faecd5..07caf2a47c6c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1497,16 +1497,16 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
+ int ret;
if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
- drm_gem_plane_helper_prepare_fb(plane, state);
-
- if (plane->state->fb == state->fb)
- return 0;
+ ret = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (ret)
+ return ret;
return vc4_bo_inc_usecnt(bo);
}
@@ -1516,7 +1516,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 5c514946bbad..1c7c7f61a222 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -99,8 +99,8 @@ virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
return 0;
/*
- * kvalloc at first tries to allocate memory using kmalloc and
- * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+ * kvmalloc() at first tries to allocate memory using kmalloc() and
+ * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
* internally for allocations larger than a page size, preventing
a storm of KMSG warnings.
*/
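
The pattern the comment describes is kvmalloc_array() paired with kvfree(): try kmalloc first, fall back to vmalloc, and suppress the allocation-failure warning. A hedged sketch of a user-copy helper in that style (the demo_* name and fields are illustrative, not taken from this driver):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Copy an array of u64 handles from user space; caller releases with kvfree() */
static u64 *demo_copy_handles(const void __user *uptr, u32 nents)
{
	u64 *handles;

	handles = kvmalloc_array(nents, sizeof(*handles),
				 GFP_KERNEL | __GFP_NOWARN);
	if (!handles)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(handles, uptr, nents * sizeof(*handles))) {
		kvfree(handles);
		return ERR_PTR(-EFAULT);
	}

	return handles;
}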
@@ -529,7 +529,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_submit(&submit);
/*
- * Set up usr-out data after submitting the job to optimize
+ * Set up user-out data after submitting the job to optimize
* the job submission path.
*/
virtio_gpu_install_out_fence_fd(&submit);
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
new file mode 100644
index 000000000000..b9ecdebecb0b
--- /dev/null
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_VKMS
+ tristate "Virtual KMS (EXPERIMENTAL)"
+ depends on DRM && MMU
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ select CRC32
+ default n
+ help
+ Virtual Kernel Mode-Setting (VKMS) is used for testing or for
+ running a GPU in headless machines. Choose this option to get
+ a VKMS.
+
+ If M is selected the module will be called vkms.
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 3c99fb8b54e2..e7441b227b3c 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -123,6 +123,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
enum lut_channel channel)
{
s64 lut_index = get_lut_index(lut, channel_value);
+ u16 *floor_lut_value, *ceil_lut_value;
+ u16 floor_channel_value, ceil_channel_value;
/*
* This checks if `struct drm_color_lut` has any gap added by the compiler
@@ -130,11 +132,15 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
*/
static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
- u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
- u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+ floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+ if (drm_fixp2int(lut_index) == (lut->lut_length - 1))
+ /* We're at the end of the LUT array, use same value for ceil and floor */
+ ceil_lut_value = floor_lut_value;
+ else
+ ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
- u16 floor_channel_value = floor_lut_value[channel];
- u16 ceil_channel_value = ceil_lut_value[channel];
+ floor_channel_value = floor_lut_value[channel];
+ ceil_channel_value = ceil_lut_value[channel];
return lerp_u16(floor_channel_value, ceil_channel_value,
lut_index & DRM_FIXED_DECIMAL_MASK);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index ddf8373c1d77..6806c05e57f6 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -648,7 +648,6 @@ out_unref:
* @tfile: struct ttm_object_file identifying the caller
* @size: The size of the dma_bufs we export.
* @prime: The object to be initialized.
- * @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
*
@@ -656,10 +655,11 @@ out_unref:
* for data sharing between processes and devices.
*/
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
- struct ttm_prime_object *prime, bool shareable,
+ struct ttm_prime_object *prime,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object **))
{
+ bool shareable = !!(type == VMW_RES_SURFACE);
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
prime->real_type = type;
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index e6b77ee33e55..573e038c0fab 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2006-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2006-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -288,7 +288,6 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
extern int ttm_prime_object_init(struct ttm_object_file *tfile,
size_t size,
struct ttm_prime_object *prime,
- bool shareable,
enum ttm_object_type type,
void (*refcount_release)
(struct ttm_base_object **));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 2bfac3aad7b7..bfd41ce3c8f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -742,9 +742,21 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
vmw_resource_unbind_list(vbo);
}
+static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
+{
+ if (desired & fallback & domain)
+ return 0;
+
+ if (desired & domain)
+ return TTM_PL_FLAG_DESIRED;
+
+ return TTM_PL_FLAG_FALLBACK;
+}
+
static u32
-set_placement_list(struct ttm_place *pl, u32 domain)
+set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
+ u32 domain = desired | fallback;
u32 n = 0;
/*
@@ -752,35 +764,40 @@ set_placement_list(struct ttm_place *pl, u32 domain)
*/
if (domain & VMW_BO_DOMAIN_MOB) {
pl[n].mem_type = VMW_PL_MOB;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_GMR) {
pl[n].mem_type = VMW_PL_GMR;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_VRAM) {
pl[n].mem_type = TTM_PL_VRAM;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
pl[n].mem_type = VMW_PL_SYSTEM;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
+ desired, fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_SYS) {
pl[n].mem_type = TTM_PL_SYSTEM;
- pl[n].flags = 0;
+ pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
+ fallback);
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
@@ -806,7 +823,7 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
u32 i;
pl->placement = bo->places;
- pl->num_placement = set_placement_list(bo->places, domain);
+ pl->num_placement = set_placement_list(bo->places, domain, busy_domain);
if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
for (i = 0; i < pl->num_placement; ++i) {
@@ -821,8 +838,6 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
__func__, bo->tbo.resource->mem_type, domain);
}
- pl->busy_placement = bo->busy_places;
- pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
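
placement_flags() above maps each memory domain onto the new TTM_PL_FLAG_DESIRED/TTM_PL_FLAG_FALLBACK markers that ttm_bo_mem_space() honours in its two passes (fallback entries are skipped first, desired-only entries are skipped in the retry pass). The same "prefer X, quietly accept Y" intent, written as a static placement list, might look like the following sketch; the mydrv_* names are illustrative.

#include <linux/kernel.h>
#include <drm/ttm/ttm_placement.h>

/* Prefer VRAM; accept system GTT only when VRAM is contended */
static const struct ttm_place mydrv_vram_then_gtt[] = {
	{ .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_DESIRED },
	{ .mem_type = TTM_PL_TT,   .flags = TTM_PL_FLAG_FALLBACK },
};

static const struct ttm_placement mydrv_vram_then_gtt_placement = {
	.num_placement = ARRAY_SIZE(mydrv_vram_then_gtt),
	.placement = mydrv_vram_then_gtt,
};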
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3cd5090dedfc..12efecc17df6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -942,7 +942,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 36987ef3fc30..cc3086e649eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
vmw_res_type(ctx) == vmw_res_dx_context) {
for (i = 0; i < cotable_max; ++i) {
res = vmw_context_cotable(ctx, i);
- if (IS_ERR(res))
+ if (IS_ERR_OR_NULL(res))
continue;
ret = vmw_execbuf_res_val_add(sw_context, res,
@@ -621,10 +621,10 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @dirty: Whether to change dirty status.
- * @converter: User-space visisble type specific information.
+ * @converter: User-space visible type specific information.
* @id_loc: Pointer to the location in the command buffer currently being parsed
* from where the user-space resource id handle is located.
- * @p_res: Pointer to pointer to resource validalidation node. Populated on
+ * @p_res: Pointer to pointer to resource validation node. Populated on
* exit.
*/
static int
@@ -1266,6 +1266,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
+ if (IS_ERR_OR_NULL(cotable_res))
+ return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
return ret;
@@ -2484,6 +2486,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
return ret;
res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->defined_id);
if (unlikely(ret != 0))
return ret;
@@ -2569,8 +2573,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
@@ -2689,6 +2693,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL;
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->body.shaderId);
if (ret)
return ret;
@@ -3010,6 +3016,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
}
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->body.soid);
if (ret)
return ret;
@@ -3603,6 +3611,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_bind_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
&vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
+ &vmw_cmd_invalid, false, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ceb4d3d3b965..a0b47c9b33f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -64,8 +64,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
ttm_resource_init(bo, place, *res);
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ttm_resource_fini(man, *res);
+ kfree(*res);
return id;
+ }
spin_lock(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 5fd0ccaa0b41..cd4925346ed4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -35,6 +35,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_edid.h>
void vmw_du_cleanup(struct vmw_display_unit *du)
{
@@ -184,13 +185,12 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
- return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
+ return vmw_bo_map_and_cache(vps->bo);
return NULL;
}
@@ -272,6 +272,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
u32 i;
u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
int ret;
if (!dev_priv->has_mob ||
@@ -313,7 +314,15 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
if (ret != 0)
goto teardown;
- vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.bo->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
@@ -643,22 +652,12 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- bool is_iomem;
if (vps->surf_mapped) {
vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
- const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
-
- if (likely(ret == 0)) {
- ttm_bo_kunmap(&vps->bo->map);
- ttm_bo_unreserve(&vps->bo->tbo);
- }
- }
-
vmw_du_cursor_plane_unmap_cm(vps);
vmw_du_put_cursor_mob(vcp, vps);
@@ -694,6 +693,10 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
int ret = 0;
if (vps->surf) {
+ if (vps->surf_mapped) {
+ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
+ vps->surf_mapped = false;
+ }
vmw_surface_unreference(&vps->surf);
vps->surf = NULL;
}
@@ -2278,107 +2281,6 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
connector_status_connected : connector_status_disconnected);
}
-static struct drm_display_mode vmw_kms_connector_builtin[] = {
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@60Hz */
- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
- 1472, 1664, 0, 720, 723, 728, 748, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
- 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1440@60Hz */
- { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
- 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2880x1800@60Hz */
- { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
- 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3840x2160@60Hz */
- { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
- 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 3840x2400@60Hz */
- { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
- 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* Terminate */
- { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
-};
-
/**
* vmw_guess_mode_timing - Provide fake timings for a
* 60Hz vrefresh mode.
@@ -2400,88 +2302,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode)
}
-int vmw_du_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct drm_device *dev = connector->dev;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
- struct drm_display_mode prefmode = { DRM_MODE("preferred",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
- };
- int i;
- u32 assumed_bpp = 4;
-
- if (dev_priv->assume_16bpp)
- assumed_bpp = 2;
-
- max_width = min(max_width, dev_priv->texture_max_width);
- max_height = min(max_height, dev_priv->texture_max_height);
-
- /*
- * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
- * HEIGHT registers.
- */
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(max_width, dev_priv->stdu_max_width);
- max_height = min(max_height, dev_priv->stdu_max_height);
- }
-
- /* Add preferred mode */
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = du->pref_width;
- mode->vdisplay = du->pref_height;
- vmw_guess_mode_timing(mode);
- drm_mode_set_name(mode);
-
- if (vmw_kms_validate_mode_vram(dev_priv,
- mode->hdisplay * assumed_bpp,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
- } else {
- drm_mode_destroy(dev, mode);
- mode = NULL;
- }
-
- if (du->pref_mode) {
- list_del_init(&du->pref_mode->head);
- drm_mode_destroy(dev, du->pref_mode);
- }
-
- /* mode might be null here, this is intended */
- du->pref_mode = mode;
-
- for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
- bmode = &vmw_kms_connector_builtin[i];
- if (bmode->hdisplay > max_width ||
- bmode->vdisplay > max_height)
- continue;
-
- if (!vmw_kms_validate_mode_vram(dev_priv,
- bmode->hdisplay * assumed_bpp,
- bmode->vdisplay))
- continue;
-
- mode = drm_mode_duplicate(dev, bmode);
- if (!mode)
- return 0;
-
- drm_mode_probed_add(connector, mode);
- }
-
- drm_connector_list_update(connector);
- /* Move the prefered mode first, help apps pick the right mode. */
- drm_mode_sort(&connector->modes);
-
- return 1;
-}
-
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@@ -3022,3 +2842,91 @@ out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret;
}
+
+/**
+ * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
+ *
+ * @connector: the drm connector, part of a DU container
+ * @mode: drm mode to check
+ *
+ * Returns MODE_OK on success, or a drm_mode_status error code.
+ */
+enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u32 max_width = dev_priv->texture_max_width;
+ u32 max_height = dev_priv->texture_max_height;
+ u32 assumed_cpp = 4;
+
+ if (dev_priv->assume_16bpp)
+ assumed_cpp = 2;
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(dev_priv->stdu_max_width, max_width);
+ max_height = min(dev_priv->stdu_max_height, max_height);
+ }
+
+ if (max_width < mode->hdisplay)
+ return MODE_BAD_HVALUE;
+
+ if (max_height < mode->vdisplay)
+ return MODE_BAD_VVALUE;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode->hdisplay * assumed_cpp,
+ mode->vdisplay))
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/**
+ * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
+ *
+ * @connector: the drm connector, part of a DU container
+ *
+ * Returns the number of added modes.
+ */
+int vmw_connector_get_modes(struct drm_connector *connector)
+{
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ u32 max_width;
+ u32 max_height;
+ u32 num_modes;
+
+ /* Add preferred mode */
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+ /* Probe connector for all modes not exceeding our geom limits */
+ max_width = dev_priv->texture_max_width;
+ max_height = dev_priv->texture_max_height;
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(dev_priv->stdu_max_width, max_width);
+ max_height = min(dev_priv->stdu_max_height, max_height);
+ }
+
+ num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
+
+ return num_modes;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index db81e635dc06..a94947b588e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -378,7 +378,6 @@ struct vmw_display_unit {
unsigned pref_width;
unsigned pref_height;
bool pref_active;
- struct drm_display_mode *pref_mode;
/*
* Gui positioning
@@ -428,8 +427,6 @@ void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force);
-int vmw_du_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height);
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
@@ -438,6 +435,9 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int num_clips,
int increment,
struct vmw_kms_dirty *dirty);
+enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+int vmw_connector_get_modes(struct drm_connector *connector);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a82fa9700370..c4db4aecca6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -304,7 +304,7 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@@ -313,6 +313,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+ .mode_valid = vmw_connector_mode_valid
};
static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
@@ -449,7 +451,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
ldu->base.pref_active = (unit == 0);
ldu->base.pref_width = dev_priv->initial_width;
ldu->base.pref_height = dev_priv->initial_height;
- ldu->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 556a403b7eb5..30c3ad27b662 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -347,7 +347,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@@ -357,6 +357,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+ .mode_valid = vmw_connector_mode_valid
};
@@ -826,7 +828,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
- sou->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index ba0c0e12cfe9..3c8414a13dba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -53,7 +53,6 @@ enum stdu_content_type {
* struct vmw_stdu_dirty - closure structure for the update functions
*
* @base: The base type we derive from. Used by vmw_kms_helper_dirty().
- * @transfer: Transfer direction for DMA command.
* @left: Left side of bounding box.
* @right: Right side of bounding box.
* @top: Top side of bounding box.
@@ -100,7 +99,7 @@ struct vmw_stdu_update_gb_image {
};
/**
- * struct vmw_screen_target_display_unit
+ * struct vmw_screen_target_display_unit - conglomerated STDU structure
*
* @base: VMW specific DU structure
* @display_srf: surface to be displayed. The dimension of this will always
@@ -208,6 +207,8 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
* @res: Buffer to bind to the screen target. Set to NULL to blank screen.
*
* Binding a surface to a Screen Target is the same as flipping
+ *
+ * Returns: %0 on success or -errno code on failure
*/
static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu,
@@ -314,6 +315,9 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
*
* @dev_priv: VMW DRM device
* @stdu: display unit to destroy
+ *
+ * Returns: %0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
*/
static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu)
@@ -536,7 +540,8 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
* If DMA-ing to the screen target system, the function will also notify
* the screen target system that a bounding box of the cliprects has been
* updated.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ *
+ * Returns: %0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
@@ -703,7 +708,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
* case the device has already synchronized.
* @crtc: If crtc is passed, perform surface dirty on that crtc only.
*
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * Returns: %0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
@@ -830,7 +835,7 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
- .fill_modes = vmw_du_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@@ -840,6 +845,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
+ .get_modes = vmw_connector_get_modes,
+ .mode_valid = vmw_connector_mode_valid
};
@@ -887,7 +894,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
* backed by a buffer object. The display surface is pinned here, and it'll
* be unpinned in .cleanup_fb()
*
- * Returns 0 on success
+ * Returns: %0 on success
*/
static int
vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
@@ -1465,6 +1472,8 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
* This function is called once per CRTC, and allocates one Screen Target
* display unit to represent that CRTC. Since the SVGA device does not separate
* out encoder and connector, they are represented as part of the STDU as well.
+ *
+ * Returns: %0 on success or -errno code on failure
*/
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 680441bb1786..e7a744dfcecf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -44,7 +44,6 @@
* struct vmw_user_surface - User-space visible surface resource
*
* @prime: The TTM prime object.
- * @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @master: Master of the creating client. Used for security check.
*/
@@ -833,8 +832,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->snooper.image = NULL;
}
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
if (drm_is_primary_client(file_priv))
user_srf->master = drm_file_get_master(file_priv);
@@ -848,10 +845,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
/*
- * A gb-aware client referencing a shared surface will
- * expect a backup buffer to be present.
+ * A gb-aware client referencing a surface will expect a backup
+ * buffer to be present.
*/
- if (dev_priv->has_mob && req->shareable) {
+ if (dev_priv->has_mob) {
struct vmw_bo_params params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
@@ -870,8 +867,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
- req->shareable, VMW_RES_SURFACE,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size,
+ &user_srf->prime,
+ VMW_RES_SURFACE,
&vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
@@ -1550,8 +1548,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
&vmw_user_surface_base_release);
@@ -2053,8 +2049,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
}
*srf_out = &user_srf->srf;
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
srf = &user_srf->srf;
srf->metadata = *req;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index af8562c95cc3..4d23d0a70bcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -43,46 +43,14 @@ static const struct ttm_place sys_placement_flags = {
.flags = 0
};
-static const struct ttm_place gmr_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
-};
-
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &vram_placement_flags
-};
-
-static const struct ttm_place vram_gmr_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }
-};
-
-struct ttm_placement vmw_vram_gmr_placement = {
- .num_placement = 2,
- .placement = vram_gmr_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &gmr_placement_flags
};
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
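The busy-placement members are dropped here because a placement in this series only carries its list of places; a single-entry placement in the reduced form, mirroring the initializers above (the example names are made up), looks like::

    static const struct ttm_place example_sys_place = {
            .fpfn = 0,
            .lpfn = 0,
            .mem_type = TTM_PL_SYSTEM,
            .flags = 0,
    };

    static const struct ttm_placement example_sys_placement = {
            .num_placement = 1,
            .placement = &example_sys_place,
    };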
diff --git a/drivers/gpu/drm/xe/.kunitconfig b/drivers/gpu/drm/xe/.kunitconfig
index 9590eac91af3..ad4b9b4a9f55 100644
--- a/drivers/gpu/drm/xe/.kunitconfig
+++ b/drivers/gpu/drm/xe/.kunitconfig
@@ -11,3 +11,8 @@ CONFIG_DRM_XE_DISPLAY=n
CONFIG_EXPERT=y
CONFIG_FB=y
CONFIG_DRM_XE_KUNIT_TEST=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index e36ae1f0d885..1a556d087e63 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) && 64BIT
+ depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -10,6 +10,7 @@ config DRM_XE
select DRM_BUDDY
select DRM_EXEC
select DRM_KMS_HELPER
+ select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
select DRM_SUBALLOC_HELPER
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 549065f57a78..df02e5d17d26 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -76,7 +76,6 @@ config DRM_XE_KUNIT_TEST
depends on DRM_XE && KUNIT && DEBUG_FS
default KUNIT_ALL_TESTS
select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
help
Choose this option to allow the driver to perform selftests under
the kunit framework
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index efcf0ab7a1a6..5a428ca00f10 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -42,7 +42,8 @@ generated_oob := $(obj)/generated/xe_wa_oob.c $(obj)/generated/xe_wa_oob.h
quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
-$(generated_oob) &: $(obj)/xe_gen_wa_oob $(srctree)/$(src)/xe_wa_oob.rules
+$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
+ $(srctree)/$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
uses_generated_oob := \
@@ -76,6 +77,7 @@ xe-y += xe_bb.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
xe_gsc.o \
+ xe_gsc_proxy.o \
xe_gsc_submit.o \
xe_gt.o \
xe_gt_ccs_mode.o \
@@ -92,6 +94,7 @@ xe-y += xe_bb.o \
xe_guc.o \
xe_guc_ads.o \
xe_guc_ct.o \
+ xe_guc_db_mgr.o \
xe_guc_debugfs.o \
xe_guc_hwconfig.o \
xe_guc_log.o \
@@ -137,6 +140,7 @@ xe-y += xe_bb.o \
xe_uc_debugfs.o \
xe_uc_fw.o \
xe_vm.o \
+ xe_vram_freq.o \
xe_wait_user_fence.o \
xe_wa.o \
xe_wopcm.o
@@ -145,18 +149,25 @@ xe-y += xe_bb.o \
xe-$(CONFIG_HWMON) += xe_hwmon.o
# graphics virtualization (SR-IOV) support
-xe-y += xe_sriov.o
+xe-y += \
+ xe_guc_relay.o \
+ xe_memirq.o \
+ xe_sriov.o
xe-$(CONFIG_PCI_IOV) += \
xe_lmtt.o \
xe_lmtt_2l.o \
xe_lmtt_ml.o
+# include helpers for tests even when XE is built-in
+ifdef CONFIG_DRM_XE_KUNIT_TEST
+xe-y += tests/xe_kunit_helpers.o
+endif
+
# i915 Display compat #defines and #includes
subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-I$(srctree)/$(src)/display/ext \
-I$(srctree)/$(src)/compat-i915-headers \
- -I$(srctree)/drivers/gpu/drm/xe/display/ \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
@@ -176,17 +187,17 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
- xe_display.o \
- display/xe_fb_pin.o \
- display/xe_hdcp_gsc.o \
- display/xe_plane_initial.o \
- display/xe_display_rps.o \
+ display/ext/i915_irq.o \
+ display/ext/i915_utils.o \
+ display/intel_fb_bo.o \
+ display/intel_fbdev_fb.o \
+ display/xe_display.o \
display/xe_display_misc.o \
+ display/xe_display_rps.o \
display/xe_dsb_buffer.o \
- display/intel_fbdev_fb.o \
- display/intel_fb_bo.o \
- display/ext/i915_irq.o \
- display/ext/i915_utils.o
+ display/xe_fb_pin.o \
+ display/xe_hdcp_gsc.o \
+ display/xe_plane_initial.o
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
@@ -213,8 +224,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
- i915-display/intel_display_debugfs.o \
- i915-display/intel_display_debugfs_params.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
@@ -258,7 +267,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
- i915-display/intel_pipe_crc.o \
i915-display/intel_pmdemand.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
@@ -285,6 +293,13 @@ ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y)
xe-$(CONFIG_DRM_XE_DISPLAY) += i915-display/intel_fbdev.o
endif
+ifeq ($(CONFIG_DEBUG_FS),y)
+ xe-$(CONFIG_DRM_XE_DISPLAY) += \
+ i915-display/intel_display_debugfs.o \
+ i915-display/intel_display_debugfs_params.o \
+ i915-display/intel_pipe_crc.o
+endif
+
obj-$(CONFIG_DRM_XE) += xe.o
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h
new file mode 100644
index 000000000000..80bbf06a3eb8
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GSC_PROXY_COMMANDS_ABI_H
+#define _ABI_GSC_PROXY_COMMANDS_ABI_H
+
+#include <linux/types.h>
+
+/* Heci client ID for proxy commands */
+#define HECI_MEADDRESS_PROXY 10
+
+/* FW-defined proxy header */
+struct xe_gsc_proxy_header {
+ /*
+ * hdr:
+ * Bits 0-7: type of the proxy message (see enum xe_gsc_proxy_type)
+ * Bits 8-15: rsvd
+ * Bits 16-31: length in bytes of the payload following the proxy header
+ */
+ u32 hdr;
+#define GSC_PROXY_TYPE GENMASK(7, 0)
+#define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16)
+
+ u32 source; /* Source of the Proxy message */
+ u32 destination; /* Destination of the Proxy message */
+#define GSC_PROXY_ADDRESSING_KMD 0x10000
+#define GSC_PROXY_ADDRESSING_GSC 0x20000
+#define GSC_PROXY_ADDRESSING_CSME 0x30000
+
+ u32 status; /* Command status */
+} __packed;
+
+/* FW-defined proxy types */
+enum xe_gsc_proxy_type {
+ GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0,
+ GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1,
+ GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2,
+ GSC_PROXY_MSG_TYPE_PROXY_END = 3,
+ GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4,
+};
+
+#endif
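The GENMASK-based fields in the header above are meant for the <linux/bitfield.h> helpers; a hedged sketch of packing a QUERY header (the 16-byte payload length is an arbitrary example, and the function is hypothetical)::

    #include <linux/bitfield.h>

    static u32 example_gsc_proxy_header(struct xe_gsc_proxy_header *to_gsc)
    {
            /* Pack: a QUERY message with 16 payload bytes, KMD -> GSC. */
            to_gsc->hdr = FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
                          FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 16);
            to_gsc->source = GSC_PROXY_ADDRESSING_KMD;
            to_gsc->destination = GSC_PROXY_ADDRESSING_GSC;
            to_gsc->status = 0;

            /* Unpack: FIELD_GET() recovers the payload length again. */
            return FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_gsc->hdr);
    }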
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
new file mode 100644
index 000000000000..5496a5890847
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _GUC_ACTIONS_PF_ABI_H
+#define _GUC_ACTIONS_PF_ABI_H
+
+#include "guc_communication_ctb_abi.h"
+
+/**
+ * DOC: GUC2PF_RELAY_FROM_VF
+ *
+ * This message is used by the GuC firmware to forward a VF2PF `Relay Message`_
+ * received from the Virtual Function (VF) driver to this Physical Function (PF)
+ * driver.
+ *
+ * This message is always sent as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF` = 0x5100 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - source VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 3 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF 0x5100
+
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN \
+ (GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_3_RELAY_DATA1 GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+/**
+ * DOC: PF2GUC_RELAY_TO_VF
+ *
+ * This H2G message is used by the Physical Function (PF) driver to send embedded
+ * VF2PF `Relay Message`_ to the VF.
+ *
+ * This action message must be sent over CTB as `CTB HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_PF2GUC_RELAY_TO_VF` = 0x5101 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - target VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 3 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_PF2GUC_RELAY_TO_VF 0x5101
+
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 2u)
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MAX_LEN \
+ (PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_3_RELAY_DATA1 GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+/**
+ * DOC: GUC2VF_RELAY_FROM_PF
+ *
+ * This message is used by the GuC firmware to deliver `Relay Message`_ from the
+ * Physical Function (PF) driver to this Virtual Function (VF) driver.
+ * See `GuC Relay Communication`_ for details.
+ *
+ * This message is always sent over CTB.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF` = 0x5102 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 2 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF 0x5102
+
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 1u)
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN \
+ (GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+/**
+ * DOC: VF2GUC_RELAY_TO_PF
+ *
+ * This message is used by the Virtual Function (VF) drivers to communicate with
+ * the Physical Function (PF) driver and send `Relay Message`_ to the PF driver.
+ * See `GuC Relay Communication`_ for details.
+ *
+ * This message must be sent over CTB.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`XE_GUC_ACTION_VF2GUC_RELAY_TO_PF` = 0x5103 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID |
+ * +---+-------+-----------------+--------------------------------------------+
+ * | 2 | 31:0 | **RELAY_DATA1** | |
+ * +---+-------+-----------------+ |
+ * |...| | | [Embedded `Relay Message`_] |
+ * +---+-------+-----------------+ |
+ * | n | 31:0 | **RELAY_DATAx** | |
+ * +---+-------+-----------------+--------------------------------------------+
+ */
+#define XE_GUC_ACTION_VF2GUC_RELAY_TO_PF 0x5103
+
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MAX_LEN \
+ (VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN)
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn
+#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN
+
+#endif
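To make the message tables above concrete, here is a hedged sketch of filling the fixed dwords of a VF2GUC_RELAY_TO_PF request ahead of the embedded Relay Message; the generic HXG masks come from guc_messages_abi.h and the helper itself is hypothetical::

    static u32 example_prepare_vf2guc_relay(u32 *msg, u32 rid)
    {
            msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
                     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
                     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
                                XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
            msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid);

            /* msg[2..n] then carry the embedded Relay Message dwords. */
            return VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN;
    }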
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 0b1146d0c997..8f86a16dc577 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -81,12 +81,13 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
#define GUC_CTB_HDR_LEN 1u
#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
-#define GUC_CTB_MSG_MAX_LEN 256u
+#define GUC_CTB_MSG_MAX_LEN (GUC_CTB_MSG_MIN_LEN + GUC_CTB_MAX_DWORDS)
#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
#define GUC_CTB_FORMAT_HXG 0u
#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
+#define GUC_CTB_MAX_DWORDS 255
/**
* DOC: CTB HXG Message
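With GUC_CTB_MSG_MAX_LEN now derived from GUC_CTB_MAX_DWORDS, the limit stays consistent with the 8-bit NUM_DWORDS header field; a hedged sketch of how a receiver would compute the total length of an incoming CTB message from its first dword::

    #include <linux/bitfield.h>

    /* Total length is the 1-dword header plus the NUM_DWORDS payload dwords. */
    static u32 example_ctb_msg_len(u32 header)
    {
            return GUC_CTB_HDR_LEN + FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header);
    }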
diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
index 29e414c82d56..534a39db7772 100644
--- a/drivers/gpu/drm/xe/abi/guc_messages_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
@@ -24,6 +24,7 @@
* | | 30:28 | **TYPE** - message type |
* | | | - _`GUC_HXG_TYPE_REQUEST` = 0 |
* | | | - _`GUC_HXG_TYPE_EVENT` = 1 |
+ * | | | - _`GUC_HXG_TYPE_FAST_REQUEST` = 2 |
* | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 |
* | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 |
* | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 |
@@ -46,6 +47,7 @@
#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
#define GUC_HXG_TYPE_REQUEST 0u
#define GUC_HXG_TYPE_EVENT 1u
+#define GUC_HXG_TYPE_FAST_REQUEST 2u
#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
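A FAST_REQUEST, unlike a plain REQUEST, does not expect a success response, which is why the relay actions above allow either type; a small hedged helper distinguishing the two (the name is made up)::

    #include <linux/bitfield.h>

    /* True when the HXG header describes a request that expects a reply. */
    static bool example_hxg_expects_response(u32 msg0)
    {
            return FIELD_GET(GUC_HXG_MSG_0_TYPE, msg0) == GUC_HXG_TYPE_REQUEST;
    }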
diff --git a/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
new file mode 100644
index 000000000000..747e428de421
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_RELAY_ACTIONS_ABI_H_
+#define _ABI_GUC_RELAY_ACTIONS_ABI_H_
+
+/**
+ * DOC: GuC Relay Debug Actions
+ *
+ * This range of action codes is reserved for debugging purposes only and should
+ * be used only on debug builds. These actions may not be supported by the
+ * production drivers. Their definitions could be changed in the future.
+ *
+ * _`GUC_RELAY_ACTION_DEBUG_ONLY_START` = 0xDEB0
+ * _`GUC_RELAY_ACTION_DEBUG_ONLY_END` = 0xDEFF
+ */
+
+#define GUC_RELAY_ACTION_DEBUG_ONLY_START 0xDEB0
+#define GUC_RELAY_ACTION_DEBUG_ONLY_END 0xDEFF
+
+/**
+ * DOC: VFXPF_TESTLOOP
+ *
+ * This `Relay Message`_ is used to selftest the `GuC Relay Communication`_.
+ *
+ * The following opcodes are defined:
+ * VFXPF_TESTLOOP_OPCODE_NOP_ will return no data.
+ * VFXPF_TESTLOOP_OPCODE_BUSY_ will reply with BUSY response first.
+ * VFXPF_TESTLOOP_OPCODE_RETRY_ will reply with RETRY response instead.
+ * VFXPF_TESTLOOP_OPCODE_ECHO_ will return same data as received.
+ * VFXPF_TESTLOOP_OPCODE_FAIL_ will always fail with error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_ |
+ * | | | or GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **OPCODE** |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_NOP` = 0x0 |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_BUSY` = 0xB |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_RETRY` = 0xD |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_ECHO` = 0xE |
+ * | | | - _`VFXPF_TESTLOOP_OPCODE_FAIL` = 0xF |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`IOV_ACTION_SELFTEST_RELAY` |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **DATA1** = optional, depends on **OPCODE**: |
+ * | | | for VFXPF_TESTLOOP_OPCODE_BUSY_: time in ms for reply |
+ * | | | for VFXPF_TESTLOOP_OPCODE_FAIL_: expected error |
+ * | | | for VFXPF_TESTLOOP_OPCODE_ECHO_: payload |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| 31:0 | **DATAn** = only for **OPCODE** VFXPF_TESTLOOP_OPCODE_ECHO_ |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| 31:0 | DATAn = only for **OPCODE** VFXPF_TESTLOOP_OPCODE_ECHO_ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_RELAY_ACTION_VFXPF_TESTLOOP (GUC_RELAY_ACTION_DEBUG_ONLY_START + 1)
+#define VFXPF_TESTLOOP_OPCODE_NOP 0x0
+#define VFXPF_TESTLOOP_OPCODE_BUSY 0xB
+#define VFXPF_TESTLOOP_OPCODE_RETRY 0xD
+#define VFXPF_TESTLOOP_OPCODE_ECHO 0xE
+#define VFXPF_TESTLOOP_OPCODE_FAIL 0xF
+
+#endif
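As an illustration of the layout documented above, the first dword of an ECHO request combines the opcode (carried in the DATA0 bits) with the testloop action; the generic HXG masks are reused from guc_messages_abi.h and the helper is hypothetical::

    static u32 example_testloop_echo_header(void)
    {
            return FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
                   FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
                   FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0,
                              VFXPF_TESTLOOP_OPCODE_ECHO) |
                   FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
                              GUC_RELAY_ACTION_VFXPF_TESTLOOP);
    }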
diff --git a/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h
new file mode 100644
index 000000000000..f92625f04796
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_RELAY_COMMUNICATION_ABI_H
+#define _ABI_GUC_RELAY_COMMUNICATION_ABI_H
+
+#include <linux/build_bug.h>
+
+#include "guc_actions_sriov_abi.h"
+#include "guc_communication_ctb_abi.h"
+#include "guc_messages_abi.h"
+
+/**
+ * DOC: GuC Relay Communication
+ *
+ * The communication between Virtual Function (VF) drivers and Physical Function
+ * (PF) drivers is based on the GuC firmware acting as a proxy (relay) agent.
+ *
+ * To communicate with the PF driver, VF drivers use the `VF2GUC_RELAY_TO_PF`_
+ * action, which takes the `Relay Message`_ as opaque payload and requires the
+ * relay message identifier (RID) as an additional parameter.
+ *
+ * This identifier is used by the drivers to match related messages.
+ *
+ * The GuC forwards this `Relay Message`_ and its identifier to the PF driver
+ * in `GUC2PF_RELAY_FROM_VF`_ action. This event message additionally contains
+ * the identifier of the origin VF (VFID).
+ *
+ * Likewise, to communicate with the VF drivers, the PF driver uses the
+ * `PF2GUC_RELAY_TO_VF`_ action, which in addition to the `Relay Message`_
+ * and the relay message identifier (RID) also takes the target VF identifier.
+ *
+ * The GuC uses this target VFID from the message to select where to send the
+ * `GUC2VF_RELAY_FROM_PF`_ with the embedded `Relay Message`_ with response::
+ *
+ * VF GuC PF
+ * | | |
+ * [ ] VF2GUC_RELAY_TO_PF | |
+ * [ ]---------------------------> [ ] |
+ * [ ] { rid, msg } [ ] |
+ * [ ] [ ] GUC2PF_RELAY_FROM_VF |
+ * [ ] [ ]---------------------------> [ ]
+ * [ ] | { VFID, rid, msg } [ ]
+ * [ ] | [ ]
+ * [ ] | PF2GUC_RELAY_TO_VF [ ]
+ * [ ] [ ] <---------------------------[ ]
+ * [ ] [ ] { VFID, rid, reply } |
+ * [ ] GUC2VF_RELAY_FROM_PF [ ] |
+ * [ ] <---------------------------[ ] |
+ * | { rid, reply } | |
+ * | | |
+ *
+ * It is also possible that the PF driver will initiate communication with a
+ * selected VF driver. The same GuC action messages will be used::
+ *
+ * VF GuC PF
+ * | | |
+ * | | PF2GUC_RELAY_TO_VF [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] { VFID, rid, msg } [ ]
+ * | GUC2VF_RELAY_FROM_PF [ ] [ ]
+ * [ ] <---------------------------[ ] [ ]
+ * [ ] { rid, msg } | [ ]
+ * [ ] | [ ]
+ * [ ] VF2GUC_RELAY_TO_PF | [ ]
+ * [ ]---------------------------> [ ] [ ]
+ * | { rid, reply } [ ] [ ]
+ * | [ ] GUC2PF_RELAY_FROM_VF [ ]
+ * | [ ]---------------------------> [ ]
+ * | | { VFID, rid, reply } |
+ * | | |
+ */
+
+/**
+ * DOC: Relay Message
+ *
+ * The `Relay Message`_ is used by Physical Function (PF) driver and Virtual
+ * Function (VF) drivers to communicate using `GuC Relay Communication`_.
+ *
+ * Format of the `Relay Message`_ follows format of the generic `HXG Message`_.
+ *
+ * +--------------------------------------------------------------------------+
+ * | `Relay Message`_ |
+ * +==========================================================================+
+ * | `HXG Message`_ |
+ * +--------------------------------------------------------------------------+
+ *
+ * Maximum length of the `Relay Message`_ is limited by the maximum length of
+ * the `CTB HXG Message`_ and format of the `GUC2PF_RELAY_FROM_VF`_ message.
+ */
+
+#define GUC_RELAY_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_RELAY_MSG_MAX_LEN \
+ (GUC_CTB_MAX_DWORDS - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN)
+
+static_assert(PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN >
+ VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN);
+
+/**
+ * DOC: Relay Error Codes
+ *
+ * The `GuC Relay Communication`_ can be used to pass `Relay Message`_ between
+ * drivers that run on different Operating Systems. To help in troubleshooting,
+ * `GuC Relay Communication`_ uses error codes that mostly match errno values.
+ */
+
+#define GUC_RELAY_ERROR_UNDISCLOSED 0
+#define GUC_RELAY_ERROR_OPERATION_NOT_PERMITTED 1 /* EPERM */
+#define GUC_RELAY_ERROR_PERMISSION_DENIED 13 /* EACCES */
+#define GUC_RELAY_ERROR_INVALID_ARGUMENT 22 /* EINVAL */
+#define GUC_RELAY_ERROR_INVALID_REQUEST_CODE 56 /* EBADRQC */
+#define GUC_RELAY_ERROR_NO_DATA_AVAILABLE 61 /* ENODATA */
+#define GUC_RELAY_ERROR_PROTOCOL_ERROR 71 /* EPROTO */
+#define GUC_RELAY_ERROR_MESSAGE_SIZE 90 /* EMSGSIZE */
+
+#endif
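Since the wire error codes above mostly mirror errno values, a receiver can translate them back before propagating a failure; a hedged sketch (mapping unrecognized codes to -EREMOTEIO is an assumption, not part of the ABI)::

    #include <linux/errno.h>

    static int example_relay_error_to_errno(u32 error)
    {
            switch (error) {
            case GUC_RELAY_ERROR_OPERATION_NOT_PERMITTED:
                    return -EPERM;
            case GUC_RELAY_ERROR_PERMISSION_DENIED:
                    return -EACCES;
            case GUC_RELAY_ERROR_INVALID_ARGUMENT:
                    return -EINVAL;
            case GUC_RELAY_ERROR_INVALID_REQUEST_CODE:
                    return -EBADRQC;
            case GUC_RELAY_ERROR_NO_DATA_AVAILABLE:
                    return -ENODATA;
            case GUC_RELAY_ERROR_PROTOCOL_ERROR:
                    return -EPROTO;
            case GUC_RELAY_ERROR_MESSAGE_SIZE:
                    return -EMSGSIZE;
            default:
                    return -EREMOTEIO; /* assumed fallback for unknown codes */
            }
    }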
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 5d2a77b52db4..420eba0e4be0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -162,18 +162,18 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#include "intel_wakeref.h"
-static inline bool intel_runtime_pm_get(struct xe_runtime_pm *pm)
+static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
if (xe_pm_runtime_get(xe) < 0) {
xe_pm_runtime_put(xe);
- return false;
+ return 0;
}
- return true;
+ return 1;
}
-static inline bool intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
+static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
@@ -187,7 +187,7 @@ static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
xe_pm_runtime_put(xe);
}
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, bool wakeref)
+static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
{
if (wakeref)
intel_runtime_pm_put_unchecked(pm);
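With these wrappers returning an intel_wakeref_t cookie instead of a bool, shared display code keeps its usual acquire/release pattern; a minimal usage sketch (the hardware access in the middle is a placeholder)::

    static void example_with_rpm(struct xe_runtime_pm *pm)
    {
            intel_wakeref_t wakeref;

            wakeref = intel_runtime_pm_get(pm);
            if (!wakeref)
                    return;

            /* ... touch the hardware while the wakeref is held ... */

            intel_runtime_pm_put(pm, wakeref);
    }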
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
index 888e7a87a925..bd233007c1b7 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
@@ -19,6 +19,9 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
int err;
u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+ if (align)
+ size = ALIGN(size, align);
+
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
NULL, size, start, end,
ttm_bo_type_kernel, flags);
diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e4db069f0db3..e4db069f0db3 100644
--- a/drivers/gpu/drm/xe/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
diff --git a/drivers/gpu/drm/xe/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 710e56180b52..710e56180b52 100644
--- a/drivers/gpu/drm/xe/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index ccf83c12b545..866d1dd6eeb4 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -18,19 +19,20 @@
#include "intel_plane_initial.h"
static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
+intel_reuse_initial_plane_obj(struct intel_crtc *this,
+ const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb)
{
+ struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
+ const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@@ -38,7 +40,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
return true;
}
@@ -178,10 +180,10 @@ err_bo:
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+ struct intel_initial_plane_config plane_configs[])
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -201,7 +203,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
if (intel_alloc_initial_plane_obj(crtc, plane_config))
fb = &plane_config->fb->base;
- else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb))
+ else if (!intel_reuse_initial_plane_obj(crtc, plane_configs, &fb))
goto nofb;
plane_state->uapi.rotation = plane_config->rotation;
@@ -267,25 +269,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
-void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+void intel_initial_plane_config(struct drm_i915_private *i915)
{
- struct xe_device *xe = to_xe_device(crtc->base.dev);
- struct intel_initial_plane_config plane_config = {};
+ struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_initial_plane_config *plane_config =
+ &plane_configs[crtc->pipe];
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
+ if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+ continue;
- plane_config_fini(&plane_config);
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, plane_configs);
+
+ if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ plane_config_fini(plane_config);
+ }
}
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 1cfa96167fde..c74ceb550dce 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -56,6 +56,9 @@
#define MI_FLUSH_IMM_QW REG_FIELD_PREP(MI_FLUSH_DW_LEN_DW, 5 - 2)
#define MI_FLUSH_DW_USE_GTT REG_BIT(2)
+#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
+#define MI_LRM_USE_GGTT REG_BIT(22)
+
#define MI_BATCH_BUFFER_START __MI_INSTR(0x31)
#endif
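MI_LOAD_REGISTER_MEM loads a register from a memory location; a hedged sketch of emitting the 4-dword form against a GGTT address (the register/address-low/address-high ordering follows the usual command-stream layout, and the emitter itself is hypothetical)::

    static u32 *example_emit_lrm(u32 *cs, u32 reg_addr, u64 ggtt_addr)
    {
            *cs++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT;
            *cs++ = reg_addr;
            *cs++ = lower_32_bits(ggtt_addr);
            *cs++ = upper_32_bits(ggtt_addr);

            return cs;
    }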
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 5592774fc690..0b1266c88a6a 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -75,12 +75,17 @@
#define FF_THREAD_MODE(base) XE_REG((base) + 0xa0)
#define FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
+#define RING_INT_SRC_RPT_PTR(base) XE_REG((base) + 0xa4)
#define RING_IMR(base) XE_REG((base) + 0xa8)
+#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac)
#define RING_EIR(base) XE_REG((base) + 0xb0)
#define RING_EMR(base) XE_REG((base) + 0xb4)
#define RING_ESR(base) XE_REG((base) + 0xb8)
+#define INSTPM(base) XE_REG((base) + 0xc0, XE_REG_OPTION_MASKED)
+#define ENABLE_SEMAPHORE_POLL_BIT REG_BIT(13)
+
#define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED)
/*
* CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
@@ -136,6 +141,7 @@
#define TAIL_ADDR 0x001FFFF8
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
#define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 1dd361046b5d..15ac2d284d48 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -144,8 +144,12 @@
#define GSCPSMI_BASE XE_REG(0x880c)
+#define CCCHKNREG1 XE_REG_MCR(0x8828)
+#define ENCOMPPERFFIX REG_BIT(18)
+
/* Fuse readout registers for GT */
#define XEHP_FUSE4 XE_REG(0x9114)
+#define CFEG_WMTP_DISABLE REG_BIT(20)
#define CCS_EN_MASK REG_GENMASK(19, 16)
#define GT_L3_EXC_MASK REG_GENMASK(6, 4)
@@ -288,6 +292,9 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define L3SQCREG3 XE_REG_MCR(0xb108)
+#define COMPPWOVERFETCHEN REG_BIT(28)
+
#define XEHP_L3SQCREG5 XE_REG_MCR(0xb158)
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
@@ -344,6 +351,9 @@
#define ROW_CHICKEN3 XE_REG_MCR(0xe49c, XE_REG_OPTION_MASKED)
#define DIS_FIX_EOT1_FLUSH REG_BIT(9)
+#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
+#define SLM_WMTP_RESTORE REG_BIT(11)
+
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
@@ -430,6 +440,15 @@
#define VOLTAGE_MASK REG_GENMASK(10, 0)
#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4))
+#define INTR_GSC REG_BIT(31)
+#define INTR_GUC REG_BIT(25)
+#define INTR_MGUC REG_BIT(24)
+#define INTR_BCS8 REG_BIT(23)
+#define INTR_BCS(x) REG_BIT(15 - (x))
+#define INTR_CCS(x) REG_BIT(4 + (x))
+#define INTR_RCS0 REG_BIT(0)
+#define INTR_VECS(x) REG_BIT(31 - (x))
+#define INTR_VCS(x) REG_BIT(x)
#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030)
#define VCS_VECS_INTR_ENABLE XE_REG(0x190034)
@@ -446,6 +465,7 @@
#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
#define OTHER_GUC_INSTANCE 0
+#define OTHER_GSC_HECI2_INSTANCE 3
#define OTHER_GSC_INSTANCE 6
#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4))
@@ -454,6 +474,7 @@
#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8)
#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac)
#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0)
+#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
#define GUC_SG_INTR_MASK XE_REG(0x1900e8)
#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec)
#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4)
@@ -469,10 +490,4 @@
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
-#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004)
-#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008)
-#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068)
-#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
-#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-
#endif
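The new per-engine bits let an interrupt handler test a GT_INTR_DW value directly; a small hedged example (the helper name is made up)::

    /* True if the render engine or the first copy engine has an interrupt
     * pending in this GT_INTR_DW value.
     */
    static bool example_rcs_or_bcs0_pending(u32 intr_dw)
    {
            return intr_dw & (INTR_RCS0 | INTR_BCS(0));
    }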
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 4be81abc86ad..1825d8f79db6 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -14,4 +14,13 @@
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_LRM_INT_MASK_ENABLE 0x50
+#define CTX_INT_MASK_ENABLE_REG (CTX_LRM_INT_MASK_ENABLE + 1)
+#define CTX_INT_MASK_ENABLE_PTR (CTX_LRM_INT_MASK_ENABLE + 2)
+#define CTX_LRI_INT_REPORT_PTR 0x55
+#define CTX_INT_STATUS_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 1)
+#define CTX_INT_STATUS_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 2)
+#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3)
+#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
new file mode 100644
index 000000000000..3dae858508c8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_PCODE_REGS_H_
+#define _XE_PCODE_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * This file contains addresses of PCODE registers visible through GT MMIO space.
+ */
+
+#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004)
+#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008)
+#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068)
+#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
+#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
+
+#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 39d8a0892274..9d1d88af8b2f 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -1,10 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+# "live" kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
xe_bo_test.o \
xe_dma_buf_test.o \
xe_migrate_test.o \
- xe_mocs_test.o \
+ xe_mocs_test.o
+
+# Normal kunit tests
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
+xe_test-y = xe_test_mod.o \
xe_pci_test.o \
xe_rtp_test.o \
xe_wa_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c b/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c
new file mode 100644
index 000000000000..a87a7b4b040a
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+
+static int guc_dbm_test_init(struct kunit *test)
+{
+ struct xe_guc_db_mgr *dbm;
+
+ xe_kunit_helper_xe_device_test_init(test);
+ dbm = &xe_device_get_gt(test->priv, 0)->uc.guc.dbm;
+
+ mutex_init(dbm_mutex(dbm));
+ test->priv = dbm;
+ return 0;
+}
+
+static void test_empty(struct kunit *test)
+{
+ struct xe_guc_db_mgr *dbm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, 0), 0);
+ KUNIT_ASSERT_EQ(test, dbm->count, 0);
+
+ mutex_lock(dbm_mutex(dbm));
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+}
+
+static void test_default(struct kunit *test)
+{
+ struct xe_guc_db_mgr *dbm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_EQ(test, dbm->count, GUC_NUM_DOORBELLS);
+}
+
+static const unsigned int guc_dbm_params[] = {
+ GUC_NUM_DOORBELLS / 64,
+ GUC_NUM_DOORBELLS / 32,
+ GUC_NUM_DOORBELLS / 8,
+ GUC_NUM_DOORBELLS,
+};
+
+static void uint_param_get_desc(const unsigned int *p, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u", *p);
+}
+
+KUNIT_ARRAY_PARAM(guc_dbm, guc_dbm_params, uint_param_get_desc);
+
+static void test_size(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ unsigned int n;
+ int id;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0);
+ KUNIT_ASSERT_EQ(test, dbm->count, *p);
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++) {
+ KUNIT_EXPECT_GE(test, id = xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ KUNIT_EXPECT_LT(test, id, dbm->count);
+ }
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++)
+ xe_guc_db_mgr_release_id_locked(dbm, n);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+static void test_reuse(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0);
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++)
+ KUNIT_EXPECT_GE(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++) {
+ xe_guc_db_mgr_release_id_locked(dbm, n);
+ KUNIT_EXPECT_EQ(test, xe_guc_db_mgr_reserve_id_locked(dbm), n);
+ }
+ KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0);
+ mutex_unlock(dbm_mutex(dbm));
+
+ mutex_lock(dbm_mutex(dbm));
+ for (n = 0; n < *p; n++)
+ xe_guc_db_mgr_release_id_locked(dbm, n);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+static void test_range_overlap(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ int id1, id2, id3;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_LE(test, *p, dbm->count);
+
+ KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
+ for (n = 0; n < dbm->count - *p; n++) {
+ KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ KUNIT_ASSERT_NE(test, id2, id1);
+ KUNIT_ASSERT_NE_MSG(test, id2 < id1, id2 > id1 + *p - 1,
+ "id1=%d id2=%d", id1, id2);
+ }
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
+
+ if (*p >= 1) {
+ KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, *p - 1, 0), 0);
+ KUNIT_ASSERT_NE(test, id2, id1);
+ KUNIT_ASSERT_NE_MSG(test, id1 < id2, id1 > id2 + *p - 2,
+ "id1=%d id2=%d", id1, id2);
+ for (n = 0; n < dbm->count - *p; n++) {
+ KUNIT_ASSERT_GE(test, id3 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ KUNIT_ASSERT_NE(test, id3, id1);
+ KUNIT_ASSERT_NE(test, id3, id2);
+ KUNIT_ASSERT_NE_MSG(test, id3 < id2, id3 > id2 + *p - 2,
+ "id3=%d id2=%d", id3, id2);
+ }
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
+ }
+}
+
+static void test_range_compact(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_NE(test, *p, 0);
+ KUNIT_ASSERT_LE(test, *p, dbm->count);
+ if (dbm->count % *p)
+ kunit_skip(test, "must be divisible");
+
+ KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
+ for (n = 1; n < dbm->count / *p; n++)
+ KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0);
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0);
+ xe_guc_db_mgr_release_range(dbm, 0, dbm->count);
+}
+
+static void test_range_spare(struct kunit *test)
+{
+ const unsigned int *p = test->param_value;
+ struct xe_guc_db_mgr *dbm = test->priv;
+ int id;
+
+ KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0);
+ KUNIT_ASSERT_LE(test, *p, dbm->count);
+
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count), 0);
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p + 1), 0);
+ KUNIT_ASSERT_EQ(test, id = xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p), 0);
+ KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, dbm->count - *p), 0);
+ xe_guc_db_mgr_release_range(dbm, id, *p);
+}
+
+static struct kunit_case guc_dbm_test_cases[] = {
+ KUNIT_CASE(test_empty),
+ KUNIT_CASE(test_default),
+ KUNIT_CASE_PARAM(test_size, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_reuse, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_range_overlap, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_range_compact, guc_dbm_gen_params),
+ KUNIT_CASE_PARAM(test_range_spare, guc_dbm_gen_params),
+ {}
+};
+
+static struct kunit_suite guc_dbm_suite = {
+ .name = "guc_dbm",
+ .test_cases = guc_dbm_test_cases,
+ .init = guc_dbm_test_init,
+};
+
+kunit_test_suites(&guc_dbm_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c b/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c
new file mode 100644
index 000000000000..13701451b923
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define TEST_RID 1234
+#define TEST_VFID 5
+#define TEST_LEN 6
+#define TEST_ACTION 0xa
+#define TEST_DATA(n) (0xd0 + (n))
+
+static int replacement_relay_get_totalvfs(struct xe_guc_relay *relay)
+{
+ return TEST_VFID;
+}
+
+static int relay_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_guc_relay *relay;
+ struct xe_device *xe;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ kunit_activate_static_stub(test, relay_get_totalvfs,
+ replacement_relay_get_totalvfs);
+
+ KUNIT_ASSERT_EQ(test, xe_guc_relay_init(relay), 0);
+ KUNIT_EXPECT_TRUE(test, relay_is_ready(relay));
+ relay->last_rid = TEST_RID - 1;
+
+ test->priv = relay;
+ return 0;
+}
+
+static const u32 TEST_MSG[TEST_LEN] = {
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, TEST_ACTION) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_DATA0, TEST_DATA(0)),
+ TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4),
+};
+
+static int replacement_xe_guc_ct_send_recv_always_fails(struct xe_guc_ct *ct,
+ const u32 *msg, u32 len,
+ u32 *response_buffer)
+{
+ struct kunit *test = kunit_get_current_test();
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
+ KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN);
+
+ return -ECOMM;
+}
+
+static int replacement_xe_guc_ct_send_recv_expects_pf2guc_relay(struct xe_guc_ct *ct,
+ const u32 *msg, u32 len,
+ u32 *response_buffer)
+{
+ struct kunit *test = kunit_get_current_test();
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
+ KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN);
+ KUNIT_ASSERT_EQ(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + TEST_LEN);
+ KUNIT_EXPECT_EQ(test, GUC_HXG_ORIGIN_HOST, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]));
+ KUNIT_EXPECT_EQ(test, GUC_HXG_TYPE_REQUEST, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]));
+ KUNIT_EXPECT_EQ(test, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF,
+ FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]));
+ KUNIT_EXPECT_EQ(test, TEST_VFID,
+ FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, msg[1]));
+ KUNIT_EXPECT_EQ(test, TEST_RID,
+ FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, msg[2]));
+ KUNIT_EXPECT_MEMEQ(test, TEST_MSG, msg + PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN,
+ sizeof(u32) * TEST_LEN);
+ return 0;
+}
+
+static const u32 test_guc2pf[GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN] = {
+ /* transport */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF),
+ FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, TEST_VFID),
+ FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, TEST_RID),
+ /* payload */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+};
+
+static const u32 test_guc2vf[GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN] = {
+ /* transport */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF),
+ FIELD_PREP_CONST(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, TEST_RID),
+ /* payload */
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+};
+
+static void pf_rejects_guc2pf_too_short(struct kunit *test)
+{
+ const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN - 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2pf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void pf_rejects_guc2pf_too_long(struct kunit *test)
+{
+ const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN + 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2pf;
+
+ KUNIT_ASSERT_EQ(test, -EMSGSIZE, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void pf_rejects_guc2pf_no_payload(struct kunit *test)
+{
+ const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2pf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void pf_fails_no_payload(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ const u32 msg = 0;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, &msg, 0));
+}
+
+static void pf_fails_bad_origin(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ static const u32 msg[] = {
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+ };
+ u32 len = ARRAY_SIZE(msg);
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len));
+}
+
+static void pf_fails_bad_type(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ const u32 msg[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, 4), /* only 4 is undefined */
+ };
+ u32 len = ARRAY_SIZE(msg);
+
+ KUNIT_ASSERT_EQ(test, -EBADRQC, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len));
+}
+
+static void pf_txn_reports_error(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ struct relay_transaction *txn;
+
+ txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID,
+ TEST_MSG, TEST_LEN, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn);
+
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_always_fails);
+ KUNIT_EXPECT_EQ(test, -ECOMM, relay_send_transaction(relay, txn));
+
+ relay_release_transaction(relay, txn);
+}
+
+static void pf_txn_sends_pf2guc(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ struct relay_transaction *txn;
+
+ txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID,
+ TEST_MSG, TEST_LEN, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn);
+
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_expects_pf2guc_relay);
+ KUNIT_ASSERT_EQ(test, 0, relay_send_transaction(relay, txn));
+
+ relay_release_transaction(relay, txn);
+}
+
+static void pf_sends_pf2guc(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_expects_pf2guc_relay);
+ KUNIT_ASSERT_EQ(test, 0,
+ xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ TEST_MSG, TEST_LEN, NULL, 0));
+}
+
+static int replacement_xe_guc_ct_send_recv_loopback_relay(struct xe_guc_ct *ct,
+ const u32 *msg, u32 len,
+ u32 *response_buffer)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_guc_relay *relay = test->priv;
+ u32 *reply = kunit_kzalloc(test, len * sizeof(u32), GFP_KERNEL);
+ int (*guc2relay)(struct xe_guc_relay *, const u32 *, u32);
+ u32 action;
+ int err;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg);
+ KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN);
+ KUNIT_ASSERT_EQ(test, GUC_HXG_TYPE_REQUEST,
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]));
+ KUNIT_ASSERT_GE(test, len, GUC_HXG_REQUEST_MSG_MIN_LEN);
+ KUNIT_ASSERT_NOT_NULL(test, reply);
+
+ switch (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0])) {
+ case XE_GUC_ACTION_PF2GUC_RELAY_TO_VF:
+ KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN);
+ action = XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF;
+ guc2relay = xe_guc_relay_process_guc2pf;
+ break;
+ case XE_GUC_ACTION_VF2GUC_RELAY_TO_PF:
+ KUNIT_ASSERT_GE(test, len, VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN);
+ action = XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF;
+ guc2relay = xe_guc_relay_process_guc2vf;
+ break;
+ default:
+ KUNIT_FAIL(test, "bad RELAY action %#x", msg[0]);
+ return -EINVAL;
+ }
+
+ reply[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION, action);
+ memcpy(reply + 1, msg + 1, sizeof(u32) * (len - 1));
+
+ err = guc2relay(relay, reply, len);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ return err;
+}
+
+static void test_requires_relay_testloop(struct kunit *test)
+{
+ /*
+ * The debug relay action GUC_RELAY_ACTION_VFXPF_TESTLOOP is available
+ * only on builds with CONFIG_DRM_XE_DEBUG_SRIOV enabled.
+	 * See the "kunit.py --kconfig_add" option if it is missing.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
+ kunit_skip(test, "requires %s\n", __stringify(CONFIG_DRM_XE_DEBUG_SRIOV));
+}
+
+static void pf_loopback_nop(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_NOP),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]),
+ GUC_HXG_ORIGIN_HOST);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]),
+ GUC_HXG_TYPE_RESPONSE_SUCCESS);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]), 0);
+}
+
+static void pf_loopback_echo(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_ECHO),
+ TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4),
+ };
+ u32 response[ARRAY_SIZE(request)];
+ unsigned int n;
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(response));
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]),
+ GUC_HXG_ORIGIN_HOST);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]),
+ GUC_HXG_TYPE_RESPONSE_SUCCESS);
+ KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]),
+ ARRAY_SIZE(response));
+ for (n = GUC_HXG_RESPONSE_MSG_MIN_LEN; n < ret; n++)
+ KUNIT_EXPECT_EQ(test, request[n], response[n]);
+}
+
+static void pf_loopback_fail(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_FAIL),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, -EREMOTEIO);
+}
+
+static void pf_loopback_busy(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_BUSY),
+ TEST_DATA(0xb),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_testonly_nop, relay_process_incoming_action);
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
+}
+
+static void pf_loopback_retry(struct kunit *test)
+{
+ struct xe_guc_relay *relay = test->priv;
+ u32 request[] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_RETRY),
+ TEST_DATA(0xd), TEST_DATA(0xd),
+ };
+ u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN];
+ int ret;
+
+ test_requires_relay_testloop(test);
+
+ kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action);
+ kunit_activate_static_stub(test, xe_guc_ct_send_recv,
+ replacement_xe_guc_ct_send_recv_loopback_relay);
+ ret = xe_guc_relay_send_to_vf(relay, TEST_VFID,
+ request, ARRAY_SIZE(request),
+ response, ARRAY_SIZE(response));
+ KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN);
+}
+
+static struct kunit_case pf_relay_test_cases[] = {
+ KUNIT_CASE(pf_rejects_guc2pf_too_short),
+ KUNIT_CASE(pf_rejects_guc2pf_too_long),
+ KUNIT_CASE(pf_rejects_guc2pf_no_payload),
+ KUNIT_CASE(pf_fails_no_payload),
+ KUNIT_CASE(pf_fails_bad_origin),
+ KUNIT_CASE(pf_fails_bad_type),
+ KUNIT_CASE(pf_txn_reports_error),
+ KUNIT_CASE(pf_txn_sends_pf2guc),
+ KUNIT_CASE(pf_sends_pf2guc),
+ KUNIT_CASE(pf_loopback_nop),
+ KUNIT_CASE(pf_loopback_echo),
+ KUNIT_CASE(pf_loopback_fail),
+ KUNIT_CASE_SLOW(pf_loopback_busy),
+ KUNIT_CASE_SLOW(pf_loopback_retry),
+ {}
+};
+
+static struct kunit_suite pf_relay_suite = {
+ .name = "pf_relay",
+ .test_cases = pf_relay_test_cases,
+ .init = relay_test_init,
+};
+
+static void vf_rejects_guc2vf_too_short(struct kunit *test)
+{
+ const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN - 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2vf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static void vf_rejects_guc2vf_too_long(struct kunit *test)
+{
+ const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN + 1;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2vf;
+
+ KUNIT_ASSERT_EQ(test, -EMSGSIZE, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static void vf_rejects_guc2vf_no_payload(struct kunit *test)
+{
+ const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN;
+ struct xe_guc_relay *relay = test->priv;
+ const u32 *msg = test_guc2vf;
+
+ KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static struct kunit_case vf_relay_test_cases[] = {
+ KUNIT_CASE(vf_rejects_guc2vf_too_short),
+ KUNIT_CASE(vf_rejects_guc2vf_too_long),
+ KUNIT_CASE(vf_rejects_guc2vf_no_payload),
+ {}
+};
+
+static struct kunit_suite vf_relay_suite = {
+ .name = "vf_relay",
+ .test_cases = vf_relay_test_cases,
+ .init = relay_test_init,
+};
+
+static void xe_drops_guc2pf_if_not_ready(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ const u32 *msg = test_guc2pf;
+ u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN;
+
+ KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2pf(relay, msg, len));
+}
+
+static void xe_drops_guc2vf_if_not_ready(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ const u32 *msg = test_guc2vf;
+ u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN;
+
+ KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2vf(relay, msg, len));
+}
+
+static void xe_rejects_send_if_not_ready(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay;
+ u32 msg[GUC_RELAY_MSG_MIN_LEN];
+ u32 len = ARRAY_SIZE(msg);
+
+ KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_send_to_pf(relay, msg, len, NULL, 0));
+ KUNIT_ASSERT_EQ(test, -ENODEV, relay_send_to(relay, TEST_VFID, msg, len, NULL, 0));
+}
+
+static struct kunit_case no_relay_test_cases[] = {
+ KUNIT_CASE(xe_drops_guc2pf_if_not_ready),
+ KUNIT_CASE(xe_drops_guc2vf_if_not_ready),
+ KUNIT_CASE(xe_rejects_send_if_not_ready),
+ {}
+};
+
+static struct kunit_suite no_relay_suite = {
+ .name = "no_relay",
+ .test_cases = no_relay_test_cases,
+ .init = xe_kunit_helper_xe_device_test_init,
+};
+
+kunit_test_suites(&no_relay_suite,
+ &pf_relay_suite,
+ &vf_relay_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
new file mode 100644
index 000000000000..fefe79b3b75a
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include <kunit/visibility.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "tests/xe_kunit_helpers.h"
+#include "tests/xe_pci_test.h"
+#include "xe_device_types.h"
+
+/**
+ * xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
+ * @test: the &kunit where this &xe_device will be used
+ * @dev: The parent device object
+ *
+ * This function allocates xe_device using drm_kunit_helper_alloc_drm_device().
+ * The xe_device allocation is managed by the test.
+ *
+ * @dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * This function uses KUNIT_ASSERT to detect any allocation failures.
+ *
+ * Return: A pointer to the new &xe_device.
+ */
+struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
+ struct device *dev)
+{
+ struct xe_device *xe;
+
+ xe = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct xe_device,
+ drm, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+ return xe;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_alloc_xe_device);
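For reference, a minimal sketch of how a test-specific init function is expected to consume this helper; it mirrors the xe_rtp/xe_wa test updates later in this patch, and the function name here is illustrative only, not part of the patch:

static int example_test_init(struct kunit *test)
{
	struct xe_device *xe;
	struct device *dev;

	/* Parent device first, then the fake xe_device on top of it. */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	xe = xe_kunit_helper_alloc_xe_device(test, dev);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);

	test->priv = xe;
	return 0;
}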
+
+static void kunit_action_restore_priv(void *priv)
+{
+ struct kunit *test = kunit_get_current_test();
+
+ test->priv = priv;
+}
+
+/**
+ * xe_kunit_helper_xe_device_test_init - Prepare a &xe_device for a KUnit test.
+ * @test: the &kunit where this fake &xe_device will be used
+ *
+ * This function allocates and initializes a fake &xe_device and stores its
+ * pointer as &kunit.priv to allow the test code to access it.
+ *
+ * This function can be directly used as custom implementation of
+ * &kunit_suite.init.
+ *
+ * It is possible to prepare a specific variant of the fake &xe_device by
+ * passing a pointer to a struct xe_pci_fake_data, supplemented with the
+ * desired parameters, in &kunit.priv prior to calling this function.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_test_init(struct kunit *test)
+{
+ struct xe_device *xe;
+ struct device *dev;
+ int err;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ xe = xe_kunit_helper_alloc_xe_device(test, dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+ err = xe_pci_fake_device_init(xe);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = kunit_add_action_or_reset(test, kunit_action_restore_priv, test->priv);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ test->priv = xe;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
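A hedged sketch of how a suite can seed &kunit.priv with a struct xe_pci_fake_data and then reuse this helper as its init hook; compare with the relay suites earlier in this patch. The suite, case, and function names are hypothetical:

static void example_smoke(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, xe);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_smoke),
	{}
};

static int example_pf_test_init(struct kunit *test)
{
	struct xe_pci_fake_data fake = {
		.sriov_mode = XE_SRIOV_MODE_PF,
		.platform = XE_TIGERLAKE,
		.subplatform = XE_SUBPLATFORM_NONE,
	};

	/* Seed the fake device parameters before the common init runs. */
	test->priv = &fake;
	return xe_kunit_helper_xe_device_test_init(test);
}

static struct kunit_suite example_suite = {
	.name = "example",
	.test_cases = example_test_cases,
	.init = example_pf_test_init,
};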
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
new file mode 100644
index 000000000000..067a1babf049
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_KUNIT_HELPERS_H_
+#define _XE_KUNIT_HELPERS_H_
+
+struct device;
+struct kunit;
+struct xe_device;
+
+struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
+ struct device *dev);
+int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index a6523df0f1d3..c347e2c29f81 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -114,21 +114,21 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
region |
XE_BO_NEEDS_CPU_ACCESS);
if (IS_ERR(remote)) {
- KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n",
- str, PTR_ERR(remote));
+ KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
+ str, remote);
return;
}
err = xe_bo_validate(remote, NULL, false);
if (err) {
- KUNIT_FAIL(test, "Failed to validate system bo for %s: %li\n",
+ KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
str, err);
goto out_unlock;
}
err = xe_bo_vmap(remote);
if (err) {
- KUNIT_FAIL(test, "Failed to vmap system bo for %s: %li\n",
+ KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n",
str, err);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 7dd34f94e809..df5c36b70ab4 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -128,3 +128,39 @@ void xe_live_mocs_kernel_kunit(struct kunit *test)
xe_call_for_each_device(mocs_kernel_test_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
+
+static int mocs_reset_test_run_device(struct xe_device *xe)
+{
+ /* Check the mocs setup is retained over GT reset */
+
+ struct live_mocs mocs;
+ struct xe_gt *gt;
+ unsigned int flags;
+ int id;
+ struct kunit *test = xe_cur_kunit();
+
+ for_each_gt(gt, xe, id) {
+ flags = live_mocs_init(&mocs, gt);
+ kunit_info(test, "mocs_reset_test before reset\n");
+ if (flags & HAS_GLOBAL_MOCS)
+ read_mocs_table(gt, &mocs.table);
+ if (flags & HAS_LNCF_MOCS)
+ read_l3cc_table(gt, &mocs.table);
+
+ xe_gt_reset_async(gt);
+ flush_work(&gt->reset.worker);
+
+ kunit_info(test, "mocs_reset_test after reset\n");
+ if (flags & HAS_GLOBAL_MOCS)
+ read_mocs_table(gt, &mocs.table);
+ if (flags & HAS_LNCF_MOCS)
+ read_l3cc_table(gt, &mocs.table);
+ }
+ return 0;
+}
+
+void xe_live_mocs_reset_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(mocs_reset_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ef56bd517b28..ee40f31e1e12 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -9,6 +9,7 @@
static struct kunit_case xe_mocs_tests[] = {
KUNIT_CASE(xe_live_mocs_kernel_kunit),
+ KUNIT_CASE(xe_live_mocs_reset_kunit),
{}
};
@@ -21,4 +22,5 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
index 7faa3575e6c3..e7699d495411 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
@@ -9,5 +9,6 @@
struct kunit;
void xe_live_mocs_kernel_kunit(struct kunit *test);
+void xe_live_mocs_reset_kunit(struct kunit *test);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 602793644f61..f62809ca8b51 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -156,6 +156,9 @@ int xe_pci_fake_device_init(struct xe_device *xe)
return -ENODEV;
done:
+ xe->sriov.__mode = data && data->sriov_mode ?
+ data->sriov_mode : XE_SRIOV_MODE_NONE;
+
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 171e4180f1aa..a6705a536391 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -64,8 +64,3 @@ static struct kunit_suite xe_pci_test_suite = {
};
kunit_test_suite(xe_pci_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_pci kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index 811ffe5bd9fd..f40dcec83992 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_platform_types.h"
+#include "xe_sriov_types.h"
struct xe_device;
struct xe_graphics_desc;
@@ -23,6 +24,7 @@ void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
+ enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
u32 graphics_verx100;
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 4a6972897675..06759d754783 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -15,6 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
#include "xe_rtp.h"
@@ -276,9 +277,7 @@ static int xe_rtp_test_init(struct kunit *test)
dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- xe = drm_kunit_helper_alloc_drm_device(test, dev,
- struct xe_device,
- drm, DRIVER_GEM);
+ xe = xe_kunit_helper_alloc_xe_device(test, dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
/* Initialize an empty device */
@@ -312,8 +311,3 @@ static struct kunit_suite xe_rtp_test_suite = {
};
kunit_test_suite(xe_rtp_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_rtp kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c
new file mode 100644
index 000000000000..875f3e6f965e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe kunit tests");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index b4715b78ef3b..44570d888355 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -9,6 +9,7 @@
#include <kunit/test.h>
#include "xe_device.h"
+#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"
#include "xe_reg_sr.h"
#include "xe_tuning.h"
@@ -65,14 +66,8 @@ static const struct platform_test_case cases[] = {
PLATFORM_CASE(ALDERLAKE_P, C0),
SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
- SUBPLATFORM_CASE(DG2, G10, A0),
- SUBPLATFORM_CASE(DG2, G10, A1),
- SUBPLATFORM_CASE(DG2, G10, B0),
SUBPLATFORM_CASE(DG2, G10, C0),
- SUBPLATFORM_CASE(DG2, G11, A0),
- SUBPLATFORM_CASE(DG2, G11, B0),
SUBPLATFORM_CASE(DG2, G11, B1),
- SUBPLATFORM_CASE(DG2, G12, A0),
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
@@ -105,9 +100,7 @@ static int xe_wa_test_init(struct kunit *test)
dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- xe = drm_kunit_helper_alloc_drm_device(test, dev,
- struct xe_device,
- drm, DRIVER_GEM);
+ xe = xe_kunit_helper_alloc_xe_device(test, dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
test->priv = &data;
@@ -160,8 +153,3 @@ static struct kunit_suite xe_rtp_test_suite = {
};
kunit_test_suite(xe_rtp_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_wa kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0b0e262e2166..6603a0ea79c5 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -28,6 +28,14 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+ [XE_PL_SYSTEM] = "system",
+ [XE_PL_TT] = "gtt",
+ [XE_PL_VRAM0] = "vram0",
+ [XE_PL_VRAM1] = "vram1",
+ [XE_PL_STOLEN] = "stolen"
+};
+
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
@@ -38,22 +46,26 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
-static const struct ttm_place tt_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = XE_PL_TT,
- .flags = 0,
+static const struct ttm_place tt_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = XE_PL_TT,
+ .flags = TTM_PL_FLAG_DESIRED,
+ },
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = XE_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_FALLBACK,
+ }
};
static struct ttm_placement tt_placement = {
- .num_placement = 1,
- .placement = &tt_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
+ .num_placement = 2,
+ .placement = tt_placement_flags,
};
bool mem_type_is_vram(u32 mem_type)
@@ -230,8 +242,6 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
bo->placement = (struct ttm_placement) {
.num_placement = c,
.placement = bo->placements,
- .num_busy_placement = c,
- .busy_placement = bo->placements,
};
return 0;
@@ -251,7 +261,6 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
/* Don't handle scatter gather BOs */
if (tbo->type == ttm_bo_type_sg) {
placement->num_placement = 0;
- placement->num_busy_placement = 0;
return;
}
@@ -586,6 +595,8 @@ static int xe_bo_move_notify(struct xe_bo *bo,
{
struct ttm_buffer_object *ttm_bo = &bo->ttm;
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct ttm_resource *old_mem = ttm_bo->resource;
+ u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
int ret;
/*
@@ -605,6 +616,18 @@ static int xe_bo_move_notify(struct xe_bo *bo,
if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
dma_buf_move_notify(ttm_bo->base.dma_buf);
+ /*
+ * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
+ * so if we moved from VRAM make sure to unlink this from the userfault
+ * tracking.
+ */
+ if (mem_type_is_vram(old_mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (!list_empty(&bo->vram_userfault_link))
+ list_del_init(&bo->vram_userfault_link);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
+
return 0;
}
@@ -713,8 +736,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
migrate = xe->tiles[0].migrate;
xe_assert(xe, migrate);
-
- trace_xe_bo_move(bo);
+ trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
xe_device_mem_access_get(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
@@ -1027,7 +1049,7 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
}
}
-struct ttm_device_funcs xe_ttm_funcs = {
+const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
@@ -1063,6 +1085,11 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (!list_empty(&bo->vram_userfault_link))
+ list_del(&bo->vram_userfault_link);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
kfree(bo);
}
@@ -1110,16 +1137,20 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
+ struct xe_device *xe = to_xe_device(ddev);
+ struct xe_bo *bo = ttm_to_xe_bo(tbo);
+ bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
vm_fault_t ret;
int idx, r = 0;
+ if (needs_rpm)
+ xe_device_mem_access_get(xe);
+
ret = ttm_bo_vm_reserve(tbo, vmf);
if (ret)
- return ret;
+ goto out;
if (drm_dev_enter(ddev, &idx)) {
- struct xe_bo *bo = ttm_to_xe_bo(tbo);
-
trace_xe_bo_cpu_fault(bo);
if (should_migrate_to_system(bo)) {
@@ -1137,10 +1168,24 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out;
+ /*
+ * ttm_bo_vm_reserve() already has dma_resv_lock.
+ */
+ if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (list_empty(&bo->vram_userfault_link))
+ list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
dma_resv_unlock(tbo->base.resv);
+out:
+ if (needs_rpm)
+ xe_device_mem_access_put(xe);
+
return ret;
}
@@ -1254,6 +1299,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
#ifdef CONFIG_PROC_FS
INIT_LIST_HEAD(&bo->client_link);
#endif
+ INIT_LIST_HEAD(&bo->vram_userfault_link);
drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
@@ -1353,8 +1399,6 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
bo->placement = (struct ttm_placement) {
.num_placement = 1,
.placement = place,
- .num_busy_placement = 1,
- .busy_placement = place,
};
return 0;
@@ -1568,6 +1612,38 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
return bo;
}
+/**
+ * xe_managed_bo_reinit_in_vram - Re-initialize a managed BO in VRAM
+ * @xe: xe device
+ * @tile: Tile where the new buffer will be created
+ * @src: Managed buffer object allocated in system memory
+ *
+ * Replace a managed src buffer object allocated in system memory with a new
+ * one allocated in vram, copying the data between them.
+ * The buffer object in VRAM will not have the same GGTT address; the caller
+ * is responsible for making sure that any old references to it are updated.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
+{
+ struct xe_bo *bo;
+
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, !(*src)->vmap.is_iomem);
+
+ bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
+ *src = bo;
+
+ return 0;
+}
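A short usage sketch, assuming a managed BO ("bo") that was created in system memory earlier in probe, a tile that now has VRAM available, and an "err"/"ggtt_addr" variable declared by the caller; names are hypothetical:

	/* Migrate the managed BO to VRAM; it gets a new GGTT address. */
	err = xe_managed_bo_reinit_in_vram(xe, tile, &bo);
	if (err)
		return err;

	/* Any cached GGTT offset taken from the old BO must be refreshed. */
	ggtt_addr = xe_bo_ggtt_addr(bo);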
+
/*
* XXX: This is in the VM bind data path, likely should calculate this once and
* store, with a recalculation if the BO is moved.
@@ -2112,9 +2188,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
xe_place_from_ttm_type(mem_type, &requested);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
/*
* Stolen needs to be handled like below VRAM handling if we ever need
@@ -2264,6 +2338,16 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
return err;
}
+void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ struct ttm_device *bdev = tbo->bdev;
+
+ drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);
+
+ list_del_init(&bo->vram_userfault_link);
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif
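The vram_userfault list populated above is meant to be drained at runtime suspend; a sketch of that consumer under the locking scheme introduced here (the actual hook is expected to live in the PM code, outside this hunk, and the function name is illustrative):

static void example_revoke_vram_mmaps(struct xe_device *xe)
{
	struct xe_bo *bo, *next;

	/* Safe iteration: the release helper unlinks the BO from the list. */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, next,
				 &xe->mem_access.vram_userfault.list,
				 vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
}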
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9b1279aca127..c59ad15961ce 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -44,6 +44,7 @@
#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
#define XE_BO_PAGETABLE BIT(12)
#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
+#define XE_BO_NEEDS_UC BIT(14)
/* this one is trigger internally only */
#define XE_BO_INTERNAL_TEST BIT(30)
#define XE_BO_INTERNAL_64K BIT(31)
@@ -128,6 +129,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags);
+int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags);
@@ -242,12 +244,15 @@ int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
-extern struct ttm_device_funcs xe_ttm_funcs;
+extern const struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo);
+
int xe_bo_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 64c2249a4e40..14ef13b7b421 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -88,6 +88,9 @@ struct xe_bo {
* objects.
*/
u16 cpu_caching;
+
+ /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
+ struct list_head vram_userfault_link;
};
#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index c56fd7d59f05..01db5b27bec5 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -55,6 +55,7 @@ static int info(struct seq_file *m, void *data)
drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist));
drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs));
drm_printf(&p, "has_usm %s\n", str_yes_no(xe->info.has_usm));
+ drm_printf(&p, "skip_guc_pc %s\n", str_yes_no(xe->info.skip_guc_pc));
for_each_gt(gt, xe, id) {
drm_printf(&p, "gt%d force wake %d\n", id,
xe_force_wake_ref(gt_to_fw(gt), XE_FW_GT));
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 68abc0b195be..68d3d623a05b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -16,6 +16,8 @@
#include "xe_guc_ct.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
+#include "xe_sched_job.h"
+#include "xe_vm.h"
/**
* DOC: Xe device coredump
@@ -58,11 +60,22 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
+static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+{
+ struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+
+ xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ if (ss->vm)
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+}
+
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
- struct xe_devcoredump_snapshot *ss;
+ struct xe_device *xe = coredump_to_xe(coredump);
+ struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
struct drm_printer p;
struct drm_print_iterator iter;
struct timespec64 ts;
@@ -72,12 +85,14 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
if (!data || !coredump_to_xe(coredump))
return -ENODEV;
+ /* Ensure delayed work is captured before continuing */
+ flush_work(&ss->work);
+
iter.data = buffer;
iter.offset = 0;
iter.start = offset;
iter.remain = count;
- ss = &coredump->snapshot;
p = drm_coredump_printer(&iter);
drm_printf(&p, "**** Xe Device Coredump ****\n");
@@ -88,16 +103,24 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
ts = ktime_to_timespec64(ss->boot_time);
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+ xe_device_snapshot_print(xe, &p);
drm_printf(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
+ drm_printf(&p, "\n**** Job ****\n");
+ xe_sched_job_snapshot_print(coredump->snapshot.job, &p);
+
drm_printf(&p, "\n**** HW Engines ****\n");
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
&p);
+ if (coredump->snapshot.vm) {
+ drm_printf(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(coredump->snapshot.vm, &p);
+ }
return count - iter.remain;
}
@@ -111,21 +134,28 @@ static void xe_devcoredump_free(void *data)
if (!data || !coredump_to_xe(coredump))
return;
+ cancel_work_sync(&coredump->snapshot.work);
+
xe_guc_ct_snapshot_free(coredump->snapshot.ct);
xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
+ xe_sched_job_snapshot_free(coredump->snapshot.job);
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
+ xe_vm_snapshot_free(coredump->snapshot.vm);
+ /* To prevent stale data on next snapshot, clear everything */
+ memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
coredump->captured = false;
drm_info(&coredump_to_xe(coredump)->drm,
"Xe device coredump has been deleted.\n");
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
- struct xe_exec_queue *q)
+ struct xe_sched_job *job)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
+ struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
@@ -137,6 +167,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
+ ss->gt = q->gt;
+ INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
+
cookie = dma_fence_begin_signalling();
for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
if (adj_logical_mask & BIT(i)) {
@@ -150,7 +183,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
+ coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(job);
+ coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
+ coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
for_each_hw_engine(hwe, q->gt, id) {
if (hwe->class != q->hwe->class ||
@@ -161,21 +196,24 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
}
+ if (ss->vm)
+ queue_work(system_unbound_wq, &ss->work);
+
xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
dma_fence_end_signalling(cookie);
}
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
- * @q: The faulty xe_exec_queue, where the issue was detected.
+ * @job: The faulty xe_sched_job, where the issue was detected.
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if we still have the core dump device available
* with the information of the 'first' snapshot.
*/
-void xe_devcoredump(struct xe_exec_queue *q)
+void xe_devcoredump(struct xe_sched_job *job)
{
- struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_device *xe = gt_to_xe(job->q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
if (coredump->captured) {
@@ -184,7 +222,7 @@ void xe_devcoredump(struct xe_exec_queue *q)
}
coredump->captured = true;
- devcoredump_snapshot(coredump, q);
+ devcoredump_snapshot(coredump, job);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
@@ -194,3 +232,4 @@ void xe_devcoredump(struct xe_exec_queue *q)
xe_devcoredump_read, xe_devcoredump_free);
}
#endif
+
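For context, a hedged sketch of a caller using the new job-based signature; the real call site is expected to be the submission/timeout handling path, which this patch does not show:

static void example_handle_timedout_job(struct xe_sched_job *job)
{
	/*
	 * Serialized with GT reset by the caller; only the first hang
	 * actually produces a coredump device.
	 */
	xe_devcoredump(job);
}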
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index 6ac218a5c194..df8671f0b5eb 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -7,12 +7,12 @@
#define _XE_DEVCOREDUMP_H_
struct xe_device;
-struct xe_exec_queue;
+struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_exec_queue *q);
+void xe_devcoredump(struct xe_sched_job *job);
#else
-static inline void xe_devcoredump(struct xe_exec_queue *q)
+static inline void xe_devcoredump(struct xe_sched_job *job)
{
}
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 7fdad9c3d3dd..6f654b63c7f1 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -12,6 +12,7 @@
#include "xe_hw_engine_types.h"
struct xe_device;
+struct xe_gt;
/**
* struct xe_devcoredump_snapshot - Crash snapshot
@@ -26,13 +27,23 @@ struct xe_devcoredump_snapshot {
/** @boot_time: Relative boot time so the uptime can be calculated. */
ktime_t boot_time;
+ /** @gt: Affected GT, used by forcewake for delayed capture */
+ struct xe_gt *gt;
+	/** @work: Work struct for deferred capture outside of the signaling context */
+ struct work_struct work;
+
/* GuC snapshots */
/** @ct: GuC CT snapshot */
struct xe_guc_ct_snapshot *ct;
/** @ge: Guc Engine snapshot */
struct xe_guc_submit_exec_queue_snapshot *ge;
+
/** @hwe: HW Engine snapshot array */
struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
+ /** @job: Snapshot of job state */
+ struct xe_sched_job_snapshot *job;
+ /** @vm: Snapshot of VM state */
+ struct xe_vm_snapshot *vm;
};
/**
@@ -44,8 +55,6 @@ struct xe_devcoredump_snapshot {
* for reading the information.
*/
struct xe_devcoredump {
- /** @xe: Xe device. */
- struct xe_device *xe;
/** @captured: The snapshot of the first hang has already been taken. */
bool captured;
/** @snapshot: Snapshot is captured at time of the first crash */
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 1f0b4b9ce84f..ca85e81fdb44 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -15,32 +15,35 @@
#include <drm/drm_print.h>
#include <drm/xe_drm.h>
+#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
-#include "xe_display.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
-#include "xe_exec_queue.h"
#include "xe_exec.h"
+#include "xe_exec_queue.h"
#include "xe_ggtt.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
+#include "xe_hwmon.h"
#include "xe_irq.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
+#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_wait_user_fence.h"
-#include "xe_hwmon.h"
#ifdef CONFIG_LOCKDEP
struct lockdep_map xe_device_mem_access_lockdep_map = {
@@ -83,9 +86,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef);
-
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -102,8 +102,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
- device_kill_persistent_exec_queues(xe, xef);
-
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
@@ -255,9 +253,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
- INIT_LIST_HEAD(&xe->persistent_engines.list);
-
spin_lock_init(&xe->pinned.lock);
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
INIT_LIST_HEAD(&xe->pinned.external_vram);
@@ -432,10 +427,15 @@ int xe_device_probe(struct xe_device *xe)
struct xe_tile *tile;
struct xe_gt *gt;
int err;
+ u8 last_gt;
u8 id;
xe_pat_init_early(xe);
+ err = xe_sriov_init(xe);
+ if (err)
+ return err;
+
xe->info.mem_region_mask = 1;
err = xe_display_init_nommio(xe);
if (err)
@@ -456,6 +456,17 @@ int xe_device_probe(struct xe_device *xe)
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
+ if (IS_SRIOV_VF(xe)) {
+ err = xe_memirq_init(&tile->sriov.vf.memirq);
+ if (err)
+ return err;
+ }
+ }
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_init_hwconfig(gt);
+ if (err)
+ return err;
}
err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
@@ -510,16 +521,18 @@ int xe_device_probe(struct xe_device *xe)
goto err_irq_shutdown;
for_each_gt(gt, xe, id) {
+ last_gt = id;
+
err = xe_gt_init(gt);
if (err)
- goto err_irq_shutdown;
+ goto err_fini_gt;
}
xe_heci_gsc_init(xe);
err = xe_display_init(xe);
if (err)
- goto err_irq_shutdown;
+ goto err_fini_gt;
err = drm_dev_register(&xe->drm, 0);
if (err)
@@ -540,6 +553,14 @@ int xe_device_probe(struct xe_device *xe)
err_fini_display:
xe_display_driver_remove(xe);
+err_fini_gt:
+ for_each_gt(gt, xe, id) {
+ if (id < last_gt)
+ xe_gt_remove(gt);
+ else
+ break;
+ }
+
err_irq_shutdown:
xe_irq_shutdown(xe);
err:
@@ -557,12 +578,18 @@ static void xe_device_remove_display(struct xe_device *xe)
void xe_device_remove(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ u8 id;
+
xe_device_remove_display(xe);
xe_display_fini(xe);
xe_heci_gsc_fini(xe);
+ for_each_gt(gt, xe, id)
+ xe_gt_remove(gt);
+
xe_irq_shutdown(xe);
}
@@ -570,37 +597,6 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- if (!list_empty(&q->persistent.link))
- list_del(&q->persistent.link);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef)
-{
- struct xe_exec_queue *q, *next;
-
- mutex_lock(&xe->persistent_engines.lock);
- list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
- persistent.link)
- if (q->persistent.xef == xef) {
- xe_exec_queue_kill(q);
- list_del_init(&q->persistent.link);
- }
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
@@ -698,3 +694,33 @@ void xe_device_mem_access_put(struct xe_device *xe)
xe_assert(xe, ref >= 0);
}
+
+void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
+{
+ struct xe_gt *gt;
+ u8 id;
+
+ drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
+ drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);
+
+ for_each_gt(gt, xe, id) {
+ drm_printf(p, "GT id: %u\n", id);
+ drm_printf(p, "\tType: %s\n",
+ gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
+ drm_printf(p, "\tIP ver: %u.%u.%u\n",
+ REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
+ REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
+ REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
+ drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
+ }
+}
+
+u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
+{
+ return sign_extend64(address, xe->info.va_bits - 1);
+}
+
+u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
+{
+ return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
+}
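A small worked example of the canonical address helpers, assuming xe->info.va_bits == 48 and local u64 variables:

	u64 va    = 0x0000800000000000ull;                    /* bit 47 set */
	u64 canon = xe_device_canonicalize_addr(xe, va);      /* 0xffff800000000000 */
	u64 back  = xe_device_uncanonicalize_addr(xe, canon); /* == va again */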
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 3da83b233206..14be34d9f543 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q);
-
void xe_device_wmb(struct xe_device *xe);
static inline struct xe_file *to_xe_file(const struct drm_file *file)
@@ -168,6 +164,16 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
return xe->info.has_sriov;
}
+static inline bool xe_device_has_memirq(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) >= 1250;
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
+void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
+
+u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
+u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 5dc9127a2029..9785eef2e5a4 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -16,6 +16,7 @@
#include "xe_heci_gsc.h"
#include "xe_gt_types.h"
#include "xe_lmtt_types.h"
+#include "xe_memirq_types.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
@@ -142,10 +143,10 @@ struct xe_tile {
* * 8MB-16MB: global GTT
*/
struct {
- /** @size: size of tile's MMIO space */
+ /** @mmio.size: size of tile's MMIO space */
size_t size;
- /** @regs: pointer to tile's MMIO space (starting with registers) */
+ /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
void __iomem *regs;
} mmio;
@@ -155,31 +156,31 @@ struct xe_tile {
* Each tile has its own additional 256MB (28-bit) MMIO-extension space.
*/
struct {
- /** @size: size of tile's additional MMIO-extension space */
+ /** @mmio_ext.size: size of tile's additional MMIO-extension space */
size_t size;
- /** @regs: pointer to tile's additional MMIO-extension space */
+ /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
void __iomem *regs;
} mmio_ext;
/** @mem: memory management info for tile */
struct {
/**
- * @vram: VRAM info for tile.
+ * @mem.vram: VRAM info for tile.
*
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
struct xe_mem_region vram;
- /** @vram_mgr: VRAM TTM manager */
+ /** @mem.vram_mgr: VRAM TTM manager */
struct xe_ttm_vram_mgr *vram_mgr;
- /** @ggtt: Global graphics translation table */
+ /** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
/**
- * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+ * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
*
* Media GT shares a pool with its primary GT.
*/
@@ -192,6 +193,10 @@ struct xe_tile {
/** @sriov.pf.lmtt: Local Memory Translation Table. */
struct xe_lmtt lmtt;
} pf;
+ struct {
+ /** @sriov.vf.memirq: Memory Based Interrupts. */
+ struct xe_memirq memirq;
+ } vf;
} sriov;
/** @migrate: Migration helper for vram blits and clearing */
@@ -213,68 +218,68 @@ struct xe_device {
/** @info: device info */
struct intel_device_info {
- /** @graphics_name: graphics IP name */
+ /** @info.graphics_name: graphics IP name */
const char *graphics_name;
- /** @media_name: media IP name */
+ /** @info.media_name: media IP name */
const char *media_name;
- /** @tile_mmio_ext_size: size of MMIO extension space, per-tile */
+ /** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */
u32 tile_mmio_ext_size;
- /** @graphics_verx100: graphics IP version */
+ /** @info.graphics_verx100: graphics IP version */
u32 graphics_verx100;
- /** @media_verx100: media IP version */
+ /** @info.media_verx100: media IP version */
u32 media_verx100;
- /** @mem_region_mask: mask of valid memory regions */
+ /** @info.mem_region_mask: mask of valid memory regions */
u32 mem_region_mask;
- /** @platform: XE platform enum */
+ /** @info.platform: XE platform enum */
enum xe_platform platform;
- /** @subplatform: XE subplatform enum */
+ /** @info.subplatform: XE subplatform enum */
enum xe_subplatform subplatform;
- /** @devid: device ID */
+ /** @info.devid: device ID */
u16 devid;
- /** @revid: device revision */
+ /** @info.revid: device revision */
u8 revid;
- /** @step: stepping information for each IP */
+ /** @info.step: stepping information for each IP */
struct xe_step_info step;
- /** @dma_mask_size: DMA address bits */
+ /** @info.dma_mask_size: DMA address bits */
u8 dma_mask_size;
- /** @vram_flags: Vram flags */
+ /** @info.vram_flags: Vram flags */
u8 vram_flags;
- /** @tile_count: Number of tiles */
+ /** @info.tile_count: Number of tiles */
u8 tile_count;
- /** @gt_count: Total number of GTs for entire device */
+ /** @info.gt_count: Total number of GTs for entire device */
u8 gt_count;
- /** @vm_max_level: Max VM level */
+ /** @info.vm_max_level: Max VM level */
u8 vm_max_level;
- /** @va_bits: Maximum bits of a virtual address */
+ /** @info.va_bits: Maximum bits of a virtual address */
u8 va_bits;
- /** @is_dgfx: is discrete device */
+ /** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
- /** @has_asid: Has address space ID */
+ /** @info.has_asid: Has address space ID */
u8 has_asid:1;
- /** @force_execlist: Forced execlist submission */
+ /** @info.force_execlist: Forced execlist submission */
u8 force_execlist:1;
- /** @has_flat_ccs: Whether flat CCS metadata is used */
+ /** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
- /** @has_llc: Device has a shared CPU+GPU last level cache */
+ /** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
- /** @has_mmio_ext: Device has extra MMIO address range */
+ /** @info.has_mmio_ext: Device has extra MMIO address range */
u8 has_mmio_ext:1;
- /** @has_range_tlb_invalidation: Has range based TLB invalidations */
+ /** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
u8 has_range_tlb_invalidation:1;
- /** @has_sriov: Supports SR-IOV */
+ /** @info.has_sriov: Supports SR-IOV */
u8 has_sriov:1;
- /** @has_usm: Device has unified shared memory support */
+ /** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
- /** @enable_display: display enabled */
+ /** @info.enable_display: display enabled */
u8 enable_display:1;
- /** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
+ /** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
- /** @skip_pcode: skip access to PCODE uC */
+ /** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @has_heci_gscfi: device has heci gscfi */
+ /** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
- /** @skip_guc_pc: Skip GuC based PM feature init */
+ /** @info.skip_guc_pc: Skip GuC based PM feature init */
u8 skip_guc_pc:1;
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -286,10 +291,10 @@ struct xe_device {
/** @irq: device interrupt state */
struct {
- /** @lock: lock for processing irq's on this device */
+ /** @irq.lock: lock for processing irq's on this device */
spinlock_t lock;
- /** @enabled: interrupts enabled on this device */
+ /** @irq.enabled: interrupts enabled on this device */
bool enabled;
} irq;
@@ -298,17 +303,17 @@ struct xe_device {
/** @mmio: mmio info for device */
struct {
- /** @size: size of MMIO space for device */
+ /** @mmio.size: size of MMIO space for device */
size_t size;
- /** @regs: pointer to MMIO space for device */
+ /** @mmio.regs: pointer to MMIO space for device */
void __iomem *regs;
} mmio;
/** @mem: memory info for device */
struct {
- /** @vram: VRAM info for device */
+ /** @mem.vram: VRAM info for device */
struct xe_mem_region vram;
- /** @sys_mgr: system TTM manager */
+ /** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
} mem;
@@ -316,48 +321,42 @@ struct xe_device {
struct {
/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
enum xe_sriov_mode __mode;
+ /** @sriov.wq: workqueue used by the virtualization workers */
+ struct workqueue_struct *wq;
} sriov;
/** @clients: drm clients info */
struct {
- /** @lock: Protects drm clients info */
+ /** @clients.lock: Protects drm clients info */
spinlock_t lock;
- /** @count: number of drm clients */
+ /** @clients.count: number of drm clients */
u64 count;
} clients;
/** @usm: unified memory state */
struct {
- /** @asid: convert a ASID to VM */
+ /** @usm.asid_to_vm: convert an ASID to a VM */
struct xarray asid_to_vm;
- /** @next_asid: next ASID, used to cyclical alloc asids */
+ /** @usm.next_asid: next ASID, used for cyclic ASID allocation */
u32 next_asid;
- /** @num_vm_in_fault_mode: number of VM in fault mode */
+ /** @usm.num_vm_in_fault_mode: number of VM in fault mode */
u32 num_vm_in_fault_mode;
- /** @num_vm_in_non_fault_mode: number of VM in non-fault mode */
+ /** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */
u32 num_vm_in_non_fault_mode;
- /** @lock: protects UM state */
+ /** @usm.lock: protects USM state */
struct mutex lock;
} usm;
- /** @persistent_engines: engines that are closed but still running */
- struct {
- /** @lock: protects persistent engines */
- struct mutex lock;
- /** @list: list of persistent engines */
- struct list_head list;
- } persistent_engines;
-
/** @pinned: pinned BO state */
struct {
- /** @lock: protected pinned BO list state */
+ /** @pinned.lock: protects pinned BO list state */
spinlock_t lock;
- /** @evicted: pinned kernel BO that are present */
+ /** @pinned.kernel_bo_present: pinned kernel BOs that are present */
struct list_head kernel_bo_present;
- /** @evicted: pinned BO that have been evicted */
+ /** @pinned.evicted: pinned BOs that have been evicted */
struct list_head evicted;
- /** @external_vram: pinned external BO in vram*/
+ /** @pinned.external_vram: pinned external BOs in vram */
struct list_head external_vram;
} pinned;
@@ -378,36 +377,57 @@ struct xe_device {
* triggering additional actions when they occur.
*/
struct {
- /** @ref: ref count of memory accesses */
+ /** @mem_access.ref: ref count of memory accesses */
atomic_t ref;
+
+ /**
+ * @mem_access.vram_userfault: Encapsulate vram_userfault
+ * related stuff
+ */
+ struct {
+ /**
+ * @mem_access.vram_userfault.lock: Protects access to
+ * @mem_access.vram_userfault.list. A mutex is used instead
+ * of a spinlock because the lock covers the entire list
+ * operation, which may sleep
+ */
+ struct mutex lock;
+
+ /**
+ * @mem_access.vram_userfault.list: List of userfaulted
+ * vram BOs whose mmap mappings need to be released in
+ * the runtime suspend path
+ */
+ struct list_head list;
+ } vram_userfault;
} mem_access;
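The consumer of this lock/list pair runs in the runtime-suspend path. Below is a minimal sketch of that side, assuming a helper named xe_bo_runtime_suspend_userfault_sketch() and a vram_userfault_link member on struct xe_bo; both names are illustrative and not taken from this patch.

/* Illustrative sketch only: helper name and vram_userfault_link are assumptions. */
static void xe_bo_runtime_suspend_userfault_sketch(struct xe_device *xe)
{
	struct xe_bo *bo, *next;

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, next, &xe->mem_access.vram_userfault.list,
				 vram_userfault_link) {
		/* Drop the entry and zap the CPU mapping so the next access
		 * faults again after runtime resume.
		 */
		list_del_init(&bo->vram_userfault_link);
		drm_vma_node_unmap(&bo->ttm.base.vma_node,
				   bo->ttm.base.dev->anon_inode->i_mapping);
	}
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
}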
/**
* @pat: Encapsulate PAT related stuff
*/
struct {
- /** Internal operations to abstract platforms */
+ /** @pat.ops: Internal operations to abstract platforms */
const struct xe_pat_ops *ops;
- /** PAT table to program in the HW */
+ /** @pat.table: PAT table to program in the HW */
const struct xe_pat_table_entry *table;
- /** Number of PAT entries */
+ /** @pat.n_entries: Number of PAT entries */
int n_entries;
u32 idx[__XE_CACHE_LEVEL_COUNT];
} pat;
/** @d3cold: Encapsulate d3cold related stuff */
struct {
- /** capable: Indicates if root port is d3cold capable */
+ /** @d3cold.capable: Indicates if root port is d3cold capable */
bool capable;
- /** @allowed: Indicates if d3cold is a valid device state */
+ /** @d3cold.allowed: Indicates if d3cold is a valid device state */
bool allowed;
- /** @power_lost: Indicates if card has really lost power. */
+ /** @d3cold.power_lost: Indicates if card has really lost power. */
bool power_lost;
/**
- * @vram_threshold:
+ * @d3cold.vram_threshold:
*
* This represents the permissible threshold(in megabytes)
* for vram save/restore. d3cold will be disallowed,
@@ -416,7 +436,7 @@ struct xe_device {
* Default threshold value is 300mb.
*/
u32 vram_threshold;
- /** @lock: protect vram_threshold */
+ /** @d3cold.lock: protect vram_threshold */
struct mutex lock;
} d3cold;
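For context on how the threshold above is meant to be consumed, a minimal sketch of the gating check follows; the helper name is an assumption and the VRAM usage is passed in rather than computed, since the accounting is out of scope here.

/* Illustrative sketch only: the helper name is an assumption. */
static void xe_pm_d3cold_allowed_toggle_sketch(struct xe_device *xe, u64 vram_used_mb)
{
	mutex_lock(&xe->d3cold.lock);
	/* d3cold only pays off while the VRAM save/restore stays small. */
	xe->d3cold.allowed = xe->d3cold.capable &&
			     vram_used_mb < xe->d3cold.vram_threshold;
	mutex_unlock(&xe->d3cold.lock);
}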
@@ -524,17 +544,17 @@ struct xe_file {
/** @vm: VM state for file */
struct {
- /** @xe: xarray to store VMs */
+ /** @vm.xa: xarray to store VMs */
struct xarray xa;
- /** @lock: protects file VM state */
+ /** @vm.lock: protects file VM state */
struct mutex lock;
} vm;
/** @exec_queue: Submission exec queue state for file */
struct {
- /** @xe: xarray to store engines */
+ /** @exec_queue.xa: xarray to store exec queues */
struct xarray xa;
- /** @lock: protects file engine state */
+ /** @exec_queue.lock: protects file engine state */
struct mutex lock;
} exec_queue;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 82d1305e831f..87c10bd7958b 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -113,7 +113,7 @@ static void bo_meminfo(struct xe_bo *bo,
else
mem_type = XE_PL_TT;
- if (bo->ttm.base.handle_count > 1)
+ if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))
stats[mem_type].shared += sz;
else
stats[mem_type].private += sz;
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
- static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
- [XE_PL_SYSTEM] = "system",
- [XE_PL_TT] = "gtt",
- [XE_PL_VRAM0] = "vram0",
- [XE_PL_VRAM1] = "vram1",
- [4 ... 6] = NULL,
- [XE_PL_STOLEN] = "stolen"
- };
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
struct xe_file *xef = file->driver_priv;
struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
spin_unlock(&client->bos_lock);
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
- if (!mem_type_to_name[mem_type])
+ if (!xe_mem_type_to_name[mem_type])
continue;
man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
- mem_type_to_name[mem_type]);
+ xe_mem_type_to_name[mem_type]);
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 17f26952e665..952496c6260d 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -96,7 +96,46 @@
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
- return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+ struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int num_fences;
+ int ret;
+
+ ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+ if (ret)
+ return ret;
+
+ /*
+ * 1 fence slot for the final submit, 1 more per tile for the GPU
+ * binds and 1 extra for the CPU bind. Note that there are potentially
+ * many VMAs per object/dma-resv, however the fence slots will just be
+ * re-used, since they are largely on the same timeline and the seqnos
+ * should be in order. In the case of CPU binds a single dummy fence is
+ * used for all of them, so there is no need for a per-tile slot there.
+ */
+ num_fences = 1 + 1 + vm->xe->info.tile_count;
+
+ /*
+ * We don't know upfront exactly how many fence slots we will need at
+ * the start of the exec, since the TTM bo_validate above can consume
+ * numerous fence slots. Also, due to how dma_resv_reserve_fences()
+ * works, it only ensures that at least that many fence slots are
+ * available, i.e. if there are already 10 slots available and we
+ * reserve two more, it can just noop without reserving anything. It
+ * is quite possible that TTM steals some of the fence slots and then
+ * when it comes time to do the vma binding and final exec stage we are
+ * lacking enough fence slots, leading to some nasty BUG_ON() when
+ * adding the fences. Hence just add our own fences here, after the
+ * validate stage.
+ */
+ drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) {
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
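To make the fence-slot arithmetic above concrete: on a hypothetical two-tile device the count is 1 (exec) + 1 (CPU bind) + 2 (one GPU bind per tile) = 4 slots per dma-resv. A minimal sketch, with an assumed helper name:

/* Illustrative sketch only: the helper name is an assumption. */
static unsigned int xe_exec_fence_slots_sketch(const struct xe_device *xe)
{
	/* 1 slot for the final exec fence, 1 for the shared CPU-bind dummy
	 * fence and 1 per tile for the GPU-bind fences.
	 */
	return 1 + 1 + xe->info.tile_count;
}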
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -197,7 +236,6 @@ retry:
}
vm_exec.vm = &vm->gpuvm;
- vm_exec.num_fences = 1 + vm->xe->info.tile_count;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
drm_exec_init(exec, vm_exec.flags, 0);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 254b1d3af4cb..11e150f4c0c1 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -30,21 +30,23 @@ enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};
-static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
- struct xe_vm *vm,
- u32 logical_mask,
- u16 width, struct xe_hw_engine *hwe,
- u32 flags)
+static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
+ u64 extensions, int ext_number, bool create);
+
+static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
+ struct xe_vm *vm,
+ u32 logical_mask,
+ u16 width, struct xe_hw_engine *hwe,
+ u32 flags, u64 extensions)
{
struct xe_exec_queue *q;
struct xe_gt *gt = hwe->gt;
int err;
- int i;
/* only kernel queues can be permanent */
XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
- q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
+ q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
if (!q)
return ERR_PTR(-ENOMEM);
@@ -52,38 +54,63 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->flags = flags;
q->hwe = hwe;
q->gt = gt;
- if (vm)
- q->vm = xe_vm_get(vm);
q->class = hwe->class;
q->width = width;
q->logical_mask = logical_mask;
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
- INIT_LIST_HEAD(&q->persistent.link);
INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->multi_gt_link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
hwe->eclass->sched_props.preempt_timeout_us;
+ q->sched_props.job_timeout_ms =
+ hwe->eclass->sched_props.job_timeout_ms;
if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (extensions) {
+ /*
+ * may set q->usm, must come before xe_lrc_init(),
+ * may overwrite q->sched_props, must come before q->ops->init()
+ */
+ err = exec_queue_user_extensions(xe, q, extensions, 0, true);
+ if (err) {
+ kfree(q);
+ return ERR_PTR(err);
+ }
+ }
+
+ if (vm)
+ q->vm = xe_vm_get(vm);
+
if (xe_exec_queue_is_parallel(q)) {
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
}
- if (q->flags & EXEC_QUEUE_FLAG_VM) {
- q->bind.fence_ctx = dma_fence_context_alloc(1);
- q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
- }
- for (i = 0; i < width; ++i) {
- err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
+ return q;
+}
+
+static void __xe_exec_queue_free(struct xe_exec_queue *q)
+{
+ if (q->vm)
+ xe_vm_put(q->vm);
+ kfree(q);
+}
+
+static int __xe_exec_queue_init(struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+ int i, err;
+
+ for (i = 0; i < q->width; ++i) {
+ err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
if (err)
goto err_lrc;
}
@@ -100,35 +127,47 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
* can perform GuC CT actions when needed. Caller is expected to have
* already grabbed the rpm ref outside any sensitive locks.
*/
- if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
- return q;
+ return 0;
err_lrc:
for (i = i - 1; i >= 0; --i)
xe_lrc_finish(q->lrc + i);
- kfree(q);
- return ERR_PTR(err);
+ return err;
}
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
- struct xe_hw_engine *hwe, u32 flags)
+ struct xe_hw_engine *hwe, u32 flags,
+ u64 extensions)
{
struct xe_exec_queue *q;
int err;
+ q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
+ extensions);
+ if (IS_ERR(q))
+ return q;
+
if (vm) {
err = xe_vm_lock(vm, true);
if (err)
- return ERR_PTR(err);
+ goto err_post_alloc;
}
- q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
+
+ err = __xe_exec_queue_init(q);
if (vm)
xe_vm_unlock(vm);
+ if (err)
+ goto err_post_alloc;
return q;
+
+err_post_alloc:
+ __xe_exec_queue_free(q);
+ return ERR_PTR(err);
}
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
@@ -153,7 +192,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
if (!logical_mask)
return ERR_PTR(-ENODEV);
- return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
+ return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}
void xe_exec_queue_destroy(struct kref *ref)
@@ -179,10 +218,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
xe_lrc_finish(q->lrc + i);
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
xe_device_mem_access_put(gt_to_xe(q->gt));
- if (q->vm)
- xe_vm_put(q->vm);
-
- kfree(q);
+ __xe_exec_queue_free(q);
}
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
@@ -240,7 +276,11 @@ static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q
if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
return -EPERM;
- return q->ops->set_priority(q, value);
+ if (!create)
+ return q->ops->set_priority(q, value);
+
+ q->sched_props.priority = value;
+ return 0;
}
static bool xe_exec_queue_enforce_schedule_limit(void)
@@ -307,102 +347,10 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
- return q->ops->set_timeslice(q, value);
-}
-
-static int exec_queue_set_preemption_timeout(struct xe_device *xe,
- struct xe_exec_queue *q, u64 value,
- bool create)
-{
- u32 min = 0, max = 0;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_preempt_timeout(q, value);
-}
-
-static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
- return -EINVAL;
-
- if (value)
- q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
- else
- q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-
- return 0;
-}
-
-static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- u32 min = 0, max = 0;
-
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_job_timeout(q, value);
-}
-
-static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_trigger = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_notify = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- if (value > DRM_XE_ACC_GRANULARITY_64M)
- return -EINVAL;
-
- q->usm.acc_granularity = value;
+ if (!create)
+ return q->ops->set_timeslice(q, value);
+ q->sched_props.timeslice_us = value;
return 0;
}
@@ -413,12 +361,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -437,10 +379,15 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
- XE_IOCTL_DBG(xe, ext.pad))
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+ if (!exec_queue_set_property_funcs[idx])
+ return -EINVAL;
+
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}
@@ -633,6 +580,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
for_each_gt(gt, xe, id) {
struct xe_exec_queue *new;
+ u32 flags;
if (xe_gt_is_media_type(gt))
continue;
@@ -651,14 +599,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
/* The migration vm doesn't hold rpm ref */
xe_device_mem_access_get(xe);
+ flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
+
migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
- args->width, hwe,
- EXEC_QUEUE_FLAG_PERSISTENT |
- EXEC_QUEUE_FLAG_VM |
- (id ?
- EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
- 0));
+ args->width, hwe, flags,
+ args->extensions);
xe_device_mem_access_put(xe); /* now held by engine */
@@ -704,9 +650,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe,
- xe_vm_in_lr_mode(vm) ? 0 :
- EXEC_QUEUE_FLAG_PERSISTENT);
+ args->width, hwe, 0,
+ args->extensions);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
@@ -722,14 +667,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
}
- if (args->extensions) {
- err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
- if (XE_IOCTL_DBG(xe, err))
- goto kill_exec_queue;
- }
-
- q->persistent.xef = xef;
-
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
@@ -872,10 +809,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
- xe_exec_queue_kill(q);
- else
- xe_device_add_persistent_exec_queues(xe, q);
+ xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index d959cc4a1a82..02ce8d204622 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -16,7 +16,8 @@ struct xe_file;
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
- struct xe_hw_engine *hw_engine, u32 flags);
+ struct xe_hw_engine *hw_engine, u32 flags,
+ u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
enum xe_engine_class class, u32 flags);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 8d4b7feb8c30..62b3d9d1d7cd 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -106,70 +106,41 @@ struct xe_exec_queue {
};
/**
- * @persistent: persistent exec queue state
+ * @parallel: parallel submission state
*/
struct {
- /** @xef: file which this exec queue belongs to */
- struct xe_file *xef;
- /** @link: link in list of persistent exec queues */
- struct list_head link;
- } persistent;
-
- union {
- /**
- * @parallel: parallel submission state
- */
- struct {
- /** @composite_fence_ctx: context composite fence */
- u64 composite_fence_ctx;
- /** @composite_fence_seqno: seqno for composite fence */
- u32 composite_fence_seqno;
- } parallel;
- /**
- * @bind: bind submission state
- */
- struct {
- /** @fence_ctx: context bind fence */
- u64 fence_ctx;
- /** @fence_seqno: seqno for bind fence */
- u32 fence_seqno;
- } bind;
- };
+ /** @parallel.composite_fence_ctx: context composite fence */
+ u64 composite_fence_ctx;
+ /** @parallel.composite_fence_seqno: seqno for composite fence */
+ u32 composite_fence_seqno;
+ } parallel;
/** @sched_props: scheduling properties */
struct {
- /** @timeslice_us: timeslice period in micro-seconds */
+ /** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
- /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
- /** @priority: priority of this exec queue */
+ /** @sched_props.job_timeout_ms: job timeout in milliseconds */
+ u32 job_timeout_ms;
+ /** @sched_props.priority: priority of this exec queue */
enum xe_exec_queue_priority priority;
} sched_props;
/** @compute: compute exec queue state */
struct {
- /** @pfence: preemption fence */
+ /** @compute.pfence: preemption fence */
struct dma_fence *pfence;
- /** @context: preemption fence context */
+ /** @compute.context: preemption fence context */
u64 context;
- /** @seqno: preemption fence seqno */
+ /** @compute.seqno: preemption fence seqno */
u32 seqno;
- /** @link: link into VM's list of exec queues */
+ /** @compute.link: link into VM's list of exec queues */
struct list_head link;
- /** @lock: preemption fences lock */
+ /** @compute.lock: preemption fences lock */
spinlock_t lock;
} compute;
- /** @usm: unified shared memory state */
- struct {
- /** @acc_trigger: access counter trigger */
- u32 acc_trigger;
- /** @acc_notify: access counter notify */
- u32 acc_notify;
- /** @acc_granularity: access counter granularity */
- u32 acc_granularity;
- } usm;
-
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
@@ -198,8 +169,6 @@ struct xe_exec_queue_ops {
int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
/** @set_preempt_timeout: Set preemption timeout for exec queue */
int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
- /** @set_job_timeout: Set job timeout for exec queue */
- int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
/**
* @suspend: Suspend exec queue from executing, allowed to be called
* multiple times in a row before resume with the caveat that
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 96b5224eb478..dece2785933c 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{
struct xe_execlist_port *port = exl->port;
- enum xe_exec_queue_priority priority = exl->active_priority;
+ enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
XE_WARN_ON(priority < 0);
@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);
@@ -418,13 +416,6 @@ static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
return 0;
}
-static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q,
- u32 job_timeout_ms)
-{
- /* NIY */
- return 0;
-}
-
static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
{
/* NIY */
@@ -455,7 +446,6 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
- .set_job_timeout = execlist_exec_queue_set_job_timeout,
.suspend = execlist_exec_queue_suspend,
.suspend_wait = execlist_exec_queue_suspend_wait,
.resume = execlist_exec_queue_resume,
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 3efd2d066bf7..ab96edb058d6 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -11,12 +11,16 @@
#include <drm/i915_drm.h>
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
#include "xe_wopcm.h"
#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
@@ -141,7 +145,11 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
unsigned int gsm_size;
- gsm_size = probe_gsm_size(pdev);
+ if (IS_SRIOV_VF(xe))
+ gsm_size = SZ_8M; /* 8M of 8-byte PTEs maps the expected 4GiB GGTT */
+ else
+ gsm_size = probe_gsm_size(pdev);
+
if (gsm_size == 0) {
drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
return -ENOMEM;
@@ -312,6 +320,74 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
}
}
+static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
+ const struct drm_mm_node *node, const char *description)
+{
+ char buf[10];
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
+ node->start, node->start + node->size, buf, description);
+ }
+}
+
+/**
+ * xe_ggtt_balloon - prevent allocation of specified GGTT addresses
+ * @ggtt: the &xe_ggtt where we want to make reservation
+ * @start: the starting GGTT address of the reserved region
+ * @end: the end GGTT address of the reserved region
+ * @node: the &drm_mm_node to hold reserved GGTT node
+ *
+ * Use xe_ggtt_deballoon() to release a reserved GGTT node.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node)
+{
+ int err;
+
+ xe_tile_assert(ggtt->tile, start < end);
+ xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
+ xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
+ xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(node));
+
+ node->color = 0;
+ node->start = start;
+ node->size = end - start;
+
+ mutex_lock(&ggtt->lock);
+ err = drm_mm_reserve_node(&ggtt->mm, node);
+ mutex_unlock(&ggtt->lock);
+
+ if (xe_gt_WARN(ggtt->tile->primary_gt, err,
+ "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
+ node->start, node->start + node->size, ERR_PTR(err)))
+ return err;
+
+ xe_ggtt_dump_node(ggtt, node, "balloon");
+ return 0;
+}
+
+/**
+ * xe_ggtt_deballoon - release a reserved GGTT region
+ * @ggtt: the &xe_ggtt where the reserved node belongs
+ * @node: the &drm_mm_node with reserved GGTT region
+ *
+ * See xe_ggtt_balloon() for details.
+ */
+void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+{
+ if (!drm_mm_node_allocated(node))
+ return;
+
+ xe_ggtt_dump_node(ggtt, node, "deballoon");
+
+ mutex_lock(&ggtt->lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&ggtt->lock);
+}
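A short usage sketch of the balloon/deballoon pair may help; the function name and the reserved range are assumptions, and both addresses must be XE_PAGE_SIZE aligned as asserted in xe_ggtt_balloon().

/* Illustrative usage sketch only: function name and range are assumptions. */
static int ggtt_balloon_usage_sketch(struct xe_ggtt *ggtt, struct drm_mm_node *node)
{
	int err;

	/* Keep [4M, 8M) out of the allocator, e.g. because another entity owns it. */
	err = xe_ggtt_balloon(ggtt, SZ_4M, SZ_8M, node);
	if (err)
		return err;

	/* ... normal GGTT use; the reserved range is never handed out ... */

	xe_ggtt_deballoon(ggtt, node);
	return 0;
}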
+
int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node,
u32 size, u32 align, u32 mm_flags)
{
@@ -334,7 +410,8 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
+ u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
u64 start = bo->ggtt_node.start;
u64 offset, pte;
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index a09c166dff70..42705e1338e1 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -16,6 +16,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
+int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node);
+void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node);
+
int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
u32 size, u32 align);
int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index a8a895cf4b44..a61994292c43 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -7,12 +7,14 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/gsc_mkhi_commands_abi.h"
-#include "generated/xe_wa_oob.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
+#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
@@ -242,8 +244,31 @@ static int gsc_upload(struct xe_gsc *gsc)
if (err)
return err;
+ return 0;
+}
+
+static int gsc_upload_and_init(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ ret = gsc_upload(gsc);
+ if (ret)
+ return ret;
+
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
xe_gt_dbg(gt, "GSC FW async load completed\n");
+ /* HuC auth failure is not fatal */
+ if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
+ xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);
+
+ ret = xe_gsc_proxy_start(gsc);
+ if (ret)
+ return ret;
+
+ xe_gt_dbg(gt, "GSC proxy init completed\n");
+
return 0;
}
@@ -252,24 +277,28 @@ static void gsc_work(struct work_struct *work)
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
+ u32 actions;
int ret;
+ spin_lock_irq(&gsc->lock);
+ actions = gsc->work_actions;
+ gsc->work_actions = 0;
+ spin_unlock_irq(&gsc->lock);
+
xe_device_mem_access_get(xe);
xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- ret = gsc_upload(gsc);
- if (ret && ret != -EEXIST) {
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
- goto out;
+ if (actions & GSC_ACTION_FW_LOAD) {
+ ret = gsc_upload_and_init(gsc);
+ if (ret && ret != -EEXIST)
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
+ else
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
}
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
-
- /* HuC auth failure is not fatal */
- if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
- xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);
+ if (actions & GSC_ACTION_SW_PROXY)
+ xe_gsc_proxy_request_handler(gsc);
-out:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
xe_device_mem_access_put(xe);
}
@@ -282,6 +311,7 @@ int xe_gsc_init(struct xe_gsc *gsc)
gsc->fw.type = XE_UC_FW_TYPE_GSC;
INIT_WORK(&gsc->work, gsc_work);
+ spin_lock_init(&gsc->lock);
/* The GSC uC is only available on the media GT */
if (tile->media_gt && (gt != tile->media_gt)) {
@@ -302,6 +332,10 @@ int xe_gsc_init(struct xe_gsc *gsc)
else if (ret)
goto out;
+ ret = xe_gsc_proxy_init(gsc);
+ if (ret && ret != -ENODEV)
+ goto out;
+
return 0;
out:
@@ -356,7 +390,7 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
q = xe_exec_queue_create(xe, NULL,
BIT(hwe->logical_instance), 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT);
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
if (IS_ERR(q)) {
xe_gt_err(gt, "Failed to create queue for GSC submission\n");
err = PTR_ERR(q);
@@ -401,6 +435,10 @@ void xe_gsc_load_start(struct xe_gsc *gsc)
return;
}
+ spin_lock_irq(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_FW_LOAD;
+ spin_unlock_irq(&gsc->lock);
+
queue_work(gsc->wq, &gsc->work);
}
@@ -410,6 +448,15 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
+/**
+ * xe_gsc_remove() - Clean up the GSC structures before driver removal
+ * @gsc: the GSC uC
+ */
+void xe_gsc_remove(struct xe_gsc *gsc)
+{
+ xe_gsc_proxy_remove(gsc);
+}
+
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index bc1ef7f31ea2..c6fb32e3fd79 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -14,6 +14,7 @@ int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
+void xe_gsc_remove(struct xe_gsc *gsc);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
new file mode 100644
index 000000000000..309ef80e3b95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gsc_proxy.h"
+
+#include <linux/component.h>
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+#include <drm/i915_component.h>
+#include <drm/i915_gsc_proxy_mei_interface.h>
+
+#include "abi/gsc_proxy_commands_abi.h"
+#include "regs/xe_gsc_regs.h"
+#include "xe_bo.h"
+#include "xe_gsc.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+
+/*
+ * GSC proxy:
+ * The GSC uC needs to communicate with the CSME to perform certain operations.
+ * Since the GSC can't perform this communication directly on platforms where it
+ * is integrated in GT, the graphics driver needs to transfer the messages from
+ * GSC to CSME and back. The proxy flow must be manually started after the GSC
+ * is loaded to signal to GSC that we're ready to handle its messages and allow
+ * it to query its init data from CSME; GSC will then trigger an HECI2 interrupt
+ * if it needs to send messages to CSME again.
+ * The proxy flow is as follows:
+ * 1 - Xe submits a request to GSC asking for the message to CSME
+ * 2 - GSC replies with the proxy header + payload for CSME
+ * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
+ * 4 - CSME replies with the proxy header + payload for GSC
+ * 5 - Xe submits a request to GSC with the reply from CSME
+ * 6 - GSC replies either with a new header + payload (same as step 2, so we
+ * restart from there) or with an end message.
+ */
+
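Condensed into code, the numbered flow reads roughly like the loop below. This is only a sketch of what proxy_query() further down implements; emit_initial_query(), reply_is_end(), reply_payload_size() and stage_csme_reply() are hypothetical stand-ins for the buffer handling.

/* Illustrative sketch only: the four helpers are hypothetical stand-ins. */
static int gsc_proxy_flow_sketch(struct xe_gsc *gsc)
{
	u32 size = emit_initial_query(gsc);			/* step 1 */
	int ret;

	while (1) {
		ret = proxy_send_to_gsc(gsc, size);		/* steps 1-2 / 5-6 */
		if (ret)
			return ret;

		if (reply_is_end(gsc))				/* step 6: end message */
			return 0;

		ret = proxy_send_to_csme(gsc, reply_payload_size(gsc));	/* steps 3-4 */
		if (ret < 0)
			return ret;

		size = stage_csme_reply(gsc, ret);		/* prepare step 5 */
	}
}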
+/*
+ * The component should load quite quickly in most cases, but it could take
+ * a bit, so use a very large timeout just to cover the worst-case scenario.
+ */
+#define GSC_PROXY_INIT_TIMEOUT_MS 20000
+
+/* shorthand define for code compactness */
+#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))
+
+/* the protocol supports up to 32K in each direction */
+#define GSC_PROXY_BUFFER_SIZE SZ_32K
+#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+static inline struct xe_device *kdev_to_xe(struct device *kdev)
+{
+ return dev_get_drvdata(kdev);
+}
+
+static bool gsc_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
+
+ return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
+ HECI1_FWSTS1_PROXY_STATE_NORMAL;
+}
+
+static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* make sure we never accidentally write the RST bit */
+ clr |= HECI_H_CSR_RST;
+
+ xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
+}
+
+static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
+{
+ /* The status bit is cleared by writing to it */
+ __gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
+}
+
+static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
+{
+ u32 set = enabled ? HECI_H_CSR_IE : 0;
+ u32 clr = enabled ? 0 : HECI_H_CSR_IE;
+
+ __gsc_proxy_irq_rmw(gsc, clr, set);
+}
+
+static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct i915_gsc_proxy_component *comp = gsc->proxy.component;
+ int ret;
+
+ ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
+ if (ret < 0) {
+ xe_gt_err(gt, "Failed to send CSME proxy message\n");
+ return ret;
+ }
+
+ ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
+ if (ret < 0) {
+ xe_gt_err(gt, "Failed to receive CSME proxy message\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
+ u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
+ int err;
+
+ /* the message must contain at least the gsc and proxy headers */
+ if (size > GSC_PROXY_BUFFER_SIZE) {
+ xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
+ return -EINVAL;
+ }
+
+ err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
+ addr_out, GSC_PROXY_BUFFER_SIZE);
+ if (err) {
+ xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ return 0;
+}
+
+static int validate_proxy_header(struct xe_gsc_proxy_header *header,
+ u32 source, u32 dest, u32 max_size)
+{
+ u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
+ u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+
+ if (header->destination != dest || header->source != source)
+ return -ENOEXEC;
+
+ if (length + PROXY_HDR_SIZE > max_size)
+ return -E2BIG;
+
+ switch (type) {
+ case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
+ if (length > 0)
+ break;
+ fallthrough;
+ case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
+ return -EIO;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
+ xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)
+
+#define proxy_header_rd(xe_, map_, offset_, field_) \
+ xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)
+
+static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
+{
+ xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);
+
+ proxy_header_wr(xe, map, offset, hdr,
+ FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
+ FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));
+
+ proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
+ proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
+ proxy_header_wr(xe, map, offset, status, 0);
+
+ return offset + PROXY_HDR_SIZE;
+}
+
+static int proxy_query(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
+ void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
+ u32 wr_offset;
+ u32 reply_offset;
+ u32 size;
+ int ret;
+
+ wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
+ HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
+ wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);
+
+ size = wr_offset;
+
+ while (1) {
+ /*
+ * Poison the GSC response header space to make sure we don't
+ * read a stale reply.
+ */
+ xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);
+
+ /* send proxy message to GSC */
+ ret = proxy_send_to_gsc(gsc, size);
+ if (ret)
+ goto proxy_error;
+
+ /* check the reply from GSC */
+ ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
+ PROXY_HDR_SIZE, &reply_offset);
+ if (ret) {
+ xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
+ ERR_PTR(ret));
+ goto proxy_error;
+ }
+
+ /* copy the proxy header reply from GSC */
+ xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
+ reply_offset, PROXY_HDR_SIZE);
+
+ /* stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
+ break;
+
+ /* make sure the GSC-to-CSME proxy header is sane */
+ ret = validate_proxy_header(to_csme_hdr,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_CSME,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
+ if (ret) {
+ xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
+ ERR_PTR(ret));
+ goto proxy_error;
+ }
+
+ /* copy the rest of the message */
+ size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
+ xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
+ reply_offset + PROXY_HDR_SIZE, size);
+
+ /* send the GSC message to the CSME */
+ ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
+ if (ret < 0)
+ goto proxy_error;
+
+ /* reply size from CSME, including the proxy header */
+ size = ret;
+ if (size < PROXY_HDR_SIZE) {
+ xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
+ ret = -EPROTO;
+ goto proxy_error;
+ }
+
+ /* make sure the CSME-to-GSC proxy header is sane */
+ ret = validate_proxy_header(gsc->proxy.from_csme,
+ GSC_PROXY_ADDRESSING_CSME,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
+ if (ret) {
+ xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
+ goto proxy_error;
+ }
+
+ /* Emit a new header for sending the reply to the GSC */
+ wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
+ HECI_MEADDRESS_PROXY, 0, size);
+
+ /* copy the CSME reply and update the total msg size to include the GSC header */
+ xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);
+
+ size += wr_offset;
+ }
+
+proxy_error:
+ return ret < 0 ? ret : 0;
+}
+
+int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int slept;
+ int err;
+
+ if (!gsc->proxy.component_added)
+ return -ENODEV;
+
+ /* when GSC is loaded, we can queue this before the component is bound */
+ for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
+ if (gsc->proxy.component)
+ break;
+
+ msleep(100);
+ }
+
+ mutex_lock(&gsc->proxy.mutex);
+ if (!gsc->proxy.component) {
+ xe_gt_err(gt, "GSC proxy component not bound!\n");
+ err = -EIO;
+ } else {
+ /*
+ * clear the pending interrupt and allow new proxy requests to
+ * be generated while we handle the current one
+ */
+ gsc_proxy_irq_clear(gsc);
+ err = proxy_query(gsc);
+ }
+ mutex_unlock(&gsc->proxy.mutex);
+ return err;
+}
+
+void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ if (unlikely(!iir))
+ return;
+
+ if (!gsc->proxy.component) {
+ xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
+ return;
+ }
+
+ spin_lock(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_SW_PROXY;
+ spin_unlock(&gsc->lock);
+
+ queue_work(gsc->wq, &gsc->work);
+}
+
+static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_gt *gt = xe->tiles[0].media_gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+
+ mutex_lock(&gsc->proxy.mutex);
+ gsc->proxy.component = data;
+ gsc->proxy.component->mei_dev = mei_kdev;
+ mutex_unlock(&gsc->proxy.mutex);
+
+ return 0;
+}
+
+static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_gt *gt = xe->tiles[0].media_gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
+
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ mutex_lock(&gsc->proxy.mutex);
+ gsc->proxy.component = NULL;
+ mutex_unlock(&gsc->proxy.mutex);
+}
+
+static const struct component_ops xe_gsc_proxy_component_ops = {
+ .bind = xe_gsc_proxy_component_bind,
+ .unbind = xe_gsc_proxy_component_unbind,
+};
+
+static void proxy_channel_free(struct drm_device *drm, void *arg)
+{
+ struct xe_gsc *gsc = arg;
+
+ if (!gsc->proxy.bo)
+ return;
+
+ if (gsc->proxy.to_csme) {
+ kfree(gsc->proxy.to_csme);
+ gsc->proxy.to_csme = NULL;
+ gsc->proxy.from_csme = NULL;
+ }
+
+ if (gsc->proxy.bo) {
+ iosys_map_clear(&gsc->proxy.to_gsc);
+ iosys_map_clear(&gsc->proxy.from_gsc);
+ xe_bo_unpin_map_no_vm(gsc->proxy.bo);
+ gsc->proxy.bo = NULL;
+ }
+}
+
+static int proxy_channel_alloc(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *bo;
+ void *csme;
+ int err;
+
+ csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
+ if (!csme)
+ return -ENOMEM;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo)) {
+ kfree(csme);
+ return PTR_ERR(bo);
+ }
+
+ gsc->proxy.bo = bo;
+ gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
+ gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
+ gsc->proxy.to_csme = csme;
+ gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
+
+ err = drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
+ * @gsc: the GSC uC
+ *
+ * Return: 0 if the initialization was successful, a negative errno otherwise.
+ */
+int xe_gsc_proxy_init(struct xe_gsc *gsc)
+{
+ int err;
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+
+ mutex_init(&gsc->proxy.mutex);
+
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
+ xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
+ return -ENODEV;
+ }
+
+ /* no multi-tile devices with this feature yet */
+ if (tile->id > 0) {
+ xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
+ return -EINVAL;
+ }
+
+ err = proxy_channel_alloc(gsc);
+ if (err)
+ return err;
+
+ err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
+ I915_COMPONENT_GSC_PROXY);
+ if (err < 0) {
+ xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ gsc->proxy.component_added = true;
+
+ /* the component must be removed before unload, so can't use drmm for cleanup */
+
+ return 0;
+}
+
+/**
+ * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
+ * @gsc: the GSC uC
+ */
+void xe_gsc_proxy_remove(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int err = 0;
+
+ if (!gsc->proxy.component_added)
+ return;
+
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (err)
+ xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
+
+ /* try to disable the irq even if forcewake failed */
+ gsc_proxy_irq_toggle(gsc, false);
+
+ if (!err)
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_pm_runtime_put(xe);
+
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
+}
+
+/**
+ * xe_gsc_proxy_start() - start the proxy by submitting the first request
+ * @gsc: the GSC uC
+ *
+ * Return: 0 if the proxy is now enabled, a negative errno otherwise.
+ */
+int xe_gsc_proxy_start(struct xe_gsc *gsc)
+{
+ int err;
+
+ /* enable the proxy interrupt in the GSC shim layer */
+ gsc_proxy_irq_toggle(gsc, true);
+
+ /*
+ * The handling of the first proxy request must be manually triggered to
+ * notify the GSC that we're ready to support the proxy flow.
+ */
+ err = xe_gsc_proxy_request_handler(gsc);
+ if (err)
+ return err;
+
+ if (!gsc_proxy_init_done(gsc)) {
+ xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
new file mode 100644
index 000000000000..908f9441f093
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_PROXY_H_
+#define _XE_GSC_PROXY_H_
+
+#include <linux/types.h>
+
+struct xe_gsc;
+
+int xe_gsc_proxy_init(struct xe_gsc *gsc);
+void xe_gsc_proxy_remove(struct xe_gsc *gsc);
+int xe_gsc_proxy_start(struct xe_gsc *gsc);
+
+int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
+void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c
index 8c5381e5913f..348994b271be 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.c
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.c
@@ -5,6 +5,8 @@
#include "xe_gsc_submit.h"
+#include <linux/poison.h>
+
#include "abi/gsc_command_header_abi.h"
#include "xe_bb.h"
#include "xe_exec_queue.h"
@@ -69,6 +71,17 @@ u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
};
/**
+ * xe_gsc_poison_header - poison the MTL GSC header in memory
+ * @xe: the Xe device
+ * @map: the iosys map to write to
+ * @offset: offset from the start of the map at which the header resides
+ */
+void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
+{
+ xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE);
+};
+
+/**
* xe_gsc_check_and_update_pending - check the pending bit and update the input
* header with the retry handle from the output header
* @xe: the Xe device
@@ -112,11 +125,18 @@ int xe_gsc_read_out_header(struct xe_device *xe,
{
u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker);
u32 size = mtl_gsc_header_rd(xe, map, offset, message_size);
+ u32 status = mtl_gsc_header_rd(xe, map, offset, status);
u32 payload_size = size - GSC_HDR_SIZE;
if (marker != GSC_HECI_VALIDITY_MARKER)
return -EPROTO;
+ if (status != 0) {
+ drm_err(&xe->drm, "GSC header readout indicates error: %d\n",
+ status);
+ return -EINVAL;
+ }
+
if (size < GSC_HDR_SIZE || payload_size < min_payload_size)
return -ENODATA;
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.h b/drivers/gpu/drm/xe/xe_gsc_submit.h
index 0801da5d446a..1939855031a6 100644
--- a/drivers/gpu/drm/xe/xe_gsc_submit.h
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.h
@@ -14,6 +14,7 @@ struct xe_gsc;
u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
u8 heci_client_id, u64 host_session_id, u32 payload_size);
+void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset);
bool xe_gsc_check_and_update_pending(struct xe_device *xe,
struct iosys_map *in, u32 offset_in,
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 57fefd66a7ea..138d8cc0f19c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -6,12 +6,17 @@
#ifndef _XE_GSC_TYPES_H_
#define _XE_GSC_TYPES_H_
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
struct xe_bo;
struct xe_exec_queue;
+struct i915_gsc_proxy_component;
/**
* struct xe_gsc - GSC
@@ -34,6 +39,34 @@ struct xe_gsc {
/** @work: delayed load and proxy handling work */
struct work_struct work;
+
+ /** @lock: protects access to the work_actions mask */
+ spinlock_t lock;
+
+ /** @work_actions: mask of actions to be performed in the work */
+ u32 work_actions;
+#define GSC_ACTION_FW_LOAD BIT(0)
+#define GSC_ACTION_SW_PROXY BIT(1)
+
+ /** @proxy: sub-structure containing the SW proxy-related variables */
+ struct {
+ /** @proxy.component: struct for communication with mei component */
+ struct i915_gsc_proxy_component *component;
+ /** @proxy.mutex: protects the component binding and usage */
+ struct mutex mutex;
+ /** @proxy.component_added: whether the component has been added */
+ bool component_added;
+ /** @proxy.bo: object to store message to and from the GSC */
+ struct xe_bo *bo;
+ /** @proxy.to_gsc: map of the memory used to send messages to the GSC */
+ struct iosys_map to_gsc;
+ /** @proxy.from_gsc: map of the memory used to recv messages from the GSC */
+ struct iosys_map from_gsc;
+ /** @proxy.to_csme: pointer to the memory used to send messages to CSME */
+ void *to_csme;
+ /** @proxy.from_csme: pointer to the memory used to recv messages from CSME */
+ void *from_csme;
+ } proxy;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 35474ddbaf97..a0afe1ba6dd5 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -78,6 +78,19 @@ void xe_gt_sanitize(struct xe_gt *gt)
gt->uc.guc.submission_state.enabled = false;
}
+/**
+ * xe_gt_remove() - Clean up the GT structures before driver removal
+ * @gt: the GT object
+ *
+ * This function should only act on objects/structures that must be cleaned
+ * before the driver removal callback is complete and therefore can't be
+ * deferred to a drmm action.
+ */
+void xe_gt_remove(struct xe_gt *gt)
+{
+ xe_uc_remove(&gt->uc);
+}
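As the kernel-doc notes, this must run from the driver removal callback rather than from a drmm action. A minimal sketch of the assumed call chain follows; the caller name is illustrative, and the link from xe_uc_remove() down to xe_gsc_proxy_remove() is implied by this series rather than shown in full here.

/* Illustrative sketch only: the caller name is an assumption. */
static void xe_device_remove_sketch(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	/* xe_gt_remove() -> xe_uc_remove() -> ... -> xe_gsc_proxy_remove() */
	for_each_gt(gt, xe, id)
		xe_gt_remove(gt);
}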
+
static void gt_fini(struct drm_device *drm, void *arg)
{
struct xe_gt *gt = arg;
@@ -235,7 +248,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
return -ENOMEM;
q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
- hwe, EXEC_QUEUE_FLAG_KERNEL);
+ hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
if (IS_ERR(q)) {
err = PTR_ERR(q);
xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
@@ -252,7 +265,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
}
nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
- 1, hwe, EXEC_QUEUE_FLAG_KERNEL);
+ 1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
if (IS_ERR(nop_q)) {
err = PTR_ERR(nop_q);
xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
@@ -301,9 +314,6 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_gt_topology_init(gt);
- xe_gt_mcr_init(gt);
-
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
if (err)
return err;
@@ -327,7 +337,7 @@ static void dump_pat_on_error(struct xe_gt *gt)
char prefix[32];
snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
- p = drm_debug_printer(prefix);
+ p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);
xe_pat_dump(gt, &p);
}
@@ -341,8 +351,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_hw_fence_irq;
- xe_pat_init(gt);
-
if (!xe_gt_is_media_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
if (err)
@@ -351,22 +359,8 @@ static int gt_fw_domain_init(struct xe_gt *gt)
xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
}
- err = xe_uc_init(&gt->uc);
- if (err)
- goto err_force_wake;
-
- /* Raise GT freq to speed up HuC/GuC load */
- xe_guc_pc_init_early(&gt->uc.guc.pc);
-
- err = xe_uc_init_hwconfig(&gt->uc);
- if (err)
- goto err_force_wake;
-
xe_gt_idle_sysfs_init(&gt->gtidle);
- /* XXX: Fake that we pull the engine mask from hwconfig blob */
- gt->info.engine_mask = gt->info.__engine_mask;
-
/* Enable per hw engine IRQs */
xe_irq_enable_hwe(gt);
@@ -386,6 +380,12 @@ static int gt_fw_domain_init(struct xe_gt *gt)
/* Initialize CCS mode sysfs after early initialization of HW engines */
xe_gt_ccs_mode_sysfs_init(gt);
+ /*
+ * Stash hardware-reported version. Since this register does not exist
+ * on pre-MTL platforms, reading it there will (correctly) return 0.
+ */
+ gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
+
err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
XE_WARN_ON(err);
xe_device_mem_access_put(gt_to_xe(gt));
@@ -428,10 +428,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (err)
goto err_force_wake;
- err = xe_uc_init_post_hwconfig(&gt->uc);
- if (err)
- goto err_force_wake;
-
if (!xe_gt_is_media_type(gt)) {
/*
* USM has its only SA pool to non-block behind user operations
@@ -458,6 +454,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
}
+ err = xe_uc_init_post_hwconfig(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
err = xe_uc_init_hw(&gt->uc);
if (err)
goto err_force_wake;
@@ -487,6 +487,42 @@ err_hw_fence_irq:
return err;
}
+/*
+ * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
+ * enable CTB communication.
+ */
+int xe_gt_init_hwconfig(struct xe_gt *gt)
+{
+ int err;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto out;
+
+ xe_gt_topology_init(gt);
+ xe_gt_mcr_init(gt);
+ xe_pat_init(gt);
+
+ err = xe_uc_init(&gt->uc);
+ if (err)
+ goto out_fw;
+
+ err = xe_uc_init_hwconfig(&gt->uc);
+ if (err)
+ goto out_fw;
+
+ /* XXX: Fake that we pull the engine mask from hwconfig blob */
+ gt->info.engine_mask = gt->info.__engine_mask;
+
+out_fw:
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+out:
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return err;
+}
+
int xe_gt_init(struct xe_gt *gt)
{
int err;
@@ -622,12 +658,12 @@ static int gt_reset(struct xe_gt *gt)
if (err)
goto err_out;
+ xe_gt_tlb_invalidation_reset(gt);
+
err = do_gt_reset(gt);
if (err)
goto err_out;
- xe_gt_tlb_invalidation_reset(gt);
-
err = do_gt_restart(gt);
if (err)
goto err_out;
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 4486e083f5ef..ed6ea8057e35 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -33,6 +33,7 @@ static inline bool xe_fault_inject_gt_reset(void)
#endif
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
+int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
@@ -41,6 +42,7 @@ int xe_gt_suspend(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
+void xe_gt_remove(struct xe_gt *gt);
/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9358f7336889..9fcae65b6469 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
}
if (xe_gt_is_media_type(gt)) {
- sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
- sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 8546cd3cc50d..a7ab9ba645f9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -10,6 +10,7 @@
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
/**
* DOC: GT Multicast/Replicated (MCR) Register Support
@@ -38,6 +39,8 @@
* ``init_steering_*()`` functions is to apply the platform-specific rules for
* each MCR register type to identify a steering target that will select a
* non-terminated instance.
+ *
+ * MCR registers are not available on Virtual Function (VF).
*/
#define STEER_SEMAPHORE XE_REG(0xFD0)
@@ -352,6 +355,9 @@ void xe_gt_mcr_init(struct xe_gt *gt)
BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
+ if (IS_SRIOV_VF(xe))
+ return;
+
spin_lock_init(&gt->mcr_lock);
if (gt->info.type == XE_GT_TYPE_MEDIA) {
@@ -405,6 +411,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ if (IS_SRIOV_VF(xe))
+ return;
+
if (xe->info.platform == XE_DG2) {
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
@@ -588,6 +597,8 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
u32 val;
bool steer;
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
&group, &instance);
@@ -619,6 +630,8 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
{
u32 val;
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
mcr_lock(gt);
val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
mcr_unlock(gt);
@@ -640,6 +653,8 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
u32 value, int group, int instance)
{
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
mcr_lock(gt);
rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
mcr_unlock(gt);
@@ -658,6 +673,8 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
{
struct xe_reg reg = to_xe_reg(reg_mcr);
+ xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));
+
/*
* Synchronize with any unicast operations. Once we have exclusive
* access, the MULTICAST bit should already be set, so there's no need
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73f08f1924df..73c535193a98 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -146,10 +146,12 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
/* ASID to VM */
mutex_lock(&xe->usm.lock);
vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
- if (vm)
+ if (vm && xe_vm_in_fault_mode(vm))
xe_vm_get(vm);
+ else
+ vm = NULL;
mutex_unlock(&xe->usm.lock);
- if (!vm || !xe_vm_in_fault_mode(vm))
+ if (!vm)
return -EINVAL;
retry_userptr:
@@ -285,9 +287,9 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
bool ret = false;
spin_lock_irq(&pf_queue->lock);
- if (pf_queue->head != pf_queue->tail) {
+ if (pf_queue->tail != pf_queue->head) {
desc = (const struct xe_guc_pagefault_desc *)
- (pf_queue->data + pf_queue->head);
+ (pf_queue->data + pf_queue->tail);
pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
@@ -305,7 +307,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
PFD_VIRTUAL_ADDR_LO_SHIFT;
- pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
+ pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
PF_QUEUE_NUM_DW;
ret = true;
}
@@ -318,7 +320,7 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
{
lockdep_assert_held(&pf_queue->lock);
- return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
PF_MSG_LEN_DW;
}
@@ -331,6 +333,11 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
+ /*
+ * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
+ */
+ BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
+
if (unlikely(len != PF_MSG_LEN_DW))
return -EPROTO;
@@ -340,8 +347,8 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
if (!full) {
- memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
- pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
+ memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
+ pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
@@ -387,7 +394,7 @@ static void pf_queue_work_func(struct work_struct *w)
send_pagefault_reply(&gt->uc.guc, &reply);
if (time_after(jiffies, threshold) &&
- pf_queue->head != pf_queue->tail) {
+ pf_queue->tail != pf_queue->head) {
queue_work(gt->usm.pf_wq, w);
break;
}
@@ -562,9 +569,9 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
bool ret = false;
spin_lock(&acc_queue->lock);
- if (acc_queue->head != acc_queue->tail) {
+ if (acc_queue->tail != acc_queue->head) {
desc = (const struct xe_guc_acc_desc *)
- (acc_queue->data + acc_queue->head);
+ (acc_queue->data + acc_queue->tail);
acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
@@ -577,7 +584,7 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);
- acc_queue->head = (acc_queue->head + ACC_MSG_LEN_DW) %
+ acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
ACC_QUEUE_NUM_DW;
ret = true;
}
@@ -605,7 +612,7 @@ static void acc_queue_work_func(struct work_struct *w)
}
if (time_after(jiffies, threshold) &&
- acc_queue->head != acc_queue->tail) {
+ acc_queue->tail != acc_queue->head) {
queue_work(gt->usm.acc_wq, w);
break;
}
@@ -616,7 +623,7 @@ static bool acc_queue_full(struct acc_queue *acc_queue)
{
lockdep_assert_held(&acc_queue->lock);
- return CIRC_SPACE(acc_queue->tail, acc_queue->head, ACC_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
ACC_MSG_LEN_DW;
}
@@ -627,6 +634,11 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
+ /*
+ * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
+ */
+ BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);
+
if (unlikely(len != ACC_MSG_LEN_DW))
return -EPROTO;
@@ -636,9 +648,9 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
spin_lock(&acc_queue->lock);
full = acc_queue_full(acc_queue);
if (!full) {
- memcpy(acc_queue->data + acc_queue->tail, msg,
+ memcpy(acc_queue->data + acc_queue->head, msg,
len * sizeof(u32));
- acc_queue->tail = (acc_queue->tail + len) % ACC_QUEUE_NUM_DW;
+ acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
queue_work(gt->usm.acc_wq, &acc_queue->worker);
} else {
drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 5991bcadd47e..c2b004d3f48e 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -43,4 +43,48 @@
#define xe_gt_WARN_ON_ONCE(_gt, _condition) \
xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition))
+static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+
+ xe_gt_err(gt, "%pV", vaf);
+}
+
+static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+
+ xe_gt_info(gt, "%pV", vaf);
+}
+
+/**
+ * xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err()
+ * @gt: the &xe_gt pointer to use in xe_gt_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_err_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_err,
+ .arg = gt,
+ };
+ return p;
+}
+
+/**
+ * xe_gt_info_printer - Construct a &drm_printer that outputs to xe_gt_info()
+ * @gt: the &xe_gt pointer to use in xe_gt_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_info,
+ .arg = gt,
+ };
+ return p;
+}
+
#endif
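The new printers let per-GT dumps reuse the prefixed xe_gt_err()/xe_gt_info() log levels instead of raw drm_err(). A hypothetical caller, shown only as a sketch (the function name and call site are made up; xe_guc_ct_print() is used this same way later in this series):

/* Sketch only: route a CT dump through the GT-prefixed error log. */
static void demo_dump_ct_on_error(struct xe_gt *gt)
{
	struct drm_printer p = xe_gt_err_printer(gt);

	xe_guc_ct_print(&gt->uc.guc.ct, &p, true);
}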
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
new file mode 100644
index 000000000000..17624b16300a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PRINTK_H_
+#define _XE_GT_SRIOV_PRINTK_H_
+
+#include "xe_gt_printk.h"
+#include "xe_sriov_printk.h"
+
+#define __xe_gt_sriov_printk(gt, _level, fmt, ...) \
+ xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__)
+
+#define xe_gt_sriov_err(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_sriov_notice(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, notice, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_sriov_info(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, info, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_sriov_dbg(_gt, _fmt, ...) \
+ __xe_gt_sriov_printk(_gt, dbg, _fmt, ##__VA_ARGS__)
+
+/* for low level noisy debug messages */
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+#define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) xe_gt_sriov_dbg(_gt, _fmt, ##__VA_ARGS__)
+#else
+#define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) typecheck(struct xe_gt *, (_gt))
+#endif
+
+#endif
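These macros simply prepend the SR-IOV mode prefix to the existing xe_gt_* log levels; the verbose variant compiles down to a typecheck when CONFIG_DRM_XE_DEBUG_SRIOV is off. A hypothetical call site, for illustration only (the function and its vfid parameter are made up):

/* Sketch only: SR-IOV-prefixed GT logging. */
static void demo_report_vf(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_sriov_info(gt, "VF%u provisioned\n", vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u provisioning details follow\n", vfid);
}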
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 7eef23a00d77..f03e077f81a0 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -8,6 +8,7 @@
#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_trace.h"
@@ -30,8 +31,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
break;
trace_xe_gt_tlb_invalidation_fence_timeout(fence);
- drm_err(&gt_to_xe(gt)->drm, "gt%d: TLB invalidation fence timeout, seqno=%d recv=%d",
- gt->info.id, fence->seqno, gt->tlb_invalidation.seqno_recv);
+ xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
+ fence->seqno, gt->tlb_invalidation.seqno_recv);
list_del(&fence->link);
fence->base.error = -ETIME;
@@ -247,6 +248,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
xe_gt_assert(gt, vma);
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist) {
+ if (fence)
+ __invalidation_fence_signal(fence);
+
+ return 0;
+ }
+
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
@@ -312,11 +321,13 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
*/
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
- struct xe_device *xe = gt_to_xe(gt);
struct xe_guc *guc = &gt->uc.guc;
- struct drm_printer p = drm_err_printer(__func__);
int ret;
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist)
+ return 0;
+
/*
* XXX: See above, this algorithm only works if seqno are always in
* order
@@ -325,8 +336,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
tlb_invalidation_seqno_past(gt, seqno),
TLB_TIMEOUT);
if (!ret) {
- drm_err(&xe->drm, "gt%d: TLB invalidation time'd out, seqno=%d, recv=%d\n",
- gt->info.id, seqno, gt->tlb_invalidation.seqno_recv);
+ struct drm_printer p = xe_gt_err_printer(gt);
+
+ xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
+ seqno, gt->tlb_invalidation.seqno_recv);
xe_guc_ct_print(&guc->ct, &p, true);
return -ETIME;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index a8d7f272c30a..5dc62fe1be49 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -84,7 +84,7 @@ void
xe_gt_topology_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- struct drm_printer p = drm_debug_printer("GT topology");
+ struct drm_printer p;
int num_geometry_regs, num_compute_regs;
get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
@@ -107,6 +107,8 @@ xe_gt_topology_init(struct xe_gt *gt)
XE2_GT_COMPUTE_DSS_2);
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
+
xe_gt_topology_dump(gt, &p);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index f74684660475..70c615dd1498 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -103,20 +103,22 @@ struct xe_gt {
/** @info: GT info */
struct {
- /** @type: type of GT */
+ /** @info.type: type of GT */
enum xe_gt_type type;
- /** @id: Unique ID of this GT within the PCI Device */
+ /** @info.id: Unique ID of this GT within the PCI Device */
u8 id;
- /** @reference_clock: clock frequency */
+ /** @info.reference_clock: clock frequency */
u32 reference_clock;
- /** @engine_mask: mask of engines present on GT */
+ /** @info.engine_mask: mask of engines present on GT */
u64 engine_mask;
/**
- * @__engine_mask: mask of engines present on GT read from
+ * @info.__engine_mask: mask of engines present on GT read from
* xe_pci.c, used to fake reading the engine_mask from the
* hwconfig blob.
*/
u64 __engine_mask;
+ /** @info.gmdid: raw GMD_ID value from hardware */
+ u32 gmdid;
} info;
/**
@@ -125,14 +127,14 @@ struct xe_gt {
* specific offset, as well as their own forcewake handling.
*/
struct {
- /** @fw: force wake for GT */
+ /** @mmio.fw: force wake for GT */
struct xe_force_wake fw;
/**
- * @adj_limit: adjust MMIO address if address is below this
+ * @mmio.adj_limit: adjust MMIO address if address is below this
* value
*/
u32 adj_limit;
- /** @adj_offset: offect to add to MMIO address when adjusting */
+ /** @mmio.adj_offset: offset to add to MMIO address when adjusting */
u32 adj_offset;
} mmio;
@@ -144,7 +146,7 @@ struct xe_gt {
/** @reset: state for GT resets */
struct {
/**
- * @worker: work so GT resets can done async allowing to reset
+ * @reset.worker: work so GT resets can be done async, allowing the reset
* code to safely flush all code paths
*/
struct work_struct worker;
@@ -152,36 +154,37 @@ struct xe_gt {
/** @tlb_invalidation: TLB invalidation state */
struct {
- /** @seqno: TLB invalidation seqno, protected by CT lock */
+ /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
#define TLB_INVALIDATION_SEQNO_MAX 0x100000
int seqno;
/**
- * @seqno_recv: last received TLB invalidation seqno, protected by CT lock
+ * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
+ * protected by CT lock
*/
int seqno_recv;
/**
- * @pending_fences: list of pending fences waiting TLB
+ * @tlb_invalidation.pending_fences: list of pending fences waiting for TLB
* invalidations, protected by CT lock
*/
struct list_head pending_fences;
/**
- * @pending_lock: protects @pending_fences and updating
- * @seqno_recv.
+ * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
+ * and updating @tlb_invalidation.seqno_recv.
*/
spinlock_t pending_lock;
/**
- * @fence_tdr: schedules a delayed call to
+ * @tlb_invalidation.fence_tdr: schedules a delayed call to
+ * xe_gt_tlb_fence_timeout after the timeout interval is over.
*/
struct delayed_work fence_tdr;
- /** @fence_context: context for TLB invalidation fences */
+ /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
u64 fence_context;
/**
- * @fence_seqno: seqno to TLB invalidation fences, protected by
+ * @tlb_invalidation.fence_seqno: seqno for TLB invalidation fences, protected by
* tlb_invalidation.lock
*/
u32 fence_seqno;
- /** @lock: protects TLB invalidation fences */
+ /** @tlb_invalidation.lock: protects TLB invalidation fences */
spinlock_t lock;
} tlb_invalidation;
@@ -196,7 +199,7 @@ struct xe_gt {
/** @usm: unified shared memory state */
struct {
/**
- * @bb_pool: Pool from which batchbuffers, for USM operations
+ * @usm.bb_pool: Pool from which batchbuffers, for USM operations
* (e.g. migrations, fixing page tables), are allocated.
* Dedicated pool needed so USM operations to not get blocked
* behind any user operations which may have resulted in a
@@ -204,66 +207,67 @@ struct xe_gt {
*/
struct xe_sa_manager *bb_pool;
/**
- * @reserved_bcs_instance: reserved BCS instance used for USM
+ * @usm.reserved_bcs_instance: reserved BCS instance used for USM
* operations (e.g. mmigrations, fixing page tables)
*/
u16 reserved_bcs_instance;
- /** @pf_wq: page fault work queue, unbound, high priority */
+ /** @usm.pf_wq: page fault work queue, unbound, high priority */
struct workqueue_struct *pf_wq;
- /** @acc_wq: access counter work queue, unbound, high priority */
+ /** @usm.acc_wq: access counter work queue, unbound, high priority */
struct workqueue_struct *acc_wq;
/**
- * @pf_queue: Page fault queue used to sync faults so faults can
+ * @usm.pf_queue: Page fault queue used to sync faults so faults can
* be processed not under the GuC CT lock. The queue is sized so
* it can sync all possible faults (1 per physical engine).
* Multiple queues exists for page faults from different VMs are
* be processed in parallel.
*/
struct pf_queue {
- /** @gt: back pointer to GT */
+ /** @usm.pf_queue.gt: back pointer to GT */
struct xe_gt *gt;
#define PF_QUEUE_NUM_DW 128
- /** @data: data in the page fault queue */
+ /** @usm.pf_queue.data: data in the page fault queue */
u32 data[PF_QUEUE_NUM_DW];
/**
- * @head: head pointer in DWs for page fault queue,
- * moved by worker which processes faults.
+ * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
+ * moved by worker which processes faults (consumer).
*/
- u16 head;
+ u16 tail;
/**
- * @tail: tail pointer in DWs for page fault queue,
- * moved by G2H handler.
+ * @usm.pf_queue.head: head pointer in DWs for page fault queue,
+ * moved by G2H handler (producer).
*/
- u16 tail;
- /** @lock: protects page fault queue */
+ u16 head;
+ /** @usm.pf_queue.lock: protects page fault queue */
spinlock_t lock;
- /** @worker: to process page faults */
+ /** @usm.pf_queue.worker: to process page faults */
struct work_struct worker;
#define NUM_PF_QUEUE 4
} pf_queue[NUM_PF_QUEUE];
/**
- * @acc_queue: Same as page fault queue, cannot process access
+ * @usm.acc_queue: Same as page fault queue, cannot process access
* counters under CT lock.
*/
struct acc_queue {
- /** @gt: back pointer to GT */
+ /** @usm.acc_queue.gt: back pointer to GT */
struct xe_gt *gt;
#define ACC_QUEUE_NUM_DW 128
- /** @data: data in the page fault queue */
+ /** @usm.acc_queue.data: data in the page fault queue */
u32 data[ACC_QUEUE_NUM_DW];
/**
- * @head: head pointer in DWs for page fault queue,
- * moved by worker which processes faults.
+ * @usm.acc_queue.tail: tail pointer in DWs for access counter queue,
+ * moved by worker which processes counters
+ * (consumer).
*/
- u16 head;
+ u16 tail;
/**
- * @tail: tail pointer in DWs for page fault queue,
- * moved by G2H handler.
+ * @usm.acc_queue.head: head pointer in DWs for access counter queue,
+ * moved by G2H handler (producer).
*/
- u16 tail;
- /** @lock: protects page fault queue */
+ u16 head;
+ /** @usm.acc_queue.lock: protects page fault queue */
spinlock_t lock;
- /** @worker: to process access counters */
+ /** @usm.acc_queue.worker: to process access counters */
struct work_struct worker;
#define NUM_ACC_QUEUE 4
} acc_queue[NUM_ACC_QUEUE];
@@ -300,7 +304,7 @@ struct xe_gt {
/** @pcode: GT's PCODE */
struct {
- /** @lock: protecting GT's PCODE mailbox data */
+ /** @pcode.lock: protecting GT's PCODE mailbox data */
struct mutex lock;
} pcode;
@@ -312,32 +316,32 @@ struct xe_gt {
/** @mocs: info */
struct {
- /** @uc_index: UC index */
+ /** @mocs.uc_index: UC index */
u8 uc_index;
- /** @wb_index: WB index, only used on L3_CCS platforms */
+ /** @mocs.wb_index: WB index, only used on L3_CCS platforms */
u8 wb_index;
} mocs;
/** @fuse_topo: GT topology reported by fuse registers */
struct {
- /** @g_dss_mask: dual-subslices usable by geometry */
+ /** @fuse_topo.g_dss_mask: dual-subslices usable by geometry */
xe_dss_mask_t g_dss_mask;
- /** @c_dss_mask: dual-subslices usable by compute */
+ /** @fuse_topo.c_dss_mask: dual-subslices usable by compute */
xe_dss_mask_t c_dss_mask;
- /** @eu_mask_per_dss: EU mask per DSS*/
+ /** @fuse_topo.eu_mask_per_dss: EU mask per DSS */
xe_eu_mask_t eu_mask_per_dss;
} fuse_topo;
/** @steering: register steering for individual HW units */
struct {
- /* @ranges: register ranges used for this steering type */
+ /** @steering.ranges: register ranges used for this steering type */
const struct xe_mmio_range *ranges;
- /** @group_target: target to steer accesses to */
+ /** @steering.group_target: target to steer accesses to */
u16 group_target;
- /** @instance_target: instance to steer accesses to */
+ /** @steering.instance_target: instance to steer accesses to */
u16 instance_target;
} steering[NUM_STEERING_TYPES];
@@ -349,13 +353,13 @@ struct xe_gt {
/** @wa_active: keep track of active workarounds */
struct {
- /** @gt: bitmap with active GT workarounds */
+ /** @wa_active.gt: bitmap with active GT workarounds */
unsigned long *gt;
- /** @engine: bitmap with active engine workarounds */
+ /** @wa_active.engine: bitmap with active engine workarounds */
unsigned long *engine;
- /** @lrc: bitmap with active LRC workarounds */
+ /** @wa_active.lrc: bitmap with active LRC workarounds */
unsigned long *lrc;
- /** @oob: bitmap with active OOB workaroudns */
+ /** @wa_active.oob: bitmap with active OOB workarounds */
unsigned long *oob;
} wa_active;
};
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 0a61390c64a7..0d2a2dd13f11 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -7,9 +7,10 @@
#include <drm/drm_managed.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
-#include "generated/xe_wa_oob.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
@@ -21,9 +22,12 @@
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
+#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_sriov.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
@@ -129,22 +133,24 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
+#define GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
+
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_uc_fw *uc_fw = &guc->fw;
+ struct xe_uc_fw_version *version = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
+
u32 flags = 0;
if (XE_WA(gt, 22012773006))
flags |= GUC_WA_POLLCS;
- if (XE_WA(gt, 16011759253))
- flags |= GUC_WA_GAM_CREDITS;
-
if (XE_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
- if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797))
+ if (XE_WA(gt, 22011391025))
flags |= GUC_WA_DUAL_QUEUE;
/*
@@ -155,9 +161,6 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (GRAPHICS_VERx100(xe) < 1270)
flags |= GUC_WA_PRE_PARSER;
- if (XE_WA(gt, 16011777198))
- flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
-
if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
@@ -168,6 +171,14 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
+ if (XE_WA(gt, 14018913170)) {
+ if (GUC_VER(version->major, version->minor, version->patch) >= GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+ else
+ drm_dbg(&xe->drm, "Skip WA 14018913170: GUC version expected >= 70.7.0, found %u.%u.%u\n",
+ version->major, version->minor, version->patch);
+ }
+
return flags;
}
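GUC_VER() packs major/minor/patch into a single integer (70.7.0 becomes 0x460700), so firmware versions can be compared with plain relational operators, as done for WA 14018913170 above. A small sketch of the same idea; the demo_guc_ver_at_least() helper is illustrative and not part of the driver:

/* Sketch: ordered comparison of packed firmware versions. */
#define DEMO_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))

static bool demo_guc_ver_at_least(u32 major, u32 minor, u32 patch,
				  u32 want_major, u32 want_minor, u32 want_patch)
{
	return DEMO_GUC_VER(major, minor, patch) >=
	       DEMO_GUC_VER(want_major, want_minor, want_patch);
}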
@@ -241,11 +252,54 @@ static void guc_fini(struct drm_device *drm, void *arg)
struct xe_guc *guc = arg;
xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
- xe_guc_pc_fini(&guc->pc);
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
}
+/**
+ * xe_guc_comm_init_early - early initialization of GuC communication
+ * @guc: the &xe_guc to initialize
+ *
+ * Must be called prior to first MMIO communication with GuC firmware.
+ */
+void xe_guc_comm_init_early(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (xe_gt_is_media_type(gt))
+ guc->notify_reg = MED_GUC_HOST_INTERRUPT;
+ else
+ guc->notify_reg = GUC_HOST_INTERRUPT;
+}
+
+static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
+{
+ struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
+ struct xe_device *xe = guc_to_xe(guc);
+ int ret;
+
+ if (!IS_DGFX(guc_to_xe(guc)))
+ return 0;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
+ if (ret)
+ return ret;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
+ if (ret)
+ return ret;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
+ if (ret)
+ return ret;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
int xe_guc_init(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -272,7 +326,7 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
- ret = xe_guc_pc_init(&guc->pc);
+ ret = xe_guc_relay_init(&guc->relay);
if (ret)
goto out;
@@ -282,10 +336,7 @@ int xe_guc_init(struct xe_guc *guc)
guc_init_params(guc);
- if (xe_gt_is_media_type(gt))
- guc->notify_reg = MED_GUC_HOST_INTERRUPT;
- else
- guc->notify_reg = GUC_HOST_INTERRUPT;
+ xe_guc_comm_init_early(guc);
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
@@ -304,8 +355,18 @@ out:
*/
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
+ int ret;
+
+ ret = xe_guc_realloc_post_hwconfig(guc);
+ if (ret)
+ return ret;
+
guc_init_params_post_hwconfig(guc);
+ ret = xe_guc_pc_init(&guc->pc);
+ if (ret)
+ return ret;
+
return xe_guc_ads_init_post_hwconfig(&guc->ads);
}
@@ -429,7 +490,6 @@ static int guc_wait_ucode(struct xe_guc *guc)
if (ret) {
struct drm_device *drm = &xe->drm;
- struct drm_printer p = drm_info_printer(drm->dev);
drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
@@ -451,8 +511,6 @@ static int guc_wait_ucode(struct xe_guc *guc)
SOFT_SCRATCH(13)));
ret = -ENXIO;
}
-
- xe_guc_log_print(&guc->log, &p);
} else {
drm_dbg(&xe->drm, "GuC successfully loaded");
}
@@ -516,6 +574,9 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
xe_guc_ads_populate_minimal(&guc->ads);
+ /* Raise GT freq to speed up HuC/GuC load */
+ xe_guc_pc_init_early(&guc->pc);
+
ret = __xe_guc_upload(guc);
if (ret)
return ret;
@@ -579,10 +640,20 @@ static void guc_enable_irq(struct xe_guc *guc)
int xe_guc_enable_communication(struct xe_guc *guc)
{
+ struct xe_device *xe = guc_to_xe(guc);
int err;
guc_enable_irq(guc);
+ if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
+ if (err)
+ return err;
+ }
+
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
ARAT_EXPIRED_INTRMSK, 0);
@@ -650,7 +721,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- xe_assert(xe, !guc->ct.enabled);
+ xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
xe_assert(xe, len);
xe_assert(xe, len <= VF_SW_FLAG_COUNT);
xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
@@ -707,8 +778,12 @@ timeout:
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
GUC_HXG_ORIGIN_GUC))
goto proto;
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
+ GUC_HXG_TYPE_NO_RESPONSE_BUSY)
+ goto proto;
goto timeout;
+ }
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
@@ -832,7 +907,7 @@ int xe_guc_stop(struct xe_guc *guc)
{
int ret;
- xe_guc_ct_disable(&guc->ct);
+ xe_guc_ct_stop(&guc->ct);
ret = xe_guc_submit_stop(guc);
if (ret)
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index d3e49e7fd7c3..94f2dc5f6f90 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -13,6 +13,7 @@
struct drm_printer;
+void xe_guc_comm_init_early(struct xe_guc *guc);
int xe_guc_init(struct xe_guc *guc);
int xe_guc_init_post_hwconfig(struct xe_guc *guc);
int xe_guc_post_load_init(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 390e6f1bf4e1..6ad4c1a90a78 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -273,7 +273,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
ads->regset_size = calculate_regset_size(gt);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 24a33fa36496..355edd4d758a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -9,16 +9,21 @@
#include <linux/circ_buf.h>
#include <linux/delay.h>
+#include <kunit/static_stub.h>
+
#include <drm/drm_managed.h>
#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
+#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
@@ -28,6 +33,7 @@
struct g2h_fence {
u32 *response_buffer;
u32 seqno;
+ u32 response_data;
u16 response_len;
u16 error;
u16 hint;
@@ -40,6 +46,7 @@ struct g2h_fence {
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
g2h_fence->response_buffer = response_buffer;
+ g2h_fence->response_data = 0;
g2h_fence->response_len = 0;
g2h_fence->fail = false;
g2h_fence->retry = false;
@@ -148,7 +155,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -159,6 +166,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
if (err)
return err;
+ xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
@@ -278,12 +287,35 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
return ret > 0 ? -EPROTO : ret;
}
+static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+ enum xe_guc_ct_state state)
+{
+ mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
+ spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
+
+ xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
+ state == XE_GUC_CT_STATE_STOPPED);
+
+ ct->g2h_outstanding = 0;
+ ct->state = state;
+
+ spin_unlock_irq(&ct->fast_lock);
+
+ /*
+ * Lockdep doesn't like this under the fast lock and the destroy only
+ * needs to be serialized with the send path, which the CT lock provides.
+ */
+ xa_destroy(&ct->fence_lookup);
+
+ mutex_unlock(&ct->lock);
+}
+
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
int err;
- xe_assert(xe, !ct->enabled);
+ xe_assert(xe, !xe_guc_ct_enabled(ct));
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -300,12 +332,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (err)
goto err_out;
- mutex_lock(&ct->lock);
- spin_lock_irq(&ct->fast_lock);
- ct->g2h_outstanding = 0;
- ct->enabled = true;
- spin_unlock_irq(&ct->fast_lock);
- mutex_unlock(&ct->lock);
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
smp_mb();
wake_up_all(&ct->wq);
@@ -319,15 +346,34 @@ err_out:
return err;
}
+static void stop_g2h_handler(struct xe_guc_ct *ct)
+{
+ cancel_work_sync(&ct->g2h_worker);
+}
+
+/**
+ * xe_guc_ct_disable - Set GuC CT to disabled state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
+ * in this transition.
+ */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
- mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
- spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
- ct->enabled = false; /* Finally disable CT communication */
- spin_unlock_irq(&ct->fast_lock);
- mutex_unlock(&ct->lock);
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+ stop_g2h_handler(ct);
+}
- xa_destroy(&ct->fence_lookup);
+/**
+ * xe_guc_ct_stop - Set GuC CT to stopped state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
+ */
+void xe_guc_ct_stop(struct xe_guc_ct *ct)
+{
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ stop_g2h_handler(ct);
}
static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
@@ -448,7 +494,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
} else {
cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
}
@@ -475,13 +521,34 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
return 0;
}
+/*
+ * The CT protocol accepts a 16-bit fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
+
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
struct xe_device *xe = ct_to_xe(ct);
+ u16 seqno;
int ret;
+ xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
xe_assert(xe, !g2h_len || !g2h_fence);
xe_assert(xe, !num_g2h || !g2h_fence);
xe_assert(xe, !g2h_len || num_g2h);
@@ -493,11 +560,18 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- if (unlikely(!ct->enabled)) {
+ if (ct->state == XE_GUC_CT_STATE_DISABLED) {
ret = -ENODEV;
goto out;
}
+ if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+ ret = -ECANCELED;
+ goto out;
+ }
+
+ xe_assert(xe, xe_guc_ct_enabled(ct));
+
if (g2h_fence) {
g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
num_g2h = 1;
@@ -505,7 +579,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
if (g2h_fence_needs_alloc(g2h_fence)) {
void *ptr;
- g2h_fence->seqno = (ct->fence_seqno++ & 0xffff);
+ g2h_fence->seqno = next_ct_seqno(ct, true);
ptr = xa_store(&ct->fence_lookup,
g2h_fence->seqno,
g2h_fence, GFP_ATOMIC);
@@ -514,6 +588,10 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
}
+
+ seqno = g2h_fence->seqno;
+ } else {
+ seqno = next_ct_seqno(ct, false);
}
if (g2h_len)
@@ -523,8 +601,7 @@ retry:
if (unlikely(ret))
goto out_unlock;
- ret = h2g_write(ct, action, len, g2h_fence ? g2h_fence->seqno : 0,
- !!g2h_fence);
+ ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
if (unlikely(ret)) {
if (ret == -EAGAIN)
goto retry;
@@ -682,7 +759,8 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
return false;
#define ct_alive(ct) \
- (ct->enabled && !ct->ctbs.h2g.info.broken && !ct->ctbs.g2h.info.broken)
+ (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
+ !ct->ctbs.g2h.info.broken)
if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
return false;
#undef ct_alive
@@ -752,12 +830,31 @@ retry_same_fence:
ret = -EIO;
}
- return ret > 0 ? 0 : ret;
+ return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
}
+/**
+ * xe_guc_ct_send_recv - Send and receive HXG to the GuC
+ * @ct: the &xe_guc_ct
+ * @action: the dword array with `HXG Request`_ message (can't be NULL)
+ * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
+ * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
+ *
+ * Send a `HXG Request`_ message to the GuC over the CT communication channel and
+ * block until the GuC replies with a `HXG Response`_ message.
+ *
+ * For non-blocking communication with GuC use xe_guc_ct_send().
+ *
+ * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
+ *
+ * Return: response length (in dwords) if &response_buffer was not NULL, or
+ * DATA0 from `HXG Response`_ if &response_buffer was NULL, or
+ * a negative error code on failure.
+ */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer)
{
+ KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
@@ -767,9 +864,20 @@ int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
return guc_ct_send_recv(ct, action, len, response_buffer, true);
}
+static u32 *msg_to_hxg(u32 *msg)
+{
+ return msg + GUC_CTB_MSG_MIN_LEN;
+}
+
+static u32 msg_len_to_hxg_len(u32 len)
+{
+ return len - GUC_CTB_MSG_MIN_LEN;
+}
+
static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
lockdep_assert_held(&ct->lock);
@@ -786,18 +894,41 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
- struct xe_device *xe = ct_to_xe(ct);
- u32 response_len = len - GUC_CTB_MSG_MIN_LEN;
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 hxg_len = msg_len_to_hxg_len(len);
u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
- u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]);
+ u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
struct g2h_fence *g2h_fence;
lockdep_assert_held(&ct->lock);
+ /*
+ * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
+ * Those messages should never fail, so if we do get an error back it
+ * means we're likely doing an illegal operation and the GuC is
+ * rejecting it. We have no way to inform the code that submitted the
+ * H2G that the message was rejected, so we need to escalate the
+ * failure to trigger a reset.
+ */
+ if (fence & CT_SEQNO_UNTRACKED) {
+ if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
+ xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
+ fence,
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
+ else
+ xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
+ type, fence);
+
+ return -EPROTO;
+ }
+
g2h_fence = xa_erase(&ct->fence_lookup, fence);
if (unlikely(!g2h_fence)) {
/* Don't tear down channel, as send could've timed out */
- drm_warn(&xe->drm, "G2H fence (%u) not found!\n", fence);
+ xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
return 0;
}
@@ -806,18 +937,16 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
g2h_fence->fail = true;
- g2h_fence->error =
- FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[1]);
- g2h_fence->hint =
- FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[1]);
+ g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
+ g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
g2h_fence->retry = true;
- g2h_fence->reason =
- FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, msg[1]);
+ g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
} else if (g2h_fence->response_buffer) {
- g2h_fence->response_len = response_len;
- memcpy(g2h_fence->response_buffer, msg + GUC_CTB_MSG_MIN_LEN,
- response_len * sizeof(u32));
+ g2h_fence->response_len = hxg_len;
+ memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
+ } else {
+ g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
}
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
@@ -833,14 +962,13 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_device *xe = ct_to_xe(ct);
- u32 hxg, origin, type;
+ u32 *hxg = msg_to_hxg(msg);
+ u32 origin, type;
int ret;
lockdep_assert_held(&ct->lock);
- hxg = msg[1];
-
- origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg);
+ origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
drm_err(&xe->drm,
"G2H channel broken on read, origin=%d, reset required\n",
@@ -850,7 +978,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
return -EPROTO;
}
- type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg);
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
switch (type) {
case GUC_HXG_TYPE_EVENT:
ret = parse_g2h_event(ct, msg, len);
@@ -876,14 +1004,19 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
- u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
- u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
- u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action, adj_len;
+ u32 *payload;
int ret = 0;
- if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
return 0;
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
+ adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
+
switch (action) {
case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
ret = xe_guc_sched_done_handler(guc, payload, adj_len);
@@ -920,6 +1053,12 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = xe_guc_access_counter_notify_handler(guc, payload,
adj_len);
break;
+ case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ break;
default:
drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
}
@@ -938,15 +1077,22 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 tail, head, len;
s32 avail;
u32 action;
+ u32 *hxg;
+ xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
lockdep_assert_held(&ct->fast_lock);
- if (!ct->enabled)
+ if (ct->state == XE_GUC_CT_STATE_DISABLED)
return -ENODEV;
+ if (ct->state == XE_GUC_CT_STATE_STOPPED)
+ return -ECANCELED;
+
if (g2h->info.broken)
return -EPIPE;
+ xe_assert(xe, xe_guc_ct_enabled(ct));
+
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
avail = tail - g2h->info.head;
@@ -988,10 +1134,11 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
avail * sizeof(u32));
}
- action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+ hxg = msg_to_hxg(msg);
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
if (fast_path) {
- if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
return 0;
switch (action) {
@@ -1017,9 +1164,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
- u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
- u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
- u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
+ u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
int ret = 0;
switch (action) {
@@ -1245,7 +1394,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
return NULL;
}
- if (ct->enabled) {
+ if (xe_guc_ct_enabled(ct)) {
snapshot->ct_enabled = true;
snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
@@ -1271,7 +1420,7 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
return;
if (snapshot->ct_enabled) {
- drm_puts(p, "\nH2G CTB (all sizes in DW):\n");
+ drm_puts(p, "H2G CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->h2g, p);
drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
@@ -1280,7 +1429,7 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
} else {
- drm_puts(p, "\nCT disabled\n");
+ drm_puts(p, "CT disabled\n");
}
}
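A hypothetical caller illustrating the two positive-return modes documented for xe_guc_ct_send_recv() above: with a response buffer the return value is the response length in dwords, without one it is DATA0 from the HXG response header. The action code is made up; GUC_CTB_MAX_DWORDS is the buffer size named in the kernel-doc.

/* Sketch only: DEMO_ACTION is a hypothetical GuC action code. */
#define DEMO_ACTION 0x1234

static int demo_query_guc(struct xe_guc_ct *ct)
{
	u32 action[] = { DEMO_ACTION, 0 };
	u32 response[GUC_CTB_MAX_DWORDS];
	int len;

	/* With a buffer: positive return is the response length in dwords. */
	len = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), response);
	if (len < 0)
		return len;

	/* Without a buffer: positive return is DATA0 of the HXG response. */
	return xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
}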
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index f15f8a4857e0..5083e099064f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -13,6 +13,7 @@ struct drm_printer;
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
+void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
struct xe_guc_ct_snapshot *
@@ -22,11 +23,18 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
+{
+ return ct->state == XE_GUC_CT_STATE_ENABLED;
+}
+
static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
{
+ if (!xe_guc_ct_enabled(ct))
+ return;
+
wake_up_all(&ct->wq);
- if (ct->enabled)
- queue_work(system_unbound_wq, &ct->g2h_worker);
+ queue_work(system_unbound_wq, &ct->g2h_worker);
xe_guc_ct_fast_path(ct);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index d814d4ee3fc6..d29144c9f20b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -73,6 +73,20 @@ struct xe_guc_ct_snapshot {
};
/**
+ * enum xe_guc_ct_state - CT state
+ * @XE_GUC_CT_STATE_NOT_INITIALIZED: CT not initialized, messages not expected in this state
+ * @XE_GUC_CT_STATE_DISABLED: CT disabled, messages not expected in this state
+ * @XE_GUC_CT_STATE_STOPPED: CT stopped, drop messages without errors
+ * @XE_GUC_CT_STATE_ENABLED: CT enabled, messages sent / received in this state
+ */
+enum xe_guc_ct_state {
+ XE_GUC_CT_STATE_NOT_INITIALIZED = 0,
+ XE_GUC_CT_STATE_DISABLED,
+ XE_GUC_CT_STATE_STOPPED,
+ XE_GUC_CT_STATE_ENABLED,
+};
+
+/**
* struct xe_guc_ct - GuC command transport (CT) layer
*
* Includes a pair of CT buffers for bi-directional communication and tracking
@@ -87,17 +101,17 @@ struct xe_guc_ct {
spinlock_t fast_lock;
/** @ctbs: buffers for sending and receiving commands */
struct {
- /** @send: Host to GuC (H2G, send) channel */
+ /** @ctbs.send: Host to GuC (H2G, send) channel */
struct guc_ctb h2g;
- /** @recv: GuC to Host (G2H, receive) channel */
+ /** @ctbs.recv: GuC to Host (G2H, receive) channel */
struct guc_ctb g2h;
} ctbs;
/** @g2h_outstanding: number of outstanding G2H */
u32 g2h_outstanding;
/** @g2h_worker: worker to process G2H messages */
struct work_struct g2h_worker;
- /** @enabled: CT enabled */
- bool enabled;
+ /** @state: CT state */
+ enum xe_guc_ct_state state;
/** @fence_seqno: G2H fence seqno - 16 bits used by CT */
u32 fence_seqno;
/** @fence_lookup: G2H fence lookup */
diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.c b/drivers/gpu/drm/xe/xe_guc_db_mgr.c
new file mode 100644
index 000000000000..8d9a0287df6b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_guc_regs.h"
+
+#include "xe_assert.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_db_mgr.h"
+#include "xe_guc_types.h"
+
+/**
+ * DOC: GuC Doorbells
+ *
+ * The GFX doorbell solution provides a mechanism for submission of workload
+ * to the graphics hardware by a ring3 application without the penalty of
+ * ring transition for each workload submission.
+ *
+ * In SR-IOV mode, the doorbells are treated as a shared resource and the PF must
+ * be able to provision an exclusive range of IDs across VFs, which may want to
+ * use this feature.
+ */
+
+static struct xe_guc *dbm_to_guc(struct xe_guc_db_mgr *dbm)
+{
+ return container_of(dbm, struct xe_guc, dbm);
+}
+
+static struct xe_gt *dbm_to_gt(struct xe_guc_db_mgr *dbm)
+{
+ return guc_to_gt(dbm_to_guc(dbm));
+}
+
+static struct xe_device *dbm_to_xe(struct xe_guc_db_mgr *dbm)
+{
+ return gt_to_xe(dbm_to_gt(dbm));
+}
+
+#define dbm_assert(_dbm, _cond) xe_gt_assert(dbm_to_gt(_dbm), _cond)
+#define dbm_mutex(_dbm) (&dbm_to_guc(_dbm)->submission_state.lock)
+
+static void dbm_print_locked(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent);
+
+static void __fini_dbm(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_db_mgr *dbm = arg;
+ unsigned int weight;
+
+ mutex_lock(dbm_mutex(dbm));
+
+ weight = bitmap_weight(dbm->bitmap, dbm->count);
+ if (weight) {
+ struct drm_printer p = xe_gt_info_printer(dbm_to_gt(dbm));
+
+ xe_gt_err(dbm_to_gt(dbm), "GuC doorbells manager unclean (%u/%u)\n",
+ weight, dbm->count);
+ dbm_print_locked(dbm, &p, 1);
+ }
+
+ bitmap_free(dbm->bitmap);
+ dbm->bitmap = NULL;
+ dbm->count = 0;
+
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+/**
+ * xe_guc_db_mgr_init() - Initialize GuC Doorbells Manager.
+ * @dbm: the &xe_guc_db_mgr to initialize
+ * @count: number of doorbells to manage
+ *
+ * The bare-metal or PF driver can pass ~0 as &count to indicate that all
+ * doorbells supported by the hardware are available for use.
+ *
+ * Only VF drivers have to provide an explicit number of doorbell IDs
+ * that they can use.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count)
+{
+ int ret;
+
+ if (count == ~0)
+ count = GUC_NUM_DOORBELLS;
+
+ dbm_assert(dbm, !dbm->bitmap);
+ dbm_assert(dbm, count <= GUC_NUM_DOORBELLS);
+
+ if (!count)
+ goto done;
+
+ dbm->bitmap = bitmap_zalloc(count, GFP_KERNEL);
+ if (!dbm->bitmap)
+ return -ENOMEM;
+ dbm->count = count;
+
+ ret = drmm_add_action_or_reset(&dbm_to_xe(dbm)->drm, __fini_dbm, dbm);
+ if (ret)
+ return ret;
+done:
+ xe_gt_dbg(dbm_to_gt(dbm), "using %u doorbell(s)\n", dbm->count);
+ return 0;
+}
+
+static int dbm_reserve_chunk_locked(struct xe_guc_db_mgr *dbm,
+ unsigned int count, unsigned int spare)
+{
+ unsigned int used;
+ int index;
+
+ dbm_assert(dbm, count);
+ dbm_assert(dbm, count <= GUC_NUM_DOORBELLS);
+ dbm_assert(dbm, dbm->count <= GUC_NUM_DOORBELLS);
+ lockdep_assert_held(dbm_mutex(dbm));
+
+ if (!dbm->count)
+ return -ENODATA;
+
+ if (spare) {
+ used = bitmap_weight(dbm->bitmap, dbm->count);
+ if (used + count + spare > dbm->count)
+ return -EDQUOT;
+ }
+
+ index = bitmap_find_next_zero_area(dbm->bitmap, dbm->count, 0, count, 0);
+ if (index >= dbm->count)
+ return -ENOSPC;
+
+ bitmap_set(dbm->bitmap, index, count);
+
+ return index;
+}
+
+static void dbm_release_chunk_locked(struct xe_guc_db_mgr *dbm,
+ unsigned int start, unsigned int count)
+{
+ dbm_assert(dbm, count);
+ dbm_assert(dbm, count <= GUC_NUM_DOORBELLS);
+ dbm_assert(dbm, dbm->count);
+ dbm_assert(dbm, dbm->count <= GUC_NUM_DOORBELLS);
+ lockdep_assert_held(dbm_mutex(dbm));
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ unsigned int n;
+
+ for (n = 0; n < count; n++)
+ dbm_assert(dbm, test_bit(start + n, dbm->bitmap));
+ }
+ bitmap_clear(dbm->bitmap, start, count);
+}
+
+/**
+ * xe_guc_db_mgr_reserve_id_locked() - Reserve a single GuC Doorbell ID.
+ * @dbm: the &xe_guc_db_mgr
+ *
+ * This function expects that the submission lock is already taken.
+ *
+ * Return: ID of the allocated GuC doorbell or a negative error code on failure.
+ */
+int xe_guc_db_mgr_reserve_id_locked(struct xe_guc_db_mgr *dbm)
+{
+ return dbm_reserve_chunk_locked(dbm, 1, 0);
+}
+
+/**
+ * xe_guc_db_mgr_release_id_locked() - Release a single GuC Doorbell ID.
+ * @dbm: the &xe_guc_db_mgr
+ * @id: the GuC Doorbell ID to release
+ *
+ * This function expects that the submission lock is already taken.
+ */
+void xe_guc_db_mgr_release_id_locked(struct xe_guc_db_mgr *dbm, unsigned int id)
+{
+ return dbm_release_chunk_locked(dbm, id, 1);
+}
+
+/**
+ * xe_guc_db_mgr_reserve_range() - Reserve a range of GuC Doorbell IDs.
+ * @dbm: the &xe_guc_db_mgr
+ * @count: number of GuC doorbell IDs to reserve
+ * @spare: number of GuC doorbell IDs to keep available
+ *
+ * This function is dedicated for use by the PF, which expects that the
+ * allocated range for the VF will be contiguous and that there will be at
+ * least &spare IDs still available for the PF's use after this reservation.
+ *
+ * Return: starting ID of the allocated GuC doorbell ID range or
+ * a negative error code on failure.
+ */
+int xe_guc_db_mgr_reserve_range(struct xe_guc_db_mgr *dbm,
+ unsigned int count, unsigned int spare)
+{
+ int ret;
+
+ mutex_lock(dbm_mutex(dbm));
+ ret = dbm_reserve_chunk_locked(dbm, count, spare);
+ mutex_unlock(dbm_mutex(dbm));
+
+ return ret;
+}
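+
+/*
+ * Illustrative usage sketch (not part of this patch): PF provisioning code
+ * could reserve a contiguous block of doorbell IDs for a VF while keeping a
+ * few IDs spare for its own use; vf_db_count and PF_SPARE_DBS below are
+ * hypothetical names:
+ *
+ * int start = xe_guc_db_mgr_reserve_range(dbm, vf_db_count, PF_SPARE_DBS);
+ * if (start < 0)
+ * return start;
+ *
+ * and the block would later be returned with:
+ *
+ * xe_guc_db_mgr_release_range(dbm, start, vf_db_count);
+ */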
+
+/**
+ * xe_guc_db_mgr_release_range() - Release a range of Doorbell IDs.
+ * @dbm: the &xe_guc_db_mgr
+ * @start: the starting ID of GuC doorbell ID range to release
+ * @count: number of GuC doorbell IDs to release
+ */
+void xe_guc_db_mgr_release_range(struct xe_guc_db_mgr *dbm,
+ unsigned int start, unsigned int count)
+{
+ mutex_lock(dbm_mutex(dbm));
+ dbm_release_chunk_locked(dbm, start, count);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+static void dbm_print_locked(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent)
+{
+ unsigned int rs, re;
+ unsigned int total;
+
+ drm_printf_indent(p, indent, "count: %u\n", dbm->count);
+ if (!dbm->bitmap)
+ return;
+
+ total = 0;
+ for_each_clear_bitrange(rs, re, dbm->bitmap, dbm->count) {
+ drm_printf_indent(p, indent, "available range: %u..%u (%u)\n",
+ rs, re - 1, re - rs);
+ total += re - rs;
+ }
+ drm_printf_indent(p, indent, "available total: %u\n", total);
+
+ total = 0;
+ for_each_set_bitrange(rs, re, dbm->bitmap, dbm->count) {
+ drm_printf_indent(p, indent, "reserved range: %u..%u (%u)\n",
+ rs, re - 1, re - rs);
+ total += re - rs;
+ }
+ drm_printf_indent(p, indent, "reserved total: %u\n", total);
+}
+
+/**
+ * xe_guc_db_mgr_print() - Print status of GuC Doorbells Manager.
+ * @dbm: the &xe_guc_db_mgr to print
+ * @p: the &drm_printer to print to
+ * @indent: tab indentation level
+ */
+void xe_guc_db_mgr_print(struct xe_guc_db_mgr *dbm,
+ struct drm_printer *p, int indent)
+{
+ mutex_lock(dbm_mutex(dbm));
+ dbm_print_locked(dbm, p, indent);
+ mutex_unlock(dbm_mutex(dbm));
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_db_mgr_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.h b/drivers/gpu/drm/xe/xe_guc_db_mgr.h
new file mode 100644
index 000000000000..c250fa0ca9d6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_DB_MGR_H_
+#define _XE_GUC_DB_MGR_H_
+
+struct drm_printer;
+struct xe_guc_db_mgr;
+
+int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count);
+
+int xe_guc_db_mgr_reserve_id_locked(struct xe_guc_db_mgr *dbm);
+void xe_guc_db_mgr_release_id_locked(struct xe_guc_db_mgr *dbm, unsigned int id);
+
+int xe_guc_db_mgr_reserve_range(struct xe_guc_db_mgr *dbm, unsigned int count, unsigned int spare);
+void xe_guc_db_mgr_release_range(struct xe_guc_db_mgr *dbm, unsigned int start, unsigned int count);
+
+void xe_guc_db_mgr_print(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 4dd5a88a7826..c281fdbfd2d6 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -97,6 +97,7 @@ struct guc_update_exec_queue_policy {
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RENDER_RST_RC6_EXIT BIT(19)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
+#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
index 2a13a00917f8..ea49f3885c10 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -78,7 +78,7 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
return -EINVAL;
bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h b/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h
new file mode 100644
index 000000000000..aeeb573c6842
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_HXG_HELPERS_H_
+#define _XE_GUC_HXG_HELPERS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "abi/guc_messages_abi.h"
+
+/**
+ * hxg_sizeof - Queries size of the object or type (in HXG units).
+ * @T: the object or type
+ *
+ * Force a compilation error if the actual size is not aligned to the HXG unit (u32).
+ *
+ * Return: size in dwords (u32).
+ */
+#define hxg_sizeof(T) (sizeof(T) / sizeof(u32) + BUILD_BUG_ON_ZERO(sizeof(T) % sizeof(u32)))
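+
+/*
+ * For example (illustrative only, assuming a hypothetical 8-byte ABI struct
+ * that is not defined in this patch):
+ *
+ * struct hypothetical_klv { u32 key; u32 value; };
+ *
+ * hxg_sizeof(struct hypothetical_klv) evaluates to 2 (dwords) at compile
+ * time, and would fail to build if the struct size were not a multiple of
+ * sizeof(u32).
+ */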
+
+static inline const char *guc_hxg_type_to_string(unsigned int type)
+{
+ switch (type) {
+ case GUC_HXG_TYPE_REQUEST:
+ return "request";
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ return "fast-request";
+ case GUC_HXG_TYPE_EVENT:
+ return "event";
+ case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+ return "busy";
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ return "retry";
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ return "failure";
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ return "response";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool guc_hxg_type_is_action(unsigned int type)
+{
+ switch (type) {
+ case GUC_HXG_TYPE_REQUEST:
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ case GUC_HXG_TYPE_EVENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool guc_hxg_type_is_reply(unsigned int type)
+{
+ switch (type) {
+ case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline u32 guc_hxg_msg_encode_success(u32 *msg, u32 data0)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+ FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, data0);
+
+ return GUC_HXG_RESPONSE_MSG_MIN_LEN;
+}
+
+static inline u32 guc_hxg_msg_encode_failure(u32 *msg, u32 error, u32 hint)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error);
+
+ return GUC_HXG_FAILURE_MSG_LEN;
+}
+
+static inline u32 guc_hxg_msg_encode_busy(u32 *msg, u32 counter)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_NO_RESPONSE_BUSY) |
+ FIELD_PREP(GUC_HXG_BUSY_MSG_0_COUNTER, counter);
+
+ return GUC_HXG_BUSY_MSG_LEN;
+}
+
+static inline u32 guc_hxg_msg_encode_retry(u32 *msg, u32 reason)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_NO_RESPONSE_RETRY) |
+ FIELD_PREP(GUC_HXG_RETRY_MSG_0_REASON, reason);
+
+ return GUC_HXG_RETRY_MSG_LEN;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index bcd2f4d34081..45135c3520e5 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -84,7 +84,7 @@ int xe_guc_log_init(struct xe_guc_log *log)
struct xe_bo *bo;
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index d91702592520..2839d685631b 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -956,10 +956,12 @@ out:
/**
* xe_guc_pc_fini - Finalize GuC's Power Conservation component
- * @pc: Xe_GuC_PC instance
+ * @drm: DRM device
+ * @arg: opaque pointer that should point to Xe_GuC_PC instance
*/
-void xe_guc_pc_fini(struct xe_guc_pc *pc)
+static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
+ struct xe_guc_pc *pc = arg;
struct xe_device *xe = pc_to_xe(pc);
if (xe->info.skip_guc_pc) {
@@ -969,9 +971,10 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc)
return;
}
+ xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
XE_WARN_ON(xe_guc_pc_stop(pc));
- mutex_destroy(&pc->freq_lock);
+ xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}
/**
@@ -985,11 +988,14 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int err;
if (xe->info.skip_guc_pc)
return 0;
- mutex_init(&pc->freq_lock);
+ err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
+ if (err)
+ return err;
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -998,5 +1004,10 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return PTR_ERR(bo);
pc->bo = bo;
+
+ err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index cecad8e9300b..d3680d89490e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -9,7 +9,6 @@
#include "xe_guc_pc_types.h"
int xe_guc_pc_init(struct xe_guc_pc *pc);
-void xe_guc_pc_fini(struct xe_guc_pc *pc);
int xe_guc_pc_start(struct xe_guc_pc *pc);
int xe_guc_pc_stop(struct xe_guc_pc *pc);
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
new file mode 100644
index 000000000000..c0a2d8d5d3b3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -0,0 +1,941 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
+#include <kunit/static_stub.h>
+#include <kunit/test-bug.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "abi/guc_relay_actions_abi.h"
+#include "abi/guc_relay_communication_abi.h"
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_hxg_helpers.h"
+#include "xe_guc_relay.h"
+#include "xe_guc_relay_types.h"
+#include "xe_sriov.h"
+
+/*
+ * How long should we wait for the response?
+ * XXX this value is subject to profiling.
+ */
+#define RELAY_TIMEOUT_MSEC (2500)
+
+static void relays_worker_fn(struct work_struct *w);
+
+static struct xe_guc *relay_to_guc(struct xe_guc_relay *relay)
+{
+ return container_of(relay, struct xe_guc, relay);
+}
+
+static struct xe_guc_ct *relay_to_ct(struct xe_guc_relay *relay)
+{
+ return &relay_to_guc(relay)->ct;
+}
+
+static struct xe_gt *relay_to_gt(struct xe_guc_relay *relay)
+{
+ return guc_to_gt(relay_to_guc(relay));
+}
+
+static struct xe_device *relay_to_xe(struct xe_guc_relay *relay)
+{
+ return gt_to_xe(relay_to_gt(relay));
+}
+
+#define relay_assert(relay, condition) xe_gt_assert(relay_to_gt(relay), condition)
+#define relay_notice(relay, msg...) xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg)
+#define relay_debug(relay, msg...) xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg)
+
+static int relay_get_totalvfs(struct xe_guc_relay *relay)
+{
+ struct xe_device *xe = relay_to_xe(relay);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ KUNIT_STATIC_STUB_REDIRECT(relay_get_totalvfs, relay);
+ return IS_SRIOV_VF(xe) ? 0 : pci_sriov_get_totalvfs(pdev);
+}
+
+static bool relay_is_ready(struct xe_guc_relay *relay)
+{
+ return mempool_initialized(&relay->pool);
+}
+
+static u32 relay_get_next_rid(struct xe_guc_relay *relay)
+{
+ u32 rid;
+
+ spin_lock(&relay->lock);
+ rid = ++relay->last_rid;
+ spin_unlock(&relay->lock);
+
+ return rid;
+}
+
+/**
+ * struct relay_transaction - internal data used to handle transactions
+ *
+ * Relation between struct relay_transaction members::
+ *
+ * <-------------------- GUC_CTB_MAX_DWORDS -------------->
+ * <-------- GUC_RELAY_MSG_MAX_LEN --->
+ * <--- offset ---> <--- request_len ------->
+ * +----------------+-------------------------+----------+--+
+ * | | | | |
+ * +----------------+-------------------------+----------+--+
+ * ^ ^
+ * / /
+ * request_buf request
+ *
+ * <-------------------- GUC_CTB_MAX_DWORDS -------------->
+ * <-------- GUC_RELAY_MSG_MAX_LEN --->
+ * <--- offset ---> <--- response_len --->
+ * +----------------+----------------------+-------------+--+
+ * | | | | |
+ * +----------------+----------------------+-------------+--+
+ * ^ ^
+ * / /
+ * response_buf response
+ */
+struct relay_transaction {
+ /**
+ * @incoming: indicates whether this transaction represents an incoming
+ * request from the remote VF/PF or an outgoing request
+ * to the remote VF/PF.
+ */
+ bool incoming;
+
+ /**
+ * @remote: PF/VF identifier of the origin (or target) of the relay
+ * request message.
+ */
+ u32 remote;
+
+ /** @rid: identifier of the VF/PF relay message. */
+ u32 rid;
+
+ /**
+ * @request: points to the inner VF/PF request message, copied to the
+ * #request_buf starting at #offset.
+ */
+ u32 *request;
+
+ /** @request_len: length of the inner VF/PF request message. */
+ u32 request_len;
+
+ /**
+ * @response: points to the placeholder buffer where the inner VF/PF
+ * response will be located; for an outgoing transaction
+ * this could be the caller's buffer (if provided), otherwise
+ * it points to the #response_buf starting at #offset.
+ */
+ u32 *response;
+
+ /**
+ * @response_len: length of the inner VF/PF response message (only
+ * if #reply is 0), initially set to the size of the
+ * placeholder buffer where the response message will be
+ * copied.
+ */
+ u32 response_len;
+
+ /**
+ * @offset: offset to the start of the inner VF/PF relay message inside
+ * buffers; this offset is equal to the length of the outer GuC
+ * relay header message.
+ */
+ u32 offset;
+
+ /**
+ * @request_buf: buffer with VF/PF request message including outer
+ * transport message.
+ */
+ u32 request_buf[GUC_CTB_MAX_DWORDS];
+
+ /**
+ * @response_buf: buffer with VF/PF response message including outer
+ * transport message.
+ */
+ u32 response_buf[GUC_CTB_MAX_DWORDS];
+
+ /**
+ * @reply: status of the reply; 0 means that the data pointed to by
+ * #response is valid.
+ */
+ int reply;
+
+ /** @done: completion of the outgoing transaction. */
+ struct completion done;
+
+ /** @link: transaction list link */
+ struct list_head link;
+};
+
+static u32 prepare_pf2guc(u32 *msg, u32 target, u32 rid)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF);
+ msg[1] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, target);
+ msg[2] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, rid);
+
+ return PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN;
+}
+
+static u32 prepare_vf2guc(u32 *msg, u32 rid)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
+ msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid);
+
+ return VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN;
+}
+
+static struct relay_transaction *
+__relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u32 rid,
+ const u32 *action, u32 action_len, u32 *resp, u32 resp_size)
+{
+ struct relay_transaction *txn;
+
+ relay_assert(relay, action_len >= GUC_RELAY_MSG_MIN_LEN);
+ relay_assert(relay, action_len <= GUC_RELAY_MSG_MAX_LEN);
+ relay_assert(relay, !(!!resp ^ !!resp_size));
+ relay_assert(relay, resp_size <= GUC_RELAY_MSG_MAX_LEN);
+ relay_assert(relay, resp_size == 0 || resp_size >= GUC_RELAY_MSG_MIN_LEN);
+
+ if (unlikely(!relay_is_ready(relay)))
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * For incoming requests we can't use GFP_KERNEL as those are delivered
+ * with CTB lock held which is marked as used in the reclaim path.
+ * Btw, that's one of the reasons why we use a mempool here!
+ */
+ txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
+ if (!txn)
+ return ERR_PTR(-ENOMEM);
+
+ txn->incoming = incoming;
+ txn->remote = remote;
+ txn->rid = rid;
+ txn->offset = remote ?
+ prepare_pf2guc(incoming ? txn->response_buf : txn->request_buf, remote, rid) :
+ prepare_vf2guc(incoming ? txn->response_buf : txn->request_buf, rid);
+
+ relay_assert(relay, txn->offset);
+ relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->request_buf));
+ relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->response_buf));
+
+ txn->request = txn->request_buf + txn->offset;
+ memcpy(&txn->request_buf[txn->offset], action, sizeof(u32) * action_len);
+ txn->request_len = action_len;
+
+ txn->response = resp ?: txn->response_buf + txn->offset;
+ txn->response_len = resp_size ?: GUC_RELAY_MSG_MAX_LEN;
+ txn->reply = -ENOMSG;
+ INIT_LIST_HEAD(&txn->link);
+ init_completion(&txn->done);
+
+ return txn;
+}
+
+static struct relay_transaction *
+relay_new_transaction(struct xe_guc_relay *relay, u32 target, const u32 *action, u32 len,
+ u32 *resp, u32 resp_size)
+{
+ u32 rid = relay_get_next_rid(relay);
+
+ return __relay_get_transaction(relay, false, target, rid, action, len, resp, resp_size);
+}
+
+static struct relay_transaction *
+relay_new_incoming_transaction(struct xe_guc_relay *relay, u32 origin, u32 rid,
+ const u32 *action, u32 len)
+{
+ return __relay_get_transaction(relay, true, origin, rid, action, len, NULL, 0);
+}
+
+static void relay_release_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
+{
+ relay_assert(relay, list_empty(&txn->link));
+
+ txn->offset = 0;
+ txn->response = NULL;
+ txn->reply = -ESTALE;
+ mempool_free(txn, &relay->pool);
+}
+
+static int relay_send_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn)
+{
+ u32 len = txn->incoming ? txn->response_len : txn->request_len;
+ u32 *buf = txn->incoming ? txn->response_buf : txn->request_buf;
+ u32 *msg = buf + txn->offset;
+ int ret;
+
+ relay_assert(relay, txn->offset);
+ relay_assert(relay, txn->offset + len <= GUC_CTB_MAX_DWORDS);
+ relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
+ relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);
+
+ relay_debug(relay, "sending %s.%u to %u = %*ph\n",
+ guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
+ txn->rid, txn->remote, (int)sizeof(u32) * len, msg);
+
+ ret = xe_guc_ct_send_block(relay_to_ct(relay), buf, len + txn->offset);
+
+ if (unlikely(ret > 0)) {
+ relay_notice(relay, "Unexpected data=%d from GuC, wrong ABI?\n", ret);
+ ret = -EPROTO;
+ }
+ if (unlikely(ret < 0)) {
+ relay_notice(relay, "Failed to send %s.%x to GuC (%pe) %*ph ...\n",
+ guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, buf[0])),
+ FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, buf[0]),
+ ERR_PTR(ret), (int)sizeof(u32) * txn->offset, buf);
+ relay_notice(relay, "Failed to send %s.%u to %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])),
+ txn->rid, txn->remote, ERR_PTR(ret), (int)sizeof(u32) * len, msg);
+ }
+
+ return ret;
+}
+
+static void __fini_relay(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_relay *relay = arg;
+
+ mempool_exit(&relay->pool);
+}
+
+/**
+ * xe_guc_relay_init - Initialize a &xe_guc_relay
+ * @relay: the &xe_guc_relay to initialize
+ *
+ * Initialize remaining members of &xe_guc_relay that may depend
+ * on the SR-IOV mode.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_relay_init(struct xe_guc_relay *relay)
+{
+ const int XE_RELAY_MEMPOOL_MIN_NUM = 1;
+ struct xe_device *xe = relay_to_xe(relay);
+ int err;
+
+ relay_assert(relay, !relay_is_ready(relay));
+
+ if (!IS_SRIOV(xe))
+ return 0;
+
+ spin_lock_init(&relay->lock);
+ INIT_WORK(&relay->worker, relays_worker_fn);
+ INIT_LIST_HEAD(&relay->pending_relays);
+ INIT_LIST_HEAD(&relay->incoming_actions);
+
+ err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM +
+ relay_get_totalvfs(relay),
+ sizeof(struct relay_transaction));
+ if (err)
+ return err;
+
+ relay_debug(relay, "using mempool with %d elements\n", relay->pool.min_nr);
+
+ return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay);
+}
+
+static u32 to_relay_error(int err)
+{
+ /* XXX: assume that relay errors match errno codes */
+ return err < 0 ? -err : GUC_RELAY_ERROR_UNDISCLOSED;
+}
+
+static int from_relay_error(u32 error)
+{
+ /* XXX: assume that relay errors match errno codes */
+ return error ? -error : -ENODATA;
+}
+
+static u32 sanitize_relay_error(u32 error)
+{
+ /* XXX TBD if generic error codes will be allowed */
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ error = GUC_RELAY_ERROR_UNDISCLOSED;
+ return error;
+}
+
+static u32 sanitize_relay_error_hint(u32 hint)
+{
+ /* XXX TBD if generic error codes will be allowed */
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ hint = 0;
+ return hint;
+}
+
+static u32 prepare_error_reply(u32 *msg, u32 error, u32 hint)
+{
+ msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) |
+ FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error);
+
+ XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_ERROR, error));
+ XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_HINT, hint));
+
+ return GUC_HXG_FAILURE_MSG_LEN;
+}
+
+static void relay_testonly_nop(struct xe_guc_relay *relay)
+{
+ KUNIT_STATIC_STUB_REDIRECT(relay_testonly_nop, relay);
+}
+
+static int relay_send_message_and_wait(struct xe_guc_relay *relay,
+ struct relay_transaction *txn,
+ u32 *buf, u32 buf_size)
+{
+ unsigned long timeout = msecs_to_jiffies(RELAY_TIMEOUT_MSEC);
+ u32 *msg = &txn->request_buf[txn->offset];
+ u32 len = txn->request_len;
+ u32 type, action, data0;
+ int ret;
+ long n;
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+ data0 = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
+
+ relay_debug(relay, "%s.%u to %u action %#x:%u\n",
+ guc_hxg_type_to_string(type),
+ txn->rid, txn->remote, action, data0);
+
+ /* list ordering does not need to match RID ordering */
+ spin_lock(&relay->lock);
+ list_add_tail(&txn->link, &relay->pending_relays);
+ spin_unlock(&relay->lock);
+
+resend:
+ ret = relay_send_transaction(relay, txn);
+ if (unlikely(ret < 0))
+ goto unlink;
+
+wait:
+ n = wait_for_completion_timeout(&txn->done, timeout);
+ if (unlikely(n == 0 && txn->reply)) {
+ ret = -ETIME;
+ goto unlink;
+ }
+
+ relay_debug(relay, "%u.%u reply %d after %u msec\n",
+ txn->remote, txn->rid, txn->reply, jiffies_to_msecs(timeout - n));
+ if (unlikely(txn->reply)) {
+ reinit_completion(&txn->done);
+ if (txn->reply == -EAGAIN)
+ goto resend;
+ if (txn->reply == -EBUSY) {
+ relay_testonly_nop(relay);
+ goto wait;
+ }
+ if (txn->reply > 0)
+ ret = from_relay_error(txn->reply);
+ else
+ ret = txn->reply;
+ goto unlink;
+ }
+
+ relay_debug(relay, "%u.%u response %*ph\n", txn->remote, txn->rid,
+ (int)sizeof(u32) * txn->response_len, txn->response);
+ relay_assert(relay, txn->response_len >= GUC_RELAY_MSG_MIN_LEN);
+ ret = txn->response_len;
+
+unlink:
+ spin_lock(&relay->lock);
+ list_del_init(&txn->link);
+ spin_unlock(&relay->lock);
+
+ if (unlikely(ret < 0)) {
+ relay_notice(relay, "Unsuccessful %s.%u %#x:%u to %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(type), txn->rid,
+ action, data0, txn->remote, ERR_PTR(ret),
+ (int)sizeof(u32) * len, msg);
+ }
+
+ return ret;
+}
+
+static int relay_send_to(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ struct relay_transaction *txn;
+ int ret;
+
+ relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN);
+ relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_HOST);
+ relay_assert(relay, guc_hxg_type_is_action(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])));
+
+ if (unlikely(!relay_is_ready(relay)))
+ return -ENODEV;
+
+ txn = relay_new_transaction(relay, target, msg, len, buf, buf_size);
+ if (IS_ERR(txn))
+ return PTR_ERR(txn);
+
+ switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])) {
+ case GUC_HXG_TYPE_REQUEST:
+ ret = relay_send_message_and_wait(relay, txn, buf, buf_size);
+ break;
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ relay_assert(relay, !GUC_HXG_TYPE_FAST_REQUEST);
+ fallthrough;
+ case GUC_HXG_TYPE_EVENT:
+ ret = relay_send_transaction(relay, txn);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ relay_release_transaction(relay, txn);
+ return ret;
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * xe_guc_relay_send_to_vf - Send a message to the VF.
+ * @relay: the &xe_guc_relay which will send the message
+ * @target: target VF number
+ * @msg: request message to be sent
+ * @len: length of the request message (in dwords, can't be 0)
+ * @buf: placeholder for the response message
+ * @buf_size: size of the response message placeholder (in dwords)
+ *
+ * This function can only be used by the driver running in the SR-IOV PF mode.
+ *
+ * Return: Non-negative response length (in dwords) or
+ * a negative error code on failure.
+ */
+int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ relay_assert(relay, IS_SRIOV_PF(relay_to_xe(relay)));
+
+ return relay_send_to(relay, target, msg, len, buf, buf_size);
+}
+#endif
+
+/**
+ * xe_guc_relay_send_to_pf - Send a message to the PF.
+ * @relay: the &xe_guc_relay which will send the message
+ * @msg: request message to be sent
+ * @len: length of the message (in dwords, can't be 0)
+ * @buf: placeholder for the response message
+ * @buf_size: size of the response message placeholder (in dwords)
+ *
+ * This function can only be used by the driver running in SR-IOV VF mode.
+ *
+ * Return: Non-negative response length (in dwords) or
+ * a negative error code on failure.
+ */
+int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ relay_assert(relay, IS_SRIOV_VF(relay_to_xe(relay)));
+
+ return relay_send_to(relay, PFID, msg, len, buf, buf_size);
+}
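+
+/*
+ * Illustrative usage sketch (not part of this patch): a VF could send a
+ * blocking relay request and wait for the inner response; request[] below
+ * is a hypothetical, caller-prepared HXG request message:
+ *
+ * u32 response[GUC_RELAY_MSG_MAX_LEN];
+ * int ret = xe_guc_relay_send_to_pf(relay, request, ARRAY_SIZE(request),
+ * response, ARRAY_SIZE(response));
+ *
+ * On success, ret holds the length of the received response (in dwords).
+ */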
+
+static int relay_handle_reply(struct xe_guc_relay *relay, u32 origin,
+ u32 rid, int reply, const u32 *msg, u32 len)
+{
+ struct relay_transaction *pending;
+ int err = -ESRCH;
+
+ spin_lock(&relay->lock);
+ list_for_each_entry(pending, &relay->pending_relays, link) {
+ if (pending->remote != origin || pending->rid != rid) {
+ relay_debug(relay, "%u.%u still awaits response\n",
+ pending->remote, pending->rid);
+ continue;
+ }
+ err = 0; /* found! */
+ if (reply == 0) {
+ if (len > pending->response_len) {
+ reply = -ENOBUFS;
+ err = -ENOBUFS;
+ } else {
+ memcpy(pending->response, msg, 4 * len);
+ pending->response_len = len;
+ }
+ }
+ pending->reply = reply;
+ complete_all(&pending->done);
+ break;
+ }
+ spin_unlock(&relay->lock);
+
+ return err;
+}
+
+static int relay_handle_failure(struct xe_guc_relay *relay, u32 origin,
+ u32 rid, const u32 *msg, u32 len)
+{
+ int error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[0]);
+ u32 hint __maybe_unused = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[0]);
+
+ relay_assert(relay, len);
+ relay_debug(relay, "%u.%u error %#x (%pe) hint %u debug %*ph\n",
+ origin, rid, error, ERR_PTR(-error), hint, 4 * (len - 1), msg + 1);
+
+ return relay_handle_reply(relay, origin, rid, error ?: -EREMOTEIO, NULL, 0);
+}
+
+static int relay_testloop_action_handler(struct xe_guc_relay *relay, u32 origin,
+ const u32 *msg, u32 len, u32 *response, u32 size)
+{
+ static ktime_t last_reply = 0;
+ u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+ u32 action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+ u32 opcode = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
+ ktime_t now = ktime_get();
+ bool busy;
+ int ret;
+
+ relay_assert(relay, guc_hxg_type_is_action(type));
+ relay_assert(relay, action == GUC_RELAY_ACTION_VFXPF_TESTLOOP);
+
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV))
+ return -ECONNREFUSED;
+
+ if (!last_reply)
+ last_reply = now;
+ busy = ktime_before(now, ktime_add_ms(last_reply, 2 * RELAY_TIMEOUT_MSEC));
+ if (!busy)
+ last_reply = now;
+
+ switch (opcode) {
+ case VFXPF_TESTLOOP_OPCODE_NOP:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return 0;
+ return guc_hxg_msg_encode_success(response, 0);
+ case VFXPF_TESTLOOP_OPCODE_BUSY:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return -EPROTO;
+ msleep(RELAY_TIMEOUT_MSEC / 8);
+ if (busy)
+ return -EINPROGRESS;
+ return guc_hxg_msg_encode_success(response, 0);
+ case VFXPF_TESTLOOP_OPCODE_RETRY:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return -EPROTO;
+ msleep(RELAY_TIMEOUT_MSEC / 8);
+ if (busy)
+ return guc_hxg_msg_encode_retry(response, 0);
+ return guc_hxg_msg_encode_success(response, 0);
+ case VFXPF_TESTLOOP_OPCODE_ECHO:
+ if (type == GUC_HXG_TYPE_EVENT)
+ return -EPROTO;
+ if (size < len)
+ return -ENOBUFS;
+ ret = guc_hxg_msg_encode_success(response, len);
+ memcpy(response + ret, msg + ret, (len - ret) * sizeof(u32));
+ return len;
+ case VFXPF_TESTLOOP_OPCODE_FAIL:
+ return -EHWPOISON;
+ default:
+ break;
+ }
+
+ relay_notice(relay, "Unexpected action %#x opcode %#x\n", action, opcode);
+ return -EBADRQC;
+}
+
+static int relay_action_handler(struct xe_guc_relay *relay, u32 origin,
+ const u32 *msg, u32 len, u32 *response, u32 size)
+{
+ u32 type;
+ int ret;
+
+ relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);
+
+ if (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]) == GUC_RELAY_ACTION_VFXPF_TESTLOOP)
+ return relay_testloop_action_handler(relay, origin, msg, len, response, size);
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+
+ /* XXX: PF services will be added later */
+ ret = -EOPNOTSUPP;
+
+ if (type == GUC_HXG_TYPE_EVENT)
+ relay_assert(relay, ret <= 0);
+
+ return ret;
+}
+
+static struct relay_transaction *relay_dequeue_transaction(struct xe_guc_relay *relay)
+{
+ struct relay_transaction *txn;
+
+ spin_lock(&relay->lock);
+ txn = list_first_entry_or_null(&relay->incoming_actions, struct relay_transaction, link);
+ if (txn)
+ list_del_init(&txn->link);
+ spin_unlock(&relay->lock);
+
+ return txn;
+}
+
+static void relay_process_incoming_action(struct xe_guc_relay *relay)
+{
+ struct relay_transaction *txn;
+ bool again = false;
+ u32 type;
+ int ret;
+
+ txn = relay_dequeue_transaction(relay);
+ if (!txn)
+ return;
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, txn->request_buf[txn->offset]);
+
+ ret = relay_action_handler(relay, txn->remote,
+ txn->request_buf + txn->offset, txn->request_len,
+ txn->response_buf + txn->offset,
+ ARRAY_SIZE(txn->response_buf) - txn->offset);
+
+ if (ret == -EINPROGRESS) {
+ again = true;
+ ret = guc_hxg_msg_encode_busy(txn->response_buf + txn->offset, 0);
+ }
+
+ if (ret > 0) {
+ txn->response_len = ret;
+ ret = relay_send_transaction(relay, txn);
+ }
+
+ if (ret < 0) {
+ u32 error = to_relay_error(ret);
+
+ relay_notice(relay, "Failed to handle %s.%u from %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(type), txn->rid, txn->remote,
+ ERR_PTR(ret), 4 * txn->request_len, txn->request_buf + txn->offset);
+
+ txn->response_len = prepare_error_reply(txn->response_buf + txn->offset,
+ txn->remote ?
+ sanitize_relay_error(error) : error,
+ txn->remote ?
+ sanitize_relay_error_hint(-ret) : -ret);
+ ret = relay_send_transaction(relay, txn);
+ again = false;
+ }
+
+ if (again) {
+ spin_lock(&relay->lock);
+ list_add(&txn->link, &relay->incoming_actions);
+ spin_unlock(&relay->lock);
+ return;
+ }
+
+ if (unlikely(ret < 0))
+ relay_notice(relay, "Failed to process action.%u (%pe) %*ph\n",
+ txn->rid, ERR_PTR(ret), 4 * txn->request_len,
+ txn->request_buf + txn->offset);
+
+ relay_release_transaction(relay, txn);
+}
+
+static bool relay_needs_worker(struct xe_guc_relay *relay)
+{
+ return !list_empty(&relay->incoming_actions);
+}
+
+static void relay_kick_worker(struct xe_guc_relay *relay)
+{
+ KUNIT_STATIC_STUB_REDIRECT(relay_kick_worker, relay);
+ queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker);
+}
+
+static void relays_worker_fn(struct work_struct *w)
+{
+ struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker);
+
+ relay_process_incoming_action(relay);
+
+ if (relay_needs_worker(relay))
+ relay_kick_worker(relay);
+}
+
+static int relay_queue_action_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
+ const u32 *msg, u32 len)
+{
+ struct relay_transaction *txn;
+
+ txn = relay_new_incoming_transaction(relay, origin, rid, msg, len);
+ if (IS_ERR(txn))
+ return PTR_ERR(txn);
+
+ spin_lock(&relay->lock);
+ list_add_tail(&txn->link, &relay->incoming_actions);
+ spin_unlock(&relay->lock);
+
+ relay_kick_worker(relay);
+ return 0;
+}
+
+static int relay_process_msg(struct xe_guc_relay *relay, u32 origin, u32 rid,
+ const u32 *msg, u32 len)
+{
+ u32 type;
+ int err;
+
+ if (unlikely(len < GUC_HXG_MSG_MIN_LEN))
+ return -EPROTO;
+
+ if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST)
+ return -EPROTO;
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]);
+ relay_debug(relay, "received %s.%u from %u = %*ph\n",
+ guc_hxg_type_to_string(type), rid, origin, 4 * len, msg);
+
+ switch (type) {
+ case GUC_HXG_TYPE_REQUEST:
+ case GUC_HXG_TYPE_FAST_REQUEST:
+ case GUC_HXG_TYPE_EVENT:
+ err = relay_queue_action_msg(relay, origin, rid, msg, len);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ err = relay_handle_reply(relay, origin, rid, 0, msg, len);
+ break;
+ case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+ err = relay_handle_reply(relay, origin, rid, -EBUSY, NULL, 0);
+ break;
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ err = relay_handle_reply(relay, origin, rid, -EAGAIN, NULL, 0);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ err = relay_handle_failure(relay, origin, rid, msg, len);
+ break;
+ default:
+ err = -EBADRQC;
+ }
+
+ if (unlikely(err))
+ relay_notice(relay, "Failed to process %s.%u from %u (%pe) %*ph\n",
+ guc_hxg_type_to_string(type), rid, origin,
+ ERR_PTR(err), 4 * len, msg);
+
+ return err;
+}
+
+/**
+ * xe_guc_relay_process_guc2vf - Handle relay notification message from the GuC.
+ * @relay: the &xe_guc_relay which will handle the message
+ * @msg: message to be handled
+ * @len: length of the message (in dwords)
+ *
+ * This function will handle relay messages received from the GuC.
+ *
+ * This function can only be used if the driver is running in SR-IOV mode.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
+{
+ u32 rid;
+
+ relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+ relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+ XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF);
+
+ if (unlikely(!IS_SRIOV_VF(relay_to_xe(relay)) && !kunit_get_current_test()))
+ return -EPERM;
+
+ if (unlikely(!relay_is_ready(relay)))
+ return -ENODEV;
+
+ if (unlikely(len < GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN))
+ return -EPROTO;
+
+ if (unlikely(len > GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN))
+ return -EMSGSIZE;
+
+ if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
+ return -EPFNOSUPPORT;
+
+ rid = FIELD_GET(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, msg[1]);
+
+ return relay_process_msg(relay, PFID, rid,
+ msg + GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN,
+ len - GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN);
+}
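+
+/*
+ * Illustrative usage sketch (not part of this patch): the CTB G2H event
+ * handler could forward a GUC2VF_RELAY_FROM_PF notification here; hxg and
+ * hxg_len are assumed to describe the received HXG event message:
+ *
+ * case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
+ * ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
+ * break;
+ */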
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * xe_guc_relay_process_guc2pf - Handle relay notification message from the GuC.
+ * @relay: the &xe_guc_relay which will handle the message
+ * @msg: message to be handled
+ * @len: length of the message (in dwords)
+ *
+ * This function will handle relay messages received from the GuC.
+ *
+ * This function can only be used if the driver is running in SR-IOV PF mode.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
+{
+ u32 origin, rid;
+ int err;
+
+ relay_assert(relay, len >= GUC_HXG_EVENT_MSG_MIN_LEN);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+ relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+ relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+ XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF);
+
+ if (unlikely(!IS_SRIOV_PF(relay_to_xe(relay)) && !kunit_get_current_test()))
+ return -EPERM;
+
+ if (unlikely(!relay_is_ready(relay)))
+ return -ENODEV;
+
+ if (unlikely(len < GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN))
+ return -EPROTO;
+
+ if (unlikely(len > GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN))
+ return -EMSGSIZE;
+
+ if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0])))
+ return -EPFNOSUPPORT;
+
+ origin = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, msg[1]);
+ rid = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, msg[2]);
+
+ if (unlikely(origin > relay_get_totalvfs(relay)))
+ return -ENOENT;
+
+ err = relay_process_msg(relay, origin, rid,
+ msg + GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN,
+ len - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN);
+
+ return err;
+}
+#endif
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_relay_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.h b/drivers/gpu/drm/xe/xe_guc_relay.h
new file mode 100644
index 000000000000..385429aa188a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_relay.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_RELAY_H_
+#define _XE_GUC_RELAY_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct xe_guc_relay;
+
+int xe_guc_relay_init(struct xe_guc_relay *relay);
+
+int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size);
+
+int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len);
+
+#ifdef CONFIG_PCI_IOV
+int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size);
+int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len);
+#else
+static inline int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target,
+ const u32 *msg, u32 len, u32 *buf, u32 buf_size)
+{
+ return -ENODEV;
+}
+static inline int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_relay_types.h b/drivers/gpu/drm/xe/xe_guc_relay_types.h
new file mode 100644
index 000000000000..5999fcb77e96
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_relay_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_RELAY_TYPES_H_
+#define _XE_GUC_RELAY_TYPES_H_
+
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct xe_guc_relay - Data used by the VF-PF Relay Communication over GuC.
+ */
+struct xe_guc_relay {
+ /** @lock: protects all internal data. */
+ spinlock_t lock;
+
+ /** @worker: dispatches incoming action messages. */
+ struct work_struct worker;
+
+ /** @pending_relays: list of sent requests that await a response. */
+ struct list_head pending_relays;
+
+ /** @incoming_actions: list of incoming relay action messages to process. */
+ struct list_head incoming_actions;
+
+ /** @pool: pool of the relay message buffers. */
+ mempool_t pool;
+
+ /** @last_rid: last Relay-ID used while sending a message. */
+ u32 last_rid;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 54ffcfcdd41f..ff77bc8da1b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -23,6 +23,7 @@
#include "xe_force_wake.h"
#include "xe_gpu_scheduler.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
@@ -311,7 +312,7 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
q->guc->id - GUC_ID_START_MLRC,
order_base_2(q->width));
else
- ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
+ ida_free(&guc->submission_state.guc_ids, q->guc->id);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -335,8 +336,8 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
order_base_2(q->width));
} else {
- ret = ida_simple_get(&guc->submission_state.guc_ids, 0,
- GUC_ID_NUMBER_SLRC, GFP_NOWAIT);
+ ret = ida_alloc_max(&guc->submission_state.guc_ids,
+ GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
}
if (ret < 0)
return ret;
@@ -811,7 +812,8 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
static void simple_error_capture(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct drm_printer p = drm_err_printer("");
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_printer p = drm_err_printer(&xe->drm, NULL);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
u32 adj_logical_mask = q->logical_mask;
@@ -928,13 +930,15 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int i = 0;
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)));
-
drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
xe_sched_job_seqno(job), q->guc->id, q->flags);
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
+ "Kernel-submitted job timed out\n");
+ xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
+ "VM job timed out on non-killed execqueue\n");
+
simple_error_capture(q);
- xe_devcoredump(q);
+ xe_devcoredump(job);
} else {
drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
xe_sched_job_seqno(job), q->guc->id, q->flags);
@@ -1028,8 +1032,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
@@ -1218,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
init_waitqueue_head(&ge->suspend_wait);
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
- q->hwe->eclass->sched_props.job_timeout_ms;
+ q->sched_props.job_timeout_ms;
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
get_submit_wq(guc),
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
@@ -1350,21 +1352,6 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
return 0;
}
-static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms)
-{
- struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
-
- xe_assert(xe, !exec_queue_registered(q));
- xe_assert(xe, !exec_queue_banned(q));
- xe_assert(xe, !exec_queue_killed(q));
-
- sched->base.timeout = job_timeout_ms;
-
- return 0;
-}
-
static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
@@ -1415,7 +1402,6 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
- .set_job_timeout = guc_exec_queue_set_job_timeout,
.suspend = guc_exec_queue_suspend,
.suspend_wait = guc_exec_queue_suspend_wait,
.resume = guc_exec_queue_resume,
@@ -1796,7 +1782,7 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
/**
* xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
- * @q: Xe exec queue.
+ * @job: faulty Xe scheduled job.
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -1805,21 +1791,17 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
* caller, using `xe_guc_exec_queue_snapshot_free`.
*/
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
+xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
{
- struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job;
struct xe_guc_submit_exec_queue_snapshot *snapshot;
int i;
snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
- if (!snapshot) {
- drm_err(&xe->drm, "Skipping GuC Engine snapshot entirely.\n");
+ if (!snapshot)
return NULL;
- }
snapshot->guc.id = q->guc->id;
memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
@@ -1835,9 +1817,7 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
GFP_ATOMIC);
- if (!snapshot->lrc) {
- drm_err(&xe->drm, "Skipping GuC Engine LRC snapshot.\n");
- } else {
+ if (snapshot->lrc) {
for (i = 0; i < q->width; ++i) {
struct xe_lrc *lrc = q->lrc + i;
@@ -1865,17 +1845,17 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
sizeof(struct pending_list_snapshot),
GFP_ATOMIC);
- if (!snapshot->pending_list) {
- drm_err(&xe->drm, "Skipping GuC Engine pending_list snapshot.\n");
- } else {
+ if (snapshot->pending_list) {
+ struct xe_sched_job *job_iter;
+
i = 0;
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
snapshot->pending_list[i].seqno =
- xe_sched_job_seqno(job);
+ xe_sched_job_seqno(job_iter);
snapshot->pending_list[i].fence =
- dma_fence_is_signaled(job->fence) ? 1 : 0;
+ dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
snapshot->pending_list[i].finished =
- dma_fence_is_signaled(&job->drm.s_fence->finished)
+ dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
? 1 : 0;
i++;
}
@@ -1961,10 +1941,28 @@ void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *s
static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
struct xe_guc_submit_exec_queue_snapshot *snapshot;
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+ bool found = false;
- snapshot = xe_guc_exec_queue_snapshot_capture(q);
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ if (job->q == q) {
+ xe_sched_job_get(job);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&sched->base.job_list_lock);
+
+ if (!found)
+ return;
+
+ snapshot = xe_guc_exec_queue_snapshot_capture(job);
xe_guc_exec_queue_snapshot_print(snapshot, p);
xe_guc_exec_queue_snapshot_free(snapshot);
+
+ xe_sched_job_put(job);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index fc97869c5b86..723dc2bd8df9 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
struct drm_printer;
-struct xe_exec_queue;
struct xe_guc;
+struct xe_sched_job;
int xe_guc_submit_init(struct xe_guc *guc);
@@ -27,7 +27,7 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
+xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job);
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
index 649b0a852692..72fc0f42b0a5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
@@ -102,9 +102,9 @@ struct xe_guc_submit_exec_queue_snapshot {
/** @sched_props: scheduling properties */
struct {
- /** @timeslice_us: timeslice period in micro-seconds */
+ /** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
- /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
} sched_props;
@@ -118,11 +118,11 @@ struct xe_guc_submit_exec_queue_snapshot {
/** @guc: GuC Engine Snapshot */
struct {
- /** @wqi_head: work queue item head */
+ /** @guc.wqi_head: work queue item head */
u32 wqi_head;
- /** @wqi_tail: work queue item tail */
+ /** @guc.wqi_tail: work queue item tail */
u32 wqi_tail;
- /** @id: GuC id for this exec_queue */
+ /** @guc.id: GuC id for this exec_queue */
u16 id;
} guc;
@@ -133,13 +133,13 @@ struct xe_guc_submit_exec_queue_snapshot {
bool parallel_execution;
/** @parallel: snapshot of the useful parallel scratch */
struct {
- /** @wq_desc: Workqueue description */
+ /** @parallel.wq_desc: Workqueue description */
struct {
- /** @head: Workqueue Head */
+ /** @parallel.wq_desc.head: Workqueue Head */
u32 head;
- /** @tail: Workqueue Tail */
+ /** @parallel.wq_desc.tail: Workqueue Tail */
u32 tail;
- /** @status: Workqueue Status */
+ /** @parallel.wq_desc.status: Workqueue Status */
u32 status;
} wq_desc;
/** @wq: Workqueue Items */
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index cd80802e8918..edcd1a950bd3 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -15,9 +15,23 @@
#include "xe_guc_fwif.h"
#include "xe_guc_log_types.h"
#include "xe_guc_pc_types.h"
+#include "xe_guc_relay_types.h"
#include "xe_uc_fw_types.h"
/**
+ * struct xe_guc_db_mgr - GuC Doorbells Manager.
+ *
+ * Note: GuC Doorbells Manager relies on &xe_guc::submission_state.lock
+ * to protect its members.
+ */
+struct xe_guc_db_mgr {
+ /** @count: number of doorbells to manage */
+ unsigned int count;
+ /** @bitmap: bitmap to track allocated doorbells */
+ unsigned long *bitmap;
+};
+
+/**
* struct xe_guc - Graphic micro controller
*/
struct xe_guc {
@@ -31,45 +45,50 @@ struct xe_guc {
struct xe_guc_ct ct;
/** @pc: GuC Power Conservation */
struct xe_guc_pc pc;
+ /** @dbm: GuC Doorbell Manager */
+ struct xe_guc_db_mgr dbm;
/** @submission_state: GuC submission state */
struct {
- /** @exec_queue_lookup: Lookup an xe_engine from guc_id */
+ /** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup;
- /** @guc_ids: used to allocate new guc_ids, single-lrc */
+ /** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
struct ida guc_ids;
- /** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
+ /** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
unsigned long *guc_ids_bitmap;
- /** @stopped: submissions are stopped */
+ /** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
- /** @lock: protects submission state */
+ /** @submission_state.lock: protects submission state */
struct mutex lock;
- /** @suspend: suspend fence state */
+ /** @submission_state.suspend: suspend fence state */
struct {
- /** @lock: suspend fences lock */
+ /** @submission_state.suspend.lock: suspend fences lock */
spinlock_t lock;
- /** @context: suspend fences context */
+ /** @submission_state.suspend.context: suspend fences context */
u64 context;
- /** @seqno: suspend fences seqno */
+ /** @submission_state.suspend.seqno: suspend fences seqno */
u32 seqno;
} suspend;
#ifdef CONFIG_PROVE_LOCKING
#define NUM_SUBMIT_WQ 256
- /** @submit_wq_pool: submission ordered workqueues pool */
+ /** @submission_state.submit_wq_pool: submission ordered workqueues pool */
struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
- /** @submit_wq_idx: submission ordered workqueue index */
+ /** @submission_state.submit_wq_idx: submission ordered workqueue index */
int submit_wq_idx;
#endif
- /** @enabled: submission is enabled */
+ /** @submission_state.enabled: submission is enabled */
bool enabled;
} submission_state;
/** @hwconfig: Hardware config state */
struct {
- /** @bo: buffer object of the hardware config */
+ /** @hwconfig.bo: buffer object of the hardware config */
struct xe_bo *bo;
- /** @size: size of the hardware config */
+ /** @hwconfig.size: size of the hardware config */
u32 size;
} hwconfig;
+ /** @relay: GuC Relay Communication used in SR-IOV */
+ struct xe_guc_relay relay;
+
/**
* @notify_reg: Register which is written to notify GuC of H2G messages
*/
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index bfdd33b9b23b..1c9d38b6f5f1 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -29,7 +29,7 @@ static void heci_gsc_irq_unmask(struct irq_data *d)
/* generic irq handling */
}
-static struct irq_chip heci_gsc_irq_chip = {
+static const struct irq_chip heci_gsc_irq_chip = {
.name = "gsc_irq_chip",
.irq_mask = heci_gsc_irq_mask,
.irq_unmask = heci_gsc_irq_unmask,
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index eca109791c6a..b545f850087c 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -112,6 +112,25 @@ out:
return ret;
}
+int xe_huc_init_post_hwconfig(struct xe_huc *huc)
+{
+ struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
+ struct xe_device *xe = huc_to_xe(huc);
+ int ret;
+
+ if (!IS_DGFX(huc_to_xe(huc)))
+ return 0;
+
+ if (!xe_uc_fw_is_loadable(&huc->fw))
+ return 0;
+
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
int xe_huc_upload(struct xe_huc *huc)
{
if (!xe_uc_fw_is_loadable(&huc->fw))
diff --git a/drivers/gpu/drm/xe/xe_huc.h b/drivers/gpu/drm/xe/xe_huc.h
index 532017230287..3ab56cc14b00 100644
--- a/drivers/gpu/drm/xe/xe_huc.h
+++ b/drivers/gpu/drm/xe/xe_huc.h
@@ -17,6 +17,7 @@ enum xe_huc_auth_types {
};
int xe_huc_init(struct xe_huc *huc);
+int xe_huc_init_post_hwconfig(struct xe_huc *huc);
int xe_huc_upload(struct xe_huc *huc);
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type);
bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1fa5cf5eea97..b5e83ea172f3 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -25,6 +25,7 @@
#include "xe_reg_sr.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
+#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
@@ -34,6 +35,7 @@ struct engine_info {
const char *name;
unsigned int class : 8;
unsigned int instance : 8;
+ unsigned int irq_offset : 8;
enum xe_force_wake_domains domain;
u32 mmio_base;
};
@@ -43,6 +45,7 @@ static const struct engine_info engine_infos[] = {
.name = "rcs0",
.class = XE_ENGINE_CLASS_RENDER,
.instance = 0,
+ .irq_offset = ilog2(INTR_RCS0),
.domain = XE_FW_RENDER,
.mmio_base = RENDER_RING_BASE,
},
@@ -50,6 +53,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs0",
.class = XE_ENGINE_CLASS_COPY,
.instance = 0,
+ .irq_offset = ilog2(INTR_BCS(0)),
.domain = XE_FW_RENDER,
.mmio_base = BLT_RING_BASE,
},
@@ -57,6 +61,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs1",
.class = XE_ENGINE_CLASS_COPY,
.instance = 1,
+ .irq_offset = ilog2(INTR_BCS(1)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS1_RING_BASE,
},
@@ -64,6 +69,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs2",
.class = XE_ENGINE_CLASS_COPY,
.instance = 2,
+ .irq_offset = ilog2(INTR_BCS(2)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS2_RING_BASE,
},
@@ -71,6 +77,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs3",
.class = XE_ENGINE_CLASS_COPY,
.instance = 3,
+ .irq_offset = ilog2(INTR_BCS(3)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS3_RING_BASE,
},
@@ -78,6 +85,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs4",
.class = XE_ENGINE_CLASS_COPY,
.instance = 4,
+ .irq_offset = ilog2(INTR_BCS(4)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS4_RING_BASE,
},
@@ -85,6 +93,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs5",
.class = XE_ENGINE_CLASS_COPY,
.instance = 5,
+ .irq_offset = ilog2(INTR_BCS(5)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS5_RING_BASE,
},
@@ -92,12 +101,14 @@ static const struct engine_info engine_infos[] = {
.name = "bcs6",
.class = XE_ENGINE_CLASS_COPY,
.instance = 6,
+ .irq_offset = ilog2(INTR_BCS(6)),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS6_RING_BASE,
},
[XE_HW_ENGINE_BCS7] = {
.name = "bcs7",
.class = XE_ENGINE_CLASS_COPY,
+ .irq_offset = ilog2(INTR_BCS(7)),
.instance = 7,
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS7_RING_BASE,
@@ -106,6 +117,7 @@ static const struct engine_info engine_infos[] = {
.name = "bcs8",
.class = XE_ENGINE_CLASS_COPY,
.instance = 8,
+ .irq_offset = ilog2(INTR_BCS8),
.domain = XE_FW_RENDER,
.mmio_base = XEHPC_BCS8_RING_BASE,
},
@@ -114,6 +126,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs0",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 0,
+ .irq_offset = 32 + ilog2(INTR_VCS(0)),
.domain = XE_FW_MEDIA_VDBOX0,
.mmio_base = BSD_RING_BASE,
},
@@ -121,6 +134,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs1",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 1,
+ .irq_offset = 32 + ilog2(INTR_VCS(1)),
.domain = XE_FW_MEDIA_VDBOX1,
.mmio_base = BSD2_RING_BASE,
},
@@ -128,6 +142,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs2",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 2,
+ .irq_offset = 32 + ilog2(INTR_VCS(2)),
.domain = XE_FW_MEDIA_VDBOX2,
.mmio_base = BSD3_RING_BASE,
},
@@ -135,6 +150,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs3",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 3,
+ .irq_offset = 32 + ilog2(INTR_VCS(3)),
.domain = XE_FW_MEDIA_VDBOX3,
.mmio_base = BSD4_RING_BASE,
},
@@ -142,6 +158,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs4",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 4,
+ .irq_offset = 32 + ilog2(INTR_VCS(4)),
.domain = XE_FW_MEDIA_VDBOX4,
.mmio_base = XEHP_BSD5_RING_BASE,
},
@@ -149,6 +166,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs5",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 5,
+ .irq_offset = 32 + ilog2(INTR_VCS(5)),
.domain = XE_FW_MEDIA_VDBOX5,
.mmio_base = XEHP_BSD6_RING_BASE,
},
@@ -156,6 +174,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs6",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 6,
+ .irq_offset = 32 + ilog2(INTR_VCS(6)),
.domain = XE_FW_MEDIA_VDBOX6,
.mmio_base = XEHP_BSD7_RING_BASE,
},
@@ -163,6 +182,7 @@ static const struct engine_info engine_infos[] = {
.name = "vcs7",
.class = XE_ENGINE_CLASS_VIDEO_DECODE,
.instance = 7,
+ .irq_offset = 32 + ilog2(INTR_VCS(7)),
.domain = XE_FW_MEDIA_VDBOX7,
.mmio_base = XEHP_BSD8_RING_BASE,
},
@@ -170,6 +190,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs0",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 0,
+ .irq_offset = 32 + ilog2(INTR_VECS(0)),
.domain = XE_FW_MEDIA_VEBOX0,
.mmio_base = VEBOX_RING_BASE,
},
@@ -177,6 +198,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs1",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 1,
+ .irq_offset = 32 + ilog2(INTR_VECS(1)),
.domain = XE_FW_MEDIA_VEBOX1,
.mmio_base = VEBOX2_RING_BASE,
},
@@ -184,6 +206,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs2",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 2,
+ .irq_offset = 32 + ilog2(INTR_VECS(2)),
.domain = XE_FW_MEDIA_VEBOX2,
.mmio_base = XEHP_VEBOX3_RING_BASE,
},
@@ -191,6 +214,7 @@ static const struct engine_info engine_infos[] = {
.name = "vecs3",
.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
.instance = 3,
+ .irq_offset = 32 + ilog2(INTR_VECS(3)),
.domain = XE_FW_MEDIA_VEBOX3,
.mmio_base = XEHP_VEBOX4_RING_BASE,
},
@@ -198,6 +222,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs0",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 0,
+ .irq_offset = ilog2(INTR_CCS(0)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE0_RING_BASE,
},
@@ -205,6 +230,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs1",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 1,
+ .irq_offset = ilog2(INTR_CCS(1)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE1_RING_BASE,
},
@@ -212,6 +238,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs2",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 2,
+ .irq_offset = ilog2(INTR_CCS(2)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE2_RING_BASE,
},
@@ -219,6 +246,7 @@ static const struct engine_info engine_infos[] = {
.name = "ccs3",
.class = XE_ENGINE_CLASS_COMPUTE,
.instance = 3,
+ .irq_offset = ilog2(INTR_CCS(3)),
.domain = XE_FW_RENDER,
.mmio_base = COMPUTE3_RING_BASE,
},
@@ -289,6 +317,19 @@ static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
xe_rtp_match_first_render_or_compute(gt, hwe);
}
+static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ return false;
+
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
+ hwe->class != XE_ENGINE_CLASS_RENDER)
+ return false;
+
+ return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
+}
+
void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
@@ -319,6 +360,14 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
RCU_MODE_FIXED_SLICE_CCS_MODE))
},
+ /* Disable WMTP if HW doesn't support it */
+ { XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
+ XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
+ XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
+ PREEMPT_GPGPU_LEVEL_MASK,
+ PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
+ },
{}
};
@@ -397,6 +446,7 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
hwe->class = info->class;
hwe->instance = info->instance;
hwe->mmio_base = info->mmio_base;
+ hwe->irq_offset = info->irq_offset;
hwe->domain = info->domain;
hwe->name = info->name;
hwe->fence_irq = &gt->fence_irq[info->class];
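For context, a minimal sketch (hypothetical helper, not part of the patch) of how the new irq_offset field ends up being consumed: it records the engine's bit position across the two GT interrupt dwords (media engines sit in the upper dword, hence the "+ 32" in the table above), and the memory based interrupt code added later in this series turns that bit position into a 16-byte slot in the status page.

static inline u32 example_memirq_status_slot(u16 irq_offset)
{
	/* one 16 B status vector per interrupt bit, see xe_memirq.c below */
	return irq_offset * 16;
}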
@@ -700,7 +750,7 @@ struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
{
struct xe_hw_engine_snapshot *snapshot;
- int len;
+ u64 val;
if (!xe_hw_engine_is_valid(hwe))
return NULL;
@@ -710,11 +760,7 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
if (!snapshot)
return NULL;
- len = strlen(hwe->name) + 1;
- snapshot->name = kzalloc(len, GFP_ATOMIC);
- if (snapshot->name)
- strscpy(snapshot->name, hwe->name, len);
-
+ snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
snapshot->class = hwe->class;
snapshot->logical_instance = hwe->logical_instance;
snapshot->forcewake.domain = hwe->domain;
@@ -722,19 +768,35 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
hwe->domain);
snapshot->mmio_base = hwe->mmio_base;
- snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
- snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe,
- RING_HWS_PGA(0));
- snapshot->reg.ring_execlist_status_lo =
+ /* no more VF accessible data below this point */
+ if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
+ return snapshot;
+
+ snapshot->reg.ring_execlist_status =
hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
- snapshot->reg.ring_execlist_status_hi =
- hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
- snapshot->reg.ring_execlist_sq_contents_lo =
- hw_engine_mmio_read32(hwe,
- RING_EXECLIST_SQ_CONTENTS_LO(0));
- snapshot->reg.ring_execlist_sq_contents_hi =
- hw_engine_mmio_read32(hwe,
- RING_EXECLIST_SQ_CONTENTS_HI(0));
+ val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
+ snapshot->reg.ring_execlist_status |= val << 32;
+
+ snapshot->reg.ring_execlist_sq_contents =
+ hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
+ val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
+ snapshot->reg.ring_execlist_sq_contents |= val << 32;
+
+ snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
+ val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
+ snapshot->reg.ring_acthd |= val << 32;
+
+ snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
+ val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
+ snapshot->reg.ring_bbaddr |= val << 32;
+
+ snapshot->reg.ring_dma_fadd =
+ hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
+ val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
+ snapshot->reg.ring_dma_fadd |= val << 32;
+
+ snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
+ snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
snapshot->reg.ring_head =
hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
@@ -748,16 +810,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
- snapshot->reg.ring_acthd_udw =
- hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
- snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
- snapshot->reg.ring_bbaddr_udw =
- hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
- snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
- snapshot->reg.ring_dma_fadd_udw =
- hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
- snapshot->reg.ring_dma_fadd =
- hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));
if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
@@ -786,33 +838,25 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
snapshot->forcewake.domain, snapshot->forcewake.ref);
drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
- drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
- snapshot->reg.ring_execlist_status_lo);
- drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
- snapshot->reg.ring_execlist_status_hi);
- drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
- snapshot->reg.ring_execlist_sq_contents_lo);
- drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
- snapshot->reg.ring_execlist_sq_contents_hi);
+ drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n",
+ snapshot->reg.ring_execlist_status);
+ drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n",
+ snapshot->reg.ring_execlist_sq_contents);
drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start);
- drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
- drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
+ drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
+ drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
drm_printf(p, "\tRING_MODE: 0x%08x\n",
snapshot->reg.ring_mode);
- drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
- drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
- drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
- drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
- drm_printf(p, "\tACTHD: 0x%08x_%08x\n", snapshot->reg.ring_acthd_udw,
- snapshot->reg.ring_acthd);
- drm_printf(p, "\tBBADDR: 0x%08x_%08x\n", snapshot->reg.ring_bbaddr_udw,
- snapshot->reg.ring_bbaddr);
- drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
- snapshot->reg.ring_dma_fadd_udw,
- snapshot->reg.ring_dma_fadd);
- drm_printf(p, "\tIPEHR: 0x%08x\n\n", snapshot->reg.ipehr);
+ drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
+ drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
+ drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
+ drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
+ drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd);
+ drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
+ drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
+ drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
drm_printf(p, "\tRCU_MODE: 0x%08x\n",
snapshot->reg.rcu_mode);
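The capture path above now folds each LO/HI register pair into a single 64-bit value before printing; a minimal standalone sketch of that pattern (hypothetical helper, not part of the patch):

static inline u64 example_combine_lo_hi(u32 lo, u32 hi)
{
	/* low dword first, high dword shifted into the upper 32 bits */
	return (u64)lo | ((u64)hi << 32);
}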
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index e49bc14f0ecf..2345fb42fa39 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -73,7 +73,7 @@ static ssize_t job_timeout_max_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_max);
}
-static struct kobj_attribute job_timeout_max_attr =
+static const struct kobj_attribute job_timeout_max_attr =
__ATTR(job_timeout_max, 0644, job_timeout_max_show, job_timeout_max_store);
static ssize_t job_timeout_min_store(struct kobject *kobj,
@@ -109,7 +109,7 @@ static ssize_t job_timeout_min_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_min);
}
-static struct kobj_attribute job_timeout_min_attr =
+static const struct kobj_attribute job_timeout_min_attr =
__ATTR(job_timeout_min, 0644, job_timeout_min_show, job_timeout_min_store);
static ssize_t job_timeout_store(struct kobject *kobj,
@@ -142,7 +142,7 @@ static ssize_t job_timeout_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_ms);
}
-static struct kobj_attribute job_timeout_attr =
+static const struct kobj_attribute job_timeout_attr =
__ATTR(job_timeout_ms, 0644, job_timeout_show, job_timeout_store);
static ssize_t job_timeout_default(struct kobject *kobj,
@@ -153,7 +153,7 @@ static ssize_t job_timeout_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.job_timeout_ms);
}
-static struct kobj_attribute job_timeout_def =
+static const struct kobj_attribute job_timeout_def =
__ATTR(job_timeout_ms, 0444, job_timeout_default, NULL);
static ssize_t job_timeout_min_default(struct kobject *kobj,
@@ -164,7 +164,7 @@ static ssize_t job_timeout_min_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.job_timeout_min);
}
-static struct kobj_attribute job_timeout_min_def =
+static const struct kobj_attribute job_timeout_min_def =
__ATTR(job_timeout_min, 0444, job_timeout_min_default, NULL);
static ssize_t job_timeout_max_default(struct kobject *kobj,
@@ -175,7 +175,7 @@ static ssize_t job_timeout_max_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.job_timeout_max);
}
-static struct kobj_attribute job_timeout_max_def =
+static const struct kobj_attribute job_timeout_max_def =
__ATTR(job_timeout_max, 0444, job_timeout_max_default, NULL);
static ssize_t timeslice_duration_store(struct kobject *kobj,
@@ -234,7 +234,7 @@ static ssize_t timeslice_duration_max_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.timeslice_max);
}
-static struct kobj_attribute timeslice_duration_max_attr =
+static const struct kobj_attribute timeslice_duration_max_attr =
__ATTR(timeslice_duration_max, 0644, timeslice_duration_max_show,
timeslice_duration_max_store);
@@ -272,7 +272,7 @@ static ssize_t timeslice_duration_min_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.timeslice_min);
}
-static struct kobj_attribute timeslice_duration_min_attr =
+static const struct kobj_attribute timeslice_duration_min_attr =
__ATTR(timeslice_duration_min, 0644, timeslice_duration_min_show,
timeslice_duration_min_store);
@@ -284,7 +284,7 @@ static ssize_t timeslice_duration_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.timeslice_us);
}
-static struct kobj_attribute timeslice_duration_attr =
+static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_us, 0644, timeslice_duration_show,
timeslice_duration_store);
@@ -296,7 +296,7 @@ static ssize_t timeslice_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.timeslice_us);
}
-static struct kobj_attribute timeslice_duration_def =
+static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_us, 0444, timeslice_default, NULL);
static ssize_t timeslice_min_default(struct kobject *kobj,
@@ -307,7 +307,7 @@ static ssize_t timeslice_min_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.timeslice_min);
}
-static struct kobj_attribute timeslice_duration_min_def =
+static const struct kobj_attribute timeslice_duration_min_def =
__ATTR(timeslice_duration_min, 0444, timeslice_min_default, NULL);
static ssize_t timeslice_max_default(struct kobject *kobj,
@@ -318,7 +318,7 @@ static ssize_t timeslice_max_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.timeslice_max);
}
-static struct kobj_attribute timeslice_duration_max_def =
+static const struct kobj_attribute timeslice_duration_max_def =
__ATTR(timeslice_duration_max, 0444, timeslice_max_default, NULL);
static ssize_t preempt_timeout_store(struct kobject *kobj,
@@ -351,7 +351,7 @@ static ssize_t preempt_timeout_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
}
-static struct kobj_attribute preempt_timeout_attr =
+static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_us, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t preempt_timeout_default(struct kobject *kobj,
@@ -363,7 +363,7 @@ static ssize_t preempt_timeout_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_us);
}
-static struct kobj_attribute preempt_timeout_def =
+static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_us, 0444, preempt_timeout_default, NULL);
static ssize_t preempt_timeout_min_default(struct kobject *kobj,
@@ -375,7 +375,7 @@ static ssize_t preempt_timeout_min_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_min);
}
-static struct kobj_attribute preempt_timeout_min_def =
+static const struct kobj_attribute preempt_timeout_min_def =
__ATTR(preempt_timeout_min, 0444, preempt_timeout_min_default, NULL);
static ssize_t preempt_timeout_max_default(struct kobject *kobj,
@@ -387,7 +387,7 @@ static ssize_t preempt_timeout_max_default(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_max);
}
-static struct kobj_attribute preempt_timeout_max_def =
+static const struct kobj_attribute preempt_timeout_max_def =
__ATTR(preempt_timeout_max, 0444, preempt_timeout_max_default, NULL);
static ssize_t preempt_timeout_max_store(struct kobject *kobj,
@@ -423,7 +423,7 @@ static ssize_t preempt_timeout_max_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
}
-static struct kobj_attribute preempt_timeout_max_attr =
+static const struct kobj_attribute preempt_timeout_max_attr =
__ATTR(preempt_timeout_max, 0644, preempt_timeout_max_show,
preempt_timeout_max_store);
@@ -460,7 +460,7 @@ static ssize_t preempt_timeout_min_show(struct kobject *kobj,
return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
}
-static struct kobj_attribute preempt_timeout_min_attr =
+static const struct kobj_attribute preempt_timeout_min_attr =
__ATTR(preempt_timeout_min, 0644, preempt_timeout_min_show,
preempt_timeout_min_store);
@@ -477,7 +477,7 @@ static const struct attribute *defaults[] = {
NULL
};
-static const struct attribute *files[] = {
+static const struct attribute * const files[] = {
&job_timeout_attr.attr,
&job_timeout_min_attr.attr,
&job_timeout_max_attr.attr,
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 39908dec042a..d7f828c76cc5 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -79,23 +79,23 @@ struct xe_hw_engine_class_intf {
* @defaults: default scheduling properties
*/
struct {
- /** @set_job_timeout: Set job timeout in ms for engine */
+ /** @sched_props.set_job_timeout: Set job timeout in ms for engine */
u32 job_timeout_ms;
- /** @job_timeout_min: Min job timeout in ms for engine */
+ /** @sched_props.job_timeout_min: Min job timeout in ms for engine */
u32 job_timeout_min;
- /** @job_timeout_max: Max job timeout in ms for engine */
+ /** @sched_props.job_timeout_max: Max job timeout in ms for engine */
u32 job_timeout_max;
- /** @timeslice_us: timeslice period in micro-seconds */
+ /** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
- /** @timeslice_min: min timeslice period in micro-seconds */
+ /** @sched_props.timeslice_min: min timeslice period in micro-seconds */
u32 timeslice_min;
- /** @timeslice_max: max timeslice period in micro-seconds */
+ /** @sched_props.timeslice_max: max timeslice period in micro-seconds */
u32 timeslice_max;
- /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
- /** @preempt_timeout_min: min preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_min: min preemption timeout in micro-seconds */
u32 preempt_timeout_min;
- /** @preempt_timeout_max: max preemption timeout in micro-seconds */
+ /** @sched_props.preempt_timeout_max: max preemption timeout in micro-seconds */
u32 preempt_timeout_max;
} sched_props, defaults;
};
@@ -116,6 +116,8 @@ struct xe_hw_engine {
u16 instance;
/** @logical_instance: logical instance of this hw engine */
u16 logical_instance;
+ /** @irq_offset: IRQ offset of this hw engine */
+ u16 irq_offset;
/** @mmio_base: MMIO base address of this hw engine*/
u32 mmio_base;
/**
@@ -162,62 +164,52 @@ struct xe_hw_engine_snapshot {
u16 logical_instance;
/** @forcewake: Force Wake information snapshot */
struct {
- /** @domain: force wake domain of this hw engine */
+ /** @forcewake.domain: force wake domain of this hw engine */
enum xe_force_wake_domains domain;
- /** @ref: Forcewake ref for the above domain */
+ /** @forcewake.ref: Forcewake ref for the above domain */
int ref;
} forcewake;
/** @mmio_base: MMIO base address of this hw engine*/
u32 mmio_base;
/** @reg: Useful MMIO register snapshot */
struct {
- /** @ring_hwstam: RING_HWSTAM */
+ /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */
+ u64 ring_execlist_status;
+ /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */
+ u64 ring_execlist_sq_contents;
+ /** @reg.ring_acthd: RING_ACTHD */
+ u64 ring_acthd;
+ /** @reg.ring_bbaddr: RING_BBADDR */
+ u64 ring_bbaddr;
+ /** @reg.ring_dma_fadd: RING_DMA_FADD */
+ u64 ring_dma_fadd;
+ /** @reg.ring_hwstam: RING_HWSTAM */
u32 ring_hwstam;
- /** @ring_hws_pga: RING_HWS_PGA */
+ /** @reg.ring_hws_pga: RING_HWS_PGA */
u32 ring_hws_pga;
- /** @ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
- u32 ring_execlist_status_lo;
- /** @ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
- u32 ring_execlist_status_hi;
- /** @ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */
- u32 ring_execlist_sq_contents_lo;
- /** @ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */
- u32 ring_execlist_sq_contents_hi;
- /** @ring_start: RING_START */
+ /** @reg.ring_start: RING_START */
u32 ring_start;
- /** @ring_head: RING_HEAD */
+ /** @reg.ring_head: RING_HEAD */
u32 ring_head;
- /** @ring_tail: RING_TAIL */
+ /** @reg.ring_tail: RING_TAIL */
u32 ring_tail;
- /** @ring_ctl: RING_CTL */
+ /** @reg.ring_ctl: RING_CTL */
u32 ring_ctl;
- /** @ring_mi_mode: RING_MI_MODE */
+ /** @reg.ring_mi_mode: RING_MI_MODE */
u32 ring_mi_mode;
- /** @ring_mode: RING_MODE */
+ /** @reg.ring_mode: RING_MODE */
u32 ring_mode;
- /** @ring_imr: RING_IMR */
+ /** @reg.ring_imr: RING_IMR */
u32 ring_imr;
- /** @ring_esr: RING_ESR */
+ /** @reg.ring_esr: RING_ESR */
u32 ring_esr;
- /** @ring_emr: RING_EMR */
+ /** @reg.ring_emr: RING_EMR */
u32 ring_emr;
- /** @ring_eir: RING_EIR */
+ /** @reg.ring_eir: RING_EIR */
u32 ring_eir;
- /** @ring_acthd_udw: RING_ACTHD_UDW */
- u32 ring_acthd_udw;
- /** @ring_acthd: RING_ACTHD */
- u32 ring_acthd;
- /** @ring_bbaddr_udw: RING_BBADDR_UDW */
- u32 ring_bbaddr_udw;
- /** @ring_bbaddr: RING_BBADDR */
- u32 ring_bbaddr;
- /** @ring_dma_fadd_udw: RING_DMA_FADD_UDW */
- u32 ring_dma_fadd_udw;
- /** @ring_dma_fadd: RING_DMA_FADD */
- u32 ring_dma_fadd;
- /** @ipehr: IPEHR */
+ /** @reg.ipehr: IPEHR */
u32 ipehr;
- /** @rcu_mode: RCU_MODE */
+ /** @reg.rcu_mode: RCU_MODE */
u32 rcu_mode;
} reg;
};
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 174ed2185481..b82233a41606 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -10,12 +10,14 @@
#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
+#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
+#include "xe_sriov.h"
enum xe_hwmon_reg {
REG_PKG_RAPL_LIMIT,
@@ -77,32 +79,32 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_RAPL_LIMIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_RAPL_LIMIT;
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PACKAGE_POWER_SKU;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
break;
case REG_GT_PERF_STATUS:
if (xe->info.platform == XE_DG2)
reg = GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_DG2)
- reg = PCU_CR_PACKAGE_ENERGY_STATUS;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
+ else if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_ENERGY_STATUS;
break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
@@ -402,7 +404,7 @@ static const struct attribute_group *hwmon_groups[] = {
NULL
};
-static const struct hwmon_channel_info *hwmon_info[] = {
+static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
@@ -745,6 +747,10 @@ void xe_hwmon_register(struct xe_device *xe)
if (!IS_DGFX(xe))
return;
+ /* hwmon is not available on VFs */
+ if (IS_SRIOV_VF(xe))
+ return;
+
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return;
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index d1f5ba4bb745..2f5d179e0d00 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -9,15 +9,18 @@
#include <drm/drm_managed.h>
+#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
-#include "xe_display.h"
#include "xe_drv.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
/*
* Interrupt registers for a unit are always consecutive and ordered
@@ -129,6 +132,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
u32 ccs_mask, bcs_mask;
u32 irqs, dmask, smask;
u32 gsc_mask = 0;
+ u32 heci_mask = 0;
if (xe_device_uc_enabled(xe)) {
irqs = GT_RENDER_USER_INTERRUPT |
@@ -178,14 +182,23 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
- if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER))
+ /*
+ * the heci2 interrupt is enabled via the same register as the
+ * GSCCS interrupts, but it has its own mask register.
+ */
+ if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs;
- else if (HAS_HECI_GSCFI(xe))
+ heci_mask = GSC_IRQ_INTF(1);
+ } else if (HAS_HECI_GSCFI(xe)) {
gsc_mask = GSC_IRQ_INTF(1);
+ }
+
if (gsc_mask) {
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask);
+ xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
}
+ if (heci_mask)
+ xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
}
}
@@ -232,6 +245,8 @@ gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
return xe_guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
+ return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);
if (instance != OTHER_GUC_INSTANCE &&
instance != OTHER_MEDIA_GUC_INSTANCE) {
@@ -249,15 +264,23 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
if (MEDIA_VER(xe) < 13)
return tile->primary_gt;
- if (class == XE_ENGINE_CLASS_VIDEO_DECODE ||
- class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ switch (class) {
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
return tile->media_gt;
-
- if (class == XE_ENGINE_CLASS_OTHER &&
- (instance == OTHER_MEDIA_GUC_INSTANCE || instance == OTHER_GSC_INSTANCE))
- return tile->media_gt;
-
- return tile->primary_gt;
+ case XE_ENGINE_CLASS_OTHER:
+ switch (instance) {
+ case OTHER_MEDIA_GUC_INSTANCE:
+ case OTHER_GSC_INSTANCE:
+ case OTHER_GSC_HECI2_INSTANCE:
+ return tile->media_gt;
+ default:
+ break;
+ };
+ fallthrough;
+ default:
+ return tile->primary_gt;
+ }
}
static void gt_irq_handler(struct xe_tile *tile,
@@ -419,7 +442,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* irq as device is inaccessible.
*/
if (master_ctl == REG_GENMASK(31, 0)) {
- dev_dbg(tile_to_xe(tile)->drm.dev,
+ drm_dbg(&tile_to_xe(tile)->drm,
"Ignore this IRQ as device might be in DPC containment.\n");
return IRQ_HANDLED;
}
@@ -484,6 +507,7 @@ static void gt_irq_reset(struct xe_tile *tile)
HAS_HECI_GSCFI(tile_to_xe(tile))) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
}
xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
@@ -498,6 +522,9 @@ static void xelp_irq_reset(struct xe_tile *tile)
gt_irq_reset(tile);
+ if (IS_SRIOV_VF(tile_to_xe(tile)))
+ return;
+
mask_and_disable(tile, PCU_IRQ_OFFSET);
}
@@ -508,6 +535,9 @@ static void dg1_irq_reset(struct xe_tile *tile)
gt_irq_reset(tile);
+ if (IS_SRIOV_VF(tile_to_xe(tile)))
+ return;
+
mask_and_disable(tile, PCU_IRQ_OFFSET);
}
@@ -518,11 +548,34 @@ static void dg1_irq_reset_mstr(struct xe_tile *tile)
xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}
+static void vf_irq_reset(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (GRAPHICS_VERx100(xe) < 1210)
+ xelp_intr_disable(xe);
+ else
+ xe_assert(xe, xe_device_has_memirq(xe));
+
+ for_each_tile(tile, xe, id) {
+ if (xe_device_has_memirq(xe))
+ xe_memirq_reset(&tile->sriov.vf.memirq);
+ else
+ gt_irq_reset(tile);
+ }
+}
+
static void xe_irq_reset(struct xe_device *xe)
{
struct xe_tile *tile;
u8 id;
+ if (IS_SRIOV_VF(xe))
+ return vf_irq_reset(xe);
+
for_each_tile(tile, xe, id) {
if (GRAPHICS_VERx100(xe) >= 1210)
dg1_irq_reset(tile);
@@ -545,8 +598,26 @@ static void xe_irq_reset(struct xe_device *xe)
}
}
+static void vf_irq_postinstall(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ if (xe_device_has_memirq(xe))
+ xe_memirq_postinstall(&tile->sriov.vf.memirq);
+
+ if (GRAPHICS_VERx100(xe) < 1210)
+ xelp_intr_enable(xe, true);
+ else
+ xe_assert(xe, xe_device_has_memirq(xe));
+}
+
static void xe_irq_postinstall(struct xe_device *xe)
{
+ if (IS_SRIOV_VF(xe))
+ return vf_irq_postinstall(xe);
+
xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
/*
@@ -563,8 +634,30 @@ static void xe_irq_postinstall(struct xe_device *xe)
xelp_intr_enable(xe, true);
}
+static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ unsigned int id;
+
+ spin_lock(&xe->irq.lock);
+ if (!xe->irq.enabled) {
+ spin_unlock(&xe->irq.lock);
+ return IRQ_NONE;
+ }
+ spin_unlock(&xe->irq.lock);
+
+ for_each_tile(tile, xe, id)
+ xe_memirq_handler(&tile->sriov.vf.memirq);
+
+ return IRQ_HANDLED;
+}
+
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
+ if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
+ return vf_mem_irq_handler;
+
if (GRAPHICS_VERx100(xe) >= 1210)
return dg1_irq_handler;
else
@@ -590,8 +683,9 @@ static void irq_uninstall(struct drm_device *drm, void *arg)
int xe_irq_install(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned int irq_flags = PCI_IRQ_MSIX;
irq_handler_t irq_handler;
- int err, irq;
+ int err, irq, nvec;
irq_handler = xe_irq_handler(xe);
if (!irq_handler) {
@@ -601,7 +695,19 @@ int xe_irq_install(struct xe_device *xe)
xe_irq_reset(xe);
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+ if (nvec == -EINVAL) {
+ /* MSIX capability is not supported in the device, using MSI */
+ irq_flags = PCI_IRQ_MSI;
+ nvec = 1;
+ } else {
+ drm_err(&xe->drm, "MSIX: Failed getting count\n");
+ return nvec;
+ }
+ }
+
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
if (err < 0) {
drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
return err;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 0ec5ad2539f1..7ad853b0788a 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -19,6 +19,8 @@
#include "xe_gt_printk.h"
#include "xe_hw_fence.h"
#include "xe_map.h"
+#include "xe_memirq.h"
+#include "xe_sriov.h"
#include "xe_vm.h"
#define LRC_VALID (1 << 0)
@@ -532,6 +534,27 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
/* TODO: Timestamp */
}
+static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
+{
+ struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->sriov.vf.memirq;
+ struct xe_device *xe = gt_to_xe(hwe->gt);
+
+ if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe))
+ return;
+
+ regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM |
+ MI_LRI_LRM_CS_MMIO | MI_LRM_USE_GGTT;
+ regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr;
+ regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
+
+ regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED;
+ regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr;
+ regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq);
+ regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr;
+ regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq);
+}
+
static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
{
struct xe_device *xe = gt_to_xe(hwe->gt);
@@ -667,6 +690,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
regs = data + LRC_PPHWSP_SIZE;
set_offsets(regs, reg_offsets(xe, hwe->class), hwe);
set_context_control(regs, hwe);
+ set_memory_based_intr(regs, hwe);
reset_stop_ring(regs, hwe);
return data;
@@ -682,8 +706,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
-#define ACC_GRANULARITY_S 20
-#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,13 +776,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
- (q->usm.acc_granularity <<
- ACC_GRANULARITY_S) | vm->usm.asid);
- if (xe->info.has_usm && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
- (q->usm.acc_notify << ACC_NOTIFY_S) |
- q->usm.acc_trigger);
+ xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
@@ -964,6 +980,20 @@ static int dump_mi_command(struct drm_printer *p,
drm_printf(p, " - %#6x = %#010x\n", dw[i], dw[i + 1]);
return numdw;
+ case MI_LOAD_REGISTER_MEM & MI_OPCODE:
+ drm_printf(p, "[%#010x] MI_LOAD_REGISTER_MEM: %s%s\n",
+ inst_header,
+ dw[0] & MI_LRI_LRM_CS_MMIO ? "CS_MMIO " : "",
+ dw[0] & MI_LRM_USE_GGTT ? "USE_GGTT " : "");
+ if (numdw == 4)
+ drm_printf(p, " - %#6x = %#010llx\n",
+ dw[1], ((u64)(dw[3]) << 32 | (u64)(dw[2])));
+ else
+ drm_printf(p, " - %*ph (%s)\n",
+ (int)sizeof(u32) * (numdw - 1), dw + 1,
+ numdw < 4 ? "truncated" : "malformed");
+ return numdw;
+
case MI_FORCE_WAKEUP:
drm_printf(p, "[%#010x] MI_FORCE_WAKEUP\n", inst_header);
return numdw;
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 78220336062c..24f20ed66fd1 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -28,11 +28,11 @@ struct xe_lrc {
/** @ring: submission ring state */
struct {
- /** @size: size of submission ring */
+ /** @ring.size: size of submission ring */
u32 size;
- /** @tail: tail of submission ring */
+ /** @ring.tail: tail of submission ring */
u32 tail;
- /** @old_tail: shadow of tail */
+ /** @ring.old_tail: shadow of tail */
u32 old_tail;
} ring;
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
new file mode 100644
index 000000000000..76e95535d7f6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_regs.h"
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_hw_engine.h"
+#include "xe_map.h"
+#include "xe_memirq.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+
+#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
+#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg)
+
+static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
+{
+ return container_of(memirq, struct xe_tile, sriov.vf.memirq);
+}
+
+static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
+{
+ return tile_to_xe(memirq_to_tile(memirq));
+}
+
+static const char *guc_name(struct xe_guc *guc)
+{
+ return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC";
+}
+
+/**
+ * DOC: Memory Based Interrupts
+ *
+ * The MMIO register based interrupt infrastructure, used in non-virtualized
+ * mode or with SRIOV-8 (which supports 8 Virtual Functions), does not scale
+ * efficiently to deliver interrupts to a large number of virtual machines or
+ * containers. Memory based interrupt status reporting provides an efficient
+ * and scalable alternative.
+ *
+ * For memory based interrupt status reporting, the hardware sequence is:
+ * * Engine writes the interrupt event to memory
+ * (the pointer to the memory location is provided by SW; this memory
+ * surface must be mapped to system memory and marked as un-cacheable (UC)
+ * in the Graphics IP caches)
+ * * Engine triggers an interrupt to the host.
+ */
+
+/**
+ * DOC: Memory Based Interrupts Page Layout
+ *
+ * `Memory Based Interrupts`_ requires three different objects, which are
+ * called "page" in the specs, even if they aren't page-sized or aligned.
+ *
+ * To simplify the code we allocate a single page size object and then use
+ * offsets to embedded "pages". The address of those "pages" are then
+ * programmed in the HW via LRI and LRM in the context image.
+ *
+ * - _`Interrupt Status Report Page`: this page contains the interrupt
+ * status vectors for each unit. Each bit in the interrupt vectors is
+ * converted to a byte, with the byte being set to 0xFF when an
+ * interrupt is triggered; interrupt vectors are 16 bits wide so each unit
+ * gets 16B. One space is reserved for each bit in one of the
+ * GT_INTR_DWx registers, so this object needs a total of 1024B.
+ * This object needs to be 4KiB aligned.
+ *
+ * - _`Interrupt Source Report Page`: this is the equivalent of the
+ * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * mapped to a byte here. The offsets are the same, just bytes instead
+ * of bits. This object needs to be cacheline aligned.
+ *
+ * - Interrupt Mask: the HW needs a location to fetch the interrupt
+ * mask vector to be used by the LRM in the context, so we just use
+ * the next available space in the interrupt page.
+ *
+ * ::
+ *
+ * 0x0000 +===========+ <== Interrupt Status Report Page
+ * | |
+ * | | ____ +----+----------------+
+ * | | / | 0 | USER INTERRUPT |
+ * +-----------+ __/ | 1 | |
+ * | HWE(n) | __ | | CTX SWITCH |
+ * +-----------+ \ | | WAIT SEMAPHORE |
+ * | | \____ | 15 | |
+ * | | +----+----------------+
+ * | |
+ * 0x0400 +===========+ <== Interrupt Source Report Page
+ * | HWE(0) |
+ * | HWE(1) |
+ * | |
+ * | HWE(x) |
+ * 0x0440 +===========+ <== Interrupt Enable Mask
+ * | |
+ * | |
+ * +-----------+
+ */
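As a rough illustration of the layout described above (hypothetical helper; the 0x000/0x400/0x440 offsets correspond to the XE_MEMIRQ_*_OFFSET values defined in xe_memirq_types.h later in this patch):

/*
 * status page : bo_ggtt_addr + 0x000, one 16 B vector per interrupt bit
 * source page : bo_ggtt_addr + 0x400, one byte per interrupt bit
 * enable mask : bo_ggtt_addr + 0x440
 */
static inline u32 example_engine_status_slot(u32 bo_ggtt_addr, u16 irq_offset)
{
	return bo_ggtt_addr + 0x000 + irq_offset * 16;
}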
+
+static void __release_xe_bo(struct drm_device *drm, void *arg)
+{
+ struct xe_bo *bo = arg;
+
+ xe_bo_unpin_map_no_vm(bo);
+}
+
+static int memirq_alloc_pages(struct xe_memirq *memirq)
+{
+ struct xe_device *xe = memirq_to_xe(memirq);
+ struct xe_tile *tile = memirq_to_tile(memirq);
+ struct xe_bo *bo;
+ int err;
+
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64));
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K));
+
+ /* XXX: convert to managed bo */
+ bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT |
+ XE_BO_NEEDS_UC |
+ XE_BO_NEEDS_CPU_ACCESS);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto out;
+ }
+
+ memirq_assert(memirq, !xe_bo_is_vram(bo));
+ memirq_assert(memirq, !memirq->bo);
+
+ iosys_map_memset(&bo->vmap, 0, 0, SZ_4K);
+
+ memirq->bo = bo;
+ memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET);
+ memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET);
+ memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);
+
+ memirq_assert(memirq, !memirq->source.is_iomem);
+ memirq_assert(memirq, !memirq->status.is_iomem);
+ memirq_assert(memirq, !memirq->mask.is_iomem);
+
+ memirq_debug(memirq, "page offsets: source %#x status %#x\n",
+ xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq));
+
+ return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);
+
+out:
+ xe_sriov_err(memirq_to_xe(memirq),
+ "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
+ return err;
+}
+
+static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
+{
+ iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0);
+
+ memirq->enabled = enable;
+}
+
+/**
+ * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq to initialize
+ *
+ * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
+ * used by `Memory Based Interrupts`_.
+ *
+ * These allocations are managed and will be implicitly released on unload.
+ *
+ * Note: This function shall be called only by the VF driver.
+ *
+ * If this function fails, the VF driver won't be able to operate correctly.
+ * If `Memory Based Interrupts`_ are not used this function will return 0.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_memirq_init(struct xe_memirq *memirq)
+{
+ struct xe_device *xe = memirq_to_xe(memirq);
+ int err;
+
+ memirq_assert(memirq, IS_SRIOV_VF(xe));
+
+ if (!xe_device_has_memirq(xe))
+ return 0;
+
+ err = memirq_alloc_pages(memirq);
+ if (unlikely(err))
+ return err;
+
+ /* we need to start with all irqs enabled */
+ memirq_set_enable(memirq, true);
+
+ return 0;
+}
+
+/**
+ * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
+ * @memirq: the &xe_memirq to query
+ *
+ * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: GGTT's offset of the `Interrupt Source Report Page`_.
+ */
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET;
+}
+
+/**
+ * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
+ * @memirq: the &xe_memirq to query
+ *
+ * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: GGTT's offset of the `Interrupt Status Report Page`_.
+ */
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET;
+}
+
+/**
+ * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
+ * @memirq: the &xe_memirq to query
+ *
+ * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: GGTT's offset of the Interrupt Enable Mask.
+ */
+u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
+}
+
+/**
+ * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq
+ * @guc: the &xe_guc to setup
+ *
+ * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
+ * to be used by the GuC when `Memory Based Interrupts`_ are required.
+ *
+ * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * and xe_memirq_init() didn't fail.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
+{
+ bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
+ u32 offset = is_media ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
+ u32 source, status;
+ int err;
+
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, memirq->bo);
+
+ source = xe_memirq_source_ptr(memirq) + offset;
+ status = xe_memirq_status_ptr(memirq) + offset * SZ_16;
+
+ err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
+ source);
+ if (unlikely(err))
+ goto failed;
+
+ err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
+ status);
+ if (unlikely(err))
+ goto failed;
+
+ return 0;
+
+failed:
+ xe_sriov_err(memirq_to_xe(memirq),
+ "Failed to setup report pages in %s (%pe)\n",
+ guc_name(guc), ERR_PTR(err));
+ return err;
+}
+
+/**
+ * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_.
+ * @memirq: struct xe_memirq
+ *
+ * This is part of the driver IRQ setup flow.
+ *
+ * This function shall only be used by the VF driver on platforms that use
+ * `Memory Based Interrupts`_.
+ */
+void xe_memirq_reset(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+
+ if (memirq->bo)
+ memirq_set_enable(memirq, false);
+}
+
+/**
+ * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_.
+ * @memirq: the &xe_memirq
+ *
+ * This is part of the driver IRQ setup flow.
+ *
+ * This function shall only be used by the VF driver on platforms that use
+ * `Memory Based Interrupts`_.
+ */
+void xe_memirq_postinstall(struct xe_memirq *memirq)
+{
+ memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+
+ if (memirq->bo)
+ memirq_set_enable(memirq, true);
+}
+
+static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ u8 value;
+
+ value = iosys_map_rd(vector, offset, u8);
+ if (value) {
+ if (value != 0xff)
+ xe_sriov_err_ratelimited(memirq_to_xe(memirq),
+ "Unexpected memirq value %#x from %s at %u\n",
+ value, name, offset);
+ iosys_map_wr(vector, offset, u8, 0x00);
+ }
+
+ return value;
+}
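For illustration (hypothetical helper, not part of the patch), the byte-per-bit protocol that memirq_received() implements boils down to:

static bool example_test_and_ack(u8 *vector, unsigned int offset)
{
	/* HW writes 0xff when the event fires; writing 0x00 acknowledges it */
	bool pending = vector[offset];

	if (pending)
		vector[offset] = 0;

	return pending;
}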
+
+static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
+ struct xe_hw_engine *hwe)
+{
+ memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);
+
+ if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
+ xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
+}
+
+static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
+ struct xe_guc *guc)
+{
+ const char *name = guc_name(guc);
+
+ memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);
+
+ if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
+ xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
+}
+
+/**
+ * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
+ * @memirq: the &xe_memirq
+ *
+ * This function reads and dispatches `Memory Based Interrupts`_.
+ */
+void xe_memirq_handler(struct xe_memirq *memirq)
+{
+ struct xe_device *xe = memirq_to_xe(memirq);
+ struct xe_tile *tile = memirq_to_tile(memirq);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct iosys_map map;
+ unsigned int gtid;
+ struct xe_gt *gt;
+
+ if (!memirq->bo)
+ return;
+
+ memirq_assert(memirq, !memirq->source.is_iomem);
+ memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
+ memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);
+
+ for_each_gt(gt, xe, gtid) {
+ if (gt->tile != tile)
+ continue;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) {
+ map = IOSYS_MAP_INIT_OFFSET(&memirq->status,
+ hwe->irq_offset * SZ_16);
+ memirq_dispatch_engine(memirq, &map, hwe);
+ }
+ }
+ }
+
+ /* GuC and media GuC (if present) must be checked separately */
+
+ if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) {
+ map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
+ memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
+ }
+
+ if (!tile->media_gt)
+ return;
+
+ if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) {
+ map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
+ memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
new file mode 100644
index 000000000000..2d40d03c3095
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MEMIRQ_H_
+#define _XE_MEMIRQ_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+struct xe_memirq;
+
+int xe_memirq_init(struct xe_memirq *memirq);
+
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq);
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq);
+u32 xe_memirq_enable_ptr(struct xe_memirq *memirq);
+
+void xe_memirq_reset(struct xe_memirq *memirq);
+void xe_memirq_postinstall(struct xe_memirq *memirq);
+void xe_memirq_handler(struct xe_memirq *memirq);
+
+int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h
new file mode 100644
index 000000000000..625b6b8736cc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_memirq_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MEMIRQ_TYPES_H_
+#define _XE_MEMIRQ_TYPES_H_
+
+#include <linux/iosys-map.h>
+
+struct xe_bo;
+
+/* ISR */
+#define XE_MEMIRQ_STATUS_OFFSET 0x0
+/* IIR */
+#define XE_MEMIRQ_SOURCE_OFFSET 0x400
+/* IMR */
+#define XE_MEMIRQ_ENABLE_OFFSET 0x440
+
+/**
+ * struct xe_memirq - Data used by the `Memory Based Interrupts`_.
+ *
+ * @bo: buffer object with `Memory Based Interrupts Page Layout`_.
+ * @source: iosys pointer to `Interrupt Source Report Page`_.
+ * @status: iosys pointer to `Interrupt Status Report Page`_.
+ * @mask: iosys pointer to Interrupt Enable Mask.
+ * @enabled: internal flag used to control processing of the interrupts.
+ */
+struct xe_memirq {
+ struct xe_bo *bo;
+ struct iosys_map source;
+ struct iosys_map status;
+ struct iosys_map mask;
+ bool enabled;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 70480c305602..ee1bb938c493 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -12,7 +12,8 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gpu_commands.h"
#include "tests/xe_test.h"
@@ -71,6 +72,16 @@ struct xe_migrate {
#define NUM_KERNEL_PDE 17
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
+#define MAX_NUM_PTE 512
+
+/*
+ * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
+ * legal value accepted. Since that instruction field is always stored in
+ * (val-2) format, this translates to 0x400 dwords for the true maximum length
+ * of the instruction. Subtracting the instruction header (1 dword) and
+ * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
+ */
+#define MAX_PTE_PER_SDI 0x1FE
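A worked restatement of the arithmetic in the comment above (illustration only, not part of the patch):

/*
 *   0x3FE  largest legal value of the 10-bit length field
 *   +   2  (val-2) encoding                -> 0x400 dwords total
 *   -   3  instruction header + address    -> 0x3FD dwords of payload
 *   /   2  each PTE is a qword             -> 0x1FE == MAX_PTE_PER_SDI
 */
static_assert(((0x3FE + 2) - 1 - 2) / 2 == 0x1FE);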
/**
* xe_tile_migrate_engine() - Get this tile's migrate engine.
@@ -360,7 +371,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
- EXEC_QUEUE_FLAG_HIGH_PRIORITY);
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
@@ -451,13 +462,13 @@ static u32 pte_update_size(struct xe_migrate *m,
} else {
/* Clip L0 to available size */
u64 size = min(*L0, (u64)avail_pts * SZ_2M);
- u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
*L0 = size;
*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
/* MI_STORE_DATA_IMM */
- cmds += 3 * DIV_ROUND_UP(num_4k_pages, 0x1ff);
+ cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
/* PDE qwords */
cmds += num_4k_pages * 2;
@@ -492,7 +503,7 @@ static void emit_pte(struct xe_migrate *m,
ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
while (ptes) {
- u32 chunk = min(0x1ffU, ptes);
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = ofs;
@@ -1111,7 +1122,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
* This shouldn't be possible in practice.. might change when 16K
* pages are used. Hence the assert.
*/
- xe_tile_assert(tile, update->qwords <= 0x1ff);
+ xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
if (!ppgtt_ofs)
ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
xe_bo_addr(update->pt_bo, 0,
@@ -1120,7 +1131,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
do {
u64 addr = ppgtt_ofs + ofs * 8;
- chunk = min(update->qwords, 0x1ffU);
+ chunk = min(size, MAX_PTE_PER_SDI);
/* Ensure populatefn can do memset64 by aligning bb->cs */
if (!(bb->len & 1))
@@ -1299,7 +1310,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
batch_size = 6 + num_updates * 2;
for (i = 0; i < num_updates; i++) {
- u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, 0x1ff);
+ u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
/* align noop + MI_STORE_DATA_IMM cmd prefix */
batch_size += 4 * num_cmds + updates[i].qwords * 2;
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 5f6b53ea5528..7ba2477452d7 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -20,6 +20,7 @@
#include "xe_gt_mcr.h"
#include "xe_macros.h"
#include "xe_module.h"
+#include "xe_sriov.h"
#include "xe_tile.h"
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
@@ -105,7 +106,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
pci_bus_for_each_resource(root, root_res, i) {
if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
- root_res->start > 0x100000000ull)
+ (u64)root_res->start > 0x100000000ul)
break;
}
@@ -363,13 +364,19 @@ static int xe_verify_lmem_ready(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
+ if (!IS_DGFX(xe))
+ return 0;
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
/*
* The boot firmware initializes local memory and assesses its health.
* If memory training fails, the punit will have been instructed to
* keep the GT powered down; we won't be able to communicate with it
* and we should not continue with driver initialization.
*/
- if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
+ if (!(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
drm_err(&xe->drm, "VRAM not initialized by firmware\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index ef79552e4f2f..609d997b3e9b 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -13,6 +13,7 @@
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_sriov.h"
#include "xe_step_types.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
@@ -290,18 +291,6 @@ static const struct xe_mocs_entry dg2_mocs_desc[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
-static const struct xe_mocs_entry dg2_mocs_desc_g10_ax[] = {
- /* Wa_14011441408: Set Go to Memory for MOCS#0 */
- MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Coherent; GO:Memory */
- MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Non-Coherent; GO:Memory */
- MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
-
- /* WB - LC */
- MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
-};
-
static const struct xe_mocs_entry pvc_mocs_desc[] = {
/* Error */
MOCS_ENTRY(0, 0, L3_3_WB),
@@ -409,15 +398,8 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
info->unused_entries_index = 1;
break;
case XE_DG2:
- if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10 &&
- xe->info.step.graphics >= STEP_A0 &&
- xe->info.step.graphics <= STEP_B0) {
- info->size = ARRAY_SIZE(dg2_mocs_desc_g10_ax);
- info->table = dg2_mocs_desc_g10_ax;
- } else {
- info->size = ARRAY_SIZE(dg2_mocs_desc);
- info->table = dg2_mocs_desc;
- }
+ info->size = ARRAY_SIZE(dg2_mocs_desc);
+ info->table = dg2_mocs_desc;
info->uc_index = 1;
info->n_entries = XELP_NUM_MOCS_ENTRIES;
info->unused_entries_index = 3;
@@ -558,6 +540,9 @@ void xe_mocs_init(struct xe_gt *gt)
struct xe_mocs_info table;
unsigned int flags;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
/*
* MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS"
* registers depending on platform.
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 1ff6bc79e7d4..e148934d554b 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -13,6 +13,7 @@
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
+#include "xe_sriov.h"
#define _PAT_ATS 0x47fc
#define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \
@@ -433,6 +434,10 @@ void xe_pat_init_early(struct xe_device *xe)
drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
}
+
+ /* VFs can't program nor dump PAT settings */
+ if (IS_SRIOV_VF(xe))
+ xe->pat.ops = NULL;
}
void xe_pat_init(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index dcc5ded1558e..557f2d88a8c1 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -15,9 +15,9 @@
#include <drm/drm_drv.h>
#include <drm/xe_pciids.h>
+#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
-#include "xe_display.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_macros.h"
@@ -165,7 +165,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_flat_ccs = 1, \
.has_range_tlb_invalidation = 1, \
- .has_usm = 0 /* FIXME: implementation missing */, \
+ .has_usm = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@@ -340,14 +340,14 @@ static const struct xe_device_desc lnl_desc = {
__diag_pop();
/* Map of GMD_ID values to graphics IP */
-static struct gmdid_map graphics_ip_map[] = {
+static const struct gmdid_map graphics_ip_map[] = {
{ 1270, &graphics_xelpg },
{ 1271, &graphics_xelpg },
{ 2004, &graphics_xe2 },
};
/* Map of GMD_ID values to media IP */
-static struct gmdid_map media_ip_map[] = {
+static const struct gmdid_map media_ip_map[] = {
{ 1300, &media_xelpmp },
{ 2000, &media_xe2 },
};
@@ -774,6 +774,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
str_yes_no(xe_device_has_sriov(xe)),
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ xe_pm_init_early(xe);
+
err = xe_device_probe(xe);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 5935cfe30204..f153ce96f69a 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -42,6 +42,13 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define PCODE_FREQUENCY_CONFIG 0x6e
+/* Frequency Config Sub Commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* Domain IDs (param2) */
+#define PCODE_MBOX_DOMAIN_HBM 0x2
+
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index b429c2876a76..53b3b0b019ac 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -10,11 +10,11 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
+#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
-#include "xe_display.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
@@ -125,17 +125,26 @@ int xe_pm_resume(struct xe_device *xe)
return 0;
}
-static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
+static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_dev *root_pdev;
root_pdev = pcie_find_root_port(pdev);
if (!root_pdev)
return false;
- /* D3Cold requires PME capability and _PR3 power resource */
- if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
+ /* D3Cold requires PME capability */
+ if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
+ drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
return false;
+ }
+
+ /* D3Cold requires _PR3 power resource */
+ if (!pci_pr3_present(root_pdev)) {
+ drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
+ return false;
+ }
return true;
}
@@ -163,17 +172,21 @@ static void xe_pm_runtime_init(struct xe_device *xe)
pm_runtime_put(dev);
}
-void xe_pm_init(struct xe_device *xe)
+void xe_pm_init_early(struct xe_device *xe)
{
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
+ drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
+}
+void xe_pm_init(struct xe_device *xe)
+{
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return;
drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev);
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
if (xe->d3cold.capable) {
xe_device_sysfs_init(xe);
@@ -214,6 +227,7 @@ struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
int xe_pm_runtime_suspend(struct xe_device *xe)
{
+ struct xe_bo *bo, *on;
struct xe_gt *gt;
u8 id;
int err = 0;
@@ -247,6 +261,16 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
*/
lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ /*
+ * Hold the lock across the entire list operation, as xe_ttm_bo_destroy and
+ * xe_bo_move_notify also check for and delete bo entries from the user fault list.
+ */
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ list_for_each_entry_safe(bo, on,
+ &xe->mem_access.vram_userfault.list, vram_userfault_link)
+ xe_bo_runtime_pm_release_mmap_offset(bo);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
@@ -330,7 +354,7 @@ int xe_pm_runtime_put(struct xe_device *xe)
int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
- return pm_runtime_get_if_active(xe->drm.dev, true);
+ return pm_runtime_get_if_active(xe->drm.dev);
}
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 6b9031f7af24..64a97c6726a7 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -20,6 +20,7 @@ struct xe_device;
int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);
+void xe_pm_init_early(struct xe_device *xe);
void xe_pm_init(struct xe_device *xe);
void xe_pm_runtime_fini(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ac19bfa3f798..7f54bc3e389d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -499,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
* this device *requires* 64K PTE size for VRAM, fail.
*/
if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
pte |= XE_PTE_PS64;
- else if (XE_WARN_ON(xe_walk->needs_64K))
+ } else if (XE_WARN_ON(xe_walk->needs_64K)) {
return -EINVAL;
+ }
}
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@@ -545,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
*child = &xe_child->base;
/*
- * Prefer the compact pagetable layout for L0 if possible.
+ * Prefer the compact pagetable layout for L0 if possible. Only
+ * possible if the VMA covers the entire 2MB region, as compact 64k and
+ * 4k pages cannot be mixed within a 2MB region.
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
flags |= XE_PDE_64K;
xe_child->is_compact = true;
}
@@ -872,8 +877,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 *num_entries,
- bool rebind)
+ struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
int err;
@@ -1229,7 +1233,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
"Preparing bind, with range [%llx...%llx) engine %p.\n",
xe_vma_start(vma), xe_vma_end(vma), q);
- err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
+ err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
if (err)
goto err;
xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 7e924faeeea0..92bb06c0586e 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -198,7 +198,7 @@ static int query_engines(struct xe_device *xe,
return -EINVAL;
}
- engines = kmalloc(size, GFP_KERNEL);
+ engines = kzalloc(size, GFP_KERNEL);
if (!engines)
return -ENOMEM;
@@ -212,14 +212,10 @@ static int query_engines(struct xe_device *xe,
engines->engines[i].instance.engine_instance =
hwe->logical_instance;
engines->engines[i].instance.gt_id = gt->info.id;
- engines->engines[i].instance.pad = 0;
- memset(engines->engines[i].reserved, 0,
- sizeof(engines->engines[i].reserved));
i++;
}
- engines->pad = 0;
engines->num_engines = i;
if (copy_to_user(query_ptr, engines, size)) {
@@ -520,6 +516,49 @@ static int query_gt_topology(struct xe_device *xe,
return 0;
}
+static int
+query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+ struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
+ size_t size = sizeof(struct drm_xe_query_uc_fw_version);
+ struct drm_xe_query_uc_fw_version resp;
+ struct xe_uc_fw_version *version = NULL;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&resp, query_ptr, size))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
+ return -EINVAL;
+
+ switch (resp.uc_type) {
+ case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
+ struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;
+
+ version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ resp.branch_ver = 0;
+ resp.major_ver = version->major;
+ resp.minor_ver = version->minor;
+ resp.patch_ver = version->patch;
+
+ if (copy_to_user(query_ptr, &resp, size))
+ return -EFAULT;
+
+ return 0;
+}
+
static int (* const xe_query_funcs[])(struct xe_device *xe,
struct drm_xe_device_query *query) = {
query_engines,
@@ -529,6 +568,7 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
query_hwconfig,
query_gt_topology,
query_engine_cycles,
+ query_uc_fw_version,
};
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
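
A minimal userspace sketch (assumptions noted) of how the new firmware-version query is used, following the query ioctl's usual two-call size/fill pattern. DRM_XE_DEVICE_QUERY_UC_FW_VERSION is assumed here to be the uAPI id wired to query_uc_fw_version() above; the struct fields match those referenced in the hunk.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int print_guc_abi_version(int fd)
{
	struct drm_xe_query_uc_fw_version ver = {
		.uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION,
	};
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,	/* assumed uAPI id */
	};

	/* First call with size == 0: the kernel reports the expected size. */
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		return -errno;

	/* Second call: size must match exactly and the pads must be zero. */
	q.size = sizeof(ver);
	q.data = (uintptr_t)&ver;
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		return -errno;

	printf("GuC submission interface %u.%u.%u\n",
	       ver.major_ver, ver.minor_ver, ver.patch_ver);
	return 0;
}
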
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 87adefb56024..440ac572f6e5 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -231,7 +231,7 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
if (err)
goto err_force_wake;
- p = drm_debug_printer(KBUILD_MODNAME);
+ p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
xa_for_each(&sr->xa, reg, entry) {
if (slot == RING_MAX_NONPRIV_SLOTS) {
xe_gt_err(gt,
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index e66ae1bdaf9c..3fa2ece7d228 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -7,9 +7,11 @@
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
#include "xe_rtp.h"
+#include "xe_step.h"
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
@@ -56,6 +58,12 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_DENY,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ { XE_RTP_NAME("16020183090"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(WHITELIST(CSBE_DEBUG_STATUS(RENDER_RING_BASE), 0))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 1e4c06eacd98..c4edffcd4a32 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -5,7 +5,8 @@
#include "xe_ring_ops.h"
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gpu_commands.h"
@@ -113,6 +114,19 @@ static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
return i;
}
+static int
+emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, u32 value)
+{
+ dw[i++] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+ dw[i++] = bit_group_1;
+ dw[i++] = offset;
+ dw[i++] = 0;
+ dw[i++] = value;
+ dw[i++] = 0;
+
+ return i;
+}
+
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
@@ -131,14 +145,7 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
flags &= ~mask_flags;
- dw[i++] = GFX_OP_PIPE_CONTROL(6);
- dw[i++] = flags;
- dw[i++] = LRC_PPHWSP_SCRATCH_ADDR;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
-
- return i;
+ return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
@@ -174,14 +181,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
- dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
- dw[i++] = flags;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
-
- return i;
+ return emit_pipe_control(dw, i, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0, 0);
}
static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int i)
@@ -189,14 +189,9 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
if (hwe->class != XE_ENGINE_CLASS_RENDER)
return i;
- if (XE_WA(hwe->gt, 16020292621)) {
- dw[i++] = GFX_OP_PIPE_CONTROL(6);
- dw[i++] = PIPE_CONTROL_LRI_POST_SYNC;
- dw[i++] = RING_NOPID(hwe->mmio_base).addr;
- dw[i++] = 0;
- dw[i++] = 0;
- dw[i++] = 0;
- }
+ if (XE_WA(hwe->gt, 16020292621))
+ i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
+ RING_NOPID(hwe->mmio_base).addr, 0);
return i;
}
@@ -204,16 +199,13 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
int i)
{
- dw[i++] = GFX_OP_PIPE_CONTROL(6);
- dw[i++] = (stall_only ? PIPE_CONTROL_CS_STALL :
- PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL) |
- PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE;
- dw[i++] = addr;
- dw[i++] = 0;
- dw[i++] = value;
- dw[i++] = 0; /* We're thrashing one extra dword. */
+ u32 flags = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE;
- return i;
+ if (!stall_only)
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ return emit_pipe_control(dw, i, 0, flags, addr, value);
}
static u32 get_ppgtt_flag(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 4e2ccad0e52f..8151ddafb940 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -277,3 +277,41 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
return drm_sched_job_add_dependency(&job->drm, fence);
}
+
+struct xe_sched_job_snapshot *
+xe_sched_job_snapshot_capture(struct xe_sched_job *job)
+{
+ struct xe_exec_queue *q = job->q;
+ struct xe_device *xe = q->gt->tile->xe;
+ struct xe_sched_job_snapshot *snapshot;
+ size_t len = sizeof(*snapshot) + (sizeof(u64) * q->width);
+ u16 i;
+
+ snapshot = kzalloc(len, GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ snapshot->batch_addr_len = q->width;
+ for (i = 0; i < q->width; i++)
+ snapshot->batch_addr[i] = xe_device_uncanonicalize_addr(xe, job->batch_addr[i]);
+
+ return snapshot;
+}
+
+void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot)
+{
+ kfree(snapshot);
+}
+
+void
+xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ u16 i;
+
+ if (!snapshot)
+ return;
+
+ for (i = 0; i < snapshot->batch_addr_len; i++)
+ drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
+}
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index 34f475ba7f50..f1a660648cf0 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -8,6 +8,7 @@
#include "xe_sched_job_types.h"
+struct drm_printer;
struct xe_vm;
#define XE_SCHED_HANG_LIMIT 1
@@ -77,4 +78,8 @@ xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
bool xe_sched_job_is_migration(struct xe_exec_queue *q);
+struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job);
+void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
+void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 71213ba9735b..b1d83da50a53 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -30,11 +30,11 @@ struct xe_sched_job {
struct dma_fence *fence;
/** @user_fence: write back value when BB is complete */
struct {
- /** @used: user fence is used */
+ /** @user_fence.used: user fence is used */
bool used;
- /** @addr: address to write to */
+ /** @user_fence.addr: address to write to */
u64 addr;
- /** @value: write back value */
+ /** @user_fence.value: write back value */
u64 value;
} user_fence;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
@@ -43,4 +43,9 @@ struct xe_sched_job {
u64 batch_addr[];
};
+struct xe_sched_job_snapshot {
+ u16 batch_addr_len;
+ u64 batch_addr[];
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 42a0e0c917a0..f295d91886b1 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_managed.h>
+
#include "xe_assert.h"
#include "xe_sriov.h"
@@ -53,3 +55,33 @@ void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov)
drm_info(&xe->drm, "Running in %s mode\n",
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
}
+
+static void fini_sriov(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ destroy_workqueue(xe->sriov.wq);
+ xe->sriov.wq = NULL;
+}
+
+/**
+ * xe_sriov_init - Initialize SR-IOV specific data.
+ * @xe: the &xe_device to initialize
+ *
+ * In this function we create a dedicated workqueue that will be used
+ * by the SR-IOV specific workers.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_init(struct xe_device *xe)
+{
+ if (!IS_SRIOV(xe))
+ return 0;
+
+ xe_assert(xe, !xe->sriov.wq);
+ xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
+ if (!xe->sriov.wq)
+ return -ENOMEM;
+
+ return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 5af73a3172b0..1545552162c9 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -13,6 +13,7 @@
const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode);
void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov);
+int xe_sriov_init(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index 999a4311b98b..1a138108d139 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -9,6 +9,18 @@
#include <linux/build_bug.h>
/**
+ * VFID - Virtual Function Identifier
+ * @n: VF number
+ *
+ * Helper macro to represent Virtual Function (VF) Identifier.
+ * VFID(0) is used as alias to the PFID that represents Physical Function.
+ *
+ * Note: According to the PCI spec, SR-IOV VF numbers are 1-based (VF1, VF2, ...).
+ */
+#define VFID(n) (n)
+#define PFID VFID(0)
+
+/**
* enum xe_sriov_mode - SR-IOV mode
* @XE_SRIOV_MODE_NONE: bare-metal mode (non-virtualized)
* @XE_SRIOV_MODE_PF: SR-IOV Physical Function (PF) mode
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index aab92bee1d7c..02c9577fe418 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -19,7 +19,7 @@
#include "xe_macros.h"
#include "xe_sched_job_types.h"
-struct user_fence {
+struct xe_user_fence {
struct xe_device *xe;
struct kref refcount;
struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
struct mm_struct *mm;
u64 __user *addr;
u64 value;
+ int signalled;
};
static void user_fence_destroy(struct kref *kref)
{
- struct user_fence *ufence = container_of(kref, struct user_fence,
+ struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
refcount);
mmdrop(ufence->mm);
kfree(ufence);
}
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
{
kref_get(&ufence->refcount);
}
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
{
kref_put(&ufence->refcount, user_fence_destroy);
}
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
- u64 value)
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+ u64 value)
{
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
static void user_fence_worker(struct work_struct *w)
{
- struct user_fence *ufence = container_of(w, struct user_fence, worker);
+ struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
}
wake_up_all(&ufence->xe->ufence_wq);
+ WRITE_ONCE(ufence->signalled, 1);
user_fence_put(ufence);
}
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
INIT_WORK(&ufence->worker, user_fence_worker);
queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+ struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
kick_ufence(ufence, fence);
}
@@ -340,3 +342,39 @@ err_out:
return ERR_PTR(-ENOMEM);
}
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+ user_fence_get(sync->ufence);
+
+ return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+ user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 if not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+ return READ_ONCE(ufence->signalled);
+}
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index f43cdcaca6c5..0fd0d51208e6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -38,4 +38,8 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
return !!sync->ufence;
}
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
#endif
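
To make the intended lifetime of these helpers concrete, a small sketch of how the VM-bind code later in this series uses them (bind_attach_ufence() and unbind_check_ufence() are hypothetical wrappers; the real logic lives in __xe_vm_bind() and xe_vm_unbind_vma() in xe_vm.c):

static void bind_attach_ufence(struct xe_vma *vma, struct xe_sync_entry *sync)
{
	if (!xe_sync_is_ufence(sync))
		return;

	/* Rebinding with a new user fence drops the previous reference. */
	if (vma->ufence)
		xe_sync_ufence_put(vma->ufence);
	vma->ufence = xe_sync_ufence_get(sync);
}

static int unbind_check_ufence(struct xe_vma *vma)
{
	/* The MAP's user fence must have signalled before UNMAP proceeds. */
	if (vma->ufence && !xe_sync_ufence_get_status(vma->ufence))
		return -EBUSY;

	if (vma->ufence) {
		xe_sync_ufence_put(vma->ufence);
		vma->ufence = NULL;
	}
	return 0;
}
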
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
index 852db5e7884f..30ac3f51993b 100644
--- a/drivers/gpu/drm/xe/xe_sync_types.h
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -18,7 +18,7 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
u32 type;
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 044c20881de7..0650b2fa75ef 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -167,9 +167,10 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
goto err_mem_access;
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
- if (IS_ERR(tile->mem.kernel_bb_pool))
+ if (IS_ERR(tile->mem.kernel_bb_pool)) {
err = PTR_ERR(tile->mem.kernel_bb_pool);
-
+ goto err_mem_access;
+ }
xe_wa_apply_tile_workarounds(tile);
xe_tile_sysfs_init(tile);
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index 0f8d3e7fce46..0662968d7bcb 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -9,6 +9,7 @@
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
+#include "xe_vram_freq.h"
static void xe_tile_sysfs_kobj_release(struct kobject *kobj)
{
@@ -50,6 +51,8 @@ void xe_tile_sysfs_init(struct xe_tile *tile)
tile->sysfs = &kt->base;
+ xe_vram_freq_sysfs_init(tile);
+
err = drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
if (err)
drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 95163c303f3e..4ddc55527f9a 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
+#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(u64, fence)
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
- __entry->fence = (u64)fence;
+ __entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=0x%016llx, seqno=%d",
+ TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
- __entry->vm = (unsigned long)bo->vm;
+ __entry->vm = bo->vm;
),
- TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
-DEFINE_EVENT(xe_bo, xe_bo_move,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+ TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
+ bool move_lacks_source),
+ TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+ TP_STRUCT__entry(
+ __field(struct xe_bo *, bo)
+ __field(size_t, size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ __array(char, device_id, 12)
+ __field(bool, move_lacks_source)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->size = bo->size;
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+ __entry->move_lacks_source = move_lacks_source;
+ ),
+ TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+ xe_mem_type_to_name[__entry->old_placement],
+ xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
- __field(u64, fence)
+ __field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
- __entry->fence = (unsigned long)fence;
+ __entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
- __field(u64, vma)
+ __field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma,
),
TP_fast_assign(
- __entry->vma = (unsigned long)vma;
+ __entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
- __entry->vm = (unsigned long)vm;
+ __entry->vm = vm;
__entry->asid = vm->usm.asid;
),
- TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index e5d7d5e2bec1..3107d2a12426 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -11,7 +11,8 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
@@ -19,6 +20,7 @@
#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
+#include "xe_sriov.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
@@ -205,7 +207,9 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
u64 stolen_size, io_size, pgsize;
int err;
- if (IS_DGFX(xe))
+ if (IS_SRIOV_VF(xe))
+ stolen_size = 0;
+ else if (IS_DGFX(xe))
stolen_size = detect_bar2_dgfx(xe, mgr);
else if (GRAPHICS_VERx100(xe) >= 1270)
stolen_size = detect_bar2_integrated(xe, mgr);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 53ccd338fd8c..5c83c75bc497 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -37,7 +37,14 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
-
+ { XE_RTP_NAME("Tuning: Compression Overfetch"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)),
+ },
+ { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 25e1ddfd2f86..7033f8c1b431 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -7,8 +7,10 @@
#include "xe_device.h"
#include "xe_gsc.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
+#include "xe_guc_db_mgr.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_huc.h"
@@ -30,13 +32,15 @@ uc_to_xe(struct xe_uc *uc)
/* Should be called once at driver load only */
int xe_uc_init(struct xe_uc *uc)
{
+ struct xe_device *xe = uc_to_xe(uc);
int ret;
+ xe_device_mem_access_get(xe);
+
/*
* We call the GuC/HuC/GSC init functions even if GuC submission is off
* to correctly move our tracking of the FW state to "disabled".
*/
-
ret = xe_guc_init(&uc->guc);
if (ret)
goto err;
@@ -50,7 +54,7 @@ int xe_uc_init(struct xe_uc *uc)
goto err;
if (!xe_device_uc_enabled(uc_to_xe(uc)))
- return 0;
+ goto err;
ret = xe_wopcm_init(&uc->wopcm);
if (ret)
@@ -60,9 +64,17 @@ int xe_uc_init(struct xe_uc *uc)
if (ret)
goto err;
+ ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0);
+ if (ret)
+ goto err;
+
+ xe_device_mem_access_put(xe);
+
return 0;
err:
+ xe_device_mem_access_put(xe);
+
return ret;
}
@@ -88,6 +100,10 @@ int xe_uc_init_post_hwconfig(struct xe_uc *uc)
if (err)
return err;
+ err = xe_huc_init_post_hwconfig(&uc->huc);
+ if (err)
+ return err;
+
return xe_gsc_init_post_hwconfig(&uc->gsc);
}
@@ -256,3 +272,16 @@ int xe_uc_suspend(struct xe_uc *uc)
return xe_guc_suspend(&uc->guc);
}
+
+/**
+ * xe_uc_remove() - Clean up the UC structures before driver removal
+ * @uc: the UC object
+ *
+ * This function should only act on objects/structures that must be cleaned
+ * before the driver removal callback is complete and therefore can't be
+ * deferred to a drmm action.
+ */
+void xe_uc_remove(struct xe_uc *uc)
+{
+ xe_gsc_remove(&uc->gsc);
+}
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 5d5110c0c834..e4d4e3c99f0e 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -20,5 +20,6 @@ int xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
+void xe_uc_remove(struct xe_uc *uc);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 9dff96dfe455..a9d25b3fa67c 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -92,6 +92,7 @@ struct uc_fw_entry {
const char *path;
u16 major;
u16 minor;
+ u16 patch;
bool full_ver_required;
};
};
@@ -102,14 +103,15 @@ struct fw_blobs_by_type {
};
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 7)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 5)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 5)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 5)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 5)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 5)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 5)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 5))
+ fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \
+ fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \
+ fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \
+ fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \
+ fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \
+ fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \
+ fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \
+ fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \
+ fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
@@ -121,24 +123,24 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0))
+ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
#define fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c) \
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a ## . ## b ## . ## c))
-#define fw_filename_major_ver(dir_, uc_, shortname_, a, b) \
+#define fw_filename_major_ver(dir_, uc_, shortname_, a, b, c) \
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
#define fw_filename_no_ver(dir_, uc_, shortname_) \
MAKE_FW_PATH(dir_, uc_, shortname_, "")
#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
- a, b, true }
-#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b) \
- { fw_filename_major_ver(dir_, uc_, shortname_, a, b), \
- a, b }
+ a, b, c, true }
+#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b, c) \
+ { fw_filename_major_ver(dir_, uc_, shortname_, a, b, c), \
+ a, b, c }
#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
{ fw_filename_no_ver(dir_, uc_, shortname_), \
0, 0 }
@@ -221,6 +223,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
uc_fw->path = entries[i].path;
uc_fw->versions.wanted.major = entries[i].major;
uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
uc_fw->full_ver_required = entries[i].full_ver_required;
if (uc_fw->type == XE_UC_FW_TYPE_GSC)
@@ -340,19 +343,22 @@ int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw)
* Otherwise, at least the major version.
*/
if (wanted->major != found->major ||
- (uc_fw->full_ver_required && wanted->minor != found->minor)) {
- drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ (uc_fw->full_ver_required &&
+ ((wanted->minor != found->minor) ||
+ (wanted->patch != found->patch)))) {
+ drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u.%u != %u.%u.%u\n",
xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- found->major, found->minor,
- wanted->major, wanted->minor);
+ found->major, found->minor, found->patch,
+ wanted->major, wanted->minor, wanted->patch);
goto fail;
}
- if (wanted->minor > found->minor) {
- drm_notice(&xe->drm, "%s firmware (%u.%u) is recommended, but only (%u.%u) was found in %s\n",
+ if (wanted->minor > found->minor ||
+ (wanted->minor == found->minor && wanted->patch > found->patch)) {
+ drm_notice(&xe->drm, "%s firmware (%u.%u.%u) is recommended, but only (%u.%u.%u) was found in %s\n",
xe_uc_fw_type_repr(uc_fw->type),
- wanted->major, wanted->minor,
- found->major, found->minor,
+ wanted->major, wanted->minor, wanted->patch,
+ found->major, found->minor, found->patch,
uc_fw->path);
drm_info(&xe->drm, "Consider updating your linux-firmware pkg or downloading from %s\n",
XE_UC_FIRMWARE_URL);
@@ -652,14 +658,18 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
xe_assert(xe, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
+ uc_fw_override(uc_fw);
xe_uc_fw_change_status(uc_fw, uc_fw->path ?
XE_UC_FIRMWARE_SELECTED :
XE_UC_FIRMWARE_NOT_SUPPORTED);
- if (!xe_uc_fw_is_supported(uc_fw))
+ if (!xe_uc_fw_is_supported(uc_fw)) {
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
+ drm_err(&xe->drm, "No GuC firmware defined for platform\n");
+ return -ENOENT;
+ }
return 0;
-
- uc_fw_override(uc_fw);
+ }
/* an empty path means the firmware is disabled */
if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ee914a5d8523..bc800b696866 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -124,11 +124,14 @@ struct xe_uc_fw {
/** @versions: FW versions wanted and found */
struct {
- /** @wanted: firmware version wanted by platform */
+ /** @versions.wanted: firmware version wanted by platform */
struct xe_uc_fw_version wanted;
- /** @wanted_type: type of firmware version wanted (release vs compatibility) */
+ /**
+ * @versions.wanted_type: type of firmware version wanted
+ * (release vs compatibility)
+ */
enum xe_uc_fw_version_types wanted_type;
- /** @found: fw versions found in firmware blob */
+ /** @versions.found: fw versions found in firmware blob */
struct xe_uc_fw_version found[XE_UC_FW_VER_TYPE_COUNT];
} versions;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 7b00faa67287..d28260351af2 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -13,11 +13,14 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
+#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include <generated/xe_wa_oob.h>
+
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
@@ -34,7 +37,6 @@
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
-#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
@@ -792,6 +794,7 @@ static void xe_vma_free(struct xe_vma *vma)
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -804,6 +807,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
u8 id;
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
+ bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -838,6 +842,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.va.range = end - start + 1;
if (read_only)
vma->gpuva.flags |= XE_VMA_READ_ONLY;
+ if (dumpable)
+ vma->gpuva.flags |= XE_VMA_DUMPABLE;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -897,6 +903,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
struct xe_device *xe = vm->xe;
bool read_only = xe_vma_read_only(vma);
+ if (vma->ufence) {
+ xe_sync_ufence_put(vma->ufence);
+ vma->ufence = NULL;
+ }
+
if (xe_vma_is_userptr(vma)) {
struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
@@ -1051,7 +1062,9 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock);
+ mutex_lock(&vm->snap_mutex);
err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
+ mutex_unlock(&vm->snap_mutex);
XE_WARN_ON(err); /* Shouldn't be possible */
return err;
@@ -1062,7 +1075,9 @@ static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock);
+ mutex_lock(&vm->snap_mutex);
drm_gpuva_remove(&vma->gpuva);
+ mutex_unlock(&vm->snap_mutex);
if (vm->usm.last_fault_vma == vma)
vm->usm.last_fault_vma = NULL;
}
@@ -1081,7 +1096,7 @@ static struct drm_gpuva_op *xe_vm_op_alloc(void)
static void xe_vm_free(struct drm_gpuvm *gpuvm);
-static struct drm_gpuvm_ops gpuvm_ops = {
+static const struct drm_gpuvm_ops gpuvm_ops = {
.op_alloc = xe_vm_op_alloc,
.vm_bo_validate = xe_gpuvm_validate,
.vm_free = xe_vm_free,
@@ -1289,6 +1304,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->flags = flags;
init_rwsem(&vm->lock);
+ mutex_init(&vm->snap_mutex);
INIT_LIST_HEAD(&vm->rebind_list);
@@ -1414,6 +1430,7 @@ err_close:
return ERR_PTR(err);
err_no_resv:
+ mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
@@ -1538,6 +1555,8 @@ static void vm_destroy_work_func(struct work_struct *w)
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
+ mutex_destroy(&vm->snap_mutex);
+
if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
xe_device_mem_access_put(xe);
@@ -1608,6 +1627,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
trace_xe_vma_unbind(vma);
+ if (vma->ufence) {
+ struct xe_user_fence * const f = vma->ufence;
+
+ if (!xe_sync_ufence_get_status(f))
+ return ERR_PTR(-EBUSY);
+
+ vma->ufence = NULL;
+ xe_sync_ufence_put(f);
+ }
+
if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
@@ -1741,6 +1770,21 @@ err_fences:
return ERR_PTR(err);
}
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_syncs; i++) {
+ struct xe_sync_entry *e = &syncs[i];
+
+ if (xe_sync_is_ufence(e))
+ return xe_sync_ufence_get(e);
+ }
+
+ return NULL;
+}
+
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool immediate, bool first_op,
@@ -1748,9 +1792,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_user_fence *ufence;
xe_vm_assert_held(vm);
+ ufence = find_ufence_get(syncs, num_syncs);
+ if (vma->ufence && ufence)
+ xe_sync_ufence_put(vma->ufence);
+
+ vma->ufence = ufence ?: vma->ufence;
+
if (immediate) {
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
last_op);
@@ -2117,11 +2168,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.immediate =
- flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
@@ -2190,15 +2238,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
if (vma->gpuva.flags & XE_VMA_PTE_1G)
return SZ_1G;
- else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
return SZ_2M;
+ else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+ return SZ_64K;
else if (vma->gpuva.flags & XE_VMA_PTE_4K)
return SZ_4K;
return SZ_1G; /* Uninitialized, used max size */
}
-static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
switch (size) {
case SZ_1G:
@@ -2207,9 +2257,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
case SZ_2M:
vma->gpuva.flags |= XE_VMA_PTE_2M;
break;
+ case SZ_64K:
+ vma->gpuva.flags |= XE_VMA_PTE_64K;
+ break;
+ case SZ_4K:
+ vma->gpuva.flags |= XE_VMA_PTE_4K;
+ break;
}
-
- return SZ_4K;
}
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
@@ -2281,6 +2335,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
struct list_head *ops_list, bool last)
{
+ struct xe_device *xe = vm->xe;
struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
int err = 0;
@@ -2307,10 +2362,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->map.dumpable ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2335,6 +2390,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
@@ -2356,6 +2414,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
xe_vma_end(vma) -
xe_vma_start(old);
op->remap.start = xe_vma_end(vma);
+ vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.start,
+ (ULL)op->remap.range);
}
}
@@ -2366,6 +2427,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
@@ -2386,6 +2450,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
op->remap.range -=
xe_vma_end(old) -
xe_vma_start(vma);
+ vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.start,
+ (ULL)op->remap.range);
}
}
break;
@@ -2439,7 +2506,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
+ !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2714,14 +2781,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
-#define SUPPORTED_FLAGS \
- (DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
+#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+ DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-#define MAX_BINDS 512 /* FIXME: Picking random upper limit */
-
static int vm_bind_ioctl_check_args(struct xe_device *xe,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
@@ -2733,16 +2797,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
+ if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
- *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds, GFP_KERNEL);
+ *bind_ops = kvmalloc_array(args->num_binds,
+ sizeof(struct drm_xe_vm_bind_op),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
return -ENOMEM;
@@ -2832,7 +2896,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
free_bind_ops:
if (args->num_binds > 1)
- kfree(*bind_ops);
+ kvfree(*bind_ops);
return err;
}
@@ -2920,13 +2984,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (args->num_binds) {
- bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+ bos = kvcalloc(args->num_binds, sizeof(*bos),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
- ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+ ops = kvcalloc(args->num_binds, sizeof(*ops),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -3067,10 +3133,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; bos && i < args->num_binds; ++i)
xe_bo_put(bos[i]);
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
@@ -3094,10 +3160,10 @@ put_exec_queue:
if (q)
xe_exec_queue_put(q);
free_objs:
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
}
@@ -3241,3 +3307,168 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
return 0;
}
+
+struct xe_vm_snapshot {
+ unsigned long num_snaps;
+ struct {
+ u64 ofs, bo_ofs;
+ unsigned long len;
+ struct xe_bo *bo;
+ void *data;
+ struct mm_struct *mm;
+ } snap[];
+};
+
+struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
+{
+ unsigned long num_snaps = 0, i;
+ struct xe_vm_snapshot *snap = NULL;
+ struct drm_gpuva *gpuva;
+
+ if (!vm)
+ return NULL;
+
+ mutex_lock(&vm->snap_mutex);
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ if (gpuva->flags & XE_VMA_DUMPABLE)
+ num_snaps++;
+ }
+
+ if (num_snaps)
+ snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
+ if (!snap)
+ goto out_unlock;
+
+ snap->num_snaps = num_snaps;
+ i = 0;
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+ struct xe_bo *bo = vma->gpuva.gem.obj ?
+ gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
+
+ if (!(gpuva->flags & XE_VMA_DUMPABLE))
+ continue;
+
+ snap->snap[i].ofs = xe_vma_start(vma);
+ snap->snap[i].len = xe_vma_size(vma);
+ if (bo) {
+ snap->snap[i].bo = xe_bo_get(bo);
+ snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
+ } else if (xe_vma_is_userptr(vma)) {
+ struct mm_struct *mm =
+ to_userptr_vma(vma)->userptr.notifier.mm;
+
+ if (mmget_not_zero(mm))
+ snap->snap[i].mm = mm;
+ else
+ snap->snap[i].data = ERR_PTR(-EFAULT);
+
+ snap->snap[i].bo_ofs = xe_vma_userptr(vma);
+ } else {
+ snap->snap[i].data = ERR_PTR(-ENOENT);
+ }
+ i++;
+ }
+
+out_unlock:
+ mutex_unlock(&vm->snap_mutex);
+ return snap;
+}
+
+void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
+{
+ for (int i = 0; i < snap->num_snaps; i++) {
+ struct xe_bo *bo = snap->snap[i].bo;
+ struct iosys_map src;
+ int err;
+
+ if (IS_ERR(snap->snap[i].data))
+ continue;
+
+ snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
+ if (!snap->snap[i].data) {
+ snap->snap[i].data = ERR_PTR(-ENOMEM);
+ goto cleanup_bo;
+ }
+
+ if (bo) {
+ dma_resv_lock(bo->ttm.base.resv, NULL);
+ err = ttm_bo_vmap(&bo->ttm, &src);
+ if (!err) {
+ xe_map_memcpy_from(xe_bo_device(bo),
+ snap->snap[i].data,
+ &src, snap->snap[i].bo_ofs,
+ snap->snap[i].len);
+ ttm_bo_vunmap(&bo->ttm, &src);
+ }
+ dma_resv_unlock(bo->ttm.base.resv);
+ } else {
+ void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
+
+ kthread_use_mm(snap->snap[i].mm);
+ if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
+ err = 0;
+ else
+ err = -EFAULT;
+ kthread_unuse_mm(snap->snap[i].mm);
+
+ mmput(snap->snap[i].mm);
+ snap->snap[i].mm = NULL;
+ }
+
+ if (err) {
+ kvfree(snap->snap[i].data);
+ snap->snap[i].data = ERR_PTR(err);
+ }
+
+cleanup_bo:
+ xe_bo_put(bo);
+ snap->snap[i].bo = NULL;
+ }
+}
+
+void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < snap->num_snaps; i++) {
+ if (IS_ERR(snap->snap[i].data))
+ goto uncaptured;
+
+ drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
+ drm_printf(p, "[%llx].data: ",
+ snap->snap[i].ofs);
+
+ for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
+ u32 *val = snap->snap[i].data + j;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+
+ drm_puts(p, "\n");
+ continue;
+
+uncaptured:
+ drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
+ snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
+ PTR_ERR(snap->snap[i].data));
+ }
+}
+
+void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
+{
+ unsigned long i;
+
+ if (!snap)
+ return;
+
+ for (i = 0; i < snap->num_snaps; i++) {
+ if (!IS_ERR(snap->snap[i].data))
+ kvfree(snap->snap[i].data);
+ xe_bo_put(snap->snap[i].bo);
+ if (snap->snap[i].mm)
+ mmput(snap->snap[i].mm);
+ }
+ kvfree(snap);
+}
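
A short sketch of the calling sequence the snapshot helpers above are designed for, assuming a devcoredump-style caller (in the driver the capture and the delayed copy run in different contexts; they are shown back to back here only for brevity):

static void dump_vm(struct xe_vm *vm, struct drm_printer *p)
{
	struct xe_vm_snapshot *snap;

	/* May run from fence-signalling context: only metadata is captured. */
	snap = xe_vm_snapshot_capture(vm);
	if (!snap)
		return;

	/* From process context: copy the dumpable BO / userptr contents. */
	xe_vm_snapshot_capture_delayed(snap);

	/* Emit the ascii85-encoded ranges into the printer, then release. */
	xe_vm_snapshot_print(snap, p);
	xe_vm_snapshot_free(snap);
}
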
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 9654a0612fc2..6df1f1c7f85d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -211,8 +211,6 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
int xe_vm_invalidate_vma(struct xe_vma *vma);
-extern struct ttm_device_funcs xe_ttm_funcs;
-
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
@@ -273,3 +271,8 @@ static inline void vm_dbg(const struct drm_device *dev,
{ /* noop */ }
#endif
#endif
+
+struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
+void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
+void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
+void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 5ac9c5bebabc..79b5cab57711 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -19,6 +19,7 @@
struct xe_bo;
struct xe_sync_entry;
+struct xe_user_fence;
struct xe_vm;
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
@@ -29,6 +30,9 @@ struct xe_vm;
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -102,6 +106,12 @@ struct xe_vma {
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
u16 pat_index;
+
+ /**
+ * @ufence: The user fence that was provided with MAP.
+ * Needs to be signalled before UNMAP can be processed.
+ */
+ struct xe_user_fence *ufence;
};
/**
@@ -157,6 +167,11 @@ struct xe_vm {
* VM
*/
struct rw_semaphore lock;
+ /**
+ * @snap_mutex: Mutex guarding insertions into and removals from the GPU VA
+ * tree, so that a snapshot can be taken safely from devcoredump.
+ */
+ struct mutex snap_mutex;
/**
* @rebind_list: list of VMAs that need rebinding. Protected by the
@@ -181,30 +196,6 @@ struct xe_vm {
*/
struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];
- /** @async_ops: async VM operations (bind / unbinds) */
- struct {
- /** @list: list of pending async VM ops */
- struct list_head pending;
- /** @work: worker to execute async VM ops */
- struct work_struct work;
- /** @lock: protects list of pending async VM ops and fences */
- spinlock_t lock;
- /** @fence: fence state */
- struct {
- /** @context: context of async fence */
- u64 context;
- /** @seqno: seqno of async fence */
- u32 seqno;
- } fence;
- /** @error: error state for async VM ops */
- int error;
- /**
- * @munmap_rebind_inflight: an munmap style VM bind is in the
- * middle of a set of ops which requires a rebind at the end.
- */
- bool munmap_rebind_inflight;
- } async_ops;
-
const struct xe_pt_ops *pt_ops;
/** @userptr: user pointer state */
@@ -286,12 +277,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
- /** @immediate: Immediate bind */
- bool immediate;
- /** @read_only: Read only */
- bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @dumpable: whether BO is dumped on GPU hang */
+ bool dumpable;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
new file mode 100644
index 000000000000..079cc283a186
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#include <linux/sysfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_gt_types.h"
+#include "xe_pcode.h"
+#include "xe_pcode_api.h"
+#include "xe_tile.h"
+#include "xe_tile_sysfs.h"
+#include "xe_vram_freq.h"
+
+/**
+ * DOC: Xe VRAM freq
+ *
+ * Provides sysfs entries for the VRAM frequency of each tile.
+ *
+ * device/tile#/memory/freq0/max_freq - Maximum frequency. Read-only, as it reports the fixed
+ *                                      fuse point P0 rather than the current system
+ *                                      configuration.
+ * device/tile#/memory/freq0/min_freq - Minimum frequency. Read-only, as it reports the fixed
+ *                                      fuse point PN rather than the current system
+ *                                      configuration.
+ */
+
+static struct xe_tile *dev_to_tile(struct device *dev)
+{
+ return kobj_to_tile(dev->kobj.parent);
+}
+
+static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_tile *tile = dev_to_tile(dev);
+ struct xe_gt *gt = tile->primary_gt;
+ u32 val, mbox;
+ int err;
+
+ mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
+ | REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_P0)
+ | REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
+
+ err = xe_pcode_read(gt, mbox, &val, NULL);
+ if (err)
+ return err;
+
+ /* data_out - Fused P0 for domain ID in units of 50 MHz */
+ val *= 50;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+static DEVICE_ATTR_RO(max_freq);
+
+static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_tile *tile = dev_to_tile(dev);
+ struct xe_gt *gt = tile->primary_gt;
+ u32 val, mbox;
+ int err;
+
+ mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
+ | REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_PN)
+ | REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
+
+ err = xe_pcode_read(gt, mbox, &val, NULL);
+ if (err)
+ return err;
+
+ /* data_out - Fused Pn for domain ID in units of 50 MHz */
+ val *= 50;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+static DEVICE_ATTR_RO(min_freq);
+
+static struct attribute *freq_attrs[] = {
+ &dev_attr_max_freq.attr,
+ &dev_attr_min_freq.attr,
+ NULL
+};
+
+static const struct attribute_group freq_group_attrs = {
+ .name = "freq0",
+ .attrs = freq_attrs,
+};
+
+static void vram_freq_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_group(kobj, &freq_group_attrs);
+ kobject_put(kobj);
+}
+
+/**
+ * xe_vram_freq_sysfs_init - Initialize vram frequency sysfs component
+ * @tile: Xe Tile object
+ *
+ * It needs to be initialized after the main tile component is ready
+ */
+void xe_vram_freq_sysfs_init(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct kobject *kobj;
+ int err;
+
+ if (xe->info.platform != XE_PVC)
+ return;
+
+ kobj = kobject_create_and_add("memory", tile->sysfs);
+ if (!kobj) {
+ drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+ return;
+ }
+
+ err = sysfs_create_group(kobj, &freq_group_attrs);
+ if (err) {
+ kobject_put(kobj);
+ drm_warn(&xe->drm, "failed to register vram freq sysfs, err: %d\n", err);
+ return;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj);
+ if (err)
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+}
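As a hedged illustration of how the new attributes might be consumed from userspace (the card and tile indices in the path below are assumptions; values are reported in MHz because the driver multiplies the fused point by 50):

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* Hypothetical path; the actual card/tile indices depend on the system. */
	FILE *f = fopen("/sys/class/drm/card0/device/tile0/memory/freq0/max_freq", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("max VRAM frequency (MHz): %s", buf);
	fclose(f);
	return 0;
}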
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.h b/drivers/gpu/drm/xe/xe_vram_freq.h
new file mode 100644
index 000000000000..cbe8c12fbd64
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_freq.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_VRAM_FREQ_H_
+#define _XE_VRAM_FREQ_H_
+
+struct xe_tile;
+
+void xe_vram_freq_sysfs_init(struct xe_tile *tile);
+
+#endif /* _XE_VRAM_FREQ_H_ */
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 5f61dd87c586..a0264eedd443 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -9,7 +9,8 @@
#include <kunit/visibility.h>
#include <linux/compiler_types.h>
-#include "generated/xe_wa_oob.h"
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
@@ -125,13 +126,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
/* DG2 */
- { XE_RTP_NAME("16010515920"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10),
- GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(VIDEO_DECODE)),
- XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F18(0), ALNUNIT_CLKGATE_DIS)),
- XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
- },
{ XE_RTP_NAME("22010523718"),
XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, CG3DDISCFEG_CLKGATE_DIS))
@@ -140,61 +134,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
XE_RTP_ACTIONS(SET(SUBSLICE_UNIT_LEVEL_CLKGATE, DSS_ROUTER_CLKGATE_DIS))
},
- { XE_RTP_NAME("14012362059"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB))
- },
- { XE_RTP_NAME("14012362059"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB))
- },
- { XE_RTP_NAME("14010948348"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011037102"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLCGCTL9444, LTCDD_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011371254"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011431319"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLCGCTL9440,
- GAMTLBOACS_CLKGATE_DIS |
- GAMTLBVDBOX7_CLKGATE_DIS | GAMTLBVDBOX6_CLKGATE_DIS |
- GAMTLBVDBOX5_CLKGATE_DIS | GAMTLBVDBOX4_CLKGATE_DIS |
- GAMTLBVDBOX3_CLKGATE_DIS | GAMTLBVDBOX2_CLKGATE_DIS |
- GAMTLBVDBOX1_CLKGATE_DIS | GAMTLBVDBOX0_CLKGATE_DIS |
- GAMTLBKCR_CLKGATE_DIS | GAMTLBGUC_CLKGATE_DIS |
- GAMTLBBLT_CLKGATE_DIS),
- SET(UNSLCGCTL9444,
- GAMTLBGFXA0_CLKGATE_DIS | GAMTLBGFXA1_CLKGATE_DIS |
- GAMTLBCOMPA0_CLKGATE_DIS | GAMTLBCOMPA1_CLKGATE_DIS |
- GAMTLBCOMPB0_CLKGATE_DIS | GAMTLBCOMPB1_CLKGATE_DIS |
- GAMTLBCOMPC0_CLKGATE_DIS | GAMTLBCOMPC1_CLKGATE_DIS |
- GAMTLBCOMPD0_CLKGATE_DIS | GAMTLBCOMPD1_CLKGATE_DIS |
- GAMTLBMERT_CLKGATE_DIS |
- GAMTLBVEBOX3_CLKGATE_DIS | GAMTLBVEBOX2_CLKGATE_DIS |
- GAMTLBVEBOX1_CLKGATE_DIS | GAMTLBVEBOX0_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14010569222"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, GAMEDIA_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14011028019"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(SSMCGCTL9530, RTFUNIT_CLKGATE_DIS))
- },
- { XE_RTP_NAME("14010680813"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_GAMSTLB_CTRL,
- CONTROL_BLOCK_CLKGATE_DIS |
- EGRESS_BLOCK_CLKGATE_DIS |
- TAG_BLOCK_CLKGATE_DIS))
- },
{ XE_RTP_NAME("14014830051"),
XE_RTP_RULES(PLATFORM(DG2)),
XE_RTP_ACTIONS(CLR(SARB_CHICKEN1, COMP_CKN_IN))
@@ -212,10 +151,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE))
},
- { XE_RTP_NAME("14010648519"),
- XE_RTP_RULES(PLATFORM(DG2)),
- XE_RTP_ACTIONS(SET(XEHP_L3NODEARBCFG, XEHP_LNESPARE))
- },
/* PVC */
@@ -377,13 +312,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
POLYGON_TRIFAN_LINELOOP_DISABLE))
},
{ XE_RTP_NAME("22012826095, 22013059131"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW,
- MAXREQS_PER_BANK,
- REG_FIELD_PREP(MAXREQS_PER_BANK, 2)))
- },
- { XE_RTP_NAME("22012826095, 22013059131"),
XE_RTP_RULES(SUBPLATFORM(DG2, G11),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW,
@@ -391,27 +319,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
REG_FIELD_PREP(MAXREQS_PER_BANK, 2)))
},
{ XE_RTP_NAME("22013059131"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT))
- },
- { XE_RTP_NAME("22013059131"),
XE_RTP_RULES(SUBPLATFORM(DG2, G11),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT))
},
- { XE_RTP_NAME("14010918519"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW,
- FORCE_SLM_FENCE_SCOPE_TO_TILE |
- FORCE_UGM_FENCE_SCOPE_TO_TILE,
- /*
- * Ignore read back as it always returns 0 in these
- * steps
- */
- .read_mask = 0))
- },
{ XE_RTP_NAME("14015227452"),
XE_RTP_RULES(PLATFORM(DG2),
FUNC(xe_rtp_match_first_render_or_compute)),
@@ -428,22 +339,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3))
},
- { XE_RTP_NAME("16011620976, 22015475538"),
+ { XE_RTP_NAME("22015475538"),
XE_RTP_RULES(PLATFORM(DG2),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8))
},
{ XE_RTP_NAME("22012654132"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0),
- FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC,
- /*
- * Register can't be read back for verification on
- * DG2 due to Wa_14012342262
- */
- .read_mask = 0))
- },
- { XE_RTP_NAME("22012654132"),
XE_RTP_RULES(SUBPLATFORM(DG2, G11),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC,
@@ -461,68 +362,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_READ_SUPPRESSION))
},
- { XE_RTP_NAME("14013392000"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN2, ENABLE_LARGE_GRF_MODE))
- },
- { XE_RTP_NAME("14012419201"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
- DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX))
- },
- { XE_RTP_NAME("14012419201"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
- DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX))
- },
- { XE_RTP_NAME("1308578152"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- ENGINE_CLASS(RENDER),
- FUNC(xe_rtp_match_first_gslice_fused_off)),
- XE_RTP_ACTIONS(CLR(CS_DEBUG_MODE1(RENDER_RING_BASE),
- REPLAY_MODE_GRANULARITY))
- },
{ XE_RTP_NAME("22010960976, 14013347512"),
XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(CLR(XEHP_HDC_CHICKEN0,
LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK))
},
- { XE_RTP_NAME("1608949956, 14010198302"),
- XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN,
- MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE))
- },
- { XE_RTP_NAME("22010430635"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
- DISABLE_GRF_CLEAR))
- },
- { XE_RTP_NAME("14013202645"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
- },
- { XE_RTP_NAME("14013202645"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
- },
- { XE_RTP_NAME("22012532006"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7,
- DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA))
- },
- { XE_RTP_NAME("22012532006"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
- ENGINE_CLASS(RENDER)),
- XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7,
- DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA))
- },
{ XE_RTP_NAME("14015150844"),
XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES,
@@ -612,7 +456,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
-
+ { XE_RTP_NAME("16018610683"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE))
+ },
{}
};
@@ -652,21 +499,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
/* DG2 */
- { XE_RTP_NAME("16011186671"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(CLR(VFLSKPD, DIS_MULT_MISS_RD_SQUASH),
- SET(VFLSKPD, DIS_OVER_FETCH_CACHE))
- },
- { XE_RTP_NAME("14010469329"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3,
- XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE))
- },
- { XE_RTP_NAME("14010698770, 22010613112, 22010465075"),
- XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
- XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3,
- DISABLE_CPS_AWARE_COLOR_PIPE))
- },
{ XE_RTP_NAME("16013271637"),
XE_RTP_RULES(PLATFORM(DG2)),
XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1,
@@ -708,6 +540,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
},
+ { XE_RTP_NAME("14019877138"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
+ },
/* Xe2_LPG */
@@ -739,6 +575,11 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
},
+ { XE_RTP_NAME("16020183090"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
+ },
{}
};
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 727bdc429212..b138cbd51bdb 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,13 +1,8 @@
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
-16011759253 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
22011391025 PLATFORM(DG2)
-14012197797 PLATFORM(DG2), GRAPHICS_STEP(A0, B0)
-16011777198 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0)
- SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)
-22012727170 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0)
- SUBPLATFORM(DG2, G11)
+22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
16015675438 PLATFORM(PVC)
SUBPLATFORM(DG2, G10)
@@ -22,3 +17,8 @@
14019821291 MEDIA_VERSION_RANGE(1300, 2000)
14015076503 MEDIA_VERSION(1300)
16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
+14018913170 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
+ MEDIA_VERSION(2000), GRAPHICS_STEP(A0, A1)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+ MEDIA_VERSION(1300)
+ PLATFORM(DG2)
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index a75eeba7bfe5..f69721339201 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -148,7 +148,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
if (q) {
if (q->ops->reset_status(q)) {
- drm_info(&xe->drm, "exec gueue reset detected\n");
+ drm_info(&xe->drm, "exec queue reset detected\n");
err = -EIO;
break;
}
diff --git a/drivers/gpu/drm/xe/xe_wopcm_types.h b/drivers/gpu/drm/xe/xe_wopcm_types.h
index 486d850c4084..99d34837c408 100644
--- a/drivers/gpu/drm/xe/xe_wopcm_types.h
+++ b/drivers/gpu/drm/xe/xe_wopcm_types.h
@@ -16,9 +16,9 @@ struct xe_wopcm {
u32 size;
/** @guc: GuC WOPCM Region info */
struct {
- /** @base: GuC WOPCM base which is offset from WOPCM base */
+ /** @guc.base: GuC WOPCM base which is offset from WOPCM base */
u32 base;
- /** @size: Size of the GuC WOPCM region */
+ /** @guc.size: Size of the GuC WOPCM region */
u32 size;
} guc;
};
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 407bc07cec69..8a39b3accce5 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1166,7 +1166,7 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
/* Choose clock source based on the DT clock handle. */
zynqmp_disp_avbuf_set_clocks_sources(disp, disp->dpsub->vid_clk_from_ps,
disp->dpsub->aud_clk_from_ps,
- true);
+ disp->dpsub->vid_clk_from_ps);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a0606fab0e22..1846c4971fd8 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1560,12 +1560,12 @@ disconnected:
return connector_status_disconnected;
}
-static struct edid *zynqmp_dp_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
+static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
- return drm_get_edid(connector, &dp->aux.ddc);
+ return drm_edid_read_ddc(connector, &dp->aux.ddc);
}
static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
@@ -1579,7 +1579,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_check = zynqmp_dp_bridge_atomic_check,
.detect = zynqmp_dp_bridge_detect,
- .get_edid = zynqmp_dp_bridge_get_edid,
+ .edid_read = zynqmp_dp_bridge_edid_read,
};
/* -----------------------------------------------------------------------------
@@ -1624,8 +1624,17 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
u32 status, mask;
status = zynqmp_dp_read(dp, ZYNQMP_DP_INT_STATUS);
+ /* clear status register as soon as we read it */
+ zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
mask = zynqmp_dp_read(dp, ZYNQMP_DP_INT_MASK);
- if (!(status & ~mask))
+
+ /*
+ * The status register may report events whose corresponding interrupts
+ * have been disabled. Filter those out using the interrupt mask.
+ */
+ status &= ~mask;
+
+ if (!status)
return IRQ_NONE;
/* dbg for diagnostic, but not much that the driver can do */
@@ -1634,8 +1643,6 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
if (status & ZYNQMP_DP_INT_CHBUF_OVERFLW_MASK)
dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
- zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
-
if (status & ZYNQMP_DP_INT_VBLANK_START)
zynqmp_dpsub_drm_handle_vblank(dp->dpsub);
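The hunk above reworks interrupt handling to acknowledge the status register immediately and then discard any bits whose interrupts are masked. A generic sketch of that ack-then-filter pattern, with placeholder register offsets and a hypothetical example_dev type rather than the zynqmp_dp names:

static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_dev *ed = data;	/* hypothetical driver state */
	u32 status, mask;

	status = readl(ed->iomem + EXAMPLE_INT_STATUS);
	/* Ack immediately so events raised while we run are not lost. */
	writel(status, ed->iomem + EXAMPLE_INT_STATUS);

	mask = readl(ed->iomem + EXAMPLE_INT_MASK);
	/* Keep only events whose interrupts are actually enabled. */
	status &= ~mask;
	if (!status)
		return IRQ_NONE;

	/* ... handle the remaining status bits ... */
	return IRQ_HANDLED;
}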
@@ -1721,6 +1728,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->of_node = dp->dev->of_node;
dpsub->bridge = bridge;
/*
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 84d042796d2e..783975d1384f 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -365,7 +365,7 @@ static const struct dev_pm_ops host1x_device_pm_ops = {
.restore = pm_generic_restore,
};
-struct bus_type host1x_bus_type = {
+const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
diff --git a/drivers/gpu/host1x/bus.h b/drivers/gpu/host1x/bus.h
index a4adf9abc3b4..a80ceadfeb34 100644
--- a/drivers/gpu/host1x/bus.h
+++ b/drivers/gpu/host1x/bus.h
@@ -10,7 +10,7 @@
struct bus_type;
struct host1x;
-extern struct bus_type host1x_bus_type;
+extern const struct bus_type host1x_bus_type;
int host1x_register(struct host1x *host1x);
int host1x_unregister(struct host1x *host1x);
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index d1336e438f4f..407ed9b9cf64 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -625,8 +625,7 @@ void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
struct host1x_channel *channel = cdma_to_channel(cdma);
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
- unsigned int space = cdma->slots_free;
- unsigned int needed = 2, extra = 0;
+ unsigned int space, needed = 2, extra = 0;
if (host1x_debug_trace_cmdbuf)
trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 42fd504abbcd..89983d7d73ca 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
.reserve_vblank_syncpts = false,
+ .skip_reset_assert = true,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
host1x_intr_stop(host);
host1x_syncpt_save(host);
- err = reset_control_bulk_assert(host->nresets, host->resets);
- if (err) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- goto resume_host1x;
- }
+ if (!host->info->skip_reset_assert) {
+ err = reset_control_bulk_assert(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ goto resume_host1x;
+ }
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
+ }
clk_disable_unprepare(host->clk);
reset_control_bulk_release(host->nresets, host->resets);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index c8e302de7625..925a118db23f 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -116,6 +116,12 @@ struct host1x_info {
* the display driver disables VBLANK increments.
*/
bool reserve_vblank_syncpts;
+ /*
+ * On Tegra186, secure-world applications may require access to
+ * host1x during suspend/resume. To allow this, host1x must be left
+ * out of reset.
+ */
+ bool skip_reset_assert;
};
struct host1x {
diff --git a/drivers/hid/amd-sfh-hid/Kconfig b/drivers/hid/amd-sfh-hid/Kconfig
index af752dd3a340..329de5e12c1a 100644
--- a/drivers/hid/amd-sfh-hid/Kconfig
+++ b/drivers/hid/amd-sfh-hid/Kconfig
@@ -6,6 +6,7 @@ menu "AMD SFH HID Support"
config AMD_SFH_HID
tristate "AMD Sensor Fusion Hub"
depends on HID
+ depends on X86
help
If you say yes to this option, support will be included for the
AMD Sensor Fusion Hub.
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index a1950bc6e6ce..e5620d7db569 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -19,6 +19,9 @@
#define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4))
#define AMD_P2C_MSG(regno) (0x10680 + ((regno) * 4))
+#define AMD_C2P_MSG_V1(regno) (0x10900 + ((regno) * 4))
+#define AMD_P2C_MSG_V1(regno) (0x10500 + ((regno) * 4))
+
#define SENSOR_ENABLED 4
#define SENSOR_DISABLED 5
@@ -53,6 +56,9 @@ struct amd_mp2_dev {
/* mp2 active control status */
u32 mp2_acs;
struct sfh_dev_status dev_en;
+ struct work_struct work;
+ u8 init_done;
+ u8 rver;
};
struct amd_mp2_ops {
@@ -79,4 +85,14 @@ void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata);
int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata);
void amd_sfh_clear_intr(struct amd_mp2_dev *privdata);
int amd_sfh_irq_init(struct amd_mp2_dev *privdata);
+
+static inline u64 amd_get_c2p_val(struct amd_mp2_dev *mp2, u32 idx)
+{
+ return mp2->rver == 1 ? AMD_C2P_MSG_V1(idx) : AMD_C2P_MSG(idx);
+}
+
+static inline u64 amd_get_p2c_val(struct amd_mp2_dev *mp2, u32 idx)
+{
+ return mp2->rver == 1 ? AMD_P2C_MSG_V1(idx) : AMD_P2C_MSG(idx);
+}
#endif
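The two inline helpers above select the register block based on the interface revision, so callers never hard-code the offset. A short illustration, where example_ring_doorbell is a hypothetical caller (the offsets follow from the macros above: C2P message 0 sits at 0x10500 on the legacy layout and at 0x10900 when rver is 1):

static void example_ring_doorbell(struct amd_mp2_dev *mp2, u32 cmd)
{
	/* Resolves to 0x10500 (legacy) or 0x10900 (rver == 1). */
	writel(cmd, mp2->mmio + amd_get_c2p_val(mp2, 0));
}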
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2530fa98b568..9e97c26c4482 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/devm-helpers.h>
#include <linux/dma-mapping.h>
#include <linux/dmi.h>
#include <linux/interrupt.h>
@@ -35,15 +36,17 @@ static int sensor_mask_override = -1;
module_param_named(sensor_mask, sensor_mask_override, int, 0444);
MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+static bool intr_disable = true;
+
static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
{
union cmd_response cmd_resp;
- /* Get response with status within a max of 1600 ms timeout */
+ /* Get response with status within a maximum timeout of 10 seconds */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response_v2.response == sensor_sts &&
cmd_resp.response_v2.status == 0 && (sid == 0xff ||
- cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
+ cmd_resp.response_v2.sensor_id == sid)), 500, 10000000))
return cmd_resp.response_v2.response;
return SENSOR_DISABLED;
@@ -55,7 +58,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
- cmd_base.cmd_v2.intr_disable = 1;
+ cmd_base.cmd_v2.intr_disable = intr_disable;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -73,7 +76,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
- cmd_base.cmd_v2.intr_disable = 1;
+ cmd_base.cmd_v2.intr_disable = intr_disable;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -87,7 +90,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
- cmd_base.cmd_v2.intr_disable = 1;
+ cmd_base.cmd_v2.intr_disable = intr_disable;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
@@ -96,9 +99,9 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
{
- if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
- writel(0, privdata->mmio + AMD_P2C_MSG(4));
- writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+ if (readl(privdata->mmio + amd_get_p2c_val(privdata, 4))) {
+ writel(0, privdata->mmio + amd_get_p2c_val(privdata, 4));
+ writel(0xf, privdata->mmio + amd_get_p2c_val(privdata, 5));
}
}
@@ -292,6 +295,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
return 0;
}
+static int mp2_disable_intr(const struct dmi_system_id *id)
+{
+ intr_disable = false;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_sfh_table[] = {
+ {
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218104
+ */
+ .callback = mp2_disable_intr,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_nodevs[] = {
{
/*
@@ -307,6 +330,48 @@ static const struct dmi_system_id dmi_nodevs[] = {
{ }
};
+static void sfh1_1_init_work(struct work_struct *work)
+{
+ struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
+ struct pci_dev *pdev = mp2->pdev;
+ int rc;
+
+ rc = mp2->sfh1_1_ops->init(mp2);
+ if (rc) {
+ dev_err(&pdev->dev, "sfh1_1_init failed err %d\n", rc);
+ return;
+ }
+
+ amd_sfh_clear_intr(mp2);
+ mp2->init_done = 1;
+}
+
+static void sfh_init_work(struct work_struct *work)
+{
+ struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
+ struct pci_dev *pdev = mp2->pdev;
+ int rc;
+
+ rc = amd_sfh_hid_client_init(mp2);
+ if (rc) {
+ amd_sfh_clear_intr(mp2);
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed err %d\n", rc);
+ return;
+ }
+
+ amd_sfh_clear_intr(mp2);
+ mp2->init_done = 1;
+}
+
+static void amd_sfh_remove(struct pci_dev *pdev)
+{
+ struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
+
+ flush_work(&mp2->work);
+ if (mp2->init_done)
+ mp2->mp2_ops->remove(mp2);
+}
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
@@ -315,6 +380,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (dmi_first_match(dmi_nodevs))
return -ENODEV;
+ dmi_check_system(dmi_sfh_table);
+
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
if (!privdata)
return -ENOMEM;
@@ -343,10 +410,15 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
if (privdata->sfh1_1_ops) {
- rc = privdata->sfh1_1_ops->init(privdata);
+ if (boot_cpu_data.x86 >= 0x1A)
+ privdata->rver = 1;
+
+ rc = devm_work_autocancel(&pdev->dev, &privdata->work, sfh1_1_init_work);
if (rc)
return rc;
- goto init_done;
+
+ schedule_work(&privdata->work);
+ return 0;
}
mp2_select_ops(privdata);
@@ -357,33 +429,34 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
return rc;
}
- rc = amd_sfh_hid_client_init(privdata);
+ rc = devm_work_autocancel(&pdev->dev, &privdata->work, sfh_init_work);
if (rc) {
amd_sfh_clear_intr(privdata);
- if (rc != -EOPNOTSUPP)
- dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
}
-init_done:
- amd_sfh_clear_intr(privdata);
-
- return devm_add_action_or_reset(&pdev->dev, privdata->mp2_ops->remove, privdata);
+ schedule_work(&privdata->work);
+ return 0;
}
static void amd_sfh_shutdown(struct pci_dev *pdev)
{
struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
- if (mp2 && mp2->mp2_ops)
- mp2->mp2_ops->stop_all(mp2);
+ if (mp2) {
+ flush_work(&mp2->work);
+ if (mp2->init_done)
+ mp2->mp2_ops->stop_all(mp2);
+ }
}
static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
{
struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
- mp2->mp2_ops->resume(mp2);
+ flush_work(&mp2->work);
+ if (mp2->init_done)
+ mp2->mp2_ops->resume(mp2);
return 0;
}
@@ -392,7 +465,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
{
struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
- mp2->mp2_ops->suspend(mp2);
+ flush_work(&mp2->work);
+ if (mp2->init_done)
+ mp2->mp2_ops->suspend(mp2);
return 0;
}
@@ -414,6 +489,7 @@ static struct pci_driver amd_mp2_pci_driver = {
.probe = amd_mp2_pci_probe,
.driver.pm = &amd_mp2_pm_ops,
.shutdown = amd_sfh_shutdown,
+ .remove = amd_sfh_remove,
};
module_pci_driver(amd_mp2_pci_driver);
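The probe path above now defers the heavy client initialization to a work item registered with devm_work_autocancel(), and every later entry point (remove, shutdown, suspend, resume) first flushes the work and checks init_done. A hedged sketch of that pattern with hypothetical example_* names, not the driver's actual code:

#include <linux/devm-helpers.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct example_dev *ed = devm_kzalloc(&pdev->dev, sizeof(*ed), GFP_KERNEL);
	int rc;

	if (!ed)
		return -ENOMEM;
	pci_set_drvdata(pdev, ed);

	/* The work is cancelled automatically on probe failure or unbind. */
	rc = devm_work_autocancel(&pdev->dev, &ed->work, example_init_work);
	if (rc)
		return rc;

	schedule_work(&ed->work);	/* heavy init runs outside probe */
	return 0;
}

static int example_suspend(struct device *dev)
{
	struct example_dev *ed = dev_get_drvdata(dev);

	flush_work(&ed->work);		/* make sure init has finished */
	if (ed->init_done)
		example_quiesce(ed);	/* hypothetical helper */
	return 0;
}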
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 70add75fc506..05e400a4a83e 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -90,10 +90,10 @@ enum mem_use_type {
struct hpd_status {
union {
struct {
- u32 human_presence_report : 4;
- u32 human_presence_actual : 4;
- u32 probablity : 8;
u32 object_distance : 16;
+ u32 probablity : 8;
+ u32 human_presence_actual : 4;
+ u32 human_presence_report : 4;
} shpd;
u32 val;
};
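The reordered bitfields above match the register's little-endian layout, with object_distance in the low 16 bits. A hedged illustration of the resulting decoding on the x86 ABI this driver targets (example_decode_hpd is hypothetical):

static void example_decode_hpd(u32 raw)
{
	struct hpd_status st = { .val = raw };

	/* Manual extraction equivalent to the new field order. */
	u32 distance = raw & 0xffff;		/* bits  0..15 */
	u32 prob     = (raw >> 16) & 0xff;	/* bits 16..23 */
	u32 actual   = (raw >> 24) & 0xf;	/* bits 24..27 */
	u32 report   = (raw >> 28) & 0xf;	/* bits 28..31 */

	WARN_ON(st.shpd.object_distance != distance ||
		st.shpd.probablity != prob ||
		st.shpd.human_presence_actual != actual ||
		st.shpd.human_presence_report != report);
}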
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
index 33fbdde8aff0..c8916afefa62 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
@@ -251,7 +251,7 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
break;
case HPD_IDX:
get_common_inputs(&hpd_input.common_property, report_id);
- hpdstatus.val = readl(mp2->mmio + AMD_C2P_MSG(4));
+ hpdstatus.val = readl(mp2->mmio + amd_get_c2p_val(mp2, 4));
hpd_input.human_presence = hpdstatus.shpd.presence;
report_size = sizeof(hpd_input);
memcpy(input_report, &hpd_input, sizeof(hpd_input));
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 9dbe6f4cb294..5b24d5f63701 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -172,7 +172,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
if (rc)
goto cleanup;
- writel(0, privdata->mmio + AMD_P2C_MSG(0));
+ writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
mp2_ops->start(privdata, info);
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
@@ -298,7 +298,7 @@ static void amd_sfh_set_ops(struct amd_mp2_dev *mp2)
int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
{
- u32 phy_base = readl(mp2->mmio + AMD_C2P_MSG(22));
+ u32 phy_base = readl(mp2->mmio + amd_get_c2p_val(mp2, 22));
struct device *dev = &mp2->pdev->dev;
struct sfh_base_info binfo;
int rc;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index ae36312bc236..2de2668a0277 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -20,7 +20,7 @@ static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
struct sfh_cmd_response cmd_resp;
/* Get response with status within a max of 10000 ms timeout */
- if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
+ if (!readl_poll_timeout(mp2->mmio + amd_get_p2c_val(mp2, 0), cmd_resp.resp,
(cmd_resp.response.response == 0 &&
cmd_resp.response.cmd_id == cmd_id && (sid == 0xff ||
cmd_resp.response.sensor_id == sid)), 500, 10000000))
@@ -39,7 +39,7 @@ static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor
cmd_base.cmd.sub_cmd_value = 1;
cmd_base.cmd.sensor_id = info.sensor_idx;
- writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+ writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
}
static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
@@ -52,8 +52,8 @@ static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.cmd.sub_cmd_value = 1;
cmd_base.cmd.sensor_id = sensor_idx;
- writeq(0x0, privdata->mmio + AMD_C2P_MSG(1));
- writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+ writeq(0x0, privdata->mmio + amd_get_c2p_val(privdata, 1));
+ writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
}
static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
@@ -66,7 +66,7 @@ static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
/* 0xf indicates all sensors */
cmd_base.cmd.sensor_id = 0xf;
- writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+ writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
}
static struct amd_mp2_ops amd_sfh_ops = {
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 470ae2c29c94..e630caf644e8 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -176,9 +176,9 @@ __bpf_kfunc_end_defs();
* The following set contains all functions we agree BPF programs
* can use.
*/
-BTF_SET8_START(hid_bpf_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
-BTF_SET8_END(hid_bpf_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
.owner = THIS_MODULE,
@@ -487,12 +487,12 @@ static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
};
/* for syscall HID-BPF */
-BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
-BTF_SET8_END(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index b9c7c0ed7bcc..bd022e004356 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -79,7 +79,6 @@ struct apple_non_apple_keyboard {
struct apple_sc_backlight {
struct led_classdev cdev;
struct hid_device *hdev;
- unsigned short backlight_off, backlight_on_min, backlight_on_max;
};
struct apple_sc {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 828a5c022c64..8376fb5e2d0b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -430,6 +430,7 @@
#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02
+#define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -1146,8 +1147,15 @@
#define USB_DEVICE_ID_SAITEK_X65 0x0b6a
#define USB_VENDOR_ID_SAMSUNG 0x0419
+#define USB_VENDOR_ID_SAMSUNG_ELECTRONICS 0x04e8
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD 0x7021
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD 0xa000
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE 0xa004
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_BOOKCOVER 0xa005
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD 0xa006
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_MULTI_HOGP_KBD 0xa064
#define USB_VENDOR_ID_SEMICO 0x1a2c
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c8b20d44b147..e03d300d2bac 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -411,6 +411,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
+ HID_BATTERY_QUIRK_AVOID_QUERY },
{}
};
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 149a3c74346b..f86c1ea83a03 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -54,10 +54,10 @@ struct lenovo_drvdata {
/* 0: Up
* 1: Down (undecided)
* 2: Scrolling
- * 3: Patched firmware, disable workaround
*/
u8 middlebutton_state;
bool fn_lock;
+ bool middleclick_workaround_cptkbd;
};
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -621,6 +621,36 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
return count;
}
+static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ cptkbd_data->middleclick_workaround_cptkbd);
+}
+
+static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ cptkbd_data->middleclick_workaround_cptkbd = !!value;
+
+ return count;
+}
+
static struct device_attribute dev_attr_fn_lock =
__ATTR(fn_lock, S_IWUSR | S_IRUGO,
@@ -632,10 +662,16 @@ static struct device_attribute dev_attr_sensitivity_cptkbd =
attr_sensitivity_show_cptkbd,
attr_sensitivity_store_cptkbd);
+static struct device_attribute dev_attr_middleclick_workaround_cptkbd =
+ __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO,
+ attr_middleclick_workaround_show_cptkbd,
+ attr_middleclick_workaround_store_cptkbd);
+
static struct attribute *lenovo_attributes_cptkbd[] = {
&dev_attr_fn_lock.attr,
&dev_attr_sensitivity_cptkbd.attr,
+ &dev_attr_middleclick_workaround_cptkbd.attr,
NULL
};
@@ -686,23 +722,7 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
{
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
- if (cptkbd_data->middlebutton_state != 3) {
- /* REL_X and REL_Y events during middle button pressed
- * are only possible on patched, bug-free firmware
- * so set middlebutton_state to 3
- * to never apply workaround anymore
- */
- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
- cptkbd_data->middlebutton_state == 1 &&
- usage->type == EV_REL &&
- (usage->code == REL_X || usage->code == REL_Y)) {
- cptkbd_data->middlebutton_state = 3;
- /* send middle button press which was hold before */
- input_event(field->hidinput->input,
- EV_KEY, BTN_MIDDLE, 1);
- input_sync(field->hidinput->input);
- }
-
+ if (cptkbd_data->middleclick_workaround_cptkbd) {
/* "wheel" scroll events */
if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
usage->code == REL_HWHEEL)) {
@@ -1166,6 +1186,7 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
cptkbd_data->middlebutton_state = 0;
cptkbd_data->fn_lock = true;
cptkbd_data->sensitivity = 0x05;
+ cptkbd_data->middleclick_workaround_cptkbd = true;
lenovo_features_set_cptkbd(hdev);
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index b7e1949f3cf7..109735b89b7a 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -41,10 +41,6 @@
* I'm sure these are effects that I don't know enough about them
*/
-struct lg3ff_device {
- struct hid_report *report;
-};
-
static int hid_lg3ff_play(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3e91e4d6ba6f..04a014cd2a2f 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -130,7 +130,6 @@ struct mt_application {
* > 1 means hybrid (multitouch) protocol
*/
- __s32 dev_time; /* the scan time provided by the device */
unsigned long jiffies; /* the frame's jiffies */
int timestamp; /* the timestamp to be sent */
int prev_scantime; /* scantime reported previously */
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index ccc4032fb2b0..ab5953fc2436 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -667,16 +667,6 @@ struct joycon_ctlr {
* These helpers are most useful early during the HID probe or in conjunction
* with the capability helpers below.
*/
-static inline bool joycon_device_is_left_joycon(struct joycon_ctlr *ctlr)
-{
- return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL;
-}
-
-static inline bool joycon_device_is_right_joycon(struct joycon_ctlr *ctlr)
-{
- return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR;
-}
-
static inline bool joycon_device_is_procon(struct joycon_ctlr *ctlr)
{
return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_PROCON;
@@ -764,18 +754,6 @@ static inline bool joycon_type_is_right_nescon(struct joycon_ctlr *ctlr)
return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESR;
}
-static inline bool joycon_type_has_left_controls(struct joycon_ctlr *ctlr)
-{
- return joycon_type_is_left_joycon(ctlr) ||
- joycon_type_is_procon(ctlr);
-}
-
-static inline bool joycon_type_has_right_controls(struct joycon_ctlr *ctlr)
-{
- return joycon_type_is_right_joycon(ctlr) ||
- joycon_type_is_procon(ctlr);
-}
-
static inline bool joycon_type_is_any_joycon(struct joycon_ctlr *ctlr)
{
return joycon_type_is_left_joycon(ctlr) ||
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index c16d2ba6ea16..a593ed62c969 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -32,13 +32,6 @@
struct pcmidi_snd;
-struct pk_device {
- unsigned long quirks;
-
- struct hid_device *hdev;
- struct pcmidi_snd *pm; /* pcmidi device context */
-};
-
struct pcmidi_sustain {
unsigned long in_use;
struct pcmidi_snd *pm;
@@ -50,7 +43,7 @@ struct pcmidi_sustain {
#define PCMIDI_SUSTAINED_MAX 32
struct pcmidi_snd {
- struct pk_device *pk;
+ struct hid_device *hdev;
unsigned short ifnum;
struct hid_report *pcmidi_report6;
struct input_dev *input_ep82;
@@ -66,9 +59,7 @@ struct pcmidi_snd {
struct snd_card *card;
struct snd_rawmidi *rwmidi;
struct snd_rawmidi_substream *in_substream;
- struct snd_rawmidi_substream *out_substream;
unsigned long in_triggered;
- unsigned long out_active;
};
#define PK_QUIRK_NOGET 0x00010000
@@ -100,11 +91,11 @@ static ssize_t show_channel(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
- dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
+ dbg_hid("pcmidi sysfs read channel=%u\n", pm->midi_channel);
- return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
+ return sprintf(buf, "%u (min:%u, max:%u)\n", pm->midi_channel,
PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
}
@@ -113,13 +104,13 @@ static ssize_t store_channel(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hdev = to_hid_device(dev);
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
unsigned channel = 0;
if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
dbg_hid("pcmidi sysfs write channel=%u\n", channel);
- pk->pm->midi_channel = channel;
+ pm->midi_channel = channel;
return strlen(buf);
}
return -EINVAL;
@@ -137,11 +128,11 @@ static ssize_t show_sustain(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
- dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
+ dbg_hid("pcmidi sysfs read sustain=%u\n", pm->midi_sustain);
- return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
+ return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pm->midi_sustain,
PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
}
@@ -150,15 +141,14 @@ static ssize_t store_sustain(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hdev = to_hid_device(dev);
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
unsigned sustain = 0;
if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
- pk->pm->midi_sustain = sustain;
- pk->pm->midi_sustain_mode =
- (0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
+ pm->midi_sustain = sustain;
+ pm->midi_sustain_mode = (0 == sustain || !pm->midi_mode) ? 0 : 1;
return strlen(buf);
}
return -EINVAL;
@@ -176,11 +166,11 @@ static ssize_t show_octave(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
- dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
+ dbg_hid("pcmidi sysfs read octave=%d\n", pm->midi_octave);
- return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
+ return sprintf(buf, "%d (min:%d, max:%d)\n", pm->midi_octave,
PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
}
@@ -189,14 +179,14 @@ static ssize_t store_octave(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hdev = to_hid_device(dev);
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
int octave = 0;
if (sscanf(buf, "%d", &octave) > 0 &&
octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
dbg_hid("pcmidi sysfs write octave=%d\n", octave);
- pk->pm->midi_octave = octave;
+ pm->midi_octave = octave;
return strlen(buf);
}
return -EINVAL;
@@ -270,7 +260,7 @@ static void stop_sustain_timers(struct pcmidi_snd *pm)
static int pcmidi_get_output_report(struct pcmidi_snd *pm)
{
- struct hid_device *hdev = pm->pk->hdev;
+ struct hid_device *hdev = pm->hdev;
struct hid_report *report;
list_for_each_entry(report,
@@ -295,7 +285,7 @@ static int pcmidi_get_output_report(struct pcmidi_snd *pm)
static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
{
- struct hid_device *hdev = pm->pk->hdev;
+ struct hid_device *hdev = pm->hdev;
struct hid_report *report = pm->pcmidi_report6;
report->field[0]->value[0] = 0x01;
report->field[0]->value[1] = state;
@@ -622,7 +612,7 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
/* Setup sound card */
- err = snd_card_new(&pm->pk->hdev->dev, index[dev], id[dev],
+ err = snd_card_new(&pm->hdev->dev, index[dev], id[dev],
THIS_MODULE, 0, &card);
if (err < 0) {
pk_error("failed to create pc-midi sound card\n");
@@ -660,7 +650,7 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
&pcmidi_in_ops);
/* create sysfs variables */
- err = device_create_file(&pm->pk->hdev->dev,
+ err = device_create_file(&pm->hdev->dev,
sysfs_device_attr_channel);
if (err < 0) {
pk_error("failed to create sysfs attribute channel: error %d\n",
@@ -668,7 +658,7 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
goto fail;
}
- err = device_create_file(&pm->pk->hdev->dev,
+ err = device_create_file(&pm->hdev->dev,
sysfs_device_attr_sustain);
if (err < 0) {
pk_error("failed to create sysfs attribute sustain: error %d\n",
@@ -676,7 +666,7 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
goto fail_attr_sustain;
}
- err = device_create_file(&pm->pk->hdev->dev,
+ err = device_create_file(&pm->hdev->dev,
sysfs_device_attr_octave);
if (err < 0) {
pk_error("failed to create sysfs attribute octave: error %d\n",
@@ -706,11 +696,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
fail_register:
stop_sustain_timers(pm);
- device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
+ device_remove_file(&pm->hdev->dev, sysfs_device_attr_octave);
fail_attr_octave:
- device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
+ device_remove_file(&pm->hdev->dev, sysfs_device_attr_sustain);
fail_attr_sustain:
- device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
+ device_remove_file(&pm->hdev->dev, sysfs_device_attr_channel);
fail:
if (pm->card) {
snd_card_free(pm->card);
@@ -724,12 +714,9 @@ static int pcmidi_snd_terminate(struct pcmidi_snd *pm)
if (pm->card) {
stop_sustain_timers(pm);
- device_remove_file(&pm->pk->hdev->dev,
- sysfs_device_attr_channel);
- device_remove_file(&pm->pk->hdev->dev,
- sysfs_device_attr_sustain);
- device_remove_file(&pm->pk->hdev->dev,
- sysfs_device_attr_octave);
+ device_remove_file(&pm->hdev->dev, sysfs_device_attr_channel);
+ device_remove_file(&pm->hdev->dev, sysfs_device_attr_sustain);
+ device_remove_file(&pm->hdev->dev, sysfs_device_attr_octave);
snd_card_disconnect(pm->card);
snd_card_free_when_closed(pm->card);
@@ -759,10 +746,7 @@ static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- struct pk_device *pk = hid_get_drvdata(hdev);
- struct pcmidi_snd *pm;
-
- pm = pk->pm;
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
1 == pm->ifnum) {
@@ -777,16 +761,16 @@ static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
- struct pk_device *pk = hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
int ret = 0;
- if (1 == pk->pm->ifnum) {
+ if (1 == pm->ifnum) {
if (report->id == data[0])
switch (report->id) {
case 0x01: /* midi keys (qwerty)*/
case 0x03: /* midi keyboard (musical)*/
case 0x04: /* extra/midi keys (qwerty)*/
- ret = pcmidi_handle_report(pk->pm,
+ ret = pcmidi_handle_report(pm,
report->id, data, size);
break;
}
@@ -801,8 +785,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct usb_interface *intf;
unsigned short ifnum;
unsigned long quirks = id->driver_data;
- struct pk_device *pk;
- struct pcmidi_snd *pm = NULL;
+ struct pcmidi_snd *pm;
if (!hid_is_usb(hdev))
return -EINVAL;
@@ -810,26 +793,16 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
intf = to_usb_interface(hdev->dev.parent);
ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- pk = kzalloc(sizeof(*pk), GFP_KERNEL);
- if (pk == NULL) {
- hid_err(hdev, "can't alloc descriptor\n");
- return -ENOMEM;
- }
-
- pk->hdev = hdev;
-
pm = kzalloc(sizeof(*pm), GFP_KERNEL);
if (pm == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
- ret = -ENOMEM;
- goto err_free_pk;
+ return -ENOMEM;
}
- pm->pk = pk;
- pk->pm = pm;
+ pm->hdev = hdev;
pm->ifnum = ifnum;
- hid_set_drvdata(hdev, pk);
+ hid_set_drvdata(hdev, pm);
ret = hid_parse(hdev);
if (ret) {
@@ -856,26 +829,18 @@ err_stop:
hid_hw_stop(hdev);
err_free:
kfree(pm);
-err_free_pk:
- kfree(pk);
return ret;
}
static void pk_remove(struct hid_device *hdev)
{
- struct pk_device *pk = hid_get_drvdata(hdev);
- struct pcmidi_snd *pm;
-
- pm = pk->pm;
- if (pm) {
- pcmidi_snd_terminate(pm);
- kfree(pm);
- }
+ struct pcmidi_snd *pm = hid_get_drvdata(hdev);
+ pcmidi_snd_terminate(pm);
hid_hw_stop(hdev);
- kfree(pk);
+ kfree(pm);
}
static const struct hid_device_id pk_devices[] = {
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index cf5992e97094..08fb25b8459a 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -58,33 +58,25 @@ static inline void samsung_irda_dev_trace(struct hid_device *hdev,
static __u8 *samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
- rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
- rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
+ if (*rsize == 184 && !memcmp(&rdesc[175], "\x25\x40\x75\x30\x95\x01", 6) &&
rdesc[182] == 0x40) {
samsung_irda_dev_trace(hdev, 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
rdesc[182] = 0x42;
- } else
- if (*rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
- rdesc[194] == 0x25 && rdesc[195] == 0x12) {
+ } else if (*rsize == 203 && !memcmp(&rdesc[192], "\x15\x00\x25\x12", 4)) {
samsung_irda_dev_trace(hdev, 203);
- rdesc[193] = 0x1;
- rdesc[195] = 0xf;
- } else
- if (*rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
- rdesc[126] == 0x25 && rdesc[127] == 0x11) {
+ rdesc[193] = 0x01;
+ rdesc[195] = 0x0f;
+ } else if (*rsize == 135 && !memcmp(&rdesc[124], "\x15\x00\x25\x11", 4)) {
samsung_irda_dev_trace(hdev, 135);
- rdesc[125] = 0x1;
- rdesc[127] = 0xe;
- } else
- if (*rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
- rdesc[162] == 0x25 && rdesc[163] == 0x01) {
+ rdesc[125] = 0x01;
+ rdesc[127] = 0x0e;
+ } else if (*rsize == 171 && !memcmp(&rdesc[160], "\x15\x00\x25\x01", 4)) {
samsung_irda_dev_trace(hdev, 171);
- rdesc[161] = 0x1;
- rdesc[163] = 0x3;
+ rdesc[161] = 0x01;
+ rdesc[163] = 0x03;
}
return rdesc;
}
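For readers unfamiliar with the pattern, the memcmp() calls above match the same descriptor bytes as the old per-element checks, just in one call against a byte-string literal. A minimal standalone sketch of the 203-byte case (plain C, not driver code; the function name is made up for illustration):

#include <string.h>

/* Mirrors the 203-byte branch above: the four expected bytes at
 * offset 192 are matched with a single memcmp() instead of four
 * separate rdesc[i] == 0xNN comparisons.
 */
static int is_203_byte_irda_desc(const unsigned char *rdesc, unsigned int rsize)
{
	static const unsigned char pat[] = { 0x15, 0x00, 0x25, 0x12 };

	return rsize == 203 && !memcmp(&rdesc[192], pat, sizeof(pat));
}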
@@ -99,7 +91,7 @@ static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- if (1 != ifnum || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE))
+ if (ifnum != 1 || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE))
return 0;
dbg_hid("samsung wireless keyboard/mouse input mapping event [0x%x]\n",
@@ -107,17 +99,39 @@ static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
switch (usage->hid & HID_USAGE) {
/* report 2 */
- case 0x183: samsung_kbd_mouse_map_key_clear(KEY_MEDIA); break;
- case 0x195: samsung_kbd_mouse_map_key_clear(KEY_EMAIL); break;
- case 0x196: samsung_kbd_mouse_map_key_clear(KEY_CALC); break;
- case 0x197: samsung_kbd_mouse_map_key_clear(KEY_COMPUTER); break;
- case 0x22b: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break;
- case 0x22c: samsung_kbd_mouse_map_key_clear(KEY_WWW); break;
- case 0x22d: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
- case 0x22e: samsung_kbd_mouse_map_key_clear(KEY_FORWARD); break;
- case 0x22f: samsung_kbd_mouse_map_key_clear(KEY_FAVORITES); break;
- case 0x230: samsung_kbd_mouse_map_key_clear(KEY_REFRESH); break;
- case 0x231: samsung_kbd_mouse_map_key_clear(KEY_STOP); break;
+ case 0x183:
+ samsung_kbd_mouse_map_key_clear(KEY_MEDIA);
+ break;
+ case 0x195:
+ samsung_kbd_mouse_map_key_clear(KEY_EMAIL);
+ break;
+ case 0x196:
+ samsung_kbd_mouse_map_key_clear(KEY_CALC);
+ break;
+ case 0x197:
+ samsung_kbd_mouse_map_key_clear(KEY_COMPUTER);
+ break;
+ case 0x22b:
+ samsung_kbd_mouse_map_key_clear(KEY_SEARCH);
+ break;
+ case 0x22c:
+ samsung_kbd_mouse_map_key_clear(KEY_WWW);
+ break;
+ case 0x22d:
+ samsung_kbd_mouse_map_key_clear(KEY_BACK);
+ break;
+ case 0x22e:
+ samsung_kbd_mouse_map_key_clear(KEY_FORWARD);
+ break;
+ case 0x22f:
+ samsung_kbd_mouse_map_key_clear(KEY_FAVORITES);
+ break;
+ case 0x230:
+ samsung_kbd_mouse_map_key_clear(KEY_REFRESH);
+ break;
+ case 0x231:
+ samsung_kbd_mouse_map_key_clear(KEY_STOP);
+ break;
default:
return 0;
}
@@ -125,10 +139,340 @@ static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
return 1;
}
+static int samsung_kbd_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (!(HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE) ||
+ HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)))
+ return 0;
+
+ dbg_hid("samsung wireless keyboard input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) {
+ set_bit(EV_REP, hi->input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ case 0x32:
+ samsung_kbd_mouse_map_key_clear(KEY_BACKSLASH);
+ break;
+ case 0x64:
+ samsung_kbd_mouse_map_key_clear(KEY_102ND);
+ break;
+ /* Only for BR keyboard */
+ case 0x87:
+ samsung_kbd_mouse_map_key_clear(KEY_RO);
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ if (HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)) {
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ /* MENU */
+ case 0x040:
+ samsung_kbd_mouse_map_key_clear(KEY_MENU);
+ break;
+ case 0x18a:
+ samsung_kbd_mouse_map_key_clear(KEY_MAIL);
+ break;
+ case 0x196:
+ samsung_kbd_mouse_map_key_clear(KEY_WWW);
+ break;
+ case 0x19e:
+ samsung_kbd_mouse_map_key_clear(KEY_SCREENLOCK);
+ break;
+ case 0x221:
+ samsung_kbd_mouse_map_key_clear(KEY_SEARCH);
+ break;
+ case 0x223:
+ samsung_kbd_mouse_map_key_clear(KEY_HOMEPAGE);
+ break;
+ /* Smart Voice Key */
+ case 0x300:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY13);
+ break;
+ /* RECENTAPPS */
+ case 0x301:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY1);
+ break;
+ /* APPLICATION */
+ case 0x302:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY2);
+ break;
+ /* Voice search */
+ case 0x305:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY4);
+ break;
+ /* QPANEL on/off */
+ case 0x306:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY5);
+ break;
+ /* SIP on/off */
+ case 0x307:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY3);
+ break;
+ /* LANG */
+ case 0x308:
+ samsung_kbd_mouse_map_key_clear(KEY_LANGUAGE);
+ break;
+ case 0x30a:
+ samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSDOWN);
+ break;
+ case 0x30b:
+ samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSUP);
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int samsung_gamepad_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (!(HID_UP_BUTTON == (usage->hid & HID_USAGE_PAGE) ||
+ HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)))
+ return 0;
+
+ dbg_hid("samsung wireless gamepad input mapping event [0x%x], %ld, %ld, [0x%x]\n",
+ usage->hid & HID_USAGE, hi->input->evbit[0], hi->input->absbit[0], usage->hid & HID_USAGE_PAGE);
+
+ if (HID_UP_BUTTON == (usage->hid & HID_USAGE_PAGE)) {
+ switch (usage->hid & HID_USAGE) {
+ case 0x01:
+ samsung_kbd_mouse_map_key_clear(BTN_A);
+ break;
+ case 0x02:
+ samsung_kbd_mouse_map_key_clear(BTN_B);
+ break;
+ case 0x03:
+ samsung_kbd_mouse_map_key_clear(BTN_C);
+ break;
+ case 0x04:
+ samsung_kbd_mouse_map_key_clear(BTN_X);
+ break;
+ case 0x05:
+ samsung_kbd_mouse_map_key_clear(BTN_Y);
+ break;
+ case 0x06:
+ samsung_kbd_mouse_map_key_clear(BTN_Z);
+ break;
+ case 0x07:
+ samsung_kbd_mouse_map_key_clear(BTN_TL);
+ break;
+ case 0x08:
+ samsung_kbd_mouse_map_key_clear(BTN_TR);
+ break;
+ case 0x09:
+ samsung_kbd_mouse_map_key_clear(BTN_TL2);
+ break;
+ case 0x0a:
+ samsung_kbd_mouse_map_key_clear(BTN_TR2);
+ break;
+ case 0x0b:
+ samsung_kbd_mouse_map_key_clear(BTN_SELECT);
+ break;
+ case 0x0c:
+ samsung_kbd_mouse_map_key_clear(BTN_START);
+ break;
+ case 0x0d:
+ samsung_kbd_mouse_map_key_clear(BTN_MODE);
+ break;
+ case 0x0e:
+ samsung_kbd_mouse_map_key_clear(BTN_THUMBL);
+ break;
+ case 0x0f:
+ samsung_kbd_mouse_map_key_clear(BTN_THUMBR);
+ break;
+ case 0x10:
+ samsung_kbd_mouse_map_key_clear(0x13f);
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ if (HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)) {
+ switch (usage->hid & HID_USAGE) {
+ case 0x040:
+ samsung_kbd_mouse_map_key_clear(KEY_MENU);
+ break;
+ case 0x223:
+ samsung_kbd_mouse_map_key_clear(KEY_HOMEPAGE);
+ break;
+ case 0x224:
+ samsung_kbd_mouse_map_key_clear(KEY_BACK);
+ break;
+
+ /* Screen Capture */
+ case 0x303:
+ samsung_kbd_mouse_map_key_clear(KEY_SYSRQ);
+ break;
+
+ default:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int samsung_actionmouse_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+
+ dbg_hid("samsung wireless actionmouse input mapping event [0x%x], [0x%x], %ld, %ld, [0x%x]\n",
+ usage->hid, usage->hid & HID_USAGE, hi->input->evbit[0], hi->input->absbit[0],
+ usage->hid & HID_USAGE_PAGE);
+
+ if (((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) && ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON))
+ return 0;
+
+ switch (usage->hid & HID_USAGE) {
+ case 0x301:
+ samsung_kbd_mouse_map_key_clear(254);
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int samsung_universal_kbd_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (!(HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE) ||
+ HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)))
+ return 0;
+
+ dbg_hid("samsung wireless keyboard input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) {
+ set_bit(EV_REP, hi->input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ case 0x32:
+ samsung_kbd_mouse_map_key_clear(KEY_BACKSLASH);
+ break;
+ case 0x64:
+ samsung_kbd_mouse_map_key_clear(KEY_102ND);
+ break;
+ /* Only for BR keyboard */
+ case 0x87:
+ samsung_kbd_mouse_map_key_clear(KEY_RO);
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ if (HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)) {
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ /* MENU */
+ case 0x040:
+ samsung_kbd_mouse_map_key_clear(KEY_MENU);
+ break;
+ case 0x18a:
+ samsung_kbd_mouse_map_key_clear(KEY_MAIL);
+ break;
+ case 0x196:
+ samsung_kbd_mouse_map_key_clear(KEY_WWW);
+ break;
+ case 0x19e:
+ samsung_kbd_mouse_map_key_clear(KEY_SCREENLOCK);
+ break;
+ case 0x221:
+ samsung_kbd_mouse_map_key_clear(KEY_SEARCH);
+ break;
+ case 0x223:
+ samsung_kbd_mouse_map_key_clear(KEY_HOMEPAGE);
+ break;
+ /* RECENTAPPS */
+ case 0x301:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY1);
+ break;
+ /* APPLICATION */
+ case 0x302:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY2);
+ break;
+ /* Voice search */
+ case 0x305:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY4);
+ break;
+ /* QPANEL on/off */
+ case 0x306:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY5);
+ break;
+ /* SIP on/off */
+ case 0x307:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY3);
+ break;
+ /* LANG */
+ case 0x308:
+ samsung_kbd_mouse_map_key_clear(KEY_LANGUAGE);
+ break;
+ case 0x30a:
+ samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSDOWN);
+ break;
+ case 0x070:
+ samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSDOWN);
+ break;
+ case 0x30b:
+ samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSUP);
+ break;
+ case 0x06f:
+ samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSUP);
+ break;
+ /* S-Finder */
+ case 0x304:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY7);
+ break;
+ /* Screen Capture */
+ case 0x303:
+ samsung_kbd_mouse_map_key_clear(KEY_SYSRQ);
+ break;
+ /* Multi Window */
+ case 0x309:
+ samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY9);
+ break;
+ /* HotKey App 1 */
+ case 0x071:
+ samsung_kbd_mouse_map_key_clear(0x2f5);
+ break;
+ /* HotKey App 2 */
+ case 0x072:
+ samsung_kbd_mouse_map_key_clear(0x2f6);
+ break;
+ /* HotKey App 3 */
+ case 0x073:
+ samsung_kbd_mouse_map_key_clear(0x2f7);
+ break;
+ /* Dex */
+ case 0x06e:
+ samsung_kbd_mouse_map_key_clear(0x2bd);
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product)
+ if (hdev->product == USB_DEVICE_ID_SAMSUNG_IR_REMOTE && hid_is_usb(hdev))
rdesc = samsung_irda_report_fixup(hdev, rdesc, rsize);
return rdesc;
}
@@ -139,9 +483,24 @@ static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
{
int ret = 0;
- if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE == hdev->product)
+ if (hdev->product == USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE && hid_is_usb(hdev))
ret = samsung_kbd_mouse_input_mapping(hdev,
hi, field, usage, bit, max);
+ else if (hdev->product == USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD)
+ ret = samsung_kbd_input_mapping(hdev,
+ hi, field, usage, bit, max);
+ else if (hdev->product == USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD)
+ ret = samsung_gamepad_input_mapping(hdev,
+ hi, field, usage, bit, max);
+ else if (hdev->product == USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE)
+ ret = samsung_actionmouse_input_mapping(hdev,
+ hi, field, usage, bit, max);
+ else if (hdev->product == USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD)
+ ret = samsung_universal_kbd_input_mapping(hdev,
+ hi, field, usage, bit, max);
+ else if (hdev->product == USB_DEVICE_ID_SAMSUNG_WIRELESS_MULTI_HOGP_KBD)
+ ret = samsung_universal_kbd_input_mapping(hdev,
+ hi, field, usage, bit, max);
return ret;
}
@@ -152,16 +511,17 @@ static int samsung_probe(struct hid_device *hdev,
int ret;
unsigned int cmask = HID_CONNECT_DEFAULT;
- if (!hid_is_usb(hdev))
- return -EINVAL;
-
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err_free;
}
- if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) {
+ if (hdev->product == USB_DEVICE_ID_SAMSUNG_IR_REMOTE) {
+ if (!hid_is_usb(hdev)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
if (hdev->rsize == 184) {
/* disable hidinput, force hiddev */
cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
@@ -183,6 +543,11 @@ err_free:
static const struct hid_device_id samsung_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_MULTI_HOGP_KBD) },
{ }
};
MODULE_DEVICE_TABLE(hid, samsung_devices);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index e99f3a3c65e1..f89b300417d7 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -34,6 +34,7 @@
#define RPL_S_DEVICE_ID 0x7A78
#define MTL_P_DEVICE_ID 0x7E45
#define ARL_H_DEVICE_ID 0x7745
+#define ARL_S_DEVICE_ID 0x7F78
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 65e7eeb2fa64..56bd4f02f319 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -45,6 +45,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_S_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index fbe10fbc5769..a44367aef621 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2990,11 +2990,11 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
wacom_wac_battery_pre_report(hdev, report);
- if (pad_in_hid_field && wacom->wacom_wac.pad_input)
+ if (pad_in_hid_field && wacom_wac->pad_input)
wacom_wac_pad_pre_report(hdev, report);
- if (pen_in_hid_field && wacom->wacom_wac.pen_input)
+ if (pen_in_hid_field && wacom_wac->pen_input)
wacom_wac_pen_pre_report(hdev, report);
- if (finger_in_hid_field && wacom->wacom_wac.touch_input)
+ if (finger_in_hid_field && wacom_wac->touch_input)
wacom_wac_finger_pre_report(hdev, report);
for (r = 0; r < report->maxfield; r++) {
@@ -3010,7 +3010,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
wacom_wac_battery_report(hdev, report);
- if (true_pad && wacom->wacom_wac.pad_input)
+ if (true_pad && wacom_wac->pad_input)
wacom_wac_pad_report(hdev, report, field);
}
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index e63b1e806e34..6ec499841f70 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -309,7 +309,6 @@ struct hid_data {
bool confidence;
int x;
int y;
- int pressure;
int width;
int height;
int id;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index da6a7abd584f..10926359e6d2 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -114,9 +114,10 @@ enum {
* @netdev: Phonet network device
* @txqueue: TX data queue
* @cmdqueue: Queue of free commands
+ * @work: &struct work_struct for scheduled work
* @cl: HSI client own reference
* @link: Link for ssip_list
- * @tx_usecount: Refcount to keep track the slaves that use the wake line
+ * @tx_usecnt: Refcount to keep track of the slaves that use the wake line
* @channel_id_cmd: HSI channel id for command stream
* @channel_id_data: HSI channel id for data stream
*/
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index acbf82f755a8..e3beeac8aee5 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -48,7 +48,7 @@ static int hsi_bus_match(struct device *dev, struct device_driver *driver)
return false;
}
-static struct bus_type hsi_bus_type = {
+static const struct bus_type hsi_bus_type = {
.name = "hsi",
.dev_groups = hsi_bus_dev_groups,
.match = hsi_bus_match,
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56f7e06c673e..adbf674355b2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
- /* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_header) -
sizeof(struct gpa_range);
+ pfncount = umin(pagecount, pfnsize / sizeof(u64));
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+ *msginfo = msgheader;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit in a body message */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
pfncount = pfnsize / sizeof(u64);
- if (pagecount > pfncount) {
- /* we need a gpadl body */
- /* fill in the header */
+ /*
+ * If pfnleft is zero, everything fits in the header and no body
+ * messages are needed
+ */
+ while (pfnleft) {
+ pfncurr = umin(pfncount, pfnleft);
msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pfncount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (!msgheader)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
- *msginfo = msgheader;
-
- pfnsum = pfncount;
- pfnleft = pagecount - pfncount;
-
- /* how many pfns can we fit */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_body);
- pfncount = pfnsize / sizeof(u64);
-
- /* fill in the body */
- while (pfnleft) {
- if (pfnleft > pfncount)
- pfncurr = pfncount;
- else
- pfncurr = pfnleft;
-
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_body) +
- pfncurr * sizeof(u64);
- msgbody = kzalloc(msgsize, GFP_KERNEL);
-
- if (!msgbody) {
- struct vmbus_channel_msginfo *pos = NULL;
- struct vmbus_channel_msginfo *tmp = NULL;
- /*
- * Free up all the allocated messages.
- */
- list_for_each_entry_safe(pos, tmp,
- &msgheader->submsglist,
- msglistentry) {
-
- list_del(&pos->msglistentry);
- kfree(pos);
- }
-
- goto nomem;
- }
-
- msgbody->msgsize = msgsize;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->msg;
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
/*
- * Gpadl is u32 and we are using a pointer which could
- * be 64-bit
- * This is governed by the guest/host protocol and
- * so the hypervisor guarantees that this is ok.
+ * Free up all the allocated messages.
*/
- for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
- kbuffer, size, send_offset, pfnsum + i);
-
- /* add to msg header */
- list_add_tail(&msgbody->msglistentry,
- &msgheader->submsglist);
- pfnsum += pfncurr;
- pfnleft -= pfncurr;
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+ kfree(msgheader);
+ return -ENOMEM;
}
- } else {
- /* everything fits in a header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pagecount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (msgheader == NULL)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
-
- *msginfo = msgheader;
+
+ msgbody->msgsize = msgsize;
+ gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+ * Gpadl is u32 and we are using a pointer which could
+ * be 64-bit
+ * This is governed by the guest/host protocol and
+ * so the hypervisor guarantees that this is ok.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
}
return 0;
-nomem:
- kfree(msgheader);
- kfree(msgbody);
- return -ENOMEM;
}
/*
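As a rough illustration of the rewritten create_gpadl_header() flow above: the header message always carries as many PFNs as fit, and only the remainder is spread over body messages. The sketch below is standalone C with placeholder limits ('hdr_room' and 'body_room'), not the real vmbus structure sizes:

/* Placeholder arithmetic only; hdr_room/body_room stand in for the
 * MAX_SIZE_CHANNEL_MESSAGE-derived PFN capacities used in the patch.
 */
static unsigned int gpadl_body_msgs_needed(unsigned int pagecount,
					   unsigned int hdr_room,
					   unsigned int body_room)
{
	unsigned int in_header = pagecount < hdr_room ? pagecount : hdr_room;
	unsigned int pfnleft = pagecount - in_header;

	/* Each body message carries at most body_room PFNs. */
	return (pfnleft + body_room - 1) / body_room;
}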
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 42aec2c5606a..9c97c4065fe7 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -296,6 +296,11 @@ static struct {
spinlock_t lock;
} host_ts;
+static bool timesync_implicit;
+
+module_param(timesync_implicit, bool, 0644);
+MODULE_PARM_DESC(timesync_implicit, "If set treat SAMPLE as SYNC when clock is behind");
+
static inline u64 reftime_to_ns(u64 reftime)
{
return (reftime - WLTIMEDELTA) * 100;
@@ -345,6 +350,29 @@ static void hv_set_host_time(struct work_struct *work)
}
/*
+ * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
+ * Force a sync if the guest is behind.
+ */
+static inline bool hv_implicit_sync(u64 host_time)
+{
+ struct timespec64 new_ts;
+ struct timespec64 threshold_ts;
+
+ new_ts = ns_to_timespec64(reftime_to_ns(host_time));
+ ktime_get_real_ts64(&threshold_ts);
+
+ threshold_ts.tv_sec += 5;
+
+ /*
+ * Return true if the guest is behind the host by 5 or more seconds.
+ */
+ if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
+ return true;
+
+ return false;
+}
+
+/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
@@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
spin_unlock_irqrestore(&host_ts.lock, flags);
/* Schedule work to do do_settimeofday64() */
- if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
+ (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
schedule_work(&adj_time_work);
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b33d5abd9beb..7f7965f3d187 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = {
};
/* The one and only one */
-static struct bus_type hv_bus = {
+static const struct bus_type hv_bus = {
.name = "vmbus",
.match = vmbus_match,
.shutdown = vmbus_shutdown,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index a608264da87d..83945397b6eb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -301,6 +301,16 @@ config SENSORS_ASC7621
This driver can also be built as a module. If so, the module
will be called asc7621.
+config SENSORS_ASUS_ROG_RYUJIN
+ tristate "ASUS ROG RYUJIN II 360 hardware monitoring driver"
+ depends on HID
+ help
+ If you say yes here you get support for the fans and sensors of
+ the ASUS ROG RYUJIN II 360 AIO CPU liquid cooler.
+
+ This driver can also be built as a module. If so, the module
+ will be called asus_rog_ryujin.
+
config SENSORS_AXI_FAN_CONTROL
tristate "Analog Devices FAN Control HDL Core driver"
help
@@ -412,6 +422,17 @@ config SENSORS_ASPEED
This driver can also be built as a module. If so, the module
will be called aspeed_pwm_tacho.
+config SENSORS_ASPEED_G6
+ tristate "ASPEED g6 PWM and Fan tach driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on PWM
+ help
+ This driver provides support for ASPEED G6 PWM and Fan Tach
+ controllers.
+
+ This driver can also be built as a module. If so, the module
+ will be called aspeed_g6_pwm_tach.
+
config SENSORS_ATXP1
tristate "Attansic ATXP1 VID controller"
depends on I2C
@@ -452,6 +473,16 @@ config SENSORS_BT1_PVT_ALARMS
the data conversion will be periodically performed and the data will be
saved in the internal driver cache.
+config SENSORS_CHIPCAP2
+ tristate "Amphenol ChipCap 2 relative humidity and temperature sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the Amphenol ChipCap 2
+ relative humidity and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called chipcap2.
+
config SENSORS_CORSAIR_CPRO
tristate "Corsair Commander Pro controller"
depends on HID
@@ -1038,6 +1069,17 @@ config SENSORS_LTC4261
This driver can also be built as a module. If so, the module will
be called ltc4261.
+config SENSORS_LTC4282
+ tristate "Analog Devices LTC4282"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Analog Devices LTC4282
+ High Current Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4282.
+
config SENSORS_LTQ_CPUTEMP
bool "Lantiq cpu temperature sensor driver"
depends on SOC_XWAY
@@ -1674,6 +1716,16 @@ config SENSORS_NZXT_KRAKEN2
This driver can also be built as a module. If so, the module
will be called nzxt-kraken2.
+config SENSORS_NZXT_KRAKEN3
+ tristate "NZXT Kraken X53/X63/X73, Z53/Z63/Z73 coolers"
+ depends on USB_HID
+ help
+ If you say yes here you get support for hardware monitoring for the
+ NZXT Kraken X53/X63/X73, Z53/Z63/Z73 all-in-one CPU liquid coolers.
+
+ This driver can also be built as a module. If so, the module
+ will be called nzxt-kraken3.
+
config SENSORS_NZXT_SMART2
tristate "NZXT RGB & Fan Controller/Smart Device v2"
depends on USB_HID
@@ -1714,6 +1766,16 @@ source "drivers/hwmon/peci/Kconfig"
source "drivers/hwmon/pmbus/Kconfig"
+config SENSORS_PT5161L
+ tristate "Astera Labs PT5161L PCIe retimer hardware monitoring"
+ depends on I2C
+ help
+ If you say yes here you get support for temperature monitoring
+ on the Astera Labs PT5161L PCIe retimer.
+
+ This driver can also be built as a module. If so, the module
+ will be called pt5161l.
+
config SENSORS_PWM_FAN
tristate "PWM fan"
depends on (PWM && OF) || COMPILE_TEST
@@ -1994,6 +2056,20 @@ config SENSORS_SFCTEMP
This driver can also be built as a module. If so, the module
will be called sfctemp.
+config SENSORS_SURFACE_FAN
+ tristate "Surface Fan Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Driver that provides monitoring of the fan on Surface Pro devices that
+ have a fan, like the Surface Pro 9.
+
+ This makes the fan's current speed accessible through the hwmon
+ system. It does not provide control over the fan; the firmware is
+ responsible for that, and this driver merely provides monitoring.
+
+ Select M or Y here if you want to be able to read the fan's speed.
+
config SENSORS_ADC128D818
tristate "Texas Instruments ADC128D818"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 47be39af5c03..5c31808f6378 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -55,9 +55,12 @@ obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
obj-$(CONFIG_SENSORS_AS370) += as370-hwmon.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
+obj-$(CONFIG_SENSORS_ASPEED_G6) += aspeed-g6-pwm-tach.o
+obj-$(CONFIG_SENSORS_ASUS_ROG_RYUJIN) += asus_rog_ryujin.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o
+obj-$(CONFIG_SENSORS_CHIPCAP2) += chipcap2.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_CORSAIR_CPRO) += corsair-cpro.o
obj-$(CONFIG_SENSORS_CORSAIR_PSU) += corsair-psu.o
@@ -136,6 +139,7 @@ obj-$(CONFIG_SENSORS_LTC4222) += ltc4222.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4260) += ltc4260.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
+obj-$(CONFIG_SENSORS_LTC4282) += ltc4282.o
obj-$(CONFIG_SENSORS_LTQ_CPUTEMP) += ltq-cputemp.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX127) += max127.o
@@ -173,6 +177,7 @@ obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN2) += nzxt-kraken2.o
+obj-$(CONFIG_SENSORS_NZXT_KRAKEN3) += nzxt-kraken3.o
obj-$(CONFIG_SENSORS_NZXT_SMART2) += nzxt-smart2.o
obj-$(CONFIG_SENSORS_OXP) += oxp-sensors.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
@@ -180,6 +185,7 @@ obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_POWERZ) += powerz.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
+obj-$(CONFIG_SENSORS_PT5161L) += pt5161l.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
@@ -201,6 +207,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_SPARX5) += sparx5-temp.o
obj-$(CONFIG_SENSORS_STTS751) += stts751.o
+obj-$(CONFIG_SENSORS_SURFACE_FAN)+= surface_fan.o
obj-$(CONFIG_SENSORS_SY7636A) += sy7636a-hwmon.o
obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_TC74) += tc74.o
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
index 60a893f27159..3390102d2d4a 100644
--- a/drivers/hwmon/adm1177.c
+++ b/drivers/hwmon/adm1177.c
@@ -250,7 +250,6 @@ static const struct of_device_id adm1177_dt_ids[] = {
MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
static struct i2c_driver adm1177_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "adm1177",
.of_match_table = adm1177_dt_ids,
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index 067865f4887a..25281739aa3b 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -124,7 +124,7 @@ static int adt7310_reg_write(void *context, unsigned int reg, unsigned int val)
static const struct regmap_config adt7310_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = adt7310_regmap_is_volatile,
.reg_read = adt7310_reg_read,
.reg_write = adt7310_reg_write,
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 952506779336..d15f64d4b6e7 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -69,7 +69,7 @@ static const struct regmap_config adt7410_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = ADT7X10_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = adt7410_regmap_is_volatile,
.reg_read = adt7410_reg_read,
.reg_write = adt7410_reg_write,
@@ -95,14 +95,12 @@ static const struct i2c_device_id adt7410_ids[] = {
MODULE_DEVICE_TABLE(i2c, adt7410_ids);
static struct i2c_driver adt7410_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "adt7410",
.pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
},
.probe = adt7410_i2c_probe,
.id_table = adt7410_ids,
- .address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
};
module_i2c_driver(adt7410_driver);
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 2a7a4b6b0094..9b02b304c2f5 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = {
MODULE_DEVICE_TABLE(i2c, amc6821_id);
+static const struct of_device_id __maybe_unused amc6821_of_match[] = {
+ {
+ .compatible = "ti,amc6821",
+ .data = (void *)amc6821,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, amc6821_of_match);
+
static struct i2c_driver amc6821_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "amc6821",
+ .of_match_table = of_match_ptr(amc6821_of_match),
},
.probe = amc6821_probe,
.id_table = amc6821_id,
diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c
new file mode 100644
index 000000000000..597b3b019d49
--- /dev/null
+++ b/drivers/hwmon/aspeed-g6-pwm-tach.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Aspeed Technology Inc.
+ *
+ * PWM/TACH controller driver for Aspeed ast2600 SoCs.
+ * This driver doesn't support earlier versions of the IP.
+ *
+ * The hardware operates in time quantities of length
+ * Q := (DIV_L + 1) << DIV_H / input-clk
+ * The length of a PWM period is (DUTY_CYCLE_PERIOD + 1) * Q.
+ * The maximal value for DUTY_CYCLE_PERIOD is used here to provide
+ * a fine grained selection for the duty cycle.
+ *
+ * This driver uses DUTY_CYCLE_RISING_POINT = 0, so from the start of a
+ * period the output is active until DUTY_CYCLE_FALLING_POINT * Q. Note
+ * that if DUTY_CYCLE_RISING_POINT = DUTY_CYCLE_FALLING_POINT the output is
+ * always active.
+ *
+ * Register usage:
+ * PIN_ENABLE: When it is unset the pwm controller will emit inactive level to the external.
+ * Use to determine whether the PWM channel is enabled or disabled
+ * CLK_ENABLE: When it is unset the pwm controller will assert the duty counter reset and
+ * emit inactive level to the PIN_ENABLE mux after that the driver can still change the pwm period
+ * and duty and the value will apply when CLK_ENABLE be set again.
+ * Use to determine whether duty_cycle bigger than 0.
+ * PWM_ASPEED_CTRL_INVERSE: When it is toggled the output value will inverse immediately.
+ * PWM_ASPEED_DUTY_CYCLE_FALLING_POINT/PWM_ASPEED_DUTY_CYCLE_RISING_POINT: When these two
+ * values are equal it means the duty cycle = 100%.
+ *
+ * A glitch may be generated when:
+ * - The enable state changes while the duty_cycle is bigger than 0% and less than 100%.
+ * - The polarity changes while the duty_cycle is bigger than 0% and less than 100%.
+ *
+ * Limitations:
+ * - When changing both duty cycle and period, we cannot prevent in
+ * software that the output might produce a period with mixed
+ * settings.
+ * - Disabling the PWM doesn't complete the current period.
+ *
+ * Improvements:
+ * - When only one of duty cycle or period is changed, the PWM controller does not
+ * generate a glitch; the new configuration takes effect at the next PWM cycle.
+ * This behavior can be enabled/disabled through PWM_ASPEED_CTRL_DUTY_SYNC_DISABLE.
+ */
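The comment above defines the time quantum Q and the resulting PWM period; a minimal standalone sketch of the same arithmetic (plain C with 64-bit math, 'clk_hz' standing in for the input clock rate; this mirrors, but is not, the driver code):

#include <stdint.h>

/* period = (duty_cycle_period + 1) * Q, with
 * Q = ((div_l + 1) << div_h) / clk_hz, rounded up to whole nanoseconds.
 */
static uint64_t aspeed_pwm_period_ns(uint64_t clk_hz, uint32_t div_h,
				     uint32_t div_l, uint32_t duty_cycle_period)
{
	uint64_t ticks = (uint64_t)(div_l + 1) * (duty_cycle_period + 1) << div_h;

	return (ticks * 1000000000ULL + clk_hz - 1) / clk_hz;
}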
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+/* The channel number of Aspeed pwm controller */
+#define PWM_ASPEED_NR_PWMS 16
+/* PWM Control Register */
+#define PWM_ASPEED_CTRL(ch) ((ch) * 0x10 + 0x00)
+#define PWM_ASPEED_CTRL_LOAD_SEL_RISING_AS_WDT BIT(19)
+#define PWM_ASPEED_CTRL_DUTY_LOAD_AS_WDT_ENABLE BIT(18)
+#define PWM_ASPEED_CTRL_DUTY_SYNC_DISABLE BIT(17)
+#define PWM_ASPEED_CTRL_CLK_ENABLE BIT(16)
+#define PWM_ASPEED_CTRL_LEVEL_OUTPUT BIT(15)
+#define PWM_ASPEED_CTRL_INVERSE BIT(14)
+#define PWM_ASPEED_CTRL_OPEN_DRAIN_ENABLE BIT(13)
+#define PWM_ASPEED_CTRL_PIN_ENABLE BIT(12)
+#define PWM_ASPEED_CTRL_CLK_DIV_H GENMASK(11, 8)
+#define PWM_ASPEED_CTRL_CLK_DIV_L GENMASK(7, 0)
+
+/* PWM Duty Cycle Register */
+#define PWM_ASPEED_DUTY_CYCLE(ch) ((ch) * 0x10 + 0x04)
+#define PWM_ASPEED_DUTY_CYCLE_PERIOD GENMASK(31, 24)
+#define PWM_ASPEED_DUTY_CYCLE_POINT_AS_WDT GENMASK(23, 16)
+#define PWM_ASPEED_DUTY_CYCLE_FALLING_POINT GENMASK(15, 8)
+#define PWM_ASPEED_DUTY_CYCLE_RISING_POINT GENMASK(7, 0)
+
+/* PWM fixed value */
+#define PWM_ASPEED_FIXED_PERIOD FIELD_MAX(PWM_ASPEED_DUTY_CYCLE_PERIOD)
+
+/* The channel number of Aspeed tach controller */
+#define TACH_ASPEED_NR_TACHS 16
+/* TACH Control Register */
+#define TACH_ASPEED_CTRL(ch) (((ch) * 0x10) + 0x08)
+#define TACH_ASPEED_IER BIT(31)
+#define TACH_ASPEED_INVERS_LIMIT BIT(30)
+#define TACH_ASPEED_LOOPBACK BIT(29)
+#define TACH_ASPEED_ENABLE BIT(28)
+#define TACH_ASPEED_DEBOUNCE_MASK GENMASK(27, 26)
+#define TACH_ASPEED_DEBOUNCE_BIT 26
+#define TACH_ASPEED_IO_EDGE_MASK GENMASK(25, 24)
+#define TACH_ASPEED_IO_EDGE_BIT 24
+#define TACH_ASPEED_CLK_DIV_T_MASK GENMASK(23, 20)
+#define TACH_ASPEED_CLK_DIV_BIT 20
+#define TACH_ASPEED_THRESHOLD_MASK GENMASK(19, 0)
+/* [27:26] */
+#define DEBOUNCE_3_CLK 0x00
+#define DEBOUNCE_2_CLK 0x01
+#define DEBOUNCE_1_CLK 0x02
+#define DEBOUNCE_0_CLK 0x03
+/* [25:24] */
+#define F2F_EDGES 0x00
+#define R2R_EDGES 0x01
+#define BOTH_EDGES 0x02
+/* [23:20] */
+/* divisor = 4 to the nth power, n = register value */
+#define DEFAULT_TACH_DIV 1024
+#define DIV_TO_REG(divisor) (ilog2(divisor) >> 1)
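A quick worked example of the encoding above: DIV_TO_REG(1024) = ilog2(1024) >> 1 = 10 >> 1 = 5, and 4^5 = 1024, which is DEFAULT_TACH_DIV. The hwmon fan_div write handler further down only accepts divisors of this form, rejecting values that are not a power of four (a power of two with an even exponent) or whose encoded value exceeds 0xb.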
+
+/* TACH Status Register */
+#define TACH_ASPEED_STS(ch) (((ch) * 0x10) + 0x0C)
+
+/* PWM_TACH_STS */
+#define TACH_ASPEED_ISR BIT(31)
+#define TACH_ASPEED_PWM_OUT BIT(25)
+#define TACH_ASPEED_PWM_OEN BIT(24)
+#define TACH_ASPEED_DEB_INPUT BIT(23)
+#define TACH_ASPEED_RAW_INPUT BIT(22)
+#define TACH_ASPEED_VALUE_UPDATE BIT(21)
+#define TACH_ASPEED_FULL_MEASUREMENT BIT(20)
+#define TACH_ASPEED_VALUE_MASK GENMASK(19, 0)
+/**********************************************************
+ * Software setting
+ *********************************************************/
+#define DEFAULT_FAN_PULSE_PR 2
+
+struct aspeed_pwm_tach_data {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *reset;
+ unsigned long clk_rate;
+ struct pwm_chip chip;
+ bool tach_present[TACH_ASPEED_NR_TACHS];
+ u32 tach_divisor;
+};
+
+static inline struct aspeed_pwm_tach_data *
+aspeed_pwm_chip_to_data(struct pwm_chip *chip)
+{
+ return container_of(chip, struct aspeed_pwm_tach_data, chip);
+}
+
+static int aspeed_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct aspeed_pwm_tach_data *priv = aspeed_pwm_chip_to_data(chip);
+ u32 hwpwm = pwm->hwpwm;
+ bool polarity, pin_en, clk_en;
+ u32 duty_pt, val;
+ u64 div_h, div_l, duty_cycle_period, dividend;
+
+ val = readl(priv->base + PWM_ASPEED_CTRL(hwpwm));
+ polarity = FIELD_GET(PWM_ASPEED_CTRL_INVERSE, val);
+ pin_en = FIELD_GET(PWM_ASPEED_CTRL_PIN_ENABLE, val);
+ clk_en = FIELD_GET(PWM_ASPEED_CTRL_CLK_ENABLE, val);
+ div_h = FIELD_GET(PWM_ASPEED_CTRL_CLK_DIV_H, val);
+ div_l = FIELD_GET(PWM_ASPEED_CTRL_CLK_DIV_L, val);
+ val = readl(priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm));
+ duty_pt = FIELD_GET(PWM_ASPEED_DUTY_CYCLE_FALLING_POINT, val);
+ duty_cycle_period = FIELD_GET(PWM_ASPEED_DUTY_CYCLE_PERIOD, val);
+ /*
+ * This multiplication doesn't overflow, the upper bound is
+ * 1000000000 * 256 * 256 << 15 = 0x1dcd650000000000
+ */
+ dividend = (u64)NSEC_PER_SEC * (div_l + 1) * (duty_cycle_period + 1)
+ << div_h;
+ state->period = DIV_ROUND_UP_ULL(dividend, priv->clk_rate);
+
+ if (clk_en && duty_pt) {
+ dividend = (u64)NSEC_PER_SEC * (div_l + 1) * duty_pt
+ << div_h;
+ state->duty_cycle = DIV_ROUND_UP_ULL(dividend, priv->clk_rate);
+ } else {
+ state->duty_cycle = clk_en ? state->period : 0;
+ }
+ state->polarity = polarity ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
+ state->enabled = pin_en;
+ return 0;
+}
+
+static int aspeed_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct aspeed_pwm_tach_data *priv = aspeed_pwm_chip_to_data(chip);
+ u32 hwpwm = pwm->hwpwm, duty_pt, val;
+ u64 div_h, div_l, divisor, expect_period;
+ bool clk_en;
+
+ expect_period = div64_u64(ULLONG_MAX, (u64)priv->clk_rate);
+ expect_period = min(expect_period, state->period);
+ dev_dbg(chip->dev, "expect period: %lldns, duty_cycle: %lldns",
+ expect_period, state->duty_cycle);
+ /*
+ * Pick the smallest value for div_h so that div_l can be the biggest
+ * which results in a finer resolution near the target period value.
+ */
+ divisor = (u64)NSEC_PER_SEC * (PWM_ASPEED_FIXED_PERIOD + 1) *
+ (FIELD_MAX(PWM_ASPEED_CTRL_CLK_DIV_L) + 1);
+ div_h = order_base_2(DIV64_U64_ROUND_UP(priv->clk_rate * expect_period, divisor));
+ if (div_h > 0xf)
+ div_h = 0xf;
+
+ divisor = ((u64)NSEC_PER_SEC * (PWM_ASPEED_FIXED_PERIOD + 1)) << div_h;
+ div_l = div64_u64(priv->clk_rate * expect_period, divisor);
+
+ if (div_l == 0)
+ return -ERANGE;
+
+ div_l -= 1;
+
+ if (div_l > 255)
+ div_l = 255;
+
+ dev_dbg(chip->dev, "clk source: %ld div_h %lld, div_l : %lld\n",
+ priv->clk_rate, div_h, div_l);
+ /* duty_pt = duty_cycle * (PERIOD + 1) / period */
+ duty_pt = div64_u64(state->duty_cycle * priv->clk_rate,
+ (u64)NSEC_PER_SEC * (div_l + 1) << div_h);
+ dev_dbg(chip->dev, "duty_cycle = %lld, duty_pt = %d\n",
+ state->duty_cycle, duty_pt);
+
+ /*
+ * Fixed DUTY_CYCLE_PERIOD to its max value to get a
+ * fine-grained resolution for duty_cycle at the expense of a
+ * coarser period resolution.
+ */
+ val = readl(priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm));
+ val &= ~PWM_ASPEED_DUTY_CYCLE_PERIOD;
+ val |= FIELD_PREP(PWM_ASPEED_DUTY_CYCLE_PERIOD,
+ PWM_ASPEED_FIXED_PERIOD);
+ writel(val, priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm));
+
+ if (duty_pt == 0) {
+ /* emit inactive level and assert the duty counter reset */
+ clk_en = 0;
+ } else {
+ clk_en = 1;
+ if (duty_pt >= (PWM_ASPEED_FIXED_PERIOD + 1))
+ duty_pt = 0;
+ val = readl(priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm));
+ val &= ~(PWM_ASPEED_DUTY_CYCLE_RISING_POINT |
+ PWM_ASPEED_DUTY_CYCLE_FALLING_POINT);
+ val |= FIELD_PREP(PWM_ASPEED_DUTY_CYCLE_FALLING_POINT, duty_pt);
+ writel(val, priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm));
+ }
+
+ val = readl(priv->base + PWM_ASPEED_CTRL(hwpwm));
+ val &= ~(PWM_ASPEED_CTRL_CLK_DIV_H | PWM_ASPEED_CTRL_CLK_DIV_L |
+ PWM_ASPEED_CTRL_PIN_ENABLE | PWM_ASPEED_CTRL_CLK_ENABLE |
+ PWM_ASPEED_CTRL_INVERSE);
+ val |= FIELD_PREP(PWM_ASPEED_CTRL_CLK_DIV_H, div_h) |
+ FIELD_PREP(PWM_ASPEED_CTRL_CLK_DIV_L, div_l) |
+ FIELD_PREP(PWM_ASPEED_CTRL_PIN_ENABLE, state->enabled) |
+ FIELD_PREP(PWM_ASPEED_CTRL_CLK_ENABLE, clk_en) |
+ FIELD_PREP(PWM_ASPEED_CTRL_INVERSE, state->polarity);
+ writel(val, priv->base + PWM_ASPEED_CTRL(hwpwm));
+
+ return 0;
+}
+
+static const struct pwm_ops aspeed_pwm_ops = {
+ .apply = aspeed_pwm_apply,
+ .get_state = aspeed_pwm_get_state,
+};
+
+static void aspeed_tach_ch_enable(struct aspeed_pwm_tach_data *priv, u8 tach_ch,
+ bool enable)
+{
+ if (enable)
+ writel(readl(priv->base + TACH_ASPEED_CTRL(tach_ch)) |
+ TACH_ASPEED_ENABLE,
+ priv->base + TACH_ASPEED_CTRL(tach_ch));
+ else
+ writel(readl(priv->base + TACH_ASPEED_CTRL(tach_ch)) &
+ ~TACH_ASPEED_ENABLE,
+ priv->base + TACH_ASPEED_CTRL(tach_ch));
+}
+
+static int aspeed_tach_val_to_rpm(struct aspeed_pwm_tach_data *priv, u32 tach_val)
+{
+ u64 rpm;
+ u32 tach_div;
+
+ tach_div = tach_val * priv->tach_divisor * DEFAULT_FAN_PULSE_PR;
+
+ dev_dbg(priv->dev, "clk %ld, tach_val %d , tach_div %d\n",
+ priv->clk_rate, tach_val, tach_div);
+
+ rpm = (u64)priv->clk_rate * 60;
+ do_div(rpm, tach_div);
+
+ return (int)rpm;
+}
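The conversion above is RPM = clk_rate * 60 / (raw_count * divisor * pulses_per_revolution). A standalone sketch with purely illustrative numbers (the 25 MHz clock and raw count are assumptions, not values taken from this patch):

#include <stdint.h>
#include <stdio.h>

static uint64_t tach_to_rpm(uint64_t clk_hz, uint32_t raw,
			    uint32_t divisor, uint32_t pulses_per_rev)
{
	return clk_hz * 60 / ((uint64_t)raw * divisor * pulses_per_rev);
}

int main(void)
{
	/* 25 MHz clock, raw count 366, default divisor 1024, 2 pulses/rev:
	 * 1.5e9 / 749568 ~= 2001 RPM.
	 */
	printf("%llu rpm\n",
	       (unsigned long long)tach_to_rpm(25000000, 366, 1024, 2));
	return 0;
}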
+
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tach_data *priv,
+ u8 fan_tach_ch)
+{
+ u32 val;
+
+ val = readl(priv->base + TACH_ASPEED_STS(fan_tach_ch));
+
+ if (!(val & TACH_ASPEED_FULL_MEASUREMENT))
+ return 0;
+ val = FIELD_GET(TACH_ASPEED_VALUE_MASK, val);
+ return aspeed_tach_val_to_rpm(priv, val);
+}
+
+static int aspeed_tach_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct aspeed_pwm_tach_data *priv = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = aspeed_get_fan_tach_ch_rpm(priv, channel);
+ break;
+ case hwmon_fan_div:
+ reg_val = readl(priv->base + TACH_ASPEED_CTRL(channel));
+ reg_val = FIELD_GET(TACH_ASPEED_CLK_DIV_T_MASK, reg_val);
+ *val = BIT(reg_val << 1);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int aspeed_tach_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct aspeed_pwm_tach_data *priv = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ switch (attr) {
+ case hwmon_fan_div:
+ if (!is_power_of_2(val) || (ilog2(val) % 2) ||
+ DIV_TO_REG(val) > 0xb)
+ return -EINVAL;
+ priv->tach_divisor = val;
+ reg_val = readl(priv->base + TACH_ASPEED_CTRL(channel));
+ reg_val &= ~TACH_ASPEED_CLK_DIV_T_MASK;
+ reg_val |= FIELD_PREP(TACH_ASPEED_CLK_DIV_T_MASK,
+ DIV_TO_REG(priv->tach_divisor));
+ writel(reg_val, priv->base + TACH_ASPEED_CTRL(channel));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t aspeed_tach_dev_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct aspeed_pwm_tach_data *priv = drvdata;
+
+ if (!priv->tach_present[channel])
+ return 0;
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_div:
+ return 0644;
+ }
+ return 0;
+}
+
+static const struct hwmon_ops aspeed_tach_ops = {
+ .is_visible = aspeed_tach_dev_is_visible,
+ .read = aspeed_tach_hwmon_read,
+ .write = aspeed_tach_hwmon_write,
+};
+
+static const struct hwmon_channel_info *aspeed_tach_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV,
+ HWMON_F_INPUT | HWMON_F_DIV, HWMON_F_INPUT | HWMON_F_DIV),
+ NULL
+};
+
+static const struct hwmon_chip_info aspeed_tach_chip_info = {
+ .ops = &aspeed_tach_ops,
+ .info = aspeed_tach_info,
+};
+
+static void aspeed_present_fan_tach(struct aspeed_pwm_tach_data *priv, u8 *tach_ch, int count)
+{
+ u8 ch, index;
+ u32 val;
+
+ for (index = 0; index < count; index++) {
+ ch = tach_ch[index];
+ priv->tach_present[ch] = true;
+ priv->tach_divisor = DEFAULT_TACH_DIV;
+
+ val = readl(priv->base + TACH_ASPEED_CTRL(ch));
+ val &= ~(TACH_ASPEED_INVERS_LIMIT | TACH_ASPEED_DEBOUNCE_MASK |
+ TACH_ASPEED_IO_EDGE_MASK | TACH_ASPEED_CLK_DIV_T_MASK |
+ TACH_ASPEED_THRESHOLD_MASK);
+ val |= (DEBOUNCE_3_CLK << TACH_ASPEED_DEBOUNCE_BIT) |
+ F2F_EDGES |
+ FIELD_PREP(TACH_ASPEED_CLK_DIV_T_MASK,
+ DIV_TO_REG(priv->tach_divisor));
+ writel(val, priv->base + TACH_ASPEED_CTRL(ch));
+
+ aspeed_tach_ch_enable(priv, ch, true);
+ }
+}
+
+static int aspeed_create_fan_monitor(struct device *dev,
+ struct device_node *child,
+ struct aspeed_pwm_tach_data *priv)
+{
+ int ret, count;
+ u8 *tach_ch;
+
+ count = of_property_count_u8_elems(child, "tach-ch");
+ if (count < 1)
+ return -EINVAL;
+ tach_ch = devm_kcalloc(dev, count, sizeof(*tach_ch), GFP_KERNEL);
+ if (!tach_ch)
+ return -ENOMEM;
+ ret = of_property_read_u8_array(child, "tach-ch", tach_ch, count);
+ if (ret)
+ return ret;
+
+ aspeed_present_fan_tach(priv, tach_ch, count);
+
+ return 0;
+}
+
+static void aspeed_pwm_tach_reset_assert(void *data)
+{
+ struct reset_control *rst = data;
+
+ reset_control_assert(rst);
+}
+
+static int aspeed_pwm_tach_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev, *hwmon;
+ int ret;
+ struct device_node *child;
+ struct aspeed_pwm_tach_data *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Couldn't get clock\n");
+ priv->clk_rate = clk_get_rate(priv->clk);
+ priv->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(dev, PTR_ERR(priv->reset),
+ "Couldn't get reset control\n");
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Couldn't deassert reset control\n");
+ ret = devm_add_action_or_reset(dev, aspeed_pwm_tach_reset_assert,
+ priv->reset);
+ if (ret)
+ return ret;
+
+ priv->chip.dev = dev;
+ priv->chip.ops = &aspeed_pwm_ops;
+ priv->chip.npwm = PWM_ASPEED_NR_PWMS;
+
+ ret = devm_pwmchip_add(dev, &priv->chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
+
+ for_each_child_of_node(dev->of_node, child) {
+ ret = aspeed_create_fan_monitor(dev, child, priv);
+ if (ret) {
+ of_node_put(child);
+ dev_warn(dev, "Failed to create fan %d", ret);
+ return 0;
+ }
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "aspeed_tach", priv,
+ &aspeed_tach_chip_info, NULL);
+ ret = PTR_ERR_OR_ZERO(hwmon);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register hwmon device\n");
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ return 0;
+}
+
+static int aspeed_pwm_tach_remove(struct platform_device *pdev)
+{
+ struct aspeed_pwm_tach_data *priv = platform_get_drvdata(pdev);
+
+ reset_control_assert(priv->reset);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_pwm_tach_match[] = {
+ {
+ .compatible = "aspeed,ast2600-pwm-tach",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, aspeed_pwm_tach_match);
+
+static struct platform_driver aspeed_pwm_tach_driver = {
+ .probe = aspeed_pwm_tach_probe,
+ .remove = aspeed_pwm_tach_remove,
+ .driver = {
+ .name = "aspeed-g6-pwm-tach",
+ .of_match_table = aspeed_pwm_tach_match,
+ },
+};
+
+module_platform_driver(aspeed_pwm_tach_driver);
+
+MODULE_AUTHOR("Billy Tsai <billy_tsai@aspeedtech.com>");
+MODULE_DESCRIPTION("Aspeed ast2600 PWM and Fan Tach device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/asus_rog_ryujin.c b/drivers/hwmon/asus_rog_ryujin.c
new file mode 100644
index 000000000000..f8b20346a995
--- /dev/null
+++ b/drivers/hwmon/asus_rog_ryujin.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * hwmon driver for Asus ROG Ryujin II 360 AIO cooler.
+ *
+ * Copyright 2024 Aleksa Savic <savicaleksa83@gmail.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/hid.h>
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "asus_rog_ryujin"
+
+#define USB_VENDOR_ID_ASUS_ROG 0x0b05
+#define USB_PRODUCT_ID_RYUJIN_AIO 0x1988 /* ASUS ROG RYUJIN II 360 */
+
+#define STATUS_VALIDITY 1500 /* ms */
+#define MAX_REPORT_LENGTH 65
+
+/* Cooler status report offsets */
+#define RYUJIN_TEMP_SENSOR_1 3
+#define RYUJIN_TEMP_SENSOR_2 4
+#define RYUJIN_PUMP_SPEED 5
+#define RYUJIN_INTERNAL_FAN_SPEED 7
+
+/* Cooler duty report offsets */
+#define RYUJIN_PUMP_DUTY 4
+#define RYUJIN_INTERNAL_FAN_DUTY 5
+
+/* Controller status (speeds) report offsets */
+#define RYUJIN_CONTROLLER_SPEED_1 5
+#define RYUJIN_CONTROLLER_SPEED_2 7
+#define RYUJIN_CONTROLLER_SPEED_3 9
+#define RYUJIN_CONTROLLER_SPEED_4 3
+
+/* Controller duty report offsets */
+#define RYUJIN_CONTROLLER_DUTY 4
+
+/* Control commands and their inner offsets */
+#define RYUJIN_CMD_PREFIX 0xEC
+
+static const u8 get_cooler_status_cmd[] = { RYUJIN_CMD_PREFIX, 0x99 };
+static const u8 get_cooler_duty_cmd[] = { RYUJIN_CMD_PREFIX, 0x9A };
+static const u8 get_controller_speed_cmd[] = { RYUJIN_CMD_PREFIX, 0xA0 };
+static const u8 get_controller_duty_cmd[] = { RYUJIN_CMD_PREFIX, 0xA1 };
+
+#define RYUJIN_SET_COOLER_PUMP_DUTY_OFFSET 3
+#define RYUJIN_SET_COOLER_FAN_DUTY_OFFSET 4
+static const u8 set_cooler_duty_cmd[] = { RYUJIN_CMD_PREFIX, 0x1A, 0x00, 0x00, 0x00 };
+
+#define RYUJIN_SET_CONTROLLER_FAN_DUTY_OFFSET 4
+static const u8 set_controller_duty_cmd[] = { RYUJIN_CMD_PREFIX, 0x21, 0x00, 0x00, 0x00 };
+
+/* Command lengths */
+#define GET_CMD_LENGTH 2 /* Same length for all get commands */
+#define SET_CMD_LENGTH 5 /* Same length for all set commands */
+
+/* Command response headers */
+#define RYUJIN_GET_COOLER_STATUS_CMD_RESPONSE 0x19
+#define RYUJIN_GET_COOLER_DUTY_CMD_RESPONSE 0x1A
+#define RYUJIN_GET_CONTROLLER_SPEED_CMD_RESPONSE 0x20
+#define RYUJIN_GET_CONTROLLER_DUTY_CMD_RESPONSE 0x21
+
+static const char *const rog_ryujin_temp_label[] = {
+ "Coolant temp"
+};
+
+static const char *const rog_ryujin_speed_label[] = {
+ "Pump speed",
+ "Internal fan speed",
+ "Controller fan 1 speed",
+ "Controller fan 2 speed",
+ "Controller fan 3 speed",
+ "Controller fan 4 speed",
+};
+
+struct rog_ryujin_data {
+ struct hid_device *hdev;
+ struct device *hwmon_dev;
+ /* For locking access to buffer */
+ struct mutex buffer_lock;
+ /* For queueing multiple readers */
+ struct mutex status_report_request_mutex;
+ /* For reinitializing the completions below */
+ spinlock_t status_report_request_lock;
+ struct completion cooler_status_received;
+ struct completion controller_status_received;
+ struct completion cooler_duty_received;
+ struct completion controller_duty_received;
+ struct completion cooler_duty_set;
+ struct completion controller_duty_set;
+
+ /* Sensor data */
+ s32 temp_input[1];
+ u16 speed_input[6]; /* Pump, internal fan and four controller fan speeds in RPM */
+ u8 duty_input[3]; /* Pump, internal fan and controller fan duty in PWM */
+
+ u8 *buffer;
+ unsigned long updated; /* jiffies */
+};
+
+static int rog_ryujin_percent_to_pwm(u16 val)
+{
+ return DIV_ROUND_CLOSEST(val * 255, 100);
+}
+
+static int rog_ryujin_pwm_to_percent(long val)
+{
+ return DIV_ROUND_CLOSEST(val * 100, 255);
+}
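For example, a firmware duty of 50% is reported to hwmon as DIV_ROUND_CLOSEST(50 * 255, 100) = 128, and writing pwm 128 maps back to DIV_ROUND_CLOSEST(128 * 100, 255) = 50%, so round-tripping through the two helpers above stays consistent for typical values.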
+
+static umode_t rog_ryujin_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_label:
+ case hwmon_fan_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Writes the command to the device with the rest of the report filled with zeroes */
+static int rog_ryujin_write_expanded(struct rog_ryujin_data *priv, const u8 *cmd, int cmd_length)
+{
+ int ret;
+
+ mutex_lock(&priv->buffer_lock);
+
+ memcpy_and_pad(priv->buffer, MAX_REPORT_LENGTH, cmd, cmd_length, 0x00);
+ ret = hid_hw_output_report(priv->hdev, priv->buffer, MAX_REPORT_LENGTH);
+
+ mutex_unlock(&priv->buffer_lock);
+ return ret;
+}
+
+/* Assumes priv->status_report_request_mutex is locked */
+static int rog_ryujin_execute_cmd(struct rog_ryujin_data *priv, const u8 *cmd, int cmd_length,
+ struct completion *status_completion)
+{
+ int ret;
+
+ /*
+ * Disable raw event parsing for a moment to safely reinitialize the
+ * completion. Reinit is done because hidraw could have triggered
+ * the raw event parsing and marked the passed in completion as done.
+ */
+ spin_lock_bh(&priv->status_report_request_lock);
+ reinit_completion(status_completion);
+ spin_unlock_bh(&priv->status_report_request_lock);
+
+ /* Send command for getting data */
+ ret = rog_ryujin_write_expanded(priv, cmd, cmd_length);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_interruptible_timeout(status_completion,
+ msecs_to_jiffies(STATUS_VALIDITY));
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rog_ryujin_get_status(struct rog_ryujin_data *priv)
+{
+ int ret = mutex_lock_interruptible(&priv->status_report_request_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ if (!time_after(jiffies, priv->updated + msecs_to_jiffies(STATUS_VALIDITY))) {
+ /* Data is up to date */
+ goto unlock_and_return;
+ }
+
+ /* Retrieve cooler status */
+ ret = rog_ryujin_execute_cmd(priv, get_cooler_status_cmd, GET_CMD_LENGTH,
+ &priv->cooler_status_received);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ /* Retrieve controller status (speeds) */
+ ret = rog_ryujin_execute_cmd(priv, get_controller_speed_cmd, GET_CMD_LENGTH,
+ &priv->controller_status_received);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ /* Retrieve cooler duty */
+ ret = rog_ryujin_execute_cmd(priv, get_cooler_duty_cmd, GET_CMD_LENGTH,
+ &priv->cooler_duty_received);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ /* Retrieve controller duty */
+ ret = rog_ryujin_execute_cmd(priv, get_controller_duty_cmd, GET_CMD_LENGTH,
+ &priv->controller_duty_received);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ priv->updated = jiffies;
+
+unlock_and_return:
+ mutex_unlock(&priv->status_report_request_mutex);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rog_ryujin_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct rog_ryujin_data *priv = dev_get_drvdata(dev);
+ int ret = rog_ryujin_get_status(priv);
+
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_temp:
+ *val = priv->temp_input[channel];
+ break;
+ case hwmon_fan:
+ *val = priv->speed_input[channel];
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = priv->duty_input[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP; /* unreachable */
+ }
+
+ return 0;
+}
+
+static int rog_ryujin_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ *str = rog_ryujin_temp_label[channel];
+ break;
+ case hwmon_fan:
+ *str = rog_ryujin_speed_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP; /* unreachable */
+ }
+
+ return 0;
+}
+
+static int rog_ryujin_write_fixed_duty(struct rog_ryujin_data *priv, int channel, int val)
+{
+ u8 set_cmd[SET_CMD_LENGTH];
+ int ret;
+
+ if (channel < 2) {
+ /*
+ * Retrieve cooler duty since both pump and internal fan are set
+ * together, then write back with one of them modified.
+ */
+ ret = mutex_lock_interruptible(&priv->status_report_request_mutex);
+ if (ret < 0)
+ return ret;
+ ret = rog_ryujin_execute_cmd(priv, get_cooler_duty_cmd, GET_CMD_LENGTH,
+ &priv->cooler_duty_received);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ memcpy(set_cmd, set_cooler_duty_cmd, SET_CMD_LENGTH);
+
+ /* Cooler duties are set as 0-100% */
+ val = rog_ryujin_pwm_to_percent(val);
+
+ if (channel == 0) {
+ /* Cooler pump duty */
+ set_cmd[RYUJIN_SET_COOLER_PUMP_DUTY_OFFSET] = val;
+ set_cmd[RYUJIN_SET_COOLER_FAN_DUTY_OFFSET] =
+ rog_ryujin_pwm_to_percent(priv->duty_input[1]);
+ } else if (channel == 1) {
+ /* Cooler internal fan duty */
+ set_cmd[RYUJIN_SET_COOLER_PUMP_DUTY_OFFSET] =
+ rog_ryujin_pwm_to_percent(priv->duty_input[0]);
+ set_cmd[RYUJIN_SET_COOLER_FAN_DUTY_OFFSET] = val;
+ }
+
+ ret = rog_ryujin_execute_cmd(priv, set_cmd, SET_CMD_LENGTH, &priv->cooler_duty_set);
+unlock_and_return:
+ mutex_unlock(&priv->status_report_request_mutex);
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * Controller fan duty (channel == 2). No need to retrieve current
+ * duty, so just send the command.
+ */
+ memcpy(set_cmd, set_controller_duty_cmd, SET_CMD_LENGTH);
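+ /* Controller fan duty is sent as raw PWM (0-255), no conversion needed */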
+ set_cmd[RYUJIN_SET_CONTROLLER_FAN_DUTY_OFFSET] = val;
+
+ ret = rog_ryujin_execute_cmd(priv, set_cmd, SET_CMD_LENGTH,
+ &priv->controller_duty_set);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Lock onto this value until the next refresh cycle. Cooler duties
+ * (channels 0 and 1) were converted to percent above, so convert back
+ * to PWM here to keep duty_input consistent with reads.
+ */
+ if (channel < 2)
+ val = rog_ryujin_percent_to_pwm(val);
+ priv->duty_input[channel] = val;
+
+ return 0;
+}
+
+static int rog_ryujin_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
+{
+ struct rog_ryujin_data *priv = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ ret = rog_ryujin_write_fixed_duty(priv, channel, val);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops rog_ryujin_hwmon_ops = {
+ .is_visible = rog_ryujin_is_visible,
+ .read = rog_ryujin_read,
+ .read_string = rog_ryujin_read_string,
+ .write = rog_ryujin_write
+};
+
+static const struct hwmon_channel_info *rog_ryujin_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info rog_ryujin_chip_info = {
+ .ops = &rog_ryujin_hwmon_ops,
+ .info = rog_ryujin_info,
+};
+
+static int rog_ryujin_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data,
+ int size)
+{
+ struct rog_ryujin_data *priv = hid_get_drvdata(hdev);
+
+ if (data[0] != RYUJIN_CMD_PREFIX)
+ return 0;
+
+ if (data[1] == RYUJIN_GET_COOLER_STATUS_CMD_RESPONSE) {
+ /* Received coolant temp and speeds of pump and internal fan */
+ priv->temp_input[0] =
+ data[RYUJIN_TEMP_SENSOR_1] * 1000 + data[RYUJIN_TEMP_SENSOR_2] * 100;
+ priv->speed_input[0] = get_unaligned_le16(data + RYUJIN_PUMP_SPEED);
+ priv->speed_input[1] = get_unaligned_le16(data + RYUJIN_INTERNAL_FAN_SPEED);
+
+ if (!completion_done(&priv->cooler_status_received))
+ complete_all(&priv->cooler_status_received);
+ } else if (data[1] == RYUJIN_GET_CONTROLLER_SPEED_CMD_RESPONSE) {
+ /* Received speeds of four fans attached to the controller */
+ priv->speed_input[2] = get_unaligned_le16(data + RYUJIN_CONTROLLER_SPEED_1);
+ priv->speed_input[3] = get_unaligned_le16(data + RYUJIN_CONTROLLER_SPEED_2);
+ priv->speed_input[4] = get_unaligned_le16(data + RYUJIN_CONTROLLER_SPEED_3);
+ priv->speed_input[5] = get_unaligned_le16(data + RYUJIN_CONTROLLER_SPEED_4);
+
+ if (!completion_done(&priv->controller_status_received))
+ complete_all(&priv->controller_status_received);
+ } else if (data[1] == RYUJIN_GET_COOLER_DUTY_CMD_RESPONSE) {
+ /* Received report for pump and internal fan duties (in %) */
+ if (data[RYUJIN_PUMP_DUTY] == 0 && data[RYUJIN_INTERNAL_FAN_DUTY] == 0) {
+ /*
+ * We received a report with zeroes for duty in both places.
+ * The device returns this as a confirmation that setting values
+ * was successful. If we initiated a write, mark it as complete.
+ */
+ if (!completion_done(&priv->cooler_duty_set))
+ complete_all(&priv->cooler_duty_set);
+ else if (!completion_done(&priv->cooler_duty_received))
+ /*
+ * We didn't initiate a write, but received both zeroes.
+ * This means that either both duties are actually zero,
+ * or that we received a success report caused by userspace.
+ * We're expecting a report, so parse it.
+ */
+ goto read_cooler_duty;
+ return 0;
+ }
+read_cooler_duty:
+ priv->duty_input[0] = rog_ryujin_percent_to_pwm(data[RYUJIN_PUMP_DUTY]);
+ priv->duty_input[1] = rog_ryujin_percent_to_pwm(data[RYUJIN_INTERNAL_FAN_DUTY]);
+
+ if (!completion_done(&priv->cooler_duty_received))
+ complete_all(&priv->cooler_duty_received);
+ } else if (data[1] == RYUJIN_GET_CONTROLLER_DUTY_CMD_RESPONSE) {
+ /* Received report for controller duty for fans (in PWM) */
+ if (data[RYUJIN_CONTROLLER_DUTY] == 0) {
+ /*
+ * We received a report with a zero for duty. The device returns this as
+ * a confirmation that setting the controller duty value was successful.
+ * If we initiated a write, mark it as complete.
+ */
+ if (!completion_done(&priv->controller_duty_set))
+ complete_all(&priv->controller_duty_set);
+ else if (!completion_done(&priv->controller_duty_received))
+ /*
+ * We didn't initiate a write, but received a zero for duty.
+ * This means that either the duty is actually zero, or that
+ * we received a success report caused by userspace.
+ * We're expecting a report, so parse it.
+ */
+ goto read_controller_duty;
+ return 0;
+ }
+read_controller_duty:
+ priv->duty_input[2] = data[RYUJIN_CONTROLLER_DUTY];
+
+ if (!completion_done(&priv->controller_duty_received))
+ complete_all(&priv->controller_duty_received);
+ }
+
+ return 0;
+}
+
+static int rog_ryujin_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct rog_ryujin_data *priv;
+ int ret;
+
+ priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hdev = hdev;
+ hid_set_drvdata(hdev, priv);
+
+ /*
+ * Initialize priv->updated to STATUS_VALIDITY milliseconds in the past, making
+ * the initial empty data invalid for rog_ryujin_read() without the need for
+ * a special case there.
+ */
+ priv->updated = jiffies - msecs_to_jiffies(STATUS_VALIDITY);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid parse failed with %d\n", ret);
+ return ret;
+ }
+
+ /* Enable hidraw so existing user-space tools can continue to work */
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hid hw start failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hid hw open failed with %d\n", ret);
+ goto fail_and_stop;
+ }
+
+ priv->buffer = devm_kzalloc(&hdev->dev, MAX_REPORT_LENGTH, GFP_KERNEL);
+ if (!priv->buffer) {
+ ret = -ENOMEM;
+ goto fail_and_close;
+ }
+
+ mutex_init(&priv->status_report_request_mutex);
+ mutex_init(&priv->buffer_lock);
+ spin_lock_init(&priv->status_report_request_lock);
+ init_completion(&priv->cooler_status_received);
+ init_completion(&priv->controller_status_received);
+ init_completion(&priv->cooler_duty_received);
+ init_completion(&priv->controller_duty_received);
+ init_completion(&priv->cooler_duty_set);
+ init_completion(&priv->controller_duty_set);
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "rog_ryujin",
+ priv, &rog_ryujin_chip_info, NULL);
+ if (IS_ERR(priv->hwmon_dev)) {
+ ret = PTR_ERR(priv->hwmon_dev);
+ hid_err(hdev, "hwmon registration failed with %d\n", ret);
+ goto fail_and_close;
+ }
+
+ return 0;
+
+fail_and_close:
+ hid_hw_close(hdev);
+fail_and_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void rog_ryujin_remove(struct hid_device *hdev)
+{
+ struct rog_ryujin_data *priv = hid_get_drvdata(hdev);
+
+ hwmon_device_unregister(priv->hwmon_dev);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id rog_ryujin_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS_ROG, USB_PRODUCT_ID_RYUJIN_AIO) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, rog_ryujin_table);
+
+static struct hid_driver rog_ryujin_driver = {
+ .name = "rog_ryujin",
+ .id_table = rog_ryujin_table,
+ .probe = rog_ryujin_probe,
+ .remove = rog_ryujin_remove,
+ .raw_event = rog_ryujin_raw_event,
+};
+
+static int __init rog_ryujin_init(void)
+{
+ return hid_register_driver(&rog_ryujin_driver);
+}
+
+static void __exit rog_ryujin_exit(void)
+{
+ hid_unregister_driver(&rog_ryujin_driver);
+}
+
+/* When compiled into the kernel, initialize after the HID bus */
+late_initcall(rog_ryujin_init);
+module_exit(rog_ryujin_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>");
+MODULE_DESCRIPTION("Hwmon driver for Asus ROG Ryujin II 360 AIO cooler");
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
index 19b9bf3d75ef..35c862eb158b 100644
--- a/drivers/hwmon/axi-fan-control.c
+++ b/drivers/hwmon/axi-fan-control.c
@@ -13,8 +13,9 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
/* register map */
#define ADI_REG_RSTN 0x0080
@@ -83,7 +84,7 @@ static ssize_t axi_fan_control_show(struct device *dev, struct device_attribute
temp = DIV_ROUND_CLOSEST_ULL(temp * 509314ULL, 65535) - 280230;
- return sprintf(buf, "%u\n", temp);
+ return sysfs_emit(buf, "%u\n", temp);
}
static ssize_t axi_fan_control_store(struct device *dev, struct device_attribute *da,
@@ -368,12 +369,12 @@ static irqreturn_t axi_fan_control_irq_handler(int irq, void *data)
}
static int axi_fan_control_init(struct axi_fan_control_data *ctl,
- const struct device_node *np)
+ const struct device *dev)
{
int ret;
/* get fan pulses per revolution */
- ret = of_property_read_u32(np, "pulses-per-revolution", &ctl->ppr);
+ ret = device_property_read_u32(dev, "pulses-per-revolution", &ctl->ppr);
if (ret)
return ret;
@@ -443,25 +444,16 @@ static struct attribute *axi_fan_control_attrs[] = {
};
ATTRIBUTE_GROUPS(axi_fan_control);
-static const u32 version_1_0_0 = ADI_AXI_PCORE_VER(1, 0, 'a');
-
-static const struct of_device_id axi_fan_control_of_match[] = {
- { .compatible = "adi,axi-fan-control-1.00.a",
- .data = (void *)&version_1_0_0},
- {},
-};
-MODULE_DEVICE_TABLE(of, axi_fan_control_of_match);
-
static int axi_fan_control_probe(struct platform_device *pdev)
{
struct axi_fan_control_data *ctl;
struct clk *clk;
- const struct of_device_id *id;
+ const unsigned int *id;
const char *name = "axi_fan_control";
u32 version;
int ret;
- id = of_match_node(axi_fan_control_of_match, pdev->dev.of_node);
+ id = device_get_match_data(&pdev->dev);
if (!id)
return -EINVAL;
@@ -474,10 +466,9 @@ static int axi_fan_control_probe(struct platform_device *pdev)
return PTR_ERR(ctl->base);
clk = devm_clk_get_enabled(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "clk_get failed with %ld\n", PTR_ERR(clk));
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "clk_get failed\n");
ctl->clk_rate = clk_get_rate(clk);
if (!ctl->clk_rate)
@@ -485,22 +476,20 @@ static int axi_fan_control_probe(struct platform_device *pdev)
version = axi_ioread(ADI_AXI_REG_VERSION, ctl);
if (ADI_AXI_PCORE_VER_MAJOR(version) !=
- ADI_AXI_PCORE_VER_MAJOR((*(u32 *)id->data))) {
- dev_err(&pdev->dev, "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
- ADI_AXI_PCORE_VER_MAJOR((*(u32 *)id->data)),
- ADI_AXI_PCORE_VER_MINOR((*(u32 *)id->data)),
- ADI_AXI_PCORE_VER_PATCH((*(u32 *)id->data)),
- ADI_AXI_PCORE_VER_MAJOR(version),
- ADI_AXI_PCORE_VER_MINOR(version),
- ADI_AXI_PCORE_VER_PATCH(version));
- return -ENODEV;
- }
-
- ret = axi_fan_control_init(ctl, pdev->dev.of_node);
- if (ret) {
- dev_err(&pdev->dev, "Failed to initialize device\n");
- return ret;
- }
+ ADI_AXI_PCORE_VER_MAJOR((*id)))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ ADI_AXI_PCORE_VER_MAJOR(*id),
+ ADI_AXI_PCORE_VER_MINOR(*id),
+ ADI_AXI_PCORE_VER_PATCH(*id),
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
+
+ ret = axi_fan_control_init(ctl, &pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to initialize device\n");
ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
name,
@@ -519,14 +508,22 @@ static int axi_fan_control_probe(struct platform_device *pdev)
axi_fan_control_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
pdev->driver_override, ctl);
- if (ret) {
- dev_err(&pdev->dev, "failed to request an irq, %d", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to request an irq\n");
return 0;
}
+static const u32 version_1_0_0 = ADI_AXI_PCORE_VER(1, 0, 'a');
+
+static const struct of_device_id axi_fan_control_of_match[] = {
+ { .compatible = "adi,axi-fan-control-1.00.a",
+ .data = (void *)&version_1_0_0},
+ {},
+};
+MODULE_DEVICE_TABLE(of, axi_fan_control_of_match);
+
static struct platform_driver axi_fan_control_driver = {
.driver = {
.name = "axi_fan_control_driver",
diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c
new file mode 100644
index 000000000000..6ccceae21f70
--- /dev/null
+++ b/drivers/hwmon/chipcap2.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * chipcap2.c - Support for the Amphenol ChipCap 2 relative humidity and temperature sensor
+ *
+ * Part numbers supported:
+ * CC2D23, CC2D23S, CC2D25, CC2D25S, CC2D33, CC2D33S, CC2D35, CC2D35S
+ *
+ * Author: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+ *
+ * Datasheet and application notes:
+ * https://www.amphenol-sensors.com/en/telaire/humidity/527-humidity-sensors/3095-chipcap-2
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#define CC2_START_CM 0xA0
+#define CC2_START_NOM 0x80
+#define CC2_R_ALARM_H_ON 0x18
+#define CC2_R_ALARM_H_OFF 0x19
+#define CC2_R_ALARM_L_ON 0x1A
+#define CC2_R_ALARM_L_OFF 0x1B
+#define CC2_RW_OFFSET 0x40
+#define CC2_W_ALARM_H_ON (CC2_R_ALARM_H_ON + CC2_RW_OFFSET)
+#define CC2_W_ALARM_H_OFF (CC2_R_ALARM_H_OFF + CC2_RW_OFFSET)
+#define CC2_W_ALARM_L_ON (CC2_R_ALARM_L_ON + CC2_RW_OFFSET)
+#define CC2_W_ALARM_L_OFF (CC2_R_ALARM_L_OFF + CC2_RW_OFFSET)
+
+#define CC2_STATUS_FIELD GENMASK(7, 6)
+#define CC2_STATUS_VALID_DATA 0x00
+#define CC2_STATUS_STALE_DATA 0x01
+#define CC2_STATUS_CMD_MODE 0x02
+
+#define CC2_RESPONSE_FIELD GENMASK(1, 0)
+#define CC2_RESPONSE_BUSY 0x00
+#define CC2_RESPONSE_ACK 0x01
+#define CC2_RESPONSE_NACK 0x02
+
+#define CC2_ERR_CORR_EEPROM BIT(2)
+#define CC2_ERR_UNCORR_EEPROM BIT(3)
+#define CC2_ERR_RAM_PARITY BIT(4)
+#define CC2_ERR_CONFIG_LOAD BIT(5)
+
+#define CC2_EEPROM_SIZE 10
+#define CC2_EEPROM_DATA_LEN 3
+#define CC2_MEASUREMENT_DATA_LEN 4
+
+#define CC2_RH_DATA_FIELD GENMASK(13, 0)
+
+/* ensure clean off -> on transitions */
+#define CC2_POWER_CYCLE_MS 80
+
+#define CC2_STARTUP_TO_DATA_MS 55
+#define CC2_RESP_START_CM_US 100
+#define CC2_RESP_EEPROM_R_US 100
+#define CC2_RESP_EEPROM_W_MS 12
+#define CC2_STARTUP_TIME_US 1250
+
+#define CC2_RH_MAX (100 * 1000U)
+
+#define CC2_CM_RETRIES 5
+
+struct cc2_rh_alarm_info {
+ bool low_alarm;
+ bool high_alarm;
+ bool low_alarm_visible;
+ bool high_alarm_visible;
+};
+
+struct cc2_data {
+ struct cc2_rh_alarm_info rh_alarm;
+ struct completion complete;
+ struct device *hwmon;
+ struct i2c_client *client;
+ struct mutex dev_access_lock; /* device access lock */
+ struct regulator *regulator;
+ const char *name;
+ int irq_ready;
+ int irq_low;
+ int irq_high;
+ bool process_irqs;
+};
+
+enum cc2_chan_addr {
+ CC2_CHAN_TEMP = 0,
+ CC2_CHAN_HUMIDITY,
+};
+
+/* %RH as a per cent mille from a register value */
+static long cc2_rh_convert(u16 data)
+{
+ unsigned long tmp = (data & CC2_RH_DATA_FIELD) * CC2_RH_MAX;
+
+ return tmp / ((1 << 14) - 1);
+}
+
+/* convert %RH to a register value */
+static u16 cc2_rh_to_reg(long data)
+{
+ return data * ((1 << 14) - 1) / CC2_RH_MAX;
+}
+
+/* temperature in milli degrees celsius from a register value */
+static long cc2_temp_convert(u16 data)
+{
+ unsigned long tmp = ((data >> 2) * 165 * 1000U) / ((1 << 14) - 1);
+
+ return tmp - 40 * 1000U;
+}
+
+static int cc2_enable(struct cc2_data *data)
+{
+ int ret;
+
+ /* exclusive regulator, check in case a disable failed */
+ if (regulator_is_enabled(data->regulator))
+ return 0;
+
+ /* clear any pending completion */
+ try_wait_for_completion(&data->complete);
+
+ ret = regulator_enable(data->regulator);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(CC2_STARTUP_TIME_US, CC2_STARTUP_TIME_US + 125);
+
+ data->process_irqs = true;
+
+ return 0;
+}
+
+static void cc2_disable(struct cc2_data *data)
+{
+ int err;
+
+ /* ignore alarms triggered by voltage toggling when powering up */
+ data->process_irqs = false;
+
+ /* exclusive regulator, check in case an enable failed */
+ if (regulator_is_enabled(data->regulator)) {
+ err = regulator_disable(data->regulator);
+ if (err)
+ dev_dbg(&data->client->dev, "Failed to disable device\n");
+ }
+}
+
+static int cc2_cmd_response_diagnostic(struct device *dev, u8 status)
+{
+ int resp;
+
+ if (FIELD_GET(CC2_STATUS_FIELD, status) != CC2_STATUS_CMD_MODE) {
+ dev_dbg(dev, "Command sent out of command window\n");
+ return -ETIMEDOUT;
+ }
+
+ resp = FIELD_GET(CC2_RESPONSE_FIELD, status);
+ switch (resp) {
+ case CC2_RESPONSE_ACK:
+ return 0;
+ case CC2_RESPONSE_BUSY:
+ return -EBUSY;
+ case CC2_RESPONSE_NACK:
+ if (status & CC2_ERR_CORR_EEPROM)
+ dev_dbg(dev, "Command failed: corrected EEPROM\n");
+ if (status & CC2_ERR_UNCORR_EEPROM)
+ dev_dbg(dev, "Command failed: uncorrected EEPROM\n");
+ if (status & CC2_ERR_RAM_PARITY)
+ dev_dbg(dev, "Command failed: RAM parity\n");
+ if (status & CC2_ERR_CONFIG_LOAD)
+ dev_dbg(dev, "Command failed: configuration error\n");
+ return -ENODATA;
+ default:
+ dev_dbg(dev, "Unknown command reply\n");
+ return -EINVAL;
+ }
+}
+
+static int cc2_read_command_status(struct i2c_client *client)
+{
+ u8 status;
+ int ret;
+
+ ret = i2c_master_recv(client, &status, 1);
+ if (ret != 1) {
+ ret = ret < 0 ? ret : -EIO;
+ return ret;
+ }
+
+ return cc2_cmd_response_diagnostic(&client->dev, status);
+}
+
+/*
+ * The command mode is only accessible by sending the START_CM command within
+ * the first 10 ms after power-up. If the command window is missed, up to
+ * CC2_CM_RETRIES attempts are made, power-cycling the device before each
+ * retry, before giving up and returning an error.
+ */
+static int cc2_command_mode_start(struct cc2_data *data)
+{
+ unsigned long timeout;
+ int i, ret;
+
+ for (i = 0; i < CC2_CM_RETRIES; i++) {
+ ret = cc2_enable(data);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_word_data(data->client, CC2_START_CM, 0);
+ if (ret < 0)
+ return ret;
+
+ if (data->irq_ready > 0) {
+ timeout = usecs_to_jiffies(2 * CC2_RESP_START_CM_US);
+ ret = wait_for_completion_timeout(&data->complete,
+ timeout);
+ if (!ret)
+ return -ETIMEDOUT;
+ } else {
+ usleep_range(CC2_RESP_START_CM_US,
+ 2 * CC2_RESP_START_CM_US);
+ }
+ ret = cc2_read_command_status(data->client);
+ if (ret != -ETIMEDOUT || i == CC2_CM_RETRIES)
+ break;
+
+ /* command window missed, prepare for a retry */
+ cc2_disable(data);
+ msleep(CC2_POWER_CYCLE_MS);
+ }
+
+ return ret;
+}
+
+/*
+ * Sending a START_NOM command finishes the command mode immediately with no
+ * reply and the device enters normal operation mode.
+ */
+static int cc2_command_mode_finish(struct cc2_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_word_data(data->client, CC2_START_NOM, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int cc2_write_reg(struct cc2_data *data, u8 reg, u16 val)
+{
+ unsigned long timeout;
+ int ret;
+
+ ret = cc2_command_mode_start(data);
+ if (ret < 0)
+ goto disable;
+
+ cpu_to_be16s(&val);
+ ret = i2c_smbus_write_word_data(data->client, reg, val);
+ if (ret < 0)
+ goto disable;
+
+ if (data->irq_ready > 0) {
+ timeout = msecs_to_jiffies(2 * CC2_RESP_EEPROM_W_MS);
+ ret = wait_for_completion_timeout(&data->complete, timeout);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto disable;
+ }
+ } else {
+ msleep(CC2_RESP_EEPROM_W_MS);
+ }
+
+ ret = cc2_read_command_status(data->client);
+
+disable:
+ cc2_disable(data);
+
+ return ret;
+}
+
+static int cc2_read_reg(struct cc2_data *data, u8 reg, u16 *val)
+{
+ u8 buf[CC2_EEPROM_DATA_LEN];
+ unsigned long timeout;
+ int ret;
+
+ ret = cc2_command_mode_start(data);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_word_data(data->client, reg, 0);
+ if (ret < 0)
+ return ret;
+
+ if (data->irq_ready > 0) {
+ timeout = usecs_to_jiffies(2 * CC2_RESP_EEPROM_R_US);
+ ret = wait_for_completion_timeout(&data->complete, timeout);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ } else {
+ usleep_range(CC2_RESP_EEPROM_R_US, CC2_RESP_EEPROM_R_US + 10);
+ }
+ ret = i2c_master_recv(data->client, buf, CC2_EEPROM_DATA_LEN);
+ if (ret != CC2_EEPROM_DATA_LEN)
+ return ret < 0 ? ret : -EIO;
+
+ *val = be16_to_cpup((__be16 *)&buf[1]);
+
+ return cc2_read_command_status(data->client);
+}
+
+static int cc2_get_reg_val(struct cc2_data *data, u8 reg, long *val)
+{
+ u16 reg_val;
+ int ret;
+
+ ret = cc2_read_reg(data, reg, &reg_val);
+ if (!ret)
+ *val = cc2_rh_convert(reg_val);
+
+ cc2_disable(data);
+
+ return ret;
+}
+
+static int cc2_data_fetch(struct i2c_client *client,
+ enum hwmon_sensor_types type, long *val)
+{
+ u8 data[CC2_MEASUREMENT_DATA_LEN];
+ u8 status;
+ int ret;
+
+ ret = i2c_master_recv(client, data, CC2_MEASUREMENT_DATA_LEN);
+ if (ret != CC2_MEASUREMENT_DATA_LEN) {
+ ret = ret < 0 ? ret : -EIO;
+ return ret;
+ }
+ status = FIELD_GET(CC2_STATUS_FIELD, data[0]);
+ if (status == CC2_STATUS_STALE_DATA)
+ return -EBUSY;
+
+ if (status != CC2_STATUS_VALID_DATA)
+ return -EIO;
+
+ switch (type) {
+ case hwmon_humidity:
+ *val = cc2_rh_convert(be16_to_cpup((__be16 *)&data[0]));
+ break;
+ case hwmon_temp:
+ *val = cc2_temp_convert(be16_to_cpup((__be16 *)&data[2]));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cc2_read_measurement(struct cc2_data *data,
+ enum hwmon_sensor_types type, long *val)
+{
+ unsigned long timeout;
+ int ret;
+
+ if (data->irq_ready > 0) {
+ timeout = msecs_to_jiffies(CC2_STARTUP_TO_DATA_MS * 2);
+ ret = wait_for_completion_timeout(&data->complete, timeout);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ } else {
+ msleep(CC2_STARTUP_TO_DATA_MS);
+ }
+
+ ret = cc2_data_fetch(data->client, type, val);
+
+ return ret;
+}
+
+/*
+ * A measurement requires enabling the device, waiting for the automatic
+ * measurement to finish, reading the measurement data and disabling the device
+ * again.
+ */
+static int cc2_measurement(struct cc2_data *data, enum hwmon_sensor_types type,
+ long *val)
+{
+ int ret;
+
+ ret = cc2_enable(data);
+ if (ret)
+ return ret;
+
+ ret = cc2_read_measurement(data, type, val);
+
+ cc2_disable(data);
+
+ return ret;
+}
+
+/*
+ * In order to check alarm status, the corresponding ALARM_OFF (hysteresis)
+ * register must be read and a new measurement must be carried out to trigger
+ * the alarm signals. Given that the device carries out a measurement after
+ * exiting the command mode, there is no need to force two power-up sequences.
+ * Instead, a NOM command is sent and the device is disabled after the
+ * measurement is read.
+ */
+static int cc2_read_hyst_and_measure(struct cc2_data *data, u8 reg,
+ long *hyst, long *measurement)
+{
+ u16 reg_val;
+ int ret;
+
+ ret = cc2_read_reg(data, reg, &reg_val);
+ if (ret)
+ goto disable;
+
+ *hyst = cc2_rh_convert(reg_val);
+
+ ret = cc2_command_mode_finish(data);
+ if (ret)
+ goto disable;
+
+ ret = cc2_read_measurement(data, hwmon_humidity, measurement);
+
+disable:
+ cc2_disable(data);
+
+ return ret;
+}
+
+static umode_t cc2_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct cc2_data *cc2 = data;
+
+ switch (type) {
+ case hwmon_humidity:
+ switch (attr) {
+ case hwmon_humidity_input:
+ return 0444;
+ case hwmon_humidity_min_alarm:
+ return cc2->rh_alarm.low_alarm_visible ? 0444 : 0;
+ case hwmon_humidity_max_alarm:
+ return cc2->rh_alarm.high_alarm_visible ? 0444 : 0;
+ case hwmon_humidity_min:
+ case hwmon_humidity_min_hyst:
+ return cc2->rh_alarm.low_alarm_visible ? 0644 : 0;
+ case hwmon_humidity_max:
+ case hwmon_humidity_max_hyst:
+ return cc2->rh_alarm.high_alarm_visible ? 0644 : 0;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static irqreturn_t cc2_ready_interrupt(int irq, void *data)
+{
+ struct cc2_data *cc2 = data;
+
+ if (cc2->process_irqs)
+ complete(&cc2->complete);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cc2_low_interrupt(int irq, void *data)
+{
+ struct cc2_data *cc2 = data;
+
+ if (cc2->process_irqs) {
+ hwmon_notify_event(cc2->hwmon, hwmon_humidity,
+ hwmon_humidity_min_alarm, CC2_CHAN_HUMIDITY);
+ cc2->rh_alarm.low_alarm = true;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cc2_high_interrupt(int irq, void *data)
+{
+ struct cc2_data *cc2 = data;
+
+ if (cc2->process_irqs) {
+ hwmon_notify_event(cc2->hwmon, hwmon_humidity,
+ hwmon_humidity_max_alarm, CC2_CHAN_HUMIDITY);
+ cc2->rh_alarm.high_alarm = true;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cc2_humidity_min_alarm_status(struct cc2_data *data, long *val)
+{
+ long measurement, min_hyst;
+ int ret;
+
+ ret = cc2_read_hyst_and_measure(data, CC2_R_ALARM_L_OFF, &min_hyst,
+ &measurement);
+ if (ret < 0)
+ return ret;
+
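+ /*
+ * The low alarm IRQ latched the condition; report it as active only while
+ * the measurement is still below the ALARM_L_OFF (hysteresis) threshold,
+ * and clear the latch once the reading has recovered.
+ */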
+ if (data->rh_alarm.low_alarm) {
+ *val = (measurement < min_hyst) ? 1 : 0;
+ data->rh_alarm.low_alarm = *val;
+ } else {
+ *val = 0;
+ }
+
+ return 0;
+}
+
+static int cc2_humidity_max_alarm_status(struct cc2_data *data, long *val)
+{
+ long measurement, max_hyst;
+ int ret;
+
+ ret = cc2_read_hyst_and_measure(data, CC2_R_ALARM_H_OFF, &max_hyst,
+ &measurement);
+ if (ret < 0)
+ return ret;
+
+ if (data->rh_alarm.high_alarm) {
+ *val = (measurement > max_hyst) ? 1 : 0;
+ data->rh_alarm.high_alarm = *val;
+ } else {
+ *val = 0;
+ }
+
+ return 0;
+}
+
+static int cc2_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct cc2_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&data->dev_access_lock);
+
+ switch (type) {
+ case hwmon_temp:
+ ret = cc2_measurement(data, type, val);
+ break;
+ case hwmon_humidity:
+ switch (attr) {
+ case hwmon_humidity_input:
+ ret = cc2_measurement(data, type, val);
+ break;
+ case hwmon_humidity_min:
+ ret = cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
+ break;
+ case hwmon_humidity_min_hyst:
+ ret = cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
+ break;
+ case hwmon_humidity_max:
+ ret = cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
+ break;
+ case hwmon_humidity_max_hyst:
+ ret = cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
+ break;
+ case hwmon_humidity_min_alarm:
+ ret = cc2_humidity_min_alarm_status(data, val);
+ break;
+ case hwmon_humidity_max_alarm:
+ ret = cc2_humidity_max_alarm_status(data, val);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ mutex_unlock(&data->dev_access_lock);
+
+ return ret;
+}
+
+static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct cc2_data *data = dev_get_drvdata(dev);
+ int ret;
+ u16 arg;
+ u8 cmd;
+
+ if (type != hwmon_humidity)
+ return -EOPNOTSUPP;
+
+ if (val < 0 || val > CC2_RH_MAX)
+ return -EINVAL;
+
+ mutex_lock(&data->dev_access_lock);
+
+ switch (attr) {
+ case hwmon_humidity_min:
+ cmd = CC2_W_ALARM_L_ON;
+ arg = cc2_rh_to_reg(val);
+ ret = cc2_write_reg(data, cmd, arg);
+ break;
+
+ case hwmon_humidity_min_hyst:
+ cmd = CC2_W_ALARM_L_OFF;
+ arg = cc2_rh_to_reg(val);
+ ret = cc2_write_reg(data, cmd, arg);
+ break;
+
+ case hwmon_humidity_max:
+ cmd = CC2_W_ALARM_H_ON;
+ arg = cc2_rh_to_reg(val);
+ ret = cc2_write_reg(data, cmd, arg);
+ break;
+
+ case hwmon_humidity_max_hyst:
+ cmd = CC2_W_ALARM_H_OFF;
+ arg = cc2_rh_to_reg(val);
+ ret = cc2_write_reg(data, cmd, arg);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&data->dev_access_lock);
+
+ return ret;
+}
+
+static int cc2_request_ready_irq(struct cc2_data *data, struct device *dev)
+{
+ int ret = 0;
+
+ data->irq_ready = fwnode_irq_get_byname(dev_fwnode(dev), "ready");
+ if (data->irq_ready > 0) {
+ init_completion(&data->complete);
+ ret = devm_request_threaded_irq(dev, data->irq_ready, NULL,
+ cc2_ready_interrupt,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING,
+ dev_name(dev), data);
+ }
+
+ return ret;
+}
+
+static int cc2_request_alarm_irqs(struct cc2_data *data, struct device *dev)
+{
+ int ret = 0;
+
+ data->irq_low = fwnode_irq_get_byname(dev_fwnode(dev), "low");
+ if (data->irq_low > 0) {
+ ret = devm_request_threaded_irq(dev, data->irq_low, NULL,
+ cc2_low_interrupt,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING,
+ dev_name(dev), data);
+ if (ret)
+ return ret;
+
+ data->rh_alarm.low_alarm_visible = true;
+ }
+
+ data->irq_high = fwnode_irq_get_byname(dev_fwnode(dev), "high");
+ if (data->irq_high > 0) {
+ ret = devm_request_threaded_irq(dev, data->irq_high, NULL,
+ cc2_high_interrupt,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_RISING,
+ dev_name(dev), data);
+ if (ret)
+ return ret;
+
+ data->rh_alarm.high_alarm_visible = true;
+ }
+
+ return ret;
+}
+
+static const struct hwmon_channel_info *cc2_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT | HWMON_H_MIN | HWMON_H_MAX |
+ HWMON_H_MIN_HYST | HWMON_H_MAX_HYST |
+ HWMON_H_MIN_ALARM | HWMON_H_MAX_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops cc2_hwmon_ops = {
+ .is_visible = cc2_is_visible,
+ .read = cc2_read,
+ .write = cc2_write,
+};
+
+static const struct hwmon_chip_info cc2_chip_info = {
+ .ops = &cc2_hwmon_ops,
+ .info = cc2_info,
+};
+
+static int cc2_probe(struct i2c_client *client)
+{
+ struct cc2_data *data;
+ struct device *dev = &client->dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->dev_access_lock);
+
+ data->client = client;
+
+ data->regulator = devm_regulator_get_exclusive(dev, "vdd");
+ if (IS_ERR(data->regulator)) {
+ dev_err_probe(dev, PTR_ERR(data->regulator),
+ "Failed to get regulator\n");
+ return PTR_ERR(data->regulator);
+ }
+
+ ret = cc2_request_ready_irq(data, dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request ready irq\n");
+ return ret;
+ }
+
+ ret = cc2_request_alarm_irqs(data, dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request alarm irqs\n");
+ goto disable;
+ }
+
+ data->hwmon = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &cc2_chip_info,
+ NULL);
+ if (IS_ERR(data->hwmon)) {
+ dev_err_probe(dev, PTR_ERR(data->hwmon),
+ "Failed to register hwmon device\n");
+ ret = PTR_ERR(data->hwmon);
+ }
+
+disable:
+ cc2_disable(data);
+
+ return ret;
+}
+
+static void cc2_remove(struct i2c_client *client)
+{
+ struct cc2_data *data = i2c_get_clientdata(client);
+
+ cc2_disable(data);
+}
+
+static const struct i2c_device_id cc2_id[] = {
+ { "cc2d23" },
+ { "cc2d23s" },
+ { "cc2d25" },
+ { "cc2d25s" },
+ { "cc2d33" },
+ { "cc2d33s" },
+ { "cc2d35" },
+ { "cc2d35s" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cc2_id);
+
+static const struct of_device_id cc2_of_match[] = {
+ { .compatible = "amphenol,cc2d23" },
+ { .compatible = "amphenol,cc2d23s" },
+ { .compatible = "amphenol,cc2d25" },
+ { .compatible = "amphenol,cc2d25s" },
+ { .compatible = "amphenol,cc2d33" },
+ { .compatible = "amphenol,cc2d33s" },
+ { .compatible = "amphenol,cc2d35" },
+ { .compatible = "amphenol,cc2d35s" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cc2_of_match);
+
+static struct i2c_driver cc2_driver = {
+ .driver = {
+ .name = "cc2d23",
+ .of_match_table = cc2_of_match,
+ },
+ .probe = cc2_probe,
+ .remove = cc2_remove,
+ .id_table = cc2_id,
+};
+module_i2c_driver(cc2_driver);
+
+MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gamil.com>");
+MODULE_DESCRIPTION("Amphenol ChipCap 2 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b8fc8d1ef20d..616bd1a5b864 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -39,13 +39,18 @@ static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
-#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
-#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
-#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
-#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
-#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+
+enum coretemp_attr_index {
+ ATTR_LABEL,
+ ATTR_CRIT_ALARM,
+ ATTR_TEMP,
+ ATTR_TJMAX,
+ ATTR_TTARGET,
+ MAX_CORE_ATTRS = ATTR_TJMAX + 1, /* Maximum no of basic attrs */
+ TOTAL_ATTRS = ATTR_TTARGET + 1 /* Maximum no of possible attrs */
+};
#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
@@ -65,19 +70,17 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
* @attr_size: Total number of per-core attrs displayed in the sysfs.
- * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
- * Otherwise, temp_data holds coretemp data.
*/
struct temp_data {
int temp;
int tjmax;
unsigned long last_updated;
unsigned int cpu;
+ int index;
u32 cpu_core_id;
u32 status_reg;
int attr_size;
- bool is_pkg_data;
- struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
+ struct device_attribute sd_attrs[TOTAL_ATTRS];
char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
struct attribute *attrs[TOTAL_ATTRS + 1];
struct attribute_group attr_group;
@@ -88,10 +91,11 @@ struct temp_data {
struct platform_data {
struct device *hwmon_dev;
u16 pkg_id;
- u16 cpu_map[NUM_REAL_CORES];
+ int nr_cores;
struct ida ida;
struct cpumask cpumask;
- struct temp_data *core_data[MAX_CORE_DATA];
+ struct temp_data *pkg_data;
+ struct temp_data **core_data;
struct device_attribute name_attr;
};
@@ -143,6 +147,11 @@ static const struct tjmax_model tjmax_model_table[] = {
*/
};
+static bool is_pkg_temp_data(struct temp_data *tdata)
+{
+ return tdata->index < 0;
+}
+
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
/* The 100C is default for both mobile and non mobile CPUs */
@@ -332,11 +341,10 @@ static struct platform_device **zone_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
+ struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_LABEL]);
- if (tdata->is_pkg_data)
+ if (is_pkg_temp_data(tdata))
return sprintf(buf, "Package id %u\n", pdata->pkg_id);
return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
@@ -346,9 +354,8 @@ static ssize_t show_crit_alarm(struct device *dev,
struct device_attribute *devattr, char *buf)
{
u32 eax, edx;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
+ struct temp_data *tdata = container_of(devattr, struct temp_data,
+ sd_attrs[ATTR_CRIT_ALARM]);
mutex_lock(&tdata->update_lock);
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
@@ -360,9 +367,7 @@ static ssize_t show_crit_alarm(struct device *dev,
static ssize_t show_tjmax(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
+ struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TJMAX]);
int tjmax;
mutex_lock(&tdata->update_lock);
@@ -375,9 +380,7 @@ static ssize_t show_tjmax(struct device *dev,
static ssize_t show_ttarget(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
+ struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TTARGET]);
int ttarget;
mutex_lock(&tdata->update_lock);
@@ -393,9 +396,7 @@ static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
u32 eax, edx;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
+ struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TEMP]);
int tjmax;
mutex_lock(&tdata->update_lock);
@@ -418,8 +419,7 @@ static ssize_t show_temp(struct device *dev,
return sprintf(buf, "%d\n", tdata->temp);
}
-static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int index)
+static int create_core_attrs(struct temp_data *tdata, struct device *dev)
{
int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -436,16 +436,15 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
* The attr number is always core id + 2
* The Pkgtemp will always show up as temp1_*, if available
*/
- int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
+ int attr_no = is_pkg_temp_data(tdata) ? 1 : tdata->cpu_core_id + 2;
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
"temp%d_%s", attr_no, suffixes[i]);
- sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
- tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
- tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
- tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].index = index;
- tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
+ sysfs_attr_init(&tdata->sd_attrs[i].attr);
+ tdata->sd_attrs[i].attr.name = tdata->attr_name[i];
+ tdata->sd_attrs[i].attr.mode = 0444;
+ tdata->sd_attrs[i].show = rd_ptr[i];
+ tdata->attrs[i] = &tdata->sd_attrs[i].attr;
}
tdata->attr_group.attrs = tdata->attrs;
return sysfs_create_group(&dev->kobj, &tdata->attr_group);
@@ -477,17 +476,44 @@ static struct platform_device *coretemp_get_pdev(unsigned int cpu)
return NULL;
}
-static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
+static struct temp_data *
+init_temp_data(struct platform_data *pdata, unsigned int cpu, int pkg_flag)
{
struct temp_data *tdata;
+ if (!pdata->core_data) {
+ /*
+ * TODO:
+ * The information of actual possible cores in a package is broken for now.
+ * Will replace hardcoded NUM_REAL_CORES with actual per package core count
+ * when this information becomes available.
+ */
+ pdata->nr_cores = NUM_REAL_CORES;
+ pdata->core_data = kcalloc(pdata->nr_cores, sizeof(struct temp_data *),
+ GFP_KERNEL);
+ if (!pdata->core_data)
+ return NULL;
+ }
+
tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
if (!tdata)
return NULL;
+ if (pkg_flag) {
+ pdata->pkg_data = tdata;
+ /* Use tdata->index as indicator of package temp data */
+ tdata->index = -1;
+ } else {
+ tdata->index = ida_alloc_max(&pdata->ida, pdata->nr_cores - 1, GFP_KERNEL);
+ if (tdata->index < 0) {
+ kfree(tdata);
+ return NULL;
+ }
+ pdata->core_data[tdata->index] = tdata;
+ }
+
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
- tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = topology_core_id(cpu);
tdata->attr_size = MAX_CORE_ATTRS;
@@ -495,6 +521,36 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
return tdata;
}
+static void destroy_temp_data(struct platform_data *pdata, struct temp_data *tdata)
+{
+ if (is_pkg_temp_data(tdata)) {
+ pdata->pkg_data = NULL;
+ kfree(pdata->core_data);
+ pdata->core_data = NULL;
+ pdata->nr_cores = 0;
+ } else {
+ pdata->core_data[tdata->index] = NULL;
+ ida_free(&pdata->ida, tdata->index);
+ }
+ kfree(tdata);
+}
+
+static struct temp_data *get_temp_data(struct platform_data *pdata, int cpu)
+{
+ int i;
+
+ /* cpu < 0 means get pkg temp_data */
+ if (cpu < 0)
+ return pdata->pkg_data;
+
+ for (i = 0; i < pdata->nr_cores; i++) {
+ if (pdata->core_data[i] &&
+ pdata->core_data[i]->cpu_core_id == topology_core_id(cpu))
+ return pdata->core_data[i];
+ }
+ return NULL;
+}
+
static int create_core_data(struct platform_device *pdev, unsigned int cpu,
int pkg_flag)
{
@@ -502,37 +558,19 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
- int err, index;
+ int err;
if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
return 0;
- /*
- * Get the index of tdata in pdata->core_data[]
- * tdata for package: pdata->core_data[1]
- * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
- */
- if (pkg_flag) {
- index = PKG_SYSFS_ATTR_NO;
- } else {
- index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
- if (index < 0)
- return index;
-
- pdata->cpu_map[index] = topology_core_id(cpu);
- index += BASE_SYSFS_ATTR_NO;
- }
-
- tdata = init_temp_data(cpu, pkg_flag);
- if (!tdata) {
- err = -ENOMEM;
- goto ida_free;
- }
+ tdata = init_temp_data(pdata, cpu, pkg_flag);
+ if (!tdata)
+ return -ENOMEM;
/* Test if we can access the status register */
err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
if (err)
- goto exit_free;
+ goto err;
/* Make sure tdata->tjmax is a valid indicator for dynamic/static tjmax */
get_tjmax(tdata, &pdev->dev);
@@ -546,20 +584,15 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
if (get_ttarget(tdata, &pdev->dev) >= 0)
tdata->attr_size++;
- pdata->core_data[index] = tdata;
-
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, pdata->hwmon_dev, index);
+ err = create_core_attrs(tdata, pdata->hwmon_dev);
if (err)
- goto exit_free;
+ goto err;
return 0;
-exit_free:
- pdata->core_data[index] = NULL;
- kfree(tdata);
-ida_free:
- if (!pkg_flag)
- ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
+
+err:
+ destroy_temp_data(pdata, tdata);
return err;
}
@@ -570,10 +603,8 @@ coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
-static void coretemp_remove_core(struct platform_data *pdata, int indx)
+static void coretemp_remove_core(struct platform_data *pdata, struct temp_data *tdata)
{
- struct temp_data *tdata = pdata->core_data[indx];
-
/* if we errored on add then this is already gone */
if (!tdata)
return;
@@ -581,11 +612,7 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
/* Remove the sysfs attributes */
sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);
- kfree(pdata->core_data[indx]);
- pdata->core_data[indx] = NULL;
-
- if (indx >= BASE_SYSFS_ATTR_NO)
- ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
+ destroy_temp_data(pdata, tdata);
}
static int coretemp_device_add(int zoneid)
@@ -698,7 +725,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
struct platform_device *pdev = coretemp_get_pdev(cpu);
struct platform_data *pd;
struct temp_data *tdata;
- int i, indx = -1, target;
+ int target;
/* No need to tear down any interfaces for suspend */
if (cpuhp_tasks_frozen)
@@ -709,18 +736,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
if (!pd->hwmon_dev)
return 0;
- for (i = 0; i < NUM_REAL_CORES; i++) {
- if (pd->cpu_map[i] == topology_core_id(cpu)) {
- indx = i + BASE_SYSFS_ATTR_NO;
- break;
- }
- }
-
- /* Too many cores and this core is not populated, just return */
- if (indx < 0)
- return 0;
-
- tdata = pd->core_data[indx];
+ tdata = get_temp_data(pd, cpu);
cpumask_clear_cpu(cpu, &pd->cpumask);
@@ -731,7 +747,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
*/
target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
if (target >= nr_cpu_ids) {
- coretemp_remove_core(pd, indx);
+ coretemp_remove_core(pd, tdata);
} else if (tdata && tdata->cpu == cpu) {
mutex_lock(&tdata->update_lock);
tdata->cpu = target;
@@ -741,10 +757,10 @@ static int coretemp_cpu_offline(unsigned int cpu)
/*
* If all cores in this pkg are offline, remove the interface.
*/
- tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ tdata = get_temp_data(pd, -1);
if (cpumask_empty(&pd->cpumask)) {
if (tdata)
- coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO);
+ coretemp_remove_core(pd, tdata);
hwmon_device_unregister(pd->hwmon_dev);
pd->hwmon_dev = NULL;
return 0;
@@ -782,7 +798,7 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
- max_zones = topology_max_packages() * topology_max_die_per_package();
+ max_zones = topology_max_packages() * topology_max_dies_per_package();
zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
GFP_KERNEL);
if (!zone_devices)
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 6d8c0f328b7b..a8d42c9d5d04 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1450,10 +1450,15 @@ struct i8k_fan_control_data {
};
enum i8k_fan_controls {
+ I8K_FAN_30A3_31A3,
I8K_FAN_34A3_35A3,
};
static const struct i8k_fan_control_data i8k_fan_control_data[] __initconst = {
+ [I8K_FAN_30A3_31A3] = {
+ .manual_fan = 0x30a3,
+ .auto_fan = 0x31a3,
+ },
[I8K_FAN_34A3_35A3] = {
.manual_fan = 0x34a3,
.auto_fan = 0x35a3,
@@ -1517,6 +1522,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
+ {
+ .ident = "Dell XPS 9315",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
{ }
};
@@ -1587,6 +1600,7 @@ static struct wmi_driver dell_smm_wmi_driver = {
},
.id_table = dell_smm_wmi_id_table,
.probe = dell_smm_wmi_probe,
+ .no_singleton = true,
};
/*
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 21b635046521..bffbc8040171 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -380,7 +380,6 @@ MODULE_DEVICE_TABLE(i2c, ds1621_id);
/* This is the driver that will be inserted */
static struct i2c_driver ds1621_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "ds1621",
},
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 2b09536630cb..4fc4df012fac 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -241,7 +241,6 @@ MODULE_DEVICE_TABLE(i2c, ds620_id);
/* This is the driver that will be inserted */
static struct i2c_driver ds620_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "ds620",
},
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 1332e4ac078c..d370efd6f986 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -385,7 +385,7 @@ static bool emc1403_regmap_is_volatile(struct device *dev, unsigned int reg)
static const struct regmap_config emc1403_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = emc1403_regmap_is_volatile,
};
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 29f0e4945f19..6ef733c0be16 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -12,9 +12,6 @@
#include <linux/platform_data/emc2305.h>
#include <linux/thermal.h>
-static const unsigned short
-emc2305_normal_i2c[] = { 0x27, 0x2c, 0x2d, 0x2e, 0x2f, 0x4c, 0x4d, I2C_CLIENT_END };
-
#define EMC2305_REG_DRIVE_FAIL_STATUS 0x27
#define EMC2305_REG_VENDOR 0xfe
#define EMC2305_FAN_MAX 0xff
@@ -611,14 +608,12 @@ static void emc2305_remove(struct i2c_client *client)
}
static struct i2c_driver emc2305_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "emc2305",
},
.probe = emc2305_probe,
.remove = emc2305_remove,
.id_table = emc2305_ids,
- .address_list = emc2305_normal_i2c,
};
module_i2c_driver(emc2305_driver);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 6307112c2c0c..9ed2c4b6734e 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -209,7 +209,7 @@ static ssize_t power1_average_show(struct device *dev,
* With the new x86 topology modelling, x86_max_cores is the
* compute unit number.
*/
- cu_num = boot_cpu_data.x86_max_cores;
+ cu_num = topology_num_cores_per_package();
ret = read_registers(data);
if (ret)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c7dd3f5b2bd5..3b259c425ab7 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -510,6 +510,7 @@ static const char * const hwmon_in_attr_templates[] = {
[hwmon_in_rated_min] = "in%d_rated_min",
[hwmon_in_rated_max] = "in%d_rated_max",
[hwmon_in_beep] = "in%d_beep",
+ [hwmon_in_fault] = "in%d_fault",
};
static const char * const hwmon_curr_attr_templates[] = {
@@ -586,6 +587,8 @@ static const char * const hwmon_humidity_attr_templates[] = {
[hwmon_humidity_fault] = "humidity%d_fault",
[hwmon_humidity_rated_min] = "humidity%d_rated_min",
[hwmon_humidity_rated_max] = "humidity%d_rated_max",
+ [hwmon_humidity_min_alarm] = "humidity%d_min_alarm",
+ [hwmon_humidity_max_alarm] = "humidity%d_max_alarm",
};
static const char * const hwmon_fan_attr_templates[] = {
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index c558143e5285..d9b57a4b3e41 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -589,7 +589,6 @@ MODULE_DEVICE_TABLE(of, ina209_of_match);
/* This is the driver that will be inserted */
static struct i2c_driver ina209_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "ina209",
.of_match_table = of_match_ptr(ina209_of_match),
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index ca9f5d2c811b..69289293bc38 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -629,7 +629,6 @@ static const struct of_device_id __maybe_unused ina238_of_match[] = {
MODULE_DEVICE_TABLE(of, ina238_of_match);
static struct i2c_driver ina238_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "ina238",
.of_match_table = of_match_ptr(ina238_of_match),
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 5ffdc94db436..2c9530b6f192 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -762,7 +762,7 @@ static const struct regmap_config ina3221_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_table = &ina3221_volatile_table,
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index f958e830b23c..75dc25df0f8b 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -497,7 +497,7 @@ static const struct regmap_config jc42_regmap_config = {
.writeable_reg = jc42_writable_reg,
.readable_reg = jc42_readable_reg,
.volatile_reg = jc42_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int jc42_probe(struct i2c_client *client)
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 5befedca6abb..b333c9bde4e6 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -165,7 +165,7 @@ static bool lm83_regmap_is_volatile(struct device *dev, unsigned int reg)
static const struct regmap_config lm83_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = lm83_regmap_is_volatile,
.reg_read = lm83_regmap_reg_read,
.reg_write = lm83_regmap_reg_write,
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
new file mode 100644
index 000000000000..4f608a3790fb
--- /dev/null
+++ b/drivers/hwmon/ltc4282.c
@@ -0,0 +1,1782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC4282 High Current Hot Swap Controller over I2C
+ *
+ * Copyright 2023 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/units.h>
+#include <linux/util_macros.h>
+
+#define LTC4282_CTRL_LSB 0x00
+ #define LTC4282_CTRL_OV_RETRY_MASK BIT(0)
+ #define LTC4282_CTRL_UV_RETRY_MASK BIT(1)
+ #define LTC4282_CTRL_OC_RETRY_MASK BIT(2)
+ #define LTC4282_CTRL_ON_ACTIVE_LOW_MASK BIT(5)
+ #define LTC4282_CTRL_ON_DELAY_MASK BIT(6)
+#define LTC4282_CTRL_MSB 0x01
+ #define LTC4282_CTRL_VIN_MODE_MASK GENMASK(1, 0)
+ #define LTC4282_CTRL_OV_MODE_MASK GENMASK(3, 2)
+ #define LTC4282_CTRL_UV_MODE_MASK GENMASK(5, 4)
+#define LTC4282_FAULT_LOG 0x04
+ #define LTC4282_OV_FAULT_MASK BIT(0)
+ #define LTC4282_UV_FAULT_MASK BIT(1)
+ #define LTC4282_VDD_FAULT_MASK \
+ (LTC4282_OV_FAULT_MASK | LTC4282_UV_FAULT_MASK)
+ #define LTC4282_OC_FAULT_MASK BIT(2)
+ #define LTC4282_POWER_BAD_FAULT_MASK BIT(3)
+ #define LTC4282_FET_SHORT_FAULT_MASK BIT(5)
+ #define LTC4282_FET_BAD_FAULT_MASK BIT(6)
+ #define LTC4282_FET_FAILURE_FAULT_MASK \
+ (LTC4282_FET_SHORT_FAULT_MASK | LTC4282_FET_BAD_FAULT_MASK)
+#define LTC4282_ADC_ALERT_LOG 0x05
+ #define LTC4282_GPIO_ALARM_L_MASK BIT(0)
+ #define LTC4282_GPIO_ALARM_H_MASK BIT(1)
+ #define LTC4282_VSOURCE_ALARM_L_MASK BIT(2)
+ #define LTC4282_VSOURCE_ALARM_H_MASK BIT(3)
+ #define LTC4282_VSENSE_ALARM_L_MASK BIT(4)
+ #define LTC4282_VSENSE_ALARM_H_MASK BIT(5)
+ #define LTC4282_POWER_ALARM_L_MASK BIT(6)
+ #define LTC4282_POWER_ALARM_H_MASK BIT(7)
+#define LTC4282_FET_BAD_FAULT_TIMEOUT 0x06
+ #define LTC4282_FET_BAD_MAX_TIMEOUT 255
+#define LTC4282_GPIO_CONFIG 0x07
+ #define LTC4282_GPIO_2_FET_STRESS_MASK BIT(1)
+ #define LTC4282_GPIO_1_CONFIG_MASK GENMASK(5, 4)
+#define LTC4282_VGPIO_MIN 0x08
+#define LTC4282_VGPIO_MAX 0x09
+#define LTC4282_VSOURCE_MIN 0x0a
+#define LTC4282_VSOURCE_MAX 0x0b
+#define LTC4282_VSENSE_MIN 0x0c
+#define LTC4282_VSENSE_MAX 0x0d
+#define LTC4282_POWER_MIN 0x0e
+#define LTC4282_POWER_MAX 0x0f
+#define LTC4282_CLK_DIV 0x10
+ #define LTC4282_CLK_DIV_MASK GENMASK(4, 0)
+ #define LTC4282_CLKOUT_MASK GENMASK(6, 5)
+#define LTC4282_ILIM_ADJUST 0x11
+ #define LTC4282_GPIO_MODE_MASK BIT(1)
+ #define LTC4282_VDD_MONITOR_MASK BIT(2)
+ #define LTC4282_FOLDBACK_MODE_MASK GENMASK(4, 3)
+ #define LTC4282_ILIM_ADJUST_MASK GENMASK(7, 5)
+#define LTC4282_ENERGY 0x12
+#define LTC4282_TIME_COUNTER 0x18
+#define LTC4282_ALERT_CTRL 0x1c
+ #define LTC4282_ALERT_OUT_MASK BIT(6)
+#define LTC4282_ADC_CTRL 0x1d
+ #define LTC4282_FAULT_LOG_EN_MASK BIT(2)
+ #define LTC4282_METER_HALT_MASK BIT(5)
+ #define LTC4282_METER_RESET_MASK BIT(6)
+ #define LTC4282_RESET_MASK BIT(7)
+#define LTC4282_STATUS_LSB 0x1e
+ #define LTC4282_OV_STATUS_MASK BIT(0)
+ #define LTC4282_UV_STATUS_MASK BIT(1)
+ #define LTC4282_VDD_STATUS_MASK \
+ (LTC4282_OV_STATUS_MASK | LTC4282_UV_STATUS_MASK)
+ #define LTC4282_OC_STATUS_MASK BIT(2)
+ #define LTC4282_POWER_GOOD_MASK BIT(3)
+ #define LTC4282_FET_FAILURE_MASK GENMASK(6, 5)
+#define LTC4282_STATUS_MSB 0x1f
+#define LTC4282_RESERVED_1 0x32
+#define LTC4282_RESERVED_2 0x33
+#define LTC4282_VGPIO 0x34
+#define LTC4282_VGPIO_LOWEST 0x36
+#define LTC4282_VGPIO_HIGHEST 0x38
+#define LTC4282_VSOURCE 0x3a
+#define LTC4282_VSOURCE_LOWEST 0x3c
+#define LTC4282_VSOURCE_HIGHEST 0x3e
+#define LTC4282_VSENSE 0x40
+#define LTC4282_VSENSE_LOWEST 0x42
+#define LTC4282_VSENSE_HIGHEST 0x44
+#define LTC4282_POWER 0x46
+#define LTC4282_POWER_LOWEST 0x48
+#define LTC4282_POWER_HIGHEST 0x4a
+#define LTC4282_RESERVED_3 0x50
+
+#define LTC4282_CLKIN_MIN (250 * KILO)
+#define LTC4282_CLKIN_MAX (15500 * KILO)
+#define LTC4282_CLKIN_RANGE (LTC4282_CLKIN_MAX - LTC4282_CLKIN_MIN + 1)
+#define LTC4282_CLKOUT_SYSTEM (250 * KILO)
+#define LTC4282_CLKOUT_CNV 15
+
+enum {
+ LTC4282_CHAN_VSOURCE,
+ LTC4282_CHAN_VDD,
+ LTC4282_CHAN_VGPIO,
+};
+
+struct ltc4282_cache {
+ u32 in_max_raw;
+ u32 in_min_raw;
+ long in_highest;
+ long in_lowest;
+ bool en;
+};
+
+struct ltc4282_state {
+ struct regmap *map;
+ /* Protect against multiple accesses to the device registers */
+ struct mutex lock;
+ struct clk_hw clk_hw;
+ /*
+	 * Used to cache values for whichever of VDD/VSOURCE is not currently
+	 * enabled in hwmon. Needed because both channels share the same
+	 * registers.
+ */
+ struct ltc4282_cache in0_1_cache[LTC4282_CHAN_VGPIO];
+ u32 vsense_max;
+ long power_max;
+ u32 rsense;
+ u16 vdd;
+ u16 vfs_out;
+ bool energy_en;
+};
+
+enum {
+ LTC4282_CLKOUT_NONE,
+ LTC4282_CLKOUT_INT,
+ LTC4282_CLKOUT_TICK,
+};
+
+static int ltc4282_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct ltc4282_state *st = container_of(hw, struct ltc4282_state,
+ clk_hw);
+ u32 val = LTC4282_CLKOUT_INT;
+
+ if (rate == LTC4282_CLKOUT_CNV)
+ val = LTC4282_CLKOUT_TICK;
+
+ return regmap_update_bits(st->map, LTC4282_CLK_DIV, LTC4282_CLKOUT_MASK,
+ FIELD_PREP(LTC4282_CLKOUT_MASK, val));
+}
+
+/*
+ * Note that the 15 Hz conversion rate assumes a 12-bit ADC, which is all we
+ * support for now.
+ */
+static const unsigned int ltc4282_out_rates[] = {
+ LTC4282_CLKOUT_CNV, LTC4282_CLKOUT_SYSTEM
+};
+
+static long ltc4282_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ int idx = find_closest(rate, ltc4282_out_rates,
+ ARRAY_SIZE(ltc4282_out_rates));
+
+ return ltc4282_out_rates[idx];
+}
+
+static unsigned long ltc4282_recalc_rate(struct clk_hw *hw,
+ unsigned long parent)
+{
+ struct ltc4282_state *st = container_of(hw, struct ltc4282_state,
+ clk_hw);
+ u32 clkdiv;
+ int ret;
+
+ ret = regmap_read(st->map, LTC4282_CLK_DIV, &clkdiv);
+ if (ret)
+ return 0;
+
+ clkdiv = FIELD_GET(LTC4282_CLKOUT_MASK, clkdiv);
+ if (!clkdiv)
+ return 0;
+ if (clkdiv == LTC4282_CLKOUT_INT)
+ return LTC4282_CLKOUT_SYSTEM;
+
+ return LTC4282_CLKOUT_CNV;
+}
+
+static void ltc4282_disable(struct clk_hw *clk_hw)
+{
+ struct ltc4282_state *st = container_of(clk_hw, struct ltc4282_state,
+ clk_hw);
+
+ regmap_clear_bits(st->map, LTC4282_CLK_DIV, LTC4282_CLKOUT_MASK);
+}
+
+static int ltc4282_read_voltage_word(const struct ltc4282_state *st, u32 reg,
+ u32 fs, long *val)
+{
+ __be16 in;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &in, sizeof(in));
+ if (ret)
+ return ret;
+
+ /*
+ * This is also used to calculate current in which case fs comes in
+ * 10 * uV. Hence the ULL usage.
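+	 * As an illustrative example for the plain voltage case: with
+	 * fs = 16640 (mV), a raw code of 0x8000 maps to roughly 8320 mV.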
+ */
+ *val = DIV_ROUND_CLOSEST_ULL(be16_to_cpu(in) * (u64)fs, U16_MAX);
+ return 0;
+}
+
+static int ltc4282_read_voltage_byte_cached(const struct ltc4282_state *st,
+ u32 reg, u32 fs, long *val,
+ u32 *cached_raw)
+{
+ int ret;
+ u32 in;
+
+ if (cached_raw) {
+ in = *cached_raw;
+ } else {
+ ret = regmap_read(st->map, reg, &in);
+ if (ret)
+ return ret;
+ }
+
+ *val = DIV_ROUND_CLOSEST(in * fs, U8_MAX);
+ return 0;
+}
+
+static int ltc4282_read_voltage_byte(const struct ltc4282_state *st, u32 reg,
+ u32 fs, long *val)
+{
+ return ltc4282_read_voltage_byte_cached(st, reg, fs, val, NULL);
+}
+
+static int __ltc4282_read_alarm(struct ltc4282_state *st, u32 reg, u32 mask,
+ long *val)
+{
+ u32 alarm;
+ int ret;
+
+ ret = regmap_read(st->map, reg, &alarm);
+ if (ret)
+ return ret;
+
+ *val = !!(alarm & mask);
+
+ /* if not status/fault logs, clear the alarm after reading it */
+ if (reg != LTC4282_STATUS_LSB && reg != LTC4282_FAULT_LOG)
+ return regmap_clear_bits(st->map, reg, mask);
+
+ return 0;
+}
+
+static int ltc4282_read_alarm(struct ltc4282_state *st, u32 reg, u32 mask,
+ long *val)
+{
+ guard(mutex)(&st->lock);
+ return __ltc4282_read_alarm(st, reg, mask, val);
+}
+
+static int ltc4282_vdd_source_read_in(struct ltc4282_state *st, u32 channel,
+ long *val)
+{
+ guard(mutex)(&st->lock);
+ if (!st->in0_1_cache[channel].en)
+ return -ENODATA;
+
+ return ltc4282_read_voltage_word(st, LTC4282_VSOURCE, st->vfs_out, val);
+}
+
+static int ltc4282_vdd_source_read_hist(struct ltc4282_state *st, u32 reg,
+ u32 channel, long *cached, long *val)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+ if (!st->in0_1_cache[channel].en) {
+ *val = *cached;
+ return 0;
+ }
+
+ ret = ltc4282_read_voltage_word(st, reg, st->vfs_out, val);
+ if (ret)
+ return ret;
+
+ *cached = *val;
+ return 0;
+}
+
+static int ltc4282_vdd_source_read_lim(struct ltc4282_state *st, u32 reg,
+ u32 channel, u32 *cached, long *val)
+{
+ guard(mutex)(&st->lock);
+ if (!st->in0_1_cache[channel].en)
+ return ltc4282_read_voltage_byte_cached(st, reg, st->vfs_out,
+ val, cached);
+
+ return ltc4282_read_voltage_byte(st, reg, st->vfs_out, val);
+}
+
+static int ltc4282_vdd_source_read_alm(struct ltc4282_state *st, u32 mask,
+ u32 channel, long *val)
+{
+ guard(mutex)(&st->lock);
+ if (!st->in0_1_cache[channel].en) {
+ /*
+		 * Return 0 here; otherwise alarms can get confused because we
+		 * clear them after reading them. So, if someone mistakenly reads
+		 * VSOURCE right before VDD (or the other way around), we might
+		 * report no alarm just because it was cleared when reading
+		 * VSOURCE and there was no time for a new conversion to set it
+		 * again.
+ */
+ *val = 0;
+ return 0;
+ }
+
+ return __ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG, mask, val);
+}
+
+static int ltc4282_read_in(struct ltc4282_state *st, u32 attr, long *val,
+ u32 channel)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_read_voltage_word(st, LTC4282_VGPIO,
+ 1280, val);
+
+ return ltc4282_vdd_source_read_in(st, channel, val);
+ case hwmon_in_highest:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_read_voltage_word(st,
+ LTC4282_VGPIO_HIGHEST,
+ 1280, val);
+
+ return ltc4282_vdd_source_read_hist(st, LTC4282_VSOURCE_HIGHEST,
+ channel,
+ &st->in0_1_cache[channel].in_highest, val);
+ case hwmon_in_lowest:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_read_voltage_word(st, LTC4282_VGPIO_LOWEST,
+ 1280, val);
+
+ return ltc4282_vdd_source_read_hist(st, LTC4282_VSOURCE_LOWEST,
+ channel,
+ &st->in0_1_cache[channel].in_lowest, val);
+ case hwmon_in_max_alarm:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG,
+ LTC4282_GPIO_ALARM_H_MASK,
+ val);
+
+ return ltc4282_vdd_source_read_alm(st,
+ LTC4282_VSOURCE_ALARM_H_MASK,
+ channel, val);
+ case hwmon_in_min_alarm:
+ if (channel == LTC4282_CHAN_VGPIO)
+			return ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG,
+						  LTC4282_GPIO_ALARM_L_MASK,
+						  val);
+
+ return ltc4282_vdd_source_read_alm(st,
+ LTC4282_VSOURCE_ALARM_L_MASK,
+ channel, val);
+ case hwmon_in_crit_alarm:
+ return ltc4282_read_alarm(st, LTC4282_STATUS_LSB,
+ LTC4282_OV_STATUS_MASK, val);
+ case hwmon_in_lcrit_alarm:
+ return ltc4282_read_alarm(st, LTC4282_STATUS_LSB,
+ LTC4282_UV_STATUS_MASK, val);
+ case hwmon_in_max:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_read_voltage_byte(st, LTC4282_VGPIO_MAX,
+ 1280, val);
+
+ return ltc4282_vdd_source_read_lim(st, LTC4282_VSOURCE_MAX,
+ channel,
+ &st->in0_1_cache[channel].in_max_raw, val);
+ case hwmon_in_min:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_read_voltage_byte(st, LTC4282_VGPIO_MIN,
+ 1280, val);
+
+ return ltc4282_vdd_source_read_lim(st, LTC4282_VSOURCE_MIN,
+ channel,
+ &st->in0_1_cache[channel].in_min_raw, val);
+ case hwmon_in_enable:
+ scoped_guard(mutex, &st->lock) {
+ *val = st->in0_1_cache[channel].en;
+ }
+ return 0;
+ case hwmon_in_fault:
+ /*
+		 * We report failure if we detect either a fet_bad or a
+ * fet_short in the status register.
+ */
+ return ltc4282_read_alarm(st, LTC4282_STATUS_LSB,
+ LTC4282_FET_FAILURE_MASK, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_read_current_word(const struct ltc4282_state *st, u32 reg,
+ long *val)
+{
+ long in;
+ int ret;
+
+ /*
+ * We pass in full scale in 10 * micro (note that 40 is already
+ * millivolt) so we have better approximations to calculate current.
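+	 * As an illustrative example with a hypothetical 2 mOhm shunt
+	 * (rsense stored as 20000, i.e. units of 0.1 uOhm): a full-scale
+	 * reading gives in = 400000 (40 mV in tenths of uV), so
+	 * val = 400000 * 1000 / 20000 = 20000 mA, i.e. 20 A.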
+ */
+ ret = ltc4282_read_voltage_word(st, reg, DECA * 40 * MILLI, &in);
+ if (ret)
+ return ret;
+
+ *val = DIV_ROUND_CLOSEST(in * MILLI, st->rsense);
+
+ return 0;
+}
+
+static int ltc4282_read_current_byte(const struct ltc4282_state *st, u32 reg,
+ long *val)
+{
+ long in;
+ int ret;
+
+ ret = ltc4282_read_voltage_byte(st, reg, DECA * 40 * MILLI, &in);
+ if (ret)
+ return ret;
+
+ *val = DIV_ROUND_CLOSEST(in * MILLI, st->rsense);
+
+ return 0;
+}
+
+static int ltc4282_read_curr(struct ltc4282_state *st, const u32 attr,
+ long *val)
+{
+ switch (attr) {
+ case hwmon_curr_input:
+ return ltc4282_read_current_word(st, LTC4282_VSENSE, val);
+ case hwmon_curr_highest:
+ return ltc4282_read_current_word(st, LTC4282_VSENSE_HIGHEST,
+ val);
+ case hwmon_curr_lowest:
+ return ltc4282_read_current_word(st, LTC4282_VSENSE_LOWEST,
+ val);
+ case hwmon_curr_max:
+ return ltc4282_read_current_byte(st, LTC4282_VSENSE_MAX, val);
+ case hwmon_curr_min:
+ return ltc4282_read_current_byte(st, LTC4282_VSENSE_MIN, val);
+ case hwmon_curr_max_alarm:
+ return ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG,
+ LTC4282_VSENSE_ALARM_H_MASK, val);
+ case hwmon_curr_min_alarm:
+ return ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG,
+ LTC4282_VSENSE_ALARM_L_MASK, val);
+ case hwmon_curr_crit_alarm:
+ return ltc4282_read_alarm(st, LTC4282_STATUS_LSB,
+ LTC4282_OC_STATUS_MASK, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_read_power_word(const struct ltc4282_state *st, u32 reg,
+ long *val)
+{
+ u64 temp = DECA * 40ULL * st->vfs_out * BIT(16), temp_2;
+ __be16 raw;
+ u16 power;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &raw, sizeof(raw));
+ if (ret)
+ return ret;
+
+ power = be16_to_cpu(raw);
+ /*
+ * Power is given by:
+ * P = CODE(16b) * 0.040 * Vfs(out) * 2^16 / ((2^16 - 1)^2 * Rsense)
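+	 * As a rough worked example (hypothetical values): with
+	 * Vfs(out) = 16.64 V, Rsense = 2 mOhm and CODE = 0x8000 (half
+	 * scale), P is about 0.5 * (40 mV / 2 mOhm) * 16.64 V ~= 166 W.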
+ */
+ if (check_mul_overflow(power * temp, MICRO, &temp_2)) {
+ temp = DIV_ROUND_CLOSEST_ULL(power * temp, U16_MAX);
+ *val = DIV64_U64_ROUND_CLOSEST(temp * MICRO,
+ U16_MAX * (u64)st->rsense);
+ return 0;
+ }
+
+ *val = DIV64_U64_ROUND_CLOSEST(temp_2,
+ st->rsense * int_pow(U16_MAX, 2));
+
+ return 0;
+}
+
+static int ltc4282_read_power_byte(const struct ltc4282_state *st, u32 reg,
+ long *val)
+{
+ u32 power;
+ u64 temp;
+ int ret;
+
+ ret = regmap_read(st->map, reg, &power);
+ if (ret)
+ return ret;
+
+ temp = power * 40 * DECA * st->vfs_out * BIT_ULL(8);
+ *val = DIV64_U64_ROUND_CLOSEST(temp * MICRO,
+ int_pow(U8_MAX, 2) * st->rsense);
+
+ return 0;
+}
+
+static int ltc4282_read_energy(const struct ltc4282_state *st, u64 *val)
+{
+ u64 temp, energy;
+ __be64 raw;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, LTC4282_ENERGY, &raw, 6);
+ if (ret)
+ return ret;
+
+ energy = be64_to_cpu(raw) >> 16;
+ /*
+ * The formula for energy is given by:
+ * E = CODE(48b) * 0.040 * Vfs(out) * Tconv * 256 /
+ * ((2^16 - 1)^2 * Rsense)
+ *
+ * Since we only support 12bit ADC, Tconv = 0.065535s. Passing Vfs(out)
+ * and 0.040 to mV and Tconv to us, we can simplify the formula to:
+ * E = CODE(48b) * 40 * Vfs(out) * 256 / (U16_MAX * Rsense)
+ *
+ * As Rsense can have tenths of micro-ohm resolution, we need to
+	 * multiply by DECA to get microjoules.
+ */
+ if (check_mul_overflow(DECA * st->vfs_out * 40 * BIT(8), energy, &temp)) {
+ temp = DIV_ROUND_CLOSEST(DECA * st->vfs_out * 40 * BIT(8), U16_MAX);
+ *val = DIV_ROUND_CLOSEST_ULL(temp * energy, st->rsense);
+ return 0;
+ }
+
+ *val = DIV64_U64_ROUND_CLOSEST(temp, U16_MAX * (u64)st->rsense);
+
+ return 0;
+}
+
+static int ltc4282_read_power(struct ltc4282_state *st, const u32 attr,
+ long *val)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ return ltc4282_read_power_word(st, LTC4282_POWER, val);
+ case hwmon_power_input_highest:
+ return ltc4282_read_power_word(st, LTC4282_POWER_HIGHEST, val);
+ case hwmon_power_input_lowest:
+ return ltc4282_read_power_word(st, LTC4282_POWER_LOWEST, val);
+ case hwmon_power_max_alarm:
+ return ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG,
+ LTC4282_POWER_ALARM_H_MASK, val);
+ case hwmon_power_min_alarm:
+ return ltc4282_read_alarm(st, LTC4282_ADC_ALERT_LOG,
+ LTC4282_POWER_ALARM_L_MASK, val);
+ case hwmon_power_max:
+ return ltc4282_read_power_byte(st, LTC4282_POWER_MAX, val);
+ case hwmon_power_min:
+ return ltc4282_read_power_byte(st, LTC4282_POWER_MIN, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct ltc4282_state *st = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_in:
+ return ltc4282_read_in(st, attr, val, channel);
+ case hwmon_curr:
+ return ltc4282_read_curr(st, attr, val);
+ case hwmon_power:
+ return ltc4282_read_power(st, attr, val);
+ case hwmon_energy:
+ scoped_guard(mutex, &st->lock) {
+ *val = st->energy_en;
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_write_power_byte(const struct ltc4282_state *st, u32 reg,
+ long val)
+{
+ u32 power;
+ u64 temp;
+
+ if (val > st->power_max)
+ val = st->power_max;
+
+ temp = val * int_pow(U8_MAX, 2) * st->rsense;
+ power = DIV64_U64_ROUND_CLOSEST(temp,
+ MICRO * DECA * 256ULL * st->vfs_out * 40);
+
+ return regmap_write(st->map, reg, power);
+}
+
+static int ltc4282_write_power_word(const struct ltc4282_state *st, u32 reg,
+ long val)
+{
+ u64 temp = int_pow(U16_MAX, 2) * st->rsense, temp_2;
+ __be16 __raw;
+ u16 code;
+
+ if (check_mul_overflow(temp, val, &temp_2)) {
+ temp = DIV_ROUND_CLOSEST_ULL(temp, DECA * MICRO);
+ code = DIV64_U64_ROUND_CLOSEST(temp * val,
+ 40ULL * BIT(16) * st->vfs_out);
+ } else {
+ temp = DECA * MICRO * 40ULL * BIT(16) * st->vfs_out;
+ code = DIV64_U64_ROUND_CLOSEST(temp_2, temp);
+ }
+
+ __raw = cpu_to_be16(code);
+ return regmap_bulk_write(st->map, reg, &__raw, sizeof(__raw));
+}
+
+static int __ltc4282_in_write_history(const struct ltc4282_state *st, u32 reg,
+ long lowest, long highest, u32 fs)
+{
+ __be16 __raw;
+ u16 tmp;
+ int ret;
+
+ tmp = DIV_ROUND_CLOSEST(U16_MAX * lowest, fs);
+
+ __raw = cpu_to_be16(tmp);
+
+ ret = regmap_bulk_write(st->map, reg, &__raw, 2);
+ if (ret)
+ return ret;
+
+ tmp = DIV_ROUND_CLOSEST(U16_MAX * highest, fs);
+
+ __raw = cpu_to_be16(tmp);
+
+ return regmap_bulk_write(st->map, reg + 2, &__raw, 2);
+}
+
+static int ltc4282_in_write_history(struct ltc4282_state *st, u32 reg,
+ long lowest, long highest, u32 fs)
+{
+ guard(mutex)(&st->lock);
+ return __ltc4282_in_write_history(st, reg, lowest, highest, fs);
+}
+
+static int ltc4282_power_reset_hist(struct ltc4282_state *st)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = ltc4282_write_power_word(st, LTC4282_POWER_LOWEST,
+ st->power_max);
+ if (ret)
+ return ret;
+
+ ret = ltc4282_write_power_word(st, LTC4282_POWER_HIGHEST, 0);
+ if (ret)
+ return ret;
+
+ /* now, let's also clear possible power_bad fault logs */
+ return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
+ LTC4282_POWER_BAD_FAULT_MASK);
+}
+
+static int ltc4282_write_power(struct ltc4282_state *st, u32 attr,
+ long val)
+{
+ switch (attr) {
+ case hwmon_power_max:
+ return ltc4282_write_power_byte(st, LTC4282_POWER_MAX, val);
+ case hwmon_power_min:
+ return ltc4282_write_power_byte(st, LTC4282_POWER_MIN, val);
+ case hwmon_power_reset_history:
+ return ltc4282_power_reset_hist(st);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_write_voltage_byte_cached(const struct ltc4282_state *st,
+ u32 reg, u32 fs, long val,
+ u32 *cache_raw)
+{
+ u32 in;
+
+ val = clamp_val(val, 0, fs);
+ in = DIV_ROUND_CLOSEST(val * U8_MAX, fs);
+
+ if (cache_raw) {
+ *cache_raw = in;
+ return 0;
+ }
+
+ return regmap_write(st->map, reg, in);
+}
+
+static int ltc4282_write_voltage_byte(const struct ltc4282_state *st, u32 reg,
+ u32 fs, long val)
+{
+ return ltc4282_write_voltage_byte_cached(st, reg, fs, val, NULL);
+}
+
+static int ltc4282_cache_history(struct ltc4282_state *st, u32 channel)
+{
+ long val;
+ int ret;
+
+ ret = ltc4282_read_voltage_word(st, LTC4282_VSOURCE_LOWEST, st->vfs_out,
+ &val);
+ if (ret)
+ return ret;
+
+ st->in0_1_cache[channel].in_lowest = val;
+
+ ret = ltc4282_read_voltage_word(st, LTC4282_VSOURCE_HIGHEST,
+ st->vfs_out, &val);
+ if (ret)
+ return ret;
+
+ st->in0_1_cache[channel].in_highest = val;
+
+ ret = regmap_read(st->map, LTC4282_VSOURCE_MIN,
+ &st->in0_1_cache[channel].in_min_raw);
+ if (ret)
+ return ret;
+
+ return regmap_read(st->map, LTC4282_VSOURCE_MAX,
+ &st->in0_1_cache[channel].in_max_raw);
+}
+
+static int ltc4282_cache_sync(struct ltc4282_state *st, u32 channel)
+{
+ int ret;
+
+ ret = __ltc4282_in_write_history(st, LTC4282_VSOURCE_LOWEST,
+ st->in0_1_cache[channel].in_lowest,
+ st->in0_1_cache[channel].in_highest,
+ st->vfs_out);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, LTC4282_VSOURCE_MIN,
+ st->in0_1_cache[channel].in_min_raw);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->map, LTC4282_VSOURCE_MAX,
+ st->in0_1_cache[channel].in_max_raw);
+}
+
+static int ltc4282_vdd_source_write_lim(struct ltc4282_state *st, u32 reg,
+ int channel, u32 *cache, long val)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+ if (st->in0_1_cache[channel].en)
+ ret = ltc4282_write_voltage_byte(st, reg, st->vfs_out, val);
+ else
+ ret = ltc4282_write_voltage_byte_cached(st, reg, st->vfs_out,
+ val, cache);
+
+ return ret;
+}
+
+static int ltc4282_vdd_source_reset_hist(struct ltc4282_state *st, int channel)
+{
+ long lowest = st->vfs_out;
+ int ret;
+
+ if (channel == LTC4282_CHAN_VDD)
+ lowest = st->vdd;
+
+ guard(mutex)(&st->lock);
+ if (st->in0_1_cache[channel].en) {
+ ret = __ltc4282_in_write_history(st, LTC4282_VSOURCE_LOWEST,
+ lowest, 0, st->vfs_out);
+ if (ret)
+ return ret;
+ }
+
+ st->in0_1_cache[channel].in_lowest = lowest;
+ st->in0_1_cache[channel].in_highest = 0;
+
+ /*
+	 * We also clear possible fault logs in reset_history. Clearing the
+	 * logs can be important when the auto-retry bits are not enabled,
+	 * as the chip only enables the output again after these logs are
+	 * cleared. As some of these logs are related to limits, it makes
+	 * sense to clear them here. For VDD, we need to clear under/over
+	 * voltage events; for VSOURCE, fet_short and fet_bad...
+ */
+ if (channel == LTC4282_CHAN_VSOURCE)
+ return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
+ LTC4282_FET_FAILURE_FAULT_MASK);
+
+ return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
+ LTC4282_VDD_FAULT_MASK);
+}
+
+/*
+ * We need to mux between VSOURCE and VDD which means they are mutually
+ * exclusive. Moreover, we can't really disable both VDD and VSOURCE as the ADC
+ * is continuously running (we cannot independently halt it without also
+ * stopping VGPIO). Hence, the logic is that disabling or enabling VDD will
+ * automatically have the reverse effect on VSOURCE and vice-versa.
+ */
+static int ltc4282_vdd_source_enable(struct ltc4282_state *st, int channel,
+ long val)
+{
+ int ret, other_chan = ~channel & 0x1;
+ u8 __val = val;
+
+ guard(mutex)(&st->lock);
+ if (st->in0_1_cache[channel].en == !!val)
+ return 0;
+
+	/* clearing the bit makes the ADC monitor VDD */
+ if (channel == LTC4282_CHAN_VDD)
+ __val = !__val;
+
+ ret = regmap_update_bits(st->map, LTC4282_ILIM_ADJUST,
+ LTC4282_VDD_MONITOR_MASK,
+ FIELD_PREP(LTC4282_VDD_MONITOR_MASK, !!__val));
+ if (ret)
+ return ret;
+
+ st->in0_1_cache[channel].en = !!val;
+ st->in0_1_cache[other_chan].en = !val;
+
+ if (st->in0_1_cache[channel].en) {
+ /*
+		 * Then, we are disabling @other_chan. Let's save its current
+		 * history.
+ */
+ ret = ltc4282_cache_history(st, other_chan);
+ if (ret)
+ return ret;
+
+ return ltc4282_cache_sync(st, channel);
+ }
+ /*
+ * Then, we are enabling @other_chan. We need to do the opposite from
+ * above.
+ */
+ ret = ltc4282_cache_history(st, channel);
+ if (ret)
+ return ret;
+
+ return ltc4282_cache_sync(st, other_chan);
+}
+
+static int ltc4282_write_in(struct ltc4282_state *st, u32 attr, long val,
+ int channel)
+{
+ switch (attr) {
+ case hwmon_in_max:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_write_voltage_byte(st, LTC4282_VGPIO_MAX,
+ 1280, val);
+
+ return ltc4282_vdd_source_write_lim(st, LTC4282_VSOURCE_MAX,
+ channel,
+ &st->in0_1_cache[channel].in_max_raw, val);
+ case hwmon_in_min:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_write_voltage_byte(st, LTC4282_VGPIO_MIN,
+ 1280, val);
+
+ return ltc4282_vdd_source_write_lim(st, LTC4282_VSOURCE_MIN,
+ channel,
+ &st->in0_1_cache[channel].in_min_raw, val);
+ case hwmon_in_reset_history:
+ if (channel == LTC4282_CHAN_VGPIO)
+ return ltc4282_in_write_history(st,
+ LTC4282_VGPIO_LOWEST,
+ 1280, 0, 1280);
+
+ return ltc4282_vdd_source_reset_hist(st, channel);
+ case hwmon_in_enable:
+ return ltc4282_vdd_source_enable(st, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_curr_reset_hist(struct ltc4282_state *st)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = __ltc4282_in_write_history(st, LTC4282_VSENSE_LOWEST,
+ st->vsense_max, 0, 40 * MILLI);
+ if (ret)
+ return ret;
+
+ /* now, let's also clear possible overcurrent fault logs */
+ return regmap_clear_bits(st->map, LTC4282_FAULT_LOG,
+ LTC4282_OC_FAULT_MASK);
+}
+
+static int ltc4282_write_curr(struct ltc4282_state *st, u32 attr,
+ long val)
+{
+ /* need to pass it in millivolt */
+ u32 in = DIV_ROUND_CLOSEST_ULL((u64)val * st->rsense, DECA * MICRO);
+
+ switch (attr) {
+ case hwmon_curr_max:
+ return ltc4282_write_voltage_byte(st, LTC4282_VSENSE_MAX, 40,
+ in);
+ case hwmon_curr_min:
+ return ltc4282_write_voltage_byte(st, LTC4282_VSENSE_MIN, 40,
+ in);
+ case hwmon_curr_reset_history:
+ return ltc4282_curr_reset_hist(st);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ltc4282_energy_enable_set(struct ltc4282_state *st, long val)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+ /* setting the bit halts the meter */
+ ret = regmap_update_bits(st->map, LTC4282_ADC_CTRL,
+ LTC4282_METER_HALT_MASK,
+ FIELD_PREP(LTC4282_METER_HALT_MASK, !val));
+ if (ret)
+ return ret;
+
+ st->energy_en = !!val;
+
+ return 0;
+}
+
+static int ltc4282_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct ltc4282_state *st = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_power:
+ return ltc4282_write_power(st, attr, val);
+ case hwmon_in:
+ return ltc4282_write_in(st, attr, val, channel);
+ case hwmon_curr:
+ return ltc4282_write_curr(st, attr, val);
+ case hwmon_energy:
+ return ltc4282_energy_enable_set(st, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t ltc4282_in_is_visible(const struct ltc4282_state *st, u32 attr)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_lowest:
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ case hwmon_in_label:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ case hwmon_in_fault:
+ return 0444;
+ case hwmon_in_max:
+ case hwmon_in_min:
+ case hwmon_in_enable:
+ case hwmon_in_reset_history:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc4282_curr_is_visible(u32 attr)
+{
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_highest:
+ case hwmon_curr_lowest:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_crit_alarm:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ case hwmon_curr_reset_history:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc4282_power_is_visible(u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ case hwmon_power_input_lowest:
+ case hwmon_power_label:
+ case hwmon_power_max_alarm:
+ case hwmon_power_min_alarm:
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_min:
+ case hwmon_power_reset_history:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc4282_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc4282_in_is_visible(data, attr);
+ case hwmon_curr:
+ return ltc4282_curr_is_visible(attr);
+ case hwmon_power:
+ return ltc4282_power_is_visible(attr);
+ case hwmon_energy:
+ /* hwmon_energy_enable */
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static const char * const ltc4282_in_strs[] = {
+ "VSOURCE", "VDD", "VGPIO"
+};
+
+static int ltc4282_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ *str = ltc4282_in_strs[channel];
+ return 0;
+ case hwmon_curr:
+ *str = "ISENSE";
+ return 0;
+ case hwmon_power:
+ *str = "Power";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static ssize_t ltc4282_energy_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ltc4282_state *st = dev_get_drvdata(dev);
+ u64 energy;
+ int ret;
+
+ guard(mutex)(&st->lock);
+ if (!st->energy_en)
+ return -ENODATA;
+
+ ret = ltc4282_read_energy(st, &energy);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", energy);
+}
+
+static const struct clk_ops ltc4282_ops = {
+ .recalc_rate = ltc4282_recalc_rate,
+ .round_rate = ltc4282_round_rate,
+ .set_rate = ltc4282_set_rate,
+ .disable = ltc4282_disable,
+};
+
+static int ltc4282_clk_provider_setup(struct ltc4282_state *st,
+ struct device *dev)
+{
+ struct clk_init_data init;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-clk",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &ltc4282_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ st->clk_hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &st->clk_hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &st->clk_hw);
+}
+
+static int ltc4282_clks_setup(struct ltc4282_state *st, struct device *dev)
+{
+ unsigned long rate;
+ struct clk *clkin;
+ u32 val;
+ int ret;
+
+	ret = ltc4282_clk_provider_setup(st, dev);
+ if (ret)
+ return ret;
+
+ clkin = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clkin))
+ return dev_err_probe(dev, PTR_ERR(clkin),
+ "Failed to get clkin");
+ if (!clkin)
+ return 0;
+
+ rate = clk_get_rate(clkin);
+ if (!in_range(rate, LTC4282_CLKIN_MIN, LTC4282_CLKIN_RANGE))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid clkin range(%lu) [%lu %lu]\n",
+ rate, LTC4282_CLKIN_MIN,
+ LTC4282_CLKIN_MAX);
+
+ /*
+	 * Clocks faster than 250 kHz should be reduced to 250 kHz. The clock
+	 * frequency is divided by twice the value in the register.
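+	 * For example, a hypothetical 8 MHz clkin gives val = 16, so the
+	 * internal clock becomes 8 MHz / (2 * 16) = 250 kHz.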
+ */
+ val = rate / (2 * LTC4282_CLKIN_MIN);
+
+ return regmap_update_bits(st->map, LTC4282_CLK_DIV,
+ LTC4282_CLK_DIV_MASK,
+ FIELD_PREP(LTC4282_CLK_DIV_MASK, val));
+}
+
+static const int ltc4282_curr_lim_uv[] = {
+ 12500, 15625, 18750, 21875, 25000, 28125, 31250, 34375
+};
+
+static int ltc4282_get_defaults(struct ltc4282_state *st, u32 *vin_mode)
+{
+ u32 reg_val, ilm_adjust;
+ int ret;
+
+ ret = regmap_read(st->map, LTC4282_ADC_CTRL, &reg_val);
+ if (ret)
+ return ret;
+
+ st->energy_en = !FIELD_GET(LTC4282_METER_HALT_MASK, reg_val);
+
+ ret = regmap_read(st->map, LTC4282_CTRL_MSB, &reg_val);
+ if (ret)
+ return ret;
+
+ *vin_mode = FIELD_GET(LTC4282_CTRL_VIN_MODE_MASK, reg_val);
+
+ ret = regmap_read(st->map, LTC4282_ILIM_ADJUST, &reg_val);
+ if (ret)
+ return ret;
+
+ ilm_adjust = FIELD_GET(LTC4282_ILIM_ADJUST_MASK, reg_val);
+ st->vsense_max = ltc4282_curr_lim_uv[ilm_adjust];
+
+ st->in0_1_cache[LTC4282_CHAN_VSOURCE].en = FIELD_GET(LTC4282_VDD_MONITOR_MASK,
+ ilm_adjust);
+ if (!st->in0_1_cache[LTC4282_CHAN_VSOURCE].en) {
+ st->in0_1_cache[LTC4282_CHAN_VDD].en = true;
+ return regmap_read(st->map, LTC4282_VSOURCE_MAX,
+ &st->in0_1_cache[LTC4282_CHAN_VSOURCE].in_max_raw);
+ }
+
+ return regmap_read(st->map, LTC4282_VSOURCE_MAX,
+ &st->in0_1_cache[LTC4282_CHAN_VDD].in_max_raw);
+}
+
+/*
+ * Set max limits for ISENSE and Power as that depends on the max voltage on
+ * rsense that is defined in ILIM_ADJUST. This is especially important for power
+ * because, for some rsense and vfsout values, allowing the default raw 255
+ * value would overflow a long on 32-bit archs when reading back the max
+ * power limit.
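+ * As a hypothetical example: vsense_max = 34375 uV, rsense = 20000 (2 mOhm)
+ * and vfs_out = 16640 mV give a power_max of roughly 286000000 uW (~286 W).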
+ *
+ * Also set meaningful historic values for VDD and VSOURCE
+ * (0 would not mean much).
+ */
+static int ltc4282_set_max_limits(struct ltc4282_state *st)
+{
+ int ret;
+
+ ret = ltc4282_write_voltage_byte(st, LTC4282_VSENSE_MAX, 40 * MILLI,
+ st->vsense_max);
+ if (ret)
+ return ret;
+
+ /* Power is given by ISENSE * Vout. */
+ st->power_max = DIV_ROUND_CLOSEST(st->vsense_max * DECA * MILLI, st->rsense) * st->vfs_out;
+ ret = ltc4282_write_power_byte(st, LTC4282_POWER_MAX, st->power_max);
+ if (ret)
+ return ret;
+
+ if (st->in0_1_cache[LTC4282_CHAN_VDD].en) {
+ st->in0_1_cache[LTC4282_CHAN_VSOURCE].in_lowest = st->vfs_out;
+ return __ltc4282_in_write_history(st, LTC4282_VSOURCE_LOWEST,
+ st->vdd, 0, st->vfs_out);
+ }
+
+ st->in0_1_cache[LTC4282_CHAN_VDD].in_lowest = st->vdd;
+ return __ltc4282_in_write_history(st, LTC4282_VSOURCE_LOWEST,
+ st->vfs_out, 0, st->vfs_out);
+}
+
+static const char * const ltc4282_gpio1_modes[] = {
+ "power_bad", "power_good"
+};
+
+static const char * const ltc4282_gpio2_modes[] = {
+ "adc_input", "stress_fet"
+};
+
+static int ltc4282_gpio_setup(struct ltc4282_state *st, struct device *dev)
+{
+ const char *func = NULL;
+ int ret;
+
+ ret = device_property_read_string(dev, "adi,gpio1-mode", &func);
+ if (!ret) {
+ ret = match_string(ltc4282_gpio1_modes,
+ ARRAY_SIZE(ltc4282_gpio1_modes), func);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Invalid func(%s) for gpio1\n",
+ func);
+
+ ret = regmap_update_bits(st->map, LTC4282_GPIO_CONFIG,
+ LTC4282_GPIO_1_CONFIG_MASK,
+ FIELD_PREP(LTC4282_GPIO_1_CONFIG_MASK, ret));
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_string(dev, "adi,gpio2-mode", &func);
+ if (!ret) {
+ ret = match_string(ltc4282_gpio2_modes,
+ ARRAY_SIZE(ltc4282_gpio2_modes), func);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Invalid func(%s) for gpio2\n",
+ func);
+ if (!ret) {
+			/* setting the bit to 1 so that the ADC monitors GPIO2 */
+ ret = regmap_set_bits(st->map, LTC4282_ILIM_ADJUST,
+ LTC4282_GPIO_MODE_MASK);
+ } else {
+ ret = regmap_update_bits(st->map, LTC4282_GPIO_CONFIG,
+ LTC4282_GPIO_2_FET_STRESS_MASK,
+ FIELD_PREP(LTC4282_GPIO_2_FET_STRESS_MASK, 1));
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ if (!device_property_read_bool(dev, "adi,gpio3-monitor-enable"))
+ return 0;
+
+ if (func && !strcmp(func, "adc_input"))
+ return dev_err_probe(dev, -EINVAL,
+ "Cannot have both gpio2 and gpio3 muxed into the ADC");
+
+ return regmap_clear_bits(st->map, LTC4282_ILIM_ADJUST,
+ LTC4282_GPIO_MODE_MASK);
+}
+
+static const char * const ltc4282_dividers[] = {
+ "external", "vdd_5_percent", "vdd_10_percent", "vdd_15_percent"
+};
+
+/* This maps the Vout full scale for the given Vin mode */
+static const u16 ltc4282_vfs_milli[] = { 5540, 8320, 16640, 33280 };
+
+static const u16 ltc4282_vdd_milli[] = { 3300, 5000, 12000, 24000 };
+
+enum {
+ LTC4282_VIN_3_3V,
+ LTC4282_VIN_5V,
+ LTC4282_VIN_12V,
+ LTC4282_VIN_24V,
+};
+
+static int ltc4282_setup(struct ltc4282_state *st, struct device *dev)
+{
+ const char *divider;
+ u32 val, vin_mode;
+ int ret;
+
+ /* The part has an eeprom so let's get the needed defaults from it */
+ ret = ltc4282_get_defaults(st, &vin_mode);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(dev, "adi,rsense-nano-ohms",
+ &st->rsense);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to read adi,rsense-nano-ohms\n");
+ if (st->rsense < CENTI)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,rsense-nano-ohms too small (< %lu)\n",
+ CENTI);
+
+ /*
+	 * The resolution for rsense is tenths of a micro-ohm (e.g. 62.5 uOhm),
+	 * which means we need nano-ohms in the bindings. However, to make
+	 * things easier to handle (with respect to overflows) we divide it by
+	 * 100 as we don't really need the last two digits.
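+	 * For example, a hypothetical 2 mOhm shunt is described as 2000000
+	 * (nano-ohms) in the bindings and stored here as 20000 (units of
+	 * 100 nano-ohm, i.e. 0.1 uOhm).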
+ */
+ st->rsense /= CENTI;
+
+ val = vin_mode;
+ ret = device_property_read_u32(dev, "adi,vin-mode-microvolt", &val);
+ if (!ret) {
+ switch (val) {
+ case 3300000:
+ val = LTC4282_VIN_3_3V;
+ break;
+ case 5000000:
+ val = LTC4282_VIN_5V;
+ break;
+ case 12000000:
+ val = LTC4282_VIN_12V;
+ break;
+ case 24000000:
+ val = LTC4282_VIN_24V;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid val(%u) for vin-mode-microvolt\n",
+ val);
+ }
+
+ ret = regmap_update_bits(st->map, LTC4282_CTRL_MSB,
+ LTC4282_CTRL_VIN_MODE_MASK,
+ FIELD_PREP(LTC4282_CTRL_VIN_MODE_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Foldback mode should also be set to the input voltage */
+ ret = regmap_update_bits(st->map, LTC4282_ILIM_ADJUST,
+ LTC4282_FOLDBACK_MODE_MASK,
+ FIELD_PREP(LTC4282_FOLDBACK_MODE_MASK, val));
+ if (ret)
+ return ret;
+ }
+
+ st->vfs_out = ltc4282_vfs_milli[val];
+ st->vdd = ltc4282_vdd_milli[val];
+
+ ret = device_property_read_u32(dev, "adi,current-limit-sense-microvolt",
+ &st->vsense_max);
+ if (!ret) {
+ int reg_val;
+
+		switch (st->vsense_max) {
+ case 12500:
+ reg_val = 0;
+ break;
+ case 15625:
+ reg_val = 1;
+ break;
+ case 18750:
+ reg_val = 2;
+ break;
+ case 21875:
+ reg_val = 3;
+ break;
+ case 25000:
+ reg_val = 4;
+ break;
+ case 28125:
+ reg_val = 5;
+ break;
+ case 31250:
+ reg_val = 6;
+ break;
+ case 34375:
+ reg_val = 7;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL,
+					     "Invalid val(%u) for adi,current-limit-sense-microvolt\n",
+ st->vsense_max);
+ }
+
+ ret = regmap_update_bits(st->map, LTC4282_ILIM_ADJUST,
+ LTC4282_ILIM_ADJUST_MASK,
+ FIELD_PREP(LTC4282_ILIM_ADJUST_MASK, reg_val));
+ if (ret)
+ return ret;
+ }
+
+ ret = ltc4282_set_max_limits(st);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_string(dev, "adi,overvoltage-dividers",
+ &divider);
+ if (!ret) {
+ int div = match_string(ltc4282_dividers,
+ ARRAY_SIZE(ltc4282_dividers), divider);
+ if (div < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid val(%s) for adi,overvoltage-divider\n",
+ divider);
+
+ ret = regmap_update_bits(st->map, LTC4282_CTRL_MSB,
+ LTC4282_CTRL_OV_MODE_MASK,
+ FIELD_PREP(LTC4282_CTRL_OV_MODE_MASK, div));
+ }
+
+ ret = device_property_read_string(dev, "adi,undervoltage-dividers",
+ &divider);
+ if (!ret) {
+ int div = match_string(ltc4282_dividers,
+ ARRAY_SIZE(ltc4282_dividers), divider);
+ if (div < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid val(%s) for adi,undervoltage-divider\n",
+ divider);
+
+ ret = regmap_update_bits(st->map, LTC4282_CTRL_MSB,
+ LTC4282_CTRL_UV_MODE_MASK,
+ FIELD_PREP(LTC4282_CTRL_UV_MODE_MASK, div));
+ }
+
+ if (device_property_read_bool(dev, "adi,overcurrent-retry")) {
+ ret = regmap_set_bits(st->map, LTC4282_CTRL_LSB,
+ LTC4282_CTRL_OC_RETRY_MASK);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_bool(dev, "adi,overvoltage-retry-disable")) {
+ ret = regmap_clear_bits(st->map, LTC4282_CTRL_LSB,
+ LTC4282_CTRL_OV_RETRY_MASK);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_bool(dev, "adi,undervoltage-retry-disable")) {
+ ret = regmap_clear_bits(st->map, LTC4282_CTRL_LSB,
+ LTC4282_CTRL_UV_RETRY_MASK);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_bool(dev, "adi,fault-log-enable")) {
+ ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL,
+ LTC4282_FAULT_LOG_EN_MASK);
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "adi,fet-bad-timeout-ms", &val);
+ if (!ret) {
+ if (val > LTC4282_FET_BAD_MAX_TIMEOUT)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value(%u) for adi,fet-bad-timeout-ms",
+ val);
+
+ ret = regmap_write(st->map, LTC4282_FET_BAD_FAULT_TIMEOUT, val);
+ if (ret)
+ return ret;
+ }
+
+ return ltc4282_gpio_setup(st, dev);
+}
+
+static bool ltc4282_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == LTC4282_RESERVED_1 || reg == LTC4282_RESERVED_2)
+ return false;
+
+ return true;
+}
+
+static bool ltc4282_writable_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == LTC4282_STATUS_LSB || reg == LTC4282_STATUS_MSB)
+ return false;
+ if (reg == LTC4282_RESERVED_1 || reg == LTC4282_RESERVED_2)
+ return false;
+
+ return true;
+}
+
+static const struct regmap_config ltc4282_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC4282_RESERVED_3,
+ .readable_reg = ltc4282_readable_reg,
+ .writeable_reg = ltc4282_writable_reg,
+};
+
+static const struct hwmon_channel_info * const ltc4282_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX_ALARM | HWMON_I_ENABLE |
+ HWMON_I_RESET_HISTORY | HWMON_I_FAULT |
+ HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX_ALARM | HWMON_I_LCRIT_ALARM |
+ HWMON_I_CRIT_ALARM | HWMON_I_ENABLE |
+ HWMON_I_RESET_HISTORY | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_RESET_HISTORY | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LOWEST | HWMON_C_HIGHEST |
+ HWMON_C_MAX | HWMON_C_MIN | HWMON_C_MIN_ALARM |
+ HWMON_C_MAX_ALARM | HWMON_C_CRIT_ALARM |
+ HWMON_C_RESET_HISTORY | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_INPUT_LOWEST |
+ HWMON_P_INPUT_HIGHEST | HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_RESET_HISTORY | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(energy,
+ HWMON_E_ENABLE),
+ NULL
+};
+
+static const struct hwmon_ops ltc4282_hwmon_ops = {
+ .read = ltc4282_read,
+ .write = ltc4282_write,
+ .is_visible = ltc4282_is_visible,
+ .read_string = ltc4282_read_labels,
+};
+
+static const struct hwmon_chip_info ltc4282_chip_info = {
+ .ops = &ltc4282_hwmon_ops,
+ .info = ltc4282_info,
+};
+
+/* energy attributes are 6 bytes wide so we need u64 */
+static SENSOR_DEVICE_ATTR_RO(energy1_input, ltc4282_energy, 0);
+
+static struct attribute *ltc4282_attrs[] = {
+ &sensor_dev_attr_energy1_input.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(ltc4282);
+
+static int ltc4282_show_fault_log(void *arg, u64 *val, u32 mask)
+{
+ struct ltc4282_state *st = arg;
+ long alarm;
+ int ret;
+
+ ret = ltc4282_read_alarm(st, LTC4282_FAULT_LOG, mask, &alarm);
+ if (ret)
+ return ret;
+
+ *val = alarm;
+
+ return 0;
+}
+
+static int ltc4282_show_curr1_crit_fault_log(void *arg, u64 *val)
+{
+ return ltc4282_show_fault_log(arg, val, LTC4282_OC_FAULT_MASK);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_curr1_crit_fault_log,
+ ltc4282_show_curr1_crit_fault_log, NULL, "%llu\n");
+
+static int ltc4282_show_in1_lcrit_fault_log(void *arg, u64 *val)
+{
+ return ltc4282_show_fault_log(arg, val, LTC4282_UV_FAULT_MASK);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_in1_lcrit_fault_log,
+ ltc4282_show_in1_lcrit_fault_log, NULL, "%llu\n");
+
+static int ltc4282_show_in1_crit_fault_log(void *arg, u64 *val)
+{
+ return ltc4282_show_fault_log(arg, val, LTC4282_OV_FAULT_MASK);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_in1_crit_fault_log,
+ ltc4282_show_in1_crit_fault_log, NULL, "%llu\n");
+
+static int ltc4282_show_fet_bad_fault_log(void *arg, u64 *val)
+{
+ return ltc4282_show_fault_log(arg, val, LTC4282_FET_BAD_FAULT_MASK);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_fet_bad_fault_log,
+ ltc4282_show_fet_bad_fault_log, NULL, "%llu\n");
+
+static int ltc4282_show_fet_short_fault_log(void *arg, u64 *val)
+{
+ return ltc4282_show_fault_log(arg, val, LTC4282_FET_SHORT_FAULT_MASK);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_fet_short_fault_log,
+ ltc4282_show_fet_short_fault_log, NULL, "%llu\n");
+
+static int ltc4282_show_power1_bad_fault_log(void *arg, u64 *val)
+{
+ return ltc4282_show_fault_log(arg, val, LTC4282_POWER_BAD_FAULT_MASK);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_power1_bad_fault_log,
+ ltc4282_show_power1_bad_fault_log, NULL, "%llu\n");
+
+static void ltc4282_debugfs_remove(void *dir)
+{
+ debugfs_remove_recursive(dir);
+}
+
+static void ltc4282_debugfs_init(struct ltc4282_state *st,
+ struct i2c_client *i2c,
+ const struct device *hwmon)
+{
+ const char *debugfs_name;
+ struct dentry *dentry;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
+ debugfs_name = devm_kasprintf(&i2c->dev, GFP_KERNEL, "ltc4282-%s",
+ dev_name(hwmon));
+ if (!debugfs_name)
+ return;
+
+ dentry = debugfs_create_dir(debugfs_name, NULL);
+ if (IS_ERR(dentry))
+ return;
+
+ ret = devm_add_action_or_reset(&i2c->dev, ltc4282_debugfs_remove,
+ dentry);
+ if (ret)
+ return;
+
+ debugfs_create_file_unsafe("power1_bad_fault_log", 0400, dentry, st,
+ &ltc4282_power1_bad_fault_log);
+ debugfs_create_file_unsafe("in0_fet_short_fault_log", 0400, dentry, st,
+ &ltc4282_fet_short_fault_log);
+ debugfs_create_file_unsafe("in0_fet_bad_fault_log", 0400, dentry, st,
+ &ltc4282_fet_bad_fault_log);
+ debugfs_create_file_unsafe("in1_crit_fault_log", 0400, dentry, st,
+ &ltc4282_in1_crit_fault_log);
+ debugfs_create_file_unsafe("in1_lcrit_fault_log", 0400, dentry, st,
+ &ltc4282_in1_lcrit_fault_log);
+ debugfs_create_file_unsafe("curr1_crit_fault_log", 0400, dentry, st,
+ &ltc4282_curr1_crit_fault_log);
+}
+
+static int ltc4282_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev, *hwmon;
+ struct ltc4282_state *st;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to allocate memory\n");
+
+ st->map = devm_regmap_init_i2c(i2c, &ltc4282_regmap_config);
+ if (IS_ERR(st->map))
+ return dev_err_probe(dev, PTR_ERR(st->map),
+ "failed regmap init\n");
+
+ /* Soft reset */
+ ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_RESET_MASK);
+ if (ret)
+ return ret;
+
+ /* Yes, it's big but it is as specified in the datasheet */
+ msleep(3200);
+
+	ret = ltc4282_clks_setup(st, dev);
+ if (ret)
+ return ret;
+
+ ret = ltc4282_setup(st, dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+ hwmon = devm_hwmon_device_register_with_info(dev, "ltc4282", st,
+						     &ltc4282_chip_info,
+ ltc4282_groups);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ ltc4282_debugfs_init(st, i2c, hwmon);
+
+ return 0;
+}
+
+static const struct of_device_id ltc4282_of_match[] = {
+ { .compatible = "adi,ltc4282" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ltc4282_of_match);
+
+static struct i2c_driver ltc4282_driver = {
+ .driver = {
+ .name = "ltc4282",
+ .of_match_table = ltc4282_of_match,
+ },
+ .probe = ltc4282_probe,
+};
+module_i2c_driver(ltc4282_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC4282 I2C High Current Hot Swap Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max127.c b/drivers/hwmon/max127.c
index ee5ead06d612..da2289e3560a 100644
--- a/drivers/hwmon/max127.c
+++ b/drivers/hwmon/max127.c
@@ -335,7 +335,6 @@ static const struct i2c_device_id max127_id[] = {
MODULE_DEVICE_TABLE(i2c, max127_id);
static struct i2c_driver max127_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "max127",
},
diff --git a/drivers/hwmon/max31760.c b/drivers/hwmon/max31760.c
index 79945eb466ae..127e31ca3c87 100644
--- a/drivers/hwmon/max31760.c
+++ b/drivers/hwmon/max31760.c
@@ -60,7 +60,7 @@ static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x5B,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = max31760_volatile_reg,
};
@@ -578,7 +578,6 @@ static DEFINE_SIMPLE_DEV_PM_OPS(max31760_pm_ops, max31760_suspend,
max31760_resume);
static struct i2c_driver max31760_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "max31760",
.of_match_table = max31760_of_match,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 0cd44c1e998a..3dc95196b229 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -543,7 +543,6 @@ static const struct i2c_device_id max31790_id[] = {
MODULE_DEVICE_TABLE(i2c, max31790_id);
static struct i2c_driver max31790_driver = {
- .class = I2C_CLASS_HWMON,
.probe = max31790_probe,
.driver = {
.name = "max31790",
diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c
index 4a8c3e37c5d3..f8a13b30f100 100644
--- a/drivers/hwmon/max31827.c
+++ b/drivers/hwmon/max31827.c
@@ -652,7 +652,6 @@ static const struct of_device_id max31827_of_match[] = {
MODULE_DEVICE_TABLE(of, max31827_of_match);
static struct i2c_driver max31827_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "max31827",
.of_match_table = max31827_of_match,
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index af7e62685898..05426cde0e36 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -549,7 +549,6 @@ static const struct of_device_id __maybe_unused max6621_of_match[] = {
MODULE_DEVICE_TABLE(of, max6621_of_match);
static struct i2c_driver max6621_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = MAX6621_DRV_NAME,
.of_match_table = of_match_ptr(max6621_of_match),
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 7d10dd434f2e..d161ba0e7813 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -780,7 +780,6 @@ static const struct of_device_id __maybe_unused max6697_of_match[] = {
MODULE_DEVICE_TABLE(of, max6697_of_match);
static struct i2c_driver max6697_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "max6697",
.of_match_table = of_match_ptr(max6697_of_match),
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 3f3f7a88413e..0d016fedb9c2 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -174,6 +174,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MITAC 0xa0e
#define NCT6683_CUSTOMER_ID_MSI 0x201
#define NCT6683_CUSTOMER_ID_MSI2 0x200
+#define NCT6683_CUSTOMER_ID_MSI3 0x207
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_CUSTOMER_ID_ASROCK3 0x1631
@@ -1224,6 +1225,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI2:
break;
+ case NCT6683_CUSTOMER_ID_MSI3:
+ break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
case NCT6683_CUSTOMER_ID_ASROCK2:
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 8d2ef3145bca..9fbab8f02334 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
+ int num_reg_temp_config;
struct device *hwmon_dev;
struct sensor_template_group tsi_temp_tg;
@@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6775_REG_TEMP_CRIT;
@@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6776_REG_TEMP_CRIT;
@@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6798_REG_TEMP_OVER;
reg_temp_hyst = NCT6798_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6798_REG_TEMP_CRIT;
@@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
= reg_temp_crit[src - 1];
if (reg_temp_crit_l && reg_temp_crit_l[i])
data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
- data->reg_temp_config[src - 1] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[src - 1] = reg_temp_config[i];
data->temp_src[src - 1] = src;
continue;
}
@@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
data->reg_temp[0][s] = reg_temp[i];
data->reg_temp[1][s] = reg_temp_over[i];
data->reg_temp[2][s] = reg_temp_hyst[i];
- data->reg_temp_config[s] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[s] = reg_temp_config[i];
if (reg_temp_crit_h && reg_temp_crit_h[i])
data->reg_temp[3][s] = reg_temp_crit_h[i];
else if (reg_temp_crit[src - 1])
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 024cff151c36..a0e664d5ebfe 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -1051,7 +1051,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
static const struct regmap_config nct7802_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = nct7802_regmap_is_volatile,
};
diff --git a/drivers/hwmon/nzxt-kraken3.c b/drivers/hwmon/nzxt-kraken3.c
new file mode 100644
index 000000000000..5806a3f32bcb
--- /dev/null
+++ b/drivers/hwmon/nzxt-kraken3.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * hwmon driver for NZXT Kraken X53/X63/X73 and Z53/Z63/Z73 all-in-one coolers.
+ * X53 and Z53 in code refer to all models in their respective series (shortened
+ * for brevity).
+ *
+ * Copyright 2021 Jonas Malaco <jonas@protocubo.io>
+ * Copyright 2022 Aleksa Savic <savicaleksa83@gmail.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/hid.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <asm/unaligned.h>
+
+#define USB_VENDOR_ID_NZXT 0x1e71
+#define USB_PRODUCT_ID_X53 0x2007
+#define USB_PRODUCT_ID_X53_SECOND 0x2014
+#define USB_PRODUCT_ID_Z53 0x3008
+
+enum kinds { X53, Z53 } __packed;
+enum pwm_enable { off, manual, curve } __packed;
+
+static const char *const kraken3_device_names[] = {
+ [X53] = "x53",
+ [Z53] = "z53",
+};
+
+#define DRIVER_NAME "nzxt_kraken3"
+#define STATUS_REPORT_ID 0x75
+#define FIRMWARE_REPORT_ID 0x11
+#define STATUS_VALIDITY 2000 /* In ms, equivalent to period of four status reports */
+#define CUSTOM_CURVE_POINTS 40 /* For temps from 20C to 59C (critical temp) */
+#define PUMP_DUTY_MIN 20 /* In percent */
+
+/* Sensor report offsets for Kraken X53 and Z53 */
+#define TEMP_SENSOR_START_OFFSET 15
+#define TEMP_SENSOR_END_OFFSET 16
+#define PUMP_SPEED_OFFSET 17
+#define PUMP_DUTY_OFFSET 19
+
+/* Firmware version report offset for Kraken X53 and Z53 */
+#define FIRMWARE_VERSION_OFFSET 17
+
+/* Sensor report offsets for Kraken Z53 */
+#define Z53_FAN_SPEED_OFFSET 23
+#define Z53_FAN_DUTY_OFFSET 25
+
+/* Report offsets for control commands for Kraken X53 and Z53 */
+#define SET_DUTY_ID_OFFSET 1
+
+/* Control commands and their lengths for Kraken X53 and Z53 */
+
+/* Last byte sets the report interval at 0.5s */
+static const u8 set_interval_cmd[] = { 0x70, 0x02, 0x01, 0xB8, 1 };
+static const u8 finish_init_cmd[] = { 0x70, 0x01 };
+static const u8 __maybe_unused get_fw_version_cmd[] = { 0x10, 0x01 };
+static const u8 set_pump_duty_cmd_header[] = { 0x72, 0x00, 0x00, 0x00 };
+static const u8 z53_get_status_cmd[] = { 0x74, 0x01 };
+
+#define SET_INTERVAL_CMD_LENGTH 5
+#define FINISH_INIT_CMD_LENGTH 2
+#define GET_FW_VERSION_CMD_LENGTH 2
+#define MAX_REPORT_LENGTH 64
+#define MIN_REPORT_LENGTH 20
+#define SET_CURVE_DUTY_CMD_HEADER_LENGTH 4
+/* 4 byte header and 40 duty offsets */
+#define SET_CURVE_DUTY_CMD_LENGTH (4 + 40)
+#define Z53_GET_STATUS_CMD_LENGTH 2
+
+static const char *const kraken3_temp_label[] = {
+ "Coolant temp",
+};
+
+static const char *const kraken3_fan_label[] = {
+ "Pump speed",
+ "Fan speed"
+};
+
+struct kraken3_channel_info {
+ enum pwm_enable mode;
+
+ /* Both values are PWM */
+ u16 reported_duty;
+ u16 fixed_duty; /* Manually set fixed duty */
+
+ u8 pwm_points[CUSTOM_CURVE_POINTS];
+};
+
+struct kraken3_data {
+ struct hid_device *hdev;
+ struct device *hwmon_dev;
+ struct dentry *debugfs;
+ struct mutex buffer_lock; /* For locking access to buffer */
+ struct mutex z53_status_request_lock;
+ struct completion fw_version_processed;
+ /*
+	 * For X53 devices, tracks whether an initial (one) sensor report was received,
+	 * so that fancontrol does not bail out immediately. For Z53 devices, tracks
+	 * whether a status report was processed after requesting one.
+ */
+ struct completion status_report_processed;
+ /* For locking the above completion */
+ spinlock_t status_completion_lock;
+
+ u8 *buffer;
+ struct kraken3_channel_info channel_info[2]; /* Pump and fan */
+ bool is_device_faulty;
+
+ /* Sensor values */
+ s32 temp_input[1];
+ u16 fan_input[2];
+
+ enum kinds kind;
+ u8 firmware_version[3];
+
+ unsigned long updated; /* jiffies */
+};
+
+static umode_t kraken3_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct kraken3_data *priv = data;
+
+ switch (type) {
+ case hwmon_temp:
+ if (channel < 1)
+ return 0444;
+ break;
+ case hwmon_fan:
+ switch (priv->kind) {
+ case X53:
+ /* Just the pump */
+ if (channel < 1)
+ return 0444;
+ break;
+ case Z53:
+ /* Pump and fan */
+ if (channel < 2)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_input:
+ switch (priv->kind) {
+ case X53:
+ /* Just the pump */
+ if (channel < 1)
+ return 0644;
+ break;
+ case Z53:
+ /* Pump and fan */
+ if (channel < 2)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Writes the command to the device with the rest of the report (up to 64 bytes) filled
+ * with zeroes.
+ */
+static int kraken3_write_expanded(struct kraken3_data *priv, const u8 *cmd, int cmd_length)
+{
+ int ret;
+
+ mutex_lock(&priv->buffer_lock);
+
+ memcpy_and_pad(priv->buffer, MAX_REPORT_LENGTH, cmd, cmd_length, 0x00);
+ ret = hid_hw_output_report(priv->hdev, priv->buffer, MAX_REPORT_LENGTH);
+
+ mutex_unlock(&priv->buffer_lock);
+ return ret;
+}
+
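+/* Convert a duty value in percent (0-100) to a PWM value (0-255), e.g. 50% maps to 128 */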
+static int kraken3_percent_to_pwm(long val)
+{
+ return DIV_ROUND_CLOSEST(val * 255, 100);
+}
+
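+/* Convert a PWM value (0-255) to percent, e.g. 128 -> 50; pump duty is raised to at least 20% */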
+static int kraken3_pwm_to_percent(long val, int channel)
+{
+ int percent_value;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ percent_value = DIV_ROUND_CLOSEST(val * 100, 255);
+
+ /* Bring up pump duty to min value if needed */
+ if (channel == 0 && percent_value < PUMP_DUTY_MIN)
+ percent_value = PUMP_DUTY_MIN;
+
+ return percent_value;
+}
+
+static int kraken3_read_x53(struct kraken3_data *priv)
+{
+ int ret;
+
+ if (completion_done(&priv->status_report_processed))
+ /*
+ * We're here because data is stale. This means that sensor reports haven't
+		 * been received for some time in kraken3_raw_event(). On the X series,
+		 * sensor data can't be requested manually, so return an error.
+ */
+ return -ENODATA;
+
+ /*
+ * Data needs to be read, but a sensor report wasn't yet received. It's usually
+ * fancontrol that requests data this early and it exits if it reads an error code.
+ * So, wait for the first report to be parsed (but up to STATUS_VALIDITY).
+ * This does not concern the Z series devices, because they send a sensor report
+ * only when requested.
+ */
+ ret = wait_for_completion_interruptible_timeout(&priv->status_report_processed,
+ msecs_to_jiffies(STATUS_VALIDITY));
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+
+ /* The first sensor report was parsed on time and reading can continue */
+ return 0;
+}
+
+static int kraken3_read_z53(struct kraken3_data *priv)
+{
+ int ret = mutex_lock_interruptible(&priv->z53_status_request_lock);
+
+ if (ret < 0)
+ return ret;
+
+ if (!time_after(jiffies, priv->updated + msecs_to_jiffies(STATUS_VALIDITY))) {
+ /* Data is up to date */
+ goto unlock_and_return;
+ }
+
+ /*
+	 * Lock out the raw event handler for a moment to safely reinit the
+	 * completion, as hidraw calls could have allowed one or more readers
+	 * to complete.
+ */
+ spin_lock_bh(&priv->status_completion_lock);
+ reinit_completion(&priv->status_report_processed);
+ spin_unlock_bh(&priv->status_completion_lock);
+
+ /* Send command for getting status */
+ ret = kraken3_write_expanded(priv, z53_get_status_cmd, Z53_GET_STATUS_CMD_LENGTH);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ /* Wait for completion from kraken3_raw_event() */
+ ret = wait_for_completion_interruptible_timeout(&priv->status_report_processed,
+ msecs_to_jiffies(STATUS_VALIDITY));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+unlock_and_return:
+ mutex_unlock(&priv->z53_status_request_lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int kraken3_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct kraken3_data *priv = dev_get_drvdata(dev);
+ int ret;
+
+ if (time_after(jiffies, priv->updated + msecs_to_jiffies(STATUS_VALIDITY))) {
+ if (priv->kind == X53)
+ ret = kraken3_read_x53(priv);
+ else
+ ret = kraken3_read_z53(priv);
+
+ if (ret < 0)
+ return ret;
+
+ if (priv->is_device_faulty)
+ return -ENODATA;
+ }
+
+ switch (type) {
+ case hwmon_temp:
+ *val = priv->temp_input[channel];
+ break;
+ case hwmon_fan:
+ *val = priv->fan_input[channel];
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ *val = priv->channel_info[channel].mode;
+ break;
+ case hwmon_pwm_input:
+ *val = priv->channel_info[channel].reported_duty;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int kraken3_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ *str = kraken3_temp_label[channel];
+ break;
+ case hwmon_fan:
+ *str = kraken3_fan_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Writes custom curve to device */
+static int kraken3_write_curve(struct kraken3_data *priv, u8 *curve_array, int channel)
+{
+ u8 fixed_duty_cmd[SET_CURVE_DUTY_CMD_LENGTH];
+ int ret;
+
+ /* Copy command header */
+ memcpy(fixed_duty_cmd, set_pump_duty_cmd_header, SET_CURVE_DUTY_CMD_HEADER_LENGTH);
+
+ /* Set the correct ID for writing pump/fan duty (0x01 or 0x02, respectively) */
+ fixed_duty_cmd[SET_DUTY_ID_OFFSET] = channel + 1;
+
+ /* Copy curve to command */
+ memcpy(fixed_duty_cmd + SET_CURVE_DUTY_CMD_HEADER_LENGTH, curve_array, CUSTOM_CURVE_POINTS);
+
+ ret = kraken3_write_expanded(priv, fixed_duty_cmd, SET_CURVE_DUTY_CMD_LENGTH);
+ return ret;
+}
+
+static int kraken3_write_fixed_duty(struct kraken3_data *priv, long val, int channel)
+{
+ u8 fixed_curve_points[CUSTOM_CURVE_POINTS];
+ int ret, percent_val, i;
+
+ percent_val = kraken3_pwm_to_percent(val, channel);
+ if (percent_val < 0)
+ return percent_val;
+
+ /*
+ * The devices can only control the duty through a curve.
+ * Since we're setting a fixed duty here, fill the whole curve
+ * (ranging from 20C to 59C) with the same duty, except for
+ * the last point, the critical temperature, where it's maxed
+ * out for safety.
+ */
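+	/* E.g. writing pwm 128 (~50%) sets the 20-58 C points to 50 and the 59 C point to 100 */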
+
+ /* Fill the custom curve with the fixed value we're setting */
+ for (i = 0; i < CUSTOM_CURVE_POINTS - 1; i++)
+ fixed_curve_points[i] = percent_val;
+
+ /* Force duty to 100% at critical temp */
+ fixed_curve_points[CUSTOM_CURVE_POINTS - 1] = 100;
+
+ /* Write the fixed duty curve to the device */
+ ret = kraken3_write_curve(priv, fixed_curve_points, channel);
+ return ret;
+}
+
+static int kraken3_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
+{
+ struct kraken3_data *priv = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ /* Remember the last set fixed duty for channel */
+ priv->channel_info[channel].fixed_duty = val;
+
+ if (priv->channel_info[channel].mode == manual) {
+ ret = kraken3_write_fixed_duty(priv, val, channel);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Lock onto this value and report it until next interrupt status
+ * report is received, so userspace tools can continue to work.
+ */
+ priv->channel_info[channel].reported_duty = val;
+ }
+ break;
+ case hwmon_pwm_enable:
+ if (val < 0 || val > 2)
+ return -EINVAL;
+
+ switch (val) {
+ case 0:
+ /* Set channel to 100%, direct duty value */
+ ret = kraken3_write_fixed_duty(priv, 255, channel);
+ if (ret < 0)
+ return ret;
+
+ /* We don't control anything anymore */
+ priv->channel_info[channel].mode = off;
+ break;
+ case 1:
+ /* Apply the last known direct duty value */
+ ret =
+ kraken3_write_fixed_duty(priv,
+ priv->channel_info[channel].fixed_duty,
+ channel);
+ if (ret < 0)
+ return ret;
+
+ priv->channel_info[channel].mode = manual;
+ break;
+ case 2:
+ /* Apply the curve and note as enabled */
+ ret =
+ kraken3_write_curve(priv,
+ priv->channel_info[channel].pwm_points,
+ channel);
+ if (ret < 0)
+ return ret;
+
+ priv->channel_info[channel].mode = curve;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static ssize_t kraken3_fan_curve_pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
+ struct kraken3_data *priv = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ val = kraken3_pwm_to_percent(val, dev_attr->nr);
+ if (val < 0)
+ return val;
+
+ priv->channel_info[dev_attr->nr].pwm_points[dev_attr->index] = val;
+
+ if (priv->channel_info[dev_attr->nr].mode == curve) {
+ /* Apply the curve */
+ ret =
+ kraken3_write_curve(priv,
+ priv->channel_info[dev_attr->nr].pwm_points, dev_attr->nr);
+ if (ret < 0)
+ return ret;
+ }
+
+ return count;
+}
+
+static umode_t kraken3_curve_props_are_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct kraken3_data *priv = dev_get_drvdata(dev);
+
+ /* Only Z53 has the fan curve */
+ if (index >= CUSTOM_CURVE_POINTS && priv->kind != Z53)
+ return 0;
+
+ return attr->mode;
+}
+
+/* Custom pump curve from 20C to 59C (critical temp) */
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point1_pwm, kraken3_fan_curve_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point2_pwm, kraken3_fan_curve_pwm, 0, 1);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point3_pwm, kraken3_fan_curve_pwm, 0, 2);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point4_pwm, kraken3_fan_curve_pwm, 0, 3);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point5_pwm, kraken3_fan_curve_pwm, 0, 4);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point6_pwm, kraken3_fan_curve_pwm, 0, 5);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point7_pwm, kraken3_fan_curve_pwm, 0, 6);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point8_pwm, kraken3_fan_curve_pwm, 0, 7);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point9_pwm, kraken3_fan_curve_pwm, 0, 8);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point10_pwm, kraken3_fan_curve_pwm, 0, 9);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point11_pwm, kraken3_fan_curve_pwm, 0, 10);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point12_pwm, kraken3_fan_curve_pwm, 0, 11);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point13_pwm, kraken3_fan_curve_pwm, 0, 12);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point14_pwm, kraken3_fan_curve_pwm, 0, 13);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point15_pwm, kraken3_fan_curve_pwm, 0, 14);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point16_pwm, kraken3_fan_curve_pwm, 0, 15);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point17_pwm, kraken3_fan_curve_pwm, 0, 16);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point18_pwm, kraken3_fan_curve_pwm, 0, 17);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point19_pwm, kraken3_fan_curve_pwm, 0, 18);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point20_pwm, kraken3_fan_curve_pwm, 0, 19);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point21_pwm, kraken3_fan_curve_pwm, 0, 20);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point22_pwm, kraken3_fan_curve_pwm, 0, 21);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point23_pwm, kraken3_fan_curve_pwm, 0, 22);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point24_pwm, kraken3_fan_curve_pwm, 0, 23);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point25_pwm, kraken3_fan_curve_pwm, 0, 24);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point26_pwm, kraken3_fan_curve_pwm, 0, 25);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point27_pwm, kraken3_fan_curve_pwm, 0, 26);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point28_pwm, kraken3_fan_curve_pwm, 0, 27);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point29_pwm, kraken3_fan_curve_pwm, 0, 28);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point30_pwm, kraken3_fan_curve_pwm, 0, 29);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point31_pwm, kraken3_fan_curve_pwm, 0, 30);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point32_pwm, kraken3_fan_curve_pwm, 0, 31);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point33_pwm, kraken3_fan_curve_pwm, 0, 32);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point34_pwm, kraken3_fan_curve_pwm, 0, 33);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point35_pwm, kraken3_fan_curve_pwm, 0, 34);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point36_pwm, kraken3_fan_curve_pwm, 0, 35);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point37_pwm, kraken3_fan_curve_pwm, 0, 36);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point38_pwm, kraken3_fan_curve_pwm, 0, 37);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point39_pwm, kraken3_fan_curve_pwm, 0, 38);
+static SENSOR_DEVICE_ATTR_2_WO(temp1_auto_point40_pwm, kraken3_fan_curve_pwm, 0, 39);
+
+/* Custom fan curve from 20C to 59C (critical temp) */
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point1_pwm, kraken3_fan_curve_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point2_pwm, kraken3_fan_curve_pwm, 1, 1);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point3_pwm, kraken3_fan_curve_pwm, 1, 2);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point4_pwm, kraken3_fan_curve_pwm, 1, 3);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point5_pwm, kraken3_fan_curve_pwm, 1, 4);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point6_pwm, kraken3_fan_curve_pwm, 1, 5);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point7_pwm, kraken3_fan_curve_pwm, 1, 6);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point8_pwm, kraken3_fan_curve_pwm, 1, 7);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point9_pwm, kraken3_fan_curve_pwm, 1, 8);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point10_pwm, kraken3_fan_curve_pwm, 1, 9);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point11_pwm, kraken3_fan_curve_pwm, 1, 10);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point12_pwm, kraken3_fan_curve_pwm, 1, 11);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point13_pwm, kraken3_fan_curve_pwm, 1, 12);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point14_pwm, kraken3_fan_curve_pwm, 1, 13);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point15_pwm, kraken3_fan_curve_pwm, 1, 14);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point16_pwm, kraken3_fan_curve_pwm, 1, 15);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point17_pwm, kraken3_fan_curve_pwm, 1, 16);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point18_pwm, kraken3_fan_curve_pwm, 1, 17);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point19_pwm, kraken3_fan_curve_pwm, 1, 18);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point20_pwm, kraken3_fan_curve_pwm, 1, 19);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point21_pwm, kraken3_fan_curve_pwm, 1, 20);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point22_pwm, kraken3_fan_curve_pwm, 1, 21);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point23_pwm, kraken3_fan_curve_pwm, 1, 22);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point24_pwm, kraken3_fan_curve_pwm, 1, 23);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point25_pwm, kraken3_fan_curve_pwm, 1, 24);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point26_pwm, kraken3_fan_curve_pwm, 1, 25);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point27_pwm, kraken3_fan_curve_pwm, 1, 26);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point28_pwm, kraken3_fan_curve_pwm, 1, 27);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point29_pwm, kraken3_fan_curve_pwm, 1, 28);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point30_pwm, kraken3_fan_curve_pwm, 1, 29);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point31_pwm, kraken3_fan_curve_pwm, 1, 30);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point32_pwm, kraken3_fan_curve_pwm, 1, 31);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point33_pwm, kraken3_fan_curve_pwm, 1, 32);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point34_pwm, kraken3_fan_curve_pwm, 1, 33);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point35_pwm, kraken3_fan_curve_pwm, 1, 34);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point36_pwm, kraken3_fan_curve_pwm, 1, 35);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point37_pwm, kraken3_fan_curve_pwm, 1, 36);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point38_pwm, kraken3_fan_curve_pwm, 1, 37);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point39_pwm, kraken3_fan_curve_pwm, 1, 38);
+static SENSOR_DEVICE_ATTR_2_WO(temp2_auto_point40_pwm, kraken3_fan_curve_pwm, 1, 39);
+
+static struct attribute *kraken3_curve_attrs[] = {
+ /* Pump control curve */
+ &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point8_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point9_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point10_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point11_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point12_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point13_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point14_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point15_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point16_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point17_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point18_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point19_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point20_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point21_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point22_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point23_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point24_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point25_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point26_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point27_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point28_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point29_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point30_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point31_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point32_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point33_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point34_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point35_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point36_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point37_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point38_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point39_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point40_pwm.dev_attr.attr,
+ /* Fan control curve (Z53 only) */
+ &sensor_dev_attr_temp2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point8_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point9_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point10_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point11_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point12_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point13_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point14_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point15_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point16_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point17_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point18_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point19_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point20_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point21_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point22_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point23_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point24_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point25_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point26_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point27_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point28_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point29_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point30_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point31_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point32_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point33_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point34_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point35_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point36_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point37_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point38_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point39_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point40_pwm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group kraken3_curves_group = {
+ .attrs = kraken3_curve_attrs,
+ .is_visible = kraken3_curve_props_are_visible
+};
+
+static const struct attribute_group *kraken3_groups[] = {
+ &kraken3_curves_group,
+ NULL
+};
+
+static const struct hwmon_ops kraken3_hwmon_ops = {
+ .is_visible = kraken3_is_visible,
+ .read = kraken3_read,
+ .read_string = kraken3_read_string,
+ .write = kraken3_write
+};
+
+static const struct hwmon_channel_info *kraken3_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL
+};
+
+static const struct hwmon_chip_info kraken3_chip_info = {
+ .ops = &kraken3_hwmon_ops,
+ .info = kraken3_info,
+};
+
+static int kraken3_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size)
+{
+ struct kraken3_data *priv = hid_get_drvdata(hdev);
+ int i;
+
+ if (size < MIN_REPORT_LENGTH)
+ return 0;
+
+ if (report->id == FIRMWARE_REPORT_ID) {
+ /* Read firmware version */
+ for (i = 0; i < 3; i++)
+ priv->firmware_version[i] = data[FIRMWARE_VERSION_OFFSET + i];
+
+ if (!completion_done(&priv->fw_version_processed))
+ complete_all(&priv->fw_version_processed);
+
+ return 0;
+ }
+
+ if (report->id != STATUS_REPORT_ID)
+ return 0;
+
+ if (data[TEMP_SENSOR_START_OFFSET] == 0xff && data[TEMP_SENSOR_END_OFFSET] == 0xff) {
+ hid_err_once(hdev,
+ "firmware or device is possibly damaged (is SATA power connected?), not parsing reports\n");
+
+ /*
+		 * A faulty report still completes the status request: only the
+		 * first one for X-series devices, every one for Z-series.
+ */
+ spin_lock(&priv->status_completion_lock);
+ if (priv->kind != X53 || !completion_done(&priv->status_report_processed)) {
+ priv->is_device_faulty = true;
+ complete_all(&priv->status_report_processed);
+ }
+ spin_unlock(&priv->status_completion_lock);
+
+ return 0;
+ }
+
+ /* Received normal data */
+ priv->is_device_faulty = false;
+
+ /* Temperature and fan sensor readings */
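+	/* Whole degrees and tenths arrive in separate bytes, e.g. 33 and 5 yield 33500 (33.5 C) */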
+ priv->temp_input[0] =
+ data[TEMP_SENSOR_START_OFFSET] * 1000 + data[TEMP_SENSOR_END_OFFSET] * 100;
+
+ priv->fan_input[0] = get_unaligned_le16(data + PUMP_SPEED_OFFSET);
+ priv->channel_info[0].reported_duty = kraken3_percent_to_pwm(data[PUMP_DUTY_OFFSET]);
+
+ spin_lock(&priv->status_completion_lock);
+ if (priv->kind == X53 && !completion_done(&priv->status_report_processed)) {
+ /* Mark first X-series device report as received */
+ complete_all(&priv->status_report_processed);
+ } else if (priv->kind == Z53) {
+ /* Additional readings for Z53 */
+ priv->fan_input[1] = get_unaligned_le16(data + Z53_FAN_SPEED_OFFSET);
+ priv->channel_info[1].reported_duty =
+ kraken3_percent_to_pwm(data[Z53_FAN_DUTY_OFFSET]);
+
+ if (!completion_done(&priv->status_report_processed))
+ complete_all(&priv->status_report_processed);
+ }
+ spin_unlock(&priv->status_completion_lock);
+
+ priv->updated = jiffies;
+
+ return 0;
+}
+
+static int kraken3_init_device(struct hid_device *hdev)
+{
+ struct kraken3_data *priv = hid_get_drvdata(hdev);
+ int ret;
+
+ /* Set the polling interval */
+ ret = kraken3_write_expanded(priv, set_interval_cmd, SET_INTERVAL_CMD_LENGTH);
+ if (ret < 0)
+ return ret;
+
+ /* Finalize the init process */
+ ret = kraken3_write_expanded(priv, finish_init_cmd, FINISH_INIT_CMD_LENGTH);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int kraken3_get_fw_ver(struct hid_device *hdev)
+{
+ struct kraken3_data *priv = hid_get_drvdata(hdev);
+ int ret;
+
+ ret = kraken3_write_expanded(priv, get_fw_version_cmd, GET_FW_VERSION_CMD_LENGTH);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->fw_version_processed,
+ msecs_to_jiffies(STATUS_VALIDITY));
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused kraken3_reset_resume(struct hid_device *hdev)
+{
+ int ret;
+
+ ret = kraken3_init_device(hdev);
+ if (ret)
+ hid_err(hdev, "req init (reset_resume) failed with %d\n", ret);
+
+ return ret;
+}
+
+static int firmware_version_show(struct seq_file *seqf, void *unused)
+{
+ struct kraken3_data *priv = seqf->private;
+
+ seq_printf(seqf, "%u.%u.%u\n", priv->firmware_version[0], priv->firmware_version[1],
+ priv->firmware_version[2]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(firmware_version);
+
+static void kraken3_debugfs_init(struct kraken3_data *priv)
+{
+ char name[64];
+
+ if (!priv->firmware_version[0])
+ return; /* Nothing to display in debugfs */
+
+ scnprintf(name, sizeof(name), "%s_%s-%s", DRIVER_NAME, kraken3_device_names[priv->kind],
+ dev_name(&priv->hdev->dev));
+
+ priv->debugfs = debugfs_create_dir(name, NULL);
+ debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops);
+}
+
+static int kraken3_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct kraken3_data *priv;
+ int ret;
+
+ priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hdev = hdev;
+ hid_set_drvdata(hdev, priv);
+
+ /*
+	 * Initialize ->updated to STATUS_VALIDITY ms in the past, making
+ * the initial empty data invalid for kraken3_read without the need for
+ * a special case there.
+ */
+ priv->updated = jiffies - msecs_to_jiffies(STATUS_VALIDITY);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid parse failed with %d\n", ret);
+ return ret;
+ }
+
+ /* Enable hidraw so existing user-space tools can continue to work */
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hid hw start failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hid hw open failed with %d\n", ret);
+ goto fail_and_stop;
+ }
+
+ switch (hdev->product) {
+ case USB_PRODUCT_ID_X53:
+ case USB_PRODUCT_ID_X53_SECOND:
+ priv->kind = X53;
+ break;
+ case USB_PRODUCT_ID_Z53:
+ priv->kind = Z53;
+ break;
+ default:
+ break;
+ }
+
+ priv->buffer = devm_kzalloc(&hdev->dev, MAX_REPORT_LENGTH, GFP_KERNEL);
+ if (!priv->buffer) {
+ ret = -ENOMEM;
+ goto fail_and_close;
+ }
+
+ mutex_init(&priv->buffer_lock);
+ mutex_init(&priv->z53_status_request_lock);
+ init_completion(&priv->fw_version_processed);
+ init_completion(&priv->status_report_processed);
+ spin_lock_init(&priv->status_completion_lock);
+
+ hid_device_io_start(hdev);
+ ret = kraken3_init_device(hdev);
+ if (ret < 0) {
+ hid_err(hdev, "device init failed with %d\n", ret);
+ goto fail_and_close;
+ }
+
+ ret = kraken3_get_fw_ver(hdev);
+ if (ret < 0)
+ hid_warn(hdev, "fw version request failed with %d\n", ret);
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev,
+ kraken3_device_names[priv->kind], priv,
+ &kraken3_chip_info, kraken3_groups);
+ if (IS_ERR(priv->hwmon_dev)) {
+ ret = PTR_ERR(priv->hwmon_dev);
+ hid_err(hdev, "hwmon registration failed with %d\n", ret);
+ goto fail_and_close;
+ }
+
+ kraken3_debugfs_init(priv);
+
+ return 0;
+
+fail_and_close:
+ hid_hw_close(hdev);
+fail_and_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void kraken3_remove(struct hid_device *hdev)
+{
+ struct kraken3_data *priv = hid_get_drvdata(hdev);
+
+ debugfs_remove_recursive(priv->debugfs);
+ hwmon_device_unregister(priv->hwmon_dev);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id kraken3_table[] = {
+ /* NZXT Kraken X53/X63/X73 have two possible product IDs */
+ { HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_X53) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_X53_SECOND) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NZXT, USB_PRODUCT_ID_Z53) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kraken3_table);
+
+static struct hid_driver kraken3_driver = {
+ .name = DRIVER_NAME,
+ .id_table = kraken3_table,
+ .probe = kraken3_probe,
+ .remove = kraken3_remove,
+ .raw_event = kraken3_raw_event,
+#ifdef CONFIG_PM
+ .reset_resume = kraken3_reset_resume,
+#endif
+};
+
+static int __init kraken3_init(void)
+{
+ return hid_register_driver(&kraken3_driver);
+}
+
+static void __exit kraken3_exit(void)
+{
+ hid_unregister_driver(&kraken3_driver);
+}
+
+/* When compiled into the kernel, initialize after the HID bus */
+late_initcall(kraken3_init);
+module_exit(kraken3_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Malaco <jonas@protocubo.io>");
+MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>");
+MODULE_DESCRIPTION("Hwmon driver for NZXT Kraken X53/X63/X73, Z53/Z63/Z73 coolers");
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index 06095975f5c8..31159606cec7 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -241,7 +241,6 @@ static const struct of_device_id p8_i2c_occ_of_match[] = {
MODULE_DEVICE_TABLE(of, p8_i2c_occ_of_match);
static struct i2c_driver p8_i2c_occ_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "occ-hwmon",
.of_match_table = p8_i2c_occ_of_match,
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
index ea9602063eab..8d3b0f86cc57 100644
--- a/drivers/hwmon/oxp-sensors.c
+++ b/drivers/hwmon/oxp-sensors.c
@@ -43,6 +43,7 @@ enum oxp_board {
aok_zoe_a1 = 1,
aya_neo_2,
aya_neo_air,
+ aya_neo_air_plus_mendo,
aya_neo_air_pro,
aya_neo_geek,
oxp_mini_amd,
@@ -101,6 +102,13 @@ static const struct dmi_system_id dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
+ },
+ .driver_data = (void *)aya_neo_air_plus_mendo,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
},
.driver_data = (void *)aya_neo_air_pro,
@@ -332,6 +340,7 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
switch (board) {
case aya_neo_2:
case aya_neo_air:
+ case aya_neo_air_plus_mendo:
case aya_neo_air_pro:
case aya_neo_geek:
case oxp_mini_amd:
@@ -374,6 +383,7 @@ static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
switch (board) {
case aya_neo_2:
case aya_neo_air:
+ case aya_neo_air_plus_mendo:
case aya_neo_air_pro:
case aya_neo_geek:
case oxp_mini_amd:
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 294808f5240a..557ae0c414b0 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -377,6 +377,15 @@ config SENSORS_MPQ7932
This driver can also be built as a module. If so, the module will
be called mpq7932.
+config SENSORS_MPQ8785
+ tristate "MPS MPQ8785"
+ help
+	  If you say yes here you get hardware monitoring support for the
+	  MPS MPQ8785 power management IC.
+
+ This driver can also be built as a module. If so, the module will
+ be called mpq8785.
+
config SENSORS_PIM4328
tristate "Flex PIM4328 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index cf8a76744545..f14ecf03ad77 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
obj-$(CONFIG_SENSORS_MP5990) += mp5990.o
obj-$(CONFIG_SENSORS_MPQ7932) += mpq7932.o
+obj-$(CONFIG_SENSORS_MPQ8785) += mpq8785.o
obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
diff --git a/drivers/hwmon/pmbus/ir36021.c b/drivers/hwmon/pmbus/ir36021.c
index 382ba6b6031a..a263afeb8ac1 100644
--- a/drivers/hwmon/pmbus/ir36021.c
+++ b/drivers/hwmon/pmbus/ir36021.c
@@ -63,7 +63,6 @@ static const struct of_device_id __maybe_unused ir36021_of_id[] = {
MODULE_DEVICE_TABLE(of, ir36021_of_id);
static struct i2c_driver ir36021_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "ir36021",
.of_match_table = of_match_ptr(ir36021_of_id),
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 04185be3fdb6..69e18cb468f6 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -22,7 +22,7 @@
#if IS_ENABLED(CONFIG_SENSORS_IR38064_REGULATOR)
static const struct regulator_desc ir38064_reg_desc[] = {
- PMBUS_REGULATOR("vout", 0),
+ PMBUS_REGULATOR_ONE("vout"),
};
#endif /* CONFIG_SENSORS_IR38064_REGULATOR */
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 3a20df5a43ec..cfffa4cdc0df 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -437,7 +437,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
static const struct regulator_desc lm25066_reg_desc[] = {
- PMBUS_REGULATOR("vout", 0),
+ PMBUS_REGULATOR_ONE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
new file mode 100644
index 000000000000..4e2549cc8120
--- /dev/null
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MPS MPQ8785 Step-Down Converter
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+static int mpq8785_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int vout_mode;
+
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode < 0 || vout_mode == 0xff)
+ return vout_mode < 0 ? vout_mode : -ENODEV;
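+	/* Bits [7:5] of VOUT_MODE select the data format (PMBus: 0 linear, 1 VID, 2 direct) */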
+ switch (vout_mode >> 5) {
+ case 0:
+ info->format[PSC_VOLTAGE_OUT] = linear;
+ break;
+ case 1:
+ case 2:
+		info->format[PSC_VOLTAGE_OUT] = direct;
+ info->m[PSC_VOLTAGE_OUT] = 64;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+};
+
+static struct pmbus_driver_info mpq8785_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 4,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_CURRENT_OUT] = 16,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .identify = mpq8785_identify,
+};
+
+static int mpq8785_probe(struct i2c_client *client)
+{
+ return pmbus_do_probe(client, &mpq8785_info);
+};
+
+static const struct i2c_device_id mpq8785_id[] = {
+ { "mpq8785", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq8785_id);
+
+static const struct of_device_id __maybe_unused mpq8785_of_match[] = {
+ { .compatible = "mps,mpq8785" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpq8785_of_match);
+
+static struct i2c_driver mpq8785_driver = {
+ .driver = {
+ .name = "mpq8785",
+ .of_match_table = of_match_ptr(mpq8785_of_match),
+ },
+ .probe = mpq8785_probe,
+ .id_table = mpq8785_id,
+};
+
+module_i2c_driver(mpq8785_driver);
+
+MODULE_AUTHOR("Charles Hsu <ythsu0511@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MPQ8785");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 1363d9f89181..cb4c65a7f288 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -3188,7 +3188,7 @@ static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
static int pmbus_write_smbalert_mask(struct i2c_client *client, u8 page, u8 reg, u8 val)
{
- return pmbus_write_word_data(client, page, PMBUS_SMBALERT_MASK, reg | (val << 8));
+ return _pmbus_write_word_data(client, page, PMBUS_SMBALERT_MASK, reg | (val << 8));
}
static irqreturn_t pmbus_fault_handler(int irq, void *pdata)
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index 09cd114b1736..c31889a036f0 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -15,7 +15,7 @@
#include "pmbus.h"
static const struct regulator_desc __maybe_unused tda38640_reg_desc[] = {
- PMBUS_REGULATOR("vout", 0),
+ PMBUS_REGULATOR_ONE("vout"),
};
struct tda38640_data {
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 4120cadb00ae..2388d0565e7e 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -323,7 +323,6 @@ static const struct i2c_device_id powr1220_ids[] = {
MODULE_DEVICE_TABLE(i2c, powr1220_ids);
static struct i2c_driver powr1220_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "powr1220",
},
diff --git a/drivers/hwmon/pt5161l.c b/drivers/hwmon/pt5161l.c
new file mode 100644
index 000000000000..60361e39c474
--- /dev/null
+++ b/drivers/hwmon/pt5161l.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/* Aries current average temp ADC code CSR */
+#define ARIES_CURRENT_AVG_TEMP_ADC_CSR 0x42c
+
+/* Device Load check register */
+#define ARIES_CODE_LOAD_REG 0x605
+/* Value indicating FW was loaded properly, [3:1] = 3'b111 */
+#define ARIES_LOAD_CODE 0xe
+
+/* Main Micro Heartbeat register */
+#define ARIES_MM_HEARTBEAT_ADDR 0x923
+
+/* Reg offset to specify Address for MM assisted accesses */
+#define ARIES_MM_ASSIST_REG_ADDR_OFFSET 0xd99
+/* Reg offset to specify Command for MM assisted accesses */
+#define ARIES_MM_ASSIST_CMD_OFFSET 0xd9d
+/* Reg offset to MM SPARE 0 used specify Address[7:0] */
+#define ARIES_MM_ASSIST_SPARE_0_OFFSET 0xd9f
+/* Reg offset to MM SPARE 3 used specify Data Byte 0 */
+#define ARIES_MM_ASSIST_SPARE_3_OFFSET 0xda2
+/* Wide register reads */
+#define ARIES_MM_RD_WIDE_REG_2B 0x1d
+#define ARIES_MM_RD_WIDE_REG_3B 0x1e
+#define ARIES_MM_RD_WIDE_REG_4B 0x1f
+#define ARIES_MM_RD_WIDE_REG_5B 0x20
+
+/* Time delay between polls of MM-assisted access status (microseconds) */
+#define ARIES_MM_STATUS_TIME 5000
+
+/* AL Main SRAM DMEM offset (A0) */
+#define AL_MAIN_SRAM_DMEM_OFFSET (64 * 1024)
+/* SRAM read command */
+#define AL_TG_RD_LOC_IND_SRAM 0x16
+
+/* Offset for main micro FW info */
+#define ARIES_MAIN_MICRO_FW_INFO (96 * 1024 - 128)
+/* FW Info (Major) offset location in struct */
+#define ARIES_MM_FW_VERSION_MAJOR 0
+/* FW Info (Minor) offset location in struct */
+#define ARIES_MM_FW_VERSION_MINOR 1
+/* FW Info (Build no.) offset location in struct */
+#define ARIES_MM_FW_VERSION_BUILD 2
+
+#define ARIES_TEMP_CAL_CODE_DEFAULT 84
+
+/* Struct defining FW version loaded on an Aries device */
+struct pt5161l_fw_ver {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
+/* Each client has this additional data */
+struct pt5161l_data {
+ struct i2c_client *client;
+ struct dentry *debugfs;
+ struct pt5161l_fw_ver fw_ver;
+ struct mutex lock; /* for atomic I2C transactions */
+ bool init_done;
+ bool code_load_okay; /* indicate if code load reg value is expected */
+ bool mm_heartbeat_okay; /* indicate if Main Micro heartbeat is good */
+ bool mm_wide_reg_access; /* MM assisted wide register access */
+};
+
+static struct dentry *pt5161l_debugfs_dir;
+
+/*
+ * Write multiple data bytes to Aries over I2C
+ */
+static int pt5161l_write_block_data(struct pt5161l_data *data, u32 address,
+ u8 len, u8 *val)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ u8 remain_len = len;
+ u8 xfer_len, curr_len;
+ u8 buf[16];
+ u8 cmd = 0x0F; /* [7]:pec_en, [4:2]:func, [1]:start, [0]:end */
+ u8 config = 0x40; /* [6]:cfg_type, [4:1]:burst_len, [0]:address bit16 */
+
+ while (remain_len > 0) {
+ if (remain_len > 4) {
+ curr_len = 4;
+ remain_len -= 4;
+ } else {
+ curr_len = remain_len;
+ remain_len = 0;
+ }
+
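+		/* Byte 0 carries the burst length and address bit 16; bytes 1-2 carry address[15:0] */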
+ buf[0] = config | (curr_len - 1) << 1 | ((address >> 16) & 0x1);
+ buf[1] = (address >> 8) & 0xff;
+ buf[2] = address & 0xff;
+ memcpy(&buf[3], val, curr_len);
+
+ xfer_len = 3 + curr_len;
+ ret = i2c_smbus_write_block_data(client, cmd, xfer_len, buf);
+ if (ret)
+ return ret;
+
+ val += curr_len;
+ address += curr_len;
+ }
+
+ return 0;
+}
+
+/*
+ * Read multiple data bytes from Aries over I2C
+ */
+static int pt5161l_read_block_data(struct pt5161l_data *data, u32 address,
+ u8 len, u8 *val)
+{
+ struct i2c_client *client = data->client;
+ int ret, tries;
+ u8 remain_len = len;
+ u8 curr_len;
+ u8 wbuf[16], rbuf[24];
+ u8 cmd = 0x08; /* [7]:pec_en, [4:2]:func, [1]:start, [0]:end */
+ u8 config = 0x00; /* [6]:cfg_type, [4:1]:burst_len, [0]:address bit16 */
+
+ while (remain_len > 0) {
+ if (remain_len > 16) {
+ curr_len = 16;
+ remain_len -= 16;
+ } else {
+ curr_len = remain_len;
+ remain_len = 0;
+ }
+
+ wbuf[0] = config | (curr_len - 1) << 1 |
+ ((address >> 16) & 0x1);
+ wbuf[1] = (address >> 8) & 0xff;
+ wbuf[2] = address & 0xff;
+
+ for (tries = 0; tries < 3; tries++) {
+ ret = i2c_smbus_write_block_data(client, (cmd | 0x2), 3,
+ wbuf);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_block_data(client, (cmd | 0x1),
+ rbuf);
+ if (ret == curr_len)
+ break;
+ }
+ if (tries >= 3)
+ return ret;
+
+ memcpy(val, rbuf, curr_len);
+ val += curr_len;
+ address += curr_len;
+ }
+
+ return 0;
+}
+
+static int pt5161l_read_wide_reg(struct pt5161l_data *data, u32 address,
+ u8 width, u8 *val)
+{
+ int ret, tries;
+ u8 buf[8];
+ u8 status;
+
+ /*
+	 * Safely access wide registers using the mailbox method to avoid
+	 * conflicting with the Aries firmware; otherwise fall back to the
+	 * legacy, less safe method.
+ */
+ if (data->mm_wide_reg_access) {
+ buf[0] = address & 0xff;
+ buf[1] = (address >> 8) & 0xff;
+ buf[2] = (address >> 16) & 0x1;
+ ret = pt5161l_write_block_data(data,
+ ARIES_MM_ASSIST_SPARE_0_OFFSET,
+ 3, buf);
+ if (ret)
+ return ret;
+
+ /* Set command based on width */
+ switch (width) {
+ case 2:
+ buf[0] = ARIES_MM_RD_WIDE_REG_2B;
+ break;
+ case 3:
+ buf[0] = ARIES_MM_RD_WIDE_REG_3B;
+ break;
+ case 4:
+ buf[0] = ARIES_MM_RD_WIDE_REG_4B;
+ break;
+ case 5:
+ buf[0] = ARIES_MM_RD_WIDE_REG_5B;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = pt5161l_write_block_data(data, ARIES_MM_ASSIST_CMD_OFFSET,
+ 1, buf);
+ if (ret)
+ return ret;
+
+ status = 0xff;
+ for (tries = 0; tries < 100; tries++) {
+ ret = pt5161l_read_block_data(data,
+ ARIES_MM_ASSIST_CMD_OFFSET,
+ 1, &status);
+ if (ret)
+ return ret;
+
+ if (status == 0)
+ break;
+
+ usleep_range(ARIES_MM_STATUS_TIME,
+ ARIES_MM_STATUS_TIME + 1000);
+ }
+ if (status != 0)
+ return -ETIMEDOUT;
+
+ ret = pt5161l_read_block_data(data,
+ ARIES_MM_ASSIST_SPARE_3_OFFSET,
+ width, val);
+ if (ret)
+ return ret;
+ } else {
+ return pt5161l_read_block_data(data, address, width, val);
+ }
+
+ return 0;
+}
+
+/*
+ * Read multiple (up to eight) data bytes from micro SRAM over I2C
+ */
+static int
+pt5161l_read_block_data_main_micro_indirect(struct pt5161l_data *data,
+ u32 address, u8 len, u8 *val)
+{
+ int ret, tries;
+ u8 buf[8];
+ u8 i, status;
+ u32 uind_offs = ARIES_MM_ASSIST_REG_ADDR_OFFSET;
+ u32 eeprom_base, eeprom_addr;
+
+ /* No multi-byte indirect support here. Hence read a byte at a time */
+ eeprom_base = address - AL_MAIN_SRAM_DMEM_OFFSET;
+ for (i = 0; i < len; i++) {
+ eeprom_addr = eeprom_base + i;
+ buf[0] = eeprom_addr & 0xff;
+ buf[1] = (eeprom_addr >> 8) & 0xff;
+ buf[2] = (eeprom_addr >> 16) & 0xff;
+ ret = pt5161l_write_block_data(data, uind_offs, 3, buf);
+ if (ret)
+ return ret;
+
+ buf[0] = AL_TG_RD_LOC_IND_SRAM;
+ ret = pt5161l_write_block_data(data, uind_offs + 4, 1, buf);
+ if (ret)
+ return ret;
+
+ status = 0xff;
+ for (tries = 0; tries < 255; tries++) {
+ ret = pt5161l_read_block_data(data, uind_offs + 4, 1,
+ &status);
+ if (ret)
+ return ret;
+
+ if (status == 0)
+ break;
+ }
+ if (status != 0)
+ return -ETIMEDOUT;
+
+ ret = pt5161l_read_block_data(data, uind_offs + 3, 1, buf);
+ if (ret)
+ return ret;
+
+ val[i] = buf[0];
+ }
+
+ return 0;
+}
+
+/*
+ * Check firmware load status
+ */
+static int pt5161l_fw_load_check(struct pt5161l_data *data)
+{
+ int ret;
+ u8 buf[8];
+
+ ret = pt5161l_read_block_data(data, ARIES_CODE_LOAD_REG, 1, buf);
+ if (ret)
+ return ret;
+
+ if (buf[0] < ARIES_LOAD_CODE) {
+ dev_dbg(&data->client->dev,
+ "Code Load reg unexpected. Not all modules are loaded %x\n",
+ buf[0]);
+ data->code_load_okay = false;
+ } else {
+ data->code_load_okay = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Check main micro heartbeat
+ */
+static int pt5161l_heartbeat_check(struct pt5161l_data *data)
+{
+ int ret, tries;
+ u8 buf[8];
+ u8 heartbeat;
+ bool hb_changed = false;
+
+ ret = pt5161l_read_block_data(data, ARIES_MM_HEARTBEAT_ADDR, 1, buf);
+ if (ret)
+ return ret;
+
+ heartbeat = buf[0];
+ for (tries = 0; tries < 100; tries++) {
+ ret = pt5161l_read_block_data(data, ARIES_MM_HEARTBEAT_ADDR, 1,
+ buf);
+ if (ret)
+ return ret;
+
+ if (buf[0] != heartbeat) {
+ hb_changed = true;
+ break;
+ }
+ }
+ data->mm_heartbeat_okay = hb_changed;
+
+ return 0;
+}
+
+/*
+ * Check the status of firmware
+ */
+static int pt5161l_fwsts_check(struct pt5161l_data *data)
+{
+ int ret;
+ u8 buf[8];
+ u8 major = 0, minor = 0;
+ u16 build = 0;
+
+ ret = pt5161l_fw_load_check(data);
+ if (ret)
+ return ret;
+
+ ret = pt5161l_heartbeat_check(data);
+ if (ret)
+ return ret;
+
+ if (data->code_load_okay && data->mm_heartbeat_okay) {
+ ret = pt5161l_read_block_data_main_micro_indirect(data, ARIES_MAIN_MICRO_FW_INFO +
+ ARIES_MM_FW_VERSION_MAJOR,
+ 1, &major);
+ if (ret)
+ return ret;
+
+ ret = pt5161l_read_block_data_main_micro_indirect(data, ARIES_MAIN_MICRO_FW_INFO +
+ ARIES_MM_FW_VERSION_MINOR,
+ 1, &minor);
+ if (ret)
+ return ret;
+
+ ret = pt5161l_read_block_data_main_micro_indirect(data, ARIES_MAIN_MICRO_FW_INFO +
+ ARIES_MM_FW_VERSION_BUILD,
+ 2, buf);
+ if (ret)
+ return ret;
+ build = buf[1] << 8 | buf[0];
+ }
+ data->fw_ver.major = major;
+ data->fw_ver.minor = minor;
+ data->fw_ver.build = build;
+
+ return 0;
+}
+
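+/* Compare firmware versions by packing major.minor.build into a u32, e.g. 2.2.0 is 0x02020000 */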
+static int pt5161l_fw_is_at_least(struct pt5161l_data *data, u8 major, u8 minor,
+ u16 build)
+{
+ u32 ver = major << 24 | minor << 16 | build;
+ u32 curr_ver = data->fw_ver.major << 24 | data->fw_ver.minor << 16 |
+ data->fw_ver.build;
+
+ if (curr_ver >= ver)
+ return true;
+
+ return false;
+}
+
+static int pt5161l_init_dev(struct pt5161l_data *data)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = pt5161l_fwsts_check(data);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+
+ /* Firmware 2.2.0 enables safe access to wide registers */
+ if (pt5161l_fw_is_at_least(data, 2, 2, 0))
+ data->mm_wide_reg_access = true;
+
+ data->init_done = true;
+
+ return 0;
+}
+
+static int pt5161l_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct pt5161l_data *data = dev_get_drvdata(dev);
+ int ret;
+ u8 buf[8];
+ long adc_code;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!data->init_done) {
+ ret = pt5161l_init_dev(data);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&data->lock);
+ ret = pt5161l_read_wide_reg(data,
+ ARIES_CURRENT_AVG_TEMP_ADC_CSR, 4,
+ buf);
+ mutex_unlock(&data->lock);
+ if (ret) {
+ dev_dbg(dev, "Read adc_code failed %d\n", ret);
+ return ret;
+ }
+
+ adc_code = buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0];
+ if (adc_code == 0 || adc_code >= 0x3ff) {
+ dev_dbg(dev, "Invalid adc_code %lx\n", adc_code);
+ return -EIO;
+ }
+
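+		/* 110000 millidegrees at ADC code 334 (cal code + 250); -320 per code step above */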
+ *val = 110000 +
+ ((adc_code - (ARIES_TEMP_CAL_CODE_DEFAULT + 250)) *
+ -320);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t pt5161l_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *pt5161l_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops pt5161l_hwmon_ops = {
+ .is_visible = pt5161l_is_visible,
+ .read = pt5161l_read,
+};
+
+static const struct hwmon_chip_info pt5161l_chip_info = {
+ .ops = &pt5161l_hwmon_ops,
+ .info = pt5161l_info,
+};
+
+static ssize_t pt5161l_debugfs_read_fw_ver(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct pt5161l_data *data = file->private_data;
+ int ret;
+ char ver[32];
+
+ mutex_lock(&data->lock);
+ ret = pt5161l_fwsts_check(data);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+
+ ret = snprintf(ver, sizeof(ver), "%u.%u.%u\n", data->fw_ver.major,
+ data->fw_ver.minor, data->fw_ver.build);
+
+ return simple_read_from_buffer(buf, count, ppos, ver, ret);
+}
+
+static const struct file_operations pt5161l_debugfs_ops_fw_ver = {
+ .read = pt5161l_debugfs_read_fw_ver,
+ .open = simple_open,
+};
+
+static ssize_t pt5161l_debugfs_read_fw_load_sts(struct file *file,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct pt5161l_data *data = file->private_data;
+ int ret;
+ bool status = false;
+ char health[16];
+
+ mutex_lock(&data->lock);
+ ret = pt5161l_fw_load_check(data);
+ mutex_unlock(&data->lock);
+ if (ret == 0)
+ status = data->code_load_okay;
+
+ ret = snprintf(health, sizeof(health), "%s\n",
+ status ? "normal" : "abnormal");
+
+ return simple_read_from_buffer(buf, count, ppos, health, ret);
+}
+
+static const struct file_operations pt5161l_debugfs_ops_fw_load_sts = {
+ .read = pt5161l_debugfs_read_fw_load_sts,
+ .open = simple_open,
+};
+
+static ssize_t pt5161l_debugfs_read_hb_sts(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct pt5161l_data *data = file->private_data;
+ int ret;
+ bool status = false;
+ char health[16];
+
+ mutex_lock(&data->lock);
+ ret = pt5161l_heartbeat_check(data);
+ mutex_unlock(&data->lock);
+ if (ret == 0)
+ status = data->mm_heartbeat_okay;
+
+ ret = snprintf(health, sizeof(health), "%s\n",
+ status ? "normal" : "abnormal");
+
+ return simple_read_from_buffer(buf, count, ppos, health, ret);
+}
+
+static const struct file_operations pt5161l_debugfs_ops_hb_sts = {
+ .read = pt5161l_debugfs_read_hb_sts,
+ .open = simple_open,
+};
+
+static int pt5161l_init_debugfs(struct pt5161l_data *data)
+{
+ data->debugfs = debugfs_create_dir(dev_name(&data->client->dev),
+ pt5161l_debugfs_dir);
+
+ debugfs_create_file("fw_ver", 0444, data->debugfs, data,
+ &pt5161l_debugfs_ops_fw_ver);
+
+ debugfs_create_file("fw_load_status", 0444, data->debugfs, data,
+ &pt5161l_debugfs_ops_fw_load_sts);
+
+ debugfs_create_file("heartbeat_status", 0444, data->debugfs, data,
+ &pt5161l_debugfs_ops_hb_sts);
+
+ return 0;
+}
+
+static int pt5161l_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct pt5161l_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->lock);
+ pt5161l_init_dev(data);
+ dev_set_drvdata(dev, data);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &pt5161l_chip_info,
+ NULL);
+
+ pt5161l_init_debugfs(data);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static void pt5161l_remove(struct i2c_client *client)
+{
+ struct pt5161l_data *data = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(data->debugfs);
+}
+
+static const struct of_device_id __maybe_unused pt5161l_of_match[] = {
+ { .compatible = "asteralabs,pt5161l" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pt5161l_of_match);
+
+static const struct acpi_device_id __maybe_unused pt5161l_acpi_match[] = {
+ { "PT5161L", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, pt5161l_acpi_match);
+
+static const struct i2c_device_id pt5161l_id[] = {
+ { "pt5161l", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pt5161l_id);
+
+static struct i2c_driver pt5161l_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "pt5161l",
+ .of_match_table = of_match_ptr(pt5161l_of_match),
+ .acpi_match_table = ACPI_PTR(pt5161l_acpi_match),
+ },
+ .probe = pt5161l_probe,
+ .remove = pt5161l_remove,
+ .id_table = pt5161l_id,
+};
+
+static int __init pt5161l_init(void)
+{
+ pt5161l_debugfs_dir = debugfs_create_dir("pt5161l", NULL);
+ return i2c_add_driver(&pt5161l_driver);
+}
+
+static void __exit pt5161l_exit(void)
+{
+ i2c_del_driver(&pt5161l_driver);
+ debugfs_remove_recursive(pt5161l_debugfs_dir);
+}
+
+module_init(pt5161l_init);
+module_exit(pt5161l_exit);
+
+MODULE_AUTHOR("Cosmo Chou <cosmo.chou@quantatw.com>");
+MODULE_DESCRIPTION("Hwmon driver for Astera Labs Aries PCIe retimer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sbrmi.c b/drivers/hwmon/sbrmi.c
index 484703f0ea5f..4318f5121145 100644
--- a/drivers/hwmon/sbrmi.c
+++ b/drivers/hwmon/sbrmi.c
@@ -342,7 +342,6 @@ static const struct of_device_id __maybe_unused sbrmi_of_match[] = {
MODULE_DEVICE_TABLE(of, sbrmi_of_match);
static struct i2c_driver sbrmi_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "sbrmi",
.of_match_table = of_match_ptr(sbrmi_of_match),
diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c
index dd85cf89f008..a4181acb1aa6 100644
--- a/drivers/hwmon/sbtsi_temp.c
+++ b/drivers/hwmon/sbtsi_temp.c
@@ -232,7 +232,6 @@ static const struct of_device_id __maybe_unused sbtsi_of_match[] = {
MODULE_DEVICE_TABLE(of, sbtsi_of_match);
static struct i2c_driver sbtsi_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "sbtsi",
.of_match_table = of_match_ptr(sbtsi_of_match),
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 1891d4d75aa9..33e997b5c1f5 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -116,7 +116,7 @@ static const struct regmap_config sch5627_regmap_config = {
.val_bits = 8,
.wr_table = &sch5627_tunables_table,
.rd_table = &sch5627_tunables_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
.can_sleep = true,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 79657910b79e..c0d02fbcdb76 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -10,6 +10,7 @@
#include <asm/page.h>
#include <linux/crc8.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hwmon.h>
@@ -41,6 +42,9 @@ static const unsigned char sht3x_cmd_heater_off[] = { 0x30, 0x66 };
/* other commands */
static const unsigned char sht3x_cmd_read_status_reg[] = { 0xf3, 0x2d };
static const unsigned char sht3x_cmd_clear_status_reg[] = { 0x30, 0x41 };
+static const unsigned char sht3x_cmd_read_serial_number[] = { 0x37, 0x80 };
+
+static struct dentry *debugfs;
/* delays for single-shot mode i2c commands, both in us */
#define SHT3X_SINGLE_WAIT_TIME_HPM 15000
@@ -163,12 +167,14 @@ struct sht3x_data {
enum sht3x_chips chip_id;
struct mutex i2c_lock; /* lock for sending i2c commands */
struct mutex data_lock; /* lock for updating driver data */
+ struct dentry *sensor_dir;
u8 mode;
const unsigned char *command;
u32 wait_time; /* in us*/
unsigned long last_update; /* last update in periodic mode*/
enum sht3x_repeatability repeatability;
+ u32 serial_number;
/*
* cached values for temperature and humidity and limits
@@ -831,6 +837,40 @@ static int sht3x_write(struct device *dev, enum hwmon_sensor_types type,
}
}
+static void sht3x_debugfs_init(struct sht3x_data *data)
+{
+ char name[32];
+
+ snprintf(name, sizeof(name), "i2c%u-%02x",
+ data->client->adapter->nr, data->client->addr);
+ data->sensor_dir = debugfs_create_dir(name, debugfs);
+ debugfs_create_u32("serial_number", 0444,
+ data->sensor_dir, &data->serial_number);
+}
+
+static void sht3x_debugfs_remove(void *sensor_dir)
+{
+ debugfs_remove_recursive(sensor_dir);
+}
+
+static int sht3x_serial_number_read(struct sht3x_data *data)
+{
+ int ret;
+ char buffer[SHT3X_RESPONSE_LENGTH];
+ struct i2c_client *client = data->client;
+
+ ret = sht3x_read_from_command(client, data,
+ sht3x_cmd_read_serial_number,
+ buffer,
+ SHT3X_RESPONSE_LENGTH, 0);
+ if (ret)
+ return ret;
+
+ data->serial_number = (buffer[0] << 24) | (buffer[1] << 16) |
+ (buffer[3] << 8) | buffer[4];
+ return ret;
+}
+
static const struct hwmon_ops sht3x_ops = {
.is_visible = sht3x_is_visible,
.read = sht3x_read,
@@ -899,6 +939,18 @@ static int sht3x_probe(struct i2c_client *client)
if (ret)
return ret;
+ ret = sht3x_serial_number_read(data);
+ if (ret) {
+ dev_dbg(dev, "unable to read serial number\n");
+ } else {
+ sht3x_debugfs_init(data);
+ ret = devm_add_action_or_reset(dev,
+ sht3x_debugfs_remove,
+ data->sensor_dir);
+ if (ret)
+ return ret;
+ }
+
hwmon_dev = devm_hwmon_device_register_with_info(dev,
client->name,
data,
@@ -917,7 +969,19 @@ static struct i2c_driver sht3x_i2c_driver = {
.id_table = sht3x_ids,
};
-module_i2c_driver(sht3x_i2c_driver);
+static int __init sht3x_init(void)
+{
+ debugfs = debugfs_create_dir("sht3x", NULL);
+ return i2c_add_driver(&sht3x_i2c_driver);
+}
+module_init(sht3x_init);
+
+static void __exit sht3x_cleanup(void)
+{
+ debugfs_remove_recursive(debugfs);
+ i2c_del_driver(&sht3x_i2c_driver);
+}
+module_exit(sht3x_cleanup);
MODULE_AUTHOR("David Frey <david.frey@sensirion.com>");
MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 641be1f7f9cd..e73b1522f3ce 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -153,13 +153,9 @@ static inline s8 TEMP_TO_REG(long val)
}
/*
- * FAN DIV: 1, 2, 4, or 8 (defaults to 2)
- * REG: 0, 1, 2, or 3 (respectively) (defaults to 1)
+ * FAN DIV: 1, 2, 4, or 8
+ * REG: 0, 1, 2, or 3 (respectively)
*/
-static inline u8 DIV_TO_REG(int val)
-{
- return val == 8 ? 3 : val == 4 ? 2 : val == 1 ? 0 : 1;
-}
#define DIV_FROM_REG(val) (1 << (val))
/*
diff --git a/drivers/hwmon/surface_fan.c b/drivers/hwmon/surface_fan.c
new file mode 100644
index 000000000000..de3c5a2409c6
--- /dev/null
+++ b/drivers/hwmon/surface_fan.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Fan driver for Surface System Aggregator Module. It provides access
+ * to the fan's rpm through the hwmon system.
+ *
+ * Copyright (C) 2023 Ivor Wanders <ivor@iwanders.net>
+ */
+
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/surface_aggregator/device.h>
+#include <linux/types.h>
+
+// SSAM
+SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_fan_rpm_get, __le16, {
+ .target_category = SSAM_SSH_TC_FAN,
+ .command_id = 0x01,
+});
+
+// hwmon
+static umode_t surface_fan_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ return 0444;
+}
+
+static int surface_fan_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct ssam_device *sdev = dev_get_drvdata(dev);
+ int ret;
+ __le16 value;
+
+ ret = __ssam_fan_rpm_get(sdev, &value);
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(value);
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *const surface_fan_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops surface_fan_hwmon_ops = {
+ .is_visible = surface_fan_hwmon_is_visible,
+ .read = surface_fan_hwmon_read,
+};
+
+static const struct hwmon_chip_info surface_fan_chip_info = {
+ .ops = &surface_fan_hwmon_ops,
+ .info = surface_fan_info,
+};
+
+static int surface_fan_probe(struct ssam_device *sdev)
+{
+ struct device *hdev;
+
+ hdev = devm_hwmon_device_register_with_info(&sdev->dev,
+ "surface_fan", sdev,
+ &surface_fan_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hdev);
+}
+
+static const struct ssam_device_id ssam_fan_match[] = {
+ { SSAM_SDEV(FAN, SAM, 0x01, 0x01) },
+ {},
+};
+MODULE_DEVICE_TABLE(ssam, ssam_fan_match);
+
+static struct ssam_device_driver surface_fan = {
+ .probe = surface_fan_probe,
+ .match_table = ssam_fan_match,
+ .driver = {
+ .name = "surface_fan",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_fan);
+
+MODULE_AUTHOR("Ivor Wanders <ivor@iwanders.net>");
+MODULE_DESCRIPTION("Fan Driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 91f2314568cf..df1b45a62e80 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -256,7 +256,7 @@ static int tmp401_reg_write(void *context, unsigned int reg, unsigned int val)
static const struct regmap_config tmp401_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = tmp401_regmap_is_volatile,
.reg_read = tmp401_reg_read,
.reg_write = tmp401_reg_write,
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index 045eea8378c2..401a28f55f93 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -290,7 +290,6 @@ static int w83773_probe(struct i2c_client *client)
}
static struct i2c_driver w83773_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "w83773g",
.of_match_table = of_match_ptr(w83773_of_match),
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 5511fd46a65e..ce8c4846b7fa 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -445,6 +445,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_status);
irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
+ irq_handled = irq_status;
bus->cmd_err = ret;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
goto out_complete;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 2c36b36d7d51..274e987e4cfa 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1416,7 +1416,6 @@ static void i801_add_mux(struct i801_priv *priv)
lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
mux_config->gpios[i], "mux", 0);
gpiod_add_lookup_table(lookup);
- priv->lookup = lookup;
/*
* Register the mux device, we use PLATFORM_DEVID_NONE here
@@ -1430,7 +1429,10 @@ static void i801_add_mux(struct i801_priv *priv)
sizeof(struct i2c_mux_gpio_platform_data));
if (IS_ERR(priv->mux_pdev)) {
gpiod_remove_lookup_table(lookup);
+ devm_kfree(dev, lookup);
dev_err(dev, "Failed to register i2c-mux-gpio device\n");
+ } else {
+ priv->lookup = lookup;
}
}
@@ -1742,9 +1744,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
i801_enable_host_notify(&priv->adapter);
- i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
+ i801_probe_optional_slaves(priv);
pci_set_drvdata(dev, priv);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 88a053987403..60e813137f84 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ /* flag the last byte as processed */
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
+
i2c_imx_slave_finish_op(i2c_imx);
return IRQ_HANDLED;
}
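The i2c-imx hunk above raises I2C_SLAVE_READ_PROCESSED for the final byte of a master read. For context, a minimal sketch of an I2C slave callback and where that event fits (illustrative only, not taken from this patch): the core reports READ_REQUESTED when the master starts reading and READ_PROCESSED once the previously supplied byte has gone out on the wire.

```c
#include <linux/i2c.h>

/* Illustrative slave callback: serve an incrementing counter to the master. */
static int example_slave_cb(struct i2c_client *client,
			    enum i2c_slave_event event, u8 *val)
{
	static u8 counter;

	switch (event) {
	case I2C_SLAVE_READ_REQUESTED:
		*val = counter;		/* first byte the master will read */
		break;
	case I2C_SLAVE_READ_PROCESSED:
		*val = ++counter;	/* previous byte was sent; queue next */
		break;
	default:
		break;
	}

	return 0;
}
```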
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index ec2a8da134e5..198afee5233c 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -378,11 +378,15 @@ static int wmt_i2c_probe(struct platform_device *pdev)
err = i2c_add_adapter(adap);
if (err)
- return err;
+ goto err_disable_clk;
platform_set_drvdata(pdev, i2c_dev);
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2c_dev->clk);
+ return err;
}
static void wmt_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index bcf1198e8991..e486027f8b07 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1934,7 +1934,8 @@ static void __init spr_idle_state_table_update(void)
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
- unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+ unsigned int mwait_cstate = (MWAIT_HINT2CSTATE(mwait_hint) + 1) &
+ MWAIT_CSTATE_MASK;
unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
MWAIT_SUBSTATE_MASK;
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 90b7ae6d42b7..484fe2e9fb17 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1429,9 +1429,11 @@ static int adxl367_verify_devid(struct adxl367_state *st)
unsigned int val;
int ret;
- ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val,
- val == ADXL367_DEVID_AD, 1000, 10000);
+ ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val);
if (ret)
+ return dev_err_probe(st->dev, ret, "Failed to read dev id\n");
+
+ if (val != ADXL367_DEVID_AD)
return dev_err_probe(st->dev, -ENODEV,
"Invalid dev id 0x%02X, expected 0x%02X\n",
val, ADXL367_DEVID_AD);
@@ -1510,6 +1512,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
if (ret)
return ret;
+ fsleep(15000);
+
ret = adxl367_verify_devid(st);
if (ret)
return ret;
diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
index b595fe94f3a3..62c74bdc0d77 100644
--- a/drivers/iio/accel/adxl367_i2c.c
+++ b/drivers/iio/accel/adxl367_i2c.c
@@ -11,7 +11,7 @@
#include "adxl367.h"
-#define ADXL367_I2C_FIFO_DATA 0x42
+#define ADXL367_I2C_FIFO_DATA 0x18
struct adxl367_i2c_state {
struct regmap *regmap;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 7e2192870743..55442eddf57c 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -212,7 +212,7 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
if (ret)
return ret;
- spi_bus_lock(sigma_delta->spi->master);
+ spi_bus_lock(sigma_delta->spi->controller);
sigma_delta->bus_locked = true;
sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
@@ -235,7 +235,7 @@ out:
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
- spi_bus_unlock(sigma_delta->spi->master);
+ spi_bus_unlock(sigma_delta->spi->controller);
return ret;
}
@@ -287,7 +287,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
ad_sigma_delta_set_channel(sigma_delta, chan->address);
- spi_bus_lock(sigma_delta->spi->master);
+ spi_bus_lock(sigma_delta->spi->controller);
sigma_delta->bus_locked = true;
sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
@@ -322,7 +322,7 @@ out:
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
- spi_bus_unlock(sigma_delta->spi->master);
+ spi_bus_unlock(sigma_delta->spi->controller);
iio_device_release_direct_mode(indio_dev);
if (ret)
@@ -387,7 +387,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
sigma_delta->samples_buf = samples_buf;
- spi_bus_lock(sigma_delta->spi->master);
+ spi_bus_lock(sigma_delta->spi->controller);
sigma_delta->bus_locked = true;
sigma_delta->keep_cs_asserted = true;
@@ -401,7 +401,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
return 0;
err_unlock:
- spi_bus_unlock(sigma_delta->spi->master);
+ spi_bus_unlock(sigma_delta->spi->controller);
return ret;
}
@@ -426,7 +426,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
ad_sigma_delta_disable_all(sigma_delta);
sigma_delta->bus_locked = false;
- return spi_bus_unlock(sigma_delta->spi->master);
+ return spi_bus_unlock(sigma_delta->spi->controller);
}
static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 66d4ba088e70..d4f9b5d8d28d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -109,6 +109,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
/* compute and process only all complete datum */
nb = fifo_count / bytes_per_datum;
fifo_count = nb * bytes_per_datum;
+ if (nb == 0)
+ goto end_session;
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 676704f9151f..e6e6e94452a3 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -111,6 +111,7 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
if (enable) {
/* reset timestamping */
inv_sensors_timestamp_reset(&st->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0);
/* reset FIFO */
d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
ret = regmap_write(st->map, st->reg->user_ctrl, d);
@@ -184,6 +185,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
goto error_power_off;
} else {
+ st->chip_config.gyro_fifo_enable = 0;
+ st->chip_config.accl_fifo_enable = 0;
+ st->chip_config.temp_fifo_enable = 0;
+ st->chip_config.magn_fifo_enable = 0;
result = inv_mpu6050_prepare_fifo(st, false);
if (result)
goto error_power_off;
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index e8a5fed07e88..a444d4b2978b 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -4,6 +4,7 @@
*
* Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/err.h>
@@ -35,6 +36,34 @@ static int bmp280_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
+static int bmp380_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct spi_device *spi = to_spi_device(context);
+ u8 rx_buf[4];
+ ssize_t status;
+
+ /*
+ * Maximum number of consecutive bytes read for a temperature or
+ * pressure measurement is 3.
+ */
+ if (val_size > 3)
+ return -EINVAL;
+
+ /*
+ * According to the BMP3xx datasheets, for a basic SPI read operation,
+ * the first byte needs to be dropped and the rest are the requested
+ * data.
+ */
+ status = spi_write_then_read(spi, reg, 1, rx_buf, val_size + 1);
+ if (status)
+ return status;
+
+ memcpy(val, rx_buf + 1, val_size);
+
+ return 0;
+}
+
static struct regmap_bus bmp280_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp280_regmap_spi_read,
@@ -42,10 +71,19 @@ static struct regmap_bus bmp280_regmap_bus = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static struct regmap_bus bmp380_regmap_bus = {
+ .write = bmp280_regmap_spi_write,
+ .read = bmp380_regmap_spi_read,
+ .read_flag_mask = BIT(7),
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
static int bmp280_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct bmp280_chip_info *chip_info;
+ struct regmap_bus *bmp_regmap_bus;
struct regmap *regmap;
int ret;
@@ -58,8 +96,18 @@ static int bmp280_spi_probe(struct spi_device *spi)
chip_info = spi_get_device_match_data(spi);
+ switch (chip_info->chip_id[0]) {
+ case BMP380_CHIP_ID:
+ case BMP390_CHIP_ID:
+ bmp_regmap_bus = &bmp380_regmap_bus;
+ break;
+ default:
+ bmp_regmap_bus = &bmp280_regmap_bus;
+ break;
+ }
+
regmap = devm_regmap_init(&spi->dev,
- &bmp280_regmap_bus,
+ bmp_regmap_bus,
&spi->dev,
chip_info->regmap_config);
if (IS_ERR(regmap)) {
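As background for the bmp380_regmap_spi_read() addition above: BMP3xx parts clock out one dummy byte before the register contents on SPI reads, so the helper asks for val_size + 1 bytes and discards the first. The same idea outside regmap looks roughly like this (a sketch with a made-up helper name; the 0x80 read bit mirrors the read_flag_mask set in bmp380_regmap_bus):

```c
#include <linux/spi/spi.h>
#include <linux/string.h>

/* Sketch: read 'len' register bytes, dropping the leading dummy byte. */
static int bmp3xx_spi_read_dropping_dummy(struct spi_device *spi, u8 reg,
					  void *val, size_t len)
{
	u8 rx_buf[4];	/* one dummy byte plus at most three data bytes */
	int ret;

	if (len > sizeof(rx_buf) - 1)
		return -EINVAL;

	reg |= 0x80;	/* read bit */
	ret = spi_write_then_read(spi, &reg, 1, rx_buf, len + 1);
	if (ret)
		return ret;

	memcpy(val, rx_buf + 1, len);
	return 0;
}
```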
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 28c8269ba65d..0bba4c5a8d40 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -250,18 +250,17 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private)
struct dlh_state *st = iio_priv(indio_dev);
int ret;
unsigned int chn, i = 0;
- __be32 tmp_buf[2];
+ __be32 tmp_buf[2] = { };
ret = dlh_start_capture_and_read(st);
if (ret)
goto out;
for_each_set_bit(chn, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- memcpy(tmp_buf + i,
+ indio_dev->masklength) {
+ memcpy(&tmp_buf[i++],
&st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
DLH_NUM_DATA_BYTES);
- i++;
}
iio_push_to_buffers(indio_dev, tmp_buf);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 824349659d69..ce9c5bae83bf 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -401,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -455,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
@@ -486,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct ib_mw *mw;
int rc;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
@@ -1817,7 +1827,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
- break;
+ return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1832,13 +1842,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
- break;
+ return 0;
default:
ibdev_err(&rdev->ibdev,
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
- return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
@@ -2556,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
- /* Need unconditional fence for local invalidate
- * opcode to work as expected.
- */
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2583,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- /* Need unconditional fence for reg_mr
- * opcode to function as expected.
- */
-
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -2719,6 +2717,18 @@ bad:
return rc;
}
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Need unconditional fence for non-wire memory opcodes
+ * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
@@ -2798,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = -EINVAL;
goto bad;
}
- if (!rc)
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
bad:
if (rc) {
ibdev_err(&qp->rdev->ibdev,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f022c922fae5..54b4d2f3a5d8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -280,9 +280,6 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
-
- if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
- return;
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index c98e04fe2ddd..439d0c7c5d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -744,7 +744,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- srq->threshold = le16_to_cpu(sb->srq_limit);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 68c621ff59d0..5a91cbda4aee 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
- goto done;
+ goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
}
void free_credit_return(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6e5ac2023328..b67d23b1f286 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 8fb752f2eda2..2cb4b96db721 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index bd4b2b896444..ad50b77282f8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
@@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
}
/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index b5eb8d421988..0b046c061742 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.cq_base_pa = iwcq->kmem.pa;
}
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
- (u32)IRDMA_MAX_CQ_READ_THRESH);
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index f87531318feb..a78a067e3ce7 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
+ if (!MLX5_CAP_GEN(mdev, roce) ||
+ !MLX5_CAP_ROCE(mdev, roce_cc_general))
+ continue;
+
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
dbg_cc_params->params[i].port_num = port_num;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 869369cb5b5f..253fea374a72 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
UVERBS_IDR_ANY_OBJECT,
- UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_READ,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index df1d1b0a3ef7..9947feb7fb8a 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
*/
copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
sizeof(eseg->inline_hdr.start) + copysz, 16);
*size += stride / 16;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7887a6786ed4..f118ce0a9a61 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
- if (rc)
+ if (rc) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info,
+ qp->usq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ }
return rc;
+ }
}
memset(&in_params, 0, sizeof(in_params));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a5be705d718..6f2a688fccbf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1272,10 +1272,10 @@ static int ipoib_get_iflink(const struct net_device *dev)
/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
/* child/vlan interface */
- return priv->parent->ifindex;
+ return READ_ONCE(priv->parent->ifindex);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 58f70cfec45a..040234c01be4 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
+static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
+{
+ return kstrtou64(buffer, 16, (u64 *)kp->arg);
+}
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
- 0444);
+module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
+ &srpt_service_guid, 0444);
MODULE_PARM_DESC(srpt_service_guid,
"Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
*/
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
{
+ struct srpt_rdma_ch *ch = ptr;
+
pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
event->event, ch, ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
@@ -1807,8 +1813,7 @@ retry:
ch->cq_size = ch->rq_size + sq_size;
qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->event_handler = srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
index de734a927b4d..c47fc5f34bd0 100644
--- a/drivers/input/joystick/psxpad-spi.c
+++ b/drivers/input/joystick/psxpad-spi.c
@@ -342,8 +342,8 @@ static int psxpad_spi_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
/* (PlayStation 1/2 joypad might be possible works 250kHz/500kHz) */
- spi->master->min_speed_hz = 125000;
- spi->master->max_speed_hz = 125000;
+ spi->controller->min_speed_hz = 125000;
+ spi->controller->max_speed_hz = 125000;
spi_setup(spi);
/* pad settings */
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 7c4b2a5cc1b5..14c828adebf7 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -130,7 +130,12 @@ static const struct xpad_device {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */
+ { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
+ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
+ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -463,6 +468,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
+ XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index ba00ecfbd343..b41fd1240f43 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -315,12 +315,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(dev, button->gpio,
flags, button->desc ? : DRV_NAME);
- if (error) {
- dev_err(dev,
- "unable to claim gpio %u, err=%d\n",
- button->gpio, error);
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error,
+ "unable to claim gpio %u\n",
+ button->gpio);
bdata->gpiod = gpio_to_desc(button->gpio);
if (!bdata->gpiod) {
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 953992b458e9..ca150618d32f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -19,7 +19,6 @@
* Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
*/
-#include "linux/usb.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -194,8 +193,6 @@ enum tp_type {
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
-/* maximum number of supported endpoints (currently trackpad and button) */
-#define MAX_ENDPOINTS 2
/* trackpad finger data block size */
#define FSIZE_TYPE1 (14 * sizeof(__le16))
@@ -894,18 +891,6 @@ static int bcm5974_resume(struct usb_interface *iface)
return error;
}
-static bool bcm5974_check_endpoints(struct usb_interface *iface,
- const struct bcm5974_config *cfg)
-{
- u8 ep_addr[MAX_ENDPOINTS + 1] = {0};
-
- ep_addr[0] = cfg->tp_ep;
- if (cfg->tp_type == TYPE1)
- ep_addr[1] = cfg->bt_ep;
-
- return usb_check_int_endpoints(iface, ep_addr);
-}
-
static int bcm5974_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
@@ -918,11 +903,6 @@ static int bcm5974_probe(struct usb_interface *iface,
/* find the product index */
cfg = bcm5974_get_config(udev);
- if (!bcm5974_check_endpoints(iface, cfg)) {
- dev_err(&iface->dev, "Unexpected non-int endpoint\n");
- return -ENODEV;
- }
-
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 258d5fe3d395..42eaebb3bf5c 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev)
rmi_disable_irq(rmi_dev, false);
- irq_domain_remove(data->irqdomain);
- data->irqdomain = NULL;
-
rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
+ irq_domain_remove(data->irqdomain);
+ data->irqdomain = NULL;
+
return 0;
}
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 07c866f42296..9d92129aa432 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -375,7 +375,7 @@ static int rmi_spi_probe(struct spi_device *spi)
struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
int error;
- if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 9a29d742617e..0af39bbbe3a3 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -163,6 +163,9 @@ config IOMMU_SVA
select IOMMU_MM_DATA
bool
+config IOMMU_IOPF
+ bool
+
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PCI
@@ -179,7 +182,7 @@ config FSL_PAMU
config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
- depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_ARMV7S
help
@@ -196,7 +199,7 @@ source "drivers/iommu/iommufd/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE
+ select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -398,6 +401,7 @@ config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
select IOMMU_SVA
+ select IOMMU_IOPF
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 95ad9dbfbda0..542760d963ec 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o
+obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 8b3601f285fd..f482aab420f7 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -39,20 +39,16 @@ extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
bool amd_iommu_v2_supported(void);
-struct amd_iommu *get_amd_iommu(unsigned int idx);
-u8 amd_iommu_pc_get_max_banks(unsigned int idx);
-bool amd_iommu_pc_supported(void);
-u8 amd_iommu_pc_get_max_counters(unsigned int idx);
-int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
-int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
/* Device capabilities */
int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+/* GCR3 setup */
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3);
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
+
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
@@ -63,10 +59,10 @@ void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3);
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size);
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid);
#ifdef CONFIG_IRQ_REMAP
int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
@@ -77,10 +73,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
}
#endif
-#define PPR_SUCCESS 0x0
-#define PPR_INVALID 0x1
-#define PPR_FAILURE 0xf
-
int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag);
@@ -150,6 +142,21 @@ static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
return page ? page_address(page) : NULL;
}
+/*
+ * This must be called after device probe completes. During probe
+ * use rlookup_amd_iommu() to get the iommu.
+ */
+static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
+{
+ return iommu_get_iommu_dev(dev, struct amd_iommu, iommu);
+}
+
+/* This must be called after device probe completes. */
+static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
+{
+ return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
+}
+
bool translation_pre_enabled(struct amd_iommu *iommu);
bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
@@ -164,5 +171,4 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
-extern bool amd_iommu_snp_en;
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 809d74faa1a5..d1fed5fc219b 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -453,15 +453,6 @@
#define MAX_DOMAIN_ID 65536
-/* Protection domain flags */
-#define PD_DMA_OPS_MASK BIT(0) /* domain used for dma_ops */
-#define PD_DEFAULT_MASK BIT(1) /* domain is a default dma_ops
- domain for an IOMMU */
-#define PD_PASSTHROUGH_MASK BIT(2) /* domain has no page
- translation */
-#define PD_IOMMUV2_MASK BIT(3) /* domain has gcr3 table */
-#define PD_GIOV_MASK BIT(4) /* domain enable GIOV support */
-
/* Timeout stuff */
#define LOOP_TIMEOUT 100000
#define MMIO_STATUS_TIMEOUT 2000000
@@ -513,14 +504,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define for_each_iommu_safe(iommu, next) \
list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
-#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
-#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
-#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
-#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
-#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
-#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
-
struct amd_iommu;
struct iommu_domain;
struct irq_domain;
@@ -541,6 +524,13 @@ struct amd_irte_ops;
#define io_pgtable_cfg_to_data(x) \
container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+struct gcr3_tbl_info {
+ u64 *gcr3_tbl; /* Guest CR3 table */
+ int glx; /* Number of levels for GCR3 table */
+ u32 pasid_cnt; /* Track attached PASIDs */
+ u16 domid; /* Per device domain ID */
+};
+
struct amd_io_pgtable {
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable iop;
@@ -549,6 +539,11 @@ struct amd_io_pgtable {
u64 *pgd; /* v2 pgtable pgd pointer */
};
+enum protection_domain_mode {
+ PD_MODE_V1 = 1,
+ PD_MODE_V2,
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -560,10 +555,8 @@ struct protection_domain {
struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- int glx; /* Number of levels for GCR3 table */
int nid; /* Node ID */
- u64 *gcr3_tbl; /* Guest CR3 table */
- unsigned long flags; /* flags to find out type of domain */
+ enum protection_domain_mode pd_mode; /* Track page table type */
bool dirty_tracking; /* dirty tracking is enabled in the domain */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
@@ -816,6 +809,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct gcr3_tbl_info gcr3_info; /* Per-device GCR3 table */
struct device *dev;
u16 devid; /* PCI Device ID */
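To help follow the new per-device GCR3 bookkeeping introduced here and used later in iommu.c: the guest-CR3 table is a small radix tree indexed by PASID, nine bits per level, which is exactly the "(pasid >> (9 * level)) & 0x1ff" walk in __get_gcr3_pte() further down. A minimal sketch of that index computation (illustrative only; assumes glx counts the levels above the leaf, as the free/setup helpers in this patch suggest):

```c
#include <stdint.h>

/* Slot a PASID selects at a given level of a 512-entry-per-level table. */
static unsigned int gcr3_level_index(uint32_t pasid, int level)
{
	return (pasid >> (9 * level)) & 0x1ff;
}

/*
 * Example: with glx == 1 (two levels), PASID 0x1234 touches
 * level 1 slot gcr3_level_index(0x1234, 1) == 0x09 and
 * level 0 slot gcr3_level_index(0x1234, 0) == 0x34.
 */
```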
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c83bd0c2a1c9..e7a44929f0da 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -30,6 +30,7 @@
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>
+#include <asm/sev.h>
#include <linux/crash_dump.h>
@@ -2068,6 +2069,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
/* Prevent binding other PCI device drivers to IOMMU devices */
iommu->dev->match_driver = false;
+ /* ACPI _PRT won't have an IRQ for IOMMU */
+ iommu->dev->irq_managed = 1;
+
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
@@ -2769,6 +2773,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2825,6 +2830,7 @@ static void early_enable_iommus(void)
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2838,10 +2844,8 @@ static void enable_iommus_v2(void)
{
struct amd_iommu *iommu;
- for_each_iommu(iommu) {
+ for_each_iommu(iommu)
iommu_enable_ppr_log(iommu);
- iommu_enable_gt(iommu);
- }
}
static void enable_iommus_vapic(void)
@@ -3221,6 +3225,36 @@ out:
return true;
}
+static void iommu_snp_enable(void)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+ if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ return;
+ /*
+ * SNP support requires that the IOMMU is enabled and is
+ * not configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ return;
+ }
+
+ amd_iommu_snp_en = check_feature(FEATURE_SNP);
+ if (!amd_iommu_snp_en) {
+ pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ return;
+ }
+
+ pr_info("IOMMU SNP support enabled.\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+#endif
+}
+
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -3256,6 +3290,7 @@ static int __init state_next(void)
break;
case IOMMU_ENABLED:
register_syscore_ops(&amd_iommu_syscore_ops);
+ iommu_snp_enable();
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
break;
@@ -3694,13 +3729,11 @@ u8 amd_iommu_pc_get_max_banks(unsigned int idx)
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
return amd_iommu_pc_present;
}
-EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
@@ -3711,7 +3744,6 @@ u8 amd_iommu_pc_get_max_counters(unsigned int idx)
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value, bool is_write)
@@ -3767,40 +3799,85 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-int amd_iommu_snp_enable(void)
+#ifdef CONFIG_KVM_AMD_SEV
+static int iommu_page_make_shared(void *page)
{
- /*
- * The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
- */
- if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
- return -EINVAL;
+ unsigned long paddr, pfn;
+
+ paddr = iommu_virt_to_phys(page);
+ /* The C-bit may be set in the paddr */
+ pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+
+ if (!(pfn % PTRS_PER_PMD)) {
+ int ret, level;
+ bool assigned;
+
+ ret = snp_lookup_rmpentry(pfn, &assigned, &level);
+ if (ret) {
+ pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
+ return ret;
+ }
+
+ if (!assigned) {
+ pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
+ return -EINVAL;
+ }
+
+ if (level > PG_LEVEL_4K) {
+ ret = psmash(pfn);
+ if (!ret)
+ goto done;
+
+ pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
+ pfn, ret, level);
+ return ret;
+ }
}
- /*
- * Prevent enabling SNP after IOMMU_ENABLED state because this process
- * affect how IOMMU driver sets up data structures and configures
- * IOMMU hardware.
- */
- if (init_state > IOMMU_ENABLED) {
- pr_err("SNP: Too late to enable SNP for IOMMU.\n");
- return -EINVAL;
+done:
+ return rmp_make_shared(pfn, PG_LEVEL_4K);
+}
+
+static int iommu_make_shared(void *va, size_t size)
+{
+ void *page;
+ int ret;
+
+ if (!va)
+ return 0;
+
+ for (page = va; page < (va + size); page += PAGE_SIZE) {
+ ret = iommu_page_make_shared(page);
+ if (ret)
+ return ret;
}
- amd_iommu_snp_en = check_feature(FEATURE_SNP);
+ return 0;
+}
+
+int amd_iommu_snp_disable(void)
+{
+ struct amd_iommu *iommu;
+ int ret;
+
if (!amd_iommu_snp_en)
- return -EINVAL;
+ return 0;
- pr_info("SNP enabled\n");
+ for_each_iommu(iommu) {
+ ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+ if (ret)
+ return ret;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
+ if (ret)
+ return ret;
+
+ ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
+ if (ret)
+ return ret;
}
return 0;
}
+EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
#endif
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 6d69ba60744f..93489d2db4e8 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -350,38 +350,26 @@ static const struct iommu_flush_ops v2_flush_ops = {
static void v2_free_pgtable(struct io_pgtable *iop)
{
- struct protection_domain *pdom;
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
- pdom = container_of(pgtable, struct protection_domain, iop);
- if (!(pdom->flags & PD_IOMMUV2_MASK))
+ if (!pgtable || !pgtable->pgd)
return;
- /* Clear gcr3 entry */
- amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(pdom);
-
/* Free page table */
free_pgtable(pgtable->pgd, get_pgtable_level());
+ pgtable->pgd = NULL;
}
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
struct protection_domain *pdom = (struct protection_domain *)cookie;
- int ret;
int ias = IOMMU_IN_ADDR_BIT_SIZE;
pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
if (!pgtable->pgd)
return NULL;
- ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
- if (ret)
- goto err_free_pgd;
-
if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
ias = 57;
@@ -395,11 +383,6 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->tlb = &v2_flush_ops;
return &pgtable->iop;
-
-err_free_pgd:
- free_pgtable_page(pgtable->pgd);
-
- return NULL;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 4283dd8191f0..d35c1b8c8e65 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -45,10 +45,6 @@
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
#define MSI_RANGE_END (0xfeefffff)
@@ -79,6 +75,9 @@ struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data);
+
/****************************************************************************
*
* Helper functions
@@ -87,7 +86,7 @@ static void detach_device(struct device *dev);
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
- return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+ return (pdom && (pdom->pd_mode == PD_MODE_V2));
}
static inline int get_acpihid_device_id(struct device *dev,
@@ -1388,14 +1387,9 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
size_t size, ioasid_t pasid, bool gn)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct iommu_cmd cmd;
- int qdep;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
+ int qdep = dev_data->ats_qdep;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
size, pasid, gn);
@@ -1415,16 +1409,12 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
*/
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct pci_dev *pdev = NULL;
struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
-
if (dev_is_pci(dev_data->dev))
pdev = to_pci_dev(dev_data->dev);
@@ -1453,27 +1443,37 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
+static int domain_flush_pages_v2(struct protection_domain *pdom,
u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
- int ret = 0, i;
- ioasid_t pasid = IOMMU_NO_PASID;
- bool gn = false;
+ int ret = 0;
- if (pdom_is_v2_pgtbl_mode(domain))
- gn = true;
+ list_for_each_entry(dev_data, &pdom->dev_list, list) {
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+ u16 domid = dev_data->gcr3_info.domid;
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
+ build_inv_iommu_pages(&cmd, address, size,
+ domid, IOMMU_NO_PASID, true);
+
+ ret |= iommu_queue_command(iommu, &cmd);
+ }
+
+ return ret;
+}
+
+static int domain_flush_pages_v1(struct protection_domain *pdom,
+ u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ int ret = 0, i;
+
+ build_inv_iommu_pages(&cmd, address, size,
+ pdom->id, IOMMU_NO_PASID, false);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (!domain->dev_iommu[i])
+ if (!pdom->dev_iommu[i])
continue;
/*
@@ -1483,6 +1483,28 @@ static void __domain_flush_pages(struct protection_domain *domain,
ret |= iommu_queue_command(amd_iommus[i], &cmd);
}
+ return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It flushes a range of PTEs of the domain.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
+{
+ struct iommu_dev_data *dev_data;
+ int ret = 0;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain)) {
+ gn = true;
+ ret = domain_flush_pages_v2(domain, address, size);
+ } else {
+ ret = domain_flush_pages_v1(domain, address, size);
+ }
+
list_for_each_entry(dev_data, &domain->dev_list, list) {
if (!dev_data->ats_enabled)
@@ -1551,6 +1573,29 @@ static void amd_iommu_domain_flush_all(struct protection_domain *domain)
CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+ build_inv_iommu_pages(&cmd, address, size,
+ dev_data->gcr3_info.domid, pasid, true);
+ iommu_queue_command(iommu, &cmd);
+
+ if (dev_data->ats_enabled)
+ device_flush_iotlb(dev_data, address, size, pasid, true);
+
+ iommu_completion_wait(iommu);
+}
+
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid)
+{
+ amd_iommu_dev_flush_pasid_pages(dev_data, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid);
+}
+
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
int i;
@@ -1592,6 +1637,49 @@ static void domain_flush_devices(struct protection_domain *domain)
device_flush_dte(dev_data);
}
+static void update_device_table(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ set_dte_entry(iommu, dev_data);
+ clone_aliases(iommu, dev_data->dev);
+ }
+}
+
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+{
+ update_device_table(domain);
+ domain_flush_devices(domain);
+}
+
+void amd_iommu_domain_update(struct protection_domain *domain)
+{
+ /* Update device table */
+ amd_iommu_update_and_flush_device_table(domain);
+
+ /* Flush domain TLB(s) and wait for completion */
+ amd_iommu_domain_flush_all(domain);
+}
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ iommu = get_amd_iommu_from_dev(&pdev->dev);
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
/****************************************************************************
*
* The next functions belong to the domain allocation. A domain is
@@ -1656,16 +1744,22 @@ static void free_gcr3_tbl_level2(u64 *tbl)
}
}
-static void free_gcr3_table(struct protection_domain *domain)
+static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{
- if (domain->glx == 2)
- free_gcr3_tbl_level2(domain->gcr3_tbl);
- else if (domain->glx == 1)
- free_gcr3_tbl_level1(domain->gcr3_tbl);
+ if (gcr3_info->glx == 2)
+ free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
+ else if (gcr3_info->glx == 1)
+ free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
else
- BUG_ON(domain->glx != 0);
+ WARN_ON_ONCE(gcr3_info->glx != 0);
- free_page((unsigned long)domain->gcr3_tbl);
+ gcr3_info->glx = 0;
+
+ /* Free per device domain ID */
+ domain_id_free(gcr3_info->domid);
+
+ free_page((unsigned long)gcr3_info->gcr3_tbl);
+ gcr3_info->gcr3_tbl = NULL;
}
/*
@@ -1684,33 +1778,133 @@ static int get_gcr3_levels(int pasids)
return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
+ struct amd_iommu *iommu, int pasids)
{
int levels = get_gcr3_levels(pasids);
+ int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
+ if (gcr3_info->gcr3_tbl)
+ return -EBUSY;
+
+ /* Allocate per device domain ID */
+ gcr3_info->domid = domain_id_alloc();
+
+ gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+ if (gcr3_info->gcr3_tbl == NULL) {
+ domain_id_free(gcr3_info->domid);
return -ENOMEM;
+ }
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
+ gcr3_info->glx = levels;
- amd_iommu_domain_update(domain);
+ return 0;
+}
+static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
+ ioasid_t pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+ u64 *root = gcr3_info->gcr3_tbl;
+ int level = gcr3_info->glx;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = iommu_virt_to_phys(root) | GCR3_VALID;
+ }
+
+ root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int update_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3, bool set)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 *pte;
+
+ pte = __get_gcr3_pte(gcr3_info, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
+
+ if (set)
+ *pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
+ else
+ *pte = 0;
+
+ amd_iommu_dev_flush_pasid_all(dev_data, pasid);
return 0;
}
-static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
- struct protection_domain *domain, bool ats, bool ppr)
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
+ unsigned long gcr3)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, gcr3, true);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt++;
+ return ret;
+}
+
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, 0, false);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt--;
+ return ret;
+}
+
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ u16 devid = dev_data->devid;
+ u16 domid;
+ struct protection_domain *domain = dev_data->domain;
struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+
+ if (gcr3_info && gcr3_info->gcr3_tbl)
+ domid = dev_data->gcr3_info.domid;
+ else
+ domid = domain->id;
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
@@ -1724,23 +1918,23 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
* When SNP is enabled, only set the TV bit when IOMMU
* page translation is in use.
*/
- if (!amd_iommu_snp_en || (domain->id != 0))
+ if (!amd_iommu_snp_en || (domid != 0))
pte_root |= DTE_FLAG_TV;
flags = dev_table[devid].data[1];
- if (ats)
+ if (dev_data->ats_enabled)
flags |= DTE_FLAG_IOTLB;
- if (ppr)
+ if (dev_data->ppr)
pte_root |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
pte_root |= DTE_FLAG_HAD;
- if (domain->flags & PD_IOMMUV2_MASK) {
- u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
- u64 glx = domain->glx;
+ if (gcr3_info && gcr3_info->gcr3_tbl) {
+ u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+ u64 glx = gcr3_info->glx;
u64 tmp;
pte_root |= DTE_FLAG_GV;
@@ -1768,12 +1962,13 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
}
- if (domain->flags & PD_GIOV_MASK)
+ /* GIOV is supported with V2 page table mode only */
+ if (pdom_is_v2_pgtbl_mode(domain))
pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
- flags |= domain->id;
+ flags |= domid;
old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
dev_table[devid].data[1] = flags;
@@ -1804,16 +1999,11 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
amd_iommu_apply_erratum_63(iommu, devid);
}
-static void do_attach(struct iommu_dev_data *dev_data,
- struct protection_domain *domain)
+static int do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
{
- struct amd_iommu *iommu;
- bool ats;
-
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
- ats = dev_data->ats_enabled;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ int ret = 0;
/* Update data structures */
dev_data->domain = domain;
@@ -1827,22 +2017,40 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Init GCR3 table and update device table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ /* By default, set up the GCR3 table to support a single PASID */
+ ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
+ if (ret)
+ return ret;
+
+ ret = update_gcr3(dev_data, 0,
+ iommu_virt_to_phys(domain->iop.pgd), true);
+ if (ret) {
+ free_gcr3_table(&dev_data->gcr3_info);
+ return ret;
+ }
+ }
+
/* Update device table */
- set_dte_entry(iommu, dev_data->devid, domain,
- ats, dev_data->ppr);
+ set_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
+
+ return ret;
}
static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
+ /* Clear GCR3 table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ update_gcr3(dev_data, 0, 0, false);
+ free_gcr3_table(&dev_data->gcr3_info);
+ }
/* Update data structures */
dev_data->domain = NULL;
@@ -1886,7 +2094,7 @@ static int attach_device(struct device *dev,
if (dev_is_pci(dev))
pdev_enable_caps(to_pci_dev(dev));
- do_attach(dev_data, domain);
+ ret = do_attach(dev_data, domain);
out:
spin_unlock(&dev_data->lock);
@@ -1954,8 +2162,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
ret = iommu_init_device(iommu, dev);
if (ret) {
- if (ret != -ENOTSUPP)
- dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+ dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(iommu, dev);
} else {
@@ -2000,42 +2207,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
/*****************************************************************************
*
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-static void update_device_table(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
-
- if (!iommu)
- continue;
- set_dte_entry(iommu, dev_data->devid, domain,
- dev_data->ats_enabled, dev_data->ppr);
- clone_aliases(iommu, dev_data->dev);
- }
-}
-
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
- update_device_table(domain);
- domain_flush_devices(domain);
-}
-
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
-/*****************************************************************************
- *
* The following functions belong to the exported interface of AMD IOMMU
*
* This interface allows access to lower level functions of the IOMMU
@@ -2070,9 +2241,6 @@ static void protection_domain_free(struct protection_domain *domain)
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
-
if (domain->iop.root)
free_page((unsigned long)domain->iop.root);
@@ -2094,19 +2262,16 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return -ENOMEM;
}
+ domain->pd_mode = PD_MODE_V1;
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
-static int protection_domain_init_v2(struct protection_domain *domain)
+static int protection_domain_init_v2(struct protection_domain *pdom)
{
- domain->flags |= PD_GIOV_MASK;
-
- domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
- if (setup_gcr3_table(domain, 1))
- return -ENOMEM;
+ pdom->pd_mode = PD_MODE_V2;
+ pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
return 0;
}
@@ -2194,11 +2359,8 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
struct protection_domain *domain;
struct amd_iommu *iommu = NULL;
- if (dev) {
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return ERR_PTR(-ENODEV);
- }
+ if (dev)
+ iommu = get_amd_iommu_from_dev(dev);
/*
* Since DTE[Mode]=0 is prohibited on SNP-enabled system,
@@ -2279,7 +2441,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
int ret;
/*
@@ -2337,7 +2499,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
int prot = 0;
int ret = -EINVAL;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;
@@ -2383,7 +2545,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
size_t r;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
@@ -2418,7 +2580,7 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
case IOMMU_CAP_DIRTY_TRACKING: {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
return amd_iommu_hd_support(iommu);
}
@@ -2447,9 +2609,7 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
+ iommu = get_amd_iommu_from_dev_data(dev_data);
dev_table = get_dev_table(iommu);
pte_root = dev_table[dev_data->devid].data[0];
@@ -2509,9 +2669,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
devid = PCI_SBDF_TO_DEVID(sbdf);
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return;
+ iommu = get_amd_iommu_from_dev(dev);
pci_seg = iommu->pci_seg;
list_for_each_entry(entry, &pci_seg->unity_map, list) {
@@ -2645,216 +2803,6 @@ const struct iommu_ops amd_iommu_ops = {
}
};
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, size_t size)
-{
- struct iommu_dev_data *dev_data;
- struct iommu_cmd cmd;
- int i, ret;
-
- if (!(domain->flags & PD_IOMMUV2_MASK))
- return -EINVAL;
-
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
-
- /*
- * IOMMU TLB needs to be flushed before Device TLB to
- * prevent device TLB refill from IOMMU TLB
- */
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain->dev_iommu[i] == 0)
- continue;
-
- ret = iommu_queue_command(amd_iommus[i], &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until IOMMU TLB flushes are complete */
- amd_iommu_domain_flush_complete(domain);
-
- /* Now flush device TLBs */
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu;
- int qdep;
-
- /*
- There might be non-IOMMUv2 capable devices in an IOMMUv2
- * domain.
- */
- if (!dev_data->ats_enabled)
- continue;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
- address, size, pasid, true);
-
- ret = iommu_queue_command(iommu, &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until all device TLBs are flushed */
- amd_iommu_domain_flush_complete(domain);
-
- ret = 0;
-
-out:
-
- return ret;
-}
-
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
- u64 address)
-{
- return __flush_pasid(domain, pasid, address, PAGE_SIZE);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
- u64 address)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_page(domain, pasid, address);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
- return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_tlb(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
-{
- int index;
- u64 *pte;
-
- while (true) {
-
- index = (pasid >> (9 * level)) & 0x1ff;
- pte = &root[index];
-
- if (level == 0)
- break;
-
- if (!(*pte & GCR3_VALID)) {
- if (!alloc)
- return NULL;
-
- root = (void *)get_zeroed_page(GFP_ATOMIC);
- if (root == NULL)
- return NULL;
-
- *pte = iommu_virt_to_phys(root) | GCR3_VALID;
- }
-
- root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
- level -= 1;
- }
-
- return pte;
-}
-
-static int __set_gcr3(struct protection_domain *domain, u32 pasid,
- unsigned long cr3)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
- if (pte == NULL)
- return -ENOMEM;
-
- *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
- if (pte == NULL)
- return 0;
-
- *pte = 0;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __set_gcr3(domain, pasid, cr3);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __clear_gcr3(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag)
-{
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- struct iommu_cmd cmd;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = rlookup_amd_iommu(&pdev->dev);
- if (!iommu)
- return -ENODEV;
-
- build_complete_ppr(&cmd, dev_data->devid, pasid, status,
- tag, dev_data->pri_tlp);
-
- return iommu_queue_command(iommu, &cmd);
-}
-
#ifdef CONFIG_IRQ_REMAP
/*****************************************************************************
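The AMD changes above move the GCR3 table (and its domain ID) out of the protection domain and into the per-device struct iommu_dev_data. A minimal sketch of the resulting V2 attach sequence, condensed from do_attach()/do_detach() in this patch; locking and the failure path after the DTE write are elided, and example_attach_v2 is an illustrative name, not a function in the driver:

/* Illustrative only -- condensed from do_attach() in the hunk above. */
static int example_attach_v2(struct iommu_dev_data *dev_data,
			     struct protection_domain *domain)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	int ret;

	/* One GCR3 table and one per-device domain ID per device. */
	ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
	if (ret)
		return ret;

	/* PASID 0 points at the domain's V2 page table root. */
	ret = update_gcr3(dev_data, 0,
			  iommu_virt_to_phys(domain->iop.pgd), true);
	if (ret) {
		free_gcr3_table(&dev_data->gcr3_info);
		return ret;
	}

	/* The DTE now carries the per-device domain ID from gcr3_info. */
	set_dte_entry(iommu, dev_data);
	clone_aliases(iommu, dev_data->dev);
	return device_flush_dte(dev_data);
}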
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index ef3ee95706da..eb1e62cd499a 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -779,7 +779,8 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
kfree(dart_domain);
}
-static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int apple_dart_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 05722121f00e..2cd433a9c8a0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include "arm-smmu-v3.h"
-#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
@@ -292,10 +291,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
- unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
- struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -325,28 +322,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
- cd);
- if (ret) {
- list_for_each_entry_from_reverse(
- master, &smmu_domain->devices, domain_head)
- arm_smmu_write_ctx_desc(
- master, mm_get_enqcmd_pasid(mm), NULL);
- break;
- }
- }
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (ret)
- goto err_put_notifier;
-
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
-err_put_notifier:
- /* Frees smmu_mn */
- mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
@@ -363,9 +341,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list);
- arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
- NULL);
-
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
@@ -381,13 +356,20 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+ struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain;
+
+ if (!(domain->type & __IOMMU_DOMAIN_PAGING))
+ return -ENODEV;
+ smmu_domain = to_smmu_domain(domain);
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+ return -ENODEV;
if (!master || !master->sva_enabled)
return -ENODEV;
@@ -404,9 +386,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
goto err_free_bond;
}
+ ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+ if (ret)
+ goto err_put_notifier;
+
list_add(&bond->list, &master->bonds);
return 0;
+err_put_notifier:
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
err_free_bond:
kfree(bond);
return ret;
@@ -487,7 +475,6 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
- int ret;
struct device *dev = master->dev;
/*
@@ -500,16 +487,7 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return -EINVAL;
- ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
- if (ret)
- return ret;
-
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret) {
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- return ret;
- }
- return 0;
+ return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}
static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
@@ -519,7 +497,6 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return;
- iommu_unregister_device_fault_handler(dev);
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
@@ -568,6 +545,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
+
+ arm_smmu_write_ctx_desc(master, id, NULL);
+
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
@@ -590,7 +570,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
- ret = __arm_smmu_sva_bind(dev, mm);
+ ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
return ret;
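The SVA rework above moves the per-PASID context-descriptor write out of the mmu-notifier setup and into the bind path, with the matching clear done on PASID removal. A condensed, illustrative pairing; the example_ names are not in the driver, and bond bookkeeping and error handling are omitted:

/* Illustrative only -- mirrors the reworked bind/remove paths above. */
static int example_sva_set_dev_pasid(struct iommu_domain *domain,
				     struct device *dev, ioasid_t id)
{
	/* __arm_smmu_sva_bind() now takes the PASID and writes the CD itself. */
	return __arm_smmu_sva_bind(dev, id, domain->mm);
}

static void example_sva_remove_dev_pasid(struct device *dev, ioasid_t id)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	/* Clear the CD for this PASID before the bond is torn down. */
	arm_smmu_write_ctx_desc(master, id, NULL);
}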
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 0ffb1cf17e0b..5ed036225e69 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -29,7 +29,6 @@
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-#include "../../iommu-sva.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@@ -48,6 +47,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
+static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
+ ioasid_t sid);
+
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
ARM_SMMU_EVTQ_IRQ_CFG0,
@@ -86,6 +88,9 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu);
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -921,31 +926,29 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
}
-static int arm_smmu_page_response(struct device *dev,
- struct iommu_fault_event *unused,
- struct iommu_page_response *resp)
+static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
+ struct iommu_page_response *resp)
{
struct arm_smmu_cmdq_ent cmd = {0};
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
int sid = master->streams[0].id;
- if (master->stall_enabled) {
- cmd.opcode = CMDQ_OP_RESUME;
- cmd.resume.sid = sid;
- cmd.resume.stag = resp->grpid;
- switch (resp->code) {
- case IOMMU_PAGE_RESP_INVALID:
- case IOMMU_PAGE_RESP_FAILURE:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
- break;
- case IOMMU_PAGE_RESP_SUCCESS:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
- break;
- default:
- return -EINVAL;
- }
- } else {
- return -ENODEV;
+ if (WARN_ON(!master->stall_enabled))
+ return;
+
+ cmd.opcode = CMDQ_OP_RESUME;
+ cmd.resume.sid = sid;
+ cmd.resume.stag = resp->grpid;
+ switch (resp->code) {
+ case IOMMU_PAGE_RESP_INVALID:
+ case IOMMU_PAGE_RESP_FAILURE:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+ break;
+ case IOMMU_PAGE_RESP_SUCCESS:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+ break;
+ default:
+ break;
}
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
@@ -955,8 +958,6 @@ static int arm_smmu_page_response(struct device *dev,
* terminated... at some point in the future. PRI_RESP is fire and
* forget.
*/
-
- return 0;
}
/* Context descriptor manipulation functions */
@@ -971,6 +972,199 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
+/*
+ * Based on the value of ent, report which bits of the STE the HW will access. It
+ * would be nice if this were complete according to the spec, but minimally it
+ * has to capture the bits this driver uses.
+ */
+static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
+ struct arm_smmu_ste *used_bits)
+{
+ unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
+
+ used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
+ if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
+ return;
+
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
+
+ /* S1 translates */
+ if (cfg & BIT(0)) {
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
+ STRTAB_STE_0_S1CTXPTR_MASK |
+ STRTAB_STE_0_S1CDMAX);
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
+ STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
+ STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
+ STRTAB_STE_1_EATS);
+ used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+ }
+
+ /* S2 translates */
+ if (cfg & BIT(1)) {
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
+ used_bits->data[2] |=
+ cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
+ STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
+ used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
+ }
+
+ if (cfg == STRTAB_STE_0_CFG_BYPASS)
+ used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
+}
+
+/*
+ * Figure out if we can do a hitless update of entry to become target. Returns a
+ * bit mask where a 1 indicates that the qword needs to be set disruptively.
+ * unused_update is an intermediate value of entry that has unused bits set to
+ * their new values.
+ */
+static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target,
+ struct arm_smmu_ste *unused_update)
+{
+ struct arm_smmu_ste target_used = {};
+ struct arm_smmu_ste cur_used = {};
+ u8 used_qword_diff = 0;
+ unsigned int i;
+
+ arm_smmu_get_ste_used(entry, &cur_used);
+ arm_smmu_get_ste_used(target, &target_used);
+
+ for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
+ /*
+ * Check that the masks are up to date; the make functions are not
+ * allowed to set a bit to 1 if the used function doesn't say it
+ * is used.
+ */
+ WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
+
+ /* Bits can change because they are not currently being used */
+ unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
+ (target->data[i] & ~cur_used.data[i]);
+ /*
+ * Each bit indicates that a used bit in a qword needs to be
+ * changed after unused_update is applied.
+ */
+ if ((unused_update->data[i] & target_used.data[i]) !=
+ target->data[i])
+ used_qword_diff |= 1 << i;
+ }
+ return used_qword_diff;
+}
+
+static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target, unsigned int start,
+ unsigned int len)
+{
+ bool changed = false;
+ unsigned int i;
+
+ for (i = start; len != 0; len--, i++) {
+ if (entry->data[i] != target->data[i]) {
+ WRITE_ONCE(entry->data[i], target->data[i]);
+ changed = true;
+ }
+ }
+
+ if (changed)
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+ return changed;
+}
+
+/*
+ * Update the STE/CD to the target configuration. The transition from the
+ * current entry to the target entry takes place over multiple steps that
+ * attempt to make the transition hitless if possible. This function takes care
+ * not to create a situation where the HW can perceive a corrupted entry. HW is
+ * only required to provide 64-bit atomicity for stores from the CPU, while
+ * entries span several 64-bit values.
+ *
+ * The difference between the current value and the target value is analyzed to
+ * determine which of three updates are required - disruptive, hitless or no
+ * change.
+ *
+ * In the most general disruptive case we can make any update in three steps:
+ * - Disrupting the entry (V=0)
+ * - Fill the now-unused qwords, except qword 0 which contains V
+ * - Make qword 0 have the final value and valid (V=1) with a single 64
+ * bit store
+ *
+ * However this disrupts the HW while it is happening. There are several
+ * interesting cases where a STE/CD can be updated without disturbing the HW
+ * because only a small number of bits are changing (S1DSS, CONFIG, etc) or
+ * because the used bits don't intersect. We can detect this by calculating how
+ * many 64 bit values need updating after adjusting the unused bits and skip the
+ * V=0 process. This relies on the IGNORED behavior described in the
+ * specification.
+ */
+static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target)
+{
+ unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ste unused_update;
+ u8 used_qword_diff;
+
+ used_qword_diff =
+ arm_smmu_entry_qword_diff(entry, target, &unused_update);
+ if (hweight8(used_qword_diff) == 1) {
+ /*
+ * Only one qword needs its used bits to be changed. This is a
+ * hitless update, update all bits the current STE is ignoring
+ * to their new values, then update a single "critical qword" to
+ * change the STE and finally 0 out any bits that are now unused
+ * in the target configuration.
+ */
+ unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
+
+ /*
+ * Skip writing unused bits in the critical qword since we'll be
+ * writing it in the next step anyways. This can save a sync
+ * when the only change is in that qword.
+ */
+ unused_update.data[critical_qword_index] =
+ entry->data[critical_qword_index];
+ entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
+ entry_set(smmu, sid, entry, target, critical_qword_index, 1);
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
+ } else if (used_qword_diff) {
+ /*
+ * At least two qwords need their in-use bits to be changed. This
+ * requires a breaking update: zero the V bit, write all qwords
+ * but qword 0, then set qword 0.
+ */
+ unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);
+ entry_set(smmu, sid, entry, &unused_update, 0, 1);
+ entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
+ entry_set(smmu, sid, entry, target, 0, 1);
+ } else {
+ /*
+ * No in-use bit changed. Sanity check that all unused bits are 0
+ * in the entry. The target was already sanity checked by
+ * arm_smmu_entry_qword_diff().
+ */
+ WARN_ON_ONCE(
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
+ }
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
+ struct arm_smmu_cmdq_ent
+ prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ } };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ }
+}
+
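/*
 * Illustration only (not part of this patch): a tiny stand-alone program
 * that mimics the used-qword-diff classification above on a toy two-qword
 * entry.  A bit may change freely while the *current* entry ignores it;
 * only a change to a bit that the current entry uses marks its qword as
 * needing a disruptive ("critical") update.  All names here are invented.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_ste { uint64_t data[2]; };

static uint8_t toy_qword_diff(const struct toy_ste *cur,
			      const struct toy_ste *cur_used,
			      const struct toy_ste *tgt,
			      const struct toy_ste *tgt_used)
{
	uint8_t diff = 0;

	for (int i = 0; i < 2; i++) {
		/* Bits the current entry ignores can take their new value now. */
		uint64_t unused_update = (cur->data[i] & cur_used->data[i]) |
					 (tgt->data[i] & ~cur_used->data[i]);

		/* A used bit still differs -> this qword needs a real update. */
		if ((unused_update & tgt_used->data[i]) != tgt->data[i])
			diff |= 1u << i;
	}
	return diff;
}

int main(void)
{
	/* qword 0 flips a bit both entries use; qword 1 only sets ignored bits. */
	struct toy_ste cur      = { { 0x3, 0x00 } };
	struct toy_ste cur_used = { { 0x3, 0x00 } };
	struct toy_ste tgt      = { { 0x1, 0xf0 } };
	struct toy_ste tgt_used = { { 0x3, 0xf0 } };

	/* Prints 0x1: exactly one critical qword, i.e. the hitless path. */
	printf("used_qword_diff = 0x%x\n",
	       toy_qword_diff(&cur, &cur_used, &tgt, &tgt_used));
	return 0;
}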
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
@@ -1251,158 +1445,131 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
- struct arm_smmu_ste *dst)
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
- /*
- * This is hideously complicated, but we only really care about
- * three cases at the moment:
- *
- * 1. Invalid (all zero) -> bypass/fault (init)
- * 2. Bypass/fault -> translation/bypass (attach)
- * 3. Translation/bypass -> bypass/fault (detach)
- *
- * Given that we can't update the STE atomically and the SMMU
- * doesn't read the thing in a defined order, that leaves us
- * with the following maintenance requirements:
- *
- * 1. Update Config, return (init time STEs aren't live)
- * 2. Write everything apart from dword 0, sync, write dword 0, sync
- * 3. Update Config, sync
- */
- u64 val = le64_to_cpu(dst->data[0]);
- bool ste_live = false;
- struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
- struct arm_smmu_s2_cfg *s2_cfg = NULL;
- struct arm_smmu_domain *smmu_domain = master->domain;
- struct arm_smmu_cmdq_ent prefetch_cmd = {
- .opcode = CMDQ_OP_PREFETCH_CFG,
- .prefetch = {
- .sid = sid,
- },
- };
-
- if (smmu_domain) {
- switch (smmu_domain->stage) {
- case ARM_SMMU_DOMAIN_S1:
- cd_table = &master->cd_table;
- break;
- case ARM_SMMU_DOMAIN_S2:
- s2_cfg = &smmu_domain->s2_cfg;
- break;
- default:
- break;
- }
- }
-
- if (val & STRTAB_STE_0_V) {
- switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
- case STRTAB_STE_0_CFG_BYPASS:
- break;
- case STRTAB_STE_0_CFG_S1_TRANS:
- case STRTAB_STE_0_CFG_S2_TRANS:
- ste_live = true;
- break;
- case STRTAB_STE_0_CFG_ABORT:
- BUG_ON(!disable_bypass);
- break;
- default:
- BUG(); /* STE corruption */
- }
- }
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
- /* Nuke the existing STE_0 value, as we're going to rewrite it */
- val = STRTAB_STE_0_V;
+static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+{
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+}
- /* Bypass/fault */
- if (!smmu_domain || !(cd_table || s2_cfg)) {
- if (!smmu_domain && disable_bypass)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master)
+{
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
- dst->data[0] = cpu_to_le64(val);
- dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- dst->data[2] = 0; /* Nuke the VMID */
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
+ (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+ FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+ ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !master->stall_enabled) ?
+ STRTAB_STE_1_S1STALLD :
+ 0) |
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H) {
/*
- * The SMMU can perform negative caching, so we must sync
- * the STE regardless of whether the old value was live.
+ * To support BTM the streamworld needs to match the
+ * configuration of the CPU so that the ASID broadcasts are
+ * properly matched. This means either S/NS-EL2-E2H (hypervisor)
+ * or NS-EL1 (guest). Since an SVA domain can be installed in a
+ * PASID this should always use a BTM compatible configuration
+ * if the HW supports it.
*/
- if (smmu)
- arm_smmu_sync_ste_for_sid(smmu, sid);
- return;
- }
-
- if (cd_table) {
- u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
- STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
-
- BUG_ON(ste_live);
- dst->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
- FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
- FIELD_PREP(STRTAB_STE_1_STRW, strw));
-
- if (smmu->features & ARM_SMMU_FEAT_STALLS &&
- !master->stall_enabled)
- dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
+ } else {
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
- val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
- FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
- FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
- FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
+ /*
+ * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
+ * arm_smmu_domain_alloc_id()
+ */
+ target->data[2] =
+ cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
+}
- if (s2_cfg) {
- BUG_ON(ste_live);
- dst->data[2] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
- FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
+ const struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
+ &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ u64 vtcr_val;
+
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
+ FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+
+ vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
+ target->data[2] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
+ STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
- STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2ENDI |
#endif
- STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
- STRTAB_STE_2_S2R);
+ STRTAB_STE_2_S2PTW |
+ STRTAB_STE_2_S2R);
- dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
-
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
- }
-
- if (master->ats_enabled)
- dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
- STRTAB_STE_1_EATS_TRANS));
-
- arm_smmu_sync_ste_for_sid(smmu, sid);
- /* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst->data[0], cpu_to_le64(val));
- arm_smmu_sync_ste_for_sid(smmu, sid);
-
- /* It's likely that we'll want to use the new STE soon */
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
+ STRTAB_STE_3_S2TTB_MASK);
}
-static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
- unsigned int nent, bool force)
+/*
+ * This can safely manipulate the STE memory directly without a sync sequence
+ * because the STE table has not been installed in the SMMU yet.
+ */
+static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
+ unsigned int nent)
{
unsigned int i;
- u64 val = STRTAB_STE_0_V;
-
- if (disable_bypass && !force)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab->data[0] = cpu_to_le64(val);
- strtab->data[1] = cpu_to_le64(FIELD_PREP(
- STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
- strtab->data[2] = 0;
+ if (disable_bypass)
+ arm_smmu_make_abort_ste(strtab);
+ else
+ arm_smmu_make_bypass_ste(strtab);
strtab++;
}
}
@@ -1430,7 +1597,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+ arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -1460,27 +1627,19 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
/* IRQ and event handlers */
static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
{
- int ret;
- u32 reason;
+ int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
bool ssid_valid = evt[0] & EVTQ_0_SSV;
u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
- struct iommu_fault_event fault_evt = { };
+ struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
case EVT_ID_TRANSLATION_FAULT:
- reason = IOMMU_FAULT_REASON_PTE_FETCH;
- break;
case EVT_ID_ADDR_SIZE_FAULT:
- reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
- break;
case EVT_ID_ACCESS_FAULT:
- reason = IOMMU_FAULT_REASON_ACCESS;
- break;
case EVT_ID_PERMISSION_FAULT:
- reason = IOMMU_FAULT_REASON_PERMISSION;
break;
default:
return -EOPNOTSUPP;
@@ -1490,6 +1649,9 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_S2)
return -EFAULT;
+ if (!(evt[1] & EVTQ_1_STALL))
+ return -EOPNOTSUPP;
+
if (evt[1] & EVTQ_1_RnW)
perm |= IOMMU_FAULT_PERM_READ;
else
@@ -1501,32 +1663,17 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_PnU)
perm |= IOMMU_FAULT_PERM_PRIV;
- if (evt[1] & EVTQ_1_STALL) {
- flt->type = IOMMU_FAULT_PAGE_REQ;
- flt->prm = (struct iommu_fault_page_request) {
- .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
-
- if (ssid_valid) {
- flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
- } else {
- flt->type = IOMMU_FAULT_DMA_UNRECOV;
- flt->event = (struct iommu_fault_unrecoverable) {
- .reason = reason,
- .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request) {
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
- if (ssid_valid) {
- flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
- flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
+ if (ssid_valid) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
}
mutex_lock(&smmu->streams_mutex);
@@ -1536,17 +1683,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
goto out_unlock;
}
- ret = iommu_report_device_fault(master->dev, &fault_evt);
- if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
- /* Nobody cared, abort the access */
- struct iommu_page_response resp = {
- .pasid = flt->prm.pasid,
- .grpid = flt->prm.grpid,
- .code = IOMMU_PAGE_RESP_FAILURE,
- };
- arm_smmu_page_response(master->dev, &fault_evt, &resp);
- }
-
+ iommu_report_device_fault(master->dev, &fault_evt);
out_unlock:
mutex_unlock(&smmu->streams_mutex);
return ret;
@@ -2025,15 +2162,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
- struct arm_smmu_domain *smmu_domain;
if (type == IOMMU_DOMAIN_SVA)
return arm_smmu_sva_domain_alloc();
+ return ERR_PTR(-EOPNOTSUPP);
+}
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+ struct arm_smmu_domain *smmu_domain;
/*
* Allocate the domain and initialise some of its data structures.
@@ -2042,13 +2179,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
+ if (dev) {
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+ if (ret) {
+ kfree(smmu_domain);
+ return ERR_PTR(ret);
+ }
+ }
return &smmu_domain->domain;
}
@@ -2074,12 +2221,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
u32 asid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
@@ -2111,13 +2258,12 @@ out_unlock:
return ret;
}
-static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
- typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
/* Reserve VMID 0 for stage-2 bypass STEs */
vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2125,35 +2271,21 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
if (vmid < 0)
return vmid;
- vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
cfg->vmid = (u16)vmid;
- cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
return 0;
}
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu)
{
int ret;
unsigned long ias, oas;
enum io_pgtable_fmt fmt;
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
- int (*finalise_stage_fn)(struct arm_smmu_domain *,
- struct io_pgtable_cfg *);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- return 0;
- }
+ int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2192,17 +2324,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
- domain->geometry.force_aperture = true;
+ smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
+ smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
}
smmu_domain->pgtbl_ops = pgtbl_ops;
+ smmu_domain->smmu = smmu;
return 0;
}
@@ -2225,7 +2358,8 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
}
-static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
+ const struct arm_smmu_ste *target)
{
int i, j;
struct arm_smmu_device *smmu = master->smmu;
@@ -2242,7 +2376,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
if (j < i)
continue;
- arm_smmu_write_strtab_ent(master, sid, step);
+ arm_smmu_write_ste(master, sid, step, target);
}
}
@@ -2261,12 +2395,12 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
-static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+static void arm_smmu_enable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
size_t stu;
struct pci_dev *pdev;
struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_domain *smmu_domain = master->domain;
/* Don't enable ATS at the endpoint if it's not enabled in the STE */
if (!master->ats_enabled)
@@ -2282,10 +2416,9 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
-static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+static void arm_smmu_disable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = master->domain;
-
if (!master->ats_enabled)
return;
@@ -2348,35 +2481,28 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev);
+ struct arm_smmu_domain *smmu_domain;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = master->domain;
- if (!smmu_domain)
+ if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING))
return;
- arm_smmu_disable_ats(master);
+ smmu_domain = to_smmu_domain(domain);
+ arm_smmu_disable_ats(master, smmu_domain);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- master->domain = NULL;
master->ats_enabled = false;
- arm_smmu_install_ste_for_dev(master);
- /*
- * Clearing the CD entry isn't strictly required to detach the domain
- * since the table is uninstalled anyway, but it helps avoid confusion
- * in the call to arm_smmu_write_ctx_desc on the next attach (which
- * expects the entry to be empty).
- */
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
unsigned long flags;
+ struct arm_smmu_ste target;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2398,15 +2524,10 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EBUSY;
}
- arm_smmu_detach_dev(master);
-
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
- smmu_domain->smmu = smmu;
- ret = arm_smmu_domain_finalise(domain);
- if (ret)
- smmu_domain->smmu = NULL;
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu);
} else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
@@ -2414,57 +2535,140 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
- master->domain = smmu_domain;
-
/*
- * The SMMU does not support enabling ATS with bypass. When the STE is
- * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
- * Translated transactions are denied as though ATS is disabled for the
- * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
- * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ * Prevent arm_smmu_share_asid() from trying to change the ASID
+ * of either the old or new domain while we are working on it.
+ * This allows the STE and the smmu_domain->devices list to
+ * be inconsistent during this routine.
*/
- if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
- master->ats_enabled = arm_smmu_ats_supported(master);
+ mutex_lock(&arm_smmu_asid_lock);
+
+ arm_smmu_detach_dev(master);
+
+ master->ats_enabled = arm_smmu_ats_supported(master);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
if (!master->cd_table.cdtab) {
ret = arm_smmu_alloc_cd_tables(master);
- if (ret) {
- master->domain = NULL;
+ if (ret)
+ goto out_list_del;
+ } else {
+ /*
+ * arm_smmu_write_ctx_desc() relies on the entry being
+ * invalid to work, clear any existing entry.
+ */
+ ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ if (ret)
goto out_list_del;
- }
}
- /*
- * Prevent SVA from concurrently modifying the CD or writing to
- * the CD entry
- */
- mutex_lock(&arm_smmu_asid_lock);
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- mutex_unlock(&arm_smmu_asid_lock);
- if (ret) {
- master->domain = NULL;
+ if (ret)
goto out_list_del;
- }
- }
- arm_smmu_install_ste_for_dev(master);
+ arm_smmu_make_cdtable_ste(&target, master);
+ arm_smmu_install_ste_for_dev(master, &target);
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
+ arm_smmu_install_ste_for_dev(master, &target);
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ break;
+ }
- arm_smmu_enable_ats(master);
- return 0;
+ arm_smmu_enable_ats(master, smmu_domain);
+ goto out_unlock;
out_list_del:
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+out_unlock:
+ mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
+static int arm_smmu_attach_dev_ste(struct device *dev,
+ struct arm_smmu_ste *ste)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
+
+ /*
+ * Do not allow any ASID to be changed while we are working on the STE,
+ * otherwise we could miss invalidations.
+ */
+ mutex_lock(&arm_smmu_asid_lock);
+
+ /*
+ * The SMMU does not support enabling ATS with bypass/abort. When the
+ * STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests
+ * and Translated transactions are denied as though ATS is disabled for
+ * the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
+ * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ */
+ arm_smmu_detach_dev(master);
+
+ arm_smmu_install_ste_for_dev(master, ste);
+ mutex_unlock(&arm_smmu_asid_lock);
+
+ /*
+ * This has to be done after removing the master from the
+ * arm_smmu_domain->devices to avoid races updating the same context
+ * descriptor from arm_smmu_share_asid().
+ */
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
+ return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_make_bypass_ste(&ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+ .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_make_abort_ste(&ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+ .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &arm_smmu_blocked_ops,
+};
+
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -2658,6 +2862,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
INIT_LIST_HEAD(&master->bonds);
+ INIT_LIST_HEAD(&master->domain_head);
dev_iommu_priv_set(dev, master);
ret = arm_smmu_insert_master(smmu, master);
@@ -2699,7 +2904,13 @@ static void arm_smmu_release_device(struct device *dev)
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- arm_smmu_detach_dev(master);
+
+ /* Put the STE back to what arm_smmu_init_strtab() sets */
+ if (disable_bypass && !dev->iommu->require_direct)
+ arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
+ else
+ arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
if (master->cd_table.cdtab)
@@ -2739,7 +2950,8 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
@@ -2844,8 +3056,11 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
@@ -3049,7 +3264,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+ arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
return 0;
}
@@ -3125,7 +3340,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
static void arm_smmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+
+ platform_device_msi_free_irqs_all(dev);
}
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -3166,7 +3382,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
}
/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
- ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
return;
@@ -3760,7 +3976,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;
@@ -3773,8 +3988,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
continue;
}
- step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
- arm_smmu_init_bypass_stes(step, 1, true);
+ /*
+ * STE table is not programmed to HW, see
+ * arm_smmu_init_initial_stes()
+ */
+ arm_smmu_make_bypass_ste(
+ arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 65fb388d5173..23baf117e7e4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -609,8 +609,6 @@ struct arm_smmu_ctx_desc_cfg {
struct arm_smmu_s2_cfg {
u16 vmid;
- u64 vttbr;
- u64 vtcr;
};
struct arm_smmu_strtab_cfg {
@@ -697,7 +695,6 @@ struct arm_smmu_stream {
struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
- struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
@@ -715,7 +712,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 8b04ece00420..5c7cfc51b57c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -260,6 +260,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
+ { .compatible = "qcom,x1e80100-mdss" },
{ }
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 68b6bc5e7c71..c572d877b0e1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED) {
+ if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+ return NULL;
+ }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
- if (dev) {
- struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
-
- if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
- kfree(smmu_domain);
- return NULL;
- }
- }
-
return &smmu_domain->domain;
}
@@ -1551,7 +1546,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
u32 mask, fwid = 0;
@@ -1600,7 +1596,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
+ .domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17a1c163fef6..e079bb7a993e 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -546,7 +546,8 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int qcom_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
struct platform_device *iommu_pdev;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 50ccc4f1ef81..b58f5a3311c3 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -859,6 +859,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
+ /* If anyone ever wants this we'd need support in the IOVA allocator */
+ if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
+ "Unsupported alignment constraint\n"))
+ return DMA_MAPPING_ERROR;
+
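As a concrete example of the check above: with the common 4 KiB IOVA granule, iova_mask(iovad) is 0xfff, so a device advertising a (hypothetical) DMA min_align_mask of 0x1fff would trip the warning and have the mapping fail with DMA_MAPPING_ERROR, while the usual zero or sub-page masks pass through unchanged.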
size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 2c6e9094f1e9..d98c9161948a 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1431,7 +1431,7 @@ static void exynos_iommu_release_device(struct device *dev)
}
static int exynos_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 012cd2541a68..6cf9f48e7d8c 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -51,6 +51,7 @@ config INTEL_IOMMU_SVM
depends on X86_64
select MMU_NOTIFIER
select IOMMU_SVA
+ select IOMMU_IOPF
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
@@ -64,17 +65,6 @@ config INTEL_IOMMU_DEFAULT_ON
one is found. If this option is not selected, DMAR support can
be enabled by passing intel_iommu=on to the kernel.
-config INTEL_IOMMU_BROKEN_GFX_WA
- bool "Workaround broken graphics drivers (going away soon)"
- depends on BROKEN && X86
- help
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA, at least until this
- option is removed in the 2.6.32 kernel.
-
config INTEL_IOMMU_FLOPPY_WA
def_bool y
depends on X86
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 5dabf081a779..5402b699a122 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -5,5 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
+ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
+endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 23cb80d62a9a..36d7427b1202 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1095,7 +1095,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->agaw = agaw;
iommu->msagaw = msagaw;
iommu->segment = drhd->segment;
-
+ iommu->device_rbtree = RB_ROOT;
+ spin_lock_init(&iommu->device_rbtree_lock);
+ mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
ver = readl(iommu->reg + DMAR_VER_REG);
@@ -1271,6 +1273,8 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
int head, tail;
+ struct device *dev;
+ u64 iqe_err, ite_sid;
struct q_inval *qi = iommu->qi;
int shift = qi_shift(iommu);
@@ -1315,6 +1319,13 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
tail = readl(iommu->reg + DMAR_IQT_REG);
tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
+ /*
+ * The SID field is valid only when the ITE field is set in FSTS_REG;
+ * see Intel VT-d spec r4.1, section 11.4.9.9.
+ */
+ iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
+ ite_sid = DMAR_IQER_REG_ITESID(iqe_err);
+
writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
pr_info("Invalidation Time-out Error (ITE) cleared\n");
@@ -1324,6 +1335,19 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
head = (head - 2 + QI_LENGTH) % QI_LENGTH;
} while (head != tail);
+ /*
+ * If the device was released or is no longer present, there is no
+ * need to retry the ATS invalidation request.
+ *
+ * An ite_sid value of 0 indicates an older VT-d device that does not
+ * report ite_sid; see Intel VT-d spec r4.1, section 11.4.9.9.
+ */
+ if (ite_sid) {
+ dev = device_rbtree_find(iommu, ite_sid);
+ if (!dev || !dev_is_pci(dev) ||
+ !pci_device_is_present(to_pci_dev(dev)))
+ return -ETIMEDOUT;
+ }
if (qi->desc_status[wait_index] == QI_ABORT)
return -EAGAIN;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6fb5f6fceea1..50eb9aed47cc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,7 +27,6 @@
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
-#include "../iommu-sva.h"
#include "pasid.h"
#include "cap_audit.h"
#include "perfmon.h"
@@ -97,6 +96,81 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
+static int device_rid_cmp_key(const void *key, const struct rb_node *node)
+{
+ struct device_domain_info *info =
+ rb_entry(node, struct device_domain_info, node);
+ const u16 *rid_lhs = key;
+
+ if (*rid_lhs < PCI_DEVID(info->bus, info->devfn))
+ return -1;
+
+ if (*rid_lhs > PCI_DEVID(info->bus, info->devfn))
+ return 1;
+
+ return 0;
+}
+
+static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
+{
+ struct device_domain_info *info =
+ rb_entry(lhs, struct device_domain_info, node);
+ u16 key = PCI_DEVID(info->bus, info->devfn);
+
+ return device_rid_cmp_key(&key, rhs);
+}
+
+/*
+ * Looks up an IOMMU-probed device using its source ID.
+ *
+ * Returns the pointer to the device if there is a match. Otherwise,
+ * returns NULL.
+ *
+ * Note that this helper doesn't guarantee that the device won't be
+ * released by the iommu subsystem after being returned. The caller
+ * should use its own synchronization mechanism to prevent the device
+ * from being released while it is still in use.
+ */
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
+{
+ struct device_domain_info *info = NULL;
+ struct rb_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
+ if (node)
+ info = rb_entry(node, struct device_domain_info, node);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+
+ return info ? info->dev : NULL;
+}
+
+static int device_rbtree_insert(struct intel_iommu *iommu,
+ struct device_domain_info *info)
+{
+ struct rb_node *curr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+ if (WARN_ON(curr))
+ return -EEXIST;
+
+ return 0;
+}
+
+static void device_rbtree_remove(struct device_domain_info *info)
+{
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ rb_erase(&info->node, &iommu->device_rbtree);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+}
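A minimal sketch of how a fault path might consume this lookup, mirroring the PRQ handling changed later in this patch; handle_prq_for_rid() is hypothetical and only illustrates that the caller provides its own synchronization (here iommu->iopf_lock) around the returned device:

static void handle_prq_for_rid(struct intel_iommu *iommu, u16 rid)
{
	struct device *dev;

	/* Hold iopf_lock so the device cannot be released while in use. */
	mutex_lock(&iommu->iopf_lock);
	dev = device_rbtree_find(iommu, rid);
	if (dev)
		dev_dbg(dev, "page request from RID %#06x\n", rid);
	mutex_unlock(&iommu->iopf_lock);
}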
+
/*
* This domain is a static identity mapping domain.
* 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -396,8 +470,6 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
-static void domain_update_iotlb(struct dmar_domain *domain);
-
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
@@ -1218,7 +1290,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
return NULL;
}
-static void domain_update_iotlb(struct dmar_domain *domain)
+void domain_update_iotlb(struct dmar_domain *domain)
{
struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
@@ -1368,6 +1440,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages,
+ int ih)
+{
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned int mask = ilog2(aligned_pages);
+ u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ /*
+ * Fallback to domain selective flush if no PSI support or
+ * the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+}
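As a worked example of the widening above (values chosen only for illustration): for pfn 0x1001 and pages 4, aligned_pages is 4 and bitmask is 0x3, so the base is misaligned. end_pfn is 0x1004, pfn ^ end_pfn is 0x5, and masking out bitmask leaves the lowest shared bit at position 3, so mask becomes 3. The PSI therefore covers the eight aligned pages 0x1000-0x1007, which contains the requested 0x1001-0x1004 range.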
+
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1384,42 +1496,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- if (domain->use_first_level) {
+ if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- } else {
- unsigned long bitmask = aligned_pages - 1;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ else
+ __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1443,6 +1523,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *
iommu_flush_write_buffer(iommu);
}
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+ unsigned long pfn,
+ unsigned long pages, int ih)
+{
+ struct dmar_domain *s1_domain;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ struct device_domain_info *device_info;
+ struct iommu_domain_info *info;
+ unsigned long flags;
+ unsigned long i;
+
+ xa_for_each(&s1_domain->iommu_array, i, info)
+ __iommu_flush_iotlb_psi(info->iommu, info->did,
+ pfn, pages, ih);
+
+ if (!s1_domain->has_iotlb_device)
+ continue;
+
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ list_for_each_entry(device_info, &s1_domain->devices, link)
+ /*
+ * The address translation cache on the device side caches the
+ * result of nested translation. There is no easy way
+ * to identify the exact set of nested translations
+ * affected by a change in S2. So just flush the entire
+ * device cache.
+ */
+ __iommu_flush_dev_iotlb(device_info, 0,
+ MAX_AGAW_PFN_WIDTH);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+}
+
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1462,6 +1582,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
+
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1727,34 +1850,17 @@ static void domain_exit(struct dmar_domain *domain)
kfree(domain);
}
-/*
- * Get the PASID directory size for scalable mode context entry.
- * Value of X in the PDTS field of a scalable mode context entry
- * indicates PASID directory with 2^(X + 7) entries.
- */
-static unsigned long context_get_sm_pds(struct pasid_table *table)
-{
- unsigned long pds, max_pde;
-
- max_pde = table->max_pasid >> PASID_PDE_SHIFT;
- pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
- if (pds < 7)
- return 0;
-
- return pds - 7;
-}
-
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
- struct pasid_table *table,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
+ struct dma_pte *pgd = domain->pgd;
struct context_entry *context;
- int ret;
+ int agaw, ret;
if (hw_pass_through && domain_type_is_si(domain))
translation = CONTEXT_TT_PASS_THROUGH;
@@ -1797,65 +1903,37 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
context_clear_entry(context);
+ context_set_domain_id(context, did);
- if (sm_supported(iommu)) {
- unsigned long pds;
-
- /* Setup the PASID DIR pointer: */
- pds = context_get_sm_pds(table);
- context->lo = (u64)virt_to_phys(table->table) |
- context_pdts(pds);
-
- /* Setup the RID_PASID field: */
- context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
-
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
/*
- * Setup the Device-TLB enable bit and Page request
- * Enable bit:
+ * Skip top levels of page tables for iommu which has
+ * less agaw than default. Unnecessary for PT mode.
*/
- if (info && info->ats_supported)
- context_set_sm_dte(context);
- if (info && info->pri_supported)
- context_set_sm_pre(context);
- if (info && info->pasid_supported)
- context_set_pasid(context);
- } else {
- struct dma_pte *pgd = domain->pgd;
- int agaw;
-
- context_set_domain_id(context, did);
-
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- /*
- * Skip top levels of page tables for iommu which has
- * less agaw than default. Unnecessary for PT mode.
- */
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- ret = -ENOMEM;
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd))
- goto out_unlock;
- }
-
- if (info && info->ats_supported)
- translation = CONTEXT_TT_DEV_IOTLB;
- else
- translation = CONTEXT_TT_MULTI_LEVEL;
-
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, agaw);
- } else {
- /*
- * In pass through mode, AW must be programmed to
- * indicate the largest AGAW value supported by
- * hardware. And ASR is ignored by hardware.
- */
- context_set_address_width(context, iommu->msagaw);
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ goto out_unlock;
}
- context_set_translation_type(context, translation);
+ if (info && info->ats_supported)
+ translation = CONTEXT_TT_DEV_IOTLB;
+ else
+ translation = CONTEXT_TT_MULTI_LEVEL;
+
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, agaw);
+ } else {
+ /*
+ * In pass through mode, AW must be programmed to
+ * indicate the largest AGAW value supported by
+ * hardware. And ASR is ignored by hardware.
+ */
+ context_set_address_width(context, iommu->msagaw);
}
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
if (!ecap_coherent(iommu->ecap))
@@ -1885,43 +1963,29 @@ out_unlock:
return ret;
}
-struct domain_context_mapping_data {
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- struct pasid_table *table;
-};
-
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
- struct domain_context_mapping_data *data = opaque;
+ struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *domain = opaque;
- return domain_context_mapping_one(data->domain, data->iommu,
- data->table, PCI_BUS_NUM(alias),
- alias & 0xff);
+ return domain_context_mapping_one(domain, iommu,
+ PCI_BUS_NUM(alias), alias & 0xff);
}
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct domain_context_mapping_data data;
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
- struct pasid_table *table;
-
- table = intel_pasid_get_table(dev);
if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, table,
- bus, devfn);
-
- data.domain = domain;
- data.iommu = iommu;
- data.table = table;
+ return domain_context_mapping_one(domain, iommu, bus, devfn);
return pci_for_each_dma_alias(to_pci_dev(dev),
- &domain_context_mapping_cb, &data);
+ domain_context_mapping_cb, domain);
}
/* Returns a number of VTD pages, but aligned to MM page size */
@@ -1985,6 +2049,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
+ if (domain->nested_parent)
+ parent_domain_flush(domain, start_pfn,
+ lvl_pages, 0);
}
pte++;
@@ -2108,9 +2175,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
struct context_entry *context;
u16 did_old;
- if (!iommu)
- return;
-
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
@@ -2118,14 +2182,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
return;
}
- if (sm_supported(iommu)) {
- if (hw_pass_through && domain_type_is_si(info->domain))
- did_old = FLPT_DEFAULT_DID;
- else
- did_old = domain_id_iommu(info->domain, iommu);
- } else {
- did_old = context_domain_id(context);
- }
+ did_old = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
@@ -2136,9 +2193,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- if (sm_supported(iommu))
- qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
-
iommu->flush.flush_iotlb(iommu,
did_old,
0,
@@ -2278,28 +2332,19 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
- /* PASID table is mandatory for a PCI device in scalable mode. */
- if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
- /* Setup the PASID entry for requests without PASID: */
- if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu,
- dev, IOMMU_NO_PASID);
- else if (domain->use_first_level)
- ret = domain_setup_first_level(iommu, domain, dev,
- IOMMU_NO_PASID);
- else
- ret = intel_pasid_setup_second_level(iommu, domain,
- dev, IOMMU_NO_PASID);
- if (ret) {
- dev_err(dev, "Setup RID2PASID failed\n");
- device_block_translation(dev);
- return ret;
- }
- }
+ if (dev_is_real_dma_subdevice(dev))
+ return 0;
+
+ if (!sm_supported(iommu))
+ ret = domain_context_mapping(domain, dev);
+ else if (hw_pass_through && domain_type_is_si(domain))
+ ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
+ else if (domain->use_first_level)
+ ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
+ else
+ ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
- ret = domain_context_mapping(domain, dev);
if (ret) {
- dev_err(dev, "Domain context map failed\n");
device_block_translation(dev);
return ret;
}
@@ -2660,10 +2705,6 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
}
-#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- dmar_map_gfx = 0;
-#endif
-
if (!dmar_map_gfx)
iommu_identity_mapping |= IDENTMAP_GFX;
@@ -3747,30 +3788,6 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
- struct intel_iommu *iommu = info->iommu;
- unsigned long flags;
-
- if (!dev_is_real_dma_subdevice(info->dev)) {
- if (dev_is_pci(info->dev) && sm_supported(iommu))
- intel_pasid_tear_down_entry(iommu, info->dev,
- IOMMU_NO_PASID, false);
-
- iommu_disable_pci_caps(info);
- domain_context_clear(info);
- }
-
- spin_lock_irqsave(&domain->lock, flags);
- list_del(&info->link);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- domain_detach_iommu(domain, iommu);
- info->domain = NULL;
-}
-
/*
* Clear the page table pointer in context or pasid table entries so that
* all DMA requests without PASID from the device are blocked. If the page
@@ -3883,6 +3900,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
/* Must be NESTING domain */
@@ -3908,11 +3926,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
if (!domain)
return ERR_PTR(-ENOMEM);
- if (nested_parent)
- to_dmar_domain(domain)->nested_parent = true;
+ dmar_domain = to_dmar_domain(domain);
+
+ if (nested_parent) {
+ dmar_domain->nested_parent = true;
+ INIT_LIST_HEAD(&dmar_domain->s1_domains);
+ spin_lock_init(&dmar_domain->s1_lock);
+ }
if (dirty_tracking) {
- if (to_dmar_domain(domain)->use_first_level) {
+ if (dmar_domain->use_first_level) {
iommu_domain_free(domain);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -3924,8 +3947,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ WARN_ON(dmar_domain->nested_parent &&
+ !list_empty(&dmar_domain->s1_domains));
if (domain != &si_domain->domain)
- domain_exit(to_dmar_domain(domain));
+ domain_exit(dmar_domain);
}
int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -3965,6 +3992,10 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ context_copied(iommu, info->bus, info->devfn))
+ return intel_pasid_setup_sm_context(dev);
+
return 0;
}
@@ -4107,6 +4138,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, start_pfn, nrpages,
+ list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4265,26 +4299,50 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
- kfree(info);
- return ERR_PTR(ret);
+ goto clear_rbtree;
+ }
+
+ if (!context_copied(iommu, info->bus, info->devfn)) {
+ ret = intel_pasid_setup_sm_context(dev);
+ if (ret)
+ goto free_table;
}
}
intel_iommu_debugfs_create_dev(info);
return &iommu->iommu;
+free_table:
+ intel_pasid_free_table(dev);
+clear_rbtree:
+ device_rbtree_remove(info);
+free:
+ kfree(info);
+
+ return ERR_PTR(ret);
}
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ mutex_lock(&iommu->iopf_lock);
+ device_rbtree_remove(info);
+ mutex_unlock(&iommu->iopf_lock);
+
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ !context_copied(iommu, info->bus, info->devfn))
+ intel_pasid_teardown_sm_context(dev);
- dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
kfree(info);
@@ -4427,23 +4485,15 @@ static int intel_iommu_enable_iopf(struct device *dev)
if (ret)
return ret;
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret)
- goto iopf_remove_device;
-
ret = pci_enable_pri(pdev, PRQ_DEPTH);
- if (ret)
- goto iopf_unregister_handler;
+ if (ret) {
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
+ return ret;
+ }
+
info->pri_enabled = 1;
return 0;
-
-iopf_unregister_handler:
- iommu_unregister_device_fault_handler(dev);
-iopf_remove_device:
- iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- return ret;
}
static int intel_iommu_disable_iopf(struct device *dev)
@@ -4464,14 +4514,7 @@ static int intel_iommu_disable_iopf(struct device *dev)
*/
pci_disable_pri(to_pci_dev(dev));
info->pri_enabled = 0;
-
- /*
- * With PRI disabled and outstanding PRQs drained, unregistering
- * fault handler and removing device from iopf queue should never
- * fail.
- */
- WARN_ON(iommu_unregister_device_fault_handler(dev));
- WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
return 0;
}
@@ -4664,21 +4707,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
return vtd;
}
+/*
+ * Set dirty tracking for the device list of a domain. The caller must
+ * hold the domain->lock when calling it.
+ */
+static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, devices, link) {
+ ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
+ IOMMU_NO_PASID, enable);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
+ bool enable)
+{
+ struct dmar_domain *s1_domain;
+ unsigned long flags;
+ int ret;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ ret = device_set_dirty_tracking(&s1_domain->devices, enable);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ if (ret)
+ goto err_unwind;
+ }
+ spin_unlock(&domain->s1_lock);
+ return 0;
+
+err_unwind:
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ device_set_dirty_tracking(&s1_domain->devices,
+ domain->dirty_tracking);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+ return ret;
+}
+
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct device_domain_info *info;
int ret;
spin_lock(&dmar_domain->lock);
if (dmar_domain->dirty_tracking == enable)
goto out_unlock;
- list_for_each_entry(info, &dmar_domain->devices, link) {
- ret = intel_pasid_setup_dirty_tracking(info->iommu,
- info->domain, info->dev,
- IOMMU_NO_PASID, enable);
+ ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
+ if (ret)
+ goto err_unwind;
+
+ if (dmar_domain->nested_parent) {
+ ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
if (ret)
goto err_unwind;
}
@@ -4690,10 +4782,8 @@ out_unlock:
return 0;
err_unwind:
- list_for_each_entry(info, &dmar_domain->devices, link)
- intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain,
- info->dev, IOMMU_NO_PASID,
- dmar_domain->dirty_tracking);
+ device_set_dirty_tracking(&dmar_domain->devices,
+ dmar_domain->dirty_tracking);
spin_unlock(&dmar_domain->lock);
return ret;
}
@@ -4743,6 +4833,7 @@ static const struct iommu_dirty_ops intel_dirty_ops = {
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
+ .release_domain = &blocking_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d02f916d8e59..404d2476a877 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -627,6 +627,10 @@ struct dmar_domain {
int agaw;
/* maximum mapped address */
u64 max_addr;
+ /* Protect the s1_domains list */
+ spinlock_t s1_lock;
+ /* Track s1_domains nested on this domain */
+ struct list_head s1_domains;
};
/* Nested user domain */
@@ -637,6 +641,8 @@ struct dmar_domain {
unsigned long s1_pgtbl;
/* page table attributes */
struct iommu_hwpt_vtd_s1 s1_cfg;
+ /* link to parent domain siblings */
+ struct list_head s2_link;
};
};
@@ -713,9 +719,16 @@ struct intel_iommu {
#endif
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
+ /* Synchronization between fault report and iommu device release. */
+ struct mutex iopf_lock;
struct q_inval *qi; /* Queued invalidation info */
u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
+ /* rb tree for all probed devices */
+ struct rb_root device_rbtree;
+ /* protect the device_rbtree */
+ spinlock_t device_rbtree_lock;
+
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
struct irq_domain *ir_domain;
@@ -749,6 +762,8 @@ struct device_domain_info {
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
struct pasid_table *pasid_table; /* pasid table */
+ /* device tracking node (lookup by PCI RID) */
+ struct rb_node node;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
@@ -1060,6 +1075,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
+void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
@@ -1074,13 +1090,14 @@ void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
const struct iommu_user_data *user_data);
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
int intel_svm_finish_prq(struct intel_iommu *iommu);
-int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index f26c7f1c46cc..a7d68f3d518a 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ domain_update_iotlb(dmar_domain);
+
return 0;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
{
- kfree(to_dmar_domain(domain));
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct dmar_domain *s2_domain = dmar_domain->s2_domain;
+
+ spin_lock(&s2_domain->s1_lock);
+ list_del(&dmar_domain->s2_link);
+ spin_unlock(&s2_domain->s1_lock);
+ kfree(dmar_domain);
}
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
@@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
}
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
- unsigned long npages, bool ih)
+ u64 npages, bool ih)
{
struct iommu_domain_info *info;
unsigned int mask;
@@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
+ spin_lock(&s2_domain->s1_lock);
+ list_add(&domain->s2_link, &s2_domain->s1_domains);
+ spin_unlock(&s2_domain->s1_lock);
+
return &domain->domain;
}
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 3239cefa4c33..11f0b856d74c 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -214,6 +214,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
if (!info || !info->ats_enabled)
return;
+ if (pci_dev_is_disconnected(to_pci_dev(dev)))
+ return;
+
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
pfsid = info->pfsid;
@@ -428,7 +431,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Set up dirty tracking on a second only or nested translation type.
*/
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled)
{
@@ -445,7 +447,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
return -ENODEV;
}
- did = domain_id_iommu(domain, iommu);
+ did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
pgtt != PASID_ENTRY_PGTT_NESTED) {
@@ -658,6 +660,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, s2_domain->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (s2_domain->dirty_tracking)
+ pasid_set_ssade(pte);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
@@ -666,3 +670,205 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
return 0;
}
+
+/*
+ * Interfaces to setup or teardown a pasid table to the scalable-mode
+ * context table entry:
+ */
+
+static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, false);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+ spin_unlock(&iommu->lock);
+
+ /*
+ * Cache invalidation for changes to a scalable-mode context table
+ * entry.
+ *
+ * Section 6.5.3.3 of the VT-d spec:
+ * - Device-selective context-cache invalidation;
+ * - Domain-selective PASID-cache invalidation to affected domains
+ * (can be skipped if all PASID entries were not-present);
+ * - Domain-selective IOTLB invalidation to affected domains;
+ * - Global Device-TLB invalidation to affected functions.
+ *
+ * The iommu has been parked in the blocking state. All domains have
+ * been detached from the device or PASID. The PASID and IOTLB caches
+ * have been invalidated during the domain detach path.
+ */
+ iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+ devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+}
+
+static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev == &pdev->dev)
+ device_pasid_table_teardown(dev, PCI_BUS_NUM(alias), alias & 0xff);
+
+ return 0;
+}
+
+void intel_pasid_teardown_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev)) {
+ device_pasid_table_teardown(dev, info->bus, info->devfn);
+ return;
+ }
+
+ pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
+}
+
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+ unsigned long pds, max_pde;
+
+ max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+ pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
+ if (pds < 7)
+ return 0;
+
+ return pds - 7;
+}
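As a worked example (assuming PASID_PDE_SHIFT is 6, i.e. 64 PASID-table entries per directory entry): with max_pasid = 2^20, max_pde is 2^14, find_first_bit() returns 14, so pds is 14 - 7 = 7 and the PDTS field encodes a PASID directory of 2^(7 + 7) = 16384 entries, exactly covering max_pde.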
+
+static int context_entry_set_pasid_table(struct context_entry *context,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct pasid_table *table = info->pasid_table;
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long pds;
+
+ context_clear_entry(context);
+
+ pds = context_get_sm_pds(table);
+ context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
+ context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
+
+ if (info->ats_supported)
+ context_set_sm_dte(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
+ if (info->pasid_supported)
+ context_set_pasid(context);
+
+ context_set_fault_enable(context);
+ context_set_present(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ return 0;
+}
+
+static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, true);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return -ENOMEM;
+ }
+
+ if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+ spin_unlock(&iommu->lock);
+ return 0;
+ }
+
+ if (context_copied(iommu, bus, devfn)) {
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ /*
+ * For kdump cases, old valid entries may be cached due to
+ * the in-flight DMA and copied pgtable, but there is no
+ * unmapping behaviour for them, thus we need explicit cache
+ * flushes for all affected domain IDs and PASIDs used in
+ * the copied PASID table. Given that we have no idea about
+ * which domain IDs and PASIDs were used in the copied tables, so
+ * upgrade to global PASID and IOTLB cache invalidation.
+ */
+ iommu->flush.flush_context(iommu, 0,
+ PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+
+ /*
+ * At this point, the device is expected to have completed reset
+ * during its driver probe stage, so no in-flight DMA exists and
+ * no further action is needed here.
+ */
+ clear_context_copied(iommu, bus, devfn);
+ }
+
+ context_entry_set_pasid_table(context, dev);
+ spin_unlock(&iommu->lock);
+
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries, we don't need to flush the caches. If it does
+ * cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ }
+
+ return 0;
+}
+
+static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev != &pdev->dev)
+ return 0;
+
+ return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+/*
+ * Install the device's PASID table into its context table entry.
+ *
+ * The PASID table is installed in the context entries of both the
+ * device itself and its DMA alias requester IDs.
+ */
+int intel_pasid_setup_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev))
+ return device_pasid_table_setup(dev, info->bus, info->devfn);
+
+ return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
+}
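A minimal sketch of the intended ordering, condensed from the probe-path hunk in iommu.c earlier in this patch; example_enable_sm_pasid() is hypothetical and elides most error handling:

static int example_enable_sm_pasid(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	int ret;

	ret = intel_pasid_alloc_table(dev);
	if (ret)
		return ret;

	/* A copied (kdump) context entry is reprogrammed later, at attach time. */
	if (!context_copied(info->iommu, info->bus, info->devfn))
		ret = intel_pasid_setup_sm_context(dev);
	if (ret)
		intel_pasid_free_table(dev);
	return ret;
}

Teardown runs in the reverse order, intel_pasid_teardown_sm_context() before intel_pasid_free_table(), as in the release_device() hunk above.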
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 8d40d4c66e31..da9978fef7ac 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
@@ -319,4 +318,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
+int intel_pasid_setup_sm_context(struct device *dev);
+void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 94ee70ac38e3..adc4de6bbd88 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -33,7 +33,7 @@ int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
spin_lock_irqsave(&latency_lock, flags);
if (!iommu->perf_statistic) {
- iommu->perf_statistic = kzalloc(sizeof(*lstat) * DMAR_LATENCY_NUM,
+ iommu->perf_statistic = kcalloc(DMAR_LATENCY_NUM, sizeof(*lstat),
GFP_ATOMIC);
if (!iommu->perf_statistic) {
ret = -ENOMEM;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 40edd282903f..c1bed89b1026 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -22,7 +22,6 @@
#include "iommu.h"
#include "pasid.h"
#include "perf.h"
-#include "../iommu-sva.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
@@ -315,10 +314,11 @@ out:
return 0;
}
-static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
- struct iommu_domain *domain, ioasid_t pasid)
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
@@ -360,7 +360,6 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
sdev->iommu = iommu;
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
- init_rcu_head(&sdev->rcu);
if (info->ats_enabled) {
sdev->qdep = info->ats_qdep;
if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
@@ -408,13 +407,6 @@ void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
if (svm->notifier.ops)
mmu_notifier_unregister(&svm->notifier, mm);
pasid_private_remove(svm->pasid);
- /*
- * We mandate that no page faults may be outstanding
- * for the PASID when intel_svm_unbind_mm() is called.
- * If that is not obeyed, subtle errors will happen.
- * Let's make them less subtle...
- */
- memset(svm, 0x6b, sizeof(*svm));
kfree(svm);
}
}
@@ -562,16 +554,12 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
return prot;
}
-static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
- struct page_req_dsc *desc)
+static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
+ struct page_req_dsc *desc)
{
- struct iommu_fault_event event;
-
- if (!dev || !dev_is_pci(dev))
- return -ENODEV;
+ struct iopf_fault event = { };
/* Fill in event data for device specific processing */
- memset(&event, 0, sizeof(struct iommu_fault_event));
event.fault.type = IOMMU_FAULT_PAGE_REQ;
event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
event.fault.prm.pasid = desc->pasid;
@@ -603,7 +591,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
}
- return iommu_report_device_fault(dev, &event);
+ iommu_report_device_fault(dev, &event);
}
static void handle_bad_prq_event(struct intel_iommu *iommu,
@@ -650,7 +638,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct intel_iommu *iommu = d;
struct page_req_dsc *req;
int head, tail, handled;
- struct pci_dev *pdev;
+ struct device *dev;
u64 address;
/*
@@ -696,23 +684,22 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance;
- pdev = pci_get_domain_bus_and_slot(iommu->segment,
- PCI_BUS_NUM(req->rid),
- req->rid & 0xff);
/*
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (!pdev)
+ mutex_lock(&iommu->iopf_lock);
+ dev = device_rbtree_find(iommu, req->rid);
+ if (!dev) {
+ mutex_unlock(&iommu->iopf_lock);
goto bad_req;
+ }
- if (intel_svm_prq_report(iommu, &pdev->dev, req))
- handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- else
- trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
- req->priv_data[0], req->priv_data[1],
- iommu->prq_seq_number++);
- pci_dev_put(pdev);
+ intel_svm_prq_report(iommu, dev, req);
+ trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
+ req->priv_data[0], req->priv_data[1],
+ iommu->prq_seq_number++);
+ mutex_unlock(&iommu->iopf_lock);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -742,9 +729,8 @@ prq_advance:
return IRQ_RETVAL(handled);
}
-int intel_svm_page_response(struct device *dev,
- struct iommu_fault_event *evt,
- struct iommu_page_response *msg)
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
@@ -753,7 +739,6 @@ int intel_svm_page_response(struct device *dev,
bool private_present;
bool pasid_present;
bool last_page;
- int ret = 0;
u16 sid;
prm = &evt->fault.prm;
@@ -762,16 +747,6 @@ int intel_svm_page_response(struct device *dev,
private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
- if (!pasid_present) {
- ret = -EINVAL;
- goto out;
- }
-
- if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
- ret = -EINVAL;
- goto out;
- }
-
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
@@ -800,17 +775,6 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0);
}
-out:
- return ret;
-}
-
-static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
-
- return intel_svm_bind_mm(iommu, dev, domain, pasid);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index e5b8b9110c13..06d78fcc79fd 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -11,101 +11,140 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "iommu-sva.h"
+#include "iommu-priv.h"
-/**
- * struct iopf_queue - IO Page Fault queue
- * @wq: the fault workqueue
- * @devices: devices attached to this queue
- * @lock: protects the device list
+/*
+ * Return the fault parameter of a device if it exists. Otherwise, return NULL.
+ * On a successful return, the caller takes a reference of this parameter and
+ * should put it after use by calling iopf_put_dev_fault_param().
*/
-struct iopf_queue {
- struct workqueue_struct *wq;
- struct list_head devices;
- struct mutex lock;
-};
+static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev)
+{
+ struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *fault_param;
-/**
- * struct iopf_device_param - IO Page Fault data attached to a device
- * @dev: the device that owns this param
- * @queue: IOPF queue
- * @queue_list: index into queue->devices
- * @partial: faults that are part of a Page Request Group for which the last
- * request hasn't been submitted yet.
- */
-struct iopf_device_param {
- struct device *dev;
- struct iopf_queue *queue;
- struct list_head queue_list;
- struct list_head partial;
-};
-
-struct iopf_fault {
- struct iommu_fault fault;
- struct list_head list;
-};
-
-struct iopf_group {
- struct iopf_fault last_fault;
- struct list_head faults;
- struct work_struct work;
- struct device *dev;
-};
-
-static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
- enum iommu_page_response_code status)
+ rcu_read_lock();
+ fault_param = rcu_dereference(param->fault_param);
+ if (fault_param && !refcount_inc_not_zero(&fault_param->users))
+ fault_param = NULL;
+ rcu_read_unlock();
+
+ return fault_param;
+}
+
+/* Caller must hold a reference of the fault parameter. */
+static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
{
- struct iommu_page_response resp = {
- .version = IOMMU_PAGE_RESP_VERSION_1,
- .pasid = iopf->fault.prm.pasid,
- .grpid = iopf->fault.prm.grpid,
- .code = status,
- };
+ if (refcount_dec_and_test(&fault_param->users))
+ kfree_rcu(fault_param, rcu);
+}
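/*
 * Illustrative sketch (not part of this patch): the lookup/put pair above is
 * the usual RCU + refcount pattern.  example_fault_path() is a hypothetical
 * caller in the fault-report path; the fault parameter is only freed (via
 * kfree_rcu()) once the last reference is dropped.
 */
static void example_fault_path(struct device *dev)
{
	struct iommu_fault_param *fault_param;

	fault_param = iopf_get_dev_fault_param(dev);
	if (!fault_param)
		return;	/* device is not attached to an iopf queue */

	/* ... build and hand off the fault, possibly passing the ref along ... */

	iopf_put_dev_fault_param(fault_param);
}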
- if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
- (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
- resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+static void __iopf_free_group(struct iopf_group *group)
+{
+ struct iopf_fault *iopf, *next;
- return iommu_page_response(dev, &resp);
+ list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+ kfree(iopf);
+ }
+
+ /* Pair with iommu_report_device_fault(). */
+ iopf_put_dev_fault_param(group->fault_param);
}
-static void iopf_handler(struct work_struct *work)
+void iopf_free_group(struct iopf_group *group)
+{
+ __iopf_free_group(group);
+ kfree(group);
+}
+EXPORT_SYMBOL_GPL(iopf_free_group);
+
+static struct iommu_domain *get_domain_for_iopf(struct device *dev,
+ struct iommu_fault *fault)
{
- struct iopf_group *group;
struct iommu_domain *domain;
- struct iopf_fault *iopf, *next;
- enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
- group = container_of(work, struct iopf_group, work);
- domain = iommu_get_domain_for_dev_pasid(group->dev,
- group->last_fault.fault.prm.pasid, 0);
- if (!domain || !domain->iopf_handler)
- status = IOMMU_PAGE_RESP_INVALID;
+ if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+ domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
+ if (IS_ERR(domain))
+ domain = NULL;
+ } else {
+ domain = iommu_get_domain_for_dev(dev);
+ }
- list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ if (!domain || !domain->iopf_handler) {
+ dev_warn_ratelimited(dev,
+ "iopf (pasid %d) without domain attached or handler installed\n",
+ fault->prm.pasid);
+
+ return NULL;
+ }
+
+ return domain;
+}
+
+/* Non-last request of a group. Postpone until the last one. */
+static int report_partial_fault(struct iommu_fault_param *fault_param,
+ struct iommu_fault *fault)
+{
+ struct iopf_fault *iopf;
+
+ iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
+ if (!iopf)
+ return -ENOMEM;
+
+ iopf->fault = *fault;
+
+ mutex_lock(&fault_param->lock);
+ list_add(&iopf->list, &fault_param->partial);
+ mutex_unlock(&fault_param->lock);
+
+ return 0;
+}
+
+static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
+ struct iopf_fault *evt,
+ struct iopf_group *abort_group)
+{
+ struct iopf_fault *iopf, *next;
+ struct iopf_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
/*
- * For the moment, errors are sticky: don't handle subsequent
- * faults in the group if there is an error.
+ * We always need to construct the group as we need it to abort
+ * the request at the driver if it can't be handled.
*/
- if (status == IOMMU_PAGE_RESP_SUCCESS)
- status = domain->iopf_handler(&iopf->fault,
- domain->fault_data);
+ group = abort_group;
+ }
- if (!(iopf->fault.prm.flags &
- IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
- kfree(iopf);
+ group->fault_param = iopf_param;
+ group->last_fault.fault = evt->fault;
+ INIT_LIST_HEAD(&group->faults);
+ INIT_LIST_HEAD(&group->pending_node);
+ list_add(&group->last_fault.list, &group->faults);
+
+ /* See if we have partial faults for this group */
+ mutex_lock(&iopf_param->lock);
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
+ /* Insert *before* the last fault */
+ list_move(&iopf->list, &group->faults);
}
+ list_add(&group->pending_node, &iopf_param->faults);
+ mutex_unlock(&iopf_param->lock);
- iopf_complete_group(group->dev, &group->last_fault, status);
- kfree(group);
+ return group;
}
/**
- * iommu_queue_iopf - IO Page Fault handler
- * @fault: fault event
- * @cookie: struct device, passed to iommu_register_device_fault_handler.
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
*
- * Add a fault to the device workqueue, to be handled by mm.
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. If the fault cannot be delivered to a fault handler, the core
+ * completes evt via ops->page_response() if required.
*
* This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
* them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -137,83 +176,57 @@ static void iopf_handler(struct work_struct *work)
* freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults
* have been flushed.
- *
- * Return: 0 on success and <0 on error.
*/
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
- int ret;
+ struct iommu_fault *fault = &evt->fault;
+ struct iommu_fault_param *iopf_param;
+ struct iopf_group abort_group = {};
struct iopf_group *group;
- struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
- struct device *dev = cookie;
- struct dev_iommu *param = dev->iommu;
-
- lockdep_assert_held(&param->lock);
-
- if (fault->type != IOMMU_FAULT_PAGE_REQ)
- /* Not a recoverable page fault */
- return -EOPNOTSUPP;
-
- /*
- * As long as we're holding param->lock, the queue can't be unlinked
- * from the device and therefore cannot disappear.
- */
- iopf_param = param->iopf_param;
- if (!iopf_param)
- return -ENODEV;
+ iopf_param = iopf_get_dev_fault_param(dev);
+ if (WARN_ON(!iopf_param))
+ return;
if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
- if (!iopf)
- return -ENOMEM;
-
- iopf->fault = *fault;
-
- /* Non-last request of a group. Postpone until the last one */
- list_add(&iopf->list, &iopf_param->partial);
-
- return 0;
+ report_partial_fault(iopf_param, fault);
+ iopf_put_dev_fault_param(iopf_param);
+ /* A request that is not the last does not need to be ack'd */
}
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group) {
- /*
- * The caller will send a response to the hardware. But we do
- * need to clean up before leaving, otherwise partial faults
- * will be stuck.
- */
- ret = -ENOMEM;
- goto cleanup_partial;
- }
+ /*
+ * This is the last page fault of a group. Allocate an iopf group and
+ * pass it to domain's page fault handler. The group holds a reference
+ * count of the fault parameter. It will be released after response or
+ * error path of this function. If an error is returned, the caller
+ * will send a response to the hardware. We need to clean up before
+ * leaving, otherwise partial faults will be stuck.
+ */
+ group = iopf_group_alloc(iopf_param, evt, &abort_group);
+ if (group == &abort_group)
+ goto err_abort;
- group->dev = dev;
- group->last_fault.fault = *fault;
- INIT_LIST_HEAD(&group->faults);
- list_add(&group->last_fault.list, &group->faults);
- INIT_WORK(&group->work, iopf_handler);
+ group->domain = get_domain_for_iopf(dev, fault);
+ if (!group->domain)
+ goto err_abort;
- /* See if we have partial faults for this group */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
- if (iopf->fault.prm.grpid == fault->prm.grpid)
- /* Insert *before* the last fault */
- list_move(&iopf->list, &group->faults);
- }
+ /*
+ * On success iopf_handler must call iopf_group_response() and
+ * iopf_free_group()
+ */
+ if (group->domain->iopf_handler(group))
+ goto err_abort;
- queue_work(iopf_param->queue->wq, &group->work);
- return 0;
+ return;
-cleanup_partial:
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
- if (iopf->fault.prm.grpid == fault->prm.grpid) {
- list_del(&iopf->list);
- kfree(iopf);
- }
- }
- return ret;
+err_abort:
+ iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
+ if (group == &abort_group)
+ __iopf_free_group(group);
+ else
+ iopf_free_group(group);
}
-EXPORT_SYMBOL_GPL(iommu_queue_iopf);
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
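/*
 * Illustrative sketch (not part of this patch): how an IOMMU driver's threaded
 * IRQ handler might feed a hardware page request into
 * iommu_report_device_fault().  struct example_prq_entry and
 * example_handle_prq() are hypothetical stand-ins for the driver's own
 * hardware descriptor and decode path.
 */
struct example_prq_entry {	/* hypothetical HW page-request descriptor */
	u32 pasid;
	u32 grpid;
	u64 addr;
	bool last;
};

static void example_handle_prq(struct device *dev, struct example_prq_entry *e)
{
	struct iopf_fault evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.pasid = e->pasid,
				.grpid = e->grpid,
				.addr = e->addr,
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 (e->last ? IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE : 0),
			},
		},
	};

	/* The core owns completion from here on; no return value to check. */
	iommu_report_device_fault(dev, &evt);
}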
/**
* iopf_queue_flush_dev - Ensure that all queued faults have been processed
@@ -229,26 +242,52 @@ EXPORT_SYMBOL_GPL(iommu_queue_iopf);
*/
int iopf_queue_flush_dev(struct device *dev)
{
- int ret = 0;
- struct iopf_device_param *iopf_param;
- struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *iopf_param;
- if (!param)
+ /*
+ * It's a driver bug to be here after iopf_queue_remove_device().
+ * Therefore, it's safe to dereference the fault parameter without
+ * holding the lock.
+ */
+ iopf_param = rcu_dereference_check(dev->iommu->fault_param, true);
+ if (WARN_ON(!iopf_param))
return -ENODEV;
- mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param)
- flush_workqueue(iopf_param->queue->wq);
- else
- ret = -ENODEV;
- mutex_unlock(&param->lock);
+ flush_workqueue(iopf_param->queue->wq);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
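/*
 * Illustrative sketch (not part of this patch): a typical caller drains the
 * fault workqueue while tearing down a PASID, after the hardware has been
 * told to stop generating page requests for it.  The example_*() helpers are
 * hypothetical driver-specific steps.
 */
static void example_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
	example_clear_pasid_entry(dev, pasid);	/* HW stops faulting on this PASID */
	example_drain_hw_prq(dev, pasid);	/* flush the hardware request queue */
	iopf_queue_flush_dev(dev);		/* wait for queued fault work to finish */
}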
/**
+ * iopf_group_response - Respond to a group of page faults
+ * @group: the group of faults with the same group id
+ * @status: the response code
+ */
+void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status)
+{
+ struct iommu_fault_param *fault_param = group->fault_param;
+ struct iopf_fault *iopf = &group->last_fault;
+ struct device *dev = group->fault_param->dev;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = status,
+ };
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&fault_param->lock);
+ if (!list_empty(&group->pending_node)) {
+ ops->page_response(dev, &group->last_fault, &resp);
+ list_del_init(&group->pending_node);
+ }
+ mutex_unlock(&fault_param->lock);
+}
+EXPORT_SYMBOL_GPL(iopf_group_response);
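/*
 * Illustrative sketch (not part of this patch): a minimal synchronous
 * domain->iopf_handler.  On success it owns the group and must complete it
 * with iopf_group_response() and iopf_free_group(), as required by
 * iommu_report_device_fault() above.  example_resolve() is a hypothetical
 * placeholder for the actual fault resolution.
 */
static int example_iopf_handler(struct iopf_group *group)
{
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	list_for_each_entry(iopf, &group->faults, list) {
		if (example_resolve(group->domain, &iopf->fault)) {
			status = IOMMU_PAGE_RESP_INVALID;
			break;
		}
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
	return 0;
}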
+
+/**
 * iopf_queue_discard_partial - Remove all pending partial faults
* @queue: the queue whose partial faults need to be discarded
*
@@ -261,18 +300,20 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param *iopf_param;
if (!queue)
return -EINVAL;
mutex_lock(&queue->lock);
list_for_each_entry(iopf_param, &queue->devices, queue_list) {
+ mutex_lock(&iopf_param->lock);
list_for_each_entry_safe(iopf, next, &iopf_param->partial,
list) {
list_del(&iopf->list);
kfree(iopf);
}
+ mutex_unlock(&iopf_param->lock);
}
mutex_unlock(&queue->lock);
return 0;
@@ -288,34 +329,42 @@ EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
*/
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EBUSY;
- struct iopf_device_param *iopf_param;
+ int ret = 0;
struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *fault_param;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!param)
+ if (!ops->page_response)
return -ENODEV;
- iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
- if (!iopf_param)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&iopf_param->partial);
- iopf_param->queue = queue;
- iopf_param->dev = dev;
-
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- if (!param->iopf_param) {
- list_add(&iopf_param->queue_list, &queue->devices);
- param->iopf_param = iopf_param;
- ret = 0;
+ if (rcu_dereference_check(param->fault_param,
+ lockdep_is_held(&param->lock))) {
+ ret = -EBUSY;
+ goto done_unlock;
+ }
+
+ fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
+ if (!fault_param) {
+ ret = -ENOMEM;
+ goto done_unlock;
}
+
+ mutex_init(&fault_param->lock);
+ INIT_LIST_HEAD(&fault_param->faults);
+ INIT_LIST_HEAD(&fault_param->partial);
+ fault_param->dev = dev;
+ refcount_set(&fault_param->users, 1);
+ list_add(&fault_param->queue_list, &queue->devices);
+ fault_param->queue = queue;
+
+ rcu_assign_pointer(param->fault_param, fault_param);
+
+done_unlock:
mutex_unlock(&param->lock);
mutex_unlock(&queue->lock);
- if (ret)
- kfree(iopf_param);
-
return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
@@ -325,40 +374,66 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device);
* @queue: IOPF queue
* @dev: device to remove
*
- * Caller makes sure that no more faults are reported for this device.
+ * Remove a device from an iopf_queue. It's recommended to follow these
+ * steps when removing a device (a sketch follows this helper):
*
- * Return: 0 on success and <0 on error.
+ * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
+ * and flush any hardware page request queues. This should be done before
+ * calling into this helper.
+ * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
+ * page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
+ * not retry. This helper function handles this.
+ * - Disable PRI on the device: After calling this helper, the caller could
+ * then disable PRI on the device.
+ *
+ * Calling iopf_queue_remove_device() essentially disassociates the device.
+ * The fault_param might still exist, but iommu_page_response() will do
+ * nothing. The device fault parameter reference count has been properly
+ * passed from iommu_report_device_fault() to the fault handling work, and
+ * will eventually be released after iommu_page_response().
*/
-int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EINVAL;
- struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iopf_fault *partial_iopf;
+ struct iopf_fault *next;
+ struct iopf_group *group, *temp;
struct dev_iommu *param = dev->iommu;
-
- if (!param || !queue)
- return -EINVAL;
+ struct iommu_fault_param *fault_param;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param && iopf_param->queue == queue) {
- list_del(&iopf_param->queue_list);
- param->iopf_param = NULL;
- ret = 0;
+ fault_param = rcu_dereference_check(param->fault_param,
+ lockdep_is_held(&param->lock));
+
+ if (WARN_ON(!fault_param || fault_param->queue != queue))
+ goto unlock;
+
+ mutex_lock(&fault_param->lock);
+ list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list)
+ kfree(partial_iopf);
+
+ list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) {
+ struct iopf_fault *iopf = &group->last_fault;
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = IOMMU_PAGE_RESP_INVALID
+ };
+
+ ops->page_response(dev, iopf, &resp);
+ list_del_init(&group->pending_node);
}
- mutex_unlock(&param->lock);
- mutex_unlock(&queue->lock);
- if (ret)
- return ret;
+ mutex_unlock(&fault_param->lock);
- /* Just in case some faults are still stuck */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
- kfree(iopf);
+ list_del(&fault_param->queue_list);
- kfree(iopf_param);
-
- return 0;
+ /* dec the ref owned by iopf_queue_add_device() */
+ rcu_assign_pointer(param->fault_param, NULL);
+ iopf_put_dev_fault_param(fault_param);
+unlock:
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
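/*
 * Illustrative sketch (not part of this patch): the removal steps described
 * above, for a PCI device using PRI.  example_disable_prq() is a hypothetical
 * driver-specific step that turns off PRI reception in the IOMMU and drains
 * its hardware queue.
 */
static void example_iopf_teardown(struct iopf_queue *queue, struct pci_dev *pdev)
{
	example_disable_prq(&pdev->dev);		/* 1. stop new page requests */
	iopf_queue_remove_device(queue, &pdev->dev);	/* 2. auto-respond anything pending */
	pci_disable_pri(pdev);				/* 3. finally disable PRI on the device */
}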
@@ -404,7 +479,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_alloc);
*/
void iopf_queue_free(struct iopf_queue *queue)
{
- struct iopf_device_param *iopf_param, *next;
+ struct iommu_fault_param *iopf_param, *next;
if (!queue)
return;
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index 2024a2313348..5f731d994803 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -21,10 +21,11 @@ int iommu_group_replace_domain(struct iommu_group *group,
struct iommu_domain *new_domain);
int iommu_device_register_bus(struct iommu_device *iommu,
- const struct iommu_ops *ops, struct bus_type *bus,
+ const struct iommu_ops *ops,
+ const struct bus_type *bus,
struct notifier_block *nb);
void iommu_device_unregister_bus(struct iommu_device *iommu,
- struct bus_type *bus,
+ const struct bus_type *bus,
struct notifier_block *nb);
#endif /* __LINUX_IOMMU_PRIV_H */
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index c3fc9201d0be..640acc804e8c 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -7,7 +7,7 @@
#include <linux/sched/mm.h>
#include <linux/iommu.h>
-#include "iommu-sva.h"
+#include "iommu-priv.h"
static DEFINE_MUTEX(iommu_sva_lock);
@@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
}
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
+ INIT_LIST_HEAD(&iommu_mm->sva_handles);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
* initialization to iommu_mm fields. If it does, readers may see a
@@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
goto out_unlock;
}
+ list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
+ if (handle->dev == dev) {
+ refcount_inc(&handle->users);
+ mutex_unlock(&iommu_sva_lock);
+ return handle;
+ }
+ }
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
@@ -111,6 +120,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
list_add(&domain->next, &mm->iommu_mm->sva_domains);
out:
+ refcount_set(&handle->users, 1);
+ list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
@@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
+ if (!refcount_dec_and_test(&handle->users)) {
+ mutex_unlock(&iommu_sva_lock);
+ return;
+ }
+ list_del(&handle->handle_item);
+
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
list_del(&domain->next);
@@ -159,15 +176,25 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+void mm_pasid_drop(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+ if (!iommu_mm)
+ return;
+
+ iommu_free_global_pasid(iommu_mm->pasid);
+ kfree(iommu_mm);
+}
+
/*
* I/O page fault handler for SVA
*/
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+static enum iommu_page_response_code
+iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
vm_fault_t ret;
struct vm_area_struct *vma;
- struct mm_struct *mm = data;
unsigned int access_flags = 0;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
struct iommu_fault_page_request *prm = &fault->prm;
@@ -217,13 +244,54 @@ out_put_mm:
return status;
}
-void mm_pasid_drop(struct mm_struct *mm)
+static void iommu_sva_handle_iopf(struct work_struct *work)
{
- struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+ struct iopf_fault *iopf;
+ struct iopf_group *group;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
+
+ group = container_of(work, struct iopf_group, work);
+ list_for_each_entry(iopf, &group->faults, list) {
+ /*
+ * For the moment, errors are sticky: don't handle subsequent
+ * faults in the group if there is an error.
+ */
+ if (status != IOMMU_PAGE_RESP_SUCCESS)
+ break;
+
+ status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
+ }
- if (!iommu_mm)
- return;
+ iopf_group_response(group, status);
+ iopf_free_group(group);
+}
- iommu_free_global_pasid(iommu_mm->pasid);
- kfree(iommu_mm);
+static int iommu_sva_iopf_handler(struct iopf_group *group)
+{
+ struct iommu_fault_param *fault_param = group->fault_param;
+
+ INIT_WORK(&group->work, iommu_sva_handle_iopf);
+ if (!queue_work(fault_param->queue->wq, &group->work))
+ return -EBUSY;
+
+ return 0;
+}
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *domain;
+
+ domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!domain)
+ return NULL;
+
+ domain->type = IOMMU_DOMAIN_SVA;
+ mmgrab(mm);
+ domain->mm = mm;
+ domain->owner = ops;
+ domain->iopf_handler = iommu_sva_iopf_handler;
+
+ return domain;
}
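/*
 * Illustrative sketch (not part of this patch): with the refcounted handles
 * added above, binding the same mm to the same device twice now returns the
 * same iommu_sva handle, and the PASID stays live until the last unbind.
 * example_sva_user() is a hypothetical caller.
 */
static void example_sva_user(struct device *dev)
{
	struct iommu_sva *a, *b;

	a = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(a))
		return;

	b = iommu_sva_bind_device(dev, current->mm);	/* same handle, users == 2 */
	if (IS_ERR(b)) {
		iommu_sva_unbind_device(a);
		return;
	}

	/* ... submit DMA tagged with iommu_sva_get_pasid(a) ... */

	iommu_sva_unbind_device(b);
	iommu_sva_unbind_device(a);	/* PASID released here */
}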
diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h
deleted file mode 100644
index 54946b5a7caf..000000000000
--- a/drivers/iommu/iommu-sva.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SVA library for IOMMU drivers
- */
-#ifndef _IOMMU_SVA_H
-#define _IOMMU_SVA_H
-
-#include <linux/mm_types.h>
-
-/* I/O Page fault */
-struct device;
-struct iommu_fault;
-struct iopf_queue;
-
-#ifdef CONFIG_IOMMU_SVA
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
-
-int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
-int iopf_queue_remove_device(struct iopf_queue *queue,
- struct device *dev);
-int iopf_queue_flush_dev(struct device *dev);
-struct iopf_queue *iopf_queue_alloc(const char *name);
-void iopf_queue_free(struct iopf_queue *queue);
-int iopf_queue_discard_partial(struct iopf_queue *queue);
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
-
-#else /* CONFIG_IOMMU_SVA */
-static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_add_device(struct iopf_queue *queue,
- struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_remove_device(struct iopf_queue *queue,
- struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_flush_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline struct iopf_queue *iopf_queue_alloc(const char *name)
-{
- return NULL;
-}
-
-static inline void iopf_queue_free(struct iopf_queue *queue)
-{
-}
-
-static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
-{
- return -ENODEV;
-}
-
-static inline enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
-{
- return IOMMU_PAGE_RESP_INVALID;
-}
-#endif /* CONFIG_IOMMU_SVA */
-#endif /* _IOMMU_SVA_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d14413916f93..098869007c69 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,8 +36,6 @@
#include "dma-iommu.h"
#include "iommu-priv.h"
-#include "iommu-sva.h"
-
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);
@@ -291,7 +289,7 @@ EXPORT_SYMBOL_GPL(iommu_device_unregister);
#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
void iommu_device_unregister_bus(struct iommu_device *iommu,
- struct bus_type *bus,
+ const struct bus_type *bus,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
@@ -305,7 +303,8 @@ EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
* some memory to hold a notifier_block.
*/
int iommu_device_register_bus(struct iommu_device *iommu,
- const struct iommu_ops *ops, struct bus_type *bus,
+ const struct iommu_ops *ops,
+ const struct bus_type *bus,
struct notifier_block *nb)
{
int err;
@@ -463,13 +462,24 @@ static void iommu_deinit_device(struct device *dev)
/*
* release_device() must stop using any attached domain on the device.
- * If there are still other devices in the group they are not effected
+ * If there are still other devices in the group, they are not affected
* by this callback.
*
- * The IOMMU driver must set the device to either an identity or
- * blocking translation and stop using any domain pointer, as it is
- * going to be freed.
+ * If the iommu driver provides release_domain, the core code ensures
+ * that domain is attached prior to calling release_device. Drivers can
+ * use this to enforce a translation on the idle iommu. Typically, the
+ * global static blocked_domain is a good choice.
+ *
+ * Otherwise, the iommu driver must set the device to either an identity
+ * or a blocking translation in release_device() and stop using any
+ * domain pointer, as it is going to be freed.
+ *
+ * Regardless, if a delayed attach never occurred, the release path
+ * should not touch any hardware configuration either.
*/
+ if (!dev->iommu->attach_deferred && ops->release_domain)
+ ops->release_domain->ops->attach_dev(ops->release_domain, dev);
+
if (ops->release_device)
ops->release_device(dev);
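/*
 * Illustrative sketch (not part of this patch): a driver opting into the
 * release_domain mechanism described above points it at its static blocked
 * domain, so the core attaches that domain before release_device() runs.
 * example_blocked_domain_ops and example_release_device() are hypothetical
 * driver objects.
 */
static struct iommu_domain example_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &example_blocked_domain_ops,
};

static const struct iommu_ops example_iommu_ops = {
	.release_domain = &example_blocked_domain,
	.release_device = example_release_device,
	/* ... */
};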
@@ -1248,6 +1258,25 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
+/**
+ * iommu_group_mutex_assert - Check device group mutex lock
+ * @dev: the device that has group param set
+ *
+ * This function is called by an iommu driver to check whether it holds
+ * group mutex lock for the given device or not.
+ *
+ * Note that this function must be called after device group param is set.
+ */
+void iommu_group_mutex_assert(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+
+ lockdep_assert_held(&group->mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
+#endif
+
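/*
 * Illustrative sketch (not part of this patch): a driver-internal helper that
 * relies on the caller holding the group mutex can now document and enforce
 * that with the assertion above.  struct example_dev_data and
 * example_get_dev_data() are hypothetical.
 */
static void example_update_dev_config(struct device *dev)
{
	struct example_dev_data *data = example_get_dev_data(dev);

	/* Must be called with dev's iommu group->mutex held. */
	iommu_group_mutex_assert(dev);

	data->ats_enabled = true;
}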
static struct device *iommu_group_first_dev(struct iommu_group *group)
{
lockdep_assert_held(&group->mutex);
@@ -1331,217 +1360,6 @@ void iommu_group_put(struct iommu_group *group)
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
- * iommu_register_device_fault_handler() - Register a device fault handler
- * @dev: the device
- * @handler: the fault handler
- * @data: private data passed as argument to the handler
- *
- * When an IOMMU fault event is received, this handler gets called with the
- * fault event and data as argument. The handler should return 0 on success. If
- * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
- * complete the fault by calling iommu_page_response() with one of the following
- * response code:
- * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
- * - IOMMU_PAGE_RESP_INVALID: terminate the fault
- * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
- * page faults if possible.
- *
- * Return 0 if the fault handler was installed successfully, or an error.
- */
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
- /* Only allow one fault handler registered for each device */
- if (param->fault_param) {
- ret = -EBUSY;
- goto done_unlock;
- }
-
- get_device(dev);
- param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
- if (!param->fault_param) {
- put_device(dev);
- ret = -ENOMEM;
- goto done_unlock;
- }
- param->fault_param->handler = handler;
- param->fault_param->data = data;
- mutex_init(&param->fault_param->lock);
- INIT_LIST_HEAD(&param->fault_param->faults);
-
-done_unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
-
-/**
- * iommu_unregister_device_fault_handler() - Unregister the device fault handler
- * @dev: the device
- *
- * Remove the device fault handler installed with
- * iommu_register_device_fault_handler().
- *
- * Return 0 on success, or an error.
- */
-int iommu_unregister_device_fault_handler(struct device *dev)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
-
- if (!param->fault_param)
- goto unlock;
-
- /* we cannot unregister handler if there are pending faults */
- if (!list_empty(&param->fault_param->faults)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- kfree(param->fault_param);
- param->fault_param = NULL;
- put_device(dev);
-unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
-
-/**
- * iommu_report_device_fault() - Report fault event to device driver
- * @dev: the device
- * @evt: fault event data
- *
- * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
- *
- * Return 0 on success, or an error.
- */
-int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
-{
- struct dev_iommu *param = dev->iommu;
- struct iommu_fault_event *evt_pending = NULL;
- struct iommu_fault_param *fparam;
- int ret = 0;
-
- if (!param || !evt)
- return -EINVAL;
-
- /* we only report device fault if there is a handler registered */
- mutex_lock(&param->lock);
- fparam = param->fault_param;
- if (!fparam || !fparam->handler) {
- ret = -EINVAL;
- goto done_unlock;
- }
-
- if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
- (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
- GFP_KERNEL);
- if (!evt_pending) {
- ret = -ENOMEM;
- goto done_unlock;
- }
- mutex_lock(&fparam->lock);
- list_add_tail(&evt_pending->list, &fparam->faults);
- mutex_unlock(&fparam->lock);
- }
-
- ret = fparam->handler(&evt->fault, fparam->data);
- if (ret && evt_pending) {
- mutex_lock(&fparam->lock);
- list_del(&evt_pending->list);
- mutex_unlock(&fparam->lock);
- kfree(evt_pending);
- }
-done_unlock:
- mutex_unlock(&param->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_report_device_fault);
-
-int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg)
-{
- bool needs_pasid;
- int ret = -EINVAL;
- struct iommu_fault_event *evt;
- struct iommu_fault_page_request *prm;
- struct dev_iommu *param = dev->iommu;
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
-
- if (!ops->page_response)
- return -ENODEV;
-
- if (!param || !param->fault_param)
- return -EINVAL;
-
- if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
- msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
- return -EINVAL;
-
- /* Only send response if there is a fault report pending */
- mutex_lock(&param->fault_param->lock);
- if (list_empty(&param->fault_param->faults)) {
- dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
- goto done_unlock;
- }
- /*
- * Check if we have a matching page request pending to respond,
- * otherwise return -EINVAL
- */
- list_for_each_entry(evt, &param->fault_param->faults, list) {
- prm = &evt->fault.prm;
- if (prm->grpid != msg->grpid)
- continue;
-
- /*
- * If the PASID is required, the corresponding request is
- * matched using the group ID, the PASID valid bit and the PASID
- * value. Otherwise only the group ID matches request and
- * response.
- */
- needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
- if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
- continue;
-
- if (!needs_pasid && has_pasid) {
- /* No big deal, just clear it. */
- msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
- msg->pasid = 0;
- }
-
- ret = ops->page_response(dev, evt, msg);
- list_del(&evt->list);
- kfree(evt);
- break;
- }
-
-done_unlock:
- mutex_unlock(&param->fault_param->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_page_response);
-
-/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
@@ -2986,7 +2804,7 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
struct iommu_device *iommu;
@@ -3037,7 +2855,7 @@ void iommu_fwspec_free(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);
-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
int i, new_num;
@@ -3623,26 +3441,6 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
-struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
- struct mm_struct *mm)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- struct iommu_domain *domain;
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return NULL;
-
- domain->type = IOMMU_DOMAIN_SVA;
- mmgrab(mm);
- domain->mm = mm;
- domain->owner = ops;
- domain->iopf_handler = iommu_sva_handle_iopf;
- domain->fault_data = mm;
-
- return domain;
-}
-
ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
int ret;
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 3f3f1fa1a0a9..33d142f8057d 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -263,7 +263,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
+ if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
+ (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
return -EINVAL;
idev = iommufd_get_device(ucmd, cmd->dev_id);
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 504ac1b01b2d..05fd9d3abf1b 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -1330,20 +1330,23 @@ out_unlock:
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
{
+ u32 new_id;
int rc;
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
- rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
- xa_limit_16b, GFP_KERNEL_ACCOUNT);
+ rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
+ GFP_KERNEL_ACCOUNT);
+
if (rc)
goto out_unlock;
rc = iopt_calculate_iova_alignment(iopt);
if (rc) {
- xa_erase(&iopt->access_list, access->iopt_access_list_id);
+ xa_erase(&iopt->access_list, new_id);
goto out_unlock;
}
+ access->iopt_access_list_id = new_id;
out_unlock:
up_write(&iopt->iova_rwsem);
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 482d4059f5db..e854d3f67205 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -45,6 +45,7 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
+ MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
};
enum {
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 0a92c9eeaf7f..db8c46bee155 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -100,7 +100,7 @@ struct iova_bitmap {
struct iova_bitmap_map mapped;
/* userspace address of the bitmap */
- u64 __user *bitmap;
+ u8 __user *bitmap;
/* u64 index that @mapped points to */
unsigned long mapped_base_index;
@@ -113,6 +113,9 @@ struct iova_bitmap {
/* length of the IOVA range for the whole bitmap */
size_t length;
+
+ /* length of the IOVA range set ahead the pinned pages */
+ unsigned long set_ahead_length;
};
/*
@@ -162,7 +165,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
{
struct iova_bitmap_map *mapped = &bitmap->mapped;
unsigned long npages;
- u64 __user *addr;
+ u8 __user *addr;
long ret;
/*
@@ -176,17 +179,18 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
sizeof(*bitmap->bitmap), PAGE_SIZE);
/*
- * We always cap at max number of 'struct page' a base page can fit.
- * This is, for example, on x86 means 2M of bitmap data max.
- */
- npages = min(npages, PAGE_SIZE / sizeof(struct page *));
-
- /*
* Bitmap address to be pinned is calculated via pointer arithmetic
* with bitmap u64 word index.
*/
addr = bitmap->bitmap + bitmap->mapped_base_index;
+ /*
+ * We always cap at max number of 'struct page' a base page can fit.
+ * On x86, for example, this means at most 2M of bitmap data.
+ */
+ npages = min(npages + !!offset_in_page(addr),
+ PAGE_SIZE / sizeof(struct page *));
+
ret = pin_user_pages_fast((unsigned long)addr, npages,
FOLL_WRITE, mapped->pages);
if (ret <= 0)
@@ -247,7 +251,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
mapped = &bitmap->mapped;
mapped->pgshift = __ffs(page_size);
- bitmap->bitmap = data;
+ bitmap->bitmap = (u8 __user *)data;
bitmap->mapped_total_index =
iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
bitmap->iova = iova;
@@ -304,7 +308,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
remaining = min_t(unsigned long, remaining,
- bytes / sizeof(*bitmap->bitmap));
+ DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
return remaining;
}
@@ -341,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap)
return bitmap->mapped_base_index >= bitmap->mapped_total_index;
}
+static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap,
+ size_t set_ahead_length)
+{
+ int ret = 0;
+
+ while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) {
+ unsigned long length = iova_bitmap_mapped_length(bitmap);
+ unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+
+ ret = iova_bitmap_get(bitmap);
+ if (ret)
+ break;
+
+ length = min(length, set_ahead_length);
+ iova_bitmap_set(bitmap, iova, length);
+
+ set_ahead_length -= length;
+ bitmap->mapped_base_index +=
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ iova_bitmap_put(bitmap);
+ }
+
+ bitmap->set_ahead_length = 0;
+ return ret;
+}
+
/*
* Advances to the next range, releases the current pinned
* pages and pins the next set of bitmap pages.
@@ -357,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap)
if (iova_bitmap_done(bitmap))
return 0;
+ /* Iterate, set and skip any bits requested for next iteration */
+ if (bitmap->set_ahead_length) {
+ int ret;
+
+ ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length);
+ if (ret)
+ return ret;
+ }
+
/* When advancing the index we pin the next set of bitmap pages */
return iova_bitmap_get(bitmap);
}
@@ -409,6 +448,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ unsigned long last_page_idx = mapped->npages - 1;
do {
unsigned int page_idx = cur_bit / BITS_PER_PAGE;
@@ -417,10 +457,18 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
last_bit - cur_bit + 1);
void *kaddr;
+ if (unlikely(page_idx > last_page_idx))
+ break;
+
kaddr = kmap_local_page(mapped->pages[page_idx]);
bitmap_set(kaddr, offset, nbits);
kunmap_local(kaddr);
cur_bit += nbits;
} while (cur_bit <= last_bit);
+
+ if (unlikely(cur_bit <= last_bit)) {
+ bitmap->set_ahead_length =
+ ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift);
+ }
}
EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d9e9920c7eba..7a2199470f31 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -36,11 +36,12 @@ static struct mock_bus_type iommufd_mock_bus_type = {
},
};
-static atomic_t mock_dev_num;
+static DEFINE_IDA(mock_dev_ida);
enum {
MOCK_DIRTY_TRACK = 1,
MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
+ MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
/*
* Like a real page table alignment requires the low bits of the address
@@ -53,6 +54,7 @@ enum {
MOCK_PFN_START_IOVA = _MOCK_PFN_START,
MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
+ MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
/*
@@ -61,8 +63,8 @@ enum {
* In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
* value. This has a much smaller randomization space and syzkaller can hit it.
*/
-static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
- u64 *iova)
+static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+ u64 *iova)
{
struct syz_layout {
__u32 nth_area;
@@ -86,6 +88,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
return 0;
}
+static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
+ u64 *iova)
+{
+ unsigned long ret;
+
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return 0;
+ }
+ ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
+ mutex_unlock(&access->ioas_lock);
+ return ret;
+}
+
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id, u64 *iova, u32 *flags)
{
@@ -98,7 +115,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
if (IS_ERR(ioas))
return;
- *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
+ *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
iommufd_put_object(ucmd->ictx, &ioas->obj);
}
@@ -121,6 +138,7 @@ enum selftest_obj_type {
struct mock_dev {
struct device dev;
unsigned long flags;
+ int id;
};
struct selftest_obj {
@@ -191,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
+static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
+ unsigned long iova, size_t page_size,
+ unsigned long flags)
+{
+ unsigned long cur, end = iova + page_size - 1;
+ bool dirty = false;
+ void *ent, *old;
+
+ for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
+ ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+ if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
+ continue;
+
+ dirty = true;
+ /* Clear dirty */
+ if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+ unsigned long val;
+
+ val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+ old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+ xa_mk_value(val), GFP_KERNEL);
+ WARN_ON_ONCE(ent != old);
+ }
+ }
+
+ return dirty;
+}
+
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
@@ -198,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
{
struct mock_iommu_domain *mock =
container_of(domain, struct mock_iommu_domain, domain);
- unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
- void *ent, *old;
+ unsigned long end = iova + size;
+ void *ent;
if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
return -EINVAL;
- for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+ do {
+ unsigned long pgsize = MOCK_IO_PAGE_SIZE;
+ unsigned long head;
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns,
- cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- iommu_dirty_bitmap_record(dirty, cur,
- MOCK_IO_PAGE_SIZE);
+ ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ if (!ent) {
+ iova += pgsize;
+ continue;
}
- }
+
+ if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
+ pgsize = MOCK_HUGE_PAGE_SIZE;
+ head = iova & ~(pgsize - 1);
+
+ /* Clear dirty */
+ if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
+ iommu_dirty_bitmap_record(dirty, head, pgsize);
+ iova = head + pgsize;
+ } while (iova < end);
return 0;
}
@@ -234,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = {
static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
+ struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
struct mock_iommu_domain *mock;
mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -242,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
mock->domain.ops = mock_ops.default_domain_ops;
mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
xa_init(&mock->pfns);
@@ -287,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
return ERR_PTR(-EOPNOTSUPP);
if (user_data || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(NULL);
+ domain = mock_domain_alloc_paging(dev);
if (!domain)
return ERR_PTR(-ENOMEM);
if (has_dirty_flag)
@@ -350,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain,
if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
flags = MOCK_PFN_LAST_IOVA;
+ if (pgsize != MOCK_IO_PAGE_SIZE) {
+ flags |= MOCK_PFN_HUGE_IOVA;
+ }
old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
flags),
@@ -394,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
/*
* iommufd generates unmaps that must be a strict
- * superset of the map's performend So every starting
- * IOVA should have been an iova passed to map, and the
+ * superset of the maps performed. So every
+ * starting/ending IOVA should have been an iova passed
+ * to map.
*
- * First IOVA must be present and have been a first IOVA
- * passed to map_pages
+ * This simple logic doesn't work when the HUGE_PAGE is
+ * turned on since the core code will automatically
+ * switch between the two page sizes creating a break in
+ * the unmap calls. The break can land in the middle of a
+ * contiguous IOVA range.
*/
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
+ if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
+ if (first) {
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_START_IOVA));
+ first = false;
+ }
+ if (pgcount == 1 &&
+ cur + MOCK_IO_PAGE_SIZE == pgsize)
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_LAST_IOVA));
}
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
iova += MOCK_IO_PAGE_SIZE;
ret += MOCK_IO_PAGE_SIZE;
@@ -595,7 +654,7 @@ static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
- atomic_dec(&mock_dev_num);
+ ida_free(&mock_dev_ida, mdev->id);
kfree(mdev);
}
@@ -604,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
struct mock_dev *mdev;
int rc;
- if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
+ if (dev_flags &
+ ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
return ERR_PTR(-EINVAL);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
@@ -616,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
mdev->dev.release = mock_dev_release;
mdev->dev.bus = &iommufd_mock_bus_type.bus;
- rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
- atomic_inc_return(&mock_dev_num));
+ rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err_put;
+ mdev->id = rc;
+
+ rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
if (rc)
goto err_put;
@@ -1119,7 +1183,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
+ iova = iommufd_test_syz_conv_iova(staccess->access,
&cmd->access_pages.iova);
npages = (ALIGN(iova + length, PAGE_SIZE) -
@@ -1221,8 +1285,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
- &cmd->access_rw.iova);
+ iova = iommufd_test_syz_conv_iova(staccess->access,
+ &cmd->access_rw.iova);
rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
if (rc)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d30e453d0fb4..d59d0ea2fd21 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,24 +24,8 @@ static bool iova_rcache_insert(struct iova_domain *iovad,
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
-static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
-
-unsigned long iova_rcache_range(void)
-{
- return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
-}
-
-static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
-{
- struct iova_domain *iovad;
-
- iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
-
- free_cpu_cached_iovas(cpu, iovad);
- return 0;
-}
-
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_global_cached_iovas(struct iova_domain *iovad);
static struct iova *to_iova(struct rb_node *node)
@@ -252,54 +236,6 @@ static void free_iova_mem(struct iova *iova)
kmem_cache_free(iova_cache, iova);
}
-int iova_cache_get(void)
-{
- mutex_lock(&iova_cache_mutex);
- if (!iova_cache_users) {
- int ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
- iova_cpuhp_dead);
- if (ret) {
- mutex_unlock(&iova_cache_mutex);
- pr_err("Couldn't register cpuhp handler\n");
- return ret;
- }
-
- iova_cache = kmem_cache_create(
- "iommu_iova", sizeof(struct iova), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!iova_cache) {
- cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
- mutex_unlock(&iova_cache_mutex);
- pr_err("Couldn't create iova cache\n");
- return -ENOMEM;
- }
- }
-
- iova_cache_users++;
- mutex_unlock(&iova_cache_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iova_cache_get);
-
-void iova_cache_put(void)
-{
- mutex_lock(&iova_cache_mutex);
- if (WARN_ON(!iova_cache_users)) {
- mutex_unlock(&iova_cache_mutex);
- return;
- }
- iova_cache_users--;
- if (!iova_cache_users) {
- cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
- kmem_cache_destroy(iova_cache);
- }
- mutex_unlock(&iova_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(iova_cache_put);
-
/**
* alloc_iova - allocates an iova
* @iovad: - iova domain in question
@@ -654,11 +590,18 @@ struct iova_rcache {
struct delayed_work work;
};
+static struct kmem_cache *iova_magazine_cache;
+
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
struct iova_magazine *mag;
- mag = kmalloc(sizeof(*mag), flags);
+ mag = kmem_cache_alloc(iova_magazine_cache, flags);
if (mag)
mag->size = 0;
@@ -667,7 +610,7 @@ static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
static void iova_magazine_free(struct iova_magazine *mag)
{
- kfree(mag);
+ kmem_cache_free(iova_magazine_cache, mag);
}
static void
@@ -990,5 +933,71 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
spin_unlock_irqrestore(&rcache->lock, flags);
}
}
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct iova_domain *iovad;
+
+ iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+ free_cpu_cached_iovas(cpu, iovad);
+ return 0;
+}
+
+int iova_cache_get(void)
+{
+ int err = -ENOMEM;
+
+ mutex_lock(&iova_cache_mutex);
+ if (!iova_cache_users) {
+ iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_cache)
+ goto out_err;
+
+ iova_magazine_cache = kmem_cache_create("iommu_iova_magazine",
+ sizeof(struct iova_magazine),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_magazine_cache)
+ goto out_err;
+
+ err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
+ NULL, iova_cpuhp_dead);
+ if (err) {
+ pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
+ goto out_err;
+ }
+ }
+
+ iova_cache_users++;
+ mutex_unlock(&iova_cache_mutex);
+
+ return 0;
+
+out_err:
+ kmem_cache_destroy(iova_cache);
+ kmem_cache_destroy(iova_magazine_cache);
+ mutex_unlock(&iova_cache_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (WARN_ON(!iova_cache_users)) {
+ mutex_unlock(&iova_cache_mutex);
+ return;
+ }
+ iova_cache_users--;
+ if (!iova_cache_users) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
+ kmem_cache_destroy(iova_cache);
+ kmem_cache_destroy(iova_magazine_cache);
+ }
+ mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace1fc4bd34b..b657cc09605f 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -709,7 +709,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
}
static int ipmmu_init_platform_device(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *ipmmu_pdev;
@@ -773,7 +773,7 @@ static bool ipmmu_device_is_allowed(struct device *dev)
}
static int ipmmu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
if (!ipmmu_device_is_allowed(dev))
return -ENODEV;
@@ -1005,7 +1005,6 @@ static const struct of_device_id ipmmu_of_ids[] = {
static int ipmmu_probe(struct platform_device *pdev)
{
struct ipmmu_vmsa_device *mmu;
- struct resource *res;
int irq;
int ret;
@@ -1025,8 +1024,7 @@ static int ipmmu_probe(struct platform_device *pdev)
return ret;
/* Map I/O memory and request IRQ. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmu->base = devm_ioremap_resource(&pdev->dev, res);
+ mmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
@@ -1123,7 +1121,6 @@ static void ipmmu_remove(struct platform_device *pdev)
ipmmu_device_reset(mmu);
}
-#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
@@ -1153,18 +1150,14 @@ static int ipmmu_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops ipmmu_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
-#define DEV_PM_OPS &ipmmu_pm
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
- .of_match_table = of_match_ptr(ipmmu_of_ids),
- .pm = DEV_PM_OPS,
+ .of_match_table = ipmmu_of_ids,
+ .pm = pm_sleep_ptr(&ipmmu_pm),
},
.probe = ipmmu_probe,
.remove_new = ipmmu_remove,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 83314b9d8f38..ee59647c2050 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void)
if (disable_irq_remap)
return -ENOSYS;
- if (intel_irq_remap_ops.prepare() == 0)
+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) &&
+ intel_irq_remap_ops.prepare() == 0)
remap_ops = &intel_irq_remap_ops;
else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
amd_iommu_irq_ops.prepare() == 0)
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f86af9815d6f..989e0869d805 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -598,7 +598,7 @@ static void print_ctx_regs(void __iomem *base, int ctx)
static int insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
@@ -626,7 +626,7 @@ static int insert_iommu_master(struct device *dev,
}
static int qcom_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7abe9e85a570..b8c47f18bc26 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -957,7 +957,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
return group;
}
-static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int mtk_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct platform_device *m4updev;
@@ -1264,7 +1265,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->plat_data = of_device_get_match_data(dev);
/* Protect memory. HW will access here while translation fault.*/
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN, GFP_KERNEL);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 25b41222abae..a9fa2a54dc9b 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -398,7 +398,8 @@ static const struct iommu_ops mtk_iommu_v1_ops;
* MTK generation one iommu HW only supports one iommu domain, and all the
* clients share the same iova address space.
*/
-static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev,
+ const struct of_phandle_args *args)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_v1_data *data;
@@ -621,8 +622,8 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
data->dev = dev;
/* Protect memory. HW will access here on a translation fault. */
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
- GFP_KERNEL | GFP_DMA);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN,
+ GFP_KERNEL | GFP_DMA);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
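The devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, ...) to devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN, ...) change in both MediaTek drivers is purely about the multiplication. A sketch of the allocation helper shape (names hypothetical):

/* Sketch: devm_kcalloc(dev, n, size, flags) zeroes the buffer and saturates
 * on n * size overflow instead of silently wrapping. */
static void *foo_alloc_protect_region(struct device *dev, size_t align)
{
	return devm_kcalloc(dev, 2, align, GFP_KERNEL);
}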
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 719652b60840..3afe0b48a48d 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -29,7 +29,7 @@ static int of_iommu_xlate(struct device *dev,
!of_device_is_available(iommu_spec->np))
return -ENODEV;
- ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+ ret = iommu_fwspec_init(dev, fwnode, ops);
if (ret)
return ret;
/*
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2685861c0a12..da79d9f4cf63 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1140,7 +1140,7 @@ static void rk_iommu_release_device(struct device *dev)
}
static int rk_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_dev;
struct rk_iommudata *data;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 537359f10997..ba53571a8239 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -390,7 +390,8 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int sprd_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct platform_device *pdev;
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 41484a5a399b..decd52cba998 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -819,7 +819,7 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
}
static int sun50i_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
unsigned id = args->args[0];
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 310871728ab4..14e525bd0d9b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -830,7 +830,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
}
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
const struct iommu_ops *ops = smmu->iommu.ops;
int err;
@@ -959,7 +959,7 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev)
}
static int tegra_smmu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 34db37fd9675..04048f64a2c0 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1051,7 +1051,8 @@ static struct iommu_group *viommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int viommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index f7149d0f3d45..72c07a12f5e1 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -546,6 +546,17 @@ config SIFIVE_PLIC
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+config STARFIVE_JH8100_INTC
+ bool "StarFive JH8100 External Interrupt Controller"
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ default ARCH_STARFIVE
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This enables support for the INTC chip found in the StarFive
+ JH8100 SoC.
+
+ If you don't know what to do here, say Y.
+
config EXYNOS_IRQ_COMBINER
bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index ffd945fe71aa..ec4a18380998 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
+obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 9745a119d0e6..eb02d203c963 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -242,7 +242,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words)
return -EINVAL;
- cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words),
GFP_KERNEL);
if (!cpu)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 24ca1d656adc..36e71af054e9 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -249,7 +249,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
return -EINVAL;
}
- cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, mask_cache, n_words),
GFP_KERNEL);
if (!cpu)
return -ENOMEM;
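Both Broadcom L1 hunks allocate a per-CPU struct with a trailing flexible array, so the open-coded sizeof(*cpu) + n_words * sizeof(u32) becomes struct_size(), which checks the multiplication for overflow. A sketch under assumed names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_cpu {
	void __iomem	*map_base;
	u32		enable_cache[];		/* flexible array member */
};

static struct foo_cpu *foo_cpu_alloc(unsigned int n_words)
{
	struct foo_cpu *cpu;

	/* struct_size(cpu, enable_cache, n_words) computes
	 * sizeof(*cpu) + n_words * sizeof(cpu->enable_cache[0]) safely. */
	cpu = kzalloc(struct_size(cpu, enable_cache, n_words), GFP_KERNEL);
	return cpu;
}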
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 53abd4779914..fca888b36680 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3181,6 +3181,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
+out:
if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -3216,7 +3217,6 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
-out:
gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
@@ -4436,12 +4436,12 @@ static const struct irq_domain_ops its_sgi_domain_ops = {
static int its_vpe_id_alloc(void)
{
- return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
+ return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
{
- ida_simple_remove(&its_vpeid_ida, id);
+ ida_free(&its_vpeid_ida, id);
}
static int its_vpe_init(struct its_vpe *vpe)
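The VPE ID helpers move from the deprecated ida_simple_*() wrappers to ida_alloc_max()/ida_free(); the only subtlety is that ida_alloc_max() takes an inclusive upper bound, hence the -1. A sketch with a hypothetical IDA:

#include <linux/idr.h>

static DEFINE_IDA(foo_ida);

static int foo_id_alloc(unsigned int nr_ids)
{
	/* Inclusive bound: allocates an id in [0, nr_ids - 1]. */
	return ida_alloc_max(&foo_ida, nr_ids - 1, GFP_KERNEL);
}

static void foo_id_free(int id)
{
	ida_free(&foo_ida, id);
}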
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 98b0329b7154..6fb276504bcc 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -19,6 +19,7 @@
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/iopoll.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
@@ -180,11 +181,6 @@ static enum gic_intid_range get_intid_range(struct irq_data *d)
return __get_intid_range(d->hwirq);
}
-static inline unsigned int gic_irq(struct irq_data *d)
-{
- return d->hwirq;
-}
-
static inline bool gic_irq_in_rdist(struct irq_data *d)
{
switch (get_intid_range(d)) {
@@ -251,17 +247,13 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
- u32 count = 1000000; /* 1s! */
+ u32 val;
+ int ret;
- while (readl_relaxed(base + GICD_CTLR) & bit) {
- count--;
- if (!count) {
- pr_err_ratelimited("RWP timeout, gone fishing\n");
- return;
- }
- cpu_relax();
- udelay(1);
- }
+ ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
+ 1, USEC_PER_SEC);
+ if (ret == -ETIMEDOUT)
+ pr_err_ratelimited("RWP timeout, gone fishing\n");
}
/* Wait for completion of a distributor change */
@@ -279,8 +271,8 @@ static void gic_redist_wait_for_rwp(void)
static void gic_enable_redist(bool enable)
{
void __iomem *rbase;
- u32 count = 1000000; /* 1s! */
u32 val;
+ int ret;
if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
return;
@@ -301,16 +293,13 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (--count) {
- val = readl_relaxed(rbase + GICR_WAKER);
- if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
- break;
- cpu_relax();
- udelay(1);
- }
- if (!count)
+ ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
+ enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
+ 1, USEC_PER_SEC);
+ if (ret == -ETIMEDOUT) {
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
+ }
}
/*
@@ -548,7 +537,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
* A secondary irq_chip should be in charge of LPI request,
* it should not be possible to get there
*/
- if (WARN_ON(gic_irq(d) >= 8192))
+ if (WARN_ON(irqd_to_hwirq(d) >= 8192))
return -EINVAL;
/* desc lock should already be held */
@@ -588,7 +577,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
* A secondary irq_chip should be in charge of LPI request,
* it should not be possible to get there
*/
- if (WARN_ON(gic_irq(d) >= 8192))
+ if (WARN_ON(irqd_to_hwirq(d) >= 8192))
return;
/* desc lock should already be held */
@@ -626,7 +615,7 @@ static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
isb();
if (gic_arm64_erratum_2941627_needed(d)) {
@@ -646,19 +635,19 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
* No need to deactivate an LPI, or an interrupt that
* is getting forwarded to a vcpu.
*/
- if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+ if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
return;
if (!gic_arm64_erratum_2941627_needed(d))
- gic_write_dir(gic_irq(d));
+ gic_write_dir(irqd_to_hwirq(d));
else
gic_poke_irq(d, GICD_ICACTIVER);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ irq_hw_number_t irq = irqd_to_hwirq(d);
enum gic_intid_range range;
- unsigned int irq = gic_irq(d);
void __iomem *base;
u32 offset, index;
int ret;
@@ -684,7 +673,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
ret = gic_configure_irq(index, type, base + offset, NULL);
if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
/* Misconfigured PPIs are usually not fatal */
- pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
ret = 0;
}
@@ -1702,9 +1691,13 @@ static int gic_irq_domain_select(struct irq_domain *d,
irq_hw_number_t hwirq;
/* Not for us */
- if (fwspec->fwnode != d->fwnode)
+ if (fwspec->fwnode != d->fwnode)
return 0;
+ /* Handle pure domain searches */
+ if (!fwspec->param_count)
+ return d->bus_token == bus_token;
+
/* If this is not DT, then we have a single domain */
if (!is_of_node(fwspec->fwnode))
return 1;
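The two GICv3 busy-wait loops collapse into readl_relaxed_poll_timeout_atomic(), which re-reads a register on a fixed delay and returns -ETIMEDOUT on expiry. A sketch of the pattern (register and message hypothetical):

#include <linux/iopoll.h>

static void foo_wait_not_busy(void __iomem *reg, u32 busy_bit)
{
	u32 val;
	int ret;

	/* Re-read every 1 us, give up after USEC_PER_SEC us (i.e. 1 s). */
	ret = readl_relaxed_poll_timeout_atomic(reg, val, !(val & busy_bit),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT)
		pr_err_ratelimited("foo: busy bit never cleared\n");
}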
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 412196a7dad5..98aa383e39db 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -162,11 +162,6 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d)
return gic_data_cpu_base(gic_data);
}
-static inline unsigned int gic_irq(struct irq_data *d)
-{
- return d->hwirq;
-}
-
static inline bool cascading_gic_irq(struct irq_data *d)
{
void *data = irq_data_get_irq_handler_data(d);
@@ -183,14 +178,16 @@ static inline bool cascading_gic_irq(struct irq_data *d)
*/
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
- writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
+ u32 mask = 1 << (irqd_to_hwirq(d) % 32);
+
+ writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4);
}
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
- return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
+ u32 mask = 1 << (irqd_to_hwirq(d) % 32);
+
+ return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask);
}
static void gic_mask_irq(struct irq_data *d)
@@ -220,7 +217,7 @@ static void gic_unmask_irq(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- u32 hwirq = gic_irq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (hwirq < 16)
hwirq = this_cpu_read(sgi_intid);
@@ -230,7 +227,7 @@ static void gic_eoi_irq(struct irq_data *d)
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
- u32 hwirq = gic_irq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
/* Do not deactivate an IRQ forwarded to a vcpu. */
if (irqd_is_forwarded_to_vcpu(d))
@@ -293,8 +290,8 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ irq_hw_number_t gicirq = irqd_to_hwirq(d);
void __iomem *base = gic_dist_base(d);
- unsigned int gicirq = gic_irq(d);
int ret;
/* Interrupt configuration for SGIs can't be changed */
@@ -309,7 +306,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
if (ret && gicirq < 32) {
/* Misconfigured PPIs are usually not fatal */
- pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
+ pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16);
ret = 0;
}
@@ -319,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
- if (cascading_gic_irq(d) || gic_irq(d) < 16)
+ if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16)
return -EINVAL;
if (vcpu)
@@ -796,7 +793,7 @@ static void rmw_writeb(u8 bval, void __iomem *addr)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + irqd_to_hwirq(d);
struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
unsigned int cpu;
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 5831be454673..b42ed68acfa6 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -461,12 +461,11 @@ err_generic:
return ret;
}
-static int pdc_intc_remove(struct platform_device *pdev)
+static void pdc_intc_remove(struct platform_device *pdev)
{
struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->domain);
- return 0;
}
static const struct of_device_id pdc_intc_match[] = {
@@ -479,8 +478,8 @@ static struct platform_driver pdc_intc_driver = {
.name = "pdc-intc",
.of_match_table = pdc_intc_match,
},
- .probe = pdc_intc_probe,
- .remove = pdc_intc_remove,
+ .probe = pdc_intc_probe,
+ .remove_new = pdc_intc_remove,
};
static int __init pdc_intc_init(void)
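The remove-callback changes repeated through the rest of this series all have the same shape: the driver core ignores a non-zero return from .remove, so the callback becomes void and is hooked up via .remove_new. A sketch (foo_* names are placeholders; domain creation elided):

#include <linux/irqdomain.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct irq_domain *domain;
};

static int foo_probe(struct platform_device *pdev)
{
	/* ...allocate foo_priv, create the domain, set drvdata... */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	irq_domain_remove(priv->domain);
	/* No return value: an error here could not stop the unbind anyway. */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver		= {
		.name	= "foo-intc",
	},
};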
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index aa041e4dfee0..511adfaeec82 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -166,6 +166,10 @@ static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec
if (fwspec->fwnode != d->fwnode)
return false;
+ /* Handle pure domain searches */
+ if (!fwspec->param_count)
+ return d->bus_token == bus_token;
+
return irqchip_data->chanidx == fwspec->param[1];
}
@@ -282,7 +286,7 @@ out:
return ret;
}
-static int imx_intmux_remove(struct platform_device *pdev)
+static void imx_intmux_remove(struct platform_device *pdev)
{
struct intmux_data *data = platform_get_drvdata(pdev);
int i;
@@ -298,8 +302,6 @@ static int imx_intmux_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -354,11 +356,11 @@ static const struct of_device_id imx_intmux_id[] = {
static struct platform_driver imx_intmux_driver = {
.driver = {
- .name = "imx-intmux",
- .of_match_table = imx_intmux_id,
- .pm = &imx_intmux_pm_ops,
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ .pm = &imx_intmux_pm_ops,
},
- .probe = imx_intmux_probe,
- .remove = imx_intmux_remove,
+ .probe = imx_intmux_probe,
+ .remove_new = imx_intmux_remove,
};
builtin_platform_driver(imx_intmux_driver);
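The GICv3 and INTMUX hunks add the same guard to their irq_domain .select callbacks: a lookup with an empty fwspec is a pure domain search and should match on the bus token alone. A sketch of that callback shape (the final cell check is a placeholder for the driver's usual match):

static int foo_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	/* Not our firmware node. */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* Pure domain search: no cells to compare, match on the token. */
	if (!fwspec->param_count)
		return d->bus_token == bus_token;

	/* A real driver would compare fwspec->param[] here. */
	return 1;
}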
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index bd9543314539..20cf7a9e9ece 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -231,7 +231,7 @@ out:
return ret;
}
-static int imx_irqsteer_remove(struct platform_device *pdev)
+static void imx_irqsteer_remove(struct platform_device *pdev)
{
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
@@ -243,8 +243,6 @@ static int imx_irqsteer_remove(struct platform_device *pdev)
irq_domain_remove(irqsteer_data->domain);
clk_disable_unprepare(irqsteer_data->ipg_clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -307,11 +305,11 @@ static const struct of_device_id imx_irqsteer_dt_ids[] = {
static struct platform_driver imx_irqsteer_driver = {
.driver = {
- .name = "imx-irqsteer",
- .of_match_table = imx_irqsteer_dt_ids,
- .pm = &imx_irqsteer_pm_ops,
+ .name = "imx-irqsteer",
+ .of_match_table = imx_irqsteer_dt_ids,
+ .pm = &imx_irqsteer_pm_ops,
},
- .probe = imx_irqsteer_probe,
- .remove = imx_irqsteer_remove,
+ .probe = imx_irqsteer_probe,
+ .remove_new = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index a36396db4b08..30f1979fa124 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -190,7 +190,7 @@ static int keystone_irq_probe(struct platform_device *pdev)
return 0;
}
-static int keystone_irq_remove(struct platform_device *pdev)
+static void keystone_irq_remove(struct platform_device *pdev)
{
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
@@ -201,7 +201,6 @@ static int keystone_irq_remove(struct platform_device *pdev)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
irq_domain_remove(kirq->irqd);
- return 0;
}
static const struct of_device_id keystone_irq_dt_ids[] = {
@@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids);
static struct platform_driver keystone_irq_device_driver = {
.probe = keystone_irq_probe,
- .remove = keystone_irq_remove,
+ .remove_new = keystone_irq_remove,
.driver = {
.name = "keystone_irq",
.of_match_table = of_match_ptr(keystone_irq_dt_ids),
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b3736bdd4b9f..b64cbe3052e8 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -198,6 +198,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+
+ /* Skip handling if pending bitmap is zero */
+ if (!pending)
+ continue;
+
+ /* Clear the IRQs */
iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
while (pending) {
int bit = __ffs(pending);
@@ -304,23 +310,7 @@ static int eiointc_suspend(void)
static void eiointc_resume(void)
{
- int i, j;
- struct irq_desc *desc;
- struct irq_data *irq_data;
-
eiointc_router_init(0);
-
- for (i = 0; i < nr_pics; i++) {
- for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
- desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
- if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
- raw_spin_lock(&desc->lock);
- irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
- eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
- raw_spin_unlock(&desc->lock);
- }
- }
- }
}
static struct syscore_ops eiointc_syscore_ops = {
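The eiointc dispatch change simply short-circuits empty ISR words before the write-to-clear. A sketch of that loop shape, with 32-bit accessors in place of the 64-bit iocsr helpers purely for illustration:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static void foo_dispatch_word(struct irq_domain *domain, void __iomem *isr,
			      int word)
{
	unsigned long pending = readl(isr);

	/* Nothing latched in this word: skip the clear and the scan. */
	if (!pending)
		return;

	writel(pending, isr);			/* write-1-to-clear */

	while (pending) {
		int bit = __ffs(pending);

		generic_handle_domain_irq(domain, bit + word * 32);
		pending &= ~BIT(bit);
	}
}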
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 15cf80b46322..1aef5c4d27c6 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -398,7 +398,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
return 0;
}
-static int ls_scfg_msi_remove(struct platform_device *pdev)
+static void ls_scfg_msi_remove(struct platform_device *pdev)
{
struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
int i;
@@ -410,17 +410,15 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
irq_domain_remove(msi_data->parent);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver ls_scfg_msi_driver = {
.driver = {
- .name = "ls-scfg-msi",
- .of_match_table = ls_scfg_msi_id,
+ .name = "ls-scfg-msi",
+ .of_match_table = ls_scfg_msi_id,
},
- .probe = ls_scfg_msi_probe,
- .remove = ls_scfg_msi_remove,
+ .probe = ls_scfg_msi_probe,
+ .remove_new = ls_scfg_msi_remove,
};
module_platform_driver(ls_scfg_msi_driver);
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index 3eb1f8cdf674..acceb6e7fa95 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -222,7 +222,7 @@ static int madera_irq_probe(struct platform_device *pdev)
return 0;
}
-static int madera_irq_remove(struct platform_device *pdev)
+static void madera_irq_remove(struct platform_device *pdev)
{
struct madera *madera = dev_get_drvdata(pdev->dev.parent);
@@ -232,13 +232,11 @@ static int madera_irq_remove(struct platform_device *pdev)
*/
madera->irq_dev = NULL;
regmap_del_irq_chip(madera->irq, madera->irq_data);
-
- return 0;
}
static struct platform_driver madera_irq_driver = {
- .probe = &madera_irq_probe,
- .remove = &madera_irq_remove,
+ .probe = madera_irq_probe,
+ .remove_new = madera_irq_remove,
.driver = {
.name = "madera-irq",
.pm = &madera_irq_pm_ops,
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 5101a3fb11df..58881d313979 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
- struct device *parent;
struct platform_device *child;
struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
int ret = 0;
- parent = bus_get_dev_root(&platform_bus_type);
- if (!parent)
- return -ENODEV;
-
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
- child = of_platform_device_create(np, NULL, parent);
+ child = of_platform_device_create(np, NULL, NULL);
if (!child) {
ret = -ENOMEM;
break;
@@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
}
}
- put_device(parent);
if (ret)
of_node_put(np);
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index f88df39f4129..9a1791908598 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -154,6 +154,10 @@ static const struct meson_gpio_irq_params c3_params = {
INIT_MESON_S4_COMMON_DATA(55)
};
+static const struct meson_gpio_irq_params t7_params = {
+ INIT_MESON_S4_COMMON_DATA(157)
+};
+
static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -165,6 +169,7 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
+ { .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
};
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index ef3d3646ccc2..d17d9c0e2880 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -167,14 +167,12 @@ static int mvebu_pic_probe(struct platform_device *pdev)
return 0;
}
-static int mvebu_pic_remove(struct platform_device *pdev)
+static void mvebu_pic_remove(struct platform_device *pdev)
{
struct mvebu_pic *pic = platform_get_drvdata(pdev);
on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
irq_domain_remove(pic->domain);
-
- return 0;
}
static const struct of_device_id mvebu_pic_of_match[] = {
@@ -184,11 +182,11 @@ static const struct of_device_id mvebu_pic_of_match[] = {
MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);
static struct platform_driver mvebu_pic_driver = {
- .probe = mvebu_pic_probe,
- .remove = mvebu_pic_remove,
+ .probe = mvebu_pic_probe,
+ .remove_new = mvebu_pic_remove,
.driver = {
- .name = "mvebu-pic",
- .of_match_table = mvebu_pic_of_match,
+ .name = "mvebu-pic",
+ .of_match_table = mvebu_pic_of_match,
},
};
module_platform_driver(mvebu_pic_driver);
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index 0f64ecb9b1f4..060eb000e9d3 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -599,7 +599,7 @@ fail_irq:
return ret;
}
-static int pruss_intc_remove(struct platform_device *pdev)
+static void pruss_intc_remove(struct platform_device *pdev)
{
struct pruss_intc *intc = platform_get_drvdata(pdev);
u8 max_system_events = intc->soc_config->num_system_events;
@@ -616,8 +616,6 @@ static int pruss_intc_remove(struct platform_device *pdev)
irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq));
irq_domain_remove(intc->domain);
-
- return 0;
}
static const struct pruss_intc_match_data pruss_intc_data = {
@@ -645,12 +643,12 @@ MODULE_DEVICE_TABLE(of, pruss_intc_of_match);
static struct platform_driver pruss_intc_driver = {
.driver = {
- .name = "pruss-intc",
- .of_match_table = pruss_intc_of_match,
- .suppress_bind_attrs = true,
+ .name = "pruss-intc",
+ .of_match_table = pruss_intc_of_match,
+ .suppress_bind_attrs = true,
},
- .probe = pruss_intc_probe,
- .remove = pruss_intc_remove,
+ .probe = pruss_intc_probe,
+ .remove_new = pruss_intc_remove,
};
module_platform_driver(pruss_intc_driver);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index fa19585f3dee..9ad37237ba95 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -561,14 +561,13 @@ err0:
return ret;
}
-static int intc_irqpin_remove(struct platform_device *pdev)
+static void intc_irqpin_remove(struct platform_device *pdev)
{
struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused intc_irqpin_suspend(struct device *dev)
@@ -585,11 +584,11 @@ static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL);
static struct platform_driver intc_irqpin_device_driver = {
.probe = intc_irqpin_probe,
- .remove = intc_irqpin_remove,
+ .remove_new = intc_irqpin_remove,
.driver = {
- .name = "renesas_intc_irqpin",
- .of_match_table = intc_irqpin_dt_ids,
- .pm = &intc_irqpin_pm_ops,
+ .name = "renesas_intc_irqpin",
+ .of_match_table = intc_irqpin_dt_ids,
+ .pm = &intc_irqpin_pm_ops,
}
};
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 49b446b396f9..76026e0b8e20 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -218,14 +218,13 @@ err_runtime_pm_disable:
return ret;
}
-static int irqc_remove(struct platform_device *pdev)
+static void irqc_remove(struct platform_device *pdev)
{
struct irqc_priv *p = platform_get_drvdata(pdev);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused irqc_suspend(struct device *dev)
@@ -248,11 +247,11 @@ MODULE_DEVICE_TABLE(of, irqc_dt_ids);
static struct platform_driver irqc_device_driver = {
.probe = irqc_probe,
- .remove = irqc_remove,
+ .remove_new = irqc_remove,
.driver = {
- .name = "renesas_irqc",
+ .name = "renesas_irqc",
.of_match_table = irqc_dt_ids,
- .pm = &irqc_pm_ops,
+ .pm = &irqc_pm_ops,
}
};
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index e4c99c2e0373..f05afe82db4d 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -244,12 +244,11 @@ out_put_node:
return ret;
}
-static int rza1_irqc_remove(struct platform_device *pdev)
+static void rza1_irqc_remove(struct platform_device *pdev)
{
struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->irq_domain);
- return 0;
}
static const struct of_device_id rza1_irqc_dt_ids[] = {
@@ -260,9 +259,9 @@ MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids);
static struct platform_driver rza1_irqc_device_driver = {
.probe = rza1_irqc_probe,
- .remove = rza1_irqc_remove,
+ .remove_new = rza1_irqc_remove,
.driver = {
- .name = "renesas_rza1_irqc",
+ .name = "renesas_rza1_irqc",
.of_match_table = rza1_irqc_dt_ids,
}
};
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index e8d01b14ccdd..f87aeab460eb 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -17,17 +17,29 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/soc/andes/irq.h>
+
+#include <asm/hwcap.h>
static struct irq_domain *intc_domain;
+static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
+static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
+static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
- if (unlikely(cause >= BITS_PER_LONG))
- panic("unexpected interrupt cause");
+ if (generic_handle_domain_irq(intc_domain, cause))
+ pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
+}
+
+static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs)
+{
+ unsigned long topi;
- generic_handle_domain_irq(intc_domain, cause);
+ while ((topi = csr_read(CSR_TOPI)))
+ generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT);
}
/*
@@ -39,12 +51,43 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
static void riscv_intc_irq_mask(struct irq_data *d)
{
- csr_clear(CSR_IE, BIT(d->hwirq));
+ if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
+ csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
+ else
+ csr_clear(CSR_IE, BIT(d->hwirq));
}
static void riscv_intc_irq_unmask(struct irq_data *d)
{
- csr_set(CSR_IE, BIT(d->hwirq));
+ if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
+ csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
+ else
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static void andes_intc_irq_mask(struct irq_data *d)
+{
+ /*
+ * Andes specific S-mode local interrupt causes (hwirq)
+ * are defined as (256 + n) and controlled by n-th bit
+ * of SLIE.
+ */
+ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
+
+ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
+ csr_clear(CSR_IE, mask);
+ else
+ csr_clear(ANDES_CSR_SLIE, mask);
+}
+
+static void andes_intc_irq_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
+
+ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
+ csr_set(CSR_IE, mask);
+ else
+ csr_set(ANDES_CSR_SLIE, mask);
}
static void riscv_intc_irq_eoi(struct irq_data *d)
@@ -70,12 +113,21 @@ static struct irq_chip riscv_intc_chip = {
.irq_eoi = riscv_intc_irq_eoi,
};
+static struct irq_chip andes_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = andes_intc_irq_mask,
+ .irq_unmask = andes_intc_irq_unmask,
+ .irq_eoi = riscv_intc_irq_eoi,
+};
+
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
+ struct irq_chip *chip = d->host_data;
+
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
- handle_percpu_devid_irq, NULL, NULL);
+ irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
+ NULL, NULL);
return 0;
}
@@ -93,6 +145,14 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
if (ret)
return ret;
+ /*
+ * Only allow hwirq for which we have corresponding standard or
+ * custom interrupt enable register.
+ */
+ if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
+ (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
+ return -EINVAL;
+
for (i = 0; i < nr_irqs; i++) {
ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
if (ret)
@@ -113,18 +173,20 @@ static struct fwnode_handle *riscv_intc_hwnode(void)
return intc_domain->fwnode;
}
-static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip)
{
int rc;
- intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
- &riscv_intc_domain_ops, NULL);
+ intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
if (!intc_domain) {
pr_err("unable to add IRQ domain\n");
return -ENXIO;
}
- rc = set_handle_irq(&riscv_intc_irq);
+ if (riscv_isa_extension_available(NULL, SxAIA))
+ rc = set_handle_irq(&riscv_intc_aia_irq);
+ else
+ rc = set_handle_irq(&riscv_intc_irq);
if (rc) {
pr_err("failed to set irq handler\n");
return rc;
@@ -132,7 +194,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
- pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+ pr_info("%d local interrupts mapped%s\n",
+ riscv_isa_extension_available(NULL, SxAIA) ? 64 : riscv_intc_nr_irqs,
+ riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "");
+ if (riscv_intc_custom_nr_irqs)
+ pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs);
return 0;
}
@@ -140,8 +206,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc;
+ struct irq_chip *chip = &riscv_intc_chip;
unsigned long hartid;
+ int rc;
rc = riscv_of_parent_hartid(node, &hartid);
if (rc < 0) {
@@ -166,10 +233,17 @@ static int __init riscv_intc_init(struct device_node *node,
return 0;
}
- return riscv_intc_init_common(of_node_to_fwnode(node));
+ if (of_device_is_compatible(node, "andestech,cpu-intc")) {
+ riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
+ riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
+ chip = &andes_intc_chip;
+ }
+
+ return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
+IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
#ifdef CONFIG_ACPI
@@ -196,7 +270,7 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
- return riscv_intc_init_common(fn);
+ return riscv_intc_init_common(fn, &riscv_intc_chip);
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 5b7bc4fd9517..f3d4cb9e34f7 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 SiFive
* Copyright (C) 2018 Christoph Hellwig
*/
-#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -64,6 +63,7 @@
#define PLIC_QUIRK_EDGE_INTERRUPT 0
struct plic_priv {
+ struct device *dev;
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
@@ -103,9 +103,11 @@ static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
- raw_spin_lock(&handler->enable_lock);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
__plic_toggle(handler->enable_base, hwirq, enable);
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
static inline void plic_irq_toggle(const struct cpumask *mask,
@@ -148,7 +150,13 @@ static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ if (unlikely(irqd_irq_disabled(d))) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+ } else {
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ }
}
#ifdef CONFIG_SMP
@@ -236,6 +244,7 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
static int plic_irq_suspend(void)
{
unsigned int i, cpu;
+ unsigned long flags;
u32 __iomem *reg;
struct plic_priv *priv;
@@ -253,12 +262,12 @@ static int plic_irq_suspend(void)
if (!handler->present)
continue;
- raw_spin_lock(&handler->enable_lock);
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
handler->enable_save[i] = readl(reg);
}
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
return 0;
@@ -267,6 +276,7 @@ static int plic_irq_suspend(void)
static void plic_irq_resume(void)
{
unsigned int i, index, cpu;
+ unsigned long flags;
u32 __iomem *reg;
struct plic_priv *priv;
@@ -284,12 +294,12 @@ static void plic_irq_resume(void)
if (!handler->present)
continue;
- raw_spin_lock(&handler->enable_lock);
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
writel(handler->enable_save[i], reg);
}
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
}
@@ -370,9 +380,10 @@ static void plic_handle_irq(struct irq_desc *desc)
while ((hwirq = readl(claim))) {
int err = generic_handle_domain_irq(handler->priv->irqdomain,
hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
- hwirq);
+ if (unlikely(err)) {
+ dev_warn_ratelimited(handler->priv->dev,
+ "can't find mapping for hwirq %lu\n", hwirq);
+ }
}
chained_irq_exit(chip, desc);
@@ -400,63 +411,122 @@ static int plic_starting_cpu(unsigned int cpu)
enable_percpu_irq(plic_parent_irq,
irq_get_trigger_type(plic_parent_irq));
else
- pr_warn("cpu%d: parent irq not available\n", cpu);
+ dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
}
-static int __init __plic_init(struct device_node *node,
- struct device_node *parent,
- unsigned long plic_quirks)
+static const struct of_device_id plic_match[] = {
+ { .compatible = "sifive,plic-1.0.0" },
+ { .compatible = "riscv,plic0" },
+ { .compatible = "andestech,nceplic100",
+ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ { .compatible = "thead,c900-plic",
+ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ {}
+};
+
+static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
+ u32 *nr_irqs, u32 *nr_contexts)
{
- int error = 0, nr_contexts, nr_handlers = 0, i;
- u32 nr_irqs;
- struct plic_priv *priv;
- struct plic_handler *handler;
- unsigned int cpu;
+ struct device *dev = &pdev->dev;
+ int rc;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ /*
+ * Currently, only OF fwnode is supported so extend this
+ * function for ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
- priv->plic_quirks = plic_quirks;
+ rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
+ if (rc) {
+ dev_err(dev, "riscv,ndev property not available\n");
+ return rc;
+ }
- priv->regs = of_iomap(node, 0);
- if (WARN_ON(!priv->regs)) {
- error = -EIO;
- goto out_free_priv;
+ *nr_contexts = of_irq_count(to_of_node(dev->fwnode));
+ if (WARN_ON(!(*nr_contexts))) {
+ dev_err(dev, "no PLIC context available\n");
+ return -EINVAL;
}
- error = -EINVAL;
- of_property_read_u32(node, "riscv,ndev", &nr_irqs);
- if (WARN_ON(!nr_irqs))
- goto out_iounmap;
+ return 0;
+}
- priv->nr_irqs = nr_irqs;
+static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
+ u32 *parent_hwirq, int *parent_cpu)
+{
+ struct device *dev = &pdev->dev;
+ struct of_phandle_args parent;
+ unsigned long hartid;
+ int rc;
- priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
- if (!priv->prio_save)
- goto out_free_priority_reg;
+ /*
+ * Currently, only OF fwnode is supported so extend this
+ * function for ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
- nr_contexts = of_irq_count(node);
- if (WARN_ON(!nr_contexts))
- goto out_free_priority_reg;
+ rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
+ if (rc)
+ return rc;
- error = -ENOMEM;
- priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, priv);
- if (WARN_ON(!priv->irqdomain))
- goto out_free_priority_reg;
+ rc = riscv_of_parent_hartid(parent.np, &hartid);
+ if (rc)
+ return rc;
- for (i = 0; i < nr_contexts; i++) {
- struct of_phandle_args parent;
- irq_hw_number_t hwirq;
- int cpu;
- unsigned long hartid;
+ *parent_hwirq = parent.args[0];
+ *parent_cpu = riscv_hartid_to_cpuid(hartid);
+ return 0;
+}
+
+static int plic_probe(struct platform_device *pdev)
+{
+ int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
+ struct device *dev = &pdev->dev;
+ unsigned long plic_quirks = 0;
+ struct plic_handler *handler;
+ u32 nr_irqs, parent_hwirq;
+ struct irq_domain *domain;
+ struct plic_priv *priv;
+ irq_hw_number_t hwirq;
+ bool cpuhp_setup;
- if (of_irq_parse_one(node, i, &parent)) {
- pr_err("failed to parse parent for context %d.\n", i);
+ if (is_of_node(dev->fwnode)) {
+ const struct of_device_id *id;
+
+ id = of_match_node(plic_match, to_of_node(dev->fwnode));
+ if (id)
+ plic_quirks = (unsigned long)id->data;
+ }
+
+ error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
+ if (error)
+ return error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->plic_quirks = plic_quirks;
+ priv->nr_irqs = nr_irqs;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(!priv->regs))
+ return -EIO;
+
+ priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
+ if (!priv->prio_save)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_contexts; i++) {
+ error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
+ if (error) {
+ dev_warn(dev, "hwirq for context%d not found\n", i);
continue;
}
@@ -464,7 +534,7 @@ static int __init __plic_init(struct device_node *node,
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != RV_IRQ_EXT) {
+ if (parent_hwirq != RV_IRQ_EXT) {
/* Disable S-mode enable bits if running in M-mode. */
if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
void __iomem *enable_base = priv->regs +
@@ -477,24 +547,17 @@ static int __init __plic_init(struct device_node *node,
continue;
}
- error = riscv_of_parent_hartid(parent.np, &hartid);
- if (error < 0) {
- pr_warn("failed to parse hart ID for context %d.\n", i);
- continue;
- }
-
- cpu = riscv_hartid_to_cpuid(hartid);
if (cpu < 0) {
- pr_warn("Invalid cpuid for context %d\n", i);
+ dev_warn(dev, "Invalid cpuid for context %d\n", i);
continue;
}
/* Find parent domain and register chained handler */
- if (!plic_parent_irq && irq_find_host(parent.np)) {
- plic_parent_irq = irq_of_parse_and_map(node, i);
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (!plic_parent_irq && domain) {
+ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq,
- plic_handle_irq);
+ irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
}
/*
@@ -504,7 +567,7 @@ static int __init __plic_init(struct device_node *node,
*/
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
- pr_warn("handler already present for context %d.\n", i);
+ dev_warn(dev, "handler already present for context %d.\n", i);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
@@ -518,10 +581,10 @@ static int __init __plic_init(struct device_node *node,
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
- handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
- sizeof(*handler->enable_save), GFP_KERNEL);
+ handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
+ sizeof(*handler->enable_save), GFP_KERNEL);
if (!handler->enable_save)
- goto out_free_enable_reg;
+ goto fail_cleanup_contexts;
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
@@ -531,52 +594,60 @@ done:
nr_handlers++;
}
+ priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
+ if (WARN_ON(!priv->irqdomain))
+ goto fail_cleanup_contexts;
+
/*
* We can have multiple PLIC instances so setup cpuhp state
- * and register syscore operations only when context handler
- * for current/boot CPU is present.
+ * and register syscore operations only once after context
+ * handlers of all online CPUs are initialized.
*/
- handler = this_cpu_ptr(&plic_handlers);
- if (handler->present && !plic_cpuhp_setup_done) {
- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
- "irqchip/sifive/plic:starting",
- plic_starting_cpu, plic_dying_cpu);
- register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
+ if (!plic_cpuhp_setup_done) {
+ cpuhp_setup = true;
+ for_each_online_cpu(cpu) {
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (!handler->present) {
+ cpuhp_setup = false;
+ break;
+ }
+ }
+ if (cpuhp_setup) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
+ register_syscore_ops(&plic_irq_syscore_ops);
+ plic_cpuhp_setup_done = true;
+ }
}
- pr_info("%pOFP: mapped %d interrupts with %d handlers for"
- " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
+ dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
+ nr_irqs, nr_handlers, nr_contexts);
return 0;
-out_free_enable_reg:
- for_each_cpu(cpu, cpu_present_mask) {
+fail_cleanup_contexts:
+ for (i = 0; i < nr_contexts; i++) {
+ if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
+ continue;
+ if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
+ continue;
+
handler = per_cpu_ptr(&plic_handlers, cpu);
- kfree(handler->enable_save);
+ handler->present = false;
+ handler->hart_base = NULL;
+ handler->enable_base = NULL;
+ handler->enable_save = NULL;
+ handler->priv = NULL;
}
-out_free_priority_reg:
- kfree(priv->prio_save);
-out_iounmap:
- iounmap(priv->regs);
-out_free_priv:
- kfree(priv);
- return error;
+ return -ENOMEM;
}
-static int __init plic_init(struct device_node *node,
- struct device_node *parent)
-{
- return __plic_init(node, parent, 0);
-}
-
-IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
-IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
-
-static int __init plic_edge_init(struct device_node *node,
- struct device_node *parent)
-{
- return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT));
-}
-
-IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init);
-IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init);
+static struct platform_driver plic_driver = {
+ .driver = {
+ .name = "riscv-plic",
+ .of_match_table = plic_match,
+ },
+ .probe = plic_probe,
+};
+builtin_platform_driver(plic_driver);
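The enable_lock accesses above switch to the irqsave variants so the toggle helper is safe both from the EOI path (interrupts already off) and from probe/suspend/resume paths where they may be on. A sketch of the locking pattern with placeholder names:

#include <linux/io.h>
#include <linux/spinlock.h>

struct foo_handler {
	raw_spinlock_t	enable_lock;
	void __iomem	*enable_base;
};

static void foo_toggle(struct foo_handler *handler, int hwirq, int enable)
{
	void __iomem *reg = handler->enable_base + (hwirq / 32) * 4;
	u32 mask = BIT(hwirq % 32);
	unsigned long flags;

	/* irqsave/irqrestore keep the helper callable from any context,
	 * whether or not the caller already disabled interrupts. */
	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	if (enable)
		writel(readl(reg) | mask, reg);
	else
		writel(readl(reg) & ~mask, reg);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}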
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
new file mode 100644
index 000000000000..0f5837176e53
--- /dev/null
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH8100 External Interrupt Controller driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ */
+
+#define pr_fmt(fmt) "irq-starfive-jh8100: " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define STARFIVE_INTC_SRC0_CLEAR 0x10
+#define STARFIVE_INTC_SRC0_MASK 0x14
+#define STARFIVE_INTC_SRC0_INT 0x1c
+
+#define STARFIVE_INTC_SRC_IRQ_NUM 32
+
+struct starfive_irq_chip {
+ void __iomem *base;
+ struct irq_domain *domain;
+ raw_spinlock_t lock;
+};
+
+static void starfive_intc_bit_set(struct starfive_irq_chip *irqc,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(irqc->base + reg);
+ value |= bit_mask;
+ iowrite32(value, irqc->base + reg);
+}
+
+static void starfive_intc_bit_clear(struct starfive_irq_chip *irqc,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(irqc->base + reg);
+ value &= ~bit_mask;
+ iowrite32(value, irqc->base + reg);
+}
+
+static void starfive_intc_unmask(struct irq_data *d)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&irqc->lock);
+ starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq));
+ raw_spin_unlock(&irqc->lock);
+}
+
+static void starfive_intc_mask(struct irq_data *d)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&irqc->lock);
+ starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq));
+ raw_spin_unlock(&irqc->lock);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "StarFive JH8100 INTC",
+ .irq_unmask = starfive_intc_unmask,
+ .irq_mask = starfive_intc_mask,
+};
+
+static int starfive_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_domain_set_info(d, irq, hwirq, &intc_dev, d->host_data,
+ handle_level_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops starfive_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = starfive_intc_map,
+};
+
+static void starfive_intc_irq_handler(struct irq_desc *desc)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_handler_data(&desc->irq_data);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long value;
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ value = ioread32(irqc->base + STARFIVE_INTC_SRC0_INT);
+ while (value) {
+ hwirq = ffs(value) - 1;
+
+ generic_handle_domain_irq(irqc->domain, hwirq);
+
+ starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq));
+ starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq));
+
+ __clear_bit(hwirq, &value);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int __init starfive_intc_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ struct starfive_irq_chip *irqc;
+ struct reset_control *rst;
+ struct clk *clk;
+ int parent_irq;
+ int ret;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->base = of_iomap(intc, 0);
+ if (!irqc->base) {
+ pr_err("Unable to map registers\n");
+ ret = -ENXIO;
+ goto err_free;
+ }
+
+ rst = of_reset_control_get_exclusive(intc, NULL);
+ if (IS_ERR(rst)) {
+ pr_err("Unable to get reset control %pe\n", rst);
+ ret = PTR_ERR(rst);
+ goto err_unmap;
+ }
+
+ clk = of_clk_get(intc, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Unable to get clock %pe\n", clk);
+ ret = PTR_ERR(clk);
+ goto err_reset_put;
+ }
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ goto err_clk_put;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_reset_assert;
+
+ raw_spin_lock_init(&irqc->lock);
+
+ irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM,
+ &starfive_intc_domain_ops, irqc);
+ if (!irqc->domain) {
+ pr_err("Unable to create IRQ domain\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ parent_irq = of_irq_get(intc, 0);
+ if (parent_irq < 0) {
+ pr_err("Failed to get main IRQ: %d\n", parent_irq);
+ ret = parent_irq;
+ goto err_remove_domain;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq, starfive_intc_irq_handler,
+ irqc);
+
+ pr_info("Interrupt controller register, nr_irqs %d\n",
+ STARFIVE_INTC_SRC_IRQ_NUM);
+
+ return 0;
+
+err_remove_domain:
+ irq_domain_remove(irqc->domain);
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_reset_assert:
+ reset_control_assert(rst);
+err_clk_put:
+ clk_put(clk);
+err_reset_put:
+ reset_control_put(rst);
+err_unmap:
+ iounmap(irqc->base);
+err_free:
+ kfree(irqc);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc)
+IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_init)
+IRQCHIP_PLATFORM_DRIVER_END(starfive_intc)
+
+MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 971240e2e31b..26a5193d0ae4 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -898,10 +898,9 @@ static void stm32_exti_remove_irq(void *data)
irq_domain_remove(domain);
}
-static int stm32_exti_remove(struct platform_device *pdev)
+static void stm32_exti_remove(struct platform_device *pdev)
{
stm32_exti_h_syscore_deinit();
- return 0;
}
static int stm32_exti_probe(struct platform_device *pdev)
@@ -991,10 +990,10 @@ MODULE_DEVICE_TABLE(of, stm32_exti_ids);
static struct platform_driver stm32_exti_driver = {
.probe = stm32_exti_probe,
- .remove = stm32_exti_remove,
+ .remove_new = stm32_exti_remove,
.driver = {
- .name = "stm32_exti",
- .of_match_table = stm32_exti_ids,
+ .name = "stm32_exti",
+ .of_match_table = stm32_exti_ids,
},
};
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index b2d61d4f6fe6..57f610dab6b8 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -139,13 +139,11 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return 0;
}
-static int ts4800_ic_remove(struct platform_device *pdev)
+static void ts4800_ic_remove(struct platform_device *pdev)
{
struct ts4800_irq_data *data = platform_get_drvdata(pdev);
irq_domain_remove(data->domain);
-
- return 0;
}
static const struct of_device_id ts4800_ic_of_match[] = {
@@ -155,11 +153,11 @@ static const struct of_device_id ts4800_ic_of_match[] = {
MODULE_DEVICE_TABLE(of, ts4800_ic_of_match);
static struct platform_driver ts4800_ic_driver = {
- .probe = ts4800_ic_probe,
- .remove = ts4800_ic_remove,
+ .probe = ts4800_ic_probe,
+ .remove_new = ts4800_ic_remove,
.driver = {
- .name = "ts4800-irqc",
- .of_match_table = ts4800_ic_of_match,
+ .name = "ts4800-irqc",
+ .of_match_table = ts4800_ic_of_match,
},
};
module_platform_driver(ts4800_ic_driver);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 9e3d5561e04e..ea93e7236c4a 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -47,9 +47,8 @@
/**
* struct vic_device - VIC PM device
- * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0.
- * @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
+ * @irq: The IRQ number for the base of the VIC.
* @valid_sources: A bitmask of valid interrupts
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e80d7bd3c4d..3ed257334562 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -49,7 +49,9 @@ MODULE_LICENSE("GPL");
/* -------- driver information -------------------------------------- */
static DEFINE_MUTEX(capi_mutex);
-static struct class *capi_class;
+static const struct class capi_class = {
+ .name = "capi",
+};
static int capi_major = 68; /* allocated */
module_param_named(major, capi_major, uint, 0);
@@ -1393,18 +1395,19 @@ static int __init capi_init(void)
kcapi_exit();
return major_ret;
}
- capi_class = class_create("capi");
- if (IS_ERR(capi_class)) {
+
+ ret = class_register(&capi_class);
+ if (ret) {
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
- return PTR_ERR(capi_class);
+ return ret;
}
- device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
+ device_create(&capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
if (capinc_tty_init() < 0) {
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
return -ENOMEM;
@@ -1427,8 +1430,8 @@ static void __exit capi_exit(void)
{
proc_exit();
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
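
This capi.c change and the dsp_pipeline.c change that follows use the same recipe: replace a runtime-allocated class with a file-scope const struct class, switch class_create()/class_destroy() to class_register()/class_unregister(), and pass the class by address to device_create()/device_destroy(). A minimal sketch of the pattern with an invented "demo" class (the dev_t allocation is assumed to happen elsewhere):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static const struct class demo_class = {
        .name = "demo",
};

static dev_t demo_devt;         /* assumed to be allocated elsewhere */

static int __init demo_init(void)
{
        struct device *dev;
        int ret;

        ret = class_register(&demo_class);
        if (ret)
                return ret;

        dev = device_create(&demo_class, NULL, demo_devt, NULL, "demo0");
        if (IS_ERR(dev)) {
                class_unregister(&demo_class);
                return PTR_ERR(dev);
        }

        return 0;
}

static void __exit demo_exit(void)
{
        device_destroy(&demo_class, demo_devt);
        class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Sketch: statically defined struct class");
MODULE_LICENSE("GPL");
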
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 09b72f14d4b7..b4ed0bb8ddfb 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -31,7 +31,9 @@ struct dsp_element_entry {
static LIST_HEAD(dsp_elements);
/* sysfs */
-static struct class *elements_class;
+static const struct class elements_class = {
+ .name = "dsp_pipeline",
+};
static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
@@ -80,7 +82,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
- entry->dev.class = elements_class;
+ entry->dev.class = &elements_class;
entry->dev.release = mISDN_dsp_dev_release;
dev_set_drvdata(&entry->dev, elem);
dev_set_name(&entry->dev, "%s", elem->name);
@@ -131,9 +133,11 @@ EXPORT_SYMBOL(mISDN_dsp_element_unregister);
int dsp_pipeline_module_init(void)
{
- elements_class = class_create("dsp_pipeline");
- if (IS_ERR(elements_class))
- return PTR_ERR(elements_class);
+ int err;
+
+ err = class_register(&elements_class);
+ if (err)
+ return err;
dsp_hwec_init();
@@ -146,7 +150,7 @@ void dsp_pipeline_module_exit(void)
dsp_hwec_exit();
- class_destroy(elements_class);
+ class_unregister(&elements_class);
list_for_each_entry_safe(entry, n, &dsp_elements, list) {
list_del(&entry->list);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d721b254e1e4..05e6af88b88c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -6,6 +6,12 @@ config LEDS_GPIO_REGISTER
As this function is used by arch code it must not be compiled as a
module.
+# This library does not depend on NEW_LEDS and must be independent so it can be
+# selected from other subsystems (specifically backlight).
+config LEDS_EXPRESSWIRE
+ bool
+ depends on GPIOLIB
+
menuconfig NEW_LEDS
bool "LED Support"
help
@@ -186,6 +192,10 @@ config LEDS_EL15203000
To compile this driver as a module, choose M here: the module
will be called leds-el15203000.
+config LEDS_EXPRESSWIRE
+ bool
+ depends on GPIOLIB
+
config LEDS_TURRIS_OMNIA
tristate "LED support for CZ.NIC's Turris Omnia"
depends on LEDS_CLASS_MULTICOLOR
@@ -395,7 +405,7 @@ config LEDS_LP3952
config LEDS_LP50XX
tristate "LED Support for TI LP5036/30/24/18/12/09 LED driver chip"
depends on LEDS_CLASS && REGMAP_I2C
- depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on LEDS_CLASS_MULTICOLOR
help
If you say yes here you get support for the Texas Instruments
LP5036, LP5030, LP5024, LP5018, LP5012 and LP5009 LED driver.
@@ -406,7 +416,7 @@ config LEDS_LP50XX
config LEDS_LP55XX_COMMON
tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
depends on LEDS_CLASS
- depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on LEDS_CLASS_MULTICOLOR
depends on OF
depends on I2C
select FW_LOADER
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index ce07dc295ff0..effdfc6f1e95 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -91,6 +91,9 @@ obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
+# Kinetic ExpressWire Protocol
+obj-$(CONFIG_LEDS_EXPRESSWIRE) += leds-expresswire.o
+
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index 4e08dbc05709..809b6d98bb3e 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -23,7 +23,8 @@ config LEDS_AS3645A
config LEDS_KTD2692
tristate "LED support for Kinetic KTD2692 flash LED controller"
depends on OF
- depends on GPIOLIB || COMPILE_TEST
+ depends on GPIOLIB
+ select LEDS_EXPRESSWIRE
help
This option enables support for Kinetic KTD2692 LED flash connected
through ExpressWire interface.
@@ -51,8 +52,8 @@ config LEDS_MAX77693
config LEDS_MT6360
tristate "LED Support for Mediatek MT6360 PMIC"
depends on LEDS_CLASS && OF
- depends on LEDS_CLASS_FLASH || !LEDS_CLASS_FLASH
- depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on LEDS_CLASS_FLASH
+ depends on LEDS_CLASS_MULTICOLOR
depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
depends on MFD_MT6360
help
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index 598eee5daa52..7bb0aa2753e3 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -6,9 +6,9 @@
* Ingi Kim <ingi2.kim@samsung.com>
*/
-#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
+#include <linux/leds-expresswire.h>
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -37,22 +37,9 @@
#define KTD2692_REG_FLASH_CURRENT_BASE 0x80
#define KTD2692_REG_MODE_BASE 0xA0
-/* Set bit coding time for expresswire interface */
-#define KTD2692_TIME_RESET_US 700
-#define KTD2692_TIME_DATA_START_TIME_US 10
-#define KTD2692_TIME_HIGH_END_OF_DATA_US 350
-#define KTD2692_TIME_LOW_END_OF_DATA_US 10
-#define KTD2692_TIME_SHORT_BITSET_US 4
-#define KTD2692_TIME_LONG_BITSET_US 12
-
/* KTD2692 default length of name */
#define KTD2692_NAME_LENGTH 20
-enum ktd2692_bitset {
- KTD2692_LOW = 0,
- KTD2692_HIGH,
-};
-
/* Movie / Flash Mode Control */
enum ktd2692_led_mode {
KTD2692_MODE_DISABLE = 0, /* default */
@@ -71,7 +58,19 @@ struct ktd2692_led_config_data {
enum led_brightness max_brightness;
};
+static const struct expresswire_timing ktd2692_timing = {
+ .poweroff_us = 700,
+ .data_start_us = 10,
+ .end_of_data_low_us = 10,
+ .end_of_data_high_us = 350,
+ .short_bitset_us = 4,
+ .long_bitset_us = 12
+};
+
struct ktd2692_context {
+ /* Common ExpressWire properties (ctrl GPIO and timing) */
+ struct expresswire_common_props props;
+
/* Related LED Flash class device */
struct led_classdev_flash fled_cdev;
@@ -80,7 +79,6 @@ struct ktd2692_context {
struct regulator *regulator;
struct gpio_desc *aux_gpio;
- struct gpio_desc *ctrl_gpio;
enum ktd2692_led_mode mode;
enum led_brightness torch_brightness;
@@ -92,67 +90,6 @@ static struct ktd2692_context *fled_cdev_to_led(
return container_of(fled_cdev, struct ktd2692_context, fled_cdev);
}
-static void ktd2692_expresswire_start(struct ktd2692_context *led)
-{
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_DATA_START_TIME_US);
-}
-
-static void ktd2692_expresswire_reset(struct ktd2692_context *led)
-{
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_RESET_US);
-}
-
-static void ktd2692_expresswire_end(struct ktd2692_context *led)
-{
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_LOW_END_OF_DATA_US);
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_HIGH_END_OF_DATA_US);
-}
-
-static void ktd2692_expresswire_set_bit(struct ktd2692_context *led, bool bit)
-{
- /*
- * The Low Bit(0) and High Bit(1) is based on a time detection
- * algorithm between time low and time high
- * Time_(L_LB) : Low time of the Low Bit(0)
- * Time_(H_LB) : High time of the LOW Bit(0)
- * Time_(L_HB) : Low time of the High Bit(1)
- * Time_(H_HB) : High time of the High Bit(1)
- *
- * It can be simplified to:
- * Low Bit(0) : 2 * Time_(H_LB) < Time_(L_LB)
- * High Bit(1) : 2 * Time_(L_HB) < Time_(H_HB)
- * HIGH ___ ____ _.. _________ ___
- * |_________| |_.. |____| |__|
- * LOW <L_LB> <H_LB> <L_HB> <H_HB>
- * [ Low Bit (0) ] [ High Bit(1) ]
- */
- if (bit) {
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_SHORT_BITSET_US);
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_LONG_BITSET_US);
- } else {
- gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
- udelay(KTD2692_TIME_LONG_BITSET_US);
- gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
- udelay(KTD2692_TIME_SHORT_BITSET_US);
- }
-}
-
-static void ktd2692_expresswire_write(struct ktd2692_context *led, u8 value)
-{
- int i;
-
- ktd2692_expresswire_start(led);
- for (i = 7; i >= 0; i--)
- ktd2692_expresswire_set_bit(led, value & BIT(i));
- ktd2692_expresswire_end(led);
-}
-
static int ktd2692_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -163,14 +100,14 @@ static int ktd2692_led_brightness_set(struct led_classdev *led_cdev,
if (brightness == LED_OFF) {
led->mode = KTD2692_MODE_DISABLE;
- gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+ gpiod_direction_output(led->aux_gpio, 0);
} else {
- ktd2692_expresswire_write(led, brightness |
+ expresswire_write_u8(&led->props, brightness |
KTD2692_REG_MOVIE_CURRENT_BASE);
led->mode = KTD2692_MODE_MOVIE;
}
- ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE);
+ expresswire_write_u8(&led->props, led->mode | KTD2692_REG_MODE_BASE);
mutex_unlock(&led->lock);
return 0;
@@ -187,17 +124,17 @@ static int ktd2692_led_flash_strobe_set(struct led_classdev_flash *fled_cdev,
if (state) {
flash_tm_reg = GET_TIMEOUT_OFFSET(timeout->val, timeout->step);
- ktd2692_expresswire_write(led, flash_tm_reg
+ expresswire_write_u8(&led->props, flash_tm_reg
| KTD2692_REG_FLASH_TIMEOUT_BASE);
led->mode = KTD2692_MODE_FLASH;
- gpiod_direction_output(led->aux_gpio, KTD2692_HIGH);
+ gpiod_direction_output(led->aux_gpio, 1);
} else {
led->mode = KTD2692_MODE_DISABLE;
- gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+ gpiod_direction_output(led->aux_gpio, 0);
}
- ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE);
+ expresswire_write_u8(&led->props, led->mode | KTD2692_REG_MODE_BASE);
fled_cdev->led_cdev.brightness = LED_OFF;
led->mode = KTD2692_MODE_DISABLE;
@@ -247,12 +184,12 @@ static void ktd2692_init_flash_timeout(struct led_classdev_flash *fled_cdev,
static void ktd2692_setup(struct ktd2692_context *led)
{
led->mode = KTD2692_MODE_DISABLE;
- ktd2692_expresswire_reset(led);
- gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+ expresswire_power_off(&led->props);
+ gpiod_direction_output(led->aux_gpio, 0);
- ktd2692_expresswire_write(led, (KTD2692_MM_MIN_CURR_THRESHOLD_SCALE - 1)
+ expresswire_write_u8(&led->props, (KTD2692_MM_MIN_CURR_THRESHOLD_SCALE - 1)
| KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE);
- ktd2692_expresswire_write(led, KTD2692_FLASH_MODE_CURR_PERCENT(45)
+ expresswire_write_u8(&led->props, KTD2692_FLASH_MODE_CURR_PERCENT(45)
| KTD2692_REG_FLASH_CURRENT_BASE);
}
@@ -277,8 +214,8 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
if (!np)
return -ENXIO;
- led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
- ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+ led->props.ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(led->props.ctrl_gpio);
if (ret)
return dev_err_probe(dev, ret, "cannot get ctrl-gpios\n");
@@ -412,6 +349,7 @@ static struct platform_driver ktd2692_driver = {
module_platform_driver(ktd2692_driver);
+MODULE_IMPORT_NS(EXPRESSWIRE);
MODULE_AUTHOR("Ingi Kim <ingi2.kim@samsung.com>");
MODULE_DESCRIPTION("Kinetic KTD2692 LED driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index 8191be0ef0c6..7e93c447fec5 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -70,12 +70,11 @@ enum lm3601x_type {
};
/**
- * struct lm3601x_led -
+ * struct lm3601x_led - private lm3601x LED data
* @fled_cdev: flash LED class device pointer
* @client: Pointer to the I2C client
* @regmap: Devices register map
* @lock: Lock for reading/writing the device
- * @led_name: LED label for the Torch or IR LED
* @flash_timeout: the timeout for the flash
* @last_flag: last known flags register value
* @torch_current_max: maximum current for the torch
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
index eb648ff54b4e..db0ac6641954 100644
--- a/drivers/leds/flash/leds-sgm3140.c
+++ b/drivers/leds/flash/leds-sgm3140.c
@@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
"failed to enable regulator: %d\n", ret);
return ret;
}
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
gpiod_set_value_cansleep(priv->enable_gpio, 1);
} else {
+ del_timer_sync(&priv->powerdown_timer);
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
gpiod_set_value_cansleep(priv->enable_gpio, 0);
ret = regulator_disable(priv->vin_regulator);
if (ret) {
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index ba1be15cfd8e..24fcff682b24 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -552,6 +552,12 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
+ /*
+ * If no default trigger was given and hw_control_trigger is set,
+ * make it the default trigger.
+ */
+ if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
+ led_cdev->default_trigger = led_cdev->hw_control_trigger;
led_trigger_set_default(led_cdev);
#endif
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index bd59a14a4a90..0f5ac30053ad 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -23,7 +23,7 @@
* Nests outside led_cdev->trigger_lock
*/
static DECLARE_RWSEM(triggers_list_lock);
-LIST_HEAD(trigger_list);
+static LIST_HEAD(trigger_list);
/* Used by LED Class */
@@ -247,9 +247,23 @@ void led_trigger_remove(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_trigger_remove);
+static bool led_match_default_trigger(struct led_classdev *led_cdev,
+ struct led_trigger *trig)
+{
+ if (!strcmp(led_cdev->default_trigger, trig->name) &&
+ trigger_relevant(led_cdev, trig)) {
+ led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
+ led_trigger_set(led_cdev, trig);
+ return true;
+ }
+
+ return false;
+}
+
void led_trigger_set_default(struct led_classdev *led_cdev)
{
struct led_trigger *trig;
+ bool found = false;
if (!led_cdev->default_trigger)
return;
@@ -257,15 +271,19 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
- if (!strcmp(led_cdev->default_trigger, trig->name) &&
- trigger_relevant(led_cdev, trig)) {
- led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
- led_trigger_set(led_cdev, trig);
+ found = led_match_default_trigger(led_cdev, trig);
+ if (found)
break;
- }
}
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
+
+ /*
+	 * If the default trigger wasn't found, the trigger module may not be
+	 * loaded yet. Once loaded, it will re-probe against all led_cdevs.
+ */
+ if (!found)
+ request_module_nowait("ledtrig:%s", led_cdev->default_trigger);
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
@@ -297,12 +315,8 @@ int led_trigger_register(struct led_trigger *trig)
down_read(&leds_list_lock);
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
- if (!led_cdev->trigger && led_cdev->default_trigger &&
- !strcmp(led_cdev->default_trigger, trig->name) &&
- trigger_relevant(led_cdev, trig)) {
- led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
- led_trigger_set(led_cdev, trig);
- }
+ if (!led_cdev->trigger && led_cdev->default_trigger)
+ led_match_default_trigger(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
}
up_read(&leds_list_lock);
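
The request_module_nowait("ledtrig:%s", ...) call only helps if trigger modules expose a matching alias, which is exactly what the MODULE_ALIAS("ledtrig:...") lines added to ledtrig-audio and ledtrig-default-on later in this series provide. A minimal sketch of a trigger module that this autoload path could find (trigger name and behaviour are invented):

#include <linux/leds.h>
#include <linux/module.h>

static int demo_trig_activate(struct led_classdev *led_cdev)
{
        led_set_brightness(led_cdev, led_cdev->max_brightness);
        return 0;
}

static void demo_trig_deactivate(struct led_classdev *led_cdev)
{
        led_set_brightness(led_cdev, LED_OFF);
}

static struct led_trigger demo_led_trigger = {
        .name = "demo",
        .activate = demo_trig_activate,
        .deactivate = demo_trig_deactivate,
};
module_led_trigger(demo_led_trigger);

/* Matches request_module_nowait("ledtrig:%s", "demo") in led_trigger_set_default(). */
MODULE_ALIAS("ledtrig:demo");
MODULE_DESCRIPTION("Sketch of an autoloadable LED trigger");
MODULE_LICENSE("GPL");
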
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index f584a7f98fc5..6c8c9f2c19e3 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -282,7 +282,7 @@ static int aw200xx_set_imax(const struct aw200xx *const chip,
u32 led_imax_uA)
{
u32 g_imax_uA = aw200xx_imax_to_global(chip, led_imax_uA);
- u32 coeff_table[] = {1, 2, 3, 4, 6, 8, 12, 16};
+ static const u32 coeff_table[] = {1, 2, 3, 4, 6, 8, 12, 16};
u32 gccr_imax = UINT_MAX;
u32 cur_imax = 0;
int i;
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 91f44b23cb11..17235a5e576a 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -405,6 +405,7 @@ error_reg:
chip->regulators);
error:
+ mutex_unlock(&chip->mutex);
mutex_destroy(&chip->mutex);
return ret;
}
diff --git a/drivers/leds/leds-expresswire.c b/drivers/leds/leds-expresswire.c
new file mode 100644
index 000000000000..e4937a8e0f44
--- /dev/null
+++ b/drivers/leds/leds-expresswire.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared library for Kinetic's ExpressWire protocol.
+ * This protocol works by pulsing the ExpressWire IC's control GPIO.
+ * ktd2692 and ktd2801 are known to use this protocol.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/types.h>
+
+#include <linux/leds-expresswire.h>
+
+void expresswire_power_off(struct expresswire_common_props *props)
+{
+ gpiod_set_value_cansleep(props->ctrl_gpio, 0);
+ usleep_range(props->timing.poweroff_us, props->timing.poweroff_us * 2);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_power_off, EXPRESSWIRE);
+
+void expresswire_enable(struct expresswire_common_props *props)
+{
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.detect_delay_us);
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.detect_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_enable, EXPRESSWIRE);
+
+void expresswire_start(struct expresswire_common_props *props)
+{
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.data_start_us);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_start, EXPRESSWIRE);
+
+void expresswire_end(struct expresswire_common_props *props)
+{
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.end_of_data_low_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.end_of_data_high_us);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_end, EXPRESSWIRE);
+
+void expresswire_set_bit(struct expresswire_common_props *props, bool bit)
+{
+ if (bit) {
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.short_bitset_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.long_bitset_us);
+ } else {
+ gpiod_set_value(props->ctrl_gpio, 0);
+ udelay(props->timing.long_bitset_us);
+ gpiod_set_value(props->ctrl_gpio, 1);
+ udelay(props->timing.short_bitset_us);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_set_bit, EXPRESSWIRE);
+
+void expresswire_write_u8(struct expresswire_common_props *props, u8 val)
+{
+ expresswire_start(props);
+ for (int i = 7; i >= 0; i--)
+ expresswire_set_bit(props, val & BIT(i));
+ expresswire_end(props);
+}
+EXPORT_SYMBOL_NS_GPL(expresswire_write_u8, EXPRESSWIRE);
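
The accompanying include/linux/leds-expresswire.h header is not part of the hunks shown here. Inferring only from the fields and helpers used in leds-expresswire.c and leds-ktd2692.c, a declaration along the following lines is implied; treat this as a reconstruction, not the actual header:

/* Inferred from the usage in leds-expresswire.c and leds-ktd2692.c above. */
#ifndef _LEDS_EXPRESSWIRE_H
#define _LEDS_EXPRESSWIRE_H

#include <linux/types.h>

struct gpio_desc;

struct expresswire_timing {
        u32 poweroff_us;
        u32 detect_delay_us;
        u32 detect_us;
        u32 data_start_us;
        u32 end_of_data_low_us;
        u32 end_of_data_high_us;
        u32 short_bitset_us;
        u32 long_bitset_us;
};

struct expresswire_common_props {
        struct gpio_desc *ctrl_gpio;
        struct expresswire_timing timing;
};

void expresswire_power_off(struct expresswire_common_props *props);
void expresswire_enable(struct expresswire_common_props *props);
void expresswire_start(struct expresswire_common_props *props);
void expresswire_end(struct expresswire_common_props *props);
void expresswire_set_bit(struct expresswire_common_props *props, bool bit);
void expresswire_write_u8(struct expresswire_common_props *props, u8 val);

#endif /* _LEDS_EXPRESSWIRE_H */
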
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
index 1355c84a2919..718f55096e90 100644
--- a/drivers/leds/leds-mlxcpld.c
+++ b/drivers/leds/leds-mlxcpld.c
@@ -77,7 +77,7 @@ struct mlxcpld_param {
/**
* struct mlxcpld_led_priv - LED private data:
- * @cled: LED class device instance
+ * @cdev: LED class device instance
* @param: LED CPLD access parameters
**/
struct mlxcpld_led_priv {
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index d8e3d5d8d2d0..5595788d98d2 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -29,7 +29,6 @@
* @data: led configuration data;
* @led_cdev: led class data;
* @base_color: base led color (other colors have constant offset from base);
- * @led_data: led data;
* @data_parent: pointer to private device control data of parent;
* @led_cdev_name: class device name
*/
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 47223c850e4b..b53905da3592 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -39,6 +39,7 @@
#define PCA963X_LED_PWM 0x2 /* Controlled through PWM */
#define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
+#define PCA963X_MODE1_SLEEP 0x04 /* Normal mode or Low Power mode, oscillator off */
#define PCA963X_MODE2_OUTDRV 0x04 /* Open-drain or totem pole */
#define PCA963X_MODE2_INVRT 0x10 /* Normal or inverted direction */
#define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */
@@ -380,6 +381,32 @@ err:
return ret;
}
+static int pca963x_suspend(struct device *dev)
+{
+ struct pca963x *chip = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = i2c_smbus_read_byte_data(chip->client, PCA963X_MODE1);
+ reg = reg | BIT(PCA963X_MODE1_SLEEP);
+ i2c_smbus_write_byte_data(chip->client, PCA963X_MODE1, reg);
+
+ return 0;
+}
+
+static int pca963x_resume(struct device *dev)
+{
+ struct pca963x *chip = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = i2c_smbus_read_byte_data(chip->client, PCA963X_MODE1);
+ reg = reg & ~BIT(PCA963X_MODE1_SLEEP);
+ i2c_smbus_write_byte_data(chip->client, PCA963X_MODE1, reg);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pca963x_pm, pca963x_suspend, pca963x_resume);
+
static const struct of_device_id of_pca963x_match[] = {
{ .compatible = "nxp,pca9632", },
{ .compatible = "nxp,pca9633", },
@@ -430,6 +457,7 @@ static struct i2c_driver pca963x_driver = {
.driver = {
.name = "leds-pca963x",
.of_match_table = of_pca963x_match,
+		.pm = pm_sleep_ptr(&pca963x_pm),
},
.probe = pca963x_probe,
.id_table = pca963x_id,
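
The new suspend/resume callbacks are attached with DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr(), which lets the compiler drop both the ops structure and the callbacks when CONFIG_PM_SLEEP is disabled, without any #ifdef. A stripped-down sketch of the same wiring for an invented I2C device:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
        /* Put the device into its low-power state here. */
        return 0;
}

static int demo_resume(struct device *dev)
{
        /* Bring the device back to normal operation here. */
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm, demo_suspend, demo_resume);

static const struct i2c_device_id demo_id[] = {
        { "demo" },
        { }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static int demo_probe(struct i2c_client *client)
{
        return 0;
}

static struct i2c_driver demo_driver = {
        .driver = {
                .name = "demo",
                /* Resolves to NULL when CONFIG_PM_SLEEP=n. */
                .pm = pm_sleep_ptr(&demo_pm),
        },
        .probe = demo_probe,
        .id_table = demo_id,
};
module_i2c_driver(demo_driver);

MODULE_DESCRIPTION("Sketch: dev PM ops with pm_sleep_ptr()");
MODULE_LICENSE("GPL");
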
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 9d91f21842f2..96296db5f410 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -83,7 +83,7 @@ static int spi_byte_probe(struct spi_device *spi)
struct device_node *child;
struct device *dev = &spi->dev;
struct spi_byte_led *led;
- const char *name = "leds-spi-byte::";
+ struct led_init_data init_data = {};
const char *state;
int ret;
@@ -97,12 +97,9 @@ static int spi_byte_probe(struct spi_device *spi)
if (!led)
return -ENOMEM;
- of_property_read_string(child, "label", &name);
- strscpy(led->name, name, sizeof(led->name));
led->spi = spi;
mutex_init(&led->mutex);
led->cdef = device_get_match_data(dev);
- led->ldev.name = led->name;
led->ldev.brightness = LED_OFF;
led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
@@ -120,7 +117,11 @@ static int spi_byte_probe(struct spi_device *spi)
spi_byte_brightness_set_blocking(&led->ldev,
led->ldev.brightness);
- ret = devm_led_classdev_register(&spi->dev, &led->ldev);
+ init_data.fwnode = of_fwnode_handle(child);
+ init_data.devicename = "leds-spi-byte";
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(&spi->dev, &led->ldev, &init_data);
if (ret) {
mutex_destroy(&led->mutex);
return ret;
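
Rather than building the LED name by hand from a "label" property, the driver now hands the firmware node to the LED core through struct led_init_data, falling back to a "leds-spi-byte:" style name when the node carries no naming information. A condensed sketch of that registration pattern, detached from this particular driver (names are placeholders):

#include <linux/device.h>
#include <linux/leds.h>
#include <linux/property.h>

/*
 * 'dev' is the parent device, 'child' the firmware child node describing one
 * LED and 'cdev' a zeroed struct led_classdev with its ops already filled in.
 */
static int demo_register_led(struct device *dev, struct fwnode_handle *child,
                             struct led_classdev *cdev)
{
        struct led_init_data init_data = {};

        init_data.fwnode = child;       /* name pieces come from the node */
        init_data.devicename = "demo";  /* fallback pieces when the node */
        init_data.default_label = ":";  /* provides no naming properties */

        return devm_led_classdev_register_ext(dev, cdev, &init_data);
}
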
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 345062ccabda..1138e2ab82e5 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -30,7 +30,6 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
-extern struct list_head trigger_list;
extern const char * const led_colors[LED_COLOR_ID_MAX];
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
index e66bd21b9852..8fc12d6a2958 100644
--- a/drivers/leds/rgb/Kconfig
+++ b/drivers/leds/rgb/Kconfig
@@ -27,6 +27,17 @@ config LEDS_KTD202X
To compile this driver as a module, choose M here: the module
will be called leds-ktd202x.
+config LEDS_NCP5623
+ tristate "LED support for NCP5623"
+ depends on I2C
+ depends on OF
+ help
+ This option enables support for ON semiconductor NCP5623
+ Triple Output I2C Controlled RGB LED Driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-ncp5623.
+
config LEDS_PWM_MULTICOLOR
tristate "PWM driven multi-color LED Support"
depends on PWM
@@ -41,6 +52,7 @@ config LEDS_QCOM_LPG
tristate "LED support for Qualcomm LPG"
depends on OF
depends on PWM
+ depends on QCOM_PBS || !QCOM_PBS
depends on SPMI
help
This option enables support for the Light Pulse Generator found in a
diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile
index 243f31e4d70d..a501fd27f179 100644
--- a/drivers/leds/rgb/Makefile
+++ b/drivers/leds/rgb/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_LEDS_GROUP_MULTICOLOR) += leds-group-multicolor.o
obj-$(CONFIG_LEDS_KTD202X) += leds-ktd202x.o
+obj-$(CONFIG_LEDS_NCP5623) += leds-ncp5623.o
obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o
obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o
obj-$(CONFIG_LEDS_MT6370_RGB) += leds-mt6370-rgb.o
diff --git a/drivers/leds/rgb/leds-group-multicolor.c b/drivers/leds/rgb/leds-group-multicolor.c
index 39f58be32af5..b6c7679015fd 100644
--- a/drivers/leds/rgb/leds-group-multicolor.c
+++ b/drivers/leds/rgb/leds-group-multicolor.c
@@ -69,7 +69,7 @@ static int leds_gmc_probe(struct platform_device *pdev)
struct mc_subled *subled;
struct leds_multicolor *priv;
unsigned int max_brightness = 0;
- int i, ret, count = 0;
+ int i, ret, count = 0, common_flags = 0;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -91,6 +91,7 @@ static int leds_gmc_probe(struct platform_device *pdev)
if (!priv->monochromatics)
return -ENOMEM;
+ common_flags |= led_cdev->flags;
priv->monochromatics[count] = led_cdev;
max_brightness = max(max_brightness, led_cdev->max_brightness);
@@ -114,12 +115,15 @@ static int leds_gmc_probe(struct platform_device *pdev)
/* Initialise the multicolor's LED class device */
cdev = &priv->mc_cdev.led_cdev;
- cdev->flags = LED_CORE_SUSPENDRESUME;
cdev->brightness_set_blocking = leds_gmc_set;
cdev->max_brightness = max_brightness;
cdev->color = LED_COLOR_ID_MULTI;
priv->mc_cdev.num_colors = count;
+	/* We only need suspend/resume handling if a sub-LED requests it */
+ if (common_flags & LED_CORE_SUSPENDRESUME)
+ cdev->flags = LED_CORE_SUSPENDRESUME;
+
init_data.fwnode = dev_fwnode(dev);
ret = devm_led_classdev_multicolor_register_ext(dev, &priv->mc_cdev, &init_data);
if (ret)
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
new file mode 100644
index 000000000000..2be4ff918516
--- /dev/null
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NCP5623 Multi-LED Driver
+ *
+ * Author: Abdel Alkuor <alkuor@gmail.com>
+ * Datasheet: https://www.onsemi.com/pdf/datasheet/ncp5623-d.pdf
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include <linux/led-class-multicolor.h>
+
+#define NCP5623_FUNCTION_OFFSET 0x5
+#define NCP5623_REG(x) ((x) << NCP5623_FUNCTION_OFFSET)
+
+#define NCP5623_SHUTDOWN_REG NCP5623_REG(0x0)
+#define NCP5623_ILED_REG NCP5623_REG(0x1)
+#define NCP5623_PWM_REG(index) NCP5623_REG(0x2 + (index))
+#define NCP5623_UPWARD_STEP_REG NCP5623_REG(0x5)
+#define NCP5623_DOWNWARD_STEP_REG NCP5623_REG(0x6)
+#define NCP5623_DIMMING_TIME_REG NCP5623_REG(0x7)
+
+#define NCP5623_MAX_BRIGHTNESS 0x1f
+#define NCP5623_MAX_DIM_TIME_MS 240
+#define NCP5623_DIM_STEP_MS 8
+
+struct ncp5623 {
+ struct i2c_client *client;
+ struct led_classdev_mc mc_dev;
+ struct mutex lock;
+
+ int current_brightness;
+ unsigned long delay;
+};
+
+static int ncp5623_write(struct i2c_client *client, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(client, reg | data, 0);
+}
+
+static int ncp5623_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct ncp5623 *ncp = container_of(mc_cdev, struct ncp5623, mc_dev);
+ int ret;
+
+ guard(mutex)(&ncp->lock);
+
+ if (ncp->delay && time_is_after_jiffies(ncp->delay))
+ return -EBUSY;
+
+ ncp->delay = 0;
+
+ for (int i = 0; i < mc_cdev->num_colors; i++) {
+ ret = ncp5623_write(ncp->client,
+ NCP5623_PWM_REG(mc_cdev->subled_info[i].channel),
+ min(mc_cdev->subled_info[i].intensity,
+ NCP5623_MAX_BRIGHTNESS));
+ if (ret)
+ return ret;
+ }
+
+ ret = ncp5623_write(ncp->client, NCP5623_DIMMING_TIME_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = ncp5623_write(ncp->client, NCP5623_ILED_REG, brightness);
+ if (ret)
+ return ret;
+
+ ncp->current_brightness = brightness;
+
+ return 0;
+}
+
+static int ncp5623_pattern_set(struct led_classdev *cdev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct ncp5623 *ncp = container_of(mc_cdev, struct ncp5623, mc_dev);
+ int brightness_diff;
+ u8 reg;
+ int ret;
+
+ guard(mutex)(&ncp->lock);
+
+ if (ncp->delay && time_is_after_jiffies(ncp->delay))
+ return -EBUSY;
+
+ ncp->delay = 0;
+
+ if (pattern[0].delta_t > NCP5623_MAX_DIM_TIME_MS ||
+ (pattern[0].delta_t % NCP5623_DIM_STEP_MS) != 0)
+ return -EINVAL;
+
+ brightness_diff = pattern[0].brightness - ncp->current_brightness;
+
+ if (brightness_diff == 0)
+ return 0;
+
+ if (pattern[0].delta_t) {
+ if (brightness_diff > 0)
+ reg = NCP5623_UPWARD_STEP_REG;
+ else
+ reg = NCP5623_DOWNWARD_STEP_REG;
+ } else {
+ reg = NCP5623_ILED_REG;
+ }
+
+ ret = ncp5623_write(ncp->client, reg,
+ min(pattern[0].brightness, NCP5623_MAX_BRIGHTNESS));
+ if (ret)
+ return ret;
+
+ ret = ncp5623_write(ncp->client,
+ NCP5623_DIMMING_TIME_REG,
+ pattern[0].delta_t / NCP5623_DIM_STEP_MS);
+ if (ret)
+ return ret;
+
+ /*
+	 * During testing it was observed that, for an unknown reason, a
+	 * brightness difference of exactly 1 takes the longest possible time:
+	 * the value ramps from the current level up to the maximum and then
+	 * from 0 up to the target, giving a time factor of
+	 * max_brightness - current + new (i.e. max_brightness + diff).
+	 * For example, going from 20 to 21 costs max_brightness - 20 + 21.
+	 * For any larger difference the time factor is simply the absolute
+	 * brightness difference.
+ */
+ if (abs(brightness_diff) == 1)
+ ncp->delay = NCP5623_MAX_BRIGHTNESS + brightness_diff;
+ else
+ ncp->delay = abs(brightness_diff);
+
+ ncp->delay = msecs_to_jiffies(ncp->delay * pattern[0].delta_t) + jiffies;
+
+ ncp->current_brightness = pattern[0].brightness;
+
+ return 0;
+}
+
+static int ncp5623_pattern_clear(struct led_classdev *led_cdev)
+{
+ return 0;
+}
+
+static int ncp5623_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *mc_node, *led_node;
+ struct led_init_data init_data = { };
+ int num_subleds = 0;
+ struct ncp5623 *ncp;
+ struct mc_subled *subled_info;
+ u32 color_index;
+ u32 reg;
+ int ret;
+
+ ncp = devm_kzalloc(dev, sizeof(*ncp), GFP_KERNEL);
+ if (!ncp)
+ return -ENOMEM;
+
+ ncp->client = client;
+
+ mc_node = device_get_named_child_node(dev, "multi-led");
+ if (!mc_node)
+ return -EINVAL;
+
+ fwnode_for_each_child_node(mc_node, led_node)
+ num_subleds++;
+
+ subled_info = devm_kcalloc(dev, num_subleds, sizeof(*subled_info), GFP_KERNEL);
+ if (!subled_info) {
+ ret = -ENOMEM;
+ goto release_mc_node;
+ }
+
+ fwnode_for_each_available_child_node(mc_node, led_node) {
+ ret = fwnode_property_read_u32(led_node, "color", &color_index);
+ if (ret) {
+ fwnode_handle_put(led_node);
+ goto release_mc_node;
+ }
+
+ ret = fwnode_property_read_u32(led_node, "reg", &reg);
+ if (ret) {
+ fwnode_handle_put(led_node);
+ goto release_mc_node;
+ }
+
+ subled_info[ncp->mc_dev.num_colors].channel = reg;
+ subled_info[ncp->mc_dev.num_colors++].color_index = color_index;
+ }
+
+ init_data.fwnode = mc_node;
+
+ ncp->mc_dev.led_cdev.max_brightness = NCP5623_MAX_BRIGHTNESS;
+ ncp->mc_dev.subled_info = subled_info;
+ ncp->mc_dev.led_cdev.brightness_set_blocking = ncp5623_brightness_set;
+ ncp->mc_dev.led_cdev.pattern_set = ncp5623_pattern_set;
+ ncp->mc_dev.led_cdev.pattern_clear = ncp5623_pattern_clear;
+ ncp->mc_dev.led_cdev.default_trigger = "pattern";
+
+ mutex_init(&ncp->lock);
+ i2c_set_clientdata(client, ncp);
+
+ ret = led_classdev_multicolor_register_ext(dev, &ncp->mc_dev, &init_data);
+ if (ret)
+ goto destroy_lock;
+
+ return 0;
+
+destroy_lock:
+ mutex_destroy(&ncp->lock);
+
+release_mc_node:
+ fwnode_handle_put(mc_node);
+
+ return ret;
+}
+
+static void ncp5623_remove(struct i2c_client *client)
+{
+ struct ncp5623 *ncp = i2c_get_clientdata(client);
+
+ mutex_lock(&ncp->lock);
+ ncp->delay = 0;
+ mutex_unlock(&ncp->lock);
+
+ ncp5623_write(client, NCP5623_DIMMING_TIME_REG, 0);
+ led_classdev_multicolor_unregister(&ncp->mc_dev);
+ mutex_destroy(&ncp->lock);
+}
+
+static void ncp5623_shutdown(struct i2c_client *client)
+{
+ struct ncp5623 *ncp = i2c_get_clientdata(client);
+
+ if (!(ncp->mc_dev.led_cdev.flags & LED_RETAIN_AT_SHUTDOWN))
+ ncp5623_write(client, NCP5623_SHUTDOWN_REG, 0);
+
+ mutex_destroy(&ncp->lock);
+}
+
+static const struct of_device_id ncp5623_id[] = {
+ { .compatible = "onnn,ncp5623" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ncp5623_id);
+
+static struct i2c_driver ncp5623_i2c_driver = {
+ .driver = {
+ .name = "ncp5623",
+ .of_match_table = ncp5623_id,
+ },
+ .probe = ncp5623_probe,
+ .remove = ncp5623_remove,
+ .shutdown = ncp5623_shutdown,
+};
+
+module_i2c_driver(ncp5623_i2c_driver);
+
+MODULE_AUTHOR("Abdel Alkuor <alkuor@gmail.com>");
+MODULE_DESCRIPTION("NCP5623 Multi-LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 156b73d1f4a2..6bdc5b923f98 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -8,11 +8,13 @@
#include <linux/bitfield.h>
#include <linux/led-class-multicolor.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/qcom-pbs.h>
#define LPG_SUBTYPE_REG 0x05
#define LPG_SUBTYPE_LPG 0x2
@@ -39,6 +41,10 @@
#define PWM_SEC_ACCESS_REG 0xd0
#define PWM_DTEST_REG(x) (0xe2 + (x) - 1)
+#define SDAM_REG_PBS_SEQ_EN 0x42
+#define SDAM_PBS_TRIG_SET 0xe5
+#define SDAM_PBS_TRIG_CLR 0xe6
+
#define TRI_LED_SRC_SEL 0x45
#define TRI_LED_EN_CTL 0x46
#define TRI_LED_ATC_CTL 0x47
@@ -48,9 +54,31 @@
#define LPG_RESOLUTION_9BIT BIT(9)
#define LPG_RESOLUTION_15BIT BIT(15)
+#define PPG_MAX_LED_BRIGHTNESS 255
+
#define LPG_MAX_M 7
#define LPG_MAX_PREDIV 6
+#define DEFAULT_TICK_DURATION_US 7800
+#define RAMP_STEP_DURATION(x) (((x) * 1000 / DEFAULT_TICK_DURATION_US) & 0xff)
+
+#define SDAM_MAX_DEVICES 2
+/* LPG common config settings for PPG */
+#define SDAM_START_BASE 0x40
+#define SDAM_REG_RAMP_STEP_DURATION 0x47
+
+#define SDAM_LUT_SDAM_LUT_PATTERN_OFFSET 0x45
+#define SDAM_LPG_SDAM_LUT_PATTERN_OFFSET 0x80
+
+/* LPG per channel config settings for PPG */
+#define SDAM_LUT_EN_OFFSET 0x0
+#define SDAM_PATTERN_CONFIG_OFFSET 0x1
+#define SDAM_END_INDEX_OFFSET 0x3
+#define SDAM_START_INDEX_OFFSET 0x4
+#define SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET 0x6
+#define SDAM_PAUSE_HI_MULTIPLIER_OFFSET 0x8
+#define SDAM_PAUSE_LO_MULTIPLIER_OFFSET 0x9
+
struct lpg_channel;
struct lpg_data;
@@ -64,6 +92,10 @@ struct lpg_data;
* @lut_base: base address of the LUT block (optional)
* @lut_size: number of entries in the LUT block
* @lut_bitmap: allocation bitmap for LUT entries
+ * @pbs_dev: PBS device
+ * @lpg_chan_sdam: LPG SDAM peripheral device
+ * @lut_sdam: LUT SDAM peripheral device
+ * @pbs_en_bitmap: bitmap for tracking PBS triggers
* @triled_base: base address of the TRILED block (optional)
* @triled_src: power-source for the TRILED
* @triled_has_atc_ctl: true if there is TRI_LED_ATC_CTL register
@@ -77,7 +109,7 @@ struct lpg {
struct mutex lock;
- struct pwm_chip pwm;
+ struct pwm_chip *pwm;
const struct lpg_data *data;
@@ -85,6 +117,11 @@ struct lpg {
u32 lut_size;
unsigned long *lut_bitmap;
+ struct pbs_dev *pbs_dev;
+ struct nvmem_device *lpg_chan_sdam;
+ struct nvmem_device *lut_sdam;
+ unsigned long pbs_en_bitmap;
+
u32 triled_base;
u32 triled_src;
bool triled_has_atc_ctl;
@@ -101,6 +138,7 @@ struct lpg {
* @triled_mask: mask in TRILED to enable this channel
* @lut_mask: mask in LUT to start pattern generator for this channel
* @subtype: PMIC hardware block subtype
+ * @sdam_offset: channel offset in LPG SDAM
* @in_use: channel is exposed to LED framework
* @color: color of the LED attached to this channel
* @dtest_line: DTEST line for output, or 0 if disabled
@@ -129,6 +167,7 @@ struct lpg_channel {
unsigned int triled_mask;
unsigned int lut_mask;
unsigned int subtype;
+ u32 sdam_offset;
bool in_use;
@@ -178,10 +217,12 @@ struct lpg_led {
/**
* struct lpg_channel_data - per channel initialization data
+ * @sdam_offset: Channel offset in LPG SDAM
* @base: base address for PWM channel registers
* @triled_mask: bitmask for controlling this channel in TRILED
*/
struct lpg_channel_data {
+ unsigned int sdam_offset;
unsigned int base;
u8 triled_mask;
};
@@ -206,6 +247,65 @@ struct lpg_data {
const struct lpg_channel_data *channels;
};
+#define PBS_SW_TRIG_BIT BIT(0)
+
+static int lpg_clear_pbs_trigger(struct lpg *lpg, unsigned int lut_mask)
+{
+ u8 val = 0;
+ int rc;
+
+ lpg->pbs_en_bitmap &= (~lut_mask);
+ if (!lpg->pbs_en_bitmap) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val);
+ if (rc < 0)
+ return rc;
+
+ if (lpg->lut_sdam) {
+ val = PBS_SW_TRIG_BIT;
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_PBS_TRIG_CLR, 1, &val);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int lpg_set_pbs_trigger(struct lpg *lpg, unsigned int lut_mask)
+{
+ u8 val = PBS_SW_TRIG_BIT;
+ int rc;
+
+ if (!lpg->pbs_en_bitmap) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val);
+ if (rc < 0)
+ return rc;
+
+ if (lpg->lut_sdam) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_PBS_TRIG_SET, 1, &val);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = qcom_pbs_trigger_event(lpg->pbs_dev, val);
+ if (rc < 0)
+ return rc;
+ }
+ }
+ lpg->pbs_en_bitmap |= lut_mask;
+
+ return 0;
+}
+
+static int lpg_sdam_configure_triggers(struct lpg_channel *chan, u8 set_trig)
+{
+ u32 addr = SDAM_LUT_EN_OFFSET + chan->sdam_offset;
+
+ if (!chan->lpg->lpg_chan_sdam)
+ return 0;
+
+ return nvmem_device_write(chan->lpg->lpg_chan_sdam, addr, 1, &set_trig);
+}
+
static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable)
{
/* Skip if we don't have a triled block */
@@ -216,6 +316,47 @@ static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable)
mask, enable);
}
+static int lpg_lut_store_sdam(struct lpg *lpg, struct led_pattern *pattern,
+ size_t len, unsigned int *lo_idx, unsigned int *hi_idx)
+{
+ unsigned int idx;
+ u8 brightness;
+ int i, rc;
+ u16 addr;
+
+ if (len > lpg->lut_size) {
+ dev_err(lpg->dev, "Pattern length (%zu) exceeds maximum pattern length (%d)\n",
+ len, lpg->lut_size);
+ return -EINVAL;
+ }
+
+ idx = bitmap_find_next_zero_area(lpg->lut_bitmap, lpg->lut_size, 0, len, 0);
+ if (idx >= lpg->lut_size)
+ return -ENOSPC;
+
+ for (i = 0; i < len; i++) {
+ brightness = pattern[i].brightness;
+
+ if (lpg->lut_sdam) {
+ addr = SDAM_LUT_SDAM_LUT_PATTERN_OFFSET + i + idx;
+ rc = nvmem_device_write(lpg->lut_sdam, addr, 1, &brightness);
+ } else {
+ addr = SDAM_LPG_SDAM_LUT_PATTERN_OFFSET + i + idx;
+ rc = nvmem_device_write(lpg->lpg_chan_sdam, addr, 1, &brightness);
+ }
+
+ if (rc < 0)
+ return rc;
+ }
+
+ bitmap_set(lpg->lut_bitmap, idx, len);
+
+ *lo_idx = idx;
+ *hi_idx = idx + len - 1;
+
+ return 0;
+}
+
static int lpg_lut_store(struct lpg *lpg, struct led_pattern *pattern,
size_t len, unsigned int *lo_idx, unsigned int *hi_idx)
{
@@ -256,6 +397,9 @@ static void lpg_lut_free(struct lpg *lpg, unsigned int lo_idx, unsigned int hi_i
static int lpg_lut_sync(struct lpg *lpg, unsigned int mask)
{
+ if (!lpg->lut_base)
+ return 0;
+
return regmap_write(lpg->map, lpg->lut_base + RAMP_CONTROL_REG, mask);
}
@@ -462,6 +606,49 @@ static void lpg_apply_pwm_value(struct lpg_channel *chan)
#define LPG_PATTERN_CONFIG_PAUSE_HI BIT(1)
#define LPG_PATTERN_CONFIG_PAUSE_LO BIT(0)
+static void lpg_sdam_apply_lut_control(struct lpg_channel *chan)
+{
+ struct nvmem_device *lpg_chan_sdam = chan->lpg->lpg_chan_sdam;
+ unsigned int lo_idx = chan->pattern_lo_idx;
+ unsigned int hi_idx = chan->pattern_hi_idx;
+ u8 val = 0, conf = 0, lut_offset = 0;
+ unsigned int hi_pause, lo_pause;
+ struct lpg *lpg = chan->lpg;
+
+ if (!chan->ramp_enabled || chan->pattern_lo_idx == chan->pattern_hi_idx)
+ return;
+
+ hi_pause = DIV_ROUND_UP(chan->ramp_hi_pause_ms, chan->ramp_tick_ms);
+ lo_pause = DIV_ROUND_UP(chan->ramp_lo_pause_ms, chan->ramp_tick_ms);
+
+ if (!chan->ramp_oneshot)
+ conf |= LPG_PATTERN_CONFIG_REPEAT;
+ if (chan->ramp_hi_pause_ms && lpg->lut_sdam)
+ conf |= LPG_PATTERN_CONFIG_PAUSE_HI;
+ if (chan->ramp_lo_pause_ms && lpg->lut_sdam)
+ conf |= LPG_PATTERN_CONFIG_PAUSE_LO;
+
+ if (lpg->lut_sdam) {
+ lut_offset = SDAM_LUT_SDAM_LUT_PATTERN_OFFSET - SDAM_START_BASE;
+ hi_idx += lut_offset;
+ lo_idx += lut_offset;
+ }
+
+ nvmem_device_write(lpg_chan_sdam, SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET + chan->sdam_offset, 1, &val);
+ nvmem_device_write(lpg_chan_sdam, SDAM_PATTERN_CONFIG_OFFSET + chan->sdam_offset, 1, &conf);
+ nvmem_device_write(lpg_chan_sdam, SDAM_END_INDEX_OFFSET + chan->sdam_offset, 1, &hi_idx);
+ nvmem_device_write(lpg_chan_sdam, SDAM_START_INDEX_OFFSET + chan->sdam_offset, 1, &lo_idx);
+
+ val = RAMP_STEP_DURATION(chan->ramp_tick_ms);
+ nvmem_device_write(lpg_chan_sdam, SDAM_REG_RAMP_STEP_DURATION, 1, &val);
+
+ if (lpg->lut_sdam) {
+ nvmem_device_write(lpg_chan_sdam, SDAM_PAUSE_HI_MULTIPLIER_OFFSET + chan->sdam_offset, 1, &hi_pause);
+ nvmem_device_write(lpg_chan_sdam, SDAM_PAUSE_LO_MULTIPLIER_OFFSET + chan->sdam_offset, 1, &lo_pause);
+ }
+}
+
static void lpg_apply_lut_control(struct lpg_channel *chan)
{
struct lpg *lpg = chan->lpg;
@@ -596,7 +783,10 @@ static void lpg_apply(struct lpg_channel *chan)
lpg_apply_pwm_value(chan);
lpg_apply_control(chan);
lpg_apply_sync(chan);
- lpg_apply_lut_control(chan);
+ if (chan->lpg->lpg_chan_sdam)
+ lpg_sdam_apply_lut_control(chan);
+ else
+ lpg_apply_lut_control(chan);
lpg_enable_glitch(chan);
}
@@ -621,6 +811,7 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
chan->ramp_enabled = false;
} else if (chan->pattern_lo_idx != chan->pattern_hi_idx) {
lpg_calc_freq(chan, NSEC_PER_MSEC);
+ lpg_sdam_configure_triggers(chan, 1);
chan->enabled = true;
chan->ramp_enabled = true;
@@ -648,8 +839,10 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
triled_set(lpg, triled_mask, triled_enabled);
/* Trigger start of ramp generator(s) */
- if (lut_mask)
+ if (lut_mask) {
lpg_lut_sync(lpg, lut_mask);
+ lpg_set_pbs_trigger(lpg, lut_mask);
+ }
}
static int lpg_brightness_single_set(struct led_classdev *cdev,
@@ -766,9 +959,9 @@ static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
struct led_pattern *pattern;
unsigned int brightness_a;
unsigned int brightness_b;
+ unsigned int hi_pause = 0;
+ unsigned int lo_pause = 0;
unsigned int actual_len;
- unsigned int hi_pause;
- unsigned int lo_pause;
unsigned int delta_t;
unsigned int lo_idx;
unsigned int hi_idx;
@@ -835,18 +1028,24 @@ static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
* If the specified pattern is a palindrome the ping pong mode is
* enabled. In this scenario the delta_t of the middle entry (i.e. the
* last in the programmed pattern) determines the "high pause".
+ *
+	 * SDAM-based devices do not support "ping pong", and only support
+	 * "low pause" and "high pause" with a dedicated LUT SDAM.
*/
/* Detect palindromes and use "ping pong" to reduce LUT usage */
- for (i = 0; i < len / 2; i++) {
- brightness_a = pattern[i].brightness;
- brightness_b = pattern[len - i - 1].brightness;
-
- if (brightness_a != brightness_b) {
- ping_pong = false;
- break;
+ if (lpg->lut_base) {
+ for (i = 0; i < len / 2; i++) {
+ brightness_a = pattern[i].brightness;
+ brightness_b = pattern[len - i - 1].brightness;
+
+ if (brightness_a != brightness_b) {
+ ping_pong = false;
+ break;
+ }
}
- }
+	} else {
+		ping_pong = false;
+	}
/* The pattern length to be written to the LUT */
if (ping_pong)
@@ -874,12 +1073,27 @@ static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
if (delta_t >= BIT(9))
goto out_free_pattern;
- /* Find "low pause" and "high pause" in the pattern */
- lo_pause = pattern[0].delta_t;
- hi_pause = pattern[actual_len - 1].delta_t;
+ /*
+ * Find "low pause" and "high pause" in the pattern in the LUT case.
+ * SDAM-based devices without dedicated LUT SDAM require equal
+ * duration of all steps.
+ */
+ if (lpg->lut_base || lpg->lut_sdam) {
+ lo_pause = pattern[0].delta_t;
+ hi_pause = pattern[actual_len - 1].delta_t;
+ } else {
+ if (delta_t != pattern[0].delta_t || delta_t != pattern[actual_len - 1].delta_t)
+ goto out_free_pattern;
+ }
+
mutex_lock(&lpg->lock);
- ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+
+ if (lpg->lut_base)
+ ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+ else
+ ret = lpg_lut_store_sdam(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+
if (ret < 0)
goto out_unlock;
@@ -927,7 +1141,12 @@ static int lpg_pattern_mc_set(struct led_classdev *cdev,
{
struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
- int ret;
+ unsigned int triled_mask = 0;
+ int ret, i;
+
+ for (i = 0; i < led->num_channels; i++)
+ triled_mask |= led->channels[i]->triled_mask;
+ triled_set(led->lpg, triled_mask, 0);
ret = lpg_pattern_set(led, pattern, len, repeat);
if (ret < 0)
@@ -952,6 +1171,8 @@ static int lpg_pattern_clear(struct lpg_led *led)
for (i = 0; i < led->num_channels; i++) {
chan = led->channels[i];
+ lpg_sdam_configure_triggers(chan, 0);
+ lpg_clear_pbs_trigger(chan->lpg, chan->lut_mask);
chan->pattern_lo_idx = 0;
chan->pattern_hi_idx = 0;
}
@@ -978,7 +1199,7 @@ static int lpg_pattern_mc_clear(struct led_classdev *cdev)
static inline struct lpg *lpg_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct lpg, pwm);
+ return pwmchip_get_drvdata(chip);
}
static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -1093,13 +1314,17 @@ static const struct pwm_ops lpg_pwm_ops = {
static int lpg_add_pwm(struct lpg *lpg)
{
+ struct pwm_chip *chip;
int ret;
- lpg->pwm.dev = lpg->dev;
- lpg->pwm.npwm = lpg->num_channels;
- lpg->pwm.ops = &lpg_pwm_ops;
+ lpg->pwm = chip = devm_pwmchip_alloc(lpg->dev, lpg->num_channels, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ chip->ops = &lpg_pwm_ops;
+ pwmchip_set_drvdata(chip, lpg);
- ret = devm_pwmchip_add(lpg->dev, &lpg->pwm);
+ ret = devm_pwmchip_add(lpg->dev, chip);
if (ret)
dev_err_probe(lpg->dev, ret, "failed to add PWM chip\n");
@@ -1187,8 +1412,8 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
cdev->brightness_set_blocking = lpg_brightness_mc_set;
cdev->blink_set = lpg_blink_mc_set;
- /* Register pattern accessors only if we have a LUT block */
- if (lpg->lut_base) {
+ /* Register pattern accessors if we have a LUT block or when using PPG */
+ if (lpg->lut_base || lpg->lpg_chan_sdam) {
cdev->pattern_set = lpg_pattern_mc_set;
cdev->pattern_clear = lpg_pattern_mc_clear;
}
@@ -1201,15 +1426,19 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
cdev->brightness_set_blocking = lpg_brightness_single_set;
cdev->blink_set = lpg_blink_single_set;
- /* Register pattern accessors only if we have a LUT block */
- if (lpg->lut_base) {
+ /* Register pattern accessors if we have a LUT block or when using PPG */
+ if (lpg->lut_base || lpg->lpg_chan_sdam) {
cdev->pattern_set = lpg_pattern_single_set;
cdev->pattern_clear = lpg_pattern_single_clear;
}
}
cdev->default_trigger = of_get_property(np, "linux,default-trigger", NULL);
- cdev->max_brightness = LPG_RESOLUTION_9BIT - 1;
+
+ if (lpg->lpg_chan_sdam)
+ cdev->max_brightness = PPG_MAX_LED_BRIGHTNESS;
+ else
+ cdev->max_brightness = LPG_RESOLUTION_9BIT - 1;
if (!of_property_read_string(np, "default-state", &state) &&
!strcmp(state, "on"))
@@ -1250,6 +1479,7 @@ static int lpg_init_channels(struct lpg *lpg)
chan->base = data->channels[i].base;
chan->triled_mask = data->channels[i].triled_mask;
chan->lut_mask = BIT(i);
+ chan->sdam_offset = data->channels[i].sdam_offset;
regmap_read(lpg->map, chan->base + LPG_SUBTYPE_REG, &chan->subtype);
}
@@ -1295,11 +1525,12 @@ static int lpg_init_lut(struct lpg *lpg)
{
const struct lpg_data *data = lpg->data;
- if (!data->lut_base)
+ if (!data->lut_size)
return 0;
- lpg->lut_base = data->lut_base;
lpg->lut_size = data->lut_size;
+ if (data->lut_base)
+ lpg->lut_base = data->lut_base;
lpg->lut_bitmap = devm_bitmap_zalloc(lpg->dev, lpg->lut_size, GFP_KERNEL);
if (!lpg->lut_bitmap)
@@ -1308,6 +1539,59 @@ static int lpg_init_lut(struct lpg *lpg)
return 0;
}
+static int lpg_init_sdam(struct lpg *lpg)
+{
+ int i, sdam_count, rc;
+ u8 val = 0;
+
+ sdam_count = of_property_count_strings(lpg->dev->of_node, "nvmem-names");
+ if (sdam_count <= 0)
+ return 0;
+ if (sdam_count > SDAM_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Get the 1st SDAM device for LPG/LUT config */
+ lpg->lpg_chan_sdam = devm_nvmem_device_get(lpg->dev, "lpg_chan_sdam");
+ if (IS_ERR(lpg->lpg_chan_sdam))
+ return dev_err_probe(lpg->dev, PTR_ERR(lpg->lpg_chan_sdam),
+ "Failed to get LPG chan SDAM device\n");
+
+ if (sdam_count == 1) {
+ /* Get PBS device node if single SDAM device */
+ lpg->pbs_dev = get_pbs_client_device(lpg->dev);
+ if (IS_ERR(lpg->pbs_dev))
+ return dev_err_probe(lpg->dev, PTR_ERR(lpg->pbs_dev),
+ "Failed to get PBS client device\n");
+ } else if (sdam_count == 2) {
+ /* Get the 2nd SDAM device for LUT pattern */
+ lpg->lut_sdam = devm_nvmem_device_get(lpg->dev, "lut_sdam");
+ if (IS_ERR(lpg->lut_sdam))
+ return dev_err_probe(lpg->dev, PTR_ERR(lpg->lut_sdam),
+ "Failed to get LPG LUT SDAM device\n");
+ }
+
+ for (i = 0; i < lpg->num_channels; i++) {
+ struct lpg_channel *chan = &lpg->channels[i];
+
+ if (chan->sdam_offset) {
+ rc = nvmem_device_write(lpg->lpg_chan_sdam,
+ SDAM_PBS_SCRATCH_LUT_COUNTER_OFFSET + chan->sdam_offset, 1, &val);
+ if (rc < 0)
+ return rc;
+
+ rc = lpg_sdam_configure_triggers(chan, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = lpg_clear_pbs_trigger(chan->lpg, chan->lut_mask);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
static int lpg_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -1342,6 +1626,10 @@ static int lpg_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ ret = lpg_init_sdam(lpg);
+ if (ret < 0)
+ return ret;
+
ret = lpg_init_lut(lpg);
if (ret < 0)
return ret;
@@ -1360,6 +1648,23 @@ static int lpg_probe(struct platform_device *pdev)
return lpg_add_pwm(lpg);
}
+static const struct lpg_data pm660l_lpg_data = {
+ .lut_base = 0xb000,
+ .lut_size = 49,
+
+ .triled_base = 0xd000,
+ .triled_has_atc_ctl = true,
+ .triled_has_src_sel = true,
+
+ .num_channels = 4,
+ .channels = (const struct lpg_channel_data[]) {
+ { .base = 0xb100, .triled_mask = BIT(5) },
+ { .base = 0xb200, .triled_mask = BIT(6) },
+ { .base = 0xb300, .triled_mask = BIT(7) },
+ { .base = 0xb400 },
+ },
+};
+
static const struct lpg_data pm8916_pwm_data = {
.num_channels = 1,
.channels = (const struct lpg_channel_data[]) {
@@ -1407,11 +1712,13 @@ static const struct lpg_data pm8994_lpg_data = {
static const struct lpg_data pmi632_lpg_data = {
.triled_base = 0xd000,
+ .lut_size = 64,
+
.num_channels = 5,
.channels = (const struct lpg_channel_data[]) {
- { .base = 0xb300, .triled_mask = BIT(7) },
- { .base = 0xb400, .triled_mask = BIT(6) },
- { .base = 0xb500, .triled_mask = BIT(5) },
+ { .base = 0xb300, .triled_mask = BIT(7), .sdam_offset = 0x48 },
+ { .base = 0xb400, .triled_mask = BIT(6), .sdam_offset = 0x56 },
+ { .base = 0xb500, .triled_mask = BIT(5), .sdam_offset = 0x64 },
{ .base = 0xb600 },
{ .base = 0xb700 },
},
@@ -1484,11 +1791,13 @@ static const struct lpg_data pm8150l_lpg_data = {
static const struct lpg_data pm8350c_pwm_data = {
.triled_base = 0xef00,
+ .lut_size = 122,
+
.num_channels = 4,
.channels = (const struct lpg_channel_data[]) {
- { .base = 0xe800, .triled_mask = BIT(7) },
- { .base = 0xe900, .triled_mask = BIT(6) },
- { .base = 0xea00, .triled_mask = BIT(5) },
+ { .base = 0xe800, .triled_mask = BIT(7), .sdam_offset = 0x48 },
+ { .base = 0xe900, .triled_mask = BIT(6), .sdam_offset = 0x56 },
+ { .base = 0xea00, .triled_mask = BIT(5), .sdam_offset = 0x64 },
{ .base = 0xeb00 },
},
};
@@ -1502,6 +1811,7 @@ static const struct lpg_data pmk8550_pwm_data = {
};
static const struct of_device_id lpg_of_table[] = {
+ { .compatible = "qcom,pm660l-lpg", .data = &pm660l_lpg_data },
{ .compatible = "qcom,pm8150b-lpg", .data = &pm8150b_lpg_data },
{ .compatible = "qcom,pm8150l-lpg", .data = &pm8150l_lpg_data },
{ .compatible = "qcom,pm8350c-pwm", .data = &pm8350c_pwm_data },
diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c
index c6b437e6369b..2ecd4b760fc3 100644
--- a/drivers/leds/trigger/ledtrig-audio.c
+++ b/drivers/leds/trigger/ledtrig-audio.c
@@ -63,3 +63,5 @@ module_exit(ledtrig_audio_exit);
MODULE_DESCRIPTION("LED trigger for audio mute control");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ledtrig:audio-mute");
+MODULE_ALIAS("ledtrig:audio-micmute");
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index 8207f85eceb1..8678e64a5c33 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -28,3 +28,4 @@ module_led_trigger(defon_led_trigger);
MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>");
MODULE_DESCRIPTION("Default-ON LED trigger");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ledtrig:default-on");
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 8e5475819590..ea00f6c70882 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -18,10 +18,12 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/leds.h>
+#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/timer.h>
#include "../leds.h"
@@ -65,12 +67,15 @@ struct led_netdev_data {
unsigned long mode;
int link_speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
u8 duplex;
bool carrier_link_up;
bool hw_control;
};
+static const struct attribute_group netdev_trig_link_speed_attrs_group;
+
static void set_baseline_state(struct led_netdev_data *trigger_data)
{
int current_brightness;
@@ -218,13 +223,20 @@ static void get_device_state(struct led_netdev_data *trigger_data)
struct ethtool_link_ksettings cmd;
trigger_data->carrier_link_up = netif_carrier_ok(trigger_data->net_dev);
- if (!trigger_data->carrier_link_up)
+
+ if (__ethtool_get_link_ksettings(trigger_data->net_dev, &cmd))
return;
- if (!__ethtool_get_link_ksettings(trigger_data->net_dev, &cmd)) {
+ if (trigger_data->carrier_link_up) {
trigger_data->link_speed = cmd.base.speed;
trigger_data->duplex = cmd.base.duplex;
}
+
+ /*
+	 * Keep a local copy of the supported link modes so we do not have to
+	 * take the rtnl lock every time the modes are refreshed on a change event.
+ */
+ linkmode_copy(trigger_data->supported_link_modes, cmd.link_modes.supported);
}
static ssize_t device_name_show(struct device *dev,
@@ -277,7 +289,10 @@ static int set_device_name(struct led_netdev_data *trigger_data,
trigger_data->last_activity = 0;
- set_baseline_state(trigger_data);
+ /* Skip if we're called from netdev_trig_activate() and hw_control is true */
+ if (!trigger_data->hw_control || led_get_trigger_data(trigger_data->led_cdev))
+ set_baseline_state(trigger_data);
+
mutex_unlock(&trigger_data->lock);
rtnl_unlock();
@@ -295,6 +310,10 @@ static ssize_t device_name_store(struct device *dev,
if (ret < 0)
return ret;
+
+ /* Refresh link_speed visibility */
+ sysfs_update_group(&dev->kobj, &netdev_trig_link_speed_attrs_group);
+
return size;
}
@@ -458,15 +477,63 @@ static ssize_t offloaded_show(struct device *dev,
static DEVICE_ATTR_RO(offloaded);
-static struct attribute *netdev_trig_attrs[] = {
- &dev_attr_device_name.attr,
- &dev_attr_link.attr,
+#define CHECK_LINK_MODE_ATTR(link_speed) \
+ do { \
+ if (attr == &dev_attr_link_##link_speed.attr && \
+ link_ksettings.base.speed == SPEED_##link_speed) \
+ return attr->mode; \
+ } while (0)
+
+static umode_t netdev_trig_link_speed_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct led_netdev_data *trigger_data;
+ unsigned long *supported_link_modes;
+ u32 mode;
+
+ trigger_data = led_trigger_get_drvdata(dev);
+ supported_link_modes = trigger_data->supported_link_modes;
+
+ /*
+ * Search the supported link mode mask for a matching mode. Stop at the
+ * first match, since we only care whether a particular speed is supported,
+ * not which specific link mode provides it.
+ */
+ for_each_set_bit(mode, supported_link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS) {
+ struct ethtool_link_ksettings link_ksettings;
+
+ ethtool_params_from_link_mode(&link_ksettings, mode);
+
+ CHECK_LINK_MODE_ATTR(10);
+ CHECK_LINK_MODE_ATTR(100);
+ CHECK_LINK_MODE_ATTR(1000);
+ CHECK_LINK_MODE_ATTR(2500);
+ CHECK_LINK_MODE_ATTR(5000);
+ CHECK_LINK_MODE_ATTR(10000);
+ }
+
+ return 0;
+}
+
+static struct attribute *netdev_trig_link_speed_attrs[] = {
&dev_attr_link_10.attr,
&dev_attr_link_100.attr,
&dev_attr_link_1000.attr,
&dev_attr_link_2500.attr,
&dev_attr_link_5000.attr,
&dev_attr_link_10000.attr,
+ NULL
+};
+
+static const struct attribute_group netdev_trig_link_speed_attrs_group = {
+ .attrs = netdev_trig_link_speed_attrs,
+ .is_visible = netdev_trig_link_speed_visible,
+};
+
+static struct attribute *netdev_trig_attrs[] = {
+ &dev_attr_device_name.attr,
+ &dev_attr_link.attr,
&dev_attr_full_duplex.attr,
&dev_attr_half_duplex.attr,
&dev_attr_rx.attr,
@@ -475,7 +542,16 @@ static struct attribute *netdev_trig_attrs[] = {
&dev_attr_offloaded.attr,
NULL
};
-ATTRIBUTE_GROUPS(netdev_trig);
+
+static const struct attribute_group netdev_trig_attrs_group = {
+ .attrs = netdev_trig_attrs,
+};
+
+static const struct attribute_group *netdev_trig_groups[] = {
+ &netdev_trig_attrs_group,
+ &netdev_trig_link_speed_attrs_group,
+ NULL,
+};
static int netdev_trig_notify(struct notifier_block *nb,
unsigned long evt, void *dv)
@@ -484,6 +560,7 @@ static int netdev_trig_notify(struct notifier_block *nb,
netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
struct led_netdev_data *trigger_data =
container_of(nb, struct led_netdev_data, notifier);
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
&& evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
@@ -504,12 +581,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
trigger_data->duplex = DUPLEX_UNKNOWN;
switch (evt) {
case NETDEV_CHANGENAME:
- get_device_state(trigger_data);
- fallthrough;
case NETDEV_REGISTER:
dev_put(trigger_data->net_dev);
dev_hold(dev);
trigger_data->net_dev = dev;
+ if (evt == NETDEV_CHANGENAME)
+ get_device_state(trigger_data);
break;
case NETDEV_UNREGISTER:
dev_put(trigger_data->net_dev);
@@ -518,6 +595,10 @@ static int netdev_trig_notify(struct notifier_block *nb,
case NETDEV_UP:
case NETDEV_CHANGE:
get_device_state(trigger_data);
+ /* Refresh link_speed visibility */
+ if (evt == NETDEV_CHANGE)
+ sysfs_update_group(&led_cdev->dev->kobj,
+ &netdev_trig_link_speed_attrs_group);
break;
}
@@ -617,8 +698,8 @@ static int netdev_trig_activate(struct led_classdev *led_cdev)
if (dev) {
const char *name = dev_name(dev);
- set_device_name(trigger_data, name, strlen(name));
trigger_data->hw_control = true;
+ set_device_name(trigger_data, name, strlen(name));
rc = led_cdev->hw_control_get(led_cdev, &mode);
if (!rc)
@@ -663,3 +744,4 @@ MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
MODULE_DESCRIPTION("Netdev LED trigger");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ledtrig:netdev");
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
index 5a6b21bfeb9a..1d49c1078091 100644
--- a/drivers/leds/trigger/ledtrig-panic.c
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -21,24 +21,15 @@ static struct led_trigger *trigger;
*/
static void led_trigger_set_panic(struct led_classdev *led_cdev)
{
- struct led_trigger *trig;
+ if (led_cdev->trigger)
+ list_del(&led_cdev->trig_list);
+ list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
- list_for_each_entry(trig, &trigger_list, next_trig) {
- if (strcmp("panic", trig->name))
- continue;
- if (led_cdev->trigger)
- list_del(&led_cdev->trig_list);
- list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
+ /* Avoid the delayed blink path */
+ led_cdev->blink_delay_on = 0;
+ led_cdev->blink_delay_off = 0;
- /* Avoid the delayed blink path */
- led_cdev->blink_delay_on = 0;
- led_cdev->blink_delay_off = 0;
-
- led_cdev->trigger = trig;
- if (trig->activate)
- trig->activate(led_cdev);
- break;
- }
+ led_cdev->trigger = trigger;
}
static int led_trigger_panic_notifier(struct notifier_block *nb,
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index c2d87e7fa85b..89450645c230 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -10,6 +10,7 @@
#include <asm/ptrace.h>
#include <linux/adb.h>
+#include <linux/backlight.h>
#include <linux/pmu.h>
#include <asm/backlight.h>
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index e3e28a4f7d01..b1abc2a0c971 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1587,8 +1587,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
}
/* Allocate platform MSIs for each ring */
- ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
- flexrm_mbox_msi_write);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
+ flexrm_mbox_msi_write);
if (ret)
goto fail_destroy_cmpl_pool;
@@ -1641,7 +1641,7 @@ skip_debugfs:
fail_free_debugfs_root:
debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
@@ -1657,7 +1657,7 @@ static void flexrm_mbox_remove(struct platform_device *pdev)
debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 656171362fe9..5c1d09cad761 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -4,6 +4,7 @@
* Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/s4.h>
@@ -15,6 +16,7 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
@@ -29,7 +31,9 @@
#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
-#define IMX_MU_NUM_RR 4
+#define IMX_MU_V2_PAR_OFF 0x4
+#define IMX_MU_V2_TR_MASK GENMASK(7, 0)
+#define IMX_MU_V2_RR_MASK GENMASK(15, 8)
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
@@ -93,10 +97,11 @@ struct imx_mu_priv {
struct clk *clk;
int irq[IMX_MU_CHANS];
bool suspend;
-
- u32 xcr[IMX_MU_xCR_MAX];
-
bool side_b;
+
+ u32 xcr[IMX_MU_xCR_MAX];
+ u32 num_tr;
+ u32 num_rr;
};
enum imx_mu_type {
@@ -110,7 +115,7 @@ struct imx_mu_dcfg {
int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
- void (*init)(struct imx_mu_priv *priv);
+ int (*init)(struct imx_mu_priv *priv);
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
@@ -264,18 +269,17 @@ static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
u32 *arg = data;
+ u32 num_tr = priv->num_tr;
int i, ret;
u32 xsr;
- u32 size, max_size, num_tr;
+ u32 size, max_size;
if (priv->dcfg->type & IMX_MU_V2_S4) {
size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
max_size = sizeof(struct imx_s4_rpc_msg_max);
- num_tr = 8;
} else {
size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
max_size = sizeof(struct imx_sc_rpc_msg_max);
- num_tr = 4;
}
switch (cp->type) {
@@ -324,6 +328,7 @@ static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *
int i, ret;
u32 xsr;
u32 size, max_size;
+ u32 num_rr = priv->num_rr;
data = (u32 *)priv->msg;
@@ -345,13 +350,13 @@ static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *
for (i = 1; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
- xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0,
+ xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % num_rr), 0,
5 * USEC_PER_SEC);
if (ret) {
dev_err(priv->dev, "timeout read idx %d\n", i);
return ret;
}
- *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % num_rr) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
@@ -737,11 +742,30 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
return imx_mu_xlate(mbox, sp);
}
-static void imx_mu_init_generic(struct imx_mu_priv *priv)
+static void imx_mu_get_tr_rr(struct imx_mu_priv *priv)
+{
+ u32 val;
+
+ if (priv->dcfg->type & IMX_MU_V2) {
+ val = imx_mu_read(priv, IMX_MU_V2_PAR_OFF);
+ priv->num_tr = FIELD_GET(IMX_MU_V2_TR_MASK, val);
+ priv->num_rr = FIELD_GET(IMX_MU_V2_RR_MASK, val);
+ } else {
+ priv->num_tr = 4;
+ priv->num_rr = 4;
+ }
+}
+
+static int imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
unsigned int val;
+ if (priv->num_rr > 4 || priv->num_tr > 4) {
+ WARN_ONCE(true, "%s does not support TR/RR larger than 4\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
@@ -757,7 +781,7 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
priv->mbox.of_xlate = imx_mu_xlate;
if (priv->side_b)
- return;
+ return 0;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
@@ -768,11 +792,13 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
/* Clear any pending RSR */
- for (i = 0; i < IMX_MU_NUM_RR; i++)
- imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+ for (i = 0; i < priv->num_rr; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + i * 4);
+
+ return 0;
}
-static void imx_mu_init_specific(struct imx_mu_priv *priv)
+static int imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
@@ -794,12 +820,20 @@ static void imx_mu_init_specific(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ return 0;
}
-static void imx_mu_init_seco(struct imx_mu_priv *priv)
+static int imx_mu_init_seco(struct imx_mu_priv *priv)
{
- imx_mu_init_generic(priv);
+ int ret;
+
+ ret = imx_mu_init_generic(priv);
+ if (ret)
+ return ret;
priv->mbox.of_xlate = imx_mu_seco_xlate;
+
+ return 0;
}
static int imx_mu_probe(struct platform_device *pdev)
@@ -864,9 +898,15 @@ static int imx_mu_probe(struct platform_device *pdev)
return ret;
}
+ imx_mu_get_tr_rr(priv);
+
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
- priv->dcfg->init(priv);
+ ret = priv->dcfg->init(priv);
+ if (ret) {
+ dev_err(dev, "Failed to init MU\n");
+ goto disable_clk;
+ }
spin_lock_init(&priv->xcr_lock);
@@ -878,10 +918,10 @@ static int imx_mu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
ret = devm_mbox_controller_register(dev, &priv->mbox);
- if (ret) {
- clk_disable_unprepare(priv->clk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
pm_runtime_enable(dev);
@@ -899,6 +939,7 @@ static int imx_mu_probe(struct platform_device *pdev)
disable_runtime_pm:
pm_runtime_disable(dev);
+disable_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
@@ -994,6 +1035,9 @@ static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
{ .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
+ { .compatible = "fsl,imx95-mu", .data = &imx_mu_cfg_imx8ulp },
+ { .compatible = "fsl,imx95-mu-ele", .data = &imx_mu_cfg_imx8ulp_s4 },
+ { .compatible = "fsl,imx95-mu-v2x", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
{ },
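The PAR register parsing added above follows the usual bitfield.h pattern; a minimal, hypothetical sketch (the EXAMPLE_* names are illustrative, not taken from the driver):

/* Hypothetical sketch of the GENMASK()/FIELD_GET() pattern used above. */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_TR_MASK	GENMASK(7, 0)	/* bits 7..0: number of TR registers */
#define EXAMPLE_RR_MASK	GENMASK(15, 8)	/* bits 15..8: number of RR registers */

static void example_parse_par(u32 par, u32 *num_tr, u32 *num_rr)
{
	/* FIELD_GET() masks and shifts according to the GENMASK() definition. */
	*num_tr = FIELD_GET(EXAMPLE_TR_MASK, par);
	*num_rr = FIELD_GET(EXAMPLE_RR_MASK, par);
}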
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index a743e2c572fc..68ce56fc61d0 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -634,4 +634,6 @@ config DM_AUDIT
Enables audit logging of several security relevant events in the
particular device-mapper targets, especially the integrity target.
+source "drivers/md/dm-vdo/Kconfig"
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 027d7cfeca3f..476a214e4bdc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
+obj-$(CONFIG_DM_VDO) += dm-vdo/
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_EBS) += dm-ebs.o
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6ae2329052c9..4e6afa89921f 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -300,7 +300,7 @@ struct cached_dev {
struct list_head list;
struct bcache_device disk;
struct block_device *bdev;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct cache_sb sb;
struct cache_sb_disk *sb_disk;
@@ -423,7 +423,7 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct task_struct *alloc_thread;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dc3f50f69714..330bcd9ea4a9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -900,9 +900,23 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
SIZE_MAX / sizeof(atomic_t));
+ struct queue_limits lim = {
+ .max_hw_sectors = UINT_MAX,
+ .max_sectors = UINT_MAX,
+ .max_segment_size = UINT_MAX,
+ .max_segments = BIO_MAX_VECS,
+ .max_hw_discard_sectors = UINT_MAX,
+ .io_min = block_size,
+ .logical_block_size = block_size,
+ .physical_block_size = block_size,
+ };
uint64_t n;
int idx;
+ if (cached_bdev) {
+ d->stripe_size = bdev_io_opt(cached_bdev) >> SECTOR_SHIFT;
+ lim.io_opt = umax(block_size, bdev_io_opt(cached_bdev));
+ }
if (!d->stripe_size)
d->stripe_size = 1 << 31;
else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
@@ -935,8 +949,21 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto out_ida_remove;
- d->disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!d->disk)
+ if (lim.logical_block_size > PAGE_SIZE && cached_bdev) {
+ /*
+ * This should only happen with BCACHE_SB_VERSION_BDEV.
+ * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
+ */
+ pr_info("bcache%i: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
+ idx, lim.logical_block_size,
+ PAGE_SIZE, bdev_logical_block_size(cached_bdev));
+
+ /* This also adjusts physical block size/min io size if needed */
+ lim.logical_block_size = bdev_logical_block_size(cached_bdev);
+ }
+
+ d->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(d->disk))
goto out_bioset_exit;
set_capacity(d->disk, sectors);
@@ -949,27 +976,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->private_data = d;
q = d->disk->queue;
- q->limits.max_hw_sectors = UINT_MAX;
- q->limits.max_sectors = UINT_MAX;
- q->limits.max_segment_size = UINT_MAX;
- q->limits.max_segments = BIO_MAX_VECS;
- blk_queue_max_discard_sectors(q, UINT_MAX);
- q->limits.io_min = block_size;
- q->limits.logical_block_size = block_size;
- q->limits.physical_block_size = block_size;
-
- if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
- /*
- * This should only happen with BCACHE_SB_VERSION_BDEV.
- * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
- */
- pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
- d->disk->disk_name, q->limits.logical_block_size,
- PAGE_SIZE, bdev_logical_block_size(cached_bdev));
-
- /* This also adjusts physical block size/min io size if needed */
- blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
- }
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
@@ -1369,8 +1375,8 @@ static CLOSURE_CALLBACK(cached_dev_free)
if (dc->sb_disk)
put_page(virt_to_page(dc->sb_disk));
- if (dc->bdev_handle)
- bdev_release(dc->bdev_handle);
+ if (dc->bdev_file)
+ fput(dc->bdev_file);
wake_up(&unregister_wait);
@@ -1416,9 +1422,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
- dc->disk.stripe_size = q->limits.io_opt >> 9;
-
- if (dc->disk.stripe_size)
+ if (bdev_io_opt(dc->bdev))
dc->partial_stripes_expensive =
q->limits.raid_partial_stripes_expensive;
@@ -1428,9 +1432,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
if (ret)
return ret;
- blk_queue_io_opt(dc->disk.disk->queue,
- max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
-
atomic_set(&dc->io_errors, 0);
dc->io_disable = false;
dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
@@ -1445,7 +1446,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct bdev_handle *bdev_handle,
+ struct file *bdev_file,
struct cached_dev *dc)
{
const char *err = "cannot allocate memory";
@@ -1453,8 +1454,8 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
int ret = -ENOMEM;
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
- dc->bdev_handle = bdev_handle;
- dc->bdev = bdev_handle->bdev;
+ dc->bdev_file = bdev_file;
+ dc->bdev = file_bdev(bdev_file);
dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
@@ -2218,8 +2219,8 @@ void bch_cache_release(struct kobject *kobj)
if (ca->sb_disk)
put_page(virt_to_page(ca->sb_disk));
- if (ca->bdev_handle)
- bdev_release(ca->bdev_handle);
+ if (ca->bdev_file)
+ fput(ca->bdev_file);
kfree(ca);
module_put(THIS_MODULE);
@@ -2339,18 +2340,18 @@ err_free:
}
static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct bdev_handle *bdev_handle,
+ struct file *bdev_file,
struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
- ca->bdev_handle = bdev_handle;
- ca->bdev = bdev_handle->bdev;
+ ca->bdev_file = bdev_file;
+ ca->bdev = file_bdev(bdev_file);
ca->sb_disk = sb_disk;
- if (bdev_max_discard_sectors((bdev_handle->bdev)))
+ if (bdev_max_discard_sectors(file_bdev(bdev_file)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
@@ -2361,20 +2362,20 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
- pr_notice("error %pg: %s\n", bdev_handle->bdev, err);
+ pr_notice("error %pg: %s\n", file_bdev(bdev_file), err);
/*
* If we failed here, it means ca->kobj is not initialized yet,
* kobject_put() won't be called and there is no chance to
- * call bdev_release() to bdev in bch_cache_release(). So
- * we explicitly call bdev_release() here.
+ * call fput() to bdev in bch_cache_release(). So
+ * we explicitly call fput() on the block device here.
*/
- bdev_release(bdev_handle);
+ fput(bdev_file);
return ret;
}
- if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
+ if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) {
pr_notice("error %pg: error calling kobject_add\n",
- bdev_handle->bdev);
+ file_bdev(bdev_file));
ret = -ENOMEM;
goto out;
}
@@ -2388,7 +2389,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
+ pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file));
out:
kobject_put(&ca->kobj);
@@ -2446,7 +2447,7 @@ struct async_reg_args {
char *path;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
void *holder;
};
@@ -2457,7 +2458,7 @@ static void register_bdev_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
mutex_lock(&bch_register_lock);
- if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
+ if (register_bdev(args->sb, args->sb_disk, args->bdev_file,
args->holder) < 0)
fail = true;
mutex_unlock(&bch_register_lock);
@@ -2478,7 +2479,7 @@ static void register_cache_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
+ if (register_cache(args->sb, args->sb_disk, args->bdev_file,
args->holder))
fail = true;
@@ -2516,7 +2517,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
char *path = NULL;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct bdev_handle *bdev_handle, *bdev_handle2;
+ struct file *bdev_file, *bdev_file2;
void *holder = NULL;
ssize_t ret;
bool async_registration = false;
@@ -2549,15 +2550,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
- bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
- if (IS_ERR(bdev_handle))
+ bdev_file = bdev_file_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev_file))
goto out_free_sb;
err = "failed to set blocksize";
- if (set_blocksize(bdev_handle->bdev, 4096))
+ if (set_blocksize(file_bdev(bdev_file), 4096))
goto out_blkdev_put;
- err = read_super(sb, bdev_handle->bdev, &sb_disk);
+ err = read_super(sb, file_bdev(bdev_file), &sb_disk);
if (err)
goto out_blkdev_put;
@@ -2569,13 +2570,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
}
/* Now reopen in exclusive mode with proper holder */
- bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
+ bdev_file2 = bdev_file_open_by_dev(file_bdev(bdev_file)->bd_dev,
BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
- bdev_release(bdev_handle);
- bdev_handle = bdev_handle2;
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
- bdev_handle = NULL;
+ fput(bdev_file);
+ bdev_file = bdev_file2;
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
+ bdev_file = NULL;
if (ret == -EBUSY) {
dev_t dev;
@@ -2610,7 +2611,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
args->path = path;
args->sb = sb;
args->sb_disk = sb_disk;
- args->bdev_handle = bdev_handle;
+ args->bdev_file = bdev_file;
args->holder = holder;
register_device_async(args);
/* No wait and returns to user space */
@@ -2619,14 +2620,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_disk, bdev_handle, holder);
+ ret = register_bdev(sb, sb_disk, bdev_file, holder);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
goto out_free_sb;
} else {
/* blkdev_put() will be called in bch_cache_release() */
- ret = register_cache(sb, sb_disk, bdev_handle, holder);
+ ret = register_cache(sb, sb_disk, bdev_file, holder);
if (ret)
goto out_free_sb;
}
@@ -2642,8 +2643,8 @@ out_free_holder:
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
- if (bdev_handle)
- bdev_release(bdev_handle);
+ if (bdev_file)
+ fput(bdev_file);
out_free_sb:
kfree(sb);
out_free_path:
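The bcache hunk above switches to the block layer convention of passing queue limits to blk_alloc_disk() at allocation time instead of editing q->limits afterwards; a minimal sketch under that assumption, with an illustrative helper name:

/* Hypothetical sketch: setting queue limits at disk allocation time. */
#include <linux/blkdev.h>

static struct gendisk *example_alloc_disk(unsigned int block_size)
{
	struct queue_limits lim = {
		.logical_block_size	= block_size,
		.physical_block_size	= block_size,
		.io_min			= block_size,
		.max_hw_sectors		= UINT_MAX,
	};

	/*
	 * blk_alloc_disk() now takes the limits up front and returns an
	 * ERR_PTR() on failure, so callers check with IS_ERR() rather than
	 * for NULL.
	 */
	return blk_alloc_disk(&lim, NUMA_NO_NODE);
}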
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index 9ab32abe5ed4..bca0f39e15b8 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -489,5 +489,5 @@ module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);
MODULE_DESCRIPTION(DM_NAME " bio prison");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 13c65b7e1ed6..098bf526136c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1292,7 +1292,8 @@ static void dmio_complete(unsigned long error, void *context)
}
static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned int n_sectors, unsigned int offset)
+ unsigned int n_sectors, unsigned int offset,
+ unsigned short ioprio)
{
int r;
struct dm_io_request io_req = {
@@ -1315,7 +1316,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
io_req.mem.ptr.vma = (char *)b->data + offset;
}
- r = dm_io(&io_req, 1, &region, NULL);
+ r = dm_io(&io_req, 1, &region, NULL, ioprio);
if (unlikely(r))
b->end_io(b, errno_to_blk_status(r));
}
@@ -1331,7 +1332,8 @@ static void bio_complete(struct bio *bio)
}
static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned int n_sectors, unsigned int offset)
+ unsigned int n_sectors, unsigned int offset,
+ unsigned short ioprio)
{
struct bio *bio;
char *ptr;
@@ -1339,13 +1341,14 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
- use_dmio(b, op, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset, ioprio);
return;
}
bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
+ bio->bi_ioprio = ioprio;
ptr = (char *)b->data + offset;
len = n_sectors << SECTOR_SHIFT;
@@ -1368,7 +1371,7 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
return sector;
}
-static void submit_io(struct dm_buffer *b, enum req_op op,
+static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned int n_sectors;
@@ -1398,9 +1401,9 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, op, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset, ioprio);
else
- use_dmio(b, op, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset, ioprio);
}
/*
@@ -1456,7 +1459,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, REQ_OP_WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -1470,7 +1473,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, REQ_OP_WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1852,7 +1855,8 @@ static void read_endio(struct dm_buffer *b, blk_status_t status)
* and uses dm_bufio_mark_buffer_dirty to write new data back).
*/
static void *new_read(struct dm_bufio_client *c, sector_t block,
- enum new_flag nf, struct dm_buffer **bp)
+ enum new_flag nf, struct dm_buffer **bp,
+ unsigned short ioprio)
{
int need_submit = 0;
struct dm_buffer *b;
@@ -1905,7 +1909,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
+ submit_io(b, REQ_OP_READ, ioprio, read_endio);
if (nf != NF_GET) /* we already tested this condition above */
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1926,32 +1930,46 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp)
{
- return new_read(c, block, NF_GET, bp);
+ return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);
-void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
- struct dm_buffer **bp)
+static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio)
{
if (WARN_ON_ONCE(dm_bufio_in_request()))
return ERR_PTR(-EINVAL);
- return new_read(c, block, NF_READ, bp);
+ return new_read(c, block, NF_READ, bp, ioprio);
+}
+
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp)
+{
+ return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio)
+{
+ return __dm_bufio_read(c, block, bp, ioprio);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
+
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp)
{
if (WARN_ON_ONCE(dm_bufio_in_request()))
return ERR_PTR(-EINVAL);
- return new_read(c, block, NF_FRESH, bp);
+ return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
-void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned int n_blocks)
+static void __dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio)
{
struct blk_plug plug;
@@ -1987,7 +2005,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
+ submit_io(b, REQ_OP_READ, ioprio, read_endio);
dm_bufio_release(b);
cond_resched();
@@ -2002,8 +2020,20 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
flush_plug:
blk_finish_plug(&plug);
}
+
+void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
+{
+ return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
+}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ unsigned int n_blocks, unsigned short ioprio)
+{
+ return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
+
void dm_bufio_release(struct dm_buffer *b)
{
struct dm_bufio_client *c = b->c;
@@ -2167,7 +2197,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
if (WARN_ON_ONCE(dm_bufio_in_request()))
return -EINVAL;
- return dm_io(&io_req, 1, &io_reg, NULL);
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
@@ -2191,7 +2221,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
if (WARN_ON_ONCE(dm_bufio_in_request()))
return -EINVAL; /* discards are optional */
- return dm_io(&io_req, 1, &io_reg, NULL);
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
@@ -2968,6 +2998,6 @@ MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
-MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 8bd2ad743d9a..2ed894155cab 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1947,7 +1947,7 @@ static void __exit smq_exit(void)
module_init(smq_init);
module_exit(smq_exit);
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f745f8508243..9a74c6316c5d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -53,15 +53,17 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
- struct bio *bio_out;
struct bvec_iter iter_in;
+ struct bio *bio_out;
struct bvec_iter iter_out;
- u64 cc_sector;
atomic_t cc_pending;
+ u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
+ bool aead_recheck;
+ bool aead_failed;
};
@@ -82,6 +84,8 @@ struct dm_crypt_io {
blk_status_t error;
sector_t sector;
+ struct bvec_iter saved_bi_iter;
+
struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
@@ -1370,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
if (r == -EBADMSG) {
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
}
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -1681,6 +1688,7 @@ retry:
GFP_NOIO, &cc->bs);
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
+ clone->bi_ioprio = io->base_bio->bi_ioprio;
remaining_size = size;
@@ -1757,6 +1765,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
+ io->ctx.aead_recheck = false;
+ io->ctx.aead_failed = false;
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
@@ -1768,6 +1778,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
+static void kcryptd_queue_read(struct dm_crypt_io *io);
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1781,6 +1793,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending))
return;
+ if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
+ cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+ io->ctx.aead_recheck = true;
+ io->ctx.aead_failed = false;
+ io->error = 0;
+ kcryptd_queue_read(io);
+ return;
+ }
+
if (io->ctx.r.req)
crypt_free_req(cc, io->ctx.r.req, base_bio);
@@ -1816,15 +1837,19 @@ static void crypt_endio(struct bio *clone)
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned int rw = bio_data_dir(clone);
- blk_status_t error;
+ blk_status_t error = clone->bi_status;
+
+ if (io->ctx.aead_recheck && !error) {
+ kcryptd_queue_crypt(io);
+ return;
+ }
/*
* free the processed pages
*/
- if (rw == WRITE)
+ if (rw == WRITE || io->ctx.aead_recheck)
crypt_free_buffer_pages(cc, clone);
- error = clone->bi_status;
bio_put(clone);
if (rw == READ && !error) {
@@ -1845,6 +1870,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
struct crypt_config *cc = io->cc;
struct bio *clone;
+ if (io->ctx.aead_recheck) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return 1;
+ crypt_inc_pending(io);
+ clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+ if (unlikely(!clone)) {
+ crypt_dec_pending(io);
+ return 1;
+ }
+ clone->bi_iter.bi_sector = cc->start + io->sector;
+ crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
+ io->saved_bi_iter = clone->bi_iter;
+ dm_submit_bio_remap(io->base_bio, clone);
+ return 0;
+ }
+
/*
* We need the original biovec array in order to decrypt the whole bio
* data *afterwards* -- thanks to immutable biovecs we don't need to
@@ -1924,7 +1965,6 @@ continue_locked:
schedule();
- set_current_state(TASK_RUNNING);
spin_lock_irq(&cc->write_thread_lock);
goto continue_locked;
@@ -2071,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.bio_out = clone;
io->ctx.iter_out = clone->bi_iter;
+ if (crypt_integrity_aead(cc)) {
+ bio_copy_data(clone, io->base_bio);
+ io->ctx.bio_in = clone;
+ io->ctx.iter_in = clone->bi_iter;
+ }
+
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -2107,6 +2153,14 @@ dec:
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
+ if (io->ctx.aead_recheck) {
+ if (!io->error) {
+ io->ctx.bio_in->bi_iter = io->saved_bi_iter;
+ bio_copy_data(io->base_bio, io->ctx.bio_in);
+ }
+ crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
+ bio_put(io->ctx.bio_in);
+ }
crypt_dec_pending(io);
}
@@ -2136,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
- crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
- io->sector);
+ if (io->ctx.aead_recheck) {
+ io->ctx.cc_sector = io->sector + cc->iv_offset;
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ } else {
+ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+ io->sector);
- r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ }
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
@@ -2182,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error)
if (error == -EBADMSG) {
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
io->error = BLK_STS_IOERR;
@@ -2233,7 +2296,11 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
- if (!(in_hardirq() || irqs_disabled())) {
+ if (in_hardirq() || irqs_disabled()) {
+ INIT_WORK(&io->work, kcryptd_crypt);
+ queue_work(system_bh_wq, &io->work);
+ return;
+ } else {
kcryptd_crypt(&io->work);
return;
}
@@ -3110,7 +3177,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
- } else if (strcasecmp(sval, "none")) {
+ } else if (strcasecmp(sval, "none")) {
ti->error = "Unknown integrity profile";
return -EINVAL;
}
@@ -3639,7 +3706,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 24, 0},
+ .version = {1, 25, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 12a377e06d02..1a33820c9f46 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -573,5 +573,5 @@ static struct target_type dust_target = {
module_dm(dust);
MODULE_DESCRIPTION(DM_NAME " dust test target");
-MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
+MODULE_AUTHOR("Bryan Gurney <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 435b45201f4d..b70d4016c2ac 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -454,6 +454,6 @@ static struct target_type ebs_target = {
};
module_dm(ebs);
-MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " emulated block size target");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 7916ed9f10e8..731467d4ed10 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -690,5 +690,5 @@ static struct target_type flakey_target = {
module_dm(flakey);
MODULE_DESCRIPTION(DM_NAME " flakey target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c5f03aab4552..d822ab2f739b 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -278,6 +278,8 @@ struct dm_integrity_c {
atomic64_t number_of_mismatches;
+ mempool_t recheck_pool;
+
struct notifier_block reboot_notifier;
};
@@ -553,7 +555,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
}
}
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
return r;
@@ -1071,7 +1073,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
io_loc.sector = ic->start + SB_SECTORS + sector;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
"reading journal" : "writing journal", r);
@@ -1188,7 +1190,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
io_loc.sector = target;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
fn(-1UL, data);
@@ -1517,7 +1519,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
fr.io_reg.count = 0,
fr.ic = ic;
init_completion(&fr.comp);
- r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
BUG_ON(r);
}
@@ -1689,6 +1691,77 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ struct dm_integrity_c *ic = dio->ic;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ sector_t sector, logical_sector, area, offset;
+ struct page *page;
+ void *buffer;
+
+ get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+ dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+ &dio->metadata_offset);
+ sector = get_data_sector(ic, area, offset);
+ logical_sector = dio->range.logical_sector;
+
+ page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ unsigned pos = 0;
+
+ do {
+ char *mem;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = sector;
+ io_loc.count = ic->sectors_per_block;
+
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r)) {
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
+ &dio->metadata_offset, ic->tag_size, TAG_CMP);
+ if (r) {
+ if (r > 0) {
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, logical_sector);
+ atomic64_inc(&ic->number_of_mismatches);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+ bio, logical_sector, 0);
+ r = -EILSEQ;
+ }
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ mem = bvec_kmap_local(&bv);
+ memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ kunmap_local(mem);
+
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+ logical_sector += ic->sectors_per_block;
+ } while (pos < bv.bv_len);
+ }
+free_ret:
+ mempool_free(page, &ic->recheck_pool);
+}
+
static void integrity_metadata(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
@@ -1776,15 +1849,8 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- sector_t s;
-
- s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
- bio->bi_bdev, s);
- r = -EILSEQ;
- atomic64_inc(&ic->number_of_mismatches);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
- bio, s, 0);
+ integrity_recheck(dio, checksums);
+ goto skip_io;
}
if (likely(checksums != checksums_onstack))
kfree(checksums);
@@ -2740,7 +2806,7 @@ next_chunk:
io_loc.sector = get_data_sector(ic, area, offset);
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, "reading data", r);
goto err;
@@ -3419,6 +3485,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
limits->dma_alignment = limits->logical_block_size - 1;
}
+ limits->max_integrity_segments = USHRT_MAX;
}
static void calculate_journal_section_size(struct dm_integrity_c *ic)
@@ -3586,7 +3653,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
blk_integrity_register(disk, &bi);
- blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
static void dm_integrity_free_page_list(struct page_list *pl)
@@ -4261,6 +4327,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
goto bad;
}
+ r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
if (!ic->metadata_wq) {
@@ -4609,6 +4681,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
@@ -4661,7 +4734,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f053ce245814..7409490259d1 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -305,7 +305,7 @@ static void km_dp_init(struct dpages *dp, void *data)
*/
static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
- struct io *io)
+ struct io *io, unsigned short ioprio)
{
struct bio *bio;
struct page *page;
@@ -354,6 +354,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
&io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_end_io = endio;
+ bio->bi_ioprio = ioprio;
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
@@ -383,7 +384,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
- struct io *io, int sync)
+ struct io *io, int sync, unsigned short ioprio)
{
int i;
struct dpages old_pages = *dp;
@@ -400,7 +401,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
if (where[i].count || (opf & REQ_PREFLUSH))
- do_region(opf, i, where + i, dp, io);
+ do_region(opf, i, where + i, dp, io, ioprio);
}
/*
@@ -425,7 +426,7 @@ static void sync_io_complete(unsigned long error, void *context)
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
- unsigned long *error_bits)
+ unsigned long *error_bits, unsigned short ioprio)
{
struct io *io;
struct sync_io sio;
@@ -447,7 +448,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(opf, num_regions, where, dp, io, 1);
+ dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
wait_for_completion_io(&sio.wait);
@@ -459,7 +460,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
static int async_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, blk_opf_t opf,
- struct dpages *dp, io_notify_fn fn, void *context)
+ struct dpages *dp, io_notify_fn fn, void *context,
+ unsigned short ioprio)
{
struct io *io;
@@ -479,7 +481,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(opf, num_regions, where, dp, io, 0);
+ dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
return 0;
}
@@ -521,7 +523,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
}
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
- struct dm_io_region *where, unsigned long *sync_error_bits)
+ struct dm_io_region *where, unsigned long *sync_error_bits,
+ unsigned short ioprio)
{
int r;
struct dpages dp;
@@ -532,11 +535,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_opf, &dp, sync_error_bits);
+ io_req->bi_opf, &dp, sync_error_bits, ioprio);
return async_io(io_req->client, num_regions, where,
io_req->bi_opf, &dp, io_req->notify.fn,
- io_req->notify.context);
+ io_req->notify.context, ioprio);
}
EXPORT_SYMBOL(dm_io);
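The dm-io change above threads an ioprio value down to bio->bi_ioprio before submission; a minimal, hypothetical sketch of the same idea for a standalone bio (error and completion handling mostly elided):

/* Hypothetical sketch: tagging a bio with an I/O priority before submission. */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ioprio.h>

static void example_read_end_io(struct bio *bio)
{
	/* Completion handling elided; just release the bio. */
	bio_put(bio);
}

static void example_submit_read(struct block_device *bdev, struct page *page,
				sector_t sector, unsigned short ioprio)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	bio->bi_ioprio = ioprio;	/* e.g. IOPRIO_DEFAULT from linux/ioprio.h */
	bio->bi_end_io = example_read_end_io;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}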
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 3b1ad7127cb8..c2c07bfa6471 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -25,7 +25,7 @@
#include <linux/ima.h>
#define DM_MSG_PREFIX "ioctl"
-#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
+#define DM_DRIVER_EMAIL "dm-devel@lists.linux.dev"
struct dm_file {
/*
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 36bcfdccae04..6ea75436a433 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -578,9 +578,9 @@ static int run_io_job(struct kcopyd_job *job)
io_job_start(job->kc->throttle);
if (job->op == REQ_OP_READ)
- r = dm_io(&io_req, 1, &job->source, NULL);
+ r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
else
- r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+ r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
return r;
}
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 7e4f27e86150..9fbb4b48fb2b 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -926,5 +926,5 @@ module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);
MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
-MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
+MODULE_AUTHOR("Jonathan Brassow <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index f9f84236dfcd..9d85d045f9d9 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -300,7 +300,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
{
lc->io_req.bi_opf = op;
- return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
+ return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
}
static int flush_header(struct log_c *lc)
@@ -313,7 +313,7 @@ static int flush_header(struct log_c *lc)
lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return dm_io(&lc->io_req, 1, &null_location, NULL);
+ return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
}
static int read_header(struct log_c *log)
@@ -908,5 +908,5 @@ module_init(dm_dirty_log_init);
module_exit(dm_dirty_log_exit);
MODULE_DESCRIPTION(DM_NAME " dirty region log");
-MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index bea3cda9938e..05d1328d1811 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -2266,5 +2266,5 @@ module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs,
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " multipath target");
-MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
+MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
index 0f04b673597a..d1745b123dc1 100644
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -240,5 +240,5 @@ module_init(dm_rr_init);
module_exit(dm_rr_exit);
MODULE_DESCRIPTION(DM_NAME " round-robin multipath path selector");
-MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
+MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index eb009d6bb03a..abe88d1e6735 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -213,6 +213,7 @@ struct raid_dev {
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
#define RT_FLAG_RS_GROW 8
+#define RT_FLAG_RS_FROZEN 9
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -3240,11 +3241,12 @@ size_check:
rs->md.ro = 1;
rs->md.in_sync = 1;
- /* Keep array frozen until resume. */
- set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
-
/* Has to be held on running the array */
mddev_suspend_and_lock_nointr(&rs->md);
+
+ /* Keep array frozen until resume. */
+ md_frozen_sync_thread(&rs->md);
+
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
if (r) {
@@ -3329,17 +3331,18 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
struct mddev *mddev = &rs->md;
/*
- * If we're reshaping to add disk(s)), ti->len and
+ * If we're reshaping to add disk(s), ti->len and
* mddev->array_sectors will differ during the process
* (ti->len > mddev->array_sectors), so we have to requeue
* bios with addresses > mddev->array_sectors here or
* there will occur accesses past EOD of the component
* data images thus erroring the raid set.
*/
- if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
- md_handle_request(mddev, bio);
+ if (unlikely(!md_handle_request(mddev, bio)))
+ return DM_MAPIO_REQUEUE;
return DM_MAPIO_SUBMITTED;
}
@@ -3718,21 +3721,33 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
+ int ret = 0;
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
- if (!strcasecmp(argv[0], "frozen"))
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- else
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) ||
+ test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags))
+ return -EBUSY;
- if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
- }
- } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
+ if (!strcasecmp(argv[0], "frozen")) {
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ md_frozen_sync_thread(mddev);
+ mddev_unlock(mddev);
+ } else if (!strcasecmp(argv[0], "idle")) {
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ md_idle_sync_thread(mddev);
+ mddev_unlock(mddev);
+ }
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
else if (!strcasecmp(argv[0], "resync"))
; /* MD_RECOVERY_NEEDED set below */
@@ -3791,15 +3806,46 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
}
+static void raid_presuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ /*
+ * From now on, disallow raid_message() from changing sync_thread until
+ * resume; raid_postsuspend() is too late.
+ */
+ set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+
+ if (!reshape_interrupted(mddev))
+ return;
+
+ /*
+ * For raid456, if the reshape is interrupted, IO across the reshape
+ * position will never make progress, while the caller will wait for that
+ * IO to be done. Inform raid456 to handle such IO to prevent deadlock.
+ */
+ if (mddev->pers && mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+}
+
+static void raid_presuspend_undo(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+}
+
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
- /* Writes have to be stopped before suspending to avoid deadlocks. */
- if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
- md_stop_writes(&rs->md);
-
+ /*
+ * sync_thread must be stopped during suspend, and writes have
+ * to be stopped before suspending to avoid deadlocks.
+ */
+ md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
}
}
@@ -4012,8 +4058,6 @@ static int raid_preresume(struct dm_target *ti)
}
/* Check for any resize/reshape on @rs and adjust/initiate */
- /* Be prepared for mddev_resume() in raid_resume() */
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
@@ -4047,7 +4091,9 @@ static void raid_resume(struct dm_target *ti)
* Take this opportunity to check whether any failed
* devices are reachable again.
*/
+ mddev_lock_nointr(mddev);
attempt_restore_of_faulty_devices(rs);
+ mddev_unlock(mddev);
}
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
@@ -4055,10 +4101,13 @@ static void raid_resume(struct dm_target *ti)
if (mddev->delta_disks < 0)
rs_set_capacity(rs);
+ WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
+ WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
mddev_lock_nointr(mddev);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev->ro = 0;
mddev->in_sync = 0;
+ md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
}
}
@@ -4074,6 +4123,8 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
+ .presuspend = raid_presuspend,
+ .presuspend_undo = raid_presuspend_undo,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
@@ -4091,6 +4142,6 @@ MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
-MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
-MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Neil Brown <dm-devel@lists.linux.dev>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddcb2bc4a617..9511dae5b556 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -278,7 +278,7 @@ static int mirror_flush(struct dm_target *ti)
}
error_bits = -1;
- dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
+ dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
if (unlikely(error_bits != 0)) {
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error_bits))
@@ -554,7 +554,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
map_region(&io, m, bio);
bio_set_m(bio, m);
- BUG_ON(dm_io(&io_req, 1, &io, NULL));
+ BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
}
static inline int region_in_sync(struct mirror_set *ms, region_t region,
@@ -681,7 +681,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
*/
bio_set_m(bio, get_default_mirror(ms));
- BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
+ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 852cfa37d48a..a4550975c27d 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -723,5 +723,5 @@ void dm_rh_start_recovery(struct dm_region_hash *rh)
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
MODULE_DESCRIPTION(DM_NAME " region hash");
-MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 15649921f2a9..568d10842b1f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -223,7 +223,7 @@ static void do_metadata(struct work_struct *work)
{
struct mdata_req *req = container_of(work, struct mdata_req, work);
- req->result = dm_io(req->io_req, 1, req->where, NULL);
+ req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
}
/*
@@ -247,7 +247,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
struct mdata_req req;
if (!metadata)
- return dm_io(&io_req, 1, &where, NULL);
+ return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
req.where = &where;
req.io_req = &io_req;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 07c7f9795b10..4793ad2aa1f7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -453,12 +453,13 @@ static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bi
cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
- if (r)
+ if (r) {
/*
* We reused an old cell; we can get rid of
* the new one.
*/
dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+ }
return r;
}
@@ -707,9 +708,10 @@ static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
(void) sector_div(e, pool->sectors_per_block);
}
- if (e < b)
+ if (e < b) {
/* Can happen if the bio is within a single block. */
e = b;
+ }
*begin = b;
*end = e;
@@ -721,13 +723,14 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_iter.bi_sector;
bio_set_dev(bio, tc->pool_dev->bdev);
- if (block_size_is_power_of_two(pool))
+ if (block_size_is_power_of_two(pool)) {
bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
- else
+ } else {
bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
+ }
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -1401,9 +1404,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (pool->pf.zero_new_blocks) {
if (io_overwrites_block(pool, bio))
remap_and_issue_overwrite(tc, bio, data_block, m);
- else
+ else {
ll_zero(tc, m, data_block * pool->sectors_per_block,
(data_block + 1) * pool->sectors_per_block);
+ }
} else
process_prepared_mapping(m);
}
@@ -1416,17 +1420,17 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
sector_t virt_block_begin = virt_block * pool->sectors_per_block;
sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
- if (virt_block_end <= tc->origin_size)
+ if (virt_block_end <= tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
pool->sectors_per_block);
- else if (virt_block_begin < tc->origin_size)
+ } else if (virt_block_begin < tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
tc->origin_size - virt_block_begin);
- else
+ } else
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
@@ -4560,5 +4564,5 @@ module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-vdo/Kconfig b/drivers/md/dm-vdo/Kconfig
new file mode 100644
index 000000000000..111ecd2c2a24
--- /dev/null
+++ b/drivers/md/dm-vdo/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DM_VDO
+ tristate "VDO: deduplication and compression target"
+ depends on 64BIT
+ depends on BLK_DEV_DM
+ select DM_BUFIO
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ help
+ This device mapper target presents a block device with
+ deduplication, compression and thin-provisioning.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-vdo.
+
+ If unsure, say N.
diff --git a/drivers/md/dm-vdo/Makefile b/drivers/md/dm-vdo/Makefile
new file mode 100644
index 000000000000..33e09abc6acd
--- /dev/null
+++ b/drivers/md/dm-vdo/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y := -I$(srctree)/$(src) -I$(srctree)/$(src)/indexer
+
+obj-$(CONFIG_DM_VDO) += dm-vdo.o
+
+dm-vdo-objs := \
+ action-manager.o \
+ admin-state.o \
+ block-map.o \
+ completion.o \
+ data-vio.o \
+ dedupe.o \
+ dm-vdo-target.o \
+ dump.o \
+ encodings.o \
+ errors.o \
+ flush.o \
+ funnel-queue.o \
+ funnel-workqueue.o \
+ int-map.o \
+ io-submitter.o \
+ logger.o \
+ logical-zone.o \
+ memory-alloc.o \
+ message-stats.o \
+ murmurhash3.o \
+ packer.o \
+ permassert.o \
+ physical-zone.o \
+ priority-table.o \
+ recovery-journal.o \
+ repair.o \
+ slab-depot.o \
+ status-codes.o \
+ string-utils.o \
+ thread-device.o \
+ thread-registry.o \
+ thread-utils.o \
+ vdo.o \
+ vio.o \
+ wait-queue.o \
+ indexer/chapter-index.o \
+ indexer/config.o \
+ indexer/delta-index.o \
+ indexer/funnel-requestqueue.o \
+ indexer/geometry.o \
+ indexer/index.o \
+ indexer/index-layout.o \
+ indexer/index-page-map.o \
+ indexer/index-session.o \
+ indexer/io-factory.o \
+ indexer/open-chapter.o \
+ indexer/radix-sort.o \
+ indexer/sparse-cache.o \
+ indexer/volume.o \
+ indexer/volume-index.o
diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c
new file mode 100644
index 000000000000..a0e5e7077d13
--- /dev/null
+++ b/drivers/md/dm-vdo/action-manager.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "action-manager.h"
+
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+
+/**
+ * struct action - An action to be performed in each of a set of zones.
+ * @in_use: Whether this structure is in use.
+ * @operation: The admin operation associated with this action.
+ * @preamble: The method to run on the initiator thread before the action is applied to each zone.
+ * @zone_action: The action to be performed in each zone.
+ * @conclusion: The method to run on the initiator thread after the action is applied to each zone.
+ * @parent: The object to notify when the action is complete.
+ * @context: The action specific context.
+ * @next: The action to perform after this one.
+ */
+struct action {
+ bool in_use;
+ const struct admin_state_code *operation;
+ vdo_action_preamble_fn preamble;
+ vdo_zone_action_fn zone_action;
+ vdo_action_conclusion_fn conclusion;
+ struct vdo_completion *parent;
+ void *context;
+ struct action *next;
+};
+
+/**
+ * struct action_manager - Definition of an action manager.
+ * @completion: The completion for performing actions.
+ * @state: The state of this action manager.
+ * @actions: The two action slots.
+ * @current_action: The current action slot.
+ * @zones: The number of zones in which an action is to be applied.
+ * @scheduler: A function to schedule a default next action.
+ * @get_zone_thread_id: A function to get the id of the thread on which to apply an action to a
+ * zone.
+ * @initiator_thread_id: The ID of the thread on which actions may be initiated.
+ * @context: Opaque data associated with this action manager.
+ * @acting_zone: The zone currently being acted upon.
+ */
+struct action_manager {
+ struct vdo_completion completion;
+ struct admin_state state;
+ struct action actions[2];
+ struct action *current_action;
+ zone_count_t zones;
+ vdo_action_scheduler_fn scheduler;
+ vdo_zone_thread_getter_fn get_zone_thread_id;
+ thread_id_t initiator_thread_id;
+ void *context;
+ zone_count_t acting_zone;
+};
+
+static inline struct action_manager *as_action_manager(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_ACTION_COMPLETION);
+ return container_of(completion, struct action_manager, completion);
+}
+
+/* Implements vdo_action_scheduler_fn. */
+static bool no_default_action(void *context __always_unused)
+{
+ return false;
+}
+
+/* Implements vdo_action_preamble_fn. */
+static void no_preamble(void *context __always_unused, struct vdo_completion *completion)
+{
+ vdo_finish_completion(completion);
+}
+
+/* Implements vdo_action_conclusion_fn. */
+static int no_conclusion(void *context __always_unused)
+{
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_make_action_manager() - Make an action manager.
+ * @zones: The number of zones to which actions will be applied.
+ * @get_zone_thread_id: A function to get the thread id associated with a zone.
+ * @initiator_thread_id: The thread on which actions may be initiated.
+ * @context: The object which holds the per-zone context for the action.
+ * @scheduler: A function to schedule a next action after an action concludes if there is no
+ * pending action (may be NULL).
+ * @vdo: The vdo used to initialize completions.
+ * @manager_ptr: A pointer to hold the new action manager.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_action_manager(zone_count_t zones,
+ vdo_zone_thread_getter_fn get_zone_thread_id,
+ thread_id_t initiator_thread_id, void *context,
+ vdo_action_scheduler_fn scheduler, struct vdo *vdo,
+ struct action_manager **manager_ptr)
+{
+ struct action_manager *manager;
+ int result = vdo_allocate(1, struct action_manager, __func__, &manager);
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *manager = (struct action_manager) {
+ .zones = zones,
+ .scheduler =
+ ((scheduler == NULL) ? no_default_action : scheduler),
+ .get_zone_thread_id = get_zone_thread_id,
+ .initiator_thread_id = initiator_thread_id,
+ .context = context,
+ };
+
+ manager->actions[0].next = &manager->actions[1];
+ manager->current_action = manager->actions[1].next =
+ &manager->actions[0];
+ vdo_set_admin_state_code(&manager->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ vdo_initialize_completion(&manager->completion, vdo, VDO_ACTION_COMPLETION);
+ *manager_ptr = manager;
+ return VDO_SUCCESS;
+}
+
+const struct admin_state_code *vdo_get_current_manager_operation(struct action_manager *manager)
+{
+ return vdo_get_admin_state_code(&manager->state);
+}
+
+void *vdo_get_current_action_context(struct action_manager *manager)
+{
+ return manager->current_action->in_use ? manager->current_action->context : NULL;
+}
+
+static void finish_action_callback(struct vdo_completion *completion);
+static void apply_to_zone(struct vdo_completion *completion);
+
+static thread_id_t get_acting_zone_thread_id(struct action_manager *manager)
+{
+ return manager->get_zone_thread_id(manager->context, manager->acting_zone);
+}
+
+static void preserve_error(struct vdo_completion *completion)
+{
+ if (completion->parent != NULL)
+ vdo_set_completion_result(completion->parent, completion->result);
+
+ vdo_reset_completion(completion);
+ vdo_run_completion(completion);
+}
+
+static void prepare_for_next_zone(struct action_manager *manager)
+{
+ vdo_prepare_completion_for_requeue(&manager->completion, apply_to_zone,
+ preserve_error,
+ get_acting_zone_thread_id(manager),
+ manager->current_action->parent);
+}
+
+static void prepare_for_conclusion(struct action_manager *manager)
+{
+ vdo_prepare_completion_for_requeue(&manager->completion, finish_action_callback,
+ preserve_error, manager->initiator_thread_id,
+ manager->current_action->parent);
+}
+
+static void apply_to_zone(struct vdo_completion *completion)
+{
+ zone_count_t zone;
+ struct action_manager *manager = as_action_manager(completion);
+
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
+ "%s() called on acting zones's thread", __func__);
+
+ zone = manager->acting_zone++;
+ if (manager->acting_zone == manager->zones) {
+ /*
+ * We are about to apply to the last zone. Once that is finished, we're done, so go
+ * back to the initiator thread and finish up.
+ */
+ prepare_for_conclusion(manager);
+ } else {
+ /* Prepare to come back on the next zone */
+ prepare_for_next_zone(manager);
+ }
+
+ manager->current_action->zone_action(manager->context, zone, completion);
+}
+
+static void handle_preamble_error(struct vdo_completion *completion)
+{
+ /* Skip the zone actions since the preamble failed. */
+ completion->callback = finish_action_callback;
+ preserve_error(completion);
+}
+
+static void launch_current_action(struct action_manager *manager)
+{
+ struct action *action = manager->current_action;
+ int result = vdo_start_operation(&manager->state, action->operation);
+
+ if (result != VDO_SUCCESS) {
+ if (action->parent != NULL)
+ vdo_set_completion_result(action->parent, result);
+
+ /* We aren't going to run the preamble, so don't run the conclusion */
+ action->conclusion = no_conclusion;
+ finish_action_callback(&manager->completion);
+ return;
+ }
+
+ if (action->zone_action == NULL) {
+ prepare_for_conclusion(manager);
+ } else {
+ manager->acting_zone = 0;
+ vdo_prepare_completion_for_requeue(&manager->completion, apply_to_zone,
+ handle_preamble_error,
+ get_acting_zone_thread_id(manager),
+ manager->current_action->parent);
+ }
+
+ action->preamble(manager->context, &manager->completion);
+}
+
+/**
+ * vdo_schedule_default_action() - Attempt to schedule the default action.
+ * @manager: The action manager.
+ *
+ * If the manager is not operating normally, the action will not be scheduled.
+ *
+ * Return: true if an action was scheduled.
+ */
+bool vdo_schedule_default_action(struct action_manager *manager)
+{
+ /* Don't schedule a default action while an operation is in progress or outside normal operation. */
+ const struct admin_state_code *code = vdo_get_current_manager_operation(manager);
+
+ return ((code == VDO_ADMIN_STATE_NORMAL_OPERATION) &&
+ manager->scheduler(manager->context));
+}
+
+static void finish_action_callback(struct vdo_completion *completion)
+{
+ bool has_next_action;
+ int result;
+ struct action_manager *manager = as_action_manager(completion);
+ struct action action = *(manager->current_action);
+
+ manager->current_action->in_use = false;
+ manager->current_action = manager->current_action->next;
+
+ /*
+ * We need to check this now to avoid use-after-free issues if running the conclusion or
+ * notifying the parent results in the manager being freed.
+ */
+ has_next_action =
+ (manager->current_action->in_use || vdo_schedule_default_action(manager));
+ result = action.conclusion(manager->context);
+ vdo_finish_operation(&manager->state, VDO_SUCCESS);
+ if (action.parent != NULL)
+ vdo_continue_completion(action.parent, result);
+
+ if (has_next_action)
+ launch_current_action(manager);
+}
+
+/**
+ * vdo_schedule_action() - Schedule an action to be applied to all zones.
+ * @manager: The action manager to schedule the action on.
+ * @preamble: A method to be invoked on the initiator thread once this action is started but before
+ * applying to each zone; may be NULL.
+ * @action: The action to apply to each zone; may be NULL.
+ * @conclusion: A method to be invoked back on the initiator thread once the action has been
+ * applied to all zones; may be NULL.
+ * @parent: The object to notify once the action is complete or if the action can not be scheduled;
+ * may be NULL.
+ *
+ * The action will be launched immediately if there is no current action, or as soon as the current
+ * action completes. If there is already a pending action, this action will not be scheduled, and,
+ * if it has a parent, that parent will be notified. At least one of the preamble, action, or
+ * conclusion must not be NULL.
+ *
+ * Return: true if the action was scheduled.
+ */
+bool vdo_schedule_action(struct action_manager *manager, vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action, vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent)
+{
+ return vdo_schedule_operation(manager, VDO_ADMIN_STATE_OPERATING, preamble,
+ action, conclusion, parent);
+}
+
+/**
+ * vdo_schedule_operation() - Schedule an operation to be applied to all zones.
+ * @manager: The action manager to schedule the action on.
+ * @operation: The operation this action will perform
+ * @preamble: A method to be invoked on the initiator thread once this action is started but before
+ * applying to each zone; may be NULL.
+ * @action: The action to apply to each zone; may be NULL.
+ * @conclusion: A method to be invoked back on the initiator thread once the action has been
+ * applied to all zones; may be NULL.
+ * @parent: The object to notify once the action is complete or if the action can not be scheduled;
+ * may be NULL.
+ *
+ * The operation's action will be launched immediately if there is no current action, or as soon as
+ * the current action completes. If there is already a pending action, this operation will not be
+ * scheduled, and, if it has a parent, that parent will be notified. At least one of the preamble,
+ * action, or conclusion must not be NULL.
+ *
+ * Return: true if the action was scheduled.
+ */
+bool vdo_schedule_operation(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble, vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent)
+{
+ return vdo_schedule_operation_with_context(manager, operation, preamble, action,
+ conclusion, NULL, parent);
+}
+
+/**
+ * vdo_schedule_operation_with_context() - Schedule an operation on all zones.
+ * @manager: The action manager to schedule the action on.
+ * @operation: The operation this action will perform.
+ * @preamble: A method to be invoked on the initiator thread once this action is started but before
+ * applying to each zone; may be NULL.
+ * @action: The action to apply to each zone; may be NULL.
+ * @conclusion: A method to be invoked back on the initiator thread once the action has been
+ * applied to all zones; may be NULL.
+ * @context: An action-specific context which may be retrieved via
+ * vdo_get_current_action_context(); may be NULL.
+ * @parent: The object to notify once the action is complete or if the action can not be scheduled;
+ * may be NULL.
+ *
+ * The operation's action will be launched immediately if there is no current action, or as soon as
+ * the current action completes. If there is already a pending action, this operation will not be
+ * scheduled, and, if it has a parent, that parent will be notified. At least one of the preamble,
+ * action, or conclusion must not be NULL.
+ *
+ * Return: true if the action was scheduled
+ */
+bool vdo_schedule_operation_with_context(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ void *context, struct vdo_completion *parent)
+{
+ struct action *current_action;
+
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
+ "action initiated from correct thread");
+ if (!manager->current_action->in_use) {
+ current_action = manager->current_action;
+ } else if (!manager->current_action->next->in_use) {
+ current_action = manager->current_action->next;
+ } else {
+ if (parent != NULL)
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+
+ return false;
+ }
+
+ *current_action = (struct action) {
+ .in_use = true,
+ .operation = operation,
+ .preamble = (preamble == NULL) ? no_preamble : preamble,
+ .zone_action = action,
+ .conclusion = (conclusion == NULL) ? no_conclusion : conclusion,
+ .context = context,
+ .parent = parent,
+ .next = current_action->next,
+ };
+
+ if (current_action == manager->current_action)
+ launch_current_action(manager);
+
+ return true;
+}
diff --git a/drivers/md/dm-vdo/action-manager.h b/drivers/md/dm-vdo/action-manager.h
new file mode 100644
index 000000000000..b0a8d3ddf3db
--- /dev/null
+++ b/drivers/md/dm-vdo/action-manager.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_ACTION_MANAGER_H
+#define VDO_ACTION_MANAGER_H
+
+#include "admin-state.h"
+#include "types.h"
+
+/*
+ * An action_manager provides a generic mechanism for applying actions to multi-zone entities (such
+ * as the block map or slab depot). Each action manager is tied to a specific context for which it
+ * manages actions. The manager ensures that only one action is active on that context at a time,
+ * and supports at most one pending action. Calls to schedule an action when there is already a
+ * pending action will result in VDO_COMPONENT_BUSY errors. Actions may only be submitted to the
+ * action manager from a single thread (which thread is determined when the action manager is
+ * constructed).
+ *
+ * A scheduled action consists of four components:
+ *
+ * preamble
+ * an optional method to be run on the initiator thread before applying the action to all zones
+ * zone_action
+ * an optional method to be applied to each of the zones
+ * conclusion
+ * an optional method to be run on the initiator thread once the per-zone method has been
+ * applied to all zones
+ * parent
+ * an optional completion to be finished once the conclusion is done
+ *
+ * At least one of the three methods must be provided.
+ */
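/*
 * Editor's illustration -- not part of this patch. A minimal sketch of how a caller
 * might use this interface, assuming the usual vdo includes (completion.h,
 * status-codes.h); flush_zone(), flush_conclusion() and start_flush() are
 * hypothetical names invented for this example.
 */
static void flush_zone(void *context, zone_count_t zone_number,
		       struct vdo_completion *parent)
{
	/* Per-zone work runs on that zone's thread; notify the parent when done. */
	vdo_finish_completion(parent);
}

static int flush_conclusion(void *context)
{
	/* Runs back on the initiator thread once every zone has been visited. */
	return VDO_SUCCESS;
}

static bool start_flush(struct action_manager *manager, struct vdo_completion *parent)
{
	/*
	 * Launches immediately if no action is active, becomes the pending action if
	 * one is already running, and otherwise notifies @parent with
	 * VDO_COMPONENT_BUSY.
	 */
	return vdo_schedule_action(manager, NULL, flush_zone, flush_conclusion, parent);
}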
+
+/*
+ * A function which is to be applied asynchronously to a set of zones.
+ * @context: The object which holds the per-zone context for the action
+ * @zone_number: The number of zone to which the action is being applied
+ * @parent: The object to notify when the action is complete
+ */
+typedef void (*vdo_zone_action_fn)(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent);
+
+/*
+ * A function which is to be applied asynchronously on an action manager's initiator thread as the
+ * preamble of an action.
+ * @context: The object which holds the per-zone context for the action
+ * @parent: The object to notify when the action is complete
+ */
+typedef void (*vdo_action_preamble_fn)(void *context, struct vdo_completion *parent);
+
+/*
+ * A function which will run on the action manager's initiator thread as the conclusion of an
+ * action.
+ * @context: The object which holds the per-zone context for the action
+ *
+ * Return: VDO_SUCCESS or an error
+ */
+typedef int (*vdo_action_conclusion_fn)(void *context);
+
+/*
+ * A function to schedule an action.
+ * @context: The object which holds the per-zone context for the action
+ *
+ * Return: true if an action was scheduled
+ */
+typedef bool (*vdo_action_scheduler_fn)(void *context);
+
+/*
+ * A function to get the id of the thread associated with a given zone.
+ * @context: The action context
+ * @zone_number: The number of the zone for which the thread ID is desired
+ */
+typedef thread_id_t (*vdo_zone_thread_getter_fn)(void *context, zone_count_t zone_number);
+
+struct action_manager;
+
+int __must_check vdo_make_action_manager(zone_count_t zones,
+ vdo_zone_thread_getter_fn get_zone_thread_id,
+ thread_id_t initiator_thread_id, void *context,
+ vdo_action_scheduler_fn scheduler,
+ struct vdo *vdo,
+ struct action_manager **manager_ptr);
+
+const struct admin_state_code *__must_check
+vdo_get_current_manager_operation(struct action_manager *manager);
+
+void * __must_check vdo_get_current_action_context(struct action_manager *manager);
+
+bool vdo_schedule_default_action(struct action_manager *manager);
+
+bool vdo_schedule_action(struct action_manager *manager, vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action, vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent);
+
+bool vdo_schedule_operation(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble, vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent);
+
+bool vdo_schedule_operation_with_context(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ void *context, struct vdo_completion *parent);
+
+#endif /* VDO_ACTION_MANAGER_H */
diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c
new file mode 100644
index 000000000000..3f9dba525154
--- /dev/null
+++ b/drivers/md/dm-vdo/admin-state.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "admin-state.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "completion.h"
+#include "types.h"
+
+static const struct admin_state_code VDO_CODE_NORMAL_OPERATION = {
+ .name = "VDO_ADMIN_STATE_NORMAL_OPERATION",
+ .normal = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_NORMAL_OPERATION = &VDO_CODE_NORMAL_OPERATION;
+static const struct admin_state_code VDO_CODE_OPERATING = {
+ .name = "VDO_ADMIN_STATE_OPERATING",
+ .normal = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_OPERATING = &VDO_CODE_OPERATING;
+static const struct admin_state_code VDO_CODE_FORMATTING = {
+ .name = "VDO_ADMIN_STATE_FORMATTING",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_FORMATTING = &VDO_CODE_FORMATTING;
+static const struct admin_state_code VDO_CODE_PRE_LOADING = {
+ .name = "VDO_ADMIN_STATE_PRE_LOADING",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADING = &VDO_CODE_PRE_LOADING;
+static const struct admin_state_code VDO_CODE_PRE_LOADED = {
+ .name = "VDO_ADMIN_STATE_PRE_LOADED",
+};
+const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADED = &VDO_CODE_PRE_LOADED;
+static const struct admin_state_code VDO_CODE_LOADING = {
+ .name = "VDO_ADMIN_STATE_LOADING",
+ .normal = true,
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_LOADING = &VDO_CODE_LOADING;
+static const struct admin_state_code VDO_CODE_LOADING_FOR_RECOVERY = {
+ .name = "VDO_ADMIN_STATE_LOADING_FOR_RECOVERY",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_RECOVERY =
+ &VDO_CODE_LOADING_FOR_RECOVERY;
+static const struct admin_state_code VDO_CODE_LOADING_FOR_REBUILD = {
+ .name = "VDO_ADMIN_STATE_LOADING_FOR_REBUILD",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_REBUILD = &VDO_CODE_LOADING_FOR_REBUILD;
+static const struct admin_state_code VDO_CODE_WAITING_FOR_RECOVERY = {
+ .name = "VDO_ADMIN_STATE_WAITING_FOR_RECOVERY",
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_WAITING_FOR_RECOVERY =
+ &VDO_CODE_WAITING_FOR_RECOVERY;
+static const struct admin_state_code VDO_CODE_NEW = {
+ .name = "VDO_ADMIN_STATE_NEW",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_NEW = &VDO_CODE_NEW;
+static const struct admin_state_code VDO_CODE_INITIALIZED = {
+ .name = "VDO_ADMIN_STATE_INITIALIZED",
+};
+const struct admin_state_code *VDO_ADMIN_STATE_INITIALIZED = &VDO_CODE_INITIALIZED;
+static const struct admin_state_code VDO_CODE_RECOVERING = {
+ .name = "VDO_ADMIN_STATE_RECOVERING",
+ .draining = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_RECOVERING = &VDO_CODE_RECOVERING;
+static const struct admin_state_code VDO_CODE_REBUILDING = {
+ .name = "VDO_ADMIN_STATE_REBUILDING",
+ .draining = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_REBUILDING = &VDO_CODE_REBUILDING;
+static const struct admin_state_code VDO_CODE_SAVING = {
+ .name = "VDO_ADMIN_STATE_SAVING",
+ .draining = true,
+ .quiescing = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SAVING = &VDO_CODE_SAVING;
+static const struct admin_state_code VDO_CODE_SAVED = {
+ .name = "VDO_ADMIN_STATE_SAVED",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SAVED = &VDO_CODE_SAVED;
+static const struct admin_state_code VDO_CODE_SCRUBBING = {
+ .name = "VDO_ADMIN_STATE_SCRUBBING",
+ .draining = true,
+ .loading = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SCRUBBING = &VDO_CODE_SCRUBBING;
+static const struct admin_state_code VDO_CODE_SAVE_FOR_SCRUBBING = {
+ .name = "VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING",
+ .draining = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING = &VDO_CODE_SAVE_FOR_SCRUBBING;
+static const struct admin_state_code VDO_CODE_STOPPING = {
+ .name = "VDO_ADMIN_STATE_STOPPING",
+ .draining = true,
+ .quiescing = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_STOPPING = &VDO_CODE_STOPPING;
+static const struct admin_state_code VDO_CODE_STOPPED = {
+ .name = "VDO_ADMIN_STATE_STOPPED",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_STOPPED = &VDO_CODE_STOPPED;
+static const struct admin_state_code VDO_CODE_SUSPENDING = {
+ .name = "VDO_ADMIN_STATE_SUSPENDING",
+ .draining = true,
+ .quiescing = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDING = &VDO_CODE_SUSPENDING;
+static const struct admin_state_code VDO_CODE_SUSPENDED = {
+ .name = "VDO_ADMIN_STATE_SUSPENDED",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED = &VDO_CODE_SUSPENDED;
+static const struct admin_state_code VDO_CODE_SUSPENDED_OPERATION = {
+ .name = "VDO_ADMIN_STATE_SUSPENDED_OPERATION",
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED_OPERATION = &VDO_CODE_SUSPENDED_OPERATION;
+static const struct admin_state_code VDO_CODE_RESUMING = {
+ .name = "VDO_ADMIN_STATE_RESUMING",
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_RESUMING = &VDO_CODE_RESUMING;
+
+/**
+ * get_next_state() - Determine the state which should be set after a given operation completes
+ * based on the operation and the current state.
+ * @operation The operation to be started.
+ *
+ * Return: The state to set when the operation completes or NULL if the operation can not be
+ * started in the current state.
+ */
+static const struct admin_state_code *get_next_state(const struct admin_state *state,
+ const struct admin_state_code *operation)
+{
+ const struct admin_state_code *code = vdo_get_admin_state_code(state);
+
+ if (code->operating)
+ return NULL;
+
+ if (operation == VDO_ADMIN_STATE_SAVING)
+ return (code == VDO_ADMIN_STATE_NORMAL_OPERATION ? VDO_ADMIN_STATE_SAVED : NULL);
+
+ if (operation == VDO_ADMIN_STATE_SUSPENDING) {
+ return (code == VDO_ADMIN_STATE_NORMAL_OPERATION
+ ? VDO_ADMIN_STATE_SUSPENDED
+ : NULL);
+ }
+
+ if (operation == VDO_ADMIN_STATE_STOPPING)
+ return (code == VDO_ADMIN_STATE_NORMAL_OPERATION ? VDO_ADMIN_STATE_STOPPED : NULL);
+
+ if (operation == VDO_ADMIN_STATE_PRE_LOADING)
+ return (code == VDO_ADMIN_STATE_INITIALIZED ? VDO_ADMIN_STATE_PRE_LOADED : NULL);
+
+ if (operation == VDO_ADMIN_STATE_SUSPENDED_OPERATION) {
+ return (((code == VDO_ADMIN_STATE_SUSPENDED) ||
+ (code == VDO_ADMIN_STATE_SAVED)) ? code : NULL);
+ }
+
+ return VDO_ADMIN_STATE_NORMAL_OPERATION;
+}
+
+/**
+ * vdo_finish_operation() - Finish the current operation.
+ *
+ * Will notify the operation waiter if there is one. This method should be used for operations
+ * started with vdo_start_operation(). For operations which were started with vdo_start_draining(),
+ * use vdo_finish_draining() instead.
+ *
+ * Return: true if there was an operation to finish.
+ */
+bool vdo_finish_operation(struct admin_state *state, int result)
+{
+ if (!vdo_get_admin_state_code(state)->operating)
+ return false;
+
+ state->complete = state->starting;
+ if (state->waiter != NULL)
+ vdo_set_completion_result(state->waiter, result);
+
+ if (!state->starting) {
+ vdo_set_admin_state_code(state, state->next_state);
+ if (state->waiter != NULL)
+ vdo_launch_completion(vdo_forget(state->waiter));
+ }
+
+ return true;
+}
+
+/**
+ * begin_operation() - Begin an operation if it may be started given the current state.
+ * @waiter A completion to notify when the operation is complete; may be NULL.
+ * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check begin_operation(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator)
+{
+ int result;
+ const struct admin_state_code *next_state = get_next_state(state, operation);
+
+ if (next_state == NULL) {
+ result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
+ "Can't start %s from %s",
+ operation->name,
+ vdo_get_admin_state_code(state)->name);
+ } else if (state->waiter != NULL) {
+ result = vdo_log_error_strerror(VDO_COMPONENT_BUSY,
+ "Can't start %s with extant waiter",
+ operation->name);
+ } else {
+ state->waiter = waiter;
+ state->next_state = next_state;
+ vdo_set_admin_state_code(state, operation);
+ if (initiator != NULL) {
+ state->starting = true;
+ initiator(state);
+ state->starting = false;
+ if (state->complete)
+ vdo_finish_operation(state, VDO_SUCCESS);
+ }
+
+ return VDO_SUCCESS;
+ }
+
+ if (waiter != NULL)
+ vdo_continue_completion(waiter, result);
+
+ return result;
+}
+
+/**
+ * start_operation() - Start an operation if it may be started given the current state.
+ * @waiter A completion to notify when the operation is complete.
+ * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the operation was started.
+ */
+static inline bool __must_check start_operation(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator)
+{
+ return (begin_operation(state, operation, waiter, initiator) == VDO_SUCCESS);
+}
+
+/**
+ * check_code() - Check the result of a state validation.
+ * @valid true if the code is of an appropriate type.
+ * @code The code which failed to be of the correct type.
+ * @what What the code failed to be, for logging.
+ * @waiter The completion to notify of the error; may be NULL.
+ *
+ * If the result failed, log an invalid state error and, if there is a waiter, notify it.
+ *
+ * Return: The result of the check.
+ */
+static bool check_code(bool valid, const struct admin_state_code *code, const char *what,
+ struct vdo_completion *waiter)
+{
+ int result;
+
+ if (valid)
+ return true;
+
+ result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
+ "%s is not a %s", code->name, what);
+ if (waiter != NULL)
+ vdo_continue_completion(waiter, result);
+
+ return false;
+}
+
+/**
+ * assert_vdo_drain_operation() - Check that an operation is a drain.
+ * @waiter The completion to finish with an error if the operation is not a drain.
+ *
+ * Return: true if the specified operation is a drain.
+ */
+static bool __must_check assert_vdo_drain_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter)
+{
+ return check_code(operation->draining, operation, "drain operation", waiter);
+}
+
+/**
+ * vdo_start_draining() - Initiate a drain operation if the current state permits it.
+ * @operation The type of drain to initiate.
+ * @waiter The completion to notify when the drain is complete.
+ * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the drain was initiated; if not, the waiter will be notified.
+ */
+bool vdo_start_draining(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator)
+{
+ const struct admin_state_code *code = vdo_get_admin_state_code(state);
+
+ if (!assert_vdo_drain_operation(operation, waiter))
+ return false;
+
+ if (code->quiescent) {
+ vdo_launch_completion(waiter);
+ return false;
+ }
+
+ if (!code->normal) {
+ vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE, "can't start %s from %s",
+ operation->name, code->name);
+ vdo_continue_completion(waiter, VDO_INVALID_ADMIN_STATE);
+ return false;
+ }
+
+ return start_operation(state, operation, waiter, initiator);
+}
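/*
 * Editor's illustration -- not part of this patch. A sketch of a typical drain cycle
 * pairing vdo_start_draining() with vdo_finish_draining(); the component names
 * (my_component_*) are hypothetical.
 */
static void my_component_quiesce(struct admin_state *state)
{
	/* Invoked only if the drain may begin; start the asynchronous quiescing work here. */
}

static void suspend_my_component(struct admin_state *state, struct vdo_completion *waiter)
{
	/*
	 * From VDO_ADMIN_STATE_NORMAL_OPERATION this enters SUSPENDING; if the state
	 * is already quiescent, or the drain cannot start, the waiter is notified
	 * instead.
	 */
	vdo_start_draining(state, VDO_ADMIN_STATE_SUSPENDING, waiter,
			   my_component_quiesce);
}

static void my_component_drained(struct admin_state *state)
{
	/* When the quiescing work completes: moves to SUSPENDED and launches the waiter. */
	vdo_finish_draining(state);
}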
+
+/**
+ * vdo_finish_draining() - Finish a drain operation if one was in progress.
+ *
+ * Return: true if the state was draining; will notify the waiter if so.
+ */
+bool vdo_finish_draining(struct admin_state *state)
+{
+ return vdo_finish_draining_with_result(state, VDO_SUCCESS);
+}
+
+/**
+ * vdo_finish_draining_with_result() - Finish a drain operation with a status code.
+ *
+ * Return: true if the state was draining; will notify the waiter if so.
+ */
+bool vdo_finish_draining_with_result(struct admin_state *state, int result)
+{
+ return (vdo_is_state_draining(state) && vdo_finish_operation(state, result));
+}
+
+/**
+ * vdo_assert_load_operation() - Check that an operation is a load.
+ * @waiter The completion to finish with an error if the operation is not a load.
+ *
+ * Return: true if the specified operation is a load.
+ */
+bool vdo_assert_load_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter)
+{
+ return check_code(operation->loading, operation, "load operation", waiter);
+}
+
+/**
+ * vdo_start_loading() - Initiate a load operation if the current state permits it.
+ * @operation The type of load to initiate.
+ * @waiter The completion to notify when the load is complete (may be NULL).
+ * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the load was initiated; if not, the waiter will be notified.
+ */
+bool vdo_start_loading(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator)
+{
+ return (vdo_assert_load_operation(operation, waiter) &&
+ start_operation(state, operation, waiter, initiator));
+}
+
+/**
+ * vdo_finish_loading() - Finish a load operation if one was in progress.
+ *
+ * Return: true if the state was loading; will notify the waiter if so.
+ */
+bool vdo_finish_loading(struct admin_state *state)
+{
+ return vdo_finish_loading_with_result(state, VDO_SUCCESS);
+}
+
+/**
+ * vdo_finish_loading_with_result() - Finish a load operation with a status code.
+ * @result The result of the load operation.
+ *
+ * Return: true if the state was loading; will notify the waiter if so.
+ */
+bool vdo_finish_loading_with_result(struct admin_state *state, int result)
+{
+ return (vdo_is_state_loading(state) && vdo_finish_operation(state, result));
+}
+
+/**
+ * assert_vdo_resume_operation() - Check whether an admin_state_code is a resume operation.
+ * @waiter The completion to notify if the operation is not a resume operation; may be NULL.
+ *
+ * Return: true if the code is a resume operation.
+ */
+static bool __must_check assert_vdo_resume_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter)
+{
+ return check_code(operation == VDO_ADMIN_STATE_RESUMING, operation,
+ "resume operation", waiter);
+}
+
+/**
+ * vdo_start_resuming() - Initiate a resume operation if the current state permits it.
+ * @operation The type of resume to start.
+ * @waiter The completion to notify when the resume is complete (may be NULL).
+ * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the resume was initiated; if not, the waiter will be notified.
+ */
+bool vdo_start_resuming(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator)
+{
+ return (assert_vdo_resume_operation(operation, waiter) &&
+ start_operation(state, operation, waiter, initiator));
+}
+
+/**
+ * vdo_finish_resuming() - Finish a resume operation if one was in progress.
+ *
+ * Return: true if the state was resuming; will notify the waiter if so.
+ */
+bool vdo_finish_resuming(struct admin_state *state)
+{
+ return vdo_finish_resuming_with_result(state, VDO_SUCCESS);
+}
+
+/**
+ * vdo_finish_resuming_with_result() - Finish a resume operation with a status code.
+ * @result The result of the resume operation.
+ *
+ * Return: true if the state was resuming; will notify the waiter if so.
+ */
+bool vdo_finish_resuming_with_result(struct admin_state *state, int result)
+{
+ return (vdo_is_state_resuming(state) && vdo_finish_operation(state, result));
+}
+
+/**
+ * vdo_resume_if_quiescent() - Change the state to normal operation if the current state is
+ * quiescent.
+ *
+ * Return: VDO_SUCCESS if the state resumed, VDO_INVALID_ADMIN_STATE otherwise.
+ */
+int vdo_resume_if_quiescent(struct admin_state *state)
+{
+ if (!vdo_is_state_quiescent(state))
+ return VDO_INVALID_ADMIN_STATE;
+
+ vdo_set_admin_state_code(state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_start_operation() - Attempt to start an operation.
+ *
+ * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not
+ */
+int vdo_start_operation(struct admin_state *state,
+ const struct admin_state_code *operation)
+{
+ return vdo_start_operation_with_waiter(state, operation, NULL, NULL);
+}
+
+/**
+ * vdo_start_operation_with_waiter() - Attempt to start an operation.
+ * @waiter the completion to notify when the operation completes or fails to start; may be NULL.
+ * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not
+ */
+int vdo_start_operation_with_waiter(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator)
+{
+ return (check_code(operation->operating, operation, "operation", waiter) ?
+ begin_operation(state, operation, waiter, initiator) :
+ VDO_INVALID_ADMIN_STATE);
+}
diff --git a/drivers/md/dm-vdo/admin-state.h b/drivers/md/dm-vdo/admin-state.h
new file mode 100644
index 000000000000..a7d6ac2c30a6
--- /dev/null
+++ b/drivers/md/dm-vdo/admin-state.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_ADMIN_STATE_H
+#define VDO_ADMIN_STATE_H
+
+#include "completion.h"
+#include "types.h"
+
+struct admin_state_code {
+ const char *name;
+ /* Normal operation, data_vios may be active */
+ bool normal;
+ /* I/O is draining, new requests should not start */
+ bool draining;
+ /* This is a startup time operation */
+ bool loading;
+ /* The next state will be quiescent */
+ bool quiescing;
+ /* The VDO is quiescent, there should be no I/O */
+ bool quiescent;
+ /* Whether an operation is in progress and so no other operation may be started */
+ bool operating;
+};
+
+extern const struct admin_state_code *VDO_ADMIN_STATE_NORMAL_OPERATION;
+extern const struct admin_state_code *VDO_ADMIN_STATE_OPERATING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_FORMATTING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_LOADING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_RECOVERY;
+extern const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_REBUILD;
+extern const struct admin_state_code *VDO_ADMIN_STATE_WAITING_FOR_RECOVERY;
+extern const struct admin_state_code *VDO_ADMIN_STATE_NEW;
+extern const struct admin_state_code *VDO_ADMIN_STATE_INITIALIZED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_RECOVERING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_REBUILDING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SAVING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SAVED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SCRUBBING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_STOPPING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_STOPPED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED_OPERATION;
+extern const struct admin_state_code *VDO_ADMIN_STATE_RESUMING;
+
+struct admin_state {
+ const struct admin_state_code *current_state;
+ /* The next administrative state (when the current operation finishes) */
+ const struct admin_state_code *next_state;
+ /* A completion waiting on a state change */
+ struct vdo_completion *waiter;
+ /* Whether an operation is being initiated */
+ bool starting;
+ /* Whether an operation has completed in the initiator */
+ bool complete;
+};
+
+/**
+ * typedef vdo_admin_initiator_fn - A method to be called once an admin operation may be initiated.
+ */
+typedef void (*vdo_admin_initiator_fn)(struct admin_state *state);
+
+static inline const struct admin_state_code * __must_check
+vdo_get_admin_state_code(const struct admin_state *state)
+{
+ return READ_ONCE(state->current_state);
+}
+
+/**
+ * vdo_set_admin_state_code() - Set the current admin state code.
+ *
+ * This function should be used primarily for initialization and by adminState internals. Most uses
+ * should go through the operation interfaces.
+ */
+static inline void vdo_set_admin_state_code(struct admin_state *state,
+ const struct admin_state_code *code)
+{
+ WRITE_ONCE(state->current_state, code);
+}
+
+static inline bool __must_check vdo_is_state_normal(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->normal;
+}
+
+static inline bool __must_check vdo_is_state_suspending(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_SUSPENDING);
+}
+
+static inline bool __must_check vdo_is_state_saving(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_SAVING);
+}
+
+static inline bool __must_check vdo_is_state_saved(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_SAVED);
+}
+
+static inline bool __must_check vdo_is_state_draining(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->draining;
+}
+
+static inline bool __must_check vdo_is_state_loading(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->loading;
+}
+
+static inline bool __must_check vdo_is_state_resuming(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_RESUMING);
+}
+
+static inline bool __must_check vdo_is_state_clean_load(const struct admin_state *state)
+{
+ const struct admin_state_code *code = vdo_get_admin_state_code(state);
+
+ return ((code == VDO_ADMIN_STATE_FORMATTING) || (code == VDO_ADMIN_STATE_LOADING));
+}
+
+static inline bool __must_check vdo_is_state_quiescing(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->quiescing;
+}
+
+static inline bool __must_check vdo_is_state_quiescent(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->quiescent;
+}
+
+bool __must_check vdo_assert_load_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter);
+
+bool vdo_start_loading(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_loading(struct admin_state *state);
+
+bool vdo_finish_loading_with_result(struct admin_state *state, int result);
+
+bool vdo_start_resuming(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_resuming(struct admin_state *state);
+
+bool vdo_finish_resuming_with_result(struct admin_state *state, int result);
+
+int vdo_resume_if_quiescent(struct admin_state *state);
+
+bool vdo_start_draining(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_draining(struct admin_state *state);
+
+bool vdo_finish_draining_with_result(struct admin_state *state, int result);
+
+int vdo_start_operation(struct admin_state *state,
+ const struct admin_state_code *operation);
+
+int vdo_start_operation_with_waiter(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_operation(struct admin_state *state, int result);
+
+#endif /* VDO_ADMIN_STATE_H */
diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
new file mode 100644
index 000000000000..a0a7c1bd634e
--- /dev/null
+++ b/drivers/md/dm-vdo/block-map.c
@@ -0,0 +1,3318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "block-map.h"
+
+#include <linux/bio.h>
+#include <linux/ratelimit.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "physical-zone.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/**
+ * DOC: Block map eras
+ *
+ * The block map era, or maximum age, is used as follows:
+ *
+ * Each block map page, when dirty, records the earliest recovery journal block sequence number of
+ * the changes reflected in that dirty block. Sequence numbers are classified into eras: every
+ * @maximum_age sequence numbers, we switch to a new era. Block map pages are assigned to eras
+ * according to the sequence number they record.
+ *
+ * In the current (newest) era, block map pages are not written unless there is cache pressure. In
+ * the next oldest era, each time a new journal block is written, 1/@maximum_age of the pages in
+ * this era are issued for write. In all older eras, pages are issued for write immediately.
+ */
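/*
 * Editor's illustration -- not part of this patch. Treating eras as consecutive windows
 * of @maximum_age sequence numbers, the era a dirty page belongs to can be pictured as
 * below; era_of() is a hypothetical helper, not a function in this file.
 */
static inline u64 era_of(u64 recorded_sequence_number, u64 maximum_age)
{
	/* Pages recording sequence numbers in [n * maximum_age, (n + 1) * maximum_age) share era n. */
	return recorded_sequence_number / maximum_age;
}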
+
+struct page_descriptor {
+ root_count_t root_index;
+ height_t height;
+ page_number_t page_index;
+ slot_number_t slot;
+} __packed;
+
+union page_key {
+ struct page_descriptor descriptor;
+ u64 key;
+};
+
+struct write_if_not_dirtied_context {
+ struct block_map_zone *zone;
+ u8 generation;
+};
+
+struct block_map_tree_segment {
+ struct tree_page *levels[VDO_BLOCK_MAP_TREE_HEIGHT];
+};
+
+struct block_map_tree {
+ struct block_map_tree_segment *segments;
+};
+
+struct forest {
+ struct block_map *map;
+ size_t segments;
+ struct boundary *boundaries;
+ struct tree_page **pages;
+ struct block_map_tree trees[];
+};
+
+struct cursor_level {
+ page_number_t page_index;
+ slot_number_t slot;
+};
+
+struct cursors;
+
+struct cursor {
+ struct vdo_waiter waiter;
+ struct block_map_tree *tree;
+ height_t height;
+ struct cursors *parent;
+ struct boundary boundary;
+ struct cursor_level levels[VDO_BLOCK_MAP_TREE_HEIGHT];
+ struct pooled_vio *vio;
+};
+
+struct cursors {
+ struct block_map_zone *zone;
+ struct vio_pool *pool;
+ vdo_entry_callback_fn entry_callback;
+ struct vdo_completion *completion;
+ root_count_t active_roots;
+ struct cursor cursors[];
+};
+
+static const physical_block_number_t NO_PAGE = 0xFFFFFFFFFFFFFFFF;
+
+/* Used to indicate that the page holding the location of a tree root has been "loaded". */
+static const physical_block_number_t VDO_INVALID_PBN = 0xFFFFFFFFFFFFFFFF;
+
+const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY = {
+ .mapping_state = VDO_MAPPING_STATE_UNMAPPED & 0x0F,
+ .pbn_high_nibble = 0,
+ .pbn_low_word = __cpu_to_le32(VDO_ZERO_BLOCK & UINT_MAX),
+};
+
+#define LOG_INTERVAL 4000
+#define DISPLAY_INTERVAL 100000
+
+/*
+ * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
+ * Prevents any compiler shenanigans from affecting other threads reading those stats.
+ */
+#define ADD_ONCE(value, delta) WRITE_ONCE(value, (value) + (delta))
+
+static inline bool is_dirty(const struct page_info *info)
+{
+ return info->state == PS_DIRTY;
+}
+
+static inline bool is_present(const struct page_info *info)
+{
+ return (info->state == PS_RESIDENT) || (info->state == PS_DIRTY);
+}
+
+static inline bool is_in_flight(const struct page_info *info)
+{
+ return (info->state == PS_INCOMING) || (info->state == PS_OUTGOING);
+}
+
+static inline bool is_incoming(const struct page_info *info)
+{
+ return info->state == PS_INCOMING;
+}
+
+static inline bool is_outgoing(const struct page_info *info)
+{
+ return info->state == PS_OUTGOING;
+}
+
+static inline bool is_valid(const struct page_info *info)
+{
+ return is_present(info) || is_outgoing(info);
+}
+
+static char *get_page_buffer(struct page_info *info)
+{
+ struct vdo_page_cache *cache = info->cache;
+
+ return &cache->pages[(info - cache->infos) * VDO_BLOCK_SIZE];
+}
+
+static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo_waiter *waiter)
+{
+ struct vdo_page_completion *completion;
+
+ if (waiter == NULL)
+ return NULL;
+
+ completion = container_of(waiter, struct vdo_page_completion, waiter);
+ vdo_assert_completion_type(&completion->completion, VDO_PAGE_COMPLETION);
+ return completion;
+}
+
+/**
+ * initialize_info() - Initialize all page info structures and put them on the free list.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_info(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ INIT_LIST_HEAD(&cache->free_list);
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
+ int result;
+
+ info->cache = cache;
+ info->state = PS_FREE;
+ info->pbn = NO_PAGE;
+
+ result = create_metadata_vio(cache->vdo, VIO_TYPE_BLOCK_MAP,
+ VIO_PRIORITY_METADATA, info,
+ get_page_buffer(info), &info->vio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* The thread ID should never change. */
+ info->vio->completion.callback_thread_id = cache->zone->thread_id;
+
+ INIT_LIST_HEAD(&info->state_entry);
+ list_add_tail(&info->state_entry, &cache->free_list);
+ INIT_LIST_HEAD(&info->lru_entry);
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_cache_components() - Allocate components of the cache which require their own
+ * allocation.
+ * @cache: The cache whose components should be allocated.
+ *
+ * The caller is responsible for all clean up on errors.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
+{
+ u64 size = cache->page_count * (u64) VDO_BLOCK_SIZE;
+ int result;
+
+ result = vdo_allocate(cache->page_count, struct page_info, "page infos",
+ &cache->infos);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_int_map_create(cache->page_count, &cache->page_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return initialize_info(cache);
+}
+
+/**
+ * assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's
+ * thread.
+ */
+static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
+ const char *function_name)
+{
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
+ "%s() must only be called on cache thread %d, not thread %d",
+ function_name, cache->zone->thread_id, thread_id);
+}
+
+/** assert_io_allowed() - Assert that a page cache may issue I/O. */
+static inline void assert_io_allowed(struct vdo_page_cache *cache)
+{
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
+ "VDO page cache may issue I/O");
+}
+
+/** report_cache_pressure() - Log and, if enabled, report cache pressure. */
+static void report_cache_pressure(struct vdo_page_cache *cache)
+{
+ ADD_ONCE(cache->stats.cache_pressure, 1);
+ if (cache->waiter_count > cache->page_count) {
+ if ((cache->pressure_report % LOG_INTERVAL) == 0)
+ vdo_log_info("page cache pressure %u", cache->stats.cache_pressure);
+
+ if (++cache->pressure_report >= DISPLAY_INTERVAL)
+ cache->pressure_report = 0;
+ }
+}
+
+/**
+ * get_page_state_name() - Return the name of a page state.
+ *
+ * If the page state is invalid, a static string is returned and the invalid state is logged.
+ *
+ * Return: A pointer to a static page state name.
+ */
+static const char * __must_check get_page_state_name(enum vdo_page_buffer_state state)
+{
+ int result;
+ static const char * const state_names[] = {
+ "FREE", "INCOMING", "FAILED", "RESIDENT", "DIRTY", "OUTGOING"
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT);
+
+ result = VDO_ASSERT(state < ARRAY_SIZE(state_names),
+ "Unknown page_state value %d", state);
+ if (result != VDO_SUCCESS)
+ return "[UNKNOWN PAGE STATE]";
+
+ return state_names[state];
+}
+
+/**
+ * update_counter() - Update the counter associated with a given state.
+ * @info: The page info to count.
+ * @delta: The delta to apply to the counter.
+ */
+static void update_counter(struct page_info *info, s32 delta)
+{
+ struct block_map_statistics *stats = &info->cache->stats;
+
+ switch (info->state) {
+ case PS_FREE:
+ ADD_ONCE(stats->free_pages, delta);
+ return;
+
+ case PS_INCOMING:
+ ADD_ONCE(stats->incoming_pages, delta);
+ return;
+
+ case PS_OUTGOING:
+ ADD_ONCE(stats->outgoing_pages, delta);
+ return;
+
+ case PS_FAILED:
+ ADD_ONCE(stats->failed_pages, delta);
+ return;
+
+ case PS_RESIDENT:
+ ADD_ONCE(stats->clean_pages, delta);
+ return;
+
+ case PS_DIRTY:
+ ADD_ONCE(stats->dirty_pages, delta);
+ return;
+
+ default:
+ return;
+ }
+}
+
+/** update_lru() - Update the lru information for an active page. */
+static void update_lru(struct page_info *info)
+{
+ if (info->cache->lru_list.prev != &info->lru_entry)
+ list_move_tail(&info->lru_entry, &info->cache->lru_list);
+}
+
+/**
+ * set_info_state() - Set the state of a page_info and put it on the right list, adjusting
+ * counters.
+ */
+static void set_info_state(struct page_info *info, enum vdo_page_buffer_state new_state)
+{
+ if (new_state == info->state)
+ return;
+
+ update_counter(info, -1);
+ info->state = new_state;
+ update_counter(info, 1);
+
+ switch (info->state) {
+ case PS_FREE:
+ case PS_FAILED:
+ list_move_tail(&info->state_entry, &info->cache->free_list);
+ return;
+
+ case PS_OUTGOING:
+ list_move_tail(&info->state_entry, &info->cache->outgoing_list);
+ return;
+
+ case PS_DIRTY:
+ return;
+
+ default:
+ list_del_init(&info->state_entry);
+ }
+}
+
+/** set_info_pbn() - Set the pbn for an info, updating the map as needed. */
+static int __must_check set_info_pbn(struct page_info *info, physical_block_number_t pbn)
+{
+ struct vdo_page_cache *cache = info->cache;
+
+ /* Either the new or the old page number must be NO_PAGE. */
+ int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
+ "Must free a page before reusing it.");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (info->pbn != NO_PAGE)
+ vdo_int_map_remove(cache->page_map, info->pbn);
+
+ info->pbn = pbn;
+
+ if (pbn != NO_PAGE) {
+ result = vdo_int_map_put(cache->page_map, pbn, info, true, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+ return VDO_SUCCESS;
+}
+
+/** reset_page_info() - Reset page info to represent an unallocated page. */
+static int reset_page_info(struct page_info *info)
+{
+ int result;
+
+ result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting),
+ "VDO Page must not have waiters");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = set_info_pbn(info, NO_PAGE);
+ set_info_state(info, PS_FREE);
+ list_del_init(&info->lru_entry);
+ return result;
+}
+
+/**
+ * find_free_page() - Find a free page.
+ *
+ * Return: A pointer to the page info structure (if found), NULL otherwise.
+ */
+static struct page_info * __must_check find_free_page(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ info = list_first_entry_or_null(&cache->free_list, struct page_info,
+ state_entry);
+ if (info != NULL)
+ list_del_init(&info->state_entry);
+
+ return info;
+}
+
+/**
+ * find_page() - Find the page info (if any) associated with a given pbn.
+ * @pbn: The absolute physical block number of the page.
+ *
+ * Return: The page info for the page if available, or NULL if not.
+ */
+static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
+ physical_block_number_t pbn)
+{
+ if ((cache->last_found != NULL) && (cache->last_found->pbn == pbn))
+ return cache->last_found;
+
+ cache->last_found = vdo_int_map_get(cache->page_map, pbn);
+ return cache->last_found;
+}
+
+/**
+ * select_lru_page() - Determine which page is least recently used.
+ *
+ * Picks the least recently used page from among the non-busy entries at the front of the LRU
+ * ring. Since we move a page to the end of the ring whenever we mark it busy, it is unlikely
+ * that the entries at the front are busy unless the queue is very short, but it is not impossible.
+ *
+ * Return: A pointer to the info structure for a relevant page, or NULL if no such page can be
+ * found. The page can be dirty or resident.
+ */
+static struct page_info * __must_check select_lru_page(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ list_for_each_entry(info, &cache->lru_list, lru_entry)
+ if ((info->busy == 0) && !is_in_flight(info))
+ return info;
+
+ return NULL;
+}
+
+/* ASYNCHRONOUS INTERFACE BEYOND THIS POINT */
+
+/**
+ * complete_with_page() - Helper to complete the VDO Page Completion request successfully.
+ * @info: The page info representing the result page.
+ * @vdo_page_comp: The VDO page completion to complete.
+ */
+static void complete_with_page(struct page_info *info,
+ struct vdo_page_completion *vdo_page_comp)
+{
+ bool available = vdo_page_comp->writable ? is_present(info) : is_valid(info);
+
+ if (!available) {
+ vdo_log_error_strerror(VDO_BAD_PAGE,
+ "Requested cache page %llu in state %s is not %s",
+ (unsigned long long) info->pbn,
+ get_page_state_name(info->state),
+ vdo_page_comp->writable ? "present" : "valid");
+ vdo_fail_completion(&vdo_page_comp->completion, VDO_BAD_PAGE);
+ return;
+ }
+
+ vdo_page_comp->info = info;
+ vdo_page_comp->ready = true;
+ vdo_finish_completion(&vdo_page_comp->completion);
+}
+
+/**
+ * complete_waiter_with_error() - Complete a page completion with an error code.
+ * @waiter: The page completion, as a waiter.
+ * @result_ptr: A pointer to the error code.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void complete_waiter_with_error(struct vdo_waiter *waiter, void *result_ptr)
+{
+ int *result = result_ptr;
+
+ vdo_fail_completion(&page_completion_from_waiter(waiter)->completion, *result);
+}
+
+/**
+ * complete_waiter_with_page() - Complete a page completion with a page.
+ * @waiter: The page completion, as a waiter.
+ * @page_info: The page info to complete with.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info)
+{
+ complete_with_page(page_info, page_completion_from_waiter(waiter));
+}
+
+/**
+ * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result.
+ *
+ * Upon completion the waitq will be empty.
+ *
+ * Return: The number of completions to which the page was distributed.
+ */
+static unsigned int distribute_page_over_waitq(struct page_info *info,
+ struct vdo_wait_queue *waitq)
+{
+ size_t num_pages;
+
+ update_lru(info);
+ num_pages = vdo_waitq_num_waiters(waitq);
+
+ /*
+ * Increment the busy count once for each pending completion so that this page does not
+ * stop being busy until all completions have been processed.
+ */
+ info->busy += num_pages;
+
+ vdo_waitq_notify_all_waiters(waitq, complete_waiter_with_page, info);
+ return num_pages;
+}
+
+/**
+ * set_persistent_error() - Set a persistent error which all requests will receive in the future.
+ * @context: A string describing what triggered the error.
+ *
+ * Once triggered, all enqueued completions will get this error. Any future requests will result in
+ * this error as well.
+ */
+static void set_persistent_error(struct vdo_page_cache *cache, const char *context,
+ int result)
+{
+ struct page_info *info;
+ /* If we're already read-only, there's no need to log. */
+ struct vdo *vdo = cache->vdo;
+
+ if ((result != VDO_READ_ONLY) && !vdo_is_read_only(vdo)) {
+ vdo_log_error_strerror(result, "VDO Page Cache persistent error: %s",
+ context);
+ vdo_enter_read_only_mode(vdo, result);
+ }
+
+ assert_on_cache_thread(cache, __func__);
+
+ vdo_waitq_notify_all_waiters(&cache->free_waiters,
+ complete_waiter_with_error, &result);
+ cache->waiter_count = 0;
+
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
+ vdo_waitq_notify_all_waiters(&info->waiting,
+ complete_waiter_with_error, &result);
+ }
+}
+
+/**
+ * validate_completed_page() - Check that a page completion which is being freed to the cache
+ * referred to a valid page and is in a valid state.
+ * @writable: Whether a writable page is required.
+ *
+ * Return: VDO_SUCCESS if the page was valid, otherwise an error.
+ */
+static int __must_check validate_completed_page(struct vdo_page_completion *completion,
+ bool writable)
+{
+ int result;
+
+ result = VDO_ASSERT(completion->ready, "VDO Page completion not ready");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(completion->info != NULL,
+ "VDO Page Completion must be complete");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(completion->info->pbn == completion->pbn,
+ "VDO Page Completion pbn must be consistent");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(is_valid(completion->info),
+ "VDO Page Completion page must be valid");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (writable) {
+ result = VDO_ASSERT(completion->writable,
+ "VDO Page Completion must be writable");
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void check_for_drain_complete(struct block_map_zone *zone)
+{
+ if (vdo_is_state_draining(&zone->state) &&
+ (zone->active_lookups == 0) &&
+ !vdo_waitq_has_waiters(&zone->flush_waiters) &&
+ !is_vio_pool_busy(zone->vio_pool) &&
+ (zone->page_cache.outstanding_reads == 0) &&
+ (zone->page_cache.outstanding_writes == 0)) {
+ vdo_finish_draining_with_result(&zone->state,
+ (vdo_is_read_only(zone->block_map->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS));
+ }
+}
+
+static void enter_zone_read_only_mode(struct block_map_zone *zone, int result)
+{
+ vdo_enter_read_only_mode(zone->block_map->vdo, result);
+
+ /*
+ * We are in read-only mode, so we won't ever write any page out.
+ * Just take all waiters off the waitq so the zone can drain.
+ */
+ vdo_waitq_init(&zone->flush_waiters);
+ check_for_drain_complete(zone);
+}
+
+static bool __must_check
+validate_completed_page_or_enter_read_only_mode(struct vdo_page_completion *completion,
+ bool writable)
+{
+ int result = validate_completed_page(completion, writable);
+
+ if (result == VDO_SUCCESS)
+ return true;
+
+ enter_zone_read_only_mode(completion->info->cache->zone, result);
+ return false;
+}
+
+/**
+ * handle_load_error() - Handle page load errors.
+ * @completion: The page read vio.
+ */
+static void handle_load_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_on_cache_thread(cache, __func__);
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_enter_read_only_mode(cache->zone->block_map->vdo, result);
+ ADD_ONCE(cache->stats.failed_reads, 1);
+ set_info_state(info, PS_FAILED);
+ vdo_waitq_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result);
+ reset_page_info(info);
+
+ /*
+ * Don't decrement until right before calling check_for_drain_complete() to
+ * ensure that the above work can't cause the page cache to be freed out from under us.
+ */
+ cache->outstanding_reads--;
+ check_for_drain_complete(cache->zone);
+}
+
+/**
+ * page_is_loaded() - Callback used when a page has been loaded.
+ * @completion: The vio which has loaded the page. Its parent is the page_info.
+ */
+static void page_is_loaded(struct vdo_completion *completion)
+{
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+ nonce_t nonce = info->cache->zone->block_map->nonce;
+ struct block_map_page *page;
+ enum block_map_page_validity validity;
+
+ assert_on_cache_thread(cache, __func__);
+
+ page = (struct block_map_page *) get_page_buffer(info);
+ validity = vdo_validate_block_map_page(page, nonce, info->pbn);
+ if (validity == VDO_BLOCK_MAP_PAGE_BAD) {
+ physical_block_number_t pbn = vdo_get_block_map_page_pbn(page);
+ int result = vdo_log_error_strerror(VDO_BAD_PAGE,
+ "Expected page %llu but got page %llu instead",
+ (unsigned long long) info->pbn,
+ (unsigned long long) pbn);
+
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ if (validity == VDO_BLOCK_MAP_PAGE_INVALID)
+ vdo_format_block_map_page(page, nonce, info->pbn, false);
+
+ info->recovery_lock = 0;
+ set_info_state(info, PS_RESIDENT);
+ distribute_page_over_waitq(info, &info->waiting);
+
+ /*
+ * Don't decrement until right before calling check_for_drain_complete() to
+ * ensure that the above work can't cause the page cache to be freed out from under us.
+ */
+ cache->outstanding_reads--;
+ check_for_drain_complete(cache->zone);
+}
+
+/**
+ * handle_rebuild_read_error() - Handle a read error during a read-only rebuild.
+ * @completion: The page load completion.
+ */
+static void handle_rebuild_read_error(struct vdo_completion *completion)
+{
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_on_cache_thread(cache, __func__);
+
+ /*
+ * We are doing a read-only rebuild, so treat this as a successful read
+ * of an uninitialized page.
+ */
+ vio_record_metadata_io_error(as_vio(completion));
+ ADD_ONCE(cache->stats.failed_reads, 1);
+ memset(get_page_buffer(info), 0, VDO_BLOCK_SIZE);
+ vdo_reset_completion(completion);
+ page_is_loaded(completion);
+}
+
+static void load_cache_page_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct page_info *info = vio->completion.parent;
+
+ continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id);
+}
+
+/**
+ * launch_page_load() - Begin the process of loading a page.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check launch_page_load(struct page_info *info,
+ physical_block_number_t pbn)
+{
+ int result;
+ vdo_action_fn callback;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_io_allowed(cache);
+
+ result = set_info_pbn(info, pbn);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading.");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ set_info_state(info, PS_INCOMING);
+ cache->outstanding_reads++;
+ ADD_ONCE(cache->stats.pages_loaded, 1);
+ callback = (cache->rebuilding ? handle_rebuild_read_error : handle_load_error);
+ vdo_submit_metadata_vio(info->vio, pbn, load_cache_page_endio,
+ callback, REQ_OP_READ | REQ_PRIO);
+ return VDO_SUCCESS;
+}
+
+static void write_pages(struct vdo_completion *completion);
+
+/** handle_flush_error() - Handle errors flushing the layer. */
+static void handle_flush_error(struct vdo_completion *completion)
+{
+ struct page_info *info = completion->parent;
+
+ vio_record_metadata_io_error(as_vio(completion));
+ set_persistent_error(info->cache, "flush failed", completion->result);
+ write_pages(completion);
+}
+
+static void flush_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct page_info *info = vio->completion.parent;
+
+ continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id);
+}
+
+/** save_pages() - Attempt to save the outgoing pages by first flushing the layer. */
+static void save_pages(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+ struct vio *vio;
+
+ if ((cache->pages_in_flush > 0) || (cache->pages_to_flush == 0))
+ return;
+
+ assert_io_allowed(cache);
+
+ info = list_first_entry(&cache->outgoing_list, struct page_info, state_entry);
+
+ cache->pages_in_flush = cache->pages_to_flush;
+ cache->pages_to_flush = 0;
+ ADD_ONCE(cache->stats.flush_count, 1);
+
+ vio = info->vio;
+
+ /*
+ * We must make sure that the recovery journal entries that changed these pages were
+ * successfully persisted, so we issue a flush before each batch of pages is written.
+ */
+ vdo_submit_flush_vio(vio, flush_endio, handle_flush_error);
+}
+
+/**
+ * schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved.
+ *
+ * Once in the list, a page may not be used until it has been written out.
+ */
+static void schedule_page_save(struct page_info *info)
+{
+ if (info->busy > 0) {
+ info->write_status = WRITE_STATUS_DEFERRED;
+ return;
+ }
+
+ info->cache->pages_to_flush++;
+ info->cache->outstanding_writes++;
+ set_info_state(info, PS_OUTGOING);
+}
+
+/**
+ * launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving
+ * pages if another save is not in progress.
+ */
+static void launch_page_save(struct page_info *info)
+{
+ schedule_page_save(info);
+ save_pages(info->cache);
+}
+
+/**
+ * completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is
+ * requesting a given page number.
+ * @context: A pointer to the pbn of the desired page.
+ *
+ * Implements waiter_match_fn.
+ *
+ * Return: true if the page completion is for the desired page number.
+ */
+static bool completion_needs_page(struct vdo_waiter *waiter, void *context)
+{
+ physical_block_number_t *pbn = context;
+
+ return (page_completion_from_waiter(waiter)->pbn == *pbn);
+}
+
+/**
+ * allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and
+ * any other completions that match it in page number.
+ */
+static void allocate_free_page(struct page_info *info)
+{
+ int result;
+ struct vdo_waiter *oldest_waiter;
+ physical_block_number_t pbn;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_on_cache_thread(cache, __func__);
+
+ if (!vdo_waitq_has_waiters(&cache->free_waiters)) {
+ if (cache->stats.cache_pressure > 0) {
+ vdo_log_info("page cache pressure relieved");
+ WRITE_ONCE(cache->stats.cache_pressure, 0);
+ }
+
+ return;
+ }
+
+ result = reset_page_info(info);
+ if (result != VDO_SUCCESS) {
+ set_persistent_error(cache, "cannot reset page info", result);
+ return;
+ }
+
+ oldest_waiter = vdo_waitq_get_first_waiter(&cache->free_waiters);
+ pbn = page_completion_from_waiter(oldest_waiter)->pbn;
+
+ /*
+ * Remove all entries which match the page number in question and push them onto the page
+ * info's waitq.
+ */
+ vdo_waitq_dequeue_matching_waiters(&cache->free_waiters, completion_needs_page,
+ &pbn, &info->waiting);
+ cache->waiter_count -= vdo_waitq_num_waiters(&info->waiting);
+
+ result = launch_page_load(info, pbn);
+ if (result != VDO_SUCCESS) {
+ vdo_waitq_notify_all_waiters(&info->waiting,
+ complete_waiter_with_error, &result);
+ }
+}
+
+/**
+ * discard_a_page() - Begin the process of discarding a page.
+ *
+ * If no page is discardable, increments a count of deferred frees so that the next release of a
+ * page which is no longer busy will kick off another discard cycle. This is an indication that the
+ * cache is not big enough.
+ *
+ * If the selected page is not dirty, immediately allocates the page to the oldest completion
+ * waiting for a free page.
+ */
+static void discard_a_page(struct vdo_page_cache *cache)
+{
+ struct page_info *info = select_lru_page(cache);
+
+ if (info == NULL) {
+ report_cache_pressure(cache);
+ return;
+ }
+
+ if (!is_dirty(info)) {
+ allocate_free_page(info);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY(!is_in_flight(info),
+ "page selected for discard is not in flight");
+
+ cache->discard_count++;
+ info->write_status = WRITE_STATUS_DISCARD;
+ launch_page_save(info);
+}
+
+/**
+ * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get
+ * a different page.
+ */
+static void discard_page_for_completion(struct vdo_page_completion *vdo_page_comp)
+{
+ struct vdo_page_cache *cache = vdo_page_comp->cache;
+
+ cache->waiter_count++;
+ vdo_waitq_enqueue_waiter(&cache->free_waiters, &vdo_page_comp->waiter);
+ discard_a_page(cache);
+}
+
+/**
+ * discard_page_if_needed() - Helper used to trigger a discard if the cache needs another free
+ * page.
+ * @cache: The page cache.
+ */
+static void discard_page_if_needed(struct vdo_page_cache *cache)
+{
+ if (cache->waiter_count > cache->discard_count)
+ discard_a_page(cache);
+}
+
+/**
+ * write_has_finished() - Inform the cache that a write has finished (possibly with an error).
+ * @info: The info structure for the page whose write just completed.
+ *
+ * Return: true if the page write was a discard.
+ */
+static bool write_has_finished(struct page_info *info)
+{
+ bool was_discard = (info->write_status == WRITE_STATUS_DISCARD);
+
+ assert_on_cache_thread(info->cache, __func__);
+ info->cache->outstanding_writes--;
+
+ info->write_status = WRITE_STATUS_NORMAL;
+ return was_discard;
+}
+
+/**
+ * handle_page_write_error() - Handler for page write errors.
+ * @completion: The page write vio.
+ */
+static void handle_page_write_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+
+ vio_record_metadata_io_error(as_vio(completion));
+
+ /* If we're already read-only, write failures are to be expected. */
+ if (result != VDO_READ_ONLY) {
+ vdo_log_ratelimit(vdo_log_error,
+ "failed to write block map page %llu",
+ (unsigned long long) info->pbn);
+ }
+
+ set_info_state(info, PS_DIRTY);
+ ADD_ONCE(cache->stats.failed_writes, 1);
+ set_persistent_error(cache, "cannot write page", result);
+
+ if (!write_has_finished(info))
+ discard_page_if_needed(cache);
+
+ check_for_drain_complete(cache->zone);
+}
+
+static void page_is_written_out(struct vdo_completion *completion);
+
+static void write_cache_page_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct page_info *info = vio->completion.parent;
+
+ continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id);
+}
+
+/**
+ * page_is_written_out() - Callback used when a page has been written out.
+ * @completion: The vio which wrote the page. Its parent is a page_info.
+ */
+static void page_is_written_out(struct vdo_completion *completion)
+{
+ bool was_discard, reclaimed;
+ u32 reclamations;
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+ struct block_map_page *page = (struct block_map_page *) get_page_buffer(info);
+
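+ /*
+ * This was the first write of a page that had never been written out before: the copy that
+ * just reached disk still has its initialized flag clear, so write the page again with the
+ * flag set, behind a preflush, before treating the write as finished.
+ */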
+ if (!page->header.initialized) {
+ page->header.initialized = true;
+ vdo_submit_metadata_vio(info->vio, info->pbn,
+ write_cache_page_endio,
+ handle_page_write_error,
+ REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH);
+ return;
+ }
+
+ /* Handle journal updates and torn write protection. */
+ vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal,
+ info->recovery_lock,
+ VDO_ZONE_TYPE_LOGICAL,
+ cache->zone->zone_number);
+ info->recovery_lock = 0;
+ was_discard = write_has_finished(info);
+ reclaimed = (!was_discard || (info->busy > 0) || vdo_waitq_has_waiters(&info->waiting));
+
+ set_info_state(info, PS_RESIDENT);
+
+ reclamations = distribute_page_over_waitq(info, &info->waiting);
+ ADD_ONCE(cache->stats.reclaimed, reclamations);
+
+ if (was_discard)
+ cache->discard_count--;
+
+ if (reclaimed)
+ discard_page_if_needed(cache);
+ else
+ allocate_free_page(info);
+
+ check_for_drain_complete(cache->zone);
+}
+
+/**
+ * write_pages() - Write the batch of pages which were covered by the layer flush which just
+ * completed.
+ * @flush_completion: The flush vio.
+ *
+ * This callback is registered in save_pages().
+ */
+static void write_pages(struct vdo_completion *flush_completion)
+{
+ struct vdo_page_cache *cache = ((struct page_info *) flush_completion->parent)->cache;
+
+ /*
+ * We need to cache these two values on the stack since it is possible for the last
+ * page info to cause the page cache to get freed. Hence once we launch the last page,
+ * it may be unsafe to dereference the cache.
+ */
+ bool has_unflushed_pages = (cache->pages_to_flush > 0);
+ page_count_t pages_in_flush = cache->pages_in_flush;
+
+ cache->pages_in_flush = 0;
+ while (pages_in_flush-- > 0) {
+ struct page_info *info =
+ list_first_entry(&cache->outgoing_list, struct page_info,
+ state_entry);
+
+ list_del_init(&info->state_entry);
+ if (vdo_is_read_only(info->cache->vdo)) {
+ struct vdo_completion *completion = &info->vio->completion;
+
+ vdo_reset_completion(completion);
+ completion->callback = page_is_written_out;
+ completion->error_handler = handle_page_write_error;
+ vdo_fail_completion(completion, VDO_READ_ONLY);
+ continue;
+ }
+ ADD_ONCE(info->cache->stats.pages_saved, 1);
+ vdo_submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio,
+ handle_page_write_error, REQ_OP_WRITE | REQ_PRIO);
+ }
+
+ if (has_unflushed_pages) {
+ /*
+ * If there are unflushed pages, the cache can't have been freed, so this call is
+ * safe.
+ */
+ save_pages(cache);
+ }
+}
+
+/**
+ * vdo_release_page_completion() - Release a VDO Page Completion.
+ *
+ * The page referenced by this completion (if any) will no longer be held busy by this completion.
+ * If a page becomes discardable and there are completions awaiting free pages then a new round of
+ * page discarding is started.
+ */
+void vdo_release_page_completion(struct vdo_completion *completion)
+{
+ struct page_info *discard_info = NULL;
+ struct vdo_page_completion *page_completion = as_vdo_page_completion(completion);
+ struct vdo_page_cache *cache;
+
+ if (completion->result == VDO_SUCCESS) {
+ if (!validate_completed_page_or_enter_read_only_mode(page_completion, false))
+ return;
+
+ if (--page_completion->info->busy == 0)
+ discard_info = page_completion->info;
+ }
+
+ VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+ "Page being released after leaving all queues");
+
+ page_completion->info = NULL;
+ cache = page_completion->cache;
+ assert_on_cache_thread(cache, __func__);
+
+ if (discard_info != NULL) {
+ if (discard_info->write_status == WRITE_STATUS_DEFERRED) {
+ discard_info->write_status = WRITE_STATUS_NORMAL;
+ launch_page_save(discard_info);
+ }
+
+ /*
+ * If there are excess requests for pages (that have not already started discards),
+ * we need to discard some page (which may be this one).
+ */
+ discard_page_if_needed(cache);
+ }
+}
+
+/**
+ * load_page_for_completion() - Helper function to load a page as described by a VDO Page
+ * Completion.
+ */
+static void load_page_for_completion(struct page_info *info,
+ struct vdo_page_completion *vdo_page_comp)
+{
+ int result;
+
+ vdo_waitq_enqueue_waiter(&info->waiting, &vdo_page_comp->waiter);
+ result = launch_page_load(info, vdo_page_comp->pbn);
+ if (result != VDO_SUCCESS) {
+ vdo_waitq_notify_all_waiters(&info->waiting,
+ complete_waiter_with_error, &result);
+ }
+}
+
+/**
+ * vdo_get_page() - Initialize a page completion and get a block map page.
+ * @page_completion: The vdo_page_completion to initialize.
+ * @zone: The block map zone of the desired page.
+ * @pbn: The absolute physical block of the desired page.
+ * @writable: Whether the page can be modified.
+ * @parent: The object to notify when the fetch is complete.
+ * @callback: The notification callback.
+ * @error_handler: The handler for fetch errors.
+ * @requeue: Whether we must requeue when notifying the parent.
+ *
+ * May cause another page to be discarded (potentially writing a dirty page) and the one nominated
+ * by the completion to be loaded from disk. When the callback is invoked, the page will be
+ * resident in the cache and marked busy. All callers must call vdo_release_page_completion()
+ * when they are done with the page to clear the busy mark.
+ */
+void vdo_get_page(struct vdo_page_completion *page_completion,
+ struct block_map_zone *zone, physical_block_number_t pbn,
+ bool writable, void *parent, vdo_action_fn callback,
+ vdo_action_fn error_handler, bool requeue)
+{
+ struct vdo_page_cache *cache = &zone->page_cache;
+ struct vdo_completion *completion = &page_completion->completion;
+ struct page_info *info;
+
+ assert_on_cache_thread(cache, __func__);
+ VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+ "New page completion was not already on a wait queue");
+
+ *page_completion = (struct vdo_page_completion) {
+ .pbn = pbn,
+ .writable = writable,
+ .cache = cache,
+ };
+
+ vdo_initialize_completion(completion, cache->vdo, VDO_PAGE_COMPLETION);
+ vdo_prepare_completion(completion, callback, error_handler,
+ cache->zone->thread_id, parent);
+ completion->requeue = requeue;
+
+ if (page_completion->writable && vdo_is_read_only(cache->vdo)) {
+ vdo_fail_completion(completion, VDO_READ_ONLY);
+ return;
+ }
+
+ if (page_completion->writable)
+ ADD_ONCE(cache->stats.write_count, 1);
+ else
+ ADD_ONCE(cache->stats.read_count, 1);
+
+ info = find_page(cache, page_completion->pbn);
+ if (info != NULL) {
+ /* The page is in the cache already. */
+ if ((info->write_status == WRITE_STATUS_DEFERRED) ||
+ is_incoming(info) ||
+ (is_outgoing(info) && page_completion->writable)) {
+ /* The page is unusable until it has finished I/O. */
+ ADD_ONCE(cache->stats.wait_for_page, 1);
+ vdo_waitq_enqueue_waiter(&info->waiting, &page_completion->waiter);
+ return;
+ }
+
+ if (is_valid(info)) {
+ /* The page is usable. */
+ ADD_ONCE(cache->stats.found_in_cache, 1);
+ if (!is_present(info))
+ ADD_ONCE(cache->stats.read_outgoing, 1);
+ update_lru(info);
+ info->busy++;
+ complete_with_page(info, page_completion);
+ return;
+ }
+
+ /* Something horrible has gone wrong. */
+ VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state.");
+ }
+
+ /* The page must be fetched. */
+ info = find_free_page(cache);
+ if (info != NULL) {
+ ADD_ONCE(cache->stats.fetch_required, 1);
+ load_page_for_completion(info, page_completion);
+ return;
+ }
+
+ /* The page must wait for a page to be discarded. */
+ ADD_ONCE(cache->stats.discard_required, 1);
+ discard_page_for_completion(page_completion);
+}
+
+/**
+ * vdo_request_page_write() - Request that a VDO page be written out as soon as it is not busy.
+ * @completion: The vdo_page_completion containing the page.
+ */
+void vdo_request_page_write(struct vdo_completion *completion)
+{
+ struct page_info *info;
+ struct vdo_page_completion *vdo_page_comp = as_vdo_page_completion(completion);
+
+ if (!validate_completed_page_or_enter_read_only_mode(vdo_page_comp, true))
+ return;
+
+ info = vdo_page_comp->info;
+ set_info_state(info, PS_DIRTY);
+ launch_page_save(info);
+}
+
+/**
+ * vdo_get_cached_page() - Get the block map page from a page completion.
+ * @completion: A vdo page completion whose callback has been called.
+ * @page_ptr: A pointer to hold the page.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_get_cached_page(struct vdo_completion *completion,
+ struct block_map_page **page_ptr)
+{
+ int result;
+ struct vdo_page_completion *vpc;
+
+ vpc = as_vdo_page_completion(completion);
+ result = validate_completed_page(vpc, true);
+ if (result == VDO_SUCCESS)
+ *page_ptr = (struct block_map_page *) get_page_buffer(vpc->info);
+
+ return result;
+}
+
+/**
+ * vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache.
+ *
+ * There must not be any dirty pages in the cache.
+ *
+ * Return: A success or error code.
+ */
+int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ assert_on_cache_thread(cache, __func__);
+
+ /* Make sure we don't throw away any dirty pages. */
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
+ int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages");
+
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /* Reset the page map by re-allocating it. */
+ vdo_int_map_free(vdo_forget(cache->page_map));
+ return vdo_int_map_create(cache->page_count, &cache->page_map);
+}
+
+/**
+ * get_tree_page_by_index() - Get the tree page for a given height and page index.
+ *
+ * Return: The requested page.
+ */
+static struct tree_page * __must_check get_tree_page_by_index(struct forest *forest,
+ root_count_t root_index,
+ height_t height,
+ page_number_t page_index)
+{
+ page_number_t offset = 0;
+ size_t segment;
+
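+ /*
+ * The per-level boundaries are cumulative page counts, so the page lives in the first segment
+ * whose boundary exceeds page_index, at an index relative to the previous segment's boundary.
+ */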
+ for (segment = 0; segment < forest->segments; segment++) {
+ page_number_t border = forest->boundaries[segment].levels[height - 1];
+
+ if (page_index < border) {
+ struct block_map_tree *tree = &forest->trees[root_index];
+
+ return &(tree->segments[segment].levels[height - 1][page_index - offset]);
+ }
+
+ offset = border;
+ }
+
+ return NULL;
+}
+
+/* Get the page referred to by the lock's tree slot at its current height. */
+static inline struct tree_page *get_tree_page(const struct block_map_zone *zone,
+ const struct tree_lock *lock)
+{
+ return get_tree_page_by_index(zone->block_map->forest, lock->root_index,
+ lock->height,
+ lock->tree_slots[lock->height].page_index);
+}
+
+/** vdo_copy_valid_page() - Validate and copy a buffer to a page. */
+bool vdo_copy_valid_page(char *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ struct block_map_page *page)
+{
+ struct block_map_page *loaded = (struct block_map_page *) buffer;
+ enum block_map_page_validity validity =
+ vdo_validate_block_map_page(loaded, nonce, pbn);
+
+ if (validity == VDO_BLOCK_MAP_PAGE_VALID) {
+ memcpy(page, loaded, VDO_BLOCK_SIZE);
+ return true;
+ }
+
+ if (validity == VDO_BLOCK_MAP_PAGE_BAD) {
+ vdo_log_error_strerror(VDO_BAD_PAGE,
+ "Expected page %llu but got page %llu instead",
+ (unsigned long long) pbn,
+ (unsigned long long) vdo_get_block_map_page_pbn(loaded));
+ }
+
+ return false;
+}
+
+/**
+ * in_cyclic_range() - Check whether the given value is between the lower and upper bounds, within
+ * a cyclic range of values from 0 to (modulus - 1).
+ * @lower: The lowest value to accept.
+ * @value: The value to check.
+ * @upper: The highest value to accept.
+ * @modulus: The size of the cyclic space, no more than 2^15.
+ *
+ * The value and both bounds must be smaller than the modulus.
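+ *
+ * For example, with modulus 256, lower 250, and upper 5 (a range that wraps): a value of 3 is
+ * shifted to 259 and the upper bound to 261, so the check passes; a value of 100 is shifted to
+ * 356, which exceeds 261, so the check fails.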
+ *
+ * Return: true if the value is in range.
+ */
+static bool in_cyclic_range(u16 lower, u16 value, u16 upper, u16 modulus)
+{
+ if (value < lower)
+ value += modulus;
+ if (upper < lower)
+ upper += modulus;
+ return (value <= upper);
+}
+
+/**
+ * is_not_older() - Check whether a generation is strictly older than some other generation in the
+ * context of a zone's current generation range.
+ * @zone: The zone in which to do the comparison.
+ * @a: The generation in question.
+ * @b: The generation to compare to.
+ *
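+ * For example, if the zone's generations currently run from 250 (oldest) to 3 (newest),
+ * generation 1 is not older than generation 254, since it lies cyclically in [254, 3], while
+ * generation 252 is strictly older than generation 2.
+ *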
+ * Return: true if generation @a is not strictly older than generation @b in the context of @zone.
+ */
+static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
+{
+ int result;
+
+ result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
+ in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
+ "generation(s) %u, %u are out of range [%u, %u]",
+ a, b, zone->oldest_generation, zone->generation);
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ return true;
+ }
+
+ return in_cyclic_range(b, a, zone->generation, 1 << 8);
+}
+
+static void release_generation(struct block_map_zone *zone, u8 generation)
+{
+ int result;
+
+ result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
+ "dirty page count underflow for generation %u", generation);
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ return;
+ }
+
+ zone->dirty_page_counts[generation]--;
+ while ((zone->dirty_page_counts[zone->oldest_generation] == 0) &&
+ (zone->oldest_generation != zone->generation))
+ zone->oldest_generation++;
+}
+
+static void set_generation(struct block_map_zone *zone, struct tree_page *page,
+ u8 new_generation)
+{
+ u32 new_count;
+ int result;
+ bool decrement_old = vdo_waiter_is_waiting(&page->waiter);
+ u8 old_generation = page->generation;
+
+ if (decrement_old && (old_generation == new_generation))
+ return;
+
+ page->generation = new_generation;
+ new_count = ++zone->dirty_page_counts[new_generation];
+ result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u",
+ new_generation);
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ return;
+ }
+
+ if (decrement_old)
+ release_generation(zone, old_generation);
+}
+
+static void write_page(struct tree_page *tree_page, struct pooled_vio *vio);
+
+/* Implements waiter_callback_fn */
+static void write_page_callback(struct vdo_waiter *waiter, void *context)
+{
+ write_page(container_of(waiter, struct tree_page, waiter), context);
+}
+
+static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone)
+{
+ waiter->callback = write_page_callback;
+ acquire_vio_from_pool(zone->vio_pool, waiter);
+}
+
+/* Return: true if all possible generations were not already active */
+static bool attempt_increment(struct block_map_zone *zone)
+{
+ u8 generation = zone->generation + 1;
+
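+ /*
+ * Generations are u8 values, so the increment wraps at 256; it is refused only when the new
+ * generation would collide with the oldest generation still in use (i.e. all 256 generations
+ * are active).
+ */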
+ if (zone->oldest_generation == generation)
+ return false;
+
+ zone->generation = generation;
+ return true;
+}
+
+/* Launches a flush if one is not already in progress. */
+static void enqueue_page(struct tree_page *page, struct block_map_zone *zone)
+{
+ if ((zone->flusher == NULL) && attempt_increment(zone)) {
+ zone->flusher = page;
+ acquire_vio(&page->waiter, zone);
+ return;
+ }
+
+ vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter);
+}
+
+static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
+{
+ struct tree_page *page = container_of(waiter, struct tree_page, waiter);
+ struct write_if_not_dirtied_context *write_context = context;
+
+ if (page->generation == write_context->generation) {
+ acquire_vio(waiter, write_context->zone);
+ return;
+ }
+
+ enqueue_page(page, write_context->zone);
+}
+
+static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
+{
+ return_vio_to_pool(zone->vio_pool, vio);
+ check_for_drain_complete(zone);
+}
+
+/* This callback is registered in write_initialized_page(). */
+static void finish_page_write(struct vdo_completion *completion)
+{
+ bool dirty;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct tree_page *page = completion->parent;
+ struct block_map_zone *zone = pooled->context;
+
+ vdo_release_recovery_journal_block_reference(zone->block_map->journal,
+ page->writing_recovery_lock,
+ VDO_ZONE_TYPE_LOGICAL,
+ zone->zone_number);
+
+ dirty = (page->writing_generation != page->generation);
+ release_generation(zone, page->writing_generation);
+ page->writing = false;
+
+ if (zone->flusher == page) {
+ struct write_if_not_dirtied_context context = {
+ .zone = zone,
+ .generation = page->writing_generation,
+ };
+
+ vdo_waitq_notify_all_waiters(&zone->flush_waiters,
+ write_page_if_not_dirtied, &context);
+ if (dirty && attempt_increment(zone)) {
+ write_page(page, pooled);
+ return;
+ }
+
+ zone->flusher = NULL;
+ }
+
+ if (dirty) {
+ enqueue_page(page, zone);
+ } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) &&
+ attempt_increment(zone)) {
+ zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters),
+ struct tree_page, waiter);
+ write_page(zone->flusher, pooled);
+ return;
+ }
+
+ return_to_pool(zone, pooled);
+}
+
+static void handle_write_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct block_map_zone *zone = pooled->context;
+
+ vio_record_metadata_io_error(vio);
+ enter_zone_read_only_mode(zone, result);
+ return_to_pool(zone, pooled);
+}
+
+static void write_page_endio(struct bio *bio);
+
+static void write_initialized_page(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct block_map_zone *zone = pooled->context;
+ struct tree_page *tree_page = completion->parent;
+ struct block_map_page *page = (struct block_map_page *) vio->data;
+ blk_opf_t operation = REQ_OP_WRITE | REQ_PRIO;
+
+ /*
+ * Now that we know the page has been written at least once, mark the copy we are writing
+ * as initialized.
+ */
+ page->header.initialized = true;
+
+ if (zone->flusher == tree_page)
+ operation |= REQ_PREFLUSH;
+
+ vdo_submit_metadata_vio(vio, vdo_get_block_map_page_pbn(page),
+ write_page_endio, handle_write_error,
+ operation);
+}
+
+static void write_page_endio(struct bio *bio)
+{
+ struct pooled_vio *vio = bio->bi_private;
+ struct block_map_zone *zone = vio->context;
+ struct block_map_page *page = (struct block_map_page *) vio->vio.data;
+
+ continue_vio_after_io(&vio->vio,
+ (page->header.initialized ?
+ finish_page_write : write_initialized_page),
+ zone->thread_id);
+}
+
+static void write_page(struct tree_page *tree_page, struct pooled_vio *vio)
+{
+ struct vdo_completion *completion = &vio->vio.completion;
+ struct block_map_zone *zone = vio->context;
+ struct block_map_page *page = vdo_as_block_map_page(tree_page);
+
+ if ((zone->flusher != tree_page) &&
+ is_not_older(zone, tree_page->generation, zone->generation)) {
+ /*
+ * This page was re-dirtied after the last flush was issued, hence we need to do
+ * another flush.
+ */
+ enqueue_page(tree_page, zone);
+ return_to_pool(zone, vio);
+ return;
+ }
+
+ completion->parent = tree_page;
+ memcpy(vio->vio.data, tree_page->page_buffer, VDO_BLOCK_SIZE);
+ completion->callback_thread_id = zone->thread_id;
+
+ tree_page->writing = true;
+ tree_page->writing_generation = tree_page->generation;
+ tree_page->writing_recovery_lock = tree_page->recovery_lock;
+
+ /* Clear this now so that we know this page is not on any dirty list. */
+ tree_page->recovery_lock = 0;
+
+ /*
+ * We've already copied the page into the vio which will write it, so if it was not yet
+ * initialized, the first write will indicate that (for torn write protection). It is now
+ * safe to mark it as initialized in memory, since if the write fails, the in-memory state
+ * will become irrelevant.
+ */
+ if (page->header.initialized) {
+ write_initialized_page(completion);
+ return;
+ }
+
+ page->header.initialized = true;
+ vdo_submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page),
+ write_page_endio, handle_write_error,
+ REQ_OP_WRITE | REQ_PRIO);
+}
+
+/* Release a lock on a page which was being loaded or allocated. */
+static void release_page_lock(struct data_vio *data_vio, char *what)
+{
+ struct block_map_zone *zone;
+ struct tree_lock *lock_holder;
+ struct tree_lock *lock = &data_vio->tree_lock;
+
+ VDO_ASSERT_LOG_ONLY(lock->locked,
+ "release of unlocked block map page %s for key %llu in tree %u",
+ what, (unsigned long long) lock->key, lock->root_index);
+
+ zone = data_vio->logical.zone->block_map_zone;
+ lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
+ VDO_ASSERT_LOG_ONLY((lock_holder == lock),
+ "block map page %s mismatch for key %llu in tree %u",
+ what, (unsigned long long) lock->key, lock->root_index);
+ lock->locked = false;
+}
+
+static void finish_lookup(struct data_vio *data_vio, int result)
+{
+ data_vio->tree_lock.height = 0;
+
+ --data_vio->logical.zone->block_map_zone->active_lookups;
+
+ set_data_vio_logical_callback(data_vio, continue_data_vio_with_block_map_slot);
+ data_vio->vio.completion.error_handler = handle_data_vio_error;
+ continue_data_vio_with_error(data_vio, result);
+}
+
+static void abort_lookup_for_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ int result = *((int *) context);
+
+ if (!data_vio->write) {
+ if (result == VDO_NO_SPACE)
+ result = VDO_SUCCESS;
+ } else if (result != VDO_NO_SPACE) {
+ result = VDO_READ_ONLY;
+ }
+
+ finish_lookup(data_vio, result);
+}
+
+static void abort_lookup(struct data_vio *data_vio, int result, char *what)
+{
+ if (result != VDO_NO_SPACE)
+ enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result);
+
+ if (data_vio->tree_lock.locked) {
+ release_page_lock(data_vio, what);
+ vdo_waitq_notify_all_waiters(&data_vio->tree_lock.waiters,
+ abort_lookup_for_waiter,
+ &result);
+ }
+
+ finish_lookup(data_vio, result);
+}
+
+static void abort_load(struct data_vio *data_vio, int result)
+{
+ abort_lookup(data_vio, result, "load");
+}
+
+static bool __must_check is_invalid_tree_entry(const struct vdo *vdo,
+ const struct data_location *mapping,
+ height_t height)
+{
+ if (!vdo_is_valid_location(mapping) ||
+ vdo_is_state_compressed(mapping->state) ||
+ (vdo_is_mapped_location(mapping) && (mapping->pbn == VDO_ZERO_BLOCK)))
+ return true;
+
+ /* Roots aren't physical data blocks, so we can't check their PBNs. */
+ if (height == VDO_BLOCK_MAP_TREE_HEIGHT)
+ return false;
+
+ return !vdo_is_physical_data_block(vdo->depot, mapping->pbn);
+}
+
+static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
+static void allocate_block_map_page(struct block_map_zone *zone,
+ struct data_vio *data_vio);
+
+static void continue_with_loaded_page(struct data_vio *data_vio,
+ struct block_map_page *page)
+{
+ struct tree_lock *lock = &data_vio->tree_lock;
+ struct block_map_tree_slot slot = lock->tree_slots[lock->height];
+ struct data_location mapping =
+ vdo_unpack_block_map_entry(&page->entries[slot.block_map_slot.slot]);
+
+ if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
+ vdo_log_error_strerror(VDO_BAD_MAPPING,
+ "Invalid block map tree PBN: %llu with state %u for page index %u at height %u",
+ (unsigned long long) mapping.pbn, mapping.state,
+ lock->tree_slots[lock->height - 1].page_index,
+ lock->height - 1);
+ abort_load(data_vio, VDO_BAD_MAPPING);
+ return;
+ }
+
+ if (!vdo_is_mapped_location(&mapping)) {
+ /* The page we need is unallocated */
+ allocate_block_map_page(data_vio->logical.zone->block_map_zone,
+ data_vio);
+ return;
+ }
+
+ lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn;
+ if (lock->height == 1) {
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ /* We know what page we need to load next */
+ load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
+}
+
+static void continue_load_for_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+
+ data_vio->tree_lock.height--;
+ continue_with_loaded_page(data_vio, context);
+}
+
+static void finish_block_map_page_load(struct vdo_completion *completion)
+{
+ physical_block_number_t pbn;
+ struct tree_page *tree_page;
+ struct block_map_page *page;
+ nonce_t nonce;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = vio_as_pooled_vio(vio);
+ struct data_vio *data_vio = completion->parent;
+ struct block_map_zone *zone = pooled->context;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+
+ tree_lock->height--;
+ pbn = tree_lock->tree_slots[tree_lock->height].block_map_slot.pbn;
+ tree_page = get_tree_page(zone, tree_lock);
+ page = (struct block_map_page *) tree_page->page_buffer;
+ nonce = zone->block_map->nonce;
+
+ if (!vdo_copy_valid_page(vio->data, nonce, pbn, page))
+ vdo_format_block_map_page(page, nonce, pbn, false);
+ return_vio_to_pool(zone->vio_pool, pooled);
+
+ /* Release our claim to the load and wake any waiters */
+ release_page_lock(data_vio, "load");
+ vdo_waitq_notify_all_waiters(&tree_lock->waiters, continue_load_for_waiter, page);
+ continue_with_loaded_page(data_vio, page);
+}
+
+static void handle_io_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct data_vio *data_vio = completion->parent;
+ struct block_map_zone *zone = pooled->context;
+
+ vio_record_metadata_io_error(vio);
+ return_vio_to_pool(zone->vio_pool, pooled);
+ abort_load(data_vio, result);
+}
+
+static void load_page_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct data_vio *data_vio = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_block_map_page_load,
+ data_vio->logical.zone->thread_id);
+}
+
+static void load_page(struct vdo_waiter *waiter, void *context)
+{
+ struct pooled_vio *pooled = context;
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct tree_lock *lock = &data_vio->tree_lock;
+ physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn;
+
+ pooled->vio.completion.parent = data_vio;
+ vdo_submit_metadata_vio(&pooled->vio, pbn, load_page_endio,
+ handle_io_error, REQ_OP_READ | REQ_PRIO);
+}
+
+/*
+ * If the page is already locked, queue up to wait for the lock to be released. If the lock is
+ * acquired, @data_vio->tree_lock.locked will be true.
+ */
+static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio)
+{
+ int result;
+ struct tree_lock *lock_holder;
+ struct tree_lock *lock = &data_vio->tree_lock;
+ height_t height = lock->height;
+ struct block_map_tree_slot tree_slot = lock->tree_slots[height];
+ union page_key key;
+
+ key.descriptor = (struct page_descriptor) {
+ .root_index = lock->root_index,
+ .height = height,
+ .page_index = tree_slot.page_index,
+ .slot = tree_slot.block_map_slot.slot,
+ };
+ lock->key = key.key;
+
+ result = vdo_int_map_put(zone->loading_pages, lock->key,
+ lock, false, (void **) &lock_holder);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (lock_holder == NULL) {
+ /* We got the lock */
+ data_vio->tree_lock.locked = true;
+ return VDO_SUCCESS;
+ }
+
+ /* Someone else is loading or allocating the page we need */
+ vdo_waitq_enqueue_waiter(&lock_holder->waiters, &data_vio->waiter);
+ return VDO_SUCCESS;
+}
+
+/* Load a block map tree page from disk, for the next level in the data vio tree lock. */
+static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio)
+{
+ int result;
+
+ result = attempt_page_lock(zone, data_vio);
+ if (result != VDO_SUCCESS) {
+ abort_load(data_vio, result);
+ return;
+ }
+
+ if (data_vio->tree_lock.locked) {
+ data_vio->waiter.callback = load_page;
+ acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter);
+ }
+}
+
+static void allocation_failure(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ if (vdo_requeue_completion_if_needed(completion,
+ data_vio->logical.zone->thread_id))
+ return;
+
+ abort_lookup(data_vio, completion->result, "allocation");
+}
+
+static void continue_allocation_for_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+ physical_block_number_t pbn = *((physical_block_number_t *) context);
+
+ tree_lock->height--;
+ data_vio->tree_lock.tree_slots[tree_lock->height].block_map_slot.pbn = pbn;
+
+ if (tree_lock->height == 0) {
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
+}
+
+/** expire_oldest_list() - Expire the oldest list. */
+static void expire_oldest_list(struct dirty_lists *dirty_lists)
+{
+ block_count_t i = dirty_lists->offset++;
+
+ dirty_lists->oldest_period++;
+ if (!list_empty(&dirty_lists->eras[i][VDO_TREE_PAGE])) {
+ list_splice_tail_init(&dirty_lists->eras[i][VDO_TREE_PAGE],
+ &dirty_lists->expired[VDO_TREE_PAGE]);
+ }
+
+ if (!list_empty(&dirty_lists->eras[i][VDO_CACHE_PAGE])) {
+ list_splice_tail_init(&dirty_lists->eras[i][VDO_CACHE_PAGE],
+ &dirty_lists->expired[VDO_CACHE_PAGE]);
+ }
+
+ if (dirty_lists->offset == dirty_lists->maximum_age)
+ dirty_lists->offset = 0;
+}
+
+/** update_period() - Update the dirty_lists period if necessary. */
+static void update_period(struct dirty_lists *dirty, sequence_number_t period)
+{
+ while (dirty->next_period <= period) {
+ if ((dirty->next_period - dirty->oldest_period) == dirty->maximum_age)
+ expire_oldest_list(dirty);
+ dirty->next_period++;
+ }
+}
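+
+/*
+ * Illustrative walk-through of the era bookkeeping (example values only): with
+ * maximum_age = 3, oldest_period = 7, next_period = 10, and offset = 1,
+ * periods 7, 8, and 9 occupy eras[1], eras[2], and eras[0]. Dirtying a page in
+ * period 10 makes update_period() see next_period - oldest_period ==
+ * maximum_age, so expire_oldest_list() splices eras[1] (period 7) onto the
+ * expired lists, advances oldest_period to 8 and offset to 2, and next_period
+ * becomes 11. The newly dirtied page then lands on eras[10 % 3] == eras[1],
+ * the slot just vacated, and write_expired_elements() writes out period 7.
+ */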
+
+/** write_expired_elements() - Write out the expired list. */
+static void write_expired_elements(struct block_map_zone *zone)
+{
+ struct tree_page *page, *ttmp;
+ struct page_info *info, *ptmp;
+ struct list_head *expired;
+ u8 generation = zone->generation;
+
+ expired = &zone->dirty_lists->expired[VDO_TREE_PAGE];
+ list_for_each_entry_safe(page, ttmp, expired, entry) {
+ int result;
+
+ list_del_init(&page->entry);
+
+ result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter),
+ "Newly expired page not already waiting to write");
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ continue;
+ }
+
+ set_generation(zone, page, generation);
+ if (!page->writing)
+ enqueue_page(page, zone);
+ }
+
+ expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE];
+ list_for_each_entry_safe(info, ptmp, expired, state_entry) {
+ list_del_init(&info->state_entry);
+ schedule_page_save(info);
+ }
+
+ save_pages(&zone->page_cache);
+}
+
+/**
+ * add_to_dirty_lists() - Add an element to the dirty lists.
+ * @zone: The zone in which we are operating.
+ * @entry: The list entry of the element to add.
+ * @type: The type of page.
+ * @old_period: The period in which the element was previously dirtied, or 0 if it was not dirty.
+ * @new_period: The period in which the element has now been dirtied, or 0 if it does not hold a
+ * lock.
+ */
+static void add_to_dirty_lists(struct block_map_zone *zone,
+ struct list_head *entry,
+ enum block_map_page_type type,
+ sequence_number_t old_period,
+ sequence_number_t new_period)
+{
+ struct dirty_lists *dirty_lists = zone->dirty_lists;
+
+ if ((old_period == new_period) || ((old_period != 0) && (old_period < new_period)))
+ return;
+
+ if (new_period < dirty_lists->oldest_period) {
+ list_move_tail(entry, &dirty_lists->expired[type]);
+ } else {
+ update_period(dirty_lists, new_period);
+ list_move_tail(entry,
+ &dirty_lists->eras[new_period % dirty_lists->maximum_age][type]);
+ }
+
+ write_expired_elements(zone);
+}
+
+/*
+ * Record the allocation in the tree and wake any waiters now that the write lock has been
+ * released.
+ */
+static void finish_block_map_allocation(struct vdo_completion *completion)
+{
+ physical_block_number_t pbn;
+ struct tree_page *tree_page;
+ struct block_map_page *page;
+ sequence_number_t old_lock;
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+ height_t height = tree_lock->height;
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ tree_page = get_tree_page(zone, tree_lock);
+ pbn = tree_lock->tree_slots[height - 1].block_map_slot.pbn;
+
+ /* Record the allocation. */
+ page = (struct block_map_page *) tree_page->page_buffer;
+ old_lock = tree_page->recovery_lock;
+ vdo_update_block_map_page(page, data_vio, pbn,
+ VDO_MAPPING_STATE_UNCOMPRESSED,
+ &tree_page->recovery_lock);
+
+ if (vdo_waiter_is_waiting(&tree_page->waiter)) {
+ /* This page is waiting to be written out. */
+ if (zone->flusher != tree_page) {
+ /*
+ * The outstanding flush won't cover the update we just made,
+ * so mark the page as needing another flush.
+ */
+ set_generation(zone, tree_page, zone->generation);
+ }
+ } else {
+ /* Put the page on a dirty list */
+ if (old_lock == 0)
+ INIT_LIST_HEAD(&tree_page->entry);
+ add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE,
+ old_lock, tree_page->recovery_lock);
+ }
+
+ tree_lock->height--;
+ if (height > 1) {
+ /* Format the interior node we just allocated (in memory). */
+ tree_page = get_tree_page(zone, tree_lock);
+ vdo_format_block_map_page(tree_page->page_buffer,
+ zone->block_map->nonce,
+ pbn, false);
+ }
+
+ /* Release our claim to the allocation and wake any waiters */
+ release_page_lock(data_vio, "allocation");
+ vdo_waitq_notify_all_waiters(&tree_lock->waiters,
+ continue_allocation_for_waiter, &pbn);
+ if (tree_lock->height == 0) {
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ allocate_block_map_page(zone, data_vio);
+}
+
+static void release_block_map_write_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ release_data_vio_allocation_lock(data_vio, true);
+ launch_data_vio_logical_callback(data_vio, finish_block_map_allocation);
+}
+
+/*
+ * Newly allocated block map pages are set to MAXIMUM_REFERENCES after they are journaled,
+ * to prevent deduplication against the block after we release the write lock on it, but before we
+ * write out the page.
+ */
+static void set_block_map_page_reference_count(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ completion->callback = release_block_map_write_lock;
+ vdo_modify_reference_count(completion, &data_vio->increment_updater);
+}
+
+static void journal_block_map_allocation(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_journal_zone(data_vio);
+
+ set_data_vio_allocated_zone_callback(data_vio,
+ set_block_map_page_reference_count);
+ vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
+}
+
+static void allocate_block(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct tree_lock *lock = &data_vio->tree_lock;
+ physical_block_number_t pbn;
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ if (!vdo_allocate_block_in_zone(data_vio))
+ return;
+
+ pbn = data_vio->allocation.pbn;
+ lock->tree_slots[lock->height - 1].block_map_slot.pbn = pbn;
+ data_vio->increment_updater = (struct reference_updater) {
+ .operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING,
+ .increment = true,
+ .zpbn = {
+ .pbn = pbn,
+ .state = VDO_MAPPING_STATE_UNCOMPRESSED,
+ },
+ .lock = data_vio->allocation.lock,
+ };
+
+ launch_data_vio_journal_callback(data_vio, journal_block_map_allocation);
+}
+
+static void allocate_block_map_page(struct block_map_zone *zone,
+ struct data_vio *data_vio)
+{
+ int result;
+
+ if (!data_vio->write || data_vio->is_discard) {
+ /* This is a pure read or a discard, so there's nothing left to do here. */
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ result = attempt_page_lock(zone, data_vio);
+ if (result != VDO_SUCCESS) {
+ abort_lookup(data_vio, result, "allocation");
+ return;
+ }
+
+ if (!data_vio->tree_lock.locked)
+ return;
+
+ data_vio_allocate_data_block(data_vio, VIO_BLOCK_MAP_WRITE_LOCK,
+ allocate_block, allocation_failure);
+}
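+
+/*
+ * For orientation only (this summarizes the code above rather than adding
+ * driver logic): a successful allocation started by allocate_block_map_page()
+ * proceeds through the following callbacks, each on the thread named in
+ * parentheses:
+ *
+ *   allocate_block (allocated zone)
+ *     -> journal_block_map_allocation (journal zone)
+ *     -> set_block_map_page_reference_count (allocated zone)
+ *     -> release_block_map_write_lock (allocated zone)
+ *     -> finish_block_map_allocation (logical zone)
+ *
+ * finish_block_map_allocation() then either finishes the lookup or calls
+ * allocate_block_map_page() again for the next level down the tree.
+ */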
+
+/**
+ * vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio
+ * resides and cache that result in the data_vio.
+ *
+ * All ancestors in the tree will be allocated or loaded, as needed.
+ */
+void vdo_find_block_map_slot(struct data_vio *data_vio)
+{
+ page_number_t page_index;
+ struct block_map_tree_slot tree_slot;
+ struct data_location mapping;
+ struct block_map_page *page = NULL;
+ struct tree_lock *lock = &data_vio->tree_lock;
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+
+ zone->active_lookups++;
+ if (vdo_is_state_draining(&zone->state)) {
+ finish_lookup(data_vio, VDO_SHUTTING_DOWN);
+ return;
+ }
+
+ lock->tree_slots[0].block_map_slot.slot =
+ data_vio->logical.lbn % VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count);
+ tree_slot = (struct block_map_tree_slot) {
+ .page_index = page_index / VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
+ .block_map_slot = {
+ .pbn = 0,
+ .slot = page_index % VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
+ },
+ };
+
+ for (lock->height = 1; lock->height <= VDO_BLOCK_MAP_TREE_HEIGHT; lock->height++) {
+ physical_block_number_t pbn;
+
+ lock->tree_slots[lock->height] = tree_slot;
+ page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer);
+ pbn = vdo_get_block_map_page_pbn(page);
+ if (pbn != VDO_ZERO_BLOCK) {
+ lock->tree_slots[lock->height].block_map_slot.pbn = pbn;
+ break;
+ }
+
+ /* Calculate the index and slot for the next level. */
+ tree_slot.block_map_slot.slot =
+ tree_slot.page_index % VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ tree_slot.page_index = tree_slot.page_index / VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ }
+
+ /* The page at this height has been allocated and loaded. */
+ mapping = vdo_unpack_block_map_entry(&page->entries[tree_slot.block_map_slot.slot]);
+ if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
+ vdo_log_error_strerror(VDO_BAD_MAPPING,
+ "Invalid block map tree PBN: %llu with state %u for page index %u at height %u",
+ (unsigned long long) mapping.pbn, mapping.state,
+ lock->tree_slots[lock->height - 1].page_index,
+ lock->height - 1);
+ abort_load(data_vio, VDO_BAD_MAPPING);
+ return;
+ }
+
+ if (!vdo_is_mapped_location(&mapping)) {
+ /* The page we want one level down has not been allocated, so allocate it. */
+ allocate_block_map_page(zone, data_vio);
+ return;
+ }
+
+ lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn;
+ if (lock->height == 1) {
+ /* This is the ultimate block map page, so we're done */
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ /* We know what page we need to load. */
+ load_block_map_page(zone, data_vio);
+}
+
+/*
+ * Find the PBN of a leaf block map page. This method may only be used after all allocated tree
+ * pages have been loaded; otherwise, it may give the wrong answer (0).
+ */
+physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
+ page_number_t page_number)
+{
+ struct data_location mapping;
+ struct tree_page *tree_page;
+ struct block_map_page *page;
+ root_count_t root_index = page_number % map->root_count;
+ page_number_t page_index = page_number / map->root_count;
+ slot_number_t slot = page_index % VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ page_index /= VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ tree_page = get_tree_page_by_index(map->forest, root_index, 1, page_index);
+ page = (struct block_map_page *) tree_page->page_buffer;
+ if (!page->header.initialized)
+ return VDO_ZERO_BLOCK;
+
+ mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
+ if (!vdo_is_valid_location(&mapping) || vdo_is_state_compressed(mapping.state))
+ return VDO_ZERO_BLOCK;
+ return mapping.pbn;
+}
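+
+/*
+ * Worked example (illustrative values only): with root_count = 4 and
+ * page_number = 1027, the decomposition above gives root_index = 1027 % 4 = 3
+ * and page_index = 1027 / 4 = 256. Assuming VDO_BLOCK_MAP_ENTRIES_PER_PAGE
+ * exceeds 256 (it is several hundred for 4K blocks), slot becomes 256 and the
+ * final page_index becomes 0, so the leaf page's PBN is read from entry 256 of
+ * the first height-1 interior page of root 3.
+ */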
+
+/*
+ * Write a tree page or indicate that it has been re-dirtied if it is already being written. This
+ * method is used when correcting errors in the tree during read-only rebuild.
+ */
+void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone)
+{
+ bool waiting = vdo_waiter_is_waiting(&page->waiter);
+
+ if (waiting && (zone->flusher == page))
+ return;
+
+ set_generation(zone, page, zone->generation);
+ if (waiting || page->writing)
+ return;
+
+ enqueue_page(page, zone);
+}
+
+static int make_segment(struct forest *old_forest, block_count_t new_pages,
+ struct boundary *new_boundary, struct forest *forest)
+{
+ size_t index = (old_forest == NULL) ? 0 : old_forest->segments;
+ struct tree_page *page_ptr;
+ page_count_t segment_sizes[VDO_BLOCK_MAP_TREE_HEIGHT];
+ height_t height;
+ root_count_t root;
+ int result;
+
+ forest->segments = index + 1;
+
+ result = vdo_allocate(forest->segments, struct boundary,
+ "forest boundary array", &forest->boundaries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(forest->segments, struct tree_page *,
+ "forest page pointers", &forest->pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(new_pages, struct tree_page,
+ "new forest pages", &forest->pages[index]);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (index > 0) {
+ memcpy(forest->boundaries, old_forest->boundaries,
+ index * sizeof(struct boundary));
+ memcpy(forest->pages, old_forest->pages,
+ index * sizeof(struct tree_page *));
+ }
+
+ memcpy(&(forest->boundaries[index]), new_boundary, sizeof(struct boundary));
+
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT; height++) {
+ segment_sizes[height] = new_boundary->levels[height];
+ if (index > 0)
+ segment_sizes[height] -= old_forest->boundaries[index - 1].levels[height];
+ }
+
+ page_ptr = forest->pages[index];
+ for (root = 0; root < forest->map->root_count; root++) {
+ struct block_map_tree_segment *segment;
+ struct block_map_tree *tree = &(forest->trees[root]);
+ height_t height;
+
+ int result = vdo_allocate(forest->segments,
+ struct block_map_tree_segment,
+ "tree root segments", &tree->segments);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (index > 0) {
+ memcpy(tree->segments, old_forest->trees[root].segments,
+ index * sizeof(struct block_map_tree_segment));
+ }
+
+ segment = &(tree->segments[index]);
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT; height++) {
+ if (segment_sizes[height] == 0)
+ continue;
+
+ segment->levels[height] = page_ptr;
+ if (height == (VDO_BLOCK_MAP_TREE_HEIGHT - 1)) {
+ /* Record the root. */
+ struct block_map_page *page =
+ vdo_format_block_map_page(page_ptr->page_buffer,
+ forest->map->nonce,
+ VDO_INVALID_PBN, true);
+ page->entries[0] =
+ vdo_pack_block_map_entry(forest->map->root_origin + root,
+ VDO_MAPPING_STATE_UNCOMPRESSED);
+ }
+ page_ptr += segment_sizes[height];
+ }
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void deforest(struct forest *forest, size_t first_page_segment)
+{
+ root_count_t root;
+
+ if (forest->pages != NULL) {
+ size_t segment;
+
+ for (segment = first_page_segment; segment < forest->segments; segment++)
+ vdo_free(forest->pages[segment]);
+ vdo_free(forest->pages);
+ }
+
+ for (root = 0; root < forest->map->root_count; root++)
+ vdo_free(forest->trees[root].segments);
+
+ vdo_free(forest->boundaries);
+ vdo_free(forest);
+}
+
+/**
+ * make_forest() - Make a collection of trees for a block_map, expanding the existing forest if
+ * there is one.
+ * @entries: The number of entries the block map will hold.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int make_forest(struct block_map *map, block_count_t entries)
+{
+ struct forest *forest, *old_forest = map->forest;
+ struct boundary new_boundary, *old_boundary = NULL;
+ block_count_t new_pages;
+ int result;
+
+ if (old_forest != NULL)
+ old_boundary = &(old_forest->boundaries[old_forest->segments - 1]);
+
+ new_pages = vdo_compute_new_forest_pages(map->root_count, old_boundary,
+ entries, &new_boundary);
+ if (new_pages == 0) {
+ map->next_entry_count = entries;
+ return VDO_SUCCESS;
+ }
+
+ result = vdo_allocate_extended(struct forest, map->root_count,
+ struct block_map_tree, __func__,
+ &forest);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ forest->map = map;
+ result = make_segment(old_forest, new_pages, &new_boundary, forest);
+ if (result != VDO_SUCCESS) {
+ deforest(forest, forest->segments - 1);
+ return result;
+ }
+
+ map->next_forest = forest;
+ map->next_entry_count = entries;
+ return VDO_SUCCESS;
+}
+
+/**
+ * replace_forest() - Replace a block_map's forest with the already-prepared larger forest.
+ */
+static void replace_forest(struct block_map *map)
+{
+ if (map->next_forest != NULL) {
+ if (map->forest != NULL)
+ deforest(map->forest, map->forest->segments);
+ map->forest = vdo_forget(map->next_forest);
+ }
+
+ map->entry_count = map->next_entry_count;
+ map->next_entry_count = 0;
+}
+
+/**
+ * finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish the
+ * traversal.
+ */
+static void finish_cursor(struct cursor *cursor)
+{
+ struct cursors *cursors = cursor->parent;
+ struct vdo_completion *completion = cursors->completion;
+
+ return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
+ if (--cursors->active_roots > 0)
+ return;
+
+ vdo_free(cursors);
+
+ vdo_finish_completion(completion);
+}
+
+static void traverse(struct cursor *cursor);
+
+/**
+ * continue_traversal() - Continue traversing a block map tree.
+ * @completion: The VIO doing a read or write.
+ */
+static void continue_traversal(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ traverse(completion->parent);
+}
+
+/**
+ * finish_traversal_load() - Continue traversing a block map tree now that a page has been loaded.
+ * @completion: The VIO doing the read.
+ */
+static void finish_traversal_load(struct vdo_completion *completion)
+{
+ struct cursor *cursor = completion->parent;
+ height_t height = cursor->height;
+ struct cursor_level *level = &cursor->levels[height];
+ struct tree_page *tree_page =
+ &(cursor->tree->segments[0].levels[height][level->page_index]);
+ struct block_map_page *page = (struct block_map_page *) tree_page->page_buffer;
+
+ vdo_copy_valid_page(cursor->vio->vio.data,
+ cursor->parent->zone->block_map->nonce,
+ pbn_from_vio_bio(cursor->vio->vio.bio), page);
+ traverse(cursor);
+}
+
+static void traversal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct cursor *cursor = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_traversal_load,
+ cursor->parent->zone->thread_id);
+}
+
+/**
+ * traverse() - Traverse a single block map tree.
+ *
+ * This is the recursive heart of the traversal process.
+ */
+static void traverse(struct cursor *cursor)
+{
+ for (; cursor->height < VDO_BLOCK_MAP_TREE_HEIGHT; cursor->height++) {
+ height_t height = cursor->height;
+ struct cursor_level *level = &cursor->levels[height];
+ struct tree_page *tree_page =
+ &(cursor->tree->segments[0].levels[height][level->page_index]);
+ struct block_map_page *page = (struct block_map_page *) tree_page->page_buffer;
+
+ if (!page->header.initialized)
+ continue;
+
+ for (; level->slot < VDO_BLOCK_MAP_ENTRIES_PER_PAGE; level->slot++) {
+ struct cursor_level *next_level;
+ page_number_t entry_index =
+ (VDO_BLOCK_MAP_ENTRIES_PER_PAGE * level->page_index) + level->slot;
+ struct data_location location =
+ vdo_unpack_block_map_entry(&page->entries[level->slot]);
+
+ if (!vdo_is_valid_location(&location)) {
+ /* This entry is invalid, so remove it from the page. */
+ page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_write_tree_page(tree_page, cursor->parent->zone);
+ continue;
+ }
+
+ if (!vdo_is_mapped_location(&location))
+ continue;
+
+ /* Erase mapped entries past the end of the logical space. */
+ if (entry_index >= cursor->boundary.levels[height]) {
+ page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_write_tree_page(tree_page, cursor->parent->zone);
+ continue;
+ }
+
+ if (cursor->height < VDO_BLOCK_MAP_TREE_HEIGHT - 1) {
+ int result = cursor->parent->entry_callback(location.pbn,
+ cursor->parent->completion);
+ if (result != VDO_SUCCESS) {
+ page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_write_tree_page(tree_page, cursor->parent->zone);
+ continue;
+ }
+ }
+
+ if (cursor->height == 0)
+ continue;
+
+ cursor->height--;
+ next_level = &cursor->levels[cursor->height];
+ next_level->page_index = entry_index;
+ next_level->slot = 0;
+ level->slot++;
+ vdo_submit_metadata_vio(&cursor->vio->vio, location.pbn,
+ traversal_endio, continue_traversal,
+ REQ_OP_READ | REQ_PRIO);
+ return;
+ }
+ }
+
+ finish_cursor(cursor);
+}
+
+/**
+ * launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with
+ * which to load pages.
+ * @context: The pooled_vio just acquired.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void launch_cursor(struct vdo_waiter *waiter, void *context)
+{
+ struct cursor *cursor = container_of(waiter, struct cursor, waiter);
+ struct pooled_vio *pooled = context;
+
+ cursor->vio = pooled;
+ pooled->vio.completion.parent = cursor;
+ pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id;
+ traverse(cursor);
+}
+
+/**
+ * compute_boundary() - Compute the number of pages used at each level of the given root's tree.
+ *
+ * Return: The list of page counts as a boundary structure.
+ */
+static struct boundary compute_boundary(struct block_map *map, root_count_t root_index)
+{
+ struct boundary boundary;
+ height_t height;
+ page_count_t leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
+ /*
+ * Compute the leaf pages for this root. If the number of leaf pages does not distribute
+ * evenly, we must determine if this root gets an extra page. Extra pages are assigned to
+ * roots starting from tree 0.
+ */
+ page_count_t last_tree_root = (leaf_pages - 1) % map->root_count;
+ page_count_t level_pages = leaf_pages / map->root_count;
+
+ if (root_index <= last_tree_root)
+ level_pages++;
+
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT - 1; height++) {
+ boundary.levels[height] = level_pages;
+ level_pages = DIV_ROUND_UP(level_pages, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
+ }
+
+ /* The root node always exists, even if the root is otherwise unused. */
+ boundary.levels[VDO_BLOCK_MAP_TREE_HEIGHT - 1] = 1;
+
+ return boundary;
+}
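+
+/*
+ * Illustrative example: with 10 leaf pages and root_count = 3, last_tree_root
+ * = (10 - 1) % 3 = 0 and level_pages = 10 / 3 = 3, so root 0 is sized for 4
+ * leaf pages while roots 1 and 2 get 3 each (4 + 3 + 3 = 10). Each higher
+ * level then needs DIV_ROUND_UP(previous level, VDO_BLOCK_MAP_ENTRIES_PER_PAGE)
+ * pages, and the top level is always exactly one root page.
+ */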
+
+/**
+ * vdo_traverse_forest() - Walk the entire forest of a block map.
+ * @callback: A function to call with the pbn of each allocated node in the forest.
+ * @completion: The completion to notify on each traversed PBN, and when traversal completes.
+ */
+void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
+ struct vdo_completion *completion)
+{
+ root_count_t root;
+ struct cursors *cursors;
+ int result;
+
+ result = vdo_allocate_extended(struct cursors, map->root_count,
+ struct cursor, __func__, &cursors);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(completion, result);
+ return;
+ }
+
+ cursors->zone = &map->zones[0];
+ cursors->pool = cursors->zone->vio_pool;
+ cursors->entry_callback = callback;
+ cursors->completion = completion;
+ cursors->active_roots = map->root_count;
+ for (root = 0; root < map->root_count; root++) {
+ struct cursor *cursor = &cursors->cursors[root];
+
+ *cursor = (struct cursor) {
+ .tree = &map->forest->trees[root],
+ .height = VDO_BLOCK_MAP_TREE_HEIGHT - 1,
+ .parent = cursors,
+ .boundary = compute_boundary(map, root),
+ };
+
+ cursor->waiter.callback = launch_cursor;
+ acquire_vio_from_pool(cursors->pool, &cursor->waiter);
+ }
+}
+
+/**
+ * initialize_block_map_zone() - Initialize the per-zone portions of the block map.
+ * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be
+ * written out.
+ */
+static int __must_check initialize_block_map_zone(struct block_map *map,
+ zone_count_t zone_number,
+ page_count_t cache_size,
+ block_count_t maximum_age)
+{
+ int result;
+ block_count_t i;
+ struct vdo *vdo = map->vdo;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ BUILD_BUG_ON(sizeof(struct page_descriptor) != sizeof(u64));
+
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.logical_threads[zone_number];
+ zone->block_map = map;
+
+ result = vdo_allocate_extended(struct dirty_lists, maximum_age,
+ dirty_era_t, __func__,
+ &zone->dirty_lists);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ zone->dirty_lists->maximum_age = maximum_age;
+ INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]);
+ INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]);
+
+ for (i = 0; i < maximum_age; i++) {
+ INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]);
+ INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]);
+ }
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE,
+ zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR,
+ VIO_PRIORITY_METADATA, zone, &zone->vio_pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+
+ zone->page_cache.zone = zone;
+ zone->page_cache.vdo = vdo;
+ zone->page_cache.page_count = cache_size / map->zone_count;
+ zone->page_cache.stats.free_pages = zone->page_cache.page_count;
+
+ result = allocate_cache_components(&zone->page_cache);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* initialize empty circular queues */
+ INIT_LIST_HEAD(&zone->page_cache.lru_list);
+ INIT_LIST_HEAD(&zone->page_cache.outgoing_list);
+
+ return VDO_SUCCESS;
+}
+
+/* Implements vdo_zone_thread_getter_fn */
+static thread_id_t get_block_map_zone_thread_id(void *context, zone_count_t zone_number)
+{
+ struct block_map *map = context;
+
+ return map->zones[zone_number].thread_id;
+}
+
+/* Implements vdo_action_preamble_fn */
+static void prepare_for_era_advance(void *context, struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+
+ map->current_era_point = map->pending_era_point;
+ vdo_finish_completion(parent);
+}
+
+/* Implements vdo_zone_action_fn */
+static void advance_block_map_zone_era(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ update_period(zone->dirty_lists, map->current_era_point);
+ write_expired_elements(zone);
+ vdo_finish_completion(parent);
+}
+
+/*
+ * Schedule an era advance if necessary. This method should not be called directly. Rather, call
+ * vdo_schedule_default_action() on the block map's action manager.
+ *
+ * Implements vdo_action_scheduler_fn.
+ */
+static bool schedule_era_advance(void *context)
+{
+ struct block_map *map = context;
+
+ if (map->current_era_point == map->pending_era_point)
+ return false;
+
+ return vdo_schedule_action(map->action_manager, prepare_for_era_advance,
+ advance_block_map_zone_era, NULL, NULL);
+}
+
+static void uninitialize_block_map_zone(struct block_map_zone *zone)
+{
+ struct vdo_page_cache *cache = &zone->page_cache;
+
+ vdo_free(vdo_forget(zone->dirty_lists));
+ free_vio_pool(vdo_forget(zone->vio_pool));
+ vdo_int_map_free(vdo_forget(zone->loading_pages));
+ if (cache->infos != NULL) {
+ struct page_info *info;
+
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++)
+ free_vio(vdo_forget(info->vio));
+ }
+
+ vdo_int_map_free(vdo_forget(cache->page_map));
+ vdo_free(vdo_forget(cache->infos));
+ vdo_free(vdo_forget(cache->pages));
+}
+
+void vdo_free_block_map(struct block_map *map)
+{
+ zone_count_t zone;
+
+ if (map == NULL)
+ return;
+
+ for (zone = 0; zone < map->zone_count; zone++)
+ uninitialize_block_map_zone(&map->zones[zone]);
+
+ vdo_abandon_block_map_growth(map);
+ if (map->forest != NULL)
+ deforest(vdo_forget(map->forest), 0);
+ vdo_free(vdo_forget(map->action_manager));
+ vdo_free(map);
+}
+
+/* @journal may be NULL. */
+int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical_blocks,
+ struct vdo *vdo, struct recovery_journal *journal,
+ nonce_t nonce, page_count_t cache_size, block_count_t maximum_age,
+ struct block_map **map_ptr)
+{
+ struct block_map *map;
+ int result;
+ zone_count_t zone = 0;
+
+ BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE !=
+ ((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) /
+ sizeof(struct block_map_entry)));
+ result = VDO_ASSERT(cache_size > 0, "block map cache size is specified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate_extended(struct block_map,
+ vdo->thread_config.logical_zone_count,
+ struct block_map_zone, __func__, &map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ map->vdo = vdo;
+ map->root_origin = state.root_origin;
+ map->root_count = state.root_count;
+ map->entry_count = logical_blocks;
+ map->journal = journal;
+ map->nonce = nonce;
+
+ result = make_forest(map, map->entry_count);
+ if (result != VDO_SUCCESS) {
+ vdo_free_block_map(map);
+ return result;
+ }
+
+ replace_forest(map);
+
+ map->zone_count = vdo->thread_config.logical_zone_count;
+ for (zone = 0; zone < map->zone_count; zone++) {
+ result = initialize_block_map_zone(map, zone, cache_size, maximum_age);
+ if (result != VDO_SUCCESS) {
+ vdo_free_block_map(map);
+ return result;
+ }
+ }
+
+ result = vdo_make_action_manager(map->zone_count, get_block_map_zone_thread_id,
+ vdo_get_recovery_journal_thread_id(journal),
+ map, schedule_era_advance, vdo,
+ &map->action_manager);
+ if (result != VDO_SUCCESS) {
+ vdo_free_block_map(map);
+ return result;
+ }
+
+ *map_ptr = map;
+ return VDO_SUCCESS;
+}
+
+struct block_map_state_2_0 vdo_record_block_map(const struct block_map *map)
+{
+ return (struct block_map_state_2_0) {
+ .flat_page_origin = VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+ /* This is the flat page count, which has turned out to always be 0. */
+ .flat_page_count = 0,
+ .root_origin = map->root_origin,
+ .root_count = map->root_count,
+ };
+}
+
+/* The block map needs to know the journal's sequence number to initialize the eras. */
+void vdo_initialize_block_map_from_journal(struct block_map *map,
+ struct recovery_journal *journal)
+{
+ zone_count_t z = 0;
+
+ map->current_era_point = vdo_get_recovery_journal_current_sequence_number(journal);
+ map->pending_era_point = map->current_era_point;
+
+ for (z = 0; z < map->zone_count; z++) {
+ struct dirty_lists *dirty_lists = map->zones[z].dirty_lists;
+
+ VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
+ dirty_lists->oldest_period = map->current_era_point;
+ dirty_lists->next_period = map->current_era_point + 1;
+ dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age;
+ }
+}
+
+/* Compute the logical zone for the LBN of a data vio. */
+zone_count_t vdo_compute_logical_zone(struct data_vio *data_vio)
+{
+ struct block_map *map = vdo_from_data_vio(data_vio)->block_map;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+ page_number_t page_number = data_vio->logical.lbn / VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ tree_lock->tree_slots[0].page_index = page_number;
+ tree_lock->root_index = page_number % map->root_count;
+ return (tree_lock->root_index % map->zone_count);
+}
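+
+/*
+ * Illustrative example (hypothetical configuration): with root_count = 60 and
+ * zone_count = 3, an LBN that falls on block map page 125 gets root_index =
+ * 125 % 60 = 5 and is therefore handled by logical zone 5 % 3 = 2. Every LBN
+ * on that page maps to the same root and zone.
+ */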
+
+void vdo_advance_block_map_era(struct block_map *map,
+ sequence_number_t recovery_block_number)
+{
+ if (map == NULL)
+ return;
+
+ map->pending_era_point = recovery_block_number;
+ vdo_schedule_default_action(map->action_manager);
+}
+
+/* Implements vdo_admin_initiator_fn */
+static void initiate_drain(struct admin_state *state)
+{
+ struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
+
+ VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
+ "%s() called with no active lookups", __func__);
+
+ if (!vdo_is_state_suspending(state)) {
+ while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
+ expire_oldest_list(zone->dirty_lists);
+ write_expired_elements(zone);
+ }
+
+ check_for_drain_complete(zone);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void drain_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ vdo_start_draining(&zone->state,
+ vdo_get_current_manager_operation(map->action_manager),
+ parent, initiate_drain);
+}
+
+void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_operation(map->action_manager, operation, NULL, drain_zone, NULL,
+ parent);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void resume_block_map_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
+}
+
+void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(map->action_manager, VDO_ADMIN_STATE_RESUMING,
+ NULL, resume_block_map_zone, NULL, parent);
+}
+
+/* Allocate an expanded collection of trees, for future growth. */
+int vdo_prepare_to_grow_block_map(struct block_map *map,
+ block_count_t new_logical_blocks)
+{
+ if (map->next_entry_count == new_logical_blocks)
+ return VDO_SUCCESS;
+
+ if (map->next_entry_count > 0)
+ vdo_abandon_block_map_growth(map);
+
+ if (new_logical_blocks < map->entry_count) {
+ map->next_entry_count = map->entry_count;
+ return VDO_SUCCESS;
+ }
+
+ return make_forest(map, new_logical_blocks);
+}
+
+/* Implements vdo_action_preamble_fn */
+static void grow_forest(void *context, struct vdo_completion *completion)
+{
+ replace_forest(context);
+ vdo_finish_completion(completion);
+}
+
+/* Requires vdo_prepare_to_grow_block_map() to have been previously called. */
+void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(map->action_manager,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION,
+ grow_forest, NULL, NULL, parent);
+}
+
+void vdo_abandon_block_map_growth(struct block_map *map)
+{
+ struct forest *forest = vdo_forget(map->next_forest);
+
+ if (forest != NULL)
+ deforest(forest, forest->segments - 1);
+
+ map->next_entry_count = 0;
+}
+
+/* Release the page completion and then continue the requester. */
+static inline void finish_processing_page(struct vdo_completion *completion, int result)
+{
+ struct vdo_completion *parent = completion->parent;
+
+ vdo_release_page_completion(completion);
+ vdo_continue_completion(parent, result);
+}
+
+static void handle_page_error(struct vdo_completion *completion)
+{
+ finish_processing_page(completion, completion->result);
+}
+
+/* Fetch the mapping page for a block map update, and call the provided handler when fetched. */
+static void fetch_mapping_page(struct data_vio *data_vio, bool modifiable,
+ vdo_action_fn action)
+{
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+
+ if (vdo_is_state_draining(&zone->state)) {
+ continue_data_vio_with_error(data_vio, VDO_SHUTTING_DOWN);
+ return;
+ }
+
+ vdo_get_page(&data_vio->page_completion, zone,
+ data_vio->tree_lock.tree_slots[0].block_map_slot.pbn,
+ modifiable, &data_vio->vio.completion,
+ action, handle_page_error, false);
+}
+
+/**
+ * clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped.
+ *
+ * This indicates the block map entry for the logical block is either unmapped or corrupted.
+ */
+static void clear_mapped_location(struct data_vio *data_vio)
+{
+ data_vio->mapped = (struct zoned_pbn) {
+ .state = VDO_MAPPING_STATE_UNMAPPED,
+ };
+}
+
+/**
+ * set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a
+ * data_vio.
+ *
+ * Return: VDO_SUCCESS, VDO_BAD_MAPPING if the map entry is invalid, or an error code for any
+ * other failure.
+ */
+static int __must_check set_mapped_location(struct data_vio *data_vio,
+ const struct block_map_entry *entry)
+{
+ /* Unpack the PBN for logging purposes even if the entry is invalid. */
+ struct data_location mapped = vdo_unpack_block_map_entry(entry);
+
+ if (vdo_is_valid_location(&mapped)) {
+ int result;
+
+ result = vdo_get_physical_zone(vdo_from_data_vio(data_vio),
+ mapped.pbn, &data_vio->mapped.zone);
+ if (result == VDO_SUCCESS) {
+ data_vio->mapped.pbn = mapped.pbn;
+ data_vio->mapped.state = mapped.state;
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * Return all errors not specifically known to be errors from validating the
+ * location.
+ */
+ if ((result != VDO_OUT_OF_RANGE) && (result != VDO_BAD_MAPPING))
+ return result;
+ }
+
+ /*
+ * Log the corruption even if we wind up ignoring it for write VIOs, converting all cases
+ * to VDO_BAD_MAPPING.
+ */
+ vdo_log_error_strerror(VDO_BAD_MAPPING,
+ "PBN %llu with state %u read from the block map was invalid",
+ (unsigned long long) mapped.pbn, mapped.state);
+
+ /*
+ * A read VIO has no option but to report the bad mapping--reading zeros would be hiding
+ * known data loss.
+ */
+ if (!data_vio->write)
+ return VDO_BAD_MAPPING;
+
+ /*
+ * A write VIO only reads this mapping to decref the old block. Treat this as an unmapped
+ * entry rather than fail the write.
+ */
+ clear_mapped_location(data_vio);
+ return VDO_SUCCESS;
+}
+
+/* This callback is registered in vdo_get_mapped_block(). */
+static void get_mapping_from_fetched_page(struct vdo_completion *completion)
+{
+ int result;
+ struct vdo_page_completion *vpc = as_vdo_page_completion(completion);
+ const struct block_map_page *page;
+ const struct block_map_entry *entry;
+ struct data_vio *data_vio = as_data_vio(completion->parent);
+ struct block_map_tree_slot *tree_slot;
+
+ if (completion->result != VDO_SUCCESS) {
+ finish_processing_page(completion, completion->result);
+ return;
+ }
+
+ result = validate_completed_page(vpc, false);
+ if (result != VDO_SUCCESS) {
+ finish_processing_page(completion, result);
+ return;
+ }
+
+ page = (const struct block_map_page *) get_page_buffer(vpc->info);
+ tree_slot = &data_vio->tree_lock.tree_slots[0];
+ entry = &page->entries[tree_slot->block_map_slot.slot];
+
+ result = set_mapped_location(data_vio, entry);
+ finish_processing_page(completion, result);
+}
+
+void vdo_update_block_map_page(struct block_map_page *page, struct data_vio *data_vio,
+ physical_block_number_t pbn,
+ enum block_mapping_state mapping_state,
+ sequence_number_t *recovery_lock)
+{
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+ struct block_map *block_map = zone->block_map;
+ struct recovery_journal *journal = block_map->journal;
+ sequence_number_t old_locked, new_locked;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+
+ /* Encode the new mapping. */
+ page->entries[tree_lock->tree_slots[tree_lock->height].block_map_slot.slot] =
+ vdo_pack_block_map_entry(pbn, mapping_state);
+
+ /* Adjust references on the recovery journal blocks. */
+ old_locked = *recovery_lock;
+ new_locked = data_vio->recovery_sequence_number;
+
+ if ((old_locked == 0) || (old_locked > new_locked)) {
+ vdo_acquire_recovery_journal_block_reference(journal, new_locked,
+ VDO_ZONE_TYPE_LOGICAL,
+ zone->zone_number);
+
+ if (old_locked > 0) {
+ vdo_release_recovery_journal_block_reference(journal, old_locked,
+ VDO_ZONE_TYPE_LOGICAL,
+ zone->zone_number);
+ }
+
+ *recovery_lock = new_locked;
+ }
+
+ /*
+ * FIXME: explain this more
+ * Release the transferred lock from the data_vio.
+ */
+ vdo_release_journal_entry_lock(journal, new_locked);
+ data_vio->recovery_sequence_number = 0;
+}
+
+static void put_mapping_in_fetched_page(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion->parent);
+ sequence_number_t old_lock;
+ struct vdo_page_completion *vpc;
+ struct page_info *info;
+ int result;
+
+ if (completion->result != VDO_SUCCESS) {
+ finish_processing_page(completion, completion->result);
+ return;
+ }
+
+ vpc = as_vdo_page_completion(completion);
+ result = validate_completed_page(vpc, true);
+ if (result != VDO_SUCCESS) {
+ finish_processing_page(completion, result);
+ return;
+ }
+
+ info = vpc->info;
+ old_lock = info->recovery_lock;
+ vdo_update_block_map_page((struct block_map_page *) get_page_buffer(info),
+ data_vio, data_vio->new_mapped.pbn,
+ data_vio->new_mapped.state, &info->recovery_lock);
+ set_info_state(info, PS_DIRTY);
+ add_to_dirty_lists(info->cache->zone, &info->state_entry,
+ VDO_CACHE_PAGE, old_lock, info->recovery_lock);
+ finish_processing_page(completion, VDO_SUCCESS);
+}
+
+/* Read a stored block mapping into a data_vio. */
+void vdo_get_mapped_block(struct data_vio *data_vio)
+{
+ if (data_vio->tree_lock.tree_slots[0].block_map_slot.pbn == VDO_ZERO_BLOCK) {
+ /*
+ * We know that the block map page for this LBN has not been allocated, so the
+ * block must be unmapped.
+ */
+ clear_mapped_location(data_vio);
+ continue_data_vio(data_vio);
+ return;
+ }
+
+ fetch_mapping_page(data_vio, false, get_mapping_from_fetched_page);
+}
+
+/* Update a stored block mapping to reflect a data_vio's new mapping. */
+void vdo_put_mapped_block(struct data_vio *data_vio)
+{
+ fetch_mapping_page(data_vio, true, put_mapping_in_fetched_page);
+}
+
+struct block_map_statistics vdo_get_block_map_statistics(struct block_map *map)
+{
+ zone_count_t zone = 0;
+ struct block_map_statistics totals;
+
+ memset(&totals, 0, sizeof(struct block_map_statistics));
+ for (zone = 0; zone < map->zone_count; zone++) {
+ const struct block_map_statistics *stats =
+ &(map->zones[zone].page_cache.stats);
+
+ totals.dirty_pages += READ_ONCE(stats->dirty_pages);
+ totals.clean_pages += READ_ONCE(stats->clean_pages);
+ totals.free_pages += READ_ONCE(stats->free_pages);
+ totals.failed_pages += READ_ONCE(stats->failed_pages);
+ totals.incoming_pages += READ_ONCE(stats->incoming_pages);
+ totals.outgoing_pages += READ_ONCE(stats->outgoing_pages);
+ totals.cache_pressure += READ_ONCE(stats->cache_pressure);
+ totals.read_count += READ_ONCE(stats->read_count);
+ totals.write_count += READ_ONCE(stats->write_count);
+ totals.failed_reads += READ_ONCE(stats->failed_reads);
+ totals.failed_writes += READ_ONCE(stats->failed_writes);
+ totals.reclaimed += READ_ONCE(stats->reclaimed);
+ totals.read_outgoing += READ_ONCE(stats->read_outgoing);
+ totals.found_in_cache += READ_ONCE(stats->found_in_cache);
+ totals.discard_required += READ_ONCE(stats->discard_required);
+ totals.wait_for_page += READ_ONCE(stats->wait_for_page);
+ totals.fetch_required += READ_ONCE(stats->fetch_required);
+ totals.pages_loaded += READ_ONCE(stats->pages_loaded);
+ totals.pages_saved += READ_ONCE(stats->pages_saved);
+ totals.flush_count += READ_ONCE(stats->flush_count);
+ }
+
+ return totals;
+}
diff --git a/drivers/md/dm-vdo/block-map.h b/drivers/md/dm-vdo/block-map.h
new file mode 100644
index 000000000000..39a13039e4a3
--- /dev/null
+++ b/drivers/md/dm-vdo/block-map.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_BLOCK_MAP_H
+#define VDO_BLOCK_MAP_H
+
+#include <linux/list.h>
+
+#include "numeric.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "encodings.h"
+#include "int-map.h"
+#include "statistics.h"
+#include "types.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/*
+ * The block map is responsible for tracking all the logical to physical mappings of a VDO. It
+ * consists of a collection of 60 radix trees gradually allocated as logical addresses are used.
+ * Each tree is assigned to a logical zone such that it is easy to compute which zone must handle
+ * each logical address. Each logical zone also has a dedicated portion of the leaf page cache.
+ *
+ * Each logical zone has a single dedicated queue and thread for performing all updates to the
+ * radix trees assigned to that zone. The concurrency guarantees of this single-threaded model
+ * allow the code to omit more fine-grained locking for the block map structures.
+ *
+ * Load operations must be performed on the admin thread. Normal operations, such as reading and
+ * updating mappings, must be performed on the appropriate logical zone thread. Save operations
+ * must be launched from the same admin thread as the original load operation.
+ */
+
+enum {
+ BLOCK_MAP_VIO_POOL_SIZE = 64,
+};
+
+/*
+ * Generation counter for page references.
+ */
+typedef u32 vdo_page_generation;
+
+extern const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY;
+
+/* The VDO Page Cache abstraction. */
+struct vdo_page_cache {
+ /* the VDO which owns this cache */
+ struct vdo *vdo;
+ /* number of pages in cache */
+ page_count_t page_count;
+ /* number of pages to write in the current batch */
+ page_count_t pages_in_batch;
+ /* Whether the VDO is doing a read-only rebuild */
+ bool rebuilding;
+
+ /* array of page information entries */
+ struct page_info *infos;
+ /* raw memory for pages */
+ char *pages;
+ /* cache last found page info */
+ struct page_info *last_found;
+ /* map of page number to info */
+ struct int_map *page_map;
+ /* main LRU list (all infos) */
+ struct list_head lru_list;
+ /* free page list (oldest first) */
+ struct list_head free_list;
+ /* outgoing page list */
+ struct list_head outgoing_list;
+ /* number of read I/O operations pending */
+ page_count_t outstanding_reads;
+ /* number of write I/O operations pending */
+ page_count_t outstanding_writes;
+ /* number of pages covered by the current flush */
+ page_count_t pages_in_flush;
+ /* number of pages waiting to be included in the next flush */
+ page_count_t pages_to_flush;
+ /* number of discards in progress */
+ unsigned int discard_count;
+ /* how many VPCs waiting for free page */
+ unsigned int waiter_count;
+ /* queue of waiters who want a free page */
+ struct vdo_wait_queue free_waiters;
+ /*
+ * Statistics are only updated on the logical zone thread, but are accessed from other
+ * threads.
+ */
+ struct block_map_statistics stats;
+ /* counter for pressure reports */
+ u32 pressure_report;
+ /* the block map zone to which this cache belongs */
+ struct block_map_zone *zone;
+};
+
+/*
+ * The state of a page buffer. If the page buffer is free, no particular page is bound to it;
+ * otherwise the page buffer is bound to a particular page whose absolute pbn is in the pbn field.
+ * If the page is resident or dirty, the page data is stable and may be accessed. Otherwise the
+ * page is in flight (incoming or outgoing) and its data should not be accessed.
+ *
+ * @note Update the static data in get_page_state_name() if you change this enumeration.
+ */
+enum vdo_page_buffer_state {
+ /* this page buffer is not being used */
+ PS_FREE,
+ /* this page is being read from store */
+ PS_INCOMING,
+ /* attempt to load this page failed */
+ PS_FAILED,
+ /* this page is valid and un-modified */
+ PS_RESIDENT,
+ /* this page is valid and modified */
+ PS_DIRTY,
+ /* this page is being written and should not be used */
+ PS_OUTGOING,
+ /* not a state */
+ PAGE_STATE_COUNT,
+} __packed;
+
+/*
+ * The write status of a page
+ */
+enum vdo_page_write_status {
+ WRITE_STATUS_NORMAL,
+ WRITE_STATUS_DISCARD,
+ WRITE_STATUS_DEFERRED,
+} __packed;
+
+/* Per-page-slot information. */
+struct page_info {
+ /* Preallocated page struct vio */
+ struct vio *vio;
+ /* back-link for references */
+ struct vdo_page_cache *cache;
+ /* the pbn of the page */
+ physical_block_number_t pbn;
+ /* page is busy (temporarily locked) */
+ u16 busy;
+ /* the write status of the page */
+ enum vdo_page_write_status write_status;
+ /* page state */
+ enum vdo_page_buffer_state state;
+ /* queue of completions awaiting this item */
+ struct vdo_wait_queue waiting;
+ /* state linked list entry */
+ struct list_head state_entry;
+ /* LRU entry */
+ struct list_head lru_entry;
+ /*
+ * The earliest recovery journal block containing uncommitted updates to the block map page
+ * associated with this page_info. A reference (lock) is held on that block to prevent it
+ * from being reaped. When this value changes, the reference on the old value must be
+ * released and a reference on the new value must be acquired.
+ */
+ sequence_number_t recovery_lock;
+};
+
+/*
+ * A completion awaiting a specific page. Also a live reference into the page once completed, until
+ * freed.
+ */
+struct vdo_page_completion {
+ /* The generic completion */
+ struct vdo_completion completion;
+ /* The cache involved */
+ struct vdo_page_cache *cache;
+ /* The waiter for the pending list */
+ struct vdo_waiter waiter;
+ /* The absolute physical block number of the page on disk */
+ physical_block_number_t pbn;
+ /* Whether the page may be modified */
+ bool writable;
+ /* Whether the page is available */
+ bool ready;
+ /* The info structure for the page, only valid when ready */
+ struct page_info *info;
+};
+
+struct forest;
+
+struct tree_page {
+ struct vdo_waiter waiter;
+
+ /* Dirty list entry */
+ struct list_head entry;
+
+ /* If dirty, the tree zone flush generation in which it was last dirtied. */
+ u8 generation;
+
+ /* Whether this page is an interior tree page being written out. */
+ bool writing;
+
+ /* If writing, the tree zone flush generation of the copy being written. */
+ u8 writing_generation;
+
+ /*
+ * Sequence number of the earliest recovery journal block containing uncommitted updates to
+ * this page
+ */
+ sequence_number_t recovery_lock;
+
+ /* The value of recovery_lock when this page last started writing */
+ sequence_number_t writing_recovery_lock;
+
+ char page_buffer[VDO_BLOCK_SIZE];
+};
+
+enum block_map_page_type {
+ VDO_TREE_PAGE,
+ VDO_CACHE_PAGE,
+};
+
+typedef struct list_head dirty_era_t[2];
+
+struct dirty_lists {
+ /* The number of periods after which an element will be expired */
+ block_count_t maximum_age;
+ /* The oldest period which has unexpired elements */
+ sequence_number_t oldest_period;
+ /* One more than the current period */
+ sequence_number_t next_period;
+ /* The offset in the array of lists of the oldest period */
+ block_count_t offset;
+ /* Expired pages */
+ dirty_era_t expired;
+ /* The lists of dirty pages */
+ dirty_era_t eras[];
+};
+
+struct block_map_zone {
+ zone_count_t zone_number;
+ thread_id_t thread_id;
+ struct admin_state state;
+ struct block_map *block_map;
+ /* Dirty pages, by era */
+ struct dirty_lists *dirty_lists;
+ struct vdo_page_cache page_cache;
+ data_vio_count_t active_lookups;
+ struct int_map *loading_pages;
+ struct vio_pool *vio_pool;
+ /* The tree page which has issued or will be issuing a flush */
+ struct tree_page *flusher;
+ struct vdo_wait_queue flush_waiters;
+ /* The generation after the most recent flush */
+ u8 generation;
+ u8 oldest_generation;
+ /* The counts of dirty pages in each generation */
+ u32 dirty_page_counts[256];
+};
+
+struct block_map {
+ struct vdo *vdo;
+ struct action_manager *action_manager;
+ /* The absolute PBN of the first root of the tree part of the block map */
+ physical_block_number_t root_origin;
+ block_count_t root_count;
+
+ /* The era point we are currently distributing to the zones */
+ sequence_number_t current_era_point;
+ /* The next era point */
+ sequence_number_t pending_era_point;
+
+ /* The number of entries in the block map */
+ block_count_t entry_count;
+ nonce_t nonce;
+ struct recovery_journal *journal;
+
+ /* The trees for finding block map pages */
+ struct forest *forest;
+ /* The expanded trees awaiting growth */
+ struct forest *next_forest;
+ /* The number of entries after growth */
+ block_count_t next_entry_count;
+
+ zone_count_t zone_count;
+ struct block_map_zone zones[];
+};
+
+/**
+ * typedef vdo_entry_callback_fn - A function to be called for each allocated PBN when traversing
+ * the forest.
+ * @pbn: A PBN of a tree node.
+ * @completion: The parent completion of the traversal.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+typedef int (*vdo_entry_callback_fn)(physical_block_number_t pbn,
+ struct vdo_completion *completion);
+
+static inline struct vdo_page_completion *as_vdo_page_completion(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_PAGE_COMPLETION);
+ return container_of(completion, struct vdo_page_completion, completion);
+}
+
+void vdo_release_page_completion(struct vdo_completion *completion);
+
+void vdo_get_page(struct vdo_page_completion *page_completion,
+ struct block_map_zone *zone, physical_block_number_t pbn,
+ bool writable, void *parent, vdo_action_fn callback,
+ vdo_action_fn error_handler, bool requeue);
+
+void vdo_request_page_write(struct vdo_completion *completion);
+
+int __must_check vdo_get_cached_page(struct vdo_completion *completion,
+ struct block_map_page **page_ptr);
+
+int __must_check vdo_invalidate_page_cache(struct vdo_page_cache *cache);
+
+static inline struct block_map_page * __must_check
+vdo_as_block_map_page(struct tree_page *tree_page)
+{
+ return (struct block_map_page *) tree_page->page_buffer;
+}
+
+bool vdo_copy_valid_page(char *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ struct block_map_page *page);
+
+void vdo_find_block_map_slot(struct data_vio *data_vio);
+
+physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
+ page_number_t page_number);
+
+void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone);
+
+void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
+ struct vdo_completion *completion);
+
+int __must_check vdo_decode_block_map(struct block_map_state_2_0 state,
+ block_count_t logical_blocks, struct vdo *vdo,
+ struct recovery_journal *journal, nonce_t nonce,
+ page_count_t cache_size, block_count_t maximum_age,
+ struct block_map **map_ptr);
+
+void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
+ struct vdo_completion *parent);
+
+void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent);
+
+int __must_check vdo_prepare_to_grow_block_map(struct block_map *map,
+ block_count_t new_logical_blocks);
+
+void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent);
+
+void vdo_abandon_block_map_growth(struct block_map *map);
+
+void vdo_free_block_map(struct block_map *map);
+
+struct block_map_state_2_0 __must_check vdo_record_block_map(const struct block_map *map);
+
+void vdo_initialize_block_map_from_journal(struct block_map *map,
+ struct recovery_journal *journal);
+
+zone_count_t vdo_compute_logical_zone(struct data_vio *data_vio);
+
+void vdo_advance_block_map_era(struct block_map *map,
+ sequence_number_t recovery_block_number);
+
+void vdo_update_block_map_page(struct block_map_page *page, struct data_vio *data_vio,
+ physical_block_number_t pbn,
+ enum block_mapping_state mapping_state,
+ sequence_number_t *recovery_lock);
+
+void vdo_get_mapped_block(struct data_vio *data_vio);
+
+void vdo_put_mapped_block(struct data_vio *data_vio);
+
+struct block_map_statistics __must_check vdo_get_block_map_statistics(struct block_map *map);
+
+/**
+ * vdo_convert_maximum_age() - Convert the maximum age to reflect the new recovery journal format
+ * @age: The configured maximum age
+ *
+ * Return: The converted age
+ *
+ * In the old recovery journal format, each journal block held 311 entries, and every write bio
+ * made two entries. The old maximum age was half the usable journal length. In the new format,
+ * each block holds only 217 entries, but each bio only makes one entry. We convert the configured
+ * age so that the number of writes in a block map era is the same in the old and new formats. This
+ * keeps the bound on the amount of work required to recover the block map from the recovery
+ * journal the same across the format change. It also keeps the amortization of block map page
+ * writes to write bios the same.
+ */
+static inline block_count_t vdo_convert_maximum_age(block_count_t age)
+{
+ return DIV_ROUND_UP(age * RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK,
+ 2 * RECOVERY_JOURNAL_ENTRIES_PER_BLOCK);
+}
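+
+/*
+ * Worked example (illustrative only): with a configured age of 10 old-format
+ * journal blocks, one era covered 10 * 311 / 2 = 1555 write bios. In the new
+ * format those writes need DIV_ROUND_UP(1555, 217) = 8 journal blocks, which
+ * is exactly what the formula above returns: DIV_ROUND_UP(10 * 311, 2 * 217) = 8.
+ */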
+
+#endif /* VDO_BLOCK_MAP_H */
diff --git a/drivers/md/dm-vdo/completion.c b/drivers/md/dm-vdo/completion.c
new file mode 100644
index 000000000000..5ad85334632d
--- /dev/null
+++ b/drivers/md/dm-vdo/completion.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "completion.h"
+
+#include <linux/kernel.h>
+
+#include "logger.h"
+#include "permassert.h"
+
+#include "status-codes.h"
+#include "types.h"
+#include "vio.h"
+#include "vdo.h"
+
+/**
+ * DOC: vdo completions.
+ *
+ * Most of vdo's data structures are lock free, each either belonging to a single "zone," or
+ * divided into a number of zones whose accesses to the structure do not overlap. During normal
+ * operation, at most one thread will be operating in any given zone. Each zone has a
+ * vdo_work_queue which holds vdo_completions that are to be run in that zone. A completion may
+ * only be enqueued on one queue or operating in a single zone at a time.
+ *
+ * At each step of a multi-threaded operation, the completion performing the operation is given a
+ * callback, error handler, and thread id for the next step. A completion is "run" when it is
+ * operating on the correct thread (as specified by its callback_thread_id). If the value of its
+ * "result" field is an error (i.e. not VDO_SUCCESS), the function in its "error_handler" will be
+ * invoked. If the error_handler is NULL, or there is no error, the function set as its "callback"
+ * will be invoked. Generally, a completion will not be run directly, but rather will be
+ * "launched." In this case, it will check whether it is operating on the correct thread. If it is,
+ * it will run immediately. Otherwise, it will be enqueued on the vdo_work_queue associated with the
+ * completion's "callback_thread_id". When it is dequeued, it will be on the correct thread, and
+ * will get run. In some cases, the completion should get queued instead of running immediately,
+ * even if it is being launched from the correct thread. This is usually in cases where there is a
+ * long chain of callbacks, all on the same thread, which could overflow the stack. In such cases,
+ * the completion's "requeue" field should be set to true. Doing so will skip the current thread
+ * check and simply enqueue the completion.
+ *
+ * A completion may be "finished," in which case its "complete" field will be set to true before it
+ * is next run. It is a bug to attempt to set the result or re-finish a finished completion.
+ * Because a completion's fields are not safe to examine from any thread other than the one on
+ * which the completion is currently operating, this field is used only to aid in detecting
+ * programming errors. It can not be used for cross-thread checking on the status of an operation.
+ * A completion must be "reset" before it can be reused after it has been finished. Resetting will
+ * also clear any error from the result field.
+ **/
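A minimal sketch of the prepare/launch flow described above (editorial illustration, not part of the patch; step_one, step_two, handle_error, and the thread ids are hypothetical placeholders, while the vdo_* calls are the ones declared in completion.h):

static void step_two(struct vdo_completion *completion)
{
	/* Runs on whichever thread step_one named for it. */
	vdo_finish_completion(completion);
}

static void step_one(struct vdo_completion *completion)
{
	/* second_thread_id is a placeholder for a real thread id. */
	vdo_launch_completion_callback(completion, step_two, second_thread_id);
}

static void launch_example(struct vdo_completion *completion,
			   thread_id_t first_thread_id, void *parent)
{
	/* handle_error is a placeholder error handler. Launching may run the
	 * callback immediately or enqueue it, as described above. */
	vdo_prepare_completion(completion, step_one, handle_error,
			       first_thread_id, parent);
	vdo_launch_completion(completion);
}

If step_one were part of a long chain of same-thread callbacks, vdo_prepare_completion_for_requeue() could be used instead to force an enqueue and bound stack depth.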
+
+void vdo_initialize_completion(struct vdo_completion *completion,
+ struct vdo *vdo,
+ enum vdo_completion_type type)
+{
+ memset(completion, 0, sizeof(*completion));
+ completion->vdo = vdo;
+ completion->type = type;
+ vdo_reset_completion(completion);
+}
+
+static inline void assert_incomplete(struct vdo_completion *completion)
+{
+ VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
+}
+
+/**
+ * vdo_set_completion_result() - Set the result of a completion.
+ *
+ * Older errors will not be masked.
+ */
+void vdo_set_completion_result(struct vdo_completion *completion, int result)
+{
+ assert_incomplete(completion);
+ if (completion->result == VDO_SUCCESS)
+ completion->result = result;
+}
+
+/**
+ * vdo_launch_completion_with_priority() - Run or enqueue a completion.
+ * @priority: The priority at which to enqueue the completion.
+ *
+ * If called on the correct thread (i.e. the one specified in the completion's callback_thread_id
+ * field) and not marked for requeue, the completion will be run immediately. Otherwise, the
+ * completion will be enqueued on the specified thread.
+ */
+void vdo_launch_completion_with_priority(struct vdo_completion *completion,
+ enum vdo_completion_priority priority)
+{
+ thread_id_t callback_thread = completion->callback_thread_id;
+
+ if (completion->requeue || (callback_thread != vdo_get_callback_thread_id())) {
+ vdo_enqueue_completion(completion, priority);
+ return;
+ }
+
+ vdo_run_completion(completion);
+}
+
+/** vdo_finish_completion() - Mark a completion as complete and then launch it. */
+void vdo_finish_completion(struct vdo_completion *completion)
+{
+ assert_incomplete(completion);
+ completion->complete = true;
+ if (completion->callback != NULL)
+ vdo_launch_completion(completion);
+}
+
+void vdo_enqueue_completion(struct vdo_completion *completion,
+ enum vdo_completion_priority priority)
+{
+ struct vdo *vdo = completion->vdo;
+ thread_id_t thread_id = completion->callback_thread_id;
+
+ if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
+ "thread_id %u (completion type %d) is less than thread count %u",
+ thread_id, completion->type,
+ vdo->thread_config.thread_count) != VDO_SUCCESS)
+ BUG();
+
+ completion->requeue = false;
+ completion->priority = priority;
+ completion->my_queue = NULL;
+ vdo_enqueue_work_queue(vdo->threads[thread_id].queue, completion);
+}
+
+/**
+ * vdo_requeue_completion_if_needed() - Requeue a completion if not called on the specified thread.
+ *
+ * Return: True if the completion was requeued; callers may not access the completion in this case.
+ */
+bool vdo_requeue_completion_if_needed(struct vdo_completion *completion,
+ thread_id_t callback_thread_id)
+{
+ if (vdo_get_callback_thread_id() == callback_thread_id)
+ return false;
+
+ completion->callback_thread_id = callback_thread_id;
+ vdo_enqueue_completion(completion, VDO_WORK_Q_DEFAULT_PRIORITY);
+ return true;
+}
diff --git a/drivers/md/dm-vdo/completion.h b/drivers/md/dm-vdo/completion.h
new file mode 100644
index 000000000000..3407f34ce58c
--- /dev/null
+++ b/drivers/md/dm-vdo/completion.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_COMPLETION_H
+#define VDO_COMPLETION_H
+
+#include "permassert.h"
+
+#include "status-codes.h"
+#include "types.h"
+
+/**
+ * vdo_run_completion() - Run a completion's callback or error handler on the current thread.
+ *
+ * Context: This function must be called from the correct callback thread.
+ */
+static inline void vdo_run_completion(struct vdo_completion *completion)
+{
+ if ((completion->result != VDO_SUCCESS) && (completion->error_handler != NULL)) {
+ completion->error_handler(completion);
+ return;
+ }
+
+ completion->callback(completion);
+}
+
+void vdo_set_completion_result(struct vdo_completion *completion, int result);
+
+void vdo_initialize_completion(struct vdo_completion *completion, struct vdo *vdo,
+ enum vdo_completion_type type);
+
+/**
+ * vdo_reset_completion() - Reset a completion to a clean state, while keeping the type, vdo and
+ * parent information.
+ */
+static inline void vdo_reset_completion(struct vdo_completion *completion)
+{
+ completion->result = VDO_SUCCESS;
+ completion->complete = false;
+}
+
+void vdo_launch_completion_with_priority(struct vdo_completion *completion,
+ enum vdo_completion_priority priority);
+
+/**
+ * vdo_launch_completion() - Launch a completion with default priority.
+ */
+static inline void vdo_launch_completion(struct vdo_completion *completion)
+{
+ vdo_launch_completion_with_priority(completion, VDO_WORK_Q_DEFAULT_PRIORITY);
+}
+
+/**
+ * vdo_continue_completion() - Continue processing a completion.
+ * @result: The current result (will not mask older errors).
+ *
+ * Continue processing a completion by setting the current result and calling
+ * vdo_launch_completion().
+ */
+static inline void vdo_continue_completion(struct vdo_completion *completion, int result)
+{
+ vdo_set_completion_result(completion, result);
+ vdo_launch_completion(completion);
+}
+
+void vdo_finish_completion(struct vdo_completion *completion);
+
+/**
+ * vdo_fail_completion() - Set the result of a completion if it does not already have an error,
+ * then finish it.
+ */
+static inline void vdo_fail_completion(struct vdo_completion *completion, int result)
+{
+ vdo_set_completion_result(completion, result);
+ vdo_finish_completion(completion);
+}
+
+/**
+ * vdo_assert_completion_type() - Assert that a completion is of the correct type.
+ *
+ * Return: VDO_SUCCESS or an error
+ */
+static inline int vdo_assert_completion_type(struct vdo_completion *completion,
+ enum vdo_completion_type expected)
+{
+ return VDO_ASSERT(expected == completion->type,
+ "completion type should be %u, not %u", expected,
+ completion->type);
+}
+
+static inline void vdo_set_completion_callback(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ thread_id_t callback_thread_id)
+{
+ completion->callback = callback;
+ completion->callback_thread_id = callback_thread_id;
+}
+
+/**
+ * vdo_launch_completion_callback() - Set the callback for a completion and launch it immediately.
+ */
+static inline void vdo_launch_completion_callback(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ thread_id_t callback_thread_id)
+{
+ vdo_set_completion_callback(completion, callback, callback_thread_id);
+ vdo_launch_completion(completion);
+}
+
+/**
+ * vdo_prepare_completion() - Prepare a completion for launch.
+ *
+ * Resets the completion, and then sets its callback, error handler, callback thread, and parent.
+ */
+static inline void vdo_prepare_completion(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ vdo_action_fn error_handler,
+ thread_id_t callback_thread_id, void *parent)
+{
+ vdo_reset_completion(completion);
+ vdo_set_completion_callback(completion, callback, callback_thread_id);
+ completion->error_handler = error_handler;
+ completion->parent = parent;
+}
+
+/**
+ * vdo_prepare_completion_for_requeue() - Prepare a completion for launch ensuring that it will
+ * always be requeued.
+ *
+ * Resets the completion, and then sets its callback, error handler, callback thread, and parent.
+ */
+static inline void vdo_prepare_completion_for_requeue(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ vdo_action_fn error_handler,
+ thread_id_t callback_thread_id,
+ void *parent)
+{
+ vdo_prepare_completion(completion, callback, error_handler,
+ callback_thread_id, parent);
+ completion->requeue = true;
+}
+
+void vdo_enqueue_completion(struct vdo_completion *completion,
+ enum vdo_completion_priority priority);
+
+bool vdo_requeue_completion_if_needed(struct vdo_completion *completion,
+ thread_id_t callback_thread_id);
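A brief sketch of how vdo_requeue_completion_if_needed() is typically used from a step that must run on a particular thread (editorial illustration, not part of the patch; required_thread_id is a placeholder):

static void do_step_on_required_thread(struct vdo_completion *completion)
{
	/* On the wrong thread: hand off and stop touching the completion. */
	if (vdo_requeue_completion_if_needed(completion, required_thread_id))
		return;

	/* From here on, this code is running on required_thread_id. */
	vdo_finish_completion(completion);
}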
+
+#endif /* VDO_COMPLETION_H */
diff --git a/drivers/md/dm-vdo/constants.h b/drivers/md/dm-vdo/constants.h
new file mode 100644
index 000000000000..a8c4d6e24b38
--- /dev/null
+++ b/drivers/md/dm-vdo/constants.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_CONSTANTS_H
+#define VDO_CONSTANTS_H
+
+#include <linux/blkdev.h>
+
+#include "types.h"
+
+enum {
+ /*
+ * The maximum number of contiguous PBNs which will go to a single bio submission queue,
+ * assuming there is more than one queue.
+ */
+ VDO_BIO_ROTATION_INTERVAL_LIMIT = 1024,
+
+ /* The number of entries on a block map page */
+ VDO_BLOCK_MAP_ENTRIES_PER_PAGE = 812,
+
+ /* The origin of the flat portion of the block map */
+ VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN = 1,
+
+ /*
+ * The height of a block map tree. Assuming a root count of 60 and 812 entries per page,
+ * this is big enough to represent almost 95 PB of logical space.
+ */
+ VDO_BLOCK_MAP_TREE_HEIGHT = 5,
+
+ /* The default number of bio submission queues. */
+ DEFAULT_VDO_BIO_SUBMIT_QUEUE_COUNT = 4,
+
+ /* The number of contiguous PBNs to be submitted to a single bio queue. */
+ DEFAULT_VDO_BIO_SUBMIT_QUEUE_ROTATE_INTERVAL = 64,
+
+ /* The number of trees in the arboreal block map */
+ DEFAULT_VDO_BLOCK_MAP_TREE_ROOT_COUNT = 60,
+
+ /* The default size of the recovery journal, in blocks */
+ DEFAULT_VDO_RECOVERY_JOURNAL_SIZE = 32 * 1024,
+
+ /* The default size of each slab journal, in blocks */
+ DEFAULT_VDO_SLAB_JOURNAL_SIZE = 224,
+
+ /* Unit test minimum */
+ MINIMUM_VDO_SLAB_JOURNAL_BLOCKS = 2,
+
+ /*
+ * The initial size of lbn_operations and pbn_operations, which is based upon the expected
+ * maximum number of outstanding VIOs. This value was chosen to make it highly unlikely
+ * that the maps would need to be resized.
+ */
+ VDO_LOCK_MAP_CAPACITY = 10000,
+
+ /* The maximum number of logical zones */
+ MAX_VDO_LOGICAL_ZONES = 60,
+
+ /* The maximum number of physical zones */
+ MAX_VDO_PHYSICAL_ZONES = 16,
+
+ /* The base-2 logarithm of the maximum blocks in one slab */
+ MAX_VDO_SLAB_BITS = 23,
+
+ /* The maximum number of slabs the slab depot supports */
+ MAX_VDO_SLABS = 8192,
+
+ /*
+ * The maximum number of block map pages to load simultaneously during recovery or rebuild.
+ */
+ MAXIMUM_SIMULTANEOUS_VDO_BLOCK_MAP_RESTORATION_READS = 1024,
+
+ /* The maximum number of entries in the slab summary */
+ MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES = MAX_VDO_SLABS * MAX_VDO_PHYSICAL_ZONES,
+
+ /* The maximum number of total threads in a VDO thread configuration. */
+ MAXIMUM_VDO_THREADS = 100,
+
+ /* The maximum number of VIOs in the system at once */
+ MAXIMUM_VDO_USER_VIOS = 2048,
+
+ /* The only physical block size supported by VDO */
+ VDO_BLOCK_SIZE = 4096,
+
+ /* The number of sectors per block */
+ VDO_SECTORS_PER_BLOCK = (VDO_BLOCK_SIZE >> SECTOR_SHIFT),
+
+ /* The size of a sector that will not be torn */
+ VDO_SECTOR_SIZE = 512,
+
+ /* The physical block number reserved for storing the zero block */
+ VDO_ZERO_BLOCK = 0,
+};
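For reference (editorial note, not part of the patch): with the kernel's SECTOR_SHIFT of 9, VDO_SECTORS_PER_BLOCK works out to 4096 >> 9 = 8, and data-vio.c derives its sector-offset mask from it as VDO_SECTORS_PER_BLOCK - 1 = 7.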
+
+#endif /* VDO_CONSTANTS_H */
diff --git a/drivers/md/dm-vdo/cpu.h b/drivers/md/dm-vdo/cpu.h
new file mode 100644
index 000000000000..d6a2615ba657
--- /dev/null
+++ b/drivers/md/dm-vdo/cpu.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_CPU_H
+#define UDS_CPU_H
+
+#include <linux/cache.h>
+
+/**
+ * uds_prefetch_address() - Minimize cache-miss latency by attempting to move data into a CPU cache
+ * before it is accessed.
+ *
+ * @address: the address to fetch (may be invalid)
+ * @for_write: must be constant at compile time--false if for reading, true if for writing
+ */
+static inline void uds_prefetch_address(const void *address, bool for_write)
+{
+ /*
+ * for_write won't be a constant if we are compiled with optimization turned off, in which
+ * case prefetching really doesn't matter. clang can't figure out that if for_write is a
+ * constant, it can be passed as the second, mandatorily constant argument to prefetch(),
+ * at least currently on llvm 12.
+ */
+ if (__builtin_constant_p(for_write)) {
+ if (for_write)
+ __builtin_prefetch(address, true);
+ else
+ __builtin_prefetch(address, false);
+ }
+}
+
+/**
+ * uds_prefetch_range() - Minimize cache-miss latency by attempting to move a range of addresses
+ * into a CPU cache before they are accessed.
+ *
+ * @start: the starting address to fetch (may be invalid)
+ * @size: the number of bytes in the address range
+ * @for_write: must be constant at compile time--false if for reading, true if for writing
+ */
+static inline void uds_prefetch_range(const void *start, unsigned int size,
+ bool for_write)
+{
+ /*
+ * Count the number of cache lines to fetch, allowing for the address range to span an
+ * extra cache line boundary due to address alignment.
+ */
+ const char *address = (const char *) start;
+ unsigned int offset = ((uintptr_t) address % L1_CACHE_BYTES);
+ unsigned int cache_lines = (1 + ((size + offset) / L1_CACHE_BYTES));
+
+ while (cache_lines-- > 0) {
+ uds_prefetch_address(address, for_write);
+ address += L1_CACHE_BYTES;
+ }
+}
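A worked instance of the line count above (editorial illustration, not part of the patch), assuming L1_CACHE_BYTES is 64: prefetching a 10-byte range that starts 60 bytes into a cache line gives offset = 60 and cache_lines = 1 + ((10 + 60) / 64) = 2, matching the two lines the range actually touches. A caller warming the cache before scanning a structure might use it as:

	/* record is a placeholder for some read-mostly structure. */
	uds_prefetch_range(record, sizeof(*record), false);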
+
+#endif /* UDS_CPU_H */
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
new file mode 100644
index 000000000000..94f6f1ccfb7d
--- /dev/null
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -0,0 +1,2063 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "data-vio.h"
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device-mapper.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lz4.h>
+#include <linux/minmax.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "murmurhash3.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "dump.h"
+#include "encodings.h"
+#include "int-map.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "packer.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/**
+ * DOC: Bio flags.
+ *
+ * For certain flags set on user bios, if the user bio has not yet been acknowledged, setting those
+ * flags on our own bio(s) for that request may help underlying layers better fulfill the user
+ * bio's needs. This constant contains the aggregate of those flags; VDO strips all the other
+ * flags, as they convey incorrect information.
+ *
+ * These flags are only hints about IO importance, so they are irrelevant once the user bio has
+ * been finished. Any IO still outstanding for an already-finished user bio gains nothing from
+ * prioritizing the completion of a request that has already completed.
+ *
+ * Note that bio.c contains the complete list of flags we believe may be set; the following list
+ * explains the action taken with each of those flags VDO could receive:
+ *
+ * * REQ_SYNC: Passed down if the user bio is not yet completed, since it indicates the user bio
+ * completion is required for further work to be done by the issuer.
+ * * REQ_META: Passed down if the user bio is not yet completed, since it may mean the lower layer
+ * treats it as more urgent, similar to REQ_SYNC.
+ * * REQ_PRIO: Passed down if the user bio is not yet completed, since it indicates the user bio is
+ * important.
+ * * REQ_NOMERGE: Set only if the incoming bio was split; irrelevant to VDO IO.
+ * * REQ_IDLE: Set if the incoming bio had more IO quickly following; VDO's IO pattern doesn't
+ * match incoming IO, so this flag is incorrect for it.
+ * * REQ_FUA: Handled separately, and irrelevant to VDO IO otherwise.
+ * * REQ_RAHEAD: Passed down, as for reads it merely indicates trivial importance.
+ * * REQ_BACKGROUND: Not passed down, as VIOs are a limited resource and VDO needs them recycled
+ * ASAP to service heavy load, which is the only place where REQ_BACKGROUND might aid in load
+ * prioritization.
+ */
+static blk_opf_t PASSTHROUGH_FLAGS = (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEAD);
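A minimal sketch of how a pass-through mask like this is applied when VDO issues its own bio on behalf of a not-yet-acknowledged user bio (editorial illustration, not part of the patch; vdo_bio and user_bio are placeholders):

	/* Copy only the hint flags considered safe to pass through. */
	vdo_bio->bi_opf |= (user_bio->bi_opf & PASSTHROUGH_FLAGS);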
+
+/**
+ * DOC:
+ *
+ * The data_vio_pool maintains the pool of data_vios which a vdo uses to service incoming bios. For
+ * correctness, and in order to avoid potentially expensive or blocking memory allocations during
+ * normal operation, the number of concurrently active data_vios is capped. Furthermore, in order
+ * to avoid starvation of reads and writes, at most 75% of the data_vios may be used for
+ * discards. The data_vio_pool is responsible for enforcing these limits. Threads submitting bios
+ * for which a data_vio or discard permit is not available will block until the necessary
+ * resources are available. The pool is also responsible for distributing resources to blocked
+ * threads and waking them. Finally, the pool attempts to batch the work of recycling data_vios by
+ * performing the work of actually assigning resources to blocked threads or placing data_vios back
+ * into the pool on a single cpu at a time.
+ *
+ * The pool contains two "limiters", one for tracking data_vios and one for tracking discard
+ * permits. The limiters also provide safe cross-thread access to pool statistics without the need
+ * to take the pool's lock. When a thread submits a bio to a vdo device, it will first attempt to
+ * get a discard permit if it is a discard, and then to get a data_vio. If the necessary resources
+ * are available, the incoming bio will be assigned to the acquired data_vio, and it will be
+ * launched. However, if either of these are unavailable, the arrival time of the bio is recorded
+ * in the bio's bi_private field, the bio and its submitter are both queued on the appropriate
+ * limiter and the submitting thread will then put itself to sleep. (note that this mechanism will
+ * break if jiffies are only 32 bits.)
+ *
+ * Whenever a data_vio has completed processing for the bio it was servicing, release_data_vio()
+ * will be called on it. This function will add the data_vio to a funnel queue, and then check the
+ * state of the pool. If the pool is not currently processing released data_vios, the pool's
+ * completion will be enqueued on a cpu queue. This obviates the need for the releasing threads to
+ * hold the pool's lock, and also batches release work while avoiding starvation of the cpu
+ * threads.
+ *
+ * Whenever the pool's completion is run on a cpu thread, it calls process_release_callback() which
+ * processes a batch of returned data_vios (currently at most 32) from the pool's funnel queue. For
+ * each data_vio, it first checks whether that data_vio was processing a discard. If so, and there
+ * is a blocked bio waiting for a discard permit, that permit is notionally transferred to the
+ * eldest discard waiter, and that waiter is moved to the end of the list of discard bios waiting
+ * for a data_vio. If there are no discard waiters, the discard permit is returned to the pool.
+ * Next, the data_vio is assigned to the oldest blocked bio which either has a discard permit or
+ * doesn't need one, and is relaunched. If no such bio exists, the data_vio is returned to the
+ * pool. Finally, if any waiting bios were launched, the threads which blocked trying to submit
+ * them are awakened.
+ */
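A rough sketch of how such a pool is created and fed (editorial illustration, not part of the patch; example_start_pool and pool_size are placeholders), sized per the 75% discard rule described above:

static int example_start_pool(struct vdo *vdo, data_vio_count_t pool_size,
			      struct data_vio_pool **pool_ptr)
{
	/* Allow at most 75% of the data_vios to be used for discards. */
	return make_data_vio_pool(vdo, pool_size, (pool_size * 3) / 4, pool_ptr);
}

The bio submission path then hands each incoming bio to vdo_launch_bio(pool, bio), which blocks the submitter whenever no data_vio (or, for a discard, no discard permit) is free.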
+
+#define DATA_VIO_RELEASE_BATCH_SIZE 128
+
+static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1;
+static const u32 COMPRESSION_STATUS_MASK = 0xff;
+static const u32 MAY_NOT_COMPRESS_MASK = 0x80000000;
+
+struct limiter;
+typedef void (*assigner_fn)(struct limiter *limiter);
+
+/* Bookkeeping structure for a single type of resource. */
+struct limiter {
+ /* The data_vio_pool to which this limiter belongs */
+ struct data_vio_pool *pool;
+ /* The maximum number of data_vios available */
+ data_vio_count_t limit;
+ /* The number of resources in use */
+ data_vio_count_t busy;
+ /* The maximum number of resources ever simultaneously in use */
+ data_vio_count_t max_busy;
+ /* The number of resources to release */
+ data_vio_count_t release_count;
+ /* The number of waiters to wake */
+ data_vio_count_t wake_count;
+ /* The list of waiting bios which are known to process_release_callback() */
+ struct bio_list waiters;
+ /* The list of waiting bios which are not yet known to process_release_callback() */
+ struct bio_list new_waiters;
+ /* The list of waiters which have their permits */
+ struct bio_list *permitted_waiters;
+ /* The function for assigning a resource to a waiter */
+ assigner_fn assigner;
+ /* The queue of blocked threads */
+ wait_queue_head_t blocked_threads;
+ /* The arrival time of the eldest waiter */
+ u64 arrival;
+};
+
+/*
+ * A data_vio_pool is a collection of preallocated data_vios which may be acquired from any thread,
+ * and are released in batches.
+ */
+struct data_vio_pool {
+ /* Completion for scheduling releases */
+ struct vdo_completion completion;
+ /* The administrative state of the pool */
+ struct admin_state state;
+ /* Lock protecting the pool */
+ spinlock_t lock;
+ /* The main limiter controlling the total data_vios in the pool. */
+ struct limiter limiter;
+ /* The limiter controlling data_vios for discard */
+ struct limiter discard_limiter;
+ /* The list of bios which have discard permits but still need a data_vio */
+ struct bio_list permitted_discards;
+ /* The list of available data_vios */
+ struct list_head available;
+ /* The queue of data_vios waiting to be returned to the pool */
+ struct funnel_queue *queue;
+ /* Whether the pool is processing, or scheduled to process releases */
+ atomic_t processing;
+ /* The data vios in the pool */
+ struct data_vio data_vios[];
+};
+
+static const char * const ASYNC_OPERATION_NAMES[] = {
+ "launch",
+ "acknowledge_write",
+ "acquire_hash_lock",
+ "attempt_logical_block_lock",
+ "lock_duplicate_pbn",
+ "check_for_duplication",
+ "cleanup",
+ "compress_data_vio",
+ "find_block_map_slot",
+ "get_mapped_block_for_read",
+ "get_mapped_block_for_write",
+ "hash_data_vio",
+ "journal_remapping",
+ "vdo_attempt_packing",
+ "put_mapped_block",
+ "read_data_vio",
+ "update_dedupe_index",
+ "update_reference_counts",
+ "verify_duplication",
+ "write_data_vio",
+};
+
+/* The steps taken cleaning up a VIO, in the order they are performed. */
+enum data_vio_cleanup_stage {
+ VIO_CLEANUP_START,
+ VIO_RELEASE_HASH_LOCK = VIO_CLEANUP_START,
+ VIO_RELEASE_ALLOCATED,
+ VIO_RELEASE_RECOVERY_LOCKS,
+ VIO_RELEASE_LOGICAL,
+ VIO_CLEANUP_DONE
+};
+
+static inline struct data_vio_pool * __must_check
+as_data_vio_pool(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_DATA_VIO_POOL_COMPLETION);
+ return container_of(completion, struct data_vio_pool, completion);
+}
+
+static inline u64 get_arrival_time(struct bio *bio)
+{
+ return (u64) bio->bi_private;
+}
+
+/**
+ * check_for_drain_complete_locked() - Check whether a data_vio_pool has no outstanding data_vios
+ * or waiters while holding the pool's lock.
+ */
+static bool check_for_drain_complete_locked(struct data_vio_pool *pool)
+{
+ if (pool->limiter.busy > 0)
+ return false;
+
+ VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
+ "no outstanding discard permits");
+
+ return (bio_list_empty(&pool->limiter.new_waiters) &&
+ bio_list_empty(&pool->discard_limiter.new_waiters));
+}
+
+static void initialize_lbn_lock(struct data_vio *data_vio, logical_block_number_t lbn)
+{
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ zone_count_t zone_number;
+ struct lbn_lock *lock = &data_vio->logical;
+
+ lock->lbn = lbn;
+ lock->locked = false;
+ vdo_waitq_init(&lock->waiters);
+ zone_number = vdo_compute_logical_zone(data_vio);
+ lock->zone = &vdo->logical_zones->zones[zone_number];
+}
+
+static void launch_locked_request(struct data_vio *data_vio)
+{
+ data_vio->logical.locked = true;
+ if (data_vio->write) {
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+
+ if (vdo_is_read_only(vdo)) {
+ continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
+ return;
+ }
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT;
+ vdo_find_block_map_slot(data_vio);
+}
+
+static void acknowledge_data_vio(struct data_vio *data_vio)
+{
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct bio *bio = data_vio->user_bio;
+ int error = vdo_status_to_errno(data_vio->vio.completion.result);
+
+ if (bio == NULL)
+ return;
+
+ VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <=
+ (u32) (VDO_BLOCK_SIZE - data_vio->offset)),
+ "data_vio to acknowledge is not an incomplete discard");
+
+ data_vio->user_bio = NULL;
+ vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
+ if (data_vio->is_partial)
+ vdo_count_bios(&vdo->stats.bios_acknowledged_partial, bio);
+
+ bio->bi_status = errno_to_blk_status(error);
+ bio_endio(bio);
+}
+
+static void copy_to_bio(struct bio *bio, char *data_ptr)
+{
+ struct bio_vec biovec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(biovec, bio, iter) {
+ memcpy_to_bvec(&biovec, data_ptr);
+ data_ptr += biovec.bv_len;
+ }
+}
+
+struct data_vio_compression_status get_data_vio_compression_status(struct data_vio *data_vio)
+{
+ u32 packed = atomic_read(&data_vio->compression.status);
+
+ /* pairs with cmpxchg in set_data_vio_compression_status */
+ smp_rmb();
+ return (struct data_vio_compression_status) {
+ .stage = packed & COMPRESSION_STATUS_MASK,
+ .may_not_compress = ((packed & MAY_NOT_COMPRESS_MASK) != 0),
+ };
+}
+
+/**
+ * pack_status() - Convert a data_vio_compression_status into a u32 which may be stored
+ * atomically.
+ * @status: The state to convert.
+ *
+ * Return: The compression state packed into a u32.
+ */
+static u32 __must_check pack_status(struct data_vio_compression_status status)
+{
+ return status.stage | (status.may_not_compress ? MAY_NOT_COMPRESS_MASK : 0);
+}
+
+/**
+ * set_data_vio_compression_status() - Set the compression status of a data_vio.
+ * @status: The expected current status of the data_vio.
+ * @new_status: The status to set.
+ *
+ * Return: true if the new status was set, false if the data_vio's compression status did not
+ * match the expected state, and so was left unchanged.
+ */
+static bool __must_check
+set_data_vio_compression_status(struct data_vio *data_vio,
+ struct data_vio_compression_status status,
+ struct data_vio_compression_status new_status)
+{
+ u32 actual;
+ u32 expected = pack_status(status);
+ u32 replacement = pack_status(new_status);
+
+ /*
+	 * Extra barriers because this was originally developed using a CAS operation that implicitly
+ * had them.
+ */
+ smp_mb__before_atomic();
+ actual = atomic_cmpxchg(&data_vio->compression.status, expected, replacement);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+ return (expected == actual);
+}
+
+struct data_vio_compression_status advance_data_vio_compression_stage(struct data_vio *data_vio)
+{
+ for (;;) {
+ struct data_vio_compression_status status =
+ get_data_vio_compression_status(data_vio);
+ struct data_vio_compression_status new_status = status;
+
+ if (status.stage == DATA_VIO_POST_PACKER) {
+ /* We're already in the last stage. */
+ return status;
+ }
+
+ if (status.may_not_compress) {
+ /*
+			 * Compression has been disallowed for this VIO, so skip the rest of the
+ * path and go to the end.
+ */
+ new_status.stage = DATA_VIO_POST_PACKER;
+ } else {
+ /* Go to the next state. */
+ new_status.stage++;
+ }
+
+ if (set_data_vio_compression_status(data_vio, status, new_status))
+ return new_status;
+
+ /* Another thread changed the status out from under us so try again. */
+ }
+}
+
+/**
+ * cancel_data_vio_compression() - Prevent this data_vio from being compressed or packed.
+ *
+ * Return: true if the data_vio is in the packer and the caller was the first caller to cancel it.
+ */
+bool cancel_data_vio_compression(struct data_vio *data_vio)
+{
+ struct data_vio_compression_status status, new_status;
+
+ for (;;) {
+ status = get_data_vio_compression_status(data_vio);
+ if (status.may_not_compress || (status.stage == DATA_VIO_POST_PACKER)) {
+ /* This data_vio is already set up to not block in the packer. */
+ break;
+ }
+
+ new_status.stage = status.stage;
+ new_status.may_not_compress = true;
+
+ if (set_data_vio_compression_status(data_vio, status, new_status))
+ break;
+ }
+
+ return ((status.stage == DATA_VIO_PACKING) && !status.may_not_compress);
+}
+
+/**
+ * attempt_logical_block_lock() - Attempt to acquire the lock on a logical block.
+ * @completion: The data_vio for an external data request as a completion.
+ *
+ * This is the start of the path for all external requests. It is registered in launch_data_vio().
+ */
+static void attempt_logical_block_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct lbn_lock *lock = &data_vio->logical;
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct data_vio *lock_holder;
+ int result;
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ if (data_vio->logical.lbn >= vdo->states.vdo.config.logical_blocks) {
+ continue_data_vio_with_error(data_vio, VDO_OUT_OF_RANGE);
+ return;
+ }
+
+ result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
+ data_vio, false, (void **) &lock_holder);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ if (lock_holder == NULL) {
+ /* We got the lock */
+ launch_locked_request(data_vio);
+ return;
+ }
+
+ result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held");
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ /*
+ * If the new request is a pure read request (not read-modify-write) and the lock_holder is
+ * writing and has received an allocation, service the read request immediately by copying
+ * data from the lock_holder to avoid having to flush the write out of the packer just to
+ * prevent the read from waiting indefinitely. If the lock_holder does not yet have an
+ * allocation, prevent it from blocking in the packer and wait on it. This is necessary in
+ * order to prevent returning data that may not have actually been written.
+ */
+ if (!data_vio->write && READ_ONCE(lock_holder->allocation_succeeded)) {
+ copy_to_bio(data_vio->user_bio, lock_holder->vio.data + data_vio->offset);
+ acknowledge_data_vio(data_vio);
+ complete_data_vio(completion);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK;
+ vdo_waitq_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter);
+
+ /*
+ * Prevent writes and read-modify-writes from blocking indefinitely on lock holders in the
+ * packer.
+ */
+ if (lock_holder->write && cancel_data_vio_compression(lock_holder)) {
+ data_vio->compression.lock_holder = lock_holder;
+ launch_data_vio_packer_callback(data_vio,
+ vdo_remove_lock_holder_from_packer);
+ }
+}
+
+/**
+ * launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the
+ * same parent and other state and send it on its way.
+ */
+static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lbn)
+{
+ struct vdo_completion *completion = &data_vio->vio.completion;
+
+ /*
+ * Clearing the tree lock must happen before initializing the LBN lock, which also adds
+ * information to the tree lock.
+ */
+ memset(&data_vio->tree_lock, 0, sizeof(data_vio->tree_lock));
+ initialize_lbn_lock(data_vio, lbn);
+ INIT_LIST_HEAD(&data_vio->hash_lock_entry);
+ INIT_LIST_HEAD(&data_vio->write_entry);
+
+ memset(&data_vio->allocation, 0, sizeof(data_vio->allocation));
+
+ data_vio->is_duplicate = false;
+
+ memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
+ memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+ vdo_reset_completion(completion);
+ completion->error_handler = handle_data_vio_error;
+ set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
+ vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
+}
+
+static bool is_zero_block(char *block)
+{
+ int i;
+
+ for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
+ if (*((u64 *) &block[i]))
+ return false;
+ }
+
+ return true;
+}
+
+static void copy_from_bio(struct bio *bio, char *data_ptr)
+{
+ struct bio_vec biovec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(biovec, bio, iter) {
+ memcpy_from_bvec(data_ptr, &biovec);
+ data_ptr += biovec.bv_len;
+ }
+}
+
+static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio)
+{
+ logical_block_number_t lbn;
+ /*
+ * Zero out the fields which don't need to be preserved (i.e. which are not pointers to
+ * separately allocated objects).
+ */
+ memset(data_vio, 0, offsetof(struct data_vio, vio));
+ memset(&data_vio->compression, 0, offsetof(struct compression_state, block));
+
+ data_vio->user_bio = bio;
+ data_vio->offset = to_bytes(bio->bi_iter.bi_sector & VDO_SECTORS_PER_BLOCK_MASK);
+ data_vio->is_partial = (bio->bi_iter.bi_size < VDO_BLOCK_SIZE) || (data_vio->offset != 0);
+
+ /*
+ * Discards behave very differently than other requests when coming in from device-mapper.
+ * We have to be able to handle any size discards and various sector offsets within a
+ * block.
+ */
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ data_vio->remaining_discard = bio->bi_iter.bi_size;
+ data_vio->write = true;
+ data_vio->is_discard = true;
+ if (data_vio->is_partial) {
+ vdo_count_bios(&vdo->stats.bios_in_partial, bio);
+ data_vio->read = true;
+ }
+ } else if (data_vio->is_partial) {
+ vdo_count_bios(&vdo->stats.bios_in_partial, bio);
+ data_vio->read = true;
+ if (bio_data_dir(bio) == WRITE)
+ data_vio->write = true;
+ } else if (bio_data_dir(bio) == READ) {
+ data_vio->read = true;
+ } else {
+ /*
+ * Copy the bio data to a char array so that we can continue to use the data after
+ * we acknowledge the bio.
+ */
+ copy_from_bio(bio, data_vio->vio.data);
+ data_vio->is_zero = is_zero_block(data_vio->vio.data);
+ data_vio->write = true;
+ }
+
+ if (data_vio->user_bio->bi_opf & REQ_FUA)
+ data_vio->fua = true;
+
+ lbn = (bio->bi_iter.bi_sector - vdo->starting_sector_offset) / VDO_SECTORS_PER_BLOCK;
+ launch_data_vio(data_vio, lbn);
+}
+
+static void assign_data_vio(struct limiter *limiter, struct data_vio *data_vio)
+{
+ struct bio *bio = bio_list_pop(limiter->permitted_waiters);
+
+ launch_bio(limiter->pool->completion.vdo, data_vio, bio);
+ limiter->wake_count++;
+
+ bio = bio_list_peek(limiter->permitted_waiters);
+ limiter->arrival = ((bio == NULL) ? U64_MAX : get_arrival_time(bio));
+}
+
+static void assign_discard_permit(struct limiter *limiter)
+{
+ struct bio *bio = bio_list_pop(&limiter->waiters);
+
+ if (limiter->arrival == U64_MAX)
+ limiter->arrival = get_arrival_time(bio);
+
+ bio_list_add(limiter->permitted_waiters, bio);
+}
+
+static void get_waiters(struct limiter *limiter)
+{
+ bio_list_merge(&limiter->waiters, &limiter->new_waiters);
+ bio_list_init(&limiter->new_waiters);
+}
+
+static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
+{
+ struct data_vio *data_vio =
+ list_first_entry(&pool->available, struct data_vio, pool_entry);
+
+ list_del_init(&data_vio->pool_entry);
+ return data_vio;
+}
+
+static void assign_data_vio_to_waiter(struct limiter *limiter)
+{
+ assign_data_vio(limiter, get_available_data_vio(limiter->pool));
+}
+
+static void update_limiter(struct limiter *limiter)
+{
+ struct bio_list *waiters = &limiter->waiters;
+ data_vio_count_t available = limiter->limit - limiter->busy;
+
+ VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
+ "Release count %u is not more than busy count %u",
+ limiter->release_count, limiter->busy);
+
+ get_waiters(limiter);
+ for (; (limiter->release_count > 0) && !bio_list_empty(waiters); limiter->release_count--)
+ limiter->assigner(limiter);
+
+ if (limiter->release_count > 0) {
+ WRITE_ONCE(limiter->busy, limiter->busy - limiter->release_count);
+ limiter->release_count = 0;
+ return;
+ }
+
+ for (; (available > 0) && !bio_list_empty(waiters); available--)
+ limiter->assigner(limiter);
+
+ WRITE_ONCE(limiter->busy, limiter->limit - available);
+ if (limiter->max_busy < limiter->busy)
+ WRITE_ONCE(limiter->max_busy, limiter->busy);
+}
+
+/**
+ * schedule_releases() - Ensure that release processing is scheduled.
+ *
+ * If this call switches the state to processing, enqueue. Otherwise, some other thread has already
+ * done so.
+ */
+static void schedule_releases(struct data_vio_pool *pool)
+{
+ /* Pairs with the barrier in process_release_callback(). */
+ smp_mb__before_atomic();
+ if (atomic_cmpxchg(&pool->processing, false, true))
+ return;
+
+ pool->completion.requeue = true;
+ vdo_launch_completion_with_priority(&pool->completion,
+ CPU_Q_COMPLETE_VIO_PRIORITY);
+}
+
+static void reuse_or_release_resources(struct data_vio_pool *pool,
+ struct data_vio *data_vio,
+ struct list_head *returned)
+{
+ if (data_vio->remaining_discard > 0) {
+ if (bio_list_empty(&pool->discard_limiter.waiters)) {
+ /* Return the data_vio's discard permit. */
+ pool->discard_limiter.release_count++;
+ } else {
+ assign_discard_permit(&pool->discard_limiter);
+ }
+ }
+
+ if (pool->limiter.arrival < pool->discard_limiter.arrival) {
+ assign_data_vio(&pool->limiter, data_vio);
+ } else if (pool->discard_limiter.arrival < U64_MAX) {
+ assign_data_vio(&pool->discard_limiter, data_vio);
+ } else {
+ list_add(&data_vio->pool_entry, returned);
+ pool->limiter.release_count++;
+ }
+}
+
+/**
+ * process_release_callback() - Process a batch of data_vio releases.
+ * @completion: The pool with data_vios to release.
+ */
+static void process_release_callback(struct vdo_completion *completion)
+{
+ struct data_vio_pool *pool = as_data_vio_pool(completion);
+ bool reschedule;
+ bool drained;
+ data_vio_count_t processed;
+ data_vio_count_t to_wake;
+ data_vio_count_t discards_to_wake;
+ LIST_HEAD(returned);
+
+ spin_lock(&pool->lock);
+ get_waiters(&pool->discard_limiter);
+ get_waiters(&pool->limiter);
+ spin_unlock(&pool->lock);
+
+ if (pool->limiter.arrival == U64_MAX) {
+ struct bio *bio = bio_list_peek(&pool->limiter.waiters);
+
+ if (bio != NULL)
+ pool->limiter.arrival = get_arrival_time(bio);
+ }
+
+ for (processed = 0; processed < DATA_VIO_RELEASE_BATCH_SIZE; processed++) {
+ struct data_vio *data_vio;
+ struct funnel_queue_entry *entry = vdo_funnel_queue_poll(pool->queue);
+
+ if (entry == NULL)
+ break;
+
+ data_vio = as_data_vio(container_of(entry, struct vdo_completion,
+ work_queue_entry_link));
+ acknowledge_data_vio(data_vio);
+ reuse_or_release_resources(pool, data_vio, &returned);
+ }
+
+ spin_lock(&pool->lock);
+ /*
+ * There is a race where waiters could be added while we are in the unlocked section above.
+ * Those waiters could not see the resources we are now about to release, so we assign
+ * those resources now as we have no guarantee of being rescheduled. This is handled in
+ * update_limiter().
+ */
+ update_limiter(&pool->discard_limiter);
+ list_splice(&returned, &pool->available);
+ update_limiter(&pool->limiter);
+ to_wake = pool->limiter.wake_count;
+ pool->limiter.wake_count = 0;
+ discards_to_wake = pool->discard_limiter.wake_count;
+ pool->discard_limiter.wake_count = 0;
+
+ atomic_set(&pool->processing, false);
+ /* Pairs with the barrier in schedule_releases(). */
+ smp_mb();
+
+ reschedule = !vdo_is_funnel_queue_empty(pool->queue);
+ drained = (!reschedule &&
+ vdo_is_state_draining(&pool->state) &&
+ check_for_drain_complete_locked(pool));
+ spin_unlock(&pool->lock);
+
+ if (to_wake > 0)
+ wake_up_nr(&pool->limiter.blocked_threads, to_wake);
+
+ if (discards_to_wake > 0)
+ wake_up_nr(&pool->discard_limiter.blocked_threads, discards_to_wake);
+
+ if (reschedule)
+ schedule_releases(pool);
+ else if (drained)
+ vdo_finish_draining(&pool->state);
+}
+
+static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *pool,
+ assigner_fn assigner, data_vio_count_t limit)
+{
+ limiter->pool = pool;
+ limiter->assigner = assigner;
+ limiter->limit = limit;
+ limiter->arrival = U64_MAX;
+ init_waitqueue_head(&limiter->blocked_threads);
+}
+
+/**
+ * initialize_data_vio() - Allocate the components of a data_vio.
+ *
+ * The caller is responsible for cleaning up the data_vio on error.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)
+{
+ struct bio *bio;
+ int result;
+
+ BUILD_BUG_ON(VDO_BLOCK_SIZE > PAGE_SIZE);
+ result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data",
+ &data_vio->vio.data);
+ if (result != VDO_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "data_vio data allocation failure");
+
+ result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block",
+ &data_vio->compression.block);
+ if (result != VDO_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "data_vio compressed block allocation failure");
+ }
+
+ result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch",
+ &data_vio->scratch_block);
+ if (result != VDO_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "data_vio scratch allocation failure");
+
+ result = vdo_create_bio(&bio);
+ if (result != VDO_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "data_vio data bio allocation failure");
+
+ vdo_initialize_completion(&data_vio->decrement_completion, vdo,
+ VDO_DECREMENT_COMPLETION);
+ initialize_vio(&data_vio->vio, bio, 1, VIO_TYPE_DATA, VIO_PRIORITY_DATA, vdo);
+
+ return VDO_SUCCESS;
+}
+
+static void destroy_data_vio(struct data_vio *data_vio)
+{
+ if (data_vio == NULL)
+ return;
+
+ vdo_free_bio(vdo_forget(data_vio->vio.bio));
+ vdo_free(vdo_forget(data_vio->vio.data));
+ vdo_free(vdo_forget(data_vio->compression.block));
+ vdo_free(vdo_forget(data_vio->scratch_block));
+}
+
+/**
+ * make_data_vio_pool() - Initialize a data_vio pool.
+ * @vdo: The vdo to which the pool will belong.
+ * @pool_size: The number of data_vios in the pool.
+ * @discard_limit: The maximum number of data_vios which may be used for discards.
+ * @pool_ptr: A pointer to hold the newly allocated pool.
+ */
+int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
+ data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr)
+{
+ int result;
+ struct data_vio_pool *pool;
+ data_vio_count_t i;
+
+ result = vdo_allocate_extended(struct data_vio_pool, pool_size, struct data_vio,
+ __func__, &pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size),
+ "discard limit does not exceed pool size");
+ initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit,
+ discard_limit);
+ pool->discard_limiter.permitted_waiters = &pool->permitted_discards;
+ initialize_limiter(&pool->limiter, pool, assign_data_vio_to_waiter, pool_size);
+ pool->limiter.permitted_waiters = &pool->limiter.waiters;
+ INIT_LIST_HEAD(&pool->available);
+ spin_lock_init(&pool->lock);
+ vdo_set_admin_state_code(&pool->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
+ vdo_prepare_completion(&pool->completion, process_release_callback,
+ process_release_callback, vdo->thread_config.cpu_thread,
+ NULL);
+
+ result = vdo_make_funnel_queue(&pool->queue);
+ if (result != VDO_SUCCESS) {
+ free_data_vio_pool(vdo_forget(pool));
+ return result;
+ }
+
+ for (i = 0; i < pool_size; i++) {
+ struct data_vio *data_vio = &pool->data_vios[i];
+
+ result = initialize_data_vio(data_vio, vdo);
+ if (result != VDO_SUCCESS) {
+ destroy_data_vio(data_vio);
+ free_data_vio_pool(pool);
+ return result;
+ }
+
+ list_add(&data_vio->pool_entry, &pool->available);
+ }
+
+ *pool_ptr = pool;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_data_vio_pool() - Free a data_vio_pool and the data_vios in it.
+ *
+ * All data_vios must be returned to the pool before calling this function.
+ */
+void free_data_vio_pool(struct data_vio_pool *pool)
+{
+ struct data_vio *data_vio, *tmp;
+
+ if (pool == NULL)
+ return;
+
+ /*
+ * Pairs with the barrier in process_release_callback(). Possibly not needed since it
+ * caters to an enqueue vs. free race.
+ */
+ smp_mb();
+ BUG_ON(atomic_read(&pool->processing));
+
+ spin_lock(&pool->lock);
+ VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0),
+ "data_vio pool must not have %u busy entries when being freed",
+ pool->limiter.busy);
+ VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
+ bio_list_empty(&pool->limiter.new_waiters)),
+ "data_vio pool must not have threads waiting to read or write when being freed");
+ VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
+ bio_list_empty(&pool->discard_limiter.new_waiters)),
+ "data_vio pool must not have threads waiting to discard when being freed");
+ spin_unlock(&pool->lock);
+
+ list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) {
+ list_del_init(&data_vio->pool_entry);
+ destroy_data_vio(data_vio);
+ }
+
+ vdo_free_funnel_queue(vdo_forget(pool->queue));
+ vdo_free(pool);
+}
+
+static bool acquire_permit(struct limiter *limiter)
+{
+ if (limiter->busy >= limiter->limit)
+ return false;
+
+ WRITE_ONCE(limiter->busy, limiter->busy + 1);
+ if (limiter->max_busy < limiter->busy)
+ WRITE_ONCE(limiter->max_busy, limiter->busy);
+ return true;
+}
+
+static void wait_permit(struct limiter *limiter, struct bio *bio)
+ __releases(&limiter->pool->lock)
+{
+ DEFINE_WAIT(wait);
+
+ bio_list_add(&limiter->new_waiters, bio);
+ prepare_to_wait_exclusive(&limiter->blocked_threads, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&limiter->pool->lock);
+ io_schedule();
+ finish_wait(&limiter->blocked_threads, &wait);
+}
+
+/**
+ * vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it.
+ *
+ * This will block if data_vios or discard permits are not available.
+ */
+void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio)
+{
+ struct data_vio *data_vio;
+
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
+ "data_vio_pool not quiescent on acquire");
+
+ bio->bi_private = (void *) jiffies;
+ spin_lock(&pool->lock);
+ if ((bio_op(bio) == REQ_OP_DISCARD) &&
+ !acquire_permit(&pool->discard_limiter)) {
+ wait_permit(&pool->discard_limiter, bio);
+ return;
+ }
+
+ if (!acquire_permit(&pool->limiter)) {
+ wait_permit(&pool->limiter, bio);
+ return;
+ }
+
+ data_vio = get_available_data_vio(pool);
+ spin_unlock(&pool->lock);
+ launch_bio(pool->completion.vdo, data_vio, bio);
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_drain(struct admin_state *state)
+{
+ bool drained;
+ struct data_vio_pool *pool = container_of(state, struct data_vio_pool, state);
+
+ spin_lock(&pool->lock);
+ drained = check_for_drain_complete_locked(pool);
+ spin_unlock(&pool->lock);
+
+ if (drained)
+ vdo_finish_draining(state);
+}
+
+static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
+ "%s called on cpu thread", name);
+}
+
+/**
+ * drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool.
+ * @completion: The completion to notify when the pool has drained.
+ */
+void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
+{
+ assert_on_vdo_cpu_thread(completion->vdo, __func__);
+ vdo_start_draining(&pool->state, VDO_ADMIN_STATE_SUSPENDING, completion,
+ initiate_drain);
+}
+
+/**
+ * resume_data_vio_pool() - Resume a data_vio pool.
+ * @completion: The completion to notify when the pool has resumed.
+ */
+void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
+{
+ assert_on_vdo_cpu_thread(completion->vdo, __func__);
+ vdo_continue_completion(completion, vdo_resume_if_quiescent(&pool->state));
+}
+
+static void dump_limiter(const char *name, struct limiter *limiter)
+{
+ vdo_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy,
+ limiter->limit, limiter->max_busy,
+ ((bio_list_empty(&limiter->waiters) &&
+ bio_list_empty(&limiter->new_waiters)) ?
+ "no waiters" : "has waiters"));
+}
+
+/**
+ * dump_data_vio_pool() - Dump a data_vio pool to the log.
+ * @dump_vios: Whether to dump the details of each busy data_vio as well.
+ */
+void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
+{
+ /*
+ * In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the
+ * second clock tick). These numbers were picked based on experiments with lab machines.
+ */
+ static const int ELEMENTS_PER_BATCH = 35;
+ static const int SLEEP_FOR_SYSLOG = 4000;
+
+ if (pool == NULL)
+ return;
+
+ spin_lock(&pool->lock);
+ dump_limiter("data_vios", &pool->limiter);
+ dump_limiter("discard permits", &pool->discard_limiter);
+ if (dump_vios) {
+ int i;
+ int dumped = 0;
+
+ for (i = 0; i < pool->limiter.limit; i++) {
+ struct data_vio *data_vio = &pool->data_vios[i];
+
+ if (!list_empty(&data_vio->pool_entry))
+ continue;
+
+ dump_data_vio(data_vio);
+ if (++dumped >= ELEMENTS_PER_BATCH) {
+ spin_unlock(&pool->lock);
+ dumped = 0;
+ fsleep(SLEEP_FOR_SYSLOG);
+ spin_lock(&pool->lock);
+ }
+ }
+ }
+
+ spin_unlock(&pool->lock);
+}
+
+data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->discard_limiter.busy);
+}
+
+data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->discard_limiter.limit);
+}
+
+data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->discard_limiter.max_busy);
+}
+
+int set_data_vio_pool_discard_limit(struct data_vio_pool *pool, data_vio_count_t limit)
+{
+ if (get_data_vio_pool_request_limit(pool) < limit) {
+		/* The discard limit may not be higher than the data_vio limit. */
+ return -EINVAL;
+ }
+
+ spin_lock(&pool->lock);
+ pool->discard_limiter.limit = limit;
+ spin_unlock(&pool->lock);
+
+ return VDO_SUCCESS;
+}
+
+data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->limiter.busy);
+}
+
+data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->limiter.limit);
+}
+
+data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->limiter.max_busy);
+}
+
+static void update_data_vio_error_stats(struct data_vio *data_vio)
+{
+ u8 index = 0;
+ static const char * const operations[] = {
+ [0] = "empty",
+ [1] = "read",
+ [2] = "write",
+ [3] = "read-modify-write",
+ [5] = "read+fua",
+ [6] = "write+fua",
+ [7] = "read-modify-write+fua",
+ };
+
+ if (data_vio->read)
+ index = 1;
+
+ if (data_vio->write)
+ index += 2;
+
+ if (data_vio->fua)
+ index += 4;
+
+ update_vio_error_stats(&data_vio->vio,
+ "Completing %s vio for LBN %llu with error after %s",
+ operations[index],
+ (unsigned long long) data_vio->logical.lbn,
+ get_data_vio_operation_name(data_vio));
+}
+
+static void perform_cleanup_stage(struct data_vio *data_vio,
+ enum data_vio_cleanup_stage stage);
+
+/**
+ * release_allocated_lock() - Release the PBN lock and/or the reference on the allocated block at
+ * the end of processing a data_vio.
+ */
+static void release_allocated_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+ release_data_vio_allocation_lock(data_vio, false);
+ perform_cleanup_stage(data_vio, VIO_RELEASE_RECOVERY_LOCKS);
+}
+
+/** release_lock() - Release an uncontended LBN lock. */
+static void release_lock(struct data_vio *data_vio, struct lbn_lock *lock)
+{
+ struct int_map *lock_map = lock->zone->lbn_operations;
+ struct data_vio *lock_holder;
+
+ if (!lock->locked) {
+ /* The lock is not locked, so it had better not be registered in the lock map. */
+ struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn);
+
+ VDO_ASSERT_LOG_ONLY((data_vio != lock_holder),
+ "no logical block lock held for block %llu",
+ (unsigned long long) lock->lbn);
+ return;
+ }
+
+ /* Release the lock by removing the lock from the map. */
+ lock_holder = vdo_int_map_remove(lock_map, lock->lbn);
+ VDO_ASSERT_LOG_ONLY((data_vio == lock_holder),
+ "logical block lock mismatch for block %llu",
+ (unsigned long long) lock->lbn);
+ lock->locked = false;
+}
+
+/** transfer_lock() - Transfer a contended LBN lock to the eldest waiter. */
+static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
+{
+ struct data_vio *lock_holder, *next_lock_holder;
+ int result;
+
+ VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
+
+ /* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */
+ next_lock_holder =
+ vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&lock->waiters));
+
+ /* Transfer the remaining lock waiters to the next lock holder. */
+ vdo_waitq_transfer_all_waiters(&lock->waiters,
+ &next_lock_holder->logical.waiters);
+
+ result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
+ next_lock_holder, true, (void **) &lock_holder);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(next_lock_holder, result);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY((lock_holder == data_vio),
+ "logical block lock mismatch for block %llu",
+ (unsigned long long) lock->lbn);
+ lock->locked = false;
+
+ /*
+ * If there are still waiters, other data_vios must be trying to get the lock we just
+ * transferred. We must ensure that the new lock holder doesn't block in the packer.
+ */
+ if (vdo_waitq_has_waiters(&next_lock_holder->logical.waiters))
+ cancel_data_vio_compression(next_lock_holder);
+
+ /*
+ * Avoid stack overflow on lock transfer.
+ * FIXME: this is only an issue in the 1 thread config.
+ */
+ next_lock_holder->vio.completion.requeue = true;
+ launch_locked_request(next_lock_holder);
+}
+
+/**
+ * release_logical_lock() - Release the logical block lock and flush generation lock at the end of
+ * processing a data_vio.
+ */
+static void release_logical_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct lbn_lock *lock = &data_vio->logical;
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ if (vdo_waitq_has_waiters(&lock->waiters))
+ transfer_lock(data_vio, lock);
+ else
+ release_lock(data_vio, lock);
+
+ vdo_release_flush_generation_lock(data_vio);
+ perform_cleanup_stage(data_vio, VIO_CLEANUP_DONE);
+}
+
+/** clean_hash_lock() - Release the hash lock at the end of processing a data_vio. */
+static void clean_hash_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_hash_zone(data_vio);
+ if (completion->result != VDO_SUCCESS) {
+ vdo_clean_failed_hash_lock(data_vio);
+ return;
+ }
+
+ vdo_release_hash_lock(data_vio);
+ perform_cleanup_stage(data_vio, VIO_RELEASE_LOGICAL);
+}
+
+/**
+ * finish_cleanup() - Make some assertions about a data_vio which has finished cleaning up.
+ *
+ * If the data_vio is part of a multi-block discard, it is relaunched on the next block; otherwise,
+ * it is returned to the pool.
+ */
+static void finish_cleanup(struct data_vio *data_vio)
+{
+ struct vdo_completion *completion = &data_vio->vio.completion;
+
+ VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
+ "complete data_vio has no allocation lock");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
+ "complete data_vio has no hash lock");
+ if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+ (completion->result != VDO_SUCCESS)) {
+ struct data_vio_pool *pool = completion->vdo->data_vio_pool;
+
+ vdo_funnel_queue_put(pool->queue, &completion->work_queue_entry_link);
+ schedule_releases(pool);
+ return;
+ }
+
+ data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
+ VDO_BLOCK_SIZE - data_vio->offset);
+ data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
+ data_vio->read = data_vio->is_partial;
+ data_vio->offset = 0;
+ completion->requeue = true;
+ launch_data_vio(data_vio, data_vio->logical.lbn + 1);
+}
+
+/** perform_cleanup_stage() - Perform the next step in the process of cleaning up a data_vio. */
+static void perform_cleanup_stage(struct data_vio *data_vio,
+ enum data_vio_cleanup_stage stage)
+{
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+
+ switch (stage) {
+ case VIO_RELEASE_HASH_LOCK:
+ if (data_vio->hash_lock != NULL) {
+ launch_data_vio_hash_zone_callback(data_vio, clean_hash_lock);
+ return;
+ }
+ fallthrough;
+
+ case VIO_RELEASE_ALLOCATED:
+ if (data_vio_has_allocation(data_vio)) {
+ launch_data_vio_allocated_zone_callback(data_vio,
+ release_allocated_lock);
+ return;
+ }
+ fallthrough;
+
+ case VIO_RELEASE_RECOVERY_LOCKS:
+ if ((data_vio->recovery_sequence_number > 0) &&
+ (READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) &&
+ (data_vio->vio.completion.result != VDO_READ_ONLY))
+ vdo_log_warning("VDO not read-only when cleaning data_vio with RJ lock");
+ fallthrough;
+
+ case VIO_RELEASE_LOGICAL:
+ launch_data_vio_logical_callback(data_vio, release_logical_lock);
+ return;
+
+ default:
+ finish_cleanup(data_vio);
+ }
+}
+
+void complete_data_vio(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ completion->error_handler = NULL;
+ data_vio->last_async_operation = VIO_ASYNC_OP_CLEANUP;
+ perform_cleanup_stage(data_vio,
+ (data_vio->write ? VIO_CLEANUP_START : VIO_RELEASE_LOGICAL));
+}
+
+static void enter_read_only_mode(struct vdo_completion *completion)
+{
+ if (vdo_is_read_only(completion->vdo))
+ return;
+
+ if (completion->result != VDO_READ_ONLY) {
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ vdo_log_error_strerror(completion->result,
+ "Preparing to enter read-only mode: data_vio for LBN %llu (becoming mapped to %llu, previously mapped to %llu, allocated %llu) is completing with a fatal error after operation %s",
+ (unsigned long long) data_vio->logical.lbn,
+ (unsigned long long) data_vio->new_mapped.pbn,
+ (unsigned long long) data_vio->mapped.pbn,
+ (unsigned long long) data_vio->allocation.pbn,
+ get_data_vio_operation_name(data_vio));
+ }
+
+ vdo_enter_read_only_mode(completion->vdo, completion->result);
+}
+
+void handle_data_vio_error(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ if ((completion->result == VDO_READ_ONLY) || (data_vio->user_bio == NULL))
+ enter_read_only_mode(completion);
+
+ update_data_vio_error_stats(data_vio);
+ complete_data_vio(completion);
+}
+
+/**
+ * get_data_vio_operation_name() - Get the name of the last asynchronous operation performed on a
+ * data_vio.
+ */
+const char *get_data_vio_operation_name(struct data_vio *data_vio)
+{
+ BUILD_BUG_ON((MAX_VIO_ASYNC_OPERATION_NUMBER - MIN_VIO_ASYNC_OPERATION_NUMBER) !=
+ ARRAY_SIZE(ASYNC_OPERATION_NAMES));
+
+ return ((data_vio->last_async_operation < MAX_VIO_ASYNC_OPERATION_NUMBER) ?
+ ASYNC_OPERATION_NAMES[data_vio->last_async_operation] :
+ "unknown async operation");
+}
+
+/**
+ * data_vio_allocate_data_block() - Allocate a data block.
+ *
+ * @write_lock_type: The type of write lock to obtain on the block.
+ * @callback: The callback which will attempt an allocation in the current zone and continue if it
+ * succeeds.
+ * @error_handler: The handler for errors while allocating.
+ */
+void data_vio_allocate_data_block(struct data_vio *data_vio,
+ enum pbn_lock_type write_lock_type,
+ vdo_action_fn callback, vdo_action_fn error_handler)
+{
+ struct allocation *allocation = &data_vio->allocation;
+
+ VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
+ "data_vio does not have an allocation");
+ allocation->write_lock_type = write_lock_type;
+ allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone);
+ allocation->first_allocation_zone = allocation->zone->zone_number;
+
+ data_vio->vio.completion.error_handler = error_handler;
+ launch_data_vio_allocated_zone_callback(data_vio, callback);
+}
+
+/**
+ * release_data_vio_allocation_lock() - Release the PBN lock on a data_vio's allocated block.
+ * @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten).
+ *
+ * If the reference to the locked block is still provisional, it will be released as well.
+ */
+void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset)
+{
+ struct allocation *allocation = &data_vio->allocation;
+ physical_block_number_t locked_pbn = allocation->pbn;
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ if (reset || vdo_pbn_lock_has_provisional_reference(allocation->lock))
+ allocation->pbn = VDO_ZERO_BLOCK;
+
+ vdo_release_physical_zone_pbn_lock(allocation->zone, locked_pbn,
+ vdo_forget(allocation->lock));
+}
+
+/**
+ * uncompress_data_vio() - Uncompress the data a data_vio has just read.
+ * @mapping_state: The mapping state indicating which fragment to decompress.
+ * @buffer: The buffer to receive the uncompressed data.
+ */
+int uncompress_data_vio(struct data_vio *data_vio,
+ enum block_mapping_state mapping_state, char *buffer)
+{
+ int size;
+ u16 fragment_offset, fragment_size;
+ struct compressed_block *block = data_vio->compression.block;
+ int result = vdo_get_compressed_block_fragment(mapping_state, block,
+ &fragment_offset, &fragment_size);
+
+ if (result != VDO_SUCCESS) {
+ vdo_log_debug("%s: compressed fragment error %d", __func__, result);
+ return result;
+ }
+
+ size = LZ4_decompress_safe((block->data + fragment_offset), buffer,
+ fragment_size, VDO_BLOCK_SIZE);
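+	/*
+	 * LZ4_decompress_safe() returns the number of bytes written to the buffer, or a negative
+	 * value for malformed input; only an exact VDO_BLOCK_SIZE result is a valid fragment.
+	 */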
+ if (size != VDO_BLOCK_SIZE) {
+ vdo_log_debug("%s: lz4 error", __func__);
+ return VDO_INVALID_FRAGMENT;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * modify_for_partial_write() - Do the modify-write part of a read-modify-write cycle.
+ * @completion: The data_vio which has just finished its read.
+ *
+ * This is invoked from complete_read() or complete_zero_read() once the block's data is available.
+ */
+static void modify_for_partial_write(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ char *data = data_vio->vio.data;
+ struct bio *bio = data_vio->user_bio;
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ memset(data + data_vio->offset, '\0', min_t(u32,
+ data_vio->remaining_discard,
+ VDO_BLOCK_SIZE - data_vio->offset));
+ } else {
+ copy_from_bio(bio, data + data_vio->offset);
+ }
+
+ data_vio->is_zero = is_zero_block(data);
+ data_vio->read = false;
+ launch_data_vio_logical_callback(data_vio,
+ continue_data_vio_with_block_map_slot);
+}
+
+static void complete_read(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ char *data = data_vio->vio.data;
+ bool compressed = vdo_is_state_compressed(data_vio->mapped.state);
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ if (compressed) {
+ int result = uncompress_data_vio(data_vio, data_vio->mapped.state, data);
+
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+ }
+
+ if (data_vio->write) {
+ modify_for_partial_write(completion);
+ return;
+ }
+
+ if (compressed || data_vio->is_partial)
+ copy_to_bio(data_vio->user_bio, data + data_vio->offset);
+
+ acknowledge_data_vio(data_vio);
+ complete_data_vio(completion);
+}
+
+static void read_endio(struct bio *bio)
+{
+ struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
+ int result = blk_status_to_errno(bio->bi_status);
+
+ vdo_count_completed_bios(bio);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ launch_data_vio_cpu_callback(data_vio, complete_read,
+ CPU_Q_COMPLETE_READ_PRIORITY);
+}
+
+static void complete_zero_read(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ if (data_vio->is_partial) {
+ memset(data_vio->vio.data, 0, VDO_BLOCK_SIZE);
+ if (data_vio->write) {
+ modify_for_partial_write(completion);
+ return;
+ }
+ } else {
+ zero_fill_bio(data_vio->user_bio);
+ }
+
+ complete_read(completion);
+}
+
+/**
+ * read_block() - Read a block asynchronously.
+ *
+ * This is the callback registered in continue_data_vio_with_block_map_slot().
+ */
+static void read_block(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct vio *vio = as_vio(completion);
+ int result = VDO_SUCCESS;
+
+ if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
+ launch_data_vio_cpu_callback(data_vio, complete_zero_read,
+ CPU_Q_COMPLETE_VIO_PRIORITY);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_READ_DATA_VIO;
+ if (vdo_is_state_compressed(data_vio->mapped.state)) {
+ result = vio_reset_bio(vio, (char *) data_vio->compression.block,
+ read_endio, REQ_OP_READ, data_vio->mapped.pbn);
+ } else {
+ blk_opf_t opf = ((data_vio->user_bio->bi_opf & PASSTHROUGH_FLAGS) | REQ_OP_READ);
+
+ if (data_vio->is_partial) {
+ result = vio_reset_bio(vio, vio->data, read_endio, opf,
+ data_vio->mapped.pbn);
+ } else {
+ /* A full 4k read. Use the incoming bio to avoid having to copy the data */
+ bio_reset(vio->bio, vio->bio->bi_bdev, opf);
+ bio_init_clone(data_vio->user_bio->bi_bdev, vio->bio,
+ data_vio->user_bio, GFP_KERNEL);
+
+ /* Copy over the original bio iovec and opflags. */
+ vdo_set_bio_properties(vio->bio, vio, read_endio, opf,
+ data_vio->mapped.pbn);
+ }
+ }
+
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ vdo_submit_data_vio(data_vio);
+}
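+
+/*
+ * Editorial summary (not driver code) of the three submission paths in read_block():
+ *
+ *	compressed mapping -> read the whole physical block into the compression buffer
+ *	partial read       -> read into the data_vio's own buffer; complete_read() copies it out
+ *	full 4k read       -> clone the user bio so the data lands directly in the caller's pages
+ */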
+
+static inline struct data_vio *
+reference_count_update_completion_as_data_vio(struct vdo_completion *completion)
+{
+ if (completion->type == VIO_COMPLETION)
+ return as_data_vio(completion);
+
+ return container_of(completion, struct data_vio, decrement_completion);
+}
+
+/**
+ * update_block_map() - Rendezvous of the data_vio and decrement completions after each has
+ * made its reference updates. Handle any error from either, or proceed
+ * to updating the block map.
+ * @completion: The completion of the write in progress.
+ */
+static void update_block_map(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = reference_count_update_completion_as_data_vio(completion);
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ if (!data_vio->first_reference_operation_complete) {
+ /* Rendezvous, we're first */
+ data_vio->first_reference_operation_complete = true;
+ return;
+ }
+
+ completion = &data_vio->vio.completion;
+ vdo_set_completion_result(completion, data_vio->decrement_completion.result);
+ if (completion->result != VDO_SUCCESS) {
+ handle_data_vio_error(completion);
+ return;
+ }
+
+ completion->error_handler = handle_data_vio_error;
+ if (data_vio->hash_lock != NULL)
+ set_data_vio_hash_zone_callback(data_vio, vdo_continue_hash_lock);
+ else
+ completion->callback = complete_data_vio;
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_PUT_MAPPED_BLOCK;
+ vdo_put_mapped_block(data_vio);
+}
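+
+/*
+ * Editorial sketch (not driver code): the rendezvous above is the usual "second finisher
+ * proceeds" pattern for joining two completions; the names below are illustrative.
+ *
+ *	if (!state->first_done) {
+ *		state->first_done = true;
+ *		return;			(first arrival only records itself)
+ *	}
+ *	continue_processing(state);	(second arrival carries on for both)
+ *
+ * The first_reference_operation_complete flag needs no atomics here because both the increment
+ * and decrement paths call back on the data_vio's logical zone thread.
+ */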
+
+static void decrement_reference_count(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = container_of(completion, struct data_vio,
+ decrement_completion);
+
+ assert_data_vio_in_mapped_zone(data_vio);
+
+ vdo_set_completion_callback(completion, update_block_map,
+ data_vio->logical.zone->thread_id);
+ completion->error_handler = update_block_map;
+ vdo_modify_reference_count(completion, &data_vio->decrement_updater);
+}
+
+static void increment_reference_count(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_new_mapped_zone(data_vio);
+
+ if (data_vio->downgrade_allocation_lock) {
+ /*
+ * Now that the data has been written, it's safe to deduplicate against the
+ * block. Downgrade the allocation lock to a read lock so it can be used later by
+ * the hash lock. This is done here since it needs to happen sometime before we
+ * return to the hash zone, and we are currently on the correct thread. For
+ * compressed blocks, the downgrade will have already been done.
+ */
+ vdo_downgrade_pbn_write_lock(data_vio->allocation.lock, false);
+ }
+
+ set_data_vio_logical_callback(data_vio, update_block_map);
+ completion->error_handler = update_block_map;
+ vdo_modify_reference_count(completion, &data_vio->increment_updater);
+}
+
+/** journal_remapping() - Add a recovery journal entry for a data remapping. */
+static void journal_remapping(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_journal_zone(data_vio);
+
+ data_vio->decrement_updater.operation = VDO_JOURNAL_DATA_REMAPPING;
+ data_vio->decrement_updater.zpbn = data_vio->mapped;
+ if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
+ data_vio->first_reference_operation_complete = true;
+ if (data_vio->mapped.pbn == VDO_ZERO_BLOCK)
+ set_data_vio_logical_callback(data_vio, update_block_map);
+ } else {
+ set_data_vio_new_mapped_zone_callback(data_vio,
+ increment_reference_count);
+ }
+
+ if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
+ data_vio->first_reference_operation_complete = true;
+ } else {
+ vdo_set_completion_callback(&data_vio->decrement_completion,
+ decrement_reference_count,
+ data_vio->mapped.zone->thread_id);
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_JOURNAL_REMAPPING;
+ vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
+}
+
+/**
+ * read_old_block_mapping() - Get the previous PBN/LBN mapping of an in-progress write.
+ *
+ * Gets the previous PBN mapped to this LBN from the block map, so as to make an appropriate
+ * journal entry referencing the removal of this LBN->PBN mapping.
+ */
+static void read_old_block_mapping(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE;
+ set_data_vio_journal_callback(data_vio, journal_remapping);
+ vdo_get_mapped_block(data_vio);
+}
+
+void update_metadata_for_data_vio_write(struct data_vio *data_vio, struct pbn_lock *lock)
+{
+ data_vio->increment_updater = (struct reference_updater) {
+ .operation = VDO_JOURNAL_DATA_REMAPPING,
+ .increment = true,
+ .zpbn = data_vio->new_mapped,
+ .lock = lock,
+ };
+
+ launch_data_vio_logical_callback(data_vio, read_old_block_mapping);
+}
+
+/**
+ * pack_compressed_data() - Attempt to pack the compressed data_vio into a block.
+ *
+ * This is the callback registered in launch_compress_data_vio().
+ */
+static void pack_compressed_data(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_packer_zone(data_vio);
+
+ if (!vdo_get_compressing(vdo_from_data_vio(data_vio)) ||
+ get_data_vio_compression_status(data_vio).may_not_compress) {
+ write_data_vio(data_vio);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_PACKING;
+ vdo_attempt_packing(data_vio);
+}
+
+/**
+ * compress_data_vio() - Do the actual work of compressing the data on a CPU queue.
+ *
+ * This callback is registered in launch_compress_data_vio().
+ */
+static void compress_data_vio(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ int size;
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ /*
+ * By putting the compressed data at the start of the compressed block data field, we won't
+ * need to copy it if this data_vio becomes a compressed write agent.
+ */
+ size = LZ4_compress_default(data_vio->vio.data,
+ data_vio->compression.block->data, VDO_BLOCK_SIZE,
+ VDO_MAX_COMPRESSED_FRAGMENT_SIZE,
+ (char *) vdo_get_work_queue_private_data());
+ if ((size > 0) && (size < VDO_COMPRESSED_BLOCK_DATA_SIZE)) {
+ data_vio->compression.size = size;
+ launch_data_vio_packer_callback(data_vio, pack_compressed_data);
+ return;
+ }
+
+ write_data_vio(data_vio);
+}
+
+/**
+ * launch_compress_data_vio() - Continue a write by attempting to compress the data.
+ *
+ * This is a re-entry point to vio_write used by hash locks.
+ */
+void launch_compress_data_vio(struct data_vio *data_vio)
+{
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
+ "data_vio to compress has a hash_lock");
+ VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
+ "data_vio to compress has an allocation");
+
+ /*
+ * There are 4 reasons why a data_vio which has reached this point will not be eligible for
+ * compression:
+ *
+ * 1) Since data_vios can block indefinitely in the packer, it would be bad to do so if the
+ * write request also requests FUA.
+ *
+ * 2) A data_vio should not be compressed when compression is disabled for the vdo.
+ *
+ * 3) A data_vio could be doing a partial write on behalf of a larger discard which has not
+ * yet been acknowledged and hence blocking in the packer would be bad.
+ *
+ * 4) Some other data_vio may be waiting on this data_vio in which case blocking in the
+ * packer would also be bad.
+ */
+ if (data_vio->fua ||
+ !vdo_get_compressing(vdo_from_data_vio(data_vio)) ||
+ ((data_vio->user_bio != NULL) && (bio_op(data_vio->user_bio) == REQ_OP_DISCARD)) ||
+ (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_COMPRESSING)) {
+ write_data_vio(data_vio);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_COMPRESS_DATA_VIO;
+ launch_data_vio_cpu_callback(data_vio, compress_data_vio,
+ CPU_Q_COMPRESS_BLOCK_PRIORITY);
+}
+
+/**
+ * hash_data_vio() - Hash the data in a data_vio and set the hash zone (which also flags the record
+ * name as set).
+
+ *
+ */
+static void hash_data_vio(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_on_cpu_thread(data_vio);
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
+
+ murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be,
+ &data_vio->record_name);
+
+ data_vio->hash_zone = vdo_select_hash_zone(vdo_from_data_vio(data_vio)->hash_zones,
+ &data_vio->record_name);
+ data_vio->last_async_operation = VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK;
+ launch_data_vio_hash_zone_callback(data_vio, vdo_acquire_hash_lock);
+}
+
+/** prepare_for_dedupe() - Prepare for the dedupe path after attempting to get an allocation. */
+static void prepare_for_dedupe(struct data_vio *data_vio)
+{
+ /* We don't care what thread we are on. */
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
+
+ /*
+ * Before we can dedupe, we need to know the record name, so the first
+ * step is to hash the block data.
+ */
+ data_vio->last_async_operation = VIO_ASYNC_OP_HASH_DATA_VIO;
+ launch_data_vio_cpu_callback(data_vio, hash_data_vio, CPU_Q_HASH_BLOCK_PRIORITY);
+}
+
+/**
+ * write_bio_finished() - This is the bio_end_io function registered in write_data_vio() to be
+ *                        called when a data_vio's write to the underlying storage has completed.
+ */
+static void write_bio_finished(struct bio *bio)
+{
+ struct data_vio *data_vio = vio_as_data_vio((struct vio *) bio->bi_private);
+
+ vdo_count_completed_bios(bio);
+ vdo_set_completion_result(&data_vio->vio.completion,
+ blk_status_to_errno(bio->bi_status));
+ data_vio->downgrade_allocation_lock = true;
+ update_metadata_for_data_vio_write(data_vio, data_vio->allocation.lock);
+}
+
+/** write_data_vio() - Write a data block to storage without compression. */
+void write_data_vio(struct data_vio *data_vio)
+{
+ struct data_vio_compression_status status, new_status;
+ int result;
+
+ if (!data_vio_has_allocation(data_vio)) {
+ /*
+ * There was no space to write this block and we failed to deduplicate or compress
+ * it.
+ */
+ continue_data_vio_with_error(data_vio, VDO_NO_SPACE);
+ return;
+ }
+
+ new_status = (struct data_vio_compression_status) {
+ .stage = DATA_VIO_POST_PACKER,
+ .may_not_compress = true,
+ };
+
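+	/*
+	 * Take this data_vio off the compression path: loop until it is either already
+	 * DATA_VIO_POST_PACKER or we succeed in atomically marking it POST_PACKER with
+	 * may_not_compress set, so the packer can no longer touch it.
+	 */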
+ do {
+ status = get_data_vio_compression_status(data_vio);
+ } while ((status.stage != DATA_VIO_POST_PACKER) &&
+ !set_data_vio_compression_status(data_vio, status, new_status));
+
+ /* Write the data from the data block buffer. */
+ result = vio_reset_bio(&data_vio->vio, data_vio->vio.data,
+ write_bio_finished, REQ_OP_WRITE,
+ data_vio->allocation.pbn);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_WRITE_DATA_VIO;
+ vdo_submit_data_vio(data_vio);
+}
+
+/**
+ * acknowledge_write_callback() - Acknowledge a write to the requestor.
+ *
+ * This callback is registered in allocate_block() and continue_data_vio_with_block_map_slot().
+ */
+static void acknowledge_write_callback(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct vdo *vdo = completion->vdo;
+
+ VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
+ (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
+ "%s() called on bio ack queue", __func__);
+ VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
+ "write VIO to be acknowledged has a flush generation lock");
+ acknowledge_data_vio(data_vio);
+ if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
+ /* This is a zero write or discard */
+ update_metadata_for_data_vio_write(data_vio, NULL);
+ return;
+ }
+
+ prepare_for_dedupe(data_vio);
+}
+
+/**
+ * allocate_block() - Attempt to allocate a block in the current allocation zone.
+ *
+ * This callback is registered in continue_data_vio_with_block_map_slot().
+ */
+static void allocate_block(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ if (!vdo_allocate_block_in_zone(data_vio))
+ return;
+
+ completion->error_handler = handle_data_vio_error;
+ WRITE_ONCE(data_vio->allocation_succeeded, true);
+ data_vio->new_mapped = (struct zoned_pbn) {
+ .zone = data_vio->allocation.zone,
+ .pbn = data_vio->allocation.pbn,
+ .state = VDO_MAPPING_STATE_UNCOMPRESSED,
+ };
+
+ if (data_vio->fua) {
+ prepare_for_dedupe(data_vio);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
+ launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write_callback);
+}
+
+/**
+ * handle_allocation_error() - Handle an error attempting to allocate a block.
+ *
+ * This error handler is registered in continue_data_vio_with_block_map_slot().
+ */
+static void handle_allocation_error(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ if (completion->result == VDO_NO_SPACE) {
+ /* We failed to get an allocation, but we can try to dedupe. */
+ vdo_reset_completion(completion);
+ completion->error_handler = handle_data_vio_error;
+ prepare_for_dedupe(data_vio);
+ return;
+ }
+
+ /* We got a "real" error, not just a failure to allocate, so fail the request. */
+ handle_data_vio_error(completion);
+}
+
+static int assert_is_discard(struct data_vio *data_vio)
+{
+ int result = VDO_ASSERT(data_vio->is_discard,
+ "data_vio with no block map page is a discard");
+
+ return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY);
+}
+
+/**
+ * continue_data_vio_with_block_map_slot() - Read the data_vio's mapping from the block map.
+ *
+ * This callback is registered in launch_read_data_vio().
+ */
+void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_logical_zone(data_vio);
+ if (data_vio->read) {
+ set_data_vio_logical_callback(data_vio, read_block);
+ data_vio->last_async_operation = VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ;
+ vdo_get_mapped_block(data_vio);
+ return;
+ }
+
+ vdo_acquire_flush_generation_lock(data_vio);
+
+ if (data_vio->tree_lock.tree_slots[0].block_map_slot.pbn == VDO_ZERO_BLOCK) {
+ /*
+ * This is a discard for a block on a block map page which has not been allocated, so
+ * there's nothing more we need to do.
+ */
+ completion->callback = complete_data_vio;
+ continue_data_vio_with_error(data_vio, assert_is_discard(data_vio));
+ return;
+ }
+
+ /*
+ * We need an allocation if this is neither a full-block discard nor a
+ * full-block zero write.
+ */
+ if (!data_vio->is_zero && (!data_vio->is_discard || data_vio->is_partial)) {
+ data_vio_allocate_data_block(data_vio, VIO_WRITE_LOCK, allocate_block,
+ handle_allocation_error);
+ return;
+ }
+
+ /*
+ * We don't need to write any data, so skip allocation and just update the block map and
+ * reference counts (via the journal).
+ */
+ data_vio->new_mapped.pbn = VDO_ZERO_BLOCK;
+ if (data_vio->is_zero)
+ data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
+
+ if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+ /* This is not the final block of a discard so we can't acknowledge it yet. */
+ update_metadata_for_data_vio_write(data_vio, NULL);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
+ launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write_callback);
+}
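+
+/*
+ * Editorial summary (not driver code) of the dispatch in continue_data_vio_with_block_map_slot():
+ *
+ *	read                                 -> read_block()
+ *	discard of an unallocated tree page  -> complete immediately
+ *	write which needs data written       -> allocate_block() / handle_allocation_error()
+ *	full-block zero write or discard     -> journal only, via update_metadata_for_data_vio_write(),
+ *	                                        acknowledging first unless more discard blocks remain
+ */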
diff --git a/drivers/md/dm-vdo/data-vio.h b/drivers/md/dm-vdo/data-vio.h
new file mode 100644
index 000000000000..25926b6cd98b
--- /dev/null
+++ b/drivers/md/dm-vdo/data-vio.h
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef DATA_VIO_H
+#define DATA_VIO_H
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+#include <linux/list.h>
+
+#include "permassert.h"
+
+#include "indexer.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "logical-zone.h"
+#include "physical-zone.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/* Codes for describing the last asynchronous operation performed on a vio. */
+enum async_operation_number {
+ MIN_VIO_ASYNC_OPERATION_NUMBER,
+ VIO_ASYNC_OP_LAUNCH = MIN_VIO_ASYNC_OPERATION_NUMBER,
+ VIO_ASYNC_OP_ACKNOWLEDGE_WRITE,
+ VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK,
+ VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK,
+ VIO_ASYNC_OP_LOCK_DUPLICATE_PBN,
+ VIO_ASYNC_OP_CHECK_FOR_DUPLICATION,
+ VIO_ASYNC_OP_CLEANUP,
+ VIO_ASYNC_OP_COMPRESS_DATA_VIO,
+ VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT,
+ VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ,
+ VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE,
+ VIO_ASYNC_OP_HASH_DATA_VIO,
+ VIO_ASYNC_OP_JOURNAL_REMAPPING,
+ VIO_ASYNC_OP_ATTEMPT_PACKING,
+ VIO_ASYNC_OP_PUT_MAPPED_BLOCK,
+ VIO_ASYNC_OP_READ_DATA_VIO,
+ VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX,
+ VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS,
+ VIO_ASYNC_OP_VERIFY_DUPLICATION,
+ VIO_ASYNC_OP_WRITE_DATA_VIO,
+ MAX_VIO_ASYNC_OPERATION_NUMBER,
+} __packed;
+
+struct lbn_lock {
+ logical_block_number_t lbn;
+ bool locked;
+ struct vdo_wait_queue waiters;
+ struct logical_zone *zone;
+};
+
+/* A position in the arboreal block map at a specific level. */
+struct block_map_tree_slot {
+ page_number_t page_index;
+ struct block_map_slot block_map_slot;
+};
+
+/* Fields for using the arboreal block map. */
+struct tree_lock {
+ /* The current height at which this data_vio is operating */
+ height_t height;
+ /* The block map tree for this LBN */
+ root_count_t root_index;
+ /* Whether we hold a page lock */
+ bool locked;
+ /* The key for the lock map */
+ u64 key;
+ /* The queue of waiters for the page this vio is allocating or loading */
+ struct vdo_wait_queue waiters;
+ /* The block map tree slots for this LBN */
+ struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
+};
+
+struct zoned_pbn {
+ physical_block_number_t pbn;
+ enum block_mapping_state state;
+ struct physical_zone *zone;
+};
+
+/*
+ * Where a data_vio is on the compression path; advance_data_vio_compression_stage() depends on
+ * the order of this enum.
+ */
+enum data_vio_compression_stage {
+ /* A data_vio which has not yet entered the compression path */
+ DATA_VIO_PRE_COMPRESSOR,
+ /* A data_vio which is in the compressor */
+ DATA_VIO_COMPRESSING,
+ /* A data_vio which is blocked in the packer */
+ DATA_VIO_PACKING,
+ /* A data_vio which is no longer on the compression path (and never will be) */
+ DATA_VIO_POST_PACKER,
+};
+
+struct data_vio_compression_status {
+ enum data_vio_compression_stage stage;
+ bool may_not_compress;
+};
+
+struct compression_state {
+ /*
+ * The current compression status of this data_vio. This field contains a value which
+ * consists of a data_vio_compression_stage and a flag indicating whether a request has
+ * been made to cancel (or prevent) compression for this data_vio.
+ *
+ * This field should be accessed through the get_data_vio_compression_status() and
+ * set_data_vio_compression_status() methods. It should not be accessed directly.
+ */
+ atomic_t status;
+
+ /* The compressed size of this block */
+ u16 size;
+
+ /* The packer input or output bin slot which holds the enclosing data_vio */
+ slot_number_t slot;
+
+ /* The packer bin to which the enclosing data_vio has been assigned */
+ struct packer_bin *bin;
+
+ /* A link in the chain of data_vios which have been packed together */
+ struct data_vio *next_in_batch;
+
+ /* A vio which is blocked in the packer while holding a lock this vio needs. */
+ struct data_vio *lock_holder;
+
+ /*
+ * The compressed block used to hold the compressed form of this block and that of any
+ * other blocks for which this data_vio is the compressed write agent.
+ */
+ struct compressed_block *block;
+};
+
+/* Fields supporting allocation of data blocks. */
+struct allocation {
+ /* The physical zone in which to allocate a physical block */
+ struct physical_zone *zone;
+
+ /* The block allocated to this vio */
+ physical_block_number_t pbn;
+
+ /*
+ * If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock until
+ * the block has been written, after which it will become a read lock.
+ */
+ struct pbn_lock *lock;
+
+ /* The type of write lock to obtain on the allocated block */
+ enum pbn_lock_type write_lock_type;
+
+ /* The zone which was the start of the current allocation cycle */
+ zone_count_t first_allocation_zone;
+
+ /* Whether this vio should wait for a clean slab */
+ bool wait_for_clean_slab;
+};
+
+struct reference_updater {
+ enum journal_operation operation;
+ bool increment;
+ struct zoned_pbn zpbn;
+ struct pbn_lock *lock;
+ struct vdo_waiter waiter;
+};
+
+/* A vio for processing user data requests. */
+struct data_vio {
+ /* The vdo_wait_queue entry structure */
+ struct vdo_waiter waiter;
+
+ /* The logical block of this request */
+ struct lbn_lock logical;
+
+ /* The state for traversing the block map tree */
+ struct tree_lock tree_lock;
+
+ /* The current partition address of this block */
+ struct zoned_pbn mapped;
+
+ /* The hash of this vio (if not zero) */
+ struct uds_record_name record_name;
+
+ /* Used for logging and debugging */
+ enum async_operation_number last_async_operation;
+
+ /* The operations to record in the recovery and slab journals */
+ struct reference_updater increment_updater;
+ struct reference_updater decrement_updater;
+
+ u16 read : 1;
+ u16 write : 1;
+ u16 fua : 1;
+ u16 is_zero : 1;
+ u16 is_discard : 1;
+ u16 is_partial : 1;
+ u16 is_duplicate : 1;
+ u16 first_reference_operation_complete : 1;
+ u16 downgrade_allocation_lock : 1;
+
+ struct allocation allocation;
+
+ /*
+ * Whether this vio has received an allocation. This field is examined from threads not in
+ * the allocation zone.
+ */
+ bool allocation_succeeded;
+
+ /* The new partition address of this block after the vio write completes */
+ struct zoned_pbn new_mapped;
+
+ /* The hash zone responsible for the name (NULL if is_zero_block) */
+ struct hash_zone *hash_zone;
+
+ /* The lock this vio holds or shares with other vios with the same data */
+ struct hash_lock *hash_lock;
+
+ /* All data_vios sharing a hash lock are kept in a list linking these list entries */
+ struct list_head hash_lock_entry;
+
+ /* The block number in the partition of the UDS deduplication advice */
+ struct zoned_pbn duplicate;
+
+ /*
+ * The sequence number of the recovery journal block containing the increment entry for
+ * this vio.
+ */
+ sequence_number_t recovery_sequence_number;
+
+ /* The point in the recovery journal where this write last made an entry */
+ struct journal_point recovery_journal_point;
+
+ /* The list of vios in user initiated write requests */
+ struct list_head write_entry;
+
+ /* The generation number of the VDO that this vio belongs to */
+ sequence_number_t flush_generation;
+
+ /* The completion to use for fetching block map pages for this vio */
+ struct vdo_page_completion page_completion;
+
+ /* The user bio that initiated this VIO */
+ struct bio *user_bio;
+
+ /* partial block support */
+ block_size_t offset;
+
+ /*
+ * The number of bytes to be discarded. For discards, this field will always be positive,
+ * whereas for non-discards it will always be 0. Hence it can be used to determine whether
+ * a data_vio is processing a discard, even after the user_bio has been acknowledged.
+ */
+ u32 remaining_discard;
+
+ struct dedupe_context *dedupe_context;
+
+ /* Fields beyond this point will not be reset when a pooled data_vio is reused. */
+
+ struct vio vio;
+
+ /* The completion for making reference count decrements */
+ struct vdo_completion decrement_completion;
+
+ /* All of the fields necessary for the compression path */
+ struct compression_state compression;
+
+ /* A block used as output during compression or uncompression */
+ char *scratch_block;
+
+ struct list_head pool_entry;
+};
+
+static inline struct data_vio *vio_as_data_vio(struct vio *vio)
+{
+ VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
+ return container_of(vio, struct data_vio, vio);
+}
+
+static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
+{
+ return vio_as_data_vio(as_vio(completion));
+}
+
+static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
+{
+ if (waiter == NULL)
+ return NULL;
+
+ return container_of(waiter, struct data_vio, waiter);
+}
+
+static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
+{
+ if (updater->increment)
+ return container_of(updater, struct data_vio, increment_updater);
+
+ return container_of(updater, struct data_vio, decrement_updater);
+}
+
+static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
+{
+ return !list_empty(&data_vio->write_entry);
+}
+
+static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
+{
+ return data_vio->vio.completion.vdo;
+}
+
+static inline bool data_vio_has_allocation(struct data_vio *data_vio)
+{
+ return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
+}
+
+struct data_vio_compression_status __must_check
+advance_data_vio_compression_stage(struct data_vio *data_vio);
+struct data_vio_compression_status __must_check
+get_data_vio_compression_status(struct data_vio *data_vio);
+bool cancel_data_vio_compression(struct data_vio *data_vio);
+
+struct data_vio_pool;
+
+int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
+ data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr);
+void free_data_vio_pool(struct data_vio_pool *pool);
+void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
+void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
+void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
+
+void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios);
+data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool);
+int __must_check set_data_vio_pool_discard_limit(struct data_vio_pool *pool,
+ data_vio_count_t limit);
+data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool);
+
+void complete_data_vio(struct vdo_completion *completion);
+void handle_data_vio_error(struct vdo_completion *completion);
+
+static inline void continue_data_vio(struct data_vio *data_vio)
+{
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+/**
+ * continue_data_vio_with_error() - Set an error code and then continue processing a data_vio.
+ *
+ * This will not mask older errors. This function can be called with a success code, but it is more
+ * efficient to call continue_data_vio() if the caller knows the result was a success.
+ */
+static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
+{
+ vdo_continue_completion(&data_vio->vio.completion, result);
+}
+
+const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);
+
+static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->hash_zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+ /*
+ * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
+	 * inline, and the LBN is better than nothing as an identifier.
+ */
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id, expected);
+}
+
+static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->hash_zone->thread_id);
+}
+
+/**
+ * launch_data_vio_hash_zone_callback() - Set a callback as a hash zone operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_hash_zone_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->logical.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id, expected);
+}
+
+static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->logical.zone->thread_id);
+}
+
+/**
+ * launch_data_vio_logical_callback() - Set a callback as a logical block operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_logical_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->allocation.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->allocation.pbn, thread_id,
+ expected);
+}
+
+static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->allocation.zone->thread_id);
+}
+
+/**
+ * launch_data_vio_allocated_zone_callback() - Set a callback as a physical block operation in a
+ *                                             data_vio's allocated zone and invoke it immediately.
+ */
+static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_allocated_zone_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->duplicate.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->duplicate.pbn, thread_id,
+ expected);
+}
+
+static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->duplicate.zone->thread_id);
+}
+
+/**
+ * launch_data_vio_duplicate_zone_callback() - Set a callback as a physical block operation in a
+ *                                             data_vio's duplicate zone and invoke it immediately.
+ */
+static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_duplicate_zone_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->mapped.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
+}
+
+static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->mapped.zone->thread_id);
+}
+
+static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->new_mapped.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->new_mapped.pbn, thread_id,
+ expected);
+}
+
+static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->new_mapped.zone->thread_id);
+}
+
+static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
+{
+ thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on journal thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ journal_thread);
+}
+
+static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
+
+ vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
+}
+
+/**
+ * launch_data_vio_journal_callback() - Set a callback as a journal operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_journal_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
+{
+ thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on packer thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ packer_thread);
+}
+
+static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
+
+ vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
+}
+
+/**
+ * launch_data_vio_packer_callback() - Set a callback as a packer operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_packer_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
+{
+ thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ cpu_thread);
+}
+
+static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
+
+ vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
+}
+
+/**
+ * launch_data_vio_cpu_callback() - Set a callback to run on the CPU queues and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
+ vdo_action_fn callback,
+ enum vdo_completion_priority priority)
+{
+ set_data_vio_cpu_callback(data_vio, callback);
+ vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
+}
+
+static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ get_vio_bio_zone_thread_id(&data_vio->vio));
+}
+
+/**
+ * launch_data_vio_bio_zone_callback() - Set a callback as a bio zone operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_bio_zone_callback(data_vio, callback);
+ vdo_launch_completion_with_priority(&data_vio->vio.completion,
+ BIO_Q_DATA_PRIORITY);
+}
+
+/**
+ * launch_data_vio_on_bio_ack_queue() - If the vdo uses a bio_ack queue, set a callback to run on
+ * it and invoke it immediately, otherwise, just run the
+ * callback on the current thread.
+ */
+static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ struct vdo_completion *completion = &data_vio->vio.completion;
+ struct vdo *vdo = completion->vdo;
+
+ if (!vdo_uses_bio_ack_queue(vdo)) {
+ callback(completion);
+ return;
+ }
+
+ vdo_set_completion_callback(completion, callback,
+ vdo->thread_config.bio_ack_thread);
+ vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
+}
+
+void data_vio_allocate_data_block(struct data_vio *data_vio,
+ enum pbn_lock_type write_lock_type,
+ vdo_action_fn callback, vdo_action_fn error_handler);
+
+void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);
+
+int __must_check uncompress_data_vio(struct data_vio *data_vio,
+ enum block_mapping_state mapping_state,
+ char *buffer);
+
+void update_metadata_for_data_vio_write(struct data_vio *data_vio,
+ struct pbn_lock *lock);
+void write_data_vio(struct data_vio *data_vio);
+void launch_compress_data_vio(struct data_vio *data_vio);
+void continue_data_vio_with_block_map_slot(struct vdo_completion *completion);
+
+#endif /* DATA_VIO_H */
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
new file mode 100644
index 000000000000..117266e1b3ae
--- /dev/null
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -0,0 +1,3003 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/**
+ * DOC:
+ *
+ * Hash Locks:
+ *
+ * A hash_lock controls and coordinates writing, index access, and dedupe among groups of data_vios
+ * concurrently writing identical blocks, allowing them to deduplicate not only against advice but
+ * also against each other. This saves on index queries and allows those data_vios to concurrently
+ * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
+ * index query is needed for each hash_lock, instead of one for every data_vio.
+ *
+ * Hash_locks are assigned to hash_zones by computing a modulus on the hash itself. Each hash_zone
+ * has a single dedicated queue and thread for performing all operations on the hash_locks assigned
+ * to that zone. The concurrency guarantees of this single-threaded model allow the code to omit
+ * more fine-grained locking for the hash_lock structures.
+ *
+ * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
+ * ending states INITIALIZING and BYPASSING, every state represents and is held for the duration of
+ * an asynchronous operation. All state transitions are performed on the thread of the hash_zone
+ * containing the lock. An asynchronous operation is almost always performed upon entering a state,
+ * and the callback from that operation triggers exiting the state and entering a new state.
+ *
+ * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the
+ * asynchronous operations on behalf of the lock. The agent will change during the lifetime of the
+ * lock if the lock is shared by more than one data_vio. data_vios waiting to deduplicate are kept
+ * on a wait queue. Viewed a different way, the agent holds the lock exclusively until the lock
+ * enters the DEDUPING state, at which point it becomes a shared lock that all the waiters (and any
+ * new data_vios that arrive) use to share a PBN lock. In state DEDUPING, there is no agent. When
+ * the last data_vio in the lock calls back in DEDUPING, it becomes the agent and the lock becomes
+ * exclusive again. New data_vios that arrive in the lock will also go on the wait queue.
+ *
+ * The existence of lock waiters is a key factor controlling which state the lock transitions to
+ * next. When the lock is new or has waiters, it will always try to reach DEDUPING, and when it
+ * doesn't, it will try to clean up and exit.
+ *
+ * Deduping requires holding a PBN lock on a block that is known to contain data identical to the
+ * data_vios in the lock, so the lock will send the agent to the duplicate zone to acquire the PBN
+ * lock (LOCKING), to the kernel I/O threads to read and verify the data (VERIFYING), or to write a
+ * new copy of the data to a full data block or a slot in a compressed block (WRITING).
+ *
+ * Cleaning up consists of updating the index when the data location is different from the initial
+ * index query (UPDATING, triggered by stale advice, compression, and rollover), releasing the PBN
+ * lock on the duplicate block (UNLOCKING), and if the agent is the last data_vio referencing the
+ * lock, releasing the hash_lock itself back to the hash zone (BYPASSING).
+ *
+ * The shortest sequence of states is for non-concurrent writes of new data:
+ * INITIALIZING -> QUERYING -> WRITING -> BYPASSING
+ * This sequence is short because no PBN read lock or index update is needed.
+ *
+ * Non-concurrent, finding valid advice looks like this (endpoints elided):
+ * -> QUERYING -> LOCKING -> VERIFYING -> DEDUPING -> UNLOCKING ->
+ * Or with stale advice (endpoints elided):
+ * -> QUERYING -> LOCKING -> VERIFYING -> UNLOCKING -> WRITING -> UPDATING ->
+ *
+ * When there are not enough reference count increments available on a PBN for a data_vio
+ * to deduplicate, a new lock is forked and the excess waiters roll over to the new lock (which
+ * goes directly to WRITING). The new lock takes the place of the old lock in the lock map so new
+ * data_vios will be directed to it. The two locks will proceed independently, but only the new
+ * lock will have the right to update the index (unless it also forks).
+ *
+ * Since rollover happens in a lock instance, once a valid data location has been selected, it will
+ * not change. QUERYING and WRITING are only performed once per lock lifetime. All other
+ * non-endpoint states can be re-entered.
+ *
+ * The function names in this module follow a convention referencing the states and transitions in
+ * the state machine. For example, for the LOCKING state, there are start_locking() and
+ * finish_locking() functions. start_locking() is invoked by the finish function of the state (or
+ * states) that transition to LOCKING. It performs the actual lock state change and must be invoked
+ * on the hash zone thread. finish_locking() is called by (or continued via callback from) the
+ * code actually obtaining the lock. It does any bookkeeping or decision-making required and
+ * invokes the appropriate start function of the state being transitioned to after LOCKING.
+ *
+ * ----------------------------------------------------------------------
+ *
+ * Index Queries:
+ *
+ * A query to the UDS index is handled asynchronously by the index's threads. When the query is
+ * complete, a callback supplied with the query will be called from one of those threads. Under
+ * heavy system load, the index may be slower to respond than is desirable for reasonable I/O
+ * throughput. Since deduplication of writes is not necessary for correct operation of a VDO
+ * device, it is acceptable to time out slow index queries and proceed to fulfill a write
+ * request without deduplicating. However, because the uds_request struct itself is supplied by the
+ * caller, we cannot simply reuse a uds_request object which we have chosen to time out. Hence,
+ * each hash_zone maintains a pool of dedupe_contexts which each contain a uds_request along with a
+ * reference to the data_vio on behalf of which they are performing a query.
+ *
+ * When a hash_lock needs to query the index, it attempts to acquire an unused dedupe_context from
+ * its hash_zone's pool. If one is available, that context is prepared, associated with the
+ * hash_lock's agent, added to the list of pending contexts, and then sent to the index. The
+ * context's state will be transitioned from DEDUPE_CONTEXT_IDLE to DEDUPE_CONTEXT_PENDING. If all
+ * goes well, the dedupe callback will be called by the index which will change the context's state
+ * to DEDUPE_CONTEXT_COMPLETE, and the associated data_vio will be enqueued to run back in the hash
+ * zone where the query results will be processed and the context will be put back in the idle
+ * state and returned to the hash_zone's available list.
+ *
+ * The first time an index query is launched from a given hash_zone, a timer is started. When the
+ * timer fires, the hash_zone's completion is enqueued to run in the hash_zone where the zone's
+ * pending list will be searched for any contexts in the pending state which have been running for
+ * too long. Those contexts are transitioned to the DEDUPE_CONTEXT_TIMED_OUT state and moved to the
+ * zone's timed_out list where they won't be examined again if there is a subsequent time out. The
+ * data_vios associated with timed out contexts are sent to continue processing their write
+ * operation without deduplicating. The timer is also restarted.
+ *
+ * When the dedupe callback is run for a context which is in the timed out state, that context is
+ * moved to the DEDUPE_CONTEXT_TIMED_OUT_COMPLETE state. No other action need be taken as the
+ * associated data_vios have already been dispatched.
+ *
+ * If a hash_lock needs a dedupe context, and the available list is empty, the timed_out list will
+ * be searched for any contexts which are timed out and complete. One of these will be used
+ * immediately, and the rest will be returned to the available list and marked idle.
+ */
+
+#include "dedupe.h"
+
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "indexer.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "int-map.h"
+#include "io-submitter.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "slab-depot.h"
+#include "statistics.h"
+#include "types.h"
+#include "vdo.h"
+#include "wait-queue.h"
+
+struct uds_attribute {
+ struct attribute attr;
+ const char *(*show_string)(struct hash_zones *hash_zones);
+};
+
+#define DEDUPE_QUERY_TIMER_IDLE 0
+#define DEDUPE_QUERY_TIMER_RUNNING 1
+#define DEDUPE_QUERY_TIMER_FIRED 2
+
+enum dedupe_context_state {
+ DEDUPE_CONTEXT_IDLE,
+ DEDUPE_CONTEXT_PENDING,
+ DEDUPE_CONTEXT_TIMED_OUT,
+ DEDUPE_CONTEXT_COMPLETE,
+ DEDUPE_CONTEXT_TIMED_OUT_COMPLETE,
+};
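+
+/*
+ * Editorial summary (not driver code) of the dedupe_context life cycle described in the DOC
+ * comment above:
+ *
+ *	IDLE -> PENDING -> COMPLETE -> IDLE
+ *	           \
+ *	            -> TIMED_OUT -> TIMED_OUT_COMPLETE -> IDLE (reclaimed from the timed_out list)
+ */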
+
+/* Possible index states: closed, opened, or transitioning between those two. */
+enum index_state {
+ IS_CLOSED,
+ IS_CHANGING,
+ IS_OPENED,
+};
+
+static const char *CLOSED = "closed";
+static const char *CLOSING = "closing";
+static const char *ERROR = "error";
+static const char *OFFLINE = "offline";
+static const char *ONLINE = "online";
+static const char *OPENING = "opening";
+static const char *SUSPENDED = "suspended";
+static const char *UNKNOWN = "unknown";
+
+/* Version 2 uses the kernel space UDS index and is limited to 16 bytes */
+#define UDS_ADVICE_VERSION 2
+/* version byte + state byte + 64-bit little-endian PBN */
+#define UDS_ADVICE_SIZE (1 + 1 + sizeof(u64))
+
+enum hash_lock_state {
+ /* State for locks that are not in use or are being initialized. */
+ VDO_HASH_LOCK_INITIALIZING,
+
+ /* This is the sequence of states typically used on the non-dedupe path. */
+ VDO_HASH_LOCK_QUERYING,
+ VDO_HASH_LOCK_WRITING,
+ VDO_HASH_LOCK_UPDATING,
+
+ /* The remaining states are typically used on the dedupe path in this order. */
+ VDO_HASH_LOCK_LOCKING,
+ VDO_HASH_LOCK_VERIFYING,
+ VDO_HASH_LOCK_DEDUPING,
+ VDO_HASH_LOCK_UNLOCKING,
+
+ /*
+ * Terminal state for locks returning to the pool. Must be last both because it's the final
+ * state, and also because it's used to count the states.
+ */
+ VDO_HASH_LOCK_BYPASSING,
+};
+
+static const char * const LOCK_STATE_NAMES[] = {
+ [VDO_HASH_LOCK_BYPASSING] = "BYPASSING",
+ [VDO_HASH_LOCK_DEDUPING] = "DEDUPING",
+ [VDO_HASH_LOCK_INITIALIZING] = "INITIALIZING",
+ [VDO_HASH_LOCK_LOCKING] = "LOCKING",
+ [VDO_HASH_LOCK_QUERYING] = "QUERYING",
+ [VDO_HASH_LOCK_UNLOCKING] = "UNLOCKING",
+ [VDO_HASH_LOCK_UPDATING] = "UPDATING",
+ [VDO_HASH_LOCK_VERIFYING] = "VERIFYING",
+ [VDO_HASH_LOCK_WRITING] = "WRITING",
+};
+
+struct hash_lock {
+ /* The block hash covered by this lock */
+ struct uds_record_name hash;
+
+ /* When the lock is unused, this list entry allows the lock to be pooled */
+ struct list_head pool_node;
+
+ /*
+ * A list containing the data VIOs sharing this lock, all having the same record name and
+ * data block contents, linked by their hash_lock_node fields.
+ */
+ struct list_head duplicate_ring;
+
+ /* The number of data_vios sharing this lock instance */
+ data_vio_count_t reference_count;
+
+ /* The maximum value of reference_count in the lifetime of this lock */
+ data_vio_count_t max_references;
+
+ /* The current state of this lock */
+ enum hash_lock_state state;
+
+ /* True if the UDS index should be updated with new advice */
+ bool update_advice;
+
+ /* True if the advice has been verified to be a true duplicate */
+ bool verified;
+
+ /* True if the lock has already accounted for an initial verification */
+ bool verify_counted;
+
+ /* True if this lock is registered in the lock map (cleared on rollover) */
+ bool registered;
+
+ /*
+ * If verified is false, this is the location of a possible duplicate. If verified is true,
+ * it is the verified location of a true duplicate.
+ */
+ struct zoned_pbn duplicate;
+
+ /* The PBN lock on the block containing the duplicate data */
+ struct pbn_lock *duplicate_lock;
+
+ /* The data_vio designated to act on behalf of the lock */
+ struct data_vio *agent;
+
+ /*
+ * Other data_vios with data identical to the agent's, which are currently waiting for the
+ * agent to get the information they all need to deduplicate--either against each other, or
+ * against an existing duplicate on disk.
+ */
+ struct vdo_wait_queue waiters;
+};
+
+#define LOCK_POOL_CAPACITY MAXIMUM_VDO_USER_VIOS
+
+struct hash_zones {
+ struct action_manager *manager;
+ struct uds_parameters parameters;
+ struct uds_index_session *index_session;
+ struct ratelimit_state ratelimiter;
+ atomic64_t timeouts;
+ atomic64_t dedupe_context_busy;
+
+ /* This spinlock protects the state fields and the starting of dedupe requests. */
+ spinlock_t lock;
+
+ /* The fields in the next block are all protected by the lock */
+ struct vdo_completion completion;
+ enum index_state index_state;
+ enum index_state index_target;
+ struct admin_state state;
+ bool changing;
+ bool create_flag;
+ bool dedupe_flag;
+ bool error_flag;
+ u64 reported_timeouts;
+
+ /* The number of zones */
+ zone_count_t zone_count;
+ /* The hash zones themselves */
+ struct hash_zone zones[];
+};
+
+/* These are in milliseconds. */
+unsigned int vdo_dedupe_index_timeout_interval = 5000;
+unsigned int vdo_dedupe_index_min_timer_interval = 100;
+/* Same two variables, in jiffies for easier consumption. */
+static u64 vdo_dedupe_index_timeout_jiffies;
+static u64 vdo_dedupe_index_min_timer_jiffies;
+
+static inline struct hash_zone *as_hash_zone(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_HASH_ZONE_COMPLETION);
+ return container_of(completion, struct hash_zone, completion);
+}
+
+static inline struct hash_zones *as_hash_zones(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_HASH_ZONES_COMPLETION);
+ return container_of(completion, struct hash_zones, completion);
+}
+
+static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+ "%s called on hash zone thread", name);
+}
+
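+/* Atomically change a context's state; returns true only if it was in the expected state. */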
+static inline bool change_context_state(struct dedupe_context *context, int old, int new)
+{
+ return (atomic_cmpxchg(&context->state, old, new) == old);
+}
+
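+/* Atomically change a zone's timer state; returns true only if it was in the expected state. */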
+static inline bool change_timer_state(struct hash_zone *zone, int old, int new)
+{
+ return (atomic_cmpxchg(&zone->timer_state, old, new) == old);
+}
+
+/**
+ * return_hash_lock_to_pool() - (Re)initialize a hash lock and return it to its pool.
+ * @zone: The zone from which the lock was borrowed.
+ * @lock: The lock that is no longer in use.
+ */
+static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock)
+{
+ memset(lock, 0, sizeof(*lock));
+ INIT_LIST_HEAD(&lock->pool_node);
+ INIT_LIST_HEAD(&lock->duplicate_ring);
+ vdo_waitq_init(&lock->waiters);
+ list_add_tail(&lock->pool_node, &zone->lock_pool);
+}
+
+/**
+ * vdo_get_duplicate_lock() - Get the PBN lock on the duplicate data location for a data_vio from
+ * the hash_lock the data_vio holds (if there is one).
+ * @data_vio: The data_vio to query.
+ *
+ * Return: The PBN lock on the data_vio's duplicate location.
+ */
+struct pbn_lock *vdo_get_duplicate_lock(struct data_vio *data_vio)
+{
+ if (data_vio->hash_lock == NULL)
+ return NULL;
+
+ return data_vio->hash_lock->duplicate_lock;
+}
+
+/**
+ * hash_lock_key() - Return hash_lock's record name as a hash code.
+ * @lock: The hash lock.
+ *
+ * Return: The key to use for the int map.
+ */
+static inline u64 hash_lock_key(struct hash_lock *lock)
+{
+ return get_unaligned_le64(&lock->hash.name);
+}
+
+/**
+ * get_hash_lock_state_name() - Get the string representation of a hash lock state.
+ * @state: The hash lock state.
+ *
+ * Return: The short string representing the state
+ */
+static const char *get_hash_lock_state_name(enum hash_lock_state state)
+{
+ /* Catch if a state has been added without updating the name array. */
+ BUILD_BUG_ON((VDO_HASH_LOCK_BYPASSING + 1) != ARRAY_SIZE(LOCK_STATE_NAMES));
+ return (state < ARRAY_SIZE(LOCK_STATE_NAMES)) ? LOCK_STATE_NAMES[state] : "INVALID";
+}
+
+/**
+ * assert_hash_lock_agent() - Assert that a data_vio is the agent of its hash lock, and that this
+ * is being called in the hash zone.
+ * @data_vio: The data_vio expected to be the lock agent.
+ * @where: A string describing the function making the assertion.
+ */
+static void assert_hash_lock_agent(struct data_vio *data_vio, const char *where)
+{
+ /* Not safe to access the agent field except from the hash zone. */
+ assert_data_vio_in_hash_zone(data_vio);
+ VDO_ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent,
+ "%s must be for the hash lock agent", where);
+}
+
+/**
+ * set_duplicate_lock() - Set the duplicate lock held by a hash lock. May only be called in the
+ * physical zone of the PBN lock.
+ * @hash_lock: The hash lock to update.
+ * @pbn_lock: The PBN read lock to use as the duplicate lock.
+ */
+static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn_lock)
+{
+ VDO_ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL),
+ "hash lock must not already hold a duplicate lock");
+ pbn_lock->holder_count += 1;
+ hash_lock->duplicate_lock = pbn_lock;
+}
+
+/**
+ * dequeue_lock_waiter() - Remove the first data_vio from the lock's waitq and return it.
+ * @lock: The lock containing the wait queue.
+ *
+ * Return: The first (oldest) waiter in the queue, or NULL if the queue is empty.
+ */
+static inline struct data_vio *dequeue_lock_waiter(struct hash_lock *lock)
+{
+ return vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&lock->waiters));
+}
+
+/**
+ * set_hash_lock() - Set, change, or clear the hash lock a data_vio is using.
+ * @data_vio: The data_vio to update.
+ * @new_lock: The hash lock the data_vio is joining.
+ *
+ * Updates the hash lock (or locks) to reflect the change in membership.
+ */
+static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
+{
+ struct hash_lock *old_lock = data_vio->hash_lock;
+
+ if (old_lock != NULL) {
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
+ "must have a hash zone when holding a hash lock");
+ VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
+ "must be on a hash lock ring when holding a hash lock");
+ VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
+ "hash lock reference must be counted");
+
+ if ((old_lock->state != VDO_HASH_LOCK_BYPASSING) &&
+ (old_lock->state != VDO_HASH_LOCK_UNLOCKING)) {
+ /*
+ * If the reference count goes to zero in a non-terminal state, we're most
+ * likely leaking this lock.
+ */
+ VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 1,
+ "hash locks should only become unreferenced in a terminal state, not state %s",
+ get_hash_lock_state_name(old_lock->state));
+ }
+
+ list_del_init(&data_vio->hash_lock_entry);
+ old_lock->reference_count -= 1;
+
+ data_vio->hash_lock = NULL;
+ }
+
+ if (new_lock != NULL) {
+ /*
+ * Keep all data_vios sharing the lock on a ring since they can complete in any
+ * order and we'll always need a pointer to one to compare data.
+ */
+ list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
+ new_lock->reference_count += 1;
+ if (new_lock->max_references < new_lock->reference_count)
+ new_lock->max_references = new_lock->reference_count;
+
+ data_vio->hash_lock = new_lock;
+ }
+}
+
+/* There are loops in the state diagram, so some forward decl's are needed. */
+static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
+ bool agent_is_done);
+static void start_locking(struct hash_lock *lock, struct data_vio *agent);
+static void start_writing(struct hash_lock *lock, struct data_vio *agent);
+static void unlock_duplicate_pbn(struct vdo_completion *completion);
+static void transfer_allocation_lock(struct data_vio *data_vio);
+
+/**
+ * exit_hash_lock() - Bottleneck for data_vios that have written or deduplicated and that are no
+ * longer needed to be an agent for the hash lock.
+ * @data_vio: The data_vio to complete and send to be cleaned up.
+ */
+static void exit_hash_lock(struct data_vio *data_vio)
+{
+ /* Release the hash lock now, saving a thread transition in cleanup. */
+ vdo_release_hash_lock(data_vio);
+
+ /* Complete the data_vio and start the clean-up path to release any locks it still holds. */
+ data_vio->vio.completion.callback = complete_data_vio;
+
+ continue_data_vio(data_vio);
+}
+
+/**
+ * set_duplicate_location() - Set the location of the duplicate block for data_vio, updating the
+ * is_duplicate and duplicate fields from a zoned_pbn.
+ * @data_vio: The data_vio to modify.
+ * @source: The location of the duplicate.
+ */
+static void set_duplicate_location(struct data_vio *data_vio,
+ const struct zoned_pbn source)
+{
+ data_vio->is_duplicate = (source.pbn != VDO_ZERO_BLOCK);
+ data_vio->duplicate = source;
+}
+
+/**
+ * retire_lock_agent() - Retire the active lock agent, replacing it with the first lock waiter, and
+ * make the retired agent exit the hash lock.
+ * @lock: The hash lock to update.
+ *
+ * Return: The new lock agent (which will be NULL if there was no waiter)
+ */
+static struct data_vio *retire_lock_agent(struct hash_lock *lock)
+{
+ struct data_vio *old_agent = lock->agent;
+ struct data_vio *new_agent = dequeue_lock_waiter(lock);
+
+ lock->agent = new_agent;
+ exit_hash_lock(old_agent);
+ if (new_agent != NULL)
+ set_duplicate_location(new_agent, lock->duplicate);
+ return new_agent;
+}
+
+/**
+ * wait_on_hash_lock() - Add a data_vio to the lock's queue of waiters.
+ * @lock: The hash lock on which to wait.
+ * @data_vio: The data_vio to add to the queue.
+ */
+static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ vdo_waitq_enqueue_waiter(&lock->waiters, &data_vio->waiter);
+
+ /*
+ * Make sure the agent doesn't block indefinitely in the packer since it now has at least
+ * one other data_vio waiting on it.
+ */
+ if ((lock->state != VDO_HASH_LOCK_WRITING) || !cancel_data_vio_compression(lock->agent))
+ return;
+
+ /*
+ * Even though we're waiting, we also have to send ourselves as a one-way message to the
+ * packer to ensure the agent continues executing. This is safe because
+ * cancel_data_vio_compression() guarantees the agent won't continue executing until this
+ * message arrives in the packer, and because the wait queue link isn't used for sending
+ * the message.
+ */
+ data_vio->compression.lock_holder = lock->agent;
+ launch_data_vio_packer_callback(data_vio, vdo_remove_lock_holder_from_packer);
+}
+
+/**
+ * abort_waiter() - waiter_callback_fn function that shunts waiters to write their blocks without
+ * optimization.
+ * @waiter: The data_vio's waiter link.
+ * @context: Not used.
+ */
+static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ write_data_vio(vdo_waiter_as_data_vio(waiter));
+}
+
+/**
+ * start_bypassing() - Stop using the hash lock.
+ * @lock: The hash lock.
+ * @agent: The data_vio acting as the agent for the lock.
+ *
+ * Stops using the hash lock. This is the final transition for hash locks which did not get an
+ * error.
+ */
+static void start_bypassing(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_BYPASSING;
+ exit_hash_lock(agent);
+}
+
+void vdo_clean_failed_hash_lock(struct data_vio *data_vio)
+{
+ struct hash_lock *lock = data_vio->hash_lock;
+
+ if (lock->state == VDO_HASH_LOCK_BYPASSING) {
+ exit_hash_lock(data_vio);
+ return;
+ }
+
+ if (lock->agent == NULL) {
+ lock->agent = data_vio;
+ } else if (data_vio != lock->agent) {
+ exit_hash_lock(data_vio);
+ return;
+ }
+
+ lock->state = VDO_HASH_LOCK_BYPASSING;
+
+ /* Ensure we don't attempt to update advice when cleaning up. */
+ lock->update_advice = false;
+
+ vdo_waitq_notify_all_waiters(&lock->waiters, abort_waiter, NULL);
+
+ if (lock->duplicate_lock != NULL) {
+ /* The agent must reference the duplicate zone to launch it. */
+ data_vio->duplicate = lock->duplicate;
+ launch_data_vio_duplicate_zone_callback(data_vio, unlock_duplicate_pbn);
+ return;
+ }
+
+ lock->agent = NULL;
+ data_vio->is_duplicate = false;
+ exit_hash_lock(data_vio);
+}
+
+/**
+ * finish_unlocking() - Handle the result of the agent for the lock releasing a read lock on
+ * duplicate candidate.
+ * @completion: The completion of the data_vio acting as the lock's agent.
+ *
+ * This continuation is registered in unlock_duplicate_pbn().
+ */
+static void finish_unlocking(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must have released the duplicate lock for the hash lock");
+
+ if (!lock->verified) {
+ /*
+ * UNLOCKING -> WRITING transition: The lock we released was on an unverified
+ * block, so it must have been a lock on advice we were verifying, not on a
+ * location that was used for deduplication. Go write (or compress) the block to
+ * get a location to dedupe against.
+ */
+ start_writing(lock, agent);
+ return;
+ }
+
+ /*
+ * With the lock released, the verified duplicate block may already have changed and will
+ * need to be re-verified if a waiter arrived.
+ */
+ lock->verified = false;
+
+ if (vdo_waitq_has_waiters(&lock->waiters)) {
+ /*
+ * UNLOCKING -> LOCKING transition: A new data_vio entered the hash lock while the
+ * agent was releasing the PBN lock. The current agent exits and the waiter has to
+ * re-lock and re-verify the duplicate location.
+ *
+ * TODO: If we used the current agent to re-acquire the PBN lock we wouldn't need
+ * to re-verify.
+ */
+ agent = retire_lock_agent(lock);
+ start_locking(lock, agent);
+ return;
+ }
+
+ /*
+ * UNLOCKING -> BYPASSING transition: The agent is done with the lock and no other
+ * data_vios reference it, so remove it from the lock map and return it to the pool.
+ */
+ start_bypassing(lock, agent);
+}
+
+/**
+ * unlock_duplicate_pbn() - Release a read lock on the PBN of the block that may or may not have
+ * contained duplicate data.
+ * @completion: The completion of the data_vio acting as the lock's agent.
+ *
+ * This continuation is launched by start_unlocking(), and calls back to finish_unlocking() on the
+ * hash zone thread.
+ */
+static void unlock_duplicate_pbn(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_data_vio_in_duplicate_zone(agent);
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
+ "must have a duplicate lock to release");
+
+ vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn,
+ vdo_forget(lock->duplicate_lock));
+ if (lock->state == VDO_HASH_LOCK_BYPASSING) {
+ complete_data_vio(completion);
+ return;
+ }
+
+ launch_data_vio_hash_zone_callback(agent, finish_unlocking);
+}
+
+/**
+ * start_unlocking() - Release a read lock on the PBN of the block that may or may not have
+ * contained duplicate data.
+ * @lock: The hash lock.
+ * @agent: The data_vio currently acting as the agent for the lock.
+ */
+static void start_unlocking(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_UNLOCKING;
+ launch_data_vio_duplicate_zone_callback(agent, unlock_duplicate_pbn);
+}
+
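+/* Return a context to its zone's available list and decrement the zone's active count. */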
+static void release_context(struct dedupe_context *context)
+{
+ struct hash_zone *zone = context->zone;
+
+ WRITE_ONCE(zone->active, zone->active - 1);
+ list_move(&context->list_entry, &zone->available);
+}
+
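+/* Release the agent's dedupe context unless the request timed out or there was no context. */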
+static void process_update_result(struct data_vio *agent)
+{
+ struct dedupe_context *context = agent->dedupe_context;
+
+ if ((context == NULL) ||
+ !change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE))
+ return;
+
+ release_context(context);
+}
+
+/**
+ * finish_updating() - Process the result of a UDS update performed by the agent for the lock.
+ * @completion: The completion of the data_vio that performed the update
+ *
+ * This continuation is registered in start_querying().
+ */
+static void finish_updating(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ process_update_result(agent);
+
+ /*
+ * UDS was updated successfully, so don't update again unless the duplicate location
+ * changes due to rollover.
+ */
+ lock->update_advice = false;
+
+ if (vdo_waitq_has_waiters(&lock->waiters)) {
+ /*
+ * UPDATING -> DEDUPING transition: A new data_vio arrived during the UDS update.
+ * Send it on the verified dedupe path. The agent is done with the lock, but the
+ * lock may still need to use it to clean up after rollover.
+ */
+ start_deduping(lock, agent, true);
+ return;
+ }
+
+ if (lock->duplicate_lock != NULL) {
+ /*
+ * UPDATING -> UNLOCKING transition: No one is waiting to dedupe, but we hold a
+ * duplicate PBN lock, so go release it.
+ */
+ start_unlocking(lock, agent);
+ return;
+ }
+
+ /*
+ * UPDATING -> BYPASSING transition: No one is waiting to dedupe and there's no lock to
+ * release.
+ */
+ start_bypassing(lock, agent);
+}
+
+static void query_index(struct data_vio *data_vio, enum uds_request_type operation);
+
+/**
+ * start_updating() - Continue deduplication with the last step, updating UDS with the location of
+ * the duplicate that should be returned as advice in the future.
+ * @lock: The hash lock.
+ * @agent: The data_vio currently acting as the agent for the lock.
+ */
+static void start_updating(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_UPDATING;
+
+ VDO_ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified");
+ VDO_ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed");
+
+ agent->last_async_operation = VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX;
+ set_data_vio_hash_zone_callback(agent, finish_updating);
+ query_index(agent, UDS_UPDATE);
+}
+
+/**
+ * finish_deduping() - Handle a data_vio that has finished deduplicating against the block locked
+ * by the hash lock.
+ * @lock: The hash lock.
+ * @data_vio: The lock holder that has finished deduplicating.
+ *
+ * If there are other data_vios still sharing the lock, this will just release the data_vio's share
+ * of the lock and finish processing the data_vio. If this is the last data_vio holding the lock,
+ * this makes the data_vio the lock agent and uses it to advance the state of the lock so it can
+ * eventually be released.
+ */
+static void finish_deduping(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ struct data_vio *agent = data_vio;
+
+ VDO_ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING");
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
+ "shouldn't have any lock waiters in DEDUPING");
+
+ /* Just release the lock reference if other data_vios are still deduping. */
+ if (lock->reference_count > 1) {
+ exit_hash_lock(data_vio);
+ return;
+ }
+
+ /* The hash lock must have an agent for all other lock states. */
+ lock->agent = agent;
+ if (lock->update_advice) {
+ /*
+ * DEDUPING -> UPDATING transition: The location of the duplicate block changed
+ * since the initial UDS query because of compression, rollover, or because the
+ * query agent didn't have an allocation. The UDS update was delayed in case there
+ * was another change in location, but with only this data_vio using the hash lock,
+ * it's time to update the advice.
+ */
+ start_updating(lock, agent);
+ } else {
+ /*
+ * DEDUPING -> UNLOCKING transition: Release the PBN read lock on the duplicate
+ * location so the hash lock itself can be released (contingent on no new data_vios
+ * arriving in the lock before the agent returns).
+ */
+ start_unlocking(lock, agent);
+ }
+}
+
+/**
+ * acquire_lock() - Get the lock for a record name.
+ * @zone: The zone responsible for the hash.
+ * @hash: The hash to lock.
+ * @replace_lock: If non-NULL, the lock already registered for the hash which should be replaced by
+ * the new lock.
+ * @lock_ptr: A pointer to receive the hash lock.
+ *
+ * Gets the lock for the hash (record name) of the data in a data_vio, or if one does not exist (or
+ * if we are explicitly rolling over), initializes a new lock for the hash and registers it in the
+ * zone. This must only be called in the correct thread for the zone.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check acquire_lock(struct hash_zone *zone,
+ const struct uds_record_name *hash,
+ struct hash_lock *replace_lock,
+ struct hash_lock **lock_ptr)
+{
+ struct hash_lock *lock, *new_lock;
+ int result;
+
+ /*
+ * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses
+ * in the common case of no lock contention.
+ */
+ result = VDO_ASSERT(!list_empty(&zone->lock_pool),
+ "never need to wait for a free hash lock");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ new_lock = list_entry(zone->lock_pool.prev, struct hash_lock, pool_node);
+ list_del_init(&new_lock->pool_node);
+
+ /*
+ * Fill in the hash of the new lock so we can map it, since we have to use the hash as the
+ * map key.
+ */
+ new_lock->hash = *hash;
+
+ result = vdo_int_map_put(zone->hash_lock_map, hash_lock_key(new_lock),
+ new_lock, (replace_lock != NULL), (void **) &lock);
+ if (result != VDO_SUCCESS) {
+ return_hash_lock_to_pool(zone, vdo_forget(new_lock));
+ return result;
+ }
+
+ if (replace_lock != NULL) {
+ /* On mismatch put the old lock back and return a severe error */
+ VDO_ASSERT_LOG_ONLY(lock == replace_lock,
+ "old lock must have been in the lock map");
+ /* TODO: Check earlier and bail out? */
+ VDO_ASSERT_LOG_ONLY(replace_lock->registered,
+ "old lock must have been marked registered");
+ replace_lock->registered = false;
+ }
+
+ if (lock == replace_lock) {
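+ /* new_lock is now in the map, either as a fresh entry or having replaced the rolled-over lock. */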
+ lock = new_lock;
+ lock->registered = true;
+ } else {
+ /* There's already a lock for the hash, so we don't need the borrowed lock. */
+ return_hash_lock_to_pool(zone, vdo_forget(new_lock));
+ }
+
+ *lock_ptr = lock;
+ return VDO_SUCCESS;
+}
+
+/**
+ * enter_forked_lock() - Bind the data_vio to a new hash lock.
+ *
+ * Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits
+ * on that lock.
+ */
+static void enter_forked_lock(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct hash_lock *new_lock = context;
+
+ set_hash_lock(data_vio, new_lock);
+ wait_on_hash_lock(new_lock, data_vio);
+}
+
+/**
+ * fork_hash_lock() - Fork a hash lock because it has run out of increments on the duplicate PBN.
+ * @old_lock: The hash lock to fork.
+ * @new_agent: The data_vio that will be the agent for the new lock.
+ *
+ * Transfers the new agent and any lock waiters to a new hash lock instance which takes the place
+ * of the old lock in the lock map. The old lock remains active, but will not update advice.
+ */
+static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agent)
+{
+ struct hash_lock *new_lock;
+ int result;
+
+ result = acquire_lock(new_agent->hash_zone, &new_agent->record_name, old_lock,
+ &new_lock);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(new_agent, result);
+ return;
+ }
+
+ /*
+ * Only one of the two locks should update UDS. The old lock is out of references, so it
+ * would be poor dedupe advice in the short term.
+ */
+ old_lock->update_advice = false;
+ new_lock->update_advice = true;
+
+ set_hash_lock(new_agent, new_lock);
+ new_lock->agent = new_agent;
+
+ vdo_waitq_notify_all_waiters(&old_lock->waiters, enter_forked_lock, new_lock);
+
+ new_agent->is_duplicate = false;
+ start_writing(new_lock, new_agent);
+}
+
+/**
+ * launch_dedupe() - Reserve a reference count increment for a data_vio and launch it on the dedupe
+ * path.
+ * @lock: The hash lock.
+ * @data_vio: The data_vio to deduplicate using the hash lock.
+ * @has_claim: true if the data_vio already has claimed an increment from the duplicate lock.
+ *
+ * If no increments are available, this will roll over to a new hash lock and launch the data_vio
+ * as the writing agent for that lock.
+ */
+static void launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio,
+ bool has_claim)
+{
+ if (!has_claim && !vdo_claim_pbn_lock_increment(lock->duplicate_lock)) {
+ /* Out of increments, so must roll over to a new lock. */
+ fork_hash_lock(lock, data_vio);
+ return;
+ }
+
+ /* Deduplicate against the lock's verified location. */
+ set_duplicate_location(data_vio, lock->duplicate);
+ data_vio->new_mapped = data_vio->duplicate;
+ update_metadata_for_data_vio_write(data_vio, lock->duplicate_lock);
+}
+
+/**
+ * start_deduping() - Enter the hash lock state where data_vios deduplicate in parallel against a
+ * true copy of their data on disk.
+ * @lock: The hash lock.
+ * @agent: The data_vio acting as the agent for the lock.
+ * @agent_is_done: true only if the agent has already written or deduplicated against its data.
+ *
+ * If the agent itself needs to deduplicate, an increment for it must already have been claimed
+ * from the duplicate lock, ensuring the hash lock will still have a data_vio holding it.
+ */
+static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
+ bool agent_is_done)
+{
+ lock->state = VDO_HASH_LOCK_DEDUPING;
+
+ /*
+ * We don't take the downgraded allocation lock from the agent unless we actually need to
+ * deduplicate against it.
+ */
+ if (lock->duplicate_lock == NULL) {
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state),
+ "compression must have shared a lock");
+ VDO_ASSERT_LOG_ONLY(agent_is_done,
+ "agent must have written the new duplicate");
+ transfer_allocation_lock(agent);
+ }
+
+ VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock),
+ "duplicate_lock must be a PBN read lock");
+
+ /*
+ * This state is not like any of the other states. There is no designated agent--the agent
+ * transitioning to this state and all the waiters will be launched to deduplicate in
+ * parallel.
+ */
+ lock->agent = NULL;
+
+ /*
+ * Launch the agent (if not already deduplicated) and as many lock waiters as we have
+ * available increments for on the dedupe path. If we run out of increments, rollover will
+ * be triggered and the remaining waiters will be transferred to the new lock.
+ */
+ if (!agent_is_done) {
+ launch_dedupe(lock, agent, true);
+ agent = NULL;
+ }
+ while (vdo_waitq_has_waiters(&lock->waiters))
+ launch_dedupe(lock, dequeue_lock_waiter(lock), false);
+
+ if (agent_is_done) {
+ /*
+ * In the degenerate case where all the waiters rolled over to a new lock, this
+ * will continue to use the old agent to clean up this lock, and otherwise it just
+ * lets the agent exit the lock.
+ */
+ finish_deduping(lock, agent);
+ }
+}
+
+/**
+ * increment_stat() - Increment a statistic counter in a non-atomic yet thread-safe manner.
+ * @stat: The statistic field to increment.
+ */
+static inline void increment_stat(u64 *stat)
+{
+ /*
+ * Must only be mutated on the hash zone thread. Prevents any compiler shenanigans from
+ * affecting other threads reading stats.
+ */
+ WRITE_ONCE(*stat, *stat + 1);
+}
+
+/**
+ * finish_verifying() - Handle the result of the agent for the lock comparing its data to the
+ * duplicate candidate.
+ * @completion: The completion of the data_vio used to verify dedupe
+ *
+ * This continuation is registered in start_verifying().
+ */
+static void finish_verifying(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ lock->verified = agent->is_duplicate;
+
+ /*
+ * Only count the result of the initial verification of the advice as valid or stale, and
+ * not any re-verifications due to PBN lock releases.
+ */
+ if (!lock->verify_counted) {
+ lock->verify_counted = true;
+ if (lock->verified)
+ increment_stat(&agent->hash_zone->statistics.dedupe_advice_valid);
+ else
+ increment_stat(&agent->hash_zone->statistics.dedupe_advice_stale);
+ }
+
+ /*
+ * Even if the block is a verified duplicate, we can't start to deduplicate unless we can
+ * claim a reference count increment for the agent.
+ */
+ if (lock->verified && !vdo_claim_pbn_lock_increment(lock->duplicate_lock)) {
+ agent->is_duplicate = false;
+ lock->verified = false;
+ }
+
+ if (lock->verified) {
+ /*
+ * VERIFYING -> DEDUPING transition: The advice is for a true duplicate, so start
+ * deduplicating against it, if references are available.
+ */
+ start_deduping(lock, agent, false);
+ } else {
+ /*
+ * VERIFYING -> UNLOCKING transition: Either the verify failed or we'd try to
+ * dedupe and roll over immediately, which would fail because it would leave the
+ * lock without an agent to release the PBN lock. In both cases, the data will have
+ * to be written or compressed, but first the advice PBN must be unlocked by the
+ * VERIFYING agent.
+ */
+ lock->update_advice = true;
+ start_unlocking(lock, agent);
+ }
+}
+
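+/* Compare two data blocks a u64 at a time; VDO_BLOCK_SIZE is a multiple of sizeof(u64). */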
+static bool blocks_equal(char *block1, char *block2)
+{
+ int i;
+
+ for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
+ if (*((u64 *) &block1[i]) != *((u64 *) &block2[i]))
+ return false;
+ }
+
+ return true;
+}
+
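+/* Compare the agent's data to the candidate copy, then finish verifying in the hash zone. */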
+static void verify_callback(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+
+ agent->is_duplicate = blocks_equal(agent->vio.data, agent->scratch_block);
+ launch_data_vio_hash_zone_callback(agent, finish_verifying);
+}
+
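+/* Uncompress the candidate into the scratch block; treat any failure as not a duplicate. */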
+static void uncompress_and_verify(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ int result;
+
+ result = uncompress_data_vio(agent, agent->duplicate.state,
+ agent->scratch_block);
+ if (result == VDO_SUCCESS) {
+ verify_callback(completion);
+ return;
+ }
+
+ agent->is_duplicate = false;
+ launch_data_vio_hash_zone_callback(agent, finish_verifying);
+}
+
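+/* Completion for the verify read: on error, give up on dedupe; else compare on a CPU thread. */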
+static void verify_endio(struct bio *bio)
+{
+ struct data_vio *agent = vio_as_data_vio(bio->bi_private);
+ int result = blk_status_to_errno(bio->bi_status);
+
+ vdo_count_completed_bios(bio);
+ if (result != VDO_SUCCESS) {
+ agent->is_duplicate = false;
+ launch_data_vio_hash_zone_callback(agent, finish_verifying);
+ return;
+ }
+
+ if (vdo_is_state_compressed(agent->duplicate.state)) {
+ launch_data_vio_cpu_callback(agent, uncompress_and_verify,
+ CPU_Q_COMPRESS_BLOCK_PRIORITY);
+ return;
+ }
+
+ launch_data_vio_cpu_callback(agent, verify_callback,
+ CPU_Q_COMPLETE_READ_PRIORITY);
+}
+
+/**
+ * start_verifying() - Begin the data verification phase.
+ * @lock: The hash lock (must be LOCKING).
+ * @agent: The data_vio to use to read and compare candidate data.
+ *
+ * Continue the deduplication path for a hash lock by using the agent to read (and possibly
+ * decompress) the data at the candidate duplicate location, comparing it to the data in the agent
+ * to verify that the candidate is identical to all the data_vios sharing the hash. If so, it can
+ * be deduplicated against, otherwise a data_vio allocation will have to be written to and used for
+ * dedupe.
+ */
+static void start_verifying(struct hash_lock *lock, struct data_vio *agent)
+{
+ int result;
+ struct vio *vio = &agent->vio;
+ char *buffer = (vdo_is_state_compressed(agent->duplicate.state) ?
+ (char *) agent->compression.block :
+ agent->scratch_block);
+
+ lock->state = VDO_HASH_LOCK_VERIFYING;
+ VDO_ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once");
+
+ agent->last_async_operation = VIO_ASYNC_OP_VERIFY_DUPLICATION;
+ result = vio_reset_bio(vio, buffer, verify_endio, REQ_OP_READ,
+ agent->duplicate.pbn);
+ if (result != VDO_SUCCESS) {
+ set_data_vio_hash_zone_callback(agent, finish_verifying);
+ continue_data_vio_with_error(agent, result);
+ return;
+ }
+
+ set_data_vio_bio_zone_callback(agent, vdo_submit_vio);
+ vdo_launch_completion_with_priority(&vio->completion, BIO_Q_VERIFY_PRIORITY);
+}
+
+/**
+ * finish_locking() - Handle the result of the agent for the lock attempting to obtain a PBN read
+ * lock on the candidate duplicate block.
+ * @completion: The completion of the data_vio that attempted to get the read lock.
+ *
+ * This continuation is registered in lock_duplicate_pbn().
+ */
+static void finish_locking(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ if (!agent->is_duplicate) {
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must not hold duplicate_lock if not flagged as a duplicate");
+ /*
+ * LOCKING -> WRITING transition: The advice block is being modified or has no
+ * available references, so try to write or compress the data, remembering to
+ * update UDS later with the new advice.
+ */
+ increment_stat(&agent->hash_zone->statistics.dedupe_advice_stale);
+ lock->update_advice = true;
+ start_writing(lock, agent);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
+ "must hold duplicate_lock if flagged as a duplicate");
+
+ if (!lock->verified) {
+ /*
+ * LOCKING -> VERIFYING transition: Continue on the unverified dedupe path, reading
+ * the candidate duplicate and comparing it to the agent's data to decide whether
+ * it is a true duplicate or stale advice.
+ */
+ start_verifying(lock, agent);
+ return;
+ }
+
+ if (!vdo_claim_pbn_lock_increment(lock->duplicate_lock)) {
+ /*
+ * LOCKING -> UNLOCKING transition: The verified block was re-locked, but has no
+ * available increments left. Must first release the useless PBN read lock before
+ * rolling over to a new copy of the block.
+ */
+ agent->is_duplicate = false;
+ lock->verified = false;
+ lock->update_advice = true;
+ start_unlocking(lock, agent);
+ return;
+ }
+
+ /*
+ * LOCKING -> DEDUPING transition: Continue on the verified dedupe path, deduplicating
+ * against a location that was previously verified or written to.
+ */
+ start_deduping(lock, agent, false);
+}
+
+static bool acquire_provisional_reference(struct data_vio *agent, struct pbn_lock *lock,
+ struct slab_depot *depot)
+{
+ /* Ensure that the newly-locked block is referenced. */
+ struct vdo_slab *slab = vdo_get_slab(depot, agent->duplicate.pbn);
+ int result = vdo_acquire_provisional_reference(slab, agent->duplicate.pbn, lock);
+
+ if (result == VDO_SUCCESS)
+ return true;
+
+ vdo_log_warning_strerror(result,
+ "Error acquiring provisional reference for dedupe candidate; aborting dedupe");
+ agent->is_duplicate = false;
+ vdo_release_physical_zone_pbn_lock(agent->duplicate.zone,
+ agent->duplicate.pbn, lock);
+ continue_data_vio_with_error(agent, result);
+ return false;
+}
+
+/**
+ * lock_duplicate_pbn() - Acquire a read lock on the PBN of the block containing candidate
+ * duplicate data (compressed or uncompressed).
+ * @completion: The completion of the data_vio attempting to acquire the physical block lock on
+ * behalf of its hash lock.
+ *
+ * If the PBN is already locked for writing, the lock attempt is abandoned and is_duplicate will be
+ * cleared before calling back. This continuation is launched from start_locking(), and calls back
+ * to finish_locking() on the hash zone thread.
+ */
+static void lock_duplicate_pbn(struct vdo_completion *completion)
+{
+ unsigned int increment_limit;
+ struct pbn_lock *lock;
+ int result;
+
+ struct data_vio *agent = as_data_vio(completion);
+ struct slab_depot *depot = vdo_from_data_vio(agent)->depot;
+ struct physical_zone *zone = agent->duplicate.zone;
+
+ assert_data_vio_in_duplicate_zone(agent);
+
+ set_data_vio_hash_zone_callback(agent, finish_locking);
+
+ /*
+ * While in the zone that owns it, find out how many additional references can be made to
+ * the block if it turns out to truly be a duplicate.
+ */
+ increment_limit = vdo_get_increment_limit(depot, agent->duplicate.pbn);
+ if (increment_limit == 0) {
+ /*
+ * We could deduplicate against it later if a reference happened to be released
+ * during verification, but it's probably better to bail out now.
+ */
+ agent->is_duplicate = false;
+ continue_data_vio(agent);
+ return;
+ }
+
+ result = vdo_attempt_physical_zone_pbn_lock(zone, agent->duplicate.pbn,
+ VIO_READ_LOCK, &lock);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(agent, result);
+ return;
+ }
+
+ if (!vdo_is_pbn_read_lock(lock)) {
+ /*
+ * There are three cases of write locks: uncompressed data block writes, compressed
+ * (packed) block writes, and block map page writes. In all three cases, we give up
+ * on trying to verify the advice and don't bother trying to deduplicate against the
+ * data in the write lock holder.
+ *
+ * 1) We don't ever want to try to deduplicate against a block map page.
+ *
+ * 2a) It's very unlikely we'd deduplicate against an entire packed block, both
+ * because of the chance of matching it, and because we don't record advice for it,
+ * but for the uncompressed representation of all the fragments it contains. The
+ * only way we'd be getting lock contention is if we've written the same
+ * representation coincidentally before, had it become unreferenced, and it just
+ * happened to be packed together from compressed writes when we go to verify the
+ * lucky advice. Giving up is a minuscule loss of potential dedupe.
+ *
+ * 2b) If the advice is for a slot of a compressed block, it's about to get
+ * smashed, and the write smashing it cannot contain our data--it would have to be
+ * writing on behalf of our hash lock, but that's impossible since we're the lock
+ * agent.
+ *
+ * 3a) If the lock is held by a data_vio with different data, the advice is already
+ * stale or is about to become stale.
+ *
+ * 3b) If the lock is held by a data_vio that matches us, we may as well either
+ * write it ourselves (or reference the copy we already wrote) instead of
+ * potentially having many duplicates wait for the lock holder to write, journal,
+ * hash, and finally arrive in the hash lock. We lose a chance to avoid a UDS
+ * update in the very rare case of advice for a free block that just happened to be
+ * allocated to a data_vio with the same hash. There's also a chance to save on a
+ * block write, at the cost of a block verify. Saving on a full block compare in
+ * all stale advice cases almost certainly outweighs saving a UDS update and
+ * trading a write for a read in a lucky case where advice would have been saved
+ * from becoming stale.
+ */
+ agent->is_duplicate = false;
+ continue_data_vio(agent);
+ return;
+ }
+
+ if (lock->holder_count == 0) {
+ if (!acquire_provisional_reference(agent, lock, depot))
+ return;
+
+ /*
+ * The increment limit we grabbed earlier is still valid. The lock now holds the
+ * rights to acquire all those references. Those rights will be claimed by hash
+ * locks sharing this read lock.
+ */
+ lock->increment_limit = increment_limit;
+ }
+
+ /*
+ * We've successfully acquired a read lock on behalf of the hash lock, so mark it as such.
+ */
+ set_duplicate_lock(agent->hash_lock, lock);
+
+ /*
+ * TODO: Optimization: We could directly launch the block verify, then switch to a hash
+ * thread.
+ */
+ continue_data_vio(agent);
+}
+
+/**
+ * start_locking() - Continue deduplication for a hash lock that has obtained valid advice of a
+ * potential duplicate through its agent.
+ * @lock: The hash lock (currently must be QUERYING).
+ * @agent: The data_vio bearing the dedupe advice.
+ */
+static void start_locking(struct hash_lock *lock, struct data_vio *agent)
+{
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must not acquire a duplicate lock when already holding it");
+
+ lock->state = VDO_HASH_LOCK_LOCKING;
+
+ /*
+ * TODO: Optimization: If we arrange to continue on the duplicate zone thread when
+ * accepting the advice, and don't explicitly change lock states (or use an agent-local
+ * state, or an atomic), we can avoid a thread transition here.
+ */
+ agent->last_async_operation = VIO_ASYNC_OP_LOCK_DUPLICATE_PBN;
+ launch_data_vio_duplicate_zone_callback(agent, lock_duplicate_pbn);
+}
+
+/**
+ * finish_writing() - Re-entry point for the lock agent after it has finished writing or
+ * compressing its copy of the data block.
+ * @lock: The hash lock, which must be in state WRITING.
+ * @agent: The data_vio that wrote its data for the lock.
+ *
+ * The agent will never need to dedupe against anything, so it's done with the lock, but the lock
+ * may not be finished with it, as a UDS update might still be needed.
+ *
+ * If there are other lock holders, the agent will hand the job to one of them and exit, leaving
+ * the lock to deduplicate against the just-written block. If there are no other lock holders, the
+ * agent either exits (and later tears down the hash lock), or it remains the agent and updates
+ * UDS.
+ */
+static void finish_writing(struct hash_lock *lock, struct data_vio *agent)
+{
+ /*
+ * Dedupe against the data block or compressed block slot the agent wrote. Since we know
+ * the write succeeded, there's no need to verify it.
+ */
+ lock->duplicate = agent->new_mapped;
+ lock->verified = true;
+
+ if (vdo_is_state_compressed(lock->duplicate.state) && lock->registered) {
+ /*
+ * Compression means the location we gave in the UDS query is not the location
+ * we're using to deduplicate.
+ */
+ lock->update_advice = true;
+ }
+
+ /* If there are any waiters, we need to start deduping them. */
+ if (vdo_waitq_has_waiters(&lock->waiters)) {
+ /*
+ * WRITING -> DEDUPING transition: an asynchronously-written block failed to
+ * compress, so the PBN lock on the written copy was already transferred. The agent
+ * is done with the lock, but the lock may still need to use it to clean up after
+ * rollover.
+ */
+ start_deduping(lock, agent, true);
+ return;
+ }
+
+ /*
+ * There are no waiters and the agent has successfully written, so take a step towards
+ * being able to release the hash lock (or just release it).
+ */
+ if (lock->update_advice) {
+ /*
+ * WRITING -> UPDATING transition: There's no waiter and a UDS update is needed, so
+ * retain the WRITING agent and use it to launch the update. This happens on
+ * compression, rollover, or the QUERYING agent not having an allocation.
+ */
+ start_updating(lock, agent);
+ } else if (lock->duplicate_lock != NULL) {
+ /*
+ * WRITING -> UNLOCKING transition: There's no waiter and no update needed, but the
+ * compressed write gave us a shared duplicate lock that we must release.
+ */
+ set_duplicate_location(agent, lock->duplicate);
+ start_unlocking(lock, agent);
+ } else {
+ /*
+ * WRITING -> BYPASSING transition: There's no waiter, no update needed, and no
+ * duplicate lock held, so both the agent and lock have no more work to do. The
+ * agent will release its allocation lock in cleanup.
+ */
+ start_bypassing(lock, agent);
+ }
+}
+
+/**
+ * select_writing_agent() - Search through the lock waiters for a data_vio that has an allocation.
+ * @lock: The hash lock to modify.
+ *
+ * If an allocation is found, swap agents, put the old agent at the head of the wait queue, then
+ * return the new agent. Otherwise, just return the current agent.
+ */
+static struct data_vio *select_writing_agent(struct hash_lock *lock)
+{
+ struct vdo_wait_queue temp_queue;
+ struct data_vio *data_vio;
+
+ vdo_waitq_init(&temp_queue);
+
+ /*
+ * Move waiters to the temp queue one-by-one until we find an allocation. Not ideal to
+ * search, but it only happens when nearly out of space.
+ */
+ while (((data_vio = dequeue_lock_waiter(lock)) != NULL) &&
+ !data_vio_has_allocation(data_vio)) {
+ /* Use the lower-level enqueue since we're just moving waiters around. */
+ vdo_waitq_enqueue_waiter(&temp_queue, &data_vio->waiter);
+ }
+
+ if (data_vio != NULL) {
+ /*
+ * Move the rest of the waiters over to the temp queue, preserving the order they
+ * arrived at the lock.
+ */
+ vdo_waitq_transfer_all_waiters(&lock->waiters, &temp_queue);
+
+ /*
+ * The current agent is being replaced and will have to wait to dedupe; make it the
+ * first waiter since it was the first to reach the lock.
+ */
+ vdo_waitq_enqueue_waiter(&lock->waiters, &lock->agent->waiter);
+ lock->agent = data_vio;
+ } else {
+ /* No one has an allocation, so keep the current agent. */
+ data_vio = lock->agent;
+ }
+
+ /* Swap all the waiters back onto the lock's queue. */
+ vdo_waitq_transfer_all_waiters(&temp_queue, &lock->waiters);
+ return data_vio;
+}
+
+/**
+ * start_writing() - Begin the non-duplicate write path.
+ * @lock: The hash lock (currently must be QUERYING).
+ * @agent: The data_vio currently acting as the agent for the lock.
+ *
+ * Begins the non-duplicate write path for a hash lock that had no advice, selecting a data_vio
+ * with an allocation as a new agent, if necessary, then resuming the agent on the data_vio write
+ * path.
+ */
+static void start_writing(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_WRITING;
+
+ /*
+ * The agent might not have received an allocation and so can't be used for writing, but
+ * it's entirely possible that one of the waiters did.
+ */
+ if (!data_vio_has_allocation(agent)) {
+ agent = select_writing_agent(lock);
+ /* If none of the waiters had an allocation, the writes all have to fail. */
+ if (!data_vio_has_allocation(agent)) {
+ /*
+ * TODO: Should we keep a variant of BYPASSING that causes new arrivals to
+ * fail immediately if they don't have an allocation? It might be possible
+ * that on some path there would be non-waiters still referencing the lock,
+ * so it would remain in the map as everything is currently spelled, even
+ * if the agent and all waiters release.
+ */
+ continue_data_vio_with_error(agent, VDO_NO_SPACE);
+ return;
+ }
+ }
+
+ /*
+ * If the agent compresses, it might wait indefinitely in the packer, which would be bad if
+ * there are any other data_vios waiting.
+ */
+ if (vdo_waitq_has_waiters(&lock->waiters))
+ cancel_data_vio_compression(agent);
+
+ /*
+ * Send the agent to the compress/pack/write path in vioWrite. If it succeeds, it will
+ * return to the hash lock via vdo_continue_hash_lock() and call finish_writing().
+ */
+ launch_compress_data_vio(agent);
+}
+
+/*
+ * Decode VDO duplicate advice from the old_metadata field of a UDS request.
+ * Returns true if valid advice was found and decoded
+ */
+static bool decode_uds_advice(struct dedupe_context *context)
+{
+ const struct uds_request *request = &context->request;
+ struct data_vio *data_vio = context->requestor;
+ size_t offset = 0;
+ const struct uds_record_data *encoding = &request->old_metadata;
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct zoned_pbn *advice = &data_vio->duplicate;
+ u8 version;
+ int result;
+
+ if ((request->status != UDS_SUCCESS) || !request->found)
+ return false;
+
+ version = encoding->data[offset++];
+ if (version != UDS_ADVICE_VERSION) {
+ vdo_log_error("invalid UDS advice version code %u", version);
+ return false;
+ }
+
+ advice->state = encoding->data[offset++];
+ advice->pbn = get_unaligned_le64(&encoding->data[offset]);
+ offset += sizeof(u64);
+ BUG_ON(offset != UDS_ADVICE_SIZE);
+
+ /* Don't use advice that's clearly meaningless. */
+ if ((advice->state == VDO_MAPPING_STATE_UNMAPPED) || (advice->pbn == VDO_ZERO_BLOCK)) {
+ vdo_log_debug("Invalid advice from deduplication server: pbn %llu, state %u. Giving up on deduplication of logical block %llu",
+ (unsigned long long) advice->pbn, advice->state,
+ (unsigned long long) data_vio->logical.lbn);
+ atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
+ return false;
+ }
+
+ result = vdo_get_physical_zone(vdo, advice->pbn, &advice->zone);
+ if ((result != VDO_SUCCESS) || (advice->zone == NULL)) {
+ vdo_log_debug("Invalid physical block number from deduplication server: %llu, giving up on deduplication of logical block %llu",
+ (unsigned long long) advice->pbn,
+ (unsigned long long) data_vio->logical.lbn);
+ atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
+ return false;
+ }
+
+ return true;
+}
+
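+/* If the query completed in time, decode any returned advice and release the context. */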
+static void process_query_result(struct data_vio *agent)
+{
+ struct dedupe_context *context = agent->dedupe_context;
+
+ if (context == NULL)
+ return;
+
+ if (change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) {
+ agent->is_duplicate = decode_uds_advice(context);
+ release_context(context);
+ }
+}
+
+/**
+ * finish_querying() - Process the result of a UDS query performed by the agent for the lock.
+ * @completion: The completion of the data_vio that performed the query.
+ *
+ * This continuation is registered in start_querying().
+ */
+static void finish_querying(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ process_query_result(agent);
+
+ if (agent->is_duplicate) {
+ lock->duplicate = agent->duplicate;
+ /*
+ * QUERYING -> LOCKING transition: Valid advice was obtained from UDS. Use the
+ * QUERYING agent to start the hash lock on the unverified dedupe path, verifying
+ * that the advice can be used.
+ */
+ start_locking(lock, agent);
+ } else {
+ /*
+ * The agent will be used as the duplicate if it has an allocation; if it does, that
+ * location was posted to UDS, so no update will be needed.
+ */
+ lock->update_advice = !data_vio_has_allocation(agent);
+ /*
+ * QUERYING -> WRITING transition: There was no advice or the advice wasn't valid,
+ * so try to write or compress the data.
+ */
+ start_writing(lock, agent);
+ }
+}
+
+/**
+ * start_querying() - Start deduplication for a hash lock.
+ * @lock: The initialized hash lock.
+ * @data_vio: The data_vio that has just obtained the new lock.
+ *
+ * Starts deduplication for a hash lock that has finished initializing by making the data_vio that
+ * requested it the agent, entering the QUERYING state, and using the agent to perform the UDS
+ * query on behalf of the lock.
+ */
+static void start_querying(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ lock->agent = data_vio;
+ lock->state = VDO_HASH_LOCK_QUERYING;
+ data_vio->last_async_operation = VIO_ASYNC_OP_CHECK_FOR_DUPLICATION;
+ set_data_vio_hash_zone_callback(data_vio, finish_querying);
+ query_index(data_vio,
+ (data_vio_has_allocation(data_vio) ? UDS_POST : UDS_QUERY));
+}
+
+/**
+ * report_bogus_lock_state() - Complain that a data_vio has entered a hash_lock that is in an
+ * unimplemented or unusable state and continue the data_vio with an
+ * error.
+ * @lock: The hash lock.
+ * @data_vio: The data_vio attempting to enter the lock.
+ */
+static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ VDO_ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s",
+ get_hash_lock_state_name(lock->state));
+ continue_data_vio_with_error(data_vio, VDO_LOCK_ERROR);
+}
+
+/**
+ * vdo_continue_hash_lock() - Continue the processing state after writing, compressing, or
+ * deduplicating.
+ * @completion: The completion of the data_vio to continue processing in its hash lock.
+ *
+ * Asynchronously continue processing a data_vio in its hash lock after it has finished writing,
+ * compressing, or deduplicating, so it can share the result with any data_vios waiting in the hash
+ * lock, or update the UDS index, or simply release its share of the lock.
+ *
+ * Context: This must only be called in the correct thread for the hash zone.
+ */
+void vdo_continue_hash_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct hash_lock *lock = data_vio->hash_lock;
+
+ switch (lock->state) {
+ case VDO_HASH_LOCK_WRITING:
+ VDO_ASSERT_LOG_ONLY(data_vio == lock->agent,
+ "only the lock agent may continue the lock");
+ finish_writing(lock, data_vio);
+ break;
+
+ case VDO_HASH_LOCK_DEDUPING:
+ finish_deduping(lock, data_vio);
+ break;
+
+ case VDO_HASH_LOCK_BYPASSING:
+ /* This data_vio has finished the write path and the lock doesn't need it. */
+ exit_hash_lock(data_vio);
+ break;
+
+ case VDO_HASH_LOCK_INITIALIZING:
+ case VDO_HASH_LOCK_QUERYING:
+ case VDO_HASH_LOCK_UPDATING:
+ case VDO_HASH_LOCK_LOCKING:
+ case VDO_HASH_LOCK_VERIFYING:
+ case VDO_HASH_LOCK_UNLOCKING:
+ /* A lock in this state should never be re-entered. */
+ report_bogus_lock_state(lock, data_vio);
+ break;
+
+ default:
+ report_bogus_lock_state(lock, data_vio);
+ }
+}
+
+/**
+ * is_hash_collision() - Check to see if a hash collision has occurred.
+ * @lock: The lock to check.
+ * @candidate: The data_vio seeking to share the lock.
+ *
+ * Check whether the data in data_vios sharing a lock is different than in a data_vio seeking to
+ * share the lock, which should only be possible in the extremely unlikely case of a hash
+ * collision.
+ *
+ * Return: true if the given data_vio must not share the lock because it doesn't have the same data
+ * as the lock holders.
+ */
+static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate)
+{
+ struct data_vio *lock_holder;
+ struct hash_zone *zone;
+ bool collides;
+
+ if (list_empty(&lock->duplicate_ring))
+ return false;
+
+ lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
+ hash_lock_entry);
+ zone = candidate->hash_zone;
+ collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
+ if (collides)
+ increment_stat(&zone->statistics.concurrent_hash_collisions);
+ else
+ increment_stat(&zone->statistics.concurrent_data_matches);
+
+ return collides;
+}
+
+static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio)
+{
+ int result;
+
+ /* FIXME: BUG_ON() and/or enter read-only mode? */
+ result = VDO_ASSERT(data_vio->hash_lock == NULL,
+ "must not already hold a hash lock");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
+ "must not already be a member of a hash lock ring");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return VDO_ASSERT(data_vio->recovery_sequence_number == 0,
+ "must not hold a recovery lock when getting a hash lock");
+}
+
+/**
+ * vdo_acquire_hash_lock() - Acquire or share a lock on a record name.
+ * @completion: The completion of the data_vio acquiring a lock on its record name.
+ *
+ * Acquire or share a lock on the hash (record name) of the data in a data_vio, updating the
+ * data_vio to reference the lock. This must only be called in the correct thread for the zone. In
+ * the unlikely case of a hash collision, this function will succeed, but the data_vio will not get
+ * a lock reference.
+ */
+void vdo_acquire_hash_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct hash_lock *lock;
+ int result;
+
+ assert_data_vio_in_hash_zone(data_vio);
+
+ result = assert_hash_lock_preconditions(data_vio);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ result = acquire_lock(data_vio->hash_zone, &data_vio->record_name, NULL, &lock);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ if (is_hash_collision(lock, data_vio)) {
+ /*
+ * Hash collisions are extremely unlikely, but a bogus dedupe would corrupt the
+ * data. Bypass optimization entirely. We can't compress a data_vio without
+ * a hash_lock as the compressed write depends on the hash_lock to manage the
+ * references for the compressed block.
+ */
+ write_data_vio(data_vio);
+ return;
+ }
+
+ set_hash_lock(data_vio, lock);
+ switch (lock->state) {
+ case VDO_HASH_LOCK_INITIALIZING:
+ start_querying(lock, data_vio);
+ return;
+
+ case VDO_HASH_LOCK_QUERYING:
+ case VDO_HASH_LOCK_WRITING:
+ case VDO_HASH_LOCK_UPDATING:
+ case VDO_HASH_LOCK_LOCKING:
+ case VDO_HASH_LOCK_VERIFYING:
+ case VDO_HASH_LOCK_UNLOCKING:
+ /* The lock is busy, and can't be shared yet. */
+ wait_on_hash_lock(lock, data_vio);
+ return;
+
+ case VDO_HASH_LOCK_BYPASSING:
+ /* We can't use this lock, so bypass optimization entirely. */
+ vdo_release_hash_lock(data_vio);
+ write_data_vio(data_vio);
+ return;
+
+ case VDO_HASH_LOCK_DEDUPING:
+ launch_dedupe(lock, data_vio, false);
+ return;
+
+ default:
+ /* A lock in this state should not be acquired by new VIOs. */
+ report_bogus_lock_state(lock, data_vio);
+ }
+}
+
+/**
+ * vdo_release_hash_lock() - Release a data_vio's share of a hash lock, if held, and null out the
+ * data_vio's reference to it.
+ * @data_vio: The data_vio releasing its hash lock.
+ *
+ * If the data_vio is the only one holding the lock, this also releases any resources or locks used
+ * by the hash lock (such as a PBN read lock on a block containing data with the same hash) and
+ * returns the lock to the hash zone's lock pool.
+ *
+ * Context: This must only be called in the correct thread for the hash zone.
+ */
+void vdo_release_hash_lock(struct data_vio *data_vio)
+{
+ u64 lock_key;
+ struct hash_lock *lock = data_vio->hash_lock;
+ struct hash_zone *zone = data_vio->hash_zone;
+
+ if (lock == NULL)
+ return;
+
+ set_hash_lock(data_vio, NULL);
+
+ if (lock->reference_count > 0) {
+ /* The lock is still in use by other data_vios. */
+ return;
+ }
+
+ lock_key = hash_lock_key(lock);
+ if (lock->registered) {
+ struct hash_lock *removed;
+
+ removed = vdo_int_map_remove(zone->hash_lock_map, lock_key);
+ VDO_ASSERT_LOG_ONLY(lock == removed,
+ "hash lock being released must have been mapped");
+ } else {
+ VDO_ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key),
+ "unregistered hash lock must not be in the lock map");
+ }
+
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
+ "hash lock returned to zone must have no waiters");
+ VDO_ASSERT_LOG_ONLY((lock->duplicate_lock == NULL),
+ "hash lock returned to zone must not reference a PBN lock");
+ VDO_ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING),
+ "returned hash lock must not be in use with state %s",
+ get_hash_lock_state_name(lock->state));
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
+ "hash lock returned to zone must not be in a pool ring");
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
+ "hash lock returned to zone must not reference DataVIOs");
+
+ return_hash_lock_to_pool(zone, lock);
+}
+
+/**
+ * transfer_allocation_lock() - Transfer a data_vio's downgraded allocation PBN lock to the
+ * data_vio's hash lock, converting it to a duplicate PBN lock.
+ * @data_vio: The data_vio holding the allocation lock to transfer.
+ */
+static void transfer_allocation_lock(struct data_vio *data_vio)
+{
+ struct allocation *allocation = &data_vio->allocation;
+ struct hash_lock *hash_lock = data_vio->hash_lock;
+
+ VDO_ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn,
+ "transferred lock must be for the block written");
+
+ allocation->pbn = VDO_ZERO_BLOCK;
+
+ VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock),
+ "must have downgraded the allocation lock before transfer");
+
+ hash_lock->duplicate = data_vio->new_mapped;
+ data_vio->duplicate = data_vio->new_mapped;
+
+ /*
+ * Since the lock is being transferred, the holder count doesn't change (and isn't even
+ * safe to examine on this thread).
+ */
+ hash_lock->duplicate_lock = vdo_forget(allocation->lock);
+}
+
+/**
+ * vdo_share_compressed_write_lock() - Make a data_vio's hash lock a shared holder of the PBN lock
+ * on the compressed block to which its data was just written.
+ * @data_vio: The data_vio which was just compressed.
+ * @pbn_lock: The PBN lock on the compressed block.
+ *
+ * If the lock is still a write lock (as it will be for the first share), it will be converted to a
+ * read lock. This also reserves a reference count increment for the data_vio.
+ */
+void vdo_share_compressed_write_lock(struct data_vio *data_vio,
+ struct pbn_lock *pbn_lock)
+{
+ bool claimed;
+
+ VDO_ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL,
+ "a duplicate PBN lock should not exist when writing");
+ VDO_ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state),
+ "lock transfer must be for a compressed write");
+ assert_data_vio_in_new_mapped_zone(data_vio);
+
+ /* First sharer downgrades the lock. */
+ if (!vdo_is_pbn_read_lock(pbn_lock))
+ vdo_downgrade_pbn_write_lock(pbn_lock, true);
+
+ /*
+ * Get a share of the PBN lock, ensuring it cannot be released until after this data_vio
+ * has had a chance to journal a reference.
+ */
+ data_vio->duplicate = data_vio->new_mapped;
+ data_vio->hash_lock->duplicate = data_vio->new_mapped;
+ set_duplicate_lock(data_vio->hash_lock, pbn_lock);
+
+ /*
+ * Claim a reference for this data_vio. Necessary since another hash_lock might start
+ * deduplicating against it before we increment our reference count.
+ */
+ claimed = vdo_claim_pbn_lock_increment(pbn_lock);
+ VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment");
+}
+
+static void start_uds_queue(void *ptr)
+{
+ /*
+ * Allow the UDS dedupe worker thread to do memory allocations. It will only do allocations
+ * during the UDS calls that open or close an index, but those allocations can safely sleep
+ * while reserving a large amount of memory. We could use an allocations_allowed boolean
+ * (like the base threads do), but it would be an unnecessary embellishment.
+ */
+ struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue());
+
+ vdo_register_allocating_thread(&thread->allocating_thread, NULL);
+}
+
+static void finish_uds_queue(void *ptr __always_unused)
+{
+ vdo_unregister_allocating_thread();
+}
+
+static void close_index(struct hash_zones *zones)
+ __must_hold(&zones->lock)
+{
+ int result;
+
+ /*
+ * Change the index state so that get_index_statistics() will not try to use the index
+ * session we are closing.
+ */
+ zones->index_state = IS_CHANGING;
+ /* Close the index session, while not holding the lock. */
+ spin_unlock(&zones->lock);
+ result = uds_close_index(zones->index_session);
+
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error closing index");
+ spin_lock(&zones->lock);
+ zones->index_state = IS_CLOSED;
+ zones->error_flag |= result != UDS_SUCCESS;
+ /* ASSERTION: We leave in IS_CLOSED state. */
+}
+
+static void open_index(struct hash_zones *zones)
+ __must_hold(&zones->lock)
+{
+ /* ASSERTION: We enter in IS_CLOSED state. */
+ int result;
+ bool create_flag = zones->create_flag;
+
+ zones->create_flag = false;
+ /*
+ * Change the index state so that it will be reported to the outside world as
+ * "opening".
+ */
+ zones->index_state = IS_CHANGING;
+ zones->error_flag = false;
+
+ /* Open the index session, while not holding the lock */
+ spin_unlock(&zones->lock);
+ result = uds_open_index(create_flag ? UDS_CREATE : UDS_LOAD,
+ &zones->parameters, zones->index_session);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error opening index");
+
+ spin_lock(&zones->lock);
+ if (!create_flag) {
+ switch (result) {
+ case -ENOENT:
+ /*
+ * Either there is no index, or there is no way we can recover the index.
+ * We will be called again and try to create a new index.
+ */
+ zones->index_state = IS_CLOSED;
+ zones->create_flag = true;
+ return;
+ default:
+ break;
+ }
+ }
+ if (result == UDS_SUCCESS) {
+ zones->index_state = IS_OPENED;
+ } else {
+ zones->index_state = IS_CLOSED;
+ zones->index_target = IS_CLOSED;
+ zones->error_flag = true;
+ spin_unlock(&zones->lock);
+ vdo_log_info("Setting UDS index target state to error");
+ spin_lock(&zones->lock);
+ }
+ /*
+ * ASSERTION: On success, we leave in IS_OPENED state.
+ * ASSERTION: On failure, we leave in IS_CLOSED state.
+ */
+}
+
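+/*
+ * Open or close the index session until it matches the requested target state. This is the
+ * callback of zones->completion, run on the dedupe thread (see initialize_index()), and is
+ * launched by launch_dedupe_state_change() whenever the index state no longer matches the
+ * target or a create has been requested.
+ */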
+static void change_dedupe_state(struct vdo_completion *completion)
+{
+ struct hash_zones *zones = as_hash_zones(completion);
+
+ spin_lock(&zones->lock);
+
+ /* Loop until the index is in the target state and the create flag is clear. */
+ while (vdo_is_state_normal(&zones->state) &&
+ ((zones->index_state != zones->index_target) || zones->create_flag)) {
+ if (zones->index_state == IS_OPENED)
+ close_index(zones);
+ else
+ open_index(zones);
+ }
+
+ zones->changing = false;
+ spin_unlock(&zones->lock);
+}
+
+static void start_expiration_timer(struct dedupe_context *context)
+{
+ u64 start_time = context->submission_jiffies;
+ u64 end_time;
+
+ if (!change_timer_state(context->zone, DEDUPE_QUERY_TIMER_IDLE,
+ DEDUPE_QUERY_TIMER_RUNNING))
+ return;
+
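+	/*
+	 * Fire a full timeout interval after the request was submitted, but no sooner than the
+	 * minimum timer interval from now, so the timer is not re-armed too frequently.
+	 */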
+ end_time = max(start_time + vdo_dedupe_index_timeout_jiffies,
+ jiffies + vdo_dedupe_index_min_timer_jiffies);
+ mod_timer(&context->zone->timer, end_time);
+}
+
+/**
+ * report_dedupe_timeouts() - Record and eventually report that some dedupe requests reached their
+ * expiration time without getting answers, so we timed them out.
+ * @zones: the hash zones.
+ * @timeouts: the number of newly timed out requests.
+ */
+static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeouts)
+{
+ atomic64_add(timeouts, &zones->timeouts);
+ spin_lock(&zones->lock);
+ if (__ratelimit(&zones->ratelimiter)) {
+ u64 unreported = atomic64_read(&zones->timeouts);
+
+ unreported -= zones->reported_timeouts;
+ vdo_log_debug("UDS index timeout on %llu requests",
+ (unsigned long long) unreported);
+ zones->reported_timeouts += unreported;
+ }
+ spin_unlock(&zones->lock);
+}
+
+static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
+{
+ int result;
+ off_t uds_offset;
+ struct volume_geometry geometry = vdo->geometry;
+ static const struct vdo_work_queue_type uds_queue_type = {
+ .start = start_uds_queue,
+ .finish = finish_uds_queue,
+ .max_priority = UDS_Q_MAX_PRIORITY,
+ .default_priority = UDS_Q_PRIORITY,
+ };
+
+ vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
+ vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+
+ /*
+ * Since we will save up the timeouts that would have been reported but were ratelimited,
+ * we don't need to report ratelimiting.
+ */
+ ratelimit_default_init(&zones->ratelimiter);
+ ratelimit_set_flags(&zones->ratelimiter, RATELIMIT_MSG_ON_RELEASE);
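+
+	/*
+	 * The index region start and the bio offset are in blocks; UDS takes its offset and size
+	 * in bytes, hence the VDO_BLOCK_SIZE scaling here and below.
+	 */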
+ uds_offset = ((vdo_get_index_region_start(geometry) -
+ geometry.bio_offset) * VDO_BLOCK_SIZE);
+ zones->parameters = (struct uds_parameters) {
+ .bdev = vdo->device_config->owned_device->bdev,
+ .offset = uds_offset,
+ .size = (vdo_get_index_region_size(geometry) * VDO_BLOCK_SIZE),
+ .memory_size = geometry.index_config.mem,
+ .sparse = geometry.index_config.sparse,
+ .nonce = (u64) geometry.nonce,
+ };
+
+ result = uds_create_index_session(&zones->index_session);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_make_thread(vdo, vdo->thread_config.dedupe_thread, &uds_queue_type,
+ 1, NULL);
+ if (result != VDO_SUCCESS) {
+ uds_destroy_index_session(vdo_forget(zones->index_session));
+ vdo_log_error("UDS index queue initialization failed (%d)", result);
+ return result;
+ }
+
+ vdo_initialize_completion(&zones->completion, vdo, VDO_HASH_ZONES_COMPLETION);
+ vdo_set_completion_callback(&zones->completion, change_dedupe_state,
+ vdo->thread_config.dedupe_thread);
+ return VDO_SUCCESS;
+}
+
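+/*
+ * Dedupe context states as used in this file: a context taken from its zone is set to
+ * DEDUPE_CONTEXT_PENDING when a query is launched. If the index answers in time it becomes
+ * DEDUPE_CONTEXT_COMPLETE; if the timer fires first it becomes DEDUPE_CONTEXT_TIMED_OUT, and then
+ * DEDUPE_CONTEXT_TIMED_OUT_COMPLETE once the index request finally finishes, at which point it
+ * can be recycled (reset to DEDUPE_CONTEXT_IDLE when draining, or reused directly by
+ * acquire_context()).
+ */
+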
+/**
+ * finish_index_operation() - This is the UDS callback for index queries.
+ * @request: The uds request which has just completed.
+ */
+static void finish_index_operation(struct uds_request *request)
+{
+ struct dedupe_context *context = container_of(request, struct dedupe_context,
+ request);
+
+ if (change_context_state(context, DEDUPE_CONTEXT_PENDING,
+ DEDUPE_CONTEXT_COMPLETE)) {
+ /*
+ * This query has not timed out, so send its data_vio back to its hash zone to
+ * process the results.
+ */
+ continue_data_vio(context->requestor);
+ return;
+ }
+
+ /*
+ * This query has timed out, so try to mark it complete and hence eligible for reuse. Its
+ * data_vio has already moved on.
+ */
+ if (!change_context_state(context, DEDUPE_CONTEXT_TIMED_OUT,
+ DEDUPE_CONTEXT_TIMED_OUT_COMPLETE)) {
+ VDO_ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)",
+ atomic_read(&context->state));
+ }
+
+ vdo_funnel_queue_put(context->zone->timed_out_complete, &context->queue_entry);
+}
+
+/**
+ * check_for_drain_complete() - Check whether this zone has drained.
+ * @zone: The zone to check.
+ */
+static void check_for_drain_complete(struct hash_zone *zone)
+{
+ data_vio_count_t recycled = 0;
+
+ if (!vdo_is_state_draining(&zone->state))
+ return;
+
+ if ((atomic_read(&zone->timer_state) == DEDUPE_QUERY_TIMER_IDLE) ||
+ change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
+ DEDUPE_QUERY_TIMER_IDLE)) {
+ del_timer_sync(&zone->timer);
+ } else {
+ /*
+		 * There is an in-flight timeout, which must be processed before we can continue.
+ */
+ return;
+ }
+
+ for (;;) {
+ struct dedupe_context *context;
+ struct funnel_queue_entry *entry;
+
+ entry = vdo_funnel_queue_poll(zone->timed_out_complete);
+ if (entry == NULL)
+ break;
+
+ context = container_of(entry, struct dedupe_context, queue_entry);
+ atomic_set(&context->state, DEDUPE_CONTEXT_IDLE);
+ list_add(&context->list_entry, &zone->available);
+ recycled++;
+ }
+
+ if (recycled > 0)
+ WRITE_ONCE(zone->active, zone->active - recycled);
+ VDO_ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive");
+ vdo_finish_draining(&zone->state);
+}
+
+static void timeout_index_operations_callback(struct vdo_completion *completion)
+{
+ struct dedupe_context *context, *tmp;
+ struct hash_zone *zone = as_hash_zone(completion);
+ u64 timeout_jiffies = msecs_to_jiffies(vdo_dedupe_index_timeout_interval);
+ unsigned long cutoff = jiffies - timeout_jiffies;
+ unsigned int timed_out = 0;
+
+ atomic_set(&zone->timer_state, DEDUPE_QUERY_TIMER_IDLE);
+ list_for_each_entry_safe(context, tmp, &zone->pending, list_entry) {
+ if (cutoff <= context->submission_jiffies) {
+ /*
+ * We have reached the oldest query which has not timed out yet, so restart
+ * the timer.
+ */
+ start_expiration_timer(context);
+ break;
+ }
+
+ if (!change_context_state(context, DEDUPE_CONTEXT_PENDING,
+ DEDUPE_CONTEXT_TIMED_OUT)) {
+ /*
+			 * This context completed between the time the timeout fired and now. We
+			 * can treat it as a successful query; its requestor is already enqueued
+			 * to process it.
+ */
+ continue;
+ }
+
+ /*
+ * Remove this context from the pending list so we won't look at it again on a
+ * subsequent timeout. Once the index completes it, it will be reused. Meanwhile,
+ * send its requestor on its way.
+ */
+ list_del_init(&context->list_entry);
+ continue_data_vio(context->requestor);
+ timed_out++;
+ }
+
+ if (timed_out > 0)
+ report_dedupe_timeouts(completion->vdo->hash_zones, timed_out);
+
+ check_for_drain_complete(zone);
+}
+
+static void timeout_index_operations(struct timer_list *t)
+{
+ struct hash_zone *zone = from_timer(zone, t, timer);
+
+ if (change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
+ DEDUPE_QUERY_TIMER_FIRED))
+ vdo_launch_completion(&zone->completion);
+}
+
+static int __must_check initialize_zone(struct vdo *vdo, struct hash_zones *zones,
+ zone_count_t zone_number)
+{
+ int result;
+ data_vio_count_t i;
+ struct hash_zone *zone = &zones->zones[zone_number];
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->hash_lock_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.hash_zone_threads[zone_number];
+ vdo_initialize_completion(&zone->completion, vdo, VDO_HASH_ZONE_COMPLETION);
+ vdo_set_completion_callback(&zone->completion, timeout_index_operations_callback,
+ zone->thread_id);
+ INIT_LIST_HEAD(&zone->lock_pool);
+ result = vdo_allocate(LOCK_POOL_CAPACITY, struct hash_lock, "hash_lock array",
+ &zone->lock_array);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (i = 0; i < LOCK_POOL_CAPACITY; i++)
+ return_hash_lock_to_pool(zone, &zone->lock_array[i]);
+
+ INIT_LIST_HEAD(&zone->available);
+ INIT_LIST_HEAD(&zone->pending);
+ result = vdo_make_funnel_queue(&zone->timed_out_complete);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ timer_setup(&zone->timer, timeout_index_operations, 0);
+
+ for (i = 0; i < MAXIMUM_VDO_USER_VIOS; i++) {
+ struct dedupe_context *context = &zone->contexts[i];
+
+ context->zone = zone;
+ context->request.callback = finish_index_operation;
+ context->request.session = zones->index_session;
+ list_add(&context->list_entry, &zone->available);
+ }
+
+ return vdo_make_default_thread(vdo, zone->thread_id);
+}
+
+/** get_thread_id_for_zone() - Implements vdo_zone_thread_getter_fn. */
+static thread_id_t get_thread_id_for_zone(void *context, zone_count_t zone_number)
+{
+ struct hash_zones *zones = context;
+
+ return zones->zones[zone_number].thread_id;
+}
+
+/**
+ * vdo_make_hash_zones() - Create the hash zones.
+ *
+ * @vdo: The vdo to which the zone will belong.
+ * @zones_ptr: A pointer to hold the zones.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr)
+{
+ int result;
+ struct hash_zones *zones;
+ zone_count_t z;
+ zone_count_t zone_count = vdo->thread_config.hash_zone_count;
+
+ if (zone_count == 0)
+ return VDO_SUCCESS;
+
+ result = vdo_allocate_extended(struct hash_zones, zone_count, struct hash_zone,
+ __func__, &zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = initialize_index(vdo, zones);
+ if (result != VDO_SUCCESS) {
+ vdo_free(zones);
+ return result;
+ }
+
+ vdo_set_admin_state_code(&zones->state, VDO_ADMIN_STATE_NEW);
+
+ zones->zone_count = zone_count;
+ for (z = 0; z < zone_count; z++) {
+ result = initialize_zone(vdo, zones, z);
+ if (result != VDO_SUCCESS) {
+ vdo_free_hash_zones(zones);
+ return result;
+ }
+ }
+
+ result = vdo_make_action_manager(zones->zone_count, get_thread_id_for_zone,
+ vdo->thread_config.admin_thread, zones, NULL,
+ vdo, &zones->manager);
+ if (result != VDO_SUCCESS) {
+ vdo_free_hash_zones(zones);
+ return result;
+ }
+
+ *zones_ptr = zones;
+ return VDO_SUCCESS;
+}
+
+void vdo_finish_dedupe_index(struct hash_zones *zones)
+{
+ if (zones == NULL)
+ return;
+
+ uds_destroy_index_session(vdo_forget(zones->index_session));
+}
+
+/**
+ * vdo_free_hash_zones() - Free the hash zones.
+ * @zones: The zones to free.
+ */
+void vdo_free_hash_zones(struct hash_zones *zones)
+{
+ zone_count_t i;
+
+ if (zones == NULL)
+ return;
+
+ vdo_free(vdo_forget(zones->manager));
+
+ for (i = 0; i < zones->zone_count; i++) {
+ struct hash_zone *zone = &zones->zones[i];
+
+ vdo_free_funnel_queue(vdo_forget(zone->timed_out_complete));
+ vdo_int_map_free(vdo_forget(zone->hash_lock_map));
+ vdo_free(vdo_forget(zone->lock_array));
+ }
+
+ if (zones->index_session != NULL)
+ vdo_finish_dedupe_index(zones);
+
+ ratelimit_state_exit(&zones->ratelimiter);
+ vdo_free(zones);
+}
+
+static void initiate_suspend_index(struct admin_state *state)
+{
+ struct hash_zones *zones = container_of(state, struct hash_zones, state);
+ enum index_state index_state;
+
+ spin_lock(&zones->lock);
+ index_state = zones->index_state;
+ spin_unlock(&zones->lock);
+
+ if (index_state != IS_CLOSED) {
+ bool save = vdo_is_state_saving(&zones->state);
+ int result;
+
+ result = uds_suspend_index_session(zones->index_session, save);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error suspending dedupe index");
+ }
+
+ vdo_finish_draining(state);
+}
+
+/**
+ * suspend_index() - Suspend the UDS index prior to draining hash zones.
+ *
+ * Implements vdo_action_preamble_fn
+ */
+static void suspend_index(void *context, struct vdo_completion *completion)
+{
+ struct hash_zones *zones = context;
+
+ vdo_start_draining(&zones->state,
+ vdo_get_current_manager_operation(zones->manager), completion,
+ initiate_suspend_index);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct hash_zone, state));
+}
+
+/**
+ * drain_hash_zone() - Drain a hash zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void drain_hash_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct hash_zones *zones = context;
+
+ vdo_start_draining(&zones->zones[zone_number].state,
+ vdo_get_current_manager_operation(zones->manager), parent,
+ initiate_drain);
+}
+
+/** vdo_drain_hash_zones() - Drain all hash zones. */
+void vdo_drain_hash_zones(struct hash_zones *zones, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(zones->manager, parent->vdo->suspend_type, suspend_index,
+ drain_hash_zone, NULL, parent);
+}
+
+static void launch_dedupe_state_change(struct hash_zones *zones)
+ __must_hold(&zones->lock)
+{
+ /* ASSERTION: We enter with the lock held. */
+ if (zones->changing || !vdo_is_state_normal(&zones->state))
+ /* Either a change is already in progress, or changes are not allowed. */
+ return;
+
+ if (zones->create_flag || (zones->index_state != zones->index_target)) {
+ zones->changing = true;
+ vdo_launch_completion(&zones->completion);
+ return;
+ }
+
+ /* ASSERTION: We exit with the lock held. */
+}
+
+/**
+ * resume_index() - Resume the UDS index prior to resuming hash zones.
+ *
+ * Implements vdo_action_preamble_fn
+ */
+static void resume_index(void *context, struct vdo_completion *parent)
+{
+ struct hash_zones *zones = context;
+ struct device_config *config = parent->vdo->device_config;
+ int result;
+
+ zones->parameters.bdev = config->owned_device->bdev;
+ result = uds_resume_index_session(zones->index_session, zones->parameters.bdev);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error resuming dedupe index");
+
+ spin_lock(&zones->lock);
+ vdo_resume_if_quiescent(&zones->state);
+
+ if (config->deduplication) {
+ zones->index_target = IS_OPENED;
+ WRITE_ONCE(zones->dedupe_flag, true);
+ } else {
+ zones->index_target = IS_CLOSED;
+ }
+
+ launch_dedupe_state_change(zones);
+ spin_unlock(&zones->lock);
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * resume_hash_zone() - Resume a hash zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void resume_hash_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct hash_zone *zone = &(((struct hash_zones *) context)->zones[zone_number]);
+
+ vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
+}
+
+/**
+ * vdo_resume_hash_zones() - Resume a set of hash zones.
+ * @zones: The hash zones to resume.
+ * @parent: The object to notify when the zones have resumed.
+ */
+void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *parent)
+{
+ if (vdo_is_read_only(parent->vdo)) {
+ vdo_launch_completion(parent);
+ return;
+ }
+
+ vdo_schedule_operation(zones->manager, VDO_ADMIN_STATE_RESUMING, resume_index,
+ resume_hash_zone, NULL, parent);
+}
+
+/**
+ * get_hash_zone_statistics() - Add the statistics for this hash zone to the tally for all zones.
+ * @zone: The hash zone to query.
+ * @tally: The tally to which this zone's statistics are added.
+ */
+static void get_hash_zone_statistics(const struct hash_zone *zone,
+ struct hash_lock_statistics *tally)
+{
+ const struct hash_lock_statistics *stats = &zone->statistics;
+
+ tally->dedupe_advice_valid += READ_ONCE(stats->dedupe_advice_valid);
+ tally->dedupe_advice_stale += READ_ONCE(stats->dedupe_advice_stale);
+ tally->concurrent_data_matches += READ_ONCE(stats->concurrent_data_matches);
+ tally->concurrent_hash_collisions += READ_ONCE(stats->concurrent_hash_collisions);
+ tally->curr_dedupe_queries += READ_ONCE(zone->active);
+}
+
+static void get_index_statistics(struct hash_zones *zones,
+ struct index_statistics *stats)
+{
+ enum index_state state;
+ struct uds_index_stats index_stats;
+ int result;
+
+ spin_lock(&zones->lock);
+ state = zones->index_state;
+ spin_unlock(&zones->lock);
+
+ if (state != IS_OPENED)
+ return;
+
+ result = uds_get_index_session_stats(zones->index_session, &index_stats);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "Error reading index stats");
+ return;
+ }
+
+ stats->entries_indexed = index_stats.entries_indexed;
+ stats->posts_found = index_stats.posts_found;
+ stats->posts_not_found = index_stats.posts_not_found;
+ stats->queries_found = index_stats.queries_found;
+ stats->queries_not_found = index_stats.queries_not_found;
+ stats->updates_found = index_stats.updates_found;
+ stats->updates_not_found = index_stats.updates_not_found;
+ stats->entries_discarded = index_stats.entries_discarded;
+}
+
+/**
+ * vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index.
+ * @zones: The hash zones to query.
+ * @stats: The statistics structure to update with the sum of the hash lock statistics from all
+ *         hash zones plus the statistics from the UDS index.
+ */
+void vdo_get_dedupe_statistics(struct hash_zones *zones, struct vdo_statistics *stats)
+{
+ zone_count_t zone;
+
+ for (zone = 0; zone < zones->zone_count; zone++)
+ get_hash_zone_statistics(&zones->zones[zone], &stats->hash_lock);
+
+ get_index_statistics(zones, &stats->index);
+
+ /*
+ * zones->timeouts gives the number of timeouts, and dedupe_context_busy gives the number
+ * of queries not made because of earlier timeouts.
+ */
+ stats->dedupe_advice_timeouts =
+ (atomic64_read(&zones->timeouts) + atomic64_read(&zones->dedupe_context_busy));
+}
+
+/**
+ * vdo_select_hash_zone() - Select the hash zone responsible for locking a given record name.
+ * @zones: The hash_zones from which to select.
+ * @name: The record name.
+ *
+ * Return: The hash zone responsible for the record name.
+ */
+struct hash_zone *vdo_select_hash_zone(struct hash_zones *zones,
+ const struct uds_record_name *name)
+{
+ /*
+ * Use a fragment of the record name as a hash code. Eight bits of hash should suffice
+ * since the number of hash zones is small.
+ * TODO: Verify that the first byte is independent enough.
+ */
+ u32 hash = name->name[0];
+
+ /*
+ * Scale the 8-bit hash fragment to a zone index by treating it as a binary fraction and
+ * multiplying that by the zone count. If the hash is uniformly distributed over [0 ..
+ * 2^8-1], then (hash * count / 2^8) should be uniformly distributed over [0 .. count-1].
+ * The multiply and shift is much faster than a divide (modulus) on X86 CPUs.
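+	 *
+	 * For example, with four zones, name bytes 0x00..0x3f select zone 0, 0x40..0x7f zone 1,
+	 * 0x80..0xbf zone 2, and 0xc0..0xff zone 3.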
+ */
+ hash = (hash * zones->zone_count) >> 8;
+ return &zones->zones[hash];
+}
+
+/**
+ * dump_hash_lock() - Dump a compact description of hash_lock to the log if the lock is not on the
+ * free list.
+ * @lock: The hash lock to dump.
+ */
+static void dump_hash_lock(const struct hash_lock *lock)
+{
+ const char *state;
+
+ if (!list_empty(&lock->pool_node)) {
+ /* This lock is on the free list. */
+ return;
+ }
+
+ /*
+	 * Necessarily cryptic since we can log a lot of these. The first three characters of the
+	 * state name are unambiguous. 'U' indicates a lock not registered in the map.
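+	 * Fields: lock address, state name, registered flag ('D' or 'U'), duplicate PBN and state,
+	 * reference count (rc), waiter count (wc), and agent (agt).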
+ */
+ state = get_hash_lock_state_name(lock->state);
+ vdo_log_info(" hl %px: %3.3s %c%llu/%u rc=%u wc=%zu agt=%px",
+ lock, state, (lock->registered ? 'D' : 'U'),
+ (unsigned long long) lock->duplicate.pbn,
+ lock->duplicate.state, lock->reference_count,
+ vdo_waitq_num_waiters(&lock->waiters), lock->agent);
+}
+
+static const char *index_state_to_string(struct hash_zones *zones,
+ enum index_state state)
+{
+ if (!vdo_is_state_normal(&zones->state))
+ return SUSPENDED;
+
+ switch (state) {
+ case IS_CLOSED:
+ return zones->error_flag ? ERROR : CLOSED;
+ case IS_CHANGING:
+ return zones->index_target == IS_OPENED ? OPENING : CLOSING;
+ case IS_OPENED:
+ return READ_ONCE(zones->dedupe_flag) ? ONLINE : OFFLINE;
+ default:
+ return UNKNOWN;
+ }
+}
+
+/**
+ * dump_hash_zone() - Dump information about a hash zone to the log for debugging.
+ * @zone: The zone to dump.
+ */
+static void dump_hash_zone(const struct hash_zone *zone)
+{
+ data_vio_count_t i;
+
+ if (zone->hash_lock_map == NULL) {
+ vdo_log_info("struct hash_zone %u: NULL map", zone->zone_number);
+ return;
+ }
+
+ vdo_log_info("struct hash_zone %u: mapSize=%zu",
+ zone->zone_number, vdo_int_map_size(zone->hash_lock_map));
+ for (i = 0; i < LOCK_POOL_CAPACITY; i++)
+ dump_hash_lock(&zone->lock_array[i]);
+}
+
+/**
+ * vdo_dump_hash_zones() - Dump information about the hash zones to the log for debugging.
+ * @zones: The zones to dump.
+ */
+void vdo_dump_hash_zones(struct hash_zones *zones)
+{
+ const char *state, *target;
+ zone_count_t zone;
+
+ spin_lock(&zones->lock);
+ state = index_state_to_string(zones, zones->index_state);
+ target = (zones->changing ? index_state_to_string(zones, zones->index_target) : NULL);
+ spin_unlock(&zones->lock);
+
+ vdo_log_info("UDS index: state: %s", state);
+ if (target != NULL)
+ vdo_log_info("UDS index: changing to state: %s", target);
+
+ for (zone = 0; zone < zones->zone_count; zone++)
+ dump_hash_zone(&zones->zones[zone]);
+}
+
+void vdo_set_dedupe_index_timeout_interval(unsigned int value)
+{
+ u64 alb_jiffies;
+
+ /* Arbitrary maximum value is two minutes */
+ if (value > 120000)
+ value = 120000;
+ /* Arbitrary minimum value is 2 jiffies */
+ alb_jiffies = msecs_to_jiffies(value);
+
+ if (alb_jiffies < 2) {
+ alb_jiffies = 2;
+ value = jiffies_to_msecs(alb_jiffies);
+ }
+ vdo_dedupe_index_timeout_interval = value;
+ vdo_dedupe_index_timeout_jiffies = alb_jiffies;
+}
+
+void vdo_set_dedupe_index_min_timer_interval(unsigned int value)
+{
+ u64 min_jiffies;
+
+ /* Arbitrary maximum value is one second */
+ if (value > 1000)
+ value = 1000;
+
+ /* Arbitrary minimum value is 2 jiffies */
+ min_jiffies = msecs_to_jiffies(value);
+
+ if (min_jiffies < 2) {
+ min_jiffies = 2;
+ value = jiffies_to_msecs(min_jiffies);
+ }
+
+ vdo_dedupe_index_min_timer_interval = value;
+ vdo_dedupe_index_min_timer_jiffies = min_jiffies;
+}
+
+/**
+ * acquire_context() - Acquire a dedupe context from a hash_zone if any are available.
+ * @zone: the hash zone
+ *
+ * Return: A dedupe_context or NULL if none are available
+ */
+static struct dedupe_context * __must_check acquire_context(struct hash_zone *zone)
+{
+ struct dedupe_context *context;
+ struct funnel_queue_entry *entry;
+
+ assert_in_hash_zone(zone, __func__);
+
+ if (!list_empty(&zone->available)) {
+ WRITE_ONCE(zone->active, zone->active + 1);
+ context = list_first_entry(&zone->available, struct dedupe_context,
+ list_entry);
+ list_del_init(&context->list_entry);
+ return context;
+ }
+
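+	/*
+	 * No idle context is available; try to recycle one whose query timed out but whose index
+	 * request has since completed (see finish_index_operation()).
+	 */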
+ entry = vdo_funnel_queue_poll(zone->timed_out_complete);
+ return ((entry == NULL) ?
+ NULL : container_of(entry, struct dedupe_context, queue_entry));
+}
+
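+/*
+ * The encoded advice is one version byte, one mapping-state byte, and the new_mapped PBN as a
+ * little-endian u64, UDS_ADVICE_SIZE bytes in total.
+ */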
+static void prepare_uds_request(struct uds_request *request, struct data_vio *data_vio,
+ enum uds_request_type operation)
+{
+ request->record_name = data_vio->record_name;
+ request->type = operation;
+ if ((operation == UDS_POST) || (operation == UDS_UPDATE)) {
+ size_t offset = 0;
+ struct uds_record_data *encoding = &request->new_metadata;
+
+ encoding->data[offset++] = UDS_ADVICE_VERSION;
+ encoding->data[offset++] = data_vio->new_mapped.state;
+ put_unaligned_le64(data_vio->new_mapped.pbn, &encoding->data[offset]);
+ offset += sizeof(u64);
+ BUG_ON(offset != UDS_ADVICE_SIZE);
+ }
+}
+
+/*
+ * The index operation will inquire about data_vio.record_name, providing (if the operation is
+ * appropriate) advice from the data_vio's new_mapped fields. The advice found in the index (or
+ * NULL if none) will be returned via receive_data_vio_dedupe_advice(). dedupe_context.status is
+ * set to the return status code of any asynchronous index processing.
+ */
+static void query_index(struct data_vio *data_vio, enum uds_request_type operation)
+{
+ int result;
+ struct dedupe_context *context;
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct hash_zone *zone = data_vio->hash_zone;
+
+ assert_data_vio_in_hash_zone(data_vio);
+
+ if (!READ_ONCE(vdo->hash_zones->dedupe_flag)) {
+ continue_data_vio(data_vio);
+ return;
+ }
+
+ context = acquire_context(zone);
+ if (context == NULL) {
+ atomic64_inc(&vdo->hash_zones->dedupe_context_busy);
+ continue_data_vio(data_vio);
+ return;
+ }
+
+ data_vio->dedupe_context = context;
+ context->requestor = data_vio;
+ context->submission_jiffies = jiffies;
+ prepare_uds_request(&context->request, data_vio, operation);
+ atomic_set(&context->state, DEDUPE_CONTEXT_PENDING);
+ list_add_tail(&context->list_entry, &zone->pending);
+ start_expiration_timer(context);
+ result = uds_launch_request(&context->request);
+ if (result != UDS_SUCCESS) {
+ context->request.status = result;
+ finish_index_operation(&context->request);
+ }
+}
+
+static void set_target_state(struct hash_zones *zones, enum index_state target,
+ bool change_dedupe, bool dedupe, bool set_create)
+{
+ const char *old_state, *new_state;
+
+ spin_lock(&zones->lock);
+ old_state = index_state_to_string(zones, zones->index_target);
+ if (change_dedupe)
+ WRITE_ONCE(zones->dedupe_flag, dedupe);
+
+ if (set_create)
+ zones->create_flag = true;
+
+ zones->index_target = target;
+ launch_dedupe_state_change(zones);
+ new_state = index_state_to_string(zones, zones->index_target);
+ spin_unlock(&zones->lock);
+
+ if (old_state != new_state)
+ vdo_log_info("Setting UDS index target state to %s", new_state);
+}
+
+const char *vdo_get_dedupe_index_state_name(struct hash_zones *zones)
+{
+ const char *state;
+
+ spin_lock(&zones->lock);
+ state = index_state_to_string(zones, zones->index_state);
+ spin_unlock(&zones->lock);
+
+ return state;
+}
+
+/* Handle a dmsetup message relevant to the index. */
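+/*
+ * For example (device name hypothetical): "dmsetup message vdo0 0 index-enable" sets the index
+ * target to opened and turns the dedupe flag back on, while "index-disable" keeps the index open
+ * but stops issuing queries.
+ */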
+int vdo_message_dedupe_index(struct hash_zones *zones, const char *name)
+{
+ if (strcasecmp(name, "index-close") == 0) {
+ set_target_state(zones, IS_CLOSED, false, false, false);
+ return 0;
+ } else if (strcasecmp(name, "index-create") == 0) {
+ set_target_state(zones, IS_OPENED, false, false, true);
+ return 0;
+ } else if (strcasecmp(name, "index-disable") == 0) {
+ set_target_state(zones, IS_OPENED, true, false, false);
+ return 0;
+ } else if (strcasecmp(name, "index-enable") == 0) {
+ set_target_state(zones, IS_OPENED, true, true, false);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void vdo_set_dedupe_state_normal(struct hash_zones *zones)
+{
+ vdo_set_admin_state_code(&zones->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+}
+
+/* If create_flag, create a new index without first attempting to load an existing index. */
+void vdo_start_dedupe_index(struct hash_zones *zones, bool create_flag)
+{
+ set_target_state(zones, IS_OPENED, true, true, create_flag);
+}
diff --git a/drivers/md/dm-vdo/dedupe.h b/drivers/md/dm-vdo/dedupe.h
new file mode 100644
index 000000000000..9000d6f3eece
--- /dev/null
+++ b/drivers/md/dm-vdo/dedupe.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_DEDUPE_H
+#define VDO_DEDUPE_H
+
+#include <linux/list.h>
+#include <linux/timer.h>
+
+#include "indexer.h"
+
+#include "admin-state.h"
+#include "constants.h"
+#include "statistics.h"
+#include "types.h"
+#include "wait-queue.h"
+
+struct dedupe_context {
+ struct hash_zone *zone;
+ struct uds_request request;
+ struct list_head list_entry;
+ struct funnel_queue_entry queue_entry;
+ u64 submission_jiffies;
+ struct data_vio *requestor;
+ atomic_t state;
+};
+
+struct hash_lock;
+
+struct hash_zone {
+ /* Which hash zone this is */
+ zone_count_t zone_number;
+
+ /* The administrative state of the zone */
+ struct admin_state state;
+
+ /* The thread ID for this zone */
+ thread_id_t thread_id;
+
+ /* Mapping from record name fields to hash_locks */
+ struct int_map *hash_lock_map;
+
+ /* List containing all unused hash_locks */
+ struct list_head lock_pool;
+
+ /*
+ * Statistics shared by all hash locks in this zone. Only modified on the hash zone thread,
+ * but queried by other threads.
+ */
+ struct hash_lock_statistics statistics;
+
+ /* Array of all hash_locks */
+ struct hash_lock *lock_array;
+
+ /* These fields are used to manage the dedupe contexts */
+ struct list_head available;
+ struct list_head pending;
+ struct funnel_queue *timed_out_complete;
+ struct timer_list timer;
+ struct vdo_completion completion;
+ unsigned int active;
+ atomic_t timer_state;
+
+ /* The dedupe contexts for querying the index from this zone */
+ struct dedupe_context contexts[MAXIMUM_VDO_USER_VIOS];
+};
+
+struct hash_zones;
+
+struct pbn_lock * __must_check vdo_get_duplicate_lock(struct data_vio *data_vio);
+
+void vdo_acquire_hash_lock(struct vdo_completion *completion);
+void vdo_continue_hash_lock(struct vdo_completion *completion);
+void vdo_release_hash_lock(struct data_vio *data_vio);
+void vdo_clean_failed_hash_lock(struct data_vio *data_vio);
+void vdo_share_compressed_write_lock(struct data_vio *data_vio,
+ struct pbn_lock *pbn_lock);
+
+int __must_check vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr);
+
+void vdo_free_hash_zones(struct hash_zones *zones);
+
+void vdo_drain_hash_zones(struct hash_zones *zones, struct vdo_completion *parent);
+
+void vdo_get_dedupe_statistics(struct hash_zones *zones, struct vdo_statistics *stats);
+
+struct hash_zone * __must_check vdo_select_hash_zone(struct hash_zones *zones,
+ const struct uds_record_name *name);
+
+void vdo_dump_hash_zones(struct hash_zones *zones);
+
+const char *vdo_get_dedupe_index_state_name(struct hash_zones *zones);
+
+u64 vdo_get_dedupe_index_timeout_count(struct hash_zones *zones);
+
+int vdo_message_dedupe_index(struct hash_zones *zones, const char *name);
+
+void vdo_set_dedupe_state_normal(struct hash_zones *zones);
+
+void vdo_start_dedupe_index(struct hash_zones *zones, bool create_flag);
+
+void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *parent);
+
+void vdo_finish_dedupe_index(struct hash_zones *zones);
+
+/* Interval (in milliseconds) from submission until switching to fast path and skipping UDS. */
+extern unsigned int vdo_dedupe_index_timeout_interval;
+
+/*
+ * Minimum time interval (in milliseconds) between timer invocations to check for requests waiting
+ * for UDS that should now time out.
+ */
+extern unsigned int vdo_dedupe_index_min_timer_interval;
+
+void vdo_set_dedupe_index_timeout_interval(unsigned int value);
+void vdo_set_dedupe_index_min_timer_interval(unsigned int value);
+
+#endif /* VDO_DEDUPE_H */
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
new file mode 100644
index 000000000000..5a4b0a927f56
--- /dev/null
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -0,0 +1,2910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device-mapper.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "admin-state.h"
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "dump.h"
+#include "encodings.h"
+#include "errors.h"
+#include "flush.h"
+#include "io-submitter.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "message-stats.h"
+#include "recovery-journal.h"
+#include "repair.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "string-utils.h"
+#include "thread-device.h"
+#include "thread-registry.h"
+#include "thread-utils.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+
+enum admin_phases {
+ GROW_LOGICAL_PHASE_START,
+ GROW_LOGICAL_PHASE_GROW_BLOCK_MAP,
+ GROW_LOGICAL_PHASE_END,
+ GROW_LOGICAL_PHASE_ERROR,
+ GROW_PHYSICAL_PHASE_START,
+ GROW_PHYSICAL_PHASE_COPY_SUMMARY,
+ GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS,
+ GROW_PHYSICAL_PHASE_USE_NEW_SLABS,
+ GROW_PHYSICAL_PHASE_END,
+ GROW_PHYSICAL_PHASE_ERROR,
+ LOAD_PHASE_START,
+ LOAD_PHASE_LOAD_DEPOT,
+ LOAD_PHASE_MAKE_DIRTY,
+ LOAD_PHASE_PREPARE_TO_ALLOCATE,
+ LOAD_PHASE_SCRUB_SLABS,
+ LOAD_PHASE_DATA_REDUCTION,
+ LOAD_PHASE_FINISHED,
+ LOAD_PHASE_DRAIN_JOURNAL,
+ LOAD_PHASE_WAIT_FOR_READ_ONLY,
+ PRE_LOAD_PHASE_START,
+ PRE_LOAD_PHASE_LOAD_COMPONENTS,
+ PRE_LOAD_PHASE_END,
+ PREPARE_GROW_PHYSICAL_PHASE_START,
+ RESUME_PHASE_START,
+ RESUME_PHASE_ALLOW_READ_ONLY_MODE,
+ RESUME_PHASE_DEDUPE,
+ RESUME_PHASE_DEPOT,
+ RESUME_PHASE_JOURNAL,
+ RESUME_PHASE_BLOCK_MAP,
+ RESUME_PHASE_LOGICAL_ZONES,
+ RESUME_PHASE_PACKER,
+ RESUME_PHASE_FLUSHER,
+ RESUME_PHASE_DATA_VIOS,
+ RESUME_PHASE_END,
+ SUSPEND_PHASE_START,
+ SUSPEND_PHASE_PACKER,
+ SUSPEND_PHASE_DATA_VIOS,
+ SUSPEND_PHASE_DEDUPE,
+ SUSPEND_PHASE_FLUSHES,
+ SUSPEND_PHASE_LOGICAL_ZONES,
+ SUSPEND_PHASE_BLOCK_MAP,
+ SUSPEND_PHASE_JOURNAL,
+ SUSPEND_PHASE_DEPOT,
+ SUSPEND_PHASE_READ_ONLY_WAIT,
+ SUSPEND_PHASE_WRITE_SUPER_BLOCK,
+ SUSPEND_PHASE_END,
+};
+
+static const char * const ADMIN_PHASE_NAMES[] = {
+ "GROW_LOGICAL_PHASE_START",
+ "GROW_LOGICAL_PHASE_GROW_BLOCK_MAP",
+ "GROW_LOGICAL_PHASE_END",
+ "GROW_LOGICAL_PHASE_ERROR",
+ "GROW_PHYSICAL_PHASE_START",
+ "GROW_PHYSICAL_PHASE_COPY_SUMMARY",
+ "GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS",
+ "GROW_PHYSICAL_PHASE_USE_NEW_SLABS",
+ "GROW_PHYSICAL_PHASE_END",
+ "GROW_PHYSICAL_PHASE_ERROR",
+ "LOAD_PHASE_START",
+ "LOAD_PHASE_LOAD_DEPOT",
+ "LOAD_PHASE_MAKE_DIRTY",
+ "LOAD_PHASE_PREPARE_TO_ALLOCATE",
+ "LOAD_PHASE_SCRUB_SLABS",
+ "LOAD_PHASE_DATA_REDUCTION",
+ "LOAD_PHASE_FINISHED",
+ "LOAD_PHASE_DRAIN_JOURNAL",
+ "LOAD_PHASE_WAIT_FOR_READ_ONLY",
+ "PRE_LOAD_PHASE_START",
+ "PRE_LOAD_PHASE_LOAD_COMPONENTS",
+ "PRE_LOAD_PHASE_END",
+ "PREPARE_GROW_PHYSICAL_PHASE_START",
+ "RESUME_PHASE_START",
+ "RESUME_PHASE_ALLOW_READ_ONLY_MODE",
+ "RESUME_PHASE_DEDUPE",
+ "RESUME_PHASE_DEPOT",
+ "RESUME_PHASE_JOURNAL",
+ "RESUME_PHASE_BLOCK_MAP",
+ "RESUME_PHASE_LOGICAL_ZONES",
+ "RESUME_PHASE_PACKER",
+ "RESUME_PHASE_FLUSHER",
+ "RESUME_PHASE_DATA_VIOS",
+ "RESUME_PHASE_END",
+ "SUSPEND_PHASE_START",
+ "SUSPEND_PHASE_PACKER",
+ "SUSPEND_PHASE_DATA_VIOS",
+ "SUSPEND_PHASE_DEDUPE",
+ "SUSPEND_PHASE_FLUSHES",
+ "SUSPEND_PHASE_LOGICAL_ZONES",
+ "SUSPEND_PHASE_BLOCK_MAP",
+ "SUSPEND_PHASE_JOURNAL",
+ "SUSPEND_PHASE_DEPOT",
+ "SUSPEND_PHASE_READ_ONLY_WAIT",
+ "SUSPEND_PHASE_WRITE_SUPER_BLOCK",
+ "SUSPEND_PHASE_END",
+};
+
+/* If we bump this, update the arrays below */
+#define TABLE_VERSION 4
+
+/* arrays for handling different table versions */
+static const u8 REQUIRED_ARGC[] = { 10, 12, 9, 7, 6 };
+/* pool name no longer used. only here for verification of older versions */
+static const u8 POOL_NAME_ARG_INDEX[] = { 8, 10, 8 };
+
+/*
+ * Track in-use instance numbers using a flat bit array.
+ *
+ * O(n) run time isn't ideal, but if we have 1000 VDO devices in use simultaneously we still only
+ * need to scan 16 words, so it's not likely to be a big deal compared to other resource usage.
+ */
+
+/*
+ * This minimum size for the bit array creates a numbering space of 0-999, which allows
+ * successive starts of the same volume to have different instance numbers in any
+ * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that
+ * the ephemeral stats have reset to zero.
+ */
+#define BIT_COUNT_MINIMUM 1000
+/* Grow the bit array by this many bits when needed */
+#define BIT_COUNT_INCREMENT 100
+
+struct instance_tracker {
+ unsigned int bit_count;
+ unsigned long *words;
+ unsigned int count;
+ unsigned int next;
+};
+
+static DEFINE_MUTEX(instances_lock);
+static struct instance_tracker instances;
+
+/**
+ * free_device_config() - Free a device config created by parse_device_config().
+ * @config: The config to free.
+ */
+static void free_device_config(struct device_config *config)
+{
+ if (config == NULL)
+ return;
+
+ if (config->owned_device != NULL)
+ dm_put_device(config->owning_target, config->owned_device);
+
+ vdo_free(config->parent_device_name);
+ vdo_free(config->original_string);
+
+	/* Reduce the chance that a use-after-free (as in BZ 1669960) happens to work. */
+ memset(config, 0, sizeof(*config));
+ vdo_free(config);
+}
+
+/**
+ * get_version_number() - Decide the version number from argv.
+ *
+ * @argc: The number of table values.
+ * @argv: The array of table values.
+ * @error_ptr: A pointer to return an error string in.
+ * @version_ptr: A pointer to return the version.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int get_version_number(int argc, char **argv, char **error_ptr,
+ unsigned int *version_ptr)
+{
+	/* The version, if it exists, is in the form V<n>. */
+ if (sscanf(argv[0], "V%u", version_ptr) == 1) {
+ if (*version_ptr < 1 || *version_ptr > TABLE_VERSION) {
+ *error_ptr = "Unknown version number detected";
+ return VDO_BAD_CONFIGURATION;
+ }
+ } else {
+ /* V0 actually has no version number in the table string */
+ *version_ptr = 0;
+ }
+
+ /*
+ * V0 and V1 have no optional parameters. There will always be a parameter for thread
+ * config, even if it's a "." to show it's an empty list.
+ */
+ if (*version_ptr <= 1) {
+ if (argc != REQUIRED_ARGC[*version_ptr]) {
+ *error_ptr = "Incorrect number of arguments for version";
+ return VDO_BAD_CONFIGURATION;
+ }
+ } else if (argc < REQUIRED_ARGC[*version_ptr]) {
+ *error_ptr = "Incorrect number of arguments for version";
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (*version_ptr != TABLE_VERSION) {
+ vdo_log_warning("Detected version mismatch between kernel module and tools kernel: %d, tool: %d",
+ TABLE_VERSION, *version_ptr);
+ vdo_log_warning("Please consider upgrading management tools to match kernel.");
+ }
+ return VDO_SUCCESS;
+}
+
+/* Free a list of non-NULL string pointers, and then the list itself. */
+static void free_string_array(char **string_array)
+{
+ unsigned int offset;
+
+ for (offset = 0; string_array[offset] != NULL; offset++)
+ vdo_free(string_array[offset]);
+ vdo_free(string_array);
+}
+
+/*
+ * Split the input string into substrings, separated at occurrences of the indicated character,
+ * returning a null-terminated list of string pointers.
+ *
+ * The string pointers and the pointer array itself should both be freed with vdo_free() when no
+ * longer needed. This can be done with free_string_array() (above) if the pointers in the array
+ * are not changed. Since the array and copied strings are allocated by this function, it may only
+ * be used in contexts where allocation is permitted.
+ *
+ * Empty substrings are not ignored; that is, returned substrings may be empty strings if the
+ * separator occurs twice in a row.
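+ *
+ * For example, splitting "ack=1,,bio=4" at ',' yields { "ack=1", "", "bio=4" }, followed by a
+ * NULL terminator.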
+ */
+static int split_string(const char *string, char separator, char ***substring_array_ptr)
+{
+ unsigned int current_substring = 0, substring_count = 1;
+ const char *s;
+ char **substrings;
+ int result;
+ ptrdiff_t length;
+
+ for (s = string; *s != 0; s++) {
+ if (*s == separator)
+ substring_count++;
+ }
+
+ result = vdo_allocate(substring_count + 1, char *, "string-splitting array",
+ &substrings);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (s = string; *s != 0; s++) {
+ if (*s == separator) {
+			length = s - string;
+
+ result = vdo_allocate(length + 1, char, "split string",
+ &substrings[current_substring]);
+ if (result != VDO_SUCCESS) {
+ free_string_array(substrings);
+ return result;
+ }
+ /*
+ * Trailing NUL is already in place after allocation; deal with the zero or
+ * more non-NUL bytes in the string.
+ */
+ if (length > 0)
+ memcpy(substrings[current_substring], string, length);
+ string = s + 1;
+ current_substring++;
+ BUG_ON(current_substring >= substring_count);
+ }
+ }
+ /* Process final string, with no trailing separator. */
+ BUG_ON(current_substring != (substring_count - 1));
+ length = strlen(string);
+
+ result = vdo_allocate(length + 1, char, "split string",
+ &substrings[current_substring]);
+ if (result != VDO_SUCCESS) {
+ free_string_array(substrings);
+ return result;
+ }
+ memcpy(substrings[current_substring], string, length);
+ current_substring++;
+ /* substrings[current_substring] is NULL already */
+ *substring_array_ptr = substrings;
+ return VDO_SUCCESS;
+}
+
+/*
+ * Join the input substrings into one string, joined with the indicated character, returning a
+ * string. array_length is a bound on the number of valid elements in substring_array, in case it
+ * is not NULL-terminated.
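+ *
+ * For example, joining { "ack=1", "bio=4", NULL } with ',' produces "ack=1,bio=4".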
+ */
+static int join_strings(char **substring_array, size_t array_length, char separator,
+ char **string_ptr)
+{
+ size_t string_length = 0;
+ size_t i;
+ int result;
+ char *output, *current_position;
+
+ for (i = 0; (i < array_length) && (substring_array[i] != NULL); i++)
+ string_length += strlen(substring_array[i]) + 1;
+
+ result = vdo_allocate(string_length, char, __func__, &output);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ current_position = &output[0];
+
+ for (i = 0; (i < array_length) && (substring_array[i] != NULL); i++) {
+ current_position = vdo_append_to_buffer(current_position,
+ output + string_length, "%s",
+ substring_array[i]);
+ *current_position = separator;
+ current_position++;
+ }
+
+ /* We output one too many separators; replace the last with a zero byte. */
+ if (current_position != output)
+ *(current_position - 1) = '\0';
+
+ *string_ptr = output;
+ return VDO_SUCCESS;
+}
+
+/**
+ * parse_bool() - Parse a two-valued option into a bool.
+ * @bool_str: The string value to convert to a bool.
+ * @true_str: The string value which should be converted to true.
+ * @false_str: The string value which should be converted to false.
+ * @bool_ptr: A pointer to return the bool value in.
+ *
+ * Return: VDO_SUCCESS or an error if bool_str is neither true_str nor false_str.
+ */
+static inline int __must_check parse_bool(const char *bool_str, const char *true_str,
+ const char *false_str, bool *bool_ptr)
+{
+ bool value = false;
+
+ if (strcmp(bool_str, true_str) == 0)
+ value = true;
+ else if (strcmp(bool_str, false_str) == 0)
+ value = false;
+ else
+ return VDO_BAD_CONFIGURATION;
+
+ *bool_ptr = value;
+ return VDO_SUCCESS;
+}
+
+/**
+ * process_one_thread_config_spec() - Process one component of a thread parameter configuration
+ * string and update the configuration data structure.
+ * @thread_param_type: The type of thread specified.
+ * @count: The thread count requested.
+ * @config: The configuration data structure to update.
+ *
+ * If the thread count requested is invalid, a message is logged and -EINVAL returned. If the
+ * thread name is unknown, a message is logged but no error is returned.
+ *
+ * Return: VDO_SUCCESS or -EINVAL
+ */
+static int process_one_thread_config_spec(const char *thread_param_type,
+ unsigned int count,
+ struct thread_count_config *config)
+{
+ /* Handle limited thread parameters */
+ if (strcmp(thread_param_type, "bioRotationInterval") == 0) {
+ if (count == 0) {
+ vdo_log_error("thread config string error: 'bioRotationInterval' of at least 1 is required");
+ return -EINVAL;
+ } else if (count > VDO_BIO_ROTATION_INTERVAL_LIMIT) {
+ vdo_log_error("thread config string error: 'bioRotationInterval' cannot be higher than %d",
+ VDO_BIO_ROTATION_INTERVAL_LIMIT);
+ return -EINVAL;
+ }
+ config->bio_rotation_interval = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "logical") == 0) {
+ if (count > MAX_VDO_LOGICAL_ZONES) {
+ vdo_log_error("thread config string error: at most %d 'logical' threads are allowed",
+ MAX_VDO_LOGICAL_ZONES);
+ return -EINVAL;
+ }
+ config->logical_zones = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "physical") == 0) {
+ if (count > MAX_VDO_PHYSICAL_ZONES) {
+ vdo_log_error("thread config string error: at most %d 'physical' threads are allowed",
+ MAX_VDO_PHYSICAL_ZONES);
+ return -EINVAL;
+ }
+ config->physical_zones = count;
+ return VDO_SUCCESS;
+ }
+ /* Handle other thread count parameters */
+ if (count > MAXIMUM_VDO_THREADS) {
+ vdo_log_error("thread config string error: at most %d '%s' threads are allowed",
+ MAXIMUM_VDO_THREADS, thread_param_type);
+ return -EINVAL;
+ }
+ if (strcmp(thread_param_type, "hash") == 0) {
+ config->hash_zones = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "cpu") == 0) {
+ if (count == 0) {
+ vdo_log_error("thread config string error: at least one 'cpu' thread required");
+ return -EINVAL;
+ }
+ config->cpu_threads = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "ack") == 0) {
+ config->bio_ack_threads = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "bio") == 0) {
+ if (count == 0) {
+ vdo_log_error("thread config string error: at least one 'bio' thread required");
+ return -EINVAL;
+ }
+ config->bio_threads = count;
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * Don't fail, just log. This will handle version mismatches between user mode tools and
+ * kernel.
+ */
+ vdo_log_info("unknown thread parameter type \"%s\"", thread_param_type);
+ return VDO_SUCCESS;
+}
+
+/**
+ * parse_one_thread_config_spec() - Parse one component of a thread parameter configuration string
+ * and update the configuration data structure.
+ * @spec: The thread parameter specification string.
+ * @config: The configuration data to be updated.
+ */
+static int parse_one_thread_config_spec(const char *spec,
+ struct thread_count_config *config)
+{
+ unsigned int count;
+ char **fields;
+ int result;
+
+ result = split_string(spec, '=', &fields);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if ((fields[0] == NULL) || (fields[1] == NULL) || (fields[2] != NULL)) {
+ vdo_log_error("thread config string error: expected thread parameter assignment, saw \"%s\"",
+ spec);
+ free_string_array(fields);
+ return -EINVAL;
+ }
+
+ result = kstrtouint(fields[1], 10, &count);
+ if (result) {
+ vdo_log_error("thread config string error: integer value needed, found \"%s\"",
+ fields[1]);
+ free_string_array(fields);
+ return result;
+ }
+
+ result = process_one_thread_config_spec(fields[0], count, config);
+ free_string_array(fields);
+ return result;
+}
+
+/**
+ * parse_thread_config_string() - Parse the configuration string passed and update the specified
+ * counts and other parameters of various types of threads to be
+ * created.
+ * @string: Thread parameter configuration string.
+ * @config: The thread configuration data to update.
+ *
+ * The configuration string should contain one or more comma-separated specs of the form
+ * "typename=number"; the supported type names are "cpu", "ack", "bio", "bioRotationInterval",
+ * "logical", "physical", and "hash".
+ *
+ * If an error occurs during parsing of a single key/value pair, we deem it serious enough to stop
+ * further parsing.
+ *
+ * This function can't set the "reason" value the caller wants to pass back, because we'd want to
+ * format it to say which field was invalid, and we can't allocate the "reason" strings
+ * dynamically. So if an error occurs, we'll log the details and pass back an error.
+ *
+ * Return: VDO_SUCCESS or -EINVAL or -ENOMEM
+ */
+static int parse_thread_config_string(const char *string,
+ struct thread_count_config *config)
+{
+ int result = VDO_SUCCESS;
+ char **specs;
+
+ if (strcmp(".", string) != 0) {
+ unsigned int i;
+
+ result = split_string(string, ',', &specs);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (i = 0; specs[i] != NULL; i++) {
+ result = parse_one_thread_config_spec(specs[i], config);
+ if (result != VDO_SUCCESS)
+ break;
+ }
+ free_string_array(specs);
+ }
+ return result;
+}
+
+/**
+ * process_one_key_value_pair() - Process one component of an optional parameter string and update
+ * the configuration data structure.
+ * @key: The optional parameter key name.
+ * @value: The optional parameter value.
+ * @config: The configuration data structure to update.
+ *
+ * If the value requested is invalid, a message is logged and -EINVAL returned. If the key is
+ * unknown, a message is logged but no error is returned.
+ *
+ * Return: VDO_SUCCESS or -EINVAL
+ */
+static int process_one_key_value_pair(const char *key, unsigned int value,
+ struct device_config *config)
+{
+ /* Non thread optional parameters */
+ if (strcmp(key, "maxDiscard") == 0) {
+ if (value == 0) {
+ vdo_log_error("optional parameter error: at least one max discard block required");
+ return -EINVAL;
+ }
+ /* Max discard sectors in blkdev_issue_discard is UINT_MAX >> 9 */
+ if (value > (UINT_MAX / VDO_BLOCK_SIZE)) {
+ vdo_log_error("optional parameter error: at most %d max discard blocks are allowed",
+ UINT_MAX / VDO_BLOCK_SIZE);
+ return -EINVAL;
+ }
+ config->max_discard_blocks = value;
+ return VDO_SUCCESS;
+ }
+ /* Handles unknown key names */
+ return process_one_thread_config_spec(key, value, &config->thread_counts);
+}
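+
+/*
+ * Illustrative example, not part of the driver logic: a hypothetical pair "maxDiscard 1500" sets
+ * config->max_discard_blocks to 1500, which with 4096-byte VDO blocks allows discards of roughly
+ * 6 MB; a value of 0, or one above UINT_MAX / VDO_BLOCK_SIZE, is rejected with -EINVAL.
+ */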
+
+/**
+ * parse_one_key_value_pair() - Parse one key/value pair and update the configuration data
+ * structure.
+ * @key: The optional key name.
+ * @value: The optional value.
+ * @config: The configuration data to be updated.
+ *
+ * Return: VDO_SUCCESS or error.
+ */
+static int parse_one_key_value_pair(const char *key, const char *value,
+ struct device_config *config)
+{
+ unsigned int count;
+ int result;
+
+ if (strcmp(key, "deduplication") == 0)
+ return parse_bool(value, "on", "off", &config->deduplication);
+
+ if (strcmp(key, "compression") == 0)
+ return parse_bool(value, "on", "off", &config->compression);
+
+ /* The remaining arguments must have integral values. */
+ result = kstrtouint(value, 10, &count);
+ if (result) {
+ vdo_log_error("optional config string error: integer value needed, found \"%s\"",
+ value);
+ return result;
+ }
+ return process_one_key_value_pair(key, count, config);
+}
+
+/**
+ * parse_key_value_pairs() - Parse all key/value pairs from a list of arguments.
+ * @argc: The total number of arguments in list.
+ * @argv: The list of key/value pairs.
+ * @config: The device configuration data to update.
+ *
+ * If an error occurs during parsing of a single key/value pair, we deem it serious enough to stop
+ * further parsing.
+ *
+ * This function can't set the "reason" value the caller wants to pass back, because we'd want to
+ * format it to say which field was invalid, and we can't allocate the "reason" strings
+ * dynamically. So if an error occurs, we'll log the details and return the error.
+ *
+ * Return: VDO_SUCCESS or error
+ */
+static int parse_key_value_pairs(int argc, char **argv, struct device_config *config)
+{
+ int result = VDO_SUCCESS;
+
+ while (argc) {
+ result = parse_one_key_value_pair(argv[0], argv[1], config);
+ if (result != VDO_SUCCESS)
+ break;
+
+ argc -= 2;
+ argv += 2;
+ }
+
+ return result;
+}
+
+/**
+ * parse_optional_arguments() - Parse the configuration string passed in for optional arguments.
+ * @arg_set: The structure holding the arguments to parse.
+ * @error_ptr: Pointer to a buffer to hold the error string.
+ * @config: Pointer to device configuration data to update.
+ *
+ * For V0/V1 configurations, there will only be one optional parameter; the thread configuration.
+ * The configuration string should contain one or more comma-separated specs of the form
+ * "typename=number"; the supported type names are "cpu", "ack", "bio", "bioRotationInterval",
+ * "logical", "physical", and "hash".
+ *
+ * For V2 configurations and beyond, there could be any number of arguments. They should contain
+ * one or more key/value pairs separated by a space.
+ *
+ * Return: VDO_SUCCESS or error
+ */
+static int parse_optional_arguments(struct dm_arg_set *arg_set, char **error_ptr,
+ struct device_config *config)
+{
+ int result = VDO_SUCCESS;
+
+ if (config->version == 0 || config->version == 1) {
+ result = parse_thread_config_string(arg_set->argv[0],
+ &config->thread_counts);
+ if (result != VDO_SUCCESS) {
+ *error_ptr = "Invalid thread-count configuration";
+ return VDO_BAD_CONFIGURATION;
+ }
+ } else {
+ if ((arg_set->argc % 2) != 0) {
+ *error_ptr = "Odd number of optional arguments given but they should be <key> <value> pairs";
+ return VDO_BAD_CONFIGURATION;
+ }
+ result = parse_key_value_pairs(arg_set->argc, arg_set->argv, config);
+ if (result != VDO_SUCCESS) {
+ *error_ptr = "Invalid optional argument configuration";
+ return VDO_BAD_CONFIGURATION;
+ }
+ }
+ return result;
+}
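+
+/*
+ * Illustrative example, not part of the driver logic: for a hypothetical V0/V1 table the single
+ * optional argument might be the thread string "cpu=2,hash=1,logical=4,physical=2", while a V2+
+ * table instead ends with space-separated pairs such as
+ * "maxDiscard 1500 deduplication on compression off".
+ */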
+
+/**
+ * handle_parse_error() - Handle a parsing error.
+ * @config: The config to free.
+ * @error_ptr: A place to store a constant string about the error.
+ * @error_str: A constant string to store in error_ptr.
+ */
+static void handle_parse_error(struct device_config *config, char **error_ptr,
+ char *error_str)
+{
+ free_device_config(config);
+ *error_ptr = error_str;
+}
+
+/**
+ * parse_device_config() - Convert the dmsetup table into a struct device_config.
+ * @argc: The number of table values.
+ * @argv: The array of table values.
+ * @ti: The target structure for this table.
+ * @config_ptr: A pointer to return the allocated config.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int parse_device_config(int argc, char **argv, struct dm_target *ti,
+ struct device_config **config_ptr)
+{
+ bool enable_512e;
+ size_t logical_bytes = to_bytes(ti->len);
+ struct dm_arg_set arg_set;
+ char **error_ptr = &ti->error;
+ struct device_config *config = NULL;
+ int result;
+
+ if ((logical_bytes % VDO_BLOCK_SIZE) != 0) {
+ handle_parse_error(config, error_ptr,
+ "Logical size must be a multiple of 4096");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (argc == 0) {
+ handle_parse_error(config, error_ptr, "Incorrect number of arguments");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ result = vdo_allocate(1, struct device_config, "device_config", &config);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Could not allocate config structure");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ config->owning_target = ti;
+ config->logical_blocks = logical_bytes / VDO_BLOCK_SIZE;
+ INIT_LIST_HEAD(&config->config_list);
+
+ /* Save the original string. */
+ result = join_strings(argv, argc, ' ', &config->original_string);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr, "Could not populate string");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ vdo_log_info("table line: %s", config->original_string);
+
+ config->thread_counts = (struct thread_count_config) {
+ .bio_ack_threads = 1,
+ .bio_threads = DEFAULT_VDO_BIO_SUBMIT_QUEUE_COUNT,
+ .bio_rotation_interval = DEFAULT_VDO_BIO_SUBMIT_QUEUE_ROTATE_INTERVAL,
+ .cpu_threads = 1,
+ .logical_zones = 0,
+ .physical_zones = 0,
+ .hash_zones = 0,
+ };
+ config->max_discard_blocks = 1;
+ config->deduplication = true;
+ config->compression = false;
+
+ arg_set.argc = argc;
+ arg_set.argv = argv;
+
+ result = get_version_number(argc, argv, error_ptr, &config->version);
+ if (result != VDO_SUCCESS) {
+ /* get_version_number sets error_ptr itself. */
+ handle_parse_error(config, error_ptr, *error_ptr);
+ return result;
+ }
+ /* Move the arg pointer forward only if the argument was there. */
+ if (config->version >= 1)
+ dm_shift_arg(&arg_set);
+
+ result = vdo_duplicate_string(dm_shift_arg(&arg_set), "parent device name",
+ &config->parent_device_name);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Could not copy parent device name");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ /* Get the physical blocks, if known. */
+ if (config->version >= 1) {
+ result = kstrtoull(dm_shift_arg(&arg_set), 10, &config->physical_blocks);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Invalid physical block count");
+ return VDO_BAD_CONFIGURATION;
+ }
+ }
+
+ /* Get the logical block size and validate */
+ result = parse_bool(dm_shift_arg(&arg_set), "512", "4096", &enable_512e);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr, "Invalid logical block size");
+ return VDO_BAD_CONFIGURATION;
+ }
+ config->logical_block_size = (enable_512e ? 512 : 4096);
+
+ /* Skip past the two no longer used read cache options. */
+ if (config->version <= 1)
+ dm_consume_args(&arg_set, 2);
+
+ /* Get the page cache size. */
+ result = kstrtouint(dm_shift_arg(&arg_set), 10, &config->cache_size);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Invalid block map page cache size");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ /* Get the block map era length. */
+ result = kstrtouint(dm_shift_arg(&arg_set), 10, &config->block_map_maximum_age);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr, "Invalid block map maximum age");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ /* Skip past the no longer used MD RAID5 optimization mode */
+ if (config->version <= 2)
+ dm_consume_args(&arg_set, 1);
+
+ /* Skip past the no longer used write policy setting */
+ if (config->version <= 3)
+ dm_consume_args(&arg_set, 1);
+
+ /* Skip past the no longer used pool name for older table lines */
+ if (config->version <= 2) {
+ /*
+ * Make sure the enum to get the pool name from argv directly is still in sync with
+ * the parsing of the table line.
+ */
+ if (&arg_set.argv[0] != &argv[POOL_NAME_ARG_INDEX[config->version]]) {
+ handle_parse_error(config, error_ptr,
+ "Pool name not in expected location");
+ return VDO_BAD_CONFIGURATION;
+ }
+ dm_shift_arg(&arg_set);
+ }
+
+ /* Get the optional arguments and validate. */
+ result = parse_optional_arguments(&arg_set, error_ptr, config);
+ if (result != VDO_SUCCESS) {
+ /* parse_optional_arguments sets error_ptr itself. */
+ handle_parse_error(config, error_ptr, *error_ptr);
+ return result;
+ }
+
+ /*
+ * Logical, physical, and hash zone counts can all be zero; in that case one thread does
+ * everything, as in the older configuration. If any zone count is non-zero, the others
+ * must be non-zero as well.
+ */
+ if (((config->thread_counts.logical_zones == 0) !=
+ (config->thread_counts.physical_zones == 0)) ||
+ ((config->thread_counts.physical_zones == 0) !=
+ (config->thread_counts.hash_zones == 0))) {
+ handle_parse_error(config, error_ptr,
+ "Logical, physical, and hash zones counts must all be zero or all non-zero");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (config->cache_size <
+ (2 * MAXIMUM_VDO_USER_VIOS * config->thread_counts.logical_zones)) {
+ handle_parse_error(config, error_ptr,
+ "Insufficient block map cache for logical zones");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ result = dm_get_device(ti, config->parent_device_name,
+ dm_table_get_mode(ti->table), &config->owned_device);
+ if (result != 0) {
+ vdo_log_error("couldn't open device \"%s\": error %d",
+ config->parent_device_name, result);
+ handle_parse_error(config, error_ptr, "Unable to open storage device");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (config->version == 0) {
+ u64 device_size = i_size_read(config->owned_device->bdev->bd_inode);
+
+ config->physical_blocks = device_size / VDO_BLOCK_SIZE;
+ }
+
+ *config_ptr = config;
+ return result;
+}
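+
+/*
+ * Illustrative example, not part of the driver logic and assuming the version token is spelled
+ * "V4": a hypothetical dmsetup table line for this target might be
+ *
+ *     0 2097152 vdo V4 /dev/sdb1 262144 4096 32768 16380 maxDiscard 1500 deduplication on
+ *
+ * i.e. version, parent device name, physical block count, logical block size, block map cache
+ * size, block map era length, and then the optional key/value arguments parsed above.
+ */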
+
+static struct vdo *get_vdo_for_target(struct dm_target *ti)
+{
+ return ((struct device_config *) ti->private)->vdo;
+}
+
+static int vdo_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+ struct vdo_work_queue *current_work_queue;
+ const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state);
+
+ VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
+ code->name);
+
+ /* Count all incoming bios. */
+ vdo_count_bios(&vdo->stats.bios_in, bio);
+
+ /* Handle empty bios. Empty flush bios are not associated with a vio. */
+ if ((bio_op(bio) == REQ_OP_FLUSH) || ((bio->bi_opf & REQ_PREFLUSH) != 0)) {
+ vdo_launch_flush(vdo, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Submitting a bio from one of this vdo's own threads could deadlock, so assert that we
+ * are not running on a work queue owned by this vdo.
+ */
+ current_work_queue = vdo_get_current_work_queue();
+ BUG_ON((current_work_queue != NULL) &&
+ (vdo == vdo_get_work_queue_owner(current_work_queue)->vdo));
+ vdo_launch_bio(vdo->data_vio_pool, bio);
+ return DM_MAPIO_SUBMITTED;
+}
+
+static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+
+ limits->logical_block_size = vdo->device_config->logical_block_size;
+ limits->physical_block_size = VDO_BLOCK_SIZE;
+
+ /* The minimum io size for random io */
+ blk_limits_io_min(limits, VDO_BLOCK_SIZE);
+ /* The optimal io size for streamed/sequential io */
+ blk_limits_io_opt(limits, VDO_BLOCK_SIZE);
+
+ /*
+ * Sets the maximum discard size that will be passed into VDO. This value comes from a
+ * table line value passed in during dmsetup create.
+ *
+ * The value 1024 is the largest usable value on HD systems: a 2048-sector discard on a
+ * busy HD system takes 31 seconds, while 1024 sectors take 15 to 16 seconds. However,
+ * using large values results in 120 second blocked task warnings in kernel logs, so to
+ * avoid these warnings we choose to use the smallest reasonable value.
+ *
+ * The value is used by dm-thin to determine whether to pass down discards. The block layer
+ * splits large discards on this boundary when this is set.
+ */
+ limits->max_discard_sectors =
+ (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK);
+
+ /*
+ * Force discards to not begin or end with a partial block by stating the granularity is
+ * 4k.
+ */
+ limits->discard_granularity = VDO_BLOCK_SIZE;
+}
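+
+/*
+ * Illustrative example, not part of the driver logic: with 4096-byte blocks,
+ * VDO_SECTORS_PER_BLOCK is 8, so the default max_discard_blocks of 1 yields max_discard_sectors
+ * of 8 (one 4 KB block), while a table specifying "maxDiscard 1500" yields 12000 sectors (about
+ * 6 MB) per discard.
+ */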
+
+static int vdo_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct device_config *config = get_vdo_for_target(ti)->device_config;
+
+ return fn(ti, config->owned_device, 0,
+ config->physical_blocks * VDO_SECTORS_PER_BLOCK, data);
+}
+
+/*
+ * Status line is:
+ * <device> <operating mode> <in recovery> <index state> <compression state>
+ * <used physical blocks> <total physical blocks>
+ */
+
+static void vdo_status(struct dm_target *ti, status_type_t status_type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+ struct vdo_statistics *stats;
+ struct device_config *device_config;
+ /* N.B.: The DMEMIT macro uses the variables named "sz", "result", "maxlen". */
+ int sz = 0;
+
+ switch (status_type) {
+ case STATUSTYPE_INFO:
+ /* Report info for dmsetup status */
+ mutex_lock(&vdo->stats_mutex);
+ vdo_fetch_statistics(vdo, &vdo->stats_buffer);
+ stats = &vdo->stats_buffer;
+
+ DMEMIT("/dev/%pg %s %s %s %s %llu %llu",
+ vdo_get_backing_device(vdo), stats->mode,
+ stats->in_recovery_mode ? "recovering" : "-",
+ vdo_get_dedupe_index_state_name(vdo->hash_zones),
+ vdo_get_compressing(vdo) ? "online" : "offline",
+ stats->data_blocks_used + stats->overhead_blocks_used,
+ stats->physical_blocks);
+ mutex_unlock(&vdo->stats_mutex);
+ break;
+
+ case STATUSTYPE_TABLE:
+ /* Report the string actually specified in the beginning. */
+ device_config = (struct device_config *) ti->private;
+ DMEMIT("%s", device_config->original_string);
+ break;
+
+ case STATUSTYPE_IMA:
+ /* FIXME: We ought to be more detailed here, but this is what thin does. */
+ *result = '\0';
+ break;
+ }
+}
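+
+/*
+ * Illustrative example, not part of the driver logic: a hypothetical "dmsetup status" line
+ * emitted by the STATUSTYPE_INFO case above might read
+ *
+ *     /dev/sdb1 normal - online online 81920 262144
+ *
+ * i.e. backing device, operating mode, not in recovery, index state, compression state, used
+ * physical blocks (data plus overhead), and total physical blocks.
+ */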
+
+static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo)
+{
+ return i_size_read(vdo_get_backing_device(vdo)->bd_inode) / VDO_BLOCK_SIZE;
+}
+
+static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc,
+ char **argv)
+{
+ if ((argc == 2) && (strcasecmp(argv[0], "compression") == 0)) {
+ if (strcasecmp(argv[1], "on") == 0) {
+ vdo_set_compressing(vdo, true);
+ return 0;
+ }
+
+ if (strcasecmp(argv[1], "off") == 0) {
+ vdo_set_compressing(vdo, false);
+ return 0;
+ }
+
+ vdo_log_warning("invalid argument '%s' to dmsetup compression message",
+ argv[1]);
+ return -EINVAL;
+ }
+
+ vdo_log_warning("unrecognized dmsetup message '%s' received", argv[0]);
+ return -EINVAL;
+}
+
+/*
+ * If the message is a dump, just do it. Otherwise, check that no other message is being processed,
+ * and only proceed if none is.
+ *
+ * Returns -EBUSY if another message is already being processed.
+ */
+static int __must_check process_vdo_message(struct vdo *vdo, unsigned int argc,
+ char **argv)
+{
+ int result;
+
+ /*
+ * All messages which may be processed in parallel with other messages should be handled
+ * here before the atomic check below. Messages which should be exclusive should be
+ * processed in process_vdo_message_locked().
+ */
+
+ /* Dump messages should always be processed */
+ if (strcasecmp(argv[0], "dump") == 0)
+ return vdo_dump(vdo, argc, argv, "dmsetup message");
+
+ if (argc == 1) {
+ if (strcasecmp(argv[0], "dump-on-shutdown") == 0) {
+ vdo->dump_on_shutdown = true;
+ return 0;
+ }
+
+ /* Index messages should always be processed */
+ if ((strcasecmp(argv[0], "index-close") == 0) ||
+ (strcasecmp(argv[0], "index-create") == 0) ||
+ (strcasecmp(argv[0], "index-disable") == 0) ||
+ (strcasecmp(argv[0], "index-enable") == 0))
+ return vdo_message_dedupe_index(vdo->hash_zones, argv[0]);
+ }
+
+ if (atomic_cmpxchg(&vdo->processing_message, 0, 1) != 0)
+ return -EBUSY;
+
+ result = process_vdo_message_locked(vdo, argc, argv);
+
+ /* Pairs with the implicit barrier in cmpxchg just above */
+ smp_wmb();
+ atomic_set(&vdo->processing_message, 0);
+ return result;
+}
+
+static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result_buffer, unsigned int maxlen)
+{
+ struct registered_thread allocating_thread, instance_thread;
+ struct vdo *vdo;
+ int result;
+
+ if (argc == 0) {
+ vdo_log_warning("unspecified dmsetup message");
+ return -EINVAL;
+ }
+
+ vdo = get_vdo_for_target(ti);
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+
+ /*
+ * The stats message must be handled here so that its return code is not remapped: the
+ * code in dm-ioctl expects a return code of 1 as the signal to look at the result buffer
+ * and see whether it is full.
+ */
+ if ((argc == 1) && (strcasecmp(argv[0], "stats") == 0)) {
+ vdo_write_stats(vdo, result_buffer, maxlen);
+ result = 1;
+ } else {
+ result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv));
+ }
+
+ vdo_unregister_thread_device_id();
+ vdo_unregister_allocating_thread();
+ return result;
+}
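+
+/*
+ * Illustrative examples, not part of the driver logic, of messages handled above for a
+ * hypothetical target named "vdo0":
+ *
+ *     dmsetup message vdo0 0 stats
+ *     dmsetup message vdo0 0 dump
+ *     dmsetup message vdo0 0 index-enable
+ *     dmsetup message vdo0 0 compression on
+ *
+ * "stats" writes its report into the result buffer and returns 1 so dm-ioctl copies it out; the
+ * other messages are routed through process_vdo_message().
+ */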
+
+static void configure_target_capabilities(struct dm_target *ti)
+{
+ ti->discards_supported = true;
+ ti->flush_supported = true;
+ ti->num_discard_bios = 1;
+ ti->num_flush_bios = 1;
+
+ /*
+ * If this value changes, please make sure to update the value for max_discard_sectors
+ * accordingly.
+ */
+ BUG_ON(dm_set_target_max_io_len(ti, VDO_SECTORS_PER_BLOCK) != 0);
+}
+
+/*
+ * Implements vdo_filter_fn.
+ */
+static bool vdo_uses_device(struct vdo *vdo, const void *context)
+{
+ const struct device_config *config = context;
+
+ return vdo_get_backing_device(vdo)->bd_dev == config->owned_device->bdev->bd_dev;
+}
+
+/**
+ * get_thread_id_for_phase() - Get the thread id for the current phase of the admin operation in
+ * progress.
+ */
+static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo)
+{
+ switch (vdo->admin.phase) {
+ case RESUME_PHASE_PACKER:
+ case RESUME_PHASE_FLUSHER:
+ case SUSPEND_PHASE_PACKER:
+ case SUSPEND_PHASE_FLUSHES:
+ return vdo->thread_config.packer_thread;
+
+ case RESUME_PHASE_DATA_VIOS:
+ case SUSPEND_PHASE_DATA_VIOS:
+ return vdo->thread_config.cpu_thread;
+
+ case LOAD_PHASE_DRAIN_JOURNAL:
+ case RESUME_PHASE_JOURNAL:
+ case SUSPEND_PHASE_JOURNAL:
+ return vdo->thread_config.journal_thread;
+
+ default:
+ return vdo->thread_config.admin_thread;
+ }
+}
+
+static struct vdo_completion *prepare_admin_completion(struct vdo *vdo,
+ vdo_action_fn callback,
+ vdo_action_fn error_handler)
+{
+ struct vdo_completion *completion = &vdo->admin.completion;
+
+ /*
+ * We can't use vdo_prepare_completion_for_requeue() here because we don't want to reset
+ * any error in the completion.
+ */
+ completion->callback = callback;
+ completion->error_handler = error_handler;
+ completion->callback_thread_id = get_thread_id_for_phase(vdo);
+ completion->requeue = true;
+ return completion;
+}
+
+/**
+ * advance_phase() - Increment the phase of the current admin operation and prepare the admin
+ * completion to run on the thread for the next phase.
+ * @vdo: The vdo on which an admin operation is being performed.
+ *
+ * Return: The current phase
+ */
+static u32 advance_phase(struct vdo *vdo)
+{
+ u32 phase = vdo->admin.phase++;
+
+ vdo->admin.completion.callback_thread_id = get_thread_id_for_phase(vdo);
+ vdo->admin.completion.requeue = true;
+ return phase;
+}
+
+/*
+ * Perform an administrative operation (load, suspend, grow logical, or grow physical). This method
+ * should not be called from vdo threads.
+ */
+static int perform_admin_operation(struct vdo *vdo, u32 starting_phase,
+ vdo_action_fn callback, vdo_action_fn error_handler,
+ const char *type)
+{
+ int result;
+ struct vdo_administrator *admin = &vdo->admin;
+
+ if (atomic_cmpxchg(&admin->busy, 0, 1) != 0) {
+ return vdo_log_error_strerror(VDO_COMPONENT_BUSY,
+ "Can't start %s operation, another operation is already in progress",
+ type);
+ }
+
+ admin->phase = starting_phase;
+ reinit_completion(&admin->callback_sync);
+ vdo_reset_completion(&admin->completion);
+ vdo_launch_completion(prepare_admin_completion(vdo, callback, error_handler));
+
+ /*
+ * Using the "interruptible" interface means that Linux will not log a message when we wait
+ * for more than 120 seconds.
+ */
+ while (wait_for_completion_interruptible(&admin->callback_sync)) {
+ /* However, if we get a signal in a user-mode process, we could spin... */
+ fsleep(1000);
+ }
+
+ result = admin->completion.result;
+ /* pairs with implicit barrier in cmpxchg above */
+ smp_wmb();
+ atomic_set(&admin->busy, 0);
+ return result;
+}
+
+/* Assert that we are operating on the correct thread for the current phase. */
+static void assert_admin_phase_thread(struct vdo *vdo, const char *what)
+{
+ VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
+ "%s on correct thread for %s", what,
+ ADMIN_PHASE_NAMES[vdo->admin.phase]);
+}
+
+/**
+ * finish_operation_callback() - Callback to finish an admin operation.
+ * @completion: The admin_completion.
+ */
+static void finish_operation_callback(struct vdo_completion *completion)
+{
+ struct vdo_administrator *admin = &completion->vdo->admin;
+
+ vdo_finish_operation(&admin->state, completion->result);
+ complete(&admin->callback_sync);
+}
+
+/**
+ * decode_from_super_block() - Decode the VDO state from the super block and validate that it is
+ * correct.
+ * @vdo: The vdo being loaded.
+ *
+ * On error from this method, the component states must be destroyed explicitly. If this method
+ * returns successfully, the component states must not be destroyed.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check decode_from_super_block(struct vdo *vdo)
+{
+ const struct device_config *config = vdo->device_config;
+ int result;
+
+ result = vdo_decode_component_states(vdo->super_block.buffer, &vdo->geometry,
+ &vdo->states);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_state(vdo, vdo->states.vdo.state);
+ vdo->load_state = vdo->states.vdo.state;
+
+ /*
+ * If the device config specifies a larger logical size than was recorded in the super
+ * block, just accept it.
+ */
+ if (vdo->states.vdo.config.logical_blocks < config->logical_blocks) {
+ vdo_log_warning("Growing logical size: a logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
+ (unsigned long long) config->logical_blocks,
+ (unsigned long long) vdo->states.vdo.config.logical_blocks);
+ vdo->states.vdo.config.logical_blocks = config->logical_blocks;
+ }
+
+ result = vdo_validate_component_states(&vdo->states, vdo->geometry.nonce,
+ config->physical_blocks,
+ config->logical_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo->layout = vdo->states.layout;
+ return VDO_SUCCESS;
+}
+
+/**
+ * decode_vdo() - Decode the component data portion of a super block and fill in the corresponding
+ * portions of the vdo being loaded.
+ * @vdo: The vdo being loaded.
+ *
+ * This will also allocate the recovery journal and slab depot. If this method is called with an
+ * asynchronous layer (i.e. a thread config which specifies at least one base thread), the block
+ * map and packer will be constructed as well.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check decode_vdo(struct vdo *vdo)
+{
+ block_count_t maximum_age, journal_length;
+ struct partition *partition;
+ int result;
+
+ result = decode_from_super_block(vdo);
+ if (result != VDO_SUCCESS) {
+ vdo_destroy_component_states(&vdo->states);
+ return result;
+ }
+
+ maximum_age = vdo_convert_maximum_age(vdo->device_config->block_map_maximum_age);
+ journal_length =
+ vdo_get_recovery_journal_length(vdo->states.vdo.config.recovery_journal_size);
+ if (maximum_age > (journal_length / 2)) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "maximum age: %llu exceeds limit %llu",
+ (unsigned long long) maximum_age,
+ (unsigned long long) (journal_length / 2));
+ }
+
+ if (maximum_age == 0) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "maximum age must be greater than 0");
+ }
+
+ result = vdo_enable_read_only_entry(vdo);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ partition = vdo_get_known_partition(&vdo->layout,
+ VDO_RECOVERY_JOURNAL_PARTITION);
+ result = vdo_decode_recovery_journal(vdo->states.recovery_journal,
+ vdo->states.vdo.nonce, vdo, partition,
+ vdo->states.vdo.complete_recoveries,
+ vdo->states.vdo.config.recovery_journal_size,
+ &vdo->recovery_journal);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ partition = vdo_get_known_partition(&vdo->layout, VDO_SLAB_SUMMARY_PARTITION);
+ result = vdo_decode_slab_depot(vdo->states.slab_depot, vdo, partition,
+ &vdo->depot);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_decode_block_map(vdo->states.block_map,
+ vdo->states.vdo.config.logical_blocks, vdo,
+ vdo->recovery_journal, vdo->states.vdo.nonce,
+ vdo->device_config->cache_size, maximum_age,
+ &vdo->block_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_make_physical_zones(vdo, &vdo->physical_zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* The logical zones depend on the physical zones already existing. */
+ result = vdo_make_logical_zones(vdo, &vdo->logical_zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return vdo_make_hash_zones(vdo, &vdo->hash_zones);
+}
+
+/**
+ * pre_load_callback() - Callback to initiate a pre-load, registered in vdo_initialize().
+ * @completion: The admin completion.
+ */
+static void pre_load_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case PRE_LOAD_PHASE_START:
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_PRE_LOADING);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ vdo_load_super_block(vdo, completion);
+ return;
+
+ case PRE_LOAD_PHASE_LOAD_COMPONENTS:
+ vdo_continue_completion(completion, decode_vdo(vdo));
+ return;
+
+ case PRE_LOAD_PHASE_END:
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+static void release_instance(unsigned int instance)
+{
+ mutex_lock(&instances_lock);
+ if (instance >= instances.bit_count) {
+ VDO_ASSERT_LOG_ONLY(false,
+ "instance number %u must be less than bit count %u",
+ instance, instances.bit_count);
+ } else if (test_bit(instance, instances.words) == 0) {
+ VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
+ } else {
+ __clear_bit(instance, instances.words);
+ instances.count -= 1;
+ }
+ mutex_unlock(&instances_lock);
+}
+
+static void set_device_config(struct dm_target *ti, struct vdo *vdo,
+ struct device_config *config)
+{
+ list_del_init(&config->config_list);
+ list_add_tail(&config->config_list, &vdo->device_config_list);
+ config->vdo = vdo;
+ ti->private = config;
+ configure_target_capabilities(ti);
+}
+
+static int vdo_initialize(struct dm_target *ti, unsigned int instance,
+ struct device_config *config)
+{
+ struct vdo *vdo;
+ int result;
+ u64 block_size = VDO_BLOCK_SIZE;
+ u64 logical_size = to_bytes(ti->len);
+ block_count_t logical_blocks = logical_size / block_size;
+
+ vdo_log_info("loading device '%s'", vdo_get_device_name(ti));
+ vdo_log_debug("Logical block size = %llu", (u64) config->logical_block_size);
+ vdo_log_debug("Logical blocks = %llu", logical_blocks);
+ vdo_log_debug("Physical block size = %llu", (u64) block_size);
+ vdo_log_debug("Physical blocks = %llu", config->physical_blocks);
+ vdo_log_debug("Block map cache blocks = %u", config->cache_size);
+ vdo_log_debug("Block map maximum age = %u", config->block_map_maximum_age);
+ vdo_log_debug("Deduplication = %s", (config->deduplication ? "on" : "off"));
+ vdo_log_debug("Compression = %s", (config->compression ? "on" : "off"));
+
+ vdo = vdo_find_matching(vdo_uses_device, config);
+ if (vdo != NULL) {
+ vdo_log_error("Existing vdo already uses device %s",
+ vdo->device_config->parent_device_name);
+ ti->error = "Cannot share storage device with already-running VDO";
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ result = vdo_make(instance, config, &ti->error, &vdo);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("Could not create VDO device. (VDO error %d, message %s)",
+ result, ti->error);
+ vdo_destroy(vdo);
+ return result;
+ }
+
+ result = perform_admin_operation(vdo, PRE_LOAD_PHASE_START, pre_load_callback,
+ finish_operation_callback, "pre-load");
+ if (result != VDO_SUCCESS) {
+ ti->error = ((result == VDO_INVALID_ADMIN_STATE) ?
+ "Pre-load is only valid immediately after initialization" :
+ "Cannot load metadata from device");
+ vdo_log_error("Could not start VDO device. (VDO error %d, message %s)",
+ result, ti->error);
+ vdo_destroy(vdo);
+ return result;
+ }
+
+ set_device_config(ti, vdo, config);
+ vdo->device_config = config;
+ return VDO_SUCCESS;
+}
+
+/* Implements vdo_filter_fn. */
+static bool __must_check vdo_is_named(struct vdo *vdo, const void *context)
+{
+ struct dm_target *ti = vdo->device_config->owning_target;
+ const char *device_name = vdo_get_device_name(ti);
+
+ return strcmp(device_name, context) == 0;
+}
+
+/**
+ * get_bit_array_size() - Return the number of bytes needed to store a bit array of the specified
+ * capacity in an array of unsigned longs.
+ * @bit_count: The number of bits the array must hold.
+ *
+ * Return: the number of bytes needed for the array representation.
+ */
+static size_t get_bit_array_size(unsigned int bit_count)
+{
+ /* Round up to a multiple of the word size and convert to a byte count. */
+ return (BITS_TO_LONGS(bit_count) * sizeof(unsigned long));
+}
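+
+/*
+ * Illustrative example, not part of the driver logic: on a 64-bit kernel a bit_count of 100 gives
+ * BITS_TO_LONGS(100) = 2 words, so get_bit_array_size() returns 16 bytes.
+ */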
+
+/**
+ * grow_bit_array() - Re-allocate the bitmap word array so that more instance numbers can be
+ * allocated.
+ *
+ * Since the array is initially NULL, this also initializes the array the first time we allocate an
+ * instance number.
+ *
+ * Return: VDO_SUCCESS or an error code from the allocation
+ */
+static int grow_bit_array(void)
+{
+ unsigned int new_count = max(instances.bit_count + BIT_COUNT_INCREMENT,
+ (unsigned int) BIT_COUNT_MINIMUM);
+ unsigned long *new_words;
+ int result;
+
+ result = vdo_reallocate_memory(instances.words,
+ get_bit_array_size(instances.bit_count),
+ get_bit_array_size(new_count),
+ "instance number bit array", &new_words);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ instances.bit_count = new_count;
+ instances.words = new_words;
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_instance() - Allocate an instance number.
+ * @instance_ptr: A pointer to hold the instance number.
+ *
+ * Return: VDO_SUCCESS or an error code
+ *
+ * This function must be called while holding the instances lock.
+ */
+static int allocate_instance(unsigned int *instance_ptr)
+{
+ unsigned int instance;
+ int result;
+
+ /* If there are no unallocated instances, grow the bit array. */
+ if (instances.count >= instances.bit_count) {
+ result = grow_bit_array();
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /*
+ * There must be a zero bit somewhere now. Find it, starting just after the last instance
+ * allocated.
+ */
+ instance = find_next_zero_bit(instances.words, instances.bit_count,
+ instances.next);
+ if (instance >= instances.bit_count) {
+ /* Nothing free after next, so wrap around to instance zero. */
+ instance = find_first_zero_bit(instances.words, instances.bit_count);
+ result = VDO_ASSERT(instance < instances.bit_count,
+ "impossibly, no zero bit found");
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ __set_bit(instance, instances.words);
+ instances.count++;
+ instances.next = instance + 1;
+ *instance_ptr = instance;
+ return VDO_SUCCESS;
+}
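+
+/*
+ * Illustrative example, not part of the driver logic: if instances.next is 10 and bits 10 through
+ * bit_count - 1 are all set, find_next_zero_bit() returns bit_count, so the search wraps to
+ * find_first_zero_bit() and hands out the lowest free instance number instead.
+ */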
+
+static int construct_new_vdo_registered(struct dm_target *ti, unsigned int argc,
+ char **argv, unsigned int instance)
+{
+ int result;
+ struct device_config *config;
+
+ result = parse_device_config(argc, argv, ti, &config);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result, "parsing failed: %s", ti->error);
+ release_instance(instance);
+ return -EINVAL;
+ }
+
+ /* Beyond this point, the instance number will be cleaned up for us if needed */
+ result = vdo_initialize(ti, instance, config);
+ if (result != VDO_SUCCESS) {
+ release_instance(instance);
+ free_device_config(config);
+ return vdo_status_to_errno(result);
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int construct_new_vdo(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int result;
+ unsigned int instance;
+ struct registered_thread instance_thread;
+
+ mutex_lock(&instances_lock);
+ result = allocate_instance(&instance);
+ mutex_unlock(&instances_lock);
+ if (result != VDO_SUCCESS)
+ return -ENOMEM;
+
+ vdo_register_thread_device_id(&instance_thread, &instance);
+ result = construct_new_vdo_registered(ti, argc, argv, instance);
+ vdo_unregister_thread_device_id();
+ return result;
+}
+
+/**
+ * check_may_grow_physical() - Callback to check that we're not in recovery mode, used in
+ * vdo_prepare_to_grow_physical().
+ * @completion: The admin completion.
+ */
+static void check_may_grow_physical(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ /* These checks can only be done from a vdo thread. */
+ if (vdo_is_read_only(vdo))
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+
+ if (vdo_in_recovery_mode(vdo))
+ vdo_set_completion_result(completion, VDO_RETRY_AFTER_REBUILD);
+
+ finish_operation_callback(completion);
+}
+
+static block_count_t get_partition_size(struct layout *layout, enum partition_id id)
+{
+ return vdo_get_known_partition(layout, id)->count;
+}
+
+/**
+ * grow_layout() - Make the layout for growing a vdo.
+ * @vdo: The vdo preparing to grow.
+ * @old_size: The current size of the vdo.
+ * @new_size: The size to which the vdo will be grown.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int grow_layout(struct vdo *vdo, block_count_t old_size, block_count_t new_size)
+{
+ int result;
+ block_count_t min_new_size;
+
+ if (vdo->next_layout.size == new_size) {
+ /* We are already prepared to grow to the new size, so we're done. */
+ return VDO_SUCCESS;
+ }
+
+ /* Make a partition copier (kcopyd client) if there isn't one already. */
+ if (vdo->partition_copier == NULL) {
+ vdo->partition_copier = dm_kcopyd_client_create(NULL);
+ if (IS_ERR(vdo->partition_copier)) {
+ result = PTR_ERR(vdo->partition_copier);
+ vdo->partition_copier = NULL;
+ return result;
+ }
+ }
+
+ /* Free any unused preparation. */
+ vdo_uninitialize_layout(&vdo->next_layout);
+
+ /*
+ * Make a new layout with the existing partition sizes for everything but the slab depot
+ * partition.
+ */
+ result = vdo_initialize_layout(new_size, vdo->layout.start,
+ get_partition_size(&vdo->layout,
+ VDO_BLOCK_MAP_PARTITION),
+ get_partition_size(&vdo->layout,
+ VDO_RECOVERY_JOURNAL_PARTITION),
+ get_partition_size(&vdo->layout,
+ VDO_SLAB_SUMMARY_PARTITION),
+ &vdo->next_layout);
+ if (result != VDO_SUCCESS) {
+ dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
+ return result;
+ }
+
+ /* Ensure the new journal and summary are entirely within the added blocks. */
+ min_new_size = (old_size +
+ get_partition_size(&vdo->next_layout,
+ VDO_SLAB_SUMMARY_PARTITION) +
+ get_partition_size(&vdo->next_layout,
+ VDO_RECOVERY_JOURNAL_PARTITION));
+ if (min_new_size > new_size) {
+ /* Copying the journal and summary would destroy some old metadata. */
+ vdo_uninitialize_layout(&vdo->next_layout);
+ dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
+ return VDO_INCREMENT_TOO_SMALL;
+ }
+
+ return VDO_SUCCESS;
+}
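+
+/*
+ * Illustrative example, not part of the driver logic, using hypothetical sizes: growing from
+ * old_size = 1000 blocks to new_size = 1010 when the relocated recovery journal and slab summary
+ * partitions together need 32 blocks gives min_new_size = 1032 > 1010, so the grow is rejected
+ * with VDO_INCREMENT_TOO_SMALL rather than letting the copies overwrite existing metadata.
+ */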
+
+static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks)
+{
+ int result;
+ block_count_t current_physical_blocks = vdo->states.vdo.config.physical_blocks;
+
+ vdo_log_info("Preparing to resize physical to %llu",
+ (unsigned long long) new_physical_blocks);
+ VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
+ "New physical size is larger than current physical size");
+ result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START,
+ check_may_grow_physical,
+ finish_operation_callback,
+ "prepare grow-physical");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = grow_layout(vdo, current_physical_blocks, new_physical_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_prepare_to_grow_slab_depot(vdo->depot,
+ vdo_get_known_partition(&vdo->next_layout,
+ VDO_SLAB_DEPOT_PARTITION));
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(&vdo->next_layout);
+ return result;
+ }
+
+ vdo_log_info("Done preparing to resize physical");
+ return VDO_SUCCESS;
+}
+
+/**
+ * validate_new_device_config() - Check whether a new device config represents a valid modification
+ * to an existing config.
+ * @to_validate: The new config to validate.
+ * @config: The existing config.
+ * @may_grow: Set to true if growing the logical and physical size of the vdo is currently
+ * permitted.
+ * @error_ptr: A pointer to hold the reason for any error.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int validate_new_device_config(struct device_config *to_validate,
+ struct device_config *config, bool may_grow,
+ char **error_ptr)
+{
+ if (to_validate->owning_target->begin != config->owning_target->begin) {
+ *error_ptr = "Starting sector cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->logical_block_size != config->logical_block_size) {
+ *error_ptr = "Logical block size cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->logical_blocks < config->logical_blocks) {
+ *error_ptr = "Can't shrink VDO logical size";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->cache_size != config->cache_size) {
+ *error_ptr = "Block map cache size cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->block_map_maximum_age != config->block_map_maximum_age) {
+ *error_ptr = "Block map maximum age cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (memcmp(&to_validate->thread_counts, &config->thread_counts,
+ sizeof(struct thread_count_config)) != 0) {
+ *error_ptr = "Thread configuration cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->physical_blocks < config->physical_blocks) {
+ *error_ptr = "Removing physical storage from a VDO is not supported";
+ return VDO_NOT_IMPLEMENTED;
+ }
+
+ if (!may_grow && (to_validate->physical_blocks > config->physical_blocks)) {
+ *error_ptr = "VDO physical size may not grow in current state";
+ return VDO_NOT_IMPLEMENTED;
+ }
+
+ return VDO_SUCCESS;
+}
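+
+/*
+ * Illustrative example, not part of the driver logic: a reloaded table that only raises the
+ * logical and/or physical block counts (when growth is permitted) while keeping the starting
+ * sector, logical block size, cache size, era length, and thread counts identical passes
+ * validation; a table that shrinks either size or changes any of those fixed parameters is
+ * rejected as shown above.
+ */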
+
+static int prepare_to_modify(struct dm_target *ti, struct device_config *config,
+ struct vdo *vdo)
+{
+ int result;
+ bool may_grow = (vdo_get_admin_state(vdo) != VDO_ADMIN_STATE_PRE_LOADED);
+
+ result = validate_new_device_config(config, vdo->device_config, may_grow,
+ &ti->error);
+ if (result != VDO_SUCCESS)
+ return -EINVAL;
+
+ if (config->logical_blocks > vdo->device_config->logical_blocks) {
+ block_count_t logical_blocks = vdo->states.vdo.config.logical_blocks;
+
+ vdo_log_info("Preparing to resize logical to %llu",
+ (unsigned long long) config->logical_blocks);
+ VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
+ "New logical size is larger than current size");
+
+ result = vdo_prepare_to_grow_block_map(vdo->block_map,
+ config->logical_blocks);
+ if (result != VDO_SUCCESS) {
+ ti->error = "Device vdo_prepare_to_grow_logical failed";
+ return result;
+ }
+
+ vdo_log_info("Done preparing to resize logical");
+ }
+
+ if (config->physical_blocks > vdo->device_config->physical_blocks) {
+ result = prepare_to_grow_physical(vdo, config->physical_blocks);
+ if (result != VDO_SUCCESS) {
+ if (result == VDO_PARAMETER_MISMATCH) {
+ /*
+ * If we don't trap this case, vdo_status_to_errno() will remap
+ * it to -EIO, which is misleading and ahistorical.
+ */
+ result = -EINVAL;
+ }
+
+ if (result == VDO_TOO_MANY_SLABS)
+ ti->error = "Device vdo_prepare_to_grow_physical failed (specified physical size too big based on formatted slab size)";
+ else
+ ti->error = "Device vdo_prepare_to_grow_physical failed";
+
+ return result;
+ }
+ }
+
+ if (strcmp(config->parent_device_name, vdo->device_config->parent_device_name) != 0) {
+ const char *device_name = vdo_get_device_name(config->owning_target);
+
+ vdo_log_info("Updating backing device of %s from %s to %s", device_name,
+ vdo->device_config->parent_device_name,
+ config->parent_device_name);
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int update_existing_vdo(const char *device_name, struct dm_target *ti,
+ unsigned int argc, char **argv, struct vdo *vdo)
+{
+ int result;
+ struct device_config *config;
+
+ result = parse_device_config(argc, argv, ti, &config);
+ if (result != VDO_SUCCESS)
+ return -EINVAL;
+
+ vdo_log_info("preparing to modify device '%s'", device_name);
+ result = prepare_to_modify(ti, config, vdo);
+ if (result != VDO_SUCCESS) {
+ free_device_config(config);
+ return vdo_status_to_errno(result);
+ }
+
+ set_device_config(ti, vdo, config);
+ return VDO_SUCCESS;
+}
+
+static int vdo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int result;
+ struct registered_thread allocating_thread, instance_thread;
+ const char *device_name;
+ struct vdo *vdo;
+
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+ device_name = vdo_get_device_name(ti);
+ vdo = vdo_find_matching(vdo_is_named, device_name);
+ if (vdo == NULL) {
+ result = construct_new_vdo(ti, argc, argv);
+ } else {
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+ result = update_existing_vdo(device_name, ti, argc, argv, vdo);
+ vdo_unregister_thread_device_id();
+ }
+
+ vdo_unregister_allocating_thread();
+ return result;
+}
+
+static void vdo_dtr(struct dm_target *ti)
+{
+ struct device_config *config = ti->private;
+ struct vdo *vdo = vdo_forget(config->vdo);
+
+ list_del_init(&config->config_list);
+ if (list_empty(&vdo->device_config_list)) {
+ const char *device_name;
+
+ /* This was the last config referencing the VDO. Free it. */
+ unsigned int instance = vdo->instance;
+ struct registered_thread allocating_thread, instance_thread;
+
+ vdo_register_thread_device_id(&instance_thread, &instance);
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+
+ device_name = vdo_get_device_name(ti);
+ vdo_log_info("stopping device '%s'", device_name);
+ if (vdo->dump_on_shutdown)
+ vdo_dump_all(vdo, "device shutdown");
+
+ vdo_destroy(vdo_forget(vdo));
+ vdo_log_info("device '%s' stopped", device_name);
+ vdo_unregister_thread_device_id();
+ vdo_unregister_allocating_thread();
+ release_instance(instance);
+ } else if (config == vdo->device_config) {
+ /*
+ * The VDO still references this config. Give it a reference to a config that isn't
+ * being destroyed.
+ */
+ vdo->device_config = list_first_entry(&vdo->device_config_list,
+ struct device_config, config_list);
+ }
+
+ free_device_config(config);
+ ti->private = NULL;
+}
+
+static void vdo_presuspend(struct dm_target *ti)
+{
+ get_vdo_for_target(ti)->suspend_type =
+ (dm_noflush_suspending(ti) ? VDO_ADMIN_STATE_SUSPENDING : VDO_ADMIN_STATE_SAVING);
+}
+
+/**
+ * write_super_block_for_suspend() - Update the VDO state and save the super block.
+ * @completion: The admin completion
+ */
+static void write_super_block_for_suspend(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ switch (vdo_get_state(vdo)) {
+ case VDO_DIRTY:
+ case VDO_NEW:
+ vdo_set_state(vdo, VDO_CLEAN);
+ break;
+
+ case VDO_CLEAN:
+ case VDO_READ_ONLY_MODE:
+ case VDO_FORCE_REBUILD:
+ case VDO_RECOVERING:
+ case VDO_REBUILD_FOR_UPGRADE:
+ break;
+
+ case VDO_REPLAYING:
+ default:
+ vdo_continue_completion(completion, UDS_BAD_STATE);
+ return;
+ }
+
+ vdo_save_components(vdo, completion);
+}
+
+/**
+ * suspend_callback() - Callback to initiate a suspend, registered in vdo_postsuspend().
+ * @completion: The sub-task completion.
+ */
+static void suspend_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ struct admin_state *state = &vdo->admin.state;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case SUSPEND_PHASE_START:
+ if (vdo_get_admin_state_code(state)->quiescent) {
+ /* Already suspended */
+ break;
+ }
+
+ vdo_continue_completion(completion,
+ vdo_start_operation(state, vdo->suspend_type));
+ return;
+
+ case SUSPEND_PHASE_PACKER:
+ /*
+ * If the VDO was already resumed from a prior suspend while read-only, some of the
+ * components may not have been resumed. By setting a read-only error here, we
+ * guarantee that the result of this suspend will be VDO_READ_ONLY and not
+ * VDO_INVALID_ADMIN_STATE in that case.
+ */
+ if (vdo_in_read_only_mode(vdo))
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+
+ vdo_drain_packer(vdo->packer, completion);
+ return;
+
+ case SUSPEND_PHASE_DATA_VIOS:
+ drain_data_vio_pool(vdo->data_vio_pool, completion);
+ return;
+
+ case SUSPEND_PHASE_DEDUPE:
+ vdo_drain_hash_zones(vdo->hash_zones, completion);
+ return;
+
+ case SUSPEND_PHASE_FLUSHES:
+ vdo_drain_flusher(vdo->flusher, completion);
+ return;
+
+ case SUSPEND_PHASE_LOGICAL_ZONES:
+ /*
+ * Attempt to flush all I/O before completing post suspend work. We believe a
+ * suspended device is expected to have persisted all data written before the
+ * suspend, even if it hasn't been flushed yet.
+ */
+ result = vdo_synchronous_flush(vdo);
+ if (result != VDO_SUCCESS)
+ vdo_enter_read_only_mode(vdo, result);
+
+ vdo_drain_logical_zones(vdo->logical_zones,
+ vdo_get_admin_state_code(state), completion);
+ return;
+
+ case SUSPEND_PHASE_BLOCK_MAP:
+ vdo_drain_block_map(vdo->block_map, vdo_get_admin_state_code(state),
+ completion);
+ return;
+
+ case SUSPEND_PHASE_JOURNAL:
+ vdo_drain_recovery_journal(vdo->recovery_journal,
+ vdo_get_admin_state_code(state), completion);
+ return;
+
+ case SUSPEND_PHASE_DEPOT:
+ vdo_drain_slab_depot(vdo->depot, vdo_get_admin_state_code(state),
+ completion);
+ return;
+
+ case SUSPEND_PHASE_READ_ONLY_WAIT:
+ vdo_wait_until_not_entering_read_only_mode(completion);
+ return;
+
+ case SUSPEND_PHASE_WRITE_SUPER_BLOCK:
+ if (vdo_is_state_suspending(state) || (completion->result != VDO_SUCCESS)) {
+ /* If we didn't save the VDO or there was an error, we're done. */
+ break;
+ }
+
+ write_super_block_for_suspend(completion);
+ return;
+
+ case SUSPEND_PHASE_END:
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+static void vdo_postsuspend(struct dm_target *ti)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+ struct registered_thread instance_thread;
+ const char *device_name;
+ int result;
+
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+ device_name = vdo_get_device_name(vdo->device_config->owning_target);
+ vdo_log_info("suspending device '%s'", device_name);
+
+ /*
+ * It's important to note any error here does not actually stop device-mapper from
+ * suspending the device. All this work is done post suspend.
+ */
+ result = perform_admin_operation(vdo, SUSPEND_PHASE_START, suspend_callback,
+ suspend_callback, "suspend");
+
+ if ((result == VDO_SUCCESS) || (result == VDO_READ_ONLY)) {
+ /*
+ * Treat VDO_READ_ONLY as a success since a read-only suspension still leaves the
+ * VDO suspended.
+ */
+ vdo_log_info("device '%s' suspended", device_name);
+ } else if (result == VDO_INVALID_ADMIN_STATE) {
+ vdo_log_error("Suspend invoked while in unexpected state: %s",
+ vdo_get_admin_state(vdo)->name);
+ } else {
+ vdo_log_error_strerror(result, "Suspend of device '%s' failed",
+ device_name);
+ }
+
+ vdo_unregister_thread_device_id();
+}
+
+/**
+ * was_new() - Check whether the vdo was new when it was loaded.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo was new.
+ */
+static bool was_new(const struct vdo *vdo)
+{
+ return (vdo->load_state == VDO_NEW);
+}
+
+/**
+ * requires_repair() - Check whether a vdo requires recovery or rebuild.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo must be repaired.
+ */
+static bool __must_check requires_repair(const struct vdo *vdo)
+{
+ switch (vdo_get_state(vdo)) {
+ case VDO_DIRTY:
+ case VDO_FORCE_REBUILD:
+ case VDO_REPLAYING:
+ case VDO_REBUILD_FOR_UPGRADE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/**
+ * get_load_type() - Determine how the slab depot was loaded.
+ * @vdo: The vdo.
+ *
+ * Return: How the depot was loaded.
+ */
+static enum slab_depot_load_type get_load_type(struct vdo *vdo)
+{
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state))
+ return VDO_SLAB_DEPOT_REBUILD_LOAD;
+
+ if (vdo_state_requires_recovery(vdo->load_state))
+ return VDO_SLAB_DEPOT_RECOVERY_LOAD;
+
+ return VDO_SLAB_DEPOT_NORMAL_LOAD;
+}
+
+/**
+ * load_callback() - Callback to do the destructive parts of loading a VDO.
+ * @completion: The sub-task completion.
+ */
+static void load_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case LOAD_PHASE_START:
+ result = vdo_start_operation(&vdo->admin.state, VDO_ADMIN_STATE_LOADING);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ /* Prepare the recovery journal for new entries. */
+ vdo_open_recovery_journal(vdo->recovery_journal, vdo->depot,
+ vdo->block_map);
+ vdo_allow_read_only_mode_entry(completion);
+ return;
+
+ case LOAD_PHASE_LOAD_DEPOT:
+ vdo_set_dedupe_state_normal(vdo->hash_zones);
+ if (vdo_is_read_only(vdo)) {
+ /*
+ * In read-only mode we don't use the allocator and it may not even be
+ * readable, so don't bother trying to load it.
+ */
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+ break;
+ }
+
+ if (requires_repair(vdo)) {
+ vdo_repair(completion);
+ return;
+ }
+
+ vdo_load_slab_depot(vdo->depot,
+ (was_new(vdo) ? VDO_ADMIN_STATE_FORMATTING :
+ VDO_ADMIN_STATE_LOADING),
+ completion, NULL);
+ return;
+
+ case LOAD_PHASE_MAKE_DIRTY:
+ vdo_set_state(vdo, VDO_DIRTY);
+ vdo_save_components(vdo, completion);
+ return;
+
+ case LOAD_PHASE_PREPARE_TO_ALLOCATE:
+ vdo_initialize_block_map_from_journal(vdo->block_map,
+ vdo->recovery_journal);
+ vdo_prepare_slab_depot_to_allocate(vdo->depot, get_load_type(vdo),
+ completion);
+ return;
+
+ case LOAD_PHASE_SCRUB_SLABS:
+ if (vdo_state_requires_recovery(vdo->load_state))
+ vdo_enter_recovery_mode(vdo);
+
+ vdo_scrub_all_unrecovered_slabs(vdo->depot, completion);
+ return;
+
+ case LOAD_PHASE_DATA_REDUCTION:
+ WRITE_ONCE(vdo->compressing, vdo->device_config->compression);
+ if (vdo->device_config->deduplication) {
+ /*
+ * Don't try to load or rebuild the index first (and log scary error
+ * messages) if this is known to be a newly-formatted volume.
+ */
+ vdo_start_dedupe_index(vdo->hash_zones, was_new(vdo));
+ }
+
+ vdo->allocations_allowed = false;
+ fallthrough;
+
+ case LOAD_PHASE_FINISHED:
+ break;
+
+ case LOAD_PHASE_DRAIN_JOURNAL:
+ vdo_drain_recovery_journal(vdo->recovery_journal, VDO_ADMIN_STATE_SAVING,
+ completion);
+ return;
+
+ case LOAD_PHASE_WAIT_FOR_READ_ONLY:
+ /* Avoid an infinite loop */
+ completion->error_handler = NULL;
+ vdo->admin.phase = LOAD_PHASE_FINISHED;
+ vdo_wait_until_not_entering_read_only_mode(completion);
+ return;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+/**
+ * handle_load_error() - Handle an error during the load operation.
+ * @completion: The admin completion.
+ *
+ * If at all possible, brings the vdo online in read-only mode. This handler is registered in
+ * vdo_preresume_registered().
+ */
+static void handle_load_error(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ if (vdo_requeue_completion_if_needed(completion,
+ vdo->thread_config.admin_thread))
+ return;
+
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state) &&
+ (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) {
+ vdo_log_error_strerror(completion->result, "aborting load");
+ vdo->admin.phase = LOAD_PHASE_DRAIN_JOURNAL;
+ load_callback(vdo_forget(completion));
+ return;
+ }
+
+ vdo_log_error_strerror(completion->result,
+ "Entering read-only mode due to load error");
+ vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY;
+ vdo_enter_read_only_mode(vdo, completion->result);
+ completion->result = VDO_READ_ONLY;
+ load_callback(completion);
+}
+
+/**
+ * write_super_block_for_resume() - Update the VDO state and save the super block.
+ * @completion: The admin completion
+ */
+static void write_super_block_for_resume(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ switch (vdo_get_state(vdo)) {
+ case VDO_CLEAN:
+ case VDO_NEW:
+ vdo_set_state(vdo, VDO_DIRTY);
+ vdo_save_components(vdo, completion);
+ return;
+
+ case VDO_DIRTY:
+ case VDO_READ_ONLY_MODE:
+ case VDO_FORCE_REBUILD:
+ case VDO_RECOVERING:
+ case VDO_REBUILD_FOR_UPGRADE:
+ /* No need to write the super block in these cases */
+ vdo_launch_completion(completion);
+ return;
+
+ case VDO_REPLAYING:
+ default:
+ vdo_continue_completion(completion, UDS_BAD_STATE);
+ }
+}
+
+/**
+ * resume_callback() - Callback to resume a VDO.
+ * @completion: The admin completion.
+ */
+static void resume_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case RESUME_PHASE_START:
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_RESUMING);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ write_super_block_for_resume(completion);
+ return;
+
+ case RESUME_PHASE_ALLOW_READ_ONLY_MODE:
+ vdo_allow_read_only_mode_entry(completion);
+ return;
+
+ case RESUME_PHASE_DEDUPE:
+ vdo_resume_hash_zones(vdo->hash_zones, completion);
+ return;
+
+ case RESUME_PHASE_DEPOT:
+ vdo_resume_slab_depot(vdo->depot, completion);
+ return;
+
+ case RESUME_PHASE_JOURNAL:
+ vdo_resume_recovery_journal(vdo->recovery_journal, completion);
+ return;
+
+ case RESUME_PHASE_BLOCK_MAP:
+ vdo_resume_block_map(vdo->block_map, completion);
+ return;
+
+ case RESUME_PHASE_LOGICAL_ZONES:
+ vdo_resume_logical_zones(vdo->logical_zones, completion);
+ return;
+
+ case RESUME_PHASE_PACKER:
+ {
+ bool was_enabled = vdo_get_compressing(vdo);
+ bool enable = vdo->device_config->compression;
+
+ if (enable != was_enabled)
+ WRITE_ONCE(vdo->compressing, enable);
+ vdo_log_info("compression is %s", (enable ? "enabled" : "disabled"));
+
+ vdo_resume_packer(vdo->packer, completion);
+ return;
+ }
+
+ case RESUME_PHASE_FLUSHER:
+ vdo_resume_flusher(vdo->flusher, completion);
+ return;
+
+ case RESUME_PHASE_DATA_VIOS:
+ resume_data_vio_pool(vdo->data_vio_pool, completion);
+ return;
+
+ case RESUME_PHASE_END:
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+/**
+ * grow_logical_callback() - Callback to initiate a grow logical.
+ * @completion: The admin completion.
+ *
+ * Registered in perform_grow_logical().
+ */
+static void grow_logical_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case GROW_LOGICAL_PHASE_START:
+ if (vdo_is_read_only(vdo)) {
+ vdo_log_error_strerror(VDO_READ_ONLY,
+ "Can't grow logical size of a read-only VDO");
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+ break;
+ }
+
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ vdo->states.vdo.config.logical_blocks = vdo->block_map->next_entry_count;
+ vdo_save_components(vdo, completion);
+ return;
+
+ case GROW_LOGICAL_PHASE_GROW_BLOCK_MAP:
+ vdo_grow_block_map(vdo->block_map, completion);
+ return;
+
+ case GROW_LOGICAL_PHASE_END:
+ break;
+
+ case GROW_LOGICAL_PHASE_ERROR:
+ vdo_enter_read_only_mode(vdo, completion->result);
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+/**
+ * handle_logical_growth_error() - Handle an error during the grow logical process.
+ * @completion: The admin completion.
+ */
+static void handle_logical_growth_error(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ if (vdo->admin.phase == GROW_LOGICAL_PHASE_GROW_BLOCK_MAP) {
+ /*
+		 * We've failed to write the new size in the super block, so set our in-memory
+ * config back to the old size.
+ */
+ vdo->states.vdo.config.logical_blocks = vdo->block_map->entry_count;
+ vdo_abandon_block_map_growth(vdo->block_map);
+ }
+
+ vdo->admin.phase = GROW_LOGICAL_PHASE_ERROR;
+ grow_logical_callback(completion);
+}
+
+/**
+ * perform_grow_logical() - Grow the logical size of the vdo.
+ * @vdo: The vdo to grow.
+ * @new_logical_blocks: The size to which the vdo should be grown.
+ *
+ * Context: This method may only be called when the vdo has been suspended and must not be called
+ * from a base thread.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int perform_grow_logical(struct vdo *vdo, block_count_t new_logical_blocks)
+{
+ int result;
+
+ if (vdo->device_config->logical_blocks == new_logical_blocks) {
+ /*
+		 * We prepared to grow when an earlier table was loaded, but the table we are
+		 * resuming with does not include that growth.
+ */
+ vdo_abandon_block_map_growth(vdo->block_map);
+ return VDO_SUCCESS;
+ }
+
+ vdo_log_info("Resizing logical to %llu",
+ (unsigned long long) new_logical_blocks);
+ if (vdo->block_map->next_entry_count != new_logical_blocks)
+ return VDO_PARAMETER_MISMATCH;
+
+ result = perform_admin_operation(vdo, GROW_LOGICAL_PHASE_START,
+ grow_logical_callback,
+ handle_logical_growth_error, "grow logical");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_log_info("Logical blocks now %llu", (unsigned long long) new_logical_blocks);
+ return VDO_SUCCESS;
+}
+
+static void copy_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct vdo_completion *completion = context;
+ int result = (((read_err == 0) && (write_err == 0)) ? VDO_SUCCESS : -EIO);
+
+ vdo_continue_completion(completion, result);
+}
+
+static void partition_to_region(struct partition *partition, struct vdo *vdo,
+ struct dm_io_region *region)
+{
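+	/*
+	 * Partition offsets are absolute vdo physical block numbers, so the geometry's bio_offset
+	 * is subtracted to get a block number relative to the backing device before converting
+	 * 4 KB vdo blocks into 512-byte sectors.
+	 */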
+ physical_block_number_t pbn = partition->offset - vdo->geometry.bio_offset;
+
+ *region = (struct dm_io_region) {
+ .bdev = vdo_get_backing_device(vdo),
+ .sector = pbn * VDO_SECTORS_PER_BLOCK,
+ .count = partition->count * VDO_SECTORS_PER_BLOCK,
+ };
+}
+
+/**
+ * copy_partition() - Copy a partition from the location specified in the current layout to that in
+ * the next layout.
+ * @vdo: The vdo preparing to grow.
+ * @id: The ID of the partition to copy.
+ * @parent: The completion to notify when the copy is complete.
+ */
+static void copy_partition(struct vdo *vdo, enum partition_id id,
+ struct vdo_completion *parent)
+{
+ struct dm_io_region read_region, write_regions[1];
+ struct partition *from = vdo_get_known_partition(&vdo->layout, id);
+ struct partition *to = vdo_get_known_partition(&vdo->next_layout, id);
+
+ partition_to_region(from, vdo, &read_region);
+ partition_to_region(to, vdo, &write_regions[0]);
+ dm_kcopyd_copy(vdo->partition_copier, &read_region, 1, write_regions, 0,
+ copy_callback, parent);
+}
+
+/**
+ * grow_physical_callback() - Callback to initiate a grow physical.
+ * @completion: The admin completion.
+ *
+ * Registered in perform_grow_physical().
+ */
+static void grow_physical_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
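+	/*
+	 * The overall sequence, one phase per pass through this callback: copy the recovery
+	 * journal and then the slab summary into their locations in the prepared next_layout,
+	 * adopt next_layout and persist the enlarged configuration in the super block, bring the
+	 * newly prepared slabs online, and finally repoint the journal and summary origins.
+	 */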
+ switch (advance_phase(vdo)) {
+ case GROW_PHYSICAL_PHASE_START:
+ if (vdo_is_read_only(vdo)) {
+ vdo_log_error_strerror(VDO_READ_ONLY,
+ "Can't grow physical size of a read-only VDO");
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+ break;
+ }
+
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ /* Copy the journal into the new layout. */
+ copy_partition(vdo, VDO_RECOVERY_JOURNAL_PARTITION, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_COPY_SUMMARY:
+ copy_partition(vdo, VDO_SLAB_SUMMARY_PARTITION, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS:
+ vdo_uninitialize_layout(&vdo->layout);
+ vdo->layout = vdo->next_layout;
+ vdo_forget(vdo->next_layout.head);
+ vdo->states.vdo.config.physical_blocks = vdo->layout.size;
+ vdo_update_slab_depot_size(vdo->depot);
+ vdo_save_components(vdo, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_USE_NEW_SLABS:
+ vdo_use_new_slabs(vdo->depot, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_END:
+ vdo->depot->summary_origin =
+ vdo_get_known_partition(&vdo->layout,
+ VDO_SLAB_SUMMARY_PARTITION)->offset;
+ vdo->recovery_journal->origin =
+ vdo_get_known_partition(&vdo->layout,
+ VDO_RECOVERY_JOURNAL_PARTITION)->offset;
+ break;
+
+ case GROW_PHYSICAL_PHASE_ERROR:
+ vdo_enter_read_only_mode(vdo, completion->result);
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ vdo_uninitialize_layout(&vdo->next_layout);
+ finish_operation_callback(completion);
+}
+
+/**
+ * handle_physical_growth_error() - Handle an error during the grow physical process.
+ * @completion: The admin completion.
+ */
+static void handle_physical_growth_error(struct vdo_completion *completion)
+{
+ completion->vdo->admin.phase = GROW_PHYSICAL_PHASE_ERROR;
+ grow_physical_callback(completion);
+}
+
+/**
+ * perform_grow_physical() - Grow the physical size of the vdo.
+ * @vdo: The vdo to resize.
+ * @new_physical_blocks: The new physical size in blocks.
+ *
+ * Context: This method may only be called when the vdo has been suspended and must not be called
+ * from a base thread.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int perform_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks)
+{
+ int result;
+ block_count_t new_depot_size, prepared_depot_size;
+ block_count_t old_physical_blocks = vdo->states.vdo.config.physical_blocks;
+
+ /* Skip any noop grows. */
+ if (old_physical_blocks == new_physical_blocks)
+ return VDO_SUCCESS;
+
+ if (new_physical_blocks != vdo->next_layout.size) {
+ /*
+ * Either the VDO isn't prepared to grow, or it was prepared to grow to a different
+ * size. Doing this check here relies on the fact that the call to this method is
+ * done under the dmsetup message lock.
+ */
+ vdo_uninitialize_layout(&vdo->next_layout);
+ vdo_abandon_new_slabs(vdo->depot);
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ /* Validate that we are prepared to grow appropriately. */
+ new_depot_size =
+ vdo_get_known_partition(&vdo->next_layout, VDO_SLAB_DEPOT_PARTITION)->count;
+ prepared_depot_size = (vdo->depot->new_slabs == NULL) ? 0 : vdo->depot->new_size;
+ if (prepared_depot_size != new_depot_size)
+ return VDO_PARAMETER_MISMATCH;
+
+ result = perform_admin_operation(vdo, GROW_PHYSICAL_PHASE_START,
+ grow_physical_callback,
+ handle_physical_growth_error, "grow physical");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_log_info("Physical block count was %llu, now %llu",
+ (unsigned long long) old_physical_blocks,
+ (unsigned long long) new_physical_blocks);
+ return VDO_SUCCESS;
+}
+
+/**
+ * apply_new_vdo_configuration() - Attempt to make any configuration changes from the table being
+ * resumed.
+ * @vdo: The vdo being resumed.
+ * @config: The new device configuration derived from the table with which the vdo is being
+ * resumed.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check apply_new_vdo_configuration(struct vdo *vdo,
+ struct device_config *config)
+{
+ int result;
+
+ result = perform_grow_logical(vdo, config->logical_blocks);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("grow logical operation failed, result = %d", result);
+ return result;
+ }
+
+ result = perform_grow_physical(vdo, config->physical_blocks);
+ if (result != VDO_SUCCESS)
+ vdo_log_error("resize operation failed, result = %d", result);
+
+ return result;
+}
+
+static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo)
+{
+ struct device_config *config = ti->private;
+ const char *device_name = vdo_get_device_name(ti);
+ block_count_t backing_blocks;
+ int result;
+
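+	/*
+	 * Preresume loads the vdo if this is the first resume after the table was loaded (the
+	 * pre-loaded state), applies any logical or physical growth implied by the new table, and
+	 * then resumes the vdo itself.
+	 */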
+ backing_blocks = get_underlying_device_block_count(vdo);
+ if (backing_blocks < config->physical_blocks) {
+ /* FIXME: can this still happen? */
+ vdo_log_error("resume of device '%s' failed: backing device has %llu blocks but VDO physical size is %llu blocks",
+ device_name, (unsigned long long) backing_blocks,
+ (unsigned long long) config->physical_blocks);
+ return -EINVAL;
+ }
+
+ if (vdo_get_admin_state(vdo) == VDO_ADMIN_STATE_PRE_LOADED) {
+ vdo_log_info("starting device '%s'", device_name);
+ result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback,
+ handle_load_error, "load");
+ if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) {
+ /*
+ * Something has gone very wrong. Make sure everything has drained and
+ * leave the device in an unresumable state.
+ */
+ vdo_log_error_strerror(result,
+ "Start failed, could not load VDO metadata");
+ vdo->suspend_type = VDO_ADMIN_STATE_STOPPING;
+ perform_admin_operation(vdo, SUSPEND_PHASE_START,
+ suspend_callback, suspend_callback,
+ "suspend");
+ return result;
+ }
+
+ /* Even if the VDO is read-only, it is now able to handle read requests. */
+ vdo_log_info("device '%s' started", device_name);
+ }
+
+ vdo_log_info("resuming device '%s'", device_name);
+
+ /* If this fails, the VDO was not in a state to be resumed. This should never happen. */
+ result = apply_new_vdo_configuration(vdo, config);
+ BUG_ON(result == VDO_INVALID_ADMIN_STATE);
+
+ /*
+ * Now that we've tried to modify the vdo, the new config *is* the config, whether the
+ * modifications worked or not.
+ */
+ vdo->device_config = config;
+
+ /*
+ * Any error here is highly unexpected and the state of the vdo is questionable, so we mark
+ * it read-only in memory. Because we are suspended, the read-only state will not be
+ * written to disk.
+ */
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "Commit of modifications to device '%s' failed",
+ device_name);
+ vdo_enter_read_only_mode(vdo, result);
+ return result;
+ }
+
+ if (vdo_get_admin_state(vdo)->normal) {
+ /* The VDO was just started, so we don't need to resume it. */
+ return VDO_SUCCESS;
+ }
+
+ result = perform_admin_operation(vdo, RESUME_PHASE_START, resume_callback,
+ resume_callback, "resume");
+ BUG_ON(result == VDO_INVALID_ADMIN_STATE);
+ if (result == VDO_READ_ONLY) {
+ /* Even if the vdo is read-only, it has still resumed. */
+ result = VDO_SUCCESS;
+ }
+
+ if (result != VDO_SUCCESS)
+ vdo_log_error("resume of device '%s' failed with error: %d", device_name,
+ result);
+
+ return result;
+}
+
+static int vdo_preresume(struct dm_target *ti)
+{
+ struct registered_thread instance_thread;
+ struct vdo *vdo = get_vdo_for_target(ti);
+ int result;
+
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+ result = vdo_preresume_registered(ti, vdo);
+ if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE))
+ result = -EINVAL;
+ vdo_unregister_thread_device_id();
+ return vdo_status_to_errno(result);
+}
+
+static void vdo_resume(struct dm_target *ti)
+{
+ struct registered_thread instance_thread;
+
+ vdo_register_thread_device_id(&instance_thread,
+ &get_vdo_for_target(ti)->instance);
+ vdo_log_info("device '%s' resumed", vdo_get_device_name(ti));
+ vdo_unregister_thread_device_id();
+}
+
+/*
+ * If anything changes that affects how user tools will interact with vdo, update the version
+ * number and make sure documentation about the change is complete so tools can properly update
+ * their management code.
+ */
+static struct target_type vdo_target_bio = {
+ .features = DM_TARGET_SINGLETON,
+ .name = "vdo",
+ .version = { 9, 0, 0 },
+ .module = THIS_MODULE,
+ .ctr = vdo_ctr,
+ .dtr = vdo_dtr,
+ .io_hints = vdo_io_hints,
+ .iterate_devices = vdo_iterate_devices,
+ .map = vdo_map_bio,
+ .message = vdo_message,
+ .status = vdo_status,
+ .presuspend = vdo_presuspend,
+ .postsuspend = vdo_postsuspend,
+ .preresume = vdo_preresume,
+ .resume = vdo_resume,
+};
+
+static bool dm_registered;
+
+static void vdo_module_destroy(void)
+{
+ vdo_log_debug("unloading");
+
+ if (dm_registered)
+ dm_unregister_target(&vdo_target_bio);
+
+ VDO_ASSERT_LOG_ONLY(instances.count == 0,
+ "should have no instance numbers still in use, but have %u",
+ instances.count);
+ vdo_free(instances.words);
+ memset(&instances, 0, sizeof(struct instance_tracker));
+}
+
+static int __init vdo_init(void)
+{
+ int result = 0;
+
+ /* Memory tracking must be initialized first for accurate accounting. */
+ vdo_memory_init();
+ vdo_initialize_threads_mutex();
+ vdo_initialize_thread_device_registry();
+ vdo_initialize_device_registry_once();
+
+ /* Add VDO errors to the set of errors registered by the indexer. */
+ result = vdo_register_status_codes();
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("vdo_register_status_codes failed %d", result);
+ vdo_module_destroy();
+ return result;
+ }
+
+ result = dm_register_target(&vdo_target_bio);
+ if (result < 0) {
+ vdo_log_error("dm_register_target failed %d", result);
+ vdo_module_destroy();
+ return result;
+ }
+ dm_registered = true;
+
+ return result;
+}
+
+static void __exit vdo_exit(void)
+{
+ vdo_module_destroy();
+ /* Memory tracking cleanup must be done last. */
+ vdo_memory_exit();
+}
+
+module_init(vdo_init);
+module_exit(vdo_exit);
+
+module_param_named(log_level, vdo_log_level, uint, 0644);
+MODULE_PARM_DESC(log_level, "Log level for log messages");
+
+MODULE_DESCRIPTION(DM_NAME " target for transparent deduplication");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-vdo/dump.c b/drivers/md/dm-vdo/dump.c
new file mode 100644
index 000000000000..00e575d7d773
--- /dev/null
+++ b/drivers/md/dm-vdo/dump.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "dump.h"
+
+#include <linux/module.h>
+
+#include "memory-alloc.h"
+#include "string-utils.h"
+
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "funnel-workqueue.h"
+#include "io-submitter.h"
+#include "logger.h"
+#include "types.h"
+#include "vdo.h"
+
+enum dump_options {
+ /* Work queues */
+ SHOW_QUEUES,
+ /* Memory pools */
+ SHOW_VIO_POOL,
+ /* Others */
+ SHOW_VDO_STATUS,
+ /* This one means an option overrides the "default" choices, instead of altering them. */
+ SKIP_DEFAULT
+};
+
+enum dump_option_flags {
+ /* Work queues */
+ FLAG_SHOW_QUEUES = (1 << SHOW_QUEUES),
+ /* Memory pools */
+ FLAG_SHOW_VIO_POOL = (1 << SHOW_VIO_POOL),
+ /* Others */
+ FLAG_SHOW_VDO_STATUS = (1 << SHOW_VDO_STATUS),
+ /* Special */
+ FLAG_SKIP_DEFAULT = (1 << SKIP_DEFAULT)
+};
+
+#define FLAGS_ALL_POOLS (FLAG_SHOW_VIO_POOL)
+#define DEFAULT_DUMP_FLAGS (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS)
+/* Another static buffer... log10(256) = 2.408+, round up: */
+#define DIGITS_PER_U64 (1 + sizeof(u64) * 2409 / 1000)
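+/* For a u64 (8 bytes) this evaluates to 1 + 8 * 2409 / 1000 = 20, the digit count of U64_MAX. */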
+
+static inline bool is_arg_string(const char *arg, const char *this_option)
+{
+ /* convention seems to be case-independent options */
+ return strncasecmp(arg, this_option, strlen(this_option)) == 0;
+}
+
+static void do_dump(struct vdo *vdo, unsigned int dump_options_requested,
+ const char *why)
+{
+ u32 active, maximum;
+ s64 outstanding;
+
+ vdo_log_info("%s dump triggered via %s", VDO_LOGGING_MODULE_NAME, why);
+ active = get_data_vio_pool_active_requests(vdo->data_vio_pool);
+ maximum = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
+ outstanding = (atomic64_read(&vdo->stats.bios_submitted) -
+ atomic64_read(&vdo->stats.bios_completed));
+ vdo_log_info("%u device requests outstanding (max %u), %lld bio requests outstanding, device '%s'",
+ active, maximum, outstanding,
+ vdo_get_device_name(vdo->device_config->owning_target));
+ if (((dump_options_requested & FLAG_SHOW_QUEUES) != 0) && (vdo->threads != NULL)) {
+ thread_id_t id;
+
+ for (id = 0; id < vdo->thread_config.thread_count; id++)
+ vdo_dump_work_queue(vdo->threads[id].queue);
+ }
+
+ vdo_dump_hash_zones(vdo->hash_zones);
+ dump_data_vio_pool(vdo->data_vio_pool,
+ (dump_options_requested & FLAG_SHOW_VIO_POOL) != 0);
+ if ((dump_options_requested & FLAG_SHOW_VDO_STATUS) != 0)
+ vdo_dump_status(vdo);
+
+ vdo_report_memory_usage();
+ vdo_log_info("end of %s dump", VDO_LOGGING_MODULE_NAME);
+}
+
+static int parse_dump_options(unsigned int argc, char *const *argv,
+ unsigned int *dump_options_requested_ptr)
+{
+ unsigned int dump_options_requested = 0;
+
+ static const struct {
+ const char *name;
+ unsigned int flags;
+ } option_names[] = {
+ { "viopool", FLAG_SKIP_DEFAULT | FLAG_SHOW_VIO_POOL },
+ { "vdo", FLAG_SKIP_DEFAULT | FLAG_SHOW_VDO_STATUS },
+ { "pools", FLAG_SKIP_DEFAULT | FLAGS_ALL_POOLS },
+ { "queues", FLAG_SKIP_DEFAULT | FLAG_SHOW_QUEUES },
+ { "threads", FLAG_SKIP_DEFAULT | FLAG_SHOW_QUEUES },
+ { "default", FLAG_SKIP_DEFAULT | DEFAULT_DUMP_FLAGS },
+ { "all", ~0 },
+ };
+
+ bool options_okay = true;
+ unsigned int i;
+
+ for (i = 1; i < argc; i++) {
+ unsigned int j;
+
+ for (j = 0; j < ARRAY_SIZE(option_names); j++) {
+ if (is_arg_string(argv[i], option_names[j].name)) {
+ dump_options_requested |= option_names[j].flags;
+ break;
+ }
+ }
+ if (j == ARRAY_SIZE(option_names)) {
+ vdo_log_warning("dump option name '%s' unknown", argv[i]);
+ options_okay = false;
+ }
+ }
+ if (!options_okay)
+ return -EINVAL;
+ if ((dump_options_requested & FLAG_SKIP_DEFAULT) == 0)
+ dump_options_requested |= DEFAULT_DUMP_FLAGS;
+ *dump_options_requested_ptr = dump_options_requested;
+ return 0;
+}
+
+/* Dump as specified by zero or more string arguments. */
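+/*
+ * This is normally reached through the target's "dump" message; e.g. (assuming the usual dmsetup
+ * message interface) "dmsetup message <vdo-device> 0 dump queues pools", or with no options to
+ * get the default queue and status dump.
+ */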
+int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why)
+{
+ unsigned int dump_options_requested = 0;
+ int result = parse_dump_options(argc, argv, &dump_options_requested);
+
+ if (result != 0)
+ return result;
+
+ do_dump(vdo, dump_options_requested, why);
+ return 0;
+}
+
+/* Dump everything we know how to dump */
+void vdo_dump_all(struct vdo *vdo, const char *why)
+{
+ do_dump(vdo, ~0, why);
+}
+
+/*
+ * Dump out the data_vio waiters on a waitq.
+ * wait_on should be the label to print for queue (e.g. logical or physical)
+ */
+static void dump_vio_waiters(struct vdo_wait_queue *waitq, char *wait_on)
+{
+ struct vdo_waiter *waiter, *first = vdo_waitq_get_first_waiter(waitq);
+ struct data_vio *data_vio;
+
+ if (first == NULL)
+ return;
+
+ data_vio = vdo_waiter_as_data_vio(first);
+
+ vdo_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
+ wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
+ data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio));
+
+ for (waiter = first->next_waiter; waiter != first; waiter = waiter->next_waiter) {
+ data_vio = vdo_waiter_as_data_vio(waiter);
+ vdo_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
+ data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
+ data_vio->duplicate.pbn,
+ get_data_vio_operation_name(data_vio));
+ }
+}
+
+/*
+ * Encode various attributes of a data_vio as a string of one-character flags. This encoding is for
+ * logging brevity:
+ *
+ * R => vio completion result not VDO_SUCCESS
+ * W => vio is on a waitq
+ * D => vio is a duplicate
+ * p => vio is a partial block operation
+ * z => vio is a zero block
+ * d => vio is a discard
+ *
+ * The common case of no flags set will result in an empty, null-terminated buffer. If any flags
+ * are encoded, the first character in the string will be a space character.
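+ * For example, a waiting duplicate discard would be encoded as " WDd".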
+ */
+static void encode_vio_dump_flags(struct data_vio *data_vio, char buffer[8])
+{
+ char *p_flag = buffer;
+ *p_flag++ = ' ';
+ if (data_vio->vio.completion.result != VDO_SUCCESS)
+ *p_flag++ = 'R';
+ if (data_vio->waiter.next_waiter != NULL)
+ *p_flag++ = 'W';
+ if (data_vio->is_duplicate)
+ *p_flag++ = 'D';
+ if (data_vio->is_partial)
+ *p_flag++ = 'p';
+ if (data_vio->is_zero)
+ *p_flag++ = 'z';
+ if (data_vio->remaining_discard > 0)
+ *p_flag++ = 'd';
+ if (p_flag == &buffer[1]) {
+ /* No flags, so remove the blank space. */
+ p_flag = buffer;
+ }
+ *p_flag = '\0';
+}
+
+/* Implements buffer_dump_function. */
+void dump_data_vio(void *data)
+{
+ struct data_vio *data_vio = data;
+
+ /*
+ * This just needs to be big enough to hold a queue (thread) name and a function name (plus
+ * a separator character and NUL). The latter is limited only by taste.
+ *
+ * In making this static, we're assuming only one "dump" will run at a time. If more than
+ * one does run, the log output will be garbled anyway.
+ */
+ static char vio_completion_dump_buffer[100 + MAX_VDO_WORK_QUEUE_NAME_LEN];
+ static char vio_block_number_dump_buffer[sizeof("P L D") + 3 * DIGITS_PER_U64];
+ static char vio_flush_generation_buffer[sizeof(" FG") + DIGITS_PER_U64];
+ static char flags_dump_buffer[8];
+
+ /*
+ * We're likely to be logging a couple thousand of these lines, and in some circumstances
+ * syslogd may have trouble keeping up, so keep it BRIEF rather than user-friendly.
+ */
+ vdo_dump_completion_to_buffer(&data_vio->vio.completion,
+ vio_completion_dump_buffer,
+ sizeof(vio_completion_dump_buffer));
+ if (data_vio->is_duplicate) {
+ snprintf(vio_block_number_dump_buffer,
+ sizeof(vio_block_number_dump_buffer), "P%llu L%llu D%llu",
+ data_vio->allocation.pbn, data_vio->logical.lbn,
+ data_vio->duplicate.pbn);
+ } else if (data_vio_has_allocation(data_vio)) {
+ snprintf(vio_block_number_dump_buffer,
+ sizeof(vio_block_number_dump_buffer), "P%llu L%llu",
+ data_vio->allocation.pbn, data_vio->logical.lbn);
+ } else {
+ snprintf(vio_block_number_dump_buffer,
+ sizeof(vio_block_number_dump_buffer), "L%llu",
+ data_vio->logical.lbn);
+ }
+
+ if (data_vio->flush_generation != 0) {
+ snprintf(vio_flush_generation_buffer,
+ sizeof(vio_flush_generation_buffer), " FG%llu",
+ data_vio->flush_generation);
+ } else {
+ vio_flush_generation_buffer[0] = 0;
+ }
+
+ encode_vio_dump_flags(data_vio, flags_dump_buffer);
+
+ vdo_log_info(" vio %px %s%s %s %s%s", data_vio,
+ vio_block_number_dump_buffer,
+ vio_flush_generation_buffer,
+ get_data_vio_operation_name(data_vio),
+ vio_completion_dump_buffer,
+ flags_dump_buffer);
+ /*
+ * might want info on: wantUDSAnswer / operation / status
+ * might want info on: bio / bios_merged
+ */
+
+ dump_vio_waiters(&data_vio->logical.waiters, "lbn");
+
+ /* might want to dump more info from vio here */
+}
diff --git a/drivers/md/dm-vdo/dump.h b/drivers/md/dm-vdo/dump.h
new file mode 100644
index 000000000000..ad47c70cca78
--- /dev/null
+++ b/drivers/md/dm-vdo/dump.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_DUMP_H
+#define VDO_DUMP_H
+
+#include "types.h"
+
+int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why);
+
+void vdo_dump_all(struct vdo *vdo, const char *why);
+
+void dump_data_vio(void *data);
+
+#endif /* VDO_DUMP_H */
diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c
new file mode 100644
index 000000000000..a34ea0229d53
--- /dev/null
+++ b/drivers/md/dm-vdo/encodings.c
@@ -0,0 +1,1483 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "encodings.h"
+
+#include <linux/log2.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "constants.h"
+#include "status-codes.h"
+#include "types.h"
+
+/** The maximum logical space is 4 petabytes, which is 1 terablock. */
+static const block_count_t MAXIMUM_VDO_LOGICAL_BLOCKS = 1024ULL * 1024 * 1024 * 1024;
+
+/** The maximum physical space is 256 terabytes, which is 64 gigablocks. */
+static const block_count_t MAXIMUM_VDO_PHYSICAL_BLOCKS = 1024ULL * 1024 * 1024 * 64;
+
+struct geometry_block {
+ char magic_number[VDO_GEOMETRY_MAGIC_NUMBER_SIZE];
+ struct packed_header header;
+ u32 checksum;
+} __packed;
+
+static const struct header GEOMETRY_BLOCK_HEADER_5_0 = {
+ .id = VDO_GEOMETRY_BLOCK,
+ .version = {
+ .major_version = 5,
+ .minor_version = 0,
+ },
+ /*
+ * Note: this size isn't just the payload size following the header, like it is everywhere
+ * else in VDO.
+ */
+ .size = sizeof(struct geometry_block) + sizeof(struct volume_geometry),
+};
+
+static const struct header GEOMETRY_BLOCK_HEADER_4_0 = {
+ .id = VDO_GEOMETRY_BLOCK,
+ .version = {
+ .major_version = 4,
+ .minor_version = 0,
+ },
+ /*
+ * Note: this size isn't just the payload size following the header, like it is everywhere
+ * else in VDO.
+ */
+ .size = sizeof(struct geometry_block) + sizeof(struct volume_geometry_4_0),
+};
+
+const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1] = "dmvdo001";
+
+#define PAGE_HEADER_4_1_SIZE (8 + 8 + 8 + 1 + 1 + 1 + 1)
+
+static const struct version_number BLOCK_MAP_4_1 = {
+ .major_version = 4,
+ .minor_version = 1,
+};
+
+const struct header VDO_BLOCK_MAP_HEADER_2_0 = {
+ .id = VDO_BLOCK_MAP,
+ .version = {
+ .major_version = 2,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct block_map_state_2_0),
+};
+
+const struct header VDO_RECOVERY_JOURNAL_HEADER_7_0 = {
+ .id = VDO_RECOVERY_JOURNAL,
+ .version = {
+ .major_version = 7,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct recovery_journal_state_7_0),
+};
+
+const struct header VDO_SLAB_DEPOT_HEADER_2_0 = {
+ .id = VDO_SLAB_DEPOT,
+ .version = {
+ .major_version = 2,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct slab_depot_state_2_0),
+};
+
+static const struct header VDO_LAYOUT_HEADER_3_0 = {
+ .id = VDO_LAYOUT,
+ .version = {
+ .major_version = 3,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct layout_3_0) + (sizeof(struct partition_3_0) * VDO_PARTITION_COUNT),
+};
+
+static const enum partition_id REQUIRED_PARTITIONS[] = {
+ VDO_BLOCK_MAP_PARTITION,
+ VDO_SLAB_DEPOT_PARTITION,
+ VDO_RECOVERY_JOURNAL_PARTITION,
+ VDO_SLAB_SUMMARY_PARTITION,
+};
+
+/*
+ * The current version for the data encoded in the super block. This must be changed any time there
+ * is a change to encoding of the component data of any VDO component.
+ */
+static const struct version_number VDO_COMPONENT_DATA_41_0 = {
+ .major_version = 41,
+ .minor_version = 0,
+};
+
+const struct version_number VDO_VOLUME_VERSION_67_0 = {
+ .major_version = 67,
+ .minor_version = 0,
+};
+
+static const struct header SUPER_BLOCK_HEADER_12_0 = {
+ .id = VDO_SUPER_BLOCK,
+ .version = {
+ .major_version = 12,
+ .minor_version = 0,
+ },
+
+ /* This is the minimum size, if the super block contains no components. */
+ .size = VDO_SUPER_BLOCK_FIXED_SIZE - VDO_ENCODED_HEADER_SIZE,
+};
+
+/**
+ * validate_version() - Check whether a version matches an expected version.
+ * @expected_version: The expected version.
+ * @actual_version: The version being validated.
+ * @component_name: The name of the component or the calling function (for error logging).
+ *
+ * Logs an error describing a mismatch.
+ *
+ * Return: VDO_SUCCESS if the versions are the same,
+ * VDO_UNSUPPORTED_VERSION if the versions don't match.
+ */
+static int __must_check validate_version(struct version_number expected_version,
+ struct version_number actual_version,
+ const char *component_name)
+{
+ if (!vdo_are_same_version(expected_version, actual_version)) {
+ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "%s version mismatch, expected %d.%d, got %d.%d",
+ component_name,
+ expected_version.major_version,
+ expected_version.minor_version,
+ actual_version.major_version,
+ actual_version.minor_version);
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_validate_header() - Check whether a header matches expectations.
+ * @expected_header: The expected header.
+ * @actual_header: The header being validated.
+ * @exact_size: If true, the size fields of the two headers must be the same, otherwise it is
+ * required that actual_header.size >= expected_header.size.
+ * @name: The name of the component or the calling function (for error logging).
+ *
+ * Logs an error describing the first mismatch found.
+ *
+ * Return: VDO_SUCCESS if the header meets expectations,
+ * VDO_INCORRECT_COMPONENT if the component ids don't match,
+ * VDO_UNSUPPORTED_VERSION if the versions or sizes don't match.
+ */
+int vdo_validate_header(const struct header *expected_header,
+ const struct header *actual_header, bool exact_size,
+ const char *name)
+{
+ int result;
+
+ if (expected_header->id != actual_header->id) {
+ return vdo_log_error_strerror(VDO_INCORRECT_COMPONENT,
+ "%s ID mismatch, expected %d, got %d",
+ name, expected_header->id,
+ actual_header->id);
+ }
+
+ result = validate_version(expected_header->version, actual_header->version,
+ name);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if ((expected_header->size > actual_header->size) ||
+ (exact_size && (expected_header->size < actual_header->size))) {
+ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "%s size mismatch, expected %zu, got %zu",
+ name, expected_header->size,
+ actual_header->size);
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void encode_version_number(u8 *buffer, size_t *offset,
+ struct version_number version)
+{
+ struct packed_version_number packed = vdo_pack_version_number(version);
+
+ memcpy(buffer + *offset, &packed, sizeof(packed));
+ *offset += sizeof(packed);
+}
+
+void vdo_encode_header(u8 *buffer, size_t *offset, const struct header *header)
+{
+ struct packed_header packed = vdo_pack_header(header);
+
+ memcpy(buffer + *offset, &packed, sizeof(packed));
+ *offset += sizeof(packed);
+}
+
+static void decode_version_number(u8 *buffer, size_t *offset,
+ struct version_number *version)
+{
+ struct packed_version_number packed;
+
+ memcpy(&packed, buffer + *offset, sizeof(packed));
+ *offset += sizeof(packed);
+ *version = vdo_unpack_version_number(packed);
+}
+
+void vdo_decode_header(u8 *buffer, size_t *offset, struct header *header)
+{
+ struct packed_header packed;
+
+ memcpy(&packed, buffer + *offset, sizeof(packed));
+ *offset += sizeof(packed);
+
+ *header = vdo_unpack_header(&packed);
+}
+
+/**
+ * decode_volume_geometry() - Decode the on-disk representation of a volume geometry from a buffer.
+ * @buffer: A buffer to decode from.
+ * @offset: The offset in the buffer at which to decode.
+ * @geometry: The structure to receive the decoded fields.
+ * @version: The geometry block version to decode.
+ */
+static void decode_volume_geometry(u8 *buffer, size_t *offset,
+ struct volume_geometry *geometry, u32 version)
+{
+ u32 unused, mem;
+ enum volume_region_id id;
+ nonce_t nonce;
+ block_count_t bio_offset = 0;
+ bool sparse;
+
+ /* This is for backwards compatibility. */
+ decode_u32_le(buffer, offset, &unused);
+ geometry->unused = unused;
+
+ decode_u64_le(buffer, offset, &nonce);
+ geometry->nonce = nonce;
+
+ memcpy((unsigned char *) &geometry->uuid, buffer + *offset, sizeof(uuid_t));
+ *offset += sizeof(uuid_t);
+
+ if (version > 4)
+ decode_u64_le(buffer, offset, &bio_offset);
+ geometry->bio_offset = bio_offset;
+
+ for (id = 0; id < VDO_VOLUME_REGION_COUNT; id++) {
+ physical_block_number_t start_block;
+		u32 saved_id;
+
+ decode_u32_le(buffer, offset, &saved_id);
+ decode_u64_le(buffer, offset, &start_block);
+
+ geometry->regions[id] = (struct volume_region) {
+ .id = saved_id,
+ .start_block = start_block,
+ };
+ }
+
+ decode_u32_le(buffer, offset, &mem);
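+	/* Skip an unused 32-bit field. */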
+ *offset += sizeof(u32);
+ sparse = buffer[(*offset)++];
+
+ geometry->index_config = (struct index_config) {
+ .mem = mem,
+ .sparse = sparse,
+ };
+}
+
+/**
+ * vdo_parse_geometry_block() - Decode and validate an encoded geometry block.
+ * @block: The encoded geometry block.
+ * @geometry: The structure to receive the decoded fields.
+ */
+int __must_check vdo_parse_geometry_block(u8 *block, struct volume_geometry *geometry)
+{
+ u32 checksum, saved_checksum;
+ struct header header;
+ size_t offset = 0;
+ int result;
+
+ if (memcmp(block, VDO_GEOMETRY_MAGIC_NUMBER, VDO_GEOMETRY_MAGIC_NUMBER_SIZE) != 0)
+ return VDO_BAD_MAGIC;
+ offset += VDO_GEOMETRY_MAGIC_NUMBER_SIZE;
+
+ vdo_decode_header(block, &offset, &header);
+ if (header.version.major_version <= 4) {
+ result = vdo_validate_header(&GEOMETRY_BLOCK_HEADER_4_0, &header,
+ true, __func__);
+ } else {
+ result = vdo_validate_header(&GEOMETRY_BLOCK_HEADER_5_0, &header,
+ true, __func__);
+ }
+ if (result != VDO_SUCCESS)
+ return result;
+
+ decode_volume_geometry(block, &offset, geometry, header.version.major_version);
+
+ result = VDO_ASSERT(header.size == offset + sizeof(u32),
+ "should have decoded up to the geometry checksum");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Decode and verify the checksum. */
+ checksum = vdo_crc32(block, offset);
+ decode_u32_le(block, &offset, &saved_checksum);
+
+ return ((checksum == saved_checksum) ? VDO_SUCCESS : VDO_CHECKSUM_MISMATCH);
+}
+
+struct block_map_page *vdo_format_block_map_page(void *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ bool initialized)
+{
+ struct block_map_page *page = buffer;
+
+ memset(buffer, 0, VDO_BLOCK_SIZE);
+ page->version = vdo_pack_version_number(BLOCK_MAP_4_1);
+ page->header.nonce = __cpu_to_le64(nonce);
+ page->header.pbn = __cpu_to_le64(pbn);
+ page->header.initialized = initialized;
+ return page;
+}
+
+enum block_map_page_validity vdo_validate_block_map_page(struct block_map_page *page,
+ nonce_t nonce,
+ physical_block_number_t pbn)
+{
+ BUILD_BUG_ON(sizeof(struct block_map_page_header) != PAGE_HEADER_4_1_SIZE);
+
+ if (!vdo_are_same_version(BLOCK_MAP_4_1,
+ vdo_unpack_version_number(page->version)) ||
+ !page->header.initialized || (nonce != __le64_to_cpu(page->header.nonce)))
+ return VDO_BLOCK_MAP_PAGE_INVALID;
+
+ if (pbn != vdo_get_block_map_page_pbn(page))
+ return VDO_BLOCK_MAP_PAGE_BAD;
+
+ return VDO_BLOCK_MAP_PAGE_VALID;
+}
+
+static int decode_block_map_state_2_0(u8 *buffer, size_t *offset,
+ struct block_map_state_2_0 *state)
+{
+ size_t initial_offset;
+ block_count_t flat_page_count, root_count;
+ physical_block_number_t flat_page_origin, root_origin;
+ struct header header;
+ int result;
+
+ vdo_decode_header(buffer, offset, &header);
+ result = vdo_validate_header(&VDO_BLOCK_MAP_HEADER_2_0, &header, true, __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+
+ decode_u64_le(buffer, offset, &flat_page_origin);
+	result = VDO_ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+			    "Flat page origin must be %u (recorded as %llu)",
+			    VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+			    (unsigned long long) flat_page_origin);
+ if (result != VDO_SUCCESS)
+ return result;
+
+	decode_u64_le(buffer, offset, &flat_page_count);
+	result = VDO_ASSERT(flat_page_count == 0,
+			    "Flat page count must be 0 (recorded as %llu)",
+			    (unsigned long long) flat_page_count);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ decode_u64_le(buffer, offset, &root_origin);
+ decode_u64_le(buffer, offset, &root_count);
+
+ result = VDO_ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
+ "decoded block map component size must match header size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *state = (struct block_map_state_2_0) {
+ .flat_page_origin = flat_page_origin,
+ .flat_page_count = flat_page_count,
+ .root_origin = root_origin,
+ .root_count = root_count,
+ };
+
+ return VDO_SUCCESS;
+}
+
+static void encode_block_map_state_2_0(u8 *buffer, size_t *offset,
+ struct block_map_state_2_0 state)
+{
+ size_t initial_offset;
+
+ vdo_encode_header(buffer, offset, &VDO_BLOCK_MAP_HEADER_2_0);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, state.flat_page_origin);
+ encode_u64_le(buffer, offset, state.flat_page_count);
+ encode_u64_le(buffer, offset, state.root_origin);
+ encode_u64_le(buffer, offset, state.root_count);
+
+ VDO_ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
+ "encoded block map component size must match header size");
+}
+
+/**
+ * vdo_compute_new_forest_pages() - Compute the number of pages which must be allocated at each
+ *                                  level in order to grow the forest to a new number of entries.
+ * @root_count: The number of block map tree roots.
+ * @old_sizes: The boundary of the existing forest, or NULL if there is none.
+ * @entries: The new number of entries the block map must address.
+ * @new_sizes: The boundary to receive the new size of each level.
+ *
+ * Return: The total number of non-leaf pages required.
+ */
+block_count_t vdo_compute_new_forest_pages(root_count_t root_count,
+ struct boundary *old_sizes,
+ block_count_t entries,
+ struct boundary *new_sizes)
+{
+ page_count_t leaf_pages = max(vdo_compute_block_map_page_count(entries), 1U);
+ page_count_t level_size = DIV_ROUND_UP(leaf_pages, root_count);
+ block_count_t total_pages = 0;
+ height_t height;
+
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT; height++) {
+ block_count_t new_pages;
+
+ level_size = DIV_ROUND_UP(level_size, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
+ new_sizes->levels[height] = level_size;
+ new_pages = level_size;
+ if (old_sizes != NULL)
+ new_pages -= old_sizes->levels[height];
+ total_pages += (new_pages * root_count);
+ }
+
+ return total_pages;
+}
+
+/**
+ * encode_recovery_journal_state_7_0() - Encode the state of a recovery journal into a buffer.
+ */
+static void encode_recovery_journal_state_7_0(u8 *buffer, size_t *offset,
+ struct recovery_journal_state_7_0 state)
+{
+ size_t initial_offset;
+
+ vdo_encode_header(buffer, offset, &VDO_RECOVERY_JOURNAL_HEADER_7_0);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, state.journal_start);
+ encode_u64_le(buffer, offset, state.logical_blocks_used);
+ encode_u64_le(buffer, offset, state.block_map_data_blocks);
+
+ VDO_ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
+ "encoded recovery journal component size must match header size");
+}
+
+/**
+ * decode_recovery_journal_state_7_0() - Decode the state of a recovery journal saved in a buffer.
+ * @buffer: The buffer containing the saved state.
+ * @state: A pointer to a recovery journal state to hold the result of a successful decode.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check decode_recovery_journal_state_7_0(u8 *buffer, size_t *offset,
+ struct recovery_journal_state_7_0 *state)
+{
+ struct header header;
+ int result;
+ size_t initial_offset;
+ sequence_number_t journal_start;
+ block_count_t logical_blocks_used, block_map_data_blocks;
+
+ vdo_decode_header(buffer, offset, &header);
+ result = vdo_validate_header(&VDO_RECOVERY_JOURNAL_HEADER_7_0, &header, true,
+ __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+ decode_u64_le(buffer, offset, &journal_start);
+ decode_u64_le(buffer, offset, &logical_blocks_used);
+ decode_u64_le(buffer, offset, &block_map_data_blocks);
+
+ result = VDO_ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
+ "decoded recovery journal component size must match header size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *state = (struct recovery_journal_state_7_0) {
+ .journal_start = journal_start,
+ .logical_blocks_used = logical_blocks_used,
+ .block_map_data_blocks = block_map_data_blocks,
+ };
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_get_journal_operation_name() - Get the name of a journal operation.
+ * @operation: The operation to name.
+ *
+ * Return: The name of the operation.
+ */
+const char *vdo_get_journal_operation_name(enum journal_operation operation)
+{
+ switch (operation) {
+ case VDO_JOURNAL_DATA_REMAPPING:
+ return "data remapping";
+
+ case VDO_JOURNAL_BLOCK_MAP_REMAPPING:
+ return "block map remapping";
+
+ default:
+ return "unknown journal operation";
+ }
+}
+
+/**
+ * encode_slab_depot_state_2_0() - Encode the state of a slab depot into a buffer.
+ */
+static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset,
+ struct slab_depot_state_2_0 state)
+{
+ size_t initial_offset;
+
+ vdo_encode_header(buffer, offset, &VDO_SLAB_DEPOT_HEADER_2_0);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, state.slab_config.slab_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.data_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.reference_count_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_flushing_threshold);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_blocking_threshold);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_scrubbing_threshold);
+ encode_u64_le(buffer, offset, state.first_block);
+ encode_u64_le(buffer, offset, state.last_block);
+ buffer[(*offset)++] = state.zone_count;
+
+ VDO_ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
+			    "encoded slab depot component size must match header size");
+}
+
+/**
+ * decode_slab_depot_state_2_0() - Decode slab depot component state version 2.0 from a buffer.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int decode_slab_depot_state_2_0(u8 *buffer, size_t *offset,
+ struct slab_depot_state_2_0 *state)
+{
+ struct header header;
+ int result;
+ size_t initial_offset;
+ struct slab_config slab_config;
+ block_count_t count;
+ physical_block_number_t first_block, last_block;
+ zone_count_t zone_count;
+
+ vdo_decode_header(buffer, offset, &header);
+ result = vdo_validate_header(&VDO_SLAB_DEPOT_HEADER_2_0, &header, true,
+ __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.data_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.reference_count_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_flushing_threshold = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_blocking_threshold = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_scrubbing_threshold = count;
+
+ decode_u64_le(buffer, offset, &first_block);
+ decode_u64_le(buffer, offset, &last_block);
+ zone_count = buffer[(*offset)++];
+
+ result = VDO_ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
+ "decoded slab depot component size must match header size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *state = (struct slab_depot_state_2_0) {
+ .slab_config = slab_config,
+ .first_block = first_block,
+ .last_block = last_block,
+ .zone_count = zone_count,
+ };
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_configure_slab_depot() - Configure the slab depot.
+ * @partition: The slab depot partition.
+ * @slab_config: The configuration of a single slab.
+ * @zone_count: The number of zones the depot will use.
+ * @state: The state structure to be configured.
+ *
+ * Configures the slab_depot for the specified storage capacity, finding the number of data blocks
+ * that will fit and still leave room for the depot metadata, then returns the saved state for that
+ * configuration.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_configure_slab_depot(const struct partition *partition,
+ struct slab_config slab_config, zone_count_t zone_count,
+ struct slab_depot_state_2_0 *state)
+{
+ block_count_t total_slab_blocks, total_data_blocks;
+ size_t slab_count;
+ physical_block_number_t last_block;
+ block_count_t slab_size = slab_config.slab_blocks;
+
+ vdo_log_debug("slabDepot %s(block_count=%llu, first_block=%llu, slab_size=%llu, zone_count=%u)",
+ __func__, (unsigned long long) partition->count,
+ (unsigned long long) partition->offset,
+ (unsigned long long) slab_size, zone_count);
+
+ /* We do not allow runt slabs, so we waste up to a slab's worth. */
+ slab_count = (partition->count / slab_size);
+ if (slab_count == 0)
+ return VDO_NO_SPACE;
+
+ if (slab_count > MAX_VDO_SLABS)
+ return VDO_TOO_MANY_SLABS;
+
+ total_slab_blocks = slab_count * slab_config.slab_blocks;
+ total_data_blocks = slab_count * slab_config.data_blocks;
+ last_block = partition->offset + total_slab_blocks;
+
+ *state = (struct slab_depot_state_2_0) {
+ .slab_config = slab_config,
+ .first_block = partition->offset,
+ .last_block = last_block,
+ .zone_count = zone_count,
+ };
+
+ vdo_log_debug("slab_depot last_block=%llu, total_data_blocks=%llu, slab_count=%zu, left_over=%llu",
+ (unsigned long long) last_block,
+ (unsigned long long) total_data_blocks, slab_count,
+ (unsigned long long) (partition->count - (last_block - partition->offset)));
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_configure_slab() - Measure and initialize the configuration to use for each slab.
+ * @slab_size: The number of blocks per slab.
+ * @slab_journal_blocks: The number of blocks for the slab journal.
+ * @slab_config: The slab configuration to initialize.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_configure_slab(block_count_t slab_size, block_count_t slab_journal_blocks,
+ struct slab_config *slab_config)
+{
+ block_count_t ref_blocks, meta_blocks, data_blocks;
+ block_count_t flushing_threshold, remaining, blocking_threshold;
+ block_count_t minimal_extra_space, scrubbing_threshold;
+
+ if (slab_journal_blocks >= slab_size)
+ return VDO_BAD_CONFIGURATION;
+
+ /*
+ * This calculation should technically be a recurrence, but the total number of metadata
+ * blocks is currently less than a single block of ref_counts, so we'd gain at most one
+ * data block in each slab with more iteration.
+ */
+ ref_blocks = vdo_get_saved_reference_count_size(slab_size - slab_journal_blocks);
+ meta_blocks = (ref_blocks + slab_journal_blocks);
+
+ /* Make sure test code hasn't configured slabs to be too small. */
+ if (meta_blocks >= slab_size)
+ return VDO_BAD_CONFIGURATION;
+
+ /*
+ * If the slab size is very small, assume this must be a unit test and override the number
+ * of data blocks to be a power of two (wasting blocks in the slab). Many tests need their
+ * data_blocks fields to be the exact capacity of the configured volume, and that used to
+ * fall out since they use a power of two for the number of data blocks, the slab size was
+ * a power of two, and every block in a slab was a data block.
+ *
+ * TODO: Try to figure out some way of structuring testParameters and unit tests so this
+ * hack isn't needed without having to edit several unit tests every time the metadata size
+ * changes by one block.
+ */
+ data_blocks = slab_size - meta_blocks;
+ if ((slab_size < 1024) && !is_power_of_2(data_blocks))
+ data_blocks = ((block_count_t) 1 << ilog2(data_blocks));
+
+ /*
+ * Configure the slab journal thresholds. The flush threshold is 168 of 224 blocks in
+ * production, or 3/4ths, so we use this ratio for all sizes.
+ */
+ flushing_threshold = ((slab_journal_blocks * 3) + 3) / 4;
+ /*
+ * The blocking threshold should be far enough from the flushing threshold to not produce
+ * delays, but far enough from the end of the journal to allow multiple successive recovery
+ * failures.
+ */
+ remaining = slab_journal_blocks - flushing_threshold;
+ blocking_threshold = flushing_threshold + ((remaining * 5) / 7);
+ /* The scrubbing threshold should be at least 2048 entries before the end of the journal. */
+ minimal_extra_space = 1 + (MAXIMUM_VDO_USER_VIOS / VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK);
+ scrubbing_threshold = blocking_threshold;
+ if (slab_journal_blocks > minimal_extra_space)
+ scrubbing_threshold = slab_journal_blocks - minimal_extra_space;
+ if (blocking_threshold > scrubbing_threshold)
+ blocking_threshold = scrubbing_threshold;
+
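+	/*
+	 * A worked example with the production journal size of 224 blocks mentioned above:
+	 * flushing_threshold = (224 * 3 + 3) / 4 = 168, remaining = 56, and blocking_threshold =
+	 * 168 + (56 * 5) / 7 = 208 (before any clamping against the scrubbing threshold).
+	 */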
+ *slab_config = (struct slab_config) {
+ .slab_blocks = slab_size,
+ .data_blocks = data_blocks,
+ .reference_count_blocks = ref_blocks,
+ .slab_journal_blocks = slab_journal_blocks,
+ .slab_journal_flushing_threshold = flushing_threshold,
+ .slab_journal_blocking_threshold = blocking_threshold,
+ .slab_journal_scrubbing_threshold = scrubbing_threshold};
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_slab_journal_entry() - Decode a slab journal entry.
+ * @block: The journal block holding the entry.
+ * @entry_count: The number of the entry.
+ *
+ * Return: The decoded entry.
+ */
+struct slab_journal_entry vdo_decode_slab_journal_entry(struct packed_slab_journal_block *block,
+ journal_entry_count_t entry_count)
+{
+ struct slab_journal_entry entry =
+ vdo_unpack_slab_journal_entry(&block->payload.entries[entry_count]);
+
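+	/*
+	 * The entry_types field is a bitmap with one bit per entry (e.g. entry 10 is bit 2 of
+	 * entry_types[1]); when the block has block map increments, a set bit marks the entry as
+	 * a block map remapping.
+	 */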
+ if (block->header.has_block_map_increments &&
+ ((block->payload.full_entries.entry_types[entry_count / 8] &
+ ((u8) 1 << (entry_count % 8))) != 0))
+ entry.operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING;
+
+ return entry;
+}
+
+/**
+ * allocate_partition() - Allocate a partition and add it to a layout.
+ * @layout: The layout containing the partition.
+ * @id: The id of the partition.
+ * @offset: The offset into the layout at which the partition begins.
+ * @size: The size of the partition in blocks.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int allocate_partition(struct layout *layout, u8 id,
+ physical_block_number_t offset, block_count_t size)
+{
+ struct partition *partition;
+ int result;
+
+ result = vdo_allocate(1, struct partition, __func__, &partition);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ partition->id = id;
+ partition->offset = offset;
+ partition->count = size;
+ partition->next = layout->head;
+ layout->head = partition;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * make_partition() - Create a new partition from the beginning or end of the unused space in a
+ * layout.
+ * @layout: The layout.
+ * @id: The id of the partition to make.
+ * @size: The number of blocks to carve out; if 0, all remaining space will be used.
+ * @beginning: True if the partition should start at the beginning of the unused space.
+ *
+ * Return: A success or error code, particularly VDO_NO_SPACE if there are fewer than size blocks
+ * remaining.
+ */
+static int __must_check make_partition(struct layout *layout, enum partition_id id,
+ block_count_t size, bool beginning)
+{
+ int result;
+ physical_block_number_t offset;
+ block_count_t free_blocks = layout->last_free - layout->first_free;
+
+ if (size == 0) {
+ if (free_blocks == 0)
+ return VDO_NO_SPACE;
+ size = free_blocks;
+ } else if (size > free_blocks) {
+ return VDO_NO_SPACE;
+ }
+
+ result = vdo_get_partition(layout, id, NULL);
+ if (result != VDO_UNKNOWN_PARTITION)
+ return VDO_PARTITION_EXISTS;
+
+ offset = beginning ? layout->first_free : (layout->last_free - size);
+
+ result = allocate_partition(layout, id, offset, size);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ layout->num_partitions++;
+ if (beginning)
+ layout->first_free += size;
+ else
+ layout->last_free = layout->last_free - size;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_initialize_layout() - Lay out the partitions of a vdo.
+ * @size: The entire size of the vdo.
+ * @offset: The start of the layout on the underlying storage in blocks.
+ * @block_map_blocks: The size of the block map partition.
+ * @journal_blocks: The size of the journal partition.
+ * @summary_blocks: The size of the slab summary partition.
+ * @layout: The layout to initialize.
+ *
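+ * The resulting layout, from low to high physical block number, is the block map, the slab depot
+ * (all remaining space), the recovery journal, and the slab summary: the block map is carved from
+ * the beginning of the space, the summary and then the journal are carved from the end, and the
+ * depot takes whatever remains.
+ *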
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_initialize_layout(block_count_t size, physical_block_number_t offset,
+ block_count_t block_map_blocks, block_count_t journal_blocks,
+ block_count_t summary_blocks, struct layout *layout)
+{
+ int result;
+ block_count_t necessary_size =
+ (offset + block_map_blocks + journal_blocks + summary_blocks);
+
+ if (necessary_size > size)
+ return vdo_log_error_strerror(VDO_NO_SPACE,
+ "Not enough space to make a VDO");
+
+ *layout = (struct layout) {
+ .start = offset,
+ .size = size,
+ .first_free = offset,
+ .last_free = size,
+ .num_partitions = 0,
+ .head = NULL,
+ };
+
+ result = make_partition(layout, VDO_BLOCK_MAP_PARTITION, block_map_blocks, true);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+
+ result = make_partition(layout, VDO_SLAB_SUMMARY_PARTITION, summary_blocks,
+ false);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+
+ result = make_partition(layout, VDO_RECOVERY_JOURNAL_PARTITION, journal_blocks,
+ false);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+
+ result = make_partition(layout, VDO_SLAB_DEPOT_PARTITION, 0, true);
+ if (result != VDO_SUCCESS)
+ vdo_uninitialize_layout(layout);
+
+ return result;
+}
+
+/**
+ * vdo_uninitialize_layout() - Clean up a layout.
+ * @layout: The layout to clean up.
+ *
+ * All partitions created by this layout become invalid pointers.
+ */
+void vdo_uninitialize_layout(struct layout *layout)
+{
+ while (layout->head != NULL) {
+ struct partition *part = layout->head;
+
+ layout->head = part->next;
+ vdo_free(part);
+ }
+
+ memset(layout, 0, sizeof(struct layout));
+}
+
+/**
+ * vdo_get_partition() - Get a partition by id.
+ * @layout: The layout from which to get a partition.
+ * @id: The id of the partition.
+ * @partition_ptr: A pointer to hold the partition.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_get_partition(struct layout *layout, enum partition_id id,
+ struct partition **partition_ptr)
+{
+ struct partition *partition;
+
+ for (partition = layout->head; partition != NULL; partition = partition->next) {
+ if (partition->id == id) {
+ if (partition_ptr != NULL)
+ *partition_ptr = partition;
+ return VDO_SUCCESS;
+ }
+ }
+
+ return VDO_UNKNOWN_PARTITION;
+}
+
+/**
+ * vdo_get_known_partition() - Get a partition by id from a validated layout.
+ * @layout: The layout from which to get a partition.
+ * @id: The id of the partition.
+ *
+ * Return: the partition
+ */
+struct partition *vdo_get_known_partition(struct layout *layout, enum partition_id id)
+{
+ struct partition *partition;
+ int result = vdo_get_partition(layout, id, &partition);
+
+ VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id);
+
+ return partition;
+}
+
+static void encode_layout(u8 *buffer, size_t *offset, const struct layout *layout)
+{
+ const struct partition *partition;
+ size_t initial_offset;
+ struct header header = VDO_LAYOUT_HEADER_3_0;
+
+ BUILD_BUG_ON(sizeof(enum partition_id) != sizeof(u8));
+ VDO_ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX,
+ "layout partition count must fit in a byte");
+
+ vdo_encode_header(buffer, offset, &header);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, layout->first_free);
+ encode_u64_le(buffer, offset, layout->last_free);
+ buffer[(*offset)++] = layout->num_partitions;
+
+ VDO_ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset,
+ "encoded size of a layout header must match structure");
+
+ for (partition = layout->head; partition != NULL; partition = partition->next) {
+ buffer[(*offset)++] = partition->id;
+ encode_u64_le(buffer, offset, partition->offset);
+ /* This field only exists for backwards compatibility */
+ encode_u64_le(buffer, offset, 0);
+ encode_u64_le(buffer, offset, partition->count);
+ }
+
+ VDO_ASSERT_LOG_ONLY(header.size == *offset - initial_offset,
+ "encoded size of a layout must match header size");
+}
+
+static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t start,
+ block_count_t size, struct layout *layout)
+{
+ struct header header;
+ struct layout_3_0 layout_header;
+ struct partition *partition;
+ size_t initial_offset;
+ physical_block_number_t first_free, last_free;
+ u8 partition_count;
+ u8 i;
+ int result;
+
+ vdo_decode_header(buffer, offset, &header);
+ /* Layout is variable size, so only do a minimum size check here. */
+ result = vdo_validate_header(&VDO_LAYOUT_HEADER_3_0, &header, false, __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+ decode_u64_le(buffer, offset, &first_free);
+ decode_u64_le(buffer, offset, &last_free);
+ partition_count = buffer[(*offset)++];
+ layout_header = (struct layout_3_0) {
+ .first_free = first_free,
+ .last_free = last_free,
+ .partition_count = partition_count,
+ };
+
+ result = VDO_ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset,
+ "decoded size of a layout header must match structure");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ layout->start = start;
+ layout->size = size;
+ layout->first_free = layout_header.first_free;
+ layout->last_free = layout_header.last_free;
+ layout->num_partitions = layout_header.partition_count;
+
+ if (layout->num_partitions > VDO_PARTITION_COUNT) {
+ return vdo_log_error_strerror(VDO_UNKNOWN_PARTITION,
+ "layout has extra partitions");
+ }
+
+ for (i = 0; i < layout->num_partitions; i++) {
+ u8 id;
+ u64 partition_offset, count;
+
+ id = buffer[(*offset)++];
+ decode_u64_le(buffer, offset, &partition_offset);
+ *offset += sizeof(u64);
+ decode_u64_le(buffer, offset, &count);
+
+ result = allocate_partition(layout, id, partition_offset, count);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+ }
+
+ /* Validate that the layout has all (and only) the required partitions */
+ for (i = 0; i < VDO_PARTITION_COUNT; i++) {
+ result = vdo_get_partition(layout, REQUIRED_PARTITIONS[i], &partition);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return vdo_log_error_strerror(result,
+ "layout is missing required partition %u",
+ REQUIRED_PARTITIONS[i]);
+ }
+
+ start += partition->count;
+ }
+
+ if (start != size) {
+ vdo_uninitialize_layout(layout);
+ return vdo_log_error_strerror(UDS_BAD_STATE,
+ "partitions do not cover the layout");
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * pack_vdo_config() - Convert a vdo_config to its packed on-disk representation.
+ * @config: The vdo config to convert.
+ *
+ * Return: The platform-independent representation of the config.
+ */
+static struct packed_vdo_config pack_vdo_config(struct vdo_config config)
+{
+ return (struct packed_vdo_config) {
+ .logical_blocks = __cpu_to_le64(config.logical_blocks),
+ .physical_blocks = __cpu_to_le64(config.physical_blocks),
+ .slab_size = __cpu_to_le64(config.slab_size),
+ .recovery_journal_size = __cpu_to_le64(config.recovery_journal_size),
+ .slab_journal_blocks = __cpu_to_le64(config.slab_journal_blocks),
+ };
+}
+
+/**
+ * pack_vdo_component() - Convert a vdo_component to its packed on-disk representation.
+ * @component: The VDO component data to convert.
+ *
+ * Return: The platform-independent representation of the component.
+ */
+static struct packed_vdo_component_41_0 pack_vdo_component(const struct vdo_component component)
+{
+ return (struct packed_vdo_component_41_0) {
+ .state = __cpu_to_le32(component.state),
+ .complete_recoveries = __cpu_to_le64(component.complete_recoveries),
+ .read_only_recoveries = __cpu_to_le64(component.read_only_recoveries),
+ .config = pack_vdo_config(component.config),
+ .nonce = __cpu_to_le64(component.nonce),
+ };
+}
+
+static void encode_vdo_component(u8 *buffer, size_t *offset,
+ struct vdo_component component)
+{
+ struct packed_vdo_component_41_0 packed;
+
+ encode_version_number(buffer, offset, VDO_COMPONENT_DATA_41_0);
+ packed = pack_vdo_component(component);
+ memcpy(buffer + *offset, &packed, sizeof(packed));
+ *offset += sizeof(packed);
+}
+
+/**
+ * unpack_vdo_config() - Convert a packed_vdo_config to its native in-memory representation.
+ * @config: The packed vdo config to convert.
+ *
+ * Return: The native in-memory representation of the vdo config.
+ */
+static struct vdo_config unpack_vdo_config(struct packed_vdo_config config)
+{
+ return (struct vdo_config) {
+ .logical_blocks = __le64_to_cpu(config.logical_blocks),
+ .physical_blocks = __le64_to_cpu(config.physical_blocks),
+ .slab_size = __le64_to_cpu(config.slab_size),
+ .recovery_journal_size = __le64_to_cpu(config.recovery_journal_size),
+ .slab_journal_blocks = __le64_to_cpu(config.slab_journal_blocks),
+ };
+}
+
+/**
+ * unpack_vdo_component_41_0() - Convert a packed_vdo_component_41_0 to its native in-memory
+ * representation.
+ * @component: The packed vdo component data to convert.
+ *
+ * Return: The native in-memory representation of the component.
+ */
+static struct vdo_component unpack_vdo_component_41_0(struct packed_vdo_component_41_0 component)
+{
+ return (struct vdo_component) {
+ .state = __le32_to_cpu(component.state),
+ .complete_recoveries = __le64_to_cpu(component.complete_recoveries),
+ .read_only_recoveries = __le64_to_cpu(component.read_only_recoveries),
+ .config = unpack_vdo_config(component.config),
+ .nonce = __le64_to_cpu(component.nonce),
+ };
+}
+
+/**
+ * decode_vdo_component() - Decode the component data for the vdo itself out of the super block.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int decode_vdo_component(u8 *buffer, size_t *offset, struct vdo_component *component)
+{
+ struct version_number version;
+ struct packed_vdo_component_41_0 packed;
+ int result;
+
+ decode_version_number(buffer, offset, &version);
+ result = validate_version(version, VDO_COMPONENT_DATA_41_0,
+ "VDO component data");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ memcpy(&packed, buffer + *offset, sizeof(packed));
+ *offset += sizeof(packed);
+ *component = unpack_vdo_component_41_0(packed);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_validate_config() - Validate constraints on a VDO config.
+ * @config: The VDO config.
+ * @physical_block_count: The minimum block count of the underlying storage.
+ * @logical_block_count: The expected logical size of the VDO, or 0 if the logical size may be
+ * unspecified.
+ *
+ * Return: A success or error code.
+ */
+int vdo_validate_config(const struct vdo_config *config,
+ block_count_t physical_block_count,
+ block_count_t logical_block_count)
+{
+ struct slab_config slab_config;
+ int result;
+
+ result = VDO_ASSERT(config->slab_size > 0, "slab size unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(is_power_of_2(config->slab_size),
+ "slab size must be a power of two");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS),
+ "slab size must be less than or equal to 2^%d",
+ MAX_VDO_SLAB_BITS);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS,
+ "slab journal size meets minimum size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size,
+ "slab journal size is within expected bound");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_configure_slab(config->slab_size, config->slab_journal_blocks,
+ &slab_config);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((slab_config.data_blocks >= 1),
+ "slab must be able to hold at least one block");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->physical_blocks > 0, "physical blocks unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS,
+ "physical block count %llu exceeds maximum %llu",
+ (unsigned long long) config->physical_blocks,
+ (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS);
+ if (result != VDO_SUCCESS)
+ return VDO_OUT_OF_RANGE;
+
+ if (physical_block_count != config->physical_blocks) {
+ vdo_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block",
+ (unsigned long long) physical_block_count,
+ (unsigned long long) config->physical_blocks);
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (logical_block_count > 0) {
+ result = VDO_ASSERT((config->logical_blocks > 0),
+ "logical blocks unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (logical_block_count != config->logical_blocks) {
+ vdo_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
+ (unsigned long long) logical_block_count,
+ (unsigned long long) config->logical_blocks);
+ return VDO_PARAMETER_MISMATCH;
+ }
+ }
+
+ result = VDO_ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS,
+ "logical blocks too large");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->recovery_journal_size > 0,
+ "recovery journal size unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(is_power_of_2(config->recovery_journal_size),
+ "recovery journal size must be a power of two");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return result;
+}
+
+/**
+ * vdo_destroy_component_states() - Clean up any allocations in a vdo_component_states.
+ * @states: The component states to destroy.
+ */
+void vdo_destroy_component_states(struct vdo_component_states *states)
+{
+ if (states == NULL)
+ return;
+
+ vdo_uninitialize_layout(&states->layout);
+}
+
+/**
+ * decode_components() - Decode the components now that we know the component data is a version we
+ * understand.
+ * @buffer: The buffer being decoded.
+ * @offset: The offset to start decoding from.
+ * @geometry: The vdo geometry.
+ * @states: An object to hold the successfully decoded state.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check decode_components(u8 *buffer, size_t *offset,
+ struct volume_geometry *geometry,
+ struct vdo_component_states *states)
+{
+ int result;
+
+ decode_vdo_component(buffer, offset, &states->vdo);
+
+ result = decode_layout(buffer, offset, vdo_get_data_region_start(*geometry) + 1,
+ states->vdo.config.physical_blocks, &states->layout);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_recovery_journal_state_7_0(buffer, offset,
+ &states->recovery_journal);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_slab_depot_state_2_0(buffer, offset, &states->slab_depot);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_block_map_state_2_0(buffer, offset, &states->block_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
+ "All decoded component data was used");
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_component_states() - Decode the payload of a super block.
+ * @buffer: The buffer containing the encoded super block contents.
+ * @geometry: The vdo geometry.
+ * @states: A pointer to hold the decoded states.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_decode_component_states(u8 *buffer, struct volume_geometry *geometry,
+ struct vdo_component_states *states)
+{
+ int result;
+ size_t offset = VDO_COMPONENT_DATA_OFFSET;
+
+ /* This is for backwards compatibility. */
+ decode_u32_le(buffer, &offset, &states->unused);
+
+ /* Check the VDO volume version */
+ decode_version_number(buffer, &offset, &states->volume_version);
+ result = validate_version(VDO_VOLUME_VERSION_67_0, states->volume_version,
+ "volume");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_components(buffer, &offset, geometry, states);
+ if (result != VDO_SUCCESS)
+ vdo_uninitialize_layout(&states->layout);
+
+ return result;
+}
+
+/**
+ * vdo_validate_component_states() - Validate the decoded super block configuration.
+ * @states: The state decoded from the super block.
+ * @geometry_nonce: The nonce from the geometry block.
+ * @physical_size: The minimum block count of the underlying storage.
+ * @logical_size: The expected logical size of the VDO, or 0 if the logical size may be
+ * unspecified.
+ *
+ * Return: VDO_SUCCESS or an error if the configuration is invalid.
+ */
+int vdo_validate_component_states(struct vdo_component_states *states,
+ nonce_t geometry_nonce, block_count_t physical_size,
+ block_count_t logical_size)
+{
+ if (geometry_nonce != states->vdo.nonce) {
+ return vdo_log_error_strerror(VDO_BAD_NONCE,
+ "Geometry nonce %llu does not match superblock nonce %llu",
+ (unsigned long long) geometry_nonce,
+ (unsigned long long) states->vdo.nonce);
+ }
+
+ return vdo_validate_config(&states->vdo.config, physical_size, logical_size);
+}
+
+/**
+ * vdo_encode_component_states() - Encode the state of all vdo components in the super block.
+ */
+static void vdo_encode_component_states(u8 *buffer, size_t *offset,
+ const struct vdo_component_states *states)
+{
+ /* This is for backwards compatibility. */
+ encode_u32_le(buffer, offset, states->unused);
+ encode_version_number(buffer, offset, states->volume_version);
+ encode_vdo_component(buffer, offset, states->vdo);
+ encode_layout(buffer, offset, &states->layout);
+ encode_recovery_journal_state_7_0(buffer, offset, states->recovery_journal);
+ encode_slab_depot_state_2_0(buffer, offset, states->slab_depot);
+ encode_block_map_state_2_0(buffer, offset, states->block_map);
+
+ VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
+ "All super block component data was encoded");
+}
+
+/**
+ * vdo_encode_super_block() - Encode a super block into its on-disk representation.
+ */
+void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states)
+{
+ u32 checksum;
+ struct header header = SUPER_BLOCK_HEADER_12_0;
+ size_t offset = 0;
+
+ header.size += VDO_COMPONENT_DATA_SIZE;
+ vdo_encode_header(buffer, &offset, &header);
+ vdo_encode_component_states(buffer, &offset, states);
+
+ checksum = vdo_crc32(buffer, offset);
+ encode_u32_le(buffer, &offset, checksum);
+
+ /*
+ * Even though the buffer is a full block, to avoid the potential corruption from a torn
+ * write, the entire encoding must fit in the first sector.
+ */
+ VDO_ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE,
+ "entire superblock must fit in one sector");
+}
+
+/**
+ * vdo_decode_super_block() - Decode a super block from its on-disk representation.
+ */
+int vdo_decode_super_block(u8 *buffer)
+{
+ struct header header;
+ int result;
+ u32 checksum, saved_checksum;
+ size_t offset = 0;
+
+ /* Decode and validate the header. */
+ vdo_decode_header(buffer, &offset, &header);
+ result = vdo_validate_header(&SUPER_BLOCK_HEADER_12_0, &header, false, __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (header.size > VDO_COMPONENT_DATA_SIZE + sizeof(u32)) {
+ /*
+ * We can't check release version or checksum until we know the content size, so we
+ * have to assume a version mismatch on unexpected values.
+ */
+ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "super block contents too large: %zu",
+ header.size);
+ }
+
+ /* Skip past the component data for now, to verify the checksum. */
+ offset += VDO_COMPONENT_DATA_SIZE;
+
+ checksum = vdo_crc32(buffer, offset);
+ decode_u32_le(buffer, &offset, &saved_checksum);
+
+ result = VDO_ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE,
+ "must have decoded entire superblock payload");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return ((checksum != saved_checksum) ? VDO_CHECKSUM_MISMATCH : VDO_SUCCESS);
+}
diff --git a/drivers/md/dm-vdo/encodings.h b/drivers/md/dm-vdo/encodings.h
new file mode 100644
index 000000000000..e5ff2b0aaa79
--- /dev/null
+++ b/drivers/md/dm-vdo/encodings.h
@@ -0,0 +1,1298 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_ENCODINGS_H
+#define VDO_ENCODINGS_H
+
+#include <linux/blk_types.h>
+#include <linux/crc32.h>
+#include <linux/limits.h>
+#include <linux/uuid.h>
+
+#include "numeric.h"
+
+#include "constants.h"
+#include "types.h"
+
+/*
+ * An in-memory representation of a version number for versioned structures on disk.
+ *
+ * A version number consists of two portions, a major version and a minor version. Any format
+ * change which does not require an explicit upgrade step from the previous version should
+ * increment the minor version. Any format change which either requires an explicit upgrade step,
+ * or is wholly incompatible (i.e. can not be upgraded to), should increment the major version, and
+ * set the minor version to 0.
+ */
+struct version_number {
+ u32 major_version;
+ u32 minor_version;
+};
+
+/*
+ * A packed, machine-independent, on-disk representation of a version_number. Both fields are
+ * stored in little-endian byte order.
+ */
+struct packed_version_number {
+ __le32 major_version;
+ __le32 minor_version;
+} __packed;
+
+/* The registry of component ids for use in headers */
+#define VDO_SUPER_BLOCK 0
+#define VDO_LAYOUT 1
+#define VDO_RECOVERY_JOURNAL 2
+#define VDO_SLAB_DEPOT 3
+#define VDO_BLOCK_MAP 4
+#define VDO_GEOMETRY_BLOCK 5
+
+/* The header for versioned data stored on disk. */
+struct header {
+ u32 id; /* The component this is a header for */
+ struct version_number version; /* The version of the data format */
+ size_t size; /* The size of the data following this header */
+};
+
+/* A packed, machine-independent, on-disk representation of a component header. */
+struct packed_header {
+ __le32 id;
+ struct packed_version_number version;
+ __le64 size;
+} __packed;
+
+enum {
+ VDO_GEOMETRY_BLOCK_LOCATION = 0,
+ VDO_GEOMETRY_MAGIC_NUMBER_SIZE = 8,
+ VDO_DEFAULT_GEOMETRY_BLOCK_VERSION = 5,
+};
+
+struct index_config {
+ u32 mem;
+ u32 unused;
+ bool sparse;
+} __packed;
+
+enum volume_region_id {
+ VDO_INDEX_REGION = 0,
+ VDO_DATA_REGION = 1,
+ VDO_VOLUME_REGION_COUNT,
+};
+
+struct volume_region {
+ /* The ID of the region */
+ enum volume_region_id id;
+ /*
+ * The absolute starting offset on the device. The region continues until the next region
+ * begins.
+ */
+ physical_block_number_t start_block;
+} __packed;
+
+struct volume_geometry {
+ /* For backwards compatibility */
+ u32 unused;
+ /* The nonce of this volume */
+ nonce_t nonce;
+ /* The uuid of this volume */
+ uuid_t uuid;
+ /* The block offset to be applied to bios */
+ block_count_t bio_offset;
+ /* The regions in ID order */
+ struct volume_region regions[VDO_VOLUME_REGION_COUNT];
+ /* The index config */
+ struct index_config index_config;
+} __packed;
+
+/* This volume geometry struct is used for sizing only */
+struct volume_geometry_4_0 {
+ /* For backwards compatibility */
+ u32 unused;
+ /* The nonce of this volume */
+ nonce_t nonce;
+ /* The uuid of this volume */
+ uuid_t uuid;
+ /* The regions in ID order */
+ struct volume_region regions[VDO_VOLUME_REGION_COUNT];
+ /* The index config */
+ struct index_config index_config;
+} __packed;
+
+extern const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1];
+
+/**
+ * DOC: Block map entries
+ *
+ * The entry for each logical block in the block map is encoded into five bytes, which saves space
+ * in both the on-disk and in-memory layouts. It consists of the 36 low-order bits of a
+ * physical_block_number_t (addressing 256 terabytes with a 4KB block size) and a 4-bit encoding of
+ * a block_mapping_state.
+ *
+ * Of the 8 high bits of the 5-byte structure:
+ *
+ * Bits 7..4: The four highest bits of the 36-bit physical block number
+ * Bits 3..0: The 4-bit block_mapping_state
+ *
+ * The following 4 bytes are the low order bytes of the physical block number, in little-endian
+ * order.
+ *
+ * Conversion functions to and from a data location are provided.
+ */
+struct block_map_entry {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned mapping_state : 4;
+ unsigned pbn_high_nibble : 4;
+#else
+ unsigned pbn_high_nibble : 4;
+ unsigned mapping_state : 4;
+#endif
+
+ __le32 pbn_low_word;
+} __packed;
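+
+/*
+ * Worked example (illustrative values only): packing pbn 0x987654321 with mapping state 0x2
+ * via vdo_pack_block_map_entry() below yields pbn_high_nibble = 0x9, pbn_low_word =
+ * __cpu_to_le32(0x87654321), and mapping_state = 0x2, for a total of five bytes.
+ */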
+
+struct block_map_page_header {
+ __le64 nonce;
+ __le64 pbn;
+
+ /* May be non-zero on disk */
+ u8 unused_long_word[8];
+
+ /* Whether this page has been written twice to disk */
+ bool initialized;
+
+ /* Always zero on disk */
+ u8 unused_byte1;
+
+ /* May be non-zero on disk */
+ u8 unused_byte2;
+ u8 unused_byte3;
+} __packed;
+
+struct block_map_page {
+ struct packed_version_number version;
+ struct block_map_page_header header;
+ struct block_map_entry entries[];
+} __packed;
+
+enum block_map_page_validity {
+ VDO_BLOCK_MAP_PAGE_VALID,
+ VDO_BLOCK_MAP_PAGE_INVALID,
+ /* Valid page found in the wrong location on disk */
+ VDO_BLOCK_MAP_PAGE_BAD,
+};
+
+struct block_map_state_2_0 {
+ physical_block_number_t flat_page_origin;
+ block_count_t flat_page_count;
+ physical_block_number_t root_origin;
+ block_count_t root_count;
+} __packed;
+
+struct boundary {
+ page_number_t levels[VDO_BLOCK_MAP_TREE_HEIGHT];
+};
+
+extern const struct header VDO_BLOCK_MAP_HEADER_2_0;
+
+/* The state of the recovery journal as encoded in the VDO super block. */
+struct recovery_journal_state_7_0 {
+ /* Sequence number to start the journal */
+ sequence_number_t journal_start;
+ /* Number of logical blocks used by VDO */
+ block_count_t logical_blocks_used;
+ /* Number of block map pages allocated */
+ block_count_t block_map_data_blocks;
+} __packed;
+
+extern const struct header VDO_RECOVERY_JOURNAL_HEADER_7_0;
+
+typedef u16 journal_entry_count_t;
+
+/*
+ * A recovery journal entry stores three physical locations: a data location that is the value of a
+ * single mapping in the block map tree, and the two locations of the block map pages and slots
+ * that are acquiring and releasing a reference to the location. The journal entry also stores an
+ * operation code that says whether the mapping is for a logical block or for the block map tree
+ * itself.
+ */
+struct recovery_journal_entry {
+ struct block_map_slot slot;
+ struct data_location mapping;
+ struct data_location unmapping;
+ enum journal_operation operation;
+};
+
+/* The packed, on-disk representation of a recovery journal entry. */
+struct packed_recovery_journal_entry {
+ /*
+ * In little-endian bit order:
+ * Bits 15..12: The four highest bits of the 36-bit physical block number of the block map
+ * tree page
+ * Bits 11..2: The 10-bit block map page slot number
+	 * Bits 1..0: The journal_operation of the entry (this actually only requires 1 bit, but
+	 * it is convenient to keep the extra bit as part of this field).
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned operation : 2;
+ unsigned slot_low : 6;
+ unsigned slot_high : 4;
+ unsigned pbn_high_nibble : 4;
+#else
+ unsigned slot_low : 6;
+ unsigned operation : 2;
+ unsigned pbn_high_nibble : 4;
+ unsigned slot_high : 4;
+#endif
+
+ /*
+ * Bits 47..16: The 32 low-order bits of the block map page PBN, in little-endian byte
+ * order
+ */
+ __le32 pbn_low_word;
+
+ /*
+ * Bits 87..48: The five-byte block map entry encoding the location that will be stored in
+ * the block map page slot
+ */
+ struct block_map_entry mapping;
+
+ /*
+ * Bits 127..88: The five-byte block map entry encoding the location that was stored in the
+ * block map page slot
+ */
+ struct block_map_entry unmapping;
+} __packed;
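+
+/*
+ * Worked example (illustrative values only): for a block map page pbn of 0x200001234 and slot
+ * 0x155, vdo_pack_recovery_journal_entry() below produces pbn_high_nibble = 0x2, pbn_low_word =
+ * __cpu_to_le32(0x00001234), slot_low = 0x15 (the low 6 bits of the slot), and slot_high = 0x5
+ * (the next 4 bits).
+ */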
+
+/* The packed, on-disk representation of an old format recovery journal entry. */
+struct packed_recovery_journal_entry_1 {
+ /*
+ * In little-endian bit order:
+ * Bits 15..12: The four highest bits of the 36-bit physical block number of the block map
+ * tree page
+ * Bits 11..2: The 10-bit block map page slot number
+ * Bits 1..0: The 2-bit journal_operation of the entry
+ *
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned operation : 2;
+ unsigned slot_low : 6;
+ unsigned slot_high : 4;
+ unsigned pbn_high_nibble : 4;
+#else
+ unsigned slot_low : 6;
+ unsigned operation : 2;
+ unsigned pbn_high_nibble : 4;
+ unsigned slot_high : 4;
+#endif
+
+ /*
+ * Bits 47..16: The 32 low-order bits of the block map page PBN, in little-endian byte
+ * order
+ */
+ __le32 pbn_low_word;
+
+ /*
+ * Bits 87..48: The five-byte block map entry encoding the location that was or will be
+ * stored in the block map page slot
+ */
+ struct block_map_entry block_map_entry;
+} __packed;
+
+enum journal_operation_1 {
+ VDO_JOURNAL_DATA_DECREMENT = 0,
+ VDO_JOURNAL_DATA_INCREMENT = 1,
+ VDO_JOURNAL_BLOCK_MAP_DECREMENT = 2,
+ VDO_JOURNAL_BLOCK_MAP_INCREMENT = 3,
+} __packed;
+
+struct recovery_block_header {
+ sequence_number_t block_map_head; /* Block map head sequence number */
+ sequence_number_t slab_journal_head; /* Slab journal head seq. number */
+ sequence_number_t sequence_number; /* Sequence number for this block */
+ nonce_t nonce; /* A given VDO instance's nonce */
+ block_count_t logical_blocks_used; /* Logical blocks in use */
+ block_count_t block_map_data_blocks; /* Allocated block map pages */
+ journal_entry_count_t entry_count; /* Number of entries written */
+ u8 check_byte; /* The protection check byte */
+ u8 recovery_count; /* Number of recoveries completed */
+ enum vdo_metadata_type metadata_type; /* Metadata type */
+};
+
+/*
+ * The packed, on-disk representation of a recovery journal block header. All fields are kept in
+ * little-endian byte order.
+ */
+struct packed_journal_header {
+ /* Block map head 64-bit sequence number */
+ __le64 block_map_head;
+
+ /* Slab journal head 64-bit sequence number */
+ __le64 slab_journal_head;
+
+ /* The 64-bit sequence number for this block */
+ __le64 sequence_number;
+
+ /* A given VDO instance's 64-bit nonce */
+ __le64 nonce;
+
+ /* 8-bit metadata type (should always be one for the recovery journal) */
+ u8 metadata_type;
+
+ /* 16-bit count of the entries encoded in the block */
+ __le16 entry_count;
+
+ /* 64-bit count of the logical blocks used when this block was opened */
+ __le64 logical_blocks_used;
+
+ /* 64-bit count of the block map blocks used when this block was opened */
+ __le64 block_map_data_blocks;
+
+ /* The protection check byte */
+ u8 check_byte;
+
+ /* The number of recoveries completed */
+ u8 recovery_count;
+} __packed;
+
+struct packed_journal_sector {
+ /* The protection check byte */
+ u8 check_byte;
+
+ /* The number of recoveries completed */
+ u8 recovery_count;
+
+ /* The number of entries in this sector */
+ u8 entry_count;
+
+ /* Journal entries for this sector */
+ struct packed_recovery_journal_entry entries[];
+} __packed;
+
+enum {
+ /* The number of entries in each sector (except the last) when filled */
+ RECOVERY_JOURNAL_ENTRIES_PER_SECTOR =
+ ((VDO_SECTOR_SIZE - sizeof(struct packed_journal_sector)) /
+ sizeof(struct packed_recovery_journal_entry)),
+ RECOVERY_JOURNAL_ENTRIES_PER_BLOCK = RECOVERY_JOURNAL_ENTRIES_PER_SECTOR * 7,
+ /* The number of entries in a v1 recovery journal block. */
+ RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK = 311,
+ /* The number of entries in each v1 sector (except the last) when filled */
+ RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR =
+ ((VDO_SECTOR_SIZE - sizeof(struct packed_journal_sector)) /
+ sizeof(struct packed_recovery_journal_entry_1)),
+ /* The number of entries in the last sector when a block is full */
+ RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR =
+ (RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK % RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR),
+};
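+
+/*
+ * Worked sizing example, assuming 512-byte sectors: sizeof(struct packed_journal_sector) is
+ * 3 bytes and sizeof(struct packed_recovery_journal_entry) is 16 bytes, so each full sector
+ * holds (512 - 3) / 16 = 31 entries and a block holds 31 * 7 = 217. In the old format,
+ * sizeof(struct packed_recovery_journal_entry_1) is 11 bytes, giving (512 - 3) / 11 = 46
+ * entries per sector and 311 % 46 = 35 entries in the last sector.
+ */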
+
+/* A type representing a reference count of a block. */
+typedef u8 vdo_refcount_t;
+
+/* The absolute position of an entry in a recovery journal or slab journal. */
+struct journal_point {
+ sequence_number_t sequence_number;
+ journal_entry_count_t entry_count;
+};
+
+/* A packed, platform-independent encoding of a struct journal_point. */
+struct packed_journal_point {
+ /*
+ * The packed representation is the little-endian 64-bit representation of the low-order 48
+ * bits of the sequence number, shifted up 16 bits, or'ed with the 16-bit entry count.
+ *
+ * Very long-term, the top 16 bits of the sequence number may not always be zero, as this
+ * encoding assumes--see BZ 1523240.
+ */
+ __le64 encoded_point;
+} __packed;
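+
+/*
+ * For example (illustrative values only), a journal_point with sequence_number 5 and
+ * entry_count 3 packs to __cpu_to_le64((5 << 16) | 3), i.e. the little-endian encoding of
+ * 0x50003; see vdo_pack_journal_point() and vdo_unpack_journal_point() below.
+ */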
+
+/* Special vdo_refcount_t values. */
+#define EMPTY_REFERENCE_COUNT 0
+enum {
+ MAXIMUM_REFERENCE_COUNT = 254,
+ PROVISIONAL_REFERENCE_COUNT = 255,
+};
+
+enum {
+ COUNTS_PER_SECTOR =
+ ((VDO_SECTOR_SIZE - sizeof(struct packed_journal_point)) / sizeof(vdo_refcount_t)),
+ COUNTS_PER_BLOCK = COUNTS_PER_SECTOR * VDO_SECTORS_PER_BLOCK,
+};
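+
+/*
+ * Worked sizing example, assuming 512-byte sectors and 4 KB blocks (so VDO_SECTORS_PER_BLOCK
+ * is 8): sizeof(struct packed_journal_point) is 8 and sizeof(vdo_refcount_t) is 1, so
+ * COUNTS_PER_SECTOR is (512 - 8) / 1 = 504 and COUNTS_PER_BLOCK is 504 * 8 = 4032.
+ */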
+
+/* The format of each sector of a reference_block on disk. */
+struct packed_reference_sector {
+ struct packed_journal_point commit_point;
+ vdo_refcount_t counts[COUNTS_PER_SECTOR];
+} __packed;
+
+struct packed_reference_block {
+ struct packed_reference_sector sectors[VDO_SECTORS_PER_BLOCK];
+};
+
+struct slab_depot_state_2_0 {
+ struct slab_config slab_config;
+ physical_block_number_t first_block;
+ physical_block_number_t last_block;
+ zone_count_t zone_count;
+} __packed;
+
+extern const struct header VDO_SLAB_DEPOT_HEADER_2_0;
+
+/*
+ * vdo_slab journal blocks may have one of two formats, depending upon whether or not any of the
+ * entries in the block are block map increments. Since the steady state for a VDO is that all of
+ * the necessary block map pages will be allocated, most slab journal blocks will have only data
+ * entries. Such blocks can hold more entries, hence the two formats.
+ */
+
+/* A single slab journal entry */
+struct slab_journal_entry {
+ slab_block_number sbn;
+ enum journal_operation operation;
+ bool increment;
+};
+
+/* A single slab journal entry in its on-disk form */
+typedef struct {
+ u8 offset_low8;
+ u8 offset_mid8;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned offset_high7 : 7;
+ unsigned increment : 1;
+#else
+ unsigned increment : 1;
+ unsigned offset_high7 : 7;
+#endif
+} __packed packed_slab_journal_entry;
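+
+/*
+ * Worked example (illustrative values only): vdo_pack_slab_journal_entry() below packs a slab
+ * block number of 0x123456 as offset_low8 = 0x56, offset_mid8 = 0x34, and offset_high7 = 0x12,
+ * with the remaining bit recording whether the entry is an increment.
+ */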
+
+/* The unpacked representation of the header of a slab journal block */
+struct slab_journal_block_header {
+ /* Sequence number for head of journal */
+ sequence_number_t head;
+ /* Sequence number for this block */
+ sequence_number_t sequence_number;
+ /* The nonce for a given VDO instance */
+ nonce_t nonce;
+ /* Recovery journal point for last entry */
+ struct journal_point recovery_point;
+ /* Metadata type */
+ enum vdo_metadata_type metadata_type;
+ /* Whether this block contains block map increments */
+ bool has_block_map_increments;
+ /* The number of entries in the block */
+ journal_entry_count_t entry_count;
+};
+
+/*
+ * The packed, on-disk representation of a slab journal block header. All fields are kept in
+ * little-endian byte order.
+ */
+struct packed_slab_journal_block_header {
+ /* 64-bit sequence number for head of journal */
+ __le64 head;
+ /* 64-bit sequence number for this block */
+ __le64 sequence_number;
+ /* Recovery journal point for the last entry, packed into 64 bits */
+ struct packed_journal_point recovery_point;
+ /* The 64-bit nonce for a given VDO instance */
+ __le64 nonce;
+ /* 8-bit metadata type (should always be two, for the slab journal) */
+ u8 metadata_type;
+ /* Whether this block contains block map increments */
+ bool has_block_map_increments;
+ /* 16-bit count of the entries encoded in the block */
+ __le16 entry_count;
+} __packed;
+
+enum {
+ VDO_SLAB_JOURNAL_PAYLOAD_SIZE =
+ VDO_BLOCK_SIZE - sizeof(struct packed_slab_journal_block_header),
+ VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK = (VDO_SLAB_JOURNAL_PAYLOAD_SIZE * 8) / 25,
+ VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE =
+ ((VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK - 1) / 8) + 1,
+ VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK =
+ (VDO_SLAB_JOURNAL_PAYLOAD_SIZE / sizeof(packed_slab_journal_entry)),
+};
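+
+/*
+ * Worked sizing example, assuming 4 KB blocks: the packed block header is 36 bytes, so the
+ * payload is 4096 - 36 = 4060 bytes. A full-format entry costs 25 bits (a 3-byte entry plus one
+ * type bit), giving (4060 * 8) / 25 = 1299 full entries with a 163-byte entry_types bitmap,
+ * while the data-only format holds 4060 / 3 = 1353 entries.
+ */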
+
+/* The payload of a slab journal block which has block map increments */
+struct full_slab_journal_entries {
+ /* The entries themselves */
+ packed_slab_journal_entry entries[VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK];
+ /* The bit map indicating which entries are block map increments */
+ u8 entry_types[VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE];
+} __packed;
+
+typedef union {
+ /* Entries which include block map increments */
+ struct full_slab_journal_entries full_entries;
+ /* Entries which are only data updates */
+ packed_slab_journal_entry entries[VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK];
+ /* Ensure the payload fills to the end of the block */
+ u8 space[VDO_SLAB_JOURNAL_PAYLOAD_SIZE];
+} __packed slab_journal_payload;
+
+struct packed_slab_journal_block {
+ struct packed_slab_journal_block_header header;
+ slab_journal_payload payload;
+} __packed;
+
+/* The offset of a slab journal tail block. */
+typedef u8 tail_block_offset_t;
+
+struct slab_summary_entry {
+ /* Bits 7..0: The offset of the tail block within the slab journal */
+ tail_block_offset_t tail_block_offset;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* Bits 13..8: A hint about the fullness of the slab */
+ unsigned int fullness_hint : 6;
+ /* Bit 14: Whether the ref_counts must be loaded from the layer */
+ unsigned int load_ref_counts : 1;
+ /* Bit 15: The believed cleanliness of this slab */
+ unsigned int is_dirty : 1;
+#else
+ /* Bit 15: The believed cleanliness of this slab */
+ unsigned int is_dirty : 1;
+ /* Bit 14: Whether the ref_counts must be loaded from the layer */
+ unsigned int load_ref_counts : 1;
+ /* Bits 13..8: A hint about the fullness of the slab */
+ unsigned int fullness_hint : 6;
+#endif
+} __packed;
+
+enum {
+ VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS = 6,
+ VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK = VDO_BLOCK_SIZE / sizeof(struct slab_summary_entry),
+ VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE = MAX_VDO_SLABS / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK,
+ VDO_SLAB_SUMMARY_BLOCKS = VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * MAX_VDO_PHYSICAL_ZONES,
+};
+
+struct layout {
+ physical_block_number_t start;
+ block_count_t size;
+ physical_block_number_t first_free;
+ physical_block_number_t last_free;
+ size_t num_partitions;
+ struct partition *head;
+};
+
+struct partition {
+ enum partition_id id; /* The id of this partition */
+ physical_block_number_t offset; /* The offset into the layout of this partition */
+ block_count_t count; /* The number of blocks in the partition */
+ struct partition *next; /* A pointer to the next partition in the layout */
+};
+
+struct layout_3_0 {
+ physical_block_number_t first_free;
+ physical_block_number_t last_free;
+ u8 partition_count;
+} __packed;
+
+struct partition_3_0 {
+ enum partition_id id;
+ physical_block_number_t offset;
+ physical_block_number_t base; /* unused but retained for backwards compatibility */
+ block_count_t count;
+} __packed;
+
+/*
+ * The configuration of the VDO service.
+ */
+struct vdo_config {
+ block_count_t logical_blocks; /* number of logical blocks */
+ block_count_t physical_blocks; /* number of physical blocks */
+ block_count_t slab_size; /* number of blocks in a slab */
+ block_count_t recovery_journal_size; /* number of recovery journal blocks */
+ block_count_t slab_journal_blocks; /* number of slab journal blocks */
+};
+
+/* This is the structure that captures the vdo fields saved as a super block component. */
+struct vdo_component {
+ enum vdo_state state;
+ u64 complete_recoveries;
+ u64 read_only_recoveries;
+ struct vdo_config config;
+ nonce_t nonce;
+};
+
+/*
+ * A packed, machine-independent, on-disk representation of the vdo_config in the VDO component
+ * data in the super block.
+ */
+struct packed_vdo_config {
+ __le64 logical_blocks;
+ __le64 physical_blocks;
+ __le64 slab_size;
+ __le64 recovery_journal_size;
+ __le64 slab_journal_blocks;
+} __packed;
+
+/*
+ * A packed, machine-independent, on-disk representation of version 41.0 of the VDO component data
+ * in the super block.
+ */
+struct packed_vdo_component_41_0 {
+ __le32 state;
+ __le64 complete_recoveries;
+ __le64 read_only_recoveries;
+ struct packed_vdo_config config;
+ __le64 nonce;
+} __packed;
+
+/*
+ * The version of the on-disk format of a VDO volume. This should be incremented any time the
+ * on-disk representation of any VDO structure changes. Changes which require only online upgrade
+ * steps should increment the minor version. Changes which require an offline upgrade or which can
+ * not be upgraded to at all should increment the major version and set the minor version to 0.
+ */
+extern const struct version_number VDO_VOLUME_VERSION_67_0;
+
+enum {
+ VDO_ENCODED_HEADER_SIZE = sizeof(struct packed_header),
+ BLOCK_MAP_COMPONENT_ENCODED_SIZE =
+ VDO_ENCODED_HEADER_SIZE + sizeof(struct block_map_state_2_0),
+ RECOVERY_JOURNAL_COMPONENT_ENCODED_SIZE =
+ VDO_ENCODED_HEADER_SIZE + sizeof(struct recovery_journal_state_7_0),
+ SLAB_DEPOT_COMPONENT_ENCODED_SIZE =
+ VDO_ENCODED_HEADER_SIZE + sizeof(struct slab_depot_state_2_0),
+ VDO_PARTITION_COUNT = 4,
+ VDO_LAYOUT_ENCODED_SIZE = (VDO_ENCODED_HEADER_SIZE +
+ sizeof(struct layout_3_0) +
+ (sizeof(struct partition_3_0) * VDO_PARTITION_COUNT)),
+ VDO_SUPER_BLOCK_FIXED_SIZE = VDO_ENCODED_HEADER_SIZE + sizeof(u32),
+ VDO_MAX_COMPONENT_DATA_SIZE = VDO_SECTOR_SIZE - VDO_SUPER_BLOCK_FIXED_SIZE,
+ VDO_COMPONENT_ENCODED_SIZE =
+ (sizeof(struct packed_version_number) + sizeof(struct packed_vdo_component_41_0)),
+ VDO_COMPONENT_DATA_OFFSET = VDO_ENCODED_HEADER_SIZE,
+ VDO_COMPONENT_DATA_SIZE = (sizeof(u32) +
+ sizeof(struct packed_version_number) +
+ VDO_COMPONENT_ENCODED_SIZE +
+ VDO_LAYOUT_ENCODED_SIZE +
+ RECOVERY_JOURNAL_COMPONENT_ENCODED_SIZE +
+ SLAB_DEPOT_COMPONENT_ENCODED_SIZE +
+ BLOCK_MAP_COMPONENT_ENCODED_SIZE),
+};
+
+/* The entirety of the component data encoded in the VDO super block. */
+struct vdo_component_states {
+ /* For backwards compatibility */
+ u32 unused;
+
+ /* The VDO volume version */
+ struct version_number volume_version;
+
+ /* Components */
+ struct vdo_component vdo;
+ struct block_map_state_2_0 block_map;
+ struct recovery_journal_state_7_0 recovery_journal;
+ struct slab_depot_state_2_0 slab_depot;
+
+ /* Our partitioning of the underlying storage */
+ struct layout layout;
+};
+
+/**
+ * vdo_are_same_version() - Check whether two version numbers are the same.
+ * @version_a: The first version.
+ * @version_b: The second version.
+ *
+ * Return: true if the two versions are the same.
+ */
+static inline bool vdo_are_same_version(struct version_number version_a,
+ struct version_number version_b)
+{
+ return ((version_a.major_version == version_b.major_version) &&
+ (version_a.minor_version == version_b.minor_version));
+}
+
+/**
+ * vdo_is_upgradable_version() - Check whether an actual version is upgradable to an expected
+ * version.
+ * @expected_version: The expected version.
+ * @actual_version: The version being validated.
+ *
+ * An actual version is upgradable if it has the expected major number and a minor number
+ * smaller than the expected version's minor number.
+ *
+ * Return: true if the actual version is upgradable.
+ */
+static inline bool vdo_is_upgradable_version(struct version_number expected_version,
+ struct version_number actual_version)
+{
+ return ((expected_version.major_version == actual_version.major_version) &&
+ (expected_version.minor_version > actual_version.minor_version));
+}
+
+int __must_check vdo_validate_header(const struct header *expected_header,
+ const struct header *actual_header, bool exact_size,
+ const char *component_name);
+
+void vdo_encode_header(u8 *buffer, size_t *offset, const struct header *header);
+void vdo_decode_header(u8 *buffer, size_t *offset, struct header *header);
+
+/**
+ * vdo_pack_version_number() - Convert a version_number to its packed on-disk representation.
+ * @version: The version number to convert.
+ *
+ * Return: the platform-independent representation of the version
+ */
+static inline struct packed_version_number vdo_pack_version_number(struct version_number version)
+{
+ return (struct packed_version_number) {
+ .major_version = __cpu_to_le32(version.major_version),
+ .minor_version = __cpu_to_le32(version.minor_version),
+ };
+}
+
+/**
+ * vdo_unpack_version_number() - Convert a packed_version_number to its native in-memory
+ * representation.
+ * @version: The version number to convert.
+ *
+ * Return: The native in-memory representation of the version.
+ */
+static inline struct version_number vdo_unpack_version_number(struct packed_version_number version)
+{
+ return (struct version_number) {
+ .major_version = __le32_to_cpu(version.major_version),
+ .minor_version = __le32_to_cpu(version.minor_version),
+ };
+}
+
+/**
+ * vdo_pack_header() - Convert a component header to its packed on-disk representation.
+ * @header: The header to convert.
+ *
+ * Return: the platform-independent representation of the header
+ */
+static inline struct packed_header vdo_pack_header(const struct header *header)
+{
+ return (struct packed_header) {
+ .id = __cpu_to_le32(header->id),
+ .version = vdo_pack_version_number(header->version),
+ .size = __cpu_to_le64(header->size),
+ };
+}
+
+/**
+ * vdo_unpack_header() - Convert a packed_header to its native in-memory representation.
+ * @header: The header to convert.
+ *
+ * Return: The native in-memory representation of the header.
+ */
+static inline struct header vdo_unpack_header(const struct packed_header *header)
+{
+ return (struct header) {
+ .id = __le32_to_cpu(header->id),
+ .version = vdo_unpack_version_number(header->version),
+ .size = __le64_to_cpu(header->size),
+ };
+}
+
+/**
+ * vdo_get_index_region_start() - Get the start of the index region from a geometry.
+ * @geometry: The geometry.
+ *
+ * Return: The start of the index region.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_index_region_start(struct volume_geometry geometry)
+{
+ return geometry.regions[VDO_INDEX_REGION].start_block;
+}
+
+/**
+ * vdo_get_data_region_start() - Get the start of the data region from a geometry.
+ * @geometry: The geometry.
+ *
+ * Return: The start of the data region.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_data_region_start(struct volume_geometry geometry)
+{
+ return geometry.regions[VDO_DATA_REGION].start_block;
+}
+
+/**
+ * vdo_get_index_region_size() - Get the size of the index region from a geometry.
+ * @geometry: The geometry.
+ *
+ * Return: The size of the index region.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_index_region_size(struct volume_geometry geometry)
+{
+ return vdo_get_data_region_start(geometry) -
+ vdo_get_index_region_start(geometry);
+}
+
+int __must_check vdo_parse_geometry_block(unsigned char *block,
+ struct volume_geometry *geometry);
+
+static inline bool vdo_is_state_compressed(const enum block_mapping_state mapping_state)
+{
+ return (mapping_state > VDO_MAPPING_STATE_UNCOMPRESSED);
+}
+
+static inline struct block_map_entry
+vdo_pack_block_map_entry(physical_block_number_t pbn, enum block_mapping_state mapping_state)
+{
+ return (struct block_map_entry) {
+ .mapping_state = (mapping_state & 0x0F),
+ .pbn_high_nibble = ((pbn >> 32) & 0x0F),
+ .pbn_low_word = __cpu_to_le32(pbn & UINT_MAX),
+ };
+}
+
+static inline struct data_location vdo_unpack_block_map_entry(const struct block_map_entry *entry)
+{
+ physical_block_number_t low32 = __le32_to_cpu(entry->pbn_low_word);
+ physical_block_number_t high4 = entry->pbn_high_nibble;
+
+ return (struct data_location) {
+ .pbn = ((high4 << 32) | low32),
+ .state = entry->mapping_state,
+ };
+}
+
+static inline bool vdo_is_mapped_location(const struct data_location *location)
+{
+ return (location->state != VDO_MAPPING_STATE_UNMAPPED);
+}
+
+static inline bool vdo_is_valid_location(const struct data_location *location)
+{
+ if (location->pbn == VDO_ZERO_BLOCK)
+ return !vdo_is_state_compressed(location->state);
+ else
+ return vdo_is_mapped_location(location);
+}
+
+static inline physical_block_number_t __must_check
+vdo_get_block_map_page_pbn(const struct block_map_page *page)
+{
+ return __le64_to_cpu(page->header.pbn);
+}
+
+struct block_map_page *vdo_format_block_map_page(void *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ bool initialized);
+
+enum block_map_page_validity __must_check vdo_validate_block_map_page(struct block_map_page *page,
+ nonce_t nonce,
+ physical_block_number_t pbn);
+
+static inline page_count_t vdo_compute_block_map_page_count(block_count_t entries)
+{
+ return DIV_ROUND_UP(entries, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
+}
+
+block_count_t __must_check vdo_compute_new_forest_pages(root_count_t root_count,
+ struct boundary *old_sizes,
+ block_count_t entries,
+ struct boundary *new_sizes);
+
+/**
+ * vdo_pack_recovery_journal_entry() - Return the packed, on-disk representation of a recovery
+ * journal entry.
+ * @entry: The journal entry to pack.
+ *
+ * Return: The packed representation of the journal entry.
+ */
+static inline struct packed_recovery_journal_entry
+vdo_pack_recovery_journal_entry(const struct recovery_journal_entry *entry)
+{
+ return (struct packed_recovery_journal_entry) {
+ .operation = entry->operation,
+ .slot_low = entry->slot.slot & 0x3F,
+ .slot_high = (entry->slot.slot >> 6) & 0x0F,
+ .pbn_high_nibble = (entry->slot.pbn >> 32) & 0x0F,
+ .pbn_low_word = __cpu_to_le32(entry->slot.pbn & UINT_MAX),
+ .mapping = vdo_pack_block_map_entry(entry->mapping.pbn,
+ entry->mapping.state),
+ .unmapping = vdo_pack_block_map_entry(entry->unmapping.pbn,
+ entry->unmapping.state),
+ };
+}
+
+/**
+ * vdo_unpack_recovery_journal_entry() - Unpack the on-disk representation of a recovery journal
+ * entry.
+ * @entry: The recovery journal entry to unpack.
+ *
+ * Return: The unpacked entry.
+ */
+static inline struct recovery_journal_entry
+vdo_unpack_recovery_journal_entry(const struct packed_recovery_journal_entry *entry)
+{
+ physical_block_number_t low32 = __le32_to_cpu(entry->pbn_low_word);
+ physical_block_number_t high4 = entry->pbn_high_nibble;
+
+ return (struct recovery_journal_entry) {
+ .operation = entry->operation,
+ .slot = {
+ .pbn = ((high4 << 32) | low32),
+ .slot = (entry->slot_low | (entry->slot_high << 6)),
+ },
+ .mapping = vdo_unpack_block_map_entry(&entry->mapping),
+ .unmapping = vdo_unpack_block_map_entry(&entry->unmapping),
+ };
+}
+
+const char * __must_check vdo_get_journal_operation_name(enum journal_operation operation);
+
+/**
+ * vdo_is_valid_recovery_journal_sector() - Determine whether the header of the given sector could
+ * describe a valid sector for the given journal block
+ * header.
+ * @header: The unpacked block header to compare against.
+ * @sector: The packed sector to check.
+ * @sector_number: The number of the sector being checked.
+ *
+ * Return: true if the sector matches the block header.
+ */
+static inline bool __must_check
+vdo_is_valid_recovery_journal_sector(const struct recovery_block_header *header,
+ const struct packed_journal_sector *sector,
+ u8 sector_number)
+{
+ if ((header->check_byte != sector->check_byte) ||
+ (header->recovery_count != sector->recovery_count))
+ return false;
+
+ if (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL_2)
+ return sector->entry_count <= RECOVERY_JOURNAL_ENTRIES_PER_SECTOR;
+
+ if (sector_number == 7)
+ return sector->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR;
+
+ return sector->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR;
+}
+
+/**
+ * vdo_compute_recovery_journal_block_number() - Compute the physical block number of the recovery
+ * journal block which would have a given sequence
+ * number.
+ * @journal_size: The size of the journal.
+ * @sequence_number: The sequence number.
+ *
+ * Return: The pbn of the journal block which would have the specified sequence number.
+ */
+static inline physical_block_number_t __must_check
+vdo_compute_recovery_journal_block_number(block_count_t journal_size,
+ sequence_number_t sequence_number)
+{
+ /*
+ * Since journal size is a power of two, the block number modulus can just be extracted
+ * from the low-order bits of the sequence.
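+	 *
+	 * For example (illustrative values only), with a 64-block journal, sequence number 200
+	 * maps to block 200 & 63 = 8.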
+ */
+ return (sequence_number & (journal_size - 1));
+}
+
+/**
+ * vdo_get_journal_block_sector() - Find the recovery journal sector from the block header and
+ * sector number.
+ * @header: The header of the recovery journal block.
+ * @sector_number: The index of the sector (1-based).
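+ *
+ * For instance, assuming 512-byte sectors, sector_number 1 starts 512 bytes after the block
+ * header; sectors 1 through 7 hold entries (matching the seven entry sectors counted by
+ * RECOVERY_JOURNAL_ENTRIES_PER_BLOCK above), while sector 0 holds the packed_journal_header.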
+ *
+ * Return: A packed recovery journal sector.
+ */
+static inline struct packed_journal_sector * __must_check
+vdo_get_journal_block_sector(struct packed_journal_header *header, int sector_number)
+{
+ char *sector_data = ((char *) header) + (VDO_SECTOR_SIZE * sector_number);
+
+ return (struct packed_journal_sector *) sector_data;
+}
+
+/**
+ * vdo_pack_recovery_block_header() - Generate the packed representation of a recovery block
+ * header.
+ * @header: The header containing the values to encode.
+ * @packed: The header into which to pack the values.
+ */
+static inline void vdo_pack_recovery_block_header(const struct recovery_block_header *header,
+ struct packed_journal_header *packed)
+{
+ *packed = (struct packed_journal_header) {
+ .block_map_head = __cpu_to_le64(header->block_map_head),
+ .slab_journal_head = __cpu_to_le64(header->slab_journal_head),
+ .sequence_number = __cpu_to_le64(header->sequence_number),
+ .nonce = __cpu_to_le64(header->nonce),
+ .logical_blocks_used = __cpu_to_le64(header->logical_blocks_used),
+ .block_map_data_blocks = __cpu_to_le64(header->block_map_data_blocks),
+ .entry_count = __cpu_to_le16(header->entry_count),
+ .check_byte = header->check_byte,
+ .recovery_count = header->recovery_count,
+ .metadata_type = header->metadata_type,
+ };
+}
+
+/**
+ * vdo_unpack_recovery_block_header() - Decode the packed representation of a recovery block
+ * header.
+ * @packed: The packed header to decode.
+ *
+ * Return: The unpacked header.
+ */
+static inline struct recovery_block_header
+vdo_unpack_recovery_block_header(const struct packed_journal_header *packed)
+{
+ return (struct recovery_block_header) {
+ .block_map_head = __le64_to_cpu(packed->block_map_head),
+ .slab_journal_head = __le64_to_cpu(packed->slab_journal_head),
+ .sequence_number = __le64_to_cpu(packed->sequence_number),
+ .nonce = __le64_to_cpu(packed->nonce),
+ .logical_blocks_used = __le64_to_cpu(packed->logical_blocks_used),
+ .block_map_data_blocks = __le64_to_cpu(packed->block_map_data_blocks),
+ .entry_count = __le16_to_cpu(packed->entry_count),
+ .check_byte = packed->check_byte,
+ .recovery_count = packed->recovery_count,
+ .metadata_type = packed->metadata_type,
+ };
+}
+
+/**
+ * vdo_compute_slab_count() - Compute the number of slabs a depot with given parameters would have.
+ * @first_block: PBN of the first data block.
+ * @last_block: PBN of the last data block.
+ * @slab_size_shift: Exponent for the number of blocks per slab.
+ *
+ * Return: The number of slabs.
+ */
+static inline slab_count_t vdo_compute_slab_count(physical_block_number_t first_block,
+ physical_block_number_t last_block,
+ unsigned int slab_size_shift)
+{
+ return (slab_count_t) ((last_block - first_block) >> slab_size_shift);
+}
+
+int __must_check vdo_configure_slab_depot(const struct partition *partition,
+ struct slab_config slab_config,
+ zone_count_t zone_count,
+ struct slab_depot_state_2_0 *state);
+
+int __must_check vdo_configure_slab(block_count_t slab_size,
+ block_count_t slab_journal_blocks,
+ struct slab_config *slab_config);
+
+/**
+ * vdo_get_saved_reference_count_size() - Get the number of blocks required to save a reference
+ * counts state covering the specified number of data
+ * blocks.
+ * @block_count: The number of physical data blocks that can be referenced.
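+ *
+ * For example, assuming 4 KB blocks and 512-byte sectors (so COUNTS_PER_BLOCK is 4032), saving
+ * reference counts for 1,000,000 data blocks takes DIV_ROUND_UP(1000000, 4032) = 249 blocks.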
+ *
+ * Return: The number of blocks required to save reference counts with the given block count.
+ */
+static inline block_count_t vdo_get_saved_reference_count_size(block_count_t block_count)
+{
+ return DIV_ROUND_UP(block_count, COUNTS_PER_BLOCK);
+}
+
+/**
+ * vdo_get_slab_journal_start_block() - Get the physical block number of the start of the slab
+ * journal relative to the start of the block allocator partition.
+ * @slab_config: The slab configuration of the VDO.
+ * @origin: The first block of the slab.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_slab_journal_start_block(const struct slab_config *slab_config,
+ physical_block_number_t origin)
+{
+ return origin + slab_config->data_blocks + slab_config->reference_count_blocks;
+}
+
+/**
+ * vdo_advance_journal_point() - Move the given journal point forward by one entry.
+ * @point: The journal point to adjust.
+ * @entries_per_block: The number of entries in one full block.
+ */
+static inline void vdo_advance_journal_point(struct journal_point *point,
+ journal_entry_count_t entries_per_block)
+{
+ point->entry_count++;
+ if (point->entry_count == entries_per_block) {
+ point->sequence_number++;
+ point->entry_count = 0;
+ }
+}
+
+/**
+ * vdo_before_journal_point() - Check whether the first point precedes the second point.
+ * @first: The first journal point.
+ * @second: The second journal point.
+ *
+ * Return: true if the first point precedes the second point.
+ */
+static inline bool vdo_before_journal_point(const struct journal_point *first,
+ const struct journal_point *second)
+{
+ return ((first->sequence_number < second->sequence_number) ||
+ ((first->sequence_number == second->sequence_number) &&
+ (first->entry_count < second->entry_count)));
+}
+
+/**
+ * vdo_pack_journal_point() - Encode the journal location represented by a
+ * journal_point into a packed_journal_point.
+ * @unpacked: The unpacked input point.
+ * @packed: The packed output point.
+ */
+static inline void vdo_pack_journal_point(const struct journal_point *unpacked,
+ struct packed_journal_point *packed)
+{
+ packed->encoded_point =
+ __cpu_to_le64((unpacked->sequence_number << 16) | unpacked->entry_count);
+}
+
+/**
+ * vdo_unpack_journal_point() - Decode the journal location represented by a packed_journal_point
+ * into a journal_point.
+ * @packed: The packed input point.
+ * @unpacked: The unpacked output point.
+ */
+static inline void vdo_unpack_journal_point(const struct packed_journal_point *packed,
+ struct journal_point *unpacked)
+{
+ u64 native = __le64_to_cpu(packed->encoded_point);
+
+ unpacked->sequence_number = (native >> 16);
+ unpacked->entry_count = (native & 0xffff);
+}
+
+/**
+ * vdo_pack_slab_journal_block_header() - Generate the packed representation of a slab block
+ * header.
+ * @header: The header containing the values to encode.
+ * @packed: The header into which to pack the values.
+ */
+static inline void
+vdo_pack_slab_journal_block_header(const struct slab_journal_block_header *header,
+ struct packed_slab_journal_block_header *packed)
+{
+ packed->head = __cpu_to_le64(header->head);
+ packed->sequence_number = __cpu_to_le64(header->sequence_number);
+ packed->nonce = __cpu_to_le64(header->nonce);
+ packed->entry_count = __cpu_to_le16(header->entry_count);
+ packed->metadata_type = header->metadata_type;
+ packed->has_block_map_increments = header->has_block_map_increments;
+
+ vdo_pack_journal_point(&header->recovery_point, &packed->recovery_point);
+}
+
+/**
+ * vdo_unpack_slab_journal_block_header() - Decode the packed representation of a slab block
+ * header.
+ * @packed: The packed header to decode.
+ * @header: The header into which to unpack the values.
+ */
+static inline void
+vdo_unpack_slab_journal_block_header(const struct packed_slab_journal_block_header *packed,
+ struct slab_journal_block_header *header)
+{
+ *header = (struct slab_journal_block_header) {
+ .head = __le64_to_cpu(packed->head),
+ .sequence_number = __le64_to_cpu(packed->sequence_number),
+ .nonce = __le64_to_cpu(packed->nonce),
+ .entry_count = __le16_to_cpu(packed->entry_count),
+ .metadata_type = packed->metadata_type,
+ .has_block_map_increments = packed->has_block_map_increments,
+ };
+ vdo_unpack_journal_point(&packed->recovery_point, &header->recovery_point);
+}
+
+/**
+ * vdo_pack_slab_journal_entry() - Generate the packed encoding of a slab journal entry.
+ * @packed: The entry into which to pack the values.
+ * @sbn: The slab block number of the entry to encode.
+ * @is_increment: The increment flag.
+ */
+static inline void vdo_pack_slab_journal_entry(packed_slab_journal_entry *packed,
+ slab_block_number sbn, bool is_increment)
+{
+ packed->offset_low8 = (sbn & 0x0000FF);
+ packed->offset_mid8 = (sbn & 0x00FF00) >> 8;
+ packed->offset_high7 = (sbn & 0x7F0000) >> 16;
+ packed->increment = is_increment ? 1 : 0;
+}
+
+/**
+ * vdo_unpack_slab_journal_entry() - Decode the packed representation of a slab journal entry.
+ * @packed: The packed entry to decode.
+ *
+ * Return: The decoded slab journal entry.
+ */
+static inline struct slab_journal_entry __must_check
+vdo_unpack_slab_journal_entry(const packed_slab_journal_entry *packed)
+{
+ struct slab_journal_entry entry;
+
+ entry.sbn = packed->offset_high7;
+ entry.sbn <<= 8;
+ entry.sbn |= packed->offset_mid8;
+ entry.sbn <<= 8;
+ entry.sbn |= packed->offset_low8;
+ entry.operation = VDO_JOURNAL_DATA_REMAPPING;
+ entry.increment = packed->increment;
+ return entry;
+}
+
+struct slab_journal_entry __must_check
+vdo_decode_slab_journal_entry(struct packed_slab_journal_block *block,
+ journal_entry_count_t entry_count);
+
+/**
+ * vdo_get_slab_summary_hint_shift() - Compute the shift for slab summary hints.
+ * @slab_size_shift: Exponent for the number of blocks per slab.
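+ *
+ * For example, a slab_size_shift of 15 (32768-block slabs) yields a shift of 9; any count
+ * below 2^15, shifted right by 9, fits in the 6-bit fullness_hint field.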
+ *
+ * Return: The hint shift.
+ */
+static inline u8 __must_check vdo_get_slab_summary_hint_shift(unsigned int slab_size_shift)
+{
+ return ((slab_size_shift > VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS) ?
+ (slab_size_shift - VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS) :
+ 0);
+}
+
+int __must_check vdo_initialize_layout(block_count_t size,
+ physical_block_number_t offset,
+ block_count_t block_map_blocks,
+ block_count_t journal_blocks,
+ block_count_t summary_blocks,
+ struct layout *layout);
+
+void vdo_uninitialize_layout(struct layout *layout);
+
+int __must_check vdo_get_partition(struct layout *layout, enum partition_id id,
+ struct partition **partition_ptr);
+
+struct partition * __must_check vdo_get_known_partition(struct layout *layout,
+ enum partition_id id);
+
+int vdo_validate_config(const struct vdo_config *config,
+ block_count_t physical_block_count,
+ block_count_t logical_block_count);
+
+void vdo_destroy_component_states(struct vdo_component_states *states);
+
+int __must_check vdo_decode_component_states(u8 *buffer,
+ struct volume_geometry *geometry,
+ struct vdo_component_states *states);
+
+int __must_check vdo_validate_component_states(struct vdo_component_states *states,
+ nonce_t geometry_nonce,
+ block_count_t physical_size,
+ block_count_t logical_size);
+
+void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states);
+int __must_check vdo_decode_super_block(u8 *buffer);
+
+/* We start with 0L and XOR the result with ~0L to match our historical usage in userspace. */
+static inline u32 vdo_crc32(const void *buf, unsigned long len)
+{
+ return (crc32(0L, buf, len) ^ ~0L);
+}
+
+#endif /* VDO_ENCODINGS_H */
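
The bit layouts above are compact: a packed journal point stores (sequence_number << 16) | entry_count as a little-endian u64, and a packed slab journal entry splits a 23-bit slab block number across three bytes plus a one-bit increment flag. The following standalone sketch (plain C with an illustrative stand-in struct, not the kernel types from this header) round-trips both encodings:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for packed_slab_journal_entry: 23-bit offset plus one flag bit. */
    struct example_packed_entry {
            uint8_t offset_low8;
            uint8_t offset_mid8;
            uint8_t offset_high7 : 7;
            uint8_t increment : 1;
    };

    int main(void)
    {
            uint32_t sbn = 0x5a1b2c; /* any value that fits in 23 bits */
            struct example_packed_entry packed;
            uint32_t unpacked;
            uint64_t point;

            /* Pack, mirroring vdo_pack_slab_journal_entry(). */
            packed.offset_low8 = sbn & 0xff;
            packed.offset_mid8 = (sbn >> 8) & 0xff;
            packed.offset_high7 = (sbn >> 16) & 0x7f;
            packed.increment = true;

            /* Unpack, mirroring vdo_unpack_slab_journal_entry(). */
            unpacked = ((uint32_t) packed.offset_high7 << 16) |
                       ((uint32_t) packed.offset_mid8 << 8) | packed.offset_low8;
            assert(unpacked == sbn);
            assert(packed.increment == 1);

            /* Journal point encoding: sequence number in bits 16..63, entry count in bits 0..15. */
            point = (UINT64_C(0x123456789ab) << 16) | 0x0042;
            assert((point >> 16) == UINT64_C(0x123456789ab));
            assert((point & 0xffff) == 0x0042);
            return 0;
    }
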
diff --git a/drivers/md/dm-vdo/errors.c b/drivers/md/dm-vdo/errors.c
new file mode 100644
index 000000000000..6f89eb1c63a3
--- /dev/null
+++ b/drivers/md/dm-vdo/errors.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "errors.h"
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+
+#include "logger.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+static const struct error_info successful = { "UDS_SUCCESS", "Success" };
+
+static const char *const message_table[] = {
+ [EPERM] = "Operation not permitted",
+ [ENOENT] = "No such file or directory",
+ [ESRCH] = "No such process",
+ [EINTR] = "Interrupted system call",
+ [EIO] = "Input/output error",
+ [ENXIO] = "No such device or address",
+ [E2BIG] = "Argument list too long",
+ [ENOEXEC] = "Exec format error",
+ [EBADF] = "Bad file descriptor",
+ [ECHILD] = "No child processes",
+ [EAGAIN] = "Resource temporarily unavailable",
+ [ENOMEM] = "Cannot allocate memory",
+ [EACCES] = "Permission denied",
+ [EFAULT] = "Bad address",
+ [ENOTBLK] = "Block device required",
+ [EBUSY] = "Device or resource busy",
+ [EEXIST] = "File exists",
+ [EXDEV] = "Invalid cross-device link",
+ [ENODEV] = "No such device",
+ [ENOTDIR] = "Not a directory",
+ [EISDIR] = "Is a directory",
+ [EINVAL] = "Invalid argument",
+ [ENFILE] = "Too many open files in system",
+ [EMFILE] = "Too many open files",
+ [ENOTTY] = "Inappropriate ioctl for device",
+ [ETXTBSY] = "Text file busy",
+ [EFBIG] = "File too large",
+ [ENOSPC] = "No space left on device",
+ [ESPIPE] = "Illegal seek",
+ [EROFS] = "Read-only file system",
+ [EMLINK] = "Too many links",
+ [EPIPE] = "Broken pipe",
+ [EDOM] = "Numerical argument out of domain",
+ [ERANGE] = "Numerical result out of range"
+};
+
+static const struct error_info error_list[] = {
+ { "UDS_OVERFLOW", "Index overflow" },
+ { "UDS_INVALID_ARGUMENT", "Invalid argument passed to internal routine" },
+ { "UDS_BAD_STATE", "UDS data structures are in an invalid state" },
+ { "UDS_DUPLICATE_NAME", "Attempt to enter the same name into a delta index twice" },
+ { "UDS_ASSERTION_FAILED", "Assertion failed" },
+ { "UDS_QUEUED", "Request queued" },
+ { "UDS_ALREADY_REGISTERED", "Error range already registered" },
+ { "UDS_OUT_OF_RANGE", "Cannot access data outside specified limits" },
+ { "UDS_DISABLED", "UDS library context is disabled" },
+ { "UDS_UNSUPPORTED_VERSION", "Unsupported version" },
+ { "UDS_CORRUPT_DATA", "Some index structure is corrupt" },
+ { "UDS_NO_INDEX", "No index found" },
+ { "UDS_INDEX_NOT_SAVED_CLEANLY", "Index not saved cleanly" },
+};
+
+struct error_block {
+ const char *name;
+ int base;
+ int last;
+ int max;
+ const struct error_info *infos;
+};
+
+#define MAX_ERROR_BLOCKS 6
+
+static struct {
+ int allocated;
+ int count;
+ struct error_block blocks[MAX_ERROR_BLOCKS];
+} registered_errors = {
+ .allocated = MAX_ERROR_BLOCKS,
+ .count = 1,
+ .blocks = { {
+ .name = "UDS Error",
+ .base = UDS_ERROR_CODE_BASE,
+ .last = UDS_ERROR_CODE_LAST,
+ .max = UDS_ERROR_CODE_BLOCK_END,
+ .infos = error_list,
+ } },
+};
+
+/* Get the error info for an error number. Also returns the name of the error block, if known. */
+static const char *get_error_info(int errnum, const struct error_info **info_ptr)
+{
+ struct error_block *block;
+
+ if (errnum == UDS_SUCCESS) {
+ *info_ptr = &successful;
+ return NULL;
+ }
+
+ for (block = registered_errors.blocks;
+ block < registered_errors.blocks + registered_errors.count;
+ block++) {
+ if ((errnum >= block->base) && (errnum < block->last)) {
+ *info_ptr = block->infos + (errnum - block->base);
+ return block->name;
+ } else if ((errnum >= block->last) && (errnum < block->max)) {
+ *info_ptr = NULL;
+ return block->name;
+ }
+ }
+
+ return NULL;
+}
+
+/* Return a string describing a system error. */
+static const char *system_string_error(int errnum, char *buf, size_t buflen)
+{
+ size_t len;
+ const char *error_string = NULL;
+
+ if ((errnum > 0) && (errnum < ARRAY_SIZE(message_table)))
+ error_string = message_table[errnum];
+
+ len = ((error_string == NULL) ?
+ snprintf(buf, buflen, "Unknown error %d", errnum) :
+ snprintf(buf, buflen, "%s", error_string));
+ if (len < buflen)
+ return buf;
+
+ buf[0] = '\0';
+ return "System error";
+}
+
+/* Convert an error code to a descriptive string. */
+const char *uds_string_error(int errnum, char *buf, size_t buflen)
+{
+ char *buffer = buf;
+ char *buf_end = buf + buflen;
+ const struct error_info *info = NULL;
+ const char *block_name;
+
+ if (buf == NULL)
+ return NULL;
+
+ if (errnum < 0)
+ errnum = -errnum;
+
+ block_name = get_error_info(errnum, &info);
+ if (block_name != NULL) {
+ if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s: %s",
+ block_name, info->message);
+ } else {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "Unknown %s %d",
+ block_name, errnum);
+ }
+ } else if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", info->message);
+ } else {
+ const char *tmp = system_string_error(errnum, buffer, buf_end - buffer);
+
+ if (tmp != buffer)
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", tmp);
+ else
+ buffer += strlen(tmp);
+ }
+
+ return buf;
+}
+
+/* Convert an error code to its name. */
+const char *uds_string_error_name(int errnum, char *buf, size_t buflen)
+{
+ char *buffer = buf;
+ char *buf_end = buf + buflen;
+ const struct error_info *info = NULL;
+ const char *block_name;
+
+ if (errnum < 0)
+ errnum = -errnum;
+
+ block_name = get_error_info(errnum, &info);
+ if (block_name != NULL) {
+ if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", info->name);
+ } else {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s %d",
+ block_name, errnum);
+ }
+ } else if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", info->name);
+ } else {
+ const char *tmp;
+
+ tmp = system_string_error(errnum, buffer, buf_end - buffer);
+ if (tmp != buffer)
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", tmp);
+ else
+ buffer += strlen(tmp);
+ }
+
+ return buf;
+}
+
+/*
+ * Translate an error code into a value acceptable to the kernel. The input error code may be a
+ * system-generated value (such as -EIO), or an internal UDS status code. The result will be a
+ * negative errno value.
+ */
+int uds_status_to_errno(int error)
+{
+ char error_name[VDO_MAX_ERROR_NAME_SIZE];
+ char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
+
+ /* 0 is success, and negative values are already system error codes. */
+ if (likely(error <= 0))
+ return error;
+
+ if (error < 1024) {
+ /* This is probably an errno from userspace. */
+ return -error;
+ }
+
+ /* Internal UDS errors */
+ switch (error) {
+ case UDS_NO_INDEX:
+ case UDS_CORRUPT_DATA:
+ /* The index doesn't exist or can't be recovered. */
+ return -ENOENT;
+
+ case UDS_INDEX_NOT_SAVED_CLEANLY:
+ case UDS_UNSUPPORTED_VERSION:
+ /*
+ * The index exists, but can't be loaded. Tell the client it exists so they don't
+ * destroy it inadvertently.
+ */
+ return -EEXIST;
+
+ case UDS_DISABLED:
+ /* The session is unusable; only returned by requests. */
+ return -EIO;
+
+ default:
+ /* Translate an unexpected error into something generic. */
+ vdo_log_info("%s: mapping status code %d (%s: %s) to -EIO",
+ __func__, error,
+ uds_string_error_name(error, error_name,
+ sizeof(error_name)),
+ uds_string_error(error, error_message,
+ sizeof(error_message)));
+ return -EIO;
+ }
+}
+
+/*
+ * Register a block of error codes.
+ *
+ * @block_name: the name of the block of error codes
+ * @first_error: the first error code in the block
+ * @next_free_error: one past the highest possible error in the block
+ * @infos: a pointer to the error info array for the block
+ * @info_size: the size of the error info array
+ */
+int uds_register_error_block(const char *block_name, int first_error,
+ int next_free_error, const struct error_info *infos,
+ size_t info_size)
+{
+ int result;
+ struct error_block *block;
+ struct error_block new_block = {
+ .name = block_name,
+ .base = first_error,
+ .last = first_error + (info_size / sizeof(struct error_info)),
+ .max = next_free_error,
+ .infos = infos,
+ };
+
+ result = VDO_ASSERT(first_error < next_free_error,
+ "well-defined error block range");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (registered_errors.count == registered_errors.allocated) {
+ /* This should never happen. */
+ return UDS_OVERFLOW;
+ }
+
+ for (block = registered_errors.blocks;
+ block < registered_errors.blocks + registered_errors.count;
+ block++) {
+ if (strcmp(block_name, block->name) == 0)
+ return UDS_DUPLICATE_NAME;
+
+ /* Ensure error ranges do not overlap. */
+ if ((first_error < block->max) && (next_free_error > block->base))
+ return UDS_ALREADY_REGISTERED;
+ }
+
+ registered_errors.blocks[registered_errors.count++] = new_block;
+ return UDS_SUCCESS;
+}
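
A hypothetical caller of this registration interface (the MY_* names and the my_errors table below are illustrative, not part of this patch) might register its own block above UDS_ERROR_CODE_BLOCK_END and then format one of its codes:

    /*
     * A hypothetical error block. The base is chosen above UDS_ERROR_CODE_BLOCK_END so that
     * it cannot overlap the UDS block registered statically above.
     */
    enum {
            MY_ERROR_BASE = 2048,
            MY_FROB_FAILED = MY_ERROR_BASE,
            MY_ERROR_BLOCK_END = MY_ERROR_BASE + 100,
    };

    static const struct error_info my_errors[] = {
            { "MY_FROB_FAILED", "Frobnication failed" },
    };

    static int my_errors_init(void)
    {
            char buf[VDO_MAX_ERROR_MESSAGE_SIZE];
            int result;

            result = uds_register_error_block("My Errors", MY_ERROR_BASE, MY_ERROR_BLOCK_END,
                                              my_errors, sizeof(my_errors));
            if (result != UDS_SUCCESS)
                    return uds_status_to_errno(result);

            /* Logs "My Errors: Frobnication failed". */
            vdo_log_info("%s", uds_string_error(MY_FROB_FAILED, buf, sizeof(buf)));
            return 0;
    }

Because the new base sits above the statically registered UDS block's reserved range, the overlap check in uds_register_error_block() passes.
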
diff --git a/drivers/md/dm-vdo/errors.h b/drivers/md/dm-vdo/errors.h
new file mode 100644
index 000000000000..24e0e745fd5f
--- /dev/null
+++ b/drivers/md/dm-vdo/errors.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_ERRORS_H
+#define UDS_ERRORS_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Custom error codes and error-related utilities */
+#define VDO_SUCCESS 0
+
+/* Valid status codes for internal UDS functions. */
+enum uds_status_codes {
+ /* Successful return */
+ UDS_SUCCESS = VDO_SUCCESS,
+ /* Used as a base value for reporting internal errors */
+ UDS_ERROR_CODE_BASE = 1024,
+ /* Index overflow */
+ UDS_OVERFLOW = UDS_ERROR_CODE_BASE,
+ /* Invalid argument passed to internal routine */
+ UDS_INVALID_ARGUMENT,
+ /* UDS data structures are in an invalid state */
+ UDS_BAD_STATE,
+ /* Attempt to enter the same name into an internal structure twice */
+ UDS_DUPLICATE_NAME,
+ /* An assertion failed */
+ UDS_ASSERTION_FAILED,
+ /* A request has been queued for later processing (not an error) */
+ UDS_QUEUED,
+ /* This error range has already been registered */
+ UDS_ALREADY_REGISTERED,
+ /* Attempt to read or write data outside the valid range */
+ UDS_OUT_OF_RANGE,
+ /* The index session is disabled */
+ UDS_DISABLED,
+ /* The index configuration or volume format is no longer supported */
+ UDS_UNSUPPORTED_VERSION,
+ /* Some index structure is corrupt */
+ UDS_CORRUPT_DATA,
+ /* No index state found */
+ UDS_NO_INDEX,
+ /* Attempt to access incomplete index save data */
+ UDS_INDEX_NOT_SAVED_CLEANLY,
+ /* One more than the last UDS_INTERNAL error code */
+ UDS_ERROR_CODE_LAST,
+ /* One more than the last error this block will ever use */
+ UDS_ERROR_CODE_BLOCK_END = UDS_ERROR_CODE_BASE + 440,
+};
+
+enum {
+ VDO_MAX_ERROR_NAME_SIZE = 80,
+ VDO_MAX_ERROR_MESSAGE_SIZE = 128,
+};
+
+struct error_info {
+ const char *name;
+ const char *message;
+};
+
+const char * __must_check uds_string_error(int errnum, char *buf, size_t buflen);
+
+const char *uds_string_error_name(int errnum, char *buf, size_t buflen);
+
+int uds_status_to_errno(int error);
+
+int uds_register_error_block(const char *block_name, int first_error,
+ int next_free_error, const struct error_info *infos,
+ size_t info_size);
+
+#endif /* UDS_ERRORS_H */
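
For reference, the translation done by uds_status_to_errno() in errors.c collapses its three input ranges as sketched below; example_translations() is a hypothetical helper that only restates the behavior of the switch statement:

    static void example_translations(void)
    {
            /* Zero and negative values are already kernel-style and pass through unchanged. */
            WARN_ON(uds_status_to_errno(-EIO) != -EIO);
            /* Positive values below 1024 are treated as userspace errnos and negated. */
            WARN_ON(uds_status_to_errno(EINVAL) != -EINVAL);
            /* Internal UDS codes collapse onto a small set of errnos. */
            WARN_ON(uds_status_to_errno(UDS_NO_INDEX) != -ENOENT);
            WARN_ON(uds_status_to_errno(UDS_UNSUPPORTED_VERSION) != -EEXIST);
            WARN_ON(uds_status_to_errno(UDS_DISABLED) != -EIO);
            /* Anything else is logged and mapped generically to -EIO. */
            WARN_ON(uds_status_to_errno(UDS_BAD_STATE) != -EIO);
    }
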
diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c
new file mode 100644
index 000000000000..57e87f0d7069
--- /dev/null
+++ b/drivers/md/dm-vdo/flush.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "flush.h"
+
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "slab-depot.h"
+#include "types.h"
+#include "vdo.h"
+
+struct flusher {
+ struct vdo_completion completion;
+ /* The vdo to which this flusher belongs */
+ struct vdo *vdo;
+ /* The administrative state of the flusher */
+ struct admin_state state;
+ /* The current flush generation of the vdo */
+ sequence_number_t flush_generation;
+ /* The first unacknowledged flush generation */
+ sequence_number_t first_unacknowledged_generation;
+ /* The queue of flush requests waiting to notify other threads */
+ struct vdo_wait_queue notifiers;
+ /* The queue of flush requests waiting for VIOs to complete */
+ struct vdo_wait_queue pending_flushes;
+ /* The flush generation for which notifications are being sent */
+ sequence_number_t notify_generation;
+ /* The logical zone to notify next */
+ struct logical_zone *logical_zone_to_notify;
+ /* The ID of the thread on which flush requests should be made */
+ thread_id_t thread_id;
+ /* The pool of flush requests */
+ mempool_t *flush_pool;
+ /* Bios waiting for a flush request to become available */
+ struct bio_list waiting_flush_bios;
+ /* The lock to protect the previous fields */
+ spinlock_t lock;
+ /* The rotor for selecting the bio queue for submitting flush bios */
+ zone_count_t bio_queue_rotor;
+ /* The number of flushes submitted to the current bio queue */
+ int flush_count;
+};
+
+/**
+ * assert_on_flusher_thread() - Check that we are on the flusher thread.
+ * @flusher: The flusher.
+ * @caller: The function which is asserting.
+ */
+static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
+ "%s() called from flusher thread", caller);
+}
+
+/**
+ * as_flusher() - Convert a generic vdo_completion to a flusher.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a flusher.
+ */
+static struct flusher *as_flusher(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_FLUSH_NOTIFICATION_COMPLETION);
+ return container_of(completion, struct flusher, completion);
+}
+
+/**
+ * completion_as_vdo_flush() - Convert a generic vdo_completion to a vdo_flush.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a vdo_flush.
+ */
+static inline struct vdo_flush *completion_as_vdo_flush(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_FLUSH_COMPLETION);
+ return container_of(completion, struct vdo_flush, completion);
+}
+
+/**
+ * vdo_waiter_as_flush() - Convert a vdo_flush's generic wait queue entry back to the vdo_flush.
+ * @waiter: The wait queue entry to convert.
+ *
+ * Return: The wait queue entry as a vdo_flush.
+ */
+static struct vdo_flush *vdo_waiter_as_flush(struct vdo_waiter *waiter)
+{
+ return container_of(waiter, struct vdo_flush, waiter);
+}
+
+static void *allocate_flush(gfp_t gfp_mask, void *pool_data)
+{
+ struct vdo_flush *flush = NULL;
+
+ if ((gfp_mask & GFP_NOWAIT) == GFP_NOWAIT) {
+ flush = vdo_allocate_memory_nowait(sizeof(struct vdo_flush), __func__);
+ } else {
+ int result = vdo_allocate(1, struct vdo_flush, __func__, &flush);
+
+ if (result != VDO_SUCCESS)
+ vdo_log_error_strerror(result, "failed to allocate spare flush");
+ }
+
+ if (flush != NULL) {
+ struct flusher *flusher = pool_data;
+
+ vdo_initialize_completion(&flush->completion, flusher->vdo,
+ VDO_FLUSH_COMPLETION);
+ }
+
+ return flush;
+}
+
+static void free_flush(void *element, void *pool_data __always_unused)
+{
+ vdo_free(element);
+}
+
+/**
+ * vdo_make_flusher() - Make a flusher for a vdo.
+ * @vdo: The vdo which owns the flusher.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make_flusher(struct vdo *vdo)
+{
+ int result = vdo_allocate(1, struct flusher, __func__, &vdo->flusher);
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo->flusher->vdo = vdo;
+ vdo->flusher->thread_id = vdo->thread_config.packer_thread;
+ vdo_set_admin_state_code(&vdo->flusher->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ vdo_initialize_completion(&vdo->flusher->completion, vdo,
+ VDO_FLUSH_NOTIFICATION_COMPLETION);
+
+ spin_lock_init(&vdo->flusher->lock);
+ bio_list_init(&vdo->flusher->waiting_flush_bios);
+ vdo->flusher->flush_pool = mempool_create(1, allocate_flush, free_flush,
+ vdo->flusher);
+ return ((vdo->flusher->flush_pool == NULL) ? -ENOMEM : VDO_SUCCESS);
+}
+
+/**
+ * vdo_free_flusher() - Free a flusher.
+ * @flusher: The flusher to free.
+ */
+void vdo_free_flusher(struct flusher *flusher)
+{
+ if (flusher == NULL)
+ return;
+
+ if (flusher->flush_pool != NULL)
+ mempool_destroy(vdo_forget(flusher->flush_pool));
+ vdo_free(flusher);
+}
+
+/**
+ * vdo_get_flusher_thread_id() - Get the ID of the thread on which flusher functions should be
+ * called.
+ * @flusher: The flusher to query.
+ *
+ * Return: The ID of the thread which handles the flusher.
+ */
+thread_id_t vdo_get_flusher_thread_id(struct flusher *flusher)
+{
+ return flusher->thread_id;
+}
+
+static void notify_flush(struct flusher *flusher);
+static void vdo_complete_flush(struct vdo_flush *flush);
+
+/**
+ * finish_notification() - Finish the notification process.
+ * @completion: The flusher completion.
+ *
+ * Finishes the notification process by checking if any flushes have completed and then starting
+ * the notification of the next flush request if one came in while the current notification was in
+ * progress. This callback is registered in flush_packer_callback().
+ */
+static void finish_notification(struct vdo_completion *completion)
+{
+ struct flusher *flusher = as_flusher(completion);
+
+ assert_on_flusher_thread(flusher, __func__);
+
+ vdo_waitq_enqueue_waiter(&flusher->pending_flushes,
+ vdo_waitq_dequeue_waiter(&flusher->notifiers));
+ vdo_complete_flushes(flusher);
+ if (vdo_waitq_has_waiters(&flusher->notifiers))
+ notify_flush(flusher);
+}
+
+/**
+ * flush_packer_callback() - Flush the packer.
+ * @completion: The flusher completion.
+ *
+ * Flushes the packer now that all of the logical and physical zones have been notified of the new
+ * flush request. This callback is registered in increment_generation().
+ */
+static void flush_packer_callback(struct vdo_completion *completion)
+{
+ struct flusher *flusher = as_flusher(completion);
+
+ vdo_increment_packer_flush_generation(flusher->vdo->packer);
+ vdo_launch_completion_callback(completion, finish_notification,
+ flusher->thread_id);
+}
+
+/**
+ * increment_generation() - Increment the flush generation in a logical zone.
+ * @completion: The flusher as a completion.
+ *
+ * If there are more logical zones, go on to the next one; otherwise, prepare the physical zones.
+ * This callback is registered both in notify_flush() and in itself.
+ */
+static void increment_generation(struct vdo_completion *completion)
+{
+ struct flusher *flusher = as_flusher(completion);
+ struct logical_zone *zone = flusher->logical_zone_to_notify;
+
+ vdo_increment_logical_zone_flush_generation(zone, flusher->notify_generation);
+ if (zone->next == NULL) {
+ vdo_launch_completion_callback(completion, flush_packer_callback,
+ flusher->thread_id);
+ return;
+ }
+
+ flusher->logical_zone_to_notify = zone->next;
+ vdo_launch_completion_callback(completion, increment_generation,
+ flusher->logical_zone_to_notify->thread_id);
+}
+
+/**
+ * notify_flush() - Launch a flush notification.
+ * @flusher: The flusher doing the notification.
+ */
+static void notify_flush(struct flusher *flusher)
+{
+ struct vdo_flush *flush =
+ vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->notifiers));
+
+ flusher->notify_generation = flush->flush_generation;
+ flusher->logical_zone_to_notify = &flusher->vdo->logical_zones->zones[0];
+ flusher->completion.requeue = true;
+ vdo_launch_completion_callback(&flusher->completion, increment_generation,
+ flusher->logical_zone_to_notify->thread_id);
+}
+
+/**
+ * flush_vdo() - Start processing a flush request.
+ * @completion: A flush request (as a vdo_completion)
+ *
+ * This callback is registered in launch_flush().
+ */
+static void flush_vdo(struct vdo_completion *completion)
+{
+ struct vdo_flush *flush = completion_as_vdo_flush(completion);
+ struct flusher *flusher = completion->vdo->flusher;
+ bool may_notify;
+ int result;
+
+ assert_on_flusher_thread(flusher, __func__);
+ result = VDO_ASSERT(vdo_is_state_normal(&flusher->state),
+ "flusher is in normal operation");
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(flusher->vdo, result);
+ vdo_complete_flush(flush);
+ return;
+ }
+
+ flush->flush_generation = flusher->flush_generation++;
+ may_notify = !vdo_waitq_has_waiters(&flusher->notifiers);
+ vdo_waitq_enqueue_waiter(&flusher->notifiers, &flush->waiter);
+ if (may_notify)
+ notify_flush(flusher);
+}
+
+/**
+ * check_for_drain_complete() - Check whether the flusher has drained.
+ * @flusher: The flusher.
+ */
+static void check_for_drain_complete(struct flusher *flusher)
+{
+ bool drained;
+
+ if (!vdo_is_state_draining(&flusher->state) ||
+ vdo_waitq_has_waiters(&flusher->pending_flushes))
+ return;
+
+ spin_lock(&flusher->lock);
+ drained = bio_list_empty(&flusher->waiting_flush_bios);
+ spin_unlock(&flusher->lock);
+
+ if (drained)
+ vdo_finish_draining(&flusher->state);
+}
+
+/**
+ * vdo_complete_flushes() - Attempt to complete any flushes which might have finished.
+ * @flusher: The flusher.
+ */
+void vdo_complete_flushes(struct flusher *flusher)
+{
+ sequence_number_t oldest_active_generation = U64_MAX;
+ struct logical_zone *zone;
+
+ assert_on_flusher_thread(flusher, __func__);
+
+ for (zone = &flusher->vdo->logical_zones->zones[0]; zone != NULL; zone = zone->next)
+ oldest_active_generation =
+ min(oldest_active_generation,
+ READ_ONCE(zone->oldest_active_generation));
+
+ while (vdo_waitq_has_waiters(&flusher->pending_flushes)) {
+ struct vdo_flush *flush =
+ vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->pending_flushes));
+
+ if (flush->flush_generation >= oldest_active_generation)
+ return;
+
+ VDO_ASSERT_LOG_ONLY((flush->flush_generation ==
+ flusher->first_unacknowledged_generation),
+ "acknowledged next expected flush, %llu, was: %llu",
+ (unsigned long long) flusher->first_unacknowledged_generation,
+ (unsigned long long) flush->flush_generation);
+ vdo_waitq_dequeue_waiter(&flusher->pending_flushes);
+ vdo_complete_flush(flush);
+ flusher->first_unacknowledged_generation++;
+ }
+
+ check_for_drain_complete(flusher);
+}
+
+/**
+ * vdo_dump_flusher() - Dump the flusher, in a thread-unsafe fashion.
+ * @flusher: The flusher.
+ */
+void vdo_dump_flusher(const struct flusher *flusher)
+{
+ vdo_log_info("struct flusher");
+ vdo_log_info(" flush_generation=%llu first_unacknowledged_generation=%llu",
+ (unsigned long long) flusher->flush_generation,
+ (unsigned long long) flusher->first_unacknowledged_generation);
+ vdo_log_info(" notifiers queue is %s; pending_flushes queue is %s",
+ (vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"),
+ (vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty"));
+}
+
+/**
+ * initialize_flush() - Initialize a vdo_flush structure.
+ * @flush: The flush to initialize.
+ * @vdo: The vdo being flushed.
+ *
+ * Initializes a vdo_flush structure, transferring all the bios in the flusher's waiting_flush_bios
+ * list to it. The caller MUST already hold the lock.
+ */
+static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)
+{
+ bio_list_init(&flush->bios);
+ bio_list_merge(&flush->bios, &vdo->flusher->waiting_flush_bios);
+ bio_list_init(&vdo->flusher->waiting_flush_bios);
+}
+
+static void launch_flush(struct vdo_flush *flush)
+{
+ struct vdo_completion *completion = &flush->completion;
+
+ vdo_prepare_completion(completion, flush_vdo, flush_vdo,
+ completion->vdo->thread_config.packer_thread, NULL);
+ vdo_enqueue_completion(completion, VDO_DEFAULT_Q_FLUSH_PRIORITY);
+}
+
+/**
+ * vdo_launch_flush() - Function called to start processing a flush request.
+ * @vdo: The vdo.
+ * @bio: The bio containing an empty flush request.
+ *
+ * This is called when we receive an empty flush bio from the block layer, and before acknowledging
+ * a non-empty bio with the FUA flag set.
+ */
+void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
+{
+ /*
+ * Try to allocate a vdo_flush to represent the flush request. If the allocation fails,
+ * we'll deal with it later.
+ */
+ struct vdo_flush *flush = mempool_alloc(vdo->flusher->flush_pool, GFP_NOWAIT);
+ struct flusher *flusher = vdo->flusher;
+ const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state);
+
+ VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
+ code->name);
+
+ spin_lock(&flusher->lock);
+
+ /* We have a new bio to start. Add it to the list. */
+ bio_list_add(&flusher->waiting_flush_bios, bio);
+
+ if (flush == NULL) {
+ spin_unlock(&flusher->lock);
+ return;
+ }
+
+ /* We have flushes to start. Capture them in the vdo_flush structure. */
+ initialize_flush(flush, vdo);
+ spin_unlock(&flusher->lock);
+
+ /* Finish launching the flushes. */
+ launch_flush(flush);
+}
+
+/**
+ * release_flush() - Release a vdo_flush structure that has completed its work.
+ * @flush: The completed flush structure to re-use or free.
+ *
+ * If there are any pending flush requests whose vdo_flush allocation failed, they will be launched
+ * by immediately re-using the released vdo_flush. If there is no spare vdo_flush, the released
+ * structure will become the spare. Otherwise, the vdo_flush will be freed.
+ */
+static void release_flush(struct vdo_flush *flush)
+{
+ bool relaunch_flush;
+ struct flusher *flusher = flush->completion.vdo->flusher;
+
+ spin_lock(&flusher->lock);
+ if (bio_list_empty(&flusher->waiting_flush_bios)) {
+ relaunch_flush = false;
+ } else {
+ /* We have flushes to start. Capture them in a flush request. */
+ initialize_flush(flush, flusher->vdo);
+ relaunch_flush = true;
+ }
+ spin_unlock(&flusher->lock);
+
+ if (relaunch_flush) {
+ /* Finish launching the flushes. */
+ launch_flush(flush);
+ return;
+ }
+
+ mempool_free(flush, flusher->flush_pool);
+}
+
+/**
+ * vdo_complete_flush_callback() - Function called to complete and free a flush request, registered
+ * in vdo_complete_flush().
+ * @completion: The flush request.
+ */
+static void vdo_complete_flush_callback(struct vdo_completion *completion)
+{
+ struct vdo_flush *flush = completion_as_vdo_flush(completion);
+ struct vdo *vdo = completion->vdo;
+ struct bio *bio;
+
+ while ((bio = bio_list_pop(&flush->bios)) != NULL) {
+ /*
+ * We're not acknowledging this bio now, but we'll never touch it again, so this is
+ * the last chance to account for it.
+ */
+ vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
+
+ /* Update the device, and send it on down... */
+ bio_set_dev(bio, vdo_get_backing_device(vdo));
+ atomic64_inc(&vdo->stats.flush_out);
+ submit_bio_noacct(bio);
+ }
+
+ /*
+ * Release the flush structure, freeing it, re-using it as the spare, or using it to launch
+ * any flushes that had to wait when allocations failed.
+ */
+ release_flush(flush);
+}
+
+/**
+ * select_bio_queue() - Select the bio queue on which to finish a flush request.
+ * @flusher: The flusher finishing the request.
+ */
+static thread_id_t select_bio_queue(struct flusher *flusher)
+{
+ struct vdo *vdo = flusher->vdo;
+ zone_count_t bio_threads = flusher->vdo->thread_config.bio_thread_count;
+ int interval;
+
+ if (bio_threads == 1)
+ return vdo->thread_config.bio_threads[0];
+
+ interval = vdo->device_config->thread_counts.bio_rotation_interval;
+ if (flusher->flush_count == interval) {
+ flusher->flush_count = 1;
+ flusher->bio_queue_rotor = ((flusher->bio_queue_rotor + 1) % bio_threads);
+ } else {
+ flusher->flush_count++;
+ }
+
+ return vdo->thread_config.bio_threads[flusher->bio_queue_rotor];
+}
+
+/**
+ * vdo_complete_flush() - Complete and free a vdo flush request.
+ * @flush: The flush request.
+ */
+static void vdo_complete_flush(struct vdo_flush *flush)
+{
+ struct vdo_completion *completion = &flush->completion;
+
+ vdo_prepare_completion(completion, vdo_complete_flush_callback,
+ vdo_complete_flush_callback,
+ select_bio_queue(completion->vdo->flusher), NULL);
+ vdo_enqueue_completion(completion, BIO_Q_FLUSH_PRIORITY);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct flusher, state));
+}
+
+/**
+ * vdo_drain_flusher() - Drain the flusher.
+ * @flusher: The flusher to drain.
+ * @completion: The completion to finish when the flusher has drained.
+ *
+ * Drains the flusher by preventing any more VIOs from entering the flusher and then flushing. The
+ * flusher will be left in the suspended state.
+ */
+void vdo_drain_flusher(struct flusher *flusher, struct vdo_completion *completion)
+{
+ assert_on_flusher_thread(flusher, __func__);
+ vdo_start_draining(&flusher->state, VDO_ADMIN_STATE_SUSPENDING, completion,
+ initiate_drain);
+}
+
+/**
+ * vdo_resume_flusher() - Resume a flusher which has been suspended.
+ * @flusher: The flusher to resume.
+ * @parent: The completion to finish when the flusher has resumed.
+ */
+void vdo_resume_flusher(struct flusher *flusher, struct vdo_completion *parent)
+{
+ assert_on_flusher_thread(flusher, __func__);
+ vdo_continue_completion(parent, vdo_resume_if_quiescent(&flusher->state));
+}
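
A caller-side sketch of vdo_launch_flush(): the route_bio() helper below is hypothetical (it is not the actual dm-vdo bio mapping code), and it assumes an empty flush is recognized as a bio carrying REQ_PREFLUSH with no data:

    /* Hypothetical dispatch helper; assumes an empty flush is REQ_PREFLUSH with zero size. */
    static void route_bio(struct vdo *vdo, struct bio *bio)
    {
            if ((bio->bi_opf & REQ_PREFLUSH) && (bio->bi_iter.bi_size == 0)) {
                    /* Empty flush request: let the flusher track and forward it. */
                    vdo_launch_flush(vdo, bio);
                    return;
            }

            /* ... otherwise continue down the normal read/write path ... */
    }
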
diff --git a/drivers/md/dm-vdo/flush.h b/drivers/md/dm-vdo/flush.h
new file mode 100644
index 000000000000..97252d6656e0
--- /dev/null
+++ b/drivers/md/dm-vdo/flush.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_FLUSH_H
+#define VDO_FLUSH_H
+
+#include "funnel-workqueue.h"
+#include "types.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/* A marker for tracking which journal entries are affected by a flush request. */
+struct vdo_flush {
+ /* The completion for enqueueing this flush request. */
+ struct vdo_completion completion;
+ /* The flush bios covered by this request */
+ struct bio_list bios;
+ /* The wait queue entry for this flush */
+ struct vdo_waiter waiter;
+ /* Which flush this struct represents */
+ sequence_number_t flush_generation;
+};
+
+struct flusher;
+
+int __must_check vdo_make_flusher(struct vdo *vdo);
+
+void vdo_free_flusher(struct flusher *flusher);
+
+thread_id_t __must_check vdo_get_flusher_thread_id(struct flusher *flusher);
+
+void vdo_complete_flushes(struct flusher *flusher);
+
+void vdo_dump_flusher(const struct flusher *flusher);
+
+void vdo_launch_flush(struct vdo *vdo, struct bio *bio);
+
+void vdo_drain_flusher(struct flusher *flusher, struct vdo_completion *completion);
+
+void vdo_resume_flusher(struct flusher *flusher, struct vdo_completion *parent);
+
+#endif /* VDO_FLUSH_H */
diff --git a/drivers/md/dm-vdo/funnel-queue.c b/drivers/md/dm-vdo/funnel-queue.c
new file mode 100644
index 000000000000..a63b2f2bfd7d
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-queue.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "funnel-queue.h"
+
+#include "cpu.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+int vdo_make_funnel_queue(struct funnel_queue **queue_ptr)
+{
+ int result;
+ struct funnel_queue *queue;
+
+ result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * Initialize the stub entry and put it in the queue, establishing the invariant that
+ * queue->newest and queue->oldest are never null.
+ */
+ queue->stub.next = NULL;
+ queue->newest = &queue->stub;
+ queue->oldest = &queue->stub;
+
+ *queue_ptr = queue;
+ return VDO_SUCCESS;
+}
+
+void vdo_free_funnel_queue(struct funnel_queue *queue)
+{
+ vdo_free(queue);
+}
+
+static struct funnel_queue_entry *get_oldest(struct funnel_queue *queue)
+{
+ /*
+ * Barrier requirements: We need a read barrier between reading a "next" field pointer
+ * value and reading anything it points to. There's an accompanying barrier in
+ * vdo_funnel_queue_put() between its caller setting up the entry and making it visible.
+ */
+ struct funnel_queue_entry *oldest = queue->oldest;
+ struct funnel_queue_entry *next = READ_ONCE(oldest->next);
+
+ if (oldest == &queue->stub) {
+ /*
+ * When the oldest entry is the stub and it has no successor, the queue is
+ * logically empty.
+ */
+ if (next == NULL)
+ return NULL;
+ /*
+ * The stub entry has a successor, so the stub can be dequeued and ignored without
+ * breaking the queue invariants.
+ */
+ oldest = next;
+ queue->oldest = oldest;
+ next = READ_ONCE(oldest->next);
+ }
+
+ /*
+ * We have a non-stub candidate to dequeue. If it lacks a successor, we'll need to put the
+ * stub entry back on the queue first.
+ */
+ if (next == NULL) {
+ struct funnel_queue_entry *newest = READ_ONCE(queue->newest);
+
+ if (oldest != newest) {
+ /*
+ * Another thread has already swung queue->newest atomically, but not yet
+ * assigned previous->next. The queue is really still empty.
+ */
+ return NULL;
+ }
+
+ /*
+ * Put the stub entry back on the queue, ensuring a successor will eventually be
+ * seen.
+ */
+ vdo_funnel_queue_put(queue, &queue->stub);
+
+ /* Check again for a successor. */
+ next = READ_ONCE(oldest->next);
+ if (next == NULL) {
+ /*
+ * We lost a race with a producer who swapped queue->newest before we did,
+ * but who hasn't yet updated previous->next. Try again later.
+ */
+ return NULL;
+ }
+ }
+
+ return oldest;
+}
+
+/*
+ * Poll a queue, removing the oldest entry if the queue is not empty. This function must only be
+ * called from a single consumer thread.
+ */
+struct funnel_queue_entry *vdo_funnel_queue_poll(struct funnel_queue *queue)
+{
+ struct funnel_queue_entry *oldest = get_oldest(queue);
+
+ if (oldest == NULL)
+ return oldest;
+
+ /*
+ * Dequeue the oldest entry and return it. Only one consumer thread may call this function,
+ * so no locking, atomic operations, or fences are needed; queue->oldest is owned by the
+ * consumer and oldest->next is never used by a producer thread after it is swung from NULL
+ * to non-NULL.
+ */
+ queue->oldest = READ_ONCE(oldest->next);
+ /*
+ * Make sure the caller sees the proper stored data for this entry. Since we've already
+ * fetched the entry pointer we stored in "queue->oldest", this also ensures that on entry
+ * to the next call we'll properly see the dependent data.
+ */
+ smp_rmb();
+ /*
+ * If "oldest" is a very light-weight work item, we'll be looking for the next one very
+ * soon, so prefetch it now.
+ */
+ uds_prefetch_address(queue->oldest, true);
+ WRITE_ONCE(oldest->next, NULL);
+ return oldest;
+}
+
+/*
+ * Check whether the funnel queue is empty or not. If the queue is in a transition state with one
+ * or more entries being added such that the list view is incomplete, this function will report the
+ * queue as empty.
+ */
+bool vdo_is_funnel_queue_empty(struct funnel_queue *queue)
+{
+ return get_oldest(queue) == NULL;
+}
+
+/*
+ * Check whether the funnel queue is idle or not. If the queue has entries available to be
+ * retrieved, it is not idle. If the queue is in a transition state with one or more entries being
+ * added such that the list view is incomplete, it may not be possible to retrieve an entry with
+ * the vdo_funnel_queue_poll() function, but the queue will not be considered idle.
+ */
+bool vdo_is_funnel_queue_idle(struct funnel_queue *queue)
+{
+ /*
+ * Oldest is not the stub, so there's another entry, though if next is NULL we can't
+ * retrieve it yet.
+ */
+ if (queue->oldest != &queue->stub)
+ return false;
+
+ /*
+ * Oldest is the stub, but newest has been updated by _put(); either there's another,
+ * retrievable entry in the list, or the list is officially empty but in the intermediate
+ * state of having an entry added.
+ *
+ * Whether anything is retrievable depends on whether stub.next has been updated and become
+ * visible to us, but for idleness we don't care. And due to memory ordering in _put(), the
+ * update to newest would be visible to us at the same time or sooner.
+ */
+ if (READ_ONCE(queue->newest) != &queue->stub)
+ return false;
+
+ return true;
+}
diff --git a/drivers/md/dm-vdo/funnel-queue.h b/drivers/md/dm-vdo/funnel-queue.h
new file mode 100644
index 000000000000..bde0f1deff98
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-queue.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_FUNNEL_QUEUE_H
+#define VDO_FUNNEL_QUEUE_H
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+
+/*
+ * A funnel queue is a simple (almost) lock-free queue that accepts entries from multiple threads
+ * (multi-producer) and delivers them to a single thread (single-consumer). "Funnel" is an attempt
+ * to evoke the image of requests from more than one producer being "funneled down" to a single
+ * consumer.
+ *
+ * This is an unsynchronized but thread-safe data structure when used as intended. There is no
+ * mechanism to ensure that only one thread is consuming from the queue. If more than one thread
+ * attempts to consume from the queue, the resulting behavior is undefined. Clients must not
+ * directly access or manipulate the internals of the queue, which are only exposed for the purpose
+ * of allowing the very simple enqueue operation to be inlined.
+ *
+ * The implementation requires that a funnel_queue_entry structure (a link pointer) is embedded in
+ * the queue entries, and pointers to those structures are used exclusively by the queue. No macros
+ * are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
+ * in the queue must be the same so the client can derive their structure pointer from the
+ * entry pointer returned by vdo_funnel_queue_poll().
+ *
+ * Callers are wholly responsible for allocating and freeing the entries. Entries may be freed as
+ * soon as they are returned since this queue is not susceptible to the "ABA problem" present in
+ * many lock-free data structures. The queue is dynamically allocated to ensure cache-line
+ * alignment, but no other dynamic allocation is used.
+ *
+ * The algorithm is not actually 100% lock-free. There is a single point in vdo_funnel_queue_put()
+ * at which a preempted producer will prevent the consumers from seeing items added to the queue by
+ * later producers, and only if the queue is short enough or the consumer fast enough for it to
+ * reach what was the end of the queue at the time of the preemption.
+ *
+ * The consumer function, vdo_funnel_queue_poll(), will return NULL when the queue is empty. To
+ * wait for data to consume, spin (if safe) or combine the queue with a struct event_count to
+ * signal the presence of new entries.
+ */
+
+/* This queue link structure must be embedded in client entries. */
+struct funnel_queue_entry {
+ /* The next (newer) entry in the queue. */
+ struct funnel_queue_entry *next;
+};
+
+/*
+ * The dynamically allocated queue structure, which is allocated on a cache line boundary so the
+ * producer and consumer fields in the structure will land on separate cache lines. This should be
+ * considered opaque, but it is exposed here so vdo_funnel_queue_put() can be inlined.
+ */
+struct __aligned(L1_CACHE_BYTES) funnel_queue {
+ /*
+ * The producers' end of the queue, an atomically exchanged pointer that will never be
+ * NULL.
+ */
+ struct funnel_queue_entry *newest;
+
+ /* The consumer's end of the queue, which is owned by the consumer and never NULL. */
+ struct funnel_queue_entry *oldest __aligned(L1_CACHE_BYTES);
+
+ /* A dummy entry used to provide the non-NULL invariants above. */
+ struct funnel_queue_entry stub;
+};
+
+int __must_check vdo_make_funnel_queue(struct funnel_queue **queue_ptr);
+
+void vdo_free_funnel_queue(struct funnel_queue *queue);
+
+/*
+ * Put an entry on the end of the queue.
+ *
+ * The entry pointer must be to the struct funnel_queue_entry embedded in the caller's data
+ * structure. The caller must be able to derive the address of the start of their data structure
+ * from the pointer that passed in here, so every entry in the queue must have the struct
+ * funnel_queue_entry at the same offset within the client's structure.
+ */
+static inline void vdo_funnel_queue_put(struct funnel_queue *queue,
+ struct funnel_queue_entry *entry)
+{
+ struct funnel_queue_entry *previous;
+
+ /*
+ * Barrier requirements: All stores relating to the entry ("next" pointer, containing data
+ * structure fields) must happen before the previous->next store making it visible to the
+ * consumer. Also, the entry's "next" field initialization to NULL must happen before any
+ * other producer threads can see the entry (the xchg) and try to update the "next" field.
+ *
+ * xchg implements a full barrier.
+ */
+ WRITE_ONCE(entry->next, NULL);
+ previous = xchg(&queue->newest, entry);
+ /*
+ * Preemptions between these two statements hide the rest of the queue from the consumer,
+ * preventing consumption until the following assignment runs.
+ */
+ WRITE_ONCE(previous->next, entry);
+}
+
+struct funnel_queue_entry *__must_check vdo_funnel_queue_poll(struct funnel_queue *queue);
+
+bool __must_check vdo_is_funnel_queue_empty(struct funnel_queue *queue);
+
+bool __must_check vdo_is_funnel_queue_idle(struct funnel_queue *queue);
+
+#endif /* VDO_FUNNEL_QUEUE_H */
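
Because clients must embed a funnel_queue_entry and recover their own record with container_of(), a minimal usage sketch may help; the example_item type and the producer/consumer helpers below are hypothetical:

    /* A client record with the queue link embedded at a fixed offset. */
    struct example_item {
            struct funnel_queue_entry entry;
            int payload;
    };

    /* Producer side: may be called concurrently from any number of threads. */
    static void example_produce(struct funnel_queue *queue, struct example_item *item)
    {
            vdo_funnel_queue_put(queue, &item->entry);
    }

    /* Consumer side: must only ever run on a single thread. */
    static void example_consume(struct funnel_queue *queue)
    {
            struct funnel_queue_entry *link;

            while ((link = vdo_funnel_queue_poll(queue)) != NULL) {
                    struct example_item *item = container_of(link, struct example_item, entry);

                    /* The entry may be reused or freed as soon as it has been returned. */
                    vdo_log_info("consumed payload %d", item->payload);
            }
    }

Only example_consume() may run on the consumer thread; example_produce() may be called from any number of producers at once, which is the whole point of the funnel.
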
diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c
new file mode 100644
index 000000000000..ae11941c90a9
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-workqueue.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "funnel-workqueue.h"
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+
+#include "funnel-queue.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "completion.h"
+#include "status-codes.h"
+
+static DEFINE_PER_CPU(unsigned int, service_queue_rotor);
+
+/**
+ * DOC: Work queue definition.
+ *
+ * There are two types of work queues: simple, with one worker thread, and round-robin, which uses
+ * a group of the former to do the work, and assigns work to them in round-robin fashion (roughly).
+ * Externally, both are represented via the same common sub-structure, though there's actually not
+ * a great deal of overlap between the two types internally.
+ */
+struct vdo_work_queue {
+ /* Name of just the work queue (e.g., "cpuQ12") */
+ char *name;
+ bool round_robin_mode;
+ struct vdo_thread *owner;
+ /* Life cycle functions, etc */
+ const struct vdo_work_queue_type *type;
+};
+
+struct simple_work_queue {
+ struct vdo_work_queue common;
+ struct funnel_queue *priority_lists[VDO_WORK_Q_MAX_PRIORITY + 1];
+ void *private;
+
+ /*
+ * The fields above are unchanged after setup but often read, and are good candidates for
+ * caching -- and if the max priority is 2, they just fit in one x86-64 cache line if aligned.
+ * The fields below are often modified as we sleep and wake, so we want a separate cache
+ * line for performance.
+ */
+
+ /* Any (0 or 1) worker threads waiting for new work to do */
+ wait_queue_head_t waiting_worker_threads ____cacheline_aligned;
+ /* Hack to reduce wakeup calls if the worker thread is running */
+ atomic_t idle;
+
+ /* These are infrequently used so in terms of performance we don't care where they land. */
+ struct task_struct *thread;
+ /* Notify creator once worker has initialized */
+ struct completion *started;
+};
+
+struct round_robin_work_queue {
+ struct vdo_work_queue common;
+ struct simple_work_queue **service_queues;
+ unsigned int num_service_queues;
+};
+
+static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue)
+{
+ return ((queue == NULL) ?
+ NULL : container_of(queue, struct simple_work_queue, common));
+}
+
+static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue)
+{
+ return ((queue == NULL) ?
+ NULL :
+ container_of(queue, struct round_robin_work_queue, common));
+}
+
+/* Processing normal completions. */
+
+/*
+ * Dequeue and return the next waiting completion, if any.
+ *
+ * We scan the funnel queues from highest priority to lowest, once; there is therefore a race
+ * condition where a high-priority completion can be enqueued followed by a lower-priority one, and
+ * we'll grab the latter (but we'll catch the high-priority item on the next call). If strict
+ * enforcement of priorities becomes necessary, this function will need fixing.
+ */
+static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue)
+{
+ int i;
+
+ for (i = queue->common.type->max_priority; i >= 0; i--) {
+ struct funnel_queue_entry *link = vdo_funnel_queue_poll(queue->priority_lists[i]);
+
+ if (link != NULL)
+ return container_of(link, struct vdo_completion, work_queue_entry_link);
+ }
+
+ return NULL;
+}
+
+static void enqueue_work_queue_completion(struct simple_work_queue *queue,
+ struct vdo_completion *completion)
+{
+ VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL,
+ "completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
+ completion, completion->callback, queue, completion->my_queue);
+ if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY)
+ completion->priority = queue->common.type->default_priority;
+
+ if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority,
+ "priority is in range for queue") != VDO_SUCCESS)
+ completion->priority = 0;
+
+ completion->my_queue = &queue->common;
+
+ /* Funnel queue handles the synchronization for the put. */
+ vdo_funnel_queue_put(queue->priority_lists[completion->priority],
+ &completion->work_queue_entry_link);
+
+ /*
+ * Due to how funnel queue synchronization is handled (just atomic operations), the
+ * simplest safe implementation here would be to wake-up any waiting threads after
+ * enqueueing each item. Even if the funnel queue is not empty at the time of adding an
+ * item to the queue, the consumer thread may not see this since it is not guaranteed to
+ * have the same view of the queue as a producer thread.
+ *
+ * However, the above is wasteful so instead we attempt to minimize the number of thread
+ * wakeups. Using an idle flag, and careful ordering using memory barriers, we should be
+ * able to determine when the worker thread might be asleep or going to sleep. We use
+ * cmpxchg to try to take ownership (vs other producer threads) of the responsibility for
+ * waking the worker thread, so multiple wakeups aren't tried at once.
+ *
+ * This was tuned for some x86 boxes that were handy; it's untested whether doing the read
+ * first is any better or worse for other platforms, even other x86 configurations.
+ */
+ smp_mb();
+ if ((atomic_read(&queue->idle) != 1) || (atomic_cmpxchg(&queue->idle, 1, 0) != 1))
+ return;
+
+ /* There's a maximum of one thread in this list. */
+ wake_up(&queue->waiting_worker_threads);
+}
+
+static void run_start_hook(struct simple_work_queue *queue)
+{
+ if (queue->common.type->start != NULL)
+ queue->common.type->start(queue->private);
+}
+
+static void run_finish_hook(struct simple_work_queue *queue)
+{
+ if (queue->common.type->finish != NULL)
+ queue->common.type->finish(queue->private);
+}
+
+/*
+ * Wait for the next completion to process, or until kthread_should_stop indicates that it's time
+ * for us to shut down.
+ *
+ * If kthread_should_stop says it's time to stop but we have pending completions, return a
+ * completion.
+ *
+ * Also update statistics relating to scheduler interactions.
+ */
+static struct vdo_completion *wait_for_next_completion(struct simple_work_queue *queue)
+{
+ struct vdo_completion *completion;
+ DEFINE_WAIT(wait);
+
+ while (true) {
+ prepare_to_wait(&queue->waiting_worker_threads, &wait,
+ TASK_INTERRUPTIBLE);
+ /*
+ * Don't set the idle flag until a wakeup will not be lost.
+ *
+ * Force synchronization between setting the idle flag and checking the funnel
+ * queue; the producer side will do them in the reverse order. (There's still a
+ * race condition we've chosen to allow, because we've got a timeout below that
+ * unwedges us if we hit it, but this may narrow the window a little.)
+ */
+ atomic_set(&queue->idle, 1);
+ smp_mb(); /* store-load barrier between "idle" and funnel queue */
+
+ completion = poll_for_completion(queue);
+ if (completion != NULL)
+ break;
+
+ /*
+ * We need to check for thread-stop after setting TASK_INTERRUPTIBLE state up
+ * above. Otherwise, schedule() will put the thread to sleep and might miss a
+ * wakeup from kthread_stop() call in vdo_finish_work_queue().
+ */
+ if (kthread_should_stop())
+ break;
+
+ schedule();
+
+ /*
+ * Most of the time when we wake, it should be because there's work to do. If it
+ * was a spurious wakeup, continue looping.
+ */
+ completion = poll_for_completion(queue);
+ if (completion != NULL)
+ break;
+ }
+
+ finish_wait(&queue->waiting_worker_threads, &wait);
+ atomic_set(&queue->idle, 0);
+
+ return completion;
+}
+
+static void process_completion(struct simple_work_queue *queue,
+ struct vdo_completion *completion)
+{
+ if (VDO_ASSERT(completion->my_queue == &queue->common,
+ "completion %px from queue %px marked as being in this queue (%px)",
+ completion, queue, completion->my_queue) == VDO_SUCCESS)
+ completion->my_queue = NULL;
+
+ vdo_run_completion(completion);
+}
+
+static void service_work_queue(struct simple_work_queue *queue)
+{
+ run_start_hook(queue);
+
+ while (true) {
+ struct vdo_completion *completion = poll_for_completion(queue);
+
+ if (completion == NULL)
+ completion = wait_for_next_completion(queue);
+
+ if (completion == NULL) {
+ /* No completions but kthread_should_stop() was triggered. */
+ break;
+ }
+
+ process_completion(queue, completion);
+
+ /*
+ * Be friendly to a CPU that has other work to do, if the kernel has told us to.
+ * This speeds up some performance tests; that "other work" might include other VDO
+ * threads.
+ */
+ if (need_resched())
+ cond_resched();
+ }
+
+ run_finish_hook(queue);
+}
+
+static int work_queue_runner(void *ptr)
+{
+ struct simple_work_queue *queue = ptr;
+
+ complete(queue->started);
+ service_work_queue(queue);
+ return 0;
+}
+
+/* Creation & teardown */
+
+static void free_simple_work_queue(struct simple_work_queue *queue)
+{
+ unsigned int i;
+
+ for (i = 0; i <= VDO_WORK_Q_MAX_PRIORITY; i++)
+ vdo_free_funnel_queue(queue->priority_lists[i]);
+ vdo_free(queue->common.name);
+ vdo_free(queue);
+}
+
+static void free_round_robin_work_queue(struct round_robin_work_queue *queue)
+{
+ struct simple_work_queue **queue_table = queue->service_queues;
+ unsigned int count = queue->num_service_queues;
+ unsigned int i;
+
+ queue->service_queues = NULL;
+
+ for (i = 0; i < count; i++)
+ free_simple_work_queue(queue_table[i]);
+ vdo_free(queue_table);
+ vdo_free(queue->common.name);
+ vdo_free(queue);
+}
+
+void vdo_free_work_queue(struct vdo_work_queue *queue)
+{
+ if (queue == NULL)
+ return;
+
+ vdo_finish_work_queue(queue);
+
+ if (queue->round_robin_mode)
+ free_round_robin_work_queue(as_round_robin_work_queue(queue));
+ else
+ free_simple_work_queue(as_simple_work_queue(queue));
+}
+
+static int make_simple_work_queue(const char *thread_name_prefix, const char *name,
+ struct vdo_thread *owner, void *private,
+ const struct vdo_work_queue_type *type,
+ struct simple_work_queue **queue_ptr)
+{
+ DECLARE_COMPLETION_ONSTACK(started);
+ struct simple_work_queue *queue;
+ int i;
+ struct task_struct *thread = NULL;
+ int result;
+
+ VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
+ "queue priority count %u within limit %u", type->max_priority,
+ VDO_WORK_Q_MAX_PRIORITY);
+
+ result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ queue->private = private;
+ queue->started = &started;
+ queue->common.type = type;
+ queue->common.owner = owner;
+ init_waitqueue_head(&queue->waiting_worker_threads);
+
+ result = vdo_duplicate_string(name, "queue name", &queue->common.name);
+ if (result != VDO_SUCCESS) {
+ vdo_free(queue);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i <= type->max_priority; i++) {
+ result = vdo_make_funnel_queue(&queue->priority_lists[i]);
+ if (result != VDO_SUCCESS) {
+ free_simple_work_queue(queue);
+ return result;
+ }
+ }
+
+ thread = kthread_run(work_queue_runner, queue, "%s:%s", thread_name_prefix,
+ queue->common.name);
+ if (IS_ERR(thread)) {
+ free_simple_work_queue(queue);
+ return (int) PTR_ERR(thread);
+ }
+
+ queue->thread = thread;
+
+ /*
+ * If we don't wait to ensure the thread is running VDO code, a quick kthread_stop (due to
+ * errors elsewhere) could cause it to never get as far as running VDO, skipping the
+ * cleanup code.
+ *
+ * Eventually we should just make that path safe too, and then we won't need this
+ * synchronization.
+ */
+ wait_for_completion(&started);
+
+ *queue_ptr = queue;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_make_work_queue() - Create a work queue; if multiple threads are requested, completions will
+ * be distributed to them in round-robin fashion.
+ *
+ * Each queue is associated with a struct vdo_thread which has a single vdo thread id. Regardless
+ * of the actual number of queues and threads allocated here, code outside of the queue
+ * implementation will treat this as a single zone.
+ */
+int vdo_make_work_queue(const char *thread_name_prefix, const char *name,
+ struct vdo_thread *owner, const struct vdo_work_queue_type *type,
+ unsigned int thread_count, void *thread_privates[],
+ struct vdo_work_queue **queue_ptr)
+{
+ struct round_robin_work_queue *queue;
+ int result;
+ char thread_name[TASK_COMM_LEN];
+ unsigned int i;
+
+ if (thread_count == 1) {
+ struct simple_work_queue *simple_queue;
+ void *context = ((thread_privates != NULL) ? thread_privates[0] : NULL);
+
+ result = make_simple_work_queue(thread_name_prefix, name, owner, context,
+ type, &simple_queue);
+ if (result == VDO_SUCCESS)
+ *queue_ptr = &simple_queue->common;
+ return result;
+ }
+
+ result = vdo_allocate(1, struct round_robin_work_queue, "round-robin work queue",
+ &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(thread_count, struct simple_work_queue *,
+ "subordinate work queues", &queue->service_queues);
+ if (result != VDO_SUCCESS) {
+ vdo_free(queue);
+ return result;
+ }
+
+ queue->num_service_queues = thread_count;
+ queue->common.round_robin_mode = true;
+ queue->common.owner = owner;
+
+ result = vdo_duplicate_string(name, "queue name", &queue->common.name);
+ if (result != VDO_SUCCESS) {
+ vdo_free(queue->service_queues);
+ vdo_free(queue);
+ return -ENOMEM;
+ }
+
+ *queue_ptr = &queue->common;
+
+ for (i = 0; i < thread_count; i++) {
+ void *context = ((thread_privates != NULL) ? thread_privates[i] : NULL);
+
+ snprintf(thread_name, sizeof(thread_name), "%s%u", name, i);
+ result = make_simple_work_queue(thread_name_prefix, thread_name, owner,
+ context, type, &queue->service_queues[i]);
+ if (result != VDO_SUCCESS) {
+ queue->num_service_queues = i;
+ /* Destroy previously created subordinates. */
+ vdo_free_work_queue(vdo_forget(*queue_ptr));
+ return result;
+ }
+ }
+
+ return VDO_SUCCESS;
+}
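
As a hypothetical usage sketch (the example_queue_type priority values and the single-threaded setup below are illustrative; the full definition of struct vdo_work_queue_type lives in funnel-workqueue.h, and real callers manage queue lifetimes through the vdo thread machinery):

    /* Hypothetical queue type: no start/finish hooks, two priority levels (0 and 1). */
    static const struct vdo_work_queue_type example_queue_type = {
            .start = NULL,
            .finish = NULL,
            .max_priority = 1,
            .default_priority = 0,
    };

    static int example_use_queue(struct vdo_thread *owner, struct vdo_completion *completion)
    {
            struct vdo_work_queue *queue;
            int result;

            /* Requesting one thread builds a simple (non-round-robin) queue. */
            result = vdo_make_work_queue("vdo_example", "exampleQ", owner, &example_queue_type,
                                         1, NULL, &queue);
            if (result != VDO_SUCCESS)
                    return result;

            /* The completion's callback will run on the queue's worker thread. */
            vdo_enqueue_work_queue(queue, completion);

            /* Teardown: vdo_free_work_queue() finishes (stops) the worker before freeing. */
            vdo_free_work_queue(queue);
            return VDO_SUCCESS;
    }
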
+
+static void finish_simple_work_queue(struct simple_work_queue *queue)
+{
+ if (queue->thread == NULL)
+ return;
+
+ /* Tells the worker thread to shut down and waits for it to exit. */
+ kthread_stop(queue->thread);
+ queue->thread = NULL;
+}
+
+static void finish_round_robin_work_queue(struct round_robin_work_queue *queue)
+{
+ struct simple_work_queue **queue_table = queue->service_queues;
+ unsigned int count = queue->num_service_queues;
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ finish_simple_work_queue(queue_table[i]);
+}
+
+/* No enqueueing of completions should be done once this function is called. */
+void vdo_finish_work_queue(struct vdo_work_queue *queue)
+{
+ if (queue == NULL)
+ return;
+
+ if (queue->round_robin_mode)
+ finish_round_robin_work_queue(as_round_robin_work_queue(queue));
+ else
+ finish_simple_work_queue(as_simple_work_queue(queue));
+}
+
+/* Debugging dumps */
+
+static void dump_simple_work_queue(struct simple_work_queue *queue)
+{
+ const char *thread_status = "no threads";
+ char task_state_report = '-';
+
+ if (queue->thread != NULL) {
+ task_state_report = task_state_to_char(queue->thread);
+ thread_status = atomic_read(&queue->idle) ? "idle" : "running";
+ }
+
+ vdo_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name,
+ thread_status, task_state_report);
+
+ /* ->waiting_worker_threads wait queue status? anyone waiting? */
+}
+
+/* Log diagnostic information about a work queue and, in round-robin mode, its service queues. */
+void vdo_dump_work_queue(struct vdo_work_queue *queue)
+{
+ if (queue->round_robin_mode) {
+ struct round_robin_work_queue *round_robin = as_round_robin_work_queue(queue);
+ unsigned int i;
+
+ for (i = 0; i < round_robin->num_service_queues; i++)
+ dump_simple_work_queue(round_robin->service_queues[i]);
+ } else {
+ dump_simple_work_queue(as_simple_work_queue(queue));
+ }
+}
+
+static void get_function_name(void *pointer, char *buffer, size_t buffer_length)
+{
+ if (pointer == NULL) {
+ /*
+ * Format "%ps" logs a null pointer as "(null)" with a bunch of leading spaces. We
+ * sometimes use this when logging lots of data; don't be so verbose.
+ */
+ strscpy(buffer, "-", buffer_length);
+ } else {
+ /*
+ * Use a pragma to defeat gcc's format checking, which doesn't understand that
+ * "%ps" actually does support a precision spec in Linux kernel code.
+ */
+ char *space;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat"
+ snprintf(buffer, buffer_length, "%.*ps", buffer_length - 1, pointer);
+#pragma GCC diagnostic pop
+
+ space = strchr(buffer, ' ');
+ if (space != NULL)
+ *space = '\0';
+ }
+}
+
+/*
+ * Write to the buffer some info about the completion, for logging. Since the common use case is
+ * dumping info about a lot of completions to syslog all at once, the format favors brevity over
+ * readability.
+ */
+void vdo_dump_completion_to_buffer(struct vdo_completion *completion, char *buffer,
+ size_t length)
+{
+ size_t current_length =
+ scnprintf(buffer, length, "%.*s/", TASK_COMM_LEN,
+ (completion->my_queue == NULL ? "-" : completion->my_queue->name));
+
+ if (current_length < length - 1) {
+ get_function_name((void *) completion->callback, buffer + current_length,
+ length - current_length);
+ }
+}
+
+/* Completion submission */
+/*
+ * If the completion has a timeout that has already passed, the timeout handler function may be
+ * invoked by this function.
+ */
+void vdo_enqueue_work_queue(struct vdo_work_queue *queue,
+ struct vdo_completion *completion)
+{
+ /*
+ * Convert the provided generic vdo_work_queue to the simple_work_queue to actually queue
+ * on.
+ */
+ struct simple_work_queue *simple_queue = NULL;
+
+ if (!queue->round_robin_mode) {
+ simple_queue = as_simple_work_queue(queue);
+ } else {
+ struct round_robin_work_queue *round_robin = as_round_robin_work_queue(queue);
+
+ /*
+ * Sharing one per-CPU rotor across all work queues is not a problem. Any
+ * patterns that might develop are likely to be disrupted by the random
+ * ordering of multiple completions and by migration between cores. A pattern
+ * could only persist if the load were light enough for task ordering to stay
+ * regular and for threads to remain on individual cores, and a load that
+ * light does not need careful balancing anyway.
+ */
+ unsigned int rotor = this_cpu_inc_return(service_queue_rotor);
+ unsigned int index = rotor % round_robin->num_service_queues;
+
+ simple_queue = round_robin->service_queues[index];
+ }
+
+ enqueue_work_queue_completion(simple_queue, completion);
+}
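+
+/*
+ * Worked example of the selection above: with four service queues, successive per-CPU rotor
+ * values 7, 8, 9, and 10 pick service queues 3, 0, 1, and 2.
+ */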
+
+/* Misc */
+
+/*
+ * Return the work queue being run by the current thread, as recorded in the thread's kthread
+ * data when the worker thread was created, or NULL if the current thread is not a work queue
+ * thread.
+ */
+static struct simple_work_queue *get_current_thread_work_queue(void)
+{
+ /*
+ * In interrupt context, if a vdo thread is what got interrupted, the calls below will find
+ * the queue for the thread which was interrupted. However, the interrupted thread may have
+ * been processing a completion, in which case starting to process another would violate
+ * our concurrency assumptions.
+ */
+ if (in_interrupt())
+ return NULL;
+
+ if (kthread_func(current) != work_queue_runner)
+ /* Not a VDO work queue thread. */
+ return NULL;
+
+ return kthread_data(current);
+}
+
+struct vdo_work_queue *vdo_get_current_work_queue(void)
+{
+ struct simple_work_queue *queue = get_current_thread_work_queue();
+
+ return (queue == NULL) ? NULL : &queue->common;
+}
+
+struct vdo_thread *vdo_get_work_queue_owner(struct vdo_work_queue *queue)
+{
+ return queue->owner;
+}
+
+/**
+ * vdo_get_work_queue_private_data() - Returns the private data for the current thread's work
+ * queue, or NULL if none or if the current thread is not a
+ * work queue thread.
+ */
+void *vdo_get_work_queue_private_data(void)
+{
+ struct simple_work_queue *queue = get_current_thread_work_queue();
+
+ return (queue != NULL) ? queue->private : NULL;
+}
+
+bool vdo_work_queue_type_is(struct vdo_work_queue *queue,
+ const struct vdo_work_queue_type *type)
+{
+ return (queue->type == type);
+}
diff --git a/drivers/md/dm-vdo/funnel-workqueue.h b/drivers/md/dm-vdo/funnel-workqueue.h
new file mode 100644
index 000000000000..b5be6e9e83bc
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-workqueue.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_WORK_QUEUE_H
+#define VDO_WORK_QUEUE_H
+
+#include <linux/sched.h> /* for TASK_COMM_LEN */
+
+#include "types.h"
+
+enum {
+ MAX_VDO_WORK_QUEUE_NAME_LEN = TASK_COMM_LEN,
+};
+
+struct vdo_work_queue_type {
+ void (*start)(void *context);
+ void (*finish)(void *context);
+ enum vdo_completion_priority max_priority;
+ enum vdo_completion_priority default_priority;
+};
+
+struct vdo_completion;
+struct vdo_thread;
+struct vdo_work_queue;
+
+int vdo_make_work_queue(const char *thread_name_prefix, const char *name,
+ struct vdo_thread *owner, const struct vdo_work_queue_type *type,
+ unsigned int thread_count, void *thread_privates[],
+ struct vdo_work_queue **queue_ptr);
+
+void vdo_enqueue_work_queue(struct vdo_work_queue *queue, struct vdo_completion *completion);
+
+void vdo_finish_work_queue(struct vdo_work_queue *queue);
+
+void vdo_free_work_queue(struct vdo_work_queue *queue);
+
+void vdo_dump_work_queue(struct vdo_work_queue *queue);
+
+void vdo_dump_completion_to_buffer(struct vdo_completion *completion, char *buffer,
+ size_t length);
+
+void *vdo_get_work_queue_private_data(void);
+struct vdo_work_queue *vdo_get_current_work_queue(void);
+struct vdo_thread *vdo_get_work_queue_owner(struct vdo_work_queue *queue);
+
+bool __must_check vdo_work_queue_type_is(struct vdo_work_queue *queue,
+ const struct vdo_work_queue_type *type);
+
+#endif /* VDO_WORK_QUEUE_H */
diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c
new file mode 100644
index 000000000000..7e32a25d3f2f
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/chapter-index.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "chapter-index.h"
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "hash-utils.h"
+#include "indexer.h"
+
+int uds_make_open_chapter_index(struct open_chapter_index **chapter_index,
+ const struct index_geometry *geometry, u64 volume_nonce)
+{
+ int result;
+ size_t memory_size;
+ struct open_chapter_index *index;
+
+ result = vdo_allocate(1, struct open_chapter_index, "open chapter index", &index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * The delta index will rebalance delta lists when memory gets tight,
+ * so give the chapter index one extra page.
+ */
+ memory_size = ((geometry->index_pages_per_chapter + 1) * geometry->bytes_per_page);
+ index->geometry = geometry;
+ index->volume_nonce = volume_nonce;
+ result = uds_initialize_delta_index(&index->delta_index, 1,
+ geometry->delta_lists_per_chapter,
+ geometry->chapter_mean_delta,
+ geometry->chapter_payload_bits,
+ memory_size, 'm');
+ if (result != UDS_SUCCESS) {
+ vdo_free(index);
+ return result;
+ }
+
+ index->memory_size = index->delta_index.memory_size + sizeof(struct open_chapter_index);
+ *chapter_index = index;
+ return UDS_SUCCESS;
+}
+
+void uds_free_open_chapter_index(struct open_chapter_index *chapter_index)
+{
+ if (chapter_index == NULL)
+ return;
+
+ uds_uninitialize_delta_index(&chapter_index->delta_index);
+ vdo_free(chapter_index);
+}
+
+/* Re-initialize an open chapter index for a new chapter. */
+void uds_empty_open_chapter_index(struct open_chapter_index *chapter_index,
+ u64 virtual_chapter_number)
+{
+ uds_reset_delta_index(&chapter_index->delta_index);
+ chapter_index->virtual_chapter_number = virtual_chapter_number;
+}
+
+static inline bool was_entry_found(const struct delta_index_entry *entry, u32 address)
+{
+ return (!entry->at_end) && (entry->key == address);
+}
+
+/* Associate a record name with the record page containing its metadata. */
+int uds_put_open_chapter_index_record(struct open_chapter_index *chapter_index,
+ const struct uds_record_name *name,
+ u32 page_number)
+{
+ int result;
+ struct delta_index_entry entry;
+ u32 address;
+ u32 list_number;
+ const u8 *found_name;
+ bool found;
+ const struct index_geometry *geometry = chapter_index->geometry;
+ u64 chapter_number = chapter_index->virtual_chapter_number;
+ u32 record_pages = geometry->record_pages_per_chapter;
+
+ result = VDO_ASSERT(page_number < record_pages,
+ "Page number within chapter (%u) exceeds the maximum value %u",
+ page_number, record_pages);
+ if (result != VDO_SUCCESS)
+ return UDS_INVALID_ARGUMENT;
+
+ address = uds_hash_to_chapter_delta_address(name, geometry);
+ list_number = uds_hash_to_chapter_delta_list(name, geometry);
+ result = uds_get_delta_index_entry(&chapter_index->delta_index, list_number,
+ address, name->name, &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ found = was_entry_found(&entry, address);
+ result = VDO_ASSERT(!(found && entry.is_collision),
+ "Chunk appears more than once in chapter %llu",
+ (unsigned long long) chapter_number);
+ if (result != VDO_SUCCESS)
+ return UDS_BAD_STATE;
+
+ found_name = (found ? name->name : NULL);
+ return uds_put_delta_index_entry(&entry, address, page_number, found_name);
+}
+
+/*
+ * Pack a section of an open chapter index into a chapter index page. A range of delta lists
+ * (starting with a specified list index) is copied from the open chapter index into a memory page.
+ * The number of lists copied onto the page is returned to the caller on success.
+ *
+ * @chapter_index: The open chapter index
+ * @memory: The memory page to use
+ * @first_list: The first delta list number to be copied
+ * @last_page: If true, this is the last page of the chapter index and all the remaining lists must
+ * be packed onto this page
+ * @lists_packed: The number of delta lists that were packed onto this page
+ */
+int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
+ u8 *memory, u32 first_list, bool last_page,
+ u32 *lists_packed)
+{
+ int result;
+ struct delta_index *delta_index = &chapter_index->delta_index;
+ struct delta_index_stats stats;
+ u64 nonce = chapter_index->volume_nonce;
+ u64 chapter_number = chapter_index->virtual_chapter_number;
+ const struct index_geometry *geometry = chapter_index->geometry;
+ u32 list_count = geometry->delta_lists_per_chapter;
+ unsigned int removals = 0;
+ struct delta_index_entry entry;
+ u32 next_list;
+ s32 list_number;
+
+ for (;;) {
+ result = uds_pack_delta_index_page(delta_index, nonce, memory,
+ geometry->bytes_per_page,
+ chapter_number, first_list,
+ lists_packed);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if ((first_list + *lists_packed) == list_count) {
+ /* All lists are packed. */
+ break;
+ } else if (*lists_packed == 0) {
+ /*
+ * The next delta list does not fit on a page. This delta list will be
+ * removed.
+ */
+ } else if (last_page) {
+ /*
+ * This is the last page and there are lists left unpacked, but all of the
+ * remaining lists must fit on the page. Find a list that contains entries
+ * and remove that entire list. Start with the first list that does not
+ * fit; if it is empty, fall back to the highest-numbered list that has
+ * already been packed and contains entries.
+ */
+ } else {
+ /* This page is done. */
+ break;
+ }
+
+ if (removals == 0) {
+ uds_get_delta_index_stats(delta_index, &stats);
+ vdo_log_warning("The chapter index for chapter %llu contains %llu entries with %llu collisions",
+ (unsigned long long) chapter_number,
+ (unsigned long long) stats.record_count,
+ (unsigned long long) stats.collision_count);
+ }
+
+ list_number = *lists_packed;
+ do {
+ if (list_number < 0)
+ return UDS_OVERFLOW;
+
+ next_list = first_list + list_number--;
+ result = uds_start_delta_index_search(delta_index, next_list, 0,
+ &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_next_delta_index_entry(&entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ } while (entry.at_end);
+
+ do {
+ result = uds_remove_delta_index_entry(&entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ removals++;
+ } while (!entry.at_end);
+ }
+
+ if (removals > 0) {
+ vdo_log_warning("To avoid chapter index page overflow in chapter %llu, %u entries were removed from the chapter index",
+ (unsigned long long) chapter_number, removals);
+ }
+
+ return UDS_SUCCESS;
+}
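+
+/*
+ * Illustrative sketch only (not part of the driver): packing every index page of an open chapter
+ * with the function above. The page buffer array is a hypothetical placeholder; the real caller
+ * writes into pages owned by the volume.
+ */
+#if 0
+static int example_pack_chapter(struct open_chapter_index *chapter_index, u8 **page_buffers)
+{
+	const struct index_geometry *geometry = chapter_index->geometry;
+	u32 first_list = 0;
+	u32 lists_packed;
+	u32 page;
+	int result;
+
+	for (page = 0; page < geometry->index_pages_per_chapter; page++) {
+		bool last_page = (page == geometry->index_pages_per_chapter - 1);
+
+		result = uds_pack_open_chapter_index_page(chapter_index,
+							  page_buffers[page],
+							  first_list, last_page,
+							  &lists_packed);
+		if (result != UDS_SUCCESS)
+			return result;
+
+		first_list += lists_packed;
+	}
+
+	return UDS_SUCCESS;
+}
+#endif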
+
+/* Make a new chapter index page, initializing it with the data from a given index_page buffer. */
+int uds_initialize_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ u8 *page_buffer, u64 volume_nonce)
+{
+ return uds_initialize_delta_index_page(index_page, volume_nonce,
+ geometry->chapter_mean_delta,
+ geometry->chapter_payload_bits,
+ page_buffer, geometry->bytes_per_page);
+}
+
+/* Validate a chapter index page read during rebuild. */
+int uds_validate_chapter_index_page(const struct delta_index_page *index_page,
+ const struct index_geometry *geometry)
+{
+ int result;
+ const struct delta_index *delta_index = &index_page->delta_index;
+ u32 first = index_page->lowest_list_number;
+ u32 last = index_page->highest_list_number;
+ u32 list_number;
+
+ /* We walk every delta list from start to finish. */
+ for (list_number = first; list_number <= last; list_number++) {
+ struct delta_index_entry entry;
+
+ result = uds_start_delta_index_search(delta_index, list_number - first,
+ 0, &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (;;) {
+ result = uds_next_delta_index_entry(&entry);
+ if (result != UDS_SUCCESS) {
+ /*
+ * A random bit stream is highly likely to arrive here when we go
+ * past the end of the delta list.
+ */
+ return result;
+ }
+
+ if (entry.at_end)
+ break;
+
+ /* Also make sure that the record page field contains a plausible value. */
+ if (uds_get_delta_entry_value(&entry) >=
+ geometry->record_pages_per_chapter) {
+ /*
+ * Do not log this as an error. It happens in normal operation when
+ * we are doing a rebuild but haven't written the entire volume
+ * once.
+ */
+ return UDS_CORRUPT_DATA;
+ }
+ }
+ }
+ return UDS_SUCCESS;
+}
+
+/*
+ * Search a chapter index page for a record name, returning the record page number that may contain
+ * the name.
+ */
+int uds_search_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ const struct uds_record_name *name,
+ u16 *record_page_ptr)
+{
+ int result;
+ struct delta_index *delta_index = &index_page->delta_index;
+ u32 address = uds_hash_to_chapter_delta_address(name, geometry);
+ u32 delta_list_number = uds_hash_to_chapter_delta_list(name, geometry);
+ u32 sub_list_number = delta_list_number - index_page->lowest_list_number;
+ struct delta_index_entry entry;
+
+ result = uds_get_delta_index_entry(delta_index, sub_list_number, address,
+ name->name, &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (was_entry_found(&entry, address))
+ *record_page_ptr = uds_get_delta_entry_value(&entry);
+ else
+ *record_page_ptr = NO_CHAPTER_INDEX_ENTRY;
+
+ return UDS_SUCCESS;
+}
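+
+/*
+ * Illustrative sketch only (not part of the driver): interpreting the result of a chapter index
+ * page search. The caller shown here is a hypothetical placeholder.
+ */
+#if 0
+static int example_search(struct delta_index_page *index_page,
+			  const struct index_geometry *geometry,
+			  const struct uds_record_name *name)
+{
+	u16 record_page;
+	int result;
+
+	result = uds_search_chapter_index_page(index_page, geometry, name, &record_page);
+	if (result != UDS_SUCCESS)
+		return result;
+
+	if (record_page == NO_CHAPTER_INDEX_ENTRY) {
+		/* The name is not indexed by this page. */
+		return UDS_SUCCESS;
+	}
+
+	/* Otherwise, record_page says which record page of the chapter to read next. */
+	return UDS_SUCCESS;
+}
+#endif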
diff --git a/drivers/md/dm-vdo/indexer/chapter-index.h b/drivers/md/dm-vdo/indexer/chapter-index.h
new file mode 100644
index 000000000000..be8bf2b675b1
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/chapter-index.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_CHAPTER_INDEX_H
+#define UDS_CHAPTER_INDEX_H
+
+#include <linux/limits.h>
+
+#include "delta-index.h"
+#include "geometry.h"
+
+/*
+ * A chapter index for an open chapter is a mutable structure that tracks all the records that have
+ * been added to the chapter. A chapter index for a closed chapter is similar except that it is
+ * immutable because the contents of a closed chapter can never change, and the immutable structure
+ * is more efficient. Both types of chapter index are implemented with a delta index.
+ */
+
+/* The value returned when no entry is found in the chapter index. */
+#define NO_CHAPTER_INDEX_ENTRY U16_MAX
+
+struct open_chapter_index {
+ const struct index_geometry *geometry;
+ struct delta_index delta_index;
+ u64 virtual_chapter_number;
+ u64 volume_nonce;
+ size_t memory_size;
+};
+
+int __must_check uds_make_open_chapter_index(struct open_chapter_index **chapter_index,
+ const struct index_geometry *geometry,
+ u64 volume_nonce);
+
+void uds_free_open_chapter_index(struct open_chapter_index *chapter_index);
+
+void uds_empty_open_chapter_index(struct open_chapter_index *chapter_index,
+ u64 virtual_chapter_number);
+
+int __must_check uds_put_open_chapter_index_record(struct open_chapter_index *chapter_index,
+ const struct uds_record_name *name,
+ u32 page_number);
+
+int __must_check uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
+ u8 *memory, u32 first_list,
+ bool last_page, u32 *lists_packed);
+
+int __must_check uds_initialize_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ u8 *page_buffer, u64 volume_nonce);
+
+int __must_check uds_validate_chapter_index_page(const struct delta_index_page *index_page,
+ const struct index_geometry *geometry);
+
+int __must_check uds_search_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ const struct uds_record_name *name,
+ u16 *record_page_ptr);
+
+#endif /* UDS_CHAPTER_INDEX_H */
diff --git a/drivers/md/dm-vdo/indexer/config.c b/drivers/md/dm-vdo/indexer/config.c
new file mode 100644
index 000000000000..5532371b952f
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/config.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "config.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "string-utils.h"
+#include "thread-utils.h"
+
+static const u8 INDEX_CONFIG_MAGIC[] = "ALBIC";
+static const u8 INDEX_CONFIG_VERSION_6_02[] = "06.02";
+static const u8 INDEX_CONFIG_VERSION_8_02[] = "08.02";
+
+#define DEFAULT_VOLUME_READ_THREADS 2
+#define MAX_VOLUME_READ_THREADS 16
+#define INDEX_CONFIG_MAGIC_LENGTH (sizeof(INDEX_CONFIG_MAGIC) - 1)
+#define INDEX_CONFIG_VERSION_LENGTH ((int)(sizeof(INDEX_CONFIG_VERSION_6_02) - 1))
+
+static bool is_version(const u8 *version, u8 *buffer)
+{
+ return memcmp(version, buffer, INDEX_CONFIG_VERSION_LENGTH) == 0;
+}
+
+static bool are_matching_configurations(struct uds_configuration *saved_config,
+ struct index_geometry *saved_geometry,
+ struct uds_configuration *user)
+{
+ struct index_geometry *geometry = user->geometry;
+ bool result = true;
+
+ if (saved_geometry->record_pages_per_chapter != geometry->record_pages_per_chapter) {
+ vdo_log_error("Record pages per chapter (%u) does not match (%u)",
+ saved_geometry->record_pages_per_chapter,
+ geometry->record_pages_per_chapter);
+ result = false;
+ }
+
+ if (saved_geometry->chapters_per_volume != geometry->chapters_per_volume) {
+ vdo_log_error("Chapter count (%u) does not match (%u)",
+ saved_geometry->chapters_per_volume,
+ geometry->chapters_per_volume);
+ result = false;
+ }
+
+ if (saved_geometry->sparse_chapters_per_volume != geometry->sparse_chapters_per_volume) {
+ vdo_log_error("Sparse chapter count (%u) does not match (%u)",
+ saved_geometry->sparse_chapters_per_volume,
+ geometry->sparse_chapters_per_volume);
+ result = false;
+ }
+
+ if (saved_config->cache_chapters != user->cache_chapters) {
+ vdo_log_error("Cache size (%u) does not match (%u)",
+ saved_config->cache_chapters, user->cache_chapters);
+ result = false;
+ }
+
+ if (saved_config->volume_index_mean_delta != user->volume_index_mean_delta) {
+ vdo_log_error("Volume index mean delta (%u) does not match (%u)",
+ saved_config->volume_index_mean_delta,
+ user->volume_index_mean_delta);
+ result = false;
+ }
+
+ if (saved_geometry->bytes_per_page != geometry->bytes_per_page) {
+ vdo_log_error("Bytes per page value (%zu) does not match (%zu)",
+ saved_geometry->bytes_per_page, geometry->bytes_per_page);
+ result = false;
+ }
+
+ if (saved_config->sparse_sample_rate != user->sparse_sample_rate) {
+ vdo_log_error("Sparse sample rate (%u) does not match (%u)",
+ saved_config->sparse_sample_rate,
+ user->sparse_sample_rate);
+ result = false;
+ }
+
+ if (saved_config->nonce != user->nonce) {
+ vdo_log_error("Nonce (%llu) does not match (%llu)",
+ (unsigned long long) saved_config->nonce,
+ (unsigned long long) user->nonce);
+ result = false;
+ }
+
+ return result;
+}
+
+/* Read the configuration and validate it against the provided one. */
+int uds_validate_config_contents(struct buffered_reader *reader,
+ struct uds_configuration *user_config)
+{
+ int result;
+ struct uds_configuration config;
+ struct index_geometry geometry;
+ u8 version_buffer[INDEX_CONFIG_VERSION_LENGTH];
+ u32 bytes_per_page;
+ u8 buffer[sizeof(struct uds_configuration_6_02)];
+ size_t offset = 0;
+
+ result = uds_verify_buffered_data(reader, INDEX_CONFIG_MAGIC,
+ INDEX_CONFIG_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, version_buffer,
+ INDEX_CONFIG_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read index config version");
+
+ if (!is_version(INDEX_CONFIG_VERSION_6_02, version_buffer) &&
+ !is_version(INDEX_CONFIG_VERSION_8_02, version_buffer)) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unsupported configuration version: '%.*s'",
+ INDEX_CONFIG_VERSION_LENGTH,
+ version_buffer);
+ }
+
+ result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read config data");
+
+ decode_u32_le(buffer, &offset, &geometry.record_pages_per_chapter);
+ decode_u32_le(buffer, &offset, &geometry.chapters_per_volume);
+ decode_u32_le(buffer, &offset, &geometry.sparse_chapters_per_volume);
+ decode_u32_le(buffer, &offset, &config.cache_chapters);
+ offset += sizeof(u32);
+ decode_u32_le(buffer, &offset, &config.volume_index_mean_delta);
+ decode_u32_le(buffer, &offset, &bytes_per_page);
+ geometry.bytes_per_page = bytes_per_page;
+ decode_u32_le(buffer, &offset, &config.sparse_sample_rate);
+ decode_u64_le(buffer, &offset, &config.nonce);
+
+ result = VDO_ASSERT(offset == sizeof(struct uds_configuration_6_02),
+ "%zu bytes read but not decoded",
+ sizeof(struct uds_configuration_6_02) - offset);
+ if (result != VDO_SUCCESS)
+ return UDS_CORRUPT_DATA;
+
+ if (is_version(INDEX_CONFIG_VERSION_6_02, version_buffer)) {
+ user_config->geometry->remapped_virtual = 0;
+ user_config->geometry->remapped_physical = 0;
+ } else {
+ u8 remapping[sizeof(u64) + sizeof(u64)];
+
+ result = uds_read_from_buffered_reader(reader, remapping,
+ sizeof(remapping));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read converted config");
+
+ offset = 0;
+ decode_u64_le(remapping, &offset,
+ &user_config->geometry->remapped_virtual);
+ decode_u64_le(remapping, &offset,
+ &user_config->geometry->remapped_physical);
+ }
+
+ if (!are_matching_configurations(&config, &geometry, user_config)) {
+ vdo_log_warning("Supplied configuration does not match save");
+ return UDS_NO_INDEX;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/*
+ * Write the configuration to stable storage. If the superblock version is < 4, write the 6.02
+ * version; otherwise write the 8.02 version, indicating the configuration is for an index that has
+ * been reduced by one chapter.
+ */
+int uds_write_config_contents(struct buffered_writer *writer,
+ struct uds_configuration *config, u32 version)
+{
+ int result;
+ struct index_geometry *geometry = config->geometry;
+ u8 buffer[sizeof(struct uds_configuration_8_02)];
+ size_t offset = 0;
+
+ result = uds_write_to_buffered_writer(writer, INDEX_CONFIG_MAGIC,
+ INDEX_CONFIG_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /*
+ * If version is < 4, the index has not been reduced by a chapter so it must be written out
+ * as version 6.02 so that it is still compatible with older versions of UDS.
+ */
+ if (version >= 4) {
+ result = uds_write_to_buffered_writer(writer, INDEX_CONFIG_VERSION_8_02,
+ INDEX_CONFIG_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+ } else {
+ result = uds_write_to_buffered_writer(writer, INDEX_CONFIG_VERSION_6_02,
+ INDEX_CONFIG_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ encode_u32_le(buffer, &offset, geometry->record_pages_per_chapter);
+ encode_u32_le(buffer, &offset, geometry->chapters_per_volume);
+ encode_u32_le(buffer, &offset, geometry->sparse_chapters_per_volume);
+ encode_u32_le(buffer, &offset, config->cache_chapters);
+ encode_u32_le(buffer, &offset, 0);
+ encode_u32_le(buffer, &offset, config->volume_index_mean_delta);
+ encode_u32_le(buffer, &offset, geometry->bytes_per_page);
+ encode_u32_le(buffer, &offset, config->sparse_sample_rate);
+ encode_u64_le(buffer, &offset, config->nonce);
+
+ result = VDO_ASSERT(offset == sizeof(struct uds_configuration_6_02),
+ "%zu bytes encoded, of %zu expected", offset,
+ sizeof(struct uds_configuration_6_02));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (version >= 4) {
+ encode_u64_le(buffer, &offset, geometry->remapped_virtual);
+ encode_u64_le(buffer, &offset, geometry->remapped_physical);
+ }
+
+ return uds_write_to_buffered_writer(writer, buffer, offset);
+}
+
+/* Compute configuration parameters that depend on memory size. */
+static int compute_memory_sizes(uds_memory_config_size_t mem_gb, bool sparse,
+ u32 *chapters_per_volume, u32 *record_pages_per_chapter,
+ u32 *sparse_chapters_per_volume)
+{
+ u32 reduced_chapters = 0;
+ u32 base_chapters;
+
+ if (mem_gb == UDS_MEMORY_CONFIG_256MB) {
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_512MB) {
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 2 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_768MB) {
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 3 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if ((mem_gb >= 1) && (mem_gb <= UDS_MEMORY_CONFIG_MAX)) {
+ base_chapters = mem_gb * DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = DEFAULT_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_REDUCED_256MB) {
+ reduced_chapters = 1;
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_REDUCED_512MB) {
+ reduced_chapters = 1;
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 2 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_REDUCED_768MB) {
+ reduced_chapters = 1;
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 3 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if ((mem_gb >= 1 + UDS_MEMORY_CONFIG_REDUCED) &&
+ (mem_gb <= UDS_MEMORY_CONFIG_REDUCED_MAX)) {
+ reduced_chapters = 1;
+ base_chapters = ((mem_gb - UDS_MEMORY_CONFIG_REDUCED) *
+ DEFAULT_CHAPTERS_PER_VOLUME);
+ *record_pages_per_chapter = DEFAULT_RECORD_PAGES_PER_CHAPTER;
+ } else {
+ vdo_log_error("received invalid memory size");
+ return -EINVAL;
+ }
+
+ if (sparse) {
+ /* Make 95% of chapters sparse, allowing 10x more records. */
+ *sparse_chapters_per_volume = (19 * base_chapters) / 2;
+ base_chapters *= 10;
+ } else {
+ *sparse_chapters_per_volume = 0;
+ }
+
+ *chapters_per_volume = base_chapters - reduced_chapters;
+ return UDS_SUCCESS;
+}
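+
+/*
+ * Worked example (illustrative): a 1 GB dense request yields base_chapters equal to
+ * DEFAULT_CHAPTERS_PER_VOLUME. Requesting sparse indexing instead makes
+ * sparse_chapters_per_volume 9.5 times that value while the total grows to 10 times it, so 95%
+ * of the chapters are sparse and the index covers roughly ten times as many records in the same
+ * memory.
+ */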
+
+static unsigned int __must_check normalize_zone_count(unsigned int requested)
+{
+ unsigned int zone_count = requested;
+
+ if (zone_count == 0)
+ zone_count = num_online_cpus() / 2;
+
+ if (zone_count < 1)
+ zone_count = 1;
+
+ if (zone_count > MAX_ZONES)
+ zone_count = MAX_ZONES;
+
+ vdo_log_info("Using %u indexing zone%s for concurrency.",
+ zone_count, zone_count == 1 ? "" : "s");
+ return zone_count;
+}
+
+static unsigned int __must_check normalize_read_threads(unsigned int requested)
+{
+ unsigned int read_threads = requested;
+
+ if (read_threads < 1)
+ read_threads = DEFAULT_VOLUME_READ_THREADS;
+
+ if (read_threads > MAX_VOLUME_READ_THREADS)
+ read_threads = MAX_VOLUME_READ_THREADS;
+
+ return read_threads;
+}
+
+int uds_make_configuration(const struct uds_parameters *params,
+ struct uds_configuration **config_ptr)
+{
+ struct uds_configuration *config;
+ u32 chapters_per_volume = 0;
+ u32 record_pages_per_chapter = 0;
+ u32 sparse_chapters_per_volume = 0;
+ int result;
+
+ result = compute_memory_sizes(params->memory_size, params->sparse,
+ &chapters_per_volume, &record_pages_per_chapter,
+ &sparse_chapters_per_volume);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct uds_configuration, __func__, &config);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_make_index_geometry(DEFAULT_BYTES_PER_PAGE, record_pages_per_chapter,
+ chapters_per_volume, sparse_chapters_per_volume,
+ 0, 0, &config->geometry);
+ if (result != UDS_SUCCESS) {
+ uds_free_configuration(config);
+ return result;
+ }
+
+ config->zone_count = normalize_zone_count(params->zone_count);
+ config->read_threads = normalize_read_threads(params->read_threads);
+
+ config->cache_chapters = DEFAULT_CACHE_CHAPTERS;
+ config->volume_index_mean_delta = DEFAULT_VOLUME_INDEX_MEAN_DELTA;
+ config->sparse_sample_rate = (params->sparse ? DEFAULT_SPARSE_SAMPLE_RATE : 0);
+ config->nonce = params->nonce;
+ config->bdev = params->bdev;
+ config->offset = params->offset;
+ config->size = params->size;
+
+ *config_ptr = config;
+ return UDS_SUCCESS;
+}
+
+void uds_free_configuration(struct uds_configuration *config)
+{
+ if (config != NULL) {
+ uds_free_index_geometry(config->geometry);
+ vdo_free(config);
+ }
+}
+
+void uds_log_configuration(struct uds_configuration *config)
+{
+ struct index_geometry *geometry = config->geometry;
+
+ vdo_log_debug("Configuration:");
+ vdo_log_debug(" Record pages per chapter: %10u", geometry->record_pages_per_chapter);
+ vdo_log_debug(" Chapters per volume: %10u", geometry->chapters_per_volume);
+ vdo_log_debug(" Sparse chapters per volume: %10u", geometry->sparse_chapters_per_volume);
+ vdo_log_debug(" Cache size (chapters): %10u", config->cache_chapters);
+ vdo_log_debug(" Volume index mean delta: %10u", config->volume_index_mean_delta);
+ vdo_log_debug(" Bytes per page: %10zu", geometry->bytes_per_page);
+ vdo_log_debug(" Sparse sample rate: %10u", config->sparse_sample_rate);
+ vdo_log_debug(" Nonce: %llu", (unsigned long long) config->nonce);
+}
diff --git a/drivers/md/dm-vdo/indexer/config.h b/drivers/md/dm-vdo/indexer/config.h
new file mode 100644
index 000000000000..08507dc2f7a1
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/config.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_CONFIG_H
+#define UDS_CONFIG_H
+
+#include "geometry.h"
+#include "indexer.h"
+#include "io-factory.h"
+
+/*
+ * The uds_configuration records a variety of parameters used to configure a new UDS index. Some
+ * parameters are provided by the client, while others are fixed or derived from user-supplied
+ * values. It is created when an index is created, and it is recorded in the index metadata.
+ */
+
+enum {
+ DEFAULT_VOLUME_INDEX_MEAN_DELTA = 4096,
+ DEFAULT_CACHE_CHAPTERS = 7,
+ DEFAULT_SPARSE_SAMPLE_RATE = 32,
+ MAX_ZONES = 16,
+};
+
+/* A set of configuration parameters for the indexer. */
+struct uds_configuration {
+ /* Storage device for the index */
+ struct block_device *bdev;
+
+ /* The maximum allowable size of the index */
+ size_t size;
+
+ /* The offset where the index should start */
+ off_t offset;
+
+ /* Parameters for the volume */
+
+ /* The volume layout */
+ struct index_geometry *geometry;
+
+ /* Index owner's nonce */
+ u64 nonce;
+
+ /* The number of threads used to process index requests */
+ unsigned int zone_count;
+
+ /* The number of threads used to read volume pages */
+ unsigned int read_threads;
+
+ /* Size of the page cache and sparse chapter index cache in chapters */
+ u32 cache_chapters;
+
+ /* Parameters for the volume index */
+
+ /* The mean delta for the volume index */
+ u32 volume_index_mean_delta;
+
+ /* Sampling rate for sparse indexing */
+ u32 sparse_sample_rate;
+};
+
+/* On-disk structure of data for a version 8.02 index. */
+struct uds_configuration_8_02 {
+ /* Smaller (16), small (64), or large (256) indices */
+ u32 record_pages_per_chapter;
+ /* Total number of chapters per volume */
+ u32 chapters_per_volume;
+ /* Number of sparse chapters per volume */
+ u32 sparse_chapters_per_volume;
+ /* Size of the page cache, in chapters */
+ u32 cache_chapters;
+ /* Unused field */
+ u32 unused;
+ /* The volume index mean delta to use */
+ u32 volume_index_mean_delta;
+ /* Size of a page, used for both record pages and index pages */
+ u32 bytes_per_page;
+ /* Sampling rate for sparse indexing */
+ u32 sparse_sample_rate;
+ /* Index owner's nonce */
+ u64 nonce;
+ /* Virtual chapter remapped from physical chapter 0 */
+ u64 remapped_virtual;
+ /* New physical chapter which remapped chapter was moved to */
+ u64 remapped_physical;
+} __packed;
+
+/* On-disk structure of data for a version 6.02 index. */
+struct uds_configuration_6_02 {
+ /* Smaller (16), small (64), or large (256) indices */
+ u32 record_pages_per_chapter;
+ /* Total number of chapters per volume */
+ u32 chapters_per_volume;
+ /* Number of sparse chapters per volume */
+ u32 sparse_chapters_per_volume;
+ /* Size of the page cache, in chapters */
+ u32 cache_chapters;
+ /* Unused field */
+ u32 unused;
+ /* The volume index mean delta to use */
+ u32 volume_index_mean_delta;
+ /* Size of a page, used for both record pages and index pages */
+ u32 bytes_per_page;
+ /* Sampling rate for sparse indexing */
+ u32 sparse_sample_rate;
+ /* Index owner's nonce */
+ u64 nonce;
+} __packed;
+
+int __must_check uds_make_configuration(const struct uds_parameters *params,
+ struct uds_configuration **config_ptr);
+
+void uds_free_configuration(struct uds_configuration *config);
+
+int __must_check uds_validate_config_contents(struct buffered_reader *reader,
+ struct uds_configuration *config);
+
+int __must_check uds_write_config_contents(struct buffered_writer *writer,
+ struct uds_configuration *config, u32 version);
+
+void uds_log_configuration(struct uds_configuration *config);
+
+#endif /* UDS_CONFIG_H */
diff --git a/drivers/md/dm-vdo/indexer/delta-index.c b/drivers/md/dm-vdo/indexer/delta-index.c
new file mode 100644
index 000000000000..0ac2443f0df3
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/delta-index.c
@@ -0,0 +1,1970 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+#include "delta-index.h"
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/limits.h>
+#include <linux/log2.h>
+
+#include "cpu.h"
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+#include "time-utils.h"
+
+#include "config.h"
+#include "indexer.h"
+
+/*
+ * The entries in a delta index could be stored in a single delta list, but to reduce search times
+ * and update costs it uses multiple delta lists. These lists are stored in a single chunk of
+ * memory managed by the delta_zone structure. The delta_zone can move the data around within its
+ * memory, so the location of each delta list is recorded as a bit offset into the memory. Because
+ * the volume index can contain over a million delta lists, we want to be efficient with the size
+ * of the delta list header information. This information is encoded into 16 bytes per list. The
+ * volume index delta list memory can easily exceed 4 gigabits, so a 64-bit value is needed to
+ * address the memory. The volume index delta lists average around 6 kilobits, so 16 bits are
+ * sufficient to store the size of a delta list.
+ *
+ * Each delta list is stored as a bit stream. Within the delta list encoding, bits and bytes are
+ * numbered in little endian order. Within a byte, bit 0 is the least significant bit (0x1), and
+ * bit 7 is the most significant bit (0x80). Within a bit stream, bit 7 is the most significant bit
+ * of byte 0, and bit 8 is the least significant bit of byte 1. Within a byte array, a byte's
+ * number corresponds to its index in the array.
+ *
+ * A standard delta list entry is stored as a fixed length payload (the value) followed by a
+ * variable length key (the delta). A collision entry is used when two block names have the same
+ * delta list address. A collision entry always follows a standard entry for the hash with which it
+ * collides, and is encoded with DELTA == 0 with an additional 256 bits field at the end,
+ * containing the full block name. An entry with a delta of 0 at the beginning of a delta list
+ * indicates a normal entry.
+ *
+ * The delta in each entry is encoded with a variable-length Huffman code to minimize the memory
+ * used by small deltas. The Huffman code is specified by three parameters, which can be computed
+ * from the desired mean delta when the index is full. (See compute_coding_constants() for
+ * details.)
+ *
+ * The bit field utilities used to read and write delta entries assume that it is possible to read
+ * some bytes beyond the end of the bit field, so a delta_zone memory allocation is guarded by two
+ * invalid delta lists to prevent reading outside the delta_zone memory. The valid delta lists are
+ * numbered 1 to N, and the guard lists are numbered 0 and N+1. The functions that decode the bit
+ * stream include a step that skips over bits set to 0 until the first 1 bit is found. A corrupted
+ * delta list could cause this step to run off the end of the delta_zone memory, so as extra
+ * protection against this happening, the tail guard list is set to all ones.
+ *
+ * The delta_index supports two different forms. The mutable form is created by
+ * uds_initialize_delta_index(), and is used for the volume index and for open chapter indexes. The
+ * immutable form is created by uds_initialize_delta_index_page(), and is used for closed (and
+ * cached) chapter index pages. The immutable form does not allocate delta list headers or
+ * temporary offsets, and thus is somewhat more memory efficient.
+ */
+
+/*
+ * This is the largest field size supported by get_field() and set_field(). Any field that is
+ * larger is not guaranteed to fit in a single byte-aligned u32.
+ */
+#define MAX_FIELD_BITS ((sizeof(u32) - 1) * BITS_PER_BYTE + 1)
+
+/*
+ * This is the largest field size supported by get_big_field() and set_big_field(). Any field that
+ * is larger is not guaranteed to fit in a single byte-aligned u64.
+ */
+#define MAX_BIG_FIELD_BITS ((sizeof(u64) - 1) * BITS_PER_BYTE + 1)
+
+/*
+ * This is the number of guard bytes needed at the end of the memory byte array when using the bit
+ * utilities. These utilities call get_big_field() and set_big_field(), which can access up to 7
+ * bytes beyond the end of the desired field. The definition is written to make it clear how this
+ * value is derived.
+ */
+#define POST_FIELD_GUARD_BYTES (sizeof(u64) - 1)
+
+/* The number of guard bits that are needed in the tail guard list */
+#define GUARD_BITS (POST_FIELD_GUARD_BYTES * BITS_PER_BYTE)
+
+/*
+ * The maximum size of a single delta list in bytes. We count guard bytes in this value because a
+ * buffer of this size can be used with move_bits().
+ */
+#define DELTA_LIST_MAX_BYTE_COUNT \
+ ((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
+
+/* The number of extra bytes and bits needed to store a collision entry */
+#define COLLISION_BYTES UDS_RECORD_NAME_SIZE
+#define COLLISION_BITS (COLLISION_BYTES * BITS_PER_BYTE)
+
+/*
+ * Immutable delta lists are packed into pages containing a header that encodes the delta list
+ * information into 19 bits per list (64KB bit offset).
+ */
+#define IMMUTABLE_HEADER_SIZE 19
+
+/*
+ * Constants and structures for the saved delta index. "DI" is for delta_index, and -##### is a
+ * number to increment when the format of the data changes.
+ */
+#define MAGIC_SIZE 8
+
+static const char DELTA_INDEX_MAGIC[] = "DI-00002";
+
+struct delta_index_header {
+ char magic[MAGIC_SIZE];
+ u32 zone_number;
+ u32 zone_count;
+ u32 first_list;
+ u32 list_count;
+ u64 record_count;
+ u64 collision_count;
+};
+
+/*
+ * Header data used for immutable delta index pages. This data is followed by the delta list offset
+ * table.
+ */
+struct delta_page_header {
+ /* Externally-defined nonce */
+ u64 nonce;
+ /* The virtual chapter number */
+ u64 virtual_chapter_number;
+ /* Index of the first delta list on the page */
+ u16 first_list;
+ /* Number of delta lists on the page */
+ u16 list_count;
+} __packed;
+
+static inline u64 get_delta_list_byte_start(const struct delta_list *delta_list)
+{
+ return delta_list->start / BITS_PER_BYTE;
+}
+
+static inline u16 get_delta_list_byte_size(const struct delta_list *delta_list)
+{
+ unsigned int bit_offset = delta_list->start % BITS_PER_BYTE;
+
+ return BITS_TO_BYTES(bit_offset + delta_list->size);
+}
+
+static void rebalance_delta_zone(const struct delta_zone *delta_zone, u32 first,
+ u32 last)
+{
+ struct delta_list *delta_list;
+ u64 new_start;
+
+ if (first == last) {
+ /* Only one list is moving, and we know there is space. */
+ delta_list = &delta_zone->delta_lists[first];
+ new_start = delta_zone->new_offsets[first];
+ if (delta_list->start != new_start) {
+ u64 source;
+ u64 destination;
+
+ source = get_delta_list_byte_start(delta_list);
+ delta_list->start = new_start;
+ destination = get_delta_list_byte_start(delta_list);
+ memmove(delta_zone->memory + destination,
+ delta_zone->memory + source,
+ get_delta_list_byte_size(delta_list));
+ }
+ } else {
+ /*
+ * There is more than one list. Divide the problem in half, and use recursive calls
+ * to process each half. Note that after this computation, first <= middle, and
+ * middle < last.
+ */
+ u32 middle = (first + last) / 2;
+
+ delta_list = &delta_zone->delta_lists[middle];
+ new_start = delta_zone->new_offsets[middle];
+
+ /*
+ * The direction that our middle list is moving determines which half of the
+ * problem must be processed first.
+ */
+ if (new_start > delta_list->start) {
+ rebalance_delta_zone(delta_zone, middle + 1, last);
+ rebalance_delta_zone(delta_zone, first, middle);
+ } else {
+ rebalance_delta_zone(delta_zone, first, middle);
+ rebalance_delta_zone(delta_zone, middle + 1, last);
+ }
+ }
+}
+
+static inline size_t get_zone_memory_size(unsigned int zone_count, size_t memory_size)
+{
+ /* Round up so that each zone is a multiple of 64K in size. */
+ size_t ALLOC_BOUNDARY = 64 * 1024;
+
+ return (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY;
+}
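+
+/*
+ * Worked example (illustrative): three zones sharing 1,000,000 bytes each get
+ * 1,000,000 / 3 = 333,333 bytes rounded up to the next 64 KB boundary, which is 393,216 bytes
+ * (6 * 64 KB) of delta list memory per zone.
+ */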
+
+void uds_reset_delta_index(const struct delta_index *delta_index)
+{
+ unsigned int z;
+
+ /*
+ * Initialize all delta lists to be empty. We keep 2 extra delta list descriptors, one
+ * before the first real entry and one after so that we don't need to bounds check the
+ * array access when calculating preceding and following gap sizes.
+ */
+ for (z = 0; z < delta_index->zone_count; z++) {
+ u64 list_bits;
+ u64 spacing;
+ u64 offset;
+ unsigned int i;
+ struct delta_zone *zone = &delta_index->delta_zones[z];
+ struct delta_list *delta_lists = zone->delta_lists;
+
+ /* Zeroing the delta list headers initializes the head guard list correctly. */
+ memset(delta_lists, 0,
+ (zone->list_count + 2) * sizeof(struct delta_list));
+
+ /* Set all the bits in the end guard list. */
+ list_bits = (u64) zone->size * BITS_PER_BYTE - GUARD_BITS;
+ delta_lists[zone->list_count + 1].start = list_bits;
+ delta_lists[zone->list_count + 1].size = GUARD_BITS;
+ memset(zone->memory + (list_bits / BITS_PER_BYTE), ~0,
+ POST_FIELD_GUARD_BYTES);
+
+ /* Evenly space out the real delta lists by setting regular offsets. */
+ spacing = list_bits / zone->list_count;
+ offset = spacing / 2;
+ for (i = 1; i <= zone->list_count; i++) {
+ delta_lists[i].start = offset;
+ offset += spacing;
+ }
+
+ /* Update the statistics. */
+ zone->discard_count += zone->record_count;
+ zone->record_count = 0;
+ zone->collision_count = 0;
+ }
+}
+
+/*
+ * Compute the Huffman coding parameters for the given mean delta. The Huffman code is specified
+ * by three parameters:
+ *
+ * MINBITS The number of bits in the smallest code
+ * BASE The number of values coded using a code of length MINBITS
+ * INCR The number of values coded by using one additional bit
+ *
+ * These parameters are related by this equation:
+ *
+ * BASE + INCR == 1 << MINBITS
+ *
+ * The math for the Huffman code of an exponential distribution says that
+ *
+ * INCR = log(2) * MEAN_DELTA
+ *
+ * Then use the smallest MINBITS value so that
+ *
+ * (1 << MINBITS) > INCR
+ *
+ * And then
+ *
+ * BASE = (1 << MINBITS) - INCR
+ *
+ * Now the index can generate a code such that
+ * - The first BASE values code using MINBITS bits.
+ * - The next INCR values code using MINBITS+1 bits.
+ * - The next INCR values code using MINBITS+2 bits.
+ * - (and so on).
+ */
+static void compute_coding_constants(u32 mean_delta, u16 *min_bits, u32 *min_keys, u32 *incr_keys)
+{
+ /*
+ * We want to compute the rounded value of log(2) * mean_delta. Since we cannot always use
+ * floating point, use a really good integer approximation.
+ */
+ *incr_keys = (836158UL * mean_delta + 603160UL) / 1206321UL;
+ *min_bits = bits_per(*incr_keys + 1);
+ *min_keys = (1 << *min_bits) - *incr_keys;
+}
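+
+/*
+ * Worked example (illustrative): for mean_delta = 4096, the integer approximation above gives
+ * INCR = 2839 (close to log(2) * 4096), MINBITS = 12 (the smallest value with 1 << MINBITS
+ * greater than INCR), and BASE = 4096 - 2839 = 1257. The 1257 smallest deltas are coded in
+ * 12 bits, the next 2839 in 13 bits, the next 2839 in 14 bits, and so on.
+ */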
+
+void uds_uninitialize_delta_index(struct delta_index *delta_index)
+{
+ unsigned int z;
+
+ if (delta_index->delta_zones == NULL)
+ return;
+
+ for (z = 0; z < delta_index->zone_count; z++) {
+ vdo_free(vdo_forget(delta_index->delta_zones[z].new_offsets));
+ vdo_free(vdo_forget(delta_index->delta_zones[z].delta_lists));
+ vdo_free(vdo_forget(delta_index->delta_zones[z].memory));
+ }
+
+ vdo_free(delta_index->delta_zones);
+ memset(delta_index, 0, sizeof(struct delta_index));
+}
+
+static int initialize_delta_zone(struct delta_zone *delta_zone, size_t size,
+ u32 first_list, u32 list_count, u32 mean_delta,
+ u32 payload_bits, u8 tag)
+{
+ int result;
+
+ result = vdo_allocate(size, u8, "delta list", &delta_zone->memory);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(list_count + 2, u64, "delta list temp",
+ &delta_zone->new_offsets);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Allocate the delta lists. */
+ result = vdo_allocate(list_count + 2, struct delta_list, "delta lists",
+ &delta_zone->delta_lists);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ compute_coding_constants(mean_delta, &delta_zone->min_bits,
+ &delta_zone->min_keys, &delta_zone->incr_keys);
+ delta_zone->value_bits = payload_bits;
+ delta_zone->buffered_writer = NULL;
+ delta_zone->size = size;
+ delta_zone->rebalance_time = 0;
+ delta_zone->rebalance_count = 0;
+ delta_zone->record_count = 0;
+ delta_zone->collision_count = 0;
+ delta_zone->discard_count = 0;
+ delta_zone->overflow_count = 0;
+ delta_zone->first_list = first_list;
+ delta_zone->list_count = list_count;
+ delta_zone->tag = tag;
+
+ return UDS_SUCCESS;
+}
+
+int uds_initialize_delta_index(struct delta_index *delta_index, unsigned int zone_count,
+ u32 list_count, u32 mean_delta, u32 payload_bits,
+ size_t memory_size, u8 tag)
+{
+ int result;
+ unsigned int z;
+ size_t zone_memory;
+
+ result = vdo_allocate(zone_count, struct delta_zone, "Delta Index Zones",
+ &delta_index->delta_zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ delta_index->zone_count = zone_count;
+ delta_index->list_count = list_count;
+ delta_index->lists_per_zone = DIV_ROUND_UP(list_count, zone_count);
+ delta_index->memory_size = 0;
+ delta_index->mutable = true;
+ delta_index->tag = tag;
+
+ for (z = 0; z < zone_count; z++) {
+ u32 lists_in_zone = delta_index->lists_per_zone;
+ u32 first_list_in_zone = z * lists_in_zone;
+
+ if (z == zone_count - 1) {
+ /*
+ * The last zone gets fewer lists if zone_count doesn't evenly divide
+ * list_count. We'll have an underflow if the assertion below doesn't hold.
+ */
+ if (delta_index->list_count <= first_list_in_zone) {
+ uds_uninitialize_delta_index(delta_index);
+ return vdo_log_error_strerror(UDS_INVALID_ARGUMENT,
+ "%u delta lists not enough for %u zones",
+ list_count, zone_count);
+ }
+ lists_in_zone = delta_index->list_count - first_list_in_zone;
+ }
+
+ zone_memory = get_zone_memory_size(zone_count, memory_size);
+ result = initialize_delta_zone(&delta_index->delta_zones[z], zone_memory,
+ first_list_in_zone, lists_in_zone,
+ mean_delta, payload_bits, tag);
+ if (result != UDS_SUCCESS) {
+ uds_uninitialize_delta_index(delta_index);
+ return result;
+ }
+
+ delta_index->memory_size +=
+ (sizeof(struct delta_zone) + zone_memory +
+ (lists_in_zone + 2) * (sizeof(struct delta_list) + sizeof(u64)));
+ }
+
+ uds_reset_delta_index(delta_index);
+ return UDS_SUCCESS;
+}
+
+/* Read a bit field from an arbitrary bit boundary. */
+static inline u32 get_field(const u8 *memory, u64 offset, u8 size)
+{
+ const void *addr = memory + offset / BITS_PER_BYTE;
+
+ return (get_unaligned_le32(addr) >> (offset % BITS_PER_BYTE)) & ((1 << size) - 1);
+}
+
+/* Write a bit field to an arbitrary bit boundary. */
+static inline void set_field(u32 value, u8 *memory, u64 offset, u8 size)
+{
+ void *addr = memory + offset / BITS_PER_BYTE;
+ int shift = offset % BITS_PER_BYTE;
+ u32 data = get_unaligned_le32(addr);
+
+ data &= ~(((1 << size) - 1) << shift);
+ data |= value << shift;
+ put_unaligned_le32(data, addr);
+}
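+
+/*
+ * Worked example (illustrative): get_field(memory, 13, 7) loads the little-endian u32 at
+ * memory + 1 (13 / 8), shifts it right by 5 (13 % 8), and masks to 7 bits, returning a field
+ * spanning bits 5-7 of byte 1 and bits 0-3 of byte 2.
+ */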
+
+/* Get the bit offset to the immutable delta list header. */
+static inline u32 get_immutable_header_offset(u32 list_number)
+{
+ return sizeof(struct delta_page_header) * BITS_PER_BYTE +
+ list_number * IMMUTABLE_HEADER_SIZE;
+}
+
+/* Get the bit offset to the start of the immutable delta list bit stream. */
+static inline u32 get_immutable_start(const u8 *memory, u32 list_number)
+{
+ return get_field(memory, get_immutable_header_offset(list_number),
+ IMMUTABLE_HEADER_SIZE);
+}
+
+/* Set the bit offset to the start of the immutable delta list bit stream. */
+static inline void set_immutable_start(u8 *memory, u32 list_number, u32 start)
+{
+ set_field(start, memory, get_immutable_header_offset(list_number),
+ IMMUTABLE_HEADER_SIZE);
+}
+
+static bool verify_delta_index_page(u64 nonce, u16 list_count, u64 expected_nonce,
+ u8 *memory, size_t memory_size)
+{
+ unsigned int i;
+
+ /*
+ * Verify the nonce. A mismatch can happen here during rebuild if we haven't written the
+ * entire volume at least once.
+ */
+ if (nonce != expected_nonce)
+ return false;
+
+ /* Verify that the number of delta lists can fit in the page. */
+ if (list_count > ((memory_size - sizeof(struct delta_page_header)) *
+ BITS_PER_BYTE / IMMUTABLE_HEADER_SIZE))
+ return false;
+
+ /*
+ * Verify that the first delta list is immediately after the last delta
+ * list header.
+ */
+ if (get_immutable_start(memory, 0) != get_immutable_header_offset(list_count + 1))
+ return false;
+
+ /* Verify that the lists are in the correct order. */
+ for (i = 0; i < list_count; i++) {
+ if (get_immutable_start(memory, i) > get_immutable_start(memory, i + 1))
+ return false;
+ }
+
+ /*
+ * Verify that the last list ends on the page, and that there is room
+ * for the post-field guard bits.
+ */
+ if (get_immutable_start(memory, list_count) >
+ (memory_size - POST_FIELD_GUARD_BYTES) * BITS_PER_BYTE)
+ return false;
+
+ /* Verify that the guard bytes are correctly set to all ones. */
+ for (i = 0; i < POST_FIELD_GUARD_BYTES; i++) {
+ if (memory[memory_size - POST_FIELD_GUARD_BYTES + i] != (u8) ~0)
+ return false;
+ }
+
+ /* All verifications passed. */
+ return true;
+}
+
+/* Initialize a delta index page to refer to a supplied page. */
+int uds_initialize_delta_index_page(struct delta_index_page *delta_index_page,
+ u64 expected_nonce, u32 mean_delta, u32 payload_bits,
+ u8 *memory, size_t memory_size)
+{
+ u64 nonce;
+ u64 vcn;
+ u64 first_list;
+ u64 list_count;
+ struct delta_page_header *header = (struct delta_page_header *) memory;
+ struct delta_zone *delta_zone = &delta_index_page->delta_zone;
+ const u8 *nonce_addr = (const u8 *) &header->nonce;
+ const u8 *vcn_addr = (const u8 *) &header->virtual_chapter_number;
+ const u8 *first_list_addr = (const u8 *) &header->first_list;
+ const u8 *list_count_addr = (const u8 *) &header->list_count;
+
+ /* First assume that the header is little endian. */
+ nonce = get_unaligned_le64(nonce_addr);
+ vcn = get_unaligned_le64(vcn_addr);
+ first_list = get_unaligned_le16(first_list_addr);
+ list_count = get_unaligned_le16(list_count_addr);
+ if (!verify_delta_index_page(nonce, list_count, expected_nonce, memory,
+ memory_size)) {
+ /* If that fails, try big endian. */
+ nonce = get_unaligned_be64(nonce_addr);
+ vcn = get_unaligned_be64(vcn_addr);
+ first_list = get_unaligned_be16(first_list_addr);
+ list_count = get_unaligned_be16(list_count_addr);
+ if (!verify_delta_index_page(nonce, list_count, expected_nonce, memory,
+ memory_size)) {
+ /*
+ * Both attempts failed. Do not log this as an error, because it can happen
+ * during a rebuild if we haven't written the entire volume at least once.
+ */
+ return UDS_CORRUPT_DATA;
+ }
+ }
+
+ delta_index_page->delta_index.delta_zones = delta_zone;
+ delta_index_page->delta_index.zone_count = 1;
+ delta_index_page->delta_index.list_count = list_count;
+ delta_index_page->delta_index.lists_per_zone = list_count;
+ delta_index_page->delta_index.mutable = false;
+ delta_index_page->delta_index.tag = 'p';
+ delta_index_page->virtual_chapter_number = vcn;
+ delta_index_page->lowest_list_number = first_list;
+ delta_index_page->highest_list_number = first_list + list_count - 1;
+
+ compute_coding_constants(mean_delta, &delta_zone->min_bits,
+ &delta_zone->min_keys, &delta_zone->incr_keys);
+ delta_zone->value_bits = payload_bits;
+ delta_zone->memory = memory;
+ delta_zone->delta_lists = NULL;
+ delta_zone->new_offsets = NULL;
+ delta_zone->buffered_writer = NULL;
+ delta_zone->size = memory_size;
+ delta_zone->rebalance_time = 0;
+ delta_zone->rebalance_count = 0;
+ delta_zone->record_count = 0;
+ delta_zone->collision_count = 0;
+ delta_zone->discard_count = 0;
+ delta_zone->overflow_count = 0;
+ delta_zone->first_list = 0;
+ delta_zone->list_count = list_count;
+ delta_zone->tag = 'p';
+
+ return UDS_SUCCESS;
+}
+
+/* Read a large bit field from an arbitrary bit boundary. */
+static inline u64 get_big_field(const u8 *memory, u64 offset, u8 size)
+{
+ const void *addr = memory + offset / BITS_PER_BYTE;
+
+ return (get_unaligned_le64(addr) >> (offset % BITS_PER_BYTE)) & ((1UL << size) - 1);
+}
+
+/* Write a large bit field to an arbitrary bit boundary. */
+static inline void set_big_field(u64 value, u8 *memory, u64 offset, u8 size)
+{
+ void *addr = memory + offset / BITS_PER_BYTE;
+ u8 shift = offset % BITS_PER_BYTE;
+ u64 data = get_unaligned_le64(addr);
+
+ data &= ~(((1UL << size) - 1) << shift);
+ data |= value << shift;
+ put_unaligned_le64(data, addr);
+}
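+
+/*
+ * For example (illustrative values): with offset == 13 and size == 10, addr points at
+ * memory[1], the little-endian 64-bit load is shifted right by 5 bits (13 % 8), and the low
+ * 10 bits of the result are the field. set_big_field() is the inverse: it clears those 10 bits
+ * in the loaded word, ORs in the new value shifted left by 5 bits, and stores the word back.
+ */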
+
+/* Set a sequence of bits to all zeros. */
+static inline void set_zero(u8 *memory, u64 offset, u32 size)
+{
+ if (size > 0) {
+ u8 *addr = memory + offset / BITS_PER_BYTE;
+ u8 shift = offset % BITS_PER_BYTE;
+ u32 count = size + shift > BITS_PER_BYTE ? (u32) BITS_PER_BYTE - shift : size;
+
+ *addr++ &= ~(((1 << count) - 1) << shift);
+ for (size -= count; size > BITS_PER_BYTE; size -= BITS_PER_BYTE)
+ *addr++ = 0;
+
+ if (size > 0)
+ *addr &= 0xFF << size;
+ }
+}
+
+/*
+ * Move several bits from a higher to a lower address, moving the lower addressed bits first. The
+ * size and memory offsets are measured in bits.
+ */
+static void move_bits_down(const u8 *from, u64 from_offset, u8 *to, u64 to_offset, u32 size)
+{
+ const u8 *source;
+ u8 *destination;
+ u8 offset;
+ u8 count;
+ u64 field;
+
+ /* Start by moving one field that ends on a destination int boundary. */
+ count = (MAX_BIG_FIELD_BITS - ((to_offset + MAX_BIG_FIELD_BITS) % BITS_PER_TYPE(u32)));
+ field = get_big_field(from, from_offset, count);
+ set_big_field(field, to, to_offset, count);
+ from_offset += count;
+ to_offset += count;
+ size -= count;
+
+ /* Now do the main loop to copy 32 bit chunks that are int-aligned at the destination. */
+ offset = from_offset % BITS_PER_TYPE(u32);
+ source = from + (from_offset - offset) / BITS_PER_BYTE;
+ destination = to + to_offset / BITS_PER_BYTE;
+ while (size > MAX_BIG_FIELD_BITS) {
+ put_unaligned_le32(get_unaligned_le64(source) >> offset, destination);
+ source += sizeof(u32);
+ destination += sizeof(u32);
+ from_offset += BITS_PER_TYPE(u32);
+ to_offset += BITS_PER_TYPE(u32);
+ size -= BITS_PER_TYPE(u32);
+ }
+
+ /* Finish up by moving any remaining bits. */
+ if (size > 0) {
+ field = get_big_field(from, from_offset, size);
+ set_big_field(field, to, to_offset, size);
+ }
+}
+
+/*
+ * Move several bits from a lower to a higher address, moving the higher addressed bits first. The
+ * size and memory offsets are measured in bits.
+ */
+static void move_bits_up(const u8 *from, u64 from_offset, u8 *to, u64 to_offset, u32 size)
+{
+ const u8 *source;
+ u8 *destination;
+ u8 offset;
+ u8 count;
+ u64 field;
+
+ /* Start by moving one field that begins on a destination int boundary. */
+ count = (to_offset + size) % BITS_PER_TYPE(u32);
+ if (count > 0) {
+ size -= count;
+ field = get_big_field(from, from_offset + size, count);
+ set_big_field(field, to, to_offset + size, count);
+ }
+
+ /* Now do the main loop to copy 32 bit chunks that are int-aligned at the destination. */
+ offset = (from_offset + size) % BITS_PER_TYPE(u32);
+ source = from + (from_offset + size - offset) / BITS_PER_BYTE;
+ destination = to + (to_offset + size) / BITS_PER_BYTE;
+ while (size > MAX_BIG_FIELD_BITS) {
+ source -= sizeof(u32);
+ destination -= sizeof(u32);
+ size -= BITS_PER_TYPE(u32);
+ put_unaligned_le32(get_unaligned_le64(source) >> offset, destination);
+ }
+
+ /* Finish up by moving any remaining bits. */
+ if (size > 0) {
+ field = get_big_field(from, from_offset, size);
+ set_big_field(field, to, to_offset, size);
+ }
+}
+
+/*
+ * Move bits from one field to another. When the fields overlap, behave as if we first move all the
+ * bits from the source to a temporary value, and then move all the bits from the temporary value
+ * to the destination. The size and memory offsets are measured in bits.
+ */
+static void move_bits(const u8 *from, u64 from_offset, u8 *to, u64 to_offset, u32 size)
+{
+ u64 field;
+
+ /* A small move doesn't require special handling. */
+ if (size <= MAX_BIG_FIELD_BITS) {
+ if (size > 0) {
+ field = get_big_field(from, from_offset, size);
+ set_big_field(field, to, to_offset, size);
+ }
+
+ return;
+ }
+
+ if (from_offset > to_offset)
+ move_bits_down(from, from_offset, to, to_offset, size);
+ else
+ move_bits_up(from, from_offset, to, to_offset, size);
+}
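+
+/*
+ * For example, moving 100 bits (more than MAX_BIG_FIELD_BITS) from bit offset 40 to bit offset
+ * 8 of the same buffer is an overlapping move with from_offset > to_offset, so move_bits_down()
+ * is used: the lowest-addressed bits are copied first and are therefore read before the move
+ * overwrites them. Moving the same bits from offset 8 to offset 40 uses move_bits_up(), which
+ * copies the highest-addressed bits first for the same reason.
+ */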
+
+/*
+ * Pack delta lists from a mutable delta index into an immutable delta index page. A range of delta
+ * lists (starting with a specified list index) is copied from the mutable delta index into a
+ * memory page used in the immutable index. The number of lists copied onto the page is returned in
+ * list_count.
+ */
+int uds_pack_delta_index_page(const struct delta_index *delta_index, u64 header_nonce,
+ u8 *memory, size_t memory_size, u64 virtual_chapter_number,
+ u32 first_list, u32 *list_count)
+{
+ const struct delta_zone *delta_zone;
+ struct delta_list *delta_lists;
+ u32 max_lists;
+ u32 n_lists = 0;
+ u32 offset;
+ u32 i;
+ int free_bits;
+ int bits;
+ struct delta_page_header *header;
+
+ delta_zone = &delta_index->delta_zones[0];
+ delta_lists = &delta_zone->delta_lists[first_list + 1];
+ max_lists = delta_index->list_count - first_list;
+
+ /*
+ * Compute how many lists will fit on the page. Subtract the size of the fixed header, one
+ * delta list offset, and the guard bytes from the page size to determine how much space is
+ * available for delta lists.
+ */
+ free_bits = memory_size * BITS_PER_BYTE;
+ free_bits -= get_immutable_header_offset(1);
+ free_bits -= GUARD_BITS;
+ if (free_bits < IMMUTABLE_HEADER_SIZE) {
+ /* This page is too small to store any delta lists. */
+ return vdo_log_error_strerror(UDS_OVERFLOW,
+ "Chapter Index Page of %zu bytes is too small",
+ memory_size);
+ }
+
+ while (n_lists < max_lists) {
+ /* Each list requires a delta list offset and the list data. */
+ bits = IMMUTABLE_HEADER_SIZE + delta_lists[n_lists].size;
+ if (bits > free_bits)
+ break;
+
+ n_lists++;
+ free_bits -= bits;
+ }
+
+ *list_count = n_lists;
+
+ header = (struct delta_page_header *) memory;
+ put_unaligned_le64(header_nonce, (u8 *) &header->nonce);
+ put_unaligned_le64(virtual_chapter_number,
+ (u8 *) &header->virtual_chapter_number);
+ put_unaligned_le16(first_list, (u8 *) &header->first_list);
+ put_unaligned_le16(n_lists, (u8 *) &header->list_count);
+
+ /* Construct the delta list offset table. */
+ offset = get_immutable_header_offset(n_lists + 1);
+ set_immutable_start(memory, 0, offset);
+ for (i = 0; i < n_lists; i++) {
+ offset += delta_lists[i].size;
+ set_immutable_start(memory, i + 1, offset);
+ }
+
+ /* Copy the delta list data onto the memory page. */
+ for (i = 0; i < n_lists; i++) {
+ move_bits(delta_zone->memory, delta_lists[i].start, memory,
+ get_immutable_start(memory, i), delta_lists[i].size);
+ }
+
+ /* Set all the bits in the guard bytes. */
+ memset(memory + memory_size - POST_FIELD_GUARD_BYTES, ~0,
+ POST_FIELD_GUARD_BYTES);
+ return UDS_SUCCESS;
+}
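+
+/*
+ * A minimal packing sketch (hypothetical caller; page_memory, page_size, nonce, and chapter are
+ * assumptions here, and every delta list is assumed to fit on some page):
+ *
+ *	u32 first = 0;
+ *	u32 packed;
+ *
+ *	while (first < delta_index->list_count) {
+ *		result = uds_pack_delta_index_page(delta_index, nonce, page_memory,
+ *						   page_size, chapter, first, &packed);
+ *		if (result != UDS_SUCCESS)
+ *			return result;
+ *		(write page_memory out here, then continue with the remaining lists)
+ *		first += packed;
+ *	}
+ */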
+
+/* Compute the new offsets of the delta lists. */
+static void compute_new_list_offsets(struct delta_zone *delta_zone, u32 growing_index,
+ size_t growing_size, size_t used_space)
+{
+ size_t spacing;
+ u32 i;
+ struct delta_list *delta_lists = delta_zone->delta_lists;
+ u32 tail_guard_index = delta_zone->list_count + 1;
+
+ spacing = (delta_zone->size - used_space) / delta_zone->list_count;
+ delta_zone->new_offsets[0] = 0;
+ for (i = 0; i <= delta_zone->list_count; i++) {
+ delta_zone->new_offsets[i + 1] =
+ (delta_zone->new_offsets[i] +
+ get_delta_list_byte_size(&delta_lists[i]) + spacing);
+ delta_zone->new_offsets[i] *= BITS_PER_BYTE;
+ delta_zone->new_offsets[i] += delta_lists[i].start % BITS_PER_BYTE;
+ if (i == 0)
+ delta_zone->new_offsets[i + 1] -= spacing / 2;
+ if (i + 1 == growing_index)
+ delta_zone->new_offsets[i + 1] += growing_size;
+ }
+
+ delta_zone->new_offsets[tail_guard_index] =
+ (delta_zone->size * BITS_PER_BYTE - delta_lists[tail_guard_index].size);
+}
+
+static void rebalance_lists(struct delta_zone *delta_zone)
+{
+ struct delta_list *delta_lists;
+ u32 i;
+ size_t used_space = 0;
+
+ /* Extend and balance memory to receive the delta lists */
+ delta_lists = delta_zone->delta_lists;
+ for (i = 0; i <= delta_zone->list_count + 1; i++)
+ used_space += get_delta_list_byte_size(&delta_lists[i]);
+
+ compute_new_list_offsets(delta_zone, 0, 0, used_space);
+ for (i = 1; i <= delta_zone->list_count + 1; i++)
+ delta_lists[i].start = delta_zone->new_offsets[i];
+}
+
+/* Start restoring a delta index from multiple input streams. */
+int uds_start_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+ unsigned int zone_count = reader_count;
+ u64 record_count = 0;
+ u64 collision_count = 0;
+ u32 first_list[MAX_ZONES];
+ u32 list_count[MAX_ZONES];
+ unsigned int z;
+ u32 list_next = 0;
+ const struct delta_zone *delta_zone;
+
+ /* Read and validate each header. */
+ for (z = 0; z < zone_count; z++) {
+ struct delta_index_header header;
+ u8 buffer[sizeof(struct delta_index_header)];
+ size_t offset = 0;
+
+ result = uds_read_from_buffered_reader(buffered_readers[z], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta index header");
+ }
+
+ memcpy(&header.magic, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ decode_u32_le(buffer, &offset, &header.zone_number);
+ decode_u32_le(buffer, &offset, &header.zone_count);
+ decode_u32_le(buffer, &offset, &header.first_list);
+ decode_u32_le(buffer, &offset, &header.list_count);
+ decode_u64_le(buffer, &offset, &header.record_count);
+ decode_u64_le(buffer, &offset, &header.collision_count);
+
+ result = VDO_ASSERT(offset == sizeof(struct delta_index_header),
+ "%zu bytes decoded of %zu expected", offset,
+ sizeof(struct delta_index_header));
+ if (result != VDO_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta index header");
+ }
+
+ if (memcmp(header.magic, DELTA_INDEX_MAGIC, MAGIC_SIZE) != 0) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index file has bad magic number");
+ }
+
+ if (zone_count != header.zone_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index files contain mismatched zone counts (%u,%u)",
+ zone_count, header.zone_count);
+ }
+
+ if (header.zone_number != z) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index zone %u found in slot %u",
+ header.zone_number, z);
+ }
+
+ first_list[z] = header.first_list;
+ list_count[z] = header.list_count;
+ record_count += header.record_count;
+ collision_count += header.collision_count;
+
+ if (first_list[z] != list_next) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index file for zone %u starts with list %u instead of list %u",
+ z, first_list[z], list_next);
+ }
+
+ list_next += list_count[z];
+ }
+
+ if (list_next != delta_index->list_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index files contain %u delta lists instead of %u delta lists",
+ list_next, delta_index->list_count);
+ }
+
+ if (collision_count > record_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index files contain %llu collisions and %llu records",
+ (unsigned long long) collision_count,
+ (unsigned long long) record_count);
+ }
+
+ uds_reset_delta_index(delta_index);
+ delta_index->delta_zones[0].record_count = record_count;
+ delta_index->delta_zones[0].collision_count = collision_count;
+
+ /* Read the delta lists and distribute them to the proper zones. */
+ for (z = 0; z < zone_count; z++) {
+ u32 i;
+
+ delta_index->load_lists[z] = 0;
+ for (i = 0; i < list_count[z]; i++) {
+ u16 delta_list_size;
+ u32 list_number;
+ unsigned int zone_number;
+ u8 size_data[sizeof(u16)];
+
+ result = uds_read_from_buffered_reader(buffered_readers[z],
+ size_data,
+ sizeof(size_data));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta index size");
+ }
+
+ delta_list_size = get_unaligned_le16(size_data);
+ if (delta_list_size > 0)
+ delta_index->load_lists[z] += 1;
+
+ list_number = first_list[z] + i;
+ zone_number = list_number / delta_index->lists_per_zone;
+ delta_zone = &delta_index->delta_zones[zone_number];
+ list_number -= delta_zone->first_list;
+ delta_zone->delta_lists[list_number + 1].size = delta_list_size;
+ }
+ }
+
+ /* Prepare each zone to start receiving the delta list data. */
+ for (z = 0; z < delta_index->zone_count; z++)
+ rebalance_lists(&delta_index->delta_zones[z]);
+
+ return UDS_SUCCESS;
+}
+
+static int restore_delta_list_to_zone(struct delta_zone *delta_zone,
+ const struct delta_list_save_info *save_info,
+ const u8 *data)
+{
+ struct delta_list *delta_list;
+ u16 bit_count;
+ u16 byte_count;
+ u32 list_number = save_info->index - delta_zone->first_list;
+
+ if (list_number >= delta_zone->list_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "invalid delta list number %u not in range [%u,%u)",
+ save_info->index, delta_zone->first_list,
+ delta_zone->first_list + delta_zone->list_count);
+ }
+
+ delta_list = &delta_zone->delta_lists[list_number + 1];
+ if (delta_list->size == 0) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "unexpected delta list number %u",
+ save_info->index);
+ }
+
+ bit_count = delta_list->size + save_info->bit_offset;
+ byte_count = BITS_TO_BYTES(bit_count);
+ if (save_info->byte_count != byte_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "unexpected delta list size %u != %u",
+ save_info->byte_count, byte_count);
+ }
+
+ move_bits(data, save_info->bit_offset, delta_zone->memory, delta_list->start,
+ delta_list->size);
+ return UDS_SUCCESS;
+}
+
+static int restore_delta_list_data(struct delta_index *delta_index, unsigned int load_zone,
+ struct buffered_reader *buffered_reader, u8 *data)
+{
+ int result;
+ struct delta_list_save_info save_info;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+ unsigned int new_zone;
+
+ result = uds_read_from_buffered_reader(buffered_reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta list data");
+ }
+
+ save_info = (struct delta_list_save_info) {
+ .tag = buffer[0],
+ .bit_offset = buffer[1],
+ .byte_count = get_unaligned_le16(&buffer[2]),
+ .index = get_unaligned_le32(&buffer[4]),
+ };
+
+ if ((save_info.bit_offset >= BITS_PER_BYTE) ||
+ (save_info.byte_count > DELTA_LIST_MAX_BYTE_COUNT)) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "corrupt delta list data");
+ }
+
+ /* Make sure the data is intended for this delta index. */
+ if (save_info.tag != delta_index->tag)
+ return UDS_CORRUPT_DATA;
+
+ if (save_info.index >= delta_index->list_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "invalid delta list number %u of %u",
+ save_info.index,
+ delta_index->list_count);
+ }
+
+ result = uds_read_from_buffered_reader(buffered_reader, data,
+ save_info.byte_count);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta list data");
+ }
+
+ delta_index->load_lists[load_zone] -= 1;
+ new_zone = save_info.index / delta_index->lists_per_zone;
+ return restore_delta_list_to_zone(&delta_index->delta_zones[new_zone],
+ &save_info, data);
+}
+
+/* Restore delta lists from saved data. */
+int uds_finish_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+ int saved_result = UDS_SUCCESS;
+ unsigned int z;
+ u8 *data;
+
+ result = vdo_allocate(DELTA_LIST_MAX_BYTE_COUNT, u8, __func__, &data);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (z = 0; z < reader_count; z++) {
+ while (delta_index->load_lists[z] > 0) {
+ result = restore_delta_list_data(delta_index, z,
+ buffered_readers[z], data);
+ if (result != UDS_SUCCESS) {
+ saved_result = result;
+ break;
+ }
+ }
+ }
+
+ vdo_free(data);
+ return saved_result;
+}
+
+int uds_check_guard_delta_lists(struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+ unsigned int z;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+
+ for (z = 0; z < reader_count; z++) {
+ result = uds_read_from_buffered_reader(buffered_readers[z], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (buffer[0] != 'z')
+ return UDS_CORRUPT_DATA;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int flush_delta_list(struct delta_zone *zone, u32 flush_index)
+{
+ struct delta_list *delta_list;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+ int result;
+
+ delta_list = &zone->delta_lists[flush_index + 1];
+
+ buffer[0] = zone->tag;
+ buffer[1] = delta_list->start % BITS_PER_BYTE;
+ put_unaligned_le16(get_delta_list_byte_size(delta_list), &buffer[2]);
+ put_unaligned_le32(zone->first_list + flush_index, &buffer[4]);
+
+ result = uds_write_to_buffered_writer(zone->buffered_writer, buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning_strerror(result, "failed to write delta list memory");
+ return result;
+ }
+
+ result = uds_write_to_buffered_writer(zone->buffered_writer,
+ zone->memory + get_delta_list_byte_start(delta_list),
+ get_delta_list_byte_size(delta_list));
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result, "failed to write delta list memory");
+
+ return result;
+}
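+
+/*
+ * Each saved delta list is thus an 8-byte delta_list_save_info header (tag, bit offset of the
+ * list data within its first byte, a 16-bit little-endian byte count, and a 32-bit
+ * little-endian list number) followed by that many bytes of list data. For example, a list
+ * starting at bit 19 with a size of 42 bits is saved with a bit offset of 3 and a byte count
+ * of 6, since bits 19 through 60 occupy bytes 2 through 7 of the zone memory. A terminating
+ * guard entry (see uds_write_guard_delta_list()) uses tag 'z' with the remaining fields zero.
+ */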
+
+/* Start saving a delta index zone to a buffered output stream. */
+int uds_start_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number,
+ struct buffered_writer *buffered_writer)
+{
+ int result;
+ u32 i;
+ struct delta_zone *delta_zone;
+ u8 buffer[sizeof(struct delta_index_header)];
+ size_t offset = 0;
+
+ delta_zone = &delta_index->delta_zones[zone_number];
+ memcpy(buffer, DELTA_INDEX_MAGIC, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ encode_u32_le(buffer, &offset, zone_number);
+ encode_u32_le(buffer, &offset, delta_index->zone_count);
+ encode_u32_le(buffer, &offset, delta_zone->first_list);
+ encode_u32_le(buffer, &offset, delta_zone->list_count);
+ encode_u64_le(buffer, &offset, delta_zone->record_count);
+ encode_u64_le(buffer, &offset, delta_zone->collision_count);
+
+ result = VDO_ASSERT(offset == sizeof(struct delta_index_header),
+ "%zu bytes encoded of %zu expected", offset,
+ sizeof(struct delta_index_header));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result,
+ "failed to write delta index header");
+
+ for (i = 0; i < delta_zone->list_count; i++) {
+ u8 data[sizeof(u16)];
+ struct delta_list *delta_list;
+
+ delta_list = &delta_zone->delta_lists[i + 1];
+ put_unaligned_le16(delta_list->size, data);
+ result = uds_write_to_buffered_writer(buffered_writer, data,
+ sizeof(data));
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result,
+ "failed to write delta list size");
+ }
+
+ delta_zone->buffered_writer = buffered_writer;
+ return UDS_SUCCESS;
+}
+
+int uds_finish_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number)
+{
+ int result;
+ int first_error = UDS_SUCCESS;
+ u32 i;
+ struct delta_zone *delta_zone;
+ struct delta_list *delta_list;
+
+ delta_zone = &delta_index->delta_zones[zone_number];
+ for (i = 0; i < delta_zone->list_count; i++) {
+ delta_list = &delta_zone->delta_lists[i + 1];
+ if (delta_list->size > 0) {
+ result = flush_delta_list(delta_zone, i);
+ if ((result != UDS_SUCCESS) && (first_error == UDS_SUCCESS))
+ first_error = result;
+ }
+ }
+
+ delta_zone->buffered_writer = NULL;
+ return first_error;
+}
+
+int uds_write_guard_delta_list(struct buffered_writer *buffered_writer)
+{
+ int result;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+
+ memset(buffer, 0, sizeof(struct delta_list_save_info));
+ buffer[0] = 'z';
+
+ result = uds_write_to_buffered_writer(buffered_writer, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result, "failed to write guard delta list");
+
+ return UDS_SUCCESS;
+}
+
+size_t uds_compute_delta_index_save_bytes(u32 list_count, size_t memory_size)
+{
+ /* A single zone will use at least as much memory as any other zone count would. */
+ return (sizeof(struct delta_index_header) +
+ list_count * (sizeof(struct delta_list_save_info) + 1) +
+ get_zone_memory_size(1, memory_size));
+}
+
+static int assert_not_at_end(const struct delta_index_entry *delta_entry)
+{
+ int result = VDO_ASSERT(!delta_entry->at_end,
+ "operation is invalid because the list entry is at the end of the delta list");
+ if (result != VDO_SUCCESS)
+ result = UDS_BAD_STATE;
+
+ return result;
+}
+
+/*
+ * Prepare to search for an entry in the specified delta list.
+ *
+ * This is always the first function to be called when dealing with delta index entries. It is
+ * always followed by calls to uds_next_delta_index_entry() to iterate through a delta list. The
+ * fields of the delta_index_entry argument will be set up for iteration, but will not contain an
+ * entry from the list.
+ */
+int uds_start_delta_index_search(const struct delta_index *delta_index, u32 list_number,
+ u32 key, struct delta_index_entry *delta_entry)
+{
+ int result;
+ unsigned int zone_number;
+ struct delta_zone *delta_zone;
+ struct delta_list *delta_list;
+
+ result = VDO_ASSERT((list_number < delta_index->list_count),
+ "Delta list number (%u) is out of range (%u)", list_number,
+ delta_index->list_count);
+ if (result != VDO_SUCCESS)
+ return UDS_CORRUPT_DATA;
+
+ zone_number = list_number / delta_index->lists_per_zone;
+ delta_zone = &delta_index->delta_zones[zone_number];
+ list_number -= delta_zone->first_list;
+ result = VDO_ASSERT((list_number < delta_zone->list_count),
+ "Delta list number (%u) is out of range (%u) for zone (%u)",
+ list_number, delta_zone->list_count, zone_number);
+ if (result != VDO_SUCCESS)
+ return UDS_CORRUPT_DATA;
+
+ if (delta_index->mutable) {
+ delta_list = &delta_zone->delta_lists[list_number + 1];
+ } else {
+ u32 end_offset;
+
+ /*
+ * Translate the immutable delta list header into a temporary
+ * full delta list header.
+ */
+ delta_list = &delta_entry->temp_delta_list;
+ delta_list->start = get_immutable_start(delta_zone->memory, list_number);
+ end_offset = get_immutable_start(delta_zone->memory, list_number + 1);
+ delta_list->size = end_offset - delta_list->start;
+ delta_list->save_key = 0;
+ delta_list->save_offset = 0;
+ }
+
+ if (key > delta_list->save_key) {
+ delta_entry->key = delta_list->save_key;
+ delta_entry->offset = delta_list->save_offset;
+ } else {
+ delta_entry->key = 0;
+ delta_entry->offset = 0;
+ if (key == 0) {
+ /*
+ * This usually means we're about to walk the entire delta list, so get all
+ * of it into the CPU cache.
+ */
+ uds_prefetch_range(&delta_zone->memory[delta_list->start / BITS_PER_BYTE],
+ delta_list->size / BITS_PER_BYTE, false);
+ }
+ }
+
+ delta_entry->at_end = false;
+ delta_entry->delta_zone = delta_zone;
+ delta_entry->delta_list = delta_list;
+ delta_entry->entry_bits = 0;
+ delta_entry->is_collision = false;
+ delta_entry->list_number = list_number;
+ delta_entry->list_overflow = false;
+ delta_entry->value_bits = delta_zone->value_bits;
+ return UDS_SUCCESS;
+}
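+
+/*
+ * A minimal iteration sketch (assuming an initialized delta index and a valid list number);
+ * passing a key of 0 walks the list from the beginning:
+ *
+ *	struct delta_index_entry entry;
+ *
+ *	result = uds_start_delta_index_search(delta_index, list_number, 0, &entry);
+ *	if (result != UDS_SUCCESS)
+ *		return result;
+ *	for (;;) {
+ *		result = uds_next_delta_index_entry(&entry);
+ *		if ((result != UDS_SUCCESS) || entry.at_end)
+ *			break;
+ *		(entry.key and uds_get_delta_entry_value(&entry) describe this record)
+ *	}
+ */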
+
+static inline u64 get_delta_entry_offset(const struct delta_index_entry *delta_entry)
+{
+ return delta_entry->delta_list->start + delta_entry->offset;
+}
+
+/*
+ * Decode a delta index entry delta value. The delta_index_entry basically describes the previous
+ * list entry, and has had its offset field changed to point to the subsequent entry. We decode the
+ * bit stream and update the delta_list_entry to describe the entry.
+ */
+static inline void decode_delta(struct delta_index_entry *delta_entry)
+{
+ int key_bits;
+ u32 delta;
+ const struct delta_zone *delta_zone = delta_entry->delta_zone;
+ const u8 *memory = delta_zone->memory;
+ u64 delta_offset = get_delta_entry_offset(delta_entry) + delta_entry->value_bits;
+ const u8 *addr = memory + delta_offset / BITS_PER_BYTE;
+ int offset = delta_offset % BITS_PER_BYTE;
+ u32 data = get_unaligned_le32(addr) >> offset;
+
+ addr += sizeof(u32);
+ key_bits = delta_zone->min_bits;
+ delta = data & ((1 << key_bits) - 1);
+ if (delta >= delta_zone->min_keys) {
+ data >>= key_bits;
+ if (data == 0) {
+ key_bits = sizeof(u32) * BITS_PER_BYTE - offset;
+ while ((data = get_unaligned_le32(addr)) == 0) {
+ addr += sizeof(u32);
+ key_bits += sizeof(u32) * BITS_PER_BYTE;
+ }
+ }
+ key_bits += ffs(data);
+ delta += ((key_bits - delta_zone->min_bits - 1) * delta_zone->incr_keys);
+ }
+ delta_entry->delta = delta;
+ delta_entry->key += delta;
+
+ /* Check for a collision, a delta of zero after the start. */
+ if (unlikely((delta == 0) && (delta_entry->offset > 0))) {
+ delta_entry->is_collision = true;
+ delta_entry->entry_bits = delta_entry->value_bits + key_bits + COLLISION_BITS;
+ } else {
+ delta_entry->is_collision = false;
+ delta_entry->entry_bits = delta_entry->value_bits + key_bits;
+ }
+}
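+
+/*
+ * As a worked example with hypothetical coding constants min_bits == 8, min_keys == 170, and
+ * incr_keys == 86: a delta of 100 is below min_keys and is stored directly in the 8 minimal
+ * bits. A delta of 300 is stored (see encode_delta() below) as the remainder
+ * (300 - 170) % 86 + 170 == 214 in 8 bits, followed by (300 - 170) / 86 == 1 zero bit and a
+ * terminating 1 bit, 10 bits in all. Decoding sees 214 >= min_keys, finds the first set bit
+ * two positions past the minimal field (key_bits == 10), and reconstructs
+ * 214 + (10 - 8 - 1) * 86 == 300.
+ */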
+
+noinline int uds_next_delta_index_entry(struct delta_index_entry *delta_entry)
+{
+ int result;
+ const struct delta_list *delta_list;
+ u32 next_offset;
+ u16 size;
+
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ delta_list = delta_entry->delta_list;
+ delta_entry->offset += delta_entry->entry_bits;
+ size = delta_list->size;
+ if (unlikely(delta_entry->offset >= size)) {
+ delta_entry->at_end = true;
+ delta_entry->delta = 0;
+ delta_entry->is_collision = false;
+ result = VDO_ASSERT((delta_entry->offset == size),
+ "next offset past end of delta list");
+ if (result != VDO_SUCCESS)
+ result = UDS_CORRUPT_DATA;
+
+ return result;
+ }
+
+ decode_delta(delta_entry);
+
+ next_offset = delta_entry->offset + delta_entry->entry_bits;
+ if (next_offset > size) {
+ /*
+ * This is not an assertion because uds_validate_chapter_index_page() wants to
+ * handle this error.
+ */
+ vdo_log_warning("Decoded past the end of the delta list");
+ return UDS_CORRUPT_DATA;
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_remember_delta_index_offset(const struct delta_index_entry *delta_entry)
+{
+ int result;
+ struct delta_list *delta_list = delta_entry->delta_list;
+
+ result = VDO_ASSERT(!delta_entry->is_collision, "entry is not a collision");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ delta_list->save_key = delta_entry->key - delta_entry->delta;
+ delta_list->save_offset = delta_entry->offset;
+ return UDS_SUCCESS;
+}
+
+static void set_delta(struct delta_index_entry *delta_entry, u32 delta)
+{
+ const struct delta_zone *delta_zone = delta_entry->delta_zone;
+ u32 key_bits = (delta_zone->min_bits +
+ ((delta_zone->incr_keys - delta_zone->min_keys + delta) /
+ delta_zone->incr_keys));
+
+ delta_entry->delta = delta;
+ delta_entry->entry_bits = delta_entry->value_bits + key_bits;
+}
+
+static void get_collision_name(const struct delta_index_entry *entry, u8 *name)
+{
+ u64 offset = get_delta_entry_offset(entry) + entry->entry_bits - COLLISION_BITS;
+ const u8 *addr = entry->delta_zone->memory + offset / BITS_PER_BYTE;
+ int size = COLLISION_BYTES;
+ int shift = offset % BITS_PER_BYTE;
+
+ while (--size >= 0)
+ *name++ = get_unaligned_le16(addr++) >> shift;
+}
+
+static void set_collision_name(const struct delta_index_entry *entry, const u8 *name)
+{
+ u64 offset = get_delta_entry_offset(entry) + entry->entry_bits - COLLISION_BITS;
+ u8 *addr = entry->delta_zone->memory + offset / BITS_PER_BYTE;
+ int size = COLLISION_BYTES;
+ int shift = offset % BITS_PER_BYTE;
+ u16 mask = ~((u16) 0xFF << shift);
+ u16 data;
+
+ while (--size >= 0) {
+ data = (get_unaligned_le16(addr) & mask) | (*name++ << shift);
+ put_unaligned_le16(data, addr++);
+ }
+}
+
+int uds_get_delta_index_entry(const struct delta_index *delta_index, u32 list_number,
+ u32 key, const u8 *name,
+ struct delta_index_entry *delta_entry)
+{
+ int result;
+
+ result = uds_start_delta_index_search(delta_index, list_number, key,
+ delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ do {
+ result = uds_next_delta_index_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ } while (!delta_entry->at_end && (key > delta_entry->key));
+
+ result = uds_remember_delta_index_offset(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!delta_entry->at_end && (key == delta_entry->key)) {
+ struct delta_index_entry collision_entry = *delta_entry;
+
+ for (;;) {
+ u8 full_name[COLLISION_BYTES];
+
+ result = uds_next_delta_index_entry(&collision_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (collision_entry.at_end || !collision_entry.is_collision)
+ break;
+
+ get_collision_name(&collision_entry, full_name);
+ if (memcmp(full_name, name, COLLISION_BYTES) == 0) {
+ *delta_entry = collision_entry;
+ break;
+ }
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_get_delta_entry_collision(const struct delta_index_entry *delta_entry, u8 *name)
+{
+ int result;
+
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(delta_entry->is_collision,
+ "Cannot get full block name from a non-collision delta index entry");
+ if (result != VDO_SUCCESS)
+ return UDS_BAD_STATE;
+
+ get_collision_name(delta_entry, name);
+ return UDS_SUCCESS;
+}
+
+u32 uds_get_delta_entry_value(const struct delta_index_entry *delta_entry)
+{
+ return get_field(delta_entry->delta_zone->memory,
+ get_delta_entry_offset(delta_entry), delta_entry->value_bits);
+}
+
+static int assert_mutable_entry(const struct delta_index_entry *delta_entry)
+{
+ int result = VDO_ASSERT((delta_entry->delta_list != &delta_entry->temp_delta_list),
+ "delta index is mutable");
+ if (result != VDO_SUCCESS)
+ result = UDS_BAD_STATE;
+
+ return result;
+}
+
+int uds_set_delta_entry_value(const struct delta_index_entry *delta_entry, u32 value)
+{
+ int result;
+ u32 value_mask = (1 << delta_entry->value_bits) - 1;
+
+ result = assert_mutable_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((value & value_mask) == value,
+ "Value (%u) being set in a delta index is too large (must fit in %u bits)",
+ value, delta_entry->value_bits);
+ if (result != VDO_SUCCESS)
+ return UDS_INVALID_ARGUMENT;
+
+ set_field(value, delta_entry->delta_zone->memory,
+ get_delta_entry_offset(delta_entry), delta_entry->value_bits);
+ return UDS_SUCCESS;
+}
+
+/*
+ * Extend the memory used by the delta lists by adding growing_size bytes before the list indicated
+ * by growing_index, then rebalancing the lists in the new chunk.
+ */
+static int extend_delta_zone(struct delta_zone *delta_zone, u32 growing_index,
+ size_t growing_size)
+{
+ ktime_t start_time;
+ ktime_t end_time;
+ struct delta_list *delta_lists;
+ u32 i;
+ size_t used_space;
+
+ /* Calculate the amount of space that is or will be in use. */
+ start_time = current_time_ns(CLOCK_MONOTONIC);
+ delta_lists = delta_zone->delta_lists;
+ used_space = growing_size;
+ for (i = 0; i <= delta_zone->list_count + 1; i++)
+ used_space += get_delta_list_byte_size(&delta_lists[i]);
+
+ if (delta_zone->size < used_space)
+ return UDS_OVERFLOW;
+
+ /* Compute the new offsets of the delta lists. */
+ compute_new_list_offsets(delta_zone, growing_index, growing_size, used_space);
+
+ /*
+ * When we rebalance the delta list, we will include the end guard list in the rebalancing.
+ * It contains the end guard data, which must be copied.
+ */
+ rebalance_delta_zone(delta_zone, 1, delta_zone->list_count + 1);
+ end_time = current_time_ns(CLOCK_MONOTONIC);
+ delta_zone->rebalance_count++;
+ delta_zone->rebalance_time += ktime_sub(end_time, start_time);
+ return UDS_SUCCESS;
+}
+
+static int insert_bits(struct delta_index_entry *delta_entry, u16 size)
+{
+ u64 free_before;
+ u64 free_after;
+ u64 source;
+ u64 destination;
+ u32 count;
+ bool before_flag;
+ u8 *memory;
+ struct delta_zone *delta_zone = delta_entry->delta_zone;
+ struct delta_list *delta_list = delta_entry->delta_list;
+ /* Compute bits in use before and after the inserted bits. */
+ u32 total_size = delta_list->size;
+ u32 before_size = delta_entry->offset;
+ u32 after_size = total_size - delta_entry->offset;
+
+ if (total_size + size > U16_MAX) {
+ delta_entry->list_overflow = true;
+ delta_zone->overflow_count++;
+ return UDS_OVERFLOW;
+ }
+
+ /* Compute bits available before and after the delta list. */
+ free_before = (delta_list[0].start - (delta_list[-1].start + delta_list[-1].size));
+ free_after = (delta_list[1].start - (delta_list[0].start + delta_list[0].size));
+
+ if ((size <= free_before) && (size <= free_after)) {
+ /*
+ * There is enough space either before or after the list. Move whichever side has
+ * less data; if both sides have exactly the same amount of data, take space from
+ * the side with more free space.
+ */
+ if (before_size < after_size)
+ before_flag = true;
+ else if (after_size < before_size)
+ before_flag = false;
+ else
+ before_flag = free_before > free_after;
+ } else if (size <= free_before) {
+ /* There is space before but not after. */
+ before_flag = true;
+ } else if (size <= free_after) {
+ /* There is space after but not before. */
+ before_flag = false;
+ } else {
+ /*
+ * Neither of the surrounding spaces is large enough for this request. Extend
+ * and/or rebalance the delta list memory choosing to move the least amount of
+ * data.
+ */
+ int result;
+ u32 growing_index = delta_entry->list_number + 1;
+
+ before_flag = before_size < after_size;
+ if (!before_flag)
+ growing_index++;
+ result = extend_delta_zone(delta_zone, growing_index,
+ BITS_TO_BYTES(size));
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ delta_list->size += size;
+ if (before_flag) {
+ source = delta_list->start;
+ destination = source - size;
+ delta_list->start -= size;
+ count = before_size;
+ } else {
+ source = delta_list->start + delta_entry->offset;
+ destination = source + size;
+ count = after_size;
+ }
+
+ memory = delta_zone->memory;
+ move_bits(memory, source, memory, destination, count);
+ return UDS_SUCCESS;
+}
+
+static void encode_delta(const struct delta_index_entry *delta_entry)
+{
+ u32 temp;
+ u32 t1;
+ u32 t2;
+ u64 offset;
+ const struct delta_zone *delta_zone = delta_entry->delta_zone;
+ u8 *memory = delta_zone->memory;
+
+ offset = get_delta_entry_offset(delta_entry) + delta_entry->value_bits;
+ if (delta_entry->delta < delta_zone->min_keys) {
+ set_field(delta_entry->delta, memory, offset, delta_zone->min_bits);
+ return;
+ }
+
+ temp = delta_entry->delta - delta_zone->min_keys;
+ t1 = (temp % delta_zone->incr_keys) + delta_zone->min_keys;
+ t2 = temp / delta_zone->incr_keys;
+ set_field(t1, memory, offset, delta_zone->min_bits);
+ set_zero(memory, offset + delta_zone->min_bits, t2);
+ set_field(1, memory, offset + delta_zone->min_bits + t2, 1);
+}
+
+static void encode_entry(const struct delta_index_entry *delta_entry, u32 value,
+ const u8 *name)
+{
+ u8 *memory = delta_entry->delta_zone->memory;
+ u64 offset = get_delta_entry_offset(delta_entry);
+
+ set_field(value, memory, offset, delta_entry->value_bits);
+ encode_delta(delta_entry);
+ if (name != NULL)
+ set_collision_name(delta_entry, name);
+}
+
+/*
+ * Create a new entry in the delta index. If the entry is a collision, the full 256 bit name must
+ * be provided.
+ */
+int uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key, u32 value,
+ const u8 *name)
+{
+ int result;
+ struct delta_zone *delta_zone;
+
+ result = assert_mutable_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (delta_entry->is_collision) {
+ /*
+ * The caller wants us to insert a collision entry onto a collision entry. This
+ * happens when we find a collision and attempt to add the name again to the index.
+ * This is normally a fatal error unless we are replaying a closed chapter while we
+ * are rebuilding a volume index.
+ */
+ return UDS_DUPLICATE_NAME;
+ }
+
+ if (delta_entry->offset < delta_entry->delta_list->save_offset) {
+ /*
+ * The saved entry offset is after the new entry and will no longer be valid, so
+ * replace it with the insertion point.
+ */
+ result = uds_remember_delta_index_offset(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ if (name != NULL) {
+ /* Insert a collision entry which is placed after this entry. */
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((key == delta_entry->key),
+ "incorrect key for collision entry");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ delta_entry->offset += delta_entry->entry_bits;
+ set_delta(delta_entry, 0);
+ delta_entry->is_collision = true;
+ delta_entry->entry_bits += COLLISION_BITS;
+ result = insert_bits(delta_entry, delta_entry->entry_bits);
+ } else if (delta_entry->at_end) {
+ /* Insert a new entry at the end of the delta list. */
+ result = VDO_ASSERT((key >= delta_entry->key), "key past end of list");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ set_delta(delta_entry, key - delta_entry->key);
+ delta_entry->key = key;
+ delta_entry->at_end = false;
+ result = insert_bits(delta_entry, delta_entry->entry_bits);
+ } else {
+ u16 old_entry_size;
+ u16 additional_size;
+ struct delta_index_entry next_entry;
+ u32 next_value;
+
+ /*
+ * Insert a new entry which requires the delta in the following entry to be
+ * updated.
+ */
+ result = VDO_ASSERT((key < delta_entry->key),
+ "key precedes following entry");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((key >= delta_entry->key - delta_entry->delta),
+ "key effects following entry's delta");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ old_entry_size = delta_entry->entry_bits;
+ next_entry = *delta_entry;
+ next_value = uds_get_delta_entry_value(&next_entry);
+ set_delta(delta_entry, key - (delta_entry->key - delta_entry->delta));
+ delta_entry->key = key;
+ set_delta(&next_entry, next_entry.key - key);
+ next_entry.offset += delta_entry->entry_bits;
+ /* The two new entries are always bigger than the single entry being replaced. */
+ additional_size = (delta_entry->entry_bits +
+ next_entry.entry_bits - old_entry_size);
+ result = insert_bits(delta_entry, additional_size);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ encode_entry(&next_entry, next_value, NULL);
+ }
+
+ if (result != UDS_SUCCESS)
+ return result;
+
+ encode_entry(delta_entry, value, name);
+ delta_zone = delta_entry->delta_zone;
+ delta_zone->record_count++;
+ delta_zone->collision_count += delta_entry->is_collision ? 1 : 0;
+ return UDS_SUCCESS;
+}
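+
+/*
+ * A minimal insertion sketch (hypothetical caller): position the entry with
+ * uds_get_delta_index_entry() and insert at the resulting point. When the key is absent, name
+ * is passed as NULL; when the key is present but the record names differ, passing the full
+ * name inserts a collision entry after the existing one.
+ *
+ *	result = uds_get_delta_index_entry(delta_index, list_number, key, name, &entry);
+ *	if (result != UDS_SUCCESS)
+ *		return result;
+ *	if (entry.at_end || (entry.key != key))
+ *		result = uds_put_delta_index_entry(&entry, key, value, NULL);
+ *	else if (!entry.is_collision)
+ *		result = uds_put_delta_index_entry(&entry, key, value, name);
+ */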
+
+static void delete_bits(const struct delta_index_entry *delta_entry, int size)
+{
+ u64 source;
+ u64 destination;
+ u32 count;
+ bool before_flag;
+ struct delta_list *delta_list = delta_entry->delta_list;
+ u8 *memory = delta_entry->delta_zone->memory;
+ /* Compute bits retained before and after the deleted bits. */
+ u32 total_size = delta_list->size;
+ u32 before_size = delta_entry->offset;
+ u32 after_size = total_size - delta_entry->offset - size;
+
+ /*
+ * Determine whether to add to the available space either before or after the delta list.
+ * We prefer to move the least amount of data. If it is exactly the same, try to add to the
+ * smaller amount of free space.
+ */
+ if (before_size < after_size) {
+ before_flag = true;
+ } else if (after_size < before_size) {
+ before_flag = false;
+ } else {
+ u64 free_before =
+ (delta_list[0].start - (delta_list[-1].start + delta_list[-1].size));
+ u64 free_after =
+ (delta_list[1].start - (delta_list[0].start + delta_list[0].size));
+
+ before_flag = (free_before < free_after);
+ }
+
+ delta_list->size -= size;
+ if (before_flag) {
+ source = delta_list->start;
+ destination = source + size;
+ delta_list->start += size;
+ count = before_size;
+ } else {
+ destination = delta_list->start + delta_entry->offset;
+ source = destination + size;
+ count = after_size;
+ }
+
+ move_bits(memory, source, memory, destination, count);
+}
+
+int uds_remove_delta_index_entry(struct delta_index_entry *delta_entry)
+{
+ int result;
+ struct delta_index_entry next_entry;
+ struct delta_zone *delta_zone;
+ struct delta_list *delta_list;
+
+ result = assert_mutable_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_entry = *delta_entry;
+ result = uds_next_delta_index_entry(&next_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ delta_zone = delta_entry->delta_zone;
+
+ if (delta_entry->is_collision) {
+ /* This is a collision entry, so just remove it. */
+ delete_bits(delta_entry, delta_entry->entry_bits);
+ next_entry.offset = delta_entry->offset;
+ delta_zone->collision_count -= 1;
+ } else if (next_entry.at_end) {
+ /* This entry is at the end of the list, so just remove it. */
+ delete_bits(delta_entry, delta_entry->entry_bits);
+ next_entry.key -= delta_entry->delta;
+ next_entry.offset = delta_entry->offset;
+ } else {
+ /* The delta in the next entry needs to be updated. */
+ u32 next_value = uds_get_delta_entry_value(&next_entry);
+ u16 old_size = delta_entry->entry_bits + next_entry.entry_bits;
+
+ if (next_entry.is_collision) {
+ next_entry.is_collision = false;
+ delta_zone->collision_count -= 1;
+ }
+
+ set_delta(&next_entry, delta_entry->delta + next_entry.delta);
+ next_entry.offset = delta_entry->offset;
+ /* The one new entry is always smaller than the two entries being replaced. */
+ delete_bits(delta_entry, old_size - next_entry.entry_bits);
+ encode_entry(&next_entry, next_value, NULL);
+ }
+
+ delta_zone->record_count--;
+ delta_zone->discard_count++;
+ *delta_entry = next_entry;
+
+ delta_list = delta_entry->delta_list;
+ if (delta_entry->offset < delta_list->save_offset) {
+ /* The saved entry offset is no longer valid. */
+ delta_list->save_key = 0;
+ delta_list->save_offset = 0;
+ }
+
+ return UDS_SUCCESS;
+}
+
+void uds_get_delta_index_stats(const struct delta_index *delta_index,
+ struct delta_index_stats *stats)
+{
+ unsigned int z;
+ const struct delta_zone *delta_zone;
+
+ memset(stats, 0, sizeof(struct delta_index_stats));
+ for (z = 0; z < delta_index->zone_count; z++) {
+ delta_zone = &delta_index->delta_zones[z];
+ stats->rebalance_time += delta_zone->rebalance_time;
+ stats->rebalance_count += delta_zone->rebalance_count;
+ stats->record_count += delta_zone->record_count;
+ stats->collision_count += delta_zone->collision_count;
+ stats->discard_count += delta_zone->discard_count;
+ stats->overflow_count += delta_zone->overflow_count;
+ stats->list_count += delta_zone->list_count;
+ }
+}
+
+size_t uds_compute_delta_index_size(u32 entry_count, u32 mean_delta, u32 payload_bits)
+{
+ u16 min_bits;
+ u32 incr_keys;
+ u32 min_keys;
+
+ compute_coding_constants(mean_delta, &min_bits, &min_keys, &incr_keys);
+ /* On average, each delta is encoded into about min_bits + 1.5 bits. */
+ return entry_count * (payload_bits + min_bits + 1) + entry_count / 2;
+}
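+
+/*
+ * For example (hypothetical constants): with min_bits == 10, 1000 entries, and 8 payload bits,
+ * the estimate is 1000 * (8 + 10 + 1) + 1000 / 2 == 19500 bits.
+ */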
+
+u32 uds_get_delta_index_page_count(u32 entry_count, u32 list_count, u32 mean_delta,
+ u32 payload_bits, size_t bytes_per_page)
+{
+ unsigned int bits_per_delta_list;
+ unsigned int bits_per_page;
+ size_t bits_per_index;
+
+ /* Compute the expected number of bits needed for all the entries. */
+ bits_per_index = uds_compute_delta_index_size(entry_count, mean_delta,
+ payload_bits);
+ bits_per_delta_list = bits_per_index / list_count;
+
+ /* Add in the immutable delta list headers. */
+ bits_per_index += list_count * IMMUTABLE_HEADER_SIZE;
+ /* Compute the number of usable bits on an immutable index page. */
+ bits_per_page = ((bytes_per_page - sizeof(struct delta_page_header)) * BITS_PER_BYTE);
+ /*
+ * Reduce the bits per page by one immutable delta list header and one delta list to
+ * account for internal fragmentation.
+ */
+ bits_per_page -= IMMUTABLE_HEADER_SIZE + bits_per_delta_list;
+ /* Now compute the number of pages needed. */
+ return DIV_ROUND_UP(bits_per_index, bits_per_page);
+}
+
+void uds_log_delta_index_entry(struct delta_index_entry *delta_entry)
+{
+ vdo_log_ratelimit(vdo_log_info,
+ "List 0x%X Key 0x%X Offset 0x%X%s%s List_size 0x%X%s",
+ delta_entry->list_number, delta_entry->key,
+ delta_entry->offset, delta_entry->at_end ? " end" : "",
+ delta_entry->is_collision ? " collision" : "",
+ delta_entry->delta_list->size,
+ delta_entry->list_overflow ? " overflow" : "");
+ delta_entry->list_overflow = false;
+}
diff --git a/drivers/md/dm-vdo/indexer/delta-index.h b/drivers/md/dm-vdo/indexer/delta-index.h
new file mode 100644
index 000000000000..53f6c6ac0bc7
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/delta-index.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_DELTA_INDEX_H
+#define UDS_DELTA_INDEX_H
+
+#include <linux/cache.h>
+
+#include "numeric.h"
+#include "time-utils.h"
+
+#include "config.h"
+#include "io-factory.h"
+
+/*
+ * A delta index is a key-value store, where each entry maps an address (the key) to a payload (the
+ * value). The entries are sorted by address, and only the delta between successive addresses is
+ * stored in the entry. The addresses are assumed to be uniformly distributed, and the deltas are
+ * therefore exponentially distributed.
+ *
+ * A delta_index can either be mutable or immutable depending on its expected use. The immutable
+ * form of a delta index is used for the indexes of closed chapters committed to the volume. The
+ * mutable form of a delta index is used by the volume index, and also by the chapter index in an
+ * open chapter. Like the index as a whole, each mutable delta index is divided into a number of
+ * independent zones.
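+ *
+ * For example, sorted keys 1000, 1017, and 1050 in one list are stored as the deltas 1000, 17,
+ * and 33; a delta of zero after the first entry in a list marks a collision with the preceding
+ * entry. Because the keys are assumed to be uniformly distributed, most deltas are small and
+ * the variable-length delta code keeps the per-entry cost near its minimum.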
+ */
+
+struct delta_list {
+ /* The offset of the delta list start, in bits */
+ u64 start;
+ /* The number of bits in the delta list */
+ u16 size;
+ /* Where the last search "found" the key, in bits */
+ u16 save_offset;
+ /* The key for the record just before save_offset */
+ u32 save_key;
+};
+
+struct delta_zone {
+ /* The delta list memory */
+ u8 *memory;
+ /* The delta list headers */
+ struct delta_list *delta_lists;
+ /* Temporary starts of delta lists */
+ u64 *new_offsets;
+ /* Buffered writer for saving an index */
+ struct buffered_writer *buffered_writer;
+ /* The size of delta list memory */
+ size_t size;
+ /* Nanoseconds spent rebalancing */
+ ktime_t rebalance_time;
+ /* Number of memory rebalances */
+ u32 rebalance_count;
+ /* The number of bits in a stored value */
+ u8 value_bits;
+ /* The number of bits in the minimal key code */
+ u16 min_bits;
+ /* The number of keys used in a minimal code */
+ u32 min_keys;
+ /* The number of keys used for another code bit */
+ u32 incr_keys;
+ /* The number of records in the index */
+ u64 record_count;
+ /* The number of collision records */
+ u64 collision_count;
+ /* The number of records removed */
+ u64 discard_count;
+ /* The number of UDS_OVERFLOW errors detected */
+ u64 overflow_count;
+ /* The index of the first delta list */
+ u32 first_list;
+ /* The number of delta lists */
+ u32 list_count;
+ /* Tag belonging to this delta index */
+ u8 tag;
+} __aligned(L1_CACHE_BYTES);
+
+struct delta_list_save_info {
+ /* Tag identifying which delta index this list is in */
+ u8 tag;
+ /* Bit offset of the start of the list data */
+ u8 bit_offset;
+ /* Number of bytes of list data */
+ u16 byte_count;
+ /* The delta list number within the delta index */
+ u32 index;
+} __packed;
+
+struct delta_index {
+ /* The zones */
+ struct delta_zone *delta_zones;
+ /* The number of zones */
+ unsigned int zone_count;
+ /* The number of delta lists */
+ u32 list_count;
+ /* Maximum lists per zone */
+ u32 lists_per_zone;
+ /* Total memory allocated to this index */
+ size_t memory_size;
+ /* The number of non-empty lists at load time per zone */
+ u32 load_lists[MAX_ZONES];
+ /* True if this index is mutable */
+ bool mutable;
+ /* Tag belonging to this delta index */
+ u8 tag;
+};
+
+/*
+ * A delta_index_page describes a single page of a chapter index. The delta_index field allows the
+ * page to be treated as an immutable delta_index. The delta_zone field treats the chapter index
+ * page as a single-zone index without requiring an additional memory allocation.
+ */
+struct delta_index_page {
+ struct delta_index delta_index;
+ /* These values are loaded from the delta_page_header */
+ u32 lowest_list_number;
+ u32 highest_list_number;
+ u64 virtual_chapter_number;
+ /* This structure describes the single zone of a delta index page. */
+ struct delta_zone delta_zone;
+};
+
+/*
+ * Notes on the delta_index_entries:
+ *
+ * The fields documented as "public" can be read by any code that uses a delta_index. The fields
+ * documented as "private" carry information between delta_index method calls and should not be
+ * used outside the delta_index module.
+ *
+ * (1) The delta_index_entry is used like an iterator when searching a delta list.
+ *
+ * (2) It is also the result of a successful search and can be used to refer to the element found
+ * by the search.
+ *
+ * (3) It is also the result of an unsuccessful search and can be used to refer to the insertion
+ * point for a new record.
+ *
+ * (4) If at_end is true, the delta_list entry can only be used as the insertion point for a new
+ * record at the end of the list.
+ *
+ * (5) If at_end is false and is_collision is true, the delta_list entry fields refer to a
+ * collision entry in the list, and the delta_list entry can be used as a reference to this
+ * entry.
+ *
+ * (6) If at_end is false and is_collision is false, the delta_list entry fields refer to a
+ * non-collision entry in the list. Such delta_list entries can be used as a reference to a
+ * found entry, or an insertion point for a non-collision entry before this entry, or an
+ * insertion point for a collision entry that collides with this entry.
+ */
+struct delta_index_entry {
+ /* Public fields */
+ /* The key for this entry */
+ u32 key;
+ /* We are after the last list entry */
+ bool at_end;
+ /* This record is a collision */
+ bool is_collision;
+
+ /* Private fields */
+ /* This delta list overflowed */
+ bool list_overflow;
+ /* The number of bits used for the value */
+ u8 value_bits;
+ /* The number of bits used for the entire entry */
+ u16 entry_bits;
+ /* The delta index zone */
+ struct delta_zone *delta_zone;
+ /* The delta list containing the entry */
+ struct delta_list *delta_list;
+ /* The delta list number */
+ u32 list_number;
+ /* Bit offset of this entry within the list */
+ u16 offset;
+ /* The delta between this and previous entry */
+ u32 delta;
+ /* Temporary delta list for immutable indices */
+ struct delta_list temp_delta_list;
+};
+
+struct delta_index_stats {
+ /* Number of bytes allocated */
+ size_t memory_allocated;
+ /* Nanoseconds spent rebalancing */
+ ktime_t rebalance_time;
+ /* Number of memory rebalances */
+ u32 rebalance_count;
+ /* The number of records in the index */
+ u64 record_count;
+ /* The number of collision records */
+ u64 collision_count;
+ /* The number of records removed */
+ u64 discard_count;
+ /* The number of UDS_OVERFLOW errors detected */
+ u64 overflow_count;
+ /* The number of delta lists */
+ u32 list_count;
+};
+
+int __must_check uds_initialize_delta_index(struct delta_index *delta_index,
+ unsigned int zone_count, u32 list_count,
+ u32 mean_delta, u32 payload_bits,
+ size_t memory_size, u8 tag);
+
+int __must_check uds_initialize_delta_index_page(struct delta_index_page *delta_index_page,
+ u64 expected_nonce, u32 mean_delta,
+ u32 payload_bits, u8 *memory,
+ size_t memory_size);
+
+void uds_uninitialize_delta_index(struct delta_index *delta_index);
+
+void uds_reset_delta_index(const struct delta_index *delta_index);
+
+int __must_check uds_pack_delta_index_page(const struct delta_index *delta_index,
+ u64 header_nonce, u8 *memory,
+ size_t memory_size,
+ u64 virtual_chapter_number, u32 first_list,
+ u32 *list_count);
+
+int __must_check uds_start_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count);
+
+int __must_check uds_finish_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count);
+
+int __must_check uds_check_guard_delta_lists(struct buffered_reader **buffered_readers,
+ unsigned int reader_count);
+
+int __must_check uds_start_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number,
+ struct buffered_writer *buffered_writer);
+
+int __must_check uds_finish_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number);
+
+int __must_check uds_write_guard_delta_list(struct buffered_writer *buffered_writer);
+
+size_t __must_check uds_compute_delta_index_save_bytes(u32 list_count,
+ size_t memory_size);
+
+int __must_check uds_start_delta_index_search(const struct delta_index *delta_index,
+ u32 list_number, u32 key,
+ struct delta_index_entry *iterator);
+
+int __must_check uds_next_delta_index_entry(struct delta_index_entry *delta_entry);
+
+int __must_check uds_remember_delta_index_offset(const struct delta_index_entry *delta_entry);
+
+int __must_check uds_get_delta_index_entry(const struct delta_index *delta_index,
+ u32 list_number, u32 key, const u8 *name,
+ struct delta_index_entry *delta_entry);
+
+int __must_check uds_get_delta_entry_collision(const struct delta_index_entry *delta_entry,
+ u8 *name);
+
+u32 __must_check uds_get_delta_entry_value(const struct delta_index_entry *delta_entry);
+
+int __must_check uds_set_delta_entry_value(const struct delta_index_entry *delta_entry, u32 value);
+
+int __must_check uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key,
+ u32 value, const u8 *name);
+
+int __must_check uds_remove_delta_index_entry(struct delta_index_entry *delta_entry);
+
+void uds_get_delta_index_stats(const struct delta_index *delta_index,
+ struct delta_index_stats *stats);
+
+size_t __must_check uds_compute_delta_index_size(u32 entry_count, u32 mean_delta,
+ u32 payload_bits);
+
+u32 uds_get_delta_index_page_count(u32 entry_count, u32 list_count, u32 mean_delta,
+ u32 payload_bits, size_t bytes_per_page);
+
+void uds_log_delta_index_entry(struct delta_index_entry *delta_entry);
+
+#endif /* UDS_DELTA_INDEX_H */
diff --git a/drivers/md/dm-vdo/indexer/funnel-requestqueue.c b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c
new file mode 100644
index 000000000000..1a5735375ddc
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "funnel-requestqueue.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/wait.h>
+
+#include "funnel-queue.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "thread-utils.h"
+
+/*
+ * This queue will attempt to handle requests in reasonably sized batches instead of reacting
+ * immediately to each new request. The wait time between batches is dynamically adjusted up or
+ * down to try to balance responsiveness against wasted thread run time.
+ *
+ * If the wait time becomes long enough, the queue will become dormant and must be explicitly
+ * awoken when a new request is enqueued. The enqueue operation updates "newest" in the funnel
+ * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
+ * wakeup of the worker thread.
+ *
+ * When deciding to go to sleep, the worker thread sets "dormant" and then examines "newest" to
+ * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
+ * going to sleep is done inside the wait_event_interruptible() macro, after a point where one or
+ * more memory barriers have been issued. (Preparing to sleep uses spin locks.) Even if the funnel
+ * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
+ * kick the worker thread out of dormant mode and back into timer-based mode.
+ *
+ * Unbatched requests are used to communicate between different zone threads and will also cause
+ * the queue to awaken immediately.
+ */
+
+enum {
+ NANOSECOND = 1,
+ MICROSECOND = 1000 * NANOSECOND,
+ MILLISECOND = 1000 * MICROSECOND,
+ DEFAULT_WAIT_TIME = 20 * MICROSECOND,
+ MINIMUM_WAIT_TIME = DEFAULT_WAIT_TIME / 2,
+ MAXIMUM_WAIT_TIME = MILLISECOND,
+ MINIMUM_BATCH = 32,
+ MAXIMUM_BATCH = 64,
+};
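+
+/*
+ * To illustrate the adjustment performed by request_queue_worker() below: each undersized
+ * batch increases the wait time by 25%, so starting from DEFAULT_WAIT_TIME (20 us) it takes
+ * roughly 18 consecutive undersized batches to reach MAXIMUM_WAIT_TIME (1 ms) and make the
+ * queue go dormant. Each oversized batch shrinks the wait time by 25%, down to
+ * MINIMUM_WAIT_TIME (10 us).
+ */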
+
+struct uds_request_queue {
+ /* Wait queue for synchronizing producers and consumer */
+ struct wait_queue_head wait_head;
+ /* Function to process a request */
+ uds_request_queue_processor_fn processor;
+ /* Queue of new incoming requests */
+ struct funnel_queue *main_queue;
+ /* Queue of old requests to retry */
+ struct funnel_queue *retry_queue;
+ /* The thread id of the worker thread */
+ struct thread *thread;
+ /* True if the worker was started */
+ bool started;
+ /* When true, requests can be enqueued */
+ bool running;
+ /* A flag set when the worker is waiting without a timeout */
+ atomic_t dormant;
+};
+
+static inline struct uds_request *poll_queues(struct uds_request_queue *queue)
+{
+ struct funnel_queue_entry *entry;
+
+ entry = vdo_funnel_queue_poll(queue->retry_queue);
+ if (entry != NULL)
+ return container_of(entry, struct uds_request, queue_link);
+
+ entry = vdo_funnel_queue_poll(queue->main_queue);
+ if (entry != NULL)
+ return container_of(entry, struct uds_request, queue_link);
+
+ return NULL;
+}
+
+static inline bool are_queues_idle(struct uds_request_queue *queue)
+{
+ return vdo_is_funnel_queue_idle(queue->retry_queue) &&
+ vdo_is_funnel_queue_idle(queue->main_queue);
+}
+
+/*
+ * Determine if there is a next request to process, and return it if there is. Also return flags
+ * indicating whether the worker thread can sleep (for the use of wait_event() macros) and whether
+ * the thread did sleep before returning a new request.
+ */
+static inline bool dequeue_request(struct uds_request_queue *queue,
+ struct uds_request **request_ptr, bool *waited_ptr)
+{
+ struct uds_request *request = poll_queues(queue);
+
+ if (request != NULL) {
+ *request_ptr = request;
+ return true;
+ }
+
+ if (!READ_ONCE(queue->running)) {
+ /* Wake the worker thread so it can exit. */
+ *request_ptr = NULL;
+ return true;
+ }
+
+ *request_ptr = NULL;
+ *waited_ptr = true;
+ return false;
+}
+
+static void wait_for_request(struct uds_request_queue *queue, bool dormant,
+ unsigned long timeout, struct uds_request **request,
+ bool *waited)
+{
+ if (dormant) {
+ wait_event_interruptible(queue->wait_head,
+ (dequeue_request(queue, request, waited) ||
+ !are_queues_idle(queue)));
+ return;
+ }
+
+ wait_event_interruptible_hrtimeout(queue->wait_head,
+ dequeue_request(queue, request, waited),
+ ns_to_ktime(timeout));
+}
+
+static void request_queue_worker(void *arg)
+{
+ struct uds_request_queue *queue = arg;
+ struct uds_request *request = NULL;
+ unsigned long time_batch = DEFAULT_WAIT_TIME;
+ bool dormant = atomic_read(&queue->dormant);
+ bool waited = false;
+ long current_batch = 0;
+
+ for (;;) {
+ wait_for_request(queue, dormant, time_batch, &request, &waited);
+ if (likely(request != NULL)) {
+ current_batch++;
+ queue->processor(request);
+ } else if (!READ_ONCE(queue->running)) {
+ break;
+ }
+
+ if (dormant) {
+ /*
+ * The queue has been roused from dormancy. Clear the flag so enqueuers can
+ * stop broadcasting. No fence is needed for this transition.
+ */
+ atomic_set(&queue->dormant, false);
+ dormant = false;
+ time_batch = DEFAULT_WAIT_TIME;
+ } else if (waited) {
+ /*
+ * We waited for this request to show up. Adjust the wait time to smooth
+ * out the batch size.
+ */
+ if (current_batch < MINIMUM_BATCH) {
+ /*
+ * If the last batch of requests was too small, increase the wait
+ * time.
+ */
+ time_batch += time_batch / 4;
+ if (time_batch >= MAXIMUM_WAIT_TIME) {
+ atomic_set(&queue->dormant, true);
+ dormant = true;
+ }
+ } else if (current_batch > MAXIMUM_BATCH) {
+ /*
+ * If the last batch of requests was too large, decrease the wait
+ * time.
+ */
+ time_batch -= time_batch / 4;
+ if (time_batch < MINIMUM_WAIT_TIME)
+ time_batch = MINIMUM_WAIT_TIME;
+ }
+ current_batch = 0;
+ }
+ }
+
+ /*
+ * Ensure that we process any remaining requests that were enqueued before trying to shut
+ * down. The corresponding write barrier is in uds_request_queue_finish().
+ */
+ smp_rmb();
+ while ((request = poll_queues(queue)) != NULL)
+ queue->processor(request);
+}
+
+int uds_make_request_queue(const char *queue_name,
+ uds_request_queue_processor_fn processor,
+ struct uds_request_queue **queue_ptr)
+{
+ int result;
+ struct uds_request_queue *queue;
+
+ result = vdo_allocate(1, struct uds_request_queue, __func__, &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ queue->processor = processor;
+ queue->running = true;
+ atomic_set(&queue->dormant, false);
+ init_waitqueue_head(&queue->wait_head);
+
+ result = vdo_make_funnel_queue(&queue->main_queue);
+ if (result != VDO_SUCCESS) {
+ uds_request_queue_finish(queue);
+ return result;
+ }
+
+ result = vdo_make_funnel_queue(&queue->retry_queue);
+ if (result != VDO_SUCCESS) {
+ uds_request_queue_finish(queue);
+ return result;
+ }
+
+ result = vdo_create_thread(request_queue_worker, queue, queue_name,
+ &queue->thread);
+ if (result != VDO_SUCCESS) {
+ uds_request_queue_finish(queue);
+ return result;
+ }
+
+ queue->started = true;
+ *queue_ptr = queue;
+ return UDS_SUCCESS;
+}
+
+static inline void wake_up_worker(struct uds_request_queue *queue)
+{
+ if (wq_has_sleeper(&queue->wait_head))
+ wake_up(&queue->wait_head);
+}
+
+void uds_request_queue_enqueue(struct uds_request_queue *queue,
+ struct uds_request *request)
+{
+ struct funnel_queue *sub_queue;
+ bool unbatched = request->unbatched;
+
+ sub_queue = request->requeued ? queue->retry_queue : queue->main_queue;
+ vdo_funnel_queue_put(sub_queue, &request->queue_link);
+
+ /*
+ * We must wake the worker thread when it is dormant. A read fence isn't needed here since
+ * we know the queue operation acts as one.
+ */
+ if (atomic_read(&queue->dormant) || unbatched)
+ wake_up_worker(queue);
+}
+
+void uds_request_queue_finish(struct uds_request_queue *queue)
+{
+ if (queue == NULL)
+ return;
+
+ /*
+ * This memory barrier ensures that any requests we queued will be seen. The point is that
+ * when dequeue_request() sees the following update to the running flag, it will also be
+ * able to see any change we made to a next field in the funnel queue entry. The
+ * corresponding read barrier is in request_queue_worker().
+ */
+ smp_wmb();
+ WRITE_ONCE(queue->running, false);
+
+ if (queue->started) {
+ wake_up_worker(queue);
+ vdo_join_threads(queue->thread);
+ }
+
+ vdo_free_funnel_queue(queue->main_queue);
+ vdo_free_funnel_queue(queue->retry_queue);
+ vdo_free(queue);
+}
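
[Annotation, not part of the patch] The wait-time feedback buried in request_queue_worker() above is the core of the batching behavior: grow the timeout by a quarter after an undersized batch (going dormant once it reaches the one-millisecond cap), shrink it by a quarter after an oversized one, and never drop below the minimum. The standalone userspace sketch below isolates just that adjustment using the same constants; the batch sizes fed in are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Constants copied from the enum in funnel-requestqueue.c (nanoseconds). */
enum {
	DEFAULT_WAIT_TIME = 20 * 1000,           /* 20 microseconds */
	MINIMUM_WAIT_TIME = DEFAULT_WAIT_TIME / 2,
	MAXIMUM_WAIT_TIME = 1000 * 1000,         /* 1 millisecond */
	MINIMUM_BATCH = 32,
	MAXIMUM_BATCH = 64,
};

/* Apply one round of the worker's wait-time feedback to a completed batch. */
static unsigned long adjust_wait_time(unsigned long wait_time, long batch_size,
				      bool *dormant)
{
	if (batch_size < MINIMUM_BATCH) {
		/* Too few requests arrived: wait longer next time. */
		wait_time += wait_time / 4;
		if (wait_time >= MAXIMUM_WAIT_TIME)
			*dormant = true;
	} else if (batch_size > MAXIMUM_BATCH) {
		/* Too many requests arrived: wait less next time. */
		wait_time -= wait_time / 4;
		if (wait_time < MINIMUM_WAIT_TIME)
			wait_time = MINIMUM_WAIT_TIME;
	}

	return wait_time;
}

int main(void)
{
	/* Hypothetical batch sizes; a run of tiny batches drives the queue dormant. */
	long batches[] = { 10, 100, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
	unsigned long wait_time = DEFAULT_WAIT_TIME;
	bool dormant = false;
	size_t i;

	for (i = 0; i < sizeof(batches) / sizeof(batches[0]); i++) {
		wait_time = adjust_wait_time(wait_time, batches[i], &dormant);
		printf("batch %3ld -> wait %lu ns%s\n", batches[i], wait_time,
		       dormant ? " (dormant)" : "");
	}

	return 0;
}
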
diff --git a/drivers/md/dm-vdo/indexer/funnel-requestqueue.h b/drivers/md/dm-vdo/indexer/funnel-requestqueue.h
new file mode 100644
index 000000000000..9b0f53939b4d
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/funnel-requestqueue.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_REQUEST_QUEUE_H
+#define UDS_REQUEST_QUEUE_H
+
+#include "indexer.h"
+
+/*
+ * A simple request queue which will handle new requests in the order in which they are received,
+ * and will attempt to handle requeued requests before new ones. However, the nature of the
+ * implementation means that it cannot guarantee this ordering; the prioritization is merely a
+ * hint.
+ */
+
+struct uds_request_queue;
+
+typedef void (*uds_request_queue_processor_fn)(struct uds_request *);
+
+int __must_check uds_make_request_queue(const char *queue_name,
+ uds_request_queue_processor_fn processor,
+ struct uds_request_queue **queue_ptr);
+
+void uds_request_queue_enqueue(struct uds_request_queue *queue,
+ struct uds_request *request);
+
+void uds_request_queue_finish(struct uds_request_queue *queue);
+
+#endif /* UDS_REQUEST_QUEUE_H */
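
[Annotation, not part of the patch] A minimal sketch of how a caller inside the indexer is expected to drive this interface, assuming kernel context: create one queue with a processor callback, enqueue requests from any thread (requests marked requeued go to the retry queue and are preferred), then finish to drain, join the worker, and free the queue. The names example_queue_usage and handle_request are hypothetical and error handling is abbreviated.

/* Hypothetical callback: handle one dequeued request on the worker thread. */
static void handle_request(struct uds_request *request)
{
	/* ... do the per-request work, then resume the request's lifecycle ... */
}

static int example_queue_usage(struct uds_request *request)
{
	struct uds_request_queue *queue;
	int result;

	/* One worker thread named "example" services the queue. */
	result = uds_make_request_queue("example", handle_request, &queue);
	if (result != UDS_SUCCESS)
		return result;

	/* Producers hand requests off; retries (request->requeued) jump the line. */
	uds_request_queue_enqueue(queue, request);

	/* Shut down: drain remaining requests, join the worker, free the queue. */
	uds_request_queue_finish(queue);
	return UDS_SUCCESS;
}
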
diff --git a/drivers/md/dm-vdo/indexer/geometry.c b/drivers/md/dm-vdo/indexer/geometry.c
new file mode 100644
index 000000000000..c0575612e820
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/geometry.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "geometry.h"
+
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "delta-index.h"
+#include "indexer.h"
+
+/*
+ * An index volume is divided into a fixed number of fixed-size chapters, each consisting of a
+ * fixed number of fixed-size pages. The volume layout is defined by two constants and four
+ * parameters. The constants are that index records are 32 bytes long (16-byte block name plus
+ * 16-byte metadata) and that open chapter index hash slots are one byte long. The four parameters
+ * are the number of bytes in a page, the number of record pages in a chapter, the number of
+ * chapters in a volume, and the number of chapters that are sparse. From these parameters, we can
+ * derive the rest of the layout and other index properties.
+ *
+ * The index volume is sized by its maximum memory footprint. For a dense index, the persistent
+ * storage is about 10 times the size of the memory footprint. For a sparse index, the persistent
+ * storage is about 100 times the size of the memory footprint.
+ *
+ * For a small index with a memory footprint less than 1GB, there are three possible memory
+ * configurations: 0.25GB, 0.5GB and 0.75GB. The default geometry for each is 1024 index records
+ * per 32 KB page, 1024 chapters per volume, and either 64, 128, or 192 record pages per chapter
+ * (resulting in 6, 13, or 20 index pages per chapter) depending on the memory configuration. For
+ * the VDO default of a 0.25 GB index, this yields a deduplication window of 256 GB using about 2.5
+ * GB for the persistent storage and 256 MB of RAM.
+ *
+ * For a larger index with a memory footprint that is a multiple of 1 GB, the geometry is 1024
+ * index records per 32 KB page, 256 record pages per chapter, 26 index pages per chapter, and 1024
+ * chapters for every GB of memory footprint. For a 1 GB volume, this yields a deduplication window
+ * of 1 TB using about 9 GB of persistent storage and 1 GB of RAM.
+ *
+ * The above numbers hold for volumes which have no sparse chapters. A sparse volume has 10 times
+ * as many chapters as the corresponding non-sparse volume, which provides 10 times the
+ * deduplication window while using 10 times as much persistent storage as the equivalent
+ * non-sparse volume with the same memory footprint.
+ *
+ * If the volume has been converted from a non-lvm format to an lvm volume, the number of chapters
+ * per volume will have been reduced by one by eliminating physical chapter 0, and the virtual
+ * chapter that formerly mapped to physical chapter 0 may be remapped to another physical chapter.
+ * This remapping is expressed by storing which virtual chapter was remapped, and which physical
+ * chapter it was moved to.
+ */
+
+int uds_make_index_geometry(size_t bytes_per_page, u32 record_pages_per_chapter,
+ u32 chapters_per_volume, u32 sparse_chapters_per_volume,
+ u64 remapped_virtual, u64 remapped_physical,
+ struct index_geometry **geometry_ptr)
+{
+ int result;
+ struct index_geometry *geometry;
+
+ result = vdo_allocate(1, struct index_geometry, "geometry", &geometry);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ geometry->bytes_per_page = bytes_per_page;
+ geometry->record_pages_per_chapter = record_pages_per_chapter;
+ geometry->chapters_per_volume = chapters_per_volume;
+ geometry->sparse_chapters_per_volume = sparse_chapters_per_volume;
+ geometry->dense_chapters_per_volume = chapters_per_volume - sparse_chapters_per_volume;
+ geometry->remapped_virtual = remapped_virtual;
+ geometry->remapped_physical = remapped_physical;
+
+ geometry->records_per_page = bytes_per_page / BYTES_PER_RECORD;
+ geometry->records_per_chapter = geometry->records_per_page * record_pages_per_chapter;
+ geometry->records_per_volume = (u64) geometry->records_per_chapter * chapters_per_volume;
+
+ geometry->chapter_mean_delta = 1 << DEFAULT_CHAPTER_MEAN_DELTA_BITS;
+ geometry->chapter_payload_bits = bits_per(record_pages_per_chapter - 1);
+ /*
+ * We want 1 delta list for every 64 records in the chapter.
+ * The "| 077" ensures that the chapter_delta_list_bits computation
+ * does not underflow.
+ */
+ geometry->chapter_delta_list_bits =
+ bits_per((geometry->records_per_chapter - 1) | 077) - 6;
+ geometry->delta_lists_per_chapter = 1 << geometry->chapter_delta_list_bits;
+ /* We need enough address bits to achieve the desired mean delta. */
+ geometry->chapter_address_bits =
+ (DEFAULT_CHAPTER_MEAN_DELTA_BITS -
+ geometry->chapter_delta_list_bits +
+ bits_per(geometry->records_per_chapter - 1));
+ geometry->index_pages_per_chapter =
+ uds_get_delta_index_page_count(geometry->records_per_chapter,
+ geometry->delta_lists_per_chapter,
+ geometry->chapter_mean_delta,
+ geometry->chapter_payload_bits,
+ bytes_per_page);
+
+ geometry->pages_per_chapter = geometry->index_pages_per_chapter + record_pages_per_chapter;
+ geometry->pages_per_volume = geometry->pages_per_chapter * chapters_per_volume;
+ geometry->bytes_per_volume =
+ bytes_per_page * (geometry->pages_per_volume + HEADER_PAGES_PER_VOLUME);
+
+ *geometry_ptr = geometry;
+ return UDS_SUCCESS;
+}
+
+int uds_copy_index_geometry(struct index_geometry *source,
+ struct index_geometry **geometry_ptr)
+{
+ return uds_make_index_geometry(source->bytes_per_page,
+ source->record_pages_per_chapter,
+ source->chapters_per_volume,
+ source->sparse_chapters_per_volume,
+ source->remapped_virtual, source->remapped_physical,
+ geometry_ptr);
+}
+
+void uds_free_index_geometry(struct index_geometry *geometry)
+{
+ vdo_free(geometry);
+}
+
+u32 __must_check uds_map_to_physical_chapter(const struct index_geometry *geometry,
+ u64 virtual_chapter)
+{
+ u64 delta;
+
+ if (!uds_is_reduced_index_geometry(geometry))
+ return virtual_chapter % geometry->chapters_per_volume;
+
+ if (likely(virtual_chapter > geometry->remapped_virtual)) {
+ delta = virtual_chapter - geometry->remapped_virtual;
+ if (likely(delta > geometry->remapped_physical))
+ return delta % geometry->chapters_per_volume;
+ else
+ return delta - 1;
+ }
+
+ if (virtual_chapter == geometry->remapped_virtual)
+ return geometry->remapped_physical;
+
+ delta = geometry->remapped_virtual - virtual_chapter;
+ if (delta < geometry->chapters_per_volume)
+ return geometry->chapters_per_volume - delta;
+
+ /* This chapter is so old the answer doesn't matter. */
+ return 0;
+}
+
+/* Check whether any sparse chapters are in use. */
+bool uds_has_sparse_chapters(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter, u64 newest_virtual_chapter)
+{
+ return uds_is_sparse_index_geometry(geometry) &&
+ ((newest_virtual_chapter - oldest_virtual_chapter + 1) >
+ geometry->dense_chapters_per_volume);
+}
+
+bool uds_is_chapter_sparse(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter, u64 newest_virtual_chapter,
+ u64 virtual_chapter_number)
+{
+ return uds_has_sparse_chapters(geometry, oldest_virtual_chapter,
+ newest_virtual_chapter) &&
+ ((virtual_chapter_number + geometry->dense_chapters_per_volume) <=
+ newest_virtual_chapter);
+}
+
+/* Calculate how many chapters to expire after opening the newest chapter. */
+u32 uds_chapters_to_expire(const struct index_geometry *geometry, u64 newest_chapter)
+{
+ /* If the index isn't full yet, don't expire anything. */
+ if (newest_chapter < geometry->chapters_per_volume)
+ return 0;
+
+ /* If a chapter is out of order... */
+ if (geometry->remapped_physical > 0) {
+ u64 oldest_chapter = newest_chapter - geometry->chapters_per_volume;
+
+ /*
+ * ... expire an extra chapter when expiring the moved chapter to free physical
+ * space for the new chapter ...
+ */
+ if (oldest_chapter == geometry->remapped_virtual)
+ return 2;
+
+ /*
+ * ... but don't expire anything when the new chapter will use the physical chapter
+ * freed by expiring the moved chapter.
+ */
+ if (oldest_chapter == (geometry->remapped_virtual + geometry->remapped_physical))
+ return 0;
+ }
+
+ /* Normally, just expire one. */
+ return 1;
+}
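
[Annotation, not part of the patch] As a sanity check on the figures quoted in the comment at the top of this file, the standalone userspace sketch below reproduces the deduplication-window arithmetic for the default dense geometries. The 4 KB data-block size is an assumption inferred from the 256 GB and 1 TB figures given there.

#include <stdio.h>

/* Values taken from the geometry comment above. */
#define BYTES_PER_RECORD	32		/* 16-byte name + 16-byte metadata */
#define BYTES_PER_PAGE		(32 * 1024)
#define DATA_BLOCK_SIZE		(4 * 1024)	/* assumed VDO data block size */

static void show(const char *label, unsigned int record_pages_per_chapter,
		 unsigned int chapters_per_volume)
{
	unsigned long long records_per_page = BYTES_PER_PAGE / BYTES_PER_RECORD;
	unsigned long long records_per_chapter =
		records_per_page * record_pages_per_chapter;
	unsigned long long records_per_volume =
		records_per_chapter * chapters_per_volume;
	unsigned long long window_bytes = records_per_volume * DATA_BLOCK_SIZE;

	printf("%s: %llu records/chapter, %llu records total, %llu GB window\n",
	       label, records_per_chapter, records_per_volume, window_bytes >> 30);
}

int main(void)
{
	show("0.25 GB dense index", 64, 1024);	/* expect a 256 GB window */
	show("1 GB dense index", 256, 1024);	/* expect a 1 TB (1024 GB) window */
	return 0;
}
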
diff --git a/drivers/md/dm-vdo/indexer/geometry.h b/drivers/md/dm-vdo/indexer/geometry.h
new file mode 100644
index 000000000000..a2ecdb238cf2
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/geometry.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_GEOMETRY_H
+#define UDS_INDEX_GEOMETRY_H
+
+#include "indexer.h"
+
+/*
+ * The index_geometry records parameters that define the layout of a UDS index volume, and the size and
+ * shape of various index structures. It is created when the index is created, and is referenced by
+ * many index sub-components.
+ */
+
+struct index_geometry {
+ /* Size of a chapter page, in bytes */
+ size_t bytes_per_page;
+ /* Number of record pages in a chapter */
+ u32 record_pages_per_chapter;
+ /* Total number of chapters in a volume */
+ u32 chapters_per_volume;
+ /* Number of sparsely-indexed chapters in a volume */
+ u32 sparse_chapters_per_volume;
+ /* Number of bits used to determine delta list numbers */
+ u8 chapter_delta_list_bits;
+ /* Virtual chapter remapped from physical chapter 0 */
+ u64 remapped_virtual;
+ /* New physical chapter where the remapped chapter can be found */
+ u64 remapped_physical;
+
+ /*
+ * The following properties are derived from the ones above, but they are computed and
+ * recorded as fields for convenience.
+ */
+ /* Total number of pages in a volume, excluding the header */
+ u32 pages_per_volume;
+ /* Total number of bytes in a volume, including the header */
+ size_t bytes_per_volume;
+ /* Number of pages in a chapter */
+ u32 pages_per_chapter;
+ /* Number of index pages in a chapter index */
+ u32 index_pages_per_chapter;
+ /* Number of records that fit on a page */
+ u32 records_per_page;
+ /* Number of records that fit in a chapter */
+ u32 records_per_chapter;
+ /* Number of records that fit in a volume */
+ u64 records_per_volume;
+ /* Number of delta lists per chapter index */
+ u32 delta_lists_per_chapter;
+ /* Mean delta for chapter indexes */
+ u32 chapter_mean_delta;
+ /* Number of bits needed for record page numbers */
+ u8 chapter_payload_bits;
+ /* Number of bits used to compute addresses for chapter delta lists */
+ u8 chapter_address_bits;
+ /* Number of densely-indexed chapters in a volume */
+ u32 dense_chapters_per_volume;
+};
+
+enum {
+ /* The number of bytes in a record (name + metadata) */
+ BYTES_PER_RECORD = (UDS_RECORD_NAME_SIZE + UDS_RECORD_DATA_SIZE),
+
+ /* The default length of a page in a chapter, in bytes */
+ DEFAULT_BYTES_PER_PAGE = 1024 * BYTES_PER_RECORD,
+
+ /* The default maximum number of records per page */
+ DEFAULT_RECORDS_PER_PAGE = DEFAULT_BYTES_PER_PAGE / BYTES_PER_RECORD,
+
+ /* The default number of record pages in a chapter */
+ DEFAULT_RECORD_PAGES_PER_CHAPTER = 256,
+
+ /* The default number of record pages in a chapter for a small index */
+ SMALL_RECORD_PAGES_PER_CHAPTER = 64,
+
+ /* The default number of chapters in a volume */
+ DEFAULT_CHAPTERS_PER_VOLUME = 1024,
+
+ /* The default number of sparsely-indexed chapters in a volume */
+ DEFAULT_SPARSE_CHAPTERS_PER_VOLUME = 0,
+
+ /* The log2 of the default mean delta */
+ DEFAULT_CHAPTER_MEAN_DELTA_BITS = 16,
+
+ /* The log2 of the number of delta lists in a large chapter */
+ DEFAULT_CHAPTER_DELTA_LIST_BITS = 12,
+
+ /* The log2 of the number of delta lists in a small chapter */
+ SMALL_CHAPTER_DELTA_LIST_BITS = 10,
+
+ /* The number of header pages per volume */
+ HEADER_PAGES_PER_VOLUME = 1,
+};
+
+int __must_check uds_make_index_geometry(size_t bytes_per_page, u32 record_pages_per_chapter,
+ u32 chapters_per_volume,
+ u32 sparse_chapters_per_volume, u64 remapped_virtual,
+ u64 remapped_physical,
+ struct index_geometry **geometry_ptr);
+
+int __must_check uds_copy_index_geometry(struct index_geometry *source,
+ struct index_geometry **geometry_ptr);
+
+void uds_free_index_geometry(struct index_geometry *geometry);
+
+u32 __must_check uds_map_to_physical_chapter(const struct index_geometry *geometry,
+ u64 virtual_chapter);
+
+/*
+ * Check whether this geometry is reduced by a chapter. This will only be true if the volume was
+ * converted from a non-lvm volume to an lvm volume.
+ */
+static inline bool __must_check
+uds_is_reduced_index_geometry(const struct index_geometry *geometry)
+{
+ return !!(geometry->chapters_per_volume & 1);
+}
+
+static inline bool __must_check
+uds_is_sparse_index_geometry(const struct index_geometry *geometry)
+{
+ return geometry->sparse_chapters_per_volume > 0;
+}
+
+bool __must_check uds_has_sparse_chapters(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter,
+ u64 newest_virtual_chapter);
+
+bool __must_check uds_is_chapter_sparse(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter,
+ u64 newest_virtual_chapter,
+ u64 virtual_chapter_number);
+
+u32 __must_check uds_chapters_to_expire(const struct index_geometry *geometry,
+ u64 newest_chapter);
+
+#endif /* UDS_INDEX_GEOMETRY_H */
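
[Annotation, not part of the patch] A minimal kernel-context sketch showing how the constants above combine with uds_make_index_geometry() to build the default 0.25 GB dense geometry (1024 records per 32 KB page, 64 record pages per chapter, 1024 chapters, no sparse chapters, not converted from a non-lvm volume). example_make_default_geometry is a hypothetical caller and error handling is abbreviated.

static int example_make_default_geometry(void)
{
	struct index_geometry *geometry;
	int result;

	result = uds_make_index_geometry(DEFAULT_BYTES_PER_PAGE,
					 SMALL_RECORD_PAGES_PER_CHAPTER,
					 DEFAULT_CHAPTERS_PER_VOLUME,
					 DEFAULT_SPARSE_CHAPTERS_PER_VOLUME,
					 0, 0, /* not converted from non-lvm */
					 &geometry);
	if (result != UDS_SUCCESS)
		return result;

	/* ... use the geometry to size the volume and chapter indexes ... */
	uds_free_index_geometry(geometry);
	return UDS_SUCCESS;
}
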
diff --git a/drivers/md/dm-vdo/indexer/hash-utils.h b/drivers/md/dm-vdo/indexer/hash-utils.h
new file mode 100644
index 000000000000..6a8dd8ffea6c
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/hash-utils.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_HASH_UTILS_H
+#define UDS_HASH_UTILS_H
+
+#include "numeric.h"
+
+#include "geometry.h"
+#include "indexer.h"
+
+/* Utilities for extracting portions of a request name for various uses. */
+
+/* How various portions of a record name are apportioned. */
+enum {
+ VOLUME_INDEX_BYTES_OFFSET = 0,
+ VOLUME_INDEX_BYTES_COUNT = 8,
+ CHAPTER_INDEX_BYTES_OFFSET = 8,
+ CHAPTER_INDEX_BYTES_COUNT = 6,
+ SAMPLE_BYTES_OFFSET = 14,
+ SAMPLE_BYTES_COUNT = 2,
+};
+
+static inline u64 uds_extract_chapter_index_bytes(const struct uds_record_name *name)
+{
+ const u8 *chapter_bits = &name->name[CHAPTER_INDEX_BYTES_OFFSET];
+ u64 bytes = (u64) get_unaligned_be16(chapter_bits) << 32;
+
+ bytes |= get_unaligned_be32(chapter_bits + 2);
+ return bytes;
+}
+
+static inline u64 uds_extract_volume_index_bytes(const struct uds_record_name *name)
+{
+ return get_unaligned_be64(&name->name[VOLUME_INDEX_BYTES_OFFSET]);
+}
+
+static inline u32 uds_extract_sampling_bytes(const struct uds_record_name *name)
+{
+ return get_unaligned_be16(&name->name[SAMPLE_BYTES_OFFSET]);
+}
+
+/* Compute the chapter delta list for a given name. */
+static inline u32 uds_hash_to_chapter_delta_list(const struct uds_record_name *name,
+ const struct index_geometry *geometry)
+{
+ return ((uds_extract_chapter_index_bytes(name) >> geometry->chapter_address_bits) &
+ ((1 << geometry->chapter_delta_list_bits) - 1));
+}
+
+/* Compute the chapter delta address for a given name. */
+static inline u32 uds_hash_to_chapter_delta_address(const struct uds_record_name *name,
+ const struct index_geometry *geometry)
+{
+ return uds_extract_chapter_index_bytes(name) & ((1 << geometry->chapter_address_bits) - 1);
+}
+
+static inline unsigned int uds_name_to_hash_slot(const struct uds_record_name *name,
+ unsigned int slot_count)
+{
+ return (unsigned int) (uds_extract_chapter_index_bytes(name) % slot_count);
+}
+
+#endif /* UDS_HASH_UTILS_H */
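
[Annotation, not part of the patch] The helpers above split a 16-byte record name into bytes 0-7 for the volume index, bytes 8-13 for the chapter index, and bytes 14-15 for sparse sampling, with the chapter bytes further divided into a delta-list number and an address by the geometry's bit widths. The standalone userspace sketch below walks through that split using 10 delta-list bits and 22 address bits, the widths geometry.c derives for the default 0.25 GB index; those two numbers are stated here as assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed bit widths for the default 0.25 GB index (65536 records/chapter). */
#define CHAPTER_DELTA_LIST_BITS	10
#define CHAPTER_ADDRESS_BITS	22

/* Big-endian load of the 6 chapter-index bytes (name bytes 8..13). */
static uint64_t extract_chapter_index_bytes(const uint8_t name[16])
{
	uint64_t bytes = 0;
	int i;

	for (i = 8; i < 14; i++)
		bytes = (bytes << 8) | name[i];

	return bytes;
}

int main(void)
{
	/* A made-up record name; only bytes 8..13 matter for this example. */
	uint8_t name[16] = { 0 };
	uint64_t chapter_bits;

	memcpy(&name[8], "\xde\xad\xbe\xef\x01\x23", 6);
	chapter_bits = extract_chapter_index_bytes(name);

	printf("delta list: %llu\n",
	       (unsigned long long) ((chapter_bits >> CHAPTER_ADDRESS_BITS) &
				     ((1 << CHAPTER_DELTA_LIST_BITS) - 1)));
	printf("address:    %llu\n",
	       (unsigned long long) (chapter_bits &
				     ((1 << CHAPTER_ADDRESS_BITS) - 1)));
	return 0;
}
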
diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
new file mode 100644
index 000000000000..627adc24af3b
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-layout.c
@@ -0,0 +1,1765 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "index-layout.h"
+
+#include <linux/random.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "murmurhash3.h"
+#include "numeric.h"
+#include "time-utils.h"
+
+#include "config.h"
+#include "open-chapter.h"
+#include "volume-index.h"
+
+/*
+ * The UDS layout on storage media is divided into a number of fixed-size regions, the sizes of
+ * which are computed when the index is created. Every header and region begins on a 4K block
+ * boundary. Save regions are further sub-divided into regions of their own.
+ *
+ * Each region has a kind and an instance number. Some kinds only have one instance and therefore
+ * use RL_SOLE_INSTANCE (-1) as the instance number. The RL_KIND_INDEX used to use instances to
+ * represent sub-indices; now, however, there is only ever one sub-index and therefore one instance.
+ * The RL_KIND_VOLUME_INDEX uses instances to record which zone is being saved.
+ *
+ * Every region header has a type and version.
+ *
+ * +-+-+---------+--------+--------+-+
+ * | | | I N D E X 0 101, 0 | |
+ * |H|C+---------+--------+--------+S|
+ * |D|f| Volume | Save | Save |e|
+ * |R|g| Region | Region | Region |a|
+ * | | | 201, -1 | 202, 0 | 202, 1 |l|
+ * +-+-+--------+---------+--------+-+
+ *
+ * The header contains the encoded region layout table as well as some index configuration data.
+ * The sub-index region and its subdivisions are maintained in the same table.
+ *
+ * There are two save regions to preserve the old state in case saving the new state is incomplete.
+ * They are used in alternation. Each save region is further divided into sub-regions.
+ *
+ * +-+-----+------+------+-----+-----+
+ * |H| IPM | MI | MI | | OC |
+ * |D| | zone | zone | ... | |
+ * |R| 301 | 302 | 302 | | 303 |
+ * | | -1 | 0 | 1 | | -1 |
+ * +-+-----+------+------+-----+-----+
+ *
+ * The header contains the encoded region layout table as well as index state data for that save.
+ * Each save also has a unique nonce.
+ */
+
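
[Annotation, not part of the patch] To make the first diagram concrete, here is an illustrative initializer listing the kind/instance pairs the top-level region table carries for a newly created layout with the usual two saves; the order mirrors what make_layout_region_table() builds further down, using the region kinds and RL_SOLE_INSTANCE defined just below.

static const struct { unsigned int kind, instance; } example_layout_table[] = {
	{ RL_KIND_HEADER, RL_SOLE_INSTANCE },	/* HDR */
	{ RL_KIND_CONFIG, RL_SOLE_INSTANCE },	/* Cfg */
	{ RL_KIND_INDEX,  0 },			/* INDEX (101, 0) */
	{ RL_KIND_VOLUME, RL_SOLE_INSTANCE },	/* Volume Region (201, -1) */
	{ RL_KIND_SAVE,   0 },			/* Save Region (202, 0) */
	{ RL_KIND_SAVE,   1 },			/* Save Region (202, 1) */
	{ RL_KIND_SEAL,   RL_SOLE_INSTANCE },	/* Seal */
};
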
+#define MAGIC_SIZE 32
+#define NONCE_INFO_SIZE 32
+#define MAX_SAVES 2
+
+enum region_kind {
+ RL_KIND_EMPTY = 0,
+ RL_KIND_HEADER = 1,
+ RL_KIND_CONFIG = 100,
+ RL_KIND_INDEX = 101,
+ RL_KIND_SEAL = 102,
+ RL_KIND_VOLUME = 201,
+ RL_KIND_SAVE = 202,
+ RL_KIND_INDEX_PAGE_MAP = 301,
+ RL_KIND_VOLUME_INDEX = 302,
+ RL_KIND_OPEN_CHAPTER = 303,
+};
+
+/* Some region types are historical and are no longer used. */
+enum region_type {
+ RH_TYPE_FREE = 0, /* unused */
+ RH_TYPE_SUPER = 1,
+ RH_TYPE_SAVE = 2,
+ RH_TYPE_CHECKPOINT = 3, /* unused */
+ RH_TYPE_UNSAVED = 4,
+};
+
+#define RL_SOLE_INSTANCE 65535
+
+/*
+ * Super block version 2 is the first released version.
+ *
+ * Super block version 3 is the normal version used from RHEL 8.2 onwards.
+ *
+ * Super block versions 4 through 6 were incremental development versions and
+ * are not supported.
+ *
+ * Super block version 7 is used for volumes which have been reduced in size by one chapter in
+ * order to make room to prepend LVM metadata to a volume originally created without lvm. This
+ * allows the index to retain most of its deduplication records.
+ */
+#define SUPER_VERSION_MINIMUM 3
+#define SUPER_VERSION_CURRENT 3
+#define SUPER_VERSION_MAXIMUM 7
+
+static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
+static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+
+struct region_header {
+ u64 magic;
+ u64 region_blocks;
+ u16 type;
+ /* Currently always version 1 */
+ u16 version;
+ u16 region_count;
+ u16 payload;
+};
+
+struct layout_region {
+ u64 start_block;
+ u64 block_count;
+ u32 __unused;
+ u16 kind;
+ u16 instance;
+};
+
+struct region_table {
+ size_t encoded_size;
+ struct region_header header;
+ struct layout_region regions[];
+};
+
+struct index_save_data {
+ u64 timestamp;
+ u64 nonce;
+ /* Currently always version 1 */
+ u32 version;
+ u32 unused__;
+};
+
+struct index_state_version {
+ s32 signature;
+ s32 version_id;
+};
+
+static const struct index_state_version INDEX_STATE_VERSION_301 = {
+ .signature = -1,
+ .version_id = 301,
+};
+
+struct index_state_data301 {
+ struct index_state_version version;
+ u64 newest_chapter;
+ u64 oldest_chapter;
+ u64 last_save;
+ u32 unused;
+ u32 padding;
+};
+
+struct index_save_layout {
+ unsigned int zone_count;
+ struct layout_region index_save;
+ struct layout_region header;
+ struct layout_region index_page_map;
+ struct layout_region free_space;
+ struct layout_region volume_index_zones[MAX_ZONES];
+ struct layout_region open_chapter;
+ struct index_save_data save_data;
+ struct index_state_data301 state_data;
+};
+
+struct sub_index_layout {
+ u64 nonce;
+ struct layout_region sub_index;
+ struct layout_region volume;
+ struct index_save_layout *saves;
+};
+
+struct super_block_data {
+ u8 magic_label[MAGIC_SIZE];
+ u8 nonce_info[NONCE_INFO_SIZE];
+ u64 nonce;
+ u32 version;
+ u32 block_size;
+ u16 index_count;
+ u16 max_saves;
+ /* Padding reflects a blank field on permanent storage */
+ u8 padding[4];
+ u64 open_chapter_blocks;
+ u64 page_map_blocks;
+ u64 volume_offset;
+ u64 start_offset;
+};
+
+struct index_layout {
+ struct io_factory *factory;
+ size_t factory_size;
+ off_t offset;
+ struct super_block_data super;
+ struct layout_region header;
+ struct layout_region config;
+ struct sub_index_layout index;
+ struct layout_region seal;
+ u64 total_blocks;
+};
+
+struct save_layout_sizes {
+ unsigned int save_count;
+ size_t block_size;
+ u64 volume_blocks;
+ u64 volume_index_blocks;
+ u64 page_map_blocks;
+ u64 open_chapter_blocks;
+ u64 save_blocks;
+ u64 sub_index_blocks;
+ u64 total_blocks;
+ size_t total_size;
+};
+
+static inline bool is_converted_super_block(struct super_block_data *super)
+{
+ return super->version == 7;
+}
+
+static int __must_check compute_sizes(const struct uds_configuration *config,
+ struct save_layout_sizes *sls)
+{
+ int result;
+ struct index_geometry *geometry = config->geometry;
+
+ memset(sls, 0, sizeof(*sls));
+ sls->save_count = MAX_SAVES;
+ sls->block_size = UDS_BLOCK_SIZE;
+ sls->volume_blocks = geometry->bytes_per_volume / sls->block_size;
+
+ result = uds_compute_volume_index_save_blocks(config, sls->block_size,
+ &sls->volume_index_blocks);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot compute index save size");
+
+ sls->page_map_blocks =
+ DIV_ROUND_UP(uds_compute_index_page_map_save_size(geometry),
+ sls->block_size);
+ sls->open_chapter_blocks =
+ DIV_ROUND_UP(uds_compute_saved_open_chapter_size(geometry),
+ sls->block_size);
+ sls->save_blocks =
+ 1 + (sls->volume_index_blocks + sls->page_map_blocks + sls->open_chapter_blocks);
+ sls->sub_index_blocks = sls->volume_blocks + (sls->save_count * sls->save_blocks);
+ sls->total_blocks = 3 + sls->sub_index_blocks;
+ sls->total_size = sls->total_blocks * sls->block_size;
+
+ return UDS_SUCCESS;
+}
+
+int uds_compute_index_size(const struct uds_parameters *parameters, u64 *index_size)
+{
+ int result;
+ struct uds_configuration *index_config;
+ struct save_layout_sizes sizes;
+
+ if (index_size == NULL) {
+ vdo_log_error("Missing output size pointer");
+ return -EINVAL;
+ }
+
+ result = uds_make_configuration(parameters, &index_config);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "cannot compute index size");
+ return uds_status_to_errno(result);
+ }
+
+ result = compute_sizes(index_config, &sizes);
+ uds_free_configuration(index_config);
+ if (result != UDS_SUCCESS)
+ return uds_status_to_errno(result);
+
+ *index_size = sizes.total_size;
+ return UDS_SUCCESS;
+}
+
+/* Create unique data using the current time and a pseudorandom number. */
+static void create_unique_nonce_data(u8 *buffer)
+{
+ ktime_t now = current_time_ns(CLOCK_REALTIME);
+ u32 rand;
+ size_t offset = 0;
+
+ get_random_bytes(&rand, sizeof(u32));
+ memcpy(buffer + offset, &now, sizeof(now));
+ offset += sizeof(now);
+ memcpy(buffer + offset, &rand, sizeof(rand));
+ offset += sizeof(rand);
+ while (offset < NONCE_INFO_SIZE) {
+ size_t len = min(NONCE_INFO_SIZE - offset, offset);
+
+ memcpy(buffer + offset, buffer, len);
+ offset += len;
+ }
+}
+
+static u64 hash_stuff(u64 start, const void *data, size_t len)
+{
+ u32 seed = start ^ (start >> 27);
+ u8 hash_buffer[16];
+
+ murmurhash3_128(data, len, seed, hash_buffer);
+ return get_unaligned_le64(hash_buffer + 4);
+}
+
+/* Generate a primary nonce from the provided data. */
+static u64 generate_primary_nonce(const void *data, size_t len)
+{
+ return hash_stuff(0xa1b1e0fc, data, len);
+}
+
+/*
+ * Deterministically generate a secondary nonce from an existing nonce and some arbitrary data by
+ * hashing the original nonce and the data to produce a new nonce.
+ */
+static u64 generate_secondary_nonce(u64 nonce, const void *data, size_t len)
+{
+ return hash_stuff(nonce + 1, data, len);
+}
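
[Annotation, not part of the patch] A compact summary of how the rest of this file chains these nonce helpers, matching the byte layouts encoded by generate_super_block_data(), define_sub_index_nonce(), and generate_index_save_nonce() below.

/*
 * super.nonce     = generate_primary_nonce(super.nonce_info, 32)
 *                   (nonce_info = current time + random bytes, repeated to 32 bytes)
 * index.nonce     = generate_secondary_nonce(super.nonce,
 *                                            {sub_index.start_block, index_id = 0})
 * save_data.nonce = generate_secondary_nonce(index.nonce,
 *                                            {timestamp, 0, version, 0, index_save.start_block})
 */
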
+
+static int __must_check open_layout_reader(struct index_layout *layout,
+ struct layout_region *lr, off_t offset,
+ struct buffered_reader **reader_ptr)
+{
+ return uds_make_buffered_reader(layout->factory, lr->start_block + offset,
+ lr->block_count, reader_ptr);
+}
+
+static int open_region_reader(struct index_layout *layout, struct layout_region *region,
+ struct buffered_reader **reader_ptr)
+{
+ return open_layout_reader(layout, region, -layout->super.start_offset,
+ reader_ptr);
+}
+
+static int __must_check open_layout_writer(struct index_layout *layout,
+ struct layout_region *lr, off_t offset,
+ struct buffered_writer **writer_ptr)
+{
+ return uds_make_buffered_writer(layout->factory, lr->start_block + offset,
+ lr->block_count, writer_ptr);
+}
+
+static int open_region_writer(struct index_layout *layout, struct layout_region *region,
+ struct buffered_writer **writer_ptr)
+{
+ return open_layout_writer(layout, region, -layout->super.start_offset,
+ writer_ptr);
+}
+
+static void generate_super_block_data(struct save_layout_sizes *sls,
+ struct super_block_data *super)
+{
+ memset(super, 0, sizeof(*super));
+ memcpy(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE);
+ create_unique_nonce_data(super->nonce_info);
+
+ super->nonce = generate_primary_nonce(super->nonce_info,
+ sizeof(super->nonce_info));
+ super->version = SUPER_VERSION_CURRENT;
+ super->block_size = sls->block_size;
+ super->index_count = 1;
+ super->max_saves = sls->save_count;
+ super->open_chapter_blocks = sls->open_chapter_blocks;
+ super->page_map_blocks = sls->page_map_blocks;
+ super->volume_offset = 0;
+ super->start_offset = 0;
+}
+
+static void define_sub_index_nonce(struct index_layout *layout)
+{
+ struct sub_index_nonce_data {
+ u64 offset;
+ u16 index_id;
+ };
+ struct sub_index_layout *sil = &layout->index;
+ u64 primary_nonce = layout->super.nonce;
+ u8 buffer[sizeof(struct sub_index_nonce_data)] = { 0 };
+ size_t offset = 0;
+
+ encode_u64_le(buffer, &offset, sil->sub_index.start_block);
+ encode_u16_le(buffer, &offset, 0);
+ sil->nonce = generate_secondary_nonce(primary_nonce, buffer, sizeof(buffer));
+ if (sil->nonce == 0) {
+ sil->nonce = generate_secondary_nonce(~primary_nonce + 1, buffer,
+ sizeof(buffer));
+ }
+}
+
+static void setup_sub_index(struct index_layout *layout, u64 start_block,
+ struct save_layout_sizes *sls)
+{
+ struct sub_index_layout *sil = &layout->index;
+ u64 next_block = start_block;
+ unsigned int i;
+
+ sil->sub_index = (struct layout_region) {
+ .start_block = start_block,
+ .block_count = sls->sub_index_blocks,
+ .kind = RL_KIND_INDEX,
+ .instance = 0,
+ };
+
+ sil->volume = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = sls->volume_blocks,
+ .kind = RL_KIND_VOLUME,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ next_block += sls->volume_blocks;
+
+ for (i = 0; i < sls->save_count; i++) {
+ sil->saves[i].index_save = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = sls->save_blocks,
+ .kind = RL_KIND_SAVE,
+ .instance = i,
+ };
+
+ next_block += sls->save_blocks;
+ }
+
+ define_sub_index_nonce(layout);
+}
+
+static void initialize_layout(struct index_layout *layout, struct save_layout_sizes *sls)
+{
+ u64 next_block = layout->offset / sls->block_size;
+
+ layout->total_blocks = sls->total_blocks;
+ generate_super_block_data(sls, &layout->super);
+ layout->header = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_HEADER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ layout->config = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_CONFIG,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ setup_sub_index(layout, next_block, sls);
+ next_block += sls->sub_index_blocks;
+
+ layout->seal = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = 1,
+ .kind = RL_KIND_SEAL,
+ .instance = RL_SOLE_INSTANCE,
+ };
+}
+
+static int __must_check make_index_save_region_table(struct index_save_layout *isl,
+ struct region_table **table_ptr)
+{
+ int result;
+ unsigned int z;
+ struct region_table *table;
+ struct layout_region *lr;
+ u16 region_count;
+ size_t payload;
+ size_t type;
+
+ if (isl->zone_count > 0) {
+ /*
+ * Normal save regions: header, page map, volume index zones,
+ * open chapter, and possibly free space.
+ */
+ region_count = 3 + isl->zone_count;
+ if (isl->free_space.block_count > 0)
+ region_count++;
+
+ payload = sizeof(isl->save_data) + sizeof(isl->state_data);
+ type = RH_TYPE_SAVE;
+ } else {
+ /* Empty save regions: header, page map, free space. */
+ region_count = 3;
+ payload = sizeof(isl->save_data);
+ type = RH_TYPE_UNSAVED;
+ }
+
+ result = vdo_allocate_extended(struct region_table, region_count,
+ struct layout_region,
+ "layout region table for ISL", &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ lr = &table->regions[0];
+ *lr++ = isl->header;
+ *lr++ = isl->index_page_map;
+ for (z = 0; z < isl->zone_count; z++)
+ *lr++ = isl->volume_index_zones[z];
+
+ if (isl->zone_count > 0)
+ *lr++ = isl->open_chapter;
+
+ if (isl->free_space.block_count > 0)
+ *lr++ = isl->free_space;
+
+ table->header = (struct region_header) {
+ .magic = REGION_MAGIC,
+ .region_blocks = isl->index_save.block_count,
+ .type = type,
+ .version = 1,
+ .region_count = region_count,
+ .payload = payload,
+ };
+
+ table->encoded_size = (sizeof(struct region_header) + payload +
+ region_count * sizeof(struct layout_region));
+ *table_ptr = table;
+ return UDS_SUCCESS;
+}
+
+static void encode_region_table(u8 *buffer, size_t *offset, struct region_table *table)
+{
+ unsigned int i;
+
+ encode_u64_le(buffer, offset, REGION_MAGIC);
+ encode_u64_le(buffer, offset, table->header.region_blocks);
+ encode_u16_le(buffer, offset, table->header.type);
+ encode_u16_le(buffer, offset, table->header.version);
+ encode_u16_le(buffer, offset, table->header.region_count);
+ encode_u16_le(buffer, offset, table->header.payload);
+
+ for (i = 0; i < table->header.region_count; i++) {
+ encode_u64_le(buffer, offset, table->regions[i].start_block);
+ encode_u64_le(buffer, offset, table->regions[i].block_count);
+ encode_u32_le(buffer, offset, 0);
+ encode_u16_le(buffer, offset, table->regions[i].kind);
+ encode_u16_le(buffer, offset, table->regions[i].instance);
+ }
+}
+
+static int __must_check write_index_save_header(struct index_save_layout *isl,
+ struct region_table *table,
+ struct buffered_writer *writer)
+{
+ int result;
+ u8 *buffer;
+ size_t offset = 0;
+
+ result = vdo_allocate(table->encoded_size, u8, "index save data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ encode_region_table(buffer, &offset, table);
+ encode_u64_le(buffer, &offset, isl->save_data.timestamp);
+ encode_u64_le(buffer, &offset, isl->save_data.nonce);
+ encode_u32_le(buffer, &offset, isl->save_data.version);
+ encode_u32_le(buffer, &offset, 0);
+ if (isl->zone_count > 0) {
+ encode_u32_le(buffer, &offset, INDEX_STATE_VERSION_301.signature);
+ encode_u32_le(buffer, &offset, INDEX_STATE_VERSION_301.version_id);
+ encode_u64_le(buffer, &offset, isl->state_data.newest_chapter);
+ encode_u64_le(buffer, &offset, isl->state_data.oldest_chapter);
+ encode_u64_le(buffer, &offset, isl->state_data.last_save);
+ encode_u64_le(buffer, &offset, 0);
+ }
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ vdo_free(buffer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_flush_buffered_writer(writer);
+}
+
+static int write_index_save_layout(struct index_layout *layout,
+ struct index_save_layout *isl)
+{
+ int result;
+ struct region_table *table;
+ struct buffered_writer *writer;
+
+ result = make_index_save_region_table(isl, &table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_region_writer(layout, &isl->header, &writer);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return result;
+ }
+
+ result = write_index_save_header(isl, table, writer);
+ vdo_free(table);
+ uds_free_buffered_writer(writer);
+
+ return result;
+}
+
+static void reset_index_save_layout(struct index_save_layout *isl, u64 page_map_blocks)
+{
+ u64 free_blocks;
+ u64 next_block = isl->index_save.start_block;
+
+ isl->zone_count = 0;
+ memset(&isl->save_data, 0, sizeof(isl->save_data));
+
+ isl->header = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_HEADER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ isl->index_page_map = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = page_map_blocks,
+ .kind = RL_KIND_INDEX_PAGE_MAP,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ next_block += page_map_blocks;
+
+ free_blocks = isl->index_save.block_count - page_map_blocks - 1;
+ isl->free_space = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = free_blocks,
+ .kind = RL_KIND_EMPTY,
+ .instance = RL_SOLE_INSTANCE,
+ };
+}
+
+static int __must_check invalidate_old_save(struct index_layout *layout,
+ struct index_save_layout *isl)
+{
+ reset_index_save_layout(isl, layout->super.page_map_blocks);
+ return write_index_save_layout(layout, isl);
+}
+
+static int discard_index_state_data(struct index_layout *layout)
+{
+ int result;
+ int saved_result = UDS_SUCCESS;
+ unsigned int i;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ result = invalidate_old_save(layout, &layout->index.saves[i]);
+ if (result != UDS_SUCCESS)
+ saved_result = result;
+ }
+
+ if (saved_result != UDS_SUCCESS) {
+		return vdo_log_error_strerror(saved_result,
+ "%s: cannot destroy all index saves",
+ __func__);
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check make_layout_region_table(struct index_layout *layout,
+ struct region_table **table_ptr)
+{
+ int result;
+ unsigned int i;
+ /* Regions: header, config, index, volume, saves, seal */
+ u16 region_count = 5 + layout->super.max_saves;
+ u16 payload;
+ struct region_table *table;
+ struct layout_region *lr;
+
+ result = vdo_allocate_extended(struct region_table, region_count,
+ struct layout_region, "layout region table",
+ &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ lr = &table->regions[0];
+ *lr++ = layout->header;
+ *lr++ = layout->config;
+ *lr++ = layout->index.sub_index;
+ *lr++ = layout->index.volume;
+
+ for (i = 0; i < layout->super.max_saves; i++)
+ *lr++ = layout->index.saves[i].index_save;
+
+ *lr++ = layout->seal;
+
+ if (is_converted_super_block(&layout->super)) {
+ payload = sizeof(struct super_block_data);
+ } else {
+ payload = (sizeof(struct super_block_data) -
+ sizeof(layout->super.volume_offset) -
+ sizeof(layout->super.start_offset));
+ }
+
+ table->header = (struct region_header) {
+ .magic = REGION_MAGIC,
+ .region_blocks = layout->total_blocks,
+ .type = RH_TYPE_SUPER,
+ .version = 1,
+ .region_count = region_count,
+ .payload = payload,
+ };
+
+ table->encoded_size = (sizeof(struct region_header) + payload +
+ region_count * sizeof(struct layout_region));
+ *table_ptr = table;
+ return UDS_SUCCESS;
+}
+
+static int __must_check write_layout_header(struct index_layout *layout,
+ struct region_table *table,
+ struct buffered_writer *writer)
+{
+ int result;
+ u8 *buffer;
+ size_t offset = 0;
+
+ result = vdo_allocate(table->encoded_size, u8, "layout data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ encode_region_table(buffer, &offset, table);
+ memcpy(buffer + offset, &layout->super.magic_label, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ memcpy(buffer + offset, &layout->super.nonce_info, NONCE_INFO_SIZE);
+ offset += NONCE_INFO_SIZE;
+ encode_u64_le(buffer, &offset, layout->super.nonce);
+ encode_u32_le(buffer, &offset, layout->super.version);
+ encode_u32_le(buffer, &offset, layout->super.block_size);
+ encode_u16_le(buffer, &offset, layout->super.index_count);
+ encode_u16_le(buffer, &offset, layout->super.max_saves);
+ encode_u32_le(buffer, &offset, 0);
+ encode_u64_le(buffer, &offset, layout->super.open_chapter_blocks);
+ encode_u64_le(buffer, &offset, layout->super.page_map_blocks);
+
+ if (is_converted_super_block(&layout->super)) {
+ encode_u64_le(buffer, &offset, layout->super.volume_offset);
+ encode_u64_le(buffer, &offset, layout->super.start_offset);
+ }
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ vdo_free(buffer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_flush_buffered_writer(writer);
+}
+
+static int __must_check write_uds_index_config(struct index_layout *layout,
+ struct uds_configuration *config,
+ off_t offset)
+{
+ int result;
+ struct buffered_writer *writer = NULL;
+
+ result = open_layout_writer(layout, &layout->config, offset, &writer);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "failed to open config region");
+
+ result = uds_write_config_contents(writer, config, layout->super.version);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_writer(writer);
+ return vdo_log_error_strerror(result, "failed to write config region");
+ }
+
+ result = uds_flush_buffered_writer(writer);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_writer(writer);
+ return vdo_log_error_strerror(result, "cannot flush config writer");
+ }
+
+ uds_free_buffered_writer(writer);
+ return UDS_SUCCESS;
+}
+
+static int __must_check save_layout(struct index_layout *layout, off_t offset)
+{
+ int result;
+ struct buffered_writer *writer = NULL;
+ struct region_table *table;
+
+ result = make_layout_region_table(layout, &table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_layout_writer(layout, &layout->header, offset, &writer);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return result;
+ }
+
+ result = write_layout_header(layout, table, writer);
+ vdo_free(table);
+ uds_free_buffered_writer(writer);
+
+ return result;
+}
+
+static int create_index_layout(struct index_layout *layout, struct uds_configuration *config)
+{
+ int result;
+ struct save_layout_sizes sizes;
+
+ result = compute_sizes(config, &sizes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(sizes.save_count, struct index_save_layout, __func__,
+ &layout->index.saves);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initialize_layout(layout, &sizes);
+
+ result = discard_index_state_data(layout);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = write_uds_index_config(layout, config, 0);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return save_layout(layout, 0);
+}
+
+static u64 generate_index_save_nonce(u64 volume_nonce, struct index_save_layout *isl)
+{
+ struct save_nonce_data {
+ struct index_save_data data;
+ u64 offset;
+ } nonce_data;
+ u8 buffer[sizeof(nonce_data)];
+ size_t offset = 0;
+
+ encode_u64_le(buffer, &offset, isl->save_data.timestamp);
+ encode_u64_le(buffer, &offset, 0);
+ encode_u32_le(buffer, &offset, isl->save_data.version);
+ encode_u32_le(buffer, &offset, 0U);
+ encode_u64_le(buffer, &offset, isl->index_save.start_block);
+ VDO_ASSERT_LOG_ONLY(offset == sizeof(nonce_data),
+ "%zu bytes encoded of %zu expected",
+ offset, sizeof(nonce_data));
+ return generate_secondary_nonce(volume_nonce, buffer, sizeof(buffer));
+}
+
+static u64 validate_index_save_layout(struct index_save_layout *isl, u64 volume_nonce)
+{
+ if ((isl->zone_count == 0) || (isl->save_data.timestamp == 0))
+ return 0;
+
+ if (isl->save_data.nonce != generate_index_save_nonce(volume_nonce, isl))
+ return 0;
+
+ return isl->save_data.timestamp;
+}
+
+static int find_latest_uds_index_save_slot(struct index_layout *layout,
+ struct index_save_layout **isl_ptr)
+{
+ struct index_save_layout *latest = NULL;
+ struct index_save_layout *isl;
+ unsigned int i;
+ u64 save_time = 0;
+ u64 latest_time = 0;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ isl = &layout->index.saves[i];
+ save_time = validate_index_save_layout(isl, layout->index.nonce);
+ if (save_time > latest_time) {
+ latest = isl;
+ latest_time = save_time;
+ }
+ }
+
+ if (latest == NULL) {
+ vdo_log_error("No valid index save found");
+ return UDS_INDEX_NOT_SAVED_CLEANLY;
+ }
+
+ *isl_ptr = latest;
+ return UDS_SUCCESS;
+}
+
+int uds_discard_open_chapter(struct index_layout *layout)
+{
+ int result;
+ struct index_save_layout *isl;
+ struct buffered_writer *writer;
+
+ result = find_latest_uds_index_save_slot(layout, &isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_region_writer(layout, &isl->open_chapter, &writer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(writer, NULL, UDS_BLOCK_SIZE);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_writer(writer);
+ return result;
+ }
+
+ result = uds_flush_buffered_writer(writer);
+ uds_free_buffered_writer(writer);
+ return result;
+}
+
+int uds_load_index_state(struct index_layout *layout, struct uds_index *index)
+{
+ int result;
+ unsigned int zone;
+ struct index_save_layout *isl;
+ struct buffered_reader *readers[MAX_ZONES];
+
+ result = find_latest_uds_index_save_slot(layout, &isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ index->newest_virtual_chapter = isl->state_data.newest_chapter;
+ index->oldest_virtual_chapter = isl->state_data.oldest_chapter;
+ index->last_save = isl->state_data.last_save;
+
+ result = open_region_reader(layout, &isl->open_chapter, &readers[0]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_load_open_chapter(index, readers[0]);
+ uds_free_buffered_reader(readers[0]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (zone = 0; zone < isl->zone_count; zone++) {
+ result = open_region_reader(layout, &isl->volume_index_zones[zone],
+ &readers[zone]);
+ if (result != UDS_SUCCESS) {
+ for (; zone > 0; zone--)
+ uds_free_buffered_reader(readers[zone - 1]);
+
+ return result;
+ }
+ }
+
+ result = uds_load_volume_index(index->volume_index, readers, isl->zone_count);
+ for (zone = 0; zone < isl->zone_count; zone++)
+ uds_free_buffered_reader(readers[zone]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_region_reader(layout, &isl->index_page_map, &readers[0]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_read_index_page_map(index->volume->index_page_map, readers[0]);
+ uds_free_buffered_reader(readers[0]);
+
+ return result;
+}
+
+static struct index_save_layout *select_oldest_index_save_layout(struct index_layout *layout)
+{
+ struct index_save_layout *oldest = NULL;
+ struct index_save_layout *isl;
+ unsigned int i;
+ u64 save_time = 0;
+ u64 oldest_time = 0;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ isl = &layout->index.saves[i];
+ save_time = validate_index_save_layout(isl, layout->index.nonce);
+ if (oldest == NULL || save_time < oldest_time) {
+ oldest = isl;
+ oldest_time = save_time;
+ }
+ }
+
+ return oldest;
+}
+
+static void instantiate_index_save_layout(struct index_save_layout *isl,
+ struct super_block_data *super,
+ u64 volume_nonce, unsigned int zone_count)
+{
+ unsigned int z;
+ u64 next_block;
+ u64 free_blocks;
+ u64 volume_index_blocks;
+
+ isl->zone_count = zone_count;
+ memset(&isl->save_data, 0, sizeof(isl->save_data));
+ isl->save_data.timestamp = ktime_to_ms(current_time_ns(CLOCK_REALTIME));
+ isl->save_data.version = 1;
+ isl->save_data.nonce = generate_index_save_nonce(volume_nonce, isl);
+
+ next_block = isl->index_save.start_block;
+ isl->header = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_HEADER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ isl->index_page_map = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = super->page_map_blocks,
+ .kind = RL_KIND_INDEX_PAGE_MAP,
+ .instance = RL_SOLE_INSTANCE,
+ };
+ next_block += super->page_map_blocks;
+
+ free_blocks = (isl->index_save.block_count - 1 -
+ super->page_map_blocks -
+ super->open_chapter_blocks);
+ volume_index_blocks = free_blocks / isl->zone_count;
+ for (z = 0; z < isl->zone_count; z++) {
+ isl->volume_index_zones[z] = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = volume_index_blocks,
+ .kind = RL_KIND_VOLUME_INDEX,
+ .instance = z,
+ };
+
+ next_block += volume_index_blocks;
+ free_blocks -= volume_index_blocks;
+ }
+
+ isl->open_chapter = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = super->open_chapter_blocks,
+ .kind = RL_KIND_OPEN_CHAPTER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ next_block += super->open_chapter_blocks;
+
+ isl->free_space = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = free_blocks,
+ .kind = RL_KIND_EMPTY,
+ .instance = RL_SOLE_INSTANCE,
+ };
+}
+
+static int setup_uds_index_save_slot(struct index_layout *layout,
+ unsigned int zone_count,
+ struct index_save_layout **isl_ptr)
+{
+ int result;
+ struct index_save_layout *isl;
+
+ isl = select_oldest_index_save_layout(layout);
+ result = invalidate_old_save(layout, isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ instantiate_index_save_layout(isl, &layout->super, layout->index.nonce,
+ zone_count);
+
+ *isl_ptr = isl;
+ return UDS_SUCCESS;
+}
+
+static void cancel_uds_index_save(struct index_save_layout *isl)
+{
+ memset(&isl->save_data, 0, sizeof(isl->save_data));
+ memset(&isl->state_data, 0, sizeof(isl->state_data));
+ isl->zone_count = 0;
+}
+
+int uds_save_index_state(struct index_layout *layout, struct uds_index *index)
+{
+ int result;
+ unsigned int zone;
+ struct index_save_layout *isl;
+ struct buffered_writer *writers[MAX_ZONES];
+
+ result = setup_uds_index_save_slot(layout, index->zone_count, &isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ isl->state_data = (struct index_state_data301) {
+ .newest_chapter = index->newest_virtual_chapter,
+ .oldest_chapter = index->oldest_virtual_chapter,
+ .last_save = index->last_save,
+ };
+
+ result = open_region_writer(layout, &isl->open_chapter, &writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ result = uds_save_open_chapter(index, writers[0]);
+ uds_free_buffered_writer(writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ for (zone = 0; zone < index->zone_count; zone++) {
+ result = open_region_writer(layout, &isl->volume_index_zones[zone],
+ &writers[zone]);
+ if (result != UDS_SUCCESS) {
+ for (; zone > 0; zone--)
+ uds_free_buffered_writer(writers[zone - 1]);
+
+ cancel_uds_index_save(isl);
+ return result;
+ }
+ }
+
+ result = uds_save_volume_index(index->volume_index, writers, index->zone_count);
+ for (zone = 0; zone < index->zone_count; zone++)
+ uds_free_buffered_writer(writers[zone]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ result = open_region_writer(layout, &isl->index_page_map, &writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ result = uds_write_index_page_map(index->volume->index_page_map, writers[0]);
+ uds_free_buffered_writer(writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ return write_index_save_layout(layout, isl);
+}
+
+static int __must_check load_region_table(struct buffered_reader *reader,
+ struct region_table **table_ptr)
+{
+ int result;
+ unsigned int i;
+ struct region_header header;
+ struct region_table *table;
+ u8 buffer[sizeof(struct region_header)];
+ size_t offset = 0;
+
+ result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read region table header");
+
+ decode_u64_le(buffer, &offset, &header.magic);
+ decode_u64_le(buffer, &offset, &header.region_blocks);
+ decode_u16_le(buffer, &offset, &header.type);
+ decode_u16_le(buffer, &offset, &header.version);
+ decode_u16_le(buffer, &offset, &header.region_count);
+ decode_u16_le(buffer, &offset, &header.payload);
+
+ if (header.magic != REGION_MAGIC)
+ return UDS_NO_INDEX;
+
+ if (header.version != 1) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "unknown region table version %hu",
+ header.version);
+ }
+
+ result = vdo_allocate_extended(struct region_table, header.region_count,
+ struct layout_region,
+ "single file layout region table", &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ table->header = header;
+ for (i = 0; i < header.region_count; i++) {
+ u8 region_buffer[sizeof(struct layout_region)];
+
+ offset = 0;
+ result = uds_read_from_buffered_reader(reader, region_buffer,
+ sizeof(region_buffer));
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "cannot read region table layouts");
+ }
+
+ decode_u64_le(region_buffer, &offset, &table->regions[i].start_block);
+ decode_u64_le(region_buffer, &offset, &table->regions[i].block_count);
+ offset += sizeof(u32);
+ decode_u16_le(region_buffer, &offset, &table->regions[i].kind);
+ decode_u16_le(region_buffer, &offset, &table->regions[i].instance);
+ }
+
+ *table_ptr = table;
+ return UDS_SUCCESS;
+}
+
+static int __must_check read_super_block_data(struct buffered_reader *reader,
+ struct index_layout *layout,
+ size_t saved_size)
+{
+ int result;
+ struct super_block_data *super = &layout->super;
+ u8 *buffer;
+ size_t offset = 0;
+
+ result = vdo_allocate(saved_size, u8, "super block data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, buffer, saved_size);
+ if (result != UDS_SUCCESS) {
+ vdo_free(buffer);
+ return vdo_log_error_strerror(result, "cannot read region table header");
+ }
+
+ memcpy(&super->magic_label, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ memcpy(&super->nonce_info, buffer + offset, NONCE_INFO_SIZE);
+ offset += NONCE_INFO_SIZE;
+ decode_u64_le(buffer, &offset, &super->nonce);
+ decode_u32_le(buffer, &offset, &super->version);
+ decode_u32_le(buffer, &offset, &super->block_size);
+ decode_u16_le(buffer, &offset, &super->index_count);
+ decode_u16_le(buffer, &offset, &super->max_saves);
+ offset += sizeof(u32);
+ decode_u64_le(buffer, &offset, &super->open_chapter_blocks);
+ decode_u64_le(buffer, &offset, &super->page_map_blocks);
+
+ if (is_converted_super_block(super)) {
+ decode_u64_le(buffer, &offset, &super->volume_offset);
+ decode_u64_le(buffer, &offset, &super->start_offset);
+ } else {
+ super->volume_offset = 0;
+ super->start_offset = 0;
+ }
+
+ vdo_free(buffer);
+
+ if (memcmp(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE) != 0)
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unknown superblock magic label");
+
+ if ((super->version < SUPER_VERSION_MINIMUM) ||
+ (super->version == 4) || (super->version == 5) || (super->version == 6) ||
+ (super->version > SUPER_VERSION_MAXIMUM)) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "unknown superblock version number %u",
+ super->version);
+ }
+
+ if (super->volume_offset < super->start_offset) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "inconsistent offsets (start %llu, volume %llu)",
+ (unsigned long long) super->start_offset,
+ (unsigned long long) super->volume_offset);
+ }
+
+ /* Sub-indexes are no longer used but the layout retains this field. */
+ if (super->index_count != 1) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "invalid subindex count %u",
+ super->index_count);
+ }
+
+ if (generate_primary_nonce(super->nonce_info, sizeof(super->nonce_info)) != super->nonce) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "inconsistent superblock nonce");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check verify_region(struct layout_region *lr, u64 start_block,
+ enum region_kind kind, unsigned int instance)
+{
+ if (lr->start_block != start_block)
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "incorrect layout region offset");
+
+ if (lr->kind != kind)
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "incorrect layout region kind");
+
+ if (lr->instance != instance) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "incorrect layout region instance");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check verify_sub_index(struct index_layout *layout, u64 start_block,
+ struct region_table *table)
+{
+ int result;
+ unsigned int i;
+ struct sub_index_layout *sil = &layout->index;
+ u64 next_block = start_block;
+
+ sil->sub_index = table->regions[2];
+ result = verify_region(&sil->sub_index, next_block, RL_KIND_INDEX, 0);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ define_sub_index_nonce(layout);
+
+ sil->volume = table->regions[3];
+ result = verify_region(&sil->volume, next_block, RL_KIND_VOLUME,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += sil->volume.block_count + layout->super.volume_offset;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ sil->saves[i].index_save = table->regions[i + 4];
+ result = verify_region(&sil->saves[i].index_save, next_block,
+ RL_KIND_SAVE, i);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += sil->saves[i].index_save.block_count;
+ }
+
+ next_block -= layout->super.volume_offset;
+ if (next_block != start_block + sil->sub_index.block_count) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "sub index region does not span all saves");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check reconstitute_layout(struct index_layout *layout,
+ struct region_table *table, u64 first_block)
+{
+ int result;
+ u64 next_block = first_block;
+
+ result = vdo_allocate(layout->super.max_saves, struct index_save_layout,
+ __func__, &layout->index.saves);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ layout->total_blocks = table->header.region_blocks;
+
+ layout->header = table->regions[0];
+ result = verify_region(&layout->header, next_block++, RL_KIND_HEADER,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ layout->config = table->regions[1];
+ result = verify_region(&layout->config, next_block++, RL_KIND_CONFIG,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = verify_sub_index(layout, next_block, table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += layout->index.sub_index.block_count;
+
+ layout->seal = table->regions[table->header.region_count - 1];
+ result = verify_region(&layout->seal, next_block + layout->super.volume_offset,
+ RL_KIND_SEAL, RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (++next_block != (first_block + layout->total_blocks)) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "layout table does not span total blocks");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check load_super_block(struct index_layout *layout, size_t block_size,
+ u64 first_block, struct buffered_reader *reader)
+{
+ int result;
+ struct region_table *table = NULL;
+ struct super_block_data *super = &layout->super;
+
+ result = load_region_table(reader, &table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (table->header.type != RH_TYPE_SUPER) {
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "not a superblock region table");
+ }
+
+ result = read_super_block_data(reader, layout, table->header.payload);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return vdo_log_error_strerror(result, "unknown superblock format");
+ }
+
+ if (super->block_size != block_size) {
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "superblock saved block_size %u differs from supplied block_size %zu",
+ super->block_size, block_size);
+ }
+
+ first_block -= (super->volume_offset - super->start_offset);
+ result = reconstitute_layout(layout, table, first_block);
+ vdo_free(table);
+ return result;
+}
+
+static int __must_check read_index_save_data(struct buffered_reader *reader,
+ struct index_save_layout *isl,
+ size_t saved_size)
+{
+ int result;
+ struct index_state_version file_version;
+ u8 buffer[sizeof(struct index_save_data) + sizeof(struct index_state_data301)];
+ size_t offset = 0;
+
+ if (saved_size != sizeof(buffer)) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unexpected index save data size %zu",
+ saved_size);
+ }
+
+ result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read index save data");
+
+ decode_u64_le(buffer, &offset, &isl->save_data.timestamp);
+ decode_u64_le(buffer, &offset, &isl->save_data.nonce);
+ decode_u32_le(buffer, &offset, &isl->save_data.version);
+ offset += sizeof(u32);
+
+ if (isl->save_data.version > 1) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "unknown index save version number %u",
+ isl->save_data.version);
+ }
+
+ decode_s32_le(buffer, &offset, &file_version.signature);
+ decode_s32_le(buffer, &offset, &file_version.version_id);
+
+ if ((file_version.signature != INDEX_STATE_VERSION_301.signature) ||
+ (file_version.version_id != INDEX_STATE_VERSION_301.version_id)) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "index state version %d,%d is unsupported",
+ file_version.signature,
+ file_version.version_id);
+ }
+
+ decode_u64_le(buffer, &offset, &isl->state_data.newest_chapter);
+ decode_u64_le(buffer, &offset, &isl->state_data.oldest_chapter);
+ decode_u64_le(buffer, &offset, &isl->state_data.last_save);
+ /* Skip past some historical fields that are now unused */
+ offset += sizeof(u32) + sizeof(u32);
+ return UDS_SUCCESS;
+}
+
+static int __must_check reconstruct_index_save(struct index_save_layout *isl,
+ struct region_table *table)
+{
+ int result;
+ unsigned int z;
+ struct layout_region *last_region;
+ u64 next_block = isl->index_save.start_block;
+ u64 last_block = next_block + isl->index_save.block_count;
+
+ isl->zone_count = table->header.region_count - 3;
+
+ last_region = &table->regions[table->header.region_count - 1];
+ if (last_region->kind == RL_KIND_EMPTY) {
+ isl->free_space = *last_region;
+ isl->zone_count--;
+ } else {
+ isl->free_space = (struct layout_region) {
+ .start_block = last_block,
+ .block_count = 0,
+ .kind = RL_KIND_EMPTY,
+ .instance = RL_SOLE_INSTANCE,
+ };
+ }
+
+ isl->header = table->regions[0];
+ result = verify_region(&isl->header, next_block++, RL_KIND_HEADER,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ isl->index_page_map = table->regions[1];
+ result = verify_region(&isl->index_page_map, next_block, RL_KIND_INDEX_PAGE_MAP,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->index_page_map.block_count;
+
+ for (z = 0; z < isl->zone_count; z++) {
+ isl->volume_index_zones[z] = table->regions[z + 2];
+ result = verify_region(&isl->volume_index_zones[z], next_block,
+ RL_KIND_VOLUME_INDEX, z);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->volume_index_zones[z].block_count;
+ }
+
+ isl->open_chapter = table->regions[isl->zone_count + 2];
+ result = verify_region(&isl->open_chapter, next_block, RL_KIND_OPEN_CHAPTER,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->open_chapter.block_count;
+
+ result = verify_region(&isl->free_space, next_block, RL_KIND_EMPTY,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->free_space.block_count;
+ if (next_block != last_block) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "index save layout table incomplete");
+ }
+
+ return UDS_SUCCESS;
+}
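For reference, the region order that reconstruct_index_save() checks for a save containing N volume index zones is:

	regions[0]       header          RL_KIND_HEADER
	regions[1]       index page map  RL_KIND_INDEX_PAGE_MAP
	regions[2..N+1]  volume index    RL_KIND_VOLUME_INDEX, instances 0..N-1
	regions[N+2]     open chapter    RL_KIND_OPEN_CHAPTER
	regions[N+3]     free space      RL_KIND_EMPTY (synthesized above when the
	                                 table on disk ends without an empty region)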
+
+static int __must_check load_index_save(struct index_save_layout *isl,
+ struct buffered_reader *reader,
+ unsigned int instance)
+{
+ int result;
+ struct region_table *table = NULL;
+
+ result = load_region_table(reader, &table);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result, "cannot read index save %u header",
+ instance);
+ }
+
+ if (table->header.region_blocks != isl->index_save.block_count) {
+ u64 region_blocks = table->header.region_blocks;
+
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unexpected index save %u region block count %llu",
+ instance,
+ (unsigned long long) region_blocks);
+ }
+
+ if (table->header.type == RH_TYPE_UNSAVED) {
+ vdo_free(table);
+ reset_index_save_layout(isl, 0);
+ return UDS_SUCCESS;
+ }
+
+ if (table->header.type != RH_TYPE_SAVE) {
+ vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unexpected index save %u header type %u",
+ instance, table->header.type);
+ vdo_free(table);
+ return UDS_CORRUPT_DATA;
+ }
+
+ result = read_index_save_data(reader, isl, table->header.payload);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return vdo_log_error_strerror(result,
+ "unknown index save %u data format",
+ instance);
+ }
+
+ result = reconstruct_index_save(isl, table);
+ vdo_free(table);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result, "cannot reconstruct index save %u",
+ instance);
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check load_sub_index_regions(struct index_layout *layout)
+{
+ int result;
+ unsigned int j;
+ struct index_save_layout *isl;
+ struct buffered_reader *reader;
+
+ for (j = 0; j < layout->super.max_saves; j++) {
+ isl = &layout->index.saves[j];
+ result = open_region_reader(layout, &isl->index_save, &reader);
+
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "cannot get reader for index 0 save %u",
+ j);
+ return result;
+ }
+
+ result = load_index_save(isl, reader, j);
+ uds_free_buffered_reader(reader);
+ if (result != UDS_SUCCESS) {
+ /* Another save slot might be valid. */
+ reset_index_save_layout(isl, 0);
+ continue;
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check verify_uds_index_config(struct index_layout *layout,
+ struct uds_configuration *config)
+{
+ int result;
+ struct buffered_reader *reader = NULL;
+ u64 offset;
+
+ offset = layout->super.volume_offset - layout->super.start_offset;
+ result = open_layout_reader(layout, &layout->config, offset, &reader);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "failed to open config reader");
+
+ result = uds_validate_config_contents(reader, config);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_reader(reader);
+ return vdo_log_error_strerror(result, "failed to read config region");
+ }
+
+ uds_free_buffered_reader(reader);
+ return UDS_SUCCESS;
+}
+
+static int load_index_layout(struct index_layout *layout, struct uds_configuration *config)
+{
+ int result;
+ struct buffered_reader *reader;
+
+ result = uds_make_buffered_reader(layout->factory,
+ layout->offset / UDS_BLOCK_SIZE, 1, &reader);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "unable to read superblock");
+
+ result = load_super_block(layout, UDS_BLOCK_SIZE,
+ layout->offset / UDS_BLOCK_SIZE, reader);
+ uds_free_buffered_reader(reader);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = verify_uds_index_config(layout, config);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return load_sub_index_regions(layout);
+}
+
+static int create_layout_factory(struct index_layout *layout,
+ const struct uds_configuration *config)
+{
+ int result;
+ size_t writable_size;
+ struct io_factory *factory = NULL;
+
+ result = uds_make_io_factory(config->bdev, &factory);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ writable_size = uds_get_writable_size(factory) & -UDS_BLOCK_SIZE;
+ if (writable_size < config->size + config->offset) {
+ uds_put_io_factory(factory);
+ vdo_log_error("index storage (%zu) is smaller than the requested size %zu",
+ writable_size, config->size + config->offset);
+ return -ENOSPC;
+ }
+
+ layout->factory = factory;
+ layout->factory_size = (config->size > 0) ? config->size : writable_size;
+ layout->offset = config->offset;
+ return UDS_SUCCESS;
+}
+
+int uds_make_index_layout(struct uds_configuration *config, bool new_layout,
+ struct index_layout **layout_ptr)
+{
+ int result;
+ struct index_layout *layout = NULL;
+ struct save_layout_sizes sizes;
+
+ result = compute_sizes(config, &sizes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct index_layout, __func__, &layout);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = create_layout_factory(layout, config);
+ if (result != UDS_SUCCESS) {
+ uds_free_index_layout(layout);
+ return result;
+ }
+
+ if (layout->factory_size < sizes.total_size) {
+ vdo_log_error("index storage (%zu) is smaller than the required size %llu",
+ layout->factory_size,
+ (unsigned long long) sizes.total_size);
+ uds_free_index_layout(layout);
+ return -ENOSPC;
+ }
+
+ if (new_layout)
+ result = create_index_layout(layout, config);
+ else
+ result = load_index_layout(layout, config);
+ if (result != UDS_SUCCESS) {
+ uds_free_index_layout(layout);
+ return result;
+ }
+
+ *layout_ptr = layout;
+ return UDS_SUCCESS;
+}
+
+void uds_free_index_layout(struct index_layout *layout)
+{
+ if (layout == NULL)
+ return;
+
+ vdo_free(layout->index.saves);
+ if (layout->factory != NULL)
+ uds_put_io_factory(layout->factory);
+
+ vdo_free(layout);
+}
+
+int uds_replace_index_layout_storage(struct index_layout *layout,
+ struct block_device *bdev)
+{
+ return uds_replace_storage(layout->factory, bdev);
+}
+
+/* Obtain a dm_bufio_client for the volume region. */
+int uds_open_volume_bufio(struct index_layout *layout, size_t block_size,
+ unsigned int reserved_buffers,
+ struct dm_bufio_client **client_ptr)
+{
+ off_t offset = (layout->index.volume.start_block +
+ layout->super.volume_offset -
+ layout->super.start_offset);
+
+ return uds_make_bufio(layout->factory, offset, block_size, reserved_buffers,
+ client_ptr);
+}
+
+u64 uds_get_volume_nonce(struct index_layout *layout)
+{
+ return layout->index.nonce;
+}
diff --git a/drivers/md/dm-vdo/indexer/index-layout.h b/drivers/md/dm-vdo/indexer/index-layout.h
new file mode 100644
index 000000000000..e9ac6f4302d6
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-layout.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_LAYOUT_H
+#define UDS_INDEX_LAYOUT_H
+
+#include "config.h"
+#include "indexer.h"
+#include "io-factory.h"
+
+/*
+ * The index layout describes the format of the index on the underlying storage, and is responsible
+ * for creating those structures when the index is first created. It also validates the index data
+ * when loading a saved index, and updates it when saving the index.
+ */
+
+struct index_layout;
+
+int __must_check uds_make_index_layout(struct uds_configuration *config, bool new_layout,
+ struct index_layout **layout_ptr);
+
+void uds_free_index_layout(struct index_layout *layout);
+
+int __must_check uds_replace_index_layout_storage(struct index_layout *layout,
+ struct block_device *bdev);
+
+int __must_check uds_load_index_state(struct index_layout *layout,
+ struct uds_index *index);
+
+int __must_check uds_save_index_state(struct index_layout *layout,
+ struct uds_index *index);
+
+int __must_check uds_discard_open_chapter(struct index_layout *layout);
+
+u64 __must_check uds_get_volume_nonce(struct index_layout *layout);
+
+int __must_check uds_open_volume_bufio(struct index_layout *layout, size_t block_size,
+ unsigned int reserved_buffers,
+ struct dm_bufio_client **client_ptr);
+
+#endif /* UDS_INDEX_LAYOUT_H */
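As a rough sketch of how the declarations above fit together, a caller might create and release a layout like this (illustrative only; example_create_layout is not part of the patch, and configuration setup and error handling are elided):

	static int example_create_layout(struct uds_configuration *config)
	{
		struct index_layout *layout = NULL;
		int result;

		/* true formats a new layout; false loads and validates a saved one. */
		result = uds_make_index_layout(config, true, &layout);
		if (result != UDS_SUCCESS)
			return result;

		/* The volume nonce ties the volume data to this layout. */
		vdo_log_info("volume nonce %llu",
			     (unsigned long long) uds_get_volume_nonce(layout));

		uds_free_index_layout(layout);
		return UDS_SUCCESS;
	}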
diff --git a/drivers/md/dm-vdo/indexer/index-page-map.c b/drivers/md/dm-vdo/indexer/index-page-map.c
new file mode 100644
index 000000000000..00b44e07d0c1
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-page-map.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "index-page-map.h"
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+#include "thread-utils.h"
+
+#include "hash-utils.h"
+#include "indexer.h"
+
+/*
+ * The index page map is conceptually a two-dimensional array indexed by chapter number and index
+ * page number within the chapter. Each entry contains the number of the last delta list on that
+ * index page. In order to save memory, the information for the last page in each chapter is not
+ * recorded, as it is known from the geometry.
+ */
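In code terms the two-dimensional array is flattened into the entries array; the helper below (illustrative only, not part of the patch) shows the indexing used by uds_update_index_page_map() and uds_find_index_page_number():

	static inline u16 example_page_map_entry(const struct index_page_map *map,
						 u32 chapter_number, u32 index_page_number)
	{
		/*
		 * Only valid for index_page_number < map->entries_per_chapter;
		 * the last page of each chapter has no stored entry.
		 */
		return map->entries[(chapter_number * map->entries_per_chapter) +
				    index_page_number];
	}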
+
+static const u8 PAGE_MAP_MAGIC[] = "ALBIPM02";
+
+#define PAGE_MAP_MAGIC_LENGTH (sizeof(PAGE_MAP_MAGIC) - 1)
+
+static inline u32 get_entry_count(const struct index_geometry *geometry)
+{
+ return geometry->chapters_per_volume * (geometry->index_pages_per_chapter - 1);
+}
+
+int uds_make_index_page_map(const struct index_geometry *geometry,
+ struct index_page_map **map_ptr)
+{
+ int result;
+ struct index_page_map *map;
+
+ result = vdo_allocate(1, struct index_page_map, "page map", &map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ map->geometry = geometry;
+ map->entries_per_chapter = geometry->index_pages_per_chapter - 1;
+ result = vdo_allocate(get_entry_count(geometry), u16, "Index Page Map Entries",
+ &map->entries);
+ if (result != VDO_SUCCESS) {
+ uds_free_index_page_map(map);
+ return result;
+ }
+
+ *map_ptr = map;
+ return UDS_SUCCESS;
+}
+
+void uds_free_index_page_map(struct index_page_map *map)
+{
+ if (map != NULL) {
+ vdo_free(map->entries);
+ vdo_free(map);
+ }
+}
+
+void uds_update_index_page_map(struct index_page_map *map, u64 virtual_chapter_number,
+ u32 chapter_number, u32 index_page_number,
+ u32 delta_list_number)
+{
+ size_t slot;
+
+ map->last_update = virtual_chapter_number;
+ if (index_page_number == map->entries_per_chapter)
+ return;
+
+ slot = (chapter_number * map->entries_per_chapter) + index_page_number;
+ map->entries[slot] = delta_list_number;
+}
+
+u32 uds_find_index_page_number(const struct index_page_map *map,
+ const struct uds_record_name *name, u32 chapter_number)
+{
+ u32 delta_list_number = uds_hash_to_chapter_delta_list(name, map->geometry);
+ u32 slot = chapter_number * map->entries_per_chapter;
+ u32 page;
+
+ for (page = 0; page < map->entries_per_chapter; page++) {
+ if (delta_list_number <= map->entries[slot + page])
+ break;
+ }
+
+ return page;
+}
+
+void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_number,
+ u32 index_page_number, u32 *lowest_list,
+ u32 *highest_list)
+{
+ u32 slot = chapter_number * map->entries_per_chapter;
+
+ *lowest_list = ((index_page_number == 0) ?
+ 0 : map->entries[slot + index_page_number - 1] + 1);
+ *highest_list = ((index_page_number < map->entries_per_chapter) ?
+ map->entries[slot + index_page_number] :
+ map->geometry->delta_lists_per_chapter - 1);
+}
+
+u64 uds_compute_index_page_map_save_size(const struct index_geometry *geometry)
+{
+ return PAGE_MAP_MAGIC_LENGTH + sizeof(u64) + sizeof(u16) * get_entry_count(geometry);
+}
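As a worked example with a hypothetical geometry of 1024 chapters_per_volume and 32 index_pages_per_chapter: get_entry_count() is 1024 * (32 - 1) = 31744, so the saved size is 8 (magic) + 8 (last_update) + 2 * 31744 = 63504 bytes.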
+
+int uds_write_index_page_map(struct index_page_map *map, struct buffered_writer *writer)
+{
+ int result;
+ u8 *buffer;
+ size_t offset = 0;
+ u64 saved_size = uds_compute_index_page_map_save_size(map->geometry);
+ u32 i;
+
+ result = vdo_allocate(saved_size, u8, "page map data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ memcpy(buffer, PAGE_MAP_MAGIC, PAGE_MAP_MAGIC_LENGTH);
+ offset += PAGE_MAP_MAGIC_LENGTH;
+ encode_u64_le(buffer, &offset, map->last_update);
+ for (i = 0; i < get_entry_count(map->geometry); i++)
+ encode_u16_le(buffer, &offset, map->entries[i]);
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ vdo_free(buffer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_flush_buffered_writer(writer);
+}
+
+int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader *reader)
+{
+ int result;
+ u8 magic[PAGE_MAP_MAGIC_LENGTH];
+ u8 *buffer;
+ size_t offset = 0;
+ u64 saved_size = uds_compute_index_page_map_save_size(map->geometry);
+ u32 i;
+
+ result = vdo_allocate(saved_size, u8, "page map data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, buffer, saved_size);
+ if (result != UDS_SUCCESS) {
+ vdo_free(buffer);
+ return result;
+ }
+
+ memcpy(&magic, buffer, PAGE_MAP_MAGIC_LENGTH);
+ offset += PAGE_MAP_MAGIC_LENGTH;
+ if (memcmp(magic, PAGE_MAP_MAGIC, PAGE_MAP_MAGIC_LENGTH) != 0) {
+ vdo_free(buffer);
+ return UDS_CORRUPT_DATA;
+ }
+
+ decode_u64_le(buffer, &offset, &map->last_update);
+ for (i = 0; i < get_entry_count(map->geometry); i++)
+ decode_u16_le(buffer, &offset, &map->entries[i]);
+
+ vdo_free(buffer);
+ vdo_log_debug("read index page map, last update %llu",
+ (unsigned long long) map->last_update);
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/index-page-map.h b/drivers/md/dm-vdo/indexer/index-page-map.h
new file mode 100644
index 000000000000..b327c0bb9656
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-page-map.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_PAGE_MAP_H
+#define UDS_INDEX_PAGE_MAP_H
+
+#include "geometry.h"
+#include "io-factory.h"
+
+/*
+ * The index maintains a page map which records how the chapter delta lists are distributed among
+ * the index pages for each chapter, allowing the volume to be efficient about reading only pages
+ * that it knows it will need.
+ */
+
+struct index_page_map {
+ const struct index_geometry *geometry;
+ u64 last_update;
+ u32 entries_per_chapter;
+ u16 *entries;
+};
+
+int __must_check uds_make_index_page_map(const struct index_geometry *geometry,
+ struct index_page_map **map_ptr);
+
+void uds_free_index_page_map(struct index_page_map *map);
+
+int __must_check uds_read_index_page_map(struct index_page_map *map,
+ struct buffered_reader *reader);
+
+int __must_check uds_write_index_page_map(struct index_page_map *map,
+ struct buffered_writer *writer);
+
+void uds_update_index_page_map(struct index_page_map *map, u64 virtual_chapter_number,
+ u32 chapter_number, u32 index_page_number,
+ u32 delta_list_number);
+
+u32 __must_check uds_find_index_page_number(const struct index_page_map *map,
+ const struct uds_record_name *name,
+ u32 chapter_number);
+
+void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_number,
+ u32 index_page_number, u32 *lowest_list,
+ u32 *highest_list);
+
+u64 uds_compute_index_page_map_save_size(const struct index_geometry *geometry);
+
+#endif /* UDS_INDEX_PAGE_MAP_H */
diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c
new file mode 100644
index 000000000000..aee0914d604a
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-session.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "index-session.h"
+
+#include <linux/atomic.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "time-utils.h"
+
+#include "funnel-requestqueue.h"
+#include "index.h"
+#include "index-layout.h"
+
+/*
+ * The index session contains a lock (the request_mutex) which ensures that only one thread can
+ * change the state of its index at a time. The state field indicates the current state of the
+ * index through a set of descriptive flags. The request_cond must be broadcast (while holding the
+ * request_mutex) whenever a non-transient state flag is cleared. The request_mutex also guards the
+ * count of requests currently in progress so that they can be drained when suspending or closing
+ * the index.
+ *
+ * If the index session is suspended shortly after opening an index, it may have to suspend during
+ * a rebuild. Depending on the size of the index, a rebuild may take a significant amount of time,
+ * so UDS allows the rebuild to be paused in order to suspend the session in a timely manner. When
+ * the index session is resumed, the rebuild can continue from where it left off. If the index
+ * session is shut down with a suspended rebuild, the rebuild progress is abandoned and the rebuild
+ * will start from the beginning the next time the index is loaded. The mutex and status fields in
+ * the index_load_context are used to record the state of any interrupted rebuild.
+ */
+
+enum index_session_flag_bit {
+ IS_FLAG_BIT_START = 8,
+ /* The session has started loading an index but not completed it. */
+ IS_FLAG_BIT_LOADING = IS_FLAG_BIT_START,
+ /* The session has loaded an index, which can handle requests. */
+ IS_FLAG_BIT_LOADED,
+ /* The session's index has been permanently disabled. */
+ IS_FLAG_BIT_DISABLED,
+ /* The session's index is suspended. */
+ IS_FLAG_BIT_SUSPENDED,
+ /* The session is handling some index state change. */
+ IS_FLAG_BIT_WAITING,
+ /* The session's index is closing and draining requests. */
+ IS_FLAG_BIT_CLOSING,
+ /* The session is being destroyed and is draining requests. */
+ IS_FLAG_BIT_DESTROYING,
+};
+
+enum index_session_flag {
+ IS_FLAG_LOADED = (1 << IS_FLAG_BIT_LOADED),
+ IS_FLAG_LOADING = (1 << IS_FLAG_BIT_LOADING),
+ IS_FLAG_DISABLED = (1 << IS_FLAG_BIT_DISABLED),
+ IS_FLAG_SUSPENDED = (1 << IS_FLAG_BIT_SUSPENDED),
+ IS_FLAG_WAITING = (1 << IS_FLAG_BIT_WAITING),
+ IS_FLAG_CLOSING = (1 << IS_FLAG_BIT_CLOSING),
+ IS_FLAG_DESTROYING = (1 << IS_FLAG_BIT_DESTROYING),
+};
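Taken with the comment above, a caller typically drives a session through these states in roughly the following order (a minimal sketch; example_session_lifecycle is not part of the patch, the parameters are placeholders, and error handling is abbreviated):

	static int example_session_lifecycle(struct block_device *bdev)
	{
		struct uds_index_session *session;
		struct uds_parameters parameters = { .bdev = bdev };
		int result;

		result = uds_create_index_session(&session);
		if (result != 0)
			return result;

		result = uds_open_index(UDS_CREATE, &parameters, session);
		if (result != 0) {
			uds_destroy_index_session(session);
			return result;
		}

		/* ... uds_launch_request() calls complete via their callbacks ... */

		result = uds_suspend_index_session(session, true); /* drain and save */
		if (result == 0)
			result = uds_resume_index_session(session, bdev);

		uds_close_index(session);		/* saves and releases the index */
		uds_destroy_index_session(session);	/* frees the session itself */
		return result;
	}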
+
+/* Release a reference to an index session. */
+static void release_index_session(struct uds_index_session *index_session)
+{
+ mutex_lock(&index_session->request_mutex);
+ if (--index_session->request_count == 0)
+ uds_broadcast_cond(&index_session->request_cond);
+ mutex_unlock(&index_session->request_mutex);
+}
+
+/*
+ * Acquire a reference to the index session for an asynchronous index request. The reference must
+ * eventually be released with a corresponding call to release_index_session().
+ */
+static int get_index_session(struct uds_index_session *index_session)
+{
+ unsigned int state;
+ int result = UDS_SUCCESS;
+
+ mutex_lock(&index_session->request_mutex);
+ index_session->request_count++;
+ state = index_session->state;
+ mutex_unlock(&index_session->request_mutex);
+
+ if (state == IS_FLAG_LOADED) {
+ return UDS_SUCCESS;
+ } else if (state & IS_FLAG_DISABLED) {
+ result = UDS_DISABLED;
+ } else if ((state & IS_FLAG_LOADING) ||
+ (state & IS_FLAG_SUSPENDED) ||
+ (state & IS_FLAG_WAITING)) {
+ result = -EBUSY;
+ } else {
+ result = UDS_NO_INDEX;
+ }
+
+ release_index_session(index_session);
+ return result;
+}
+
+int uds_launch_request(struct uds_request *request)
+{
+ size_t internal_size;
+ int result;
+
+ if (request->callback == NULL) {
+ vdo_log_error("missing required callback");
+ return -EINVAL;
+ }
+
+ switch (request->type) {
+ case UDS_DELETE:
+ case UDS_POST:
+ case UDS_QUERY:
+ case UDS_QUERY_NO_UPDATE:
+ case UDS_UPDATE:
+ break;
+ default:
+ vdo_log_error("received invalid callback type");
+ return -EINVAL;
+ }
+
+ /* Reset all internal fields before processing. */
+ internal_size =
+ sizeof(struct uds_request) - offsetof(struct uds_request, zone_number);
+ // FIXME should be using struct_group for this instead
+ memset((char *) request + sizeof(*request) - internal_size, 0, internal_size);
+
+ result = get_index_session(request->session);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ request->found = false;
+ request->unbatched = false;
+ request->index = request->session->index;
+
+ uds_enqueue_request(request, STAGE_TRIAGE);
+ return UDS_SUCCESS;
+}
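The FIXME above refers to the kernel's struct_group() helper. One possible shape, shown only as a sketch (the grouping and the field types here are assumptions, not the actual struct uds_request definition):

	struct uds_request {
		/* ... caller-visible request fields ... */
		struct_group(internal,
			unsigned int zone_number;
			/* ... the remaining index-private fields ... */
		);
	};

	/* which would let the reset above collapse to: */
	memset(&request->internal, 0, sizeof(request->internal));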
+
+static void enter_callback_stage(struct uds_request *request)
+{
+ if (request->status != UDS_SUCCESS) {
+ /* All request errors are considered unrecoverable */
+ mutex_lock(&request->session->request_mutex);
+ request->session->state |= IS_FLAG_DISABLED;
+ mutex_unlock(&request->session->request_mutex);
+ }
+
+ uds_request_queue_enqueue(request->session->callback_queue, request);
+}
+
+static inline void count_once(u64 *count_ptr)
+{
+ WRITE_ONCE(*count_ptr, READ_ONCE(*count_ptr) + 1);
+}
+
+static void update_session_stats(struct uds_request *request)
+{
+ struct session_stats *session_stats = &request->session->stats;
+
+ count_once(&session_stats->requests);
+
+ switch (request->type) {
+ case UDS_POST:
+ if (request->found)
+ count_once(&session_stats->posts_found);
+ else
+ count_once(&session_stats->posts_not_found);
+
+ if (request->location == UDS_LOCATION_IN_OPEN_CHAPTER)
+ count_once(&session_stats->posts_found_open_chapter);
+ else if (request->location == UDS_LOCATION_IN_DENSE)
+ count_once(&session_stats->posts_found_dense);
+ else if (request->location == UDS_LOCATION_IN_SPARSE)
+ count_once(&session_stats->posts_found_sparse);
+ break;
+
+ case UDS_UPDATE:
+ if (request->found)
+ count_once(&session_stats->updates_found);
+ else
+ count_once(&session_stats->updates_not_found);
+ break;
+
+ case UDS_DELETE:
+ if (request->found)
+ count_once(&session_stats->deletions_found);
+ else
+ count_once(&session_stats->deletions_not_found);
+ break;
+
+ case UDS_QUERY:
+ case UDS_QUERY_NO_UPDATE:
+ if (request->found)
+ count_once(&session_stats->queries_found);
+ else
+ count_once(&session_stats->queries_not_found);
+ break;
+
+ default:
+ request->status = VDO_ASSERT(false, "unknown request type: %d",
+ request->type);
+ }
+}
+
+static void handle_callbacks(struct uds_request *request)
+{
+ struct uds_index_session *index_session = request->session;
+
+ if (request->status == UDS_SUCCESS)
+ update_session_stats(request);
+
+ request->status = uds_status_to_errno(request->status);
+ request->callback(request);
+ release_index_session(index_session);
+}
+
+static int __must_check make_empty_index_session(struct uds_index_session **index_session_ptr)
+{
+ int result;
+ struct uds_index_session *session;
+
+ result = vdo_allocate(1, struct uds_index_session, __func__, &session);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ mutex_init(&session->request_mutex);
+ uds_init_cond(&session->request_cond);
+ mutex_init(&session->load_context.mutex);
+ uds_init_cond(&session->load_context.cond);
+
+ result = uds_make_request_queue("callbackW", &handle_callbacks,
+ &session->callback_queue);
+ if (result != UDS_SUCCESS) {
+ vdo_free(session);
+ return result;
+ }
+
+ *index_session_ptr = session;
+ return UDS_SUCCESS;
+}
+
+int uds_create_index_session(struct uds_index_session **session)
+{
+ if (session == NULL) {
+ vdo_log_error("missing session pointer");
+ return -EINVAL;
+ }
+
+ return uds_status_to_errno(make_empty_index_session(session));
+}
+
+static int __must_check start_loading_index_session(struct uds_index_session *index_session)
+{
+ int result;
+
+ mutex_lock(&index_session->request_mutex);
+ if (index_session->state & IS_FLAG_SUSPENDED) {
+ vdo_log_info("Index session is suspended");
+ result = -EBUSY;
+ } else if (index_session->state != 0) {
+ vdo_log_info("Index is already loaded");
+ result = -EBUSY;
+ } else {
+ index_session->state |= IS_FLAG_LOADING;
+ result = UDS_SUCCESS;
+ }
+ mutex_unlock(&index_session->request_mutex);
+ return result;
+}
+
+static void finish_loading_index_session(struct uds_index_session *index_session,
+ int result)
+{
+ mutex_lock(&index_session->request_mutex);
+ index_session->state &= ~IS_FLAG_LOADING;
+ if (result == UDS_SUCCESS)
+ index_session->state |= IS_FLAG_LOADED;
+
+ uds_broadcast_cond(&index_session->request_cond);
+ mutex_unlock(&index_session->request_mutex);
+}
+
+static int initialize_index_session(struct uds_index_session *index_session,
+ enum uds_open_index_type open_type)
+{
+ int result;
+ struct uds_configuration *config;
+
+ result = uds_make_configuration(&index_session->parameters, &config);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "Failed to allocate config");
+ return result;
+ }
+
+ memset(&index_session->stats, 0, sizeof(index_session->stats));
+ result = uds_make_index(config, open_type, &index_session->load_context,
+ enter_callback_stage, &index_session->index);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Failed to make index");
+ else
+ uds_log_configuration(config);
+
+ uds_free_configuration(config);
+ return result;
+}
+
+static const char *get_open_type_string(enum uds_open_index_type open_type)
+{
+ switch (open_type) {
+ case UDS_CREATE:
+ return "creating index";
+ case UDS_LOAD:
+ return "loading or rebuilding index";
+ case UDS_NO_REBUILD:
+ return "loading index";
+ default:
+ return "unknown open method";
+ }
+}
+
+/*
+ * Open an index under the given session. This operation will fail if the
+ * index session is suspended, or if there is already an open index.
+ */
+int uds_open_index(enum uds_open_index_type open_type,
+ const struct uds_parameters *parameters,
+ struct uds_index_session *session)
+{
+ int result;
+ char name[BDEVNAME_SIZE];
+
+ if (parameters == NULL) {
+ vdo_log_error("missing required parameters");
+ return -EINVAL;
+ }
+ if (parameters->bdev == NULL) {
+ vdo_log_error("missing required block device");
+ return -EINVAL;
+ }
+ if (session == NULL) {
+ vdo_log_error("missing required session pointer");
+ return -EINVAL;
+ }
+
+ result = start_loading_index_session(session);
+ if (result != UDS_SUCCESS)
+ return uds_status_to_errno(result);
+
+ session->parameters = *parameters;
+ format_dev_t(name, parameters->bdev->bd_dev);
+ vdo_log_info("%s: %s", get_open_type_string(open_type), name);
+
+ result = initialize_index_session(session, open_type);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Failed %s",
+ get_open_type_string(open_type));
+
+ finish_loading_index_session(session, result);
+ return uds_status_to_errno(result);
+}
+
+static void wait_for_no_requests_in_progress(struct uds_index_session *index_session)
+{
+ mutex_lock(&index_session->request_mutex);
+ while (index_session->request_count > 0) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+ mutex_unlock(&index_session->request_mutex);
+}
+
+static int __must_check save_index(struct uds_index_session *index_session)
+{
+ wait_for_no_requests_in_progress(index_session);
+ return uds_save_index(index_session->index);
+}
+
+static void suspend_rebuild(struct uds_index_session *session)
+{
+ mutex_lock(&session->load_context.mutex);
+ switch (session->load_context.status) {
+ case INDEX_OPENING:
+ session->load_context.status = INDEX_SUSPENDING;
+
+ /* Wait until the index indicates that it is not replaying. */
+ while ((session->load_context.status != INDEX_SUSPENDED) &&
+ (session->load_context.status != INDEX_READY)) {
+ uds_wait_cond(&session->load_context.cond,
+ &session->load_context.mutex);
+ }
+
+ break;
+
+ case INDEX_READY:
+ /* Index load does not need to be suspended. */
+ break;
+
+ case INDEX_SUSPENDED:
+ case INDEX_SUSPENDING:
+ case INDEX_FREEING:
+ default:
+ /* These cases should not happen. */
+ VDO_ASSERT_LOG_ONLY(false, "Bad load context state %u",
+ session->load_context.status);
+ break;
+ }
+ mutex_unlock(&session->load_context.mutex);
+}
+
+/*
+ * Suspend index operation, draining all current index requests and preventing new index requests
+ * from starting. Optionally saves all index data before returning.
+ */
+int uds_suspend_index_session(struct uds_index_session *session, bool save)
+{
+ int result = UDS_SUCCESS;
+ bool no_work = false;
+ bool rebuilding = false;
+
+ /* Wait for any current index state change to complete. */
+ mutex_lock(&session->request_mutex);
+ while (session->state & IS_FLAG_CLOSING)
+ uds_wait_cond(&session->request_cond, &session->request_mutex);
+
+ if ((session->state & IS_FLAG_WAITING) || (session->state & IS_FLAG_DESTROYING)) {
+ no_work = true;
+ vdo_log_info("Index session is already changing state");
+ result = -EBUSY;
+ } else if (session->state & IS_FLAG_SUSPENDED) {
+ no_work = true;
+ } else if (session->state & IS_FLAG_LOADING) {
+ session->state |= IS_FLAG_WAITING;
+ rebuilding = true;
+ } else if (session->state & IS_FLAG_LOADED) {
+ session->state |= IS_FLAG_WAITING;
+ } else {
+ no_work = true;
+ session->state |= IS_FLAG_SUSPENDED;
+ uds_broadcast_cond(&session->request_cond);
+ }
+ mutex_unlock(&session->request_mutex);
+
+ if (no_work)
+ return uds_status_to_errno(result);
+
+ if (rebuilding)
+ suspend_rebuild(session);
+ else if (save)
+ result = save_index(session);
+ else
+ result = uds_flush_index_session(session);
+
+ mutex_lock(&session->request_mutex);
+ session->state &= ~IS_FLAG_WAITING;
+ session->state |= IS_FLAG_SUSPENDED;
+ uds_broadcast_cond(&session->request_cond);
+ mutex_unlock(&session->request_mutex);
+ return uds_status_to_errno(result);
+}
+
+static int replace_device(struct uds_index_session *session, struct block_device *bdev)
+{
+ int result;
+
+ result = uds_replace_index_storage(session->index, bdev);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ session->parameters.bdev = bdev;
+ return UDS_SUCCESS;
+}
+
+/*
+ * Resume index operation after being suspended. If the index is suspended and the supplied block
+ * device differs from the current backing store, the index will start using the new backing store.
+ */
+int uds_resume_index_session(struct uds_index_session *session,
+ struct block_device *bdev)
+{
+ int result = UDS_SUCCESS;
+ bool no_work = false;
+ bool resume_replay = false;
+
+ mutex_lock(&session->request_mutex);
+ if (session->state & IS_FLAG_WAITING) {
+ vdo_log_info("Index session is already changing state");
+ no_work = true;
+ result = -EBUSY;
+ } else if (!(session->state & IS_FLAG_SUSPENDED)) {
+ /* If not suspended, just succeed. */
+ no_work = true;
+ result = UDS_SUCCESS;
+ } else {
+ session->state |= IS_FLAG_WAITING;
+ if (session->state & IS_FLAG_LOADING)
+ resume_replay = true;
+ }
+ mutex_unlock(&session->request_mutex);
+
+ if (no_work)
+ return result;
+
+ if ((session->index != NULL) && (bdev != session->parameters.bdev)) {
+ result = replace_device(session, bdev);
+ if (result != UDS_SUCCESS) {
+ mutex_lock(&session->request_mutex);
+ session->state &= ~IS_FLAG_WAITING;
+ uds_broadcast_cond(&session->request_cond);
+ mutex_unlock(&session->request_mutex);
+ return uds_status_to_errno(result);
+ }
+ }
+
+ if (resume_replay) {
+ mutex_lock(&session->load_context.mutex);
+ switch (session->load_context.status) {
+ case INDEX_SUSPENDED:
+ session->load_context.status = INDEX_OPENING;
+ /* Notify the index to start replaying again. */
+ uds_broadcast_cond(&session->load_context.cond);
+ break;
+
+ case INDEX_READY:
+ /* There is no index rebuild to resume. */
+ break;
+
+ case INDEX_OPENING:
+ case INDEX_SUSPENDING:
+ case INDEX_FREEING:
+ default:
+ /* These cases should not happen; do nothing. */
+ VDO_ASSERT_LOG_ONLY(false, "Bad load context state %u",
+ session->load_context.status);
+ break;
+ }
+ mutex_unlock(&session->load_context.mutex);
+ }
+
+ mutex_lock(&session->request_mutex);
+ session->state &= ~IS_FLAG_WAITING;
+ session->state &= ~IS_FLAG_SUSPENDED;
+ uds_broadcast_cond(&session->request_cond);
+ mutex_unlock(&session->request_mutex);
+ return UDS_SUCCESS;
+}
+
+static int save_and_free_index(struct uds_index_session *index_session)
+{
+ int result = UDS_SUCCESS;
+ bool suspended;
+ struct uds_index *index = index_session->index;
+
+ if (index == NULL)
+ return UDS_SUCCESS;
+
+ mutex_lock(&index_session->request_mutex);
+ suspended = (index_session->state & IS_FLAG_SUSPENDED);
+ mutex_unlock(&index_session->request_mutex);
+
+ if (!suspended) {
+ result = uds_save_index(index);
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result,
+ "ignoring error from save_index");
+ }
+ uds_free_index(index);
+ index_session->index = NULL;
+
+ /*
+ * Reset all index state that happens to be in the index
+ * session, so it doesn't affect any future index.
+ */
+ mutex_lock(&index_session->load_context.mutex);
+ index_session->load_context.status = INDEX_OPENING;
+ mutex_unlock(&index_session->load_context.mutex);
+
+ mutex_lock(&index_session->request_mutex);
+ /* Only the suspend bit will remain relevant. */
+ index_session->state &= IS_FLAG_SUSPENDED;
+ mutex_unlock(&index_session->request_mutex);
+
+ return result;
+}
+
+/* Save and close the current index. */
+int uds_close_index(struct uds_index_session *index_session)
+{
+ int result = UDS_SUCCESS;
+
+ /* Wait for any current index state change to complete. */
+ mutex_lock(&index_session->request_mutex);
+ while ((index_session->state & IS_FLAG_WAITING) ||
+ (index_session->state & IS_FLAG_CLOSING)) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+
+ if (index_session->state & IS_FLAG_SUSPENDED) {
+ vdo_log_info("Index session is suspended");
+ result = -EBUSY;
+ } else if ((index_session->state & IS_FLAG_DESTROYING) ||
+ !(index_session->state & IS_FLAG_LOADED)) {
+ /* The index doesn't exist, hasn't finished loading, or is being destroyed. */
+ result = UDS_NO_INDEX;
+ } else {
+ index_session->state |= IS_FLAG_CLOSING;
+ }
+ mutex_unlock(&index_session->request_mutex);
+ if (result != UDS_SUCCESS)
+ return uds_status_to_errno(result);
+
+ vdo_log_debug("Closing index");
+ wait_for_no_requests_in_progress(index_session);
+ result = save_and_free_index(index_session);
+ vdo_log_debug("Closed index");
+
+ mutex_lock(&index_session->request_mutex);
+ index_session->state &= ~IS_FLAG_CLOSING;
+ uds_broadcast_cond(&index_session->request_cond);
+ mutex_unlock(&index_session->request_mutex);
+ return uds_status_to_errno(result);
+}
+
+/* This will save and close an open index before destroying the session. */
+int uds_destroy_index_session(struct uds_index_session *index_session)
+{
+ int result;
+ bool load_pending = false;
+
+ vdo_log_debug("Destroying index session");
+
+ /* Wait for any current index state change to complete. */
+ mutex_lock(&index_session->request_mutex);
+ while ((index_session->state & IS_FLAG_WAITING) ||
+ (index_session->state & IS_FLAG_CLOSING)) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+
+ if (index_session->state & IS_FLAG_DESTROYING) {
+ mutex_unlock(&index_session->request_mutex);
+ vdo_log_info("Index session is already closing");
+ return -EBUSY;
+ }
+
+ index_session->state |= IS_FLAG_DESTROYING;
+ load_pending = ((index_session->state & IS_FLAG_LOADING) &&
+ (index_session->state & IS_FLAG_SUSPENDED));
+ mutex_unlock(&index_session->request_mutex);
+
+ if (load_pending) {
+ /* Tell the index to terminate the rebuild. */
+ mutex_lock(&index_session->load_context.mutex);
+ if (index_session->load_context.status == INDEX_SUSPENDED) {
+ index_session->load_context.status = INDEX_FREEING;
+ uds_broadcast_cond(&index_session->load_context.cond);
+ }
+ mutex_unlock(&index_session->load_context.mutex);
+
+ /* Wait until the load exits before proceeding. */
+ mutex_lock(&index_session->request_mutex);
+ while (index_session->state & IS_FLAG_LOADING) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+ mutex_unlock(&index_session->request_mutex);
+ }
+
+ wait_for_no_requests_in_progress(index_session);
+ result = save_and_free_index(index_session);
+ uds_request_queue_finish(index_session->callback_queue);
+ index_session->callback_queue = NULL;
+ vdo_log_debug("Destroyed index session");
+ vdo_free(index_session);
+ return uds_status_to_errno(result);
+}
+
+/* Wait until all callbacks for index operations are complete. */
+int uds_flush_index_session(struct uds_index_session *index_session)
+{
+ wait_for_no_requests_in_progress(index_session);
+ uds_wait_for_idle_index(index_session->index);
+ return UDS_SUCCESS;
+}
+
+/* Statistics collection is intended to be thread-safe. */
+static void collect_stats(const struct uds_index_session *index_session,
+ struct uds_index_stats *stats)
+{
+ const struct session_stats *session_stats = &index_session->stats;
+
+ stats->current_time = ktime_to_seconds(current_time_ns(CLOCK_REALTIME));
+ stats->posts_found = READ_ONCE(session_stats->posts_found);
+ stats->in_memory_posts_found = READ_ONCE(session_stats->posts_found_open_chapter);
+ stats->dense_posts_found = READ_ONCE(session_stats->posts_found_dense);
+ stats->sparse_posts_found = READ_ONCE(session_stats->posts_found_sparse);
+ stats->posts_not_found = READ_ONCE(session_stats->posts_not_found);
+ stats->updates_found = READ_ONCE(session_stats->updates_found);
+ stats->updates_not_found = READ_ONCE(session_stats->updates_not_found);
+ stats->deletions_found = READ_ONCE(session_stats->deletions_found);
+ stats->deletions_not_found = READ_ONCE(session_stats->deletions_not_found);
+ stats->queries_found = READ_ONCE(session_stats->queries_found);
+ stats->queries_not_found = READ_ONCE(session_stats->queries_not_found);
+ stats->requests = READ_ONCE(session_stats->requests);
+}
+
+int uds_get_index_session_stats(struct uds_index_session *index_session,
+ struct uds_index_stats *stats)
+{
+ if (stats == NULL) {
+ vdo_log_error("received a NULL index stats pointer");
+ return -EINVAL;
+ }
+
+ collect_stats(index_session, stats);
+ if (index_session->index != NULL) {
+ uds_get_index_stats(index_session->index, stats);
+ } else {
+ stats->entries_indexed = 0;
+ stats->memory_used = 0;
+ stats->collisions = 0;
+ stats->entries_discarded = 0;
+ }
+
+ return UDS_SUCCESS;
+}
+
+void uds_wait_cond(struct cond_var *cv, struct mutex *mutex)
+{
+ DEFINE_WAIT(__wait);
+
+ prepare_to_wait(&cv->wait_queue, &__wait, TASK_IDLE);
+ mutex_unlock(mutex);
+ schedule();
+ finish_wait(&cv->wait_queue, &__wait);
+ mutex_lock(mutex);
+}
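uds_wait_cond() calls prepare_to_wait() before dropping the mutex, so a wakeup issued between the unlock and schedule() is not lost. It pairs with uds_broadcast_cond(), assumed here to wake every waiter on cv->wait_queue; the usual pattern in this file is:

	/* waiter (e.g. wait_for_no_requests_in_progress) */
	mutex_lock(&session->request_mutex);
	while (session->request_count > 0)
		uds_wait_cond(&session->request_cond, &session->request_mutex);
	mutex_unlock(&session->request_mutex);

	/* waker (e.g. release_index_session) */
	mutex_lock(&session->request_mutex);
	if (--session->request_count == 0)
		uds_broadcast_cond(&session->request_cond);
	mutex_unlock(&session->request_mutex);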
diff --git a/drivers/md/dm-vdo/indexer/index-session.h b/drivers/md/dm-vdo/indexer/index-session.h
new file mode 100644
index 000000000000..066648f6e062
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-session.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_SESSION_H
+#define UDS_INDEX_SESSION_H
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+
+#include "thread-utils.h"
+
+#include "config.h"
+#include "indexer.h"
+
+/*
+ * The index session mediates all interactions with a UDS index. Once the index session is created,
+ * it can be used to open, close, suspend, or recreate an index. It implements the majority of the
+ * functions in the top-level UDS API.
+ *
+ * If any deduplication request fails due to an internal error, the index is marked disabled. It
+ * will not accept any further requests and can only be closed. Closing the index will clear the
+ * disabled flag, and the index can then be reopened and recovered using the same index session.
+ */
+
+struct __aligned(L1_CACHE_BYTES) session_stats {
+ /* Post requests that found an entry */
+ u64 posts_found;
+ /* Post requests found in the open chapter */
+ u64 posts_found_open_chapter;
+ /* Post requests found in the dense index */
+ u64 posts_found_dense;
+ /* Post requests found in the sparse index */
+ u64 posts_found_sparse;
+ /* Post requests that did not find an entry */
+ u64 posts_not_found;
+ /* Update requests that found an entry */
+ u64 updates_found;
+ /* Update requests that did not find an entry */
+ u64 updates_not_found;
+ /* Delete requests that found an entry */
+ u64 deletions_found;
+ /* Delete requests that did not find an entry */
+ u64 deletions_not_found;
+ /* Query requests that found an entry */
+ u64 queries_found;
+ /* Query requests that did not find an entry */
+ u64 queries_not_found;
+ /* Total number of requests */
+ u64 requests;
+};
+
+enum index_suspend_status {
+ /* An index load has started but the index is not ready for use. */
+ INDEX_OPENING = 0,
+ /* The index is able to handle requests. */
+ INDEX_READY,
+ /* The index is attempting to suspend a rebuild. */
+ INDEX_SUSPENDING,
+ /* An index rebuild has been suspended. */
+ INDEX_SUSPENDED,
+ /* An index rebuild is being stopped in order to shut down. */
+ INDEX_FREEING,
+};
+
+struct index_load_context {
+ struct mutex mutex;
+ struct cond_var cond;
+ enum index_suspend_status status;
+};
+
+struct uds_index_session {
+ unsigned int state;
+ struct uds_index *index;
+ struct uds_request_queue *callback_queue;
+ struct uds_parameters parameters;
+ struct index_load_context load_context;
+ struct mutex request_mutex;
+ struct cond_var request_cond;
+ int request_count;
+ struct session_stats stats;
+};
+
+#endif /* UDS_INDEX_SESSION_H */
diff --git a/drivers/md/dm-vdo/indexer/index.c b/drivers/md/dm-vdo/indexer/index.c
new file mode 100644
index 000000000000..1ba767144426
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index.c
@@ -0,0 +1,1388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+
+#include "index.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+
+#include "funnel-requestqueue.h"
+#include "hash-utils.h"
+#include "sparse-cache.h"
+
+static const u64 NO_LAST_SAVE = U64_MAX;
+
+/*
+ * When searching for deduplication records, the index first searches the volume index, and then
+ * searches the chapter index for the relevant chapter. If the chapter has been fully committed to
+ * storage, the chapter pages are loaded into the page cache. If the chapter has not yet been
+ * committed (either the open chapter or a recently closed one), the index searches the in-memory
+ * representation of the chapter. Finally, if the volume index does not find a record and the index
+ * is sparse, the index will search the sparse cache.
+ *
+ * The index sends two kinds of messages to coordinate between zones: chapter close messages for the
+ * chapter writer, and sparse cache barrier messages for the sparse cache.
+ *
+ * The chapter writer is responsible for committing chapters of records to storage. Since zones can
+ * get different numbers of records, some zones may fall behind others. Each time a zone fills up
+ * its available space in a chapter, it informs the chapter writer that the chapter is complete,
+ * and also informs all other zones that it has closed the chapter. Each other zone will then close
+ * the chapter immediately, regardless of how full it is, in order to minimize skew between zones.
+ * Once every zone has closed the chapter, the chapter writer will commit that chapter to storage.
+ *
+ * The last zone to close the chapter also removes the oldest chapter from the volume index.
+ * Although that chapter is invalid for zones that have moved on, the existence of the open chapter
+ * means that those zones will never ask the volume index about it. No zone is allowed to get more
+ * than one chapter ahead of any other. If a zone is so far ahead that it tries to close another
+ * chapter before the previous one has been closed by all zones, it is forced to wait.
+ *
+ * The sparse cache relies on having the same set of chapter indexes available to all zones. When a
+ * request wants to add a chapter to the sparse cache, it sends a barrier message to each zone
+ * during the triage stage that acts as a rendezvous. Once every zone has reached the barrier and
+ * paused its operations, the cache membership is changed and each zone is then informed that it
+ * can proceed. More details can be found in the sparse cache documentation.
+ *
+ * If a sparse index has only one zone, it will not create a triage queue, but it still needs the
+ * barrier message to change the sparse cache membership, so the index simulates the message by
+ * invoking the handler directly.
+ */
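For a two-zone index, the chapter close handshake described above plays out roughly like this (a schematic reading of the code that follows, not an additional mechanism):

	zone 0 fills its open chapter
	  -> swap_open_chapter() waits for the previous chapter write to finish
	  -> start_closing_chapter(zone 0)         1 of 2 zones submitted
	  -> announce_chapter_closed() sends UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED to zone 1
	zone 1 handles the message and closes its own chapter early
	  -> start_closing_chapter(zone 1)         2 of 2 zones submitted
	the chapter writer thread wakes, commits the chapter to storage, and signals
	any zone waiting in finish_previous_chapter()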
+
+struct chapter_writer {
+ /* The index to which we belong */
+ struct uds_index *index;
+ /* The thread to do the writing */
+ struct thread *thread;
+ /* The lock protecting the following fields */
+ struct mutex mutex;
+ /* The condition signalled on state changes */
+ struct cond_var cond;
+ /* Set to true to stop the thread */
+ bool stop;
+ /* The result from the most recent write */
+ int result;
+ /* The number of bytes allocated by the chapter writer */
+ size_t memory_size;
+ /* The number of zones which have submitted a chapter for writing */
+ unsigned int zones_to_write;
+ /* Open chapter index used by uds_close_open_chapter() */
+ struct open_chapter_index *open_chapter_index;
+ /* Collated records used by uds_close_open_chapter() */
+ struct uds_volume_record *collated_records;
+ /* The chapters to write (one per zone) */
+ struct open_chapter_zone *chapters[];
+};
+
+static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter)
+{
+ return uds_is_chapter_sparse(zone->index->volume->geometry,
+ zone->oldest_virtual_chapter,
+ zone->newest_virtual_chapter, virtual_chapter);
+}
+
+static int launch_zone_message(struct uds_zone_message message, unsigned int zone,
+ struct uds_index *index)
+{
+ int result;
+ struct uds_request *request;
+
+ result = vdo_allocate(1, struct uds_request, __func__, &request);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ request->index = index;
+ request->unbatched = true;
+ request->zone_number = zone;
+ request->zone_message = message;
+
+ uds_enqueue_request(request, STAGE_MESSAGE);
+ return UDS_SUCCESS;
+}
+
+static void enqueue_barrier_messages(struct uds_index *index, u64 virtual_chapter)
+{
+ struct uds_zone_message message = {
+ .type = UDS_MESSAGE_SPARSE_CACHE_BARRIER,
+ .virtual_chapter = virtual_chapter,
+ };
+ unsigned int zone;
+
+ for (zone = 0; zone < index->zone_count; zone++) {
+ int result = launch_zone_message(message, zone, index);
+
+ VDO_ASSERT_LOG_ONLY((result == UDS_SUCCESS), "barrier message allocation");
+ }
+}
+
+/*
+ * Determine whether this request should trigger a sparse cache barrier message to change the
+ * membership of the sparse cache. If a change in membership is desired, the function returns the
+ * chapter number to add.
+ */
+static u64 triage_index_request(struct uds_index *index, struct uds_request *request)
+{
+ u64 virtual_chapter;
+ struct index_zone *zone;
+
+ virtual_chapter = uds_lookup_volume_index_name(index->volume_index,
+ &request->record_name);
+ if (virtual_chapter == NO_CHAPTER)
+ return NO_CHAPTER;
+
+ zone = index->zones[request->zone_number];
+ if (!is_zone_chapter_sparse(zone, virtual_chapter))
+ return NO_CHAPTER;
+
+ /*
+	 * FIXME: Optimize for a common case by remembering the chapter from the most recent
+	 * barrier message and skipping this chapter if it is the same.
+ */
+
+ return virtual_chapter;
+}
+
+/*
+ * Simulate a message to change the sparse cache membership for a single-zone sparse index. This
+ * allows us to forgo the complicated locking required by a multi-zone sparse index. Any other kind
+ * of index does nothing here.
+ */
+static int simulate_index_zone_barrier_message(struct index_zone *zone,
+ struct uds_request *request)
+{
+ u64 sparse_virtual_chapter;
+
+ if ((zone->index->zone_count > 1) ||
+ !uds_is_sparse_index_geometry(zone->index->volume->geometry))
+ return UDS_SUCCESS;
+
+ sparse_virtual_chapter = triage_index_request(zone->index, request);
+ if (sparse_virtual_chapter == NO_CHAPTER)
+ return UDS_SUCCESS;
+
+ return uds_update_sparse_cache(zone, sparse_virtual_chapter);
+}
+
+/* This is the request processing function for the triage queue. */
+static void triage_request(struct uds_request *request)
+{
+ struct uds_index *index = request->index;
+ u64 sparse_virtual_chapter = triage_index_request(index, request);
+
+ if (sparse_virtual_chapter != NO_CHAPTER)
+ enqueue_barrier_messages(index, sparse_virtual_chapter);
+
+ uds_enqueue_request(request, STAGE_INDEX);
+}
+
+static int finish_previous_chapter(struct uds_index *index, u64 current_chapter_number)
+{
+ int result;
+ struct chapter_writer *writer = index->chapter_writer;
+
+ mutex_lock(&writer->mutex);
+ while (index->newest_virtual_chapter < current_chapter_number)
+ uds_wait_cond(&writer->cond, &writer->mutex);
+ result = writer->result;
+ mutex_unlock(&writer->mutex);
+
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "Writing of previous open chapter failed");
+
+ return UDS_SUCCESS;
+}
+
+static int swap_open_chapter(struct index_zone *zone)
+{
+ int result;
+ struct open_chapter_zone *temporary_chapter;
+
+ result = finish_previous_chapter(zone->index, zone->newest_virtual_chapter);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ temporary_chapter = zone->open_chapter;
+ zone->open_chapter = zone->writing_chapter;
+ zone->writing_chapter = temporary_chapter;
+ return UDS_SUCCESS;
+}
+
+/*
+ * Inform the chapter writer that this zone is done with this chapter. The chapter won't start
+ * writing until all zones have closed it.
+ */
+static unsigned int start_closing_chapter(struct uds_index *index,
+ unsigned int zone_number,
+ struct open_chapter_zone *chapter)
+{
+ unsigned int finished_zones;
+ struct chapter_writer *writer = index->chapter_writer;
+
+ mutex_lock(&writer->mutex);
+ finished_zones = ++writer->zones_to_write;
+ writer->chapters[zone_number] = chapter;
+ uds_broadcast_cond(&writer->cond);
+ mutex_unlock(&writer->mutex);
+
+ return finished_zones;
+}
+
+static int announce_chapter_closed(struct index_zone *zone, u64 closed_chapter)
+{
+ int result;
+ unsigned int i;
+ struct uds_zone_message zone_message = {
+ .type = UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED,
+ .virtual_chapter = closed_chapter,
+ };
+
+ for (i = 0; i < zone->index->zone_count; i++) {
+ if (zone->id == i)
+ continue;
+
+ result = launch_zone_message(zone_message, i, zone->index);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int open_next_chapter(struct index_zone *zone)
+{
+ int result;
+ u64 closed_chapter;
+ u64 expiring;
+ unsigned int finished_zones;
+ u32 expire_chapters;
+
+ vdo_log_debug("closing chapter %llu of zone %u after %u entries (%u short)",
+ (unsigned long long) zone->newest_virtual_chapter, zone->id,
+ zone->open_chapter->size,
+ zone->open_chapter->capacity - zone->open_chapter->size);
+
+ result = swap_open_chapter(zone);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ closed_chapter = zone->newest_virtual_chapter++;
+ uds_set_volume_index_zone_open_chapter(zone->index->volume_index, zone->id,
+ zone->newest_virtual_chapter);
+ uds_reset_open_chapter(zone->open_chapter);
+
+ finished_zones = start_closing_chapter(zone->index, zone->id,
+ zone->writing_chapter);
+ if ((finished_zones == 1) && (zone->index->zone_count > 1)) {
+ result = announce_chapter_closed(zone, closed_chapter);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ expiring = zone->oldest_virtual_chapter;
+ expire_chapters = uds_chapters_to_expire(zone->index->volume->geometry,
+ zone->newest_virtual_chapter);
+ zone->oldest_virtual_chapter += expire_chapters;
+
+ if (finished_zones < zone->index->zone_count)
+ return UDS_SUCCESS;
+
+ while (expire_chapters-- > 0)
+ uds_forget_chapter(zone->index->volume, expiring++);
+
+ return UDS_SUCCESS;
+}
+
+static int handle_chapter_closed(struct index_zone *zone, u64 virtual_chapter)
+{
+ if (zone->newest_virtual_chapter == virtual_chapter)
+ return open_next_chapter(zone);
+
+ return UDS_SUCCESS;
+}
+
+static int dispatch_index_zone_control_request(struct uds_request *request)
+{
+ struct uds_zone_message *message = &request->zone_message;
+ struct index_zone *zone = request->index->zones[request->zone_number];
+
+ switch (message->type) {
+ case UDS_MESSAGE_SPARSE_CACHE_BARRIER:
+ return uds_update_sparse_cache(zone, message->virtual_chapter);
+
+ case UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED:
+ return handle_chapter_closed(zone, message->virtual_chapter);
+
+ default:
+ vdo_log_error("invalid message type: %d", message->type);
+ return UDS_INVALID_ARGUMENT;
+ }
+}
+
+static void set_request_location(struct uds_request *request,
+ enum uds_index_region new_location)
+{
+ request->location = new_location;
+ request->found = ((new_location == UDS_LOCATION_IN_OPEN_CHAPTER) ||
+ (new_location == UDS_LOCATION_IN_DENSE) ||
+ (new_location == UDS_LOCATION_IN_SPARSE));
+}
+
+static void set_chapter_location(struct uds_request *request,
+ const struct index_zone *zone, u64 virtual_chapter)
+{
+ request->found = true;
+ if (virtual_chapter == zone->newest_virtual_chapter)
+ request->location = UDS_LOCATION_IN_OPEN_CHAPTER;
+ else if (is_zone_chapter_sparse(zone, virtual_chapter))
+ request->location = UDS_LOCATION_IN_SPARSE;
+ else
+ request->location = UDS_LOCATION_IN_DENSE;
+}
+
+static int search_sparse_cache_in_zone(struct index_zone *zone, struct uds_request *request,
+ u64 virtual_chapter, bool *found)
+{
+ int result;
+ struct volume *volume;
+ u16 record_page_number;
+ u32 chapter;
+
+ result = uds_search_sparse_cache(zone, &request->record_name, &virtual_chapter,
+ &record_page_number);
+ if ((result != UDS_SUCCESS) || (virtual_chapter == NO_CHAPTER))
+ return result;
+
+ request->virtual_chapter = virtual_chapter;
+ volume = zone->index->volume;
+ chapter = uds_map_to_physical_chapter(volume->geometry, virtual_chapter);
+ return uds_search_cached_record_page(volume, request, chapter,
+ record_page_number, found);
+}
+
+static int get_record_from_zone(struct index_zone *zone, struct uds_request *request,
+ bool *found)
+{
+ struct volume *volume;
+
+ if (request->location == UDS_LOCATION_RECORD_PAGE_LOOKUP) {
+ *found = true;
+ return UDS_SUCCESS;
+ } else if (request->location == UDS_LOCATION_UNAVAILABLE) {
+ *found = false;
+ return UDS_SUCCESS;
+ }
+
+ if (request->virtual_chapter == zone->newest_virtual_chapter) {
+ uds_search_open_chapter(zone->open_chapter, &request->record_name,
+ &request->old_metadata, found);
+ return UDS_SUCCESS;
+ }
+
+ if ((zone->newest_virtual_chapter > 0) &&
+ (request->virtual_chapter == (zone->newest_virtual_chapter - 1)) &&
+ (zone->writing_chapter->size > 0)) {
+ uds_search_open_chapter(zone->writing_chapter, &request->record_name,
+ &request->old_metadata, found);
+ return UDS_SUCCESS;
+ }
+
+ volume = zone->index->volume;
+ if (is_zone_chapter_sparse(zone, request->virtual_chapter) &&
+ uds_sparse_cache_contains(volume->sparse_cache, request->virtual_chapter,
+ request->zone_number))
+ return search_sparse_cache_in_zone(zone, request,
+ request->virtual_chapter, found);
+
+ return uds_search_volume_page_cache(volume, request, found);
+}
+
+static int put_record_in_zone(struct index_zone *zone, struct uds_request *request,
+ const struct uds_record_data *metadata)
+{
+ unsigned int remaining;
+
+ remaining = uds_put_open_chapter(zone->open_chapter, &request->record_name,
+ metadata);
+ if (remaining == 0)
+ return open_next_chapter(zone);
+
+ return UDS_SUCCESS;
+}
+
+static int search_index_zone(struct index_zone *zone, struct uds_request *request)
+{
+ int result;
+ struct volume_index_record record;
+ bool overflow_record, found = false;
+ struct uds_record_data *metadata;
+ u64 chapter;
+
+ result = uds_get_volume_index_record(zone->index->volume_index,
+ &request->record_name, &record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record.is_found) {
+ if (request->requeued && request->virtual_chapter != record.virtual_chapter)
+ set_request_location(request, UDS_LOCATION_UNKNOWN);
+
+ request->virtual_chapter = record.virtual_chapter;
+ result = get_record_from_zone(zone, request, &found);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ if (found)
+ set_chapter_location(request, zone, record.virtual_chapter);
+
+ /*
+ * If a record has overflowed a chapter index in more than one chapter (or overflowed in
+ * one chapter and collided with an existing record), it will exist as a collision record
+ * in the volume index, but we won't find it in the volume. This case needs special
+ * handling.
+ */
+ overflow_record = (record.is_found && record.is_collision && !found);
+ chapter = zone->newest_virtual_chapter;
+ if (found || overflow_record) {
+ if ((request->type == UDS_QUERY_NO_UPDATE) ||
+ ((request->type == UDS_QUERY) && overflow_record)) {
+ /* There is nothing left to do. */
+ return UDS_SUCCESS;
+ }
+
+ if (record.virtual_chapter != chapter) {
+ /*
+ * Update the volume index to reference the new chapter for the block. If
+ * the record had been deleted or dropped from the chapter index, it will
+ * be back.
+ */
+ result = uds_set_volume_index_record_chapter(&record, chapter);
+ } else if (request->type != UDS_UPDATE) {
+ /* The record is already in the open chapter. */
+ return UDS_SUCCESS;
+ }
+ } else {
+ /*
+ * The record wasn't in the volume index, so check whether the
+ * name is in a cached sparse chapter. If we found the name on
+ * a previous search, use that result instead.
+ */
+ if (request->location == UDS_LOCATION_RECORD_PAGE_LOOKUP) {
+ found = true;
+ } else if (request->location == UDS_LOCATION_UNAVAILABLE) {
+ found = false;
+ } else if (uds_is_sparse_index_geometry(zone->index->volume->geometry) &&
+ !uds_is_volume_index_sample(zone->index->volume_index,
+ &request->record_name)) {
+ result = search_sparse_cache_in_zone(zone, request, NO_CHAPTER,
+ &found);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ if (found)
+ set_request_location(request, UDS_LOCATION_IN_SPARSE);
+
+ if ((request->type == UDS_QUERY_NO_UPDATE) ||
+ ((request->type == UDS_QUERY) && !found)) {
+ /* There is nothing left to do. */
+ return UDS_SUCCESS;
+ }
+
+ /*
+ * Add a new entry to the volume index referencing the open chapter. This needs to
+ * be done both for new records, and for records from cached sparse chapters.
+ */
+ result = uds_put_volume_index_record(&record, chapter);
+ }
+
+ if (result == UDS_OVERFLOW) {
+ /*
+ * The volume index encountered a delta list overflow. The condition was already
+ * logged. We will go on without adding the record to the open chapter.
+ */
+ return UDS_SUCCESS;
+ }
+
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!found || (request->type == UDS_UPDATE)) {
+ /* This is a new record or we're updating an existing record. */
+ metadata = &request->new_metadata;
+ } else {
+ /* Move the existing record to the open chapter. */
+ metadata = &request->old_metadata;
+ }
+
+ return put_record_in_zone(zone, request, metadata);
+}
+
+static int remove_from_index_zone(struct index_zone *zone, struct uds_request *request)
+{
+ int result;
+ struct volume_index_record record;
+
+ result = uds_get_volume_index_record(zone->index->volume_index,
+ &request->record_name, &record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!record.is_found)
+ return UDS_SUCCESS;
+
+ /* If the request was requeued, check whether the saved state is still valid. */
+
+ if (record.is_collision) {
+ set_chapter_location(request, zone, record.virtual_chapter);
+ } else {
+ /* Non-collision records are hints, so resolve the name in the chapter. */
+ bool found;
+
+ if (request->requeued && request->virtual_chapter != record.virtual_chapter)
+ set_request_location(request, UDS_LOCATION_UNKNOWN);
+
+ request->virtual_chapter = record.virtual_chapter;
+ result = get_record_from_zone(zone, request, &found);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!found) {
+ /* There is no record to remove. */
+ return UDS_SUCCESS;
+ }
+ }
+
+ set_chapter_location(request, zone, record.virtual_chapter);
+
+ /*
+ * Delete the volume index entry for the named record only. Note that a later search might
+ * return stale advice if there is a colliding name in the same chapter, but that is a very
+ * rare case (1 in 2^21).
+ */
+ result = uds_remove_volume_index_record(&record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /*
+ * If the record is in the open chapter, we must remove it or mark it deleted to avoid
+ * trouble if the record is added again later.
+ */
+ if (request->location == UDS_LOCATION_IN_OPEN_CHAPTER)
+ uds_remove_from_open_chapter(zone->open_chapter, &request->record_name);
+
+ return UDS_SUCCESS;
+}
+
+static int dispatch_index_request(struct uds_index *index, struct uds_request *request)
+{
+ int result;
+ struct index_zone *zone = index->zones[request->zone_number];
+
+ if (!request->requeued) {
+ result = simulate_index_zone_barrier_message(zone, request);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ switch (request->type) {
+ case UDS_POST:
+ case UDS_UPDATE:
+ case UDS_QUERY:
+ case UDS_QUERY_NO_UPDATE:
+ result = search_index_zone(zone, request);
+ break;
+
+ case UDS_DELETE:
+ result = remove_from_index_zone(zone, request);
+ break;
+
+ default:
+ result = vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "invalid request type: %d",
+ request->type);
+ break;
+ }
+
+ return result;
+}
+
+/* This is the request processing function invoked by each zone's thread. */
+static void execute_zone_request(struct uds_request *request)
+{
+ int result;
+ struct uds_index *index = request->index;
+
+ if (request->zone_message.type != UDS_MESSAGE_NONE) {
+ result = dispatch_index_zone_control_request(request);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "error executing message: %d",
+ request->zone_message.type);
+ }
+
+ /* Once the message is processed it can be freed. */
+ vdo_free(vdo_forget(request));
+ return;
+ }
+
+ index->need_to_save = true;
+ if (request->requeued && (request->status != UDS_SUCCESS)) {
+ set_request_location(request, UDS_LOCATION_UNAVAILABLE);
+ index->callback(request);
+ return;
+ }
+
+ result = dispatch_index_request(index, request);
+ if (result == UDS_QUEUED) {
+ /* The request has been requeued so don't let it complete. */
+ return;
+ }
+
+ if (!request->found)
+ set_request_location(request, UDS_LOCATION_UNAVAILABLE);
+
+ request->status = result;
+ index->callback(request);
+}
+
+static int initialize_index_queues(struct uds_index *index,
+ const struct index_geometry *geometry)
+{
+ int result;
+ unsigned int i;
+
+ for (i = 0; i < index->zone_count; i++) {
+ result = uds_make_request_queue("indexW", &execute_zone_request,
+ &index->zone_queues[i]);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ /* The triage queue is only needed for sparse multi-zone indexes. */
+ if ((index->zone_count > 1) && uds_is_sparse_index_geometry(geometry)) {
+ result = uds_make_request_queue("triageW", &triage_request,
+ &index->triage_queue);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/* This is the driver function for the chapter writer thread. */
+static void close_chapters(void *arg)
+{
+ int result;
+ struct chapter_writer *writer = arg;
+ struct uds_index *index = writer->index;
+
+ vdo_log_debug("chapter writer starting");
+ mutex_lock(&writer->mutex);
+ for (;;) {
+ while (writer->zones_to_write < index->zone_count) {
+ if (writer->stop && (writer->zones_to_write == 0)) {
+ /*
+ * We've been told to stop, and all of the zones are in the same
+ * open chapter, so we can exit now.
+ */
+ mutex_unlock(&writer->mutex);
+ vdo_log_debug("chapter writer stopping");
+ return;
+ }
+ uds_wait_cond(&writer->cond, &writer->mutex);
+ }
+
+ /*
+ * Release the lock while closing a chapter. We probably don't need to do this, but
+ * it seems safer in principle. It's OK to access the chapter and chapter_number
+ * fields without the lock since those aren't allowed to change until we're done.
+ */
+ mutex_unlock(&writer->mutex);
+
+ if (index->has_saved_open_chapter) {
+ /*
+ * Remove the saved open chapter the first time we close an open chapter
+ * after loading from a clean shutdown, or after doing a clean save. The
+ * lack of the saved open chapter will indicate that a recovery is
+ * necessary.
+ */
+ index->has_saved_open_chapter = false;
+ result = uds_discard_open_chapter(index->layout);
+ if (result == UDS_SUCCESS)
+ vdo_log_debug("Discarding saved open chapter");
+ }
+
+ result = uds_close_open_chapter(writer->chapters, index->zone_count,
+ index->volume,
+ writer->open_chapter_index,
+ writer->collated_records,
+ index->newest_virtual_chapter);
+
+ mutex_lock(&writer->mutex);
+ index->newest_virtual_chapter++;
+ index->oldest_virtual_chapter +=
+ uds_chapters_to_expire(index->volume->geometry,
+ index->newest_virtual_chapter);
+ writer->result = result;
+ writer->zones_to_write = 0;
+ uds_broadcast_cond(&writer->cond);
+ }
+}
+
+static void stop_chapter_writer(struct chapter_writer *writer)
+{
+ struct thread *writer_thread = NULL;
+
+ mutex_lock(&writer->mutex);
+ if (writer->thread != NULL) {
+ writer_thread = writer->thread;
+ writer->thread = NULL;
+ writer->stop = true;
+ uds_broadcast_cond(&writer->cond);
+ }
+ mutex_unlock(&writer->mutex);
+
+ if (writer_thread != NULL)
+ vdo_join_threads(writer_thread);
+}
+
+static void free_chapter_writer(struct chapter_writer *writer)
+{
+ if (writer == NULL)
+ return;
+
+ stop_chapter_writer(writer);
+ uds_free_open_chapter_index(writer->open_chapter_index);
+ vdo_free(writer->collated_records);
+ vdo_free(writer);
+}
+
+static int make_chapter_writer(struct uds_index *index,
+ struct chapter_writer **writer_ptr)
+{
+ int result;
+ struct chapter_writer *writer;
+ size_t collated_records_size =
+ (sizeof(struct uds_volume_record) * index->volume->geometry->records_per_chapter);
+
+ result = vdo_allocate_extended(struct chapter_writer, index->zone_count,
+ struct open_chapter_zone *, "Chapter Writer",
+ &writer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ writer->index = index;
+ mutex_init(&writer->mutex);
+ uds_init_cond(&writer->cond);
+
+ result = vdo_allocate_cache_aligned(collated_records_size, "collated records",
+ &writer->collated_records);
+ if (result != VDO_SUCCESS) {
+ free_chapter_writer(writer);
+ return result;
+ }
+
+ result = uds_make_open_chapter_index(&writer->open_chapter_index,
+ index->volume->geometry,
+ index->volume->nonce);
+ if (result != UDS_SUCCESS) {
+ free_chapter_writer(writer);
+ return result;
+ }
+
+ writer->memory_size = (sizeof(struct chapter_writer) +
+ index->zone_count * sizeof(struct open_chapter_zone *) +
+ collated_records_size +
+ writer->open_chapter_index->memory_size);
+
+ result = vdo_create_thread(close_chapters, writer, "writer", &writer->thread);
+ if (result != VDO_SUCCESS) {
+ free_chapter_writer(writer);
+ return result;
+ }
+
+ *writer_ptr = writer;
+ return UDS_SUCCESS;
+}
+
+static int load_index(struct uds_index *index)
+{
+ int result;
+ u64 last_save_chapter;
+
+ result = uds_load_index_state(index->layout, index);
+ if (result != UDS_SUCCESS)
+ return UDS_INDEX_NOT_SAVED_CLEANLY;
+
+ last_save_chapter = ((index->last_save != NO_LAST_SAVE) ? index->last_save : 0);
+
+ vdo_log_info("loaded index from chapter %llu through chapter %llu",
+ (unsigned long long) index->oldest_virtual_chapter,
+ (unsigned long long) last_save_chapter);
+
+ return UDS_SUCCESS;
+}
+
+static int rebuild_index_page_map(struct uds_index *index, u64 vcn)
+{
+ int result;
+ struct delta_index_page *chapter_index_page;
+ struct index_geometry *geometry = index->volume->geometry;
+ u32 chapter = uds_map_to_physical_chapter(geometry, vcn);
+ u32 expected_list_number = 0;
+ u32 index_page_number;
+ u32 lowest_delta_list;
+ u32 highest_delta_list;
+
+ for (index_page_number = 0;
+ index_page_number < geometry->index_pages_per_chapter;
+ index_page_number++) {
+ result = uds_get_volume_index_page(index->volume, chapter,
+ index_page_number,
+ &chapter_index_page);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "failed to read index page %u in chapter %u",
+ index_page_number, chapter);
+ }
+
+ lowest_delta_list = chapter_index_page->lowest_list_number;
+ highest_delta_list = chapter_index_page->highest_list_number;
+ if (lowest_delta_list != expected_list_number) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "chapter %u index page %u is corrupt",
+ chapter, index_page_number);
+ }
+
+ uds_update_index_page_map(index->volume->index_page_map, vcn, chapter,
+ index_page_number, highest_delta_list);
+ expected_list_number = highest_delta_list + 1;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int replay_record(struct uds_index *index, const struct uds_record_name *name,
+ u64 virtual_chapter, bool will_be_sparse_chapter)
+{
+ int result;
+ struct volume_index_record record;
+ bool update_record;
+
+ if (will_be_sparse_chapter &&
+ !uds_is_volume_index_sample(index->volume_index, name)) {
+ /*
+ * This entry will be in a sparse chapter after the rebuild completes, and it is
+ * not a sample, so just skip over it.
+ */
+ return UDS_SUCCESS;
+ }
+
+ result = uds_get_volume_index_record(index->volume_index, name, &record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record.is_found) {
+ if (record.is_collision) {
+ if (record.virtual_chapter == virtual_chapter) {
+ /* The record is already correct. */
+ return UDS_SUCCESS;
+ }
+
+ update_record = true;
+ } else if (record.virtual_chapter == virtual_chapter) {
+ /*
+ * There is a volume index entry pointing to the current chapter, but we
+ * don't know if it is for the same name as the one we are currently
+ * working on or not. For now, we're just going to assume that it isn't.
+ * This will create one extra collision record if there was a deleted
+ * record in the current chapter.
+ */
+ update_record = false;
+ } else {
+ /*
+ * If we're rebuilding, we don't normally want to go to disk to see if the
+ * record exists, since we will likely have just read the record from disk
+ * (i.e. we know it's there). The exception to this is when we find an
+ * entry in the volume index that has a different chapter. In this case, we
+ * need to search that chapter to determine if the volume index entry was
+ * for the same record or a different one.
+ */
+ result = uds_search_volume_page_cache_for_rebuild(index->volume,
+ name,
+ record.virtual_chapter,
+ &update_record);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+ } else {
+ update_record = false;
+ }
+
+ if (update_record) {
+ /*
+ * Update the volume index to reference the new chapter for the block. If the
+ * record had been deleted or dropped from the chapter index, it will be back.
+ */
+ result = uds_set_volume_index_record_chapter(&record, virtual_chapter);
+ } else {
+ /*
+ * Add a new entry to the volume index referencing the open chapter. This should be
+ * done regardless of whether this is a brand new record or a sparse record (i.e.
+ * one that doesn't exist in the volume index but does exist on disk), since a
+ * sparse record should be un-sparsified if it shows up again.
+ */
+ result = uds_put_volume_index_record(&record, virtual_chapter);
+ }
+
+ if ((result == UDS_DUPLICATE_NAME) || (result == UDS_OVERFLOW)) {
+ /* The rebuilt index will lose these records. */
+ return UDS_SUCCESS;
+ }
+
+ return result;
+}
+
+static bool check_for_suspend(struct uds_index *index)
+{
+ bool closing;
+
+ if (index->load_context == NULL)
+ return false;
+
+ mutex_lock(&index->load_context->mutex);
+ if (index->load_context->status != INDEX_SUSPENDING) {
+ mutex_unlock(&index->load_context->mutex);
+ return false;
+ }
+
+ /* Notify that we are suspended and wait for the resume. */
+ index->load_context->status = INDEX_SUSPENDED;
+ uds_broadcast_cond(&index->load_context->cond);
+
+ while ((index->load_context->status != INDEX_OPENING) &&
+ (index->load_context->status != INDEX_FREEING))
+ uds_wait_cond(&index->load_context->cond, &index->load_context->mutex);
+
+ closing = (index->load_context->status == INDEX_FREEING);
+ mutex_unlock(&index->load_context->mutex);
+ return closing;
+}
+
+static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse)
+{
+ int result;
+ u32 i;
+ u32 j;
+ const struct index_geometry *geometry;
+ u32 physical_chapter;
+
+ if (check_for_suspend(index)) {
+ vdo_log_info("Replay interrupted by index shutdown at chapter %llu",
+ (unsigned long long) virtual);
+ return -EBUSY;
+ }
+
+ geometry = index->volume->geometry;
+ physical_chapter = uds_map_to_physical_chapter(geometry, virtual);
+ uds_prefetch_volume_chapter(index->volume, physical_chapter);
+ uds_set_volume_index_open_chapter(index->volume_index, virtual);
+
+ result = rebuild_index_page_map(index, virtual);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "could not rebuild index page map for chapter %u",
+ physical_chapter);
+ }
+
+ for (i = 0; i < geometry->record_pages_per_chapter; i++) {
+ u8 *record_page;
+ u32 record_page_number;
+
+ record_page_number = geometry->index_pages_per_chapter + i;
+ result = uds_get_volume_record_page(index->volume, physical_chapter,
+ record_page_number, &record_page);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result, "could not get page %d",
+ record_page_number);
+ }
+
+ for (j = 0; j < geometry->records_per_page; j++) {
+ const u8 *name_bytes;
+ struct uds_record_name name;
+
+ name_bytes = record_page + (j * BYTES_PER_RECORD);
+ memcpy(&name.name, name_bytes, UDS_RECORD_NAME_SIZE);
+ result = replay_record(index, &name, virtual, sparse);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int replay_volume(struct uds_index *index)
+{
+ int result;
+ u64 old_map_update;
+ u64 new_map_update;
+ u64 virtual;
+ u64 from_virtual = index->oldest_virtual_chapter;
+ u64 upto_virtual = index->newest_virtual_chapter;
+ bool will_be_sparse;
+
+ vdo_log_info("Replaying volume from chapter %llu through chapter %llu",
+ (unsigned long long) from_virtual,
+ (unsigned long long) upto_virtual);
+
+ /*
+ * The index failed to load, so the volume index is empty. Add records to the volume index
+ * in order, skipping non-hooks in chapters which will be sparse to save time.
+ *
+ * Go through each record page of each chapter and add the records back to the volume
+ * index. This should not cause anything to be written to either the open chapter or the
+ * on-disk volume. Also skip the on-disk chapter corresponding to upto_virtual, as this
+ * would have already been purged from the volume index when the chapter was opened.
+ *
+ * Also, go through each index page for each chapter and rebuild the index page map.
+ */
+ old_map_update = index->volume->index_page_map->last_update;
+ for (virtual = from_virtual; virtual < upto_virtual; virtual++) {
+ will_be_sparse = uds_is_chapter_sparse(index->volume->geometry,
+ from_virtual, upto_virtual,
+ virtual);
+ result = replay_chapter(index, virtual, will_be_sparse);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ /* Also reap the chapter being replaced by the open chapter. */
+ uds_set_volume_index_open_chapter(index->volume_index, upto_virtual);
+
+ new_map_update = index->volume->index_page_map->last_update;
+ if (new_map_update != old_map_update) {
+ vdo_log_info("replay changed index page map update from %llu to %llu",
+ (unsigned long long) old_map_update,
+ (unsigned long long) new_map_update);
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int rebuild_index(struct uds_index *index)
+{
+ int result;
+ u64 lowest;
+ u64 highest;
+ bool is_empty = false;
+ u32 chapters_per_volume = index->volume->geometry->chapters_per_volume;
+
+ index->volume->lookup_mode = LOOKUP_FOR_REBUILD;
+ result = uds_find_volume_chapter_boundaries(index->volume, &lowest, &highest,
+ &is_empty);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_fatal_strerror(result,
+ "cannot rebuild index: unknown volume chapter boundaries");
+ }
+
+ if (is_empty) {
+ index->newest_virtual_chapter = 0;
+ index->oldest_virtual_chapter = 0;
+ index->volume->lookup_mode = LOOKUP_NORMAL;
+ return UDS_SUCCESS;
+ }
+
+ index->newest_virtual_chapter = highest + 1;
+ index->oldest_virtual_chapter = lowest;
+ if (index->newest_virtual_chapter ==
+ (index->oldest_virtual_chapter + chapters_per_volume)) {
+ /* Skip the chapter shadowed by the open chapter. */
+ index->oldest_virtual_chapter++;
+ }
+
+ result = replay_volume(index);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ index->volume->lookup_mode = LOOKUP_NORMAL;
+ return UDS_SUCCESS;
+}
+
+static void free_index_zone(struct index_zone *zone)
+{
+ if (zone == NULL)
+ return;
+
+ uds_free_open_chapter(zone->open_chapter);
+ uds_free_open_chapter(zone->writing_chapter);
+ vdo_free(zone);
+}
+
+static int make_index_zone(struct uds_index *index, unsigned int zone_number)
+{
+ int result;
+ struct index_zone *zone;
+
+ result = vdo_allocate(1, struct index_zone, "index zone", &zone);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_make_open_chapter(index->volume->geometry, index->zone_count,
+ &zone->open_chapter);
+ if (result != UDS_SUCCESS) {
+ free_index_zone(zone);
+ return result;
+ }
+
+ result = uds_make_open_chapter(index->volume->geometry, index->zone_count,
+ &zone->writing_chapter);
+ if (result != UDS_SUCCESS) {
+ free_index_zone(zone);
+ return result;
+ }
+
+ zone->index = index;
+ zone->id = zone_number;
+ index->zones[zone_number] = zone;
+
+ return UDS_SUCCESS;
+}
+
+int uds_make_index(struct uds_configuration *config, enum uds_open_index_type open_type,
+ struct index_load_context *load_context, index_callback_fn callback,
+ struct uds_index **new_index)
+{
+ int result;
+ bool loaded = false;
+ bool new = (open_type == UDS_CREATE);
+ struct uds_index *index = NULL;
+ struct index_zone *zone;
+ u64 nonce;
+ unsigned int z;
+
+ result = vdo_allocate_extended(struct uds_index, config->zone_count,
+ struct uds_request_queue *, "index", &index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ index->zone_count = config->zone_count;
+
+ result = uds_make_index_layout(config, new, &index->layout);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ result = vdo_allocate(index->zone_count, struct index_zone *, "zones",
+ &index->zones);
+ if (result != VDO_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ result = uds_make_volume(config, index->layout, &index->volume);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ index->volume->lookup_mode = LOOKUP_NORMAL;
+ for (z = 0; z < index->zone_count; z++) {
+ result = make_index_zone(index, z);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return vdo_log_error_strerror(result,
+ "Could not create index zone");
+ }
+ }
+
+ nonce = uds_get_volume_nonce(index->layout);
+ result = uds_make_volume_index(config, nonce, &index->volume_index);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return vdo_log_error_strerror(result, "could not make volume index");
+ }
+
+ index->load_context = load_context;
+ index->callback = callback;
+
+ result = initialize_index_queues(index, config->geometry);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ result = make_chapter_writer(index, &index->chapter_writer);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ if (!new) {
+ result = load_index(index);
+ switch (result) {
+ case UDS_SUCCESS:
+ loaded = true;
+ break;
+ case -ENOMEM:
+ /* We should not try a rebuild for this error. */
+ vdo_log_error_strerror(result, "index could not be loaded");
+ break;
+ default:
+ vdo_log_error_strerror(result, "index could not be loaded");
+ if (open_type == UDS_LOAD) {
+ result = rebuild_index(index);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "index could not be rebuilt");
+ }
+ }
+ break;
+ }
+ }
+
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return vdo_log_error_strerror(result, "fatal error in %s()", __func__);
+ }
+
+ for (z = 0; z < index->zone_count; z++) {
+ zone = index->zones[z];
+ zone->oldest_virtual_chapter = index->oldest_virtual_chapter;
+ zone->newest_virtual_chapter = index->newest_virtual_chapter;
+ }
+
+ if (index->load_context != NULL) {
+ mutex_lock(&index->load_context->mutex);
+ index->load_context->status = INDEX_READY;
+ /*
+ * If we get here, suspend is meaningless, but notify any thread trying to suspend
+ * us so it doesn't hang.
+ */
+ uds_broadcast_cond(&index->load_context->cond);
+ mutex_unlock(&index->load_context->mutex);
+ }
+
+ index->has_saved_open_chapter = loaded;
+ index->need_to_save = !loaded;
+ *new_index = index;
+ return UDS_SUCCESS;
+}
+
+void uds_free_index(struct uds_index *index)
+{
+ unsigned int i;
+
+ if (index == NULL)
+ return;
+
+ uds_request_queue_finish(index->triage_queue);
+ for (i = 0; i < index->zone_count; i++)
+ uds_request_queue_finish(index->zone_queues[i]);
+
+ free_chapter_writer(index->chapter_writer);
+
+ uds_free_volume_index(index->volume_index);
+ if (index->zones != NULL) {
+ for (i = 0; i < index->zone_count; i++)
+ free_index_zone(index->zones[i]);
+ vdo_free(index->zones);
+ }
+
+ uds_free_volume(index->volume);
+ uds_free_index_layout(vdo_forget(index->layout));
+ vdo_free(index);
+}
+
+/* Wait for the chapter writer to complete any outstanding writes. */
+void uds_wait_for_idle_index(struct uds_index *index)
+{
+ struct chapter_writer *writer = index->chapter_writer;
+
+ mutex_lock(&writer->mutex);
+ while (writer->zones_to_write > 0)
+ uds_wait_cond(&writer->cond, &writer->mutex);
+ mutex_unlock(&writer->mutex);
+}
+
+/* This function assumes that all requests have been drained. */
+int uds_save_index(struct uds_index *index)
+{
+ int result;
+
+ if (!index->need_to_save)
+ return UDS_SUCCESS;
+
+ uds_wait_for_idle_index(index);
+ index->prev_save = index->last_save;
+ index->last_save = ((index->newest_virtual_chapter == 0) ?
+ NO_LAST_SAVE : index->newest_virtual_chapter - 1);
+ vdo_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save);
+
+ result = uds_save_index_state(index->layout, index);
+ if (result != UDS_SUCCESS) {
+ vdo_log_info("save index failed");
+ index->last_save = index->prev_save;
+ } else {
+ index->has_saved_open_chapter = true;
+ index->need_to_save = false;
+ vdo_log_info("finished save (vcn %llu)",
+ (unsigned long long) index->last_save);
+ }
+
+ return result;
+}
+
+int uds_replace_index_storage(struct uds_index *index, struct block_device *bdev)
+{
+ return uds_replace_volume_storage(index->volume, index->layout, bdev);
+}
+
+/* Accessing statistics should be safe from any thread. */
+void uds_get_index_stats(struct uds_index *index, struct uds_index_stats *counters)
+{
+ struct volume_index_stats stats;
+
+ uds_get_volume_index_stats(index->volume_index, &stats);
+ counters->entries_indexed = stats.record_count;
+ counters->collisions = stats.collision_count;
+ counters->entries_discarded = stats.discard_count;
+
+ counters->memory_used = (index->volume_index->memory_size +
+ index->volume->cache_size +
+ index->chapter_writer->memory_size);
+}
+
+void uds_enqueue_request(struct uds_request *request, enum request_stage stage)
+{
+ struct uds_index *index = request->index;
+ struct uds_request_queue *queue;
+
+ switch (stage) {
+ case STAGE_TRIAGE:
+ if (index->triage_queue != NULL) {
+ queue = index->triage_queue;
+ break;
+ }
+
+ fallthrough;
+
+ case STAGE_INDEX:
+ request->zone_number =
+ uds_get_volume_index_zone(index->volume_index, &request->record_name);
+ fallthrough;
+
+ case STAGE_MESSAGE:
+ queue = index->zone_queues[request->zone_number];
+ break;
+
+ default:
+ VDO_ASSERT_LOG_ONLY(false, "invalid index stage: %d", stage);
+ return;
+ }
+
+ uds_request_queue_enqueue(queue, request);
+}
diff --git a/drivers/md/dm-vdo/indexer/index.h b/drivers/md/dm-vdo/indexer/index.h
new file mode 100644
index 000000000000..edabb239548e
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_H
+#define UDS_INDEX_H
+
+#include "index-layout.h"
+#include "index-session.h"
+#include "open-chapter.h"
+#include "volume.h"
+#include "volume-index.h"
+
+/*
+ * The index is a high-level structure which represents the totality of the UDS index. It manages
+ * the queues for incoming requests and dispatches them to the appropriate sub-components like the
+ * volume or the volume index. It also manages administrative tasks such as saving and loading the
+ * index.
+ *
+ * The index is divided into a number of independent zones and assigns each request to a zone based
+ * on its name. Most sub-components are similarly divided into zones as well so that requests in
+ * each zone usually operate without interference or coordination between zones.
+ */
+
+typedef void (*index_callback_fn)(struct uds_request *request);
+
+struct index_zone {
+ struct uds_index *index;
+ struct open_chapter_zone *open_chapter;
+ struct open_chapter_zone *writing_chapter;
+ u64 oldest_virtual_chapter;
+ u64 newest_virtual_chapter;
+ unsigned int id;
+};
+
+struct uds_index {
+ bool has_saved_open_chapter;
+ bool need_to_save;
+ struct index_load_context *load_context;
+ struct index_layout *layout;
+ struct volume_index *volume_index;
+ struct volume *volume;
+ unsigned int zone_count;
+ struct index_zone **zones;
+
+ u64 oldest_virtual_chapter;
+ u64 newest_virtual_chapter;
+
+ u64 last_save;
+ u64 prev_save;
+ struct chapter_writer *chapter_writer;
+
+ index_callback_fn callback;
+ struct uds_request_queue *triage_queue;
+ struct uds_request_queue *zone_queues[];
+};
+
+enum request_stage {
+ STAGE_TRIAGE,
+ STAGE_INDEX,
+ STAGE_MESSAGE,
+};
+
+int __must_check uds_make_index(struct uds_configuration *config,
+ enum uds_open_index_type open_type,
+ struct index_load_context *load_context,
+ index_callback_fn callback, struct uds_index **new_index);
+
+int __must_check uds_save_index(struct uds_index *index);
+
+void uds_free_index(struct uds_index *index);
+
+int __must_check uds_replace_index_storage(struct uds_index *index,
+ struct block_device *bdev);
+
+void uds_get_index_stats(struct uds_index *index, struct uds_index_stats *counters);
+
+void uds_enqueue_request(struct uds_request *request, enum request_stage stage);
+
+void uds_wait_for_idle_index(struct uds_index *index);
+
+#endif /* UDS_INDEX_H */
diff --git a/drivers/md/dm-vdo/indexer/indexer.h b/drivers/md/dm-vdo/indexer/indexer.h
new file mode 100644
index 000000000000..3744aaf625b0
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/indexer.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef INDEXER_H
+#define INDEXER_H
+
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "funnel-queue.h"
+
+/*
+ * UDS public API
+ *
+ * The Universal Deduplication System (UDS) is an efficient name-value store. When used for
+ * deduplicating storage, the names are generally hashes of data blocks and the associated data is
+ * where that block is located on the underlying storage medium. The stored names are expected to
+ * be randomly distributed among the space of possible names. If this assumption is violated, the
+ * UDS index will store fewer names than normal but will otherwise continue to work. The data
+ * associated with each name can be any 16-byte value.
+ *
+ * A client must first create an index session to interact with an index. Once created, the session
+ * can be shared among multiple threads or users. When a session is destroyed, it will also close
+ * and save any associated index.
+ *
+ * To make a request, a client must allocate a uds_request structure and set the required fields
+ * before launching it. UDS will invoke the provided callback to complete the request. After the
+ * callback has been called, the uds_request structure can be freed or reused for a new request.
+ * There are five types of requests:
+ *
+ * A UDS_UPDATE request will associate the provided name with the provided data. Any previous data
+ * associated with that name will be discarded.
+ *
+ * A UDS_QUERY request will return the data associated with the provided name, if any. The entry
+ * for the name will also be marked as most recent, as if the data had been updated.
+ *
+ * A UDS_POST request is a combination of UDS_QUERY and UDS_UPDATE. If there is already data
+ * associated with the provided name, that data is returned. If there is no existing association,
+ * the name is associated with the newly provided data. This request is equivalent to a UDS_QUERY
+ * request followed by a UDS_UPDATE request if no data is found, but it is much more efficient.
+ *
+ * A UDS_QUERY_NO_UPDATE request will return the data associated with the provided name, but will
+ * not change the recency of the entry for the name. This request is primarily useful for testing,
+ * to determine whether an entry exists without changing the internal state of the index.
+ *
+ * A UDS_DELETE request removes any data associated with the provided name. This operation is
+ * generally not necessary, because the index will automatically discard its oldest entries once it
+ * becomes full.
+ */
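+
+/*
+ * Editorial sketch, not part of the original patch: a minimal illustration of the request
+ * lifecycle described above, using only the declarations in this header. The names my_bdev,
+ * my_callback, and my_fill_name are hypothetical placeholders, and error handling beyond the
+ * returned codes is omitted.
+ *
+ *   static void my_callback(struct uds_request *request)
+ *   {
+ *           if ((request->status == UDS_SUCCESS) && request->found)
+ *                   pr_info("existing mapping found\n");
+ *   }
+ *
+ *   static int my_index_example(struct block_device *my_bdev)
+ *   {
+ *           struct uds_index_session *session;
+ *           struct uds_parameters params = {
+ *                   .bdev = my_bdev,
+ *                   .memory_size = UDS_MEMORY_CONFIG_256MB,
+ *           };
+ *           struct uds_request request = {
+ *                   .type = UDS_POST,
+ *                   .callback = my_callback,
+ *           };
+ *           int result;
+ *
+ *           result = uds_create_index_session(&session);
+ *           if (result != UDS_SUCCESS)
+ *                   return result;
+ *
+ *           result = uds_open_index(UDS_CREATE, &params, session);
+ *           if (result != UDS_SUCCESS) {
+ *                   uds_destroy_index_session(session);
+ *                   return result;
+ *           }
+ *
+ *           request.session = session;
+ *           my_fill_name(&request.record_name, &request.new_metadata);
+ *           result = uds_launch_request(&request);
+ *           if (result == UDS_SUCCESS)
+ *                   result = uds_flush_index_session(session);
+ *           if (result == UDS_SUCCESS)
+ *                   result = uds_close_index(session);
+ *
+ *           uds_destroy_index_session(session);
+ *           return result;
+ *   }
+ */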
+
+/* General UDS constants and structures */
+
+enum uds_request_type {
+ /* Create or update the mapping for a name, and make the name most recent. */
+ UDS_UPDATE,
+
+ /* Return any mapped data for a name, and make the name most recent. */
+ UDS_QUERY,
+
+ /*
+ * Return any mapped data for a name, or map the provided data to the name if there is no
+ * current data, and make the name most recent.
+ */
+ UDS_POST,
+
+ /* Return any mapped data for a name without updating its recency. */
+ UDS_QUERY_NO_UPDATE,
+
+ /* Remove any mapping for a name. */
+ UDS_DELETE,
+
+};
+
+enum uds_open_index_type {
+ /* Create a new index. */
+ UDS_CREATE,
+
+ /* Load an existing index and try to recover if necessary. */
+ UDS_LOAD,
+
+ /* Load an existing index, but only if it was saved cleanly. */
+ UDS_NO_REBUILD,
+};
+
+enum {
+ /* The record name size in bytes */
+ UDS_RECORD_NAME_SIZE = 16,
+ /* The maximum record data size in bytes */
+ UDS_RECORD_DATA_SIZE = 16,
+};
+
+/*
+ * A type representing a UDS memory configuration which is either a positive integer number of
+ * gigabytes or one of the six special constants for configurations smaller than one gigabyte.
+ */
+typedef int uds_memory_config_size_t;
+
+enum {
+ /* The maximum configurable amount of memory */
+ UDS_MEMORY_CONFIG_MAX = 1024,
+ /* Flag indicating that the index has one less chapter than usual */
+ UDS_MEMORY_CONFIG_REDUCED = 0x1000,
+ UDS_MEMORY_CONFIG_REDUCED_MAX = 1024 + UDS_MEMORY_CONFIG_REDUCED,
+ /* Special values indicating sizes less than 1 GB */
+ UDS_MEMORY_CONFIG_256MB = -256,
+ UDS_MEMORY_CONFIG_512MB = -512,
+ UDS_MEMORY_CONFIG_768MB = -768,
+ UDS_MEMORY_CONFIG_REDUCED_256MB = -1280,
+ UDS_MEMORY_CONFIG_REDUCED_512MB = -1536,
+ UDS_MEMORY_CONFIG_REDUCED_768MB = -1792,
+};
+
+struct uds_record_name {
+ unsigned char name[UDS_RECORD_NAME_SIZE];
+};
+
+struct uds_record_data {
+ unsigned char data[UDS_RECORD_DATA_SIZE];
+};
+
+struct uds_volume_record {
+ struct uds_record_name name;
+ struct uds_record_data data;
+};
+
+struct uds_parameters {
+ /* The block_device used for storage */
+ struct block_device *bdev;
+ /* The maximum allowable size of the index on storage */
+ size_t size;
+ /* The offset where the index should start */
+ off_t offset;
+ /* The maximum memory allocation, in GB */
+ uds_memory_config_size_t memory_size;
+ /* Whether the index should include sparse chapters */
+ bool sparse;
+ /* A 64-bit nonce to validate the index */
+ u64 nonce;
+ /* The number of threads used to process index requests */
+ unsigned int zone_count;
+ /* The number of threads used to read volume pages */
+ unsigned int read_threads;
+};
+
+/*
+ * These statistics capture characteristics of the current index, including resource usage and
+ * requests processed since the index was opened.
+ */
+struct uds_index_stats {
+ /* The total number of records stored in the index */
+ u64 entries_indexed;
+ /* An estimate of the index's memory usage, in bytes */
+ u64 memory_used;
+ /* The number of collisions recorded in the volume index */
+ u64 collisions;
+ /* The number of entries discarded from the index since startup */
+ u64 entries_discarded;
+ /* The time at which these statistics were fetched */
+ s64 current_time;
+ /* The number of post calls that found an existing entry */
+ u64 posts_found;
+ /* The number of post calls that added an entry */
+ u64 posts_not_found;
+ /*
+ * The number of post calls that found an existing entry that is current enough to only
+ * exist in memory and not have been committed to disk yet
+ */
+ u64 in_memory_posts_found;
+ /*
+ * The number of post calls that found an existing entry in the dense portion of the index
+ */
+ u64 dense_posts_found;
+ /*
+ * The number of post calls that found an existing entry in the sparse portion of the index
+ */
+ u64 sparse_posts_found;
+ /* The number of update calls that updated an existing entry */
+ u64 updates_found;
+ /* The number of update calls that added a new entry */
+ u64 updates_not_found;
+ /* The number of delete requests that deleted an existing entry */
+ u64 deletions_found;
+ /* The number of delete requests that did nothing */
+ u64 deletions_not_found;
+ /* The number of query calls that found an existing entry */
+ u64 queries_found;
+ /* The number of query calls that did not find an entry */
+ u64 queries_not_found;
+ /* The total number of requests processed */
+ u64 requests;
+};
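+
+/*
+ * Editorial sketch, not part of the original patch: one way a caller could derive a
+ * deduplication hit rate for UDS_POST traffic from these counters, assuming a session obtained
+ * as in the example above. The hit_percent variable is a hypothetical local.
+ *
+ *   struct uds_index_stats stats;
+ *   u64 hit_percent = 0;
+ *   int result;
+ *
+ *   result = uds_get_index_session_stats(session, &stats);
+ *   if ((result == UDS_SUCCESS) && ((stats.posts_found + stats.posts_not_found) > 0))
+ *           hit_percent = (100 * stats.posts_found) /
+ *                         (stats.posts_found + stats.posts_not_found);
+ */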
+
+enum uds_index_region {
+ /* No location information has been determined */
+ UDS_LOCATION_UNKNOWN = 0,
+ /* The index page entry has been found */
+ UDS_LOCATION_INDEX_PAGE_LOOKUP,
+ /* The record page entry has been found */
+ UDS_LOCATION_RECORD_PAGE_LOOKUP,
+ /* The record is not in the index */
+ UDS_LOCATION_UNAVAILABLE,
+ /* The record was found in the open chapter */
+ UDS_LOCATION_IN_OPEN_CHAPTER,
+ /* The record was found in the dense part of the index */
+ UDS_LOCATION_IN_DENSE,
+ /* The record was found in the sparse part of the index */
+ UDS_LOCATION_IN_SPARSE,
+} __packed;
+
+/* Zone message requests are used to communicate between index zones. */
+enum uds_zone_message_type {
+ /* A standard request with no message */
+ UDS_MESSAGE_NONE = 0,
+ /* Add a chapter to the sparse chapter index cache */
+ UDS_MESSAGE_SPARSE_CACHE_BARRIER,
+ /* Close a chapter to keep the zone from falling behind */
+ UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED,
+} __packed;
+
+struct uds_zone_message {
+ /* The type of message, determining how it will be processed */
+ enum uds_zone_message_type type;
+ /* The virtual chapter number to which the message applies */
+ u64 virtual_chapter;
+};
+
+struct uds_index_session;
+struct uds_index;
+struct uds_request;
+
+/* Once this callback has been invoked, the uds_request structure can be reused or freed. */
+typedef void (*uds_request_callback_fn)(struct uds_request *request);
+
+struct uds_request {
+ /* These input fields must be set before launching a request. */
+
+ /* The name of the record to look up or create */
+ struct uds_record_name record_name;
+ /* New data to associate with the record name, if applicable */
+ struct uds_record_data new_metadata;
+ /* A callback to invoke when the request is complete */
+ uds_request_callback_fn callback;
+ /* The index session that will manage this request */
+ struct uds_index_session *session;
+ /* The type of operation to perform, as described above */
+ enum uds_request_type type;
+
+ /* These output fields are set when a request is complete. */
+
+ /* The existing data associated with the request name, if any */
+ struct uds_record_data old_metadata;
+ /* Either UDS_SUCCESS or an error code for the request */
+ int status;
+ /* True if the record name had an existing entry in the index */
+ bool found;
+
+ /*
+ * The remaining fields are used internally and should not be altered by clients. The index
+ * relies on zone_number being the first field in this section.
+ */
+
+ /* The number of the zone which will process this request */
+ unsigned int zone_number;
+ /* A link for adding a request to a lock-free queue */
+ struct funnel_queue_entry queue_link;
+ /* A link for adding a request to a standard linked list */
+ struct uds_request *next_request;
+ /* A pointer to the index processing this request */
+ struct uds_index *index;
+ /* Control message for coordinating between zones */
+ struct uds_zone_message zone_message;
+ /* If true, process request immediately by waking the worker thread */
+ bool unbatched;
+ /* If true, continue this request before processing newer requests */
+ bool requeued;
+ /* The virtual chapter containing the record name, if known */
+ u64 virtual_chapter;
+ /* The region of the index containing the record name */
+ enum uds_index_region location;
+};
+
+/* Compute the number of bytes needed to store an index. */
+int __must_check uds_compute_index_size(const struct uds_parameters *parameters,
+ u64 *index_size);
+
+/* A session is required for most index operations. */
+int __must_check uds_create_index_session(struct uds_index_session **session);
+
+/* Destroying an index session also closes and saves the associated index. */
+int uds_destroy_index_session(struct uds_index_session *session);
+
+/*
+ * Create or open an index with an existing session. This operation fails if the index session is
+ * suspended, or if there is already an open index.
+ */
+int __must_check uds_open_index(enum uds_open_index_type open_type,
+ const struct uds_parameters *parameters,
+ struct uds_index_session *session);
+
+/*
+ * Wait until all callbacks for index operations are complete, and prevent new index operations
+ * from starting. New index operations will fail with EBUSY until the session is resumed. Also
+ * optionally saves the index.
+ */
+int __must_check uds_suspend_index_session(struct uds_index_session *session, bool save);
+
+/*
+ * Allow new index operations for an index, whether it was suspended or not. If the index is
+ * suspended and the supplied block device differs from the current backing store, the index will
+ * start using the new backing store instead.
+ */
+int __must_check uds_resume_index_session(struct uds_index_session *session,
+ struct block_device *bdev);
+
+/* Wait until all outstanding index operations are complete. */
+int __must_check uds_flush_index_session(struct uds_index_session *session);
+
+/* Close an index. This operation fails if the index session is suspended. */
+int __must_check uds_close_index(struct uds_index_session *session);
+
+/* Get index statistics since the last time the index was opened. */
+int __must_check uds_get_index_session_stats(struct uds_index_session *session,
+ struct uds_index_stats *stats);
+
+/* This function will fail if any required field of the request is not set. */
+int __must_check uds_launch_request(struct uds_request *request);
+
+struct cond_var {
+ wait_queue_head_t wait_queue;
+};
+
+static inline void uds_init_cond(struct cond_var *cv)
+{
+ init_waitqueue_head(&cv->wait_queue);
+}
+
+static inline void uds_signal_cond(struct cond_var *cv)
+{
+ wake_up(&cv->wait_queue);
+}
+
+static inline void uds_broadcast_cond(struct cond_var *cv)
+{
+ wake_up_all(&cv->wait_queue);
+}
+
+void uds_wait_cond(struct cond_var *cv, struct mutex *mutex);
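+
+/*
+ * Editorial sketch, not part of the original patch: the condition-variable protocol these
+ * helpers are used with elsewhere in this patch. The waiter re-checks its predicate in a loop
+ * around uds_wait_cond() while holding the mutex; uds_wait_cond() is expected to drop the mutex
+ * while sleeping and reacquire it before returning. The signalling thread changes the predicate
+ * under the same mutex before broadcasting. The "ready" flag, "lock", and "cond" are
+ * hypothetical names.
+ *
+ *   // Waiter:
+ *   mutex_lock(&lock);
+ *   while (!ready)
+ *           uds_wait_cond(&cond, &lock);
+ *   mutex_unlock(&lock);
+ *
+ *   // Signaller:
+ *   mutex_lock(&lock);
+ *   ready = true;
+ *   uds_broadcast_cond(&cond);
+ *   mutex_unlock(&lock);
+ */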
+
+#endif /* INDEXER_H */
diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c
new file mode 100644
index 000000000000..515765d35794
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/io-factory.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "io-factory.h"
+
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+#include <linux/err.h>
+#include <linux/mount.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+
+/*
+ * The I/O factory object manages access to index storage, which is a contiguous range of blocks on
+ * a block device.
+ *
+ * The factory holds the open device and is responsible for closing it. The factory has methods to
+ * make helper structures that can be used to access sections of the index.
+ */
+struct io_factory {
+ struct block_device *bdev;
+ atomic_t ref_count;
+};
+
+/* The buffered reader allows efficient I/O by reading page-sized segments into a buffer. */
+struct buffered_reader {
+ struct io_factory *factory;
+ struct dm_bufio_client *client;
+ struct dm_buffer *buffer;
+ sector_t limit;
+ sector_t block_number;
+ u8 *start;
+ u8 *end;
+};
+
+#define MAX_READ_AHEAD_BLOCKS 4
+
+/*
+ * The buffered writer allows efficient I/O by buffering writes and committing page-sized segments
+ * to storage.
+ */
+struct buffered_writer {
+ struct io_factory *factory;
+ struct dm_bufio_client *client;
+ struct dm_buffer *buffer;
+ sector_t limit;
+ sector_t block_number;
+ u8 *start;
+ u8 *end;
+ int error;
+};
+
+static void uds_get_io_factory(struct io_factory *factory)
+{
+ atomic_inc(&factory->ref_count);
+}
+
+int uds_make_io_factory(struct block_device *bdev, struct io_factory **factory_ptr)
+{
+ int result;
+ struct io_factory *factory;
+
+ result = vdo_allocate(1, struct io_factory, __func__, &factory);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ factory->bdev = bdev;
+ atomic_set_release(&factory->ref_count, 1);
+
+ *factory_ptr = factory;
+ return UDS_SUCCESS;
+}
+
+int uds_replace_storage(struct io_factory *factory, struct block_device *bdev)
+{
+ factory->bdev = bdev;
+ return UDS_SUCCESS;
+}
+
+/* Free an I/O factory once all references have been released. */
+void uds_put_io_factory(struct io_factory *factory)
+{
+ if (atomic_add_return(-1, &factory->ref_count) <= 0)
+ vdo_free(factory);
+}
+
+size_t uds_get_writable_size(struct io_factory *factory)
+{
+ return i_size_read(factory->bdev->bd_inode);
+}
+
+/* Create a struct dm_bufio_client for an index region starting at offset. */
+int uds_make_bufio(struct io_factory *factory, off_t block_offset, size_t block_size,
+ unsigned int reserved_buffers, struct dm_bufio_client **client_ptr)
+{
+ struct dm_bufio_client *client;
+
+ client = dm_bufio_client_create(factory->bdev, block_size, reserved_buffers, 0,
+ NULL, NULL, 0);
+ if (IS_ERR(client))
+ return -PTR_ERR(client);
+
+ dm_bufio_set_sector_offset(client, block_offset * SECTORS_PER_BLOCK);
+ *client_ptr = client;
+ return UDS_SUCCESS;
+}
+
+static void read_ahead(struct buffered_reader *reader, sector_t block_number)
+{
+ if (block_number < reader->limit) {
+ sector_t read_ahead = min((sector_t) MAX_READ_AHEAD_BLOCKS,
+ reader->limit - block_number);
+
+ dm_bufio_prefetch(reader->client, block_number, read_ahead);
+ }
+}
+
+void uds_free_buffered_reader(struct buffered_reader *reader)
+{
+ if (reader == NULL)
+ return;
+
+ if (reader->buffer != NULL)
+ dm_bufio_release(reader->buffer);
+
+ dm_bufio_client_destroy(reader->client);
+ uds_put_io_factory(reader->factory);
+ vdo_free(reader);
+}
+
+/* Create a buffered reader for an index region starting at offset. */
+int uds_make_buffered_reader(struct io_factory *factory, off_t offset, u64 block_count,
+ struct buffered_reader **reader_ptr)
+{
+ int result;
+ struct dm_bufio_client *client = NULL;
+ struct buffered_reader *reader = NULL;
+
+ result = uds_make_bufio(factory, offset, UDS_BLOCK_SIZE, 1, &client);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct buffered_reader, "buffered reader", &reader);
+ if (result != VDO_SUCCESS) {
+ dm_bufio_client_destroy(client);
+ return result;
+ }
+
+ *reader = (struct buffered_reader) {
+ .factory = factory,
+ .client = client,
+ .buffer = NULL,
+ .limit = block_count,
+ .block_number = 0,
+ .start = NULL,
+ .end = NULL,
+ };
+
+ read_ahead(reader, 0);
+ uds_get_io_factory(factory);
+ *reader_ptr = reader;
+ return UDS_SUCCESS;
+}
+
+static int position_reader(struct buffered_reader *reader, sector_t block_number,
+ off_t offset)
+{
+ struct dm_buffer *buffer = NULL;
+ void *data;
+
+ if ((reader->end == NULL) || (block_number != reader->block_number)) {
+ if (block_number >= reader->limit)
+ return UDS_OUT_OF_RANGE;
+
+ if (reader->buffer != NULL)
+ dm_bufio_release(vdo_forget(reader->buffer));
+
+ data = dm_bufio_read(reader->client, block_number, &buffer);
+ if (IS_ERR(data))
+ return -PTR_ERR(data);
+
+ reader->buffer = buffer;
+ reader->start = data;
+ if (block_number == reader->block_number + 1)
+ read_ahead(reader, block_number + 1);
+ }
+
+ reader->block_number = block_number;
+ reader->end = reader->start + offset;
+ return UDS_SUCCESS;
+}
+
+static size_t bytes_remaining_in_read_buffer(struct buffered_reader *reader)
+{
+ return (reader->end == NULL) ? 0 : reader->start + UDS_BLOCK_SIZE - reader->end;
+}
+
+static int reset_reader(struct buffered_reader *reader)
+{
+ sector_t block_number;
+
+ if (bytes_remaining_in_read_buffer(reader) > 0)
+ return UDS_SUCCESS;
+
+ block_number = reader->block_number;
+ if (reader->end != NULL)
+ block_number++;
+
+ return position_reader(reader, block_number, 0);
+}
+
+int uds_read_from_buffered_reader(struct buffered_reader *reader, u8 *data,
+ size_t length)
+{
+ int result = UDS_SUCCESS;
+ size_t chunk_size;
+
+ while (length > 0) {
+ result = reset_reader(reader);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ chunk_size = min(length, bytes_remaining_in_read_buffer(reader));
+ memcpy(data, reader->end, chunk_size);
+ length -= chunk_size;
+ data += chunk_size;
+ reader->end += chunk_size;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/*
+ * Verify that the next data on the reader matches the required value. If the value matches, the
+ * matching contents are consumed. If the value does not match, the reader state is unchanged.
+ */
+int uds_verify_buffered_data(struct buffered_reader *reader, const u8 *value,
+ size_t length)
+{
+ int result = UDS_SUCCESS;
+ size_t chunk_size;
+ sector_t start_block_number = reader->block_number;
+ int start_offset = reader->end - reader->start;
+
+ while (length > 0) {
+ result = reset_reader(reader);
+ if (result != UDS_SUCCESS) {
+ result = UDS_CORRUPT_DATA;
+ break;
+ }
+
+ chunk_size = min(length, bytes_remaining_in_read_buffer(reader));
+ if (memcmp(value, reader->end, chunk_size) != 0) {
+ result = UDS_CORRUPT_DATA;
+ break;
+ }
+
+ length -= chunk_size;
+ value += chunk_size;
+ reader->end += chunk_size;
+ }
+
+ if (result != UDS_SUCCESS)
+ position_reader(reader, start_block_number, start_offset);
+
+ return result;
+}
+
+/* Create a buffered writer for an index region starting at offset. */
+int uds_make_buffered_writer(struct io_factory *factory, off_t offset, u64 block_count,
+ struct buffered_writer **writer_ptr)
+{
+ int result;
+ struct dm_bufio_client *client = NULL;
+ struct buffered_writer *writer;
+
+ result = uds_make_bufio(factory, offset, UDS_BLOCK_SIZE, 1, &client);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct buffered_writer, "buffered writer", &writer);
+ if (result != VDO_SUCCESS) {
+ dm_bufio_client_destroy(client);
+ return result;
+ }
+
+ *writer = (struct buffered_writer) {
+ .factory = factory,
+ .client = client,
+ .buffer = NULL,
+ .limit = block_count,
+ .start = NULL,
+ .end = NULL,
+ .block_number = 0,
+ .error = UDS_SUCCESS,
+ };
+
+ uds_get_io_factory(factory);
+ *writer_ptr = writer;
+ return UDS_SUCCESS;
+}
+
+static size_t get_remaining_write_space(struct buffered_writer *writer)
+{
+ return writer->start + UDS_BLOCK_SIZE - writer->end;
+}
+
+static int __must_check prepare_next_buffer(struct buffered_writer *writer)
+{
+ struct dm_buffer *buffer = NULL;
+ void *data;
+
+ if (writer->block_number >= writer->limit) {
+ writer->error = UDS_OUT_OF_RANGE;
+ return UDS_OUT_OF_RANGE;
+ }
+
+ data = dm_bufio_new(writer->client, writer->block_number, &buffer);
+ if (IS_ERR(data)) {
+ writer->error = -PTR_ERR(data);
+ return writer->error;
+ }
+
+ writer->buffer = buffer;
+ writer->start = data;
+ writer->end = data;
+ return UDS_SUCCESS;
+}
+
+static int flush_previous_buffer(struct buffered_writer *writer)
+{
+ size_t available;
+
+ if (writer->buffer == NULL)
+ return writer->error;
+
+ if (writer->error == UDS_SUCCESS) {
+ available = get_remaining_write_space(writer);
+
+ if (available > 0)
+ memset(writer->end, 0, available);
+
+ dm_bufio_mark_buffer_dirty(writer->buffer);
+ }
+
+ dm_bufio_release(writer->buffer);
+ writer->buffer = NULL;
+ writer->start = NULL;
+ writer->end = NULL;
+ writer->block_number++;
+ return writer->error;
+}
+
+void uds_free_buffered_writer(struct buffered_writer *writer)
+{
+ int result;
+
+ if (writer == NULL)
+ return;
+
+ flush_previous_buffer(writer);
+ result = -dm_bufio_write_dirty_buffers(writer->client);
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result, "%s: failed to sync storage", __func__);
+
+ dm_bufio_client_destroy(writer->client);
+ uds_put_io_factory(writer->factory);
+ vdo_free(writer);
+}
+
+/*
+ * Append data to the buffer, writing as needed. If no data is provided, zeros are written instead.
+ * If a write error occurs, it is recorded and returned on every subsequent write attempt.
+ */
+int uds_write_to_buffered_writer(struct buffered_writer *writer, const u8 *data,
+ size_t length)
+{
+ int result = writer->error;
+ size_t chunk_size;
+
+ while ((length > 0) && (result == UDS_SUCCESS)) {
+ if (writer->buffer == NULL) {
+ result = prepare_next_buffer(writer);
+ continue;
+ }
+
+ chunk_size = min(length, get_remaining_write_space(writer));
+ if (data == NULL) {
+ memset(writer->end, 0, chunk_size);
+ } else {
+ memcpy(writer->end, data, chunk_size);
+ data += chunk_size;
+ }
+
+ length -= chunk_size;
+ writer->end += chunk_size;
+
+ if (get_remaining_write_space(writer) == 0)
+ result = uds_flush_buffered_writer(writer);
+ }
+
+ return result;
+}
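+
+/*
+ * Example (illustrative): passing a NULL data pointer is how callers pad a
+ * region with zeros, e.g. uds_write_to_buffered_writer(writer, NULL, padding)
+ * appends 'padding' zero bytes with the same error handling as a normal write.
+ */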
+
+int uds_flush_buffered_writer(struct buffered_writer *writer)
+{
+ if (writer->error != UDS_SUCCESS)
+ return writer->error;
+
+ return flush_previous_buffer(writer);
+}
diff --git a/drivers/md/dm-vdo/indexer/io-factory.h b/drivers/md/dm-vdo/indexer/io-factory.h
new file mode 100644
index 000000000000..7fb5a0616a79
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/io-factory.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_IO_FACTORY_H
+#define UDS_IO_FACTORY_H
+
+#include <linux/dm-bufio.h>
+
+/*
+ * The I/O factory manages all low-level I/O operations to the underlying storage device. Its main
+ * clients are the index layout and the volume. The buffered reader and buffered writer interfaces
+ * are helpers for accessing data in a contiguous range of storage blocks.
+ */
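+
+/*
+ * Illustrative usage sketch (not part of this interface; the variable names
+ * are placeholders): a typical client pairs these calls as
+ *
+ *	result = uds_make_buffered_writer(factory, offset, block_count, &writer);
+ *	if (result == UDS_SUCCESS) {
+ *		result = uds_write_to_buffered_writer(writer, data, length);
+ *		if (result == UDS_SUCCESS)
+ *			result = uds_flush_buffered_writer(writer);
+ *		uds_free_buffered_writer(writer);
+ *	}
+ *
+ * The buffered reader side is symmetric, using uds_make_buffered_reader(),
+ * uds_read_from_buffered_reader(), and uds_free_buffered_reader().
+ */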
+
+struct buffered_reader;
+struct buffered_writer;
+
+struct io_factory;
+
+enum {
+ UDS_BLOCK_SIZE = 4096,
+ SECTORS_PER_BLOCK = UDS_BLOCK_SIZE >> SECTOR_SHIFT,
+};
+
+int __must_check uds_make_io_factory(struct block_device *bdev,
+ struct io_factory **factory_ptr);
+
+int __must_check uds_replace_storage(struct io_factory *factory,
+ struct block_device *bdev);
+
+void uds_put_io_factory(struct io_factory *factory);
+
+size_t __must_check uds_get_writable_size(struct io_factory *factory);
+
+int __must_check uds_make_bufio(struct io_factory *factory, off_t block_offset,
+ size_t block_size, unsigned int reserved_buffers,
+ struct dm_bufio_client **client_ptr);
+
+int __must_check uds_make_buffered_reader(struct io_factory *factory, off_t offset,
+ u64 block_count,
+ struct buffered_reader **reader_ptr);
+
+void uds_free_buffered_reader(struct buffered_reader *reader);
+
+int __must_check uds_read_from_buffered_reader(struct buffered_reader *reader, u8 *data,
+ size_t length);
+
+int __must_check uds_verify_buffered_data(struct buffered_reader *reader, const u8 *value,
+ size_t length);
+
+int __must_check uds_make_buffered_writer(struct io_factory *factory, off_t offset,
+ u64 block_count,
+ struct buffered_writer **writer_ptr);
+
+void uds_free_buffered_writer(struct buffered_writer *buffer);
+
+int __must_check uds_write_to_buffered_writer(struct buffered_writer *writer,
+ const u8 *data, size_t length);
+
+int __must_check uds_flush_buffered_writer(struct buffered_writer *writer);
+
+#endif /* UDS_IO_FACTORY_H */
diff --git a/drivers/md/dm-vdo/indexer/open-chapter.c b/drivers/md/dm-vdo/indexer/open-chapter.c
new file mode 100644
index 000000000000..4a67bcadaae0
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/open-chapter.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "open-chapter.h"
+
+#include <linux/log2.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+
+#include "config.h"
+#include "hash-utils.h"
+
+/*
+ * Each index zone has a dedicated open chapter zone structure which gets an equal share of the
+ * open chapter space. Records are assigned to zones based on their record name. Within each zone,
+ * records are stored in an array in the order they arrive. Additionally, a reference to each
+ * record is stored in a hash table to help determine if a new record duplicates an existing one.
+ * If new metadata for an existing name arrives, the record is altered in place. The array of
+ * records is 1-based so that record number 0 can be used to indicate an unused hash slot.
+ *
+ * Deleted records are marked with a flag rather than actually removed to simplify hash table
+ * management. The array of deleted flags overlays the array of hash slots, but the flags are
+ * indexed by record number instead of by record name. The number of hash slots will always be a
+ * power of two that is greater than the number of records to be indexed, guaranteeing that hash
+ * insertion cannot fail, and that there are sufficient flags for all records.
+ *
+ * Once any open chapter zone fills its available space, the chapter is closed. The records from
+ * each zone are interleaved to attempt to preserve temporal locality and assigned to record pages.
+ * Empty or deleted records are replaced by copies of a valid record so that the record pages only
+ * contain valid records. The chapter then constructs a delta index, split into index pages, which
+ * maps each record name to the record page on which that record can be found. These
+ * structures are then passed to the volume to be recorded on storage.
+ *
+ * When the index is saved, the open chapter records are saved in a single array, once again
+ * interleaved to attempt to preserve temporal locality. When the index is reloaded, there may be a
+ * different number of zones than previously, so the records must be parcelled out to their new
+ * zones. In addition, depending on the distribution of record names, a new zone may have more
+ * records than it has space. In this case, the latest records for that zone will be discarded.
+ */
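+
+/*
+ * Interleaving example (a zone count of two is assumed purely for
+ * illustration): when a chapter is closed or saved, the collated record order
+ * is
+ *
+ *	zone 0 record 1, zone 1 record 1, zone 0 record 2, zone 1 record 2, ...
+ *
+ * so records that arrived at about the same time end up near each other on
+ * the record pages even though different zones indexed them.
+ */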
+
+static const u8 OPEN_CHAPTER_MAGIC[] = "ALBOC";
+static const u8 OPEN_CHAPTER_VERSION[] = "02.00";
+
+#define OPEN_CHAPTER_MAGIC_LENGTH (sizeof(OPEN_CHAPTER_MAGIC) - 1)
+#define OPEN_CHAPTER_VERSION_LENGTH (sizeof(OPEN_CHAPTER_VERSION) - 1)
+#define LOAD_RATIO 2
+
+static inline size_t records_size(const struct open_chapter_zone *open_chapter)
+{
+ return sizeof(struct uds_volume_record) * (1 + open_chapter->capacity);
+}
+
+static inline size_t slots_size(size_t slot_count)
+{
+ return sizeof(struct open_chapter_zone_slot) * slot_count;
+}
+
+int uds_make_open_chapter(const struct index_geometry *geometry, unsigned int zone_count,
+ struct open_chapter_zone **open_chapter_ptr)
+{
+ int result;
+ struct open_chapter_zone *open_chapter;
+ size_t capacity = geometry->records_per_chapter / zone_count;
+ size_t slot_count = (1 << bits_per(capacity * LOAD_RATIO));
+
+ result = vdo_allocate_extended(struct open_chapter_zone, slot_count,
+ struct open_chapter_zone_slot, "open chapter",
+ &open_chapter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ open_chapter->slot_count = slot_count;
+ open_chapter->capacity = capacity;
+ result = vdo_allocate_cache_aligned(records_size(open_chapter), "record pages",
+ &open_chapter->records);
+ if (result != VDO_SUCCESS) {
+ uds_free_open_chapter(open_chapter);
+ return result;
+ }
+
+ *open_chapter_ptr = open_chapter;
+ return UDS_SUCCESS;
+}
+
+void uds_reset_open_chapter(struct open_chapter_zone *open_chapter)
+{
+ open_chapter->size = 0;
+ open_chapter->deletions = 0;
+
+ memset(open_chapter->records, 0, records_size(open_chapter));
+ memset(open_chapter->slots, 0, slots_size(open_chapter->slot_count));
+}
+
+static unsigned int probe_chapter_slots(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name)
+{
+ struct uds_volume_record *record;
+ unsigned int slot_count = open_chapter->slot_count;
+ unsigned int slot = uds_name_to_hash_slot(name, slot_count);
+ unsigned int record_number;
+ unsigned int attempts = 1;
+
+ while (true) {
+ record_number = open_chapter->slots[slot].record_number;
+
+ /*
+ * If the hash slot is empty, we've reached the end of a chain without finding the
+ * record and should terminate the search.
+ */
+ if (record_number == 0)
+ return slot;
+
+ /*
+ * If the name of the record referenced by the slot matches and has not been
+ * deleted, then we've found the requested name.
+ */
+ record = &open_chapter->records[record_number];
+ if ((memcmp(&record->name, name, UDS_RECORD_NAME_SIZE) == 0) &&
+ !open_chapter->slots[record_number].deleted)
+ return slot;
+
+ /*
+ * Quadratic probing: advance the probe by 1, 2, 3, etc. and try again. This
+ * performs better than linear probing and works best for 2^N slots.
+ */
+ slot = (slot + attempts++) % slot_count;
+ }
+}
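+
+/*
+ * Probe sequence example (slot numbers are illustrative): starting from hash
+ * slot s, the loop above visits s, s + 1, s + 3, s + 6, s + 10, ... modulo
+ * slot_count; the step grows by one each time. With a power-of-two slot count
+ * this sequence eventually reaches every slot, and since the table always has
+ * more slots than records, the search must end at a match or an empty slot.
+ */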
+
+void uds_search_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ struct uds_record_data *metadata, bool *found)
+{
+ unsigned int slot;
+ unsigned int record_number;
+
+ slot = probe_chapter_slots(open_chapter, name);
+ record_number = open_chapter->slots[slot].record_number;
+ if (record_number == 0) {
+ *found = false;
+ } else {
+ *found = true;
+ *metadata = open_chapter->records[record_number].data;
+ }
+}
+
+/* Add a record to the open chapter zone and return the remaining space. */
+int uds_put_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ const struct uds_record_data *metadata)
+{
+ unsigned int slot;
+ unsigned int record_number;
+ struct uds_volume_record *record;
+
+ if (open_chapter->size >= open_chapter->capacity)
+ return 0;
+
+ slot = probe_chapter_slots(open_chapter, name);
+ record_number = open_chapter->slots[slot].record_number;
+
+ if (record_number == 0) {
+ record_number = ++open_chapter->size;
+ open_chapter->slots[slot].record_number = record_number;
+ }
+
+ record = &open_chapter->records[record_number];
+ record->name = *name;
+ record->data = *metadata;
+
+ return open_chapter->capacity - open_chapter->size;
+}
+
+void uds_remove_from_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name)
+{
+ unsigned int slot;
+ unsigned int record_number;
+
+ slot = probe_chapter_slots(open_chapter, name);
+ record_number = open_chapter->slots[slot].record_number;
+
+ if (record_number > 0) {
+ open_chapter->slots[record_number].deleted = true;
+ open_chapter->deletions += 1;
+ }
+}
+
+void uds_free_open_chapter(struct open_chapter_zone *open_chapter)
+{
+ if (open_chapter != NULL) {
+ vdo_free(open_chapter->records);
+ vdo_free(open_chapter);
+ }
+}
+
+/* Map each record name to its record page number in the delta chapter index. */
+static int fill_delta_chapter_index(struct open_chapter_zone **chapter_zones,
+ unsigned int zone_count,
+ struct open_chapter_index *index,
+ struct uds_volume_record *collated_records)
+{
+ int result;
+ unsigned int records_per_chapter;
+ unsigned int records_per_page;
+ unsigned int record_index;
+ unsigned int records = 0;
+ u32 page_number;
+ unsigned int z;
+ int overflow_count = 0;
+ struct uds_volume_record *fill_record = NULL;
+
+ /*
+ * The record pages should not have any empty space, so find a record with which to fill
+ * the chapter zone if it was closed early, and also to replace any deleted records. The
+ * last record in any filled zone is guaranteed to not have been deleted, so use one of
+ * those.
+ */
+ for (z = 0; z < zone_count; z++) {
+ struct open_chapter_zone *zone = chapter_zones[z];
+
+ if (zone->size == zone->capacity) {
+ fill_record = &zone->records[zone->size];
+ break;
+ }
+ }
+
+ records_per_chapter = index->geometry->records_per_chapter;
+ records_per_page = index->geometry->records_per_page;
+
+ for (records = 0; records < records_per_chapter; records++) {
+ struct uds_volume_record *record = &collated_records[records];
+ struct open_chapter_zone *open_chapter;
+
+ /* The record arrays in the zones are 1-based. */
+ record_index = 1 + (records / zone_count);
+ page_number = records / records_per_page;
+ open_chapter = chapter_zones[records % zone_count];
+
+ /* Use the fill record in place of an unused record. */
+ if (record_index > open_chapter->size ||
+ open_chapter->slots[record_index].deleted) {
+ *record = *fill_record;
+ continue;
+ }
+
+ *record = open_chapter->records[record_index];
+ result = uds_put_open_chapter_index_record(index, &record->name,
+ page_number);
+ switch (result) {
+ case UDS_SUCCESS:
+ break;
+ case UDS_OVERFLOW:
+ overflow_count++;
+ break;
+ default:
+ vdo_log_error_strerror(result,
+ "failed to build open chapter index");
+ return result;
+ }
+ }
+
+ if (overflow_count > 0)
+ vdo_log_warning("Failed to add %d entries to chapter index",
+ overflow_count);
+
+ return UDS_SUCCESS;
+}
+
+int uds_close_open_chapter(struct open_chapter_zone **chapter_zones,
+ unsigned int zone_count, struct volume *volume,
+ struct open_chapter_index *chapter_index,
+ struct uds_volume_record *collated_records,
+ u64 virtual_chapter_number)
+{
+ int result;
+
+ uds_empty_open_chapter_index(chapter_index, virtual_chapter_number);
+ result = fill_delta_chapter_index(chapter_zones, zone_count, chapter_index,
+ collated_records);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_write_chapter(volume, chapter_index, collated_records);
+}
+
+int uds_save_open_chapter(struct uds_index *index, struct buffered_writer *writer)
+{
+ int result;
+ struct open_chapter_zone *open_chapter;
+ struct uds_volume_record *record;
+ u8 record_count_data[sizeof(u32)];
+ u32 record_count = 0;
+ unsigned int record_index;
+ unsigned int z;
+
+ result = uds_write_to_buffered_writer(writer, OPEN_CHAPTER_MAGIC,
+ OPEN_CHAPTER_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(writer, OPEN_CHAPTER_VERSION,
+ OPEN_CHAPTER_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (z = 0; z < index->zone_count; z++) {
+ open_chapter = index->zones[z]->open_chapter;
+ record_count += open_chapter->size - open_chapter->deletions;
+ }
+
+ put_unaligned_le32(record_count, record_count_data);
+ result = uds_write_to_buffered_writer(writer, record_count_data,
+ sizeof(record_count_data));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record_index = 1;
+ while (record_count > 0) {
+ for (z = 0; z < index->zone_count; z++) {
+ open_chapter = index->zones[z]->open_chapter;
+ if (record_index > open_chapter->size)
+ continue;
+
+ if (open_chapter->slots[record_index].deleted)
+ continue;
+
+ record = &open_chapter->records[record_index];
+ result = uds_write_to_buffered_writer(writer, (u8 *) record,
+ sizeof(*record));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record_count--;
+ }
+
+ record_index++;
+ }
+
+ return uds_flush_buffered_writer(writer);
+}
+
+u64 uds_compute_saved_open_chapter_size(struct index_geometry *geometry)
+{
+ unsigned int records_per_chapter = geometry->records_per_chapter;
+
+ return OPEN_CHAPTER_MAGIC_LENGTH + OPEN_CHAPTER_VERSION_LENGTH + sizeof(u32) +
+ records_per_chapter * sizeof(struct uds_volume_record);
+}
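+
+/*
+ * Worked example with assumed numbers: for a geometry with 1024 records per
+ * chapter and a 32-byte struct uds_volume_record (16-byte name plus 16-byte
+ * data), the saved open chapter occupies 5 + 5 + 4 + (1024 * 32) = 32782
+ * bytes: magic, version, record count, then the interleaved records.
+ */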
+
+static int load_version20(struct uds_index *index, struct buffered_reader *reader)
+{
+ int result;
+ u32 record_count;
+ u8 record_count_data[sizeof(u32)];
+ struct uds_volume_record record;
+
+ /*
+ * Track which zones cannot accept any more records. If the open chapter had a different
+ * number of zones previously, some new zones may have more records than they have space
+ * for. These overflow records will be discarded.
+ */
+ bool full_flags[MAX_ZONES] = {
+ false,
+ };
+
+ result = uds_read_from_buffered_reader(reader, (u8 *) &record_count_data,
+ sizeof(record_count_data));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record_count = get_unaligned_le32(record_count_data);
+ while (record_count-- > 0) {
+ unsigned int zone = 0;
+
+ result = uds_read_from_buffered_reader(reader, (u8 *) &record,
+ sizeof(record));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (index->zone_count > 1)
+ zone = uds_get_volume_index_zone(index->volume_index,
+ &record.name);
+
+ if (!full_flags[zone]) {
+ struct open_chapter_zone *open_chapter;
+ unsigned int remaining;
+
+ open_chapter = index->zones[zone]->open_chapter;
+ remaining = uds_put_open_chapter(open_chapter, &record.name,
+ &record.data);
+ /* Do not allow any zone to fill completely. */
+ full_flags[zone] = (remaining <= 1);
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_load_open_chapter(struct uds_index *index, struct buffered_reader *reader)
+{
+ u8 version[OPEN_CHAPTER_VERSION_LENGTH];
+ int result;
+
+ result = uds_verify_buffered_data(reader, OPEN_CHAPTER_MAGIC,
+ OPEN_CHAPTER_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, version, sizeof(version));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (memcmp(OPEN_CHAPTER_VERSION, version, sizeof(version)) != 0) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "Invalid open chapter version: %.*s",
+ (int) sizeof(version), version);
+ }
+
+ return load_version20(index, reader);
+}
diff --git a/drivers/md/dm-vdo/indexer/open-chapter.h b/drivers/md/dm-vdo/indexer/open-chapter.h
new file mode 100644
index 000000000000..a4250bb19525
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/open-chapter.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_OPEN_CHAPTER_H
+#define UDS_OPEN_CHAPTER_H
+
+#include "chapter-index.h"
+#include "geometry.h"
+#include "index.h"
+#include "volume.h"
+
+/*
+ * The open chapter tracks the newest records in memory. Like the index as a whole, each open
+ * chapter is divided into a number of independent zones which are interleaved when the chapter is
+ * committed to the volume.
+ */
+
+enum {
+ OPEN_CHAPTER_RECORD_NUMBER_BITS = 23,
+};
+
+struct open_chapter_zone_slot {
+ /* If non-zero, the record number addressed by this hash slot */
+ unsigned int record_number : OPEN_CHAPTER_RECORD_NUMBER_BITS;
+ /* If true, the record at the index of this hash slot was deleted */
+ bool deleted : 1;
+} __packed;
+
+struct open_chapter_zone {
+ /* The maximum number of records that can be stored */
+ unsigned int capacity;
+ /* The number of records stored */
+ unsigned int size;
+ /* The number of deleted records */
+ unsigned int deletions;
+ /* Array of chunk records, 1-based */
+ struct uds_volume_record *records;
+ /* The number of slots in the hash table */
+ unsigned int slot_count;
+ /* The hash table slots, referencing virtual record numbers */
+ struct open_chapter_zone_slot slots[];
+};
+
+int __must_check uds_make_open_chapter(const struct index_geometry *geometry,
+ unsigned int zone_count,
+ struct open_chapter_zone **open_chapter_ptr);
+
+void uds_reset_open_chapter(struct open_chapter_zone *open_chapter);
+
+void uds_search_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ struct uds_record_data *metadata, bool *found);
+
+int __must_check uds_put_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ const struct uds_record_data *metadata);
+
+void uds_remove_from_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name);
+
+void uds_free_open_chapter(struct open_chapter_zone *open_chapter);
+
+int __must_check uds_close_open_chapter(struct open_chapter_zone **chapter_zones,
+ unsigned int zone_count, struct volume *volume,
+ struct open_chapter_index *chapter_index,
+ struct uds_volume_record *collated_records,
+ u64 virtual_chapter_number);
+
+int __must_check uds_save_open_chapter(struct uds_index *index,
+ struct buffered_writer *writer);
+
+int __must_check uds_load_open_chapter(struct uds_index *index,
+ struct buffered_reader *reader);
+
+u64 uds_compute_saved_open_chapter_size(struct index_geometry *geometry);
+
+#endif /* UDS_OPEN_CHAPTER_H */
diff --git a/drivers/md/dm-vdo/indexer/radix-sort.c b/drivers/md/dm-vdo/indexer/radix-sort.c
new file mode 100644
index 000000000000..66b8c706a1ef
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/radix-sort.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "radix-sort.h"
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+#include "memory-alloc.h"
+#include "string-utils.h"
+
+/*
+ * This implementation allocates one large object to do the sorting, which can be reused as many
+ * times as desired. The amount of memory required is logarithmically proportional to the number of
+ * keys to be sorted.
+ */
+
+/* Piles smaller than this are handled with a simple insertion sort. */
+#define INSERTION_SORT_THRESHOLD 12
+
+/* Sort keys are pointers to immutable fixed-length arrays of bytes. */
+typedef const u8 *sort_key_t;
+
+/*
+ * The keys are separated into piles based on the byte in each key at the current offset, so the
+ * number of keys with each byte must be counted.
+ */
+struct histogram {
+ /* The number of non-empty bins */
+ u16 used;
+ /* The index (key byte) of the first non-empty bin */
+ u16 first;
+ /* The index (key byte) of the last non-empty bin */
+ u16 last;
+ /* The number of occurrences of each specific byte */
+ u32 size[256];
+};
+
+/*
+ * Sub-tasks are manually managed on a stack, both for performance and to put a logarithmic bound
+ * on the stack space needed.
+ */
+struct task {
+ /* Pointer to the first key to sort. */
+ sort_key_t *first_key;
+ /* Pointer to the last key to sort. */
+ sort_key_t *last_key;
+ /* The offset into the key at which to continue sorting. */
+ u16 offset;
+ /* The number of bytes remaining in the sort keys. */
+ u16 length;
+};
+
+struct radix_sorter {
+ unsigned int count;
+ struct histogram bins;
+ sort_key_t *pile[256];
+ struct task *end_of_stack;
+ struct task insertion_list[256];
+ struct task stack[];
+};
+
+/* Compare a segment of two fixed-length keys starting at an offset. */
+static inline int compare(sort_key_t key1, sort_key_t key2, u16 offset, u16 length)
+{
+ return memcmp(&key1[offset], &key2[offset], length);
+}
+
+/* Insert the next unsorted key into an array of sorted keys. */
+static inline void insert_key(const struct task task, sort_key_t *next)
+{
+ /* Pull the unsorted key out, freeing up the array slot. */
+ sort_key_t unsorted = *next;
+
+ /* Compare the key to the preceding sorted entries, shifting down ones that are larger. */
+ while ((--next >= task.first_key) &&
+ (compare(unsorted, next[0], task.offset, task.length) < 0))
+ next[1] = next[0];
+
+ /* Insert the key into the last slot that was cleared, sorting it. */
+ next[1] = unsorted;
+}
+
+/*
+ * Sort a range of key segments using an insertion sort. This simple sort is faster than the
+ * 256-way radix sort when the number of keys to sort is small.
+ */
+static inline void insertion_sort(const struct task task)
+{
+ sort_key_t *next;
+
+ for (next = task.first_key + 1; next <= task.last_key; next++)
+ insert_key(task, next);
+}
+
+/* Push a sorting task onto a task stack. */
+static inline void push_task(struct task **stack_pointer, sort_key_t *first_key,
+ u32 count, u16 offset, u16 length)
+{
+ struct task *task = (*stack_pointer)++;
+
+ task->first_key = first_key;
+ task->last_key = &first_key[count - 1];
+ task->offset = offset;
+ task->length = length;
+}
+
+static inline void swap_keys(sort_key_t *a, sort_key_t *b)
+{
+ sort_key_t c = *a;
+ *a = *b;
+ *b = c;
+}
+
+/*
+ * Count the number of times each byte value appears in the array of keys to sort at the current
+ * offset, keeping track of the number of non-empty bins, and the index of the first and last
+ * non-empty bin.
+ */
+static inline void measure_bins(const struct task task, struct histogram *bins)
+{
+ sort_key_t *key_ptr;
+
+ /*
+ * Subtle invariant: bins->used and bins->size[] are zero because the sorting code clears
+ * it all out as it goes. Even though this structure is re-used, we don't need to pay to
+ * zero it before starting a new tally.
+ */
+ bins->first = U8_MAX;
+ bins->last = 0;
+
+ for (key_ptr = task.first_key; key_ptr <= task.last_key; key_ptr++) {
+ /* Increment the count for the byte in the key at the current offset. */
+ u8 bin = (*key_ptr)[task.offset];
+ u32 size = ++bins->size[bin];
+
+ /* Track non-empty bins. */
+ if (size == 1) {
+ bins->used += 1;
+ if (bin < bins->first)
+ bins->first = bin;
+
+ if (bin > bins->last)
+ bins->last = bin;
+ }
+ }
+}
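+
+/*
+ * Tally example (the keys are illustrative): for the three keys "cat", "car",
+ * and "dog" at offset 0, the histogram ends with size['c'] = 2, size['d'] = 1,
+ * first = 'c', last = 'd', and used = 2; every other bin remains zero.
+ */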
+
+/*
+ * Convert the bin sizes to pointers to where each pile goes.
+ *
+ * pile[0] = first_key + bins->size[0],
+ * pile[1] = pile[0] + bins->size[1], etc.
+ *
+ * After the keys are moved to the appropriate pile, we'll need to sort each of the piles by the
+ * next radix position. A new task is put on the stack for each pile containing lots of keys, or a
+ * new task is put on the list for each pile containing few keys.
+ *
+ * @stack: pointer to the top of the stack
+ * @end_of_stack: the end of the stack
+ * @list: pointer to the head of the list
+ * @pile: array for pointers to the end of each pile
+ * @bins: the histogram of the sizes of each pile
+ * @first_key: the first key of the task being subdivided
+ * @offset: the next radix position to sort by
+ * @length: the number of bytes remaining in the sort keys
+ *
+ * Return: UDS_SUCCESS or an error code
+ */
+static inline int push_bins(struct task **stack, struct task *end_of_stack,
+ struct task **list, sort_key_t *pile[],
+ struct histogram *bins, sort_key_t *first_key,
+ u16 offset, u16 length)
+{
+ sort_key_t *pile_start = first_key;
+ int bin;
+
+ for (bin = bins->first; ; bin++) {
+ u32 size = bins->size[bin];
+
+ /* Skip empty piles. */
+ if (size == 0)
+ continue;
+
+ /* There's no need to sort empty keys. */
+ if (length > 0) {
+ if (size > INSERTION_SORT_THRESHOLD) {
+ if (*stack >= end_of_stack)
+ return UDS_BAD_STATE;
+
+ push_task(stack, pile_start, size, offset, length);
+ } else if (size > 1) {
+ push_task(list, pile_start, size, offset, length);
+ }
+ }
+
+ pile_start += size;
+ pile[bin] = pile_start;
+ if (--bins->used == 0)
+ break;
+ }
+
+ return UDS_SUCCESS;
+}
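+
+/*
+ * Continuing the tally example above: with size['c'] = 2 and size['d'] = 1,
+ * push_bins() leaves pile['c'] = first_key + 2 and pile['d'] = first_key + 3.
+ * Each pile pointer marks the end of its pile, and the distribution loop in
+ * uds_radix_sort() fills each pile downward from that end.
+ */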
+
+int uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter)
+{
+ int result;
+ unsigned int stack_size = count / INSERTION_SORT_THRESHOLD;
+ struct radix_sorter *radix_sorter;
+
+ result = vdo_allocate_extended(struct radix_sorter, stack_size, struct task,
+ __func__, &radix_sorter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ radix_sorter->count = count;
+ radix_sorter->end_of_stack = radix_sorter->stack + stack_size;
+ *sorter = radix_sorter;
+ return UDS_SUCCESS;
+}
+
+void uds_free_radix_sorter(struct radix_sorter *sorter)
+{
+ vdo_free(sorter);
+}
+
+/*
+ * Sort pointers to fixed-length keys (arrays of bytes) using a radix sort. The sort implementation
+ * is unstable, so the relative ordering of equal keys is not preserved.
+ */
+int uds_radix_sort(struct radix_sorter *sorter, const unsigned char *keys[],
+ unsigned int count, unsigned short length)
+{
+ struct task start;
+ struct histogram *bins = &sorter->bins;
+ sort_key_t **pile = sorter->pile;
+ struct task *task_stack = sorter->stack;
+
+ /* All zero-length keys are identical and therefore already sorted. */
+ if ((count == 0) || (length == 0))
+ return UDS_SUCCESS;
+
+ /* The initial task is to sort the entire length of all the keys. */
+ start = (struct task) {
+ .first_key = keys,
+ .last_key = &keys[count - 1],
+ .offset = 0,
+ .length = length,
+ };
+
+ if (count <= INSERTION_SORT_THRESHOLD) {
+ insertion_sort(start);
+ return UDS_SUCCESS;
+ }
+
+ if (count > sorter->count)
+ return UDS_INVALID_ARGUMENT;
+
+ /*
+ * Repeatedly consume a sorting task from the stack and process it, pushing new sub-tasks
+ * onto the stack for each radix-sorted pile. When all tasks and sub-tasks have been
+ * processed, the stack will be empty and all the keys in the starting task will be fully
+ * sorted.
+ */
+ for (*task_stack = start; task_stack >= sorter->stack; task_stack--) {
+ const struct task task = *task_stack;
+ struct task *insertion_task_list;
+ int result;
+ sort_key_t *fence;
+ sort_key_t *end;
+
+ measure_bins(task, bins);
+
+ /*
+ * Now that we know how large each bin is, generate pointers for each of the piles
+ * and push a new task to sort each pile by the next radix byte.
+ */
+ insertion_task_list = sorter->insertion_list;
+ result = push_bins(&task_stack, sorter->end_of_stack,
+ &insertion_task_list, pile, bins, task.first_key,
+ task.offset + 1, task.length - 1);
+ if (result != UDS_SUCCESS) {
+ memset(bins, 0, sizeof(*bins));
+ return result;
+ }
+
+ /* Now bins->used is zero again. */
+
+ /*
+ * Don't bother processing the last pile: when piles 0..N-1 are all in place, then
+ * pile N must also be in place.
+ */
+ end = task.last_key - bins->size[bins->last];
+ bins->size[bins->last] = 0;
+
+ for (fence = task.first_key; fence <= end; ) {
+ u8 bin;
+ sort_key_t key = *fence;
+
+ /*
+ * The radix byte of the key tells us which pile it belongs in. Swap it for
+ * an unprocessed item just below that pile, and repeat.
+ */
+ while (--pile[bin = key[task.offset]] > fence)
+ swap_keys(pile[bin], &key);
+
+ /*
+ * The pile reached the fence. Put the key at the bottom of that pile,
+ * completing it, and advance the fence to the next pile.
+ */
+ *fence = key;
+ fence += bins->size[bin];
+ bins->size[bin] = 0;
+ }
+
+ /* Now bins->size[] is all zero again. */
+
+ /*
+ * When the number of keys in a task gets small enough, it is faster to use an
+ * insertion sort than to keep subdividing into tiny piles.
+ */
+ while (--insertion_task_list >= sorter->insertion_list)
+ insertion_sort(*insertion_task_list);
+ }
+
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/radix-sort.h b/drivers/md/dm-vdo/indexer/radix-sort.h
new file mode 100644
index 000000000000..812949bc2cee
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/radix-sort.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_RADIX_SORT_H
+#define UDS_RADIX_SORT_H
+
+/*
+ * Radix sort is implemented using an American Flag sort, an unstable, in-place 8-bit radix
+ * exchange sort. This is adapted from the algorithm in the paper by Peter M. McIlroy, Keith
+ * Bostic, and M. Douglas McIlroy, "Engineering Radix Sort".
+ *
+ * http://www.usenix.org/publications/compsystems/1993/win_mcilroy.pdf
+ */
+
+struct radix_sorter;
+
+int __must_check uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter);
+
+void uds_free_radix_sorter(struct radix_sorter *sorter);
+
+int __must_check uds_radix_sort(struct radix_sorter *sorter, const unsigned char *keys[],
+ unsigned int count, unsigned short length);
+
+#endif /* UDS_RADIX_SORT_H */
diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.c b/drivers/md/dm-vdo/indexer/sparse-cache.c
new file mode 100644
index 000000000000..28920167827c
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/sparse-cache.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "sparse-cache.h"
+
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/dm-bufio.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "chapter-index.h"
+#include "config.h"
+#include "index.h"
+
+/*
+ * Since the cache is small, it is implemented as a simple array of cache entries. Searching for a
+ * specific virtual chapter is implemented as a linear search. The cache replacement policy is
+ * least-recently-used (LRU). Again, the small size of the cache allows the LRU order to be
+ * maintained by shifting entries in an array list.
+ *
+ * Changing the contents of the cache requires the coordinated participation of all zone threads
+ * via the careful use of barrier messages sent to all the index zones by the triage queue worker
+ * thread. The critical invariant for coordination is that the cache membership must not change
+ * between updates, so that all calls to uds_sparse_cache_contains() from the zone threads must
+ * receive the same results for every virtual chapter number. To ensure that critical invariant,
+ * state changes such as "that virtual chapter is no longer in the volume" and "skip searching that
+ * chapter because it has had too many cache misses" are represented separately from the cache
+ * membership information (the virtual chapter number).
+ *
+ * As a result of this invariant, we have the guarantee that every zone thread will call
+ * uds_update_sparse_cache() once and exactly once to request a chapter that is not in the cache,
+ * and the serialization of the barrier requests from the triage queue ensures they will all
+ * request the same chapter number. This means the only synchronization we need can be provided by
+ * a pair of thread barriers used only in the uds_update_sparse_cache() call, providing a critical
+ * section where a single zone thread can drive the cache update while all the other zone threads
+ * are known to be blocked, waiting in the second barrier. Outside that critical section, all the
+ * zone threads implicitly hold a shared lock. Inside it, the thread for zone zero holds an
+ * exclusive lock. No other threads may access or modify the cache entries.
+ *
+ * Chapter statistics must only be modified by a single thread, which is also the zone zero thread.
+ * All fields that might be frequently updated by that thread are kept in separate cache-aligned
+ * structures so they will not cause cache contention via "false sharing" with the fields that are
+ * frequently accessed by all of the zone threads.
+ *
+ * The LRU order is managed independently by each zone thread, and each zone uses its own list for
+ * searching and cache membership queries. The zone zero list is used to decide which chapter to
+ * evict when the cache is updated, and its search list is copied to the other threads at that
+ * time.
+ *
+ * The virtual chapter number field of the cache entry is the single field indicating whether a
+ * chapter is a member of the cache or not. The value NO_CHAPTER is used to represent a null or
+ * undefined chapter number. When present in the virtual chapter number field of a
+ * cached_chapter_index, it indicates that the cache entry is dead, and all the other fields of
+ * that entry (other than immutable pointers to cache memory) are undefined and irrelevant. Any
+ * cache entry that is not marked as dead is fully defined and a member of the cache, and
+ * uds_sparse_cache_contains() will always return true for any virtual chapter number that appears
+ * in any of the cache entries.
+ *
+ * A chapter index that is a member of the cache may be excluded from searches between calls to
+ * uds_update_sparse_cache() in two different ways. First, when a chapter falls off the end of the
+ * volume, its virtual chapter number will be less than the oldest virtual chapter number. Since
+ * that chapter is no longer part of the volume, there's no point in continuing to search that
+ * chapter index. Once invalidated, that virtual chapter will still be considered a member of the
+ * cache, but it will no longer be searched for matching names.
+ *
+ * The second mechanism is a heuristic based on keeping track of the number of consecutive search
+ * misses in a given chapter index. Once that count exceeds a threshold, the skip_search flag will
+ * be set to true, causing the chapter to be skipped when searching the entire cache, but still
+ * allowing it to be found when searching for a hook in that specific chapter. Finding a hook will
+ * clear the skip_search flag, once again allowing the non-hook searches to use that cache entry.
+ * Again, regardless of the state of the skip_search flag, the virtual chapter must still be
+ * considered a member of the cache for uds_sparse_cache_contains().
+ */
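+
+/*
+ * Call-pattern sketch (illustrative, not additional driver code): every zone
+ * thread handling the same barrier request makes the identical call
+ *
+ *	result = uds_update_sparse_cache(zone, virtual_chapter);
+ *
+ * and the zone zero thread performs the cache replacement between the two
+ * barriers inside that call while the other zone threads simply wait, so the
+ * callers need no additional locking.
+ */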
+
+#define SKIP_SEARCH_THRESHOLD 20000
+#define ZONE_ZERO 0
+
+/*
+ * These counters are essentially fields of the struct cached_chapter_index, but are segregated
+ * into this structure because they are frequently modified. They are grouped and aligned to keep
+ * them on different cache lines from the chapter fields that are accessed far more often than they
+ * are updated.
+ */
+struct __aligned(L1_CACHE_BYTES) cached_index_counters {
+ u64 consecutive_misses;
+};
+
+struct __aligned(L1_CACHE_BYTES) cached_chapter_index {
+ /*
+ * The virtual chapter number of the cached chapter index. NO_CHAPTER means this cache
+ * entry is unused. This field must only be modified in the critical section in
+ * uds_update_sparse_cache().
+ */
+ u64 virtual_chapter;
+
+ u32 index_pages_count;
+
+ /*
+ * These pointers are immutable during the life of the cache. The contents of the arrays
+ * change when the cache entry is replaced.
+ */
+ struct delta_index_page *index_pages;
+ struct dm_buffer **page_buffers;
+
+ /*
+ * If set, skip the chapter when searching the entire cache. This flag is just a
+ * performance optimization; it is mutable between cache updates, but it rarely
+ * changes and is frequently accessed, so it is grouped with the immutable fields.
+ */
+ bool skip_search;
+
+ /*
+ * The cache-aligned counters change often and are placed at the end of the structure to
+ * prevent false sharing with the more stable fields above.
+ */
+ struct cached_index_counters counters;
+};
+
+/*
+ * A search_list represents an ordering of the sparse chapter index cache entry array, from most
+ * recently accessed to least recently accessed, which is the order in which the indexes should be
+ * searched and the reverse order in which they should be evicted from the cache.
+ *
+ * Cache entries that are dead or empty are kept at the end of the list, avoiding the need to even
+ * iterate over them to search, and ensuring that dead entries are replaced before any live entries
+ * are evicted.
+ *
+ * The search list is instantiated for each zone thread, avoiding any need for synchronization. The
+ * structure is allocated on a cache boundary to avoid false sharing of memory cache lines between
+ * zone threads.
+ */
+struct search_list {
+ u8 capacity;
+ u8 first_dead_entry;
+ struct cached_chapter_index *entries[];
+};
+
+struct threads_barrier {
+ /* Lock for this barrier object */
+ struct semaphore lock;
+ /* Semaphore for threads waiting at this barrier */
+ struct semaphore wait;
+ /* Number of threads which have arrived */
+ int arrived;
+ /* Total number of threads using this barrier */
+ int thread_count;
+};
+
+struct sparse_cache {
+ const struct index_geometry *geometry;
+ unsigned int capacity;
+ unsigned int zone_count;
+
+ unsigned int skip_threshold;
+ struct search_list *search_lists[MAX_ZONES];
+ struct cached_chapter_index **scratch_entries;
+
+ struct threads_barrier begin_update_barrier;
+ struct threads_barrier end_update_barrier;
+
+ struct cached_chapter_index chapters[];
+};
+
+static void initialize_threads_barrier(struct threads_barrier *barrier,
+ unsigned int thread_count)
+{
+ sema_init(&barrier->lock, 1);
+ barrier->arrived = 0;
+ barrier->thread_count = thread_count;
+ sema_init(&barrier->wait, 0);
+}
+
+static inline void __down(struct semaphore *semaphore)
+{
+ /*
+ * Do not use down(semaphore). Instead use down_interruptible so that
+ * we do not get 120 second stall messages in kern.log.
+ */
+ while (down_interruptible(semaphore) != 0) {
+ /*
+ * If we're called from a user-mode process (e.g., "dmsetup
+ * remove") while waiting for an operation that may take a
+ * while (e.g., UDS index save), and a signal is sent (SIGINT,
+ * SIGUSR2), then down_interruptible will not block. If that
+ * happens, sleep briefly to avoid keeping the CPU locked up in
+ * this loop. We could just call cond_resched, but then we'd
+ * still keep consuming CPU time slices and swamp other threads
+ * trying to do computational work.
+ */
+ fsleep(1000);
+ }
+}
+
+static void enter_threads_barrier(struct threads_barrier *barrier)
+{
+ __down(&barrier->lock);
+ if (++barrier->arrived == barrier->thread_count) {
+ /* last thread */
+ int i;
+
+ for (i = 1; i < barrier->thread_count; i++)
+ up(&barrier->wait);
+
+ barrier->arrived = 0;
+ up(&barrier->lock);
+ } else {
+ up(&barrier->lock);
+ __down(&barrier->wait);
+ }
+}
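+
+/*
+ * Worked example (a thread count of three is assumed for illustration): the
+ * first two callers of enter_threads_barrier() release the lock and sleep on
+ * the wait semaphore; the third caller sees arrived reach thread_count, wakes
+ * the two sleepers with up(), resets arrived to zero, and returns without
+ * sleeping itself.
+ */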
+
+static int __must_check initialize_cached_chapter_index(struct cached_chapter_index *chapter,
+ const struct index_geometry *geometry)
+{
+ int result;
+
+ chapter->virtual_chapter = NO_CHAPTER;
+ chapter->index_pages_count = geometry->index_pages_per_chapter;
+
+ result = vdo_allocate(chapter->index_pages_count, struct delta_index_page,
+ __func__, &chapter->index_pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return vdo_allocate(chapter->index_pages_count, struct dm_buffer *,
+ "sparse index volume pages", &chapter->page_buffers);
+}
+
+static int __must_check make_search_list(struct sparse_cache *cache,
+ struct search_list **list_ptr)
+{
+ struct search_list *list;
+ unsigned int bytes;
+ u8 i;
+ int result;
+
+ bytes = (sizeof(struct search_list) +
+ (cache->capacity * sizeof(struct cached_chapter_index *)));
+ result = vdo_allocate_cache_aligned(bytes, "search list", &list);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ list->capacity = cache->capacity;
+ list->first_dead_entry = 0;
+
+ for (i = 0; i < list->capacity; i++)
+ list->entries[i] = &cache->chapters[i];
+
+ *list_ptr = list;
+ return UDS_SUCCESS;
+}
+
+int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int capacity,
+ unsigned int zone_count, struct sparse_cache **cache_ptr)
+{
+ int result;
+ unsigned int i;
+ struct sparse_cache *cache;
+ unsigned int bytes;
+
+ bytes = (sizeof(struct sparse_cache) + (capacity * sizeof(struct cached_chapter_index)));
+ result = vdo_allocate_cache_aligned(bytes, "sparse cache", &cache);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ cache->geometry = geometry;
+ cache->capacity = capacity;
+ cache->zone_count = zone_count;
+
+ /*
+ * Scale down the skip threshold since the cache only counts cache misses in zone zero, but
+ * requests are being handled in all zones.
+ */
+ cache->skip_threshold = (SKIP_SEARCH_THRESHOLD / zone_count);
+
+ initialize_threads_barrier(&cache->begin_update_barrier, zone_count);
+ initialize_threads_barrier(&cache->end_update_barrier, zone_count);
+
+ for (i = 0; i < capacity; i++) {
+ result = initialize_cached_chapter_index(&cache->chapters[i], geometry);
+ if (result != UDS_SUCCESS)
+ goto out;
+ }
+
+ for (i = 0; i < zone_count; i++) {
+ result = make_search_list(cache, &cache->search_lists[i]);
+ if (result != UDS_SUCCESS)
+ goto out;
+ }
+
+ /* purge_search_list() needs some temporary lists for sorting. */
+ result = vdo_allocate(capacity * 2, struct cached_chapter_index *,
+ "scratch entries", &cache->scratch_entries);
+ if (result != VDO_SUCCESS)
+ goto out;
+
+ *cache_ptr = cache;
+ return UDS_SUCCESS;
+out:
+ uds_free_sparse_cache(cache);
+ return result;
+}
+
+static inline void set_skip_search(struct cached_chapter_index *chapter,
+ bool skip_search)
+{
+ /* Check before setting to reduce cache line contention. */
+ if (READ_ONCE(chapter->skip_search) != skip_search)
+ WRITE_ONCE(chapter->skip_search, skip_search);
+}
+
+static void score_search_hit(struct cached_chapter_index *chapter)
+{
+ chapter->counters.consecutive_misses = 0;
+ set_skip_search(chapter, false);
+}
+
+static void score_search_miss(struct sparse_cache *cache,
+ struct cached_chapter_index *chapter)
+{
+ chapter->counters.consecutive_misses++;
+ if (chapter->counters.consecutive_misses > cache->skip_threshold)
+ set_skip_search(chapter, true);
+}
+
+static void release_cached_chapter_index(struct cached_chapter_index *chapter)
+{
+ unsigned int i;
+
+ chapter->virtual_chapter = NO_CHAPTER;
+ if (chapter->page_buffers == NULL)
+ return;
+
+ for (i = 0; i < chapter->index_pages_count; i++) {
+ if (chapter->page_buffers[i] != NULL)
+ dm_bufio_release(vdo_forget(chapter->page_buffers[i]));
+ }
+}
+
+void uds_free_sparse_cache(struct sparse_cache *cache)
+{
+ unsigned int i;
+
+ if (cache == NULL)
+ return;
+
+ vdo_free(cache->scratch_entries);
+
+ for (i = 0; i < cache->zone_count; i++)
+ vdo_free(cache->search_lists[i]);
+
+ for (i = 0; i < cache->capacity; i++) {
+ release_cached_chapter_index(&cache->chapters[i]);
+ vdo_free(cache->chapters[i].index_pages);
+ vdo_free(cache->chapters[i].page_buffers);
+ }
+
+ vdo_free(cache);
+}
+
+/*
+ * Take the indicated element of the search list and move it to the start, pushing the pointers
+ * previously before it back down the list.
+ */
+static inline void set_newest_entry(struct search_list *search_list, u8 index)
+{
+ struct cached_chapter_index *newest;
+
+ if (index > 0) {
+ newest = search_list->entries[index];
+ memmove(&search_list->entries[1], &search_list->entries[0],
+ index * sizeof(struct cached_chapter_index *));
+ search_list->entries[0] = newest;
+ }
+
+ /*
+ * This function may have moved a dead chapter to the front of the list for reuse, in which
+ * case the set of dead chapters becomes smaller.
+ */
+ if (search_list->first_dead_entry <= index)
+ search_list->first_dead_entry++;
+}
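+
+/*
+ * Example (entry letters are illustrative): if the search list order is
+ * [A, B, C] and entry C (index 2) scores a hit, set_newest_entry() shifts A
+ * and B down so the order becomes [C, A, B]. first_dead_entry grows only when
+ * the promoted entry came from the dead region at the tail of the list.
+ */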
+
+bool uds_sparse_cache_contains(struct sparse_cache *cache, u64 virtual_chapter,
+ unsigned int zone_number)
+{
+ struct search_list *search_list;
+ struct cached_chapter_index *chapter;
+ u8 i;
+
+ /*
+ * The correctness of the barriers depends on the invariant that between calls to
+ * uds_update_sparse_cache(), the answers this function returns must never vary: the result
+ * for a given chapter must be identical across zones. That invariant must be maintained
+ * even if the chapter falls off the end of the volume, or if searching it is disabled
+ * because of too many search misses.
+ */
+ search_list = cache->search_lists[zone_number];
+ for (i = 0; i < search_list->first_dead_entry; i++) {
+ chapter = search_list->entries[i];
+
+ if (virtual_chapter == chapter->virtual_chapter) {
+ if (zone_number == ZONE_ZERO)
+ score_search_hit(chapter);
+
+ set_newest_entry(search_list, i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Re-sort cache entries into three sets (active, skippable, and dead) while maintaining the LRU
+ * ordering that already existed. This operation must only be called during the critical section in
+ * uds_update_sparse_cache().
+ */
+static void purge_search_list(struct search_list *search_list,
+ struct sparse_cache *cache, u64 oldest_virtual_chapter)
+{
+ struct cached_chapter_index **entries;
+ struct cached_chapter_index **skipped;
+ struct cached_chapter_index **dead;
+ struct cached_chapter_index *chapter;
+ unsigned int next_alive = 0;
+ unsigned int next_skipped = 0;
+ unsigned int next_dead = 0;
+ unsigned int i;
+
+ entries = &search_list->entries[0];
+ skipped = &cache->scratch_entries[0];
+ dead = &cache->scratch_entries[search_list->capacity];
+
+ for (i = 0; i < search_list->first_dead_entry; i++) {
+ chapter = search_list->entries[i];
+ if ((chapter->virtual_chapter < oldest_virtual_chapter) ||
+ (chapter->virtual_chapter == NO_CHAPTER))
+ dead[next_dead++] = chapter;
+ else if (chapter->skip_search)
+ skipped[next_skipped++] = chapter;
+ else
+ entries[next_alive++] = chapter;
+ }
+
+ memcpy(&entries[next_alive], skipped,
+ next_skipped * sizeof(struct cached_chapter_index *));
+ memcpy(&entries[next_alive + next_skipped], dead,
+ next_dead * sizeof(struct cached_chapter_index *));
+ search_list->first_dead_entry = next_alive + next_skipped;
+}
+
+static int __must_check cache_chapter_index(struct cached_chapter_index *chapter,
+ u64 virtual_chapter,
+ const struct volume *volume)
+{
+ int result;
+
+ release_cached_chapter_index(chapter);
+
+ result = uds_read_chapter_index_from_volume(volume, virtual_chapter,
+ chapter->page_buffers,
+ chapter->index_pages);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ chapter->counters.consecutive_misses = 0;
+ chapter->virtual_chapter = virtual_chapter;
+ chapter->skip_search = false;
+
+ return UDS_SUCCESS;
+}
+
+static inline void copy_search_list(const struct search_list *source,
+ struct search_list *target)
+{
+ *target = *source;
+ memcpy(target->entries, source->entries,
+ source->capacity * sizeof(struct cached_chapter_index *));
+}
+
+/*
+ * Update the sparse cache to contain a chapter index. This function must be called by all the zone
+ * threads with the same chapter number to correctly enter the thread barriers used to synchronize
+ * the cache updates.
+ */
+int uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter)
+{
+ int result = UDS_SUCCESS;
+ const struct uds_index *index = zone->index;
+ struct sparse_cache *cache = index->volume->sparse_cache;
+
+ if (uds_sparse_cache_contains(cache, virtual_chapter, zone->id))
+ return UDS_SUCCESS;
+
+ /*
+ * Wait for every zone thread to reach its corresponding barrier request and invoke this
+ * function before starting to modify the cache.
+ */
+ enter_threads_barrier(&cache->begin_update_barrier);
+
+ /*
+ * This is the start of the critical section: the zone zero thread is captain, effectively
+ * holding an exclusive lock on the sparse cache. All the other zone threads must do
+ * nothing between the two barriers. They will wait at the end_update_barrier again for the
+ * captain to finish the update.
+ */
+
+ if (zone->id == ZONE_ZERO) {
+ unsigned int z;
+ struct search_list *list = cache->search_lists[ZONE_ZERO];
+
+ purge_search_list(list, cache, zone->oldest_virtual_chapter);
+
+ if (virtual_chapter >= index->oldest_virtual_chapter) {
+ set_newest_entry(list, list->capacity - 1);
+ result = cache_chapter_index(list->entries[0], virtual_chapter,
+ index->volume);
+ }
+
+ for (z = 1; z < cache->zone_count; z++)
+ copy_search_list(list, cache->search_lists[z]);
+ }
+
+ /*
+ * This is the end of the critical section. All cache invariants must have been restored.
+ */
+ enter_threads_barrier(&cache->end_update_barrier);
+ return result;
+}
+
+void uds_invalidate_sparse_cache(struct sparse_cache *cache)
+{
+ unsigned int i;
+
+ for (i = 0; i < cache->capacity; i++)
+ release_cached_chapter_index(&cache->chapters[i]);
+}
+
+static inline bool should_skip_chapter(struct cached_chapter_index *chapter,
+ u64 oldest_chapter, u64 requested_chapter)
+{
+ if ((chapter->virtual_chapter == NO_CHAPTER) ||
+ (chapter->virtual_chapter < oldest_chapter))
+ return true;
+
+ if (requested_chapter != NO_CHAPTER)
+ return requested_chapter != chapter->virtual_chapter;
+ else
+ return READ_ONCE(chapter->skip_search);
+}
+
+static int __must_check search_cached_chapter_index(struct cached_chapter_index *chapter,
+ const struct index_geometry *geometry,
+ const struct index_page_map *index_page_map,
+ const struct uds_record_name *name,
+ u16 *record_page_ptr)
+{
+ u32 physical_chapter =
+ uds_map_to_physical_chapter(geometry, chapter->virtual_chapter);
+ u32 index_page_number =
+ uds_find_index_page_number(index_page_map, name, physical_chapter);
+ struct delta_index_page *index_page =
+ &chapter->index_pages[index_page_number];
+
+ return uds_search_chapter_index_page(index_page, geometry, name,
+ record_page_ptr);
+}
+
+int uds_search_sparse_cache(struct index_zone *zone, const struct uds_record_name *name,
+ u64 *virtual_chapter_ptr, u16 *record_page_ptr)
+{
+ int result;
+ struct volume *volume = zone->index->volume;
+ struct sparse_cache *cache = volume->sparse_cache;
+ struct cached_chapter_index *chapter;
+ struct search_list *search_list;
+ u8 i;
+ /* Search the entire cache unless a specific chapter was requested. */
+ bool search_one = (*virtual_chapter_ptr != NO_CHAPTER);
+
+ *record_page_ptr = NO_CHAPTER_INDEX_ENTRY;
+ search_list = cache->search_lists[zone->id];
+ for (i = 0; i < search_list->first_dead_entry; i++) {
+ chapter = search_list->entries[i];
+
+ if (should_skip_chapter(chapter, zone->oldest_virtual_chapter,
+ *virtual_chapter_ptr))
+ continue;
+
+ result = search_cached_chapter_index(chapter, cache->geometry,
+ volume->index_page_map, name,
+ record_page_ptr);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (*record_page_ptr != NO_CHAPTER_INDEX_ENTRY) {
+ /*
+ * In theory, this might be a false match while a true match exists in
+ * another chapter, but that's a very rare case and not worth the extra
+ * search complexity.
+ */
+ set_newest_entry(search_list, i);
+ if (zone->id == ZONE_ZERO)
+ score_search_hit(chapter);
+
+ *virtual_chapter_ptr = chapter->virtual_chapter;
+ return UDS_SUCCESS;
+ }
+
+ if (zone->id == ZONE_ZERO)
+ score_search_miss(cache, chapter);
+
+ if (search_one)
+ break;
+ }
+
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.h b/drivers/md/dm-vdo/indexer/sparse-cache.h
new file mode 100644
index 000000000000..45e2dcf165b5
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/sparse-cache.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_SPARSE_CACHE_H
+#define UDS_SPARSE_CACHE_H
+
+#include "geometry.h"
+#include "indexer.h"
+
+/*
+ * The sparse cache is a cache of entire chapter indexes from sparse chapters used for searching
+ * for names after all other search paths have failed. It contains only complete chapter indexes;
+ * record pages from sparse chapters and single index pages used for resolving hooks are kept in
+ * the regular page cache in the volume.
+ *
+ * The most important property of this cache is the absence of synchronization for read operations.
+ * Safe concurrent access to the cache by the zone threads is controlled by the triage queue and
+ * the barrier requests it issues to the zone queues. The set of cached chapters does not and must
+ * not change between the carefully coordinated calls to uds_update_sparse_cache() from the zone
+ * threads. Outside of updates, every zone will get the same result when calling
+ * uds_sparse_cache_contains() as every other zone.
+ */
+
+struct index_zone;
+struct sparse_cache;
+
+int __must_check uds_make_sparse_cache(const struct index_geometry *geometry,
+ unsigned int capacity, unsigned int zone_count,
+ struct sparse_cache **cache_ptr);
+
+void uds_free_sparse_cache(struct sparse_cache *cache);
+
+bool uds_sparse_cache_contains(struct sparse_cache *cache, u64 virtual_chapter,
+ unsigned int zone_number);
+
+int __must_check uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter);
+
+void uds_invalidate_sparse_cache(struct sparse_cache *cache);
+
+int __must_check uds_search_sparse_cache(struct index_zone *zone,
+ const struct uds_record_name *name,
+ u64 *virtual_chapter_ptr, u16 *record_page_ptr);
+
+#endif /* UDS_SPARSE_CACHE_H */
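+
+/*
+ * Editorial sketch (illustrative only, not part of the original submission):
+ * the call order a zone thread follows when resolving a name that may live in
+ * a sparse chapter, using only the functions declared above and the index_zone
+ * fields referenced by sparse-cache.c (zone->id, zone->index->volume). In the
+ * real index the uds_update_sparse_cache() step is driven by barrier requests
+ * from the triage queue so that every zone thread makes the call in concert;
+ * calling it from a single thread as shown here would block at the barriers.
+ */
+#if 0
+static int example_sparse_lookup(struct index_zone *zone,
+				 const struct uds_record_name *name,
+				 u64 virtual_chapter, u16 *record_page)
+{
+	struct sparse_cache *cache = zone->index->volume->sparse_cache;
+	int result;
+
+	if (!uds_sparse_cache_contains(cache, virtual_chapter, zone->id)) {
+		result = uds_update_sparse_cache(zone, virtual_chapter);
+		if (result != UDS_SUCCESS)
+			return result;
+	}
+
+	/* Passing a specific chapter restricts the search to that chapter. */
+	return uds_search_sparse_cache(zone, name, &virtual_chapter, record_page);
+}
+#endif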
diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
new file mode 100644
index 000000000000..12f954a0c532
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume-index.c
@@ -0,0 +1,1283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+#include "volume-index.h"
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "thread-utils.h"
+
+#include "config.h"
+#include "geometry.h"
+#include "hash-utils.h"
+#include "indexer.h"
+
+/*
+ * The volume index is a combination of two separate subindexes, one containing sparse hook entries
+ * (retained for all chapters), and one containing the remaining entries (retained only for the
+ * dense chapters). If there are no sparse chapters, only the non-hook subindex is used, and it
+ * will contain all records for all chapters.
+ *
+ * The volume index is also divided into zones, with one thread operating on each zone. Each
+ * incoming request is dispatched to the appropriate thread, and then to the appropriate subindex.
+ * Each delta list is handled by a single zone. To ensure that the distribution of delta lists to
+ * zones doesn't underflow (leaving some zone with no delta lists), the minimum number of delta
+ * lists must be the square of the maximum zone count for both subindexes.
+ *
+ * Each subindex zone is a delta index where the payload is a chapter number. The volume index can
+ * compute the delta list number, address, and zone number from the record name in order to
+ * dispatch record handling to the correct structures.
+ *
+ * Most operations that use all the zones take place either before request processing is allowed,
+ * or after all requests have been flushed in order to shut down. The only multi-threaded operation
+ * supported during normal operation is uds_lookup_volume_index_name(), which is used to determine
+ * whether a new chapter should be loaded into the sparse index cache. This operation only uses the
+ * sparse hook subindex, and the zone mutexes are used to make this operation safe.
+ *
+ * There are three ways of expressing chapter numbers in the volume index: virtual, index, and
+ * rolling. The interface to the volume index uses virtual chapter numbers, which are 64 bits long.
+ * Internally the subindex stores only the minimal number of bits necessary by masking away the
+ * high-order bits. When the index needs to deal with ordering of index chapter numbers, as when
+ * flushing entries from older chapters, it rolls the index chapter number around so that the
+ * smallest one in use is mapped to 0. See convert_index_to_virtual() or flush_invalid_entries()
+ * for an example of this technique.
+ *
+ * For efficiency, when older chapter numbers become invalid, the index does not immediately remove
+ * the invalidated entries. Instead it lazily removes them from a given delta list the next time it
+ * walks that list during normal operation. Because of this, the index size must be increased
+ * somewhat to accommodate all the invalid entries that have not yet been removed. For the standard
+ * index sizes, this requires about 4 chapters of old entries per 1024 chapters of valid entries in
+ * the index.
+ */
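+
+/*
+ * Editorial note (illustrative only, not part of the original submission): a
+ * worked example of the rolling arithmetic described above, assuming a
+ * hypothetical 10-bit chapter field (chapter_mask = 0x3ff). A record stored in
+ * virtual chapter 2010 is kept as index chapter 2010 & 0x3ff = 986. When the
+ * zone's oldest indexed chapter (virtual_chapter_low) is 2000, converting back
+ * gives rolling = (986 - 2000) & 0x3ff = 10, so the recovered virtual chapter
+ * is 2000 + 10 = 2010. See convert_index_to_virtual() below.
+ */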
+
+struct sub_index_parameters {
+	/* The number of bits in the address mask */
+	u8 address_bits;
+	/* The number of bits in the chapter number */
+	u8 chapter_bits;
+ /* The mean delta */
+ u32 mean_delta;
+ /* The number of delta lists */
+ u64 list_count;
+ /* The number of chapters used */
+ u32 chapter_count;
+ /* The number of bits per chapter */
+ size_t chapter_size_in_bits;
+ /* The number of bytes of delta list memory */
+ size_t memory_size;
+ /* The number of bytes the index should keep free at all times */
+ size_t target_free_bytes;
+};
+
+struct split_config {
+ /* The hook subindex configuration */
+ struct uds_configuration hook_config;
+ struct index_geometry hook_geometry;
+
+ /* The non-hook subindex configuration */
+ struct uds_configuration non_hook_config;
+ struct index_geometry non_hook_geometry;
+};
+
+struct chapter_range {
+ u32 chapter_start;
+ u32 chapter_count;
+};
+
+#define MAGIC_SIZE 8
+
+static const char MAGIC_START_5[] = "MI5-0005";
+
+struct sub_index_data {
+ char magic[MAGIC_SIZE]; /* MAGIC_START_5 */
+ u64 volume_nonce;
+ u64 virtual_chapter_low;
+ u64 virtual_chapter_high;
+ u32 first_list;
+ u32 list_count;
+};
+
+static const char MAGIC_START_6[] = "MI6-0001";
+
+struct volume_index_data {
+ char magic[MAGIC_SIZE]; /* MAGIC_START_6 */
+ u32 sparse_sample_rate;
+};
+
+static inline u32 extract_address(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ return uds_extract_volume_index_bytes(name) & sub_index->address_mask;
+}
+
+static inline u32 extract_dlist_num(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ u64 bits = uds_extract_volume_index_bytes(name);
+
+ return (bits >> sub_index->address_bits) % sub_index->list_count;
+}
+
+static inline const struct volume_sub_index_zone *
+get_zone_for_record(const struct volume_index_record *record)
+{
+ return &record->sub_index->zones[record->zone_number];
+}
+
+static inline u64 convert_index_to_virtual(const struct volume_index_record *record,
+ u32 index_chapter)
+{
+ const struct volume_sub_index_zone *volume_index_zone = get_zone_for_record(record);
+ u32 rolling_chapter = ((index_chapter - volume_index_zone->virtual_chapter_low) &
+ record->sub_index->chapter_mask);
+
+ return volume_index_zone->virtual_chapter_low + rolling_chapter;
+}
+
+static inline u32 convert_virtual_to_index(const struct volume_sub_index *sub_index,
+ u64 virtual_chapter)
+{
+ return virtual_chapter & sub_index->chapter_mask;
+}
+
+static inline bool is_virtual_chapter_indexed(const struct volume_index_record *record,
+ u64 virtual_chapter)
+{
+ const struct volume_sub_index_zone *volume_index_zone = get_zone_for_record(record);
+
+ return ((virtual_chapter >= volume_index_zone->virtual_chapter_low) &&
+ (virtual_chapter <= volume_index_zone->virtual_chapter_high));
+}
+
+static inline bool has_sparse(const struct volume_index *volume_index)
+{
+ return volume_index->sparse_sample_rate > 0;
+}
+
+bool uds_is_volume_index_sample(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ if (!has_sparse(volume_index))
+ return false;
+
+ return (uds_extract_sampling_bytes(name) % volume_index->sparse_sample_rate) == 0;
+}
+
+static inline const struct volume_sub_index *
+get_volume_sub_index(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ return (uds_is_volume_index_sample(volume_index, name) ?
+ &volume_index->vi_hook :
+ &volume_index->vi_non_hook);
+}
+
+static unsigned int get_volume_sub_index_zone(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ return extract_dlist_num(sub_index, name) / sub_index->delta_index.lists_per_zone;
+}
+
+unsigned int uds_get_volume_index_zone(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ return get_volume_sub_index_zone(get_volume_sub_index(volume_index, name), name);
+}
+
+#define DELTA_LIST_SIZE 256
+
+static int compute_volume_sub_index_parameters(const struct uds_configuration *config,
+ struct sub_index_parameters *params)
+{
+ u64 entries_in_volume_index, address_span;
+ u32 chapters_in_volume_index, invalid_chapters;
+ u32 rounded_chapters;
+ u64 delta_list_records;
+ u32 address_count;
+ u64 index_size_in_bits;
+ size_t expected_index_size;
+ u64 min_delta_lists = MAX_ZONES * MAX_ZONES;
+ struct index_geometry *geometry = config->geometry;
+ u64 records_per_chapter = geometry->records_per_chapter;
+
+ params->chapter_count = geometry->chapters_per_volume;
+ /*
+ * Make sure that the number of delta list records in the volume index does not change when
+ * the volume is reduced by one chapter. This preserves the mapping from name to volume
+ * index delta list.
+ */
+ rounded_chapters = params->chapter_count;
+ if (uds_is_reduced_index_geometry(geometry))
+ rounded_chapters += 1;
+ delta_list_records = records_per_chapter * rounded_chapters;
+ address_count = config->volume_index_mean_delta * DELTA_LIST_SIZE;
+ params->list_count = max(delta_list_records / DELTA_LIST_SIZE, min_delta_lists);
+ params->address_bits = bits_per(address_count - 1);
+ params->chapter_bits = bits_per(rounded_chapters - 1);
+ if ((u32) params->list_count != params->list_count) {
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot initialize volume index with %llu delta lists",
+ (unsigned long long) params->list_count);
+ }
+
+ if (params->address_bits > 31) {
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot initialize volume index with %u address bits",
+ params->address_bits);
+ }
+
+ /*
+ * The probability that a given delta list is not touched during the writing of an entire
+ * chapter is:
+ *
+ * double p_not_touched = pow((double) (params->list_count - 1) / params->list_count,
+ * records_per_chapter);
+ *
+ * For the standard index sizes, about 78% of the delta lists are not touched, and
+ * therefore contain old index entries that have not been eliminated by the lazy LRU
+ * processing. Then the number of old index entries that accumulate over the entire index,
+ * in terms of full chapters worth of entries, is:
+ *
+ * double invalid_chapters = p_not_touched / (1.0 - p_not_touched);
+ *
+ * For the standard index sizes, the index needs about 3.5 chapters of space for the old
+ * entries in a 1024 chapter index, so round this up to use 4 chapters per 1024 chapters in
+ * the index.
+ */
+ invalid_chapters = max(rounded_chapters / 256, 2U);
+ chapters_in_volume_index = rounded_chapters + invalid_chapters;
+ entries_in_volume_index = records_per_chapter * chapters_in_volume_index;
+
+ address_span = params->list_count << params->address_bits;
+ params->mean_delta = address_span / entries_in_volume_index;
+
+ /*
+ * Compute the expected size of a full index, then set the total memory to be 6% larger
+ * than that expected size. This number should be large enough that there are not many
+ * rebalances when the index is full.
+ */
+ params->chapter_size_in_bits = uds_compute_delta_index_size(records_per_chapter,
+ params->mean_delta,
+ params->chapter_bits);
+ index_size_in_bits = params->chapter_size_in_bits * chapters_in_volume_index;
+ expected_index_size = index_size_in_bits / BITS_PER_BYTE;
+ params->memory_size = expected_index_size * 106 / 100;
+
+ params->target_free_bytes = expected_index_size / 20;
+ return UDS_SUCCESS;
+}
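+
+/*
+ * Editorial sketch (illustrative only, not part of the original submission):
+ * the floating-point form of the estimate quoted in the comment above, as it
+ * might appear in a userspace sizing tool (the kernel build avoids floating
+ * point, which is why the function above uses the fixed 4-per-1024 rule).
+ */
+#if 0
+#include <math.h>
+
+static double example_invalid_chapter_estimate(double list_count,
+						double records_per_chapter)
+{
+	double p_not_touched = pow((list_count - 1.0) / list_count,
+				   records_per_chapter);
+
+	/* Chapters' worth of stale entries expected to linger in the index. */
+	return p_not_touched / (1.0 - p_not_touched);
+}
+#endif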
+
+static void uninitialize_volume_sub_index(struct volume_sub_index *sub_index)
+{
+ vdo_free(vdo_forget(sub_index->flush_chapters));
+ vdo_free(vdo_forget(sub_index->zones));
+ uds_uninitialize_delta_index(&sub_index->delta_index);
+}
+
+void uds_free_volume_index(struct volume_index *volume_index)
+{
+ if (volume_index == NULL)
+ return;
+
+ if (volume_index->zones != NULL)
+ vdo_free(vdo_forget(volume_index->zones));
+
+ uninitialize_volume_sub_index(&volume_index->vi_non_hook);
+ uninitialize_volume_sub_index(&volume_index->vi_hook);
+ vdo_free(volume_index);
+}
+
+static int compute_volume_sub_index_save_bytes(const struct uds_configuration *config,
+ size_t *bytes)
+{
+ struct sub_index_parameters params = { .address_bits = 0 };
+ int result;
+
+ result = compute_volume_sub_index_parameters(config, &params);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ *bytes = (sizeof(struct sub_index_data) + params.list_count * sizeof(u64) +
+ uds_compute_delta_index_save_bytes(params.list_count,
+ params.memory_size));
+ return UDS_SUCCESS;
+}
+
+/* This function is only useful if the configuration includes sparse chapters. */
+static void split_configuration(const struct uds_configuration *config,
+ struct split_config *split)
+{
+ u64 sample_rate, sample_records;
+ u64 dense_chapters, sparse_chapters;
+
+ /* Start with copies of the base configuration. */
+ split->hook_config = *config;
+ split->hook_geometry = *config->geometry;
+ split->hook_config.geometry = &split->hook_geometry;
+ split->non_hook_config = *config;
+ split->non_hook_geometry = *config->geometry;
+ split->non_hook_config.geometry = &split->non_hook_geometry;
+
+ sample_rate = config->sparse_sample_rate;
+ sparse_chapters = config->geometry->sparse_chapters_per_volume;
+ dense_chapters = config->geometry->chapters_per_volume - sparse_chapters;
+ sample_records = config->geometry->records_per_chapter / sample_rate;
+
+ /* Adjust the number of records indexed for each chapter. */
+ split->hook_geometry.records_per_chapter = sample_records;
+ split->non_hook_geometry.records_per_chapter -= sample_records;
+
+ /* Adjust the number of chapters indexed. */
+ split->hook_geometry.sparse_chapters_per_volume = 0;
+ split->non_hook_geometry.sparse_chapters_per_volume = 0;
+ split->non_hook_geometry.chapters_per_volume = dense_chapters;
+}
+
+static int compute_volume_index_save_bytes(const struct uds_configuration *config,
+ size_t *bytes)
+{
+ size_t hook_bytes, non_hook_bytes;
+ struct split_config split;
+ int result;
+
+ if (!uds_is_sparse_index_geometry(config->geometry))
+ return compute_volume_sub_index_save_bytes(config, bytes);
+
+ split_configuration(config, &split);
+ result = compute_volume_sub_index_save_bytes(&split.hook_config, &hook_bytes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = compute_volume_sub_index_save_bytes(&split.non_hook_config,
+ &non_hook_bytes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ *bytes = sizeof(struct volume_index_data) + hook_bytes + non_hook_bytes;
+ return UDS_SUCCESS;
+}
+
+int uds_compute_volume_index_save_blocks(const struct uds_configuration *config,
+ size_t block_size, u64 *block_count)
+{
+ size_t bytes;
+ int result;
+
+ result = compute_volume_index_save_bytes(config, &bytes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ bytes += sizeof(struct delta_list_save_info);
+ *block_count = DIV_ROUND_UP(bytes, block_size) + MAX_ZONES;
+ return UDS_SUCCESS;
+}
+
+/* Flush invalid entries while walking the delta list. */
+static inline int flush_invalid_entries(struct volume_index_record *record,
+ struct chapter_range *flush_range,
+ u32 *next_chapter_to_invalidate)
+{
+ int result;
+
+ result = uds_next_delta_index_entry(&record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ while (!record->delta_entry.at_end) {
+ u32 index_chapter = uds_get_delta_entry_value(&record->delta_entry);
+ u32 relative_chapter = ((index_chapter - flush_range->chapter_start) &
+ record->sub_index->chapter_mask);
+
+ if (likely(relative_chapter >= flush_range->chapter_count)) {
+ if (relative_chapter < *next_chapter_to_invalidate)
+ *next_chapter_to_invalidate = relative_chapter;
+ break;
+ }
+
+ result = uds_remove_delta_index_entry(&record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/* Find the matching record, or the list offset where the record would go. */
+static int get_volume_index_entry(struct volume_index_record *record, u32 list_number,
+ u32 key, struct chapter_range *flush_range)
+{
+ struct volume_index_record other_record;
+ const struct volume_sub_index *sub_index = record->sub_index;
+ u32 next_chapter_to_invalidate = sub_index->chapter_mask;
+ int result;
+
+ result = uds_start_delta_index_search(&sub_index->delta_index, list_number, 0,
+ &record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ do {
+ result = flush_invalid_entries(record, flush_range,
+ &next_chapter_to_invalidate);
+ if (result != UDS_SUCCESS)
+ return result;
+ } while (!record->delta_entry.at_end && (key > record->delta_entry.key));
+
+ result = uds_remember_delta_index_offset(&record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /* Check any collision records for a more precise match. */
+ other_record = *record;
+ if (!other_record.delta_entry.at_end && (key == other_record.delta_entry.key)) {
+ for (;;) {
+ u8 collision_name[UDS_RECORD_NAME_SIZE];
+
+ result = flush_invalid_entries(&other_record, flush_range,
+ &next_chapter_to_invalidate);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (other_record.delta_entry.at_end ||
+ !other_record.delta_entry.is_collision)
+ break;
+
+ result = uds_get_delta_entry_collision(&other_record.delta_entry,
+ collision_name);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (memcmp(collision_name, record->name, UDS_RECORD_NAME_SIZE) == 0) {
+ *record = other_record;
+ break;
+ }
+ }
+ }
+ while (!other_record.delta_entry.at_end) {
+ result = flush_invalid_entries(&other_record, flush_range,
+ &next_chapter_to_invalidate);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+ next_chapter_to_invalidate += flush_range->chapter_start;
+ next_chapter_to_invalidate &= sub_index->chapter_mask;
+ flush_range->chapter_start = next_chapter_to_invalidate;
+ flush_range->chapter_count = 0;
+ return UDS_SUCCESS;
+}
+
+static int get_volume_sub_index_record(struct volume_sub_index *sub_index,
+ const struct uds_record_name *name,
+ struct volume_index_record *record)
+{
+ int result;
+ const struct volume_sub_index_zone *volume_index_zone;
+ u32 address = extract_address(sub_index, name);
+ u32 delta_list_number = extract_dlist_num(sub_index, name);
+ u64 flush_chapter = sub_index->flush_chapters[delta_list_number];
+
+ record->sub_index = sub_index;
+ record->mutex = NULL;
+ record->name = name;
+ record->zone_number = delta_list_number / sub_index->delta_index.lists_per_zone;
+ volume_index_zone = get_zone_for_record(record);
+
+ if (flush_chapter < volume_index_zone->virtual_chapter_low) {
+ struct chapter_range range;
+ u64 flush_count = volume_index_zone->virtual_chapter_low - flush_chapter;
+
+ range.chapter_start = convert_virtual_to_index(sub_index, flush_chapter);
+ range.chapter_count = (flush_count > sub_index->chapter_mask ?
+ sub_index->chapter_mask + 1 :
+ flush_count);
+ result = get_volume_index_entry(record, delta_list_number, address,
+ &range);
+ flush_chapter = convert_index_to_virtual(record, range.chapter_start);
+ if (flush_chapter > volume_index_zone->virtual_chapter_high)
+ flush_chapter = volume_index_zone->virtual_chapter_high;
+ sub_index->flush_chapters[delta_list_number] = flush_chapter;
+ } else {
+ result = uds_get_delta_index_entry(&sub_index->delta_index,
+ delta_list_number, address,
+ name->name, &record->delta_entry);
+ }
+
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record->is_found =
+ (!record->delta_entry.at_end && (record->delta_entry.key == address));
+ if (record->is_found) {
+ u32 index_chapter = uds_get_delta_entry_value(&record->delta_entry);
+
+ record->virtual_chapter = convert_index_to_virtual(record, index_chapter);
+ }
+
+ record->is_collision = record->delta_entry.is_collision;
+ return UDS_SUCCESS;
+}
+
+int uds_get_volume_index_record(struct volume_index *volume_index,
+ const struct uds_record_name *name,
+ struct volume_index_record *record)
+{
+ int result;
+
+ if (uds_is_volume_index_sample(volume_index, name)) {
+ /*
+ * Other threads cannot be allowed to call uds_lookup_volume_index_name() while
+ * this thread is finding the volume index record. Due to the lazy LRU flushing of
+ * the volume index, uds_get_volume_index_record() is not a read-only operation.
+ */
+ unsigned int zone =
+ get_volume_sub_index_zone(&volume_index->vi_hook, name);
+ struct mutex *mutex = &volume_index->zones[zone].hook_mutex;
+
+ mutex_lock(mutex);
+ result = get_volume_sub_index_record(&volume_index->vi_hook, name,
+ record);
+ mutex_unlock(mutex);
+ /* Remember the mutex so that other operations on the index record can use it. */
+ record->mutex = mutex;
+ } else {
+ result = get_volume_sub_index_record(&volume_index->vi_non_hook, name,
+ record);
+ }
+
+ return result;
+}
+
+int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_chapter)
+{
+ int result;
+ u32 address;
+ const struct volume_sub_index *sub_index = record->sub_index;
+
+ if (!is_virtual_chapter_indexed(record, virtual_chapter)) {
+ u64 low = get_zone_for_record(record)->virtual_chapter_low;
+ u64 high = get_zone_for_record(record)->virtual_chapter_high;
+
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot put record into chapter number %llu that is out of the valid range %llu to %llu",
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) low,
+ (unsigned long long) high);
+ }
+ address = extract_address(sub_index, record->name);
+ if (unlikely(record->mutex != NULL))
+ mutex_lock(record->mutex);
+ result = uds_put_delta_index_entry(&record->delta_entry, address,
+ convert_virtual_to_index(sub_index,
+ virtual_chapter),
+ record->is_found ? record->name->name : NULL);
+ if (unlikely(record->mutex != NULL))
+ mutex_unlock(record->mutex);
+ switch (result) {
+ case UDS_SUCCESS:
+ record->virtual_chapter = virtual_chapter;
+ record->is_collision = record->delta_entry.is_collision;
+ record->is_found = true;
+ break;
+ case UDS_OVERFLOW:
+ vdo_log_ratelimit(vdo_log_warning_strerror, UDS_OVERFLOW,
+ "Volume index entry dropped due to overflow condition");
+ uds_log_delta_index_entry(&record->delta_entry);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+int uds_remove_volume_index_record(struct volume_index_record *record)
+{
+ int result;
+
+ if (!record->is_found)
+ return vdo_log_warning_strerror(UDS_BAD_STATE,
+ "illegal operation on new record");
+
+ /* Mark the record so that it cannot be used again */
+ record->is_found = false;
+ if (unlikely(record->mutex != NULL))
+ mutex_lock(record->mutex);
+ result = uds_remove_delta_index_entry(&record->delta_entry);
+ if (unlikely(record->mutex != NULL))
+ mutex_unlock(record->mutex);
+ return result;
+}
+
+static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_index,
+ unsigned int zone_number,
+ u64 virtual_chapter)
+{
+ u64 used_bits = 0;
+ struct volume_sub_index_zone *zone = &sub_index->zones[zone_number];
+ struct delta_zone *delta_zone;
+ u32 i;
+
+ zone->virtual_chapter_low = (virtual_chapter >= sub_index->chapter_count ?
+ virtual_chapter - sub_index->chapter_count + 1 :
+ 0);
+ zone->virtual_chapter_high = virtual_chapter;
+
+ /* Check to see if the new zone data is too large. */
+ delta_zone = &sub_index->delta_index.delta_zones[zone_number];
+ for (i = 1; i <= delta_zone->list_count; i++)
+ used_bits += delta_zone->delta_lists[i].size;
+
+ if (used_bits > sub_index->max_zone_bits) {
+ /* Expire enough chapters to free the desired space. */
+ u64 expire_count =
+ 1 + (used_bits - sub_index->max_zone_bits) / sub_index->chapter_zone_bits;
+
+ if (expire_count == 1) {
+ vdo_log_ratelimit(vdo_log_info,
+ "zone %u: At chapter %llu, expiring chapter %llu early",
+ zone_number,
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) zone->virtual_chapter_low);
+ zone->early_flushes++;
+ zone->virtual_chapter_low++;
+ } else {
+ u64 first_expired = zone->virtual_chapter_low;
+
+ if (first_expired + expire_count < zone->virtual_chapter_high) {
+ zone->early_flushes += expire_count;
+ zone->virtual_chapter_low += expire_count;
+ } else {
+ zone->early_flushes +=
+ zone->virtual_chapter_high - zone->virtual_chapter_low;
+ zone->virtual_chapter_low = zone->virtual_chapter_high;
+ }
+ vdo_log_ratelimit(vdo_log_info,
+ "zone %u: At chapter %llu, expiring chapters %llu to %llu early",
+ zone_number,
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) first_expired,
+ (unsigned long long) zone->virtual_chapter_low - 1);
+ }
+ }
+}
+
+void uds_set_volume_index_zone_open_chapter(struct volume_index *volume_index,
+ unsigned int zone_number,
+ u64 virtual_chapter)
+{
+ struct mutex *mutex = &volume_index->zones[zone_number].hook_mutex;
+
+ set_volume_sub_index_zone_open_chapter(&volume_index->vi_non_hook, zone_number,
+ virtual_chapter);
+
+ /*
+ * Other threads cannot be allowed to call uds_lookup_volume_index_name() while the open
+ * chapter number is changing.
+ */
+ if (has_sparse(volume_index)) {
+ mutex_lock(mutex);
+ set_volume_sub_index_zone_open_chapter(&volume_index->vi_hook,
+ zone_number, virtual_chapter);
+ mutex_unlock(mutex);
+ }
+}
+
+/*
+ * Set the newest open chapter number for the index, while also advancing the oldest valid chapter
+ * number.
+ */
+void uds_set_volume_index_open_chapter(struct volume_index *volume_index,
+ u64 virtual_chapter)
+{
+ unsigned int zone;
+
+ for (zone = 0; zone < volume_index->zone_count; zone++)
+ uds_set_volume_index_zone_open_chapter(volume_index, zone, virtual_chapter);
+}
+
+int uds_set_volume_index_record_chapter(struct volume_index_record *record,
+ u64 virtual_chapter)
+{
+ const struct volume_sub_index *sub_index = record->sub_index;
+ int result;
+
+ if (!record->is_found)
+ return vdo_log_warning_strerror(UDS_BAD_STATE,
+ "illegal operation on new record");
+
+ if (!is_virtual_chapter_indexed(record, virtual_chapter)) {
+ u64 low = get_zone_for_record(record)->virtual_chapter_low;
+ u64 high = get_zone_for_record(record)->virtual_chapter_high;
+
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot set chapter number %llu that is out of the valid range %llu to %llu",
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) low,
+ (unsigned long long) high);
+ }
+
+ if (unlikely(record->mutex != NULL))
+ mutex_lock(record->mutex);
+ result = uds_set_delta_entry_value(&record->delta_entry,
+ convert_virtual_to_index(sub_index,
+ virtual_chapter));
+ if (unlikely(record->mutex != NULL))
+ mutex_unlock(record->mutex);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record->virtual_chapter = virtual_chapter;
+ return UDS_SUCCESS;
+}
+
+static u64 lookup_volume_sub_index_name(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ int result;
+ u32 address = extract_address(sub_index, name);
+ u32 delta_list_number = extract_dlist_num(sub_index, name);
+ unsigned int zone_number = get_volume_sub_index_zone(sub_index, name);
+ const struct volume_sub_index_zone *zone = &sub_index->zones[zone_number];
+ u64 virtual_chapter;
+ u32 index_chapter;
+ u32 rolling_chapter;
+ struct delta_index_entry delta_entry;
+
+ result = uds_get_delta_index_entry(&sub_index->delta_index, delta_list_number,
+ address, name->name, &delta_entry);
+ if (result != UDS_SUCCESS)
+ return NO_CHAPTER;
+
+ if (delta_entry.at_end || (delta_entry.key != address))
+ return NO_CHAPTER;
+
+ index_chapter = uds_get_delta_entry_value(&delta_entry);
+ rolling_chapter = (index_chapter - zone->virtual_chapter_low) & sub_index->chapter_mask;
+
+ virtual_chapter = zone->virtual_chapter_low + rolling_chapter;
+ if (virtual_chapter > zone->virtual_chapter_high)
+ return NO_CHAPTER;
+
+ return virtual_chapter;
+}
+
+/* Do a read-only lookup of the record name for sparse cache management. */
+u64 uds_lookup_volume_index_name(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ unsigned int zone_number = uds_get_volume_index_zone(volume_index, name);
+ struct mutex *mutex = &volume_index->zones[zone_number].hook_mutex;
+ u64 virtual_chapter;
+
+ if (!uds_is_volume_index_sample(volume_index, name))
+ return NO_CHAPTER;
+
+ mutex_lock(mutex);
+ virtual_chapter = lookup_volume_sub_index_name(&volume_index->vi_hook, name);
+ mutex_unlock(mutex);
+
+ return virtual_chapter;
+}
+
+static void abort_restoring_volume_sub_index(struct volume_sub_index *sub_index)
+{
+ uds_reset_delta_index(&sub_index->delta_index);
+}
+
+static void abort_restoring_volume_index(struct volume_index *volume_index)
+{
+ abort_restoring_volume_sub_index(&volume_index->vi_non_hook);
+ if (has_sparse(volume_index))
+ abort_restoring_volume_sub_index(&volume_index->vi_hook);
+}
+
+static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
+ struct buffered_reader **readers,
+ unsigned int reader_count)
+{
+ unsigned int z;
+ int result;
+ u64 virtual_chapter_low = 0, virtual_chapter_high = 0;
+ unsigned int i;
+
+ for (i = 0; i < reader_count; i++) {
+ struct sub_index_data header;
+ u8 buffer[sizeof(struct sub_index_data)];
+ size_t offset = 0;
+ u32 j;
+
+ result = uds_read_from_buffered_reader(readers[i], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read volume index header");
+ }
+
+ memcpy(&header.magic, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ decode_u64_le(buffer, &offset, &header.volume_nonce);
+ decode_u64_le(buffer, &offset, &header.virtual_chapter_low);
+ decode_u64_le(buffer, &offset, &header.virtual_chapter_high);
+ decode_u32_le(buffer, &offset, &header.first_list);
+ decode_u32_le(buffer, &offset, &header.list_count);
+
+ result = VDO_ASSERT(offset == sizeof(buffer),
+ "%zu bytes decoded of %zu expected", offset,
+ sizeof(buffer));
+ if (result != VDO_SUCCESS)
+ result = UDS_CORRUPT_DATA;
+
+ if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "volume index file had bad magic number");
+ }
+
+ if (sub_index->volume_nonce == 0) {
+ sub_index->volume_nonce = header.volume_nonce;
+ } else if (header.volume_nonce != sub_index->volume_nonce) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "volume index volume nonce incorrect");
+ }
+
+ if (i == 0) {
+ virtual_chapter_low = header.virtual_chapter_low;
+ virtual_chapter_high = header.virtual_chapter_high;
+ } else if (virtual_chapter_high != header.virtual_chapter_high) {
+ u64 low = header.virtual_chapter_low;
+ u64 high = header.virtual_chapter_high;
+
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "Inconsistent volume index zone files: Chapter range is [%llu,%llu], chapter range %d is [%llu,%llu]",
+ (unsigned long long) virtual_chapter_low,
+ (unsigned long long) virtual_chapter_high,
+ i, (unsigned long long) low,
+ (unsigned long long) high);
+ } else if (virtual_chapter_low < header.virtual_chapter_low) {
+ virtual_chapter_low = header.virtual_chapter_low;
+ }
+
+ for (j = 0; j < header.list_count; j++) {
+ u8 decoded[sizeof(u64)];
+
+ result = uds_read_from_buffered_reader(readers[i], decoded,
+ sizeof(u64));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read volume index flush ranges");
+ }
+
+ sub_index->flush_chapters[header.first_list + j] =
+ get_unaligned_le64(decoded);
+ }
+ }
+
+ for (z = 0; z < sub_index->zone_count; z++) {
+ memset(&sub_index->zones[z], 0, sizeof(struct volume_sub_index_zone));
+ sub_index->zones[z].virtual_chapter_low = virtual_chapter_low;
+ sub_index->zones[z].virtual_chapter_high = virtual_chapter_high;
+ }
+
+ result = uds_start_restoring_delta_index(&sub_index->delta_index, readers,
+ reader_count);
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result, "restoring delta index failed");
+
+ return UDS_SUCCESS;
+}
+
+static int start_restoring_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ unsigned int i;
+ int result;
+
+ if (!has_sparse(volume_index)) {
+ return start_restoring_volume_sub_index(&volume_index->vi_non_hook,
+ buffered_readers, reader_count);
+ }
+
+ for (i = 0; i < reader_count; i++) {
+ struct volume_index_data header;
+ u8 buffer[sizeof(struct volume_index_data)];
+ size_t offset = 0;
+
+ result = uds_read_from_buffered_reader(buffered_readers[i], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read volume index header");
+ }
+
+ memcpy(&header.magic, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ decode_u32_le(buffer, &offset, &header.sparse_sample_rate);
+
+ result = VDO_ASSERT(offset == sizeof(buffer),
+ "%zu bytes decoded of %zu expected", offset,
+ sizeof(buffer));
+ if (result != VDO_SUCCESS)
+ result = UDS_CORRUPT_DATA;
+
+ if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "volume index file had bad magic number");
+
+ if (i == 0) {
+ volume_index->sparse_sample_rate = header.sparse_sample_rate;
+ } else if (volume_index->sparse_sample_rate != header.sparse_sample_rate) {
+ vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "Inconsistent sparse sample rate in delta index zone files: %u vs. %u",
+ volume_index->sparse_sample_rate,
+ header.sparse_sample_rate);
+ return UDS_CORRUPT_DATA;
+ }
+ }
+
+ result = start_restoring_volume_sub_index(&volume_index->vi_non_hook,
+ buffered_readers, reader_count);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return start_restoring_volume_sub_index(&volume_index->vi_hook, buffered_readers,
+ reader_count);
+}
+
+static int finish_restoring_volume_sub_index(struct volume_sub_index *sub_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ return uds_finish_restoring_delta_index(&sub_index->delta_index,
+ buffered_readers, reader_count);
+}
+
+static int finish_restoring_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+
+ result = finish_restoring_volume_sub_index(&volume_index->vi_non_hook,
+ buffered_readers, reader_count);
+ if ((result == UDS_SUCCESS) && has_sparse(volume_index)) {
+ result = finish_restoring_volume_sub_index(&volume_index->vi_hook,
+ buffered_readers,
+ reader_count);
+ }
+
+ return result;
+}
+
+int uds_load_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **readers, unsigned int reader_count)
+{
+ int result;
+
+ /* Start by reading the header section of the stream. */
+ result = start_restoring_volume_index(volume_index, readers, reader_count);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = finish_restoring_volume_index(volume_index, readers, reader_count);
+ if (result != UDS_SUCCESS) {
+ abort_restoring_volume_index(volume_index);
+ return result;
+ }
+
+ /* Check the final guard lists to make sure there is no extra data. */
+ result = uds_check_guard_delta_lists(readers, reader_count);
+ if (result != UDS_SUCCESS)
+ abort_restoring_volume_index(volume_index);
+
+ return result;
+}
+
+static int start_saving_volume_sub_index(const struct volume_sub_index *sub_index,
+ unsigned int zone_number,
+ struct buffered_writer *buffered_writer)
+{
+ int result;
+ struct volume_sub_index_zone *volume_index_zone = &sub_index->zones[zone_number];
+ u32 first_list = sub_index->delta_index.delta_zones[zone_number].first_list;
+ u32 list_count = sub_index->delta_index.delta_zones[zone_number].list_count;
+ u8 buffer[sizeof(struct sub_index_data)];
+ size_t offset = 0;
+ u32 i;
+
+ memcpy(buffer, MAGIC_START_5, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ encode_u64_le(buffer, &offset, sub_index->volume_nonce);
+ encode_u64_le(buffer, &offset, volume_index_zone->virtual_chapter_low);
+ encode_u64_le(buffer, &offset, volume_index_zone->virtual_chapter_high);
+ encode_u32_le(buffer, &offset, first_list);
+ encode_u32_le(buffer, &offset, list_count);
+
+ result = VDO_ASSERT(offset == sizeof(struct sub_index_data),
+ "%zu bytes of config written, of %zu expected", offset,
+ sizeof(struct sub_index_data));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result,
+ "failed to write volume index header");
+
+ for (i = 0; i < list_count; i++) {
+ u8 encoded[sizeof(u64)];
+
+ put_unaligned_le64(sub_index->flush_chapters[first_list + i], &encoded);
+ result = uds_write_to_buffered_writer(buffered_writer, encoded,
+ sizeof(u64));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to write volume index flush ranges");
+ }
+ }
+
+ return uds_start_saving_delta_index(&sub_index->delta_index, zone_number,
+ buffered_writer);
+}
+
+static int start_saving_volume_index(const struct volume_index *volume_index,
+ unsigned int zone_number,
+ struct buffered_writer *writer)
+{
+ u8 buffer[sizeof(struct volume_index_data)];
+ size_t offset = 0;
+ int result;
+
+ if (!has_sparse(volume_index)) {
+ return start_saving_volume_sub_index(&volume_index->vi_non_hook,
+ zone_number, writer);
+ }
+
+ memcpy(buffer, MAGIC_START_6, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ encode_u32_le(buffer, &offset, volume_index->sparse_sample_rate);
+ result = VDO_ASSERT(offset == sizeof(struct volume_index_data),
+ "%zu bytes of header written, of %zu expected", offset,
+ sizeof(struct volume_index_data));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning_strerror(result, "failed to write volume index header");
+ return result;
+ }
+
+ result = start_saving_volume_sub_index(&volume_index->vi_non_hook, zone_number,
+ writer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return start_saving_volume_sub_index(&volume_index->vi_hook, zone_number,
+ writer);
+}
+
+static int finish_saving_volume_sub_index(const struct volume_sub_index *sub_index,
+ unsigned int zone_number)
+{
+ return uds_finish_saving_delta_index(&sub_index->delta_index, zone_number);
+}
+
+static int finish_saving_volume_index(const struct volume_index *volume_index,
+ unsigned int zone_number)
+{
+ int result;
+
+ result = finish_saving_volume_sub_index(&volume_index->vi_non_hook, zone_number);
+ if ((result == UDS_SUCCESS) && has_sparse(volume_index))
+ result = finish_saving_volume_sub_index(&volume_index->vi_hook, zone_number);
+ return result;
+}
+
+int uds_save_volume_index(struct volume_index *volume_index,
+ struct buffered_writer **writers, unsigned int writer_count)
+{
+ int result = UDS_SUCCESS;
+ unsigned int zone;
+
+ for (zone = 0; zone < writer_count; zone++) {
+ result = start_saving_volume_index(volume_index, zone, writers[zone]);
+ if (result != UDS_SUCCESS)
+ break;
+
+ result = finish_saving_volume_index(volume_index, zone);
+ if (result != UDS_SUCCESS)
+ break;
+
+ result = uds_write_guard_delta_list(writers[zone]);
+ if (result != UDS_SUCCESS)
+ break;
+
+ result = uds_flush_buffered_writer(writers[zone]);
+ if (result != UDS_SUCCESS)
+ break;
+ }
+
+ return result;
+}
+
+static void get_volume_sub_index_stats(const struct volume_sub_index *sub_index,
+ struct volume_index_stats *stats)
+{
+ struct delta_index_stats dis;
+ unsigned int z;
+
+ uds_get_delta_index_stats(&sub_index->delta_index, &dis);
+ stats->rebalance_time = dis.rebalance_time;
+ stats->rebalance_count = dis.rebalance_count;
+ stats->record_count = dis.record_count;
+ stats->collision_count = dis.collision_count;
+ stats->discard_count = dis.discard_count;
+ stats->overflow_count = dis.overflow_count;
+ stats->delta_lists = dis.list_count;
+ stats->early_flushes = 0;
+ for (z = 0; z < sub_index->zone_count; z++)
+ stats->early_flushes += sub_index->zones[z].early_flushes;
+}
+
+void uds_get_volume_index_stats(const struct volume_index *volume_index,
+ struct volume_index_stats *stats)
+{
+ struct volume_index_stats sparse_stats;
+
+ get_volume_sub_index_stats(&volume_index->vi_non_hook, stats);
+ if (!has_sparse(volume_index))
+ return;
+
+ get_volume_sub_index_stats(&volume_index->vi_hook, &sparse_stats);
+ stats->rebalance_time += sparse_stats.rebalance_time;
+ stats->rebalance_count += sparse_stats.rebalance_count;
+ stats->record_count += sparse_stats.record_count;
+ stats->collision_count += sparse_stats.collision_count;
+ stats->discard_count += sparse_stats.discard_count;
+ stats->overflow_count += sparse_stats.overflow_count;
+ stats->delta_lists += sparse_stats.delta_lists;
+ stats->early_flushes += sparse_stats.early_flushes;
+}
+
+static int initialize_volume_sub_index(const struct uds_configuration *config,
+ u64 volume_nonce, u8 tag,
+ struct volume_sub_index *sub_index)
+{
+ struct sub_index_parameters params = { .address_bits = 0 };
+ unsigned int zone_count = config->zone_count;
+ u64 available_bytes = 0;
+ unsigned int z;
+ int result;
+
+ result = compute_volume_sub_index_parameters(config, &params);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ sub_index->address_bits = params.address_bits;
+ sub_index->address_mask = (1u << params.address_bits) - 1;
+ sub_index->chapter_bits = params.chapter_bits;
+ sub_index->chapter_mask = (1u << params.chapter_bits) - 1;
+ sub_index->chapter_count = params.chapter_count;
+ sub_index->list_count = params.list_count;
+ sub_index->zone_count = zone_count;
+ sub_index->chapter_zone_bits = params.chapter_size_in_bits / zone_count;
+ sub_index->volume_nonce = volume_nonce;
+
+ result = uds_initialize_delta_index(&sub_index->delta_index, zone_count,
+ params.list_count, params.mean_delta,
+ params.chapter_bits, params.memory_size,
+ tag);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (z = 0; z < sub_index->delta_index.zone_count; z++)
+ available_bytes += sub_index->delta_index.delta_zones[z].size;
+ available_bytes -= params.target_free_bytes;
+ sub_index->max_zone_bits = (available_bytes * BITS_PER_BYTE) / zone_count;
+ sub_index->memory_size = (sub_index->delta_index.memory_size +
+ sizeof(struct volume_sub_index) +
+ (params.list_count * sizeof(u64)) +
+ (zone_count * sizeof(struct volume_sub_index_zone)));
+
+ /* The following arrays are initialized to all zeros. */
+ result = vdo_allocate(params.list_count, u64, "first chapter to flush",
+ &sub_index->flush_chapters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return vdo_allocate(zone_count, struct volume_sub_index_zone,
+ "volume index zones", &sub_index->zones);
+}
+
+int uds_make_volume_index(const struct uds_configuration *config, u64 volume_nonce,
+ struct volume_index **volume_index_ptr)
+{
+ struct split_config split;
+ unsigned int zone;
+ struct volume_index *volume_index;
+ int result;
+
+ result = vdo_allocate(1, struct volume_index, "volume index", &volume_index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ volume_index->zone_count = config->zone_count;
+
+ if (!uds_is_sparse_index_geometry(config->geometry)) {
+ result = initialize_volume_sub_index(config, volume_nonce, 'm',
+ &volume_index->vi_non_hook);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return result;
+ }
+
+ volume_index->memory_size = volume_index->vi_non_hook.memory_size;
+ *volume_index_ptr = volume_index;
+ return UDS_SUCCESS;
+ }
+
+ volume_index->sparse_sample_rate = config->sparse_sample_rate;
+
+ result = vdo_allocate(config->zone_count, struct volume_index_zone,
+ "volume index zones", &volume_index->zones);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return result;
+ }
+
+ for (zone = 0; zone < config->zone_count; zone++)
+ mutex_init(&volume_index->zones[zone].hook_mutex);
+
+ split_configuration(config, &split);
+ result = initialize_volume_sub_index(&split.non_hook_config, volume_nonce, 'd',
+ &volume_index->vi_non_hook);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return vdo_log_error_strerror(result,
+ "Error creating non hook volume index");
+ }
+
+ result = initialize_volume_sub_index(&split.hook_config, volume_nonce, 's',
+ &volume_index->vi_hook);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return vdo_log_error_strerror(result,
+ "Error creating hook volume index");
+ }
+
+ volume_index->memory_size =
+ volume_index->vi_non_hook.memory_size + volume_index->vi_hook.memory_size;
+ *volume_index_ptr = volume_index;
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/volume-index.h b/drivers/md/dm-vdo/indexer/volume-index.h
new file mode 100644
index 000000000000..583998c547b7
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume-index.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_VOLUME_INDEX_H
+#define UDS_VOLUME_INDEX_H
+
+#include <linux/limits.h>
+
+#include "thread-utils.h"
+
+#include "config.h"
+#include "delta-index.h"
+#include "indexer.h"
+
+/*
+ * The volume index is the primary top-level index for UDS. It contains records which map a record
+ * name to the chapter where a record with that name is stored. This mapping can definitively say
+ * when no record exists. However, because we only use a subset of the name for this index, it
+ * cannot definitively say that a record for the entry does exist. It can only say that if a record
+ * exists, it will be in a particular chapter. The request can then be dispatched to that chapter
+ * for further processing.
+ *
+ * If the volume_index_record does not actually match the record name, the index can store a more
+ * specific collision record to disambiguate the new entry from the existing one. Index entries are
+ * managed with volume_index_record structures.
+ */
+
+#define NO_CHAPTER U64_MAX
+
+struct volume_index_stats {
+ /* Nanoseconds spent rebalancing */
+ ktime_t rebalance_time;
+ /* Number of memory rebalances */
+ u32 rebalance_count;
+ /* The number of records in the index */
+ u64 record_count;
+ /* The number of collision records */
+ u64 collision_count;
+ /* The number of records removed */
+ u64 discard_count;
+ /* The number of UDS_OVERFLOWs detected */
+ u64 overflow_count;
+ /* The number of delta lists */
+ u32 delta_lists;
+ /* Number of early flushes */
+ u64 early_flushes;
+};
+
+struct volume_sub_index_zone {
+ u64 virtual_chapter_low;
+ u64 virtual_chapter_high;
+ u64 early_flushes;
+} __aligned(L1_CACHE_BYTES);
+
+struct volume_sub_index {
+ /* The delta index */
+ struct delta_index delta_index;
+ /* The first chapter to be flushed in each zone */
+ u64 *flush_chapters;
+ /* The zones */
+ struct volume_sub_index_zone *zones;
+ /* The volume nonce */
+ u64 volume_nonce;
+ /* Expected size of a chapter (per zone) */
+ u64 chapter_zone_bits;
+ /* Maximum size of the index (per zone) */
+ u64 max_zone_bits;
+	/* The number of bits in the address mask */
+	u8 address_bits;
+	/* Mask to get the address within a delta list */
+	u32 address_mask;
+	/* The number of bits in the chapter number */
+	u8 chapter_bits;
+ /* The largest storable chapter number */
+ u32 chapter_mask;
+ /* The number of chapters used */
+ u32 chapter_count;
+ /* The number of delta lists */
+ u32 list_count;
+ /* The number of zones */
+ unsigned int zone_count;
+ /* The amount of memory allocated */
+ u64 memory_size;
+};
+
+struct volume_index_zone {
+ /* Protects the sampled index in this zone */
+ struct mutex hook_mutex;
+} __aligned(L1_CACHE_BYTES);
+
+struct volume_index {
+ u32 sparse_sample_rate;
+ unsigned int zone_count;
+ u64 memory_size;
+ struct volume_sub_index vi_non_hook;
+ struct volume_sub_index vi_hook;
+ struct volume_index_zone *zones;
+};
+
+/*
+ * The volume_index_record structure is used to facilitate processing of a record name. A client
+ * first calls uds_get_volume_index_record() to find the volume index record for a record name. The
+ * fields of the record can then be examined to determine the state of the record.
+ *
+ * If is_found is false, then the index did not find an entry for the record name. Calling
+ * uds_put_volume_index_record() will insert a new entry for that name at the proper place.
+ *
+ * If is_found is true, then we did find an entry for the record name, and the virtual_chapter and
+ * is_collision fields reflect the entry found. Subsequently, a call to
+ * uds_remove_volume_index_record() will remove the entry, a call to
+ * uds_set_volume_index_record_chapter() will update the existing entry, and a call to
+ * uds_put_volume_index_record() will insert a new collision record after the existing entry.
+ */
+struct volume_index_record {
+ /* Public fields */
+
+ /* Chapter where the record info is found */
+ u64 virtual_chapter;
+ /* This record is a collision */
+ bool is_collision;
+ /* This record is the requested record */
+ bool is_found;
+
+ /* Private fields */
+
+ /* Zone that contains this name */
+ unsigned int zone_number;
+ /* The volume index */
+ struct volume_sub_index *sub_index;
+ /* Mutex for accessing this delta index entry in the hook index */
+ struct mutex *mutex;
+ /* The record name to which this record refers */
+ const struct uds_record_name *name;
+ /* The delta index entry for this record */
+ struct delta_index_entry delta_entry;
+};
+
+int __must_check uds_make_volume_index(const struct uds_configuration *config,
+ u64 volume_nonce,
+ struct volume_index **volume_index);
+
+void uds_free_volume_index(struct volume_index *volume_index);
+
+int __must_check uds_compute_volume_index_save_blocks(const struct uds_configuration *config,
+ size_t block_size,
+ u64 *block_count);
+
+unsigned int __must_check uds_get_volume_index_zone(const struct volume_index *volume_index,
+ const struct uds_record_name *name);
+
+bool __must_check uds_is_volume_index_sample(const struct volume_index *volume_index,
+ const struct uds_record_name *name);
+
+/*
+ * This function is only used to manage sparse cache membership. Most requests should use
+ * uds_get_volume_index_record() to look up index records instead.
+ */
+u64 __must_check uds_lookup_volume_index_name(const struct volume_index *volume_index,
+ const struct uds_record_name *name);
+
+int __must_check uds_get_volume_index_record(struct volume_index *volume_index,
+ const struct uds_record_name *name,
+ struct volume_index_record *record);
+
+int __must_check uds_put_volume_index_record(struct volume_index_record *record,
+ u64 virtual_chapter);
+
+int __must_check uds_remove_volume_index_record(struct volume_index_record *record);
+
+int __must_check uds_set_volume_index_record_chapter(struct volume_index_record *record,
+ u64 virtual_chapter);
+
+void uds_set_volume_index_open_chapter(struct volume_index *volume_index,
+ u64 virtual_chapter);
+
+void uds_set_volume_index_zone_open_chapter(struct volume_index *volume_index,
+ unsigned int zone_number,
+ u64 virtual_chapter);
+
+int __must_check uds_load_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **readers,
+ unsigned int reader_count);
+
+int __must_check uds_save_volume_index(struct volume_index *volume_index,
+ struct buffered_writer **writers,
+ unsigned int writer_count);
+
+void uds_get_volume_index_stats(const struct volume_index *volume_index,
+ struct volume_index_stats *stats);
+
+#endif /* UDS_VOLUME_INDEX_H */
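+
+/*
+ * Editorial sketch (illustrative only, not part of the original submission):
+ * the lookup-then-update protocol described in the volume_index_record comment
+ * above, using only the functions declared in this header. The caller and its
+ * error handling are hypothetical.
+ */
+#if 0
+static int example_index_name(struct volume_index *volume_index,
+			      const struct uds_record_name *name,
+			      u64 open_chapter)
+{
+	struct volume_index_record record;
+	int result;
+
+	result = uds_get_volume_index_record(volume_index, name, &record);
+	if (result != UDS_SUCCESS)
+		return result;
+
+	if (record.is_found) {
+		/* An entry (possibly a collision) exists; move it to the open chapter. */
+		return uds_set_volume_index_record_chapter(&record, open_chapter);
+	}
+
+	/* No entry yet; insert one pointing at the open chapter. */
+	return uds_put_volume_index_record(&record, open_chapter);
+}
+#endif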
diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
new file mode 100644
index 000000000000..655453bb276b
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume.c
@@ -0,0 +1,1693 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "volume.h"
+
+#include <linux/atomic.h>
+#include <linux/dm-bufio.h>
+#include <linux/err.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+#include "thread-utils.h"
+
+#include "chapter-index.h"
+#include "config.h"
+#include "geometry.h"
+#include "hash-utils.h"
+#include "index.h"
+#include "sparse-cache.h"
+
+/*
+ * The first block of the volume layout is reserved for the volume header, which is no longer used.
+ * The remainder of the volume is divided into chapters consisting of several pages of records, and
+ * several pages of static index to use to find those records. The index pages are recorded first,
+ * followed by the record pages. The chapters are written in order as they are filled, so the
+ * volume storage acts as a circular log of the most recent chapters, with each new chapter
+ * overwriting the oldest saved one.
+ *
+ * When a new chapter is filled and closed, the records from that chapter are sorted and
+ * interleaved in approximate temporal order, and assigned to record pages. Then a static delta
+ * index is generated to store which record page contains each record. The in-memory index page map
+ * is also updated to indicate which delta lists fall on each chapter index page. This means that
+ * when a record is read, the volume only has to load a single index page and a single record page,
+ * rather than search the entire chapter. These index and record pages are written to storage, and
+ * the index pages are transferred to the page cache under the theory that the most recently
+ * written chapter is likely to be accessed again soon.
+ *
+ * When reading a record, the volume index will indicate which chapter should contain it. The
+ * volume uses the index page map to determine which chapter index page needs to be loaded, and
+ * then reads the relevant record page number from the chapter index. Both index and record pages
+ * are stored in a page cache when read for the common case that subsequent records need the same
+ * pages. The page cache evicts the least recently accessed entries when caching new pages. In
+ * addition, the volume uses dm-bufio to manage access to the storage, which may allow for
+ * additional caching depending on available system resources.
+ *
+ * Record requests are handled from cached pages when possible. If a page needs to be read, it is
+ * placed on a queue along with the request that wants to read it. Any requests for the same page
+ * that arrive while the read is pending are added to the queue entry. A separate reader thread
+ * handles the queued reads, adding the page to the cache and updating any requests queued with it
+ * so they can continue processing. This allows the index zone threads to continue processing new
+ * requests rather than wait for the storage reads.
+ *
+ * When an index rebuild is necessary, the volume reads each stored chapter to determine which
+ * range of chapters contain valid records, so that those records can be used to reconstruct the
+ * in-memory volume index.
+ */
+
+/* The maximum allowable number of contiguous bad chapters */
+#define MAX_BAD_CHAPTERS 100
+#define VOLUME_CACHE_MAX_ENTRIES (U16_MAX >> 1)
+#define VOLUME_CACHE_QUEUED_FLAG (1 << 15)
+#define VOLUME_CACHE_MAX_QUEUED_READS 4096
+
+static const u64 BAD_CHAPTER = U64_MAX;
+
+/*
+ * The invalidate counter is two 32-bit fields stored together atomically. The low-order 32 bits
+ * are the physical page number of the cached page being read. The high-order 32 bits are a
+ * sequence number. This value is written when the zone that owns it begins or completes a cache
+ * search. Any other thread will only read the counter in wait_for_pending_searches() while waiting
+ * to update the cache contents.
+ */
+union invalidate_counter {
+ u64 value;
+ struct {
+ u32 page;
+ u32 counter;
+ };
+};
+
+static inline u32 map_to_page_number(struct index_geometry *geometry, u32 physical_page)
+{
+ return (physical_page - HEADER_PAGES_PER_VOLUME) % geometry->pages_per_chapter;
+}
+
+static inline u32 map_to_chapter_number(struct index_geometry *geometry, u32 physical_page)
+{
+ return (physical_page - HEADER_PAGES_PER_VOLUME) / geometry->pages_per_chapter;
+}
+
+static inline bool is_record_page(struct index_geometry *geometry, u32 physical_page)
+{
+ return map_to_page_number(geometry, physical_page) >= geometry->index_pages_per_chapter;
+}
+
+static u32 map_to_physical_page(const struct index_geometry *geometry, u32 chapter, u32 page)
+{
+ /* Page zero is the header page, so the first chapter index page is page one. */
+ return HEADER_PAGES_PER_VOLUME + (geometry->pages_per_chapter * chapter) + page;
+}
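+
+/*
+ * For example, with one header page (as noted above) and a hypothetical 16 pages per chapter,
+ * chapter 2 page 3 maps to physical page 1 + (16 * 2) + 3 = 36; going the other way, physical
+ * page 36 maps back to chapter (36 - 1) / 16 = 2 and page (36 - 1) % 16 = 3.
+ */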
+
+static inline union invalidate_counter get_invalidate_counter(struct page_cache *cache,
+ unsigned int zone_number)
+{
+ return (union invalidate_counter) {
+ .value = READ_ONCE(cache->search_pending_counters[zone_number].atomic_value),
+ };
+}
+
+static inline void set_invalidate_counter(struct page_cache *cache,
+ unsigned int zone_number,
+ union invalidate_counter invalidate_counter)
+{
+ WRITE_ONCE(cache->search_pending_counters[zone_number].atomic_value,
+ invalidate_counter.value);
+}
+
+static inline bool search_pending(union invalidate_counter invalidate_counter)
+{
+ return (invalidate_counter.counter & 1) != 0;
+}
+
+/* Lock the cache for a zone in order to search for a page. */
+static void begin_pending_search(struct page_cache *cache, u32 physical_page,
+ unsigned int zone_number)
+{
+ union invalidate_counter invalidate_counter =
+ get_invalidate_counter(cache, zone_number);
+
+ invalidate_counter.page = physical_page;
+ invalidate_counter.counter++;
+ set_invalidate_counter(cache, zone_number, invalidate_counter);
+ VDO_ASSERT_LOG_ONLY(search_pending(invalidate_counter),
+ "Search is pending for zone %u", zone_number);
+ /*
+ * This memory barrier ensures that the write to the invalidate counter is seen by other
+ * threads before this thread accesses the cached page. The corresponding read memory
+ * barrier is in wait_for_pending_searches().
+ */
+ smp_mb();
+}
+
+/* Unlock the cache for a zone by clearing its invalidate counter. */
+static void end_pending_search(struct page_cache *cache, unsigned int zone_number)
+{
+ union invalidate_counter invalidate_counter;
+
+ /*
+ * This memory barrier ensures that this thread completes reads of the
+ * cached page before other threads see the write to the invalidate
+ * counter.
+ */
+ smp_mb();
+
+ invalidate_counter = get_invalidate_counter(cache, zone_number);
+ VDO_ASSERT_LOG_ONLY(search_pending(invalidate_counter),
+ "Search is pending for zone %u", zone_number);
+ invalidate_counter.counter++;
+ set_invalidate_counter(cache, zone_number, invalidate_counter);
+}
+
+static void wait_for_pending_searches(struct page_cache *cache, u32 physical_page)
+{
+ union invalidate_counter initial_counters[MAX_ZONES];
+ unsigned int i;
+
+ /*
+ * We hold the read_threads_mutex. We are waiting for threads that do not hold the
+ * read_threads_mutex. Those threads have "locked" their targeted page by setting the
+ * search_pending_counter. The corresponding write memory barrier is in
+ * begin_pending_search().
+ */
+ smp_mb();
+
+ for (i = 0; i < cache->zone_count; i++)
+ initial_counters[i] = get_invalidate_counter(cache, i);
+ for (i = 0; i < cache->zone_count; i++) {
+ if (search_pending(initial_counters[i]) &&
+ (initial_counters[i].page == physical_page)) {
+ /*
+ * There is an active search using the physical page. We need to wait for
+ * the search to finish.
+ *
+ * FIXME: Investigate using wait_event() to wait for the search to finish.
+ */
+ while (initial_counters[i].value ==
+ get_invalidate_counter(cache, i).value)
+ cond_resched();
+ }
+ }
+}
+
+static void release_page_buffer(struct cached_page *page)
+{
+ if (page->buffer != NULL)
+ dm_bufio_release(vdo_forget(page->buffer));
+}
+
+static void clear_cache_page(struct page_cache *cache, struct cached_page *page)
+{
+ /* Do not clear read_pending because the read queue relies on it. */
+ release_page_buffer(page);
+ page->physical_page = cache->indexable_pages;
+ WRITE_ONCE(page->last_used, 0);
+}
+
+static void make_page_most_recent(struct page_cache *cache, struct cached_page *page)
+{
+ /*
+ * ASSERTION: We are either a zone thread holding a search_pending_counter, or we are any
+ * thread holding the read_threads_mutex.
+ */
+ if (atomic64_read(&cache->clock) != READ_ONCE(page->last_used))
+ WRITE_ONCE(page->last_used, atomic64_inc_return(&cache->clock));
+}
+
+/* Select a page to remove from the cache to make space for a new entry. */
+static struct cached_page *select_victim_in_cache(struct page_cache *cache)
+{
+ struct cached_page *page;
+ int oldest_index = 0;
+ s64 oldest_time = S64_MAX;
+ s64 last_used;
+ u16 i;
+
+ /* Find the oldest unclaimed page. We hold the read_threads_mutex. */
+ for (i = 0; i < cache->cache_slots; i++) {
+ /* A page with a pending read must not be replaced. */
+ if (cache->cache[i].read_pending)
+ continue;
+
+ last_used = READ_ONCE(cache->cache[i].last_used);
+ if (last_used <= oldest_time) {
+ oldest_time = last_used;
+ oldest_index = i;
+ }
+ }
+
+ page = &cache->cache[oldest_index];
+ if (page->physical_page != cache->indexable_pages) {
+ WRITE_ONCE(cache->index[page->physical_page], cache->cache_slots);
+ wait_for_pending_searches(cache, page->physical_page);
+ }
+
+ page->read_pending = true;
+ clear_cache_page(cache, page);
+ return page;
+}
+
+/* Make a newly filled cache entry available to other threads. */
+static int put_page_in_cache(struct page_cache *cache, u32 physical_page,
+ struct cached_page *page)
+{
+ int result;
+
+ /* We hold the read_threads_mutex. */
+ result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ page->physical_page = physical_page;
+ make_page_most_recent(cache, page);
+ page->read_pending = false;
+
+ /*
+ * We hold the read_threads_mutex, but we must have a write memory barrier before making
+ * the cached_page available to the readers that do not hold the mutex. The corresponding
+ * read memory barrier is in get_page_and_index().
+ */
+ smp_wmb();
+
+ /* This assignment also clears the queued flag. */
+ WRITE_ONCE(cache->index[physical_page], page - cache->cache);
+ return UDS_SUCCESS;
+}
+
+static void cancel_page_in_cache(struct page_cache *cache, u32 physical_page,
+ struct cached_page *page)
+{
+ int result;
+
+ /* We hold the read_threads_mutex. */
+ result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
+ if (result != VDO_SUCCESS)
+ return;
+
+ clear_cache_page(cache, page);
+ page->read_pending = false;
+
+ /* Clear the mapping and the queued flag for the new page. */
+ WRITE_ONCE(cache->index[physical_page], cache->cache_slots);
+}
+
+static inline u16 next_queue_position(u16 position)
+{
+ return (position + 1) % VOLUME_CACHE_MAX_QUEUED_READS;
+}
+
+static inline void advance_queue_position(u16 *position)
+{
+ *position = next_queue_position(*position);
+}
+
+static inline bool read_queue_is_full(struct page_cache *cache)
+{
+ return cache->read_queue_first == next_queue_position(cache->read_queue_last);
+}
+
+static bool enqueue_read(struct page_cache *cache, struct uds_request *request,
+ u32 physical_page)
+{
+ struct queued_read *queue_entry;
+ u16 last = cache->read_queue_last;
+ u16 read_queue_index;
+
+ /* We hold the read_threads_mutex. */
+ if ((cache->index[physical_page] & VOLUME_CACHE_QUEUED_FLAG) == 0) {
+ /* This page has no existing entry in the queue. */
+ if (read_queue_is_full(cache))
+ return false;
+
+ /* Fill in the read queue entry. */
+ cache->read_queue[last].physical_page = physical_page;
+ cache->read_queue[last].invalid = false;
+ cache->read_queue[last].first_request = NULL;
+ cache->read_queue[last].last_request = NULL;
+
+ /* Point the cache index to the read queue entry. */
+ read_queue_index = last;
+ WRITE_ONCE(cache->index[physical_page],
+ read_queue_index | VOLUME_CACHE_QUEUED_FLAG);
+
+ advance_queue_position(&cache->read_queue_last);
+ } else {
+ /* It's already queued, so add this request to the existing entry. */
+ read_queue_index = cache->index[physical_page] & ~VOLUME_CACHE_QUEUED_FLAG;
+ }
+
+ request->next_request = NULL;
+ queue_entry = &cache->read_queue[read_queue_index];
+ if (queue_entry->first_request == NULL)
+ queue_entry->first_request = request;
+ else
+ queue_entry->last_request->next_request = request;
+ queue_entry->last_request = request;
+
+ return true;
+}
+
+static void enqueue_page_read(struct volume *volume, struct uds_request *request,
+ u32 physical_page)
+{
+ /* Mark the page as queued, so that chapter invalidation knows to cancel a read. */
+ while (!enqueue_read(&volume->page_cache, request, physical_page)) {
+ vdo_log_debug("Read queue full, waiting for reads to finish");
+ uds_wait_cond(&volume->read_threads_read_done_cond,
+ &volume->read_threads_mutex);
+ }
+
+ uds_signal_cond(&volume->read_threads_cond);
+}
+
+/*
+ * Reserve the next read queue entry for processing, but do not actually remove it from the queue.
+ * Must be followed by release_queued_requests().
+ */
+static struct queued_read *reserve_read_queue_entry(struct page_cache *cache)
+{
+ /* We hold the read_threads_mutex. */
+ struct queued_read *entry;
+ u16 index_value;
+ bool queued;
+
+ /* No items to dequeue */
+ if (cache->read_queue_next_read == cache->read_queue_last)
+ return NULL;
+
+ entry = &cache->read_queue[cache->read_queue_next_read];
+ index_value = cache->index[entry->physical_page];
+ queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
+ /* Check to see if it's still queued before resetting. */
+ if (entry->invalid && queued)
+ WRITE_ONCE(cache->index[entry->physical_page], cache->cache_slots);
+
+ /*
+ * If a synchronous read has taken this page, set invalid to true so it doesn't get
+ * overwritten. Requests will just be requeued.
+ */
+ if (!queued)
+ entry->invalid = true;
+
+ entry->reserved = true;
+ advance_queue_position(&cache->read_queue_next_read);
+ return entry;
+}
+
+static inline struct queued_read *wait_to_reserve_read_queue_entry(struct volume *volume)
+{
+ struct queued_read *queue_entry = NULL;
+
+ while (!volume->read_threads_exiting) {
+ queue_entry = reserve_read_queue_entry(&volume->page_cache);
+ if (queue_entry != NULL)
+ break;
+
+ uds_wait_cond(&volume->read_threads_cond, &volume->read_threads_mutex);
+ }
+
+ return queue_entry;
+}
+
+static int init_chapter_index_page(const struct volume *volume, u8 *index_page,
+ u32 chapter, u32 index_page_number,
+ struct delta_index_page *chapter_index_page)
+{
+ u64 ci_virtual;
+ u32 ci_chapter;
+ u32 lowest_list;
+ u32 highest_list;
+ struct index_geometry *geometry = volume->geometry;
+ int result;
+
+ result = uds_initialize_chapter_index_page(chapter_index_page, geometry,
+ index_page, volume->nonce);
+ if (volume->lookup_mode == LOOKUP_FOR_REBUILD)
+ return result;
+
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "Reading chapter index page for chapter %u page %u",
+ chapter, index_page_number);
+ }
+
+ uds_get_list_number_bounds(volume->index_page_map, chapter, index_page_number,
+ &lowest_list, &highest_list);
+ ci_virtual = chapter_index_page->virtual_chapter_number;
+ ci_chapter = uds_map_to_physical_chapter(geometry, ci_virtual);
+ if ((chapter == ci_chapter) &&
+ (lowest_list == chapter_index_page->lowest_list_number) &&
+ (highest_list == chapter_index_page->highest_list_number))
+ return UDS_SUCCESS;
+
+ vdo_log_warning("Index page map updated to %llu",
+ (unsigned long long) volume->index_page_map->last_update);
+ vdo_log_warning("Page map expects that chapter %u page %u has range %u to %u, but chapter index page has chapter %llu with range %u to %u",
+ chapter, index_page_number, lowest_list, highest_list,
+ (unsigned long long) ci_virtual,
+ chapter_index_page->lowest_list_number,
+ chapter_index_page->highest_list_number);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "index page map mismatch with chapter index");
+}
+
+static int initialize_index_page(const struct volume *volume, u32 physical_page,
+ struct cached_page *page)
+{
+ u32 chapter = map_to_chapter_number(volume->geometry, physical_page);
+ u32 index_page_number = map_to_page_number(volume->geometry, physical_page);
+
+ return init_chapter_index_page(volume, dm_bufio_get_block_data(page->buffer),
+ chapter, index_page_number, &page->index_page);
+}
+
+static bool search_record_page(const u8 record_page[],
+ const struct uds_record_name *name,
+ const struct index_geometry *geometry,
+ struct uds_record_data *metadata)
+{
+ /*
+ * The array of records is sorted by name and stored as a binary tree in heap order, so the
+ * root of the tree is the first array element.
+ */
+ u32 node = 0;
+ const struct uds_volume_record *records = (const struct uds_volume_record *) record_page;
+
+ while (node < geometry->records_per_page) {
+ int result;
+ const struct uds_volume_record *record = &records[node];
+
+ result = memcmp(name, &record->name, UDS_RECORD_NAME_SIZE);
+ if (result == 0) {
+ if (metadata != NULL)
+ *metadata = record->data;
+ return true;
+ }
+
+ /* The children of node N are at indexes 2N+1 and 2N+2. */
+ node = ((2 * node) + ((result < 0) ? 1 : 2));
+ }
+
+ return false;
+}
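+
+/*
+ * A small illustration (hypothetical single-letter names): seven records sorted as A B C D E F G
+ * are laid out in heap order as [D, B, F, A, C, E, G]. A search for E compares against D at node
+ * 0, moves right to node 2 (F), then left to node 5, and finds E after three comparisons.
+ */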
+
+/*
+ * If we've read in a record page, we're going to do an immediate search, to speed up processing by
+ * avoiding get_record_from_zone(), and to ensure that requests make progress even when queued. If
+ * we've read in an index page, we save the record page number so we don't have to resolve the
+ * index page again. We use the location, virtual_chapter, and old_metadata fields in the request
+ * to allow the index code to know where to begin processing the request again.
+ */
+static int search_page(struct cached_page *page, const struct volume *volume,
+ struct uds_request *request, u32 physical_page)
+{
+ int result;
+ enum uds_index_region location;
+ u16 record_page_number;
+
+ if (is_record_page(volume->geometry, physical_page)) {
+ if (search_record_page(dm_bufio_get_block_data(page->buffer),
+ &request->record_name, volume->geometry,
+ &request->old_metadata))
+ location = UDS_LOCATION_RECORD_PAGE_LOOKUP;
+ else
+ location = UDS_LOCATION_UNAVAILABLE;
+ } else {
+ result = uds_search_chapter_index_page(&page->index_page,
+ volume->geometry,
+ &request->record_name,
+ &record_page_number);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record_page_number == NO_CHAPTER_INDEX_ENTRY) {
+ location = UDS_LOCATION_UNAVAILABLE;
+ } else {
+ location = UDS_LOCATION_INDEX_PAGE_LOOKUP;
+ *((u16 *) &request->old_metadata) = record_page_number;
+ }
+ }
+
+ request->location = location;
+ request->found = false;
+ return UDS_SUCCESS;
+}
+
+static int process_entry(struct volume *volume, struct queued_read *entry)
+{
+ u32 page_number = entry->physical_page;
+ struct uds_request *request;
+ struct cached_page *page = NULL;
+ u8 *page_data;
+ int result;
+
+ if (entry->invalid) {
+ vdo_log_debug("Requeuing requests for invalid page");
+ return UDS_SUCCESS;
+ }
+
+ page = select_victim_in_cache(&volume->page_cache);
+
+ mutex_unlock(&volume->read_threads_mutex);
+ page_data = dm_bufio_read(volume->client, page_number, &page->buffer);
+ mutex_lock(&volume->read_threads_mutex);
+ if (IS_ERR(page_data)) {
+ result = -PTR_ERR(page_data);
+ vdo_log_warning_strerror(result,
+ "error reading physical page %u from volume",
+ page_number);
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return result;
+ }
+
+ if (entry->invalid) {
+ vdo_log_warning("Page %u invalidated after read", page_number);
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return UDS_SUCCESS;
+ }
+
+ if (!is_record_page(volume->geometry, page_number)) {
+ result = initialize_index_page(volume, page_number, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error initializing chapter index page");
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return result;
+ }
+ }
+
+ result = put_page_in_cache(&volume->page_cache, page_number, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error putting page %u in cache", page_number);
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return result;
+ }
+
+ request = entry->first_request;
+ while ((request != NULL) && (result == UDS_SUCCESS)) {
+ result = search_page(page, volume, request, page_number);
+ request = request->next_request;
+ }
+
+ return result;
+}
+
+static void release_queued_requests(struct volume *volume, struct queued_read *entry,
+ int result)
+{
+ struct page_cache *cache = &volume->page_cache;
+ u16 next_read = cache->read_queue_next_read;
+ struct uds_request *request;
+ struct uds_request *next;
+
+ for (request = entry->first_request; request != NULL; request = next) {
+ next = request->next_request;
+ request->status = result;
+ request->requeued = true;
+ uds_enqueue_request(request, STAGE_INDEX);
+ }
+
+ entry->reserved = false;
+
+ /* Move the read_queue_first pointer as far as we can. */
+ while ((cache->read_queue_first != next_read) &&
+ (!cache->read_queue[cache->read_queue_first].reserved))
+ advance_queue_position(&cache->read_queue_first);
+ uds_broadcast_cond(&volume->read_threads_read_done_cond);
+}
+
+static void read_thread_function(void *arg)
+{
+ struct volume *volume = arg;
+
+ vdo_log_debug("reader starting");
+ mutex_lock(&volume->read_threads_mutex);
+ while (true) {
+ struct queued_read *queue_entry;
+ int result;
+
+ queue_entry = wait_to_reserve_read_queue_entry(volume);
+ if (volume->read_threads_exiting)
+ break;
+
+ result = process_entry(volume, queue_entry);
+ release_queued_requests(volume, queue_entry, result);
+ }
+ mutex_unlock(&volume->read_threads_mutex);
+ vdo_log_debug("reader done");
+}
+
+static void get_page_and_index(struct page_cache *cache, u32 physical_page,
+ int *queue_index, struct cached_page **page_ptr)
+{
+ u16 index_value;
+ u16 index;
+ bool queued;
+
+ /*
+ * ASSERTION: We are either a zone thread holding a search_pending_counter, or we are any
+ * thread holding the read_threads_mutex.
+ *
+ * Holding only a search_pending_counter is the most frequent case.
+ */
+ /*
+ * It would be unlikely for the compiler to turn the usage of index_value into two reads of
+ * cache->index, but it would be possible and very bad if those reads did not return the
+ * same bits.
+ */
+ index_value = READ_ONCE(cache->index[physical_page]);
+ queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
+ index = index_value & ~VOLUME_CACHE_QUEUED_FLAG;
+
+ if (!queued && (index < cache->cache_slots)) {
+ *page_ptr = &cache->cache[index];
+ /*
+ * We have acquired access to the cached page, but unless we hold the
+ * read_threads_mutex, we need a read memory barrier now. The corresponding write
+ * memory barrier is in put_page_in_cache().
+ */
+ smp_rmb();
+ } else {
+ *page_ptr = NULL;
+ }
+
+ *queue_index = queued ? index : -1;
+}
+
+static void get_page_from_cache(struct page_cache *cache, u32 physical_page,
+ struct cached_page **page)
+{
+ /*
+ * ASSERTION: We are in a zone thread.
+ * ASSERTION: We are holding a search_pending_counter or the read_threads_mutex.
+ */
+ int queue_index = -1;
+
+ get_page_and_index(cache, physical_page, &queue_index, page);
+}
+
+static int read_page_locked(struct volume *volume, u32 physical_page,
+ struct cached_page **page_ptr)
+{
+ int result = UDS_SUCCESS;
+ struct cached_page *page = NULL;
+ u8 *page_data;
+
+ page = select_victim_in_cache(&volume->page_cache);
+ page_data = dm_bufio_read(volume->client, physical_page, &page->buffer);
+ if (IS_ERR(page_data)) {
+ result = -PTR_ERR(page_data);
+ vdo_log_warning_strerror(result,
+ "error reading physical page %u from volume",
+ physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ if (!is_record_page(volume->geometry, physical_page)) {
+ result = initialize_index_page(volume, physical_page, page);
+ if (result != UDS_SUCCESS) {
+ if (volume->lookup_mode != LOOKUP_FOR_REBUILD)
+ vdo_log_warning("Corrupt index page %u", physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+ }
+
+ result = put_page_in_cache(&volume->page_cache, physical_page, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error putting page %u in cache", physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ *page_ptr = page;
+ return UDS_SUCCESS;
+}
+
+/* Retrieve a page from the cache while holding the read threads mutex. */
+static int get_volume_page_locked(struct volume *volume, u32 physical_page,
+ struct cached_page **page_ptr)
+{
+ int result;
+ struct cached_page *page = NULL;
+
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page == NULL) {
+ result = read_page_locked(volume, physical_page, &page);
+ if (result != UDS_SUCCESS)
+ return result;
+ } else {
+ make_page_most_recent(&volume->page_cache, page);
+ }
+
+ *page_ptr = page;
+ return UDS_SUCCESS;
+}
+
+/* Retrieve a page from the cache while holding a search_pending lock. */
+static int get_volume_page_protected(struct volume *volume, struct uds_request *request,
+ u32 physical_page, struct cached_page **page_ptr)
+{
+ struct cached_page *page;
+
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page != NULL) {
+ if (request->zone_number == 0) {
+ /* Only one zone is allowed to update the LRU. */
+ make_page_most_recent(&volume->page_cache, page);
+ }
+
+ *page_ptr = page;
+ return UDS_SUCCESS;
+ }
+
+ /* Prepare to enqueue a read for the page. */
+ end_pending_search(&volume->page_cache, request->zone_number);
+ mutex_lock(&volume->read_threads_mutex);
+
+ /*
+ * Do the lookup again while holding the read mutex (no longer the fast case so this should
+ * be fine to repeat). We need to do this because a page may have been added to the cache
+ * by a reader thread between the time we searched above and the time we went to actually
+ * try to enqueue it below. This could result in us enqueuing another read for a page which
+ * is already in the cache, which would mean we end up with two entries in the cache for
+ * the same page.
+ */
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page == NULL) {
+ enqueue_page_read(volume, request, physical_page);
+ /*
+ * The performance gain from unlocking first, while "search pending" mode is off,
+ * turns out to be significant in some cases. The page is not available yet so
+ * the order does not matter for correctness as it does below.
+ */
+ mutex_unlock(&volume->read_threads_mutex);
+ begin_pending_search(&volume->page_cache, physical_page,
+ request->zone_number);
+ return UDS_QUEUED;
+ }
+
+ /*
+ * Now that the page is loaded, the volume needs to switch to "reader thread unlocked" and
+ * "search pending" state in careful order so no other thread can mess with the data before
+ * the caller gets to look at it.
+ */
+ begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ mutex_unlock(&volume->read_threads_mutex);
+ *page_ptr = page;
+ return UDS_SUCCESS;
+}
+
+static int get_volume_page(struct volume *volume, u32 chapter, u32 page_number,
+ struct cached_page **page_ptr)
+{
+ int result;
+ u32 physical_page = map_to_physical_page(volume->geometry, chapter, page_number);
+
+ mutex_lock(&volume->read_threads_mutex);
+ result = get_volume_page_locked(volume, physical_page, page_ptr);
+ mutex_unlock(&volume->read_threads_mutex);
+ return result;
+}
+
+int uds_get_volume_record_page(struct volume *volume, u32 chapter, u32 page_number,
+ u8 **data_ptr)
+{
+ int result;
+ struct cached_page *page = NULL;
+
+ result = get_volume_page(volume, chapter, page_number, &page);
+ if (result == UDS_SUCCESS)
+ *data_ptr = dm_bufio_get_block_data(page->buffer);
+ return result;
+}
+
+int uds_get_volume_index_page(struct volume *volume, u32 chapter, u32 page_number,
+ struct delta_index_page **index_page_ptr)
+{
+ int result;
+ struct cached_page *page = NULL;
+
+ result = get_volume_page(volume, chapter, page_number, &page);
+ if (result == UDS_SUCCESS)
+ *index_page_ptr = &page->index_page;
+ return result;
+}
+
+/*
+ * Find the record page associated with a name in a given index page. This will return UDS_QUEUED
+ * if the page in question must be read from storage.
+ */
+static int search_cached_index_page(struct volume *volume, struct uds_request *request,
+ u32 chapter, u32 index_page_number,
+ u16 *record_page_number)
+{
+ int result;
+ struct cached_page *page = NULL;
+ u32 physical_page = map_to_physical_page(volume->geometry, chapter,
+ index_page_number);
+
+ /*
+ * Make sure the invalidate counter is updated before we try and read the mapping. This
+ * prevents this thread from reading a page in the cache which has already been marked for
+ * invalidation by the reader thread, before the reader thread has noticed that the
+ * invalidate_counter has been incremented.
+ */
+ begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+
+ result = get_volume_page_protected(volume, request, physical_page, &page);
+ if (result != UDS_SUCCESS) {
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return result;
+ }
+
+ result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
+ &request->record_name,
+ record_page_number);
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return result;
+}
+
+/*
+ * Find the metadata associated with a name in a given record page. This will return UDS_QUEUED if
+ * the page in question must be read from storage.
+ */
+int uds_search_cached_record_page(struct volume *volume, struct uds_request *request,
+ u32 chapter, u16 record_page_number, bool *found)
+{
+ struct cached_page *record_page;
+ struct index_geometry *geometry = volume->geometry;
+ int result;
+ u32 physical_page, page_number;
+
+ *found = false;
+ if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
+ return UDS_SUCCESS;
+
+ result = VDO_ASSERT(record_page_number < geometry->record_pages_per_chapter,
+ "0 <= %d < %u", record_page_number,
+ geometry->record_pages_per_chapter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ page_number = geometry->index_pages_per_chapter + record_page_number;
+
+ physical_page = map_to_physical_page(volume->geometry, chapter, page_number);
+
+ /*
+ * Make sure the invalidate counter is updated before we try and read the mapping. This
+ * prevents this thread from reading a page in the cache which has already been marked for
+ * invalidation by the reader thread, before the reader thread has noticed that the
+ * invalidate_counter has been incremented.
+ */
+ begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+
+ result = get_volume_page_protected(volume, request, physical_page, &record_page);
+ if (result != UDS_SUCCESS) {
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return result;
+ }
+
+ if (search_record_page(dm_bufio_get_block_data(record_page->buffer),
+ &request->record_name, geometry, &request->old_metadata))
+ *found = true;
+
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return UDS_SUCCESS;
+}
+
+void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter)
+{
+ const struct index_geometry *geometry = volume->geometry;
+ u32 physical_page = map_to_physical_page(geometry, chapter, 0);
+
+ dm_bufio_prefetch(volume->client, physical_page, geometry->pages_per_chapter);
+}
+
+int uds_read_chapter_index_from_volume(const struct volume *volume, u64 virtual_chapter,
+ struct dm_buffer *volume_buffers[],
+ struct delta_index_page index_pages[])
+{
+ int result;
+ u32 i;
+ const struct index_geometry *geometry = volume->geometry;
+ u32 physical_chapter = uds_map_to_physical_chapter(geometry, virtual_chapter);
+ u32 physical_page = map_to_physical_page(geometry, physical_chapter, 0);
+
+ dm_bufio_prefetch(volume->client, physical_page, geometry->index_pages_per_chapter);
+ for (i = 0; i < geometry->index_pages_per_chapter; i++) {
+ u8 *index_page;
+
+ index_page = dm_bufio_read(volume->client, physical_page + i,
+ &volume_buffers[i]);
+ if (IS_ERR(index_page)) {
+ result = -PTR_ERR(index_page);
+ vdo_log_warning_strerror(result,
+ "error reading physical page %u",
+ physical_page);
+ return result;
+ }
+
+ result = init_chapter_index_page(volume, index_page, physical_chapter, i,
+ &index_pages[i]);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_search_volume_page_cache(struct volume *volume, struct uds_request *request,
+ bool *found)
+{
+ int result;
+ u32 physical_chapter =
+ uds_map_to_physical_chapter(volume->geometry, request->virtual_chapter);
+ u32 index_page_number;
+ u16 record_page_number;
+
+ index_page_number = uds_find_index_page_number(volume->index_page_map,
+ &request->record_name,
+ physical_chapter);
+
+ if (request->location == UDS_LOCATION_INDEX_PAGE_LOOKUP) {
+ record_page_number = *((u16 *) &request->old_metadata);
+ } else {
+ result = search_cached_index_page(volume, request, physical_chapter,
+ index_page_number,
+ &record_page_number);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return uds_search_cached_record_page(volume, request, physical_chapter,
+ record_page_number, found);
+}
+
+int uds_search_volume_page_cache_for_rebuild(struct volume *volume,
+ const struct uds_record_name *name,
+ u64 virtual_chapter, bool *found)
+{
+ int result;
+ struct index_geometry *geometry = volume->geometry;
+ struct cached_page *page;
+ u32 physical_chapter = uds_map_to_physical_chapter(geometry, virtual_chapter);
+ u32 index_page_number;
+ u16 record_page_number;
+ u32 page_number;
+
+ *found = false;
+ index_page_number =
+ uds_find_index_page_number(volume->index_page_map, name,
+ physical_chapter);
+ result = get_volume_page(volume, physical_chapter, index_page_number, &page);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_search_chapter_index_page(&page->index_page, geometry, name,
+ &record_page_number);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
+ return UDS_SUCCESS;
+
+ page_number = geometry->index_pages_per_chapter + record_page_number;
+ result = get_volume_page(volume, physical_chapter, page_number, &page);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ *found = search_record_page(dm_bufio_get_block_data(page->buffer), name,
+ geometry, NULL);
+ return UDS_SUCCESS;
+}
+
+static void invalidate_page(struct page_cache *cache, u32 physical_page)
+{
+ struct cached_page *page;
+ int queue_index = -1;
+
+ /* We hold the read_threads_mutex. */
+ get_page_and_index(cache, physical_page, &queue_index, &page);
+ if (page != NULL) {
+ WRITE_ONCE(cache->index[page->physical_page], cache->cache_slots);
+ wait_for_pending_searches(cache, page->physical_page);
+ clear_cache_page(cache, page);
+ } else if (queue_index > -1) {
+ vdo_log_debug("setting pending read to invalid");
+ cache->read_queue[queue_index].invalid = true;
+ }
+}
+
+void uds_forget_chapter(struct volume *volume, u64 virtual_chapter)
+{
+ u32 physical_chapter =
+ uds_map_to_physical_chapter(volume->geometry, virtual_chapter);
+ u32 first_page = map_to_physical_page(volume->geometry, physical_chapter, 0);
+ u32 i;
+
+ vdo_log_debug("forgetting chapter %llu", (unsigned long long) virtual_chapter);
+ mutex_lock(&volume->read_threads_mutex);
+ for (i = 0; i < volume->geometry->pages_per_chapter; i++)
+ invalidate_page(&volume->page_cache, first_page + i);
+ mutex_unlock(&volume->read_threads_mutex);
+}
+
+/*
+ * Donate an index page from a newly written chapter to the page cache, since it is likely to be
+ * used again soon. The caller must already hold the reader thread mutex.
+ */
+static int donate_index_page_locked(struct volume *volume, u32 physical_chapter,
+ u32 index_page_number, struct dm_buffer *page_buffer)
+{
+ int result;
+ struct cached_page *page = NULL;
+ u32 physical_page =
+ map_to_physical_page(volume->geometry, physical_chapter,
+ index_page_number);
+
+ page = select_victim_in_cache(&volume->page_cache);
+ page->buffer = page_buffer;
+ result = init_chapter_index_page(volume, dm_bufio_get_block_data(page_buffer),
+ physical_chapter, index_page_number,
+ &page->index_page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error initialize chapter index page");
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ result = put_page_in_cache(&volume->page_cache, physical_page, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error putting page %u in cache", physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int write_index_pages(struct volume *volume, u32 physical_chapter_number,
+ struct open_chapter_index *chapter_index)
+{
+ struct index_geometry *geometry = volume->geometry;
+ struct dm_buffer *page_buffer;
+ u32 first_index_page = map_to_physical_page(geometry, physical_chapter_number, 0);
+ u32 delta_list_number = 0;
+ u32 index_page_number;
+
+ for (index_page_number = 0;
+ index_page_number < geometry->index_pages_per_chapter;
+ index_page_number++) {
+ u8 *page_data;
+ u32 physical_page = first_index_page + index_page_number;
+ u32 lists_packed;
+ bool last_page;
+ int result;
+
+ page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
+ if (IS_ERR(page_data)) {
+ return vdo_log_warning_strerror(-PTR_ERR(page_data),
+ "failed to prepare index page");
+ }
+
+ last_page = ((index_page_number + 1) == geometry->index_pages_per_chapter);
+ result = uds_pack_open_chapter_index_page(chapter_index, page_data,
+ delta_list_number, last_page,
+ &lists_packed);
+ if (result != UDS_SUCCESS) {
+ dm_bufio_release(page_buffer);
+ return vdo_log_warning_strerror(result,
+ "failed to pack index page");
+ }
+
+ dm_bufio_mark_buffer_dirty(page_buffer);
+
+ if (lists_packed == 0) {
+ vdo_log_debug("no delta lists packed on chapter %u page %u",
+ physical_chapter_number, index_page_number);
+ } else {
+ delta_list_number += lists_packed;
+ }
+
+ uds_update_index_page_map(volume->index_page_map,
+ chapter_index->virtual_chapter_number,
+ physical_chapter_number, index_page_number,
+ delta_list_number - 1);
+
+ mutex_lock(&volume->read_threads_mutex);
+ result = donate_index_page_locked(volume, physical_chapter_number,
+ index_page_number, page_buffer);
+ mutex_unlock(&volume->read_threads_mutex);
+ if (result != UDS_SUCCESS) {
+ dm_bufio_release(page_buffer);
+ return result;
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+static u32 encode_tree(u8 record_page[],
+ const struct uds_volume_record *sorted_pointers[],
+ u32 next_record, u32 node, u32 node_count)
+{
+ if (node < node_count) {
+ u32 child = (2 * node) + 1;
+
+ next_record = encode_tree(record_page, sorted_pointers, next_record,
+ child, node_count);
+
+ /*
+ * In-order traversal: copy the contents of the next record into the page at the
+ * node offset.
+ */
+ memcpy(&record_page[node * BYTES_PER_RECORD],
+ sorted_pointers[next_record++], BYTES_PER_RECORD);
+
+ next_record = encode_tree(record_page, sorted_pointers, next_record,
+ child + 1, node_count);
+ }
+
+ return next_record;
+}
+
+static int encode_record_page(const struct volume *volume,
+ const struct uds_volume_record records[], u8 record_page[])
+{
+ int result;
+ u32 i;
+ u32 records_per_page = volume->geometry->records_per_page;
+ const struct uds_volume_record **record_pointers = volume->record_pointers;
+
+ for (i = 0; i < records_per_page; i++)
+ record_pointers[i] = &records[i];
+
+ /*
+ * Sort the record pointers by using just the names in the records, which is less work than
+ * sorting the entire record values.
+ */
+ BUILD_BUG_ON(offsetof(struct uds_volume_record, name) != 0);
+ result = uds_radix_sort(volume->radix_sorter, (const u8 **) record_pointers,
+ records_per_page, UDS_RECORD_NAME_SIZE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ encode_tree(record_page, record_pointers, 0, 0, records_per_page);
+ return UDS_SUCCESS;
+}
+
+static int write_record_pages(struct volume *volume, u32 physical_chapter_number,
+ const struct uds_volume_record *records)
+{
+ u32 record_page_number;
+ struct index_geometry *geometry = volume->geometry;
+ struct dm_buffer *page_buffer;
+ const struct uds_volume_record *next_record = records;
+ u32 first_record_page = map_to_physical_page(geometry, physical_chapter_number,
+ geometry->index_pages_per_chapter);
+
+ for (record_page_number = 0;
+ record_page_number < geometry->record_pages_per_chapter;
+ record_page_number++) {
+ u8 *page_data;
+ u32 physical_page = first_record_page + record_page_number;
+ int result;
+
+ page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
+ if (IS_ERR(page_data)) {
+ return vdo_log_warning_strerror(-PTR_ERR(page_data),
+ "failed to prepare record page");
+ }
+
+ result = encode_record_page(volume, next_record, page_data);
+ if (result != UDS_SUCCESS) {
+ dm_bufio_release(page_buffer);
+ return vdo_log_warning_strerror(result,
+ "failed to encode record page %u",
+ record_page_number);
+ }
+
+ next_record += geometry->records_per_page;
+ dm_bufio_mark_buffer_dirty(page_buffer);
+ dm_bufio_release(page_buffer);
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_write_chapter(struct volume *volume, struct open_chapter_index *chapter_index,
+ const struct uds_volume_record *records)
+{
+ int result;
+ u32 physical_chapter_number =
+ uds_map_to_physical_chapter(volume->geometry,
+ chapter_index->virtual_chapter_number);
+
+ result = write_index_pages(volume, physical_chapter_number, chapter_index);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = write_record_pages(volume, physical_chapter_number, records);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = -dm_bufio_write_dirty_buffers(volume->client);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "cannot sync chapter to volume");
+
+ return result;
+}
+
+static void probe_chapter(struct volume *volume, u32 chapter_number,
+ u64 *virtual_chapter_number)
+{
+ const struct index_geometry *geometry = volume->geometry;
+ u32 expected_list_number = 0;
+ u32 i;
+ u64 vcn = BAD_CHAPTER;
+
+ *virtual_chapter_number = BAD_CHAPTER;
+ dm_bufio_prefetch(volume->client,
+ map_to_physical_page(geometry, chapter_number, 0),
+ geometry->index_pages_per_chapter);
+
+ for (i = 0; i < geometry->index_pages_per_chapter; i++) {
+ struct delta_index_page *page;
+ int result;
+
+ result = uds_get_volume_index_page(volume, chapter_number, i, &page);
+ if (result != UDS_SUCCESS)
+ return;
+
+ if (page->virtual_chapter_number == BAD_CHAPTER) {
+ vdo_log_error("corrupt index page in chapter %u",
+ chapter_number);
+ return;
+ }
+
+ if (vcn == BAD_CHAPTER) {
+ vcn = page->virtual_chapter_number;
+ } else if (page->virtual_chapter_number != vcn) {
+ vdo_log_error("inconsistent chapter %u index page %u: expected vcn %llu, got vcn %llu",
+ chapter_number, i, (unsigned long long) vcn,
+ (unsigned long long) page->virtual_chapter_number);
+ return;
+ }
+
+ if (expected_list_number != page->lowest_list_number) {
+ vdo_log_error("inconsistent chapter %u index page %u: expected list number %u, got list number %u",
+ chapter_number, i, expected_list_number,
+ page->lowest_list_number);
+ return;
+ }
+ expected_list_number = page->highest_list_number + 1;
+
+ result = uds_validate_chapter_index_page(page, geometry);
+ if (result != UDS_SUCCESS)
+ return;
+ }
+
+ if (chapter_number != uds_map_to_physical_chapter(geometry, vcn)) {
+ vdo_log_error("chapter %u vcn %llu is out of phase (%u)", chapter_number,
+ (unsigned long long) vcn, geometry->chapters_per_volume);
+ return;
+ }
+
+ *virtual_chapter_number = vcn;
+}
+
+/* Find the last valid physical chapter in the volume. */
+static void find_real_end_of_volume(struct volume *volume, u32 limit, u32 *limit_ptr)
+{
+ u32 span = 1;
+ u32 tries = 0;
+
+ while (limit > 0) {
+ u32 chapter = (span > limit) ? 0 : limit - span;
+ u64 vcn = 0;
+
+ probe_chapter(volume, chapter, &vcn);
+ if (vcn == BAD_CHAPTER) {
+ limit = chapter;
+ if (++tries > 1)
+ span *= 2;
+ } else {
+ if (span == 1)
+ break;
+ span /= 2;
+ tries = 0;
+ }
+ }
+
+ *limit_ptr = limit;
+}
+
+static int find_chapter_limits(struct volume *volume, u32 chapter_limit, u64 *lowest_vcn,
+ u64 *highest_vcn)
+{
+ struct index_geometry *geometry = volume->geometry;
+ u64 zero_vcn;
+ u64 lowest = BAD_CHAPTER;
+ u64 highest = BAD_CHAPTER;
+ u64 moved_chapter = BAD_CHAPTER;
+ u32 left_chapter = 0;
+ u32 right_chapter = 0;
+ u32 bad_chapters = 0;
+
+ /*
+ * This method assumes there is at most one run of contiguous bad chapters caused by
+ * unflushed writes. Either the bad spot wraps around from the end of the volume to the
+ * beginning, or it is somewhere in the middle. Wherever it is, the highest and lowest VCNs are
+ * adjacent to it. Otherwise the
+ * volume is cleanly saved and somewhere in the middle of it the highest VCN immediately
+ * precedes the lowest one.
+ */
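+
+ /*
+ * As an illustration (made-up VCNs): if an 8-chapter volume holds virtual chapters
+ * [104, 105, 106, BAD, 100, 101, 102, 103], then zero_vcn is 104, the binary search below
+ * settles on chapter 4 as the holder of the lowest VCN (100), and the backwards scan from
+ * there skips bad chapter 3 and finds the highest VCN (106) in chapter 2.
+ */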
+
+ /* It doesn't matter if this results in a bad spot (BAD_CHAPTER). */
+ probe_chapter(volume, 0, &zero_vcn);
+
+ /*
+ * Binary search for end of the discontinuity in the monotonically increasing virtual
+ * chapter numbers; bad spots are treated as a span of BAD_CHAPTER values. In effect we're
+ * searching for the index of the smallest value less than zero_vcn. In the case we go off
+ * the end it means that chapter 0 has the lowest vcn.
+ *
+ * If a virtual chapter is out-of-order, it will be the one moved by conversion. Always
+ * skip over the moved chapter when searching, adding it to the range at the end if
+ * necessary.
+ */
+ if (geometry->remapped_physical > 0) {
+ u64 remapped_vcn;
+
+ probe_chapter(volume, geometry->remapped_physical, &remapped_vcn);
+ if (remapped_vcn == geometry->remapped_virtual)
+ moved_chapter = geometry->remapped_physical;
+ }
+
+ left_chapter = 0;
+ right_chapter = chapter_limit;
+
+ while (left_chapter < right_chapter) {
+ u64 probe_vcn;
+ u32 chapter = (left_chapter + right_chapter) / 2;
+
+ if (chapter == moved_chapter)
+ chapter--;
+
+ probe_chapter(volume, chapter, &probe_vcn);
+ if (zero_vcn <= probe_vcn) {
+ left_chapter = chapter + 1;
+ if (left_chapter == moved_chapter)
+ left_chapter++;
+ } else {
+ right_chapter = chapter;
+ }
+ }
+
+ /* If left_chapter goes off the end, chapter 0 has the lowest virtual chapter number. */
+ if (left_chapter >= chapter_limit)
+ left_chapter = 0;
+
+ /* At this point, left_chapter is the chapter with the lowest virtual chapter number. */
+ probe_chapter(volume, left_chapter, &lowest);
+
+ /* The moved chapter might be the lowest in the range. */
+ if ((moved_chapter != BAD_CHAPTER) && (lowest == geometry->remapped_virtual + 1))
+ lowest = geometry->remapped_virtual;
+
+ /*
+ * Circularly scan backwards, moving over any bad chapters until encountering a good one,
+ * which is the chapter with the highest vcn.
+ */
+ while (highest == BAD_CHAPTER) {
+ right_chapter = (right_chapter + chapter_limit - 1) % chapter_limit;
+ if (right_chapter == moved_chapter)
+ continue;
+
+ probe_chapter(volume, right_chapter, &highest);
+ if (bad_chapters++ >= MAX_BAD_CHAPTERS) {
+ vdo_log_error("too many bad chapters in volume: %u",
+ bad_chapters);
+ return UDS_CORRUPT_DATA;
+ }
+ }
+
+ *lowest_vcn = lowest;
+ *highest_vcn = highest;
+ return UDS_SUCCESS;
+}
+
+/*
+ * Find the highest and lowest contiguous chapters present in the volume and determine their
+ * virtual chapter numbers. This is used by rebuild.
+ */
+int uds_find_volume_chapter_boundaries(struct volume *volume, u64 *lowest_vcn,
+ u64 *highest_vcn, bool *is_empty)
+{
+ u32 chapter_limit = volume->geometry->chapters_per_volume;
+
+ find_real_end_of_volume(volume, chapter_limit, &chapter_limit);
+ if (chapter_limit == 0) {
+ *lowest_vcn = 0;
+ *highest_vcn = 0;
+ *is_empty = true;
+ return UDS_SUCCESS;
+ }
+
+ *is_empty = false;
+ return find_chapter_limits(volume, chapter_limit, lowest_vcn, highest_vcn);
+}
+
+int __must_check uds_replace_volume_storage(struct volume *volume,
+ struct index_layout *layout,
+ struct block_device *bdev)
+{
+ int result;
+ u32 i;
+
+ result = uds_replace_index_layout_storage(layout, bdev);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /* Release all outstanding dm_bufio objects */
+ for (i = 0; i < volume->page_cache.indexable_pages; i++)
+ volume->page_cache.index[i] = volume->page_cache.cache_slots;
+ for (i = 0; i < volume->page_cache.cache_slots; i++)
+ clear_cache_page(&volume->page_cache, &volume->page_cache.cache[i]);
+ if (volume->sparse_cache != NULL)
+ uds_invalidate_sparse_cache(volume->sparse_cache);
+ if (volume->client != NULL)
+ dm_bufio_client_destroy(vdo_forget(volume->client));
+
+ return uds_open_volume_bufio(layout, volume->geometry->bytes_per_page,
+ volume->reserved_buffers, &volume->client);
+}
+
+static int __must_check initialize_page_cache(struct page_cache *cache,
+ const struct index_geometry *geometry,
+ u32 chapters_in_cache,
+ unsigned int zone_count)
+{
+ int result;
+ u32 i;
+
+ cache->indexable_pages = geometry->pages_per_volume + 1;
+ cache->cache_slots = chapters_in_cache * geometry->record_pages_per_chapter;
+ cache->zone_count = zone_count;
+ atomic64_set(&cache->clock, 1);
+
+ result = VDO_ASSERT((cache->cache_slots <= VOLUME_CACHE_MAX_ENTRIES),
+ "requested cache size, %u, within limit %u",
+ cache->cache_slots, VOLUME_CACHE_MAX_ENTRIES);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read,
+ "volume read queue", &cache->read_queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(cache->zone_count, struct search_pending_counter,
+ "Volume Cache Zones", &cache->search_pending_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(cache->indexable_pages, u16, "page cache index",
+ &cache->index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(cache->cache_slots, struct cached_page, "page cache cache",
+ &cache->cache);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Initialize index values to invalid values. */
+ for (i = 0; i < cache->indexable_pages; i++)
+ cache->index[i] = cache->cache_slots;
+
+ for (i = 0; i < cache->cache_slots; i++)
+ clear_cache_page(cache, &cache->cache[i]);
+
+ return UDS_SUCCESS;
+}
+
+int uds_make_volume(const struct uds_configuration *config, struct index_layout *layout,
+ struct volume **new_volume)
+{
+ unsigned int i;
+ struct volume *volume = NULL;
+ struct index_geometry *geometry;
+ unsigned int reserved_buffers;
+ int result;
+
+ result = vdo_allocate(1, struct volume, "volume", &volume);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ volume->nonce = uds_get_volume_nonce(layout);
+
+ result = uds_copy_index_geometry(config->geometry, &volume->geometry);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return vdo_log_warning_strerror(result,
+ "failed to allocate geometry: error");
+ }
+ geometry = volume->geometry;
+
+ /*
+ * Reserve a buffer for each entry in the page cache, one for the chapter writer, and one
+ * for each entry in the sparse cache.
+ */
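+ /*
+ * For example (made-up numbers): a sparse geometry with 256 record pages and 6 index pages per
+ * chapter and a 10-chapter cache would reserve (10 * 256) + 1 + (10 * 6) = 2621 buffers.
+ */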
+ reserved_buffers = config->cache_chapters * geometry->record_pages_per_chapter;
+ reserved_buffers += 1;
+ if (uds_is_sparse_index_geometry(geometry))
+ reserved_buffers += (config->cache_chapters * geometry->index_pages_per_chapter);
+ volume->reserved_buffers = reserved_buffers;
+ result = uds_open_volume_bufio(layout, geometry->bytes_per_page,
+ volume->reserved_buffers, &volume->client);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ result = uds_make_radix_sorter(geometry->records_per_page,
+ &volume->radix_sorter);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ result = vdo_allocate(geometry->records_per_page,
+ const struct uds_volume_record *, "record pointers",
+ &volume->record_pointers);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ if (uds_is_sparse_index_geometry(geometry)) {
+ size_t page_size = sizeof(struct delta_index_page) + geometry->bytes_per_page;
+
+ result = uds_make_sparse_cache(geometry, config->cache_chapters,
+ config->zone_count,
+ &volume->sparse_cache);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ volume->cache_size =
+ page_size * geometry->index_pages_per_chapter * config->cache_chapters;
+ }
+
+ result = initialize_page_cache(&volume->page_cache, geometry,
+ config->cache_chapters, config->zone_count);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ volume->cache_size += volume->page_cache.cache_slots * sizeof(struct delta_index_page);
+ result = uds_make_index_page_map(geometry, &volume->index_page_map);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ mutex_init(&volume->read_threads_mutex);
+ uds_init_cond(&volume->read_threads_read_done_cond);
+ uds_init_cond(&volume->read_threads_cond);
+
+ result = vdo_allocate(config->read_threads, struct thread *, "reader threads",
+ &volume->reader_threads);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ for (i = 0; i < config->read_threads; i++) {
+ result = vdo_create_thread(read_thread_function, (void *) volume,
+ "reader", &volume->reader_threads[i]);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ volume->read_thread_count = i + 1;
+ }
+
+ *new_volume = volume;
+ return UDS_SUCCESS;
+}
+
+static void uninitialize_page_cache(struct page_cache *cache)
+{
+ u16 i;
+
+ if (cache->cache != NULL) {
+ for (i = 0; i < cache->cache_slots; i++)
+ release_page_buffer(&cache->cache[i]);
+ }
+ vdo_free(cache->index);
+ vdo_free(cache->cache);
+ vdo_free(cache->search_pending_counters);
+ vdo_free(cache->read_queue);
+}
+
+void uds_free_volume(struct volume *volume)
+{
+ if (volume == NULL)
+ return;
+
+ if (volume->reader_threads != NULL) {
+ unsigned int i;
+
+ /* This works even if some threads weren't started. */
+ mutex_lock(&volume->read_threads_mutex);
+ volume->read_threads_exiting = true;
+ uds_broadcast_cond(&volume->read_threads_cond);
+ mutex_unlock(&volume->read_threads_mutex);
+ for (i = 0; i < volume->read_thread_count; i++)
+ vdo_join_threads(volume->reader_threads[i]);
+ vdo_free(volume->reader_threads);
+ volume->reader_threads = NULL;
+ }
+
+ /* Must destroy the client AFTER freeing the cached pages. */
+ uninitialize_page_cache(&volume->page_cache);
+ uds_free_sparse_cache(volume->sparse_cache);
+ if (volume->client != NULL)
+ dm_bufio_client_destroy(vdo_forget(volume->client));
+
+ uds_free_index_page_map(volume->index_page_map);
+ uds_free_radix_sorter(volume->radix_sorter);
+ vdo_free(volume->geometry);
+ vdo_free(volume->record_pointers);
+ vdo_free(volume);
+}
diff --git a/drivers/md/dm-vdo/indexer/volume.h b/drivers/md/dm-vdo/indexer/volume.h
new file mode 100644
index 000000000000..8679a5e55347
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_VOLUME_H
+#define UDS_VOLUME_H
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+#include <linux/dm-bufio.h>
+#include <linux/limits.h>
+
+#include "permassert.h"
+#include "thread-utils.h"
+
+#include "chapter-index.h"
+#include "config.h"
+#include "geometry.h"
+#include "indexer.h"
+#include "index-layout.h"
+#include "index-page-map.h"
+#include "radix-sort.h"
+#include "sparse-cache.h"
+
+/*
+ * The volume manages deduplication records on permanent storage. The term "volume" can also refer
+ * to the region of permanent storage where the records (and the chapters containing them) are
+ * stored. The volume handles all I/O to this region by reading, caching, and writing chapter pages
+ * as necessary.
+ */
+
+enum index_lookup_mode {
+ /* Always do lookups in all chapters normally */
+ LOOKUP_NORMAL,
+ /* Only do a subset of lookups needed when rebuilding an index */
+ LOOKUP_FOR_REBUILD,
+};
+
+struct queued_read {
+ bool invalid;
+ bool reserved;
+ u32 physical_page;
+ struct uds_request *first_request;
+ struct uds_request *last_request;
+};
+
+struct __aligned(L1_CACHE_BYTES) search_pending_counter {
+ u64 atomic_value;
+};
+
+struct cached_page {
+ /* Whether this page is currently being read asynchronously */
+ bool read_pending;
+ /* The physical page stored in this cache entry */
+ u32 physical_page;
+ /* The value of the volume clock when this page was last used */
+ s64 last_used;
+ /* The cached page buffer */
+ struct dm_buffer *buffer;
+ /* The chapter index page, meaningless for record pages */
+ struct delta_index_page index_page;
+};
+
+struct page_cache {
+ /* The number of zones */
+ unsigned int zone_count;
+ /* The number of volume pages that can be cached */
+ u32 indexable_pages;
+ /* The maximum number of simultaneously cached pages */
+ u16 cache_slots;
+ /* An index for each physical page noting where it is in the cache */
+ u16 *index;
+ /* The array of cached pages */
+ struct cached_page *cache;
+ /* A counter for each zone tracking if a search is occurring there */
+ struct search_pending_counter *search_pending_counters;
+ /* The read queue entries as a circular array */
+ struct queued_read *read_queue;
+
+ /* All entries above this point are constant after initialization. */
+
+ /*
+ * These values are all indexes into the array of read queue entries. New entries in the
+ * read queue are enqueued at read_queue_last. To dequeue entries, a reader thread gets the
+ * lock and then claims the entry pointed to by read_queue_next_read and increments that
+ * value. After the read is completed, the reader thread calls release_queued_requests(),
+ * which increments read_queue_first until it points to a pending read, or is equal to
+ * read_queue_next_read. This means that if multiple reads are outstanding,
+ * read_queue_first might not advance until the last of the reads finishes.
+ */
+ u16 read_queue_first;
+ u16 read_queue_next_read;
+ u16 read_queue_last;
+
+ atomic64_t clock;
+};
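+
+/*
+ * For example (illustrative indexes only): with read_queue_first = 2, read_queue_next_read = 5,
+ * and read_queue_last = 7, entries 2 through 4 have been claimed by reader threads (entry 2, at
+ * least, has not yet been released), entries 5 and 6 are waiting to be claimed, and the next
+ * enqueued page will occupy slot 7.
+ */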
+
+struct volume {
+ struct index_geometry *geometry;
+ struct dm_bufio_client *client;
+ u64 nonce;
+ size_t cache_size;
+
+ /* A single page worth of records, for sorting */
+ const struct uds_volume_record **record_pointers;
+ /* Sorter for sorting records within each page */
+ struct radix_sorter *radix_sorter;
+
+ struct sparse_cache *sparse_cache;
+ struct page_cache page_cache;
+ struct index_page_map *index_page_map;
+
+ struct mutex read_threads_mutex;
+ struct cond_var read_threads_cond;
+ struct cond_var read_threads_read_done_cond;
+ struct thread **reader_threads;
+ unsigned int read_thread_count;
+ bool read_threads_exiting;
+
+ enum index_lookup_mode lookup_mode;
+ unsigned int reserved_buffers;
+};
+
+int __must_check uds_make_volume(const struct uds_configuration *config,
+ struct index_layout *layout,
+ struct volume **new_volume);
+
+void uds_free_volume(struct volume *volume);
+
+int __must_check uds_replace_volume_storage(struct volume *volume,
+ struct index_layout *layout,
+ struct block_device *bdev);
+
+int __must_check uds_find_volume_chapter_boundaries(struct volume *volume,
+ u64 *lowest_vcn, u64 *highest_vcn,
+ bool *is_empty);
+
+int __must_check uds_search_volume_page_cache(struct volume *volume,
+ struct uds_request *request,
+ bool *found);
+
+int __must_check uds_search_volume_page_cache_for_rebuild(struct volume *volume,
+ const struct uds_record_name *name,
+ u64 virtual_chapter,
+ bool *found);
+
+int __must_check uds_search_cached_record_page(struct volume *volume,
+ struct uds_request *request, u32 chapter,
+ u16 record_page_number, bool *found);
+
+void uds_forget_chapter(struct volume *volume, u64 chapter);
+
+int __must_check uds_write_chapter(struct volume *volume,
+ struct open_chapter_index *chapter_index,
+ const struct uds_volume_record records[]);
+
+void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter);
+
+int __must_check uds_read_chapter_index_from_volume(const struct volume *volume,
+ u64 virtual_chapter,
+ struct dm_buffer *volume_buffers[],
+ struct delta_index_page index_pages[]);
+
+int __must_check uds_get_volume_record_page(struct volume *volume, u32 chapter,
+ u32 page_number, u8 **data_ptr);
+
+int __must_check uds_get_volume_index_page(struct volume *volume, u32 chapter,
+ u32 page_number,
+ struct delta_index_page **page_ptr);
+
+#endif /* UDS_VOLUME_H */
diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c
new file mode 100644
index 000000000000..3aa438f84ea1
--- /dev/null
+++ b/drivers/md/dm-vdo/int-map.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/**
+ * DOC:
+ *
+ * Hash table implementation of a map from integers to pointers, implemented using the Hopscotch
+ * Hashing algorithm by Herlihy, Shavit, and Tzafrir (see
+ * http://en.wikipedia.org/wiki/Hopscotch_hashing). This implementation does not contain any of the
+ * locking/concurrency features of the algorithm, just the collision resolution scheme.
+ *
+ * Hopscotch Hashing is based on hashing with open addressing and linear probing. All the entries
+ * are stored in a fixed array of buckets, with no dynamic allocation for collisions. Unlike linear
+ * probing, all the entries that hash to a given bucket are stored within a fixed neighborhood
+ * starting at that bucket. Chaining is effectively represented as a bit vector relative to each
+ * bucket instead of as pointers or explicit offsets.
+ *
+ * When an empty bucket cannot be found within a given neighborhood, subsequent neighborhoods are
+ * searched, and one or more entries will "hop" into those neighborhoods. When this process works,
+ * an empty bucket will move into the desired neighborhood, allowing the entry to be added. When
+ * that process fails (typically when the buckets are around 90% full), the table must be resized
+ * and all the entries rehashed and added to the expanded table.
+ *
+ * Unlike linear probing, the number of buckets that must be searched in the worst case has a fixed
+ * upper bound (the size of the neighborhood). Those entries occupy a small number of memory cache
+ * lines, leading to improved use of the cache (fewer misses on both successful and unsuccessful
+ * searches). Hopscotch hashing outperforms linear probing at much higher load factors, so even
+ * with the increased memory burden for maintaining the hop vectors, less memory is needed to
+ * achieve that performance. Hopscotch is also immune to "contamination" from deleting entries
+ * since entries are genuinely removed instead of being replaced by a placeholder.
+ *
+ * The published description of the algorithm used a bit vector, but the paper alludes to an offset
+ * scheme which is used by this implementation. Since the entries in the neighborhood are within N
+ * entries of the hash bucket at the start of the neighborhood, a pair of small offset fields each
+ * log2(N) bits wide is all that's needed to maintain the hops as a linked list. In order to encode
+ * "no next hop" (i.e. NULL) as the natural initial value of zero, the offsets are biased by one
+ * (i.e. 0 => NULL, 1 => offset=0, 2 => offset=1, etc.) We can represent neighborhoods of up to 255
+ * entries with just 8+8=16 bits per entry. The hop list is sorted by hop offset so the first entry
+ * in the list is always the bucket closest to the start of the neighborhood.
+ *
+ * While individual accesses tend to be very fast, the table resize operations are very, very
+ * expensive. If an upper bound on the latency of adding an entry to the table is needed, we either
+ * need to ensure the table is pre-sized to be large enough so no resize is ever needed, or we'll
+ * need to develop an approach to incrementally resize the table.
+ */
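+
+/*
+ * A worked example of the biased hop-offset encoding described above (an illustrative sketch,
+ * not used by the code): suppose the neighborhood starting at bucket B[i] holds two entries,
+ * stored in B[i] itself and in B[i+3]. Then:
+ *
+ *   B[i].first_hop  == 1   (biased; the first list entry is at offset 0, i.e. B[i])
+ *   B[i].next_hop   == 4   (biased; the next list entry is at offset 3, i.e. B[i+3])
+ *   B[i+3].next_hop == 0   (NULL_HOP_OFFSET, the end of the hop list)
+ *
+ * All offsets are relative to the first bucket of the neighborhood, so the list stays sorted by
+ * distance from B[i].
+ */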
+
+#include "int-map.h"
+
+#include <linux/minmax.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+
+#define DEFAULT_CAPACITY 16 /* the number of neighborhoods in a new table */
+#define NEIGHBORHOOD 255 /* the number of buckets in each neighborhood */
+#define MAX_PROBES 1024 /* limit on the number of probes for a free bucket */
+#define NULL_HOP_OFFSET 0 /* the hop offset value terminating the hop list */
+#define DEFAULT_LOAD 75 /* a compromise between memory use and performance */
+
+/**
+ * struct bucket - hash bucket
+ *
+ * Buckets are packed together to reduce memory usage and improve cache efficiency. It would be
+ * tempting to encode the hop offsets separately and maintain alignment of key/value pairs, but
+ * it's crucial to keep the hop fields near the buckets that use them so they'll tend to share
+ * cache lines.
+ */
+struct __packed bucket {
+ /**
+ * @first_hop: The biased offset of the first entry in the hop list of the neighborhood
+ * that hashes to this bucket.
+ */
+ u8 first_hop;
+ /** @next_hop: The biased offset of the next bucket in the hop list. */
+ u8 next_hop;
+ /** @key: The key stored in this bucket. */
+ u64 key;
+ /** @value: The value stored in this bucket (NULL if empty). */
+ void *value;
+};
+
+/**
+ * struct int_map - The concrete definition of the opaque int_map type.
+ *
+ * To avoid having to wrap the neighborhoods of the last entries back around to the start of the
+ * bucket array, we allocate a few more buckets at the end of the array instead, which is why
+ * capacity and bucket_count are different.
+ */
+struct int_map {
+ /** @size: The number of entries stored in the map. */
+ size_t size;
+ /** @capacity: The number of neighborhoods in the map. */
+ size_t capacity;
+	/** @bucket_count: The number of buckets in the bucket array. */
+ size_t bucket_count;
+ /** @buckets: The array of hash buckets. */
+ struct bucket *buckets;
+};
+
+/**
+ * mix() - The Google CityHash 16-byte hash mixing function.
+ * @input1: The first input value.
+ * @input2: The second input value.
+ *
+ * Return: A hash of the two inputs.
+ */
+static u64 mix(u64 input1, u64 input2)
+{
+ static const u64 CITY_MULTIPLIER = 0x9ddfea08eb382d69ULL;
+ u64 hash = (input1 ^ input2);
+
+ hash *= CITY_MULTIPLIER;
+ hash ^= (hash >> 47);
+ hash ^= input2;
+ hash *= CITY_MULTIPLIER;
+ hash ^= (hash >> 47);
+ hash *= CITY_MULTIPLIER;
+ return hash;
+}
+
+/**
+ * hash_key() - Calculate a 64-bit non-cryptographic hash value for the provided 64-bit integer
+ * key.
+ * @key: The mapping key.
+ *
+ * The implementation is based on Google's CityHash, only handling the specific case of an 8-byte
+ * input.
+ *
+ * Return: The hash of the mapping key.
+ */
+static u64 hash_key(u64 key)
+{
+ /*
+ * Aliasing restrictions forbid us from casting pointer types, so use a union to convert a
+ * single u64 to two u32 values.
+ */
+ union {
+ u64 u64;
+ u32 u32[2];
+ } pun = {.u64 = key};
+
+ return mix(sizeof(key) + (((u64) pun.u32[0]) << 3), pun.u32[1]);
+}
+
+/**
+ * allocate_buckets() - Initialize an int_map.
+ * @map: The map to initialize.
+ * @capacity: The initial capacity of the map.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int allocate_buckets(struct int_map *map, size_t capacity)
+{
+ map->size = 0;
+ map->capacity = capacity;
+
+ /*
+ * Allocate NEIGHBORHOOD - 1 extra buckets so the last bucket can have a full neighborhood
+	 * without having to wrap back around to element zero.
+ */
+ map->bucket_count = capacity + (NEIGHBORHOOD - 1);
+ return vdo_allocate(map->bucket_count, struct bucket,
+ "struct int_map buckets", &map->buckets);
+}
+
+/**
+ * vdo_int_map_create() - Allocate and initialize an int_map.
+ * @initial_capacity: The number of entries the map should initially be capable of holding (zero
+ * tells the map to use its own small default).
+ * @map_ptr: Output, a pointer to hold the new int_map.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr)
+{
+ struct int_map *map;
+ int result;
+ size_t capacity;
+
+ result = vdo_allocate(1, struct int_map, "struct int_map", &map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Use the default capacity if the caller did not specify one. */
+ capacity = (initial_capacity > 0) ? initial_capacity : DEFAULT_CAPACITY;
+
+ /*
+	 * Scale up the capacity by the specified initial load factor (e.g., to hold 1000 entries
+	 * at 80% load, we need a capacity of 1250).
+ */
+ capacity = capacity * 100 / DEFAULT_LOAD;
+
+ result = allocate_buckets(map, capacity);
+ if (result != VDO_SUCCESS) {
+ vdo_int_map_free(vdo_forget(map));
+ return result;
+ }
+
+ *map_ptr = map;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_int_map_free() - Free an int_map.
+ * @map: The int_map to free.
+ *
+ * NOTE: The map does not own the pointer values stored in the map and they are not freed by this
+ * call.
+ */
+void vdo_int_map_free(struct int_map *map)
+{
+ if (map == NULL)
+ return;
+
+ vdo_free(vdo_forget(map->buckets));
+ vdo_free(vdo_forget(map));
+}
+
+/**
+ * vdo_int_map_size() - Get the number of entries stored in an int_map.
+ * @map: The int_map to query.
+ *
+ * Return: The number of entries in the map.
+ */
+size_t vdo_int_map_size(const struct int_map *map)
+{
+ return map->size;
+}
+
+/**
+ * dereference_hop() - Convert a biased hop offset within a neighborhood to a pointer to the bucket
+ * it references.
+ * @neighborhood: The first bucket in the neighborhood.
+ * @hop_offset: The biased hop offset to the desired bucket.
+ *
+ * Return: NULL if hop_offset is zero, otherwise a pointer to the bucket in the neighborhood at
+ * hop_offset - 1.
+ */
+static struct bucket *dereference_hop(struct bucket *neighborhood, unsigned int hop_offset)
+{
+ BUILD_BUG_ON(NULL_HOP_OFFSET != 0);
+ if (hop_offset == NULL_HOP_OFFSET)
+ return NULL;
+
+ return &neighborhood[hop_offset - 1];
+}
+
+/**
+ * insert_in_hop_list() - Add a bucket into the hop list for the neighborhood.
+ * @neighborhood: The first bucket in the neighborhood.
+ * @new_bucket: The bucket to add to the hop list.
+ *
+ * The bucket is inserted into the list so that the hop list remains sorted by hop offset.
+ */
+static void insert_in_hop_list(struct bucket *neighborhood, struct bucket *new_bucket)
+{
+ /* Zero indicates a NULL hop offset, so bias the hop offset by one. */
+ int hop_offset = 1 + (new_bucket - neighborhood);
+
+ /* Handle the special case of adding a bucket at the start of the list. */
+ int next_hop = neighborhood->first_hop;
+
+ if ((next_hop == NULL_HOP_OFFSET) || (next_hop > hop_offset)) {
+ new_bucket->next_hop = next_hop;
+ neighborhood->first_hop = hop_offset;
+ return;
+ }
+
+ /* Search the hop list for the insertion point that maintains the sort order. */
+ for (;;) {
+ struct bucket *bucket = dereference_hop(neighborhood, next_hop);
+
+ next_hop = bucket->next_hop;
+
+ if ((next_hop == NULL_HOP_OFFSET) || (next_hop > hop_offset)) {
+ new_bucket->next_hop = next_hop;
+ bucket->next_hop = hop_offset;
+ return;
+ }
+ }
+}
+
+/**
+ * select_bucket() - Select and return the hash bucket for a given search key.
+ * @map: The map to search.
+ * @key: The mapping key.
+ */
+static struct bucket *select_bucket(const struct int_map *map, u64 key)
+{
+ /*
+ * Calculate a good hash value for the provided key. We want exactly 32 bits, so mask the
+ * result.
+ */
+ u64 hash = hash_key(key) & 0xFFFFFFFF;
+
+ /*
+ * Scale the 32-bit hash to a bucket index by treating it as a binary fraction and
+ * multiplying that by the capacity. If the hash is uniformly distributed over [0 ..
+ * 2^32-1], then (hash * capacity / 2^32) should be uniformly distributed over [0 ..
+ * capacity-1]. The multiply and shift is much faster than a divide (modulus) on X86 CPUs.
+ */
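+	/*
+	 * For example (illustration only): with capacity == 1000 and hash == 0x40000000 (one
+	 * quarter of 2^32), the result is (0x40000000 * 1000) >> 32 == 250, one quarter of the
+	 * way through the bucket array.
+	 */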
+ return &map->buckets[(hash * map->capacity) >> 32];
+}
+
+/**
+ * search_hop_list() - Search the hop list associated with given hash bucket for a given search
+ * key.
+ * @map: The map being searched.
+ * @bucket: The map bucket to search for the key.
+ * @key: The mapping key.
+ * @previous_ptr: Output. If not NULL, a pointer in which to store the bucket in the list
+ *                preceding the one that has the matching key.
+ *
+ * If the key is found, returns a pointer to the entry (bucket or collision), otherwise returns
+ * NULL.
+ *
+ * Return: An entry that matches the key, or NULL if not found.
+ */
+static struct bucket *search_hop_list(struct int_map *map __always_unused,
+ struct bucket *bucket,
+ u64 key,
+ struct bucket **previous_ptr)
+{
+ struct bucket *previous = NULL;
+ unsigned int next_hop = bucket->first_hop;
+
+ while (next_hop != NULL_HOP_OFFSET) {
+ /*
+ * Check the neighboring bucket indexed by the offset for the
+ * desired key.
+ */
+ struct bucket *entry = dereference_hop(bucket, next_hop);
+
+ if ((key == entry->key) && (entry->value != NULL)) {
+ if (previous_ptr != NULL)
+ *previous_ptr = previous;
+ return entry;
+ }
+ next_hop = entry->next_hop;
+ previous = entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * vdo_int_map_get() - Retrieve the value associated with a given key from the int_map.
+ * @map: The int_map to query.
+ * @key: The key to look up.
+ *
+ * Return: The value associated with the given key, or NULL if the key is not mapped to any value.
+ */
+void *vdo_int_map_get(struct int_map *map, u64 key)
+{
+ struct bucket *match = search_hop_list(map, select_bucket(map, key), key, NULL);
+
+ return ((match != NULL) ? match->value : NULL);
+}
+
+/**
+ * resize_buckets() - Increase the number of hash buckets.
+ * @map: The map to resize.
+ *
+ * Resizes and rehashes all the existing entries, storing them in the new buckets.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int resize_buckets(struct int_map *map)
+{
+ int result;
+ size_t i;
+
+ /* Copy the top-level map data to the stack. */
+ struct int_map old_map = *map;
+
+ /* Re-initialize the map to be empty and 50% larger. */
+ size_t new_capacity = map->capacity / 2 * 3;
+
+ vdo_log_info("%s: attempting resize from %zu to %zu, current size=%zu",
+ __func__, map->capacity, new_capacity, map->size);
+ result = allocate_buckets(map, new_capacity);
+ if (result != VDO_SUCCESS) {
+ *map = old_map;
+ return result;
+ }
+
+ /* Populate the new hash table from the entries in the old bucket array. */
+ for (i = 0; i < old_map.bucket_count; i++) {
+ struct bucket *entry = &old_map.buckets[i];
+
+ if (entry->value == NULL)
+ continue;
+
+ result = vdo_int_map_put(map, entry->key, entry->value, true, NULL);
+ if (result != VDO_SUCCESS) {
+ /* Destroy the new partial map and restore the map from the stack. */
+ vdo_free(vdo_forget(map->buckets));
+ *map = old_map;
+ return result;
+ }
+ }
+
+ /* Destroy the old bucket array. */
+ vdo_free(vdo_forget(old_map.buckets));
+ return VDO_SUCCESS;
+}
+
+/**
+ * find_empty_bucket() - Probe the bucket array starting at the given bucket for the next empty
+ * bucket, returning a pointer to it.
+ * @map: The map containing the buckets to search.
+ * @bucket: The bucket at which to start probing.
+ * @max_probes: The maximum number of buckets to search.
+ *
+ * NULL will be returned if the search reaches the end of the bucket array or if the number of
+ * linear probes exceeds a specified limit.
+ *
+ * Return: The next empty bucket, or NULL if the search failed.
+ */
+static struct bucket *
+find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_probes)
+{
+ /*
+	 * Limit the search to the nearer of the end of the bucket array or a fixed distance
+ * beyond the initial bucket.
+ */
+ ptrdiff_t remaining = &map->buckets[map->bucket_count] - bucket;
+ struct bucket *sentinel = &bucket[min_t(ptrdiff_t, remaining, max_probes)];
+ struct bucket *entry;
+
+ for (entry = bucket; entry < sentinel; entry++) {
+ if (entry->value == NULL)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * move_empty_bucket() - Move an empty bucket closer to the start of the bucket array.
+ * @map: The map containing the bucket.
+ * @hole: The empty bucket to fill with an entry that precedes it in one of its enclosing
+ * neighborhoods.
+ *
+ * This searches the neighborhoods that contain the empty bucket for a non-empty bucket closer to
+ * the start of the array. If such a bucket is found, this swaps the two buckets by moving the
+ * entry to the empty bucket.
+ *
+ * Return: The bucket that was vacated by moving its entry to the provided hole, or NULL if no
+ * entry could be moved.
+ */
+static struct bucket *move_empty_bucket(struct int_map *map __always_unused,
+ struct bucket *hole)
+{
+ /*
+ * Examine every neighborhood that the empty bucket is part of, starting with the one in
+ * which it is the last bucket. No boundary check is needed for the negative array
+ * arithmetic since this function is only called when hole is at least NEIGHBORHOOD cells
+ * deeper into the array than a valid bucket.
+ */
+ struct bucket *bucket;
+
+ for (bucket = &hole[1 - NEIGHBORHOOD]; bucket < hole; bucket++) {
+ /*
+ * Find the entry that is nearest to the bucket, which means it will be nearest to
+ * the hash bucket whose neighborhood is full.
+ */
+ struct bucket *new_hole = dereference_hop(bucket, bucket->first_hop);
+
+ if (new_hole == NULL) {
+ /*
+ * There are no buckets in this neighborhood that are in use by this one
+ * (they must all be owned by overlapping neighborhoods).
+ */
+ continue;
+ }
+
+ /*
+ * Skip this bucket if its first entry is actually further away than the hole that
+ * we're already trying to fill.
+ */
+ if (hole < new_hole)
+ continue;
+
+ /*
+ * We've found an entry in this neighborhood that we can "hop" further away, moving
+ * the hole closer to the hash bucket, if not all the way into its neighborhood.
+ */
+
+ /*
+ * The entry that will be the new hole is the first bucket in the list, so setting
+		 * first_hop is all that's needed to remove it from the list.
+ */
+ bucket->first_hop = new_hole->next_hop;
+ new_hole->next_hop = NULL_HOP_OFFSET;
+
+ /* Move the entry into the original hole. */
+ hole->key = new_hole->key;
+ hole->value = new_hole->value;
+ new_hole->value = NULL;
+
+ /* Insert the filled hole into the hop list for the neighborhood. */
+ insert_in_hop_list(bucket, hole);
+ return new_hole;
+ }
+
+ /* We couldn't find an entry to relocate to the hole. */
+ return NULL;
+}
+
+/**
+ * update_mapping() - Find and update any existing mapping for a given key, returning the value
+ * associated with the key in the provided pointer.
+ * @map: The int_map to attempt to modify.
+ * @neighborhood: The first bucket in the neighborhood that would contain the search key
+ * @key: The key with which to associate the new value.
+ * @new_value: The value to be associated with the key.
+ * @update: Whether to overwrite an existing value.
+ * @old_value_ptr: a pointer in which to store the old value (unmodified if no mapping was found)
+ *
+ * Return: true if the map contains a mapping for the key, false if it does not.
+ */
+static bool update_mapping(struct int_map *map, struct bucket *neighborhood,
+ u64 key, void *new_value, bool update, void **old_value_ptr)
+{
+ struct bucket *bucket = search_hop_list(map, neighborhood, key, NULL);
+
+ if (bucket == NULL) {
+ /* There is no bucket containing the key in the neighborhood. */
+ return false;
+ }
+
+ /*
+ * Return the value of the current mapping (if desired) and update the mapping with the new
+ * value (if desired).
+ */
+ if (old_value_ptr != NULL)
+ *old_value_ptr = bucket->value;
+ if (update)
+ bucket->value = new_value;
+ return true;
+}
+
+/**
+ * find_or_make_vacancy() - Find an empty bucket.
+ * @map: The int_map to search or modify.
+ * @neighborhood: The first bucket in the neighborhood in which an empty bucket is needed for a new
+ * mapping.
+ *
+ * Find an empty bucket in a specified neighborhood for a new mapping or attempt to re-arrange
+ * mappings so there is such a bucket. This operation may fail (returning NULL) if an empty bucket
+ * is not available or could not be relocated to the neighborhood.
+ *
+ * Return: a pointer to an empty bucket in the desired neighborhood, or NULL if a vacancy could not
+ * be found or arranged.
+ */
+static struct bucket *find_or_make_vacancy(struct int_map *map,
+ struct bucket *neighborhood)
+{
+ /* Probe within and beyond the neighborhood for the first empty bucket. */
+ struct bucket *hole = find_empty_bucket(map, neighborhood, MAX_PROBES);
+
+ /*
+ * Keep trying until the empty bucket is in the bucket's neighborhood or we are unable to
+ * move it any closer by swapping it with a filled bucket.
+ */
+ while (hole != NULL) {
+ int distance = hole - neighborhood;
+
+ if (distance < NEIGHBORHOOD) {
+ /*
+ * We've found or relocated an empty bucket close enough to the initial
+ * hash bucket to be referenced by its hop vector.
+ */
+ return hole;
+ }
+
+ /*
+ * The nearest empty bucket isn't within the neighborhood that must contain the new
+		 * entry, so try to swap it with a bucket that is closer.
+ */
+ hole = move_empty_bucket(map, hole);
+ }
+
+ return NULL;
+}
+
+/**
+ * vdo_int_map_put() - Try to associate a value with an integer.
+ * @map: The int_map to attempt to modify.
+ * @key: The key with which to associate the new value.
+ * @new_value: The value to be associated with the key.
+ * @update: Whether to overwrite an existing value.
+ * @old_value_ptr: A pointer in which to store either the old value (if the key was already mapped)
+ * or NULL if the map did not contain the key; NULL may be provided if the caller
+ * does not need to know the old value
+ *
+ * Try to associate a value (a pointer) with an integer in an int_map. If the map already contains
+ * a mapping for the provided key, the old value is only replaced with the specified value if
+ * update is true. In either case the old value is returned. If the map does not already contain a
+ * value for the specified key, the new value is added regardless of the value of update.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update,
+ void **old_value_ptr)
+{
+ struct bucket *neighborhood, *bucket;
+
+ if (unlikely(new_value == NULL))
+ return -EINVAL;
+
+ /*
+ * Select the bucket at the start of the neighborhood that must contain any entry for the
+ * provided key.
+ */
+ neighborhood = select_bucket(map, key);
+
+ /*
+ * Check whether the neighborhood already contains an entry for the key, in which case we
+ * optionally update it, returning the old value.
+ */
+ if (update_mapping(map, neighborhood, key, new_value, update, old_value_ptr))
+ return VDO_SUCCESS;
+
+ /*
+ * Find an empty bucket in the desired neighborhood for the new entry or re-arrange entries
+ * in the map so there is such a bucket. This operation will usually succeed; the loop body
+ * will only be executed on the rare occasions that we have to resize the map.
+ */
+ while ((bucket = find_or_make_vacancy(map, neighborhood)) == NULL) {
+ int result;
+
+ /*
+ * There is no empty bucket in which to put the new entry in the current map, so
+ * we're forced to allocate a new bucket array with a larger capacity, re-hash all
+ * the entries into those buckets, and try again (a very expensive operation for
+ * large maps).
+ */
+ result = resize_buckets(map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * Resizing the map invalidates all pointers to buckets, so recalculate the
+ * neighborhood pointer.
+ */
+ neighborhood = select_bucket(map, key);
+ }
+
+ /* Put the new entry in the empty bucket, adding it to the neighborhood. */
+ bucket->key = key;
+ bucket->value = new_value;
+ insert_in_hop_list(neighborhood, bucket);
+ map->size += 1;
+
+ /* There was no existing entry, so there was no old value to be returned. */
+ if (old_value_ptr != NULL)
+ *old_value_ptr = NULL;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_int_map_remove() - Remove the mapping for a given key from the int_map.
+ * @map: The int_map from which to remove the mapping.
+ * @key: The key whose mapping is to be removed.
+ *
+ * Return: the value that was associated with the key, or NULL if it was not mapped.
+ */
+void *vdo_int_map_remove(struct int_map *map, u64 key)
+{
+ void *value;
+
+ /* Select the bucket to search and search it for an existing entry. */
+ struct bucket *bucket = select_bucket(map, key);
+ struct bucket *previous;
+ struct bucket *victim = search_hop_list(map, bucket, key, &previous);
+
+ if (victim == NULL) {
+ /* There is no matching entry to remove. */
+ return NULL;
+ }
+
+ /*
+ * We found an entry to remove. Save the mapped value to return later and empty the bucket.
+ */
+ map->size -= 1;
+ value = victim->value;
+ victim->value = NULL;
+ victim->key = 0;
+
+ /* The victim bucket is now empty, but it still needs to be spliced out of the hop list. */
+ if (previous == NULL) {
+ /* The victim is the head of the list, so swing first_hop. */
+ bucket->first_hop = victim->next_hop;
+ } else {
+ previous->next_hop = victim->next_hop;
+ }
+
+ victim->next_hop = NULL_HOP_OFFSET;
+ return value;
+}
diff --git a/drivers/md/dm-vdo/int-map.h b/drivers/md/dm-vdo/int-map.h
new file mode 100644
index 000000000000..1858ad799887
--- /dev/null
+++ b/drivers/md/dm-vdo/int-map.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_INT_MAP_H
+#define VDO_INT_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/**
+ * DOC: int_map
+ *
+ * An int_map associates pointers (void *) with integer keys (u64). NULL pointer values are
+ * not supported.
+ *
+ * The map is implemented as a hash table, which should provide constant-time insert, query, and
+ * remove operations, although the insert may occasionally grow the table, which is linear in the
+ * number of entries in the map. The table will grow as needed to hold new entries, but will not
+ * shrink as entries are removed.
+ */
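+
+/*
+ * A minimal usage sketch (illustration only; "item" stands for any caller-supplied non-NULL
+ * pointer, VDO_SUCCESS is the usual vdo status code, and error handling is abbreviated):
+ *
+ *	struct int_map *map;
+ *	void *removed;
+ *
+ *	if (vdo_int_map_create(0, &map) != VDO_SUCCESS)
+ *		return;
+ *	if (vdo_int_map_put(map, 42, item, true, NULL) == VDO_SUCCESS)
+ *		item = vdo_int_map_get(map, 42);
+ *	removed = vdo_int_map_remove(map, 42);
+ *	vdo_int_map_free(map);
+ */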
+
+struct int_map;
+
+int __must_check vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr);
+
+void vdo_int_map_free(struct int_map *map);
+
+size_t vdo_int_map_size(const struct int_map *map);
+
+void *vdo_int_map_get(struct int_map *map, u64 key);
+
+int __must_check vdo_int_map_put(struct int_map *map, u64 key, void *new_value,
+ bool update, void **old_value_ptr);
+
+void *vdo_int_map_remove(struct int_map *map, u64 key);
+
+#endif /* VDO_INT_MAP_H */
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
new file mode 100644
index 000000000000..9a3716bb3c05
--- /dev/null
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "io-submitter.h"
+
+#include <linux/bio.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "data-vio.h"
+#include "logger.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+
+/*
+ * Submission of bio operations to the underlying storage device will go through a separate work
+ * queue thread (or more than one) to prevent blocking in other threads if the storage device has a
+ * full queue. The plug structure allows that thread to do better batching of requests to make the
+ * I/O more efficient.
+ *
+ * When multiple worker threads are used, a thread is chosen for an I/O operation submission based
+ * on the PBN, so a given PBN will consistently wind up on the same thread. Flush operations are
+ * assigned round-robin.
+ *
+ * The map (protected by the mutex) collects pending I/O operations so that the worker thread can
+ * reorder them to try to encourage I/O request merging in the request queue underneath.
+ */
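+
+/*
+ * A sketch of the merge bookkeeping (illustration only, assuming the usual 4 KB vdo block size,
+ * i.e. VDO_SECTORS_PER_BLOCK == 8): each queued vio is stored in its queue's map under the
+ * starting sectors of both the head and the tail of its merged bio list. A later bio starting at
+ * sector 72 probes key 72 - 8 == 64 for a vio whose last merged bio starts there (a back merge)
+ * and key 72 + 8 == 80 for a vio whose first merged bio starts there (a front merge). After a
+ * back merge, the old tail key is removed and the combined list is re-registered under its new
+ * head (64) and tail (72) sectors.
+ */
+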
+struct bio_queue_data {
+ struct vdo_work_queue *queue;
+ struct blk_plug plug;
+ struct int_map *map;
+ struct mutex lock;
+ unsigned int queue_number;
+};
+
+struct io_submitter {
+ unsigned int num_bio_queues_used;
+ unsigned int bio_queue_rotation_interval;
+ struct bio_queue_data bio_queue_data[];
+};
+
+static void start_bio_queue(void *ptr)
+{
+ struct bio_queue_data *bio_queue_data = ptr;
+
+ blk_start_plug(&bio_queue_data->plug);
+}
+
+static void finish_bio_queue(void *ptr)
+{
+ struct bio_queue_data *bio_queue_data = ptr;
+
+ blk_finish_plug(&bio_queue_data->plug);
+}
+
+static const struct vdo_work_queue_type bio_queue_type = {
+ .start = start_bio_queue,
+ .finish = finish_bio_queue,
+ .max_priority = BIO_Q_MAX_PRIORITY,
+ .default_priority = BIO_Q_DATA_PRIORITY,
+};
+
+/**
+ * count_all_bios() - Determine which bio counter to use.
+ * @vio: The vio associated with the bio.
+ * @bio: The bio to count.
+ */
+static void count_all_bios(struct vio *vio, struct bio *bio)
+{
+ struct atomic_statistics *stats = &vio->completion.vdo->stats;
+
+ if (is_data_vio(vio)) {
+ vdo_count_bios(&stats->bios_out, bio);
+ return;
+ }
+
+ vdo_count_bios(&stats->bios_meta, bio);
+ if (vio->type == VIO_TYPE_RECOVERY_JOURNAL)
+ vdo_count_bios(&stats->bios_journal, bio);
+ else if (vio->type == VIO_TYPE_BLOCK_MAP)
+ vdo_count_bios(&stats->bios_page_cache, bio);
+}
+
+/**
+ * assert_in_bio_zone() - Assert that a vio is in the correct bio zone and not in interrupt
+ * context.
+ * @vio: The vio to check.
+ */
+static void assert_in_bio_zone(struct vio *vio)
+{
+ VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
+ assert_vio_in_bio_zone(vio);
+}
+
+/**
+ * send_bio_to_device() - Update stats and tracing info, then submit the supplied bio to the OS for
+ * processing.
+ * @vio: The vio associated with the bio.
+ * @bio: The bio to submit to the OS.
+ */
+static void send_bio_to_device(struct vio *vio, struct bio *bio)
+{
+ struct vdo *vdo = vio->completion.vdo;
+
+ assert_in_bio_zone(vio);
+ atomic64_inc(&vdo->stats.bios_submitted);
+ count_all_bios(vio, bio);
+ bio_set_dev(bio, vdo_get_backing_device(vdo));
+ submit_bio_noacct(bio);
+}
+
+/**
+ * vdo_submit_vio() - Submit a vio's bio to the underlying block device. May block if the device
+ * is busy. This callback should be used by vios which did not attempt to merge.
+ */
+void vdo_submit_vio(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+
+ send_bio_to_device(vio, vio->bio);
+}
+
+/**
+ * get_bio_list() - Extract the list of bios to submit from a vio.
+ * @vio: The vio submitting I/O.
+ *
+ * The list will always contain at least one entry (the bio for the vio on which it is called), but
+ * other bios may have been merged with it as well.
+ *
+ * Return: The head of the bio list to submit.
+ */
+static struct bio *get_bio_list(struct vio *vio)
+{
+ struct bio *bio;
+ struct io_submitter *submitter = vio->completion.vdo->io_submitter;
+ struct bio_queue_data *bio_queue_data = &(submitter->bio_queue_data[vio->bio_zone]);
+
+ assert_in_bio_zone(vio);
+
+ mutex_lock(&bio_queue_data->lock);
+ vdo_int_map_remove(bio_queue_data->map,
+ vio->bios_merged.head->bi_iter.bi_sector);
+ vdo_int_map_remove(bio_queue_data->map,
+ vio->bios_merged.tail->bi_iter.bi_sector);
+ bio = vio->bios_merged.head;
+ bio_list_init(&vio->bios_merged);
+ mutex_unlock(&bio_queue_data->lock);
+
+ return bio;
+}
+
+/**
+ * submit_data_vio() - Submit a data_vio's bio to the storage below along with
+ * any bios that have been merged with it.
+ *
+ * Context: This call may block and so should only be called from a bio thread.
+ */
+static void submit_data_vio(struct vdo_completion *completion)
+{
+ struct bio *bio, *next;
+ struct vio *vio = as_vio(completion);
+
+ assert_in_bio_zone(vio);
+ for (bio = get_bio_list(vio); bio != NULL; bio = next) {
+ next = bio->bi_next;
+ bio->bi_next = NULL;
+ send_bio_to_device((struct vio *) bio->bi_private, bio);
+ }
+}
+
+/**
+ * get_mergeable_locked() - Attempt to find an already queued bio that the current bio can be
+ * merged with.
+ * @map: The bio map to use for merging.
+ * @vio: The vio we want to merge.
+ * @back_merge: Set to true for a back merge, false for a front merge.
+ *
+ * There are two types of merging possible, forward and backward, which are distinguished by a flag
+ * that uses kernel elevator terminology.
+ *
+ * Return: the vio to merge to, NULL if no merging is possible.
+ */
+static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
+ bool back_merge)
+{
+ struct bio *bio = vio->bio;
+ sector_t merge_sector = bio->bi_iter.bi_sector;
+ struct vio *vio_merge;
+
+ if (back_merge)
+ merge_sector -= VDO_SECTORS_PER_BLOCK;
+ else
+ merge_sector += VDO_SECTORS_PER_BLOCK;
+
+ vio_merge = vdo_int_map_get(map, merge_sector);
+
+ if (vio_merge == NULL)
+ return NULL;
+
+ if (vio->completion.priority != vio_merge->completion.priority)
+ return NULL;
+
+ if (bio_data_dir(bio) != bio_data_dir(vio_merge->bio))
+ return NULL;
+
+ if (bio_list_empty(&vio_merge->bios_merged))
+ return NULL;
+
+ if (back_merge) {
+ return (vio_merge->bios_merged.tail->bi_iter.bi_sector == merge_sector ?
+ vio_merge : NULL);
+ }
+
+ return (vio_merge->bios_merged.head->bi_iter.bi_sector == merge_sector ?
+ vio_merge : NULL);
+}
+
+static int map_merged_vio(struct int_map *bio_map, struct vio *vio)
+{
+ int result;
+ sector_t bio_sector;
+
+ bio_sector = vio->bios_merged.head->bi_iter.bi_sector;
+ result = vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ bio_sector = vio->bios_merged.tail->bi_iter.bi_sector;
+ return vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
+}
+
+static int merge_to_prev_tail(struct int_map *bio_map, struct vio *vio,
+ struct vio *prev_vio)
+{
+ vdo_int_map_remove(bio_map, prev_vio->bios_merged.tail->bi_iter.bi_sector);
+ bio_list_merge(&prev_vio->bios_merged, &vio->bios_merged);
+ return map_merged_vio(bio_map, prev_vio);
+}
+
+static int merge_to_next_head(struct int_map *bio_map, struct vio *vio,
+ struct vio *next_vio)
+{
+ /*
+ * Handle "next merge" and "gap fill" cases the same way so as to reorder bios in a way
+ * that's compatible with using funnel queues in work queues. This avoids removing an
+ * existing completion.
+ */
+ vdo_int_map_remove(bio_map, next_vio->bios_merged.head->bi_iter.bi_sector);
+ bio_list_merge_head(&next_vio->bios_merged, &vio->bios_merged);
+ return map_merged_vio(bio_map, next_vio);
+}
+
+/**
+ * try_bio_map_merge() - Attempt to merge a vio's bio with other pending I/Os.
+ * @vio: The vio to merge.
+ *
+ * Currently this is only used for data_vios, but is broken out for future use with metadata vios.
+ *
+ * Return: whether or not the vio was merged.
+ */
+static bool try_bio_map_merge(struct vio *vio)
+{
+ int result;
+ bool merged = true;
+ struct bio *bio = vio->bio;
+ struct vio *prev_vio, *next_vio;
+ struct vdo *vdo = vio->completion.vdo;
+ struct bio_queue_data *bio_queue_data =
+ &vdo->io_submitter->bio_queue_data[vio->bio_zone];
+
+ bio->bi_next = NULL;
+ bio_list_init(&vio->bios_merged);
+ bio_list_add(&vio->bios_merged, bio);
+
+ mutex_lock(&bio_queue_data->lock);
+ prev_vio = get_mergeable_locked(bio_queue_data->map, vio, true);
+ next_vio = get_mergeable_locked(bio_queue_data->map, vio, false);
+ if (prev_vio == next_vio)
+ next_vio = NULL;
+
+ if ((prev_vio == NULL) && (next_vio == NULL)) {
+ /* no merge. just add to bio_queue */
+ merged = false;
+ result = vdo_int_map_put(bio_queue_data->map,
+ bio->bi_iter.bi_sector,
+ vio, true, NULL);
+ } else if (next_vio == NULL) {
+ /* Only prev. merge to prev's tail */
+ result = merge_to_prev_tail(bio_queue_data->map, vio, prev_vio);
+ } else {
+ /* Only next. merge to next's head */
+ result = merge_to_next_head(bio_queue_data->map, vio, next_vio);
+ }
+ mutex_unlock(&bio_queue_data->lock);
+
+ /* We don't care about failure of int_map_put in this case. */
+ VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
+ return merged;
+}
+
+/**
+ * vdo_submit_data_vio() - Submit I/O for a data_vio.
+ * @data_vio: the data_vio for which to issue I/O.
+ *
+ * If possible, this I/O will be merged with other pending I/Os. Otherwise, the data_vio will be
+ * the appropriate bio zone directly.
+ */
+void vdo_submit_data_vio(struct data_vio *data_vio)
+{
+ if (try_bio_map_merge(&data_vio->vio))
+ return;
+
+ launch_data_vio_bio_zone_callback(data_vio, submit_data_vio);
+}
+
+/**
+ * __submit_metadata_vio() - Submit I/O for a metadata vio.
+ * @vio: the vio for which to issue I/O
+ * @physical: the physical block number to read or write
+ * @callback: the bio endio function which will be called after the I/O completes
+ * @error_handler: the handler for submission or I/O errors (may be NULL)
+ * @operation: the type of I/O to perform
+ * @data: the buffer to read or write (may be NULL)
+ *
+ * The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
+ * other vdo threads.
+ *
+ * The error handler is only guaranteed to run on the correct thread as long as the thread
+ * calling this function and the thread set in the endio callback are the same, and no error can
+ * occur on the bio queue. Currently this is true for all callers, but additional care will be
+ * needed if this ever changes.
+ */
+void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ blk_opf_t operation, char *data)
+{
+ int result;
+ struct vdo_completion *completion = &vio->completion;
+ const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
+
+ VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
+ VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
+
+ vdo_reset_completion(completion);
+ completion->error_handler = error_handler;
+ result = vio_reset_bio(vio, data, callback, operation | REQ_META, physical);
+ if (result != VDO_SUCCESS) {
+ continue_vio(vio, result);
+ return;
+ }
+
+ vdo_set_completion_callback(completion, vdo_submit_vio,
+ get_vio_bio_zone_thread_id(vio));
+ vdo_launch_completion_with_priority(completion, get_metadata_priority(vio));
+}
+
+/**
+ * vdo_make_io_submitter() - Create an io_submitter structure.
+ * @thread_count: Number of bio-submission threads to set up.
+ * @rotation_interval: Interval to use when rotating between bio-submission threads when enqueuing
+ * completions.
+ * @max_requests_active: Number of bios for merge tracking.
+ * @vdo: The vdo which will use this submitter.
+ * @io_submitter_ptr: A pointer to hold the new io_submitter.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_interval,
+ unsigned int max_requests_active, struct vdo *vdo,
+ struct io_submitter **io_submitter_ptr)
+{
+ unsigned int i;
+ struct io_submitter *io_submitter;
+ int result;
+
+ result = vdo_allocate_extended(struct io_submitter, thread_count,
+ struct bio_queue_data, "bio submission data",
+ &io_submitter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ io_submitter->bio_queue_rotation_interval = rotation_interval;
+
+ /* Setup for each bio-submission work queue */
+ for (i = 0; i < thread_count; i++) {
+ struct bio_queue_data *bio_queue_data = &io_submitter->bio_queue_data[i];
+
+ mutex_init(&bio_queue_data->lock);
+ /*
+ * One I/O operation per request, but both first & last sector numbers.
+ *
+ * If requests are assigned to threads round-robin, they should be distributed
+ * quite evenly. But if they're assigned based on PBN, things can sometimes be very
+ * uneven. So for now, we'll assume that all requests *may* wind up on one thread,
+ * and thus all in the same map.
+ */
+ result = vdo_int_map_create(max_requests_active * 2,
+ &bio_queue_data->map);
+ if (result != VDO_SUCCESS) {
+ /*
+ * Clean up the partially initialized bio-queue entirely and indicate that
+ * initialization failed.
+ */
+ vdo_log_error("bio map initialization failed %d", result);
+ vdo_cleanup_io_submitter(io_submitter);
+ vdo_free_io_submitter(io_submitter);
+ return result;
+ }
+
+ bio_queue_data->queue_number = i;
+ result = vdo_make_thread(vdo, vdo->thread_config.bio_threads[i],
+ &bio_queue_type, 1, (void **) &bio_queue_data);
+ if (result != VDO_SUCCESS) {
+ /*
+ * Clean up the partially initialized bio-queue entirely and indicate that
+ * initialization failed.
+ */
+ vdo_int_map_free(vdo_forget(bio_queue_data->map));
+ vdo_log_error("bio queue initialization failed %d", result);
+ vdo_cleanup_io_submitter(io_submitter);
+ vdo_free_io_submitter(io_submitter);
+ return result;
+ }
+
+ bio_queue_data->queue = vdo->threads[vdo->thread_config.bio_threads[i]].queue;
+ io_submitter->num_bio_queues_used++;
+ }
+
+ *io_submitter_ptr = io_submitter;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer.
+ * @io_submitter: The I/O submitter data to tear down (may be NULL).
+ */
+void vdo_cleanup_io_submitter(struct io_submitter *io_submitter)
+{
+ int i;
+
+ if (io_submitter == NULL)
+ return;
+
+ for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--)
+ vdo_finish_work_queue(io_submitter->bio_queue_data[i].queue);
+}
+
+/**
+ * vdo_free_io_submitter() - Free the io_submitter fields and structure as needed.
+ * @io_submitter: The I/O submitter data to destroy.
+ *
+ * This must be called after vdo_cleanup_io_submitter(). It is used to release resources late in
+ * the shutdown process to avoid or reduce the chance of race conditions.
+ */
+void vdo_free_io_submitter(struct io_submitter *io_submitter)
+{
+ int i;
+
+ if (io_submitter == NULL)
+ return;
+
+ for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--) {
+ io_submitter->num_bio_queues_used--;
+ /* vdo_destroy() will free the work queue, so just give up our reference to it. */
+ vdo_forget(io_submitter->bio_queue_data[i].queue);
+ vdo_int_map_free(vdo_forget(io_submitter->bio_queue_data[i].map));
+ }
+ vdo_free(io_submitter);
+}
diff --git a/drivers/md/dm-vdo/io-submitter.h b/drivers/md/dm-vdo/io-submitter.h
new file mode 100644
index 000000000000..80748699496f
--- /dev/null
+++ b/drivers/md/dm-vdo/io-submitter.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_IO_SUBMITTER_H
+#define VDO_IO_SUBMITTER_H
+
+#include <linux/bio.h>
+
+#include "types.h"
+
+struct io_submitter;
+
+int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_interval,
+ unsigned int max_requests_active, struct vdo *vdo,
+ struct io_submitter **io_submitter);
+
+void vdo_cleanup_io_submitter(struct io_submitter *io_submitter);
+
+void vdo_free_io_submitter(struct io_submitter *io_submitter);
+
+void vdo_submit_vio(struct vdo_completion *completion);
+
+void vdo_submit_data_vio(struct data_vio *data_vio);
+
+void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ blk_opf_t operation, char *data);
+
+static inline void vdo_submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ blk_opf_t operation)
+{
+ __submit_metadata_vio(vio, physical, callback, error_handler,
+ operation, vio->data);
+}
+
+static inline void vdo_submit_flush_vio(struct vio *vio, bio_end_io_t callback,
+ vdo_action_fn error_handler)
+{
+ /* FIXME: Can we just use REQ_OP_FLUSH? */
+ __submit_metadata_vio(vio, 0, callback, error_handler,
+ REQ_OP_WRITE | REQ_PREFLUSH, NULL);
+}
+
+#endif /* VDO_IO_SUBMITTER_H */
diff --git a/drivers/md/dm-vdo/logger.c b/drivers/md/dm-vdo/logger.c
new file mode 100644
index 000000000000..3f7dc2cb6b98
--- /dev/null
+++ b/drivers/md/dm-vdo/logger.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "logger.h"
+
+#include <asm/current.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "errors.h"
+#include "thread-device.h"
+#include "thread-utils.h"
+
+int vdo_log_level = VDO_LOG_DEFAULT;
+
+int vdo_get_log_level(void)
+{
+ int log_level_latch = READ_ONCE(vdo_log_level);
+
+ if (unlikely(log_level_latch > VDO_LOG_MAX)) {
+ log_level_latch = VDO_LOG_DEFAULT;
+ WRITE_ONCE(vdo_log_level, log_level_latch);
+ }
+ return log_level_latch;
+}
+
+static const char *get_current_interrupt_type(void)
+{
+ if (in_nmi())
+ return "NMI";
+
+ if (in_irq())
+ return "HI";
+
+ if (in_softirq())
+ return "SI";
+
+ return "INTR";
+}
+
+/**
+ * emit_log_message_to_kernel() - Emit a log message to the kernel at the specified priority.
+ *
+ * @priority: The priority at which to log the message
+ * @fmt: The format string of the message
+ */
+static void emit_log_message_to_kernel(int priority, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+
+ if (priority > vdo_get_log_level())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ switch (priority) {
+ case VDO_LOG_EMERG:
+ case VDO_LOG_ALERT:
+ case VDO_LOG_CRIT:
+ pr_crit("%pV", &vaf);
+ break;
+ case VDO_LOG_ERR:
+ pr_err("%pV", &vaf);
+ break;
+ case VDO_LOG_WARNING:
+ pr_warn("%pV", &vaf);
+ break;
+ case VDO_LOG_NOTICE:
+ case VDO_LOG_INFO:
+ pr_info("%pV", &vaf);
+ break;
+ case VDO_LOG_DEBUG:
+ pr_debug("%pV", &vaf);
+ break;
+ default:
+ printk(KERN_DEFAULT "%pV", &vaf);
+ break;
+ }
+
+ va_end(args);
+}
+
+/**
+ * emit_log_message() - Emit a log message to the kernel log in a format suited to the current
+ * thread context.
+ *
+ * Context info formats:
+ *
+ * interrupt: uds[NMI]: blah
+ * kvdo thread: kvdo12:foobarQ: blah
+ * thread w/device id: kvdo12:myprog: blah
+ * other thread: uds: myprog: blah
+ *
+ * Fields: module name, interrupt level, process name, device ID.
+ *
+ * @priority: the priority at which to log the message
+ * @module: The name of the module doing the logging
+ * @prefix: The prefix of the log message
+ * @vaf1: The first message format descriptor
+ * @vaf2: The second message format descriptor
+ */
+static void emit_log_message(int priority, const char *module, const char *prefix,
+ const struct va_format *vaf1, const struct va_format *vaf2)
+{
+ int device_instance;
+
+ /*
+ * In interrupt context, identify the interrupt type and module. Ignore the process/thread
+ * since it could be anything.
+ */
+ if (in_interrupt()) {
+ const char *type = get_current_interrupt_type();
+
+ emit_log_message_to_kernel(priority, "%s[%s]: %s%pV%pV\n", module, type,
+ prefix, vaf1, vaf2);
+ return;
+ }
+
+ /* Not at interrupt level; we have a process we can look at, and might have a device ID. */
+ device_instance = vdo_get_thread_device_id();
+ if (device_instance >= 0) {
+ emit_log_message_to_kernel(priority, "%s%u:%s: %s%pV%pV\n", module,
+ device_instance, current->comm, prefix, vaf1,
+ vaf2);
+ return;
+ }
+
+ /*
+ * If it's a kernel thread and the module name is a prefix of its name, assume it is ours
+ * and only identify the thread.
+ */
+ if (((current->flags & PF_KTHREAD) != 0) &&
+ (strncmp(module, current->comm, strlen(module)) == 0)) {
+ emit_log_message_to_kernel(priority, "%s: %s%pV%pV\n", current->comm,
+ prefix, vaf1, vaf2);
+ return;
+ }
+
+ /* Identify the module and the process. */
+ emit_log_message_to_kernel(priority, "%s: %s: %s%pV%pV\n", module, current->comm,
+ prefix, vaf1, vaf2);
+}
+
+/*
+ * vdo_log_embedded_message() - Log a message embedded within another message.
+ * @priority: the priority at which to log the message
+ * @module: the name of the module doing the logging
+ * @prefix: optional string prefix to message, may be NULL
+ * @fmt1: format of message first part (required)
+ * @args1: arguments for message first part (required)
+ * @fmt2: format of message second part
+ */
+void vdo_log_embedded_message(int priority, const char *module, const char *prefix,
+ const char *fmt1, va_list args1, const char *fmt2, ...)
+{
+ va_list args1_copy;
+ va_list args2;
+ struct va_format vaf1, vaf2;
+
+ va_start(args2, fmt2);
+
+ if (module == NULL)
+ module = VDO_LOGGING_MODULE_NAME;
+
+ if (prefix == NULL)
+ prefix = "";
+
+ /*
+ * It is implementation dependent whether va_list is defined as an array type that decays
+ * to a pointer when passed as an argument. Copy args1 and args2 with va_copy so that vaf1
+ * and vaf2 get proper va_list pointers irrespective of how va_list is defined.
+ */
+ va_copy(args1_copy, args1);
+ vaf1.fmt = fmt1;
+ vaf1.va = &args1_copy;
+
+ vaf2.fmt = fmt2;
+ vaf2.va = &args2;
+
+ emit_log_message(priority, module, prefix, &vaf1, &vaf2);
+
+ va_end(args1_copy);
+ va_end(args2);
+}
+
+int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format,
+ va_list args)
+{
+ char errbuf[VDO_MAX_ERROR_MESSAGE_SIZE];
+ const char *message = uds_string_error(errnum, errbuf, sizeof(errbuf));
+
+ vdo_log_embedded_message(priority, module, NULL, format, args, ": %s (%d)",
+ message, errnum);
+ return errnum;
+}
+
+int __vdo_log_strerror(int priority, int errnum, const char *module, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vdo_vlog_strerror(priority, errnum, module, format, args);
+ va_end(args);
+ return errnum;
+}
+
+void vdo_log_backtrace(int priority)
+{
+ if (priority > vdo_get_log_level())
+ return;
+
+ dump_stack();
+}
+
+void __vdo_log_message(int priority, const char *module, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vdo_log_embedded_message(priority, module, NULL, format, args, "%s", "");
+ va_end(args);
+}
+
+/*
+ * Sleep or delay a few milliseconds in an attempt to allow the log buffers to be flushed lest they
+ * be overrun.
+ */
+void vdo_pause_for_logger(void)
+{
+ fsleep(4000);
+}
diff --git a/drivers/md/dm-vdo/logger.h b/drivers/md/dm-vdo/logger.h
new file mode 100644
index 000000000000..ae6ad691c027
--- /dev/null
+++ b/drivers/md/dm-vdo/logger.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_LOGGER_H
+#define VDO_LOGGER_H
+
+#include <linux/kern_levels.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/device-mapper.h>
+
+/* Custom logging utilities for UDS */
+
+enum {
+ VDO_LOG_EMERG = LOGLEVEL_EMERG,
+ VDO_LOG_ALERT = LOGLEVEL_ALERT,
+ VDO_LOG_CRIT = LOGLEVEL_CRIT,
+ VDO_LOG_ERR = LOGLEVEL_ERR,
+ VDO_LOG_WARNING = LOGLEVEL_WARNING,
+ VDO_LOG_NOTICE = LOGLEVEL_NOTICE,
+ VDO_LOG_INFO = LOGLEVEL_INFO,
+ VDO_LOG_DEBUG = LOGLEVEL_DEBUG,
+
+ VDO_LOG_MAX = VDO_LOG_DEBUG,
+ VDO_LOG_DEFAULT = VDO_LOG_INFO,
+};
+
+extern int vdo_log_level;
+
+#define DM_MSG_PREFIX "vdo"
+#define VDO_LOGGING_MODULE_NAME DM_NAME ": " DM_MSG_PREFIX
+
+/* Apply a rate limiter to a log method call. */
+#define vdo_log_ratelimit(log_fn, ...) \
+ do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) { \
+ log_fn(__VA_ARGS__); \
+ } \
+ } while (0)
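+
+/*
+ * For example (an illustrative sketch), a message that might otherwise be emitted on every I/O
+ * can be rate limited like so, where "device_name" is whatever identifier the caller has on hand:
+ *
+ *	vdo_log_ratelimit(vdo_log_warning, "device %s is responding slowly", device_name);
+ */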
+
+int vdo_get_log_level(void);
+
+void vdo_log_embedded_message(int priority, const char *module, const char *prefix,
+ const char *fmt1, va_list args1, const char *fmt2, ...)
+ __printf(4, 0) __printf(6, 7);
+
+void vdo_log_backtrace(int priority);
+
+/* All log functions will preserve the caller's value of errno. */
+
+#define vdo_log_strerror(priority, errnum, ...) \
+ __vdo_log_strerror(priority, errnum, VDO_LOGGING_MODULE_NAME, __VA_ARGS__)
+
+int __vdo_log_strerror(int priority, int errnum, const char *module,
+ const char *format, ...)
+ __printf(4, 5);
+
+int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format,
+ va_list args)
+ __printf(4, 0);
+
+/* Log an error prefixed with the string associated with the errnum. */
+#define vdo_log_error_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_ERR, errnum, __VA_ARGS__)
+
+#define vdo_log_debug_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_DEBUG, errnum, __VA_ARGS__)
+
+#define vdo_log_info_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_INFO, errnum, __VA_ARGS__)
+
+#define vdo_log_warning_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_WARNING, errnum, __VA_ARGS__)
+
+#define vdo_log_fatal_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_CRIT, errnum, __VA_ARGS__)
+
+#define vdo_log_message(priority, ...) \
+ __vdo_log_message(priority, VDO_LOGGING_MODULE_NAME, __VA_ARGS__)
+
+void __vdo_log_message(int priority, const char *module, const char *format, ...)
+ __printf(3, 4);
+
+#define vdo_log_debug(...) vdo_log_message(VDO_LOG_DEBUG, __VA_ARGS__)
+
+#define vdo_log_info(...) vdo_log_message(VDO_LOG_INFO, __VA_ARGS__)
+
+#define vdo_log_warning(...) vdo_log_message(VDO_LOG_WARNING, __VA_ARGS__)
+
+#define vdo_log_error(...) vdo_log_message(VDO_LOG_ERR, __VA_ARGS__)
+
+#define vdo_log_fatal(...) vdo_log_message(VDO_LOG_CRIT, __VA_ARGS__)
+
+void vdo_pause_for_logger(void);
+#endif /* VDO_LOGGER_H */
diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c
new file mode 100644
index 000000000000..026f031ffc9e
--- /dev/null
+++ b/drivers/md/dm-vdo/logical-zone.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "logical-zone.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "flush.h"
+#include "int-map.h"
+#include "physical-zone.h"
+#include "vdo.h"
+
+#define ALLOCATIONS_PER_ZONE 128
+
+/**
+ * as_logical_zone() - Convert a generic vdo_completion to a logical_zone.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a logical_zone.
+ */
+static struct logical_zone *as_logical_zone(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_GENERATION_FLUSHED_COMPLETION);
+ return container_of(completion, struct logical_zone, completion);
+}
+
+/* get_thread_id_for_zone() - Implements vdo_zone_thread_getter_fn. */
+static thread_id_t get_thread_id_for_zone(void *context, zone_count_t zone_number)
+{
+ struct logical_zones *zones = context;
+
+ return zones->zones[zone_number].thread_id;
+}
+
+/**
+ * initialize_zone() - Initialize a logical zone.
+ * @zones: The logical_zones to which this zone belongs.
+ * @zone_number: The logical_zone's index.
+ */
+static int initialize_zone(struct logical_zones *zones, zone_count_t zone_number)
+{
+ int result;
+ struct vdo *vdo = zones->vdo;
+ struct logical_zone *zone = &zones->zones[zone_number];
+ zone_count_t allocation_zone_number;
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (zone_number < vdo->thread_config.logical_zone_count - 1)
+ zone->next = &zones->zones[zone_number + 1];
+
+ vdo_initialize_completion(&zone->completion, vdo,
+ VDO_GENERATION_FLUSHED_COMPLETION);
+ zone->zones = zones;
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.logical_threads[zone_number];
+ zone->block_map_zone = &vdo->block_map->zones[zone_number];
+ INIT_LIST_HEAD(&zone->write_vios);
+ vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+
+ allocation_zone_number = zone->thread_id % vdo->thread_config.physical_zone_count;
+ zone->allocation_zone = &vdo->physical_zones->zones[allocation_zone_number];
+
+ return vdo_make_default_thread(vdo, zone->thread_id);
+}
+
+/**
+ * vdo_make_logical_zones() - Create a set of logical zones.
+ * @vdo: The vdo to which the zones will belong.
+ * @zones_ptr: A pointer to hold the new zones.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_logical_zones(struct vdo *vdo, struct logical_zones **zones_ptr)
+{
+ struct logical_zones *zones;
+ int result;
+ zone_count_t zone;
+ zone_count_t zone_count = vdo->thread_config.logical_zone_count;
+
+ if (zone_count == 0)
+ return VDO_SUCCESS;
+
+ result = vdo_allocate_extended(struct logical_zones, zone_count,
+ struct logical_zone, __func__, &zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ zones->vdo = vdo;
+ zones->zone_count = zone_count;
+ for (zone = 0; zone < zone_count; zone++) {
+ result = initialize_zone(zones, zone);
+ if (result != VDO_SUCCESS) {
+ vdo_free_logical_zones(zones);
+ return result;
+ }
+ }
+
+ result = vdo_make_action_manager(zones->zone_count, get_thread_id_for_zone,
+ vdo->thread_config.admin_thread, zones, NULL,
+ vdo, &zones->manager);
+ if (result != VDO_SUCCESS) {
+ vdo_free_logical_zones(zones);
+ return result;
+ }
+
+ *zones_ptr = zones;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_logical_zones() - Free a set of logical zones.
+ * @zones: The set of zones to free.
+ */
+void vdo_free_logical_zones(struct logical_zones *zones)
+{
+ zone_count_t index;
+
+ if (zones == NULL)
+ return;
+
+ vdo_free(vdo_forget(zones->manager));
+
+ for (index = 0; index < zones->zone_count; index++)
+ vdo_int_map_free(vdo_forget(zones->zones[index].lbn_operations));
+
+ vdo_free(zones);
+}
+
+static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+ "%s() called on correct thread", what);
+}
+
+/**
+ * check_for_drain_complete() - Check whether this zone has drained.
+ * @zone: The zone to check.
+ */
+static void check_for_drain_complete(struct logical_zone *zone)
+{
+ if (!vdo_is_state_draining(&zone->state) || zone->notifying ||
+ !list_empty(&zone->write_vios))
+ return;
+
+ vdo_finish_draining(&zone->state);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct logical_zone, state));
+}
+
+/**
+ * drain_logical_zone() - Drain a logical zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void drain_logical_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct logical_zones *zones = context;
+
+ vdo_start_draining(&zones->zones[zone_number].state,
+ vdo_get_current_manager_operation(zones->manager), parent,
+ initiate_drain);
+}
+
+void vdo_drain_logical_zones(struct logical_zones *zones,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_operation(zones->manager, operation, NULL, drain_logical_zone, NULL,
+ parent);
+}
+
+/**
+ * resume_logical_zone() - Resume a logical zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void resume_logical_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct logical_zone *zone = &(((struct logical_zones *) context)->zones[zone_number]);
+
+ vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
+}
+
+/**
+ * vdo_resume_logical_zones() - Resume a set of logical zones.
+ * @zones: The logical zones to resume.
+ * @parent: The object to notify when the zones have resumed.
+ */
+void vdo_resume_logical_zones(struct logical_zones *zones, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(zones->manager, VDO_ADMIN_STATE_RESUMING, NULL,
+ resume_logical_zone, NULL, parent);
+}
+
+/**
+ * update_oldest_active_generation() - Update the oldest active generation.
+ * @zone: The zone.
+ *
+ * Return: true if the oldest active generation has changed.
+ */
+static bool update_oldest_active_generation(struct logical_zone *zone)
+{
+ struct data_vio *data_vio =
+ list_first_entry_or_null(&zone->write_vios, struct data_vio,
+ write_entry);
+ sequence_number_t oldest =
+ (data_vio == NULL) ? zone->flush_generation : data_vio->flush_generation;
+
+ if (oldest == zone->oldest_active_generation)
+ return false;
+
+ WRITE_ONCE(zone->oldest_active_generation, oldest);
+ return true;
+}
+
+/**
+ * vdo_increment_logical_zone_flush_generation() - Increment the flush generation in a logical
+ * zone.
+ * @zone: The logical zone.
+ * @expected_generation: The expected value of the flush generation before the increment.
+ */
+void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
+ sequence_number_t expected_generation)
+{
+ assert_on_zone_thread(zone, __func__);
+ VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
+ "logical zone %u flush generation %llu should be %llu before increment",
+ zone->zone_number, (unsigned long long) zone->flush_generation,
+ (unsigned long long) expected_generation);
+
+ zone->flush_generation++;
+ zone->ios_in_flush_generation = 0;
+ update_oldest_active_generation(zone);
+}
+
+/**
+ * vdo_acquire_flush_generation_lock() - Acquire the shared lock on a flush generation by a write
+ * data_vio.
+ * @data_vio: The data_vio.
+ */
+void vdo_acquire_flush_generation_lock(struct data_vio *data_vio)
+{
+ struct logical_zone *zone = data_vio->logical.zone;
+
+ assert_on_zone_thread(zone, __func__);
+ VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
+
+ data_vio->flush_generation = zone->flush_generation;
+ list_add_tail(&data_vio->write_entry, &zone->write_vios);
+ zone->ios_in_flush_generation++;
+}
+
+static void attempt_generation_complete_notification(struct vdo_completion *completion);
+
+/**
+ * notify_flusher() - Notify the flusher that at least one generation no longer has active VIOs.
+ * @completion: The zone completion.
+ *
+ * This callback is registered in attempt_generation_complete_notification().
+ */
+static void notify_flusher(struct vdo_completion *completion)
+{
+ struct logical_zone *zone = as_logical_zone(completion);
+
+ vdo_complete_flushes(zone->zones->vdo->flusher);
+ vdo_launch_completion_callback(completion,
+ attempt_generation_complete_notification,
+ zone->thread_id);
+}
+
+/**
+ * attempt_generation_complete_notification() - Notify the flusher if some generation no
+ * longer has active VIOs.
+ * @completion: The zone completion.
+ */
+static void attempt_generation_complete_notification(struct vdo_completion *completion)
+{
+ struct logical_zone *zone = as_logical_zone(completion);
+
+ assert_on_zone_thread(zone, __func__);
+ if (zone->oldest_active_generation <= zone->notification_generation) {
+ zone->notifying = false;
+ check_for_drain_complete(zone);
+ return;
+ }
+
+ zone->notifying = true;
+ zone->notification_generation = zone->oldest_active_generation;
+ vdo_launch_completion_callback(&zone->completion, notify_flusher,
+ vdo_get_flusher_thread_id(zone->zones->vdo->flusher));
+}
+
+/**
+ * vdo_release_flush_generation_lock() - Release the shared lock on a flush generation held by a
+ * write data_vio.
+ * @data_vio: The data_vio whose lock is to be released.
+ *
+ * If there are pending flushes, and this data_vio completes the oldest generation active in this
+ * zone, an attempt will be made to finish any flushes which may now be complete.
+ */
+void vdo_release_flush_generation_lock(struct data_vio *data_vio)
+{
+ struct logical_zone *zone = data_vio->logical.zone;
+
+ assert_on_zone_thread(zone, __func__);
+
+ if (!data_vio_has_flush_generation_lock(data_vio))
+ return;
+
+ list_del_init(&data_vio->write_entry);
+ VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
+ "data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
+ (unsigned long long) data_vio->flush_generation,
+ (unsigned long long) zone->oldest_active_generation);
+
+ if (!update_oldest_active_generation(zone) || zone->notifying)
+ return;
+
+ attempt_generation_complete_notification(&zone->completion);
+}
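A minimal ordering sketch of how a write data_vio uses the acquire/release pair above; example_write_path() is hypothetical and not part of this patch (the real write path lives in the data_vio code), but it shows the expected sequence, with both calls made on the logical zone's thread.

/* Sketch only: example_write_path() is illustrative, not part of this patch. */
static void example_write_path(struct data_vio *data_vio)
{
	/* Tag the data_vio with the zone's current flush generation. */
	vdo_acquire_flush_generation_lock(data_vio);

	/* ... process and submit the write ... */

	/* Drop the lock; this may allow a pending flush to complete. */
	vdo_release_flush_generation_lock(data_vio);
}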
+
+struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone)
+{
+ if (zone->allocation_count == ALLOCATIONS_PER_ZONE) {
+ zone->allocation_count = 0;
+ zone->allocation_zone = zone->allocation_zone->next;
+ }
+
+ zone->allocation_count++;
+ return zone->allocation_zone;
+}
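An illustrative loop, assuming a hypothetical caller, showing the effect of the rotation above: a logical zone keeps drawing from one physical zone until ALLOCATIONS_PER_ZONE allocations have been made, then moves to the next physical zone in the ring.

/* Illustration only; allocate_example() is not part of this patch. */
static void allocate_example(struct logical_zone *zone, block_count_t count)
{
	block_count_t i;

	for (i = 0; i < count; i++) {
		struct physical_zone *target = vdo_get_next_allocation_zone(zone);

		/* ... request a free block from 'target' ... */
	}
}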
+
+/**
+ * vdo_dump_logical_zone() - Dump information about a logical zone to the log for debugging.
+ * @zone: The zone to dump.
+ *
+ * Context: the information is dumped in a thread-unsafe fashion.
+ */
+void vdo_dump_logical_zone(const struct logical_zone *zone)
+{
+ vdo_log_info("logical_zone %u", zone->zone_number);
+ vdo_log_info(" flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu",
+ (unsigned long long) READ_ONCE(zone->flush_generation),
+ (unsigned long long) READ_ONCE(zone->oldest_active_generation),
+ (unsigned long long) READ_ONCE(zone->notification_generation),
+ vdo_bool_to_string(READ_ONCE(zone->notifying)),
+ (unsigned long long) READ_ONCE(zone->ios_in_flush_generation));
+}
diff --git a/drivers/md/dm-vdo/logical-zone.h b/drivers/md/dm-vdo/logical-zone.h
new file mode 100644
index 000000000000..1b666c84a193
--- /dev/null
+++ b/drivers/md/dm-vdo/logical-zone.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_LOGICAL_ZONE_H
+#define VDO_LOGICAL_ZONE_H
+
+#include <linux/list.h>
+
+#include "admin-state.h"
+#include "int-map.h"
+#include "types.h"
+
+struct physical_zone;
+
+struct logical_zone {
+ /* The completion for flush notifications */
+ struct vdo_completion completion;
+ /* The owner of this zone */
+ struct logical_zones *zones;
+ /* Which logical zone this is */
+ zone_count_t zone_number;
+ /* The thread id for this zone */
+ thread_id_t thread_id;
+ /* In progress operations keyed by LBN */
+ struct int_map *lbn_operations;
+ /* The logical to physical map */
+ struct block_map_zone *block_map_zone;
+ /* The current flush generation */
+ sequence_number_t flush_generation;
+ /*
+ * The oldest active generation in this zone. This is mutated only on the logical zone
+ * thread but is queried from the flusher thread.
+ */
+ sequence_number_t oldest_active_generation;
+ /* The number of IOs in the current flush generation */
+ block_count_t ios_in_flush_generation;
+ /* The youngest generation of the current notification */
+ sequence_number_t notification_generation;
+ /* Whether a notification is in progress */
+ bool notifying;
+ /* The queue of active data write VIOs */
+ struct list_head write_vios;
+ /* The administrative state of the zone */
+ struct admin_state state;
+ /* The physical zone from which to allocate */
+ struct physical_zone *allocation_zone;
+ /* The number of allocations done from the current allocation_zone */
+ block_count_t allocation_count;
+ /* The next zone */
+ struct logical_zone *next;
+};
+
+struct logical_zones {
+ /* The vdo whose zones these are */
+ struct vdo *vdo;
+ /* The manager for administrative actions */
+ struct action_manager *manager;
+ /* The number of zones */
+ zone_count_t zone_count;
+ /* The logical zones themselves */
+ struct logical_zone zones[];
+};
+
+int __must_check vdo_make_logical_zones(struct vdo *vdo,
+ struct logical_zones **zones_ptr);
+
+void vdo_free_logical_zones(struct logical_zones *zones);
+
+void vdo_drain_logical_zones(struct logical_zones *zones,
+ const struct admin_state_code *operation,
+ struct vdo_completion *completion);
+
+void vdo_resume_logical_zones(struct logical_zones *zones,
+ struct vdo_completion *parent);
+
+void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
+ sequence_number_t expected_generation);
+
+void vdo_acquire_flush_generation_lock(struct data_vio *data_vio);
+
+void vdo_release_flush_generation_lock(struct data_vio *data_vio);
+
+struct physical_zone * __must_check vdo_get_next_allocation_zone(struct logical_zone *zone);
+
+void vdo_dump_logical_zone(const struct logical_zone *zone);
+
+#endif /* VDO_LOGICAL_ZONE_H */
diff --git a/drivers/md/dm-vdo/memory-alloc.c b/drivers/md/dm-vdo/memory-alloc.c
new file mode 100644
index 000000000000..185f259c7245
--- /dev/null
+++ b/drivers/md/dm-vdo/memory-alloc.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+/*
+ * UDS and VDO keep track of which threads are allowed to allocate memory freely, and which threads
+ * must be careful to not do a memory allocation that does an I/O request. The 'allocating_threads'
+ * thread_registry and its associated methods implement this tracking.
+ */
+static struct thread_registry allocating_threads;
+
+static inline bool allocations_allowed(void)
+{
+ return vdo_lookup_thread(&allocating_threads) != NULL;
+}
+
+/*
+ * Register the current thread as an allocating thread.
+ *
+ * An optional flag location can be supplied indicating whether, at any given point in time, the
+ * threads associated with that flag should be allocating storage. If the flag is false, a message
+ * will be logged.
+ *
+ * If no flag is supplied, the thread is always allowed to allocate storage without complaint.
+ *
+ * @new_thread: registered_thread structure to use for the current thread
+ * @flag_ptr: Location of the allocation-allowed flag
+ */
+void vdo_register_allocating_thread(struct registered_thread *new_thread,
+ const bool *flag_ptr)
+{
+ if (flag_ptr == NULL) {
+ static const bool allocation_always_allowed = true;
+
+ flag_ptr = &allocation_always_allowed;
+ }
+
+ vdo_register_thread(&allocating_threads, new_thread, flag_ptr);
+}
+
+/* Unregister the current thread as an allocating thread. */
+void vdo_unregister_allocating_thread(void)
+{
+ vdo_unregister_thread(&allocating_threads);
+}
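A sketch of the registration pattern described above, assuming a hypothetical worker thread; passing NULL for the flag makes allocations from this thread unconditionally allowed.

/* Sketch only: example_worker() is not part of this patch. */
static int example_worker(void *arg)
{
	struct registered_thread thread;

	vdo_register_allocating_thread(&thread, NULL);

	/* ... work which may allocate memory without complaint ... */

	vdo_unregister_allocating_thread();
	return 0;
}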
+
+/*
+ * We track how much memory has been allocated and freed. When we unload the module, we log an
+ * error if we have not freed all the memory that we allocated. Nearly all memory allocation and
+ * freeing is done using this module.
+ *
+ * We do not use kernel functions like the kvasprintf() method, which allocate memory indirectly
+ * using kmalloc.
+ *
+ * These data structures and methods are used to track the amount of memory used.
+ */
+
+/*
+ * We allocate very few large objects, and allocation/deallocation isn't done in a
+ * performance-critical stage for us, so a linked list should be fine.
+ */
+struct vmalloc_block_info {
+ void *ptr;
+ size_t size;
+ struct vmalloc_block_info *next;
+};
+
+static struct {
+ spinlock_t lock;
+ size_t kmalloc_blocks;
+ size_t kmalloc_bytes;
+ size_t vmalloc_blocks;
+ size_t vmalloc_bytes;
+ size_t peak_bytes;
+ struct vmalloc_block_info *vmalloc_list;
+} memory_stats __cacheline_aligned;
+
+static void update_peak_usage(void)
+{
+ size_t total_bytes = memory_stats.kmalloc_bytes + memory_stats.vmalloc_bytes;
+
+ if (total_bytes > memory_stats.peak_bytes)
+ memory_stats.peak_bytes = total_bytes;
+}
+
+static void add_kmalloc_block(size_t size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ memory_stats.kmalloc_blocks++;
+ memory_stats.kmalloc_bytes += size;
+ update_peak_usage();
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+static void remove_kmalloc_block(size_t size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ memory_stats.kmalloc_blocks--;
+ memory_stats.kmalloc_bytes -= size;
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+static void add_vmalloc_block(struct vmalloc_block_info *block)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ block->next = memory_stats.vmalloc_list;
+ memory_stats.vmalloc_list = block;
+ memory_stats.vmalloc_blocks++;
+ memory_stats.vmalloc_bytes += block->size;
+ update_peak_usage();
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+static void remove_vmalloc_block(void *ptr)
+{
+ struct vmalloc_block_info *block;
+ struct vmalloc_block_info **block_ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ for (block_ptr = &memory_stats.vmalloc_list;
+ (block = *block_ptr) != NULL;
+ block_ptr = &block->next) {
+ if (block->ptr == ptr) {
+ *block_ptr = block->next;
+ memory_stats.vmalloc_blocks--;
+ memory_stats.vmalloc_bytes -= block->size;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+ if (block != NULL)
+ vdo_free(block);
+ else
+ vdo_log_info("attempting to remove ptr %px not found in vmalloc list", ptr);
+}
+
+/*
+ * Determine whether allocating a memory block should use kmalloc or __vmalloc.
+ *
+ * vmalloc can allocate any integral number of pages.
+ *
+ * kmalloc can allocate any number of bytes up to a configured limit, which defaults to 8 megabytes
+ * on some systems. kmalloc is especially good when memory is being both allocated and freed, and
+ * it does this efficiently in a multi-CPU environment.
+ *
+ * kmalloc usually rounds the size of the block up to the next power of two, so when the requested
+ * block is bigger than PAGE_SIZE / 2 bytes, kmalloc will never give you less space than the
+ * corresponding vmalloc allocation. Sometimes vmalloc will use less overhead than kmalloc.
+ *
+ * The advantages of kmalloc do not help out UDS or VDO, because we allocate all our memory up
+ * front and do not free and reallocate it. Sometimes we have problems using kmalloc, because the
+ * Linux memory page map can become so fragmented that kmalloc will not give us a 32KB chunk. We
+ * have used vmalloc as a backup to kmalloc in the past, and a follow-up vmalloc of 32KB will work.
+ * But there is no strong case to be made for using kmalloc over vmalloc for these size chunks.
+ *
+ * The kmalloc/vmalloc boundary is set at 4KB, and kmalloc gets the 4KB requests. There is no
+ * strong reason for favoring either kmalloc or vmalloc for 4KB requests, except that tracking
+ * vmalloc statistics uses a linked list implementation. Using a simple test, this choice of
+ * boundary results in 132 vmalloc calls. Using vmalloc for requests of exactly 4KB results in an
+ * additional 6374 vmalloc calls, which is much less efficient for tracking.
+ *
+ * @size: How many bytes to allocate
+ */
+static inline bool use_kmalloc(size_t size)
+{
+ return size <= PAGE_SIZE;
+}
+
+/*
+ * Allocate storage based on memory size and alignment, logging an error if the allocation fails.
+ * The memory will be zeroed.
+ *
+ * @size: The size of an object
+ * @align: The required alignment
+ * @what: What is being allocated (for error logging)
+ * @ptr: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+int vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr)
+{
+ /*
+ * The __GFP_RETRY_MAYFAIL flag means the VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication that progress has
+ * been made elsewhere. It can wait for other tasks to attempt high level approaches to
+ * freeing memory such as compaction (which removes fragmentation) and page-out. There is
+ * still a definite limit to the number of retries, but it is a larger limit than with
+ * __GFP_NORETRY. Allocations with this flag may fail, but only when there is genuinely
+ * little unused memory. While these allocations do not directly trigger the OOM killer,
+ * their failure indicates that the system is likely to need to use the OOM killer soon.
+ * The caller must handle failure, but can reasonably do so by failing a higher-level
+ * request, or completing it only in a much less efficient manner.
+ */
+ const gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+ unsigned int noio_flags;
+ bool allocations_restricted = !allocations_allowed();
+ unsigned long start_time;
+ void *p = NULL;
+
+ if (unlikely(ptr == NULL))
+ return -EINVAL;
+
+ if (size == 0) {
+ *((void **) ptr) = NULL;
+ return VDO_SUCCESS;
+ }
+
+ if (allocations_restricted)
+ noio_flags = memalloc_noio_save();
+
+ start_time = jiffies;
+ if (use_kmalloc(size) && (align < PAGE_SIZE)) {
+ p = kmalloc(size, gfp_flags | __GFP_NOWARN);
+ if (p == NULL) {
+ /*
+ * It is possible for kmalloc to fail to allocate memory because there is
+ * no page available. A short sleep may allow the page reclaimer to
+ * free a page.
+ */
+ fsleep(1000);
+ p = kmalloc(size, gfp_flags);
+ }
+
+ if (p != NULL)
+ add_kmalloc_block(ksize(p));
+ } else {
+ struct vmalloc_block_info *block;
+
+ if (vdo_allocate(1, struct vmalloc_block_info, __func__, &block) == VDO_SUCCESS) {
+ /*
+ * It is possible for __vmalloc to fail to allocate memory because there
+ * are no pages available. A short sleep may allow the page reclaimer
+ * to free enough pages for a small allocation.
+ *
+ * For larger allocations, the page_alloc code is racing against the page
+ * reclaimer. If the page reclaimer can stay ahead of page_alloc, the
+ * __vmalloc will succeed. But if page_alloc overtakes the page reclaimer,
+ * the allocation fails. It is possible that more retries will succeed.
+ */
+ for (;;) {
+ p = __vmalloc(size, gfp_flags | __GFP_NOWARN);
+ if (p != NULL)
+ break;
+
+ if (jiffies_to_msecs(jiffies - start_time) > 1000) {
+ /* Try one more time, logging a failure for this call. */
+ p = __vmalloc(size, gfp_flags);
+ break;
+ }
+
+ fsleep(1000);
+ }
+
+ if (p == NULL) {
+ vdo_free(block);
+ } else {
+ block->ptr = p;
+ block->size = PAGE_ALIGN(size);
+ add_vmalloc_block(block);
+ }
+ }
+ }
+
+ if (allocations_restricted)
+ memalloc_noio_restore(noio_flags);
+
+ if (unlikely(p == NULL)) {
+ vdo_log_error("Could not allocate %zu bytes for %s in %u msecs",
+ size, what, jiffies_to_msecs(jiffies - start_time));
+ return -ENOMEM;
+ }
+
+ *((void **) ptr) = p;
+ return VDO_SUCCESS;
+}
+
+/*
+ * Allocate storage based on memory size, failing immediately if the required memory is not
+ * available. The memory will be zeroed.
+ *
+ * @size: The size of an object.
+ * @what: What is being allocated (for error logging)
+ *
+ * Return: pointer to the allocated memory, or NULL if the required space is not available.
+ */
+void *vdo_allocate_memory_nowait(size_t size, const char *what __maybe_unused)
+{
+ void *p = kmalloc(size, GFP_NOWAIT | __GFP_ZERO);
+
+ if (p != NULL)
+ add_kmalloc_block(ksize(p));
+
+ return p;
+}
+
+void vdo_free(void *ptr)
+{
+ if (ptr != NULL) {
+ if (is_vmalloc_addr(ptr)) {
+ remove_vmalloc_block(ptr);
+ vfree(ptr);
+ } else {
+ remove_kmalloc_block(ksize(ptr));
+ kfree(ptr);
+ }
+ }
+}
+
+/*
+ * Reallocate dynamically allocated memory. There are no alignment guarantees for the reallocated
+ * memory. If the new memory is larger than the old memory, the new space will be zeroed.
+ *
+ * @ptr: The memory to reallocate.
+ * @old_size: The old size of the memory
+ * @size: The new size to allocate
+ * @what: What is being allocated (for error logging)
+ * @new_ptr: A pointer to hold the reallocated pointer
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+int vdo_reallocate_memory(void *ptr, size_t old_size, size_t size, const char *what,
+ void *new_ptr)
+{
+ int result;
+
+ if (size == 0) {
+ vdo_free(ptr);
+ *(void **) new_ptr = NULL;
+ return VDO_SUCCESS;
+ }
+
+ result = vdo_allocate(size, char, what, new_ptr);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (ptr != NULL) {
+ if (old_size < size)
+ size = old_size;
+
+ memcpy(*((void **) new_ptr), ptr, size);
+ vdo_free(ptr);
+ }
+
+ return VDO_SUCCESS;
+}
+
+int vdo_duplicate_string(const char *string, const char *what, char **new_string)
+{
+ int result;
+ u8 *dup;
+
+ result = vdo_allocate(strlen(string) + 1, u8, what, &dup);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ memcpy(dup, string, strlen(string) + 1);
+ *new_string = dup;
+ return VDO_SUCCESS;
+}
+
+void vdo_memory_init(void)
+{
+ spin_lock_init(&memory_stats.lock);
+ vdo_initialize_thread_registry(&allocating_threads);
+}
+
+void vdo_memory_exit(void)
+{
+ VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
+ "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+ memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
+ VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
+ "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+ memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
+ vdo_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
+}
+
+void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ *bytes_used = memory_stats.kmalloc_bytes + memory_stats.vmalloc_bytes;
+ *peak_bytes_used = memory_stats.peak_bytes;
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+/*
+ * Report stats on any allocated memory that we're tracking. Not all allocation types are
+ * guaranteed to be tracked in bytes (e.g., bios).
+ */
+void vdo_report_memory_usage(void)
+{
+ unsigned long flags;
+ u64 kmalloc_blocks;
+ u64 kmalloc_bytes;
+ u64 vmalloc_blocks;
+ u64 vmalloc_bytes;
+ u64 peak_usage;
+ u64 total_bytes;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ kmalloc_blocks = memory_stats.kmalloc_blocks;
+ kmalloc_bytes = memory_stats.kmalloc_bytes;
+ vmalloc_blocks = memory_stats.vmalloc_blocks;
+ vmalloc_bytes = memory_stats.vmalloc_bytes;
+ peak_usage = memory_stats.peak_bytes;
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+ total_bytes = kmalloc_bytes + vmalloc_bytes;
+ vdo_log_info("current module memory tracking (actual allocation sizes, not requested):");
+ vdo_log_info(" %llu bytes in %llu kmalloc blocks",
+ (unsigned long long) kmalloc_bytes,
+ (unsigned long long) kmalloc_blocks);
+ vdo_log_info(" %llu bytes in %llu vmalloc blocks",
+ (unsigned long long) vmalloc_bytes,
+ (unsigned long long) vmalloc_blocks);
+ vdo_log_info(" total %llu bytes, peak usage %llu bytes",
+ (unsigned long long) total_bytes, (unsigned long long) peak_usage);
+}
diff --git a/drivers/md/dm-vdo/memory-alloc.h b/drivers/md/dm-vdo/memory-alloc.h
new file mode 100644
index 000000000000..0093d9f940d9
--- /dev/null
+++ b/drivers/md/dm-vdo/memory-alloc.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_MEMORY_ALLOC_H
+#define VDO_MEMORY_ALLOC_H
+
+#include <linux/cache.h>
+#include <linux/io.h> /* for PAGE_SIZE */
+
+#include "permassert.h"
+#include "thread-registry.h"
+
+/* Custom memory allocation function that tracks memory usage */
+int __must_check vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr);
+
+/*
+ * Allocate storage based on element counts, sizes, and alignment.
+ *
+ * This is a generalized form of our allocation use case: It allocates an array of objects,
+ * optionally preceded by one object of another type (i.e., a struct with trailing variable-length
+ * array), with the alignment indicated.
+ *
+ * Why is this inline? The sizes and alignment will always be constant, when invoked through the
+ * macros below, and often the count will be a compile-time constant 1 or the number of extra bytes
+ * will be a compile-time constant 0. So at least some of the arithmetic can usually be optimized
+ * away, and the run-time selection between allocation functions always can. In many cases, it'll
+ * boil down to just a function call with a constant size.
+ *
+ * @count: The number of objects to allocate
+ * @size: The size of an object
+ * @extra: The number of additional bytes to allocate
+ * @align: The required alignment
+ * @what: What is being allocated (for error logging)
+ * @ptr: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+static inline int __vdo_do_allocation(size_t count, size_t size, size_t extra,
+ size_t align, const char *what, void *ptr)
+{
+ size_t total_size = count * size + extra;
+
+ /* Overflow check: */
+ if ((size > 0) && (count > ((SIZE_MAX - extra) / size))) {
+ /*
+ * This is kind of a hack: We rely on the fact that SIZE_MAX would cover the entire
+ * address space (minus one byte) and thus the system can never allocate that much
+ * and the call will always fail. So we can report an overflow as "out of memory"
+ * by asking for "merely" SIZE_MAX bytes.
+ */
+ total_size = SIZE_MAX;
+ }
+
+ return vdo_allocate_memory(total_size, align, what, ptr);
+}
+
+/*
+ * Allocate one or more elements of the indicated type, logging an error if the allocation fails.
+ * The memory will be zeroed.
+ *
+ * @COUNT: The number of objects to allocate
+ * @TYPE: The type of objects to allocate. This type determines the alignment of the allocation.
+ * @WHAT: What is being allocated (for error logging)
+ * @PTR: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+#define vdo_allocate(COUNT, TYPE, WHAT, PTR) \
+ __vdo_do_allocation(COUNT, sizeof(TYPE), 0, __alignof__(TYPE), WHAT, PTR)
+
+/*
+ * Allocate one object of an indicated type, followed by one or more elements of a second type,
+ * logging an error if the allocation fails. The memory will be zeroed.
+ *
+ * @TYPE1: The type of the primary object to allocate. This type determines the alignment of the
+ * allocated memory.
+ * @COUNT: The number of objects to allocate
+ * @TYPE2: The type of array objects to allocate
+ * @WHAT: What is being allocated (for error logging)
+ * @PTR: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+#define vdo_allocate_extended(TYPE1, COUNT, TYPE2, WHAT, PTR) \
+ __extension__({ \
+ int _result; \
+ TYPE1 **_ptr = (PTR); \
+ BUILD_BUG_ON(__alignof__(TYPE1) < __alignof__(TYPE2)); \
+ _result = __vdo_do_allocation(COUNT, \
+ sizeof(TYPE2), \
+ sizeof(TYPE1), \
+ __alignof__(TYPE1), \
+ WHAT, \
+ _ptr); \
+ _result; \
+ })
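A usage sketch of the two macros, mirroring calls that appear elsewhere in this patch (vdo_make_packer() and vdo_make_logical_zones()); example_allocations() itself is illustrative.

/* Sketch only: example_allocations() is not part of this patch. */
static int example_allocations(zone_count_t zone_count)
{
	struct logical_zones *zones;
	struct packer *packer;
	int result;

	/* One zeroed object of a single type. */
	result = vdo_allocate(1, struct packer, __func__, &packer);
	if (result != VDO_SUCCESS)
		return result;

	/* A header struct followed by zone_count trailing logical_zone slots. */
	result = vdo_allocate_extended(struct logical_zones, zone_count,
				       struct logical_zone, __func__, &zones);
	if (result != VDO_SUCCESS) {
		vdo_free(packer);
		return result;
	}

	vdo_free(zones);
	vdo_free(packer);
	return VDO_SUCCESS;
}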
+
+/*
+ * Allocate memory starting on a cache line boundary, logging an error if the allocation fails. The
+ * memory will be zeroed.
+ *
+ * @size: The number of bytes to allocate
+ * @what: What is being allocated (for error logging)
+ * @ptr: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+static inline int __must_check vdo_allocate_cache_aligned(size_t size, const char *what, void *ptr)
+{
+ return vdo_allocate_memory(size, L1_CACHE_BYTES, what, ptr);
+}
+
+/*
+ * Allocate storage of the indicated size immediately, failing if the required memory is not
+ * immediately available. The memory will be zeroed.
+ *
+ * @size: The number of bytes to allocate
+ * @what: What is being allocated (for error logging)
+ *
+ * Return: pointer to the memory, or NULL if the memory is not available.
+ */
+void *__must_check vdo_allocate_memory_nowait(size_t size, const char *what);
+
+int __must_check vdo_reallocate_memory(void *ptr, size_t old_size, size_t size,
+ const char *what, void *new_ptr);
+
+int __must_check vdo_duplicate_string(const char *string, const char *what,
+ char **new_string);
+
+/* Free memory allocated with vdo_allocate(). */
+void vdo_free(void *ptr);
+
+static inline void *__vdo_forget(void **ptr_ptr)
+{
+ void *ptr = *ptr_ptr;
+
+ *ptr_ptr = NULL;
+ return ptr;
+}
+
+/*
+ * Null out a pointer and return a copy of it. This macro should be used when passing a pointer to
+ * a function for which it is not safe to access the pointer once the function returns.
+ */
+#define vdo_forget(ptr) __vdo_forget((void **) &(ptr))
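For example, vdo_free_logical_zones() in this patch uses the pair to hand a field to the free routine while nulling it in one step:

	vdo_free(vdo_forget(zones->manager));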
+
+void vdo_memory_init(void);
+
+void vdo_memory_exit(void);
+
+void vdo_register_allocating_thread(struct registered_thread *new_thread,
+ const bool *flag_ptr);
+
+void vdo_unregister_allocating_thread(void);
+
+void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used);
+
+void vdo_report_memory_usage(void);
+
+#endif /* VDO_MEMORY_ALLOC_H */
diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c
new file mode 100644
index 000000000000..2802cf92922b
--- /dev/null
+++ b/drivers/md/dm-vdo/message-stats.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "dedupe.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "message-stats.h"
+#include "statistics.h"
+#include "thread-device.h"
+#include "vdo.h"
+
+static void write_u64(char *prefix, u64 value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%llu%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_u32(char *prefix, u32 value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%u%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_block_count_t(char *prefix, block_count_t value, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%llu%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_string(char *prefix, char *value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%s%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_bool(char *prefix, bool value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%d%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_u8(char *prefix, u8 value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%u%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_block_allocator_statistics(char *prefix,
+ struct block_allocator_statistics *stats,
+ char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* The total number of slabs from which blocks may be allocated */
+ write_u64("slabCount : ", stats->slab_count, ", ", buf, maxlen);
+ /* The total number of slabs from which blocks have ever been allocated */
+ write_u64("slabsOpened : ", stats->slabs_opened, ", ", buf, maxlen);
+ /* The number of times since loading that a slab has been re-opened */
+ write_u64("slabsReopened : ", stats->slabs_reopened, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_commit_statistics(char *prefix, struct commit_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* The total number of items on which processing has started */
+ write_u64("started : ", stats->started, ", ", buf, maxlen);
+ /* The total number of items for which a write operation has been issued */
+ write_u64("written : ", stats->written, ", ", buf, maxlen);
+ /* The total number of items for which a write operation has completed */
+ write_u64("committed : ", stats->committed, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_recovery_journal_statistics(char *prefix,
+ struct recovery_journal_statistics *stats,
+ char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of times the on-disk journal was full */
+ write_u64("diskFull : ", stats->disk_full, ", ", buf, maxlen);
+ /* Number of times the recovery journal requested slab journal commits. */
+ write_u64("slabJournalCommitsRequested : ",
+ stats->slab_journal_commits_requested, ", ", buf, maxlen);
+ /* Write/Commit totals for individual journal entries */
+ write_commit_statistics("entries : ", &stats->entries, ", ", buf, maxlen);
+ /* Write/Commit totals for journal blocks */
+ write_commit_statistics("blocks : ", &stats->blocks, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_packer_statistics(char *prefix, struct packer_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of compressed data items written since startup */
+ write_u64("compressedFragmentsWritten : ",
+ stats->compressed_fragments_written, ", ", buf, maxlen);
+ /* Number of blocks containing compressed items written since startup */
+ write_u64("compressedBlocksWritten : ",
+ stats->compressed_blocks_written, ", ", buf, maxlen);
+ /* Number of VIOs that are pending in the packer */
+ write_u64("compressedFragmentsInPacker : ",
+ stats->compressed_fragments_in_packer, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_slab_journal_statistics(char *prefix,
+ struct slab_journal_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of times the on-disk journal was full */
+ write_u64("diskFullCount : ", stats->disk_full_count, ", ", buf, maxlen);
+ /* Number of times an entry was added over the flush threshold */
+ write_u64("flushCount : ", stats->flush_count, ", ", buf, maxlen);
+ /* Number of times an entry was added over the block threshold */
+ write_u64("blockedCount : ", stats->blocked_count, ", ", buf, maxlen);
+ /* Number of times a tail block was written */
+ write_u64("blocksWritten : ", stats->blocks_written, ", ", buf, maxlen);
+ /* Number of times we had to wait for the tail to write */
+ write_u64("tailBusyCount : ", stats->tail_busy_count, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_slab_summary_statistics(char *prefix,
+ struct slab_summary_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of blocks written */
+ write_u64("blocksWritten : ", stats->blocks_written, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_ref_counts_statistics(char *prefix, struct ref_counts_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of reference blocks written */
+ write_u64("blocksWritten : ", stats->blocks_written, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_block_map_statistics(char *prefix, struct block_map_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* number of dirty (resident) pages */
+ write_u32("dirtyPages : ", stats->dirty_pages, ", ", buf, maxlen);
+ /* number of clean (resident) pages */
+ write_u32("cleanPages : ", stats->clean_pages, ", ", buf, maxlen);
+ /* number of free pages */
+ write_u32("freePages : ", stats->free_pages, ", ", buf, maxlen);
+ /* number of pages in failed state */
+ write_u32("failedPages : ", stats->failed_pages, ", ", buf, maxlen);
+ /* number of pages incoming */
+ write_u32("incomingPages : ", stats->incoming_pages, ", ", buf, maxlen);
+ /* number of pages outgoing */
+ write_u32("outgoingPages : ", stats->outgoing_pages, ", ", buf, maxlen);
+ /* how many times free page not avail */
+ write_u32("cachePressure : ", stats->cache_pressure, ", ", buf, maxlen);
+ /* number of get_vdo_page() calls for read */
+ write_u64("readCount : ", stats->read_count, ", ", buf, maxlen);
+ /* number of get_vdo_page() calls for write */
+ write_u64("writeCount : ", stats->write_count, ", ", buf, maxlen);
+ /* number of times pages failed to read */
+ write_u64("failedReads : ", stats->failed_reads, ", ", buf, maxlen);
+ /* number of times pages failed to write */
+ write_u64("failedWrites : ", stats->failed_writes, ", ", buf, maxlen);
+ /* number of gets that are reclaimed */
+ write_u64("reclaimed : ", stats->reclaimed, ", ", buf, maxlen);
+ /* number of gets for outgoing pages */
+ write_u64("readOutgoing : ", stats->read_outgoing, ", ", buf, maxlen);
+ /* number of gets that were already there */
+ write_u64("foundInCache : ", stats->found_in_cache, ", ", buf, maxlen);
+ /* number of gets requiring discard */
+ write_u64("discardRequired : ", stats->discard_required, ", ", buf, maxlen);
+ /* number of gets enqueued for their page */
+ write_u64("waitForPage : ", stats->wait_for_page, ", ", buf, maxlen);
+ /* number of gets that have to fetch */
+ write_u64("fetchRequired : ", stats->fetch_required, ", ", buf, maxlen);
+ /* number of page fetches */
+ write_u64("pagesLoaded : ", stats->pages_loaded, ", ", buf, maxlen);
+ /* number of page saves */
+ write_u64("pagesSaved : ", stats->pages_saved, ", ", buf, maxlen);
+ /* the number of flushes issued */
+ write_u64("flushCount : ", stats->flush_count, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_hash_lock_statistics(char *prefix, struct hash_lock_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of times the UDS advice proved correct */
+ write_u64("dedupeAdviceValid : ", stats->dedupe_advice_valid, ", ", buf, maxlen);
+ /* Number of times the UDS advice proved incorrect */
+ write_u64("dedupeAdviceStale : ", stats->dedupe_advice_stale, ", ", buf, maxlen);
+ /* Number of writes with the same data as another in-flight write */
+ write_u64("concurrentDataMatches : ", stats->concurrent_data_matches,
+ ", ", buf, maxlen);
+ /* Number of writes whose hash collided with an in-flight write */
+ write_u64("concurrentHashCollisions : ",
+ stats->concurrent_hash_collisions, ", ", buf, maxlen);
+ /* Current number of dedupe queries that are in flight */
+ write_u32("currDedupeQueries : ", stats->curr_dedupe_queries, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_error_statistics(char *prefix, struct error_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* number of times VDO got an invalid dedupe advice PBN from UDS */
+ write_u64("invalidAdvicePBNCount : ", stats->invalid_advice_pbn_count,
+ ", ", buf, maxlen);
+ /* number of times a VIO completed with a VDO_NO_SPACE error */
+ write_u64("noSpaceErrorCount : ", stats->no_space_error_count, ", ",
+ buf, maxlen);
+ /* number of times a VIO completed with a VDO_READ_ONLY error */
+ write_u64("readOnlyErrorCount : ", stats->read_only_error_count, ", ",
+ buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_bio_stats(char *prefix, struct bio_stats *stats, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of REQ_OP_READ bios */
+ write_u64("read : ", stats->read, ", ", buf, maxlen);
+ /* Number of REQ_OP_WRITE bios with data */
+ write_u64("write : ", stats->write, ", ", buf, maxlen);
+ /* Number of bios tagged with REQ_PREFLUSH and containing no data */
+ write_u64("emptyFlush : ", stats->empty_flush, ", ", buf, maxlen);
+ /* Number of REQ_OP_DISCARD bios */
+ write_u64("discard : ", stats->discard, ", ", buf, maxlen);
+ /* Number of bios tagged with REQ_PREFLUSH */
+ write_u64("flush : ", stats->flush, ", ", buf, maxlen);
+ /* Number of bios tagged with REQ_FUA */
+ write_u64("fua : ", stats->fua, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_memory_usage(char *prefix, struct memory_usage *stats, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Tracked bytes currently allocated. */
+ write_u64("bytesUsed : ", stats->bytes_used, ", ", buf, maxlen);
+ /* Maximum tracked bytes allocated. */
+ write_u64("peakBytesUsed : ", stats->peak_bytes_used, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_index_statistics(char *prefix, struct index_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of records stored in the index */
+ write_u64("entriesIndexed : ", stats->entries_indexed, ", ", buf, maxlen);
+ /* Number of post calls that found an existing entry */
+ write_u64("postsFound : ", stats->posts_found, ", ", buf, maxlen);
+ /* Number of post calls that added a new entry */
+ write_u64("postsNotFound : ", stats->posts_not_found, ", ", buf, maxlen);
+ /* Number of query calls that found an existing entry */
+ write_u64("queriesFound : ", stats->queries_found, ", ", buf, maxlen);
+ /* Number of query calls that added a new entry */
+ write_u64("queriesNotFound : ", stats->queries_not_found, ", ", buf, maxlen);
+ /* Number of update calls that found an existing entry */
+ write_u64("updatesFound : ", stats->updates_found, ", ", buf, maxlen);
+ /* Number of update calls that added a new entry */
+ write_u64("updatesNotFound : ", stats->updates_not_found, ", ", buf, maxlen);
+ /* Number of entries discarded */
+ write_u64("entriesDiscarded : ", stats->entries_discarded, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_vdo_statistics(char *prefix, struct vdo_statistics *stats, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ write_u32("version : ", stats->version, ", ", buf, maxlen);
+ /* Number of blocks used for data */
+ write_u64("dataBlocksUsed : ", stats->data_blocks_used, ", ", buf, maxlen);
+ /* Number of blocks used for VDO metadata */
+ write_u64("overheadBlocksUsed : ", stats->overhead_blocks_used, ", ",
+ buf, maxlen);
+ /* Number of logical blocks that are currently mapped to physical blocks */
+ write_u64("logicalBlocksUsed : ", stats->logical_blocks_used, ", ", buf, maxlen);
+ /* number of physical blocks */
+ write_block_count_t("physicalBlocks : ", stats->physical_blocks, ", ",
+ buf, maxlen);
+ /* number of logical blocks */
+ write_block_count_t("logicalBlocks : ", stats->logical_blocks, ", ",
+ buf, maxlen);
+ /* Size of the block map page cache, in bytes */
+ write_u64("blockMapCacheSize : ", stats->block_map_cache_size, ", ",
+ buf, maxlen);
+ /* The physical block size */
+ write_u64("blockSize : ", stats->block_size, ", ", buf, maxlen);
+ /* Number of times the VDO has successfully recovered */
+ write_u64("completeRecoveries : ", stats->complete_recoveries, ", ",
+ buf, maxlen);
+ /* Number of times the VDO has recovered from read-only mode */
+ write_u64("readOnlyRecoveries : ", stats->read_only_recoveries, ", ",
+ buf, maxlen);
+ /* String describing the operating mode of the VDO */
+ write_string("mode : ", stats->mode, ", ", buf, maxlen);
+ /* Whether the VDO is in recovery mode */
+ write_bool("inRecoveryMode : ", stats->in_recovery_mode, ", ", buf, maxlen);
+ /* What percentage of recovery mode work has been completed */
+ write_u8("recoveryPercentage : ", stats->recovery_percentage, ", ", buf, maxlen);
+ /* The statistics for the compressed block packer */
+ write_packer_statistics("packer : ", &stats->packer, ", ", buf, maxlen);
+ /* Counters for events in the block allocator */
+ write_block_allocator_statistics("allocator : ", &stats->allocator,
+ ", ", buf, maxlen);
+ /* Counters for events in the recovery journal */
+ write_recovery_journal_statistics("journal : ", &stats->journal, ", ",
+ buf, maxlen);
+ /* The statistics for the slab journals */
+ write_slab_journal_statistics("slabJournal : ", &stats->slab_journal,
+ ", ", buf, maxlen);
+ /* The statistics for the slab summary */
+ write_slab_summary_statistics("slabSummary : ", &stats->slab_summary,
+ ", ", buf, maxlen);
+ /* The statistics for the reference counts */
+ write_ref_counts_statistics("refCounts : ", &stats->ref_counts, ", ",
+ buf, maxlen);
+ /* The statistics for the block map */
+ write_block_map_statistics("blockMap : ", &stats->block_map, ", ", buf, maxlen);
+ /* The dedupe statistics from hash locks */
+ write_hash_lock_statistics("hashLock : ", &stats->hash_lock, ", ", buf, maxlen);
+ /* Counts of error conditions */
+ write_error_statistics("errors : ", &stats->errors, ", ", buf, maxlen);
+ /* The VDO instance */
+ write_u32("instance : ", stats->instance, ", ", buf, maxlen);
+ /* Current number of active VIOs */
+ write_u32("currentVIOsInProgress : ", stats->current_vios_in_progress,
+ ", ", buf, maxlen);
+ /* Maximum number of active VIOs */
+ write_u32("maxVIOs : ", stats->max_vios, ", ", buf, maxlen);
+ /* Number of times the UDS index was too slow in responding */
+ write_u64("dedupeAdviceTimeouts : ", stats->dedupe_advice_timeouts,
+ ", ", buf, maxlen);
+ /* Number of flush requests submitted to the storage device */
+ write_u64("flushOut : ", stats->flush_out, ", ", buf, maxlen);
+ /* Logical block size */
+ write_u64("logicalBlockSize : ", stats->logical_block_size, ", ", buf, maxlen);
+ /* Bios submitted into VDO from above */
+ write_bio_stats("biosIn : ", &stats->bios_in, ", ", buf, maxlen);
+ write_bio_stats("biosInPartial : ", &stats->bios_in_partial, ", ", buf, maxlen);
+ /* Bios submitted onward for user data */
+ write_bio_stats("biosOut : ", &stats->bios_out, ", ", buf, maxlen);
+ /* Bios submitted onward for metadata */
+ write_bio_stats("biosMeta : ", &stats->bios_meta, ", ", buf, maxlen);
+ write_bio_stats("biosJournal : ", &stats->bios_journal, ", ", buf, maxlen);
+ write_bio_stats("biosPageCache : ", &stats->bios_page_cache, ", ", buf, maxlen);
+ write_bio_stats("biosOutCompleted : ", &stats->bios_out_completed, ", ",
+ buf, maxlen);
+ write_bio_stats("biosMetaCompleted : ", &stats->bios_meta_completed,
+ ", ", buf, maxlen);
+ write_bio_stats("biosJournalCompleted : ",
+ &stats->bios_journal_completed, ", ", buf, maxlen);
+ write_bio_stats("biosPageCacheCompleted : ",
+ &stats->bios_page_cache_completed, ", ", buf, maxlen);
+ write_bio_stats("biosAcknowledged : ", &stats->bios_acknowledged, ", ",
+ buf, maxlen);
+ write_bio_stats("biosAcknowledgedPartial : ",
+ &stats->bios_acknowledged_partial, ", ", buf, maxlen);
+ /* Current number of bios in progress */
+ write_bio_stats("biosInProgress : ", &stats->bios_in_progress, ", ",
+ buf, maxlen);
+ /* Memory usage stats. */
+ write_memory_usage("memoryUsage : ", &stats->memory_usage, ", ", buf, maxlen);
+ /* The statistics for the UDS index */
+ write_index_statistics("index : ", &stats->index, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen)
+{
+ struct vdo_statistics *stats;
+ int result;
+
+ result = vdo_allocate(1, struct vdo_statistics, __func__, &stats);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("Cannot allocate memory to write VDO statistics");
+ return result;
+ }
+
+ vdo_fetch_statistics(vdo, stats);
+ write_vdo_statistics(NULL, stats, NULL, &buf, &maxlen);
+ vdo_free(stats);
+ return VDO_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/message-stats.h b/drivers/md/dm-vdo/message-stats.h
new file mode 100644
index 000000000000..f7fceca9acab
--- /dev/null
+++ b/drivers/md/dm-vdo/message-stats.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_MESSAGE_STATS_H
+#define VDO_MESSAGE_STATS_H
+
+#include "types.h"
+
+int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen);
+
+#endif /* VDO_MESSAGE_STATS_H */
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
new file mode 100644
index 000000000000..00c9b9c05001
--- /dev/null
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: LGPL-2.1+
+/*
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain. The author hereby disclaims copyright to this source code.
+ *
+ * Adapted by John Wiele (jwiele@redhat.com).
+ */
+
+#include "murmurhash3.h"
+
+static inline u64 rotl64(u64 x, s8 r)
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL64(x, y) rotl64(x, y)
+static __always_inline u64 getblock64(const u64 *p, int i)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return p[i];
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return __builtin_bswap64(p[i]);
+#else
+#error "can't figure out byte order"
+#endif
+}
+
+static __always_inline void putblock64(u64 *p, int i, u64 value)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ p[i] = value;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ p[i] = __builtin_bswap64(value);
+#else
+#error "can't figure out byte order"
+#endif
+}
+
+/* Finalization mix - force all bits of a hash block to avalanche */
+
+static __always_inline u64 fmix64(u64 k)
+{
+ k ^= k >> 33;
+ k *= 0xff51afd7ed558ccdLLU;
+ k ^= k >> 33;
+ k *= 0xc4ceb9fe1a85ec53LLU;
+ k ^= k >> 33;
+
+ return k;
+}
+
+void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
+{
+ const u8 *data = key;
+ const int nblocks = len / 16;
+
+ u64 h1 = seed;
+ u64 h2 = seed;
+
+ const u64 c1 = 0x87c37b91114253d5LLU;
+ const u64 c2 = 0x4cf5ad432745937fLLU;
+
+ /* body */
+
+ const u64 *blocks = (const u64 *)(data);
+
+ int i;
+
+ for (i = 0; i < nblocks; i++) {
+ u64 k1 = getblock64(blocks, i * 2 + 0);
+ u64 k2 = getblock64(blocks, i * 2 + 1);
+
+ k1 *= c1;
+ k1 = ROTL64(k1, 31);
+ k1 *= c2;
+ h1 ^= k1;
+
+ h1 = ROTL64(h1, 27);
+ h1 += h2;
+ h1 = h1 * 5 + 0x52dce729;
+
+ k2 *= c2;
+ k2 = ROTL64(k2, 33);
+ k2 *= c1;
+ h2 ^= k2;
+
+ h2 = ROTL64(h2, 31);
+ h2 += h1;
+ h2 = h2 * 5 + 0x38495ab5;
+ }
+
+ /* tail */
+
+ {
+ const u8 *tail = (const u8 *)(data + nblocks * 16);
+
+ u64 k1 = 0;
+ u64 k2 = 0;
+
+ switch (len & 15) {
+ case 15:
+ k2 ^= ((u64)tail[14]) << 48;
+ fallthrough;
+ case 14:
+ k2 ^= ((u64)tail[13]) << 40;
+ fallthrough;
+ case 13:
+ k2 ^= ((u64)tail[12]) << 32;
+ fallthrough;
+ case 12:
+ k2 ^= ((u64)tail[11]) << 24;
+ fallthrough;
+ case 11:
+ k2 ^= ((u64)tail[10]) << 16;
+ fallthrough;
+ case 10:
+ k2 ^= ((u64)tail[9]) << 8;
+ fallthrough;
+ case 9:
+ k2 ^= ((u64)tail[8]) << 0;
+ k2 *= c2;
+ k2 = ROTL64(k2, 33);
+ k2 *= c1;
+ h2 ^= k2;
+ fallthrough;
+
+ case 8:
+ k1 ^= ((u64)tail[7]) << 56;
+ fallthrough;
+ case 7:
+ k1 ^= ((u64)tail[6]) << 48;
+ fallthrough;
+ case 6:
+ k1 ^= ((u64)tail[5]) << 40;
+ fallthrough;
+ case 5:
+ k1 ^= ((u64)tail[4]) << 32;
+ fallthrough;
+ case 4:
+ k1 ^= ((u64)tail[3]) << 24;
+ fallthrough;
+ case 3:
+ k1 ^= ((u64)tail[2]) << 16;
+ fallthrough;
+ case 2:
+ k1 ^= ((u64)tail[1]) << 8;
+ fallthrough;
+ case 1:
+ k1 ^= ((u64)tail[0]) << 0;
+ k1 *= c1;
+ k1 = ROTL64(k1, 31);
+ k1 *= c2;
+ h1 ^= k1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* finalization */
+
+ h1 ^= len;
+ h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = fmix64(h1);
+ h2 = fmix64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ putblock64((u64 *)out, 0, h1);
+ putblock64((u64 *)out, 1, h2);
+}
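A minimal usage sketch, assuming a hypothetical caller and an arbitrary example seed: the digest is written into a caller-supplied 16-byte buffer, and because putblock64() normalizes byte order, the same input produces the same digest bytes on little- and big-endian hosts.

/* Sketch only: hash_example() and the seed value are illustrative. */
static void hash_example(const void *data, int length)
{
	u8 digest[16];

	murmurhash3_128(data, length, 0x62ea60be, digest);
	/* 'digest' now holds the 128-bit hash of 'data'. */
}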
diff --git a/drivers/md/dm-vdo/murmurhash3.h b/drivers/md/dm-vdo/murmurhash3.h
new file mode 100644
index 000000000000..d84711ddb659
--- /dev/null
+++ b/drivers/md/dm-vdo/murmurhash3.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain. The author hereby disclaims copyright to this source code.
+ */
+
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+void murmurhash3_128(const void *key, int len, u32 seed, void *out);
+
+#endif /* _MURMURHASH3_H_ */
diff --git a/drivers/md/dm-vdo/numeric.h b/drivers/md/dm-vdo/numeric.h
new file mode 100644
index 000000000000..dc8c400b21d2
--- /dev/null
+++ b/drivers/md/dm-vdo/numeric.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_NUMERIC_H
+#define UDS_NUMERIC_H
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * These utilities encode or decode a number from an offset in a larger data buffer and then
+ * advance the offset pointer to the next field in the buffer.
+ */
+
+static inline void decode_s64_le(const u8 *buffer, size_t *offset, s64 *decoded)
+{
+ *decoded = get_unaligned_le64(buffer + *offset);
+ *offset += sizeof(s64);
+}
+
+static inline void encode_s64_le(u8 *data, size_t *offset, s64 to_encode)
+{
+ put_unaligned_le64(to_encode, data + *offset);
+ *offset += sizeof(s64);
+}
+
+static inline void decode_u64_le(const u8 *buffer, size_t *offset, u64 *decoded)
+{
+ *decoded = get_unaligned_le64(buffer + *offset);
+ *offset += sizeof(u64);
+}
+
+static inline void encode_u64_le(u8 *data, size_t *offset, u64 to_encode)
+{
+ put_unaligned_le64(to_encode, data + *offset);
+ *offset += sizeof(u64);
+}
+
+static inline void decode_s32_le(const u8 *buffer, size_t *offset, s32 *decoded)
+{
+ *decoded = get_unaligned_le32(buffer + *offset);
+ *offset += sizeof(s32);
+}
+
+static inline void encode_s32_le(u8 *data, size_t *offset, s32 to_encode)
+{
+ put_unaligned_le32(to_encode, data + *offset);
+ *offset += sizeof(s32);
+}
+
+static inline void decode_u32_le(const u8 *buffer, size_t *offset, u32 *decoded)
+{
+ *decoded = get_unaligned_le32(buffer + *offset);
+ *offset += sizeof(u32);
+}
+
+static inline void encode_u32_le(u8 *data, size_t *offset, u32 to_encode)
+{
+ put_unaligned_le32(to_encode, data + *offset);
+ *offset += sizeof(u32);
+}
+
+static inline void decode_u16_le(const u8 *buffer, size_t *offset, u16 *decoded)
+{
+ *decoded = get_unaligned_le16(buffer + *offset);
+ *offset += sizeof(u16);
+}
+
+static inline void encode_u16_le(u8 *data, size_t *offset, u16 to_encode)
+{
+ put_unaligned_le16(to_encode, data + *offset);
+ *offset += sizeof(u16);
+}
+
+#endif /* UDS_NUMERIC_H */
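A round-trip sketch of the cursor convention, assuming a hypothetical caller: each helper reads or writes at *offset and then advances it past the field, so fields are packed back to back in the order the calls are made.

/* Sketch only: numeric_round_trip() is not part of this patch. */
static void numeric_round_trip(void)
{
	u8 buffer[sizeof(u64) + sizeof(u32)];
	size_t offset = 0;
	u64 big;
	u32 small;

	/* Encode two fields back to back; 'offset' advances past each one. */
	encode_u64_le(buffer, &offset, 0x1122334455667788ULL);
	encode_u32_le(buffer, &offset, 0xcafef00d);

	/* Decode in the same order, starting from a fresh cursor. */
	offset = 0;
	decode_u64_le(buffer, &offset, &big);
	decode_u32_le(buffer, &offset, &small);
}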
diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c
new file mode 100644
index 000000000000..16cf29b4c90a
--- /dev/null
+++ b/drivers/md/dm-vdo/packer.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "packer.h"
+
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "physical-zone.h"
+#include "status-codes.h"
+#include "vdo.h"
+#include "vio.h"
+
+static const struct version_number COMPRESSED_BLOCK_1_0 = {
+ .major_version = 1,
+ .minor_version = 0,
+};
+
+#define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS))
+
+/**
+ * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed
+ * block.
+ * @mapping_state [in] The mapping state for the look up.
+ * @compressed_block [in] The compressed block that was read from disk.
+ * @fragment_offset [out] The offset of the fragment within a compressed block.
+ * @fragment_size [out] The size of the fragment.
+ *
+ * Return: VDO_SUCCESS if a valid compressed fragment is found, or VDO_INVALID_FRAGMENT if the
+ * mapping state, block version, slot, or fragment bounds are invalid.
+ */
+int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
+ struct compressed_block *block,
+ u16 *fragment_offset, u16 *fragment_size)
+{
+ u16 compressed_size;
+ u16 offset = 0;
+ unsigned int i;
+ u8 slot;
+ struct version_number version;
+
+ if (!vdo_is_state_compressed(mapping_state))
+ return VDO_INVALID_FRAGMENT;
+
+ version = vdo_unpack_version_number(block->header.version);
+ if (!vdo_are_same_version(version, COMPRESSED_BLOCK_1_0))
+ return VDO_INVALID_FRAGMENT;
+
+ slot = mapping_state - VDO_MAPPING_STATE_COMPRESSED_BASE;
+ if (slot >= VDO_MAX_COMPRESSION_SLOTS)
+ return VDO_INVALID_FRAGMENT;
+
+ compressed_size = __le16_to_cpu(block->header.sizes[slot]);
+ for (i = 0; i < slot; i++) {
+ offset += __le16_to_cpu(block->header.sizes[i]);
+ if (offset >= VDO_COMPRESSED_BLOCK_DATA_SIZE)
+ return VDO_INVALID_FRAGMENT;
+ }
+
+ if ((offset + compressed_size) > VDO_COMPRESSED_BLOCK_DATA_SIZE)
+ return VDO_INVALID_FRAGMENT;
+
+ *fragment_offset = offset;
+ *fragment_size = compressed_size;
+ return VDO_SUCCESS;
+}
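As a concrete illustration of the offset arithmetic above (sizes invented for the example):

    /*
     * Suppose the header records sizes[] = { 1000, 800, 600, 0, ... } and the mapping
     * state selects slot 2. The loop sums sizes[0] and sizes[1], so the fragment starts
     * at offset 1800 and is 600 bytes long; the lookup succeeds only because
     * 1800 + 600 still fits within VDO_COMPRESSED_BLOCK_DATA_SIZE.
     */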
+
+/**
+ * assert_on_packer_thread() - Check that we are on the packer thread.
+ * @packer: The packer.
+ * @caller: The function which is asserting.
+ */
+static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
+ "%s() called from packer thread", caller);
+}
+
+/**
+ * insert_in_sorted_list() - Insert a bin into the packer's sorted bin list.
+ * @packer: The packer.
+ * @bin: The bin to move to its sorted position.
+ *
+ * The list is in ascending order of free space. Since all bins are already in the list, this
+ * actually moves the bin to the correct position in the list.
+ */
+static void insert_in_sorted_list(struct packer *packer, struct packer_bin *bin)
+{
+ struct packer_bin *active_bin;
+
+ list_for_each_entry(active_bin, &packer->bins, list)
+ if (active_bin->free_space > bin->free_space) {
+ list_move_tail(&bin->list, &active_bin->list);
+ return;
+ }
+
+ list_move_tail(&bin->list, &packer->bins);
+}
+
+/**
+ * make_bin() - Allocate a bin and put it into the packer's list.
+ * @packer: The packer.
+ */
+static int __must_check make_bin(struct packer *packer)
+{
+ struct packer_bin *bin;
+ int result;
+
+ result = vdo_allocate_extended(struct packer_bin, VDO_MAX_COMPRESSION_SLOTS,
+ struct vio *, __func__, &bin);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
+ INIT_LIST_HEAD(&bin->list);
+ list_add_tail(&bin->list, &packer->bins);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_make_packer() - Make a new block packer.
+ *
+ * @vdo: The vdo to which this packer belongs.
+ * @bin_count: The number of partial bins to keep in memory.
+ * @packer_ptr: A pointer to hold the new packer.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, struct packer **packer_ptr)
+{
+ struct packer *packer;
+ block_count_t i;
+ int result;
+
+ result = vdo_allocate(1, struct packer, __func__, &packer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ packer->thread_id = vdo->thread_config.packer_thread;
+ packer->size = bin_count;
+ INIT_LIST_HEAD(&packer->bins);
+ vdo_set_admin_state_code(&packer->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+
+ for (i = 0; i < bin_count; i++) {
+ result = make_bin(packer);
+ if (result != VDO_SUCCESS) {
+ vdo_free_packer(packer);
+ return result;
+ }
+ }
+
+ /*
+ * The canceled bin can hold up to half the number of user vios. Every canceled vio in the
+ * bin must have a canceler for which it is waiting, and any canceler will only have
+ * canceled one lock holder at a time.
+ */
+ result = vdo_allocate_extended(struct packer_bin, MAXIMUM_VDO_USER_VIOS / 2,
+ struct vio *, __func__, &packer->canceled_bin);
+ if (result != VDO_SUCCESS) {
+ vdo_free_packer(packer);
+ return result;
+ }
+
+ result = vdo_make_default_thread(vdo, packer->thread_id);
+ if (result != VDO_SUCCESS) {
+ vdo_free_packer(packer);
+ return result;
+ }
+
+ *packer_ptr = packer;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_packer() - Free a block packer.
+ * @packer: The packer to free.
+ */
+void vdo_free_packer(struct packer *packer)
+{
+ struct packer_bin *bin, *tmp;
+
+ if (packer == NULL)
+ return;
+
+ list_for_each_entry_safe(bin, tmp, &packer->bins, list) {
+ list_del_init(&bin->list);
+ vdo_free(bin);
+ }
+
+ vdo_free(vdo_forget(packer->canceled_bin));
+ vdo_free(packer);
+}
+
+/**
+ * get_packer_from_data_vio() - Get the packer from a data_vio.
+ * @data_vio: The data_vio.
+ *
+ * Return: The packer from the VDO to which the data_vio belongs.
+ */
+static inline struct packer *get_packer_from_data_vio(struct data_vio *data_vio)
+{
+ return vdo_from_data_vio(data_vio)->packer;
+}
+
+/**
+ * vdo_get_packer_statistics() - Get the current statistics from the packer.
+ * @packer: The packer to query.
+ *
+ * Return: a copy of the current statistics for the packer.
+ */
+struct packer_statistics vdo_get_packer_statistics(const struct packer *packer)
+{
+ const struct packer_statistics *stats = &packer->statistics;
+
+ return (struct packer_statistics) {
+ .compressed_fragments_written = READ_ONCE(stats->compressed_fragments_written),
+ .compressed_blocks_written = READ_ONCE(stats->compressed_blocks_written),
+ .compressed_fragments_in_packer = READ_ONCE(stats->compressed_fragments_in_packer),
+ };
+}
+
+/**
+ * abort_packing() - Abort packing a data_vio.
+ * @data_vio: The data_vio to abort.
+ */
+static void abort_packing(struct data_vio *data_vio)
+{
+ struct packer *packer = get_packer_from_data_vio(data_vio);
+
+ WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
+ packer->statistics.compressed_fragments_in_packer - 1);
+
+ write_data_vio(data_vio);
+}
+
+/**
+ * release_compressed_write_waiter() - Update a data_vio for which a successful compressed write
+ * has completed and send it on its way.
+ *
+ * @data_vio: The data_vio to release.
+ * @allocation: The allocation to which the compressed block was written.
+ */
+static void release_compressed_write_waiter(struct data_vio *data_vio,
+ struct allocation *allocation)
+{
+ data_vio->new_mapped = (struct zoned_pbn) {
+ .pbn = allocation->pbn,
+ .zone = allocation->zone,
+ .state = data_vio->compression.slot + VDO_MAPPING_STATE_COMPRESSED_BASE,
+ };
+
+ vdo_share_compressed_write_lock(data_vio, allocation->lock);
+ update_metadata_for_data_vio_write(data_vio, allocation->lock);
+}
+
+/**
+ * finish_compressed_write() - Finish a compressed block write.
+ * @completion: The compressed write completion.
+ *
+ * This callback is registered in continue_after_allocation().
+ */
+static void finish_compressed_write(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct data_vio *client, *next;
+
+ assert_data_vio_in_allocated_zone(agent);
+
+ /*
+ * Process all the non-agent waiters first to ensure that the pbn lock can not be released
+ * until all of them have had a chance to journal their increfs.
+ */
+ for (client = agent->compression.next_in_batch; client != NULL; client = next) {
+ next = client->compression.next_in_batch;
+ release_compressed_write_waiter(client, &agent->allocation);
+ }
+
+ completion->error_handler = handle_data_vio_error;
+ release_compressed_write_waiter(agent, &agent->allocation);
+}
+
+static void handle_compressed_write_error(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct allocation *allocation = &agent->allocation;
+ struct data_vio *client, *next;
+
+ if (vdo_requeue_completion_if_needed(completion, allocation->zone->thread_id))
+ return;
+
+ update_vio_error_stats(as_vio(completion),
+ "Completing compressed write vio for physical block %llu with error",
+ (unsigned long long) allocation->pbn);
+
+ for (client = agent->compression.next_in_batch; client != NULL; client = next) {
+ next = client->compression.next_in_batch;
+ write_data_vio(client);
+ }
+
+ /* Now that we've released the batch from the packer, forget the error and continue on. */
+ vdo_reset_completion(completion);
+ completion->error_handler = handle_data_vio_error;
+ write_data_vio(agent);
+}
+
+/**
+ * add_to_bin() - Put a data_vio in a specific packer_bin in which it will definitely fit.
+ * @bin: The bin in which to put the data_vio.
+ * @data_vio: The data_vio to add.
+ */
+static void add_to_bin(struct packer_bin *bin, struct data_vio *data_vio)
+{
+ data_vio->compression.bin = bin;
+ data_vio->compression.slot = bin->slots_used;
+ bin->incoming[bin->slots_used++] = data_vio;
+}
+
+/**
+ * remove_from_bin() - Get the next data_vio whose compression has not been canceled from a bin.
+ * @packer: The packer.
+ * @bin: The bin from which to get a data_vio.
+ *
+ * Any canceled data_vios will be moved to the canceled bin.
+ * Return: An uncanceled data_vio from the bin or NULL if there are none.
+ */
+static struct data_vio *remove_from_bin(struct packer *packer, struct packer_bin *bin)
+{
+ while (bin->slots_used > 0) {
+ struct data_vio *data_vio = bin->incoming[--bin->slots_used];
+
+ if (!advance_data_vio_compression_stage(data_vio).may_not_compress) {
+ data_vio->compression.bin = NULL;
+ return data_vio;
+ }
+
+ add_to_bin(packer->canceled_bin, data_vio);
+ }
+
+ /* The bin is now empty. */
+ bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
+ return NULL;
+}
+
+/**
+ * initialize_compressed_block() - Initialize a compressed block.
+ * @block: The compressed block to initialize.
+ * @size: The size of the agent's fragment.
+ *
+ * This method initializes the compressed block in the compressed write agent. Because the
+ * compressor already put the agent's compressed fragment at the start of the compressed block's
+ * data field, it needn't be copied. So all we need do is initialize the header and set the size of
+ * the agent's fragment.
+ */
+static void initialize_compressed_block(struct compressed_block *block, u16 size)
+{
+ /*
+ * Make sure the block layout isn't accidentally changed by changing the length of the
+ * block header.
+ */
+ BUILD_BUG_ON(sizeof(struct compressed_block_header) != COMPRESSED_BLOCK_1_0_SIZE);
+
+ block->header.version = vdo_pack_version_number(COMPRESSED_BLOCK_1_0);
+ block->header.sizes[0] = __cpu_to_le16(size);
+}
+
+/**
+ * pack_fragment() - Pack a data_vio's fragment into the compressed block in which it is already
+ * known to fit.
+ * @compression: The agent's compression_state to pack into.
+ * @data_vio: The data_vio to pack.
+ * @offset: The offset into the compressed block at which to pack the fragment.
+ * @slot: The slot in the compressed block to use for this fragment.
+ * @block: The compressed block which will be written out when the batch is fully packed.
+ *
+ * Return: The new amount of space used.
+ */
+static block_size_t __must_check pack_fragment(struct compression_state *compression,
+ struct data_vio *data_vio,
+ block_size_t offset, slot_number_t slot,
+ struct compressed_block *block)
+{
+ struct compression_state *to_pack = &data_vio->compression;
+ char *fragment = to_pack->block->data;
+
+ to_pack->next_in_batch = compression->next_in_batch;
+ compression->next_in_batch = data_vio;
+ to_pack->slot = slot;
+ block->header.sizes[slot] = __cpu_to_le16(to_pack->size);
+ memcpy(&block->data[offset], fragment, to_pack->size);
+ return (offset + to_pack->size);
+}
+
+/**
+ * compressed_write_end_io() - The bio_end_io for a compressed block write.
+ * @bio: The bio for the compressed write.
+ */
+static void compressed_write_end_io(struct bio *bio)
+{
+ struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
+
+ vdo_count_completed_bios(bio);
+ set_data_vio_allocated_zone_callback(data_vio, finish_compressed_write);
+ continue_data_vio_with_error(data_vio, blk_status_to_errno(bio->bi_status));
+}
+
+/**
+ * write_bin() - Write out a bin.
+ * @packer: The packer.
+ * @bin: The bin to write.
+ */
+static void write_bin(struct packer *packer, struct packer_bin *bin)
+{
+ int result;
+ block_size_t offset;
+ slot_number_t slot = 1;
+ struct compression_state *compression;
+ struct compressed_block *block;
+ struct data_vio *agent = remove_from_bin(packer, bin);
+ struct data_vio *client;
+ struct packer_statistics *stats;
+
+ if (agent == NULL)
+ return;
+
+ compression = &agent->compression;
+ compression->slot = 0;
+ block = compression->block;
+ initialize_compressed_block(block, compression->size);
+ offset = compression->size;
+
+ while ((client = remove_from_bin(packer, bin)) != NULL)
+ offset = pack_fragment(compression, client, offset, slot++, block);
+
+ /*
+ * If the batch contains only a single vio, then we save nothing by saving the compressed
+ * form. Continue processing the single vio in the batch.
+ */
+ if (slot == 1) {
+ abort_packing(agent);
+ return;
+ }
+
+ if (slot < VDO_MAX_COMPRESSION_SLOTS) {
+ /* Clear out the sizes of the unused slots */
+ memset(&block->header.sizes[slot], 0,
+ (VDO_MAX_COMPRESSION_SLOTS - slot) * sizeof(__le16));
+ }
+
+ agent->vio.completion.error_handler = handle_compressed_write_error;
+ if (vdo_is_read_only(vdo_from_data_vio(agent))) {
+ continue_data_vio_with_error(agent, VDO_READ_ONLY);
+ return;
+ }
+
+ result = vio_reset_bio(&agent->vio, (char *) block, compressed_write_end_io,
+ REQ_OP_WRITE, agent->allocation.pbn);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(agent, result);
+ return;
+ }
+
+ /*
+ * Once the compressed write is submitted, the fragments are no longer in the packer, so
+ * update stats now.
+ */
+ stats = &packer->statistics;
+ WRITE_ONCE(stats->compressed_fragments_in_packer,
+ (stats->compressed_fragments_in_packer - slot));
+ WRITE_ONCE(stats->compressed_fragments_written,
+ (stats->compressed_fragments_written + slot));
+ WRITE_ONCE(stats->compressed_blocks_written,
+ stats->compressed_blocks_written + 1);
+
+ vdo_submit_data_vio(agent);
+}
+
+/**
+ * add_data_vio_to_packer_bin() - Add a data_vio to a bin's incoming queue.
+ * @packer: The packer.
+ * @bin: The bin to which to add the data_vio.
+ * @data_vio: The data_vio to add to the bin's queue.
+ *
+ * Add the data_vio to the bin's incoming queue, writing out the bin whenever it lacks room or
+ * becomes full, and then restore the sort order of the packer's bin list.
+ */
+static void add_data_vio_to_packer_bin(struct packer *packer, struct packer_bin *bin,
+ struct data_vio *data_vio)
+{
+ /* If the selected bin doesn't have room, start a new batch to make room. */
+ if (bin->free_space < data_vio->compression.size)
+ write_bin(packer, bin);
+
+ add_to_bin(bin, data_vio);
+ bin->free_space -= data_vio->compression.size;
+
+ /* If we happen to exactly fill the bin, start a new batch. */
+ if ((bin->slots_used == VDO_MAX_COMPRESSION_SLOTS) ||
+ (bin->free_space == 0))
+ write_bin(packer, bin);
+
+ /* Now that we've finished changing the free space, restore the sort order. */
+ insert_in_sorted_list(packer, bin);
+}
+
+/**
+ * select_bin() - Select the bin that should be used to pack the compressed data in a data_vio with
+ * other data_vios.
+ * @packer: The packer.
+ * @data_vio: The data_vio.
+ */
+static struct packer_bin * __must_check select_bin(struct packer *packer,
+ struct data_vio *data_vio)
+{
+ /*
+ * First best fit: select the bin with the least free space that has enough room for the
+ * compressed data in the data_vio.
+ */
+ struct packer_bin *bin, *fullest_bin;
+
+ list_for_each_entry(bin, &packer->bins, list) {
+ if (bin->free_space >= data_vio->compression.size)
+ return bin;
+ }
+
+ /*
+ * None of the bins have enough space for the data_vio. We're not allowed to create new
+ * bins, so we have to overflow one of the existing bins. It's pretty intuitive to select
+ * the fullest bin, since that "wastes" the least amount of free space in the compressed
+ * block. But if the space currently used in the fullest bin is smaller than the compressed
+ * size of the incoming block, it seems wrong to force that bin to write when giving up on
+ * compressing the incoming data_vio would likewise "waste" the least amount of free space.
+ */
+ fullest_bin = list_first_entry(&packer->bins, struct packer_bin, list);
+ if (data_vio->compression.size >=
+ (VDO_COMPRESSED_BLOCK_DATA_SIZE - fullest_bin->free_space))
+ return NULL;
+
+ /*
+ * The fullest bin doesn't have room, but writing it out and starting a new batch with the
+ * incoming data_vio will increase the packer's free space.
+ */
+ return fullest_bin;
+}
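A worked illustration of this heuristic (numbers invented): if the bins' free space is 100, 700, and 1500 bytes (the list is kept in ascending order), a 600-byte fragment goes into the 700-byte bin, the tightest fit that still has room. If nothing fits, the fullest bin (100 bytes free) is returned only when the data already packed into it, VDO_COMPRESSED_BLOCK_DATA_SIZE - 100 bytes, exceeds the incoming fragment's size; otherwise select_bin() returns NULL and the caller falls back to writing the data_vio uncompressed.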
+
+/**
+ * vdo_attempt_packing() - Attempt to rewrite the data in this data_vio as part of a compressed
+ * block.
+ * @data_vio: The data_vio to pack.
+ */
+void vdo_attempt_packing(struct data_vio *data_vio)
+{
+ int result;
+ struct packer_bin *bin;
+ struct data_vio_compression_status status = get_data_vio_compression_status(data_vio);
+ struct packer *packer = get_packer_from_data_vio(data_vio);
+
+ assert_on_packer_thread(packer, __func__);
+
+ result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
+ "attempt to pack data_vio not ready for packing, stage: %u",
+ status.stage);
+ if (result != VDO_SUCCESS)
+ return;
+
+ /*
+	 * Increment the counter whether or not this data_vio will be packed, since
+	 * abort_packing() always decrements it.
+ */
+ WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
+ packer->statistics.compressed_fragments_in_packer + 1);
+
+ /*
+ * If packing of this data_vio is disallowed for administrative reasons, give up before
+ * making any state changes.
+ */
+ if (!vdo_is_state_normal(&packer->state) ||
+ (data_vio->flush_generation < packer->flush_generation)) {
+ abort_packing(data_vio);
+ return;
+ }
+
+ /*
+ * The advance_data_vio_compression_stage() check here verifies that the data_vio is
+ * allowed to be compressed (if it has already been canceled, we'll fall out here). Once
+ * the data_vio is in the DATA_VIO_PACKING state, it must be guaranteed to be put in a bin
+ * before any more requests can be processed by the packer thread. Otherwise, a canceling
+ * data_vio could attempt to remove the canceled data_vio from the packer and fail to
+ * rendezvous with it. Thus, we must call select_bin() first to ensure that we will
+ * actually add the data_vio to a bin before advancing to the DATA_VIO_PACKING stage.
+ */
+ bin = select_bin(packer, data_vio);
+ if ((bin == NULL) ||
+ (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_PACKING)) {
+ abort_packing(data_vio);
+ return;
+ }
+
+ add_data_vio_to_packer_bin(packer, bin, data_vio);
+}
+
+/**
+ * check_for_drain_complete() - Check whether the packer has drained.
+ * @packer: The packer.
+ */
+static void check_for_drain_complete(struct packer *packer)
+{
+ if (vdo_is_state_draining(&packer->state) && (packer->canceled_bin->slots_used == 0))
+ vdo_finish_draining(&packer->state);
+}
+
+/**
+ * write_all_non_empty_bins() - Write out all non-empty bins on behalf of a flush or suspend.
+ * @packer: The packer being flushed.
+ */
+static void write_all_non_empty_bins(struct packer *packer)
+{
+ struct packer_bin *bin;
+
+ list_for_each_entry(bin, &packer->bins, list)
+ write_bin(packer, bin);
+ /*
+	 * We don't need to re-sort the bin list here since this loop will make every bin have
+ * the same amount of free space, so every ordering is sorted.
+ */
+
+ check_for_drain_complete(packer);
+}
+
+/**
+ * vdo_flush_packer() - Request that the packer flush asynchronously.
+ * @packer: The packer to flush.
+ *
+ * All bins with at least two compressed fragments will be written out, and any solitary pending
+ * VIOs will be released from the packer. While flushing is in progress, any VIOs submitted to
+ * vdo_attempt_packing() will be continued immediately without attempting to pack them.
+ */
+void vdo_flush_packer(struct packer *packer)
+{
+ assert_on_packer_thread(packer, __func__);
+ if (vdo_is_state_normal(&packer->state))
+ write_all_non_empty_bins(packer);
+}
+
+/**
+ * vdo_remove_lock_holder_from_packer() - Remove a lock holder from the packer.
+ * @completion: The data_vio which needs a lock held by a data_vio in the packer. The data_vio's
+ * compression.lock_holder field will point to the data_vio to remove.
+ */
+void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct packer *packer = get_packer_from_data_vio(data_vio);
+ struct data_vio *lock_holder;
+ struct packer_bin *bin;
+ slot_number_t slot;
+
+ assert_data_vio_in_packer_zone(data_vio);
+
+ lock_holder = vdo_forget(data_vio->compression.lock_holder);
+ bin = lock_holder->compression.bin;
+ VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
+
+ slot = lock_holder->compression.slot;
+ bin->slots_used--;
+ if (slot < bin->slots_used) {
+ bin->incoming[slot] = bin->incoming[bin->slots_used];
+ bin->incoming[slot]->compression.slot = slot;
+ }
+
+ lock_holder->compression.bin = NULL;
+ lock_holder->compression.slot = 0;
+
+ if (bin != packer->canceled_bin) {
+ bin->free_space += lock_holder->compression.size;
+ insert_in_sorted_list(packer, bin);
+ }
+
+ abort_packing(lock_holder);
+ check_for_drain_complete(packer);
+}
+
+/**
+ * vdo_increment_packer_flush_generation() - Increment the flush generation in the packer.
+ * @packer: The packer.
+ *
+ * This will also cause the packer to flush so that any VIOs from previous generations will exit
+ * the packer.
+ */
+void vdo_increment_packer_flush_generation(struct packer *packer)
+{
+ assert_on_packer_thread(packer, __func__);
+ packer->flush_generation++;
+ vdo_flush_packer(packer);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ struct packer *packer = container_of(state, struct packer, state);
+
+ write_all_non_empty_bins(packer);
+}
+
+/**
+ * vdo_drain_packer() - Drain the packer by preventing any more VIOs from entering the packer and
+ * then flushing.
+ * @packer: The packer to drain.
+ * @completion: The completion to finish when the packer has drained.
+ */
+void vdo_drain_packer(struct packer *packer, struct vdo_completion *completion)
+{
+ assert_on_packer_thread(packer, __func__);
+ vdo_start_draining(&packer->state, VDO_ADMIN_STATE_SUSPENDING, completion,
+ initiate_drain);
+}
+
+/**
+ * vdo_resume_packer() - Resume a packer which has been suspended.
+ * @packer: The packer to resume.
+ * @parent: The completion to finish when the packer has resumed.
+ */
+void vdo_resume_packer(struct packer *packer, struct vdo_completion *parent)
+{
+ assert_on_packer_thread(packer, __func__);
+ vdo_continue_completion(parent, vdo_resume_if_quiescent(&packer->state));
+}
+
+static void dump_packer_bin(const struct packer_bin *bin, bool canceled)
+{
+ if (bin->slots_used == 0)
+ /* Don't dump empty bins. */
+ return;
+
+ vdo_log_info(" %sBin slots_used=%u free_space=%zu",
+ (canceled ? "Canceled" : ""), bin->slots_used, bin->free_space);
+
+ /*
+ * FIXME: dump vios in bin->incoming? The vios should have been dumped from the vio pool.
+ * Maybe just dump their addresses so it's clear they're here?
+ */
+}
+
+/**
+ * vdo_dump_packer() - Dump the packer.
+ * @packer: The packer.
+ *
+ * Context: dumps in a thread-unsafe fashion.
+ */
+void vdo_dump_packer(const struct packer *packer)
+{
+ struct packer_bin *bin;
+
+ vdo_log_info("packer");
+ vdo_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu",
+ (unsigned long long) packer->flush_generation,
+ vdo_get_admin_state_code(&packer->state)->name,
+ (unsigned long long) packer->size);
+
+ list_for_each_entry(bin, &packer->bins, list)
+ dump_packer_bin(bin, false);
+
+ dump_packer_bin(packer->canceled_bin, true);
+}
diff --git a/drivers/md/dm-vdo/packer.h b/drivers/md/dm-vdo/packer.h
new file mode 100644
index 000000000000..0f3be44710b5
--- /dev/null
+++ b/drivers/md/dm-vdo/packer.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_PACKER_H
+#define VDO_PACKER_H
+
+#include <linux/list.h>
+
+#include "admin-state.h"
+#include "constants.h"
+#include "encodings.h"
+#include "statistics.h"
+#include "types.h"
+#include "wait-queue.h"
+
+enum {
+ DEFAULT_PACKER_BINS = 16,
+};
+
+/* The header of a compressed block. */
+struct compressed_block_header {
+ /* Unsigned 32-bit major and minor versions, little-endian */
+ struct packed_version_number version;
+
+ /* List of unsigned 16-bit compressed block sizes, little-endian */
+ __le16 sizes[VDO_MAX_COMPRESSION_SLOTS];
+} __packed;
+
+enum {
+ VDO_COMPRESSED_BLOCK_DATA_SIZE = VDO_BLOCK_SIZE - sizeof(struct compressed_block_header),
+
+ /*
+ * A compressed block is only written if we can pack at least two fragments into it, so a
+ * fragment which fills the entire data portion of a compressed block is too big.
+ */
+ VDO_MAX_COMPRESSED_FRAGMENT_SIZE = VDO_COMPRESSED_BLOCK_DATA_SIZE - 1,
+};
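For a sense of scale (assuming the usual VDO constants of a 4096-byte block and 14 compression slots, which are defined elsewhere and are an assumption of this note): sizeof(struct compressed_block_header) is 4 + 4 + 14 * 2 = 36 bytes, so VDO_COMPRESSED_BLOCK_DATA_SIZE works out to 4060 bytes and VDO_MAX_COMPRESSED_FRAGMENT_SIZE to 4059 bytes.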
+
+/* The compressed block overlay. */
+struct compressed_block {
+ struct compressed_block_header header;
+ char data[VDO_COMPRESSED_BLOCK_DATA_SIZE];
+} __packed;
+
+/*
+ * Each packer_bin holds an incomplete batch of data_vios that only partially fill a compressed
+ * block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
+ * enough space to hold a newly-compressed data_vio can easily be found. When the bin fills up or
+ * is flushed, the first uncanceled data_vio in the bin is selected to be the agent for that bin.
+ * Upon entering the packer, each data_vio already has its compressed data in the first slot of the
+ * data_vio's compressed_block (overlaid on the data_vio's scratch_block). So the agent's fragment
+ * is already in place. The fragments for the other uncanceled data_vios in the bin are packed into
+ * the agent's compressed block. The agent then writes out the compressed block. If the write is
+ * successful, the agent shares its pbn lock which each of the other data_vios in its compressed
+ * block and sends each on its way. Finally the agent itself continues on the write path as before.
+ *
+ * There is one special bin which is used to hold data_vios which have been canceled and removed
+ * from their bin by the packer. These data_vios need to wait for the canceller to rendezvous with
+ * them and so they sit in this special bin.
+ */
+struct packer_bin {
+	/* List links for packer.bins */
+ struct list_head list;
+ /* The number of items in the bin */
+ slot_number_t slots_used;
+ /* The number of compressed block bytes remaining in the current batch */
+ size_t free_space;
+ /* The current partial batch of data_vios, waiting for more */
+ struct data_vio *incoming[];
+};
+
+struct packer {
+ /* The ID of the packer's callback thread */
+ thread_id_t thread_id;
+ /* The number of bins */
+ block_count_t size;
+ /* A list of all packer_bins, kept sorted by free_space */
+ struct list_head bins;
+ /*
+ * A bin to hold data_vios which were canceled out of the packer and are waiting to
+ * rendezvous with the canceling data_vio.
+ */
+ struct packer_bin *canceled_bin;
+
+ /* The current flush generation */
+ sequence_number_t flush_generation;
+
+ /* The administrative state of the packer */
+ struct admin_state state;
+
+ /* Statistics are only updated on the packer thread, but are accessed from other threads */
+ struct packer_statistics statistics;
+};
+
+int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
+ struct compressed_block *block,
+ u16 *fragment_offset, u16 *fragment_size);
+
+int __must_check vdo_make_packer(struct vdo *vdo, block_count_t bin_count,
+ struct packer **packer_ptr);
+
+void vdo_free_packer(struct packer *packer);
+
+struct packer_statistics __must_check vdo_get_packer_statistics(const struct packer *packer);
+
+void vdo_attempt_packing(struct data_vio *data_vio);
+
+void vdo_flush_packer(struct packer *packer);
+
+void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion);
+
+void vdo_increment_packer_flush_generation(struct packer *packer);
+
+void vdo_drain_packer(struct packer *packer, struct vdo_completion *completion);
+
+void vdo_resume_packer(struct packer *packer, struct vdo_completion *parent);
+
+void vdo_dump_packer(const struct packer *packer);
+
+#endif /* VDO_PACKER_H */
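A rough lifecycle sketch of this interface (editorial illustration only, not part of the patch; it assumes an already-constructed struct vdo, a caller that also includes logger.h and status-codes.h, and abbreviates error handling):

    /* Hypothetical caller: construct a packer, sample its statistics, tear it down. */
    static int packer_lifecycle_example(struct vdo *vdo)
    {
            struct packer *packer;
            struct packer_statistics stats;
            int result;

            result = vdo_make_packer(vdo, DEFAULT_PACKER_BINS, &packer);
            if (result != VDO_SUCCESS)
                    return result;

            /*
             * data_vios enter via vdo_attempt_packing() on the packer thread, and
             * vdo_flush_packer() pushes out any partially filled bins.
             */
            stats = vdo_get_packer_statistics(packer);
            vdo_log_info("packer holds %llu fragments",
                         (unsigned long long) stats.compressed_fragments_in_packer);

            vdo_free_packer(packer);
            return VDO_SUCCESS;
    }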
diff --git a/drivers/md/dm-vdo/permassert.c b/drivers/md/dm-vdo/permassert.c
new file mode 100644
index 000000000000..bf9eccea1cb3
--- /dev/null
+++ b/drivers/md/dm-vdo/permassert.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "permassert.h"
+
+#include "errors.h"
+#include "logger.h"
+
+int vdo_assertion_failed(const char *expression_string, const char *file_name,
+ int line_number, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+
+ vdo_log_embedded_message(VDO_LOG_ERR, VDO_LOGGING_MODULE_NAME, "assertion \"",
+ format, args, "\" (%s) failed at %s:%d",
+ expression_string, file_name, line_number);
+ vdo_log_backtrace(VDO_LOG_ERR);
+
+ va_end(args);
+
+ return UDS_ASSERTION_FAILED;
+}
diff --git a/drivers/md/dm-vdo/permassert.h b/drivers/md/dm-vdo/permassert.h
new file mode 100644
index 000000000000..c34f2ba650e1
--- /dev/null
+++ b/drivers/md/dm-vdo/permassert.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef PERMASSERT_H
+#define PERMASSERT_H
+
+#include <linux/compiler.h>
+
+#include "errors.h"
+
+/* Utilities for asserting that certain conditions are met */
+
+#define STRINGIFY(X) #X
+
+/*
+ * A hack to apply the "warn if unused" attribute to an integral expression.
+ *
+ * Since GCC doesn't propagate the warn_unused_result attribute to conditional expressions
+ * incorporating calls to functions with that attribute, this function can be used to wrap such an
+ * expression. With optimization enabled, this function contributes no additional instructions, but
+ * the warn_unused_result attribute still applies to the code calling it.
+ */
+static inline int __must_check vdo_must_use(int value)
+{
+ return value;
+}
+
+/* Assert that an expression is true and return an error if it is not. */
+#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__))
+
+/* Log a message if the expression is not true. */
+#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
+
+#define __VDO_ASSERT(expr, ...) \
+ (likely(expr) ? VDO_SUCCESS \
+ : vdo_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
+
+/* Log an assertion failure message. */
+int vdo_assertion_failed(const char *expression_string, const char *file_name,
+ int line_number, const char *format, ...)
+ __printf(4, 5);
+
+#endif /* PERMASSERT_H */
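A brief usage sketch (hypothetical caller, mirroring how these macros are used throughout this series): VDO_ASSERT returns a status the caller must check, while VDO_ASSERT_LOG_ONLY only logs on failure.

    static int example_check(int count)
    {
            int result;

            /* Must-check form: a failed assertion becomes an error the caller propagates. */
            result = VDO_ASSERT((count > 0), "count %d must be positive", count);
            if (result != VDO_SUCCESS)
                    return result;

            /* Log-only form: record the surprise but keep going. */
            VDO_ASSERT_LOG_ONLY((count < 1000), "count %d is unexpectedly large", count);
            return VDO_SUCCESS;
    }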
diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c
new file mode 100644
index 000000000000..2fee3a7c1191
--- /dev/null
+++ b/drivers/md/dm-vdo/physical-zone.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "physical-zone.h"
+
+#include <linux/list.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "flush.h"
+#include "int-map.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "vdo.h"
+
+/* Each user data_vio needs a PBN read lock and write lock. */
+#define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS)
+
+struct pbn_lock_implementation {
+ enum pbn_lock_type type;
+ const char *name;
+ const char *release_reason;
+};
+
+/* This array must have an entry for every pbn_lock_type value. */
+static const struct pbn_lock_implementation LOCK_IMPLEMENTATIONS[] = {
+ [VIO_READ_LOCK] = {
+ .type = VIO_READ_LOCK,
+ .name = "read",
+ .release_reason = "candidate duplicate",
+ },
+ [VIO_WRITE_LOCK] = {
+ .type = VIO_WRITE_LOCK,
+ .name = "write",
+ .release_reason = "newly allocated",
+ },
+ [VIO_BLOCK_MAP_WRITE_LOCK] = {
+ .type = VIO_BLOCK_MAP_WRITE_LOCK,
+ .name = "block map write",
+ .release_reason = "block map write",
+ },
+};
+
+static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type)
+{
+ return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
+}
+
+/**
+ * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
+ * @lock: The lock to check.
+ *
+ * Return: true if the lock is a read lock.
+ */
+bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
+{
+ return has_lock_type(lock, VIO_READ_LOCK);
+}
+
+static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type)
+{
+ lock->implementation = &LOCK_IMPLEMENTATIONS[type];
+}
+
+/**
+ * vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock.
+ * @lock: The PBN write lock to downgrade.
+ *
+ * The lock holder count is cleared and the caller is responsible for setting the new count.
+ */
+void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
+{
+ VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
+ "PBN lock must not already have been downgraded");
+ VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
+ "must not downgrade block map write locks");
+ VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
+ "PBN write lock should have one holder but has %u",
+ lock->holder_count);
+ /*
+ * data_vio write locks are downgraded in place--the writer retains the hold on the lock.
+ * If this was a compressed write, the holder has not yet journaled its own inc ref,
+	 * If this was a compressed write, the holder has not yet journaled its own inc ref;
+ */
+ lock->increment_limit =
+ (compressed_write ? MAXIMUM_REFERENCE_COUNT : MAXIMUM_REFERENCE_COUNT - 1);
+ set_pbn_lock_type(lock, VIO_READ_LOCK);
+}
+
+/**
+ * vdo_claim_pbn_lock_increment() - Try to claim one of the available reference count increments on
+ * a read lock.
+ * @lock: The PBN read lock from which to claim an increment.
+ *
+ * Claims may be attempted from any thread. A claim is only valid until the PBN lock is released.
+ *
+ * Return: true if the claim succeeded, guaranteeing one increment can be made without overflowing
+ * the PBN's reference count.
+ */
+bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock)
+{
+ /*
+ * Claim the next free reference atomically since hash locks from multiple hash zone
+ * threads might be concurrently deduplicating against a single PBN lock on compressed
+	 * threads might be concurrently deduplicating against a single PBN lock on a compressed
+ * in a sane time-frame, we won't overflow a 32-bit claim counter, allowing a simple add
+ * instead of a compare-and-swap.
+ */
+ u32 claim_number = (u32) atomic_add_return(1, &lock->increments_claimed);
+
+ return (claim_number <= lock->increment_limit);
+}
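For example (numbers invented): if increment_limit is 4 and six deduplicating data_vios race to claim increments, atomic_add_return() hands out claim numbers 1 through 6; the first four claims succeed and the last two fail, and no compare-and-swap loop is needed because the counter is simply allowed to run past the limit.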
+
+/**
+ * vdo_assign_pbn_lock_provisional_reference() - Inform a PBN lock that it is responsible for a
+ * provisional reference.
+ * @lock: The PBN lock.
+ */
+void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
+{
+ VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
+ "lock does not have a provisional reference");
+ lock->has_provisional_reference = true;
+}
+
+/**
+ * vdo_unassign_pbn_lock_provisional_reference() - Inform a PBN lock that it is no longer
+ * responsible for a provisional reference.
+ * @lock: The PBN lock.
+ */
+void vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock)
+{
+ lock->has_provisional_reference = false;
+}
+
+/**
+ * release_pbn_lock_provisional_reference() - If the lock is responsible for a provisional
+ * reference, release that reference.
+ * @lock: The lock.
+ * @locked_pbn: The PBN covered by the lock.
+ * @allocator: The block allocator from which to release the reference.
+ *
+ * This method is called when the lock is released.
+ */
+static void release_pbn_lock_provisional_reference(struct pbn_lock *lock,
+ physical_block_number_t locked_pbn,
+ struct block_allocator *allocator)
+{
+ int result;
+
+ if (!vdo_pbn_lock_has_provisional_reference(lock))
+ return;
+
+ result = vdo_release_block_reference(allocator, locked_pbn);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "Failed to release reference to %s physical block %llu",
+ lock->implementation->release_reason,
+ (unsigned long long) locked_pbn);
+ }
+
+ vdo_unassign_pbn_lock_provisional_reference(lock);
+}
+
+/**
+ * union idle_pbn_lock - PBN lock list entries.
+ *
+ * Unused (idle) PBN locks are kept in a list. Just like in a malloc implementation, the lock
+ * structure is unused memory, so we can save a bit of space (and not pollute the lock structure
+ * proper) by using a union to overlay the lock structure with the free list.
+ */
+typedef union {
+ /** @entry: Only used while locks are in the pool. */
+ struct list_head entry;
+ /** @lock: Only used while locks are not in the pool. */
+ struct pbn_lock lock;
+} idle_pbn_lock;
+
+/**
+ * struct pbn_lock_pool - list of PBN locks.
+ *
+ * The lock pool is little more than the memory allocated for the locks.
+ */
+struct pbn_lock_pool {
+ /** @capacity: The number of locks allocated for the pool. */
+ size_t capacity;
+ /** @borrowed: The number of locks currently borrowed from the pool. */
+ size_t borrowed;
+ /** @idle_list: A list containing all idle PBN lock instances. */
+ struct list_head idle_list;
+ /** @locks: The memory for all the locks allocated by this pool. */
+ idle_pbn_lock locks[];
+};
+
+/**
+ * return_pbn_lock_to_pool() - Return a pbn lock to its pool.
+ * @pool: The pool from which the lock was borrowed.
+ * @lock: The last reference to the lock being returned.
+ *
+ * It must be the last live reference, as if the memory were being freed (the lock memory will be
+ * re-initialized or zeroed).
+ */
+static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock *lock)
+{
+ idle_pbn_lock *idle;
+
+ /* A bit expensive, but will promptly catch some use-after-free errors. */
+ memset(lock, 0, sizeof(*lock));
+
+ idle = container_of(lock, idle_pbn_lock, lock);
+ INIT_LIST_HEAD(&idle->entry);
+ list_add_tail(&idle->entry, &pool->idle_list);
+
+ VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
+ pool->borrowed -= 1;
+}
+
+/**
+ * make_pbn_lock_pool() - Create a new PBN lock pool and all the lock instances it can loan out.
+ *
+ * @capacity: The number of PBN locks to allocate for the pool.
+ * @pool_ptr: A pointer to receive the new pool.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int make_pbn_lock_pool(size_t capacity, struct pbn_lock_pool **pool_ptr)
+{
+ size_t i;
+ struct pbn_lock_pool *pool;
+ int result;
+
+ result = vdo_allocate_extended(struct pbn_lock_pool, capacity, idle_pbn_lock,
+ __func__, &pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ pool->capacity = capacity;
+ pool->borrowed = capacity;
+ INIT_LIST_HEAD(&pool->idle_list);
+
+ for (i = 0; i < capacity; i++)
+ return_pbn_lock_to_pool(pool, &pool->locks[i].lock);
+
+ *pool_ptr = pool;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_pbn_lock_pool() - Free a PBN lock pool.
+ * @pool: The lock pool to free.
+ *
+ * This also frees all the PBN locks it allocated, so the caller must ensure that all locks have
+ * been returned to the pool.
+ */
+static void free_pbn_lock_pool(struct pbn_lock_pool *pool)
+{
+ if (pool == NULL)
+ return;
+
+ VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
+ "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
+ pool->borrowed);
+ vdo_free(pool);
+}
+
+/**
+ * borrow_pbn_lock_from_pool() - Borrow a PBN lock from the pool and initialize it with the
+ * provided type.
+ * @pool: The pool from which to borrow.
+ * @type: The type with which to initialize the lock.
+ * @lock_ptr: A pointer to receive the borrowed lock.
+ *
+ * Pools do not grow on demand or allocate memory, so this will fail if the pool is empty. Borrowed
+ * locks are still associated with this pool and must be returned to only this pool.
+ *
+ * Return: VDO_SUCCESS, or VDO_LOCK_ERROR if the pool is empty.
+ */
+static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
+ enum pbn_lock_type type,
+ struct pbn_lock **lock_ptr)
+{
+ int result;
+ struct list_head *idle_entry;
+ idle_pbn_lock *idle;
+
+ if (pool->borrowed >= pool->capacity)
+ return vdo_log_error_strerror(VDO_LOCK_ERROR,
+ "no free PBN locks left to borrow");
+ pool->borrowed += 1;
+
+ result = VDO_ASSERT(!list_empty(&pool->idle_list),
+ "idle list should not be empty if pool not at capacity");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ idle_entry = pool->idle_list.prev;
+ list_del(idle_entry);
+ memset(idle_entry, 0, sizeof(*idle_entry));
+
+ idle = list_entry(idle_entry, idle_pbn_lock, entry);
+ idle->lock.holder_count = 0;
+ set_pbn_lock_type(&idle->lock, type);
+
+ *lock_ptr = &idle->lock;
+ return VDO_SUCCESS;
+}
+
+/**
+ * initialize_zone() - Initialize a physical zone.
+ * @vdo: The vdo to which the zone will belong.
+ * @zones: The physical_zones to which the zone being initialized belongs.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
+{
+ int result;
+ zone_count_t zone_number = zones->zone_count;
+ struct physical_zone *zone = &zones->zones[zone_number];
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
+ if (result != VDO_SUCCESS) {
+ vdo_int_map_free(zone->pbn_operations);
+ return result;
+ }
+
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.physical_threads[zone_number];
+ zone->allocator = &vdo->depot->allocators[zone_number];
+ zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
+ result = vdo_make_default_thread(vdo, zone->thread_id);
+ if (result != VDO_SUCCESS) {
+ free_pbn_lock_pool(vdo_forget(zone->lock_pool));
+ vdo_int_map_free(zone->pbn_operations);
+ return result;
+ }
+ return result;
+}
+
+/**
+ * vdo_make_physical_zones() - Make the physical zones for a vdo.
+ * @vdo: The vdo being constructed
+ * @zones_ptr: A pointer to hold the zones
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr)
+{
+ struct physical_zones *zones;
+ int result;
+ zone_count_t zone_count = vdo->thread_config.physical_zone_count;
+
+ if (zone_count == 0)
+ return VDO_SUCCESS;
+
+ result = vdo_allocate_extended(struct physical_zones, zone_count,
+ struct physical_zone, __func__, &zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (zones->zone_count = 0; zones->zone_count < zone_count; zones->zone_count++) {
+ result = initialize_zone(vdo, zones);
+ if (result != VDO_SUCCESS) {
+ vdo_free_physical_zones(zones);
+ return result;
+ }
+ }
+
+ *zones_ptr = zones;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_physical_zones() - Destroy the physical zones.
+ * @zones: The zones to free.
+ */
+void vdo_free_physical_zones(struct physical_zones *zones)
+{
+ zone_count_t index;
+
+ if (zones == NULL)
+ return;
+
+ for (index = 0; index < zones->zone_count; index++) {
+ struct physical_zone *zone = &zones->zones[index];
+
+ free_pbn_lock_pool(vdo_forget(zone->lock_pool));
+ vdo_int_map_free(vdo_forget(zone->pbn_operations));
+ }
+
+ vdo_free(zones);
+}
+
+/**
+ * vdo_get_physical_zone_pbn_lock() - Get the lock on a PBN if one exists.
+ * @zone: The physical zone responsible for the PBN.
+ * @pbn: The physical block number whose lock is desired.
+ *
+ * Return: The lock or NULL if the PBN is not locked.
+ */
+struct pbn_lock *vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn)
+{
+ return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));
+}
+
+/**
+ * vdo_attempt_physical_zone_pbn_lock() - Attempt to lock a physical block in the zone responsible
+ * for it.
+ * @zone: The physical zone responsible for the PBN.
+ * @pbn: The physical block number to lock.
+ * @type: The type with which to initialize a new lock.
+ * @lock_ptr: A pointer to receive the lock, existing or new.
+ *
+ * If the PBN is already locked, the existing lock will be returned. Otherwise, a new lock instance
+ * will be borrowed from the pool, initialized, and returned. The lock owner will be NULL for a new
+ * lock acquired by the caller, who is responsible for setting that field promptly. The lock owner
+ * will be non-NULL when there is already an existing lock on the PBN.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn,
+ enum pbn_lock_type type,
+ struct pbn_lock **lock_ptr)
+{
+ /*
+ * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses in
+ * the common case of no lock contention.
+ */
+ struct pbn_lock *lock, *new_lock = NULL;
+ int result;
+
+ result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
+ if (result != VDO_SUCCESS) {
+ VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
+ return result;
+ }
+
+ result = vdo_int_map_put(zone->pbn_operations, pbn, new_lock, false,
+ (void **) &lock);
+ if (result != VDO_SUCCESS) {
+ return_pbn_lock_to_pool(zone->lock_pool, new_lock);
+ return result;
+ }
+
+ if (lock != NULL) {
+ /* The lock is already held, so we don't need the borrowed one. */
+ return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
+ result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
+ (unsigned long long) pbn);
+ if (result != VDO_SUCCESS)
+ return result;
+ *lock_ptr = lock;
+ } else {
+ *lock_ptr = new_lock;
+ }
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_and_lock_block() - Attempt to allocate a block from this zone.
+ * @allocation: The struct allocation of the data_vio attempting to allocate.
+ *
+ * If a block is allocated, the recipient will also hold a lock on it.
+ *
+ * Return: VDO_SUCCESS if a block was allocated, or an error code.
+ */
+static int allocate_and_lock_block(struct allocation *allocation)
+{
+ int result;
+ struct pbn_lock *lock;
+
+ VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
+ "must not allocate a block while already holding a lock on one");
+
+ result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn,
+ allocation->write_lock_type, &lock);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (lock->holder_count > 0) {
+ /* This block is already locked, which should be impossible. */
+ return vdo_log_error_strerror(VDO_LOCK_ERROR,
+ "Newly allocated block %llu was spuriously locked (holder_count=%u)",
+ (unsigned long long) allocation->pbn,
+ lock->holder_count);
+ }
+
+ /* We've successfully acquired a new lock, so mark it as ours. */
+ lock->holder_count += 1;
+ allocation->lock = lock;
+ vdo_assign_pbn_lock_provisional_reference(lock);
+ return VDO_SUCCESS;
+}
+
+/**
+ * retry_allocation() - Retry allocating a block now that we're done waiting for scrubbing.
+ * @waiter: The allocating_vio that was waiting to allocate.
+ * @context: The context (unused).
+ */
+static void retry_allocation(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+
+ /* Now that some slab has scrubbed, restart the allocation process. */
+ data_vio->allocation.wait_for_clean_slab = false;
+ data_vio->allocation.first_allocation_zone = data_vio->allocation.zone->zone_number;
+ continue_data_vio(data_vio);
+}
+
+/**
+ * continue_allocating() - Continue searching for an allocation by enqueuing to wait for scrubbing
+ * or switching to the next zone.
+ * @data_vio: The data_vio attempting to get an allocation.
+ *
+ * This method should only be called from the error handler set in data_vio_allocate_data_block.
+ *
+ * Return: true if the allocation process has continued in another zone.
+ */
+static bool continue_allocating(struct data_vio *data_vio)
+{
+ struct allocation *allocation = &data_vio->allocation;
+ struct physical_zone *zone = allocation->zone;
+ struct vdo_completion *completion = &data_vio->vio.completion;
+ int result = VDO_SUCCESS;
+ bool was_waiting = allocation->wait_for_clean_slab;
+ bool tried_all = (allocation->first_allocation_zone == zone->next->zone_number);
+
+ vdo_reset_completion(completion);
+
+ if (tried_all && !was_waiting) {
+ /*
+ * We've already looked in all the zones, and found nothing. So go through the
+ * zones again, and wait for each to scrub before trying to allocate.
+ */
+ allocation->wait_for_clean_slab = true;
+ allocation->first_allocation_zone = zone->zone_number;
+ }
+
+ if (allocation->wait_for_clean_slab) {
+ data_vio->waiter.callback = retry_allocation;
+ result = vdo_enqueue_clean_slab_waiter(zone->allocator,
+ &data_vio->waiter);
+ if (result == VDO_SUCCESS) {
+ /* We've enqueued to wait for a slab to be scrubbed. */
+ return true;
+ }
+
+ if ((result != VDO_NO_SPACE) || (was_waiting && tried_all)) {
+ vdo_set_completion_result(completion, result);
+ return false;
+ }
+ }
+
+ allocation->zone = zone->next;
+ completion->callback_thread_id = allocation->zone->thread_id;
+ vdo_launch_completion(completion);
+ return true;
+}
+
+/**
+ * vdo_allocate_block_in_zone() - Attempt to allocate a block in the current physical zone, and if
+ * that fails try the next if possible.
+ * @data_vio: The data_vio needing an allocation.
+ *
+ * Return: true if a block was allocated; if not, the data_vio will have been dispatched, so the
+ *         caller must not touch it.
+ */
+bool vdo_allocate_block_in_zone(struct data_vio *data_vio)
+{
+ int result = allocate_and_lock_block(&data_vio->allocation);
+
+ if (result == VDO_SUCCESS)
+ return true;
+
+ if ((result != VDO_NO_SPACE) || !continue_allocating(data_vio))
+ continue_data_vio_with_error(data_vio, result);
+
+ return false;
+}
+
+/**
+ * vdo_release_physical_zone_pbn_lock() - Release a physical block lock if it is held and return it
+ * to the lock pool.
+ * @zone: The physical zone in which the lock was obtained.
+ * @locked_pbn: The physical block number to unlock.
+ * @lock: The lock being released.
+ *
+ * It must be the last live reference, as if the memory were being freed (the lock memory will be
+ * re-initialized or zeroed).
+ */
+void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t locked_pbn,
+ struct pbn_lock *lock)
+{
+ struct pbn_lock *holder;
+
+ if (lock == NULL)
+ return;
+
+ VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
+ "should not be releasing a lock that is not held");
+
+ lock->holder_count -= 1;
+ if (lock->holder_count > 0) {
+ /* The lock was shared and is still referenced, so don't release it yet. */
+ return;
+ }
+
+ holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
+ VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
+ (unsigned long long) locked_pbn);
+
+ release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
+ return_pbn_lock_to_pool(zone->lock_pool, lock);
+}
+
+/**
+ * vdo_dump_physical_zone() - Dump information about a physical zone to the log for debugging.
+ * @zone: The zone to dump.
+ */
+void vdo_dump_physical_zone(const struct physical_zone *zone)
+{
+ vdo_dump_block_allocator(zone->allocator);
+}
diff --git a/drivers/md/dm-vdo/physical-zone.h b/drivers/md/dm-vdo/physical-zone.h
new file mode 100644
index 000000000000..47d874fd5a0b
--- /dev/null
+++ b/drivers/md/dm-vdo/physical-zone.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_PHYSICAL_ZONE_H
+#define VDO_PHYSICAL_ZONE_H
+
+#include <linux/atomic.h>
+
+#include "types.h"
+
+/*
+ * The type of a PBN lock.
+ */
+enum pbn_lock_type {
+ VIO_READ_LOCK,
+ VIO_WRITE_LOCK,
+ VIO_BLOCK_MAP_WRITE_LOCK,
+};
+
+struct pbn_lock_implementation;
+
+/*
+ * A PBN lock.
+ */
+struct pbn_lock {
+ /* The implementation of the lock */
+ const struct pbn_lock_implementation *implementation;
+
+ /* The number of VIOs holding or sharing this lock */
+ data_vio_count_t holder_count;
+ /*
+ * The number of compressed block writers holding a share of this lock while they are
+ * acquiring a reference to the PBN.
+ */
+ u8 fragment_locks;
+
+ /* Whether the locked PBN has been provisionally referenced on behalf of the lock holder. */
+ bool has_provisional_reference;
+
+ /*
+ * For read locks, the number of references that were known to be available on the locked
+ * block at the time the lock was acquired.
+ */
+ u8 increment_limit;
+
+ /*
+ * For read locks, the number of data_vios that have tried to claim one of the available
+ * increments during the lifetime of the lock. Each claim will first increment this
+ * counter, so it can exceed the increment limit.
+ */
+ atomic_t increments_claimed;
+};
+
+struct physical_zone {
+ /* Which physical zone this is */
+ zone_count_t zone_number;
+ /* The thread ID for this zone */
+ thread_id_t thread_id;
+ /* In progress operations keyed by PBN */
+ struct int_map *pbn_operations;
+ /* Pool of unused pbn_lock instances */
+ struct pbn_lock_pool *lock_pool;
+ /* The block allocator for this zone */
+ struct block_allocator *allocator;
+ /* The next zone from which to attempt an allocation */
+ struct physical_zone *next;
+};
+
+struct physical_zones {
+ /* The number of zones */
+ zone_count_t zone_count;
+ /* The physical zones themselves */
+ struct physical_zone zones[];
+};
+
+bool __must_check vdo_is_pbn_read_lock(const struct pbn_lock *lock);
+void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write);
+bool __must_check vdo_claim_pbn_lock_increment(struct pbn_lock *lock);
+
+/**
+ * vdo_pbn_lock_has_provisional_reference() - Check whether a PBN lock has a provisional reference.
+ * @lock: The PBN lock.
+ */
+static inline bool vdo_pbn_lock_has_provisional_reference(struct pbn_lock *lock)
+{
+ return ((lock != NULL) && lock->has_provisional_reference);
+}
+
+void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock);
+void vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock);
+
+int __must_check vdo_make_physical_zones(struct vdo *vdo,
+ struct physical_zones **zones_ptr);
+
+void vdo_free_physical_zones(struct physical_zones *zones);
+
+struct pbn_lock * __must_check vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn);
+
+int __must_check vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn,
+ enum pbn_lock_type type,
+ struct pbn_lock **lock_ptr);
+
+bool __must_check vdo_allocate_block_in_zone(struct data_vio *data_vio);
+
+void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t locked_pbn,
+ struct pbn_lock *lock);
+
+void vdo_dump_physical_zone(const struct physical_zone *zone);
+
+#endif /* VDO_PHYSICAL_ZONE_H */
diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c
new file mode 100644
index 000000000000..42d3d8d0e4b5
--- /dev/null
+++ b/drivers/md/dm-vdo/priority-table.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "priority-table.h"
+
+#include <linux/log2.h>
+
+#include "errors.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "status-codes.h"
+
+/* We use a single 64-bit search vector, so the maximum priority is 63 */
+#define MAX_PRIORITY 63
+
+/*
+ * All the entries with the same priority are queued in a circular list in a bucket for that
+ * priority. The table is essentially an array of buckets.
+ */
+struct bucket {
+ /*
+ * The head of a queue of table entries, all having the same priority
+ */
+ struct list_head queue;
+ /* The priority of all the entries in this bucket */
+ unsigned int priority;
+};
+
+/*
+ * A priority table is an array of buckets, indexed by priority. New entries are added to the end
+ * of the queue in the appropriate bucket. The dequeue operation finds the highest-priority
+ * non-empty bucket by searching a bit vector represented as a single 8-byte word, which is very
+ * fast with compiler and CPU support.
+ */
+struct priority_table {
+ /* The maximum priority of entries that may be stored in this table */
+ unsigned int max_priority;
+ /* A bit vector flagging all buckets that are currently non-empty */
+ u64 search_vector;
+ /* The array of all buckets, indexed by priority */
+ struct bucket buckets[];
+};
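As a worked example of the search-vector trick (bucket contents invented): if only buckets 3 and 7 are non-empty, search_vector is (1ULL << 3) | (1ULL << 7) = 0x88; ilog2(0x88) is 7, so the next dequeue drains bucket 7 before bucket 3, and once bucket 7 empties its bit is cleared and ilog2(0x08) selects bucket 3.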
+
+/**
+ * vdo_make_priority_table() - Allocate and initialize a new priority_table.
+ * @max_priority: The maximum priority value for table entries.
+ * @table_ptr: A pointer to hold the new table.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_priority_table(unsigned int max_priority, struct priority_table **table_ptr)
+{
+ struct priority_table *table;
+ int result;
+ unsigned int priority;
+
+ if (max_priority > MAX_PRIORITY)
+ return UDS_INVALID_ARGUMENT;
+
+ result = vdo_allocate_extended(struct priority_table, max_priority + 1,
+ struct bucket, __func__, &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (priority = 0; priority <= max_priority; priority++) {
+ struct bucket *bucket = &table->buckets[priority];
+
+ bucket->priority = priority;
+ INIT_LIST_HEAD(&bucket->queue);
+ }
+
+ table->max_priority = max_priority;
+ table->search_vector = 0;
+
+ *table_ptr = table;
+ return VDO_SUCCESS;
+}
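vdo_allocate_extended() is being asked for a priority_table followed by max_priority + 1 trailing buckets, i.e. the usual flexible-array allocation. For comparison only, and not as part of the patch, the same shape built with the stock kernel helpers would look roughly like this (assuming a GFP_KERNEL context):

#include <linux/overflow.h>
#include <linux/slab.h>

/*
 * Illustrative sketch, not part of the patch: allocate a priority_table
 * with (max_priority + 1) buckets in its flexible array member.
 */
static struct priority_table *example_alloc_table(unsigned int max_priority)
{
	struct priority_table *table;

	table = kzalloc(struct_size(table, buckets, max_priority + 1),
			GFP_KERNEL);
	return table;	/* NULL on allocation failure */
}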
+
+/**
+ * vdo_free_priority_table() - Free a priority_table.
+ * @table: The table to free.
+ *
+ * The table does not own the entries stored in it and they are not freed by this call.
+ */
+void vdo_free_priority_table(struct priority_table *table)
+{
+ if (table == NULL)
+ return;
+
+ /*
+ * Unlink the buckets from any entries still in the table so the entries won't be left with
+ * dangling pointers to freed memory.
+ */
+ vdo_reset_priority_table(table);
+
+ vdo_free(table);
+}
+
+/**
+ * vdo_reset_priority_table() - Reset a priority table, leaving it in the same empty state as when
+ * newly constructed.
+ * @table: The table to reset.
+ *
+ * The table does not own the entries stored in it and they are not freed (or even unlinked from
+ * each other) by this call.
+ */
+void vdo_reset_priority_table(struct priority_table *table)
+{
+ unsigned int priority;
+
+ table->search_vector = 0;
+ for (priority = 0; priority <= table->max_priority; priority++)
+ list_del_init(&table->buckets[priority].queue);
+}
+
+/**
+ * vdo_priority_table_enqueue() - Add a new entry to the priority table, appending it to the queue
+ * for entries with the specified priority.
+ * @table: The table in which to store the entry.
+ * @priority: The priority of the entry.
+ * @entry: The list_head embedded in the entry to store in the table (the caller must have
+ * initialized it).
+ */
+void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
+ struct list_head *entry)
+{
+ VDO_ASSERT_LOG_ONLY((priority <= table->max_priority),
+ "entry priority must be valid for the table");
+
+ /* Append the entry to the queue in the specified bucket. */
+ list_move_tail(entry, &table->buckets[priority].queue);
+
+ /* Flag the bucket in the search vector since it must be non-empty. */
+ table->search_vector |= (1ULL << priority);
+}
+
+static inline void mark_bucket_empty(struct priority_table *table, struct bucket *bucket)
+{
+ table->search_vector &= ~(1ULL << bucket->priority);
+}
+
+/**
+ * vdo_priority_table_dequeue() - Find the highest-priority entry in the table, remove it from the
+ * table, and return it.
+ * @table: The priority table from which to remove an entry.
+ *
+ * If there are multiple entries with the same priority, the one that has been in the table with
+ * that priority the longest will be returned.
+ *
+ * Return: The dequeued entry, or NULL if the table is currently empty.
+ */
+struct list_head *vdo_priority_table_dequeue(struct priority_table *table)
+{
+ struct bucket *bucket;
+ struct list_head *entry;
+ int top_priority;
+
+ if (table->search_vector == 0) {
+ /* All buckets are empty. */
+ return NULL;
+ }
+
+ /*
+ * Find the highest priority non-empty bucket by finding the highest-order non-zero bit in
+ * the search vector.
+ */
+ top_priority = ilog2(table->search_vector);
+
+ /* Dequeue the first entry in the bucket. */
+ bucket = &table->buckets[top_priority];
+ entry = bucket->queue.next;
+ list_del_init(entry);
+
+ /* Clear the bit in the search vector if the bucket has been emptied. */
+ if (list_empty(&bucket->queue))
+ mark_bucket_empty(table, bucket);
+
+ return entry;
+}
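The constant-time dequeue above hinges on ilog2() of the search vector picking out the highest set bit, i.e. the highest-priority non-empty bucket. A tiny standalone illustration (not part of the patch, with made-up bucket numbers):

/*
 * Illustrative sketch, not part of the patch. With buckets 3 and 7 flagged
 * as non-empty, the search vector is 0x88 and ilog2() returns 7, the
 * highest priority that currently has a queued entry.
 */
static unsigned int example_top_priority(void)
{
	u64 search_vector = (1ULL << 3) | (1ULL << 7);

	return ilog2(search_vector);	/* 7 */
}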
+
+/**
+ * vdo_priority_table_remove() - Remove a specified entry from its priority table.
+ * @table: The table from which to remove the entry.
+ * @entry: The entry to remove from the table.
+ */
+void vdo_priority_table_remove(struct priority_table *table, struct list_head *entry)
+{
+ struct list_head *next_entry;
+
+ /*
+ * We can't guard against calls where the entry is on a list for a different table, but
+ * it's easy to deal with an entry not in any table or list.
+ */
+ if (list_empty(entry))
+ return;
+
+ /*
+ * Remove the entry from the bucket list, remembering a pointer to another entry in the
+ * ring.
+ */
+ next_entry = entry->next;
+ list_del_init(entry);
+
+ /*
+ * If the rest of the list is now empty, the next node must be the list head in the bucket
+ * and we can use it to update the search vector.
+ */
+ if (list_empty(next_entry))
+ mark_bucket_empty(table, list_entry(next_entry, struct bucket, queue));
+}
+
+/**
+ * vdo_is_priority_table_empty() - Return whether the priority table is empty.
+ * @table: The table to check.
+ *
+ * Return: true if the table is empty.
+ */
+bool vdo_is_priority_table_empty(struct priority_table *table)
+{
+ return (table->search_vector == 0);
+}
diff --git a/drivers/md/dm-vdo/priority-table.h b/drivers/md/dm-vdo/priority-table.h
new file mode 100644
index 000000000000..8b060462e3e4
--- /dev/null
+++ b/drivers/md/dm-vdo/priority-table.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_PRIORITY_TABLE_H
+#define VDO_PRIORITY_TABLE_H
+
+#include <linux/list.h>
+
+/*
+ * A priority_table is a simple implementation of a priority queue for entries with priorities that
+ * are small non-negative integer values. It implements the obvious priority queue operations of
+ * enqueuing an entry and dequeuing an entry with the maximum priority. It also supports removing
+ * an arbitrary entry. The priority of an entry already in the table can be changed by removing it
+ * and re-enqueuing it with a different priority. All operations have O(1) complexity.
+ *
+ * The links for the table entries must be embedded in the entries themselves. Lists are used to
+ * link entries in the table and no wrapper type is declared, so an existing list entry in an
+ * object can also be used to queue it in a priority_table, assuming the field is not used for
+ * anything else while so queued.
+ *
+ * The table is implemented as an array of queues (circular lists) indexed by priority, along with
+ * a hint for which queues are non-empty. Steven Skiena calls a very similar structure a "bounded
+ * height priority queue", but given the resemblance to a hash table, "priority table" seems both
+ * shorter and more apt, if somewhat novel.
+ */
+
+struct priority_table;
+
+int __must_check vdo_make_priority_table(unsigned int max_priority,
+ struct priority_table **table_ptr);
+
+void vdo_free_priority_table(struct priority_table *table);
+
+void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
+ struct list_head *entry);
+
+void vdo_reset_priority_table(struct priority_table *table);
+
+struct list_head * __must_check vdo_priority_table_dequeue(struct priority_table *table);
+
+void vdo_priority_table_remove(struct priority_table *table, struct list_head *entry);
+
+bool __must_check vdo_is_priority_table_empty(struct priority_table *table);
+
+#endif /* VDO_PRIORITY_TABLE_H */
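The header stops at the declarations, so a usage sketch may help; it is illustrative only and not part of the patch. The example_item structure and helper are hypothetical, the maximum priority of 63 matches the table's documented limit, the caller is assumed to pass a priority no larger than that maximum, and "status-codes.h" is assumed to supply VDO_SUCCESS as it does in priority-table.c.

#include <linux/list.h>

#include "priority-table.h"
#include "status-codes.h"

/* Hypothetical entry type with the embedded list_head the table links. */
struct example_item {
	struct list_head entry;
	int payload;
};

static int example_priority_table_usage(struct example_item *item,
					unsigned int priority)
{
	struct priority_table *table;
	struct list_head *dequeued;
	int result;

	result = vdo_make_priority_table(63, &table);
	if (result != VDO_SUCCESS)
		return result;

	/* The caller owns the entry's list_head and must initialize it. */
	INIT_LIST_HEAD(&item->entry);
	vdo_priority_table_enqueue(table, priority, &item->entry);

	/* Highest priority first; ties go to the longest-queued entry. */
	dequeued = vdo_priority_table_dequeue(table);
	if (dequeued != NULL)
		item = list_entry(dequeued, struct example_item, entry);

	/* Entries are never owned by the table, so freeing it is safe here. */
	vdo_free_priority_table(table);
	return VDO_SUCCESS;
}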
diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c
new file mode 100644
index 000000000000..ee6321a3e523
--- /dev/null
+++ b/drivers/md/dm-vdo/recovery-journal.c
@@ -0,0 +1,1762 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "recovery-journal.h"
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "slab-depot.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+static const u64 RECOVERY_COUNT_MASK = 0xff;
+
+/*
+ * The number of reserved blocks must be large enough to prevent a new recovery journal
+ * block write from overwriting a block which appears to still be a valid head block of the
+ * journal. Currently, that means reserving enough space for all 2048 data_vios.
+ */
+#define RECOVERY_JOURNAL_RESERVED_BLOCKS \
+ ((MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2)
+
+/**
+ * DOC: Lock Counters.
+ *
+ * A lock_counter is intended to keep all of the locks for the blocks in the recovery journal. The
+ * per-zone counters are all kept in a single array which is arranged by zone (i.e. zone 0's lock 0
+ * is at index 0, zone 0's lock 1 is at index 1, and zone 1's lock 0 is at index 'locks'). This
+ * arrangement is intended to minimize cache-line contention for counters from different zones.
+ *
+ * The locks are implemented as a single object instead of as a lock counter per lock both to
+ * afford this opportunity to reduce cache line contention and also to eliminate the need to have a
+ * completion per lock.
+ *
+ * Lock sets are laid out with the set for recovery journal first, followed by the logical zones,
+ * and then the physical zones.
+ */
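The zone-major layout described above is exactly what get_counter() below computes. As a tiny, not-part-of-the-patch restatement of the index arithmetic:

/*
 * Illustrative sketch, not part of the patch. With 'locks' counters per
 * zone, zone z's counter for lock n lives at index (locks * z) + n, so
 * each zone's counters are contiguous and tend not to share cache lines
 * with another zone's counters.
 */
static inline block_count_t example_counter_index(block_count_t locks,
						  zone_count_t zone_id,
						  block_count_t lock_number)
{
	return (locks * zone_id) + lock_number;
}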
+
+enum lock_counter_state {
+ LOCK_COUNTER_STATE_NOT_NOTIFYING,
+ LOCK_COUNTER_STATE_NOTIFYING,
+ LOCK_COUNTER_STATE_SUSPENDED,
+};
+
+/**
+ * get_zone_count_ptr() - Get a pointer to the zone count for a given lock on a given zone.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to get.
+ * @zone_type: The zone type whose count is desired.
+ *
+ * Return: A pointer to the zone count for the given lock and zone.
+ */
+static inline atomic_t *get_zone_count_ptr(struct recovery_journal *journal,
+ block_count_t lock_number,
+ enum vdo_zone_type zone_type)
+{
+ return ((zone_type == VDO_ZONE_TYPE_LOGICAL)
+ ? &journal->lock_counter.logical_zone_counts[lock_number]
+ : &journal->lock_counter.physical_zone_counts[lock_number]);
+}
+
+/**
+ * get_counter() - Get the zone counter for a given lock on a given zone.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to get.
+ * @zone_type: The zone type whose count is desired.
+ * @zone_id: The zone index whose count is desired.
+ *
+ * Return: A pointer to the counter for the given lock and zone.
+ */
+static inline u16 *get_counter(struct recovery_journal *journal,
+ block_count_t lock_number, enum vdo_zone_type zone_type,
+ zone_count_t zone_id)
+{
+ struct lock_counter *counter = &journal->lock_counter;
+ block_count_t zone_counter = (counter->locks * zone_id) + lock_number;
+
+ if (zone_type == VDO_ZONE_TYPE_JOURNAL)
+ return &counter->journal_counters[zone_counter];
+
+ if (zone_type == VDO_ZONE_TYPE_LOGICAL)
+ return &counter->logical_counters[zone_counter];
+
+ return &counter->physical_counters[zone_counter];
+}
+
+static atomic_t *get_decrement_counter(struct recovery_journal *journal,
+ block_count_t lock_number)
+{
+ return &journal->lock_counter.journal_decrement_counts[lock_number];
+}
+
+/**
+ * is_journal_zone_locked() - Check whether the journal zone is locked for a given lock.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to check.
+ *
+ * Return: true if the journal zone is locked.
+ */
+static bool is_journal_zone_locked(struct recovery_journal *journal,
+ block_count_t lock_number)
+{
+ u16 journal_value = *get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
+ u32 decrements = atomic_read(get_decrement_counter(journal, lock_number));
+
+ /* Pairs with barrier in vdo_release_journal_entry_lock() */
+ smp_rmb();
+ VDO_ASSERT_LOG_ONLY((decrements <= journal_value),
+ "journal zone lock counter must not underflow");
+ return (journal_value != decrements);
+}
+
+/**
+ * vdo_release_recovery_journal_block_reference() - Release a reference to a recovery journal
+ * block.
+ * @journal: The recovery journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ * @zone_type: The type of the zone making the adjustment.
+ * @zone_id: The ID of the zone making the adjustment.
+ *
+ * If this is the last reference for a given zone type, an attempt will be made to reap the
+ * journal.
+ */
+void vdo_release_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id)
+{
+ u16 *current_value;
+ block_count_t lock_number;
+ int prior_state;
+
+ if (sequence_number == 0)
+ return;
+
+ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
+ current_value = get_counter(journal, lock_number, zone_type, zone_id);
+
+ VDO_ASSERT_LOG_ONLY((*current_value >= 1),
+ "decrement of lock counter must not underflow");
+ *current_value -= 1;
+
+ if (zone_type == VDO_ZONE_TYPE_JOURNAL) {
+ if (is_journal_zone_locked(journal, lock_number))
+ return;
+ } else {
+ atomic_t *zone_count;
+
+ if (*current_value != 0)
+ return;
+
+ zone_count = get_zone_count_ptr(journal, lock_number, zone_type);
+
+ if (atomic_add_return(-1, zone_count) > 0)
+ return;
+ }
+
+ /*
+ * Extra barriers because this was originally developed using a CAS operation that implicitly
+ * had them.
+ */
+ smp_mb__before_atomic();
+ prior_state = atomic_cmpxchg(&journal->lock_counter.state,
+ LOCK_COUNTER_STATE_NOT_NOTIFYING,
+ LOCK_COUNTER_STATE_NOTIFYING);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+
+ if (prior_state != LOCK_COUNTER_STATE_NOT_NOTIFYING)
+ return;
+
+ vdo_launch_completion(&journal->lock_counter.completion);
+}
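The tail of the function above is a notify-once gate: several zones may drop the last reference on a lock at about the same time, but only the caller whose cmpxchg() moves the lock counter from NOT_NOTIFYING to NOTIFYING launches the completion; the state is put back in reap_recovery_journal_callback(). Distilled into a sketch (not part of the patch):

/*
 * Illustrative sketch, not part of the patch: returns true for exactly one
 * caller per notification cycle.
 */
static bool example_try_become_notifier(atomic_t *state)
{
	int prior;

	/* Barriers mirror the ones wrapped around the cmpxchg above. */
	smp_mb__before_atomic();
	prior = atomic_cmpxchg(state, LOCK_COUNTER_STATE_NOT_NOTIFYING,
			       LOCK_COUNTER_STATE_NOTIFYING);
	smp_mb__after_atomic();

	return prior == LOCK_COUNTER_STATE_NOT_NOTIFYING;
}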
+
+static inline struct recovery_journal_block * __must_check get_journal_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct recovery_journal_block, list_node);
+}
+
+/**
+ * pop_free_list() - Get a block from the end of the free list.
+ * @journal: The journal.
+ *
+ * Return: The block or NULL if the list is empty.
+ */
+static struct recovery_journal_block * __must_check pop_free_list(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block;
+
+ if (list_empty(&journal->free_tail_blocks))
+ return NULL;
+
+ block = list_last_entry(&journal->free_tail_blocks,
+ struct recovery_journal_block, list_node);
+ list_del_init(&block->list_node);
+ return block;
+}
+
+/**
+ * is_block_dirty() - Check whether a recovery block is dirty.
+ * @block: The block to check.
+ *
+ * A block is dirty if it has any uncommitted entries, which includes both entries that have not
+ * yet been written and entries that have been written but not yet acknowledged.
+ *
+ * Return: true if the block has any uncommitted entries.
+ */
+static inline bool __must_check is_block_dirty(const struct recovery_journal_block *block)
+{
+ return (block->uncommitted_entry_count > 0);
+}
+
+/**
+ * is_block_empty() - Check whether a journal block is empty.
+ * @block: The block to check.
+ *
+ * Return: true if the block has no entries.
+ */
+static inline bool __must_check is_block_empty(const struct recovery_journal_block *block)
+{
+ return (block->entry_count == 0);
+}
+
+/**
+ * is_block_full() - Check whether a journal block is full.
+ * @block: The block to check.
+ *
+ * Return: true if the block is full.
+ */
+static inline bool __must_check is_block_full(const struct recovery_journal_block *block)
+{
+ return ((block == NULL) || (block->journal->entries_per_block == block->entry_count));
+}
+
+/**
+ * assert_on_journal_thread() - Assert that we are running on the journal thread.
+ * @journal: The journal.
+ * @function_name: The function doing the check (for logging).
+ */
+static void assert_on_journal_thread(struct recovery_journal *journal,
+ const char *function_name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
+ "%s() called on journal thread", function_name);
+}
+
+/**
+ * continue_waiter() - Release a data_vio from the journal.
+ *
+ * Invoked whenever a data_vio is to be released from the journal, either because its entry was
+ * committed to disk, or because there was an error. Implements waiter_callback_fn.
+ */
+static void continue_waiter(struct vdo_waiter *waiter, void *context)
+{
+ continue_data_vio_with_error(vdo_waiter_as_data_vio(waiter), *((int *) context));
+}
+
+/**
+ * has_block_waiters() - Check whether the journal has any waiters on any blocks.
+ * @journal: The journal in question.
+ *
+ * Return: true if any block has a waiter.
+ */
+static inline bool has_block_waiters(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block = get_journal_block(&journal->active_tail_blocks);
+
+ /*
+ * Either the first active tail block (if it exists) has waiters, or no active tail block
+ * has waiters.
+ */
+ return ((block != NULL) &&
+ (vdo_waitq_has_waiters(&block->entry_waiters) ||
+ vdo_waitq_has_waiters(&block->commit_waiters)));
+}
+
+static void recycle_journal_blocks(struct recovery_journal *journal);
+static void recycle_journal_block(struct recovery_journal_block *block);
+static void notify_commit_waiters(struct recovery_journal *journal);
+
+/**
+ * suspend_lock_counter() - Prevent the lock counter from notifying.
+ * @counter: The counter.
+ *
+ * Return: true if the lock counter was not notifying and hence the suspend was efficacious.
+ */
+static bool suspend_lock_counter(struct lock_counter *counter)
+{
+ int prior_state;
+
+ /*
+ * Extra barriers because this was originally developed using a CAS operation that
+ * implicitly had them.
+ */
+ smp_mb__before_atomic();
+ prior_state = atomic_cmpxchg(&counter->state, LOCK_COUNTER_STATE_NOT_NOTIFYING,
+ LOCK_COUNTER_STATE_SUSPENDED);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+
+ return ((prior_state == LOCK_COUNTER_STATE_SUSPENDED) ||
+ (prior_state == LOCK_COUNTER_STATE_NOT_NOTIFYING));
+}
+
+static inline bool is_read_only(struct recovery_journal *journal)
+{
+ return vdo_is_read_only(journal->flush_vio->completion.vdo);
+}
+
+/**
+ * check_for_drain_complete() - Check whether the journal has drained.
+ * @journal: The journal which may have just drained.
+ */
+static void check_for_drain_complete(struct recovery_journal *journal)
+{
+ int result = VDO_SUCCESS;
+
+ if (is_read_only(journal)) {
+ result = VDO_READ_ONLY;
+ /*
+ * Clean up any full active blocks which were not written due to read-only mode.
+ *
+ * FIXME: This would probably be better as a short-circuit in write_block().
+ */
+ notify_commit_waiters(journal);
+ recycle_journal_blocks(journal);
+
+ /* Release any data_vios waiting to be assigned entries. */
+ vdo_waitq_notify_all_waiters(&journal->entry_waiters,
+ continue_waiter, &result);
+ }
+
+ if (!vdo_is_state_draining(&journal->state) ||
+ journal->reaping ||
+ has_block_waiters(journal) ||
+ vdo_waitq_has_waiters(&journal->entry_waiters) ||
+ !suspend_lock_counter(&journal->lock_counter))
+ return;
+
+ if (vdo_is_state_saving(&journal->state)) {
+ if (journal->active_block != NULL) {
+ VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
+ !is_block_dirty(journal->active_block)),
+ "journal being saved has clean active block");
+ recycle_journal_block(journal->active_block);
+ }
+
+ VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+ "all blocks in a journal being saved must be inactive");
+ }
+
+ vdo_finish_draining_with_result(&journal->state, result);
+}
+
+/**
+ * notify_recovery_journal_of_read_only_mode() - Notify a recovery journal that the VDO has gone
+ * read-only.
+ * @listener: The journal.
+ * @parent: The completion to notify in order to acknowledge the notification.
+ *
+ * Implements vdo_read_only_notification_fn.
+ */
+static void notify_recovery_journal_of_read_only_mode(void *listener,
+ struct vdo_completion *parent)
+{
+ check_for_drain_complete(listener);
+ vdo_finish_completion(parent);
+}
+
+/**
+ * enter_journal_read_only_mode() - Put the journal in read-only mode.
+ * @journal: The journal which has failed.
+ * @error_code: The error result triggering this call.
+ *
+ * All attempts to add entries after this function is called will fail. All VIOs waiting for
+ * commits will be awakened with an error.
+ */
+static void enter_journal_read_only_mode(struct recovery_journal *journal,
+ int error_code)
+{
+ vdo_enter_read_only_mode(journal->flush_vio->completion.vdo, error_code);
+ check_for_drain_complete(journal);
+}
+
+/**
+ * vdo_get_recovery_journal_current_sequence_number() - Obtain the recovery journal's current
+ * sequence number.
+ * @journal: The journal in question.
+ *
+ * Exposed only so the block map can be initialized therefrom.
+ *
+ * Return: The sequence number of the tail block.
+ */
+sequence_number_t vdo_get_recovery_journal_current_sequence_number(struct recovery_journal *journal)
+{
+ return journal->tail;
+}
+
+/**
+ * get_recovery_journal_head() - Get the head of the recovery journal.
+ * @journal: The journal.
+ *
+ * The head is the lowest sequence number of the block map head and the slab journal head.
+ *
+ * Return: the head of the journal.
+ */
+static inline sequence_number_t get_recovery_journal_head(const struct recovery_journal *journal)
+{
+ return min(journal->block_map_head, journal->slab_journal_head);
+}
+
+/**
+ * compute_recovery_count_byte() - Compute the recovery count byte for a given recovery count.
+ * @recovery_count: The recovery count.
+ *
+ * Return: The byte corresponding to the recovery count.
+ */
+static inline u8 __must_check compute_recovery_count_byte(u64 recovery_count)
+{
+ return (u8)(recovery_count & RECOVERY_COUNT_MASK);
+}
+
+/**
+ * check_slab_journal_commit_threshold() - Check whether the journal is over the threshold, and if
+ * so, force the oldest slab journal tail block to commit.
+ * @journal: The journal.
+ */
+static void check_slab_journal_commit_threshold(struct recovery_journal *journal)
+{
+ block_count_t current_length = journal->tail - journal->slab_journal_head;
+
+ if (current_length > journal->slab_journal_commit_threshold) {
+ journal->events.slab_journal_commits_requested++;
+ vdo_commit_oldest_slab_journal_tail_blocks(journal->depot,
+ journal->slab_journal_head);
+ }
+}
+
+static void reap_recovery_journal(struct recovery_journal *journal);
+static void assign_entries(struct recovery_journal *journal);
+
+/**
+ * finish_reaping() - Finish reaping the journal.
+ * @journal: The journal being reaped.
+ */
+static void finish_reaping(struct recovery_journal *journal)
+{
+ block_count_t blocks_reaped;
+ sequence_number_t old_head = get_recovery_journal_head(journal);
+
+ journal->block_map_head = journal->block_map_reap_head;
+ journal->slab_journal_head = journal->slab_journal_reap_head;
+ blocks_reaped = get_recovery_journal_head(journal) - old_head;
+ journal->available_space += blocks_reaped * journal->entries_per_block;
+ journal->reaping = false;
+ check_slab_journal_commit_threshold(journal);
+ assign_entries(journal);
+ check_for_drain_complete(journal);
+}
+
+/**
+ * complete_reaping() - Finish reaping the journal after flushing the lower layer.
+ * @completion: The journal's flush VIO.
+ *
+ * This is the callback registered in reap_recovery_journal().
+ */
+static void complete_reaping(struct vdo_completion *completion)
+{
+ struct recovery_journal *journal = completion->parent;
+
+ finish_reaping(journal);
+
+ /* Try reaping again in case more locks were released while flush was out. */
+ reap_recovery_journal(journal);
+}
+
+/**
+ * handle_flush_error() - Handle an error when flushing the lower layer due to reaping.
+ * @completion: The journal's flush VIO.
+ */
+static void handle_flush_error(struct vdo_completion *completion)
+{
+ struct recovery_journal *journal = completion->parent;
+
+ vio_record_metadata_io_error(as_vio(completion));
+ journal->reaping = false;
+ enter_journal_read_only_mode(journal, completion->result);
+}
+
+static void flush_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct recovery_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, complete_reaping, journal->thread_id);
+}
+
+/**
+ * initialize_journal_state() - Set all journal fields appropriately to start journaling from the
+ * current active block.
+ * @journal: The journal to be reset based on its active block.
+ */
+static void initialize_journal_state(struct recovery_journal *journal)
+{
+ journal->append_point.sequence_number = journal->tail;
+ journal->last_write_acknowledged = journal->tail;
+ journal->block_map_head = journal->tail;
+ journal->slab_journal_head = journal->tail;
+ journal->block_map_reap_head = journal->tail;
+ journal->slab_journal_reap_head = journal->tail;
+ journal->block_map_head_block_number =
+ vdo_get_recovery_journal_block_number(journal, journal->block_map_head);
+ journal->slab_journal_head_block_number =
+ vdo_get_recovery_journal_block_number(journal,
+ journal->slab_journal_head);
+ journal->available_space =
+ (journal->entries_per_block * vdo_get_recovery_journal_length(journal->size));
+}
+
+/**
+ * vdo_get_recovery_journal_length() - Get the number of usable recovery journal blocks.
+ * @journal_size: The size of the recovery journal in blocks.
+ *
+ * Return: the number of recovery journal blocks usable for entries.
+ */
+block_count_t vdo_get_recovery_journal_length(block_count_t journal_size)
+{
+ block_count_t reserved_blocks = journal_size / 4;
+
+ if (reserved_blocks > RECOVERY_JOURNAL_RESERVED_BLOCKS)
+ reserved_blocks = RECOVERY_JOURNAL_RESERVED_BLOCKS;
+ return (journal_size - reserved_blocks);
+}
+
+/**
+ * reap_recovery_journal_callback() - Attempt to reap the journal.
+ * @completion: The lock counter completion.
+ *
+ * Attempts to reap the journal now that all the locks on some journal block have been released.
+ * This is the callback registered with the lock counter.
+ */
+static void reap_recovery_journal_callback(struct vdo_completion *completion)
+{
+ struct recovery_journal *journal = (struct recovery_journal *) completion->parent;
+ /*
+ * The acknowledgment must be done before reaping so that there is no race between
+ * acknowledging the notification and unlocks wishing to notify.
+ */
+ smp_wmb();
+ atomic_set(&journal->lock_counter.state, LOCK_COUNTER_STATE_NOT_NOTIFYING);
+
+ if (vdo_is_state_quiescing(&journal->state)) {
+ /*
+ * Don't start reaping when the journal is trying to quiesce. Do check if this
+ * notification is the last thing the journal is waiting on.
+ */
+ check_for_drain_complete(journal);
+ return;
+ }
+
+ reap_recovery_journal(journal);
+ check_slab_journal_commit_threshold(journal);
+}
+
+/**
+ * initialize_lock_counter() - Initialize a lock counter.
+ *
+ * @journal: The recovery journal.
+ * @vdo: The vdo.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check initialize_lock_counter(struct recovery_journal *journal,
+ struct vdo *vdo)
+{
+ int result;
+ struct thread_config *config = &vdo->thread_config;
+ struct lock_counter *counter = &journal->lock_counter;
+
+ result = vdo_allocate(journal->size, u16, __func__, &counter->journal_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size, atomic_t, __func__,
+ &counter->journal_decrement_counts);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size * config->logical_zone_count, u16, __func__,
+ &counter->logical_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size, atomic_t, __func__,
+ &counter->logical_zone_counts);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size * config->physical_zone_count, u16, __func__,
+ &counter->physical_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size, atomic_t, __func__,
+ &counter->physical_zone_counts);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_initialize_completion(&counter->completion, vdo,
+ VDO_LOCK_COUNTER_COMPLETION);
+ vdo_prepare_completion(&counter->completion, reap_recovery_journal_callback,
+ reap_recovery_journal_callback, config->journal_thread,
+ journal);
+ counter->logical_zones = config->logical_zone_count;
+ counter->physical_zones = config->physical_zone_count;
+ counter->locks = journal->size;
+ return VDO_SUCCESS;
+}
+
+/**
+ * set_journal_tail() - Set the journal's tail sequence number.
+ * @journal: The journal whose tail is to be set.
+ * @tail: The new tail value.
+ */
+static void set_journal_tail(struct recovery_journal *journal, sequence_number_t tail)
+{
+ /* VDO does not support sequence numbers above 1 << 48 in the slab journal. */
+ if (tail >= (1ULL << 48))
+ enter_journal_read_only_mode(journal, VDO_JOURNAL_OVERFLOW);
+
+ journal->tail = tail;
+}
+
+/**
+ * initialize_recovery_block() - Initialize a journal block.
+ * @vdo: The vdo from which to construct vios.
+ * @journal: The journal to which the block will belong.
+ * @block: The block to initialize.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_recovery_block(struct vdo *vdo, struct recovery_journal *journal,
+ struct recovery_journal_block *block)
+{
+ char *data;
+ int result;
+
+ /*
+ * Ensure that a block is large enough to store RECOVERY_JOURNAL_ENTRIES_PER_BLOCK entries.
+ */
+ BUILD_BUG_ON(RECOVERY_JOURNAL_ENTRIES_PER_BLOCK >
+ ((VDO_BLOCK_SIZE - sizeof(struct packed_journal_header)) /
+ sizeof(struct packed_recovery_journal_entry)));
+
+ /*
+ * Allocate a full block for the journal block even though not all of the space is used
+ * since the VIO needs to write a full disk block.
+ */
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &data);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
+ VIO_PRIORITY_HIGH, block, 1, data, &block->vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(data);
+ return result;
+ }
+
+ list_add_tail(&block->list_node, &journal->free_tail_blocks);
+ block->journal = journal;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_recovery_journal() - Make a recovery journal and initialize it with the state that
+ * was decoded from the super block.
+ *
+ * @state: The decoded state of the journal.
+ * @nonce: The nonce of the VDO.
+ * @vdo: The VDO.
+ * @partition: The partition for the journal.
+ * @recovery_count: The VDO's number of completed recoveries.
+ * @journal_size: The number of blocks in the journal on disk.
+ * @journal_ptr: The pointer to hold the new recovery journal.
+ *
+ * Return: A success or error code.
+ */
+int vdo_decode_recovery_journal(struct recovery_journal_state_7_0 state, nonce_t nonce,
+ struct vdo *vdo, struct partition *partition,
+ u64 recovery_count, block_count_t journal_size,
+ struct recovery_journal **journal_ptr)
+{
+ block_count_t i;
+ struct recovery_journal *journal;
+ int result;
+
+ result = vdo_allocate_extended(struct recovery_journal,
+ RECOVERY_JOURNAL_RESERVED_BLOCKS,
+ struct recovery_journal_block, __func__,
+ &journal);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ INIT_LIST_HEAD(&journal->free_tail_blocks);
+ INIT_LIST_HEAD(&journal->active_tail_blocks);
+ vdo_waitq_init(&journal->pending_writes);
+
+ journal->thread_id = vdo->thread_config.journal_thread;
+ journal->origin = partition->offset;
+ journal->nonce = nonce;
+ journal->recovery_count = compute_recovery_count_byte(recovery_count);
+ journal->size = journal_size;
+ journal->slab_journal_commit_threshold = (journal_size * 2) / 3;
+ journal->logical_blocks_used = state.logical_blocks_used;
+ journal->block_map_data_blocks = state.block_map_data_blocks;
+ journal->entries_per_block = RECOVERY_JOURNAL_ENTRIES_PER_BLOCK;
+ set_journal_tail(journal, state.journal_start);
+ initialize_journal_state(journal);
+ /* TODO: this will have to change if we make initial resume of a VDO a real resume */
+ vdo_set_admin_state_code(&journal->state, VDO_ADMIN_STATE_SUSPENDED);
+
+ for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) {
+ struct recovery_journal_block *block = &journal->blocks[i];
+
+ result = initialize_recovery_block(vdo, journal, block);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+ }
+
+ result = initialize_lock_counter(journal, vdo);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ result = create_metadata_vio(vdo, VIO_TYPE_RECOVERY_JOURNAL, VIO_PRIORITY_HIGH,
+ journal, NULL, &journal->flush_vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ result = vdo_register_read_only_listener(vdo, journal,
+ notify_recovery_journal_of_read_only_mode,
+ journal->thread_id);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ result = vdo_make_default_thread(vdo, journal->thread_id);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ journal->flush_vio->completion.callback_thread_id = journal->thread_id;
+ *journal_ptr = journal;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_recovery_journal() - Free a recovery journal.
+ * @journal: The recovery journal to free.
+ */
+void vdo_free_recovery_journal(struct recovery_journal *journal)
+{
+ block_count_t i;
+
+ if (journal == NULL)
+ return;
+
+ vdo_free(vdo_forget(journal->lock_counter.logical_zone_counts));
+ vdo_free(vdo_forget(journal->lock_counter.physical_zone_counts));
+ vdo_free(vdo_forget(journal->lock_counter.journal_counters));
+ vdo_free(vdo_forget(journal->lock_counter.journal_decrement_counts));
+ vdo_free(vdo_forget(journal->lock_counter.logical_counters));
+ vdo_free(vdo_forget(journal->lock_counter.physical_counters));
+ free_vio(vdo_forget(journal->flush_vio));
+
+ /*
+ * FIXME: eventually, the journal should be constructed in a quiescent state which
+ * requires opening before use.
+ */
+ if (!vdo_is_state_quiescent(&journal->state)) {
+ VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+ "journal being freed has no active tail blocks");
+ } else if (!vdo_is_state_saved(&journal->state) &&
+ !list_empty(&journal->active_tail_blocks)) {
+ vdo_log_warning("journal being freed has uncommitted entries");
+ }
+
+ for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) {
+ struct recovery_journal_block *block = &journal->blocks[i];
+
+ vdo_free(vdo_forget(block->vio.data));
+ free_vio_components(&block->vio);
+ }
+
+ vdo_free(journal);
+}
+
+/**
+ * vdo_initialize_recovery_journal_post_repair() - Initialize the journal after a repair.
+ * @journal: The journal in question.
+ * @recovery_count: The number of completed recoveries.
+ * @tail: The new tail block sequence number.
+ * @logical_blocks_used: The new number of logical blocks used.
+ * @block_map_data_blocks: The new number of block map data blocks.
+ */
+void vdo_initialize_recovery_journal_post_repair(struct recovery_journal *journal,
+ u64 recovery_count,
+ sequence_number_t tail,
+ block_count_t logical_blocks_used,
+ block_count_t block_map_data_blocks)
+{
+ set_journal_tail(journal, tail + 1);
+ journal->recovery_count = compute_recovery_count_byte(recovery_count);
+ initialize_journal_state(journal);
+ journal->logical_blocks_used = logical_blocks_used;
+ journal->block_map_data_blocks = block_map_data_blocks;
+}
+
+/**
+ * vdo_get_journal_block_map_data_blocks_used() - Get the number of block map pages, allocated from
+ * data blocks, currently in use.
+ * @journal: The journal in question.
+ *
+ * Return: The number of block map pages allocated from slabs.
+ */
+block_count_t vdo_get_journal_block_map_data_blocks_used(struct recovery_journal *journal)
+{
+ return journal->block_map_data_blocks;
+}
+
+/**
+ * vdo_get_recovery_journal_thread_id() - Get the ID of a recovery journal's thread.
+ * @journal: The journal to query.
+ *
+ * Return: The ID of the journal's thread.
+ */
+thread_id_t vdo_get_recovery_journal_thread_id(struct recovery_journal *journal)
+{
+ return journal->thread_id;
+}
+
+/**
+ * vdo_open_recovery_journal() - Prepare the journal for new entries.
+ * @journal: The journal in question.
+ * @depot: The slab depot for this VDO.
+ * @block_map: The block map for this VDO.
+ */
+void vdo_open_recovery_journal(struct recovery_journal *journal,
+ struct slab_depot *depot, struct block_map *block_map)
+{
+ journal->depot = depot;
+ journal->block_map = block_map;
+ WRITE_ONCE(journal->state.current_state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+}
+
+/**
+ * vdo_record_recovery_journal() - Record the state of a recovery journal for encoding in the super
+ * block.
+ * @journal: the recovery journal.
+ *
+ * Return: the state of the journal.
+ */
+struct recovery_journal_state_7_0
+vdo_record_recovery_journal(const struct recovery_journal *journal)
+{
+ struct recovery_journal_state_7_0 state = {
+ .logical_blocks_used = journal->logical_blocks_used,
+ .block_map_data_blocks = journal->block_map_data_blocks,
+ };
+
+ if (vdo_is_state_saved(&journal->state)) {
+ /*
+ * If the journal is saved, we should start one past the active block (since the
+ * active block is not guaranteed to be empty).
+ */
+ state.journal_start = journal->tail;
+ } else {
+ /*
+ * When we're merely suspended or have gone read-only, we must record the first
+ * block that might have entries that need to be applied.
+ */
+ state.journal_start = get_recovery_journal_head(journal);
+ }
+
+ return state;
+}
+
+/**
+ * get_block_header() - Get a pointer to the packed journal block header in the block buffer.
+ * @block: The recovery block.
+ *
+ * Return: The block's header.
+ */
+static inline struct packed_journal_header *
+get_block_header(const struct recovery_journal_block *block)
+{
+ return (struct packed_journal_header *) block->vio.data;
+}
+
+/**
+ * set_active_sector() - Set the current sector of the current block and initialize it.
+ * @block: The block to update.
+ * @sector: A pointer to the first byte of the new sector.
+ */
+static void set_active_sector(struct recovery_journal_block *block, void *sector)
+{
+ block->sector = sector;
+ block->sector->check_byte = get_block_header(block)->check_byte;
+ block->sector->recovery_count = block->journal->recovery_count;
+ block->sector->entry_count = 0;
+}
+
+/**
+ * advance_tail() - Advance the tail of the journal.
+ * @journal: The journal whose tail should be advanced.
+ *
+ * Return: true if the tail was advanced.
+ */
+static bool advance_tail(struct recovery_journal *journal)
+{
+ struct recovery_block_header unpacked;
+ struct packed_journal_header *header;
+ struct recovery_journal_block *block;
+
+ block = journal->active_block = pop_free_list(journal);
+ if (block == NULL)
+ return false;
+
+ list_move_tail(&block->list_node, &journal->active_tail_blocks);
+
+ unpacked = (struct recovery_block_header) {
+ .metadata_type = VDO_METADATA_RECOVERY_JOURNAL_2,
+ .block_map_data_blocks = journal->block_map_data_blocks,
+ .logical_blocks_used = journal->logical_blocks_used,
+ .nonce = journal->nonce,
+ .recovery_count = journal->recovery_count,
+ .sequence_number = journal->tail,
+ .check_byte = vdo_compute_recovery_journal_check_byte(journal,
+ journal->tail),
+ };
+
+ header = get_block_header(block);
+ memset(block->vio.data, 0x0, VDO_BLOCK_SIZE);
+ block->sequence_number = journal->tail;
+ block->entry_count = 0;
+ block->uncommitted_entry_count = 0;
+ block->block_number = vdo_get_recovery_journal_block_number(journal,
+ journal->tail);
+
+ vdo_pack_recovery_block_header(&unpacked, header);
+ set_active_sector(block, vdo_get_journal_block_sector(header, 1));
+ set_journal_tail(journal, journal->tail + 1);
+ vdo_advance_block_map_era(journal->block_map, journal->tail);
+ return true;
+}
+
+/**
+ * initialize_lock_count() - Initialize the value of the journal zone's counter for a given lock.
+ * @journal: The recovery journal.
+ *
+ * Context: This must be called from the journal zone.
+ */
+static void initialize_lock_count(struct recovery_journal *journal)
+{
+ u16 *journal_value;
+ block_count_t lock_number = journal->active_block->block_number;
+ atomic_t *decrement_counter = get_decrement_counter(journal, lock_number);
+
+ journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
+ VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
+ "count to be initialized not in use");
+ *journal_value = journal->entries_per_block + 1;
+ atomic_set(decrement_counter, 0);
+}
+
+/**
+ * prepare_to_assign_entry() - Prepare the currently active block to receive an entry and check
+ * whether an entry of the given type may be assigned at this time.
+ * @journal: The journal receiving an entry.
+ *
+ * Return: true if there is space in the journal to store an entry of the specified type.
+ */
+static bool prepare_to_assign_entry(struct recovery_journal *journal)
+{
+ if (journal->available_space == 0)
+ return false;
+
+ if (is_block_full(journal->active_block) && !advance_tail(journal))
+ return false;
+
+ if (!is_block_empty(journal->active_block))
+ return true;
+
+ if ((journal->tail - get_recovery_journal_head(journal)) > journal->size) {
+ /* Cannot use this block since the journal is full. */
+ journal->events.disk_full++;
+ return false;
+ }
+
+ /*
+ * Don't allow the new block to be reaped until all of its entries have been committed to
+ * the block map and until the journal block has been fully committed as well. Because the
+ * block map update is done only after any slab journal entries have been made, the
+ * per-entry lock for the block map entry serves to protect those as well.
+ */
+ initialize_lock_count(journal);
+ return true;
+}
+
+static void write_blocks(struct recovery_journal *journal);
+
+/**
+ * schedule_block_write() - Queue a block for writing.
+ * @journal: The journal in question.
+ * @block: The block which is now ready to write.
+ *
+ * The block is expected to be full. If the block is currently writing, this is a noop as the block
+ * will be queued for writing when the write finishes. The block must not currently be queued for
+ * writing.
+ */
+static void schedule_block_write(struct recovery_journal *journal,
+ struct recovery_journal_block *block)
+{
+ if (!block->committing)
+ vdo_waitq_enqueue_waiter(&journal->pending_writes, &block->write_waiter);
+ /*
+ * At the end of adding entries, or discovering this partial block is now full and ready to
+ * rewrite, we will call write_blocks() and write a whole batch.
+ */
+}
+
+/**
+ * release_journal_block_reference() - Release a reference to a journal block.
+ * @block: The journal block from which to release a reference.
+ */
+static void release_journal_block_reference(struct recovery_journal_block *block)
+{
+ vdo_release_recovery_journal_block_reference(block->journal,
+ block->sequence_number,
+ VDO_ZONE_TYPE_JOURNAL, 0);
+}
+
+static void update_usages(struct recovery_journal *journal, struct data_vio *data_vio)
+{
+ if (data_vio->increment_updater.operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ journal->block_map_data_blocks++;
+ return;
+ }
+
+ if (data_vio->new_mapped.state != VDO_MAPPING_STATE_UNMAPPED)
+ journal->logical_blocks_used++;
+
+ if (data_vio->mapped.state != VDO_MAPPING_STATE_UNMAPPED)
+ journal->logical_blocks_used--;
+}
+
+/**
+ * assign_entry() - Assign an entry waiter to the active block.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void assign_entry(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct recovery_journal_block *block = context;
+ struct recovery_journal *journal = block->journal;
+
+ /* Record the point at which we will make the journal entry. */
+ data_vio->recovery_journal_point = (struct journal_point) {
+ .sequence_number = block->sequence_number,
+ .entry_count = block->entry_count,
+ };
+
+ update_usages(journal, data_vio);
+ journal->available_space--;
+
+ if (!vdo_waitq_has_waiters(&block->entry_waiters))
+ journal->events.blocks.started++;
+
+ vdo_waitq_enqueue_waiter(&block->entry_waiters, &data_vio->waiter);
+ block->entry_count++;
+ block->uncommitted_entry_count++;
+ journal->events.entries.started++;
+
+ if (is_block_full(block)) {
+ /*
+ * The block is full, so we can write it anytime henceforth. If it is already
+ * committing, we'll queue it for writing when it comes back.
+ */
+ schedule_block_write(journal, block);
+ }
+
+ /* Force out slab journal tail blocks when threshold is reached. */
+ check_slab_journal_commit_threshold(journal);
+}
+
+static void assign_entries(struct recovery_journal *journal)
+{
+ if (journal->adding_entries) {
+ /* Protect against re-entrancy. */
+ return;
+ }
+
+ journal->adding_entries = true;
+ while (vdo_waitq_has_waiters(&journal->entry_waiters) &&
+ prepare_to_assign_entry(journal)) {
+ vdo_waitq_notify_next_waiter(&journal->entry_waiters,
+ assign_entry, journal->active_block);
+ }
+
+ /* Now that we've finished with entries, see if we have a batch of blocks to write. */
+ write_blocks(journal);
+ journal->adding_entries = false;
+}
+
+/**
+ * recycle_journal_block() - Prepare an in-memory journal block to be reused now that it has been
+ * fully committed.
+ * @block: The block to be recycled.
+ */
+static void recycle_journal_block(struct recovery_journal_block *block)
+{
+ struct recovery_journal *journal = block->journal;
+ block_count_t i;
+
+ list_move_tail(&block->list_node, &journal->free_tail_blocks);
+
+ /* Release any unused entry locks. */
+ for (i = block->entry_count; i < journal->entries_per_block; i++)
+ release_journal_block_reference(block);
+
+ /*
+ * Release our own lock against reaping now that the block is completely committed, or
+ * we're giving up because we're in read-only mode.
+ */
+ if (block->entry_count > 0)
+ release_journal_block_reference(block);
+
+ if (block == journal->active_block)
+ journal->active_block = NULL;
+}
+
+/**
+ * continue_committed_waiter() - Invoked whenever a VIO is to be released from the journal because
+ * its entry was committed to disk.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void continue_committed_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct recovery_journal *journal = context;
+ int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS);
+ bool has_decrement;
+
+ VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
+ &data_vio->recovery_journal_point),
+ "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
+ (unsigned long long) journal->commit_point.sequence_number,
+ journal->commit_point.entry_count,
+ (unsigned long long) data_vio->recovery_journal_point.sequence_number,
+ data_vio->recovery_journal_point.entry_count);
+
+ journal->commit_point = data_vio->recovery_journal_point;
+ data_vio->last_async_operation = VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS;
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ /*
+ * The increment must be launched first since it must come before the
+ * decrement if they are in the same slab.
+ */
+ has_decrement = (data_vio->decrement_updater.zpbn.pbn != VDO_ZERO_BLOCK);
+ if ((data_vio->increment_updater.zpbn.pbn != VDO_ZERO_BLOCK) || !has_decrement)
+ continue_data_vio(data_vio);
+
+ if (has_decrement)
+ vdo_launch_completion(&data_vio->decrement_completion);
+}
+
+/**
+ * notify_commit_waiters() - Notify any VIOs whose entries have now committed.
+ * @journal: The recovery journal to update.
+ */
+static void notify_commit_waiters(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block;
+
+ list_for_each_entry(block, &journal->active_tail_blocks, list_node) {
+ if (block->committing)
+ return;
+
+ vdo_waitq_notify_all_waiters(&block->commit_waiters,
+ continue_committed_waiter, journal);
+ if (is_read_only(journal)) {
+ vdo_waitq_notify_all_waiters(&block->entry_waiters,
+ continue_committed_waiter,
+ journal);
+ } else if (is_block_dirty(block) || !is_block_full(block)) {
+ /* Stop at partially-committed or partially-filled blocks. */
+ return;
+ }
+ }
+}
+
+/**
+ * recycle_journal_blocks() - Recycle any journal blocks which have been fully committed.
+ * @journal: The recovery journal to update.
+ */
+static void recycle_journal_blocks(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block, *tmp;
+
+ list_for_each_entry_safe(block, tmp, &journal->active_tail_blocks, list_node) {
+ if (block->committing) {
+ /* Don't recycle committing blocks. */
+ return;
+ }
+
+ if (!is_read_only(journal) &&
+ (is_block_dirty(block) || !is_block_full(block))) {
+ /*
+ * Don't recycle partially written or partially full blocks, except in
+ * read-only mode.
+ */
+ return;
+ }
+
+ recycle_journal_block(block);
+ }
+}
+
+/**
+ * complete_write() - Handle post-commit processing.
+ * @completion: The completion of the VIO writing this block.
+ *
+ * This is the callback registered by write_block(). If more entries accumulated in the block being
+ * committed while the commit was in progress, another commit will be initiated.
+ */
+static void complete_write(struct vdo_completion *completion)
+{
+ struct recovery_journal_block *block = completion->parent;
+ struct recovery_journal *journal = block->journal;
+ struct recovery_journal_block *last_active_block;
+
+ assert_on_journal_thread(journal, __func__);
+
+ journal->pending_write_count -= 1;
+ journal->events.blocks.committed += 1;
+ journal->events.entries.committed += block->entries_in_commit;
+ block->uncommitted_entry_count -= block->entries_in_commit;
+ block->entries_in_commit = 0;
+ block->committing = false;
+
+ /* If this block is the latest block to be acknowledged, record that fact. */
+ if (block->sequence_number > journal->last_write_acknowledged)
+ journal->last_write_acknowledged = block->sequence_number;
+
+ last_active_block = get_journal_block(&journal->active_tail_blocks);
+ VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
+ "completed journal write is still active");
+
+ notify_commit_waiters(journal);
+
+ /*
+ * Is this block now full? Reaping, and adding entries, might have already sent it off for
+ * rewriting; else, queue it for rewrite.
+ */
+ if (is_block_dirty(block) && is_block_full(block))
+ schedule_block_write(journal, block);
+
+ recycle_journal_blocks(journal);
+ write_blocks(journal);
+
+ check_for_drain_complete(journal);
+}
+
+static void handle_write_error(struct vdo_completion *completion)
+{
+ struct recovery_journal_block *block = completion->parent;
+ struct recovery_journal *journal = block->journal;
+
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_log_error_strerror(completion->result,
+ "cannot write recovery journal block %llu",
+ (unsigned long long) block->sequence_number);
+ enter_journal_read_only_mode(journal, completion->result);
+ complete_write(completion);
+}
+
+static void complete_write_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct recovery_journal_block *block = vio->completion.parent;
+ struct recovery_journal *journal = block->journal;
+
+ continue_vio_after_io(vio, complete_write, journal->thread_id);
+}
+
+/**
+ * add_queued_recovery_entries() - Actually add entries from the queue to the given block.
+ * @block: The journal block.
+ */
+static void add_queued_recovery_entries(struct recovery_journal_block *block)
+{
+ while (vdo_waitq_has_waiters(&block->entry_waiters)) {
+ struct data_vio *data_vio =
+ vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&block->entry_waiters));
+ struct tree_lock *lock = &data_vio->tree_lock;
+ struct packed_recovery_journal_entry *packed_entry;
+ struct recovery_journal_entry new_entry;
+
+ if (block->sector->entry_count == RECOVERY_JOURNAL_ENTRIES_PER_SECTOR)
+ set_active_sector(block,
+ (char *) block->sector + VDO_SECTOR_SIZE);
+
+ /* Compose and encode the entry. */
+ packed_entry = &block->sector->entries[block->sector->entry_count++];
+ new_entry = (struct recovery_journal_entry) {
+ .mapping = {
+ .pbn = data_vio->increment_updater.zpbn.pbn,
+ .state = data_vio->increment_updater.zpbn.state,
+ },
+ .unmapping = {
+ .pbn = data_vio->decrement_updater.zpbn.pbn,
+ .state = data_vio->decrement_updater.zpbn.state,
+ },
+ .operation = data_vio->increment_updater.operation,
+ .slot = lock->tree_slots[lock->height].block_map_slot,
+ };
+ *packed_entry = vdo_pack_recovery_journal_entry(&new_entry);
+ data_vio->recovery_sequence_number = block->sequence_number;
+
+ /* Enqueue the data_vio to wait for its entry to commit. */
+ vdo_waitq_enqueue_waiter(&block->commit_waiters, &data_vio->waiter);
+ }
+}
+
+/**
+ * write_block() - Issue a block for writing.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void write_block(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ struct recovery_journal_block *block =
+ container_of(waiter, struct recovery_journal_block, write_waiter);
+ struct recovery_journal *journal = block->journal;
+ struct packed_journal_header *header = get_block_header(block);
+
+ if (block->committing || !vdo_waitq_has_waiters(&block->entry_waiters) ||
+ is_read_only(journal))
+ return;
+
+ block->entries_in_commit = vdo_waitq_num_waiters(&block->entry_waiters);
+ add_queued_recovery_entries(block);
+
+ journal->pending_write_count += 1;
+ journal->events.blocks.written += 1;
+ journal->events.entries.written += block->entries_in_commit;
+
+ header->block_map_head = __cpu_to_le64(journal->block_map_head);
+ header->slab_journal_head = __cpu_to_le64(journal->slab_journal_head);
+ header->entry_count = __cpu_to_le16(block->entry_count);
+
+ block->committing = true;
+
+ /*
+ * We must issue a flush and a FUA for every commit. The flush is necessary to ensure that
+ * the data being referenced is stable. The FUA is necessary to ensure that the journal
+ * block itself is stable before allowing overwrites of the lbn's previous data.
+ */
+ vdo_submit_metadata_vio(&block->vio, journal->origin + block->block_number,
+ complete_write_endio, handle_write_error,
+ REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH | REQ_SYNC | REQ_FUA);
+}
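The operation flags at the bottom of write_block() encode the ordering argument from its final comment: REQ_PREFLUSH flushes the device cache so the data the new entries reference is durable before the journal block lands, and REQ_FUA keeps the journal block itself from being acknowledged while it only sits in volatile cache. Isolated as a sketch (not part of the patch):

#include <linux/blk_types.h>

/*
 * Illustrative sketch, not part of the patch: the flag set a recovery
 * journal commit uses -- flush what the entries reference, then force the
 * journal block itself through to stable media.
 */
static inline blk_opf_t example_journal_commit_opf(void)
{
	return REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH | REQ_SYNC | REQ_FUA;
}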
+
+/**
+ * write_blocks() - Attempt to commit blocks, according to write policy.
+ * @journal: The recovery journal.
+ */
+static void write_blocks(struct recovery_journal *journal)
+{
+ assert_on_journal_thread(journal, __func__);
+ /*
+ * We call this function after adding entries to the journal and after finishing a block
+ * write. Thus, when this function terminates we must either have no VIOs waiting in the
+ * journal or have some outstanding IO to provide a future wakeup.
+ *
+ * We want to only issue full blocks if there are no pending writes. However, if there are
+ * no outstanding writes and some unwritten entries, we must issue a block, even if it's
+ * the active block and it isn't full.
+ */
+ if (journal->pending_write_count > 0)
+ return;
+
+ /* Write all the full blocks. */
+ vdo_waitq_notify_all_waiters(&journal->pending_writes, write_block, NULL);
+
+ /*
+ * Do we need to write the active block? Only if we have no outstanding writes, even after
+ * issuing all of the full writes.
+ */
+ if ((journal->pending_write_count == 0) && (journal->active_block != NULL))
+ write_block(&journal->active_block->write_waiter, NULL);
+}
+
+/**
+ * vdo_add_recovery_journal_entry() - Add an entry to a recovery journal.
+ * @journal: The journal in which to make an entry.
+ * @data_vio: The data_vio for which to add the entry. The entry will be taken
+ * from the logical and new_mapped fields of the data_vio. The
+ * data_vio's recovery_sequence_number field will be set to the
+ * sequence number of the journal block in which the entry was
+ * made.
+ *
+ * This method is asynchronous. The data_vio will not be called back until the entry is committed
+ * to the on-disk journal.
+ */
+void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
+ struct data_vio *data_vio)
+{
+ assert_on_journal_thread(journal, __func__);
+ if (!vdo_is_state_normal(&journal->state)) {
+ continue_data_vio_with_error(data_vio, VDO_INVALID_ADMIN_STATE);
+ return;
+ }
+
+ if (is_read_only(journal)) {
+ continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
+ "journal lock not held for new entry");
+
+ vdo_advance_journal_point(&journal->append_point, journal->entries_per_block);
+ vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter);
+ assign_entries(journal);
+}
+
+/**
+ * is_lock_locked() - Check whether a lock is locked for a zone type.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to check.
+ * @zone_type: The type of the zone.
+ *
+ * If the recovery journal has a lock on the lock number, both logical and physical zones are
+ * considered locked.
+ *
+ * Return: true if the specified lock has references (is locked).
+ */
+static bool is_lock_locked(struct recovery_journal *journal, block_count_t lock_number,
+ enum vdo_zone_type zone_type)
+{
+ atomic_t *zone_count;
+ bool locked;
+
+ if (is_journal_zone_locked(journal, lock_number))
+ return true;
+
+ zone_count = get_zone_count_ptr(journal, lock_number, zone_type);
+ locked = (atomic_read(zone_count) != 0);
+ /* Pairs with implicit barrier in vdo_release_recovery_journal_block_reference() */
+ smp_rmb();
+ return locked;
+}
+
+/**
+ * reap_recovery_journal() - Conduct a sweep on a recovery journal to reclaim unreferenced blocks.
+ * @journal: The recovery journal.
+ */
+static void reap_recovery_journal(struct recovery_journal *journal)
+{
+ if (journal->reaping) {
+ /*
+ * We already have an outstanding reap in progress. We need to wait for it to
+ * finish.
+ */
+ return;
+ }
+
+ if (vdo_is_state_quiescent(&journal->state)) {
+ /* We are not supposed to do IO. Don't botch it by reaping. */
+ return;
+ }
+
+ /*
+ * Start reclaiming blocks only when the journal head has no references. Then stop when a
+ * block is referenced.
+ */
+ while ((journal->block_map_reap_head < journal->last_write_acknowledged) &&
+ !is_lock_locked(journal, journal->block_map_head_block_number,
+ VDO_ZONE_TYPE_LOGICAL)) {
+ journal->block_map_reap_head++;
+ if (++journal->block_map_head_block_number == journal->size)
+ journal->block_map_head_block_number = 0;
+ }
+
+ while ((journal->slab_journal_reap_head < journal->last_write_acknowledged) &&
+ !is_lock_locked(journal, journal->slab_journal_head_block_number,
+ VDO_ZONE_TYPE_PHYSICAL)) {
+ journal->slab_journal_reap_head++;
+ if (++journal->slab_journal_head_block_number == journal->size)
+ journal->slab_journal_head_block_number = 0;
+ }
+
+ if ((journal->block_map_reap_head == journal->block_map_head) &&
+ (journal->slab_journal_reap_head == journal->slab_journal_head)) {
+ /* Nothing happened. */
+ return;
+ }
+
+ /*
+ * If the block map head will advance, we must flush any block map page modified by the
+ * entries we are reaping. If the slab journal head will advance, we must flush the slab
+ * summary update covering the slab journal that just released some lock.
+ */
+ journal->reaping = true;
+ vdo_submit_flush_vio(journal->flush_vio, flush_endio, handle_flush_error);
+}
+
+/**
+ * vdo_acquire_recovery_journal_block_reference() - Acquire a reference to a recovery journal block
+ * from somewhere other than the journal itself.
+ * @journal: The recovery journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ * @zone_type: The type of the zone making the adjustment.
+ * @zone_id: The ID of the zone making the adjustment.
+ */
+void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id)
+{
+ block_count_t lock_number;
+ u16 *current_value;
+
+ if (sequence_number == 0)
+ return;
+
+ VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
+ "invalid lock count increment from journal zone");
+
+ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
+ current_value = get_counter(journal, lock_number, zone_type, zone_id);
+ VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX,
+ "increment of lock counter must not overflow");
+
+ if (*current_value == 0) {
+ /*
+ * This zone is acquiring this lock for the first time. Extra barriers because this
+ * was originally developed using an atomic add operation that implicitly had them.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(get_zone_count_ptr(journal, lock_number, zone_type));
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+ }
+
+ *current_value += 1;
+}
+
+/**
+ * vdo_release_journal_entry_lock() - Release a single per-entry reference count for a recovery
+ * journal block.
+ * @journal: The recovery journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ */
+void vdo_release_journal_entry_lock(struct recovery_journal *journal,
+ sequence_number_t sequence_number)
+{
+ block_count_t lock_number;
+
+ if (sequence_number == 0)
+ return;
+
+ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
+ /*
+ * Extra barriers because this was originally developed using an atomic add operation that
+ * implicitly had them.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(get_decrement_counter(journal, lock_number));
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct recovery_journal, state));
+}
+
+/**
+ * vdo_drain_recovery_journal() - Drain recovery journal I/O.
+ * @journal: The journal to drain.
+ * @operation: The drain operation (suspend or save).
+ * @parent: The completion to notify once the journal is drained.
+ *
+ * All uncommitted entries will be written out.
+ */
+void vdo_drain_recovery_journal(struct recovery_journal *journal,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ assert_on_journal_thread(journal, __func__);
+ vdo_start_draining(&journal->state, operation, parent, initiate_drain);
+}
+
+/**
+ * resume_lock_counter() - Re-allow notifications from a suspended lock counter.
+ * @counter: The counter.
+ *
+ * Return: true if the lock counter was suspended.
+ */
+static bool resume_lock_counter(struct lock_counter *counter)
+{
+ int prior_state;
+
+ /*
+ * Extra barriers because this was originally developed using a CAS operation that implicitly
+ * had them.
+ */
+ smp_mb__before_atomic();
+ prior_state = atomic_cmpxchg(&counter->state, LOCK_COUNTER_STATE_SUSPENDED,
+ LOCK_COUNTER_STATE_NOT_NOTIFYING);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+
+ return (prior_state == LOCK_COUNTER_STATE_SUSPENDED);
+}
+
+/**
+ * vdo_resume_recovery_journal() - Resume a recovery journal which has been drained.
+ * @journal: The journal to resume.
+ * @parent: The completion to finish once the journal is resumed.
+ */
+void vdo_resume_recovery_journal(struct recovery_journal *journal,
+ struct vdo_completion *parent)
+{
+ bool saved;
+
+ assert_on_journal_thread(journal, __func__);
+ saved = vdo_is_state_saved(&journal->state);
+ vdo_set_completion_result(parent, vdo_resume_if_quiescent(&journal->state));
+ if (is_read_only(journal)) {
+ vdo_continue_completion(parent, VDO_READ_ONLY);
+ return;
+ }
+
+ if (saved)
+ initialize_journal_state(journal);
+
+ if (resume_lock_counter(&journal->lock_counter)) {
+ /* We might have missed a notification. */
+ reap_recovery_journal(journal);
+ }
+
+ vdo_launch_completion(parent);
+}
+
+/**
+ * vdo_get_recovery_journal_logical_blocks_used() - Get the number of logical blocks in use by the
+ * VDO.
+ * @journal: The journal.
+ *
+ * Return: The number of logical blocks in use by the VDO.
+ */
+block_count_t vdo_get_recovery_journal_logical_blocks_used(const struct recovery_journal *journal)
+{
+ return journal->logical_blocks_used;
+}
+
+/**
+ * vdo_get_recovery_journal_statistics() - Get the current statistics from the recovery journal.
+ * @journal: The recovery journal to query.
+ *
+ * Return: A copy of the current statistics for the journal.
+ */
+struct recovery_journal_statistics
+vdo_get_recovery_journal_statistics(const struct recovery_journal *journal)
+{
+ return journal->events;
+}
+
+/**
+ * dump_recovery_block() - Dump the contents of the recovery block to the log.
+ * @block: The block to dump.
+ */
+static void dump_recovery_block(const struct recovery_journal_block *block)
+{
+ vdo_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters",
+ (unsigned long long) block->sequence_number, block->entry_count,
+ (block->committing ? "committing" : "waiting"),
+ vdo_waitq_num_waiters(&block->entry_waiters),
+ vdo_waitq_num_waiters(&block->commit_waiters));
+}
+
+/**
+ * vdo_dump_recovery_journal_statistics() - Dump some current statistics and other debug info from
+ * the recovery journal.
+ * @journal: The recovery journal to dump.
+ */
+void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal)
+{
+ const struct recovery_journal_block *block;
+ struct recovery_journal_statistics stats = vdo_get_recovery_journal_statistics(journal);
+
+ vdo_log_info("Recovery Journal");
+ vdo_log_info(" block_map_head=%llu slab_journal_head=%llu last_write_acknowledged=%llu tail=%llu block_map_reap_head=%llu slab_journal_reap_head=%llu disk_full=%llu slab_journal_commits_requested=%llu entry_waiters=%zu",
+ (unsigned long long) journal->block_map_head,
+ (unsigned long long) journal->slab_journal_head,
+ (unsigned long long) journal->last_write_acknowledged,
+ (unsigned long long) journal->tail,
+ (unsigned long long) journal->block_map_reap_head,
+ (unsigned long long) journal->slab_journal_reap_head,
+ (unsigned long long) stats.disk_full,
+ (unsigned long long) stats.slab_journal_commits_requested,
+ vdo_waitq_num_waiters(&journal->entry_waiters));
+ vdo_log_info(" entries: started=%llu written=%llu committed=%llu",
+ (unsigned long long) stats.entries.started,
+ (unsigned long long) stats.entries.written,
+ (unsigned long long) stats.entries.committed);
+ vdo_log_info(" blocks: started=%llu written=%llu committed=%llu",
+ (unsigned long long) stats.blocks.started,
+ (unsigned long long) stats.blocks.written,
+ (unsigned long long) stats.blocks.committed);
+
+ vdo_log_info(" active blocks:");
+ list_for_each_entry(block, &journal->active_tail_blocks, list_node)
+ dump_recovery_block(block);
+}
diff --git a/drivers/md/dm-vdo/recovery-journal.h b/drivers/md/dm-vdo/recovery-journal.h
new file mode 100644
index 000000000000..899071173015
--- /dev/null
+++ b/drivers/md/dm-vdo/recovery-journal.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_RECOVERY_JOURNAL_H
+#define VDO_RECOVERY_JOURNAL_H
+
+#include <linux/list.h>
+
+#include "numeric.h"
+
+#include "admin-state.h"
+#include "constants.h"
+#include "encodings.h"
+#include "flush.h"
+#include "statistics.h"
+#include "types.h"
+#include "wait-queue.h"
+
+/**
+ * DOC: recovery journal.
+ *
+ * The recovery_journal provides a log of all block mapping and reference count changes which have
+ * not yet been stably written to the block map or slab journals. This log helps reduce write
+ * amplification by amortizing slab journal and block map page updates.
+ *
+ * The recovery journal has a single dedicated queue and thread for performing all journal updates.
+ * The concurrency guarantees of this single-threaded model allow the code to omit more
+ * fine-grained locking for recovery journal structures.
+ *
+ * The journal consists of a set of on-disk blocks arranged as a circular log with monotonically
+ * increasing sequence numbers. Three sequence numbers serve to define the active extent of the
+ * journal. The 'head' is the oldest active block in the journal. The 'tail' is the end of the
+ * half-open interval containing the active blocks. 'active' is the number of the block actively
+ * receiving entries. In an empty journal, head == active == tail. Once any entries are added, tail
+ * = active + 1, and head may be any value in the interval [tail - size, active].
+ *
+ * The journal also contains a set of in-memory blocks which are used to buffer up entries until
+ * they can be committed. In general the number of in-memory blocks ('tail_buffer_count') will be
+ * less than the on-disk size. Each in-memory block is also a vdo_completion. Each in-memory block
+ * has a vio which is used to commit that block to disk. The vio's data is the on-disk
+ * representation of the journal block. In addition each in-memory block has a buffer which is used
+ * to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
+ * kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
+ * (see below) it is moved to the 'active_tail_blocks' ring. When a block is fully committed, it is
+ * moved back to the 'free_tail_blocks' ring.
+ *
+ * When entries are added to the journal, they are added to the active in-memory block, as
+ * indicated by the 'active_block' field. If the caller wishes to wait for the entry to be
+ * committed, the requesting VIO will be attached to the in-memory block to which the caller's
+ * entry was added. If the caller does wish to wait, or if the entry filled the active block, an
+ * attempt will be made to commit that block to disk. If there is already another commit in
+ * progress, the attempt will be ignored and then automatically retried when the in-progress commit
+ * completes. If there is no commit in progress, any data_vios waiting on the block are transferred
+ * to the block's vio which is then written, automatically waking all of the waiters when it
+ * completes. When the write completes, any entries which accumulated in the block are copied to
+ * the vio's data buffer.
+ *
+ * Finally, the journal maintains a set of counters, one for each on-disk journal block. These
+ * counters are used as locks to prevent premature reaping of journal blocks. Each time a new
+ * sequence number is used, the counter for the corresponding block is incremented. The counter is
+ * subsequently decremented when that block is filled and then committed for the last time. This
+ * prevents blocks from being reaped while they are still being updated. The counter is also
+ * incremented once for each entry added to a block, and decremented once each time the block map
+ * is updated in memory for that request. This prevents blocks from being reaped while their VIOs
+ * are still active. Finally, each in-memory block map page tracks the oldest journal block that
+ * contains entries corresponding to uncommitted updates to that block map page. Each time an
+ * in-memory block map page is updated, it checks if the journal block for the VIO is earlier than
+ * the one it references, in which case it increments the count on the earlier journal block and
+ * decrements the count on the later journal block, maintaining a lock on the oldest journal block
+ * containing entries for that page. When a block map page has been flushed from the cache, the
+ * counter for the journal block it references is decremented. Whenever the counter for the head
+ * block goes to 0, the head is advanced until it comes to a block whose counter is not 0 or until
+ * it reaches the active block. This is the mechanism for reclaiming journal space on disk.
+ *
+ * If there is no in-memory space when a VIO attempts to add an entry, the VIO will be attached to
+ * the 'commit_completion' and will be woken the next time a full block has committed. If there is
+ * no on-disk space when a VIO attempts to add an entry, the VIO will be attached to the
+ * 'reap_completion', and will be woken the next time a journal block is reaped.
+ */
+
+enum vdo_zone_type {
+ VDO_ZONE_TYPE_ADMIN,
+ VDO_ZONE_TYPE_JOURNAL,
+ VDO_ZONE_TYPE_LOGICAL,
+ VDO_ZONE_TYPE_PHYSICAL,
+};
+
+struct lock_counter {
+ /* The completion for notifying the owner of a lock release */
+ struct vdo_completion completion;
+ /* The number of logical zones which may hold locks */
+ zone_count_t logical_zones;
+ /* The number of physical zones which may hold locks */
+ zone_count_t physical_zones;
+ /* The number of locks */
+ block_count_t locks;
+ /* Whether the lock release notification is in flight */
+ atomic_t state;
+ /* The number of logical zones which hold each lock */
+ atomic_t *logical_zone_counts;
+ /* The number of physical zones which hold each lock */
+ atomic_t *physical_zone_counts;
+ /* The per-lock counts for the journal zone */
+ u16 *journal_counters;
+ /* The per-lock decrement counts for the journal zone */
+ atomic_t *journal_decrement_counts;
+ /* The per-zone, per-lock reference counts for logical zones */
+ u16 *logical_counters;
+ /* The per-zone, per-lock reference counts for physical zones */
+ u16 *physical_counters;
+};
+
+struct recovery_journal_block {
+ /* The doubly linked pointers for the free or active lists */
+ struct list_head list_node;
+ /* The waiter for the pending full block list */
+ struct vdo_waiter write_waiter;
+ /* The journal to which this block belongs */
+ struct recovery_journal *journal;
+ /* A pointer to the current sector in the packed block buffer */
+ struct packed_journal_sector *sector;
+ /* The vio for writing this block */
+ struct vio vio;
+ /* The sequence number for this block */
+ sequence_number_t sequence_number;
+ /* The location of this block in the on-disk journal */
+ physical_block_number_t block_number;
+ /* Whether this block is being committed */
+ bool committing;
+ /* The total number of entries in this block */
+ journal_entry_count_t entry_count;
+ /* The total number of uncommitted entries (queued or committing) */
+ journal_entry_count_t uncommitted_entry_count;
+ /* The number of new entries in the current commit */
+ journal_entry_count_t entries_in_commit;
+ /* The queue of vios which will make entries for the next commit */
+ struct vdo_wait_queue entry_waiters;
+ /* The queue of vios waiting for the current commit */
+ struct vdo_wait_queue commit_waiters;
+};
+
+struct recovery_journal {
+ /* The thread ID of the journal zone */
+ thread_id_t thread_id;
+ /* The slab depot which can hold locks on this journal */
+ struct slab_depot *depot;
+ /* The block map which can hold locks on this journal */
+ struct block_map *block_map;
+ /* The queue of vios waiting to make entries */
+ struct vdo_wait_queue entry_waiters;
+ /* The number of free entries in the journal */
+ u64 available_space;
+ /* The number of decrement entries which need to be made */
+ data_vio_count_t pending_decrement_count;
+ /* Whether the journal is adding entries from the increment or decrement waiters queues */
+ bool adding_entries;
+ /* The administrative state of the journal */
+ struct admin_state state;
+ /* Whether a reap is in progress */
+ bool reaping;
+ /* The location of the first journal block */
+ physical_block_number_t origin;
+ /* The oldest active block in the journal on disk for block map rebuild */
+ sequence_number_t block_map_head;
+ /* The oldest active block in the journal on disk for slab journal replay */
+ sequence_number_t slab_journal_head;
+ /* The newest block in the journal on disk to which a write has finished */
+ sequence_number_t last_write_acknowledged;
+ /* The end of the half-open interval of the active journal */
+ sequence_number_t tail;
+ /* The point at which the last entry will have been added */
+ struct journal_point append_point;
+ /* The journal point of the vio most recently released from the journal */
+ struct journal_point commit_point;
+ /* The nonce of the VDO */
+ nonce_t nonce;
+ /* The number of recoveries completed by the VDO */
+ u8 recovery_count;
+ /* The number of entries which fit in a single block */
+ journal_entry_count_t entries_per_block;
+ /* Unused in-memory journal blocks */
+ struct list_head free_tail_blocks;
+ /* In-memory journal blocks with records */
+ struct list_head active_tail_blocks;
+ /* A pointer to the active block (the one we are adding entries to now) */
+ struct recovery_journal_block *active_block;
+ /* Journal blocks that need writing */
+ struct vdo_wait_queue pending_writes;
+ /* The new block map reap head after reaping */
+ sequence_number_t block_map_reap_head;
+ /* The head block number for the block map rebuild range */
+ block_count_t block_map_head_block_number;
+ /* The new slab journal reap head after reaping */
+ sequence_number_t slab_journal_reap_head;
+ /* The head block number for the slab journal replay range */
+ block_count_t slab_journal_head_block_number;
+ /* The data-less vio, usable only for flushing */
+ struct vio *flush_vio;
+ /* The number of blocks in the on-disk journal */
+ block_count_t size;
+ /* The number of logical blocks that are in-use */
+ block_count_t logical_blocks_used;
+ /* The number of block map pages that are allocated */
+ block_count_t block_map_data_blocks;
+ /* The number of journal blocks written but not yet acknowledged */
+ block_count_t pending_write_count;
+ /* The threshold at which slab journal tail blocks will be written out */
+ block_count_t slab_journal_commit_threshold;
+ /* Counters for events in the journal that are reported as statistics */
+ struct recovery_journal_statistics events;
+ /* The locks for each on-disk block */
+ struct lock_counter lock_counter;
+ /* The tail blocks */
+ struct recovery_journal_block blocks[];
+};
+
+/**
+ * vdo_get_recovery_journal_block_number() - Get the physical block number for a given sequence
+ * number.
+ * @journal: The journal.
+ * @sequence: The sequence number of the desired block.
+ *
+ * Return: The block number corresponding to the sequence number.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_recovery_journal_block_number(const struct recovery_journal *journal,
+ sequence_number_t sequence)
+{
+ /*
+ * Since journal size is a power of two, the block number modulus can just be extracted
+ * from the low-order bits of the sequence.
+ */
+ return vdo_compute_recovery_journal_block_number(journal->size, sequence);
+}
+
+/**
+ * vdo_compute_recovery_journal_check_byte() - Compute the check byte for a given sequence number.
+ * @journal: The journal.
+ * @sequence: The sequence number.
+ *
+ * Return: The check byte corresponding to the sequence number.
+ */
+static inline u8 __must_check
+vdo_compute_recovery_journal_check_byte(const struct recovery_journal *journal,
+ sequence_number_t sequence)
+{
+ /* The check byte must change with each trip around the journal. */
+ return (((sequence / journal->size) & 0x7F) | 0x80);
+}
+
+int __must_check vdo_decode_recovery_journal(struct recovery_journal_state_7_0 state,
+ nonce_t nonce, struct vdo *vdo,
+ struct partition *partition,
+ u64 recovery_count,
+ block_count_t journal_size,
+ struct recovery_journal **journal_ptr);
+
+void vdo_free_recovery_journal(struct recovery_journal *journal);
+
+void vdo_initialize_recovery_journal_post_repair(struct recovery_journal *journal,
+ u64 recovery_count,
+ sequence_number_t tail,
+ block_count_t logical_blocks_used,
+ block_count_t block_map_data_blocks);
+
+block_count_t __must_check
+vdo_get_journal_block_map_data_blocks_used(struct recovery_journal *journal);
+
+thread_id_t __must_check vdo_get_recovery_journal_thread_id(struct recovery_journal *journal);
+
+void vdo_open_recovery_journal(struct recovery_journal *journal,
+ struct slab_depot *depot, struct block_map *block_map);
+
+sequence_number_t
+vdo_get_recovery_journal_current_sequence_number(struct recovery_journal *journal);
+
+block_count_t __must_check vdo_get_recovery_journal_length(block_count_t journal_size);
+
+struct recovery_journal_state_7_0 __must_check
+vdo_record_recovery_journal(const struct recovery_journal *journal);
+
+void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
+ struct data_vio *data_vio);
+
+void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id);
+
+void vdo_release_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id);
+
+void vdo_release_journal_entry_lock(struct recovery_journal *journal,
+ sequence_number_t sequence_number);
+
+void vdo_drain_recovery_journal(struct recovery_journal *journal,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent);
+
+void vdo_resume_recovery_journal(struct recovery_journal *journal,
+ struct vdo_completion *parent);
+
+block_count_t __must_check
+vdo_get_recovery_journal_logical_blocks_used(const struct recovery_journal *journal);
+
+struct recovery_journal_statistics __must_check
+vdo_get_recovery_journal_statistics(const struct recovery_journal *journal);
+
+void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal);
+
+#endif /* VDO_RECOVERY_JOURNAL_H */
diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c
new file mode 100644
index 000000000000..defc9359f10e
--- /dev/null
+++ b/drivers/md/dm-vdo/repair.c
@@ -0,0 +1,1756 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "repair.h"
+
+#include <linux/min_heap.h>
+#include <linux/minmax.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "encodings.h"
+#include "int-map.h"
+#include "io-submitter.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "types.h"
+#include "vdo.h"
+#include "wait-queue.h"
+
+/*
+ * An explicitly numbered block mapping. Numbering the mappings allows them to be sorted by logical
+ * block number during repair while still preserving the relative order of journal entries with
+ * the same logical block number.
+ */
+struct numbered_block_mapping {
+ struct block_map_slot block_map_slot;
+ struct block_map_entry block_map_entry;
+ /* A serial number to use during replay */
+ u32 number;
+} __packed;
+
+/*
+ * The absolute position of an entry in the recovery journal, including the sector number and the
+ * entry number within the sector.
+ */
+struct recovery_point {
+ /* Block sequence number */
+ sequence_number_t sequence_number;
+ /* Sector number */
+ u8 sector_count;
+ /* Entry number */
+ journal_entry_count_t entry_count;
+ /* Whether or not the increment portion of the current entry has been applied */
+ bool increment_applied;
+};
+
+struct repair_completion {
+ /* The completion header */
+ struct vdo_completion completion;
+
+ /* A buffer to hold the data read off disk */
+ char *journal_data;
+
+ /* For loading the journal */
+ data_vio_count_t vio_count;
+ data_vio_count_t vios_complete;
+ struct vio *vios;
+
+ /* The number of entries to be applied to the block map */
+ size_t block_map_entry_count;
+ /* The sequence number of the first valid block for block map recovery */
+ sequence_number_t block_map_head;
+ /* The sequence number of the first valid block for slab journal replay */
+ sequence_number_t slab_journal_head;
+ /* The sequence number of the last valid block of the journal (if known) */
+ sequence_number_t tail;
+ /*
+ * The highest sequence number of the journal. During recovery (vs read-only rebuild), not
+ * the same as the tail, since the tail ignores blocks after the first hole.
+ */
+ sequence_number_t highest_tail;
+
+ /* The number of logical blocks currently known to be in use */
+ block_count_t logical_blocks_used;
+ /* The number of block map data blocks known to be allocated */
+ block_count_t block_map_data_blocks;
+
+ /* These fields are for playing the journal into the block map */
+ /* The entry data for the block map recovery */
+ struct numbered_block_mapping *entries;
+ /* The number of entries in the entry array */
+ size_t entry_count;
+ /* The number of pending (non-ready) requests */
+ page_count_t outstanding;
+ /* The number of page completions */
+ page_count_t page_count;
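+ /* Whether page fetches are still being launched; defers processing of ready pages */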
+ bool launching;
+ /*
+ * a heap wrapping journal_entries. It re-orders and sorts journal entries in ascending LBN
+ * order, then original journal order. This permits efficient iteration over the journal
+ * entries in order.
+ */
+ struct min_heap replay_heap;
+ /* Fields tracking progress through the journal entries. */
+ struct numbered_block_mapping *current_entry;
+ struct numbered_block_mapping *current_unfetched_entry;
+ /* Current requested page's PBN */
+ physical_block_number_t pbn;
+
+ /* These fields are only used during recovery. */
+ /* A location just beyond the last valid entry of the journal */
+ struct recovery_point tail_recovery_point;
+ /* The location of the next recovery journal entry to apply */
+ struct recovery_point next_recovery_point;
+ /* The journal point to give to the next synthesized decref */
+ struct journal_point next_journal_point;
+ /* The number of entries played into slab journals */
+ size_t entries_added_to_slab_journals;
+
+ /* These fields are only used during read-only rebuild */
+ page_count_t page_to_fetch;
+ /* the number of leaf pages in the block map */
+ page_count_t leaf_pages;
+ /* the last slot of the block map */
+ struct block_map_slot last_slot;
+
+ /*
+ * The page completions used for playing the journal into the block map, and, during
+ * read-only rebuild, for rebuilding the reference counts from the block map.
+ */
+ struct vdo_page_completion page_completions[];
+};
+
+/*
+ * This is a min_heap callback function that orders numbered_block_mappings using the
+ * 'block_map_slot' field as the primary key and the mapping 'number' field as the secondary key.
+ * Using the mapping number preserves the journal order of entries for the same slot, allowing us
+ * to sort by slot while still ensuring we replay all entries with the same slot in the exact order
+ * as they appeared in the journal.
+ */
+static bool mapping_is_less_than(const void *item1, const void *item2)
+{
+ const struct numbered_block_mapping *mapping1 =
+ (const struct numbered_block_mapping *) item1;
+ const struct numbered_block_mapping *mapping2 =
+ (const struct numbered_block_mapping *) item2;
+
+ if (mapping1->block_map_slot.pbn != mapping2->block_map_slot.pbn)
+ return mapping1->block_map_slot.pbn < mapping2->block_map_slot.pbn;
+
+ if (mapping1->block_map_slot.slot != mapping2->block_map_slot.slot)
+ return mapping1->block_map_slot.slot < mapping2->block_map_slot.slot;
+
+ if (mapping1->number != mapping2->number)
+ return mapping1->number < mapping2->number;
+
+ return false;
+}
+
+static void swap_mappings(void *item1, void *item2)
+{
+ struct numbered_block_mapping *mapping1 = item1;
+ struct numbered_block_mapping *mapping2 = item2;
+
+ swap(*mapping1, *mapping2);
+}
+
+static const struct min_heap_callbacks repair_min_heap = {
+ .elem_size = sizeof(struct numbered_block_mapping),
+ .less = mapping_is_less_than,
+ .swp = swap_mappings,
+};
+
+static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair)
+{
+ struct min_heap *heap = &repair->replay_heap;
+ struct numbered_block_mapping *last;
+
+ if (heap->nr == 0)
+ return NULL;
+
+ /*
+ * Swap the next heap element with the last one on the heap, popping it off the heap,
+ * restore the heap invariant, and return a pointer to the popped element.
+ */
+ last = &repair->entries[--heap->nr];
+ swap_mappings(heap->data, last);
+ min_heapify(heap, 0, &repair_min_heap);
+ return last;
+}
+
+/**
+ * as_repair_completion() - Convert a generic completion to a repair_completion.
+ * @completion: The completion to convert.
+ *
+ * Return: The repair_completion.
+ */
+static inline struct repair_completion * __must_check
+as_repair_completion(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_REPAIR_COMPLETION);
+ return container_of(completion, struct repair_completion, completion);
+}
+
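+/*
+ * Reset the repair completion and set its callback to run on the thread appropriate for the
+ * given zone type.
+ */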
+static void prepare_repair_completion(struct repair_completion *repair,
+ vdo_action_fn callback, enum vdo_zone_type zone_type)
+{
+ struct vdo_completion *completion = &repair->completion;
+ const struct thread_config *thread_config = &completion->vdo->thread_config;
+ thread_id_t thread_id;
+
+ /* All block map access is done on a single thread, so use logical zone 0. */
+ thread_id = ((zone_type == VDO_ZONE_TYPE_LOGICAL) ?
+ thread_config->logical_threads[0] :
+ thread_config->admin_thread);
+ vdo_reset_completion(completion);
+ vdo_set_completion_callback(completion, callback, thread_id);
+}
+
+static void launch_repair_completion(struct repair_completion *repair,
+ vdo_action_fn callback, enum vdo_zone_type zone_type)
+{
+ prepare_repair_completion(repair, callback, zone_type);
+ vdo_launch_completion(&repair->completion);
+}
+
+static void uninitialize_vios(struct repair_completion *repair)
+{
+ while (repair->vio_count > 0)
+ free_vio_components(&repair->vios[--repair->vio_count]);
+
+ vdo_free(vdo_forget(repair->vios));
+}
+
+static void free_repair_completion(struct repair_completion *repair)
+{
+ if (repair == NULL)
+ return;
+
+ /*
+ * We do this here because this function is the only common bottleneck for all clean up
+ * paths.
+ */
+ repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;
+
+ uninitialize_vios(repair);
+ vdo_free(vdo_forget(repair->journal_data));
+ vdo_free(vdo_forget(repair->entries));
+ vdo_free(repair);
+}
+
+static void finish_repair(struct vdo_completion *completion)
+{
+ struct vdo_completion *parent = completion->parent;
+ struct vdo *vdo = completion->vdo;
+ struct repair_completion *repair = as_repair_completion(completion);
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (vdo->load_state != VDO_REBUILD_FOR_UPGRADE)
+ vdo->states.vdo.complete_recoveries++;
+
+ vdo_initialize_recovery_journal_post_repair(vdo->recovery_journal,
+ vdo->states.vdo.complete_recoveries,
+ repair->highest_tail,
+ repair->logical_blocks_used,
+ repair->block_map_data_blocks);
+ free_repair_completion(vdo_forget(repair));
+
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
+ vdo_log_info("Read-only rebuild complete");
+ vdo_launch_completion(parent);
+ return;
+ }
+
+ /* FIXME: shouldn't this say either "recovery" or "repair"? */
+ vdo_log_info("Rebuild complete");
+
+ /*
+ * Now that we've freed the repair completion and its vast array of journal entries, we
+ * can allocate refcounts.
+ */
+ vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot));
+}
+
+/**
+ * abort_repair() - Handle a repair error.
+ * @completion: The repair completion.
+ */
+static void abort_repair(struct vdo_completion *completion)
+{
+ struct vdo_completion *parent = completion->parent;
+ int result = completion->result;
+ struct repair_completion *repair = as_repair_completion(completion);
+
+ if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state))
+ vdo_log_info("Read-only rebuild aborted");
+ else
+ vdo_log_warning("Recovery aborted");
+
+ free_repair_completion(vdo_forget(repair));
+ vdo_continue_completion(parent, result);
+}
+
+/**
+ * abort_on_error() - Abort a repair if there is an error.
+ * @result: The result to check.
+ * @repair: The repair completion.
+ *
+ * Return: true if the result was an error.
+ */
+static bool __must_check abort_on_error(int result, struct repair_completion *repair)
+{
+ if (result == VDO_SUCCESS)
+ return false;
+
+ vdo_fail_completion(&repair->completion, result);
+ return true;
+}
+
+/**
+ * drain_slab_depot() - Flush out all dirty refcount blocks now that they have been rebuilt or
+ * recovered.
+ * @completion: The repair completion.
+ */
+static void drain_slab_depot(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ struct repair_completion *repair = as_repair_completion(completion);
+ const struct admin_state_code *operation;
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
+ vdo_log_info("Saving rebuilt state");
+ operation = VDO_ADMIN_STATE_REBUILDING;
+ } else {
+ vdo_log_info("Replayed %zu journal entries into slab journals",
+ repair->entries_added_to_slab_journals);
+ operation = VDO_ADMIN_STATE_RECOVERING;
+ }
+
+ vdo_drain_slab_depot(vdo->depot, operation, completion);
+}
+
+/**
+ * flush_block_map_updates() - Flush the block map now that all the reference counts are rebuilt.
+ * @completion: The repair completion.
+ *
+ * This callback is registered in fetch_page().
+ */
+static void flush_block_map_updates(struct vdo_completion *completion)
+{
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ vdo_log_info("Flushing block map changes");
+ prepare_repair_completion(as_repair_completion(completion), drain_slab_depot,
+ VDO_ZONE_TYPE_ADMIN);
+ vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING,
+ completion);
+}
+
+static bool fetch_page(struct repair_completion *repair,
+ struct vdo_completion *completion);
+
+/**
+ * handle_page_load_error() - Handle an error loading a page.
+ * @completion: The vdo_page_completion.
+ */
+static void handle_page_load_error(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ repair->outstanding--;
+ vdo_set_completion_result(&repair->completion, completion->result);
+ vdo_release_page_completion(completion);
+ fetch_page(repair, completion);
+}
+
+/**
+ * unmap_entry() - Unmap an invalid entry and indicate that its page must be written out.
+ * @page: The page containing the entries
+ * @completion: The page_completion for writing the page
+ * @slot: The slot to unmap
+ */
+static void unmap_entry(struct block_map_page *page, struct vdo_completion *completion,
+ slot_number_t slot)
+{
+ page->entries[slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_request_page_write(completion);
+}
+
+/**
+ * remove_out_of_bounds_entries() - Unmap entries which are outside the logical space.
+ * @page: The page containing the entries
+ * @completion: The page_completion for writing the page
+ * @start: The first slot to check
+ */
+static void remove_out_of_bounds_entries(struct block_map_page *page,
+ struct vdo_completion *completion,
+ slot_number_t start)
+{
+ slot_number_t slot;
+
+ for (slot = start; slot < VDO_BLOCK_MAP_ENTRIES_PER_PAGE; slot++) {
+ struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
+
+ if (vdo_is_mapped_location(&mapping))
+ unmap_entry(page, completion, slot);
+ }
+}
+
+/**
+ * process_slot() - Update the reference counts for a single entry.
+ * @page: The page containing the entries
+ * @completion: The page_completion for writing the page
+ * @slot: The slot to check
+ *
+ * Return: true if the entry was a valid mapping
+ */
+static bool process_slot(struct block_map_page *page, struct vdo_completion *completion,
+ slot_number_t slot)
+{
+ struct slab_depot *depot = completion->vdo->depot;
+ int result;
+ struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
+
+ if (!vdo_is_valid_location(&mapping)) {
+ /* This entry is invalid, so remove it from the page. */
+ unmap_entry(page, completion, slot);
+ return false;
+ }
+
+ if (!vdo_is_mapped_location(&mapping))
+ return false;
+
+ if (mapping.pbn == VDO_ZERO_BLOCK)
+ return true;
+
+ if (!vdo_is_physical_data_block(depot, mapping.pbn)) {
+ /*
+ * This is a nonsense mapping. Remove it from the map so we're at least consistent
+ * and mark the page dirty.
+ */
+ unmap_entry(page, completion, slot);
+ return false;
+ }
+
+ result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,
+ VDO_JOURNAL_DATA_REMAPPING);
+ if (result == VDO_SUCCESS)
+ return true;
+
+ vdo_log_error_strerror(result,
+ "Could not adjust reference count for PBN %llu, slot %u mapped to PBN %llu",
+ (unsigned long long) vdo_get_block_map_page_pbn(page),
+ slot, (unsigned long long) mapping.pbn);
+ unmap_entry(page, completion, slot);
+ return false;
+}
+
+/**
+ * rebuild_reference_counts_from_page() - Rebuild reference counts from a block map page.
+ * @repair: The repair completion.
+ * @completion: The page completion holding the page.
+ */
+static void rebuild_reference_counts_from_page(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ slot_number_t slot, last_slot;
+ struct block_map_page *page;
+ int result;
+
+ result = vdo_get_cached_page(completion, &page);
+ if (result != VDO_SUCCESS) {
+ vdo_set_completion_result(&repair->completion, result);
+ return;
+ }
+
+ if (!page->header.initialized)
+ return;
+
+ /* Remove any bogus entries which exist beyond the end of the logical space. */
+ if (vdo_get_block_map_page_pbn(page) == repair->last_slot.pbn) {
+ last_slot = repair->last_slot.slot;
+ remove_out_of_bounds_entries(page, completion, last_slot);
+ } else {
+ last_slot = VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ }
+
+ /* Inform the slab depot of all entries on this page. */
+ for (slot = 0; slot < last_slot; slot++) {
+ if (process_slot(page, completion, slot))
+ repair->logical_blocks_used++;
+ }
+}
+
+/**
+ * page_loaded() - Process a page which has just been loaded.
+ * @completion: The vdo_page_completion for the fetched page.
+ *
+ * This callback is registered by fetch_page().
+ */
+static void page_loaded(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ repair->outstanding--;
+ rebuild_reference_counts_from_page(repair, completion);
+ vdo_release_page_completion(completion);
+
+ /* Advance progress to the next page, and fetch the next page we haven't yet requested. */
+ fetch_page(repair, completion);
+}
+
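+/*
+ * Find the PBN of the next allocated leaf block map page to fetch, or VDO_ZERO_BLOCK if there
+ * are no more pages to fetch or an error has occurred.
+ */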
+static physical_block_number_t get_pbn_to_fetch(struct repair_completion *repair,
+ struct block_map *block_map)
+{
+ physical_block_number_t pbn = VDO_ZERO_BLOCK;
+
+ if (repair->completion.result != VDO_SUCCESS)
+ return VDO_ZERO_BLOCK;
+
+ while ((pbn == VDO_ZERO_BLOCK) && (repair->page_to_fetch < repair->leaf_pages))
+ pbn = vdo_find_block_map_page_pbn(block_map, repair->page_to_fetch++);
+
+ if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))
+ return pbn;
+
+ vdo_set_completion_result(&repair->completion, VDO_BAD_MAPPING);
+ return VDO_ZERO_BLOCK;
+}
+
+/**
+ * fetch_page() - Fetch a page from the block map.
+ * @repair: The repair_completion.
+ * @completion: The page completion to use.
+ *
+ * Return: true if the rebuild is complete.
+ */
+static bool fetch_page(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
+ struct block_map *block_map = repair->completion.vdo->block_map;
+ physical_block_number_t pbn = get_pbn_to_fetch(repair, block_map);
+
+ if (pbn != VDO_ZERO_BLOCK) {
+ repair->outstanding++;
+ /*
+ * We must set the requeue flag here to ensure that we don't blow the stack if all
+ * the requested pages are already in the cache or get load errors.
+ */
+ vdo_get_page(page_completion, &block_map->zones[0], pbn, true, repair,
+ page_loaded, handle_page_load_error, true);
+ }
+
+ if (repair->outstanding > 0)
+ return false;
+
+ launch_repair_completion(repair, flush_block_map_updates, VDO_ZONE_TYPE_ADMIN);
+ return true;
+}
+
+/**
+ * rebuild_from_leaves() - Rebuild reference counts from the leaf block map pages.
+ * @completion: The repair completion.
+ *
+ * Rebuilds reference counts from the leaf block map pages now that reference counts have been
+ * rebuilt from the interior tree pages (which have been loaded in the process). This callback is
+ * registered in rebuild_reference_counts().
+ */
+static void rebuild_from_leaves(struct vdo_completion *completion)
+{
+ page_count_t i;
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct block_map *map = completion->vdo->block_map;
+
+ repair->logical_blocks_used = 0;
+
+ /*
+ * The PBN calculation doesn't work until the tree pages have been loaded, so we can't set
+ * this value at the start of repair.
+ */
+ repair->leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
+ repair->last_slot = (struct block_map_slot) {
+ .slot = map->entry_count % VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
+ .pbn = vdo_find_block_map_page_pbn(map, repair->leaf_pages - 1),
+ };
+ if (repair->last_slot.slot == 0)
+ repair->last_slot.slot = VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ for (i = 0; i < repair->page_count; i++) {
+ if (fetch_page(repair, &repair->page_completions[i].completion)) {
+ /*
+ * The rebuild has already moved on, so it is neither safe nor necessary to
+ * launch any more fetches.
+ */
+ return;
+ }
+ }
+}
+
+/**
+ * process_entry() - Process a single entry from the block map tree.
+ * @pbn: A pbn which holds a block map tree page.
+ * @completion: The parent completion of the traversal.
+ *
+ * Implements vdo_entry_callback_fn.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int process_entry(physical_block_number_t pbn, struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct slab_depot *depot = completion->vdo->depot;
+ int result;
+
+ if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "PBN %llu out of range",
+ (unsigned long long) pbn);
+ }
+
+ result = vdo_adjust_reference_count_for_rebuild(depot, pbn,
+ VDO_JOURNAL_BLOCK_MAP_REMAPPING);
+ if (result != VDO_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "Could not adjust reference count for block map tree PBN %llu",
+ (unsigned long long) pbn);
+ }
+
+ repair->block_map_data_blocks++;
+ return VDO_SUCCESS;
+}
+
+static void rebuild_reference_counts(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct vdo *vdo = completion->vdo;
+ struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache;
+
+ /* We must allocate ref_counts before we can rebuild them. */
+ if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair))
+ return;
+
+ /*
+ * Completion chaining from page cache hits can lead to stack overflow during the rebuild,
+ * so clear out the cache before this rebuild phase.
+ */
+ if (abort_on_error(vdo_invalidate_page_cache(cache), repair))
+ return;
+
+ prepare_repair_completion(repair, rebuild_from_leaves, VDO_ZONE_TYPE_LOGICAL);
+ vdo_traverse_forest(vdo->block_map, process_entry, completion);
+}
+
+/**
+ * increment_recovery_point() - Move the given recovery point forward by one entry.
+ * @point: The recovery point to advance.
+ */
+static void increment_recovery_point(struct recovery_point *point)
+{
+ if (++point->entry_count < RECOVERY_JOURNAL_ENTRIES_PER_SECTOR)
+ return;
+
+ point->entry_count = 0;
+ if (point->sector_count < (VDO_SECTORS_PER_BLOCK - 1)) {
+ point->sector_count++;
+ return;
+ }
+
+ point->sequence_number++;
+ point->sector_count = 1;
+}
+
+/**
+ * advance_points() - Advance the current recovery and journal points.
+ * @repair: The repair_completion whose points are to be advanced.
+ * @entries_per_block: The number of entries in a recovery journal block.
+ */
+static void advance_points(struct repair_completion *repair,
+ journal_entry_count_t entries_per_block)
+{
+ if (!repair->next_recovery_point.increment_applied) {
+ repair->next_recovery_point.increment_applied = true;
+ return;
+ }
+
+ increment_recovery_point(&repair->next_recovery_point);
+ vdo_advance_journal_point(&repair->next_journal_point, entries_per_block);
+ repair->next_recovery_point.increment_applied = false;
+}
+
+/**
+ * before_recovery_point() - Check whether the first point precedes the second point.
+ * @first: The first recovery point.
+ * @second: The second recovery point.
+ *
+ * Return: true if the first point precedes the second point.
+ */
+static bool __must_check before_recovery_point(const struct recovery_point *first,
+ const struct recovery_point *second)
+{
+ if (first->sequence_number < second->sequence_number)
+ return true;
+
+ if (first->sequence_number > second->sequence_number)
+ return false;
+
+ if (first->sector_count < second->sector_count)
+ return true;
+
+ return ((first->sector_count == second->sector_count) &&
+ (first->entry_count < second->entry_count));
+}
+
+static struct packed_journal_sector * __must_check get_sector(struct recovery_journal *journal,
+ char *journal_data,
+ sequence_number_t sequence,
+ u8 sector_number)
+{
+ off_t offset;
+
+ offset = ((vdo_get_recovery_journal_block_number(journal, sequence) * VDO_BLOCK_SIZE) +
+ (VDO_SECTOR_SIZE * sector_number));
+ return (struct packed_journal_sector *) (journal_data + offset);
+}
+
+/**
+ * get_entry() - Unpack the recovery journal entry associated with the given recovery point.
+ * @repair: The repair completion.
+ * @point: The recovery point.
+ *
+ * Return: The unpacked contents of the matching recovery journal entry.
+ */
+static struct recovery_journal_entry get_entry(const struct repair_completion *repair,
+ const struct recovery_point *point)
+{
+ struct packed_journal_sector *sector;
+
+ sector = get_sector(repair->completion.vdo->recovery_journal,
+ repair->journal_data, point->sequence_number,
+ point->sector_count);
+ return vdo_unpack_recovery_journal_entry(&sector->entries[point->entry_count]);
+}
+
+/**
+ * validate_recovery_journal_entry() - Validate a recovery journal entry.
+ * @vdo: The vdo.
+ * @entry: The entry to validate.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int validate_recovery_journal_entry(const struct vdo *vdo,
+ const struct recovery_journal_entry *entry)
+{
+ if ((entry->slot.pbn >= vdo->states.vdo.config.physical_blocks) ||
+ (entry->slot.slot >= VDO_BLOCK_MAP_ENTRIES_PER_PAGE) ||
+ !vdo_is_valid_location(&entry->mapping) ||
+ !vdo_is_valid_location(&entry->unmapping) ||
+ !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
+ !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) {
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Invalid entry: %s (%llu, %u) from %llu to %llu is not within bounds",
+ vdo_get_journal_operation_name(entry->operation),
+ (unsigned long long) entry->slot.pbn,
+ entry->slot.slot,
+ (unsigned long long) entry->unmapping.pbn,
+ (unsigned long long) entry->mapping.pbn);
+ }
+
+ if ((entry->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) &&
+ (vdo_is_state_compressed(entry->mapping.state) ||
+ (entry->mapping.pbn == VDO_ZERO_BLOCK) ||
+ (entry->unmapping.state != VDO_MAPPING_STATE_UNMAPPED) ||
+ (entry->unmapping.pbn != VDO_ZERO_BLOCK))) {
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Invalid entry: %s (%llu, %u) from %llu to %llu is not a valid tree mapping",
+ vdo_get_journal_operation_name(entry->operation),
+ (unsigned long long) entry->slot.pbn,
+ entry->slot.slot,
+ (unsigned long long) entry->unmapping.pbn,
+ (unsigned long long) entry->mapping.pbn);
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * add_slab_journal_entries() - Replay recovery journal entries into the slab journals of the
+ * allocator currently being recovered.
+ * @completion: The allocator completion.
+ *
+ * Waits for slab journal tail block space when necessary. This method is its own callback.
+ */
+static void add_slab_journal_entries(struct vdo_completion *completion)
+{
+ struct recovery_point *recovery_point;
+ struct repair_completion *repair = completion->parent;
+ struct vdo *vdo = completion->vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ /* Get ready in case we need to enqueue again. */
+ vdo_prepare_completion(completion, add_slab_journal_entries,
+ vdo_notify_slab_journals_are_recovered,
+ completion->callback_thread_id, repair);
+ for (recovery_point = &repair->next_recovery_point;
+ before_recovery_point(recovery_point, &repair->tail_recovery_point);
+ advance_points(repair, journal->entries_per_block)) {
+ int result;
+ physical_block_number_t pbn;
+ struct vdo_slab *slab;
+ struct recovery_journal_entry entry = get_entry(repair, recovery_point);
+ bool increment = !repair->next_recovery_point.increment_applied;
+
+ if (increment) {
+ result = validate_recovery_journal_entry(vdo, &entry);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(vdo, result);
+ vdo_fail_completion(completion, result);
+ return;
+ }
+
+ pbn = entry.mapping.pbn;
+ } else {
+ pbn = entry.unmapping.pbn;
+ }
+
+ if (pbn == VDO_ZERO_BLOCK)
+ continue;
+
+ slab = vdo_get_slab(vdo->depot, pbn);
+ if (slab->allocator != allocator)
+ continue;
+
+ if (!vdo_attempt_replay_into_slab(slab, pbn, entry.operation, increment,
+ &repair->next_journal_point,
+ completion))
+ return;
+
+ repair->entries_added_to_slab_journals++;
+ }
+
+ vdo_notify_slab_journals_are_recovered(completion);
+}
+
+/**
+ * vdo_replay_into_slab_journals() - Replay recovery journal entries in the slab journals of slabs
+ * owned by a given block_allocator.
+ * @allocator: The allocator whose slab journals are to be recovered.
+ * @context: The slab depot load context supplied by a recovery when it loads the depot.
+ */
+void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *context)
+{
+ struct vdo_completion *completion = &allocator->completion;
+ struct repair_completion *repair = context;
+ struct vdo *vdo = completion->vdo;
+
+ vdo_assert_on_physical_zone_thread(vdo, allocator->zone_number, __func__);
+ if (repair->entry_count == 0) {
+ /* there's nothing to replay */
+ repair->logical_blocks_used = vdo->recovery_journal->logical_blocks_used;
+ repair->block_map_data_blocks = vdo->recovery_journal->block_map_data_blocks;
+ vdo_notify_slab_journals_are_recovered(completion);
+ return;
+ }
+
+ repair->next_recovery_point = (struct recovery_point) {
+ .sequence_number = repair->slab_journal_head,
+ .sector_count = 1,
+ .entry_count = 0,
+ };
+
+ repair->next_journal_point = (struct journal_point) {
+ .sequence_number = repair->slab_journal_head,
+ .entry_count = 0,
+ };
+
+ vdo_log_info("Replaying entries into slab journals for zone %u",
+ allocator->zone_number);
+ completion->parent = repair;
+ add_slab_journal_entries(completion);
+}
+
+static void load_slab_depot(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ const struct admin_state_code *operation;
+
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) {
+ prepare_repair_completion(repair, rebuild_reference_counts,
+ VDO_ZONE_TYPE_LOGICAL);
+ operation = VDO_ADMIN_STATE_LOADING_FOR_REBUILD;
+ } else {
+ prepare_repair_completion(repair, drain_slab_depot, VDO_ZONE_TYPE_ADMIN);
+ operation = VDO_ADMIN_STATE_LOADING_FOR_RECOVERY;
+ }
+
+ vdo_load_slab_depot(completion->vdo->depot, operation, completion, repair);
+}
+
+static void flush_block_map(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ const struct admin_state_code *operation;
+
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ vdo_log_info("Flushing block map changes");
+ prepare_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
+ operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ?
+ VDO_ADMIN_STATE_REBUILDING :
+ VDO_ADMIN_STATE_RECOVERING);
+ vdo_drain_block_map(completion->vdo->block_map, operation, completion);
+}
+
+static bool finish_if_done(struct repair_completion *repair)
+{
+ /* Pages are still being launched or there is still work to do */
+ if (repair->launching || (repair->outstanding > 0))
+ return false;
+
+ if (repair->completion.result != VDO_SUCCESS) {
+ page_count_t i;
+
+ for (i = 0; i < repair->page_count; i++) {
+ struct vdo_page_completion *page_completion =
+ &repair->page_completions[i];
+
+ if (page_completion->ready)
+ vdo_release_page_completion(&page_completion->completion);
+ }
+
+ vdo_launch_completion(&repair->completion);
+ return true;
+ }
+
+ if (repair->current_entry >= repair->entries)
+ return false;
+
+ launch_repair_completion(repair, flush_block_map, VDO_ZONE_TYPE_ADMIN);
+ return true;
+}
+
+static void abort_block_map_recovery(struct repair_completion *repair, int result)
+{
+ vdo_set_completion_result(&repair->completion, result);
+ finish_if_done(repair);
+}
+
+/**
+ * find_entry_starting_next_page() - Find the first journal entry after a given entry which is not
+ * on the same block map page.
+ * @repair: The repair completion.
+ * @current_entry: The entry to search from.
+ * @needs_sort: Whether sorting is needed to proceed.
+ *
+ * Return: Pointer to the first later journal entry on a different block map page, or a pointer to
+ * just before the journal entries if no subsequent entry is on a different block map page.
+ */
+static struct numbered_block_mapping *
+find_entry_starting_next_page(struct repair_completion *repair,
+ struct numbered_block_mapping *current_entry, bool needs_sort)
+{
+ size_t current_page;
+
+ /* If current_entry is invalid, return immediately. */
+ if (current_entry < repair->entries)
+ return current_entry;
+
+ current_page = current_entry->block_map_slot.pbn;
+
+ /* Decrement current_entry until it's out of bounds or on a different page. */
+ while ((current_entry >= repair->entries) &&
+ (current_entry->block_map_slot.pbn == current_page)) {
+ if (needs_sort) {
+ struct numbered_block_mapping *just_sorted_entry =
+ sort_next_heap_element(repair);
+ VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
+ "heap is returning elements in an unexpected order");
+ }
+
+ current_entry--;
+ }
+
+ return current_entry;
+}
+
+/*
+ * Apply a range of journal entries [starting_entry, ending_entry) to a block map page.
+ */
+static void apply_journal_entries_to_page(struct block_map_page *page,
+ struct numbered_block_mapping *starting_entry,
+ struct numbered_block_mapping *ending_entry)
+{
+ struct numbered_block_mapping *current_entry = starting_entry;
+
+ while (current_entry != ending_entry) {
+ page->entries[current_entry->block_map_slot.slot] = current_entry->block_map_entry;
+ current_entry--;
+ }
+}
+
+static void recover_ready_pages(struct repair_completion *repair,
+ struct vdo_completion *completion);
+
+static void block_map_page_loaded(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion->parent);
+
+ repair->outstanding--;
+ if (!repair->launching)
+ recover_ready_pages(repair, completion);
+}
+
+static void handle_block_map_page_load_error(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion->parent);
+
+ repair->outstanding--;
+ abort_block_map_recovery(repair, completion->result);
+}
+
+static void fetch_block_map_page(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ physical_block_number_t pbn;
+
+ if (repair->current_unfetched_entry < repair->entries)
+ /* Nothing left to fetch. */
+ return;
+
+ /* Fetch the next page we haven't yet requested. */
+ pbn = repair->current_unfetched_entry->block_map_slot.pbn;
+ repair->current_unfetched_entry =
+ find_entry_starting_next_page(repair, repair->current_unfetched_entry,
+ true);
+ repair->outstanding++;
+ vdo_get_page(((struct vdo_page_completion *) completion),
+ &repair->completion.vdo->block_map->zones[0], pbn, true,
+ &repair->completion, block_map_page_loaded,
+ handle_block_map_page_load_error, false);
+}
+
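+/* Return the next page completion in the repair's ring of completions, wrapping to the first. */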
+static struct vdo_page_completion *get_next_page_completion(struct repair_completion *repair,
+ struct vdo_page_completion *completion)
+{
+ completion++;
+ if (completion == (&repair->page_completions[repair->page_count]))
+ completion = &repair->page_completions[0];
+ return completion;
+}
+
+static void recover_ready_pages(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
+
+ if (finish_if_done(repair))
+ return;
+
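+	/* Only process pages in the order in which their entries must be applied. */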
+ if (repair->pbn != page_completion->pbn)
+ return;
+
+ while (page_completion->ready) {
+ struct numbered_block_mapping *start_of_next_page;
+ struct block_map_page *page;
+ int result;
+
+ result = vdo_get_cached_page(completion, &page);
+ if (result != VDO_SUCCESS) {
+ abort_block_map_recovery(repair, result);
+ return;
+ }
+
+ start_of_next_page =
+ find_entry_starting_next_page(repair, repair->current_entry,
+ false);
+ apply_journal_entries_to_page(page, repair->current_entry,
+ start_of_next_page);
+ repair->current_entry = start_of_next_page;
+ vdo_request_page_write(completion);
+ vdo_release_page_completion(completion);
+
+ if (finish_if_done(repair))
+ return;
+
+ repair->pbn = repair->current_entry->block_map_slot.pbn;
+ fetch_block_map_page(repair, completion);
+ page_completion = get_next_page_completion(repair, page_completion);
+ completion = &page_completion->completion;
+ }
+}
+
+static void recover_block_map(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct vdo *vdo = completion->vdo;
+ struct numbered_block_mapping *first_sorted_entry;
+ page_count_t i;
+
+ vdo_assert_on_logical_zone_thread(vdo, 0, __func__);
+
+ /* Suppress block map errors. */
+ vdo->block_map->zones[0].page_cache.rebuilding =
+ vdo_state_requires_read_only_rebuild(vdo->load_state);
+
+ if (repair->block_map_entry_count == 0) {
+ vdo_log_info("Replaying 0 recovery entries into block map");
+ vdo_free(vdo_forget(repair->journal_data));
+ launch_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
+ return;
+ }
+
+ /*
+ * Organize the journal entries into a binary heap so we can iterate over them in sorted
+ * order incrementally, avoiding an expensive sort call.
+ */
+ repair->replay_heap = (struct min_heap) {
+ .data = repair->entries,
+ .nr = repair->block_map_entry_count,
+ .size = repair->block_map_entry_count,
+ };
+ min_heapify_all(&repair->replay_heap, &repair_min_heap);
+
+ vdo_log_info("Replaying %zu recovery entries into block map",
+ repair->block_map_entry_count);
+
+ repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
+ first_sorted_entry = sort_next_heap_element(repair);
+ VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
+ "heap is returning elements in an unexpected order");
+
+ /* Prevent any page from being processed until all pages have been launched. */
+ repair->launching = true;
+ repair->pbn = repair->current_entry->block_map_slot.pbn;
+ repair->current_unfetched_entry = repair->current_entry;
+ for (i = 0; i < repair->page_count; i++) {
+ if (repair->current_unfetched_entry < repair->entries)
+ break;
+
+ fetch_block_map_page(repair, &repair->page_completions[i].completion);
+ }
+ repair->launching = false;
+
+ /* Process any ready pages. */
+ recover_ready_pages(repair, &repair->page_completions[0].completion);
+}
+
+/**
+ * get_recovery_journal_block_header() - Get the block header for a block at a position in the
+ * journal data and unpack it.
+ * @journal: The recovery journal.
+ * @data: The recovery journal data.
+ * @sequence: The sequence number.
+ *
+ * Return: The unpacked header.
+ */
+static struct recovery_block_header __must_check
+get_recovery_journal_block_header(struct recovery_journal *journal, char *data,
+ sequence_number_t sequence)
+{
+ physical_block_number_t pbn =
+ vdo_get_recovery_journal_block_number(journal, sequence);
+ char *header = &data[pbn * VDO_BLOCK_SIZE];
+
+ return vdo_unpack_recovery_block_header((struct packed_journal_header *) header);
+}
+
+/**
+ * is_valid_recovery_journal_block() - Determine whether the given header describes a valid block
+ * for the given journal.
+ * @journal: The journal to use.
+ * @header: The unpacked block header to check.
+ * @old_ok: Whether an old format header is valid.
+ *
+ * A block is not valid if it is unformatted, or if it is older than the last successful recovery
+ * or reformat.
+ *
+ * Return: True if the header is valid.
+ */
+static bool __must_check is_valid_recovery_journal_block(const struct recovery_journal *journal,
+ const struct recovery_block_header *header,
+ bool old_ok)
+{
+ if ((header->nonce != journal->nonce) ||
+ (header->recovery_count != journal->recovery_count))
+ return false;
+
+ if (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL_2)
+ return (header->entry_count <= journal->entries_per_block);
+
+ return (old_ok &&
+ (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL) &&
+ (header->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK));
+}
+
+/**
+ * is_exact_recovery_journal_block() - Determine whether the given header describes the exact block
+ * indicated.
+ * @journal: The journal to use.
+ * @header: The unpacked block header to check.
+ * @sequence: The expected sequence number.
+ * @type: The expected metadata type.
+ *
+ * Return: True if the block matches.
+ */
+static bool __must_check is_exact_recovery_journal_block(const struct recovery_journal *journal,
+ const struct recovery_block_header *header,
+ sequence_number_t sequence,
+ enum vdo_metadata_type type)
+{
+ return ((header->metadata_type == type) &&
+ (header->sequence_number == sequence) &&
+ (is_valid_recovery_journal_block(journal, header, true)));
+}
+
+/**
+ * find_recovery_journal_head_and_tail() - Find the tail and head of the journal.
+ * @repair: The repair completion.
+ *
+ * Return: True if there were valid journal blocks.
+ */
+static bool find_recovery_journal_head_and_tail(struct repair_completion *repair)
+{
+ struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
+ bool found_entries = false;
+ physical_block_number_t i;
+
+ /*
+ * Ensure that we don't replay old entries since we know the tail recorded in the super
+ * block must be a lower bound. Not doing so can result in extra data loss by setting the
+ * tail too early.
+ */
+ repair->highest_tail = journal->tail;
+ for (i = 0; i < journal->size; i++) {
+ struct recovery_block_header header =
+ get_recovery_journal_block_header(journal, repair->journal_data, i);
+
+ if (!is_valid_recovery_journal_block(journal, &header, true)) {
+ /* This block is old or incorrectly formatted */
+ continue;
+ }
+
+ if (vdo_get_recovery_journal_block_number(journal, header.sequence_number) != i) {
+ /* This block is in the wrong location */
+ continue;
+ }
+
+ if (header.sequence_number >= repair->highest_tail) {
+ found_entries = true;
+ repair->highest_tail = header.sequence_number;
+ }
+
+ if (!found_entries)
+ continue;
+
+ if (header.block_map_head > repair->block_map_head)
+ repair->block_map_head = header.block_map_head;
+
+ if (header.slab_journal_head > repair->slab_journal_head)
+ repair->slab_journal_head = header.slab_journal_head;
+ }
+
+ return found_entries;
+}
+
+/**
+ * unpack_entry() - Unpack a recovery journal entry in either format.
+ * @vdo: The vdo.
+ * @packed: The entry to unpack.
+ * @format: The expected format of the entry.
+ * @entry: The unpacked entry.
+ *
+ * Return: true if the entry should be applied.
+ */
+static bool unpack_entry(struct vdo *vdo, char *packed, enum vdo_metadata_type format,
+ struct recovery_journal_entry *entry)
+{
+ if (format == VDO_METADATA_RECOVERY_JOURNAL_2) {
+ struct packed_recovery_journal_entry *packed_entry =
+ (struct packed_recovery_journal_entry *) packed;
+
+ *entry = vdo_unpack_recovery_journal_entry(packed_entry);
+ } else {
+ physical_block_number_t low32, high4;
+
+ struct packed_recovery_journal_entry_1 *packed_entry =
+ (struct packed_recovery_journal_entry_1 *) packed;
+
+ if (packed_entry->operation == VDO_JOURNAL_DATA_INCREMENT)
+ entry->operation = VDO_JOURNAL_DATA_REMAPPING;
+ else if (packed_entry->operation == VDO_JOURNAL_BLOCK_MAP_INCREMENT)
+ entry->operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING;
+ else
+ return false;
+
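+		/* Reassemble the PBN from its packed low 32 bits and high nibble. */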
+ low32 = __le32_to_cpu(packed_entry->pbn_low_word);
+ high4 = packed_entry->pbn_high_nibble;
+ entry->slot = (struct block_map_slot) {
+ .pbn = ((high4 << 32) | low32),
+ .slot = (packed_entry->slot_low | (packed_entry->slot_high << 6)),
+ };
+ entry->mapping = vdo_unpack_block_map_entry(&packed_entry->block_map_entry);
+ entry->unmapping = (struct data_location) {
+ .pbn = VDO_ZERO_BLOCK,
+ .state = VDO_MAPPING_STATE_UNMAPPED,
+ };
+ }
+
+ return (validate_recovery_journal_entry(vdo, entry) == VDO_SUCCESS);
+}
+
+/**
+ * append_sector_entries() - Append an array of recovery journal entries from a journal block
+ * sector to the array of numbered mappings in the repair completion,
+ * numbering each entry in the order they are appended.
+ * @repair: The repair completion.
+ * @entries: The entries in the sector.
+ * @format: The format of the sector.
+ * @entry_count: The number of entries to append.
+ */
+static void append_sector_entries(struct repair_completion *repair, char *entries,
+ enum vdo_metadata_type format,
+ journal_entry_count_t entry_count)
+{
+ journal_entry_count_t i;
+ struct vdo *vdo = repair->completion.vdo;
+ off_t increment = ((format == VDO_METADATA_RECOVERY_JOURNAL_2)
+ ? sizeof(struct packed_recovery_journal_entry)
+ : sizeof(struct packed_recovery_journal_entry_1));
+
+ for (i = 0; i < entry_count; i++, entries += increment) {
+ struct recovery_journal_entry entry;
+
+ if (!unpack_entry(vdo, entries, format, &entry))
+ /* When recovering from read-only mode, ignore damaged entries. */
+ continue;
+
+ repair->entries[repair->block_map_entry_count] =
+ (struct numbered_block_mapping) {
+ .block_map_slot = entry.slot,
+ .block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
+ entry.mapping.state),
+ .number = repair->block_map_entry_count,
+ };
+ repair->block_map_entry_count++;
+ }
+}
+
+static journal_entry_count_t entries_per_sector(enum vdo_metadata_type format,
+ u8 sector_number)
+{
+ if (format == VDO_METADATA_RECOVERY_JOURNAL_2)
+ return RECOVERY_JOURNAL_ENTRIES_PER_SECTOR;
+
+ return ((sector_number == (VDO_SECTORS_PER_BLOCK - 1))
+ ? RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR
+ : RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR);
+}
+
+static void extract_entries_from_block(struct repair_completion *repair,
+ struct recovery_journal *journal,
+ sequence_number_t sequence,
+ enum vdo_metadata_type format,
+ journal_entry_count_t entries)
+{
+ sector_count_t i;
+ struct recovery_block_header header =
+ get_recovery_journal_block_header(journal, repair->journal_data,
+ sequence);
+
+ if (!is_exact_recovery_journal_block(journal, &header, sequence, format)) {
+ /* This block is invalid, so skip it. */
+ return;
+ }
+
+ entries = min(entries, header.entry_count);
+ for (i = 1; i < VDO_SECTORS_PER_BLOCK; i++) {
+ struct packed_journal_sector *sector =
+ get_sector(journal, repair->journal_data, sequence, i);
+ journal_entry_count_t sector_entries =
+ min(entries, entries_per_sector(format, i));
+
+ if (vdo_is_valid_recovery_journal_sector(&header, sector, i)) {
+ /* Only extract as many as the block header calls for. */
+ append_sector_entries(repair, (char *) sector->entries, format,
+ min_t(journal_entry_count_t,
+ sector->entry_count,
+ sector_entries));
+ }
+
+ /*
+ * Even if the sector wasn't full, count it as full when counting up to the
+ * entry count the block header claims.
+ */
+ entries -= sector_entries;
+ }
+}
+
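+/*
+ * Extract every entry from every valid journal block into the repair's array of
+ * numbered_block_mappings in preparation for a read-only rebuild.
+ */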
+static int parse_journal_for_rebuild(struct repair_completion *repair)
+{
+ int result;
+ sequence_number_t i;
+ block_count_t count;
+ enum vdo_metadata_type format;
+ struct vdo *vdo = repair->completion.vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ journal_entry_count_t entries_per_block = journal->entries_per_block;
+
+ format = get_recovery_journal_block_header(journal, repair->journal_data,
+ repair->highest_tail).metadata_type;
+ if (format == VDO_METADATA_RECOVERY_JOURNAL)
+ entries_per_block = RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK;
+
+ /*
+ * Allocate an array of numbered_block_mapping structures large enough to transcribe every
+ * packed_recovery_journal_entry from every valid journal block.
+ */
+ count = ((repair->highest_tail - repair->block_map_head + 1) * entries_per_block);
+ result = vdo_allocate(count, struct numbered_block_mapping, __func__,
+ &repair->entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (i = repair->block_map_head; i <= repair->highest_tail; i++)
+ extract_entries_from_block(repair, journal, i, format, entries_per_block);
+
+ return VDO_SUCCESS;
+}
+
+static int validate_heads(struct repair_completion *repair)
+{
+ /* Both reap heads must be behind the tail. */
+ if ((repair->block_map_head <= repair->tail) &&
+ (repair->slab_journal_head <= repair->tail))
+ return VDO_SUCCESS;
+
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Journal tail too early. block map head: %llu, slab journal head: %llu, tail: %llu",
+ (unsigned long long) repair->block_map_head,
+ (unsigned long long) repair->slab_journal_head,
+ (unsigned long long) repair->tail);
+}
+
+/**
+ * extract_new_mappings() - Find all valid new mappings to be applied to the block map.
+ * @repair: The repair completion.
+ *
+ * The mappings are extracted from the journal and stored in a sortable array so that all of the
+ * mappings to be applied to a given block map page can be done in a single page fetch.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int extract_new_mappings(struct repair_completion *repair)
+{
+ int result;
+ struct vdo *vdo = repair->completion.vdo;
+ struct recovery_point recovery_point = {
+ .sequence_number = repair->block_map_head,
+ .sector_count = 1,
+ .entry_count = 0,
+ };
+
+ /*
+ * Allocate an array of numbered_block_mapping structs just large enough to transcribe
+ * every packed_recovery_journal_entry from every valid journal block.
+ */
+ result = vdo_allocate(repair->entry_count, struct numbered_block_mapping,
+ __func__, &repair->entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (; before_recovery_point(&recovery_point, &repair->tail_recovery_point);
+ increment_recovery_point(&recovery_point)) {
+ struct recovery_journal_entry entry = get_entry(repair, &recovery_point);
+
+ result = validate_recovery_journal_entry(vdo, &entry);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(vdo, result);
+ return result;
+ }
+
+ repair->entries[repair->block_map_entry_count] =
+ (struct numbered_block_mapping) {
+ .block_map_slot = entry.slot,
+ .block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
+ entry.mapping.state),
+ .number = repair->block_map_entry_count,
+ };
+ repair->block_map_entry_count++;
+ }
+
+ result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count),
+ "approximate entry count is an upper bound");
+ if (result != VDO_SUCCESS)
+ vdo_enter_read_only_mode(vdo, result);
+
+ return result;
+}
+
+/**
+ * compute_usages() - Compute the logical blocks in use and block map data block counts from the
+ * tail of the journal.
+ * @repair: The repair completion.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static noinline int compute_usages(struct repair_completion *repair)
+{
+ /*
+ * This function is declared noinline to avoid a spurious valgrind error regarding the
+ * following structure being uninitialized.
+ */
+ struct recovery_point recovery_point = {
+ .sequence_number = repair->tail,
+ .sector_count = 1,
+ .entry_count = 0,
+ };
+
+ struct vdo *vdo = repair->completion.vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ struct recovery_block_header header =
+ get_recovery_journal_block_header(journal, repair->journal_data,
+ repair->tail);
+
+ repair->logical_blocks_used = header.logical_blocks_used;
+ repair->block_map_data_blocks = header.block_map_data_blocks;
+
+ for (; before_recovery_point(&recovery_point, &repair->tail_recovery_point);
+ increment_recovery_point(&recovery_point)) {
+ struct recovery_journal_entry entry = get_entry(repair, &recovery_point);
+ int result;
+
+ result = validate_recovery_journal_entry(vdo, &entry);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(vdo, result);
+ return result;
+ }
+
+ if (entry.operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ repair->block_map_data_blocks++;
+ continue;
+ }
+
+ if (vdo_is_mapped_location(&entry.mapping))
+ repair->logical_blocks_used++;
+
+ if (vdo_is_mapped_location(&entry.unmapping))
+ repair->logical_blocks_used--;
+ }
+
+ return VDO_SUCCESS;
+}
+
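+/*
+ * Determine the limits of the usable recovery journal, then extract the new block map mappings
+ * and compute the block usage counts needed for a normal recovery.
+ */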
+static int parse_journal_for_recovery(struct repair_completion *repair)
+{
+ int result;
+ sequence_number_t i, head;
+ bool found_entries = false;
+ struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
+
+ head = min(repair->block_map_head, repair->slab_journal_head);
+ for (i = head; i <= repair->highest_tail; i++) {
+ struct recovery_block_header header;
+ journal_entry_count_t block_entries;
+ u8 j;
+
+ repair->tail = i;
+ repair->tail_recovery_point = (struct recovery_point) {
+ .sequence_number = i,
+ .sector_count = 0,
+ .entry_count = 0,
+ };
+
+ header = get_recovery_journal_block_header(journal, repair->journal_data, i);
+ if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) {
+ /* This is an old format block, so we need to upgrade */
+ vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "Recovery journal is in the old format, a read-only rebuild is required.");
+ vdo_enter_read_only_mode(repair->completion.vdo,
+ VDO_UNSUPPORTED_VERSION);
+ return VDO_UNSUPPORTED_VERSION;
+ }
+
+ if (!is_exact_recovery_journal_block(journal, &header, i,
+ VDO_METADATA_RECOVERY_JOURNAL_2)) {
+ /* A bad block header was found so this must be the end of the journal. */
+ break;
+ }
+
+ block_entries = header.entry_count;
+
+ /* Examine each sector in turn to determine the last valid sector. */
+ for (j = 1; j < VDO_SECTORS_PER_BLOCK; j++) {
+ struct packed_journal_sector *sector =
+ get_sector(journal, repair->journal_data, i, j);
+ journal_entry_count_t sector_entries =
+ min_t(journal_entry_count_t, sector->entry_count,
+ block_entries);
+
+ /* A bad sector means that this block was torn. */
+ if (!vdo_is_valid_recovery_journal_sector(&header, sector, j))
+ break;
+
+ if (sector_entries > 0) {
+ found_entries = true;
+ repair->tail_recovery_point.sector_count++;
+ repair->tail_recovery_point.entry_count = sector_entries;
+ block_entries -= sector_entries;
+ repair->entry_count += sector_entries;
+ }
+
+ /* If this sector is short, the later sectors can't matter. */
+ if ((sector_entries < RECOVERY_JOURNAL_ENTRIES_PER_SECTOR) ||
+ (block_entries == 0))
+ break;
+ }
+
+ /* If this block was not filled, or if it tore, no later block can matter. */
+ if ((header.entry_count != journal->entries_per_block) || (block_entries > 0))
+ break;
+ }
+
+ if (!found_entries)
+ return validate_heads(repair);
+
+ /* Set the tail to the last valid tail block, if there is one. */
+ if (repair->tail_recovery_point.sector_count == 0)
+ repair->tail--;
+
+ result = validate_heads(repair);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu",
+ (unsigned long long) repair->highest_tail,
+ (unsigned long long) repair->tail);
+
+ result = extract_new_mappings(repair);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return compute_usages(repair);
+}
+
+static int parse_journal(struct repair_completion *repair)
+{
+ if (!find_recovery_journal_head_and_tail(repair))
+ return VDO_SUCCESS;
+
+ return (vdo_state_requires_read_only_rebuild(repair->completion.vdo->load_state) ?
+ parse_journal_for_rebuild(repair) :
+ parse_journal_for_recovery(repair));
+}
+
+static void finish_journal_load(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ if (++repair->vios_complete != repair->vio_count)
+ return;
+
+ vdo_log_info("Finished reading recovery journal");
+ uninitialize_vios(repair);
+ prepare_repair_completion(repair, recover_block_map, VDO_ZONE_TYPE_LOGICAL);
+ vdo_continue_completion(&repair->completion, parse_journal(repair));
+}
+
+static void handle_journal_load_error(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ /* Preserve the error */
+ vdo_set_completion_result(&repair->completion, completion->result);
+ vio_record_metadata_io_error(as_vio(completion));
+ completion->callback(completion);
+}
+
+static void read_journal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+
+ continue_vio_after_io(vio, finish_journal_load, vdo->thread_config.admin_thread);
+}
+
+/**
+ * vdo_repair() - Load the recovery journal and then recover or rebuild a vdo.
+ * @parent: The completion to notify when the operation is complete.
+ */
+void vdo_repair(struct vdo_completion *parent)
+{
+ int result;
+ char *ptr;
+ struct repair_completion *repair;
+ struct vdo *vdo = parent->vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ physical_block_number_t pbn = journal->origin;
+ block_count_t remaining = journal->size;
+ block_count_t vio_count = DIV_ROUND_UP(remaining, MAX_BLOCKS_PER_VIO);
+ page_count_t page_count = min_t(page_count_t,
+ vdo->device_config->cache_size >> 1,
+ MAXIMUM_SIMULTANEOUS_VDO_BLOCK_MAP_RESTORATION_READS);
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (vdo->load_state == VDO_FORCE_REBUILD) {
+ vdo_log_warning("Rebuilding reference counts to clear read-only mode");
+ vdo->states.vdo.read_only_recoveries++;
+ } else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) {
+ vdo_log_warning("Rebuilding reference counts for upgrade");
+ } else {
+ vdo_log_warning("Device was dirty, rebuilding reference counts");
+ }
+
+ result = vdo_allocate_extended(struct repair_completion, page_count,
+ struct vdo_page_completion, __func__,
+ &repair);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(parent, result);
+ return;
+ }
+
+ vdo_initialize_completion(&repair->completion, vdo, VDO_REPAIR_COMPLETION);
+ repair->completion.error_handler = abort_repair;
+ repair->completion.parent = parent;
+ prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
+ repair->page_count = page_count;
+
+ result = vdo_allocate(remaining * VDO_BLOCK_SIZE, char, __func__,
+ &repair->journal_data);
+ if (abort_on_error(result, repair))
+ return;
+
+ result = vdo_allocate(vio_count, struct vio, __func__, &repair->vios);
+ if (abort_on_error(result, repair))
+ return;
+
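+	/* Carve the journal read buffer into vios, each covering at most MAX_BLOCKS_PER_VIO blocks. */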
+ ptr = repair->journal_data;
+ for (repair->vio_count = 0; repair->vio_count < vio_count; repair->vio_count++) {
+ block_count_t blocks = min_t(block_count_t, remaining,
+ MAX_BLOCKS_PER_VIO);
+
+ result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
+ VIO_PRIORITY_METADATA,
+ repair, blocks, ptr,
+ &repair->vios[repair->vio_count]);
+ if (abort_on_error(result, repair))
+ return;
+
+ ptr += (blocks * VDO_BLOCK_SIZE);
+ remaining -= blocks;
+ }
+
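+	/* Submit all the reads; finish_journal_load() proceeds once the last vio completes. */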
+ for (vio_count = 0; vio_count < repair->vio_count;
+ vio_count++, pbn += MAX_BLOCKS_PER_VIO) {
+ vdo_submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio,
+ handle_journal_load_error, REQ_OP_READ);
+ }
+}
diff --git a/drivers/md/dm-vdo/repair.h b/drivers/md/dm-vdo/repair.h
new file mode 100644
index 000000000000..ff255cf41486
--- /dev/null
+++ b/drivers/md/dm-vdo/repair.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_REPAIR_H
+#define VDO_REPAIR_H
+
+#include "types.h"
+
+void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *context);
+void vdo_repair(struct vdo_completion *parent);
+
+#endif /* VDO_REPAIR_H */
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
new file mode 100644
index 000000000000..46e4721e5b4f
--- /dev/null
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -0,0 +1,5101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "slab-depot.h"
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/min_heap.h>
+#include <linux/minmax.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "physical-zone.h"
+#include "priority-table.h"
+#include "recovery-journal.h"
+#include "repair.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+static const u64 BYTES_PER_WORD = sizeof(u64);
+static const bool NORMAL_OPERATION = true;
+
+/**
+ * get_lock() - Get the lock object for a slab journal block by sequence number.
+ * @journal: vdo_slab journal to retrieve from.
+ * @sequence_number: Sequence number of the block.
+ *
+ * Return: The lock object for the given sequence number.
+ */
+static inline struct journal_lock * __must_check get_lock(struct slab_journal *journal,
+ sequence_number_t sequence_number)
+{
+ return &journal->locks[sequence_number % journal->size];
+}
+
+static bool is_slab_open(struct vdo_slab *slab)
+{
+ return (!vdo_is_state_quiescing(&slab->state) &&
+ !vdo_is_state_quiescent(&slab->state));
+}
+
+/**
+ * must_make_entries_to_flush() - Check whether there are entry waiters which should delay a flush.
+ * @journal: The journal to check.
+ *
+ * Return: true if the slab is not rebuilding and there are entry waiters.
+ */
+static inline bool __must_check must_make_entries_to_flush(struct slab_journal *journal)
+{
+ return ((journal->slab->status != VDO_SLAB_REBUILDING) &&
+ vdo_waitq_has_waiters(&journal->entry_waiters));
+}
+
+/**
+ * is_reaping() - Check whether a reap is currently in progress.
+ * @journal: The journal which may be reaping.
+ *
+ * Return: true if the journal is reaping.
+ */
+static inline bool __must_check is_reaping(struct slab_journal *journal)
+{
+ return (journal->head != journal->unreapable);
+}
+
+/**
+ * initialize_tail_block() - Initialize tail block as a new block.
+ * @journal: The journal whose tail block is being initialized.
+ */
+static void initialize_tail_block(struct slab_journal *journal)
+{
+ struct slab_journal_block_header *header = &journal->tail_header;
+
+ header->sequence_number = journal->tail;
+ header->entry_count = 0;
+ header->has_block_map_increments = false;
+}
+
+/**
+ * initialize_journal_state() - Set all journal fields appropriately to start journaling.
+ * @journal: The journal to be reset, based on its tail sequence number.
+ */
+static void initialize_journal_state(struct slab_journal *journal)
+{
+ journal->unreapable = journal->head;
+ journal->reap_lock = get_lock(journal, journal->unreapable);
+ journal->next_commit = journal->tail;
+ journal->summarized = journal->last_summarized = journal->tail;
+ initialize_tail_block(journal);
+}
+
+/**
+ * block_is_full() - Check whether a journal block is full.
+ * @journal: The slab journal for the block.
+ *
+ * Return: true if the tail block is full.
+ */
+static bool __must_check block_is_full(struct slab_journal *journal)
+{
+ journal_entry_count_t count = journal->tail_header.entry_count;
+
+ return (journal->tail_header.has_block_map_increments ?
+ (journal->full_entries_per_block == count) :
+ (journal->entries_per_block == count));
+}
+
+static void add_entries(struct slab_journal *journal);
+static void update_tail_block_location(struct slab_journal *journal);
+static void release_journal_locks(struct vdo_waiter *waiter, void *context);
+
+/**
+ * is_slab_journal_blank() - Check whether a slab's journal is blank.
+ *
+ * A slab journal is blank if it has never had any entries recorded in it.
+ *
+ * Return: true if the slab's journal has never been modified.
+ */
+static bool is_slab_journal_blank(const struct vdo_slab *slab)
+{
+ return ((slab->journal.tail == 1) &&
+ (slab->journal.tail_header.entry_count == 0));
+}
+
+/**
+ * mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
+ * order.
+ * @journal: The journal to be marked dirty.
+ * @lock: The recovery journal lock held by the slab journal.
+ */
+static void mark_slab_journal_dirty(struct slab_journal *journal, sequence_number_t lock)
+{
+ struct slab_journal *dirty_journal;
+ struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals;
+
+ VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean");
+
+ journal->recovery_lock = lock;
+ list_for_each_entry_reverse(dirty_journal, dirty_list, dirty_entry) {
+ if (dirty_journal->recovery_lock <= journal->recovery_lock)
+ break;
+ }
+
+ list_move_tail(&journal->dirty_entry, dirty_journal->dirty_entry.next);
+}
+
+static void mark_slab_journal_clean(struct slab_journal *journal)
+{
+ journal->recovery_lock = 0;
+ list_del_init(&journal->dirty_entry);
+}
+
+static void check_if_slab_drained(struct vdo_slab *slab)
+{
+ bool read_only;
+ struct slab_journal *journal = &slab->journal;
+ const struct admin_state_code *code;
+
+ if (!vdo_is_state_draining(&slab->state) ||
+ must_make_entries_to_flush(journal) ||
+ is_reaping(journal) ||
+ journal->waiting_to_commit ||
+ !list_empty(&journal->uncommitted_blocks) ||
+ journal->updating_slab_summary ||
+ (slab->active_count > 0))
+ return;
+
+ /* When not suspending or recovering, the slab must be clean. */
+ code = vdo_get_admin_state_code(&slab->state);
+ read_only = vdo_is_read_only(slab->allocator->depot->vdo);
+ if (!read_only &&
+ vdo_waitq_has_waiters(&slab->dirty_blocks) &&
+ (code != VDO_ADMIN_STATE_SUSPENDING) &&
+ (code != VDO_ADMIN_STATE_RECOVERING))
+ return;
+
+ vdo_finish_draining_with_result(&slab->state,
+ (read_only ? VDO_READ_ONLY : VDO_SUCCESS));
+}
+
+/* FULLNESS HINT COMPUTATION */
+
+/**
+ * compute_fullness_hint() - Translate a slab's free block count into a 'fullness hint' that can be
+ * stored in a slab_summary_entry's 7 bits that are dedicated to its free
+ * count.
+ * @depot: The depot whose summary is being updated.
+ * @free_blocks: The number of free blocks.
+ *
+ * Note: the number of free blocks must be strictly less than 2^23 blocks, even though
+ * theoretically slabs could contain precisely 2^23 blocks; there is an assumption that at least
+ * one block is used by metadata. This assumption is necessary; otherwise, the fullness hint might
+ * overflow. The fullness hint formula is roughly (fullness >> 16) & 0x7f, but (2^23 >> 16) & 0x7f
+ * is 0, which would make it impossible to distinguish completely full from completely empty.
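+ *
+ * For example (illustrative values only), with a hint_shift of 16, a slab with no free blocks
+ * yields a hint of 0, a slab with only 100 free blocks is clamped to a hint of 1, and a slab
+ * with 2,000,000 free blocks yields a hint of 30.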
+ *
+ * Return: A fullness hint, which can be stored in 7 bits.
+ */
+static u8 __must_check compute_fullness_hint(struct slab_depot *depot,
+ block_count_t free_blocks)
+{
+ block_count_t hint;
+
+ VDO_ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23");
+
+ if (free_blocks == 0)
+ return 0;
+
+ hint = free_blocks >> depot->hint_shift;
+ return ((hint == 0) ? 1 : hint);
+}
+
+/**
+ * check_summary_drain_complete() - Check whether an allocator's summary has finished draining.
+ * @allocator: The allocator to check.
+ */
+static void check_summary_drain_complete(struct block_allocator *allocator)
+{
+ if (!vdo_is_state_draining(&allocator->summary_state) ||
+ (allocator->summary_write_count > 0))
+ return;
+
+ vdo_finish_operation(&allocator->summary_state,
+ (vdo_is_read_only(allocator->depot->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS));
+}
+
+/**
+ * notify_summary_waiters() - Wake all the waiters in a given queue.
+ * @allocator: The block allocator summary which owns the queue.
+ * @queue: The queue to notify.
+ */
+static void notify_summary_waiters(struct block_allocator *allocator,
+ struct vdo_wait_queue *queue)
+{
+ int result = (vdo_is_read_only(allocator->depot->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS);
+
+ vdo_waitq_notify_all_waiters(queue, NULL, &result);
+}
+
+static void launch_write(struct slab_summary_block *summary_block);
+
+/**
+ * finish_updating_slab_summary_block() - Finish processing a block which attempted to write,
+ * whether or not the attempt succeeded.
+ * @block: The block.
+ */
+static void finish_updating_slab_summary_block(struct slab_summary_block *block)
+{
+ notify_summary_waiters(block->allocator, &block->current_update_waiters);
+ block->writing = false;
+ block->allocator->summary_write_count--;
+ if (vdo_waitq_has_waiters(&block->next_update_waiters))
+ launch_write(block);
+ else
+ check_summary_drain_complete(block->allocator);
+}
+
+/**
+ * finish_update() - This is the callback for a successful summary block write.
+ * @completion: The write vio.
+ */
+static void finish_update(struct vdo_completion *completion)
+{
+ struct slab_summary_block *block =
+ container_of(as_vio(completion), struct slab_summary_block, vio);
+
+ atomic64_inc(&block->allocator->depot->summary_statistics.blocks_written);
+ finish_updating_slab_summary_block(block);
+}
+
+/**
+ * handle_write_error() - Handle an error writing a slab summary block.
+ * @completion: The write VIO.
+ */
+static void handle_write_error(struct vdo_completion *completion)
+{
+ struct slab_summary_block *block =
+ container_of(as_vio(completion), struct slab_summary_block, vio);
+
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_enter_read_only_mode(completion->vdo, completion->result);
+ finish_updating_slab_summary_block(block);
+}
+
+static void write_slab_summary_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_summary_block *block =
+ container_of(vio, struct slab_summary_block, vio);
+
+ continue_vio_after_io(vio, finish_update, block->allocator->thread_id);
+}
+
+/**
+ * launch_write() - Write a slab summary block unless it is currently out for writing.
+ * @block: The block that needs to be committed.
+ */
+static void launch_write(struct slab_summary_block *block)
+{
+ struct block_allocator *allocator = block->allocator;
+ struct slab_depot *depot = allocator->depot;
+ physical_block_number_t pbn;
+
+ if (block->writing)
+ return;
+
+ allocator->summary_write_count++;
+ vdo_waitq_transfer_all_waiters(&block->next_update_waiters,
+ &block->current_update_waiters);
+ block->writing = true;
+
+ if (vdo_is_read_only(depot->vdo)) {
+ finish_updating_slab_summary_block(block);
+ return;
+ }
+
+ memcpy(block->outgoing_entries, block->entries, VDO_BLOCK_SIZE);
+
+ /*
+ * Flush before writing to ensure that the slab journal tail blocks and reference updates
+ * covered by this summary update are stable. Otherwise, a subsequent recovery could
+ * encounter a slab summary update that refers to a slab journal tail block that has not
+ * actually been written. In such cases, the slab journal referenced will be treated as
+ * empty, causing any data within the slab which predates the existing recovery journal
+ * entries to be lost.
+ */
+ pbn = (depot->summary_origin +
+ (VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) +
+ block->index);
+ vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio,
+ handle_write_error, REQ_OP_WRITE | REQ_PREFLUSH);
+}
+
+/**
+ * update_slab_summary_entry() - Update the entry for a slab.
+ * @slab: The slab whose entry is to be updated.
+ * @waiter: The waiter that is updating the summary.
+ * @tail_block_offset: The offset of the slab journal's tail block.
+ * @load_ref_counts: Whether the reference counts must be loaded from disk on the vdo load.
+ * @is_clean: Whether the slab is clean.
+ * @free_blocks: The number of free blocks.
+ */
+static void update_slab_summary_entry(struct vdo_slab *slab, struct vdo_waiter *waiter,
+ tail_block_offset_t tail_block_offset,
+ bool load_ref_counts, bool is_clean,
+ block_count_t free_blocks)
+{
+ u8 index = slab->slab_number / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK;
+ struct block_allocator *allocator = slab->allocator;
+ struct slab_summary_block *block = &allocator->summary_blocks[index];
+ int result;
+ struct slab_summary_entry *entry;
+
+ if (vdo_is_read_only(block->vio.completion.vdo)) {
+ result = VDO_READ_ONLY;
+ waiter->callback(waiter, &result);
+ return;
+ }
+
+ if (vdo_is_state_draining(&allocator->summary_state) ||
+ vdo_is_state_quiescent(&allocator->summary_state)) {
+ result = VDO_INVALID_ADMIN_STATE;
+ waiter->callback(waiter, &result);
+ return;
+ }
+
+ entry = &allocator->summary_entries[slab->slab_number];
+ *entry = (struct slab_summary_entry) {
+ .tail_block_offset = tail_block_offset,
+ .load_ref_counts = (entry->load_ref_counts || load_ref_counts),
+ .is_dirty = !is_clean,
+ .fullness_hint = compute_fullness_hint(allocator->depot, free_blocks),
+ };
+ vdo_waitq_enqueue_waiter(&block->next_update_waiters, waiter);
+ launch_write(block);
+}
+
+/**
+ * finish_reaping() - Actually advance the head of the journal now that any necessary flushes are
+ * complete.
+ * @journal: The journal to be reaped.
+ */
+static void finish_reaping(struct slab_journal *journal)
+{
+ journal->head = journal->unreapable;
+ add_entries(journal);
+ check_if_slab_drained(journal->slab);
+}
+
+static void reap_slab_journal(struct slab_journal *journal);
+
+/**
+ * complete_reaping() - Finish reaping now that we have flushed the lower layer and then try
+ * reaping again in case we deferred reaping due to an outstanding vio.
+ * @completion: The flush vio.
+ */
+static void complete_reaping(struct vdo_completion *completion)
+{
+ struct slab_journal *journal = completion->parent;
+
+ return_vio_to_pool(journal->slab->allocator->vio_pool,
+ vio_as_pooled_vio(as_vio(vdo_forget(completion))));
+ finish_reaping(journal);
+ reap_slab_journal(journal);
+}
+
+/**
+ * handle_flush_error() - Handle an error flushing the lower layer.
+ * @completion: The flush vio.
+ */
+static void handle_flush_error(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_enter_read_only_mode(completion->vdo, completion->result);
+ complete_reaping(completion);
+}
+
+static void flush_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, complete_reaping,
+ journal->slab->allocator->thread_id);
+}
+
+/**
+ * flush_for_reaping() - A waiter callback for getting a vio with which to flush the lower layer
+ * prior to reaping.
+ * @waiter: The journal as a flush waiter.
+ * @context: The newly acquired flush vio.
+ */
+static void flush_for_reaping(struct vdo_waiter *waiter, void *context)
+{
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, flush_waiter);
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+
+ vio->completion.parent = journal;
+ vdo_submit_flush_vio(vio, flush_endio, handle_flush_error);
+}
+
+/**
+ * reap_slab_journal() - Conduct a reap on a slab journal to reclaim unreferenced blocks.
+ * @journal: The slab journal.
+ */
+static void reap_slab_journal(struct slab_journal *journal)
+{
+ bool reaped = false;
+
+ if (is_reaping(journal)) {
+ /* We already have a reap in progress so wait for it to finish. */
+ return;
+ }
+
+ if ((journal->slab->status != VDO_SLAB_REBUILT) ||
+ !vdo_is_state_normal(&journal->slab->state) ||
+ vdo_is_read_only(journal->slab->allocator->depot->vdo)) {
+ /*
+ * We must not reap in the first two cases, and there's no point in read-only mode.
+ */
+ return;
+ }
+
+ /*
+ * Start reclaiming blocks only when the journal head has no references. Then stop when a
+ * block is referenced or reap reaches the most recently written block, referenced by the
+ * slab summary, which has the sequence number just before the tail.
+ */
+ while ((journal->unreapable < journal->tail) && (journal->reap_lock->count == 0)) {
+ reaped = true;
+ journal->unreapable++;
+ journal->reap_lock++;
+ if (journal->reap_lock == &journal->locks[journal->size])
+ journal->reap_lock = &journal->locks[0];
+ }
+
+ if (!reaped)
+ return;
+
+ /*
+ * It is never safe to reap a slab journal block without first issuing a flush, regardless
+ * of whether a user flush has been received or not. In the absence of the flush, the
+ * reference block write which released the locks allowing the slab journal to reap may not
+ * be persisted. Although slab summary writes will eventually issue flushes, multiple slab
+ * journal block writes can be issued while previous slab summary updates have not yet been
+ * made. Even though those slab journal block writes will be ignored if the slab summary
+ * update is not persisted, they may still overwrite the to-be-reaped slab journal block
+ * resulting in a loss of reference count updates.
+ */
+ journal->flush_waiter.callback = flush_for_reaping;
+ acquire_vio_from_pool(journal->slab->allocator->vio_pool,
+ &journal->flush_waiter);
+}
+
+/**
+ * adjust_slab_journal_block_reference() - Adjust the reference count for a slab journal block.
+ * @journal: The slab journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ * @adjustment: Amount to adjust the reference counter.
+ *
+ * Note that when an adjustment drops a lock's count to zero, the slab journal will be reaped.
+ */
+static void adjust_slab_journal_block_reference(struct slab_journal *journal,
+ sequence_number_t sequence_number,
+ int adjustment)
+{
+ struct journal_lock *lock;
+
+ if (sequence_number == 0)
+ return;
+
+ if (journal->slab->status == VDO_SLAB_REPLAYING) {
+ /* Locks should not be used during offline replay. */
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero");
+ lock = get_lock(journal, sequence_number);
+ if (adjustment < 0) {
+ VDO_ASSERT_LOG_ONLY((-adjustment <= lock->count),
+ "adjustment %d of lock count %u for slab journal block %llu must not underflow",
+ adjustment, lock->count,
+ (unsigned long long) sequence_number);
+ }
+
+ lock->count += adjustment;
+ if (lock->count == 0)
+ reap_slab_journal(journal);
+}
+
+/**
+ * release_journal_locks() - Callback invoked after a slab summary update completes.
+ * @waiter: The slab summary waiter that has just been notified.
+ * @context: The result code of the update.
+ *
+ * Registered in the constructor on behalf of update_tail_block_location().
+ *
+ * Implements waiter_callback_fn.
+ */
+static void release_journal_locks(struct vdo_waiter *waiter, void *context)
+{
+ sequence_number_t first, i;
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, slab_summary_waiter);
+ int result = *((int *) context);
+
+ if (result != VDO_SUCCESS) {
+ if (result != VDO_READ_ONLY) {
+ /*
+ * Don't bother logging what might be lots of errors if we are already in
+ * read-only mode.
+ */
+ vdo_log_error_strerror(result, "failed slab summary update %llu",
+ (unsigned long long) journal->summarized);
+ }
+
+ journal->updating_slab_summary = false;
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
+ check_if_slab_drained(journal->slab);
+ return;
+ }
+
+ if (journal->partial_write_in_progress && (journal->summarized == journal->tail)) {
+ journal->partial_write_in_progress = false;
+ add_entries(journal);
+ }
+
+ first = journal->last_summarized;
+ journal->last_summarized = journal->summarized;
+ for (i = journal->summarized - 1; i >= first; i--) {
+ /*
+ * Release the lock the summarized block held on the recovery journal. (During
+ * replay, recovery_start will always be 0.)
+ */
+ if (journal->recovery_journal != NULL) {
+ zone_count_t zone_number = journal->slab->allocator->zone_number;
+ struct journal_lock *lock = get_lock(journal, i);
+
+ vdo_release_recovery_journal_block_reference(journal->recovery_journal,
+ lock->recovery_start,
+ VDO_ZONE_TYPE_PHYSICAL,
+ zone_number);
+ }
+
+ /*
+ * Release our own lock against reaping for blocks that are committed. (This
+ * function will not change locks during replay.)
+ */
+ adjust_slab_journal_block_reference(journal, i, -1);
+ }
+
+ journal->updating_slab_summary = false;
+
+ reap_slab_journal(journal);
+
+ /* Check if the slab summary needs to be updated again. */
+ update_tail_block_location(journal);
+}
+
+/**
+ * update_tail_block_location() - Update the tail block location in the slab summary, if necessary.
+ * @journal: The slab journal that is updating its tail block location.
+ */
+static void update_tail_block_location(struct slab_journal *journal)
+{
+ block_count_t free_block_count;
+ struct vdo_slab *slab = journal->slab;
+
+ if (journal->updating_slab_summary ||
+ vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
+ (journal->last_summarized >= journal->next_commit)) {
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ if (slab->status != VDO_SLAB_REBUILT) {
+ u8 hint = slab->allocator->summary_entries[slab->slab_number].fullness_hint;
+
+ free_block_count = ((block_count_t) hint) << slab->allocator->depot->hint_shift;
+ } else {
+ free_block_count = slab->free_blocks;
+ }
+
+ journal->summarized = journal->next_commit;
+ journal->updating_slab_summary = true;
+
+ /*
+ * Update slab summary as dirty.
+ * vdo_slab journal can only reap past sequence number 1 when all the ref counts for this
+ * slab have been written to the layer. Therefore, indicate that the ref counts must be
+ * loaded when the journal head has reaped past sequence number 1.
+ */
+ update_slab_summary_entry(slab, &journal->slab_summary_waiter,
+ journal->summarized % journal->size,
+ (journal->head > 1), false, free_block_count);
+}
+
+/**
+ * reopen_slab_journal() - Reopen a slab's journal by emptying it and then adding pending entries.
+ */
+static void reopen_slab_journal(struct vdo_slab *slab)
+{
+ struct slab_journal *journal = &slab->journal;
+ sequence_number_t block;
+
+ VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0,
+ "vdo_slab journal's active block empty before reopening");
+ journal->head = journal->tail;
+ initialize_journal_state(journal);
+
+ /* Ensure no locks are spuriously held on an empty journal. */
+ for (block = 1; block <= journal->size; block++) {
+ VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0),
+ "Scrubbed journal's block %llu is not locked",
+ (unsigned long long) block);
+ }
+
+ add_entries(journal);
+}
+
+static sequence_number_t get_committing_sequence_number(const struct pooled_vio *vio)
+{
+ const struct packed_slab_journal_block *block =
+ (const struct packed_slab_journal_block *) vio->vio.data;
+
+ return __le64_to_cpu(block->header.sequence_number);
+}
+
+/**
+ * complete_write() - Handle post-commit processing.
+ * @completion: The write vio as a completion.
+ *
+ * This is the callback registered by write_slab_journal_block().
+ */
+static void complete_write(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct pooled_vio *pooled = vio_as_pooled_vio(as_vio(completion));
+ struct slab_journal *journal = completion->parent;
+ sequence_number_t committed = get_committing_sequence_number(pooled);
+
+ list_del_init(&pooled->list_entry);
+ return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled));
+
+ if (result != VDO_SUCCESS) {
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_log_error_strerror(result, "cannot write slab journal block %llu",
+ (unsigned long long) committed);
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
+ check_if_slab_drained(journal->slab);
+ return;
+ }
+
+ WRITE_ONCE(journal->events->blocks_written, journal->events->blocks_written + 1);
+
+ if (list_empty(&journal->uncommitted_blocks)) {
+ /* If no blocks are outstanding, then the commit point is at the tail. */
+ journal->next_commit = journal->tail;
+ } else {
+ /* The commit point is always the beginning of the oldest incomplete block. */
+ pooled = container_of(journal->uncommitted_blocks.next,
+ struct pooled_vio, list_entry);
+ journal->next_commit = get_committing_sequence_number(pooled);
+ }
+
+ update_tail_block_location(journal);
+}
+
+static void write_slab_journal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id);
+}
+
+/**
+ * write_slab_journal_block() - Write a slab journal block.
+ * @waiter: The vio pool waiter which was just notified.
+ * @context: The vio pool entry for the write.
+ *
+ * Callback from acquire_vio_from_pool() registered in commit_tail().
+ */
+static void write_slab_journal_block(struct vdo_waiter *waiter, void *context)
+{
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, resource_waiter);
+ struct slab_journal_block_header *header = &journal->tail_header;
+ int unused_entries = journal->entries_per_block - header->entry_count;
+ physical_block_number_t block_number;
+ const struct admin_state_code *operation;
+
+ header->head = journal->head;
+ list_add_tail(&pooled->list_entry, &journal->uncommitted_blocks);
+ vdo_pack_slab_journal_block_header(header, &journal->block->header);
+
+ /* Copy the tail block into the vio. */
+ memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE);
+
+ VDO_ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull");
+ if (unused_entries > 0) {
+ /*
+ * Release the per-entry locks for any unused entries in the block we are about to
+ * write.
+ */
+ adjust_slab_journal_block_reference(journal, header->sequence_number,
+ -unused_entries);
+ journal->partial_write_in_progress = !block_is_full(journal);
+ }
+
+ block_number = journal->slab->journal_origin +
+ (header->sequence_number % journal->size);
+ vio->completion.parent = journal;
+
+ /*
+ * This block won't be read in recovery until the slab summary is updated to refer to it.
+ * The slab summary update does a flush which is sufficient to protect us from corruption
+ * due to out of order slab journal, reference block, or block map writes.
+ */
+ vdo_submit_metadata_vio(vdo_forget(vio), block_number, write_slab_journal_endio,
+ complete_write, REQ_OP_WRITE);
+
+ /* Since the write is submitted, the tail block structure can be reused. */
+ journal->tail++;
+ initialize_tail_block(journal);
+ journal->waiting_to_commit = false;
+
+ operation = vdo_get_admin_state_code(&journal->slab->state);
+ if (operation == VDO_ADMIN_STATE_WAITING_FOR_RECOVERY) {
+ vdo_finish_operation(&journal->slab->state,
+ (vdo_is_read_only(journal->slab->allocator->depot->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS));
+ return;
+ }
+
+ add_entries(journal);
+}
+
+/**
+ * commit_tail() - Commit the tail block of the slab journal.
+ * @journal: The journal whose tail block should be committed.
+ */
+static void commit_tail(struct slab_journal *journal)
+{
+ if ((journal->tail_header.entry_count == 0) && must_make_entries_to_flush(journal)) {
+ /*
+ * There are no entries at the moment, but there are some waiters, so defer
+ * initiating the flush until those entries are ready to write.
+ */
+ return;
+ }
+
+ if (vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
+ journal->waiting_to_commit ||
+ (journal->tail_header.entry_count == 0)) {
+ /*
+ * There is nothing to do since the tail block is empty, or writing, or the journal
+ * is in read-only mode.
+ */
+ return;
+ }
+
+ /*
+ * Since we are about to commit the tail block, this journal no longer needs to be on the
+ * ring of journals which the recovery journal might ask to commit.
+ */
+ mark_slab_journal_clean(journal);
+
+ journal->waiting_to_commit = true;
+
+ journal->resource_waiter.callback = write_slab_journal_block;
+ acquire_vio_from_pool(journal->slab->allocator->vio_pool,
+ &journal->resource_waiter);
+}
+
+/**
+ * encode_slab_journal_entry() - Encode a slab journal entry.
+ * @tail_header: The unpacked header for the block.
+ * @payload: The journal block payload to hold the entry.
+ * @sbn: The slab block number of the entry to encode.
+ * @operation: The type of the entry.
+ * @increment: True if this is an increment.
+ *
+ * Exposed for unit tests.
+ */
+static void encode_slab_journal_entry(struct slab_journal_block_header *tail_header,
+ slab_journal_payload *payload,
+ slab_block_number sbn,
+ enum journal_operation operation,
+ bool increment)
+{
+ journal_entry_count_t entry_number = tail_header->entry_count++;
+
+ if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ if (!tail_header->has_block_map_increments) {
+ memset(payload->full_entries.entry_types, 0,
+ VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE);
+ tail_header->has_block_map_increments = true;
+ }
+
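+		/* Mark this entry as a block map increment in the per-entry type bitmap. */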
+ payload->full_entries.entry_types[entry_number / 8] |=
+ ((u8)1 << (entry_number % 8));
+ }
+
+ vdo_pack_slab_journal_entry(&payload->entries[entry_number], sbn, increment);
+}
+
+/**
+ * expand_journal_point() - Convert a recovery journal journal_point which refers to both an
+ * increment and a decrement to a single point which refers to one or the
+ * other.
+ * @recovery_point: The journal point to convert.
+ * @increment: Whether the current entry is an increment.
+ *
+ * Return: The expanded journal point.
+ *
+ * Each data_vio has but a single recovery journal point, but it may need to make both
+ * increment and decrement entries in the same slab journal. In order to distinguish the two
+ * entries, the entry count of the expanded journal point is twice the actual recovery journal
+ * entry count for increments, and one more than that for decrements.
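+ *
+ * For example, a recovery journal point {sequence 5, entry 3} expands to {5, 6} for an
+ * increment and to {5, 7} for a decrement.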
+ */
+static struct journal_point expand_journal_point(struct journal_point recovery_point,
+ bool increment)
+{
+ recovery_point.entry_count *= 2;
+ if (!increment)
+ recovery_point.entry_count++;
+
+ return recovery_point;
+}
+
+/**
+ * add_entry() - Actually add an entry to the slab journal, potentially firing off a write if a
+ * block becomes full.
+ * @journal: The slab journal to append to.
+ * @pbn: The pbn being adjusted.
+ * @operation: The type of entry to make.
+ * @increment: True if this is an increment.
+ * @recovery_point: The expanded recovery point.
+ *
+ * This function is synchronous.
+ */
+static void add_entry(struct slab_journal *journal, physical_block_number_t pbn,
+ enum journal_operation operation, bool increment,
+ struct journal_point recovery_point)
+{
+ struct packed_slab_journal_block *block = journal->block;
+ int result;
+
+ result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point,
+ &recovery_point),
+ "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u",
+ (unsigned long long) recovery_point.sequence_number,
+ recovery_point.entry_count,
+ (unsigned long long) journal->tail_header.recovery_point.sequence_number,
+ journal->tail_header.recovery_point.entry_count);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
+ return;
+ }
+
+ if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ result = VDO_ASSERT((journal->tail_header.entry_count <
+ journal->full_entries_per_block),
+ "block has room for full entries");
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo,
+ result);
+ return;
+ }
+ }
+
+ encode_slab_journal_entry(&journal->tail_header, &block->payload,
+ pbn - journal->slab->start, operation, increment);
+ journal->tail_header.recovery_point = recovery_point;
+ if (block_is_full(journal))
+ commit_tail(journal);
+}
+
+static inline block_count_t journal_length(const struct slab_journal *journal)
+{
+ return journal->tail - journal->head;
+}
+
+/**
+ * vdo_attempt_replay_into_slab() - Replay a recovery journal entry into a slab's journal.
+ * @slab: The slab to play into.
+ * @pbn: The PBN for the entry.
+ * @operation: The type of entry to add.
+ * @increment: True if this entry is an increment.
+ * @recovery_point: The recovery journal point corresponding to this entry.
+ * @parent: The completion to notify when there is space to add the entry if the entry could not be
+ * added immediately.
+ *
+ * Return: true if the entry was added immediately.
+ */
+bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn,
+ enum journal_operation operation, bool increment,
+ struct journal_point *recovery_point,
+ struct vdo_completion *parent)
+{
+ struct slab_journal *journal = &slab->journal;
+ struct slab_journal_block_header *header = &journal->tail_header;
+ struct journal_point expanded = expand_journal_point(*recovery_point, increment);
+
+ /* Only accept entries after the current recovery point. */
+ if (!vdo_before_journal_point(&journal->tail_header.recovery_point, &expanded))
+ return true;
+
+ if ((header->entry_count >= journal->full_entries_per_block) &&
+ (header->has_block_map_increments || (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING))) {
+ /*
+ * The tail block does not have room for the entry we are attempting to add so
+ * commit the tail block now.
+ */
+ commit_tail(journal);
+ }
+
+ if (journal->waiting_to_commit) {
+ vdo_start_operation_with_waiter(&journal->slab->state,
+ VDO_ADMIN_STATE_WAITING_FOR_RECOVERY,
+ parent, NULL);
+ return false;
+ }
+
+ if (journal_length(journal) >= journal->size) {
+ /*
+ * We must have reaped the current head before the crash, since the blocking
+ * threshold keeps us from having more entries than fit in a slab journal; hence we
+ * can just advance the head (and unreapable block), as needed.
+ */
+ journal->head++;
+ journal->unreapable++;
+ }
+
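+ /* Note that entries are now being replayed into this slab. */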
+ if (journal->slab->status == VDO_SLAB_REBUILT)
+ journal->slab->status = VDO_SLAB_REPLAYING;
+
+ add_entry(journal, pbn, operation, increment, expanded);
+ return true;
+}
+
+/**
+ * requires_reaping() - Check whether the journal must be reaped before adding new entries.
+ * @journal: The journal to check.
+ *
+ * Return: true if the journal must be reaped.
+ */
+static bool requires_reaping(const struct slab_journal *journal)
+{
+ return (journal_length(journal) >= journal->blocking_threshold);
+}
+
+/** finish_summary_update() - A waiter callback that resets the writing state of a slab. */
+static void finish_summary_update(struct vdo_waiter *waiter, void *context)
+{
+ struct vdo_slab *slab = container_of(waiter, struct vdo_slab, summary_waiter);
+ int result = *((int *) context);
+
+ slab->active_count--;
+
+ if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) {
+ vdo_log_error_strerror(result, "failed to update slab summary");
+ vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
+ }
+
+ check_if_slab_drained(slab);
+}
+
+static void write_reference_block(struct vdo_waiter *waiter, void *context);
+
+/**
+ * launch_reference_block_write() - Launch the write of a dirty reference block by first acquiring
+ * a VIO for it from the pool.
+ * @waiter: The waiter of the block which is starting to write.
+ * @context: The parent slab of the block.
+ *
+ * This can be asynchronous since the writer will have to wait if all VIOs in the pool are
+ * currently in use.
+ */
+static void launch_reference_block_write(struct vdo_waiter *waiter, void *context)
+{
+ struct vdo_slab *slab = context;
+
+ if (vdo_is_read_only(slab->allocator->depot->vdo))
+ return;
+
+ slab->active_count++;
+ container_of(waiter, struct reference_block, waiter)->is_writing = true;
+ waiter->callback = write_reference_block;
+ acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
+}
+
+static void save_dirty_reference_blocks(struct vdo_slab *slab)
+{
+ vdo_waitq_notify_all_waiters(&slab->dirty_blocks,
+ launch_reference_block_write, slab);
+ check_if_slab_drained(slab);
+}
+
+/**
+ * finish_reference_block_write() - After a reference block has been written, clean it, release its
+ * locks, and return its VIO to the pool.
+ * @completion: The VIO that just finished writing.
+ */
+static void finish_reference_block_write(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = vio_as_pooled_vio(vio);
+ struct reference_block *block = completion->parent;
+ struct vdo_slab *slab = block->slab;
+ tail_block_offset_t offset;
+
+ slab->active_count--;
+
+ /* Release the slab journal lock. */
+ adjust_slab_journal_block_reference(&slab->journal,
+ block->slab_journal_lock_to_release, -1);
+ return_vio_to_pool(slab->allocator->vio_pool, pooled);
+
+ /*
+ * We can't clear the is_writing flag earlier as releasing the slab journal lock may cause
+ * us to be dirtied again, but we don't want to double enqueue.
+ */
+ block->is_writing = false;
+
+ if (vdo_is_read_only(completion->vdo)) {
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ /* Re-queue the block if it was re-dirtied while it was writing. */
+ if (block->is_dirty) {
+ vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter);
+ if (vdo_is_state_draining(&slab->state)) {
+ /* We must be saving, and this block will otherwise not be relaunched. */
+ save_dirty_reference_blocks(slab);
+ }
+
+ return;
+ }
+
+ /*
+ * Mark the slab as clean in the slab summary if there are no dirty or writing blocks
+ * and no summary update in progress.
+ */
+ if ((slab->active_count > 0) || vdo_waitq_has_waiters(&slab->dirty_blocks)) {
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ offset = slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
+ slab->active_count++;
+ slab->summary_waiter.callback = finish_summary_update;
+ update_slab_summary_entry(slab, &slab->summary_waiter, offset,
+ true, true, slab->free_blocks);
+}
+
+/**
+ * get_reference_counters_for_block() - Find the reference counters for a given block.
+ * @block: The reference_block in question.
+ *
+ * Return: A pointer to the reference counters for this block.
+ */
+static vdo_refcount_t * __must_check get_reference_counters_for_block(struct reference_block *block)
+{
+ size_t block_index = block - block->slab->reference_blocks;
+
+ return &block->slab->counters[block_index * COUNTS_PER_BLOCK];
+}
+
+/**
+ * pack_reference_block() - Copy data from a reference block to a buffer ready to be written out.
+ * @block: The block to copy.
+ * @buffer: The char buffer to fill with the packed block.
+ */
+static void pack_reference_block(struct reference_block *block, void *buffer)
+{
+ struct packed_reference_block *packed = buffer;
+ vdo_refcount_t *counters = get_reference_counters_for_block(block);
+ sector_count_t i;
+ struct packed_journal_point commit_point;
+
+ vdo_pack_journal_point(&block->slab->slab_journal_point, &commit_point);
+
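+ /* Each sector records the same commit point along with its slice of the counters. */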
+ for (i = 0; i < VDO_SECTORS_PER_BLOCK; i++) {
+ packed->sectors[i].commit_point = commit_point;
+ memcpy(packed->sectors[i].counts, counters + (i * COUNTS_PER_SECTOR),
+ (sizeof(vdo_refcount_t) * COUNTS_PER_SECTOR));
+ }
+}
+
+static void write_reference_block_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct reference_block *block = vio->completion.parent;
+ thread_id_t thread_id = block->slab->allocator->thread_id;
+
+ continue_vio_after_io(vio, finish_reference_block_write, thread_id);
+}
+
+/**
+ * handle_io_error() - Handle an I/O error reading or writing a reference count block.
+ * @completion: The VIO doing the I/O as a completion.
+ */
+static void handle_io_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vio *vio = as_vio(completion);
+ struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
+
+ vio_record_metadata_io_error(vio);
+ return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ slab->active_count--;
+ vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
+ check_if_slab_drained(slab);
+}
+
+/**
+ * write_reference_block() - After a dirty block waiter has gotten a VIO from the VIO pool, copy
+ * its counters and associated data into the VIO, and launch the write.
+ * @waiter: The waiter of the dirty block.
+ * @context: The VIO returned by the pool.
+ */
+static void write_reference_block(struct vdo_waiter *waiter, void *context)
+{
+ size_t block_offset;
+ physical_block_number_t pbn;
+ struct pooled_vio *pooled = context;
+ struct vdo_completion *completion = &pooled->vio.completion;
+ struct reference_block *block = container_of(waiter, struct reference_block,
+ waiter);
+
+ pack_reference_block(block, pooled->vio.data);
+ block_offset = (block - block->slab->reference_blocks);
+ pbn = (block->slab->ref_counts_origin + block_offset);
+ block->slab_journal_lock_to_release = block->slab_journal_lock;
+ completion->parent = block;
+
+ /*
+ * Mark the block as clean, since we won't be committing any updates that happen after this
+ * moment. As long as VIO order is preserved, two VIOs updating this block at once will not
+ * cause complications.
+ */
+ block->is_dirty = false;
+
+ /*
+ * Flush before writing to ensure that the recovery journal and slab journal entries which
+ * cover this reference update are stable. This prevents data corruption that can be caused
+ * by out of order writes.
+ */
+ WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written,
+ block->slab->allocator->ref_counts_statistics.blocks_written + 1);
+
+ completion->callback_thread_id = ((struct block_allocator *) pooled->context)->thread_id;
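+ /* The REQ_PREFLUSH below provides the flush described above. */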
+ vdo_submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio,
+ handle_io_error, REQ_OP_WRITE | REQ_PREFLUSH);
+}
+
+static void reclaim_journal_space(struct slab_journal *journal)
+{
+ block_count_t length = journal_length(journal);
+ struct vdo_slab *slab = journal->slab;
+ block_count_t write_count = vdo_waitq_num_waiters(&slab->dirty_blocks);
+ block_count_t written;
+
+ if ((length < journal->flushing_threshold) || (write_count == 0))
+ return;
+
+ /* The slab journal is over the first threshold, schedule some reference block writes. */
+ WRITE_ONCE(journal->events->flush_count, journal->events->flush_count + 1);
+ if (length < journal->flushing_deadline) {
+ /* Schedule more writes the closer to the deadline we get. */
+ write_count /= journal->flushing_deadline - length + 1;
+ write_count = max_t(block_count_t, write_count, 1);
+ }
+
+ for (written = 0; written < write_count; written++) {
+ vdo_waitq_notify_next_waiter(&slab->dirty_blocks,
+ launch_reference_block_write, slab);
+ }
+}
+
+/**
+ * reference_count_to_status() - Convert a reference count to a reference status.
+ * @count: The count to convert.
+ *
+ * Return: The appropriate reference status.
+ */
+static enum reference_status __must_check reference_count_to_status(vdo_refcount_t count)
+{
+ if (count == EMPTY_REFERENCE_COUNT)
+ return RS_FREE;
+ else if (count == 1)
+ return RS_SINGLE;
+ else if (count == PROVISIONAL_REFERENCE_COUNT)
+ return RS_PROVISIONAL;
+ else
+ return RS_SHARED;
+}
+
+/**
+ * dirty_block() - Mark a reference count block as dirty, potentially adding it to the dirty queue
+ * if it wasn't already dirty.
+ * @block: The reference block to mark as dirty.
+ */
+static void dirty_block(struct reference_block *block)
+{
+ if (block->is_dirty)
+ return;
+
+ block->is_dirty = true;
+ if (!block->is_writing)
+ vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter);
+}
+
+/**
+ * get_reference_block() - Get the reference block that covers the given block index.
+ */
+static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab,
+ slab_block_number index)
+{
+ return &slab->reference_blocks[index / COUNTS_PER_BLOCK];
+}
+
+/**
+ * slab_block_number_from_pbn() - Determine the index within the slab of a particular physical
+ * block number.
+ * @slab: The slab.
+ * @pbn: The physical block number.
+ * @slab_block_number_ptr: A pointer to the slab block number.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check slab_block_number_from_pbn(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ slab_block_number *slab_block_number_ptr)
+{
+ u64 slab_block_number;
+
+ if (pbn < slab->start)
+ return VDO_OUT_OF_RANGE;
+
+ slab_block_number = pbn - slab->start;
+ if (slab_block_number >= slab->allocator->depot->slab_config.data_blocks)
+ return VDO_OUT_OF_RANGE;
+
+ *slab_block_number_ptr = slab_block_number;
+ return VDO_SUCCESS;
+}
+
+/**
+ * get_reference_counter() - Get the reference counter that covers the given physical block number.
+ * @slab: The slab to query.
+ * @pbn: The physical block number.
+ * @counter_ptr: A pointer to the reference counter.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check get_reference_counter(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ vdo_refcount_t **counter_ptr)
+{
+ slab_block_number index;
+ int result = slab_block_number_from_pbn(slab, pbn, &index);
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *counter_ptr = &slab->counters[index];
+
+ return VDO_SUCCESS;
+}
+
+static unsigned int calculate_slab_priority(struct vdo_slab *slab)
+{
+ block_count_t free_blocks = slab->free_blocks;
+ unsigned int unopened_slab_priority = slab->allocator->unopened_slab_priority;
+ unsigned int priority;
+
+ /*
+ * Wholly full slabs must be the only ones with lowest priority, 0.
+ *
+ * Slabs that have never been opened (empty, newly initialized, and never been written to)
+ * have lower priority than previously opened slabs that have a significant number of free
+ * blocks. This ranking causes VDO to avoid writing physical blocks for the first time
+ * unless there are very few free blocks that have been previously written to.
+ *
+ * Since VDO doesn't discard blocks currently, reusing previously written blocks makes VDO
+ * a better client of any underlying storage that is thinly-provisioned (though discarding
+ * would be better).
+ *
+ * For all other slabs, the priority is derived from the logarithm of the number of free
+ * blocks. Slabs with the same order of magnitude of free blocks have the same priority.
+ * With 2^23 blocks, the priority will range from 1 to 25. The reserved
+ * unopened_slab_priority divides the range and is skipped by the logarithmic mapping.
+ */
+
+ if (free_blocks == 0)
+ return 0;
+
+ if (is_slab_journal_blank(slab))
+ return unopened_slab_priority;
+
+ priority = (1 + ilog2(free_blocks));
+ return ((priority < unopened_slab_priority) ? priority : priority + 1);
+}
+
+/*
+ * Slabs are essentially prioritized by an approximation of the number of free blocks in the slab
+ * so slabs with lots of free blocks will be opened for allocation before slabs that have few free
+ * blocks.
+ */
+static void prioritize_slab(struct vdo_slab *slab)
+{
+ VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
+ "a slab must not already be on a ring when prioritizing");
+ slab->priority = calculate_slab_priority(slab);
+ vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
+ slab->priority, &slab->allocq_entry);
+}
+
+/**
+ * adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab.
+ * @incremented: true if the free block count went up.
+ */
+static void adjust_free_block_count(struct vdo_slab *slab, bool incremented)
+{
+ struct block_allocator *allocator = slab->allocator;
+
+ WRITE_ONCE(allocator->allocated_blocks,
+ allocator->allocated_blocks + (incremented ? -1 : 1));
+
+ /* The open slab doesn't need to be reprioritized until it is closed. */
+ if (slab == allocator->open_slab)
+ return;
+
+ /* Don't bother adjusting the priority table if unneeded. */
+ if (slab->priority == calculate_slab_priority(slab))
+ return;
+
+ /*
+ * Reprioritize the slab to reflect the new free block count by removing it from the table
+ * and re-enqueuing it with the new priority.
+ */
+ vdo_priority_table_remove(allocator->prioritized_slabs, &slab->allocq_entry);
+ prioritize_slab(slab);
+}
+
+/**
+ * increment_for_data() - Increment the reference count for a data block.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @old_status: The reference status of the data block before this increment.
+ * @lock: The pbn_lock associated with this increment (may be NULL).
+ * @counter_ptr: A pointer to the count for the data block (in, out).
+ * @adjust_block_count: Whether to update the allocator's free block count.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int increment_for_data(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ enum reference_status old_status,
+ struct pbn_lock *lock, vdo_refcount_t *counter_ptr,
+ bool adjust_block_count)
+{
+ switch (old_status) {
+ case RS_FREE:
+ *counter_ptr = 1;
+ block->allocated_count++;
+ slab->free_blocks--;
+ if (adjust_block_count)
+ adjust_free_block_count(slab, false);
+
+ break;
+
+ case RS_PROVISIONAL:
+ *counter_ptr = 1;
+ break;
+
+ default:
+ /* Single or shared */
+ if (*counter_ptr >= MAXIMUM_REFERENCE_COUNT) {
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Incrementing a block already having 254 references (slab %u, offset %u)",
+ slab->slab_number, block_number);
+ }
+ (*counter_ptr)++;
+ }
+
+ if (lock != NULL)
+ vdo_unassign_pbn_lock_provisional_reference(lock);
+ return VDO_SUCCESS;
+}
+
+/**
+ * decrement_for_data() - Decrement the reference count for a data block.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @old_status: The reference status of the data block before this decrement.
+ * @updater: The reference updater doing this operation in case we need to look up the pbn lock.
+ * @lock: The pbn_lock associated with the block being decremented (may be NULL).
+ * @counter_ptr: A pointer to the count for the data block (in, out).
+ * @adjust_block_count: Whether to update the allocator's free block count.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int decrement_for_data(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ enum reference_status old_status,
+ struct reference_updater *updater,
+ vdo_refcount_t *counter_ptr, bool adjust_block_count)
+{
+ switch (old_status) {
+ case RS_FREE:
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Decrementing free block at offset %u in slab %u",
+ block_number, slab->slab_number);
+
+ case RS_PROVISIONAL:
+ case RS_SINGLE:
+ if (updater->zpbn.zone != NULL) {
+ struct pbn_lock *lock = vdo_get_physical_zone_pbn_lock(updater->zpbn.zone,
+ updater->zpbn.pbn);
+
+ if (lock != NULL) {
+ /*
+ * There is a read lock on this block, so the block must not become
+ * unreferenced.
+ */
+ *counter_ptr = PROVISIONAL_REFERENCE_COUNT;
+ vdo_assign_pbn_lock_provisional_reference(lock);
+ break;
+ }
+ }
+
+ *counter_ptr = EMPTY_REFERENCE_COUNT;
+ block->allocated_count--;
+ slab->free_blocks++;
+ if (adjust_block_count)
+ adjust_free_block_count(slab, true);
+
+ break;
+
+ default:
+ /* Shared */
+ (*counter_ptr)--;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * increment_for_block_map() - Increment the reference count for a block map page.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @old_status: The reference status of the block before this increment.
+ * @lock: The pbn_lock associated with this increment (may be NULL).
+ * @normal_operation: Whether we are in normal operation vs. recovery or rebuild.
+ * @counter_ptr: A pointer to the count for the block (in, out).
+ * @adjust_block_count: Whether to update the allocator's free block count.
+ *
+ * All block map increments should be from provisional to MAXIMUM_REFERENCE_COUNT. Since block map
+ * blocks never dedupe, they should never be adjusted from any other state. The adjustment always
+ * results in MAXIMUM_REFERENCE_COUNT as this value is used to prevent dedupe against block map
+ * blocks.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int increment_for_block_map(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ enum reference_status old_status,
+ struct pbn_lock *lock, bool normal_operation,
+ vdo_refcount_t *counter_ptr, bool adjust_block_count)
+{
+ switch (old_status) {
+ case RS_FREE:
+ if (normal_operation) {
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Incrementing unallocated block map block (slab %u, offset %u)",
+ slab->slab_number, block_number);
+ }
+
+ *counter_ptr = MAXIMUM_REFERENCE_COUNT;
+ block->allocated_count++;
+ slab->free_blocks--;
+ if (adjust_block_count)
+ adjust_free_block_count(slab, false);
+
+ return VDO_SUCCESS;
+
+ case RS_PROVISIONAL:
+ if (!normal_operation)
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Block map block had provisional reference during replay (slab %u, offset %u)",
+ slab->slab_number, block_number);
+
+ *counter_ptr = MAXIMUM_REFERENCE_COUNT;
+ if (lock != NULL)
+ vdo_unassign_pbn_lock_provisional_reference(lock);
+ return VDO_SUCCESS;
+
+ default:
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Incrementing a block map block which is already referenced %u times (slab %u, offset %u)",
+ *counter_ptr, slab->slab_number,
+ block_number);
+ }
+}
+
+static bool __must_check is_valid_journal_point(const struct journal_point *point)
+{
+ return ((point != NULL) && (point->sequence_number > 0));
+}
+
+/**
+ * update_reference_count() - Update the reference count of a block.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @slab_journal_point: The slab journal point at which this update is journaled.
+ * @updater: The reference updater.
+ * @normal_operation: Whether we are in normal operation vs. recovery or rebuild.
+ * @adjust_block_count: Whether to update the slab's free block count.
+ * @provisional_decrement_ptr: A pointer which will be set to true if this update was a decrement
+ * of a provisional reference.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int update_reference_count(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ const struct journal_point *slab_journal_point,
+ struct reference_updater *updater,
+ bool normal_operation, bool adjust_block_count,
+ bool *provisional_decrement_ptr)
+{
+ vdo_refcount_t *counter_ptr = &slab->counters[block_number];
+ enum reference_status old_status = reference_count_to_status(*counter_ptr);
+ int result;
+
+ if (!updater->increment) {
+ result = decrement_for_data(slab, block, block_number, old_status,
+ updater, counter_ptr, adjust_block_count);
+ if ((result == VDO_SUCCESS) && (old_status == RS_PROVISIONAL)) {
+ if (provisional_decrement_ptr != NULL)
+ *provisional_decrement_ptr = true;
+ return VDO_SUCCESS;
+ }
+ } else if (updater->operation == VDO_JOURNAL_DATA_REMAPPING) {
+ result = increment_for_data(slab, block, block_number, old_status,
+ updater->lock, counter_ptr, adjust_block_count);
+ } else {
+ result = increment_for_block_map(slab, block, block_number, old_status,
+ updater->lock, normal_operation,
+ counter_ptr, adjust_block_count);
+ }
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (is_valid_journal_point(slab_journal_point))
+ slab->slab_journal_point = *slab_journal_point;
+
+ return VDO_SUCCESS;
+}
+
+static int __must_check adjust_reference_count(struct vdo_slab *slab,
+ struct reference_updater *updater,
+ const struct journal_point *slab_journal_point)
+{
+ slab_block_number block_number;
+ int result;
+ struct reference_block *block;
+ bool provisional_decrement = false;
+
+ if (!is_slab_open(slab))
+ return VDO_INVALID_ADMIN_STATE;
+
+ result = slab_block_number_from_pbn(slab, updater->zpbn.pbn, &block_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ block = get_reference_block(slab, block_number);
+ result = update_reference_count(slab, block, block_number, slab_journal_point,
+ updater, NORMAL_OPERATION, true,
+ &provisional_decrement);
+ if ((result != VDO_SUCCESS) || provisional_decrement)
+ return result;
+
+ if (block->is_dirty && (block->slab_journal_lock > 0)) {
+ sequence_number_t entry_lock = slab_journal_point->sequence_number;
+ /*
+ * This block is already dirty and a slab journal entry has been made for it since
+ * the last time it was clean. We must release the per-entry slab journal lock for
+ * the entry associated with the update we are now doing.
+ */
+ result = VDO_ASSERT(is_valid_journal_point(slab_journal_point),
+ "Reference count adjustments need slab journal points.");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ adjust_slab_journal_block_reference(&slab->journal, entry_lock, -1);
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * This may be the first time we are applying an update for which there is a slab journal
+ * entry to this block since the block was cleaned. Therefore, we convert the per-entry
+ * slab journal lock to an uncommitted reference block lock, if there is a per-entry lock.
+ */
+ if (is_valid_journal_point(slab_journal_point))
+ block->slab_journal_lock = slab_journal_point->sequence_number;
+ else
+ block->slab_journal_lock = 0;
+
+ dirty_block(block);
+ return VDO_SUCCESS;
+}
+
+/**
+ * add_entry_from_waiter() - Add an entry to the slab journal.
+ * @waiter: The vio which should make an entry now.
+ * @context: The slab journal to make an entry in.
+ *
+ * This callback is invoked by add_entries() once it has determined that we are ready to make
+ * another entry in the slab journal. Implements waiter_callback_fn.
+ */
+static void add_entry_from_waiter(struct vdo_waiter *waiter, void *context)
+{
+ int result;
+ struct reference_updater *updater =
+ container_of(waiter, struct reference_updater, waiter);
+ struct data_vio *data_vio = data_vio_from_reference_updater(updater);
+ struct slab_journal *journal = context;
+ struct slab_journal_block_header *header = &journal->tail_header;
+ struct journal_point slab_journal_point = {
+ .sequence_number = header->sequence_number,
+ .entry_count = header->entry_count,
+ };
+ sequence_number_t recovery_block = data_vio->recovery_journal_point.sequence_number;
+
+ if (header->entry_count == 0) {
+ /*
+ * This is the first entry in the current tail block, so get a lock on the recovery
+ * journal which we will hold until this tail block is committed.
+ */
+ get_lock(journal, header->sequence_number)->recovery_start = recovery_block;
+ if (journal->recovery_journal != NULL) {
+ zone_count_t zone_number = journal->slab->allocator->zone_number;
+
+ vdo_acquire_recovery_journal_block_reference(journal->recovery_journal,
+ recovery_block,
+ VDO_ZONE_TYPE_PHYSICAL,
+ zone_number);
+ }
+
+ mark_slab_journal_dirty(journal, recovery_block);
+ reclaim_journal_space(journal);
+ }
+
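+ /* Now make the slab journal entry itself, using the expanded recovery point. */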
+ add_entry(journal, updater->zpbn.pbn, updater->operation, updater->increment,
+ expand_journal_point(data_vio->recovery_journal_point,
+ updater->increment));
+
+ if (journal->slab->status != VDO_SLAB_REBUILT) {
+ /*
+ * If the slab is unrecovered, scrubbing will take care of the count since the
+ * update is now recorded in the journal.
+ */
+ adjust_slab_journal_block_reference(journal,
+ slab_journal_point.sequence_number, -1);
+ result = VDO_SUCCESS;
+ } else {
+ /* Now that an entry has been made in the slab journal, update the counter. */
+ result = adjust_reference_count(journal->slab, updater,
+ &slab_journal_point);
+ }
+
+ if (updater->increment)
+ continue_data_vio_with_error(data_vio, result);
+ else
+ vdo_continue_completion(&data_vio->decrement_completion, result);
+}
+
+/**
+ * is_next_entry_a_block_map_increment() - Check whether the next entry to be made is a block map
+ * increment.
+ * @journal: The journal.
+ *
+ * Return: true if the first entry waiter's operation is a block map increment.
+ */
+static inline bool is_next_entry_a_block_map_increment(struct slab_journal *journal)
+{
+ struct vdo_waiter *waiter = vdo_waitq_get_first_waiter(&journal->entry_waiters);
+ struct reference_updater *updater =
+ container_of(waiter, struct reference_updater, waiter);
+
+ return (updater->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING);
+}
+
+/**
+ * add_entries() - Add as many entries as possible from the queue of vios waiting to make entries.
+ * @journal: The journal to which entries may be added.
+ *
+ * By processing the queue in order, we ensure that slab journal entries are made in the same order
+ * as recovery journal entries for the same increment or decrement.
+ */
+static void add_entries(struct slab_journal *journal)
+{
+ if (journal->adding_entries) {
+ /* Protect against re-entrancy. */
+ return;
+ }
+
+ journal->adding_entries = true;
+ while (vdo_waitq_has_waiters(&journal->entry_waiters)) {
+ struct slab_journal_block_header *header = &journal->tail_header;
+
+ if (journal->partial_write_in_progress ||
+ (journal->slab->status == VDO_SLAB_REBUILDING)) {
+ /*
+ * Don't add entries while rebuilding or while a partial write is
+ * outstanding, as it could result in reference count corruption.
+ */
+ break;
+ }
+
+ if (journal->waiting_to_commit) {
+ /*
+ * If we are waiting for resources to write the tail block, and the tail
+ * block is full, we can't make another entry.
+ */
+ WRITE_ONCE(journal->events->tail_busy_count,
+ journal->events->tail_busy_count + 1);
+ break;
+ } else if (is_next_entry_a_block_map_increment(journal) &&
+ (header->entry_count >= journal->full_entries_per_block)) {
+ /*
+ * The tail block does not have room for a block map increment, so commit
+ * it now.
+ */
+ commit_tail(journal);
+ if (journal->waiting_to_commit) {
+ WRITE_ONCE(journal->events->tail_busy_count,
+ journal->events->tail_busy_count + 1);
+ break;
+ }
+ }
+
+ /* If the slab is over the blocking threshold, make the vio wait. */
+ if (requires_reaping(journal)) {
+ WRITE_ONCE(journal->events->blocked_count,
+ journal->events->blocked_count + 1);
+ save_dirty_reference_blocks(journal->slab);
+ break;
+ }
+
+ if (header->entry_count == 0) {
+ struct journal_lock *lock =
+ get_lock(journal, header->sequence_number);
+
+ /*
+ * Check if the on disk slab journal is full. Because of the blocking and
+ * scrubbing thresholds, this should never happen.
+ */
+ if (lock->count > 0) {
+ VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail,
+ "New block has locks, but journal is not full");
+
+ /*
+ * The blocking threshold must let the journal fill up if the new
+ * block has locks; if the blocking threshold is smaller than the
+ * journal size, the new block cannot possibly have locks already.
+ */
+ VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size),
+ "New block can have locks already iff blocking threshold is at the end of the journal");
+
+ WRITE_ONCE(journal->events->disk_full_count,
+ journal->events->disk_full_count + 1);
+ save_dirty_reference_blocks(journal->slab);
+ break;
+ }
+
+ /*
+ * Don't allow the new block to be reaped until all of the reference count
+ * blocks are written and the journal block has been fully committed as
+ * well.
+ */
+ lock->count = journal->entries_per_block + 1;
+
+ if (header->sequence_number == 1) {
+ struct vdo_slab *slab = journal->slab;
+ block_count_t i;
+
+ /*
+ * This is the first entry in this slab journal, ever. Dirty all of
+ * the reference count blocks. Each will acquire a lock on the tail
+ * block so that the journal won't be reaped until the reference
+ * counts are initialized. The lock acquisition must be done by the
+ * ref_counts since here we don't know how many reference blocks
+ * the ref_counts has.
+ */
+ for (i = 0; i < slab->reference_block_count; i++) {
+ slab->reference_blocks[i].slab_journal_lock = 1;
+ dirty_block(&slab->reference_blocks[i]);
+ }
+
+ adjust_slab_journal_block_reference(journal, 1,
+ slab->reference_block_count);
+ }
+ }
+
+ vdo_waitq_notify_next_waiter(&journal->entry_waiters,
+ add_entry_from_waiter, journal);
+ }
+
+ journal->adding_entries = false;
+
+ /* If there are no waiters, and we are flushing or saving, commit the tail block. */
+ if (vdo_is_state_draining(&journal->slab->state) &&
+ !vdo_is_state_suspending(&journal->slab->state) &&
+ !vdo_waitq_has_waiters(&journal->entry_waiters))
+ commit_tail(journal);
+}
+
+/**
+ * reset_search_cursor() - Reset the free block search back to the first reference counter in the
+ * first reference block of a slab.
+ */
+static void reset_search_cursor(struct vdo_slab *slab)
+{
+ struct search_cursor *cursor = &slab->search_cursor;
+
+ cursor->block = cursor->first_block;
+ cursor->index = 0;
+ /* Unit tests have slabs with only one reference block (and it's a runt). */
+ cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count);
+}
+
+/**
+ * advance_search_cursor() - Advance the search cursor to the start of the next reference block in
+ * a slab.
+ *
+ * Wraps around to the first reference block if the current block is the last reference block.
+ *
+ * Return: true unless the cursor was at the last reference block.
+ */
+static bool advance_search_cursor(struct vdo_slab *slab)
+{
+ struct search_cursor *cursor = &slab->search_cursor;
+
+ /*
+ * If we just finished searching the last reference block, then wrap back around to the
+ * start of the array.
+ */
+ if (cursor->block == cursor->last_block) {
+ reset_search_cursor(slab);
+ return false;
+ }
+
+ /* We're not already at the end, so advance the cursor to the next block. */
+ cursor->block++;
+ cursor->index = cursor->end_index;
+
+ if (cursor->block == cursor->last_block) {
+ /* The last reference block will usually be a runt. */
+ cursor->end_index = slab->block_count;
+ } else {
+ cursor->end_index += COUNTS_PER_BLOCK;
+ }
+
+ return true;
+}
+
+/**
+ * vdo_adjust_reference_count_for_rebuild() - Adjust the reference count of a block during rebuild.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot,
+ physical_block_number_t pbn,
+ enum journal_operation operation)
+{
+ int result;
+ slab_block_number block_number;
+ struct reference_block *block;
+ struct vdo_slab *slab = vdo_get_slab(depot, pbn);
+ struct reference_updater updater = {
+ .operation = operation,
+ .increment = true,
+ };
+
+ result = slab_block_number_from_pbn(slab, pbn, &block_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ block = get_reference_block(slab, block_number);
+ result = update_reference_count(slab, block, block_number, NULL,
+ &updater, !NORMAL_OPERATION, false, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ dirty_block(block);
+ return VDO_SUCCESS;
+}
+
+/**
+ * replay_reference_count_change() - Replay the reference count adjustment from a slab journal
+ * entry into the reference count for a block.
+ * @slab: The slab.
+ * @entry_point: The slab journal point for the entry.
+ * @entry: The slab journal entry being replayed.
+ *
+ * The adjustment will be ignored if it was already recorded in the reference count.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int replay_reference_count_change(struct vdo_slab *slab,
+ const struct journal_point *entry_point,
+ struct slab_journal_entry entry)
+{
+ int result;
+ struct reference_block *block = get_reference_block(slab, entry.sbn);
+ sector_count_t sector = (entry.sbn % COUNTS_PER_BLOCK) / COUNTS_PER_SECTOR;
+ struct reference_updater updater = {
+ .operation = entry.operation,
+ .increment = entry.increment,
+ };
+
+ if (!vdo_before_journal_point(&block->commit_points[sector], entry_point)) {
+ /* This entry is already reflected in the existing counts, so do nothing. */
+ return VDO_SUCCESS;
+ }
+
+ /* This entry is not yet counted in the reference counts. */
+ result = update_reference_count(slab, block, entry.sbn, entry_point,
+ &updater, !NORMAL_OPERATION, false, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ dirty_block(block);
+ return VDO_SUCCESS;
+}
+
+/**
+ * find_zero_byte_in_word() - Find the array index of the first zero byte in a word-sized range of
+ * reference counters.
+ * @word_ptr: A pointer to the eight counter bytes to check.
+ * @start_index: The array index corresponding to word_ptr[0].
+ * @fail_index: The array index to return if no zero byte is found.
+ *
+ * The search does no bounds checking; the function relies on the array being sufficiently padded.
+ *
+ * Return: The array index of the first zero byte in the word, or the value passed as fail_index if
+ * no zero byte was found.
+ */
+static inline slab_block_number find_zero_byte_in_word(const u8 *word_ptr,
+ slab_block_number start_index,
+ slab_block_number fail_index)
+{
+ u64 word = get_unaligned_le64(word_ptr);
+
+ /* This looks like a loop, but GCC will unroll the eight iterations for us. */
+ unsigned int offset;
+
+ for (offset = 0; offset < BYTES_PER_WORD; offset++) {
+ /* Assumes little-endian byte order, which we have on X86. */
+ if ((word & 0xFF) == 0)
+ return (start_index + offset);
+ word >>= 8;
+ }
+
+ return fail_index;
+}
+
+/**
+ * find_free_block() - Find the first block with a reference count of zero in the specified
+ * range of reference counter indexes.
+ * @slab: The slab counters to scan.
+ * @index_ptr: A pointer to hold the array index of the free block.
+ *
+ * Exposed for unit testing.
+ *
+ * Return: true if a free block was found in the specified range.
+ */
+static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr)
+{
+ slab_block_number zero_index;
+ slab_block_number next_index = slab->search_cursor.index;
+ slab_block_number end_index = slab->search_cursor.end_index;
+ u8 *next_counter = &slab->counters[next_index];
+ u8 *end_counter = &slab->counters[end_index];
+
+ /*
+ * Search every byte of the first unaligned word. (Array is padded so reading past end is
+ * safe.)
+ */
+ zero_index = find_zero_byte_in_word(next_counter, next_index, end_index);
+ if (zero_index < end_index) {
+ *index_ptr = zero_index;
+ return true;
+ }
+
+ /*
+ * On architectures where unaligned word access is expensive, this would be a good place to
+ * advance to an alignment boundary.
+ */
+ next_index += BYTES_PER_WORD;
+ next_counter += BYTES_PER_WORD;
+
+ /*
+ * Now we're word-aligned; check a word at a time until we find a word containing a zero.
+ * (Array is padded so reading past end is safe.)
+ */
+ while (next_counter < end_counter) {
+ /*
+ * The following code is currently an exact copy of the code preceding the loop,
+ * but if you try to merge them by using a do loop, it runs slower because a jump
+ * instruction gets added at the start of the iteration.
+ */
+ zero_index = find_zero_byte_in_word(next_counter, next_index, end_index);
+ if (zero_index < end_index) {
+ *index_ptr = zero_index;
+ return true;
+ }
+
+ next_index += BYTES_PER_WORD;
+ next_counter += BYTES_PER_WORD;
+ }
+
+ return false;
+}
+
+/**
+ * search_current_reference_block() - Search the reference block currently saved in the search
+ * cursor for a reference count of zero, starting at the saved
+ * counter index.
+ * @slab: The slab to search.
+ * @free_index_ptr: A pointer to receive the array index of the zero reference count.
+ *
+ * Return: true if an unreferenced counter was found.
+ */
+static bool search_current_reference_block(const struct vdo_slab *slab,
+ slab_block_number *free_index_ptr)
+{
+ /* Don't bother searching if the current block is known to be full. */
+ return ((slab->search_cursor.block->allocated_count < COUNTS_PER_BLOCK) &&
+ find_free_block(slab, free_index_ptr));
+}
+
+/**
+ * search_reference_blocks() - Search each reference block for a reference count of zero.
+ * @slab: The slab to search.
+ * @free_index_ptr: A pointer to receive the array index of the zero reference count.
+ *
+ * Searches each reference block for a reference count of zero, starting at the reference block and
+ * counter index saved in the search cursor and searching up to the end of the last reference
+ * block. The search does not wrap.
+ *
+ * Return: true if an unreferenced counter was found.
+ */
+static bool search_reference_blocks(struct vdo_slab *slab,
+ slab_block_number *free_index_ptr)
+{
+ /* Start searching at the saved search position in the current block. */
+ if (search_current_reference_block(slab, free_index_ptr))
+ return true;
+
+ /* Search each reference block up to the end of the slab. */
+ while (advance_search_cursor(slab)) {
+ if (search_current_reference_block(slab, free_index_ptr))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * make_provisional_reference() - Do the bookkeeping for making a provisional reference.
+ */
+static void make_provisional_reference(struct vdo_slab *slab,
+ slab_block_number block_number)
+{
+ struct reference_block *block = get_reference_block(slab, block_number);
+
+ /*
+ * Make the initial transition from an unreferenced block to a
+ * provisionally allocated block.
+ */
+ slab->counters[block_number] = PROVISIONAL_REFERENCE_COUNT;
+
+ /* Account for the allocation. */
+ block->allocated_count++;
+ slab->free_blocks--;
+}
+
+/**
+ * dirty_all_reference_blocks() - Mark all reference count blocks in a slab as dirty.
+ */
+static void dirty_all_reference_blocks(struct vdo_slab *slab)
+{
+ block_count_t i;
+
+ for (i = 0; i < slab->reference_block_count; i++)
+ dirty_block(&slab->reference_blocks[i]);
+}
+
+/**
+ * clear_provisional_references() - Clear the provisional reference counts from a reference block.
+ * @block: The block to clear.
+ */
+static void clear_provisional_references(struct reference_block *block)
+{
+ vdo_refcount_t *counters = get_reference_counters_for_block(block);
+ block_count_t j;
+
+ for (j = 0; j < COUNTS_PER_BLOCK; j++) {
+ if (counters[j] == PROVISIONAL_REFERENCE_COUNT) {
+ counters[j] = EMPTY_REFERENCE_COUNT;
+ block->allocated_count--;
+ }
+ }
+}
+
+static inline bool journal_points_equal(struct journal_point first,
+ struct journal_point second)
+{
+ return ((first.sequence_number == second.sequence_number) &&
+ (first.entry_count == second.entry_count));
+}
+
+/**
+ * unpack_reference_block() - Unpack a packed reference block into the internal memory structure.
+ * @packed: The written reference block to be unpacked.
+ * @block: The internal reference block to be loaded.
+ */
+static void unpack_reference_block(struct packed_reference_block *packed,
+ struct reference_block *block)
+{
+ block_count_t index;
+ sector_count_t i;
+ struct vdo_slab *slab = block->slab;
+ vdo_refcount_t *counters = get_reference_counters_for_block(block);
+
+ for (i = 0; i < VDO_SECTORS_PER_BLOCK; i++) {
+ struct packed_reference_sector *sector = &packed->sectors[i];
+
+ vdo_unpack_journal_point(&sector->commit_point, &block->commit_points[i]);
+ memcpy(counters + (i * COUNTS_PER_SECTOR), sector->counts,
+ (sizeof(vdo_refcount_t) * COUNTS_PER_SECTOR));
+ /* The slab_journal_point must be the latest point found in any sector. */
+ if (vdo_before_journal_point(&slab->slab_journal_point,
+ &block->commit_points[i]))
+ slab->slab_journal_point = block->commit_points[i];
+
+ if ((i > 0) &&
+ !journal_points_equal(block->commit_points[0],
+ block->commit_points[i])) {
+ size_t block_index = block - block->slab->reference_blocks;
+
+ vdo_log_warning("Torn write detected in sector %u of reference block %zu of slab %u",
+ i, block_index, block->slab->slab_number);
+ }
+ }
+
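+ /* Recompute the allocated count from the freshly unpacked counters. */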
+ block->allocated_count = 0;
+ for (index = 0; index < COUNTS_PER_BLOCK; index++) {
+ if (counters[index] != EMPTY_REFERENCE_COUNT)
+ block->allocated_count++;
+ }
+}
+
+/**
+ * finish_reference_block_load() - After a reference block has been read, unpack it.
+ * @completion: The VIO that just finished reading.
+ */
+static void finish_reference_block_load(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = vio_as_pooled_vio(vio);
+ struct reference_block *block = completion->parent;
+ struct vdo_slab *slab = block->slab;
+
+ unpack_reference_block((struct packed_reference_block *) vio->data, block);
+ return_vio_to_pool(slab->allocator->vio_pool, pooled);
+ slab->active_count--;
+ clear_provisional_references(block);
+
+ slab->free_blocks -= block->allocated_count;
+ check_if_slab_drained(slab);
+}
+
+static void load_reference_block_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct reference_block *block = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_reference_block_load,
+ block->slab->allocator->thread_id);
+}
+
+/**
+ * load_reference_block() - After a block waiter has gotten a VIO from the VIO pool, load the
+ * block.
+ * @waiter: The waiter of the block to load.
+ * @context: The VIO returned by the pool.
+ */
+static void load_reference_block(struct vdo_waiter *waiter, void *context)
+{
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+ struct reference_block *block =
+ container_of(waiter, struct reference_block, waiter);
+ size_t block_offset = (block - block->slab->reference_blocks);
+
+ vio->completion.parent = block;
+ vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset,
+ load_reference_block_endio, handle_io_error,
+ REQ_OP_READ);
+}
+
+/**
+ * load_reference_blocks() - Load a slab's reference blocks from the underlying storage into a
+ * pre-allocated reference counter array.
+ */
+static void load_reference_blocks(struct vdo_slab *slab)
+{
+ block_count_t i;
+
+ slab->free_blocks = slab->block_count;
+ slab->active_count = slab->reference_block_count;
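+ /* Each block acquires a vio from the pool and then reads itself in. */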
+ for (i = 0; i < slab->reference_block_count; i++) {
+ struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter;
+
+ waiter->callback = load_reference_block;
+ acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
+ }
+}
+
+/**
+ * drain_slab() - Drain all reference count I/O.
+ *
+ * Depending upon the type of drain being performed (as recorded in the slab's admin state), the
+ * reference blocks may be loaded from disk or dirty reference blocks may be written out.
+ */
+static void drain_slab(struct vdo_slab *slab)
+{
+ bool save;
+ bool load;
+ const struct admin_state_code *state = vdo_get_admin_state_code(&slab->state);
+
+ if (state == VDO_ADMIN_STATE_SUSPENDING)
+ return;
+
+ if ((state != VDO_ADMIN_STATE_REBUILDING) &&
+ (state != VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING))
+ commit_tail(&slab->journal);
+
+ if ((state == VDO_ADMIN_STATE_RECOVERING) || (slab->counters == NULL))
+ return;
+
+ save = false;
+ load = slab->allocator->summary_entries[slab->slab_number].load_ref_counts;
+ if (state == VDO_ADMIN_STATE_SCRUBBING) {
+ if (load) {
+ load_reference_blocks(slab);
+ return;
+ }
+ } else if (state == VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING) {
+ if (!load) {
+ /* These reference counts were never written, so mark them all dirty. */
+ dirty_all_reference_blocks(slab);
+ }
+ save = true;
+ } else if (state == VDO_ADMIN_STATE_REBUILDING) {
+ /*
+ * Write out the counters if the slab has written them before, or it has any
+ * non-zero reference counts, or there are any slab journal blocks.
+ */
+ block_count_t data_blocks = slab->allocator->depot->slab_config.data_blocks;
+
+ if (load || (slab->free_blocks != data_blocks) ||
+ !is_slab_journal_blank(slab)) {
+ dirty_all_reference_blocks(slab);
+ save = true;
+ }
+ } else if (state == VDO_ADMIN_STATE_SAVING) {
+ save = (slab->status == VDO_SLAB_REBUILT);
+ } else {
+ vdo_finish_draining_with_result(&slab->state, VDO_SUCCESS);
+ return;
+ }
+
+ if (save)
+ save_dirty_reference_blocks(slab);
+}
+
+static int allocate_slab_counters(struct vdo_slab *slab)
+{
+ int result;
+ size_t index, bytes;
+
+ result = VDO_ASSERT(slab->reference_blocks == NULL,
+ "vdo_slab %u doesn't allocate refcounts twice",
+ slab->slab_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(slab->reference_block_count, struct reference_block,
+ __func__, &slab->reference_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * Allocate such that the runt slab has a full-length memory array, plus a little padding
+ * so we can word-search even at the very end.
+ */
+ bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD);
+ result = vdo_allocate(bytes, vdo_refcount_t, "ref counts array",
+ &slab->counters);
+ if (result != VDO_SUCCESS) {
+ vdo_free(vdo_forget(slab->reference_blocks));
+ return result;
+ }
+
+ slab->search_cursor.first_block = slab->reference_blocks;
+ slab->search_cursor.last_block = &slab->reference_blocks[slab->reference_block_count - 1];
+ reset_search_cursor(slab);
+
+ for (index = 0; index < slab->reference_block_count; index++) {
+ slab->reference_blocks[index] = (struct reference_block) {
+ .slab = slab,
+ };
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int allocate_counters_if_clean(struct vdo_slab *slab)
+{
+ if (vdo_is_state_clean_load(&slab->state))
+ return allocate_slab_counters(slab);
+
+ return VDO_SUCCESS;
+}
+
+static void finish_loading_journal(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct slab_journal *journal = completion->parent;
+ struct vdo_slab *slab = journal->slab;
+ struct packed_slab_journal_block *block = (struct packed_slab_journal_block *) vio->data;
+ struct slab_journal_block_header header;
+
+ vdo_unpack_slab_journal_block_header(&block->header, &header);
+
+ /* FIXME: should it be an error if the following conditional fails? */
+ if ((header.metadata_type == VDO_METADATA_SLAB_JOURNAL) &&
+ (header.nonce == slab->allocator->nonce)) {
+ journal->tail = header.sequence_number + 1;
+
+ /*
+ * If the slab is clean, this implies the slab journal is empty, so advance the
+ * head appropriately.
+ */
+ journal->head = (slab->allocator->summary_entries[slab->slab_number].is_dirty ?
+ header.head : journal->tail);
+ journal->tail_header = header;
+ initialize_journal_state(journal);
+ }
+
+ return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
+}
+
+static void read_slab_journal_tail_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_loading_journal,
+ journal->slab->allocator->thread_id);
+}
+
+static void handle_load_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct slab_journal *journal = completion->parent;
+ struct vio *vio = as_vio(completion);
+
+ vio_record_metadata_io_error(vio);
+ return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ vdo_finish_loading_with_result(&journal->slab->state, result);
+}
+
+/**
+ * read_slab_journal_tail() - Read the slab journal tail block by using a vio acquired from the vio
+ * pool.
+ * @waiter: The vio pool waiter which has just been notified.
+ * @context: The vio pool entry given to the waiter.
+ *
+ * This is the success callback from acquire_vio_from_pool() when loading a slab journal.
+ */
+static void read_slab_journal_tail(struct vdo_waiter *waiter, void *context)
+{
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, resource_waiter);
+ struct vdo_slab *slab = journal->slab;
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+ tail_block_offset_t last_commit_point =
+ slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
+
+ /*
+ * Slab summary keeps the commit point offset, so the tail block is the block before that.
+ * Calculation supports small journals in unit tests.
+ */
+ tail_block_offset_t tail_block = ((last_commit_point == 0) ?
+ (tail_block_offset_t)(journal->size - 1) :
+ (last_commit_point - 1));
+
+ vio->completion.parent = journal;
+ vio->completion.callback_thread_id = slab->allocator->thread_id;
+ vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block,
+ read_slab_journal_tail_endio, handle_load_error,
+ REQ_OP_READ);
+}
+
+/**
+ * load_slab_journal() - Load a slab's journal by reading the journal's tail.
+ */
+static void load_slab_journal(struct vdo_slab *slab)
+{
+ struct slab_journal *journal = &slab->journal;
+ tail_block_offset_t last_commit_point;
+
+ last_commit_point = slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
+ if ((last_commit_point == 0) &&
+ !slab->allocator->summary_entries[slab->slab_number].load_ref_counts) {
+ /*
+ * This slab claims that it has a tail block at (journal->size - 1), but a head of
+ * 1. This is impossible, due to the scrubbing threshold, on a real system, so
+ * don't bother reading the (bogus) data off disk.
+ */
+ VDO_ASSERT_LOG_ONLY(((journal->size < 16) ||
+ (journal->scrubbing_threshold < (journal->size - 1))),
+ "Scrubbing threshold protects against reads of unwritten slab journal blocks");
+ vdo_finish_loading_with_result(&slab->state,
+ allocate_counters_if_clean(slab));
+ return;
+ }
+
+ journal->resource_waiter.callback = read_slab_journal_tail;
+ acquire_vio_from_pool(slab->allocator->vio_pool, &journal->resource_waiter);
+}
+
+static void register_slab_for_scrubbing(struct vdo_slab *slab, bool high_priority)
+{
+ struct slab_scrubber *scrubber = &slab->allocator->scrubber;
+
+ VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT),
+ "slab to be scrubbed is unrecovered");
+
+ if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING)
+ return;
+
+ list_del_init(&slab->allocq_entry);
+ if (!slab->was_queued_for_scrubbing) {
+ WRITE_ONCE(scrubber->slab_count, scrubber->slab_count + 1);
+ slab->was_queued_for_scrubbing = true;
+ }
+
+ if (high_priority) {
+ slab->status = VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING;
+ list_add_tail(&slab->allocq_entry, &scrubber->high_priority_slabs);
+ return;
+ }
+
+ list_add_tail(&slab->allocq_entry, &scrubber->slabs);
+}
+
+/* Queue a slab for allocation or scrubbing. */
+static void queue_slab(struct vdo_slab *slab)
+{
+ struct block_allocator *allocator = slab->allocator;
+ block_count_t free_blocks;
+ int result;
+
+ VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
+ "a requeued slab must not already be on a ring");
+
+ if (vdo_is_read_only(allocator->depot->vdo))
+ return;
+
+ free_blocks = slab->free_blocks;
+ result = VDO_ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks),
+ "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)",
+ slab->slab_number, (unsigned long long) free_blocks,
+ (unsigned long long) allocator->depot->slab_config.data_blocks);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(allocator->depot->vdo, result);
+ return;
+ }
+
+ if (slab->status != VDO_SLAB_REBUILT) {
+ register_slab_for_scrubbing(slab, false);
+ return;
+ }
+
+ if (!vdo_is_state_resuming(&slab->state)) {
+ /*
+ * If the slab is resuming, we've already accounted for it here, so don't do it
+ * again.
+ * FIXME: under what situation would the slab be resuming here?
+ */
+ WRITE_ONCE(allocator->allocated_blocks,
+ allocator->allocated_blocks - free_blocks);
+ if (!is_slab_journal_blank(slab)) {
+ WRITE_ONCE(allocator->statistics.slabs_opened,
+ allocator->statistics.slabs_opened + 1);
+ }
+ }
+
+ if (allocator->depot->vdo->suspend_type == VDO_ADMIN_STATE_SAVING)
+ reopen_slab_journal(slab);
+
+ prioritize_slab(slab);
+}
+
+/**
+ * initiate_slab_action() - Initiate a slab action.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_slab_action(struct admin_state *state)
+{
+ struct vdo_slab *slab = container_of(state, struct vdo_slab, state);
+
+ if (vdo_is_state_draining(state)) {
+ const struct admin_state_code *operation = vdo_get_admin_state_code(state);
+
+ if (operation == VDO_ADMIN_STATE_SCRUBBING)
+ slab->status = VDO_SLAB_REBUILDING;
+
+ drain_slab(slab);
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ if (vdo_is_state_loading(state)) {
+ load_slab_journal(slab);
+ return;
+ }
+
+ if (vdo_is_state_resuming(state)) {
+ queue_slab(slab);
+ vdo_finish_resuming(state);
+ return;
+ }
+
+ vdo_finish_operation(state, VDO_INVALID_ADMIN_STATE);
+}
+
+/**
+ * get_next_slab() - Get the next slab to scrub.
+ * @scrubber: The slab scrubber.
+ *
+ * Return: The next slab to scrub or NULL if there are none.
+ */
+static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber)
+{
+ struct vdo_slab *slab;
+
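+ /* High priority slabs are scrubbed before those on the regular list. */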
+ slab = list_first_entry_or_null(&scrubber->high_priority_slabs,
+ struct vdo_slab, allocq_entry);
+ if (slab != NULL)
+ return slab;
+
+ return list_first_entry_or_null(&scrubber->slabs, struct vdo_slab,
+ allocq_entry);
+}
+
+/**
+ * has_slabs_to_scrub() - Check whether a scrubber has slabs to scrub.
+ * @scrubber: The scrubber to check.
+ *
+ * Return: true if the scrubber has slabs to scrub.
+ */
+static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber)
+{
+ return (get_next_slab(scrubber) != NULL);
+}
+
+/**
+ * uninitialize_scrubber_vio() - Clean up the slab_scrubber's vio.
+ * @scrubber: The scrubber.
+ */
+static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber)
+{
+ vdo_free(vdo_forget(scrubber->vio.data));
+ free_vio_components(&scrubber->vio);
+}
+
+/**
+ * finish_scrubbing() - Stop scrubbing, either because there are no more slabs to scrub or because
+ * there's been an error.
+ * @scrubber: The scrubber.
+ * @result: The result of the scrubbing operation.
+ */
+static void finish_scrubbing(struct slab_scrubber *scrubber, int result)
+{
+ bool notify = vdo_waitq_has_waiters(&scrubber->waiters);
+ bool done = !has_slabs_to_scrub(scrubber);
+ struct block_allocator *allocator =
+ container_of(scrubber, struct block_allocator, scrubber);
+
+ if (done)
+ uninitialize_scrubber_vio(scrubber);
+
+ if (scrubber->high_priority_only) {
+ scrubber->high_priority_only = false;
+ vdo_fail_completion(vdo_forget(scrubber->vio.completion.parent), result);
+ } else if (done && (atomic_add_return(-1, &allocator->depot->zones_to_scrub) == 0)) {
+ /* All of our slabs were scrubbed, and we're the last allocator to finish. */
+ enum vdo_state prior_state =
+ atomic_cmpxchg(&allocator->depot->vdo->state, VDO_RECOVERING,
+ VDO_DIRTY);
+
+ /*
+ * To be safe, even if the CAS failed, ensure anything that follows is ordered with
+ * respect to whatever state change did happen.
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * We must check the VDO state here and not the depot's read_only_notifier since
+ * the compare-swap-above could have failed due to a read-only entry which our own
+ * thread does not yet know about.
+ */
+ if (prior_state == VDO_DIRTY)
+ vdo_log_info("VDO commencing normal operation");
+ else if (prior_state == VDO_RECOVERING)
+ vdo_log_info("Exiting recovery mode");
+ }
+
+ /*
+ * Note that the scrubber has stopped, and inform anyone who might be waiting for that to
+ * happen.
+ */
+ if (!vdo_finish_draining(&scrubber->admin_state))
+ WRITE_ONCE(scrubber->admin_state.current_state,
+ VDO_ADMIN_STATE_SUSPENDED);
+
+ /*
+ * We can't notify waiters until after we've finished draining or they'll just requeue.
+ * Fortunately if there were waiters, we can't have been freed yet.
+ */
+ if (notify)
+ vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL);
+}
+
+static void scrub_next_slab(struct slab_scrubber *scrubber);
+
+/**
+ * slab_scrubbed() - Notify the scrubber that a slab has been scrubbed.
+ * @completion: The slab rebuild completion.
+ *
+ * This callback is registered in apply_journal_entries().
+ */
+static void slab_scrubbed(struct vdo_completion *completion)
+{
+ struct slab_scrubber *scrubber =
+ container_of(as_vio(completion), struct slab_scrubber, vio);
+ struct vdo_slab *slab = scrubber->slab;
+
+ slab->status = VDO_SLAB_REBUILT;
+ queue_slab(slab);
+ reopen_slab_journal(slab);
+ WRITE_ONCE(scrubber->slab_count, scrubber->slab_count - 1);
+ scrub_next_slab(scrubber);
+}
+
+/**
+ * abort_scrubbing() - Abort scrubbing due to an error.
+ * @scrubber: The slab scrubber.
+ * @result: The error.
+ */
+static void abort_scrubbing(struct slab_scrubber *scrubber, int result)
+{
+ vdo_enter_read_only_mode(scrubber->vio.completion.vdo, result);
+ finish_scrubbing(scrubber, result);
+}
+
+/**
+ * handle_scrubber_error() - Handle errors while rebuilding a slab.
+ * @completion: The slab rebuild completion.
+ */
+static void handle_scrubber_error(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+
+ vio_record_metadata_io_error(vio);
+ abort_scrubbing(container_of(vio, struct slab_scrubber, vio),
+ completion->result);
+}
+
+/**
+ * apply_block_entries() - Apply all the entries in a block to the reference counts.
+ * @block: A block with entries to apply.
+ * @entry_count: The number of entries to apply.
+ * @block_number: The sequence number of the block.
+ * @slab: The slab to apply the entries to.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int apply_block_entries(struct packed_slab_journal_block *block,
+ journal_entry_count_t entry_count,
+ sequence_number_t block_number, struct vdo_slab *slab)
+{
+ struct journal_point entry_point = {
+ .sequence_number = block_number,
+ .entry_count = 0,
+ };
+ int result;
+ slab_block_number max_sbn = slab->end - slab->start;
+
+ while (entry_point.entry_count < entry_count) {
+ struct slab_journal_entry entry =
+ vdo_decode_slab_journal_entry(block, entry_point.entry_count);
+
+ if (entry.sbn > max_sbn) {
+ /* This entry is out of bounds. */
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "vdo_slab journal entry (%llu, %u) had invalid offset %u in slab (size %u blocks)",
+ (unsigned long long) block_number,
+ entry_point.entry_count,
+ entry.sbn, max_sbn);
+ }
+
+ result = replay_reference_count_change(slab, &entry_point, entry);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "vdo_slab journal entry (%llu, %u) (%s of offset %u) could not be applied in slab %u",
+ (unsigned long long) block_number,
+ entry_point.entry_count,
+ vdo_get_journal_operation_name(entry.operation),
+ entry.sbn, slab->slab_number);
+ return result;
+ }
+ entry_point.entry_count++;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * apply_journal_entries() - Find the relevant vio of the slab journal and apply all valid entries.
+ * @completion: The metadata read vio completion.
+ *
+ * This is a callback registered in start_scrubbing().
+ */
+static void apply_journal_entries(struct vdo_completion *completion)
+{
+ int result;
+ struct slab_scrubber *scrubber =
+ container_of(as_vio(completion), struct slab_scrubber, vio);
+ struct vdo_slab *slab = scrubber->slab;
+ struct slab_journal *journal = &slab->journal;
+
+ /* Find the boundaries of the useful part of the journal. */
+ sequence_number_t tail = journal->tail;
+ tail_block_offset_t end_index = (tail - 1) % journal->size;
+ char *end_data = scrubber->vio.data + (end_index * VDO_BLOCK_SIZE);
+ struct packed_slab_journal_block *end_block =
+ (struct packed_slab_journal_block *) end_data;
+
+ sequence_number_t head = __le64_to_cpu(end_block->header.head);
+ tail_block_offset_t head_index = head % journal->size;
+ block_count_t index = head_index;
+
+ struct journal_point ref_counts_point = slab->slab_journal_point;
+ struct journal_point last_entry_applied = ref_counts_point;
+ sequence_number_t sequence;
+
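+	/* Replay each journal block from head to tail, wrapping around the circular journal. */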
+ for (sequence = head; sequence < tail; sequence++) {
+ char *block_data = scrubber->vio.data + (index * VDO_BLOCK_SIZE);
+ struct packed_slab_journal_block *block =
+ (struct packed_slab_journal_block *) block_data;
+ struct slab_journal_block_header header;
+
+ vdo_unpack_slab_journal_block_header(&block->header, &header);
+
+ if ((header.nonce != slab->allocator->nonce) ||
+ (header.metadata_type != VDO_METADATA_SLAB_JOURNAL) ||
+ (header.sequence_number != sequence) ||
+ (header.entry_count > journal->entries_per_block) ||
+ (header.has_block_map_increments &&
+ (header.entry_count > journal->full_entries_per_block))) {
+ /* The block is not what we expect it to be. */
+ vdo_log_error("vdo_slab journal block for slab %u was invalid",
+ slab->slab_number);
+ abort_scrubbing(scrubber, VDO_CORRUPT_JOURNAL);
+ return;
+ }
+
+ result = apply_block_entries(block, header.entry_count, sequence, slab);
+ if (result != VDO_SUCCESS) {
+ abort_scrubbing(scrubber, result);
+ return;
+ }
+
+ last_entry_applied.sequence_number = sequence;
+ last_entry_applied.entry_count = header.entry_count - 1;
+ index++;
+ if (index == journal->size)
+ index = 0;
+ }
+
+ /*
+ * At the end of rebuild, the reference counters should be accurate to the end of the
+ * journal we just applied.
+ */
+ result = VDO_ASSERT(!vdo_before_journal_point(&last_entry_applied,
+ &ref_counts_point),
+ "Refcounts are not more accurate than the slab journal");
+ if (result != VDO_SUCCESS) {
+ abort_scrubbing(scrubber, result);
+ return;
+ }
+
+ /* Save out the rebuilt reference blocks. */
+ vdo_prepare_completion(completion, slab_scrubbed, handle_scrubber_error,
+ slab->allocator->thread_id, completion->parent);
+ vdo_start_operation_with_waiter(&slab->state,
+ VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING,
+ completion, initiate_slab_action);
+}
+
+static void read_slab_journal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_scrubber *scrubber = container_of(vio, struct slab_scrubber, vio);
+
+ continue_vio_after_io(bio->bi_private, apply_journal_entries,
+ scrubber->slab->allocator->thread_id);
+}
+
+/**
+ * start_scrubbing() - Read the current slab's journal from disk now that it has been flushed.
+ * @completion: The scrubber's vio completion.
+ *
+ * This callback is registered in scrub_next_slab().
+ */
+static void start_scrubbing(struct vdo_completion *completion)
+{
+ struct slab_scrubber *scrubber =
+ container_of(as_vio(completion), struct slab_scrubber, vio);
+ struct vdo_slab *slab = scrubber->slab;
+
+ if (!slab->allocator->summary_entries[slab->slab_number].is_dirty) {
+ slab_scrubbed(completion);
+ return;
+ }
+
+ vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin,
+ read_slab_journal_endio, handle_scrubber_error,
+ REQ_OP_READ);
+}
+
+/**
+ * scrub_next_slab() - Scrub the next slab if there is one.
+ * @scrubber: The scrubber.
+ */
+static void scrub_next_slab(struct slab_scrubber *scrubber)
+{
+ struct vdo_completion *completion = &scrubber->vio.completion;
+ struct vdo_slab *slab;
+
+ /*
+	 * Note: this notify call is only safe because scrubbing can only be started when
+ * the VDO is quiescent.
+ */
+ vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL);
+
+ if (vdo_is_read_only(completion->vdo)) {
+ finish_scrubbing(scrubber, VDO_READ_ONLY);
+ return;
+ }
+
+ slab = get_next_slab(scrubber);
+ if ((slab == NULL) ||
+ (scrubber->high_priority_only && list_empty(&scrubber->high_priority_slabs))) {
+ finish_scrubbing(scrubber, VDO_SUCCESS);
+ return;
+ }
+
+ if (vdo_finish_draining(&scrubber->admin_state))
+ return;
+
+ list_del_init(&slab->allocq_entry);
+ scrubber->slab = slab;
+ vdo_prepare_completion(completion, start_scrubbing, handle_scrubber_error,
+ slab->allocator->thread_id, completion->parent);
+ vdo_start_operation_with_waiter(&slab->state, VDO_ADMIN_STATE_SCRUBBING,
+ completion, initiate_slab_action);
+}
+
+/**
+ * scrub_slabs() - Scrub all of an allocator's slabs that are eligible for scrubbing.
+ * @allocator: The block_allocator to scrub.
+ * @parent: The completion to notify when scrubbing is done (may be NULL); a non-NULL parent
+ *          implies high_priority.
+ */
+static void scrub_slabs(struct block_allocator *allocator, struct vdo_completion *parent)
+{
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ scrubber->vio.completion.parent = parent;
+ scrubber->high_priority_only = (parent != NULL);
+ if (!has_slabs_to_scrub(scrubber)) {
+ finish_scrubbing(scrubber, VDO_SUCCESS);
+ return;
+ }
+
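+	/*
+	 * If only high-priority slabs will be scrubbed, but there are no slabs available for
+	 * allocation and none queued for high-priority scrubbing, promote the next scrub
+	 * candidate so the waiting parent can make progress.
+	 */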
+ if (scrubber->high_priority_only &&
+ vdo_is_priority_table_empty(allocator->prioritized_slabs) &&
+ list_empty(&scrubber->high_priority_slabs))
+ register_slab_for_scrubbing(get_next_slab(scrubber), true);
+
+ vdo_resume_if_quiescent(&scrubber->admin_state);
+ scrub_next_slab(scrubber);
+}
+
+static inline void assert_on_allocator_thread(thread_id_t thread_id,
+ const char *function_name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id),
+ "%s called on correct thread", function_name);
+}
+
+static void register_slab_with_allocator(struct block_allocator *allocator,
+ struct vdo_slab *slab)
+{
+ allocator->slab_count++;
+ allocator->last_slab = slab->slab_number;
+}
+
+/**
+ * get_depot_slab_iterator() - Return a slab_iterator over the slabs in a slab_depot.
+ * @depot: The depot over which to iterate.
+ * @start: The number of the slab to start iterating from.
+ * @end: The number of the last slab which may be returned.
+ * @stride: The difference in slab number between successive slabs.
+ *
+ * Iteration always occurs from higher to lower numbered slabs.
+ *
+ * Return: An initialized iterator structure.
+ */
+static struct slab_iterator get_depot_slab_iterator(struct slab_depot *depot,
+ slab_count_t start, slab_count_t end,
+ slab_count_t stride)
+{
+ struct vdo_slab **slabs = depot->slabs;
+
+ return (struct slab_iterator) {
+ .slabs = slabs,
+ .next = (((slabs == NULL) || (start < end)) ? NULL : slabs[start]),
+ .end = end,
+ .stride = stride,
+ };
+}
+
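+/*
+ * get_slab_iterator() - Return an iterator over all of an allocator's slabs. Slabs are assigned
+ * to allocators round-robin by slab number, so this iterates from the allocator's last slab down
+ * to its zone number in steps of the zone count.
+ */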
+static struct slab_iterator get_slab_iterator(const struct block_allocator *allocator)
+{
+ return get_depot_slab_iterator(allocator->depot, allocator->last_slab,
+ allocator->zone_number,
+ allocator->depot->zone_count);
+}
+
+/**
+ * next_slab() - Get the next slab from a slab_iterator and advance the iterator.
+ * @iterator: The slab_iterator.
+ *
+ * Return: The next slab or NULL if the iterator is exhausted.
+ */
+static struct vdo_slab *next_slab(struct slab_iterator *iterator)
+{
+ struct vdo_slab *slab = iterator->next;
+
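+	/* There is no next slab if stepping down by another stride would pass the end of the range. */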
+ if ((slab == NULL) || (slab->slab_number < iterator->end + iterator->stride))
+ iterator->next = NULL;
+ else
+ iterator->next = iterator->slabs[slab->slab_number - iterator->stride];
+
+ return slab;
+}
+
+/**
+ * abort_waiter() - Abort vios waiting to make journal entries when read-only.
+ *
+ * This callback is invoked on all vios waiting to make slab journal entries after the VDO has gone
+ * into read-only mode. Implements waiter_callback_fn.
+ */
+static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ struct reference_updater *updater =
+ container_of(waiter, struct reference_updater, waiter);
+ struct data_vio *data_vio = data_vio_from_reference_updater(updater);
+
+ if (updater->increment) {
+ continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
+ return;
+ }
+
+ vdo_continue_completion(&data_vio->decrement_completion, VDO_READ_ONLY);
+}
+
+/* Implements vdo_read_only_notification_fn. */
+static void notify_block_allocator_of_read_only_mode(void *listener,
+ struct vdo_completion *parent)
+{
+ struct block_allocator *allocator = listener;
+ struct slab_iterator iterator;
+
+ assert_on_allocator_thread(allocator->thread_id, __func__);
+ iterator = get_slab_iterator(allocator);
+ while (iterator.next != NULL) {
+ struct vdo_slab *slab = next_slab(&iterator);
+
+ vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters,
+ abort_waiter, &slab->journal);
+ check_if_slab_drained(slab);
+ }
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * vdo_acquire_provisional_reference() - Acquire a provisional reference on behalf of a PBN lock if
+ * the block it locks is unreferenced.
+ * @slab: The slab which contains the block.
+ * @pbn: The physical block to reference.
+ * @lock: The lock.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_acquire_provisional_reference(struct vdo_slab *slab, physical_block_number_t pbn,
+ struct pbn_lock *lock)
+{
+ slab_block_number block_number;
+ int result;
+
+ if (vdo_pbn_lock_has_provisional_reference(lock))
+ return VDO_SUCCESS;
+
+ if (!is_slab_open(slab))
+ return VDO_INVALID_ADMIN_STATE;
+
+ result = slab_block_number_from_pbn(slab, pbn, &block_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (slab->counters[block_number] == EMPTY_REFERENCE_COUNT) {
+ make_provisional_reference(slab, block_number);
+ if (lock != NULL)
+ vdo_assign_pbn_lock_provisional_reference(lock);
+ }
+
+ if (vdo_pbn_lock_has_provisional_reference(lock))
+ adjust_free_block_count(slab, false);
+
+ return VDO_SUCCESS;
+}
+
+static int __must_check allocate_slab_block(struct vdo_slab *slab,
+ physical_block_number_t *block_number_ptr)
+{
+ slab_block_number free_index;
+
+ if (!is_slab_open(slab))
+ return VDO_INVALID_ADMIN_STATE;
+
+ if (!search_reference_blocks(slab, &free_index))
+ return VDO_NO_SPACE;
+
+ VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT),
+ "free block must have ref count of zero");
+ make_provisional_reference(slab, free_index);
+ adjust_free_block_count(slab, false);
+
+ /*
+ * Update the search hint so the next search will start at the array index just past the
+ * free block we just found.
+ */
+ slab->search_cursor.index = (free_index + 1);
+
+ *block_number_ptr = slab->start + free_index;
+ return VDO_SUCCESS;
+}
+
+/**
+ * open_slab() - Prepare a slab to be allocated from.
+ * @slab: The slab.
+ */
+static void open_slab(struct vdo_slab *slab)
+{
+ reset_search_cursor(slab);
+ if (is_slab_journal_blank(slab)) {
+ WRITE_ONCE(slab->allocator->statistics.slabs_opened,
+ slab->allocator->statistics.slabs_opened + 1);
+ dirty_all_reference_blocks(slab);
+ } else {
+ WRITE_ONCE(slab->allocator->statistics.slabs_reopened,
+ slab->allocator->statistics.slabs_reopened + 1);
+ }
+
+ slab->allocator->open_slab = slab;
+}
+
+
+/*
+ * The block allocated will have a provisional reference and the reference must be either confirmed
+ * with a subsequent increment or vacated with a subsequent decrement via
+ * vdo_release_block_reference().
+ */
+int vdo_allocate_block(struct block_allocator *allocator,
+ physical_block_number_t *block_number_ptr)
+{
+ int result;
+
+ if (allocator->open_slab != NULL) {
+ /* Try to allocate the next block in the currently open slab. */
+ result = allocate_slab_block(allocator->open_slab, block_number_ptr);
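+		/* Only VDO_NO_SPACE falls through to open another slab; any other result is final. */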
+ if ((result == VDO_SUCCESS) || (result != VDO_NO_SPACE))
+ return result;
+
+ /* Put the exhausted open slab back into the priority table. */
+ prioritize_slab(allocator->open_slab);
+ }
+
+ /* Remove the highest priority slab from the priority table and make it the open slab. */
+ open_slab(list_entry(vdo_priority_table_dequeue(allocator->prioritized_slabs),
+ struct vdo_slab, allocq_entry));
+
+ /*
+ * Try allocating again. If we're out of space immediately after opening a slab, then every
+ * slab must be fully allocated.
+ */
+ return allocate_slab_block(allocator->open_slab, block_number_ptr);
+}
+
+/**
+ * vdo_enqueue_clean_slab_waiter() - Wait for a clean slab.
+ * @allocator: The block_allocator on which to wait.
+ * @waiter: The waiter.
+ *
+ * Return: VDO_SUCCESS if the waiter was queued, VDO_NO_SPACE if there are no slabs to scrub, and
+ * some other error otherwise.
+ */
+int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator,
+ struct vdo_waiter *waiter)
+{
+ if (vdo_is_read_only(allocator->depot->vdo))
+ return VDO_READ_ONLY;
+
+ if (vdo_is_state_quiescent(&allocator->scrubber.admin_state))
+ return VDO_NO_SPACE;
+
+ vdo_waitq_enqueue_waiter(&allocator->scrubber.waiters, waiter);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_modify_reference_count() - Modify the reference count of a block by first making a slab
+ * journal entry and then updating the reference counter.
+ *
+ * @completion: The completion of the data_vio for which to add the entry.
+ * @updater: Which of the data_vio's reference updaters is being submitted.
+ */
+void vdo_modify_reference_count(struct vdo_completion *completion,
+ struct reference_updater *updater)
+{
+ struct vdo_slab *slab = vdo_get_slab(completion->vdo->depot, updater->zpbn.pbn);
+
+ if (!is_slab_open(slab)) {
+ vdo_continue_completion(completion, VDO_INVALID_ADMIN_STATE);
+ return;
+ }
+
+ if (vdo_is_read_only(completion->vdo)) {
+ vdo_continue_completion(completion, VDO_READ_ONLY);
+ return;
+ }
+
+ vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter);
+ if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal))
+ register_slab_for_scrubbing(slab, true);
+
+ add_entries(&slab->journal);
+}
+
+/* Release an unused provisional reference. */
+int vdo_release_block_reference(struct block_allocator *allocator,
+ physical_block_number_t pbn)
+{
+ struct reference_updater updater;
+
+ if (pbn == VDO_ZERO_BLOCK)
+ return VDO_SUCCESS;
+
+ updater = (struct reference_updater) {
+ .operation = VDO_JOURNAL_DATA_REMAPPING,
+ .increment = false,
+ .zpbn = {
+ .pbn = pbn,
+ },
+ };
+
+ return adjust_reference_count(vdo_get_slab(allocator->depot, pbn),
+ &updater, NULL);
+}
+
+/*
+ * This is a min_heap callback function that orders slab_status structures using the 'is_clean'
+ * field as the primary key and the 'emptiness' field as the secondary key.
+ *
+ * Slabs need to be pushed onto the rings in the same order they are to be popped off. Popping
+ * should always get the most empty first, so pushing should be from most empty to least empty.
+ * Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
+ * before larger ones.
+ */
+static bool slab_status_is_less_than(const void *item1, const void *item2)
+{
+ const struct slab_status *info1 = item1;
+ const struct slab_status *info2 = item2;
+
+ if (info1->is_clean != info2->is_clean)
+ return info1->is_clean;
+ if (info1->emptiness != info2->emptiness)
+ return info1->emptiness > info2->emptiness;
+ return info1->slab_number < info2->slab_number;
+}
+
+static void swap_slab_statuses(void *item1, void *item2)
+{
+ struct slab_status *info1 = item1;
+ struct slab_status *info2 = item2;
+
+ swap(*info1, *info2);
+}
+
+static const struct min_heap_callbacks slab_status_min_heap = {
+ .elem_size = sizeof(struct slab_status),
+ .less = slab_status_is_less_than,
+ .swp = swap_slab_statuses,
+};
+
+/* Inform the slab actor that an action has finished on some slab; used by apply_to_slabs(). */
+static void slab_action_callback(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+ struct slab_actor *actor = &allocator->slab_actor;
+
+ if (--actor->slab_action_count == 0) {
+ actor->callback(completion);
+ return;
+ }
+
+ vdo_reset_completion(completion);
+}
+
+/* Preserve the error from part of an action and continue. */
+static void handle_operation_error(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ if (allocator->state.waiter != NULL)
+ vdo_set_completion_result(allocator->state.waiter, completion->result);
+ completion->callback(completion);
+}
+
+/* Perform an action on each of an allocator's slabs in parallel. */
+static void apply_to_slabs(struct block_allocator *allocator, vdo_action_fn callback)
+{
+ struct slab_iterator iterator;
+
+ vdo_prepare_completion(&allocator->completion, slab_action_callback,
+ handle_operation_error, allocator->thread_id, NULL);
+ allocator->completion.requeue = false;
+
+ /*
+ * Since we are going to dequeue all of the slabs, the open slab will become invalid, so
+ * clear it.
+ */
+ allocator->open_slab = NULL;
+
+ /* Ensure that we don't finish before we're done starting. */
+ allocator->slab_actor = (struct slab_actor) {
+ .slab_action_count = 1,
+ .callback = callback,
+ };
+
+ iterator = get_slab_iterator(allocator);
+ while (iterator.next != NULL) {
+ const struct admin_state_code *operation =
+ vdo_get_admin_state_code(&allocator->state);
+ struct vdo_slab *slab = next_slab(&iterator);
+
+ list_del_init(&slab->allocq_entry);
+ allocator->slab_actor.slab_action_count++;
+ vdo_start_operation_with_waiter(&slab->state, operation,
+ &allocator->completion,
+ initiate_slab_action);
+ }
+
+ slab_action_callback(&allocator->completion);
+}
+
+static void finish_loading_allocator(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+ const struct admin_state_code *operation =
+ vdo_get_admin_state_code(&allocator->state);
+
+ if (allocator->eraser != NULL)
+ dm_kcopyd_client_destroy(vdo_forget(allocator->eraser));
+
+ if (operation == VDO_ADMIN_STATE_LOADING_FOR_RECOVERY) {
+ void *context =
+ vdo_get_current_action_context(allocator->depot->action_manager);
+
+ vdo_replay_into_slab_journals(allocator, context);
+ return;
+ }
+
+ vdo_finish_loading(&allocator->state);
+}
+
+static void erase_next_slab_journal(struct block_allocator *allocator);
+
+static void copy_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct block_allocator *allocator = context;
+ int result = (((read_err == 0) && (write_err == 0)) ? VDO_SUCCESS : -EIO);
+
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(&allocator->completion, result);
+ return;
+ }
+
+ erase_next_slab_journal(allocator);
+}
+
+/* erase_next_slab_journal() - Erase the next slab journal. */
+static void erase_next_slab_journal(struct block_allocator *allocator)
+{
+ struct vdo_slab *slab;
+ physical_block_number_t pbn;
+ struct dm_io_region regions[1];
+ struct slab_depot *depot = allocator->depot;
+ block_count_t blocks = depot->slab_config.slab_journal_blocks;
+
+ if (allocator->slabs_to_erase.next == NULL) {
+ vdo_finish_completion(&allocator->completion);
+ return;
+ }
+
+ slab = next_slab(&allocator->slabs_to_erase);
+ pbn = slab->journal_origin - depot->vdo->geometry.bio_offset;
+ regions[0] = (struct dm_io_region) {
+ .bdev = vdo_get_backing_device(depot->vdo),
+ .sector = pbn * VDO_SECTORS_PER_BLOCK,
+ .count = blocks * VDO_SECTORS_PER_BLOCK,
+ };
+ dm_kcopyd_zero(allocator->eraser, 1, regions, 0, copy_callback, allocator);
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_load(struct admin_state *state)
+{
+ struct block_allocator *allocator =
+ container_of(state, struct block_allocator, state);
+ const struct admin_state_code *operation = vdo_get_admin_state_code(state);
+
+ if (operation == VDO_ADMIN_STATE_LOADING_FOR_REBUILD) {
+ /*
+ * Must requeue because the kcopyd client cannot be freed in the same stack frame
+ * as the kcopyd callback, lest it deadlock.
+ */
+ vdo_prepare_completion_for_requeue(&allocator->completion,
+ finish_loading_allocator,
+ handle_operation_error,
+ allocator->thread_id, NULL);
+ allocator->eraser = dm_kcopyd_client_create(NULL);
+ if (IS_ERR(allocator->eraser)) {
+ vdo_fail_completion(&allocator->completion,
+ PTR_ERR(allocator->eraser));
+ allocator->eraser = NULL;
+ return;
+ }
+ allocator->slabs_to_erase = get_slab_iterator(allocator);
+
+ erase_next_slab_journal(allocator);
+ return;
+ }
+
+ apply_to_slabs(allocator, finish_loading_allocator);
+}
+
+/**
+ * vdo_notify_slab_journals_are_recovered() - Inform a block allocator that its slab journals have
+ * been recovered from the recovery journal.
+ * @completion: The allocator completion.
+ */
+void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ vdo_finish_loading_with_result(&allocator->state, completion->result);
+}
+
+static int get_slab_statuses(struct block_allocator *allocator,
+ struct slab_status **statuses_ptr)
+{
+ int result;
+ struct slab_status *statuses;
+ struct slab_iterator iterator = get_slab_iterator(allocator);
+
+ result = vdo_allocate(allocator->slab_count, struct slab_status, __func__,
+ &statuses);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *statuses_ptr = statuses;
+
+ while (iterator.next != NULL) {
+ slab_count_t slab_number = next_slab(&iterator)->slab_number;
+
+ *statuses++ = (struct slab_status) {
+ .slab_number = slab_number,
+ .is_clean = !allocator->summary_entries[slab_number].is_dirty,
+ .emptiness = allocator->summary_entries[slab_number].fullness_hint,
+ };
+ }
+
+ return VDO_SUCCESS;
+}
+
+/* Prepare slabs for allocation or scrubbing. */
+static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator *allocator)
+{
+ struct slab_status current_slab_status;
+ struct min_heap heap;
+ int result;
+ struct slab_status *slab_statuses;
+ struct slab_depot *depot = allocator->depot;
+
+ WRITE_ONCE(allocator->allocated_blocks,
+ allocator->slab_count * depot->slab_config.data_blocks);
+ result = get_slab_statuses(allocator, &slab_statuses);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Sort the slabs by cleanliness, then by emptiness hint. */
+ heap = (struct min_heap) {
+ .data = slab_statuses,
+ .nr = allocator->slab_count,
+ .size = allocator->slab_count,
+ };
+ min_heapify_all(&heap, &slab_status_min_heap);
+
+ while (heap.nr > 0) {
+ bool high_priority;
+ struct vdo_slab *slab;
+ struct slab_journal *journal;
+
+ current_slab_status = slab_statuses[0];
+ min_heap_pop(&heap, &slab_status_min_heap);
+ slab = depot->slabs[current_slab_status.slab_number];
+
+ if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) ||
+ (!allocator->summary_entries[slab->slab_number].load_ref_counts &&
+ current_slab_status.is_clean)) {
+ queue_slab(slab);
+ continue;
+ }
+
+ slab->status = VDO_SLAB_REQUIRES_SCRUBBING;
+ journal = &slab->journal;
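+		/*
+		 * Scrub first any slab which is clean on a normal load, or whose journal has
+		 * reached its scrubbing threshold.
+		 */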
+ high_priority = ((current_slab_status.is_clean &&
+ (depot->load_type == VDO_SLAB_DEPOT_NORMAL_LOAD)) ||
+ (journal_length(journal) >= journal->scrubbing_threshold));
+ register_slab_for_scrubbing(slab, high_priority);
+ }
+
+ vdo_free(slab_statuses);
+ return VDO_SUCCESS;
+}
+
+static const char *status_to_string(enum slab_rebuild_status status)
+{
+ switch (status) {
+ case VDO_SLAB_REBUILT:
+ return "REBUILT";
+ case VDO_SLAB_REQUIRES_SCRUBBING:
+ return "SCRUBBING";
+ case VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING:
+ return "PRIORITY_SCRUBBING";
+ case VDO_SLAB_REBUILDING:
+ return "REBUILDING";
+ case VDO_SLAB_REPLAYING:
+ return "REPLAYING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void vdo_dump_block_allocator(const struct block_allocator *allocator)
+{
+ unsigned int pause_counter = 0;
+ struct slab_iterator iterator = get_slab_iterator(allocator);
+ const struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ vdo_log_info("block_allocator zone %u", allocator->zone_number);
+ while (iterator.next != NULL) {
+ struct vdo_slab *slab = next_slab(&iterator);
+ struct slab_journal *journal = &slab->journal;
+
+ if (slab->reference_blocks != NULL) {
+ /* Terse because there are a lot of slabs to dump and syslog is lossy. */
+ vdo_log_info("slab %u: P%u, %llu free", slab->slab_number,
+ slab->priority,
+ (unsigned long long) slab->free_blocks);
+ } else {
+ vdo_log_info("slab %u: status %s", slab->slab_number,
+ status_to_string(slab->status));
+ }
+
+ vdo_log_info(" slab journal: entry_waiters=%zu waiting_to_commit=%s updating_slab_summary=%s head=%llu unreapable=%llu tail=%llu next_commit=%llu summarized=%llu last_summarized=%llu recovery_lock=%llu dirty=%s",
+ vdo_waitq_num_waiters(&journal->entry_waiters),
+ vdo_bool_to_string(journal->waiting_to_commit),
+ vdo_bool_to_string(journal->updating_slab_summary),
+ (unsigned long long) journal->head,
+ (unsigned long long) journal->unreapable,
+ (unsigned long long) journal->tail,
+ (unsigned long long) journal->next_commit,
+ (unsigned long long) journal->summarized,
+ (unsigned long long) journal->last_summarized,
+ (unsigned long long) journal->recovery_lock,
+ vdo_bool_to_string(journal->recovery_lock != 0));
+ /*
+ * Given the frequency with which the locks are just a tiny bit off, it might be
+ * worth dumping all the locks, but that might be too much logging.
+ */
+
+ if (slab->counters != NULL) {
+ /* Terse because there are a lot of slabs to dump and syslog is lossy. */
+ vdo_log_info(" slab: free=%u/%u blocks=%u dirty=%zu active=%zu journal@(%llu,%u)",
+ slab->free_blocks, slab->block_count,
+ slab->reference_block_count,
+ vdo_waitq_num_waiters(&slab->dirty_blocks),
+ slab->active_count,
+ (unsigned long long) slab->slab_journal_point.sequence_number,
+ slab->slab_journal_point.entry_count);
+ } else {
+ vdo_log_info(" no counters");
+ }
+
+ /*
+ * Wait for a while after each batch of 32 slabs dumped, an arbitrary number,
+ * allowing the kernel log a chance to be flushed instead of being overrun.
+ */
+ if (pause_counter++ == 31) {
+ pause_counter = 0;
+ vdo_pause_for_logger();
+ }
+ }
+
+ vdo_log_info("slab_scrubber slab_count %u waiters %zu %s%s",
+ READ_ONCE(scrubber->slab_count),
+ vdo_waitq_num_waiters(&scrubber->waiters),
+ vdo_get_admin_state_code(&scrubber->admin_state)->name,
+ scrubber->high_priority_only ? ", high_priority_only " : "");
+}
+
+static void free_slab(struct vdo_slab *slab)
+{
+ if (slab == NULL)
+ return;
+
+ list_del(&slab->allocq_entry);
+ vdo_free(vdo_forget(slab->journal.block));
+ vdo_free(vdo_forget(slab->journal.locks));
+ vdo_free(vdo_forget(slab->counters));
+ vdo_free(vdo_forget(slab->reference_blocks));
+ vdo_free(slab);
+}
+
+static int initialize_slab_journal(struct vdo_slab *slab)
+{
+ struct slab_journal *journal = &slab->journal;
+ const struct slab_config *slab_config = &slab->allocator->depot->slab_config;
+ int result;
+
+ result = vdo_allocate(slab_config->slab_journal_blocks, struct journal_lock,
+ __func__, &journal->locks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, "struct packed_slab_journal_block",
+ (char **) &journal->block);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ journal->slab = slab;
+ journal->size = slab_config->slab_journal_blocks;
+ journal->flushing_threshold = slab_config->slab_journal_flushing_threshold;
+ journal->blocking_threshold = slab_config->slab_journal_blocking_threshold;
+ journal->scrubbing_threshold = slab_config->slab_journal_scrubbing_threshold;
+ journal->entries_per_block = VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK;
+ journal->full_entries_per_block = VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK;
+ journal->events = &slab->allocator->slab_journal_statistics;
+ journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal;
+ journal->tail = 1;
+ journal->head = 1;
+
+ journal->flushing_deadline = journal->flushing_threshold;
+ /*
+	 * Leave some time between the flushing deadline and the blocking threshold, so that
+	 * hopefully all flushing is done before blocking.
+ */
+ if ((journal->blocking_threshold - journal->flushing_threshold) > 5)
+ journal->flushing_deadline = journal->blocking_threshold - 5;
+
+ journal->slab_summary_waiter.callback = release_journal_locks;
+
+ INIT_LIST_HEAD(&journal->dirty_entry);
+ INIT_LIST_HEAD(&journal->uncommitted_blocks);
+
+ journal->tail_header.nonce = slab->allocator->nonce;
+ journal->tail_header.metadata_type = VDO_METADATA_SLAB_JOURNAL;
+ initialize_journal_state(journal);
+ return VDO_SUCCESS;
+}
+
+/**
+ * make_slab() - Construct a new, empty slab.
+ * @slab_origin: The physical block number within the block allocator partition of the first block
+ * in the slab.
+ * @allocator: The block allocator to which the slab belongs.
+ * @slab_number: The slab number of the slab.
+ * @is_new: true if this slab is being allocated as part of a resize.
+ * @slab_ptr: A pointer to receive the new slab.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check make_slab(physical_block_number_t slab_origin,
+ struct block_allocator *allocator,
+ slab_count_t slab_number, bool is_new,
+ struct vdo_slab **slab_ptr)
+{
+ const struct slab_config *slab_config = &allocator->depot->slab_config;
+ struct vdo_slab *slab;
+ int result;
+
+ result = vdo_allocate(1, struct vdo_slab, __func__, &slab);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *slab = (struct vdo_slab) {
+ .allocator = allocator,
+ .start = slab_origin,
+ .end = slab_origin + slab_config->slab_blocks,
+ .slab_number = slab_number,
+ .ref_counts_origin = slab_origin + slab_config->data_blocks,
+ .journal_origin =
+ vdo_get_slab_journal_start_block(slab_config, slab_origin),
+ .block_count = slab_config->data_blocks,
+ .free_blocks = slab_config->data_blocks,
+ .reference_block_count =
+ vdo_get_saved_reference_count_size(slab_config->data_blocks),
+ };
+ INIT_LIST_HEAD(&slab->allocq_entry);
+
+ result = initialize_slab_journal(slab);
+ if (result != VDO_SUCCESS) {
+ free_slab(slab);
+ return result;
+ }
+
+ if (is_new) {
+ vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NEW);
+ result = allocate_slab_counters(slab);
+ if (result != VDO_SUCCESS) {
+ free_slab(slab);
+ return result;
+ }
+ } else {
+ vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ }
+
+ *slab_ptr = slab;
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_slabs() - Allocate a new slab pointer array.
+ * @depot: The depot.
+ * @slab_count: The number of slabs the depot should have in the new array.
+ *
+ * Any existing slab pointers will be copied into the new array, and slabs will be allocated as
+ * needed. The newly allocated slabs will not be distributed for use by the block allocators.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int allocate_slabs(struct slab_depot *depot, slab_count_t slab_count)
+{
+ block_count_t slab_size;
+ bool resizing = false;
+ physical_block_number_t slab_origin;
+ int result;
+
+ result = vdo_allocate(slab_count, struct vdo_slab *,
+ "slab pointer array", &depot->new_slabs);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (depot->slabs != NULL) {
+ memcpy(depot->new_slabs, depot->slabs,
+ depot->slab_count * sizeof(struct vdo_slab *));
+ resizing = true;
+ }
+
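+	/*
+	 * New slabs are laid out contiguously after the existing ones, one slab_size apart, and
+	 * are assigned to the allocators round-robin by slab number.
+	 */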
+ slab_size = depot->slab_config.slab_blocks;
+ slab_origin = depot->first_block + (depot->slab_count * slab_size);
+
+ for (depot->new_slab_count = depot->slab_count;
+ depot->new_slab_count < slab_count;
+ depot->new_slab_count++, slab_origin += slab_size) {
+ struct block_allocator *allocator =
+ &depot->allocators[depot->new_slab_count % depot->zone_count];
+ struct vdo_slab **slab_ptr = &depot->new_slabs[depot->new_slab_count];
+
+ result = make_slab(slab_origin, allocator, depot->new_slab_count,
+ resizing, slab_ptr);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_abandon_new_slabs() - Abandon any new slabs in this depot, freeing them as needed.
+ * @depot: The depot.
+ */
+void vdo_abandon_new_slabs(struct slab_depot *depot)
+{
+ slab_count_t i;
+
+ if (depot->new_slabs == NULL)
+ return;
+
+ for (i = depot->slab_count; i < depot->new_slab_count; i++)
+ free_slab(vdo_forget(depot->new_slabs[i]));
+ depot->new_slab_count = 0;
+ depot->new_size = 0;
+ vdo_free(vdo_forget(depot->new_slabs));
+}
+
+/**
+ * get_allocator_thread_id() - Get the ID of the thread on which a given allocator operates.
+ *
+ * Implements vdo_zone_thread_getter_fn.
+ */
+static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_number)
+{
+ return ((struct slab_depot *) context)->allocators[zone_number].thread_id;
+}
+
+/**
+ * release_recovery_journal_lock() - Request the slab journal to release the recovery journal lock
+ * it may hold on a specified recovery journal block.
+ * @journal: The slab journal.
+ * @recovery_lock: The sequence number of the recovery journal block whose locks should be
+ * released.
+ *
+ * Return: true if the journal does hold a lock on the specified block (which it will release).
+ */
+static bool __must_check release_recovery_journal_lock(struct slab_journal *journal,
+ sequence_number_t recovery_lock)
+{
+ if (recovery_lock > journal->recovery_lock) {
+ VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock),
+ "slab journal recovery lock is not older than the recovery journal head");
+ return false;
+ }
+
+ if ((recovery_lock < journal->recovery_lock) ||
+ vdo_is_read_only(journal->slab->allocator->depot->vdo))
+ return false;
+
+ /* All locks are held by the block which is in progress; write it. */
+ commit_tail(journal);
+ return true;
+}
+
+/*
+ * Request a commit of all dirty tail blocks which are locking the recovery journal block the depot
+ * is seeking to release.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void release_tail_block_locks(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_journal *journal, *tmp;
+ struct slab_depot *depot = context;
+ struct list_head *list = &depot->allocators[zone_number].dirty_slab_journals;
+
+ list_for_each_entry_safe(journal, tmp, list, dirty_entry) {
+ if (!release_recovery_journal_lock(journal,
+ depot->active_release_request))
+ break;
+ }
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * prepare_for_tail_block_commit() - Prepare to commit oldest tail blocks.
+ *
+ * Implements vdo_action_preamble_fn.
+ */
+static void prepare_for_tail_block_commit(void *context, struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ depot->active_release_request = depot->new_release_request;
+ vdo_finish_completion(parent);
+}
+
+/**
+ * schedule_tail_block_commit() - Schedule a tail block commit if necessary.
+ *
+ * This method should not be called directly. Rather, call vdo_schedule_default_action() on the
+ * depot's action manager.
+ *
+ * Implements vdo_action_scheduler_fn.
+ */
+static bool schedule_tail_block_commit(void *context)
+{
+ struct slab_depot *depot = context;
+
+ if (depot->new_release_request == depot->active_release_request)
+ return false;
+
+ return vdo_schedule_action(depot->action_manager,
+ prepare_for_tail_block_commit,
+ release_tail_block_locks,
+ NULL, NULL);
+}
+
+/**
+ * initialize_slab_scrubber() - Initialize an allocator's slab scrubber.
+ * @allocator: The allocator being initialized.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_slab_scrubber(struct block_allocator *allocator)
+{
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+ block_count_t slab_journal_size =
+ allocator->depot->slab_config.slab_journal_blocks;
+ char *journal_data;
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE * slab_journal_size,
+ char, __func__, &journal_data);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = allocate_vio_components(allocator->completion.vdo,
+ VIO_TYPE_SLAB_JOURNAL,
+ VIO_PRIORITY_METADATA,
+ allocator, slab_journal_size,
+ journal_data, &scrubber->vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(journal_data);
+ return result;
+ }
+
+ INIT_LIST_HEAD(&scrubber->high_priority_slabs);
+ INIT_LIST_HEAD(&scrubber->slabs);
+ vdo_set_admin_state_code(&scrubber->admin_state, VDO_ADMIN_STATE_SUSPENDED);
+ return VDO_SUCCESS;
+}
+
+/**
+ * initialize_slab_summary_block() - Initialize a slab_summary_block.
+ * @allocator: The allocator which owns the block.
+ * @index: The index of this block in its zone's summary.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check initialize_slab_summary_block(struct block_allocator *allocator,
+ block_count_t index)
+{
+ struct slab_summary_block *block = &allocator->summary_blocks[index];
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &block->outgoing_entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = allocate_vio_components(allocator->depot->vdo, VIO_TYPE_SLAB_SUMMARY,
+ VIO_PRIORITY_METADATA, NULL, 1,
+ block->outgoing_entries, &block->vio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ block->allocator = allocator;
+ block->entries = &allocator->summary_entries[VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK * index];
+ block->index = index;
+ return VDO_SUCCESS;
+}
+
+static int __must_check initialize_block_allocator(struct slab_depot *depot,
+ zone_count_t zone)
+{
+ int result;
+ block_count_t i;
+ struct block_allocator *allocator = &depot->allocators[zone];
+ struct vdo *vdo = depot->vdo;
+ block_count_t max_free_blocks = depot->slab_config.data_blocks;
+ unsigned int max_priority = (2 + ilog2(max_free_blocks));
+
+ *allocator = (struct block_allocator) {
+ .depot = depot,
+ .zone_number = zone,
+ .thread_id = vdo->thread_config.physical_threads[zone],
+ .nonce = vdo->states.vdo.nonce,
+ };
+
+ INIT_LIST_HEAD(&allocator->dirty_slab_journals);
+ vdo_set_admin_state_code(&allocator->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ result = vdo_register_read_only_listener(vdo, allocator,
+ notify_block_allocator_of_read_only_mode,
+ allocator->thread_id);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_initialize_completion(&allocator->completion, vdo, VDO_BLOCK_ALLOCATOR_COMPLETION);
+ result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, allocator->thread_id,
+ VIO_TYPE_SLAB_JOURNAL, VIO_PRIORITY_METADATA,
+ allocator, &allocator->vio_pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = initialize_slab_scrubber(allocator);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_make_priority_table(max_priority, &allocator->prioritized_slabs);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE,
+ struct slab_summary_block, __func__,
+ &allocator->summary_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_admin_state_code(&allocator->summary_state,
+ VDO_ADMIN_STATE_NORMAL_OPERATION);
+ allocator->summary_entries = depot->summary_entries + (MAX_VDO_SLABS * zone);
+
+ /* Initialize each summary block. */
+ for (i = 0; i < VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE; i++) {
+ result = initialize_slab_summary_block(allocator, i);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /*
+ * Performing well atop thin provisioned storage requires either that VDO discards freed
+ * blocks, or that the block allocator try to use slabs that already have allocated blocks
+ * in preference to slabs that have never been opened. For reasons we have not been able to
+	 * fully understand, some SSD machines have been very sensitive (50% reduction in
+ * test throughput) to very slight differences in the timing and locality of block
+ * allocation. Assigning a low priority to unopened slabs (max_priority/2, say) would be
+	 * ideal in principle, but anything less than a very high threshold (max_priority - 1)
+ * hurts on these machines.
+ *
+ * This sets the free block threshold for preferring to open an unopened slab to the binary
+ * floor of 3/4ths the total number of data blocks in a slab, which will generally evaluate
+ * to about half the slab size.
+ */
+ allocator->unopened_slab_priority = (1 + ilog2((max_free_blocks * 3) / 4));
+
+ return VDO_SUCCESS;
+}
+
+static int allocate_components(struct slab_depot *depot,
+ struct partition *summary_partition)
+{
+ int result;
+ zone_count_t zone;
+ slab_count_t slab_count;
+ u8 hint;
+ u32 i;
+ const struct thread_config *thread_config = &depot->vdo->thread_config;
+
+ result = vdo_make_action_manager(depot->zone_count, get_allocator_thread_id,
+ thread_config->journal_thread, depot,
+ schedule_tail_block_commit,
+ depot->vdo, &depot->action_manager);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ depot->origin = depot->first_block;
+
+ /* block size must be a multiple of entry size */
+ BUILD_BUG_ON((VDO_BLOCK_SIZE % sizeof(struct slab_summary_entry)) != 0);
+
+ depot->summary_origin = summary_partition->offset;
+ depot->hint_shift = vdo_get_slab_summary_hint_shift(depot->slab_size_shift);
+ result = vdo_allocate(MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES,
+ struct slab_summary_entry, __func__,
+ &depot->summary_entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+
+ /* Initialize all the entries. */
+ hint = compute_fullness_hint(depot, depot->slab_config.data_blocks);
+ for (i = 0; i < MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES; i++) {
+ /*
+ * This default tail block offset must be reflected in
+ * slabJournal.c::read_slab_journal_tail().
+ */
+ depot->summary_entries[i] = (struct slab_summary_entry) {
+ .tail_block_offset = 0,
+ .fullness_hint = hint,
+ .load_ref_counts = false,
+ .is_dirty = false,
+ };
+ }
+
+ slab_count = vdo_compute_slab_count(depot->first_block, depot->last_block,
+ depot->slab_size_shift);
+ if (thread_config->physical_zone_count > slab_count) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "%u physical zones exceeds slab count %u",
+ thread_config->physical_zone_count,
+ slab_count);
+ }
+
+ /* Initialize the block allocators. */
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ result = initialize_block_allocator(depot, zone);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /* Allocate slabs. */
+ result = allocate_slabs(depot, slab_count);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Use the new slabs. */
+ for (i = depot->slab_count; i < depot->new_slab_count; i++) {
+ struct vdo_slab *slab = depot->new_slabs[i];
+
+ register_slab_with_allocator(slab->allocator, slab);
+ WRITE_ONCE(depot->slab_count, depot->slab_count + 1);
+ }
+
+ depot->slabs = depot->new_slabs;
+ depot->new_slabs = NULL;
+ depot->new_slab_count = 0;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_slab_depot() - Make a slab depot and configure it with the state read from the super
+ * block.
+ * @state: The slab depot state from the super block.
+ * @vdo: The VDO which will own the depot.
+ * @summary_partition: The partition which holds the slab summary.
+ * @depot_ptr: A pointer to hold the depot.
+ *
+ * Return: A success or error code.
+ */
+int vdo_decode_slab_depot(struct slab_depot_state_2_0 state, struct vdo *vdo,
+ struct partition *summary_partition,
+ struct slab_depot **depot_ptr)
+{
+ unsigned int slab_size_shift;
+ struct slab_depot *depot;
+ int result;
+
+ /*
+ * Calculate the bit shift for efficiently mapping block numbers to slabs. Using a shift
+ * requires that the slab size be a power of two.
+ */
+ block_count_t slab_size = state.slab_config.slab_blocks;
+
+ if (!is_power_of_2(slab_size)) {
+ return vdo_log_error_strerror(UDS_INVALID_ARGUMENT,
+ "slab size must be a power of two");
+ }
+ slab_size_shift = ilog2(slab_size);
+
+ result = vdo_allocate_extended(struct slab_depot,
+ vdo->thread_config.physical_zone_count,
+ struct block_allocator, __func__, &depot);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ depot->vdo = vdo;
+ depot->old_zone_count = state.zone_count;
+ depot->zone_count = vdo->thread_config.physical_zone_count;
+ depot->slab_config = state.slab_config;
+ depot->first_block = state.first_block;
+ depot->last_block = state.last_block;
+ depot->slab_size_shift = slab_size_shift;
+
+ result = allocate_components(depot, summary_partition);
+ if (result != VDO_SUCCESS) {
+ vdo_free_slab_depot(depot);
+ return result;
+ }
+
+ *depot_ptr = depot;
+ return VDO_SUCCESS;
+}
+
+static void uninitialize_allocator_summary(struct block_allocator *allocator)
+{
+ block_count_t i;
+
+ if (allocator->summary_blocks == NULL)
+ return;
+
+ for (i = 0; i < VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE; i++) {
+ free_vio_components(&allocator->summary_blocks[i].vio);
+ vdo_free(vdo_forget(allocator->summary_blocks[i].outgoing_entries));
+ }
+
+ vdo_free(vdo_forget(allocator->summary_blocks));
+}
+
+/**
+ * vdo_free_slab_depot() - Destroy a slab depot.
+ * @depot: The depot to destroy.
+ */
+void vdo_free_slab_depot(struct slab_depot *depot)
+{
+ zone_count_t zone = 0;
+
+ if (depot == NULL)
+ return;
+
+ vdo_abandon_new_slabs(depot);
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ struct block_allocator *allocator = &depot->allocators[zone];
+
+ if (allocator->eraser != NULL)
+ dm_kcopyd_client_destroy(vdo_forget(allocator->eraser));
+
+ uninitialize_allocator_summary(allocator);
+ uninitialize_scrubber_vio(&allocator->scrubber);
+ free_vio_pool(vdo_forget(allocator->vio_pool));
+ vdo_free_priority_table(vdo_forget(allocator->prioritized_slabs));
+ }
+
+ if (depot->slabs != NULL) {
+ slab_count_t i;
+
+ for (i = 0; i < depot->slab_count; i++)
+ free_slab(vdo_forget(depot->slabs[i]));
+ }
+
+ vdo_free(vdo_forget(depot->slabs));
+ vdo_free(vdo_forget(depot->action_manager));
+ vdo_free(vdo_forget(depot->summary_entries));
+ vdo_free(depot);
+}
+
+/**
+ * vdo_record_slab_depot() - Record the state of a slab depot for encoding into the super block.
+ * @depot: The depot to encode.
+ *
+ * Return: The depot state.
+ */
+struct slab_depot_state_2_0 vdo_record_slab_depot(const struct slab_depot *depot)
+{
+ /*
+ * If this depot is currently using 0 zones, it must have been synchronously loaded by a
+ * tool and is now being saved. We did not load and combine the slab summary, so we still
+ * need to do that next time we load with the old zone count rather than 0.
+ */
+ struct slab_depot_state_2_0 state;
+ zone_count_t zones_to_record = depot->zone_count;
+
+ if (depot->zone_count == 0)
+ zones_to_record = depot->old_zone_count;
+
+ state = (struct slab_depot_state_2_0) {
+ .slab_config = depot->slab_config,
+ .first_block = depot->first_block,
+ .last_block = depot->last_block,
+ .zone_count = zones_to_record,
+ };
+
+ return state;
+}
+
+/**
+ * vdo_allocate_reference_counters() - Allocate the reference counters for all slabs in the depot.
+ * @depot: The slab depot.
+ *
+ * Context: This method may be called only before entering normal operation from the load thread.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_allocate_reference_counters(struct slab_depot *depot)
+{
+ struct slab_iterator iterator =
+ get_depot_slab_iterator(depot, depot->slab_count - 1, 0, 1);
+
+ while (iterator.next != NULL) {
+ int result = allocate_slab_counters(next_slab(&iterator));
+
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * get_slab_number() - Get the number of the slab that contains a specified block.
+ * @depot: The slab depot.
+ * @pbn: The physical block number.
+ * @slab_number_ptr: A pointer to hold the slab number.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check get_slab_number(const struct slab_depot *depot,
+ physical_block_number_t pbn,
+ slab_count_t *slab_number_ptr)
+{
+ slab_count_t slab_number;
+
+ if (pbn < depot->first_block)
+ return VDO_OUT_OF_RANGE;
+
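+	/* The slab size is a power of two, so the slab number can be computed with a shift. */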
+ slab_number = (pbn - depot->first_block) >> depot->slab_size_shift;
+ if (slab_number >= depot->slab_count)
+ return VDO_OUT_OF_RANGE;
+
+ *slab_number_ptr = slab_number;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_get_slab() - Get the slab object for the slab that contains a specified block.
+ * @depot: The slab depot.
+ * @pbn: The physical block number.
+ *
+ * Will put the VDO in read-only mode if the PBN is not a valid data block nor the zero block.
+ *
+ * Return: The slab containing the block, or NULL if the block number is the zero block or
+ * otherwise out of range.
+ */
+struct vdo_slab *vdo_get_slab(const struct slab_depot *depot,
+ physical_block_number_t pbn)
+{
+ slab_count_t slab_number;
+ int result;
+
+ if (pbn == VDO_ZERO_BLOCK)
+ return NULL;
+
+ result = get_slab_number(depot, pbn, &slab_number);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(depot->vdo, result);
+ return NULL;
+ }
+
+ return depot->slabs[slab_number];
+}
+
+/**
+ * vdo_get_increment_limit() - Determine how many new references a block can acquire.
+ * @depot: The slab depot.
+ * @pbn: The physical block number that is being queried.
+ *
+ * Context: This method must be called from the physical zone thread of the PBN.
+ *
+ * Return: The number of available references.
+ */
+u8 vdo_get_increment_limit(struct slab_depot *depot, physical_block_number_t pbn)
+{
+ struct vdo_slab *slab = vdo_get_slab(depot, pbn);
+ vdo_refcount_t *counter_ptr = NULL;
+ int result;
+
+ if ((slab == NULL) || (slab->status != VDO_SLAB_REBUILT))
+ return 0;
+
+ result = get_reference_counter(slab, pbn, &counter_ptr);
+ if (result != VDO_SUCCESS)
+ return 0;
+
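+	/* A provisionally referenced block is treated as having a single reference. */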
+ if (*counter_ptr == PROVISIONAL_REFERENCE_COUNT)
+ return (MAXIMUM_REFERENCE_COUNT - 1);
+
+ return (MAXIMUM_REFERENCE_COUNT - *counter_ptr);
+}
+
+/**
+ * vdo_is_physical_data_block() - Determine whether the given PBN refers to a data block.
+ * @depot: The depot.
+ * @pbn: The physical block number to ask about.
+ *
+ * Return: True if the PBN corresponds to a data block.
+ */
+bool vdo_is_physical_data_block(const struct slab_depot *depot,
+ physical_block_number_t pbn)
+{
+ slab_count_t slab_number;
+ slab_block_number sbn;
+
+ return ((pbn == VDO_ZERO_BLOCK) ||
+ ((get_slab_number(depot, pbn, &slab_number) == VDO_SUCCESS) &&
+ (slab_block_number_from_pbn(depot->slabs[slab_number], pbn, &sbn) ==
+ VDO_SUCCESS)));
+}
+
+/**
+ * vdo_get_slab_depot_allocated_blocks() - Get the total number of data blocks allocated across all
+ * the slabs in the depot.
+ * @depot: The slab depot.
+ *
+ * This is the total number of blocks with a non-zero reference count.
+ *
+ * Context: This may be called from any thread.
+ *
+ * Return: The total number of blocks with a non-zero reference count.
+ */
+block_count_t vdo_get_slab_depot_allocated_blocks(const struct slab_depot *depot)
+{
+ block_count_t total = 0;
+ zone_count_t zone;
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ /* The allocators are responsible for thread safety. */
+ total += READ_ONCE(depot->allocators[zone].allocated_blocks);
+ }
+
+ return total;
+}
+
+/**
+ * vdo_get_slab_depot_data_blocks() - Get the total number of data blocks in all the slabs in the
+ * depot.
+ * @depot: The slab depot.
+ *
+ * Context: This may be called from any thread.
+ *
+ * Return: The total number of data blocks in all slabs.
+ */
+block_count_t vdo_get_slab_depot_data_blocks(const struct slab_depot *depot)
+{
+ return (READ_ONCE(depot->slab_count) * depot->slab_config.data_blocks);
+}
+
+/**
+ * finish_combining_zones() - Clean up after saving out the combined slab summary.
+ * @completion: The vio which was used to write the summary data.
+ */
+static void finish_combining_zones(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vdo_completion *parent = completion->parent;
+
+ free_vio(as_vio(vdo_forget(completion)));
+ vdo_fail_completion(parent, result);
+}
+
+static void handle_combining_error(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ finish_combining_zones(completion);
+}
+
+static void write_summary_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+
+ continue_vio_after_io(vio, finish_combining_zones,
+ vdo->thread_config.admin_thread);
+}
+
+/**
+ * combine_summaries() - Treating the current entries buffer as the on-disk value of all zones,
+ * update every zone to the correct values for every slab.
+ * @depot: The depot whose summary entries should be combined.
+ */
+static void combine_summaries(struct slab_depot *depot)
+{
+ /*
+ * Combine all the old summary data into the portion of the buffer corresponding to the
+ * first zone.
+ */
+ zone_count_t zone = 0;
+ struct slab_summary_entry *entries = depot->summary_entries;
+
+ if (depot->old_zone_count > 1) {
+ slab_count_t entry_number;
+
+ for (entry_number = 0; entry_number < MAX_VDO_SLABS; entry_number++) {
+ if (zone != 0) {
+ memcpy(entries + entry_number,
+ entries + (zone * MAX_VDO_SLABS) + entry_number,
+ sizeof(struct slab_summary_entry));
+ }
+
+ zone++;
+ if (zone == depot->old_zone_count)
+ zone = 0;
+ }
+ }
+
+	/* Copy the combined data to each zone's region of the buffer. */
+ for (zone = 1; zone < MAX_VDO_PHYSICAL_ZONES; zone++) {
+ memcpy(entries + (zone * MAX_VDO_SLABS), entries,
+ MAX_VDO_SLABS * sizeof(struct slab_summary_entry));
+ }
+}
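+
+/*
+ * Illustrative layout (hypothetical, three old zones): on disk, slab i's current summary entry
+ * lives in the copy written by zone (i % 3), so zone 0 holds slabs 0, 3, 6, ..., zone 1 holds
+ * slabs 1, 4, 7, ..., and zone 2 holds slabs 2, 5, 8, .... combine_summaries() gathers those
+ * strided entries into zone 0's region and then replicates that region to every other zone.
+ */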
+
+/**
+ * finish_loading_summary() - Finish loading slab summary data.
+ * @completion: The vio which was used to read the summary data.
+ *
+ * Combines the slab summary data from all the previously written zones, copies the combined
+ * summary to each zone's region of the buffer, and then writes the combined summary back out to
+ * disk. This callback is registered in load_summary_endio().
+ */
+static void finish_loading_summary(struct vdo_completion *completion)
+{
+ struct slab_depot *depot = completion->vdo->depot;
+
+ /* Combine the summary from each zone so each zone is correct for all slabs. */
+ combine_summaries(depot);
+
+ /* Write the combined summary back out. */
+ vdo_submit_metadata_vio(as_vio(completion), depot->summary_origin,
+ write_summary_endio, handle_combining_error,
+ REQ_OP_WRITE);
+}
+
+static void load_summary_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+
+ continue_vio_after_io(vio, finish_loading_summary,
+ vdo->thread_config.admin_thread);
+}
+
+/**
+ * load_slab_summary() - The preamble of a load operation.
+ *
+ * Implements vdo_action_preamble_fn.
+ */
+static void load_slab_summary(void *context, struct vdo_completion *parent)
+{
+ int result;
+ struct vio *vio;
+ struct slab_depot *depot = context;
+ const struct admin_state_code *operation =
+ vdo_get_current_manager_operation(depot->action_manager);
+
+ result = create_multi_block_metadata_vio(depot->vdo, VIO_TYPE_SLAB_SUMMARY,
+ VIO_PRIORITY_METADATA, parent,
+ VDO_SLAB_SUMMARY_BLOCKS,
+ (char *) depot->summary_entries, &vio);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(parent, result);
+ return;
+ }
+
+ if ((operation == VDO_ADMIN_STATE_FORMATTING) ||
+ (operation == VDO_ADMIN_STATE_LOADING_FOR_REBUILD)) {
+ finish_loading_summary(&vio->completion);
+ return;
+ }
+
+ vdo_submit_metadata_vio(vio, depot->summary_origin, load_summary_endio,
+ handle_combining_error, REQ_OP_READ);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void load_allocator(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ vdo_start_loading(&depot->allocators[zone_number].state,
+ vdo_get_current_manager_operation(depot->action_manager),
+ parent, initiate_load);
+}
+
+/**
+ * vdo_load_slab_depot() - Asynchronously load any slab depot state that isn't included in the
+ * super_block component.
+ * @depot: The depot to load.
+ * @operation: The type of load to perform.
+ * @parent: The completion to notify when the load is complete.
+ * @context: Additional context for the load operation; may be NULL.
+ *
+ * This method may be called only before entering normal operation from the load thread.
+ */
+void vdo_load_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent, void *context)
+{
+ if (!vdo_assert_load_operation(operation, parent))
+ return;
+
+ vdo_schedule_operation_with_context(depot->action_manager, operation,
+ load_slab_summary, load_allocator,
+ NULL, context, parent);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void prepare_to_allocate(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+ struct block_allocator *allocator = &depot->allocators[zone_number];
+ int result;
+
+ result = vdo_prepare_slabs_for_allocation(allocator);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(parent, result);
+ return;
+ }
+
+ scrub_slabs(allocator, parent);
+}
+
+/**
+ * vdo_prepare_slab_depot_to_allocate() - Prepare the slab depot to come online and start
+ * allocating blocks.
+ * @depot: The depot to prepare.
+ * @load_type: The load type.
+ * @parent: The completion to notify when the operation is complete.
+ *
+ * This method may be called only before entering normal operation from the load thread. It must be
+ * called before allocation may proceed.
+ */
+void vdo_prepare_slab_depot_to_allocate(struct slab_depot *depot,
+ enum slab_depot_load_type load_type,
+ struct vdo_completion *parent)
+{
+ depot->load_type = load_type;
+ atomic_set(&depot->zones_to_scrub, depot->zone_count);
+ vdo_schedule_action(depot->action_manager, NULL,
+ prepare_to_allocate, NULL, parent);
+}
+
+/**
+ * vdo_update_slab_depot_size() - Update the slab depot to reflect its new size in memory.
+ * @depot: The depot to update.
+ *
+ * This size is saved to disk as part of the super block.
+ */
+void vdo_update_slab_depot_size(struct slab_depot *depot)
+{
+ depot->last_block = depot->new_last_block;
+}
+
+/**
+ * vdo_prepare_to_grow_slab_depot() - Allocate new memory needed for a resize of a slab depot to
+ * the given size.
+ * @depot: The depot to prepare to resize.
+ * @partition: The new depot partition.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot,
+ const struct partition *partition)
+{
+ struct slab_depot_state_2_0 new_state;
+ int result;
+ slab_count_t new_slab_count;
+
+ if ((partition->count >> depot->slab_size_shift) <= depot->slab_count)
+ return VDO_INCREMENT_TOO_SMALL;
+
+ /* Generate the depot configuration for the new block count. */
+ VDO_ASSERT_LOG_ONLY(depot->first_block == partition->offset,
+ "New slab depot partition doesn't change origin");
+ result = vdo_configure_slab_depot(partition, depot->slab_config,
+ depot->zone_count, &new_state);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ new_slab_count = vdo_compute_slab_count(depot->first_block,
+ new_state.last_block,
+ depot->slab_size_shift);
+ if (new_slab_count <= depot->slab_count)
+ return vdo_log_error_strerror(VDO_INCREMENT_TOO_SMALL,
+ "Depot can only grow");
+ if (new_slab_count == depot->new_slab_count) {
+		/* We've already allocated all of the new slabs we need. */
+ return VDO_SUCCESS;
+ }
+
+ vdo_abandon_new_slabs(depot);
+ result = allocate_slabs(depot, new_slab_count);
+ if (result != VDO_SUCCESS) {
+ vdo_abandon_new_slabs(depot);
+ return result;
+ }
+
+ depot->new_size = partition->count;
+ depot->old_last_block = depot->last_block;
+ depot->new_last_block = new_state.last_block;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * finish_registration() - Finish registering new slabs now that all of the allocators have
+ * received their new slabs.
+ *
+ * Implements vdo_action_conclusion_fn.
+ */
+static int finish_registration(void *context)
+{
+ struct slab_depot *depot = context;
+
+ WRITE_ONCE(depot->slab_count, depot->new_slab_count);
+ vdo_free(depot->slabs);
+ depot->slabs = depot->new_slabs;
+ depot->new_slabs = NULL;
+ depot->new_slab_count = 0;
+ return VDO_SUCCESS;
+}
+
+/* Implements vdo_zone_action_fn. */
+static void register_new_slabs(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+ struct block_allocator *allocator = &depot->allocators[zone_number];
+ slab_count_t i;
+
+ for (i = depot->slab_count; i < depot->new_slab_count; i++) {
+ struct vdo_slab *slab = depot->new_slabs[i];
+
+ if (slab->allocator == allocator)
+ register_slab_with_allocator(allocator, slab);
+ }
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * vdo_use_new_slabs() - Use the new slabs allocated for resize.
+ * @depot: The depot.
+ * @parent: The object to notify when complete.
+ */
+void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent)
+{
+ VDO_ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use");
+ vdo_schedule_operation(depot->action_manager,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION,
+ NULL, register_new_slabs,
+ finish_registration, parent);
+}
+
+/**
+ * stop_scrubbing() - Tell the scrubber to stop scrubbing after it finishes the slab it is
+ * currently working on.
+ * @allocator: The allocator owning the scrubber to stop.
+ */
+static void stop_scrubbing(struct block_allocator *allocator)
+{
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ if (vdo_is_state_quiescent(&scrubber->admin_state)) {
+ vdo_finish_completion(&allocator->completion);
+ } else {
+ vdo_start_draining(&scrubber->admin_state,
+ VDO_ADMIN_STATE_SUSPENDING,
+ &allocator->completion, NULL);
+ }
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_summary_drain(struct admin_state *state)
+{
+ check_summary_drain_complete(container_of(state, struct block_allocator,
+ summary_state));
+}
+
+static void do_drain_step(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ vdo_prepare_completion_for_requeue(&allocator->completion, do_drain_step,
+ handle_operation_error, allocator->thread_id,
+ NULL);
+ switch (++allocator->drain_step) {
+ case VDO_DRAIN_ALLOCATOR_STEP_SCRUBBER:
+ stop_scrubbing(allocator);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SLABS:
+ apply_to_slabs(allocator, do_drain_step);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SUMMARY:
+ vdo_start_draining(&allocator->summary_state,
+ vdo_get_admin_state_code(&allocator->state),
+ completion, initiate_summary_drain);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_FINISHED:
+ VDO_ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool),
+ "vio pool not busy");
+ vdo_finish_draining_with_result(&allocator->state, completion->result);
+ return;
+
+ default:
+ vdo_finish_draining_with_result(&allocator->state, UDS_BAD_STATE);
+ }
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_drain(struct admin_state *state)
+{
+ struct block_allocator *allocator =
+ container_of(state, struct block_allocator, state);
+
+ allocator->drain_step = VDO_DRAIN_ALLOCATOR_START;
+ do_drain_step(&allocator->completion);
+}
+
+/*
+ * Drain all allocator I/O. Depending upon the type of drain, some or all dirty metadata may be
+ * written to disk. The type of drain will be determined from the state of the allocator's depot.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void drain_allocator(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ vdo_start_draining(&depot->allocators[zone_number].state,
+ vdo_get_current_manager_operation(depot->action_manager),
+ parent, initiate_drain);
+}
+
+/**
+ * vdo_drain_slab_depot() - Drain all slab depot I/O.
+ * @depot: The depot to drain.
+ * @operation: The drain operation (flush, rebuild, suspend, or save).
+ * @parent: The completion to finish when the drain is complete.
+ *
+ * If saving or flushing, all dirty depot metadata will be written out. If saving or suspending,
+ * the depot will be left in a suspended state.
+ */
+void vdo_drain_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_operation(depot->action_manager, operation,
+ NULL, drain_allocator, NULL, parent);
+}
+
+/**
+ * resume_scrubbing() - Tell the scrubber to resume scrubbing if it has been stopped.
+ * @allocator: The allocator being resumed.
+ */
+static void resume_scrubbing(struct block_allocator *allocator)
+{
+ int result;
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ if (!has_slabs_to_scrub(scrubber)) {
+ vdo_finish_completion(&allocator->completion);
+ return;
+ }
+
+ result = vdo_resume_if_quiescent(&scrubber->admin_state);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(&allocator->completion, result);
+ return;
+ }
+
+ scrub_next_slab(scrubber);
+ vdo_finish_completion(&allocator->completion);
+}
+
+static void do_resume_step(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ vdo_prepare_completion_for_requeue(&allocator->completion, do_resume_step,
+ handle_operation_error,
+ allocator->thread_id, NULL);
+ switch (--allocator->drain_step) {
+ case VDO_DRAIN_ALLOCATOR_STEP_SUMMARY:
+ vdo_fail_completion(completion,
+ vdo_resume_if_quiescent(&allocator->summary_state));
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SLABS:
+ apply_to_slabs(allocator, do_resume_step);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SCRUBBER:
+ resume_scrubbing(allocator);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_START:
+ vdo_finish_resuming_with_result(&allocator->state, completion->result);
+ return;
+
+ default:
+ vdo_finish_resuming_with_result(&allocator->state, UDS_BAD_STATE);
+ }
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_resume(struct admin_state *state)
+{
+ struct block_allocator *allocator =
+ container_of(state, struct block_allocator, state);
+
+ allocator->drain_step = VDO_DRAIN_ALLOCATOR_STEP_FINISHED;
+ do_resume_step(&allocator->completion);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void resume_allocator(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ vdo_start_resuming(&depot->allocators[zone_number].state,
+ vdo_get_current_manager_operation(depot->action_manager),
+ parent, initiate_resume);
+}
+
+/**
+ * vdo_resume_slab_depot() - Resume a suspended slab depot.
+ * @depot: The depot to resume.
+ * @parent: The completion to finish when the depot has resumed.
+ */
+void vdo_resume_slab_depot(struct slab_depot *depot, struct vdo_completion *parent)
+{
+ if (vdo_is_read_only(depot->vdo)) {
+ vdo_continue_completion(parent, VDO_READ_ONLY);
+ return;
+ }
+
+ vdo_schedule_operation(depot->action_manager, VDO_ADMIN_STATE_RESUMING,
+ NULL, resume_allocator, NULL, parent);
+}
+
+/**
+ * vdo_commit_oldest_slab_journal_tail_blocks() - Commit all dirty tail blocks which are locking a
+ * given recovery journal block.
+ * @depot: The depot.
+ * @recovery_block_number: The sequence number of the recovery journal block whose locks should be
+ * released.
+ *
+ * Context: This method must be called from the journal zone thread.
+ */
+void vdo_commit_oldest_slab_journal_tail_blocks(struct slab_depot *depot,
+ sequence_number_t recovery_block_number)
+{
+ if (depot == NULL)
+ return;
+
+ depot->new_release_request = recovery_block_number;
+ vdo_schedule_default_action(depot->action_manager);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void scrub_all_unrecovered_slabs(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ scrub_slabs(&depot->allocators[zone_number], NULL);
+ vdo_launch_completion(parent);
+}
+
+/**
+ * vdo_scrub_all_unrecovered_slabs() - Scrub all unrecovered slabs.
+ * @depot: The depot to scrub.
+ * @parent: The object to notify when scrubbing has been launched for all zones.
+ */
+void vdo_scrub_all_unrecovered_slabs(struct slab_depot *depot,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_action(depot->action_manager, NULL,
+ scrub_all_unrecovered_slabs,
+ NULL, parent);
+}
+
+/**
+ * get_block_allocator_statistics() - Get the total of the statistics from all the block allocators
+ * in the depot.
+ * @depot: The slab depot.
+ *
+ * Return: The statistics from all block allocators in the depot.
+ */
+static struct block_allocator_statistics __must_check
+get_block_allocator_statistics(const struct slab_depot *depot)
+{
+ struct block_allocator_statistics totals;
+ zone_count_t zone;
+
+ memset(&totals, 0, sizeof(totals));
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ const struct block_allocator *allocator = &depot->allocators[zone];
+ const struct block_allocator_statistics *stats = &allocator->statistics;
+
+ totals.slab_count += allocator->slab_count;
+ totals.slabs_opened += READ_ONCE(stats->slabs_opened);
+ totals.slabs_reopened += READ_ONCE(stats->slabs_reopened);
+ }
+
+ return totals;
+}
+
+/**
+ * get_ref_counts_statistics() - Get the cumulative ref_counts statistics for the depot.
+ * @depot: The slab depot.
+ *
+ * Return: The cumulative statistics for all ref_counts in the depot.
+ */
+static struct ref_counts_statistics __must_check
+get_ref_counts_statistics(const struct slab_depot *depot)
+{
+ struct ref_counts_statistics totals;
+ zone_count_t zone;
+
+ memset(&totals, 0, sizeof(totals));
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ totals.blocks_written +=
+ READ_ONCE(depot->allocators[zone].ref_counts_statistics.blocks_written);
+ }
+
+ return totals;
+}
+
+/**
+ * get_slab_journal_statistics() - Get the aggregated slab journal statistics for the depot.
+ * @depot: The slab depot.
+ *
+ * Return: The aggregated statistics for all slab journals in the depot.
+ */
+static struct slab_journal_statistics __must_check
+get_slab_journal_statistics(const struct slab_depot *depot)
+{
+ struct slab_journal_statistics totals;
+ zone_count_t zone;
+
+ memset(&totals, 0, sizeof(totals));
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ const struct slab_journal_statistics *stats =
+ &depot->allocators[zone].slab_journal_statistics;
+
+ totals.disk_full_count += READ_ONCE(stats->disk_full_count);
+ totals.flush_count += READ_ONCE(stats->flush_count);
+ totals.blocked_count += READ_ONCE(stats->blocked_count);
+ totals.blocks_written += READ_ONCE(stats->blocks_written);
+ totals.tail_busy_count += READ_ONCE(stats->tail_busy_count);
+ }
+
+ return totals;
+}
+
+/**
+ * vdo_get_slab_depot_statistics() - Get all the vdo_statistics fields that are properties of the
+ * slab depot.
+ * @depot: The slab depot.
+ * @stats: The vdo statistics structure to partially fill.
+ */
+void vdo_get_slab_depot_statistics(const struct slab_depot *depot,
+ struct vdo_statistics *stats)
+{
+ slab_count_t slab_count = READ_ONCE(depot->slab_count);
+ slab_count_t unrecovered = 0;
+ zone_count_t zone;
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ /* The allocators are responsible for thread safety. */
+ unrecovered += READ_ONCE(depot->allocators[zone].scrubber.slab_count);
+ }
+
+ stats->recovery_percentage = (slab_count - unrecovered) * 100 / slab_count;
+ stats->allocator = get_block_allocator_statistics(depot);
+ stats->ref_counts = get_ref_counts_statistics(depot);
+ stats->slab_journal = get_slab_journal_statistics(depot);
+ stats->slab_summary = (struct slab_summary_statistics) {
+ .blocks_written = atomic64_read(&depot->summary_statistics.blocks_written),
+ };
+}
+
+/**
+ * vdo_dump_slab_depot() - Dump the slab depot, in a thread-unsafe fashion.
+ * @depot: The slab depot.
+ */
+void vdo_dump_slab_depot(const struct slab_depot *depot)
+{
+ vdo_log_info("vdo slab depot");
+	vdo_log_info("  zone_count=%u old_zone_count=%u slab_count=%u active_release_request=%llu new_release_request=%llu",
+ (unsigned int) depot->zone_count,
+ (unsigned int) depot->old_zone_count, READ_ONCE(depot->slab_count),
+ (unsigned long long) depot->active_release_request,
+ (unsigned long long) depot->new_release_request);
+}
diff --git a/drivers/md/dm-vdo/slab-depot.h b/drivers/md/dm-vdo/slab-depot.h
new file mode 100644
index 000000000000..f234853501ca
--- /dev/null
+++ b/drivers/md/dm-vdo/slab-depot.h
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_SLAB_DEPOT_H
+#define VDO_SLAB_DEPOT_H
+
+#include <linux/atomic.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/list.h>
+
+#include "numeric.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "physical-zone.h"
+#include "priority-table.h"
+#include "recovery-journal.h"
+#include "statistics.h"
+#include "types.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/*
+ * A slab_depot is responsible for managing all of the slabs and block allocators of a VDO. It has
+ * a single array of slabs to eliminate the need for additional math to compute which physical
+ * zone a PBN is in. It also has a block_allocator per zone.
+ *
+ * Each physical zone has a single dedicated queue and thread for performing all updates to the
+ * slabs assigned to that zone. The concurrency guarantees of this single-threaded model allow the
+ * code to omit more fine-grained locking for the various slab structures. Each physical zone
+ * maintains a separate copy of the slab summary to remove the need for explicit locking on that
+ * structure as well.
+ *
+ * Load operations must be performed on the admin thread. Normal operations, such as allocations
+ * and reference count updates, must be performed on the appropriate physical zone thread. Requests
+ * from the recovery journal to commit slab journal tail blocks must be scheduled from the recovery
+ * journal thread to run on the appropriate physical zone thread. Save operations must be launched
+ * from the same admin thread as the original load operation.
+ */
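+
+/*
+ * A minimal sketch (assuming a valid data-block PBN) of why the flat slab array helps: the zone
+ * which owns a PBN falls out of a single lookup rather than per-zone arithmetic, e.g.
+ *
+ *	struct vdo_slab *slab = vdo_get_slab(depot, pbn);
+ *	zone_count_t zone = slab->allocator->zone_number;
+ */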
+
+enum {
+ /* The number of vios in the vio pool is proportional to the throughput of the VDO. */
+ BLOCK_ALLOCATOR_VIO_POOL_SIZE = 128,
+};
+
+/*
+ * Represents the possible status of a block.
+ */
+enum reference_status {
+ RS_FREE, /* this block is free */
+ RS_SINGLE, /* this block is singly-referenced */
+ RS_SHARED, /* this block is shared */
+ RS_PROVISIONAL /* this block is provisionally allocated */
+};
+
+struct vdo_slab;
+
+struct journal_lock {
+ u16 count;
+ sequence_number_t recovery_start;
+};
+
+struct slab_journal {
+ /* A waiter object for getting a VIO pool entry */
+ struct vdo_waiter resource_waiter;
+ /* A waiter object for updating the slab summary */
+ struct vdo_waiter slab_summary_waiter;
+ /* A waiter object for getting a vio with which to flush */
+ struct vdo_waiter flush_waiter;
+ /* The queue of VIOs waiting to make an entry */
+ struct vdo_wait_queue entry_waiters;
+ /* The parent slab reference of this journal */
+ struct vdo_slab *slab;
+
+ /* Whether a tail block commit is pending */
+ bool waiting_to_commit;
+ /* Whether the journal is updating the slab summary */
+ bool updating_slab_summary;
+ /* Whether the journal is adding entries from the entry_waiters queue */
+ bool adding_entries;
+ /* Whether a partial write is in progress */
+ bool partial_write_in_progress;
+
+ /* The oldest block in the journal on disk */
+ sequence_number_t head;
+ /* The oldest block in the journal which may not be reaped */
+ sequence_number_t unreapable;
+ /* The end of the half-open interval of the active journal */
+ sequence_number_t tail;
+ /* The next journal block to be committed */
+ sequence_number_t next_commit;
+ /* The tail sequence number that is written in the slab summary */
+ sequence_number_t summarized;
+	/* The tail sequence number that was last summarized in the slab summary */
+ sequence_number_t last_summarized;
+
+ /* The sequence number of the recovery journal lock */
+ sequence_number_t recovery_lock;
+
+ /*
+ * The number of entries which fit in a single block. Can't use the constant because unit
+ * tests change this number.
+ */
+ journal_entry_count_t entries_per_block;
+ /*
+ * The number of full entries which fit in a single block. Can't use the constant because
+ * unit tests change this number.
+ */
+ journal_entry_count_t full_entries_per_block;
+
+ /* The recovery journal of the VDO (slab journal holds locks on it) */
+ struct recovery_journal *recovery_journal;
+
+ /* The statistics shared by all slab journals in our physical zone */
+ struct slab_journal_statistics *events;
+ /* A list of the VIO pool entries for outstanding journal block writes */
+ struct list_head uncommitted_blocks;
+
+ /*
+ * The current tail block header state. This will be packed into the block just before it
+ * is written.
+ */
+ struct slab_journal_block_header tail_header;
+ /* A pointer to a block-sized buffer holding the packed block data */
+ struct packed_slab_journal_block *block;
+
+ /* The number of blocks in the on-disk journal */
+ block_count_t size;
+ /* The number of blocks at which to start pushing reference blocks */
+ block_count_t flushing_threshold;
+ /* The number of blocks at which all reference blocks should be writing */
+ block_count_t flushing_deadline;
+ /* The number of blocks at which to wait for reference blocks to write */
+ block_count_t blocking_threshold;
+ /* The number of blocks at which to scrub the slab before coming online */
+ block_count_t scrubbing_threshold;
+
+ /* This list entry is for block_allocator to keep a queue of dirty journals */
+ struct list_head dirty_entry;
+
+ /* The lock for the oldest unreaped block of the journal */
+ struct journal_lock *reap_lock;
+	/* The locks for each on-disk block */
+ struct journal_lock *locks;
+};
+
+/*
+ * Reference_block structure
+ *
+ * Blocks are used as a proxy, permitting saves of partial refcounts.
+ */
+struct reference_block {
+ /* This block waits on the ref_counts to tell it to write */
+ struct vdo_waiter waiter;
+ /* The slab to which this reference_block belongs */
+ struct vdo_slab *slab;
+ /* The number of references in this block that represent allocations */
+ block_size_t allocated_count;
+ /* The slab journal block on which this block must hold a lock */
+ sequence_number_t slab_journal_lock;
+ /* The slab journal block which should be released when this block is committed */
+ sequence_number_t slab_journal_lock_to_release;
+ /* The point up to which each sector is accurate on disk */
+ struct journal_point commit_points[VDO_SECTORS_PER_BLOCK];
+ /* Whether this block has been modified since it was written to disk */
+ bool is_dirty;
+ /* Whether this block is currently writing */
+ bool is_writing;
+};
+
+/* The search_cursor represents the saved position of a free block search. */
+struct search_cursor {
+ /* The reference block containing the current search index */
+ struct reference_block *block;
+ /* The position at which to start searching for the next free counter */
+ slab_block_number index;
+ /* The position just past the last valid counter in the current block */
+ slab_block_number end_index;
+
+ /* A pointer to the first reference block in the slab */
+ struct reference_block *first_block;
+ /* A pointer to the last reference block in the slab */
+ struct reference_block *last_block;
+};
+
+enum slab_rebuild_status {
+ VDO_SLAB_REBUILT,
+ VDO_SLAB_REPLAYING,
+ VDO_SLAB_REQUIRES_SCRUBBING,
+ VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING,
+ VDO_SLAB_REBUILDING,
+};
+
+/*
+ * A vdo_slab currently consists of a run of 2^23 data blocks, but that will soon change to
+ * dedicate a small number of those blocks to metadata storage for the slab's reference counts and
+ * slab journal.
+ *
+ * A reference count is maintained for each physical block number. The vast majority of blocks have
+ * a very small reference count (usually 0 or 1). For references less than or equal to
+ * MAXIMUM_REFERENCE_COUNT (254), the reference count is stored in counters[pbn].
+ */
+struct vdo_slab {
+ /* A list entry to queue this slab in a block_allocator list */
+ struct list_head allocq_entry;
+
+ /* The struct block_allocator that owns this slab */
+ struct block_allocator *allocator;
+
+ /* The journal for this slab */
+ struct slab_journal journal;
+
+ /* The slab number of this slab */
+ slab_count_t slab_number;
+ /* The offset in the allocator partition of the first block in this slab */
+ physical_block_number_t start;
+ /* The offset of the first block past the end of this slab */
+ physical_block_number_t end;
+ /* The starting translated PBN of the slab journal */
+ physical_block_number_t journal_origin;
+ /* The starting translated PBN of the reference counts */
+ physical_block_number_t ref_counts_origin;
+
+ /* The administrative state of the slab */
+ struct admin_state state;
+ /* The status of the slab */
+ enum slab_rebuild_status status;
+ /* Whether the slab was ever queued for scrubbing */
+ bool was_queued_for_scrubbing;
+
+ /* The priority at which this slab has been queued for allocation */
+ u8 priority;
+
+ /* Fields beyond this point are the reference counts for the data blocks in this slab. */
+ /* The size of the counters array */
+ u32 block_count;
+ /* The number of free blocks */
+ u32 free_blocks;
+ /* The array of reference counts */
+ vdo_refcount_t *counters; /* use vdo_allocate() to align data ptr */
+
+ /* The saved block pointer and array indexes for the free block search */
+ struct search_cursor search_cursor;
+
+ /* A list of the dirty blocks waiting to be written out */
+ struct vdo_wait_queue dirty_blocks;
+ /* The number of blocks which are currently writing */
+ size_t active_count;
+
+ /* A waiter object for updating the slab summary */
+ struct vdo_waiter summary_waiter;
+
+ /* The latest slab journal for which there has been a reference count update */
+ struct journal_point slab_journal_point;
+
+ /* The number of reference count blocks */
+ u32 reference_block_count;
+ /* reference count block array */
+ struct reference_block *reference_blocks;
+};
+
+enum block_allocator_drain_step {
+ VDO_DRAIN_ALLOCATOR_START,
+ VDO_DRAIN_ALLOCATOR_STEP_SCRUBBER,
+ VDO_DRAIN_ALLOCATOR_STEP_SLABS,
+ VDO_DRAIN_ALLOCATOR_STEP_SUMMARY,
+ VDO_DRAIN_ALLOCATOR_STEP_FINISHED,
+};
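+
+/*
+ * Draining an allocator walks these steps forward (scrubber, then slabs, then summary);
+ * resuming walks them in reverse. See do_drain_step() and do_resume_step() in slab-depot.c.
+ */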
+
+struct slab_scrubber {
+ /* The queue of slabs to scrub first */
+ struct list_head high_priority_slabs;
+ /* The queue of slabs to scrub once there are no high_priority_slabs */
+ struct list_head slabs;
+ /* The queue of VIOs waiting for a slab to be scrubbed */
+ struct vdo_wait_queue waiters;
+
+ /*
+ * The number of slabs that are unrecovered or being scrubbed. This field is modified by
+ * the physical zone thread, but is queried by other threads.
+ */
+ slab_count_t slab_count;
+
+ /* The administrative state of the scrubber */
+ struct admin_state admin_state;
+ /* Whether to only scrub high-priority slabs */
+ bool high_priority_only;
+ /* The slab currently being scrubbed */
+ struct vdo_slab *slab;
+ /* The vio for loading slab journal blocks */
+ struct vio vio;
+};
+
+/* A sub-structure for applying actions in parallel to all of an allocator's slabs. */
+struct slab_actor {
+ /* The number of slabs performing a slab action */
+ slab_count_t slab_action_count;
+ /* The method to call when a slab action has been completed by all slabs */
+ vdo_action_fn callback;
+};
+
+/* A slab_iterator is a structure for iterating over a set of slabs. */
+struct slab_iterator {
+ struct vdo_slab **slabs;
+ struct vdo_slab *next;
+ slab_count_t end;
+ slab_count_t stride;
+};
+
+/*
+ * The slab_summary provides hints during load and recovery about the state of the slabs in order
+ * to avoid the need to read the slab journals in their entirety before a VDO can come online.
+ *
+ * The information in the summary for each slab includes the rough number of free blocks (which is
+ * used to prioritize scrubbing), the cleanliness of a slab (so that clean slabs containing free
+ * space will be used on restart), and the location of the tail block of the slab's journal.
+ *
+ * The slab_summary has its own partition at the end of the volume which is sized to allow for a
+ * complete copy of the summary for each of up to 16 physical zones.
+ *
+ * During resize, the slab_summary moves its backing partition and is saved once moved; the
+ * slab_summary is not permitted to overwrite the previous recovery journal space.
+ *
+ * The slab_summary does not have its own version information, but relies on the VDO volume version
+ * number.
+ */
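+
+/*
+ * A rough sketch of the hint encoding (see the depot's hint_shift field): a slab's free-block
+ * count is scaled down to a small fullness hint, approximately free_blocks >> hint_shift, so the
+ * summary can prioritize scrubbing and allocation at load time without reading the slab journals.
+ */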
+
+/*
+ * A slab status is a very small structure for use in determining the ordering of slabs in the
+ * scrubbing process.
+ */
+struct slab_status {
+ slab_count_t slab_number;
+ bool is_clean;
+ u8 emptiness;
+};
+
+struct slab_summary_block {
+ /* The block_allocator to which this block belongs */
+ struct block_allocator *allocator;
+ /* The index of this block in its zone's summary */
+ block_count_t index;
+ /* Whether this block has a write outstanding */
+ bool writing;
+ /* Ring of updates waiting on the outstanding write */
+ struct vdo_wait_queue current_update_waiters;
+ /* Ring of updates waiting on the next write */
+ struct vdo_wait_queue next_update_waiters;
+ /* The active slab_summary_entry array for this block */
+ struct slab_summary_entry *entries;
+ /* The vio used to write this block */
+ struct vio vio;
+ /* The packed entries, one block long, backing the vio */
+ char *outgoing_entries;
+};
+
+/*
+ * The statistics for all the slab summary zones owned by this slab summary. These fields are all
+ * mutated only by their physical zone threads, but are read by other threads when gathering
+ * statistics for the entire depot.
+ */
+struct atomic_slab_summary_statistics {
+ /* Number of blocks written */
+ atomic64_t blocks_written;
+};
+
+struct block_allocator {
+ struct vdo_completion completion;
+ /* The slab depot for this allocator */
+ struct slab_depot *depot;
+ /* The nonce of the VDO */
+ nonce_t nonce;
+ /* The physical zone number of this allocator */
+ zone_count_t zone_number;
+ /* The thread ID for this allocator's physical zone */
+ thread_id_t thread_id;
+ /* The number of slabs in this allocator */
+ slab_count_t slab_count;
+ /* The number of the last slab owned by this allocator */
+ slab_count_t last_slab;
+ /* The reduced priority level used to preserve unopened slabs */
+ unsigned int unopened_slab_priority;
+ /* The state of this allocator */
+ struct admin_state state;
+ /* The actor for applying an action to all slabs */
+ struct slab_actor slab_actor;
+
+ /* The slab from which blocks are currently being allocated */
+ struct vdo_slab *open_slab;
+ /* A priority queue containing all slabs available for allocation */
+ struct priority_table *prioritized_slabs;
+ /* The slab scrubber */
+ struct slab_scrubber scrubber;
+ /* What phase of the close operation the allocator is to perform */
+ enum block_allocator_drain_step drain_step;
+
+ /*
+ * These statistics are all mutated only by the physical zone thread, but are read by other
+ * threads when gathering statistics for the entire depot.
+ */
+ /*
+ * The count of allocated blocks in this zone. Not in block_allocator_statistics for
+ * historical reasons.
+ */
+ u64 allocated_blocks;
+ /* Statistics for this block allocator */
+ struct block_allocator_statistics statistics;
+ /* Cumulative statistics for the slab journals in this zone */
+ struct slab_journal_statistics slab_journal_statistics;
+ /* Cumulative statistics for the reference counters in this zone */
+ struct ref_counts_statistics ref_counts_statistics;
+
+ /*
+ * This is the head of a queue of slab journals which have entries in their tail blocks
+ * which have not yet started to commit. When the recovery journal is under space pressure,
+ * slab journals which have uncommitted entries holding a lock on the recovery journal head
+ * are forced to commit their blocks early. This list is kept in order, with the tail
+ * containing the slab journal holding the most recent recovery journal lock.
+ */
+ struct list_head dirty_slab_journals;
+
+ /* The vio pool for reading and writing block allocator metadata */
+ struct vio_pool *vio_pool;
+ /* The dm_kcopyd client for erasing slab journals */
+ struct dm_kcopyd_client *eraser;
+ /* Iterator over the slabs to be erased */
+ struct slab_iterator slabs_to_erase;
+
+ /* The portion of the slab summary managed by this allocator */
+ /* The state of the slab summary */
+ struct admin_state summary_state;
+ /* The number of outstanding summary writes */
+ block_count_t summary_write_count;
+ /* The array (owned by the blocks) of all entries */
+ struct slab_summary_entry *summary_entries;
+ /* The array of slab_summary_blocks */
+ struct slab_summary_block *summary_blocks;
+};
+
+enum slab_depot_load_type {
+ VDO_SLAB_DEPOT_NORMAL_LOAD,
+ VDO_SLAB_DEPOT_RECOVERY_LOAD,
+ VDO_SLAB_DEPOT_REBUILD_LOAD
+};
+
+struct slab_depot {
+ zone_count_t zone_count;
+ zone_count_t old_zone_count;
+ struct vdo *vdo;
+ struct slab_config slab_config;
+ struct action_manager *action_manager;
+
+ physical_block_number_t first_block;
+ physical_block_number_t last_block;
+ physical_block_number_t origin;
+
+ /* slab_size == (1 << slab_size_shift) */
+ unsigned int slab_size_shift;
+
+ /* Determines how slabs should be queued during load */
+ enum slab_depot_load_type load_type;
+
+	/* The state for notifying slab journals to release recovery journal locks */
+ sequence_number_t active_release_request;
+ sequence_number_t new_release_request;
+
+ /* State variables for scrubbing complete handling */
+ atomic_t zones_to_scrub;
+
+ /* Array of pointers to individually allocated slabs */
+ struct vdo_slab **slabs;
+ /* The number of slabs currently allocated and stored in 'slabs' */
+ slab_count_t slab_count;
+
+ /* Array of pointers to a larger set of slabs (used during resize) */
+ struct vdo_slab **new_slabs;
+ /* The number of slabs currently allocated and stored in 'new_slabs' */
+ slab_count_t new_slab_count;
+ /* The size that 'new_slabs' was allocated for */
+ block_count_t new_size;
+
+ /* The last block before resize, for rollback */
+ physical_block_number_t old_last_block;
+ /* The last block after resize, for resize */
+ physical_block_number_t new_last_block;
+
+ /* The statistics for the slab summary */
+ struct atomic_slab_summary_statistics summary_statistics;
+ /* The start of the slab summary partition */
+ physical_block_number_t summary_origin;
+ /* The number of bits to shift to get a 7-bit fullness hint */
+ unsigned int hint_shift;
+ /* The slab summary entries for all of the zones the partition can hold */
+ struct slab_summary_entry *summary_entries;
+
+ /* The block allocators for this depot */
+ struct block_allocator allocators[];
+};
+
+struct reference_updater;
+
+bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ enum journal_operation operation,
+ bool increment,
+ struct journal_point *recovery_point,
+ struct vdo_completion *parent);
+
+int __must_check vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot,
+ physical_block_number_t pbn,
+ enum journal_operation operation);
+
+static inline struct block_allocator *vdo_as_block_allocator(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_BLOCK_ALLOCATOR_COMPLETION);
+ return container_of(completion, struct block_allocator, completion);
+}
+
+int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ struct pbn_lock *lock);
+
+int __must_check vdo_allocate_block(struct block_allocator *allocator,
+ physical_block_number_t *block_number_ptr);
+
+int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator,
+ struct vdo_waiter *waiter);
+
+void vdo_modify_reference_count(struct vdo_completion *completion,
+ struct reference_updater *updater);
+
+int __must_check vdo_release_block_reference(struct block_allocator *allocator,
+ physical_block_number_t pbn);
+
+void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion);
+
+void vdo_dump_block_allocator(const struct block_allocator *allocator);
+
+int __must_check vdo_decode_slab_depot(struct slab_depot_state_2_0 state,
+ struct vdo *vdo,
+ struct partition *summary_partition,
+ struct slab_depot **depot_ptr);
+
+void vdo_free_slab_depot(struct slab_depot *depot);
+
+struct slab_depot_state_2_0 __must_check vdo_record_slab_depot(const struct slab_depot *depot);
+
+int __must_check vdo_allocate_reference_counters(struct slab_depot *depot);
+
+struct vdo_slab * __must_check vdo_get_slab(const struct slab_depot *depot,
+ physical_block_number_t pbn);
+
+u8 __must_check vdo_get_increment_limit(struct slab_depot *depot,
+ physical_block_number_t pbn);
+
+bool __must_check vdo_is_physical_data_block(const struct slab_depot *depot,
+ physical_block_number_t pbn);
+
+block_count_t __must_check vdo_get_slab_depot_allocated_blocks(const struct slab_depot *depot);
+
+block_count_t __must_check vdo_get_slab_depot_data_blocks(const struct slab_depot *depot);
+
+void vdo_get_slab_depot_statistics(const struct slab_depot *depot,
+ struct vdo_statistics *stats);
+
+void vdo_load_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent, void *context);
+
+void vdo_prepare_slab_depot_to_allocate(struct slab_depot *depot,
+ enum slab_depot_load_type load_type,
+ struct vdo_completion *parent);
+
+void vdo_update_slab_depot_size(struct slab_depot *depot);
+
+int __must_check vdo_prepare_to_grow_slab_depot(struct slab_depot *depot,
+ const struct partition *partition);
+
+void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent);
+
+void vdo_abandon_new_slabs(struct slab_depot *depot);
+
+void vdo_drain_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent);
+
+void vdo_resume_slab_depot(struct slab_depot *depot, struct vdo_completion *parent);
+
+void vdo_commit_oldest_slab_journal_tail_blocks(struct slab_depot *depot,
+ sequence_number_t recovery_block_number);
+
+void vdo_scrub_all_unrecovered_slabs(struct slab_depot *depot,
+ struct vdo_completion *parent);
+
+void vdo_dump_slab_depot(const struct slab_depot *depot);
+
+#endif /* VDO_SLAB_DEPOT_H */
diff --git a/drivers/md/dm-vdo/statistics.h b/drivers/md/dm-vdo/statistics.h
new file mode 100644
index 000000000000..c88a75dffba3
--- /dev/null
+++ b/drivers/md/dm-vdo/statistics.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef STATISTICS_H
+#define STATISTICS_H
+
+#include "types.h"
+
+enum {
+ STATISTICS_VERSION = 36,
+};
+
+struct block_allocator_statistics {
+ /* The total number of slabs from which blocks may be allocated */
+ u64 slab_count;
+ /* The total number of slabs from which blocks have ever been allocated */
+ u64 slabs_opened;
+ /* The number of times since loading that a slab has been re-opened */
+ u64 slabs_reopened;
+};
+
+/**
+ * Counters for tracking the number of items written (blocks, requests, etc.)
+ * at steps in the write pipeline. Three counters allow the number of
+ * buffered, in-memory items and the number of in-flight, unacknowledged
+ * writes to be derived, while still tracking totals for reporting purposes.
+ */
+struct commit_statistics {
+ /* The total number of items on which processing has started */
+ u64 started;
+ /* The total number of items for which a write operation has been issued */
+ u64 written;
+ /* The total number of items for which a write operation has completed */
+ u64 committed;
+};
+
+/** Counters for events in the recovery journal */
+struct recovery_journal_statistics {
+ /* Number of times the on-disk journal was full */
+ u64 disk_full;
+ /* Number of times the recovery journal requested slab journal commits. */
+ u64 slab_journal_commits_requested;
+ /* Write/Commit totals for individual journal entries */
+ struct commit_statistics entries;
+ /* Write/Commit totals for journal blocks */
+ struct commit_statistics blocks;
+};
+
+/** The statistics for the compressed block packer. */
+struct packer_statistics {
+ /* Number of compressed data items written since startup */
+ u64 compressed_fragments_written;
+ /* Number of blocks containing compressed items written since startup */
+ u64 compressed_blocks_written;
+ /* Number of VIOs that are pending in the packer */
+ u64 compressed_fragments_in_packer;
+};
+
+/** The statistics for the slab journals. */
+struct slab_journal_statistics {
+ /* Number of times the on-disk journal was full */
+ u64 disk_full_count;
+ /* Number of times an entry was added over the flush threshold */
+ u64 flush_count;
+ /* Number of times an entry was added over the block threshold */
+ u64 blocked_count;
+ /* Number of times a tail block was written */
+ u64 blocks_written;
+ /* Number of times we had to wait for the tail to write */
+ u64 tail_busy_count;
+};
+
+/** The statistics for the slab summary. */
+struct slab_summary_statistics {
+ /* Number of blocks written */
+ u64 blocks_written;
+};
+
+/** The statistics for the reference counts. */
+struct ref_counts_statistics {
+ /* Number of reference blocks written */
+ u64 blocks_written;
+};
+
+/** The statistics for the block map. */
+struct block_map_statistics {
+ /* number of dirty (resident) pages */
+ u32 dirty_pages;
+ /* number of clean (resident) pages */
+ u32 clean_pages;
+ /* number of free pages */
+ u32 free_pages;
+ /* number of pages in failed state */
+ u32 failed_pages;
+ /* number of pages incoming */
+ u32 incoming_pages;
+ /* number of pages outgoing */
+ u32 outgoing_pages;
+ /* how many times free page not avail */
+ u32 cache_pressure;
+ /* number of get_vdo_page() calls for read */
+ u64 read_count;
+ /* number of get_vdo_page() calls for write */
+ u64 write_count;
+ /* number of times pages failed to read */
+ u64 failed_reads;
+ /* number of times pages failed to write */
+ u64 failed_writes;
+ /* number of gets that are reclaimed */
+ u64 reclaimed;
+ /* number of gets for outgoing pages */
+ u64 read_outgoing;
+ /* number of gets that were already there */
+ u64 found_in_cache;
+ /* number of gets requiring discard */
+ u64 discard_required;
+ /* number of gets enqueued for their page */
+ u64 wait_for_page;
+ /* number of gets that have to fetch */
+ u64 fetch_required;
+ /* number of page fetches */
+ u64 pages_loaded;
+ /* number of page saves */
+ u64 pages_saved;
+ /* the number of flushes issued */
+ u64 flush_count;
+};
+
+/** The dedupe statistics from hash locks */
+struct hash_lock_statistics {
+ /* Number of times the UDS advice proved correct */
+ u64 dedupe_advice_valid;
+ /* Number of times the UDS advice proved incorrect */
+ u64 dedupe_advice_stale;
+ /* Number of writes with the same data as another in-flight write */
+ u64 concurrent_data_matches;
+ /* Number of writes whose hash collided with an in-flight write */
+ u64 concurrent_hash_collisions;
+ /* Current number of dedupe queries that are in flight */
+ u32 curr_dedupe_queries;
+};
+
+/** Counts of error conditions in VDO. */
+struct error_statistics {
+ /* number of times VDO got an invalid dedupe advice PBN from UDS */
+ u64 invalid_advice_pbn_count;
+ /* number of times a VIO completed with a VDO_NO_SPACE error */
+ u64 no_space_error_count;
+ /* number of times a VIO completed with a VDO_READ_ONLY error */
+ u64 read_only_error_count;
+};
+
+struct bio_stats {
+ /* Number of REQ_OP_READ bios */
+ u64 read;
+ /* Number of REQ_OP_WRITE bios with data */
+ u64 write;
+ /* Number of bios tagged with REQ_PREFLUSH and containing no data */
+ u64 empty_flush;
+ /* Number of REQ_OP_DISCARD bios */
+ u64 discard;
+ /* Number of bios tagged with REQ_PREFLUSH */
+ u64 flush;
+ /* Number of bios tagged with REQ_FUA */
+ u64 fua;
+};
+
+struct memory_usage {
+ /* Tracked bytes currently allocated. */
+ u64 bytes_used;
+ /* Maximum tracked bytes allocated. */
+ u64 peak_bytes_used;
+};
+
+/** UDS index statistics */
+struct index_statistics {
+ /* Number of records stored in the index */
+ u64 entries_indexed;
+ /* Number of post calls that found an existing entry */
+ u64 posts_found;
+ /* Number of post calls that added a new entry */
+ u64 posts_not_found;
+ /* Number of query calls that found an existing entry */
+ u64 queries_found;
+ /* Number of query calls that added a new entry */
+ u64 queries_not_found;
+ /* Number of update calls that found an existing entry */
+ u64 updates_found;
+ /* Number of update calls that added a new entry */
+ u64 updates_not_found;
+ /* Number of entries discarded */
+ u64 entries_discarded;
+};
+
+/** The statistics of the vdo service. */
+struct vdo_statistics {
+ u32 version;
+ /* Number of blocks used for data */
+ u64 data_blocks_used;
+ /* Number of blocks used for VDO metadata */
+ u64 overhead_blocks_used;
+ /* Number of logical blocks that are currently mapped to physical blocks */
+ u64 logical_blocks_used;
+ /* number of physical blocks */
+ block_count_t physical_blocks;
+ /* number of logical blocks */
+ block_count_t logical_blocks;
+ /* Size of the block map page cache, in bytes */
+ u64 block_map_cache_size;
+ /* The physical block size */
+ u64 block_size;
+ /* Number of times the VDO has successfully recovered */
+ u64 complete_recoveries;
+ /* Number of times the VDO has recovered from read-only mode */
+ u64 read_only_recoveries;
+ /* String describing the operating mode of the VDO */
+ char mode[15];
+ /* Whether the VDO is in recovery mode */
+ bool in_recovery_mode;
+ /* What percentage of recovery mode work has been completed */
+ u8 recovery_percentage;
+ /* The statistics for the compressed block packer */
+ struct packer_statistics packer;
+ /* Counters for events in the block allocator */
+ struct block_allocator_statistics allocator;
+ /* Counters for events in the recovery journal */
+ struct recovery_journal_statistics journal;
+ /* The statistics for the slab journals */
+ struct slab_journal_statistics slab_journal;
+ /* The statistics for the slab summary */
+ struct slab_summary_statistics slab_summary;
+ /* The statistics for the reference counts */
+ struct ref_counts_statistics ref_counts;
+ /* The statistics for the block map */
+ struct block_map_statistics block_map;
+ /* The dedupe statistics from hash locks */
+ struct hash_lock_statistics hash_lock;
+ /* Counts of error conditions */
+ struct error_statistics errors;
+ /* The VDO instance */
+ u32 instance;
+ /* Current number of active VIOs */
+ u32 current_vios_in_progress;
+ /* Maximum number of active VIOs */
+ u32 max_vios;
+ /* Number of times the UDS index was too slow in responding */
+ u64 dedupe_advice_timeouts;
+ /* Number of flush requests submitted to the storage device */
+ u64 flush_out;
+ /* Logical block size */
+ u64 logical_block_size;
+ /* Bios submitted into VDO from above */
+ struct bio_stats bios_in;
+ struct bio_stats bios_in_partial;
+ /* Bios submitted onward for user data */
+ struct bio_stats bios_out;
+ /* Bios submitted onward for metadata */
+ struct bio_stats bios_meta;
+ struct bio_stats bios_journal;
+ struct bio_stats bios_page_cache;
+ struct bio_stats bios_out_completed;
+ struct bio_stats bios_meta_completed;
+ struct bio_stats bios_journal_completed;
+ struct bio_stats bios_page_cache_completed;
+ struct bio_stats bios_acknowledged;
+ struct bio_stats bios_acknowledged_partial;
+ /* Current number of bios in progress */
+ struct bio_stats bios_in_progress;
+ /* Memory usage stats. */
+ struct memory_usage memory_usage;
+ /* The statistics for the UDS index */
+ struct index_statistics index;
+};
+
+#endif /* not STATISTICS_H */
diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c
new file mode 100644
index 000000000000..d3493450b169
--- /dev/null
+++ b/drivers/md/dm-vdo/status-codes.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "status-codes.h"
+
+#include "errors.h"
+#include "logger.h"
+#include "permassert.h"
+#include "thread-utils.h"
+
+const struct error_info vdo_status_list[] = {
+ { "VDO_NOT_IMPLEMENTED", "Not implemented" },
+ { "VDO_OUT_OF_RANGE", "Out of range" },
+ { "VDO_REF_COUNT_INVALID", "Reference count would become invalid" },
+ { "VDO_NO_SPACE", "Out of space" },
+ { "VDO_BAD_CONFIGURATION", "Bad configuration option" },
+ { "VDO_COMPONENT_BUSY", "Prior operation still in progress" },
+ { "VDO_BAD_PAGE", "Corrupt or incorrect page" },
+ { "VDO_UNSUPPORTED_VERSION", "Unsupported component version" },
+ { "VDO_INCORRECT_COMPONENT", "Component id mismatch in decoder" },
+ { "VDO_PARAMETER_MISMATCH", "Parameters have conflicting values" },
+ { "VDO_UNKNOWN_PARTITION", "No partition exists with a given id" },
+ { "VDO_PARTITION_EXISTS", "A partition already exists with a given id" },
+ { "VDO_INCREMENT_TOO_SMALL", "Physical block growth of too few blocks" },
+ { "VDO_CHECKSUM_MISMATCH", "Incorrect checksum" },
+ { "VDO_LOCK_ERROR", "A lock is held incorrectly" },
+ { "VDO_READ_ONLY", "The device is in read-only mode" },
+ { "VDO_SHUTTING_DOWN", "The device is shutting down" },
+ { "VDO_CORRUPT_JOURNAL", "Recovery journal entries corrupted" },
+ { "VDO_TOO_MANY_SLABS", "Exceeds maximum number of slabs supported" },
+ { "VDO_INVALID_FRAGMENT", "Compressed block fragment is invalid" },
+ { "VDO_RETRY_AFTER_REBUILD", "Retry operation after rebuilding finishes" },
+ { "VDO_BAD_MAPPING", "Invalid page mapping" },
+ { "VDO_BIO_CREATION_FAILED", "Bio creation failed" },
+ { "VDO_BAD_MAGIC", "Bad magic number" },
+ { "VDO_BAD_NONCE", "Bad nonce" },
+ { "VDO_JOURNAL_OVERFLOW", "Journal sequence number overflow" },
+ { "VDO_INVALID_ADMIN_STATE", "Invalid operation for current state" },
+};
+
+/**
+ * vdo_register_status_codes() - Register the VDO status codes.
+ * Return: A success or error code.
+ */
+int vdo_register_status_codes(void)
+{
+ int result;
+
+ BUILD_BUG_ON((VDO_STATUS_CODE_LAST - VDO_STATUS_CODE_BASE) !=
+ ARRAY_SIZE(vdo_status_list));
+
+ result = uds_register_error_block("VDO Status", VDO_STATUS_CODE_BASE,
+ VDO_STATUS_CODE_BLOCK_END, vdo_status_list,
+ sizeof(vdo_status_list));
+ return (result == UDS_SUCCESS) ? VDO_SUCCESS : result;
+}
+
+/**
+ * vdo_status_to_errno() - Given an error code, return a value we can return to the OS.
+ * @error: The error code to convert.
+ *
+ * The input error code may be a system-generated value (such as -EIO), an errno macro used in our
+ * code (such as EIO), or a UDS or VDO status code; the result must be something the rest of the OS
+ * can consume (negative errno values such as -EIO, in the case of the kernel).
+ *
+ * Return: A system error code value.
+ */
+int vdo_status_to_errno(int error)
+{
+ char error_name[VDO_MAX_ERROR_NAME_SIZE];
+ char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
+
+	/* 0 is success; a negative value is already a system error code. */
+	if (likely(error <= 0))
+		return error;
+	/* A small positive value is an errno macro used in our own code. */
+	if (error < 1024)
+		return -error;
+
+ /* VDO or UDS error */
+ switch (error) {
+ case VDO_NO_SPACE:
+ return -ENOSPC;
+ case VDO_READ_ONLY:
+ return -EIO;
+ default:
+ vdo_log_info("%s: mapping internal status code %d (%s: %s) to EIO",
+ __func__, error,
+ uds_string_error_name(error, error_name, sizeof(error_name)),
+ uds_string_error(error, error_message, sizeof(error_message)));
+ return -EIO;
+ }
+}
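+
+/*
+ * For illustration, values following directly from the mapping above:
+ *
+ *	vdo_status_to_errno(0)             == 0
+ *	vdo_status_to_errno(-EIO)          == -EIO
+ *	vdo_status_to_errno(EIO)           == -EIO
+ *	vdo_status_to_errno(VDO_NO_SPACE)  == -ENOSPC
+ *	vdo_status_to_errno(VDO_READ_ONLY) == -EIO
+ */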
diff --git a/drivers/md/dm-vdo/status-codes.h b/drivers/md/dm-vdo/status-codes.h
new file mode 100644
index 000000000000..72da04159f88
--- /dev/null
+++ b/drivers/md/dm-vdo/status-codes.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_STATUS_CODES_H
+#define VDO_STATUS_CODES_H
+
+#include "errors.h"
+
+enum {
+ UDS_ERRORS_BLOCK_SIZE = UDS_ERROR_CODE_BLOCK_END - UDS_ERROR_CODE_BASE,
+ VDO_ERRORS_BLOCK_START = UDS_ERROR_CODE_BLOCK_END,
+ VDO_ERRORS_BLOCK_END = VDO_ERRORS_BLOCK_START + UDS_ERRORS_BLOCK_SIZE,
+};
+
+/* VDO-specific status codes. */
+enum vdo_status_codes {
+ /* base of all VDO errors */
+ VDO_STATUS_CODE_BASE = VDO_ERRORS_BLOCK_START,
+ /* we haven't written this yet */
+ VDO_NOT_IMPLEMENTED = VDO_STATUS_CODE_BASE,
+ /* input out of range */
+ VDO_OUT_OF_RANGE,
+ /* an invalid reference count would result */
+ VDO_REF_COUNT_INVALID,
+ /* a free block could not be allocated */
+ VDO_NO_SPACE,
+ /* improper or missing configuration option */
+ VDO_BAD_CONFIGURATION,
+ /* prior operation still in progress */
+ VDO_COMPONENT_BUSY,
+ /* page contents incorrect or corrupt data */
+ VDO_BAD_PAGE,
+ /* unsupported version of some component */
+ VDO_UNSUPPORTED_VERSION,
+ /* component id mismatch in decoder */
+ VDO_INCORRECT_COMPONENT,
+ /* parameters have conflicting values */
+ VDO_PARAMETER_MISMATCH,
+ /* no partition exists with a given id */
+ VDO_UNKNOWN_PARTITION,
+ /* a partition already exists with a given id */
+ VDO_PARTITION_EXISTS,
+ /* physical block growth of too few blocks */
+ VDO_INCREMENT_TOO_SMALL,
+ /* incorrect checksum */
+ VDO_CHECKSUM_MISMATCH,
+ /* a lock is held incorrectly */
+ VDO_LOCK_ERROR,
+ /* the VDO is in read-only mode */
+ VDO_READ_ONLY,
+ /* the VDO is shutting down */
+ VDO_SHUTTING_DOWN,
+ /* the recovery journal has corrupt entries */
+ VDO_CORRUPT_JOURNAL,
+ /* exceeds maximum number of slabs supported */
+ VDO_TOO_MANY_SLABS,
+ /* a compressed block fragment is invalid */
+ VDO_INVALID_FRAGMENT,
+ /* action is unsupported while rebuilding */
+ VDO_RETRY_AFTER_REBUILD,
+ /* a block map entry is invalid */
+ VDO_BAD_MAPPING,
+ /* bio_add_page failed */
+ VDO_BIO_CREATION_FAILED,
+ /* bad magic number */
+ VDO_BAD_MAGIC,
+ /* bad nonce */
+ VDO_BAD_NONCE,
+ /* sequence number overflow */
+ VDO_JOURNAL_OVERFLOW,
+ /* the VDO is not in a state to perform an admin operation */
+ VDO_INVALID_ADMIN_STATE,
+ /* one more than last error code */
+ VDO_STATUS_CODE_LAST,
+ VDO_STATUS_CODE_BLOCK_END = VDO_ERRORS_BLOCK_END
+};
+
+extern const struct error_info vdo_status_list[];
+
+int vdo_register_status_codes(void);
+
+int vdo_status_to_errno(int error);
+
+#endif /* VDO_STATUS_CODES_H */
diff --git a/drivers/md/dm-vdo/string-utils.c b/drivers/md/dm-vdo/string-utils.c
new file mode 100644
index 000000000000..71e44b4683ea
--- /dev/null
+++ b/drivers/md/dm-vdo/string-utils.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "string-utils.h"
+
+char *vdo_append_to_buffer(char *buffer, char *buf_end, const char *fmt, ...)
+{
+ va_list args;
+ size_t n;
+
+ va_start(args, fmt);
+ n = vsnprintf(buffer, buf_end - buffer, fmt, args);
+ if (n >= (size_t) (buf_end - buffer))
+ buffer = buf_end;
+ else
+ buffer += n;
+ va_end(args);
+
+ return buffer;
+}
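+
+/*
+ * Illustrative sketch (hypothetical buffer and values): appends can be chained
+ * because the returned cursor is clamped to buf_end on truncation, so later
+ * calls become no-ops instead of overflowing.
+ *
+ *	char buf[64];
+ *	char *pos = buf;
+ *	char *end = buf + sizeof(buf);
+ *
+ *	pos = vdo_append_to_buffer(pos, end, "compression %s",
+ *				   vdo_bool_to_string(true));
+ *	pos = vdo_append_to_buffer(pos, end, ", slabs %u", 16U);
+ */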
diff --git a/drivers/md/dm-vdo/string-utils.h b/drivers/md/dm-vdo/string-utils.h
new file mode 100644
index 000000000000..96eecd38b1c2
--- /dev/null
+++ b/drivers/md/dm-vdo/string-utils.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_STRING_UTILS_H
+#define VDO_STRING_UTILS_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* Utilities related to string manipulation */
+
+static inline const char *vdo_bool_to_string(bool value)
+{
+ return value ? "true" : "false";
+}
+
+/* Append a formatted string to the end of a buffer. */
+char *vdo_append_to_buffer(char *buffer, char *buf_end, const char *fmt, ...)
+ __printf(3, 4);
+
+#endif /* VDO_STRING_UTILS_H */
diff --git a/drivers/md/dm-vdo/thread-device.c b/drivers/md/dm-vdo/thread-device.c
new file mode 100644
index 000000000000..df13ca914db8
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-device.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "thread-device.h"
+
+/* A registry of threads associated with device id numbers. */
+static struct thread_registry device_id_thread_registry;
+
+/* Any registered thread must be unregistered. */
+void vdo_register_thread_device_id(struct registered_thread *new_thread,
+ unsigned int *id_ptr)
+{
+ vdo_register_thread(&device_id_thread_registry, new_thread, id_ptr);
+}
+
+void vdo_unregister_thread_device_id(void)
+{
+ vdo_unregister_thread(&device_id_thread_registry);
+}
+
+int vdo_get_thread_device_id(void)
+{
+ const unsigned int *pointer;
+
+ pointer = vdo_lookup_thread(&device_id_thread_registry);
+ return (pointer != NULL) ? *pointer : -1;
+}
+
+void vdo_initialize_thread_device_registry(void)
+{
+ vdo_initialize_thread_registry(&device_id_thread_registry);
+}
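+
+/*
+ * Illustrative sketch (the function names are hypothetical): bracketing work
+ * done on behalf of a particular device instance so that nested code can
+ * recover the instance number via vdo_get_thread_device_id().
+ *
+ *	static void example_do_device_work(unsigned int *instance)
+ *	{
+ *		struct registered_thread device_thread;
+ *
+ *		vdo_register_thread_device_id(&device_thread, instance);
+ *		do_work_that_may_log();
+ *		vdo_unregister_thread_device_id();
+ *	}
+ */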
diff --git a/drivers/md/dm-vdo/thread-device.h b/drivers/md/dm-vdo/thread-device.h
new file mode 100644
index 000000000000..494d9c9ef3f6
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-device.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_THREAD_DEVICE_H
+#define VDO_THREAD_DEVICE_H
+
+#include "thread-registry.h"
+
+void vdo_register_thread_device_id(struct registered_thread *new_thread,
+ unsigned int *id_ptr);
+
+void vdo_unregister_thread_device_id(void);
+
+int vdo_get_thread_device_id(void);
+
+void vdo_initialize_thread_device_registry(void);
+
+#endif /* VDO_THREAD_DEVICE_H */
diff --git a/drivers/md/dm-vdo/thread-registry.c b/drivers/md/dm-vdo/thread-registry.c
new file mode 100644
index 000000000000..d4a077d58c60
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-registry.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "thread-registry.h"
+
+#include <asm/current.h>
+#include <linux/rculist.h>
+
+#include "permassert.h"
+
+/*
+ * We need to be careful when using other facilities that may use thread registry functions in
+ * their normal operation. For example, we do not want to invoke the logger while holding a lock.
+ */
+
+void vdo_initialize_thread_registry(struct thread_registry *registry)
+{
+ INIT_LIST_HEAD(&registry->links);
+ spin_lock_init(&registry->lock);
+}
+
+/* Register the current thread and associate it with a data pointer. */
+void vdo_register_thread(struct thread_registry *registry,
+ struct registered_thread *new_thread, const void *pointer)
+{
+ struct registered_thread *thread;
+ bool found_it = false;
+
+ INIT_LIST_HEAD(&new_thread->links);
+ new_thread->pointer = pointer;
+ new_thread->task = current;
+
+ spin_lock(&registry->lock);
+ list_for_each_entry(thread, &registry->links, links) {
+ if (thread->task == current) {
+ /* There should be no existing entry. */
+ list_del_rcu(&thread->links);
+ found_it = true;
+ break;
+ }
+ }
+ list_add_tail_rcu(&new_thread->links, &registry->links);
+ spin_unlock(&registry->lock);
+
+ VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
+ if (found_it) {
+ /* Ensure no RCU iterators see it before re-initializing. */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&thread->links);
+ }
+}
+
+void vdo_unregister_thread(struct thread_registry *registry)
+{
+ struct registered_thread *thread;
+ bool found_it = false;
+
+ spin_lock(&registry->lock);
+ list_for_each_entry(thread, &registry->links, links) {
+ if (thread->task == current) {
+ list_del_rcu(&thread->links);
+ found_it = true;
+ break;
+ }
+ }
+ spin_unlock(&registry->lock);
+
+ VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry");
+ if (found_it) {
+ /* Ensure no RCU iterators see it before re-initializing. */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&thread->links);
+ }
+}
+
+const void *vdo_lookup_thread(struct thread_registry *registry)
+{
+ struct registered_thread *thread;
+ const void *result = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(thread, &registry->links, links) {
+ if (thread->task == current) {
+ result = thread->pointer;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return result;
+}
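+
+/*
+ * Illustrative sketch (hypothetical names and context value): the general
+ * pattern is to register the current task with a data pointer, look that
+ * pointer up from any nested code running on the same task, and unregister
+ * before the task exits.
+ *
+ *	static struct thread_registry example_registry;
+ *	static int example_context = 42;
+ *
+ *	struct registered_thread me;
+ *	const int *found;
+ *
+ *	vdo_initialize_thread_registry(&example_registry);
+ *	vdo_register_thread(&example_registry, &me, &example_context);
+ *	found = vdo_lookup_thread(&example_registry);
+ *	vdo_unregister_thread(&example_registry);
+ */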
diff --git a/drivers/md/dm-vdo/thread-registry.h b/drivers/md/dm-vdo/thread-registry.h
new file mode 100644
index 000000000000..cc6d78312b9e
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-registry.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_THREAD_REGISTRY_H
+#define VDO_THREAD_REGISTRY_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct thread_registry {
+ struct list_head links;
+ spinlock_t lock;
+};
+
+struct registered_thread {
+ struct list_head links;
+ const void *pointer;
+ struct task_struct *task;
+};
+
+void vdo_initialize_thread_registry(struct thread_registry *registry);
+
+void vdo_register_thread(struct thread_registry *registry,
+ struct registered_thread *new_thread, const void *pointer);
+
+void vdo_unregister_thread(struct thread_registry *registry);
+
+const void *vdo_lookup_thread(struct thread_registry *registry);
+
+#endif /* VDO_THREAD_REGISTRY_H */
diff --git a/drivers/md/dm-vdo/thread-utils.c b/drivers/md/dm-vdo/thread-utils.c
new file mode 100644
index 000000000000..ec08478dd013
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-utils.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "thread-utils.h"
+
+#include <asm/current.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+
+static struct hlist_head thread_list;
+static struct mutex thread_mutex;
+
+struct thread {
+ void (*thread_function)(void *thread_data);
+ void *thread_data;
+ struct hlist_node thread_links;
+ struct task_struct *thread_task;
+ struct completion thread_done;
+};
+
+void vdo_initialize_threads_mutex(void)
+{
+ mutex_init(&thread_mutex);
+}
+
+static int thread_starter(void *arg)
+{
+ struct registered_thread allocating_thread;
+ struct thread *thread = arg;
+
+ thread->thread_task = current;
+ mutex_lock(&thread_mutex);
+ hlist_add_head(&thread->thread_links, &thread_list);
+ mutex_unlock(&thread_mutex);
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+ thread->thread_function(thread->thread_data);
+ vdo_unregister_allocating_thread();
+ complete(&thread->thread_done);
+ return 0;
+}
+
+int vdo_create_thread(void (*thread_function)(void *), void *thread_data,
+ const char *name, struct thread **new_thread)
+{
+ char *name_colon = strchr(name, ':');
+ char *my_name_colon = strchr(current->comm, ':');
+ struct task_struct *task;
+ struct thread *thread;
+ int result;
+
+ result = vdo_allocate(1, struct thread, __func__, &thread);
+ if (result != VDO_SUCCESS) {
+ vdo_log_warning("Error allocating memory for %s", name);
+ return result;
+ }
+
+ thread->thread_function = thread_function;
+ thread->thread_data = thread_data;
+ init_completion(&thread->thread_done);
+ /*
+ * Start the thread, with an appropriate thread name.
+ *
+ * If the name supplied contains a colon character, use that name. This causes uds module
+ * threads to have names like "uds:callbackW" and the main test runner thread to be named
+ * "zub:runtest".
+ *
+ * Otherwise if the current thread has a name containing a colon character, prefix the name
+ * supplied with the name of the current thread up to (and including) the colon character.
+ * Thus when the "kvdo0:dedupeQ" thread opens an index session, all the threads associated
+ * with that index will have names like "kvdo0:foo".
+ *
+ * Otherwise just use the name supplied. This should be a rare occurrence.
+ */
+ if ((name_colon == NULL) && (my_name_colon != NULL)) {
+ task = kthread_run(thread_starter, thread, "%.*s:%s",
+ (int) (my_name_colon - current->comm), current->comm,
+ name);
+ } else {
+ task = kthread_run(thread_starter, thread, "%s", name);
+ }
+
+ if (IS_ERR(task)) {
+ vdo_free(thread);
+ return PTR_ERR(task);
+ }
+
+ *new_thread = thread;
+ return VDO_SUCCESS;
+}
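+
+/*
+ * Illustrative sketch (the worker function and context are hypothetical): if
+ * the caller is running as "kvdo0:dedupeQ" and passes the name "indexW", the
+ * new task will be named "kvdo0:indexW"; a name such as "uds:callbackW" would
+ * be used verbatim.
+ *
+ *	struct thread *worker;
+ *	int result = vdo_create_thread(example_worker, context, "indexW", &worker);
+ *
+ *	A successful caller is expected to reap the thread later with
+ *	vdo_join_threads(worker).
+ */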
+
+void vdo_join_threads(struct thread *thread)
+{
+ while (wait_for_completion_interruptible(&thread->thread_done))
+ fsleep(1000);
+
+ mutex_lock(&thread_mutex);
+ hlist_del(&thread->thread_links);
+ mutex_unlock(&thread_mutex);
+ vdo_free(thread);
+}
diff --git a/drivers/md/dm-vdo/thread-utils.h b/drivers/md/dm-vdo/thread-utils.h
new file mode 100644
index 000000000000..687ab43e2cee
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-utils.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef THREAD_UTILS_H
+#define THREAD_UTILS_H
+
+#include <linux/atomic.h>
+
+/* Thread and synchronization utilities */
+
+struct thread;
+
+void vdo_initialize_threads_mutex(void);
+int __must_check vdo_create_thread(void (*thread_function)(void *), void *thread_data,
+ const char *name, struct thread **new_thread);
+void vdo_join_threads(struct thread *thread);
+
+#endif /* THREAD_UTILS_H */
diff --git a/drivers/md/dm-vdo/time-utils.h b/drivers/md/dm-vdo/time-utils.h
new file mode 100644
index 000000000000..5f1e850fd826
--- /dev/null
+++ b/drivers/md/dm-vdo/time-utils.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_TIME_UTILS_H
+#define UDS_TIME_UTILS_H
+
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+static inline s64 ktime_to_seconds(ktime_t reltime)
+{
+ return reltime / NSEC_PER_SEC;
+}
+
+static inline ktime_t current_time_ns(clockid_t clock)
+{
+ return clock == CLOCK_MONOTONIC ? ktime_get_ns() : ktime_get_real_ns();
+}
+
+static inline ktime_t current_time_us(void)
+{
+ return current_time_ns(CLOCK_REALTIME) / NSEC_PER_USEC;
+}
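+
+/*
+ * Illustrative sketch (hypothetical variable names): timing an operation with
+ * these helpers.
+ *
+ *	ktime_t start = current_time_ns(CLOCK_MONOTONIC);
+ *	s64 elapsed_seconds;
+ *
+ *	run_the_operation_being_timed();
+ *	elapsed_seconds = ktime_to_seconds(current_time_ns(CLOCK_MONOTONIC) - start);
+ */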
+
+#endif /* UDS_TIME_UTILS_H */
diff --git a/drivers/md/dm-vdo/types.h b/drivers/md/dm-vdo/types.h
new file mode 100644
index 000000000000..dbe892b10f26
--- /dev/null
+++ b/drivers/md/dm-vdo/types.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_TYPES_H
+#define VDO_TYPES_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/device-mapper.h>
+#include <linux/list.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+#include "funnel-queue.h"
+
+/* A size type in blocks. */
+typedef u64 block_count_t;
+
+/* The size of a block. */
+typedef u16 block_size_t;
+
+/* A counter for data_vios */
+typedef u16 data_vio_count_t;
+
+/* A height within a tree. */
+typedef u8 height_t;
+
+/* The logical block number as used by the consumer. */
+typedef u64 logical_block_number_t;
+
+/* The type of the nonce used to identify instances of VDO. */
+typedef u64 nonce_t;
+
+/* A size in pages. */
+typedef u32 page_count_t;
+
+/* A page number. */
+typedef u32 page_number_t;
+
+/*
+ * The physical (well, less logical) block number at which the block is found on the underlying
+ * device.
+ */
+typedef u64 physical_block_number_t;
+
+/* A count of tree roots. */
+typedef u8 root_count_t;
+
+/* A number of sectors. */
+typedef u8 sector_count_t;
+
+/* A sequence number. */
+typedef u64 sequence_number_t;
+
+/* The offset of a block within a slab. */
+typedef u32 slab_block_number;
+
+/* A size type in slabs. */
+typedef u16 slab_count_t;
+
+/* A slot in a bin or block map page. */
+typedef u16 slot_number_t;
+
+/* typedef thread_count_t - A thread counter. */
+typedef u8 thread_count_t;
+
+/* typedef thread_id_t - A thread ID. vdo threads are numbered sequentially from 0. */
+typedef u8 thread_id_t;
+
+/* A zone counter */
+typedef u8 zone_count_t;
+
+/* The following enums are persisted on storage, so the values must be preserved. */
+
+/* The current operating mode of the VDO. */
+enum vdo_state {
+ VDO_DIRTY = 0,
+ VDO_NEW = 1,
+ VDO_CLEAN = 2,
+ VDO_READ_ONLY_MODE = 3,
+ VDO_FORCE_REBUILD = 4,
+ VDO_RECOVERING = 5,
+ VDO_REPLAYING = 6, /* VDO_REPLAYING is never set anymore, but retained for upgrade */
+ VDO_REBUILD_FOR_UPGRADE = 7,
+
+ /* Keep VDO_STATE_COUNT at the bottom. */
+ VDO_STATE_COUNT
+};
+
+/**
+ * vdo_state_requires_read_only_rebuild() - Check whether a vdo_state indicates
+ * that a read-only rebuild is required.
+ * @state: The vdo_state to check.
+ *
+ * Return: true if the state indicates a rebuild is required
+ */
+static inline bool __must_check vdo_state_requires_read_only_rebuild(enum vdo_state state)
+{
+ return ((state == VDO_FORCE_REBUILD) || (state == VDO_REBUILD_FOR_UPGRADE));
+}
+
+/**
+ * vdo_state_requires_recovery() - Check whether a vdo state indicates that recovery is needed.
+ * @state: The state to check.
+ *
+ * Return: true if the state indicates a recovery is required
+ */
+static inline bool __must_check vdo_state_requires_recovery(enum vdo_state state)
+{
+ return ((state == VDO_DIRTY) || (state == VDO_REPLAYING) || (state == VDO_RECOVERING));
+}
+
+/*
+ * The current operation on a physical block (from the point of view of the recovery journal, slab
+ * journals, and reference counts).
+ */
+enum journal_operation {
+ VDO_JOURNAL_DATA_REMAPPING = 0,
+ VDO_JOURNAL_BLOCK_MAP_REMAPPING = 1,
+} __packed;
+
+/* Partition IDs encoded in the volume layout in the super block. */
+enum partition_id {
+ VDO_BLOCK_MAP_PARTITION = 0,
+ VDO_SLAB_DEPOT_PARTITION = 1,
+ VDO_RECOVERY_JOURNAL_PARTITION = 2,
+ VDO_SLAB_SUMMARY_PARTITION = 3,
+} __packed;
+
+/* Metadata types for the vdo. */
+enum vdo_metadata_type {
+ VDO_METADATA_RECOVERY_JOURNAL = 1,
+ VDO_METADATA_SLAB_JOURNAL = 2,
+ VDO_METADATA_RECOVERY_JOURNAL_2 = 3,
+} __packed;
+
+/* A position in the block map where a block map entry is stored. */
+struct block_map_slot {
+ physical_block_number_t pbn;
+ slot_number_t slot;
+};
+
+/*
+ * Four bits of each five-byte block map entry contain a mapping state value used to distinguish
+ * unmapped or discarded logical blocks (which are treated as mapped to the zero block) from entries
+ * that have been mapped to a physical block, including the zero block.
+ *
+ * FIXME: these should maybe be defines.
+ */
+enum block_mapping_state {
+ VDO_MAPPING_STATE_UNMAPPED = 0, /* Must be zero to be the default value */
+ VDO_MAPPING_STATE_UNCOMPRESSED = 1, /* A normal (uncompressed) block */
+ VDO_MAPPING_STATE_COMPRESSED_BASE = 2, /* Compressed in slot 0 */
+ VDO_MAPPING_STATE_COMPRESSED_MAX = 15, /* Compressed in slot 13 */
+};
+
+enum {
+ VDO_MAX_COMPRESSION_SLOTS =
+ (VDO_MAPPING_STATE_COMPRESSED_MAX - VDO_MAPPING_STATE_COMPRESSED_BASE + 1),
+};
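+
+/*
+ * Illustrative sketch (the helper name is hypothetical): for a compressed
+ * mapping, the state encodes which of the VDO_MAX_COMPRESSION_SLOTS (14)
+ * fragments of the compressed block holds the data, i.e. slot 0 through 13.
+ *
+ *	static inline unsigned int example_slot_from_state(enum block_mapping_state state)
+ *	{
+ *		return state - VDO_MAPPING_STATE_COMPRESSED_BASE;
+ *	}
+ */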
+
+struct data_location {
+ physical_block_number_t pbn;
+ enum block_mapping_state state;
+};
+
+/* The configuration of a single slab derived from the configured block size and slab size. */
+struct slab_config {
+ /* total number of blocks in the slab */
+ block_count_t slab_blocks;
+ /* number of blocks available for data */
+ block_count_t data_blocks;
+ /* number of blocks for reference counts */
+ block_count_t reference_count_blocks;
+ /* number of blocks for the slab journal */
+ block_count_t slab_journal_blocks;
+ /*
+ * Number of blocks after which the slab journal starts pushing out a reference_block for
+ * each new entry it receives.
+ */
+ block_count_t slab_journal_flushing_threshold;
+ /*
+ * Number of blocks after which the slab journal pushes out all reference_blocks and makes
+ * all vios wait.
+ */
+ block_count_t slab_journal_blocking_threshold;
+ /* Number of blocks after which the slab must be scrubbed before coming online. */
+ block_count_t slab_journal_scrubbing_threshold;
+} __packed;
+
+/*
+ * This structure is memcmp'd for equality. Keep it packed and don't add any fields that are not
+ * properly set in both extant and parsed configs.
+ */
+struct thread_count_config {
+ unsigned int bio_ack_threads;
+ unsigned int bio_threads;
+ unsigned int bio_rotation_interval;
+ unsigned int cpu_threads;
+ unsigned int logical_zones;
+ unsigned int physical_zones;
+ unsigned int hash_zones;
+} __packed;
+
+struct device_config {
+ struct dm_target *owning_target;
+ struct dm_dev *owned_device;
+ struct vdo *vdo;
+ /* All configs referencing a layer are kept on a list in the layer */
+ struct list_head config_list;
+ char *original_string;
+ unsigned int version;
+ char *parent_device_name;
+ block_count_t physical_blocks;
+ /*
+ * This is the number of logical blocks from VDO's internal point of view. It is the number
+ * of 4K blocks regardless of the value of the logical_block_size parameter below.
+ */
+ block_count_t logical_blocks;
+ unsigned int logical_block_size;
+ unsigned int cache_size;
+ unsigned int block_map_maximum_age;
+ bool deduplication;
+ bool compression;
+ struct thread_count_config thread_counts;
+ block_count_t max_discard_blocks;
+};
+
+enum vdo_completion_type {
+ /* Keep VDO_UNSET_COMPLETION_TYPE at the top. */
+ VDO_UNSET_COMPLETION_TYPE,
+ VDO_ACTION_COMPLETION,
+ VDO_ADMIN_COMPLETION,
+ VDO_BLOCK_ALLOCATOR_COMPLETION,
+ VDO_DATA_VIO_POOL_COMPLETION,
+ VDO_DECREMENT_COMPLETION,
+ VDO_FLUSH_COMPLETION,
+ VDO_FLUSH_NOTIFICATION_COMPLETION,
+ VDO_GENERATION_FLUSHED_COMPLETION,
+ VDO_HASH_ZONE_COMPLETION,
+ VDO_HASH_ZONES_COMPLETION,
+ VDO_LOCK_COUNTER_COMPLETION,
+ VDO_PAGE_COMPLETION,
+ VDO_READ_ONLY_MODE_COMPLETION,
+ VDO_REPAIR_COMPLETION,
+ VDO_SYNC_COMPLETION,
+ VIO_COMPLETION,
+} __packed;
+
+struct vdo_completion;
+
+/**
+ * typedef vdo_action_fn - An asynchronous VDO operation.
+ * @completion: The completion of the operation.
+ */
+typedef void (*vdo_action_fn)(struct vdo_completion *completion);
+
+enum vdo_completion_priority {
+ BIO_ACK_Q_ACK_PRIORITY = 0,
+ BIO_ACK_Q_MAX_PRIORITY = 0,
+ BIO_Q_COMPRESSED_DATA_PRIORITY = 0,
+ BIO_Q_DATA_PRIORITY = 0,
+ BIO_Q_FLUSH_PRIORITY = 2,
+ BIO_Q_HIGH_PRIORITY = 2,
+ BIO_Q_METADATA_PRIORITY = 1,
+ BIO_Q_VERIFY_PRIORITY = 1,
+ BIO_Q_MAX_PRIORITY = 2,
+ CPU_Q_COMPLETE_VIO_PRIORITY = 0,
+ CPU_Q_COMPLETE_READ_PRIORITY = 0,
+ CPU_Q_COMPRESS_BLOCK_PRIORITY = 0,
+ CPU_Q_EVENT_REPORTER_PRIORITY = 0,
+ CPU_Q_HASH_BLOCK_PRIORITY = 0,
+ CPU_Q_MAX_PRIORITY = 0,
+ UDS_Q_PRIORITY = 0,
+ UDS_Q_MAX_PRIORITY = 0,
+ VDO_DEFAULT_Q_COMPLETION_PRIORITY = 1,
+ VDO_DEFAULT_Q_FLUSH_PRIORITY = 2,
+ VDO_DEFAULT_Q_MAP_BIO_PRIORITY = 0,
+ VDO_DEFAULT_Q_SYNC_PRIORITY = 2,
+ VDO_DEFAULT_Q_VIO_CALLBACK_PRIORITY = 1,
+ VDO_DEFAULT_Q_MAX_PRIORITY = 2,
+ /* The maximum allowable priority */
+ VDO_WORK_Q_MAX_PRIORITY = 2,
+ /* A value which must be out of range for a valid priority */
+ VDO_WORK_Q_DEFAULT_PRIORITY = VDO_WORK_Q_MAX_PRIORITY + 1,
+};
+
+struct vdo_completion {
+ /* The type of completion this is */
+ enum vdo_completion_type type;
+
+ /*
+ * true once the processing of the operation is complete. This flag should not
+ * be used by waiters external to the VDO base as it is used to gate calling the callback.
+ */
+ bool complete;
+
+ /*
+ * If true, queue this completion on the next callback invocation, even if it is already
+ * running on the correct thread.
+ */
+ bool requeue;
+
+ /* The ID of the thread which should run the next callback */
+ thread_id_t callback_thread_id;
+
+ /* The result of the operation */
+ int result;
+
+ /* The VDO on which this completion operates */
+ struct vdo *vdo;
+
+ /* The callback which will be called once the operation is complete */
+ vdo_action_fn callback;
+
+ /* Callback which, if set, will be called if an error result is set */
+ vdo_action_fn error_handler;
+
+ /* The parent object, if any, that spawned this completion */
+ void *parent;
+
+ /* Entry link for lock-free work queue */
+ struct funnel_queue_entry work_queue_entry_link;
+ enum vdo_completion_priority priority;
+ struct vdo_work_queue *my_queue;
+};
+
+struct block_allocator;
+struct data_vio;
+struct vdo;
+struct vdo_config;
+
+/* vio types for statistics and instrumentation. */
+enum vio_type {
+ VIO_TYPE_UNINITIALIZED = 0,
+ VIO_TYPE_DATA,
+ VIO_TYPE_BLOCK_ALLOCATOR,
+ VIO_TYPE_BLOCK_MAP,
+ VIO_TYPE_BLOCK_MAP_INTERIOR,
+ VIO_TYPE_GEOMETRY,
+ VIO_TYPE_PARTITION_COPY,
+ VIO_TYPE_RECOVERY_JOURNAL,
+ VIO_TYPE_SLAB_JOURNAL,
+ VIO_TYPE_SLAB_SUMMARY,
+ VIO_TYPE_SUPER_BLOCK,
+} __packed;
+
+/* Priority levels for asynchronous I/O operations performed on a vio. */
+enum vio_priority {
+ VIO_PRIORITY_LOW = 0,
+ VIO_PRIORITY_DATA = VIO_PRIORITY_LOW,
+ VIO_PRIORITY_COMPRESSED_DATA = VIO_PRIORITY_DATA,
+ VIO_PRIORITY_METADATA,
+ VIO_PRIORITY_HIGH,
+} __packed;
+
+/*
+ * A wrapper for a bio. All I/O to the storage below a vdo is conducted via vios.
+ */
+struct vio {
+ /* The completion for this vio */
+ struct vdo_completion completion;
+
+ /* The bio zone in which I/O should be processed */
+ zone_count_t bio_zone;
+
+ /* The queueing priority of the vio operation */
+ enum vio_priority priority;
+
+ /* The vio type is used for statistics and instrumentation. */
+ enum vio_type type;
+
+ /* The size of this vio in blocks */
+ unsigned int block_count;
+
+ /* The data being read or written. */
+ char *data;
+
+ /* The VDO-owned bio to use for all IO for this vio */
+ struct bio *bio;
+
+ /*
+ * A list of enqueued bios with consecutive block numbers, stored by vdo_submit_bio() under
+ * the first-enqueued vio. The other vios are found via their bio entries in this list, and
+ * are not added to the work queue as separate completions.
+ */
+ struct bio_list bios_merged;
+};
+
+#endif /* VDO_TYPES_H */
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
new file mode 100644
index 000000000000..fff847767755
--- /dev/null
+++ b/drivers/md/dm-vdo/vdo.c
@@ -0,0 +1,1730 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/*
+ * This file contains the main entry points for normal operations on a vdo as well as functions for
+ * constructing and destroying vdo instances (in memory).
+ */
+
+/**
+ * DOC:
+ *
+ * A read_only_notifier has a single completion which is used to perform read-only notifications;
+ * however, vdo_enter_read_only_mode() may be called from any thread. A pair of fields, protected
+ * by a spinlock, are used to control the read-only mode entry process. The first field holds the
+ * read-only error. The second is the state field, which may hold any of the four special values
+ * enumerated here.
+ *
+ * When vdo_enter_read_only_mode() is called from some vdo thread, if the read_only_error field
+ * already contains an error (i.e. its value is not VDO_SUCCESS), then some other error has already
+ * initiated the read-only process, and nothing more is done. Otherwise, the new error is stored in
+ * the read_only_error field, and the state field is consulted. If the state is MAY_NOTIFY, it is
+ * set to NOTIFYING, and the notification process begins. If the state is MAY_NOT_NOTIFY, then
+ * notifications are currently disallowed, generally due to the vdo being suspended. In this case,
+ * nothing more will be done until the vdo is resumed, at which point the notification will be
+ * performed. In any other case, the vdo is already read-only, and there is nothing more to do.
+ */
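+
+/*
+ * A condensed sketch of the entry decision described above; the local variable
+ * start_notification is illustrative, and the real logic lives in
+ * vdo_enter_read_only_mode() and its helpers:
+ *
+ *	spin_lock(&notifier->lock);
+ *	if (notifier->read_only_error == VDO_SUCCESS) {
+ *		notifier->read_only_error = error;
+ *		if (notifier->state == MAY_NOTIFY) {
+ *			notifier->state = NOTIFYING;
+ *			start_notification = true;
+ *		}
+ *	}
+ *	spin_unlock(&notifier->lock);
+ */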
+
+#include "vdo.h"
+
+#include <linux/completion.h>
+#include <linux/device-mapper.h>
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "funnel-workqueue.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "statistics.h"
+#include "status-codes.h"
+#include "vio.h"
+
+#define PARANOID_THREAD_CONSISTENCY_CHECKS 0
+
+struct sync_completion {
+ struct vdo_completion vdo_completion;
+ struct completion completion;
+};
+
+/* A linked list is adequate for the small number of entries we expect. */
+struct device_registry {
+ struct list_head links;
+ /* TODO: Convert to rcu per kernel recommendation. */
+ rwlock_t lock;
+};
+
+static struct device_registry registry;
+
+/**
+ * vdo_initialize_device_registry_once() - Initialize the necessary structures for the device
+ * registry.
+ */
+void vdo_initialize_device_registry_once(void)
+{
+ INIT_LIST_HEAD(&registry.links);
+ rwlock_init(&registry.lock);
+}
+
+/** vdo_is_equal() - Implements vdo_filter_fn. */
+static bool vdo_is_equal(struct vdo *vdo, const void *context)
+{
+ return (vdo == context);
+}
+
+/**
+ * filter_vdos_locked() - Find a vdo in the registry if it exists there.
+ * @filter: The filter function to apply to devices.
+ * @context: A bit of context to provide the filter.
+ *
+ * Context: Must be called holding the lock.
+ *
+ * Return: the vdo object found, if any.
+ */
+static struct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter,
+ const void *context)
+{
+ struct vdo *vdo;
+
+ list_for_each_entry(vdo, &registry.links, registration) {
+ if (filter(vdo, context))
+ return vdo;
+ }
+
+ return NULL;
+}
+
+/**
+ * vdo_find_matching() - Find and return the first (if any) vdo matching a given filter function.
+ * @filter: The filter function to apply to vdos.
+ * @context: A bit of context to provide the filter.
+ */
+struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context)
+{
+ struct vdo *vdo;
+
+ read_lock(&registry.lock);
+ vdo = filter_vdos_locked(filter, context);
+ read_unlock(&registry.lock);
+
+ return vdo;
+}
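+
+/*
+ * Illustrative sketch (the filter name is hypothetical): vdo_filter_fn
+ * implementations receive each registered vdo along with the caller's context,
+ * so a lookup by instance number could be written as:
+ *
+ *	static bool example_has_instance(struct vdo *vdo, const void *context)
+ *	{
+ *		return vdo->instance == *((const unsigned int *) context);
+ *	}
+ *
+ *	unsigned int instance = 0;
+ *	struct vdo *found = vdo_find_matching(example_has_instance, &instance);
+ */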
+
+static void start_vdo_request_queue(void *ptr)
+{
+ struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue());
+
+ vdo_register_allocating_thread(&thread->allocating_thread,
+ &thread->vdo->allocations_allowed);
+}
+
+static void finish_vdo_request_queue(void *ptr)
+{
+ vdo_unregister_allocating_thread();
+}
+
+#ifdef MODULE
+#define MODULE_NAME THIS_MODULE->name
+#else
+#define MODULE_NAME "dm-vdo"
+#endif /* MODULE */
+
+static const struct vdo_work_queue_type default_queue_type = {
+ .start = start_vdo_request_queue,
+ .finish = finish_vdo_request_queue,
+ .max_priority = VDO_DEFAULT_Q_MAX_PRIORITY,
+ .default_priority = VDO_DEFAULT_Q_COMPLETION_PRIORITY,
+};
+
+static const struct vdo_work_queue_type bio_ack_q_type = {
+ .start = NULL,
+ .finish = NULL,
+ .max_priority = BIO_ACK_Q_MAX_PRIORITY,
+ .default_priority = BIO_ACK_Q_ACK_PRIORITY,
+};
+
+static const struct vdo_work_queue_type cpu_q_type = {
+ .start = NULL,
+ .finish = NULL,
+ .max_priority = CPU_Q_MAX_PRIORITY,
+ .default_priority = CPU_Q_MAX_PRIORITY,
+};
+
+static void uninitialize_thread_config(struct thread_config *config)
+{
+ vdo_free(vdo_forget(config->logical_threads));
+ vdo_free(vdo_forget(config->physical_threads));
+ vdo_free(vdo_forget(config->hash_zone_threads));
+ vdo_free(vdo_forget(config->bio_threads));
+ memset(config, 0, sizeof(struct thread_config));
+}
+
+static void assign_thread_ids(struct thread_config *config,
+ thread_id_t thread_ids[], zone_count_t count)
+{
+ zone_count_t zone;
+
+ for (zone = 0; zone < count; zone++)
+ thread_ids[zone] = config->thread_count++;
+}
+
+/**
+ * initialize_thread_config() - Initialize the thread mapping
+ *
+ * If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all
+ * three plus the packer and recovery journal. Otherwise, there must be at least one of each type,
+ * and each will have its own thread, as will the packer and recovery journal.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check initialize_thread_config(struct thread_count_config counts,
+ struct thread_config *config)
+{
+ int result;
+ bool single = ((counts.logical_zones + counts.physical_zones + counts.hash_zones) == 0);
+
+ config->bio_thread_count = counts.bio_threads;
+ if (single) {
+ config->logical_zone_count = 1;
+ config->physical_zone_count = 1;
+ config->hash_zone_count = 1;
+ } else {
+ config->logical_zone_count = counts.logical_zones;
+ config->physical_zone_count = counts.physical_zones;
+ config->hash_zone_count = counts.hash_zones;
+ }
+
+ result = vdo_allocate(config->logical_zone_count, thread_id_t,
+ "logical thread array", &config->logical_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ result = vdo_allocate(config->physical_zone_count, thread_id_t,
+ "physical thread array", &config->physical_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ result = vdo_allocate(config->hash_zone_count, thread_id_t,
+ "hash thread array", &config->hash_zone_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ result = vdo_allocate(config->bio_thread_count, thread_id_t,
+ "bio thread array", &config->bio_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ if (single) {
+ config->logical_threads[0] = config->thread_count;
+ config->physical_threads[0] = config->thread_count;
+ config->hash_zone_threads[0] = config->thread_count++;
+ } else {
+ config->admin_thread = config->thread_count;
+ config->journal_thread = config->thread_count++;
+ config->packer_thread = config->thread_count++;
+ assign_thread_ids(config, config->logical_threads, counts.logical_zones);
+ assign_thread_ids(config, config->physical_threads, counts.physical_zones);
+ assign_thread_ids(config, config->hash_zone_threads, counts.hash_zones);
+ }
+
+ config->dedupe_thread = config->thread_count++;
+ config->bio_ack_thread =
+ ((counts.bio_ack_threads > 0) ? config->thread_count++ : VDO_INVALID_THREAD_ID);
+ config->cpu_thread = config->thread_count++;
+ assign_thread_ids(config, config->bio_threads, counts.bio_threads);
+ return VDO_SUCCESS;
+}
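+
+/*
+ * A worked example derived from the assignments above: with one logical, one
+ * physical, and one hash zone, two bio threads, one bio-ack thread, and one
+ * cpu thread, the thread ids come out as:
+ *
+ *	0: admin/journal   1: packer    2: logical0   3: physical0   4: hash0
+ *	5: dedupe          6: bio-ack   7: cpu        8-9: bio0, bio1
+ */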
+
+/**
+ * read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block
+ * device.
+ * @vdo: The vdo whose geometry is to be read.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check read_geometry_block(struct vdo *vdo)
+{
+ struct vio *vio;
+ char *block;
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, u8, __func__, &block);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL,
+ block, &vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(block);
+ return result;
+ }
+
+ /*
+ * This is only safe because, having not already loaded the geometry, the vdo's geometry's
+ * bio_offset field is 0, so the fact that vio_reset_bio() will subtract that offset from
+ * the supplied pbn is not a problem.
+ */
+ result = vio_reset_bio(vio, block, NULL, REQ_OP_READ,
+ VDO_GEOMETRY_BLOCK_LOCATION);
+ if (result != VDO_SUCCESS) {
+ free_vio(vdo_forget(vio));
+ vdo_free(block);
+ return result;
+ }
+
+ bio_set_dev(vio->bio, vdo_get_backing_device(vdo));
+ submit_bio_wait(vio->bio);
+ result = blk_status_to_errno(vio->bio->bi_status);
+ free_vio(vdo_forget(vio));
+ if (result != 0) {
+ vdo_log_error_strerror(result, "synchronous read failed");
+ vdo_free(block);
+ return -EIO;
+ }
+
+ result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry);
+ vdo_free(block);
+ return result;
+}
+
+static bool get_zone_thread_name(const thread_id_t thread_ids[], zone_count_t count,
+ thread_id_t id, const char *prefix,
+ char *buffer, size_t buffer_length)
+{
+ if (id >= thread_ids[0]) {
+ thread_id_t index = id - thread_ids[0];
+
+ if (index < count) {
+ snprintf(buffer, buffer_length, "%s%d", prefix, index);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * get_thread_name() - Format the name of the worker thread desired to support a given work queue.
+ * @thread_config: The thread configuration.
+ * @thread_id: The thread id.
+ * @buffer: Where to put the formatted name.
+ * @buffer_length: Size of the output buffer.
+ *
+ * The physical layer may add a prefix identifying the product; the output from this function
+ * should just identify the thread.
+ */
+static void get_thread_name(const struct thread_config *thread_config,
+ thread_id_t thread_id, char *buffer, size_t buffer_length)
+{
+ if (thread_id == thread_config->journal_thread) {
+ if (thread_config->packer_thread == thread_id) {
+ /*
+ * This is the "single thread" config where one thread is used for the
+ * journal, packer, logical, physical, and hash zones. In that case, it is
+ * known as the "request queue."
+ */
+ snprintf(buffer, buffer_length, "reqQ");
+ return;
+ }
+
+ snprintf(buffer, buffer_length, "journalQ");
+ return;
+ } else if (thread_id == thread_config->admin_thread) {
+ /* Theoretically this could be different from the journal thread. */
+ snprintf(buffer, buffer_length, "adminQ");
+ return;
+ } else if (thread_id == thread_config->packer_thread) {
+ snprintf(buffer, buffer_length, "packerQ");
+ return;
+ } else if (thread_id == thread_config->dedupe_thread) {
+ snprintf(buffer, buffer_length, "dedupeQ");
+ return;
+ } else if (thread_id == thread_config->bio_ack_thread) {
+ snprintf(buffer, buffer_length, "ackQ");
+ return;
+ } else if (thread_id == thread_config->cpu_thread) {
+ snprintf(buffer, buffer_length, "cpuQ");
+ return;
+ }
+
+ if (get_zone_thread_name(thread_config->logical_threads,
+ thread_config->logical_zone_count,
+ thread_id, "logQ", buffer, buffer_length))
+ return;
+
+ if (get_zone_thread_name(thread_config->physical_threads,
+ thread_config->physical_zone_count,
+ thread_id, "physQ", buffer, buffer_length))
+ return;
+
+ if (get_zone_thread_name(thread_config->hash_zone_threads,
+ thread_config->hash_zone_count,
+ thread_id, "hashQ", buffer, buffer_length))
+ return;
+
+ if (get_zone_thread_name(thread_config->bio_threads,
+ thread_config->bio_thread_count,
+ thread_id, "bioQ", buffer, buffer_length))
+ return;
+
+ /* Some sort of misconfiguration? */
+ snprintf(buffer, buffer_length, "reqQ%d", thread_id);
+}
+
+/**
+ * vdo_make_thread() - Construct a single vdo work_queue and its associated thread (or threads for
+ * round-robin queues).
+ * @vdo: The vdo which owns the thread.
+ * @thread_id: The id of the thread to create (as determined by the thread_config).
+ * @type: The description of the work queue for this thread.
+ * @queue_count: The number of actual threads/queues contained in the "thread".
+ * @contexts: An array of queue_count contexts, one for each individual queue; may be NULL.
+ *
+ * Each "thread" constructed by this method is represented by a unique thread id in the thread
+ * config, and completions can be enqueued to the queue and run on the threads comprising this
+ * entity.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
+ const struct vdo_work_queue_type *type,
+ unsigned int queue_count, void *contexts[])
+{
+ struct vdo_thread *thread = &vdo->threads[thread_id];
+ char queue_name[MAX_VDO_WORK_QUEUE_NAME_LEN];
+
+ if (type == NULL)
+ type = &default_queue_type;
+
+ if (thread->queue != NULL) {
+ return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
+ "already constructed vdo thread %u is of the correct type",
+ thread_id);
+ }
+
+ thread->vdo = vdo;
+ thread->thread_id = thread_id;
+ get_thread_name(&vdo->thread_config, thread_id, queue_name, sizeof(queue_name));
+ return vdo_make_work_queue(vdo->thread_name_prefix, queue_name, thread,
+ type, queue_count, contexts, &thread->queue);
+}
+
+/**
+ * register_vdo() - Register a VDO; it must not already be registered.
+ * @vdo: The vdo to register.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int register_vdo(struct vdo *vdo)
+{
+ int result;
+
+ write_lock(&registry.lock);
+ result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
+ "VDO not already registered");
+ if (result == VDO_SUCCESS) {
+ INIT_LIST_HEAD(&vdo->registration);
+ list_add_tail(&vdo->registration, &registry.links);
+ }
+ write_unlock(&registry.lock);
+
+ return result;
+}
+
+/**
+ * initialize_vdo() - Do the portion of initializing a vdo which will clean up after itself on
+ * error.
+ * @vdo: The vdo being initialized
+ * @config: The configuration of the vdo
+ * @instance: The instance number of the vdo
+ * @reason: The buffer to hold the failure reason on error
+ */
+static int initialize_vdo(struct vdo *vdo, struct device_config *config,
+ unsigned int instance, char **reason)
+{
+ int result;
+ zone_count_t i;
+
+ vdo->device_config = config;
+ vdo->starting_sector_offset = config->owning_target->begin;
+ vdo->instance = instance;
+ vdo->allocations_allowed = true;
+ vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_NEW);
+ INIT_LIST_HEAD(&vdo->device_config_list);
+ vdo_initialize_completion(&vdo->admin.completion, vdo, VDO_ADMIN_COMPLETION);
+ init_completion(&vdo->admin.callback_sync);
+ mutex_init(&vdo->stats_mutex);
+ result = read_geometry_block(vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Could not load geometry block";
+ return result;
+ }
+
+ result = initialize_thread_config(config->thread_counts, &vdo->thread_config);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot create thread configuration";
+ return result;
+ }
+
+ vdo_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
+ config->thread_counts.logical_zones,
+ config->thread_counts.physical_zones,
+ config->thread_counts.hash_zones, vdo->thread_config.thread_count);
+
+ /* Compression context storage */
+ result = vdo_allocate(config->thread_counts.cpu_threads, char *, "LZ4 context",
+ &vdo->compression_context);
+ if (result != VDO_SUCCESS) {
+ *reason = "cannot allocate LZ4 context";
+ return result;
+ }
+
+ for (i = 0; i < config->thread_counts.cpu_threads; i++) {
+ result = vdo_allocate(LZ4_MEM_COMPRESS, char, "LZ4 context",
+ &vdo->compression_context[i]);
+ if (result != VDO_SUCCESS) {
+ *reason = "cannot allocate LZ4 context";
+ return result;
+ }
+ }
+
+ result = register_vdo(vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot add VDO to device registry";
+ return result;
+ }
+
+ vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_INITIALIZED);
+ return result;
+}
+
+/**
+ * vdo_make() - Allocate and initialize a vdo.
+ * @instance: Device instantiation counter.
+ * @config: The device configuration.
+ * @reason: The reason for any failure during this call.
+ * @vdo_ptr: A pointer to hold the created vdo.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make(unsigned int instance, struct device_config *config, char **reason,
+ struct vdo **vdo_ptr)
+{
+ int result;
+ struct vdo *vdo;
+
+ /* Initialize with a generic failure reason to prevent returning garbage. */
+ *reason = "Unspecified error";
+
+ result = vdo_allocate(1, struct vdo, __func__, &vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot allocate VDO";
+ return result;
+ }
+
+ result = initialize_vdo(vdo, config, instance, reason);
+ if (result != VDO_SUCCESS) {
+ vdo_destroy(vdo);
+ return result;
+ }
+
+ /* From here on, the caller will clean up if there is an error. */
+ *vdo_ptr = vdo;
+
+ snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
+ "%s%u", MODULE_NAME, instance);
+ BUG_ON(vdo->thread_name_prefix[0] == '\0');
+ result = vdo_allocate(vdo->thread_config.thread_count,
+ struct vdo_thread, __func__, &vdo->threads);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot allocate thread structures";
+ return result;
+ }
+
+ result = vdo_make_thread(vdo, vdo->thread_config.admin_thread,
+ &default_queue_type, 1, NULL);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot make admin thread";
+ return result;
+ }
+
+ result = vdo_make_flusher(vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot make flusher zones";
+ return result;
+ }
+
+ result = vdo_make_packer(vdo, DEFAULT_PACKER_BINS, &vdo->packer);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot make packer zones";
+ return result;
+ }
+
+ BUG_ON(vdo->device_config->logical_block_size <= 0);
+ BUG_ON(vdo->device_config->owned_device == NULL);
+ result = make_data_vio_pool(vdo, MAXIMUM_VDO_USER_VIOS,
+ MAXIMUM_VDO_USER_VIOS * 3 / 4,
+ &vdo->data_vio_pool);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot allocate data_vio pool";
+ return result;
+ }
+
+ result = vdo_make_io_submitter(config->thread_counts.bio_threads,
+ config->thread_counts.bio_rotation_interval,
+ get_data_vio_pool_request_limit(vdo->data_vio_pool),
+ vdo, &vdo->io_submitter);
+ if (result != VDO_SUCCESS) {
+ *reason = "bio submission initialization failed";
+ return result;
+ }
+
+ if (vdo_uses_bio_ack_queue(vdo)) {
+ result = vdo_make_thread(vdo, vdo->thread_config.bio_ack_thread,
+ &bio_ack_q_type,
+ config->thread_counts.bio_ack_threads, NULL);
+ if (result != VDO_SUCCESS) {
+ *reason = "bio ack queue initialization failed";
+ return result;
+ }
+ }
+
+ result = vdo_make_thread(vdo, vdo->thread_config.cpu_thread, &cpu_q_type,
+ config->thread_counts.cpu_threads,
+ (void **) vdo->compression_context);
+ if (result != VDO_SUCCESS) {
+ *reason = "CPU queue initialization failed";
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void finish_vdo(struct vdo *vdo)
+{
+ int i;
+
+ if (vdo->threads == NULL)
+ return;
+
+ vdo_cleanup_io_submitter(vdo->io_submitter);
+ vdo_finish_dedupe_index(vdo->hash_zones);
+
+ for (i = 0; i < vdo->thread_config.thread_count; i++)
+ vdo_finish_work_queue(vdo->threads[i].queue);
+}
+
+/**
+ * free_listeners() - Free the list of read-only listeners associated with a thread.
+ * @thread: The thread holding the list to free.
+ */
+static void free_listeners(struct vdo_thread *thread)
+{
+ struct read_only_listener *listener, *next;
+
+ for (listener = vdo_forget(thread->listeners); listener != NULL; listener = next) {
+ next = vdo_forget(listener->next);
+ vdo_free(listener);
+ }
+}
+
+static void uninitialize_super_block(struct vdo_super_block *super_block)
+{
+ free_vio_components(&super_block->vio);
+ vdo_free(super_block->buffer);
+}
+
+/**
+ * unregister_vdo() - Remove a vdo from the device registry.
+ * @vdo: The vdo to remove.
+ */
+static void unregister_vdo(struct vdo *vdo)
+{
+ write_lock(&registry.lock);
+ if (filter_vdos_locked(vdo_is_equal, vdo) == vdo)
+ list_del_init(&vdo->registration);
+
+ write_unlock(&registry.lock);
+}
+
+/**
+ * vdo_destroy() - Destroy a vdo instance.
+ * @vdo: The vdo to destroy (may be NULL).
+ */
+void vdo_destroy(struct vdo *vdo)
+{
+ unsigned int i;
+
+ if (vdo == NULL)
+ return;
+
+ /* A running VDO should never be destroyed without suspending first. */
+ BUG_ON(vdo_get_admin_state(vdo)->normal);
+
+ vdo->allocations_allowed = true;
+
+ finish_vdo(vdo);
+ unregister_vdo(vdo);
+ free_data_vio_pool(vdo->data_vio_pool);
+ vdo_free_io_submitter(vdo_forget(vdo->io_submitter));
+ vdo_free_flusher(vdo_forget(vdo->flusher));
+ vdo_free_packer(vdo_forget(vdo->packer));
+ vdo_free_recovery_journal(vdo_forget(vdo->recovery_journal));
+ vdo_free_slab_depot(vdo_forget(vdo->depot));
+ vdo_uninitialize_layout(&vdo->layout);
+ vdo_uninitialize_layout(&vdo->next_layout);
+ if (vdo->partition_copier)
+ dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
+ uninitialize_super_block(&vdo->super_block);
+ vdo_free_block_map(vdo_forget(vdo->block_map));
+ vdo_free_hash_zones(vdo_forget(vdo->hash_zones));
+ vdo_free_physical_zones(vdo_forget(vdo->physical_zones));
+ vdo_free_logical_zones(vdo_forget(vdo->logical_zones));
+
+ if (vdo->threads != NULL) {
+ for (i = 0; i < vdo->thread_config.thread_count; i++) {
+ free_listeners(&vdo->threads[i]);
+ vdo_free_work_queue(vdo_forget(vdo->threads[i].queue));
+ }
+ vdo_free(vdo_forget(vdo->threads));
+ }
+
+ uninitialize_thread_config(&vdo->thread_config);
+
+ if (vdo->compression_context != NULL) {
+ for (i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++)
+ vdo_free(vdo_forget(vdo->compression_context[i]));
+
+ vdo_free(vdo_forget(vdo->compression_context));
+ }
+ vdo_free(vdo);
+}
+
+static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block)
+{
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, "encoded super block",
+ (char **) &vdo->super_block.buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return allocate_vio_components(vdo, VIO_TYPE_SUPER_BLOCK,
+ VIO_PRIORITY_METADATA, NULL, 1,
+ (char *) super_block->buffer,
+ &vdo->super_block.vio);
+}
+
+/**
+ * finish_reading_super_block() - Continue after loading the super block.
+ * @completion: The super block vio.
+ *
+ * This callback is registered in vdo_load_super_block().
+ */
+static void finish_reading_super_block(struct vdo_completion *completion)
+{
+ struct vdo_super_block *super_block =
+ container_of(as_vio(completion), struct vdo_super_block, vio);
+
+ vdo_continue_completion(vdo_forget(completion->parent),
+ vdo_decode_super_block(super_block->buffer));
+}
+
+/**
+ * handle_super_block_read_error() - Handle an error reading the super block.
+ * @completion: The super block vio.
+ *
+ * This error handler is registered in vdo_load_super_block().
+ */
+static void handle_super_block_read_error(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ finish_reading_super_block(completion);
+}
+
+static void read_super_block_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo_completion *parent = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_reading_super_block,
+ parent->callback_thread_id);
+}
+
+/**
+ * vdo_load_super_block() - Allocate a super block and read its contents from storage.
+ * @vdo: The vdo containing the super block on disk.
+ * @parent: The completion to notify after loading the super block.
+ */
+void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent)
+{
+ int result;
+
+ result = initialize_super_block(vdo, &vdo->super_block);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(parent, result);
+ return;
+ }
+
+ vdo->super_block.vio.completion.parent = parent;
+ vdo_submit_metadata_vio(&vdo->super_block.vio,
+ vdo_get_data_region_start(vdo->geometry),
+ read_super_block_endio,
+ handle_super_block_read_error,
+ REQ_OP_READ);
+}
+
+/**
+ * vdo_get_backing_device() - Get the block device object underlying a vdo.
+ * @vdo: The vdo.
+ *
+ * Return: The vdo's current block device.
+ */
+struct block_device *vdo_get_backing_device(const struct vdo *vdo)
+{
+ return vdo->device_config->owned_device->bdev;
+}
+
+/**
+ * vdo_get_device_name() - Get the device name associated with the vdo target.
+ * @target: The target device interface.
+ *
+ * Return: The block device name.
+ */
+const char *vdo_get_device_name(const struct dm_target *target)
+{
+ return dm_device_name(dm_table_get_md(target->table));
+}
+
+/**
+ * vdo_synchronous_flush() - Issue a flush request and wait for it to complete.
+ * @vdo: The vdo.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_synchronous_flush(struct vdo *vdo)
+{
+ int result;
+ struct bio bio;
+
+ bio_init(&bio, vdo_get_backing_device(vdo), NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH);
+ submit_bio_wait(&bio);
+ result = blk_status_to_errno(bio.bi_status);
+
+ atomic64_inc(&vdo->stats.flush_out);
+ if (result != 0) {
+ vdo_log_error_strerror(result, "synchronous flush failed");
+ result = -EIO;
+ }
+
+ bio_uninit(&bio);
+ return result;
+}
+
+/**
+ * vdo_get_state() - Get the current state of the vdo.
+ * @vdo: The vdo.
+ *
+ * Context: This method may be called from any thread.
+ *
+ * Return: The current state of the vdo.
+ */
+enum vdo_state vdo_get_state(const struct vdo *vdo)
+{
+ enum vdo_state state = atomic_read(&vdo->state);
+
+ /* pairs with barriers where state field is changed */
+ smp_rmb();
+ return state;
+}
+
+/**
+ * vdo_set_state() - Set the current state of the vdo.
+ * @vdo: The vdo whose state is to be set.
+ * @state: The new state of the vdo.
+ *
+ * Context: This method may be called from any thread.
+ */
+void vdo_set_state(struct vdo *vdo, enum vdo_state state)
+{
+ /* pairs with barrier in vdo_get_state */
+ smp_wmb();
+ atomic_set(&vdo->state, state);
+}
+
+/**
+ * vdo_get_admin_state() - Get the admin state of the vdo.
+ * @vdo: The vdo.
+ *
+ * Return: The code for the vdo's current admin state.
+ */
+const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo)
+{
+ return vdo_get_admin_state_code(&vdo->admin.state);
+}
+
+/**
+ * record_vdo() - Record the state of the VDO for encoding in the super block.
+ */
+static void record_vdo(struct vdo *vdo)
+{
+ /* This is for backwards compatibility. */
+ vdo->states.unused = vdo->geometry.unused;
+ vdo->states.vdo.state = vdo_get_state(vdo);
+ vdo->states.block_map = vdo_record_block_map(vdo->block_map);
+ vdo->states.recovery_journal = vdo_record_recovery_journal(vdo->recovery_journal);
+ vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot);
+ vdo->states.layout = vdo->layout;
+}
+
+/**
+ * continue_super_block_parent() - Continue the parent of a super block save operation.
+ * @completion: The super block vio.
+ *
+ * This callback is registered in vdo_save_components().
+ */
+static void continue_super_block_parent(struct vdo_completion *completion)
+{
+ vdo_continue_completion(vdo_forget(completion->parent), completion->result);
+}
+
+/**
+ * handle_save_error() - Log a super block save error.
+ * @completion: The super block vio.
+ *
+ * This error handler is registered in vdo_save_components().
+ */
+static void handle_save_error(struct vdo_completion *completion)
+{
+ struct vdo_super_block *super_block =
+ container_of(as_vio(completion), struct vdo_super_block, vio);
+
+ vio_record_metadata_io_error(&super_block->vio);
+ vdo_log_error_strerror(completion->result, "super block save failed");
+ /*
+ * Mark the super block as unwritable so that we won't attempt to write it again. This
+ * avoids the case where a growth attempt fails writing the super block with the new size,
+ * but the subsequent attempt to write out the read-only state succeeds. In this case,
+ * writes which happened just before the suspend would not be visible if the VDO is
+ * restarted without rebuilding, but, after a read-only rebuild, the effects of those
+ * writes would reappear.
+ */
+ super_block->unwritable = true;
+ completion->callback(completion);
+}
+
+static void super_block_write_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo_completion *parent = vio->completion.parent;
+
+ continue_vio_after_io(vio, continue_super_block_parent,
+ parent->callback_thread_id);
+}
+
+/**
+ * vdo_save_components() - Encode the vdo and save the super block asynchronously.
+ * @vdo: The vdo whose state is being saved.
+ * @parent: The completion to notify when the save is complete.
+ */
+void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent)
+{
+ struct vdo_super_block *super_block = &vdo->super_block;
+
+ if (super_block->unwritable) {
+ vdo_continue_completion(parent, VDO_READ_ONLY);
+ return;
+ }
+
+ if (super_block->vio.completion.parent != NULL) {
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+ return;
+ }
+
+ record_vdo(vdo);
+
+ vdo_encode_super_block(super_block->buffer, &vdo->states);
+ super_block->vio.completion.parent = parent;
+ super_block->vio.completion.callback_thread_id = parent->callback_thread_id;
+ vdo_submit_metadata_vio(&super_block->vio,
+ vdo_get_data_region_start(vdo->geometry),
+ super_block_write_endio, handle_save_error,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
+}
+
+/**
+ * vdo_register_read_only_listener() - Register a listener to be notified when the VDO goes
+ * read-only.
+ * @vdo: The vdo to register with.
+ * @listener: The object to notify.
+ * @notification: The function to call to send the notification.
+ * @thread_id: The id of the thread on which to send the notification.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
+ vdo_read_only_notification_fn notification,
+ thread_id_t thread_id)
+{
+ struct vdo_thread *thread = &vdo->threads[thread_id];
+ struct read_only_listener *read_only_listener;
+ int result;
+
+ result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread,
+ "read only listener not registered on dedupe thread");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct read_only_listener, __func__,
+ &read_only_listener);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *read_only_listener = (struct read_only_listener) {
+ .listener = listener,
+ .notify = notification,
+ .next = thread->listeners,
+ };
+
+ thread->listeners = read_only_listener;
+ return VDO_SUCCESS;
+}
+
+/**
+ * notify_vdo_of_read_only_mode() - Notify a vdo that it is going read-only.
+ * @listener: The vdo.
+ * @parent: The completion to notify in order to acknowledge the notification.
+ *
+ * This will save the read-only state to the super block.
+ *
+ * Implements vdo_read_only_notification_fn.
+ */
+static void notify_vdo_of_read_only_mode(void *listener, struct vdo_completion *parent)
+{
+ struct vdo *vdo = listener;
+
+ if (vdo_in_read_only_mode(vdo)) {
+ vdo_finish_completion(parent);
+ return;
+ }
+
+ vdo_set_state(vdo, VDO_READ_ONLY_MODE);
+ vdo_save_components(vdo, parent);
+}
+
+/**
+ * vdo_enable_read_only_entry() - Enable a vdo to enter read-only mode on errors.
+ * @vdo: The vdo to enable.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_enable_read_only_entry(struct vdo *vdo)
+{
+ thread_id_t id;
+ bool is_read_only = vdo_in_read_only_mode(vdo);
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+
+ if (is_read_only) {
+ notifier->read_only_error = VDO_READ_ONLY;
+ notifier->state = NOTIFIED;
+ } else {
+ notifier->state = MAY_NOT_NOTIFY;
+ }
+
+ spin_lock_init(&notifier->lock);
+ vdo_initialize_completion(&notifier->completion, vdo,
+ VDO_READ_ONLY_MODE_COMPLETION);
+
+ for (id = 0; id < vdo->thread_config.thread_count; id++)
+ vdo->threads[id].is_read_only = is_read_only;
+
+ return vdo_register_read_only_listener(vdo, vdo, notify_vdo_of_read_only_mode,
+ vdo->thread_config.admin_thread);
+}
+
+/**
+ * vdo_wait_until_not_entering_read_only_mode() - Wait until no read-only notifications are in
+ * progress and prevent any subsequent
+ * notifications.
+ * @parent: The completion to notify when no threads are entering read-only mode.
+ *
+ * Notifications may be re-enabled by calling vdo_allow_read_only_mode_entry().
+ */
+void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent)
+{
+ struct vdo *vdo = parent->vdo;
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (notifier->waiter != NULL) {
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+ return;
+ }
+
+ spin_lock(&notifier->lock);
+ if (notifier->state == NOTIFYING)
+ notifier->waiter = parent;
+ else if (notifier->state == MAY_NOTIFY)
+ notifier->state = MAY_NOT_NOTIFY;
+ spin_unlock(&notifier->lock);
+
+ if (notifier->waiter == NULL) {
+ /*
+ * No notification was in progress, and notifications are now
+ * disallowed.
+ */
+ vdo_launch_completion(parent);
+ return;
+ }
+}
+
+/**
+ * as_notifier() - Convert a generic vdo_completion to a read_only_notifier.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a read_only_notifier.
+ */
+static inline struct read_only_notifier *as_notifier(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_READ_ONLY_MODE_COMPLETION);
+ return container_of(completion, struct read_only_notifier, completion);
+}
+
+/**
+ * finish_entering_read_only_mode() - Complete the process of entering read-only mode.
+ * @completion: The read-only mode completion.
+ */
+static void finish_entering_read_only_mode(struct vdo_completion *completion)
+{
+ struct read_only_notifier *notifier = as_notifier(completion);
+
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ spin_lock(&notifier->lock);
+ notifier->state = NOTIFIED;
+ spin_unlock(&notifier->lock);
+
+ if (notifier->waiter != NULL)
+ vdo_continue_completion(vdo_forget(notifier->waiter),
+ completion->result);
+}
+
+/**
+ * make_thread_read_only() - Inform each thread that the VDO is in read-only mode.
+ * @completion: The read-only mode completion.
+ */
+static void make_thread_read_only(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ thread_id_t thread_id = completion->callback_thread_id;
+ struct read_only_notifier *notifier = as_notifier(completion);
+ struct read_only_listener *listener = completion->parent;
+
+ if (listener == NULL) {
+ /* This is the first call on this thread */
+ struct vdo_thread *thread = &vdo->threads[thread_id];
+
+ thread->is_read_only = true;
+ listener = thread->listeners;
+ if (thread_id == 0)
+ vdo_log_error_strerror(READ_ONCE(notifier->read_only_error),
+ "Unrecoverable error, entering read-only mode");
+ } else {
+ /* We've just finished notifying a listener */
+ listener = listener->next;
+ }
+
+ if (listener != NULL) {
+ /* We have a listener to notify */
+ vdo_prepare_completion(completion, make_thread_read_only,
+ make_thread_read_only, thread_id,
+ listener);
+ listener->notify(listener->listener, completion);
+ return;
+ }
+
+ /* We're done with this thread */
+ if (++thread_id == vdo->thread_config.dedupe_thread) {
+ /*
+ * We don't want to notify the dedupe thread since it may be
+ * blocked rebuilding the index.
+ */
+ thread_id++;
+ }
+
+ if (thread_id >= vdo->thread_config.thread_count) {
+ /* There are no more threads */
+ vdo_prepare_completion(completion, finish_entering_read_only_mode,
+ finish_entering_read_only_mode,
+ vdo->thread_config.admin_thread, NULL);
+ } else {
+ vdo_prepare_completion(completion, make_thread_read_only,
+ make_thread_read_only, thread_id, NULL);
+ }
+
+ vdo_launch_completion(completion);
+}
+
+/**
+ * vdo_allow_read_only_mode_entry() - Allow the notifier to put the VDO into read-only mode,
+ * reversing the effects of
+ * vdo_wait_until_not_entering_read_only_mode().
+ * @parent: The object to notify once the operation is complete.
+ *
+ * If some thread tried to put the vdo into read-only mode while notifications were disallowed,
+ * the pending notification will be performed when this method is called. If that happens, the
+ * parent will not be notified until the vdo has actually entered read-only mode and attempted
+ * to save the super block.
+ *
+ * Context: This method may only be called from the admin thread.
+ */
+void vdo_allow_read_only_mode_entry(struct vdo_completion *parent)
+{
+ struct vdo *vdo = parent->vdo;
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (notifier->waiter != NULL) {
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+ return;
+ }
+
+ spin_lock(&notifier->lock);
+ if (notifier->state == MAY_NOT_NOTIFY) {
+ if (notifier->read_only_error == VDO_SUCCESS) {
+ notifier->state = MAY_NOTIFY;
+ } else {
+ notifier->state = NOTIFYING;
+ notifier->waiter = parent;
+ }
+ }
+ spin_unlock(&notifier->lock);
+
+ if (notifier->waiter == NULL) {
+ /* We're done */
+ vdo_launch_completion(parent);
+ return;
+ }
+
+ /* Do the pending notification. */
+ make_thread_read_only(&notifier->completion);
+}
+
+/**
+ * vdo_enter_read_only_mode() - Put a VDO into read-only mode and save the read-only state in the
+ * super block.
+ * @vdo: The vdo.
+ * @error_code: The error which caused the VDO to enter read-only mode.
+ *
+ * This method is a no-op if the VDO is already read-only.
+ */
+void vdo_enter_read_only_mode(struct vdo *vdo, int error_code)
+{
+ bool notify = false;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+ struct vdo_thread *thread;
+
+ if (thread_id != VDO_INVALID_THREAD_ID) {
+ thread = &vdo->threads[thread_id];
+ if (thread->is_read_only) {
+ /* This thread has already gone read-only. */
+ return;
+ }
+
+ /* Record for this thread that the VDO is read-only. */
+ thread->is_read_only = true;
+ }
+
+ spin_lock(&notifier->lock);
+ if (notifier->read_only_error == VDO_SUCCESS) {
+ WRITE_ONCE(notifier->read_only_error, error_code);
+ if (notifier->state == MAY_NOTIFY) {
+ notifier->state = NOTIFYING;
+ notify = true;
+ }
+ }
+ spin_unlock(&notifier->lock);
+
+ if (!notify) {
+ /* The notifier is already aware of a read-only error */
+ return;
+ }
+
+ /* Initiate a notification starting on the lowest numbered thread. */
+ vdo_launch_completion_callback(&notifier->completion, make_thread_read_only, 0);
+}
+
+/**
+ * vdo_is_read_only() - Check whether the VDO is read-only.
+ * @vdo: The vdo.
+ *
+ * Return: true if the vdo is read-only.
+ *
+ * This method may be called from any thread, as opposed to examining the VDO's state field, which
+ * is only safe to check from the admin thread.
+ */
+bool vdo_is_read_only(struct vdo *vdo)
+{
+ return vdo->threads[vdo_get_callback_thread_id()].is_read_only;
+}
+
+/**
+ * vdo_in_read_only_mode() - Check whether a vdo is in read-only mode.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo is in read-only mode.
+ */
+bool vdo_in_read_only_mode(const struct vdo *vdo)
+{
+ return (vdo_get_state(vdo) == VDO_READ_ONLY_MODE);
+}
+
+/**
+ * vdo_in_recovery_mode() - Check whether the vdo is in recovery mode.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo is in recovery mode.
+ */
+bool vdo_in_recovery_mode(const struct vdo *vdo)
+{
+ return (vdo_get_state(vdo) == VDO_RECOVERING);
+}
+
+/**
+ * vdo_enter_recovery_mode() - Put the vdo into recovery mode.
+ * @vdo: The vdo.
+ */
+void vdo_enter_recovery_mode(struct vdo *vdo)
+{
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (vdo_in_read_only_mode(vdo))
+ return;
+
+ vdo_log_info("Entering recovery mode");
+ vdo_set_state(vdo, VDO_RECOVERING);
+}
+
+/**
+ * complete_synchronous_action() - Signal the waiting thread that a synchronous action is complete.
+ * @completion: The sync completion.
+ */
+static void complete_synchronous_action(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_SYNC_COMPLETION);
+ complete(&(container_of(completion, struct sync_completion,
+ vdo_completion)->completion));
+}
+
+/**
+ * perform_synchronous_action() - Launch an action on a VDO thread and wait for it to complete.
+ * @vdo: The vdo.
+ * @action: The callback to launch.
+ * @thread_id: The thread on which to run the action.
+ * @parent: The parent of the sync completion (may be NULL).
+ */
+static int perform_synchronous_action(struct vdo *vdo, vdo_action_fn action,
+ thread_id_t thread_id, void *parent)
+{
+ struct sync_completion sync;
+
+ vdo_initialize_completion(&sync.vdo_completion, vdo, VDO_SYNC_COMPLETION);
+ init_completion(&sync.completion);
+ sync.vdo_completion.parent = parent;
+ vdo_launch_completion_callback(&sync.vdo_completion, action, thread_id);
+ wait_for_completion(&sync.completion);
+ return sync.vdo_completion.result;
+}
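+
+/*
+ * Note: an action launched through perform_synchronous_action() must eventually call
+ * complete_synchronous_action() on the sync completion so that the waiting thread is woken;
+ * set_compression_callback() below is one such action.
+ */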
+
+/**
+ * set_compression_callback() - Callback to turn compression on or off.
+ * @completion: The completion.
+ */
+static void set_compression_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ bool *enable = completion->parent;
+ bool was_enabled = vdo_get_compressing(vdo);
+
+ if (*enable != was_enabled) {
+ WRITE_ONCE(vdo->compressing, *enable);
+ if (was_enabled) {
+ /* Signal the packer to flush since compression has been disabled. */
+ vdo_flush_packer(vdo->packer);
+ }
+ }
+
+ vdo_log_info("compression is %s", (*enable ? "enabled" : "disabled"));
+ *enable = was_enabled;
+ complete_synchronous_action(completion);
+}
+
+/**
+ * vdo_set_compressing() - Turn compression on or off.
+ * @vdo: The vdo.
+ * @enable: Whether to enable or disable compression.
+ *
+ * Return: Whether compression was previously enabled.
+ */
+bool vdo_set_compressing(struct vdo *vdo, bool enable)
+{
+ perform_synchronous_action(vdo, set_compression_callback,
+ vdo->thread_config.packer_thread,
+ &enable);
+ return enable;
+}
+
+/**
+ * vdo_get_compressing() - Get whether compression is enabled in a vdo.
+ * @vdo: The vdo.
+ *
+ * Return: Whether compression is currently enabled.
+ */
+bool vdo_get_compressing(struct vdo *vdo)
+{
+ return READ_ONCE(vdo->compressing);
+}
+
+static size_t get_block_map_cache_size(const struct vdo *vdo)
+{
+ return ((size_t) vdo->device_config->cache_size) * VDO_BLOCK_SIZE;
+}
+
+static struct error_statistics __must_check get_vdo_error_statistics(const struct vdo *vdo)
+{
+ /*
+ * The error counts can be incremented from arbitrary threads and so must be incremented
+ * atomically, but they are just statistics with no semantics that could rely on memory
+ * order, so unfenced reads are sufficient.
+ */
+ const struct atomic_statistics *atoms = &vdo->stats;
+
+ return (struct error_statistics) {
+ .invalid_advice_pbn_count = atomic64_read(&atoms->invalid_advice_pbn_count),
+ .no_space_error_count = atomic64_read(&atoms->no_space_error_count),
+ .read_only_error_count = atomic64_read(&atoms->read_only_error_count),
+ };
+}
+
+static void copy_bio_stat(struct bio_stats *b, const struct atomic_bio_stats *a)
+{
+ b->read = atomic64_read(&a->read);
+ b->write = atomic64_read(&a->write);
+ b->discard = atomic64_read(&a->discard);
+ b->flush = atomic64_read(&a->flush);
+ b->empty_flush = atomic64_read(&a->empty_flush);
+ b->fua = atomic64_read(&a->fua);
+}
+
+static struct bio_stats subtract_bio_stats(struct bio_stats minuend,
+ struct bio_stats subtrahend)
+{
+ return (struct bio_stats) {
+ .read = minuend.read - subtrahend.read,
+ .write = minuend.write - subtrahend.write,
+ .discard = minuend.discard - subtrahend.discard,
+ .flush = minuend.flush - subtrahend.flush,
+ .empty_flush = minuend.empty_flush - subtrahend.empty_flush,
+ .fua = minuend.fua - subtrahend.fua,
+ };
+}
+
+/**
+ * vdo_get_physical_blocks_allocated() - Get the number of physical blocks in use by user data.
+ * @vdo: The vdo.
+ *
+ * Return: The number of blocks allocated for user data.
+ */
+static block_count_t __must_check vdo_get_physical_blocks_allocated(const struct vdo *vdo)
+{
+ return (vdo_get_slab_depot_allocated_blocks(vdo->depot) -
+ vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal));
+}
+
+/**
+ * vdo_get_physical_blocks_overhead() - Get the number of physical blocks used by vdo metadata.
+ * @vdo: The vdo.
+ *
+ * Return: The number of overhead blocks.
+ */
+static block_count_t __must_check vdo_get_physical_blocks_overhead(const struct vdo *vdo)
+{
+ /*
+ * config.physical_blocks is mutated during resize and is in a packed structure,
+ * but resize runs on the admin thread.
+ * TODO: Verify that this is always safe.
+ */
+ return (vdo->states.vdo.config.physical_blocks -
+ vdo_get_slab_depot_data_blocks(vdo->depot) +
+ vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal));
+}
+
+static const char *vdo_describe_state(enum vdo_state state)
+{
+ /* These strings should all fit in the 15 chars of VDOStatistics.mode. */
+ switch (state) {
+ case VDO_RECOVERING:
+ return "recovering";
+
+ case VDO_READ_ONLY_MODE:
+ return "read-only";
+
+ default:
+ return "normal";
+ }
+}
+
+/**
+ * get_vdo_statistics() - Populate a vdo_statistics structure on the admin thread.
+ * @vdo: The vdo.
+ * @stats: The statistics structure to populate.
+ */
+static void get_vdo_statistics(const struct vdo *vdo, struct vdo_statistics *stats)
+{
+ struct recovery_journal *journal = vdo->recovery_journal;
+ enum vdo_state state = vdo_get_state(vdo);
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ /* start with a clean slate */
+ memset(stats, 0, sizeof(struct vdo_statistics));
+
+ /*
+ * These are immutable properties of the vdo object, so it is safe to query them from any
+ * thread.
+ */
+ stats->version = STATISTICS_VERSION;
+ stats->logical_blocks = vdo->states.vdo.config.logical_blocks;
+ /*
+ * config.physical_blocks is mutated during resize and is in a packed structure, but resize
+ * runs on the admin thread.
+ * TODO: Verify that this is always safe.
+ */
+ stats->physical_blocks = vdo->states.vdo.config.physical_blocks;
+ stats->block_size = VDO_BLOCK_SIZE;
+ stats->complete_recoveries = vdo->states.vdo.complete_recoveries;
+ stats->read_only_recoveries = vdo->states.vdo.read_only_recoveries;
+ stats->block_map_cache_size = get_block_map_cache_size(vdo);
+
+ /* The callees are responsible for thread-safety. */
+ stats->data_blocks_used = vdo_get_physical_blocks_allocated(vdo);
+ stats->overhead_blocks_used = vdo_get_physical_blocks_overhead(vdo);
+ stats->logical_blocks_used = vdo_get_recovery_journal_logical_blocks_used(journal);
+ vdo_get_slab_depot_statistics(vdo->depot, stats);
+ stats->journal = vdo_get_recovery_journal_statistics(journal);
+ stats->packer = vdo_get_packer_statistics(vdo->packer);
+ stats->block_map = vdo_get_block_map_statistics(vdo->block_map);
+ vdo_get_dedupe_statistics(vdo->hash_zones, stats);
+ stats->errors = get_vdo_error_statistics(vdo);
+ stats->in_recovery_mode = (state == VDO_RECOVERING);
+ snprintf(stats->mode, sizeof(stats->mode), "%s", vdo_describe_state(state));
+
+ stats->instance = vdo->instance;
+ stats->current_vios_in_progress = get_data_vio_pool_active_requests(vdo->data_vio_pool);
+ stats->max_vios = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
+
+ stats->flush_out = atomic64_read(&vdo->stats.flush_out);
+ stats->logical_block_size = vdo->device_config->logical_block_size;
+ copy_bio_stat(&stats->bios_in, &vdo->stats.bios_in);
+ copy_bio_stat(&stats->bios_in_partial, &vdo->stats.bios_in_partial);
+ copy_bio_stat(&stats->bios_out, &vdo->stats.bios_out);
+ copy_bio_stat(&stats->bios_meta, &vdo->stats.bios_meta);
+ copy_bio_stat(&stats->bios_journal, &vdo->stats.bios_journal);
+ copy_bio_stat(&stats->bios_page_cache, &vdo->stats.bios_page_cache);
+ copy_bio_stat(&stats->bios_out_completed, &vdo->stats.bios_out_completed);
+ copy_bio_stat(&stats->bios_meta_completed, &vdo->stats.bios_meta_completed);
+ copy_bio_stat(&stats->bios_journal_completed,
+ &vdo->stats.bios_journal_completed);
+ copy_bio_stat(&stats->bios_page_cache_completed,
+ &vdo->stats.bios_page_cache_completed);
+ copy_bio_stat(&stats->bios_acknowledged, &vdo->stats.bios_acknowledged);
+ copy_bio_stat(&stats->bios_acknowledged_partial, &vdo->stats.bios_acknowledged_partial);
+ stats->bios_in_progress =
+ subtract_bio_stats(stats->bios_in, stats->bios_acknowledged);
+ vdo_get_memory_stats(&stats->memory_usage.bytes_used,
+ &stats->memory_usage.peak_bytes_used);
+}
+
+/**
+ * vdo_fetch_statistics_callback() - Action to populate a vdo_statistics
+ * structure on the admin thread.
+ * @completion: The completion.
+ *
+ * This callback is registered in vdo_fetch_statistics().
+ */
+static void vdo_fetch_statistics_callback(struct vdo_completion *completion)
+{
+ get_vdo_statistics(completion->vdo, completion->parent);
+ complete_synchronous_action(completion);
+}
+
+/**
+ * vdo_fetch_statistics() - Fetch statistics on the correct thread.
+ * @vdo: The vdo.
+ * @stats: The vdo statistics are returned here.
+ */
+void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats)
+{
+ perform_synchronous_action(vdo, vdo_fetch_statistics_callback,
+ vdo->thread_config.admin_thread, stats);
+}
+
+/**
+ * vdo_get_callback_thread_id() - Get the id of the callback thread on which a completion is
+ * currently running.
+ *
+ * Return: The current thread ID, or VDO_INVALID_THREAD_ID if the current thread is not a vdo thread.
+ */
+thread_id_t vdo_get_callback_thread_id(void)
+{
+ struct vdo_work_queue *queue = vdo_get_current_work_queue();
+ struct vdo_thread *thread;
+ thread_id_t thread_id;
+
+ if (queue == NULL)
+ return VDO_INVALID_THREAD_ID;
+
+ thread = vdo_get_work_queue_owner(queue);
+ thread_id = thread->thread_id;
+
+ if (PARANOID_THREAD_CONSISTENCY_CHECKS) {
+ BUG_ON(thread_id >= thread->vdo->thread_config.thread_count);
+ BUG_ON(thread != &thread->vdo->threads[thread_id]);
+ }
+
+ return thread_id;
+}
+
+/**
+ * vdo_dump_status() - Dump status information about a vdo to the log for debugging.
+ * @vdo: The vdo to dump.
+ */
+void vdo_dump_status(const struct vdo *vdo)
+{
+ zone_count_t zone;
+
+ vdo_dump_flusher(vdo->flusher);
+ vdo_dump_recovery_journal_statistics(vdo->recovery_journal);
+ vdo_dump_packer(vdo->packer);
+ vdo_dump_slab_depot(vdo->depot);
+
+ for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++)
+ vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]);
+
+ for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++)
+ vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]);
+
+ vdo_dump_hash_zones(vdo->hash_zones);
+}
+
+/**
+ * vdo_assert_on_admin_thread() - Assert that we are running on the admin thread.
+ * @vdo: The vdo.
+ * @name: The name of the function which should be running on the admin thread (for logging).
+ */
+void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
+ "%s called on admin thread", name);
+}
+
+/**
+ * vdo_assert_on_logical_zone_thread() - Assert that this function was called on the specified
+ * logical zone thread.
+ * @vdo: The vdo.
+ * @logical_zone: The number of the logical zone.
+ * @name: The name of the calling function.
+ */
+void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
+ const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
+ vdo->thread_config.logical_threads[logical_zone]),
+ "%s called on logical thread", name);
+}
+
+/**
+ * vdo_assert_on_physical_zone_thread() - Assert that this function was called on the specified
+ * physical zone thread.
+ * @vdo: The vdo.
+ * @physical_zone: The number of the physical zone.
+ * @name: The name of the calling function.
+ */
+void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
+ zone_count_t physical_zone, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
+ vdo->thread_config.physical_threads[physical_zone]),
+ "%s called on physical thread", name);
+}
+
+/**
+ * vdo_get_physical_zone() - Get the physical zone responsible for a given physical block number.
+ * @vdo: The vdo containing the physical zones.
+ * @pbn: The PBN of the data block.
+ * @zone_ptr: A pointer to return the physical zone.
+ *
+ * Gets the physical zone responsible for a given physical block number of a data block in this vdo
+ * instance, or of the zero block (for which a NULL zone is returned). For any other block number
+ * that is not in the range of valid data block numbers in any slab, an error will be returned.
+ * This function is safe to call on invalid block numbers; it will not put the vdo into read-only
+ * mode.
+ *
+ * Return: VDO_SUCCESS, VDO_OUT_OF_RANGE if the block number is invalid, or an error code for any
+ * other failure.
+ */
+int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
+ struct physical_zone **zone_ptr)
+{
+ struct vdo_slab *slab;
+ int result;
+
+ if (pbn == VDO_ZERO_BLOCK) {
+ *zone_ptr = NULL;
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * Used because it does a more restrictive bounds check than vdo_get_slab(), and done first
+ * because it won't trigger read-only mode on an invalid PBN.
+ */
+ if (!vdo_is_physical_data_block(vdo->depot, pbn))
+ return VDO_OUT_OF_RANGE;
+
+ /* With the PBN already checked, we should always succeed in finding a slab. */
+ slab = vdo_get_slab(vdo->depot, pbn);
+ result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number];
+ return VDO_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/vdo.h b/drivers/md/dm-vdo/vdo.h
new file mode 100644
index 000000000000..483ae873e002
--- /dev/null
+++ b/drivers/md/dm-vdo/vdo.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_H
+#define VDO_H
+
+#include <linux/atomic.h>
+#include <linux/blk_types.h>
+#include <linux/completion.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "admin-state.h"
+#include "encodings.h"
+#include "funnel-workqueue.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "statistics.h"
+#include "thread-registry.h"
+#include "types.h"
+
+enum notifier_state {
+ /* Notifications are allowed but not in progress */
+ MAY_NOTIFY,
+ /* A notification is in progress */
+ NOTIFYING,
+ /* Notifications are not allowed */
+ MAY_NOT_NOTIFY,
+ /* A notification has completed */
+ NOTIFIED,
+};
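+
+/*
+ * Rough lifecycle of these states (see the notifier logic in vdo.c): vdo_enable_read_only_entry()
+ * starts the notifier in MAY_NOT_NOTIFY (or NOTIFIED if the vdo was loaded read-only);
+ * vdo_allow_read_only_mode_entry() moves MAY_NOT_NOTIFY to MAY_NOTIFY;
+ * vdo_enter_read_only_mode() moves MAY_NOTIFY to NOTIFYING while the per-thread notifications
+ * run, and finish_entering_read_only_mode() then marks it NOTIFIED.
+ * vdo_wait_until_not_entering_read_only_mode() moves MAY_NOTIFY back to MAY_NOT_NOTIFY.
+ */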
+
+/**
+ * typedef vdo_read_only_notification_fn - A function to notify a listener that the VDO has gone
+ * read-only.
+ * @listener: The object to notify.
+ * @parent: The completion to notify in order to acknowledge the notification.
+ */
+typedef void (*vdo_read_only_notification_fn)(void *listener, struct vdo_completion *parent);
+
+/*
+ * An object to be notified when the VDO enters read-only mode
+ */
+struct read_only_listener {
+ /* The listener */
+ void *listener;
+ /* The method to call to notify the listener */
+ vdo_read_only_notification_fn notify;
+ /* A pointer to the next listener */
+ struct read_only_listener *next;
+};
+
+struct vdo_thread {
+ struct vdo *vdo;
+ thread_id_t thread_id;
+ struct vdo_work_queue *queue;
+ /*
+ * Each thread maintains its own notion of whether the VDO is read-only so that the
+ * read-only state can be checked from any base thread without worrying about
+ * synchronization or thread safety. This does mean that knowledge of the VDO going
+ * read-only does not occur simultaneously across the VDO's threads, but that does not seem
+ * to cause any problems.
+ */
+ bool is_read_only;
+ /*
+ * A list of objects waiting to be notified on this thread that the VDO has entered
+ * read-only mode.
+ */
+ struct read_only_listener *listeners;
+ struct registered_thread allocating_thread;
+};
+
+/* Keep struct bio statistics atomically */
+struct atomic_bio_stats {
+ atomic64_t read; /* Number of REQ_OP_READ bios */
+ atomic64_t write; /* Number of REQ_OP_WRITE bios */
+ atomic64_t discard; /* Number of REQ_OP_DISCARD bios */
+ atomic64_t flush; /* Number of bios with REQ_PREFLUSH set */
+ atomic64_t empty_flush; /* Number of REQ_PREFLUSH bios without data */
+ atomic64_t fua; /* Number of REQ_FUA bios */
+};
+
+/* Counters are atomic since updates can arrive concurrently from arbitrary threads. */
+struct atomic_statistics {
+ atomic64_t bios_submitted;
+ atomic64_t bios_completed;
+ atomic64_t flush_out;
+ atomic64_t invalid_advice_pbn_count;
+ atomic64_t no_space_error_count;
+ atomic64_t read_only_error_count;
+ struct atomic_bio_stats bios_in;
+ struct atomic_bio_stats bios_in_partial;
+ struct atomic_bio_stats bios_out;
+ struct atomic_bio_stats bios_out_completed;
+ struct atomic_bio_stats bios_acknowledged;
+ struct atomic_bio_stats bios_acknowledged_partial;
+ struct atomic_bio_stats bios_meta;
+ struct atomic_bio_stats bios_meta_completed;
+ struct atomic_bio_stats bios_journal;
+ struct atomic_bio_stats bios_journal_completed;
+ struct atomic_bio_stats bios_page_cache;
+ struct atomic_bio_stats bios_page_cache_completed;
+};
+
+struct read_only_notifier {
+ /* The completion for entering read-only mode */
+ struct vdo_completion completion;
+ /* A completion waiting for notifications to be drained or enabled */
+ struct vdo_completion *waiter;
+ /* Lock to protect the next two fields */
+ spinlock_t lock;
+ /* The code of the error which put the VDO into read-only mode */
+ int read_only_error;
+ /* The current state of the notifier (values described above) */
+ enum notifier_state state;
+};
+
+/*
+ * The thread ID returned when the current thread is not a vdo thread, or cannot be determined
+ * (usually because the code is running in interrupt context).
+ */
+#define VDO_INVALID_THREAD_ID ((thread_id_t) -1)
+
+struct thread_config {
+ zone_count_t logical_zone_count;
+ zone_count_t physical_zone_count;
+ zone_count_t hash_zone_count;
+ thread_count_t bio_thread_count;
+ thread_count_t thread_count;
+ thread_id_t admin_thread;
+ thread_id_t journal_thread;
+ thread_id_t packer_thread;
+ thread_id_t dedupe_thread;
+ thread_id_t bio_ack_thread;
+ thread_id_t cpu_thread;
+ thread_id_t *logical_threads;
+ thread_id_t *physical_threads;
+ thread_id_t *hash_zone_threads;
+ thread_id_t *bio_threads;
+};
+
+struct thread_count_config;
+
+struct vdo_super_block {
+ /* The vio for reading and writing the super block to disk */
+ struct vio vio;
+ /* A buffer to hold the super block */
+ u8 *buffer;
+ /* Whether this super block may not be written */
+ bool unwritable;
+};
+
+struct data_vio_pool;
+
+struct vdo_administrator {
+ struct vdo_completion completion;
+ struct admin_state state;
+ atomic_t busy;
+ u32 phase;
+ struct completion callback_sync;
+};
+
+struct vdo {
+ char thread_name_prefix[MAX_VDO_WORK_QUEUE_NAME_LEN];
+ struct vdo_thread *threads;
+ vdo_action_fn action;
+ struct vdo_completion *completion;
+ struct vio_tracer *vio_tracer;
+
+ /* The atomic version of the state of this vdo */
+ atomic_t state;
+ /* The full state of all components */
+ struct vdo_component_states states;
+ /*
+ * A counter value to attach to thread names and log messages to identify the individual
+ * device.
+ */
+ unsigned int instance;
+ /* The read-only notifier */
+ struct read_only_notifier read_only_notifier;
+ /* The load-time configuration of this vdo */
+ struct device_config *device_config;
+ /* The thread mapping */
+ struct thread_config thread_config;
+
+ /* The super block */
+ struct vdo_super_block super_block;
+
+ /* The partitioning of the underlying storage */
+ struct layout layout;
+ struct layout next_layout;
+ struct dm_kcopyd_client *partition_copier;
+
+ /* The block map */
+ struct block_map *block_map;
+
+ /* The journal for block map recovery */
+ struct recovery_journal *recovery_journal;
+
+ /* The slab depot */
+ struct slab_depot *depot;
+
+ /* The compressed-block packer */
+ struct packer *packer;
+ /* Whether incoming data should be compressed */
+ bool compressing;
+
+ /* The handler for flush requests */
+ struct flusher *flusher;
+
+ /* The state the vdo was in when loaded (primarily for unit tests) */
+ enum vdo_state load_state;
+
+ /* The logical zones of this vdo */
+ struct logical_zones *logical_zones;
+
+ /* The physical zones of this vdo */
+ struct physical_zones *physical_zones;
+
+ /* The hash lock zones of this vdo */
+ struct hash_zones *hash_zones;
+
+ /* Bio submission manager used for sending bios to the storage device. */
+ struct io_submitter *io_submitter;
+
+ /* The pool of data_vios for servicing incoming bios */
+ struct data_vio_pool *data_vio_pool;
+
+ /* The manager for administrative operations */
+ struct vdo_administrator admin;
+
+ /* Flags controlling administrative operations */
+ const struct admin_state_code *suspend_type;
+ bool allocations_allowed;
+ bool dump_on_shutdown;
+ atomic_t processing_message;
+
+ /*
+ * Statistics
+ * Atomic stats counters
+ */
+ struct atomic_statistics stats;
+ /* Used to gather statistics without allocating memory */
+ struct vdo_statistics stats_buffer;
+ /* Protects the stats_buffer */
+ struct mutex stats_mutex;
+
+ /* A list of all device_configs referencing this vdo */
+ struct list_head device_config_list;
+
+ /* This VDO's list entry for the device registry */
+ struct list_head registration;
+
+ /* Underlying block device info. */
+ u64 starting_sector_offset;
+ struct volume_geometry geometry;
+
+ /* N blobs of context data for LZ4 code, one per CPU thread. */
+ char **compression_context;
+};
+
+/**
+ * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
+ * for acknowledging received and processed bios.
+ * @vdo: The vdo.
+ *
+ * Note that this directly controls the handling of write operations, but the compile-time flag
+ * VDO_USE_BIO_ACK_QUEUE_FOR_READ is also checked for read operations.
+ *
+ * Return: Whether a bio-acknowledgement work queue is in use.
+ */
+static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)
+{
+ return vdo->device_config->thread_counts.bio_ack_threads > 0;
+}
+
+/**
+ * typedef vdo_filter_fn - Method type for vdo matching methods.
+ *
+ * A filter function returns false if the vdo doesn't match.
+ */
+typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);
+
+void vdo_initialize_device_registry_once(void);
+struct vdo * __must_check vdo_find_matching(vdo_filter_fn filter, const void *context);
+
+int __must_check vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
+ const struct vdo_work_queue_type *type,
+ unsigned int queue_count, void *contexts[]);
+
+static inline int __must_check vdo_make_default_thread(struct vdo *vdo,
+ thread_id_t thread_id)
+{
+ return vdo_make_thread(vdo, thread_id, NULL, 1, NULL);
+}
+
+int __must_check vdo_make(unsigned int instance, struct device_config *config,
+ char **reason, struct vdo **vdo_ptr);
+
+void vdo_destroy(struct vdo *vdo);
+
+void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);
+
+struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);
+
+const char * __must_check vdo_get_device_name(const struct dm_target *target);
+
+int __must_check vdo_synchronous_flush(struct vdo *vdo);
+
+const struct admin_state_code * __must_check vdo_get_admin_state(const struct vdo *vdo);
+
+bool vdo_set_compressing(struct vdo *vdo, bool enable);
+
+bool vdo_get_compressing(struct vdo *vdo);
+
+void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats);
+
+thread_id_t vdo_get_callback_thread_id(void);
+
+enum vdo_state __must_check vdo_get_state(const struct vdo *vdo);
+
+void vdo_set_state(struct vdo *vdo, enum vdo_state state);
+
+void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent);
+
+int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
+ vdo_read_only_notification_fn notification,
+ thread_id_t thread_id);
+
+int vdo_enable_read_only_entry(struct vdo *vdo);
+
+void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent);
+
+void vdo_allow_read_only_mode_entry(struct vdo_completion *parent);
+
+void vdo_enter_read_only_mode(struct vdo *vdo, int error_code);
+
+bool __must_check vdo_is_read_only(struct vdo *vdo);
+
+bool __must_check vdo_in_read_only_mode(const struct vdo *vdo);
+
+bool __must_check vdo_in_recovery_mode(const struct vdo *vdo);
+
+void vdo_enter_recovery_mode(struct vdo *vdo);
+
+void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name);
+
+void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
+ const char *name);
+
+void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, zone_count_t physical_zone,
+ const char *name);
+
+int __must_check vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
+ struct physical_zone **zone_ptr);
+
+void vdo_dump_status(const struct vdo *vdo);
+
+#endif /* VDO_H */
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
new file mode 100644
index 000000000000..b291578f726f
--- /dev/null
+++ b/drivers/md/dm-vdo/vio.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "vio.h"
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/ratelimit.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "constants.h"
+#include "io-submitter.h"
+#include "vdo.h"
+
+/* A vio_pool is a collection of preallocated vios. */
+struct vio_pool {
+ /* The number of objects managed by the pool */
+ size_t size;
+ /* The list of objects which are available */
+ struct list_head available;
+ /* The queue of requestors waiting for objects from the pool */
+ struct vdo_wait_queue waiting;
+ /* The number of objects currently in use */
+ size_t busy_count;
+ /* The list of objects which are in use */
+ struct list_head busy;
+ /* The ID of the thread on which this pool may be used */
+ thread_id_t thread_id;
+ /* The buffer backing the pool's vios */
+ char *buffer;
+ /* The pool entries */
+ struct pooled_vio vios[];
+};
+
+physical_block_number_t pbn_from_vio_bio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+ physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK;
+
+ return ((pbn == VDO_GEOMETRY_BLOCK_LOCATION) ? pbn : pbn + vdo->geometry.bio_offset);
+}
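+
+/*
+ * Worked example (illustrative numbers): with 4096-byte VDO blocks and 512-byte sectors,
+ * VDO_SECTORS_PER_BLOCK is 8, so a bio starting at sector 800 maps to physical block 100.
+ * For anything other than the geometry block, the vdo's bio_offset is then added back to
+ * translate from the storage device's numbering to VDO's physical block numbering.
+ */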
+
+static int create_multi_block_bio(block_count_t size, struct bio **bio_ptr)
+{
+ struct bio *bio = NULL;
+ int result;
+
+ result = vdo_allocate_extended(struct bio, size + 1, struct bio_vec,
+ "bio", &bio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *bio_ptr = bio;
+ return VDO_SUCCESS;
+}
+
+int vdo_create_bio(struct bio **bio_ptr)
+{
+ return create_multi_block_bio(1, bio_ptr);
+}
+
+void vdo_free_bio(struct bio *bio)
+{
+ if (bio == NULL)
+ return;
+
+ bio_uninit(bio);
+ vdo_free(vdo_forget(bio));
+}
+
+int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority, void *parent,
+ unsigned int block_count, char *data, struct vio *vio)
+{
+ struct bio *bio;
+ int result;
+
+ result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
+ "block count %u does not exceed maximum %u", block_count,
+ MAX_BLOCKS_PER_VIO);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
+ "%d is a metadata type", vio_type);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = create_multi_block_bio(block_count, &bio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initialize_vio(vio, bio, block_count, vio_type, priority, vdo);
+ vio->completion.parent = parent;
+ vio->data = data;
+ return VDO_SUCCESS;
+}
+
+/**
+ * create_multi_block_metadata_vio() - Create a vio.
+ * @vdo: The vdo on which the vio will operate.
+ * @vio_type: The type of vio to create.
+ * @priority: The relative priority to assign to the vio.
+ * @parent: The parent of the vio.
+ * @block_count: The size of the vio in blocks.
+ * @data: The buffer.
+ * @vio_ptr: A pointer to hold the new vio.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority, void *parent,
+ unsigned int block_count, char *data,
+ struct vio **vio_ptr)
+{
+ struct vio *vio;
+ int result;
+
+ BUILD_BUG_ON(sizeof(struct vio) > 256);
+
+ /*
+ * Metadata vios should use direct allocation and not use the buffer pool, which is
+ * reserved for submissions from the linux block layer.
+ */
+ result = vdo_allocate(1, struct vio, __func__, &vio);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("metadata vio allocation failure %d", result);
+ return result;
+ }
+
+ result = allocate_vio_components(vdo, vio_type, priority, parent, block_count,
+ data, vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(vio);
+ return result;
+ }
+
+ *vio_ptr = vio;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_vio_components() - Free the components of a vio embedded in a larger structure.
+ * @vio: The vio to destroy
+ */
+void free_vio_components(struct vio *vio)
+{
+ if (vio == NULL)
+ return;
+
+ BUG_ON(is_data_vio(vio));
+ vdo_free_bio(vdo_forget(vio->bio));
+}
+
+/**
+ * free_vio() - Destroy a vio.
+ * @vio: The vio to destroy.
+ */
+void free_vio(struct vio *vio)
+{
+ free_vio_components(vio);
+ vdo_free(vio);
+}
+
+/* Set bio properties for a VDO read or write. */
+void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn)
+{
+ struct vdo *vdo = vio->completion.vdo;
+ struct device_config *config = vdo->device_config;
+
+ pbn -= vdo->geometry.bio_offset;
+ vio->bio_zone = ((pbn / config->thread_counts.bio_rotation_interval) %
+ config->thread_counts.bio_threads);
+
+ bio->bi_private = vio;
+ bio->bi_end_io = callback;
+ bio->bi_opf = bi_opf;
+ bio->bi_iter.bi_sector = pbn * VDO_SECTORS_PER_BLOCK;
+}
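+
+/*
+ * Worked example (assuming a configuration with bio_rotation_interval = 64 and 4 bio threads):
+ * after subtracting the geometry's bio_offset, blocks 0-63 are assigned to bio zone 0, blocks
+ * 64-127 to zone 1, and so on, wrapping back to zone 0 at block 256. The real values come from
+ * the device configuration.
+ */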
+
+/*
+ * Prepares the bio to perform IO with the specified buffer. May only be used on a VDO-allocated
+ * bio, as it assumes the bio wraps a 4k buffer that is 4k aligned, but there does not have to be a
+ * vio associated with the bio.
+ */
+int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn)
+{
+ int bvec_count, offset, len, i;
+ struct bio *bio = vio->bio;
+
+ bio_reset(bio, bio->bi_bdev, bi_opf);
+ vdo_set_bio_properties(bio, vio, callback, bi_opf, pbn);
+ if (data == NULL)
+ return VDO_SUCCESS;
+
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_max_vecs = vio->block_count + 1;
+ len = VDO_BLOCK_SIZE * vio->block_count;
+ offset = offset_in_page(data);
+ bvec_count = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+ /*
+ * If we knew that data was always on one page, or contiguous pages, we wouldn't need the
+ * loop. But if we're using vmalloc, it's not impossible that the data is in different
+ * pages that can't be merged in bio_add_page...
+ */
+ for (i = 0; (i < bvec_count) && (len > 0); i++) {
+ struct page *page;
+ int bytes_added;
+ int bytes = PAGE_SIZE - offset;
+
+ if (bytes > len)
+ bytes = len;
+
+ page = is_vmalloc_addr(data) ? vmalloc_to_page(data) : virt_to_page(data);
+ bytes_added = bio_add_page(bio, page, bytes, offset);
+
+ if (bytes_added != bytes) {
+ return vdo_log_error_strerror(VDO_BIO_CREATION_FAILED,
+ "Could only add %i bytes to bio",
+ bytes_added);
+ }
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ return VDO_SUCCESS;
+}
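+
+/*
+ * For a typical single-block metadata vio whose 4k buffer is page aligned, the loop above adds
+ * exactly one page to the bio. A vmalloc'ed multi-block buffer may instead require one
+ * bio_add_page() call per page it touches, up to block_count + 1 vecs when the buffer is not
+ * page aligned.
+ */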
+
+/**
+ * update_vio_error_stats() - Update per-vio error stats and log the error.
+ * @vio: The vio which got an error.
+ * @format: The format of the message to log (a printf style format).
+ */
+void update_vio_error_stats(struct vio *vio, const char *format, ...)
+{
+ static DEFINE_RATELIMIT_STATE(error_limiter, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ va_list args;
+ int priority;
+ struct vdo *vdo = vio->completion.vdo;
+
+ switch (vio->completion.result) {
+ case VDO_READ_ONLY:
+ atomic64_inc(&vdo->stats.read_only_error_count);
+ return;
+
+ case VDO_NO_SPACE:
+ atomic64_inc(&vdo->stats.no_space_error_count);
+ priority = VDO_LOG_DEBUG;
+ break;
+
+ default:
+ priority = VDO_LOG_ERR;
+ }
+
+ if (!__ratelimit(&error_limiter))
+ return;
+
+ va_start(args, format);
+ vdo_vlog_strerror(priority, vio->completion.result, VDO_LOGGING_MODULE_NAME,
+ format, args);
+ va_end(args);
+}
+
+void vio_record_metadata_io_error(struct vio *vio)
+{
+ const char *description;
+ physical_block_number_t pbn = pbn_from_vio_bio(vio->bio);
+
+ if (bio_op(vio->bio) == REQ_OP_READ) {
+ description = "read";
+ } else if ((vio->bio->bi_opf & REQ_PREFLUSH) == REQ_PREFLUSH) {
+ description = (((vio->bio->bi_opf & REQ_FUA) == REQ_FUA) ?
+ "write+preflush+fua" :
+ "write+preflush");
+ } else if ((vio->bio->bi_opf & REQ_FUA) == REQ_FUA) {
+ description = "write+fua";
+ } else {
+ description = "write";
+ }
+
+ update_vio_error_stats(vio,
+ "Completing %s vio of type %u for physical block %llu with error",
+ description, vio->type, (unsigned long long) pbn);
+}
+
+/**
+ * make_vio_pool() - Create a new vio pool.
+ * @vdo: The vdo.
+ * @pool_size: The number of vios in the pool.
+ * @thread_id: The ID of the thread using this pool.
+ * @vio_type: The type of vios in the pool.
+ * @priority: The priority with which vios from the pool should be enqueued.
+ * @context: The context that each entry will have.
+ * @pool_ptr: The resulting pool.
+ *
+ * Return: A success or error code.
+ */
+int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
+ enum vio_type vio_type, enum vio_priority priority, void *context,
+ struct vio_pool **pool_ptr)
+{
+ struct vio_pool *pool;
+ char *ptr;
+ int result;
+
+ result = vdo_allocate_extended(struct vio_pool, pool_size, struct pooled_vio,
+ __func__, &pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ pool->thread_id = thread_id;
+ INIT_LIST_HEAD(&pool->available);
+ INIT_LIST_HEAD(&pool->busy);
+
+ result = vdo_allocate(pool_size * VDO_BLOCK_SIZE, char,
+ "VIO pool buffer", &pool->buffer);
+ if (result != VDO_SUCCESS) {
+ free_vio_pool(pool);
+ return result;
+ }
+
+ ptr = pool->buffer;
+ for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += VDO_BLOCK_SIZE) {
+ struct pooled_vio *pooled = &pool->vios[pool->size];
+
+ result = allocate_vio_components(vdo, vio_type, priority, NULL, 1, ptr,
+ &pooled->vio);
+ if (result != VDO_SUCCESS) {
+ free_vio_pool(pool);
+ return result;
+ }
+
+ pooled->context = context;
+ list_add_tail(&pooled->pool_entry, &pool->available);
+ }
+
+ *pool_ptr = pool;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_vio_pool() - Destroy a vio pool.
+ * @pool: The pool to free.
+ */
+void free_vio_pool(struct vio_pool *pool)
+{
+ struct pooled_vio *pooled, *tmp;
+
+ if (pool == NULL)
+ return;
+
+ /* Remove all available vios from the object pool. */
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
+ "VIO pool must not have any waiters when being freed");
+ VDO_ASSERT_LOG_ONLY((pool->busy_count == 0),
+ "VIO pool must not have %zu busy entries when being freed",
+ pool->busy_count);
+ VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy),
+ "VIO pool must not have busy entries when being freed");
+
+ list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) {
+ list_del(&pooled->pool_entry);
+ free_vio_components(&pooled->vio);
+ pool->size--;
+ }
+
+ VDO_ASSERT_LOG_ONLY(pool->size == 0,
+ "VIO pool must not have missing entries when being freed");
+
+ vdo_free(vdo_forget(pool->buffer));
+ vdo_free(pool);
+}
+
+/**
+ * is_vio_pool_busy() - Check whether a vio pool has outstanding entries.
+ * @pool: The pool to check.
+ *
+ * Return: true if the pool is busy.
+ */
+bool is_vio_pool_busy(struct vio_pool *pool)
+{
+ return (pool->busy_count != 0);
+}
+
+/**
+ * acquire_vio_from_pool() - Acquire a vio and buffer from the pool (asynchronous).
+ * @pool: The vio pool.
+ * @waiter: Object that is requesting a vio.
+ */
+void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
+{
+ struct pooled_vio *pooled;
+
+ VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
+ "acquire from active vio_pool called from correct thread");
+
+ if (list_empty(&pool->available)) {
+ vdo_waitq_enqueue_waiter(&pool->waiting, waiter);
+ return;
+ }
+
+ pooled = list_first_entry(&pool->available, struct pooled_vio, pool_entry);
+ pool->busy_count++;
+ list_move_tail(&pooled->pool_entry, &pool->busy);
+ (*waiter->callback)(waiter, pooled);
+}
+
+/**
+ * return_vio_to_pool() - Return a vio to the pool.
+ * @pool: The vio pool.
+ * @vio: The pooled vio to return.
+ */
+void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
+{
+ VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
+ "vio pool entry returned on same thread as it was acquired");
+
+ vio->vio.completion.error_handler = NULL;
+ vio->vio.completion.parent = NULL;
+ if (vdo_waitq_has_waiters(&pool->waiting)) {
+ vdo_waitq_notify_next_waiter(&pool->waiting, NULL, vio);
+ return;
+ }
+
+ list_move_tail(&vio->pool_entry, &pool->available);
+ --pool->busy_count;
+}
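+
+/*
+ * A minimal usage sketch (hypothetical caller): embed a struct vdo_waiter in the requesting
+ * object, point waiter->callback at a function which receives the pooled_vio as its context
+ * argument, and call acquire_vio_from_pool() from the pool's thread. When the caller is done
+ * with the entry, it hands it back with return_vio_to_pool() on that same thread.
+ */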
+
+/*
+ * Various counting functions for statistics.
+ * These are used for bios coming into VDO, as well as bios generated by VDO.
+ */
+void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio)
+{
+ if (((bio->bi_opf & REQ_PREFLUSH) != 0) && (bio->bi_iter.bi_size == 0)) {
+ atomic64_inc(&bio_stats->empty_flush);
+ atomic64_inc(&bio_stats->flush);
+ return;
+ }
+
+ switch (bio_op(bio)) {
+ case REQ_OP_WRITE:
+ atomic64_inc(&bio_stats->write);
+ break;
+ case REQ_OP_READ:
+ atomic64_inc(&bio_stats->read);
+ break;
+ case REQ_OP_DISCARD:
+ atomic64_inc(&bio_stats->discard);
+ break;
+ /*
+ * All other operations are filtered out in dmvdo.c, or not created by VDO, so
+ * shouldn't exist.
+ */
+ default:
+ VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
+ bio_op(bio));
+ }
+
+ if ((bio->bi_opf & REQ_PREFLUSH) != 0)
+ atomic64_inc(&bio_stats->flush);
+ if (bio->bi_opf & REQ_FUA)
+ atomic64_inc(&bio_stats->fua);
+}
+
+static void count_all_bios_completed(struct vio *vio, struct bio *bio)
+{
+ struct atomic_statistics *stats = &vio->completion.vdo->stats;
+
+ if (is_data_vio(vio)) {
+ vdo_count_bios(&stats->bios_out_completed, bio);
+ return;
+ }
+
+ vdo_count_bios(&stats->bios_meta_completed, bio);
+ if (vio->type == VIO_TYPE_RECOVERY_JOURNAL)
+ vdo_count_bios(&stats->bios_journal_completed, bio);
+ else if (vio->type == VIO_TYPE_BLOCK_MAP)
+ vdo_count_bios(&stats->bios_page_cache_completed, bio);
+}
+
+void vdo_count_completed_bios(struct bio *bio)
+{
+ struct vio *vio = (struct vio *) bio->bi_private;
+
+ atomic64_inc(&vio->completion.vdo->stats.bios_completed);
+ count_all_bios_completed(vio, bio);
+}
diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h
new file mode 100644
index 000000000000..3490e9f59b04
--- /dev/null
+++ b/drivers/md/dm-vdo/vio.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VIO_H
+#define VIO_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "completion.h"
+#include "constants.h"
+#include "types.h"
+#include "vdo.h"
+
+enum {
+ MAX_BLOCKS_PER_VIO = (BIO_MAX_VECS << PAGE_SHIFT) / VDO_BLOCK_SIZE,
+};
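+
+/*
+ * With 4 KiB pages (PAGE_SHIFT = 12) and the current BIO_MAX_VECS of 256, this works out to
+ * 256 blocks per vio; the exact limit depends on the kernel's page size and BIO_MAX_VECS.
+ */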
+
+struct pooled_vio {
+ /* The underlying vio */
+ struct vio vio;
+ /* The list entry for chaining pooled vios together */
+ struct list_head list_entry;
+ /* The context set by the pool */
+ void *context;
+ /* The list entry used by the pool */
+ struct list_head pool_entry;
+};
+
+/**
+ * as_vio() - Convert a generic vdo_completion to a vio.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a vio.
+ */
+static inline struct vio *as_vio(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VIO_COMPLETION);
+ return container_of(completion, struct vio, completion);
+}
+
+/**
+ * get_vio_bio_zone_thread_id() - Get the thread id of the bio zone in which a vio should submit
+ * its I/O.
+ * @vio: The vio.
+ *
+ * Return: The id of the bio zone thread the vio should use.
+ */
+static inline thread_id_t __must_check get_vio_bio_zone_thread_id(struct vio *vio)
+{
+ return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
+}
+
+physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);
+
+/**
+ * assert_vio_in_bio_zone() - Check that a vio is running on the correct thread for its bio zone.
+ * @vio: The vio to check.
+ */
+static inline void assert_vio_in_bio_zone(struct vio *vio)
+{
+ thread_id_t expected = get_vio_bio_zone_thread_id(vio);
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
+ (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
+ expected);
+}
+
+int vdo_create_bio(struct bio **bio_ptr);
+void vdo_free_bio(struct bio *bio);
+int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority, void *parent,
+ unsigned int block_count, char *data, struct vio *vio);
+int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority,
+ void *parent, unsigned int block_count,
+ char *data, struct vio **vio_ptr);
+
+static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority,
+ void *parent, char *data,
+ struct vio **vio_ptr)
+{
+ return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
+ vio_ptr);
+}
+
+void free_vio_components(struct vio *vio);
+void free_vio(struct vio *vio);
+
+/**
+ * initialize_vio() - Initialize a vio.
+ * @vio: The vio to initialize.
+ * @bio: The bio this vio should use for its I/O.
+ * @block_count: The size of this vio in vdo blocks.
+ * @vio_type: The vio type.
+ * @priority: The relative priority of the vio.
+ * @vdo: The vdo for this vio.
+ */
+static inline void initialize_vio(struct vio *vio, struct bio *bio,
+ unsigned int block_count, enum vio_type vio_type,
+ enum vio_priority priority, struct vdo *vdo)
+{
+ /* data_vios may not span multiple blocks */
+ BUG_ON((vio_type == VIO_TYPE_DATA) && (block_count != 1));
+
+ vio->bio = bio;
+ vio->block_count = block_count;
+ vio->type = vio_type;
+ vio->priority = priority;
+ vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
+}
+
+void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn);
+
+int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn);
+
+void update_vio_error_stats(struct vio *vio, const char *format, ...)
+ __printf(2, 3);
+
+/**
+ * is_data_vio() - Check whether a vio is servicing an external data request.
+ * @vio: The vio to check.
+ */
+static inline bool is_data_vio(struct vio *vio)
+{
+ return (vio->type == VIO_TYPE_DATA);
+}
+
+/**
+ * get_metadata_priority() - Convert a vio's priority to a work item priority.
+ * @vio: The vio.
+ *
+ * Return: The priority with which to submit the vio's bio.
+ */
+static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio)
+{
+ return ((vio->priority == VIO_PRIORITY_HIGH) ?
+ BIO_Q_HIGH_PRIORITY :
+ BIO_Q_METADATA_PRIORITY);
+}
+
+/**
+ * continue_vio() - Enqueue a vio to run its next callback.
+ * @vio: The vio to continue.
+ * @result: The result of the current operation.
+ */
+static inline void continue_vio(struct vio *vio, int result)
+{
+ if (unlikely(result != VDO_SUCCESS))
+ vdo_set_completion_result(&vio->completion, result);
+
+ vdo_enqueue_completion(&vio->completion, VDO_WORK_Q_DEFAULT_PRIORITY);
+}
+
+void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
+void vdo_count_completed_bios(struct bio *bio);
+
+/**
+ * continue_vio_after_io() - Continue a vio now that its I/O has returned.
+ * @vio: The vio to continue.
+ * @callback: The callback to run once the vio has been re-enqueued.
+ * @thread: The ID of the thread on which to run the callback.
+ */
+static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
+ thread_id_t thread)
+{
+ vdo_count_completed_bios(vio->bio);
+ vdo_set_completion_callback(&vio->completion, callback, thread);
+ continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
+}
+
+void vio_record_metadata_io_error(struct vio *vio);
+
+/* A vio_pool is a collection of preallocated vios used to write arbitrary metadata blocks. */
+
+static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
+{
+ return container_of(vio, struct pooled_vio, vio);
+}
+
+struct vio_pool;
+
+int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
+ enum vio_type vio_type, enum vio_priority priority,
+ void *context, struct vio_pool **pool_ptr);
+void free_vio_pool(struct vio_pool *pool);
+bool __must_check is_vio_pool_busy(struct vio_pool *pool);
+void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
+void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
+
+#endif /* VIO_H */
diff --git a/drivers/md/dm-vdo/wait-queue.c b/drivers/md/dm-vdo/wait-queue.c
new file mode 100644
index 000000000000..6e1e739277ef
--- /dev/null
+++ b/drivers/md/dm-vdo/wait-queue.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "wait-queue.h"
+
+#include <linux/device-mapper.h>
+
+#include "permassert.h"
+
+#include "status-codes.h"
+
+/**
+ * vdo_waitq_enqueue_waiter() - Add a waiter to the tail end of a waitq.
+ * @waitq: The vdo_wait_queue to which to add the waiter.
+ * @waiter: The waiter to add to the waitq.
+ *
+ * The waiter must not already be waiting in a waitq.
+ */
+void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *waiter)
+{
+ BUG_ON(waiter->next_waiter != NULL);
+
+ if (waitq->last_waiter == NULL) {
+ /*
+ * The waitq is empty, so form the initial circular list by self-linking the
+ * initial waiter.
+ */
+ waiter->next_waiter = waiter;
+ } else {
+ /* Splice the new waiter in at the end of the waitq. */
+ waiter->next_waiter = waitq->last_waiter->next_waiter;
+ waitq->last_waiter->next_waiter = waiter;
+ }
+
+ /* In both cases, the waiter we added to the ring becomes the last waiter. */
+ waitq->last_waiter = waiter;
+ waitq->length += 1;
+}
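+
+/*
+ * Illustration: after enqueuing waiters A, B, and C in that order, last_waiter is C and the
+ * next_waiter links form the ring A -> B -> C -> A, so the head (the oldest waiter) is always
+ * reachable as last_waiter->next_waiter.
+ */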
+
+/**
+ * vdo_waitq_transfer_all_waiters() - Transfer all waiters from one waitq to
+ * a second waitq, emptying the first waitq.
+ * @from_waitq: The waitq containing the waiters to move.
+ * @to_waitq: The waitq that will receive the waiters from the first waitq.
+ */
+void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq,
+ struct vdo_wait_queue *to_waitq)
+{
+ /* If the source waitq is empty, there's nothing to do. */
+ if (!vdo_waitq_has_waiters(from_waitq))
+ return;
+
+ if (vdo_waitq_has_waiters(to_waitq)) {
+ /*
+ * Both are non-empty. Splice the two circular lists together
+ * by swapping the next (head) pointers in the list tails.
+ */
+ struct vdo_waiter *from_head = from_waitq->last_waiter->next_waiter;
+ struct vdo_waiter *to_head = to_waitq->last_waiter->next_waiter;
+
+ to_waitq->last_waiter->next_waiter = from_head;
+ from_waitq->last_waiter->next_waiter = to_head;
+ }
+
+ to_waitq->last_waiter = from_waitq->last_waiter;
+ to_waitq->length += from_waitq->length;
+ vdo_waitq_init(from_waitq);
+}
+
+/**
+ * vdo_waitq_notify_all_waiters() - Notify all the entries waiting in a waitq.
+ * @waitq: The vdo_wait_queue containing the waiters to notify.
+ * @callback: The function to call to notify each waiter, or NULL to invoke the callback field
+ * registered in each waiter.
+ * @context: The context to pass to the callback function.
+ *
+ * Notifies all the entries waiting in a waitq to continue execution by invoking a callback
+ * function on each of them in turn. The waitq is copied and emptied before invoking any callbacks,
+ * and only the waiters that were in the waitq at the start of the call will be notified.
+ */
+void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context)
+{
+ /*
+ * Copy and empty the waitq first, avoiding the possibility of an infinite
+ * loop if entries are returned to the waitq by the callback function.
+ */
+ struct vdo_wait_queue waiters;
+
+ vdo_waitq_init(&waiters);
+ vdo_waitq_transfer_all_waiters(waitq, &waiters);
+
+ /* Drain the copied waitq, invoking the callback on every entry. */
+ while (vdo_waitq_has_waiters(&waiters))
+ vdo_waitq_notify_next_waiter(&waiters, callback, context);
+}
+
+/**
+ * vdo_waitq_get_first_waiter() - Return the waiter that is at the head end of a waitq.
+ * @waitq: The vdo_wait_queue from which to get the first waiter.
+ *
+ * Return: The first (oldest) waiter in the waitq, or NULL if the waitq is empty.
+ */
+struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq)
+{
+ struct vdo_waiter *last_waiter = waitq->last_waiter;
+
+ if (last_waiter == NULL) {
+ /* There are no waiters, so we're done. */
+ return NULL;
+ }
+
+ /* The waitq is circular, so the last entry links to the head of the waitq. */
+ return last_waiter->next_waiter;
+}
+
+/**
+ * vdo_waitq_dequeue_matching_waiters() - Remove all waiters that match based on the specified
+ * matching method and append them to a vdo_wait_queue.
+ * @waitq: The vdo_wait_queue to process.
+ * @waiter_match: The method to determine matching.
+ * @match_context: Contextual info for the match method.
+ * @matched_waitq: A vdo_wait_queue to store matches.
+ */
+void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_match_fn waiter_match,
+ void *match_context,
+ struct vdo_wait_queue *matched_waitq)
+{
+ struct vdo_wait_queue iteration_waitq;
+
+ vdo_waitq_init(&iteration_waitq);
+ vdo_waitq_transfer_all_waiters(waitq, &iteration_waitq);
+
+ while (vdo_waitq_has_waiters(&iteration_waitq)) {
+ struct vdo_waiter *waiter = vdo_waitq_dequeue_waiter(&iteration_waitq);
+
+ vdo_waitq_enqueue_waiter((waiter_match(waiter, match_context) ?
+ matched_waitq : waitq), waiter);
+ }
+}
+
+/**
+ * vdo_waitq_dequeue_waiter() - Remove the first (oldest) waiter from a waitq.
+ * @waitq: The vdo_wait_queue from which to remove the first entry.
+ *
+ * The caller will be responsible for waking the waiter by continuing its
+ * execution appropriately.
+ *
+ * Return: The first (oldest) waiter in the waitq, or NULL if the waitq is empty.
+ */
+struct vdo_waiter *vdo_waitq_dequeue_waiter(struct vdo_wait_queue *waitq)
+{
+ struct vdo_waiter *first_waiter = vdo_waitq_get_first_waiter(waitq);
+ struct vdo_waiter *last_waiter = waitq->last_waiter;
+
+ if (first_waiter == NULL)
+ return NULL;
+
+ if (first_waiter == last_waiter) {
+ /* The waitq has a single entry, so empty it by nulling the tail. */
+ waitq->last_waiter = NULL;
+ } else {
+ /*
+ * The waitq has multiple waiters, so splice the first waiter out
+ * of the circular waitq.
+ */
+ last_waiter->next_waiter = first_waiter->next_waiter;
+ }
+
+ /* The waiter is no longer in a waitq. */
+ first_waiter->next_waiter = NULL;
+ waitq->length -= 1;
+
+ return first_waiter;
+}
+
+/**
+ * vdo_waitq_notify_next_waiter() - Notify the next entry waiting in a waitq.
+ * @waitq: The vdo_wait_queue containing the waiter to notify.
+ * @callback: The function to call to notify the waiter, or NULL to invoke the callback field
+ * registered in the waiter.
+ * @context: The context to pass to the callback function.
+ *
+ * Notifies the next entry waiting in a waitq to continue execution by invoking a callback function
+ * on it after removing it from the waitq.
+ *
+ * Return: true if there was a waiter in the waitq.
+ */
+bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context)
+{
+ struct vdo_waiter *waiter = vdo_waitq_dequeue_waiter(waitq);
+
+ if (waiter == NULL)
+ return false;
+
+ if (callback == NULL)
+ callback = waiter->callback;
+ callback(waiter, context);
+
+ return true;
+}
diff --git a/drivers/md/dm-vdo/wait-queue.h b/drivers/md/dm-vdo/wait-queue.h
new file mode 100644
index 000000000000..7e8ee6afe7c7
--- /dev/null
+++ b/drivers/md/dm-vdo/wait-queue.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_WAIT_QUEUE_H
+#define VDO_WAIT_QUEUE_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/**
+ * A vdo_wait_queue is a circular singly linked list of entries waiting to be notified
+ * of a change in a condition. Keeping a circular list allows the vdo_wait_queue
+ * structure to simply be a pointer to the tail (newest) entry, supporting
+ * constant-time enqueue and dequeue operations. A null pointer is an empty waitq.
+ *
+ * An empty waitq:
+ * waitq0.last_waiter -> NULL
+ *
+ * A singleton waitq:
+ * waitq1.last_waiter -> entry1 -> entry1 -> [...]
+ *
+ * A three-element waitq:
+ * waitq2.last_waiter -> entry3 -> entry1 -> entry2 -> entry3 -> [...]
+ *
+ * linux/wait.h's wait_queue_head is _not_ used because vdo_wait_queue's
+ * interface is much less complex (it needs no locking, priorities, or timers).
+ * That simplicity is made possible by vdo's thread-based resource allocation
+ * and locking, and by the polling nature of vdo_wait_queue consumers.
+ *
+ * FIXME: this could be built on linux/list.h's list_head, but its extra barriers
+ * really aren't needed, and neither is a doubly linked list. vdo_wait_queue could
+ * make use of __list_del_clearprev(), though that would compromise the ability
+ * to make full use of linux's list interface.
+ */
+
+struct vdo_waiter;
+
+struct vdo_wait_queue {
+ /* The tail of the queue, the last (most recently added) entry */
+ struct vdo_waiter *last_waiter;
+ /* The number of waiters currently in the queue */
+ size_t length;
+};
+
+/**
+ * vdo_waiter_callback_fn - Callback type that will be called to resume processing
+ * of a waiter after it has been removed from its wait queue.
+ */
+typedef void (*vdo_waiter_callback_fn)(struct vdo_waiter *waiter, void *context);
+
+/**
+ * vdo_waiter_match_fn - Function type for waiter matching methods.
+ *
+ * Returns false if the waiter does not match.
+ */
+typedef bool (*vdo_waiter_match_fn)(struct vdo_waiter *waiter, void *context);
+
+/* The structure for entries in a vdo_wait_queue. */
+struct vdo_waiter {
+ /*
+ * The next waiter in the waitq. If this entry is the last waiter, then this
+ * is actually a pointer back to the head of the waitq.
+ */
+ struct vdo_waiter *next_waiter;
+
+ /* Optional waiter-specific callback to invoke when dequeuing this waiter. */
+ vdo_waiter_callback_fn callback;
+};
+
+/**
+ * vdo_waiter_is_waiting() - Check whether a waiter is waiting.
+ * @waiter: The waiter to check.
+ *
+ * Return: true if the waiter is on some vdo_wait_queue.
+ */
+static inline bool vdo_waiter_is_waiting(struct vdo_waiter *waiter)
+{
+ return (waiter->next_waiter != NULL);
+}
+
+/**
+ * vdo_waitq_init() - Initialize a vdo_wait_queue.
+ * @waitq: The vdo_wait_queue to initialize.
+ */
+static inline void vdo_waitq_init(struct vdo_wait_queue *waitq)
+{
+ *waitq = (struct vdo_wait_queue) {
+ .last_waiter = NULL,
+ .length = 0,
+ };
+}
+
+/**
+ * vdo_waitq_has_waiters() - Check whether a vdo_wait_queue has any entries waiting.
+ * @waitq: The vdo_wait_queue to query.
+ *
+ * Return: true if there are any waiters in the waitq.
+ */
+static inline bool __must_check vdo_waitq_has_waiters(const struct vdo_wait_queue *waitq)
+{
+ return (waitq->last_waiter != NULL);
+}
+
+void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq,
+ struct vdo_waiter *waiter);
+
+struct vdo_waiter *vdo_waitq_dequeue_waiter(struct vdo_wait_queue *waitq);
+
+void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context);
+
+bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context);
+
+void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq,
+ struct vdo_wait_queue *to_waitq);
+
+struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq);
+
+void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_match_fn waiter_match,
+ void *match_context,
+ struct vdo_wait_queue *matched_waitq);
+
+/**
+ * vdo_waitq_num_waiters() - Return the number of waiters in a vdo_wait_queue.
+ * @waitq: The vdo_wait_queue to query.
+ *
+ * Return: The number of waiters in the waitq.
+ */
+static inline size_t __must_check vdo_waitq_num_waiters(const struct vdo_wait_queue *waitq)
+{
+ return waitq->length;
+}
+
+#endif /* VDO_WAIT_QUEUE_H */
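For context, a minimal usage sketch of the vdo_wait_queue API declared above. It is not part of the patch: the pending_request structure, pending_request_done() callback, and example_wait_and_notify() function are hypothetical names used only for illustration. A consumer embeds a struct vdo_waiter in its own object, registers a callback, and later drains the queue; passing a NULL callback to the notify helpers invokes each waiter's registered callback field.

#include <linux/container_of.h>

#include "wait-queue.h"

struct pending_request {
	struct vdo_waiter waiter;	/* embedded queue linkage, zeroed while idle */
	int result;
};

/* Invoked for each waiter as the notify helpers dequeue it. */
static void pending_request_done(struct vdo_waiter *waiter, void *context)
{
	struct pending_request *req =
		container_of(waiter, struct pending_request, waiter);

	req->result = *(int *)context;
}

static void example_wait_and_notify(void)
{
	struct vdo_wait_queue waitq;
	struct pending_request req = { .waiter.callback = pending_request_done };
	int status = 0;

	vdo_waitq_init(&waitq);
	/* next_waiter must be NULL on enqueue; the zero-initialized req satisfies that. */
	vdo_waitq_enqueue_waiter(&waitq, &req.waiter);

	/* Once the awaited condition resolves, notify everything that was queued;
	 * a NULL callback means "use each waiter's registered callback field". */
	vdo_waitq_notify_all_waiters(&waitq, NULL, &status);
}

vdo_waitq_dequeue_matching_waiters() follows the same pattern, except that a vdo_waiter_match_fn decides whether each dequeued waiter lands in matched_waitq or is re-queued on the original waitq.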
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index b475200d8586..e46aee6f932e 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -60,7 +60,8 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
* to the data block. Caller is responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
- unsigned int *offset, struct dm_buffer **buf)
+ unsigned int *offset, struct dm_buffer **buf,
+ unsigned short ioprio)
{
u64 position, block, rem;
u8 *res;
@@ -69,7 +70,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned int)rem;
- res = dm_bufio_read(v->fec->bufio, block, buf);
+ res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
if (IS_ERR(res)) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
@@ -121,16 +122,17 @@ static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
* Decode all RS blocks from buffers and copy corrected bytes into fio->output
* starting from block_offset.
*/
-static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
- u64 rsb, int byte_index, unsigned int block_offset,
- int neras)
+static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ struct dm_verity_fec_io *fio, u64 rsb, int byte_index,
+ unsigned int block_offset, int neras)
{
int r, corrected = 0, res;
struct dm_buffer *buf;
unsigned int n, i, offset;
u8 *par, *block;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+ par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
if (IS_ERR(par))
return PTR_ERR(par);
@@ -158,7 +160,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+ par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
if (IS_ERR(par))
return PTR_ERR(par);
}
@@ -210,6 +212,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
u8 *bbuf, *rs_block;
u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned int n, k;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
if (neras)
*neras = 0;
@@ -248,7 +251,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
bufio = v->bufio;
}
- bbuf = dm_bufio_read(bufio, block, &buf);
+ bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
@@ -377,7 +380,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
if (unlikely(r < 0))
return r;
- r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
+ r = fec_decode_bufs(v, io, fio, rsb, r, pos, neras);
if (r < 0)
return r;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 82662f5769c4..bb5da66da4c1 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -46,11 +46,12 @@ static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
-static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
+static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
+ unsigned short ioprio;
sector_t block;
unsigned int n_blocks;
};
@@ -294,10 +295,11 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
int r;
sector_t hash_block;
unsigned int offset;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
verity_hash_at_level(v, block, level, &hash_block, &offset);
- if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
data = dm_bufio_get(v->bufio, hash_block, &buf);
if (data == NULL) {
/*
@@ -307,8 +309,10 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
*/
return -EAGAIN;
}
- } else
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ } else {
+ data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
+ &buf, bio_prio(bio));
+ }
if (IS_ERR(data))
return PTR_ERR(data);
@@ -323,15 +327,14 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
r = verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->hash_dev_block_bits,
- verity_io_real_digest(v, io), !io->in_tasklet);
+ verity_io_real_digest(v, io), !io->in_bh);
if (unlikely(r < 0))
goto release_ret_r;
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
- else if (static_branch_unlikely(&use_tasklet_enabled) &&
- io->in_tasklet) {
+ else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Error handling code (FEC included) cannot be run in a
* tasklet since it may sleep, so fallback to work-queue.
@@ -482,6 +485,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
return 0;
}
+static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+{
+ memcpy(data, io->recheck_buffer, len);
+ io->recheck_buffer += len;
+
+ return 0;
+}
+
+static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter start, sector_t cur_block)
+{
+ struct page *page;
+ void *buffer;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+
+ page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = v->io;
+ io_loc.bdev = v->data_dev->bdev;
+ io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+ io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = verity_hash(v, verity_io_hash_req(v, io), buffer,
+ 1 << v->data_dev_block_bits,
+ verity_io_real_digest(v, io), true);
+ if (unlikely(r))
+ goto free_ret;
+
+ if (memcmp(verity_io_real_digest(v, io),
+ verity_io_want_digest(v, io), v->digest_size)) {
+ r = -EIO;
+ goto free_ret;
+ }
+
+ io->recheck_buffer = buffer;
+ r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = 0;
+free_ret:
+ mempool_free(page, &v->recheck_pool);
+
+ return r;
+}
+
static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len)
{
@@ -508,16 +568,14 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
-#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
-#endif
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
unsigned int b;
- if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Copy the iterator in case we need to restart
* verification in a work-queue.
@@ -557,14 +615,11 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ r = verity_hash_init(v, req, &wait, !io->in_bh);
if (unlikely(r < 0))
return r;
-#if defined(CONFIG_DM_VERITY_FEC)
- if (verity_fec_is_enabled(v))
- start = *iter;
-#endif
+ start = *iter;
r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -579,13 +634,16 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
- } else if (static_branch_unlikely(&use_tasklet_enabled) &&
- io->in_tasklet) {
+ } else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Error handling code (FEC included) cannot be run in a
* tasklet since it may sleep, so fallback to work-queue.
*/
return -EAGAIN;
+ } else if (verity_recheck(v, io, start, cur_block) == 0) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
+ continue;
#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
@@ -630,7 +688,7 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
- if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
verity_fec_finish_io(io);
bio_endio(bio);
@@ -640,11 +698,28 @@ static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
- io->in_tasklet = false;
+ io->in_bh = false;
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
+static void verity_bh_work(struct work_struct *w)
+{
+ struct dm_verity_io *io = container_of(w, struct dm_verity_io, bh_work);
+ int err;
+
+ io->in_bh = true;
+ err = verity_verify_io(io);
+ if (err == -EAGAIN || err == -ENOMEM) {
+ /* fallback to retrying with work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ return;
+ }
+
+ verity_finish_io(io, errno_to_blk_status(err));
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -657,8 +732,13 @@ static void verity_end_io(struct bio *bio)
return;
}
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq) {
+ INIT_WORK(&io->bh_work, verity_bh_work);
+ queue_work(system_bh_wq, &io->bh_work);
+ } else {
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ }
}
/*
@@ -696,14 +776,16 @@ static void verity_prefetch_io(struct work_struct *work)
hash_block_end = v->hash_blocks - 1;
}
no_prefetch_cluster:
- dm_bufio_prefetch(v->bufio, hash_block_start,
- hash_block_end - hash_block_start + 1);
+ dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
+ hash_block_end - hash_block_start + 1,
+ pw->ioprio);
}
kfree(pw);
}
-static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
+ unsigned short ioprio)
{
sector_t block = io->block;
unsigned int n_blocks = io->n_blocks;
@@ -731,6 +813,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
pw->v = v;
pw->block = block;
pw->n_blocks = n_blocks;
+ pw->ioprio = ioprio;
queue_work(v->verify_wq, &pw->work);
}
@@ -773,7 +856,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_fec_init_io(io);
- verity_submit_prefetch(v, io);
+ verity_submit_prefetch(v, io, bio_prio(bio));
submit_bio_noacct(bio);
@@ -822,7 +905,7 @@ static void verity_status(struct dm_target *ti, status_type_t type,
args++;
if (v->validated_blocks)
args++;
- if (v->use_tasklet)
+ if (v->use_bh_wq)
args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
@@ -849,7 +932,7 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
- if (v->use_tasklet)
+ if (v->use_bh_wq)
DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
@@ -941,6 +1024,10 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
+ mempool_exit(&v->recheck_pool);
+ if (v->io)
+ dm_io_client_destroy(v->io);
+
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -964,8 +1051,8 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->signature_key_desc);
- if (v->use_tasklet)
- static_branch_dec(&use_tasklet_enabled);
+ if (v->use_bh_wq)
+ static_branch_dec(&use_bh_wq_enabled);
kfree(v);
@@ -1099,8 +1186,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
- v->use_tasklet = true;
- static_branch_inc(&use_tasklet_enabled);
+ v->use_bh_wq = true;
+ static_branch_inc(&use_bh_wq_enabled);
continue;
} else if (verity_is_fec_opt_arg(arg_name)) {
@@ -1271,7 +1358,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->tfm = crypto_alloc_ahash(v->alg_name, 0,
- v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
+ v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
@@ -1379,10 +1466,24 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->hash_blocks = hash_position;
+ r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
+ if (unlikely(r)) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
+ v->io = dm_io_client_create();
+ if (IS_ERR(v->io)) {
+ r = PTR_ERR(v->io);
+ v->io = NULL;
+ ti->error = "Cannot allocate dm io";
+ goto bad;
+ }
+
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL,
- v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
+ v->use_bh_wq ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
@@ -1401,7 +1502,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* reducing wait times when reading from a dm-verity device.
*
* Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
- * allows verify_wq to preempt softirq since verification in tasklet
+ * allows verify_wq to preempt softirq since verification in BH workqueue
* will fall-back to using it for error handling (or if the bufio cache
* doesn't have required hashes).
*/
@@ -1485,8 +1586,8 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
- .features = DM_TARGET_IMMUTABLE,
- .version = {1, 9, 0},
+ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f3f607008419..20b1bcf03474 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -11,6 +11,7 @@
#ifndef DM_VERITY_H
#define DM_VERITY_H
+#include <linux/dm-io.h>
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
#include <linux/interrupt.h>
@@ -53,7 +54,7 @@ struct dm_verity {
unsigned char levels; /* the number of tree levels */
unsigned char version;
bool hash_failed:1; /* set if hash of any block failed */
- bool use_tasklet:1; /* try to verify in tasklet before work-queue */
+ bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */
unsigned int digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
@@ -68,6 +69,9 @@ struct dm_verity {
unsigned long *validated_blocks; /* bitset blocks validated */
char *signature_key_desc; /* signature keyring reference */
+
+ struct dm_io_client *io;
+ mempool_t recheck_pool;
};
struct dm_verity_io {
@@ -76,13 +80,16 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
+ struct bvec_iter iter;
+
sector_t block;
unsigned int n_blocks;
- bool in_tasklet;
-
- struct bvec_iter iter;
+ bool in_bh;
struct work_struct work;
+ struct work_struct bh_work;
+
+ char *recheck_buffer;
/*
* Three variably-size fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index b463c28c39ad..7ce8847b3404 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
req.notify.context = &endio;
/* writing via async dm-io (implied by notify.fn above) won't return an error */
- (void) dm_io(&req, 1, &region, NULL);
+ (void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
i = j;
}
@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
req.notify.fn = NULL;
req.notify.context = NULL;
- r = dm_io(&req, 1, &region, NULL);
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
writecache_error(wc, r, "error writing superblock");
}
@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
req.client = wc->dm_io;
req.notify.fn = NULL;
- r = dm_io(&req, 1, &region, NULL);
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
writecache_error(wc, r, "error flushing metadata: %d", r);
}
@@ -990,7 +990,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
req.client = wc->dm_io;
req.notify.fn = NULL;
- return dm_io(&req, 1, &region, NULL);
+ return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
}
static void writecache_resume(struct dm_target *ti)
@@ -2776,5 +2776,5 @@ static struct target_type writecache_target = {
module_dm(writecache);
MODULE_DESCRIPTION(DM_NAME " writecache target");
-MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
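Every dm_io() call site in the hunks above now passes an explicit I/O priority as a new fifth argument. Below is a condensed sketch of the synchronous flush pattern used by ssd_commit_flushed()/writecache_disk_flush() against the updated signature; example_flush() and its parameters are hypothetical and error handling is trimmed.

#include <linux/blkdev.h>
#include <linux/dm-io.h>
#include <linux/ioprio.h>

static int example_flush(struct dm_io_client *client, struct block_device *bdev)
{
	struct dm_io_region region = {
		.bdev = bdev,
		.sector = 0,
		.count = 0,		/* zero-length region: issue only the flush */
	};
	struct dm_io_request req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = client,
		/* .notify.fn left NULL, so dm_io() completes synchronously */
	};

	return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
}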
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index fdfe30f7b697..8156881a31de 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1655,10 +1655,13 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
struct dmz_dev *dev = zone->dev;
+ unsigned int noio_flag;
+ noio_flag = memalloc_noio_save();
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
- zmd->zone_nr_sectors, GFP_NOIO);
+ zmd->zone_nr_sectors);
+ memalloc_noio_restore(noio_flag);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
zone->id, ret);
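blkdev_zone_mgmt() no longer takes a gfp_t, so callers that must avoid I/O recursion now wrap the call in a memalloc_noio scope, as the hunk above does. A minimal sketch of that pattern follows; reset_one_zone() is a hypothetical helper.

#include <linux/blkdev.h>
#include <linux/sched/mm.h>

static int reset_one_zone(struct block_device *bdev, sector_t start,
			  sector_t nr_sectors)
{
	unsigned int noio_flag;
	int ret;

	/* Any allocation done under zone management is implicitly GFP_NOIO here. */
	noio_flag = memalloc_noio_save();
	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, start, nr_sectors);
	memalloc_noio_restore(noio_flag);

	return ret;
}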
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 10c73af93d00..56aa2a8b9d71 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -726,7 +726,8 @@ static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
+ struct block_device *bdev;
u64 part_off;
int r;
@@ -735,34 +736,36 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
- bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
- if (IS_ERR(bdev_handle)) {
- r = PTR_ERR(bdev_handle);
+ bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
+ if (IS_ERR(bdev_file)) {
+ r = PTR_ERR(bdev_file);
goto out_free_td;
}
+ bdev = file_bdev(bdev_file);
+
/*
* We can be called before the dm disk is added. In that case we can't
* register the holder relation here. It will be done once add_disk was
* called.
*/
if (md->disk->slave_dir) {
- r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
+ r = bd_link_disk_holder(bdev, md->disk);
if (r)
goto out_blkdev_put;
}
td->dm_dev.mode = mode;
- td->dm_dev.bdev = bdev_handle->bdev;
- td->dm_dev.bdev_handle = bdev_handle;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
+ td->dm_dev.bdev = bdev;
+ td->dm_dev.bdev_file = bdev_file;
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
- bdev_release(bdev_handle);
+ fput(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -775,7 +778,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- bdev_release(td->dm_dev.bdev_handle);
+ fput(td->dm_dev.bdev_file);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
@@ -2099,8 +2102,8 @@ static struct mapped_device *alloc_dev(int minor)
* established. If request-based table is loaded: blk-mq will
* override accordingly.
*/
- md->disk = blk_alloc_disk(md->numa_node_id);
- if (!md->disk)
+ md->disk = blk_alloc_disk(NULL, md->numa_node_id);
+ if (IS_ERR(md->disk))
goto bad;
md->queue = md->disk->queue;
@@ -2946,6 +2949,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
static void __dm_internal_resume(struct mapped_device *md)
{
+ int r;
+ struct dm_table *map;
+
BUG_ON(!md->internal_suspend_count);
if (--md->internal_suspend_count)
@@ -2954,12 +2960,23 @@ static void __dm_internal_resume(struct mapped_device *md)
if (dm_suspended_md(md))
goto done; /* resume from nested suspend */
- /*
- * NOTE: existing callers don't need to call dm_table_resume_targets
- * (which may fail -- so best to avoid it for now by passing NULL map)
- */
- (void) __dm_resume(md, NULL);
-
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ r = __dm_resume(md, map);
+ if (r) {
+ /*
+ * If a preresume method of some target failed, we are in a
+ * tricky situation. We can't return an error to the caller. We
+ * can't fake success because then the "resume" and
+ * "postsuspend" methods would not be paired correctly, and it
+ * would break various targets, for example it would cause list
+ * corruption in the "origin" target.
+ *
+ * So, we fake normal suspend here, to make sure that the
+ * "resume" and "postsuspend" methods will be paired correctly.
+ */
+ DMERR("Preresume method failed: %d", r);
+ set_bit(DMF_SUSPENDED, &md->flags);
+ }
done:
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
smp_mb__after_atomic();
@@ -3513,5 +3530,5 @@ module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
MODULE_DESCRIPTION(DM_NAME " driver");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 9672f75c3050..059afc24c08b 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -234,7 +234,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
sector_t doff;
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
- if (pg_index == store->file_pages - 1) {
+ /* compare the page count relative to sb_index, not the absolute page offset. */
+ if ((pg_index - store->sb_index) == store->file_pages - 1) {
unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
if (last_page_size == 0)
@@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
struct page *page = store->filemap[pg_index];
if (mddev_is_clustered(bitmap->mddev)) {
- pg_index += bitmap->cluster_slot *
- DIV_ROUND_UP(store->bytes, PAGE_SIZE);
+ /* go to node bitmap area starting point */
+ pg_index += store->sb_index;
}
if (store->file)
@@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
unsigned long index = file_page_index(store, chunk);
unsigned long node_offset = 0;
+ index += store->sb_index;
if (mddev_is_clustered(bitmap->mddev))
node_offset = bitmap->cluster_slot * store->file_pages;
@@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
unsigned long index = file_page_index(store, chunk);
unsigned long node_offset = 0;
+ index += store->sb_index;
if (mddev_is_clustered(bitmap->mddev))
node_offset = bitmap->cluster_slot * store->file_pages;
@@ -1043,9 +1046,8 @@ void md_bitmap_unplug(struct bitmap *bitmap)
if (dirty || need_write) {
if (!writing) {
md_bitmap_wait_writes(bitmap);
- if (bitmap->mddev->queue)
- blk_add_trace_msg(bitmap->mddev->queue,
- "md bitmap_unplug");
+ mddev_add_trace_msg(bitmap->mddev,
+ "md bitmap_unplug");
}
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
filemap_write_page(bitmap, i, false);
@@ -1316,9 +1318,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
- if (bitmap->mddev->queue)
- blk_add_trace_msg(bitmap->mddev->queue,
- "md bitmap_daemon_work");
+ mddev_add_trace_msg(bitmap->mddev, "md bitmap_daemon_work");
/* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes
diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h
deleted file mode 100644
index 5587eeedb882..000000000000
--- a/drivers/md/md-linear.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINEAR_H
-#define _LINEAR_H
-
-struct dev_info {
- struct md_rdev *rdev;
- sector_t end_sector;
-};
-
-struct linear_conf
-{
- struct rcu_head rcu;
- sector_t array_sectors;
- int raid_disks; /* a copy of mddev->raid_disks */
- struct dev_info disks[] __counted_by(raid_disks);
-};
-#endif
diff --git a/drivers/md/md-multipath.h b/drivers/md/md-multipath.h
deleted file mode 100644
index b3099e5fc4d7..000000000000
--- a/drivers/md/md-multipath.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MULTIPATH_H
-#define _MULTIPATH_H
-
-struct multipath_info {
- struct md_rdev *rdev;
-};
-
-struct mpconf {
- struct mddev *mddev;
- struct multipath_info *multipaths;
- int raid_disks;
- spinlock_t device_lock;
- struct list_head retry_list;
-
- mempool_t pool;
-};
-
-/*
- * this is our 'private' 'collective' MULTIPATH buffer head.
- * it contains information about what kind of IO operations were started
- * for this MULTIPATH operation, and about their status:
- */
-
-struct multipath_bh {
- struct mddev *mddev;
- struct bio *master_bio;
- struct bio bio;
- int path;
- struct list_head retry_list;
-};
-#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2266358d8074..e575e74aabf5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -65,7 +65,6 @@
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>
-#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"
@@ -99,18 +98,6 @@ static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
-enum md_ro_state {
- MD_RDWR,
- MD_RDONLY,
- MD_AUTO_READ,
- MD_MAX_STATE
-};
-
-static bool md_is_rdwr(struct mddev *mddev)
-{
- return (mddev->ro == MD_RDWR);
-}
-
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
@@ -378,7 +365,7 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
return true;
}
-void md_handle_request(struct mddev *mddev, struct bio *bio)
+bool md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
if (is_suspended(mddev, bio)) {
@@ -386,7 +373,7 @@ check_suspended:
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
- return;
+ return true;
}
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
@@ -402,10 +389,13 @@ check_suspended:
if (!mddev->pers->make_request(mddev, bio)) {
percpu_ref_put(&mddev->active_io);
+ if (!mddev->gendisk && mddev->pers->prepare_suspend)
+ return false;
goto check_suspended;
}
percpu_ref_put(&mddev->active_io);
+ return true;
}
EXPORT_SYMBOL(md_handle_request);
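The bool return exists so that a stacked caller can requeue a bio instead of blocking while the array is suspended. A hypothetical caller-side sketch in the style of a device-mapper map function (the dm-raid changes themselves are not shown in this hunk, and the assumption that ti->private holds the mddev is purely illustrative):

#include <linux/device-mapper.h>

#include "md.h"

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct mddev *mddev = ti->private;	/* assumption: ti->private holds the mddev */

	/* md could not take the bio (e.g. suspended without a gendisk): requeue it. */
	if (!md_handle_request(mddev, bio))
		return DM_MAPIO_REQUEUE;

	return DM_MAPIO_SUBMITTED;
}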
@@ -529,6 +519,24 @@ void mddev_resume(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_resume);
+/* sync the bdev before setting the device read-only or stopping the array */
+static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
+{
+ mutex_lock(&mddev->open_mutex);
+ if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
+ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
+ mutex_unlock(&mddev->open_mutex);
+
+ sync_blockdev(mddev->gendisk->part0);
+ return 0;
+}
+
/*
* Generic flush handling for md
*/
@@ -579,8 +587,12 @@ static void submit_flushes(struct work_struct *ws)
rcu_read_lock();
}
rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending))
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* This pairs with the percpu_ref_get() in md_flush_request() */
+ percpu_ref_put(&mddev->active_io);
+
queue_work(md_wq, &mddev->flush_work);
+ }
}
static void md_submit_flush_data(struct work_struct *ws)
@@ -2402,7 +2414,7 @@ int md_integrity_register(struct mddev *mddev)
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
- if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+ if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
@@ -2455,7 +2467,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_mddev;
- if (!mddev->gendisk)
+ if (mddev_is_dm(mddev))
return 0;
bi_mddev = blk_get_integrity(mddev->gendisk);
@@ -2562,6 +2574,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
fail:
pr_warn("md: failed to register dev-%s for %s\n",
b, mdname(mddev));
+ mddev_destroy_serial_pool(mddev, rdev);
return err;
}
@@ -2578,7 +2591,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- bdev_release(rdev->bdev_handle);
+ fput(rdev->bdev_file);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@@ -2591,7 +2604,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev)
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%pg>\n", rdev->bdev);
mddev_destroy_serial_pool(rdev->mddev, rdev);
- rdev->mddev = NULL;
+ WRITE_ONCE(rdev->mddev, NULL);
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
sysfs_put(rdev->sysfs_unack_badblocks);
@@ -2847,8 +2860,7 @@ repeat:
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
- if (mddev->queue)
- blk_add_trace_msg(mddev->queue, "md md_update_sb");
+ mddev_add_trace_msg(mddev, "md md_update_sb");
rewrite:
md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
@@ -2929,7 +2941,6 @@ static int add_bound_rdev(struct md_rdev *rdev)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_new_event();
- md_wakeup_thread(mddev->thread);
return 0;
}
@@ -3044,10 +3055,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err == 0) {
md_kick_rdev_from_array(rdev);
- if (mddev->pers) {
+ if (mddev->pers)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- md_wakeup_thread(mddev->thread);
- }
md_new_event();
}
}
@@ -3077,7 +3086,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
- md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
@@ -3115,7 +3123,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
- md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
@@ -3245,7 +3252,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->raid_disk >= 0)
return -EBUSY;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
- md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
@@ -3339,8 +3345,7 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
if (kstrtoull(buf, 10, &new_offset) < 0)
return -EINVAL;
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
@@ -3671,7 +3676,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
struct kernfs_node *kn = NULL;
bool suspend = false;
ssize_t rv;
- struct mddev *mddev = rdev->mddev;
+ struct mddev *mddev = READ_ONCE(rdev->mddev);
if (!entry->store)
return -EIO;
@@ -3773,16 +3778,16 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
- rdev->bdev_handle = bdev_open_by_dev(newdev,
+ rdev->bdev_file = bdev_file_open_by_dev(newdev,
BLK_OPEN_READ | BLK_OPEN_WRITE,
super_format == -2 ? &claim_rdev : rdev, NULL);
- if (IS_ERR(rdev->bdev_handle)) {
+ if (IS_ERR(rdev->bdev_file)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
- err = PTR_ERR(rdev->bdev_handle);
+ err = PTR_ERR(rdev->bdev_file);
goto out_clear_rdev;
}
- rdev->bdev = rdev->bdev_handle->bdev;
+ rdev->bdev = file_bdev(rdev->bdev_file);
kobject_init(&rdev->kobj, &rdev_ktype);
@@ -3813,7 +3818,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
- bdev_release(rdev->bdev_handle);
+ fput(rdev->bdev_file);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
@@ -4013,8 +4018,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
*/
rv = -EBUSY;
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
goto out_unlock;
@@ -4164,7 +4168,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
- blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->thread)
@@ -4471,8 +4474,8 @@ array_state_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
-static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
-static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
+static int do_md_stop(struct mddev *mddev, int ro);
+static int md_set_readonly(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
@@ -4489,6 +4492,17 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case broken: /* cannot be set */
case bad_word:
return -EINVAL;
+ case clear:
+ case readonly:
+ case inactive:
+ case read_auto:
+ if (!mddev->pers || !md_is_rdwr(mddev))
+ break;
+ /* writing via sysfs does not open mddev, so the opener count should be 0 */
+ err = mddev_set_closing_and_sync_blockdev(mddev, 0);
+ if (err)
+ return err;
+ break;
default:
break;
}
@@ -4522,14 +4536,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case inactive:
/* stop an active array, return 0 otherwise */
if (mddev->pers)
- err = do_md_stop(mddev, 2, NULL);
+ err = do_md_stop(mddev, 2);
break;
case clear:
- err = do_md_stop(mddev, 0, NULL);
+ err = do_md_stop(mddev, 0);
break;
case readonly:
if (mddev->pers)
- err = md_set_readonly(mddev, NULL);
+ err = md_set_readonly(mddev);
else {
mddev->ro = MD_RDONLY;
set_disk_ro(mddev->gendisk, 1);
@@ -4539,7 +4553,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (md_is_rdwr(mddev))
- err = md_set_readonly(mddev, NULL);
+ err = md_set_readonly(mddev);
else if (mddev->ro == MD_RDONLY)
err = restart_array(mddev);
if (err == 0) {
@@ -4588,6 +4602,11 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
mddev_unlock(mddev);
+
+ if (st == readonly || st == read_auto || st == inactive ||
+ (err && st == clear))
+ clear_bit(MD_CLOSING, &mddev->flags);
+
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
@@ -4915,6 +4934,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
mddev_lock_nointr(mddev);
}
+void md_idle_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, true);
+}
+EXPORT_SYMBOL_GPL(md_idle_sync_thread);
+
+void md_frozen_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, false);
+}
+EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
+
+void md_unfrozen_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
+}
+EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
+
static void idle_sync_thread(struct mddev *mddev)
{
mutex_lock(&mddev->sync_mutex);
@@ -5706,6 +5754,51 @@ static const struct kobj_type md_ktype = {
int mdp_major = 0;
+/* stack the limits of all rdevs into lim */
+void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev) {
+ queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
+ mddev->gendisk->disk_name);
+ }
+}
+EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
+
+/* apply the extra stacking limits from a new rdev into mddev */
+int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct queue_limits lim;
+
+ if (mddev_is_dm(mddev))
+ return 0;
+
+ lim = queue_limits_start_update(mddev->gendisk->queue);
+ queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
+ mddev->gendisk->disk_name);
+ return queue_limits_commit_update(mddev->gendisk->queue, &lim);
+}
+EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
+
+/* update the optimal I/O size after a reshape */
+void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
+{
+ struct queue_limits lim;
+
+ if (mddev_is_dm(mddev))
+ return;
+
+ /* don't bother updating io_opt if we can't suspend the array */
+ if (mddev_suspend(mddev, false) < 0)
+ return;
+ lim = queue_limits_start_update(mddev->gendisk->queue);
+ lim.io_opt = lim.io_min * nr_stripes;
+ queue_limits_commit_update(mddev->gendisk->queue, &lim);
+ mddev_resume(mddev);
+}
+EXPORT_SYMBOL_GPL(mddev_update_io_opt);
+
static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
@@ -5770,10 +5863,11 @@ struct mddev *md_alloc(dev_t dev, char *name)
*/
mddev->hold_active = UNTIL_STOP;
- error = -ENOMEM;
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ error = PTR_ERR(disk);
goto out_free_mddev;
+ }
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5787,9 +5881,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
disk->fops = &md_fops;
disk->private_data = mddev;
- mddev->queue = disk->queue;
- blk_set_stacking_limits(&mddev->queue->limits);
- blk_queue_write_cache(mddev->queue, true, true);
+ blk_queue_write_cache(disk->queue, true, true);
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
error = add_disk(disk);
@@ -5931,7 +6023,7 @@ int md_run(struct mddev *mddev)
invalidate_bdev(rdev->bdev);
if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
mddev->ro = MD_RDONLY;
- if (mddev->gendisk)
+ if (!mddev_is_dm(mddev))
set_disk_ro(mddev->gendisk, 1);
}
@@ -6034,7 +6126,10 @@ int md_run(struct mddev *mddev)
pr_warn("True protection against single-disk failure might be compromised.\n");
}
- mddev->recovery = 0;
+ /* dm-raid expects sync_thread to be frozen until resume */
+ if (mddev->gendisk)
+ mddev->recovery = 0;
+
/* may be over-ridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
@@ -6090,7 +6185,8 @@ int md_run(struct mddev *mddev)
}
}
- if (mddev->queue) {
+ if (!mddev_is_dm(mddev)) {
+ struct request_queue *q = mddev->gendisk->queue;
bool nonrot = true;
rdev_for_each(rdev, mddev) {
@@ -6102,14 +6198,14 @@ int md_run(struct mddev *mddev)
if (mddev->degraded)
nonrot = false;
if (nonrot)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
/* Set the NOWAIT flags if all underlying devices support it */
if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -6188,7 +6284,6 @@ int do_md_run(struct mddev *mddev)
/* run start up tasks that require md_thread */
md_start(mddev);
- md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
@@ -6209,7 +6304,6 @@ int md_start(struct mddev *mddev)
if (mddev->pers->start) {
set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
ret = mddev->pers->start(mddev);
clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
md_wakeup_thread(mddev->sync_thread);
@@ -6254,7 +6348,6 @@ static int restart_array(struct mddev *mddev)
pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
@@ -6274,7 +6367,15 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- mddev->flags = 0;
+ /*
+ * Don't clear MD_CLOSING, or mddev can be opened again.
+ * 'hold_active != 0' means mddev is still in the creation
+ * process and will be used later.
+ */
+ if (mddev->hold_active)
+ mddev->flags = 0;
+ else
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6311,7 +6412,6 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
- stop_sync_thread(mddev, true, false);
del_timer_sync(&mddev->safemode_timer);
if (mddev->pers && mddev->pers->quiesce) {
@@ -6336,6 +6436,8 @@ static void __md_stop_writes(struct mddev *mddev)
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, false);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -6349,8 +6451,10 @@ static void mddev_detach(struct mddev *mddev)
mddev->pers->quiesce(mddev, 0);
}
md_unregister_thread(mddev, &mddev->thread);
- if (mddev->queue)
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+
+ /* the unplug fn references 'conf' */
+ if (!mddev_is_dm(mddev))
+ blk_sync_queue(mddev->gendisk->queue);
}
static void __md_stop(struct mddev *mddev)
@@ -6387,7 +6491,8 @@ void md_stop(struct mddev *mddev)
EXPORT_SYMBOL_GPL(md_stop);
-static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+/* ensure 'mddev->pers' exists before calling md_set_readonly() */
+static int md_set_readonly(struct mddev *mddev)
{
int err = 0;
int did_freeze = 0;
@@ -6398,7 +6503,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
}
stop_sync_thread(mddev, false, false);
@@ -6406,36 +6510,29 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
- mutex_lock(&mddev->open_mutex);
- if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
- mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
pr_warn("md: %s still in use.\n",mdname(mddev));
err = -EBUSY;
goto out;
}
- if (mddev->pers) {
- __md_stop_writes(mddev);
-
- if (mddev->ro == MD_RDONLY) {
- err = -ENXIO;
- goto out;
- }
+ __md_stop_writes(mddev);
- mddev->ro = MD_RDONLY;
- set_disk_ro(mddev->gendisk, 1);
+ if (mddev->ro == MD_RDONLY) {
+ err = -ENXIO;
+ goto out;
}
+ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
+
out:
- if ((mddev->pers && !err) || did_freeze) {
+ if (!err || did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- mutex_unlock(&mddev->open_mutex);
return err;
}
@@ -6443,8 +6540,7 @@ out:
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
-static int do_md_stop(struct mddev *mddev, int mode,
- struct block_device *bdev)
+static int do_md_stop(struct mddev *mddev, int mode)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
@@ -6453,22 +6549,16 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
}
stop_sync_thread(mddev, true, false);
- mutex_lock(&mddev->open_mutex);
- if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
- mddev->sysfs_active ||
- mddev->sync_thread ||
+ if (mddev->sysfs_active ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
pr_warn("md: %s still in use.\n",mdname(mddev));
- mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
}
return -EBUSY;
}
@@ -6487,13 +6577,11 @@ static int do_md_stop(struct mddev *mddev, int mode,
sysfs_unlink_rdev(mddev, rdev);
set_capacity_and_notify(disk, 0);
- mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
if (!md_is_rdwr(mddev))
mddev->ro = MD_RDWR;
- } else
- mutex_unlock(&mddev->open_mutex);
+ }
/*
* Free resources if final stop
*/
@@ -6539,7 +6627,7 @@ static void autorun_array(struct mddev *mddev)
err = do_md_run(mddev);
if (err) {
pr_warn("md: do_md_run() returned %d\n", err);
- do_md_stop(mddev, 0, NULL);
+ do_md_stop(mddev, 0);
}
}
@@ -7009,9 +7097,7 @@ kick_rdev:
md_kick_rdev_from_array(rdev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- if (mddev->thread)
- md_wakeup_thread(mddev->thread);
- else
+ if (!mddev->thread)
md_update_sb(mddev, 1);
md_new_event();
@@ -7086,14 +7172,13 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (!bdev_nowait(rdev->bdev)) {
pr_info("%s: Disabling nowait because %pg does not support nowait\n",
mdname(mddev), rdev->bdev);
- blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
}
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
md_new_event();
return 0;
@@ -7307,8 +7392,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- mddev->sync_thread)
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
if (!md_is_rdwr(mddev))
return -EROFS;
@@ -7325,10 +7409,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
if (!rv) {
if (mddev_is_clustered(mddev))
md_cluster_ops->update_size(mddev, old_dev_sectors);
- else if (mddev->queue) {
+ else if (!mddev_is_dm(mddev))
set_capacity_and_notify(mddev->gendisk,
mddev->array_sectors);
- }
}
return rv;
}
@@ -7345,8 +7428,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -7542,16 +7624,17 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static inline bool md_ioctl_valid(unsigned int cmd)
+static inline int md_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
- case ADD_NEW_DISK:
case GET_ARRAY_INFO:
- case GET_BITMAP_FILE:
case GET_DISK_INFO:
+ case RAID_VERSION:
+ return 0;
+ case ADD_NEW_DISK:
+ case GET_BITMAP_FILE:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
- case RAID_VERSION:
case RESTART_ARRAY_RW:
case RUN_ARRAY:
case SET_ARRAY_INFO:
@@ -7560,9 +7643,11 @@ static inline bool md_ioctl_valid(unsigned int cmd)
case STOP_ARRAY:
case STOP_ARRAY_RO:
case CLUSTERED_DISK_NACK:
- return true;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return 0;
default:
- return false;
+ return -ENOTTY;
}
}
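
For reference, a minimal sketch (not part of the patch) of the pattern this hunk introduces: the command whitelist and the CAP_SYS_ADMIN check collapse into one errno-returning validator, so the ioctl entry point reduces to a single early return. The function name below is invented.

	/* Illustrative only: same shape as the md_ioctl_valid() rework above. */
	static int example_ioctl_valid(unsigned int cmd)
	{
		switch (cmd) {
		case GET_ARRAY_INFO:	/* read-only queries stay unprivileged */
		case GET_DISK_INFO:
		case RAID_VERSION:
			return 0;
		case RUN_ARRAY:		/* state-changing commands need CAP_SYS_ADMIN */
		case STOP_ARRAY:
			if (!capable(CAP_SYS_ADMIN))
				return -EACCES;
			return 0;
		default:
			return -ENOTTY;
		}
	}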
@@ -7620,31 +7705,17 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
- bool did_set_md_closing = false;
-
- if (!md_ioctl_valid(cmd))
- return -ENOTTY;
- switch (cmd) {
- case RAID_VERSION:
- case GET_ARRAY_INFO:
- case GET_DISK_INFO:
- break;
- default:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- }
+ err = md_ioctl_valid(cmd);
+ if (err)
+ return err;
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
- switch (cmd) {
- case RAID_VERSION:
- err = get_version(argp);
- goto out;
- default:;
- }
+ if (cmd == RAID_VERSION)
+ return get_version(argp);
/*
* Commands creating/starting a new array:
@@ -7652,35 +7723,23 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
mddev = bdev->bd_disk->private_data;
- if (!mddev) {
- BUG();
- goto out;
- }
-
/* Some actions do not require the mutex */
switch (cmd) {
case GET_ARRAY_INFO:
if (!mddev->raid_disks && !mddev->external)
- err = -ENODEV;
- else
- err = get_array_info(mddev, argp);
- goto out;
+ return -ENODEV;
+ return get_array_info(mddev, argp);
case GET_DISK_INFO:
if (!mddev->raid_disks && !mddev->external)
- err = -ENODEV;
- else
- err = get_disk_info(mddev, argp);
- goto out;
+ return -ENODEV;
+ return get_disk_info(mddev, argp);
case SET_DISK_FAULTY:
- err = set_disk_faulty(mddev, new_decode_dev(arg));
- goto out;
+ return set_disk_faulty(mddev, new_decode_dev(arg));
case GET_BITMAP_FILE:
- err = get_bitmap_file(mddev, argp);
- goto out;
-
+ return get_bitmap_file(mddev, argp);
}
if (cmd == HOT_REMOVE_DISK)
@@ -7693,20 +7752,9 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
/* Need to flush page cache, and ensure no-one else opens
* and writes
*/
- mutex_lock(&mddev->open_mutex);
- if (mddev->pers && atomic_read(&mddev->openers) > 1) {
- mutex_unlock(&mddev->open_mutex);
- err = -EBUSY;
- goto out;
- }
- if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
- mutex_unlock(&mddev->open_mutex);
- err = -EBUSY;
- goto out;
- }
- did_set_md_closing = true;
- mutex_unlock(&mddev->open_mutex);
- sync_blockdev(bdev);
+ err = mddev_set_closing_and_sync_blockdev(mddev, 1);
+ if (err)
+ return err;
}
if (!md_is_rdwr(mddev))
@@ -7747,11 +7795,12 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
goto unlock;
case STOP_ARRAY:
- err = do_md_stop(mddev, 0, bdev);
+ err = do_md_stop(mddev, 0);
goto unlock;
case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, bdev);
+ if (mddev->pers)
+ err = md_set_readonly(mddev);
goto unlock;
case HOT_REMOVE_DISK:
@@ -7846,7 +7895,7 @@ unlock:
mddev_unlock(mddev);
out:
- if(did_set_md_closing)
+ if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
clear_bit(MD_CLOSING, &mddev->flags);
return err;
}
@@ -8683,10 +8732,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
bio_chain(discard_bio, bio);
bio_clone_blkg_association(discard_bio, bio);
- if (mddev->gendisk)
- trace_block_bio_remap(discard_bio,
- disk_devt(mddev->gendisk),
- bio->bi_iter.bi_sector);
+ mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector);
submit_bio_noacct(discard_bio);
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
@@ -8733,6 +8779,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
}
EXPORT_SYMBOL_GPL(md_account_bio);
+void md_free_cloned_bio(struct bio *bio)
+{
+ struct md_io_clone *md_io_clone = bio->bi_private;
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
+
+ if (md_io_clone->start_time)
+ bio_end_io_acct(orig_bio, md_io_clone->start_time);
+
+ bio_put(bio);
+ percpu_ref_put(&mddev->active_io);
+}
+EXPORT_SYMBOL_GPL(md_free_cloned_bio);
+
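
A hedged sketch of how a personality's clone completion path might use the new export; the endio name is hypothetical, and note the helper frees only the clone, it does not complete the original bio.

	/* Hypothetical endio for a bio cloned with md_account_bio()-style cloning. */
	static void example_clone_endio(struct bio *bio)
	{
		/*
		 * Propagates bi_status to the original bio, finishes IO accounting,
		 * frees the clone and drops the mddev->active_io reference.  It does
		 * NOT complete the original bio; the personality does that once all
		 * outstanding clones have finished.
		 */
		md_free_cloned_bio(bio);
	}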
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
@@ -8788,12 +8851,16 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just incase thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
- test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
- if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto skip;
+
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- return;
+ goto skip;
}
if (mddev_is_clustered(mddev)) {
@@ -9162,7 +9229,7 @@ void md_do_sync(struct md_thread *thread)
mddev->delta_disks > 0 &&
mddev->pers->finish_reshape &&
mddev->pers->size &&
- mddev->queue) {
+ !mddev_is_dm(mddev)) {
mddev_lock_nointr(mddev);
md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
mddev_unlock(mddev);
@@ -9262,9 +9329,14 @@ static bool md_spares_need_change(struct mddev *mddev)
{
struct md_rdev *rdev;
- rdev_for_each(rdev, mddev)
- if (rdev_removeable(rdev) || rdev_addable(rdev))
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev_removeable(rdev) || rdev_addable(rdev)) {
+ rcu_read_unlock();
return true;
+ }
+ }
+ rcu_read_unlock();
return false;
}
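
The hunk above converts the spares check to an RCU-protected walk; below is a generic sketch of the same early-return pattern, with an invented predicate, assuming rdev_for_each_rcu() as used above.

	static bool example_any_rdev(struct mddev *mddev,
				     bool (*match)(struct md_rdev *rdev))
	{
		struct md_rdev *rdev;

		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev) {
			if (match(rdev)) {
				/* drop the read lock on every early exit */
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
		return false;
	}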
@@ -9368,13 +9440,19 @@ static void md_start_sync(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, sync_work);
int spares = 0;
bool suspend = false;
+ char *name;
- if (md_spares_need_change(mddev))
+ /*
+ * If reshape is still in progress, spares won't be added or removed
+ * from conf until reshape is done.
+ */
+ if (mddev->reshape_position == MaxSector &&
+ md_spares_need_change(mddev)) {
suspend = true;
+ mddev_suspend(mddev, false);
+ }
- suspend ? mddev_suspend_and_lock_nointr(mddev) :
- mddev_lock_nointr(mddev);
-
+ mddev_lock_nointr(mddev);
if (!md_is_rdwr(mddev)) {
/*
* On a read-only array we can:
@@ -9400,8 +9478,10 @@ static void md_start_sync(struct work_struct *ws)
if (spares)
md_bitmap_write_all(mddev->bitmap);
+ name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
+ "reshape" : "resync";
rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "resync"));
+ md_register_thread(md_do_sync, mddev, name));
if (!mddev->sync_thread) {
pr_warn("%s: could not start resync thread...\n",
mdname(mddev));
@@ -9445,6 +9525,20 @@ not_running:
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
+static void unregister_sync_thread(struct mddev *mddev)
+{
+ if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
+ /* resync/recovery still happening */
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ return;
+ }
+
+ if (WARN_ON_ONCE(!mddev->sync_thread))
+ return;
+
+ md_reap_sync_thread(mddev);
+}
+
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
@@ -9469,9 +9563,6 @@ not_running:
*/
void md_check_recovery(struct mddev *mddev)
{
- if (READ_ONCE(mddev->suspended))
- return;
-
if (mddev->bitmap)
md_bitmap_daemon_work(mddev);
@@ -9485,7 +9576,8 @@ void md_check_recovery(struct mddev *mddev)
}
if (!md_is_rdwr(mddev) &&
- !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if ( ! (
(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9507,8 +9599,7 @@ void md_check_recovery(struct mddev *mddev)
struct md_rdev *rdev;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- /* sync_work already queued. */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ unregister_sync_thread(mddev);
goto unlock;
}
@@ -9571,16 +9662,7 @@ void md_check_recovery(struct mddev *mddev)
* still set.
*/
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
- /* resync/recovery still happening */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- goto unlock;
- }
-
- if (WARN_ON_ONCE(!mddev->sync_thread))
- goto unlock;
-
- md_reap_sync_thread(mddev);
+ unregister_sync_thread(mddev);
goto unlock;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8d881cc59799..097d9dbd69b8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -18,6 +18,7 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <trace/events/block.h>
#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
@@ -59,7 +60,7 @@ struct md_rdev {
*/
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
- struct bdev_handle *bdev_handle; /* Handle from open for bdev */
+ struct file *bdev_file; /* Handle from open for bdev */
struct page *sb_page, *bb_page;
int sb_loaded;
@@ -207,6 +208,7 @@ enum flag_bits {
* check if there is collision between raid1
* serial bios.
*/
+ Nonrot, /* non-rotational device (SSD) */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -222,6 +224,16 @@ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
}
return 0;
}
+
+static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
+ int sectors)
+{
+ sector_t first_bad;
+ int bad_sectors;
+
+ return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
+}
+
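
The new wrapper is for the many callers in this series that only care whether the range overlaps any bad block, not where; a hedged caller sketch (variable names invented):

	/* Illustrative: skip a candidate device whose target range is not clean.
	 * is_badblock() returns 0 for a clean range, so any non-zero value
	 * (acknowledged or unacknowledged bad blocks) means "don't read here". */
	if (rdev_has_badblock(rdev, read_sector, nr_sectors))
		continue;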
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
@@ -468,7 +480,6 @@ struct mddev {
struct timer_list safemode_timer;
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
- struct request_queue *queue; /* for plugging ... */
struct bitmap *bitmap; /* the bitmap for the device */
struct {
@@ -558,6 +569,37 @@ enum recovery_flags {
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
+enum md_ro_state {
+ MD_RDWR,
+ MD_RDONLY,
+ MD_AUTO_READ,
+ MD_MAX_STATE
+};
+
+static inline bool md_is_rdwr(struct mddev *mddev)
+{
+ return (mddev->ro == MD_RDWR);
+}
+
+static inline bool reshape_interrupted(struct mddev *mddev)
+{
+ /* reshape never started */
+ if (mddev->reshape_position == MaxSector)
+ return false;
+
+ /* interrupted */
+ if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return true;
+
+ /* running reshape will be interrupted soon. */
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ return true;
+
+ return false;
+}
+
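
A hedged illustration of the kind of guard the new helper enables; the surrounding fast-path check is hypothetical, not taken from the patch.

	/* Hypothetical: while a reshape sits interrupted, the on-disk layout is
	 * only partially converted, so fall back to the careful/slow path. */
	if (reshape_interrupted(mddev))
		return false;	/* e.g. refuse a bypass path for this IO */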
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -617,6 +659,7 @@ struct md_personality
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
+ void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
@@ -750,6 +793,7 @@ extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
+void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
@@ -778,9 +822,12 @@ extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
-extern void md_handle_request(struct mddev *mddev, struct bio *bio);
+extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
+extern void md_idle_sync_thread(struct mddev *mddev);
+extern void md_frozen_sync_thread(struct mddev *mddev);
+extern void md_unfrozen_sync_thread(struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
@@ -821,7 +868,7 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
- mddev->queue->limits.max_write_zeroes_sectors = 0;
+ mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
}
static inline int mddev_suspend_and_lock(struct mddev *mddev)
@@ -860,7 +907,31 @@ void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
+void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim);
+int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
+void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);
extern const struct block_device_operations md_fops;
+/*
+ * MD devices can be used underneath DM, in which case ->gendisk is NULL.
+ */
+static inline bool mddev_is_dm(struct mddev *mddev)
+{
+ return !mddev->gendisk;
+}
+
+static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
+ sector_t sector)
+{
+ if (!mddev_is_dm(mddev))
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
+}
+
+#define mddev_add_trace_msg(mddev, fmt, args...) \
+do { \
+ if (!mddev_is_dm(mddev)) \
+ blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
+} while (0)
+
#endif /* _MD_MD_H */
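
A hedged sketch of how a personality submit path uses the three helpers above instead of open-coding if (mddev->gendisk) or if (mddev->queue); the remap arithmetic shown is deliberately simplistic.

	/* Illustrative submit path (simplistic remap, invented context). */
	sector_t orig_sector = bio->bi_iter.bi_sector;

	bio_set_dev(bio, rdev->bdev);
	bio->bi_iter.bi_sector = rdev->data_offset + orig_sector;

	mddev_trace_remap(mddev, bio, orig_sector);	/* no-op when stacked under DM */
	mddev_add_trace_msg(mddev, "example: remap to raid disk %d", rdev->raid_disk);
	submit_bio_noacct(bio);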
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 0e010e1204aa..b17b54df673b 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -656,7 +656,7 @@ EXPORT_SYMBOL_GPL(dm_bm_checksum);
/*----------------------------------------------------------------*/
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION("Immutable metadata library for dm");
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c50a7abda744..c5d4aeb68404 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -379,6 +379,19 @@ static void raid0_free(struct mddev *mddev, void *priv)
free_conf(mddev, conf);
}
+static int raid0_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+
+ blk_set_stacking_limits(&lim);
+ lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * mddev->raid_disks;
+ mddev_stack_rdev_limits(mddev, &lim);
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
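
The conversion above replaces the old per-call blk_queue_*() setters with a queue_limits structure that is filled, stacked against the member rdevs and then applied in one go; a hedged sketch of the same pattern for a made-up personality:

	/* Illustrative: build limits once, merge member-device limits, apply. */
	static int example_set_limits(struct mddev *mddev, unsigned int chunk_sectors)
	{
		struct queue_limits lim;

		blk_set_stacking_limits(&lim);
		lim.io_min = chunk_sectors << 9;		/* sectors to bytes */
		lim.io_opt = lim.io_min * mddev->raid_disks;
		mddev_stack_rdev_limits(mddev, &lim);		/* fold in each rdev */
		return queue_limits_set(mddev->gendisk->queue, &lim);
	}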
static int raid0_run(struct mddev *mddev)
{
struct r0conf *conf;
@@ -399,20 +412,10 @@ static int raid0_run(struct mddev *mddev)
mddev->private = conf;
}
conf = mddev->private;
- if (mddev->queue) {
- struct md_rdev *rdev;
-
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
-
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
- rdev_for_each(rdev, mddev) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- }
+ if (!mddev_is_dm(mddev)) {
+ ret = raid0_set_limits(mddev);
+ if (ret)
+ goto out_free_conf;
}
/* calculate array device size */
@@ -426,8 +429,10 @@ static int raid0_run(struct mddev *mddev)
ret = md_integrity_register(mddev);
if (ret)
- free_conf(mddev, conf);
-
+ goto out_free_conf;
+ return 0;
+out_free_conf:
+ free_conf(mddev, conf);
return ret;
}
@@ -578,10 +583,7 @@ static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
bio_set_dev(bio, tmp_dev->bdev);
bio->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
- bio_sector);
+ mddev_trace_remap(mddev, bio, bio_sector);
mddev_check_write_zeroes(mddev, bio);
submit_bio_noacct(bio);
}
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 512746551f36..2ea1710a3b70 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -227,3 +227,72 @@ static inline bool exceed_read_errors(struct mddev *mddev, struct md_rdev *rdev)
return false;
}
+
+/**
+ * raid1_check_read_range() - check a given read range for bad blocks and
+ * return the available read length;
+ * @rdev: the rdev to read;
+ * @this_sector: read position;
+ * @len: read length;
+ *
+ * helper function for read_balance()
+ *
+ * 1) If there are no bad blocks in the range, @len is returned;
+ * 2) If the range is all bad blocks, 0 is returned;
+ * 3) If there are partial bad blocks:
+ * - If the bad block range starts after @this_sector, the length of the
+ * first good region is returned;
+ * - If the bad block range starts before @this_sector, 0 is returned and
+ * @len is updated to the number of sectors before the good blocks begin;
+ */
+static inline int raid1_check_read_range(struct md_rdev *rdev,
+ sector_t this_sector, int *len)
+{
+ sector_t first_bad;
+ int bad_sectors;
+
+ /* no bad block overlap */
+ if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
+ return *len;
+
+ /*
+ * bad block range starts offset into our range so we can return the
+ * number of sectors before the bad blocks start.
+ */
+ if (first_bad > this_sector)
+ return first_bad - this_sector;
+
+ /* read range is fully consumed by bad blocks. */
+ if (this_sector + *len <= first_bad + bad_sectors)
+ return 0;
+
+ /*
+ * final case, bad block range starts before or at the start of our
+ * range but does not cover our entire range so we still return 0 but
+ * update the length with the number of sectors before we get to the
+ * good ones.
+ */
+ *len = first_bad + bad_sectors - this_sector;
+ return 0;
+}
+
+/*
+ * Check if read should choose the first rdev.
+ *
+ * Balance on the whole device if no resync is going on (recovery is ok) or
+ * below the resync window. Otherwise, take the first readable disk.
+ */
+static inline bool raid1_should_read_first(struct mddev *mddev,
+ sector_t this_sector, int len)
+{
+ if (mddev->recovery_cp < this_sector + len)
+ return true;
+
+ if (mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, READ, this_sector,
+ this_sector + len))
+ return true;
+
+ return false;
+}
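
A worked illustration of the cases raid1_check_read_range() distinguishes, using invented numbers (read at this_sector = 1000, *len = 64):

	/*
	 * Invented example, this_sector = 1000, *len = 64 (range [1000, 1064)):
	 *
	 *   no bad blocks in [1000, 1064)          -> returns 64, *len unchanged
	 *   bad range [1016, 1032)                 -> returns 16 (good sectors first)
	 *   bad range [992, 1024), good afterwards -> returns 0, *len updated to 24
	 *   bad range covers all of [1000, 1064)   -> returns 0, *len unchanged
	 */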
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 286f8b16c7bd..be8ac24f50b6 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -46,9 +46,6 @@
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
-#define raid1_log(md, fmt, args...) \
- do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
-
#define RAID_1_10_NAME "raid1"
#include "raid1-10.c"
@@ -498,9 +495,6 @@ static void raid1_end_write_request(struct bio *bio)
* to user-side. So if something waits for IO, then it
* will wait for the 'master' bio.
*/
- sector_t first_bad;
- int bad_sectors;
-
r1_bio->bios[mirror] = NULL;
to_put = bio;
/*
@@ -516,8 +510,8 @@ static void raid1_end_write_request(struct bio *bio)
set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors) && !discard_error) {
+ if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+ !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -582,211 +576,312 @@ static sector_t align_to_barrier_unit_end(sector_t start_sector,
return len;
}
-/*
- * This routine returns the disk from which the requested read should
- * be done. There is a per-array 'next expected sequential IO' sector
- * number - if this matches on the next IO then we use the last disk.
- * There is also a per-disk 'last know head position' sector that is
- * maintained from IRQ contexts, both the normal and the resync IO
- * completion handlers update this position correctly. If there is no
- * perfect sequential match then we pick the disk whose head is closest.
- *
- * If there are 2 mirrors in the same 2 devices, performance degrades
- * because position is mirror, not device based.
- *
- * The rdev for the device selected will have nr_pending incremented.
- */
-static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
+static void update_read_sectors(struct r1conf *conf, int disk,
+ sector_t this_sector, int len)
{
- const sector_t this_sector = r1_bio->sector;
- int sectors;
- int best_good_sectors;
- int best_disk, best_dist_disk, best_pending_disk;
- int has_nonrot_disk;
+ struct raid1_info *info = &conf->mirrors[disk];
+
+ atomic_inc(&info->rdev->nr_pending);
+ if (info->next_seq_sect != this_sector)
+ info->seq_start = this_sector;
+ info->next_seq_sect = this_sector + len;
+}
+
+static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int len = r1_bio->sectors;
int disk;
- sector_t best_dist;
- unsigned int min_pending;
- struct md_rdev *rdev;
- int choose_first;
- int choose_next_idle;
- /*
- * Check if we can balance. We can balance on the whole
- * device if no resync is going on, or below the resync window.
- * We take the first readable disk when above the resync window.
- */
- retry:
- sectors = r1_bio->sectors;
- best_disk = -1;
- best_dist_disk = -1;
- best_dist = MaxSector;
- best_pending_disk = -1;
- min_pending = UINT_MAX;
- best_good_sectors = 0;
- has_nonrot_disk = 0;
- choose_next_idle = 0;
- clear_bit(R1BIO_FailFast, &r1_bio->state);
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ int read_len;
- if ((conf->mddev->recovery_cp < this_sector + sectors) ||
- (mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
- this_sector + sectors)))
- choose_first = 1;
- else
- choose_first = 0;
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ /* choose the first disk even if it has some bad blocks. */
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len > 0) {
+ update_read_sectors(conf, disk, this_sector, read_len);
+ *max_sectors = read_len;
+ return disk;
+ }
+ }
+
+ return -1;
+}
+
+static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int best_disk = -1;
+ int best_len = 0;
+ int disk;
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
- sector_t dist;
- sector_t first_bad;
- int bad_sectors;
- unsigned int pending;
- bool nonrot;
+ struct md_rdev *rdev;
+ int len;
+ int read_len;
+
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
rdev = conf->mirrors[disk].rdev;
- if (r1_bio->bios[disk] == IO_BLOCKED
- || rdev == NULL
- || test_bit(Faulty, &rdev->flags))
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ test_bit(WriteMostly, &rdev->flags))
continue;
- if (!test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < this_sector + sectors)
+
+ /* keep track of the disk with the most readable sectors. */
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len > best_len) {
+ best_disk = disk;
+ best_len = read_len;
+ }
+ }
+
+ if (best_disk != -1) {
+ *max_sectors = best_len;
+ update_read_sectors(conf, best_disk, this_sector, best_len);
+ }
+
+ return best_disk;
+}
+
+static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int bb_disk = -1;
+ int bb_read_len = 0;
+ int disk;
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ int len;
+ int read_len;
+
+ if (r1_bio->bios[disk] == IO_BLOCKED)
continue;
- if (test_bit(WriteMostly, &rdev->flags)) {
- /* Don't balance among write-mostly, just
- * use the first as a last resort */
- if (best_dist_disk < 0) {
- if (is_badblock(rdev, this_sector, sectors,
- &first_bad, &bad_sectors)) {
- if (first_bad <= this_sector)
- /* Cannot use this */
- continue;
- best_good_sectors = first_bad - this_sector;
- } else
- best_good_sectors = sectors;
- best_dist_disk = disk;
- best_pending_disk = disk;
- }
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ !test_bit(WriteMostly, &rdev->flags))
continue;
+
+ /* there are no bad blocks, we can use this disk */
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len == r1_bio->sectors) {
+ update_read_sectors(conf, disk, this_sector, read_len);
+ return disk;
}
- /* This is a reasonable device to use. It might
- * even be best.
+
+ /*
+ * there are partial bad blocks, choose the rdev with the largest
+ * read length.
*/
- if (is_badblock(rdev, this_sector, sectors,
- &first_bad, &bad_sectors)) {
- if (best_dist < MaxSector)
- /* already have a better device */
- continue;
- if (first_bad <= this_sector) {
- /* cannot read here. If this is the 'primary'
- * device, then we must not read beyond
- * bad_sectors from another device..
- */
- bad_sectors -= (this_sector - first_bad);
- if (choose_first && sectors > bad_sectors)
- sectors = bad_sectors;
- if (best_good_sectors > sectors)
- best_good_sectors = sectors;
-
- } else {
- sector_t good_sectors = first_bad - this_sector;
- if (good_sectors > best_good_sectors) {
- best_good_sectors = good_sectors;
- best_disk = disk;
- }
- if (choose_first)
- break;
- }
- continue;
- } else {
- if ((sectors > best_good_sectors) && (best_disk >= 0))
- best_disk = -1;
- best_good_sectors = sectors;
+ if (read_len > bb_read_len) {
+ bb_disk = disk;
+ bb_read_len = read_len;
}
+ }
+
+ if (bb_disk != -1) {
+ *max_sectors = bb_read_len;
+ update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
+ }
+
+ return bb_disk;
+}
+
+static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
+{
+ /* TODO: address issues with this check and concurrency. */
+ return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
+ conf->mirrors[disk].head_position == r1_bio->sector;
+}
+
+/*
+ * If buffered sequential IO size exceeds the optimal iosize, check if there is
+ * an idle disk. If yes, choose the idle disk.
+ */
+static bool should_choose_next(struct r1conf *conf, int disk)
+{
+ struct raid1_info *mirror = &conf->mirrors[disk];
+ int opt_iosize;
+
+ if (!test_bit(Nonrot, &mirror->rdev->flags))
+ return false;
+
+ opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
+ return opt_iosize > 0 && mirror->seq_start != MaxSector &&
+ mirror->next_seq_sect > opt_iosize &&
+ mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
+}
+
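
A hedged, worked reading of the heuristic above, with invented numbers:

	/*
	 * Invented example for should_choose_next() on a non-rotational mirror:
	 *
	 *   bdev_io_opt() = 256 KiB  ->  opt_iosize = 512 sectors
	 *   seq_start = 0, next_seq_sect = 2048  (2048 sectors read sequentially)
	 *
	 *   next_seq_sect (2048) > opt_iosize (512) and
	 *   next_seq_sect - opt_iosize (1536) >= seq_start (0)
	 *     -> true: the stream already exceeds the optimal IO size, so
	 *        read_balance() may hand the next read to an idle mirror instead.
	 */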
+static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ return false;
+
+ /* still in recovery */
+ if (!test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < r1_bio->sector + r1_bio->sectors)
+ return false;
+
+ /* don't read from a slow disk unless we have to */
+ if (test_bit(WriteMostly, &rdev->flags))
+ return false;
+
+ /* don't split IO for bad blocks unless we have to */
+ if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
+ return false;
+
+ return true;
+}
+
+struct read_balance_ctl {
+ sector_t closest_dist;
+ int closest_dist_disk;
+ int min_pending;
+ int min_pending_disk;
+ int sequential_disk;
+ int readable_disks;
+};
+
+static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
+{
+ int disk;
+ struct read_balance_ctl ctl = {
+ .closest_dist_disk = -1,
+ .closest_dist = MaxSector,
+ .min_pending_disk = -1,
+ .min_pending = UINT_MAX,
+ .sequential_disk = -1,
+ };
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ sector_t dist;
+ unsigned int pending;
- if (best_disk >= 0)
- /* At least two disks to choose from so failfast is OK */
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev_readable(rdev, r1_bio))
+ continue;
+
+ /* At least two disks to choose from so failfast is OK */
+ if (ctl.readable_disks++ == 1)
set_bit(R1BIO_FailFast, &r1_bio->state);
- nonrot = bdev_nonrot(rdev->bdev);
- has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
- dist = abs(this_sector - conf->mirrors[disk].head_position);
- if (choose_first) {
- best_disk = disk;
- break;
- }
+ dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);
+
/* Don't change to another disk for sequential reads */
- if (conf->mirrors[disk].next_seq_sect == this_sector
- || dist == 0) {
- int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
- struct raid1_info *mirror = &conf->mirrors[disk];
+ if (is_sequential(conf, disk, r1_bio)) {
+ if (!should_choose_next(conf, disk))
+ return disk;
- best_disk = disk;
/*
- * If buffered sequential IO size exceeds optimal
- * iosize, check if there is idle disk. If yes, choose
- * the idle disk. read_balance could already choose an
- * idle disk before noticing it's a sequential IO in
- * this disk. This doesn't matter because this disk
- * will idle, next time it will be utilized after the
- * first disk has IO size exceeds optimal iosize. In
- * this way, iosize of the first disk will be optimal
- * iosize at least. iosize of the second disk might be
- * small, but not a big deal since when the second disk
- * starts IO, the first disk is likely still busy.
+ * Add 'pending' to avoid choosing this disk if
+ * there is another idle disk.
*/
- if (nonrot && opt_iosize > 0 &&
- mirror->seq_start != MaxSector &&
- mirror->next_seq_sect > opt_iosize &&
- mirror->next_seq_sect - opt_iosize >=
- mirror->seq_start) {
- choose_next_idle = 1;
- continue;
- }
- break;
+ pending++;
+ /*
+ * If there is no other idle disk, this disk
+ * will be chosen.
+ */
+ ctl.sequential_disk = disk;
}
- if (choose_next_idle)
- continue;
-
- if (min_pending > pending) {
- min_pending = pending;
- best_pending_disk = disk;
+ if (ctl.min_pending > pending) {
+ ctl.min_pending = pending;
+ ctl.min_pending_disk = disk;
}
- if (dist < best_dist) {
- best_dist = dist;
- best_dist_disk = disk;
+ if (ctl.closest_dist > dist) {
+ ctl.closest_dist = dist;
+ ctl.closest_dist_disk = disk;
}
}
/*
+ * Sequential IO size exceeds the optimal iosize but there is no other
+ * idle disk, so choose the sequential disk.
+ */
+ if (ctl.sequential_disk != -1 && ctl.min_pending != 0)
+ return ctl.sequential_disk;
+
+ /*
* If all disks are rotational, choose the closest disk. If any disk is
* non-rotational, choose the disk with less pending request even the
* disk is rotational, which might/might not be optimal for raids with
* mixed rotational/non-rotational disks depending on workload.
*/
- if (best_disk == -1) {
- if (has_nonrot_disk || min_pending == 0)
- best_disk = best_pending_disk;
- else
- best_disk = best_dist_disk;
- }
+ if (ctl.min_pending_disk != -1 &&
+ (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0))
+ return ctl.min_pending_disk;
+ else
+ return ctl.closest_dist_disk;
+}
- if (best_disk >= 0) {
- rdev = conf->mirrors[best_disk].rdev;
- if (!rdev)
- goto retry;
- atomic_inc(&rdev->nr_pending);
- sectors = best_good_sectors;
+/*
+ * This routine returns the disk from which the requested read should be done.
+ *
+ * 1) If resync is in progress, find the first usable disk and use it even if it
+ * has some bad blocks.
+ *
+ * 2) Now that there is no resync, loop through all disks and skipping slow
+ * disks and disks with bad blocks for now. Only pay attention to key disk
+ * choice.
+ *
+ * 3) If we've made it this far, now look for disks with bad blocks and choose
+ * the one with most number of sectors.
+ *
+ * 4) If we are all the way at the end, we have no choice but to use a disk even
+ * if it is write mostly.
+ *
+ * The rdev for the device selected will have nr_pending incremented.
+ */
+static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ int disk;
- if (conf->mirrors[best_disk].next_seq_sect != this_sector)
- conf->mirrors[best_disk].seq_start = this_sector;
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
+
+ if (raid1_should_read_first(conf->mddev, r1_bio->sector,
+ r1_bio->sectors))
+ return choose_first_rdev(conf, r1_bio, max_sectors);
- conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
+ disk = choose_best_rdev(conf, r1_bio);
+ if (disk >= 0) {
+ *max_sectors = r1_bio->sectors;
+ update_read_sectors(conf, disk, r1_bio->sector,
+ r1_bio->sectors);
+ return disk;
}
- *max_sectors = sectors;
- return best_disk;
+ /*
+ * If we are here it means we didn't find a perfectly good disk so
+ * now spend a bit more time trying to find one with the most good
+ * sectors.
+ */
+ disk = choose_bb_rdev(conf, r1_bio, max_sectors);
+ if (disk >= 0)
+ return disk;
+
+ return choose_slow_rdev(conf, r1_bio, max_sectors);
}
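
A hedged, simplified view of how the read path consumes the rewritten read_balance(); this mirrors raid1_read_request() above but is not a verbatim excerpt.

	/* Simplified caller sketch (error handling elided). */
	int max_sectors;
	int rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* no mirror can serve this read: error out or retry the r1_bio */
	} else if (max_sectors < r1_bio->sectors) {
		/* the chosen mirror can only serve a prefix: split the bio first */
	}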
static void wake_up_barrier(struct r1conf *conf)
@@ -1098,7 +1193,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
- raid1_log(conf->mddev, "wait freeze");
+ mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
wait_event_lock_irq_cmd(
conf->wait_barrier,
get_unqueued_pending(conf) == extra,
@@ -1287,7 +1382,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
* Reading from a write-mostly device must take care not to
* over-take any writes that are 'behind'
*/
- raid1_log(mddev, "wait behind writes");
+ mddev_add_trace_msg(mddev, "raid1 wait behind writes");
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
@@ -1320,11 +1415,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
test_bit(R1BIO_FailFast, &r1_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
-
+ mddev_trace_remap(mddev, read_bio, r1_bio->sector);
submit_bio_noacct(read_bio);
}
@@ -1474,7 +1565,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio_wouldblock_error(bio);
return;
}
- raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
+ mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
+ blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf, bio->bi_iter.bi_sector, false);
goto retry_write;
@@ -1557,10 +1649,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
-
- if (mddev->gendisk)
- trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
- r1_bio->sector);
+ mddev_trace_remap(mddev, mbio, r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)rdev;
if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
@@ -1760,6 +1849,52 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
+static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
+ bool replacement)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+
+ if (replacement)
+ info += conf->raid_disks;
+
+ if (info->rdev)
+ return false;
+
+ if (bdev_nonrot(rdev->bdev)) {
+ set_bit(Nonrot, &rdev->flags);
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
+ }
+
+ rdev->raid_disk = disk;
+ info->head_position = 0;
+ info->seq_start = MaxSector;
+ WRITE_ONCE(info->rdev, rdev);
+
+ return true;
+}
+
+static bool raid1_remove_conf(struct r1conf *conf, int disk)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+ struct md_rdev *rdev = info->rdev;
+
+ if (!rdev || test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending))
+ return false;
+
+ /* Only remove non-faulty devices if recovery is not possible. */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ rdev->mddev->recovery_disabled != conf->recovery_disabled &&
+ rdev->mddev->degraded < conf->raid_disks)
+ return false;
+
+ if (test_and_clear_bit(Nonrot, &rdev->flags))
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
+
+ WRITE_ONCE(info->rdev, NULL);
+ return true;
+}
+
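
The two helpers above are the only writers of the Nonrot flag and the conf->nonrot_disks counter, so readers such as choose_best_rdev() can rely on a lockless READ_ONCE() test; a hedged reader sketch (function name invented):

	/* Illustrative lockless reader paired with the WRITE_ONCE() updates above. */
	static inline bool example_has_nonrot_disk(struct r1conf *conf)
	{
		return READ_ONCE(conf->nonrot_disks) > 0;
	}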
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
@@ -1791,19 +1926,16 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
for (mirror = first; mirror <= last; mirror++) {
p = conf->mirrors + mirror;
if (!p->rdev) {
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
- p->head_position = 0;
- rdev->raid_disk = mirror;
- err = 0;
+ raid1_add_conf(conf, rdev, mirror, false);
/* As all devices are equivalent, we don't need a full recovery
* if this was recently any drive of the array
*/
if (rdev->saved_raid_disk < 0)
conf->fullsync = 1;
- WRITE_ONCE(p->rdev, rdev);
break;
}
if (test_bit(WantReplacement, &p->rdev->flags) &&
@@ -1813,13 +1945,11 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (err && repl_slot >= 0) {
/* Add this device as a replacement */
- p = conf->mirrors + repl_slot;
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
- rdev->raid_disk = repl_slot;
+ raid1_add_conf(conf, rdev, repl_slot, true);
err = 0;
conf->fullsync = 1;
- WRITE_ONCE(p[conf->raid_disks].rdev, rdev);
}
print_conf(conf);
@@ -1836,27 +1966,20 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (unlikely(number >= conf->raid_disks))
goto abort;
- if (rdev != p->rdev)
- p = conf->mirrors + conf->raid_disks + number;
+ if (rdev != p->rdev) {
+ number += conf->raid_disks;
+ p = conf->mirrors + number;
+ }
print_conf(conf);
if (rdev == p->rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
+ if (!raid1_remove_conf(conf, number)) {
err = -EBUSY;
goto abort;
}
- /* Only remove non-faulty devices if recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- mddev->degraded < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- WRITE_ONCE(p->rdev, NULL);
- if (conf->mirrors[conf->raid_disks + number].rdev) {
+
+ if (number < conf->raid_disks &&
+ conf->mirrors[conf->raid_disks + number].rdev) {
/* We just removed a device that is being replaced.
* Move down the replacement. We drain all IO before
* doing this to avoid confusion.
@@ -1944,8 +2067,6 @@ static void end_sync_write(struct bio *bio)
struct r1bio *r1_bio = get_resync_r1bio(bio);
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
- sector_t first_bad;
- int bad_sectors;
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
if (!uptodate) {
@@ -1955,14 +2076,11 @@ static void end_sync_write(struct bio *bio)
set_bit(MD_RECOVERY_NEEDED, &
mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state);
- } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors) &&
- !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
- r1_bio->sector,
- r1_bio->sectors,
- &first_bad, &bad_sectors)
- )
+ } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+ !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+ r1_bio->sector, r1_bio->sectors)) {
set_bit(R1BIO_MadeGood, &r1_bio->state);
+ }
put_sync_write_buf(r1_bio, uptodate);
}
@@ -2279,16 +2397,12 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
s = PAGE_SIZE >> 9;
do {
- sector_t first_bad;
- int bad_sectors;
-
rdev = conf->mirrors[d].rdev;
if (rdev &&
(test_bit(In_sync, &rdev->flags) ||
(!test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sect + s)) &&
- is_badblock(rdev, sect, s,
- &first_bad, &bad_sectors) == 0) {
+ rdev_has_badblock(rdev, sect, s) == 0) {
atomic_inc(&rdev->nr_pending);
if (sync_page_io(rdev, sect, s<<9,
conf->tmppage, REQ_OP_READ, false))
@@ -3006,23 +3120,17 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -EINVAL;
spin_lock_init(&conf->device_lock);
+ conf->raid_disks = mddev->raid_disks;
rdev_for_each(rdev, mddev) {
int disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
- || disk_idx < 0)
+
+ if (disk_idx >= conf->raid_disks || disk_idx < 0)
continue;
- if (test_bit(Replacement, &rdev->flags))
- disk = conf->mirrors + mddev->raid_disks + disk_idx;
- else
- disk = conf->mirrors + disk_idx;
- if (disk->rdev)
+ if (!raid1_add_conf(conf, rdev, disk_idx,
+ test_bit(Replacement, &rdev->flags)))
goto abort;
- disk->rdev = rdev;
- disk->head_position = 0;
- disk->seq_start = MaxSector;
}
- conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
INIT_LIST_HEAD(&conf->bio_end_io_list);
@@ -3086,12 +3194,21 @@ static struct r1conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
+static int raid1_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+
+ blk_set_stacking_limits(&lim);
+ lim.max_write_zeroes_sectors = 0;
+ mddev_stack_rdev_limits(mddev, &lim);
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
static void raid1_free(struct mddev *mddev, void *priv);
static int raid1_run(struct mddev *mddev)
{
struct r1conf *conf;
int i;
- struct md_rdev *rdev;
int ret;
if (mddev->level != 1) {
@@ -3118,14 +3235,10 @@ static int raid1_run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- if (mddev->queue)
- blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
- rdev_for_each(rdev, mddev) {
- if (!mddev->gendisk)
- continue;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ if (!mddev_is_dm(mddev)) {
+ ret = raid1_set_limits(mddev);
+ if (ret)
+ goto abort;
}
mddev->degraded = 0;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 14d4211a123a..5300cbaa58a4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -71,6 +71,7 @@ struct r1conf {
* allow for replacements.
*/
int raid_disks;
+ int nonrot_disks;
spinlock_t device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7412066ea22c..a4556d2e46bf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -76,9 +76,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
-#define raid10_log(md, fmt, args...) \
- do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
-
#include "raid1-10.c"
#define NULL_CMD
@@ -518,11 +515,7 @@ static void raid10_end_write_request(struct bio *bio)
* The 'master' represents the composite IO operation to
* user-side. So if something waits for IO, then it will
* wait for the 'master' bio.
- */
- sector_t first_bad;
- int bad_sectors;
-
- /*
+ *
* Do not set R10BIO_Uptodate if the current device is
* rebuilding or Faulty. This is because we cannot use
* such device for properly reading the data back (we could
@@ -535,10 +528,9 @@ static void raid10_end_write_request(struct bio *bio)
set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(rdev,
- r10_bio->devs[slot].addr,
- r10_bio->sectors,
- &first_bad, &bad_sectors) && !discard_error) {
+ if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+ r10_bio->sectors) &&
+ !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
@@ -753,17 +745,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_good_sectors = 0;
do_balance = 1;
clear_bit(R10BIO_FailFast, &r10_bio->state);
- /*
- * Check if we can balance. We can balance on the whole
- * device if no resync is going on (recovery is ok), or below
- * the resync window. We take the first readable disk when
- * above the resync window.
- */
- if ((conf->mddev->recovery_cp < MaxSector
- && (this_sector + sectors >= conf->next_resync)) ||
- (mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
- this_sector + sectors)))
+
+ if (raid1_should_read_first(conf->mddev, this_sector, sectors))
do_balance = 0;
for (slot = 0; slot < conf->copies ; slot++) {
@@ -1033,7 +1016,7 @@ static bool wait_barrier(struct r10conf *conf, bool nowait)
ret = false;
} else {
conf->nr_waiting++;
- raid10_log(conf->mddev, "wait barrier");
+ mddev_add_trace_msg(conf->mddev, "raid10 wait barrier");
wait_event_barrier(conf, stop_waiting_barrier(conf));
conf->nr_waiting--;
}
@@ -1152,7 +1135,7 @@ static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
bio_wouldblock_error(bio);
return false;
}
- raid10_log(conf->mddev, "wait reshape");
+ mddev_add_trace_msg(conf->mddev, "raid10 wait reshape");
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
conf->reshape_progress >= bio->bi_iter.bi_sector +
@@ -1249,10 +1232,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
test_bit(R10BIO_FailFast, &r10_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
- r10_bio->sector);
+ mddev_trace_remap(mddev, read_bio, r10_bio->sector);
submit_bio_noacct(read_bio);
return;
}
@@ -1288,10 +1268,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
&& enough(conf, devnum))
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio;
-
- if (conf->mddev->gendisk)
- trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
- r10_bio->sector);
+ mddev_trace_remap(mddev, mbio, r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)rdev;
@@ -1330,10 +1307,7 @@ retry_wait:
}
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
- sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
- int bad_sectors;
- int is_bad;
/*
* Discard request doesn't care the write result
@@ -1342,9 +1316,8 @@ retry_wait:
if (!r10_bio->sectors)
continue;
- is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
- &first_bad, &bad_sectors);
- if (is_bad < 0) {
+ if (rdev_has_badblock(rdev, dev_sector,
+ r10_bio->sectors) < 0) {
/*
* Mustn't write here until the bad block
* is acknowledged
@@ -1360,8 +1333,9 @@ retry_wait:
if (unlikely(blocked_rdev)) {
/* Have to wait for this device to get unblocked, then retry */
allow_barrier(conf);
- raid10_log(conf->mddev, "%s wait rdev %d blocked",
- __func__, blocked_rdev->raid_disk);
+ mddev_add_trace_msg(conf->mddev,
+ "raid10 %s wait rdev %d blocked",
+ __func__, blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf, false);
goto retry_wait;
@@ -1416,7 +1390,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio_wouldblock_error(bio);
return;
}
- raid10_log(conf->mddev, "wait reshape metadata");
+ mddev_add_trace_msg(conf->mddev,
+ "raid10 wait reshape metadata");
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
@@ -2131,10 +2106,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
continue;
}
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
p->head_position = 0;
p->recovery_disabled = mddev->recovery_disabled - 1;
rdev->raid_disk = mirror;
@@ -2150,10 +2124,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
rdev->raid_disk = repl_slot;
- err = 0;
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
conf->fullsync = 1;
WRITE_ONCE(p->replacement, rdev);
}
@@ -2290,8 +2263,6 @@ static void end_sync_write(struct bio *bio)
struct mddev *mddev = r10_bio->mddev;
struct r10conf *conf = mddev->private;
int d;
- sector_t first_bad;
- int bad_sectors;
int slot;
int repl;
struct md_rdev *rdev = NULL;
@@ -2312,11 +2283,10 @@ static void end_sync_write(struct bio *bio)
&rdev->mddev->recovery);
set_bit(R10BIO_WriteError, &r10_bio->state);
}
- } else if (is_badblock(rdev,
- r10_bio->devs[slot].addr,
- r10_bio->sectors,
- &first_bad, &bad_sectors))
+ } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+ r10_bio->sectors)) {
set_bit(R10BIO_MadeGood, &r10_bio->state);
+ }
rdev_dec_pending(rdev, mddev);
@@ -2597,11 +2567,8 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
int sectors, struct page *page, enum req_op op)
{
- sector_t first_bad;
- int bad_sectors;
-
- if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
- && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ if (rdev_has_badblock(rdev, sector, sectors) &&
+ (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
/* success */
@@ -2658,16 +2625,14 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s = PAGE_SIZE >> 9;
do {
- sector_t first_bad;
- int bad_sectors;
-
d = r10_bio->devs[sl].devnum;
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
- is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
- &first_bad, &bad_sectors) == 0) {
+ rdev_has_badblock(rdev,
+ r10_bio->devs[sl].addr + sect,
+ s) == 0) {
atomic_inc(&rdev->nr_pending);
success = sync_page_io(rdev,
r10_bio->devs[sl].addr +
@@ -4002,14 +3967,26 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
-static void raid10_set_io_opt(struct r10conf *conf)
+static unsigned int raid10_nr_stripes(struct r10conf *conf)
{
- int raid_disks = conf->geo.raid_disks;
+ unsigned int raid_disks = conf->geo.raid_disks;
+
+ if (conf->geo.raid_disks % conf->geo.near_copies)
+ return raid_disks;
+ return raid_disks / conf->geo.near_copies;
+}
- if (!(conf->geo.raid_disks % conf->geo.near_copies))
- raid_disks /= conf->geo.near_copies;
- blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
- raid_disks);
+static int raid10_set_queue_limits(struct mddev *mddev)
+{
+ struct r10conf *conf = mddev->private;
+ struct queue_limits lim;
+
+ blk_set_stacking_limits(&lim);
+ lim.max_write_zeroes_sectors = 0;
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+ mddev_stack_rdev_limits(mddev, &lim);
+ return queue_limits_set(mddev->gendisk->queue, &lim);
}
static int raid10_run(struct mddev *mddev)
@@ -4021,6 +3998,7 @@ static int raid10_run(struct mddev *mddev)
sector_t size;
sector_t min_offset_diff = 0;
int first = 1;
+ int ret = -EIO;
if (mddev->private == NULL) {
conf = setup_conf(mddev);
@@ -4047,12 +4025,6 @@ static int raid10_run(struct mddev *mddev)
}
}
- if (mddev->queue) {
- blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- raid10_set_io_opt(conf);
- }
-
rdev_for_each(rdev, mddev) {
long long diff;
@@ -4081,14 +4053,16 @@ static int raid10_run(struct mddev *mddev)
if (first || diff < min_offset_diff)
min_offset_diff = diff;
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
disk->head_position = 0;
first = 0;
}
+ if (!mddev_is_dm(conf->mddev)) {
+ ret = raid10_set_queue_limits(mddev);
+ if (ret)
+ goto out_free_conf;
+ }
+
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
pr_err("md/raid10:%s: not enough operational mirrors.\n",
@@ -4175,11 +4149,7 @@ static int raid10_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto out_free_conf;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
return 0;
@@ -4189,7 +4159,7 @@ out_free_conf:
raid10_free_conf(conf);
mddev->private = NULL;
out:
- return -EIO;
+ return ret;
}
static void raid10_free(struct mddev *mddev, void *priv)
@@ -4573,16 +4543,8 @@ out:
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- ret = -EAGAIN;
- goto abort;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
@@ -4966,8 +4928,7 @@ static void end_reshape(struct r10conf *conf)
conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
- if (conf->mddev->queue)
- raid10_set_io_opt(conf);
+ mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index da4ba736c4f0..a70cbec12ed0 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1393,7 +1393,8 @@ int ppl_init_log(struct r5conf *conf)
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
ppl_conf->block_size = 512;
} else {
- ppl_conf->block_size = queue_logical_block_size(mddev->queue);
+ ppl_conf->block_size =
+ queue_logical_block_size(mddev->gendisk->queue);
}
for (i = 0; i < ppl_conf->count; i++) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8497880135ee..d874abfc1836 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -760,6 +761,7 @@ enum stripe_result {
STRIPE_RETRY,
STRIPE_SCHEDULE_AND_RETRY,
STRIPE_FAIL,
+ STRIPE_WAIT_RESHAPE,
};
struct stripe_request_ctx {
@@ -1210,10 +1212,8 @@ again:
*/
while (op_is_write(op) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
- sector_t first_bad;
- int bad_sectors;
- int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors);
+ int bad = rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf));
if (!bad)
break;
@@ -1295,10 +1295,7 @@ again:
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
- if (conf->mddev->gendisk)
- trace_block_bio_remap(bi,
- disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector);
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, bi);
else
@@ -1342,10 +1339,7 @@ again:
*/
if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
- if (conf->mddev->gendisk)
- trace_block_bio_remap(rbi,
- disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector);
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, rbi);
else
@@ -2412,7 +2406,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
atomic_inc(&conf->active_stripes);
raid5_release_stripe(sh);
- conf->max_nr_stripes++;
+ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
return 1;
}
@@ -2422,12 +2416,12 @@ static int grow_stripes(struct r5conf *conf, int num)
size_t namelen = sizeof(conf->cache_name[0]);
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- if (conf->mddev->gendisk)
+ if (mddev_is_dm(conf->mddev))
snprintf(conf->cache_name[0], namelen,
- "raid%d-%s", conf->level, mdname(conf->mddev));
+ "raid%d-%p", conf->level, conf->mddev);
else
snprintf(conf->cache_name[0], namelen,
- "raid%d-%p", conf->level, conf->mddev);
+ "raid%d-%s", conf->level, mdname(conf->mddev));
snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
conf->active_name = 0;
@@ -2707,7 +2701,7 @@ static int drop_one_stripe(struct r5conf *conf)
shrink_buffers(sh);
free_stripe(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
- conf->max_nr_stripes--;
+ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
return 1;
}
@@ -2855,8 +2849,6 @@ static void raid5_end_write_request(struct bio *bi)
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
struct md_rdev *rdev;
- sector_t first_bad;
- int bad_sectors;
int replacement = 0;
for (i = 0 ; i < disks; i++) {
@@ -2888,9 +2880,8 @@ static void raid5_end_write_request(struct bio *bi)
if (replacement) {
if (bi->bi_status)
md_error(conf->mddev, rdev);
- else if (is_badblock(rdev, sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors))
+ else if (rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf)))
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (bi->bi_status) {
@@ -2900,9 +2891,8 @@ static void raid5_end_write_request(struct bio *bi)
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
- } else if (is_badblock(rdev, sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors)) {
+ } else if (rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf))) {
set_bit(R5_MadeGood, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags))
/* That was a successful write so make
@@ -4205,10 +4195,9 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
- if (conf->mddev->queue)
- blk_add_trace_msg(conf->mddev->queue,
- "raid5 rmw %llu %d",
- (unsigned long long)sh->sector, rmw);
+ mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d",
+ sh->sector, rmw);
+
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_InJournal, &dev->flags) &&
@@ -4285,10 +4274,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_DELAYED, &sh->state);
}
}
- if (rcw && conf->mddev->queue)
- blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
- (unsigned long long)sh->sector,
- rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
+ if (rcw && !mddev_is_dm(conf->mddev))
+ blk_add_trace_msg(conf->mddev->gendisk->queue,
+ "raid5 rcw %llu %d %d %d",
+ (unsigned long long)sh->sector, rcw, qread,
+ test_bit(STRIPE_DELAYED, &sh->state));
}
if (rcw > disks && rmw > disks &&
@@ -4674,8 +4664,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* Now to look around and see what can be done */
for (i=disks; i--; ) {
struct md_rdev *rdev;
- sector_t first_bad;
- int bad_sectors;
int is_bad = 0;
dev = &sh->dev[i];
@@ -4719,8 +4707,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
rdev = conf->disks[i].replacement;
if (rdev && !test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
- !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors))
+ !rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf)))
set_bit(R5_ReadRepl, &dev->flags);
else {
if (rdev && !test_bit(Faulty, &rdev->flags))
@@ -4733,8 +4721,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev) {
- is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors);
+ is_bad = rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf));
if (s->blocked_rdev == NULL
&& (test_bit(Blocked, &rdev->flags)
|| is_bad < 0)) {
@@ -5463,8 +5451,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
struct r5conf *conf = mddev->private;
struct bio *align_bio;
struct md_rdev *rdev;
- sector_t sector, end_sector, first_bad;
- int bad_sectors, dd_idx;
+ sector_t sector, end_sector;
+ int dd_idx;
bool did_inc;
if (!in_chunk_boundary(mddev, raid_bio)) {
@@ -5493,8 +5481,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
atomic_inc(&rdev->nr_pending);
- if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
- &bad_sectors)) {
+ if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) {
rdev_dec_pending(rdev, mddev);
return 0;
}
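
The hunks above replace open-coded is_badblock() calls whose first_bad/bad_sectors outputs were never used. As a minimal sketch of what such a helper presumably looks like (the real definition lives in md.h, outside this diff), it simply discards the range outputs:

static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
				    int sectors)
{
	sector_t first_bad;
	int bad_sectors;

	/* callers only care whether any bad block overlaps [s, s + sectors) */
	return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}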
@@ -5530,9 +5517,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
spin_unlock_irq(&conf->device_lock);
}
- if (mddev->gendisk)
- trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
- raid_bio->bi_iter.bi_sector);
+ mddev_trace_remap(mddev, align_bio, raid_bio->bi_iter.bi_sector);
submit_bio_noacct(align_bio);
return 1;
}
@@ -5701,8 +5686,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
}
release_inactive_stripe_list(conf, cb->temp_inactive_list,
NR_STRIPE_HASH_LOCKS);
- if (mddev->queue)
- trace_block_unplug(mddev->queue, cnt, !from_schedule);
+ if (!mddev_is_dm(mddev))
+ trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule);
kfree(cb);
}
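
Several hunks in this file swap open-coded mddev->gendisk / mddev->queue checks for mddev_is_dm()-aware helpers. A rough sketch of the shape of those helpers, assuming they only guard the existing tracepoints for the dm-raid case where the mddev has no gendisk of its own (the real definitions are in md.h, not in this diff):

static inline bool mddev_is_dm(struct mddev *mddev)
{
	return !mddev->gendisk;
}

static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
				     sector_t sector)
{
	if (!mddev_is_dm(mddev))
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
}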
@@ -5946,7 +5931,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
if (ahead_of_reshape(mddev, logical_sector,
conf->reshape_safe)) {
spin_unlock_irq(&conf->device_lock);
- return STRIPE_SCHEDULE_AND_RETRY;
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
}
spin_unlock_irq(&conf->device_lock);
@@ -6025,6 +6011,12 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
out_release:
raid5_release_stripe(sh);
+out:
+ if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) {
+ bi->bi_status = BLK_STS_RESOURCE;
+ ret = STRIPE_WAIT_RESHAPE;
+ pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress");
+ }
return ret;
}
@@ -6146,7 +6138,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
bi);
- if (res == STRIPE_FAIL)
+ if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE)
break;
if (res == STRIPE_RETRY)
@@ -6184,6 +6176,11 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
if (rw == WRITE)
md_write_end(mddev);
+ if (res == STRIPE_WAIT_RESHAPE) {
+ md_free_cloned_bio(bi);
+ return false;
+ }
+
bio_endio(bi);
return true;
}
@@ -6773,7 +6770,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+ * Waiting on MD_SB_CHANGE_PENDING below may deadlock,
+ * because md_check_recovery() is needed to clear the
+ * flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
@@ -6820,7 +6828,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
if (size <= 16 || size > 32768)
return -EINVAL;
- conf->min_nr_stripes = size;
+ WRITE_ONCE(conf->min_nr_stripes, size);
mutex_lock(&conf->cache_size_mutex);
while (size < conf->max_nr_stripes &&
drop_one_stripe(conf))
@@ -6832,7 +6840,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL)) {
- conf->min_nr_stripes = conf->max_nr_stripes;
+ WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
result = -ENOMEM;
break;
}
@@ -6967,10 +6975,8 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
pr_debug("md/raid: change stripe_size from %lu to %lu\n",
conf->stripe_size, new);
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- mddev->reshape_position != MaxSector ||
- mddev->sysfs_active) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->reshape_position != MaxSector || mddev->sysfs_active) {
err = -EBUSY;
goto out_unlock;
}
@@ -7084,7 +7090,7 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
if (!conf)
err = -ENODEV;
else if (new != conf->skip_copy) {
- struct request_queue *q = mddev->queue;
+ struct request_queue *q = mddev->gendisk->queue;
conf->skip_copy = new;
if (new)
@@ -7390,11 +7396,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct r5conf *conf = shrink->private_data;
+ int max_stripes = READ_ONCE(conf->max_nr_stripes);
+ int min_stripes = READ_ONCE(conf->min_nr_stripes);
- if (conf->max_nr_stripes < conf->min_nr_stripes)
+ if (max_stripes < min_stripes)
/* unlikely, but not impossible */
return 0;
- return conf->max_nr_stripes - conf->min_nr_stripes;
+ return max_stripes - min_stripes;
}
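
raid5_cache_count() runs from the shrinker without conf->cache_size_mutex, so the writers above switch to WRITE_ONCE() and the reader pairs them with READ_ONCE() to get single, untorn accesses. A standalone userspace analogue of the pattern, using C11 relaxed atomics as a stand-in for READ_ONCE()/WRITE_ONCE() (illustration only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int max_nr_stripes = 256;
static _Atomic int min_nr_stripes = 256;

static int cache_count(void)
{
	/* lockless reader: one untorn load per field, no ordering implied */
	int max = atomic_load_explicit(&max_nr_stripes, memory_order_relaxed);
	int min = atomic_load_explicit(&min_nr_stripes, memory_order_relaxed);

	return max < min ? 0 : max - min;	/* unlikely, but not impossible */
}

int main(void)
{
	/* writer side (in the kernel this runs under cache_size_mutex) */
	atomic_store_explicit(&max_nr_stripes, 300, memory_order_relaxed);
	printf("%d\n", cache_count());		/* prints 44 */
	return 0;
}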
static struct r5conf *setup_conf(struct mddev *mddev)
@@ -7684,10 +7692,65 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
-static void raid5_set_io_opt(struct r5conf *conf)
+static int raid5_set_limits(struct mddev *mddev)
{
- blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
- (conf->raid_disks - conf->max_degraded));
+ struct r5conf *conf = mddev->private;
+ struct queue_limits lim;
+ int data_disks, stripe;
+ struct md_rdev *rdev;
+
+ /*
+ * The read-ahead size must cover two whole stripes, which is
+ * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices.
+ */
+ data_disks = conf->previous_raid_disks - conf->max_degraded;
+
+ /*
+ * We can only discard a whole stripe. It doesn't make sense to
+ * discard data disk but write parity disk
+ */
+ stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
+
+ blk_set_stacking_limits(&lim);
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
+ lim.raid_partial_stripes_expensive = 1;
+ lim.discard_granularity = stripe;
+ lim.max_write_zeroes_sectors = 0;
+ mddev_stack_rdev_limits(mddev, &lim);
+ rdev_for_each(rdev, mddev)
+ queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
+ mddev->gendisk->disk_name);
+
+ /*
+ * Zeroing is required for discard, otherwise data could be lost.
+ *
+ * Consider a scenario: discard a stripe (the stripe could be
+ * inconsistent if discard_zeroes_data is 0); write one disk of the
+ * stripe (the stripe could be inconsistent again depending on which
+ * disks are used to calculate parity); the disk is broken; the stripe
+ * data of this disk is lost.
+ *
+ * We only allow DISCARD if the sysadmin has confirmed that only safe
+ * devices are in use by setting a module parameter. A better idea
+ * might be to turn DISCARD into WRITE_ZEROES requests, as that is
+ * required to be safe.
+ */
+ if (!devices_handle_discard_safely ||
+ lim.max_discard_sectors < (stripe >> 9) ||
+ lim.discard_granularity < stripe)
+ lim.max_hw_discard_sectors = 0;
+
+ /*
+ * Requests require having a bitmap for each stripe.
+ * Limit the max sectors based on this.
+ */
+ lim.max_hw_sectors = RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf);
+
+ /* No restrictions on the number of segments in the request */
+ lim.max_segments = USHRT_MAX;
+
+ return queue_limits_set(mddev->gendisk->queue, &lim);
}
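
To make the discard_granularity computation above concrete, here is a standalone sketch of the same arithmetic; the 512 KiB chunk and four data disks are made-up example values:

#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long chunk_sectors = 1024;	/* 512 KiB chunk, in 512-byte sectors */
	unsigned long data_disks = 4;		/* e.g. a 6-disk RAID6 */
	unsigned long stripe;

	stripe = roundup_pow_of_two_ul(data_disks * (chunk_sectors << 9));
	printf("discard granularity: %lu bytes\n", stripe);	/* 2097152 (2 MiB) */
	return 0;
}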
static int raid5_run(struct mddev *mddev)
@@ -7700,6 +7763,7 @@ static int raid5_run(struct mddev *mddev)
int i;
long long min_offset_diff = 0;
int first = 1;
+ int ret = -EIO;
if (mddev->recovery_cp != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
@@ -7936,11 +8000,7 @@ static int raid5_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto abort;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
/* Ok, everything is just fine now */
@@ -7952,66 +8012,10 @@ static int raid5_run(struct mddev *mddev)
mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- if (mddev->queue) {
- int chunk_size;
- /* read-ahead size must cover two whole stripes, which
- * is 2 * (datadisks) * chunksize where 'n' is the
- * number of raid devices
- */
- int data_disks = conf->previous_raid_disks - conf->max_degraded;
- int stripe = data_disks *
- ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-
- chunk_size = mddev->chunk_sectors << 9;
- blk_queue_io_min(mddev->queue, chunk_size);
- raid5_set_io_opt(conf);
- mddev->queue->limits.raid_partial_stripes_expensive = 1;
- /*
- * We can only discard a whole stripe. It doesn't make sense to
- * discard data disk but write parity disk
- */
- stripe = stripe * PAGE_SIZE;
- stripe = roundup_pow_of_two(stripe);
- mddev->queue->limits.discard_granularity = stripe;
-
- blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
- rdev_for_each(rdev, mddev) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->new_data_offset << 9);
- }
-
- /*
- * zeroing is required, otherwise data
- * could be lost. Consider a scenario: discard a stripe
- * (the stripe could be inconsistent if
- * discard_zeroes_data is 0); write one disk of the
- * stripe (the stripe could be inconsistent again
- * depending on which disks are used to calculate
- * parity); the disk is broken; The stripe data of this
- * disk is lost.
- *
- * We only allow DISCARD if the sysadmin has confirmed that
- * only safe devices are in use by setting a module parameter.
- * A better idea might be to turn DISCARD into WRITE_ZEROES
- * requests, as that is required to be safe.
- */
- if (!devices_handle_discard_safely ||
- mddev->queue->limits.max_discard_sectors < (stripe >> 9) ||
- mddev->queue->limits.discard_granularity < stripe)
- blk_queue_max_discard_sectors(mddev->queue, 0);
-
- /*
- * Requests require having a bitmap for each stripe.
- * Limit the max sectors based on this.
- */
- blk_queue_max_hw_sectors(mddev->queue,
- RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf));
-
- /* No restrictions on the number of segments in the request */
- blk_queue_max_segments(mddev->queue, USHRT_MAX);
+ if (!mddev_is_dm(mddev)) {
+ ret = raid5_set_limits(mddev);
+ if (ret)
+ goto abort;
}
if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
@@ -8024,7 +8028,7 @@ abort:
free_conf(conf);
mddev->private = NULL;
pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
- return -EIO;
+ return ret;
}
static void raid5_free(struct mddev *mddev, void *priv)
@@ -8506,29 +8510,8 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- mddev->recovery = 0;
- spin_lock_irq(&conf->device_lock);
- write_seqcount_begin(&conf->gen_lock);
- mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
- mddev->new_chunk_sectors =
- conf->chunk_sectors = conf->prev_chunk_sectors;
- mddev->new_layout = conf->algorithm = conf->prev_algo;
- rdev_for_each(rdev, mddev)
- rdev->new_data_offset = rdev->data_offset;
- smp_wmb();
- conf->generation --;
- conf->reshape_progress = MaxSector;
- mddev->reshape_position = MaxSector;
- write_seqcount_end(&conf->gen_lock);
- spin_unlock_irq(&conf->device_lock);
- return -EAGAIN;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
}
@@ -8556,8 +8539,8 @@ static void end_reshape(struct r5conf *conf)
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
- if (conf->mddev->queue)
- raid5_set_io_opt(conf);
+ mddev_update_io_opt(conf->mddev,
+ conf->raid_disks - conf->max_degraded);
}
}
@@ -8934,6 +8917,18 @@ static int raid5_start(struct mddev *mddev)
return r5l_start(conf->log);
}
+/*
+ * This is only used for dm-raid456; the caller has already frozen sync_thread,
+ * hence if reshape is still in progress, IO that is waiting for reshape can
+ * never complete now, so wake up and handle that IO.
+ */
+static void raid5_prepare_suspend(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ wake_up(&conf->wait_for_overlap);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -8957,6 +8952,7 @@ static struct md_personality raid6_personality =
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static struct md_personality raid5_personality =
{
@@ -8981,6 +8977,7 @@ static struct md_personality raid5_personality =
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static struct md_personality raid4_personality =
@@ -9006,6 +9003,7 @@ static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static int __init raid5_init(void)
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index e21287d50c15..e1ae0f9fad43 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -674,7 +674,7 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_status = pm_runtime_get_if_active(&client->dev, true);
+ pm_status = pm_runtime_get_if_active(&client->dev);
if (!pm_status)
return 0;
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
index 4fba4c2cb064..541bf74581d2 100644
--- a/drivers/media/i2c/ov64a40.c
+++ b/drivers/media/i2c/ov64a40.c
@@ -3287,7 +3287,7 @@ static int ov64a40_set_ctrl(struct v4l2_ctrl *ctrl)
exp_max, 1, exp_val);
}
- pm_status = pm_runtime_get_if_active(ov64a40->dev, true);
+ pm_status = pm_runtime_get_if_active(ov64a40->dev);
if (!pm_status)
return 0;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 2785935da497..558152575d10 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2091,9 +2091,6 @@ static int tc358743_probe(struct i2c_client *client)
state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
sd->dev = &client->dev;
- err = v4l2_async_register_subdev(sd);
- if (err < 0)
- goto err_hdl;
mutex_init(&state->confctl_mutex);
@@ -2151,6 +2148,10 @@ static int tc358743_probe(struct i2c_client *client)
if (err)
goto err_work_queues;
+ err = v4l2_async_register_subdev(sd);
+ if (err < 0)
+ goto err_work_queues;
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
index 2806887514dc..19bd923a7315 100644
--- a/drivers/media/i2c/thp7312.c
+++ b/drivers/media/i2c/thp7312.c
@@ -1052,7 +1052,7 @@ static int thp7312_s_ctrl(struct v4l2_ctrl *ctrl)
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
return -EINVAL;
- if (!pm_runtime_get_if_active(thp7312->dev, true))
+ if (!pm_runtime_get_if_active(thp7312->dev))
return 0;
switch (ctrl->id) {
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 5bfb8a06202e..9bcf10a77fd3 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -144,7 +144,7 @@ static int match_spi_adap(struct device *dev, void *data)
return to_spi_device(dev) ? 1 : 0;
}
-static struct spi_master *get_spi_adap(struct platform_device *pdev)
+static struct spi_controller *get_spi_adap(struct platform_device *pdev)
{
struct device *dev;
@@ -152,7 +152,7 @@ static struct spi_master *get_spi_adap(struct platform_device *pdev)
dev = device_find_child(&pdev->dev, NULL, match_spi_adap);
mutex_unlock(&pdev->dev.mutex);
- return dev ? container_of(dev, struct spi_master, dev) : NULL;
+ return dev ? container_of(dev, struct spi_controller, dev) : NULL;
}
static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
@@ -179,7 +179,7 @@ static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
};
struct pci_dev *pdev = mgbdev->pdev;
struct device *dev = &pdev->dev;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct spi_device *spi_dev;
u32 irq;
int rv, id;
@@ -207,8 +207,8 @@ static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
return PTR_ERR(mgbdev->spi_pdev);
}
- master = get_spi_adap(mgbdev->spi_pdev);
- if (!master) {
+ ctlr = get_spi_adap(mgbdev->spi_pdev);
+ if (!ctlr) {
dev_err(dev, "failed to get SPI adapter\n");
rv = -EINVAL;
goto err_pdev;
@@ -242,8 +242,8 @@ static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
spi_info.platform_data = &mgbdev->flash_data;
- spi_dev = spi_new_device(master, &spi_info);
- put_device(&master->dev);
+ spi_dev = spi_new_device(ctlr, &spi_info);
+ put_device(&ctlr->dev);
if (!spi_dev) {
dev_err(dev, "failed to create MTD device\n");
rv = -EINVAL;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
index 526042d8afae..e90aa1c1584c 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -35,7 +35,7 @@ struct netup_spi_regs {
struct netup_spi {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct netup_spi_regs __iomem *regs;
u8 __iomem *mmio;
spinlock_t lock;
@@ -78,7 +78,7 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
reg = readw(&spi->regs->control_stat);
if (!(reg & NETUP_SPI_CTRL_IRQ)) {
spin_unlock_irqrestore(&spi->lock, flags);
- dev_dbg(&spi->master->dev,
+ dev_dbg(&spi->ctlr->dev,
"%s(): not mine interrupt\n", __func__);
return IRQ_NONE;
}
@@ -88,15 +88,15 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
spi->state = SPI_STATE_DONE;
wake_up(&spi->waitq);
spin_unlock_irqrestore(&spi->lock, flags);
- dev_dbg(&spi->master->dev,
+ dev_dbg(&spi->ctlr->dev,
"%s(): SPI interrupt handled\n", __func__);
return IRQ_HANDLED;
}
-static int netup_spi_transfer(struct spi_master *master,
+static int netup_spi_transfer(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct netup_spi *spi = spi_master_get_devdata(master);
+ struct netup_spi *spi = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
int result = 0;
u32 tr_size;
@@ -131,7 +131,7 @@ static int netup_spi_transfer(struct spi_master *master,
NETUP_SPI_CTRL_START |
(frag_last ? NETUP_SPI_CTRL_LAST_CS : 0),
&spi->regs->control_stat);
- dev_dbg(&spi->master->dev,
+ dev_dbg(&spi->ctlr->dev,
"%s(): control_stat 0x%04x\n",
__func__, readw(&spi->regs->control_stat));
wait_event_timeout(spi->waitq,
@@ -144,11 +144,11 @@ static int netup_spi_transfer(struct spi_master *master,
}
} else {
if (spi->state == SPI_STATE_START) {
- dev_dbg(&spi->master->dev,
+ dev_dbg(&spi->ctlr->dev,
"%s(): transfer timeout\n",
__func__);
} else {
- dev_dbg(&spi->master->dev,
+ dev_dbg(&spi->ctlr->dev,
"%s(): invalid state %d\n",
__func__, spi->state);
}
@@ -161,7 +161,7 @@ static int netup_spi_transfer(struct spi_master *master,
}
done:
msg->status = result;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return result;
}
@@ -172,30 +172,30 @@ static int netup_spi_setup(struct spi_device *spi)
int netup_spi_init(struct netup_unidvb_dev *ndev)
{
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct netup_spi *nspi;
- master = devm_spi_alloc_master(&ndev->pci_dev->dev,
- sizeof(struct netup_spi));
- if (!master) {
+ ctlr = devm_spi_alloc_master(&ndev->pci_dev->dev,
+ sizeof(struct netup_spi));
+ if (!ctlr) {
dev_err(&ndev->pci_dev->dev,
"%s(): unable to alloc SPI master\n", __func__);
return -EINVAL;
}
- nspi = spi_master_get_devdata(master);
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
- master->bus_num = -1;
- master->num_chipselect = 1;
- master->transfer_one_message = netup_spi_transfer;
- master->setup = netup_spi_setup;
+ nspi = spi_controller_get_devdata(ctlr);
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->transfer_one_message = netup_spi_transfer;
+ ctlr->setup = netup_spi_setup;
spin_lock_init(&nspi->lock);
init_waitqueue_head(&nspi->waitq);
- nspi->master = master;
+ nspi->ctlr = ctlr;
nspi->regs = (struct netup_spi_regs __iomem *)(ndev->bmmio0 + 0x4000);
writew(2, &nspi->regs->clock_divider);
writew(NETUP_UNIDVB_IRQ_SPI, ndev->bmmio0 + REG_IMASK_SET);
ndev->spi = nspi;
- if (spi_register_master(master)) {
+ if (spi_register_controller(ctlr)) {
ndev->spi = NULL;
dev_err(&ndev->pci_dev->dev,
"%s(): unable to register SPI bus\n", __func__);
@@ -207,8 +207,8 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
ndev->pci_bus,
ndev->pci_slot,
ndev->pci_func);
- if (!spi_new_device(master, &netup_spi_board)) {
- spi_unregister_master(master);
+ if (!spi_new_device(ctlr, &netup_spi_board)) {
+ spi_unregister_controller(ctlr);
ndev->spi = NULL;
dev_err(&ndev->pci_dev->dev,
"%s(): unable to create SPI device\n", __func__);
@@ -227,7 +227,7 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
if (!spi)
return;
- spi_unregister_master(spi->master);
+ spi_unregister_controller(spi->ctlr);
spin_lock_irqsave(&spi->lock, flags);
reg = readw(&spi->regs->control_stat);
writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index a712dd4f02a5..ce206b709754 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
@@ -114,7 +115,8 @@ static void venus_sys_error_handler(struct work_struct *work)
pm_runtime_put_sync(core->dev);
for (i = 0; i < max_attempts; i++) {
- if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0]))
+ if (!core->pmdomains ||
+ !pm_runtime_active(core->pmdomains->pd_devs[0]))
break;
usleep_range(1000, 1500);
}
@@ -705,7 +707,7 @@ static const struct venus_resources sdm845_res_v2 = {
.vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
.vcodec1_clks = { "vcodec1_core", "vcodec1_bus" },
.vcodec_clks_num = 2,
- .vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" },
+ .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0", "vcodec1" },
.vcodec_pmdomains_num = 3,
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 2,
@@ -754,7 +756,7 @@ static const struct venus_resources sc7180_res = {
.clks_num = 3,
.vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
.vcodec_clks_num = 2,
- .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
@@ -811,7 +813,7 @@ static const struct venus_resources sm8250_res = {
.resets_num = 2,
.vcodec0_clks = { "vcodec0_core" },
.vcodec_clks_num = 1,
- .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
.opp_pmdomain = (const char *[]) { "mx", NULL },
.vcodec_num = 1,
@@ -870,7 +872,7 @@ static const struct venus_resources sc7280_res = {
.clks_num = 3,
.vcodec0_clks = {"vcodec_core", "vcodec_bus"},
.vcodec_clks_num = 2,
- .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 4a633261ece4..7ef341bf21cc 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -25,7 +25,6 @@
#define VIDC_CLKS_NUM_MAX 4
#define VIDC_VCODEC_CLKS_NUM_MAX 2
-#define VIDC_PMDOMAINS_NUM_MAX 3
#define VIDC_RESETS_NUM_MAX 2
extern int venus_fw_debug;
@@ -72,7 +71,7 @@ struct venus_resources {
const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX];
const char * const vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX];
unsigned int vcodec_clks_num;
- const char * const vcodec_pmdomains[VIDC_PMDOMAINS_NUM_MAX];
+ const char **vcodec_pmdomains;
unsigned int vcodec_pmdomains_num;
const char **opp_pmdomain;
unsigned int vcodec_num;
@@ -134,7 +133,7 @@ struct venus_format {
* @video_path: an interconnect handle to video to/from memory path
* @cpucfg_path: an interconnect handle to cpu configuration path
* @has_opp_table: does OPP table exist
- * @pmdomains: an array of pmdomains struct device pointers
+ * @pmdomains: a pointer to a list of pmdomains
* @opp_dl_venus: an device-link for device OPP
* @opp_pmdomain: an OPP power-domain
* @resets: an array of reset signals
@@ -187,7 +186,7 @@ struct venus_core {
struct icc_path *video_path;
struct icc_path *cpucfg_path;
bool has_opp_table;
- struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX];
+ struct dev_pm_domain_list *pmdomains;
struct device_link *opp_dl_venus;
struct device *opp_pmdomain;
struct reset_control *resets[VIDC_RESETS_NUM_MAX];
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index a1b127caa90a..502822059498 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -455,7 +455,7 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret)
return ret;
- ret = pm_runtime_put_sync(core->pmdomains[1]);
+ ret = pm_runtime_put_sync(core->pmdomains->pd_devs[1]);
if (ret < 0)
return ret;
}
@@ -471,7 +471,7 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret)
return ret;
- ret = pm_runtime_put_sync(core->pmdomains[2]);
+ ret = pm_runtime_put_sync(core->pmdomains->pd_devs[2]);
if (ret < 0)
return ret;
}
@@ -484,7 +484,7 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
int ret;
if (coreid_mask & VIDC_CORE_ID_1) {
- ret = pm_runtime_get_sync(core->pmdomains[1]);
+ ret = pm_runtime_get_sync(core->pmdomains->pd_devs[1]);
if (ret < 0)
return ret;
@@ -502,7 +502,7 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
}
if (coreid_mask & VIDC_CORE_ID_2) {
- ret = pm_runtime_get_sync(core->pmdomains[2]);
+ ret = pm_runtime_get_sync(core->pmdomains->pd_devs[2]);
if (ret < 0)
return ret;
@@ -860,19 +860,18 @@ static int vcodec_domains_get(struct venus_core *core)
struct device **opp_virt_dev;
struct device *dev = core->dev;
const struct venus_resources *res = core->res;
- struct device *pd;
- unsigned int i;
+ struct dev_pm_domain_attach_data vcodec_data = {
+ .pd_names = res->vcodec_pmdomains,
+ .num_pd_names = res->vcodec_pmdomains_num,
+ .pd_flags = PD_FLAG_NO_DEV_LINK,
+ };
if (!res->vcodec_pmdomains_num)
goto skip_pmdomains;
- for (i = 0; i < res->vcodec_pmdomains_num; i++) {
- pd = dev_pm_domain_attach_by_name(dev,
- res->vcodec_pmdomains[i]);
- if (IS_ERR_OR_NULL(pd))
- return pd ? PTR_ERR(pd) : -ENODATA;
- core->pmdomains[i] = pd;
- }
+ ret = dev_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains);
+ if (ret < 0)
+ return ret;
skip_pmdomains:
if (!core->res->opp_pmdomain)
@@ -896,30 +895,14 @@ skip_pmdomains:
return 0;
opp_attach_err:
- for (i = 0; i < res->vcodec_pmdomains_num; i++) {
- if (IS_ERR_OR_NULL(core->pmdomains[i]))
- continue;
- dev_pm_domain_detach(core->pmdomains[i], true);
- }
-
+ dev_pm_domain_detach_list(core->pmdomains);
return ret;
}
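
The attach path above replaces a per-name dev_pm_domain_attach_by_name() loop with a single dev_pm_domain_attach_list() call. A condensed usage sketch, based only on the fields visible in this diff (pd_names/num_pd_names/pd_flags on the attach data, pd_devs[] on the returned list); the function name and the domain names here are illustrative:

static int venus_attach_pmdomains_sketch(struct device *dev,
					 struct dev_pm_domain_list **pds)
{
	static const char * const names[] = { "venus", "vcodec0" };
	struct dev_pm_domain_attach_data data = {
		.pd_names = names,
		.num_pd_names = ARRAY_SIZE(names),
		.pd_flags = PD_FLAG_NO_DEV_LINK,
	};
	int ret;

	ret = dev_pm_domain_attach_list(dev, &data, pds);
	if (ret < 0)
		return ret;

	/* e.g. pm_runtime_get_sync((*pds)->pd_devs[1]) powers up "vcodec0" */
	return 0;
}

dev_pm_domain_detach_list() then tears the whole list down, which is why the error and put paths above collapse to a single call.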
static void vcodec_domains_put(struct venus_core *core)
{
- const struct venus_resources *res = core->res;
- unsigned int i;
+ dev_pm_domain_detach_list(core->pmdomains);
- if (!res->vcodec_pmdomains_num)
- goto skip_pmdomains;
-
- for (i = 0; i < res->vcodec_pmdomains_num; i++) {
- if (IS_ERR_OR_NULL(core->pmdomains[i]))
- continue;
- dev_pm_domain_detach(core->pmdomains[i], true);
- }
-
-skip_pmdomains:
if (!core->has_opp_table)
return;
@@ -1035,7 +1018,8 @@ static void core_put_v4(struct venus_core *core)
static int core_power_v4(struct venus_core *core, int on)
{
struct device *dev = core->dev;
- struct device *pmctrl = core->pmdomains[0];
+ struct device *pmctrl = core->pmdomains ?
+ core->pmdomains->pd_devs[0] : NULL;
int ret = 0;
if (on == POWER_ON) {
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 52d82cbe7685..2f7564f26445 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -110,7 +110,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
- if (perfmon_capable())
+ if (bpf_token_capable(prog->aux->token, CAP_PERFMON))
return bpf_get_trace_printk_proto();
fallthrough;
default:
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 9759996ee6a4..5138486abfa0 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -107,7 +107,7 @@ struct msi2500_dev {
struct video_device vdev;
struct v4l2_device v4l2_dev;
struct v4l2_subdev *v4l2_subdev;
- struct spi_master *master;
+ struct spi_controller *ctlr;
/* videobuf2 queue and queued buffers list */
struct vb2_queue vb_queue;
@@ -574,7 +574,7 @@ static void msi2500_disconnect(struct usb_interface *intf)
dev->udev = NULL;
v4l2_device_disconnect(&dev->v4l2_dev);
video_unregister_device(&dev->vdev);
- spi_unregister_master(dev->master);
+ spi_unregister_controller(dev->ctlr);
mutex_unlock(&dev->v4l2_lock);
mutex_unlock(&dev->vb_queue_lock);
@@ -1136,10 +1136,10 @@ static void msi2500_video_release(struct v4l2_device *v)
kfree(dev);
}
-static int msi2500_transfer_one_message(struct spi_master *master,
+static int msi2500_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *m)
{
- struct msi2500_dev *dev = spi_master_get_devdata(master);
+ struct msi2500_dev *dev = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
int ret = 0;
u32 data;
@@ -1154,7 +1154,7 @@ static int msi2500_transfer_one_message(struct spi_master *master,
}
m->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return ret;
}
@@ -1163,7 +1163,7 @@ static int msi2500_probe(struct usb_interface *intf,
{
struct msi2500_dev *dev;
struct v4l2_subdev *sd;
- struct spi_master *master;
+ struct spi_controller *ctlr;
int ret;
static struct spi_board_info board_info = {
.modalias = "msi001",
@@ -1220,30 +1220,30 @@ static int msi2500_probe(struct usb_interface *intf,
}
/* SPI master adapter */
- master = spi_alloc_master(dev->dev, 0);
- if (master == NULL) {
+ ctlr = spi_alloc_master(dev->dev, 0);
+ if (ctlr == NULL) {
ret = -ENOMEM;
goto err_unregister_v4l2_dev;
}
- dev->master = master;
- master->bus_num = -1;
- master->num_chipselect = 1;
- master->transfer_one_message = msi2500_transfer_one_message;
- spi_master_set_devdata(master, dev);
- ret = spi_register_master(master);
+ dev->ctlr = ctlr;
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->transfer_one_message = msi2500_transfer_one_message;
+ spi_controller_set_devdata(ctlr, dev);
+ ret = spi_register_controller(ctlr);
if (ret) {
- spi_master_put(master);
+ spi_controller_put(ctlr);
goto err_unregister_v4l2_dev;
}
/* load v4l2 subdevice */
- sd = v4l2_spi_new_subdev(&dev->v4l2_dev, master, &board_info);
+ sd = v4l2_spi_new_subdev(&dev->v4l2_dev, ctlr, &board_info);
dev->v4l2_subdev = sd;
if (sd == NULL) {
dev_err(dev->dev, "cannot get v4l2 subdevice\n");
ret = -ENODEV;
- goto err_unregister_master;
+ goto err_unregister_controller;
}
/* Register controls */
@@ -1276,8 +1276,8 @@ static int msi2500_probe(struct usb_interface *intf,
return 0;
err_free_controls:
v4l2_ctrl_handler_free(&dev->hdl);
-err_unregister_master:
- spi_unregister_master(dev->master);
+err_unregister_controller:
+ spi_unregister_controller(dev->ctlr);
err_unregister_v4l2_dev:
v4l2_device_unregister(&dev->v4l2_dev);
err_free_mem:
diff --git a/drivers/media/v4l2-core/v4l2-spi.c b/drivers/media/v4l2-core/v4l2-spi.c
index eadecdff7349..a7092c3930d6 100644
--- a/drivers/media/v4l2-core/v4l2-spi.c
+++ b/drivers/media/v4l2-core/v4l2-spi.c
@@ -34,7 +34,7 @@ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
- struct spi_master *master,
+ struct spi_controller *ctlr,
struct spi_board_info *info)
{
struct v4l2_subdev *sd = NULL;
@@ -45,7 +45,7 @@ struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
if (info->modalias[0])
request_module(info->modalias);
- spi = spi_new_device(master, info);
+ spi = spi_new_device(ctlr, info);
if (!spi || !spi->dev.driver)
goto error;
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 434982545be6..8c5ad5c025fa 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -72,7 +72,6 @@ static DEFINE_SPINLOCK(emif_lock);
static unsigned long irq_state;
static LIST_HEAD(device_list);
-#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
struct emif_regs *regs)
{
@@ -140,31 +139,24 @@ static int emif_mr4_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(emif_mr4);
-static int __init_or_module emif_debugfs_init(struct emif_data *emif)
+static void emif_debugfs_init(struct emif_data *emif)
{
- emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
- debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
- &emif_regdump_fops);
- debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
- &emif_mr4_fops);
- return 0;
-}
-
-static void __exit emif_debugfs_exit(struct emif_data *emif)
-{
- debugfs_remove_recursive(emif->debugfs_root);
- emif->debugfs_root = NULL;
-}
-#else
-static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
-{
- return 0;
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
+ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
+ &emif_regdump_fops);
+ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
+ &emif_mr4_fops);
+ }
}
-static inline void __exit emif_debugfs_exit(struct emif_data *emif)
+static void emif_debugfs_exit(struct emif_data *emif)
{
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ debugfs_remove_recursive(emif->debugfs_root);
+ emif->debugfs_root = NULL;
+ }
}
-#endif
/*
* Get bus width used by EMIF. Note that this may be different from the
@@ -679,7 +671,7 @@ static void disable_and_clear_all_interrupts(struct emif_data *emif)
clear_all_interrupts(emif);
}
-static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
+static int setup_interrupts(struct emif_data *emif, u32 irq)
{
u32 interrupts, type;
void __iomem *base = emif->base;
@@ -710,7 +702,7 @@ static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
}
-static void __init_or_module emif_onetime_settings(struct emif_data *emif)
+static void emif_onetime_settings(struct emif_data *emif)
{
u32 pwr_mgmt_ctrl, zq, temp_alert_cfg;
void __iomem *base = emif->base;
@@ -834,8 +826,7 @@ static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
return valid;
}
-#if defined(CONFIG_OF)
-static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
+static void of_get_custom_configs(struct device_node *np_emif,
struct emif_data *emif)
{
struct emif_custom_configs *cust_cfgs = NULL;
@@ -884,7 +875,7 @@ static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
emif->plat_data->custom_configs = cust_cfgs;
}
-static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
+static void of_get_ddr_info(struct device_node *np_emif,
struct device_node *np_ddr,
struct ddr_device_info *dev_info)
{
@@ -918,7 +909,7 @@ static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
dev_info->io_width = __fls(io_width) - 1;
}
-static struct emif_data * __init_or_module of_get_memory_device_details(
+static struct emif_data *of_get_memory_device_details(
struct device_node *np_emif, struct device *dev)
{
struct emif_data *emif = NULL;
@@ -991,16 +982,7 @@ out:
return emif;
}
-#else
-
-static struct emif_data * __init_or_module of_get_memory_device_details(
- struct device_node *np_emif, struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static struct emif_data *__init_or_module get_device_details(
+static struct emif_data *get_device_details(
struct platform_device *pdev)
{
u32 size;
@@ -1104,7 +1086,7 @@ error:
return NULL;
}
-static int __init_or_module emif_probe(struct platform_device *pdev)
+static int emif_probe(struct platform_device *pdev)
{
struct emif_data *emif;
int irq, ret;
@@ -1159,7 +1141,7 @@ error:
return -ENODEV;
}
-static void __exit emif_remove(struct platform_device *pdev)
+static void emif_remove(struct platform_device *pdev)
{
struct emif_data *emif = platform_get_drvdata(pdev);
@@ -1183,7 +1165,8 @@ MODULE_DEVICE_TABLE(of, emif_of_match);
#endif
static struct platform_driver emif_driver = {
- .remove_new = __exit_p(emif_remove),
+ .probe = emif_probe,
+ .remove_new = emif_remove,
.shutdown = emif_shutdown,
.driver = {
.name = "emif",
@@ -1191,7 +1174,7 @@ static struct platform_driver emif_driver = {
},
};
-module_platform_driver_probe(emif_driver, emif_probe);
+module_platform_driver(emif_driver);
MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
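
With probe no longer living in __init text (it has to stay around for deferred probe and rebinding), the driver can use the ordinary module_platform_driver() helper instead of module_platform_driver_probe(). A minimal skeleton of that shape, with hypothetical names, for comparison:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* normal, re-runnable probe: no __init or __init_or_module */
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");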
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
index 47d0ea5f1616..1c63eeacd071 100644
--- a/drivers/memory/stm32-fmc2-ebi.c
+++ b/drivers/memory/stm32-fmc2-ebi.c
@@ -11,6 +11,7 @@
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -20,8 +21,15 @@
#define FMC2_BCR(x) ((x) * 0x8 + FMC2_BCR1)
#define FMC2_BTR(x) ((x) * 0x8 + FMC2_BTR1)
#define FMC2_PCSCNTR 0x20
+#define FMC2_CFGR 0x20
+#define FMC2_SR 0x84
#define FMC2_BWTR1 0x104
#define FMC2_BWTR(x) ((x) * 0x8 + FMC2_BWTR1)
+#define FMC2_SECCFGR 0x300
+#define FMC2_CIDCFGR0 0x30c
+#define FMC2_CIDCFGR(x) ((x) * 0x8 + FMC2_CIDCFGR0)
+#define FMC2_SEMCR0 0x310
+#define FMC2_SEMCR(x) ((x) * 0x8 + FMC2_SEMCR0)
/* Register: FMC2_BCR1 */
#define FMC2_BCR1_CCLKEN BIT(20)
@@ -42,6 +50,7 @@
#define FMC2_BCR_ASYNCWAIT BIT(15)
#define FMC2_BCR_CPSIZE GENMASK(18, 16)
#define FMC2_BCR_CBURSTRW BIT(19)
+#define FMC2_BCR_CSCOUNT GENMASK(21, 20)
#define FMC2_BCR_NBLSET GENMASK(23, 22)
/* Register: FMC2_BTRx/FMC2_BWTRx */
@@ -58,8 +67,28 @@
#define FMC2_PCSCNTR_CSCOUNT GENMASK(15, 0)
#define FMC2_PCSCNTR_CNTBEN(x) BIT((x) + 16)
+/* Register: FMC2_CFGR */
+#define FMC2_CFGR_CLKDIV GENMASK(19, 16)
+#define FMC2_CFGR_CCLKEN BIT(20)
+#define FMC2_CFGR_FMC2EN BIT(31)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_ISOST GENMASK(1, 0)
+
+/* Register: FMC2_CIDCFGR */
+#define FMC2_CIDCFGR_CFEN BIT(0)
+#define FMC2_CIDCFGR_SEMEN BIT(1)
+#define FMC2_CIDCFGR_SCID GENMASK(6, 4)
+#define FMC2_CIDCFGR_SEMWLC1 BIT(17)
+
+/* Register: FMC2_SEMCR */
+#define FMC2_SEMCR_SEM_MUTEX BIT(0)
+#define FMC2_SEMCR_SEMCID GENMASK(6, 4)
+
#define FMC2_MAX_EBI_CE 4
#define FMC2_MAX_BANKS 5
+#define FMC2_MAX_RESOURCES 6
+#define FMC2_CID1 1
#define FMC2_BCR_CPSIZE_0 0x0
#define FMC2_BCR_CPSIZE_128 0x1
@@ -74,6 +103,11 @@
#define FMC2_BCR_MTYP_PSRAM 0x1
#define FMC2_BCR_MTYP_NOR 0x2
+#define FMC2_BCR_CSCOUNT_0 0x0
+#define FMC2_BCR_CSCOUNT_1 0x1
+#define FMC2_BCR_CSCOUNT_64 0x2
+#define FMC2_BCR_CSCOUNT_256 0x3
+
#define FMC2_BXTR_EXTMOD_A 0x0
#define FMC2_BXTR_EXTMOD_B 0x1
#define FMC2_BXTR_EXTMOD_C 0x2
@@ -88,6 +122,7 @@
#define FMC2_BTR_CLKDIV_MAX 0xf
#define FMC2_BTR_DATLAT_MAX 0xf
#define FMC2_PCSCNTR_CSCOUNT_MAX 0xff
+#define FMC2_CFGR_CLKDIV_MAX 0xf
enum stm32_fmc2_ebi_bank {
FMC2_EBI1 = 0,
@@ -101,7 +136,8 @@ enum stm32_fmc2_ebi_register_type {
FMC2_REG_BCR = 1,
FMC2_REG_BTR,
FMC2_REG_BWTR,
- FMC2_REG_PCSCNTR
+ FMC2_REG_PCSCNTR,
+ FMC2_REG_CFGR
};
enum stm32_fmc2_ebi_transaction_type {
@@ -132,16 +168,42 @@ enum stm32_fmc2_ebi_cpsize {
FMC2_CPSIZE_1024 = 1024
};
+enum stm32_fmc2_ebi_cscount {
+ FMC2_CSCOUNT_0 = 0,
+ FMC2_CSCOUNT_1 = 1,
+ FMC2_CSCOUNT_64 = 64,
+ FMC2_CSCOUNT_256 = 256
+};
+
+struct stm32_fmc2_ebi;
+
+struct stm32_fmc2_ebi_data {
+ const struct stm32_fmc2_prop *child_props;
+ unsigned int nb_child_props;
+ u32 fmc2_enable_reg;
+ u32 fmc2_enable_bit;
+ int (*nwait_used_by_ctrls)(struct stm32_fmc2_ebi *ebi);
+ void (*set_setup)(struct stm32_fmc2_ebi *ebi);
+ int (*save_setup)(struct stm32_fmc2_ebi *ebi);
+ int (*check_rif)(struct stm32_fmc2_ebi *ebi, u32 resource);
+ void (*put_sems)(struct stm32_fmc2_ebi *ebi);
+ void (*get_sems)(struct stm32_fmc2_ebi *ebi);
+};
+
struct stm32_fmc2_ebi {
struct device *dev;
struct clk *clk;
struct regmap *regmap;
+ const struct stm32_fmc2_ebi_data *data;
u8 bank_assigned;
+ u8 sem_taken;
+ bool access_granted;
u32 bcr[FMC2_MAX_EBI_CE];
u32 btr[FMC2_MAX_EBI_CE];
u32 bwtr[FMC2_MAX_EBI_CE];
u32 pcscntr;
+ u32 cfgr;
};
/*
@@ -181,8 +243,11 @@ static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if (bcr & FMC2_BCR_MTYP)
return 0;
@@ -195,8 +260,11 @@ static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
return 0;
@@ -209,8 +277,11 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if (bcr & FMC2_BCR_BURSTEN)
return 0;
@@ -218,13 +289,43 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
return -EINVAL;
}
+static int stm32_fmc2_ebi_mp25_check_cclk(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ if (!ebi->access_granted)
+ return -EACCES;
+
+ return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs);
+}
+
+static int stm32_fmc2_ebi_mp25_check_clk_period(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 cfgr;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ if (cfgr & FMC2_CFGR_CCLKEN && !ebi->access_granted)
+ return -EACCES;
+
+ return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs);
+}
+
static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi,
const struct stm32_fmc2_prop *prop,
int cs)
{
u32 bcr;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW))
return 0;
@@ -237,8 +338,11 @@ static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
return 0;
@@ -251,12 +355,18 @@ static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
if (prop->reg_type == FMC2_REG_BWTR)
- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
else
- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ if (ret)
+ return ret;
if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) &&
((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN))
@@ -270,12 +380,19 @@ static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, bcr1;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
- if (cs)
- regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
- else
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
+
+ if (cs) {
+ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
+ if (ret)
+ return ret;
+ } else {
bcr1 = bcr;
+ }
if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN)))
return 0;
@@ -307,18 +424,48 @@ static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
{
u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
u32 bcr, btr, clk_period;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
+ if (ret)
+ return ret;
- regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
if (bcr & FMC2_BCR1_CCLKEN || !cs)
- regmap_read(ebi->regmap, FMC2_BTR1, &btr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR1, &btr);
else
- regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+ if (ret)
+ return ret;
clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
return DIV_ROUND_UP(nb_clk_cycles, clk_period);
}
+static u32 stm32_fmc2_ebi_mp25_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
+ int cs, u32 setup)
+{
+ u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
+ u32 cfgr, btr, clk_period;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ if (cfgr & FMC2_CFGR_CCLKEN) {
+ clk_period = FIELD_GET(FMC2_CFGR_CLKDIV, cfgr) + 1;
+ } else {
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+ if (ret)
+ return ret;
+
+ clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
+ }
+
+ return DIV_ROUND_UP(nb_clk_cycles, clk_period);
+}
+
static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg)
{
switch (reg_type) {
@@ -334,6 +481,9 @@ static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg)
case FMC2_REG_PCSCNTR:
*reg = FMC2_PCSCNTR;
break;
+ case FMC2_REG_CFGR:
+ *reg = FMC2_CFGR;
+ break;
default:
return -EINVAL;
}
@@ -571,11 +721,16 @@ static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi,
if (ret)
return ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
+
if (prop->reg_type == FMC2_REG_BWTR)
- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
else
- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ if (ret)
+ return ret;
if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)
val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX);
@@ -675,6 +830,30 @@ static int stm32_fmc2_ebi_set_clk_period(struct stm32_fmc2_ebi *ebi,
return 0;
}
+static int stm32_fmc2_ebi_mp25_set_clk_period(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val, cfgr;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ if (cfgr & FMC2_CFGR_CCLKEN) {
+ val = setup ? clamp_val(setup - 1, 1, FMC2_CFGR_CLKDIV_MAX) : 1;
+ val = FIELD_PREP(FMC2_CFGR_CLKDIV, val);
+ regmap_update_bits(ebi->regmap, FMC2_CFGR, FMC2_CFGR_CLKDIV, val);
+ } else {
+ val = setup ? clamp_val(setup - 1, 1, FMC2_BTR_CLKDIV_MAX) : 1;
+ val = FIELD_PREP(FMC2_BTR_CLKDIV, val);
+ regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_CLKDIV, val);
+ }
+
+ return 0;
+}
+
static int stm32_fmc2_ebi_set_data_latency(struct stm32_fmc2_ebi *ebi,
const struct stm32_fmc2_prop *prop,
int cs, u32 setup)
@@ -693,11 +872,14 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
int cs, u32 setup)
{
u32 old_val, new_val, pcscntr;
+ int ret;
if (setup < 1)
return 0;
- regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
+ ret = regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
+ if (ret)
+ return ret;
/* Enable counter for the bank */
regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
@@ -717,6 +899,27 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
return 0;
}
+static int stm32_fmc2_ebi_mp25_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ if (setup == FMC2_CSCOUNT_0)
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_0);
+ else if (setup == FMC2_CSCOUNT_1)
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_1);
+ else if (setup <= FMC2_CSCOUNT_64)
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_64);
+ else
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_256);
+
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs),
+ FMC2_BCR_CSCOUNT, val);
+
+ return 0;
+}
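
The bucketing above maps an arbitrary cycle count onto the four CSCOUNT encodings the MP25 hardware offers. A standalone sketch of that mapping with a worked value (the constants mirror the FMC2_CSCOUNT_* and FMC2_BCR_CSCOUNT_* defines added earlier in this patch):

#include <stdio.h>

enum { CSCOUNT_0 = 0, CSCOUNT_1 = 1, CSCOUNT_64 = 64, CSCOUNT_256 = 256 };

static unsigned int cscount_encoding(unsigned int setup)
{
	if (setup == CSCOUNT_0)
		return 0x0;
	if (setup == CSCOUNT_1)
		return 0x1;
	if (setup <= CSCOUNT_64)
		return 0x2;
	return 0x3;
}

int main(void)
{
	/* a 40-cycle max-low-pulse lands in the "64" bucket */
	printf("setup=40 -> encoding 0x%x\n", cscount_encoding(40)); /* 0x2 */
	return 0;
}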
+
static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = {
/* st,fmc2-ebi-cs-trans-type must be the first property */
{
@@ -882,6 +1085,275 @@ static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = {
},
};
+static const struct stm32_fmc2_prop stm32_fmc2_mp25_child_props[] = {
+ /* st,fmc2-ebi-cs-trans-type must be the first property */
+ {
+ .name = "st,fmc2-ebi-cs-transaction-type",
+ .mprop = true,
+ .set = stm32_fmc2_ebi_set_trans_type,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-cclk-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_CFGR,
+ .reg_mask = FMC2_CFGR_CCLKEN,
+ .check = stm32_fmc2_ebi_mp25_check_cclk,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-mux-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_MUXEN,
+ .check = stm32_fmc2_ebi_check_mux,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-buswidth",
+ .reset_val = FMC2_BUSWIDTH_16,
+ .set = stm32_fmc2_ebi_set_buswidth,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-waitpol-high",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITPOL,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-waitcfg-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITCFG,
+ .check = stm32_fmc2_ebi_check_waitcfg,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-wait-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITEN,
+ .check = stm32_fmc2_ebi_check_sync_trans,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-asyncwait-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_ASYNCWAIT,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-cpsize",
+ .check = stm32_fmc2_ebi_check_cpsize,
+ .set = stm32_fmc2_ebi_set_cpsize,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-byte-lane-setup-ns",
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bl_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-address-setup-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_ADDSET_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-address-hold-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_ADDHLD_MAX,
+ .check = stm32_fmc2_ebi_check_address_hold,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-setup-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_DATAST_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-bus-turnaround-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bus_turnaround,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-hold-ns",
+ .reg_type = FMC2_REG_BTR,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-clk-period-ns",
+ .reset_val = FMC2_CFGR_CLKDIV_MAX + 1,
+ .check = stm32_fmc2_ebi_mp25_check_clk_period,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_mp25_set_clk_period,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-latency-ns",
+ .check = stm32_fmc2_ebi_check_sync_trans,
+ .calculate = stm32_fmc2_ebi_mp25_ns_to_clk_period,
+ .set = stm32_fmc2_ebi_set_data_latency,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-address-setup-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_ADDSET_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-address-hold-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_ADDHLD_MAX,
+ .check = stm32_fmc2_ebi_check_address_hold,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-data-setup-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_DATAST_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-bus-turnaround-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bus_turnaround,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-data-hold-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-max-low-pulse-ns",
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_mp25_set_max_low_pulse,
+ },
+};
+
+static int stm32_fmc2_ebi_mp25_check_rif(struct stm32_fmc2_ebi *ebi, u32 resource)
+{
+ u32 seccfgr, cidcfgr, semcr;
+ int cid, ret;
+
+ if (resource >= FMC2_MAX_RESOURCES)
+ return -EINVAL;
+
+ ret = regmap_read(ebi->regmap, FMC2_SECCFGR, &seccfgr);
+ if (ret)
+ return ret;
+
+ if (seccfgr & BIT(resource)) {
+ if (resource)
+ dev_err(ebi->dev, "resource %d is configured as secure\n",
+ resource);
+
+ return -EACCES;
+ }
+
+ ret = regmap_read(ebi->regmap, FMC2_CIDCFGR(resource), &cidcfgr);
+ if (ret)
+ return ret;
+
+ if (!(cidcfgr & FMC2_CIDCFGR_CFEN))
+ /* CID filtering is turned off: access granted */
+ return 0;
+
+ if (!(cidcfgr & FMC2_CIDCFGR_SEMEN)) {
+ /* Static CID mode */
+ cid = FIELD_GET(FMC2_CIDCFGR_SCID, cidcfgr);
+ if (cid != FMC2_CID1) {
+ if (resource)
+ dev_err(ebi->dev, "static CID%d set for resource %d\n",
+ cid, resource);
+
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ /* Pass-list with semaphore mode */
+ if (!(cidcfgr & FMC2_CIDCFGR_SEMWLC1)) {
+ if (resource)
+ dev_err(ebi->dev, "CID1 is block-listed for resource %d\n",
+ resource);
+
+ return -EACCES;
+ }
+
+ ret = regmap_read(ebi->regmap, FMC2_SEMCR(resource), &semcr);
+ if (ret)
+ return ret;
+
+ if (!(semcr & FMC2_SEMCR_SEM_MUTEX)) {
+ regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource),
+ FMC2_SEMCR_SEM_MUTEX, FMC2_SEMCR_SEM_MUTEX);
+
+ ret = regmap_read(ebi->regmap, FMC2_SEMCR(resource), &semcr);
+ if (ret)
+ return ret;
+ }
+
+ cid = FIELD_GET(FMC2_SEMCR_SEMCID, semcr);
+ if (cid != FMC2_CID1) {
+ if (resource)
+ dev_err(ebi->dev, "resource %d is already used by CID%d\n",
+ resource, cid);
+
+ return -EACCES;
+ }
+
+ ebi->sem_taken |= BIT(resource);
+
+ return 0;
+}
+
+static void stm32_fmc2_ebi_mp25_put_sems(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int resource;
+
+ for (resource = 0; resource < FMC2_MAX_RESOURCES; resource++) {
+ if (!(ebi->sem_taken & BIT(resource)))
+ continue;
+
+ regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource),
+ FMC2_SEMCR_SEM_MUTEX, 0);
+ }
+}
+
+static void stm32_fmc2_ebi_mp25_get_sems(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int resource;
+
+ for (resource = 0; resource < FMC2_MAX_RESOURCES; resource++) {
+ if (!(ebi->sem_taken & BIT(resource)))
+ continue;
+
+ regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource),
+ FMC2_SEMCR_SEM_MUTEX, FMC2_SEMCR_SEM_MUTEX);
+ }
+}
+
static int stm32_fmc2_ebi_parse_prop(struct stm32_fmc2_ebi *ebi,
struct device_node *dev_node,
const struct stm32_fmc2_prop *prop,
@@ -944,17 +1416,48 @@ static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs)
regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0);
}
-static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
+static int stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
{
unsigned int cs;
+ int ret;
for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
- regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
- regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
- regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
+ ret |= regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
+ ret |= regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
+ if (ret)
+ return ret;
}
- regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+ return 0;
+}
+
+static int stm32_fmc2_ebi_mp1_save_setup(struct stm32_fmc2_ebi *ebi)
+{
+ int ret;
+
+ ret = stm32_fmc2_ebi_save_setup(ebi);
+ if (ret)
+ return ret;
+
+ return regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+}
+
+static int stm32_fmc2_ebi_mp25_save_setup(struct stm32_fmc2_ebi *ebi)
+{
+ int ret;
+
+ ret = stm32_fmc2_ebi_save_setup(ebi);
+ if (ret)
+ return ret;
+
+ if (ebi->access_granted)
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &ebi->cfgr);
+
+ return ret;
}
static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
@@ -962,14 +1465,29 @@ static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
unsigned int cs;
for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
regmap_write(ebi->regmap, FMC2_BCR(cs), ebi->bcr[cs]);
regmap_write(ebi->regmap, FMC2_BTR(cs), ebi->btr[cs]);
regmap_write(ebi->regmap, FMC2_BWTR(cs), ebi->bwtr[cs]);
}
+}
+static void stm32_fmc2_ebi_mp1_set_setup(struct stm32_fmc2_ebi *ebi)
+{
+ stm32_fmc2_ebi_set_setup(ebi);
regmap_write(ebi->regmap, FMC2_PCSCNTR, ebi->pcscntr);
}
+static void stm32_fmc2_ebi_mp25_set_setup(struct stm32_fmc2_ebi *ebi)
+{
+ stm32_fmc2_ebi_set_setup(ebi);
+
+ if (ebi->access_granted)
+ regmap_write(ebi->regmap, FMC2_CFGR, ebi->cfgr);
+}
+
static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
{
unsigned int cs;
@@ -983,33 +1501,48 @@ static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
}
/* The NWAIT signal cannot be connected to both the EBI and NAND controllers */
-static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
+static int stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
{
+ struct device *dev = ebi->dev;
unsigned int cs;
u32 bcr;
+ int ret;
for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
if (!(ebi->bank_assigned & BIT(cs)))
continue;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
+
if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) &&
- ebi->bank_assigned & BIT(FMC2_NAND))
- return true;
+ ebi->bank_assigned & BIT(FMC2_NAND)) {
+ dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
+ return -EINVAL;
+ }
}
- return false;
+ return 0;
}
static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi)
{
- regmap_update_bits(ebi->regmap, FMC2_BCR1,
- FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
+ if (!ebi->access_granted)
+ return;
+
+ regmap_update_bits(ebi->regmap, ebi->data->fmc2_enable_reg,
+ ebi->data->fmc2_enable_bit,
+ ebi->data->fmc2_enable_bit);
}
static void stm32_fmc2_ebi_disable(struct stm32_fmc2_ebi *ebi)
{
- regmap_update_bits(ebi->regmap, FMC2_BCR1, FMC2_BCR1_FMC2EN, 0);
+ if (!ebi->access_granted)
+ return;
+
+ regmap_update_bits(ebi->regmap, ebi->data->fmc2_enable_reg,
+ ebi->data->fmc2_enable_bit, 0);
}
static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
@@ -1021,8 +1554,8 @@ static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
stm32_fmc2_ebi_disable_bank(ebi, cs);
- for (i = 0; i < ARRAY_SIZE(stm32_fmc2_child_props); i++) {
- const struct stm32_fmc2_prop *p = &stm32_fmc2_child_props[i];
+ for (i = 0; i < ebi->data->nb_child_props; i++) {
+ const struct stm32_fmc2_prop *p = &ebi->data->child_props[i];
ret = stm32_fmc2_ebi_parse_prop(ebi, dev_node, p, cs);
if (ret) {
@@ -1066,6 +1599,15 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
return -EINVAL;
}
+ if (ebi->data->check_rif) {
+ ret = ebi->data->check_rif(ebi, bank + 1);
+ if (ret) {
+ dev_err(dev, "bank access failed: %d\n", bank);
+ of_node_put(child);
+ return ret;
+ }
+ }
+
if (bank < FMC2_MAX_EBI_CE) {
ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank);
if (ret) {
@@ -1085,9 +1627,10 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
return -ENODEV;
}
- if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) {
- dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
- return -EINVAL;
+ if (ebi->data->nwait_used_by_ctrls) {
+ ret = ebi->data->nwait_used_by_ctrls(ebi);
+ if (ret)
+ return ret;
}
stm32_fmc2_ebi_enable(ebi);
@@ -1107,6 +1650,11 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
return -ENOMEM;
ebi->dev = dev;
+ platform_set_drvdata(pdev, ebi);
+
+ ebi->data = of_device_get_match_data(dev);
+ if (!ebi->data)
+ return -EINVAL;
ebi->regmap = device_node_to_regmap(dev->of_node);
if (IS_ERR(ebi->regmap))
@@ -1120,28 +1668,57 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
if (PTR_ERR(rstc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- ret = clk_prepare_enable(ebi->clk);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
reset_control_deassert(rstc);
}
+ /* Check if CFGR register can be modified */
+ ebi->access_granted = true;
+ if (ebi->data->check_rif) {
+ ret = ebi->data->check_rif(ebi, 0);
+ if (ret) {
+ u32 sr;
+
+ ebi->access_granted = false;
+
+ ret = regmap_read(ebi->regmap, FMC2_SR, &sr);
+ if (ret)
+ goto err_release;
+
+ /* If CFGR is secure, just check that the FMC2 is enabled */
+ if (sr & FMC2_SR_ISOST) {
+ dev_err(dev, "FMC2 is not ready to be used.\n");
+ ret = -EACCES;
+ goto err_release;
+ }
+ }
+ }
+
ret = stm32_fmc2_ebi_parse_dt(ebi);
if (ret)
goto err_release;
- stm32_fmc2_ebi_save_setup(ebi);
- platform_set_drvdata(pdev, ebi);
+ ret = ebi->data->save_setup(ebi);
+ if (ret)
+ goto err_release;
return 0;
err_release:
stm32_fmc2_ebi_disable_banks(ebi);
stm32_fmc2_ebi_disable(ebi);
- clk_disable_unprepare(ebi->clk);
+ if (ebi->data->put_sems)
+ ebi->data->put_sems(ebi);
+ pm_runtime_put_sync_suspend(dev);
return ret;
}
@@ -1153,7 +1730,25 @@ static void stm32_fmc2_ebi_remove(struct platform_device *pdev)
of_platform_depopulate(&pdev->dev);
stm32_fmc2_ebi_disable_banks(ebi);
stm32_fmc2_ebi_disable(ebi);
+ if (ebi->data->put_sems)
+ ebi->data->put_sems(ebi);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+}
+
+static int __maybe_unused stm32_fmc2_ebi_runtime_suspend(struct device *dev)
+{
+ struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+
clk_disable_unprepare(ebi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_ebi_runtime_resume(struct device *dev)
+{
+ struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ebi->clk);
}
static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
@@ -1161,7 +1756,9 @@ static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
stm32_fmc2_ebi_disable(ebi);
- clk_disable_unprepare(ebi->clk);
+ if (ebi->data->put_sems)
+ ebi->data->put_sems(ebi);
+ pm_runtime_put_sync_suspend(dev);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -1174,21 +1771,55 @@ static int __maybe_unused stm32_fmc2_ebi_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = clk_prepare_enable(ebi->clk);
- if (ret)
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- stm32_fmc2_ebi_set_setup(ebi);
+ if (ebi->data->get_sems)
+ ebi->data->get_sems(ebi);
+ ebi->data->set_setup(ebi);
stm32_fmc2_ebi_enable(ebi);
return 0;
}
-static SIMPLE_DEV_PM_OPS(stm32_fmc2_ebi_pm_ops, stm32_fmc2_ebi_suspend,
- stm32_fmc2_ebi_resume);
+static const struct dev_pm_ops stm32_fmc2_ebi_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_fmc2_ebi_runtime_suspend,
+ stm32_fmc2_ebi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_fmc2_ebi_suspend, stm32_fmc2_ebi_resume)
+};
+
+static const struct stm32_fmc2_ebi_data stm32_fmc2_ebi_mp1_data = {
+ .child_props = stm32_fmc2_child_props,
+ .nb_child_props = ARRAY_SIZE(stm32_fmc2_child_props),
+ .fmc2_enable_reg = FMC2_BCR1,
+ .fmc2_enable_bit = FMC2_BCR1_FMC2EN,
+ .nwait_used_by_ctrls = stm32_fmc2_ebi_nwait_used_by_ctrls,
+ .set_setup = stm32_fmc2_ebi_mp1_set_setup,
+ .save_setup = stm32_fmc2_ebi_mp1_save_setup,
+};
+
+static const struct stm32_fmc2_ebi_data stm32_fmc2_ebi_mp25_data = {
+ .child_props = stm32_fmc2_mp25_child_props,
+ .nb_child_props = ARRAY_SIZE(stm32_fmc2_mp25_child_props),
+ .fmc2_enable_reg = FMC2_CFGR,
+ .fmc2_enable_bit = FMC2_CFGR_FMC2EN,
+ .set_setup = stm32_fmc2_ebi_mp25_set_setup,
+ .save_setup = stm32_fmc2_ebi_mp25_save_setup,
+ .check_rif = stm32_fmc2_ebi_mp25_check_rif,
+ .put_sems = stm32_fmc2_ebi_mp25_put_sems,
+ .get_sems = stm32_fmc2_ebi_mp25_get_sems,
+};
static const struct of_device_id stm32_fmc2_ebi_match[] = {
- {.compatible = "st,stm32mp1-fmc2-ebi"},
+ {
+ .compatible = "st,stm32mp1-fmc2-ebi",
+ .data = &stm32_fmc2_ebi_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp25-fmc2-ebi",
+ .data = &stm32_fmc2_ebi_mp25_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match);
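
The MP1/MP25 split above uses the standard per-compatible match-data pattern: each of_device_id entry carries a pointer to a const per-SoC data structure, and probe() retrieves it with of_device_get_match_data(). A minimal sketch of the pattern follows; all "example_" names are hypothetical, only the mechanism is taken from the hunks above.

	struct example_priv;

	/* one instance per compatible, referenced from the of_device_id .data field */
	struct example_soc_data {
		unsigned int enable_reg;
		int (*save_setup)(struct example_priv *priv);
	};

	struct example_priv {
		const struct example_soc_data *data;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* returns the .data pointer of the matching of_device_id entry */
		priv->data = of_device_get_match_data(&pdev->dev);
		if (!priv->data)
			return -EINVAL;

		platform_set_drvdata(pdev, priv);
		return 0;
	}
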
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
index abff87f917cb..5f57cea48b62 100644
--- a/drivers/memory/tegra/tegra234.c
+++ b/drivers/memory/tegra/tegra234.c
@@ -92,6 +92,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDB,
.name = "dla0rdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -102,6 +104,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDB1,
.name = "dla0rdb1",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -112,6 +116,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0WRB,
.name = "dla0wrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -121,7 +127,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDB,
- .name = "dla0rdb",
+ .name = "dla1rdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -407,7 +415,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDB1,
- .name = "dla0rdb1",
+ .name = "dla1rdb1",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -417,7 +427,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1WRB,
- .name = "dla0wrb",
+ .name = "dla1wrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -539,7 +551,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
.bpmp_id = TEGRA_ICC_BPMP_NVJPG_0,
.type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVJPG,
- .regs = {
+ .regs = {
.sid = {
.override = 0x3f8,
.security = 0x3fc,
@@ -660,6 +672,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDA,
.name = "dla0rda",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -670,6 +684,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0FALRDB,
.name = "dla0falrdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -680,6 +696,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0WRA,
.name = "dla0wra",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -690,6 +708,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0FALWRB,
.name = "dla0falwrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -699,7 +719,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDA,
- .name = "dla0rda",
+ .name = "dla1rda",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -709,7 +731,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB,
- .name = "dla0falrdb",
+ .name = "dla1falrdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -719,7 +743,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1WRA,
- .name = "dla0wra",
+ .name = "dla1wra",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -729,7 +755,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB,
- .name = "dla0falwrb",
+ .name = "dla1falwrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -908,6 +936,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDA1,
.name = "dla0rda1",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -917,7 +947,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDA1,
- .name = "dla0rda1",
+ .name = "dla1rda1",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index bbfaf6536903..23fea51ecbdd 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -164,7 +164,7 @@ static struct attribute *memstick_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(memstick_dev);
-static struct bus_type memstick_bus_type = {
+static const struct bus_type memstick_bus_type = {
.name = "memstick",
.dev_groups = memstick_dev_groups,
.match = memstick_bus_match,
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 04115cd92433..47a314a4eb6f 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2078,6 +2078,12 @@ static const struct blk_mq_ops msb_mq_ops = {
static int msb_init_disk(struct memstick_dev *card)
{
struct msb_data *msb = memstick_get_drvdata(card);
+ struct queue_limits lim = {
+ .logical_block_size = msb->page_size,
+ .max_hw_sectors = MS_BLOCK_MAX_PAGES,
+ .max_segments = MS_BLOCK_MAX_SEGS,
+ .max_segment_size = MS_BLOCK_MAX_PAGES * msb->page_size,
+ };
int rc;
unsigned long capacity;
@@ -2093,19 +2099,13 @@ static int msb_init_disk(struct memstick_dev *card)
if (rc)
goto out_release_id;
- msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
if (IS_ERR(msb->disk)) {
rc = PTR_ERR(msb->disk);
goto out_free_tag_set;
}
msb->queue = msb->disk->queue;
- blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
- blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
- blk_queue_max_segment_size(msb->queue,
- MS_BLOCK_MAX_PAGES * msb->page_size);
- blk_queue_logical_block_size(msb->queue, msb->page_size);
-
sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
msb->disk->fops = &msb_bdops;
msb->disk->private_data = msb;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5a69ed33999b..49accfdc89d6 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1103,6 +1103,12 @@ static const struct blk_mq_ops mspro_mq_ops = {
static int mspro_block_init_disk(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct queue_limits lim = {
+ .logical_block_size = msb->page_size,
+ .max_hw_sectors = MSPRO_BLOCK_MAX_PAGES,
+ .max_segments = MSPRO_BLOCK_MAX_SEGS,
+ .max_segment_size = MSPRO_BLOCK_MAX_PAGES * msb->page_size,
+ };
struct mspro_devinfo *dev_info = NULL;
struct mspro_sys_info *sys_info = NULL;
struct mspro_sys_attr *s_attr = NULL;
@@ -1138,18 +1144,13 @@ static int mspro_block_init_disk(struct memstick_dev *card)
if (rc)
goto out_release_id;
- msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
if (IS_ERR(msb->disk)) {
rc = PTR_ERR(msb->disk);
goto out_free_tag_set;
}
msb->queue = msb->disk->queue;
- blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
- blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
- blk_queue_max_segment_size(msb->queue,
- MSPRO_BLOCK_MAX_PAGES * msb->page_size);
-
msb->disk->major = major;
msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT;
msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT;
@@ -1158,8 +1159,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
- blk_queue_logical_block_size(msb->queue, msb->page_size);
-
capacity = be16_to_cpu(sys_info->user_block_count);
capacity *= be16_to_cpu(sys_info->block_size);
capacity *= msb->page_size >> 9;
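
Both memstick block drivers above move their queue limits out of post-allocation blk_queue_*() calls and into a struct queue_limits passed to blk_mq_alloc_disk(), so the limits are applied once at disk allocation. The shape of the change, as a sketch where the numeric values are placeholders and tag_set/drvdata stand for the driver's existing tag set and private data:

	struct queue_limits lim = {
		.logical_block_size	= 512,	/* placeholder: card page size */
		.max_hw_sectors		= 256,	/* placeholder */
		.max_segments		= 32,	/* placeholder */
	};
	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&tag_set, &lim, drvdata);
	if (IS_ERR(disk))
		return PTR_ERR(disk);
	/* no blk_queue_max_*() or blk_queue_logical_block_size() calls needed afterwards */
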
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index e7a6e45b9fac..4b023ee229cf 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1773,6 +1773,7 @@ config TWL4030_CORE
bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y
select IRQ_DOMAIN
+ select MFD_CORE
select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
diff --git a/drivers/mfd/ac100.c b/drivers/mfd/ac100.c
index 6d49d7fb5f14..8f47c392cbd1 100644
--- a/drivers/mfd/ac100.c
+++ b/drivers/mfd/ac100.c
@@ -72,7 +72,7 @@ static const struct regmap_config ac100_regmap_config = {
.wr_table = &ac100_writeable_table,
.volatile_table = &ac100_volatile_table,
.max_register = AC100_RTC_GP(15),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct mfd_cell ac100_cells[] = {
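
The recurring REGCACHE_RBTREE to REGCACHE_MAPLE conversions in this series swap only the regmap register-cache backend; the rest of each regmap_config is untouched. An illustrative config (register sizes here are placeholders, not taken from any driver above):

	static const struct regmap_config example_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0xff,			/* placeholder */
		.cache_type	= REGCACHE_MAPLE,	/* maple-tree backed cache */
	};
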
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 0e52bd2ebd74..fb5f988e61f3 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -109,7 +109,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
(void *)sysmgr_np);
- of_node_put(sysmgr_np);
+ if (property)
+ of_node_put(sysmgr_np);
+
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index c7e85ff38013..9741977031df 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -106,7 +106,7 @@ static const struct regmap_config as3711_regmap_config = {
.precious_reg = as3711_precious_reg,
.max_register = AS3711_MAX_REG,
.num_reg_defaults_raw = AS3711_NUM_REGS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
#ifdef CONFIG_OF
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index a2bf68afc131..bec047bdd088 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -299,7 +299,7 @@ static const struct regmap_config as3722_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = AS3722_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &as3722_readable_table,
.wr_table = &as3722_writable_table,
.volatile_table = &as3722_volatile_table,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index deaa969bab4e..d8daa593ebd5 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -352,7 +352,7 @@ static const struct regmap_config axp192_regmap_config = {
.wr_table = &axp192_writeable_table,
.volatile_table = &axp192_volatile_table,
.max_register = AXP20X_CC_CTRL,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp20x_regmap_config = {
@@ -388,7 +388,7 @@ static const struct regmap_config axp313a_regmap_config = {
.wr_table = &axp313a_writeable_table,
.volatile_table = &axp313a_volatile_table,
.max_register = AXP313A_IRQ_STATE,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp806_regmap_config = {
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 92eede9a5e61..8b56786d85d0 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -27,14 +27,14 @@ static const struct regmap_config bcm590xx_regmap_config_pri = {
.reg_bits = 8,
.val_bits = 8,
.max_register = BCM590XX_MAX_REGISTER_PRI,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config bcm590xx_regmap_config_sec = {
.reg_bits = 8,
.val_bits = 8,
.max_register = BCM590XX_MAX_REGISTER_SEC,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 819d09e4d100..0a955178d469 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -67,7 +67,7 @@ static const struct regmap_access_table bd9571mwv_volatile_table = {
static const struct regmap_config bd9571mwv_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &bd9571mwv_readable_table,
.wr_table = &bd9571mwv_writable_table,
.volatile_table = &bd9571mwv_volatile_table,
@@ -152,7 +152,7 @@ static const struct regmap_access_table bd9574mwf_volatile_table = {
static const struct regmap_config bd9574mwf_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &bd9574mwf_readable_table,
.wr_table = &bd9574mwf_writable_table,
.volatile_table = &bd9574mwf_volatile_table,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 603b1cd52785..a52d59cc2b1e 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -74,6 +74,10 @@ static const struct mfd_cell cros_ec_cec_cells[] = {
{ .name = "cros-ec-cec", },
};
+static const struct mfd_cell cros_ec_gpio_cells[] = {
+ { .name = "cros-ec-gpio", },
+};
+
static const struct mfd_cell cros_ec_rtc_cells[] = {
{ .name = "cros-ec-rtc", },
};
@@ -91,6 +95,10 @@ static const struct mfd_cell cros_usbpd_notify_cells[] = {
{ .name = "cros-usbpd-notify", },
};
+static const struct mfd_cell cros_ec_wdt_cells[] = {
+ { .name = "cros-ec-wdt", }
+};
+
static const struct cros_feature_to_cells cros_subdevices[] = {
{
.id = EC_FEATURE_CEC,
@@ -98,6 +106,11 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
.num_cells = ARRAY_SIZE(cros_ec_cec_cells),
},
{
+ .id = EC_FEATURE_GPIO,
+ .mfd_cells = cros_ec_gpio_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_gpio_cells),
+ },
+ {
.id = EC_FEATURE_RTC,
.mfd_cells = cros_ec_rtc_cells,
.num_cells = ARRAY_SIZE(cros_ec_rtc_cells),
@@ -107,6 +120,11 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
.mfd_cells = cros_usbpd_charger_cells,
.num_cells = ARRAY_SIZE(cros_usbpd_charger_cells),
},
+ {
+ .id = EC_FEATURE_HANG_DETECT,
+ .mfd_cells = cros_ec_wdt_cells,
+ .num_cells = ARRAY_SIZE(cros_ec_wdt_cells),
+ },
};
static const struct mfd_cell cros_ec_platform_cells[] = {
diff --git a/drivers/mfd/cs42l43-i2c.c b/drivers/mfd/cs42l43-i2c.c
index 4922211680c9..c9e4ea76149a 100644
--- a/drivers/mfd/cs42l43-i2c.c
+++ b/drivers/mfd/cs42l43-i2c.c
@@ -6,11 +6,15 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/array_size.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
#include "cs42l43.h"
@@ -34,7 +38,6 @@ static const struct regmap_config cs42l43_i2c_regmap = {
static int cs42l43_i2c_probe(struct i2c_client *i2c)
{
struct cs42l43 *cs42l43;
- int ret;
cs42l43 = devm_kzalloc(&i2c->dev, sizeof(*cs42l43), GFP_KERNEL);
if (!cs42l43)
@@ -46,11 +49,9 @@ static int cs42l43_i2c_probe(struct i2c_client *i2c)
cs42l43->attached = true;
cs42l43->regmap = devm_regmap_init_i2c(i2c, &cs42l43_i2c_regmap);
- if (IS_ERR(cs42l43->regmap)) {
- ret = PTR_ERR(cs42l43->regmap);
- dev_err(cs42l43->dev, "Failed to allocate regmap: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(cs42l43->regmap))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->regmap),
+ "Failed to allocate regmap\n");
return cs42l43_dev_probe(cs42l43);
}
diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
index 1d85bbf8cdd5..65f7b1d78248 100644
--- a/drivers/mfd/cs42l43-sdw.c
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -6,11 +6,15 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw_type.h>
@@ -167,7 +171,6 @@ static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *
{
struct cs42l43 *cs42l43;
struct device *dev = &sdw->dev;
- int ret;
cs42l43 = devm_kzalloc(dev, sizeof(*cs42l43), GFP_KERNEL);
if (!cs42l43)
@@ -177,11 +180,9 @@ static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *
cs42l43->sdw = sdw;
cs42l43->regmap = devm_regmap_init_sdw(sdw, &cs42l43_sdw_regmap);
- if (IS_ERR(cs42l43->regmap)) {
- ret = PTR_ERR(cs42l43->regmap);
- dev_err(cs42l43->dev, "Failed to allocate regmap: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(cs42l43->regmap))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->regmap),
+ "Failed to allocate regmap\n");
return cs42l43_dev_probe(cs42l43);
}
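
The two cs42l43 bus glue drivers above switch their regmap error handling to dev_err_probe(), which records the deferral reason, stays quiet for -EPROBE_DEFER and returns the error code in a single statement. The idiom, as used in the hunks above:

	regmap = devm_regmap_init_i2c(i2c, &config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap),
				     "Failed to allocate regmap\n");
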
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 7b6d07cbe6fc..a0fb2dc6c3b2 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -6,51 +6,57 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/array_size.h>
#include <linux/bitops.h>
#include <linux/build_bug.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
+#include <linux/types.h>
#include "cs42l43.h"
-#define CS42L43_RESET_DELAY 20
+#define CS42L43_RESET_DELAY_MS 20
-#define CS42L43_SDW_ATTACH_TIMEOUT 500
-#define CS42L43_SDW_DETACH_TIMEOUT 100
+#define CS42L43_SDW_ATTACH_TIMEOUT_MS 500
+#define CS42L43_SDW_DETACH_TIMEOUT_MS 100
#define CS42L43_MCU_BOOT_STAGE1 1
#define CS42L43_MCU_BOOT_STAGE2 2
#define CS42L43_MCU_BOOT_STAGE3 3
#define CS42L43_MCU_BOOT_STAGE4 4
-#define CS42L43_MCU_POLL 5000
-#define CS42L43_MCU_CMD_TIMEOUT 20000
+#define CS42L43_MCU_POLL_US 5000
+#define CS42L43_MCU_CMD_TIMEOUT_US 20000
#define CS42L43_MCU_UPDATE_FORMAT 3
#define CS42L43_MCU_UPDATE_OFFSET 0x100000
-#define CS42L43_MCU_UPDATE_TIMEOUT 500000
+#define CS42L43_MCU_UPDATE_TIMEOUT_US 500000
#define CS42L43_MCU_UPDATE_RETRIES 5
#define CS42L43_MCU_SUPPORTED_REV 0x2105
#define CS42L43_MCU_SHADOW_REGS_REQUIRED_REV 0x2200
#define CS42L43_MCU_SUPPORTED_BIOS_REV 0x0001
-#define CS42L43_VDDP_DELAY 50
-#define CS42L43_VDDD_DELAY 1000
+#define CS42L43_VDDP_DELAY_US 50
+#define CS42L43_VDDD_DELAY_US 1000
-#define CS42L43_AUTOSUSPEND_TIME 250
+#define CS42L43_AUTOSUSPEND_TIME_MS 250
struct cs42l43_patch_header {
__le16 version;
__le16 size;
- u8 reserved;
- u8 secure;
+ __u8 reserved;
+ __u8 secure;
__le16 bss_size;
__le32 apply_addr;
__le32 checksum;
@@ -84,7 +90,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_DRV_CTRL_5, 0x136C00C0 },
{ CS42L43_GPIO_CTRL1, 0x00000707 },
{ CS42L43_GPIO_CTRL2, 0x00000000 },
- { CS42L43_GPIO_FN_SEL, 0x00000000 },
+ { CS42L43_GPIO_FN_SEL, 0x00000004 },
{ CS42L43_MCLK_SRC_SEL, 0x00000000 },
{ CS42L43_SAMPLE_RATE1, 0x00000003 },
{ CS42L43_SAMPLE_RATE2, 0x00000003 },
@@ -131,38 +137,38 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_ASP_TX_CH4_CTRL, 0x00170091 },
{ CS42L43_ASP_TX_CH5_CTRL, 0x001700C1 },
{ CS42L43_ASP_TX_CH6_CTRL, 0x001700F1 },
- { CS42L43_ASPTX1_INPUT, 0x00800000 },
- { CS42L43_ASPTX2_INPUT, 0x00800000 },
- { CS42L43_ASPTX3_INPUT, 0x00800000 },
- { CS42L43_ASPTX4_INPUT, 0x00800000 },
- { CS42L43_ASPTX5_INPUT, 0x00800000 },
- { CS42L43_ASPTX6_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00800000 },
- { CS42L43_ASRC_INT1_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT2_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT3_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT4_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC1_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC2_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC3_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC4_INPUT1, 0x00800000 },
- { CS42L43_ISRC1INT1_INPUT1, 0x00800000 },
- { CS42L43_ISRC1INT2_INPUT1, 0x00800000 },
- { CS42L43_ISRC1DEC1_INPUT1, 0x00800000 },
- { CS42L43_ISRC1DEC2_INPUT1, 0x00800000 },
- { CS42L43_ISRC2INT1_INPUT1, 0x00800000 },
- { CS42L43_ISRC2INT2_INPUT1, 0x00800000 },
- { CS42L43_ISRC2DEC1_INPUT1, 0x00800000 },
- { CS42L43_ISRC2DEC2_INPUT1, 0x00800000 },
+ { CS42L43_ASPTX1_INPUT, 0x00000000 },
+ { CS42L43_ASPTX2_INPUT, 0x00000000 },
+ { CS42L43_ASPTX3_INPUT, 0x00000000 },
+ { CS42L43_ASPTX4_INPUT, 0x00000000 },
+ { CS42L43_ASPTX5_INPUT, 0x00000000 },
+ { CS42L43_ASPTX6_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00000000 },
+ { CS42L43_ASRC_INT1_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT2_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT3_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT4_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC2_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC3_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC4_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1INT1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1INT2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1DEC2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2INT1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2INT2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2DEC2_INPUT1, 0x00000000 },
{ CS42L43_EQ1MIX_INPUT1, 0x00800000 },
{ CS42L43_EQ1MIX_INPUT2, 0x00800000 },
{ CS42L43_EQ1MIX_INPUT3, 0x00800000 },
@@ -171,8 +177,8 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_EQ2MIX_INPUT2, 0x00800000 },
{ CS42L43_EQ2MIX_INPUT3, 0x00800000 },
{ CS42L43_EQ2MIX_INPUT4, 0x00800000 },
- { CS42L43_SPDIF1_INPUT1, 0x00800000 },
- { CS42L43_SPDIF2_INPUT1, 0x00800000 },
+ { CS42L43_SPDIF1_INPUT1, 0x00000000 },
+ { CS42L43_SPDIF2_INPUT1, 0x00000000 },
{ CS42L43_AMP1MIX_INPUT1, 0x00800000 },
{ CS42L43_AMP1MIX_INPUT2, 0x00800000 },
{ CS42L43_AMP1MIX_INPUT3, 0x00800000 },
@@ -217,7 +223,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_CTRL_REG, 0x00000006 },
{ CS42L43_FDIV_FRAC, 0x40000000 },
{ CS42L43_CAL_RATIO, 0x00000080 },
- { CS42L43_SPI_CLK_CONFIG1, 0x00000000 },
+ { CS42L43_SPI_CLK_CONFIG1, 0x00000001 },
{ CS42L43_SPI_CONFIG1, 0x00000000 },
{ CS42L43_SPI_CONFIG2, 0x00000000 },
{ CS42L43_SPI_CONFIG3, 0x00000001 },
@@ -532,10 +538,10 @@ static int cs42l43_soft_reset(struct cs42l43 *cs42l43)
regcache_cache_only(cs42l43->regmap, true);
regmap_multi_reg_write_bypassed(cs42l43->regmap, reset, ARRAY_SIZE(reset));
- msleep(CS42L43_RESET_DELAY);
+ msleep(CS42L43_RESET_DELAY_MS);
if (cs42l43->sdw) {
- unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_DETACH_TIMEOUT);
+ unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_DETACH_TIMEOUT_MS);
unsigned long time;
time = wait_for_completion_timeout(&cs42l43->device_detach, timeout);
@@ -555,7 +561,7 @@ static int cs42l43_soft_reset(struct cs42l43 *cs42l43)
static int cs42l43_wait_for_attach(struct cs42l43 *cs42l43)
{
if (!cs42l43->attached) {
- unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_ATTACH_TIMEOUT);
+ unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_ATTACH_TIMEOUT_MS);
unsigned long time;
time = wait_for_completion_timeout(&cs42l43->device_attach, timeout);
@@ -597,7 +603,7 @@ static int cs42l43_mcu_stage_2_3(struct cs42l43 *cs42l43, bool shadow)
ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_BOOT_STATUS,
val, (val == CS42L43_MCU_BOOT_STAGE3),
- CS42L43_MCU_POLL, CS42L43_MCU_CMD_TIMEOUT);
+ CS42L43_MCU_POLL_US, CS42L43_MCU_CMD_TIMEOUT_US);
if (ret) {
dev_err(cs42l43->dev, "Failed to move to stage 3: %d, 0x%x\n", ret, val);
return ret;
@@ -646,7 +652,7 @@ static int cs42l43_mcu_disable(struct cs42l43 *cs42l43)
ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_SOFT_INT_SHADOW, val,
(val & CS42L43_CONTROL_APPLIED_INT_MASK),
- CS42L43_MCU_POLL, CS42L43_MCU_CMD_TIMEOUT);
+ CS42L43_MCU_POLL_US, CS42L43_MCU_CMD_TIMEOUT_US);
if (ret) {
dev_err(cs42l43->dev, "Failed to disable firmware: %d, 0x%x\n", ret, val);
return ret;
@@ -690,7 +696,7 @@ static void cs42l43_mcu_load_firmware(const struct firmware *firmware, void *con
ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_SOFT_INT_SHADOW, val,
(val & CS42L43_PATCH_APPLIED_INT_MASK),
- CS42L43_MCU_POLL, CS42L43_MCU_UPDATE_TIMEOUT);
+ CS42L43_MCU_POLL_US, CS42L43_MCU_UPDATE_TIMEOUT_US);
if (ret) {
dev_err(cs42l43->dev, "Failed to update firmware: %d, 0x%x\n", ret, val);
cs42l43->firmware_error = ret;
@@ -951,7 +957,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
}
/* vdd-p must be on for 50uS before any other supply */
- usleep_range(CS42L43_VDDP_DELAY, 2 * CS42L43_VDDP_DELAY);
+ usleep_range(CS42L43_VDDP_DELAY_US, 2 * CS42L43_VDDP_DELAY_US);
gpiod_set_value_cansleep(cs42l43->reset, 1);
@@ -967,7 +973,7 @@ static int cs42l43_power_up(struct cs42l43 *cs42l43)
goto err_core_supplies;
}
- usleep_range(CS42L43_VDDD_DELAY, 2 * CS42L43_VDDD_DELAY);
+ usleep_range(CS42L43_VDDD_DELAY_US, 2 * CS42L43_VDDD_DELAY_US);
return 0;
@@ -1051,7 +1057,7 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME);
+ pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME_MS);
pm_runtime_use_autosuspend(cs42l43->dev);
pm_runtime_set_active(cs42l43->dev);
/*
@@ -1059,7 +1065,9 @@ int cs42l43_dev_probe(struct cs42l43 *cs42l43)
* the boot work runs.
*/
pm_runtime_get_noresume(cs42l43->dev);
- devm_pm_runtime_enable(cs42l43->dev);
+ ret = devm_pm_runtime_enable(cs42l43->dev);
+ if (ret)
+ return ret;
queue_work(system_long_wq, &cs42l43->boot_work);
diff --git a/drivers/mfd/cs42l43.h b/drivers/mfd/cs42l43.h
index eb4caf393833..8d1b1b0f5a47 100644
--- a/drivers/mfd/cs42l43.h
+++ b/drivers/mfd/cs42l43.h
@@ -6,15 +6,17 @@
* Cirrus Logic International Semiconductor Ltd.
*/
-#include <linux/mfd/cs42l43.h>
-#include <linux/pm.h>
-#include <linux/regmap.h>
-
#ifndef CS42L43_CORE_INT_H
#define CS42L43_CORE_INT_H
#define CS42L43_N_DEFAULTS 176
+struct dev_pm_ops;
+struct device;
+struct reg_default;
+
+struct cs42l43;
+
extern const struct dev_pm_ops cs42l43_pm_ops;
extern const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS];
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 150448cd2eb0..dc85801b9fa0 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -533,7 +533,7 @@ const struct regmap_config da9052_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = DA9052_PAGE1_CON_REG,
.readable_reg = da9052_reg_readable,
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index 768302e05baa..1f727ef60d63 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -245,7 +245,7 @@ const struct regmap_config da9055_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = DA9055_MAX_REGISTER_CNT,
.readable_reg = da9055_register_readable,
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 73a22107900c..dbbc4779170a 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -476,7 +476,7 @@ static struct regmap_config da9061_regmap_config = {
.ranges = da9061_range_cfg,
.num_ranges = ARRAY_SIZE(da9061_range_cfg),
.max_register = DA9062AA_CONFIG_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &da9061_aa_readable_table,
.wr_table = &da9061_aa_writeable_table,
.volatile_table = &da9061_aa_volatile_table,
@@ -582,7 +582,7 @@ static struct regmap_config da9062_regmap_config = {
.ranges = da9062_range_cfg,
.num_ranges = ARRAY_SIZE(da9062_range_cfg),
.max_register = DA9062AA_CONFIG_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.rd_table = &da9062_aa_readable_table,
.wr_table = &da9062_aa_writeable_table,
.volatile_table = &da9062_aa_volatile_table,
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index d715cf9a9e68..c6235cd0dbdc 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -342,7 +342,7 @@ static struct regmap_config da9063_regmap_config = {
.num_ranges = ARRAY_SIZE(da9063_range_cfg),
.max_register = DA9063_REG_CONFIG_ID,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct of_device_id da9063_dt_ids[] = {
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 94d621e20635..5c59cc869fb3 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -169,7 +169,7 @@ static const struct regmap_config da9150_regmap_config = {
.num_ranges = ARRAY_SIZE(da9150_range_cfg),
.max_register = DA9150_TBAT_RES_B,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = da9150_volatile_reg,
};
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 4621d3950b8f..8c00e0c695c5 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -23,12 +23,22 @@
#include "intel-lpss.h"
-/* Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources */
-static const struct pci_device_id ignore_resource_conflicts_ids[] = {
- /* Microsoft Surface Go (version 1) I2C4 */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182), },
- /* Microsoft Surface Go 2 I2C4 */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237), },
+static const struct pci_device_id quirk_ids[] = {
+ {
+ /* Microsoft Surface Go (version 1) I2C4 */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182),
+ .driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
+ },
+ {
+ /* Microsoft Surface Go 2 I2C4 */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237),
+ .driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
+ },
+ {
+ /* Dell XPS 9530 (2023) */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x51fb, 0x1028, 0x0beb),
+ .driver_data = QUIRK_CLOCK_DIVIDER_UNITY,
+ },
{ }
};
@@ -36,6 +46,7 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct intel_lpss_platform_info *data = (void *)id->driver_data;
+ const struct pci_device_id *quirk_pci_info;
struct intel_lpss_platform_info *info;
int ret;
@@ -55,8 +66,9 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
info->mem = pci_resource_n(pdev, 0);
info->irq = pci_irq_vector(pdev, 0);
- if (pci_match_id(ignore_resource_conflicts_ids, pdev))
- info->ignore_resource_conflicts = true;
+ quirk_pci_info = pci_match_id(quirk_ids, pdev);
+ if (quirk_pci_info)
+ info->quirks = quirk_pci_info->driver_data;
pdev->d3cold_delay = 0;
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index eff423f7dd28..2a9018112dfc 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -300,6 +300,7 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
{
char name[32];
struct clk *tmp = *clk;
+ int ret;
snprintf(name, sizeof(name), "%s-enable", devname);
tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
@@ -316,6 +317,12 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
return PTR_ERR(tmp);
*clk = tmp;
+ if (lpss->info->quirks & QUIRK_CLOCK_DIVIDER_UNITY) {
+ ret = clk_set_rate(tmp, lpss->info->clk_rate);
+ if (ret)
+ return ret;
+ }
+
snprintf(name, sizeof(name), "%s-update", devname);
tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
@@ -412,7 +419,7 @@ int intel_lpss_probe(struct device *dev,
return ret;
lpss->cell->swnode = info->swnode;
- lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;
+ lpss->cell->ignore_resource_conflicts = info->quirks & QUIRK_IGNORE_RESOURCE_CONFLICTS;
intel_lpss_init_dev(lpss);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index c1d72b117ed5..6f8f668f4c6f 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -11,16 +11,28 @@
#ifndef __MFD_INTEL_LPSS_H
#define __MFD_INTEL_LPSS_H
+#include <linux/bits.h>
#include <linux/pm.h>
+/*
+ * Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources.
+ * Set to ignore resource conflicts with ACPI declared SystemMemory regions.
+ */
+#define QUIRK_IGNORE_RESOURCE_CONFLICTS BIT(0)
+/*
+ * Some devices have misconfigured clock divider due to a firmware bug.
+ * Set this to force the clock divider to 1:1 ratio.
+ */
+#define QUIRK_CLOCK_DIVIDER_UNITY BIT(1)
+
struct device;
struct resource;
struct software_node;
struct intel_lpss_platform_info {
struct resource *mem;
- bool ignore_resource_conflicts;
int irq;
+ unsigned int quirks;
unsigned long clk_rate;
const char *clk_con_id;
const struct software_node *swnode;
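
The quirk handling in the intel-lpss hunks above is the common PCI pattern of keeping per-device flags in pci_device_id.driver_data and looking them up with pci_match_id(). Condensed sketch; the table entry is one of those added above, while the surrounding helper is illustrative only:

	static const struct pci_device_id quirk_ids[] = {
		{
			PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182),
			.driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
		},
		{ }
	};

	static void apply_quirks(struct pci_dev *pdev, struct intel_lpss_platform_info *info)
	{
		const struct pci_device_id *match = pci_match_id(quirk_ids, pdev);

		if (match)
			info->quirks = match->driver_data;
	}
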
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 67af36a38913..5557f023a173 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -428,50 +428,13 @@ static int kempld_detect_device(struct kempld_device_data *pld)
#ifdef CONFIG_ACPI
static int kempld_get_acpi_data(struct platform_device *pdev)
{
- struct list_head resource_list;
- struct resource *resources;
- struct resource_entry *rentry;
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
const struct kempld_platform_data *pdata;
int ret;
- int count;
pdata = acpi_device_get_match_data(dev);
ret = platform_device_add_data(pdev, pdata,
sizeof(struct kempld_platform_data));
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(acpi_dev, &resource_list, NULL, NULL);
- if (ret < 0)
- goto out;
-
- count = ret;
-
- if (count == 0) {
- ret = platform_device_add_resources(pdev, pdata->ioresource, 1);
- goto out;
- }
-
- resources = devm_kcalloc(&acpi_dev->dev, count, sizeof(*resources),
- GFP_KERNEL);
- if (!resources) {
- ret = -ENOMEM;
- goto out;
- }
-
- count = 0;
- list_for_each_entry(rentry, &resource_list, node) {
- memcpy(&resources[count], rentry->res,
- sizeof(*resources));
- count++;
- }
- ret = platform_device_add_resources(pdev, resources, count);
-
-out:
- acpi_dev_free_resource_list(&resource_list);
return ret;
}
diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c
index 61396d824f16..ba981a788692 100644
--- a/drivers/mfd/khadas-mcu.c
+++ b/drivers/mfd/khadas-mcu.c
@@ -72,7 +72,7 @@ static const struct regmap_config khadas_mcu_regmap_config = {
.max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
.volatile_reg = khadas_mcu_reg_volatile,
.writeable_reg = khadas_mcu_reg_writeable,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct mfd_cell khadas_mcu_fan_cells[] = {
diff --git a/drivers/mfd/lochnagar-i2c.c b/drivers/mfd/lochnagar-i2c.c
index 0b76fcccd0bd..6c930c57f2e2 100644
--- a/drivers/mfd/lochnagar-i2c.c
+++ b/drivers/mfd/lochnagar-i2c.c
@@ -70,7 +70,7 @@ static const struct regmap_config lochnagar1_i2c_regmap = {
.use_single_read = true,
.use_single_write = true,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_sequence lochnagar1_patch[] = {
@@ -163,7 +163,7 @@ static const struct regmap_config lochnagar2_i2c_regmap = {
.readable_reg = lochnagar2_readable_register,
.volatile_reg = lochnagar2_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_sequence lochnagar2_patch[] = {
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 73a0e7f9bd31..f14901660147 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -38,6 +38,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -1321,7 +1322,7 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
case INTEL_SPI_BYT:
pci_read_config_dword(dev, SPIBASE_BYT, &spi_base);
if (spi_base & SPIBASE_BYT_EN) {
- res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
+ res->start = ALIGN_DOWN(spi_base, SPIBASE_BYT_SZ);
res->end = res->start + SPIBASE_BYT_SZ - 1;
info->set_writeable = lpc_ich_byt_set_writeable;
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 1000572761a8..920797b806ce 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -7,6 +7,7 @@
* Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -174,28 +175,27 @@ int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev)
}
EXPORT_SYMBOL(mc13xxx_irq_free);
-#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
"fin: %d, fab: %d, icid: %d/%d\n",
mc13xxx->variant->name,
- maskval(revision, MC13XXX_REVISION_REVFULL),
- maskval(revision, MC13XXX_REVISION_REVMETAL),
- maskval(revision, MC13XXX_REVISION_FIN),
- maskval(revision, MC13XXX_REVISION_FAB),
- maskval(revision, MC13XXX_REVISION_ICID),
- maskval(revision, MC13XXX_REVISION_ICIDCODE));
+ FIELD_GET(MC13XXX_REVISION_REVFULL, revision),
+ FIELD_GET(MC13XXX_REVISION_REVMETAL, revision),
+ FIELD_GET(MC13XXX_REVISION_FIN, revision),
+ FIELD_GET(MC13XXX_REVISION_FAB, revision),
+ FIELD_GET(MC13XXX_REVISION_ICID, revision),
+ FIELD_GET(MC13XXX_REVISION_ICIDCODE, revision));
}
static void mc34708_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
dev_info(mc13xxx->dev, "%s: rev %d.%d, fin: %d, fab: %d\n",
mc13xxx->variant->name,
- maskval(revision, MC34708_REVISION_REVFULL),
- maskval(revision, MC34708_REVISION_REVMETAL),
- maskval(revision, MC34708_REVISION_FIN),
- maskval(revision, MC34708_REVISION_FAB));
+ FIELD_GET(MC34708_REVISION_REVFULL, revision),
+ FIELD_GET(MC34708_REVISION_REVMETAL, revision),
+ FIELD_GET(MC34708_REVISION_FIN, revision),
+ FIELD_GET(MC34708_REVISION_FAB, revision));
}
/* These are only exported for mc13xxx-i2c and mc13xxx-spi */
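
FIELD_GET() from <linux/bitfield.h>, used above in place of the open-coded maskval() macro, extracts a bitfield by masking and shifting right by the mask's lowest set bit; the mask must be a compile-time constant. For example (the mask is illustrative, not an MC13xxx register field):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_REV_MAJOR	GENMASK(7, 4)

	u32 revision = 0x53;
	unsigned int major = FIELD_GET(EXAMPLE_REV_MAJOR, revision);	/* -> 0x5 */
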
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 2fa592c37c6f..16ca23311cab 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -41,7 +41,7 @@ static void mcp_bus_remove(struct device *dev)
drv->remove(mcp);
}
-static struct bus_type mcp_bus_type = {
+static const struct bus_type mcp_bus_type = {
.name = "mcp",
.match = mcp_bus_match,
.probe = mcp_bus_probe,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 2b85509a90fc..6ad5c93027af 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -29,7 +29,7 @@ struct mfd_of_node_entry {
struct device_node *np;
};
-static struct device_type mfd_dev_type = {
+static const struct device_type mfd_dev_type = {
.name = "mfd_device",
};
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 4449dde05021..4fd4a2da5ad7 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -142,6 +142,9 @@ static const struct mfd_cell mt6357_devs[] = {
.resources = mt6357_rtc_resources,
.of_compatible = "mediatek,mt6357-rtc",
}, {
+ .name = "mt6357-sound",
+ .of_compatible = "mediatek,mt6357-sound"
+ }, {
.name = "mtk-pmic-keys",
.num_resources = ARRAY_SIZE(mt6357_keys_resources),
.resources = mt6357_keys_resources,
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index ebc62033db16..949feb03d4f8 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -699,7 +699,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
}
for (i = 0; i < omap->nports; i++) {
- char clkname[30];
+ char clkname[40];
/* clock names are indexed from 1 */
snprintf(clkname, sizeof(clkname),
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 6ff84b2600c5..ea5fbcbbe4a5 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -358,7 +358,7 @@ int rave_sp_exec(struct rave_sp *sp,
ackid = atomic_inc_return(&sp->ackid);
reply.ackid = ackid;
- reply.code = rave_sp_reply_code((u8)command),
+ reply.code = rave_sp_reply_code((u8)command);
mutex_lock(&sp->bus_lock);
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 5e81f011363f..2c0e8e9630f7 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -230,7 +230,7 @@ static const struct regmap_config rc5t583_regmap_config = {
.volatile_reg = volatile_reg,
.max_register = RC5T583_MAX_REG,
.num_reg_defaults_raw = RC5T583_NUM_REGS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int rc5t583_i2c_probe(struct i2c_client *i2c)
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index b1ffc3b9e2be..e2261b68b844 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -43,8 +43,8 @@ static struct resource rk806_pwrkey_resources[] = {
};
static const struct resource rk817_pwrkey_resources[] = {
- DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
DEFINE_RES_IRQ(RK817_IRQ_PWRON_FALL),
+ DEFINE_RES_IRQ(RK817_IRQ_PWRON_RISE),
};
static const struct resource rk817_charger_resources[] = {
diff --git a/drivers/mfd/rk8xx-spi.c b/drivers/mfd/rk8xx-spi.c
index fd137f38c2c4..3405fb82ff9f 100644
--- a/drivers/mfd/rk8xx-spi.c
+++ b/drivers/mfd/rk8xx-spi.c
@@ -34,7 +34,7 @@ static const struct regmap_config rk806_regmap_config_spi = {
.reg_bits = 16,
.val_bits = 8,
.max_register = RK806_BUCK_RSERVE_REG5,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_table = &rk806_volatile_table,
};
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 7336e6d8a001..23ca00d2c624 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -62,7 +62,7 @@ static const struct regmap_config rn5t618_regmap_config = {
.val_bits = 8,
.volatile_reg = rn5t618_volatile_reg,
.max_register = RN5T618_MAX_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_irq rc5t619_irqs[] = {
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 594718f7e8e1..2f3826c7eef4 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -197,7 +197,7 @@ static const struct regmap_config bd71815_regmap = {
.val_bits = 8,
.volatile_table = &bd71815_volatile_regs,
.max_register = BD71815_MAX_REGISTER - 1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config bd71828_regmap = {
@@ -205,7 +205,7 @@ static const struct regmap_config bd71828_regmap = {
.val_bits = 8,
.volatile_table = &bd71828_volatile_regs,
.max_register = BD71828_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/*
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 4798bdf27afb..7755a4c073bf 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -87,7 +87,7 @@ static const struct regmap_config bd718xx_regmap_config = {
.val_bits = 8,
.volatile_table = &volatile_regs,
.max_register = BD718XX_MAX_REGISTER - 1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int bd718xx_init_press_duration(struct regmap *regmap,
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index bceac7016740..3a9f61961721 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -62,7 +62,7 @@ static struct regmap_config bd957x_regmap = {
.val_bits = 8,
.volatile_table = &volatile_regs,
.max_register = BD957X_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static struct regmap_irq bd9576_irqs[] = {
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index 06d78a1cf1cc..5711e512b6a2 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -188,7 +188,7 @@ static const struct regmap_config rsmu_sabre_regmap_config = {
.ranges = rsmu_sabre_range_cfg,
.num_ranges = ARRAY_SIZE(rsmu_sabre_range_cfg),
.volatile_reg = rsmu_sabre_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.can_multi_write = true,
};
diff --git a/drivers/mfd/si476x-prop.c b/drivers/mfd/si476x-prop.c
index f0608d138f02..3d5c118888b2 100644
--- a/drivers/mfd/si476x-prop.c
+++ b/drivers/mfd/si476x-prop.c
@@ -222,7 +222,7 @@ static const struct regmap_config si476x_regmap_config = {
.reg_read = si476x_core_regmap_read,
.reg_write = si476x_core_regmap_write,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
struct regmap *devm_regmap_init_si476x(struct si476x_core *core)
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index c02cbd9c2f5d..f391c2ccaa72 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -53,7 +53,7 @@ static const struct regmap_config stmfx_regmap_config = {
.max_register = STMFX_REG_MAX,
.volatile_reg = stmfx_reg_volatile,
.writeable_reg = stmfx_reg_writeable,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct resource stmfx_pinctrl_resources[] = {
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index c5128fe96cc7..d8a603d95aa6 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -63,7 +63,7 @@ static const struct regmap_access_table stpmic1_volatile_table = {
static const struct regmap_config stpmic1_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = PMIC_MAX_REGISTER_ADDRESS,
.rd_table = &stpmic1_readable_table,
.wr_table = &stpmic1_writeable_table,
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index c9550368d9ea..7d0e91164cba 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -238,7 +238,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
return ERR_PTR(-ENODEV);
regmap = syscon_node_to_regmap(syscon_np);
- of_node_put(syscon_np);
+
+ if (property)
+ of_node_put(syscon_np);
return regmap;
}
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 6e384a79e341..c130ffef182f 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -124,6 +124,11 @@
#define TWL6030_BASEADD_RSV 0x0000
#define TWL6030_BASEADD_ZERO 0x0000
+/* Some fields in TWL6030_PHOENIX_DEV_ON */
+#define TWL6030_APP_DEVOFF BIT(0)
+#define TWL6030_CON_DEVOFF BIT(1)
+#define TWL6030_MOD_DEVOFF BIT(2)
+
/* Few power values */
#define R_CFG_BOOT 0x05
@@ -687,6 +692,20 @@ static void twl_remove(struct i2c_client *client)
twl_priv->ready = false;
}
+static void twl6030_power_off(void)
+{
+ int err;
+ u8 val;
+
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val, TWL6030_PHOENIX_DEV_ON);
+ if (err)
+ return;
+
+ val |= TWL6030_APP_DEVOFF | TWL6030_CON_DEVOFF | TWL6030_MOD_DEVOFF;
+ twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val, TWL6030_PHOENIX_DEV_ON);
+}
+
+
static struct of_dev_auxdata twl_auxdata_lookup[] = {
OF_DEV_AUXDATA("ti,twl4030-gpio", 0, "twl4030-gpio", NULL),
{ /* sentinel */ },
@@ -852,6 +871,15 @@ twl_probe(struct i2c_client *client)
goto free;
}
+ if (twl_class_is_6030()) {
+ if (of_device_is_system_power_controller(node)) {
+ if (!pm_power_off)
+ pm_power_off = twl6030_power_off;
+ else
+ dev_warn(&client->dev, "Poweroff callback already assigned\n");
+ }
+ }
+
status = of_platform_populate(node, NULL, twl_auxdata_lookup,
&client->dev);
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 1595e9c76132..0bca948ab6ba 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -686,6 +686,9 @@ static bool twl4030_power_use_poweroff(const struct twl4030_power_data *pdata,
if (of_property_read_bool(node, "ti,use_poweroff"))
return true;
+ if (of_device_is_system_power_controller(node->parent))
+ return true;
+
return false;
}
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index f77ecc635b6f..6a8602c1c4ee 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -1922,7 +1922,7 @@ const struct regmap_config wm5102_spi_regmap = {
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5102_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
};
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index eba324875afd..6ff33a54a068 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -3202,7 +3202,7 @@ const struct regmap_config wm5110_spi_regmap = {
.readable_reg = wm5110_readable_register,
.volatile_reg = wm5110_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5110_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
};
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 65b98f3fbd92..18618a8f9206 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -152,7 +152,7 @@ static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
enum wm831x_auxadc input)
{
- int ret, src, timeout;
+ int ret, src;
mutex_lock(&wm831x->auxadc_lock);
@@ -179,32 +179,25 @@ static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
goto disable;
}
- /* If we're not using interrupts then poll the
- * interrupt status register */
- timeout = 5;
- while (timeout) {
- msleep(1);
+ /* If we're not using interrupts, read the interrupt status register */
+ msleep(20);
- ret = wm831x_reg_read(wm831x,
- WM831X_INTERRUPT_STATUS_1);
- if (ret < 0) {
- dev_err(wm831x->dev,
- "ISR 1 read failed: %d\n", ret);
- goto disable;
- }
+ ret = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
- /* Did it complete? */
- if (ret & WM831X_AUXADC_DATA_EINT) {
- wm831x_reg_write(wm831x,
- WM831X_INTERRUPT_STATUS_1,
- WM831X_AUXADC_DATA_EINT);
- break;
- } else {
- dev_err(wm831x->dev,
- "AUXADC conversion timeout\n");
- ret = -EBUSY;
- goto disable;
- }
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ } else {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
}
ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index 5663b8b0b3ad..3d0ebb004dbf 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -325,7 +325,7 @@ const struct regmap_config wm8350_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = WM8350_MAX_REGISTER,
.readable_reg = wm8350_readable,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 75483c9be0c4..ddfb234849dd 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -100,7 +100,7 @@ static const struct regmap_config wm8400_regmap_config = {
.volatile_reg = wm8400_volatile,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/**
diff --git a/drivers/mfd/wm97xx-core.c b/drivers/mfd/wm97xx-core.c
index 663acbb1854c..1566a9b04b6a 100644
--- a/drivers/mfd/wm97xx-core.c
+++ b/drivers/mfd/wm97xx-core.c
@@ -95,7 +95,7 @@ static const struct regmap_config wm9705_regmap_config = {
.reg_stride = 2,
.val_bits = 16,
.max_register = 0x7e,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm9705_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm9705_reg_defaults),
@@ -163,7 +163,7 @@ static const struct regmap_config wm9712_regmap_config = {
.reg_stride = 2,
.val_bits = 16,
.max_register = 0x7e,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm9712_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm9712_reg_defaults),
@@ -234,7 +234,7 @@ static const struct regmap_config wm9713_regmap_config = {
.reg_stride = 2,
.val_bits = 16,
.max_register = 0x7e,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm9713_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm9713_reg_defaults),
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 03319a1fa97f..dbd26c3b245b 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -263,7 +263,6 @@ struct fastrpc_channel_ctx {
int domain_id;
int sesscount;
int vmcount;
- u64 perms;
struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
struct rpmsg_device *rpdev;
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
@@ -1279,9 +1278,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
if (fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
- &fl->cctx->perms,
+ &src_perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1915,8 +1916,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
/* Add memory to static PD pool, protection thru hypervisor */
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
- &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
+ &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
buf->phys, buf->size, err);
@@ -2290,7 +2293,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
if (vmcount) {
data->vmcount = vmcount;
- data->perms = BIT(QCOM_SCM_VMID_HLOS);
for (i = 0; i < data->vmcount; i++) {
data->vmperms[i].vmid = vmids[i];
data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
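The fastrpc hunks above drop the long-lived cctx->perms field and pass a stack-local source-permission mask to each qcom_scm_assign_mem() call, since the call updates the mask it is given. A rough sketch of the call pattern; the helper name and arguments are placeholders, and the header path is assumed for recent kernels:

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>	/* assumed header location */

/*
 * Sketch only: qcom_scm_assign_mem() modifies the source-VM bitmask it is
 * handed, so a fresh local is used for every call instead of a shared field.
 */
static int example_assign_heap(phys_addr_t phys, size_t size,
			       const struct qcom_scm_vmperm *vmperms,
			       unsigned int vmcount)
{
	u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);

	return qcom_scm_assign_mem(phys, size, &src_perms, vmperms, vmcount);
}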
diff --git a/drivers/misc/gehc-achc.c b/drivers/misc/gehc-achc.c
index 4c9c5394da6f..b8fca4d393c6 100644
--- a/drivers/misc/gehc-achc.c
+++ b/drivers/misc/gehc-achc.c
@@ -65,7 +65,7 @@ static int ezport_start_programming(struct spi_device *spi, struct gpio_desc *re
struct spi_transfer release_cs = { };
int ret;
- spi_bus_lock(spi->master);
+ spi_bus_lock(spi->controller);
/* assert chip select */
spi_message_init(&msg);
@@ -85,16 +85,16 @@ static int ezport_start_programming(struct spi_device *spi, struct gpio_desc *re
ret = spi_sync_locked(spi, &msg);
fail:
- spi_bus_unlock(spi->master);
+ spi_bus_unlock(spi->controller);
return ret;
}
static void ezport_stop_programming(struct spi_device *spi, struct gpio_desc *reset)
{
/* reset without asserted chip select to return into normal mode */
- spi_bus_lock(spi->master);
+ spi_bus_lock(spi->controller);
ezport_reset(reset);
- spi_bus_unlock(spi->master);
+ spi_bus_unlock(spi->controller);
}
static int ezport_get_status_register(struct spi_device *spi)
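The gehc-achc hunks above are part of the tree-wide move from the spi_device ->master accessor to ->controller; the locking itself is unchanged. A hedged sketch of exclusive bus access with the new field name (the transfer contents are placeholders):

#include <linux/spi/spi.h>

/* Sketch: run one transfer while holding exclusive access to the SPI bus. */
static int example_locked_xfer(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer t = { .tx_buf = buf, .len = len };
	struct spi_message m;
	int ret;

	spi_message_init_with_transfers(&m, &t, 1);

	spi_bus_lock(spi->controller);		/* was spi->master */
	ret = spi_sync_locked(spi, &m);
	spi_bus_unlock(spi->controller);

	return ret;
}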
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index c6eb27d46cb0..15119584473c 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -198,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ /* Turn on for wakeup if turned off by runtime suspend */
+ if (lis3->pdata && lis3->pdata->wakeup_flags) {
+ if (pm_runtime_suspended(dev))
+ lis3lv02d_poweron(lis3);
+ /* For non-wakeup, turn off if not already turned off by runtime suspend */
+ } else if (!pm_runtime_suspended(dev))
lis3lv02d_poweroff(lis3);
+
return 0;
}
@@ -208,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- /*
- * pm_runtime documentation says that devices should always
- * be powered on at resume. Pm_runtime turns them off after system
- * wide resume is complete.
- */
- if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
- pm_runtime_suspended(dev))
+ /* Turn back off if turned on for wakeup and runtime suspended */
+ if (lis3->pdata && lis3->pdata->wakeup_flags) {
+ if (pm_runtime_suspended(dev))
+ lis3lv02d_poweroff(lis3);
+ /* For non-wakeup, turn back on if not runtime suspended */
+ } else if (!pm_runtime_suspended(dev))
lis3lv02d_poweron(lis3);
return 0;
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index b080eb2335eb..b92767d6bdd2 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -294,10 +294,11 @@ static void lkdtm_SPINLOCKUP(void)
__release(&lock_me_up);
}
-static void lkdtm_HUNG_TASK(void)
+static void __noreturn lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
+ BUG();
}
static volatile unsigned int huge = INT_MAX - 2;
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 0772e4a4757e..5732fd59a227 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -153,12 +153,17 @@ static const struct crashtype *find_crashtype(const char *name)
/*
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
+ *
+ * NOTE: having a valid return value helps prevent the compiler from doing
+ * tail call optimizations and taking this out of the stack trace.
*/
-static noinline void lkdtm_do_action(const struct crashtype *crashtype)
+static noinline int lkdtm_do_action(const struct crashtype *crashtype)
{
if (WARN_ON(!crashtype || !crashtype->func))
- return;
+ return -EINVAL;
crashtype->func();
+
+ return 0;
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
@@ -167,10 +172,8 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
int ret;
/* If this doesn't have a symbol, just call immediately. */
- if (!crashpoint->kprobe.symbol_name) {
- lkdtm_do_action(crashtype);
- return 0;
- }
+ if (!crashpoint->kprobe.symbol_name)
+ return lkdtm_do_action(crashtype);
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
@@ -216,7 +219,7 @@ static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
spin_unlock_irqrestore(&crash_count_lock, flags);
if (do_it)
- lkdtm_do_action(lkdtm_crashtype);
+ return lkdtm_do_action(lkdtm_crashtype);
return 0;
}
@@ -303,6 +306,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
{
const struct crashtype *crashtype;
char *buf;
+ int err;
if (count >= PAGE_SIZE)
return -EINVAL;
@@ -326,9 +330,11 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
return -EINVAL;
pr_info("Performing direct entry %s\n", crashtype->name);
- lkdtm_do_action(crashtype);
+ err = lkdtm_do_action(crashtype);
*off += count;
+ if (err)
+ return err;
return count;
}
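As the new comment in lkdtm_do_action() notes, a noinline function whose return value the caller actually consumes is much less likely to be tail-call optimised away, so its frame stays visible in the crash backtrace. A generic sketch of the idea (names are illustrative, not lkdtm's):

#include <linux/compiler.h>
#include <linux/errno.h>

/*
 * Sketch: the caller uses the return value, so the compiler keeps this
 * frame on the stack when func() crashes instead of turning the call
 * into a tail call.
 */
static noinline int run_crash_action(void (*func)(void))
{
	if (!func)
		return -EINVAL;

	func();
	return 0;
}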
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 4f467d3972a6..b1b316f99703 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -48,7 +48,7 @@ static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
* correctly.
*
* This should get caught by either memory tagging, KASan, or by using
- * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
+ * CONFIG_SLUB_DEBUG=y and slab_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
*/
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
index be52b113aea9..89364bdbb129 100644
--- a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
+++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
@@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = {
*
* The function checks if the device is pci device and
* Intel VGA adapter, the subcomponent is SW Proxy
- * and the parent of MEI PCI and the parent of VGA are the same PCH device.
+ * and the VGA adapter is on bus 0, which is reserved for built-in
+ * devices, so that discrete GFX is rejected.
*
* @dev: master device
* @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY)
@@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
if (subcomponent != I915_COMPONENT_GSC_PROXY)
return 0;
- return component_compare_dev(dev->parent, ((struct device *)data)->parent);
+ /* Only built-in GFX */
+ return (pdev->bus->number == 0);
}
static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
@@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
}
component_match_add_typed(&cldev->dev, &master_match,
- mei_gsc_proxy_component_match, cldev->dev.parent);
+ mei_gsc_proxy_component_match, NULL);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 961e5d53a27a..aac36750d2c5 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -112,6 +112,8 @@
#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */
+#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */
+#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */
/*
* MEI HW Section
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 676d566f38dd..8cf636c54032 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 6f4a4be6ccb5..55f7db490d3b 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -535,6 +535,7 @@ static const struct acpi_device_id vsc_tp_acpi_ids[] = {
{ "INTC1009" }, /* Raptor Lake */
{ "INTC1058" }, /* Tiger Lake */
{ "INTC1094" }, /* Alder Lake */
+ { "INTC10D0" }, /* Meteor Lake */
{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index f50d22882476..3964d9e5a39b 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -224,8 +224,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
return VMCI_ERROR_NO_MEM;
}
- dg_info = kmalloc(sizeof(*dg_info) +
- (size_t) dg->payload_size, GFP_ATOMIC);
+ dg_info = kmalloc(struct_size(dg_info, msg_payload, dg->payload_size),
+ GFP_ATOMIC);
if (!dg_info) {
atomic_dec(&delayed_dg_host_queue_size);
vmci_resource_put(resource);
@@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, dg_size);
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
@@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
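The vmci_datagram hunks above switch the delayed-datagram allocation to struct_size() and copy the fixed header and the flexible payload separately, replacing the open-coded size arithmetic and the memcpy() that wrote past the declared struct member. A minimal sketch of that pattern with a hypothetical structure:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_msg {
	u32 len;
	u8  payload[];		/* flexible array member */
};

/* Sketch: allocate header + payload with overflow-checked size math. */
static struct example_msg *example_msg_dup(const void *data, u32 len)
{
	struct example_msg *msg;

	msg = kmalloc(struct_size(msg, payload, len), GFP_KERNEL);
	if (!msg)
		return NULL;

	msg->len = len;
	memcpy(msg->payload, data, len);
	return msg;
}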
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 32d49100dff5..64a3492e8002 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -144,7 +144,7 @@ struct mmc_blk_data {
static dev_t mmc_rpmb_devt;
/* Bus type for RPMB character devices */
-static struct bus_type mmc_rpmb_bus_type = {
+static const struct bus_type mmc_rpmb_bus_type = {
.name = "mmc_rpmb",
};
@@ -206,7 +206,7 @@ static void mmc_blk_kref_release(struct kref *ref)
int devidx;
devidx = mmc_get_devidx(md->disk);
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
mutex_lock(&open_lock);
md->disk->private_data = NULL;
@@ -874,10 +874,11 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if ((part_type & mask) == mask) {
+ if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -892,10 +893,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if ((part_type & mask) == mask) {
+ if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@@ -2467,7 +2469,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
bool cache_enabled = false;
bool fua_enabled = false;
- devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0) {
/*
* We get -ENOSPC because there are no more any available
@@ -2577,7 +2579,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
err_kfree:
kfree(md);
out:
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
@@ -2703,7 +2705,7 @@ static void mmc_blk_rpmb_device_release(struct device *dev)
{
struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
- ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ ida_free(&mmc_rpmb_ida, rpmb->id);
kfree(rpmb);
}
@@ -2719,13 +2721,13 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_rpmb_data *rpmb;
/* This creates the minor number for the RPMB char device */
- devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0)
return devidx;
rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
if (!rpmb) {
- ida_simple_remove(&mmc_rpmb_ida, devidx);
+ ida_free(&mmc_rpmb_ida, devidx);
return -ENOMEM;
}
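The part-switch hunks above tighten the RPMB test: EXT_CSD_PART_CONFIG_ACC_RPMB is a value inside the access field, not a standalone bit mask, so the field has to be extracted with EXT_CSD_PART_CONFIG_ACC_MASK and then compared for equality. A sketch of the corrected check, assuming the constants from include/linux/mmc/mmc.h:

#include <linux/types.h>
#include <linux/mmc/mmc.h>

/*
 * Sketch: the old test (part_type & ACC_RPMB) == ACC_RPMB also matched any
 * access value that happens to include the RPMB bits; masking with ACC_MASK
 * and comparing for equality matches only the RPMB partition.
 */
static bool example_part_is_rpmb(unsigned int part_type)
{
	return (part_type & EXT_CSD_PART_CONFIG_ACC_MASK) ==
	       EXT_CSD_PART_CONFIG_ACC_RPMB;
}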
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 0af96548e7da..0ddaee0eae54 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -214,7 +214,7 @@ static const struct dev_pm_ops mmc_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
};
-static struct bus_type mmc_bus_type = {
+static const struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_groups = mmc_dev_groups,
.uevent = mmc_bus_uevent,
@@ -272,7 +272,7 @@ static void mmc_release_card(struct device *dev)
/*
* Allocate and initialise a new MMC card structure.
*/
-struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
+struct mmc_card *mmc_alloc_card(struct mmc_host *host, const struct device_type *type)
{
struct mmc_card *card;
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 3996b191b68d..cfd0d02d3420 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -23,7 +23,7 @@ static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *a
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
struct mmc_card *mmc_alloc_card(struct mmc_host *host,
- struct device_type *type);
+ const struct device_type *type);
int mmc_add_card(struct mmc_card *card);
void mmc_remove_card(struct mmc_card *card);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index cf396e8f34e9..8f8781d6c25e 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -76,7 +76,7 @@ static void mmc_host_classdev_release(struct device *dev)
struct mmc_host *host = cls_dev_to_mmc_host(dev);
wakeup_source_unregister(host->ws);
if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
- ida_simple_remove(&mmc_host_ida, host->index);
+ ida_free(&mmc_host_ida, host->index);
kfree(host);
}
@@ -88,7 +88,7 @@ static int mmc_host_classdev_shutdown(struct device *dev)
return 0;
}
-static struct class mmc_host_class = {
+static const struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
.shutdown_pre = mmc_host_classdev_shutdown,
@@ -234,10 +234,8 @@ static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
}
void
-mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map)
+mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map)
{
- struct device *dev = host->parent;
-
mmc_of_parse_timing_phase(dev, "clk-phase-legacy",
&map->phase[MMC_TIMING_LEGACY]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs",
@@ -538,7 +536,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
min_idx = mmc_first_nonreserved_index();
max_idx = 0;
- index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
+ index = ida_alloc_range(&mmc_host_ida, min_idx, max_idx - 1,
+ GFP_KERNEL);
if (index < 0) {
kfree(host);
return NULL;
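The block.c and host.c hunks above are the usual ida_simple_*() to ida_alloc_*()/ida_free() conversion; the only trap is that ida_simple_get() takes an exclusive upper bound while ida_alloc_max() and ida_alloc_range() take an inclusive one, hence the "- 1" in the new calls. A small sketch of the equivalence with a hypothetical allocator:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/*
 * Old: ida_simple_get(&example_ida, 0, max_devices, GFP_KERNEL)
 *      allocates in [0, max_devices).
 * New: the bound is inclusive, so pass max_devices - 1.
 */
static int example_alloc_id(unsigned int max_devices)
{
	return ida_alloc_max(&example_ida, max_devices - 1, GFP_KERNEL);
}

static void example_free_id(int id)
{
	ida_free(&example_ida, id);	/* was ida_simple_remove() */
}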
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f410bee50132..5b2f7c285461 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -883,7 +883,7 @@ static struct attribute *mmc_std_attrs[] = {
};
ATTRIBUTE_GROUPS(mmc_std);
-static struct device_type mmc_type = {
+static const struct device_type mmc_type = {
.groups = mmc_std_groups,
};
@@ -1015,10 +1015,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index a0a2412f62a7..241cdc2b2a2a 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -174,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
return sg;
}
-static void mmc_queue_setup_discard(struct request_queue *q,
- struct mmc_card *card)
+static void mmc_queue_setup_discard(struct mmc_card *card,
+ struct queue_limits *lim)
{
unsigned max_discard;
@@ -183,15 +183,17 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (!max_discard)
return;
- blk_queue_max_discard_sectors(q, max_discard);
- q->limits.discard_granularity = card->pref_erase << 9;
- /* granularity must not be greater than max. discard */
- if (card->pref_erase > max_discard)
- q->limits.discard_granularity = SECTOR_SIZE;
+ lim->max_hw_discard_sectors = max_discard;
if (mmc_can_secure_erase_trim(card))
- blk_queue_max_secure_erase_sectors(q, max_discard);
+ lim->max_secure_erase_sectors = max_discard;
if (mmc_can_trim(card) && card->erased_byte == 0)
- blk_queue_max_write_zeroes_sectors(q, max_discard);
+ lim->max_write_zeroes_sectors = max_discard;
+
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ lim->discard_granularity = SECTOR_SIZE;
+ else
+ lim->discard_granularity = card->pref_erase << 9;
}
static unsigned short mmc_get_max_segments(struct mmc_host *host)
@@ -341,40 +343,50 @@ static const struct blk_mq_ops mmc_mq_ops = {
.timeout = mmc_mq_timed_out,
};
-static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
+ struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned block_size = 512;
+ struct queue_limits lim = { };
+ struct gendisk *disk;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
-
- if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- if (host->can_dma_map_merge)
- WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
- mmc_dev(host)),
- "merging was advertised but not possible");
- blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
-
- if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
- block_size = card->ext_csd.data_sector_size;
- WARN_ON(block_size != 512 && block_size != 4096);
- }
+ mmc_queue_setup_discard(card, &lim);
+
+ lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
+
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+ lim.logical_block_size = card->ext_csd.data_sector_size;
+ else
+ lim.logical_block_size = 512;
+
+ WARN_ON_ONCE(lim.logical_block_size != 512 &&
+ lim.logical_block_size != 4096);
- blk_queue_logical_block_size(mq->queue, block_size);
/*
- * After blk_queue_can_use_dma_map_merging() was called with succeed,
- * since it calls blk_queue_virt_boundary(), the mmc should not call
- * both blk_queue_max_segment_size().
+ * Setting a virt_boundary implicitly sets a max_segment_size, so try
+ * to set the hardware one here.
*/
- if (!host->can_dma_map_merge)
- blk_queue_max_segment_size(mq->queue,
- round_down(host->max_seg_size, block_size));
+ if (host->can_dma_map_merge) {
+ lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
+ lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
+ } else {
+ lim.max_segment_size =
+ round_down(host->max_seg_size, lim.logical_block_size);
+ lim.max_segments = host->max_segs;
+ }
+
+ disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
+ if (IS_ERR(disk))
+ return disk;
+ mq->queue = disk->queue;
+
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
@@ -386,6 +398,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
mmc_crypto_setup_queue(mq->queue, host);
+ return disk;
}
static inline bool mmc_merge_capable(struct mmc_host *host)
@@ -447,18 +460,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
return ERR_PTR(ret);
- disk = blk_mq_alloc_disk(&mq->tag_set, mq);
- if (IS_ERR(disk)) {
+ disk = mmc_alloc_disk(mq, card);
+ if (IS_ERR(disk))
blk_mq_free_tag_set(&mq->tag_set);
- return disk;
- }
- mq->queue = disk->queue;
-
- if (mmc_host_is_spi(host) && host->use_spi_crc)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
- blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
- mmc_setup_queue(mq, card);
return disk;
}
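The queue.c rework above feeds the device limits to blk_mq_alloc_disk() through a struct queue_limits instead of calling the individual blk_queue_*() helpers after allocation. A reduced sketch of that flow; the tag set is assumed to be set up already and the numeric limits are placeholders for whatever the host controller reports:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Sketch: build the limits up front and hand them to blk_mq_alloc_disk(). */
static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
					  void *queuedata)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,
		.max_hw_sectors		= 1024,
		.max_segments		= 128,
		.max_segment_size	= 65536,
	};

	return blk_mq_alloc_disk(set, &lim, queuedata);
}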
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c3e554344c99..1c8148cdda50 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -805,7 +805,7 @@ static const struct attribute_group sd_std_group = {
};
__ATTRIBUTE_GROUPS(sd_std);
-struct device_type sd_type = {
+const struct device_type sd_type = {
.groups = sd_std_groups,
};
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 1af5a038bae9..fe6dd46927a4 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-extern struct device_type sd_type;
+extern const struct device_type sd_type;
struct mmc_host;
struct mmc_card;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5914516df2f7..4fb247fde5c0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -66,7 +66,7 @@ static struct attribute *sdio_std_attrs[] = {
};
ATTRIBUTE_GROUPS(sdio_std);
-static struct device_type sdio_type = {
+static const struct device_type sdio_type = {
.groups = sdio_std_groups,
};
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 47a48e902a24..71d885fbc228 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -244,7 +244,7 @@ static const struct dev_pm_ops sdio_bus_pm_ops = {
)
};
-static struct bus_type sdio_bus_type = {
+static const struct bus_type sdio_bus_type = {
.name = "sdio",
.dev_groups = sdio_dev_groups,
.match = sdio_bus_match,
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 81f2c4e05287..aebc587f77a7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -798,6 +798,15 @@ config MMC_DW_HI3798CV200
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on HiSilicon Hi3798CV200 SoC.
+config MMC_DW_HI3798MV200
+ tristate "Hi3798MV200 specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ help
+ This selects support for HiSilicon Hi3798MV200 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on HiSilicon Hi3798MV200 SoC.
+
config MMC_DW_K3
tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index d0be4465f3ec..f53f86d200ac 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_BLUEFIELD) += dw_mmc-bluefield.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o
+obj-$(CONFIG_MMC_DW_HI3798MV200) += dw_mmc-hi3798mv200.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ee3b1a4e0848..8bd938919687 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -180,12 +180,6 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
- /* buffer is used during PIO of one scatterlist segment, and
- * is updated along with buffer_bytes_left. bytes_left applies
- * to all N blocks of the PIO transfer.
- */
- u8 *buffer;
- u32 buffer_bytes_left;
u32 bytes_left;
struct dma_chan *dma_tx;
@@ -196,8 +190,8 @@ struct mmc_davinci_host {
bool active_request;
/* For PIO we walk scatterlists one segment at a time. */
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- struct scatterlist *sg;
/* Version of the MMC/SD controller */
u8 version;
@@ -213,30 +207,22 @@ struct mmc_davinci_host {
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
/* PIO only */
-static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
-{
- host->buffer_bytes_left = sg_dma_len(host->sg);
- host->buffer = sg_virt(host->sg);
- if (host->buffer_bytes_left > host->bytes_left)
- host->buffer_bytes_left = host->bytes_left;
-}
-
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int n)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
u8 *p;
unsigned int i;
- if (host->buffer_bytes_left == 0) {
- host->sg = sg_next(host->data->sg);
- mmc_davinci_sg_to_buf(host);
+ /*
+ * Advance the iterator; sgm->addr then points at the current
+ * position, and sgm->consumed (set below) records how much we used.
+ */
+ if (!sg_miter_next(sgm)) {
+ dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n");
+ return;
}
-
- p = host->buffer;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
- host->buffer_bytes_left -= n;
- host->bytes_left -= n;
+ p = sgm->addr;
/* NOTE: we never transfer more than rw_threshold bytes
* to/from the fifo here; there's no I/O overlap.
@@ -261,7 +247,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
p = p + (n & 3);
}
}
- host->buffer = p;
+
+ sgm->consumed = n;
+ host->bytes_left -= n;
}
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
@@ -517,6 +505,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
int timeout;
struct mmc_data *data = req->data;
+ unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
if (host->version == MMC_CTLR_VERSION_2)
fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
@@ -545,12 +534,14 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
/* Configure the FIFO */
if (data->flags & MMC_DATA_WRITE) {
+ flags |= SG_MITER_FROM_SG;
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
} else {
+ flags |= SG_MITER_TO_SG;
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
@@ -558,7 +549,6 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
host->base + DAVINCI_MMCFIFOCTL);
}
- host->buffer = NULL;
host->bytes_left = data->blocks * data->blksz;
/* For now we try to use DMA whenever we won't need partial FIFO
@@ -576,8 +566,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
} else {
/* Revert to CPU Copy */
host->sg_len = data->sg_len;
- host->sg = host->data->sg;
- mmc_davinci_sg_to_buf(host);
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
}
@@ -843,6 +832,8 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
mmc_davinci_reset_ctrl(host, 1);
mmc_davinci_reset_ctrl(host, 0);
+ if (!host->do_dma)
+ sg_miter_stop(&host->sg_miter);
}
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
@@ -919,11 +910,13 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
if (qstatus & MMCST0_DATDNE) {
/* All blocks sent/received, and CRC checks passed */
if (data != NULL) {
- if ((host->do_dma == 0) && (host->bytes_left > 0)) {
- /* if datasize < rw_threshold
- * no RX ints are generated
- */
- davinci_fifo_data_trans(host, host->bytes_left);
+ if (!host->do_dma) {
+ if (host->bytes_left > 0)
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ sg_miter_stop(&host->sg_miter);
}
end_transfer = 1;
data->bytes_xfered = data->blocks * data->blksz;
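The davinci conversion above replaces the hand-rolled buffer/bytes-left bookkeeping with the scatterlist mapping iterator. The general shape of a PIO loop built on sg_miter, assuming an atomic (IRQ) context and a write to the device, is roughly:

#include <linux/minmax.h>
#include <linux/scatterlist.h>

/*
 * Sketch: push up to total_len bytes from a scatterlist to a FIFO via PIO.
 * fifo_write() is a placeholder for the real register access.
 */
static void example_pio_write(struct scatterlist *sg, unsigned int sg_len,
			      size_t total_len,
			      void (*fifo_write)(const void *buf, size_t len))
{
	struct sg_mapping_iter miter;
	size_t done = 0;

	sg_miter_start(&miter, sg, sg_len, SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (done < total_len && sg_miter_next(&miter)) {
		size_t n = min(miter.length, total_len - done);

		fifo_write(miter.addr, n);
		miter.consumed = n;	/* tell the iterator how much was used */
		done += n;
	}

	sg_miter_stop(&miter);
}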
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 698408e8bad0..6dc057718d2c 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -11,7 +11,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index e9470c50a348..61923a518369 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -201,4 +201,3 @@ module_platform_driver(dw_mci_hi3798cv200_driver);
MODULE_DESCRIPTION("HiSilicon Hi3798CV200 Specific DW-MSHC Driver Extension");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:dwmmc_hi3798cv200");
diff --git a/drivers/mmc/host/dw_mmc-hi3798mv200.c b/drivers/mmc/host/dw_mmc-hi3798mv200.c
new file mode 100644
index 000000000000..989ae8dda722
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-hi3798mv200.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Modified from dw_mmc-hi3798cv200.c
+ *
+ * Copyright (c) 2024 Yang Xiwen <forbidden405@outlook.com>
+ * Copyright (c) 2018 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define SDMMC_TUNING_CTRL 0x118
+#define SDMMC_TUNING_FIND_EDGE BIT(5)
+
+#define ALL_INT_CLR 0x1ffff
+
+/* DLL ctrl reg */
+#define SAP_DLL_CTRL_DLLMODE BIT(16)
+
+struct dw_mci_hi3798mv200_priv {
+ struct clk *sample_clk;
+ struct clk *drive_clk;
+ struct regmap *crg_reg;
+ u32 sap_dll_offset;
+ struct mmc_clk_phase_map phase_map;
+};
+
+static void dw_mci_hi3798mv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ struct dw_mci_hi3798mv200_priv *priv = host->priv;
+ struct mmc_clk_phase phase = priv->phase_map.phase[ios->timing];
+ u32 val;
+
+ val = mci_readl(host, ENABLE_SHIFT);
+ if (ios->timing == MMC_TIMING_MMC_DDR52
+ || ios->timing == MMC_TIMING_UHS_DDR50)
+ val |= SDMMC_ENABLE_PHASE;
+ else
+ val &= ~SDMMC_ENABLE_PHASE;
+ mci_writel(host, ENABLE_SHIFT, val);
+
+ val = mci_readl(host, DDR_REG);
+ if (ios->timing == MMC_TIMING_MMC_HS400)
+ val |= SDMMC_DDR_HS400;
+ else
+ val &= ~SDMMC_DDR_HS400;
+ mci_writel(host, DDR_REG, val);
+
+ if (clk_set_rate(host->ciu_clk, ios->clock))
+ dev_warn(host->dev, "Failed to set rate to %u\n", ios->clock);
+ else
+ /*
+ * CLK_MUX_ROUND_NEAREST is enabled for this clock.
+ * The actual clock rate is not what we set but a rounded value,
+ * so read the rate back once more.
+ */
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+
+ if (phase.valid) {
+ clk_set_phase(priv->drive_clk, phase.out_deg);
+ clk_set_phase(priv->sample_clk, phase.in_deg);
+ } else {
+ dev_warn(host->dev,
+ "The phase entry for timing mode %d is missing in device tree.\n",
+ ios->timing);
+ }
+}
+
+static inline int dw_mci_hi3798mv200_enable_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci_hi3798mv200_priv *priv = slot->host->priv;
+
+ return regmap_clear_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE);
+}
+
+static inline int dw_mci_hi3798mv200_disable_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci_hi3798mv200_priv *priv = slot->host->priv;
+
+ return regmap_set_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE);
+}
+
+static int dw_mci_hi3798mv200_execute_tuning_mix_mode(struct dw_mci_slot *slot,
+ u32 opcode)
+{
+ static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 };
+ struct dw_mci *host = slot->host;
+ struct dw_mci_hi3798mv200_priv *priv = host->priv;
+ int raise_point = -1, fall_point = -1, mid;
+ int err, prev_err = -1;
+ int found = 0;
+ int regval;
+ int i;
+ int ret;
+
+ ret = dw_mci_hi3798mv200_enable_tuning(slot);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(degrees); i++) {
+ clk_set_phase(priv->sample_clk, degrees[i]);
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ /*
+ * HiSilicon implemented a tuning mechanism that needs special
+ * interaction with the DLL.
+ *
+ * Treat a detected edge (flip) as an error too.
+ */
+ err = mmc_send_tuning(slot->mmc, opcode, NULL);
+ regval = mci_readl(host, TUNING_CTRL);
+ if (err || (regval & SDMMC_TUNING_FIND_EDGE))
+ err = 1;
+ else
+ found = 1;
+
+ if (i > 0) {
+ if (err && !prev_err)
+ fall_point = i - 1;
+ if (!err && prev_err)
+ raise_point = i;
+ }
+
+ if (raise_point != -1 && fall_point != -1)
+ goto tuning_out;
+
+ prev_err = err;
+ err = 0;
+ }
+
+tuning_out:
+ ret = dw_mci_hi3798mv200_disable_tuning(slot);
+ if (ret < 0)
+ return ret;
+
+ if (found) {
+ if (raise_point == -1)
+ raise_point = 0;
+ if (fall_point == -1)
+ fall_point = ARRAY_SIZE(degrees) - 1;
+ if (fall_point < raise_point) {
+ if ((raise_point + fall_point) >
+ (ARRAY_SIZE(degrees) - 1))
+ mid = fall_point / 2;
+ else
+ mid = (raise_point + ARRAY_SIZE(degrees) - 1) / 2;
+ } else {
+ mid = (raise_point + fall_point) / 2;
+ }
+
+ /*
+ * We don't care which timing we are tuning for;
+ * simply use the same phase for every timing that needs tuning.
+ */
+ priv->phase_map.phase[MMC_TIMING_MMC_HS200].in_deg = degrees[mid];
+ priv->phase_map.phase[MMC_TIMING_MMC_HS400].in_deg = degrees[mid];
+ priv->phase_map.phase[MMC_TIMING_UHS_SDR104].in_deg = degrees[mid];
+
+ clk_set_phase(priv->sample_clk, degrees[mid]);
+ dev_dbg(host->dev, "Tuning clk_sample[%d, %d], set[%d]\n",
+ raise_point, fall_point, degrees[mid]);
+ ret = 0;
+ } else {
+ dev_err(host->dev, "No valid clk_sample shift!\n");
+ ret = -EINVAL;
+ }
+
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ return ret;
+}
+
+static int dw_mci_hi3798mv200_init(struct dw_mci *host)
+{
+ struct dw_mci_hi3798mv200_priv *priv;
+ struct device_node *np = host->dev->of_node;
+ int ret;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mmc_of_parse_clk_phase(host->dev, &priv->phase_map);
+
+ priv->sample_clk = devm_clk_get_enabled(host->dev, "ciu-sample");
+ if (IS_ERR(priv->sample_clk))
+ return dev_err_probe(host->dev, PTR_ERR(priv->sample_clk),
+ "failed to get enabled ciu-sample clock\n");
+
+ priv->drive_clk = devm_clk_get_enabled(host->dev, "ciu-drive");
+ if (IS_ERR(priv->drive_clk))
+ return dev_err_probe(host->dev, PTR_ERR(priv->drive_clk),
+ "failed to get enabled ciu-drive clock\n");
+
+ priv->crg_reg = syscon_regmap_lookup_by_phandle(np, "hisilicon,sap-dll-reg");
+ if (IS_ERR(priv->crg_reg))
+ return dev_err_probe(host->dev, PTR_ERR(priv->crg_reg),
+ "failed to get CRG reg\n");
+
+ ret = of_property_read_u32_index(np, "hisilicon,sap-dll-reg", 1, &priv->sap_dll_offset);
+ if (ret)
+ return dev_err_probe(host->dev, ret, "failed to get sample DLL register offset\n");
+
+ host->priv = priv;
+ return 0;
+}
+
+static const struct dw_mci_drv_data hi3798mv200_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .init = dw_mci_hi3798mv200_init,
+ .set_ios = dw_mci_hi3798mv200_set_ios,
+ .execute_tuning = dw_mci_hi3798mv200_execute_tuning_mix_mode,
+};
+
+static const struct of_device_id dw_mci_hi3798mv200_match[] = {
+ { .compatible = "hisilicon,hi3798mv200-dw-mshc" },
+ {},
+};
+
+static int dw_mci_hi3798mv200_probe(struct platform_device *pdev)
+{
+ return dw_mci_pltfm_register(pdev, &hi3798mv200_data);
+}
+
+static void dw_mci_hi3798mv200_remove(struct platform_device *pdev)
+{
+ dw_mci_pltfm_remove(pdev);
+}
+
+MODULE_DEVICE_TABLE(of, dw_mci_hi3798mv200_match);
+static struct platform_driver dw_mci_hi3798mv200_driver = {
+ .probe = dw_mci_hi3798mv200_probe,
+ .remove_new = dw_mci_hi3798mv200_remove,
+ .driver = {
+ .name = "dwmmc_hi3798mv200",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = dw_mci_hi3798mv200_match,
+ },
+};
+module_platform_driver(dw_mci_hi3798mv200_driver);
+
+MODULE_DESCRIPTION("HiSilicon Hi3798MV200 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 829af2c98a44..8e2d676b9239 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -35,7 +35,6 @@
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c
index 19200b7079a6..cbd17a596cd2 100644
--- a/drivers/mmc/host/meson-mx-sdhc-clkc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c
@@ -71,12 +71,23 @@ static int meson_mx_sdhc_clk_hw_register(struct device *dev,
static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev,
const char *name_suffix,
struct clk_hw *parent,
- struct clk_hw *hw)
+ struct clk_hw *hw,
+ struct clk_bulk_data *clk_bulk_data,
+ u8 bulk_index)
{
struct clk_parent_data parent_data = { .hw = parent };
+ int ret;
+
+ ret = meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
+ &clk_gate_ops, hw);
+ if (ret)
+ return ret;
+
+ clk_bulk_data[bulk_index].clk = devm_clk_hw_get_clk(dev, hw, name_suffix);
+ if (IS_ERR(clk_bulk_data[bulk_index].clk))
+ return PTR_ERR(clk_bulk_data[bulk_index].clk);
- return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
- &clk_gate_ops, hw);
+ return 0;
}
int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
@@ -115,7 +126,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->mod_clk_en.bit_idx = 15;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on",
&clkc_data->div.hw,
- &clkc_data->mod_clk_en.hw);
+ &clkc_data->mod_clk_en.hw,
+ clk_bulk_data, 0);
if (ret)
return ret;
@@ -123,7 +135,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->tx_clk_en.bit_idx = 14;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on",
&clkc_data->div.hw,
- &clkc_data->tx_clk_en.hw);
+ &clkc_data->tx_clk_en.hw,
+ clk_bulk_data, 1);
if (ret)
return ret;
@@ -131,7 +144,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->rx_clk_en.bit_idx = 13;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on",
&clkc_data->div.hw,
- &clkc_data->rx_clk_en.hw);
+ &clkc_data->rx_clk_en.hw,
+ clk_bulk_data, 2);
if (ret)
return ret;
@@ -139,18 +153,7 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->sd_clk_en.bit_idx = 12;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on",
&clkc_data->div.hw,
- &clkc_data->sd_clk_en.hw);
- if (ret)
- return ret;
-
- /*
- * TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is
- * available.
- */
- clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk;
- clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk;
- clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk;
- clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk;
-
- return 0;
+ &clkc_data->sd_clk_en.hw,
+ clk_bulk_data, 3);
+ return ret;
}
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 1ed9731e77ef..31f750301dc1 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -65,10 +65,8 @@ static const struct regmap_config meson_mx_sdhc_regmap_config = {
.max_register = MESON_SDHC_CLK2,
};
-static void meson_mx_sdhc_hw_reset(struct mmc_host *mmc)
+static void meson_mx_sdhc_reset(struct meson_mx_sdhc_host *host)
{
- struct meson_mx_sdhc_host *host = mmc_priv(mmc);
-
regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_MAIN_CTRL |
MESON_SDHC_SRST_RXFIFO | MESON_SDHC_SRST_TXFIFO |
MESON_SDHC_SRST_DPHY_RX | MESON_SDHC_SRST_DPHY_TX |
@@ -116,7 +114,7 @@ static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
dev_warn(mmc_dev(mmc),
"Failed to poll for CMD_BUSY while processing CMD%d\n",
host->cmd->opcode);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
}
ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, esta,
@@ -127,7 +125,7 @@ static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
dev_warn(mmc_dev(mmc),
"Failed to poll for ESTA[13:11] while processing CMD%d\n",
host->cmd->opcode);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
}
}
@@ -495,7 +493,6 @@ static int meson_mx_sdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
static const struct mmc_host_ops meson_mx_sdhc_ops = {
- .card_hw_reset = meson_mx_sdhc_hw_reset,
.request = meson_mx_sdhc_request,
.set_ios = meson_mx_sdhc_set_ios,
.card_busy = meson_mx_sdhc_card_busy,
@@ -618,7 +615,7 @@ static irqreturn_t meson_mx_sdhc_irq_thread(int irq, void *irq_data)
}
if (cmd->error == -EIO || cmd->error == -ETIMEDOUT)
- meson_mx_sdhc_hw_reset(host->mmc);
+ meson_mx_sdhc_reset(host);
else if (cmd->data)
/*
* Clear the FIFOs after completing data transfers to prevent
@@ -728,7 +725,7 @@ static void meson_mx_sdhc_init_hw(struct mmc_host *mmc)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
regmap_write(host->regmap, MESON_SDHC_CTRL,
FIELD_PREP(MESON_SDHC_CTRL_RX_PERIOD, 0xf) |
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index b8dda8160c4e..09d7a6a0dc1a 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
-#include <linux/dma-direction.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>
@@ -510,10 +509,7 @@ mmc_spi_command_send(struct mmc_spi_host *host,
* so we explicitly initialize it to all ones on RX paths.
*/
static void
-mmc_spi_setup_data_message(
- struct mmc_spi_host *host,
- bool multiple,
- enum dma_data_direction direction)
+mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write)
{
struct spi_transfer *t;
struct scratch *scratch = host->data;
@@ -523,7 +519,7 @@ mmc_spi_setup_data_message(
/* for reads, readblock() skips 0xff bytes before finding
* the token; for writes, this transfer issues that token.
*/
- if (direction == DMA_TO_DEVICE) {
+ if (write) {
t = &host->token;
memset(t, 0, sizeof(*t));
t->len = 1;
@@ -547,7 +543,7 @@ mmc_spi_setup_data_message(
t = &host->crc;
memset(t, 0, sizeof(*t));
t->len = 2;
- if (direction == DMA_TO_DEVICE) {
+ if (write) {
/* the actual CRC may get written later */
t->tx_buf = &scratch->crc_val;
} else {
@@ -570,10 +566,10 @@ mmc_spi_setup_data_message(
* the next token (next data block, or STOP_TRAN). We can try to
* minimize I/O ops by using a single read to collect end-of-busy.
*/
- if (multiple || direction == DMA_TO_DEVICE) {
+ if (multiple || write) {
t = &host->early_status;
memset(t, 0, sizeof(*t));
- t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ t->len = write ? sizeof(scratch->status) : 1;
t->tx_buf = host->ones;
t->rx_buf = scratch->status;
t->cs_change = 1;
@@ -777,15 +773,15 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
{
struct spi_device *spi = host->spi;
struct spi_transfer *t;
- enum dma_data_direction direction = mmc_get_dma_dir(data);
struct scatterlist *sg;
unsigned n_sg;
bool multiple = (data->blocks > 1);
- const char *write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
+ bool write = (data->flags & MMC_DATA_WRITE);
+ const char *write_or_read = write ? "write" : "read";
u32 clock_rate;
unsigned long timeout;
- mmc_spi_setup_data_message(host, multiple, direction);
+ mmc_spi_setup_data_message(host, multiple, write);
t = &host->t;
if (t->speed_hz)
@@ -807,7 +803,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
/* allow pio too; we don't allow highmem */
kmap_addr = kmap(sg_page(sg));
- if (direction == DMA_TO_DEVICE)
+ if (write)
t->tx_buf = kmap_addr + sg->offset;
else
t->rx_buf = kmap_addr + sg->offset;
@@ -818,7 +814,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len);
- if (direction == DMA_TO_DEVICE)
+ if (write)
status = mmc_spi_writeblock(host, t, timeout);
else
status = mmc_spi_readblock(host, t, timeout);
@@ -833,7 +829,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
}
/* discard mappings */
- if (direction == DMA_FROM_DEVICE)
+ if (write)
+ /* nothing to do */;
+ else
flush_dcache_page(sg_page(sg));
kunmap(sg_page(sg));
@@ -850,7 +848,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
* that can affect the STOP_TRAN logic. Complete (and current)
* MMC specs should sort that out before Linux starts using CMD23.
*/
- if (direction == DMA_TO_DEVICE && multiple) {
+ if (write && multiple) {
struct scratch *scratch = host->data;
int tmp;
const unsigned statlen = sizeof(scratch->status);
@@ -935,7 +933,7 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
#endif
/* request exclusive bus access */
- spi_bus_lock(host->spi->master);
+ spi_bus_lock(host->spi->controller);
crc_recover:
/* issue command; then optionally data and stop */
@@ -967,7 +965,7 @@ crc_recover:
}
/* release the bus */
- spi_bus_unlock(host->spi->master);
+ spi_bus_unlock(host->spi->controller);
mmc_request_done(host->mmc, mrq);
}
@@ -1157,7 +1155,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
*/
- if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
/* MMC and SD specs only seem to care that sampling is on the
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 35067e1e6cd8..f5da7f9baa52 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
+ host->dma_in_progress = true;
+
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
@@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
+ data->host_cookie = 0;
+
+ if (!idma->use_bounce_buffer)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ if (!dma_inprogress(host))
+ return;
+
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
+ .dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 5cfdd3a86e54..b88d6dec209f 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -131,12 +131,10 @@ struct moxart_host {
struct dma_async_tx_descriptor *tx_desc;
struct mmc_host *mmc;
struct mmc_request *mrq;
- struct scatterlist *cur_sg;
struct completion dma_complete;
struct completion pio_complete;
- u32 num_sg;
- u32 data_remain;
+ struct sg_mapping_iter sg_miter;
u32 data_len;
u32 fifo_width;
u32 timeout;
@@ -148,35 +146,6 @@ struct moxart_host {
bool is_removed;
};
-static inline void moxart_init_sg(struct moxart_host *host,
- struct mmc_data *data)
-{
- host->cur_sg = data->sg;
- host->num_sg = data->sg_len;
- host->data_remain = host->cur_sg->length;
-
- if (host->data_remain > host->data_len)
- host->data_remain = host->data_len;
-}
-
-static inline int moxart_next_sg(struct moxart_host *host)
-{
- int remain;
- struct mmc_data *data = host->mrq->cmd->data;
-
- host->cur_sg++;
- host->num_sg--;
-
- if (host->num_sg > 0) {
- host->data_remain = host->cur_sg->length;
- remain = host->data_len - data->bytes_xfered;
- if (remain > 0 && remain < host->data_remain)
- host->data_remain = remain;
- }
-
- return host->num_sg;
-}
-
static int moxart_wait_for_status(struct moxart_host *host,
u32 mask, u32 *status)
{
@@ -254,6 +223,11 @@ static void moxart_dma_complete(void *param)
complete(&host->dma_complete);
}
+static bool moxart_use_dma(struct moxart_host *host)
+{
+ return (host->data_len > host->fifo_width) && host->have_dma;
+}
+
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
u32 len, dir_slave;
@@ -291,11 +265,11 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
dma_async_issue_pending(dma_chan);
}
- data->bytes_xfered += host->data_remain;
-
wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
+ data->bytes_xfered = host->data_len;
+
dma_unmap_sg(dma_chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -304,14 +278,28 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
static void moxart_transfer_pio(struct moxart_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = host->mrq->cmd->data;
u32 *sgp, len = 0, remain, status;
if (host->data_len == data->bytes_xfered)
return;
- sgp = sg_virt(host->cur_sg);
- remain = host->data_remain;
+ /*
+	 * By updating sgm->consumed we always get a proper pointer into
+	 * the buffer at any time.
+ */
+ if (!sg_miter_next(sgm)) {
+		/* This should not happen */
+ dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
+ data->error = -EINVAL;
+ complete(&host->pio_complete);
+ return;
+ }
+ sgp = sgm->addr;
+ remain = sgm->length;
+ if (remain > host->data_len)
+ remain = host->data_len;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
@@ -326,6 +314,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
+ sgm->consumed += len;
remain -= len;
}
@@ -342,22 +331,22 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
+ sgm->consumed += len;
remain -= len;
}
}
- data->bytes_xfered += host->data_remain - remain;
- host->data_remain = remain;
-
- if (host->data_len != data->bytes_xfered)
- moxart_next_sg(host);
- else
+ data->bytes_xfered += sgm->consumed;
+ if (host->data_len == data->bytes_xfered) {
complete(&host->pio_complete);
+ return;
+ }
}
static void moxart_prepare_data(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
+ unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
u32 datactrl;
int blksz_bits;
@@ -368,15 +357,19 @@ static void moxart_prepare_data(struct moxart_host *host)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
- moxart_init_sg(host, data);
-
datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
- if (data->flags & MMC_DATA_WRITE)
+ if (data->flags & MMC_DATA_WRITE) {
+ flags |= SG_MITER_FROM_SG;
datactrl |= DCR_DATA_WRITE;
+ } else {
+ flags |= SG_MITER_TO_SG;
+ }
- if ((host->data_len > host->fifo_width) && host->have_dma)
+ if (moxart_use_dma(host))
datactrl |= DCR_DMA_EN;
+ else
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
@@ -407,7 +400,7 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
moxart_send_command(host, host->mrq->cmd);
if (mrq->cmd->data) {
- if ((host->data_len > host->fifo_width) && host->have_dma) {
+ if (moxart_use_dma(host)) {
writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
@@ -449,6 +442,9 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
request_done:
+ if (!moxart_use_dma(host))
+ sg_miter_stop(&host->sg_miter);
+
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
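
The moxart conversion above, like the mvsdio, mxcmmc, omap, esdhc-mcf and sh_mmcif ones that follow, replaces open-coded scatterlist bookkeeping with the kernel's sg_mapping_iter. A minimal sketch of that PIO pattern follows; copy_chunk() and the length handling are placeholders for this sketch, not code from any of these drivers:

	#include <linux/minmax.h>
	#include <linux/scatterlist.h>

	/* Hypothetical FIFO accessor standing in for the drivers' real ones. */
	static void copy_chunk(void *buf, size_t len, bool to_device) { }

	/* Illustrative only: walk a scatterlist in PIO from IRQ context. */
	static void pio_copy(struct scatterlist *sgl, unsigned int nents,
			     bool to_device, size_t total)
	{
		struct sg_mapping_iter miter;
		unsigned int flags = SG_MITER_ATOMIC;	/* atomic kmap, usable in IRQ */

		/* FROM_SG: buffer is read (device write); TO_SG: buffer is filled */
		flags |= to_device ? SG_MITER_FROM_SG : SG_MITER_TO_SG;

		sg_miter_start(&miter, sgl, nents, flags);
		while (total && sg_miter_next(&miter)) {
			size_t len = min_t(size_t, miter.length, total);

			copy_chunk(miter.addr, len, to_device);
			miter.consumed = len;	/* report partial use of the segment */
			total -= len;
		}
		sg_miter_stop(&miter);	/* unmap and flush the last mapping */
	}

Updating miter.consumed before the next sg_miter_next() call is what lets the interrupt-driven drivers above resume mid-segment on a later interrupt instead of tracking cur_sg/data_remain by hand.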
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index ca01b7d204ba..af7f21888e27 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -38,8 +38,9 @@ struct mvsd_host {
unsigned int xfer_mode;
unsigned int intr_en;
unsigned int ctrl;
+ bool use_pio;
+ struct sg_mapping_iter sg_miter;
unsigned int pio_size;
- void *pio_ptr;
unsigned int sg_frags;
unsigned int ns_per_clk;
unsigned int clock;
@@ -114,11 +115,18 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
* data when the buffer is not aligned on a 64 byte
* boundary.
*/
+ unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */
+
+ if (data->flags & MMC_DATA_READ)
+ miter_flags |= SG_MITER_TO_SG;
+ else
+ miter_flags |= SG_MITER_FROM_SG;
+
host->pio_size = data->blocks * data->blksz;
- host->pio_ptr = sg_virt(data->sg);
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags);
if (!nodma)
- dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
- host->pio_ptr, host->pio_size);
+ dev_dbg(host->dev, "fallback to PIO for data\n");
+ host->use_pio = true;
return 1;
} else {
dma_addr_t phys_addr;
@@ -129,6 +137,7 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
phys_addr = sg_dma_address(data->sg);
mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
+ host->use_pio = false;
return 0;
}
}
@@ -288,8 +297,8 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
{
void __iomem *iobase = host->base;
- if (host->pio_ptr) {
- host->pio_ptr = NULL;
+ if (host->use_pio) {
+ sg_miter_stop(&host->sg_miter);
host->pio_size = 0;
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
@@ -344,9 +353,12 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
static irqreturn_t mvsd_irq(int irq, void *dev)
{
struct mvsd_host *host = dev;
+ struct sg_mapping_iter *sgm = &host->sg_miter;
void __iomem *iobase = host->base;
u32 intr_status, intr_done_mask;
int irq_handled = 0;
+ u16 *p;
+ int s;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
@@ -370,15 +382,36 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
spin_lock(&host->lock);
/* PIO handling, if needed. Messy business... */
- if (host->pio_size &&
+ if (host->use_pio) {
+ /*
+		 * As we set sgm->consumed, this always gives a valid buffer
+		 * position.
+ */
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen */
+ dev_err(host->dev, "ran out of scatter segments\n");
+ spin_unlock(&host->lock);
+ host->intr_en &=
+ ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W |
+ MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ return IRQ_HANDLED;
+ }
+ p = sgm->addr;
+ s = sgm->length;
+ if (s > host->pio_size)
+ s = host->pio_size;
+ }
+
+ if (host->use_pio &&
(intr_status & host->intr_en &
(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
- u16 *p = host->pio_ptr;
- int s = host->pio_size;
+
while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
readsw(iobase + MVSD_FIFO, p, 16);
p += 16;
s -= 32;
+ sgm->consumed += 32;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
/*
@@ -391,6 +424,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
put_unaligned(mvsd_read(MVSD_FIFO), p++);
put_unaligned(mvsd_read(MVSD_FIFO), p++);
s -= 4;
+ sgm->consumed += 4;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
@@ -398,10 +432,13 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
val[0] = mvsd_read(MVSD_FIFO);
val[1] = mvsd_read(MVSD_FIFO);
memcpy(p, ((void *)&val) + 4 - s, s);
+ sgm->consumed += s;
s = 0;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
- if (s == 0) {
+ /* PIO transfer done */
+ host->pio_size -= sgm->consumed;
+ if (host->pio_size == 0) {
host->intr_en &=
~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
@@ -413,14 +450,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
- host->pio_ptr = p;
- host->pio_size = s;
irq_handled = 1;
- } else if (host->pio_size &&
+ } else if (host->use_pio &&
(intr_status & host->intr_en &
(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
- u16 *p = host->pio_ptr;
- int s = host->pio_size;
/*
* The TX_FIFO_8W bit is unreliable. When set, bursting
* 16 halfwords all at once in the FIFO drops data. Actually
@@ -431,6 +464,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
mvsd_write(MVSD_FIFO, get_unaligned(p++));
mvsd_write(MVSD_FIFO, get_unaligned(p++));
s -= 4;
+ sgm->consumed += 4;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
if (s < 4) {
@@ -439,10 +473,13 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
memcpy(((void *)&val) + 4 - s, p, s);
mvsd_write(MVSD_FIFO, val[0]);
mvsd_write(MVSD_FIFO, val[1]);
+ sgm->consumed += s;
s = 0;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
- if (s == 0) {
+ /* PIO transfer done */
+ host->pio_size -= sgm->consumed;
+ if (host->pio_size == 0) {
host->intr_en &=
~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
@@ -450,8 +487,6 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
- host->pio_ptr = p;
- host->pio_size = s;
irq_handled = 1;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 5b3ab0e20505..1edf65291354 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -266,11 +266,18 @@ static inline void buffer_swap32(u32 *buf, int len)
static void mxcmci_swap_buffers(struct mmc_data *data)
{
- struct scatterlist *sg;
- int i;
+ struct sg_mapping_iter sgm;
+ u32 *buf;
+
+ sg_miter_start(&sgm, data->sg, data->sg_len,
+ SG_MITER_TO_SG | SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ buffer_swap32(buf, sgm.length);
+ }
- for_each_sg(data->sg, sg, data->sg_len, i)
- buffer_swap32(sg_virt(sg), sg->length);
+ sg_miter_stop(&sgm);
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -526,10 +533,9 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
} while (1);
}
-static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
+static int mxcmci_pull(struct mxcmci_host *host, u32 *buf, int bytes)
{
unsigned int stat;
- u32 *buf = _buf;
while (bytes > 3) {
stat = mxcmci_poll_status(host,
@@ -555,10 +561,9 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
return 0;
}
-static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
+static int mxcmci_push(struct mxcmci_host *host, u32 *buf, int bytes)
{
unsigned int stat;
- u32 *buf = _buf;
while (bytes > 3) {
stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
@@ -586,31 +591,39 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
static int mxcmci_transfer_data(struct mxcmci_host *host)
{
struct mmc_data *data = host->req->data;
- struct scatterlist *sg;
- int stat, i;
+ struct sg_mapping_iter sgm;
+ int stat;
+ u32 *buf;
host->data = data;
host->datasize = 0;
+ sg_miter_start(&sgm, data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
if (data->flags & MMC_DATA_READ) {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- stat = mxcmci_pull(host, sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ stat = mxcmci_pull(host, buf, sgm.length);
if (stat)
- return stat;
- host->datasize += sg->length;
+ goto transfer_error;
+ host->datasize += sgm.length;
}
} else {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- stat = mxcmci_push(host, sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ stat = mxcmci_push(host, buf, sgm.length);
if (stat)
- return stat;
- host->datasize += sg->length;
+ goto transfer_error;
+ host->datasize += sgm.length;
}
stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
if (stat)
- return stat;
+ goto transfer_error;
}
- return 0;
+
+transfer_error:
+ sg_miter_stop(&sgm);
+ return stat;
}
static void mxcmci_datawork(struct work_struct *work)
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 9fb8995b43a1..088f8ed4fdc4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -148,10 +148,8 @@ struct mmc_omap_host {
struct work_struct send_stop_work;
struct mmc_data *stop_data;
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- int sg_idx;
- u16 * buffer;
- u32 buffer_bytes_left;
u32 total_bytes_left;
unsigned features;
@@ -456,6 +454,8 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
if (host->dma_in_use)
mmc_omap_release_dma(host, data, data->error);
+ else
+ sg_miter_stop(&host->sg_miter);
host->data = NULL;
host->sg_len = 0;
@@ -651,19 +651,6 @@ mmc_omap_cmd_timer(struct timer_list *t)
spin_unlock_irqrestore(&host->slot_lock, flags);
}
-/* PIO only */
-static void
-mmc_omap_sg_to_buf(struct mmc_omap_host *host)
-{
- struct scatterlist *sg;
-
- sg = host->data->sg + host->sg_idx;
- host->buffer_bytes_left = sg->length;
- host->buffer = sg_virt(sg);
- if (host->buffer_bytes_left > host->total_bytes_left)
- host->buffer_bytes_left = host->total_bytes_left;
-}
-
static void
mmc_omap_clk_timer(struct timer_list *t)
{
@@ -676,33 +663,37 @@ mmc_omap_clk_timer(struct timer_list *t)
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
int n, nwords;
+ u16 *buffer;
- if (host->buffer_bytes_left == 0) {
- host->sg_idx++;
- BUG_ON(host->sg_idx == host->sg_len);
- mmc_omap_sg_to_buf(host);
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen */
+ dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
+ return;
}
+ buffer = sgm->addr;
+
n = 64;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
+ if (n > sgm->length)
+ n = sgm->length;
+ if (n > host->total_bytes_left)
+ n = host->total_bytes_left;
/* Round up to handle odd number of bytes to transfer */
nwords = DIV_ROUND_UP(n, 2);
- host->buffer_bytes_left -= n;
+ sgm->consumed = n;
host->total_bytes_left -= n;
host->data->bytes_xfered += n;
if (write) {
__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ buffer, nwords);
} else {
__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ buffer, nwords);
}
-
- host->buffer += nwords;
}
#ifdef CONFIG_MMC_DEBUG
@@ -956,6 +947,7 @@ static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_reque
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
+ unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */
struct mmc_data *data = req->data;
int i, use_dma = 1, block_size;
struct scatterlist *sg;
@@ -990,7 +982,6 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
}
}
- host->sg_idx = 0;
if (use_dma) {
enum dma_data_direction dma_data_dir;
struct dma_async_tx_descriptor *tx;
@@ -1071,7 +1062,11 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
OMAP_MMC_WRITE(host, BUF, 0x1f1f);
host->total_bytes_left = data->blocks * block_size;
host->sg_len = sg_len;
- mmc_omap_sg_to_buf(host);
+ if (data->flags & MMC_DATA_READ)
+ miter_flags |= SG_MITER_TO_SG;
+ else
+ miter_flags |= SG_MITER_FROM_SG;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags);
host->dma_in_use = 0;
}
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index c1fb9740eab0..586f94d4dbfd 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -9,6 +9,7 @@
#ifndef RENESAS_SDHI_H
#define RENESAS_SDHI_H
+#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include "tmio_mmc.h"
@@ -63,7 +64,7 @@ struct renesas_sdhi_of_data_with_quirks {
struct renesas_sdhi_dma {
unsigned long end_flags;
enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
+ dma_filter_fn filter;
void (*enable)(struct tmio_mmc_host *host, bool enable);
struct completion dma_dataend;
struct tasklet_struct dma_complete;
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
index a07f8333cd6b..c97363e2d86c 100644
--- a/drivers/mmc/host/sdhci-esdhc-mcf.c
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -299,9 +299,8 @@ static void esdhc_mcf_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_mcf_request_done(struct sdhci_host *host,
struct mmc_request *mrq)
{
- struct scatterlist *sg;
+ struct sg_mapping_iter sgm;
u32 *buffer;
- int i;
if (!mrq->data || !mrq->data->bytes_xfered)
goto exit_done;
@@ -313,10 +312,13 @@ static void esdhc_mcf_request_done(struct sdhci_host *host,
* On mcf5441x there is no hw sdma option/flag to select the dma
* transfer endianness. A swap after the transfer is needed.
*/
- for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) {
- buffer = (u32 *)sg_virt(sg);
- esdhc_mcf_buffer_swap32(buffer, sg->length);
+ sg_miter_start(&sgm, mrq->data->sg, mrq->data->sg_len,
+ SG_MITER_ATOMIC | SG_MITER_TO_SG | SG_MITER_FROM_SG);
+ while (sg_miter_next(&sgm)) {
+ buffer = sgm.addr;
+ esdhc_mcf_buffer_swap32(buffer, sgm.length);
}
+ sg_miter_stop(&sgm);
exit_done:
mmc_request_done(host->mmc, mrq);
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 42d54532cabe..430c1f90037b 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -435,7 +435,7 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
goto err_sdhci_add;
if (dev->phase_desc)
- mmc_of_parse_clk_phase(host->mmc, &dev->phase_map);
+ mmc_of_parse_clk_phase(&pdev->dev, &dev->phase_map);
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index a1f57af6acfb..ab4b964d4058 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -52,6 +52,20 @@
#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */
#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */
+/* Sophgo CV18XX specific Registers */
+#define CV18XX_SDHCI_MSHC_CTRL 0x00
+#define CV18XX_EMMC_FUNC_EN BIT(0)
+#define CV18XX_LATANCY_1T BIT(1)
+#define CV18XX_SDHCI_PHY_TX_RX_DLY 0x40
+#define CV18XX_PHY_TX_DLY_MSK GENMASK(6, 0)
+#define CV18XX_PHY_TX_SRC_MSK GENMASK(9, 8)
+#define CV18XX_PHY_TX_SRC_INVERT_CLK_TX 0x1
+#define CV18XX_PHY_RX_DLY_MSK GENMASK(22, 16)
+#define CV18XX_PHY_RX_SRC_MSK GENMASK(25, 24)
+#define CV18XX_PHY_RX_SRC_INVERT_RX_CLK 0x1
+#define CV18XX_SDHCI_PHY_CONFIG 0x4c
+#define CV18XX_PHY_TX_BPS BIT(0)
+
/* Rockchip specific Registers */
#define DWCMSHC_EMMC_DLL_CTRL 0x800
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
@@ -642,6 +656,35 @@ static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
}
}
+static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val, emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+
+ sdhci_reset(host, mask);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val |= CV18XX_EMMC_FUNC_EN;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ }
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val |= CV18XX_LATANCY_1T;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+ val |= CV18XX_PHY_TX_BPS;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+
+ val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
+ FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_RX_SRC_MSK, CV18XX_PHY_RX_SRC_INVERT_RX_CLK));
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -671,6 +714,15 @@ static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
.platform_execute_tuning = &th1520_execute_tuning,
};
+static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = cv18xx_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+};
+
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.ops = &sdhci_dwcmshc_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
@@ -700,6 +752,12 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
+static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
+ .ops = &sdhci_dwcmshc_cv18xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
@@ -769,6 +827,14 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
.data = &sdhci_dwcmshc_pdata,
},
{
+ .compatible = "sophgo,cv1800b-dwcmshc",
+ .data = &sdhci_dwcmshc_cv18xx_pdata,
+ },
+ {
+ .compatible = "sophgo,sg2002-dwcmshc",
+ .data = &sdhci_dwcmshc_cv18xx_pdata,
+ },
+ {
.compatible = "thead,th1520-dwcmshc",
.data = &sdhci_dwcmshc_th1520_pdata,
},
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 8cf3a375de65..cc9d28b75eb9 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
+
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ int err;
+
+ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+ if (err)
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+ return err;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ int ret = xenon_check_stability_internal_clk(host);
+
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
- /* wait for host eMMC PHY init completes */
- udelay(wait);
- reg = sdhci_readl(host, phy_regs->timing_adj);
- reg &= XENON_PHY_INITIALIZAION;
- if (reg) {
+ /*
+	 * The AC5X spec says this bit must be polled until it reads zero.
+	 * We see cases on AC5X in which completion takes longer than
+	 * the standard calculation, which is expected given the spec
+	 * comment above.
+	 * According to the spec, we must wait as long as
+	 * it takes for that bit to toggle on AC5X.
+	 * Cap that with 100 delay loops so we won't get
+	 * stuck here forever:
+ */
+
+ ret = read_poll_timeout(sdhci_readl, reg,
+ !(reg & XENON_PHY_INITIALIZAION),
+ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+ false, host, phy_regs->timing_adj);
+ if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
- wait);
- return -ETIMEDOUT;
- }
+ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
- return 0;
+ return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1
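
The xenon change above replaces a single udelay()-plus-check with read_poll_timeout() from <linux/iopoll.h>. A minimal sketch of the helper's shape, assuming the usual sdhci accessors from sdhci.h are available; DONE_REG, the polled bit and the 10 us / 10 ms values are placeholders rather than anything from the xenon driver:

	#include <linux/bits.h>
	#include <linux/iopoll.h>

	#define DONE_REG	0x24		/* hypothetical register offset */

	/* Poll until bit 0 of DONE_REG clears, or give up after 10 ms. */
	static int wait_for_done(struct sdhci_host *host)
	{
		u32 reg;

		/*
		 * sdhci_readl(host, DONE_REG) is re-read every 10 us until the
		 * condition becomes true; returns 0 on success, -ETIMEDOUT
		 * otherwise. The 'false' argument means: do not sleep before
		 * the first read.
		 */
		return read_poll_timeout(sdhci_readl, reg, !(reg & BIT(0)),
					 10, 10000, false, host, DONE_REG);
	}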
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 077d711e964e..08b4312af94e 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -227,14 +227,12 @@ struct sh_mmcif_host {
bool dying;
long timeout;
void __iomem *addr;
- u32 *pio_ptr;
spinlock_t lock; /* protect sh_mmcif_host::state */
enum sh_mmcif_state state;
enum sh_mmcif_wait_for wait_for;
struct delayed_work timeout_work;
size_t blocksize;
- int sg_idx;
- int sg_blkidx;
+ struct sg_mapping_iter sg_miter;
bool power;
bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable;
@@ -600,32 +598,17 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
return ret;
}
-static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
-{
- struct mmc_data *data = host->mrq->data;
-
- host->sg_blkidx += host->blocksize;
-
- /* data->sg->length must be a multiple of host->blocksize? */
- BUG_ON(host->sg_blkidx > data->sg->length);
-
- if (host->sg_blkidx == data->sg->length) {
- host->sg_blkidx = 0;
- if (++host->sg_idx < data->sg_len)
- host->pio_ptr = sg_virt(++data->sg);
- } else {
- host->pio_ptr = p;
- }
-
- return host->sg_idx != data->sg_len;
-}
-
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
+ SG_MITER_TO_SG);
+
host->wait_for = MMCIF_WAIT_FOR_READ;
/* buf read enable */
@@ -634,20 +617,32 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = sg_virt(data->sg);
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen on single blocks */
+ sg_miter_stop(sgm);
+ return false;
+ }
+
+ p = sgm->addr;
+
for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+ sg_miter_stop(&host->sg_miter);
+
/* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
host->wait_for = MMCIF_WAIT_FOR_READ_END;
@@ -658,6 +653,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = mrq->data;
if (!data->sg_len || !data->sg->length)
@@ -666,46 +662,63 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK;
+ sg_miter_start(sgm, data->sg, data->sg_len,
+ SG_MITER_TO_SG);
+
+ /* Advance to the first sglist entry */
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return;
+ }
+
host->wait_for = MMCIF_WAIT_FOR_MREAD;
- host->sg_idx = 0;
- host->sg_blkidx = 0;
- host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = host->pio_ptr;
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
- BUG_ON(!data->sg->length);
+ p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
- if (!sh_mmcif_next_block(host, p))
- return false;
+ sgm->consumed = host->blocksize;
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return false;
+ }
+
return true;
}
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
+ SG_MITER_FROM_SG);
+
host->wait_for = MMCIF_WAIT_FOR_WRITE;
/* buf write enable */
@@ -714,20 +727,32 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = sg_virt(data->sg);
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen on single blocks */
+ sg_miter_stop(sgm);
+ return false;
+ }
+
+ p = sgm->addr;
+
for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+ sg_miter_stop(&host->sg_miter);
+
/* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
@@ -738,6 +763,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = mrq->data;
if (!data->sg_len || !data->sg->length)
@@ -746,34 +772,46 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK;
+ sg_miter_start(sgm, data->sg, data->sg_len,
+ SG_MITER_FROM_SG);
+
+ /* Advance to the first sglist entry */
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return;
+ }
+
host->wait_for = MMCIF_WAIT_FOR_MWRITE;
- host->sg_idx = 0;
- host->sg_blkidx = 0;
- host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = host->pio_ptr;
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
- BUG_ON(!data->sg->length);
+ p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
- if (!sh_mmcif_next_block(host, p))
+ sgm->consumed = host->blocksize;
+
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
return false;
+ }
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index be7f18fd4836..93e912afd3ae 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
else
mrq->cmd->error = -ETIMEDOUT;
+ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
+ host->mrq = ERR_PTR(-EBUSY);
host->cmd = NULL;
host->data = NULL;
@@ -970,6 +972,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
+ /* Disallow new mrqs and work handlers to run */
host->mrq = ERR_PTR(-EBUSY);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1004,8 +1007,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"%s.%d: IOS interrupted: clk %u, mode %u",
current->comm, task_pid_nr(current),
ios->clock, ios->power_mode);
- host->mrq = NULL;
+ /* Ready for new mrqs */
+ host->mrq = NULL;
host->clk_cache = ios->clock;
mutex_unlock(&host->ios_lock);
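
The ERR_PTR(-EBUSY) stored in host->mrq above is a sentinel rather than a request: it is neither NULL (idle) nor a pointer the work handlers may dereference, so tmio_mmc_done_work() and friends can detect it and back off until host->mrq is reset to NULL. A purely illustrative check of such a sentinel, under the assumption that the caller holds the appropriate lock (the real handlers take host->lock):

	#include <linux/err.h>
	#include <linux/mmc/core.h>

	/* Hypothetical guard: true only for a real, dereferenceable request. */
	static bool mrq_is_usable(struct mmc_request *mrq)
	{
		/* NULL means idle; an ERR_PTR means "blocked, try again later". */
		return !IS_ERR_OR_NULL(mrq);
	}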
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 001a468bc149..f0562f712d98 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1284,8 +1284,6 @@ static int wbsd_scan(struct wbsd_host *host)
continue;
for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
- id = 0xFFFF;
-
host->config = config_ports[i];
host->unlock_code = unlock_codes[j];
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 77d5f1d24489..860380931b6c 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -883,7 +883,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
- struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
@@ -911,9 +910,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index aa44a23ec045..97a00ec9a4d4 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct mtd_info mtd;
struct mutex write_mutex;
};
@@ -55,8 +55,7 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@@ -106,8 +105,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@@ -142,8 +140,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@@ -198,7 +195,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->bdev_handle->bdev);
+ sync_blockdev(file_bdev(dev->bdev_file));
return;
}
@@ -210,10 +207,9 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
- if (dev->bdev_handle) {
- invalidate_mapping_pages(
- dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
- bdev_release(dev->bdev_handle);
+ if (dev->bdev_file) {
+ invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
+ fput(dev->bdev_file);
}
kfree(dev);
@@ -223,10 +219,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
-static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
+static struct file __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
- struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
+ struct file *bdev_file = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@@ -234,7 +230,7 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
- return bdev_handle;
+ return bdev_file;
/*
* We might not have the root device mounted at this point.
@@ -253,20 +249,20 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
- bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
- if (!IS_ERR(bdev_handle))
+ bdev_file = bdev_file_open_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev_file))
break;
}
}
#endif
- return bdev_handle;
+ return bdev_file;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@@ -279,16 +275,16 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
- bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
- if (IS_ERR(bdev_handle))
- bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
+ bdev_file = bdev_file_open_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev_file))
+ bdev_file = mdtblock_early_get_bdev(devname, mode, timeout,
dev);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
- dev->bdev_handle = bdev_handle;
- bdev = bdev_handle->bdev;
+ dev->bdev_file = bdev_file;
+ bdev = file_bdev(bdev_file);
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0c1b93303618..ec52277e3dd5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -638,7 +638,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
/* name must be usable with cmdlinepart */
sprintf(priv->name, "spi%d.%d-%s",
- spi->master->bus_num, spi_get_chipselect(spi, 0),
+ spi->controller->bus_num, spi_get_chipselect(spi, 0),
name);
device = &priv->mtd;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f0526dcc2162..3caa0717d46c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -277,6 +277,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
struct mtd_blktrans_dev *d;
+ struct queue_limits lim = { };
int last_devnum = -1;
struct gendisk *gd;
int ret;
@@ -331,9 +332,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
+
+ lim.logical_block_size = tr->blksize;
+ if (tr->discard)
+ lim.max_hw_discard_sectors = UINT_MAX;
/* Create gendisk */
- gd = blk_mq_alloc_disk(new->tag_set, new);
+ gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tag_set;
@@ -371,14 +376,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
- blk_queue_logical_block_size(new->rq, tr->blksize);
-
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
- if (tr->discard)
- blk_queue_max_discard_sectors(new->rq, UINT_MAX);
-
gd->queue = new->rq;
if (new->readonly)
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e451b28840d5..5887feb347a4 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -621,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
+ of_node_put(mtd_dn);
break;
}
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index a46698744850..5b0f5a9cef81 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
- MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
- MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
- MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
- MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
- MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30),
+ MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
- MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
- MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
- MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30),
+ MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30),
};
/**
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 987710e09441..6023cba748bb 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 654bd7372cd8..5c8fdcc088a0 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -348,6 +348,9 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
int ubiblock_create(struct ubi_volume_info *vi)
{
+ struct queue_limits lim = {
+ .max_segments = UBI_MAX_SG_COUNT,
+ };
struct ubiblock *dev;
struct gendisk *gd;
u64 disk_capacity;
@@ -393,7 +396,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Initialize the gendisk of this ubiblock device */
- gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tags;
@@ -416,7 +419,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->gd = gd;
dev->rq = gd->queue;
- blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
list_add_tail(&dev->list, &ubiblock_devices);
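
Both the mtd_blkdevs and ubiblock hunks above pass their queue limits to blk_mq_alloc_disk() instead of adjusting the queue afterwards with blk_queue_*() calls. A minimal sketch of the allocation-time pattern, assuming an already initialised tag set; the limit values here are illustrative:

	#include <linux/blk-mq.h>

	static struct gendisk *alloc_example_disk(struct blk_mq_tag_set *set,
						  void *queuedata)
	{
		struct queue_limits lim = {
			.logical_block_size	= 512,
			.max_segments		= 64,
		};

		/* Limits are applied while the disk and its queue are created. */
		return blk_mq_alloc_disk(set, &lim, queuedata);
	}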
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 68e79b1272f6..6d15ab3bfbbc 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3063,15 +3063,10 @@ static int amt_dev_init(struct net_device *dev)
int err;
amt->dev = dev;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = gro_cells_init(&amt->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
return 0;
}
@@ -3081,7 +3076,6 @@ static void amt_dev_uninit(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
gro_cells_destroy(&amt->gro_cells);
- free_percpu(dev->tstats);
}
static const struct net_device_ops amt_netdev_ops = {
@@ -3090,7 +3084,6 @@ static const struct net_device_ops amt_netdev_ops = {
.ndo_open = amt_dev_open,
.ndo_stop = amt_dev_stop,
.ndo_start_xmit = amt_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static void amt_link_setup(struct net_device *dev)
@@ -3111,6 +3104,7 @@ static void amt_link_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
eth_hw_addr_random(dev);
eth_zero_addr(dev->broadcast);
ether_setup(dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d9e052c49ba1..166bfc3c8e6c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -108,6 +108,7 @@ static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
+MODULE_DESCRIPTION("ARCnet core driver");
MODULE_LICENSE("GPL");
static int __init arcnet_init(void)
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 31377bb1cc97..339db6e4a1d5 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -194,15 +194,10 @@ static int bareudp_init(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&bareudp->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
+
return 0;
}
@@ -211,7 +206,6 @@ static void bareudp_uninit(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
gro_cells_destroy(&bareudp->gro_cells);
- free_percpu(dev->tstats);
}
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
@@ -529,7 +523,6 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
@@ -567,6 +560,7 @@ static void bareudp_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -760,23 +754,18 @@ static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(bareudp->dev, head);
}
-static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
+static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ bareudp_destroy_tunnels(net, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch = bareudp_exit_batch_net,
+ .exit_batch_rtnl = bareudp_exit_batch_rtnl,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
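
The amt and bareudp hunks drop their hand-rolled tstats handling: once dev->pcpu_stat_type is set to NETDEV_PCPU_STAT_TSTATS in the setup callback, the core allocates and frees the per-CPU counters and supplies the default 64-bit stats, which is why the netdev_alloc_pcpu_stats()/free_percpu() calls and the .ndo_get_stats64 = dev_get_tstats64 entries go away. A minimal sketch; the device and callback names are illustrative:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* Hypothetical link setup for a tunnel-style device. */
	static void example_tunnel_setup(struct net_device *dev)
	{
		ether_setup(dev);
		/* Core allocates dev->tstats and frees it on unregister. */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	}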
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c99ffe6c683a..c6807e473ab7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -82,10 +82,6 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
- 0, 0, 0, 0, 0, 0
-};
-
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
@@ -106,6 +102,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator,
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_enable_collecting(struct port *port);
+static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_disable_collecting_distributing(struct port *port,
@@ -172,8 +171,37 @@ static inline int __agg_has_partner(struct aggregator *agg)
}
/**
+ * __disable_distributing_port - disable the port's slave for distributing.
+ * Port will still be able to collect.
+ * @port: the port we're looking at
+ *
+ * This will disable only distributing on the port's slave.
+ */
+static void __disable_distributing_port(struct port *port)
+{
+ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
+ * __enable_collecting_port - enable the port's slave for collecting,
+ * if it's up
+ * @port: the port we're looking at
+ *
+ * This will enable only collecting on the port's slave.
+ */
+static void __enable_collecting_port(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
+ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
+ *
+ * This will disable both collecting and distributing on the port's slave.
*/
static inline void __disable_port(struct port *port)
{
@@ -183,6 +211,8 @@ static inline void __disable_port(struct port *port)
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
+ *
+ * This will enable both collecting and distributing on the port's slave.
*/
static inline void __enable_port(struct port *port)
{
@@ -193,10 +223,27 @@ static inline void __enable_port(struct port *port)
}
/**
- * __port_is_enabled - check if the port's slave is in active state
+ * __port_move_to_attached_state - check if port should transition back to attached
+ * state.
+ * @port: the port we're looking at
+ */
+static bool __port_move_to_attached_state(struct port *port)
+{
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
+ port->sm_mux_state = AD_MUX_ATTACHED;
+
+ return port->sm_mux_state == AD_MUX_ATTACHED;
+}
+
+/**
+ * __port_is_collecting_distributing - check if the port's slave is in the
+ * combined collecting/distributing state
* @port: the port we're looking at
*/
-static inline int __port_is_enabled(struct port *port)
+static int __port_is_collecting_distributing(struct port *port)
{
return bond_is_active_slave(port->slave);
}
@@ -942,6 +989,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
*/
static void ad_mux_machine(struct port *port, bool *update_slave_arr)
{
+ struct bonding *bond = __get_bond_by_port(port);
mux_states_t last_state;
/* keep current State Machine state to compare later if it was
@@ -999,9 +1047,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
- if (port->aggregator->is_active)
- port->sm_mux_state =
- AD_MUX_COLLECTING_DISTRIBUTING;
+ if (port->aggregator->is_active) {
+ int state = AD_MUX_COLLECTING_DISTRIBUTING;
+
+ if (!bond->params.coupled_control)
+ state = AD_MUX_COLLECTING;
+ port->sm_mux_state = state;
+ }
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@@ -1019,11 +1071,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
+ if (!__port_move_to_attached_state(port)) {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
+ if (port->aggregator->is_active &&
+ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+ }
+ break;
+ case AD_MUX_COLLECTING:
+ if (!__port_move_to_attached_state(port)) {
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
+ port->sm_mux_state = AD_MUX_DISTRIBUTING;
+ } else {
+ /* If port state hasn't changed, make sure that a collecting
+ * port is enabled for an active aggregator.
+ */
+ struct slave *slave = port->slave;
+
+ if (port->aggregator->is_active &&
+ bond_is_slave_rx_disabled(slave)) {
+ ad_enable_collecting(port);
+ *update_slave_arr = true;
+ }
+ }
+ }
+ break;
+ case AD_MUX_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
!(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
- port->sm_mux_state = AD_MUX_ATTACHED;
+ port->sm_mux_state = AD_MUX_COLLECTING;
} else {
/* if port state hasn't changed make
* sure that a collecting distributing
@@ -1031,7 +1117,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
if (port->aggregator &&
port->aggregator->is_active &&
- !__port_is_enabled(port)) {
+ !__port_is_collecting_distributing(port)) {
__enable_port(port);
*update_slave_arr = true;
}
@@ -1082,6 +1168,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
update_slave_arr);
port->ntt = true;
break;
+ case AD_MUX_COLLECTING:
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting(port);
+ ad_disable_distributing(port, update_slave_arr);
+ port->ntt = true;
+ break;
+ case AD_MUX_DISTRIBUTING:
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting_distributing(port,
+ update_slave_arr);
+ break;
default:
break;
}
@@ -1484,7 +1584,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ ((__agg_has_partner(aggregator) && /* partner answers */
!aggregator->is_individual) /* but is not individual OR */
)
) {
@@ -1907,6 +2007,43 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
}
/**
+ * ad_enable_collecting - enable a port's receive
+ * @port: the port we're looking at
+ *
+ * Enable @port if it's in an active aggregator
+ */
+static void ad_enable_collecting(struct port *port)
+{
+ if (port->aggregator->is_active) {
+ struct slave *slave = port->slave;
+
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Enabling collecting on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __enable_collecting_port(port);
+ }
+}
+
+/**
+ * ad_disable_distributing - disable a port's transmit
+ * @port: the port we're looking at
+ * @update_slave_arr: Does slave array need update?
+ */
+static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
+{
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Disabling distributing on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __disable_distributing_port(port);
+ /* Slave array needs an update */
+ *update_slave_arr = true;
+ }
+}
+
+/**
* ad_enable_collecting_distributing - enable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
@@ -1935,9 +2072,7 @@ static void ad_enable_collecting_distributing(struct port *port,
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
- if (port->aggregator &&
- !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
- &(null_mac_addr))) {
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Disabling port %d (LAG %d)\n",
port->actor_port_number,
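The bond_3ad.c hunks above split the coupled AD_MUX_COLLECTING_DISTRIBUTING state into separate AD_MUX_COLLECTING and AD_MUX_DISTRIBUTING states when the new coupled_control option is off: a port first enables only its receive path, moves on to distributing once the partner also signals COLLECTING, and drops back to collecting-only when the partner stops. A minimal standalone sketch of just those partner-state checks (the enum values and flag constants below are simplified stand-ins, not the driver's definitions):

    /* Sketch of the decoupled MUX progression shown in the hunks above;
     * plain userspace C, no kernel headers needed.
     */
    #include <stdio.h>

    enum mux_state { ATTACHED, COLLECTING, DISTRIBUTING, COLLECTING_DISTRIBUTING };

    #define P_SYNC 0x08    /* partner SYNCHRONIZATION, illustrative value */
    #define P_COLL 0x10    /* partner COLLECTING, illustrative value */

    static enum mux_state step(enum mux_state s, unsigned int partner, int coupled)
    {
        switch (s) {
        case ATTACHED:
            if (partner & P_SYNC)
                return coupled ? COLLECTING_DISTRIBUTING : COLLECTING;
            return ATTACHED;
        case COLLECTING:
            /* only start transmitting once the partner collects too */
            if ((partner & P_SYNC) && (partner & P_COLL))
                return DISTRIBUTING;
            return COLLECTING;
        case DISTRIBUTING:
            /* fall back to collecting-only if the partner stops */
            if (!(partner & P_SYNC) || !(partner & P_COLL))
                return COLLECTING;
            return DISTRIBUTING;
        default:
            return s;
        }
    }

    int main(void)
    {
        unsigned int partner_seq[] = { P_SYNC, P_SYNC | P_COLL, P_SYNC };
        enum mux_state s = ATTACHED;
        unsigned int i;

        for (i = 0; i < sizeof(partner_seq) / sizeof(partner_seq[0]); i++) {
            s = step(s, partner_seq[i], 0 /* coupled_control off */);
            printf("after step %u: state %d\n", i, (int)s);
        }
        return 0;
    }

With coupled set to 1 the progression collapses back into the single combined state, which matches the default of params->coupled_control = 1 established in the bond_main.c hunk below.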
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a11748b8d69b..2c5ed0a7cb18 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -2611,7 +2611,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
slave->delay = bond->params.downdelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
(BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -2625,9 +2625,10 @@ static int bond_miimon_inspect(struct bonding *bond)
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2649,7 +2650,7 @@ static int bond_miimon_inspect(struct bonding *bond)
commit++;
slave->delay = bond->params.updelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
ignore_updelay ? 0 :
bond->params.updelay *
@@ -2659,9 +2660,10 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -6305,6 +6307,7 @@ static int __init bond_check_params(struct bond_params *params)
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
+ params->coupled_control = 1;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6414,28 +6417,41 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
+ * race condition in bond unloading") we need to remove sysfs files
+ * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ */
+static void __net_exit bond_net_pre_exit(struct net *net)
+{
+ struct bond_net *bn = net_generic(net, bond_net_id);
+
+ bond_destroy_sysfs(bn);
+}
+
+static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct bond_net *bn;
struct net *net;
- LIST_HEAD(list);
-
- list_for_each_entry(net, net_list, exit_list) {
- bn = net_generic(net, bond_net_id);
- bond_destroy_sysfs(bn);
- }
/* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct bonding *bond, *tmp_bond;
bn = net_generic(net, bond_net_id);
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+}
+
+/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
+ * only after all bonds are gone") bond_destroy_proc_dir() is called
+ * after bond_net_exit_batch_rtnl() has completed.
+ */
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+{
+ struct bond_net *bn;
+ struct net *net;
list_for_each_entry(net, net_list, exit_list) {
bn = net_generic(net, bond_net_id);
@@ -6445,6 +6461,8 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
+ .pre_exit = bond_net_pre_exit,
+ .exit_batch_rtnl = bond_net_exit_batch_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index cfa74cf8bb1a..29b4c3d1b9b6 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_COUPLED_CONTROL]) {
+ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
+
+ bond_opt_initval(&newval, coupled_control);
+ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
+ data[IFLA_BOND_COUPLED_CONTROL], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
/* IFLA_BOND_NS_IP6_TARGET */
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
0;
}
@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.missed_max))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
+ bond->params.coupled_control))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index f3f27f0bd2a6..4cdbc7e084f4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -84,7 +84,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
-
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -232,6 +233,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_coupled_control_tbl[] = {
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { "off", 0, 0},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -496,6 +503,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
+ },
+ [BOND_OPT_COUPLED_CONTROL] = {
+ .id = BOND_OPT_COUPLED_CONTROL,
+ .name = "coupled_control",
+ .desc = "Opt into using coupled control MUX for LACP states",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_coupled_control_tbl,
+ .set = bond_option_coupled_control_set,
}
};
@@ -1692,3 +1708,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
bond->params.ad_user_port_key = newval->value;
return 0;
}
+
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
+ newval->string, newval->value);
+
+ bond->params.coupled_control = newval->value;
+ return 0;
+}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index eb410714afc2..2e31db55d927 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -168,6 +168,8 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express 2xHS v2
Kvaser Mini PCI Express 1xCAN v3
Kvaser Mini PCI Express 2xCAN v3
+ Kvaser M.2 PCIe 4xCAN
+ Kvaser PCIe 8xCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
@@ -218,6 +220,7 @@ config CAN_XILINXCAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ctucanfd/Kconfig"
+source "drivers/net/can/esd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index ff8f76295d13..4669cd51e7bf 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
+obj-y += esd/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/esd/Kconfig b/drivers/net/can/esd/Kconfig
new file mode 100644
index 000000000000..54bfc366634c
--- /dev/null
+++ b/drivers/net/can/esd/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CAN_ESD_402_PCI
+ tristate "esd electronics gmbh CAN-PCI(e)/402 family"
+ depends on PCI && HAS_DMA
+ help
+ Support for C402 card family from esd electronics gmbh.
+ This card family is based on the ESDACC CAN controller and
+ available in several form factors: PCI, PCIe, PCIe Mini,
+ M.2 PCIe, CPCIserial, PMC, XMC (see https://esd.eu/en)
+
+ This driver can also be built as a module. In this case the
+ module will be called esd_402_pci.
diff --git a/drivers/net/can/esd/Makefile b/drivers/net/can/esd/Makefile
new file mode 100644
index 000000000000..5dd2d470c286
--- /dev/null
+++ b/drivers/net/can/esd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for esd gmbh ESDACC controller driver
+#
+esd_402_pci-objs := esdacc.o esd_402_pci-core.o
+
+obj-$(CONFIG_CAN_ESD_402_PCI) += esd_402_pci.o
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
new file mode 100644
index 000000000000..b7cdcffd0e45
--- /dev/null
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "esdacc.h"
+
+#define ESD_PCI_DEVICE_ID_PCIE402 0x0402
+
+#define PCI402_FPGA_VER_MIN 0x003d
+#define PCI402_MAX_CORES 6
+#define PCI402_BAR 0
+#define PCI402_IO_OV_OFFS 0
+#define PCI402_IO_PCIEP_OFFS 0x10000
+#define PCI402_IO_LEN_TOTAL 0x20000
+#define PCI402_IO_LEN_CORE 0x2000
+#define PCI402_PCICFG_MSICAP 0x50
+
+#define PCI402_DMA_MASK DMA_BIT_MASK(32)
+#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE)
+
+#define PCI402_PCIEP_OF_INT_ENABLE 0x0050
+#define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000
+#define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004
+#define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008
+#define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c
+
+struct pci402_card {
+ /* Actually mapped io space, all other iomem derived from this */
+ void __iomem *addr;
+ void __iomem *addr_pciep;
+
+ void *dma_buf;
+ dma_addr_t dma_hnd;
+
+ struct acc_ov ov;
+ struct acc_core *cores;
+
+ bool msi_enabled;
+};
+
+/* The BTR register capabilities described by the can_bittiming_const structures
+ * below are valid since esdACC version 0x0032.
+ */
+
+/* Used if the esdACC FPGA is built as CAN-Classic version. */
+static const struct can_bittiming_const pci402_bittiming_const = {
+ .name = "esd_402",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+/* Used if the esdACC FPGA is built as CAN-FD version. */
+static const struct can_bittiming_const pci402_bittiming_const_canfd = {
+ .name = "esd_402fd",
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct net_device_ops pci402_acc_netdev_ops = {
+ .ndo_open = acc_open,
+ .ndo_stop = acc_close,
+ .ndo_start_xmit = acc_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops pci402_acc_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+static irqreturn_t pci402_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *pdev = dev_id;
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ irqreturn_t irq_status;
+
+ irq_status = acc_card_interrupt(&card->ov, card->cores);
+
+ return irq_status;
+}
+
+static int pci402_set_msiconfig(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ u32 addr_lo_offs = 0;
+ u32 addr_lo = 0;
+ u32 addr_hi = 0;
+ u32 data = 0;
+ u16 csr = 0;
+ int err;
+
+ /* The FPGA hard IP PCIe core implements a 64-bit MSI Capability
+ * Register Format
+ */
+ err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO,
+ &addr_lo);
+ if (err)
+ goto failed;
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI,
+ &addr_hi);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64,
+ &data);
+ if (err)
+ goto failed;
+
+ addr_lo_offs = addr_lo & 0x0000ffff;
+ addr_lo &= 0xffff0000;
+
+ if (addr_hi)
+ addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */
+
+ if (!(csr & PCI_MSI_FLAGS_ENABLE)) {
+ err = -EINVAL;
+ goto failed;
+ }
+
+ iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO);
+ iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data);
+
+ return 0;
+
+failed:
+ pci_warn(pdev, "Error while setting MSI configuration:\n"
+ "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n",
+ csr, addr_hi, addr_lo, addr_lo_offs, data);
+
+ return err;
+}
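As a side note on pci402_set_msiconfig(): the 32-bit MSI address read from config space is split into a 64 KiB-aligned base for the PCIe endpoint and a 16-bit offset for the overview module, and bit 0 of the base is reused to signal 64-bit addressing when the upper dword is non-zero. A standalone sketch of only that arithmetic (the address values are made up):

    #include <stdio.h>

    int main(void)
    {
        /* made-up MSI address as it could be read from config space */
        unsigned int addr_lo = 0xfee01004;
        unsigned int addr_hi = 0x00000001;
        unsigned int addr_lo_offs;

        addr_lo_offs = addr_lo & 0x0000ffff;   /* goes to ACC_OV_OF_MSI_ADDRESSOFFSET */
        addr_lo &= 0xffff0000;                 /* 64 KiB-aligned base for the endpoint */

        if (addr_hi)
            addr_lo |= 1;                      /* enable 64-bit addressing in the endpoint */

        printf("base 0x%08x, offset 0x%04x, hi 0x%08x\n",
               addr_lo, addr_lo_offs, addr_hi);
        return 0;
    }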
+
+static int pci402_init_card(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ card->ov.addr = card->addr + PCI402_IO_OV_OFFS;
+ card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS;
+
+ acc_reset_fpga(&card->ov);
+ acc_init_ov(&card->ov, &pdev->dev);
+
+ if (card->ov.version < PCI402_FPGA_VER_MIN) {
+ pci_err(pdev,
+ "esdACC version (0x%.4x) outdated, please update\n",
+ card->ov.version);
+ return -EINVAL;
+ }
+
+ if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) {
+ pci_err(pdev,
+ "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n",
+ card->ov.timestamp_frequency);
+ return -EINVAL;
+ }
+
+ if (card->ov.active_cores > PCI402_MAX_CORES) {
+ pci_err(pdev,
+ "Card with %u active cores not supported by driver. Aborted.\n",
+ card->ov.active_cores);
+ return -EINVAL;
+ }
+ card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores,
+ sizeof(struct acc_core), GFP_KERNEL);
+ if (!card->cores)
+ return -ENOMEM;
+
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ pci_warn(pdev,
+ "esdACC with CAN-FD feature detected. This driver doesn't support CAN-FD yet.\n");
+ }
+
+#ifdef __LITTLE_ENDIAN
+ /* So that the card converts all busmastered data to LE for us: */
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE);
+#endif
+
+ return 0;
+}
+
+static int pci402_init_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = pci_enable_msi(pdev);
+ if (!err) {
+ err = pci402_set_msiconfig(pdev);
+ if (!err) {
+ card->msi_enabled = true;
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_dbg(pdev, "MSI preparation done\n");
+ }
+ }
+
+ err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), pdev);
+ if (err)
+ goto failure_msidis;
+
+ iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+
+ return 0;
+
+failure_msidis:
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+
+ return err;
+}
+
+static void pci402_finish_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+ devm_free_irq(&pdev->dev, pdev->irq, pdev);
+
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+}
+
+static int pci402_init_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK);
+ if (err) {
+ pci_err(pdev, "DMA set mask failed!\n");
+ return err;
+ }
+
+ /* The esdACC DMA engine needs the DMA buffer aligned to a 64k
+ * boundary. The DMA API guarantees to align the returned buffer to the
+ * smallest PAGE_SIZE order which is greater than or equal to the
+ * requested size. With PCI402_DMA_SIZE == 64kB this suffices here.
+ */
+ card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE,
+ &card->dma_hnd, GFP_KERNEL);
+ if (!card->dma_buf)
+ return -ENOMEM;
+
+ acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf);
+
+ iowrite32(card->dma_hnd,
+ card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ pci_set_master(pdev);
+
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ return 0;
+}
+
+static void pci402_finish_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ pci_clear_master(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ card->ov.bmfifo.messages = NULL;
+ card->ov.bmfifo.irq_cnt = NULL;
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+
+ core->bmfifo.messages = NULL;
+ core->bmfifo.irq_cnt = NULL;
+ }
+
+ dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf,
+ card->dma_hnd);
+ card->dma_buf = NULL;
+}
+
+static void pci402_unregister_core(struct acc_core *core)
+{
+ netdev_info(core->netdev, "unregister\n");
+ unregister_candev(core->netdev);
+
+ free_candev(core->netdev);
+ core->netdev = NULL;
+}
+
+static int pci402_init_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+ struct acc_net_priv *priv;
+ struct net_device *netdev;
+ u32 fifo_config;
+
+ core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE;
+
+ fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG);
+ core->tx_fifo_size = (fifo_config >> 24);
+ if (core->tx_fifo_size <= 1) {
+ pci_err(pdev, "Invalid tx_fifo_size!\n");
+ err = -EINVAL;
+ goto failure;
+ }
+
+ netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto failure;
+ }
+ core->netdev = netdev;
+
+ netdev->flags |= IFF_ECHO;
+ netdev->dev_port = i;
+ netdev->netdev_ops = &pci402_acc_netdev_ops;
+ netdev->ethtool_ops = &pci402_acc_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ priv = netdev_priv(netdev);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_CC_LEN8_DLC;
+
+ priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
+ priv->can.bittiming_const = &pci402_bittiming_const_canfd;
+ else
+ priv->can.bittiming_const = &pci402_bittiming_const;
+ priv->can.do_set_bittiming = acc_set_bittiming;
+ priv->can.do_set_mode = acc_set_mode;
+ priv->can.do_get_berr_counter = acc_get_berr_counter;
+
+ priv->core = core;
+ priv->ov = &card->ov;
+
+ err = register_candev(netdev);
+ if (err) {
+ free_candev(core->netdev);
+ core->netdev = NULL;
+ goto failure;
+ }
+
+ netdev_info(netdev, "registered\n");
+ }
+
+ return 0;
+
+failure:
+ for (i--; i >= 0; i--)
+ pci402_unregister_core(&card->cores[i]);
+
+ return err;
+}
+
+static void pci402_finish_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++)
+ pci402_unregister_core(&card->cores[i]);
+}
+
+static int pci402_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pci402_card *card = NULL;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto failure_disable_pci;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ err = pci_request_regions(pdev, pci_name(pdev));
+ if (err)
+ goto failure_disable_pci;
+
+ card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL);
+ if (!card->addr) {
+ err = -ENOMEM;
+ goto failure_release_regions;
+ }
+
+ err = pci402_init_card(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_dma(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_interrupt(pdev);
+ if (err)
+ goto failure_finish_dma;
+
+ err = pci402_init_cores(pdev);
+ if (err)
+ goto failure_finish_interrupt;
+
+ return 0;
+
+failure_finish_interrupt:
+ pci402_finish_interrupt(pdev);
+
+failure_finish_dma:
+ pci402_finish_dma(pdev);
+
+failure_unmap:
+ pci_iounmap(pdev, card->addr);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci402_remove(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ pci402_finish_interrupt(pdev);
+ pci402_finish_cores(pdev);
+ pci402_finish_dma(pdev);
+ pci_iounmap(pdev, card->addr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci402_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_ESDGMBH,
+ .device = ESD_PCI_DEVICE_ID_PCIE402,
+ .subvendor = PCI_VENDOR_ID_ESDGMBH,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci402_tbl);
+
+static struct pci_driver pci402_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci402_tbl,
+ .probe = pci402_probe,
+ .remove = pci402_remove,
+};
+module_pci_driver(pci402_driver);
+
+MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe");
+MODULE_AUTHOR("Thomas Körper <socketcan@esd.eu>");
+MODULE_AUTHOR("Stefan Mätje <stefan.maetje@esd.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
new file mode 100644
index 000000000000..121cbbf81458
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include "esdacc.h"
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+
+/* esdACC ID register layout */
+#define ACC_ID_ID_MASK GENMASK(28, 0)
+#define ACC_ID_EFF_FLAG BIT(29)
+
+/* esdACC DLC register layout */
+#define ACC_DLC_DLC_MASK GENMASK(3, 0)
+#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_TXD_FLAG BIT(5)
+
+/* ecc value of esdACC equals SJA1000's ECC register */
+#define ACC_ECC_SEG 0x1f
+#define ACC_ECC_DIR 0x20
+#define ACC_ECC_BIT 0x00
+#define ACC_ECC_FORM 0x40
+#define ACC_ECC_STUFF 0x80
+#define ACC_ECC_MASK 0xc0
+
+/* esdACC Status Register bits. Unused bits not documented. */
+#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
+#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
+#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)
+
+/* esdACC Overview Module BM_IRQ_Mask register related defines */
+/* Two bit wide command masks to mask or unmask a single core IRQ */
+#define ACC_BM_IRQ_UNMASK BIT(0)
+#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
+/* Command to unmask all IRQ sources. Created by shifting
+ * and oring the two bit wide ACC_BM_IRQ_UNMASK 16 times.
+ */
+#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
+
+static void acc_resetmode_enter(struct acc_core *core)
+{
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_resetmode_leave(struct acc_core *core)
+{
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+ const void *data)
+{
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
+ *((const u32 *)(data + 4)));
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
+ *((const u32 *)data));
+ acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
+ /* CAN id must be written at last. This write starts TX. */
+ acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
+}
+
+static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
+{
+ ++tx_fifo_idx;
+ if (tx_fifo_idx >= core->tx_fifo_size)
+ tx_fifo_idx = 0U;
+ return tx_fifo_idx;
+}
+
+/* Convert timestamp from esdACC time stamp ticks to ns
+ *
+ * The conversion factor ts2ns from time stamp counts to ns is basically
+ * ts2ns = NSEC_PER_SEC / timestamp_frequency
+ *
+ * We handle here only a fixed timestamp frequency of 80MHz. The
+ * resulting ts2ns factor would be 12.5.
+ *
+ * At the end we multiply by 12 and add half of the HW timestamp
+ * to get a multiplication by 12.5. This way any overflow is
+ * avoided until ktime_t itself overflows.
+ */
+#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
+#define ACC_TS_80MHZ_SHIFT 1
+
+static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
+{
+ u64 ns;
+
+ ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);
+
+ return ns_to_ktime(ns);
+}
+
+#undef ACC_TS_FACTOR
+#undef ACC_TS_80MHZ_SHIFT
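The comment above describes the integer trick behind acc_ts2ktime(): with ACC_TS_FACTOR = 12 at 80 MHz, ts * 12 + (ts >> 1) equals ts * 12.5 for even tick counts and is at most 0.5 ns low for odd ones. A standalone check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define TS_FREQ_80MHZ 80000000ULL

    int main(void)
    {
        uint64_t ts;

        for (ts = 0; ts < 5; ts++) {
            uint64_t ns = ts * (NSEC_PER_SEC / TS_FREQ_80MHZ) + (ts >> 1);

            /* reference: 12.5 ns per tick at 80 MHz */
            printf("ts=%llu -> %llu ns (exact %.1f ns)\n",
                   (unsigned long long)ts, (unsigned long long)ns, ts * 12.5);
        }
        return 0;
    }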
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev)
+{
+ u32 temp;
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
+ ov->version = temp;
+ ov->features = (temp >> 16);
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
+ ov->total_cores = temp;
+ ov->active_cores = (temp >> 8);
+
+ ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
+ ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);
+
+ /* Depending on the esdACC NEW_PSC feature, enable the new prescaler
+ * or adjust core_frequency to account for the implicit division by 2.
+ */
+ if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
+ } else {
+ ov->core_frequency /= 2;
+ }
+
+ dev_dbg(dev,
+ "esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
+ ov->version, ov->core_frequency, ov->timestamp_frequency,
+ ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
+ ov->active_cores, ov->total_cores);
+}
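acc_init_ov() unpacks two 16-bit fields from the version register and two 8-bit fields from the info register purely by shifting and letting the narrower struct members truncate. The same unpacking, spelled out with explicit masks on made-up register values:

    #include <stdio.h>

    int main(void)
    {
        /* made-up register contents, laid out as acc_init_ov() reads them */
        unsigned int version_reg = 0x08000042; /* features in bits 31:16, version in bits 15:0 */
        unsigned int info_reg = 0x00000406;    /* active cores in bits 15:8, total cores in bits 7:0 */

        unsigned int version = version_reg & 0xffff;
        unsigned int features = version_reg >> 16;
        unsigned int total_cores = info_reg & 0xff;
        unsigned int active_cores = (info_reg >> 8) & 0xff;

        printf("version 0x%04x, features 0x%04x, cores %u/%u\n",
               version, features, active_cores, total_cores);
        return 0;
    }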
+
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
+{
+ unsigned int u;
+
+ /* DMA buffer layout as follows where N is the number of CAN cores
+ * implemented in the FPGA, i.e. N = ov->total_cores
+ *
+ * Section Layout Section size
+ * ----------------------------------------------
+ * FIFO Card/Overview ACC_CORE_DMABUF_SIZE
+ * FIFO Core0 ACC_CORE_DMABUF_SIZE
+ * ... ...
+ * FIFO CoreN ACC_CORE_DMABUF_SIZE
+ * irq_cnt Card/Overview sizeof(u32)
+ * irq_cnt Core0 sizeof(u32)
+ * ... ...
+ * irq_cnt CoreN sizeof(u32)
+ */
+ ov->bmfifo.messages = mem;
+ ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;
+
+ for (u = 0U; u < ov->active_cores; u++) {
+ struct acc_core *core = &cores[u];
+
+ core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
+ core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
+ }
+}
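The layout comment in acc_init_bm_ptr() translates into simple offset arithmetic: (total_cores + 1) message-FIFO sections of ACC_CORE_DMABUF_SIZE each, card/overview first, followed by one u32 interrupt counter per section in the same order. A standalone offset calculation (the core count is chosen arbitrarily):

    #include <stdint.h>
    #include <stdio.h>

    #define ACC_CORE_DMAMSG_SIZE 32U
    #define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)

    int main(void)
    {
        unsigned int total_cores = 4;   /* arbitrary example */
        size_t irq_cnt_base = (size_t)(total_cores + 1) * ACC_CORE_DMABUF_SIZE;
        unsigned int u;

        printf("overview: FIFO at 0x%zx, irq_cnt at 0x%zx\n", (size_t)0, irq_cnt_base);
        for (u = 0; u < total_cores; u++)
            printf("core %u:   FIFO at 0x%zx, irq_cnt at 0x%zx\n", u,
                   (size_t)(u + 1) * ACC_CORE_DMABUF_SIZE,
                   irq_cnt_base + (u + 1) * sizeof(uint32_t));
        return 0;
    }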
+
+int acc_open(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ u32 tx_fifo_status;
+ u32 ctrl_mode;
+ int err;
+
+ /* Retry to enter RESET mode if out of sync. */
+ if (priv->can.state != CAN_STATE_STOPPED) {
+ netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
+ __func__, can_get_state_str(priv->can.state));
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+
+ acc_resetmode_leave(core);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Resync TX FIFO indices to HW state after (re-)start. */
+ tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
+ core->tx_fifo_head = tx_fifo_status & 0xff;
+ core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+int acc_close(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS |
+ ACC_REG_CONTROL_MASK_IE_BUSERR);
+
+ netif_stop_queue(netdev);
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Mark pending TX requests to be aborted after controller restart. */
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+
+ /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_LOM);
+
+ close_candev(netdev);
+ return 0;
+}
+
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 tx_fifo_head = core->tx_fifo_head;
+ int fifo_usage;
+ u32 acc_id;
+ u8 acc_dlc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* Access core->tx_fifo_tail only once because it may be changed
+ * from the interrupt level.
+ */
+ fifo_usage = tx_fifo_head - core->tx_fifo_tail;
+ if (fifo_usage < 0)
+ fifo_usage += core->tx_fifo_size;
+
+ if (fifo_usage >= core->tx_fifo_size - 1) {
+ netdev_err(core->netdev,
+ "BUG: TX ring full when queue awake!\n");
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fifo_usage == core->tx_fifo_size - 2)
+ netif_stop_queue(netdev);
+
+ acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ if (cf->can_id & CAN_RTR_FLAG)
+ acc_dlc |= ACC_DLC_RTR_FLAG;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ acc_id = cf->can_id & CAN_EFF_MASK;
+ acc_id |= ACC_ID_EFF_FLAG;
+ } else {
+ acc_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);
+
+ core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);
+
+ acc_txq_put(core, acc_id, acc_dlc, cf->data);
+
+ return NETDEV_TX_OK;
+}
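The ring arithmetic in acc_start_xmit() keeps one FIFO slot unused: the fill level is head minus tail with wrap-around, a level of size - 1 is treated as full, and the queue is stopped as soon as accepting the current frame would reach that level. A standalone sketch of the same index handling (the FIFO size and indices are made up):

    #include <stdio.h>

    static unsigned int fifo_next(unsigned int idx, unsigned int size)
    {
        return (idx + 1 >= size) ? 0 : idx + 1;
    }

    int main(void)
    {
        unsigned int size = 8, head = 6, tail = 3;   /* arbitrary example */
        int usage = (int)head - (int)tail;

        if (usage < 0)
            usage += (int)size;

        printf("fill level %d of %u\n", usage, size);
        if (usage >= (int)size - 1)
            printf("ring full, frame would be rejected\n");
        else if (usage == (int)size - 2)
            printf("queue stopped after accepting this frame\n");

        head = fifo_next(head, size);
        printf("next head index: %u\n", head);
        return 0;
    }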
+
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);
+
+ bec->txerr = (core_status >> 8) & 0xff;
+ bec->rxerr = core_status & 0xff;
+
+ return 0;
+}
+
+int acc_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* Paranoid FIFO index check. */
+ {
+ const u32 tx_fifo_status =
+ acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
+ const u8 hw_fifo_head = tx_fifo_status;
+
+ if (hw_fifo_head != priv->core->tx_fifo_head ||
+ hw_fifo_head != priv->core->tx_fifo_tail) {
+ netdev_warn(netdev,
+ "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
+ priv->core->tx_fifo_tail,
+ priv->core->tx_fifo_head,
+ tx_fifo_status);
+ }
+ }
+ acc_resetmode_leave(priv->core);
+ /* To leave the bus-off state the esdACC controller begins a
+ * grace period here, during which it counts 128 "idle conditions"
+ * (each of 11 consecutive recessive bits) on the bus as required
+ * by the CAN spec.
+ *
+ * During this time the TX FIFO may still contain already
+ * aborted "zombie" frames that are only drained from the FIFO
+ * at the end of the grace period.
+ *
+ * To avoid interfering with this drain process we don't
+ * call netif_wake_queue() here. When the controller reaches
+ * the error-active state again, it informs us about that
+ * with an acc_bmmsg_errstatechange message. Then
+ * netif_wake_queue() is called from
+ * handle_core_msg_errstatechange() instead.
+ */
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int acc_set_bittiming(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 brp;
+ u32 btr;
+
+ if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ u32 fbtr = 0;
+
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
+ brp, btr, fbtr);
+ } else {
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
+ }
+
+ return 0;
+}
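The FIELD_PREP() calls in acc_set_bittiming() pack the computed values into the classic-CAN register layout defined in esdacc.h: BRP in bits 8:0, TSEG1 in bits 3:0, TSEG2 in bits 18:16 and SJW in bits 25:24. The same packing with explicit shifts, using made-up bit timing values:

    #include <stdio.h>

    int main(void)
    {
        /* made-up classic-CAN bit timing: brp=8, prop=2, ph1=5, ph2=2, sjw=1 */
        unsigned int brp_val = 8, prop_seg = 2, phase_seg1 = 5, phase_seg2 = 2, sjw = 1;

        unsigned int brp = (brp_val - 1) & 0x1ff;              /* BRP, bits 8:0 */
        unsigned int btr = (phase_seg1 + prop_seg - 1) & 0xf;  /* TSEG1, bits 3:0 */

        btr |= ((phase_seg2 - 1) & 0x7) << 16;                 /* TSEG2, bits 18:16 */
        btr |= ((sjw - 1) & 0x3) << 24;                        /* SJW, bits 25:24 */

        printf("BRP 0x%08x, BTR 0x%08x\n", brp, btr);
        return 0;
    }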
+
+static void handle_core_msg_rxtxdone(struct acc_core *core,
+ const struct acc_bmmsg_rxtxdone *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct sk_buff *skb;
+
+ if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+
+ if (core->tx_fifo_head == tx_fifo_tail) {
+ netdev_warn(core->netdev,
+ "TX interrupt, but queue is empty!?\n");
+ return;
+ }
+
+ /* Direct access echo skb to attach HW time stamp. */
+ skb = priv->can.echo_skb[tx_fifo_tail];
+ if (skb) {
+ skb_hwtstamps(skb)->hwtstamp =
+ acc_ts2ktime(priv->ov, msg->ts);
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
+ NULL);
+
+ core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+
+ netif_wake_queue(core->netdev);
+
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(core->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = msg->id & ACC_ID_ID_MASK;
+ if (msg->id & ACC_ID_EFF_FLAG)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
+ priv->can.ctrlmode);
+
+ if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+}
+
+static void handle_core_msg_txabort(struct acc_core *core,
+ const struct acc_bmmsg_txabort *msg)
+{
+ struct net_device_stats *stats = &core->netdev->stats;
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+ u32 abort_mask = msg->abort_mask; /* u32 extend to avoid warnings later */
+
+ /* The abort_mask shows which frames were aborted in esdACC's FIFO. */
+ while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
+ const u32 tail_mask = (1U << tx_fifo_tail);
+
+ if (!(abort_mask & tail_mask))
+ break;
+ abort_mask &= ~tail_mask;
+
+ can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+
+ tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+ }
+ core->tx_fifo_tail = tx_fifo_tail;
+ if (abort_mask)
+ netdev_warn(core->netdev, "Unhandled aborted messages\n");
+
+ if (!acc_resetmode_entered(core))
+ netif_wake_queue(core->netdev);
+}
+
+static void handle_core_msg_overrun(struct acc_core *core,
+ const struct acc_bmmsg_overrun *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* lost_cnt may be 0 if not supported by esdACC version */
+ if (msg->lost_cnt) {
+ stats->rx_errors += msg->lost_cnt;
+ stats->rx_over_errors += msg->lost_cnt;
+ } else {
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void handle_core_msg_buserr(struct acc_core *core,
+ const struct acc_bmmsg_buserr *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ u8 can_err_prot_type = 0U;
+
+ priv->can.can_stats.bus_error++;
+
+ /* Error occurred during transmission? */
+ if (msg->ecc & ACC_ECC_DIR) {
+ stats->rx_errors++;
+ } else {
+ can_err_prot_type |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+ /* Determine error type */
+ switch (msg->ecc & ACC_ECC_MASK) {
+ case ACC_ECC_BIT:
+ can_err_prot_type |= CAN_ERR_PROT_BIT;
+ break;
+ case ACC_ECC_FORM:
+ can_err_prot_type |= CAN_ERR_PROT_FORM;
+ break;
+ case ACC_ECC_STUFF:
+ can_err_prot_type |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
+ break;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+
+ /* Set protocol error type */
+ cf->data[2] = can_err_prot_type;
+ /* Set error location */
+ cf->data[3] = msg->ecc & ACC_ECC_SEG;
+
+ /* Insert CAN TX and RX error counters. */
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void
+handle_core_msg_errstatechange(struct acc_core *core,
+ const struct acc_bmmsg_errstatechange *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ enum can_state new_state;
+
+ if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ /* See comment in acc_set_mode() for CAN_MODE_START */
+ netif_wake_queue(core->netdev);
+ }
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+
+ if (new_state != priv->can.state) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (txerr >= rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (rxerr >= txerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ /* Always call can_change_state() to update the state
+ * even if alloc_can_err_skb() may have failed.
+ * can_change_state() can cope with a NULL cf pointer.
+ */
+ can_change_state(core->netdev, cf, tx_state, rx_state);
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+ can_bus_off(core->netdev);
+ }
+}
+
+static void handle_core_interrupt(struct acc_core *core)
+{
+ u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
+
+ while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
+ const union acc_bmmsg *msg =
+ &core->bmfifo.messages[core->bmfifo.msg_fifo_tail];
+
+ switch (msg->msg_id) {
+ case BM_MSG_ID_RXTXDONE:
+ handle_core_msg_rxtxdone(core, &msg->rxtxdone);
+ break;
+
+ case BM_MSG_ID_TXABORT:
+ handle_core_msg_txabort(core, &msg->txabort);
+ break;
+
+ case BM_MSG_ID_OVERRUN:
+ handle_core_msg_overrun(core, &msg->overrun);
+ break;
+
+ case BM_MSG_ID_BUSERR:
+ handle_core_msg_buserr(core, &msg->buserr);
+ break;
+
+ case BM_MSG_ID_ERRPASSIVE:
+ case BM_MSG_ID_ERRWARN:
+ handle_core_msg_errstatechange(core,
+ &msg->errstatechange);
+ break;
+
+ default:
+ /* Ignore all other BM messages (like the CAN-FD messages) */
+ break;
+ }
+
+ core->bmfifo.msg_fifo_tail =
+ (core->bmfifo.msg_fifo_tail + 1) & 0xff;
+ }
+}
+
+/**
+ * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
+ *
+ * @ov: overview module structure
+ * @cores: array of core structures
+ *
+ * This function handles all interrupts pending for the overview module and the
+ * CAN cores of the esdACC FPGA.
+ *
+ * For each core (the overview module core and the CAN cores) it compares
+ * bmfifo.irq_cnt with the previously saved bmfifo.local_irq_cnt.
+ * An IRQ is pending if they differ. The esdACC FPGA updates the
+ * bmfifo.irq_cnt values by DMA.
+ *
+ * The pending interrupts are masked by writing to the IRQ mask register at
+ * ACC_OV_OF_BM_IRQ_MASK. For each core this register has a two-bit command
+ * field that is evaluated as follows:
+ *
+ * Define, bit pattern: meaning
+ * 00: no action
+ * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
+ * ACC_BM_IRQ_MASK, 10: mask interrupt
+ * 11: no action
+ *
+ * For each CAN core with a pending IRQ handle_core_interrupt() handles all
+ * busmaster messages from the message FIFO. The last handled message (FIFO
+ * index) is written to the CAN core to acknowledge its handling.
+ *
+ * Last step is to unmask all interrupts in the FPGA using
+ * ACC_BM_IRQ_UNMASK_ALL.
+ *
+ * Return:
+ * IRQ_HANDLED, if card generated an interrupt that was handled
+ * IRQ_NONE, if the interrupt is not ours
+ */
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
+{
+ u32 irqmask;
+ int i;
+
+ /* First we check for which units interrupts are pending: the
+ * card/overview or any of the cores. Two bits in irqmask are used
+ * for each; each two-bit field is set to ACC_BM_IRQ_MASK if an IRQ
+ * is pending.
+ */
+ irqmask = 0U;
+ if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
+ irqmask |= ACC_BM_IRQ_MASK;
+ ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
+ irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
+ core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
+ }
+ }
+
+ if (!irqmask)
+ return IRQ_NONE;
+
+ /* Second, we tell the card which interrupts we are handling by writing
+ * irqmask, call handle_{ov|core}_interrupt() and then acknowledge the
+ * interrupts by writing irq_cnt:
+ */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
+
+ if (irqmask & ACC_BM_IRQ_MASK) {
+ /* handle_ov_interrupt(); - no use yet. */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
+ ov->bmfifo.local_irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
+ handle_core_interrupt(core);
+ acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
+ core->bmfifo.local_irq_cnt);
+ }
+ }
+
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);
+
+ return IRQ_HANDLED;
+}
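The irqmask assembled by acc_card_interrupt() uses two bits per interrupt source: bits 1:0 for the card/overview module and bits 2*(i+1)+1:2*(i+1) for CAN core i, where ACC_BM_IRQ_MASK (binary 10) masks a pending source and ACC_BM_IRQ_UNMASK_ALL (binary 01 repeated sixteen times) unmasks everything again at the end. A standalone sketch of building such a mask (which sources are pending is made up):

    #include <stdio.h>

    #define ACC_BM_IRQ_UNMASK      0x1U
    #define ACC_BM_IRQ_MASK        (ACC_BM_IRQ_UNMASK << 1)
    #define ACC_BM_IRQ_UNMASK_ALL  0x55555555U

    int main(void)
    {
        unsigned int active_cores = 4;            /* arbitrary example */
        int ov_pending = 1;                       /* made-up: overview and core 2 pending */
        int core_pending[4] = { 0, 0, 1, 0 };
        unsigned int irqmask = 0;
        unsigned int i;

        if (ov_pending)
            irqmask |= ACC_BM_IRQ_MASK;
        for (i = 0; i < active_cores; i++)
            if (core_pending[i])
                irqmask |= ACC_BM_IRQ_MASK << (2 * (i + 1));

        printf("mask while handling:   0x%08x\n", irqmask);   /* 0x00000082 here */
        printf("unmask all afterwards: 0x%08x\n", ACC_BM_IRQ_UNMASK_ALL);
        return 0;
    }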
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
new file mode 100644
index 000000000000..a70488b25d39
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/bits.h>
+#include <linux/can/dev.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+
+#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ)
+#define ACC_I2C_ADDON_DETECT_DELAY_MS 10
+
+/* esdACC Overview Module */
+#define ACC_OV_OF_PROBE 0x0000
+#define ACC_OV_OF_VERSION 0x0004
+#define ACC_OV_OF_INFO 0x0008
+#define ACC_OV_OF_CANCORE_FREQ 0x000c
+#define ACC_OV_OF_TS_FREQ_LO 0x0010
+#define ACC_OV_OF_TS_FREQ_HI 0x0014
+#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018
+#define ACC_OV_OF_TS_CURR_LO 0x001c
+#define ACC_OV_OF_TS_CURR_HI 0x0020
+#define ACC_OV_OF_IRQ_STATUS 0x0028
+#define ACC_OV_OF_MODE 0x002c
+#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_OV_OF_BM_IRQ_MASK 0x0074
+#define ACC_OV_OF_MSI_DATA 0x0080
+#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084
+
+/* Feature flags are contained in the upper 16 bit of the version
+ * register at ACC_OV_OF_VERSION but only used with these masks after
+ * extraction into an extra variable => (xx - 16).
+ */
+#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
+#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+
+#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
+#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
+#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2)
+#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4)
+#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5)
+#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6)
+#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4)
+#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7)
+#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11)
+#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14)
+#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15)
+#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
+
+/* esdACC CAN Core Module */
+#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_STATUS_IRQ 0x0008
+#define ACC_CORE_OF_BRP 0x000c
+#define ACC_CORE_OF_BTR 0x0010
+#define ACC_CORE_OF_FBTR 0x0014
+#define ACC_CORE_OF_STATUS 0x0030
+#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048
+#define ACC_CORE_OF_TXFIFO_STATUS 0x004c
+#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050
+#define ACC_CORE_OF_TX_ABORT_MASK 0x0054
+#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_CORE_OF_TXFIFO_ID 0x00c0
+#define ACC_CORE_OF_TXFIFO_DLC 0x00c4
+#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
+#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
+
+#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
+#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
+#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
+#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
+#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
+#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
+
+#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+
+/* BRP and BTR register layout for CAN-Classic version */
+#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16)
+#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24)
+
+/* BRP and BTR register layout for CAN-FD version */
+#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16)
+#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24)
+
+/* 256 BM_MSGs of 32 byte size */
+#define ACC_CORE_DMAMSG_SIZE 32U
+#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)
+
+enum acc_bmmsg_id {
+ BM_MSG_ID_RXTXDONE = 0x01,
+ BM_MSG_ID_TXABORT = 0x02,
+ BM_MSG_ID_OVERRUN = 0x03,
+ BM_MSG_ID_BUSERR = 0x04,
+ BM_MSG_ID_ERRPASSIVE = 0x05,
+ BM_MSG_ID_ERRWARN = 0x06,
+ BM_MSG_ID_TIMESLICE = 0x07,
+ BM_MSG_ID_HWTIMER = 0x08,
+ BM_MSG_ID_HOTPLUG = 0x09,
+};
+
+/* The struct acc_bmmsg_* structure declarations that follow here provide
+ * access to the ring buffer of bus master messages maintained by the FPGA
+ * bus master engine. All bus master messages have the same size of
+ * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in
+ * memory.
+ *
+ * All structure members are naturally aligned. Therefore we should not need
+ * a __packed attribute. All struct acc_bmmsg_* declarations have at least
+ * reserved* members to fill the structure to the full ACC_CORE_DMAMSG_SIZE.
+ *
+ * A failure of this property due to padding will be detected at compile time
+ * by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE).
+ */
+
+struct acc_bmmsg_rxtxdone {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u32 id;
+ struct {
+ u8 len;
+ u8 txdfifo_idx;
+ u8 zeroes8;
+ u8 reserved;
+ } acc_dlc;
+ u8 data[CAN_MAX_DLEN];
+ /* Time stamps in struct acc_ov::timestamp_frequency ticks. */
+ u64 ts;
+};
+
+struct acc_bmmsg_txabort {
+ u8 msg_id;
+ u8 txfifo_level;
+ u16 abort_mask;
+ u8 txtsfifo_level;
+ u8 reserved2[1];
+ u16 abort_mask_txts;
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_overrun {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 lost_cnt;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_buserr {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 ecc;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reg_btr;
+ u32 reserved3[2];
+};
+
+struct acc_bmmsg_errstatechange {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reserved3[3];
+};
+
+struct acc_bmmsg_timeslice {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hwtimer {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[1];
+ u64 timer;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hotplug {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[7];
+};
+
+union acc_bmmsg {
+ u8 msg_id;
+ struct acc_bmmsg_rxtxdone rxtxdone;
+ struct acc_bmmsg_txabort txabort;
+ struct acc_bmmsg_overrun overrun;
+ struct acc_bmmsg_buserr buserr;
+ struct acc_bmmsg_errstatechange errstatechange;
+ struct acc_bmmsg_timeslice timeslice;
+ struct acc_bmmsg_hwtimer hwtimer;
+};
+
+/* Check size of union acc_bmmsg to be of expected size. */
+static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE);
+
+struct acc_bmfifo {
+ const union acc_bmmsg *messages;
+ /* irq_cnt points to a u32 value where the esdACC FPGA deposits
+ * the bm_fifo head index in coherent DMA memory. Only bits 7..0
+ * are valid. Use READ_ONCE() to access this memory location.
+ */
+ const u32 *irq_cnt;
+ u32 local_irq_cnt;
+ u32 msg_fifo_tail;
+};
+
+struct acc_core {
+ void __iomem *addr;
+ struct net_device *netdev;
+ struct acc_bmfifo bmfifo;
+ u8 tx_fifo_size;
+ u8 tx_fifo_head;
+ u8 tx_fifo_tail;
+};
+
+struct acc_ov {
+ void __iomem *addr;
+ struct acc_bmfifo bmfifo;
+ u32 timestamp_frequency;
+ u32 core_frequency;
+ u16 version;
+ u16 features;
+ u8 total_cores;
+ u8 active_cores;
+};
+
+struct acc_net_priv {
+ struct can_priv can; /* must be the first member! */
+ struct acc_core *core;
+ struct acc_ov *ov;
+};
+
+static inline u32 acc_read32(struct acc_core *core, unsigned short offs)
+{
+ return ioread32be(core->addr + offs);
+}
+
+static inline void acc_write32(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, core->addr + offs);
+}
+
+static inline void acc_write32_noswap(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32(v, core->addr + offs);
+}
+
+static inline void acc_set_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v |= mask;
+ acc_write32(core, offs, v);
+}
+
+static inline void acc_clear_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v &= ~mask;
+ acc_write32(core, offs, v);
+}
+
+static inline int acc_resetmode_entered(struct acc_core *core)
+{
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+
+ return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+}
+
+static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
+{
+ return ioread32be(ov->addr + offs);
+}
+
+static inline void acc_ov_write32(struct acc_ov *ov,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, ov->addr + offs);
+}
+
+static inline void acc_ov_set_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v |= b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_ov_clear_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v &= ~b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_reset_fpga(struct acc_ov *ov)
+{
+ acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET);
+
+ /* (Re-)start and wait for completion of addon detection on the I^2C bus */
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE);
+ mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS);
+}
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev);
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores,
+ const void *mem);
+int acc_open(struct net_device *netdev);
+int acc_close(struct net_device *netdev);
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+int acc_set_mode(struct net_device *netdev, enum can_mode mode);
+int acc_set_bittiming(struct net_device *netdev);
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores);
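
A minimal consumer sketch for the bm_fifo above, not part of the patch: the handler callback and the 256-entry wrap (matching the 8-bit validity of *irq_cnt noted in the comment) are assumptions made here purely for illustration.

	static void acc_example_drain_bmfifo(struct acc_bmfifo *bmfifo,
					     void (*handle)(const union acc_bmmsg *))
	{
		/* Only bits 7..0 are valid; READ_ONCE() because the esdACC
		 * FPGA updates this coherent-DMA location asynchronously.
		 */
		u32 head = READ_ONCE(*bmfifo->irq_cnt) & 0xffU;

		while (bmfifo->msg_fifo_tail != head) {
			handle(&bmfifo->messages[bmfifo->msg_fifo_tail]);
			bmfifo->msg_fifo_tail =
				(bmfifo->msg_fifo_tail + 1) & 0xffU;
		}
	}
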
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a57005faa04f..f81b598147b3 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
@@ -47,12 +47,19 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
+/* Xilinx based devices */
+#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
+#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
+
/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
+/* Xilinx SerDes LSB address translation mask */
+#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
+
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
@@ -281,6 +288,8 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
struct kvaser_pciefd_address_offset {
u32 serdes;
@@ -335,6 +344,18 @@ static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offse
.kcan_ch1 = 0x142000,
};
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
+ .serdes = 0x00208,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
+};
+
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
.kcan_rx0 = BIT(4),
.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
@@ -347,6 +368,12 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
.all = GENMASK(19, 16) | BIT(4),
};
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
+ .all = GENMASK(19, 16) | BIT(4),
+};
+
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};
@@ -355,6 +382,10 @@ static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
+};
+
static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
.address_offset = &kvaser_pciefd_altera_address_offset,
.irq_mask = &kvaser_pciefd_altera_irq_mask,
@@ -367,6 +398,12 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
.ops = &kvaser_pciefd_sf2_dev_ops,
};
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
+ .address_offset = &kvaser_pciefd_xilinx_address_offset,
+ .irq_mask = &kvaser_pciefd_xilinx_irq_mask,
+ .ops = &kvaser_pciefd_xilinx_dev_ops,
+};
+
struct kvaser_pciefd_can {
struct can_priv can;
struct kvaser_pciefd *kv_pcie;
@@ -457,6 +494,14 @@ static struct pci_device_id kvaser_pciefd_id_table[] = {
.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
},
{
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
0,
},
};
@@ -1035,6 +1080,21 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
iowrite32(msb, serdes_base + 0x4);
}
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ msb = addr >> 32;
+#endif
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(msb, serdes_base);
+ iowrite32(lsb, serdes_base + 0x4);
+}
+
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
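
Worked example for kvaser_pciefd_write_dma_map_xilinx() above, with a hypothetical buffer address and assuming a 64-bit dma_addr_t (the #ifdef guards the high-word shift otherwise). The Xilinx SerDes takes the MSB at offset 0x0 and the LSB at offset 0x4 of its 0x8 * index slot, the reverse of the SmartFusion2 write order visible just above.

	dma_addr_t addr = 0x123456000ULL;			/* hypothetical */
	u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;	/* 0x23456000 */
	u32 msb = addr >> 32;					/* 0x1 */
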
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 16ecc11c7f62..14b231c4d7ec 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -255,6 +255,7 @@ enum m_can_reg {
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFWM_MASK GENMASK(29, 24)
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
@@ -320,6 +321,12 @@ struct id_and_dlc {
u32 dlc;
};
+struct m_can_fifo_element {
+ u32 id;
+ u32 dlc;
+ u8 data[CANFD_MAX_DLEN];
+};
+
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -372,16 +379,6 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static inline bool _m_can_tx_fifo_full(u32 txfqs)
-{
- return !!(txfqs & TXFQS_TFQF);
-}
-
-static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
-{
- return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
-}
-
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
u32 cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -416,15 +413,48 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
}
}
+static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
+{
+ if (cdev->active_interrupts == interrupts)
+ return;
+ cdev->ops->write_reg(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = interrupts;
+}
+
+static void m_can_coalescing_disable(struct m_can_classdev *cdev)
+{
+ u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
+
+ if (!cdev->net->irq)
+ return;
+
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_interrupt_enable(cdev, new_interrupts);
+}
+
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
+ m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
+ cdev->active_interrupts = 0x0;
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+ hrtimer_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -444,18 +474,26 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
- if (cdev->tx_skb) {
- int putidx = 0;
+ if (cdev->tx_ops) {
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ if (!cdev->tx_ops[i].skb)
+ continue;
- net->stats.tx_errors++;
- if (cdev->version > 30)
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
-
- can_free_echo_skb(cdev->net, putidx, NULL);
- cdev->tx_skb = NULL;
+ net->stats.tx_errors++;
+ cdev->tx_ops[i].skb = NULL;
+ }
}
+
+ for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+ can_free_echo_skb(cdev->net, i, NULL);
+
+ netdev_reset_queue(cdev->net);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1007,23 +1045,60 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
- unsigned int msg_mark,
- u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark, u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
+ unsigned int frame_len;
if (cdev->is_peripheral)
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
msg_mark,
timestamp,
- NULL);
+ &frame_len);
else
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
stats->tx_packets++;
+
+ return frame_len;
+}
+
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+ unsigned int transmitted_frame_len)
+{
+ unsigned long irqflags;
+
+ netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
+ netif_wake_queue(cdev->net);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
+static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+ int tx_fifo_in_flight;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
+ if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
+ netif_stop_queue(cdev->net);
+ if (tx_fifo_in_flight > cdev->tx_fifo_size) {
+ netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ cdev->tx_fifo_in_flight = tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+
+ return NETDEV_TX_OK;
}
static int m_can_echo_tx_event(struct net_device *dev)
@@ -1035,6 +1110,8 @@ static int m_can_echo_tx_event(struct net_device *dev)
int i = 0;
int err = 0;
unsigned int msg_mark;
+ int processed = 0;
+ unsigned int processed_frame_len = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1063,25 +1140,62 @@ static int m_can_echo_tx_event(struct net_device *dev)
fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
- m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+ timestamp);
+
+ ++processed;
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
+ m_can_finish_tx(cdev, processed, processed_frame_len);
+
return err;
}
+static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+{
+ u32 new_interrupts = cdev->active_interrupts;
+ bool enable_rx_timer = false;
+ bool enable_tx_timer = false;
+
+ if (!cdev->net->irq)
+ return;
+
+ if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
+ enable_rx_timer = true;
+ new_interrupts &= ~IR_RF0N;
+ }
+ if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
+ enable_tx_timer = true;
+ new_interrupts &= ~IR_TEFN;
+ }
+ if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_RF0N;
+ if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_TEFN;
+
+ m_can_interrupt_enable(cdev, new_interrupts);
+ if (enable_rx_timer | enable_tx_timer)
+ hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
+ HRTIMER_MODE_REL);
+}
+
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
u32 ir;
- if (pm_runtime_suspended(cdev->dev))
+ if (pm_runtime_suspended(cdev->dev)) {
+ m_can_coalescing_disable(cdev);
return IRQ_NONE;
+ }
+
ir = m_can_read(cdev, M_CAN_IR);
+ m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
@@ -1096,13 +1210,17 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
+ if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
if (!cdev->is_peripheral) {
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- } else if (m_can_rx_peripheral(dev, ir) < 0) {
- goto out_fail;
+ } else {
+ int pkts;
+
+ pkts = m_can_rx_peripheral(dev, ir);
+ if (pkts < 0)
+ goto out_fail;
}
}
@@ -1110,21 +1228,18 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
u32 timestamp = 0;
+ unsigned int frame_len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
- m_can_tx_update_stats(cdev, 0, timestamp);
- netif_wake_queue(dev);
+ frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+ m_can_finish_tx(cdev, 1, frame_len);
}
} else {
- if (ir & IR_TEFN) {
+ if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
-
- if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(cdev))
- netif_wake_queue(dev);
}
}
@@ -1138,6 +1253,15 @@ out_fail:
return IRQ_HANDLED;
}
+static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
+}
+
static const struct can_bittiming_const m_can_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
@@ -1276,9 +1400,8 @@ static int m_can_chip_config(struct net_device *dev)
}
/* Disable unused interrupts */
- interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
- IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
- IR_RF0F | IR_RF0W);
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
m_can_config_endisable(cdev, true);
@@ -1315,6 +1438,8 @@ static int m_can_chip_config(struct net_device *dev)
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFWM_MASK,
+ cdev->tx_max_coalesced_frames_irq) |
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
@@ -1322,6 +1447,7 @@ static int m_can_chip_config(struct net_device *dev)
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
+ FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
@@ -1380,7 +1506,7 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
- m_can_write(cdev, M_CAN_IE, interrupts);
+ m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1413,15 +1539,16 @@ static int m_can_start(struct net_device *dev)
if (ret)
return ret;
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
+
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
m_can_enable_all_interrupts(cdev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
- hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
- HRTIMER_MODE_REL_PINNED);
- }
+ if (cdev->version > 30)
+ cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
return 0;
}
@@ -1577,11 +1704,6 @@ static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
- }
-
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
@@ -1605,8 +1727,9 @@ static int m_can_close(struct net_device *dev)
m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
+ m_can_clean(dev);
+
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
@@ -1619,57 +1742,42 @@ static int m_can_close(struct net_device *dev)
return 0;
}
-static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- /*get wrap around for loopback skb index */
- unsigned int wrap = cdev->can.echo_skb_max;
- int next_idx;
-
- /* calculate next index */
- next_idx = (++putidx >= wrap ? 0 : putidx);
-
- /* check if occupied */
- return !!cdev->can.echo_skb[next_idx];
-}
-
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
{
- struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len_padded = DIV_ROUND_UP(cf->len, 4);
+ struct m_can_fifo_element fifo_element;
struct net_device *dev = cdev->net;
- struct sk_buff *skb = cdev->tx_skb;
- struct id_and_dlc fifo_header;
u32 cccr, fdflags;
- u32 txfqs;
int err;
- int putidx;
-
- cdev->tx_skb = NULL;
+ u32 putidx;
+ unsigned int frame_len = can_skb_get_frame_len(skb);
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- fifo_header.id = cf->can_id & CAN_EFF_MASK;
- fifo_header.id |= TX_BUF_XTD;
+ fifo_element.id = cf->can_id & CAN_EFF_MASK;
+ fifo_element.id |= TX_BUF_XTD;
} else {
- fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- fifo_header.id |= TX_BUF_RTR;
+ fifo_element.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+ fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
- err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ cf->data, len_padded);
if (err)
goto out_fail;
@@ -1690,33 +1798,15 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
- can_put_echo_skb(skb, dev, 0, 0);
+ can_put_echo_skb(skb, dev, 0, frame_len);
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
- txfqs = m_can_read(cdev, M_CAN_TXFQS);
-
- /* Check if FIFO full */
- if (_m_can_tx_fifo_full(txfqs)) {
- /* This shouldn't happen */
- netif_stop_queue(dev);
- netdev_warn(dev,
- "TX queue active although FIFO is full.");
-
- if (cdev->is_peripheral) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
- }
-
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
+ putidx = cdev->tx_fifo_putidx;
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1731,30 +1821,32 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
- if (err)
- goto out_fail;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
+ cf->len, 0);
+
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
+ &fifo_element, 2 + len_padded);
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx, 0);
+ can_put_echo_skb(skb, dev, putidx, frame_len);
- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
-
- /* stop network queue if fifo full */
- if (m_can_tx_fifo_full(cdev) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
+ cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
+ 0 : cdev->tx_fifo_putidx);
}
return NETDEV_TX_OK;
@@ -1765,46 +1857,91 @@ out_fail:
return NETDEV_TX_BUSY;
}
+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ if (cdev->version == 30)
+ return;
+ if (!cdev->is_peripheral)
+ return;
+
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
- struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
- tx_work);
+ struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+ struct m_can_classdev *cdev = op->cdev;
+ struct sk_buff *skb = op->skb;
+
+ op->skb = NULL;
+ m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
+{
+ cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
+ queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+ ++cdev->next_tx_op;
+ if (cdev->next_tx_op >= cdev->tx_fifo_size)
+ cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ bool submit;
+
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);
- m_can_tx_handler(cdev);
+ return NETDEV_TX_OK;
}
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ unsigned int frame_len;
+ netdev_tx_t ret;
if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- if (cdev->is_peripheral) {
- if (cdev->tx_skb) {
- netdev_err(dev, "hard_xmit called while tx busy\n");
- return NETDEV_TX_BUSY;
- }
+ frame_len = can_skb_get_frame_len(skb);
- if (cdev->can.state == CAN_STATE_BUS_OFF) {
- m_can_clean(dev);
- } else {
- /* Need to stop the queue to avoid numerous requests
- * from being sent. Suggested improvement is to create
- * a queueing mechanism that will queue the skbs and
- * process them in order.
- */
- cdev->tx_skb = skb;
- netif_stop_queue(cdev->net);
- queue_work(cdev->tx_wq, &cdev->tx_work);
- }
- } else {
- cdev->tx_skb = skb;
- return m_can_tx_handler(cdev);
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(cdev->net);
+ return NETDEV_TX_OK;
}
- return NETDEV_TX_OK;
+ ret = m_can_start_tx(cdev);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+
+ netdev_sent_queue(dev, frame_len);
+
+ if (cdev->is_peripheral)
+ ret = m_can_start_peripheral_xmit(cdev, skb);
+ else
+ ret = m_can_tx_handler(cdev, skb);
+
+ if (ret != NETDEV_TX_OK)
+ netdev_completed_queue(dev, 1, frame_len);
+
+ return ret;
}
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1844,15 +1981,17 @@ static int m_can_open(struct net_device *dev)
/* register interrupt handler */
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
- cdev->tx_wq = alloc_workqueue("mcan_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
- INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ cdev->tx_ops[i].cdev = cdev;
+ INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+ }
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
@@ -1900,7 +2039,108 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static int m_can_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
+ ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
+ ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
+ ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int m_can_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (cdev->can.state != CAN_STATE_STOPPED) {
+ netdev_err(dev, "Device is in use, please shut it down first\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
+ netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
+ ec->rx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_RXF0].num);
+ return -EINVAL;
+ }
+ if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
+ ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
+ netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
+ ec->rx_coalesce_usecs_irq,
+ ec->tx_coalesce_usecs_irq);
+ return -EINVAL;
+ }
+
+ cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ if (cdev->rx_coalesce_usecs_irq)
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ else
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+
+ return 0;
+}
+
static const struct ethtool_ops m_can_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = m_can_get_coalesce,
+ .set_coalesce = m_can_set_coalesce,
+};
+
+static const struct ethtool_ops m_can_ethtool_ops_polling = {
.get_ts_info = ethtool_op_get_ts_info,
};
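
A userspace sketch of driving the new coalescing knobs through the SIOCETHTOOL ioctl, roughly what "ethtool -C can0 rx-frames-irq 8 rx-usecs-irq 500 tx-frames 8 tx-frames-irq 8 tx-usecs-irq 500" would request. It is illustrative only, the values are placeholders, and per m_can_set_coalesce() the interface must be down and rx-usecs-irq must equal tx-usecs-irq when both are set.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/if.h>
	#include <linux/sockios.h>

	static int example_set_coalesce(int fd, const char *ifname)
	{
		struct ethtool_coalesce ec = {
			.cmd = ETHTOOL_SCOALESCE,
			.rx_max_coalesced_frames_irq = 8,
			.rx_coalesce_usecs_irq = 500,
			.tx_max_coalesced_frames = 8,
			.tx_max_coalesced_frames_irq = 8,
			.tx_coalesce_usecs_irq = 500,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ec;

		/* fd is e.g. socket(AF_INET, SOCK_DGRAM, 0) */
		return ioctl(fd, SIOCETHTOOL, &ifr);
	}
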
@@ -1908,7 +2148,10 @@ static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq)
+ dev->ethtool_ops = &m_can_ethtool_ops;
+ else
+ dev->ethtool_ops = &m_can_ethtool_ops_polling;
return register_candev(dev);
}
@@ -2056,12 +2299,23 @@ int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
- if (cdev->pm_clock_support) {
- ret = m_can_clk_start(cdev);
- if (ret)
- return ret;
+ cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+ cdev->mcfg[MRAM_TXE].num));
+ if (cdev->is_peripheral) {
+ cdev->tx_ops =
+ devm_kzalloc(cdev->dev,
+ cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+ GFP_KERNEL);
+ if (!cdev->tx_ops) {
+ dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+ return -ENOMEM;
+ }
}
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
@@ -2069,8 +2323,15 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto clk_disable;
}
- if (!cdev->net->irq)
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
cdev->hrtimer.function = &hrtimer_callback;
+ } else {
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cdev->hrtimer.function = m_can_coalescing_timer;
+ }
ret = m_can_dev_setup(cdev);
if (ret)
@@ -2121,7 +2382,15 @@ int m_can_class_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
- m_can_stop(ndev);
+
+ /* leave the chip running with rx interrupt enabled if it is
+ * used as a wake-up source.
+ */
+ if (cdev->pm_wake_source)
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+ else
+ m_can_stop(ndev);
+
m_can_clk_stop(cdev);
}
@@ -2148,11 +2417,15 @@ int m_can_class_resume(struct device *dev)
ret = m_can_clk_start(cdev);
if (ret)
return ret;
- ret = m_can_start(ndev);
- if (ret) {
- m_can_clk_stop(cdev);
- return ret;
+ if (cdev->pm_wake_source) {
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+ return ret;
+ }
}
netif_device_attach(ndev);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 520e14277dff..3a9edc292593 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -70,6 +70,13 @@ struct m_can_ops {
int (*init)(struct m_can_classdev *cdev);
};
+struct m_can_tx_op {
+ struct m_can_classdev *cdev;
+ struct work_struct work;
+ struct sk_buff *skb;
+ bool submit;
+};
+
struct m_can_classdev {
struct can_priv can;
struct can_rx_offload offload;
@@ -80,18 +87,42 @@ struct m_can_classdev {
struct clk *cclk;
struct workqueue_struct *tx_wq;
- struct work_struct tx_work;
- struct sk_buff *tx_skb;
struct phy *transceiver;
+ ktime_t irq_timer_wait;
+
struct m_can_ops *ops;
int version;
u32 irqstatus;
int pm_clock_support;
+ int pm_wake_source;
int is_peripheral;
+ // Cached M_CAN_IE register content
+ u32 active_interrupts;
+ u32 rx_max_coalesced_frames_irq;
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames_irq;
+ u32 tx_coalesce_usecs_irq;
+
+ // Store this internally to avoid fetch delays on peripheral chips
+ u32 tx_fifo_putidx;
+
+ /* Protects shared state between start_xmit and m_can_isr */
+ spinlock_t tx_handling_spinlock;
+ int tx_fifo_in_flight;
+
+ struct m_can_tx_op *tx_ops;
+ int tx_fifo_size;
+ int next_tx_op;
+
+ int nr_txs_without_submit;
+ /* bitfield of fifo elements that will be submitted together */
+ u32 tx_peripheral_submit;
+
struct mram_cfg mcfg[MRAM_CFG_NUM];
struct hrtimer hrtimer;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index f2219aa2824b..45400de4163d 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -125,6 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index cdb28d6a092c..df0367124b4c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -109,10 +109,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
ret = irq;
goto probe_fail;
}
- } else {
- dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
}
/* message ram could be shared */
@@ -143,6 +139,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->net->irq = irq;
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
mcan_class->transceiver = transceiver;
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index ae8c42f5debd..a42600dac70d 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,6 +411,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->spi = spi;
mcan_class->pm_clock_support = 0;
+ mcan_class->pm_wake_source = device_property_read_bool(&spi->dev, "wakeup-source");
mcan_class->can.clock.freq = freq;
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
@@ -459,6 +460,9 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ if (mcan_class->pm_wake_source)
+ device_init_wakeup(&spi->dev, true);
+
ret = m_can_class_register(mcan_class);
if (ret) {
dev_err(&spi->dev, "Failed registering m_can device %pe\n",
@@ -487,6 +491,29 @@ static void tcan4x5x_can_remove(struct spi_device *spi)
m_can_class_free_dev(priv->cdev.net);
}
+static int __maybe_unused tcan4x5x_suspend(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (cdev->pm_wake_source)
+ enable_irq_wake(spi->irq);
+
+ return m_can_class_suspend(dev);
+}
+
+static int __maybe_unused tcan4x5x_resume(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret = m_can_class_resume(dev);
+
+ if (cdev->pm_wake_source)
+ disable_irq_wake(spi->irq);
+
+ return ret;
+}
+
static const struct of_device_id tcan4x5x_of_match[] = {
{
.compatible = "ti,tcan4x5x",
@@ -505,11 +532,15 @@ static const struct spi_device_id tcan4x5x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+static const struct dev_pm_ops tcan4x5x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tcan4x5x_suspend, tcan4x5x_resume)
+};
+
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
- .pm = NULL,
+ .pm = &tcan4x5x_pm_ops,
},
.id_table = tcan4x5x_id_table,
.probe = tcan4x5x_can_probe,
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 32286f861a19..721df91cdbfb 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -436,7 +436,7 @@ int softing_startstop(struct net_device *dev, int up)
return ret;
bus_bitmask_start = 0;
- if (dev && up)
+ if (up)
/* prepare to start this bus as well */
bus_bitmask_start |= (1 << priv->index);
/* bring netdevs down */
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index eebf967f4711..1d9057dc44f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -837,7 +837,7 @@ static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
return err;
if (trec & MCP251XFD_REG_TREC_TXBO)
- bec->txerr = 256;
+ bec->txerr = CAN_BUS_OFF_THRESHOLD;
else
bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index d1450722cb3c..bd58c636d465 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -100,6 +100,7 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Leaf v3
- Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
- Kvaser Hybrid Pro CAN/LIN
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 95b0fdb602c8..65c962f76898 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -385,7 +385,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
static int gs_cmd_reset(struct gs_can *dev)
{
struct gs_device_mode dm = {
- .mode = GS_CAN_MODE_RESET,
+ .mode = cpu_to_le32(GS_CAN_MODE_RESET),
};
return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 71ef4db5c09f..8faf8a462c05 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -88,6 +88,7 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
+#define USB_LEAF_V3_PRODUCT_ID 0x0117
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
.quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
@@ -235,6 +236,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 98c669ad5141..f7fabba707ea 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -119,7 +119,7 @@ static int vxcan_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3722eaa84234..fae0120473f8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -31,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/u64_stats_sync.h>
#define DRIVER_NAME "xilinx_can"
@@ -58,6 +59,13 @@ enum xcan_reg {
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+
+ /* only on AXI CAN cores */
+ XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */
+ XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */
+ XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */
+ XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */
+
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
@@ -124,6 +132,18 @@ enum xcan_reg {
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */
+#define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */
+#define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */
+#define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO One bit ECC error */
+#define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO Two bit ECC error */
+#define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO One bit ECC error */
+#define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \
+ XCAN_IXR_E1BERX_MASK | \
+ XCAN_IXR_E2BETXOL_MASK | \
+ XCAN_IXR_E1BETXOL_MASK | \
+ XCAN_IXR_E2BETXTL_MASK | \
+ XCAN_IXR_E1BETXTL_MASK)
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
@@ -137,6 +157,11 @@ enum xcan_reg {
#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
+#define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */
+#define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */
+#define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
@@ -202,6 +227,14 @@ struct xcan_devtype_data {
* @devtype: Device type specific constants
* @transceiver: Optional pointer to associated CAN transceiver
* @rstc: Pointer to reset control
+ * @ecc_enable: ECC enable flag
+ * @syncp: synchronization for ECC error stats
+ * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count
+ * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count
+ * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count
+ * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count
+ * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count
+ * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count
*/
struct xcan_priv {
struct can_priv can;
@@ -221,6 +254,14 @@ struct xcan_priv {
struct xcan_devtype_data devtype;
struct phy *transceiver;
struct reset_control *rstc;
+ bool ecc_enable;
+ struct u64_stats_sync syncp;
+ u64_stats_t ecc_rx_2_bit_errors;
+ u64_stats_t ecc_rx_1_bit_errors;
+ u64_stats_t ecc_txol_2_bit_errors;
+ u64_stats_t ecc_txol_1_bit_errors;
+ u64_stats_t ecc_txtl_2_bit_errors;
+ u64_stats_t ecc_txtl_1_bit_errors;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -308,6 +349,24 @@ static const struct can_tdc_const xcan_tdc_const_canfd2 = {
.tdcf_max = 0,
};
+enum xcan_stats_type {
+ XCAN_ECC_RX_2_BIT_ERRORS,
+ XCAN_ECC_RX_1_BIT_ERRORS,
+ XCAN_ECC_TXOL_2_BIT_ERRORS,
+ XCAN_ECC_TXOL_1_BIT_ERRORS,
+ XCAN_ECC_TXTL_2_BIT_ERRORS,
+ XCAN_ECC_TXTL_1_BIT_ERRORS,
+};
+
+static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors",
+ [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors",
+ [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors",
+ [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors",
+ [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors",
+ [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors",
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -523,6 +582,9 @@ static int xcan_chip_start(struct net_device *ndev)
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+ if (priv->ecc_enable)
+ ier |= XCAN_IXR_ECC_MASK;
+
if (priv->devtype.flags & XCAN_FLAG_RXMNF)
ier |= XCAN_IXR_RXMNF_MASK;
@@ -1127,6 +1189,54 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
priv->can.can_stats.bus_error++;
}
+ if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) {
+ u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc;
+
+ reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
+ reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET);
+ reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET);
+
+ /* The counter reaches its maximum at 0xffff and does not overflow.
+ * Accept the small race window between reading and resetting ECC counters.
+ */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+
+ u64_stats_update_begin(&priv->syncp);
+
+ if (isr & XCAN_IXR_E2BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ u64_stats_update_end(&priv->syncp);
+ }
+
if (cf.can_id) {
struct can_frame *skb_cf;
struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
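
Worked example for the ECC counter split in xcan_err_interrupt() above; the register value is hypothetical. Each counter register packs two 16-bit fields that saturate at 0xffff, which is why the handler accumulates them into u64_stats_t and resets the hardware counters right away.

	u32 reg_rx_ecc = 0x00030002;					/* hypothetical */
	u64 two_bit = FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc);	/* == 3 */
	u64 one_bit = FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc);	/* == 2 */
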
@@ -1354,8 +1464,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr_errors, mask;
u32 isr, ier;
- u32 isr_errors;
u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
@@ -1374,10 +1484,15 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
if (isr & XCAN_IXR_TXOK_MASK)
xcan_tx_interrupt(ndev, isr);
+ mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK;
+
+ if (priv->ecc_enable)
+ mask |= XCAN_IXR_ECC_MASK;
+
/* Check for the type of error interrupt and Processing it */
- isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
- XCAN_IXR_RXMNF_MASK);
+ isr_errors = isr & mask;
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
@@ -1546,6 +1661,43 @@ static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
return 0;
}
+static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &xcan_priv_flags_strings,
+ sizeof(xcan_priv_flags_strings));
+ }
+}
+
+static int xcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void xcan_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors);
+ data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors);
+ data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors);
+ data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors);
+ data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors);
+ data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors);
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1555,6 +1707,9 @@ static const struct net_device_ops xcan_netdev_ops = {
static const struct ethtool_ops xcan_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_strings = xcan_get_strings,
+ .get_sset_count = xcan_get_sset_count,
+ .get_ethtool_stats = xcan_get_ethtool_stats,
};
/**
@@ -1793,6 +1948,7 @@ static int xcan_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc");
priv->dev = &pdev->dev;
priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
@@ -1909,6 +2065,11 @@ static int xcan_probe(struct platform_device *pdev)
priv->reg_base, ndev->irq, priv->can.clock.freq,
hw_tx_max, priv->tx_max);
+ if (priv->ecc_enable) {
+ /* Reset FIFO ECC counters */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+ }
return 0;
err_disableclks:
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f8c1d73b251d..3092b391031a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -48,7 +48,7 @@ config NET_DSA_MT7530
config NET_DSA_MT7530_MDIO
tristate "MediaTek MT7530 MDIO interface driver"
depends on NET_DSA_MT7530
- imply MEDIATEK_GE_PHY
+ select MEDIATEK_GE_PHY
select PCS_MTK_LYNXI
help
This enables support for the MediaTek MT7530 and MT7531 switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0d628b35fd5c..b2eeff04f4c8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -559,6 +559,19 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -1257,7 +1270,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
u8 rgmii_ctrl = 0, reg = 0, off;
bool tx_pause = false;
bool rx_pause = false;
@@ -2193,21 +2206,6 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mirror_del);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
-{
- struct b53_device *dev = ds->priv;
- u16 reg;
-
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
- if (enable)
- reg |= BIT(port);
- else
- reg &= ~BIT(port);
- b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
-}
-EXPORT_SYMBOL(b53_eee_enable_set);
-
-
/* Returns 0 if EEE was not enabled, or 1 otherwise
*/
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
@@ -2224,27 +2222,21 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u16 reg;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
- e->eee_enabled = p->eee_enabled;
- e->eee_active = !!(reg & BIT(port));
-
return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index fdcfd5081c28..c13a907947f1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -95,7 +95,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
struct b53_vlan {
@@ -395,9 +395,8 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4a52ccbe393f..bc77ee9e6d0a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -835,7 +835,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ struct ethtool_keee *p = &priv->dev->ports[port].eee;
u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 61b71bcfe396..14923535ca7e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
- ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
if (!ret)
- ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
mutex_unlock(&dev->alu_mutex);
@@ -633,6 +633,57 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
}
/**
+ * ksz879x_get_loopback - KSZ879x specific function to get loopback
+ * configuration status for a specific port
+ * @dev: Pointer to the device structure
+ * @port: Port number to query
+ * @val: Pointer to store the result
+ *
+ * This function reads the SMI registers to determine whether loopback mode
+ * is enabled for a specific port.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_get_loopback(struct ksz_device *dev, u16 port,
+ u16 *val)
+{
+ u8 stat3;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3);
+ if (ret)
+ return ret;
+
+ if (stat3 & PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ return 0;
+}
+
+/**
+ * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for
+ * a specific port
+ * @dev: Pointer to the device structure.
+ * @port: Port number to modify.
+ * @val: Value indicating whether to enable or disable loopback mode.
+ *
+ * This function translates loopback bit of the BMCR register into the
+ * corresponding hardware register bit value and writes it to the SMI interface.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val)
+{
+ u8 stat3 = 0;
+
+ if (val & BMCR_LOOPBACK)
+ stat3 |= PORT_PHY_LOOPBACK;
+
+ return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK,
+ stat3);
+}
+
+/**
* ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY
* Control register (Reg. 31).
* @dev: The KSZ device instance.
@@ -676,59 +727,122 @@ static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val)
return 0;
}
+/**
+ * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be read.
+ * @val: The value read from the SMI interface.
+ *
+ * This function reads the SMI interface and translates the hardware register
+ * bit values into their corresponding control settings for a MIIM PHY Basic
+ * mode control register.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val)
+{
+ const u16 *regs = dev->info->regs;
+ u8 restart, speed, ctrl;
+ int ret;
+
+ *val = 0;
+
+ ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ if (ctrl & PORT_FORCE_100_MBIT)
+ *val |= BMCR_SPEED100;
+
+ if (ksz_is_ksz88x3(dev)) {
+ if (restart & KSZ8873_PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ if ((ctrl & PORT_AUTO_NEG_ENABLE))
+ *val |= BMCR_ANENABLE;
+ } else {
+ ret = ksz879x_get_loopback(dev, port, val);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ *val |= BMCR_ANENABLE;
+ }
+
+ if (restart & PORT_POWER_DOWN)
+ *val |= BMCR_PDOWN;
+
+ if (restart & PORT_AUTO_NEG_RESTART)
+ *val |= BMCR_ANRESTART;
+
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ *val |= BMCR_FULLDPLX;
+
+ if (speed & PORT_HP_MDIX)
+ *val |= KSZ886X_BMCR_HP_MDIX;
+
+ if (restart & PORT_FORCE_MDIX)
+ *val |= KSZ886X_BMCR_FORCE_MDI;
+
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+
+ if (restart & PORT_TX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_TRANSMIT;
+
+ if (restart & PORT_LED_OFF)
+ *val |= KSZ886X_BMCR_DISABLE_LED;
+
+ return 0;
+}
+
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- u8 restart, speed, ctrl, link;
+ u8 ctrl, link, val1, val2;
int processed = true;
const u16 *regs;
- u8 val1, val2;
u16 data = 0;
- u8 p = phy;
+ u16 p = phy;
int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz8_r_phy_bmcr(dev, p, &data);
if (ret)
return ret;
-
- if (restart & PORT_PHY_LOOPBACK)
- data |= BMCR_LOOPBACK;
- if (ctrl & PORT_FORCE_100_MBIT)
- data |= BMCR_SPEED100;
- if (ksz_is_ksz88x3(dev)) {
- if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= BMCR_ANENABLE;
- } else {
- if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= BMCR_ANENABLE;
- }
- if (restart & PORT_POWER_DOWN)
- data |= BMCR_PDOWN;
- if (restart & PORT_AUTO_NEG_RESTART)
- data |= BMCR_ANRESTART;
- if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= BMCR_FULLDPLX;
- if (speed & PORT_HP_MDIX)
- data |= KSZ886X_BMCR_HP_MDIX;
- if (restart & PORT_FORCE_MDIX)
- data |= KSZ886X_BMCR_FORCE_MDI;
- if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
- if (restart & PORT_TX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
- if (restart & PORT_LED_OFF)
- data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
@@ -860,113 +974,137 @@ static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val)
return ret;
}
-int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+/**
+ * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be configured.
+ * @val: The register value to be written.
+ *
+ * This function translates control settings from a MIIM PHY Basic mode control
+ * register into their corresponding hardware register bit values for the SMI
+ * interface.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val)
{
- u8 restart, speed, ctrl, data;
- const u16 *regs;
- u8 p = phy;
+ u8 restart, speed, ctrl, restart_mask;
+ const u16 *regs = dev->info->regs;
int ret;
- regs = dev->info->regs;
+ /* Do not support PHY reset function. */
+ if (val & BMCR_RESET)
+ return 0;
- switch (reg) {
- case MII_BMCR:
+ speed = 0;
+ if (val & KSZ886X_BMCR_HP_MDIX)
+ speed |= PORT_HP_MDIX;
- /* Do not support PHY reset function. */
- if (val & BMCR_RESET)
- break;
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
+ ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed);
+ if (ret)
+ return ret;
- data = speed;
- if (val & KSZ886X_BMCR_HP_MDIX)
- data |= PORT_HP_MDIX;
- else
- data &= ~PORT_HP_MDIX;
+ ctrl = 0;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_DISABLE;
- if (data != speed) {
- ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- if (ret)
- return ret;
- }
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[port].fiber)
+ ctrl |= PORT_AUTO_NEG_DISABLE;
+ }
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- if (ret)
- return ret;
+ if (val & BMCR_SPEED100)
+ ctrl |= PORT_FORCE_100_MBIT;
- data = ctrl;
- if (ksz_is_ksz88x3(dev)) {
- if ((val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_ENABLE;
- else
- data &= ~PORT_AUTO_NEG_ENABLE;
- } else {
- if (!(val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_DISABLE;
- else
- data &= ~PORT_AUTO_NEG_DISABLE;
-
- /* Fiber port does not support auto-negotiation. */
- if (dev->ports[p].fiber)
- data |= PORT_AUTO_NEG_DISABLE;
- }
+ if (val & BMCR_FULLDPLX)
+ ctrl |= PORT_FORCE_FULL_DUPLEX;
- if (val & BMCR_SPEED100)
- data |= PORT_FORCE_100_MBIT;
- else
- data &= ~PORT_FORCE_100_MBIT;
- if (val & BMCR_FULLDPLX)
- data |= PORT_FORCE_FULL_DUPLEX;
- else
- data &= ~PORT_FORCE_FULL_DUPLEX;
+ ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT |
+ /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same
+ * bit
+ */
+ PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl);
+ if (ret)
+ return ret;
- if (data != ctrl) {
- ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- if (ret)
- return ret;
- }
+ restart = 0;
+ restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART |
+ PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX;
+
+ if (val & KSZ886X_BMCR_DISABLE_LED)
+ restart |= PORT_LED_OFF;
+
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
+ restart |= PORT_TX_DISABLE;
+
+ if (val & BMCR_ANRESTART)
+ restart |= PORT_AUTO_NEG_RESTART;
+
+ if (val & BMCR_PDOWN)
+ restart |= PORT_POWER_DOWN;
+
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
+ restart |= PORT_AUTO_MDIX_DISABLE;
+
+ if (val & KSZ886X_BMCR_FORCE_MDI)
+ restart |= PORT_FORCE_MDIX;
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ksz_is_ksz88x3(dev)) {
+ restart_mask |= KSZ8873_PORT_PHY_LOOPBACK;
+
+ if (val & BMCR_LOOPBACK)
+ restart |= KSZ8873_PORT_PHY_LOOPBACK;
+ } else {
+ ret = ksz879x_set_loopback(dev, port, val);
if (ret)
return ret;
+ }
- data = restart;
- if (val & KSZ886X_BMCR_DISABLE_LED)
- data |= PORT_LED_OFF;
- else
- data &= ~PORT_LED_OFF;
- if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
- data |= PORT_TX_DISABLE;
- else
- data &= ~PORT_TX_DISABLE;
- if (val & BMCR_ANRESTART)
- data |= PORT_AUTO_NEG_RESTART;
- else
- data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & BMCR_PDOWN)
- data |= PORT_POWER_DOWN;
- else
- data &= ~PORT_POWER_DOWN;
- if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
- data |= PORT_AUTO_MDIX_DISABLE;
- else
- data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & KSZ886X_BMCR_FORCE_MDI)
- data |= PORT_FORCE_MDIX;
- else
- data &= ~PORT_FORCE_MDIX;
- if (val & BMCR_LOOPBACK)
- data |= PORT_PHY_LOOPBACK;
- else
- data &= ~PORT_PHY_LOOPBACK;
+ return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask,
+ restart);
+}
- if (data != restart) {
- ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
- data);
- if (ret)
- return ret;
- }
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ const u16 *regs;
+ u8 ctrl, data;
+ u16 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz8_w_phy_bmcr(dev, p, val);
+ if (ret)
+ return ret;
break;
case MII_ADVERTISE:
ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index beca974e0171..7c9341ef73b0 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -265,6 +265,7 @@
#define PORT_AUTO_MDIX_DISABLE BIT(2)
#define PORT_FORCE_MDIX BIT(1)
#define PORT_MAC_LOOPBACK BIT(0)
+#define KSZ8873_PORT_PHY_LOOPBACK BIT(0)
#define REG_PORT_1_STATUS_2 0x1E
#define REG_PORT_2_STATUS_2 0x2E
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index cac4a607e54a..82bebee4615c 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 245dfb7a7a31..2b510f150dd8 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1476,6 +1476,39 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.gbit_capable = {true, true, true},
},
+ [KSZ8567] = {
+ .chip_id = KSZ8567_CHIP_ID,
+ .dev_name = "KSZ8567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {false, false, false, false, false,
+ true, true},
+ },
+
[KSZ9567] = {
.chip_id = KSZ9567_CHIP_ID,
.dev_name = "KSZ9567",
@@ -1864,6 +1897,29 @@ static void ksz_get_strings(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_update_port_member - Adjust port forwarding rules based on STP state and
+ * isolation settings.
+ * @dev: A pointer to the struct ksz_device representing the device.
+ * @port: The port number to adjust.
+ *
+ * This function dynamically adjusts the port membership configuration for a
+ * specified port and other device ports, based on Spanning Tree Protocol (STP)
+ * states and port isolation settings. Each port, including the CPU port, has a
+ * membership register, represented as a bitfield, where each bit corresponds
+ * to a port number. A set bit indicates permission to forward frames to that
+ * port. This function iterates over all ports, updating the membership register
+ * to reflect current forwarding permissions:
+ *
+ * 1. Forwards frames only to ports that are part of the same bridge group and
+ * in the BR_STATE_FORWARDING state.
+ * 2. Takes into account the isolation status of ports; ports in the
+ * BR_STATE_FORWARDING state with BR_ISOLATED configuration will not forward
+ * frames to each other, even if they are in the same bridge group.
+ * 3. Ensures that the CPU port is included in the membership based on its
+ * upstream port configuration, allowing for management and control traffic
+ * to flow as required.
+ */
static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -1892,7 +1948,14 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
- if (p->stp_state == BR_STATE_FORWARDING) {
+ /* At this point we know that "port" and "other" port [i] are in
+ * the same bridge group and that "other" port [i] is in the
+ * forwarding STP state. If "port" is also in the forwarding STP
+ * state, allow forwarding from port [port] to port [i], unless
+ * both ports are isolated.
+ */
+ if (p->stp_state == BR_STATE_FORWARDING &&
+ !(p->isolated && other_p->isolated)) {
val |= BIT(port);
port_member |= BIT(i);
}
@@ -1911,8 +1974,19 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
third_p = &dev->ports[j];
if (third_p->stp_state != BR_STATE_FORWARDING)
continue;
+
third_dp = dsa_to_port(ds, j);
- if (dsa_port_bridge_same(other_dp, third_dp))
+
+ /* Now we are updating the relation of the "other" port [i] to
+ * the "third" port [j]. We already know that both "other"
+ * port [i] and "third" port [j] are in the forwarding STP
+ * state. We still need to check that they are in the same
+ * bridge group and not isolated from each other before
+ * allowing forwarding from port [i] to port [j].
+ */
+ if (dsa_port_bridge_same(other_dp, third_dp) &&
+ !(other_p->isolated && third_p->isolated))
val |= BIT(j);
}
@@ -2185,6 +2259,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
return ksz_irq_common_setup(dev, pirq);
}
+static int ksz_parse_drive_strength(struct ksz_device *dev);
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -2206,6 +2282,10 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ ret = ksz_parse_drive_strength(dev);
+ if (ret)
+ return ret;
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
@@ -2649,6 +2729,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2664,7 +2745,7 @@ static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -2677,8 +2758,12 @@ static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
- if (flags.mask & BR_LEARNING) {
- p->learning = !!(flags.val & BR_LEARNING);
+ if (flags.mask & (BR_LEARNING | BR_ISOLATED)) {
+ if (flags.mask & BR_LEARNING)
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ if (flags.mask & BR_ISOLATED)
+ p->isolated = !!(flags.val & BR_ISOLATED);
/* Make the change take effect immediately */
ksz_port_stp_state_set(ds, port, p->stp_state);
@@ -2705,7 +2790,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
dev->chip_id == KSZ9563_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
- if (dev->chip_id == KSZ9477_CHIP_ID ||
+ if (dev->chip_id == KSZ8567_CHIP_ID ||
+ dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
@@ -2813,6 +2899,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8830_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2839,6 +2926,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2852,7 +2940,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
}
static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
int ret;
@@ -2872,7 +2960,7 @@ static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
int ret;
@@ -3183,6 +3271,7 @@ static int ksz_switch_detect(struct ksz_device *dev)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case KSZ9567_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
case LAN9372_CHIP_ID:
@@ -3220,6 +3309,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -3239,6 +3329,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4142,6 +4233,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4242,10 +4334,6 @@ int ksz_switch_register(struct ksz_device *dev)
for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
- ret = ksz_parse_drive_strength(dev);
- if (ret)
- return ret;
-
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 15612101a155..40c11b0d6b62 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -110,6 +110,7 @@ struct ksz_switch_macaddr {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
+ bool isolated;
int stp_state;
struct phy_device phydev;
@@ -187,6 +188,7 @@ struct ksz_device {
/* List of supported models */
enum ksz_model {
KSZ8563,
+ KSZ8567,
KSZ8795,
KSZ8794,
KSZ8765,
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 6f6d878e742c..c8166fb440ab 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -165,6 +165,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
@@ -204,6 +208,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz9893" },
{ "ksz9563" },
{ "ksz8563" },
+ { "ksz8567" },
{ "ksz9567" },
{ "lan9370" },
{ "lan9371" },
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 088533663b83..fa3ee85a99c1 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -81,17 +81,14 @@ static const struct regmap_bus mt7530_regmap_bus = {
};
static int
-mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
+mt7531_create_sgmii(struct mt7530_priv *priv)
{
struct regmap_config *mt7531_pcs_config[2] = {};
struct phylink_pcs *pcs;
struct regmap *regmap;
int i, ret = 0;
- /* MT7531AE has two SGMII units for port 5 and port 6
- * MT7531BE has only one SGMII unit for port 6
- */
- for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
+ for (i = priv->p5_sgmii ? 0 : 1; i < 2; i++) {
mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
sizeof(struct regmap_config),
GFP_KERNEL);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3c1f657593a8..678b51f9cea6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -414,92 +414,57 @@ mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
}
/* Setup port 6 interface mode and TRGMII TX circuit */
-static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, xtal;
-
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ u32 ncpo1, ssc_delta, xtal;
- if (xtal == HWTRAP_XTAL_20MHZ) {
- dev_err(priv->dev,
- "%s: MT7530 with a 20MHz XTAL is not supported!\n",
- __func__);
- return -EINVAL;
- }
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- trgint = 0;
- break;
- case PHY_INTERFACE_MODE_TRGMII:
- trgint = 1;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
- if (priv->id == ID_MT7621) {
- /* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x0a00;
- } else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x1400;
- }
- break;
- default:
- dev_err(priv->dev, "xMII interface %d not supported\n",
- interface);
- return -EINVAL;
+ if (interface == PHY_INTERFACE_MODE_RGMII) {
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(0));
+ return;
}
- mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
- P6_INTF_MODE(trgint));
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- if (trgint) {
- /* Disable the MT7530 TRGMII clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
- /* Enable the MT7530 TRGMII clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0640;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x1400;
}
- return 0;
-}
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
+ RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
+ RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
+ RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
-{
- u32 val;
-
- val = mt7530_read(priv, MT7531_TOP_SIG_SR);
-
- return (val & PAD_DUAL_SGMII_EN) != 0;
-}
-
-static int
-mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
-{
- return 0;
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
}
static void
@@ -510,9 +475,6 @@ mt7531_pll_setup(struct mt7530_priv *priv)
u32 xtal;
u32 val;
- if (mt7531_dual_sgmii_supported(priv))
- return;
-
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
hwstrap = mt7530_read(priv, MT7531_HWTRAP);
@@ -920,8 +882,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
- case P5_INTF_SEL_GMAC5_SGMII:
- return "GMAC5_SGMII";
default:
return "unknown";
}
@@ -956,13 +916,8 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
val &= ~MHWTRAP_P5_DIS;
break;
- case P5_DISABLED:
- interface = PHY_INTERFACE_MODE_NA;
- break;
default:
- dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
- priv->p5_intf_sel);
- goto unlock_exit;
+ break;
}
/* Setup RGMII settings */
@@ -992,9 +947,6 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
- priv->p5_interface = interface;
-
-unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
@@ -1014,18 +966,10 @@ mt753x_trap_frames(struct mt7530_priv *priv)
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}
-static int
+static void
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- int ret;
-
- /* Setup max capability of CPU port at first */
- if (priv->info->cpu_port_config) {
- ret = priv->info->cpu_port_config(ds, port);
- if (ret)
- return ret;
- }
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
@@ -1035,10 +979,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
- /* Set CPU port number */
- if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
- mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
-
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
@@ -1055,8 +995,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
-
- return 0;
}
static int
@@ -1080,7 +1018,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
@@ -1100,7 +1037,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
@@ -2107,7 +2043,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
}
/* This register must be set for MT7530 to properly fire interrupts */
- if (priv->id != ID_MT7531)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
@@ -2146,24 +2082,40 @@ mt7530_free_irq_common(struct mt7530_priv *priv)
static void
mt7530_free_irq(struct mt7530_priv *priv)
{
- mt7530_free_mdio_irq(priv);
+ struct device_node *mnp, *np = priv->dev->of_node;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ mt7530_free_mdio_irq(priv);
+ of_node_put(mnp);
+
mt7530_free_irq_common(priv);
}
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
+ struct device_node *mnp, *np = priv->dev->of_node;
struct dsa_switch *ds = priv->ds;
struct device *dev = priv->dev;
struct mii_bus *bus;
static int idx;
- int ret;
+ int ret = 0;
+
+ mnp = of_get_child_by_name(np, "mdio");
+
+ if (mnp && !of_device_is_available(mnp))
+ goto out;
bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!mnp)
+ ds->user_mii_bus = bus;
- ds->user_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
@@ -2174,16 +2126,18 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_setup_mdio_irq(priv);
- ret = devm_mdiobus_register(dev, bus);
+ ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_free_mdio_irq(priv);
}
+out:
+ of_node_put(mnp);
return ret;
}
@@ -2238,6 +2192,12 @@ mt7530_setup(struct dsa_switch *ds)
}
}
+ /* Disable LEDs before reset to prevent the MT7530 from sampling a
+ * potentially incorrect HT_XTAL_FSEL value.
+ */
+ mt7530_write(priv, MT7530_LED_EN, 0);
+ usleep_range(1000, 1100);
+
/* Reset whole chip through gpio pin or memory-mapped registers for
* different type of hardware
*/
@@ -2267,6 +2227,12 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "MT7530 with a 20MHz XTAL is not supported!\n");
+ return -EINVAL;
+ }
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
@@ -2289,14 +2255,18 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2305,9 +2275,7 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2326,16 +2294,13 @@ mt7530_setup(struct dsa_switch *ds)
return ret;
/* Setup port 5 */
- priv->p5_intf_sel = P5_DISABLED;
- interface = PHY_INTERFACE_MODE_NA;
-
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
- if (ret && ret != -ENODEV)
- return ret;
} else {
- /* Scan the ethernet nodes. look for GMAC1, lookup used phy */
+ /* Scan the ethernet nodes, look for GMAC1 and look up the PHY
+ * it uses. Set priv->p5_intf_sel to the appropriate value if
+ * PHY muxing is detected.
+ */
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
@@ -2366,6 +2331,10 @@ mt7530_setup(struct dsa_switch *ds)
of_node_put(phy_node);
break;
}
+
+ if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
+ priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ mt7530_setup_port5(ds, interface);
}
#ifdef CONFIG_GPIOLIB
@@ -2376,8 +2345,6 @@ mt7530_setup(struct dsa_switch *ds)
}
#endif /* CONFIG_GPIOLIB */
- mt7530_setup_port5(ds, interface);
-
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2402,6 +2369,12 @@ mt7531_setup_common(struct dsa_switch *ds)
UNU_FFP_MASK);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2412,9 +2385,7 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2474,38 +2445,35 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
- /* all MACs must be forced link-down before sw reset */
+ /* MT7531AE has two SGMII units, one for port 5 and one for port 6.
+ * MT7531BE has only one SGMII unit, which is for port 6.
+ */
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
+
+ /* Force link down on all ports before internal reset */
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
/* Reset the switch through internal reset */
- mt7530_write(priv, MT7530_SYS_CTRL,
- SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
- SYS_CTRL_REG_RST);
-
- mt7531_pll_setup(priv);
-
- if (mt7531_dual_sgmii_supported(priv)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
+ if (!priv->p5_sgmii) {
+ mt7531_pll_setup(priv);
+ } else {
/* Let ds->user_mii_bus be able to access external phy. */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
- } else {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
}
- dev_dbg(ds->dev, "P5 support %s interface\n",
- p5_intf_modes(priv->p5_intf_sel));
+
+ if (!dsa_is_unused_port(ds, 5))
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Let phylink decide the interface later. */
- priv->p5_interface = PHY_INTERFACE_MODE_NA;
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
/* Enable PHY core PLL, since phy_device has not yet been created
* provided for phy_[read,write]_mmd_indirect is called, we provide
* our own mt7531_ind_mmd_phy_[read,write] to complete this
@@ -2535,12 +2503,14 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ /* Port 5 supports rgmii with delays, mii, and gmii. */
+ case 5:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
@@ -2548,7 +2518,8 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
break;
- case 6: /* 1st cpu port */
+ /* Port 6 supports rgmii and trgmii. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_RGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_TRGMII,
@@ -2557,30 +2528,30 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
-{
- return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
-}
-
static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port)) {
+ /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
+ * MT7531AE.
+ */
+ case 5:
+ if (!priv->p5_sgmii) {
phy_interface_set_rgmii(config->supported_interfaces);
break;
}
fallthrough;
- case 6: /* 1st cpu port supports sgmii/8023z only */
+ /* Port 6 supports sgmii/802.3z. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
@@ -2596,14 +2567,14 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- phy_interface_zero(config->supported_interfaces);
-
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
+ /* Port 6 is connected to the SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -2612,41 +2583,24 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static int
-mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->pad_setup(ds, state->interface);
-}
-
-static int
+static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- /* Only need to setup port5. */
- if (port != 5)
- return 0;
-
- mt7530_setup_port5(priv->ds, interface);
-
- return 0;
+ if (port == 5)
+ mt7530_setup_port5(priv->ds, interface);
+ else if (port == 6)
+ mt7530_setup_port6(priv->ds, interface);
}
-static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface,
- struct phy_device *phydev)
+static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
{
u32 val;
- if (!mt7531_is_rgmii_port(priv, port)) {
- dev_err(priv->dev, "RGMII mode is not available for port %d\n",
- port);
- return -EINVAL;
- }
-
val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
val |= GP_CLK_EN;
val &= ~GP_MODE_MASK;
@@ -2674,31 +2628,14 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
case PHY_INTERFACE_MODE_RGMII_ID:
break;
default:
- return -EINVAL;
+ break;
}
}
- mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
-
- return 0;
-}
-
-static bool mt753x_is_mac_port(u32 port)
-{
- return (port == 5 || port == 6);
-}
-
-static int
-mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- if (dsa_is_cpu_port(ds, port) &&
- interface == PHY_INTERFACE_MODE_INTERNAL)
- return 0;
- return -EINVAL;
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
}
-static int
+static void
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
@@ -2706,39 +2643,11 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct phy_device *phydev;
struct dsa_port *dp;
- if (!mt753x_is_mac_port(port)) {
- dev_err(priv->dev, "port %d is not a MAC port\n", port);
- return -EINVAL;
- }
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- return mt7531_rgmii_setup(priv, port, interface, phydev);
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* handled in SGMII PCS driver */
- return 0;
- default:
- return -EINVAL;
+ mt7531_rgmii_setup(priv, port, interface, phydev);
}
-
- return -EINVAL;
-}
-
-static int
-mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_config(ds, port, mode, state->interface);
}
static struct phylink_pcs *
@@ -2764,54 +2673,13 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
-
- switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
-
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- if (priv->p5_intf_sel != P5_DISABLED)
- priv->p5_interface = state->interface;
- break;
- case 6: /* 1st cpu port */
- if (priv->p6_interface == state->interface)
- break;
-
- mt753x_pad_setup(ds, state);
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- priv->p6_interface = state->interface;
- break;
- default:
-unsupported:
- dev_err(ds->dev, "%s: unsupported %s port: %i\n",
- __func__, phy_modes(state->interface), port);
- return;
- }
-
- mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
- mcr_new = mcr_cur;
- mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
- mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+ if ((port == 5 || port == 6) && priv->info->mac_port_config)
+ priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mcr_new |= PMCR_EXT_PHY;
-
- if (mcr_new != mcr_cur)
- mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+ mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
}
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -2835,17 +2703,10 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
- /* MT753x MAC works in 1G full duplex mode for all up-clocked
- * variants.
- */
- if (interface == PHY_INTERFACE_MODE_TRGMII ||
- (phy_interface_mode_is_8023z(interface))) {
- speed = SPEED_1000;
- duplex = DUPLEX_FULL;
- }
-
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
mcr |= PMCR_FORCE_SPEED_1000;
break;
case SPEED_100:
@@ -2863,6 +2724,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
mcr |= PMCR_FORCE_EEE1G;
break;
case SPEED_100:
@@ -2874,63 +2736,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static int
-mt7531_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
- phy_interface_t interface;
- int speed;
- int ret;
-
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- interface = PHY_INTERFACE_MODE_RGMII;
- else
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p5_interface = interface;
- break;
- case 6:
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p6_interface = interface;
- break;
- default:
- return -EINVAL;
- }
-
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- speed = SPEED_2500;
- else
- speed = SPEED_1000;
-
- ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
- if (ret)
- return ret;
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
- speed, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
-static int
-mt7988_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
-
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED,
- PHY_INTERFACE_MODE_INTERNAL, NULL,
- SPEED_10000, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -3013,17 +2818,9 @@ static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int i, ret;
-
- /* Initialise the PCS devices */
- for (i = 0; i < priv->ds->num_ports; i++) {
- priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
- priv->pcs[i].priv = priv;
- priv->pcs[i].port = i;
- }
+ int ret = priv->info->sw_setup(ds);
+ int i;
- ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3035,8 +2832,16 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].pcs.neg_mode = true;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+
if (priv->create_sgmii) {
- ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
+ ret = priv->create_sgmii(priv);
if (ret && priv->irq)
mt7530_free_irq(priv);
}
@@ -3045,7 +2850,7 @@ mt753x_setup(struct dsa_switch *ds)
}
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
@@ -3057,7 +2862,7 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
@@ -3074,9 +2879,34 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt753x_conduit_state_change(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational)
{
- return 0;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
+ struct mt7530_priv *priv = ds->priv;
+ int val = 0;
+ u8 mask;
+
+ /* Set the CPU port that the MT7530 traps frames to. Trapped frames
+ * will be forwarded to the numerically smallest CPU port whose
+ * conduit interface is up.
+ */
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ mask = BIT(cpu_dp->index);
+
+ if (operational)
+ priv->active_cpu_ports |= mask;
+ else
+ priv->active_cpu_ports &= ~mask;
+
+ if (priv->active_cpu_ports)
+ val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+
+ mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
}
static int mt7988_setup(struct dsa_switch *ds)
@@ -3129,6 +2959,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
+ .conduit_state_change = mt753x_conduit_state_change,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
@@ -3141,7 +2972,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3153,7 +2983,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3165,8 +2994,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7531_pad_setup,
- .cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
},
@@ -3178,10 +3005,7 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7988_pad_setup,
- .cpu_port_config = mt7988_cpu_port_config,
.mac_port_get_caps = mt7988_mac_port_get_caps,
- .mac_port_config = mt7988_mac_config,
},
};
EXPORT_SYMBOL_GPL(mt753x_table);
@@ -3208,10 +3032,8 @@ mt7530_probe_common(struct mt7530_priv *priv)
/* Sanity check if these required device operations are filled
* properly.
*/
- if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
- !priv->info->mac_port_get_caps ||
- !priv->info->mac_port_config)
+ if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
+ !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
return -EINVAL;
priv->id = priv->info->id;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 17e42d30fff4..a71166e0a7fc 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -41,8 +41,8 @@ enum mt753x_id {
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
#define CPU_EN BIT(7)
-#define CPU_PORT(x) ((x) << 4)
-#define CPU_MASK (0xf << 4)
+#define CPU_PORT_MASK GENMASK(6, 4)
+#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
#define MIRROR_EN BIT(3)
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
@@ -304,20 +304,11 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_DPX | \
MT7531_FORCE_RX_FC | \
MT7531_FORCE_TX_FC)
-#define PMCR_FORCE_MODE_ID(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_FORCE_MODE : PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
- PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
- PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_SPEED_1000 | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
@@ -683,11 +674,10 @@ struct mt7530_port {
/* Port 5 interface select definitions */
enum p5_interface_select {
- P5_DISABLED = 0,
+ P5_DISABLED,
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
- P5_INTF_SEL_GMAC5_SGMII,
};
struct mt7530_priv;
@@ -705,8 +695,6 @@ struct mt753x_pcs {
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @pad_setup: Holding the way setting up the bus pad for a certain
- * MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
* port
* @mac_port_validate: Holding the way to set addition validate type for a
@@ -727,16 +715,14 @@ struct mt753x_info {
int regnum);
int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
int regnum, u16 val);
- int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
- int (*cpu_port_config)(struct dsa_switch *ds, int port);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
+ void (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -754,12 +740,14 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p6_interface Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_sgmii: Flag indicating whether port 5 of the MT7531 switch
+ * has an SGMII unit
* @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
+ * @active_cpu_ports: Holding the active CPU ports
*/
struct mt7530_priv {
struct device *dev;
@@ -773,9 +761,8 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- phy_interface_t p6_interface;
- phy_interface_t p5_interface;
- unsigned int p5_intf_sel;
+ enum p5_interface_select p5_intf_sel;
+ bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
@@ -785,7 +772,8 @@ struct mt7530_priv {
int irq;
struct irq_domain *irq_domain;
u32 irq_enable;
- int (*create_sgmii)(struct mt7530_priv *priv, bool dual_sgmii);
+ int (*create_sgmii)(struct mt7530_priv *priv);
+ u8 active_cpu_ports;
};
struct mt7530_hw_vlan_entry {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 614cabb5c1b0..9ed1821184ec 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1451,14 +1451,14 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
@@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
int err;
if (!chip->info->ops->phy_read_c45)
- return 0xffff;
+ return -ENODEV;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
@@ -3712,7 +3712,10 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (external) {
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ if (chip->info->family == MV88E6XXX_FAMILY_6393)
+ err = mv88e6393x_g2_scratch_gpio_set_smi(chip, true);
+ else
+ err = mv88e6390_g2_scratch_gpio_set_smi(chip, true);
mv88e6xxx_reg_unlock(chip);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index d9434f7cae53..82f9b410de0b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -378,8 +378,10 @@ extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index a9d6e40321a2..61ab6cc4fbfc 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
};
/**
- * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * mv88e6390_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
@@ -248,7 +248,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
* an external SMI interface, or they may be made free for other
* GPIO uses.
*/
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external)
{
int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
@@ -291,6 +291,37 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
}
/**
+ * mv88e6393x_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * MV88E6191X/6193X/6393X GPIO pins 9 and 10 can be configured as an
+ * external SMI interface or as regular GPIOs.
+ *
+ * However, these chips use a different register layout than the one
+ * handled by mv88e6390_g2_scratch_gpio_set_smi().
+ */
+
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ if (external)
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
+
+/**
* mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
* @chip: chip private data
* @port: port number to check for serdes
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 4d677f836807..5a27d047a38e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -95,7 +95,7 @@ static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
}
}
-static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
@@ -137,6 +137,7 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
+ mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 7a864329cb72..dab66c0c6f64 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -950,15 +950,15 @@ qca8k_mdio_register(struct qca8k_priv *priv)
struct device *dev = ds->dev;
struct device_node *mdio;
struct mii_bus *bus;
- int err = 0;
+ int ret = 0;
mdio = of_get_child_by_name(dev->of_node, "mdio");
if (mdio && !of_device_is_available(mdio))
- goto out;
+ goto out_put_node;
bus = devm_mdiobus_alloc(dev);
if (!bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out_put_node;
}
@@ -984,12 +984,11 @@ qca8k_mdio_register(struct qca8k_priv *priv)
bus->write = qca8k_legacy_mdio_write;
}
- err = devm_of_mdiobus_register(dev, bus, mdio);
+ ret = devm_of_mdiobus_register(dev, bus, mdio);
out_put_node:
of_node_put(mdio);
-out:
- return err;
+ return ret;
}
static int
@@ -998,7 +997,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
- int err;
+ int ret;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
@@ -1008,11 +1007,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", &reg);
- if (err) {
+ ret = of_property_read_u32(port, "reg", &reg);
+ if (ret) {
of_node_put(port);
of_node_put(ports);
- return err;
+ return ret;
}
if (!dsa_is_user_port(priv->ds, reg))
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 2358cd399c7e..7f80035c5441 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -534,7 +534,7 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
struct qca8k_priv *priv = ds->priv;
@@ -558,7 +558,7 @@ exit:
}
int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index c8785c36c54e..2184d8d2d5a9 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -518,8 +518,8 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
-int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 060165a85fb7..6989972eebc3 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -16,37 +16,29 @@ menuconfig NET_DSA_REALTEK
if NET_DSA_REALTEK
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO interface driver"
+ bool "Realtek MDIO interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI interface driver"
+ bool "Realtek SMI interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
- tristate "Realtek RTL8365MB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8365MB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
- tristate "Realtek RTL8366RB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8366RB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
Select to enable support for Realtek RTL8366RB.
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 0aab57252a7c..35491dc20d6d 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -1,6 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK) += realtek_dsa.o
+realtek_dsa-objs := rtl83xx.o
+
+ifdef CONFIG_NET_DSA_REALTEK_MDIO
+realtek_dsa-objs += realtek-mdio.o
+endif
+
+ifdef CONFIG_NET_DSA_REALTEK_SMI
+realtek_dsa-objs += realtek-smi.o
+endif
+
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 292e6d087e8b..04b758e5a680 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -25,6 +25,8 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Read/write via mdiobus */
#define REALTEK_MDIO_CTRL0_REG 31
@@ -99,192 +101,87 @@ out_unlock:
return ret;
}
-static void realtek_mdio_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_mdio_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_mdio_info = {
.reg_read = realtek_mdio_read,
.reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_mdio_lock,
- .unlock = realtek_mdio_unlock,
};
-static const struct regmap_config realtek_mdio_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_mdio_read,
- .reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_mdio_probe(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_probe() - Probe an MDIO device for an MDIO-connected switch
+ * @mdiodev: mdio_device to probe on.
+ *
+ * This function should be used as the .probe in an mdio_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to MDIO-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_mdio_probe(struct mdio_device *mdiodev)
{
- struct realtek_priv *priv;
struct device *dev = &mdiodev->dev;
- const struct realtek_variant *var;
- struct regmap_config rc;
- struct device_node *np;
+ struct realtek_priv *priv;
int ret;
- var = of_device_get_match_data(dev);
- if (!var)
- return -EINVAL;
-
- priv = devm_kzalloc(&mdiodev->dev,
- size_add(sizeof(*priv), var->chip_data_sz),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- mutex_init(&priv->map_lock);
+ priv = rtl83xx_probe(dev, &realtek_mdio_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- rc = realtek_mdio_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_mdio_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- priv->mdio_addr = mdiodev->addr;
priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
+ priv->mdio_addr = mdiodev->addr;
priv->write_reg_noack = realtek_mdio_write;
- np = dev->of_node;
-
- dev_set_drvdata(dev, priv);
-
- /* TODO: if power is software controlled, set up any regulators here */
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
-
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
- priv->ds->ops = var->ds_ops_mdio;
-
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ rtl83xx_remove(priv);
return ret;
}
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA);
-static void realtek_mdio_remove(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device to be removed.
+ *
+ * This function should be used as the .remove in an mdio_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_remove(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, REALTEK_DSA);
-static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_shutdown() - Shut down the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device shutting down.
+ *
+ * This function should be used as the .shutdown in an mdio_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_shutdown(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_mdio_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
-
-static struct mdio_driver realtek_mdio_driver = {
- .mdiodrv.driver = {
- .name = "realtek-mdio",
- .of_match_table = realtek_mdio_of_match,
- },
- .probe = realtek_mdio_probe,
- .remove = realtek_mdio_remove,
- .shutdown = realtek_mdio_shutdown,
-};
-
-mdio_module_driver(realtek_mdio_driver);
-
-MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/realtek-mdio.h b/drivers/net/dsa/realtek/realtek-mdio.h
new file mode 100644
index 000000000000..ee70f6a5b8ff
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_MDIO_H
+#define _REALTEK_MDIO_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO)
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return mdio_driver_register(drv);
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+ mdio_driver_unregister(drv);
+}
+
+int realtek_mdio_probe(struct mdio_device *mdiodev);
+void realtek_mdio_remove(struct mdio_device *mdiodev);
+void realtek_mdio_shutdown(struct mdio_device *mdiodev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+}
+
+static inline int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+}
+
+static inline void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+#endif /* _REALTEK_MDIO_H */
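For context, the conditional stubs in realtek-mdio.h (and the matching ones in realtek-smi.h below) let a chip subdriver register both interface front-ends unconditionally; whichever interface is disabled in Kconfig compiles down to a no-op. A minimal sketch of that pattern, mirroring the rtl8365mb/rtl8366rb module init added later in this patch (the example_* driver structs are hypothetical placeholders):

static int example_subdriver_init(void)
{
	int ret;

	/* No-op returning 0 when CONFIG_NET_DSA_REALTEK_MDIO is disabled */
	ret = realtek_mdio_driver_register(&example_mdio_driver);
	if (ret)
		return ret;

	/* Likewise a stub when CONFIG_NET_DSA_REALTEK_SMI is disabled */
	ret = realtek_smi_driver_register(&example_smi_driver);
	if (ret) {
		realtek_mdio_driver_unregister(&example_mdio_driver);
		return ret;
	}

	return 0;
}
module_init(example_subdriver_init);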
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 755546ed8db6..88590ae95a75 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -31,7 +31,6 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
@@ -40,12 +39,14 @@
#include <linux/if_bridge.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "rtl83xx.h"
#define REALTEK_SMI_ACK_RETRY_COUNT 5
static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
{
- ndelay(priv->clk_delay);
+ ndelay(priv->variant->clk_delay);
}
static void realtek_smi_start(struct realtek_priv *priv)
@@ -208,7 +209,7 @@ static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
realtek_smi_start(priv);
/* Send READ command */
- ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_read);
if (ret)
goto out;
@@ -249,7 +250,7 @@ static int realtek_smi_write_reg(struct realtek_priv *priv,
realtek_smi_start(priv);
/* Send WRITE command */
- ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_write);
if (ret)
goto out;
@@ -310,258 +311,98 @@ static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
return realtek_smi_read_reg(priv, reg, val);
}
-static void realtek_smi_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_smi_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_smi_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_smi_info = {
.reg_read = realtek_smi_read,
.reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_smi_lock,
- .unlock = realtek_smi_unlock,
};
-static const struct regmap_config realtek_smi_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_read(priv, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_write(priv, addr, regnum, val);
-}
-
-static int realtek_smi_setup_mdio(struct dsa_switch *ds)
-{
- struct realtek_priv *priv = ds->priv;
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(priv->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- priv->user_mii_bus = devm_mdiobus_alloc(priv->dev);
- if (!priv->user_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- priv->user_mii_bus->priv = priv;
- priv->user_mii_bus->name = "SMI user MII";
- priv->user_mii_bus->read = realtek_smi_mdio_read;
- priv->user_mii_bus->write = realtek_smi_mdio_write;
- snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- ds->index);
- priv->user_mii_bus->dev.of_node = mdio_np;
- priv->user_mii_bus->parent = priv->dev;
- ds->user_mii_bus = priv->user_mii_bus;
-
- ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np);
- if (ret) {
- dev_err(priv->dev, "unable to register MDIO bus %s\n",
- priv->user_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
+/**
+ * realtek_smi_probe() - Probe a platform device for an SMI-connected switch
+ * @pdev: platform_device to probe on.
+ *
+ * This function should be used as the .probe in a platform_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to SMI-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_smi_probe(struct platform_device *pdev)
{
- const struct realtek_variant *var;
struct device *dev = &pdev->dev;
struct realtek_priv *priv;
- struct regmap_config rc;
- struct device_node *np;
int ret;
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- mutex_init(&priv->map_lock);
-
- rc = realtek_smi_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_smi_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- priv->dev = dev;
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
- priv->setup_interface = realtek_smi_setup_mdio;
- priv->write_reg_noack = realtek_smi_write_reg_noack;
-
- dev_set_drvdata(dev, priv);
- spin_lock_init(&priv->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
+ priv = rtl83xx_probe(dev, &realtek_smi_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
/* Fetch MDIO pins */
priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdc))
+ if (IS_ERR(priv->mdc)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdc);
+ }
+
priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdio))
+ if (IS_ERR(priv->mdio)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdio);
-
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
}
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
- priv->ds->ops = var->ds_ops_smi;
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
+ rtl83xx_remove(priv);
return ret;
}
+
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA);
-static void realtek_smi_remove(struct platform_device *pdev)
+/**
+ * realtek_smi_remove() - Remove the driver of an SMI-connected switch
+ * @pdev: platform_device to be removed.
+ *
+ * This function should be used as the .remove_new in a platform_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_remove(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
- if (priv->user_mii_bus)
- of_node_put(priv->user_mii_bus->dev.of_node);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, REALTEK_DSA);
-static void realtek_smi_shutdown(struct platform_device *pdev)
+/**
+ * realtek_smi_shutdown() - Shut down the driver of an SMI-connected switch
+ * @pdev: platform_device shutting down.
+ *
+ * This function should be used as the .shutdown in a platform_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_shutdown(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_smi_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = realtek_smi_of_match,
- },
- .probe = realtek_smi_probe,
- .remove_new = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/realtek-smi.h b/drivers/net/dsa/realtek/realtek-smi.h
new file mode 100644
index 000000000000..ea49a2edd3c8
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI)
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return platform_driver_register(drv);
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+ platform_driver_unregister(drv);
+}
+
+int realtek_smi_probe(struct platform_device *pdev);
+void realtek_smi_remove(struct platform_device *pdev);
+void realtek_smi_shutdown(struct platform_device *pdev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+}
+
+static inline int realtek_smi_probe(struct platform_device *pdev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_smi_remove(struct platform_device *pdev)
+{
+}
+
+static inline void realtek_smi_shutdown(struct platform_device *pdev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index 790488e9c667..e0b1aa01337b 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
+#include <linux/reset.h>
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
@@ -48,6 +49,7 @@ struct rtl8366_vlan_4k {
struct realtek_priv {
struct device *dev;
+ struct reset_control *reset_ctl;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
@@ -58,11 +60,10 @@ struct realtek_priv {
struct mii_bus *bus;
int mdio_addr;
- unsigned int clk_delay;
- u8 cmd_read;
- u8 cmd_write;
+ const struct realtek_variant *variant;
+
spinlock_t lock; /* Locks around command writes */
- struct dsa_switch *ds;
+ struct dsa_switch ds;
struct irq_domain *irqdomain;
bool leds_disabled;
@@ -73,7 +74,6 @@ struct realtek_priv {
struct rtl8366_mib_counter *mib_counters;
const struct realtek_ops *ops;
- int (*setup_interface)(struct dsa_switch *ds);
int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
@@ -91,7 +91,6 @@ struct realtek_ops {
int (*detect)(struct realtek_priv *priv);
int (*reset_chip)(struct realtek_priv *priv);
int (*setup)(struct realtek_priv *priv);
- void (*cleanup)(struct realtek_priv *priv);
int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
@@ -116,8 +115,7 @@ struct realtek_ops {
};
struct realtek_variant {
- const struct dsa_switch_ops *ds_ops_smi;
- const struct dsa_switch_ops *ds_ops_mdio;
+ const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index b072045eb154..12665a8a3412 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -101,6 +101,9 @@
#include <linux/if_vlan.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
@@ -206,10 +209,10 @@
#define RTL8365MB_EXT_PORT_MODE_100FX 13
/* External interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT0,EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
- ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ ((_extint) <= 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
@@ -689,7 +692,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -722,7 +725,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
*data = val & 0xFFFF;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -733,7 +736,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -764,7 +767,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return 0;
}
@@ -825,17 +828,6 @@ static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
return 0;
}
-static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8365mb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
-}
-
static const struct rtl8365mb_extint *
rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
{
@@ -878,6 +870,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(priv, port);
+ struct dsa_switch *ds = &priv->ds;
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
@@ -888,7 +881,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
if (!extint)
return -ENODEV;
- dp = dsa_to_port(priv->ds, port);
+ dp = dsa_to_port(ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -1541,6 +1534,7 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1551,7 +1545,7 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1567,12 +1561,13 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
@@ -1971,7 +1966,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
cpu->mask |= BIT(cpu_dp->index);
if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
@@ -1986,7 +1981,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Forward only to the CPU */
@@ -2003,7 +1998,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Set up per-port private data */
p->priv = priv;
@@ -2014,12 +2009,10 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
if (ret)
goto out_teardown_irq;
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
}
/* Start statistics counter polling */
@@ -2113,7 +2106,7 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
@@ -2134,29 +2127,6 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
.port_max_mtu = rtl8365mb_port_max_mtu,
};
-static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
- .get_tag_protocol = rtl8365mb_get_tag_protocol,
- .change_tag_protocol = rtl8365mb_change_tag_protocol,
- .setup = rtl8365mb_setup,
- .teardown = rtl8365mb_teardown,
- .phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
- .phy_read = rtl8365mb_dsa_phy_read,
- .phy_write = rtl8365mb_dsa_phy_write,
- .port_stp_state_set = rtl8365mb_port_stp_state_set,
- .get_strings = rtl8365mb_get_strings,
- .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
- .get_sset_count = rtl8365mb_get_sset_count,
- .get_eth_phy_stats = rtl8365mb_get_phy_stats,
- .get_eth_mac_stats = rtl8365mb_get_mac_stats,
- .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
- .get_stats64 = rtl8365mb_get_stats64,
- .port_change_mtu = rtl8365mb_port_change_mtu,
- .port_max_mtu = rtl8365mb_port_max_mtu,
-};
-
static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
@@ -2164,16 +2134,66 @@ static const struct realtek_ops rtl8365mb_ops = {
};
const struct realtek_variant rtl8365mb_variant = {
- .ds_ops_smi = &rtl8365mb_switch_ops_smi,
- .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
-EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+static const struct of_device_id rtl8365mb_of_match[] = {
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8365mb_of_match);
+
+static struct platform_driver rtl8365mb_smi_driver = {
+ .driver = {
+ .name = "rtl8365mb-smi",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8365mb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8365mb-mdio",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8365mb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8365mb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8365mb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8365mb_init);
+
+static void __exit rtl8365mb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8365mb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+}
+module_exit(rtl8365mb_exit);
MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 59f98d2c8769..7c6520ba3a26 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -34,7 +34,7 @@ int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, REALTEK_DSA);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
@@ -187,7 +187,7 @@ int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, REALTEK_DSA);
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
@@ -217,7 +217,7 @@ int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, REALTEK_DSA);
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
@@ -243,7 +243,7 @@ int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, REALTEK_DSA);
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
@@ -265,7 +265,7 @@ int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, REALTEK_DSA);
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
@@ -290,7 +290,7 @@ int rtl8366_reset_vlan(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, REALTEK_DSA);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -345,7 +345,7 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, REALTEK_DSA);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
@@ -389,7 +389,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, REALTEK_DSA);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -403,7 +403,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
for (i = 0; i < priv->num_mib_counters; i++)
ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, REALTEK_DSA);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
@@ -417,7 +417,7 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
return priv->num_mib_counters;
}
-EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, REALTEK_DSA);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
@@ -441,4 +441,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e3b6a470ca67..e10ae94cf771 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -23,6 +23,9 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -1030,12 +1033,10 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
dev_info(priv->dev, "no interrupt support\n");
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- return -ENODEV;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
}
return 0;
@@ -1650,6 +1651,7 @@ static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
+ struct dsa_switch *ds = &priv->ds;
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
@@ -1674,7 +1676,7 @@ static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
@@ -1718,7 +1720,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
@@ -1746,7 +1748,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
phy, regnum, reg, val);
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -1760,7 +1762,7 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
@@ -1777,22 +1779,11 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
-static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8366rb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
-}
-
static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
@@ -1858,7 +1849,7 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
@@ -1882,32 +1873,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
- .get_tag_protocol = rtl8366_get_tag_protocol,
- .setup = rtl8366rb_setup,
- .phy_read = rtl8366rb_dsa_phy_read,
- .phy_write = rtl8366rb_dsa_phy_write,
- .phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
- .get_strings = rtl8366_get_strings,
- .get_ethtool_stats = rtl8366_get_ethtool_stats,
- .get_sset_count = rtl8366_get_sset_count,
- .port_bridge_join = rtl8366rb_port_bridge_join,
- .port_bridge_leave = rtl8366rb_port_bridge_leave,
- .port_vlan_filtering = rtl8366rb_vlan_filtering,
- .port_vlan_add = rtl8366_vlan_add,
- .port_vlan_del = rtl8366_vlan_del,
- .port_enable = rtl8366rb_port_enable,
- .port_disable = rtl8366rb_port_disable,
- .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
- .port_bridge_flags = rtl8366rb_port_bridge_flags,
- .port_stp_state_set = rtl8366rb_port_stp_state_set,
- .port_fast_age = rtl8366rb_port_fast_age,
- .port_change_mtu = rtl8366rb_change_mtu,
- .port_max_mtu = rtl8366rb_max_mtu,
-};
-
static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
@@ -1925,16 +1890,66 @@ static const struct realtek_ops rtl8366rb_ops = {
};
const struct realtek_variant rtl8366rb_variant = {
- .ds_ops_smi = &rtl8366rb_switch_ops_smi,
- .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
-EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+static const struct of_device_id rtl8366rb_of_match[] = {
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8366rb_of_match);
+
+static struct platform_driver rtl8366rb_smi_driver = {
+ .driver = {
+ .name = "rtl8366rb-smi",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8366rb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8366rb-mdio",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8366rb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8366rb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8366rb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8366rb_init);
+
+static void __exit rtl8366rb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8366rb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+}
+module_exit(rtl8366rb_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
new file mode 100644
index 000000000000..d2e876805393
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+
+#include "realtek.h"
+#include "rtl83xx.h"
+
+/**
+ * rtl83xx_lock() - Locks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function is passed to regmap to be used as the lock function.
+ * It is also used externally to block regmap before executing multiple
+ * operations that must happen in sequence (which will use
+ * realtek_priv.map_nolock instead).
+ *
+ * Context: Can sleep. Holds priv->map_lock lock.
+ * Return: nothing
+ */
+void rtl83xx_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, REALTEK_DSA);
+
+/**
+ * rtl83xx_unlock() - Unlocks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function unlocks the lock acquired by rtl83xx_lock.
+ *
+ * Context: Releases priv->map_lock lock.
+ * Return: nothing
+ */
+void rtl83xx_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, REALTEK_DSA);
+
+static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int rtl83xx_user_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+/**
+ * rtl83xx_setup_user_mdio() - register the user MII bus
+ * @ds: DSA switch associated with this user_mii_bus
+ *
+ * Registers the MDIO bus for built-in Ethernet PHYs, and associates it with
+ * the mandatory 'mdio' child OF node of the switch.
+ *
+ * Context: Can sleep.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret = 0;
+
+ mdio_np = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ bus->priv = priv;
+ bus->name = "Realtek user MII";
+ bus->read = rtl83xx_user_mdio_read;
+ bus->write = rtl83xx_user_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s:user_mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ ret = devm_of_mdiobus_register(priv->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ goto err_put_node;
+ }
+
+ priv->user_mii_bus = bus;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, REALTEK_DSA);
+
+/**
+ * rtl83xx_probe() - probe a Realtek switch
+ * @dev: the device being probed
+ * @interface_info: specific management interface info.
+ *
+ * This function initializes realtek_priv and reads data from the device tree
+ * node. The switch is hard reset if a reset controller or GPIO is provided.
+ *
+ * Context: Can sleep.
+ * Return: Pointer to the realtek_priv or ERR_PTR() in case of failure.
+ *
+ * The realtek_priv pointer does not need to be freed as it is controlled by
+ * devres.
+ */
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info)
+{
+ const struct realtek_variant *var;
+ struct realtek_priv *priv;
+ struct regmap_config rc = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = interface_info->reg_read,
+ .reg_write = interface_info->reg_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = rtl83xx_lock,
+ .unlock = rtl83xx_unlock,
+ };
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return ERR_PTR(-EINVAL);
+
+ priv = devm_kzalloc(dev, size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->map_lock);
+
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ rc.disable_locking = true;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->variant = var;
+ priv->ops = var->ops;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ spin_lock_init(&priv->lock);
+
+ priv->leds_disabled = of_property_read_bool(dev->of_node,
+ "realtek,disable-leds");
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(priv->reset_ctl)) {
+ ret = PTR_ERR(priv->reset_ctl);
+ dev_err_probe(dev, ret, "failed to get reset control\n");
+ return ERR_CAST(priv->reset_ctl);
+ }
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return ERR_CAST(priv->reset);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ if (priv->reset_ctl || priv->reset) {
+ rtl83xx_reset_assert(priv);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ rtl83xx_reset_deassert(priv);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ return priv;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, REALTEK_DSA);
+
+/**
+ * rtl83xx_register_switch() - detect and register a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function first checks the switch chip ID and then registers a DSA
+ * switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_register_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ int ret;
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to detect switch\n");
+ return ret;
+ }
+
+ ds->priv = priv;
+ ds->dev = priv->dev;
+ ds->ops = priv->variant->ds_ops;
+ ds->num_ports = priv->num_ports;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to register switch\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_unregister_switch() - unregister a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function unregisters the DSA switch.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_unregister_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_unregister_switch(ds);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_shutdown() - shut down a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function shuts down the DSA switch and clears the driver data,
+ * to prevent realtek_{smi,mdio}_remove() from running afterwards, which is
+ * possible if the parent bus implements its own .shutdown() as .remove().
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_shutdown(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(priv->dev, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+
+/**
+ * rtl83xx_remove() - Clean up a Realtek switch driver
+ * @priv: realtek_priv pointer
+ *
+ * If a reset controller or GPIO is available, this function asserts the
+ * switch's hard reset to avoid leaking traffic once the driver is gone.
+ *
+ * Context: Might sleep if the reset GPIO's controller can sleep.
+ * Return: nothing
+ */
+void rtl83xx_remove(struct realtek_priv *priv)
+{
+ /* leave the device reset asserted */
+ rtl83xx_reset_assert(priv);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+
+void rtl83xx_reset_assert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to assert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, true);
+}
+
+void rtl83xx_reset_deassert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_deassert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to deassert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, false);
+}
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Realtek DSA switches common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/rtl83xx.h b/drivers/net/dsa/realtek/rtl83xx.h
new file mode 100644
index 000000000000..c8a0ff8fd75e
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL83XX_H
+#define _RTL83XX_H
+
+struct realtek_interface_info {
+ int (*reg_read)(void *ctx, u32 reg, u32 *val);
+ int (*reg_write)(void *ctx, u32 reg, u32 val);
+};
+
+void rtl83xx_lock(void *ctx);
+void rtl83xx_unlock(void *ctx);
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds);
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info);
+int rtl83xx_register_switch(struct realtek_priv *priv);
+void rtl83xx_unregister_switch(struct realtek_priv *priv);
+void rtl83xx_shutdown(struct realtek_priv *priv);
+void rtl83xx_remove(struct realtek_priv *priv);
+void rtl83xx_reset_assert(struct realtek_priv *priv);
+void rtl83xx_reset_deassert(struct realtek_priv *priv);
+
+#endif /* _RTL83XX_H */
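[Editor's note: illustrative sketch, not part of the patch.] To show how the declarations above fit together, here is a minimal, hypothetical probe-order sketch for an interface (bus) driver. All example_* identifiers are invented; it assumes rtl83xx_probe() reports failure via ERR_PTR(), that "realtek.h" provides struct realtek_priv, and it leaves the bus-specific register accessors as stubs:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/types.h>

    #include "realtek.h"	/* struct realtek_priv (header name assumed) */
    #include "rtl83xx.h"

    static int example_reg_read(void *ctx, u32 reg, u32 *val)
    {
    	/* Bus-specific register read would go here. */
    	*val = 0;
    	return 0;
    }

    static int example_reg_write(void *ctx, u32 reg, u32 val)
    {
    	/* Bus-specific register write would go here. */
    	return 0;
    }

    static const struct realtek_interface_info example_info = {
    	.reg_read = example_reg_read,
    	.reg_write = example_reg_write,
    };

    static int example_probe(struct device *dev)
    {
    	struct realtek_priv *priv;

    	/* Common allocation and setup, driven by the accessors above. */
    	priv = rtl83xx_probe(dev, &example_info);
    	if (IS_ERR(priv))
    		return PTR_ERR(priv);

    	/* Bus-specific setup (resets, interrupts, ...) would run here. */

    	/* Detect the chip and hand the switch over to the DSA core. */
    	return rtl83xx_register_switch(priv);
    }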
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 768454aa36d6..d29b5d7af0d7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -67,18 +67,12 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
+ netdev_lockdep_set_classes(dev);
return 0;
}
-static void dummy_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
@@ -90,7 +84,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
- .ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 5a274b99f299..6a19b5393ed1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -15,9 +15,6 @@ if ETHERNET
config MDIO
tristate
-config SUNGEM_PHY
- tristate
-
source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index da3bdd302502..760a9a60bc15 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -21,6 +21,7 @@ config ADIN1110
tristate "Analog Devices ADIN1110 MAC-PHY"
depends on SPI && NET_SWITCHDEV
select CRC8
+ select PHYLIB
help
Say yes here to build support for Analog Devices ADIN1110
Low Power 10BASE-T1L Ethernet MAC-PHY.
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index d7c274af6d4d..8b4ef5121308 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -464,8 +464,9 @@ static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* bitfield of ADIN1110_MDIOACC register will contain
* the requested register value.
*/
- ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ ret = readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
if (ret < 0)
return ret;
@@ -495,8 +496,9 @@ static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
return ret;
- return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ return readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
}
/* ADIN1110 MAC-PHY contains an ADIN1100 PHY.
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 633b321d7fdd..9e9e4a03f1a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &sq->dma_addr, GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &cq->dma_addr, GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
- &aenq->dma_addr, GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq_caps = 0;
aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ aenq_caps |=
+ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- netdev_err(ena_dev->net_device,
- "AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
}
if (unlikely(!admin_queue->comp_ctx)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
return NULL;
}
@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- netdev_dbg(admin_queue->ena_dev->net_device,
- "Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
struct ena_comp_ctx *comp_ctx;
u16 i;
- admin_queue->comp_ctx =
- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
- netdev_err(ena_dev->net_device,
- "Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- netdev_err(ena_dev->net_device,
- "Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
GFP_KERNEL);
}
@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
u8 comp_status)
{
if (unlikely(comp_status != 0))
- netdev_err(admin_queue->ena_dev->net_device,
- "Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+ comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
- comp_ctx->status);
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_default_cfg->llq_header_location;
} else {
netdev_err(ena_dev->net_device,
- "Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ "Invalid header location control, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl,
- supported_feat, llq_info->desc_stride_ctrl);
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size = 256;
} else {
netdev_err(ena_dev->net_device,
- "Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ llq_default_cfg->llq_num_decs_before_header, supported_feat,
+ llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- netdev_err(ena_dev->net_device,
- "Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(
- admin_queue->completion_timeout));
+ usecs_to_jiffies(admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
+ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device,
"Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- netdev_err(ena_dev->net_device,
- "Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_cq->cdesc_addr.virt_addr) {
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_cq->cdesc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr);
io_cq->cdesc_addr.virt_addr = NULL;
@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- netdev_err(ena_dev->net_device,
- "Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
return -EOPNOTSUPP;
}
@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (unlikely(ret))
netdev_err(ena_dev->net_device,
- "Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
return ret;
}
@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION))
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- rss->hash_key =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_key)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- rss->hash_key, rss->hash_key_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+ rss->hash_key_dma_addr);
rss->hash_key = NULL;
}
@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- rss->hash_ctrl =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_ctrl)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr);
rss->hash_ctrl = NULL;
}
@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- rss->rss_ind_tbl =
- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+ GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
tbl_size = (1ULL << log_size) * sizeof(u16);
- rss->host_rss_ind_tbl =
- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
if (unlikely(!rss->host_rss_ind_tbl))
goto mem_err2;
@@ -1197,8 +1161,7 @@ mem_err2:
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
rss->rss_ind_tbl = NULL;
mem_err1:
rss->tbl_log_size = 0;
@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
}
@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1284,16 +1245,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(uintptr_t)cmd_completion.sq_doorbell_offset);
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
- + cmd_completion.llq_headers_offset);
-
io_sq->desc_addr.pbuf_dev_addr =
(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
cmd_completion.llq_descriptors_offset);
}
- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
- io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1420,8 +1377,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1430,18 +1386,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.cq_interrupt_unmask_register_offset);
- if (cmd_completion.cq_head_db_register_offset)
- io_cq->cq_head_db_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_head_db_register_offset);
-
if (cmd_completion.numa_node_register_offset)
io_cq->numa_node_cfg_reg =
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
- io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1451,8 +1401,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Invalid queue number %d but the max is %d\n", qid,
+ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1492,8 +1441,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ena_delay_exponential_backoff_us(exp++,
- ena_dev->ena_min_poll_delay_us);
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1519,8 +1467,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1588,8 +1535,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1610,8 +1556,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
- width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
return -EINVAL;
}
@@ -1633,19 +1578,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ctrl_ver = ena_com_reg_bar_read32(ena_dev,
ENA_REGS_CONTROLLER_VERSION_OFF);
- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- dev_info(ena_dev->dmadev,
- "ENA controller version: %d.%d.%d implementation version %d\n",
+ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1694,20 +1636,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
- sq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
sq->entries = NULL;
size = ADMIN_CQ_SIZE(admin_queue->q_depth);
if (cq->entries)
- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
- cq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
cq->entries = NULL;
size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
- aenq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
aenq->entries = NULL;
}
@@ -1733,10 +1672,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
spin_lock_init(&mmio_read->lock);
- mmio_read->read_resp =
- dma_alloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -1767,8 +1704,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr);
mmio_read->read_resp = NULL;
}
@@ -1800,8 +1737,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1878,8 +1814,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1905,8 +1840,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
/* header length is limited to 8 bits */
- io_sq->tx_max_header_size =
- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
if (ret)
@@ -1938,8 +1872,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -1983,8 +1916,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version !=
- ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
@@ -2025,18 +1957,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
if (!rc)
- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
- sizeof(get_resp.u.hw_hints));
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->hw_hints, 0x0,
- sizeof(get_feat_ctx->hw_hints));
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
if (!rc)
- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
- sizeof(get_resp.u.llq));
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
else if (rc == -EOPNOTSUPP)
memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
else
@@ -2084,8 +2013,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((READ_ONCE(aenq_common->flags) &
- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
*/
@@ -2094,8 +2022,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- netdev_dbg(ena_dev->net_device,
- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
@@ -2124,8 +2051,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2137,15 +2063,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
- (cap == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
return -EINVAL;
}
@@ -2168,8 +2092,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
return rc;
}
@@ -2177,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
return rc;
}
@@ -2215,8 +2137,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to get stats. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
return ret;
}
@@ -2228,8 +2149,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
- netdev_err(ena_dev->net_device,
- "Capability %d isn't supported\n",
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
ENA_ADMIN_ENI_STATS);
return -EOPNOTSUPP;
}
@@ -2266,8 +2186,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2286,8 +2205,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2301,8 +2219,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to get offload capabilities %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2320,8 +2237,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
struct ena_admin_get_feat_resp get_resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
@@ -2334,8 +2250,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- netdev_err(ena_dev->net_device,
- "Func hash %d isn't supported by device, abort\n",
+ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2365,8 +2280,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to set hash function %d. error: %d\n",
+ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return -EINVAL;
}
@@ -2398,16 +2312,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- netdev_err(ena_dev->net_device,
- "Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
if ((func == ENA_ADMIN_TOEPLITZ) && key) {
if (key_len != sizeof(hash_key->key)) {
netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
+ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+ sizeof(hash_key->key));
return -EINVAL;
}
memcpy(hash_key->key, key, key_len);
@@ -2495,8 +2408,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_INPUT)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
@@ -2527,8 +2439,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2605,8 +2516,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
- proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
return -EINVAL;
}
@@ -2658,8 +2568,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
@@ -2699,8 +2608,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2779,9 +2687,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->host_info =
- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2827,8 +2734,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
if (host_attr->debug_area_virt_addr) {
dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
host_attr->debug_area_virt_addr = NULL;
}
}
@@ -2877,8 +2783,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2896,8 +2801,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- netdev_err(ena_dev->net_device,
- "Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2935,14 +2839,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- netdev_dbg(ena_dev->net_device,
- "Feature %d isn't supported\n",
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
netdev_err(ena_dev->net_device,
- "Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@@ -2990,8 +2892,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- netdev_err(ena_dev->net_device,
- "The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 3c5081d9d25d..fea57eb8e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -109,16 +109,13 @@ struct ena_com_io_cq {
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
- /* The completion queue head doorbell register */
- u32 __iomem *cq_head_db_reg;
-
/* numa configuration register (for TPH) */
u32 __iomem *numa_node_cfg_reg;
/* The value to write to the above register to unmask
* the interrupt of this queue
*/
- u32 msix_vector;
+ u32 msix_vector ____cacheline_aligned;
enum queue_direction direction;
@@ -134,7 +131,6 @@ struct ena_com_io_cq {
/* Device queue index */
u16 idx;
u16 head;
- u16 last_head_update;
u8 phase;
u8 cdesc_entry_size_in_bytes;
@@ -158,7 +154,6 @@ struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
u32 __iomem *db_addr;
- u8 __iomem *header_addr;
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f9f886289b97..933e619b3a31 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
io_sq->entries_in_tx_burst_left--;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
+ io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
wmb();
/* The line is completed. Copy it to dev */
- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
+ (llq_info->desc_list_entry_size) / 8);
io_sq->tail++;
@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
header_offset =
llq_info->descs_num_before_header * io_sq->desc_entry_size;
- if (unlikely((header_offset + header_len) >
- llq_info->desc_list_entry_size)) {
+ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return NULL;
}
@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
"l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(header_len > io_sq->tx_max_header_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ "Header size is too large %d max header: %d\n", header_len,
+ io_sq->tx_max_header_size);
return -EINVAL;
}
- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push)) {
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Push header wasn't provided in LLQ mode\n");
return -EINVAL;
@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
}
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
io_sq->next_to_comp += nb_hw_desc;
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
- io_sq->qid, io_sq->next_to_comp);
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
/* Get rx flags from the last pkt */
ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->req_id = req_id;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
- __func__, io_sq->qid, req_id);
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
+ req_id);
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 372b259279ec..72b019758caa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -8,8 +8,6 @@
#include "ena_com.h"
-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
@@ -145,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
}
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Queue: %d num_descs: %d num_entries_needed: %d\n",
- io_sq->qid, num_descs, num_entries_needed);
+ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
+ num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
@@ -157,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 tail = io_sq->tail;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
+ max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
return 0;
}
-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
- u16 unreported_comp, head;
- bool need_update;
-
- if (unlikely(io_cq->cq_head_db_reg)) {
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (unlikely(need_update)) {
- netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
- }
- }
-
- return 0;
-}
-
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
u8 numa_node)
{
@@ -248,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
+ cdesc->req_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 1c0a7828d397..09e7da1a69c9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
- NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static struct ena_aenq_handlers aenq_handlers;
@@ -47,19 +47,44 @@ static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
struct ena_adapter *adapter = netdev_priv(dev);
+ unsigned int time_since_last_napi, threshold;
+ struct ena_ring *tx_ring;
+ int napi_scheduled;
+
+ if (txqueue >= adapter->num_io_queues) {
+ netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
+ goto schedule_reset;
+ }
+
+ threshold = jiffies_to_usecs(dev->watchdog_timeo);
+ tx_ring = &adapter->tx_ring[txqueue];
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
+
+ netdev_err(dev,
+ "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
+ txqueue,
+ threshold,
+ time_since_last_napi,
+ napi_scheduled);
+
+ if (threshold < time_since_last_napi && napi_scheduled) {
+ netdev_err(dev,
+ "napi handler hasn't been called for a long time but is scheduled\n");
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ }
+schedule_reset:
/* Change the state of the device to trigger reset
* Check that we are not in the middle or a trigger already
*/
-
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
+ ena_reset_device(adapter, reset_reason);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
-
- netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -116,11 +141,9 @@ int ena_xmit_common(struct ena_adapter *adapter,
if (unlikely(rc)) {
netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
- &ring->syncp);
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
if (rc != -ENOMEM)
- ena_reset_device(adapter,
- ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -485,8 +508,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
*/
page = dev_alloc_page();
if (!page) {
- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
return ERR_PTR(-ENOSPC);
}
@@ -523,7 +545,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
/* We handle DMA here */
page = ena_alloc_map_page(rx_ring, &dma);
- if (unlikely(IS_ERR(page)))
+ if (IS_ERR(page))
return PTR_ERR(page);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
@@ -545,8 +567,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info,
unsigned long attrs)
{
- dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL, attrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
@@ -819,8 +841,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
&req_id);
if (rc) {
if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(tx_ring, req_id, NULL,
- false);
+ handle_invalid_req_id(tx_ring, req_id, NULL, false);
break;
}
@@ -856,7 +877,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -1046,8 +1066,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
DMA_FROM_DEVICE);
if (!reuse_rx_buf_page)
- ena_unmap_rx_buff_attrs(rx_ring, rx_info,
- DMA_ATTR_SKIP_CPU_SYNC);
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
page_offset + buf_offset, len, buf_len);
@@ -1303,10 +1322,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
- if (refill_required > refill_threshold) {
- ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ if (refill_required > refill_threshold)
ena_refill_rx_bufs(rx_ring, refill_required);
- }
if (xdp_flags & ENA_XDP_REDIRECT)
xdp_do_flush();
@@ -1320,8 +1337,7 @@ error:
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
@@ -1811,8 +1827,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2134,6 +2149,12 @@ int ena_up(struct ena_adapter *adapter)
*/
ena_init_napi_in_range(adapter, 0, io_queue_count);
+ /* Enabling DIM needs to happen before enabling IRQs since DIM
+ * is run from napi routine
+ */
+ if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
+ ena_com_enable_adaptive_moderation(adapter->ena_dev);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -2184,7 +2205,7 @@ void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2197,8 +2218,6 @@ void ena_down(struct ena_adapter *adapter)
/* After this point the napi handler won't enable the tx queue */
ena_napi_disable_in_range(adapter, 0, io_queue_count);
- /* After destroy the queue there won't be any new interrupts */
-
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
@@ -2588,8 +2607,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(rc))
goto error_drop_packet;
- skb_tx_timestamp(skb);
-
next_to_use = tx_ring->next_to_use;
req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
@@ -2653,6 +2670,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ skb_tx_timestamp(skb);
+
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
/* trigger the dma engine. ena_ring_tx_doorbell()
* calls a memory barrier inside it.
@@ -2670,22 +2689,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in--kernel network services that
- * want to loop incoming skb rx to tx in normal user generated traffic,
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -2764,8 +2767,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- netif_warn(adapter, drv, adapter->netdev,
- "Cannot set host attributes\n");
+ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
else
netif_err(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
@@ -2863,18 +2865,16 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
- .ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
.ndo_xdp_xmit = ena_xdp_xmit,
};
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -2933,6 +2933,18 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+ if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
+ max_tx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
+ max_rx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
/* When forcing large headers, we multiply the entry size by 2, and therefore divide
* the queue size by 2, leaving the amount of memory used by the queues unchanged.
*/
@@ -2963,6 +2975,8 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
adapter->max_rx_ring_size = max_rx_queue_size;
adapter->requested_tx_ring_size = tx_queue_size;
adapter->requested_rx_ring_size = rx_queue_size;
+
+ return 0;
}
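/*
 * Illustrative userspace sketch of the new sanity check: round the
 * device-reported maxima down to a power of two and reject anything below a
 * driver minimum, turning the helper into one that can fail.  MIN_RING_SIZE,
 * the helper names and the fprintf() reporting are assumptions for the
 * example, not the driver's code.
 */
#include <errno.h>
#include <stdio.h>

#define MIN_RING_SIZE 256U

static unsigned int rounddown_pow2(unsigned int v)
{
	unsigned int r = 1;

	if (v == 0)
		return 0;
	while (r * 2 != 0 && r * 2 <= v)
		r *= 2;
	return r;
}

static int validate_queue_sizes(unsigned int max_tx, unsigned int max_rx)
{
	max_tx = rounddown_pow2(max_tx);
	max_rx = rounddown_pow2(max_rx);

	if (max_tx < MIN_RING_SIZE) {
		fprintf(stderr, "device max TX queue size %u < minimum %u\n",
			max_tx, MIN_RING_SIZE);
		return -EINVAL;
	}
	if (max_rx < MIN_RING_SIZE) {
		fprintf(stderr, "device max RX queue size %u < minimum %u\n",
			max_rx, MIN_RING_SIZE);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("rc=%d\n", validate_queue_sizes(1024, 100));	/* RX too small */
	return 0;
}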
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3070,6 +3084,7 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
bool *wd_state)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3159,15 +3174,19 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(dev, "ENA device init failed\n");
+ netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc);
goto err_admin_init;
}
- ena_calc_io_queue_size(adapter, get_feat_ctx);
+ rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
+ if (unlikely(rc))
+ goto err_admin_init;
return 0;
err_admin_init:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
@@ -3226,7 +3245,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
- if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ if (dev_up)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
@@ -3372,14 +3391,18 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
unsigned int time_since_last_napi;
unsigned int missing_tx_comp_to;
bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
+ int napi_scheduled;
u32 missed_tx = 0;
int i, rc = 0;
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
+
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
@@ -3406,25 +3429,45 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
adapter->missing_tx_completion_to);
if (unlikely(is_tx_comp_time_expired)) {
- if (!tx_buf->print_once) {
- time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
- missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
- netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
- tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ time_since_last_napi =
+ jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
+
+ if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
+ /* We suspect napi isn't called because the
+ * bottom half is not run. Require a bigger
+ * timeout for these cases
+ */
+ if (!time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))
+ continue;
+
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
- tx_buf->print_once = 1;
missed_tx++;
+
+ if (tx_buf->print_once)
+ continue;
+
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
+ tx_ring->qid, i, time_since_last_napi, napi_scheduled);
+
+ tx_buf->print_once = 1;
}
}
if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
missed_tx,
- adapter->missing_tx_completion_threshold);
- ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
+ adapter->missing_tx_completion_threshold,
+ missing_tx_comp_to);
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Resetting the device\n");
+
+ ena_reset_device(adapter, reset_reason);
rc = -EIO;
}
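/*
 * Illustrative userspace sketch (not driver code) of the decision the hunk
 * above adds: an overdue TX buffer counts as missed right away when NAPI is
 * idle, but if NAPI is scheduled and the poll has not run for longer than the
 * timeout, the deadline is doubled and the reset reason becomes "suspected
 * poll starvation".  Names and units below are simplified assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

enum reset_reason {
	RESET_MISS_TX_CMPL,
	RESET_SUSPECTED_POLL_STARVATION,
};

static bool tx_completion_missed(unsigned long now, unsigned long last_done,
				 unsigned long timeout, bool napi_scheduled,
				 unsigned long since_last_poll,
				 enum reset_reason *reason)
{
	if (now - last_done < timeout)
		return false;				/* not overdue yet */

	if (napi_scheduled && since_last_poll > timeout) {
		/* Poll looks starved: allow twice the normal timeout. */
		if (now - last_done < 2 * timeout)
			return false;
		*reason = RESET_SUSPECTED_POLL_STARVATION;
	}
	return true;
}

int main(void)
{
	enum reset_reason reason = RESET_MISS_TX_CMPL;
	bool missed = tx_completion_missed(1000, 100, 300, true, 500, &reason);

	printf("missed=%d reason=%d\n", missed, reason);
	return 0;
}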
@@ -3762,8 +3805,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
@@ -4040,8 +4083,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
free_irq_cpu_rmap(netdev->rx_cpu_rmap);
netdev->rx_cpu_rmap = NULL;
}
-#endif /* CONFIG_RFS_ACCEL */
+#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 1e007a41a525..2c3d6a77ea79 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -21,6 +21,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index fc1c4ef73ba3..337c435d3ce9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d done. total pkts: %d\n",
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index ea773cfa0af6..c83a0a80d533 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -82,7 +82,6 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
unsigned long irqflags;
int nq_work = 0;
int aq_work = 0;
- int credits;
/* Don't process AdminQ when it's not up */
if (!pdsc_adminq_inc_if_up(pdsc)) {
@@ -128,11 +127,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
credits:
/* Return the interrupt credits, one for each completion */
- credits = nq_work + aq_work;
- if (credits)
- pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
- credits,
- PDS_CORE_INTR_CRED_REARM);
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ nq_work + aq_work,
+ PDS_CORE_INTR_CRED_REARM);
refcount_dec(&pdsc->adminq_refcnt);
}
@@ -157,7 +154,6 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
qcq = &pdsc->adminqcq;
queue_work(pdsc->wq, &qcq->work);
- pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
refcount_dec(&pdsc->adminq_refcnt);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 11c23a7f3172..2babea110991 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
if (err < 0) {
dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out;
+ kfree(padev);
+ return ERR_PTR(err);
}
err = auxiliary_device_add(aux_dev);
if (err) {
dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out_uninit;
+ auxiliary_device_uninit(aux_dev);
+ return ERR_PTR(err);
}
return padev;
-
-err_out_uninit:
- auxiliary_device_uninit(aux_dev);
-err_out:
- kfree(padev);
- return ERR_PTR(err);
}
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
@@ -184,6 +180,9 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
struct pds_auxiliary_dev *padev;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
padev = pf->vfs[cf->vf_id].padev;
@@ -202,14 +201,27 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
{
struct pds_auxiliary_dev *padev;
- enum pds_core_vif_types vt;
char devname[PDS_DEVNAME_LEN];
+ enum pds_core_vif_types vt;
+ unsigned long mask;
u16 vt_support;
int client_id;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
+ mask = BIT_ULL(PDSC_S_FW_DEAD) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (cf->state & mask) {
+ dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
+ __func__, cf->state);
+ err = -ENXIO;
+ goto out_unlock;
+ }
+
/* We only support vDPA so far, so it is the only one to
* be verified that it is available in the Core device and
* enabled in the devlink param. In the future this might
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 7658a7286767..9662ee72814c 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -129,6 +129,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
if (index < 0)
return index;
qcq->intx = index;
+ qcq->cq.bound_intr = &pdsc->intr_info[index];
return 0;
}
@@ -222,7 +223,6 @@ int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
goto err_out_free_irq;
}
- qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
qcq->cq.num_descs = num_descs;
qcq->cq.desc_size = cq_desc_size;
qcq->cq.tail_idx = 0;
@@ -300,6 +300,17 @@ err_out:
return err;
}
+static void pdsc_core_uninit(struct pdsc *pdsc)
+{
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+}
+
static int pdsc_core_init(struct pdsc *pdsc)
{
union pds_core_dev_comp comp = {};
@@ -310,9 +321,32 @@ static int pdsc_core_init(struct pdsc *pdsc)
struct pds_core_dev_init_data_in cidi;
u32 dbid_count;
u32 dbpage_num;
+ int numdescs;
size_t sz;
int err;
+ /* Scale the descriptor ring length based on number of CPUs and VFs */
+ numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+ numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+ numdescs = roundup_pow_of_two(numdescs);
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ return err;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_uninit;
+
cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
@@ -336,7 +370,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
if (err) {
dev_err(pdsc->dev, "Device init command failed: %pe\n",
ERR_PTR(err));
- return err;
+ goto err_out_uninit;
}
pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
@@ -346,7 +380,8 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
if (!pdsc->kern_dbpage) {
dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_uninit;
}
pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
@@ -359,6 +394,10 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->last_eid = 0;
+ return 0;
+
+err_out_uninit:
+ pdsc_core_uninit(pdsc);
return err;
}
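/*
 * The restructuring above moves queue allocation into the init path and gives
 * every error exit a single uninit helper.  A compact userspace sketch of
 * that goto-unwind idiom; the resource names are invented and malloc() stands
 * in for the queue/interrupt allocations.
 */
#include <stdio.h>
#include <stdlib.h>

struct core {
	void *adminq;
	void *notifyq;
};

static void core_uninit(struct core *c)
{
	free(c->notifyq);
	free(c->adminq);
	c->notifyq = NULL;
	c->adminq = NULL;
}

static int core_init(struct core *c)
{
	c->adminq = malloc(64);
	if (!c->adminq)
		return -1;

	c->notifyq = malloc(64);
	if (!c->notifyq)
		goto err_uninit;

	/* ...device init command and doorbell mapping would go here... */
	return 0;

err_uninit:
	core_uninit(c);
	return -1;
}

int main(void)
{
	struct core c = { 0 };
	int rc = core_init(&c);

	printf("init rc=%d\n", rc);
	core_uninit(&c);	/* safe even after a failed init */
	return 0;
}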
@@ -401,38 +440,12 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
int pdsc_setup(struct pdsc *pdsc, bool init)
{
- int numdescs;
int err;
err = pdsc_dev_init(pdsc);
if (err)
return err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
- PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
- numdescs,
- sizeof(union pds_core_adminq_cmd),
- sizeof(union pds_core_adminq_comp),
- 0, &pdsc->adminqcq);
- if (err)
- goto err_out_teardown;
-
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
- PDS_CORE_QCQ_F_NOTIFYQ,
- PDSC_NOTIFYQ_LENGTH,
- sizeof(struct pds_core_notifyq_cmd),
- sizeof(union pds_core_notifyq_comp),
- 0, &pdsc->notifyqcq);
- if (err)
- goto err_out_teardown;
-
- /* NotifyQ rides on the AdminQ interrupt */
- pdsc->notifyqcq.intx = pdsc->adminqcq.intx;
-
/* Set up the Core with the AdminQ and NotifyQ info */
err = pdsc_core_init(pdsc);
if (err)
@@ -458,35 +471,20 @@ err_out_teardown:
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
- int i;
-
if (!pdsc->pdev->is_virtfn)
pdsc_devcmd_reset(pdsc);
if (pdsc->adminqcq.work.func)
cancel_work_sync(&pdsc->adminqcq.work);
- pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
- pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ pdsc_core_uninit(pdsc);
if (removing) {
kfree(pdsc->viftype_status);
pdsc->viftype_status = NULL;
}
- if (pdsc->intr_info) {
- for (i = 0; i < pdsc->nintrs; i++)
- pdsc_intr_free(pdsc, i);
-
- kfree(pdsc->intr_info);
- pdsc->intr_info = NULL;
- pdsc->nintrs = 0;
- }
-
- if (pdsc->kern_dbpage) {
- iounmap(pdsc->kern_dbpage);
- pdsc->kern_dbpage = NULL;
- }
+ pdsc_dev_uninit(pdsc);
- pci_free_irq_vectors(pdsc->pdev);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
@@ -609,8 +607,7 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pdsc_reset_prepare(pdsc->pdev);
- pdsc_reset_done(pdsc->pdev);
+ pci_reset_function(pdsc->pdev);
}
void pdsc_health_thread(struct work_struct *work)
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 110c4b826b22..92d7657dd614 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -282,9 +282,7 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
-
-void pdsc_reset_prepare(struct pci_dev *pdev);
-void pdsc_reset_done(struct pci_dev *pdev);
+void pdsc_dev_uninit(struct pdsc *pdsc);
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 4e8579ca1c8c..6bdd02b7aa6d 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -32,8 +32,8 @@ void pdsc_debugfs_del_dev(struct pdsc *pdsc)
static int identity_show(struct seq_file *seq, void *v)
{
- struct pdsc *pdsc = seq->private;
struct pds_core_dev_identity *ident;
+ struct pdsc *pdsc = seq->private;
int vt;
ident = &pdsc->dev_ident;
@@ -106,10 +106,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
- struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry, *intr_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
- struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
struct pdsc_queue *q = &qcq->q;
struct pdsc_cq *cq = &qcq->cq;
@@ -147,6 +145,8 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
if (IS_ERR_OR_NULL(intr_dentry))
return;
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e65a1632df50..e494e1298dc9 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -316,6 +316,22 @@ static int pdsc_identify(struct pdsc *pdsc)
return 0;
}
+void pdsc_dev_uninit(struct pdsc *pdsc)
+{
+ if (pdsc->intr_info) {
+ int i;
+
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
+ }
+
+ pci_free_irq_vectors(pdsc->pdev);
+}
+
int pdsc_dev_init(struct pdsc *pdsc)
{
unsigned int nintrs;
@@ -341,10 +357,8 @@ int pdsc_dev_init(struct pdsc *pdsc)
/* Get intr_info struct array for tracking */
pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
- if (!pdsc->intr_info) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (!pdsc->intr_info)
+ return -ENOMEM;
err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
if (err != nintrs) {
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 0050c5894563..ab6133e7db42 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -45,6 +45,7 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
if (bars[i].vaddr)
pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ bars[i].vaddr = NULL;
}
}
@@ -468,19 +469,28 @@ static void pdsc_restart_health_thread(struct pdsc *pdsc)
mod_timer(&pdsc->wdtimer, jiffies + 1);
}
-void pdsc_reset_prepare(struct pci_dev *pdev)
+static void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_del(pdsc, pf);
+ }
+
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
}
-void pdsc_reset_done(struct pci_dev *pdev)
+static void pdsc_reset_done(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
struct device *dev = pdsc->dev;
@@ -510,12 +520,43 @@ void pdsc_reset_done(struct pci_dev *pdev)
pdsc_fw_up(pdsc);
pdsc_restart_health_thread(pdsc);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_add(pdsc, pf);
+ }
+}
+
+static pci_ers_result_t pdsc_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ pdsc_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void pdsc_pci_error_resume(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ pci_reset_function_locked(pdev);
}
static const struct pci_error_handlers pdsc_err_handler = {
/* FLR handling */
.reset_prepare = pdsc_reset_prepare,
.reset_done = pdsc_reset_done,
+
+ /* AER handling */
+ .error_detected = pdsc_pci_error_detected,
+ .resume = pdsc_pci_error_resume,
};
static struct pci_driver pdsc_driver = {
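/*
 * Sketch of the AER decision wired up above: only a frozen channel takes the
 * prepare-for-reset path, everything else is left to the PCI core.  The enums
 * loosely mirror pci_channel_state_t / pci_ers_result_t; this is an
 * illustration, not the driver's handler.
 */
#include <stdio.h>

enum channel_state { CHANNEL_NORMAL, CHANNEL_FROZEN, CHANNEL_PERM_FAILURE };
enum ers_result { ERS_NONE, ERS_NEED_RESET, ERS_DISCONNECT };

static void prepare_for_reset(void)
{
	puts("quiesce device, unmap BARs");	/* roughly what the prepare hook does */
}

static enum ers_result error_detected(enum channel_state state)
{
	if (state == CHANNEL_FROZEN) {
		prepare_for_reset();
		return ERS_NEED_RESET;
	}
	return ERS_NONE;
}

int main(void)
{
	printf("frozen -> %d\n", error_detected(CHANNEL_FROZEN));
	printf("normal -> %d\n", error_detected(CHANNEL_NORMAL));
	return 0;
}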
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 18a6c8d99fa0..a2606ee3b0a5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -15,6 +15,7 @@
#include "aq_macsec.h"
#include "aq_main.h"
+#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
static void aq_ethtool_get_regs(struct net_device *ndev,
@@ -681,23 +682,19 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev,
return 0;
}
-static u32 eee_mask_to_ethtool_mask(u32 speed)
+static void eee_mask_to_ethtool_mask(unsigned long *mode, u32 speed)
{
- u32 rate = 0;
-
if (speed & AQ_NIC_RATE_EEE_10G)
- rate |= SUPPORTED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_1G)
- rate |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_100M)
- rate |= SUPPORTED_100baseT_Full;
-
- return rate;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
}
-static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
@@ -713,14 +710,14 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (err < 0)
return err;
- eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+ eee_mask_to_ethtool_mask(eee->supported, supported_rates);
if (aq_nic->aq_nic_cfg.eee_speeds)
- eee->advertised = eee->supported;
+ linkmode_copy(eee->advertised, eee->supported);
- eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+ eee_mask_to_ethtool_mask(eee->lp_advertised, rate);
- eee->eee_enabled = !!eee->advertised;
+ eee->eee_enabled = !linkmode_empty(eee->advertised);
eee->tx_lpi_enabled = eee->eee_enabled;
if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
@@ -729,7 +726,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
return 0;
}
-static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
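/*
 * The ethtool_keee conversion above switches from packed SUPPORTED_* masks to
 * link-mode bitmaps.  The helpers below are simplified userspace stand-ins
 * for linkmode_set_bit(); the rate flags and mode bit numbers are invented
 * for the example.
 */
#include <limits.h>
#include <stdio.h>

#define RATE_EEE_100M	(1u << 0)
#define RATE_EEE_1G	(1u << 1)
#define RATE_EEE_10G	(1u << 2)

enum { MODE_100_FULL, MODE_1000_FULL, MODE_10000_FULL, MODE_NBITS };

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS	((MODE_NBITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void mode_set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void eee_mask_to_mode(unsigned long *mode, unsigned int speed)
{
	if (speed & RATE_EEE_10G)
		mode_set_bit(MODE_10000_FULL, mode);
	if (speed & RATE_EEE_1G)
		mode_set_bit(MODE_1000_FULL, mode);
	if (speed & RATE_EEE_100M)
		mode_set_bit(MODE_100_FULL, mode);
}

int main(void)
{
	unsigned long mode[BITMAP_WORDS] = { 0 };

	eee_mask_to_mode(mode, RATE_EEE_1G | RATE_EEE_10G);
	printf("mode[0]=%#lx\n", mode[0]);
	return 0;
}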
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 29b04a274d07..a806dadc4196 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -31,6 +31,20 @@ static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
priv->irq_mask |= mask;
}
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ /* Only supported with internal phys */
+ if (!intf->internal_phy)
+ return;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+}
+
void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
{
struct bcmasp_priv *priv = intf->parent;
@@ -79,6 +93,9 @@ static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
__napi_schedule_irqoff(&intf->tx_napi);
}
}
+
+ if (status & ASP_INTR2_PHY_EVENT(intf->channel))
+ phy_mac_interrupt(intf->ndev->phydev);
}
static irqreturn_t bcmasp_isr(int irq, void *data)
@@ -535,9 +552,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
- if (j == *rule_cnt)
- return -EMSGSIZE;
-
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -547,6 +561,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
priv->net_filters[i - 1].wake_filter)
continue;
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
rule_locs[j++] = priv->net_filters[i].fs.location;
}
@@ -972,7 +989,26 @@ static void bcmasp_core_init(struct bcmasp_priv *priv)
ASP_INTR2_CLEAR);
}
-static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
+}
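/*
 * Minimal sketch of the read-modify-write done by the new clock-select
 * helpers: read a register, flip the select bit according to "slow", write it
 * back.  A plain variable stands in for the MMIO register; names are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define CLOCK_SELECT_MAIN	(1u << 0)

static unsigned int fake_reg;			/* stands in for the MMIO register */

static void core_clock_select(bool slow)
{
	unsigned int reg = fake_reg;		/* ctrl2_core_rl() in the driver */

	if (slow)
		reg &= ~CLOCK_SELECT_MAIN;
	else
		reg |= CLOCK_SELECT_MAIN;

	fake_reg = reg;				/* ctrl2_core_wl() in the driver */
}

int main(void)
{
	core_clock_select(false);
	printf("after fast select: %#x\n", fake_reg);
	core_clock_select(true);
	printf("after slow select: %#x\n", fake_reg);
	return 0;
}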
+
+static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
{
u32 reg;
@@ -1166,6 +1202,24 @@ static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
}
}
+static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
+{
+ u32 reg, phy_lpi_overwrite;
+
+ reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
+ phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
+ ASP_EDPKT_SPARE_REG_GPHY_LPI;
+
+ if (en)
+ reg |= phy_lpi_overwrite;
+ else
+ reg &= ~phy_lpi_overwrite;
+
+ rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);
+
+ usleep_range(50, 100);
+}
+
static struct bcmasp_hw_info v20_hw_info = {
.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
.umac2fb = UMAC2FB_OFFSET,
@@ -1178,6 +1232,7 @@ static const struct bcmasp_plat_data v20_plat_data = {
.init_wol = bcmasp_init_wol_per_intf,
.enable_wol = bcmasp_enable_wol_per_intf,
.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .core_clock_select = bcmasp_core_clock_select_one,
.hw_info = &v20_hw_info,
};
@@ -1194,17 +1249,39 @@ static const struct bcmasp_plat_data v21_plat_data = {
.init_wol = bcmasp_init_wol_shared,
.enable_wol = bcmasp_enable_wol_shared,
.destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_one,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct bcmasp_plat_data v22_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_many,
.hw_info = &v21_hw_info,
+ .eee_fixup = bcmasp_eee_fixup,
};
+static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
+{
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->core_clock_select = pdata->core_clock_select;
+ priv->eee_fixup = pdata->eee_fixup;
+ priv->hw_info = pdata->hw_info;
+}
+
static const struct of_device_id bcmasp_of_match[] = {
{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ /* sentinel */ },
@@ -1265,16 +1342,13 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!pdata)
return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
- priv->hw_info = pdata->hw_info;
+ bcmasp_set_pdata(priv, pdata);
/* Enable all clocks to ensure successful probing */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
/* Switch to the main clock */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
bcmasp_intr2_mask_set_all(priv);
bcmasp_intr2_clear_all(priv);
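/*
 * The v2.2 variant above is wired in through a per-compatible plat_data whose
 * callbacks are copied into the private struct at probe time.  A condensed
 * userspace model of that table-driven dispatch; compatible strings and
 * callback names are made up for the example.
 */
#include <stdio.h>
#include <string.h>

struct plat_data {
	const char *compatible;
	void (*clock_select)(int slow);
};

static void clock_select_one(int slow)  { printf("one: slow=%d\n", slow); }
static void clock_select_many(int slow) { printf("many: slow=%d\n", slow); }

static const struct plat_data variants[] = {
	{ "vendor,asp-v2.1", clock_select_one },
	{ "vendor,asp-v2.2", clock_select_many },
};

struct priv {
	void (*clock_select)(int slow);
};

static int probe(struct priv *p, const char *compat)
{
	size_t i;

	for (i = 0; i < sizeof(variants) / sizeof(variants[0]); i++) {
		if (!strcmp(variants[i].compatible, compat)) {
			p->clock_select = variants[i].clock_select;
			return 0;
		}
	}
	return -1;			/* no matching platform data */
}

int main(void)
{
	struct priv p;

	if (!probe(&p, "vendor,asp-v2.2"))
		p.clock_select(0);	/* switch to the main clock */
	return 0;
}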
@@ -1381,7 +1455,7 @@ static int __maybe_unused bcmasp_suspend(struct device *d)
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
- bcmasp_core_clock_select(priv, true);
+ priv->core_clock_select(priv, true);
clk_disable_unprepare(priv->clk);
@@ -1399,7 +1473,7 @@ static int __maybe_unused bcmasp_resume(struct device *d)
return ret;
/* Switch to the main clock domain */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
/* Re-enable all clocks for re-initialization */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index ec90add6b03e..f93cb3da44b0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -19,6 +19,8 @@
#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
#define ASP_INTR2_UMC0_WAKE BIT(22)
#define ASP_INTR2_UMC1_WAKE BIT(28)
+#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \
+ BIT(24) | BIT(25))
#define ASP_WAKEUP_INTR2_OFFSET 0x1200
#define ASP_WAKEUP_INTR2_STATUS 0x0
@@ -33,6 +35,12 @@
#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
#define ASP_WAKEUP_INTR2_FW BIT(4)
+#define ASP_CTRL2_OFFSET 0x2000
+#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0
+#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4
+#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0)
+
#define ASP_TX_ANALYTICS_OFFSET 0x4c000
#define ASP_TX_ANALYTICS_CTRL 0x0
@@ -134,8 +142,11 @@ enum asp_rx_net_filter_block {
#define ASP_EDPKT_RX_PKT_CNT 0x138
#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
#define ASP_EDPKT_HDR_OUT_CNT 0x140
+#define ASP_EDPKT_SPARE_REG 0x174
+#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4)
+#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3)
-#define ASP_CTRL 0x101000
+#define ASP_CTRL_OFFSET 0x101000
#define ASP_CTRL_ASP_SW_INIT 0x04
#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
@@ -306,6 +317,7 @@ struct bcmasp_intf {
struct bcmasp_desc *rx_edpkt_cpu;
dma_addr_t rx_edpkt_dma_addr;
dma_addr_t rx_edpkt_dma_read;
+ dma_addr_t rx_edpkt_dma_valid;
/* RX buffer prefetcher ring*/
void *rx_ring_cpu;
@@ -337,7 +349,7 @@ struct bcmasp_intf {
int wol_irq;
unsigned int wol_irq_enabled:1;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,6 +384,8 @@ struct bcmasp_plat_data {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
struct bcmasp_hw_info *hw_info;
};
@@ -390,6 +404,8 @@ struct bcmasp_priv {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
void __iomem *base;
struct bcmasp_hw_info *hw_info;
@@ -530,7 +546,8 @@ BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
-BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -541,6 +558,8 @@ void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en);
+
void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
extern const struct ethtool_ops bcmasp_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index ce6a3d56fb23..484fc2b5626f 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -360,29 +360,26 @@ void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
umac_wl(intf, reg, UMC_EEE_CTRL);
intf->eee.eee_enabled = enable;
- intf->eee.eee_active = enable;
}
-static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
int ret;
if (!dev->phydev)
@@ -399,7 +396,6 @@ static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
}
umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.eee_active = ret >= 0;
intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
bcmasp_eee_enable_set(intf, true);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index f59557b0cd51..dd06b68b33ed 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -382,6 +382,7 @@ static void bcmasp_netif_start(struct net_device *dev)
bcmasp_enable_rx_irq(intf, 1);
bcmasp_enable_tx_irq(intf, 1);
+ bcmasp_enable_phy_irq(intf, 1);
phy_start(dev->phydev);
}
@@ -607,6 +608,7 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
+ bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -658,8 +660,8 @@ static void bcmasp_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_wl(intf, reg, UMC_CMD);
- intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, intf->eee.eee_active);
+ active = phy_init_eee(phydev, 0) >= 0;
+ bcmasp_eee_enable_set(intf, active);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -673,40 +675,78 @@ static void bcmasp_adj_link(struct net_device *dev)
phy_print_status(phydev);
}
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct page *buffer_pg;
- dma_addr_t dma;
- void *p;
- u32 reg;
- int ret;
+ /* Alloc RX */
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
if (!buffer_pg)
return -ENOMEM;
- dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(kdev, dma)) {
- __free_pages(buffer_pg, intf->rx_buf_order);
- return -ENOMEM;
- }
intf->rx_ring_cpu = page_to_virt(buffer_pg);
- intf->rx_ring_dma = dma;
- intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, intf->rx_ring_dma))
+ goto free_rx_buffer;
+
+ intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->rx_edpkt_dma_addr, GFP_KERNEL);
+ if (!intf->rx_edpkt_cpu)
+ goto free_rx_buffer_dma;
+
+ /* Alloc TX */
+ intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->tx_spb_dma_addr, GFP_KERNEL);
+ if (!intf->tx_spb_cpu)
+ goto free_rx_edpkt_dma;
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto free_rx_ring;
- }
- intf->rx_edpkt_cpu = p;
+ if (!intf->tx_cbs)
+ goto free_tx_spb_dma;
- netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+ return 0;
+
+free_tx_spb_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+free_rx_buffer:
+ __free_pages(buffer_pg, intf->rx_buf_order);
+
+ return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* RX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+ /* TX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+ kfree(intf->tx_cbs);
+}
+
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
intf->rx_edpkt_index = 0;
@@ -732,64 +772,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_END);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_VALID);
-
- reg = UMAC2FB_CFG_DEFAULT_EN |
- ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
- reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
- umac2fb_wl(intf, reg, UMAC2FB_CFG);
-
- return 0;
-
-free_rx_ring:
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
- return ret;
+ umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+ UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+ UMAC2FB_CFG);
}
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
- intf->rx_edpkt_dma_addr);
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
- struct device *kdev = &intf->parent->pdev->dev;
- void *p;
- int ret;
-
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
- GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- intf->tx_spb_cpu = p;
+ /* Restart from index 0 */
intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
- intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
- GFP_KERNEL);
- if (!intf->tx_cbs) {
- ret = -ENOMEM;
- goto free_tx_spb;
- }
-
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
- netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
-
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
@@ -805,26 +804,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
- return 0;
-
-free_tx_spb:
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- /* Free descriptors */
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- /* Free cbs */
- kfree(intf->tx_cbs);
}
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
@@ -912,12 +891,10 @@ static void bcmasp_netif_deinit(struct net_device *dev)
/* Disable interrupts */
bcmasp_enable_tx_irq(intf, 0);
bcmasp_enable_rx_irq(intf, 0);
+ bcmasp_enable_phy_irq(intf, 0);
netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-
netif_napi_del(&intf->rx_napi);
- bcmasp_reclaim_free_all_rx(intf);
}
static int bcmasp_stop(struct net_device *dev)
@@ -931,6 +908,8 @@ static int bcmasp_stop(struct net_device *dev)
bcmasp_netif_deinit(dev);
+ bcmasp_reclaim_free_buffers(intf);
+
phy_disconnect(dev->phydev);
/* Disable internal EPHY or external PHY */
@@ -1050,6 +1029,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netdev_err(dev, "could not attach to PHY\n");
goto err_phy_disable;
}
+
+ if (intf->internal_phy)
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
+
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
} else if (!intf->wolopts) {
ret = phy_resume(dev->phydev);
if (ret)
@@ -1069,17 +1054,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
intf->old_link = -1;
intf->old_pause = -1;
- ret = bcmasp_init_tx(intf);
- if (ret)
- goto err_phy_disconnect;
-
- /* Turn on asp */
+ bcmasp_init_tx(intf);
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
bcmasp_enable_tx(intf, 1);
- ret = bcmasp_init_rx(intf);
- if (ret)
- goto err_reclaim_tx;
-
+ bcmasp_init_rx(intf);
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
/* Turn on UniMAC TX/RX */
@@ -1093,12 +1073,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
-err_reclaim_tx:
- netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
- if (phydev)
- phy_disconnect(phydev);
err_phy_disable:
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
@@ -1114,13 +1088,24 @@ static int bcmasp_open(struct net_device *dev)
netif_dbg(intf, ifup, dev, "bcmasp open\n");
- ret = clk_prepare_enable(intf->parent->clk);
+ ret = bcmasp_alloc_buffers(intf);
if (ret)
return ret;
- ret = bcmasp_netif_init(dev, true);
+ ret = clk_prepare_enable(intf->parent->clk);
if (ret)
+ goto err_free_mem;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret) {
clk_disable_unprepare(intf->parent->clk);
+ goto err_free_mem;
+ }
+
+ return ret;
+
+err_free_mem:
+ bcmasp_reclaim_free_buffers(intf);
return ret;
}
@@ -1329,6 +1314,9 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, true);
+
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}
@@ -1377,6 +1365,9 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, false);
+
reg = umac_rl(intf, UMC_MPD_CTRL);
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb5580..c9b6acd8c892 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
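/*
 * The hunk above replaces a strscpy()+snprintf() pair that used a hard-coded
 * length with one bounded scnprintf() into buf_len bytes.  scnprintf() is
 * kernel-only; the userspace stand-in below shows the property relied on:
 * output is clamped to the buffer and silently truncated.
 */
#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	/* vsnprintf() reports what *would* have been written; clamp it. */
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[16];
	int kept = my_scnprintf(buf, sizeof(buf), "%sbc %d.%d.%d",
				"fw 1.2.3 ", 7, 13, 1);

	printf("'%s' (%d chars kept)\n", buf, kept);
	return 0;
}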
@@ -3537,7 +3538,7 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_inner_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3569,7 +3570,7 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3612,7 +3613,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+ u8 hlen = skb_network_offset(skb) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -3620,8 +3621,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
- pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ pbd->ip_hlen_w = skb_network_header_len(skb) >> 1;
hlen += pbd->ip_hlen_w;
@@ -3666,8 +3666,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
u8 outerip_off, outerip_len = 0;
/* from outer IP to transport */
- hlen_w = (skb_inner_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ hlen_w = skb_inner_transport_offset(skb) >> 1;
/* transport len */
hlen_w += inner_tcp_hdrlen(skb) >> 1;
@@ -3713,7 +3712,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
0, IPPROTO_TCP, 0));
}
- outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ outerip_off = (skb_network_offset(skb)) >> 1;
*global_data |=
outerip_off |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 81d232e6d05f..58956ed8f531 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -2081,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Storage only interface"
};
-static u32 bnx2x_eee_to_adv(u32 eee_adv)
+static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv)
{
- u32 modes = 0;
-
if (eee_adv & SHMEM_EEE_100M_ADV)
- modes |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_1G_ADV)
- modes |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_10G_ADV)
- modes |= ADVERTISED_10000baseT_Full;
-
- return modes;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
}
-static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift)
{
u32 eee_adv = 0;
- if (modes & ADVERTISED_100baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_100M_ADV;
- if (modes & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_1G_ADV;
- if (modes & ADVERTISED_10000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_10G_ADV;
return eee_adv << shift;
}
-static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2120,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
eee_cfg = bp->link_vars.eee_status;
- edata->supported =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
- SHMEM_EEE_SUPPORTED_SHIFT);
+ bnx2x_eee_to_linkmode(edata->supported,
+ (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ bnx2x_eee_to_linkmode(edata->advertised,
+ (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
- SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->lp_advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ bnx2x_eee_to_linkmode(edata->lp_advertised,
+ (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
/* SHMEM value is in 16u units --> Convert to 1u units. */
edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
@@ -2141,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2162,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- advertised = bnx2x_adv_to_eee(edata->advertised,
- SHMEM_EEE_ADV_STATUS_SHIFT);
+ advertised = bnx2x_linkmode_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
"Direct manipulation of EEE advertisement is not supported\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513ffe4..ea310057fe3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 39845d556baf..493b724848c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = {
static struct workqueue_struct *bnxt_pf_wq;
+#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+
+const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
+ .ports = {
+ .src = 0,
+ .dst = 0,
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_NONE,
+ .dst = BNXT_IPV6_MASK_NONE,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_ALL,
+ .dst = BNXT_IPV6_MASK_ALL,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v4addrs = {
+ .src = cpu_to_be32(0xffffffff),
+ .dst = cpu_to_be32(0xffffffff),
+ },
+ },
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
@@ -4168,8 +4211,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
- num_vnics += bp->rx_nr_rings;
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ num_vnics++;
+ else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ num_vnics += bp->rx_nr_rings;
+ }
#endif
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
@@ -4186,6 +4233,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
static void bnxt_init_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int i;
for (i = 0; i < bp->nr_vnics; i++) {
@@ -4199,20 +4247,33 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (!i) {
+ if (i == BNXT_VNIC_DEFAULT) {
u8 *key = (void *)vnic->rss_hash_key;
int k;
+ if (!bp->rss_hash_key_valid &&
+ !bp->rss_hash_key_updated) {
+ get_random_bytes(bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bp->rss_hash_key_updated)
+ continue;
+
+ bp->rss_hash_key_updated = false;
+ bp->rss_hash_key_valid = true;
+
bp->toeplitz_prefix = 0;
- get_random_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
for (k = 0; k < 8; k++) {
bp->toeplitz_prefix <<= 8;
bp->toeplitz_prefix |= key[k];
}
} else {
- memcpy(vnic->rss_hash_key,
- bp->vnic_info[0].rss_hash_key,
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
HW_HASH_KEY_SIZE);
}
}
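/*
 * Sketch of the key handling above: the RSS hash key is generated once (or
 * taken from user input) and then reused across re-initialisations instead of
 * being re-randomised each time.  rand() stands in for get_random_bytes();
 * the flag names follow the driver only loosely.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define KEY_SIZE 40

static unsigned char rss_hash_key[KEY_SIZE];
static bool key_valid, key_updated;

static void init_default_rss_key(unsigned char *vnic_key)
{
	int i;

	if (!key_valid && !key_updated) {
		for (i = 0; i < KEY_SIZE; i++)	/* get_random_bytes() in-kernel */
			rss_hash_key[i] = (unsigned char)rand();
		key_updated = true;
	}

	for (i = 0; i < KEY_SIZE; i++)
		vnic_key[i] = rss_hash_key[i];

	if (key_updated) {
		key_updated = false;
		key_valid = true;
	}
}

int main(void)
{
	unsigned char a[KEY_SIZE], b[KEY_SIZE];

	init_default_rss_key(a);
	init_default_rss_key(b);	/* second init reuses the same key */
	printf("keys match: %d\n",
	       a[0] == b[0] && a[KEY_SIZE - 1] == b[KEY_SIZE - 1]);
	return 0;
}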
@@ -4798,6 +4859,44 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
}
}
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ u8 type = fltr->type, flags = fltr->flags;
+
+ INIT_LIST_HEAD(&fltr->list);
+ if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
+ (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
+ list_add_tail(&fltr->list, &bp->usr_fltr_list);
+}
+
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ if (!list_empty(&fltr->list))
+ list_del_init(&fltr->list);
+}
+
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
+ continue;
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+}
+
+static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ hlist_del(&fltr->hash);
+ bnxt_del_one_usr_fltr(bp, fltr);
+ if (fltr->flags) {
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+}
+
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
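/*
 * The new helpers above keep user-created filters on a list and rely on
 * list_del_init() so that removing an entry that was never inserted (or was
 * already removed) is harmless.  A self-contained userspace version of that
 * idiom; only the list plumbing is modelled.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);		/* safe to delete again later */
}

struct filter {
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head usr_fltr_list;
	struct filter f = { .id = 1 };

	INIT_LIST_HEAD(&usr_fltr_list);
	INIT_LIST_HEAD(&f.list);

	list_add_tail(&f.list, &usr_fltr_list);
	if (!list_empty(&f.list))
		list_del_init(&f.list);
	list_del_init(&f.list);		/* second delete is a no-op */

	printf("list empty: %d\n", list_empty(&usr_fltr_list));
	return 0;
}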
@@ -4813,12 +4912,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
head = &bp->ntp_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bnxt_del_l2_filter(bp, fltr->l2_fltr);
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
if (!all)
@@ -4840,7 +4937,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -4859,14 +4956,10 @@ static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
head = &bp->l2_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- if (fltr->base.flags) {
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- }
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
}
@@ -5039,8 +5132,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
- bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
- BNXT_VNIC_UCAST_FLAG;
+ bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
+ BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
+ bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
+ BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
+
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
@@ -5342,6 +5440,7 @@ void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
if (fltr->base.flags) {
clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
@@ -5480,13 +5579,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
int bit_id;
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_MAX_FLTR, 0);
+ bp->max_fltr, 0);
if (bit_id < 0)
return -ENOMEM;
fltr->base.sw_id = (u16)bit_id;
+ bp->ntp_fltr_count++;
}
head = &bp->l2_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
atomic_set(&fltr->refcnt, 1);
return 0;
@@ -5519,6 +5620,40 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
return fltr;
}
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr) {
+ fltr = ERR_PTR(-EEXIST);
+ goto l2_filter_exit;
+ }
+ fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
+ if (!fltr) {
+ fltr = ERR_PTR(-ENOMEM);
+ goto l2_filter_exit;
+ }
+ fltr->base.flags = flags;
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ if (rc) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr);
+ return ERR_PTR(rc);
+ }
+
+l2_filter_exit:
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return fltr;
+}
+
static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
{
#ifdef CONFIG_BNXT_SRIOV
@@ -5650,15 +5785,38 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
mask[i] = cpu_to_be32(~0);
}
+static void
+bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
+ struct hwrm_cfa_ntuple_filter_alloc_input *req,
+ u16 rxq)
+{
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ struct bnxt_vnic_info *vnic;
+ u32 enables;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req->enables |= cpu_to_le32(enables);
+ req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
+ } else {
+ u32 flags;
+
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req->flags |= cpu_to_le32(flags);
+ req->dst_id = cpu_to_le16(rxq);
+ }
+}
+
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
+ struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
- u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
@@ -5668,16 +5826,16 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
l2_fltr = fltr->l2_fltr;
req->l2_filter_id = l2_fltr->base.filter_id;
-
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->base.rxq);
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ req->flags =
+ cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
+ } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
@@ -5687,25 +5845,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
- }
+ *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
} else {
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5713,14 +5861,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
- }
+ req->src_port = keys->ports.src;
+ req->src_port_mask = masks->ports.src;
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = masks->ports.dst;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -5971,7 +6115,10 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
for (i = 0; i < tbl_size; i++) {
u16 ring_id, j;
- j = bp->rss_indir_tbl[i];
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else
+ j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
ring_id = rxr->rx_ring_struct.fw_ring_id;
@@ -5985,10 +6132,13 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
- else
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+ } else {
bnxt_fill_hw_rss_tbl(bp, vnic);
+ }
if (bp->rss_hash_delta) {
req->hash_type = cpu_to_le32(bp->rss_hash_delta);
@@ -6061,7 +6211,7 @@ exit:
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_rss_qcfg_output *resp;
struct hwrm_vnic_rss_qcfg_input *req;
@@ -6165,6 +6315,7 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
@@ -6194,8 +6345,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req->rss_rule =
- cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
@@ -6292,7 +6442,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == 0)
+ if (vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6351,6 +6501,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
}
if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6918,6 +7076,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps =
le32_to_cpu(resp->alloc_hw_ring_grps);
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
@@ -6973,8 +7132,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
@@ -6983,52 +7141,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cp_p5 ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- }
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ enables |= hwr->grp ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
+ 0;
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
+ req->num_msix = cpu_to_le16(hwr->cp);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
- bnxt_rfs_supported(bp))
- req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -7036,51 +7184,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp_p5 ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7094,25 +7237,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7123,15 +7264,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
@@ -7174,6 +7312,24 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
return cp + ulp_stat;
}
+static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ if (!hwr->grp)
+ return 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
+
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ rss_ctx *= hwr->vnic;
+ return rss_ctx;
+ }
+ if (BNXT_VF(bp))
+ return BNXT_VF_MAX_RSS_CTX;
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
+ return hwr->grp + 1;
+ return 1;
+}
+
/* Check if a default RSS map needs to be setup. This function is only
* used on older firmware that does not require reserving RX rings.
*/
@@ -7189,13 +7345,24 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
}
}
+static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return 2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return rx_rings + 1;
+ }
+ return 1;
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
int rx = bp->rx_nr_rings, stat;
- int vnic = 1, grp = rx;
+ int vnic, grp = rx;
if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
bp->hwrm_spec_code >= 0x10601)
@@ -7210,9 +7377,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+
+ vnic = bnxt_get_total_vnics(bp, rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
@@ -7227,47 +7394,65 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
+}
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, rc;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -7279,17 +7464,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- tx_cp = bnxt_num_tx_to_cp(bp, tx);
- cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -7306,9 +7491,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
@@ -7317,9 +7502,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -7327,8 +7510,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -7342,15 +7524,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -7368,20 +7547,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -8709,6 +8883,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -8717,12 +8898,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
- pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
- pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
- pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
- pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
- pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
- pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
@@ -8825,6 +9000,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
+
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
+
hwrm_cfa_adv_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
@@ -9689,10 +9872,28 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
return __bnxt_setup_vnic(bp, vnic_id);
}
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+ u16 start_rx_ring_idx, int rx_rings)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ return bnxt_setup_vnic(bp, vnic_id);
+}
+
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
+ bp->rx_nr_rings);
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
@@ -9708,14 +9909,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
- break;
- }
- rc = bnxt_setup_vnic(bp, vnic_id);
- if (rc)
+ if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
break;
}
return rc;
@@ -9756,7 +9950,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int rc = 0;
unsigned int rx_nr_rings = bp->rx_nr_rings;
@@ -9785,7 +9979,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9794,7 +9988,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, 0);
+ rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10621,10 +10815,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
@@ -10766,7 +10960,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->module_status = resp->module_status;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds;
eee->eee_active = 0;
@@ -10775,8 +10969,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_active = 1;
fw_speeds = le16_to_cpu(
resp->link_partner_adv_eee_link_speed_mask);
- eee->lp_advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
}
/* Pull initial EEE config */
@@ -10786,8 +10979,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_enabled = 1;
fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
- eee->advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
if (resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
@@ -10957,7 +11149,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
static void bnxt_hwrm_set_eee(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
if (eee->eee_enabled) {
u16 eee_speeds;
@@ -11087,6 +11279,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
+ hw_resc->resv_rsscos_ctxs = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
@@ -11322,22 +11515,25 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
eee->eee_enabled = 0;
return false;
}
- if (eee->advertised & ~advertising) {
- eee->advertised = advertising & eee->supported;
+ if (linkmode_andnot(tmp, eee->advertised, advertising)) {
+ linkmode_and(eee->advertised, advertising,
+ eee->supported);
return false;
}
}
@@ -11442,6 +11638,42 @@ static int bnxt_reinit_after_abort(struct bnxt *bp)
return rc;
}
+static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ struct bnxt_ntuple_filter *ntp_fltr;
+ struct bnxt_l2_filter *l2_fltr;
+
+ if (list_empty(&fltr->list))
+ return;
+
+ if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
+ ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ ntp_fltr->l2_fltr = l2_fltr;
+ if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
+ fltr->sw_id);
+ }
+ } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
+ l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
+ if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
+ fltr->sw_id);
+ }
+ }
+}
+
+static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
+ bnxt_cfg_one_usr_fltr(bp, usr_fltr);
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -11528,6 +11760,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ bnxt_cfg_usr_fltrs(bp);
return 0;
open_err_irq:
@@ -11969,8 +12202,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp,
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
@@ -12004,7 +12237,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
@@ -12031,7 +12264,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
mask = vnic->rx_mask;
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
@@ -12062,7 +12295,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -12174,21 +12407,32 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
+ hwr.rss_ctx = 1;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ /* 2 VNICS: default + Ntuple */
+ hwr.vnic = 2;
+ hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ hwr.vnic;
+ goto check_reserve_vnic;
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.vnic = 1 + bp->rx_nr_rings;
+check_reserve_vnic:
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- /* RSS contexts not a limiting factor */
- if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
- max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
+ hwr.rss_ctx = hwr.vnic;
+
+ if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -12199,15 +12443,19 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ hwr.rss_ctx = 0;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
}
@@ -12246,14 +12494,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
return features;
}
+static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init, u32 flags, bool update_tpa)
+{
+ bnxt_close_nic(bp, irq_re_init, link_re_init);
+ bp->flags = flags;
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return bnxt_open_nic(bp, irq_re_init, link_re_init);
+}
+
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
+ bool update_tpa = false, update_ntuple = false;
struct bnxt *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
int rc = 0;
bool re_init = false;
- bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
if (features & NETIF_F_GRO_HW)
@@ -12269,6 +12527,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_NTUPLE)
flags |= BNXT_FLAG_RFS;
+ else
+ bnxt_clear_usr_fltrs(bp, true);
changes = flags ^ bp->flags;
if (changes & BNXT_FLAG_TPA) {
@@ -12282,6 +12542,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & ~BNXT_FLAG_TPA)
re_init = true;
+ if (changes & BNXT_FLAG_RFS)
+ update_ntuple = true;
+
if (flags != bp->flags) {
u32 old_flags = bp->flags;
@@ -12292,14 +12555,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
- if (re_init) {
- bnxt_close_nic(bp, false, false);
- bp->flags = flags;
- if (update_tpa)
- bnxt_set_ring_params(bp);
+ if (update_ntuple)
+ return bnxt_reinit_features(bp, true, false, flags, update_tpa);
+
+ if (re_init)
+ return bnxt_reinit_features(bp, false, false, flags, update_tpa);
- return bnxt_open_nic(bp, false, false);
- }
if (update_tpa) {
bp->flags = flags;
rc = bnxt_set_tpa(bp,
@@ -13129,9 +13390,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
- int tx_rings_needed, stats;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics;
if (tcs)
tx_sets = tcs;
@@ -13144,26 +13404,27 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
- BNXT_FLAG_RFS)
- vnics += rx;
+ hwr.vnic = bnxt_get_total_vnics(bp, rx);
- tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
- cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
- if (max_cp < cp)
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
return -ENOMEM;
- stats = cp;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.grp = rx;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.tx + rx;
+ return bnxt_hwrm_check_rings(bp, &hwr);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13766,6 +14027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
eth_hw_addr_set(dev, addr->sa_data);
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13888,7 +14150,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
if (skb)
return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}
@@ -13899,7 +14161,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
int bit_id;
spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return -ENOMEM;
@@ -13911,6 +14173,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
head = &bp->ntp_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
return 0;
@@ -13919,45 +14182,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
+ struct bnxt_flow_masks *masks1 = &f1->fmasks;
+ struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (f1->ntuple_flags != f2->ntuple_flags)
- return false;
-
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
+ masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false;
} else {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- memcmp(&keys1->addrs.v6addrs.src,
- &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src))) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- memcmp(&keys1->addrs.v6addrs.dst,
- &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst))))
+ if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
+ &masks2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
+ &masks2->addrs.v6addrs.dst))
return false;
}
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
- keys1->ports.src != keys2->ports.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
- keys1->ports.dst != keys2->ports.dst))
- return false;
-
- if (keys1->control.flags == keys2->control.flags &&
- f1->l2_fltr == f2->l2_fltr)
- return true;
-
- return false;
+ return keys1->ports.src == keys2->ports.src &&
+ masks1->ports.src == masks2->ports.src &&
+ keys1->ports.dst == keys2->ports.dst &&
+ masks1->ports.dst == masks2->ports.dst &&
+ keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr;
}
struct bnxt_ntuple_filter *
@@ -13988,7 +14245,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u32 flags;
if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
} else {
struct bnxt_l2_key key;
@@ -14022,10 +14279,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
- bp->hwrm_spec_code < 0x10601) {
- rc = -EPROTONOSUPPORT;
- goto err_free;
+ new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
}
flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) &&
@@ -14033,9 +14293,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
-
new_fltr->l2_fltr = l2_fltr;
- new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
@@ -14070,6 +14328,7 @@ void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count--;
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_del_l2_filter(bp, fltr->l2_fltr);
@@ -14264,6 +14523,70 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bridge_setlink = bnxt_bridge_setlink,
};
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_cp_ring_info *cpr;
+ u64 *sw;
+
+ cpr = &bp->bnapi[i]->cp_ring;
+ sw = cpr->stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+ stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_napi *bnapi;
+ u64 *sw;
+
+ bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ sw = bnapi->cp_ring.stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ rx->packets = bp->net_stats_prev.rx_packets;
+ rx->bytes = bp->net_stats_prev.rx_bytes;
+ rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+ tx->packets = bp->net_stats_prev.tx_packets;
+ tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+ .get_queue_stats_rx = bnxt_get_queue_stats_rx,
+ .get_queue_stats_tx = bnxt_get_queue_stats_tx,
+ .get_base_stats = bnxt_get_base_stats,
+};
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -14669,6 +14992,7 @@ void bnxt_print_device_info(struct bnxt *bp)
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bnxt_hw_resc *hw_resc;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -14710,6 +15034,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_free;
dev->netdev_ops = &bnxt_netdev_ops;
+ dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
@@ -14827,6 +15152,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ hw_resc = &bp->hw_resc;
+ bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNXT_L2_FLTR_MAX_FLTR;
+ /* Older firmware may not report these filters properly */
+ if (bp->max_fltr < BNXT_MAX_FLTR)
+ bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -14879,6 +15210,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_dl;
+ INIT_LIST_HEAD(&bp->usr_fltr_list);
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 47338b48ca20..dd849e715c9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1213,6 +1213,9 @@ struct bnxt_ring_grp_info {
u16 cp_fw_ring_id;
};
+#define BNXT_VNIC_DEFAULT 0
+#define BNXT_VNIC_NTUPLE 1
+
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
#define BNXT_MAX_CTX_PER_VNIC 8
@@ -1252,11 +1255,24 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+#define BNXT_VNIC_NTUPLE_FLAG 0x20
+};
+
+struct bnxt_hw_rings {
+ int tx;
+ int rx;
+ int grp;
+ int cp;
+ int cp_p5;
+ int stat;
+ int vnic;
+ int rss_ctx;
};
struct bnxt_hw_resc {
u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
u16 min_cp_rings;
u16 max_cp_rings;
u16 resv_cp_rings;
@@ -1281,6 +1297,12 @@ struct bnxt_hw_resc {
u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1315,12 +1337,6 @@ struct bnxt_pf_info {
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
- u32 max_encap_records;
- u32 max_decap_records;
- u32 max_tx_em_flows;
- u32 max_tx_wm_flows;
- u32 max_rx_em_flows;
- u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
u8 vf_resv_strategy;
@@ -1334,6 +1350,7 @@ struct bnxt_pf_info {
struct bnxt_filter_base {
struct hlist_node hash;
+ struct list_head list;
__le64 filter_id;
u8 type;
#define BNXT_FLTR_TYPE_NTUPLE 1
@@ -1355,19 +1372,21 @@ struct bnxt_filter_base {
struct rcu_head rcu;
};
+struct bnxt_flow_masks {
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
+
struct bnxt_ntuple_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct flow_keys fkeys;
+ struct bnxt_flow_masks fmasks;
struct bnxt_l2_filter *l2_fltr;
- u32 ntuple_flags;
-#define BNXT_NTUPLE_MATCH_SRC_IP 1
-#define BNXT_NTUPLE_MATCH_DST_IP 2
-#define BNXT_NTUPLE_MATCH_SRC_PORT 4
-#define BNXT_NTUPLE_MATCH_DST_PORT 8
-#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
- BNXT_NTUPLE_MATCH_DST_IP | \
- BNXT_NTUPLE_MATCH_SRC_PORT | \
- BNXT_NTUPLE_MATCH_DST_PORT)
u32 flow_id;
};
@@ -1394,6 +1413,7 @@ struct bnxt_ipv6_tuple {
#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
struct bnxt_l2_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct bnxt_l2_key l2_key;
atomic_t refcnt;
@@ -2217,6 +2237,14 @@ struct bnxt {
#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
+#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4)
+#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
+#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
+#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
u16 max_mtu;
u8 max_tc;
@@ -2301,12 +2329,17 @@ struct bnxt {
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
#define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
+ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2428,6 +2461,7 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ int max_fltr;
#define BNXT_L2_FLTR_MAX_FLTR 1024
#define BNXT_L2_FLTR_HASH_SIZE 32
@@ -2437,12 +2471,14 @@ struct bnxt {
u32 hash_seed;
u64 toeplitz_prefix;
+ struct list_head usr_fltr_list;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
struct mutex link_lock;
struct bnxt_link_info link_info;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
@@ -2641,10 +2677,16 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags);
int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index dc4ca706b0e2..1d240a27455a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -968,6 +968,7 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1058,11 +1059,17 @@ static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
+ u32 count;
+
cmd->data = bp->ntp_fltr_count;
rcu_read_lock();
+ count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
+ cmd->rule_cnt);
cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
- rule_locs, 0, cmd->rule_cnt);
+ rule_locs, count,
+ cmd->rule_cnt);
rcu_read_unlock();
return 0;
@@ -1074,13 +1081,44 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
int rc = -EINVAL;
- if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= bp->max_fltr)
return rc;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE,
+ fs->location);
+ if (fltr_base) {
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_l2_key *l2_key;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ l2_key = &l2_fltr->l2_key;
+ fs->flow_type = ETHER_FLOW;
+ ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
+ eth_broadcast_addr(m_ether->h_dest);
+ if (l2_key->vlan) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ fs->flow_type |= FLOW_EXT;
+ m_ext->vlan_tci = htons(0xfff);
+ h_ext->vlan_tci = htons(l2_key->vlan);
+ }
+ if (fltr_base->flags & BNXT_ACT_RING_DST)
+ fs->ring_cookie = fltr_base->rxq;
+ if (fltr_base->flags & BNXT_ACT_FUNC_DST)
+ fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ rcu_read_unlock();
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
fs->location);
@@ -1091,59 +1129,74 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
fkeys = &fltr->fkeys;
+ fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP)
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
+ else
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V4_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
+ if (fs->flow_type == TCP_V4_FLOW ||
+ fs->flow_type == UDP_V4_FLOW) {
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IPV6_USER_FLOW;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
+ else
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V6_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
+ fmasks->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
+ fmasks->addrs.v6addrs.dst;
+ if (fs->flow_type == TCP_V6_FLOW ||
+ fs->flow_type == UDP_V6_FLOW) {
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
}
}
- fs->ring_cookie = fltr->base.rxq;
+ if (fltr->base.flags & BNXT_ACT_DROP)
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
@@ -1152,17 +1205,78 @@ fltr_err:
return rc;
}
-#define IPV4_ALL_MASK ((__force __be32)~0)
-#define L4_PORT_ALL_MASK ((__force __be16)~0)
+static int bnxt_add_l2_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
+ u16 vnic_id;
+ u8 flags;
+ int rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ return -EOPNOTSUPP;
-static bool ipv6_mask_is_full(__be32 mask[4])
+ if (!is_broadcast_ether_addr(m_ether->h_dest))
+ return -EINVAL;
+ ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
+ key.vlan = 0;
+ if (fs->flow_type & FLOW_EXT) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
+ return -EINVAL;
+ key.vlan = ntohs(h_ext->vlan_tci);
+ }
+
+ if (vf) {
+ flags = BNXT_ACT_FUNC_DST;
+ vnic_id = 0xffff;
+ vf--;
+ } else {
+ flags = BNXT_ACT_RING_DST;
+ vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
+ }
+ fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = vnic_id;
+ fltr->base.rxq = ring;
+ fltr->base.vf_idx = vf;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ fs->location = fltr->base.sw_id;
+ return rc;
+}
+
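The ring_cookie consumed above packs the destination into one 64-bit value: the low 32 bits select an RX ring, and a one-based VF index sits above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. A minimal decode sketch using the uapi ethtool helpers (the print strings are illustrative only):

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_decode_ring_cookie(u64 ring_cookie)
{
	u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
	u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);

	if (vf)
		pr_info("destination: VF %u (cookie stores the index one-based)\n", vf - 1);
	else
		pr_info("destination: RX ring %u\n", ring);
}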
+static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
+ struct ethtool_usrip4_spec *ip_mask)
{
- return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+ if (ip_mask->l4_4_bytes || ip_mask->tos ||
+ ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
+ ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ return false;
+ return true;
}
-static bool ipv6_mask_is_zero(__be32 mask[4])
+static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
+ struct ethtool_usrip6_spec *ip_mask)
{
- return !(mask[0] | mask[1] | mask[2] | mask[3]);
+ if (ip_mask->l4_4_bytes || ip_mask->tclass ||
+ ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->l4_proto != IPPROTO_RAW &&
+ ip_spec->l4_proto != IPPROTO_ICMPV6))
+ return false;
+ return true;
}
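A rough usage sketch for the checks above, assuming it sits in the same file as the static helpers: an IPv4 ICMP user-flow spec passes only when the protocol is fully masked and no other fields are set.

#include <linux/ethtool.h>
#include <linux/in.h>

static bool example_icmp4_user_flow_ok(void)
{
	struct ethtool_usrip4_spec spec = {
		.ip_ver = ETH_RX_NFC_IP4,
		.proto  = IPPROTO_ICMP,
	};
	struct ethtool_usrip4_spec mask = {
		.proto  = 0xff,		/* i.e. BNXT_IP_PROTO_FULL_MASK */
	};

	return bnxt_verify_ntuple_ip4_flow(&spec, &mask);
}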
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
@@ -1172,6 +1286,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
struct bnxt_ntuple_filter *new_fltr, *fltr;
struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_flow_masks *fmasks;
u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
u32 idx;
@@ -1183,17 +1298,42 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
+ if (flow_type == IP_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec))
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_type == IPV6_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec))
+ return -EOPNOTSUPP;
+ }
+
new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
if (!new_fltr)
return -ENOMEM;
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
new_fltr->l2_fltr = l2_fltr;
+ fmasks = &new_fltr->fmasks;
fkeys = &new_fltr->fkeys;
rc = -EOPNOTSUPP;
switch (flow_type) {
+ case IP_USER_FLOW: {
+ struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
+
+ fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ break;
+ }
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
@@ -1203,32 +1343,26 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (flow_type == UDP_V4_FLOW)
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ case IPV6_USER_FLOW: {
+ struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- if (ip_mask->ip4src == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.src = ip_spec->ip4src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (ip_mask->ip4src) {
- goto ntuple_err;
- }
- if (ip_mask->ip4dst == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (ip_mask->ip4dst) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
break;
}
case TCP_V6_FLOW:
@@ -1241,40 +1375,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
- if (ipv6_mask_is_full(ip_mask->ip6src)) {
- fkeys->addrs.v6addrs.src =
- *(struct in6_addr *)&ip_spec->ip6src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
- goto ntuple_err;
- }
- if (ipv6_mask_is_full(ip_mask->ip6dst)) {
- fkeys->addrs.v6addrs.dst =
- *(struct in6_addr *)&ip_spec->ip6dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
break;
}
default:
rc = -EOPNOTSUPP;
goto ntuple_err;
}
- if (!new_fltr->ntuple_flags)
+ if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
goto ntuple_err;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
@@ -1287,8 +1402,11 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
}
rcu_read_unlock();
- new_fltr->base.rxq = ring;
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ new_fltr->base.flags |= BNXT_ACT_DROP;
+ else
+ new_fltr->base.rxq = ring;
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -1321,6 +1439,18 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (fs->location != RX_CLS_LOC_ANY)
return -EINVAL;
+ flow_type = fs->flow_type;
+ if ((flow_type == IP_USER_FLOW ||
+ flow_type == IPV6_USER_FLOW) &&
+ !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
+ return -EOPNOTSUPP;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
+ return bnxt_add_ntuple_cls_rule(bp, fs);
+
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
if (BNXT_VF(bp) && vf)
@@ -1330,12 +1460,8 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (!vf && ring >= bp->rx_nr_rings)
return -EINVAL;
- flow_type = fs->flow_type;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
- return -EINVAL;
- flow_type &= ~FLOW_EXT;
if (flow_type == ETHER_FLOW)
- rc = -EOPNOTSUPP;
+ rc = bnxt_add_l2_cls_rule(bp, fs);
else
rc = bnxt_add_ntuple_cls_rule(bp, fs);
return rc;
@@ -1346,11 +1472,22 @@ static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ u32 id = fs->location;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, id);
+ if (fltr_base) {
+ struct bnxt_l2_filter *l2_fltr;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ rcu_read_unlock();
+ bnxt_hwrm_l2_filter_free(bp, l2_fltr);
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
- BNXT_NTP_FLTR_HASH_SIZE,
- fs->location);
+ BNXT_NTP_FLTR_HASH_SIZE, id);
if (!fltr_base) {
rcu_read_unlock();
return -ENOENT;
@@ -1396,8 +1533,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
@@ -1415,8 +1558,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -1463,6 +1612,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
+ } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
} else if (tuple == 4) {
return -EINVAL;
}
@@ -1521,7 +1688,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
+ cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1596,7 +1763,7 @@ static int bnxt_get_rxfh(struct net_device *dev,
if (!bp->vnic_info)
return 0;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
@@ -1619,8 +1786,10 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key)
- return -EOPNOTSUPP;
+ if (rxfh->key) {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
@@ -1631,7 +1800,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
-
+ bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -1751,31 +1920,21 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+/* TODO: support 25GB, 40GB, 50GB with different cable type */
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
- u32 speed_mask = 0;
+ linkmode_zero(mode);
- /* TODO: support 25GB, 40GB, 50GB with different cable type */
- /* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= ADVERTISED_2500baseX_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseCR4_Full;
-
- if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
- speed_mask |= ADVERTISED_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_TX)
- speed_mask |= ADVERTISED_Asym_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_RX)
- speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- return speed_mask;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}
enum bnxt_media_type {
@@ -2643,23 +2802,22 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
return 0;
}
-u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
u16 fw_speed_mask = 0;
- /* only support autoneg at speed 100, 1000, and 10000 */
- if (advertising & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
- }
- if (advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
- }
- if (advertising & ADVERTISED_10000baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
- if (advertising & ADVERTISED_40000baseCR4_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
@@ -3884,12 +4042,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
-static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
struct bnxt *bp = netdev_priv(dev);
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -3899,7 +4058,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
- advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!edata->eee_enabled)
goto eee_ok;
@@ -3919,16 +4078,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
- if (!edata->advertised) {
- edata->advertised = advertising & eee->supported;
- } else if (edata->advertised & ~advertising) {
- netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
- edata->advertised, advertising);
+ if (linkmode_empty(edata->advertised)) {
+ linkmode_and(edata->advertised, advertising, eee->supported);
+ } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
+ netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
rc = -EINVAL;
goto eee_exit;
}
- eee->advertised = edata->advertised;
+ linkmode_copy(eee->advertised, edata->advertised);
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
@@ -3942,7 +4100,7 @@ eee_exit:
return rc;
}
-static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3954,12 +4112,12 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
- edata->advertised = 0;
+ linkmode_zero(edata->advertised);
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
- edata->lp_advertised = 0;
+ linkmode_zero(edata->lp_advertised);
return 0;
}
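The ethtool_keee conversion in this file swaps u32 ADVERTISED_* masks for linkmode bitmaps throughout. A minimal sketch of building such a mask, either bit by bit (as _bnxt_fw_to_linkmode() now does) or from a legacy u32 value; the helper name is made up:

#include <linux/ethtool.h>
#include <linux/linkmode.h>

static void example_build_advertising(unsigned long *mode)
{
	linkmode_zero(mode);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);

	/* Equivalent conversion from a legacy u32 advertising word */
	ethtool_convert_legacy_u32_to_link_mode(mode,
		ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full);
}

A caller would declare the mask with __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) and pass adv in, which is the pattern bnxt_set_eee() follows above.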
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a8ecef8ab82c..e2ee030237d4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -43,12 +43,14 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
+#define BNXT_IP_PROTO_FULL_MASK 0xFF
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
-u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
u32 bnxt_fw_to_ethtool_speed(u16);
-u16 bnxt_get_fw_auto_link_speeds(u32);
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d7ae71287b1..7396e2823e32 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1313,14 +1313,13 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
}
priv->eee.eee_enabled = enable;
- priv->eee.eee_active = enable;
priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
-static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1328,18 +1327,17 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
+ bool active;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1352,9 +1350,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false, false);
} else {
- p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
+ active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
+ bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1985c0ec4da2..7523b60b3c1c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -645,7 +645,7 @@ struct bcmgenet_priv {
struct bcmgenet_mib_counters mib;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define GENET_IO_MACRO(name, offset) \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 97ea76d443ab..9ada89355747 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -30,6 +30,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
+ bool active;
/* speed */
if (phydev->speed == SPEED_1000)
@@ -88,9 +89,9 @@ static void bcmgenet_mac_config(struct net_device *dev)
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
- priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.eee_enabled && active,
priv->eee.tx_lpi_enabled);
}
@@ -475,6 +476,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppd.wait_func = bcmgenet_mii_wait;
ppd.wait_func_data = priv;
ppd.bus_name = "bcmgenet MII bus";
+ /* Pass a reference to our "main" clock which is used for MDIO
+ * transfers
+ */
+ ppd.clk = priv->clk;
/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
* and is 2 * 32-bits word long, 8 bytes total.
@@ -673,7 +678,5 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
- clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
- clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 04964bbe08cf..eee759054aad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2338,10 +2338,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
u32 val;
- struct ethtool_eee *dest = &tp->eee;
+ struct ethtool_keee *dest = &tp->eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return;
@@ -2362,13 +2362,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
/* Pull lp advertised settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
return;
- dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
/* Pull advertised and eee_enabled settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
return;
dest->eee_enabled = !!val;
- dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
/* Pull tx_lpi_enabled */
val = tr32(TG3_CPMU_EEE_MODE);
@@ -4354,23 +4354,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- val = 0;
- /* Advertise 100-BaseTX EEE ability */
- if (advertise & ADVERTISED_100baseT_Full)
- val |= MDIO_AN_EEE_ADV_100TX;
- /* Advertise 1000-BaseT EEE ability */
- if (advertise & ADVERTISED_1000baseT_Full)
- val |= MDIO_AN_EEE_ADV_1000T;
-
- if (!tp->eee.eee_enabled) {
+ if (!tp->eee.eee_enabled)
val = 0;
- tp->eee.advertised = 0;
- } else {
- tp->eee.advertised = advertise &
- (ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
+ else
+ val = ethtool_adv_to_mmd_eee_adv_t(advertise);
+ mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
if (err)
val = 0;
@@ -4618,7 +4607,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee = {};
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return true;
@@ -4626,13 +4615,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp)
tg3_eee_pull_config(tp, &eee);
if (tp->eee.eee_enabled) {
- if (tp->eee.advertised != eee.advertised ||
+ if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
return false;
} else {
/* EEE is disabled but we're advertising */
- if (eee.advertised)
+ if (!linkmode_empty(eee.advertised))
return false;
}
@@ -14180,7 +14169,7 @@ static int tg3_set_coalesce(struct net_device *dev,
return 0;
}
-static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14189,7 +14178,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (edata->advertised != tp->eee.advertised) {
+ if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
netdev_warn(tp->dev,
"Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
@@ -14202,7 +14191,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EINVAL;
}
- tp->eee = *edata;
+ tp->eee.eee_enabled = edata->eee_enabled;
+ tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
+ tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
tg3_warn_mgmt_link_flap(tp);
@@ -14217,7 +14208,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -15655,10 +15646,13 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
- tp->eee.supported = SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full;
- tp->eee.advertised = ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full;
+ linkmode_zero(tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_copy(tp->eee.advertised, tp->eee.supported);
+
tp->eee.eee_enabled = 1;
tp->eee.tx_lpi_enabled = 1;
tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
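tg3 now stores EEE advertisements as linkmode masks and translates to and from the clause-45 EEE advertisement register with the mdio helpers. A small sketch of that round trip (not the exact calls the driver makes on the write path):

#include <linux/mdio.h>
#include <linux/linkmode.h>

static void example_eee_adv_roundtrip(unsigned long *advertised)
{
	u32 reg;

	/* Pack the linkmode mask into the MDIO_AN_EEE_ADV (MMD 7, reg 60) layout */
	reg = linkmode_to_mii_eee_cap1_t(advertised);

	/* Unpack a register value back into the mask, as tg3_eee_pull_config() does */
	mii_eee_cap1_mod_linkmode_t(advertised, reg);
}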
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5016475e5005..cf1b2b123c7e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3419,7 +3419,7 @@ struct tg3 {
unsigned int irq_cnt;
struct ethtool_coalesce coal;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
/* firmware info */
const char *fw_needed;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index dea9d2907666..b08356060fb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -60,7 +60,7 @@ int cxgb4_thermal_init(struct adapter *adap)
snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
ch_thermal->tzdev = thermal_zone_device_register_with_trips(ch_tz_name, &trip, num_trip,
- 0, adap,
+ adap,
&cxgb4_thermal_ops,
NULL, 0, 0);
if (IS_ERR(ch_thermal->tzdev)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b5ff2e1a9975..49d5808b7d11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -804,20 +804,6 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
}
/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- * @chip_ver: chip version
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
- unsigned int chip_ver)
-{
- return flits_to_desc(calc_tx_flits(skb, chip_ver));
-}
-
-/**
* cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 37bd38d772e8..d266a87297a5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -872,7 +872,7 @@ error:
return NETDEV_TX_OK;
}
-/* dev_base_lock rwlock held, nominally process context */
+/* rcu_read_lock potentially held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *net_stats)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 20fcb20b42ed..66b577835338 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index c2c5c589a5e3..44af1d13d931 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -590,5 +590,6 @@ module_pci_driver(pci_driver);
module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+MODULE_DESCRIPTION("Beckhoff CX5020 EtherCAT Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 64eadd320798..4b15af6b7122 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -229,8 +229,10 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
* would delay a working loopback anyway, let's ensure that loopback
* is working immediately by setting link mode directly
*/
- if (!retval && enable)
+ if (!retval && enable) {
+ netif_carrier_on(adapter->netdev);
tsnep_set_link_mode(adapter);
+ }
return retval;
}
@@ -238,7 +240,7 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
- struct ethtool_eee ethtool_eee;
+ struct ethtool_keee ethtool_keee;
int retval;
retval = phy_connect_direct(adapter->netdev, adapter->phydev,
@@ -257,8 +259,8 @@ static int tsnep_phy_open(struct tsnep_adapter *adapter)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* disable EEE autoneg, EEE not supported by TSNEP */
- memset(&ethtool_eee, 0, sizeof(ethtool_eee));
- phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+ memset(&ethtool_keee, 0, sizeof(ethtool_keee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
adapter->phydev->irq = PHY_MAC_INTERRUPT;
phy_start(adapter->phydev);
@@ -1266,6 +1268,14 @@ static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
return desc_refilled;
}
+static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
+{
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+}
+
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
@@ -1627,10 +1637,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ tsnep_xsk_rx_need_wakeup(rx, desc_available);
return done;
}
@@ -1775,14 +1782,8 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
* first polling would be too late as need wakeup signalisation would
* be delayed for an indefinite time
*/
- if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- int desc_available = tsnep_rx_desc_available(rx);
-
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
- }
+ if (xsk_uses_need_wakeup(rx->xsk_pool))
+ tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
}
static bool tsnep_pending(struct tsnep_queue *queue)
@@ -2570,8 +2571,7 @@ static int tsnep_probe(struct platform_device *pdev)
mutex_init(&adapter->rxnfc_lock);
INIT_LIST_HEAD(&adapter->rxnfc_rules);
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
netdev->mem_start = io->start;
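The probe change above folds platform_get_resource() and devm_ioremap_resource() into a single call. A self-contained sketch of that pattern (device and function names are made up):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Looks up MEM resource 0, ioremaps it, and also returns the resource */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&pdev->dev, "mapped %pR\n", res);
	return 0;
}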
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index bfdbdab443ae..9f07f4947b63 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2402,7 +2402,7 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
int err;
if (!priv->phylink) {
@@ -2418,7 +2418,7 @@ static int enetc_phylink_connect(struct net_device *ndev)
}
/* disable EEE autoneg, until ENETC driver supports it */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phylink_ethtool_set_eee(priv->phylink, &edata);
phylink_start(priv->phylink);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a8fbcada6b01..a19cb2a786fd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -672,7 +672,7 @@ struct fec_enet_private {
unsigned int itr_clk_rate;
/* tx lpi eee mode */
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 432523b2c789..d7693fdf640d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -85,8 +85,6 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
-/* Pause frame feild and FIFO threshold */
-#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
@@ -240,8 +238,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
/* FEC receive acceleration */
-#define FEC_RACC_IPDIS (1 << 1)
-#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_IPDIS BIT(1)
+#define FEC_RACC_PRODIS BIT(2)
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
@@ -273,8 +271,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_BYTESWP BIT(8)
+/* FEC RCR bits definition */
+#define FEC_RCR_LOOP BIT(0)
+#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_MII BIT(2)
+#define FEC_RCR_PROMISC BIT(3)
+#define FEC_RCR_BC_REJ BIT(4)
+#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RMII BIT(8)
+#define FEC_RCR_10BASET BIT(9)
+/* TX WMARK bits */
+#define FEC_TXWMRK_STRFWD BIT(8)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -1062,7 +1075,7 @@ fec_restart(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
- u32 ecntl = 0x2; /* ETHEREN */
+ u32 ecntl = FEC_ECR_ETHEREN;
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1137,18 +1150,18 @@ fec_restart(struct net_device *ndev)
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- rcntl |= (1 << 8);
+ rcntl |= FEC_RCR_RMII;
else
- rcntl &= ~(1 << 8);
+ rcntl &= ~FEC_RCR_RMII;
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
else if (ndev->phydev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
+ rcntl &= ~FEC_RCR_10BASET;
else
- rcntl |= (1 << 9);
+ rcntl |= FEC_RCR_10BASET;
}
} else {
#ifdef FEC_MIIGSK_ENR
@@ -1181,7 +1194,7 @@ fec_restart(struct net_device *ndev)
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
ndev->phydev && ndev->phydev->pause)) {
- rcntl |= FEC_ENET_FCE;
+ rcntl |= FEC_RCR_FLOWCTL;
/* set FIFO threshold parameter to reduce overrun */
writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
@@ -1192,7 +1205,7 @@ fec_restart(struct net_device *ndev)
/* OPD */
writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
} else {
- rcntl &= ~FEC_ENET_FCE;
+ rcntl &= ~FEC_RCR_FLOWCTL;
}
#endif /* !defined(CONFIG_M5272) */
@@ -1207,13 +1220,13 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
- ecntl |= (1 << 8);
+ ecntl |= FEC_ECR_BYTESWP;
/* enable ENET store and forward mode */
- writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1312,7 +1325,7 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1331,7 +1344,7 @@ fec_stop(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
- writel(1, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
udelay(10);
}
} else {
@@ -1345,12 +1358,11 @@ fec_stop(struct net_device *ndev)
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
}
-
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
@@ -2005,6 +2017,37 @@ static int fec_get_mac(struct net_device *ndev)
/*
* Phy section
*/
+
+/* LPI Sleep Ts count based on tx clk (clk_ref).
+ * The lpi sleep cnt value = X us / (cycle_ns).
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_keee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+
+ if (enable) {
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2044,6 +2087,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -2403,6 +2448,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
else
phy_set_max_speed(phy_dev, 100);
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ phy_support_eee(phy_dev);
+
fep->link = 0;
fep->full_duplex = 0;
@@ -3109,50 +3157,11 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-/* LPI Sleep Ts count base on tx clk (clk_ref).
- * The lpi sleep cnt value = X us / (cycle_ns).
- */
-static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- return us * (fep->clk_ref_rate / 1000) / 1000;
-}
-
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- unsigned int sleep_cycle, wake_cycle;
- int ret = 0;
-
- if (enable) {
- ret = phy_init_eee(ndev->phydev, false);
- if (ret)
- return ret;
-
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
- wake_cycle = sleep_cycle;
- } else {
- sleep_cycle = 0;
- wake_cycle = 0;
- }
-
- p->tx_lpi_enabled = enable;
- p->eee_enabled = enable;
- p->eee_active = enable;
-
- writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
- writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
-
- return 0;
-}
-
static int
-fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3160,20 +3169,16 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->eee_enabled = p->eee_enabled;
- edata->eee_active = p->eee_active;
edata->tx_lpi_timer = p->tx_lpi_timer;
- edata->tx_lpi_enabled = p->tx_lpi_enabled;
return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int
-fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- int ret = 0;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3183,15 +3188,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
p->tx_lpi_timer = edata->tx_lpi_timer;
- if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
- !edata->tx_lpi_timer)
- ret = fec_enet_eee_mode_set(ndev, false);
- else
- ret = fec_enet_eee_mode_set(ndev, true);
-
- if (ret)
- return ret;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
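fec_enet_us_to_tx_cycle(), moved earlier in the file, converts tx_lpi_timer from microseconds into clk_ref cycles. A worked sketch of the arithmetic (the 125 MHz reference clock is only an example value):

/* cycles = us * (clk_ref_rate / 1000) / 1000
 * e.g. clk_ref_rate = 125000000 Hz and tx_lpi_timer = 250 us:
 *      250 * (125000000 / 1000) / 1000 = 250 * 125000 / 1000 = 31250 cycles
 */
static unsigned int example_us_to_tx_cycles(unsigned int clk_ref_rate, unsigned int us)
{
	return us * (clk_ref_rate / 1000) / 1000;
}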
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9ba15d3183d7..758535adc9ff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev,
unsigned long capabilities;
unsigned long *supported;
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
@@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev,
* (and therefore that xfi_pcs cannot be set). If we are defaulting to
* XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
*/
- if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
memac->xfi_pcs = pcs;
else
memac->sgmii_pcs = pcs;
@@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
- /* The internal connection to the serdes is XGMII, but this isn't
- * really correct for the phy mode (which is the external connection).
- * However, this is how all older device trees say that they want
- * 10GBASE-R (aka XFI), so just convert it for them.
- */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
-
/* TODO: The following interface modes are supported by (some) hardware
* but not by this driver:
* - 1000BASE-KX
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e3dfbd7a4236..a811238c018d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1649,7 +1649,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
linkmode_set_bit_array(phy_10_100_features_array,
ARRAY_SIZE(phy_10_100_features_array),
@@ -1681,7 +1681,7 @@ static int init_phy(struct net_device *dev)
phy_support_asym_pause(phydev);
/* disable EEE autoneg, EEE not supported by eTSEC */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phy_ethtool_set_eee(phydev, &edata);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index b80349154604..4814c96d5fe7 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -51,12 +52,16 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+#define GVE_MAX_RX_BUFFER_SIZE 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
+
#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048
@@ -150,6 +155,11 @@ struct gve_rx_compl_queue_dqo {
u32 mask; /* Mask for indices to the size of the ring */
};
+struct gve_header_buf {
+ u8 *data;
+ dma_addr_t addr;
+};
+
/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
@@ -252,19 +262,26 @@ struct gve_rx_ring {
/* track number of used buffers */
u16 used_buf_states_cnt;
+
+ /* Address info of the buffers for header-split */
+ struct gve_header_buf hdr_bufs;
} dqo;
};
u64 rbytes; /* free-running bytes received */
+ u64 rx_hsplit_bytes; /* free-running header bytes received */
u64 rpackets; /* free-running packets received */
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_hsplit_pkt; /* free-running packets with headers split */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+ /* free-running count of unsplit packets due to header buffer overflow or a zero hdr_len */
+ u64 rx_hsplit_unsplit_pkt;
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
@@ -622,6 +639,56 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
+/* Parameters for allocating queue page lists */
+struct gve_qpls_alloc_cfg {
+ struct gve_qpl_config *qpl_cfg;
+ struct gve_queue_config *tx_cfg;
+ struct gve_queue_config *rx_cfg;
+
+ u16 num_xdp_queues;
+ bool raw_addressing;
+ bool is_gqi;
+
+ /* Allocated resources are returned here */
+ struct gve_queue_page_list *qpls;
+};
+
+/* Parameters for allocating resources for tx queues */
+struct gve_tx_alloc_rings_cfg {
+ struct gve_queue_config *qcfg;
+
+ /* qpls and qpl_cfg must already be allocated */
+ struct gve_queue_page_list *qpls;
+ struct gve_qpl_config *qpl_cfg;
+
+ u16 ring_size;
+ u16 start_idx;
+ u16 num_rings;
+ bool raw_addressing;
+
+ /* Allocated resources are returned here */
+ struct gve_tx_ring *tx;
+};
+
+/* Parameters for allocating resources for rx queues */
+struct gve_rx_alloc_rings_cfg {
+ /* tx config is also needed to determine QPL ids */
+ struct gve_queue_config *qcfg;
+ struct gve_queue_config *qcfg_tx;
+
+ /* qpls and qpl_cfg must already be allocated */
+ struct gve_queue_page_list *qpls;
+ struct gve_qpl_config *qpl_cfg;
+
+ u16 ring_size;
+ u16 packet_buffer_size;
+ bool raw_addressing;
+ bool enable_header_split;
+
+ /* Allocated resources are returned here */
+ struct gve_rx_ring *rx;
+};
+
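A minimal sketch of filling in the new rx allocation config, assuming the struct definitions above are in scope; every value on the right-hand side is illustrative rather than taken from the driver:

static void example_fill_rx_cfg(struct gve_rx_alloc_rings_cfg *cfg,
				struct gve_queue_config *rx_qcfg,
				struct gve_queue_config *tx_qcfg,
				struct gve_queue_page_list *qpls,
				struct gve_qpl_config *qpl_cfg)
{
	cfg->qcfg = rx_qcfg;
	cfg->qcfg_tx = tx_qcfg;			/* needed to derive rx QPL ids */
	cfg->qpls = qpls;			/* must already be allocated */
	cfg->qpl_cfg = qpl_cfg;
	cfg->ring_size = 1024;			/* illustrative */
	cfg->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
	cfg->raw_addressing = false;
	cfg->enable_header_split = false;
	cfg->rx = NULL;				/* filled in by the allocation path */
}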
/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
* when the entire configure_device_resources command is zeroed out and the
* queue_format is not specified.
@@ -729,13 +796,17 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- int data_buffer_size_dqo;
+ u16 data_buffer_size_dqo;
+ u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
/* Interrupt coalescing settings */
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+
+ u16 header_buf_size; /* device configured, header-split supported if non-zero */
+ bool header_split_enabled; /* True if the header split is enabled by the user */
};
enum gve_service_task_flags_bit {
@@ -917,14 +988,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
priv->queue_format == GVE_DQO_QPL_FORMAT;
}
-/* Returns the number of tx queue page lists
- */
-static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
+/* Returns the number of tx queue page lists */
+static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
+ int num_xdp_queues,
+ bool is_qpl)
{
- if (!gve_is_qpl(priv))
+ if (!is_qpl)
return 0;
-
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return tx_cfg->num_queues + num_xdp_queues;
}
/* Returns the number of XDP tx queue page lists
@@ -937,14 +1008,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
return priv->num_xdp_queues;
}
-/* Returns the number of rx queue page lists
- */
-static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
+/* Returns the number of rx queue page lists */
+static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+ bool is_qpl)
{
- if (!gve_is_qpl(priv))
+ if (!is_qpl)
return 0;
-
- return priv->rx_cfg.num_queues;
+ return rx_cfg->num_queues;
}
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
@@ -957,59 +1027,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
+/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
+static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+{
+ return tx_cfg->max_queues + rx_qid;
+}
+
static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+/* Returns the index into priv->qpls where the first rx queue's QPL resides */
+static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
- return gve_rx_qpl_id(priv, 0);
+ return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls
- */
+/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
+ int tx_qid)
{
- int id = gve_tx_qpl_id(priv, tx_qid);
-
/* QPL already in use */
- if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+ if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
return NULL;
-
- set_bit(id, priv->qpl_cfg.qpl_id_map);
- return &priv->qpls[id];
+ set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
+ return &cfg->qpls[tx_qid];
}
-/* Returns a pointer to the next available rx qpl in the list of qpls
- */
+/* Returns a pointer to the next available rx qpl in the list of qpls */
static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
+ int rx_qid)
{
- int id = gve_rx_qpl_id(priv, rx_qid);
-
+ int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
/* QPL already in use */
- if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+ if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
return NULL;
-
- set_bit(id, priv->qpl_cfg.qpl_id_map);
- return &priv->qpls[id];
+ set_bit(id, cfg->qpl_cfg->qpl_id_map);
+ return &cfg->qpls[id];
}
-/* Unassigns the qpl with the given id
- */
-static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
+/* Unassigns the qpl with the given id */
+static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
{
- clear_bit(id, priv->qpl_cfg.qpl_id_map);
+ clear_bit(id, qpl_cfg->qpl_id_map);
}
-/* Returns the correct dma direction for tx and rx qpls
- */
+/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
{
- if (id < gve_rx_start_qpl_id(priv))
+ if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
@@ -1036,6 +1106,9 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+/* gqi napi handler defined in gve_main.c */
+int gve_napi_poll(struct napi_struct *napi, int budget);
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
@@ -1051,8 +1124,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
@@ -1061,7 +1138,15 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings_gqi(struct gve_priv *priv);
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit);
+bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 12fbd723ecc6..ae12ac38e18b 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -147,6 +148,23 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_jumbo_frames = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_BUFFER_SIZES:
+ if (option_length < sizeof(**dev_op_buffer_sizes) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Buffer Sizes",
+ (int)sizeof(**dev_op_buffer_sizes),
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_buffer_sizes))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Buffer Sizes");
+ *dev_op_buffer_sizes = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -164,7 +182,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -185,7 +204,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl);
+ dev_op_dqo_qpl, dev_op_buffer_sizes);
dev_opt = next_opt;
}
@@ -640,6 +659,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be16(rx_buff_ring_entries);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
+ if (priv->header_split_enabled)
+ cmd.create_rx_queue.header_buffer_size =
+ cpu_to_be16(priv->header_buf_size);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -755,7 +777,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_jumbo_frames
*dev_op_jumbo_frames,
const struct gve_device_option_dqo_qpl
- *dev_op_dqo_qpl)
+ *dev_op_dqo_qpl,
+ const struct gve_device_option_buffer_sizes
+ *dev_op_buffer_sizes)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -779,10 +803,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (priv->rx_pages_per_qpl == 0)
priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
+
+ if (dev_op_buffer_sizes &&
+ (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
+ priv->max_rx_buffer_size =
+ be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
+ priv->header_buf_size =
+ be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
+ dev_info(&priv->pdev->dev,
+ "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
+ priv->max_rx_buffer_size, priv->header_buf_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
@@ -816,7 +852,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames,
- &dev_op_dqo_qpl);
+ &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes);
if (err)
goto free_device_descriptor;
@@ -885,7 +922,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
- dev_op_jumbo_frames, dev_op_dqo_qpl);
+ dev_op_jumbo_frames, dev_op_dqo_qpl,
+ dev_op_buffer_sizes);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5865ccdccbd0..5ac972e45ff8 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -125,6 +125,15 @@ struct gve_device_option_jumbo_frames {
static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+struct gve_device_option_buffer_sizes {
+ /* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
+ __be32 supported_features_mask;
+ __be16 packet_buffer_size;
+ __be16 header_buffer_size;
+};
+
+static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -140,6 +149,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
@@ -149,10 +159,12 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -165,6 +177,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
+ gve_driver_capability_flexible_buffer_size = 5,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -176,7 +189,8 @@ enum gve_driver_capbility {
(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
- GVE_CAP1(gve_driver_capability_alt_miss_compl))
+ GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -260,7 +274,9 @@ struct gve_adminq_create_rx_queue {
__be16 packet_buffer_size;
__be16 rx_buff_ring_size;
u8 enable_rsc;
- u8 padding[5];
+ u8 padding1;
+ __be16 header_buffer_size;
+ u8 padding2[2];
};
static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index c36b93f0de15..b81584829c40 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_tx_free_rings_dqo(struct gve_priv *priv);
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
@@ -93,4 +101,6 @@ gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
gve_write_irq_doorbell_dqo(priv, block,
gve_setup_itr_interval_dqo(usecs));
}
+
+int gve_napi_poll_dqo(struct napi_struct *napi, int budget);
#endif /* _GVE_DQO_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index e5397aa1e48f..9aebfb843d9d 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -4,7 +4,6 @@
* Copyright (C) 2015-2021 Google, Inc.
*/
-#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
@@ -40,17 +39,18 @@ static u32 gve_get_msglevel(struct net_device *netdev)
* as declared in enum xdp_action inside file uapi/linux/bpf.h .
*/
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
- "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
- "rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
+ "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "rx_hsplit_unsplit_pkt",
"interface_up_cnt", "interface_down_cnt", "reset_cnt",
"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
- "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
- "rx_frag_alloc_cnt[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
+ "rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
+ "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -154,11 +154,13 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
- tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
+ tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
tmp_tx_pkts, tmp_tx_bytes;
- u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
+ rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
+ tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -185,8 +187,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
kfree(rx_qid_to_stats_idx);
return;
}
- for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
- rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
+ for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+ rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+ rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+ ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
@@ -195,18 +199,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
+ tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
+ tmp_rx_hsplit_unsplit_pkt =
+ rx->rx_hsplit_unsplit_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
+ rx_hsplit_pkt += tmp_rx_hsplit_pkt;
rx_bytes += tmp_rx_bytes;
rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
+ rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
@@ -227,6 +236,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = 0;
data[i++] = rx_pkts;
+ data[i++] = rx_hsplit_pkt;
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
@@ -238,6 +248,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = rx_hsplit_unsplit_pkt;
data[i++] = priv->interface_up_cnt;
data[i++] = priv->interface_down_cnt;
data[i++] = priv->reset_cnt;
@@ -277,6 +288,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
+ tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
@@ -284,6 +296,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
+ data[i++] = tmp_rx_hsplit_bytes;
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
@@ -480,6 +493,29 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->tx_max_pending = priv->tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+
+ if (!gve_header_split_supported(priv))
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+ else if (priv->header_split_enabled)
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ else
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+static int gve_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (priv->tx_desc_cnt != cmd->tx_pending ||
+ priv->rx_desc_cnt != cmd->rx_pending) {
+ dev_info(&priv->pdev->dev, "Modifying ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -655,6 +691,7 @@ static int gve_set_coalesce(struct net_device *netdev,
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -667,6 +704,7 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
+ .set_ringparam = gve_set_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
.set_tunable = gve_set_tunable,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 619bf63ec935..166bd827a6d7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -22,6 +22,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
+#include "gve_utils.h"
#define GVE_DEFAULT_RX_COPYBREAK (256)
@@ -252,7 +253,7 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
-static int gve_napi_poll(struct napi_struct *napi, int budget)
+int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
__be32 __iomem *irq_doorbell;
@@ -302,7 +303,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block =
container_of(napi, struct gve_notify_block, napi);
@@ -581,19 +582,59 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv);
}
-static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
- int (*gve_poll)(struct napi_struct *, int))
+static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
{
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ int err;
+
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
- netif_napi_add(priv->dev, &block->napi, gve_poll);
+ priv->num_registered_pages -= priv->qpls[i].num_entries;
+ return 0;
}
-static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+static int gve_register_qpl(struct gve_priv *priv, u32 i)
{
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ int num_rx_qpls;
+ int pages;
+ int err;
+
+ /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+ if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot register nonexisting QPL at index %d\n", i);
+ return -EINVAL;
+ }
+
+ pages = priv->qpls[i].num_entries;
+
+ if (pages + priv->num_registered_pages > priv->max_registered_pages) {
+ netif_err(priv, drv, priv->dev,
+ "Reached max number of registered pages %llu > %llu\n",
+ pages + priv->num_registered_pages,
+ priv->max_registered_pages);
+ return -EINVAL;
+ }
+
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
- netif_napi_del(&block->napi);
+ priv->num_registered_pages += pages;
+ return 0;
}
static int gve_register_xdp_qpls(struct gve_priv *priv)
@@ -602,55 +643,41 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
int err;
int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ err = gve_register_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
static int gve_register_qpls(struct gve_priv *priv)
{
+ int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
- start_id = gve_tx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
+ gve_is_qpl(priv));
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+
+ for (i = 0; i < num_tx_qpls; i++) {
+ err = gve_register_qpl(priv, i);
+ if (err)
return err;
- }
}
- start_id = gve_rx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ /* there might be a gap between the tx and rx qpl ids */
+ start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
+ for (i = 0; i < num_rx_qpls; i++) {
+ err = gve_register_qpl(priv, start_id + i);
+ if (err)
return err;
- }
}
+
return 0;
}
@@ -660,48 +687,40 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
int err;
int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ err = gve_unregister_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
static int gve_unregister_qpls(struct gve_priv *priv)
{
+ int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
- start_id = gve_tx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
+ gve_is_qpl(priv));
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+
+ for (i = 0; i < num_tx_qpls; i++) {
+ err = gve_unregister_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
- start_id = gve_rx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
+ for (i = 0; i < num_rx_qpls; i++) {
+ err = gve_unregister_qpl(priv, start_id + i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
@@ -776,120 +795,124 @@ static int gve_create_rings(struct gve_priv *priv)
return 0;
}
-static void add_napi_init_xdp_sync_stats(struct gve_priv *priv,
- int (*napi_poll)(struct napi_struct *napi,
- int budget))
+static void init_xdp_sync_stats(struct gve_priv *priv)
{
int start_id = gve_xdp_tx_start_queue_id(priv);
int i;
- /* Add xdp tx napi & init sync stats*/
+ /* Init stats */
for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
priv->tx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
}
}
-static void add_napi_init_sync_stats(struct gve_priv *priv,
- int (*napi_poll)(struct napi_struct *napi,
- int budget))
+static void gve_init_sync_stats(struct gve_priv *priv)
{
int i;
- /* Add tx napi & init sync stats*/
- for (i = 0; i < gve_num_tx_queues(priv); i++) {
- int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
-
+ for (i = 0; i < priv->tx_cfg.num_queues; i++)
u64_stats_init(&priv->tx[i].statss);
- priv->tx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
- }
- /* Add rx napi & init sync stats*/
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+ /* Init stats for XDP TX queues */
+ init_xdp_sync_stats(priv);
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
u64_stats_init(&priv->rx[i].statss);
- priv->rx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
+}
+
+static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
+ cfg->qcfg = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->qpls = priv->qpls;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->ring_size = priv->tx_desc_cnt;
+ cfg->start_idx = 0;
+ cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->tx = priv->tx;
+}
+
+static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+{
+ int i;
+
+ if (!priv->tx)
+ return;
+
+ for (i = start_id; i < start_id + num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_tx_stop_ring_gqi(priv, i);
+ else
+ gve_tx_stop_ring_dqo(priv, i);
}
}
-static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
+ int num_rings)
{
- if (gve_is_gqi(priv)) {
- gve_tx_free_rings_gqi(priv, start_id, num_rings);
- } else {
- gve_tx_free_rings_dqo(priv);
+ int i;
+
+ for (i = start_id; i < start_id + num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_tx_start_ring_gqi(priv, i);
+ else
+ gve_tx_start_ring_dqo(priv, i);
}
}
static int gve_alloc_xdp_rings(struct gve_priv *priv)
{
- int start_id;
+ struct gve_tx_alloc_rings_cfg cfg = {0};
int err = 0;
if (!priv->num_xdp_queues)
return 0;
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues);
+ gve_tx_get_curr_alloc_cfg(priv, &cfg);
+ cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
+ cfg.num_rings = priv->num_xdp_queues;
+
+ err = gve_tx_alloc_rings_gqi(priv, &cfg);
if (err)
return err;
- add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
+
+ gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
+ init_xdp_sync_stats(priv);
return 0;
}
-static int gve_alloc_rings(struct gve_priv *priv)
+static int gve_alloc_rings(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
- /* Setup tx rings */
- priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
- GFP_KERNEL);
- if (!priv->tx)
- return -ENOMEM;
-
if (gve_is_gqi(priv))
- err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
+ err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
else
- err = gve_tx_alloc_rings_dqo(priv);
+ err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
if (err)
- goto free_tx;
-
- /* Setup rx rings */
- priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
- GFP_KERNEL);
- if (!priv->rx) {
- err = -ENOMEM;
- goto free_tx_queue;
- }
+ return err;
if (gve_is_gqi(priv))
- err = gve_rx_alloc_rings(priv);
+ err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
else
- err = gve_rx_alloc_rings_dqo(priv);
+ err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
if (err)
- goto free_rx;
-
- if (gve_is_gqi(priv))
- add_napi_init_sync_stats(priv, gve_napi_poll);
- else
- add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
+ goto free_tx;
return 0;
-free_rx:
- kvfree(priv->rx);
- priv->rx = NULL;
-free_tx_queue:
- gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
free_tx:
- kvfree(priv->tx);
- priv->tx = NULL;
+ if (gve_is_gqi(priv))
+ gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
+ else
+ gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
return err;
}
@@ -937,52 +960,30 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_rx_free_rings(struct gve_priv *priv)
-{
- if (gve_is_gqi(priv))
- gve_rx_free_rings_gqi(priv);
- else
- gve_rx_free_rings_dqo(priv);
-}
-
static void gve_free_xdp_rings(struct gve_priv *priv)
{
- int ntfy_idx, start_id;
- int i;
+ struct gve_tx_alloc_rings_cfg cfg = {0};
+
+ gve_tx_get_curr_alloc_cfg(priv, &cfg);
+ cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
+ cfg.num_rings = priv->num_xdp_queues;
- start_id = gve_xdp_tx_start_queue_id(priv);
if (priv->tx) {
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
+ gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
+ gve_tx_free_rings_gqi(priv, &cfg);
}
}
-static void gve_free_rings(struct gve_priv *priv)
+static void gve_free_rings(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
- int num_tx_queues = gve_num_tx_queues(priv);
- int ntfy_idx;
- int i;
-
- if (priv->tx) {
- for (i = 0; i < num_tx_queues; i++) {
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_tx_free_rings(priv, 0, num_tx_queues);
- kvfree(priv->tx);
- priv->tx = NULL;
- }
- if (priv->rx) {
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_rx_free_rings(priv);
- kvfree(priv->rx);
- priv->rx = NULL;
+ if (gve_is_gqi(priv)) {
+ gve_tx_free_rings_gqi(priv, tx_cfg);
+ gve_rx_free_rings_gqi(priv, rx_cfg);
+ } else {
+ gve_tx_free_rings_dqo(priv, tx_cfg);
+ gve_rx_free_rings_dqo(priv, rx_cfg);
}
}
@@ -1004,21 +1005,13 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
- int pages)
+static int gve_alloc_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id, int pages)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int err;
int i;
- if (pages + priv->num_registered_pages > priv->max_registered_pages) {
- netif_err(priv, drv, priv->dev,
- "Reached max number of registered pages %llu > %llu\n",
- pages + priv->num_registered_pages,
- priv->max_registered_pages);
- return -EINVAL;
- }
-
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
@@ -1039,7 +1032,6 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM;
qpl->num_entries++;
}
- priv->num_registered_pages += pages;
return 0;
}
@@ -1053,9 +1045,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
+static void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ int id)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
if (!qpl->pages)
@@ -1072,19 +1065,30 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
- priv->num_registered_pages -= qpl->num_entries;
}
-static int gve_alloc_xdp_qpls(struct gve_priv *priv)
+static void gve_free_n_qpls(struct gve_priv *priv,
+ struct gve_queue_page_list *qpls,
+ int start_id,
+ int num_qpls)
+{
+ int i;
+
+ for (i = start_id; i < start_id + num_qpls; i++)
+ gve_free_queue_page_list(priv, &qpls[i], i);
+}
+
+static int gve_alloc_n_qpls(struct gve_priv *priv,
+ struct gve_queue_page_list *qpls,
+ int page_count,
+ int start_id,
+ int num_qpls)
{
- int start_id;
- int i, j;
int err;
+ int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- priv->tx_pages_per_qpl);
+ for (i = start_id; i < start_id + num_qpls; i++) {
+ err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
if (err)
goto free_qpls;
}
@@ -1092,95 +1096,89 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv)
return 0;
free_qpls:
- for (j = start_id; j <= i; j++)
- gve_free_queue_page_list(priv, j);
+ /* Must include the failing QPL too because gve_alloc_queue_page_list
+ * fails without cleaning up.
+ */
+ gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
return err;
}
-static int gve_alloc_qpls(struct gve_priv *priv)
+static int gve_alloc_qpls(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
{
- int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
+ int rx_start_id, tx_num_qpls, rx_num_qpls;
+ struct gve_queue_page_list *qpls;
int page_count;
- int start_id;
- int i, j;
int err;
- if (!gve_is_qpl(priv))
+ if (cfg->raw_addressing)
return 0;
- priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
- if (!priv->qpls)
+ qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
+ if (!qpls)
return -ENOMEM;
- start_id = gve_tx_start_qpl_id(priv);
- page_count = priv->tx_pages_per_qpl;
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- page_count);
- if (err)
- goto free_qpls;
+ cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
+ sizeof(unsigned long) * BITS_PER_BYTE;
+ cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!cfg->qpl_cfg->qpl_id_map) {
+ err = -ENOMEM;
+ goto free_qpl_array;
}
- start_id = gve_rx_start_qpl_id(priv);
+ /* Allocate TX QPLs */
+ page_count = priv->tx_pages_per_qpl;
+ tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
+ gve_is_qpl(priv));
+ err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
+ if (err)
+ goto free_qpl_map;
+ /* Allocate RX QPLs */
+ rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
/* For GQI_QPL number of pages allocated have 1:1 relationship with
* number of descriptors. For DQO, number of pages required are
* more than descriptors (because of out of order completions).
*/
- page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ?
- priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- page_count);
- if (err)
- goto free_qpls;
- }
-
- priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!priv->qpl_cfg.qpl_id_map) {
- err = -ENOMEM;
- goto free_qpls;
- }
+ page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
+ rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
+ err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
+ if (err)
+ goto free_tx_qpls;
+ cfg->qpls = qpls;
return 0;
-free_qpls:
- for (j = 0; j <= i; j++)
- gve_free_queue_page_list(priv, j);
- kvfree(priv->qpls);
- priv->qpls = NULL;
+free_tx_qpls:
+ gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
+free_qpl_map:
+ kvfree(cfg->qpl_cfg->qpl_id_map);
+ cfg->qpl_cfg->qpl_id_map = NULL;
+free_qpl_array:
+ kvfree(qpls);
return err;
}
-static void gve_free_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int i;
-
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++)
- gve_free_queue_page_list(priv, i);
-}
-
-static void gve_free_qpls(struct gve_priv *priv)
+static void gve_free_qpls(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
{
- int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
+ struct gve_queue_page_list *qpls = cfg->qpls;
int i;
- if (!priv->qpls)
+ if (!qpls)
return;
- kvfree(priv->qpl_cfg.qpl_id_map);
- priv->qpl_cfg.qpl_id_map = NULL;
+ kvfree(cfg->qpl_cfg->qpl_id_map);
+ cfg->qpl_cfg->qpl_id_map = NULL;
for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, i);
+ gve_free_queue_page_list(priv, &qpls[i], i);
- kvfree(priv->qpls);
- priv->qpls = NULL;
+ kvfree(qpls);
+ cfg->qpls = NULL;
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1278,58 +1276,178 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
static void gve_drain_page_cache(struct gve_priv *priv)
{
- struct page_frag_cache *nc;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- nc = &priv->rx[i].page_cache;
- if (nc->va) {
- __page_frag_cache_drain(virt_to_page(nc->va),
- nc->pagecnt_bias);
- nc->va = NULL;
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ page_frag_cache_drain(&priv->rx[i].page_cache);
+}
+
+static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
+{
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->is_gqi = gve_is_gqi(priv);
+ cfg->num_xdp_queues = priv->num_xdp_queues;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->tx_cfg = &priv->tx_cfg;
+ cfg->rx_cfg = &priv->rx_cfg;
+ cfg->qpls = priv->qpls;
+}
+
+static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
+{
+ cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_tx = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->enable_header_split = priv->header_split_enabled;
+ cfg->qpls = priv->qpls;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->ring_size = priv->rx_desc_cnt;
+ cfg->packet_buffer_size = gve_is_gqi(priv) ?
+ GVE_DEFAULT_RX_BUFFER_SIZE :
+ priv->data_buffer_size_dqo;
+ cfg->rx = priv->rx;
+}
+
+static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
+ gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
+ gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
+}
+
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+{
+ int i;
+
+ for (i = 0; i < num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
}
-static int gve_open(struct net_device *dev)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- struct gve_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (!priv->rx)
+ return;
+
+ for (i = 0; i < num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
+ }
+}
+
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+ gve_free_qpls(priv, qpls_alloc_cfg);
+}
+
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ int err;
+
+ err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
+ return err;
+ }
+ tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+ rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+ err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
+ goto free_qpls;
+ }
+
+ return 0;
+
+free_qpls:
+ gve_free_qpls(priv, qpls_alloc_cfg);
+ return err;
+}
+
+static void gve_queues_mem_remove(struct gve_priv *priv)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ priv->qpls = NULL;
+ priv->tx = NULL;
+ priv->rx = NULL;
+}
+
+/* The passed-in queue memory is stored into priv and the queues are made live.
+ * No memory is allocated. Passed-in memory is freed on errors.
+ */
+static int gve_queues_start(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ struct net_device *dev = priv->dev;
int err;
+ /* Record new resources into priv */
+ priv->qpls = qpls_alloc_cfg->qpls;
+ priv->tx = tx_alloc_cfg->tx;
+ priv->rx = rx_alloc_cfg->rx;
+
+ /* Record new configs into priv */
+ priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
+ priv->tx_cfg = *tx_alloc_cfg->qcfg;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
+ priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+
if (priv->xdp_prog)
priv->num_xdp_queues = priv->rx_cfg.num_queues;
else
priv->num_xdp_queues = 0;
- err = gve_alloc_qpls(priv);
- if (err)
- return err;
-
- err = gve_alloc_rings(priv);
- if (err)
- goto free_qpls;
+ gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = gve_reg_xdp_info(priv, dev);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = gve_register_qpls(priv);
if (err)
goto reset;
- if (!gve_is_gqi(priv)) {
- /* Hard code this for now. This may be tuned in the future for
- * performance.
- */
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
- }
+ priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
+ priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+
err = gve_create_rings(priv);
if (err)
goto reset;
@@ -1346,32 +1464,53 @@ static int gve_open(struct net_device *dev)
priv->interface_up_cnt++;
return 0;
-free_rings:
- gve_free_rings(priv);
-free_qpls:
- gve_free_qpls(priv);
- return err;
-
reset:
- /* This must have been called from a reset due to the rtnl lock
- * so just return at this point.
- */
if (gve_get_reset_in_progress(priv))
- return err;
- /* Otherwise reset before returning */
+ goto stop_and_free_rings;
gve_reset_and_teardown(priv, true);
/* if this fails there is nothing we can do so just ignore the return */
gve_reset_recovery(priv, false);
/* return the original error */
return err;
+stop_and_free_rings:
+ gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+ gve_queues_mem_remove(priv);
+ return err;
}
-static int gve_close(struct net_device *dev)
+static int gve_open(struct net_device *dev)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- netif_carrier_off(dev);
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ /* No need to free on error: ownership of resources is lost after
+ * calling gve_queues_start.
+ */
+ err = gve_queues_start(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int gve_queues_stop(struct gve_priv *priv)
+{
+ int err;
+
+ netif_carrier_off(priv->dev);
if (gve_get_device_rings_ok(priv)) {
gve_turndown(priv);
gve_drain_page_cache(priv);
@@ -1386,8 +1525,10 @@ static int gve_close(struct net_device *dev)
del_timer_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv);
- gve_free_rings(priv);
- gve_free_qpls(priv);
+
+ gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+
priv->interface_down_cnt++;
return 0;
@@ -1402,10 +1543,26 @@ err:
return gve_reset_recovery(priv, false);
}
+static int gve_close(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = gve_queues_stop(priv);
+ if (err)
+ return err;
+
+ gve_queues_mem_remove(priv);
+ return 0;
+}
+
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
+ int qpl_start_id;
int err;
+ qpl_start_id = gve_xdp_tx_start_queue_id(priv);
+
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@@ -1416,18 +1573,22 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
- gve_free_xdp_qpls(priv);
+
+ gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
+ int start_id;
int err;
- priv->num_xdp_queues = priv->tx_cfg.num_queues;
+ priv->num_xdp_queues = priv->rx_cfg.num_queues;
- err = gve_alloc_xdp_qpls(priv);
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
+ start_id, gve_num_xdp_qpls(priv));
if (err)
goto err;
@@ -1452,7 +1613,7 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
free_xdp_qpls:
- gve_free_xdp_qpls(priv);
+ gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@@ -1702,42 +1863,87 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int gve_adjust_config(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ int err;
+
+ /* Allocate resources for the new configuration */
+ err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to alloc new queues");
+ return err;
+ }
+
+ /* Teardown the device and free existing resources */
+ err = gve_close(priv->dev);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to close old queues");
+ gve_queues_mem_free(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ return err;
+ }
+
+ /* Bring the device back up again with the new resources. */
+ err = gve_queues_start(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
+ /* No need to free on error: ownership of resources is lost after
+ * calling gve_queues_start.
+ */
+ gve_turndown(priv);
+ return err;
+ }
+
+ return 0;
+}
+
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ struct gve_qpl_config new_qpl_cfg;
int err;
- if (netif_carrier_ok(priv->dev)) {
- /* To make this process as simple as possible we teardown the
- * device, set the new configuration, and then bring the device
- * up again.
- */
- err = gve_close(priv->dev);
- /* we have already tried to reset in close,
- * just fail at this point
- */
- if (err)
- return err;
- priv->tx_cfg = new_tx_config;
- priv->rx_cfg = new_rx_config;
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_open(priv->dev);
- if (err)
- goto err;
+ /* qpl_cfg is not read-only; it contains a map that gets updated as
+ * rings are allocated, which is why we cannot reuse the map in priv
+ * that has not been released yet.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
+ /* Relay the new config from ethtool */
+ qpls_alloc_cfg.tx_cfg = &new_tx_config;
+ tx_alloc_cfg.qcfg = &new_tx_config;
+ rx_alloc_cfg.qcfg_tx = &new_tx_config;
+ qpls_alloc_cfg.rx_cfg = &new_rx_config;
+ rx_alloc_cfg.qcfg = &new_rx_config;
+ tx_alloc_cfg.num_rings = new_tx_config.num_queues;
- return 0;
+ if (netif_carrier_ok(priv->dev)) {
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
}
/* Set the config for the next up. */
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
return 0;
-err:
- netif_err(priv, drv, priv->dev,
- "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
- gve_turndown(priv);
- return err;
}
static void gve_turndown(struct gve_priv *priv)
@@ -1853,40 +2059,91 @@ out:
priv->tx_timeo_cnt++;
}
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+{
+ if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
+ return GVE_MAX_RX_BUFFER_SIZE;
+ else
+ return GVE_DEFAULT_RX_BUFFER_SIZE;
+}
+
+/* Header split is not yet supported on non-DQO_RDA queue formats, even if the device advertises it */
+bool gve_header_split_supported(const struct gve_priv *priv)
+{
+ return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+}
+
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ bool enable_hdr_split;
+ int err = 0;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ return 0;
+
+ if (!gve_header_split_supported(priv)) {
+ dev_err(&priv->pdev->dev, "Header-split not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ enable_hdr_split = true;
+ else
+ enable_hdr_split = false;
+
+ if (enable_hdr_split == priv->header_split_enabled)
+ return 0;
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ rx_alloc_cfg.enable_header_split = enable_hdr_split;
+ rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+
+ if (netif_running(priv->dev))
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
+}
+
static int gve_set_features(struct net_device *netdev,
netdev_features_t features)
{
const netdev_features_t orig_features = netdev->features;
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
+ struct gve_qpl_config new_qpl_cfg;
int err;
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ /* qpl_cfg is not read-only; it contains a map that gets updated as
+ * rings are allocated, which is why we cannot reuse the map in priv
+ * that has not been released yet.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
- /* To make this process as simple as possible we
- * teardown the device, set the new configuration,
- * and then bring the device up again.
- */
- err = gve_close(netdev);
- /* We have already tried to reset in close, just fail
- * at this point.
- */
- if (err)
- goto err;
-
- err = gve_open(netdev);
- if (err)
- goto err;
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err) {
+ /* Revert the change on error. */
+ netdev->features = orig_features;
+ return err;
+ }
}
}
return 0;
-err:
- /* Reverts the change on error. */
- netdev->features = orig_features;
- netif_err(priv, drv, netdev,
- "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
- return err;
}
static const struct net_device_ops gve_netdev_ops = {
@@ -2051,6 +2308,8 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
+ priv->num_registered_pages = 0;
+
if (skip_describe_device)
goto setup_device;
@@ -2080,7 +2339,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
- priv->num_registered_pages = 0;
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
/* gvnic has one Notification Block per MSI-x vector, except for the
* management vector
@@ -2297,6 +2555,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 76615d47e055..20f5a9e7fae9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -23,7 +23,9 @@ static void gve_rx_free_buffer(struct device *dev,
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
-static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
+static void gve_rx_unfill_pages(struct gve_priv *priv,
+ struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
u32 slots = rx->mask + 1;
int i;
@@ -36,7 +38,7 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
@@ -49,16 +51,26 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
rx->data.page_info = NULL;
}
-static void gve_rx_free_ring(struct gve_priv *priv, int idx)
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ if (!gve_rx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_rx_remove_from_block(priv, idx);
+}
+
+static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
+ int idx = rx->q_num;
size_t bytes;
- gve_rx_remove_from_block(priv, idx);
-
- bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
rx->desc.desc_ring = NULL;
@@ -66,7 +78,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
- gve_rx_unfill_pages(priv, rx);
+ gve_rx_unfill_pages(priv, rx, cfg);
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
@@ -93,7 +105,8 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
struct gve_rx_slot_page_info *page_info,
- union gve_rx_data_slot *data_slot)
+ union gve_rx_data_slot *data_slot,
+ struct gve_rx_ring *rx)
{
struct page *page;
dma_addr_t dma;
@@ -101,14 +114,19 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
GFP_ATOMIC);
- if (err)
+ if (err) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return err;
+ }
gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
return 0;
}
-static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
+static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct gve_priv *priv = rx->gve;
u32 slots;
@@ -127,7 +145,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM;
if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->data.qpl) {
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -143,8 +161,9 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
&rx->data.data_ring[i].qpl_offset);
continue;
}
- err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
- &rx->data.data_ring[i]);
+ err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
+ &rx->data.page_info[i],
+ &rx->data.data_ring[i], rx);
if (err)
goto alloc_err_rda;
}
@@ -185,7 +204,7 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL;
return err;
@@ -207,13 +226,23 @@ static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
ctx->drop_pkt = false;
}
-static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ gve_rx_add_to_block(priv, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll);
+}
+
+static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
+ u32 slots = priv->rx_data_slot_cnt;
int filled_pages;
size_t bytes;
- u32 slots;
int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
@@ -223,9 +252,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->gve = priv;
rx->q_num = idx;
- slots = priv->rx_data_slot_cnt;
rx->mask = slots - 1;
- rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
+ rx->data.raw_addressing = cfg->raw_addressing;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -246,7 +274,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_slots;
}
- filled_pages = gve_prefill_rx_pages(rx);
+ filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
goto abort_with_copy_pool;
@@ -269,7 +297,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
(unsigned long)rx->data.data_bus);
/* alloc rx desc ring */
- bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL);
if (!rx->desc.desc_ring) {
@@ -277,15 +305,11 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_q_resources;
}
rx->cnt = 0;
- rx->db_threshold = priv->rx_desc_cnt / 2;
+ rx->db_threshold = slots / 2;
rx->desc.seqno = 1;
- /* Allocating half-page buffers allows page-flipping which is faster
- * than copying or allocating new pages.
- */
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
- gve_rx_add_to_block(priv, idx);
return 0;
@@ -294,7 +318,7 @@ abort_with_q_resources:
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
abort_filled:
- gve_rx_unfill_pages(priv, rx);
+ gve_rx_unfill_pages(priv, rx, cfg);
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -306,36 +330,58 @@ abort_with_slots:
return err;
}
-int gve_rx_alloc_rings(struct gve_priv *priv)
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx;
int err = 0;
- int i;
+ int i, j;
+
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_rx_alloc_ring(priv, i);
+ rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n",
i, err);
- break;
+ goto cleanup;
}
}
- /* Unallocate if there was an error */
- if (err) {
- int j;
- for (j = 0; j < i; j++)
- gve_rx_free_ring(priv, j);
- }
+ cfg->rx = rx;
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++)
+ gve_rx_free_ring_gqi(priv, &rx[j], cfg);
+ kvfree(rx);
return err;
}
-void gve_rx_free_rings_gqi(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx = cfg->rx;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_free_ring(priv, i);
+ if (!rx)
+ return;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++)
+ gve_rx_free_ring_gqi(priv, &rx[i], cfg);
+
+ kvfree(rx);
+ cfg->rx = NULL;
}
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
@@ -896,10 +942,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
if (gve_rx_alloc_buffer(priv, dev, page_info,
- data_slot)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
+ data_slot, rx)) {
break;
}
}
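gve_rx_alloc_rings_gqi() above sizes the ring array for max_queues with kvcalloc(), initializes only num_queues entries, and on failure unwinds just the rings that were set up before freeing the array; gve_rx_free_rings_gqi() mirrors that and clears cfg->rx so a repeated free is a no-op. A user-space sketch of the same allocate-then-unwind pattern, with illustrative ring_init()/ring_release() helpers:

#include <stdlib.h>

struct ring { void *buf; };

static int ring_init(struct ring *r, size_t bytes)
{
	r->buf = malloc(bytes);
	return r->buf ? 0 : -1;
}

static void ring_release(struct ring *r)
{
	free(r->buf);
	r->buf = NULL;
}

/* Allocate max_rings slots but initialize only num_rings of them. */
static struct ring *rings_alloc(int max_rings, int num_rings, size_t bytes)
{
	struct ring *rings = calloc(max_rings, sizeof(*rings));
	int i, j;

	if (!rings)
		return NULL;

	for (i = 0; i < num_rings; i++) {
		if (ring_init(&rings[i], bytes))
			goto cleanup;
	}
	return rings;

cleanup:
	/* Unwind only the rings that were successfully initialized. */
	for (j = 0; j < i; j++)
		ring_release(&rings[j]);
	free(rings);
	return NULL;
}

static void rings_free(struct ring **rings, int num_rings)
{
	int i;

	if (!*rings)
		return;
	for (i = 0; i < num_rings; i++)
		ring_release(&(*rings)[i]);
	free(*rings);
	*rings = NULL;   /* mirror cfg->rx = NULL so a second call is harmless */
}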
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f281e42a7ef9..8e8071308aeb 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -199,20 +199,42 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return 0;
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ if (rx->dqo.hdr_bufs.data) {
+ dma_free_coherent(hdev, priv->header_buf_size * buf_count,
+ rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
+ rx->dqo.hdr_bufs.data = NULL;
+ }
+}
+
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ if (!gve_rx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_rx_remove_from_block(priv, idx);
+}
+
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
+ int idx = rx->q_num;
size_t size;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
buffer_queue_slots = rx->dqo.bufq.mask + 1;
- gve_rx_remove_from_block(priv, idx);
-
if (rx->q_resources) {
dma_free_coherent(hdev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus);
@@ -226,7 +248,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
if (rx->dqo.qpl) {
- gve_unassign_qpl(priv, rx->dqo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
rx->dqo.qpl = NULL;
}
@@ -248,20 +270,44 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
kvfree(rx->dqo.buf_states);
rx->dqo.buf_states = NULL;
+ gve_rx_free_hdr_bufs(priv, rx);
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
+ &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
+ if (!rx->dqo.hdr_bufs.data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ gve_rx_add_to_block(priv, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
size_t size;
int i;
- const u32 buffer_queue_slots =
- priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt;
- const u32 completion_queue_slots = priv->rx_desc_cnt;
+ const u32 buffer_queue_slots = cfg->raw_addressing ?
+ priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -274,7 +320,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
- rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+ rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
priv->rx_pages_per_qpl;
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -283,6 +329,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.buf_states)
return -ENOMEM;
+ /* Allocate header buffers for header-split */
+ if (cfg->enable_header_split)
+ if (gve_rx_alloc_hdr_bufs(priv, rx))
+ goto err;
+
/* Set up linked list of buffer IDs */
for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
rx->dqo.buf_states[i].next = i + 1;
@@ -308,8 +359,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.bufq.desc_ring)
goto err;
- if (priv->queue_format != GVE_DQO_RDA_FORMAT) {
- rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ if (!cfg->raw_addressing) {
+ rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -320,12 +371,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->q_resources)
goto err;
- gve_rx_add_to_block(priv, idx);
-
return 0;
err:
- gve_rx_free_ring_dqo(priv, idx);
+ gve_rx_free_ring_dqo(priv, rx, cfg);
return -ENOMEM;
}
@@ -337,13 +386,26 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- int err = 0;
+ struct gve_rx_ring *rx;
+ int err;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_rx_alloc_ring_dqo(priv, i);
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n",
@@ -352,21 +414,30 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
}
}
+ cfg->rx = rx;
return 0;
err:
for (i--; i >= 0; i--)
- gve_rx_free_ring_dqo(priv, i);
-
+ gve_rx_free_ring_dqo(priv, &rx[i], cfg);
+ kvfree(rx);
return err;
}
-void gve_rx_free_rings_dqo(struct gve_priv *priv)
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx = cfg->rx;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_free_ring_dqo(priv, i);
+ if (!rx)
+ return;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++)
+ gve_rx_free_ring_dqo(priv, &rx[i], cfg);
+
+ kvfree(rx);
+ cfg->rx = NULL;
}
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
@@ -404,6 +475,10 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
buf_state->page_info.page_offset);
+ if (rx->dqo.hdr_bufs.data)
+ desc->header_buf_addr =
+ cpu_to_le64(rx->dqo.hdr_bufs.addr +
+ priv->header_buf_size * bufq->tail);
bufq->tail = (bufq->tail + 1) & bufq->mask;
complq->num_free_slots--;
@@ -419,7 +494,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const int data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = priv->data_buffer_size_dqo;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -606,13 +681,16 @@ static int gve_rx_append_frags(struct napi_struct *napi,
*/
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *compl_desc,
- int queue_idx)
+ u32 desc_idx, int queue_idx)
{
const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+ const bool hbo = compl_desc->header_buffer_overflow;
const bool eop = compl_desc->end_of_packet != 0;
+ const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
u16 buf_len;
+ u16 hdr_len;
if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
@@ -633,12 +711,35 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
buf_len = compl_desc->packet_len;
+ hdr_len = compl_desc->header_len;
/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
prefetch(buf_state->page_info.page);
+ /* Copy the header into the skb in the case of header split */
+ if (hsplit) {
+ int unsplit = 0;
+
+ if (hdr_len && !hbo) {
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ rx->dqo.hdr_bufs.data +
+ desc_idx * priv->header_buf_size,
+ hdr_len);
+ if (unlikely(!rx->ctx.skb_head))
+ goto error;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+ } else {
+ unsplit = 1;
+ }
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_hsplit_pkt++;
+ rx->rx_hsplit_unsplit_pkt += unsplit;
+ rx->rx_hsplit_bytes += hdr_len;
+ u64_stats_update_end(&rx->statss);
+ }
+
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
buf_state->page_info.page_offset,
@@ -781,7 +882,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* Do not read data until we own the descriptor */
dma_rmb();
- err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
gve_rx_free_skb(rx);
u64_stats_update_begin(&rx->statss);
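In the header-split path above, one coherent block provides a priv->header_buf_size slice per buffer-queue slot; on completion the driver copies hdr_len bytes from the slice selected by the descriptor index into a fresh skb, and counts the packet as unsplit when the header is missing or overflowed. A minimal standalone sketch of just the slice arithmetic; hdr_bufs/header_buf_size echo the fields in the diff, everything else is made up for illustration:

#include <stdint.h>
#include <string.h>

struct hdr_bufs {
	uint8_t *data;            /* one contiguous block, buf_count slices */
	size_t   header_buf_size; /* bytes reserved per buffer-queue slot */
};

/* Copy the split-off header for completion slot desc_idx into dst.
 * Returns the number of bytes copied, or 0 when the packet must be
 * treated as unsplit (no header, overflow, or dst too small).
 */
static size_t copy_split_header(const struct hdr_bufs *h, uint32_t desc_idx,
				uint16_t hdr_len, int overflow,
				uint8_t *dst, size_t dst_len)
{
	const uint8_t *slice = h->data + (size_t)desc_idx * h->header_buf_size;

	if (!hdr_len || overflow || hdr_len > dst_len)
		return 0;

	memcpy(dst, slice, hdr_len);
	return hdr_len;
}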
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 07ba124780df..4b9853adc113 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -196,29 +196,36 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake);
-static void gve_tx_free_ring(struct gve_priv *priv, int idx)
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ if (!gve_tx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_remove_from_block(priv, idx);
+}
+
+static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
struct device *hdev = &priv->pdev->dev;
+ int idx = tx->q_num;
size_t bytes;
u32 slots;
- gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- if (tx->q_num < priv->tx_cfg.num_queues) {
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
- netdev_tx_reset_queue(tx->netdev_txq);
- } else {
- gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
- }
-
dma_free_coherent(hdev, sizeof(*tx->q_resources),
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
if (!tx->raw_addressing) {
gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
tx->tx_fifo.qpl = NULL;
}
@@ -232,11 +239,23 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
-static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ gve_tx_add_to_block(priv, idx);
+
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll);
+}
+
+static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg,
+ struct gve_tx_ring *tx,
+ int idx)
+{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->tx_desc_cnt;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -245,23 +264,23 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
spin_lock_init(&tx->xdp_lock);
tx->q_num = idx;
- tx->mask = slots - 1;
+ tx->mask = cfg->ring_size - 1;
/* alloc metadata */
- tx->info = vcalloc(slots, sizeof(*tx->info));
+ tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
if (!tx->info)
return -ENOMEM;
/* alloc tx queue */
- bytes = sizeof(*tx->desc) * slots;
+ bytes = sizeof(*tx->desc) * cfg->ring_size;
tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
if (!tx->desc)
goto abort_with_info;
- tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
- tx->dev = &priv->pdev->dev;
+ tx->raw_addressing = cfg->raw_addressing;
+ tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
/* map Tx FIFO */
@@ -277,12 +296,6 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto abort_with_fifo;
- netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
- (unsigned long)tx->bus);
- if (idx < priv->tx_cfg.num_queues)
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
- gve_tx_add_to_block(priv, idx);
-
return 0;
abort_with_fifo:
@@ -290,7 +303,7 @@ abort_with_fifo:
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
if (!tx->raw_addressing)
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -300,36 +313,73 @@ abort_with_info:
return -ENOMEM;
}
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int err = 0;
- int i;
+ int i, j;
+
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc more than the max num of Tx rings\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx == 0) {
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ } else if (!tx) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc tx rings from a nonzero start idx without tx array\n");
+ return -EINVAL;
+ }
- for (i = start_id; i < start_id + num_rings; i++) {
- err = gve_tx_alloc_ring(priv, i);
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n",
i, err);
- break;
+ goto cleanup;
}
}
- /* Unallocate if there was an error */
- if (err) {
- int j;
- for (j = start_id; j < i; j++)
- gve_tx_free_ring(priv, j);
- }
+ cfg->tx = tx;
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring_gqi(priv, &tx[j], cfg);
+ if (cfg->start_idx == 0)
+ kvfree(tx);
return err;
}
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int i;
- for (i = start_id; i < start_id + num_rings; i++)
- gve_tx_free_ring(priv, i);
+ if (!tx)
+ return;
+
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ gve_tx_free_ring_gqi(priv, &tx[i], cfg);
+
+ if (cfg->start_idx == 0) {
+ kvfree(tx);
+ cfg->tx = NULL;
+ }
}
/* gve_tx_avail - Calculates the number of slots available in the ring
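The gve_tx_stop_ring_gqi()/gve_tx_start_ring_gqi() split above separates quiescing a ring (NAPI, block membership, queue state) from freeing its memory, so ring memory can be allocated or released independently of whether the datapath is armed. A tiny standalone sketch of that two-phase lifecycle under made-up names:

#include <stdbool.h>
#include <stdlib.h>

struct ring {
	void *mem;      /* owned by alloc/free */
	bool  armed;    /* owned by start/stop */
};

/* Phase 1: memory only - may run while another ring is still armed. */
static int ring_alloc(struct ring *r, size_t bytes)
{
	r->mem = malloc(bytes);
	r->armed = false;
	return r->mem ? 0 : -1;
}

static void ring_free(struct ring *r)
{
	free(r->mem);
	r->mem = NULL;
}

/* Phase 2: datapath state only - cheap, reversible, no allocation. */
static void ring_start(struct ring *r) { r->armed = true;  }
static void ring_stop(struct ring *r)  { r->armed = false; }

/* Swap in a replacement ring: allocate first, quiesce and free the old
 * one, then arm the new one; the old ring is untouched if alloc fails.
 */
static int ring_swap(struct ring *cur, struct ring *repl, size_t bytes)
{
	if (ring_alloc(repl, bytes))
		return -1;
	ring_stop(cur);
	ring_free(cur);
	ring_start(repl);
	return 0;
}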
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f59c4710f118..bc34b6cd3a3e 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -188,13 +188,27 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
}
}
-static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
- struct device *hdev = &priv->pdev->dev;
- size_t bytes;
+ if (!gve_tx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
+}
+
+static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int idx = tx->q_num;
+ size_t bytes;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -223,7 +237,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(priv, tx->dqo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
tx->dqo.qpl = NULL;
}
@@ -253,9 +267,22 @@ static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
return 0;
}
-static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ gve_tx_add_to_block(priv, idx);
+
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
+}
+
+static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg,
+ struct gve_tx_ring *tx,
+ int idx)
+{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
size_t bytes;
@@ -263,12 +290,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
- tx->dev = &priv->pdev->dev;
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ tx->dev = hdev;
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
- tx->mask = priv->tx_desc_cnt - 1;
+ tx->mask = cfg->ring_size - 1;
tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
priv->options_dqo_rda.tx_comp_ring_entries - 1 :
tx->mask;
@@ -327,8 +353,8 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto err;
- if (gve_is_qpl(priv)) {
- tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);
+ if (!cfg->raw_addressing) {
+ tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->dqo.qpl)
goto err;
@@ -336,22 +362,45 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
goto err;
}
- gve_tx_add_to_block(priv, idx);
-
return 0;
err:
- gve_tx_free_ring_dqo(priv, idx);
+ gve_tx_free_ring_dqo(priv, tx, cfg);
return -ENOMEM;
}
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int err = 0;
- int i;
+ int i, j;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_tx_alloc_ring_dqo(priv, i);
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc more than the max num of Tx rings\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx == 0) {
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ } else if (!tx) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc tx rings from a nonzero start idx without tx array\n");
+ return -EINVAL;
+ }
+
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n",
@@ -360,27 +409,32 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
}
}
+ cfg->tx = tx;
return 0;
err:
- for (i--; i >= 0; i--)
- gve_tx_free_ring_dqo(priv, i);
-
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring_dqo(priv, &tx[j], cfg);
+ if (cfg->start_idx == 0)
+ kvfree(tx);
return err;
}
-void gve_tx_free_rings_dqo(struct gve_priv *priv)
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- struct gve_tx_ring *tx = &priv->tx[i];
+ if (!tx)
+ return;
- gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
- gve_tx_clean_pending_packets(tx);
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- gve_tx_free_ring_dqo(priv, i);
+ if (cfg->start_idx == 0) {
+ kvfree(tx);
+ cfg->tx = NULL;
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 26e08d753270..2349750075a5 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -8,6 +8,14 @@
#include "gve_adminq.h"
#include "gve_utils.h"
+bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+ return block->tx != NULL;
+}
+
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
@@ -30,6 +38,14 @@ void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
queue_idx);
}
+bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+ return block->rx != NULL;
+}
+
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
@@ -48,11 +64,9 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
rx->ntfy_id = ntfy_idx;
}
-struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len)
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len)
{
- void *va = page_info->page_address + page_info->page_offset +
- page_info->pad;
struct sk_buff *skb;
skb = napi_alloc_skb(napi, len);
@@ -60,12 +74,21 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
return NULL;
__skb_put(skb, len);
- skb_copy_to_linear_data_offset(skb, 0, va, len);
+ skb_copy_to_linear_data_offset(skb, 0, data, len);
skb->protocol = eth_type_trans(skb, dev);
return skb;
}
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info, u16 len)
+{
+ void *va = page_info->page_address + page_info->page_offset +
+ page_info->pad;
+
+ return gve_rx_copy_data(dev, napi, va, len);
+}
+
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
page_info->pagecnt_bias--;
@@ -81,3 +104,18 @@ void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
page_ref_add(page_info->page, INT_MAX - pagecount);
}
}
+
+void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int))
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
+}
+
+void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_del(&block->napi);
+}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 324fd98a6112..bf2e9a0adb36 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -11,17 +11,25 @@
#include "gve.h"
+bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len);
+
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int));
+void gve_remove_napi(struct gve_priv *priv, int ntfy_idx);
#endif /* _GVE_UTILS_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 8a1027ad340d..d4293f76d69d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -12,7 +12,9 @@
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
-static struct class *hnae_class;
+static const struct class hnae_class = {
+ .name = "hnae",
+};
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
@@ -111,7 +113,7 @@ static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
+ dev = class_find_device(&hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -415,7 +417,7 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->owner = owner;
hdev->id = (int)atomic_inc_return(&id);
hdev->cls_dev.parent = hdev->dev;
- hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.class = &hnae_class;
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
@@ -448,13 +450,12 @@ EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
- hnae_class = class_create("hnae");
- return PTR_ERR_OR_ZERO(hnae_class);
+ return class_register(&hnae_class);
}
static void __exit hnae_exit(void)
{
- class_destroy(hnae_class);
+ class_unregister(&hnae_class);
}
subsys_initcall(hnae_init);
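The hnae change above follows the kernel-wide move away from class_create(): the struct class is defined statically as a const object and registered/unregistered, so callers such as class_find_device() and cls_dev.class simply take its address. A minimal module-style sketch of the same pattern; example_class and the module boilerplate are illustrative:

#include <linux/module.h>
#include <linux/device.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/* Register the statically defined class; no allocation to check. */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");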
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d7e175a9cb49..f19f1e1d1f9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -388,6 +388,7 @@ struct hnae3_dev_specs {
u16 mc_mac_size;
u32 mac_stats_num;
u8 tnl_num;
+ u8 hilink_version;
};
struct hnae3_client_ops {
@@ -819,6 +820,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool mqprio_destroy;
bool dcb_ets_active;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index d92ad6082d8e..652d71326231 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -351,7 +351,7 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
- {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+ {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
};
u32 i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 533c19d25e4f..552396518e08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -55,7 +55,7 @@
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
+#define HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT 1000000
enum hclge_opcode_type {
/* Generic commands */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 3b6dbf158b98..f72dc0cee30e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
if (hns3_nic_resetting(ndev))
return -EBUSY;
- if (h->kinfo.dcb_ops->ieee_setapp)
+ if (h->kinfo.dcb_ops->ieee_delapp)
return h->kinfo.dcb_ops->ieee_delapp(h, app);
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c083d1d10767..807eb3bbb11c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1097,6 +1097,8 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
*pos += scnprintf(buf + *pos, len - *pos,
"TX timeout threshold: %d seconds\n",
dev->watchdog_timeo / HZ);
+ *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
+ dev_specs->hilink_version);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index f1695c889d3a..19668a8d22f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2473,9 +2473,9 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
if (skb->encapsulation)
- len = skb_inner_transport_header(skb) - skb->data;
+ len = skb_inner_transport_offset(skb);
else
- len = skb_transport_header(skb) - skb->data;
+ len = skb_transport_offset(skb);
/* Assume L4 is 60 bytes, as TCP is the only protocol with a
* flexible value, and its max len is 60 bytes.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4d15eb73b972..9bb708fa42f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -828,7 +828,8 @@ struct hclge_dev_specs_1_cmd {
__le16 mc_mac_size;
u8 rsv1[6];
u8 tnl_num;
- u8 rsv2[5];
+ u8 hilink_version;
+ u8 rsv2[4];
};
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index b98301e205f7..eabbacb1c714 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -619,6 +619,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
return ret;
}
+ kinfo->tc_info.mqprio_destroy = !tc;
+
ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5ea9e59569ef..b4afb66efe5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -645,8 +645,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 1;
- handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ if (hdev->ae_dev->dev_specs.hilink_version !=
+ HCLGE_HILINK_H60) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ }
+
count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
count += 1;
@@ -884,7 +888,7 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
- {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
@@ -940,7 +944,7 @@ static void hclge_update_fec_support(struct hclge_mac *mac)
mac->supported);
}
-static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
@@ -948,10 +952,12 @@ static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
@@ -959,11 +965,13 @@ static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT,
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT,
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT,
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
@@ -971,10 +979,12 @@ static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
@@ -983,7 +993,9 @@ static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};
static void hclge_convert_setting_sr(u16 speed_ability,
@@ -1154,7 +1166,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
static u32 hclge_get_max_speed(u16 speed_ability)
{
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_200G_BITS)
return HCLGE_MAC_SPEED_200G;
if (speed_ability & HCLGE_SUPPORT_100G_BITS)
@@ -1350,6 +1362,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
ae_dev->dev_specs.tnl_num = req1->tnl_num;
+ ae_dev->dev_specs.hilink_version = req1->hilink_version;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -2890,7 +2903,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
int ret;
hdev->support_sfp_query = true;
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
@@ -12092,6 +12108,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_tc_config(hdev);
+
ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 51979cf71262..e821dd2f1528 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -191,9 +191,10 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
-#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_200G_R4_EXT_BIT BIT(8)
#define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
#define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
+#define HCLGE_SUPPORT_200G_R4_BIT BIT(11)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -201,6 +202,8 @@ enum HLCGE_PORT_TYPE {
(HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
#define HCLGE_SUPPORT_100G_BITS \
(HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
+#define HCLGE_SUPPORT_200G_BITS \
+ (HCLGE_SUPPORT_200G_R4_EXT_BIT | HCLGE_SUPPORT_200G_R4_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@@ -253,6 +256,12 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
+/* hilink version */
+enum hclge_hilink_version {
+ HCLGE_HILINK_H32 = 0,
+ HCLGE_HILINK_H60 = 1,
+};
+
#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 4b0d07ca2505..d4a0e0be7a72 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1123,10 +1123,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
+ req->mbx_src_vfid > hdev->num_req_vfs)) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %u\n",
- req->msg.code);
+ "dropped invalid mailbox message, code = %u, vfid = %u\n",
+ req->msg.code, req->mbx_src_vfid);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 80a2a0073d97..507d7ce26d83 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -108,7 +108,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u64 ns = nsec;
u32 sec_h;
- if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
return;
/* Since the BD does not have enough space for the higher 16 bits of
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index c58c31221762..00c3f2548bf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -2143,3 +2143,19 @@ int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
return ret;
}
+
+void hclge_reset_tc_config(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_knic_private_info *kinfo;
+
+ kinfo = &vport->nic.kinfo;
+
+ if (!kinfo->tc_info.mqprio_destroy)
+ return;
+
+ /* clear tc info, including mqprio_destroy and mqprio_active */
+ memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info));
+ hclge_tm_schd_info_update(hdev, 0);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 53eec6df5194..0985916629d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -277,4 +277,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
+void hclge_reset_tc_config(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 5e27470c6b1e..f2d4669c81cf 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
#ifdef DEBUG
printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
- printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
printk("%s: check, whether you set the right interrupt number!\n",dev->name);
#endif
sun3_82586_close(dev);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index d55638ad8704..639fbb12bd35 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -368,6 +368,15 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+
+config IGC_LEDS
+ def_bool LEDS_TRIGGER_NETDEV
+ depends on IGC && LEDS_CLASS
+ depends on LEDS_CLASS=y || IGC=m
+ help
+ Optional support for controlling the NIC LEDs with the netdev
+ LED trigger.
+
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 01f0f12035ca..3fcb8daaa243 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -171,8 +171,8 @@ static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
-module_param(eeprom_bad_csum_allow, int, 0);
-module_param(use_io, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0444);
+module_param(use_io, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fc0f98ea6133..dc553c51d79a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2186,7 +2186,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
-static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2223,16 +2223,16 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
if (ret_val)
goto release;
- edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->supported, phy_data);
/* EEE Advertised */
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
if (ret_val)
goto release;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
@@ -2262,11 +2262,13 @@ release:
return ret_val;
}
-static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
ret_val = e1000e_get_eee(netdev, &eee_curr);
@@ -2283,12 +2285,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
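The ethtool_eee to ethtool_keee conversion above turns the EEE advertisement into linkmode bitmaps, so the "only 100TX and/or 1000T full-duplex" check becomes: build a supported mask with linkmode_set_bit() and reject the request when linkmode_andnot() leaves any advertised bit set outside it. A kernel-style fragment of just that check, with the surrounding driver context assumed:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>

/* Reject any advertised EEE mode outside 100BASE-TX / 1000BASE-T full. */
static int validate_eee_advert(const unsigned long *advertised)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	__ETHTOOL_DECLARE_LINK_MODE_MASK(unsupported) = {};

	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);

	/* Non-zero result means at least one advertised bit is unsupported. */
	if (linkmode_andnot(unsupported, advertised, supported))
		return -EINVAL;

	return 0;
}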
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a2788fd5f8bb..19e450a5bd31 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- FIELD_GET(E1000_RAH_AV, mac_reg));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
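The ich8lan hunk above reverts a FIELD_GET() conversion: for a single-bit mask such as E1000_RAH_AV, FIELD_GET() normalizes the value to 0 or 1, while the restored (mac_reg & E1000_RAH_AV) >> 16 keeps the flag at bit 15 of the value written to BM_RAR_CTRL, which is presumably what that PHY-side register expects. A standalone demonstration of the difference, assuming the address-valid bit sits at position 31 as in the e1000 RAH layout:

#include <stdint.h>
#include <stdio.h>

#define RAH_AV (1u << 31)   /* address-valid bit, as in the e1000 RAH layout */

/* Plain-C equivalent of the kernel's FIELD_GET(): shift the masked value
 * down so the lowest mask bit lands at bit 0.
 */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t mac_reg = 0x80001234u;   /* AV set, plus some address bits */
	uint16_t as_field = (uint16_t)field_get(RAH_AV, mac_reg);
	uint16_t as_shift = (uint16_t)((mac_reg & RAH_AV) >> 16);

	/* FIELD_GET() yields 0 or 1 for a single-bit mask ... */
	printf("FIELD_GET-style: 0x%04x\n", (unsigned)as_field);
	/* ... while mask-and-shift keeps the bit at position 15. */
	printf("mask-and-shift:  0x%04x\n", (unsigned)as_shift);
	return 0;
}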
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index af5d9d97a0d6..cc8c531ec3df 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6688,14 +6688,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
-
- if (retval)
- return retval;
+ if (retval)
+ return retval;
+ }
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9b701615c7c6..ba24f3fa92c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -687,6 +687,54 @@ struct i40e_pf {
};
/**
+ * __i40e_pf_next_vsi - get next valid VSI
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return next non-NULL VSI pointer in pf->vsi array and
+ * update idx position. Returns NULL if no VSI is found.
+ **/
+static __always_inline struct i40e_vsi *
+__i40e_pf_next_vsi(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < pf->num_alloc_vsi) {
+ if (pf->vsi[*idx])
+ return pf->vsi[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_vsi(_pf, _i, _vsi) \
+ for (_i = 0, _vsi = __i40e_pf_next_vsi(_pf, &_i); \
+ _vsi; \
+ _i++, _vsi = __i40e_pf_next_vsi(_pf, &_i))
+
+/**
+ * __i40e_pf_next_veb - get next valid VEB
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return next non-NULL VEB pointer in pf->veb array and
+ * update idx position. Returns NULL if no VEB is found.
+ **/
+static __always_inline struct i40e_veb *
+__i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < I40E_MAX_VEB) {
+ if (pf->veb[*idx])
+ return pf->veb[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_veb(_pf, _i, _veb) \
+ for (_i = 0, _veb = __i40e_pf_next_veb(_pf, &_i); \
+ _veb; \
+ _i++, _veb = __i40e_pf_next_veb(_pf, &_i))
+
+/**
* i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
@@ -735,7 +783,6 @@ struct i40e_new_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
@@ -1120,14 +1167,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- struct i40e_vsi *vsi = pf->vsi[i];
-
- if (vsi && vsi->type == type)
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->type == type)
return vsi;
- }
return NULL;
}
@@ -1309,4 +1354,40 @@ static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+/**
+ * i40e_pf_get_vsi_by_seid - find VSI by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VSI
+ **/
+static inline struct i40e_vsi *
+i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_vsi *vsi;
+ int i;
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->seid == seid)
+ return vsi;
+
+ return NULL;
+}
+
+/**
+ * i40e_pf_get_veb_by_seid - find VEB by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VEB
+ **/
+static inline struct i40e_veb *
+i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == seid)
+ return veb;
+
+ return NULL;
+}
+
#endif /* _I40E_H_ */
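The two iterator macros added to i40e.h above wrap the common skip-the-NULL-slots walk over a sparse pointer array: a helper advances the index to the next populated slot, and the for-loop macro calls it again after every iteration. A reduced, self-contained sketch of the same pattern (the names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define MAX_ITEMS 8

struct item { int id; };

struct table {
	struct item *slots[MAX_ITEMS]; /* sparse: some entries are NULL */
};

static struct item *table_next_item(struct table *t, int *idx)
{
	while (*idx < MAX_ITEMS) {
		if (t->slots[*idx])
			return t->slots[*idx];
		(*idx)++;
	}
	return NULL;
}

/* Mirrors i40e_pf_for_each_vsi(): _i tracks the slot, _it the element */
#define table_for_each_item(_t, _i, _it) \
	for (_i = 0, _it = table_next_item(_t, &_i); \
	     _it; \
	     _i++, _it = table_next_item(_t, &_i))

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 5 };
	struct table t = { .slots = { [1] = &a, [5] = &b } };
	struct item *it;
	int i;

	table_for_each_item(&t, i, it)
		printf("slot %d -> id %d\n", i, it->id);
	return 0;
}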
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 306758428aef..b32071ee84af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -148,8 +148,6 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
u32 reg_idx;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
@@ -576,8 +574,6 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this client */
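The NULL checks removed above could never trigger: qv_info is the address of an element inside the qvlist_info->qv_info[] array, and the address of an array element of a valid object is never NULL. A compact illustration of why such a check is dead code:

#include <assert.h>
#include <stddef.h>

struct qv_info { int v_idx; };

struct qvlist { int num; struct qv_info qv[4]; };

int main(void)
{
	struct qvlist list = { .num = 4 };

	for (int i = 0; i < list.num; i++) {
		struct qv_info *qv = &list.qv[i];

		/* &list.qv[i] is never NULL for a valid 'list' object */
		assert(qv != NULL);
	}
	return 0;
}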
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b96a92187ab3..8aa43aefe84c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -947,16 +947,16 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
+ struct i40e_vsi *vsi;
int v, err;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] && pf->vsi[v]->netdev) {
- err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ if (vsi->netdev) {
+ err = i40e_dcbnl_vsi_del_app(vsi, app);
dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- pf->vsi[v]->seid, err, app->selector,
+ vsi->seid, err, app->selector,
app->protocolid, app->priority);
}
- }
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef70ddbe9c2f..f9ba45f596c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -24,31 +24,13 @@ enum ring_type {
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
- int i;
-
- if (seid < 0)
+ if (seid < 0) {
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- else
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
- return pf->vsi[i];
-
- return NULL;
-}
-/**
- * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf: the PF structure to search for the veb
- * @seid: seid of the veb it is searching for
- **/
-static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
-{
- int i;
+ return NULL;
+ }
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == seid)
- return pf->veb[i];
- return NULL;
+ return i40e_pf_get_vsi_by_seid(pf, seid);
}
/**************************************************************
@@ -653,12 +635,11 @@ out:
**/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
- i, pf->vsi[i]->seid);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}
/**
@@ -696,15 +677,14 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
struct i40e_veb *veb;
- veb = i40e_dbg_find_veb(pf, seid);
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
if (!veb) {
dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
- veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid,
+ "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
+ veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -718,11 +698,8 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
struct i40e_veb *veb;
int i;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- veb = pf->veb[i];
- if (veb)
- i40e_dbg_dump_veb_seid(pf, veb->seid);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
}
/**
@@ -851,10 +828,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
struct i40e_veb *veb;
- int uplink_seid, i;
+ u8 enabled_tc = 0x1;
+ int uplink_seid;
cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
- if (cnt != 2) {
+ if (cnt == 0) {
+ uplink_seid = 0;
+ vsi_seid = 0;
+ } else if (cnt != 2) {
dev_info(&pf->pdev->dev,
"add relay: bad command string, cnt=%d\n",
cnt);
@@ -866,33 +847,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add relay: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
- break;
- if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
- uplink_seid != pf->mac_seid) {
+ if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
dev_info(&pf->pdev->dev,
"add relay: relay uplink %d not found\n",
uplink_seid);
goto command_write_done;
+ } else if (uplink_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ enabled_tc = vsi->tc_config.enabled_tc;
+ } else if (vsi_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI must be 0 for floating relay\n");
+ goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
- vsi->tc_config.enabled_tc);
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
dev_info(&pf->pdev->dev, "add relay failed\n");
} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ struct i40e_veb *veb;
int i;
+
cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev,
@@ -906,9 +890,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* find the veb */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == veb_seid)
break;
+
if (i >= I40E_MAX_VEB) {
dev_info(&pf->pdev->dev,
"del relay: relay %d not found\n", veb_seid);
@@ -916,7 +901,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
- i40e_veb_release(pf->veb[i]);
+ i40e_veb_release(veb);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
unsigned int v;
int ret;
@@ -1251,8 +1236,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (cnt == 0) {
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- i40e_vsi_reset_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_reset_stats(vsi);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c841779713f6..42e7e6cdaa6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5644,7 +5644,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
@@ -5664,16 +5664,12 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
- edata->supported = SUPPORTED_Autoneg;
- edata->lp_advertised = edata->supported;
-
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
- edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
@@ -5682,7 +5678,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static int i40e_is_eee_param_supported(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5691,7 +5687,6 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
u32 value;
const char *name;
} param[] = {
- {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
{edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
@@ -5709,7 +5704,7 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
return 0;
}
-static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 54eb55464e31..f86578857e8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -310,11 +310,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->id == id))
- return pf->vsi[i];
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->id == id)
+ return vsi;
return NULL;
}
@@ -552,24 +553,19 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
+ struct i40e_veb *veb;
int i;
memset(&pf->stats, 0, sizeof(pf->stats));
memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
pf->stat_offsets_loaded = false;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i]) {
- memset(&pf->veb[i]->stats, 0,
- sizeof(pf->veb[i]->stats));
- memset(&pf->veb[i]->stats_offsets, 0,
- sizeof(pf->veb[i]->stats_offsets));
- memset(&pf->veb[i]->tc_stats, 0,
- sizeof(pf->veb[i]->tc_stats));
- memset(&pf->veb[i]->tc_stats_offsets, 0,
- sizeof(pf->veb[i]->tc_stats_offsets));
- pf->veb[i]->stat_offsets_loaded = false;
- }
+ i40e_pf_for_each_veb(pf, i, veb) {
+ memset(&veb->stats, 0, sizeof(veb->stats));
+ memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
+ memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
+ memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
+ veb->stat_offsets_loaded = false;
}
pf->hw_csum_rx_error = 0;
}
@@ -2879,6 +2875,7 @@ err_no_memory_locked:
**/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
if (!pf)
@@ -2890,11 +2887,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
}
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
- !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
- int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
+ int ret = i40e_sync_vsi_filters(vsi);
if (ret) {
/* come back and try again later */
@@ -5166,6 +5162,7 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
**/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
@@ -5175,9 +5172,10 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
I40E_IWARP_IRQ_PILE_ID);
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- i40e_vsi_free_q_vectors(pf->vsi[i]);
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_free_q_vectors(vsi);
+
i40e_reset_interrupt_capability(pf);
}
@@ -5274,12 +5272,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_quiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_quiesce_vsi(vsi);
}
/**
@@ -5288,12 +5285,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
**/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_unquiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_unquiesce_vsi(vsi);
}
/**
@@ -5354,14 +5350,13 @@ wait_rx:
**/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v, ret = 0;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
- if (ret)
- break;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ ret = i40e_vsi_wait_queues_disabled(vsi);
+ if (ret)
+ break;
}
return ret;
@@ -6778,32 +6773,29 @@ out:
**/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
u8 tc_map = 0;
int ret;
- u8 v;
+ int v;
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
return;
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
- ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_veb_config_tc(veb, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
- pf->veb[v]->seid);
+ veb->seid);
/* Will try to configure as many components */
}
}
/* Update each VSI */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v])
- continue;
-
+ i40e_pf_for_each_vsi(pf, v, vsi) {
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
@@ -6812,17 +6804,17 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
- ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ ret = i40e_vsi_config_tc(vsi, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VSI seid=%d\n",
- pf->vsi[v]->seid);
+ vsi->seid);
/* Will try to configure as many components */
} else {
/* Re-configure VSI vectors based on updated TC map */
- i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
- if (pf->vsi[v]->netdev)
- i40e_dcbnl_set_all(pf->vsi[v]);
+ i40e_vsi_map_rings_to_vectors(vsi);
+ if (vsi->netdev)
+ i40e_dcbnl_set_all(vsi);
}
}
}
@@ -9257,7 +9249,9 @@ int i40e_close(struct net_device *netdev)
**/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
+ struct i40e_vsi *vsi;
u32 val;
+ int i;
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9313,29 +9307,20 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is enabled\n");
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that requested a re-init */
- dev_info(&pf->pdev->dev,
- "VSI reinit requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
+ dev_info(&pf->pdev->dev, "VSI reinit requested\n");
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
vsi->state))
- i40e_vsi_reinit_locked(pf->vsi[v]);
+ i40e_vsi_reinit_locked(vsi);
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
@@ -9888,6 +9873,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
**/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
int i;
@@ -9895,15 +9881,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
return;
pf = veb->pf;
- /* depth first... */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
- i40e_veb_link_event(pf->veb[i], link_up);
-
- /* ... now the local VSIs */
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
- i40e_vsi_link_event(pf->vsi[i], link_up);
+ /* Send link event to contained VSIs */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == veb->seid)
+ i40e_vsi_link_event(vsi, link_up);
}
/**
@@ -9995,6 +9976,8 @@ static void i40e_link_event(struct i40e_pf *pf)
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* if interface is down do nothing */
@@ -10015,15 +9998,14 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- i40e_update_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->netdev)
+ i40e_update_stats(vsi);
if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_update_veb_stats(veb);
}
i40e_ptp_rx_hang(pf);
@@ -10368,89 +10350,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
}
/**
- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
* @veb: pointer to the VEB instance
*
- * This is a recursive function that first builds the attached VSIs then
- * recurses in to build the next layer of VEB. We track the connections
- * through our own index numbers because the seid's from the HW could
- * change across the reset.
+ * This function rebuilds the attached VSIs. We track the connections
+ * through our own index numbers because the SEIDs from the HW could change
+ * across the reset.
**/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
struct i40e_vsi *ctl_vsi = NULL;
struct i40e_pf *pf = veb->pf;
- int v, veb_idx;
- int ret;
+ struct i40e_vsi *vsi;
+ int v, ret;
- /* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
- if (pf->vsi[v] &&
- pf->vsi[v]->veb_idx == veb->idx &&
- pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
- ctl_vsi = pf->vsi[v];
- break;
- }
- }
- if (!ctl_vsi) {
- dev_info(&pf->pdev->dev,
- "missing owner VSI for veb_idx %d\n", veb->idx);
- ret = -ENOENT;
- goto end_reconstitute;
+	/* As we do not maintain a PV (port virtualizer) switch element,
+	 * there can be only one non-floating VEB that has an uplink to the MAC SEID,
+ * and its control VSI is the main one.
+ */
+ if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid uplink SEID for VEB %d\n", veb->idx);
+ return -ENOENT;
}
- if (ctl_vsi != pf->vsi[pf->lan_vsi])
- ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- ret = i40e_add_vsi(ctl_vsi);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "rebuild of veb_idx %d owner VSI failed: %d\n",
- veb->idx, ret);
- goto end_reconstitute;
+
+ if (veb->uplink_seid == pf->mac_seid) {
+ /* Check that the LAN VSI has VEB owning flag set */
+ ctl_vsi = pf->vsi[pf->lan_vsi];
+
+ if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
+ !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
+ dev_err(&pf->pdev->dev,
+ "Invalid control VSI for VEB %d\n", veb->idx);
+ return -ENOENT;
+ }
+
+ /* Add the control VSI to switch */
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Rebuild of owner VSI for VEB %d failed: %d\n",
+ veb->idx, ret);
+ return ret;
+ }
+
+ i40e_vsi_reset_stats(ctl_vsi);
}
- i40e_vsi_reset_stats(ctl_vsi);
/* create the VEB in the switch and move the VSI onto the VEB */
ret = i40e_add_veb(veb, ctl_vsi);
if (ret)
- goto end_reconstitute;
+ return ret;
- if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
- veb->bridge_mode = BRIDGE_MODE_VEB;
- else
- veb->bridge_mode = BRIDGE_MODE_VEPA;
- i40e_config_bridge_mode(veb);
+ if (veb->uplink_seid) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ i40e_config_bridge_mode(veb);
+ }
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if (vsi == ctl_vsi)
continue;
- if (pf->vsi[v]->veb_idx == veb->idx) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
+ if (vsi->veb_idx == veb->idx) {
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of vsi_idx %d failed: %d\n",
v, ret);
- goto end_reconstitute;
+ return ret;
}
i40e_vsi_reset_stats(vsi);
}
}
- /* create any VEBs attached to this VEB - RECURSION */
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
- pf->veb[veb_idx]->uplink_seid = veb->seid;
- ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
- if (ret)
- break;
- }
- }
-
-end_reconstitute:
return ret;
}
@@ -10718,6 +10695,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
int ret = 0;
u32 v;
@@ -10732,11 +10710,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- i40e_clean_xps_state(pf->vsi[v]);
- pf->vsi[v]->seid = 0;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ i40e_clean_xps_state(vsi);
+ vsi->seid = 0;
}
i40e_shutdown_adminq(&pf->hw);
@@ -10850,6 +10826,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ struct i40e_veb *veb;
int ret;
u32 val;
int v;
@@ -10991,35 +10968,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
- /* find the one VEB connected to the MAC, and find orphans */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
-
- if (pf->veb[v]->uplink_seid == pf->mac_seid ||
- pf->veb[v]->uplink_seid == 0) {
- ret = i40e_reconstitute_veb(pf->veb[v]);
- if (!ret)
- continue;
+ /* Rebuild VEBs */
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_reconstitute_veb(veb);
+ if (!ret)
+ continue;
- /* If Main VEB failed, we're in deep doodoo,
- * so give up rebuilding the switch and set up
- * for minimal rebuild of PF VSI.
- * If orphan failed, we'll report the error
- * but try to keep going.
- */
- if (pf->veb[v]->uplink_seid == pf->mac_seid) {
- dev_info(&pf->pdev->dev,
- "rebuild of switch failed: %d, will try to set up simple PF connection\n",
- ret);
- vsi->uplink_seid = pf->mac_seid;
- break;
- } else if (pf->veb[v]->uplink_seid == 0) {
- dev_info(&pf->pdev->dev,
- "rebuild of orphan VEB failed: %d\n",
- ret);
- }
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (veb->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ vsi->uplink_seid = pf->mac_seid;
+ break;
+ } else if (veb->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
}
}
}
@@ -12098,6 +12069,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
*/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int err, i;
/* We cleared the MSI and MSI-X flags when disabling the old interrupt
@@ -12114,13 +12086,12 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
/* Now that we've re-acquired IRQs, we need to remap the vectors and
* rings together again.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
- if (err)
- goto err_unwind;
- i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ err = i40e_vsi_alloc_q_vectors(vsi);
+ if (err)
+ goto err_unwind;
+
+ i40e_vsi_map_rings_to_vectors(vsi);
}
err = i40e_setup_misc_vector(pf);
@@ -13122,19 +13093,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
struct nlattr *attr, *br_spec;
- int i, rem;
+ struct i40e_veb *veb;
+ int rem;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
@@ -13199,19 +13167,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
- int i;
+ struct i40e_veb *veb;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb)
return 0;
@@ -13245,12 +13208,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
@@ -13560,9 +13523,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -14145,7 +14108,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
- struct i40e_veb *veb = NULL;
+ struct i40e_veb *veb;
struct i40e_pf *pf;
u16 uplink_seid;
int i, n, bkt;
@@ -14209,29 +14172,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
/* If this was the last thing on the VEB, except for the
* controlling VSI, remove the VEB, which puts the controlling
- * VSI onto the next level down in the switch.
+ * VSI onto the uplink port.
*
* Well, okay, there's one more exception here: don't remove
- * the orphan VEBs yet. We'll wait for an explicit remove request
+ * the floating VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] &&
- pf->vsi[i]->uplink_seid == uplink_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- n++; /* count the VSIs */
- }
- }
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == uplink_seid)
- n++; /* count the VEBs */
- if (pf->veb[i]->seid == uplink_seid)
- veb = pf->veb[i];
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
+ if (veb && veb->uplink_seid) {
+ n = 0;
+
+ /* Count non-controlling VSIs present on the VEB */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == uplink_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ n++;
+
+ /* If there is no VSI except the control one then release
+ * the VEB and put the control VSI onto VEB uplink.
+ */
+ if (!n)
+ i40e_veb_release(veb);
}
- if (n == 0 && veb && veb->uplink_seid != 0)
- i40e_veb_release(veb);
return 0;
}
@@ -14389,8 +14351,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
u16 alloc_queue_pairs;
- int ret, i;
int v_idx;
+ int ret;
/* The requested uplink_seid must be either
* - the PF's port seid
@@ -14405,21 +14367,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
*
* Find which uplink_seid we were given and create a new VEB if needed
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
- veb = pf->veb[i];
- break;
- }
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
if (!veb && uplink_seid != pf->mac_seid) {
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
uplink_seid);
@@ -14448,10 +14398,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
i40e_config_bridge_mode(veb);
}
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb) {
dev_info(&pf->pdev->dev, "couldn't add VEB\n");
return NULL;
@@ -14681,29 +14628,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
struct i40e_pf *pf = branch->pf;
u16 branch_seid = branch->seid;
u16 veb_idx = branch->idx;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* release any VEBs on this VEB - RECURSION */
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == branch->seid)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == branch->seid)
+ i40e_switch_branch_release(veb);
/* Release the VSIs on this VEB, but not the owner VSI.
*
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!pf->vsi[i])
- continue;
- if (pf->vsi[i]->uplink_seid == branch_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- i40e_vsi_release(pf->vsi[i]);
- }
- }
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == branch_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ i40e_vsi_release(vsi);
/* There's one corner case where the VEB might not have been
* removed, so double check it here and remove it if needed.
@@ -14741,38 +14683,35 @@ static void i40e_veb_clear(struct i40e_veb *veb)
**/
void i40e_veb_release(struct i40e_veb *veb)
{
- struct i40e_vsi *vsi = NULL;
+ struct i40e_vsi *vsi, *vsi_it;
struct i40e_pf *pf;
int i, n = 0;
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ i40e_pf_for_each_vsi(pf, i, vsi_it)
+ if (vsi_it->uplink_seid == veb->seid) {
+ if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER)
+ vsi = vsi_it;
n++;
- vsi = pf->vsi[i];
}
- }
- if (n != 1) {
+
+	/* A floating VEB has to be empty and a regular one must have a
+	 * single owner VSI.
+ */
+ if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) {
dev_info(&pf->pdev->dev,
"can't remove VEB %d with %d VSIs left\n",
veb->seid, n);
return;
}
- /* move the remaining VSI to uplink veb */
- vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ /* For regular VEB move the owner VSI to uplink port */
if (veb->uplink_seid) {
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
vsi->uplink_seid = veb->uplink_seid;
- if (veb->uplink_seid == pf->mac_seid)
- vsi->veb_idx = I40E_NO_VEB;
- else
- vsi->veb_idx = veb->veb_idx;
- } else {
- /* floating VEB */
- vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ vsi->veb_idx = I40E_NO_VEB;
}
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
@@ -14790,8 +14729,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
- ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, false,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0,
+ veb->enabled_tc, vsi ? false : true,
&veb->seid, enable_stats, NULL);
/* get a VEB from the hardware */
@@ -14823,9 +14762,11 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
return -ENOENT;
}
- vsi->uplink_seid = veb->seid;
- vsi->veb_idx = veb->idx;
- vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ if (vsi) {
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ }
return 0;
}
@@ -14850,8 +14791,9 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
u16 uplink_seid, u16 vsi_seid,
u8 enabled_tc)
{
- struct i40e_veb *veb, *uplink_veb = NULL;
- int vsi_idx, veb_idx;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb;
+ int veb_idx;
int ret;
/* if one seid is 0, the other must be 0 to create a floating relay */
@@ -14864,26 +14806,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
- if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
- break;
- if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
- dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
- vsi_seid);
- return NULL;
- }
-
- if (uplink_seid && uplink_seid != pf->mac_seid) {
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] &&
- pf->veb[veb_idx]->seid == uplink_seid) {
- uplink_veb = pf->veb[veb_idx];
- break;
- }
- }
- if (!uplink_veb) {
- dev_info(&pf->pdev->dev,
- "uplink seid %d not found\n", uplink_seid);
+ if (vsi_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
return NULL;
}
}
@@ -14895,14 +14822,14 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
veb = pf->veb[veb_idx];
veb->flags = flags;
veb->uplink_seid = uplink_seid;
- veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
/* create the VEB in the switch */
- ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ ret = i40e_add_veb(veb, vsi);
if (ret)
goto err_veb;
- if (vsi_idx == pf->lan_vsi)
+
+ if (vsi && vsi->idx == pf->lan_vsi)
pf->lan_veb = veb->idx;
return veb;
@@ -14930,6 +14857,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
u8 element_type = ele->element_type;
u16 seid = le16_to_cpu(ele->seid);
+ struct i40e_veb *veb;
if (printconfig)
dev_info(&pf->pdev->dev,
@@ -14948,13 +14876,10 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
int v;
/* find existing or else empty VEB */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
- pf->lan_veb = v;
- break;
- }
- }
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
+ if (veb) {
+ pf->lan_veb = veb->idx;
+ } else {
v = i40e_veb_mem_alloc(pf);
if (v < 0)
break;
@@ -14967,7 +14892,6 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->veb[pf->lan_veb]->seid = seid;
pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
pf->veb[pf->lan_veb]->pf = pf;
- pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -15630,6 +15554,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
#endif /* CONFIG_I40E_DCB */
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
struct i40e_hw *hw;
u16 wol_nvm_bits;
@@ -15640,7 +15565,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
- u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15990,12 +15914,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
/* if FDIR VSI was set up, start it now */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_open(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_open(vsi);
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -16241,6 +16162,8 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int ret_code;
int i;
@@ -16298,24 +16221,19 @@ static void i40e_remove(struct pci_dev *pdev)
/* If there is a switch structure or any orphans, remove them.
* This will leave only the PF's VSI remaining.
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
-
- if (pf->veb[i]->uplink_seid == pf->mac_seid ||
- pf->veb[i]->uplink_seid == 0)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == pf->mac_seid ||
+ veb->uplink_seid == 0)
+ i40e_switch_branch_release(veb);
/* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- for (i = pf->num_alloc_vsi; i--;)
- if (pf->vsi[i]) {
- i40e_vsi_close(pf->vsi[i]);
- i40e_vsi_release(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ i40e_vsi_close(vsi);
+ i40e_vsi_release(vsi);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
@@ -16352,18 +16270,17 @@ unmap:
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
rtnl_lock();
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
- i40e_vsi_clear_rings(pf->vsi[i]);
- i40e_vsi_clear(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_clear(vsi);
+ pf->vsi[i] = NULL;
}
rtnl_unlock();
- for (i = 0; i < I40E_MAX_VEB; i++) {
- kfree(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb) {
+ kfree(veb);
pf->veb[i] = NULL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index af4269330581..ce1f11b8ad65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
**/
static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
{
- return (hw->aq.fw_maj_ver > maj ||
- (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min));
+ return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
}
#endif /* _I40E_PROTOTYPE_H_ */
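The corrected i40e_is_fw_ver_eq() above now matches only an exact major/minor pair; the old expression also returned true whenever the running firmware's major version was newer. A reduced sketch of the exact-match check, with illustrative values:

#include <stdbool.h>
#include <stdio.h>

struct fw_ver { unsigned short maj, min; };

/* Corrected behaviour: true only when both components match exactly */
static bool fw_ver_eq(struct fw_ver v, unsigned short maj, unsigned short min)
{
	return v.maj == maj && v.min == min;
}

int main(void)
{
	struct fw_ver v = { .maj = 7, .min = 0 };

	printf("%d\n", fw_ver_eq(v, 6, 1)); /* 0: a newer major is not "equal" */
	printf("%d\n", fw_ver_eq(v, 7, 0)); /* 1: exact match */
	return 0;
}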
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b34c71770887..83a34e98bdc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -491,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
u32 v_idx, reg_idx, reg;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
/* Figure out the queue after CEQ and make that the
@@ -562,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
/* Validate vector id belongs to this vf */
if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 335fd13e86f7..ef2440f3abf8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2170,19 +2170,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
-
- if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
- iavf_del_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
iavf_del_cloud_filter(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
- iavf_add_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
iavf_add_fdir_filter(adapter);
return IAVF_SUCCESS;
@@ -4423,12 +4414,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
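Both of the checks above (here and in the i40e version of the same function) swap open-coded pointer arithmetic for the existing skbuff helpers: skb_network_offset() is skb_network_header(skb) - skb->data, and skb_network_header_len() is the gap between the transport and network headers. A standalone sketch of the same bounds check expressed on plain offsets (the frame layouts below are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Hardware limits from the hunks above: MACLEN in 2-byte words (<= 63),
 * IP/EIP length in 4-byte dwords (<= 127).
 */
static bool offsets_ok(unsigned int network_off, unsigned int transport_off)
{
	unsigned int maclen = network_off;                /* skb_network_offset()     */
	unsigned int iplen = transport_off - network_off; /* skb_network_header_len() */

	if (maclen & ~(63 * 2))
		return false;
	if (iplen & ~(127 * 4))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", offsets_ok(14, 34));  /* plain Ethernet + IPv4: accepted */
	printf("%d\n", offsets_ok(14, 600)); /* oversized IP header: rejected   */
	return 0;
}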
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 367b613d92c0..365c03d1c462 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -493,7 +493,6 @@ enum ice_pf_flags {
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
- ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -606,6 +605,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 hw_rx_eipe_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
@@ -896,6 +896,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
}
void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
@@ -983,6 +984,8 @@ void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+int ice_init_dev(struct ice_pf *pf);
+void ice_deinit_dev(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..7cee365cc7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
+#include <net/rps.h>
/**
* ice_is_arfs_active - helper to check is aRFS is active
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7ac847718882..d2fd315556a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector = vsi->q_vectors[v_idx];
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, NULL);
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
}
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, NULL);
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
}
@@ -538,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -633,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -828,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
*/
-int
+static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
@@ -899,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
return 0;
}
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf->num_txqs = 1;
+
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ int err = 0;
+ u16 q_idx;
+
+ qg_buf->num_txqs = 1;
+
+ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+ int ret;
+ int i;
+
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
+
+ return 0;
+}
+
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 17321ba75602..b711bc921928 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,8 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
- struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 10c32cd80fff..4d8111aeb0ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -154,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
case ICE_DEV_ID_E830_BACKPLANE:
case ICE_DEV_ID_E830_QSFP56:
case ICE_DEV_ID_E830_SFP:
@@ -170,6 +176,18 @@ static int ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_generic_mac - check if device's mac_type is generic
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if mac_type is generic (with SBQ support), false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
+}
+
+/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
@@ -241,6 +259,25 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c - Check if a device is E825C family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -965,9 +1002,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ void *mac_buf __free(kfree);
u16 mac_buf_len;
- void *mac_buf;
int status;
/* Set MAC type based on DeviceID */
@@ -1045,7 +1082,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
status = -ENOMEM;
goto err_unroll_sched;
@@ -1055,7 +1092,6 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL);
- devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
@@ -1082,18 +1118,15 @@ int ice_init_hw(struct ice_hw *hw)
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
- mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
- sizeof(struct ice_aqc_manage_mac_read_resp),
- GFP_KERNEL);
- mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
-
+ mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
if (!mac_buf) {
status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
- devm_kfree(ice_hw_to_dev(hw), mac_buf);
if (status)
goto err_unroll_fltr_mgmt_struct;
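The ice_init_hw() changes above lean on the kernel's scope-based cleanup support: a pointer declared with __free(kfree) has kfree() run automatically when it goes out of scope, which is why the explicit devm_kfree()/kfree() calls on the exit paths can be dropped. A user-space sketch of the underlying compiler mechanism (the cleanup attribute that linux/cleanup.h's DEFINE_FREE()/__free() are built on):

#include <stdio.h>
#include <stdlib.h>

/* User-space stand-in for the kfree() cleanup hook set up by DEFINE_FREE() */
static void free_ptr(void *p)
{
	free(*(void **)p);
	printf("buffer freed on scope exit\n");
}

#define __auto_free __attribute__((cleanup(free_ptr)))

static int do_init(void)
{
	__auto_free void *pcaps = malloc(64);

	if (!pcaps)
		return -1;

	/* use pcaps; every return path below frees it automatically */
	return 0;
}

int main(void)
{
	return do_init();
}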
@@ -1362,9 +1395,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index)
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
@@ -3240,19 +3272,14 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_hw *hw;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
-
- devm_kfree(ice_hw_to_dev(hw), pcaps);
}
return status;
@@ -3393,8 +3420,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_hw *hw;
int status;
@@ -3404,7 +3431,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
*aq_failures = 0;
hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -3456,7 +3483,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
}
out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
return status;
}
@@ -3535,7 +3561,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_hw *hw;
int status;
@@ -3604,8 +3630,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
}
out:
- kfree(pcaps);
-
return status;
}
@@ -4325,13 +4349,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_byte - write a byte to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -4342,14 +4366,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = (u8)(BIT(ce_info->width) - 1);
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
src_byte = *from;
- src_byte &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_byte <<= shift_width;
+ src_byte &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4364,13 +4385,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_word - write a word to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -4382,17 +4403,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = BIT(ce_info->width) - 1;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_word = *(u16 *)from;
- src_word &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_word <<= shift_width;
+ src_word &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4407,13 +4425,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_dword - write a dword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -4425,25 +4443,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 32 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 5 bits so the shift will do nothing
- */
- if (ce_info->width < 32)
- mask = BIT(ce_info->width) - 1;
- else
- mask = (u32)~0;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_dword = *(u32 *)from;
- src_dword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_dword <<= shift_width;
+ src_dword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4458,13 +4465,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_qword - write a qword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -4476,25 +4483,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 64 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 6 bits so the shift will do nothing
- */
- if (ce_info->width < 64)
- mask = BIT_ULL(ce_info->width) - 1;
- else
- mask = (u64)~0;
+ mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_qword = *(u64 *)from;
- src_qword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_qword <<= shift_width;
+ src_qword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4513,11 +4509,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
- * @ce_info: a description of the structure to be transformed
+ * @ce_info: List of Rx context elements
*/
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -4533,16 +4528,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
switch (ce_info[f].size_of) {
case sizeof(u8):
- ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u16):
- ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u32):
- ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u64):
- ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
return -EINVAL;
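
The ice_pack_ctx_*() rewrites above build the mask once at its final position with GENMASK()/GENMASK_ULL() and shift the source value before masking, instead of masking first and then shifting both the value and the mask; that is also what lets the dword and qword variants drop their width == 32/64 special cases. A small userspace check of the byte variant, with GENMASK redefined locally so it compiles outside the kernel (a sketch, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's GENMASK(); valid for 0 <= l <= h < 32 */
#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

/* old ice_write_byte() ordering: mask the value, then shift value and mask */
static uint8_t pack_old(uint8_t src, unsigned int width, unsigned int shift)
{
	uint8_t mask = (uint8_t)((1u << width) - 1);

	src &= mask;
	mask <<= shift;
	src <<= shift;
	return src & mask;
}

/* new ice_pack_ctx_byte() ordering: shift the value, mask once at position */
static uint8_t pack_new(uint8_t src, unsigned int width, unsigned int shift)
{
	uint8_t mask = (uint8_t)GENMASK(width - 1 + shift, shift);

	src <<= shift;
	return src & mask;
}

int main(void)
{
	for (unsigned int width = 1; width <= 8; width++)
		for (unsigned int shift = 0; shift + width <= 8; shift++)
			for (unsigned int v = 0; v < 256; v++)
				assert(pack_old((uint8_t)v, width, shift) ==
				       pack_new((uint8_t)v, width, shift));

	puts("old and new byte packing agree for all in-range widths/shifts");
	return 0;
}
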
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 3e933f75e948..ffb22c7ce28b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -53,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index);
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -72,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -112,6 +110,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
@@ -251,6 +250,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e7d2474c431c..ffe660f34992 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -666,7 +666,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw)
/* The device sideband queue is only supported on devices with the
* generic MAC type.
*/
- return hw->mac_type == ICE_MAC_GENERIC;
+ return ice_is_generic_mac(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 8b7504a9df31..7532d11ad7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1825,6 +1825,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
seg_id = SEGMENT_TYPE_ICE_E830;
break;
case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
default:
seg_id = SEGMENT_TYPE_ICE_E810;
break;
@@ -1845,6 +1846,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
case ICE_MAC_E830:
sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
+ break;
case ICE_MAC_GENERIC:
default:
sign_type = SEGMENT_SIGN_TYPE_RSA2K;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index c2bfba6b9ead..d252d98218d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -64,9 +64,6 @@ static const char * const ice_fwlog_level_string[] = {
"verbose",
};
-/* the order in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
static const char * const ice_fwlog_log_size[] = {
"128K",
"256K",
@@ -648,6 +645,16 @@ err_create_module_files:
}
/**
+ * ice_debugfs_pf_deinit - cleanup PF's debugfs
+ * @pf: pointer to the PF struct
+ */
+void ice_debugfs_pf_deinit(struct ice_pf *pf)
+{
+ debugfs_remove_recursive(pf->ice_debugfs_pf);
+ pf->ice_debugfs_pf = NULL;
+}
+
+/**
* ice_debugfs_init - create root directory for debugfs entries
*/
void ice_debugfs_init(void)
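
ice_debugfs_pf_deinit() above removes the per-PF directory tree with debugfs_remove_recursive() and clears the cached dentry so a later re-init starts from a clean slate. A minimal, hypothetical module showing the same create/teardown pairing against the stock debugfs API (the directory and file names are placeholders):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/types.h>

static struct dentry *demo_dir;
static u32 demo_value;

static int __init demo_init(void)
{
	/* root directory plus one file; both removed recursively on exit */
	demo_dir = debugfs_create_dir("demo_sketch", NULL);
	debugfs_create_u32("value", 0644, demo_dir, &demo_value);
	return 0;
}

static void __exit demo_exit(void)
{
	/* mirror ice_debugfs_pf_deinit(): remove the whole tree, drop the ref */
	debugfs_remove_recursive(demo_dir);
	demo_dir = NULL;
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("debugfs create/teardown sketch");
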
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a2d384dbfc76..9dfae9bce758 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -71,5 +71,13 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579c
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579d
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579e
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579f
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 65be56f2af9e..b516e42b41f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -445,6 +445,20 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
}
/**
+ * ice_devlink_reinit_down - unload given PF
+ * @pf: pointer to the PF struct
+ */
+static void ice_devlink_reinit_down(struct ice_pf *pf)
+{
+ /* No need to take devl_lock, it's already taken by devlink API */
+ ice_unload(pf);
+ rtnl_lock();
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ rtnl_unlock();
+ ice_deinit_dev(pf);
+}
+
+/**
* ice_devlink_reload_down - prepare for reload
* @devlink: pointer to the devlink instance to reload
* @netns_change: if true, the network namespace is changing
@@ -477,7 +491,7 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
"Remove all VFs before doing reinit\n");
return -EOPNOTSUPP;
}
- ice_unload(pf);
+ ice_devlink_reinit_down(pf);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
return ice_devlink_reload_empr_start(pf, extack);
@@ -1270,6 +1284,45 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
}
/**
+ * ice_devlink_reinit_up - do reinit of the given PF
+ * @pf: pointer to the PF struct
+ */
+static int ice_devlink_reinit_up(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_cfg_params params;
+ int err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ rtnl_lock();
+ err = ice_vsi_cfg(vsi, &params);
+ rtnl_unlock();
+ if (err)
+ goto err_vsi_cfg;
+
+ /* No need to take devl_lock, it's already taken by devlink API */
+ err = ice_load(pf);
+ if (err)
+ goto err_load;
+
+ return 0;
+
+err_load:
+ rtnl_lock();
+ ice_vsi_decfg(vsi);
+ rtnl_unlock();
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
* ice_devlink_reload_up - do reload up after reinit
* @devlink: pointer to the devlink instance reloading
* @action: the action requested
@@ -1289,7 +1342,7 @@ ice_devlink_reload_up(struct devlink *devlink,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return ice_load(pf);
+ return ice_devlink_reinit_up(pf);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return ice_devlink_reload_empr_finish(pf, extack);
@@ -1569,6 +1622,7 @@ static const struct devlink_port_ops ice_devlink_port_ops = {
* @pf: the PF to create a devlink port for
*
* Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
*
* Return: zero on success or an error code on failure.
*/
@@ -1581,6 +1635,8 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
struct device *dev;
int err;
+ devlink = priv_to_devlink(pf);
+
dev = ice_pf_to_dev(pf);
devlink_port = &pf->devlink_port;
@@ -1601,10 +1657,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
- err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
pf->hw.pf_id, err);
@@ -1619,10 +1674,11 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
* @pf: the PF to cleanup
*
* Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
*/
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_port_unregister(&pf->devlink_port);
+ devl_port_unregister(&pf->devlink_port);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index b9c5eced6326..e92be6f130a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -31,6 +31,26 @@ static const char * const pin_type_name[] = {
};
/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+ if (ice_is_reset_in_progress(pf->state)) {
+ NL_SET_ERR_MSG(extack, "PF reset in progress");
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_dpll_pin_freq_set - set pin's frequency
* @pf: private board structure
* @pin: pointer to a pin
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
+ * @dpll_idx: dpll index to connect to output pin
* @pin_type: type of pin being enabled
* @extack: error reporting
*
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
*/
static int
ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
- enum ice_dpll_pin_type pin_type,
+ u8 dpll_idx, enum ice_dpll_pin_type pin_type,
struct netlink_ext_ack *extack)
{
u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
- ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+ 0, 0);
break;
default:
return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
case ICE_DPLL_PIN_TYPE_INPUT:
ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
NULL, &pin->flags[0],
- &pin->freq, NULL);
+ &pin->freq, &pin->phase_adjust);
if (ret)
goto err;
if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
- &pin->flags[0], NULL,
+ &pin->flags[0], &parent,
&pin->freq, NULL);
if (ret)
goto err;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
- pin->state[0] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+ parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ parent == pf->dplls.eec.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ parent == pf->dplls.pps.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -488,6 +527,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
* @status: on success holds dpll's lock status
+ * @status_error: status error value
* @extack: error reporting
*
* Dpll subsystem callback, provides dpll's lock status.
@@ -500,6 +540,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
static int
ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack)
{
struct ice_dpll *d = dpll_priv;
@@ -568,9 +609,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
if (enable)
- ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+ extack);
else
ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
if (!ret)
@@ -603,6 +648,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct netlink_ext_ack *extack)
{
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+
+ if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+ return 0;
return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -665,14 +715,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
if (ret)
goto unlock;
- if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+ pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
*state = p->state[d->dpll_idx];
- else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
- *state = p->state[0];
ret = 0;
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -790,6 +842,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@@ -910,6 +965,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
u8 flag, flags_en = 0;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
switch (type) {
case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1069,6 +1127,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1123,6 +1184,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1305,8 +1369,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
- int ret;
+ int ret = 0;
+ if (ice_is_reset_in_progress(pf->state))
+ goto resched;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
@@ -1326,6 +1392,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+resched:
/* Run twice a second or reschedule if update failed */
kthread_queue_delayed_work(d->kworker, &d->work,
ret ? msecs_to_jiffies(10) :
@@ -1532,7 +1599,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
- netdev_dpll_pin_clear(vsi->netdev);
+ dpll_netdev_pin_clear(vsi->netdev);
dpll_pin_put(rclk->pin);
}
@@ -1576,7 +1643,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (WARN_ON((!vsi || !vsi->netdev)))
return -EINVAL;
- netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
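
The periodic worker hunk above skips the poll while a reset is pending and reschedules itself with a short 10 ms retry when an update fails, falling back to the normal twice-a-second cadence otherwise. A stripped-down, hypothetical module illustrating that reschedule-with-backoff pattern on the kthread_worker API (the poll_hw() body and the 500/10 ms values are only stand-ins mirroring the hunk):

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct kthread_worker *poll_worker;
static struct kthread_delayed_work poll_work;
static bool reset_pending;	/* stand-in for ice_is_reset_in_progress() */

/* stand-in for ice_dpll_update_state(); pretend the update succeeds */
static int poll_hw(void)
{
	return 0;
}

static void poll_fn(struct kthread_work *work)
{
	int ret = 0;

	if (reset_pending)
		goto resched;

	ret = poll_hw();
resched:
	/* run twice a second, or retry after 10 ms when the update failed */
	kthread_queue_delayed_work(poll_worker, &poll_work,
				   ret ? msecs_to_jiffies(10) :
					 msecs_to_jiffies(500));
}

static int __init poll_sketch_init(void)
{
	poll_worker = kthread_create_worker(0, "poll-sketch");
	if (IS_ERR(poll_worker))
		return PTR_ERR(poll_worker);

	kthread_init_delayed_work(&poll_work, poll_fn);
	kthread_queue_delayed_work(poll_worker, &poll_work, 0);
	return 0;
}

static void __exit poll_sketch_exit(void)
{
	kthread_cancel_delayed_work_sync(&poll_work);
	kthread_destroy_worker(poll_worker);
}

module_init(poll_sketch_init);
module_exit(poll_sketch_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("reschedule-with-backoff sketch");
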
@@ -2055,6 +2122,7 @@ void ice_dpll_init(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
int err = 0;
+ mutex_init(&d->lock);
err = ice_dpll_init_info(pf, cgu);
if (err)
goto err_exit;
@@ -2067,7 +2135,6 @@ void ice_dpll_init(struct ice_pf *pf)
err = ice_dpll_init_pins(pf, cgu);
if (err)
goto deinit_pps;
- mutex_init(&d->lock);
if (cgu) {
err = ice_dpll_init_worker(pf);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a19b06f18e40..255a9c8151b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,6 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+ ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -801,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
+ data = kzalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -944,11 +945,9 @@ static u64 ice_loopback_test(struct net_device *netdev)
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- struct device *dev;
- u8 *tx_frame;
+ u8 *tx_frame __free(kfree);
int i;
- dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -993,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
for (i = 0; i < num_frames; i++) {
if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 8;
- goto lbtest_free_frame;
+ goto remove_mac_filters;
}
}
@@ -1003,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
else if (valid_frames != num_frames)
ret = 10;
-lbtest_free_frame:
- devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
@@ -2486,6 +2483,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
+ break;
case TCP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
break;
@@ -2495,6 +2510,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
@@ -2518,6 +2551,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST)
@@ -2526,6 +2565,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST)
@@ -2564,6 +2609,33 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
}
}
+ if (nfc->data & RXH_GTP_TEID) {
+ switch (nfc->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
+
return hfld;
}
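
With the GTP flow types and the RXH_GTP_TEID bit handled above, user space can request TEID-based RSS hashing through the usual ETHTOOL_SRXFH ioctl. A rough userspace sketch of that request, assuming a uapi <linux/ethtool.h> recent enough to define GTPU_V4_FLOW and RXH_GTP_TEID and using "eth0" as a placeholder interface name; in practice ethtool(8) issues the equivalent request for you:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = GTPU_V4_FLOW,	/* outer IPv4, GTP-U payload */
		/* hash on IPv4 addresses plus the GTP tunnel endpoint ID */
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_GTP_TEID,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXFH");	/* e.g. EOPNOTSUPP on older drivers */

	close(fd);
	return 0;
}
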
@@ -2676,6 +2748,13 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
nfc->data |= (u64)RXH_L4_B_2_3;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
+ nfc->data |= (u64)RXH_GTP_TEID;
}
/**
@@ -3360,7 +3439,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
struct ice_pf *pf = ice_netdev_to_pf(dev);
/* only report timestamping if PTP is enabled */
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index ff82915ab497..2fd2e0cb483d 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -37,13 +37,13 @@
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
-#define ICE_FLOW_HASH_GTP_TEID \
+#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
-#define ICE_FLOW_HASH_GTP_IPV4_TEID \
- (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
-#define ICE_FLOW_HASH_GTP_IPV6_TEID \
- (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID)
#define ICE_FLOW_HASH_GTP_U_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
@@ -66,6 +66,20 @@
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)
+#define ICE_FLOW_HASH_GTP_U_UP \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
+#define ICE_FLOW_HASH_GTP_U_DWN \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN)
+#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN)
+
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
@@ -242,6 +256,13 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
index 92b5dac481cd..4fd15387a7e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -188,6 +188,8 @@ void ice_fwlog_deinit(struct ice_hw *hw)
if (hw->bus.func)
return;
+ ice_debugfs_pf_deinit(hw->back);
+
/* make sure FW logging is disabled to not put the FW in a weird state
* for the next driver load
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 9be724291ef8..ee3f0d3e3f6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1618,6 +1618,25 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4t with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4e with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4u with input set IPv4 src/dst */
+ { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4d with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
+
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
@@ -1632,6 +1651,24 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
{ICE_FLOW_SEG_HDR_ESP,
ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6t with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6e with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6u with input set IPv6 src/dst */
+ { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6d with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
};
/**
@@ -1672,27 +1709,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1795,114 +1811,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
- if (q_idx >= vsi->num_rxq)
- return -EINVAL;
-
- return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
- if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
- /* set up individual rings */
- ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- int err = 0;
- u16 q_idx;
-
- qg_buf->num_txqs = 1;
-
- for (q_idx = 0; q_idx < count; q_idx++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
- int ret;
- int i;
-
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
- if (ret)
- return ret;
-
- ice_for_each_rxq(vsi, i)
- ice_tx_xsk_pool(vsi, i);
-
- return 0;
-}
-
/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
@@ -2426,7 +2334,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_map_rings_to_vectors(vsi);
/* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi, true);
+ ice_vsi_set_napi_queues(vsi);
vsi->stat_offsets_loaded = false;
@@ -2849,74 +2757,19 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
- int i;
-
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
-
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
-
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
-
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
-
- /* disable each interrupt */
- ice_for_each_q_vector(vsi, i) {
- if (!vsi->q_vectors[i])
- continue;
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- }
-
- ice_flush(hw);
-
- /* don't call synchronize_irq() for VF's from the host */
- if (vsi->type == ICE_VSI_VF)
- return;
-
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(vsi->q_vectors[i]->irq.virq);
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
* @dev: device to which NAPI and queue belong
* @queue_index: Index of queue
* @type: queue type as RX or TX
* @napi: NAPI context
* @locked: is the rtnl_lock already held
*
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
*/
static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi,
- bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
{
if (!locked)
rtnl_lock();
@@ -2926,26 +2779,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
}
/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (!vsi->netdev)
+ return;
+
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
* @q_vector: q_vector pointer
* @locked: is the rtnl_lock already held
*
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
* Associate the q_vector napi with all the queue[s] on the vector
*/
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
{
struct ice_rx_ring *rx_ring;
struct ice_tx_ring *tx_ring;
ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
/* Also set the interrupt number for the NAPI */
netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}
@@ -2953,11 +2859,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
/**
* ice_vsi_set_napi_queues
* @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
*
* Associate queue[s] with napi for all vectors
*/
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
int i;
@@ -2965,7 +2870,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
return;
ice_for_each_q_vector(vsi, i)
- ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
}
/**
@@ -3140,7 +3045,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
}
}
- tx_ring_stats = vsi_stat->rx_ring_stats;
+ tx_ring_stats = vsi_stat->tx_ring_stats;
vsi_stat->tx_ring_stats =
krealloc_array(vsi_stat->tx_ring_stats, req_txq,
sizeof(*vsi_stat->tx_ring_stats),
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 71bd27244941..9cd23afe5f15 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -72,8 +64,6 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -91,9 +81,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi);
@@ -114,8 +110,6 @@ void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool ena_ts);
-void ice_vsi_dis_irq(struct ice_vsi *vsi);
-
void ice_vsi_free_irq(struct ice_vsi *vsi);
void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index dd4a9bc0dfdc..33a164fa325a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -613,7 +613,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_prepare_for_reset(pf);
+ ice_ptp_prepare_for_reset(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
@@ -1649,8 +1649,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- /* Nothing to do here if sideband queue is not supported */
- if (!ice_is_sbq_supported(hw)) {
+ /* if mac_type is not generic, sideband is not supported
+ * and there's nothing to do here
+ */
+ if (!ice_is_generic_mac(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
@@ -3495,7 +3497,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
- ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
}
}
@@ -4572,90 +4574,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-static int ice_start_eth(struct ice_vsi *vsi)
-{
- int err;
-
- err = ice_init_mac_fltr(vsi->back);
- if (err)
- return err;
-
- err = ice_vsi_open(vsi);
- if (err)
- ice_fltr_remove_all(vsi);
-
- return err;
-}
-
-static void ice_stop_eth(struct ice_vsi *vsi)
-{
- ice_fltr_remove_all(vsi);
- ice_vsi_close(vsi);
-}
-
-static int ice_init_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- int err;
-
- if (!vsi)
- return -EINVAL;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- err = ice_cfg_netdev(vsi);
- if (err)
- return err;
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- err = ice_init_mac_fltr(pf);
- if (err)
- goto err_init_mac_fltr;
-
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create_pf_port;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
- err = ice_register_netdev(vsi);
- if (err)
- goto err_register_netdev;
-
- err = ice_tc_indir_block_register(vsi);
- if (err)
- goto err_tc_indir_block_register;
-
- ice_napi_add(vsi);
-
- return 0;
-
-err_tc_indir_block_register:
- ice_unregister_netdev(vsi);
-err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create_pf_port:
-err_init_mac_fltr:
- ice_decfg_netdev(vsi);
- return err;
-}
-
-static void ice_deinit_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
- if (!vsi)
- return;
-
- ice_vsi_close(vsi);
- ice_unregister_netdev(vsi);
- ice_devlink_destroy_pf_port(pf);
- ice_tc_indir_block_unregister(vsi);
- ice_decfg_netdev(vsi);
-}
-
/**
* ice_wait_for_fw - wait for full FW readiness
* @hw: pointer to the hardware structure
@@ -4681,7 +4599,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
-static int ice_init_dev(struct ice_pf *pf)
+int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -4774,7 +4692,7 @@ err_init_pf:
return err;
}
-static void ice_deinit_dev(struct ice_pf *pf)
+void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
ice_deinit_pf(pf);
@@ -5079,31 +4997,47 @@ static void ice_deinit(struct ice_pf *pf)
/**
* ice_load - load pf by init hw and starting VSI
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
int ice_load(struct ice_pf *pf)
{
- struct ice_vsi_cfg_params params = {};
struct ice_vsi *vsi;
int err;
- err = ice_init_dev(pf);
+ devl_assert_locked(priv_to_devlink(pf));
+
+ vsi = ice_get_main_vsi(pf);
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
return err;
- vsi = ice_get_main_vsi(pf);
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_devlink_create_pf_port(pf);
if (err)
- goto err_vsi_cfg;
+ goto err_devlink_create_pf_port;
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
+ err = ice_register_netdev(vsi);
+ if (err)
+ goto err_register_netdev;
- err = ice_start_eth(ice_get_main_vsi(pf));
+ err = ice_tc_indir_block_register(vsi);
if (err)
- goto err_start_eth;
- rtnl_unlock();
+ goto err_tc_indir_block_register;
+
+ ice_napi_add(vsi);
err = ice_init_rdma(pf);
if (err)
@@ -5117,29 +5051,35 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
- ice_vsi_close(ice_get_main_vsi(pf));
- rtnl_lock();
-err_start_eth:
- ice_vsi_decfg(ice_get_main_vsi(pf));
-err_vsi_cfg:
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
return err;
}
/**
* ice_unload - unload pf by stopping VSI and deinit hw
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
void ice_unload(struct ice_pf *pf)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ devl_assert_locked(priv_to_devlink(pf));
+
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- rtnl_lock();
- ice_stop_eth(ice_get_main_vsi(pf));
- ice_vsi_decfg(ice_get_main_vsi(pf));
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_decfg_netdev(vsi);
}
/**
@@ -5237,27 +5177,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
goto err_init;
- err = ice_init_eth(pf);
- if (err)
- goto err_init_eth;
-
- err = ice_init_rdma(pf);
+ devl_lock(priv_to_devlink(pf));
+ err = ice_load(pf);
+ devl_unlock(priv_to_devlink(pf));
if (err)
- goto err_init_rdma;
+ goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
- ice_init_features(pf);
-
return 0;
err_init_devlink:
- ice_deinit_rdma(pf);
-err_init_rdma:
- ice_deinit_eth(pf);
-err_init_eth:
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+err_load:
ice_deinit(pf);
err_init:
pci_disable_device(pdev);
@@ -5340,8 +5276,6 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
- ice_debugfs_exit();
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5355,12 +5289,14 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_deinit_features(pf);
+
ice_deinit_devlink(pf);
- ice_deinit_rdma(pf);
- ice_deinit_eth(pf);
- ice_deinit(pf);
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+
+ ice_deinit(pf);
ice_vsi_release_all(pf);
ice_setup_mc_magic_wake(pf);
@@ -5447,6 +5383,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
@@ -5752,6 +5689,10 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
@@ -5841,6 +5782,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ ice_debugfs_exit();
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
@@ -6736,6 +6678,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
+ struct ice_pf *pf = vsi->back;
u64 pkts, bytes;
int i;
@@ -6781,21 +6724,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
- /* clear prev counters after reset */
- if (vsi_stats->tx_packets < stats_prev->tx_packets ||
- vsi_stats->rx_packets < stats_prev->rx_packets) {
- stats_prev->tx_packets = 0;
- stats_prev->tx_bytes = 0;
- stats_prev->rx_packets = 0;
- stats_prev->rx_bytes = 0;
+ /* Update netdev counters, but keep in mind that the values could start
+ * at a random value after a PF reset. Since the reported stat is
+ * increased by the Cur - Prev diff, Prev must be valid; if it is not,
+ * skip this round.
+ */
+ if (likely(pf->stat_prev_loaded)) {
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
}
- /* update netdev counters */
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
-
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
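
The stats hunk above only folds the Cur - Prev delta into the reported counters when the previous snapshot is known to be valid, so a counter that restarts at an arbitrary value after a PF reset cannot inject a huge bogus delta; the raw snapshot itself is always refreshed. A small userspace sketch of that guard, with field names and the flag handling simplified stand-ins for pf->stat_prev_loaded:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	uint64_t tx_packets;
	uint64_t rx_packets;
};

struct dev {
	struct stats net;	/* cumulative, reported to the stack */
	struct stats prev;	/* last raw snapshot */
	bool prev_loaded;	/* false right after a reset */
};

static void update_stats(struct dev *d, const struct stats *cur)
{
	if (d->prev_loaded) {
		d->net.tx_packets += cur->tx_packets - d->prev.tx_packets;
		d->net.rx_packets += cur->rx_packets - d->prev.rx_packets;
	}
	d->prev = *cur;		/* always refresh the snapshot */
	d->prev_loaded = true;
}

int main(void)
{
	struct dev d = { 0 };
	struct stats s1 = { .tx_packets = 100, .rx_packets = 50 };
	struct stats s2 = { .tx_packets = 180, .rx_packets = 90 };
	struct stats s3 = { .tx_packets = 7, .rx_packets = 3 };

	update_stats(&d, &s1);	/* first round: snapshot only, no delta */
	update_stats(&d, &s2);	/* accumulates 80 tx / 40 rx */

	d.prev_loaded = false;	/* pretend a PF reset invalidated prev */
	update_stats(&d, &s3);	/* skipped: no bogus unsigned wraparound */

	printf("tx=%llu rx=%llu\n",
	       (unsigned long long)d.net.tx_packets,
	       (unsigned long long)d.net.rx_packets);
	return 0;
}
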
@@ -7060,6 +7000,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each Rx queue; Tx queues are
+ * handled in ice_vsi_stop_tx_ring()
+ */
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
+
+ ice_flush(hw);
+
+ /* don't call synchronize_irq() for VF's from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*
@@ -7548,7 +7532,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_reset(pf);
+ ice_ptp_rebuild(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
@@ -8012,6 +7996,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 3b6605c8585e..c11eba07283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -601,17 +601,13 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
/* Read the low 32 bit value */
raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
- /* For PHYs which don't implement a proper timestamp ready bitmap,
- * verify that the timestamp value is different from the last cached
- * timestamp. If it is not, skip this for now assuming it hasn't yet
- * been captured by hardware.
+ /* Devices using this interface always verify that the timestamp differs
+ * from the last cached timestamp value.
*/
- if (!drop_ts && tx->verify_cached &&
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
return;
- if (tx->verify_cached && raw_tstamp)
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@@ -701,9 +697,11 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
hw = &pf->hw;
/* Read the Tx ready status first */
- err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
- if (err)
- return;
+ if (tx->has_ready_bitmap) {
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return;
+ }
/* Drop packets if the link went down */
link_up = ptp_port->link_up;
@@ -731,7 +729,8 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* If we do not, the hardware logic for generating a new
* interrupt can get stuck on some devices.
*/
- if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (tx->has_ready_bitmap &&
+ !(tstamp_ready & BIT_ULL(phy_idx))) {
if (drop_ts)
goto skip_ts_read;
@@ -751,7 +750,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* from the last cached timestamp. If it is not, skip this for
* now assuming it hasn't yet been captured by hardware.
*/
- if (!drop_ts && tx->verify_cached &&
+ if (!drop_ts && !tx->has_ready_bitmap &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
@@ -761,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
skip_ts_read:
spin_lock_irqsave(&tx->lock, flags);
- if (tx->verify_cached && raw_tstamp)
+ if (!tx->has_ready_bitmap && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
@@ -965,6 +964,22 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
+ * @pf: Board private structure
+ *
+ * Called by the clock owner to flush all the Tx timestamp trackers associated
+ * with the clock.
+ */
+static void
+ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
+}
+
+/**
* ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
* @pf: Board private structure
* @tx: Tx tracking structure to release
@@ -1014,7 +1029,7 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
tx->block = port / ICE_PORTS_PER_QUAD;
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
- tx->verify_cached = 0;
+ tx->has_ready_bitmap = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1037,7 +1052,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->verify_cached = 1;
+ tx->has_ready_bitmap = 0;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1430,7 +1445,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
@@ -1456,14 +1471,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
}
/**
- * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
* @pf: PF private structure
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
* Utility function to enable or disable Tx timestamp interrupt and threshold
*/
-static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
struct ice_hw *hw = &pf->hw;
int err = 0;
@@ -2162,7 +2177,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
config = &pf->ptp.tstamp_config;
@@ -2232,7 +2247,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -2616,7 +2631,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
err = ice_ptp_update_cached_phctime(pf);
@@ -2629,36 +2644,72 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
- * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ if (ptp->state != ICE_PTP_READY)
+ return;
+
+ ptp->state = ICE_PTP_RESETTING;
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_disable_timestamp_mode(pf);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+
+ if (reset_type == ICE_RESET_PFR)
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
+ * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
* @pf: Board private structure
+ *
+ * Companion function for ice_ptp_rebuild() which handles tasks that only the
+ * PTP clock owner instance should perform.
*/
-void ice_ptp_reset(struct ice_pf *pf)
+static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
u64 time_diff;
-
- if (test_bit(ICE_PFR_REQ, pf->state) ||
- !ice_pf_src_tmr_owned(pf))
- goto pfr;
+ int err;
err = ice_ptp_init_phc(hw);
if (err)
- goto err;
+ return err;
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
- goto err;
+ return err;
}
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Write the initial Time value to PHY and LAN using the cached PHC
@@ -2674,38 +2725,54 @@ void ice_ptp_reset(struct ice_pf *pf)
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ /* Flush software tracking of any outstanding timestamps since we're
+ * about to flush the PHY timestamp block.
+ */
+ ice_ptp_flush_all_tx_tracker(pf);
+
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
- goto err;
- }
+ return err;
-pfr:
- /* Init Tx structures */
- if (ice_is_e810(&pf->hw)) {
- err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
- } else {
- kthread_init_delayed_work(&ptp->port.ov_work,
- ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
- ptp->port.port_num);
+ ice_ptp_restart_all_phy(pf);
}
- if (err)
+
+ return 0;
+}
+
+/**
+ * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ int err;
+
+ if (ptp->state == ICE_PTP_READY) {
+ ice_ptp_prepare_for_reset(pf, reset_type);
+ } else if (ptp->state != ICE_PTP_RESETTING) {
+ err = -EINVAL;
+ dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
goto err;
+ }
- set_bit(ICE_FLAG_PTP, pf->flags);
+ if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
+ err = ice_ptp_rebuild_owner(pf);
+ if (err)
+ goto err;
+ }
- /* Restart the PHY timestamping block */
- if (!test_bit(ICE_PFR_REQ, pf->state) &&
- ice_pf_src_tmr_owned(pf))
- ice_ptp_restart_all_phy(pf);
+ ptp->state = ICE_PTP_READY;
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2714,6 +2781,7 @@ pfr:
return;
err:
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
@@ -2923,39 +2991,6 @@ int ice_ptp_clock_index(struct ice_pf *pf)
}
/**
- * ice_ptp_prepare_for_reset - Prepare PTP for reset
- * @pf: Board private structure
- */
-void ice_ptp_prepare_for_reset(struct ice_pf *pf)
-{
- struct ice_ptp *ptp = &pf->ptp;
- u8 src_tmr;
-
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_disable_timestamp_mode(pf);
-
- kthread_cancel_delayed_work_sync(&ptp->work);
-
- if (test_bit(ICE_PFR_REQ, pf->state))
- return;
-
- ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
-
- /* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
-
- src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
-
- /* Disable source clock */
- wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
-
- /* Acquire PHC and system timer to restore after reset */
- ptp->reset_time = ktime_get_real_ns();
-}
-
-/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -2967,7 +3002,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
+ int err;
err = ice_ptp_init_phc(hw);
if (err) {
@@ -3002,7 +3037,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
goto err_exit;
}
@@ -3195,6 +3230,8 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ptp->state = ICE_PTP_INITIALIZING;
+
ice_ptp_init_phy_model(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3219,12 +3256,13 @@ void ice_ptp_init(struct ice_pf *pf)
/* Configure initial Tx interrupt settings */
ice_ptp_cfg_tx_interrupt(pf);
- set_bit(ICE_FLAG_PTP, pf->flags);
- err = ice_ptp_init_work(pf, ptp);
+ err = ice_ptp_create_auxbus_device(pf);
if (err)
goto err;
- err = ice_ptp_create_auxbus_device(pf);
+ ptp->state = ICE_PTP_READY;
+
+ err = ice_ptp_init_work(pf, ptp);
if (err)
goto err;
@@ -3237,7 +3275,7 @@ err:
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- clear_bit(ICE_FLAG_PTP, pf->flags);
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3250,9 +3288,11 @@ err:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
+ pf->ptp.state = ICE_PTP_UNINIT;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_disable_timestamp_mode(pf);
@@ -3260,8 +3300,6 @@ void ice_ptp_release(struct ice_pf *pf)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
@@ -3271,6 +3309,9 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
+ if (ice_pf_src_tmr_owned(pf))
+ ice_ptp_unregister_auxbus_driver(pf);
+
if (!pf->ptp.clock)
return;
@@ -3280,7 +3321,5 @@ void ice_ptp_release(struct ice_pf *pf)
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
- ice_ptp_unregister_auxbus_driver(pf);
-
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 087dd32d8762..3af20025043a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -100,7 +100,7 @@ struct ice_perout_channel {
* the last timestamp we read for a given index. If the current timestamp
* value is the same as the cached value, we assume a new timestamp hasn't
* been captured. This avoids reporting stale timestamps to the stack. This is
- * only done if the verify_cached flag is set in ice_ptp_tx structure.
+ * only done if the has_ready_bitmap flag is not set in ice_ptp_tx structure.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
@@ -130,7 +130,9 @@ enum ice_tx_tstamp_work {
* @init: if true, the tracker is initialized;
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
- * @verify_cached: if true, verify new timestamp differs from last read value
+ * @has_ready_bitmap: if true, the hardware has a valid Tx timestamp ready
+ * bitmap register. If false, fall back to verifying new
+ * timestamp values against previously cached copy.
* @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
@@ -143,7 +145,7 @@ struct ice_ptp_tx {
u8 len;
u8 init : 1;
u8 calibrating : 1;
- u8 verify_cached : 1;
+ u8 has_ready_bitmap : 1;
s8 last_ll_ts_idx_read;
};
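The renamed bit flips the sense of the old verify_cached flag: E82x hardware exposes a Tx timestamp ready bitmap, while E810 falls back to comparing each reading against the cached copy. A hedged sketch of that decision, with illustrative names and the ready bitmap passed in rather than read from the PHY:

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: decide whether a captured Tx timestamp is new. */
static bool tstamp_is_new(bool has_ready_bitmap, u64 ready_bitmap, u8 idx,
			  u64 raw, u64 cached)
{
	if (has_ready_bitmap)
		/* The hardware bitmap says whether this slot completed */
		return ready_bitmap & BIT_ULL(idx);

	/* No bitmap: treat a repeated value as not yet captured */
	return raw && raw != cached;
}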
@@ -203,8 +205,17 @@ struct ice_ptp_port_owner {
#define GLTSYN_TGT_H_IDX_MAX 4
+enum ice_ptp_state {
+ ICE_PTP_UNINIT = 0,
+ ICE_PTP_INITIALIZING,
+ ICE_PTP_READY,
+ ICE_PTP_RESETTING,
+ ICE_PTP_ERROR,
+};
+
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
* @ports_owner: data for the auxiliary driver owner
@@ -227,6 +238,7 @@ struct ice_ptp_port_owner {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
struct ice_ptp_port_owner ports_owner;
@@ -304,8 +316,9 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
-void ice_ptp_reset(struct ice_pf *pf);
-void ice_ptp_prepare_for_reset(struct ice_pf *pf);
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
@@ -345,8 +358,15 @@ ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
return 0;
}
-static inline void ice_ptp_reset(struct ice_pf *pf) { }
-static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_rebuild(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
+
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
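The per-PF ICE_FLAG_PTP bit is replaced by the ice_ptp_state enum added in this header, which lets the reset path tell "never initialized" apart from "mid-reset". A rough sketch of the transitions implied by ice_ptp_prepare_for_reset() and ice_ptp_rebuild() above; owner-only rebuild work and error paths are omitted, ptp_reset_flow() is a hypothetical name, and kernel headers plus the enum above are assumed:

/* Hypothetical walk through the reset-related state transitions. */
static int ptp_reset_flow(enum ice_ptp_state *state)
{
	switch (*state) {
	case ICE_PTP_READY:
		/* ice_ptp_prepare_for_reset(): stop work, mark resetting */
		*state = ICE_PTP_RESETTING;
		fallthrough;
	case ICE_PTP_RESETTING:
		/* ice_ptp_rebuild(): redo init as needed, resume work */
		*state = ICE_PTP_READY;
		return 0;
	default:
		/* UNINIT/INITIALIZING/ERROR: nothing valid to rebuild */
		*state = ICE_PTP_ERROR;
		return -EINVAL;
	}
}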
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a94a1c48c3de..a958fcf3e6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -240,7 +240,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
return vsi;
}
@@ -1068,6 +1067,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
struct ice_pf *pf = pci_get_drvdata(pdev);
u16 prev_msix, prev_queues, queues;
bool needs_rebuild = false;
+ struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
@@ -1102,6 +1102,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (!vf)
return -ENOENT;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -ENOENT;
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1122,7 +1126,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- if (ice_vf_reconfig_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1148,8 +1152,10 @@ unroll:
if (vf->first_vector_idx < 0)
return -EINVAL;
- if (needs_rebuild)
+ if (needs_rebuild) {
ice_vf_reconfig_vsi(vf);
+ ice_vf_init_host_cfg(vf, vsi);
+ }
ice_ena_vf_mappings(vf);
ice_put_vf(vf);
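The change above re-runs ice_vf_init_host_cfg() both after a successful reconfig and on the rollback path, so the host configuration never stays stale. A generic sketch of the save/try/rollback shape, where apply_cfg() is a hypothetical stand-in for the reconfig plus host-cfg steps:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical stand-in for reconfig + host-cfg re-init; always "succeeds"
 * here only so the sketch is self-contained.
 */
static int apply_cfg(u16 msix, u16 queues)
{
	return 0;
}

static int change_vec_count(u16 *cur_msix, u16 *cur_queues,
			    u16 new_msix, u16 new_queues)
{
	u16 prev_msix = *cur_msix;
	u16 prev_queues = *cur_queues;

	*cur_msix = new_msix;
	*cur_queues = new_queues;
	if (!apply_cfg(new_msix, new_queues))
		return 0;

	/* Roll back to the previous values and re-apply them as well */
	*cur_msix = prev_msix;
	*cur_queues = prev_queues;
	apply_cfg(prev_msix, prev_queues);

	return -EINVAL;
}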
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 839e5da24ad5..f8f1d2bdc1be 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -143,8 +143,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
+ ring->vsi->back->hw_rx_eipe_error++;
+ return;
+ }
+
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
goto checksum_fail;
if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a508e917ce5f..9ff92dba5823 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -132,6 +132,7 @@ enum ice_mac_type {
ICE_MAC_E810,
ICE_MAC_E830,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K_E825,
};
/* Media Types */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 2ffdae9a82df..21d26e19338a 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -280,12 +280,6 @@ int ice_vf_reconfig_vsi(struct ice_vf *vf)
return err;
}
- /* Update the lan_vsi_num field since it might have been changed. The
- * PF lan_vsi_idx number remains the same so we don't need to change
- * that.
- */
- vf->lan_vsi_num = vsi->vsi_num;
-
return 0;
}
@@ -315,7 +309,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
* vf->lan_vsi_idx
*/
vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
return 0;
}
@@ -1315,13 +1308,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
* @vf: VF to remove access to VSI for
*/
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 0cc9034065c5..fec16919ec19 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -109,11 +109,6 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index c925813ec9ca..1ff9818b4c84 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
vf->driver_caps);
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
@@ -506,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -552,27 +545,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
* ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
+ * @vsi: VSI to check queue ID against
* @qid: VSI relative queue ID
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
+ return qid < vsi->alloc_txq;
}
/**
@@ -1330,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
*/
q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1352,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1457,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1483,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
} else if (q_map) {
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1539,7 +1525,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_rx++;
@@ -1553,7 +1539,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_tx++;
@@ -1710,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->txq.headwb_enabled ||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 60dfbe05980a..3a4115869153 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
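Because the VF is always told its VSI id is the fixed ICE_VF_VSI_ID, the PF no longer derives anything from the number echoed back by the VF: it validates it against the constant and resolves the real VSI from its own state. A hedged sketch of that PF-side handling, assuming the ice_get_vf_vsi() helper already used elsewhere in this patch (vf_msg_get_vsi() itself is a hypothetical name):

/* Illustrative PF-side resolution of a VF-supplied VSI id. */
static struct ice_vsi *vf_msg_get_vsi(struct ice_vf *vf, u16 vsi_id)
{
	/* The only value ever reported to the VF is ICE_VF_VSI_ID, so
	 * anything else is a malformed (or malicious) message.
	 */
	if (vsi_id != ICE_VF_VSI_ID)
		return NULL;

	/* Resolve the real VSI from PF-owned state, never from the wire */
	return ice_get_vf_vsi(vf);
}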
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 5e19d48a05b4..d796dbd2a440 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
* - opcodes needed by VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index f001553e1a1a..8e4ff3af86c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
return -EINVAL;
- if (vsi_id != vf->lan_vsi_num)
- return -EINVAL;
-
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8b81a1677045..1857220d27fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
return -EBUSY;
usleep_range(1000, 2000);
}
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
@@ -217,53 +218,39 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
int err;
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, size);
- qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
if (err)
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
- err = ice_vsi_cfg_rxq(rx_ring);
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
if (err)
return err;
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
return err;
- clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 0acc125decb3..e7a036538246 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
/**
* enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
- __IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
+ IDPF_VC_CORE_INIT,
IDPF_FLAGS_NBITS,
};
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
- STATE(IDPF_VC_CREATE_VPORT) \
- STATE(IDPF_VC_CREATE_VPORT_ERR) \
- STATE(IDPF_VC_ENA_VPORT) \
- STATE(IDPF_VC_ENA_VPORT_ERR) \
- STATE(IDPF_VC_DIS_VPORT) \
- STATE(IDPF_VC_DIS_VPORT_ERR) \
- STATE(IDPF_VC_DESTROY_VPORT) \
- STATE(IDPF_VC_DESTROY_VPORT_ERR) \
- STATE(IDPF_VC_CONFIG_TXQ) \
- STATE(IDPF_VC_CONFIG_TXQ_ERR) \
- STATE(IDPF_VC_CONFIG_RXQ) \
- STATE(IDPF_VC_CONFIG_RXQ_ERR) \
- STATE(IDPF_VC_ENA_QUEUES) \
- STATE(IDPF_VC_ENA_QUEUES_ERR) \
- STATE(IDPF_VC_DIS_QUEUES) \
- STATE(IDPF_VC_DIS_QUEUES_ERR) \
- STATE(IDPF_VC_MAP_IRQ) \
- STATE(IDPF_VC_MAP_IRQ_ERR) \
- STATE(IDPF_VC_UNMAP_IRQ) \
- STATE(IDPF_VC_UNMAP_IRQ_ERR) \
- STATE(IDPF_VC_ADD_QUEUES) \
- STATE(IDPF_VC_ADD_QUEUES_ERR) \
- STATE(IDPF_VC_DEL_QUEUES) \
- STATE(IDPF_VC_DEL_QUEUES_ERR) \
- STATE(IDPF_VC_ALLOC_VECTORS) \
- STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_DEALLOC_VECTORS) \
- STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_SET_SRIOV_VFS) \
- STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
- STATE(IDPF_VC_GET_RSS_LUT) \
- STATE(IDPF_VC_GET_RSS_LUT_ERR) \
- STATE(IDPF_VC_SET_RSS_LUT) \
- STATE(IDPF_VC_SET_RSS_LUT_ERR) \
- STATE(IDPF_VC_GET_RSS_KEY) \
- STATE(IDPF_VC_GET_RSS_KEY_ERR) \
- STATE(IDPF_VC_SET_RSS_KEY) \
- STATE(IDPF_VC_SET_RSS_KEY_ERR) \
- STATE(IDPF_VC_GET_STATS) \
- STATE(IDPF_VC_GET_STATS_ERR) \
- STATE(IDPF_VC_ADD_MAC_ADDR) \
- STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
- STATE(IDPF_VC_DEL_MAC_ADDR) \
- STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
- STATE(IDPF_VC_GET_PTYPE_INFO) \
- STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
- STATE(IDPF_VC_LOOPBACK_STATE) \
- STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
- STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
/**
* enum idpf_vport_reset_cause - Vport soft reset causes
* @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
* @sw_marker_wq: workqueue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_vport {
u16 num_txq;
@@ -408,12 +337,7 @@ struct idpf_vport {
bool link_up;
u32 link_speed_mbps;
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-
- wait_queue_head_t vchnl_wq;
wait_queue_head_t sw_marker_wq;
- struct mutex vc_buf_lock;
};
/**
@@ -476,15 +400,11 @@ struct idpf_vport_user_config_data {
* enum idpf_vport_config_flags - Vport config flags
* @IDPF_VPORT_REG_NETDEV: Register netdev
* @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
* @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
*/
enum idpf_vport_config_flags {
IDPF_VPORT_REG_NETDEV,
IDPF_VPORT_UP_REQUESTED,
- IDPF_VPORT_ADD_MAC_REQ,
- IDPF_VPORT_DEL_MAC_REQ,
IDPF_VPORT_CONFIG_FLAGS_NBITS,
};
@@ -555,11 +475,13 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- void *req_qs_chunks;
+ struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
+struct idpf_vc_xn_manager;
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -601,9 +523,7 @@ struct idpf_vport_config {
* @stats_task: Periodic statistics retrieval task
* @stats_wq: Workqueue for statistics task
* @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
@@ -659,10 +579,8 @@ struct idpf_adapter {
struct delayed_work stats_task;
struct workqueue_struct *stats_wq;
struct virtchnl2_get_capabilities caps;
+ struct idpf_vc_xn_manager *vcxn_mngr;
- wait_queue_head_t vchnl_wq;
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
int num_vfs;
bool crc_enable;
@@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
- struct idpf_vec_regs *reg_vals);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
- u16 *vecids, int num_vecids,
- struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg);
void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
- bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
- struct idpf_vport_user_config_data *config_data,
- u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index c7f43d2fcd13..4849590a5591 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -516,6 +516,8 @@ post_buffs_out:
/* Wrap to end of end ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ dma_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
int err = 0;
u16 i;
- if (*num_q_msg == 0)
- return 0;
- else if (*num_q_msg > cq->ring_size)
- return -EBADR;
-
/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
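The dma_wmb() added before the tail write above is the standard producer-side pattern: descriptor and buffer writes must be visible to the device before the doorbell that makes them eligible for processing. A minimal sketch of that ordering with generic names (post_desc() and its parameters are illustrative, not the control-queue code; kernel barrier/IO helpers assumed):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative producer-side posting of one DMA descriptor. */
static void post_desc(u64 *desc_ring, u32 idx, u64 desc,
		      void __iomem *tail_reg)
{
	desc_ring[idx] = desc;		/* fill the descriptor in memory */

	/* Make the descriptor write observable by the device before the
	 * tail/doorbell write below announces it.
	 */
	dma_wmb();

	writel(idx, tail_reg);		/* bump the tail register */
}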
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 8dee098bbfb0..e8e046ef2f0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+ struct {
+ u32 rsvd;
+ u16 data;
+ u16 flags;
+ } sw_cookie;
} ctx;
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 34ad1ac46b78..3df9935685e9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 58179bd733ff..5d3532c27d57 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2,14 +2,11 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
-const char * const idpf_vport_vc_state_str[] = {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
* @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
- int err;
-
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
-
- err = idpf_send_dealloc_vectors_msg(adapter);
- if (err)
- dev_err(&adapter->pdev->dev,
- "Failed to deallocate vectors: %d\n", err);
-
+ idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -975,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
- int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
@@ -985,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
idpf_send_destroy_vport_msg(vport);
- /* Set all bits as we dont know on which vc_state the vport vhnl_wq
- * is waiting on and wakeup the virtchnl workqueue even if it is
- * waiting for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, vport->vc_state);
- wake_up(&vport->vchnl_wq);
-
- mutex_destroy(&vport->vc_buf_lock);
-
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, vport->vc_state);
-
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
@@ -1253,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+ idpf_recv_mb_msg(adapter);
}
/**
@@ -1543,9 +1518,7 @@ void idpf_init_task(struct work_struct *work)
vport_config = adapter->vport_config[index];
init_waitqueue_head(&vport->sw_marker_wq);
- init_waitqueue_head(&vport->vchnl_wq);
- mutex_init(&vport->vc_buf_lock);
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1823,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
goto unlock_mutex;
}
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
/* Initialize the state machine, also allocate memory and request
* resources
*/
@@ -1902,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1951,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index e1febc74cefd..f784eea044bd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
idpf_sriov_configure(pdev, 0);
idpf_vc_core_deinit(adapter);
+
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ destroy_wqs:
adapter->vport_config = NULL;
kfree(adapter->netdevs);
adapter->netdevs = NULL;
+ kfree(adapter->vcxn_mngr);
+ adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);
- init_waitqueue_head(&adapter->vchnl_wq);
-
INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2f8ad79ae3f0..6dd7a66bb897 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 8ade4e3a9fe1..629cb5cb7c9f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_VF_ITR_IDX_SPACING 0x40
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index d0cdd63b3d5b..a5f9b7a5effe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2,46 +2,192 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ * buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: used by the virtchnl event loop to signal that a reply is
+ * available; built on the kernel completion API
+ * @state: transaction state, written by the virtchnl event loop along with
+ * the fields below and protected by the completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used to look the transaction back up on reply receive; encoded
+ * in the cookie
+ * @salt: changed on every message to keep the cookie unique; encoded in the
+ * cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: protects access to the free transaction bitmap where needed
+ * @salt: changed on every message to keep cookies unique
+ */
+struct idpf_vc_xn_manager {
+ struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+ DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spinlock_t xn_bm_lock;
+ u8 salt;
+};
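Each mailbox message now carries a 16-bit software cookie built from the transaction's ring index and a per-message salt (the IDPF_VC_XN_IDX_M and IDPF_VC_XN_SALT_M masks above), which is how a reply finds its way back to the waiting transaction without the old per-vport wait queues. A hedged sketch of the encode and lookup steps (simplified; locking and state checks omitted, and the helper names are illustrative):

#include <linux/bitfield.h>

/* Illustrative cookie handling for the transaction ring above. */
static u16 xn_cookie(const struct idpf_vc_xn *xn)
{
	return FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx) |
	       FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt);
}

static struct idpf_vc_xn *xn_from_cookie(struct idpf_vc_xn_manager *mngr,
					 u16 cookie)
{
	u8 idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
	u8 salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
	struct idpf_vc_xn *xn;

	if (idx >= IDPF_VC_XN_RING_LEN)
		return NULL;

	xn = &mngr->ring[idx];

	/* A stale or corrupted reply carries a salt that no longer matches
	 * the slot; drop it rather than completing the wrong transaction.
	 */
	return xn->salt == salt ? xn : NULL;
}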
+
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+ int i;
+
+ for (i = 0; i < num_max_vports; i++)
+ if (adapter->vport_ids[i] == v_id)
+ return adapter->vports[i];
+
+ return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+ const struct virtchnl2_event *v2e)
+{
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+ if (!vport) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+ v2e->vport_id);
+ return;
+ }
+ np = netdev_priv(vport->netdev);
+
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+ if (vport->link_up == v2e->link_status)
+ return;
+
+ vport->link_up = v2e->link_status;
+
+ if (np->state != __IDPF_VPORT_UP)
+ return;
+
+ if (vport->link_up) {
+ netif_tx_start_all_queues(vport->netdev);
+ netif_carrier_on(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+}
/**
* idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
* @ctlq_msg: message to copy from
*
* Receive virtchnl event message
*/
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
struct idpf_ctlq_msg *ctlq_msg)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ int payload_size = ctlq_msg->ctx.indirect.payload->size;
struct virtchnl2_event *v2e;
- bool link_status;
u32 event;
+ if (payload_size < sizeof(*v2e)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode,
+ payload_size);
+ return;
+ }
+
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
switch (event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
- link_status = v2e->link_status;
-
- if (vport->link_up == link_status)
- break;
-
- vport->link_up = link_status;
- if (np->state == __IDPF_VPORT_UP) {
- if (vport->link_up) {
- netif_carrier_on(vport->netdev);
- netif_tx_start_all_queues(vport->netdev);
- } else {
- netif_tx_stop_all_queues(vport->netdev);
- netif_carrier_off(vport->netdev);
- }
- }
- break;
+ idpf_handle_event_link(adapter, v2e);
+ return;
default:
- dev_err(&vport->adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Unknown event %d from PF\n", event);
break;
}
@@ -93,13 +239,14 @@ err_kfree:
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
*
* Will prepare the control queue message and initiates the send api
*
* Returns 0 on success, negative on failure
*/
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg)
+ u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
err = -ENOMEM;
goto dma_alloc_error;
}
- memcpy(dma_mem->va, msg, msg_size);
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (msg && msg_size)
+ memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
+ ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err)
@@ -159,592 +310,432 @@ dma_mem_error:
return err;
}
-/**
- * idpf_find_vport - Find vport pointer from control queue message
- * @adapter: driver specific private structure
- * @vport: address of vport pointer to copy the vport from adapters vport list
- * @ctlq_msg: control queue message
+/* API for virtchnl "transaction" support ("xn" for short).
*
- * Return 0 on success, error value on failure. Also this function does check
- * for the opcodes which expect to receive payload and return error value if
- * it is not the case.
+ * We reuse the completion lock to serialize accesses to the transaction
+ * state for simplicity, but it could just as well be its own separate
+ * synchronization primitive. For now, this API is only used from within a
+ * workqueue context; raw_spin_lock() is enough.
*/
-static int idpf_find_vport(struct idpf_adapter *adapter,
- struct idpf_vport **vport,
- struct idpf_ctlq_msg *ctlq_msg)
-{
- bool no_op = false, vid_found = false;
- int i, err = 0;
- char *vc_msg;
- u32 v_id;
+/**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_lock(xn) \
+ raw_spin_lock(&(xn)->completed.wait.lock)
- vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
- if (!vc_msg)
- return -ENOMEM;
+/**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_unlock(xn) \
+ raw_spin_unlock(&(xn)->completed.wait.lock)
- if (ctlq_msg->data_len) {
- size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+/**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+ * reset the transaction state.
+ * @xn: struct idpf_vc_xn to update
+ */
+static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
+{
+ xn->reply.iov_base = NULL;
+ xn->reply.iov_len = 0;
- if (!payload_size) {
- dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
- kfree(vc_msg);
+ if (xn->state != IDPF_VC_XN_SHUTDOWN)
+ xn->state = IDPF_VC_XN_IDLE;
+}
- return -EINVAL;
- }
+/**
+ * idpf_vc_xn_init - Initialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
+ */
+static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+{
+ int i;
- memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
- }
-
- switch (ctlq_msg->cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- goto free_vc_msg;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_EVENT:
- v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
- break;
- default:
- no_op = true;
- break;
- }
+ spin_lock_init(&vcxn_mngr->xn_bm_lock);
- if (no_op)
- goto free_vc_msg;
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- for (i = 0; i < idpf_get_max_vports(adapter); i++) {
- if (adapter->vport_ids[i] == v_id) {
- vid_found = true;
- break;
- }
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
+ init_completion(&xn->completed);
}
- if (vid_found)
- *vport = adapter->vports[i];
- else
- err = -EINVAL;
-
-free_vc_msg:
- kfree(vc_msg);
-
- return err;
+ bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
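
To make the rest of the conversion easier to follow, here is a rough sketch of the transaction objects as implied by their use in this patch. It is an editor's reconstruction, not the driver's declarations: field order, exact types and the numeric state values are assumptions, and the authoritative definitions live in the idpf headers outside this diff.

/* Approximate shape, inferred from usage in this file only. */
enum idpf_vc_xn_state {
	IDPF_VC_XN_IDLE = 1,
	IDPF_VC_XN_WAITING,
	IDPF_VC_XN_COMPLETED_SUCCESS,
	IDPF_VC_XN_COMPLETED_FAILED,
	IDPF_VC_XN_SHUTDOWN,
	IDPF_VC_XN_ASYNC,
};

struct idpf_vc_xn {
	struct completion completed;	/* wait.lock doubles as the xn lock */
	enum idpf_vc_xn_state state;
	size_t reply_sz;		/* bytes copied into @reply */
	struct kvec reply;		/* caller-provided receive buffer */
	int (*async_handler)(struct idpf_adapter *adapter,
			     struct idpf_vc_xn *xn,
			     const struct idpf_ctlq_msg *ctlq_msg);
	u32 vc_op;			/* opcode this xn is waiting on */
	u8 idx;				/* slot in the manager's ring */
	u8 salt;			/* guards against stale replies */
};

struct idpf_vc_xn_params {
	struct kvec send_buf;		/* message to send */
	struct kvec recv_buf;		/* reply buffer, may be left empty */
	int timeout_ms;			/* how long to wait for the reply */
	u32 vc_op;			/* virtchnl2 opcode */
	bool async;			/* don't wait for the reply */
	int (*async_handler)(struct idpf_adapter *adapter,
			     struct idpf_vc_xn *xn,
			     const struct idpf_ctlq_msg *ctlq_msg);
};

struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;		/* protects free_xn_bm */
	u8 salt;			/* rolling value handed to new xns */
};
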
/**
- * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @err_enum: err bit to set on error
+ * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
*
- * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
- * negative on failure.
+ * All waiting threads will be woken up and their transactions aborted. Further
+ * operations on that object will fail.
*/
-static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state err_enum)
+static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
- if (ctlq_msg->cookie.mbx.chnl_retval) {
- if (vport)
- set_bit(err_enum, vport->vc_state);
- else
- set_bit(err_enum, adapter->vc_state);
+ int i;
- return -EINVAL;
- }
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
- if (vport)
- memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
- else
- memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- return 0;
+ idpf_vc_xn_lock(xn);
+ xn->state = IDPF_VC_XN_SHUTDOWN;
+ idpf_vc_xn_release_bufs(xn);
+ idpf_vc_xn_unlock(xn);
+ complete_all(&xn->completed);
+ }
}
/**
- * idpf_recv_vchnl_op - helper function with common logic when handling the
- * reception of VIRTCHNL OPs.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @state: state bit used on timeout check
- * @err_state: err bit to set on error
+ * idpf_vc_xn_pop_free - Pop a free transaction from free list
+ * @vcxn_mngr: transaction manager to pop from
+ *
+ * Returns NULL if no free transactions
*/
-static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_state)
+static
+struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
- wait_queue_head_t *vchnl_wq;
- int err;
+ struct idpf_vc_xn *xn = NULL;
+ unsigned long free_idx;
- if (vport)
- vchnl_wq = &vport->vchnl_wq;
- else
- vchnl_wq = &adapter->vchnl_wq;
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ if (free_idx == IDPF_VC_XN_RING_LEN)
+ goto do_unlock;
- err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
- if (wq_has_sleeper(vchnl_wq)) {
- if (vport)
- set_bit(state, vport->vc_state);
- else
- set_bit(state, adapter->vc_state);
+ clear_bit(free_idx, vcxn_mngr->free_xn_bm);
+ xn = &vcxn_mngr->ring[free_idx];
+ xn->salt = vcxn_mngr->salt++;
- wake_up(vchnl_wq);
- } else {
- if (!err) {
- dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
- ctlq_msg->cookie.mbx.chnl_opcode);
- } else {
- /* Clear the errors since there is no sleeper to pass
- * them on
- */
- if (vport)
- clear_bit(err_state, vport->vc_state);
- else
- clear_bit(err_state, adapter->vc_state);
- }
- }
+do_unlock:
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
+
+ return xn;
}
/**
- * idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
- * @op: virtchannel operation code
- * @msg: Received message holding buffer
- * @msg_size: message size
- *
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * idpf_vc_xn_push_free - Push a free transaction to free list
+ * @vcxn_mngr: transaction manager to push to
+ * @xn: transaction to push
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size)
+static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
+ struct idpf_vc_xn *xn)
{
- struct idpf_vport *vport = NULL;
- struct idpf_ctlq_msg ctlq_msg;
- struct idpf_dma_mem *dma_mem;
- bool work_done = false;
- int num_retry = 2000;
- u16 num_q_msg;
- int err;
-
- while (1) {
- struct idpf_vport_config *vport_config;
- int payload_size = 0;
-
- /* Try to get one message */
- num_q_msg = 1;
- dma_mem = NULL;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
- /* If no message then decide if we have to retry based on
- * opcode
- */
- if (err || !num_q_msg) {
- /* Increasing num_retry to consider the delayed
- * responses because of large number of VF's mailbox
- * messages. If the mailbox message is received from
- * the other side, we come out of the sleep cycle
- * immediately else we wait for more time.
- */
- if (!op || !num_retry--)
- break;
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
- err = -EIO;
- break;
- }
- msleep(20);
- continue;
- }
+ idpf_vc_xn_release_bufs(xn);
+ set_bit(xn->idx, vcxn_mngr->free_xn_bm);
+}
- /* If we are here a message is received. Check if we are looking
- * for a specific message based on opcode. If it is different
- * ignore and post buffers
+/**
+ * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @params: parameters for this particular transaction including
+ * -vc_op: virtchannel operation to send
+ * -send_buf: kvec iov for send buf and len
+ * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
+ * -timeout_ms: timeout waiting for a reply (milliseconds)
+ * -async: don't wait for message reply, will lose caller context
+ * -async_handler: callback to handle async replies
+ *
+ * @returns >= 0 on success, the size of the initial reply (which may or may
+ * not be >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base);
+ * < 0 on error.
+ */
+static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
+{
+ const struct kvec *send_buf = &params->send_buf;
+ struct idpf_vc_xn *xn;
+ ssize_t retval;
+ u16 cookie;
+
+ xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
+ /* no free transactions available */
+ if (!xn)
+ return -ENOSPC;
+
+ idpf_vc_xn_lock(xn);
+ if (xn->state == IDPF_VC_XN_SHUTDOWN) {
+ retval = -ENXIO;
+ goto only_unlock;
+ } else if (xn->state != IDPF_VC_XN_IDLE) {
+ /* We're just going to clobber this transaction even though
+ * it's not IDLE. If we don't reuse it we could theoretically
+ * eventually leak all the free transactions and not be able to
+ * send any messages. At least this way we make an attempt to
+ * remain functional even though something really bad is
+ * happening that's corrupting what was supposed to be free
+ * transactions.
*/
- if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
- goto post_buffs;
+ WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
+ xn->idx, xn->vc_op);
+ }
- err = idpf_find_vport(adapter, &vport, &ctlq_msg);
- if (err)
- goto post_buffs;
+ xn->reply = params->recv_buf;
+ xn->reply_sz = 0;
+ xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
+ xn->vc_op = params->vc_op;
+ xn->async_handler = params->async_handler;
+ idpf_vc_xn_unlock(xn);
- if (ctlq_msg.data_len)
- payload_size = ctlq_msg.ctx.indirect.payload->size;
+ if (!params->async)
+ reinit_completion(&xn->completed);
+ cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- /* All conditions are met. Either a message requested is
- * received or we received a message to be processed
- */
- switch (ctlq_msg.cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
- ctlq_msg.cookie.mbx.chnl_opcode,
- ctlq_msg.cookie.mbx.chnl_retval);
- err = -EBADMSG;
- } else if (msg) {
- memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
- min_t(int, payload_size, msg_size));
- }
- work_done = true;
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_QUEUES,
- IDPF_VC_ADD_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DEL_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- /* This message can only be sent asynchronously. As
- * such we'll have lost the context in which it was
- * called and thus can only really report if it looks
- * like an error occurred. Don't bother setting ERR bit
- * or waking chnl_wq since no work queue will be waiting
- * to read the message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- }
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously. We don't
- * normally print errors here, instead
- * prefer to handle errors in the function
- * calling wait_for_event. However, if
- * asynchronous, the context in which the
- * message was sent is lost. We can't really do
- * anything about at it this point, but we
- * should at a minimum indicate that it looks
- * like something went wrong. Also don't bother
- * setting ERR bit or waking vchnl_wq since no
- * one will be waiting to read the async
- * message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_MAC_ADDR,
- IDPF_VC_ADD_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously like the
- * VIRTCHNL2_OP_ADD_MAC_ADDR
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_MAC_ADDR,
- IDPF_VC_DEL_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_EVENT:
- idpf_recv_event_msg(vport, &ctlq_msg);
- break;
- default:
- dev_warn(&adapter->pdev->dev,
- "Unhandled virtchnl response %d\n",
- ctlq_msg.cookie.mbx.chnl_opcode);
- break;
- }
+ retval = idpf_send_mb_msg(adapter, params->vc_op,
+ send_buf->iov_len, send_buf->iov_base,
+ cookie);
+ if (retval) {
+ idpf_vc_xn_lock(xn);
+ goto release_and_unlock;
+ }
-post_buffs:
- if (ctlq_msg.data_len)
- dma_mem = ctlq_msg.ctx.indirect.payload;
- else
- num_q_msg = 0;
+ if (params->async)
+ return 0;
- err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
- &num_q_msg, &dma_mem);
- /* If post failed clear the only buffer we supplied */
- if (err && dma_mem)
- dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
- dma_mem->va, dma_mem->pa);
+ wait_for_completion_timeout(&xn->completed,
+ msecs_to_jiffies(params->timeout_ms));
- /* Applies only if we are looking for a specific opcode */
- if (work_done)
- break;
+ /* No need to check the return value; we check the final state of the
+	 * transaction below. It's possible the transaction effectively gets a
+	 * longer timeout than specified if we get preempted here, after
+	 * wait_for_completion_timeout() has returned but before we take the
+	 * lock. That should be a non-issue, however.
+ */
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_SHUTDOWN:
+ retval = -ENXIO;
+ goto only_unlock;
+ case IDPF_VC_XN_WAITING:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
+ params->vc_op, params->timeout_ms);
+ retval = -ETIME;
+ break;
+ case IDPF_VC_XN_COMPLETED_SUCCESS:
+ retval = xn->reply_sz;
+ break;
+ case IDPF_VC_XN_COMPLETED_FAILED:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
+ params->vc_op);
+ retval = -EIO;
+ break;
+ default:
+ /* Invalid state. */
+ WARN_ON_ONCE(1);
+ retval = -EIO;
+ break;
}
- return err;
+release_and_unlock:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+ /* If we receive a VC reply after here, it will be dropped. */
+only_unlock:
+ idpf_vc_xn_unlock(xn);
+
+ return retval;
}
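
Taken together, a converted virtchnl sender reduces to filling a struct idpf_vc_xn_params and interpreting the returned reply size. The minimal sketch below mirrors the real senders converted later in this patch (idpf_send_enable_vport_msg() and friends); the example function name is illustrative, everything else follows the pattern shown below in this diff.

/* Minimal caller sketch -- compare with idpf_send_enable_vport_msg() below. */
static int idpf_example_enable_vport(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id = {};
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	/* recv_buf left empty: the reply payload is not needed here */
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	/* negative is -ENOSPC/-ENXIO/-ETIME/-EIO etc., >= 0 is the reply size */
	return reply_sz < 0 ? reply_sz : 0;
}
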
/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
*
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
+ * For async sends we're going to lose the caller's context, so if an
+ * async_handler was provided it can deal with the reply; otherwise we'll just
+ * check for and report an error.
*/
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check,
- int timeout)
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
{
- int time_to_wait, num_waits;
- wait_queue_head_t *vchnl_wq;
- unsigned long *vc_state;
+ int err = 0;
- time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
- num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ err = -EINVAL;
+ goto release_bufs;
+ }
- if (vport) {
- vchnl_wq = &vport->vchnl_wq;
- vc_state = vport->vc_state;
- } else {
- vchnl_wq = &adapter->vchnl_wq;
- vc_state = adapter->vc_state;
+ if (xn->async_handler) {
+ err = xn->async_handler(adapter, xn, ctlq_msg);
+ goto release_bufs;
+ }
+
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
}
- while (num_waits) {
- int event;
+release_bufs:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+ return err;
+}
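
When params->async is set, the caller returns immediately and any reply is routed through the optional async_handler above. A hypothetical handler is sketched below; only the signature is fixed by the call in idpf_vc_xn_forward_async(), while the name and body are illustrative and not taken from this patch. A sender opts in by setting xn_params.async = true and xn_params.async_handler before calling idpf_vc_xn_exec().

/* Hypothetical async reply handler -- name and body are illustrative. */
static int idpf_example_async_handler(struct idpf_adapter *adapter,
				      struct idpf_vc_xn *xn,
				      const struct idpf_ctlq_msg *ctlq_msg)
{
	/* xn's buffers are released by idpf_vc_xn_forward_async() after this
	 * returns, so only inspect the reply here.
	 */
	if (ctlq_msg->cookie.mbx.chnl_retval) {
		dev_err_ratelimited(&adapter->pdev->dev,
				    "Async op %d failed: %d\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    ctlq_msg->cookie.mbx.chnl_retval);
		return -EINVAL;
	}

	return 0;
}
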
+
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ const void *payload = NULL;
+ size_t payload_size = 0;
+ struct idpf_vc_xn *xn;
+ u16 msg_info;
+ int err = 0;
+ u16 xn_idx;
+ u16 salt;
+
+ msg_info = ctlq_msg->ctx.sw_cookie.data;
+ xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+ if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+ xn_idx);
+ return -EINVAL;
+ }
+ xn = &adapter->vcxn_mngr->ring[xn_idx];
+ salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ if (xn->salt != salt) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ xn->salt, salt);
+ return -EINVAL;
+ }
- /* If we are here and a reset is detected do not wait but
- * return. Reset timing is out of drivers control. So
- * while we are cleaning resources as part of reset if the
- * underlying HW mailbox is gone, wait on mailbox messages
- * is not meaningful
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_WAITING:
+ /* success */
+ break;
+ case IDPF_VC_XN_IDLE:
+ dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ case IDPF_VC_XN_SHUTDOWN:
+		/* ENXIO is a bit special here as the recv msg loop uses it to
+		 * know if it should stop trying to clean the ring if we lost
+ * the virtchnl. We need to stop playing with registers and
+ * yield.
*/
- if (idpf_is_reset_detected(adapter))
- return 0;
+ err = -ENXIO;
+ goto out_unlock;
+ case IDPF_VC_XN_ASYNC:
+ err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+ idpf_vc_xn_unlock(xn);
+ return err;
+ default:
+ dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EBUSY;
+ goto out_unlock;
+ }
- event = wait_event_timeout(*vchnl_wq,
- test_and_clear_bit(state, vc_state),
- msecs_to_jiffies(time_to_wait));
- if (event) {
- if (test_and_clear_bit(err_check, vc_state)) {
- dev_err(&adapter->pdev->dev, "VC response error %s\n",
- idpf_vport_vc_state_str[err_check]);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return -EINVAL;
- }
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return 0;
- }
- num_waits--;
+ if (ctlq_msg->data_len) {
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
}
- /* Timeout occurred */
- dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
- idpf_vport_vc_state_str[state]);
+ xn->reply_sz = payload_size;
+ xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
- return -ETIMEDOUT;
+ if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+ memcpy(xn->reply.iov_base, payload,
+ min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+ idpf_vc_xn_unlock(xn);
+	/* we _cannot_ hold the lock while calling complete(); complete()
+	 * takes the same wait-queue lock internally
+	 */
+ complete(&xn->completed);
+
+ return err;
}
/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
*
- * Returns 0 on success, negative on failure.
 * Will receive control queue messages and post the receive buffers. Returns 0
+ * on success and negative on failure.
*/
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int post_err, err;
+ u16 num_recv;
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
-{
- /* Increasing the timeout in __IDPF_INIT_SW flow to consider large
- * number of VF's mailbox message responses. When a message is received
- * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
- * the timeout expires. Only in the error case i.e. if no message is
- * received on mailbox, we wait for the complete timeout which is
- * less likely to happen.
- */
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO);
+ while (1) {
+		/* This will get <= num_recv messages and report back in
+		 * num_recv how many were actually received.
+ */
+ num_recv = 1;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ if (err || !num_recv)
+ break;
+
+ if (ctlq_msg.data_len) {
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ } else {
+ dma_mem = NULL;
+ num_recv = 0;
+ }
+
+ if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+ idpf_recv_event_msg(adapter, &ctlq_msg);
+ else
+ err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+ adapter->hw.arq,
+ &num_recv, &dma_mem);
+
+ /* If post failed clear the only buffer we supplied */
+ if (post_err) {
+ if (dma_mem)
+ dmam_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+ break;
+ }
+
+		/* virtchnl is trying to shut down, stop cleaning */
+ if (err == -ENXIO)
+ break;
+ }
+
+ return err;
}
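
The simplified idpf_recv_mb_msg() is meant to be driven from the driver's mailbox service context (the API comment earlier notes it only runs from a workqueue). A plausible shape for that caller is sketched below; the mbx_task/mbx_wq names and the re-arming interval are assumptions, not part of this diff.

/* Plausible mailbox service routine -- all names and the interval here are
 * assumptions.
 */
static void idpf_example_mbx_task(struct work_struct *work)
{
	struct idpf_adapter *adapter;

	adapter = container_of(work, struct idpf_adapter, mbx_task.work);

	/* Drain whatever the control queue has, matching replies to
	 * transactions and dispatching events as we go.
	 */
	idpf_recv_mb_msg(adapter);

	/* Re-arm the poll; a real driver would instead re-enable the mailbox
	 * interrupt when operating in interrupt mode.
	 */
	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
			   msecs_to_jiffies(300));
}
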
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
*/
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_version_info vvi;
+ ssize_t reply_sz;
+ u32 major, minor;
+ int err = 0;
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
}
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
- (u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
- struct virtchnl2_version_info vvi;
- u32 major, minor;
- int err;
+ xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ xn_params.send_buf.iov_base = &vvi;
+ xn_params.send_buf.iov_len = sizeof(vvi);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
- sizeof(vvi));
- if (err)
- return err;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(vvi))
+ return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
- dev_warn(&adapter->pdev->dev,
- "Virtchnl major version (%d) greater than supported\n",
- major);
-
+ dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
- dev_warn(&adapter->pdev->dev,
- "Virtchnl minor version (%d) didn't match\n", minor);
+ dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
* version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
*/
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
- struct virtchnl2_get_capabilities caps = { };
+ struct virtchnl2_get_capabilities caps = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
caps.csum_caps =
cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_PROMISC |
VIRTCHNL2_CAP_LOOPBACK);
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
- (u8 *)&caps);
-}
+ xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+ xn_params.send_buf.iov_base = &caps;
+ xn_params.send_buf.iov_len = sizeof(caps);
+ xn_params.recv_buf.iov_base = &adapter->caps;
+ xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
- return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
- sizeof(struct virtchnl2_get_capabilities));
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(adapter->caps))
+ return -EIO;
+
+ return 0;
}
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vc_xn_params xn_params = {};
u16 idx = adapter->next_vport;
int err, buf_size;
+ ssize_t reply_sz;
buf_size = sizeof(struct virtchnl2_create_vport);
if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
return err;
}
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
- (u8 *)vport_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-
- goto rel_lock;
- }
-
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
GFP_KERNEL);
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
- goto rel_lock;
+ goto free_vport_params;
}
}
- vport_msg = adapter->vport_params_recvd[idx];
- memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+ xn_params.send_buf.iov_base = vport_msg;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto free_vport_params;
+ }
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EIO;
+ goto free_vport_params;
+ }
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
+ return 0;
+
+free_vport_params:
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
*/
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ rel_lock:
*/
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ rel_lock:
*/
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ rel_lock:
*/
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_tx_queues *ctq;
+ struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+ struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_txq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_txq + vport->num_complq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!ctq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_TX_QUEUES,
- buf_sz, (u8 *)ctq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = ctq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ctq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
@@ -1591,11 +1544,13 @@ error:
*/
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_rx_queues *crq;
+ struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+ struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_rxq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ common_qi_fields:
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!crq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ common_qi_fields:
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_RX_QUEUES,
- buf_sz, (u8 *)crq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = crq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(crq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
* queues message
* @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: if true enable, false disable
*
* Send enable or disable queues virtchnl message. Returns 0 on success,
* negative on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_queue_chunks *qcs;
- struct virtchnl2_queue_chunk *qc;
u32 config_sz, chunk_sz, buf_sz;
- int i, j, k = 0, err = 0;
-
- /* validate virtchnl op */
- switch (vc_op) {
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- break;
- default:
- return -EINVAL;
- }
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
- if (vport->num_complq != (k - vport->num_txq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_complq != (k - vport->num_txq))
+ return -EINVAL;
setup_rx:
for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ setup_rx:
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
goto send_msg;
@@ -1845,10 +1771,8 @@ setup_rx:
}
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
- vport->num_rxq)) {
- err = -EINVAL;
- goto error;
- }
+ vport->num_rxq))
+ return -EINVAL;
send_msg:
/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (ena) {
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ send_msg:
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
- if (err)
- goto mbx_error;
-
- if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- else
- err = idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
-error:
- kfree(qc);
-
- return err;
+ return 0;
}
/**
@@ -1917,12 +1830,13 @@ error:
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector_maps *vqvm;
- struct virtchnl2_queue_vector *vqv;
+ struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
u32 num_msgs, num_chunks, num_q;
- int i, j, k = 0, err = 0;
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_complq)
+ return -EINVAL;
} else {
- if (vport->num_rxq != k - vport->num_txq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_txq)
+ return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm) {
- err = -ENOMEM;
- goto error;
- }
+ if (!vqvm)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (map) {
+ xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(vqvm, 0, buf_sz);
+ xn_params.send_buf.iov_base = vqvm;
+ xn_params.send_buf.iov_len = buf_sz;
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
- if (map) {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- } else {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err =
- idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- }
- if (err)
- goto mbx_error;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(vqvm);
-error:
- kfree(vqv);
-
- return err;
+ return 0;
}
/**
@@ -2062,7 +1953,7 @@ error:
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+ return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err, i;
- err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
@@ -2087,8 +1978,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
/* schedule the napi to receive all the marker packets */
+ local_bh_disable();
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
+ local_bh_enable();
return idpf_wait_for_marker_event(vport);
}
@@ -2122,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
*/
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
struct virtchnl2_create_vport *vport_params;
struct virtchnl2_queue_reg_chunks *chunks;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
- int buf_size, err;
+ ssize_t reply_sz;
u16 num_chunks;
+ int buf_size;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
+ chunks = &vport_config->req_qs_chunks->chunks;
} else {
- vport_params = adapter->vport_params_recvd[vport_idx];
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
}
@@ -2154,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
num_chunks);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
- buf_size, (u8 *)eq);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
+ xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2203,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- struct virtchnl2_add_queues aq = { };
- struct virtchnl2_add_queues *vc_msg;
+ struct virtchnl2_add_queues aq = {};
u16 vport_idx = vport->idx;
- int size, err;
+ ssize_t reply_sz;
+ int size;
+
+ vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2218,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
aq.num_rx_q = cpu_to_le16(num_rx_q);
aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
- mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
- sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
- if (err)
- goto rel_lock;
-
- /* We want vport to be const to prevent incidental code changes making
- * changes to the vport config. We're making a special exception here
- * to discard const to use the virtchnl.
- */
- err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
- IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
- if (err)
- goto rel_lock;
-
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
+ xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &aq;
+ xn_params.send_buf.iov_len = sizeof(aq);
+ xn_params.recv_buf.iov_base = vc_msg;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
/* compare vc_msg num queues with vport num queues */
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
- err = -EINVAL;
- goto rel_lock;
- }
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
le16_to_cpu(vc_msg->chunks.num_chunks));
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
-rel_lock:
- mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks)
+ return -ENOMEM;
- return err;
+ return 0;
}
/**
@@ -2270,53 +2147,49 @@ rel_lock:
*/
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
- struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
- struct virtchnl2_alloc_vectors ac = { };
+ struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
+ struct virtchnl2_alloc_vectors ac = {};
+ ssize_t reply_sz;
u16 num_vchunks;
- int size, err;
+ int size;
ac.num_vectors = cpu_to_le16(num_vectors);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
- sizeof(ac), (u8 *)&ac);
- if (err)
- goto rel_lock;
+ rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_vec)
+ return -ENOMEM;
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+ xn_params.send_buf.iov_base = &ac;
+ xn_params.send_buf.iov_len = sizeof(ac);
+ xn_params.recv_buf.iov_base = rcvd_vec;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
-
size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
- if (size > sizeof(adapter->vc_msg)) {
- err = -EINVAL;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
kfree(adapter->req_vec_chunks);
- adapter->req_vec_chunks = NULL;
- adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
- if (!adapter->req_vec_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks)
+ return -ENOMEM;
- alloc_vec = adapter->req_vec_chunks;
- if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
- err = -EINVAL;
+ return -EINVAL;
}
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2329,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
- int buf_size, err;
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+ int buf_size;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
- (u8 *)vcs);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ xn_params.send_buf.iov_base = vcs;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2374,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
*/
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
- struct virtchnl2_sriov_vfs_info svi = { };
- int err;
+ struct virtchnl2_sriov_vfs_info svi = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
svi.num_vfs = cpu_to_le16(num_vfs);
+ xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &svi;
+ xn_params.send_buf.iov_len = sizeof(svi);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
- sizeof(svi), (u8 *)&svi);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2405,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_vport_stats stats_msg = { };
- struct virtchnl2_vport_stats *stats;
- int err;
+ struct virtchnl2_vport_stats stats_msg = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
/* Don't send get_stats message if the link is down */
if (np->state <= __IDPF_VPORT_DOWN)
@@ -2416,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+ xn_params.send_buf.iov_base = &stats_msg;
+ xn_params.send_buf.iov_len = sizeof(stats_msg);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
- sizeof(struct virtchnl2_vport_stats),
- (u8 *)&stats_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- if (err)
- goto rel_lock;
-
- stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(stats_msg))
+ return -EIO;
spin_lock_bh(&np->stats_lock);
- netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
- le64_to_cpu(stats->rx_multicast) +
- le64_to_cpu(stats->rx_broadcast);
- netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
- netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
- netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
- netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
- netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
- le64_to_cpu(stats->tx_multicast) +
- le64_to_cpu(stats->tx_broadcast);
- netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
- netstats->tx_errors = le64_to_cpu(stats->tx_errors);
- netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
- vport->port_stats.vport_stats = *stats;
+ netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+ le64_to_cpu(stats_msg.rx_multicast) +
+ le64_to_cpu(stats_msg.rx_broadcast);
+ netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+ le64_to_cpu(stats_msg.tx_multicast) +
+ le64_to_cpu(stats_msg.tx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+ netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+ netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+ netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+ netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+ netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+ vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2467,70 +2320,70 @@ rel_lock:
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_lut *recv_rl;
+ struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+ struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_lut *rl;
int buf_size, lut_buf_size;
- int i, err;
+ ssize_t reply_sz;
+ int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
- if (!get) {
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = rl;
+ xn_params.send_buf.iov_len = buf_size;
+
+ if (get) {
+ recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rl)
+ return -ENOMEM;
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+ xn_params.recv_buf.iov_base = recv_rl;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ } else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
-
- goto free_mem;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+ return -EIO;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
+ lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+ if (reply_sz < lut_buf_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- if (err)
- goto free_mem;
-
- recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ /* size didn't change, we can reuse existing lut buf */
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
goto do_memcpy;
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
- lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
- err = -ENOMEM;
- goto free_mem;
+ return -ENOMEM;
}
do_memcpy:
- memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rl);
+ memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
- return err;
+ return 0;
}
/**
@@ -2542,68 +2395,70 @@ free_mem:
*/
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_key *recv_rk;
+ struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
+ struct virtchnl2_rss_key *rk __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_key *rk;
- int i, buf_size, err;
+ ssize_t reply_sz;
+ int i, buf_size;
+ u16 key_size;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
rk->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
+ xn_params.send_buf.iov_base = rk;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
if (get) {
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- if (err)
- goto error;
-
- recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
- if (rss_data->rss_key_size !=
- le16_to_cpu(recv_rk->key_len)) {
- rss_data->rss_key_size =
- min_t(u16, NETDEV_RSS_KEY_LEN,
- le16_to_cpu(recv_rk->key_len));
- kfree(rss_data->rss_key);
- rss_data->rss_key = kzalloc(rss_data->rss_key_size,
- GFP_KERNEL);
- if (!rss_data->rss_key) {
- rss_data->rss_key_size = 0;
- err = -ENOMEM;
- goto error;
- }
- }
- memcpy(rss_data->rss_key, recv_rk->key_flex,
- rss_data->rss_key_size);
+ recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rk)
+ return -ENOMEM;
+
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
+ xn_params.recv_buf.iov_base = recv_rk;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
} else {
rk->key_len = cpu_to_le16(rss_data->rss_key_size);
for (i = 0; i < rss_data->rss_key_size; i++)
rk->key_flex[i] = rss_data->rss_key[i];
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
+ }
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_key))
+ return -EIO;
+
+ key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ if (reply_sz < key_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
+ /* key len didn't change, reuse existing buf */
+ if (rss_data->rss_key_size == key_size)
+ goto do_memcpy;
+
+ rss_data->rss_key_size = key_size;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ return -ENOMEM;
}
-error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rk);
+do_memcpy:
+ memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
- return err;
+ return 0;
}
/**
@@ -2655,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
*/
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
+ struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
+ struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
- struct virtchnl2_get_ptype_info get_ptype_info;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
- int err = 0, i, j, k;
+ ssize_t reply_sz;
+ int i, j, k;
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2670,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
+ if (!get_ptype_info)
+ return -ENOMEM;
+
ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
if (!ptype_info)
return -ENOMEM;
- mutex_lock(&adapter->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ xn_params.send_buf.iov_base = get_ptype_info;
+ xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
+ xn_params.recv_buf.iov_base = ptype_info;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
while (next_ptype_id < max_ptype) {
- get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+ get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(max_ptype - next_ptype_id);
else
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
- sizeof(struct virtchnl2_get_ptype_info),
- (u8 *)&get_ptype_info);
- if (err)
- goto vc_buf_unlock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- if (err)
- goto vc_buf_unlock;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
+ return -EIO;
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
- if (ptypes_recvd > max_ptype) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptypes_recvd > max_ptype)
+ return -EINVAL;
- next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
- le16_to_cpu(get_ptype_info.num_ptypes);
+ next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
+ le16_to_cpu(get_ptype_info->num_ptypes);
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
@@ -2719,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
((u8 *)ptype_info + ptype_offset);
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
- if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID) {
- err = 0;
- goto vc_buf_unlock;
- }
+ IDPF_INVALID_PTYPE_ID)
+ return 0;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
@@ -2857,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
}
}
-vc_buf_unlock:
- mutex_unlock(&adapter->vc_buf_lock);
- kfree(ptype_info);
-
- return err;
+ return 0;
}
/**
@@ -2873,27 +2723,20 @@ vc_buf_unlock:
*/
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
- int err;
+ ssize_t reply_sz;
loopback.vport_id = cpu_to_le32(vport->vport_id);
loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
- sizeof(loopback), (u8 *)&loopback);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &loopback;
+ xn_params.send_buf.iov_len = sizeof(loopback);
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2958,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
return -ENOENT;
}
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
return 0;
}
@@ -3055,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
u16 num_max_vports;
int err = 0;
+ if (!adapter->vcxn_mngr) {
+ adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
+ if (!adapter->vcxn_mngr) {
+ err = -ENOMEM;
+ goto init_failed;
+ }
+ }
+ idpf_vc_xn_init(adapter->vcxn_mngr);
+
while (adapter->state != __IDPF_INIT_SW) {
switch (adapter->state) {
- case __IDPF_STARTUP:
- if (idpf_send_ver_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_VER_CHECK;
- goto restart;
case __IDPF_VER_CHECK:
- err = idpf_recv_ver_msg(adapter);
- if (err == -EIO) {
- return err;
- } else if (err == -EAGAIN) {
- adapter->state = __IDPF_STARTUP;
+ err = idpf_send_ver_msg(adapter);
+ switch (err) {
+ case 0:
+ /* success, move state machine forward */
+ adapter->state = __IDPF_GET_CAPS;
+ fallthrough;
+ case -EAGAIN:
goto restart;
- } else if (err) {
+ default:
+			/* Something bad happened; try again, but only a
+			 * few times.
+			 */
goto init_failed;
}
- if (idpf_send_get_caps_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_GET_CAPS;
- goto restart;
case __IDPF_GET_CAPS:
- if (idpf_recv_get_caps_msg(adapter))
+ err = idpf_send_get_caps_msg(adapter);
+ if (err)
goto init_failed;
adapter->state = __IDPF_INIT_SW;
break;
default:
dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
adapter->state);
+ err = -EINVAL;
goto init_failed;
}
break;
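/* A minimal standalone sketch of the bounded-retry handshake shape used in
 * the hunk above: retry the version exchange on -EAGAIN, move straight on to
 * the capability request on success, and bail out on any other error.
 * send_ver()/send_caps() and the retry limit are hypothetical stand-ins, not
 * the driver's functions.
 */
#include <errno.h>
#include <stdio.h>

enum init_state { VER_CHECK, GET_CAPS, INIT_SW };

static int send_ver(int attempt)
{
	return attempt == 0 ? -EAGAIN : 0;	/* first attempt needs a retry */
}

static int send_caps(void)
{
	return 0;
}

int main(void)
{
	enum init_state state = VER_CHECK;
	int attempt = 0, err;

	while (state != INIT_SW && attempt < 10) {
		switch (state) {
		case VER_CHECK:
			err = send_ver(attempt++);
			if (!err) {
				state = GET_CAPS;	/* success, move forward */
				continue;
			}
			if (err == -EAGAIN)
				continue;		/* mailbox not ready yet */
			return -err;			/* hard failure */
		case GET_CAPS:
			err = send_caps();
			if (err)
				return -err;
			state = INIT_SW;
			break;
		default:
			return EINVAL;
		}
	}

	if (state != INIT_SW)
		return EAGAIN;				/* retries exhausted */

	printf("handshake done after %d version attempt(s)\n", attempt);
	return 0;
}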
@@ -3142,7 +2992,9 @@ restart:
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
- goto no_err;
+ set_bit(IDPF_VC_CORE_INIT, adapter->flags);
+
+ return 0;
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
@@ -3151,7 +3003,6 @@ err_intr_req:
err_netdev_alloc:
kfree(adapter->vports);
adapter->vports = NULL;
-no_err:
return err;
init_failed:
@@ -3168,7 +3019,9 @@ init_failed:
* register writes might not have taken effect. Retry to initialize
* the mailbox again
*/
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
+ if (adapter->vcxn_mngr)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3184,29 +3037,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
- int i;
+ if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ return;
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- /* Set all bits as we dont know on which vc_state the vhnl_wq is
- * waiting on and wakeup the virtchnl workqueue even if it is waiting
- * for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, adapter->vc_state);
- wake_up(&adapter->vchnl_wq);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, adapter->vc_state);
-
kfree(adapter->vports);
adapter->vports = NULL;
+
+ clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
/**
@@ -3622,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
}
/**
+ * idpf_mac_filter_async_handler - Async callback for mac filters
+ * @adapter: private data struct
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * In some scenarios the driver can't sleep and wait for a reply (e.g. the
+ * stack is holding rtnl_lock) when adding a new mac filter, which makes it
+ * difficult to deal with errors returned in the reply. The best we can
+ * ultimately do is remove the filter from our list of mac filters and
+ * report the error.
+ */
+static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_mac_addr_list *ma_list;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ struct list_head *ma_list_head;
+ struct idpf_vport *vport;
+ u16 num_entries;
+ int i;
+
+	/* if successful we're done; we're only here if something bad happened */
+ if (!ctlq_msg->cookie.mbx.chnl_retval)
+ return 0;
+
+ /* make sure at least struct is there */
+ if (xn->reply_sz < sizeof(*ma_list))
+ goto invalid_payload;
+
+ ma_list = ctlq_msg->ctx.indirect.payload->va;
+ mac_addr = ma_list->mac_addr_list;
+ num_entries = le16_to_cpu(ma_list->num_mac_addr);
+ /* we should have received a buffer at least this big */
+ if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
+ goto invalid_payload;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
+ if (!vport)
+ goto invalid_payload;
+
+ vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
+ ma_list_head = &vport_config->user_config.mac_filter_list;
+
+	/* We can't do much to reconcile bad filters at this point; however,
+	 * we should at least remove them from our list one way or the other
+	 * so we have some idea which filters are still good.
+	 */
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ list_for_each_entry_safe(f, tmp, ma_list_head, list)
+ for (i = 0; i < num_entries; i++)
+ if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
+ list_del(&f->list);
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
+ xn->vc_op);
+
+ return 0;
+
+invalid_payload:
+ dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
+ xn->vc_op, xn->reply_sz);
+
+ return -EINVAL;
+}
+
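/* A standalone illustration of the two-step reply-size check the handler
 * above performs: first confirm the fixed header fits, then confirm the
 * advertised flexible-array count fits. The mac_list layout and the
 * STRUCT_SIZE() macro below are stand-ins for the virtchnl2 struct and the
 * kernel's struct_size() helper, for illustration only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct mac_entry {
	uint8_t addr[6];
	uint8_t pad[2];
};

struct mac_list {
	uint32_t vport_id;
	uint16_t num_entries;
	uint16_t rsvd;
	struct mac_entry entries[];	/* flexible array member */
};

#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

static int validate_reply(size_t reply_sz, const struct mac_list *list)
{
	/* make sure at least the fixed header is there */
	if (reply_sz < sizeof(*list))
		return -EINVAL;

	/* then make sure the advertised entry count actually fits */
	if (reply_sz < STRUCT_SIZE(struct mac_list, entries,
				   (size_t)list->num_entries))
		return -EINVAL;

	return 0;
}

int main(void)
{
	_Alignas(uint32_t) unsigned char buf[sizeof(struct mac_list) +
					     2 * sizeof(struct mac_entry)] = { 0 };
	struct mac_list *list = (struct mac_list *)buf;

	list->vport_id = 1;
	list->num_entries = 2;

	printf("full reply: %d\n", validate_reply(sizeof(buf), list));
	printf("truncated:  %d\n",
	       validate_reply(sizeof(struct mac_list), list));
	return 0;
}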
+/**
* idpf_add_del_mac_filters - Add/del mac filters
* @vport: Virtual port data structure
* @np: Netdev private structure
@@ -3634,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async)
{
- struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
+ struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- enum idpf_vport_config_flags mac_flag;
- struct pci_dev *pdev = adapter->pdev;
- enum idpf_vport_vc_state vc, vc_err;
- struct virtchnl2_mac_addr *mac_addr;
- struct idpf_mac_filter *f, *tmp;
u32 num_msgs, total_filters = 0;
- int i = 0, k, err = 0;
- u32 vop;
+ struct idpf_mac_filter *f;
+ ssize_t reply_sz;
+ int i = 0, k;
+
+ xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
+ VIRTCHNL2_OP_DEL_MAC_ADDR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = async;
+ xn_params.async_handler = idpf_mac_filter_async_handler;
vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
@@ -3668,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
GFP_ATOMIC);
if (!mac_addr) {
- err = -ENOMEM;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- goto error;
+
+ return -ENOMEM;
}
- list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
- list) {
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
i++;
@@ -3693,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (add) {
- vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
- vc = IDPF_VC_ADD_MAC_ADDR;
- vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_ADD_MAC_REQ;
- } else {
- vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
- vc = IDPF_VC_DEL_MAC_ADDR;
- vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_DEL_MAC_REQ;
- }
-
/* Chunk up the filters into multiple messages to avoid
* sending a control queue message buffer that is too large
*/
num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
- if (!async)
- mutex_lock(&vport->vc_buf_lock);
-
for (i = 0, k = 0; i < num_msgs; i++) {
u32 entries_size, buf_size, num_entries;
@@ -3724,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
kfree(ma_list);
ma_list = kzalloc(buf_size, GFP_ATOMIC);
- if (!ma_list) {
- err = -ENOMEM;
- goto list_prep_error;
- }
+ if (!ma_list)
+ return -ENOMEM;
} else {
memset(ma_list, 0, buf_size);
}
@@ -3736,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
- if (async)
- set_bit(mac_flag, vport_config->flags);
-
- err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
- if (err)
- goto mbx_error;
-
- if (!async) {
- err = idpf_wait_for_event(adapter, vport, vc, vc_err);
- if (err)
- goto mbx_error;
- }
+ xn_params.send_buf.iov_base = ma_list;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_entries;
total_filters -= num_entries;
}
-mbx_error:
- if (!async)
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ma_list);
-list_prep_error:
- kfree(mac_addr);
-error:
- if (err)
- dev_err(&pdev->dev, "Failed to add or del mac filters %d", err);
-
- return err;
+ return 0;
}
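/* A quick standalone illustration of the chunking arithmetic above: the
 * filter list is split into DIV_ROUND_UP(total, per_msg) messages so a
 * single control-queue buffer never exceeds the mailbox limit.
 * FILTERS_PER_MSG here is an assumed example value, not necessarily the
 * driver's IDPF_NUM_FILTERS_PER_MSG.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define FILTERS_PER_MSG		20

int main(void)
{
	unsigned int totals[] = { 1, 20, 21, 45 };
	unsigned int i;

	for (i = 0; i < sizeof(totals) / sizeof(totals[0]); i++) {
		unsigned int total = totals[i], k = 0, sent = 0;
		unsigned int msgs = DIV_ROUND_UP(total, FILTERS_PER_MSG);

		/* walk the list the same way the send loop above does */
		while (k < total) {
			unsigned int n = total - k < FILTERS_PER_MSG ?
					 total - k : FILTERS_PER_MSG;

			k += n;		/* advance past the entries just sent */
			sent++;
		}
		printf("%u filters -> %u messages (DIV_ROUND_UP: %u)\n",
		       total, sent, msgs);
	}
	return 0;
}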
/**
@@ -3780,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_promisc_info vpi;
+ ssize_t reply_sz;
u16 flags = 0;
- int err;
if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3792,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
vpi.vport_id = cpu_to_le32(vport_id);
vpi.flags = cpu_to_le16(flags);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
- sizeof(struct virtchnl2_promisc_info),
- (u8 *)&vpi);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &vpi;
+ xn_params.send_buf.iov_len = sizeof(vpi);
+ /* setting promiscuous is only ever done asynchronously */
+ xn_params.async = true;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
new file mode 100644
index 000000000000..83da5d8da56b
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+struct idpf_adapter;
+struct idpf_netdev_priv;
+struct idpf_vec_regs;
+struct idpf_vport;
+struct idpf_vport_max_q;
+struct idpf_vport_user_config_data;
+
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+
+int idpf_recv_mb_msg(struct idpf_adapter *adapter);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg, u16 cookie);
+
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+
+#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b66199c9bb3a..99977a22b843 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3027,7 +3027,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int igb_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3038,11 +3038,13 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
if (!hw->dev_spec._82575.eee_disable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
/* The IPCNFG and EEER registers are not supported on I354. */
if (hw->mac.type == e1000_i354) {
@@ -3068,7 +3070,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
case e1000_i354:
case e1000_i210:
@@ -3079,7 +3081,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
default:
@@ -3099,18 +3101,20 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
bool adv1g_eee = true, adv100m_eee = true;
s32 ret_val;
@@ -3118,7 +3122,7 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
@@ -3138,14 +3142,21 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (!edata->advertised || (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
dev_err(&adapter->pdev->dev,
"EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
return -EINVAL;
}
- adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
- adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+ adv100m_eee = linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+ adv1g_eee = linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
@@ -3153,7 +3164,7 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
adapter->flags |= IGB_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cebb44f51d5f..a3f100769e39 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -202,7 +202,7 @@ static struct notifier_block dca_notifier = {
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
-module_param(max_vfs, uint, 0);
+module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
@@ -2538,7 +2538,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -6985,44 +6985,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_SYS_WRAP;
}
if (tsicr & E1000_TSICR_TXTS) {
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
- ack |= E1000_TSICR_TXTS;
}
- if (tsicr & TSINTR_TT0) {
+ if (tsicr & TSINTR_TT0)
igb_perout(adapter, 0);
- ack |= TSINTR_TT0;
- }
- if (tsicr & TSINTR_TT1) {
+ if (tsicr & TSINTR_TT1)
igb_perout(adapter, 1);
- ack |= TSINTR_TT1;
- }
- if (tsicr & TSINTR_AUTT0) {
+ if (tsicr & TSINTR_AUTT0)
igb_extts(adapter, 0);
- ack |= TSINTR_AUTT0;
- }
- if (tsicr & TSINTR_AUTT1) {
+ if (tsicr & TSINTR_AUTT1)
igb_extts(adapter, 1);
- ack |= TSINTR_AUTT1;
- }
-
- /* acknowledge the interrupts */
- wr32(E1000_TSICR, ack);
}
static irqreturn_t igb_msix_other(int irq, void *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 319c544b9f04..f94570556120 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index a4d4f00e6a87..b0cf310e6f7b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2655,7 +2655,7 @@ igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 95d1e8c490a4..ebffd3054285 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,6 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 45430e246e9c..90316dc58630 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -168,7 +168,7 @@ struct igc_ring {
struct igc_adapter {
struct net_device *netdev;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u16 eee_advert;
unsigned long state;
@@ -295,6 +295,9 @@ struct igc_adapter {
struct timespec64 start;
struct timespec64 period;
} perout[IGC_N_PEROUT];
+
+ /* LEDs */
+ struct mutex led_mutex;
};
void igc_up(struct igc_adapter *adapter);
@@ -567,7 +570,6 @@ struct igc_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
- struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
@@ -585,7 +587,7 @@ enum igc_filter_match_flags {
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
- __be16 vlan_etype;
+ u16 vlan_etype;
u16 vlan_tci;
u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
@@ -720,6 +722,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
+int igc_led_setup(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index b95d2c86e803..1a64f1ca6ca8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -981,7 +981,7 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
fsp->flow_type |= FLOW_EXT;
- fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
+ fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype);
fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
}
@@ -1249,7 +1249,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
/* VLAN etype matching */
if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
- rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
}
@@ -1623,18 +1623,17 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static int igc_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
u32 eeer;
if (hw->dev_spec._base.eee_enable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
*edata = adapter->eee;
- edata->supported = SUPPORTED_Autoneg;
eeer = rd32(IGC_EEER);
@@ -1647,9 +1646,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = hw->dev_spec._base.eee_enable;
- edata->advertised = SUPPORTED_Autoneg;
- edata->lp_advertised = SUPPORTED_Autoneg;
-
/* Report correct negotiated EEE status for devices that
* wrongly report EEE at half-duplex
*/
@@ -1657,21 +1653,21 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igc_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
if (ret_val) {
@@ -1699,7 +1695,8 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
+
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
new file mode 100644
index 000000000000..bf240c5daf86
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Linutronix GmbH */
+
+#include <linux/bits.h>
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <uapi/linux/uleds.h>
+
+#include "igc.h"
+
+#define IGC_NUM_LEDS 3
+
+#define IGC_LEDCTL_LED0_MODE_SHIFT 0
+#define IGC_LEDCTL_LED0_MODE_MASK GENMASK(3, 0)
+#define IGC_LEDCTL_LED0_BLINK BIT(7)
+#define IGC_LEDCTL_LED1_MODE_SHIFT 8
+#define IGC_LEDCTL_LED1_MODE_MASK GENMASK(11, 8)
+#define IGC_LEDCTL_LED1_BLINK BIT(15)
+#define IGC_LEDCTL_LED2_MODE_SHIFT 16
+#define IGC_LEDCTL_LED2_MODE_MASK GENMASK(19, 16)
+#define IGC_LEDCTL_LED2_BLINK BIT(23)
+
+#define IGC_LEDCTL_MODE_ON 0x00
+#define IGC_LEDCTL_MODE_OFF 0x01
+#define IGC_LEDCTL_MODE_LINK_10 0x05
+#define IGC_LEDCTL_MODE_LINK_100 0x06
+#define IGC_LEDCTL_MODE_LINK_1000 0x07
+#define IGC_LEDCTL_MODE_LINK_2500 0x08
+#define IGC_LEDCTL_MODE_ACTIVITY 0x0b
+
+#define IGC_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK_1000) | \
+ BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_10) | \
+ BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+#define IGC_ACTIVITY_MODES \
+ (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+struct igc_led_classdev {
+ struct net_device *netdev;
+ struct led_classdev led;
+ int index;
+};
+
+#define lcdev_to_igc_ldev(lcdev) \
+ container_of(lcdev, struct igc_led_classdev, led)
+
+static void igc_led_select(struct igc_adapter *adapter, int led,
+ u32 *mask, u32 *shift, u32 *blink)
+{
+ switch (led) {
+ case 0:
+ *mask = IGC_LEDCTL_LED0_MODE_MASK;
+ *shift = IGC_LEDCTL_LED0_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED0_BLINK;
+ break;
+ case 1:
+ *mask = IGC_LEDCTL_LED1_MODE_MASK;
+ *shift = IGC_LEDCTL_LED1_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED1_BLINK;
+ break;
+ case 2:
+ *mask = IGC_LEDCTL_LED2_MODE_MASK;
+ *shift = IGC_LEDCTL_LED2_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED2_BLINK;
+ break;
+ default:
+ *mask = *shift = *blink = 0;
+ netdev_err(adapter->netdev, "Unknown LED %d selected!\n", led);
+ }
+}
+
+static void igc_led_set(struct igc_adapter *adapter, int led, u32 mode,
+ bool blink)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+
+ /* Set mode */
+ ledctl = rd32(IGC_LEDCTL);
+ ledctl &= ~mask;
+ ledctl |= mode << shift;
+
+ /* Configure blinking */
+ if (blink)
+ ledctl |= blink_bit;
+ else
+ ledctl &= ~blink_bit;
+ wr32(IGC_LEDCTL, ledctl);
+
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
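/* A standalone sketch of the LEDCTL read-modify-write done by igc_led_set()
 * above: clear the per-LED 4-bit mode field, program the new mode, then set
 * or clear the blink bit. The register value is simulated here rather than
 * read from hardware, and the constants mirror the LED1 field layout above.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define LED1_MODE_SHIFT	8
#define LED1_MODE_MASK	GENMASK32(11, 8)
#define LED1_BLINK	(1u << 15)

#define MODE_LINK_1000	0x07
#define MODE_ACTIVITY	0x0b

static uint32_t led_set(uint32_t ledctl, uint32_t mask, uint32_t shift,
			uint32_t blink_bit, uint32_t mode, int blink)
{
	ledctl &= ~mask;		/* clear the 4-bit mode field */
	ledctl |= mode << shift;	/* program the new mode */
	if (blink)
		ledctl |= blink_bit;
	else
		ledctl &= ~blink_bit;
	return ledctl;
}

int main(void)
{
	uint32_t ledctl = 0;

	ledctl = led_set(ledctl, LED1_MODE_MASK, LED1_MODE_SHIFT,
			 LED1_BLINK, MODE_LINK_1000, 0);
	printf("LED1 link-1000:      0x%08x\n", ledctl);

	ledctl = led_set(ledctl, LED1_MODE_MASK, LED1_MODE_SHIFT,
			 LED1_BLINK, MODE_ACTIVITY, 1);
	printf("LED1 activity+blink: 0x%08x\n", ledctl);
	return 0;
}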
+static u32 igc_led_get(struct igc_adapter *adapter, int led)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+ ledctl = rd32(IGC_LEDCTL);
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+
+ return (ledctl & mask) >> shift;
+}
+
+static int igc_led_brightness_set_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ if (brightness)
+ mode = IGC_LEDCTL_MODE_ON;
+ else
+ mode = IGC_LEDCTL_MODE_OFF;
+
+ netdev_dbg(adapter->netdev, "Set brightness for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ igc_led_set(adapter, ldev->index, mode, false);
+
+ return 0;
+}
+
+static int igc_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ if (flags & ~IGC_SUPPORTED_MODES)
+ return -EOPNOTSUPP;
+
+	/* If both Tx and Rx are selected, activity can be offloaded unless
+	 * some other mode is selected as well.
+	 */
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)) &&
+ !(flags & ~IGC_ACTIVITY_MODES))
+ return 0;
+
+ /* Single Rx or Tx activity is not supported. */
+ if (flags & IGC_ACTIVITY_MODES)
+ return -EOPNOTSUPP;
+
+ /* Only one mode can be active at a given time. */
+ if (flags & (flags - 1))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
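/* Demonstrates the flags & (flags - 1) test used above: clearing the lowest
 * set bit leaves a nonzero value only if more than one bit (i.e. more than
 * one trigger mode) was set.
 */
#include <stdio.h>

static int more_than_one_bit(unsigned long flags)
{
	return (flags & (flags - 1)) != 0;
}

int main(void)
{
	printf("0x04 -> %d\n", more_than_one_bit(0x04));	/* single mode */
	printf("0x06 -> %d\n", more_than_one_bit(0x06));	/* two modes */
	printf("0x00 -> %d\n", more_than_one_bit(0x00));	/* no mode */
	return 0;
}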
+
+static int igc_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode = IGC_LEDCTL_MODE_OFF;
+ bool blink = false;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = IGC_LEDCTL_MODE_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = IGC_LEDCTL_MODE_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = IGC_LEDCTL_MODE_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode = IGC_LEDCTL_MODE_LINK_2500;
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)))
+ mode = IGC_LEDCTL_MODE_ACTIVITY;
+
+ netdev_dbg(adapter->netdev, "Set HW control for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ /* blink is recommended for activity */
+ if (mode == IGC_LEDCTL_MODE_ACTIVITY)
+ blink = true;
+
+ igc_led_set(adapter, ldev->index, mode, blink);
+
+ return 0;
+}
+
+static int igc_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ mode = igc_led_get(adapter, ldev->index);
+
+ switch (mode) {
+ case IGC_LEDCTL_MODE_ACTIVITY:
+ *flags = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case IGC_LEDCTL_MODE_LINK_10:
+ *flags = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case IGC_LEDCTL_MODE_LINK_100:
+ *flags = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case IGC_LEDCTL_MODE_LINK_1000:
+ *flags = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case IGC_LEDCTL_MODE_LINK_2500:
+ *flags = BIT(TRIGGER_NETDEV_LINK_2500);
+ break;
+ }
+
+ return 0;
+}
+
+static struct device *igc_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+
+ return &ldev->netdev->dev;
+}
+
+static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
+ size_t buf_len)
+{
+ snprintf(buf, buf_len, "igc-%x%x-led%d",
+ pci_domain_nr(adapter->pdev->bus),
+ pci_dev_id(adapter->pdev), index);
+}
+
+static void igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->netdev = netdev;
+ ldev->index = index;
+
+ igc_led_get_name(adapter, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->max_brightness = 1;
+ led_cdev->brightness_set_blocking = igc_led_brightness_set_blocking;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->hw_control_is_supported = igc_led_hw_control_is_supported;
+ led_cdev->hw_control_set = igc_led_hw_control_set;
+ led_cdev->hw_control_get = igc_led_hw_control_get;
+ led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
+
+ devm_led_classdev_register(&netdev->dev, led_cdev);
+}
+
+int igc_led_setup(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &netdev->dev;
+ struct igc_led_classdev *leds;
+ int i;
+
+ mutex_init(&adapter->led_mutex);
+
+ leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ igc_setup_ldev(leds + i, netdev, i);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ba8d3fe186ae..2e1cfbd82f4f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3385,7 +3385,7 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
u32 fhftsl;
if (input->index >= MAX_FLEX_FILTER) {
- dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
return -EINVAL;
}
@@ -3420,7 +3420,6 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
struct igc_flex_filter *input)
{
- struct device *dev = &adapter->pdev->dev;
struct igc_hw *hw = &adapter->hw;
u8 *data = input->data;
u8 *mask = input->mask;
@@ -3434,7 +3433,7 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
* out early to avoid surprises later.
*/
if (input->length % 8 != 0) {
- dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
return -EINVAL;
}
@@ -3504,8 +3503,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
}
wr32(IGC_WUFC, wufc);
- dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
- input->index);
+ netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
+ input->index);
return 0;
}
@@ -3577,9 +3576,9 @@ static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
static int igc_add_flex_filter(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
- struct igc_flex_filter flex = { };
struct igc_nfc_filter *filter = &rule->filter;
unsigned int eth_offset, user_offset;
+ struct igc_flex_filter flex = { };
int ret, index;
bool vlan;
@@ -3615,10 +3614,12 @@ static int igc_add_flex_filter(struct igc_adapter *adapter,
ETH_ALEN, NULL);
/* Add VLAN etype */
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
- igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
- sizeof(filter->vlan_etype),
- NULL);
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
+
+ igc_flex_filter_add_field(&flex, &vlan_etype, 12,
+ sizeof(vlan_etype), NULL);
+ }
/* Add VLAN TCI */
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
@@ -5276,7 +5277,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -5302,25 +5303,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
- u32 ack, tsauxc, sec, nsec, tsicr;
struct igc_hw *hw = &adapter->hw;
+ u32 tsauxc, sec, nsec, tsicr;
struct ptp_clock_event event;
struct timespec64 ts;
tsicr = rd32(IGC_TSICR);
- ack = 0;
if (tsicr & IGC_TSICR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_SYS_WRAP;
}
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
igc_ptp_tx_tstamp_event(adapter);
- ack |= IGC_TSICR_TXTS;
}
if (tsicr & IGC_TSICR_TT0) {
@@ -5334,7 +5332,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[0].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT0;
}
if (tsicr & IGC_TSICR_TT1) {
@@ -5348,7 +5345,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[1].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT1;
}
if (tsicr & IGC_TSICR_AUTT0) {
@@ -5358,7 +5354,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 0;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT0;
}
if (tsicr & IGC_TSICR_AUTT1) {
@@ -5368,11 +5363,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 1;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT1;
}
-
- /* acknowledge the interrupts */
- wr32(IGC_TSICR, ack);
}
/**
@@ -6487,7 +6478,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
- int i, drops;
+ int i, nxmit;
if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
@@ -6503,16 +6494,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
/* Avoid transmit queue timeout since we share it with the slow path */
txq_trans_cond_update(nq);
- drops = 0;
+ nxmit = 0;
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err)
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH)
@@ -6520,7 +6510,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_unlock(nq);
- return num_frames - drops;
+ return nxmit;
}
static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
@@ -6977,6 +6967,12 @@ static int igc_probe(struct pci_dev *pdev,
pm_runtime_put_noidle(&pdev->dev);
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+ if (err)
+ goto err_register;
+ }
+
return 0;
err_register:
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d38c87d7e5e8..e5b893fc5b66 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
+#define IGC_LEDCTL 0x00E00 /* LED Control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6f0376e42f4..559b443c409f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -949,19 +949,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
@@ -1059,7 +1059,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
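
[Editor's note] The ixgbe prototype changes here and in the files that follow are a mechanical s32 -> int conversion: these functions return 0 on success or a negative errno, so the fixed-width typedef adds nothing and plain int matches common kernel style. A before/after sketch using a hypothetical helper:

/* before */
s32 example_read_reg(struct ixgbe_hw *hw, u32 reg, u32 *val);

/* after: same 0-or-negative-errno contract, plain int return */
int example_read_reg(struct ixgbe_hw *hw, u32 reg, u32 *val)
{
	if (!val)
		return -EINVAL;

	*val = IXGBE_READ_REG(hw, reg);
	return 0;
}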
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 6835d5f18753..283a23150a4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -15,10 +15,10 @@
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
@@ -66,7 +66,7 @@ out:
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -93,12 +93,12 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
u16 list_offset, data_offset;
+ int ret_val;
/* Identify the PHY */
phy->ops.identify(hw);
@@ -148,9 +148,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* Then set pcie completion timeout
*
**/
-static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -170,7 +170,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -271,7 +271,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
u32 fctrl_reg;
u32 rmcs_reg;
@@ -411,13 +411,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -457,7 +457,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
* Function indicates success when phy link is available. If phy is not ready
* within 5 seconds of MAC indicating link, the function returns error.
**/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+static int ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
u32 timeout;
u16 an_reg;
@@ -493,7 +493,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *link_up,
bool link_up_wait_to_complete)
{
@@ -579,7 +579,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -624,11 +624,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -647,15 +647,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
* clears all interrupts, performing a PHY reset, and performing a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
- s32 status;
- s32 phy_status = 0;
- u32 ctrl;
+ int phy_status = 0;
+ u8 analog_val;
u32 gheccr;
- u32 i;
+ int status;
u32 autoc;
- u8 analog_val;
+ u32 ctrl;
+ u32 i;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -781,7 +781,7 @@ mac_reset_top:
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq set index
**/
-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -805,7 +805,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq clear index (not used in 82598, but elsewhere)
**/
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -836,7 +836,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regindex;
@@ -881,7 +881,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
u32 offset;
u32 vlanbyte;
@@ -905,7 +905,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
*
* Performs read operation to Atlas analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 atlas_ctl;
@@ -927,7 +927,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Atlas analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 atlas_ctl;
@@ -948,13 +948,13 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
*
* Performs 8 byte read operation to SFP module's data over I2C interface.
**/
-static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u8 byte_offset, u8 *eeprom_data)
{
- s32 status = 0;
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ int status = 0;
u16 gssr;
u32 i;
@@ -1019,7 +1019,7 @@ out:
*
* Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
@@ -1034,8 +1034,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
-static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *sff8472_data)
+static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
byte_offset, sff8472_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 339e106a5732..cdaf087b4e85 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -21,24 +21,24 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
@@ -98,10 +98,10 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
}
}
-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
u16 list_offset, data_offset, data_value;
+ int ret_val;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -173,10 +173,10 @@ setup_sfp_err:
* prot_autoc_write_82599(). Note, that locked can only be true in cases
* where this function doesn't return an error.
**/
-static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
u32 *reg_val)
{
- s32 ret_val;
+ int ret_val;
*locked = false;
/* If LESM is on then we need to hold the SW/FW semaphore. */
@@ -203,9 +203,9 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* This part (82599) may need to hold a the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
**/
-static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
- s32 ret_val = 0;
+ int ret_val = 0;
/* Blocked by MNG FW so bail */
if (ixgbe_check_reset_blocked(hw))
@@ -237,7 +237,7 @@ out:
return ret_val;
}
-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -263,11 +263,11 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
u32 esdp;
if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -322,7 +322,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -334,7 +334,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
return 0;
@@ -500,14 +502,14 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
{
+ bool got_lock = false;
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
- bool got_lock = false;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
@@ -657,15 +659,15 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
*
* Implements the Intel SmartSpeed algorithm.
**/
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 i, j;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ bool link_up = false;
+ int status = 0;
+ s32 i, j;
/* Set autoneg_advertised value based on input link speed */
hw->phy.autoneg_advertised = 0;
@@ -767,16 +769,15 @@ out:
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- bool autoneg = false;
- s32 status;
- u32 pma_pmd_1g, link_mode, links_reg, i;
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i;
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ bool autoneg = false;
+ int status;
/* holds the value of AUTOC register at this current point in time */
u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -785,6 +786,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* temporary variable used for comparison purposes */
u32 autoc = current_autoc;
+ pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
&autoneg);
@@ -882,11 +885,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
*
* Restarts link on PHY and MAC based on settings passed in.
**/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -905,13 +908,13 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
- s32 status;
u32 ctrl, i, autoc, autoc2;
- u32 curr_lms;
bool link_up = false;
+ u32 curr_lms;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -1081,7 +1084,7 @@ mac_reset_top:
* @hw: pointer to hardware structure
* @fdircmd: current value of FDIRCMD register
*/
-static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
int i;
@@ -1099,12 +1102,12 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
- int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
u32 fdircmd;
- s32 err;
+ int err;
+ int i;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1212,7 +1215,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1236,7 +1239,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1359,7 +1362,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* Note that the tunnel bit in input must not be set when the hardware
* tunneling support does not exist.
**/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
@@ -1515,7 +1518,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
{
/* mask IPv6 since it is currently not supported */
@@ -1627,12 +1630,12 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
- s32 err;
+ int err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1690,13 +1693,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id)
{
u32 fdirhash;
u32 fdircmd;
- s32 err;
+ int err;
/* configure FDIRHASH register */
fdirhash = (__force u32)input->formatted.bkt_hash;
@@ -1734,7 +1737,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
*
* Performs read operation to Omer analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 core_ctl;
@@ -1756,7 +1759,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Omer analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 core_ctl;
@@ -1776,9 +1779,9 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
+ int ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -1802,9 +1805,9 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* If PHY already detected, maintains current PHY type in hw struct,
* otherwise executes the PHY detection routine.
**/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
@@ -1835,7 +1838,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit for 82599
**/
-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
@@ -1865,12 +1868,12 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Return: -EACCES if the FW is not present or if the FW version is
* not supported.
**/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_ptp_cfg_offset;
- s32 status = -EACCES;
- u16 offset;
+ int status = -EACCES;
u16 fw_version = 0;
+ u16 offset;
/* firmware check is only necessary for SFI devices */
if (hw->phy.media_type != ixgbe_media_type_fiber)
@@ -1917,7 +1920,7 @@ fw_version_err:
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
- s32 status;
+ int status;
/* get the offset to the Firmware Module block */
status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
@@ -1956,7 +1959,7 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
*
* Retrieves 16 bit word(s) read from EEPROM
**/
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -1982,7 +1985,7 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word from the EEPROM
**/
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -2006,11 +2009,11 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 anlp1_reg = 0;
u32 i, autoc_reg, autoc2_reg;
+ u32 anlp1_reg = 0;
+ int ret_val;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
@@ -2061,12 +2064,12 @@ reset_pipeline_out:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
@@ -2115,12 +2118,12 @@ release_i2c_access:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
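
[Editor's note] Besides the return-type change, many hunks in this file only reorder local variable declarations; the apparent intent is the netdev "reverse Christmas tree" style, longest declaration line first. Taking the ixgbe_start_mac_link_82599() hunk above as the example, the reordering (with the s32 -> int change folded in) looks like:

/* before */
u32 autoc_reg;
u32 links_reg;
u32 i;
s32 status = 0;
bool got_lock = false;

/* after: longest lines first, no functional change */
bool got_lock = false;
int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;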
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2e6e0365154a..3be1bfb16498 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -10,10 +10,10 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u16 count);
@@ -22,15 +22,15 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
bool locked = false;
+ int ret_val = 0;
+ u16 reg_cu = 0;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -267,11 +267,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 ctrl_ext;
u16 device_caps;
+ u32 ctrl_ext;
+ int ret_val;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -330,7 +330,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* 82599
* X540
**/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
u32 i;
@@ -354,9 +354,9 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
* up link and flow control settings, and leaves transmit and receive units
* disabled and uninitialized
**/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
@@ -380,7 +380,7 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
* Clears all hardware statistics counters by reading them from the hardware
* Statistics counters are clear on read.
**/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
u16 i = 0;
@@ -489,14 +489,14 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
*
* Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
- s32 ret_val;
- u16 data;
+ int ret_val;
u16 pba_ptr;
u16 offset;
u16 length;
+ u16 data;
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
@@ -599,7 +599,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
@@ -653,7 +653,7 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
*
* Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
**/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
u16 link_status;
@@ -709,7 +709,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
u32 reg_val;
u16 i;
@@ -759,7 +759,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Store the index for the link active LED. This will be used to support
* blinking the LED.
**/
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 led_reg, led_mode;
@@ -800,7 +800,7 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @index: led number to turn on
**/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -821,7 +821,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to turn off
**/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -844,7 +844,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -895,11 +895,11 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -942,14 +942,14 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
u16 page_size;
+ int status;
+ u16 word;
u16 i;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -1019,7 +1019,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
hw->eeprom.ops.init_params(hw);
@@ -1038,11 +1038,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1077,12 +1077,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word_in;
u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 word_in;
+ int status;
u16 i;
/* Prepare the EEPROM for reading */
@@ -1129,7 +1129,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit value from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data)
{
hw->eeprom.ops.init_params(hw);
@@ -1149,11 +1149,11 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eerd;
- s32 status;
u32 i;
hw->eeprom.ops.init_params(hw);
@@ -1189,11 +1189,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
* This function is called only when we are writing a new large buffer
* at given offset so the data would be overwritten anyway.
**/
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset)
{
u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
- s32 status;
+ int status;
u16 i;
for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1229,7 +1229,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
@@ -1243,11 +1243,11 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eewr;
- s32 status;
u16 i;
hw->eeprom.ops.init_params(hw);
@@ -1286,7 +1286,7 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
@@ -1299,7 +1299,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1325,7 +1325,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
* Prepares EEPROM for access using bit-bang method. This function should
* be called before issuing a command to the EEPROM.
**/
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
u32 i;
@@ -1371,7 +1371,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
*
* Sets the hardware semaphores so EEPROM access can occur for bit-bang method
**/
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -1462,7 +1462,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
* ixgbe_ready_eeprom - Polls for EEPROM ready
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
u16 i;
u8 spi_stat_reg;
@@ -1680,7 +1680,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1728,7 +1728,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/**
@@ -1739,12 +1739,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1786,10 +1786,10 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1823,7 +1823,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
*
* Puts an ethernet address into a receive address register.
**/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
u32 rar_low, rar_high;
@@ -1876,7 +1876,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
*
* Clears an ethernet address from a receive address register.
**/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1917,7 +1917,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
u32 i;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1980,7 +1980,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
@@ -2049,7 +2049,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
@@ -2091,7 +2091,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
*
* Enables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2108,7 +2108,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
*
* Disables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2124,7 +2124,7 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
u32 mflcn_reg, fccfg_reg;
u32 reg;
@@ -2252,7 +2252,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
@@ -2294,10 +2294,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
*
* Enable flow control according on 1 gig fiber.
**/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val;
+ int ret_val;
/*
* On multispeed fiber at 1g, bail out if
@@ -2328,10 +2328,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val;
+ int ret_val;
/*
* On backplane, bail out if
@@ -2367,7 +2367,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
u16 technology_ability_reg = 0;
u16 lp_technology_ability_reg = 0;
@@ -2395,7 +2395,7 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
ixgbe_link_speed speed;
- s32 ret_val = -EIO;
+ int ret_val = -EIO;
bool link_up;
/*
@@ -2501,7 +2501,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
* bit hasn't caused the primary requests to be disabled, else 0
* is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2573,7 +2573,7 @@ gio_disable_fail:
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2641,7 +2641,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
*
* The default case requires no protection so just to the register read.
**/
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
*locked = false;
*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -2655,7 +2655,7 @@ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous read.
**/
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
return 0;
@@ -2668,7 +2668,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
* Stops the receive data path and waits for the HW to internally
* empty the Rx security block.
**/
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40
int i;
@@ -2700,7 +2700,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the receive data path
**/
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
u32 secrxreg;
@@ -2719,7 +2719,7 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit
**/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
@@ -2734,14 +2734,14 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
* @hw: pointer to hardware structure
* @index: led number to blink
**/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
- ixgbe_link_speed speed = 0;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ixgbe_link_speed speed = 0;
+ bool link_up = false;
bool locked = false;
- s32 ret_val;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2782,12 +2782,12 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to stop blinking
**/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
bool locked = false;
- s32 ret_val;
+ u32 autoc_reg = 0;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2821,10 +2821,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
* pointer, and returns the value at that location. This is used in both
* get and set mac_addr routines.
**/
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
{
- s32 ret_val;
+ int ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2849,11 +2849,11 @@ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
* set_lan_id() is called by identify_sfp(), but this cannot be relied
* upon for non-SFP connections, so we must call it here.
**/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
+ int ret_val;
u8 i;
- s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2942,7 +2942,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar_lo, mpsar_hi;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2993,7 +2993,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq pool index
**/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -3026,7 +3026,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* VFs advertized and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
**/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
u32 rar = hw->mac.san_mac_rar_index;
@@ -3045,7 +3045,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
int i;
@@ -3065,9 +3065,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- s32 regindex, first_empty_slot;
+ int regindex, first_empty_slot;
u32 bits;
/* short cut the special case */
@@ -3115,11 +3115,11 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regidx, vfta_delta, vfta, bits;
- s32 vlvf_index;
+ int vlvf_index;
if ((vlan > 4095) || (vind > 63))
return -EINVAL;
@@ -3226,7 +3226,7 @@ vfta_update:
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
u32 offset;
@@ -3276,7 +3276,7 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
@@ -3396,8 +3396,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* This function will read the EEPROM from the alternative SAN MAC address
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
@@ -3494,7 +3494,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* This function will read the EEPROM location for the device capabilities,
* and return the word through device_caps.
**/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
@@ -3604,7 +3604,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
**/
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
u32 timeout)
{
u32 hicr, i, fwsts;
@@ -3676,15 +3676,15 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
* Communicates with the manageability block. On success return 0
* else return -EIO or -EINVAL.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
struct ixgbe_hic_hdr *hdr = buffer;
- u32 *u32arr = buffer;
u16 buf_len, dword_len;
- s32 status;
+ u32 *u32arr = buffer;
+ int status;
u32 bi;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
@@ -3753,13 +3753,13 @@ rel_out:
* else returns -EBUSY when encountering an error acquiring
* semaphore or -EIO when command fails.
**/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
__always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
+ int ret_val;
int i;
- s32 ret_val;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3875,10 +3875,10 @@ static const u8 ixgbe_emc_therm_limit[4] = {
*
* Returns error code.
**/
-static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
u16 *ets_offset)
{
- s32 status;
+ int status;
status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
if (status)
@@ -3903,13 +3903,13 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
*
* Returns the thermal sensor data structure
**/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 ets_offset;
- u16 ets_cfg;
u16 ets_sensor;
u8 num_sensors;
+ u16 ets_cfg;
+ int status;
u8 i;
struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
@@ -3959,17 +3959,17 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
* Inits the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
- s32 status;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
u8 low_thresh_delta;
u8 num_sensors;
u8 therm_limit;
+ u16 ets_sensor;
+ u16 ets_offset;
+ u16 ets_cfg;
+ int status;
u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
@@ -4192,16 +4192,16 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
*
* Set the link speed in the MAC and/or PHY register and restarts link.
*/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ bool autoneg, link_up = false;
u32 speedcnt = 0;
+ int status = 0;
u32 i = 0;
- bool autoneg, link_up = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
@@ -4340,8 +4340,8 @@ out:
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u8 rs, eeprom_data;
+ int status;
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 34761e691d52..6493abf189de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,89 +8,89 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on, bool vlvf_bypass);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
@@ -111,8 +111,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
#define IXGBE_EMC_DIODE3_DATA 0x2A
#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
@@ -121,7 +121,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
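
Editorial note: the bulk of this patch mechanically converts the driver's s32 return type to plain int; the values carried are ordinary Linux error codes (0 on success, a negative -Exxx on failure), so the fixed-width type adds nothing. Below is a minimal userspace sketch of that return convention, not part of the patch; demo_read_reg() and its caller are made-up names, only the error-handling shape mirrors the driver code.

/* Sketch of the 0 / -Exxx return convention the patch standardizes on. */
#include <errno.h>
#include <stdio.h>

static int demo_read_reg(unsigned int reg, unsigned int *val)
{
	if (!val)
		return -EINVAL;	/* bad argument */
	if (reg > 0xffff)
		return -EIO;	/* pretend the access failed */
	*val = 0x1234;		/* fake register contents */
	return 0;		/* success */
}

int main(void)
{
	unsigned int v;
	int ret = demo_read_reg(0x10, &v);

	if (ret)	/* any non-zero value is a negative errno */
		fprintf(stderr, "read failed: %d\n", ret);
	else
		printf("reg = 0x%x\n", v);
	return ret ? 1 : 0;
}
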
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index d26cea5b43bd..502666f28124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -18,7 +18,7 @@
* @max: max credits by traffic class
* @max_frame: maximum frame size
*/
-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+static int ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
{
int min_percent = 100;
@@ -59,7 +59,7 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
@@ -247,7 +247,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u8 pfc_en;
@@ -283,7 +283,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -300,7 +300,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return -EINVAL;
}
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
{
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
@@ -333,7 +333,7 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
bwg_id, prio_type, ets->prio_tc);
}
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 60cd5863bf5e..91788e4c4e19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -124,15 +124,15 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
+int ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 379ae747cdce..185c3e5f9837 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -15,10 +15,8 @@
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type)
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
@@ -75,11 +73,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
@@ -124,11 +119,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg;
u8 i;
@@ -171,7 +163,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
@@ -224,7 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -260,7 +252,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index fdca41abb44c..5bf3f13c6953 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -46,27 +46,19 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 7948849840a5..c61bd9059541 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -17,7 +17,7 @@
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -76,7 +76,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -128,7 +128,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -187,7 +187,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
u32 i, j, fcrtl, reg;
u8 max_tc = 0;
@@ -272,7 +272,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -330,7 +330,7 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index c6f084883cab..f6e5a87c03e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -70,30 +70,21 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9a63457712c7..6e6e6f1847b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -349,6 +349,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
@@ -459,7 +461,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
- s32 err = 0;
+ int err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -3326,9 +3328,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ int status;
if (hw->phy.type == ixgbe_phy_fw)
return -ENXIO;
@@ -3372,7 +3374,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = -EFAULT;
+ int status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
@@ -3403,66 +3405,68 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
static const struct {
ixgbe_link_speed mac_speed;
- u32 supported;
+ u32 link_mode;
} ixgbe_ls_map[] = {
- { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
- { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
- { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
- { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
- { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+ { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
+ { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
};
static const struct {
u32 lp_advertised;
- u32 mac_speed;
+ u32 link_mode;
} ixgbe_lp_map[] = {
- { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
- { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
- { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
- { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
- { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
- { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
};
static int
-ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
- s32 rc;
+ int rc;
u16 i;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
if (rc)
return rc;
- edata->lp_advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
if (info[0] & ixgbe_lp_map[i].lp_advertised)
- edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->lp_advertised);
}
- edata->supported = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- edata->supported |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->supported);
}
- edata->advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- edata->advertised |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->advertised);
}
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !linkmode_empty(edata->advertised);
edata->tx_lpi_enabled = edata->eee_enabled;
- if (edata->advertised & edata->lp_advertised)
- edata->eee_active = true;
+
+ linkmode_and(common, edata->advertised, edata->lp_advertised);
+ edata->eee_active = !linkmode_empty(common);
return 0;
}
-static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3476,17 +3480,17 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- struct ethtool_eee eee_data;
- s32 ret_val;
+ struct ethtool_keee eee_data;
+ int ret_val;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
return -EOPNOTSUPP;
- memset(&eee_data, 0, sizeof(struct ethtool_eee));
+ memset(&eee_data, 0, sizeof(struct ethtool_keee));
ret_val = ixgbe_get_eee(netdev, &eee_data);
if (ret_val)
@@ -3504,7 +3508,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (eee_data.advertised != edata->advertised) {
+ if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
e_err(drv,
"Setting EEE advertised speeds is not supported\n");
return -EINVAL;
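
Editorial note: the ethtool hunks above move from the legacy ethtool_eee u32 speed bitmasks to ethtool_keee link-mode bitmaps, replacing |= and & with linkmode_set_bit(), linkmode_and() and linkmode_empty(). The following is a hedged userspace sketch of that same bitmap logic, not the kernel API: plain unsigned long arrays stand in for __ETHTOOL_DECLARE_LINK_MODE_MASK(), and the bit numbers are illustrative only.

/* Userspace sketch of the advertised/lp_advertised intersection that
 * decides eee_active above.  Bit numbers are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define MASK_WORDS 2
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static void mask_set_bit(unsigned int bit, unsigned long *mask)
{
	mask[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}

static bool mask_empty(const unsigned long *mask)
{
	for (int i = 0; i < MASK_WORDS; i++)
		if (mask[i])
			return false;
	return true;
}

static void mask_and(unsigned long *dst, const unsigned long *a,
		     const unsigned long *b)
{
	for (int i = 0; i < MASK_WORDS; i++)
		dst[i] = a[i] & b[i];
}

int main(void)
{
	unsigned long advertised[MASK_WORDS] = { 0 };
	unsigned long lp_advertised[MASK_WORDS] = { 0 };
	unsigned long common[MASK_WORDS];

	mask_set_bit(5, advertised);	/* e.g. a 1G full-duplex mode */
	mask_set_bit(5, lp_advertised);
	mask_set_bit(12, lp_advertised);

	mask_and(common, advertised, lp_advertised);
	printf("eee_enabled=%d eee_active=%d\n",
	       !mask_empty(advertised), !mask_empty(common));
	return 0;
}
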
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd541527c8c7..f985252c8c8d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(max_vfs,
#endif /* CONFIG_PCI_IOV */
static bool allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, bool, 0);
+module_param(allow_unsupported_sfp, bool, 0444);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -205,7 +205,7 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return 0;
}
-static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
+static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 link_status = 0;
@@ -1106,6 +1106,44 @@ static int ixgbe_tx_maxrate(struct net_device *netdev,
}
/**
+ * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += bytes;
+ tx_ring->stats.packets += pkts;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += bytes;
+ q_vector->tx.total_packets += pkts;
+}
+
+/**
+ * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.bytes += bytes;
+ rx_ring->stats.packets += pkts;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_bytes += bytes;
+ q_vector->rx.total_packets += pkts;
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1207,12 +1245,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -2429,12 +2463,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
return total_rx_packets;
}
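
Editorial note: the new ixgbe_update_tx_ring_stats()/ixgbe_update_rx_ring_stats() helpers factor out the open-coded u64_stats_update_begin()/end() sections deleted above. That begin/end pair is a sequence counter letting a 32-bit reader detect a torn 64-bit counter read and retry. Below is a hedged, single-threaded userspace sketch of that writer/reader protocol (no memory barriers, kernel API names not used) just to show the retry shape.

/* Sketch of the sequence-counter protocol behind the ring-stats helpers:
 * the writer makes the sequence odd while updating, the reader retries
 * if it observed an odd or changed sequence.
 */
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	unsigned int seq;	/* even = stable, odd = update in progress */
	uint64_t packets;
	uint64_t bytes;
};

static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;		/* begin: sequence becomes odd */
	s->packets += pkts;
	s->bytes += bytes;
	s->seq++;		/* end: sequence becomes even again */
}

static void stats_read(const struct ring_stats *s,
		       uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;	/* snapshot the sequence */
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq); /* retry on torn read */
}

int main(void)
{
	struct ring_stats s = { 0 };
	uint64_t p, b;

	stats_update(&s, 10, 6400);
	stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
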
@@ -2939,8 +2969,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -7809,7 +7839,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- s32 err;
+ int err;
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
@@ -10205,7 +10235,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -10525,6 +10555,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10540,6 +10608,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10548,9 +10621,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10578,9 +10648,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10589,6 +10656,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
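
Editorial note: the ixgbe_main.c hunks above add ixgbe_irq_disable_single(), which quiesces one vector by turning the queue index into a 64-bit mask (BIT_ULL(ring)) and splitting it across the low and high 32-bit extended mask registers, and they reorder NAPI/IRQ handling so the rings are only re-enabled after reconfiguration. The sketch below shows just the mask-splitting arithmetic; write_reg32() is a stand-in for the MMIO write and nothing here touches real hardware.

/* Splitting a 64-bit per-queue mask into two 32-bit register writes. */
#include <stdint.h>
#include <stdio.h>

static void write_reg32(const char *name, uint32_t val)
{
	printf("%s <= 0x%08x\n", name, val);
}

static void mask_single_queue(unsigned int ring)
{
	uint64_t qmask = 1ULL << ring;	/* BIT_ULL(ring) */
	uint32_t lo = qmask & 0xFFFFFFFF;
	uint32_t hi = qmask >> 32;

	if (lo)
		write_reg32("EIMS_EX(0)", lo);	/* queues 0..31 */
	if (hi)
		write_reg32("EIMS_EX(1)", hi);	/* queues 32..63 */
}

int main(void)
{
	mask_single_queue(3);	/* lands in the low word */
	mask_single_queue(40);	/* lands in the high word */
	return 0;
}
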
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index fe7ef5773369..d67d77e5dacc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -15,7 +15,7 @@
*
* returns SUCCESS if it successfully read message from buffer
**/
-s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -38,7 +38,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -58,7 +58,7 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -75,7 +75,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -92,7 +92,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -109,7 +109,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message notification
**/
-static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -134,7 +134,7 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
-static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -162,11 +162,11 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
if (!mbx->ops)
return -EIO;
@@ -189,11 +189,11 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static int ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
@@ -208,7 +208,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ixgbe_poll_for_ack(hw, mbx_id);
}
-static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+static int ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -227,9 +227,9 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
@@ -248,9 +248,9 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
@@ -305,7 +305,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 p2v_mailbox;
@@ -329,10 +329,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
@@ -368,10 +368,10 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
-static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
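
Editorial note: the posted-mailbox paths converted above keep their original shape: write the message, then poll for an ack with a bounded countdown (mbx->timeout iterations with a fixed delay between checks), failing with a negative error when the countdown expires. A hedged userspace sketch of that poll-with-countdown pattern follows; ack_arrived() is a stub and the timeout/error values are illustrative.

/* Bounded poll: check, delay, decrement, give up with -EIO on timeout. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool ack_arrived(int attempt)
{
	return attempt >= 3;	/* pretend the ack shows up on try 3 */
}

static int poll_for_ack(int countdown, useconds_t delay_us)
{
	int attempt = 0;

	while (countdown--) {
		if (ack_arrived(attempt++))
			return 0;
		usleep(delay_us);
	}
	return -EIO;	/* timed out waiting for the other side */
}

int main(void)
{
	int ret = poll_for_ack(10, 1000);

	printf("poll_for_ack: %d\n", ret);
	return ret ? 1 : 0;
}
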
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 6434c190e7a4..bd205306934b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -96,11 +96,11 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+int ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index f28140a05f09..07eaa3c3f4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -11,19 +11,19 @@
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
@@ -32,9 +32,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
*
* Returns an error code on error.
**/
-static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+static int ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_out_i2c_byte(hw, byte);
if (status)
@@ -49,9 +49,9 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
*
* Returns an error code on error.
**/
-static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+static int ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status)
@@ -85,7 +85,7 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
*
* Returns an error code on error.
*/
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -163,7 +163,7 @@ fail:
*
* Returns an error code on error.
*/
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -260,7 +260,7 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
*
* Determines the physical layer module found on the current adapter.
**/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
u32 status = -EFAULT;
u32 phy_addr;
@@ -332,11 +332,11 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
**/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
@@ -394,11 +394,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
* ixgbe_reset_phy_generic - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
u32 i;
u16 ctrl = 0;
- s32 status = 0;
+ int status = 0;
if (hw->phy.type == ixgbe_phy_unknown)
status = ixgbe_identify_phy_generic(hw);
@@ -470,8 +470,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*
* Reads a value from a specified PHY register without the SWFW lock
**/
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
{
u32 i, data, command;
@@ -546,11 +546,11 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -571,8 +571,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
{
u32 i, command;
@@ -644,11 +644,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -668,7 +668,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @hw: pointer to hardware structure
* @cmd: command register value to write
**/
-static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+static int ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
@@ -684,11 +684,11 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -718,11 +718,11 @@ mii_bus_read_done:
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -756,11 +756,11 @@ mii_bus_read_done:
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -787,12 +787,12 @@ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u16 val,
u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -821,7 +821,7 @@ mii_bus_write_done:
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+static int ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
@@ -837,7 +837,7 @@ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+static int ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -854,7 +854,7 @@ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+static int ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -872,7 +872,7 @@ static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+static int ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -889,7 +889,7 @@ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -907,7 +907,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
* @devad: device address to read
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
int devad, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -925,7 +925,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -944,7 +944,7 @@ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -1023,13 +1023,13 @@ out:
*
* ixgbe_mii_bus_init initializes a mii_bus structure in adapter
**/
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
- s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ int (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ int (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ int (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
u16 val);
- s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
+ int (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
@@ -1095,12 +1095,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*
* Restart autonegotiation and PHY and waits for completion.
**/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = 0;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
ixgbe_link_speed speed;
+ bool autoneg = false;
+ int status = 0;
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
@@ -1173,7 +1173,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: unused
**/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -1214,10 +1214,10 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the supported link capabilities by reading the PHY auto
* negotiation register.
*/
-static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
u16 speed_ability;
- s32 status;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
@@ -1253,11 +1253,11 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
- s32 status = 0;
+ int status = 0;
*autoneg = true;
if (!hw->phy.speeds_supported)
@@ -1276,15 +1276,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
- s32 status;
- u32 time_out;
u32 max_time_out = 10;
- u16 phy_link = 0;
u16 phy_speed = 0;
+ u16 phy_link = 0;
u16 phy_data = 0;
+ u32 time_out;
+ int status;
/* Initialize speed and link to default case */
*link_up = false;
@@ -1326,7 +1326,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* it is called via a function pointer that could call other
* functions that could return an error.
**/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
@@ -1399,13 +1399,13 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
u16 phy_offset, control, eword, edata, block_crc;
- bool end_data = false;
u16 list_offset, data_offset;
+ bool end_data = false;
u16 phy_data = 0;
- s32 ret_val;
+ int ret_val;
u32 i;
/* Blocked by MNG FW so bail */
@@ -1506,7 +1506,7 @@ err_eeprom:
*
* Determines HW type and calls appropriate function.
**/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw)
{
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
@@ -1527,19 +1527,20 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
*
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 bitrate_nominal = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+ u16 enforce_sfp = 0;
u32 vendor_oui = 0;
- enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
- u8 comp_codes_1g = 0;
- u8 comp_codes_10g = 0;
- u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
- u16 enforce_sfp = 0;
+ int status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1576,7 +1577,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
+ if (status)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_BITRATE_NOMINAL,
+ &bitrate_nominal);
if (status)
goto err_read_i2c_eeprom;
@@ -1659,6 +1665,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ /* Support only Ethernet 1000BASE-BX10, checking the Bit Rate
+ * Nominal Value as per SFF-8472: by convention, 1.25 Gb/s should
+ * be rounded up to 0Dh (13 in units of 100 MBd) for 1000BASE-BX.
+ */
+ } else if ((comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
+ (bitrate_nominal == 0xD)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
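The hunk above keys 1000BASE-BX detection off two SFF-8472 ID bytes: the BASE-BX10 bit in the 1G compliance codes and a nominal bit rate byte of 0Dh. A minimal standalone sketch of that decision, compiled outside the kernel; the constants merely repeat the values the driver header defines, and the two byte arguments stand in for values read via read_i2c_eeprom():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SFF_BASEBX10_CAPABLE 0x64	/* same value as IXGBE_SFF_BASEBX10_CAPABLE */
#define DEMO_SFF_BITRATE_1G_BX    0x0D	/* 13 x 100 MBd, i.e. 1.25 Gb/s rounded up */

/* True when the module advertises BASE-BX10 in its 1G compliance codes and
 * its nominal bit rate byte matches the 1000BASE-BX convention noted above.
 */
static bool demo_sfp_is_1g_bx(uint8_t comp_codes_1g, uint8_t bitrate_nominal)
{
	return (comp_codes_1g & DEMO_SFF_BASEBX10_CAPABLE) &&
	       bitrate_nominal == DEMO_SFF_BITRATE_1G_BX;
}

int main(void)
{
	/* Example ID bytes shaped like what read_i2c_eeprom() would return */
	printf("1000BASE-BX: %d\n",
	       demo_sfp_is_1g_bx(DEMO_SFF_BASEBX10_CAPABLE, 0x0D));
	return 0;
}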
@@ -1747,7 +1765,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return -EOPNOTSUPP;
}
@@ -1763,7 +1783,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -1792,10 +1814,10 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ int status;
u32 vendor_oui = 0;
enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
@@ -1975,7 +1997,7 @@ err_read_i2c_eeprom:
* Checks the MAC's EEPROM to see if it supports a given SFP+ module type; if
* so, it returns the offsets to the PHY init sequence block.
**/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset)
{
@@ -1999,12 +2021,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
@@ -2065,7 +2089,7 @@ err_phy:
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2081,7 +2105,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2097,7 +2121,7 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
@@ -2131,14 +2155,14 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data, bool lock)
{
- s32 status;
- u32 max_retry = 10;
- u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 max_retry = 10;
bool nack = true;
+ u32 retry = 0;
+ int status;
if (hw->mac.type >= ixgbe_mac_X550)
max_retry = 3;
@@ -2221,7 +2245,7 @@ fail:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2238,7 +2262,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2256,13 +2280,13 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
- s32 status;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
u32 max_retry = 1;
u32 retry = 0;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int status;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return -EBUSY;
@@ -2324,7 +2348,7 @@ fail:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2341,7 +2365,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2422,10 +2446,10 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
*
* Clocks in one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 i;
bool bit = false;
+ int i;
*data = 0;
for (i = 7; i >= 0; i--) {
@@ -2443,12 +2467,12 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
*
* Clocks out one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
- s32 status;
- s32 i;
- u32 i2cctl;
bool bit = false;
+ int status;
+ u32 i2cctl;
+ int i;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -2474,14 +2498,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
*
* Clocks in/out one bit via I2C data/clock
**/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
- s32 status = 0;
- u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
u32 timeout = 10;
bool ack = true;
+ int status = 0;
+ u32 i = 0;
if (data_oe_bit) {
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
@@ -2525,7 +2549,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
*
* Clocks in one bit via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2559,10 +2583,10 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
*
* Clocks out one bit via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ int status;
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2647,7 +2671,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* Sets the I2C data bit
* Asserts the I2C data output enable on X550 hardware.
**/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2769,7 +2793,7 @@ bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @on: true for on, false for off
**/
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
{
u32 status;
u16 reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index ef72729d7c93..14aa2ca51f70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -17,6 +17,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -39,6 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
@@ -121,57 +123,57 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val, bool lock);
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val, bool lock);
#endif /* _IXGBE_PHY_H_ */
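All of these prototypes now return plain int in the usual kernel convention of 0 on success and a negative errno on failure (the diff shows, for example, -EOPNOTSUPP for unsupported SFP modules and -EBUSY when a semaphore cannot be taken). A standalone illustration of the caller-side pattern; demo_identify_sfp() is a made-up stand-in, not an ixgbe function:

#include <errno.h>
#include <stdio.h>

/* Stand-in for a probe routine such as ixgbe_identify_sfp_module_generic():
 * 0 on success, a negative errno on failure.
 */
static int demo_identify_sfp(int supported)
{
	return supported ? 0 : -EOPNOTSUPP;
}

int main(void)
{
	int err = demo_identify_sfp(0);

	if (err == -EOPNOTSUPP)
		printf("SFP module present but not supported\n");
	else if (err)
		printf("probe failed: %d\n", err);
	else
		printf("SFP identified\n");
	return 0;
}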
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7299a830f6e4..fcfd0a075eee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -492,7 +492,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
u32 reg_offset, vf_shift, vfre;
- s32 err = 0;
+ int err = 0;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
@@ -775,7 +775,7 @@ static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- s32 retval;
+ int retval;
ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
@@ -1254,7 +1254,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
- s32 retval;
+ int retval;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- s32 retval;
+ int retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index f1f69ce67420..78deea5ec536 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -46,4 +46,11 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+
#endif /* #define _IXGBE_TXRX_COMMON_H_ */
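The two new prototypes above take a ring, its queue vector, and completed packet/byte counts. A rough guess at the shape of such a helper; the structures below are invented for a standalone example, and the real ixgbe implementation (including any u64_stats synchronization on 32-bit) is not reproduced here:

#include <stdint.h>
#include <stdio.h>

struct demo_ring_stats { uint64_t packets, bytes; };
struct demo_ring       { struct demo_ring_stats stats; };
struct demo_qvector    { struct demo_ring_stats tx, rx; };

/* Accumulate completed Tx counts on both the ring and its queue vector. */
static void demo_update_tx_ring_stats(struct demo_ring *ring,
				      struct demo_qvector *qv,
				      uint64_t pkts, uint64_t bytes)
{
	ring->stats.packets += pkts;
	ring->stats.bytes   += bytes;
	qv->tx.packets      += pkts;
	qv->tx.bytes        += bytes;
}

int main(void)
{
	struct demo_ring ring = { { 0, 0 } };
	struct demo_qvector qv = { { 0, 0 }, { 0, 0 } };

	demo_update_tx_ring_stats(&ring, &qv, 3, 1500);
	printf("ring: %llu pkts / %llu bytes\n",
	       (unsigned long long)ring.stats.packets,
	       (unsigned long long)ring.stats.bytes);
	return 0;
}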
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 61b9774b3d31..ed440dd0c4f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3210,6 +3210,9 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
+
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -3393,50 +3396,50 @@ struct ixgbe_hw;
/* Function pointer table */
struct ixgbe_eeprom_operations {
- s32 (*init_params)(struct ixgbe_hw *);
- s32 (*read)(struct ixgbe_hw *, u16, u16 *);
- s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*write)(struct ixgbe_hw *, u16, u16);
- s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- s32 (*update_checksum)(struct ixgbe_hw *);
- s32 (*calc_checksum)(struct ixgbe_hw *);
+ int (*init_params)(struct ixgbe_hw *);
+ int (*read)(struct ixgbe_hw *, u16, u16 *);
+ int (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*write)(struct ixgbe_hw *, u16, u16);
+ int (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ int (*update_checksum)(struct ixgbe_hw *);
+ int (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
- s32 (*init_hw)(struct ixgbe_hw *);
- s32 (*reset_hw)(struct ixgbe_hw *);
- s32 (*start_hw)(struct ixgbe_hw *);
- s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ int (*init_hw)(struct ixgbe_hw *);
+ int (*reset_hw)(struct ixgbe_hw *);
+ int (*start_hw)(struct ixgbe_hw *);
+ int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
- s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
- s32 (*stop_adapter)(struct ixgbe_hw *);
- s32 (*get_bus_info)(struct ixgbe_hw *);
+ int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ int (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ int (*stop_adapter)(struct ixgbe_hw *);
+ int (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
- s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
- s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
- s32 (*setup_sfp)(struct ixgbe_hw *);
- s32 (*disable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ int (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ int (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ int (*setup_sfp)(struct ixgbe_hw *);
+ int (*disable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ int (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
void (*init_swfw_sync)(struct ixgbe_hw *);
- s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
- s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ int (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ int (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
- s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ int (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ int (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
@@ -3444,38 +3447,38 @@ struct ixgbe_mac_operations {
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
/* LED */
- s32 (*led_on)(struct ixgbe_hw *, u32);
- s32 (*led_off)(struct ixgbe_hw *, u32);
- s32 (*blink_led_start)(struct ixgbe_hw *, u32);
- s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
- s32 (*init_led_link_act)(struct ixgbe_hw *);
+ int (*led_on)(struct ixgbe_hw *, u32);
+ int (*led_off)(struct ixgbe_hw *, u32);
+ int (*blink_led_start)(struct ixgbe_hw *, u32);
+ int (*blink_led_stop)(struct ixgbe_hw *, u32);
+ int (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
- s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
- s32 (*clear_rar)(struct ixgbe_hw *, u32);
- s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
- s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*enable_mc)(struct ixgbe_hw *);
- s32 (*disable_mc)(struct ixgbe_hw *);
- s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
- s32 (*init_uta_tables)(struct ixgbe_hw *);
+ int (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ int (*clear_rar)(struct ixgbe_hw *, u32);
+ int (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ int (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*init_rx_addrs)(struct ixgbe_hw *);
+ int (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ int (*enable_mc)(struct ixgbe_hw *);
+ int (*disable_mc)(struct ixgbe_hw *);
+ int (*clear_vfta)(struct ixgbe_hw *);
+ int (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ int (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *);
- s32 (*setup_fc)(struct ixgbe_hw *);
+ int (*fc_enable)(struct ixgbe_hw *);
+ int (*setup_fc)(struct ixgbe_hw *);
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ int (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
const char *);
- s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
- s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ int (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
@@ -3484,47 +3487,47 @@ struct ixgbe_mac_operations {
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
- s32 (*dmac_config)(struct ixgbe_hw *hw);
- s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
- s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
- s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
- s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ int (*dmac_config)(struct ixgbe_hw *hw);
+ int (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ int (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
- s32 (*identify)(struct ixgbe_hw *);
- s32 (*identify_sfp)(struct ixgbe_hw *);
- s32 (*init)(struct ixgbe_hw *);
- s32 (*reset)(struct ixgbe_hw *);
- s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_internal_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
- s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ int (*identify)(struct ixgbe_hw *);
+ int (*identify_sfp)(struct ixgbe_hw *);
+ int (*init)(struct ixgbe_hw *);
+ int (*reset)(struct ixgbe_hw *);
+ int (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ int (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ int (*setup_link)(struct ixgbe_hw *);
+ int (*setup_internal_link)(struct ixgbe_hw *);
+ int (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ int (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ int (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ int (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *);
+ int (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ int (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
bool (*check_overtemp)(struct ixgbe_hw *);
- s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
- s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
- s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*set_phy_power)(struct ixgbe_hw *, bool on);
+ int (*enter_lplu)(struct ixgbe_hw *);
+ int (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ int (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
- s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 value);
};
struct ixgbe_link_operations {
- s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
- s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ int (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val);
- s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
- s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ int (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val);
};
@@ -3602,14 +3605,14 @@ struct ixgbe_phy_info {
#include "ixgbe_mbx.h"
struct ixgbe_mbx_operations {
- s32 (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*check_for_msg)(struct ixgbe_hw *, u16);
+ int (*check_for_ack)(struct ixgbe_hw *, u16);
+ int (*check_for_rst)(struct ixgbe_hw *, u16);
};
struct ixgbe_mbx_stats {
@@ -3656,7 +3659,7 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
- s32 (*get_invariants)(struct ixgbe_hw *);
+ int (*get_invariants)(struct ixgbe_hw *);
const struct ixgbe_mac_operations *mac_ops;
const struct ixgbe_eeprom_operations *eeprom_ops;
const struct ixgbe_phy_operations *phy_ops;
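The ops tables above (eeprom, mac, phy, link, mbx) all switch from s32 to int, so errors propagate as negative errno values through every indirect call and callers can check them uniformly. A small standalone illustration of that ops-table pattern; the names and the fake EEPROM word are invented for the example:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct demo_eeprom_ops {
	int (*read)(uint16_t offset, uint16_t *data);
};

static int demo_eeprom_read(uint16_t offset, uint16_t *data)
{
	if (offset > 0x3f)
		return -EINVAL;	/* out of range: negative errno */
	*data = 0xCAFE;		/* pretend EEPROM word */
	return 0;
}

static const struct demo_eeprom_ops demo_ops = { .read = demo_eeprom_read };

int main(void)
{
	uint16_t word;
	int err = demo_ops.read(0x10, &word);

	if (err)
		printf("eeprom read failed: %d\n", err);
	else
		printf("eeprom word: 0x%04x\n", word);
	return 0;
}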
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 57a912e4653f..f1ffa398f6df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -16,9 +16,9 @@
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
@@ -26,7 +26,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
return ixgbe_media_type_copper;
}
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -51,7 +51,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
@@ -66,11 +66,11 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
- s32 status;
- u32 ctrl, i;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -166,9 +166,9 @@ mac_reset_top:
* and the generation-specific start_hw function.
* Then performs revision-specific operations, if any.
**/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -184,7 +184,7 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -215,9 +215,9 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
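The hunk above shows the recurring shape of these EEPROM accessors: take the EEP_SM semaphore, return -EBUSY immediately if it cannot be acquired, do the access, then release. A standalone sketch of that acquire/operate/release pattern with stand-in lock and read helpers (not ixgbe code):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_try_lock(void) { return true; }	/* pretend semaphore */
static void demo_unlock(void)   { }
static int  demo_raw_read(uint16_t off, uint16_t *d) { *d = off; return 0; }

static int demo_read_eeprom_word(uint16_t offset, uint16_t *data)
{
	int status;

	if (!demo_try_lock())
		return -EBUSY;		/* semaphore busy: bail out immediately */

	status = demo_raw_read(offset, data);
	demo_unlock();			/* always drop the semaphore afterwards */
	return status;
}

int main(void)
{
	uint16_t word = 0;
	int status = demo_read_eeprom_word(0x2a, &word);

	printf("status %d, word 0x%04x\n", status, word);
	return 0;
}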
@@ -237,10 +237,10 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -259,9 +259,9 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -281,10 +281,10 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -303,7 +303,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -368,7 +368,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
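As the cast above shows, the checksum calculators return either a negative errno or the 16-bit checksum widened to int, so callers separate the two cases by sign. A standalone sketch of that caller pattern; demo_calc_checksum() is invented for the example:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int demo_calc_checksum(int fail)
{
	uint16_t checksum = 0xBEEF;	/* pretend computed checksum */

	return fail ? -EIO : (int)checksum;
}

int main(void)
{
	int ret = demo_calc_checksum(0);

	if (ret < 0)
		printf("checksum calculation failed: %d\n", ret);
	else
		printf("checksum: 0x%04x\n", (uint16_t)ret);
	return 0;
}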
/**
@@ -379,12 +379,12 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -439,10 +439,10 @@ out:
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -484,10 +484,10 @@ out:
* Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
+ int status;
u32 flup;
- s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == -EIO) {
@@ -529,7 +529,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
@@ -551,7 +551,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
@@ -660,7 +660,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
*/
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -760,7 +760,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -798,7 +798,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index e246c0d2a427..b69a680d3ab5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -3,17 +3,17 @@
#include "ixgbe_type.h"
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index c1adc94a5a65..2decb0710b6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -6,13 +6,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -29,7 +29,7 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -41,7 +41,7 @@ static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -55,7 +55,7 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -91,7 +91,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*
* Returns status code
*/
-static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+static int ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -104,7 +104,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+static int ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -117,9 +117,9 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
*
* Returns status code
*/
-static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+static int ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
{
- s32 status;
+ int status;
status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
if (status)
@@ -135,9 +135,9 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+static int ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
{
- s32 status;
+ int status;
status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
value);
@@ -153,9 +153,9 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
* This function assumes that the caller has acquired the proper semaphore.
* Returns error code
*/
-static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+static int ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u32 retry;
u16 value;
u8 reg;
@@ -225,7 +225,7 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- s32 status;
+ int status;
u16 value;
u8 retry;
@@ -292,7 +292,7 @@ out:
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
@@ -347,13 +347,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
return -EOPNOTSUPP;
}
-static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
return -EOPNOTSUPP;
@@ -368,7 +368,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+static int ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -383,7 +383,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -399,7 +399,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -414,7 +414,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -427,7 +427,7 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
* @activity: activity to perform
* @data: Pointer to 4 32-bit words of data
*/
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT])
{
union {
@@ -435,7 +435,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
struct ixgbe_hic_phy_activity_resp rsp;
} hic;
u16 retries = FW_PHY_ACT_RETRIES;
- s32 rc;
+ int rc;
u32 i;
do {
@@ -484,12 +484,12 @@ static const struct {
*
* Returns error code
*/
-static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
u16 phy_speeds;
u16 phy_id_lo;
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.id)
@@ -526,7 +526,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
{
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
@@ -545,7 +545,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+static int ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
@@ -557,10 +557,10 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
* ixgbe_setup_fw_link - Setup firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+static int ixgbe_setup_fw_link(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
@@ -613,7 +613,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
*/
-static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
{
if (hw->fc.requested_mode == ixgbe_fc_default)
hw->fc.requested_mode = ixgbe_fc_full;
@@ -627,7 +627,7 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static int ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -659,7 +659,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
*
* Note: ctrl can be NULL if the IOSF control register value is not needed
*/
-static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
u32 i, command;
@@ -690,12 +690,12 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
* @device_type: 3 bit device type
* @phy_data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -732,10 +732,10 @@ out:
* ixgbe_get_phy_token - Get the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -761,10 +761,10 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
* ixgbe_put_phy_token - Put the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_put_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -790,7 +790,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 data)
{
@@ -816,7 +816,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 3 bit device type
* @data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 *data)
{
@@ -824,7 +824,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
struct ixgbe_hic_internal_phy_req cmd;
struct ixgbe_hic_internal_phy_resp rsp;
} hic;
- s32 status;
+ int status;
memset(&hic, 0, sizeof(hic));
hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
@@ -851,14 +851,14 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
- s32 status;
+ int status;
u32 i;
/* Take semaphore for the entire operation. */
@@ -923,14 +923,14 @@ out:
*
* Returns error status for any failure
**/
-static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+static int ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 size, u16 *csum, u16 *buffer,
u32 buffer_size)
{
- u16 buf[256];
- s32 status;
u16 length, bufsz, i, start;
u16 *local_buffer;
+ u16 buf[256];
+ int status;
bufsz = ARRAY_SIZE(buf);
@@ -991,14 +991,14 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
u32 buffer_size)
{
u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 pointer, i, size;
u16 *local_buffer;
- s32 status;
u16 checksum = 0;
- u16 pointer, i, size;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1060,7 +1060,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1068,7 +1068,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
return ixgbe_calc_checksum_X550(hw, NULL, 0);
}
@@ -1080,11 +1080,11 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
- s32 status;
+ int status;
buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1118,12 +1118,12 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1168,11 +1168,11 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data)
{
- s32 status;
struct ixgbe_hic_write_shadow_ram buffer;
+ int status;
buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1196,9 +1196,9 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status = 0;
+ int status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
@@ -1216,10 +1216,10 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
**/
-static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X550(struct ixgbe_hw *hw)
{
- s32 status = 0;
union ixgbe_hic_hdr2 buffer;
+ int status = 0;
buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
buffer.req.buf_lenh = 0;
@@ -1238,7 +1238,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
* Sets bus link width and speed to unknown because X550em is
* not a PCI device.
**/
-static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
{
hw->bus.type = ixgbe_bus_type_internal;
hw->bus.width = ixgbe_bus_width_unknown;
@@ -1269,9 +1269,9 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
- u32 rxctrl, pfdtxgswc;
- s32 status;
struct ixgbe_hic_disable_rxen fw_cmd;
+ u32 rxctrl, pfdtxgswc;
+ int status;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
@@ -1311,10 +1311,10 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum = 0;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1351,11 +1351,11 @@ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Write a 16 bit word(s) to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words,
u16 *data)
{
- s32 status = 0;
+ int status = 0;
u32 i = 0;
/* Take semaphore for the entire operation. */
@@ -1387,12 +1387,12 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -1430,10 +1430,10 @@ out:
*
* iXFI configuration needed for ixgbe_mac_X550EM_x devices.
**/
-static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+static int ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
- s32 status;
u32 reg_val;
+ int status;
/* Disable training protocol FSM. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1502,10 +1502,10 @@ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
* internal PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
{
- s32 status;
u32 link_ctrl;
+ int status;
/* Restart auto-negotiation. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
@@ -1551,11 +1551,11 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
* Configures the integrated KR PHY to use iXFI mode. Used to connect an
* internal and external PHY at a specific speed, without autonegotiation.
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
@@ -1608,7 +1608,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @hw: pointer to hardware structure
* @linear: true if SFP module is linear
*/
-static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+static int ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
@@ -1645,14 +1645,14 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
*
* Configures the external PHY and the integrated KR PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
- s32 status;
- u16 reg_slice, reg_val;
bool setup_linear = false;
+ u16 reg_slice, reg_val;
+ int status;
/* Check if SFP module is supported and linear */
status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1691,11 +1691,11 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* Configures the integrated PHY for native SFI mode. Used to connect the
* internal PHY directly to an SFP cage, without autonegotiation.
**/
-static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* Disable all AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
@@ -1790,13 +1790,13 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
*
* Configure the integrated PHY for native SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
bool setup_linear = false;
u32 reg_phy_int;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1839,14 +1839,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Configure the integrated PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
u32 reg_slice, slice_offset;
bool setup_linear = false;
u16 reg_phy_ext;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1918,12 +1918,12 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Returns error status for any failure
**/
-static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait)
{
- s32 status;
ixgbe_link_speed force_speed;
+ int status;
/* Setup internal/external PHY link speed to iXFI (10G), unless
* only 1G is auto advertised then setup KX link.
@@ -1954,7 +1954,7 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
*
* Check that both the MAC and X557 external PHY have link.
**/
-static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool link_up_wait_to_complete)
@@ -1998,13 +1998,13 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
* @speed: unused
* @autoneg_wait_to_complete: unused
*/
-static s32
+static int
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2071,12 +2071,12 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
* @speed: the link speed to force
* @autoneg_wait: true when waiting for completion is needed
*/
-static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+static int ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2148,7 +2148,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2276,10 +2276,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- s32 status;
bool linear;
+ int status;
/* Check if SFP module is supported */
status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
@@ -2297,7 +2297,7 @@ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
**/
-static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -2375,7 +2375,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
bool *is_overtemp)
{
u32 status;
@@ -2463,7 +2463,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
*
* Returns PHY access status
**/
-static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
bool lsc, overtemp;
u32 status;
@@ -2555,7 +2555,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -2579,11 +2579,11 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
*
* Configures the integrated KR PHY.
**/
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u32 reg_val;
+ int status;
status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2634,7 +2634,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
/* leave link alone for 2.5G */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
@@ -2652,7 +2652,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
*
* Returns error code if unable to get link status.
**/
-static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
{
u32 ret;
u16 autoneg_status;
@@ -2686,7 +2686,7 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
* A return of a non-zero value indicates an error, and the base driver should
* not report link up.
**/
-static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
ixgbe_link_speed force_speed;
bool link_up;
@@ -2746,9 +2746,9 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
status = ixgbe_reset_phy_generic(hw);
@@ -2764,7 +2764,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2786,7 +2786,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2819,12 +2819,12 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* semaphore, -EIO when command fails or -EINVAL when incorrect
* params are passed.
**/
-static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
- s32 ret_val;
+ int ret_val;
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
@@ -2866,12 +2866,12 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
*
* Determine lowest common link speed with link partner.
**/
-static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed *lcd_speed)
{
- u16 an_lp_status;
- s32 status;
u16 word = hw->eeprom.ctrl_word_3;
+ u16 an_lp_status;
+ int status;
*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -2884,28 +2884,28 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
/* If link partner advertised 1G, return 1G */
if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
- return status;
+ return 0;
}
/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
(word & NVM_INIT_CTRL_3_D10GMP_PORT0))
- return status;
+ return 0;
/* Link partner not capable of lower speeds, return 10G */
*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
- return status;
+ return 0;
}
/**
* ixgbe_setup_fc_x550em - Set up flow control
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
{
bool pause, asm_dir;
u32 reg_val;
- s32 rc = 0;
+ int rc = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -2990,7 +2990,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -3073,13 +3073,13 @@ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
* (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
* the X557 PHY immediately prior to entering LPLU.
**/
-static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
{
u16 an_10g_cntl_reg, autoneg_reg, speed;
- s32 status;
ixgbe_link_speed lcd_speed;
u32 save_autoneg;
bool link_up;
+ int status;
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
@@ -3130,7 +3130,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
(lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
(lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
- return status;
+ return 0;
/* Clear AN completed indication */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
@@ -3167,10 +3167,10 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
* ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return 0;
@@ -3196,7 +3196,7 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
@@ -3239,10 +3239,10 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
* set during init_shared_code because the PHY/SFP type was
* not known. Perform the SFP init if necessary.
**/
-static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
hw->mac.ops.set_lan_id(hw);
@@ -3367,9 +3367,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
-static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u16 reg;
status = hw->phy.ops.read_reg(hw,
@@ -3441,14 +3441,14 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
** reset.
**/
-static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
ixgbe_link_speed link_speed;
- s32 status;
+ bool link_up = false;
u32 ctrl = 0;
+ int status;
u32 i;
- bool link_up = false;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3609,10 +3609,10 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = 0;
u32 an_cntl = 0;
+ int status = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -3714,9 +3714,9 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
*
* Acquires the SWFW semaphore and sets the I2C MUX
*/
-static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
{
- s32 status;
+ int status;
status = ixgbe_acquire_swfw_sync_X540(hw, mask);
if (status)
@@ -3750,11 +3750,11 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and gets the shared PHY token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
- s32 status;
+ int status;
while (--retries) {
status = 0;
@@ -3807,11 +3807,11 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
-static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
@@ -3833,11 +3833,11 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* Writes a value to specified PHY register using the SWFW lock and PHY Token.
* The PHY Token is needed since the MDIO is shared between two MAC instances.
*/
-static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
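
The ixgbe_x550.c hunks above all apply one mechanical pattern: the driver-private s32 return type becomes plain int, and local variable declarations are reordered longest-line-first (reverse Christmas tree). Because s32 is a typedef for int on every architecture the kernel supports, the generated code is unchanged; the conversion simply aligns X550 with the kernel convention of returning 0 on success or a negative errno as an int. A minimal sketch of the resulting convention, using a hypothetical helper rather than a real driver function:

#include <linux/errno.h>

/* Hypothetical helper, illustrative only: return 0 on success or a
 * negative errno as a plain int, instead of the driver-specific s32.
 */
static int example_validate_len(unsigned int len, unsigned int max_len)
{
	if (!len || len > max_len)
		return -EINVAL;	/* reject invalid arguments */

	return 0;		/* success */
}
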
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 59798bc33298..d34d715c59eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -359,12 +359,8 @@ construct_skb:
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
@@ -499,13 +495,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
}
tx_ring->next_to_clean = ntc;
-
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
if (xsk_frames)
xsk_tx_completed(pool, xsk_frames);
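
Both ixgbe_xsk.c hunks replace open-coded u64_stats accounting with shared helpers whose definitions are not part of this excerpt. A sketch of what the Rx variant presumably looks like, assuming it merely wraps the sequence removed above (the Tx helper would mirror it for the tx counters):

/* Assumed shape of the consolidated helper; mirrors the removed lines. */
static inline void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
					      struct ixgbe_q_vector *q_vector,
					      u64 pkts, u64 bytes)
{
	/* Ring counters are protected by the per-ring u64_stats sequence. */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += pkts;
	rx_ring->stats.bytes += bytes;
	u64_stats_update_end(&rx_ring->syncp);

	/* Accumulate the per-vector running totals as well. */
	q_vector->rx.total_packets += pkts;
	q_vector->rx.total_bytes += bytes;
}
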
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a44e4bd56142..9c960017a6de 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4413,7 +4413,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
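
The ixgbevf change swaps open-coded pointer arithmetic for the skb_network_offset() helper; the two expressions compute the same offset. Illustrative sketch (the wrapper below is hypothetical):

#include <linux/skbuff.h>

/* Hypothetical wrapper, equivalent to the removed expression
 * skb_network_header(skb) - skb->data.
 */
static unsigned int example_mac_hdr_len(const struct sk_buff *skb)
{
	return skb_network_offset(skb);
}
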
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 884d64114bff..837295fecd17 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -180,6 +180,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index ceba4aa4f026..a399defe25fd 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeon_ep/
+obj-y += octeon_ep_vf/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a641b3534ca3..40a5f1431e4e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5097,7 +5097,7 @@ static int mvneta_ethtool_set_wol(struct net_device *dev,
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
@@ -5113,7 +5113,7 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
}
static int mvneta_ethtool_set_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
new file mode 100644
index 000000000000..e371a3ef0c49
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration
+#
+
+config OCTEON_EP_VF
+ tristate "Marvell Octeon PCI Endpoint NIC VF Driver"
+ depends on 64BIT
+ depends on PCI
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC VF.
+
+ For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>.
+
+ To compile this driver as a module, choose M here.
+ The name of the module will be octeon_ep_vf.
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
new file mode 100644
index 000000000000..4a5f9fcb0b40
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC VF
+#
+
+obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o
+
+octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \
+ octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \
+ octep_vf_ethtool.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
new file mode 100644
index 000000000000..88937fce75f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cn9k.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purpose */
+static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no),
+ val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_vf_reset_iq(oct, q);
+ cn93_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
+ CN93_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_VF_R_IN_CTL_IS_64B;
+ reg_val |= CN93_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
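+ /* Pack the time threshold into bits 63:32 and the packet-count
+ * threshold into bits 31:0 of the OUT_INT_LEVELS register.
+ */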
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cn93_handle_vf_mbox_intr(oct);
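+ /* Write the pending status back to acknowledge the mailbox
+ * interrupt (write-1-to-clear semantics assumed for this CSR).
+ */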
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable PF to VF mbox interrupt by setting 2nd bit */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable PF to VF mbox interrupt by clearing the enable bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
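+ /* Unsigned 32-bit subtraction yields the count of instructions the
+ * hardware consumed since the last poll and stays correct across
+ * counter wraparound.
+ */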
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
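+ /* Poll until the doorbell count drains to zero, sleeping one jiffy
+ * per iteration for at most HZ iterations (roughly one second).
+ */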
+ while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cn93(oct, q);
+ octep_vf_enable_oq_cn93(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cn93(oct, q);
+ octep_vf_disable_oq_cn93(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cn93_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cn93() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
+ octep_vf_init_config_cn93_vf(oct);
+}
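
octep_vf_device_setup_cn93() only installs the CN9K-specific hw_ops; the chip-independent core dispatches through that table, as octep_vf_reinit_regs_cn93() above already does for the per-queue setup ops. A minimal sketch of that dispatch pattern, using a hypothetical caller rather than code from the driver:

#include "octep_vf_main.h"

/* Hypothetical caller, illustrative only: restart the VF's rings via the
 * chip-specific ops installed by octep_vf_device_setup_cn93().
 */
static void example_restart_io_queues(struct octep_vf_device *oct)
{
	oct->hw_ops.disable_io_queues(oct);	/* quiesce all Tx/Rx rings */
	oct->hw_ops.reset_io_queues(oct);	/* clear ring CSRs and counters */
	oct->hw_ops.reinit_regs(oct);		/* reprogram, re-enable, refill credits */
}
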
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
new file mode 100644
index 000000000000..1f79dfad42c6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cnxk.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purpose */
+static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_vf_reset_iq(oct, q);
+ cnxk_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
+ CNXK_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~GENMASK_ULL(31, 0);
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cnxk_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable PF to VF mbox interrupt by setting 2nd bit */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable PF to VF mbox interrupt by clearing the enable bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cnxk(oct, q);
+ octep_vf_enable_oq_cnxk(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cnxk(oct, q);
+ octep_vf_disable_oq_cnxk(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cnxk_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cnxk() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
+ octep_vf_init_config_cnxk_vf(oct);
+}
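
The function-pointer table filled in above is the only chip-specific surface the rest of the VF driver sees. A minimal sketch of how the common code drives it (octep_vf_start_io() is a hypothetical helper used only for illustration; it mirrors what octep_vf_open() does later in this series):

	static void octep_vf_start_io(struct octep_vf_device *oct)
	{
		/* Reset (quiesce) the rings through the chip-specific ops */
		oct->hw_ops.reset_io_queues(oct);
		/* Enable every active Tx/Rx queue pair */
		oct->hw_ops.enable_io_queues(oct);
		/* Finally unmask the mailbox and per-ring interrupts */
		oct->hw_ops.enable_interrupts(oct);
	}
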
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
new file mode 100644
index 000000000000..e03a647b0110
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_CONFIG_H_
+#define _OCTEP_VF_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_VF_32BYTE_INSTR 32
+#define OCTEP_VF_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024
+/* Minimum number of Tx requests to accumulate before ringing the doorbell */
+#define OCTEP_VF_DB_MIN 8
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0
+
+/* Minimum watermark for backpressure */
+#define OCTEP_VF_OQ_WMARK_MIN 256
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build the skb from the allocated page buffer once the packet is received.
+ * When a gathered packet is received, make the head page the skb head and
+ * attach the page buffers of consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_VF_OQ_PKTS_PER_INTR 128
+#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold:
+ * wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_VF_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_VF_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
+
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings)
+
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+
+/* Hardware Tx Queue configuration. */
+struct octep_vf_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* Minimum number of commands pending to be posted to Octeon before the
+ * driver rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_vf_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+
+ /* Water mark for backpressure.
+ * Output queue sends backpressure signal to source when
+ * free buffer count falls below wmark.
+ */
+ u32 wmark;
+};
+
+/* Tx/Rx configuration */
+struct octep_vf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+};
+
+/* Octeon MSI-x config. */
+struct octep_vf_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_vf_config {
+ /* Input Queue attributes. */
+ struct octep_vf_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_vf_oq_config oq;
+
+ /* MSI-X interrupt config */
+ struct octep_vf_msix_config msix_cfg;
+
+ /* NIC VF ring Configuration */
+ struct octep_vf_ring_config ring_cfg;
+};
+#endif /* _OCTEP_VF_CONFIG_H_ */
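
For reference, a hedged sketch of how these limits and accessors fit together. octep_vf_fill_default_config() is a hypothetical helper (the actual defaults are set by the per-chip init_config routines elsewhere in this series), shown only to illustrate the intended use of the structures and CFG_GET_* macros above:

	/* Hypothetical helper, for illustration only */
	static void octep_vf_fill_default_config(struct octep_vf_config *conf,
						 u16 active_rings)
	{
		conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
		conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
		conf->iq.db_min = OCTEP_VF_DB_MIN;
		conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;

		conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
		conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
		conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
		conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
		conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
		conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;

		conf->ring_cfg.max_io_rings = active_rings;
		conf->ring_cfg.active_io_rings = active_rings;
		conf->msix_cfg.ioq_msix = active_rings;
	}

A consumer then sizes its rings with CFG_GET_IQ_NUM_DESC(conf) and iterates over CFG_GET_PORTS_ACTIVE_IO_RINGS(conf) queues, as the ethtool and main code below do.
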
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
new file mode 100644
index 000000000000..a1979b45e355
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_dropped_bytes_fifo_full",
+};
+
+#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_vf_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_tx_stats *iface_tx_stats;
+ struct octep_vf_iface_rx_stats *iface_rx_stats;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_vf_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_busy_errors += iq->stats.tx_busy;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \
+{ \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_vf_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops octep_vf_ethtool_ops = {
+ .get_drvinfo = octep_vf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_vf_get_strings,
+ .get_sset_count = octep_vf_get_sset_count,
+ .get_ethtool_stats = octep_vf_get_ethtool_stats,
+ .get_link_ksettings = octep_vf_get_link_ksettings,
+};
+
+void octep_vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_vf_ethtool_ops;
+}
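
The data[] array filled by octep_vf_get_ethtool_stats() must follow exactly the string order emitted by octep_vf_get_strings(). A small illustrative sketch of that index math (these helpers are hypothetical and not part of the patch):

	/* Hypothetical helpers, for illustration only: locate a per-queue stat
	 * in the ethtool data[] array using the same layout as get_strings().
	 */
	static int octep_vf_txq_stat_index(int q, int stat)
	{
		return OCTEP_VF_GLOBAL_STATS_CNT +
		       q * OCTEP_VF_TX_Q_STATS_CNT + stat;
	}

	static int octep_vf_rxq_stat_index(int q, int stat, int num_queues)
	{
		return OCTEP_VF_GLOBAL_STATS_CNT +
		       num_queues * OCTEP_VF_TX_Q_STATS_CNT +
		       q * OCTEP_VF_RX_Q_STATS_CNT + stat;
	}
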
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
new file mode 100644
index 000000000000..dd49d0b8b494
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+struct workqueue_struct *octep_vf_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_vf_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This information is passed to the interrupt handler, which schedules NAPI
+ * polling from it and gets quick access to the private data of the Tx/Rx
+ * queue corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
+{
+ struct octep_vf_ioq_vector *ioq_vector;
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_vf_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_vf_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* Allocate one MSI-X vector per Tx/Rx queue pair */
+ num_msix = oct->num_oqs;
+ oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_vf_disable_msix(struct octep_vf_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data containing pointers to the Tx/Rx queue private data
+ * and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_vf_ioq_vector *ioq_vector = data;
+ struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_vf_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_vf_request_irqs(struct octep_vf_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_vf_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ int ret, i;
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_vf_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (i) {
+ --i;
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_vf_free_irqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_vf_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_vf_setup_irqs(struct octep_vf_device *oct)
+{
+ if (octep_vf_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_vf_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_vf_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_vf_disable_msix(oct);
+enable_msix_err:
+ octep_vf_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_vf_clean_irqs() - free all interrupts and their resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+{
+ octep_vf_free_irqs(oct);
+ octep_vf_disable_msix(oct);
+ octep_vf_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ smp_wmb();
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_vf_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_vf_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);
+ rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
+
+ /* Need more polling if Tx completion processing is still pending or
+ * at least 'budget' Rx packets were processed.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+ return rx_done;
+}
+
+/**
+ * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_add(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_delete(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_enable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_disable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_vf_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_rx_state(oct, up);
+ if (err)
+ netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
+}
+
+static int octep_vf_get_link_status(struct octep_vf_device *oct)
+{
+ int err;
+
+ err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
+ if (err)
+ netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
+ return oct->link_info.oper_up;
+}
+
+static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_link_status(oct, up);
+ if (err) {
+ netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
+ return;
+ }
+ oct->link_info.oper_up = up;
+}
+
+/**
+ * octep_vf_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_vf_open(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_vf_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_vf_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_vf_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_vf_napi_add(oct);
+ octep_vf_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_vf_set_rx_state(oct, true);
+
+ ret = octep_vf_get_link_status(oct);
+ if (!ret)
+ octep_vf_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_vf_oq_dbell_init(oct);
+
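+ /* Re-check the operational link state now that queues and interrupts
+ * are enabled, and start the Tx path if the PF reports link up.
+ */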
+ ret = octep_vf_get_link_status(oct);
+ if (ret)
+ octep_vf_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+ octep_vf_clean_irqs(oct);
+setup_irq_err:
+ octep_vf_free_oqs(oct);
+setup_oq_err:
+ octep_vf_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_vf_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_vf_stop(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_vf_set_link_status(oct, false);
+ octep_vf_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+
+ octep_vf_clean_irqs(oct);
+ octep_vf_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_vf_free_oqs(oct);
+ octep_vf_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_vf_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+{
+ int ret;
+
+ ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD,
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+ switch (ret) {
+ case 0: /* Stopped the queue, since IQ is full */
+ return 1;
+ case -1: /*
+ * Queue was stopped, then restarted because
+ * completions processed on another CPU freed
+ * descriptors in the meantime.
+ */
+ iq->stats.restart_cnt++;
+ fallthrough;
+ case 1: /* Queue left enabled, since IQ is not yet full */
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
+ struct octep_vf_tx_sglist_desc *sglist;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct octep_vf_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_vf_instr_hdr *ih;
+ struct octep_vf_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ int xmit_more;
+ u16 q_no, wi;
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->pkind = oct->fw_info.pkind;
+ ih->fsz = oct->fw_info.fsz;
+ ih->tlen = skb->len + ih->fsz;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+ if (oct->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ }
+ /* due to ESR txm will be swapped by hw */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+
+ skb_tx_timestamp(skb);
+ iq->fill_cnt++;
+ wi++;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+ /* octep_vf_iq_full_check() stops the queue and returns 1 if it became
+ * full after inserting the current packet; in that case ring the
+ * doorbell now. Otherwise, if more packets are coming and the batch
+ * threshold is not yet reached, defer the doorbell.
+ */
+ if (!octep_vf_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
+ goto ring_dbell;
+
+dma_map_sg_err:
+ if (si > 0) {
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ }
+ while (si > 1) {
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ si--;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ring_dbell:
+ /* Flush the hw descriptors before writing to doorbell */
+ smp_wmb();
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
+ return NETDEV_TX_OK;
+}
+
+int octep_vf_get_if_stats(struct octep_vf_device *oct)
+{
+ struct octep_vf_iface_rxtx_stats vf_stats;
+ int ret, size;
+
+ memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ (u8 *)&vf_stats, &size);
+
+ if (ret)
+ return ret;
+
+ memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
+ sizeof(struct octep_vf_iface_rx_stats));
+ memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
+ sizeof(struct octep_vf_iface_tx_stats));
+
+ return 0;
+}
+
+int octep_vf_get_link_info(struct octep_vf_device *oct)
+{
+ int ret, size;
+
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ (u8 *)&oct->link_info, &size);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * octep_vf_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_vf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ int q;
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ if (!octep_vf_get_if_stats(oct)) {
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+ oct->iface_rx_stats.err_pkts;
+ stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+ stats->tx_dropped = oct->iface_tx_stats.dropped;
+ }
+}
+
+/**
+ * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, that potentially clears a Tx queue timeout
+ * condition.
+ */
+static void octep_vf_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_vf_stop(netdev);
+ octep_vf_open(netdev);
+ }
+ rtnl_unlock();
+ netdev_put(netdev, NULL);
+}
+
+/**
+ * octep_vf_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_hold(netdev, NULL, GFP_ATOMIC);
+ schedule_work(&oct->tx_timeout_task);
+}
+
+static int octep_vf_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ int err;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_vf_mbox_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+ return err;
+}
+
+static int octep_vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 rx_offloads = 0, tx_offloads = 0;
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & netdev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
+
+ err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
+ if (!err)
+ netdev->features = features;
+
+ return err;
+}
+
+static const struct net_device_ops octep_vf_netdev_ops = {
+ .ndo_open = octep_vf_open,
+ .ndo_stop = octep_vf_stop,
+ .ndo_start_xmit = octep_vf_start_xmit,
+ .ndo_get_stats64 = octep_vf_get_stats64,
+ .ndo_tx_timeout = octep_vf_tx_timeout,
+ .ndo_set_mac_address = octep_vf_set_mac,
+ .ndo_change_mtu = octep_vf_change_mtu,
+ .ndo_set_features = octep_vf_set_features,
+};
+
+static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ return "CN10KB";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_vf_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_vf_device_setup(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR region 0 */
+ oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ if (!oct->mmio.hw_addr) {
+ dev_err(&pdev->dev,
+ "Failed to remap BAR0; start=0x%llx len=0x%llx\n",
+ pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ goto ioremap_err;
+ }
+ oct->mmio.mapped = 1;
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cn93(oct);
+ break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cnxk(oct);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported device\n");
+ goto unsupported_dev;
+ }
+
+ return 0;
+
+unsupported_dev:
+ iounmap(oct->mmio.hw_addr);
+ioremap_err:
+ kfree(oct->conf);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * octep_vf_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_vf_device_cleanup(struct octep_vf_device *oct)
+{
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ if (oct->mmio.mapped)
+ iounmap(oct->mmio.hw_addr);
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
+{
+ return octep_vf_mbox_get_mac_addr(oct, addr);
+}
+
+/**
+ * octep_vf_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto disable_pci_device;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto disable_pci_device;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
+ OCTEP_VF_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto mem_regions_release;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_vf_dev = netdev_priv(netdev);
+ octep_vf_dev->netdev = netdev;
+ octep_vf_dev->pdev = pdev;
+ octep_vf_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_vf_dev);
+
+ err = octep_vf_device_setup(octep_vf_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto netdevice_free;
+ }
+ INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
+
+ netdev->netdev_ops = &octep_vf_netdev_ops;
+ octep_vf_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ if (octep_vf_setup_mbox(octep_vf_dev)) {
+ dev_err(&pdev->dev, "VF Mailbox setup failed\n");
+ err = -ENOMEM;
+ goto device_cleanup;
+ }
+
+ if (octep_vf_mbox_version_check(octep_vf_dev)) {
+ dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
+ dev_err(&pdev->dev, "unable to get fw info\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ netdev->hw_features = NETIF_F_SG;
+ if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->min_mtu = OCTEP_VF_MIN_MTU;
+ netdev->max_mtu = OCTEP_VF_MAX_MTU;
+ netdev->mtu = OCTEP_VF_DEFAULT_MTU;
+
+ if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
+ octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto delete_mbox;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+delete_mbox:
+ octep_vf_delete_mbox(octep_vf_dev);
+device_cleanup:
+ octep_vf_device_cleanup(octep_vf_dev);
+netdevice_free:
+ free_netdev(netdev);
+mem_regions_release:
+ pci_release_mem_regions(pdev);
+disable_pci_device:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Device probe failed\n");
+ return err;
+}
+
+/**
+ * octep_vf_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_vf_remove(struct pci_dev *pdev)
+{
+ struct octep_vf_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ octep_vf_mbox_dev_remove(oct);
+ cancel_work_sync(&oct->tx_timeout_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+ octep_vf_delete_mbox(oct);
+ octep_vf_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_vf_driver = {
+ .name = OCTEP_VF_DRV_NAME,
+ .id_table = octep_vf_pci_id_tbl,
+ .probe = octep_vf_probe,
+ .remove = octep_vf_remove,
+};
+
+/**
+ * octep_vf_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_vf_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
+
+ ret = pci_register_driver(&octep_vf_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_VF_DRV_NAME, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * octep_vf_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_vf_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
+
+ pci_unregister_driver(&octep_vf_driver);
+
+ pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
+}
+
+module_init(octep_vf_init_module);
+module_exit(octep_vf_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
new file mode 100644
index 000000000000..5769f62545cd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_MAIN_H_
+#define _OCTEP_VF_MAIN_H_
+
+#include "octep_vf_tx.h"
+#include "octep_vf_rx.h"
+#include "octep_vf_mbox.h"
+
+#define OCTEP_VF_DRV_NAME "octeon_ep_vf"
+#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver"
+
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF
+#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF
+#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103
+#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03
+#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03
+
+#define OCTEP_VF_MAX_QUEUES 63
+#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES
+#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES
+
+#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ
+
+#define OCTEP_VF_IQ_INTR_RESEND_BIT 59
+#define OCTEP_VF_OQ_INTR_RESEND_BIT 59
+
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to a different virtual address range in
+ * the kernel.
+ */
+struct octep_vf_mmio {
+ /* The mapped (kernel virtual) address of the PCI BAR. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_vf_hw_ops {
+ void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ void (*reinit_regs)(struct octep_vf_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_vf_iq *iq);
+
+ void (*enable_interrupts)(struct octep_vf_device *oct);
+ void (*disable_interrupts)(struct octep_vf_device *oct);
+
+ void (*enable_io_queues)(struct octep_vf_device *oct);
+ void (*disable_io_queues)(struct octep_vf_device *oct);
+ void (*enable_iq)(struct octep_vf_device *oct, int q);
+ void (*disable_iq)(struct octep_vf_device *oct, int q);
+ void (*enable_oq)(struct octep_vf_device *oct, int q);
+ void (*disable_oq)(struct octep_vf_device *oct, int q);
+ void (*reset_io_queues)(struct octep_vf_device *oct);
+ void (*dump_registers)(struct octep_vf_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_vf_mbox_data {
+ /* Holds the offset of received data via mailbox. */
+ u32 data_index;
+
+ /* Holds the received data via mailbox. */
+ u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE];
+};
+
+/* wrappers around work structs */
+struct octep_vf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+};
+
+/* Octeon device mailbox */
+struct octep_vf_mbox {
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ /* Octeon mailbox data */
+ struct octep_vf_mbox_data mbox_data;
+
+ /* Octeon mailbox work handler to process Mbox messages */
+ struct octep_vf_mbox_wk wk;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_vf_ioq_vector {
+ char name[OCTEP_VF_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_vf_device *octep_vf_dev;
+ struct octep_vf_iq *iq;
+ struct octep_vf_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_VF_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_vf_link_mode_bit_indices {
+ OCTEP_VF_LINK_MODE_10GBASE_T = 0,
+ OCTEP_VF_LINK_MODE_10GBASE_R,
+ OCTEP_VF_LINK_MODE_10GBASE_CR,
+ OCTEP_VF_LINK_MODE_10GBASE_KR,
+ OCTEP_VF_LINK_MODE_10GBASE_LR,
+ OCTEP_VF_LINK_MODE_10GBASE_SR,
+ OCTEP_VF_LINK_MODE_25GBASE_CR,
+ OCTEP_VF_LINK_MODE_25GBASE_KR,
+ OCTEP_VF_LINK_MODE_25GBASE_SR,
+ OCTEP_VF_LINK_MODE_40GBASE_CR4,
+ OCTEP_VF_LINK_MODE_40GBASE_KR4,
+ OCTEP_VF_LINK_MODE_40GBASE_LR4,
+ OCTEP_VF_LINK_MODE_40GBASE_SR4,
+ OCTEP_VF_LINK_MODE_50GBASE_CR2,
+ OCTEP_VF_LINK_MODE_50GBASE_KR2,
+ OCTEP_VF_LINK_MODE_50GBASE_SR2,
+ OCTEP_VF_LINK_MODE_50GBASE_CR,
+ OCTEP_VF_LINK_MODE_50GBASE_KR,
+ OCTEP_VF_LINK_MODE_50GBASE_LR,
+ OCTEP_VF_LINK_MODE_50GBASE_SR,
+ OCTEP_VF_LINK_MODE_100GBASE_CR4,
+ OCTEP_VF_LINK_MODE_100GBASE_KR4,
+ OCTEP_VF_LINK_MODE_100GBASE_LR4,
+ OCTEP_VF_LINK_MODE_100GBASE_SR4,
+ OCTEP_VF_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_vf_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: physical link is up/down */
+ u8 oper_up;
+};
+
+/* Hardware interface stats information. */
+struct octep_vf_iface_rxtx_stats {
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+};
+
+struct octep_vf_fw_info {
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* front size data */
+ u8 fsz;
+ /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+ /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_vf_device {
+ struct octep_vf_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_vf_mmio mmio;
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* Pointers to Octeon Tx queues */
+ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* Hardware operations */
+ struct octep_vf_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+	/* IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_vf_iface_link_info link_info;
+
+	/* Mailbox to talk to the PF */
+ struct octep_vf_mbox *mbox;
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Negotiated Mbox version */
+ u32 mbox_neg_ver;
+
+ /* firmware info */
+ struct octep_vf_fw_info fw_info;
+};
+
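+/* The helpers below decode the silicon revision from the PCI revision ID:
+ * bits [3:2] hold the major revision (0 is reported as 1) and bits [1:0]
+ * the minor revision. Illustrative examples (rev_id values assumed):
+ * rev_id 0x00 -> 1.0, rev_id 0x05 -> 1.1, rev_id 0x09 -> 2.1.
+ */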
+static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \
+ writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \
+ writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr(octep_vf_dev, reg_off) \
+ readl((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
+ readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+extern struct workqueue_struct *octep_vf_wq;
+
+int octep_vf_device_setup(struct octep_vf_device *oct);
+int octep_vf_setup_iqs(struct octep_vf_device *oct);
+void octep_vf_free_iqs(struct octep_vf_device *oct);
+void octep_vf_clean_iqs(struct octep_vf_device *oct);
+int octep_vf_setup_oqs(struct octep_vf_device *oct);
+void octep_vf_free_oqs(struct octep_vf_device *oct);
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct);
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct);
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct);
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget);
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
+void octep_vf_set_ethtool_ops(struct net_device *netdev);
+int octep_vf_get_link_info(struct octep_vf_device *oct);
+int octep_vf_get_if_stats(struct octep_vf_device *oct);
+void octep_vf_mbox_work(struct work_struct *work);
+#endif /* _OCTEP_VF_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
new file mode 100644
index 000000000000..2eab21e43048
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* When a new command is implemented, the table below should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
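+
+/* Illustrative lookup (based on the table above): commands up to and
+ * including OCTEP_PFVF_MBOX_CMD_DEV_REMOVE need only a V1 mailbox, while
+ * e.g. OCTEP_PFVF_MBOX_CMD_GET_FW_INFO needs V2; octep_vf_mbox_send_cmd()
+ * rejects a command whose table entry exceeds the negotiated version.
+ */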
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct)
+{
+ int ring = 0;
+
+ oct->mbox = vzalloc(sizeof(*oct->mbox));
+ if (!oct->mbox)
+ return -1;
+
+ mutex_init(&oct->mbox->lock);
+
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
+ oct->mbox->wk.ctxptr = oct;
+ oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ dev_info(&oct->pdev->dev, "setup vf mbox successfully\n");
+ return 0;
+}
+
+void octep_vf_delete_mbox(struct octep_vf_device *oct)
+{
+ if (oct->mbox) {
+ if (work_pending(&oct->mbox->wk.work))
+ cancel_work_sync(&oct->mbox->wk.work);
+
+ mutex_destroy(&oct->mbox->lock);
+ vfree(oct->mbox);
+ oct->mbox = NULL;
+ dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n");
+ }
+}
+
+int octep_vf_mbox_version_check(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
+ cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) {
+ dev_err(&oct->pdev->dev,
+ "VF Mbox version is incompatible with PF\n");
+ return -EINVAL;
+ }
+ oct->mbox_neg_ver = (u32)rsp.s_version.version;
+ dev_dbg(&oct->pdev->dev,
+ "VF Mbox version:%u Negotiated VF version with PF:%u\n",
+ (u32)cmd.s_version.version,
+ (u32)rsp.s_version.version);
+ return 0;
+}
+
+void octep_vf_mbox_work(struct work_struct *work)
+{
+ struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
+ struct octep_vf_iface_link_info *link_info;
+ struct octep_vf_device *oct = NULL;
+ struct octep_vf_mbox *mbox = NULL;
+ union octep_pfvf_mbox_word *notif;
+ u64 pf_vf_data;
+
+ oct = (struct octep_vf_device *)wk->ctxptr;
+ link_info = &oct->link_info;
+ mbox = oct->mbox;
+ pf_vf_data = readq(mbox->mbox_read_reg);
+
+ notif = (union octep_pfvf_mbox_word *)&pf_vf_data;
+
+ switch (notif->s.opcode) {
+ case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS:
+ if (notif->s_link_status.status) {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP;
+ netif_carrier_on(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ } else {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN;
+ netif_carrier_off(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ }
+ break;
+ default:
+ dev_err(&oct->pdev->dev,
+ "Received unsupported notif %d\n", notif->s.opcode);
+ break;
+ }
+}
+
+static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ u64 reg_val = 0ull;
+ int count;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ writeq(cmd.u64, mbox->mbox_write_reg);
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
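+ /* Poll for the response: the PF is expected to overwrite this same
+ * mailbox register with its reply, so any change in the register value
+ * is treated as the response word (worst case wait is roughly
+ * 8000 * 1-1.5 ms, i.e. 8-12 seconds, per the constants above).
+ */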
+ for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) {
+ usleep_range(1000, 1500);
+ reg_val = readq(mbox->mbox_write_reg);
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) {
+ dev_err(&oct->pdev->dev, "mbox send command timed out\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT;
+ }
+ if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NACK;
+ }
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ int ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+ mutex_lock(&mbox->lock);
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) {
+ dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n",
+ cmd.s.opcode, oct->mbox_neg_ver);
+ mutex_unlock(&mbox->lock);
+ return -EOPNOTSUPP;
+ }
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp);
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int data_len = 0, tmp_len = 0;
+ int read_cnt, i = 0, ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd failed for data request\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+ }
+ /* The PF returns the data length of the requested command in the ACK */
+ data_len = *((int32_t *)rsp.s_data.data);
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
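+ /* Read the payload in fragments of up to OCTEP_PFVF_MBOX_MAX_DATA_SIZE
+ * (6) bytes per mailbox word. For example, a hypothetical 16-byte
+ * payload would take three fragment reads of 6 + 6 + 4 bytes.
+ */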
+ while (data_len) {
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd failed for data request\n");
+ mutex_unlock(&mbox->lock);
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ return ret;
+ }
+ if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) {
+ data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ mbox->mbox_data.recv_data[mbox->mbox_data.data_index] =
+ rsp.s_data.data[i];
+ mbox->mbox_data.data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, mbox->mbox_data.recv_data, tmp_len);
+ *size = tmp_len;
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu)
+{
+ int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret = 0;
+
+ if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) {
+ dev_err(&oct->pdev->dev,
+ "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n",
+ mtu, ETH_MIN_MTU, ETH_MAX_MTU);
+ return -EINVAL;
+ }
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr[i];
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "get_mac: received NACK\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rsp.s_set_mac.mac_addr[i];
+ return 0;
+}
+
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE;
+ cmd.s_link_state.state = state;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set Rx state received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS;
+ cmd.s_link_status.status = status;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set link status received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, NULL);
+ return ret;
+}
+
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get firmware info via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get firmware info received NACK\n");
+ return -EINVAL;
+ }
+ oct->fw_info.pkind = rsp.s_fw_info.pkind;
+ oct->fw_info.fsz = rsp.s_fw_info.fsz;
+ oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags;
+ oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags;
+
+ return 0;
+}
+
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads,
+ u16 rx_offloads)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS;
+ cmd.s_offloads.rx_ol_flags = rx_offloads;
+ cmd.s_offloads.tx_ol_flags = tx_offloads;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set offloads received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
new file mode 100644
index 000000000000..9b5efad37eab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_MBOX_H_
+#define _OCTEP_VF_MBOX_H_
+
+/* When a new command is implemented, the VF Mbox version should be bumped. */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4,
+ OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_VERSION 0
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+
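+/* Each mailbox exchange is a single 64-bit word. In the common view below,
+ * opcode occupies the low 8 bits, type the next 2 bits, and the remaining
+ * bits are interpreted per command (data bytes, version, MAC address, MTU,
+ * link/status bit, fw info fields or offload flags). Bit placement assumes
+ * the usual little-endian bitfield layout.
+ */
+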
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct);
+void octep_vf_delete_mbox(struct octep_vf_device *oct);
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp);
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size);
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu);
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_version_check(struct octep_vf_device *oct);
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state);
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status);
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up);
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct);
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct);
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
new file mode 100644
index 000000000000..25e2a876ebba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CN9K_H_
+#define _OCTEP_VF_REGS_CN9K_H_
+
+/*############################ RST #########################*/
+#define CN93_VF_CONFIG_XPANSION_BAR 0x38
+#define CN93_VF_CONFIG_PCIE_CAP 0x70
+#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_VF_RING_OFFSET BIT_ULL(17)
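+
+/* Each ring's register block is CN93_VF_RING_OFFSET (0x20000) apart; for
+ * example, CN93_VF_SDP_R_IN_INSTR_DBELL(1) below works out to
+ * 0x10040 + 0x20000 = 0x30040 (illustrative arithmetic only).
+ */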
+
+/*###################### RING IN REGISTERS #########################*/
+#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_VF_SDP_R_IN_CNTS_START 0x10050
+#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_VF_SDP_R_IN_CONTROL(ring) \
+ (CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_ENABLE(ring) \
+ (CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_CNTS(ring) \
+ (CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28)
+#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24)
+#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8)
+#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6)
+#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5)
+#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3)
+#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1)
+#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0)
+
+#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_VF_SDP_R_OUT_CONTROL(ring) \
+ (CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_ENABLE(ring) \
+ (CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_CNTS(ring) \
+ (CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CN9K_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
new file mode 100644
index 000000000000..2e156745ef64
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CNXK_H_
+#define _OCTEP_VF_REGS_CNXK_H_
+
+/*############################ RST #########################*/
+#define CNXK_VF_CONFIG_XPANSION_BAR 0x38
+#define CNXK_VF_CONFIG_PCIE_CAP 0x70
+#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_VF_RING_OFFSET (0x1ULL << 17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_VF_SDP_R_ERR_TYPE(ring) \
+ (CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CONTROL(ring) \
+ (CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_ENABLE(ring) \
+ (CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CNTS(ring) \
+ (CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CNXK_VF_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_VF_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_VF_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_VF_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_VF_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_VF_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_VF_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_VF_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_WMARK(ring) \
+ (CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_CNTS(ring) \
+ (CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CNXK_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
new file mode 100644
index 000000000000..82821bc28634
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -ENOMEM, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+ put_page(page);
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_vf_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_vf_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities, an
+ * extended header is filled in by Octeon after the length field in
+ * Rx packets. This header carries additional packet information.
+ */
+ if (oct->fw_info.rx_ol_flags)
+ oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
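+ /* Example with assumed sizes: a 2 KB Rx buffer and an 8-byte response
+ * header leave max_single_buffer_size = 2040 bytes (2032 when the 8-byte
+ * extended header is present); larger packets span multiple buffers.
+ */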
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
+
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_vf_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_vf_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_vf_oq_reset_indices(oq);
+}
+
+/**
+ * octep_vf_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_vf_free_oq(struct octep_vf_oq *oq)
+{
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+ int q_no = oq->q_no;
+
+ octep_vf_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_vf_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_oqs(struct octep_vf_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_vf_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_oq(oct->oq[i]);
+ }
+ return retval;
+}
+
+/**
+ * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_vf_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_oqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_vf_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter has neared its max value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors.
+ * The returned count accounts only for complete packets, not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq, u16 pkts_to_process)
+{
+ struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
+ struct octep_vf_rx_buffer *buff_info;
+ struct octep_vf_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ u16 data_offset, rx_ol_flags;
+ struct sk_buff *skb;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the length field from big-endian to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->fw_info.rx_ol_flags) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE +
+ OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
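+ /* Multi-buffer packet: the head buffer carries the response
+ * header(s) plus max_single_buffer_size bytes of data; the
+ * remainder is attached below as page fragments, one descriptor
+ * per buffer_size chunk (e.g., with an assumed 2 KB buffer, a
+ * 5 KB packet spans the head buffer plus two fragments).
+ */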
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_vf_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_vf_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_vf_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ smp_wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
new file mode 100644
index 000000000000..fe46838b5200
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_RX_H_
+#define _OCTEP_VF_RX_H_
+
+/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the Rx buffer page
+ * @info_ptr: DMA address of host memory, used by hardware to update the
+ * packet count; currently unused to save PCI writes.
+ */
+struct octep_vf_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);
+
+#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw))
+
+/* Rx offload flags */
+#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_VF_RX_CSUM_L4_VERIFIED | \
+ OCTEP_VF_RX_CSUM_IP_VERIFIED))
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has advertised support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_vf_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 rsvd:48;
+
+ /* rx offload flags */
+ u16 rx_ol_flags;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * The value is big-endian and must be converted to CPU byte order.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_vf_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_vf_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer))
+
+/* Output Queue statistics. Each output queue has three stats fields. */
+struct octep_vf_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_vf_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon OQ.
+ */
+struct octep_vf_oq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_vf_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes the number of info/buffer ptrs available to this register.
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_vf_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_vf_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq))
+#endif /* _OCTEP_VF_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
new file mode 100644
index 000000000000..47a5c054fdb6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_vf_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+}
+
+/**
+ * octep_vf_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_vf_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
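+ /* The scatter list is assumed to pack four (length, DMA pointer)
+ * pairs per sglist descriptor: fragment i lives in descriptor
+ * i >> 2, slot i & 3, with lengths apparently stored in reverse
+ * slot order within the 64-bit word (hence the 3 - (i & 3) index).
+ */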
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+ compl_bytes, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+
+ return !budget;
+}
+
+/**
+ * octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
+{
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset the queue indices.
+ */
+void octep_vf_clean_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_vf_iq_free_pending(oct->iq[i]);
+ octep_vf_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_vf_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_vf_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_vf_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
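+ /* Ring size is expected to be a power of two so that this mask can
+ * be used to wrap ring indices.
+ */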
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_vf_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_vf_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_vf_free_iq(struct octep_vf_iq *iq)
+{
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_vf_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_vf_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_vf_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
new file mode 100644
index 000000000000..f338b975103c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_TX_H_
+#define _OCTEP_VF_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
+struct octep_vf_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes to Octeon hardware as a gather buffer.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * align by adding 3 before calculating max SGLIST entries per packet.
+ */
+#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
+ (OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))
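+
+/* For example, with a typical MAX_SKB_FRAGS of 17 this works out to
+ * (17 + 1 + 3) / 4 = 5 sglist entries, i.e. 200 bytes, per Tx packet.
+ */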
+
+struct octep_vf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_vf_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))
+
+/* VF Hardware interface Tx statistics */
+struct octep_vf_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets dropped */
+ u64 dropped;
+
+ /* Reserved */
+ u64 reserved[13];
+};
+
+/* VF Input Queue statistics */
+struct octep_vf_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) of
+ * an Octeon device has one such structure to represent it.
+ */
+struct octep_vf_iq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_vf_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_vf_iq_stats stats;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_vf_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_vf_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_vf_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_vf_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator, 1=gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+static_assert(sizeof(struct octep_vf_instr_hdr) == 8);
+
+/* Tx offload flags */
+#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))
+
+#define OCTEP_VF_TX_TSO(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO))
+
+struct tx_mdata {
+ /* offload flags */
+ u16 ol_flags;
+
+ /* gso size */
+ u16 gso_size;
+
+ /* number of gso segments */
+ u16 gso_segs;
+
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
+};
+
+static_assert(sizeof(struct tx_mdata) == 16);
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are optional
+ * and filled by the driver based on firmware/hardware capabilities.
+ * These optional headers are together called Front Data, and their size is
+ * described by ih->fsz.
+ */
+struct octep_vf_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_vf_instr_hdr ih;
+ u64 ih64;
+ };
+
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);
+
+#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
+#endif /* _OCTEP_VF_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index edeb0f737312..61ab7f66f053 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -837,6 +837,8 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_BPID = -434,
+ NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
@@ -1114,6 +1116,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
#define NIX_FLOW_KEY_TYPE_AH BIT(22)
@@ -1553,6 +1556,7 @@ struct flow_msg {
u32 mpls_lse[4];
u8 icmp_type;
u8 icmp_code;
+ __be16 tcp_flags;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index b0b4dea548e1..d883157393ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -85,8 +85,7 @@ enum npc_kpu_lc_ltype {
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
- NPC_LT_LD_ICMP,
- NPC_LT_LD_SCTP,
+ NPC_LT_LD_SCTP = 4,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
@@ -97,6 +96,7 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_ICMP,
};
enum npc_kpu_le_ltype {
@@ -140,14 +140,14 @@ enum npc_kpu_lg_ltype {
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
- NPC_LT_LH_TU_ICMP,
- NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_SCTP = 4,
NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_CUSTOM0,
+ NPC_LT_LH_CUSTOM1,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
- NPC_LT_LH_CUSTOM0 = 0xE,
- NPC_LT_LH_CUSTOM1 = 0xF,
+ NPC_LT_LH_TU_ICMP = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
@@ -155,10 +155,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CPT_HDR_PTP_PKIND = 54ULL,
NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
@@ -216,6 +217,7 @@ enum key_fields {
NPC_MPLS4_TTL,
NPC_TYPE_ICMP,
NPC_CODE_ICMP,
+ NPC_TCP_FLAGS,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index a820bad3abb2..41de72c8607f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -35,6 +35,7 @@
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
#define NPC_ETYPE_PPPOE 0x8864
+#define NPC_ETYPE_ERSPA 0x88be
#define NPC_PPP_IP 0x0021
#define NPC_PPP_IP6 0x0057
@@ -59,6 +60,9 @@
#define NPC_IPNH_MPLS 137
#define NPC_IPNH_HOSTID 139
#define NPC_IPNH_SHIM6 140
+#define NPC_IPNH_CUSTOM 253
+
+#define NPC_IP6_ROUTE_TYPE 4
#define NPC_UDP_PORT_PTP_E 319
#define NPC_UDP_PORT_PTP_G 320
@@ -187,6 +191,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU2_MT,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -231,6 +236,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
NPC_S_KPU8_AH,
+ NPC_S_KPU8_CUSTOM,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
NPC_S_KPU9_TU_MPLS_IN_IP,
@@ -242,6 +248,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
NPC_S_KPU9_ESP,
+ NPC_S_KPU9_CUSTOM,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -318,10 +325,10 @@ enum npc_kpu_lc_uflag {
NPC_F_LC_U_UNK_PROTO = 0x10,
NPC_F_LC_U_IP_FRAG = 0x20,
NPC_F_LC_U_IP6_FRAG = 0x40,
+ NPC_F_LC_L_6TO4 = 0x80,
};
enum npc_kpu_lc_lflag {
NPC_F_LC_L_IP_IN_IP = 1,
- NPC_F_LC_L_6TO4,
NPC_F_LC_L_MPLS_IN_IP,
NPC_F_LC_L_IP6_TUN_IP6,
NPC_F_LC_L_IP6_MPLS_IN_IP,
@@ -334,6 +341,8 @@ enum npc_kpu_lc_lflag {
NPC_F_LC_L_EXT_MOBILITY,
NPC_F_LC_L_EXT_HOSTID,
NPC_F_LC_L_EXT_SHIM6,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
};
enum npc_kpu_ld_lflag {
@@ -970,10 +979,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 48, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
@@ -2786,6 +2795,24 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_MT, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_MT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4501,6 +4528,24 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0xff00,
NPC_IP_VER_6,
NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 1,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 2,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
0x0000,
0x0000,
},
@@ -4776,6 +4821,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
@@ -4884,6 +4938,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4,
@@ -5064,6 +5127,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
0x0000,
0x0000,
NPC_IP_VER_6,
@@ -5208,6 +5280,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5325,6 +5406,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5433,6 +5523,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5532,6 +5631,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5649,6 +5757,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5757,6 +5874,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5883,6 +6009,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5982,6 +6117,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6081,6 +6225,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6310,6 +6463,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0xffff,
0x0000,
0x0000,
+ 0x0009,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -6756,6 +6918,78 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
0x0000,
0xffff,
NPC_GRE_F_ROUTE,
@@ -6836,6 +7070,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU8_CUSTOM, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7304,6 +7547,24 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x4000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x6000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8384,7 +8645,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -8536,7 +8797,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
NPC_F_LA_U_HAS_IH_NIX,
@@ -8693,7 +8954,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
NPC_F_LA_U_HAS_HIGIG2,
@@ -8818,7 +9079,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
@@ -8947,7 +9208,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -9124,7 +9385,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
0,
@@ -9204,7 +9465,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -9213,7 +9474,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9228,7 +9489,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9324,7 +9585,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9428,7 +9689,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9532,7 +9793,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
0,
@@ -9628,7 +9889,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9684,7 +9945,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9757,7 +10018,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9772,7 +10033,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9836,7 +10097,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_EXDSA,
NPC_F_LB_L_EXDSA,
@@ -9923,6 +10184,22 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9949,7 +10226,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10029,7 +10306,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10101,7 +10378,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10165,7 +10442,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10237,7 +10514,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10310,80 +10587,80 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ 6, 0, 42, 1, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_NSH, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
@@ -10397,7 +10674,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10469,7 +10746,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10533,7 +10810,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10605,7 +10882,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10685,7 +10962,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_DSA,
NPC_F_LB_L_DSA,
@@ -10733,7 +11010,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
NPC_F_LB_L_DSA_VLAN,
@@ -10894,7 +11171,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10942,7 +11219,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10990,7 +11267,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -11014,7 +11291,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 2, 0,
NPC_LID_LC, NPC_LT_NA,
0,
@@ -11063,15 +11340,15 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
- NPC_S_KPU5_IP, 10, 0,
+ NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
- NPC_S_KPU5_IP6, 10, 0,
+ 6, 0, 42, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
@@ -11119,7 +11396,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
+ 2, 0, 4, 2, 0,
NPC_S_KPU8_UDP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
@@ -11223,7 +11500,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 2, 0,
+ 2, 8, 4, 2, 0,
NPC_S_KPU8_UDP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
@@ -11450,6 +11727,22 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0, 0,
NPC_S_KPU6_IP6_ROUT, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
NPC_F_LC_L_EXT_ROUT,
0, 0, 0, 0,
},
@@ -11695,6 +11988,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP,
@@ -11791,6 +12092,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
@@ -11951,6 +12260,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP6,
@@ -12080,6 +12397,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12184,6 +12509,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12280,6 +12613,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12368,6 +12709,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12472,6 +12821,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12568,6 +12925,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12681,6 +13046,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12769,6 +13142,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12857,6 +13238,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -13058,6 +13447,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
NPC_S_KPU9_ESP, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -13458,6 +13855,70 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 24, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_GRE,
@@ -13529,6 +13990,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LD, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_CUSTOM, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_CUSTOM0,
+ 0,
+ 0, 0xff, 0, 0,
+ },
+ {
NPC_ERRLEV_LD, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -13946,6 +14415,22 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15105,7 +15590,9 @@ static struct npc_lt_def_cfg npc_lt_defaults = {
},
.rx_et = {
{
- .lid = NPC_LID_LB,
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
.ltype_match = NPC_LT_NA,
.ltype_mask = 0x0,
},
@@ -15139,6 +15626,12 @@ static struct npc_mcam_kex npc_mkex_default = {
/* Ethertype: 2 bytes, KW0[55:40] */
KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
},
+ [NPC_LT_LA_CPT_HDR] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
/* Layer A: HiGig2: */
[NPC_LT_LA_HIGIG2_ETHER] = {
/* Classification: 2 bytes, KW1[23:8] */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c1d04a3c559..07d4859de53a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu)
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
+
+ BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
@@ -1484,7 +1486,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
blkaddr = pf->nix_blkaddr;
- } else if (is_afvf(pcifunc)) {
+ } else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
/* Assign NIX based on VF number. All even numbered VFs get
* NIX0 and odd numbered gets NIX1
@@ -2034,7 +2036,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
u16 target;
/* Only PF can add VF permissions */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
return -EOPNOTSUPP;
target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
@@ -2618,6 +2620,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
+ /* Free allocated BPIDs */
+ rvu_nix_flr_free_bpids(rvu, pcifunc);
+
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
@@ -3151,6 +3156,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
int err, chans, vfs;
+ int pos = 0;
if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
dev_warn(&pdev->dev,
@@ -3158,6 +3164,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
+ /* Get RVU VFs device id */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
+
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 43be37dd1f32..f390525a6217 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -288,6 +288,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+struct nix_bp {
+ struct rsrc_bmap bpids; /* free bpids bitmap */
+ u16 cgx_bpid_cnt;
+ u16 sdp_bpid_cnt;
+ u16 free_pool_base;
+ u16 *fn_map; /* pcifunc mapping */
+ u8 *intf_map; /* interface type map */
+ u8 *ref_cnt;
+};
+
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -363,6 +373,7 @@ struct nix_hw {
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
+ struct nix_bp bp;
u64 *tx_credits;
u8 cc_mcs_cnt;
};
@@ -432,6 +443,13 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct channel_fwdata {
+ struct sdp_node_info info;
+ u8 valid;
+#define RVU_CHANL_INFO_RESERVED 379
+ u8 reserved[RVU_CHANL_INFO_RESERVED];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -450,11 +468,13 @@ struct rvu_fwdata {
u64 msixtr_base;
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
-#define FWDATA_RESERVED_MEM 1022
+ struct channel_fwdata channel_data;
+#define FWDATA_RESERVED_MEM 958
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
#define CGX_LMACS_USX 8
+#define FWDATA_CGX_LMAC_OFFSET 10536
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
@@ -503,6 +523,7 @@ struct rvu {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
+ u16 vf_devid; /* VF devices id */
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -732,9 +753,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu)
/* Function Prototypes
* RVU
*/
-static inline bool is_afvf(u16 pcifunc)
+#define RVU_LBK_VF_DEVID 0xA0F8
+static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc)
{
- return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+ return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) &&
+ (rvu->vf_devid == RVU_LBK_VF_DEVID));
}
static inline bool is_vf(u16 pcifunc)
@@ -794,7 +817,7 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
-bool is_sdp_vf(u16 pcifunc);
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
@@ -873,6 +896,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index e6d7914ce61c..2500f5ba4f5a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2870,6 +2870,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
+ case NPC_TCP_FLAGS:
+ seq_printf(s, "%d ", rule->packet.tcp_flags);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
+ break;
case NPC_IPSEC_SPI:
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 1e6fbd98423d..96c04f7d93f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1235,8 +1235,8 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
- RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
@@ -1434,15 +1434,6 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
-};
-
-static const struct devlink_param rvu_af_dl_param_exact_match[] = {
- DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
- "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- rvu_af_npc_exact_feature_get,
- rvu_af_npc_exact_feature_disable,
- rvu_af_npc_exact_feature_validate),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
"npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1457,6 +1448,15 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
rvu_af_dl_nix_maxlf_validate),
};
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
+};
+
/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 66203a90f052..d39001cdc707 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -499,29 +499,115 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
+#define NIX_BPIDS_PER_LMAC 8
+#define NIX_BPIDS_PER_CPT 1
+static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
+{
+ struct nix_bp *bp = &hw->bp;
+ int err, max_bpids;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
+
+ /* Reserve the BPIds for CGX and SDP */
+ bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
+ bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
+ bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
+ NIX_BPIDS_PER_CPT;
+ bp->bpids.max = max_bpids - bp->free_pool_base;
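+ /* BPIDs below free_pool_base are statically reserved for CGX, SDP and
+ * CPT; only the remaining IDs are managed by the bitmap allocator below.
+ */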
+
+ err = rvu_alloc_bitmap(&bp->bpids);
+ if (err)
+ return err;
+
+ bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!bp->fn_map)
+ return -ENOMEM;
+
+ bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->intf_map)
+ return -ENOMEM;
+
+ bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->ref_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, bpid, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+
+ if (!is_lbk_vf(rvu, pcifunc))
+ return;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return;
+
+ bp = &nix_hw->bp;
+
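+ /* Drop this function's references; a BPID is returned to the free pool
+ * only once its reference count reaches zero.
+ */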
+ mutex_lock(&rvu->rsrc_lock);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+ rvu_free_rsrc(&bp->bpids, bpid);
+ bp->fn_map[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+}
+
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, pf, type, err;
+ u16 chan_base, chan, bpid;
struct rvu_pfvf *pfvf;
- int blkaddr, pf, type;
- u16 chan_base, chan;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+ bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
+
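+ /* For LBK, also return the BPID programmed in the channel config to
+ * the free pool and clear any mappings held by this function.
+ */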
+ if (type == NIX_INTF_TYPE_LBK) {
+ bpid = cfg & GENMASK(8, 0);
+ mutex_lock(&rvu->rsrc_lock);
+ rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->fn_map[bpid] = 0;
+ bp->ref_cnt[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ }
}
return 0;
}
@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
- u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ int bpid, blkaddr, sdp_chan_base, err;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
u8 cgx_id, lmac_id;
- u64 cfg;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- lmac_chan_cnt = cfg & 0xFF;
+ struct nix_bp *bp;
- cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
- lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
- sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
- pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ bp = &nix_hw->bp;
/* Backpressure IDs range division
* CGX channles are mapped to (0 - 191) BPIDs
@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 16)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
- bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
- (lmac_id * lmac_chan_cnt) + req->chan_base;
+ bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
+ (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > cgx_bpid_cnt)
- return -EINVAL;
+ if (bpid > bp->cgx_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID;
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
- return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
- if (req->bpid_per_chan)
- bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
- return -EINVAL;
+ /* Alloc bpid from the free pool */
+ mutex_lock(&rvu->rsrc_lock);
+ bpid = rvu_alloc_rsrc(&bp->bpids);
+ if (bpid < 0) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return NIX_AF_ERR_INVALID_BPID;
+ }
+ bp->fn_map[bpid] = req->hdr.pcifunc;
+ bp->ref_cnt[bpid]++;
+ bpid += bp->free_pool_base;
+ mutex_unlock(&rvu->rsrc_lock);
break;
case NIX_INTF_TYPE_SDP:
- if ((req->chan_base + req->chan_cnt) > 255)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
- bpid = sdp_bpid_cnt + req->chan_base;
+ /* Handle the use case of 2 SDP blocks */
+ if (!hw->cap.programmable_chans)
+ sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
+ else
+ sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
+
+ bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
- return -EINVAL;
+ if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
+ return NIX_AF_ERR_INVALID_BPID;
break;
default:
return -EINVAL;
@@ -612,7 +703,7 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
@@ -1523,7 +1614,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
@@ -1899,7 +1990,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- if (is_afvf(pcifunc)) {/* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -1916,7 +2007,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
- if (is_afvf(pcifunc)) { /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -3356,7 +3447,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
@@ -3703,7 +3794,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
@@ -4039,6 +4130,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+ field->bytesm1 = 1; /* 2 Bytes*/
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
@@ -4420,7 +4518,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4784,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_bpids(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
/* Configure segmentation offload formats */
nix_setup_lso(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e5d6156655ba..e350242bbafb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -395,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
(owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -415,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
+ /* AF modifies given action iff PF/VF has requested for it */
+ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+ return;
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -604,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
- if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -769,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
return;
/* Skip LBK VFs */
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
return;
/* If pkt replication is not supported,
@@ -849,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
- if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c75669c8fde7..c181e7aa9eb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS4_TTL] = "lse depth 4",
[NPC_TYPE_ICMP] = "icmp type",
[NPC_CODE_ICMP] = "icmp code",
+ [NPC_TCP_FLAGS] = "tcp flags",
[NPC_UNKNOWN] = "unknown",
};
@@ -530,6 +531,7 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
+ NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
- BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
+ BIT_ULL(NPC_TCP_FLAGS);
/* for tcp/udp/sctp corresponding layer type should be in the key */
if (*features & proto_flags) {
@@ -982,7 +985,8 @@ do { \
mask->icmp_type, 0);
NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
mask->icmp_code, 0);
-
+ NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
+ ntohs(mask->tcp_flags), 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
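
The new NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2) entry extracts two bytes starting at byte 12 of the TCP header, i.e. the data-offset/reserved byte plus the flags byte, so a mask over the low byte isolates CWR..FIN. A standalone sketch of that arithmetic, for illustration only; the helper below is hypothetical and not part of the patch:

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Bytes 12..13 of a TCP header: |data offset|rsvd|NS| CWR ECE URG ACK PSH RST SYN FIN| */
	static inline uint8_t tcp_flags_from_hdr(const uint8_t *tcp_hdr)
	{
		uint16_t word12;

		memcpy(&word12, tcp_hdr + 12, sizeof(word12));
		return ntohs(word12) & 0xff;	/* low byte carries the classic 8 flags */
	}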
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 6f73ad9807f0..086f05c0376f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -439,6 +439,9 @@
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
+#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
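
The two new GENMASK_ULL fields carve the maximum BPID count and the SDP channel count out of a 64-bit NIX constants register. A minimal sketch of how they would typically be consumed with FIELD_GET; the register name NIX_AF_CONST1 and the helper below are assumptions for illustration, since this hunk does not show which register nix_setup_bpids() actually reads:

	/* Sketch only; the register choice is an assumption, not from the patch. */
	static void nix_read_bpid_consts_sketch(struct rvu *rvu, int blkaddr,
						u16 *max_bpids, u16 *sdp_chans)
	{
		u64 cst = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

		*max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cst);
		*sdp_chans = FIELD_GET(NIX_CONST_SDP_CHANS, cst);
	}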
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index ae50d56258ec..38cfe148f4b7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc)
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
-bool is_sdp_vf(u16 pcifunc)
+#define RVU_SDP_VF_DEVID 0xA0F7
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
{
+ if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
+ return (rvu->vf_devid == RVU_SDP_VF_DEVID);
+
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
+ if (rvu->fwdata->channel_data.valid) {
+ sdp_pf_num[0] = 0;
+ pfvf = &rvu->pf[sdp_pf_num[0]];
+ pfvf->sdp_info = &rvu->fwdata->channel_data.info;
+
+ return 0;
+ }
+
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 4fd44b6eecea..87bdb93cb066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -638,6 +638,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -857,6 +858,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+
+ flow_spec->tcp_flags = match.key->flags;
+ flow_mask->tcp_flags = match.mask->flags;
+ req->features |= BIT_ULL(NPC_TCP_FLAGS);
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
u8 bit;
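
With FLOW_DISSECTOR_KEY_TCP accepted and the flags copied into the flow spec, a flower rule matching on TCP flags can now reach the NPC_TCP_FLAGS extractor added on the AF side. As a usage illustration only (interface name and exact offload behaviour are assumptions; see tc-flower(8) for the tcp_flags value/mask syntax):

	tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
		tcp_flags 0x2/0x17 skip_sw action drop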
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index d58b07e7e123..7063c78bd35f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
int i;
for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
entry->buf = NULL;
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
-
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(buf);
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
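
Both cleanup paths above open-coded the same drain sequence; the patch replaces it with the page_frag_cache_drain() helper introduced elsewhere in this series. Reconstructed from the removed lines, the helper is roughly equivalent to the sketch below (the real mm implementation is expected to clear only the va pointer rather than memset the whole cache, and may differ in other details):

	/* Rough equivalent, derived from the open-coded pattern removed above. */
	static inline void page_frag_cache_drain_sketch(struct page_frag_cache *nc)
	{
		struct page *page;

		if (!nc->va)
			return;

		page = virt_to_page(nc->va);
		__page_frag_cache_drain(page, nc->pagecnt_bias);
		nc->va = NULL;	/* stands in for the old memset of the whole cache */
	}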
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f5b1f8c7834f..7f20813456e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2199,8 +2199,9 @@ reset_slave:
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd);
- /* Turn on internal error letting slave reset itself immeditaly,
- * otherwise it might take till timeout on command is passed
+ /* Turn on internal error letting slave reset itself
+ * immediately, otherwise it might take till timeout on
+ * command is passed
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
@@ -2954,7 +2955,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
dummy_admin.default_vlan = vlan;
/* VF wants to move to other VST state which is valid with current
- * rate limit. Either differnt default vlan in VST or other
+ * rate limit. Either different default vlan in VST or other
* supported QoS priority. Otherwise we don't allow this change when
* the TX rate is still configured.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 4d4f9cf9facb..e130e7259275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
@@ -137,7 +137,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 9e3b76182088..cd754cd76bde 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -96,8 +96,8 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
#define MLX4_EN_WRAP_AROUND_SEC 10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
- * good chance we wont miss a wrap around.
- * TOTO: Use a timer instead of a work queue to increase the guarantee.
+ * good chance we won't miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
*/
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 33bbcced8105..5d3fde63b273 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -42,6 +42,7 @@
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
+#include <net/rps.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -1072,7 +1073,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
- * change while HW is updated holding the command semaphor */
+ * change while HW is updated holding the command semaphore
+ */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
@@ -1817,7 +1819,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
- /* Attach rx QP to bradcast address */
+ /* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a09b6e05337d..eac49657bd07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -762,7 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 65cb63f6c465..1ddb11cb25f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -992,7 +992,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->ts_requested = 1;
}
- /* Prepare ctrl segement apart opcode+ownership, which depends on
+ /* Prepare ctrl segment apart opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 6598b10a9ff4..9572a45f6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -210,7 +210,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
- /* ensure all information is written before setting the ownersip bit */
+ /* ensure all information is written before setting the ownership bit */
dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
index 954b86faac29..40ca29bb928c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -44,7 +44,7 @@
/* Default supported priorities for VPP allocation */
#define MLX4_DEFAULT_QOS_PRIO (0)
-/* Derived from FW feature definition, 0 is the default vport fo all QPs */
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
#define MLX4_VPP_DEFAULT_VPORT (0)
struct mlx4_vport_qos_param {
@@ -98,7 +98,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
u16 *available_vpp, u8 *vpp_p_up);
/**
- * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
* The total number of VPPs assigned to all for a port must not exceed
* the value reported by available_vpp in mlx4_ALLOCATE_VPP_get.
* VPP allocation is allowed only after the port type has been set,
@@ -113,7 +113,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
/**
- * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
* Each priority allowed for the Vport is assigned with a share of the BW,
* and a BW limitation. This commands query the current QoS values.
*
@@ -128,7 +128,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *out_param);
/**
- * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
* QoS parameters can be modified at any time, but must be initialized
* before any QP is associated with the VPort.
*
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2581226836b5..7b02ff61126d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -129,7 +129,7 @@ static const struct mlx4_profile default_profile = {
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
- .num_mtt = 1 << 20, /* It is really num mtt segements */
+ .num_mtt = 1 << 20, /* It is really num mtt segments */
};
static const struct mlx4_profile low_mem_profile = {
@@ -1508,7 +1508,7 @@ static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
priv->v2p.port1 = port1;
priv->v2p.port2 = port2;
} else {
- mlx4_err(dev, "Failed to change port mape: %d\n", err);
+ mlx4_err(dev, "Failed to change port map: %d\n", err);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index e9cd4bb6f83d..d3d9ec042d2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -112,7 +112,7 @@ struct mlx4_en_stat_out_flow_control_mbox {
__be64 tx_pause_duration;
/* Number of transmitter transitions from XOFF state to XON state */
__be64 tx_pause_transition;
- /* Reserverd */
+ /* Reserved */
__be64 reserved[2];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 256a06b3c096..4e43f4a7d246 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -2118,7 +2118,7 @@ static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
* @data: output buffer to put the requested data into.
*
* Reads cable module eeprom data, puts the outcome data into
- * data pointer paramer.
+ * data pointer parameter.
* Returns num of read bytes on success or a negative error
* code.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c44870b175f9..76dc5a9b9648 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
- lib/crypto.o
+ lib/crypto.o lib/sd.o
#
# Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index cf0477f53dc4..47e7c2639774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
return false;
if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
- mlx5_core_warn(dev, "Missing SyncE capability\n");
+ mlx5_core_dbg(dev, "Missing SyncE capability\n");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..98d4306929f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+ !dev->priv.fw_reset) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+ return -EOPNOTSUPP;
+ }
+
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 928bf24d4b12..904e08de852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -41,6 +41,7 @@ struct mlx5_dpll_synce_status {
enum mlx5_msees_oper_status oper_status;
bool ho_acq;
bool oper_freq_measure;
+ enum mlx5_msees_failure_reason failure_reason;
s32 frequency_diff;
};
@@ -60,6 +61,7 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -99,6 +101,26 @@ mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
}
}
+static enum dpll_lock_status_error
+mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
+{
+ switch (synce_status->oper_status) {
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
+ switch (synce_status->failure_reason) {
+ case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
+ return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
+ case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
+ return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
+ default:
+ return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
+ }
+ default:
+ return DPLL_LOCK_STATUS_ERROR_NONE;
+ }
+}
+
static enum dpll_pin_state
mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
@@ -118,10 +140,11 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
return 0;
}
-static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
@@ -131,6 +154,7 @@ static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
if (err)
return err;
*status = mlx5_dpll_lock_status_get(&synce_status);
+ *status_error = mlx5_dpll_lock_status_error_get(&synce_status);
return 0;
}
@@ -261,7 +285,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
{
if (mdpll->tracking_netdev)
return;
- netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
mdpll->tracking_netdev = netdev;
}
@@ -269,7 +293,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
if (!mdpll->tracking_netdev)
return;
- netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ dpll_netdev_pin_clear(mdpll->tracking_netdev);
mdpll->tracking_netdev = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 55c6ace0acd5..84db05fb9389 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
#include "lib/clock.h"
#include "en/rx_res.h"
#include "en/selq.h"
+#include "lib/sd.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -791,6 +792,8 @@ struct mlx5e_channel {
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
+ int vec_ix;
+ int sd_ix;
int cpu;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
@@ -914,7 +917,7 @@ struct mlx5e_priv {
bool tx_ptp_opened;
bool rx_ptp_opened;
struct hwtstamp_config tstamp;
- u16 q_counter;
+ u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
@@ -1029,12 +1032,12 @@ struct mlx5e_xsk_param;
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
- struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 48581ea3adcb..874a1016623c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -23,20 +23,26 @@ bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
*rqn = c->rq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 637ca90daaa8..6715aa9383b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -10,8 +10,10 @@ struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 40c8df111754..e2d8d2754be0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -20,10 +20,8 @@
#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
-int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+static int mlx5e_monitor_counter_cap(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
-
if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
return false;
if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
@@ -36,24 +34,38 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (!mlx5e_monitor_counter_cap(pos))
+ return false;
+ return true;
+}
+
+static void mlx5e_monitor_counter_arm(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
+ mlx5_cmd_exec_in(mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
monitor_counters_work);
+ struct mlx5_core_dev *pos;
+ int i;
mutex_lock(&priv->state_lock);
mlx5e_stats_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ mlx5e_monitor_counter_arm(pos);
}
static int mlx5e_monitor_event_handler(struct notifier_block *nb,
@@ -97,15 +109,13 @@ static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
}
/* check if mlx5e_monitor_counter_supported before calling this function*/
-static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+static void mlx5e_set_monitor_counter(struct mlx5_core_dev *mdev, int q_counter)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- int q_counter = priv->q_counter;
int cnt = 0;
if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
@@ -127,13 +137,17 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
/* check if mlx5e_monitor_counter_supported before calling this function*/
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *pos;
+ int i;
+
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-
- mlx5e_set_monitor_counter(priv);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_eq_notifier_register(pos, &priv->monitor_counters_nb);
+ mlx5e_set_monitor_counter(pos, priv->q_counter[i]);
+ mlx5e_monitor_counter_arm(pos);
+ }
queue_work(priv->wq, &priv->update_stats_work);
}
@@ -141,11 +155,15 @@ void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_cmd_exec_in(pos, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(pos, &priv->monitor_counters_nb);
+ }
cancel_work_sync(&priv->monitor_counters_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5d213a9886f1..a3f31d9d527e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
- /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
- u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 headroom;
+
+ if (no_head_tail_room)
+ return SKB_DATA_ALIGN(hw_mtu);
+ headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+ /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+ * no_head_tail_room should be set in the case of XDP with Striding RQ
+ * when SKB is not linear. This is because another page is allocated for the linear part.
+ */
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
- /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+ * to exclude headroom and tailroom from calculations.
+ * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
+ * since packet data buffers don't have headroom and tailroom reserved for the SKB.
+ * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
@@ -674,7 +688,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
- .ix = c->ix,
+ .ix = c->vec_ix,
};
}
@@ -945,7 +959,6 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1007,7 +1020,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
@@ -1018,7 +1030,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1027,7 +1038,6 @@ void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
@@ -1292,13 +1302,12 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
- err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+ err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 6800949dafbc..9a781f18b57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -130,10 +130,8 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
@@ -149,7 +147,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam);
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 078f56a3cbb2..d0af7271da34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
WARN_ON_ONCE(tracker->inuse);
tracker->inuse = true;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_add_tail(&tracker->entry, &list->tracker_list_head);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
WARN_ON_ONCE(!tracker->inuse);
tracker->inuse = false;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_del(&tracker->entry);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
- spin_lock(&cqe_list->tracker_list_lock);
+ spin_lock_bh(&cqe_list->tracker_list_lock);
list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
struct sk_buff *skb =
mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
pos->inuse = false;
list_del(&pos->entry);
}
- spin_unlock(&cqe_list->tracker_list_lock);
+ spin_unlock_bh(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
@@ -646,7 +646,6 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- u16 q_counter,
struct mlx5e_ptp_params *ptp_params)
{
struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
@@ -655,7 +654,7 @@ static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = netdev->max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
+ mlx5e_build_rq_param(mdev, params, NULL, rq_params);
}
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
@@ -681,7 +680,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* RQ */
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
params->vlan_strip_disable = orig->vlan_strip_disable;
- mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
}
}
@@ -714,13 +713,16 @@ static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
int node = dev_to_node(c->mdev->device);
- int err;
+ int err, sd_ix;
+ u16 q_counter;
err = mlx5e_init_ptp_rq(c, params, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+ sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
+ q_counter = c->priv->q_counter[sd_ix];
+ return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
@@ -935,6 +937,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
mlx5e_trigger_napi_sched(&c->napi);
}
@@ -943,8 +946,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
mlx5e_deactivate_rq(&c->rq);
+ }
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 34adf8c3f81a..e87e26f2c669 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -122,8 +122,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
- mlx5e_build_sq_param(priv->mdev, params, &param_sq);
- mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
+ mlx5e_build_sq_param(c->mdev, params, &param_sq);
+ mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
@@ -176,7 +176,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
+ qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
return 0;
@@ -190,7 +190,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
if (!sq) /* Handle the case when the SQ failed to open. */
return;
- qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 4358798d6ce1..25d751eba99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -294,8 +294,8 @@ static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
- real_time = mlx5_is_real_time_rq(priv->mdev);
- rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+ real_time = mlx5_is_real_time_rq(rq->mdev);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL));
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 6b44ddce14e9..0ab9db319530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -219,7 +219,6 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
bool stopped = netif_xmit_stopped(sq->txq);
- struct mlx5e_priv *priv = sq->priv;
u8 state;
int err;
@@ -227,7 +226,7 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
if (!err)
devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index 7b8ff7a71003..bcafb4bf9415 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -4,6 +4,33 @@
#include "rqt.h"
#include <linux/mlx5/transobj.h>
+static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id);
+ int i;
+
+ /* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */
+ for (i = 0; i < size; i++)
+ if (vhca_ids[i] >= max_num_vhca_id)
+ return false;
+ return true;
+}
+
+static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ if (!vhca_ids)
+ return true;
+
+ if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt))
+ return false;
+ if (!verify_num_vhca_ids(mdev, vhca_ids, size))
+ return false;
+
+ return true;
+}
+
void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels)
{
@@ -13,19 +40,38 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
indir->table[i] = i % num_channels;
}
+static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size)
+{
+ unsigned int i;
+
+ if (vhca_ids) {
+ MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1);
+ for (i = 0; i < size; i++) {
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]);
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]);
+ }
+ } else {
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+ }
+}
static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u16 max_size, u32 *init_rqns, u16 init_size)
+ u16 max_size, u32 *init_rqns, u32 *init_vhca_ids, u16 init_size)
{
+ int entry_sz;
void *rqtc;
int inlen;
int err;
u32 *in;
- int i;
+
+ if (!rqt_verify_vhca_ids(mdev, init_vhca_ids, init_size))
+ return -EOPNOTSUPP;
rqt->mdev = mdev;
rqt->size = max_size;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ entry_sz = init_vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + entry_sz * init_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -33,10 +79,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
-
MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
- for (i = 0; i < init_size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ fill_rqn_list(rqtc, init_rqns, init_vhca_ids, init_size);
err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
@@ -49,7 +94,7 @@ int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
{
u16 max_size = indir_enabled ? indir_table_size : 1;
- return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, NULL, 1);
}
static int mlx5e_bits_invert(unsigned long a, int size)
@@ -63,7 +108,8 @@ static int mlx5e_bits_invert(unsigned long a, int size)
return inv;
}
-static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, u32 *rss_vhca_ids, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
unsigned int i;
@@ -82,30 +128,42 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
*/
return -EINVAL;
rss_rqns[i] = rqns[ix];
+ if (vhca_ids)
+ rss_vhca_ids[i] = vhca_ids[ix];
}
return 0;
}
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+ err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, rss_vhca_ids,
indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
@@ -126,15 +184,20 @@ void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
}
-static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int size)
{
- unsigned int i;
+ int entry_sz;
void *rqtc;
int inlen;
u32 *in;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, size))
+ return -EINVAL;
+
+ entry_sz = vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + entry_sz * size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -143,8 +206,8 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
- for (i = 0; i < size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ fill_rqn_list(rqtc, rqns, vhca_ids, size);
err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
@@ -152,17 +215,21 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
return err;
}
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id)
{
- return mlx5e_rqt_redirect(rqt, &rqn, 1);
+ return mlx5e_rqt_redirect(rqt, &rqn, vhca_id, 1);
}
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, num_rqns))
+ return -EINVAL;
+
if (WARN_ON(rqt->size != indir->max_table_size))
return -EINVAL;
@@ -170,13 +237,23 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, rss_vhca_ids, indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index 77fba3ebd18d..e0bc30308c77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -20,7 +20,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels);
struct mlx5e_rqt {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 rqtn;
u16 size;
};
@@ -28,7 +28,7 @@ struct mlx5e_rqt {
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
bool indir_enabled, u32 init_rqn, u32 indir_table_size);
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
@@ -38,8 +38,9 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
#endif /* __MLX5_EN_RQT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1545a2e8d6d..5f742f896600 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -74,7 +74,7 @@ struct mlx5e_rss {
struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 drop_rqn;
bool inner_ft_support;
bool enabled;
@@ -473,21 +473,22 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
int err;
- err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir);
+ err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc,
+ &rss->indir);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
return err;
}
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
rss->enabled = true;
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
}
void mlx5e_rss_disable(struct mlx5e_rss *rss)
@@ -495,7 +496,7 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
@@ -568,7 +569,7 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns)
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
bool changed_hash = false;
@@ -608,7 +609,7 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
}
if (changed_indir && rss->enabled) {
- err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
mlx5e_rss_copy(rss, old_rss);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d1d0bc350e92..d0df98963c8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -39,7 +39,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -47,7 +47,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns);
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index b23e224e3763..a86eade9a9e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -8,7 +8,7 @@
#define MLX5E_MAX_NUM_RSS 16
struct mlx5e_rx_res {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
unsigned int max_nch;
u32 drop_rqn;
@@ -19,6 +19,7 @@ struct mlx5e_rx_res {
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 *rss_rqns;
+ u32 *rss_vhca_ids;
unsigned int rss_nch;
struct {
@@ -34,6 +35,13 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
+static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
+{
+ bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
+
+ return multi_vhca ? res->rss_vhca_ids + offset : NULL;
+}
+
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
int i;
@@ -85,8 +93,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
- if (res->rss_active)
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ if (res->rss_active) {
+ u32 *vhca_ids = get_vhca_ids(res, 0);
+
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
+ }
res->rss[i] = rss;
*rss_idx = i;
@@ -153,10 +164,12 @@ static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
struct mlx5e_rss *rss = res->rss[i];
+ u32 *vhca_ids;
if (!rss)
continue;
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ vhca_ids = get_vhca_ids(res, 0);
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
}
@@ -200,6 +213,7 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
const u32 *indir, const u8 *key, const u8 *hfunc)
{
+ u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
if (rss_idx >= MLX5E_MAX_NUM_RSS)
@@ -209,7 +223,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
+ res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -280,11 +295,13 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
+ kvfree(res->rss_vhca_ids);
kvfree(res->rss_rqns);
kvfree(res);
}
-static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
+ bool multi_vhca)
{
struct mlx5e_rx_res *rx_res;
@@ -298,6 +315,15 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig
return NULL;
}
+ if (multi_vhca) {
+ rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
+ if (!rx_res->rss_vhca_ids) {
+ kvfree(rx_res->rss_rqns);
+ kvfree(rx_res);
+ return NULL;
+ }
+ }
+
return rx_res;
}
@@ -424,10 +450,11 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
+ bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
struct mlx5e_rx_res *res;
int err;
- res = mlx5e_rx_res_alloc(mdev, max_nch);
+ res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
if (!res)
return ERR_PTR(-ENOMEM);
@@ -504,10 +531,11 @@ static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
struct mlx5e_channels *chs,
unsigned int ix)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
u32 rqn = res->rss_rqns[ix];
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -519,7 +547,7 @@ static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
{
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -534,10 +562,12 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
nch = mlx5e_channels_get_num(chs);
for (ix = 0; ix < chs->num; ix++) {
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (mlx5e_channels_is_xsk(chs, ix))
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
}
res->rss_nch = chs->num;
@@ -554,7 +584,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -573,7 +603,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -584,10 +614,12 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
unsigned int ix, bool xsk)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (xsk)
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
mlx5e_rx_res_rss_enable(res);
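The rx_res changes above thread an optional vhca_ids array, kept parallel to rss_rqns, through every RQT update so that an entry can reference an RQ owned by a different VHCA in a Socket-Direct group. A minimal sketch of the intended gating, reusing the helper introduced in this patch (example_enable_all() is illustrative only and mirrors mlx5e_rx_res_rss_enable() above; it is not additional driver code):

	/* Sketch: vhca_ids stays NULL unless MLX5E_RX_RES_FEATURE_MULTI_VHCA
	 * was requested at creation time, so single-VHCA setups keep building
	 * RQTs from local RQ numbers only.
	 */
	static void example_enable_all(struct mlx5e_rx_res *res)
	{
		u32 *vhca_ids = get_vhca_ids(res, 0); /* NULL when not multi-VHCA */
		int i;

		for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
			if (!res->rss[i])
				continue;
			/* rss_rqns[n] and, if present, vhca_ids[n] describe channel n */
			mlx5e_rss_enable(res->rss[i], res->rss_rqns, vhca_ids,
					 res->rss_nch);
		}
	}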
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 82aaba8a82b3..7b1a9f0f1874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -18,6 +18,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
MLX5E_RX_RES_FEATURE_PTP = BIT(1),
+ MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2),
};
/* Setup */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 86bf007fd05b..b500cc2c9689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index ac458a8d10e0..53ca16cb9c41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -63,10 +63,12 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
struct mlx5e_rq *rq = &t->rq;
+ u16 q_counter;
int node;
int err;
node = dev_to_node(mdev->device);
+ q_counter = priv->q_counter[0];
ccp.netdev = priv->netdev;
ccp.wq = priv->wq;
@@ -79,7 +81,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
return err;
mlx5e_init_trap_rq(t, &t->params, rq);
- err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
+ err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq);
if (err)
goto err_destroy_cq;
@@ -116,15 +118,14 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
- int max_mtu, u16 q_counter,
- struct mlx5e_trap *t)
+ int max_mtu, struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
+ mlx5e_build_rq_param(mdev, params, NULL, &t->rq_param);
}
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
@@ -138,7 +139,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
if (!t)
return ERR_PTR(-ENOMEM);
- mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
+ mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, t);
t->priv = priv;
t->mdev = priv->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index ebada0c5af3c..db776e515b6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -6,10 +6,10 @@
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+static int mlx5e_xsk_map_pool(struct mlx5_core_dev *mdev,
struct xsk_buff_pool *pool)
{
- struct device *dev = mlx5_core_dma_dev(priv->mdev);
+ struct device *dev = mlx5_core_dma_dev(mdev);
return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}
@@ -89,7 +89,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_pool(priv, pool);
+ err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);
if (unlikely(err))
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 82e6abbc1734..06592b9f0424 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -49,10 +49,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
- mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
+ mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
@@ -93,6 +92,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
struct mlx5e_rq *xskrq = &c->xskrq;
int err;
@@ -100,7 +100,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), xskrq);
+ err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq);
if (err)
return err;
@@ -125,7 +125,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;
- mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
+ mlx5e_build_xsk_cparam(priv->mdev, params, xsk, cparam);
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
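Together with the en_main.c hunks further below, the q_counter plumbing here moves counter_set_id from build-time RQ parameters to RQ creation time, so each channel can pick the counter belonging to its own Socket-Direct device. A hedged sketch of the selection, using c->sd_ix and priv->q_counter[] as introduced by this patch (the wrapper itself is illustrative):

	/* Sketch: per-channel queue-counter selection under Socket-Direct.
	 * Each SD device allocates its own q_counter, and a channel uses the
	 * counter of the device it was placed on (c->sd_ix).
	 */
	static int example_open_channel_rq(struct mlx5e_channel *c,
					   struct mlx5e_params *params,
					   struct mlx5e_rq_param *rq_param)
	{
		u16 q_counter = c->priv->q_counter[c->sd_ix];

		return mlx5e_open_rq(params, rq_param, NULL, cpu_to_node(c->cpu),
				     q_counter, &c->rq);
	}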
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 05612d9c6080..c54fd01ea635 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -984,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+ x->stats.integrity_failed += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+ x->stats.replay += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ }
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
@@ -1156,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
- .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index adaea3493193..7d943e93cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
- atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 51a144246ea6..727fa7c18523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -304,12 +304,6 @@ drop:
return false;
}
-enum {
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
-
- switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
- xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
- break;
- default:
- atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
- }
+ xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
- *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
-
+ *metadata = ipsec_obj_id;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2ed99772f168..82064614846f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
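For reference, the metadata word decoded by the remaining macros packs a marker bit, a 6-bit syndrome and a 24-bit handle; with the syndrome switch removed above, the RX path now consumes only the handle. A small worked example of the layout (the value is made up for illustration):

	/* 32-bit IPsec RX metadata layout, per the macros above:
	 *   bit  31     - marker
	 *   bits 29..24 - syndrome (no longer acted upon after this patch)
	 *   bits 23..0  - handle / object id
	 *
	 * Example: metadata = 0x8100002A
	 *   MLX5_IPSEC_METADATA_MARKER(0x8100002A)  -> 1
	 *   MLX5_IPSEC_METADATA_SYNDROM(0x8100002A) -> 0x01
	 *   MLX5_IPSEC_METADATA_HANDLE(0x8100002A)  -> 0x00002A
	 */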
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index e0e36a09721c..dd36b04e30a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 984fa04bd331..e3e57c849436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -96,7 +96,7 @@ bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx) || mlx5_get_sd(mdev))
return false;
/* Check the possibility to post the required ICOSQ WQEs. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index f11075e67658..adc6d8ea0960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,6 +11,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include "lib/crypto.h"
+#include "lib/mlx5.h"
struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
struct tls_crypto_info *crypto_info);
@@ -61,7 +62,8 @@ void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_
static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx) &&
+ !mlx5_get_sd(mdev);
}
bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 9b597cb24598..65ccb33edafb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -267,7 +267,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
goto err_out;
}
- pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
+ pdev = mlx5_core_dma_dev(sq->channel->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@@ -425,14 +425,12 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
- struct mlx5e_ktls_rx_resync_ctx *resync;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
- resync = &priv_rx->resync;
- dev = mlx5_core_dma_dev(resync->priv->mdev);
+ dev = mlx5_core_dma_dev(sq->channel->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d4ebd8743114..b2cabd6ab86c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *sa,
- bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
fs_id);
- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
+{
+ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa, bool encrypt,
+ bool is_tx, u32 *fs_id)
+{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+ if (!macsec_rule)
+ return -ENOMEM;
+
+ sa->macsec_rule = macsec_rule;
+
+ return 0;
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
- struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
if (err)
return err;
- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
- rule_attrs.sci = sa->sci;
- rule_attrs.assoc_num = sa->assoc_num;
- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
- if (!macsec_rule) {
- err = -ENOMEM;
- goto destroy_macsec_object;
+ if (sa->active) {
+ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+ if (err)
+ goto destroy_macsec_object;
}
- sa->macsec_rule = macsec_rule;
-
return 0;
destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
goto destroy_sa;
macsec_device->tx_sa[assoc_num] = tx_sa;
- if (!secy->operational ||
- assoc_num != tx_sc->encoding_sa ||
- !tx_sa->active)
+ if (!secy->operational)
goto out;
err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ if (rx_sa->active)
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
- &rx_sc->sc_xarray_element->fs_id);
+ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
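The MACsec rework above splits SA handling into two steps: the hardware MACsec object is created once when the SA is added, while the flow-steering rule is installed and removed separately as the SA toggles active, via the new *_sa_fs() helpers. Summarized as a sketch of the resulting lifecycle, based only on the hunks above:

	/* SA lifecycle after this patch:
	 *
	 *   add SA         -> mlx5e_macsec_init_sa()       (create object; add
	 *                     steering rule only if sa->active)
	 *   SA activated   -> mlx5e_macsec_init_sa_fs()    (add rule only)
	 *   SA deactivated -> mlx5e_macsec_cleanup_sa_fs() (remove rule only)
	 *   delete SA      -> mlx5e_macsec_cleanup_sa()    (remove rule and
	 *                     destroy object)
	 */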
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index e66f486faafe..c7f542d0b8f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/rps.h>
#include "en.h"
#define ARFS_HASH_SHIFT BITS_PER_BYTE
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c8e8f512803e..91848eae4565 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -70,6 +70,7 @@
#include "qos.h"
#include "en/trap.h"
#include "lib/devcom.h"
+#include "lib/sd.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -1024,7 +1025,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
@@ -1051,6 +1052,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -1274,7 +1276,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
- struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1287,7 +1289,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
return err;
- err = mlx5e_create_rq(rq, param);
+ err = mlx5e_create_rq(rq, param, q_counter);
if (err)
goto err_free_rq;
@@ -1806,6 +1808,7 @@ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
}
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
@@ -1819,6 +1822,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
@@ -2333,13 +2337,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_params)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
+ return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
}
static int mlx5e_open_queues(struct mlx5e_channel *c,
@@ -2526,14 +2531,20 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
- int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
+ int vec_ix;
+ int cpu;
int err;
- err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
+ cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix);
+
+ err = mlx5_comp_irqn_get(mdev, vec_ix, &irq);
if (err)
return err;
@@ -2546,20 +2557,23 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return -ENOMEM;
c->priv = priv;
- c->mdev = priv->mdev;
+ c->mdev = mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
+ c->vec_ix = vec_ix;
+ c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
c->cpu = cpu;
- c->pdev = mlx5_core_dma_dev(priv->mdev);
+ c->pdev = mlx5_core_dma_dev(mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
+ c->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
- c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
+ c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
+ netif_napi_set_irq(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2602,12 +2616,16 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
+
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL);
+
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
else
@@ -2647,7 +2665,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (!chs->c || !cparam)
goto err_free;
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+ err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
if (err)
goto err_free;
@@ -2935,15 +2953,18 @@ static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- int num_comp_vectors, ix, irq;
-
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
+ int ix;
for (ix = 0; ix < params->num_channels; ix++) {
+ int num_comp_vectors, irq, vec_ix;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
cpumask_clear(priv->scratchpad.cpumask);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
- for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
@@ -3335,7 +3356,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
+ mlx5e_build_drop_rq_param(mdev, &rq_param);
err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
@@ -3349,7 +3370,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
if (err)
goto err_destroy_cq;
- err = mlx5e_create_rq(drop_rq, &rq_param);
+ err = mlx5e_create_rq(drop_rq, &rq_param, priv->drop_rq_q_counter);
if (err)
goto err_free_rq;
@@ -5264,13 +5285,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv)
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- int err;
+ struct mlx5_core_dev *pos;
+ int err, i;
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
- if (!err)
- priv->q_counter =
- MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+
+ mlx5_sd_for_each_dev(i, mdev, pos) {
+ err = mlx5_cmd_exec_inout(pos, alloc_q_counter, in, out);
+ if (!err)
+ priv->q_counter[i] =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ }
err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
if (!err)
@@ -5281,13 +5306,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv)
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- if (priv->q_counter) {
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
- priv->q_counter);
- mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ if (priv->q_counter[i]) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->q_counter[i]);
+ mlx5_cmd_exec_in(pos, dealloc_q_counter, in);
+ }
}
if (priv->drop_rq_q_counter) {
@@ -5371,6 +5400,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_PTP;
if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ if (mlx5_get_sd(priv->mdev))
+ features |= MLX5E_RX_RES_FEATURE_MULTI_VHCA;
priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
@@ -5980,28 +6011,52 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
free_netdev(netdev);
}
-static int mlx5e_resume(struct auxiliary_device *adev)
+static int _mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
- int err;
+ struct mlx5_core_dev *pos, *to;
+ int err, i;
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev, true);
- if (err)
- return err;
+ mlx5_sd_for_each_dev(i, mdev, pos) {
+ err = mlx5e_create_mdev_resources(pos, true);
+ if (err)
+ goto err_destroy_mdev_res;
+ }
err = mlx5e_attach_netdev(priv);
- if (err) {
- mlx5e_destroy_mdev_resources(mdev);
+ if (err)
+ goto err_destroy_mdev_res;
+
+ return 0;
+
+err_destroy_mdev_res:
+ to = pos;
+ mlx5_sd_for_each_dev_to(i, mdev, to, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ return err;
+}
+
+static int mlx5e_resume(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err;
+
+ err = mlx5_sd_init(mdev);
+ if (err)
return err;
- }
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ return _mlx5e_resume(actual_adev);
return 0;
}
@@ -6011,21 +6066,36 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *pos;
+ int i;
if (!netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
return -ENODEV;
}
mlx5e_detach_netdev(priv);
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+
return 0;
}
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- return _mlx5e_suspend(adev);
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err = 0;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ err = _mlx5e_suspend(actual_adev);
+
+ mlx5_sd_cleanup(mdev);
+ return err;
}
static int _mlx5e_probe(struct auxiliary_device *adev)
@@ -6071,9 +6141,9 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_destroy_netdev;
}
- err = mlx5e_resume(adev);
+ err = _mlx5e_resume(adev);
if (err) {
- mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
+ mlx5_core_err(mdev, "_mlx5e_resume failed, %d\n", err);
goto err_profile_cleanup;
}
@@ -6104,15 +6174,29 @@ err_devlink_unregister:
static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
- return _mlx5e_probe(adev);
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err;
+
+ err = mlx5_sd_init(mdev);
+ if (err)
+ return err;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ return _mlx5e_probe(actual_adev);
+ return 0;
}
-static void mlx5e_remove(struct auxiliary_device *adev)
+static void _mlx5e_remove(struct auxiliary_device *adev)
{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
+ struct mlx5_core_dev *mdev = edev->mdev;
- mlx5_core_uplink_netdev_set(priv->mdev, NULL);
+ mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev);
@@ -6122,6 +6206,19 @@ static void mlx5e_remove(struct auxiliary_device *adev)
mlx5e_destroy_devlink(mlx5e_dev);
}
+static void mlx5e_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ _mlx5e_remove(actual_adev);
+
+ mlx5_sd_cleanup(mdev);
+}
+
static const struct auxiliary_device_id mlx5e_id_table[] = {
{ .name = MLX5_ADEV_NAME ".eth", },
{},
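The channel-open path above now resolves, per channel index, which Socket-Direct device to use and which completion vector on that device (mlx5_sd_ch_ix_get_dev() and mlx5_sd_ch_ix_get_vec_ix(), defined in lib/sd.c further below as ch_ix % host_buses and ch_ix / host_buses). A small worked example of that mapping, assuming a group of two host buses:

	/* ch_ix -> (device index, vector index) with host_buses = 2:
	 *
	 *   ch 0 -> dev 0, vec 0      ch 1 -> dev 1, vec 0
	 *   ch 2 -> dev 0, vec 1      ch 3 -> dev 1, vec 1
	 *   ch 4 -> dev 0, vec 2      ch 5 -> dev 1, vec 2
	 *
	 * i.e. channels are interleaved across the SD devices, and each device
	 * sees a contiguous range of its own completion vectors.
	 */
	int dev_ix = ch_ix % host_buses;   /* mlx5_sd_ch_ix_get_dev_ix() */
	int vec_ix = ch_ix / host_buses;   /* mlx5_sd_ch_ix_get_vec_ix() */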
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 4b96ad657145..f3d0898bdbc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -561,11 +561,23 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
+static bool q_counter_any(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (priv->q_counter[i++])
+ return true;
+
+ return false;
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
- if (priv->q_counter)
+ if (q_counter_any(priv))
num_stats += NUM_Q_COUNTERS;
if (priv->drop_rq_q_counter)
@@ -578,7 +590,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
- for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
q_stats_desc[i].format);
@@ -593,7 +605,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
- for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
q_stats_desc, i);
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
@@ -607,18 +619,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
- int ret;
+ struct mlx5_core_dev *pos;
+ u32 rx_out_of_buffer = 0;
+ int ret, i;
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- if (priv->q_counter) {
- MLX5_SET(query_q_counter_in, in, counter_set_id,
- priv->q_counter);
- ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
- if (!ret)
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
- out, out_of_buffer);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ if (priv->q_counter[i]) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->q_counter[i]);
+ ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
+ if (!ret)
+ rx_out_of_buffer += MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
}
+ qcnt->rx_out_of_buffer = rx_out_of_buffer;
if (priv->drop_rq_q_counter) {
MLX5_SET(query_q_counter_in, in, counter_set_id,
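With one queue counter per Socket-Direct device, rx_out_of_buffer is now reported as the sum over the whole group. A hedged sketch of the aggregation pattern, using the mlx5_sd_for_each_dev iterator as in the hunk above; query_out_of_buffer() is a hypothetical helper standing in for the QUERY_Q_COUNTER command:

	/* Sketch only: sum a per-device counter over the SD group. */
	static u32 example_rx_out_of_buffer(struct mlx5e_priv *priv)
	{
		struct mlx5_core_dev *pos;
		u32 total = 0;
		int i;

		mlx5_sd_for_each_dev(i, priv->mdev, pos)
			total += query_out_of_buffer(pos, priv->q_counter[i]);

		return total;
	}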
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fb2c057bd78..31ed26cac9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -766,7 +766,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
return err;
mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
- err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
+ err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, NULL, hp->num_channels,
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
&indir);
@@ -1169,7 +1169,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.q_counter = priv->q_counter;
+ params.q_counter = priv->q_counter[0];
err = devl_param_driverinit_value_get(
devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5c166d9d2dca..2fa076b23fbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ /* ensure skb is put on metadata_map before tracking the index */
+ wmb();
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
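The added wmb() in mlx5e_txwqe_complete() enforces the usual publish order for the PTP metadata: the skb must be visible in metadata_map before its index is tracked, so a completion that observes the index never reads a stale map entry. In generic form the pattern looks like this (a sketch, not the driver's code):

	/* Producer: publish the payload, then make it discoverable. */
	map[idx] = skb;          /* store the payload               */
	wmb();                   /* order the store before ...      */
	track(idx);              /* ... publishing the index        */

	/* The consumer side pairs this with a read barrier (or an address
	 * dependency) between discovering the index and reading map[idx].
	 */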
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 190f10aba170..5a0047bdcb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+ if (!rpriv || !rpriv->netdev)
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b0455134c98e..baaae628b0a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
}
static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
- bool vf_dest = false, pf_dest = false;
+ bool internal_dest = false, external_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
continue;
- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
- pf_dest = true;
+ /* Uplink dest is external, but considered as internal
+ * if there is reformat because firmware uses LB+hairpin to support it.
+ */
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+ external_dest = true;
else
- vf_dest = true;
+ internal_dest = true;
- if (vf_dest && pf_dest)
+ if (internal_dest && external_dest)
return true;
}
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
- esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_dests_to_int_external(dest, i)) {
esw_warn(esw->dev,
- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct net *devl_net, *netdev_net;
- bool ret = false;
-
- mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
- if (dev->mlx5e_res.uplink_netdev) {
- netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
- ret = net_eq(devl_net, netdev_net);
- }
- mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
- return ret;
-}
-
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
- return -EPERM;
- }
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
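The renamed esw_dests_to_int_external() generalizes the old VF/PF check: what is rejected together with header rewrite is a mix of "internal" and "external" destinations, where an uplink destination counts as external only when it carries no reformat (with reformat, firmware implements it via loopback plus hairpin, which behaves as internal). Summarized as a sketch, based on the hunk above:

	/* Destination classification used by the check above:
	 *
	 *   VPORT (VF/PF vport)                           -> internal
	 *   UPLINK with MLX5_FLOW_DEST_VPORT_REFORMAT_ID  -> internal (LB+hairpin)
	 *   UPLINK without reformat                       -> external
	 *
	 * A header-rewrite rule mixing internal and external destinations is
	 * refused with -EINVAL.
	 */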
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 58f4c0d0fafa..e7faf7e73ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
return -EIO;
}
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);
/* Loop until device state turns to disable */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
cond_resched();
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index f27eab6e4929..2911aa34a5be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
mlx5_eq_notifier_register(dev, &fw_reset->nb);
}
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return;
+
+ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
}
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
@@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ struct mlx5_fw_reset *fw_reset;
int err;
+ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+ return 0;
+
+ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
if (!fw_reset)
return -ENOMEM;
fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
devl_params_unregister(priv_to_devlink(dev),
mlx5_fw_reset_devlink_params,
ARRAY_SIZE(mlx5_fw_reset_devlink_params));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8ff6dc9bc803..ad38e31822df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
return MLX5_SENSOR_PCI_COMM_ERR;
if (pci_channel_offline(dev->pdev))
return MLX5_SENSOR_PCI_ERR;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
return MLX5_SENSOR_NIC_DISABLED;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
return MLX5_SENSOR_NIC_SW_RESET;
if (sensor_fw_synd_rfr(dev))
return MLX5_SENSOR_FW_SYND_RFR;
@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);
return true;
}
@@ -246,13 +246,13 @@ recover_from_sw_reset:
/* Recover from SW reset */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
msleep(20);
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
- case MLX5_NIC_IFC_FULL:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
break;
- case MLX5_NIC_IFC_DISABLED:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
mlx5_core_warn(dev, "starting teardown\n");
break;
- case MLX5_NIC_IFC_NO_DRAM_NIC:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
- case MLX5_NIC_IFC_SW_RESET:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
* 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
* and this is a VF), this is not recoverable by SW reset.
* Logging of this is handled elsewhere.
* 2. FW reset has been issued by another function, driver can
* be reloaded to recover after the mode switches to
- * MLX5_NIC_IFC_DISABLED.
+ * MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset in progress\n");
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct health_buffer __iomem *h = health->health;
u8 synd = ioread8(&h->synd);
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
if (!synd)
return 0;
- devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
return 0;
@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
&fw_reporter_ctx);
}
-static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
.dump = mlx5_fw_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = mlx5_fw_reporter_diagnose,
+};
+
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
-static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+};
+
#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
+ const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
+ const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
u64 grace_period;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
+ fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
} else if (mlx5_core_is_pf(dev)) {
@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
} else {
/* VF or SF */
grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
+ fw_ops = &mlx5_fw_reporter_ops;
}
health->fw_reporter =
- devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, dev);
+ devl_health_reporter_create(devlink, fw_ops, 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
devl_health_reporter_create(devlink,
- &mlx5_fw_fatal_reporter_ops,
+ fw_fatal_ops,
grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index ec32b686f586..d58032dd0df7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -10,6 +10,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
+ MLX5_DEVCOM_SD_GROUP,
MLX5_DEVCOM_NUM_COMPONENTS,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2b5826a785c4..37d5f445598c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -54,4 +54,16 @@ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *md
{
return mdev->mlx5e_res.uplink_netdev;
}
+
+struct mlx5_sd;
+
+static inline struct mlx5_sd *mlx5_get_sd(struct mlx5_core_dev *dev)
+{
+ return dev->sd;
+}
+
+static inline void mlx5_set_sd(struct mlx5_core_dev *dev, struct mlx5_sd *sd)
+{
+ dev->sd = sd;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
new file mode 100644
index 000000000000..5b28084e8a03
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "lib/sd.h"
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+#include "fs_cmd.h"
+#include <linux/mlx5/vport.h>
+#include <linux/debugfs.h>
+
+#define sd_info(__dev, format, ...) \
+ dev_info((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
+#define sd_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
+
+struct mlx5_sd {
+ u32 group_id;
+ u8 host_buses;
+ struct mlx5_devcom_comp_dev *devcom;
+ struct dentry *dfs;
+ bool primary;
+ union {
+ struct { /* primary */
+ struct mlx5_core_dev *secondaries[MLX5_SD_MAX_GROUP_SZ - 1];
+ struct mlx5_flow_table *tx_ft;
+ };
+ struct { /* secondary */
+ struct mlx5_core_dev *primary_dev;
+ u32 alias_obj_id;
+ };
+ };
+};
+
+static int mlx5_sd_get_host_buses(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ if (!sd)
+ return 1;
+
+ return sd->host_buses;
+}
+
+static struct mlx5_core_dev *mlx5_sd_get_primary(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ if (!sd)
+ return dev;
+
+ return sd->primary ? dev : sd->primary_dev;
+}
+
+struct mlx5_core_dev *
+mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx)
+{
+ struct mlx5_sd *sd;
+
+ if (idx == 0)
+ return primary;
+
+ if (idx >= mlx5_sd_get_host_buses(primary))
+ return NULL;
+
+ sd = mlx5_get_sd(primary);
+ return sd->secondaries[idx - 1];
+}
+
+int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix)
+{
+ return ch_ix % mlx5_sd_get_host_buses(dev);
+}
+
+int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix)
+{
+ return ch_ix / mlx5_sd_get_host_buses(dev);
+}
+
+struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix)
+{
+ int mdev_idx = mlx5_sd_ch_ix_get_dev_ix(primary, ch_ix);
+
+ return mlx5_sd_primary_get_peer(primary, mdev_idx);
+}
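The two channel-index helpers above interleave channels across the devices of a Socket-Direct group: the remainder of ch_ix by the number of host buses selects the device, the quotient selects the per-device vector. A standalone sketch of that mapping, assuming a group of two host buses:

#include <stdio.h>

/* Mirror of the ch_ix -> (device, vector) split used above. */
static int ch_to_dev(int ch_ix, int host_buses) { return ch_ix % host_buses; }
static int ch_to_vec(int ch_ix, int host_buses) { return ch_ix / host_buses; }

int main(void)
{
	int host_buses = 2;	/* assumed group size */

	for (int ch = 0; ch < 6; ch++)
		printf("channel %d -> dev %d, vec %d\n",
		       ch, ch_to_dev(ch, host_buses), ch_to_vec(ch, host_buses));
	/* channels 0,2,4 land on dev 0 (vectors 0,1,2); 1,3,5 on dev 1 */
	return 0;
}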
+
+static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
+{
+ u64 obj_allowed = MLX5_CAP_GEN_2_64(dev, allowed_object_for_other_vhca_access);
+ u32 obj_supp = MLX5_CAP_GEN_2(dev, cross_vhca_object_to_object_supported);
+
+ if (!(obj_supp &
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE))
+ return false;
+
+ if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
+ return false;
+
+ return true;
+}
+
+static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
+{
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return false;
+
+ /* Honor the SW implementation limit */
+ if (host_buses > MLX5_SD_MAX_GROUP_SZ)
+ return false;
+
+ /* Disconnect secondaries from the network */
+ if (!MLX5_CAP_GEN(dev, eswitch_manager))
+ return false;
+ if (!MLX5_CAP_GEN(dev, silent_mode))
+ return false;
+
+ /* RX steering from primary to secondaries */
+ if (!MLX5_CAP_GEN(dev, cross_vhca_rqt))
+ return false;
+ if (host_buses > MLX5_CAP_GEN_2(dev, max_rqt_vhca_id))
+ return false;
+
+ /* TX steering from secondaries to primary */
+ if (!ft_create_alias_supported(dev))
+ return false;
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ return false;
+
+ return true;
+}
+
+static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
+ u8 *host_buses, u8 *sd_group)
+{
+ u32 out[MLX5_ST_SZ_DW(mpir_reg)];
+ int err;
+
+ err = mlx5_query_mpir_reg(dev, out);
+ if (err)
+ return err;
+
+ err = mlx5_query_nic_vport_sd_group(dev, sd_group);
+ if (err)
+ return err;
+
+ *sdm = MLX5_GET(mpir_reg, out, sdm);
+ *host_buses = MLX5_GET(mpir_reg, out, host_buses);
+
+ return 0;
+}
+
+static u32 mlx5_sd_group_id(struct mlx5_core_dev *dev, u8 sd_group)
+{
+ return (u32)((MLX5_CAP_GEN(dev, native_port_num) << 8) | sd_group);
+}
+
+static int sd_init(struct mlx5_core_dev *dev)
+{
+ u8 host_buses, sd_group;
+ struct mlx5_sd *sd;
+ u32 group_id;
+ bool sdm;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ return 0;
+
+ err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ if (err)
+ return err;
+
+ if (!sdm)
+ return 0;
+
+ if (!sd_group)
+ return 0;
+
+ group_id = mlx5_sd_group_id(dev, sd_group);
+
+ if (!mlx5_sd_is_supported(dev, host_buses)) {
+ sd_warn(dev, "can't support requested netdev combining for group id 0x%x, skipping\n",
+ group_id);
+ return 0;
+ }
+
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ sd->host_buses = host_buses;
+ sd->group_id = group_id;
+
+ mlx5_set_sd(dev, sd);
+
+ return 0;
+}
+
+static void sd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ mlx5_set_sd(dev, NULL);
+ kfree(sd);
+}
+
+static int sd_register(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_core_dev *peer, *primary;
+ struct mlx5_sd *sd, *primary_sd;
+ int err, i;
+
+ sd = mlx5_get_sd(dev);
+ devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
+ sd->group_id, NULL, dev);
+ if (!devcom)
+ return -ENOMEM;
+
+ sd->devcom = devcom;
+
+ if (mlx5_devcom_comp_get_size(devcom) != sd->host_buses)
+ return 0;
+
+ mlx5_devcom_comp_lock(devcom);
+ mlx5_devcom_comp_set_ready(devcom, true);
+ mlx5_devcom_comp_unlock(devcom);
+
+ if (!mlx5_devcom_for_each_peer_begin(devcom)) {
+ err = -ENODEV;
+ goto err_devcom_unreg;
+ }
+
+ primary = dev;
+ mlx5_devcom_for_each_peer_entry(devcom, peer, pos)
+ if (peer->pdev->bus->number < primary->pdev->bus->number)
+ primary = peer;
+
+ primary_sd = mlx5_get_sd(primary);
+ primary_sd->primary = true;
+ i = 0;
+ /* loop over the secondaries */
+ mlx5_devcom_for_each_peer_entry(primary_sd->devcom, peer, pos) {
+ struct mlx5_sd *peer_sd = mlx5_get_sd(peer);
+
+ primary_sd->secondaries[i++] = peer;
+ peer_sd->primary = false;
+ peer_sd->primary_dev = primary;
+ }
+
+ mlx5_devcom_for_each_peer_end(devcom);
+ return 0;
+
+err_devcom_unreg:
+ mlx5_devcom_comp_lock(sd->devcom);
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
+ mlx5_devcom_unregister_component(sd->devcom);
+ return err;
+}
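sd_register() above elects the group primary as the member with the lowest PCI bus number once every expected peer has joined the devcom component. A small sketch of that election over a plain array of peers (the struct and bus numbers below are made up, not the real devcom iterator):

#include <stdio.h>

struct peer {
	const char *pci_name;
	int bus;	/* PCI bus number used as the tie-breaker */
};

/* Pick the peer with the lowest bus number, like sd_register() does. */
static const struct peer *elect_primary(const struct peer *peers, int n)
{
	const struct peer *primary = &peers[0];

	for (int i = 1; i < n; i++)
		if (peers[i].bus < primary->bus)
			primary = &peers[i];
	return primary;
}

int main(void)
{
	const struct peer group[] = {
		{ "0000:21:00.0", 0x21 },
		{ "0000:01:00.0", 0x01 },
	};

	printf("primary: %s\n", elect_primary(group, 2)->pci_name);	/* 0000:01:00.0 */
	return 0;
}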
+
+static void sd_unregister(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ mlx5_devcom_comp_lock(sd->devcom);
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
+ mlx5_devcom_unregister_component(sd->devcom);
+}
+
+static int sd_cmd_set_primary(struct mlx5_core_dev *primary, u8 *alias_key)
+{
+ struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *nic_ns;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ nic_ns = mlx5_get_flow_namespace(primary, MLX5_FLOW_NAMESPACE_EGRESS);
+ if (!nic_ns)
+ return -EOPNOTSUPP;
+
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ return err;
+ }
+ sd->tx_ft = ft;
+ memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ allow_attr.obj_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+
+ err = mlx5_cmd_allow_other_vhca_access(primary, &allow_attr);
+ if (err) {
+ mlx5_core_err(primary, "Failed to allow other vhca access err=%d\n",
+ err);
+ mlx5_destroy_flow_table(ft);
+ return err;
+ }
+
+ return 0;
+}
+
+static void sd_cmd_unset_primary(struct mlx5_core_dev *primary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+
+ mlx5_destroy_flow_table(sd->tx_ft);
+}
+
+static int sd_secondary_create_alias_ft(struct mlx5_core_dev *secondary,
+ struct mlx5_core_dev *primary,
+ struct mlx5_flow_table *ft,
+ u32 *obj_id, u8 *alias_key)
+{
+ u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+ u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(primary, vhca_id);
+ struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
+ int ret;
+
+ memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ alias_attr.obj_id = aliased_object_id;
+ alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ alias_attr.vhca_id = vhca_id_to_be_accessed;
+ ret = mlx5_cmd_alias_obj_create(secondary, &alias_attr, obj_id);
+ if (ret) {
+ mlx5_core_err(secondary, "Failed to create alias object err=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sd_secondary_destroy_alias_ft(struct mlx5_core_dev *secondary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(secondary);
+
+ mlx5_cmd_alias_obj_destroy(secondary, sd->alias_obj_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+}
+
+static int sd_cmd_set_secondary(struct mlx5_core_dev *secondary,
+ struct mlx5_core_dev *primary,
+ u8 *alias_key)
+{
+ struct mlx5_sd *primary_sd = mlx5_get_sd(primary);
+ struct mlx5_sd *sd = mlx5_get_sd(secondary);
+ int err;
+
+ err = mlx5_fs_cmd_set_l2table_entry_silent(secondary, 1);
+ if (err)
+ return err;
+
+ err = sd_secondary_create_alias_ft(secondary, primary, primary_sd->tx_ft,
+ &sd->alias_obj_id, alias_key);
+ if (err)
+ goto err_unset_silent;
+
+ err = mlx5_fs_cmd_set_tx_flow_table_root(secondary, sd->alias_obj_id, false);
+ if (err)
+ goto err_destroy_alias_ft;
+
+ return 0;
+
+err_destroy_alias_ft:
+ sd_secondary_destroy_alias_ft(secondary);
+err_unset_silent:
+ mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
+ return err;
+}
+
+static void sd_cmd_unset_secondary(struct mlx5_core_dev *secondary)
+{
+ mlx5_fs_cmd_set_tx_flow_table_root(secondary, 0, true);
+ sd_secondary_destroy_alias_ft(secondary);
+ mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
+}
+
+static void sd_print_group(struct mlx5_core_dev *primary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+ struct mlx5_core_dev *pos;
+ int i;
+
+ sd_info(primary, "group id %#x, primary %s, vhca %#x\n",
+ sd->group_id, pci_name(primary->pdev),
+ MLX5_CAP_GEN(primary, vhca_id));
+ mlx5_sd_for_each_secondary(i, primary, pos)
+ sd_info(primary, "group id %#x, secondary_%d %s, vhca %#x\n",
+ sd->group_id, i - 1, pci_name(pos->pdev),
+ MLX5_CAP_GEN(pos, vhca_id));
+}
+
+static ssize_t dev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_core_dev *dev;
+ char tbuf[32];
+ int ret;
+
+ dev = filp->private_data;
+ ret = snprintf(tbuf, sizeof(tbuf), "%s vhca %#x\n", pci_name(dev->pdev),
+ MLX5_CAP_GEN(dev, vhca_id));
+
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations dev_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = dev_read,
+};
+
+int mlx5_sd_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_dev *primary, *pos, *to;
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ u8 alias_key[ACCESS_KEY_LEN];
+ int err, i;
+
+ err = sd_init(dev);
+ if (err)
+ return err;
+
+ sd = mlx5_get_sd(dev);
+ if (!sd)
+ return 0;
+
+ err = sd_register(dev);
+ if (err)
+ goto err_sd_cleanup;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ return 0;
+
+ primary = mlx5_sd_get_primary(dev);
+
+ for (i = 0; i < ACCESS_KEY_LEN; i++)
+ alias_key[i] = get_random_u8();
+
+ err = sd_cmd_set_primary(primary, alias_key);
+ if (err)
+ goto err_sd_unregister;
+
+ sd->dfs = debugfs_create_dir("multi-pf", mlx5_debugfs_get_dev_root(primary));
+ debugfs_create_x32("group_id", 0400, sd->dfs, &sd->group_id);
+ debugfs_create_file("primary", 0400, sd->dfs, primary, &dev_fops);
+
+ mlx5_sd_for_each_secondary(i, primary, pos) {
+ char name[32];
+
+ err = sd_cmd_set_secondary(pos, primary, alias_key);
+ if (err)
+ goto err_unset_secondaries;
+
+ snprintf(name, sizeof(name), "secondary_%d", i - 1);
+ debugfs_create_file(name, 0400, sd->dfs, pos, &dev_fops);
+ }
+
+ sd_info(primary, "group id %#x, size %d, combined\n",
+ sd->group_id, mlx5_devcom_comp_get_size(sd->devcom));
+ sd_print_group(primary);
+
+ return 0;
+
+err_unset_secondaries:
+ to = pos;
+ mlx5_sd_for_each_secondary_to(i, primary, to, pos)
+ sd_cmd_unset_secondary(pos);
+ sd_cmd_unset_primary(primary);
+ debugfs_remove_recursive(sd->dfs);
+err_sd_unregister:
+ sd_unregister(dev);
+err_sd_cleanup:
+ sd_cleanup(dev);
+ return err;
+}
+
+void mlx5_sd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ struct mlx5_core_dev *primary, *pos;
+ int i;
+
+ if (!sd)
+ return;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ goto out;
+
+ primary = mlx5_sd_get_primary(dev);
+ mlx5_sd_for_each_secondary(i, primary, pos)
+ sd_cmd_unset_secondary(pos);
+ sd_cmd_unset_primary(primary);
+ debugfs_remove_recursive(sd->dfs);
+
+ sd_info(primary, "group id %#x, uncombined\n", sd->group_id);
+out:
+ sd_unregister(dev);
+ sd_cleanup(dev);
+}
+
+struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
+ struct auxiliary_device *adev,
+ int idx)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ struct mlx5_core_dev *primary;
+
+ if (!sd)
+ return adev;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ return NULL;
+
+ primary = mlx5_sd_get_primary(dev);
+ if (dev == primary)
+ return adev;
+
+ return &primary->priv.adev[idx]->adev;
+}
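Together, sd_cmd_set_primary() and sd_cmd_set_secondary() wire TX steering across the group: mlx5_sd_init() generates one random access key, the primary creates an egress flow table and allows cross-VHCA access to it under that key, and each secondary creates an alias object for the table with the same key and installs it as its TX root. A userspace model of that handshake in data only (KEY_LEN and the structs below are stand-ins; no firmware commands are involved):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_LEN 32	/* assumed length, stands in for ACCESS_KEY_LEN */

struct shared_ft {		/* what the primary exposes */
	unsigned int obj_id;	/* (ft->type << type-offset) | ft->id in the real code */
	unsigned char key[KEY_LEN];
};

struct alias {			/* what each secondary installs as its TX root */
	unsigned int target_obj_id;
	unsigned char key[KEY_LEN];
};

/* Secondary side: alias creation succeeds only with the matching access key. */
static int create_alias(struct alias *a, const struct shared_ft *ft,
			const unsigned char *key)
{
	if (memcmp(key, ft->key, KEY_LEN) != 0)
		return -1;	/* firmware would reject a mismatched key */
	a->target_obj_id = ft->obj_id;
	memcpy(a->key, key, KEY_LEN);
	return 0;
}

int main(void)
{
	unsigned char key[KEY_LEN];
	struct shared_ft ft = { .obj_id = 0x10 };
	struct alias secondary_alias;

	/* One random key, handed to both sides (as mlx5_sd_init() does). */
	for (int i = 0; i < KEY_LEN; i++)
		key[i] = ft.key[i] = rand() & 0xff;

	if (create_alias(&secondary_alias, &ft, key) == 0)
		printf("alias to obj %#x installed as TX root\n",
		       secondary_alias.target_obj_id);
	return 0;
}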
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
new file mode 100644
index 000000000000..137efaf9aabc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_SD_H__
+#define __MLX5_LIB_SD_H__
+
+#define MLX5_SD_MAX_GROUP_SZ 2
+
+struct mlx5_sd;
+
+struct mlx5_core_dev *mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx);
+int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix);
+int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix);
+struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix);
+struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
+ struct auxiliary_device *adev,
+ int idx);
+
+int mlx5_sd_init(struct mlx5_core_dev *dev);
+void mlx5_sd_cleanup(struct mlx5_core_dev *dev);
+
+#define mlx5_sd_for_each_dev_from_to(i, primary, ix_from, to, pos) \
+ for (i = ix_from; \
+ (pos = mlx5_sd_primary_get_peer(primary, i)) && pos != (to); i++)
+
+#define mlx5_sd_for_each_dev(i, primary, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 0, NULL, pos)
+
+#define mlx5_sd_for_each_dev_to(i, primary, to, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 0, to, pos)
+
+#define mlx5_sd_for_each_secondary(i, primary, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 1, NULL, pos)
+
+#define mlx5_sd_for_each_secondary_to(i, primary, to, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 1, to, pos)
+
+#endif /* __MLX5_LIB_SD_H__ */
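The iterator macros above are thin index loops over mlx5_sd_primary_get_peer(): index 0 is the primary, indices 1..host_buses-1 are the secondaries, and the *_to variants stop early at a given device, which is how the error path of mlx5_sd_init() unwinds only the secondaries it already configured. A sketch of the same loop shape over a plain array, with a hypothetical get_peer() in place of the driver call:

#include <stdio.h>

#define GROUP_SZ 3	/* assumed: one primary + two secondaries */

static const char *devs[GROUP_SZ] = { "primary", "secondary_0", "secondary_1" };

/* Index-based lookup, NULL past the end: same contract as mlx5_sd_primary_get_peer(). */
static const char *get_peer(int idx)
{
	return idx < GROUP_SZ ? devs[idx] : NULL;
}

#define for_each_dev_from_to(i, from, to, pos) \
	for (i = (from); ((pos) = get_peer(i)) && (pos) != (to); i++)

int main(void)
{
	const char *pos, *failed = devs[2];	/* pretend setup failed on this one */
	int i;

	/* Full secondary walk (start at index 1, run until NULL). */
	for_each_dev_from_to(i, 1, NULL, pos)
		printf("configure %s\n", pos);

	/* Partial unwind: only the devices configured before the failure. */
	for_each_dev_from_to(i, 1, failed, pos)
		printf("unwind %s\n", pos);	/* prints secondary_0 only */
	return 0;
}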
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bccf6e53556c..c2593625c09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
- u32 warn_time_mili)
+ u32 warn_time_mili, const char *init_state)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
u32 fw_initializing;
- int err = 0;
do {
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
- if (time_after(jiffies, end) ||
- test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
- err = -EBUSY;
- break;
+ if (time_after(jiffies, end)) {
+ mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
+ max_wait_mili, init_state);
+ return -ETIMEDOUT;
+ }
+ if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
+ init_state);
+ return -ENODEV;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
- jiffies_to_msecs(end - warn) / 1000, fw_initializing);
+ mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
+ init_state, jiffies_to_msecs(end - warn) / 1000,
+ fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
} while (true);
- return err;
+ return 0;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
@@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
- mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
- timeout);
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
+ "pre-initializing");
+ if (err)
return err;
- }
err = mlx5_cmd_enable(dev);
if (err) {
@@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_tout_query_iseg(dev);
- err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
- mlx5_tout_ms(dev, FW_INIT));
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
+ if (err)
goto err_cmd_cleanup;
- }
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
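The reworked wait_fw_init() distinguishes three exits: the initializing bit clears (success), the deadline passes (-ETIMEDOUT, naming the state in the error), or MLX5_BREAK_FW_WAIT is set because the device is being removed (-ENODEV). A userspace sketch of that polling shape, with plain flags standing in for the register read and the intf_state bit:

#include <errno.h>
#include <stdio.h>
#include <time.h>

static volatile int fw_initializing = 1;	/* stands in for the iseg bit */
static volatile int break_wait;			/* stands in for MLX5_BREAK_FW_WAIT */

static int wait_fw_init(double max_wait_s, double warn_every_s, const char *state)
{
	time_t start = time(NULL), warn = start + (time_t)warn_every_s;

	while (fw_initializing) {
		time_t now = time(NULL);

		if (now - start > max_wait_s) {
			fprintf(stderr, "firmware stuck in %s state\n", state);
			return -ETIMEDOUT;
		}
		if (break_wait) {
			fprintf(stderr, "removal in progress, stop waiting (%s)\n", state);
			return -ENODEV;
		}
		if (warn_every_s && now >= warn) {
			fprintf(stderr, "still waiting for firmware %s\n", state);
			warn = now + (time_t)warn_every_s;
		}
		/* the real code sleeps FW_PRE_INIT_WAIT between polls */
	}
	return 0;
}

int main(void)
{
	fw_initializing = 0;	/* pretend firmware is already done */
	printf("wait_fw_init -> %d\n", wait_fw_init(120, 20, "pre-initializing"));
	return 0;
}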
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a79b7959361b..58732f44940f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
return ret;
}
-enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_SW_RESET = 7
-};
-
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index c93492b67788..99219ea52c4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
+ struct mlx5_sf_dev *sf_dev)
{
int id;
@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
return;
xa_err:
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
}
static int
@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(table->dev, sf_dev);
+ mlx5_sf_dev_remove_aux(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 169c2c68ed5c..bc863e1f062e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -95,24 +95,29 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+ struct devlink *devlink;
- mlx5_drain_health_wq(sf_dev->mdev);
+ devlink = priv_to_devlink(mdev);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
devlink_unregister(devlink);
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
+ if (mlx5_dev_is_lightweight(mdev))
+ mlx5_uninit_one_light(mdev);
else
- mlx5_uninit_one(sf_dev->mdev);
- iounmap(sf_dev->mdev->iseg);
- mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_uninit_one(mdev);
+ iounmap(mdev->iseg);
+ mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
- mlx5_unload_one(sf_dev->mdev, false);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7e36e1062139..64f4cc284aea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -54,6 +54,107 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
+static struct mlx5dr_dbg_dump_buff *
+mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *new_buff;
+
+ new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
+ if (!new_buff)
+ return NULL;
+
+ new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
+ if (!new_buff->buff) {
+ kfree(new_buff);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&new_buff->node);
+ list_add_tail(&new_buff->node, &dump_data->buff_list);
+
+ return new_buff;
+}
+
+static struct mlx5dr_dbg_dump_data *
+mlx5dr_dbg_create_dump_data(void)
+{
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
+ if (!dump_data)
+ return NULL;
+
+ INIT_LIST_HEAD(&dump_data->buff_list);
+
+ if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
+ kfree(dump_data);
+ return NULL;
+ }
+
+ return dump_data;
+}
+
+static void
+mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
+
+ if (!dump_data)
+ return;
+
+ list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
+ kvfree(dump_buff->buff);
+ list_del(&dump_buff->node);
+ kfree(dump_buff);
+ }
+
+ kfree(dump_data);
+}
+
+static int
+mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ struct mlx5dr_dbg_dump_buff *buff;
+ u32 buff_capacity, write_size;
+ int remain_size, ret;
+
+ if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
+ return -EINVAL;
+
+ dump_data = dmn->dump_info.dump_data;
+ buff = list_last_entry(&dump_data->buff_list,
+ struct mlx5dr_dbg_dump_buff, node);
+
+ buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
+ remain_size = buff_capacity - size;
+ write_size = (remain_size > 0) ? size : buff_capacity;
+
+ if (likely(write_size)) {
+ ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
+ if (ret < 0)
+ return ret;
+
+ buff->index += write_size;
+ }
+
+ if (remain_size < 0) {
+ remain_size *= -1;
+ buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
+ if (!buff)
+ return -ENOMEM;
+
+ ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
+ if (ret < 0)
+ return ret;
+
+ buff->index += remain_size;
+ }
+
+ return 0;
+}
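mlx5dr_dbg_dump_data_print() above appends each formatted record to the last buffer in the list and, when the record does not fit, writes the part that fits and spills the remainder into a freshly allocated buffer chained at the tail. A standalone sketch of that spill-over append, assuming a tiny buffer size so the split is visible (the real buffers are 64 MB):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUFF_SZ 16	/* assumed tiny capacity so the spill is easy to see */

struct buff {
	char data[BUFF_SZ];
	int index;
	struct buff *next;
};

static struct buff *new_buff(struct buff **tail)
{
	struct buff *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	(*tail)->next = b;
	*tail = b;
	return b;
}

/* Append str, spilling whatever does not fit into a freshly chained buffer. */
static int append(struct buff **tail, const char *str)
{
	struct buff *b = *tail;
	int len = (int)strlen(str);
	int cap = (BUFF_SZ - 1) - b->index;	/* keep room for '\0' */
	int write = len <= cap ? len : cap;

	memcpy(b->data + b->index, str, write);
	b->index += write;
	if (write < len) {			/* remainder goes to a new buffer */
		if (!new_buff(tail))
			return -1;
		return append(tail, str + write);
	}
	return 0;
}

int main(void)
{
	struct buff *head = calloc(1, sizeof(*head)), *tail = head;

	if (!head)
		return 1;
	append(&tail, "1,0x1234,0x5678\n");	/* 16 chars: the last one spills */
	while (head) {
		struct buff *next = head->next;

		printf("[%s]\n", head->data);
		free(head);
		head = next;
	}
	return 0;
}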
+
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
@@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
+ int ret;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id,
- -1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id,
- DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_CTR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
- action->ctr->ctr_id + action->ctr->offset);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TAG:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
- action->flow_tag->flow_tag);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_MODIFY_HDR:
{
@@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
- DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
- rule_id, action->rewrite->index,
- action->rewrite->single_action_opt,
- ptrn_arg ? action->rewrite->num_of_actions : 0,
- ptrn_arg ? ptrn->index : 0,
- ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index,
+ action->rewrite->single_action_opt,
+ ptrn_arg ? action->rewrite->num_of_actions : 0,
+ ptrn_arg ? ptrn->index : 0,
+ ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (ptrn_arg) {
for (i = 0; i < action->rewrite->num_of_actions; i++) {
- seq_printf(file, ",0x%016llx",
- be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ ",0x%016llx",
+ be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
}
- seq_puts(file, "\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
}
case DR_ACTION_TYP_VPORT:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
- action->vport->caps->num);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
- rule_id,
- (action->rewrite->ptrn && action->rewrite->arg) ?
- mlx5dr_arg_get_obj_id(action->rewrite->arg) :
- action->rewrite->index);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id,
+ (action->rewrite->ptrn && action->rewrite->arg) ?
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+ action->rewrite->index);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L3:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_POP_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
- rule_id, action->push_vlan->vlan_hdr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_INSERT_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_REMOVE_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_SAMPLER:
- seq_printf(file,
- "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
- 0, 0, action->sampler->sampler_id,
- action->sampler->rx_icm_addr,
- action->sampler->tx_icm_addr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
+ rule_id, 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
@@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
- hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
- action->range->definer_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
+ rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
+ miss_tbl_ptr, action->range->definer_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
default:
return 0;
@@ -263,8 +491,10 @@ static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
+ int ret;
if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
@@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
DR_STE_SIZE_REDUCED);
- seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
- dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
- hw_ste_dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
+ rule_id, hw_ste_dump);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_rule_rx_tx *rx = &rule->rx;
struct mlx5dr_rule_rx_tx *tx = &rule->tx;
u8 format_ver;
@@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
- seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
- DR_DBG_PTR_TO_ID(rule->matcher));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
+ rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_matcher) {
ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
@@ -344,46 +589,94 @@ static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
+ int ret;
- seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
- matcher_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
+ DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (criteria & DR_MATCHER_CRITERIA_OUTER) {
dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_INNER) {
dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC) {
dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC2) {
dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC3) {
dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
- seq_printf(file, "%s\n", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s\n", dump);
} else {
- seq_puts(file, ",\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -391,9 +684,19 @@ static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
- DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
- builder->lu_type);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
+ is_rx, builder->lu_type);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
- rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
- matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(s_icm_addr),
- dr_dump_icm_to_idx(e_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
+
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 matcher_id;
int ret;
matcher_id = DR_DBG_PTR_TO_ID(matcher);
- seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
- matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
+ matcher->prio);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_matcher_mask(file, &matcher->mask,
matcher->match_criteria, matcher_id);
@@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
+ int ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(s_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(s_icm_addr));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
- seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
- DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
- table->table_type, table->level);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_dmn) {
ret = dr_dump_table_rx_tx(file, true, rx,
@@ -546,46 +884,86 @@ static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
- domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
+ DR_DBG_PTR_TO_ID(ring), domain_id,
+ ring->cq->mcq.cqn, ring->qp->qpn);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_flex_parser(struct seq_file *file,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,%s,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
- flex_parser_name, flex_parser_value);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
+ int ret;
xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
; /* count the number of vports in xarray */
- seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
- caps->nic_rx_drop_address, caps->nic_tx_drop_address,
- caps->flex_protocols, vports_num, caps->eswitch_manager);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
- seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
- vport_caps->vport_gvmi, vport_caps->icm_address_rx,
- vport_caps->icm_address_tx);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
+ domain_id, i, vport_caps->vport_gvmi,
+ vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
int ret;
- seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
- DR_DUMP_REC_TYPE_DOMAIN,
- domain_id, dmn->type, dmn->info.caps.gvmi,
- dmn->info.supp_sw_steering,
- /* package version */
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL,
- pci_name(dmn->mdev->pdev),
- 0, /* domain flags */
- dmn->num_buddies[DR_ICM_TYPE_STE],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
+ DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering,
+ /* package version */
+ LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL,
+ pci_name(dmn->mdev->pdev),
+ 0, /* domain flags */
+ dmn->num_buddies[DR_ICM_TYPE_STE],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_domain_info(file, &dmn->info, domain_id);
if (ret < 0)
@@ -683,11 +1069,91 @@ unlock_mutex:
return ret;
}
-static int dr_dump_show(struct seq_file *file, void *priv)
+static void *
+dr_dump_start(struct seq_file *file, loff_t *pos)
{
- return dr_dump_domain_all(file, file->private);
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
+ mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
+ dump_data = dmn->dump_info.dump_data;
+
+ if (dump_data) {
+ return seq_list_start(&dump_data->buff_list, *pos);
+ } else if (*pos == 0) {
+ dump_data = mlx5dr_dbg_create_dump_data();
+ if (!dump_data)
+ goto exit;
+
+ dmn->dump_info.dump_data = dump_data;
+ if (dr_dump_domain_all(file, dmn)) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ goto exit;
+ }
+
+ return seq_list_start(&dump_data->buff_list, *pos);
+ }
+
+exit:
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+ return NULL;
}
-DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
+static void *
+dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = dmn->dump_info.dump_data;
+
+ return seq_list_next(v, &dump_data->buff_list, pos);
+}
+
+static void
+dr_dump_stop(struct seq_file *file, void *v)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (v && IS_ERR(v))
+ return;
+
+ if (!v) {
+ dump_data = dmn->dump_info.dump_data;
+ if (dump_data) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ }
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+}
+
+static int
+dr_dump_show(struct seq_file *file, void *v)
+{
+ struct mlx5dr_dbg_dump_buff *entry;
+
+ entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
+ seq_printf(file, "%s", entry->buff);
+
+ return 0;
+}
+
+static const struct seq_operations dr_dump_sops = {
+ .start = dr_dump_start,
+ .next = dr_dump_next,
+ .stop = dr_dump_stop,
+ .show = dr_dump_show,
+};
+DEFINE_SEQ_ATTRIBUTE(dr_dump);
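The dump file now follows the seq_file iterator contract instead of a one-shot show(): start() builds or resumes the buffer list at *pos, next() advances, show() emits one buffer, and stop() releases the dump data once the walk completes. A userspace model of that start/next/show/stop walk over a linked list (no kernel API involved):

#include <stdio.h>

struct node { const char *text; struct node *next; };

static struct node c = { "buffer 2\n", NULL };
static struct node b = { "buffer 1\n", &c };
static struct node a = { "buffer 0\n", &b };

static struct node *it_start(long *pos)
{
	struct node *n = &a;

	for (long i = 0; n && i < *pos; i++)	/* resume at *pos, like seq_list_start() */
		n = n->next;
	return n;
}

static struct node *it_next(struct node *n, long *pos) { ++*pos; return n->next; }
static void it_show(struct node *n) { fputs(n->text, stdout); }
static void it_stop(struct node *n) { if (!n) puts("(walk finished, free dump data)"); }

int main(void)
{
	long pos = 0;
	struct node *n;

	for (n = it_start(&pos); n; n = it_next(n, &pos))
		it_show(n);
	it_stop(n);
	return 0;
}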
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
index def6cf853eea..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -1,10 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+enum {
+ MLX5DR_DEBUG_DUMP_STATE_FREE,
+ MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+struct mlx5dr_dbg_dump_buff {
+ char *buff;
+ u32 index;
+ struct list_head node;
+};
+
+struct mlx5dr_dbg_dump_data {
+ struct list_head buff_list;
+};
+
struct mlx5dr_dbg_dump_info {
struct mutex dbg_mutex; /* protect dbg lists */
struct dentry *steering_debugfs;
struct dentry *fdb_debugfs;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ atomic_t state;
};
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 253d7ad9b809..8b63968bbee9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -124,6 +124,41 @@ static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
}
+static bool mlxbf_gige_llu_counters_enabled(struct mlxbf_gige *priv)
+{
+ u32 data;
+
+ if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF2_LLU_COUNTERS_EN)
+ return true;
+ } else {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF3_LLU_COUNTERS_EN)
+ return true;
+ }
+
+ return false;
+}
+
+static void mlxbf_gige_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ u64 data_lo, data_hi;
+
+ /* Read LLU counters to provide stats only if counters are enabled */
+ if (mlxbf_gige_llu_counters_enabled(priv)) {
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_HI);
+ pause_stats->tx_pause_frames = (data_hi << 32) | data_lo;
+
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_HI);
+ pause_stats->rx_pause_frames = (data_hi << 32) | data_lo;
+ }
+}
+
const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = mlxbf_gige_get_ringparam,
@@ -134,6 +169,7 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_ethtool_stats = mlxbf_gige_get_ethtool_stats,
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
+ .get_pause_stats = mlxbf_gige_get_pause_stats,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
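mlxbf_gige_get_pause_stats() assembles each 64-bit pause counter from two 32-bit register reads, with the BF2/BF3 offset choice hidden behind the *_PAUSE_CNT_HI/LO macros added in the following hunk. A small sketch of the combine, using made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit halves the same way the pause-stats hook does. */
static uint64_t combine(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t lo = 0x00000005, hi = 0x00000001;	/* made-up register reads */

	printf("tx_pause_frames = %llu\n",
	       (unsigned long long)combine(hi, lo));	/* 4294967301 */
	return 0;
}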
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index cd0973229c9b..98a8681c21b9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -99,4 +99,34 @@
#define MLXBF_GIGE_100M_IPG_SIZE 119
#define MLXBF_GIGE_10M_IPG_SIZE 1199
+/* Offsets into OOB LLU block for pause frame counters */
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI 0x33d8
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO 0x33dc
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI 0x3210
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO 0x3214
+
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI 0x3a88
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO 0x3a8c
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI 0x38c0
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO 0x38c4
+
+#define MLXBF_GIGE_TX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_TX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO)
+#define MLXBF_GIGE_RX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_RX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO)
+
+#define MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG 0x2110
+#define MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG 0x2030
+
+#define MLXBF_GIGE_BF2_LLU_COUNTERS_EN BIT(0)
+#define MLXBF_GIGE_BF3_LLU_COUNTERS_EN BIT(4)
+
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index faa63ea9b83e..1915fa41c622 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -95,7 +95,7 @@ struct mlxsw_afa_set {
*/
has_trap:1,
has_police:1;
- unsigned int ref_count;
+ refcount_t ref_count;
struct mlxsw_afa_set *next; /* Pointer to the next set. */
struct mlxsw_afa_set *prev; /* Pointer to the previous set,
* note that set may have multiple
@@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
struct rhash_head ht_node;
struct mlxsw_afa_fwd_entry_ht_key ht_key;
u32 kvdl_index;
- unsigned int ref_count;
+ refcount_t ref_count;
};
static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
@@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
/* Need to initialize the set to pass by default */
mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
set->ht_key.is_first = is_first;
- set->ref_count = 1;
+ refcount_set(&set->ref_count, 1);
return set;
}
@@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_set *set)
{
- if (--set->ref_count)
+ if (!refcount_dec_and_test(&set->ref_count))
return;
if (set->shared)
mlxsw_afa_set_unshare(mlxsw_afa, set);
@@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
mlxsw_afa_set_ht_params);
if (set) {
- set->ref_count++;
+ refcount_inc(&set->ref_count);
mlxsw_afa_set_put(mlxsw_afa, orig_set);
} else {
set = orig_set;
@@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
if (!fwd_entry)
return ERR_PTR(-ENOMEM);
fwd_entry->ht_key.local_port = local_port;
- fwd_entry->ref_count = 1;
+ refcount_set(&fwd_entry->ref_count, 1);
err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
&fwd_entry->ht_node,
@@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
mlxsw_afa_fwd_entry_ht_params);
if (fwd_entry) {
- fwd_entry->ref_count++;
+ refcount_inc(&fwd_entry->ref_count);
return fwd_entry;
}
return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
@@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_fwd_entry *fwd_entry)
{
- if (--fwd_entry->ref_count)
+ if (!refcount_dec_and_test(&fwd_entry->ref_count))
return;
mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}
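The conversion above turns open-coded unsigned int reference counts into refcount_t: the get path uses refcount_inc() and the put path frees only when refcount_dec_and_test() reports the last reference. A userspace sketch of the same get/put shape, using a C11 atomic as a stand-in for refcount_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fwd_entry {
	atomic_int ref;		/* stands in for refcount_t */
	int local_port;
};

static struct fwd_entry *entry_create(int port)
{
	struct fwd_entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	atomic_init(&e->ref, 1);	/* like refcount_set(&e->ref, 1) */
	e->local_port = port;
	return e;
}

static void entry_get(struct fwd_entry *e)
{
	atomic_fetch_add(&e->ref, 1);	/* like refcount_inc() */
}

static void entry_put(struct fwd_entry *e)
{
	/* like refcount_dec_and_test(): free only on the last reference */
	if (atomic_fetch_sub(&e->ref, 1) != 1)
		return;
	printf("freeing entry for port %d\n", e->local_port);
	free(e);
}

int main(void)
{
	struct fwd_entry *e = entry_create(8);

	if (!e)
		return 1;
	entry_get(e);	/* second user */
	entry_put(e);	/* still held */
	entry_put(e);	/* last put frees */
	return 0;
}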
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 0d5e6f9b466e..947500f8ed71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include "item.h"
#include "core_acl_flex_keys.h"
@@ -107,7 +108,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
struct mlxsw_afk_key_info {
struct list_head list;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int blocks_count;
int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
* is index inside "blocks"
@@ -334,7 +335,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
if (err)
goto err_picker;
list_add(&key_info->list, &mlxsw_afk->key_info_list);
- key_info->ref_count = 1;
+ refcount_set(&key_info->ref_count, 1);
return key_info;
err_picker:
@@ -356,7 +357,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
if (key_info) {
- key_info->ref_count++;
+ refcount_inc(&key_info->ref_count);
return key_info;
}
return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
@@ -365,7 +366,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
{
- if (--key_info->ref_count)
+ if (!refcount_dec_and_test(&key_info->ref_count))
return;
mlxsw_afk_key_info_destroy(key_info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index f1b48d6615f6..5c511e1a8efa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -44,16 +44,19 @@ static const struct thermal_trip default_thermal_trips[] = {
.type = THERMAL_TRIP_ACTIVE,
.temperature = MLXSW_THERMAL_ASIC_TEMP_NORM,
.hysteresis = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
},
{
/* In range - 40-100% PWM */
.type = THERMAL_TRIP_ACTIVE,
.temperature = MLXSW_THERMAL_ASIC_TEMP_HIGH,
.hysteresis = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
},
{ /* Warning */
.type = THERMAL_TRIP_HOT,
.temperature = MLXSW_THERMAL_ASIC_TEMP_HOT,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
},
};
@@ -62,16 +65,19 @@ static const struct thermal_trip default_thermal_module_trips[] = {
.type = THERMAL_TRIP_ACTIVE,
.temperature = MLXSW_THERMAL_MODULE_TEMP_NORM,
.hysteresis = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
},
{
/* In range - 40-100% PWM */
.type = THERMAL_TRIP_ACTIVE,
.temperature = MLXSW_THERMAL_MODULE_TEMP_HIGH,
.hysteresis = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
},
{ /* Warning */
.type = THERMAL_TRIP_HOT,
.temperature = MLXSW_THERMAL_MODULE_TEMP_HOT,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
},
};
@@ -92,9 +98,6 @@ static const struct mlxsw_cooling_states default_cooling_states[] = {
#define MLXSW_THERMAL_NUM_TRIPS ARRAY_SIZE(default_thermal_trips)
-/* Make sure all trips are writable */
-#define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1)
-
struct mlxsw_thermal;
struct mlxsw_thermal_module {
@@ -420,7 +423,6 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
module_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
module_tz->trips,
MLXSW_THERMAL_NUM_TRIPS,
- MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
&mlxsw_thermal_params,
@@ -548,7 +550,6 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
gearbox_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
gearbox_tz->trips,
MLXSW_THERMAL_NUM_TRIPS,
- MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
&mlxsw_thermal_params, 0,
@@ -773,7 +774,6 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->tzdev = thermal_zone_device_register_with_trips("mlxsw",
thermal->trips,
MLXSW_THERMAL_NUM_TRIPS,
- MLXSW_THERMAL_TRIP_MASK,
thermal,
&mlxsw_thermal_ops,
&mlxsw_thermal_params, 0,
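
With the writable-trips bitmask argument dropped from thermal_zone_device_register_with_trips(), per-trip writability now lives in the trip's .flags field. A sketch of the resulting shape, assuming the mask-less argument order visible in the hunks above (names and temperatures are hypothetical):

#include <linux/kernel.h>
#include <linux/thermal.h>

static struct thermal_trip foo_trips[] = {
        {
                .type = THERMAL_TRIP_ACTIVE,
                .temperature = 75000,                   /* millidegrees Celsius */
                .hysteresis = 5000,
                .flags = THERMAL_TRIP_FLAG_RW_TEMP,     /* temperature is writable */
        },
        {
                .type = THERMAL_TRIP_HOT,
                .temperature = 85000,
                .flags = THERMAL_TRIP_FLAG_RW_TEMP,
        },
};

static struct thermal_zone_device *
foo_register_tz(void *drvdata, struct thermal_zone_device_ops *ops)
{
        /* No trip mask argument any more; the per-trip flags decide writability. */
        return thermal_zone_device_register_with_trips("foo", foo_trips,
                                                       ARRAY_SIZE(foo_trips),
                                                       drvdata, ops, NULL, 0, 0);
}
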
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 6b98c3287b49..f0ceb196a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -708,7 +708,6 @@ static const struct i2c_device_id mlxsw_m_i2c_id[] = {
static struct i2c_driver mlxsw_m_i2c_driver = {
.driver.name = "mlxsw_minimal",
- .class = I2C_CLASS_HWMON,
.id_table = mlxsw_m_i2c_id,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5d3413636a62..bb642e9bb6cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -176,13 +176,15 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes)
+ unsigned int counter_index, bool clear,
+ u64 *packets, u64 *bytes)
{
+ enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
+ MLXSW_REG_MGPC_OPCODE_NOP;
char mgpc_pl[MLXSW_REG_MGPC_LEN];
int err;
- mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
if (err)
@@ -2695,23 +2697,18 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
char sgcr_pl[MLXSW_REG_SGCR_LEN];
- u16 max_lag;
int err;
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return 0;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
-
/* In DDD mode, which we by default use, each LAG entry is 8 PGT
* entries. The LAG table address needs to be 8-aligned, but that ought
* to be the case, since the LAG table is allocated first.
*/
err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
if (err)
return err;
if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
@@ -2728,33 +2725,31 @@ static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
err_mid_alloc_range:
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
return err;
}
static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
- u16 max_lag;
- int err;
-
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return;
-
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
}
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
+struct mlxsw_sp_lag {
+ struct net_device *dev;
+ refcount_t ref_count;
+ u16 lag_id;
+};
+
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
- u16 max_lag;
u32 seed;
int err;
@@ -2773,7 +2768,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
if (err)
return err;
@@ -2784,7 +2779,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
GFP_KERNEL);
if (!mlxsw_sp->lags) {
err = -ENOMEM;
@@ -4269,19 +4264,48 @@ mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
+ struct mlxsw_sp_lag *lag;
+ u16 lag_id;
+ int i, err;
+
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ break;
+ }
+
+ if (i == mlxsw_sp->max_lag) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported LAG devices");
+ return ERR_PTR(-EBUSY);
+ }
+ lag_id = i;
mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ if (err)
+ return ERR_PTR(err);
+
+ lag = &mlxsw_sp->lags[lag_id];
+ lag->lag_id = lag_id;
+ lag->dev = lag_dev;
+ refcount_set(&lag->ref_count, 1);
+
+ return lag;
}
-static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static int
+mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
- mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+ lag->dev = NULL;
+
+ mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
@@ -4329,34 +4353,44 @@ static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
-static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *lag_dev,
- u16 *p_lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
{
- struct mlxsw_sp_upper *lag;
- int free_lag_id = -1;
- u16 max_lag;
- int err, i;
+ int i;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ continue;
- for (i = 0; i < max_lag; i++) {
- lag = mlxsw_sp_lag_get(mlxsw_sp, i);
- if (lag->ref_count) {
- if (lag->dev == lag_dev) {
- *p_lag_id = i;
- return 0;
- }
- } else if (free_lag_id < 0) {
- free_lag_id = i;
- }
+ if (mlxsw_sp->lags[i].dev == lag_dev)
+ return &mlxsw_sp->lags[i];
}
- if (free_lag_id < 0)
- return -EBUSY;
- *p_lag_id = free_lag_id;
- return 0;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_lag *lag;
+
+ lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
+ if (lag) {
+ refcount_inc(&lag->ref_count);
+ return lag;
+ }
+
+ return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
+}
+
+static void
+mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
+{
+ if (!refcount_dec_and_test(&lag->ref_count))
+ return;
+
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag);
}
static bool
@@ -4365,12 +4399,6 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
struct netdev_lag_upper_info *lag_upper_info,
struct netlink_ext_ack *extack)
{
- u16 lag_id;
-
- if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
- return false;
- }
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
@@ -4482,22 +4510,16 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
u16 lag_id;
u8 port_index;
int err;
- err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
- if (err)
- return err;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- if (!lag->ref_count) {
- err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
- if (err)
- return err;
- lag->dev = lag_dev;
- }
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
+ if (IS_ERR(lag))
+ return PTR_ERR(lag);
+ lag_id = lag->lag_id;
err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
if (err)
return err;
@@ -4515,7 +4537,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
- lag->ref_count++;
err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
if (err)
@@ -4542,7 +4563,6 @@ err_replay:
err_router_join:
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
err_fid_port_join_lag:
- lag->ref_count--;
mlxsw_sp_port->lagged = 0;
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
@@ -4550,8 +4570,7 @@ err_fid_port_join_lag:
err_col_port_add:
mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
- if (!lag->ref_count)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
return err;
}
@@ -4560,12 +4579,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 lag_id = mlxsw_sp_port->lag_id;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
if (!mlxsw_sp_port->lagged)
return;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- WARN_ON(lag->ref_count == 0);
+ lag = &mlxsw_sp->lags[lag_id];
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
@@ -4579,13 +4597,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
- if (lag->ref_count == 1)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
- lag->ref_count--;
/* Make sure untagged frames are allowed to ingress */
mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
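
The spectrum LAG rework above replaces the index-lookup helper with a small refcounted object and a find / get (find-or-create) / put triplet. A condensed sketch of that shape over the array-backed table the diff assumes, with hypothetical names:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/refcount.h>

struct foo_lag {
        struct net_device *dev; /* NULL means the slot is free */
        refcount_t ref_count;
        u16 lag_id;
};

struct foo_switch {
        struct foo_lag *lags;
        u16 max_lag;
};

static struct foo_lag *foo_lag_find(struct foo_switch *sw, struct net_device *lag_dev)
{
        int i;

        for (i = 0; i < sw->max_lag; i++)
                if (sw->lags[i].dev == lag_dev)
                        return &sw->lags[i];
        return NULL;
}

static struct foo_lag *foo_lag_get(struct foo_switch *sw, struct net_device *lag_dev,
                                   struct netlink_ext_ack *extack)
{
        struct foo_lag *lag = foo_lag_find(sw, lag_dev);

        if (lag) {
                refcount_inc(&lag->ref_count);
                return lag;
        }

        lag = foo_lag_find(sw, NULL);   /* first free slot */
        if (!lag) {
                NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
                return ERR_PTR(-EBUSY);
        }
        lag->lag_id = lag - sw->lags;
        lag->dev = lag_dev;
        refcount_set(&lag->ref_count, 1);
        return lag;
}

static void foo_lag_put(struct foo_lag *lag)
{
        if (!refcount_dec_and_test(&lag->ref_count))
                return;
        lag->dev = NULL;        /* release the slot */
}
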
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0c9775fa955..3beb5d0847ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -78,11 +78,6 @@ struct mlxsw_sp_span_entry;
enum mlxsw_sp_l3proto;
union mlxsw_sp_l3addr;
-struct mlxsw_sp_upper {
- struct net_device *dev;
- unsigned int ref_count;
-};
-
enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_SUBPORT,
MLXSW_SP_RIF_TYPE_VLAN,
@@ -136,6 +131,7 @@ struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
struct mlxsw_sp_pgt;
+struct mlxsw_sp_lag;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -164,7 +160,8 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
- struct mlxsw_sp_upper *lags;
+ struct mlxsw_sp_lag *lags;
+ u16 max_lag;
struct mlxsw_sp_port_mapping *port_mapping;
struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
@@ -257,12 +254,6 @@ struct mlxsw_sp_fid_core_ops {
void (*fini)(struct mlxsw_sp *mlxsw_sp);
};
-static inline struct mlxsw_sp_upper *
-mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
-{
- return &mlxsw_sp->lags[lag_id];
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -715,8 +706,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes);
+ unsigned int counter_index, bool clear,
+ u64 *packets, u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 7c59c8a13584..3e70cee4d2f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
struct rhash_head ht_node; /* Member of acl HT */
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int min_prio;
unsigned int max_prio;
unsigned long priv[];
@@ -99,7 +100,7 @@ static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
/* We hold a reference on ruleset ourselves */
- return ruleset->ref_count == 2;
+ return refcount_read(&ruleset->ref_count) == 2;
}
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
@@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
ruleset = kzalloc(alloc_size, GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
- ruleset->ref_count = 1;
+ refcount_set(&ruleset->ref_count, 1);
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
@@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
- ruleset->ref_count++;
+ refcount_inc(&ruleset->ref_count);
}
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
- if (--ruleset->ref_count)
+ if (!refcount_dec_and_test(&ruleset->ref_count))
return;
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
@@ -1023,7 +1024,7 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
rulei = mlxsw_sp_acl_rule_rulei(rule);
if (rulei->counter_valid) {
err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
- &current_packets,
+ false, &current_packets,
&current_bytes);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50ea1eff02b2..f20052776b3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -155,7 +156,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@@ -176,7 +177,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
@@ -769,7 +770,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
- vregion->ref_count = 1;
+ refcount_set(&vregion->ref_count, 1);
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@@ -856,7 +857,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
*/
return ERR_PTR(-EOPNOTSUPP);
}
- vregion->ref_count++;
+ refcount_inc(&vregion->ref_count);
return vregion;
}
@@ -871,7 +872,7 @@ static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- if (--vregion->ref_count)
+ if (!refcount_dec_and_test(&vregion->ref_count))
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
@@ -924,7 +925,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->vgroup = vgroup;
- vchunk->ref_count = 1;
+ refcount_set(&vchunk->ref_count, 1);
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
@@ -1008,7 +1009,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
- vchunk->ref_count++;
+ refcount_inc(&vchunk->ref_count);
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
@@ -1019,7 +1020,7 @@ static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- if (--vchunk->ref_count)
+ if (!refcount_dec_and_test(&vchunk->ref_count))
return;
mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index c8a356accdf8..ca80af06465f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -1181,9 +1181,11 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
char ratr_pl[MLXSW_REG_RATR_LEN];
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_nexthop *nh;
+ unsigned int n_done = 0;
u32 adj_hash_index = 0;
u32 adj_index = 0;
u32 adj_size = 0;
+ int err;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_is_forward(nh) ||
@@ -1192,15 +1194,27 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
&adj_hash_index);
- if (enable)
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
- else
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ if (enable) {
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ goto err_counter_enable;
+ } else {
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ }
mlxsw_sp_nexthop_eth_update(mlxsw_sp,
adj_index + adj_hash_index, nh,
true, ratr_pl);
+ n_done++;
}
return 0;
+
+err_counter_enable:
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!n_done--)
+ break;
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ }
+ return err;
}
static u64
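
The dpipe hunk above makes the adjacency-counter update propagate failures and roll back: it counts how many nexthops were already switched on (n_done) and, on error, walks the list again undoing exactly that many. A generic sketch of the same unwind pattern, with hypothetical helpers:

#include <linux/list.h>

struct foo_item {
        struct list_head node;
        bool enabled;
};

static int foo_item_enable(struct foo_item *item)
{
        item->enabled = true;   /* stand-in for an operation that can fail */
        return 0;
}

static void foo_item_disable(struct foo_item *item)
{
        item->enabled = false;
}

static int foo_enable_all(struct list_head *items)
{
        unsigned int n_done = 0;
        struct foo_item *item;
        int err;

        list_for_each_entry(item, items, node) {
                err = foo_item_enable(item);
                if (err)
                        goto err_enable;
                n_done++;
        }
        return 0;

err_enable:
        /* Walk the list again and undo only the iterations that completed. */
        list_for_each_entry(item, items, node) {
                if (!n_done--)
                        break;
                foo_item_disable(item);
        }
        return err;
}
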
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 221aa6a474eb..01d81ae3662a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -361,7 +361,7 @@ static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_route *route = route_priv;
return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
- packets, bytes);
+ false, packets, bytes);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7164f9e6370f..40ba314fbc72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -19,6 +19,7 @@
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
+#include <linux/xarray.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -501,7 +502,7 @@ struct mlxsw_sp_rt6 {
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
- unsigned int ref_count;
+ refcount_t ref_count;
enum mlxsw_sp_l3proto proto;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
@@ -578,7 +579,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count == 0)
+ if (refcount_read(&lpm_tree->ref_count) == 0)
return lpm_tree;
}
return NULL;
@@ -654,7 +655,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
sizeof(lpm_tree->prefix_usage));
memset(&lpm_tree->prefix_ref_count, 0,
sizeof(lpm_tree->prefix_ref_count));
- lpm_tree->ref_count = 1;
+ refcount_set(&lpm_tree->ref_count, 1);
return lpm_tree;
err_left_struct_set:
@@ -678,7 +679,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count != 0 &&
+ if (refcount_read(&lpm_tree->ref_count) &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage)) {
@@ -691,14 +692,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
- lpm_tree->ref_count++;
+ refcount_inc(&lpm_tree->ref_count);
}
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ if (!refcount_dec_and_test(&lpm_tree->ref_count))
+ return;
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -2250,7 +2252,7 @@ int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
- p_counter, NULL);
+ false, p_counter, NULL);
}
static struct mlxsw_sp_neigh_entry *
@@ -3048,6 +3050,8 @@ struct mlxsw_sp_nexthop_key {
struct fib_nh *fib_nh;
};
+struct mlxsw_sp_nexthop_counter;
+
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head crif_list_node;
@@ -3079,8 +3083,8 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
};
- unsigned int counter_index;
- bool counter_valid;
+ struct mlxsw_sp_nexthop_counter *counter;
+ u32 id; /* NH ID for members of a NH object group. */
};
static struct net_device *
@@ -3105,8 +3109,10 @@ struct mlxsw_sp_nexthop_group_info {
int sum_norm_weight;
u8 adj_index_valid:1,
gateway:1, /* routes using the group use a gateway */
- is_resilient:1;
+ is_resilient:1,
+ hw_stats:1;
struct list_head list; /* member in nh_res_grp_list */
+ struct xarray nexthop_counters;
struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
};
@@ -3150,39 +3156,148 @@ struct mlxsw_sp_nexthop_group {
bool can_destroy;
};
-void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+struct mlxsw_sp_nexthop_counter {
+ unsigned int counter_index;
+ refcount_t ref_count;
+};
+
+static struct mlxsw_sp_nexthop_counter *
+mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nexthop_counter *nhct;
+ int err;
+
+ nhct = kzalloc(sizeof(*nhct), GFP_KERNEL);
+ if (!nhct)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nhct->counter_index);
+ if (err)
+ goto err_counter_alloc;
+
+ refcount_set(&nhct->ref_count, 1);
+ return nhct;
+
+err_counter_alloc:
+ kfree(nhct);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_counter *nhct)
+{
+ mlxsw_sp_flow_counter_free(mlxsw_sp, nhct->counter_index);
+ kfree(nhct);
+}
+
+static struct mlxsw_sp_nexthop_counter *
+mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ struct mlxsw_sp_nexthop_counter *nhct;
+ void *ptr;
+ int err;
+
+ nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
+ if (nhct) {
+ refcount_inc(&nhct->ref_count);
+ return nhct;
+ }
+
+ nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
+ if (IS_ERR(nhct))
+ return nhct;
+
+ ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
+ GFP_KERNEL);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_store;
+ }
+
+ return nhct;
+
+err_store:
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_nexthop_sh_counter_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ struct mlxsw_sp_nexthop_counter *nhct;
+
+ nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
+ if (WARN_ON(!nhct))
+ return;
+
+ if (!refcount_dec_and_test(&nhct->ref_count))
+ return;
+
+ xa_erase(&nh_grp->nhgi->nexthop_counters, nh->id);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
+}
+
+int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
+ const char *table_adj = MLXSW_SP_DPIPE_TABLE_NAME_ADJ;
+ struct mlxsw_sp_nexthop_counter *nhct;
struct devlink *devlink;
+ bool dpipe_stats;
+
+ if (nh->counter)
+ return 0;
devlink = priv_to_devlink(mlxsw_sp->core);
- if (!devlink_dpipe_table_counter_enabled(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
- return;
+ dpipe_stats = devlink_dpipe_table_counter_enabled(devlink, table_adj);
+ if (!(nh->nhgi->hw_stats || dpipe_stats))
+ return 0;
- if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
- return;
+ if (nh->id)
+ nhct = mlxsw_sp_nexthop_sh_counter_get(mlxsw_sp, nh);
+ else
+ nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
+ if (IS_ERR(nhct))
+ return PTR_ERR(nhct);
- nh->counter_valid = true;
+ nh->counter = nhct;
+ return 0;
}
-void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
- if (!nh->counter_valid)
+ if (!nh->counter)
return;
- mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
- nh->counter_valid = false;
+
+ if (nh->id)
+ mlxsw_sp_nexthop_sh_counter_put(mlxsw_sp, nh);
+ else
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh->counter);
+ nh->counter = NULL;
+}
+
+static int mlxsw_sp_nexthop_counter_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->nhgi->hw_stats)
+ return mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ return 0;
}
int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
- if (!nh->counter_valid)
+ if (!nh->counter)
return -EINVAL;
- return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
- p_counter, NULL);
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter->counter_index,
+ true, p_counter, NULL);
}
struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
@@ -3655,8 +3770,9 @@ static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(1);
return -EINVAL;
}
- if (nh->counter_valid)
- mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
+ if (nh->counter)
+ mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter->counter_index,
+ true);
else
mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
@@ -3743,6 +3859,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
nh = &nhgi->nexthops[i];
if (!nh->should_offload) {
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
nh->offloaded = 0;
continue;
}
@@ -3750,6 +3867,10 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
int err = 0;
+ err = mlxsw_sp_nexthop_counter_update(mlxsw_sp, nh);
+ if (err)
+ return err;
+
err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
true, ratr_pl);
if (err)
@@ -4506,7 +4627,10 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ goto err_counter_enable;
+
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
if (!dev)
@@ -4530,7 +4654,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
err_nexthop_neigh_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+err_counter_enable:
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -4540,7 +4665,7 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
@@ -5005,9 +5130,9 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
break;
}
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
nh->ifindex = dev->ifindex;
+ nh->id = nh_obj->id;
err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
if (err)
@@ -5029,7 +5154,6 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
err_type_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
return err;
}
@@ -5040,7 +5164,7 @@ static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
nh->should_offload = 0;
}
@@ -5052,6 +5176,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
bool is_resilient = false;
+ bool hw_stats = false;
unsigned int nhs;
int err, i;
@@ -5061,9 +5186,11 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
break;
case NH_NOTIFIER_INFO_TYPE_GRP:
nhs = info->nh_grp->num_nh;
+ hw_stats = info->nh_grp->hw_stats;
break;
case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
nhs = info->nh_res_table->num_nh_buckets;
+ hw_stats = info->nh_res_table->hw_stats;
is_resilient = true;
break;
default:
@@ -5078,6 +5205,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
nhgi->is_resilient = is_resilient;
nhgi->count = nhs;
+ nhgi->hw_stats = hw_stats;
+
+ xa_init_flags(&nhgi->nexthop_counters, XA_FLAGS_ALLOC1);
+
for (i = 0; i < nhgi->count; i++) {
struct nh_notifier_single_info *nh_obj;
int weight;
@@ -5160,6 +5291,8 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
WARN_ON_ONCE(nhgi->adj_index_valid);
+ WARN_ON(!xa_empty(&nhgi->nexthop_counters));
+ xa_destroy(&nhgi->nexthop_counters);
kfree(nhgi);
}
@@ -5299,6 +5432,43 @@ err_out:
return err;
}
+static int mlxsw_sp_nexthop_obj_res_group_pre(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct nh_notifier_grp_info *grp_info = info->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+ int i;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return 0;
+ nhgi = nh_grp->nhgi;
+
+ if (nhgi->hw_stats == grp_info->hw_stats)
+ return 0;
+
+ nhgi->hw_stats = grp_info->hw_stats;
+
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ if (nh->offloaded)
+ nh->update = 1;
+ }
+
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ nhgi->hw_stats = !grp_info->hw_stats;
+ return err;
+}
+
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
struct nh_notifier_info *info)
{
@@ -5475,6 +5645,79 @@ err_nexthop_obj_init:
return err;
}
+static void
+mlxsw_sp_nexthop_obj_mp_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ struct nh_notifier_grp_hw_stats_info *info)
+{
+ int nhi;
+
+ for (nhi = 0; nhi < info->num_nh; nhi++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[nhi];
+ u64 packets;
+ int err;
+
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
+ if (err)
+ continue;
+
+ nh_grp_hw_stats_report_delta(info, nhi, packets);
+ }
+}
+
+static void
+mlxsw_sp_nexthop_obj_res_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ struct nh_notifier_grp_hw_stats_info *info)
+{
+ int nhi = -1;
+ int bucket;
+
+ for (bucket = 0; bucket < nhgi->count; bucket++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[bucket];
+ u64 packets;
+ int err;
+
+ if (nhi == -1 || info->stats[nhi].id != nh->id) {
+ for (nhi = 0; nhi < info->num_nh; nhi++)
+ if (info->stats[nhi].id == nh->id)
+ break;
+ if (WARN_ON_ONCE(nhi == info->num_nh)) {
+ nhi = -1;
+ continue;
+ }
+ }
+
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
+ if (err)
+ continue;
+
+ nh_grp_hw_stats_report_delta(info, nhi, packets);
+ }
+}
+
+static void mlxsw_sp_nexthop_obj_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ if (info->type != NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS)
+ return;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return;
+ nhgi = nh_grp->nhgi;
+
+ if (nhgi->is_resilient)
+ mlxsw_sp_nexthop_obj_res_hw_stats_get(mlxsw_sp, nhgi,
+ info->nh_grp_hw_stats);
+ else
+ mlxsw_sp_nexthop_obj_mp_hw_stats_get(mlxsw_sp, nhgi,
+ info->nh_grp_hw_stats);
+}
+
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -5490,6 +5733,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
mutex_lock(&router->lock);
switch (event) {
+ case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
+ err = mlxsw_sp_nexthop_obj_res_group_pre(router->mlxsw_sp,
+ info);
+ break;
case NEXTHOP_EVENT_REPLACE:
err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
break;
@@ -5500,6 +5747,9 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
info);
break;
+ case NEXTHOP_EVENT_HW_STATS_REPORT_DELTA:
+ mlxsw_sp_nexthop_obj_hw_stats_get(router->mlxsw_sp, info);
+ break;
default:
break;
}
@@ -6733,7 +6983,10 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
#if IS_ENABLED(CONFIG_IPV6)
nh->neigh_tbl = &nd_tbl;
#endif
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ return err;
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -6749,7 +7002,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
err_nexthop_type_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
return err;
}
@@ -6758,7 +7011,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
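
In the router hunks above, nexthop counters become refcounted objects, and nexthops that belong to the same nexthop object group share one counter per nexthop ID through an xarray keyed by that ID. A minimal sketch of the lookup-or-allocate-and-store pattern, with hypothetical names and the group-scoped xarray replaced by a file-scope one:

#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct foo_counter {
        unsigned int counter_index;
        refcount_t ref_count;
};

static DEFINE_XARRAY_ALLOC1(foo_counters);      /* keyed by nexthop ID; index 0 unused */

static struct foo_counter *foo_shared_counter_get(u32 id)
{
        struct foo_counter *ctr;
        void *old;

        ctr = xa_load(&foo_counters, id);
        if (ctr) {
                refcount_inc(&ctr->ref_count);
                return ctr;
        }

        ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
        if (!ctr)
                return ERR_PTR(-ENOMEM);
        refcount_set(&ctr->ref_count, 1);

        old = xa_store(&foo_counters, id, ctr, GFP_KERNEL);
        if (xa_is_err(old)) {
                kfree(ctr);
                return ERR_PTR(xa_err(old));
        }
        return ctr;
}

static void foo_shared_counter_put(u32 id, struct foo_counter *ctr)
{
        if (!refcount_dec_and_test(&ctr->ref_count))
                return;
        xa_erase(&foo_counters, id);
        kfree(ctr);
}
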
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index ed3b628caafe..0432c7cc6b07 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -156,10 +156,10 @@ int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_nexthop *nh, bool force,
char *ratr_pl);
-void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh);
-void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
const union mlxsw_sp_l3addr *addr2)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6c749c148148..6397ff0dc951 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
- unsigned int ref_count;
+ refcount_t ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
@@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
- bridge_port->ref_count = 1;
+ refcount_set(&bridge_port->ref_count, 1);
err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
NULL, NULL, NULL, false, extack);
@@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
- bridge_port->ref_count++;
+ refcount_inc(&bridge_port->ref_count);
return bridge_port;
}
@@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- if (--bridge_port->ref_count != 0)
+ if (!refcount_dec_and_test(&bridge_port->ref_count))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 54f2eac11a63..2f803377c9f9 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -156,7 +156,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ if (kss->spidev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
msg = &kss->spi_msg2;
xfer = kss->spi_xfer2;
@@ -180,7 +180,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
ret = spi_sync(kss->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ else if (kss->spidev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
else
memcpy(rxb, trx + 2, rxl);
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 5693784eec5b..443128adbcb6 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -464,7 +464,7 @@ static struct regmap_config regcfg = {
.val_bits = 16,
.max_register = 0xee,
.reg_stride = 2,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_regmap_readable,
.writeable_reg = encx24j600_regmap_writeable,
@@ -485,7 +485,7 @@ static struct regmap_config phycfg = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0x1f,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_phymap_readable,
.writeable_reg = encx24j600_phymap_writeable,
@@ -513,4 +513,5 @@ int devm_regmap_init_encx24j600(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+MODULE_DESCRIPTION("Microchip ENCX24J600 helpers");
MODULE_LICENSE("GPL");
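
The register cache here moves from REGCACHE_RBTREE to REGCACHE_MAPLE, the maple-tree backend that is now the usual choice for sparse register maps. A sketch of a regmap_config using it, with hypothetical register geometry:

#include <linux/regmap.h>

static const struct regmap_config foo_regmap_cfg = {
        .reg_bits = 8,
        .val_bits = 16,
        .max_register = 0xee,
        .reg_stride = 2,
        .cache_type = REGCACHE_MAPLE,   /* maple tree register cache */
};

/* e.g. map = devm_regmap_init_spi(spi, &foo_regmap_cfg); */
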
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index a2b3f4433ca8..8a6ae171e375 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1055,7 +1055,7 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
}
static int lan743x_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
@@ -1092,7 +1092,7 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter;
struct phy_device *phydev;
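
The EEE callbacks switch from struct ethtool_eee to struct ethtool_keee, which carries link-mode bitmaps rather than the old u32 masks. A rough sketch of a get_eee handler that delegates to phylib, assuming phy_ethtool_get_eee() accepts the keee structure as the rest of this series implies:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int foo_get_eee(struct net_device *netdev, struct ethtool_keee *eee)
{
        struct phy_device *phydev = netdev->phydev;

        if (!phydev)
                return -EIO;

        /* The PHY core fills the link-mode bitmaps and EEE state. */
        return phy_ethtool_get_eee(phydev, eee);
}
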
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 45e209a7d083..bd8aa83b47e5 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1196,7 +1196,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
if (ret < 0) {
netif_err(adapter, drv, adapter->netdev,
- "erro %d SGMII get mode failed\n", ret);
+ "error %d SGMII get mode failed\n", ret);
return ret;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2f04bc77a118..2801f08bf1c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1712,13 +1712,13 @@ bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
bool result = false;
- spin_lock_bh(&ptp->tx_ts_lock);
+ spin_lock(&ptp->tx_ts_lock);
if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
/* request granted */
ptp->pending_tx_timestamps++;
result = true;
}
- spin_unlock_bh(&ptp->tx_ts_lock);
+ spin_unlock(&ptp->tx_ts_lock);
return result;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
index ac525ff1503e..3a01e13bd10b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -25,6 +25,8 @@ static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
for (int l = 0; l < admin->lookups; ++l) {
out->prf(out->dst, "\n Lookup %d: ", l);
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l));
+
out->prf(out->dst, "\n other: ");
switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
case VCAP_IS1_PS_OTHER_NORMAL:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 4af285918ea2..75868b3f548e 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
list) {
if ((vid == 0 || mact_entry->vid == vid) &&
ether_addr_equal(addr, mact_entry->mac)) {
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
list_del(&mact_entry->list);
devm_kfree(sparx5->dev, mact_entry);
-
- sparx5_mact_forget(sparx5, addr, mact_entry->vid);
}
}
mutex_unlock(&sparx5->mact_lock);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index d1f7fc8b1b71..3c066b62e689 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sparx5);
sparx5->pdev = pdev;
sparx5->dev = &pdev->dev;
+ spin_lock_init(&sparx5->tx_lock);
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 6f565c0c0c3d..316fed5f2735 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -280,6 +280,7 @@ struct sparx5 {
int xtr_irq;
/* Frame DMA */
int fdma_irq;
+ spinlock_t tx_lock; /* lock for frame transmission */
struct sparx5_rx rx;
struct sparx5_tx tx;
/* PTP */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 6db6ac6a3bbc..ac7e1cffbcec 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
+ spin_unlock(&sparx5->tx_lock);
if (ret == -EBUSY)
goto busy;
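
sparx5 gains a dedicated TX spinlock so the FDMA path and the register-injection path cannot interleave words of different frames. The pattern is: initialize the lock once at probe, then hold it for the whole injection of one frame; ndo_start_xmit already runs with bottom halves disabled, so a plain spin_lock() is enough. A minimal sketch with hypothetical names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_switch {
        spinlock_t tx_lock;     /* serializes frame injection */
};

static void foo_probe_init(struct foo_switch *sw)
{
        spin_lock_init(&sw->tx_lock);
}

static netdev_tx_t foo_xmit(struct foo_switch *sw, struct sk_buff *skb)
{
        netdev_tx_t ret;

        spin_lock(&sw->tx_lock);
        ret = NETDEV_TX_OK;     /* stand-in for the actual frame injection */
        spin_unlock(&sw->tx_lock);

        return ret;
}
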
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index d33b27214539..1332db9a08eb 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1249,15 +1249,47 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
+static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+{
+ const struct cpumask *next, *prev = cpu_none_mask;
+ cpumask_var_t cpus __free(free_cpumask_var);
+ int cpu, weight;
+
+ if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ rcu_read_lock();
+ for_each_numa_hop_mask(next, node) {
+ weight = cpumask_weight_andnot(next, prev);
+ while (weight > 0) {
+ cpumask_andnot(cpus, next, prev);
+ for_each_cpu(cpu, cpus) {
+ if (len-- == 0)
+ goto done;
+ irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+ }
+ }
+ prev = next;
+ }
+done:
+ rcu_read_unlock();
+ return 0;
+}
+
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
- unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
unsigned int max_irqs, cpu;
- int nvec, irq;
+ int start_irq_index = 1;
+ int nvec, *irqs, irq;
int err, i = 0, j;
+ cpus_read_lock();
+ max_queues_per_port = num_online_cpus();
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1265,8 +1297,18 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
max_irqs = max_queues_per_port + 1;
nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0)
+ if (nvec < 0) {
+ cpus_read_unlock();
return nvec;
+ }
+ if (nvec <= num_online_cpus())
+ start_irq_index = 0;
+
+ irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
+ if (!irqs) {
+ err = -ENOMEM;
+ goto free_irq_vector;
+ }
gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
GFP_KERNEL);
@@ -1294,17 +1336,41 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq;
}
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ if (!i) {
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+
+ /* If the number of IRQs is one more than the number of online
+ * CPUs, assign IRQ0 (the HWC IRQ) and IRQ1 to the same CPU.
+ * Otherwise IRQ0 and IRQ1 get different CPUs.
+ * Also use cpumask_local_spread() instead of cpumask_first()
+ * for the node, because the node can be memory-only.
+ */
+ if (start_irq_index) {
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ } else {
+ irqs[start_irq_index] = irq;
+ }
+ } else {
+ irqs[i - start_irq_index] = irq;
+ err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
+ gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
}
+ err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
+ if (err)
+ goto free_irq;
+
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
-
+ cpus_read_unlock();
return 0;
free_irq:
@@ -1317,8 +1383,10 @@ free_irq:
}
kfree(gc->irq_contexts);
+ kfree(irqs);
gc->irq_contexts = NULL;
free_irq_vector:
+ cpus_read_unlock();
pci_free_irq_vectors(pdev);
return err;
}
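
The new irq_setup() above spreads the MSI-X vectors over CPUs in order of NUMA distance from the device's node, and skips SMT siblings of CPUs that already got a vector. A small sketch of just the hop-by-hop traversal it relies on, under the rcu_read_lock() the iterator requires (function name is hypothetical):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/topology.h>

/* Visit online CPUs starting at @node and moving outward by NUMA distance. */
static void foo_walk_cpus_by_distance(int node)
{
        const struct cpumask *next, *prev = cpu_none_mask;
        int cpu;

        rcu_read_lock();
        for_each_numa_hop_mask(next, node) {
                /* CPUs that are new at this hop: in @next but not in @prev. */
                for_each_cpu_andnot(cpu, next, prev)
                        pr_debug("next CPU by distance from node %d: %d\n", node, cpu);
                prev = next;
        }
        rcu_read_unlock();
}
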
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 56ccbd4c37fe..ed2fb44500b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -3078,4 +3078,5 @@ void ocelot_deinit_port(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_deinit_port);
+MODULE_DESCRIPTION("Microsemi Ocelot switch family library");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2b383d92d7f5..2c3f62907958 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -460,7 +460,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip6_dst_hoplimit(dst);
dst_release(dst);
} else {
- set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit);
}
#endif
} else {
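
The hop-limit read gets a READ_ONCE() annotation because the sysctl value can change underneath the reader; the annotation guarantees a single, untorn load and documents the lockless access. A tiny generic sketch of the READ_ONCE()/WRITE_ONCE() pairing on a hypothetical shared integer:

#include <linux/compiler.h>

static int foo_shared_limit;    /* updated from a sysctl-like path, read locklessly */

static void foo_set_limit(int val)
{
        WRITE_ONCE(foo_shared_limit, val);
}

static int foo_get_limit(void)
{
        /* One untorn load; the compiler may not cache or re-read it. */
        return READ_ONCE(foo_shared_limit);
}
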
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 361d7c495e2d..2c7bd6e80d99 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -337,6 +337,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
acti_netdevs = kmalloc_array(entry->slave_cnt,
sizeof(*acti_netdevs), GFP_KERNEL);
+ if (!acti_netdevs) {
+ schedule_delayed_work(&lag->work,
+ NFP_FL_LAG_DELAY);
+ continue;
+ }
/* Include sanity check in the loop. It may be that a bond has
* changed between processing the last notification and the
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7a549b834e97..31f896c4aa26 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1761,7 +1761,7 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
- * Called with read_lock(&dev_base_lock) held for read -
+ * Called with rcu_read_lock() held -
* only synchronized against unregister_netdevice.
*/
static void
@@ -3090,7 +3090,7 @@ static void set_bufsize(struct net_device *dev)
/*
* nv_change_mtu: dev->change_mtu function
- * Called with dev_base_lock held for read.
+ * Called with RTNL held for read.
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 9ffef2e06885..2ccc2c2a06e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -76,6 +76,8 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
+bool ionic_notifyq_service(struct ionic_cq *cq);
+bool ionic_adminq_service(struct ionic_cq *cq);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index c49aa358e424..6ba8d4aca0a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
bars[i].len = 0;
}
}
+ ionic->num_bars = 0;
}
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,15 +216,17 @@ out:
static void ionic_clear_pci(struct ionic *ionic)
{
- ionic->idev.dev_info_regs = NULL;
- ionic->idev.dev_cmd_regs = NULL;
- ionic->idev.intr_status = NULL;
- ionic->idev.intr_ctrl = NULL;
-
- ionic_unmap_bars(ionic);
- pci_release_regions(ionic->pdev);
+ if (ionic->num_bars) {
+ ionic->idev.dev_info_regs = NULL;
+ ionic->idev.dev_cmd_regs = NULL;
+ ionic->idev.intr_status = NULL;
+ ionic->idev.intr_ctrl = NULL;
+
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ }
- if (atomic_read(&ionic->pdev->enable_cnt) > 0)
+ if (pci_is_enabled(ionic->pdev))
pci_disable_device(ionic->pdev);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 91327ef670c7..c3ae11a48024 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -113,8 +113,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
+ struct dentry *intr_dentry, *stats_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
struct debugfs_blob_wrapper *desc_blob;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 1e7c71f7f081..874499337132 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -319,22 +319,32 @@ do_check_time:
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return (u8)PCI_ERROR_RESPONSE;
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return false;
return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
+ if (!idev->dev_cmd_regs)
+ return;
memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
idev->opcode = cmd->cmd.opcode;
+
+ if (!idev->dev_cmd_regs)
+ return;
+
memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
iowrite32(0, &idev->dev_cmd_regs->done);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
@@ -619,43 +629,25 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->desc_size = desc_size;
cq->tail_idx = 0;
cq->done_color = 1;
+ cq->idev = &lif->ionic->idev;
return 0;
}
-void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
-{
- struct ionic_cq_info *cur;
- unsigned int i;
-
- cq->base = base;
- cq->base_pa = base_pa;
-
- for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
- cur->cq_desc = base + (i * cq->desc_size);
-}
-
-void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-{
- cq->bound_q = q;
-}
-
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
- struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
- cq_info = &cq->info[cq->tail_idx];
- while (cb(cq, cq_info)) {
+ while (cb(cq)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
+
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
- cq_info = &cq->info[cq->tail_idx];
if (++work_done >= work_to_do)
break;
@@ -682,7 +674,6 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return -EINVAL;
q->lif = lif;
- q->idev = idev;
q->index = index;
q->num_descs = num_descs;
q->desc_size = desc_size;
@@ -696,53 +687,11 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return 0;
}
-void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->base = base;
- q->base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->desc = base + (i * q->desc_size);
-}
-
-void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->cmb_base = base;
- q->cmb_base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->cmb_desc = base + (i * q->desc_size);
-}
-
-void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->sg_base = base;
- q->sg_base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->sg_desc = base + (i * q->sg_desc_size);
-}
-
-void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
- void *cb_arg)
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
{
- struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
struct device *dev = q->dev;
- desc_info = &q->info[q->head_idx];
- desc_info->cb = cb;
- desc_info->cb_arg = cb_arg;
-
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
@@ -761,7 +710,7 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
}
}
-static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
+bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
{
unsigned int mask, tail, head;
@@ -771,37 +720,3 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
return ((pos - tail) & mask) < ((head - tail) & mask);
}
-
-void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
- unsigned int stop_index)
-{
- struct ionic_desc_info *desc_info;
- ionic_desc_cb cb;
- void *cb_arg;
- u16 index;
-
- /* check for empty queue */
- if (q->tail_idx == q->head_idx)
- return;
-
- /* stop index must be for a descriptor that is not yet completed */
- if (unlikely(!ionic_q_is_posted(q, stop_index)))
- dev_err(q->dev,
- "ionic stop is not posted %s stop %u tail %u head %u\n",
- q->name, stop_index, q->tail_idx, q->head_idx);
-
- do {
- desc_info = &q->info[q->tail_idx];
- index = q->tail_idx;
- q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
-
- cb = desc_info->cb;
- cb_arg = desc_info->cb_arg;
-
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
-
- if (cb)
- cb(q, desc_info, cq_info, cb_arg);
- } while (index != stop_index);
-}
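
With ionic_cq_info removed, ionic_cq_service() above indexes completions straight off cq->base by tail_idx, wraps the tail with a power-of-two mask, and flips done_color on every wrap so entries left over from the previous lap are ignored. A standalone sketch of that color-bit ring walk, with simplified stand-in types rather than the driver's structures:

/* Standalone sketch (not driver code) of the completion-ring walk that
 * ionic_cq_service() performs after this patch: descriptors are indexed
 * directly by tail_idx, the tail wraps with a power-of-two mask, and
 * done_color flips each time the ring wraps so stale entries from the
 * previous lap no longer match.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */

struct comp {
	unsigned char color;		/* written by the "device" */
};

struct cq {
	struct comp ring[RING_SIZE];
	unsigned int tail_idx;
	unsigned char done_color;
};

/* A completion is valid only if its color matches the expected color. */
static bool comp_is_done(const struct cq *cq)
{
	return cq->ring[cq->tail_idx].color == cq->done_color;
}

static unsigned int cq_service(struct cq *cq, unsigned int work_to_do)
{
	unsigned int work_done = 0;

	while (work_done < work_to_do && comp_is_done(cq)) {
		printf("serviced index %u\n", cq->tail_idx);

		if (cq->tail_idx == RING_SIZE - 1)
			cq->done_color = !cq->done_color;	/* wrapped */
		cq->tail_idx = (cq->tail_idx + 1) & (RING_SIZE - 1);
		work_done++;
	}
	return work_done;
}

int main(void)
{
	struct cq cq = { .done_color = 1 };
	unsigned int i;

	for (i = 0; i < 5; i++)		/* "device" posts 5 completions */
		cq.ring[i].color = 1;

	printf("did %u completions\n", cq_service(&cq, RING_SIZE));
	return 0;
}
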
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2667e1cde16b..f30eee4a5a80 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
#include "ionic_if.h"
#include "ionic_regs.h"
@@ -15,9 +16,10 @@
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 64
-#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_DEF_TXRX_DESC 1024
#define IONIC_RX_FILL_THRESHOLD 16
#define IONIC_RX_FILL_DIV 8
+#define IONIC_TSO_DESCS_NEEDED 44 /* 64K TSO @1500B */
#define IONIC_LIFS_MAX 1024
#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
@@ -120,11 +122,13 @@ static_assert(sizeof(struct ionic_log_event) == 64);
/* I/O */
static_assert(sizeof(struct ionic_txq_desc) == 16);
static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
+static_assert(sizeof(struct ionic_txq_sg_desc_v1) == 256);
static_assert(sizeof(struct ionic_txq_comp) == 16);
static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);
+static_assert(sizeof(struct ionic_rxq_comp) == sizeof(struct ionic_txq_comp));
/* SR/IOV */
static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
@@ -173,21 +177,8 @@ struct ionic_dev {
struct ionic_devinfo dev_info;
};
-struct ionic_cq_info {
- union {
- void *cq_desc;
- struct ionic_admin_comp *admincq;
- struct ionic_notifyq_event *notifyq;
- };
-};
-
struct ionic_queue;
struct ionic_qcq;
-struct ionic_desc_info;
-
-typedef void (*ionic_desc_cb)(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
#define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE
@@ -195,6 +186,11 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
__GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
+ (VLAN_ETH_HLEN + \
+ XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
struct ionic_buf_info {
struct page *page;
dma_addr_t dma_addr;
@@ -202,26 +198,25 @@ struct ionic_buf_info {
u32 len;
};
-#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+#define IONIC_TX_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+#define IONIC_RX_MAX_FRAGS (1 + IONIC_RX_MAX_SG_ELEMS)
-struct ionic_desc_info {
- union {
- void *desc;
- struct ionic_txq_desc *txq_desc;
- struct ionic_rxq_desc *rxq_desc;
- struct ionic_admin_cmd *adminq_desc;
- };
- void __iomem *cmb_desc;
- union {
- void *sg_desc;
- struct ionic_txq_sg_desc *txq_sg_desc;
- struct ionic_rxq_sg_desc *rxq_sgl_desc;
- };
+struct ionic_tx_desc_info {
unsigned int bytes;
unsigned int nbufs;
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ enum xdp_action act;
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
- ionic_desc_cb cb;
- void *cb_arg;
+};
+
+struct ionic_rx_desc_info {
+ unsigned int nbufs;
+ struct ionic_buf_info bufs[IONIC_RX_MAX_FRAGS];
+};
+
+struct ionic_admin_desc_info {
+ void *ctx;
};
#define IONIC_QUEUE_NAME_MAX_SZ 16
@@ -229,7 +224,12 @@ struct ionic_desc_info {
struct ionic_queue {
struct device *dev;
struct ionic_lif *lif;
- struct ionic_desc_info *info;
+ union {
+ void *info;
+ struct ionic_tx_desc_info *tx_info;
+ struct ionic_rx_desc_info *rx_info;
+ struct ionic_admin_desc_info *admin_info;
+ };
u64 dbval;
unsigned long dbell_deadline;
unsigned long dbell_jiffies;
@@ -239,26 +239,33 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 drop;
- struct ionic_dev *idev;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
+ bool xdp_flush;
union {
void *base;
struct ionic_txq_desc *txq;
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
- void __iomem *cmb_base;
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
+ struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
+ struct xdp_rxq_info *xdp_rxq_info;
+ struct ionic_queue *partner;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
+ u64 drop;
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
@@ -280,7 +287,6 @@ struct ionic_intr_info {
struct ionic_cq {
struct ionic_lif *lif;
- struct ionic_cq_info *info;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
u16 tail_idx;
@@ -289,6 +295,7 @@ struct ionic_cq {
unsigned int desc_size;
void *base;
dma_addr_t base_pa;
+ struct ionic_dev *idev;
} ____cacheline_aligned_in_smp;
struct ionic;
@@ -363,23 +370,20 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
unsigned int num_descs, size_t desc_size);
void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa);
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
-typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+typedef bool (*ionic_cq_cb)(struct ionic_cq *cq);
typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
-void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
-void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
-void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
-void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
- void *cb_arg);
-void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
- unsigned int stop_index);
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell);
+bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
+
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
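
IONIC_XDP_MAX_LINEAR_MTU above bounds the MTU so that headroom, a VLAN Ethernet header, the frame itself and the skb_shared_info tail all fit in one page, which is what a frags-unaware XDP program requires. The arithmetic, sketched with stand-in constants since the real VLAN_ETH_HLEN, XDP_PACKET_HEADROOM and skb_shared_info sizes come from kernel headers and vary by configuration and architecture:

/* Illustrative arithmetic only: this mirrors the shape of the
 * IONIC_XDP_MAX_LINEAR_MTU definition above with assumed constants.
 */
#include <stdio.h>

#define PAGE_SZ			4096u
#define VLAN_ETH_HDR		18u	/* 14-byte Ethernet + 4-byte VLAN tag */
#define XDP_HEADROOM		256u	/* typical XDP_PACKET_HEADROOM */
#define SHARED_INFO_TAIL	320u	/* assumed SKB_DATA_ALIGN(sizeof(skb_shared_info)) */

int main(void)
{
	/* Largest MTU that still fits a whole frame, the headroom and the
	 * shared-info tail into a single page, i.e. the largest MTU that
	 * can be offered to an XDP program without frags support.
	 */
	unsigned int max_linear_mtu =
		PAGE_SZ - (VLAN_ETH_HDR + XDP_HEADROOM + SHARED_INFO_TAIL);

	printf("max linear XDP MTU for a %u-byte page: %u\n",
	       PAGE_SZ, max_linear_mtu);
	return 0;
}
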
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index cd3c0b01402e..91183965a6b7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev;
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ idev = &lif->ionic->idev;
+ if (!idev->dev_info_regs)
+ return;
+
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}
static void ionic_get_link_ext_stats(struct net_device *netdev,
@@ -721,6 +726,11 @@ static int ionic_set_channels(struct net_device *netdev,
ionic_init_queue_params(lif, &qparam);
+ if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) {
+ netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n");
+ return -EOPNOTSUPP;
+ }
+
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index 5f40324cd243..3c209c1a2337 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
dl = priv_to_devlink(ionic);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ if (!idev->dev_cmd_regs) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index cf2d5ad7b68c..7f0c6cdc375e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -46,18 +46,26 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
+static int ionic_xdp_queues_config(struct ionic_lif *lif);
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+
static void ionic_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct ionic_intr_info *intr;
struct dim_cq_moder cur_moder;
+ struct ionic_intr_info *intr;
struct ionic_qcq *qcq;
struct ionic_lif *lif;
+ struct ionic_queue *q;
u32 new_coal;
- cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
- lif = qcq->q.lif;
+ q = &qcq->q;
+ if (q->type == IONIC_QTYPE_RXQ)
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ else
+ cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ lif = q->lif;
new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
new_coal = new_coal ? new_coal : 1;
@@ -422,10 +430,9 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
+ ionic_xdp_unregister_rxq_info(&qcq->q);
ionic_qcq_intr_free(lif, qcq);
- vfree(qcq->cq.info);
- qcq->cq.info = NULL;
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -529,14 +536,11 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int num_descs, unsigned int desc_size,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
+ unsigned int desc_info_size,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
- void *q_base, *cq_base, *sg_base;
- dma_addr_t cq_base_pa = 0;
- dma_addr_t sg_base_pa = 0;
- dma_addr_t q_base_pa = 0;
struct ionic_qcq *new;
int err;
@@ -552,7 +556,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev;
new->flags = flags;
- new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
+ new->q.info = vcalloc(num_descs, desc_info_size);
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
@@ -571,19 +575,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out;
-
- new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
- if (!new->cq.info) {
- netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
- err = -ENOMEM;
- goto err_out_free_irq;
- }
+ goto err_out_free_q_info;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
@@ -601,16 +598,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
err = -ENOMEM;
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
- q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
- q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
- ionic_q_map(&new->q, q_base, q_base_pa);
-
- cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
- cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+
+ /* Base the NotifyQ cq.base off of the ALIGNed q.base */
+ new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
+ new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
+ new->cq.bound_q = &new->q;
} else {
/* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size);
@@ -619,11 +615,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
- q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
- q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
- ionic_q_map(&new->q, q_base, q_base_pa);
+ new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
if (flags & IONIC_QCQ_F_CMB_RINGS) {
/* on-chip CMB q descriptors */
@@ -648,7 +643,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
new->cmb_q_base_pa -= idev->phy_cmb_pages;
- ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
+ new->q.cmb_base = new->cmb_q_base;
+ new->q.cmb_base_pa = new->cmb_q_base_pa;
}
/* cq DMA descriptors */
@@ -660,10 +656,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM;
goto err_out_free_q;
}
- cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
- cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
+ new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
+ new->cq.bound_q = &new->q;
}
if (flags & IONIC_QCQ_F_SG) {
@@ -675,13 +670,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM;
goto err_out_free_cq;
}
- sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
- sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
- ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
+ new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
+ new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
}
INIT_WORK(&new->dim.work, ionic_dim_work);
- new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
*qcq = new;
@@ -695,8 +689,6 @@ err_out_free_q:
ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
}
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
-err_out_free_cq_info:
- vfree(new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
@@ -722,7 +714,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
IONIC_ADMINQ_LENGTH,
sizeof(struct ionic_admin_cmd),
sizeof(struct ionic_admin_comp),
- 0, lif->kern_pid, &lif->adminqcq);
+ 0,
+ sizeof(struct ionic_admin_desc_info),
+ lif->kern_pid, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -733,7 +727,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
flags, IONIC_NOTIFYQ_LENGTH,
sizeof(struct ionic_notifyq_cmd),
sizeof(union ionic_notifyq_comp),
- 0, lif->kern_pid, &lif->notifyqcq);
+ 0,
+ sizeof(struct ionic_admin_desc_info),
+ lif->kern_pid, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -862,8 +858,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.type = q->type,
.ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
- IONIC_QINIT_F_SG),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
@@ -875,6 +870,13 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
+ q->partner = &lif->txqcqs[q->index]->q;
+ q->partner->partner = q;
+
+ if (!lif->xdp_prog ||
+ (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
+
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
@@ -945,6 +947,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &txq);
if (err)
goto err_qcq_alloc;
@@ -1004,6 +1007,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1157,71 +1161,6 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}
-static bool ionic_notifyq_service(struct ionic_cq *cq,
- struct ionic_cq_info *cq_info)
-{
- union ionic_notifyq_comp *comp = cq_info->cq_desc;
- struct ionic_deferred_work *work;
- struct net_device *netdev;
- struct ionic_queue *q;
- struct ionic_lif *lif;
- u64 eid;
-
- q = cq->bound_q;
- lif = q->info[0].cb_arg;
- netdev = lif->netdev;
- eid = le64_to_cpu(comp->event.eid);
-
- /* Have we run out of new completions to process? */
- if ((s64)(eid - lif->last_eid) <= 0)
- return false;
-
- lif->last_eid = eid;
-
- dev_dbg(lif->ionic->dev, "notifyq event:\n");
- dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
- comp, sizeof(*comp), true);
-
- switch (le16_to_cpu(comp->event.ecode)) {
- case IONIC_EVENT_LINK_CHANGE:
- ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
- break;
- case IONIC_EVENT_RESET:
- if (lif->ionic->idev.fw_status_ready &&
- !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
- !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "Reset event dropped\n");
- clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
- } else {
- work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- }
- }
- break;
- default:
- netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
- comp->event.ecode, eid);
- break;
- }
-
- return true;
-}
-
-static bool ionic_adminq_service(struct ionic_cq *cq,
- struct ionic_cq_info *cq_info)
-{
- struct ionic_admin_comp *comp = cq_info->cq_desc;
-
- if (!color_match(comp->color, cq->done_color))
- return false;
-
- ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
-
- return true;
-}
-
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
@@ -1252,8 +1191,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
- ionic_tx_service, NULL, NULL);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1640,6 +1578,12 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->priv_flags |= IFF_UNICAST_FLT |
IFF_LIVE_ADDR_CHANGE;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
}
@@ -1777,6 +1721,21 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
return err;
}
+static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
+ struct bpf_prog *xdp_prog)
+{
+ if (!xdp_prog)
+ return true;
+
+ if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
+ return true;
+
+ if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
+ return true;
+
+ return false;
+}
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1789,8 +1748,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
.mtu = cpu_to_le32(new_mtu),
},
};
+ struct bpf_prog *xdp_prog;
int err;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
+ if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
+ return -EINVAL;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -2070,6 +2034,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2101,6 +2066,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2166,6 +2132,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
+ err = ionic_xdp_queues_config(lif);
+ if (err)
+ return err;
+
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
@@ -2211,6 +2181,8 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
+ ionic_xdp_queues_config(lif);
+
return err;
}
@@ -2668,11 +2640,151 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+{
+ struct xdp_rxq_info *xi;
+
+ if (!q->xdp_rxq_info)
+ return;
+
+ xi = q->xdp_rxq_info;
+ q->xdp_rxq_info = NULL;
+
+ xdp_rxq_info_unreg(xi);
+ kfree(xi);
+}
+
+static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+{
+ struct xdp_rxq_info *rxq_info;
+ int err;
+
+ rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+ if (!rxq_info)
+ return -ENOMEM;
+
+ err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
+ goto err_out;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
+ xdp_rxq_info_unreg(rxq_info);
+ goto err_out;
+ }
+
+ q->xdp_rxq_info = rxq_info;
+
+ return 0;
+
+err_out:
+ kfree(rxq_info);
+ return err;
+}
+
+static int ionic_xdp_queues_config(struct ionic_lif *lif)
+{
+ unsigned int i;
+ int err;
+
+ if (!lif->rxqcqs)
+ return 0;
+
+ /* There's no need to rework memory if not going to/from NULL program.
+ * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
+ * This way we don't need to keep an *xdp_prog in every queue struct.
+ */
+ if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
+ return 0;
+
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ struct ionic_queue *q = &lif->rxqcqs[i]->q;
+
+ if (q->xdp_rxq_info) {
+ ionic_xdp_unregister_rxq_info(q);
+ continue;
+ }
+
+ err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
+ if (err) {
+ dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
+ i, err);
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
+ ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
+
+ return err;
+}
+
+static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ u32 maxfs;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
+ netdev_info(lif->netdev, XDP_ERR_SPLIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
+#define XDP_ERR_MTU "MTU is too large for XDP without frags support"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
+ netdev_info(lif->netdev, XDP_ERR_MTU);
+ return -EINVAL;
+ }
+
+ maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
+ if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
+ maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
+ netdev->max_mtu = maxfs;
+
+ if (!netif_running(netdev)) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else {
+ mutex_lock(&lif->queue_lock);
+ ionic_stop_queues_reconfig(lif);
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ionic_xdp_config(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
.ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
+ .ndo_bpf = ionic_xdp,
+ .ndo_xdp_xmit = ionic_xdp_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
.ndo_set_features = ionic_set_features,
@@ -2755,6 +2867,8 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
+ swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
swap(a->q_base_pa, b->q_base_pa);
swap(a->q_size, b->q_size);
@@ -2770,7 +2884,6 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->cq.desc_size, b->cq.desc_size);
swap(a->cq.base, b->cq.base);
swap(a->cq.base_pa, b->cq.base_pa);
- swap(a->cq.info, b->cq.info);
swap(a->cq_base, b->cq_base);
swap(a->cq_base_pa, b->cq_base_pa);
swap(a->cq_size, b->cq_size);
@@ -2834,6 +2947,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2842,6 +2956,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &tx_qcqs[i]);
if (err)
goto err_out;
@@ -2863,6 +2978,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2871,6 +2987,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rx_qcqs[i]);
if (err)
goto err_out;
@@ -3391,9 +3508,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
napi_enable(&qcq->napi);
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3442,7 +3562,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
/* preset the callback info */
- q->info[0].cb_arg = lif;
+ q->admin_info[0].ctx = lif;
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3559,7 +3679,10 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
}
- err = ionic_init_nic_features(lif);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ err = ionic_set_nic_features(lif, lif->netdev->features);
+ else
+ err = ionic_init_nic_features(lif);
if (err)
goto err_out_notifyq_deinit;
@@ -3691,6 +3814,7 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
struct ionic_dev *idev;
+ u16 max_frags;
int qtype;
int err;
@@ -3758,17 +3882,16 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride);
- if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
- qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
- dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
- qtype, qti->max_sg_elems);
- }
+ if (qtype == IONIC_QTYPE_TXQ)
+ max_frags = IONIC_TX_MAX_FRAGS;
+ else if (qtype == IONIC_QTYPE_RXQ)
+ max_frags = IONIC_RX_MAX_FRAGS;
+ else
+ max_frags = 1;
- if (qti->max_sg_elems > MAX_SKB_FRAGS) {
- qti->max_sg_elems = MAX_SKB_FRAGS;
- dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
- qtype, qti->max_sg_elems);
- }
+ qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS);
+ dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n",
+ qtype, qti->max_sg_elems);
}
}
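
ionic_xdp_config() above installs or removes a program by quiescing the queues, exchanging lif->xdp_prog, restarting, and only then dropping the old program's reference. A simplified standalone sketch of that swap pattern, using C11 atomics and placeholder queue helpers in place of the kernel's xchg(), bpf_prog_put() and queue_lock:

/* Simplified sketch of the program-swap pattern in ionic_xdp_config():
 * quiesce the datapath, atomically exchange the program pointer,
 * restart, then release the old program. The lock and "queues" are
 * stand-ins, not the driver's.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct prog { const char *name; };

static _Atomic(struct prog *) active_prog;

static void stop_queues(void)  { printf("queues stopped\n"); }
static void start_queues(void) { printf("queues restarted\n"); }

static void prog_put(struct prog *p)
{
	if (p) {
		printf("released old program '%s'\n", p->name);
		free(p);
	}
}

static void install_prog(struct prog *new_prog, int running)
{
	struct prog *old;

	if (!running) {
		old = atomic_exchange(&active_prog, new_prog);
	} else {
		stop_queues();	/* datapath never observes a half-swap */
		old = atomic_exchange(&active_prog, new_prog);
		start_queues();
	}
	prog_put(old);
}

int main(void)
{
	struct prog *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->name = "xdp-demo";
	install_prog(p, 1);	/* attach while "running" */
	install_prog(NULL, 1);	/* detach, old program is released */
	return 0;
}
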
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 61548b3eea93..08f4266fe2aa 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_tx_stats {
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_frames;
};
struct ionic_rx_stats {
@@ -51,6 +52,11 @@ struct ionic_rx_stats {
u64 alloc_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -65,25 +71,25 @@ struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
u32 q_size;
+ u32 cq_size;
void *cq_base;
dma_addr_t cq_base_pa;
- u32 cq_size;
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
+ unsigned int flags;
void __iomem *cmb_q_base;
phys_addr_t cmb_q_base_pa;
u32 cmb_q_size;
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
+ struct timer_list napi_deadline;
struct ionic_queue q;
struct ionic_cq cq;
- struct ionic_intr_info intr;
- struct timer_list napi_deadline;
struct napi_struct napi;
- unsigned int flags;
struct ionic_qcq *napi_qcq;
+ struct ionic_intr_info intr;
struct dentry *dentry;
};
@@ -135,6 +141,12 @@ struct ionic_lif_sw_stats {
u64 hw_rx_over_errors;
u64 hw_rx_missed_errors;
u64 hw_tx_aborted_errors;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ u64 xdp_frames;
};
enum ionic_lif_state_flags {
@@ -230,6 +242,7 @@ struct ionic_lif {
struct ionic_phc *phc;
struct dentry *dentry;
+ struct bpf_prog *xdp_prog;
};
struct ionic_phc {
@@ -314,7 +327,7 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
{
- return unlikely(q->features & IONIC_TXQ_F_HWSTAMP);
+ return q->features & IONIC_TXQ_F_HWSTAMP;
}
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 165ab08ad2dd..c1259324b0be 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -15,7 +15,7 @@
#include "ionic_debugfs.h"
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
-MODULE_AUTHOR("Pensando Systems, Inc");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
MODULE_LICENSE("GPL");
static const char *ionic_error_to_str(enum ionic_status_code code)
@@ -190,7 +190,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_admin_cmd *desc;
unsigned long irqflags;
struct ionic_queue *q;
@@ -203,10 +204,10 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
q = &lif->adminqcq->q;
while (q->tail_idx != q->head_idx) {
- desc_info = &q->info[q->tail_idx];
- memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
+ desc = &q->adminq[q->tail_idx];
+ desc_info = &q->admin_info[q->tail_idx];
+ memset(desc, 0, sizeof(union ionic_adminq_cmd));
+ desc_info->ctx = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
@@ -246,25 +247,93 @@ static int ionic_adminq_check_err(struct ionic_lif *lif,
return err;
}
-static void ionic_adminq_cb(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+bool ionic_notifyq_service(struct ionic_cq *cq)
{
- struct ionic_admin_ctx *ctx = cb_arg;
+ struct ionic_deferred_work *work;
+ union ionic_notifyq_comp *comp;
+ struct net_device *netdev;
+ struct ionic_queue *q;
+ struct ionic_lif *lif;
+ u64 eid;
+
+ comp = &((union ionic_notifyq_comp *)cq->base)[cq->tail_idx];
+
+ q = cq->bound_q;
+ lif = q->admin_info[0].ctx;
+ netdev = lif->netdev;
+ eid = le64_to_cpu(comp->event.eid);
+
+ /* Have we run out of new completions to process? */
+ if ((s64)(eid - lif->last_eid) <= 0)
+ return false;
+
+ lif->last_eid = eid;
+
+ dev_dbg(lif->ionic->dev, "notifyq event:\n");
+ dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ switch (le16_to_cpu(comp->event.ecode)) {
+ case IONIC_EVENT_LINK_CHANGE:
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
+ break;
+ case IONIC_EVENT_RESET:
+ if (lif->ionic->idev.fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "Reset event dropped\n");
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
+ }
+ break;
+ default:
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
+ comp->event.ecode, eid);
+ break;
+ }
+
+ return true;
+}
+
+bool ionic_adminq_service(struct ionic_cq *cq)
+{
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_queue *q = cq->bound_q;
struct ionic_admin_comp *comp;
+ u16 index;
- if (!ctx)
- return;
+ comp = &((struct ionic_admin_comp *)cq->base)[cq->tail_idx];
- comp = cq_info->cq_desc;
+ if (!color_match(comp->color, cq->done_color))
+ return false;
- memcpy(&ctx->comp, comp, sizeof(*comp));
+ /* check for empty queue */
+ if (q->tail_idx == q->head_idx)
+ return false;
+
+ do {
+ desc_info = &q->admin_info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ if (likely(desc_info->ctx)) {
+ struct ionic_admin_ctx *ctx = desc_info->ctx;
- dev_dbg(q->dev, "comp admin queue command:\n");
- dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
- &ctx->comp, sizeof(ctx->comp), true);
+ memcpy(&ctx->comp, comp, sizeof(*comp));
- complete_all(&ctx->work);
+ dev_dbg(q->dev, "comp admin queue command:\n");
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx->comp, sizeof(ctx->comp), true);
+ complete_all(&ctx->work);
+ desc_info->ctx = NULL;
+ }
+ } while (index != le16_to_cpu(comp->comp_index));
+
+ return true;
}
bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
@@ -298,7 +367,8 @@ bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_admin_cmd *desc;
unsigned long irqflags;
struct ionic_queue *q;
int err = 0;
@@ -320,14 +390,17 @@ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
- desc_info = &q->info[q->head_idx];
- memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
+ desc_info = &q->admin_info[q->head_idx];
+ desc_info->ctx = ctx;
+
+ desc = &q->adminq[q->head_idx];
+ memcpy(desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
- ionic_q_post(q, true, ionic_adminq_cb, ctx);
+ ionic_q_post(q, true);
err_out:
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
@@ -416,6 +489,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
+ if (!idev->dev_cmd_regs)
+ return;
+
iowrite32(0, &idev->dev_cmd_regs->doorbell);
memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
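
The relocated ionic_notifyq_service() above still decides whether a completion is new with (s64)(eid - lif->last_eid) <= 0, a signed-difference test that stays correct across 64-bit counter wrap, in the same spirit as the kernel's time_after() helpers. A standalone sketch of that comparison:

/* Standalone sketch of the wrap-safe event-ID test used in
 * ionic_notifyq_service(): casting the unsigned difference to a signed
 * type makes "newer than" robust across 64-bit counter wrap.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool eid_is_new(uint64_t eid, uint64_t last_eid)
{
	/* <= 0 means the completion is old or a repeat; > 0 means newer */
	return (int64_t)(eid - last_eid) > 0;
}

int main(void)
{
	uint64_t last = UINT64_MAX - 1;	/* counter about to wrap */

	printf("%d\n", eid_is_new(last, last));		/* 0: same eid */
	printf("%d\n", eid_is_new(last + 1, last));	/* 1: next eid */
	printf("%d\n", eid_is_new(last + 2, last));	/* 1: wrapped past 0 */
	printf("%d\n", eid_is_new(last - 5, last));	/* 0: stale */
	return 0;
}
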
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 1f6022fb7679..0107599a9dd4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -27,6 +27,12 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(hw_rx_over_errors),
IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+ IONIC_LIF_STAT_DESC(xdp_drop),
+ IONIC_LIF_STAT_DESC(xdp_aborted),
+ IONIC_LIF_STAT_DESC(xdp_pass),
+ IONIC_LIF_STAT_DESC(xdp_tx),
+ IONIC_LIF_STAT_DESC(xdp_redirect),
+ IONIC_LIF_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_port_stats_desc[] = {
@@ -135,6 +141,7 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(csum_none),
IONIC_TX_STAT_DESC(csum),
IONIC_TX_STAT_DESC(vlan_inserted),
+ IONIC_TX_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -149,6 +156,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(hwstamp_invalid),
IONIC_RX_STAT_DESC(dropped),
IONIC_RX_STAT_DESC(vlan_stripped),
+ IONIC_RX_STAT_DESC(xdp_drop),
+ IONIC_RX_STAT_DESC(xdp_aborted),
+ IONIC_RX_STAT_DESC(xdp_pass),
+ IONIC_RX_STAT_DESC(xdp_tx),
+ IONIC_RX_STAT_DESC(xdp_redirect),
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
@@ -171,6 +183,7 @@ static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
stats->tx_csum += txstats->csum;
stats->tx_hwstamp_valid += txstats->hwstamp_valid;
stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
+ stats->xdp_frames += txstats->xdp_frames;
}
static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
@@ -185,6 +198,11 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
stats->rx_csum_error += rxstats->csum_error;
stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
+ stats->xdp_drop += rxstats->xdp_drop;
+ stats->xdp_aborted += rxstats->xdp_aborted;
+ stats->xdp_pass += rxstats->xdp_pass;
+ stats->xdp_tx += rxstats->xdp_tx;
+ stats->xdp_redirect += rxstats->xdp_redirect;
}
static void ionic_get_lif_stats(struct ionic_lif *lif,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 6f4776759863..5dba6d2d633c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -5,27 +5,40 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
- ionic_desc_cb cb_func, void *cb_arg)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len);
+
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
+ size_t offset, size_t len);
+
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info);
+
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_comp *comp);
+
+static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
- ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ ionic_q_post(q, ring_dbell);
}
-static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
- ionic_desc_cb cb_func, void *cb_arg)
+static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
{
- ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ ionic_q_post(q, ring_dbell);
}
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
- unsigned long now, then, dif;
struct netdev_queue *netdev_txq;
+ unsigned long now, then, dif;
struct net_device *netdev;
netdev = q->lif->netdev;
@@ -83,46 +96,61 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
return true;
}
-static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
+static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
+{
+ if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
+ return q->txq_sgl_v1[q->head_idx].elems;
+ else
+ return q->txq_sgl[q->head_idx].elems;
+}
+
+static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
+ struct ionic_queue *q)
{
- return netdev_get_tx_queue(q->lif->netdev, q->index);
+ return netdev_get_tx_queue(netdev, q->index);
+}
+
+static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
+{
+ return page_address(buf_info->page) + buf_info->page_offset;
+}
+
+static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
+{
+ return buf_info->dma_addr + buf_info->page_offset;
+}
+
+static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+{
+ return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
}
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
- struct ionic_rx_stats *stats;
- struct device *dev;
+ struct device *dev = q->dev;
+ dma_addr_t dma_addr;
struct page *page;
- dev = q->dev;
- stats = q_to_rx_stats(q);
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
- netdev->name, q->name);
- return -EINVAL;
- }
-
page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return -ENOMEM;
}
- buf_info->dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
+ dma_addr = dma_map_page(dev, page, 0,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
__free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
- netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->dma_map_err++;
return -EIO;
}
+ buf_info->dma_addr = dma_addr;
buf_info->page = page;
buf_info->page_offset = 0;
@@ -132,12 +160,11 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
static void ionic_rx_page_free(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->dev;
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in free\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
return;
}
@@ -150,7 +177,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 used)
+ struct ionic_buf_info *buf_info, u32 len)
{
u32 size;
@@ -162,7 +189,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
if (page_to_nid(buf_info->page) != numa_mem_id())
return false;
- size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
+ size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
buf_info->page_offset += size;
if (buf_info->page_offset >= IONIC_PAGE_SIZE)
return false;
@@ -172,88 +199,96 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
return true;
}
-static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+static void ionic_rx_add_skb_frag(struct ionic_queue *q,
+ struct sk_buff *skb,
+ struct ionic_buf_info *buf_info,
+ u32 off, u32 len,
+ bool synced)
+{
+ if (!synced)
+ dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
+ off, len, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf_info->page, buf_info->page_offset + off,
+ len,
+ IONIC_PAGE_SIZE);
+
+ if (!ionic_rx_buf_recycle(q, buf_info, len)) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ }
+}
+
+static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
+ struct ionic_rx_desc_info *desc_info,
+ unsigned int headroom,
+ unsigned int len,
+ unsigned int num_sg_elems,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
- struct ionic_rx_stats *stats;
- struct device *dev = q->dev;
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
- u16 len;
-
- stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
-
prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(q->dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return NULL;
}
- i = comp->num_sg_elems + 1;
- do {
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
-
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
- len -= frag_len;
-
- dma_sync_single_for_cpu(dev,
- buf_info->dma_addr + buf_info->page_offset,
- frag_len, DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset, frag_len,
- IONIC_PAGE_SIZE);
-
- if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
- dma_unmap_page(dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ if (headroom)
+ frag_len = min_t(u16, len,
+ IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ else
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
- buf_info++;
+ if (unlikely(!buf_info->page))
+ goto err_bad_buf_page;
+ ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
+ len -= frag_len;
+ buf_info++;
- i--;
- } while (i > 0);
+ for (i = 0; i < num_sg_elems; i++, buf_info++) {
+ if (unlikely(!buf_info->page))
+ goto err_bad_buf_page;
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
+ len -= frag_len;
+ }
return skb;
+
+err_bad_buf_page:
+ dev_kfree_skb(skb);
+ return NULL;
}
-static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
+ struct ionic_queue *q,
+ struct ionic_rx_desc_info *desc_info,
+ unsigned int headroom,
+ unsigned int len,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
- struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb;
- u16 len;
-
- stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return NULL;
}
@@ -262,30 +297,343 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return NULL;
}
- dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
- dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
+ if (!synced)
+ dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
+ dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, q->lif->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
return skb;
}
+static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info)
+{
+ unsigned int nbufs = desc_info->nbufs;
+ struct ionic_buf_info *buf_info;
+ struct device *dev = q->dev;
+ int i;
+
+ if (!nbufs)
+ return;
+
+ buf_info = desc_info->bufs;
+ dma_unmap_single(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+
+ buf_info++;
+ for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
+ dma_unmap_page(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+ }
+
+ if (desc_info->act == XDP_REDIRECT)
+ xdp_return_frame(desc_info->xdpf);
+
+ desc_info->nbufs = 0;
+ desc_info->xdpf = NULL;
+ desc_info->act = 0;
+}
+
+static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+ enum xdp_action act, struct page *page, int off,
+ bool ring_doorbell)
+{
+ struct ionic_tx_desc_info *desc_info;
+ struct ionic_buf_info *buf_info;
+ struct ionic_tx_stats *stats;
+ struct ionic_txq_desc *desc;
+ size_t len = frame->len;
+ dma_addr_t dma_addr;
+ u64 cmd;
+
+ desc_info = &q->tx_info[q->head_idx];
+ desc = &q->txq[q->head_idx];
+ buf_info = desc_info->bufs;
+ stats = q_to_tx_stats(q);
+
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (!dma_addr)
+ return -EIO;
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = len;
+ buf_info->page = page;
+ buf_info->page_offset = off;
+
+ desc_info->nbufs = 1;
+ desc_info->xdpf = frame;
+ desc_info->act = act;
+
+ if (xdp_frame_has_frags(frame)) {
+ struct ionic_txq_sg_elem *elem;
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+ int i;
+
+ bi = &buf_info[1];
+ sinfo = xdp_get_shared_info_from_frame(frame);
+ frag = sinfo->frags;
+ elem = ionic_tx_sg_elems(q);
+ for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
+ dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+ if (!dma_addr) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
+ bi->dma_addr = dma_addr;
+ bi->len = skb_frag_size(frag);
+ bi->page = skb_frag_page(frag);
+
+ elem->addr = cpu_to_le64(bi->dma_addr);
+ elem->len = cpu_to_le16(bi->len);
+ elem++;
+
+ desc_info->nbufs++;
+ }
+ }
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
+ 0, (desc_info->nbufs - 1), buf_info->dma_addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(len);
+ desc->csum_start = 0;
+ desc->csum_offset = 0;
+
+ stats->xdp_frames++;
+ stats->pkts++;
+ stats->bytes += len;
+
+ ionic_txq_post(q, ring_doorbell);
+
+ return 0;
+}
+
+int ionic_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **xdp_frames, u32 flags)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ int nxmit;
+ int space;
+ int cpu;
+ int qi;
+
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* AdminQ is assumed on cpu 0, while we attempt to affinitize the
+ * TxRx queue pairs 0..n-1 on cpus 1..n. We try to keep with that
+ * affinitization here, but of course irqbalance and friends might
+ * have juggled things anyway, so we have to check for the 0 case.
+ */
+ cpu = smp_processor_id();
+ qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
+
+ txq = &lif->txqcqs[qi]->q;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, cpu);
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ return -EIO;
+ }
+
+ space = min_t(int, n, ionic_q_space_avail(txq));
+ for (nxmit = 0; nxmit < space ; nxmit++) {
+ if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
+ XDP_REDIRECT,
+ virt_to_page(xdp_frames[nxmit]->data),
+ 0, false)) {
+ nxmit--;
+ break;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
+ txq->dbval | txq->head_idx);
+
+ netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 4, 4);
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
+static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ struct net_device *netdev,
+ struct bpf_prog *xdp_prog,
+ struct ionic_queue *rxq,
+ struct ionic_buf_info *buf_info,
+ int len)
+{
+ u32 xdp_action = XDP_ABORTED;
+ struct xdp_buff xdp_buf;
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int remain_len;
+ int frag_len;
+ int err = 0;
+
+ xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
+ frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
+ XDP_PACKET_HEADROOM, frag_len, false);
+
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
+ XDP_PACKET_HEADROOM, len,
+ DMA_FROM_DEVICE);
+
+ prefetchw(&xdp_buf.data_hard_start);
+
+ /* We limit the MTU to a single buffer if !xdp_has_frags, so
+ * if the received length is bigger than one buffer
+ * then we know we have frag info to gather.
+ */
+ remain_len = len - frag_len;
+ if (remain_len) {
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+
+ bi = buf_info;
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp_buf);
+
+ do {
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+ err = -ENOSPC;
+ goto out_xdp_abort;
+ }
+
+ frag = &sinfo->frags[sinfo->nr_frags];
+ sinfo->nr_frags++;
+ bi++;
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
+ 0, frag_len, DMA_FROM_DEVICE);
+ skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
+ sinfo->xdp_frags_size += frag_len;
+ remain_len -= frag_len;
+
+ if (page_is_pfmemalloc(bi->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp_buf);
+ } while (remain_len > 0);
+ }
+
+ xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
+
+ switch (xdp_action) {
+ case XDP_PASS:
+ stats->xdp_pass++;
+ return false; /* false = we didn't consume the packet */
+
+ case XDP_DROP:
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_drop++;
+ break;
+
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp_buf);
+ if (!xdpf)
+ goto out_xdp_abort;
+
+ txq = rxq->partner;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, smp_processor_id());
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ goto out_xdp_abort;
+ }
+
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
+ buf_info->page,
+ buf_info->page_offset,
+ true);
+ __netif_tx_unlock(nq);
+ if (err) {
+ netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+ goto out_xdp_abort;
+ }
+ stats->xdp_tx++;
+
+ /* the Tx completion will free the buffers */
+ break;
+
+ case XDP_REDIRECT:
+ /* unmap the pages before handing them to a different device */
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
+ if (err) {
+ netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
+ goto out_xdp_abort;
+ }
+ buf_info->page = NULL;
+ rxq->xdp_flush = true;
+ stats->xdp_redirect++;
+ break;
+
+ case XDP_ABORTED:
+ default:
+ goto out_xdp_abort;
+ }
+
+ return true;
+
+out_xdp_abort:
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_aborted++;
+
+ return true;
+}
+
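When the received length exceeds the linear part of the first buffer, the loop in ionic_run_xdp() above attaches the remainder as frags. A minimal sketch of that split, with assumed example buffer sizes (the real sizes come from the headroom/MTU math in the driver):

/* Illustrative sketch, not part of the patch: splitting a receive length
 * across a linear area and follow-on frag buffers, mirroring the do/while
 * loop in ionic_run_xdp().  All sizes are assumed example values.
 */
#include <stdio.h>

#define EXAMPLE_LINEAR_LEN	3054	/* assumed linear area after headroom */
#define EXAMPLE_BUF_LEN		4096	/* assumed size of each extra buffer */

int main(void)
{
	int len = 9000;			/* assumed jumbo receive length */
	int frag_len = len < EXAMPLE_LINEAR_LEN ? len : EXAMPLE_LINEAR_LEN;
	int remain_len = len - frag_len;
	int nr_frags = 0;

	printf("linear: %d bytes\n", frag_len);
	while (remain_len > 0) {
		frag_len = remain_len < EXAMPLE_BUF_LEN ? remain_len : EXAMPLE_BUF_LEN;
		printf("frag %d: %d bytes\n", nr_frags++, frag_len);
		remain_len -= frag_len;
	}
	return 0;
}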
static void ionic_rx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg)
+ struct ionic_rx_desc_info *desc_info,
+ struct ionic_rxq_comp *comp)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct ionic_rxq_comp *comp;
+ struct bpf_prog *xdp_prog;
+ unsigned int headroom;
struct sk_buff *skb;
-
- comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
+ bool synced = false;
+ bool use_copybreak;
+ u16 len;
stats = q_to_rx_stats(q);
@@ -294,13 +642,25 @@ static void ionic_rx_clean(struct ionic_queue *q,
return;
}
+ len = le16_to_cpu(comp->len);
stats->pkts++;
- stats->bytes += le16_to_cpu(comp->len);
+ stats->bytes += len;
+
+ xdp_prog = READ_ONCE(q->lif->xdp_prog);
+ if (xdp_prog) {
+ if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
+ return;
+ synced = true;
+ }
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
- skb = ionic_rx_copybreak(q, desc_info, comp);
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ use_copybreak = len <= q->lif->rx_copybreak;
+ if (use_copybreak)
+ skb = ionic_rx_copybreak(netdev, q, desc_info,
+ headroom, len, synced);
else
- skb = ionic_rx_frags(q, desc_info, comp);
+ skb = ionic_rx_build_skb(q, desc_info, headroom, len,
+ comp->num_sg_elems, synced);
if (unlikely(!skb)) {
stats->dropped++;
@@ -352,7 +712,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
u64 hwstamp;
cq_desc_hwstamp =
- cq_info->cq_desc +
+ (void *)comp +
qcq->cq.desc_size -
sizeof(struct ionic_rxq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -367,19 +727,19 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ if (use_copybreak)
napi_gro_receive(&qcq->napi, skb);
else
napi_gro_frags(&qcq->napi);
}
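ionic_rx_clean() above picks between the copybreak path and building the skb from page frags purely on length. A minimal sketch of that decision with an assumed threshold:

/* Illustrative sketch, not part of the patch: the rx_copybreak decision
 * in ionic_rx_clean().  The threshold is an assumed example value.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rx_copybreak = 256;	/* assumed threshold */
	unsigned int lens[] = { 64, 256, 257, 1514 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> %s\n", lens[i],
		       lens[i] <= rx_copybreak ? "copybreak copy" : "page frags");
	return 0;
}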
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_rx_service(struct ionic_cq *cq)
{
+ struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
- struct ionic_desc_info *desc_info;
struct ionic_rxq_comp *comp;
- comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+ comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->pkt_type_color, cq->done_color))
return false;
@@ -391,31 +751,29 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false;
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->rx_info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per cq completion */
- ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
-
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
+ ionic_rx_clean(q, desc_info, comp);
return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
- void __iomem *cmb_desc,
void *desc)
{
- if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
- memcpy_toio(cmb_desc, desc, q->desc_size);
+ /* Since Rx and Tx descriptors are the same size, we can
+ * save an instruction or two and skip the qtype check.
+ */
+ if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
+ memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
- struct ionic_desc_info *desc_info;
- struct ionic_rxq_sg_desc *sg_desc;
+ struct ionic_rx_desc_info *desc_info;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
@@ -424,8 +782,9 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int frag_len;
unsigned int nfrags;
unsigned int n_fill;
- unsigned int i, j;
unsigned int len;
+ unsigned int i;
+ unsigned int j;
n_fill = ionic_q_space_avail(q);
@@ -434,13 +793,16 @@ void ionic_rx_fill(struct ionic_queue *q)
if (n_fill < fill_threshold)
return;
- len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ len = netdev->mtu + VLAN_ETH_HLEN;
for (i = n_fill; i; i--) {
+ unsigned int headroom;
+ unsigned int buf_len;
+
nfrags = 0;
remain_len = len;
- desc_info = &q->info[q->head_idx];
- desc = desc_info->desc;
+ desc = &q->rxq[q->head_idx];
+ desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
if (!buf_info->page) { /* alloc a new buffer? */
@@ -451,19 +813,26 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- /* fill main descriptor - buf[0] */
- desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ /* fill main descriptor - buf[0]
+ * XDP uses space in the first buffer, so account for
+ * headroom, tailroom, and the IP header in the first frag size.
+ */
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ if (q->xdp_rxq_info)
+ buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+ else
+ buf_len = ionic_rx_buf_size(buf_info);
+ frag_len = min_t(u16, len, buf_len);
+
+ desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
nfrags++;
/* fill sg descriptors - buf[1..n] */
- sg_desc = desc_info->sg_desc;
- for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
- sg_elem = &sg_desc->elems[j];
+ sg_elem = q->rxq_sgl[q->head_idx].elems;
+ for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
if (!buf_info->page) { /* alloc a new sg buffer? */
if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
sg_elem->addr = 0;
@@ -472,10 +841,8 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE -
- buf_info->page_offset));
+ sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -483,18 +850,16 @@ void ionic_rx_fill(struct ionic_queue *q)
}
/* clear end sg element as a sentinel */
- if (j < q->max_sg_elems) {
- sg_elem = &sg_desc->elems[j];
+ if (j < q->max_sg_elems)
memset(sg_elem, 0, sizeof(*sg_elem));
- }
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
- ionic_rxq_post(q, false, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false);
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
@@ -509,21 +874,19 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_rx_desc_info *desc_info;
struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
- desc_info = &q->info[i];
- for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ desc_info = &q->rx_info[i];
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
buf_info = &desc_info->bufs[j];
if (buf_info->page)
ionic_rx_page_free(q, buf_info);
}
desc_info->nbufs = 0;
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
}
q->head_idx = 0;
@@ -568,16 +931,10 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi);
- struct ionic_dev *idev;
- struct ionic_lif *lif;
u32 work_done = 0;
u32 flags = 0;
- lif = cq->bound_q->lif;
- idev = &lif->ionic->idev;
-
- work_done = ionic_cq_service(cq, budget,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, budget);
if (unlikely(!budget))
return budget;
@@ -590,7 +947,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
+ ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index,
work_done, flags);
}
@@ -601,26 +958,30 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
return work_done;
}
+static void ionic_xdp_do_flush(struct ionic_cq *cq)
+{
+ if (cq->bound_q->xdp_flush) {
+ xdp_do_flush();
+ cq->bound_q->xdp_flush = false;
+ }
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi);
- struct ionic_dev *idev;
- struct ionic_lif *lif;
u32 work_done = 0;
u32 flags = 0;
if (unlikely(!budget))
return budget;
- lif = cq->bound_q->lif;
- idev = &lif->ionic->idev;
-
work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL);
ionic_rx_fill(cq->bound_q);
+ ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -629,7 +990,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
+ ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index,
work_done, flags);
}
@@ -646,7 +1007,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
struct ionic_qcq *txqcq;
- struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
bool resched = false;
@@ -655,12 +1015,10 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
u32 flags = 0;
lif = rxcq->bound_q->lif;
- idev = &lif->ionic->idev;
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
- ionic_tx_service, NULL, NULL);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
if (unlikely(!budget))
return budget;
@@ -670,6 +1028,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
+ ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -678,7 +1037,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (rx_work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+ ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
tx_work_done + rx_work_done, flags);
}
@@ -695,15 +1054,14 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
void *data, size_t len)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- q->lif->netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_tx_stats(q)->dma_map_err++;
return 0;
}
return dma_addr;
@@ -713,24 +1071,23 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
const skb_frag_t *frag,
size_t offset, size_t len)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
- q->lif->netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_tx_stats(q)->dma_map_err++;
+ return 0;
}
return dma_addr;
}
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
struct ionic_buf_info *buf_info = desc_info->bufs;
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
unsigned int nfrags;
@@ -738,10 +1095,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (dma_mapping_error(dev, dma_addr)) {
- stats->dma_map_err++;
+ if (!dma_addr)
return -EIO;
- }
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
buf_info++;
@@ -750,10 +1105,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (dma_mapping_error(dev, dma_addr)) {
- stats->dma_map_err++;
+ if (!dma_addr)
goto dma_fail;
- }
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
buf_info++;
@@ -771,12 +1124,13 @@ dma_fail:
dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
}
- dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
+ desc_info->bufs[0].len, DMA_TO_DEVICE);
return -EIO;
}
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
struct ionic_buf_info *buf_info = desc_info->bufs;
struct device *dev = q->dev;
@@ -785,41 +1139,48 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
if (!desc_info->nbufs)
return;
- dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ dma_unmap_single(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
buf_info++;
for (i = 1; i < desc_info->nbufs; i++, buf_info++)
- dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
desc_info->nbufs = 0;
}
static void ionic_tx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg)
+ struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_comp *comp)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
- struct sk_buff *skb = cb_arg;
- u16 qi;
+ struct sk_buff *skb;
+
+ if (desc_info->xdpf) {
+ ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ stats->clean++;
+
+ if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
+ netif_wake_subqueue(q->lif->netdev, q->index);
+
+ return;
+ }
ionic_tx_desc_unmap_bufs(q, desc_info);
+ skb = desc_info->skb;
if (!skb)
return;
- qi = skb_get_queue_mapping(skb);
-
- if (ionic_txq_hwstamp_enabled(q)) {
- if (cq_info) {
+ if (unlikely(ionic_txq_hwstamp_enabled(q))) {
+ if (comp) {
struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp;
u64 hwstamp;
cq_desc_hwstamp =
- cq_info->cq_desc +
+ (void *)comp +
qcq->cq.desc_size -
sizeof(struct ionic_txq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -837,27 +1198,25 @@ static void ionic_tx_clean(struct ionic_queue *q,
stats->hwstamp_invalid++;
}
}
-
- } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
- netif_wake_subqueue(q->lif->netdev, qi);
}
desc_info->bytes = skb->len;
stats->clean++;
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, 1);
}
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+static bool ionic_tx_service(struct ionic_cq *cq,
+ unsigned int *total_pkts, unsigned int *total_bytes)
{
+ struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
- struct ionic_desc_info *desc_info;
struct ionic_txq_comp *comp;
- int bytes = 0;
- int pkts = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
u16 index;
- comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+ comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->color, cq->done_color))
return false;
@@ -866,59 +1225,90 @@ bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
- if (desc_info->cb_arg) {
+ ionic_tx_clean(q, desc_info, comp);
+ if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
+ desc_info->skb = NULL;
}
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ (*total_pkts) += pkts;
+ (*total_bytes) += bytes;
return true;
}
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+{
+ unsigned int work_done = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
+
+ if (work_to_do == 0)
+ return 0;
+
+ while (ionic_tx_service(cq, &pkts, &bytes)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+
+ if (work_done) {
+ struct ionic_queue *q = cq->bound_q;
+
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
+ pkts, bytes,
+ ionic_q_space_avail(q),
+ IONIC_TSO_DESCS_NEEDED);
+ }
+
+ return work_done;
+}
+
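ionic_tx_cq_service() above advances the completion ring with a power-of-two mask and flips the done color once per lap. A minimal sketch of that wrap/color logic, assuming an 8-entry ring:

/* Illustrative sketch, not part of the patch: power-of-two ring wrap and
 * the done-color flip used by ionic_tx_cq_service().  The ring size is an
 * assumed example value and must be a power of two.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_descs = 8;	/* assumed ring size */
	unsigned int tail_idx = 0;
	bool done_color = true;
	int step;

	for (step = 0; step < 20; step++) {
		if (tail_idx == num_descs - 1)
			done_color = !done_color;	/* one flip per lap */
		tail_idx = (tail_idx + 1) & (num_descs - 1);
		printf("step %2d: tail=%u color=%d\n", step, tail_idx, done_color);
	}
	return 0;
}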
void ionic_tx_flush(struct ionic_cq *cq)
{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done;
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs);
if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+ ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_tx_desc_info *desc_info;
int bytes = 0;
int pkts = 0;
/* walk the not completed tx entries, if any */
while (q->head_idx != q->tail_idx) {
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
- if (desc_info->cb_arg) {
+ ionic_tx_clean(q, desc_info, NULL);
+ if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
+ desc_info->skb = NULL;
}
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
}
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);
+
+ netdev_tx_completed_queue(ndq, pkts, bytes);
+ netdev_tx_reset_queue(ndq);
+ }
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
@@ -966,8 +1356,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
+static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -975,7 +1365,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
- struct ionic_txq_desc *desc = desc_info->desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0;
u64 cmd;
@@ -991,22 +1381,23 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
if (start) {
skb_tx_timestamp(skb);
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, false, ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
+ ionic_txq_post(q, false);
} else {
- ionic_txq_post(q, done, NULL, NULL);
+ ionic_txq_post(q, done);
}
}
-static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *desc_info;
+ struct ionic_tx_desc_info *desc_info;
struct ionic_buf_info *buf_info;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
@@ -1028,8 +1419,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
bool encap;
int err;
- desc_info = &q->info[q->head_idx];
- buf_info = desc_info->bufs;
+ desc_info = &q->tx_info[q->head_idx];
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
@@ -1066,6 +1456,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
else
hdrlen = skb_tcp_all_headers(skb);
+ desc_info->skb = skb;
+ buf_info = desc_info->bufs;
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
@@ -1092,8 +1484,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
chunk_len = min(frag_rem, seg_rem);
if (!desc) {
/* fill main descriptor */
- desc = desc_info->txq_desc;
- elem = desc_info->txq_sg_desc->elems;
+ desc = &q->txq[q->head_idx];
+ elem = ionic_tx_sg_elems(q);
desc_addr = frag_addr;
desc_len = chunk_len;
} else {
@@ -1111,13 +1503,13 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(q, desc_info, skb,
+ ionic_tx_tso_post(netdev, q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
start = false;
/* Buffer information is stored with the first tso descriptor */
- desc_info = &q->info[q->head_idx];
+ desc_info = &q->tx_info[q->head_idx];
desc_info->nbufs = 0;
}
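The TSO loop above carves the payload into mss-sized descriptors, so the descriptor count it converges on is just a ceiling division. A minimal sketch with assumed lengths:

/* Illustrative sketch, not part of the patch: the descriptor count the
 * TSO segmentation loop converges on.  Payload and mss are assumed
 * example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int payload = 30000;	/* assumed TSO payload after headers */
	unsigned int mss = 1448;	/* assumed MSS */
	unsigned int segs = (payload + mss - 1) / mss;

	printf("payload %u bytes at mss %u -> %u descriptors\n",
	       payload, mss, segs);
	return 0;
}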
@@ -1130,9 +1522,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
}
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan;
@@ -1160,7 +1552,7 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
@@ -1169,9 +1561,9 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
}
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan;
@@ -1199,20 +1591,20 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
stats->csum_none++;
}
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1];
- struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_txq_sg_elem *elem;
unsigned int i;
+ elem = ionic_tx_sg_elems(q);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
elem->addr = cpu_to_le64(buf_info->dma_addr);
elem->len = cpu_to_le16(buf_info->len);
@@ -1221,14 +1613,18 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
stats->frags += skb_shinfo(skb)->nr_frags;
}
-static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
- struct ionic_desc_info *desc_info = &q->info[q->head_idx];
+ struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ bool ring_dbell = true;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
+ desc_info->skb = skb;
+
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
ionic_tx_calc_csum(q, skb, desc_info);
@@ -1242,16 +1638,22 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(netdev, q);
+
+ if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
+ netif_tx_stop_queue(ndq);
+ ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
+ netdev_xmit_more());
+ }
+ ionic_txq_post(q, ring_dbell);
return 0;
}
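In ionic_tx() above, __netdev_tx_sent_queue() decides whether the doorbell is rung now or deferred to a later packet in the same xmit_more batch. A minimal sketch of the effect, assuming a four-packet batch and ignoring the BQL-limit case that can also force the doorbell:

/* Illustrative sketch, not part of the patch: deferring the doorbell to
 * the last packet of an xmit_more batch.  The batch size is an assumed
 * example value; the real helper may also ring early when BQL fills up.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int batch = 4;		/* assumed packets queued back to back */
	int doorbells = 0;
	int i;

	for (i = 0; i < batch; i++) {
		bool xmit_more = (i < batch - 1);
		bool ring_dbell = !xmit_more;	/* simplified decision */

		if (ring_dbell)
			doorbells++;
	}
	printf("%d packets, %d doorbell write(s)\n", batch, doorbells);
	return 0;
}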
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
bool too_many_frags = false;
skb_frag_t *frag;
int desc_bufs;
@@ -1267,17 +1669,20 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
/* Each desc is mss long max, so a descriptor for each gso_seg */
if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
+ if (!nr_frags)
+ return ndescs;
} else {
ndescs = 1;
- if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+ if (!nr_frags)
+ return ndescs;
+
+ if (unlikely(nr_frags > q->max_sg_elems)) {
too_many_frags = true;
goto linearize;
}
- }
- /* If non-TSO, or no frags to check, we're done */
- if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
+ }
/* We need to scan the skb to be sure that none of the MTU sized
* packets in the TSO will require more sgs per descriptor than we
@@ -1328,36 +1733,17 @@ linearize:
err = skb_linearize(skb);
if (err)
return err;
- stats->linearize++;
+ q_to_tx_stats(q)->linearize++;
}
return ndescs;
}
-static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-{
- int stopped = 0;
-
- if (unlikely(!ionic_q_has_space(q, ndescs))) {
- netif_stop_subqueue(q->lif->netdev, q->index);
- stopped = 1;
-
- /* Might race with ionic_tx_clean, check again */
- smp_rmb();
- if (ionic_q_has_space(q, ndescs)) {
- netif_wake_subqueue(q->lif->netdev, q->index);
- stopped = 0;
- }
- }
-
- return stopped;
-}
-
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_queue *q = &lif->hwstamp_txq->q;
+ struct ionic_queue *q;
int err, ndescs;
/* Does not stop/start txq, because we post to a separate tx queue
@@ -1365,6 +1751,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
* the timestamping queue, it is dropped.
*/
+ q = &lif->hwstamp_txq->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (unlikely(ndescs < 0))
goto err_out_drop;
@@ -1374,9 +1761,9 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
@@ -1414,23 +1801,19 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (ndescs < 0)
goto err_out_drop;
- if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
+ if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
+ ionic_q_space_avail(q),
+ ndescs, ndescs))
return NETDEV_TX_BUSY;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
- /* Stop the queue if there aren't descriptors for the next packet.
- * Since our SG lists per descriptor take care of most of the possible
- * fragmentation, we don't need to have many descriptors available.
- */
- ionic_maybe_stop_tx(q, 4);
-
return NETDEV_TX_OK;
err_out_drop:
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index d7cbaad8a6fb..9e73e324e7a1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -14,7 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget);
int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+bool ionic_rx_service(struct ionic_cq *cq);
+int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 35ec9aab3dc7..51fa880eaf6c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1186,7 +1186,6 @@ static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
- capability = 0;
/* NX2031 always had MN */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
@@ -1197,7 +1196,6 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
-
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT)
return 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5a5dbbb8d8aa..9a1660a12c57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1794,8 +1794,6 @@ qed_rdma_create_srq(void *rdma_cxt,
goto err;
opaque_fid = p_hwfn->hw_info.opaque_fid;
-
- opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 0e240b5ab8d4..ae3ebf0cf999 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1776,7 +1776,7 @@ static int qede_get_tunable(struct net_device *dev,
return 0;
}
-static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -1789,18 +1789,26 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
- edata->advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
- edata->advertised |= ADVERTISED_10000baseT_Full;
- if (current_link.sup_caps & QED_EEE_1G_ADV)
- edata->supported = ADVERTISED_1000baseT_Full;
- if (current_link.sup_caps & QED_EEE_10G_ADV)
- edata->supported |= ADVERTISED_10000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
- edata->lp_advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
- edata->lp_advertised |= ADVERTISED_10000baseT_Full;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_10G_ADV);
edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
edata->eee_enabled = current_link.eee.enable;
@@ -1810,11 +1818,14 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
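The qede_get_eee() conversion above relies on linkmode_mod_bit() setting or clearing a bit depending on whether the corresponding capability flag is present. A minimal sketch of that set-or-clear semantic on a plain bitmap (the real API works on multi-word link-mode masks):

/* Illustrative sketch, not part of the patch: set-or-clear semantics as
 * used by the linkmode_mod_bit() calls in qede_get_eee(), modelled on a
 * single unsigned long.  Bit positions and cap flags are assumed values.
 */
#include <stdbool.h>
#include <stdio.h>

static void mod_bit(unsigned int nr, unsigned long *map, bool set)
{
	if (set)
		*map |= 1UL << nr;
	else
		*map &= ~(1UL << nr);
}

int main(void)
{
	unsigned long advertised = 0;
	unsigned int adv_caps = 0x1;	/* assumed: only the 1G cap set */

	mod_bit(0, &advertised, adv_caps & 0x1);	/* stands in for 1000baseT_Full */
	mod_bit(1, &advertised, adv_caps & 0x2);	/* stands in for 10000baseT_Full */
	printf("advertised bitmap: 0x%lx\n", advertised);
	return 0;
}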
-static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
+ bool unsupp;
if (!edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -1832,21 +1843,26 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
- if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) ||
- ((edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) !=
- edata->advertised)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ unsupp = linkmode_andnot(tmp, edata->advertised, supported);
+ if (unsupp) {
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Invalid advertised capabilities %d\n",
- edata->advertised);
+ "Invalid advertised capabilities %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, edata->advertised);
return -EINVAL;
}
- if (edata->advertised & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised))
params.eee.adv_caps = QED_EEE_1G_ADV;
- if (edata->advertised & ADVERTISED_10000baseT_Full)
- params.eee.adv_caps |= QED_EEE_10G_ADV;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised))
+ params.eee.adv_caps |= QED_EEE_10G_ADV;
+
params.eee.enable = edata->eee_enabled;
params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
params.eee.tx_lpi_timer = edata->tx_lpi_timer;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index cb1746bc0e0c..847fa62c80df 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -215,7 +215,7 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((skb_transport_offset(skb) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 41894d154013..b9dc0071c5de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -446,8 +446,7 @@ static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
encap_descr |= skb_network_offset(skb) << 10;
first_desc->encap_descr = cpu_to_le16(encap_descr);
- first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
- skb->data;
+ first_desc->tcp_hdr_offset = skb_inner_transport_offset(skb);
first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 3270df72541b..4c06f55878de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -771,5 +771,6 @@ static struct platform_driver emac_platform_driver = {
module_platform_driver(emac_platform_driver);
+MODULE_DESCRIPTION("Qualcomm EMAC Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index 4292c89bd35c..6263e4cf47fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -1,22 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- *
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* This module implements the Qualcomm Atheros SPI protocol for
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 356de8ec5d48..828ee9c27578 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* Qualcomm Atheros SPI register definition.
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c
index 6b511f05df61..5302da587620 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros ethernet framing. Every Ethernet frame is surrounded
@@ -162,5 +149,5 @@ EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h
index 928554f11e35..44ed66fdb407 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros
@@ -107,9 +94,6 @@ struct qcafrm_handle {
/* Offset in buffer (borrowed for length too) */
u16 offset;
-
- /* Frame length as kept by this module */
- u16 len;
};
u16 qcafrm_create_header(u8 *buf, u16 len);
@@ -128,17 +112,6 @@ static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
handle->state = handle->init;
}
-/* Gather received bytes and try to extract a full Ethernet frame
- * by following a simple state machine.
- *
- * Return: QCAFRM_GATHER No Ethernet frame fully received yet.
- * QCAFRM_NOHEAD Header expected but not found.
- * QCAFRM_INVLEN QCA7K frame length is invalid
- * QCAFRM_NOTAIL Footer expected but not found.
- * > 0 Number of byte in the fully received
- * Ethernet frame
- */
-
s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte);
#endif /* _QCA_FRAMING_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 1822f2ad8f0d..ff3b89e9028e 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
@@ -255,7 +242,7 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct qcaspi *qca = netdev_priv(dev);
ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
- ring->tx_max_pending = TX_RING_MAX_LEN;
+ ring->tx_max_pending = QCASPI_TX_RING_MAX_LEN;
ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@@ -275,8 +262,8 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
if (qca->spi_thread)
kthread_park(qca->spi_thread);
- qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
- qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+ qca->txr.count = max_t(u32, ring->tx_pending, QCASPI_TX_RING_MIN_LEN);
+ qca->txr.count = min_t(u16, qca->txr.count, QCASPI_TX_RING_MAX_LEN);
if (qca->spi_thread)
kthread_unpark(qca->spi_thread);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h
index 46a785844421..0d98cef3abc4 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.h
+++ b/drivers/net/ethernet/qualcomm/qca_debug.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5f3c11fb3fa2..5799ecc88a87 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros SPI protocol for
@@ -359,7 +346,7 @@ qcaspi_receive(struct qcaspi *qca)
/* Read the packet size. */
qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
- netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
+ netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %04x\n",
available);
if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
@@ -476,7 +463,7 @@ qcaspi_flush_tx_ring(struct qcaspi *qca)
* has been replaced by netif_tx_lock_bh() and so on.
*/
netif_tx_lock_bh(qca->net_dev);
- for (i = 0; i < TX_RING_MAX_LEN; i++) {
+ for (i = 0; i < QCASPI_TX_RING_MAX_LEN; i++) {
if (qca->txr.skb[i]) {
dev_kfree_skb(qca->txr.skb[i]);
qca->txr.skb[i] = NULL;
@@ -687,7 +674,7 @@ static int
qcaspi_netdev_open(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
- int ret = 0;
+ struct task_struct *thread;
if (!qca)
return -EINVAL;
@@ -697,23 +684,18 @@ qcaspi_netdev_open(struct net_device *dev)
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
- qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
- qca, "%s", dev->name);
+ thread = kthread_run((void *)qcaspi_spi_thread,
+ qca, "%s", dev->name);
- if (IS_ERR(qca->spi_thread)) {
+ if (IS_ERR(thread)) {
netdev_err(dev, "%s: unable to start kernel thread.\n",
QCASPI_DRV_NAME);
- return PTR_ERR(qca->spi_thread);
+ return PTR_ERR(thread);
}
- ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
- dev->name, qca);
- if (ret) {
- netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
- QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
- kthread_stop(qca->spi_thread);
- return ret;
- }
+ qca->spi_thread = thread;
+
+ enable_irq(qca->spi_dev->irq);
/* SPI thread takes care of TX queue */
@@ -728,10 +710,12 @@ qcaspi_netdev_close(struct net_device *dev)
netif_stop_queue(dev);
qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
- free_irq(qca->spi_dev->irq, qca);
+ disable_irq(qca->spi_dev->irq);
- kthread_stop(qca->spi_thread);
- qca->spi_thread = NULL;
+ if (qca->spi_thread) {
+ kthread_stop(qca->spi_thread);
+ qca->spi_thread = NULL;
+ }
qcaspi_flush_tx_ring(qca);
return 0;
@@ -831,8 +815,8 @@ qcaspi_netdev_init(struct net_device *dev)
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
- qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
- QCAFRM_FOOTER_LEN + 4) * 4;
+ qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+ QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN) * QCASPI_RX_MAX_FRAMES;
memset(&qca->stats, 0, sizeof(struct qcaspi_stats));
@@ -881,6 +865,8 @@ qcaspi_netdev_setup(struct net_device *dev)
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->needed_tailroom = ALIGN(QCAFRM_FOOTER_LEN + QCAFRM_MIN_LEN, 4);
+ dev->needed_headroom = ALIGN(QCAFRM_HEADER_LEN, 4);
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
@@ -891,7 +877,7 @@ qcaspi_netdev_setup(struct net_device *dev)
memset(qca, 0, sizeof(struct qcaspi));
memset(&qca->txr, 0, sizeof(qca->txr));
- qca->txr.count = TX_RING_MAX_LEN;
+ qca->txr.count = QCASPI_TX_RING_MAX_LEN;
}
static const struct of_device_id qca_spi_of_match[] = {
@@ -984,6 +970,15 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
+ ret = devm_request_irq(&spi->dev, spi->irq, qcaspi_intr_handler,
+ IRQF_NO_AUTOEN, qca->net_dev->name, qca);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get IRQ %d (irqval=%d).\n",
+ spi->irq, ret);
+ free_netdev(qcaspi_devs);
+ return ret;
+ }
+
ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
@@ -998,8 +993,8 @@ qca_spi_probe(struct spi_device *spi)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
- signature);
+ dev_err(&spi->dev, "Invalid signature (expected 0x%04x, read 0x%04x)\n",
+ QCASPI_GOOD_SIGNATURE, signature);
free_netdev(qcaspi_devs);
return -EFAULT;
}
@@ -1048,6 +1043,6 @@ module_spi_driver(qca_spi_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCASPI_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 3067356106f0..d59cb2352cee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Qualcomm Atheros SPI register definition.
@@ -39,8 +26,9 @@
#define QCASPI_GOOD_SIGNATURE 0xAA55
-#define TX_RING_MAX_LEN 10
-#define TX_RING_MIN_LEN 2
+#define QCASPI_TX_RING_MAX_LEN 10
+#define QCASPI_TX_RING_MIN_LEN 2
+#define QCASPI_RX_MAX_FRAMES 4
/* sync related constants */
#define QCASPI_SYNC_UNKNOWN 0
@@ -54,7 +42,7 @@
#define QCASPI_EVENT_CPUON 1
struct tx_ring {
- struct sk_buff *skb[TX_RING_MAX_LEN];
+ struct sk_buff *skb[QCASPI_TX_RING_MAX_LEN];
u16 head;
u16 tail;
u16 size;
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 223321897b96..321fd8d00730 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2017, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros UART protocol for
@@ -410,6 +397,6 @@ module_serdev_device_driver(qca_uart_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5b69b9268c75..f3bea196a8f9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -520,4 +520,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_ALIAS_RTNL_LINK("rmnet");
+MODULE_DESCRIPTION("Qualcomm RmNet MAP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 046b5f7d8e7c..9d2a9562c96f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -98,7 +98,7 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
- return priv->real_dev->ifindex;
+ return READ_ONCE(priv->real_dev->ifindex);
}
static int rmnet_vnd_init(struct net_device *dev)
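
The rmnet change wraps the ifindex load in READ_ONCE() because the get_iflink path can run concurrently with updates to the underlying real_dev. A minimal sketch of the idea follows, assuming the simplified volatile-cast definitions of READ_ONCE/WRITE_ONCE (the in-kernel macros add type checking and more); standalone C for illustration only.

/* Sketch: READ_ONCE/WRITE_ONCE keep the compiler from tearing, fusing or
 * re-loading a racy access. Simplified volatile-cast definitions assumed.
 */
#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct fake_priv {
	int ifindex;	/* may be updated concurrently by another context */
};

static int get_iflink(struct fake_priv *priv)
{
	/* single, untorn load of a field a writer may change in parallel */
	return READ_ONCE(priv->ifindex);
}

int main(void)
{
	struct fake_priv priv;

	WRITE_ONCE(priv.ifindex, 3);
	printf("iflink = %d\n", get_iflink(&priv));
	return 0;
}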
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 81567fcf3957..4c043052198d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -68,6 +68,7 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
+ RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_NONE
};
@@ -84,3 +85,6 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
void rtl8168_init_leds(struct net_device *ndev);
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
+void rtl8125_init_leds(struct net_device *ndev);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 007d077edcad..7c5dc9d0df85 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -18,12 +18,14 @@
#define RTL8168_LED_CTRL_LINK_100 BIT(1)
#define RTL8168_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_NUM_LEDS 3
+#define RTL8125_LED_CTRL_ACT BIT(9)
+#define RTL8125_LED_CTRL_LINK_2500 BIT(5)
+#define RTL8125_LED_CTRL_LINK_1000 BIT(3)
+#define RTL8125_LED_CTRL_LINK_100 BIT(1)
+#define RTL8125_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_SUPPORTED_MODES \
- (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \
- BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \
- BIT(TRIGGER_NETDEV_TX))
+#define RTL8168_NUM_LEDS 3
+#define RTL8125_NUM_LEDS 4
struct r8169_led_classdev {
struct led_classdev led;
@@ -33,28 +35,35 @@ struct r8169_led_classdev {
#define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led)
+static bool r8169_trigger_mode_is_valid(unsigned long flags)
+{
+ bool rx, tx;
+
+ if (flags & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ return false;
+ if (flags & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ return false;
+
+ rx = flags & BIT(TRIGGER_NETDEV_RX);
+ tx = flags & BIT(TRIGGER_NETDEV_TX);
+
+ return rx == tx;
+}
+
static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev,
unsigned long flags)
{
struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
struct rtl8169_private *tp = netdev_priv(ldev->ndev);
int shift = ldev->index * 4;
- bool rx, tx;
-
- if (flags & ~RTL8168_SUPPORTED_MODES)
- goto nosupp;
- rx = flags & BIT(TRIGGER_NETDEV_RX);
- tx = flags & BIT(TRIGGER_NETDEV_TX);
- if (rx != tx)
- goto nosupp;
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
+ return -EOPNOTSUPP;
+ }
return 0;
-
-nosupp:
- /* Switch LED off to indicate that mode isn't supported */
- rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
- return -EOPNOTSUPP;
}
static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev,
@@ -129,7 +138,6 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
led_cdev->name = led_name;
- led_cdev->default_trigger = "netdev";
led_cdev->hw_control_trigger = "netdev";
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported;
@@ -155,3 +163,102 @@ void rtl8168_init_leds(struct net_device *ndev)
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
}
+
+static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8125_set_led_mode(tp, ldev->index, 0);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtl8125_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ u16 mode = 0;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode |= RTL8125_LED_CTRL_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode |= RTL8125_LED_CTRL_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode |= RTL8125_LED_CTRL_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode |= RTL8125_LED_CTRL_LINK_2500;
+ if (flags & (BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX)))
+ mode |= RTL8125_LED_CTRL_ACT;
+
+ return rtl8125_set_led_mode(tp, ldev->index, mode);
+}
+
+static int rtl8125_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int mode;
+
+ mode = rtl8125_get_led_mode(tp, ldev->index);
+ if (mode < 0)
+ return mode;
+
+ if (mode & RTL8125_LED_CTRL_LINK_10)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_10);
+ if (mode & RTL8125_LED_CTRL_LINK_100)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_100);
+ if (mode & RTL8125_LED_CTRL_LINK_1000)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_1000);
+ if (mode & RTL8125_LED_CTRL_LINK_2500)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_2500);
+ if (mode & RTL8125_LED_CTRL_ACT)
+ *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+
+ return 0;
+}
+
+static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
+ struct net_device *ndev, int index)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->ndev = ndev;
+ ldev->index = index;
+
+ r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->hw_control_is_supported = rtl8125_led_hw_control_is_supported;
+ led_cdev->hw_control_set = rtl8125_led_hw_control_set;
+ led_cdev->hw_control_get = rtl8125_led_hw_control_get;
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+ devm_led_classdev_register(&ndev->dev, led_cdev);
+}
+
+void rtl8125_init_leds(struct net_device *ndev)
+{
+ /* bind resource mgmt to netdev */
+ struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+ leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return;
+
+ for (i = 0; i < RTL8125_NUM_LEDS; i++)
+ rtl8125_setup_led_ldev(leds + i, ndev, i);
+}
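
r8169_trigger_mode_is_valid() accepts a netdev-trigger flag set only when the duplex triggers are absent and RX/TX activity is either both requested or both cleared, and rtl8125_led_hw_control_set() then folds the accepted flags into a single LEDSEL mode word. The standalone sketch below mirrors that folding; the trigger bit positions are illustrative placeholders (the real values come from the LED netdev trigger enum in the kernel headers), while the RTL8125_LED_CTRL_* values match the defines in the hunk above.

/* Sketch: validate a netdev LED trigger flag set and fold it into an
 * RTL8125-style LEDSEL mode word. Trigger bit positions are assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1UL << (n))

/* assumed trigger bits (placeholders) */
#define TRIG_LINK_10		0
#define TRIG_LINK_100		1
#define TRIG_LINK_1000		2
#define TRIG_LINK_2500		3
#define TRIG_RX			4
#define TRIG_TX			5
#define TRIG_HALF_DUPLEX	6
#define TRIG_FULL_DUPLEX	7

/* register bits copied from the diff above */
#define RTL8125_LED_CTRL_ACT		BIT(9)
#define RTL8125_LED_CTRL_LINK_2500	BIT(5)
#define RTL8125_LED_CTRL_LINK_1000	BIT(3)
#define RTL8125_LED_CTRL_LINK_100	BIT(1)
#define RTL8125_LED_CTRL_LINK_10	BIT(0)

static bool trigger_mode_is_valid(unsigned long flags)
{
	bool rx = flags & BIT(TRIG_RX);
	bool tx = flags & BIT(TRIG_TX);

	/* duplex triggers have no LED control bit on this hardware */
	if (flags & (BIT(TRIG_HALF_DUPLEX) | BIT(TRIG_FULL_DUPLEX)))
		return false;

	/* one activity bit blinks on both RX and TX, so the request must
	 * ask for both or neither
	 */
	return rx == tx;
}

static uint16_t flags_to_mode(unsigned long flags)
{
	uint16_t mode = 0;

	if (flags & BIT(TRIG_LINK_10))
		mode |= RTL8125_LED_CTRL_LINK_10;
	if (flags & BIT(TRIG_LINK_100))
		mode |= RTL8125_LED_CTRL_LINK_100;
	if (flags & BIT(TRIG_LINK_1000))
		mode |= RTL8125_LED_CTRL_LINK_1000;
	if (flags & BIT(TRIG_LINK_2500))
		mode |= RTL8125_LED_CTRL_LINK_2500;
	if (flags & (BIT(TRIG_RX) | BIT(TRIG_TX)))
		mode |= RTL8125_LED_CTRL_ACT;

	return mode;
}

int main(void)
{
	unsigned long flags = BIT(TRIG_LINK_2500) | BIT(TRIG_RX) | BIT(TRIG_TX);

	if (trigger_mode_is_valid(flags))
		printf("mode = 0x%04x\n", flags_to_mode(flags));
	return 0;
}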
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dd73df6b17b0..5c879a5c86d7 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -55,6 +55,7 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
+#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -136,6 +137,7 @@ static const struct {
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -158,6 +160,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
+ { PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
{}
};
@@ -327,13 +330,23 @@ enum rtl8168_registers {
};
enum rtl8125_registers {
+ LEDSEL0 = 0x18,
+ INT_CFG0_8125 = 0x34,
+#define INT_CFG0_ENABLE_8125 BIT(0)
+#define INT_CFG0_CLKREQEN BIT(3)
IntrMask_8125 = 0x38,
IntrStatus_8125 = 0x3c,
+ INT_CFG1_8125 = 0x7a,
+ LEDSEL2 = 0x84,
+ LEDSEL1 = 0x86,
TxPoll_8125 = 0x90,
+ LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
+#define LEDSEL_MASK_8125 0x23f
+
#define RX_VLAN_INNER_8125 BIT(22)
#define RX_VLAN_OUTER_8125 BIT(23)
#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
@@ -606,6 +619,7 @@ struct rtl8169_private {
struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
+ u16 tx_lpi_timer;
u32 irq_mask;
int irq;
struct clk *clk;
@@ -629,7 +643,6 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
- int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -663,6 +676,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_2);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -824,6 +838,51 @@ int rtl8168_get_led_mode(struct rtl8169_private *tp)
return ret;
}
+static int rtl8125_get_led_reg(int index)
+{
+ static const int led_regs[] = { LEDSEL0, LEDSEL1, LEDSEL2, LEDSEL3 };
+
+ return led_regs[index];
+}
+
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+ u16 val;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->led_lock);
+ val = RTL_R16(tp, reg) & ~LEDSEL_MASK_8125;
+ RTL_W16(tp, reg, val | mode);
+ mutex_unlock(&tp->led_lock);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = RTL_R16(tp, reg);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len)
{
@@ -1140,7 +1199,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1155,7 +1214,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1341,7 +1400,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (enable)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
else
@@ -1508,7 +1567,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (wolopts)
rtl_mod_config2(tp, 0, PME_SIGNAL);
else
@@ -1974,30 +2033,64 @@ static int rtl_set_coalesce(struct net_device *dev,
return 0;
}
-static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
+static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
+{
+ unsigned int timer_val = READ_ONCE(tp->dev->mtu) + ETH_HLEN + 0x20;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_48:
+ tp->tx_lpi_timer = timer_val;
+ r8168_mac_ocp_write(tp, 0xe048, timer_val);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ tp->tx_lpi_timer = timer_val;
+ RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned int r8169_get_tx_lpi_timer_us(struct rtl8169_private *tp)
+{
+ unsigned int speed = tp->phydev->speed;
+ unsigned int timer = tp->tx_lpi_timer;
+
+ if (!timer || speed == SPEED_UNKNOWN)
+ return 0;
+
+ /* tx_lpi_timer value is in bytes */
+ return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed);
+}
+
+static int rtl8169_get_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- return phy_ethtool_get_eee(tp->phydev, data);
+ ret = phy_ethtool_get_eee(tp->phydev, data);
+ if (ret)
+ return ret;
+
+ data->tx_lpi_timer = r8169_get_tx_lpi_timer_us(tp);
+
+ return 0;
}
-static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+static int rtl8169_set_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_eee(tp->phydev, data);
-
- if (!ret)
- tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
- MDIO_AN_EEE_ADV);
- return ret;
+ return phy_ethtool_set_eee(tp->phydev, data);
}
static void rtl8169_get_ringparam(struct net_device *dev,
@@ -2062,21 +2155,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_pauseparam = rtl8169_set_pauseparam,
};
-static void rtl_enable_eee(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int adv;
-
- /* respect EEE advertisement the user may have set */
- if (tp->eee_adv >= 0)
- adv = tp->eee_adv;
- else
- adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
-
- if (adv >= 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
-}
-
static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
@@ -2095,6 +2173,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
u16 val;
enum mac_version ver;
} mac_info[] = {
+ /* 8126A family. */
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+
/* 8125B family. */
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
@@ -2250,14 +2331,8 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
-{
- RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
-}
-
static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
{
- rtl8125_set_eee_txidle_timer(tp);
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
@@ -2313,9 +2388,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
- if (rtl_supports_eee(tp))
- rtl_enable_eee(tp);
-
genphy_soft_reset(tp->phydev);
}
@@ -2368,6 +2440,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2554,7 +2627,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2797,7 +2870,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2811,7 +2884,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2821,6 +2894,8 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
+ u8 val8;
+
if (tp->mac_version < RTL_GIGA_MAC_VER_32)
return;
@@ -2834,11 +2909,19 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
return;
rtl_mod_config5(tp, 0, ASPM_en);
- rtl_mod_config2(tp, 0, ClkReqEn);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, 0, ClkReqEn);
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2850,14 +2933,22 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
break;
}
- rtl_mod_config2(tp, ClkReqEn, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, ClkReqEn, 0);
+ break;
+ }
rtl_mod_config5(tp, ASPM_en, 0);
}
}
@@ -3570,10 +3661,15 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
else
- r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0300);
if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
@@ -3586,6 +3682,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
+ else
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
@@ -3600,10 +3700,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
- rtl8125b_config_eee_mac(tp);
- else
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
rtl8125a_config_eee_mac(tp);
+ else
+ rtl8125b_config_eee_mac(tp);
rtl_disable_rxdvgate(tp);
}
@@ -3647,6 +3747,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8126a(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -3689,6 +3795,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3699,9 +3806,23 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
{
int i;
+ RTL_W8(tp, INT_CFG0_8125, 0x00);
+
/* disable interrupt coalescing */
- for (i = 0xa00; i < 0xb00; i += 4)
- RTL_W32(tp, i, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61:
+ for (i = 0xa00; i < 0xb00; i += 4)
+ RTL_W32(tp, i, 0);
+ break;
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ for (i = 0xa00; i < 0xa80; i += 4)
+ RTL_W32(tp, i, 0);
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
+ break;
+ default:
+ break;
+ }
rtl_hw_config(tp);
}
@@ -3744,6 +3865,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ rtl_set_eee_txidle_timer(tp);
+
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
rtl_hw_start_8169(tp);
else if (rtl_is_8125(tp))
@@ -3777,15 +3900,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
netdev_update_features(dev);
rtl_jumbo_config(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
- rtl8125_set_eee_txidle_timer(tp);
- break;
- default:
- break;
- }
+ rtl_set_eee_txidle_timer(tp);
return 0;
}
@@ -3929,7 +4044,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4080,8 +4195,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5058,7 +5172,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
}
tp->phydev->mac_managed_pm = true;
-
+ if (rtl_supports_eee(tp))
+ phy_support_eee(tp->phydev);
phy_support_asym_pause(tp->phydev);
/* PHY will be woken up in rtl_open() */
@@ -5108,7 +5223,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
rtl_hw_init_8125(tp);
break;
default:
@@ -5193,7 +5308,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
- tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
raw_spin_lock_init(&tp->cfg9346_usage_lock);
@@ -5201,11 +5315,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
- dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
- struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
/* Get the *optional* external "ether_clk" used on some boards */
tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
if (IS_ERR(tp->clk))
@@ -5320,6 +5429,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
netdev_sw_irq_coalesce_default_on(dev);
/* configure chip for default features */
@@ -5356,10 +5467,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- if (IS_ENABLED(CONFIG_R8169_LEDS) &&
- tp->mac_version > RTL_GIGA_MAC_VER_06 &&
- tp->mac_version < RTL_GIGA_MAC_VER_61)
- rtl8168_init_leds(dev);
+ if (IS_ENABLED(CONFIG_R8169_LEDS)) {
+ if (rtl_is_8125(tp))
+ rtl8125_init_leds(dev);
+ else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ rtl8168_init_leds(dev);
+ }
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
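
In the hunks above, rtl_set_eee_txidle_timer() programs the EEE idle timer in bytes (MTU + Ethernet header + 0x20) and r8169_get_tx_lpi_timer_us() converts it back to microseconds for ethtool by multiplying by 8 bits per byte and dividing by the link speed in Mbit/s (bits divided by Mbit/s gives µs). A standalone arithmetic check of that conversion, with ETH_HLEN and DIV_ROUND_CLOSEST re-defined locally for illustration:

/* Sketch: tx LPI idle timer is programmed in bytes and reported in us:
 * bytes * 8 bits / (Mbit/s) == us.
 */
#include <stdio.h>

#define ETH_HLEN		14
#define BITS_PER_BYTE		8
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static unsigned int tx_lpi_timer_bytes(unsigned int mtu)
{
	return mtu + ETH_HLEN + 0x20;	/* same formula as the driver */
}

static unsigned int tx_lpi_timer_us(unsigned int timer, unsigned int speed_mbps)
{
	if (!timer || !speed_mbps)
		return 0;
	return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed_mbps);
}

int main(void)
{
	unsigned int timer = tx_lpi_timer_bytes(1500);	/* 1546 bytes */

	/* 1546 * 8 / 1000 ~= 12 us at 1G, ~= 5 us at 2.5G */
	printf("%u bytes -> %u us @1000, %u us @2500\n", timer,
	       tx_lpi_timer_us(timer, 1000), tx_lpi_timer_us(timer, 2500));
	return 0;
}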
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index b50f16786c24..1f74317beb88 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1102,6 +1102,12 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
rtl8125b_config_eee_phy(phydev);
}
+static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1152,6 +1158,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index d6136fe5c206..b03fae7a0f72 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -34,6 +34,7 @@ config RAVB
select MII
select MDIO_BITBANG
select PHYLIB
+ select RESET_CONTROLLER
help
Renesas Ethernet AVB device driver.
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cffed..b48935ec7e28 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -205,7 +205,11 @@ enum ravb_reg {
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
- CSR0 = 0x0800, /* RZ/G2L only */
+
+ /* TOE registers (RZ/G2L only) */
+ CSR0 = 0x0800,
+ CSR1 = 0x0804,
+ CSR2 = 0x0808,
};
@@ -978,16 +982,39 @@ enum CSR0_BIT {
CSR0_RPE = 0x00000020,
};
+enum CSR1_BIT {
+ CSR1_TIP4 = 0x00000001,
+ CSR1_TTCP4 = 0x00000010,
+ CSR1_TUDP4 = 0x00000020,
+ CSR1_TICMP4 = 0x00000040,
+ CSR1_TTCP6 = 0x00100000,
+ CSR1_TUDP6 = 0x00200000,
+ CSR1_TICMP6 = 0x00400000,
+ CSR1_THOP = 0x01000000,
+ CSR1_TROUT = 0x02000000,
+ CSR1_TAHD = 0x04000000,
+ CSR1_TDHD = 0x08000000,
+};
+
+enum CSR2_BIT {
+ CSR2_RIP4 = 0x00000001,
+ CSR2_RTCP4 = 0x00000010,
+ CSR2_RUDP4 = 0x00000020,
+ CSR2_RICMP4 = 0x00000040,
+ CSR2_RTCP6 = 0x00100000,
+ CSR2_RUDP6 = 0x00200000,
+ CSR2_RICMP6 = 0x00400000,
+ CSR2_RHOP = 0x01000000,
+ CSR2_RROUT = 0x02000000,
+ CSR2_RAHD = 0x04000000,
+ CSR2_RDHD = 0x08000000,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
-#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
-
-#define GBETH_RX_BUFF_MAX 8192
-#define GBETH_RX_DESC_DATA_SIZE 4080
-
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -1012,9 +1039,6 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- void (*rx_ring_free)(struct net_device *ndev, int q);
- void (*rx_ring_format)(struct net_device *ndev, int q);
- void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
@@ -1025,9 +1049,10 @@ struct ravb_hw_info {
netdev_features_t net_hw_features;
netdev_features_t net_features;
int stats_len;
- size_t max_rx_len;
u32 tccr_mask;
- u32 rx_max_buf_size;
+ u32 rx_max_frame_size;
+ u32 rx_max_desc_use;
+ u32 rx_desc_size;
unsigned aligned_tx: 1;
/* hardware features */
@@ -1060,8 +1085,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
- struct ravb_rx_desc *gbeth_rx_ring;
- struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ union {
+ struct ravb_rx_desc *desc;
+ struct ravb_ex_rx_desc *ex_desc;
+ void *raw;
+ } rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
@@ -1089,10 +1117,6 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
- int erra_irq;
- int mgmta_irq;
- int rx_irqs[NUM_RX_QUEUE];
- int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -1106,6 +1130,8 @@ struct ravb_private {
const struct ravb_hw_info *info;
struct reset_control *rstc;
+
+ u32 gti_tiv;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
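
The new rx_ring union lets one field serve both descriptor layouts: GbEth uses the small struct ravb_rx_desc while R-Car uses the extended struct ravb_ex_rx_desc, and the per-SoC rx_desc_size in struct ravb_hw_info provides the stride used to index the raw ring (see ravb_rx_get_desc() in the ravb_main.c hunks further down). A standalone sketch of that stride-indexed access follows; the two struct layouts are placeholders, not the real descriptor formats.

/* Sketch: one raw ring buffer indexed with a per-variant descriptor
 * stride, mirroring rx_ring[q].raw + rx_desc_size * i.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct small_desc {			/* stands in for ravb_rx_desc */
	uint16_t ds_cc;
	uint8_t  die_dt;
	uint32_t dptr;
};

struct ext_desc {			/* stands in for ravb_ex_rx_desc */
	struct small_desc base;
	uint32_t ts_n;			/* extra timestamp words */
	uint32_t ts_sl;
};

struct ring {
	union {
		struct small_desc *desc;
		struct ext_desc *ex_desc;
		void *raw;
	} rx;
	size_t desc_size;		/* stride chosen per hardware variant */
};

static void *ring_desc(struct ring *r, unsigned int i)
{
	/* byte-offset arithmetic works for either layout */
	return (char *)r->rx.raw + r->desc_size * i;
}

int main(void)
{
	struct ring r = { .desc_size = sizeof(struct ext_desc) };
	unsigned int n = 4;

	r.rx.raw = calloc(n + 1, r.desc_size);	/* +1 for the link descriptor */
	if (!r.rx.raw)
		return 1;
	printf("desc 2 at offset %td\n",
	       (char *)ring_desc(&r, 2) - (char *)r.rx.raw);
	free(r.rx.raw);
	return 0;
}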
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index f7566cfa45ca..d1be030c8848 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
+#include <net/ip.h>
#include "ravb.h"
@@ -38,16 +39,6 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
-static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
- "ch0", /* RAVB_BE */
- "ch1", /* RAVB_NC */
-};
-
-static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
- "ch18", /* RAVB_BE */
- "ch19", /* RAVB_NC */
-};
-
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -96,13 +87,13 @@ static void ravb_set_rate_gbeth(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
- case 10: /* 10BASE */
+ case 10: /* 10BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
break;
- case 100: /* 100BASE */
+ case 100: /* 100BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
break;
- case 1000: /* 1000BASE */
+ case 1000: /* 1000BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
break;
}
@@ -122,12 +113,23 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static void ravb_set_buffer_align(struct sk_buff *skb)
+static struct sk_buff *
+ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
+ gfp_t gfp_mask)
{
- u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+ struct sk_buff *skb;
+ u32 reserve;
+ skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
+ gfp_mask);
+ if (!skb)
+ return NULL;
+
+ reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
if (reserve)
skb_reserve(skb, RAVB_ALIGN - reserve);
+
+ return skb;
}
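
ravb_alloc_skb() above over-allocates by RAVB_ALIGN - 1 bytes and then reserves RAVB_ALIGN - (data & (RAVB_ALIGN - 1)) when the start is misaligned, so the payload lands on the boundary the DMA engine expects. A standalone check of that arithmetic, assuming a power-of-two alignment; the value used below is illustrative.

/* Sketch: align a buffer's data start to a power-of-two boundary by
 * skipping (ALIGN - (addr & (ALIGN - 1))) bytes when misaligned, the
 * same arithmetic ravb_alloc_skb() applies via skb_reserve().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_BYTES 128u		/* illustrative alignment */

static unsigned char *align_data(unsigned char *data)
{
	uintptr_t off = (uintptr_t)data & (ALIGN_BYTES - 1);

	/* already aligned: skip nothing, otherwise skip the remainder */
	return off ? data + (ALIGN_BYTES - off) : data;
}

int main(void)
{
	/* over-allocate by ALIGN_BYTES - 1 so aligning never overruns */
	unsigned char *buf = malloc(2048 + ALIGN_BYTES - 1);
	unsigned char *data;

	if (!buf)
		return 1;
	data = align_data(buf);
	printf("reserved %td bytes, aligned: %d\n", data - buf,
	       ((uintptr_t)data & (ALIGN_BYTES - 1)) == 0);
	free(buf);
	return 0;
}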
/* Get MAC address from the MAC address registers
@@ -200,6 +202,13 @@ static const struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+ unsigned int i)
+{
+ return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
@@ -244,67 +253,40 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
- unsigned int i;
-
- if (!priv->gbeth_rx_ring)
- return;
-
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
- priv->rx_desc_dma[q]);
- priv->gbeth_rx_ring = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
unsigned int i;
- if (!priv->rx_ring[q])
+ if (!priv->rx_ring[q].raw)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+ struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
}
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
unsigned int i;
- info->rx_ring_free(ndev, q);
+ ravb_rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
@@ -335,7 +317,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_rx_desc *rx_desc;
@@ -343,45 +325,15 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
dma_addr_t dma_addr;
unsigned int i;
- rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- memset(priv->gbeth_rx_ring, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- /* We just set the data size to 0 for a failed mapping which
- * should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
- rx_desc->die_dt = DT_FEMPTY;
- }
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
-}
-
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc;
- unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- dma_addr_t dma_addr;
- unsigned int i;
-
- memset(priv->rx_ring[q], 0, rx_ring_size);
+ rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
- rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ rx_desc = ravb_rx_get_desc(priv, q, i);
+ rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -391,7 +343,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->rx_ring[q][i];
+ rx_desc = ravb_rx_get_desc(priv, q, i);
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@@ -400,7 +352,6 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
@@ -413,7 +364,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- info->rx_ring_format(ndev, q);
+ ravb_rx_ring_format(ndev, q);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -439,30 +390,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
- priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->gbeth_rx_ring;
-}
+ priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
-
- ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->rx_ring[q];
+ return priv->rx_ring[q].raw;
}
/* Init skb and descriptor buffer for Ethernet AVB */
@@ -484,10 +423,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
+ skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
if (!skb)
goto error;
- ravb_set_buffer_align(skb);
priv->rx_skb[q][i] = skb;
}
@@ -500,7 +438,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate all RX descriptors. */
- if (!info->alloc_rx_desc(ndev, q))
+ if (!ravb_alloc_rx_desc(ndev, q))
goto error;
priv->dirty_rx[q] = 0;
@@ -522,6 +460,36 @@ error:
return -ENOMEM;
}
+static void ravb_csum_init_gbeth(struct net_device *ndev)
+{
+ bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
+ bool rx_enable = ndev->features & NETIF_F_RXCSUM;
+
+ if (!(tx_enable || rx_enable))
+ goto done;
+
+ ravb_write(ndev, 0, CSR0);
+ if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
+ netdev_err(ndev, "Timeout enabling hardware checksum\n");
+
+ if (tx_enable)
+ ndev->features &= ~NETIF_F_HW_CSUM;
+
+ if (rx_enable)
+ ndev->features &= ~NETIF_F_RXCSUM;
+ } else {
+ if (tx_enable)
+ ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
+
+ if (rx_enable)
+ ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
+ CSR2);
+ }
+
+done:
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+}
+
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -536,7 +504,7 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
}
/* Receive frame limit set register */
- ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
@@ -553,7 +521,8 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
- ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ ravb_csum_init_gbeth(ndev);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
@@ -596,6 +565,7 @@ static void ravb_emac_init(struct net_device *ndev)
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_ring_init(ndev, RAVB_BE);
@@ -609,7 +579,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
ravb_write(ndev, 0x60000000, RCR);
/* Set Max Frame Length (RTC) */
- ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+ ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
/* Set FIFO size */
ravb_write(ndev, 0x00222200, TGC);
@@ -734,6 +704,30 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
}
}
+static void ravb_rx_csum_gbeth(struct sk_buff *skb)
+{
+ __wsum csum_ip_hdr, csum_proto;
+ u8 *hw_csum;
+
+ /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
+ * bytes appended to packet data. First 2 bytes is ip header checksum
+ * and last 2 bytes is protocol checksum.
+ */
+ if (unlikely(skb->len < sizeof(__sum16) * 2))
+ return;
+
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+
+ hw_csum -= sizeof(__sum16);
+ csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ /* TODO: IPV6 Rx checksum */
+ if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
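
As the comment in ravb_rx_csum_gbeth() above describes, the GbEth TOE appends four bytes to each frame: a 2-byte IP header checksum result followed by a 2-byte protocol checksum result, both little-endian; the driver reads them from the skb tail, trims them off, and marks the packet CHECKSUM_UNNECESSARY only when both are zero (IPv4 only for now). Below is a standalone sketch of pulling such a trailer off a plain buffer; get_unaligned_le16() is emulated with explicit byte loads.

/* Sketch: parse a 4-byte checksum trailer (2 bytes IP header result,
 * 2 bytes protocol result, little-endian) appended to frame data and
 * trim it, mirroring what ravb_rx_csum_gbeth() does on the skb tail.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));	/* unaligned-safe byte loads */
}

static int frame_csum_ok(uint8_t *data, size_t *len)
{
	uint16_t csum_proto, csum_ip_hdr;

	if (*len < 4)
		return 0;			/* no room for the trailer */

	csum_proto  = get_le16(data + *len - 2);
	csum_ip_hdr = get_le16(data + *len - 4);
	*len -= 4;				/* trim the trailer */

	/* zero in both fields means the hardware verified the checksums */
	return csum_ip_hdr == 0 && csum_proto == 0;
}

int main(void)
{
	uint8_t frame[64] = { 0 };		/* payload + 4-byte trailer */
	size_t len = sizeof(frame);

	printf("csum ok: %d, payload len: %zu\n",
	       frame_csum_ok(frame, &len), len);
	return 0;
}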
+
static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
@@ -758,7 +752,8 @@ static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
skb = priv->rx_skb[RAVB_BE][entry];
priv->rx_skb[RAVB_BE][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+ ALIGN(priv->info->rx_max_frame_size, 16),
+ DMA_FROM_DEVICE);
return skb;
}
@@ -784,7 +779,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
@@ -815,6 +810,8 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
@@ -842,6 +839,8 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
@@ -851,23 +850,22 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ desc = &priv->rx_ring[q].desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break;
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent,
skb->data,
- GBETH_RX_BUFF_MAX,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -907,7 +905,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
@@ -941,7 +939,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -967,22 +965,21 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break; /* Better luck next round. */
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- le16_to_cpu(desc->ds_cc),
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -1088,11 +1085,23 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ irqreturn_t result = IRQ_HANDLED;
+
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev))) {
+ result = IRQ_NONE;
+ goto out_rpm_put;
+ }
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
+ return result;
}
/* Error interrupt handler */
@@ -1172,9 +1181,15 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1218,6 +1233,9 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1226,9 +1244,15 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1250,6 +1274,9 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1257,8 +1284,14 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Network control/Best effort queue RX/TX */
@@ -1266,6 +1299,9 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1284,25 +1320,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- bool gptp = info->gptp || info->ccc_gac;
- struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- unsigned int entry;
- if (!gptp) {
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (gptp || desc->die_dt != DT_FEMPTY) {
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1732,89 +1759,159 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.set_wol = ravb_set_wol,
};
-static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
- struct net_device *ndev, struct device *dev,
- const char *ch)
+static int ravb_set_config_mode(struct net_device *ndev)
{
- char *name;
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
- name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!name)
- return -ENOMEM;
- error = request_irq(irq, handler, 0, name, ndev);
- if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", name);
+ if (info->gptp) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else if (info->ccc_gac) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
return error;
}
+static void ravb_set_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ if (!(info->gptp || info->ccc_gac))
+ return;
+
+ ravb_write(ndev, priv->gti_tiv, GTI);
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+}
+
+static int ravb_compute_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct device *dev = ndev->dev.parent;
+ unsigned long rate;
+ u64 inc;
+
+ if (!(info->gptp || info->ccc_gac))
+ return 0;
+
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
+ return -EINVAL;
+
+ inc = div64_ul(1000000000ULL << 20, rate);
+
+ if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
+ dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
+ inc, GTI_TIV_MIN, GTI_TIV_MAX);
+ return -EINVAL;
+ }
+ priv->gti_tiv = inc;
+
+ return 0;
+}
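
ravb_compute_gti() above turns the gPTP reference clock rate into a timer increment in 2^-20 ns fixed point: inc = (10^9 << 20) / rate, so a 125 MHz clock yields 0x800000 (8 ns per tick), and the result must fall inside GTI_TIV_MIN..GTI_TIV_MAX before ravb_set_gti() latches it into GTI via GCCR_LTI. A standalone arithmetic check of that computation; the range bounds are not reproduced here.

/* Sketch: gPTP timer increment in 2^-20 ns fixed point, as computed by
 * ravb_compute_gti(): inc = (1e9 << 20) / clock_rate_hz.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t gti_increment(unsigned long rate_hz)
{
	if (!rate_hz)
		return 0;		/* caller treats this as an error */
	return (1000000000ULL << 20) / rate_hz;
}

int main(void)
{
	/* 125 MHz -> 8 ns per tick -> 8 << 20 == 0x800000 */
	printf("inc @125MHz = 0x%" PRIx64 "\n", gti_increment(125000000UL));
	/* 100 MHz -> 10 ns per tick -> 0xa00000 */
	printf("inc @100MHz = 0x%" PRIx64 "\n", gti_increment(100000000UL));
	return 0;
}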
+
+/* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+
+ /* Fall back to legacy rgmii-*id behavior */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (priv->rxcidm)
+ set |= APSR_RDM;
+ if (priv->txcidm)
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &priv->pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
- if (!info->multi_irqs) {
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
- } else {
- error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
- dev, "ch22:multi");
- if (error)
- goto out_napi_off;
- error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
- dev, "ch24:emac");
- if (error)
- goto out_free_irq;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch0:rx_be");
- if (error)
- goto out_free_irq_emac;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch18:tx_be");
- if (error)
- goto out_free_irq_be_rx;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch1:rx_nc");
- if (error)
- goto out_free_irq_be_tx;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch19:tx_nc");
- if (error)
- goto out_free_irq_nc_rx;
-
- if (info->err_mgmt_irqs) {
- error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
- ndev, dev, "err_a");
- if (error)
- goto out_free_irq_nc_tx;
- error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
- ndev, dev, "mgmt_a");
- if (error)
- goto out_free_irq_erra;
- }
- }
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ goto out_napi_off;
+
+ /* Set AVB config mode */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_rpm_put;
+
+ ravb_set_delay_mode(ndev);
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_mgmta;
+ goto out_set_reset;
+
ravb_emac_init(ndev);
+ ravb_set_gti(ndev);
+
/* Initialise PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_init(ndev, priv->pdev);
/* PHY control start */
@@ -1828,29 +1925,14 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
-out_free_irq_mgmta:
- if (!info->multi_irqs)
- goto out_free_irq;
- if (info->err_mgmt_irqs)
- free_irq(priv->mgmta_irq, ndev);
-out_free_irq_erra:
- if (info->err_mgmt_irqs)
- free_irq(priv->erra_irq, ndev);
-out_free_irq_nc_tx:
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
-out_free_irq_nc_rx:
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
-out_free_irq_be_tx:
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
-out_free_irq_be_rx:
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
-out_free_irq_emac:
- free_irq(priv->emac_irq, ndev);
-out_free_irq:
- free_irq(ndev->irq, ndev);
+out_set_reset:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
@@ -1935,6 +2017,36 @@ out_unlock:
rtnl_unlock();
}
+static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
+{
+ struct iphdr *ip = ip_hdr(skb);
+
+ /* TODO: Need to add support for VLAN tag 802.1Q */
+ if (skb_vlan_tag_present(skb))
+ return false;
+
+ /* TODO: Need to add hardware checksum for IPv6 */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ switch (ip->protocol) {
+ case IPPROTO_TCP:
+ break;
+ case IPPROTO_UDP:
+ /* If the checksum value in the UDP header field is 0, TOE does
+ * not calculate checksum for UDP part of this frame as it is
+ * optional function as per standards.
+ */
+ if (udp_hdr(skb)->check == 0)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -1950,6 +2062,9 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 entry;
u32 len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
+ skb_checksum_help(skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
num_tx_desc) {
@@ -2084,8 +2199,15 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
+ struct device *dev = &priv->pdev->dev;
nstats = &ndev->stats;
+
+ pm_runtime_get_noresume(dev);
+
+ if (!pm_runtime_active(dev))
+ goto out_rpm_put;
+
stats0 = &priv->stats[RAVB_BE];
if (info->tx_counters) {
@@ -2127,6 +2249,8 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats->rx_over_errors += stats1->rx_over_errors;
}
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return nstats;
}
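
The pm_runtime_get_noresume() / pm_runtime_active() / pm_runtime_put_noidle() sequence above is a common pattern for reading hardware counters only when the device is already powered, without triggering a resume from the statistics path. A minimal hedged sketch of the same pattern, with placeholder names (struct example_stats and example_hw_read_counters() are hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct example_stats;	/* placeholder for a driver's counter block */

/* Hypothetical MMIO accessor that reads the hardware counters. */
void example_hw_read_counters(struct device *dev, struct example_stats *st);

/* Hold a usage reference without resuming, so the device cannot
 * runtime-suspend under us; touch the hardware only if it is active.
 */
static void example_update_stats(struct device *dev, struct example_stats *st)
{
	pm_runtime_get_noresume(dev);

	if (pm_runtime_active(dev))
		example_hw_read_counters(dev, st);

	pm_runtime_put_noidle(dev);
}
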
@@ -2149,6 +2273,8 @@ static int ravb_close(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct device *dev = &priv->pdev->dev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -2157,8 +2283,16 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, RIC2);
ravb_write(ndev, 0, TIC);
+ /* PHY disconnect */
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ }
+
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -2175,29 +2309,8 @@ static int ravb_close(struct net_device *ndev)
}
}
- /* PHY disconnect */
- if (ndev->phydev) {
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- }
-
cancel_work_sync(&priv->work);
- if (info->multi_irqs) {
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
- free_irq(priv->emac_irq, ndev);
- if (info->err_mgmt_irqs) {
- free_irq(priv->erra_irq, ndev);
- free_irq(priv->mgmta_irq, ndev);
- }
- }
- free_irq(ndev->irq, ndev);
-
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -2207,6 +2320,17 @@ static int ravb_close(struct net_device *ndev)
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
+ /* Update statistics. */
+ ravb_get_stats(ndev);
+
+ /* Set reset mode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -2330,11 +2454,58 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
+ u32 val, u32 mask)
+{
+ u32 csr0 = CSR0_TPE | CSR0_RPE;
+ int ret;
+
+ ravb_write(ndev, csr0 & ~mask, CSR0);
+ ret = ravb_wait(ndev, CSR0, mask, 0);
+ if (!ret)
+ ravb_write(ndev, val, reg);
+
+ ravb_write(ndev, csr0, CSR0);
+
+ return ret;
+}
+
static int ravb_set_features_gbeth(struct net_device *ndev,
netdev_features_t features)
{
- /* Place holder */
- return 0;
+ netdev_features_t changed = ndev->features ^ features;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
+ if (ret)
+ goto done;
+ }
+
+ if (changed & NETIF_F_HW_CSUM) {
+ if (features & NETIF_F_HW_CSUM)
+ val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
+ if (ret)
+ goto done;
+ }
+
+done:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
}
static int ravb_set_features_rcar(struct net_device *ndev,
@@ -2345,8 +2516,6 @@ static int ravb_set_features_rcar(struct net_device *ndev,
if (changed & NETIF_F_RXCSUM)
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
- ndev->features = features;
-
return 0;
}
@@ -2355,8 +2524,24 @@ static int ravb_set_features(struct net_device *ndev,
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_active(dev))
+ ret = info->set_feature(ndev, features);
+ else
+ ret = 0;
+
+ pm_runtime_put_noidle(dev);
+
+ if (ret)
+ return ret;
+
+ ndev->features = features;
- return info->set_feature(ndev, features);
+ return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2430,9 +2615,6 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2443,9 +2625,10 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2456,9 +2639,6 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2469,9 +2649,10 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.aligned_tx = 1,
.gptp = 1,
.nc_queues = 1,
@@ -2479,9 +2660,6 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2492,9 +2670,10 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2504,9 +2683,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
};
static const struct ravb_hw_info gbeth_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_gbeth,
- .rx_ring_format = ravb_rx_ring_format_gbeth,
- .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
.receive = ravb_rx_gbeth,
.set_rate = ravb_set_rate_gbeth,
.set_feature = ravb_set_features_gbeth,
@@ -2514,10 +2690,13 @@ static const struct ravb_hw_info gbeth_hw_info = {
.emac_init = ravb_emac_init_gbeth,
.gstrings_stats = ravb_gstrings_stats_gbeth,
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
- .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
.tccr_mask = TCCR_TSRQ0,
- .rx_max_buf_size = SZ_8K,
+ .rx_max_frame_size = SZ_8K,
+ .rx_max_desc_use = 4080,
+ .rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
.tx_counters = 1,
.carrier_counters = 1,
@@ -2537,100 +2716,91 @@ static const struct of_device_id ravb_match_table[] = {
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
-static int ravb_set_gti(struct net_device *ndev)
+static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
+ const char *ch, int *irq, irq_handler_t handler)
{
- struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- struct device *dev = ndev->dev.parent;
- unsigned long rate;
- uint64_t inc;
-
- if (info->gptp_ref_clk)
- rate = clk_get_rate(priv->gptp_clk);
- else
- rate = clk_get_rate(priv->clk);
- if (!rate)
- return -EINVAL;
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ struct device *dev = &pdev->dev;
+ const char *dev_name;
+ unsigned long flags;
+ int error, irq_num;
- inc = div64_ul(1000000000ULL << 20, rate);
+ if (irq_name) {
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!dev_name)
+ return -ENOMEM;
- if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
- dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
- inc, GTI_TIV_MIN, GTI_TIV_MAX);
- return -EINVAL;
+ irq_num = platform_get_irq_byname(pdev, irq_name);
+ flags = 0;
+ } else {
+ dev_name = ndev->name;
+ irq_num = platform_get_irq(pdev, 0);
+ flags = IRQF_SHARED;
}
+ if (irq_num < 0)
+ return irq_num;
- ravb_write(ndev, inc, GTI);
+ if (irq)
+ *irq = irq_num;
- return 0;
+ error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+
+ return error;
}
-static int ravb_set_config_mode(struct net_device *ndev)
+static int ravb_setup_irqs(struct ravb_private *priv)
{
- struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct net_device *ndev = priv->ndev;
+ const char *irq_name, *emac_irq_name;
int error;
- if (info->gptp) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
- if (error)
- return error;
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else if (info->ccc_gac) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ if (!info->multi_irqs)
+ return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
+
+ if (info->err_mgmt_irqs) {
+ irq_name = "dia";
+ emac_irq_name = "line3";
} else {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ irq_name = "ch22";
+ emac_irq_name = "ch24";
}
- return error;
-}
-
-/* Set tx and rx clock internal delay modes */
-static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- bool explicit_delay = false;
- u32 delay;
+ error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
+ if (error)
+ return error;
- if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 1800, according to DT bindings */
- priv->rxcidm = !!delay;
- explicit_delay = true;
- }
- if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 2000, according to DT bindings */
- priv->txcidm = !!delay;
- explicit_delay = true;
- }
+ error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
+ ravb_emac_interrupt);
+ if (error)
+ return error;
- if (explicit_delay)
- return;
+ if (info->err_mgmt_irqs) {
+ error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
- /* Fall back to legacy rgmii-*id behavior */
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- priv->rxcidm = 1;
- priv->rgmii_override = 1;
+ error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- priv->txcidm = 1;
- priv->rgmii_override = 1;
- }
-}
+ error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
-static void ravb_set_delay_mode(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- u32 set = 0;
+ error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
+ if (error)
+ return error;
- if (priv->rxcidm)
- set |= APSR_RDM;
- if (priv->txcidm)
- set |= APSR_TDM;
- ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+ error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
+
+ return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
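
ravb_setup_irq() above is a compact version of a common devm pattern: resolve the interrupt (by resource name, or index 0 with IRQF_SHARED for the single-line case), build a readable "<device>:<channel>" label with devm_kasprintf(), and let devm_request_irq() release the line automatically on unbind. A hedged sketch of that pattern with placeholder names:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Request one named platform interrupt with a device-managed label.
 * Returns 0 on success or a negative errno; no free_irq() is needed
 * because devm releases the line when the device is unbound.
 */
static int example_setup_named_irq(struct platform_device *pdev,
				   const char *res_name, const char *label,
				   irq_handler_t handler, void *dev_id)
{
	struct device *dev = &pdev->dev;
	const char *name;
	int irq;

	irq = platform_get_irq_byname(pdev, res_name);
	if (irq < 0)
		return irq;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", dev_name(dev), label);
	if (!name)
		return -ENOMEM;

	return devm_request_irq(dev, irq, handler, 0, name, dev_id);
}
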
static int ravb_probe(struct platform_device *pdev)
@@ -2640,9 +2810,8 @@ static int ravb_probe(struct platform_device *pdev)
struct reset_control *rstc;
struct ravb_private *priv;
struct net_device *ndev;
- int error, irq, q;
struct resource *res;
- int i;
+ int error, q;
if (!np) {
dev_err(&pdev->dev,
@@ -2650,7 +2819,7 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
@@ -2669,25 +2838,6 @@ static int ravb_probe(struct platform_device *pdev)
if (error)
goto out_free_netdev;
- pm_runtime_enable(&pdev->dev);
- error = pm_runtime_resume_and_get(&pdev->dev);
- if (error < 0)
- goto out_rpm_disable;
-
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "dia");
- else
- irq = platform_get_irq_byname(pdev, "ch22");
- } else {
- irq = platform_get_irq(pdev, 0);
- }
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- ndev->irq = irq;
-
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
@@ -2702,10 +2852,43 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
}
+ error = ravb_setup_irqs(priv);
+ if (error)
+ goto out_reset_assert;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ error = PTR_ERR(priv->clk);
+ goto out_reset_assert;
+ }
+
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_reset_assert;
+ }
+ }
+
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_reset_assert;
+ }
+ clk_prepare(priv->refclk);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
- goto out_release;
+ goto out_rpm_put;
}
/* The Ether-specific entries in the device structure. */
@@ -2716,79 +2899,14 @@ static int ravb_probe(struct platform_device *pdev)
error = of_get_phy_mode(np, &priv->phy_interface);
if (error && error != -ENODEV)
- goto out_release;
+ goto out_rpm_put;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "line3");
- else
- irq = platform_get_irq_byname(pdev, "ch24");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->emac_irq = irq;
- for (i = 0; i < NUM_RX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->rx_irqs[i] = irq;
- }
- for (i = 0; i < NUM_TX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->tx_irqs[i] = irq;
- }
-
- if (info->err_mgmt_irqs) {
- irq = platform_get_irq_byname(pdev, "err_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->erra_irq = irq;
-
- irq = platform_get_irq_byname(pdev, "mgmt_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->mgmta_irq = irq;
- }
- }
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- error = PTR_ERR(priv->clk);
- goto out_release;
- }
-
- priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
- if (IS_ERR(priv->refclk)) {
- error = PTR_ERR(priv->refclk);
- goto out_release;
- }
- clk_prepare_enable(priv->refclk);
-
- if (info->gptp_ref_clk) {
- priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
- if (IS_ERR(priv->gptp_clk)) {
- error = PTR_ERR(priv->gptp_clk);
- goto out_disable_refclk;
- }
- clk_prepare_enable(priv->gptp_clk);
- }
-
- ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_frame_size -
+ (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2802,25 +2920,11 @@ static int ravb_probe(struct platform_device *pdev)
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
- /* Set AVB config mode */
- error = ravb_set_config_mode(ndev);
+ error = ravb_compute_gti(ndev);
if (error)
- goto out_disable_gptp_clk;
-
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- }
-
- if (info->internal_delay) {
- ravb_parse_delay_mode(np, ndev);
- ravb_set_delay_mode(ndev);
- }
+ ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2831,22 +2935,22 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Initialise HW timestamp list */
INIT_LIST_HEAD(&priv->ts_skb_list);
- /* Initialise PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_init(ndev, pdev);
-
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+ /* Set config mode as this is needed for PHY initialization. */
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ goto out_rpm_put;
+
/* Read and set MAC address */
ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -2859,9 +2963,14 @@ static int ravb_probe(struct platform_device *pdev)
error = ravb_mdio_init(priv);
if (error) {
dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
+ goto out_reset_mode;
}
+ /* Undo previous switch to config opmode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ goto out_mdio_release;
+
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
@@ -2877,7 +2986,8 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- platform_set_drvdata(pdev, ndev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -2886,22 +2996,19 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+out_mdio_release:
ravb_mdio_release(priv);
-out_dma_free:
+out_reset_mode:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
-
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-out_disable_gptp_clk:
- clk_disable_unprepare(priv->gptp_clk);
-out_disable_refclk:
- clk_disable_unprepare(priv->refclk);
-out_release:
+out_rpm_put:
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_unprepare(priv->refclk);
+out_reset_assert:
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
@@ -2913,6 +3020,12 @@ static void ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int error;
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ return;
unregister_netdev(ndev);
if (info->nc_queues)
@@ -2921,20 +3034,13 @@ static void ravb_remove(struct platform_device *pdev)
ravb_mdio_release(priv);
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- ravb_set_opmode(ndev, CCC_OPC_RESET);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ clk_unprepare(priv->refclk);
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2960,6 +3066,9 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return enable_irq_wake(priv->emac_irq);
}
@@ -2967,6 +3076,20 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ /* Set reset mode to rearm the WoL logic. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ /* Set AVB config mode. */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ return error;
+
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2980,102 +3103,96 @@ static int ravb_wol_restore(struct net_device *ndev)
return disable_irq_wake(priv->emac_irq);
}
-static int __maybe_unused ravb_suspend(struct device *dev)
+static int ravb_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
- return 0;
+ goto reset_assert;
netif_device_detach(ndev);
if (priv->wol_enabled)
- ret = ravb_wol_setup(ndev);
- else
- ret = ravb_close(ndev);
+ return ravb_wol_setup(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_stop(ndev);
+ ret = ravb_close(ndev);
+ if (ret)
+ return ret;
- return ret;
+ ret = pm_runtime_force_suspend(&priv->pdev->dev);
+ if (ret)
+ return ret;
+
+reset_assert:
+ return reset_control_assert(priv->rstc);
}
-static int __maybe_unused ravb_resume(struct device *dev)
+static int ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- int ret = 0;
-
- /* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled) {
- ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
- if (ret)
- return ret;
- }
-
- /* All register have been reset to default values.
- * Restore all registers which where setup at probe time and
- * reopen device if it was running before system suspended.
- */
+ int ret;
- /* Set AVB config mode */
- ret = ravb_set_config_mode(ndev);
+ ret = reset_control_deassert(priv->rstc);
if (ret)
return ret;
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
+ if (!netif_running(ndev))
+ return 0;
+
+ /* If WoL is enabled, restore the interface. */
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
-
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}
- if (info->internal_delay)
- ravb_set_delay_mode(ndev);
+ /* Reopening the interface will restore the device to the working state. */
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ goto out_rpm_put;
- /* Restore descriptor base address table */
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ ravb_set_rx_mode(ndev);
+ netif_device_attach(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_init(ndev, priv->pdev);
+ return 0;
- if (netif_running(ndev)) {
- if (priv->wol_enabled) {
- ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- }
- ret = ravb_open(ndev);
- if (ret < 0)
- return ret;
- ravb_set_rx_mode(ndev);
- netif_device_attach(ndev);
+out_rpm_put:
+ if (!priv->wol_enabled) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
return ret;
}
-static int __maybe_unused ravb_runtime_nop(struct device *dev)
+static int ravb_runtime_suspend(struct device *dev)
{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ clk_disable(priv->refclk);
+
return 0;
}
+static int ravb_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return clk_enable(priv->refclk);
+}
+
static const struct dev_pm_ops ravb_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
- SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
+ SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
+ RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};
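
The new runtime-PM callbacks only gate the reference clock: the sleeping clk_prepare() is done once at probe time, clk_enable()/clk_disable() toggle the clock from the runtime-PM callbacks, and pm_ptr() together with SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() allows the ops to be dropped when CONFIG_PM is disabled. A hedged sketch of the prepare/enable split with placeholder names (struct example_priv and the example_* functions are illustrative, not this driver's code):

#include <linux/clk.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct example_priv {
	struct clk *refclk;	/* prepared once at probe time */
};

/* Probe-time setup: prepare the clock (may sleep) and enable runtime PM
 * with autosuspend, leaving only the cheap enable/disable step to the
 * runtime-PM callbacks.
 */
static int example_clk_pm_init(struct device *dev, struct example_priv *priv)
{
	int ret;

	ret = clk_prepare(priv->refclk);
	if (ret)
		return ret;

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}

static int example_runtime_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	clk_disable(priv->refclk);	/* cheap, non-sleeping half */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	return clk_enable(priv->refclk);
}

static const struct dev_pm_ops example_pm_ops = {
	RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};
/* hooked up in the platform driver with .pm = pm_ptr(&example_pm_ops) */
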
static struct platform_driver ravb_driver = {
@@ -3083,7 +3200,7 @@ static struct platform_driver ravb_driver = {
.remove_new = ravb_remove,
.driver = {
.name = "ravb",
- .pm = &ravb_dev_pm_ops,
+ .pm = pm_ptr(&ravb_dev_pm_ops),
.of_match_table = ravb_match_table,
},
};
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 9e59669a93dd..755db89db909 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -32,7 +32,6 @@
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <generated/utsrelease.h>
#include "rocker_hw.h"
#include "rocker.h"
@@ -2227,7 +2226,6 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index d14e0cfc3a6b..1458939c3bf5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -503,7 +503,6 @@ struct sxgbe_priv_data {
bool tx_path_in_lpi_mode;
int lpi_irq;
int eee_enabled;
- int eee_active;
int tx_lpi_timer;
};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 8ba017ec9849..4a439b34114d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -133,22 +133,20 @@ static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
static int sxgbe_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (!priv->hw_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 71439825ea4e..ecbe3994f2b1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -130,7 +130,6 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
if (phy_init_eee(ndev->phydev, true))
return false;
- priv->eee_active = 1;
timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 175bd9cdfdac..551f890db90a 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -595,7 +595,7 @@ void efx_stop_all(struct efx_nic *efx)
efx_stop_datapath(efx);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index e001f27085c6..1cb32aedd89c 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2085,7 +2085,7 @@ int ef4_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
static void ef4_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fac227d372db..dcd901eccfc8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index e4b294b8e9ac..88e5bc347a44 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -605,7 +605,7 @@ static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats
return efx->type->update_stats(efx, full_stats, core_stats);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 4579f43484c3..219fb358a646 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
index a7a9ab304e13..71f9b5ec5ae4 100644
--- a/drivers/net/ethernet/sfc/siena/tx_common.c
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -317,11 +317,10 @@ static int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 9f2393d34371..2adb132b2f7e 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -336,11 +336,10 @@ int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
index 64a6768f75ea..ddf149db8180 100644
--- a/drivers/net/ethernet/sfc/tx_tso.c
+++ b/drivers/net/ethernet/sfc/tx_tso.c
@@ -174,8 +174,8 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
unsigned int header_len, in_len;
dma_addr_t dma_addr;
- st->ip_off = skb_network_header(skb) - skb->data;
- st->tcp_off = skb_transport_header(skb) - skb->data;
+ st->ip_off = skb_network_offset(skb);
+ st->tcp_off = skb_transport_offset(skb);
header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
in_len = skb_headlen(skb) - header_len;
st->header_len = header_len;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 758347616535..78ff3af7911a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -98,6 +98,7 @@ static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+MODULE_DESCRIPTION("SMC 91C9x/91C1xxx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc91x");
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 31cb7d0166f0..74f1ccc96459 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -56,6 +56,7 @@
#define SMSC_MDIONAME "smsc911x-mdio"
#define SMSC_DRV_VERSION "2008-10-21"
+MODULE_DESCRIPTION("SMSC LAN911x/LAN921x Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
MODULE_ALIAS("platform:smsc911x");
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index e1c4a11c1f18..15cb96c2506d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -26,6 +26,7 @@
#define DRV_DESCRIPTION "SMSC LAN9420 driver"
#define DRV_VERSION "1.01"
+MODULE_DESCRIPTION("SMSC LAN9420 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 85dcda51df05..4ec61f1ee71a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -165,9 +165,9 @@ config DWMAC_STARFIVE
help
Support for ethernet controllers on StarFive RISC-V SoCs
- This selects the StarFive platform specific glue layer support for
- the stmmac device driver. This driver is used for StarFive JH7110
- ethernet controller.
+ This selects the StarFive platform specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ StarFive JH7100 and JH7110 ethernet controllers.
config DWMAC_STI
tristate "STi GMAC support"
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 5ba606a596e7..a6fefe675ef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -225,6 +225,8 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
@@ -369,6 +371,7 @@ enum request_irq_err {
REQ_IRQ_ERR_ALL,
REQ_IRQ_ERR_TX,
REQ_IRQ_ERR_RX,
+ REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
REQ_IRQ_ERR_LPI,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 31631e3f89d0..e254b21fdb59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,6 +106,7 @@ struct qcom_ethqos {
struct clk *link_clk;
struct phy *serdes_phy;
unsigned int speed;
+ int serdes_speed;
phy_interface_t phy_mode;
const struct ethqos_emac_por *por;
@@ -169,6 +170,9 @@ static void rgmii_dump(void *priv)
static void
ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
{
+ if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
+ return;
+
switch (speed) {
case SPEED_1000:
ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
@@ -606,19 +610,39 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
*/
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
+ struct net_device *dev = platform_get_drvdata(ethqos->pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
int val;
val = readl(ethqos->mac_base + MAC_CTRL_REG);
switch (ethqos->speed) {
+ case SPEED_2500:
+ val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_2500)
+ phy_set_speed(ethqos->serdes_phy, SPEED_2500);
+ ethqos->serdes_speed = SPEED_2500;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ break;
case SPEED_1000:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
@@ -627,6 +651,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
}
@@ -728,7 +756,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct qcom_ethqos *ethqos;
- int ret;
+ int ret, i;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -799,6 +827,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
"Failed to get serdes phy\n");
ethqos->speed = SPEED_1000;
+ ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
@@ -822,6 +851,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->serdes_powerdown = qcom_ethqos_serdes_powerdown;
}
+ /* Enable TSO on queue0 and TBS on the rest of the queues */
+ for (i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ba2ce776bd4d..68f85e4605cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -585,4 +585,5 @@ static struct platform_driver socfpga_dwmac_driver = {
};
module_platform_driver(socfpga_dwmac_driver);
+MODULE_DESCRIPTION("Altera SOC DWMAC Specific Glue layer");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 5d630affb4d1..4e1076faee0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,13 +15,20 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
-#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+
+#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
+
+struct starfive_dwmac_data {
+ unsigned int gtxclk_dlychain;
+};
struct starfive_dwmac {
struct device *dev;
struct clk *clk_tx;
+ const struct starfive_dwmac_data *data;
};
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
@@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
break;
@@ -89,6 +98,14 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+ if (dwmac->data) {
+ err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN,
+ dwmac->data->gtxclk_dlychain);
+ if (err)
+ return dev_err_probe(dwmac->dev, err,
+ "error selecting gtxclk delay chain\n");
+ }
+
return 0;
}
@@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
+ dwmac->data = device_get_match_data(&pdev->dev);
+
dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
if (IS_ERR(dwmac->clk_tx))
return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
@@ -144,8 +163,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+static const struct starfive_dwmac_data jh7100_data = {
+ .gtxclk_dlychain = 4,
+};
+
static const struct of_device_id starfive_dwmac_match[] = {
- { .compatible = "starfive,jh7110-dwmac" },
+ { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data },
+ { .compatible = "starfive,jh7110-dwmac" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
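
The JH7100 addition above follows the standard OF match-data pattern: SoC-specific parameters hang off the .data member of the of_device_id table and are fetched in probe with device_get_match_data(), which returns NULL for compatibles (here JH7110) that carry no extra data. A hedged sketch with placeholder names (vendor,soc-a/soc-b and the example_* identifiers are invented for illustration):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_match_data {
	unsigned int delay_chain;
};

static const struct example_match_data example_soc_a_data = {
	.delay_chain = 4,
};

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,soc-a", .data = &example_soc_a_data },
	{ .compatible = "vendor,soc-b" },	/* no extra data needed */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_match);

static int example_probe(struct platform_device *pdev)
{
	const struct example_match_data *data;

	/* NULL when the matched compatible carries no .data pointer. */
	data = device_get_match_data(&pdev->dev);
	if (data)
		dev_info(&pdev->dev, "delay chain %u\n", data->delay_chain);

	return 0;
}
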
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 358e7dcb6a9a..17d9120db5fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -92,7 +92,7 @@
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
-/* Following DMA defines are chanels oriented */
+/* Following DMA defines are channel-oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1bd34b2a47e8..29367105df54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
- .est_off = EST_XGMAC_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 14c9d2637dfe..dff02d75d519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -86,10 +86,6 @@ struct stmmac_counters {
unsigned int mmc_rx_discard_octets_gb;
unsigned int mmc_rx_align_err_frames;
- /* IPC */
- unsigned int mmc_rx_ipc_intr_mask;
- unsigned int mmc_rx_ipc_intr;
-
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 8597c6abae8d..7eb477faa75a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -316,9 +316,6 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
- /* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f155e4841c62..dddcaa9220cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,6 +31,7 @@ struct stmmac_resources {
int wol_irq;
int lpi_irq;
int irq;
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -298,6 +299,7 @@ struct stmmac_priv {
void __iomem *ptpaddr;
void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -306,6 +308,7 @@ struct stmmac_priv {
char int_name_mac[IFNAMSIZ + 9];
char int_name_wol[IFNAMSIZ + 9];
char int_name_lpi[IFNAMSIZ + 9];
+ char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index 4da6ccc17c20..c9693f77e1f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
void __iomem *est_addr = priv->estaddr;
u32 txqcnt_mask = BIT(txqcnt) - 1;
+ int i;
status = readl(est_addr + EST_STATUS);
@@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
+ for (i = 0; i < txqcnt; i++) {
+ if (feqn & BIT(i))
+ x->mtl_est_txq_hlbf[i]++;
+ }
+
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ec44becf0e2d..e1537a57815f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -243,8 +243,6 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
STMMAC_MMC_STAT(mmc_rx_align_err_frames),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
@@ -897,15 +895,13 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
edata->tx_lpi_enabled = priv->tx_lpi_enabled;
@@ -913,7 +909,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 75d029704503..24cd80490d19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2506,6 +2506,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ continue;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -2672,7 +2679,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
}
if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
- } else {
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
struct stmmac_xsk_tx_complete tx_compl = {
.priv = priv,
.desc = p,
@@ -3590,6 +3598,10 @@ static void stmmac_free_irq(struct net_device *dev,
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
+ case REQ_IRQ_ERR_SFTY:
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
+ free_irq(priv->sfty_irq, dev);
+ fallthrough;
case REQ_IRQ_ERR_WOL:
free_irq(dev->irq, dev);
fallthrough;
@@ -3660,6 +3672,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctible/Uncorrectible
+ * Error line in case of another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ int_name = priv->int_name_sfty;
+ sprintf(int_name, "%s:%s", dev->name, "safety");
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty MSI %d (error: %d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
/* Request the Safety Feature Correctible Error line in
* case of another line is used
*/
@@ -3797,6 +3826,21 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctible/Uncorrectible
+ * Error line in case of another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
return 0;
irq_error:
@@ -4005,8 +4049,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- if (priv->fpe_wq)
+ if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
+ priv->fpe_wq = NULL;
+ }
netdev_info(priv->dev, "FPE workqueue stop");
}
@@ -4497,6 +4543,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ skb->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
+
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4714,6 +4767,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
dev_kfree_skb(skb);
priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
@@ -4870,6 +4924,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdpf->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ return STMMAC_XDP_CONSUMED;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -6003,10 +6064,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
priv->tx_path_in_lpi_mode = false;
}
- for (queue = 0; queue < queues_count; queue++) {
- status = stmmac_host_mtl_irq_status(priv, priv->hw,
- queue);
- }
+ for (queue = 0; queue < queues_count; queue++)
+ stmmac_host_mtl_irq_status(priv, priv->hw, queue);
/* PCS link status */
if (priv->hw->pcs &&
@@ -6041,8 +6100,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
- /* Check if a fatal error happened */
- if (stmmac_safety_feat_interrupt(priv))
+ /* Check ASP error if it isn't delivered via an individual IRQ */
+ if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
/* To handle Common interrupts */
@@ -6059,11 +6118,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6079,11 +6133,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6105,11 +6154,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6136,11 +6180,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -7491,6 +7530,7 @@ int stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
+ priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index aefc121464b5..13a30e6df4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -110,6 +110,8 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
/* Enable and restart the Auto-Negotiation */
if (ane)
value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+ else
+ value &= ~GMAC_AN_CTRL_ANE;
/* In case of MAC-2-MAC connection, block is configured to operate
* according to MAC conf register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 70eadc83ca68..54797edc9b38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -743,6 +743,14 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
+ stmmac_res->sfty_irq =
+ platform_get_irq_byname_optional(pdev, "sfty");
+ if (stmmac_res->sfty_irq < 0) {
+ if (stmmac_res->sfty_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "IRQ sfty not found\n");
+ }
+
stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
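
platform_get_irq_byname_optional(), used above for the new "sfty" line, behaves like platform_get_irq_byname() but does not log an error when the interrupt is simply not described, so only -EPROBE_DEFER has to be propagated and anything else can be treated as "feature not wired up". A short hedged sketch with a placeholder helper name:

#include <linux/platform_device.h>

/* Fetch an optional interrupt line by name. Returns the IRQ number,
 * 0 if the line is not described for this device, or -EPROBE_DEFER if
 * the interrupt provider is not ready yet.
 */
static int example_get_optional_irq(struct platform_device *pdev, const char *name)
{
	int irq;

	irq = platform_get_irq_byname_optional(pdev, name);
	if (irq == -EPROBE_DEFER)
		return irq;		/* retry the probe later */
	if (irq < 0)
		return 0;		/* not wired up; treat as absent */

	return irq;
}
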
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 26fa33e5ec34..cce00719937d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
return time;
}
-static int tc_setup_taprio(struct stmmac_priv *priv,
- struct tc_taprio_qopt_offload *qopt)
+static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct plat_stmmacenet_data *plat = priv->plat;
+ u32 num_tc = qopt->mqprio.qopt.num_tc;
+ u32 offset, count, i, j;
+
+ /* QueueMaxSDU received by the driver corresponds to the Linux traffic
+ * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
+ */
+ for (i = 0; i < num_tc; i++) {
+ if (!qopt->max_sdu[i])
+ continue;
+
+ offset = qopt->mqprio.qopt.offset[i];
+ count = qopt->mqprio.qopt.count[i];
+
+ for (j = offset; j < offset + count; j++)
+ plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ }
+}
+
+static int tc_taprio_configure(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
@@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- else if (qopt->cmd != TAPRIO_CMD_REPLACE)
- return -EOPNOTSUPP;
if (qopt->num_entries >= dep)
return -EINVAL;
@@ -1045,6 +1065,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ter = qopt->cycle_time_extension;
+ tc_taprio_map_maxsdu_txq(priv, qopt);
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1078,6 +1100,11 @@ disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
+ /* Reset taprio status */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ priv->xstats.max_sdu_txq_drop[i] = 0;
+ priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ }
mutex_unlock(&priv->plat->est->lock);
}
@@ -1095,6 +1122,57 @@ disable:
return ret;
}
+static void tc_taprio_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u64 window_drops = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ window_drops += priv->xstats.max_sdu_txq_drop[i] +
+ priv->xstats.mtl_est_txq_hlbf[i];
+ qopt->stats.window_drops = window_drops;
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ qopt->stats.tx_overruns = 0;
+}
+
+static void tc_taprio_queue_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
+ int queue = qopt->queue_stats.queue;
+
+ q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
+ priv->xstats.mtl_est_txq_hlbf[queue];
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ q_stats->stats.tx_overruns = 0;
+}
+
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int err = 0;
+
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ case TAPRIO_CMD_DESTROY:
+ err = tc_taprio_configure(priv, qopt);
+ break;
+ case TAPRIO_CMD_STATS:
+ tc_taprio_stats(priv, qopt);
+ break;
+ case TAPRIO_CMD_QUEUE_STATS:
+ tc_taprio_queue_stats(priv, qopt);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1126,6 +1204,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
return -EOPNOTSUPP;
caps->gate_mask_per_txq = true;
+ caps->supports_queue_max_sdu = true;
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 3525d5c0d694..351609f4f011 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1144,9 +1144,9 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
nskb->protocol = skb->protocol;
offset = skb_mac_header(skb) - skb->data;
skb_set_mac_header(nskb, offset);
- offset = skb_network_header(skb) - skb->data;
+ offset = skb_network_offset(skb);
skb_set_network_header(nskb, offset);
- offset = skb_transport_header(skb) - skb->data;
+ offset = skb_transport_offset(skb);
skb_set_transport_header(nskb, offset);
offset = 0;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 35fceba01ea4..d6ce2c9f0a8d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -514,14 +514,14 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
-static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
return phylink_ethtool_get_eee(salve->phylink, edata);
}
-static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 9d2f4ac783e4..2939a21ca74f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -294,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
txqueue,
netif_tx_queue_stopped(netif_txq),
jiffies_to_msecs(jiffies - trans_start),
- dql_avail(&netif_txq->dql),
+ netdev_queue_dql_avail(netif_txq),
k3_cppi_desc_pool_avail(tx_chn->desc_pool));
if (netif_tx_queue_stopped(netif_txq)) {
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 26dc906eae90..57fe936bb177 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,4 +90,5 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
+MODULE_DESCRIPTION("TI CPSW Switch common module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a557a477d039..f7b283353ba2 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -422,7 +422,7 @@ int cpsw_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -434,7 +434,7 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 0e27c433098d..7efa72502c86 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -496,8 +496,8 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd);
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index a27ec1dcc8d5..9a7dd7efcf69 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -45,7 +45,7 @@ static int emac_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_set_link_ksettings(ndev, ecmd);
}
-static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
@@ -53,7 +53,7 @@ static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return phy_ethtool_get_eee(ndev->phydev, edata);
}
-static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
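
Editor's note: the ethtool_eee -> ethtool_keee conversions in the TI hunks above are mechanical; only the argument type of the .get_eee/.set_eee operations changes, and the phylib helpers already take the keee form. A minimal sketch with hypothetical example_* names:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static int example_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
	{
		if (!ndev->phydev)
			return -EOPNOTSUPP;

		return phy_ethtool_get_eee(ndev->phydev, edata);
	}

	static int example_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
	{
		if (!ndev->phydev)
			return -EOPNOTSUPP;

		return phy_ethtool_set_eee(ndev->phydev, edata);
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.get_eee = example_get_eee,
		.set_eee = example_set_eee,
	};
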
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 411898a4f38c..cf7b73f8f450 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1489,9 +1489,6 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- /* stop PRUs */
- prueth_emac_stop(emac);
-
if (prueth->emacs_initialized == 1)
icss_iep_exit(emac->iep);
@@ -1502,7 +1499,6 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->rx_chns.irq[rx_flow], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_tx_chns(emac);
prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
prueth_cleanup_tx_chns(emac);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d5b75af163d3..5ee8e8980393 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
- if (!descr->skb) {
- descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
- return -ENOMEM;
- }
descr->hw_regs.dmac_cmd_status = 0;
descr->hw_regs.result_size = 0;
descr->hw_regs.valid_size = 0;
descr->hw_regs.data_error = 0;
descr->hw_regs.payload.dev_addr = 0;
descr->hw_regs.payload.size = 0;
- descr->skb = NULL;
+
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ if (!descr->skb) {
+ descr->hw_regs.payload.dev_addr = 0; /* tell the DMAC not to touch memory */
+ return -ENOMEM;
+ }
offset = ((unsigned long)descr->skb->data) &
(GELIC_NET_RXBUF_ALIGN - 1);
@@ -698,7 +698,7 @@ gelic_card_get_next_tx_descr(struct gelic_card *card)
}
/**
- * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field
+ * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field
* @descr: descriptor structure to fill out
* @skb: packet to consider
*
@@ -1461,7 +1461,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
}
/**
- * gelic_ether_setup_netdev - initialization of net_device
+ * gelic_net_setup_netdev - initialization of net_device
* @netdev: net_device structure
* @card: card structure
*
@@ -1518,14 +1518,16 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
return 0;
}
+#define GELIC_ALIGN (32)
+
/**
* gelic_alloc_card_net - allocates net_device and card structure
+ * @netdev: interface device structure
*
* returns the card structure or NULL in case of errors
*
* the card and net_device structures are linked to each other
*/
-#define GELIC_ALIGN (32)
static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index aeed2a093e34..edd8b59680e5 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -664,7 +664,7 @@ static int mse102x_probe_spi(struct spi_device *spi)
spi->bits_per_word = 8;
spi->mode |= SPI_MODE_3;
/* enforce minimum speed to ensure device functionality */
- spi->master->min_speed_hz = MIN_FREQ_HZ;
+ spi->controller->min_speed_hz = MIN_FREQ_HZ;
if (!spi->max_speed_hz)
spi->max_speed_hz = MAX_FREQ_HZ;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1db754615cca..945c13d1a982 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1958,8 +1958,6 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
- wx->msix_in_use = false;
-
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 8706223a6e5a..6dff2c85682d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1257,7 +1257,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* compute header lengths */
l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
+ *hdr_len = enc ? skb_inner_transport_offset(skb) :
skb_transport_offset(skb);
*hdr_len += l4len;
@@ -1614,14 +1614,12 @@ static int wx_acquire_msix_vectors(struct wx *wx)
/* One for non-queue interrupts */
nvecs += 1;
- if (!wx->msix_in_use) {
- wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entry) {
- kfree(wx->msix_q_entries);
- wx->msix_q_entries = NULL;
- return -ENOMEM;
- }
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
@@ -1931,10 +1929,8 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (pdev->msix_enabled) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
- if (!wx->msix_in_use) {
- kfree(wx->msix_entry);
- wx->msix_entry = NULL;
- }
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b4dc4f341117..1fdeb464d5f4 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1047,7 +1047,6 @@ struct wx {
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
- bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 7507f762edfe..42718875277c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
+ txgbe_irq.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
new file mode 100644
index 000000000000..b3e3605d1edb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/irqdomain.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_phy.h"
+#include "txgbe_irq.h"
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+void txgbe_irq_enable(struct wx *wx, bool queues)
+{
+ wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+}
+
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link(maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+int txgbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
+static int txgbe_request_link_irq(struct txgbe *txgbe)
+{
+ txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ return request_threaded_irq(txgbe->link_irq, NULL,
+ txgbe_link_irq_handler,
+ IRQF_ONESHOT, "txgbe-link-irq", txgbe);
+}
+
+static const struct irq_chip txgbe_irq_chip = {
+ .name = "txgbe-misc-irq",
+};
+
+static int txgbe_misc_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct txgbe *txgbe = d->host_data;
+
+ irq_set_chip_data(irq, txgbe);
+ irq_set_chip(irq, &txgbe->misc.chip);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ .map = txgbe_misc_irq_domain_map,
+};
+
+static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void txgbe_del_irq_domain(struct txgbe *txgbe)
+{
+ int hwirq, virq;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
+ virq = irq_find_mapping(txgbe->misc.domain, hwirq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(txgbe->misc.domain);
+}
+
+void txgbe_free_misc_irq(struct txgbe *txgbe)
+{
+ free_irq(txgbe->gpio_irq, txgbe);
+ free_irq(txgbe->link_irq, txgbe);
+ free_irq(txgbe->misc.irq, txgbe);
+ txgbe_del_irq_domain(txgbe);
+}
+
+int txgbe_setup_misc_irq(struct txgbe *txgbe)
+{
+ struct wx *wx = txgbe->wx;
+ int hwirq, err;
+
+ txgbe->misc.nirqs = 2;
+ txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
+ if (!txgbe->misc.domain)
+ return -ENOMEM;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
+ irq_create_mapping(txgbe->misc.domain, hwirq);
+
+ txgbe->misc.chip = txgbe_irq_chip;
+ if (wx->pdev->msix_enabled)
+ txgbe->misc.irq = wx->msix_entry->vector;
+ else
+ txgbe->misc.irq = wx->pdev->irq;
+
+ err = request_threaded_irq(txgbe->misc.irq, NULL,
+ txgbe_misc_irq_handle,
+ IRQF_ONESHOT,
+ wx->netdev->name, txgbe);
+ if (err)
+ goto del_misc_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_misc_irq;
+
+ err = txgbe_request_link_irq(txgbe);
+ if (err)
+ goto free_gpio_irq;
+
+ return 0;
+
+free_gpio_irq:
+ free_irq(txgbe->gpio_irq, txgbe);
+free_misc_irq:
+ free_irq(txgbe->misc.irq, txgbe);
+del_misc_irq:
+ txgbe_del_irq_domain(txgbe);
+
+ return err;
+}
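
Editor's note: the new txgbe_irq.c follows a common pattern — a linear irq_domain provides one virtual IRQ per miscellaneous source, consumers claim them with request_threaded_irq(), and the parent threaded handler re-dispatches each asserted source through handle_nested_irq(). A stripped-down sketch of just the demux step, with a hypothetical pending bitmap handed in by the caller:

	#include <linux/bitops.h>
	#include <linux/interrupt.h>
	#include <linux/irqdesc.h>
	#include <linux/irqdomain.h>

	static irqreturn_t example_demux(struct irq_domain *domain,
					 unsigned long pending, unsigned int nirqs)
	{
		unsigned long hwirq;
		unsigned int handled = 0;

		/* One nested handler call per asserted hardware source. */
		for_each_set_bit(hwirq, &pending, nirqs) {
			handle_nested_irq(irq_find_mapping(domain, hwirq));
			handled++;
		}

		return handled ? IRQ_HANDLED : IRQ_NONE;
	}
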
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
new file mode 100644
index 000000000000..b77945e7a0f2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+void txgbe_irq_enable(struct wx *wx, bool queues);
+int txgbe_request_irq(struct wx *wx);
+void txgbe_free_misc_irq(struct txgbe *txgbe);
+int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3b151c410a5c..bd4624d14ca0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -17,6 +17,7 @@
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_irq.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -76,137 +77,11 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-/**
- * txgbe_irq_enable - Enable default interrupt generation settings
- * @wx: pointer to private structure
- * @queues: enable irqs for queues
- **/
-static void txgbe_irq_enable(struct wx *wx, bool queues)
-{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
-
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
- if (queues)
- wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
-}
-
-/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
- * @wx: board private structure
- *
- * Allocate MSI-X vectors and request interrupts from the kernel.
- **/
-static int txgbe_request_msix_irqs(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- int vector, err;
-
- for (vector = 0; vector < wx->num_q_vectors; vector++) {
- struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_q_entries[vector];
-
- if (q_vector->tx.ring && q_vector->rx.ring)
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-TxRx-%d", netdev->name, entry->entry);
- else
- /* skip this unused q_vector */
- continue;
-
- err = request_irq(entry->vector, wx_msix_clean_rings, 0,
- q_vector->name, q_vector);
- if (err) {
- wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
- q_vector->name, err);
- goto free_queue_irqs;
- }
- }
-
- return 0;
-
-free_queue_irqs:
- while (vector) {
- vector--;
- free_irq(wx->msix_q_entries[vector].vector,
- wx->q_vector[vector]);
- }
- wx_reset_interrupt_capability(wx);
- return err;
-}
-
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -518,6 +393,7 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -528,6 +404,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
+ txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -536,6 +413,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
+ txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -751,10 +629,14 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
- err = txgbe_init_phy(txgbe);
+ err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
+ err = txgbe_init_phy(txgbe);
+ if (err)
+ goto err_free_misc_irq;
+
err = register_netdev(netdev);
if (err)
goto err_remove_phy;
@@ -781,6 +663,8 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
+err_free_misc_irq:
+ txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
@@ -813,6 +697,7 @@ static void txgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
+ txgbe_free_misc_irq(txgbe);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1b84d495d14e..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -292,6 +292,21 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
return 0;
}
+irqreturn_t txgbe_link_irq_handler(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+ bool up;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
+
+ phylink_mac_change(wx->phylink, up);
+
+ return IRQ_HANDLED;
+}
+
static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct wx *wx = gpiochip_get_data(chip);
@@ -437,7 +452,7 @@ static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
}
static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe_gpio_irq",
+ .name = "txgbe-gpio-irq",
.irq_ack = txgbe_gpio_irq_ack,
.irq_mask = txgbe_gpio_irq_mask,
.irq_unmask = txgbe_gpio_irq_unmask,
@@ -446,29 +461,25 @@ static const struct irq_chip txgbe_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static void txgbe_irq_handler(struct irq_desc *desc)
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct wx *wx = irq_desc_get_handler_data(desc);
- struct txgbe *txgbe = wx->priv;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
irq_hw_number_t hwirq;
unsigned long gpioirq;
struct gpio_chip *gc;
unsigned long flags;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
-
- chained_irq_enter(chip, desc);
gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
gc = txgbe->gpio;
for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
u32 irq_type = irq_get_trigger_type(gpio);
- generic_handle_domain_irq(gc->irq.domain, hwirq);
+ txgbe_gpio_irq_ack(d);
+ handle_nested_irq(gpio);
if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
raw_spin_lock_irqsave(&wx->gpio_lock, flags);
@@ -477,17 +488,34 @@ static void txgbe_irq_handler(struct irq_desc *desc)
}
}
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
+}
+
+void txgbe_reinit_gpio_intr(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+ irq_hw_number_t hwirq;
+ unsigned long gpioirq;
+ struct gpio_chip *gc;
+ unsigned long flags;
+
+ /* Ack any GPIO interrupts left pending from before the IRQ was enabled. */
+ gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
- TXGBE_PX_MISC_ETH_AN)) {
- u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
+ gc = txgbe->gpio;
+ for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+ int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
+ u32 irq_type = irq_get_trigger_type(gpio);
- phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
- }
+ txgbe_gpio_irq_ack(d);
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+ txgbe_toggle_trigger(gc, hwirq);
+ raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+ }
+ }
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -524,19 +552,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->parent_handler = txgbe_irq_handler;
- girq->parent_handler_data = wx;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents), GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
-
- /* now only suuported on MSI-X interrupt */
- if (!wx->msix_entry)
- return -EPERM;
-
- girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -754,8 +769,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
goto err_unregister_i2c;
}
- wx->msix_in_use = true;
-
return 0;
err_unregister_i2c:
@@ -788,5 +801,4 @@ void txgbe_remove_phy(struct txgbe *txgbe)
phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
- txgbe->wx->msix_in_use = false;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 1ab592124986..8a026d804fe2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,6 +4,9 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
+void txgbe_reinit_gpio_intr(struct wx *wx);
+irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 270a6fd9ad0b..1b4ff50d5857 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -5,6 +5,7 @@
#define _TXGBE_TYPE_H_
#include <linux/property.h>
+#include <linux/irq.h>
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -169,15 +170,31 @@ struct txgbe_nodes {
const struct software_node *group[SWNODE_MAX + 1];
};
+enum txgbe_misc_irqs {
+ TXGBE_IRQ_GPIO = 0,
+ TXGBE_IRQ_LINK,
+ TXGBE_IRQ_MAX
+};
+
+struct txgbe_irq {
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq;
+};
+
struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
+ struct txgbe_irq misc;
struct dw_xpcs *xpcs;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
+ unsigned int gpio_irq;
+ unsigned int link_irq;
};
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3318b50a5911..f165616f36fe 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -539,8 +539,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 765aa516aada..940452d0a4d2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1114,8 +1114,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
ndev->irq = rc;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
+ lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(lp->base_addr)) {
rc = PTR_ERR(lp->base_addr);
goto error;
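
Editor's note: the w5300 and emaclite hunks above replace the platform_get_resource() + devm_ioremap_resource() pair with the combined helper. A hedged probe fragment showing the same call, with hypothetical names:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_map_regs(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		/* Fetch MEM resource 0 and ioremap it in one devm-managed step. */
		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... stash base/res in driver private data ... */
		return 0;
	}
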
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 9f505cf02d96..e9bc38fd2025 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1240,9 +1240,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
SelectPage(0);
PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
- freespace = GetWord(XIRCREG0_TSO);
- okay = freespace & 0x8000;
- freespace &= 0x7fff;
+ freespace = GetWord(XIRCREG0_TSO) & 0x7fff;
/* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
okay = pktlen +2 < freespace;
pr_debug("%s: avail. tx space=%u%s\n",
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 32c51c244153..2f6739fe78af 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
unsigned int len;
- int err = 0;
+ int nh, err = 0;
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(geneve->dev, rx_length_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (geneve_get_sk_family(gs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
@@ -319,22 +333,16 @@ static int geneve_init(struct net_device *dev)
struct geneve_dev *geneve = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&geneve->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL);
if (err) {
- free_percpu(dev->tstats);
gro_cells_destroy(&geneve->gro_cells);
return err;
}
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -344,7 +352,6 @@ static void geneve_uninit(struct net_device *dev)
dst_cache_destroy(&geneve->cfg.info.dst_cache);
gro_cells_destroy(&geneve->gro_cells);
- free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
@@ -507,7 +514,7 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
gh_len = geneve_hlen(gh);
hlen = off_gnv + gh_len;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
gh = skb_gro_header_slow(skb, hlen, off_gnv);
if (unlikely(!gh))
goto out;
@@ -1121,7 +1128,6 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_open = geneve_open,
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -1141,7 +1147,7 @@ static const struct ethtool_ops geneve_ethtool_ops = {
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type geneve_type = {
+static const struct device_type geneve_type = {
.name = "geneve",
};
@@ -1188,6 +1194,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
@@ -1900,29 +1907,26 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
}
}
-static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ geneve_destroy_tunnels(net, dev_to_kill);
+}
- list_for_each_entry(net, net_list, exit_list) {
- const struct geneve_net *gn = net_generic(net, geneve_net_id);
+static void __net_exit geneve_exit_net(struct net *net)
+{
+ const struct geneve_net *gn = net_generic(net, geneve_net_id);
- WARN_ON_ONCE(!list_empty(&gn->sock_list));
- }
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch = geneve_exit_batch_net,
+ .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
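
Editor's note: the geneve changes above lean on the core's per-CPU statistics handling. Declaring NETDEV_PCPU_STAT_TSTATS in setup lets register_netdevice() allocate dev->tstats and dev_get_stats() fold them in, so the manual allocation in .ndo_init, the free in .ndo_uninit and the dev_get_tstats64 hook can all be dropped. A minimal sketch, assuming a hypothetical example_setup():

	#include <linux/netdevice.h>

	static void example_setup(struct net_device *dev)
	{
		/* Core allocates/frees dev->tstats and folds them into
		 * dev_get_stats(); no ndo_init/ndo_uninit/ndo_get_stats64
		 * needed for the statistics alone.
		 */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	}
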
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b1919278e931..ba4704c2c640 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -711,25 +711,11 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
return ret;
}
-static int gtp_dev_init(struct net_device *dev)
-{
- struct gtp_dev *gtp = netdev_priv(dev);
-
- gtp->dev = dev;
-
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
-}
-
static void gtp_dev_uninit(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
gtp_encap_disable(gtp);
- free_percpu(dev->tstats);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -942,10 +928,8 @@ tx_err:
}
static const struct net_device_ops gtp_netdev_ops = {
- .ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static const struct device_type gtp_type = {
@@ -957,6 +941,7 @@ static void gtp_link_setup(struct net_device *dev)
unsigned int max_gtp_header_len = sizeof(struct iphdr) +
sizeof(struct udphdr) +
sizeof(struct gtp0_header);
+ struct gtp_dev *gtp = netdev_priv(dev);
dev->netdev_ops = &gtp_netdev_ops;
dev->needs_free_netdev = true;
@@ -970,11 +955,13 @@ static void gtp_link_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags |= IFF_NO_QUEUE;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
+ gtp->dev = dev;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
@@ -1876,23 +1863,23 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit(struct net *net)
+static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
- LIST_HEAD(list);
+ struct net *net;
- rtnl_lock();
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
+ }
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit = gtp_net_exit,
+ .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
@@ -1903,26 +1890,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
- err = rtnl_link_register(&gtp_link_ops);
+ err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
- err = genl_register_family(&gtp_genl_family);
+ err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
- goto unreg_rtnl_link;
+ goto unreg_pernet_subsys;
- err = register_pernet_subsys(&gtp_net_ops);
+ err = genl_register_family(&gtp_genl_family);
if (err < 0)
- goto unreg_genl_family;
+ goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
-unreg_genl_family:
- genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+ unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
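
Editor's note: the gtp and geneve pernet changes move device teardown into .exit_batch_rtnl, where the core already holds RTNL and unregisters everything queued on dev_to_kill in one batch. A hedged sketch with hypothetical example_* names (example_link_ops stands in for the driver's rtnl_link_ops):

	#include <linux/netdevice.h>
	#include <net/net_namespace.h>
	#include <net/rtnetlink.h>

	static struct rtnl_link_ops example_link_ops;

	static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
						       struct list_head *dev_to_kill)
	{
		struct net *net;

		list_for_each_entry(net, net_list, exit_list) {
			struct net_device *dev, *next;

			/* Queue our devices; the core calls
			 * unregister_netdevice_many() once, after the loop.
			 */
			for_each_netdev_safe(net, dev, next)
				if (dev->rtnl_link_ops == &example_link_ops)
					unregister_netdevice_queue(dev, dev_to_kill);
		}
	}

	static struct pernet_operations example_net_ops = {
		.exit_batch_rtnl = example_exit_batch_rtnl,
	};
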
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 164c7f605af5..f632b0cfd5ae 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -11,17 +11,16 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
#include <net/mac802154.h>
@@ -316,7 +315,7 @@ static const struct regmap_config at86rf230_regmap_spi_config = {
.val_bits = 8,
.write_flag_mask = CMD_REG | CMD_WRITE,
.read_flag_mask = CMD_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = AT86RF2XX_NUMREGS,
.writeable_reg = at86rf230_reg_writeable,
.readable_reg = at86rf230_reg_readable,
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 4ec0dab38872..e685a7f946f0 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2857,19 +2857,13 @@ static int ca8210_interrupt_init(struct spi_device *spi)
*/
static int ca8210_dev_com_init(struct ca8210_priv *priv)
{
- priv->mlme_workqueue = alloc_ordered_workqueue(
- "MLME work queue",
- WQ_UNBOUND
- );
+ priv->mlme_workqueue = alloc_ordered_workqueue("MLME work queue", 0);
if (!priv->mlme_workqueue) {
dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n");
return -ENOMEM;
}
- priv->irq_workqueue = alloc_ordered_workqueue(
- "ca8210 irq worker",
- WQ_UNBOUND
- );
+ priv->irq_workqueue = alloc_ordered_workqueue("ca8210 irq worker", 0);
if (!priv->irq_workqueue) {
dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
destroy_workqueue(priv->mlme_workqueue);
@@ -2956,7 +2950,7 @@ static int ca8210_test_interface_init(struct ca8210_priv *priv)
node_name,
sizeof(node_name),
"ca8210@%d_%d",
- priv->spi->master->bus_num,
+ priv->spi->controller->bus_num,
spi_get_chipselect(priv->spi, 0)
);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 87abe3b46316..433fb5839203 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -12,7 +12,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/debugfs.h>
@@ -251,7 +250,7 @@ static const struct regmap_config mcr20a_dar_regmap = {
.val_bits = 8,
.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
.read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = mcr20a_dar_writeable,
.readable_reg = mcr20a_dar_readable,
.volatile_reg = mcr20a_dar_volatile,
@@ -387,7 +386,7 @@ static const struct regmap_config mcr20a_iar_regmap = {
.val_bits = 8,
.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
.read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = mcr20a_iar_writeable,
.readable_reg = mcr20a_iar_readable,
.volatile_reg = mcr20a_iar_volatile,
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ee4cfbf2c5cc..d3f42efc5d1a 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -388,7 +388,7 @@ static const struct regmap_config mrf24j40_short_regmap = {
.pad_bits = 1,
.write_flag_mask = MRF24J40_SHORT_WRITE,
.read_flag_mask = MRF24J40_SHORT_READ,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = MRF24J40_SHORT_NUMREGS,
.writeable_reg = mrf24j40_short_reg_writeable,
.readable_reg = mrf24j40_short_reg_readable,
@@ -495,7 +495,7 @@ static const struct regmap_config mrf24j40_long_regmap = {
.pad_bits = 5,
.write_flag_mask = MRF24J40_LONG_ACCESS,
.read_flag_mask = MRF24J40_LONG_ACCESS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = MRF24J40_LONG_NUMREGS,
.writeable_reg = mrf24j40_long_reg_writeable,
.readable_reg = mrf24j40_long_reg_readable,
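
Editor's note: the REGCACHE_RBTREE -> REGCACHE_MAPLE switches in the ieee802154 drivers above are one-line cache-backend swaps; the rest of each register description is unchanged. A minimal, purely illustrative regmap_config (register widths and count are made up):

	#include <linux/regmap.h>

	static const struct regmap_config example_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0x3f,		/* hypothetical register count */
		.cache_type = REGCACHE_MAPLE,	/* was REGCACHE_RBTREE */
	};
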
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index f3355e040a9e..334cd62cf286 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -21,7 +21,6 @@
struct clk;
struct icc_path;
struct net_device;
-struct platform_device;
struct ipa_power;
struct ipa_smp2p;
@@ -31,7 +30,7 @@ struct ipa_interrupt;
* struct ipa - IPA information
* @gsi: Embedded GSI structure
* @version: IPA hardware version
- * @pdev: Platform device
+ * @dev: IPA device pointer
* @completion: Used to signal pipeline clear transfer complete
* @nb: Notifier block used for remoteproc SSR
* @notifier: Remoteproc SSR notifier
@@ -79,7 +78,7 @@ struct ipa_interrupt;
struct ipa {
struct gsi gsi;
enum ipa_version version;
- struct platform_device *pdev;
+ struct device *dev;
struct completion completion;
struct notifier_block nb;
void *notifier;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index f1419fbd776c..39219963dbb3 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -174,7 +174,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 size;
size = route ? ipa->route_count : ipa->filter_count + 1;
@@ -204,7 +204,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
@@ -256,7 +256,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
const char *name, u32 offset)
{
struct ipa_cmd_register_write *payload;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 offset_max;
u32 bit_count;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index afa1d56d9095..dd490941615e 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -233,8 +233,8 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *other_data;
- struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name other_name;
+ struct device *dev = ipa->dev;
if (ipa_gsi_endpoint_data_empty(data))
return true;
@@ -388,7 +388,7 @@ static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_endpoint_name name;
u32 max;
@@ -606,7 +606,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"no transaction to reset modem exception endpoints\n");
return -EBUSY;
}
@@ -1498,8 +1498,7 @@ ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
} else {
- dev_err(&ipa->pdev->dev,
- "unexpected tagged packet from endpoint %u\n",
+ dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
endpoint_id);
}
@@ -1536,6 +1535,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
u32 resid = total_len;
while (resid) {
@@ -1544,7 +1544,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
u32 len;
if (resid < IPA_STATUS_SIZE) {
- dev_err(&endpoint->ipa->pdev->dev,
+ dev_err(dev,
"short message (%u bytes < %zu byte status)\n",
resid, IPA_STATUS_SIZE);
break;
@@ -1666,8 +1666,8 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
*/
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
@@ -1769,7 +1769,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d resetting channel %u for endpoint %u\n",
ret, endpoint->channel_id, endpoint->endpoint_id);
}
@@ -1817,7 +1817,7 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
ret = gsi_channel_start(gsi, endpoint->channel_id);
if (ret) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
endpoint->channel_id, endpoint_id);
@@ -1854,14 +1854,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
/* Note that if stop fails, the channel's state is not well-defined */
ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
- dev_err(&ipa->pdev->dev,
- "error %d attempting to stop endpoint %u\n", ret,
- endpoint_id);
+ dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
+ ret, endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1881,7 +1880,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1983,7 +1982,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct reg *reg;
u32 endpoint_id;
u32 hw_limit;
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 4bc05948f772..c3e8784d51d9 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -19,6 +19,7 @@
* time only these three are supported.
*/
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
@@ -43,6 +44,30 @@ struct ipa_interrupt {
u32 enabled;
};
+/* Clear the suspend interrupt for all endpoints that signaled it */
+static void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 unit_count;
+ u32 unit;
+
+ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
+
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (!val || ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
+ }
+}
+
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
@@ -70,7 +95,7 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
* caused the interrupt, so defer clearing until after
* the handler has been called.
*/
- ipa_power_suspend_handler(ipa, irq_id);
+ ipa_interrupt_suspend_clear_all(interrupt);
fallthrough;
default: /* Silently ignore (and clear) any other condition */
@@ -85,14 +110,13 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- struct device *dev;
u32 pending;
u32 offset;
u32 mask;
int ret;
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
@@ -205,30 +229,6 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}
-/* Clear the suspend interrupt for all endpoints that signaled it */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
-{
- struct ipa *ipa = interrupt->ipa;
- u32 unit_count;
- u32 unit;
-
- unit_count = roundup(ipa->endpoint_count, 32);
- for (unit = 0; unit < unit_count; unit++) {
- const struct reg *reg;
- u32 val;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
-
- /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
- if (ipa->version == IPA_VERSION_3_0)
- continue;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
- }
-}
-
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
@@ -236,29 +236,17 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
}
/* Configure the IPA interrupt framework */
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
+int ipa_interrupt_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
- struct ipa_interrupt *interrupt;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ unsigned int irq = interrupt->irq;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- unsigned int irq;
int ret;
- ret = platform_get_irq_byname(ipa->pdev, "ipa");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
- ret);
- return ERR_PTR(ret ? : -EINVAL);
- }
- irq = ret;
-
- interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
- if (!interrupt)
- return ERR_PTR(-ENOMEM);
interrupt->ipa = ipa;
- interrupt->irq = irq;
- /* Start with all IPA interrupts disabled */
+ /* Disable all IPA interrupt types */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
@@ -271,26 +259,59 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n",
+ ret);
goto err_free_irq;
}
- return interrupt;
+ ipa->interrupt = interrupt;
+
+ return 0;
err_free_irq:
free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
- return ERR_PTR(ret);
+ return ret;
}
/* Inverse of ipa_interrupt_config() */
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
+void ipa_interrupt_deconfig(struct ipa *ipa)
{
- struct device *dev = &interrupt->ipa->pdev->dev;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ struct device *dev = ipa->dev;
+
+ ipa->interrupt = NULL;
dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
+}
+
+/* Initialize the IPA interrupt structure */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "ipa");
+ if (irq <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n", irq);
+
+ return ERR_PTR(irq ? : -EINVAL);
+ }
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->irq = irq;
+
+ return interrupt;
+}
+
+/* Inverse of ipa_interrupt_init() */
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt)
+{
kfree(interrupt);
}
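
A minimal userspace sketch (not driver code) of the unit-count arithmetic used by the relocated suspend-clear helper in this file: each IRQ_SUSPEND_INFO register covers 32 endpoints, so N endpoints need DIV_ROUND_UP(N, 32) register reads rather than the roundup() value the removed copy computed. The macros are re-implemented so the snippet builds outside the kernel, and the endpoint counts are illustrative values only.

/* Standalone sketch of the suspend-clear unit count */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int counts[] = { 20, 32, 36 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int n = counts[i];

		/* DIV_ROUND_UP() gives the number of 32-endpoint units;
		 * roundup(), which the removed helper used, gives an
		 * endpoint count and would loop far too many times.
		 */
		printf("%u endpoints: %u units (roundup gives %u)\n",
		       n, DIV_ROUND_UP(n, 32), roundup(n, 32));
	}

	return 0;
}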
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 12e3e798ccb3..f3f4f4330a59 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -35,14 +35,6 @@ void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
u32 endpoint_id);
/**
- * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
- * @interrupt: IPA interrupt structure
- *
- * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
- */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
-
-/**
* ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
* @interrupt: IPA interrupt structure
*
@@ -84,17 +76,31 @@ void ipa_interrupt_irq_enable(struct ipa *ipa);
void ipa_interrupt_irq_disable(struct ipa *ipa);
/**
- * ipa_interrupt_config() - Configure the IPA interrupt framework
+ * ipa_interrupt_config() - Configure IPA interrupts
* @ipa: IPA pointer
*
- * Return: Pointer to IPA SMP2P info, or a pointer-coded error
+ * Return: 0 if successful, or a negative error code
*/
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
+int ipa_interrupt_config(struct ipa *ipa);
/**
* ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
+ * @ipa: IPA pointer
+ */
+void ipa_interrupt_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_init() - Initialize the IPA interrupt structure
+ * @pdev: IPA platform device pointer
+ *
+ * Return: Pointer to an IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev);
+
+/**
+ * ipa_interrupt_exit() - Inverse of ipa_interrupt_init()
* @interrupt: IPA interrupt structure
*/
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt);
#endif /* _IPA_INTERRUPT_H_ */
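
A small sketch of the two error-reporting conventions this header now documents: ipa_interrupt_init() reports failure through a pointer-coded error (checked with IS_ERR() and unpacked with PTR_ERR()), while ipa_interrupt_config() reports a plain negative errno. The ERR_PTR()/IS_ERR()/PTR_ERR() helpers are re-implemented below so the example builds in userspace (in the kernel they come from <linux/err.h>), and init_step()/config_step() are stand-ins, not IPA driver functions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* "init" phase: may defer or fail, reports errors through the pointer */
static void *init_step(int fail)
{
	static int object = 42;

	return fail ? ERR_PTR(-EINVAL) : &object;
}

/* "config" phase: the object already exists, reports a plain errno */
static int config_step(int fail)
{
	return fail ? -EBUSY : 0;
}

int main(void)
{
	void *obj = init_step(0);

	if (IS_ERR(obj))
		return (int)PTR_ERR(obj);	/* pointer-coded error */

	printf("init ok, config returned %d\n", config_step(0));
	printf("failed init maps to errno %ld\n", PTR_ERR(init_step(1)));

	return 0;
}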
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 00475fd7a205..57b241417e8c 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
-#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
@@ -114,7 +113,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = gsi_setup(&ipa->gsi);
@@ -542,12 +541,9 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
if (ret)
goto err_hardware_deconfig;
- ipa->interrupt = ipa_interrupt_config(ipa);
- if (IS_ERR(ipa->interrupt)) {
- ret = PTR_ERR(ipa->interrupt);
- ipa->interrupt = NULL;
+ ret = ipa_interrupt_config(ipa);
+ if (ret)
goto err_mem_deconfig;
- }
ipa_uc_config(ipa);
@@ -572,8 +568,7 @@ err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
err_uc_deconfig:
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_hardware_deconfig:
@@ -591,8 +586,7 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_modem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
}
@@ -808,6 +802,7 @@ out_self:
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
@@ -839,12 +834,21 @@ static int ipa_probe(struct platform_device *pdev)
if (loader == IPA_LOADER_DEFER)
return -EPROBE_DEFER;
- /* The clock and interconnects might not be ready when we're
- * probed, so might return -EPROBE_DEFER.
+ /* The IPA interrupt might not be ready when we're probed, so this
+ * might return -EPROBE_DEFER.
+ */
+ interrupt = ipa_interrupt_init(pdev);
+ if (IS_ERR(interrupt))
+ return PTR_ERR(interrupt);
+
+ /* The clock and interconnects might not be ready when we're probed,
+ * so this might return -EPROBE_DEFER.
*/
power = ipa_power_init(dev, data->power_data);
- if (IS_ERR(power))
- return PTR_ERR(power);
+ if (IS_ERR(power)) {
+ ret = PTR_ERR(power);
+ goto err_interrupt_exit;
+ }
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -853,18 +857,19 @@ static int ipa_probe(struct platform_device *pdev)
goto err_power_exit;
}
- ipa->pdev = pdev;
+ ipa->dev = dev;
dev_set_drvdata(dev, ipa);
+ ipa->interrupt = interrupt;
ipa->power = power;
ipa->version = data->version;
ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
- ret = ipa_reg_init(ipa);
+ ret = ipa_reg_init(ipa, pdev);
if (ret)
goto err_kfree_ipa;
- ret = ipa_mem_init(ipa, data->mem_data);
+ ret = ipa_mem_init(ipa, pdev, data->mem_data);
if (ret)
goto err_reg_exit;
@@ -882,7 +887,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
+ ret = ipa_smp2p_init(ipa, pdev, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
@@ -939,17 +944,27 @@ err_kfree_ipa:
kfree(ipa);
err_power_exit:
ipa_power_exit(power);
+err_interrupt_exit:
+ ipa_interrupt_exit(interrupt);
return ret;
}
static void ipa_remove(struct platform_device *pdev)
{
- struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct ipa_power *power = ipa->power;
- struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ struct ipa_power *power;
+ struct device *dev;
+ struct ipa *ipa;
int ret;
+ ipa = dev_get_drvdata(&pdev->dev);
+ dev = ipa->dev;
+ WARN_ON(dev != &pdev->dev);
+
+ power = ipa->power;
+ interrupt = ipa->interrupt;
+
/* Prevent the modem from triggering a call to ipa_setup(). This
* also ensures a modem-initiated setup that's underway completes.
*/
@@ -991,6 +1006,7 @@ out_power_put:
ipa_reg_exit(ipa);
kfree(ipa);
ipa_power_exit(power);
+ ipa_interrupt_exit(interrupt);
dev_info(dev, "IPA driver removed");
}
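
A minimal sketch of the probe unwind pattern the ipa_probe() changes follow: each resource acquired before a later step can fail gets its own error label, and teardown runs in reverse order of initialization, so a failure undoes only what already succeeded. The fake_*() helpers below are placeholders for illustration, not IPA driver calls.

#include <errno.h>
#include <stdio.h>

static int fake_interrupt_init(void)
{
	return 0;			/* pretend the IRQ lookup succeeded */
}

static void fake_interrupt_exit(void)
{
	puts("interrupt exit");		/* undo the successful init */
}

static int fake_power_init(void)
{
	return -EINVAL;			/* force the unwind path */
}

static int fake_probe(void)
{
	int ret;

	ret = fake_interrupt_init();
	if (ret)
		return ret;

	ret = fake_power_init();
	if (ret)
		goto err_interrupt_exit;

	return 0;

err_interrupt_exit:
	fake_interrupt_exit();		/* undo only what already succeeded */
	return ret;
}

int main(void)
{
	printf("probe returned %d\n", fake_probe());
	return 0;
}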
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 694960537ecd..709f061ede61 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -9,6 +9,7 @@
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>
@@ -75,9 +76,9 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
+ const struct reg *reg;
u32 offset;
u16 size;
u32 val;
@@ -87,7 +88,7 @@ int ipa_mem_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 4);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ dev_err(ipa->dev, "no transaction for memory setup\n");
return -EBUSY;
}
@@ -217,8 +218,8 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id = mem->id;
+ struct device *dev = ipa->dev;
u16 size_multiple;
/* Make sure the memory region is valid for this version of IPA */
@@ -254,7 +255,7 @@ static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_mem_id mem_id;
u32 i;
@@ -290,7 +291,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 limit = ipa->mem_size;
u32 i;
@@ -317,7 +318,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
*/
int ipa_mem_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
const struct reg *reg;
dma_addr_t addr;
@@ -393,7 +394,7 @@ err_dma_free:
/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
ipa->zero_size = 0;
@@ -420,8 +421,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 3);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction to zero modem memory\n");
+ dev_err(ipa->dev, "no transaction to zero modem memory\n");
return -EBUSY;
}
@@ -452,7 +452,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -485,13 +485,12 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
static void ipa_imem_exit(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
- struct device *dev;
if (!ipa->imem_size)
return;
- dev = &ipa->pdev->dev;
domain = iommu_get_domain_for_dev(dev);
if (domain) {
size_t size;
@@ -527,7 +526,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*/
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -594,7 +593,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
static void ipa_smem_exit(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
domain = iommu_get_domain_for_dev(dev);
@@ -615,9 +614,10 @@ static void ipa_smem_exit(struct ipa *ipa)
}
/* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct resource *res;
int ret;
@@ -634,14 +634,13 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
if (!ipa_table_mem_valid(ipa, true))
return -EINVAL;
- ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
return ret;
}
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-shared");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
if (!res) {
dev_err(dev,
"DT error getting \"ipa-shared\" memory property\n");
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 868e9c20e8c4..28aad00a151d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -6,6 +6,8 @@
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
+struct platform_device;
+
struct ipa;
struct ipa_mem_data;
@@ -100,7 +102,8 @@ int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
int ipa_mem_zero_modem(struct ipa *ipa);
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data);
void ipa_mem_exit(struct ipa *ipa);
#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 423422a2a445..c27ca3f27f7d 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -39,10 +39,14 @@ enum ipa_modem_state {
/**
* struct ipa_priv - IPA network device private data
* @ipa: IPA pointer
+ * @tx: Transmit endpoint pointer
+ * @rx: Receive endpoint pointer
* @work: Work structure used to wake the modem netdev TX queue
*/
struct ipa_priv {
struct ipa *ipa;
+ struct ipa_endpoint *tx;
+ struct ipa_endpoint *rx;
struct work_struct work;
};
@@ -54,16 +58,16 @@ static int ipa_open(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ret = ipa_endpoint_enable_one(priv->tx);
if (ret)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ret = ipa_endpoint_enable_one(priv->rx);
if (ret)
goto err_disable_tx;
@@ -75,7 +79,7 @@ static int ipa_open(struct net_device *netdev)
return 0;
err_disable_tx:
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->tx);
err_power_put:
pm_runtime_put_noidle(dev);
@@ -90,15 +94,15 @@ static int ipa_stop(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto out_power_put;
netif_stop_queue(netdev);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->rx);
+ ipa_endpoint_disable_one(priv->tx);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
@@ -106,13 +110,16 @@ out_power_put:
return 0;
}
-/** ipa_start_xmit() - Transmits an skb.
- * @skb: skb to be transmitted
- * @dev: network device
+/** ipa_start_xmit() - Transmit an skb
+ * @skb: Socket buffer to be transmitted
+ * @netdev: Network device
*
- * Return codes:
- * NETDEV_TX_OK: Success
- * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
+ *
+ * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
+ * If the buffer has an unexpected protocol or its size is out of range, it
+ * is quietly dropped, returning NETDEV_TX_OK. NETDEV_TX_BUSY indicates
+ * the buffer cannot be sent at this time and should be retried later.
*/
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -132,29 +139,41 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
- /* The hardware must be powered for us to transmit */
- dev = &ipa->pdev->dev;
+ /* The hardware must be powered for us to transmit, so if we're not
+ * ready we want the network stack to stop queueing until power is
+ * ACTIVE. Once runtime resume has completed, we inform the network
+ * stack it's OK to try transmitting again.
+ *
+ * We learn from pm_runtime_get() whether the hardware is powered.
+ * If it was not, powering up is either started or already underway.
+ * And in that case we want to disable queueing, expecting it to be
+ * re-enabled once power is ACTIVE. But runtime PM and network
+ * transmit run concurrently, and if we're not careful the requests
+ * to stop and start queueing could occur in the wrong order.
+ *
+ * For that reason we *always* stop queueing here, *before* the call
+ * to pm_runtime_get(). If we determine here that power is ACTIVE,
+ * we restart queueing before transmitting the SKB. Otherwise
+ * queueing will eventually be enabled after resume completes.
+ */
+ netif_stop_queue(netdev);
+
+ dev = ipa->dev;
ret = pm_runtime_get(dev);
if (ret < 1) {
/* If a resume won't happen, just drop the packet */
if (ret < 0 && ret != -EINPROGRESS) {
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
pm_runtime_put_noidle(dev);
goto err_drop_skb;
}
- /* No power (yet). Stop the network stack from transmitting
- * until we're resumed; ipa_modem_resume() arranges for the
- * TX queue to be started again.
- */
- ipa_power_modem_queue_stop(ipa);
-
pm_runtime_put_noidle(dev);
return NETDEV_TX_BUSY;
}
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
ret = ipa_endpoint_skb_tx(endpoint, skb);
@@ -233,14 +252,14 @@ static void ipa_modem_netdev_setup(struct net_device *netdev)
*/
void ipa_modem_suspend(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_suspend_one(priv->rx);
+ ipa_endpoint_suspend_one(priv->tx);
}
/**
@@ -258,7 +277,7 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
{
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
- ipa_power_modem_queue_wake(priv->ipa);
+ netif_wake_queue(priv->tx->netdev);
}
/** ipa_modem_resume() - resume callback for runtime_pm
@@ -268,14 +287,14 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
*/
void ipa_modem_resume(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_resume_one(priv->tx);
+ ipa_endpoint_resume_one(priv->rx);
/* Arrange for the TX queue to be restarted */
(void)queue_pm_work(&priv->work);
@@ -303,19 +322,24 @@ int ipa_modem_start(struct ipa *ipa)
goto out_set_state;
}
- SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
+ SET_NETDEV_DEV(netdev, ipa->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
+ priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv->tx->netdev = netdev;
+ priv->rx->netdev = netdev;
+
ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
if (ret) {
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -355,9 +379,11 @@ int ipa_modem_stop(struct ipa *ipa)
if (netdev->flags & IFF_UP)
(void)ipa_stop(netdev);
unregister_netdev(netdev);
+
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -370,7 +396,7 @@ int ipa_modem_stop(struct ipa *ipa)
/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
/* Prevent the modem from triggering a call to ipa_setup() */
@@ -417,7 +443,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
{
struct ipa *ipa = container_of(nb, struct ipa, nb);
struct qcom_ssr_notify_data *notify_data = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
@@ -466,7 +492,7 @@ int ipa_modem_config(struct ipa *ipa)
void ipa_modem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
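
A standalone sketch of the queue/power ordering described in the new ipa_start_xmit() comment earlier in this file: the transmit path always stops the queue before asking for power, and wakes it again only when power is already ACTIVE, so a wake issued by a concurrent runtime resume can never be undone by a stop arriving late from the transmit side. The stubs below merely model netif_stop_queue()/netif_wake_queue()/pm_runtime_get() behaviour and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static bool queue_running = true;

static void stop_queue(void)
{
	queue_running = false;
}

static void wake_queue(void)
{
	queue_running = true;
}

/* Returns 1 if power is already active, 0 if a resume was only started */
static int get_power(bool active)
{
	return active ? 1 : 0;
}

static void xmit(bool power_active)
{
	/* Always stop first, so a wake issued by a concurrent resume can
	 * never be undone by a late stop from this path.
	 */
	stop_queue();

	if (get_power(power_active) < 1) {
		/* Resume underway: the resume path wakes the queue later */
		printf("busy, queue stays stopped until resume completes\n");
		return;
	}

	wake_queue();	/* power is ACTIVE, keep the queue running */
	printf("transmitted, queue %s\n",
	       queue_running ? "running" : "stopped");
}

int main(void)
{
	xmit(true);	/* power already ACTIVE */
	xmit(false);	/* resume in progress */
	return 0;
}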
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index e223886123ce..41ca7ef5e20f 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,28 +35,10 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * enum ipa_power_flag - IPA power flags
- * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
- * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
- * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
- * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
- * @IPA_POWER_FLAG_COUNT: Number of defined power flags
- */
-enum ipa_power_flag {
- IPA_POWER_FLAG_RESUMED,
- IPA_POWER_FLAG_SYSTEM,
- IPA_POWER_FLAG_STOPPED,
- IPA_POWER_FLAG_STARTED,
- IPA_POWER_FLAG_COUNT, /* Last; not a flag */
-};
-
-/**
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
* @qmp: QMP handle for AOSS communication
- * @spinlock: Protects modem TX queue enable/disable
- * @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
@@ -64,8 +46,6 @@ struct ipa_power {
struct device *dev;
struct clk *core;
struct qmp *qmp;
- spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
- DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};
@@ -147,7 +127,6 @@ static int ipa_runtime_suspend(struct device *dev)
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
- __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
@@ -179,8 +158,6 @@ static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
- __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Increment the disable depth to ensure that the IRQ won't
* be re-enabled until the matching _enable call in
* ipa_resume(). We do this to ensure that the interrupt
@@ -202,8 +179,6 @@ static int ipa_resume(struct device *dev)
ret = pm_runtime_force_resume(dev);
- __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Now that PM runtime is enabled again it's safe
* to turn the IRQ back on and process any data
* that was received during suspend.
@@ -219,84 +194,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa)
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
-{
- /* To handle an IPA interrupt we will have resumed the hardware
- * just to handle the interrupt, so we're done. If we are in a
- * system suspend, trigger a system resume.
- */
- if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
- if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
- pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
-
- /* Acknowledge/clear the suspend interrupt on all endpoints */
- ipa_interrupt_suspend_clear_all(ipa->interrupt);
-}
-
-/* The next few functions coordinate stopping and starting the modem
- * network device transmit queue.
- *
- * Transmit can be running concurrent with power resume, and there's a
- * chance the resume completes before the transmit path stops the queue,
- * leaving the queue in a stopped state. The next two functions are used
- * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
- * to conditionally stop the TX queue; and ipa_power_modem_queue_start()
- * is used by ipa_runtime_resume() to conditionally restart it.
- *
- * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
- * power flag is set. And if the queue is started, the STARTED flag is set.
- * The queue is only started on resume if the STOPPED flag is set. And the
- * queue is only started in ipa_start_xmit() if the STARTED flag is *not*
- * set. As a result, the queue remains operational if the two activites
- * happen concurrently regardless of the order they complete. The spinlock
- * ensures the flag and TX queue operations are done atomically.
- *
- * The first function stops the modem netdev transmit queue, but only if
- * the STARTED flag is *not* set. That flag is cleared if it was set.
- * If the queue is stopped, the STOPPED flag is set. This is called only
- * from the power ->runtime_resume operation.
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
- netif_stop_queue(ipa->modem_netdev);
- __set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function starts the modem netdev transmit queue, but only if the
- * STOPPED flag is set. That flag is cleared if it was set. If the queue
- * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
- * to skip stopping the queue in the event of a race.
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
- __set_bit(IPA_POWER_FLAG_STARTED, power->flags);
- netif_wake_queue(ipa->modem_netdev);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function clears the STARTED flag once the TX queue is operating */
-void ipa_power_modem_queue_active(struct ipa *ipa)
-{
- clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
-}
-
static int ipa_power_retention_init(struct ipa_power *power)
{
struct qmp *qmp = qmp_get(power->dev);
@@ -341,7 +238,7 @@ int ipa_power_setup(struct ipa *ipa)
ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
- ret = device_init_wakeup(&ipa->pdev->dev, true);
+ ret = device_init_wakeup(ipa->dev, true);
if (ret)
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
@@ -350,7 +247,7 @@ int ipa_power_setup(struct ipa *ipa)
void ipa_power_teardown(struct ipa *ipa)
{
- (void)device_init_wakeup(&ipa->pdev->dev, false);
+ (void)device_init_wakeup(ipa->dev, false);
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
@@ -385,7 +282,6 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
}
power->dev = dev;
power->core = clk;
- spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
ret = ipa_interconnect_init(power, data->interconnect_data);
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 3a4c59ea1222..227cc04bea80 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -24,24 +24,6 @@ extern const struct dev_pm_ops ipa_pm_ops;
u32 ipa_core_clock_rate(struct ipa *ipa);
/**
- * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_active() - Report modem netdev TX queue active
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_active(struct ipa *ipa);
-
-/**
* ipa_power_retention() - Control register retention on power collapse
* @ipa: IPA pointer
* @enable: Whether retention should be enabled or disabled
@@ -49,17 +31,6 @@ void ipa_power_modem_queue_active(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
- * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt ID (unused)
- *
- * If an RX endpoint is suspended, and the IPA has a packet destined for
- * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
- * that it should resume the endpoint.
- */
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
-
-/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index f70f0a1d1cda..65c40e207802 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -96,7 +96,7 @@ static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
IPA_QMI_INIT_COMPLETE_IND_SZ,
ipa_init_complete_ind_ei, &ind);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete indication\n", ret);
else
ipa_qmi->indication_sent = true;
@@ -148,7 +148,7 @@ static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
ipa = container_of(ipa_qmi, struct ipa, qmi);
ret = ipa_modem_start(ipa);
if (ret)
- dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+ dev_err(ipa->dev, "error %d starting modem\n", ret);
}
/* All QMI clients from the modem node are gone (modem shut down or crashed). */
@@ -199,7 +199,7 @@ static void ipa_server_indication_register(struct qmi_handle *qmi,
ipa_qmi->indication_requested = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending register indication response\n", ret);
}
}
@@ -228,7 +228,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
ipa_qmi->uc_ready = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete response\n", ret);
}
}
@@ -417,7 +417,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = qmi_txn_init(qmi, &txn, NULL, NULL);
if (ret < 0) {
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 6a3203ae6f1e..98625956e0bb 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -4,6 +4,7 @@
* Copyright (C) 2019-2023 Linaro Ltd.
*/
+#include <linux/platform_device.h>
#include <linux/io.h>
#include "ipa.h"
@@ -132,9 +133,9 @@ static const struct regs *ipa_regs(enum ipa_version version)
}
}
-int ipa_reg_init(struct ipa *ipa)
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
const struct regs *regs;
struct resource *res;
@@ -146,8 +147,7 @@ int ipa_reg_init(struct ipa *ipa)
return -EINVAL;
/* Setup IPA register memory */
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-reg");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-reg");
if (!res) {
dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
return -ENODEV;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 2998f115f12c..62c62495b796 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -12,6 +12,8 @@
#include "ipa_version.h"
#include "reg.h"
+struct platform_device;
+
struct ipa;
/**
@@ -643,7 +645,7 @@ extern const struct regs ipa_regs_v5_5;
const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-int ipa_reg_init(struct ipa *ipa);
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev);
void ipa_reg_exit(struct ipa *ipa);
#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 5620dc271fac..2f917582c423 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -5,7 +5,7 @@
*/
#include <linux/types.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
@@ -84,15 +84,13 @@ struct ipa_smp2p {
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
- struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
- dev = &smp2p->ipa->pdev->dev;
- smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
+ smp2p->power_on = pm_runtime_get_if_active(smp2p->ipa->dev) > 0;
/* Signal whether the IPA power is enabled */
mask = BIT(smp2p->enabled_bit);
@@ -152,15 +150,16 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
+ struct ipa *ipa = smp2p->ipa;
struct device *dev;
int ret;
/* Ignore any (spurious) interrupts received after the first */
- if (smp2p->ipa->setup_complete)
+ if (ipa->setup_complete)
return IRQ_HANDLED;
/* Power needs to be active for setup */
- dev = &smp2p->ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power for setup\n", ret);
@@ -168,7 +167,7 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
}
/* An error here won't cause driver shutdown, so warn if one occurs */
- ret = ipa_setup(smp2p->ipa);
+ ret = ipa_setup(ipa);
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_power_put:
@@ -179,14 +178,15 @@ out_power_put:
}
/* Initialize SMP2P interrupts */
-static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
- irq_handler_t handler)
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p,
+ struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
{
- struct device *dev = &smp2p->ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
unsigned int irq;
int ret;
- ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ ret = platform_get_irq_byname(pdev, name);
if (ret <= 0)
return ret ? : -EINVAL;
irq = ret;
@@ -208,7 +208,7 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
/* Drop the power reference if it was taken in ipa_smp2p_notify() */
static void ipa_smp2p_power_release(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (!ipa->smp2p->power_on)
return;
@@ -219,10 +219,11 @@ static void ipa_smp2p_power_release(struct ipa *ipa)
}
/* Initialize the IPA SMP2P subsystem */
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+int
+ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev, bool modem_init)
{
struct qcom_smem_state *enabled_state;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct qcom_smem_state *valid_state;
struct ipa_smp2p *smp2p;
u32 enabled_bit;
@@ -261,7 +262,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
/* We have enough information saved to handle notifications */
ipa->smp2p = smp2p;
- ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-clock-query",
ipa_smp2p_modem_clk_query_isr);
if (ret < 0)
goto err_null_smp2p;
@@ -273,7 +274,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
if (modem_init) {
/* Result will be non-zero (negative for error) */
- ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-setup-ready",
ipa_smp2p_modem_setup_ready_isr);
if (ret < 0)
goto err_notifier_unregister;
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 9b969b03d1a4..2a3d8eefb13b 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -8,17 +8,20 @@
#include <linux/types.h>
+struct platform_device;
+
struct ipa;
/**
* ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
* @ipa: IPA pointer
+ * @pdev: Platform device pointer
* @modem_init: Whether the modem is responsible for GSI initialization
*
* Return: 0 if successful, or a negative error code
- *
*/
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+int ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev,
+ bool modem_init);
/**
* ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 7b637bb8b41c..a24ac11b8893 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -163,7 +163,7 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 count;
if (!filtered) {
@@ -236,8 +236,7 @@ ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s filter reset\n",
+ dev_err(ipa->dev, "no transaction for %s filter reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -298,8 +297,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s route reset\n",
+ dev_err(ipa->dev, "no transaction for %s route reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -327,7 +325,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
void ipa_table_reset(struct ipa *ipa, bool modem)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const char *ee_name;
int ret;
@@ -356,7 +354,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
trans = ipa_cmd_trans_alloc(ipa, 1);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ dev_err(ipa->dev, "no transaction for hash flush\n");
return -EBUSY;
}
@@ -469,7 +467,7 @@ int ipa_table_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ dev_err(ipa->dev, "no transaction for table setup\n");
return -EBUSY;
}
@@ -713,7 +711,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
*/
int ipa_table_init(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
@@ -763,7 +761,7 @@ int ipa_table_init(struct ipa *ipa)
void ipa_table_exit(struct ipa *ipa)
{
u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
size_t size;
size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 7eaa0b4ebed9..bfd5dc6dab43 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -127,7 +127,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
@@ -141,7 +141,7 @@ static void ipa_uc_event_handler(struct ipa *ipa)
static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
@@ -191,7 +191,7 @@ void ipa_uc_config(struct ipa *ipa)
/* Inverse of ipa_uc_config() */
void ipa_uc_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
@@ -208,8 +208,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
/* Take a proxy power reference for the microcontroller */
void ipa_uc_power(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
static bool already;
- struct device *dev;
int ret;
if (already)
@@ -217,7 +217,6 @@ void ipa_uc_power(struct ipa *ipa)
already = true; /* Only do this on first boot */
/* This power reference dropped in ipa_uc_response_hdlr() above */
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index df7c43a109e1..5920f7e63352 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -349,7 +349,7 @@ static int ipvlan_get_iflink(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return ipvlan->phy_dev->ifindex;
+ return READ_ONCE(ipvlan->phy_dev->ifindex);
}
static const struct net_device_ops ipvlan_netdev_ops = {
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6d53e63ef4e..f6eab66c2660 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -144,6 +144,7 @@ static int loopback_dev_init(struct net_device *dev)
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7f5426285c61..0206b84284ab 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3519,18 +3519,13 @@ static int macsec_dev_init(struct net_device *dev)
struct net_device *real_dev = macsec->real_dev;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&macsec->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
macsec_set_head_tail_room(dev);
@@ -3550,7 +3545,6 @@ static void macsec_dev_uninit(struct net_device *dev)
struct macsec_dev *macsec = macsec_priv(dev);
gro_cells_destroy(&macsec->gro_cells);
- free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
@@ -3753,7 +3747,7 @@ static void macsec_get_stats64(struct net_device *dev,
static int macsec_get_iflink(const struct net_device *dev)
{
- return macsec_priv(dev)->real_dev->ifindex;
+ return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
}
static const struct net_device_ops macsec_netdev_ops = {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3cc665757e8..0cec2783a3e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1158,7 +1158,7 @@ static int macvlan_dev_get_iflink(const struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- return vlan->lowerdev->ifindex;
+ return READ_ONCE(vlan->lowerdev->ifindex);
}
static const struct ethtool_ops macvlan_ethtool_ops = {
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 68f8ee0ec8ba..f40eb50bb978 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -94,6 +94,10 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
int ret;
u32 cmd;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
/* Prepare the read operation */
cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
unimac_mdio_writel(priv, cmd, MDIO_CMD);
@@ -103,7 +107,7 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
ret = priv->wait_func(priv->wait_func_data);
if (ret)
- return ret;
+ goto out;
cmd = unimac_mdio_readl(priv, MDIO_CMD);
@@ -112,10 +116,15 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* that condition here and ignore the MDIO controller read failure
* indication.
*/
- if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
- return -EIO;
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL)) {
+ ret = -EIO;
+ goto out;
+ }
- return cmd & 0xffff;
+ ret = cmd & 0xffff;
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
@@ -123,6 +132,11 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
{
struct unimac_mdio_priv *priv = bus->priv;
u32 cmd;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Prepare the write operation */
cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
@@ -131,7 +145,10 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
unimac_mdio_start(priv);
- return priv->wait_func(priv->wait_func_data);
+ ret = priv->wait_func(priv->wait_func_data);
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
}
/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
@@ -178,14 +195,19 @@ static int unimac_mdio_reset(struct mii_bus *bus)
return 0;
}
-static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
{
unsigned long rate;
u32 reg, div;
+ int ret;
/* Keep the hardware default values */
if (!priv->clk_freq)
- return;
+ return 0;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
if (!priv->clk)
rate = 250000000;
@@ -195,7 +217,8 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
pr_warn("Incorrect MDIO clock frequency, ignoring\n");
- return;
+ ret = 0;
+ goto out;
}
/* The MDIO clock is the reference clock (typically 250Mhz) divided by
@@ -205,6 +228,9 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
reg |= div << MDIO_CLK_DIV_SHIFT;
unimac_mdio_writel(priv, reg, MDIO_CFG);
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_probe(struct platform_device *pdev)
@@ -235,24 +261,12 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
priv->clk_freq = 0;
- unimac_mdio_clk_set(priv);
-
priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- ret = -ENOMEM;
- goto out_clk_disable;
- }
+ if (!priv->mii_bus)
+ return -ENOMEM;
bus = priv->mii_bus;
bus->priv = priv;
@@ -261,17 +275,29 @@ static int unimac_mdio_probe(struct platform_device *pdev)
priv->wait_func = pdata->wait_func;
priv->wait_func_data = pdata->wait_func_data;
bus->phy_mask = ~pdata->phy_mask;
+ priv->clk = pdata->clk;
} else {
bus->name = "unimac MII bus";
priv->wait_func_data = priv;
priv->wait_func = unimac_mdio_poll;
+ priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ }
+
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_mdio_free;
}
+
bus->parent = &pdev->dev;
bus->read = unimac_mdio_read;
bus->write = unimac_mdio_write;
bus->reset = unimac_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ ret = unimac_mdio_clk_set(priv);
+ if (ret)
+ goto out_mdio_free;
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
@@ -286,8 +312,6 @@ static int unimac_mdio_probe(struct platform_device *pdev)
out_mdio_free:
mdiobus_free(bus);
-out_clk_disable:
- clk_disable_unprepare(priv->clk);
return ret;
}
@@ -297,36 +321,20 @@ static void unimac_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
- clk_disable_unprepare(priv->clk);
-}
-
-static int __maybe_unused unimac_mdio_suspend(struct device *d)
-{
- struct unimac_mdio_priv *priv = dev_get_drvdata(d);
-
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused unimac_mdio_resume(struct device *d)
{
struct unimac_mdio_priv *priv = dev_get_drvdata(d);
- int ret;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- unimac_mdio_clk_set(priv);
-
- return 0;
+ return unimac_mdio_clk_set(priv);
}
static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
- unimac_mdio_suspend, unimac_mdio_resume);
+ NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index abd8b508ec16..9d8f43b28aac 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -14,6 +14,20 @@
#include <linux/clk.h>
#define MDIO_MODE_REG 0x40
+#define MDIO_MODE_MDC_MODE BIT(12)
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+#define MDIO_MODE_DIV_MASK GENMASK(7, 0)
+#define MDIO_MODE_DIV(x) FIELD_PREP(MDIO_MODE_DIV_MASK, (x) - 1)
+#define MDIO_MODE_DIV_1 0x0
+#define MDIO_MODE_DIV_2 0x1
+#define MDIO_MODE_DIV_4 0x3
+#define MDIO_MODE_DIV_8 0x7
+#define MDIO_MODE_DIV_16 0xf
+#define MDIO_MODE_DIV_32 0x1f
+#define MDIO_MODE_DIV_64 0x3f
+#define MDIO_MODE_DIV_128 0x7f
+#define MDIO_MODE_DIV_256 0xff
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -26,9 +40,6 @@
#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-/* 0 = Clause 22, 1 = Clause 45 */
-#define MDIO_MODE_C45 BIT(8)
-
#define IPQ4019_MDIO_TIMEOUT 10000
#define IPQ4019_MDIO_SLEEP 10
@@ -41,6 +52,7 @@ struct ipq4019_mdio_data {
void __iomem *membase;
void __iomem *eth_ldo_rdy;
struct clk *mdio_clk;
+ unsigned int mdc_rate;
};
static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
@@ -203,6 +215,38 @@ static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int ipq4019_mdio_set_div(struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+ /* If we don't have a clock for AHB use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+ /* MDC rate is ahb_rate/(MDIO_MODE_DIV + 1)
+ * While smaller dividers are supported, internal
+ * documentation doesn't guarantee correct operation
+ * of the MDIO bus with a divider of 1, 2 or 4.
+ */
+ for (div = 8; div <= 256; div *= 2) {
+ /* The requested rate is supported by the div */
+ if (priv->mdc_rate == DIV_ROUND_UP(ahb_rate, div)) {
+ val = readl(priv->membase + MDIO_MODE_REG);
+ val &= ~MDIO_MODE_DIV_MASK;
+ val |= MDIO_MODE_DIV(div);
+ writel(val, priv->membase + MDIO_MODE_REG);
+
+ return 0;
+ }
+ }
+
+ /* The requested rate is not supported */
+ return -EINVAL;
+}
+
static int ipq_mdio_reset(struct mii_bus *bus)
{
struct ipq4019_mdio_data *priv = bus->priv;
@@ -225,10 +269,58 @@ static int ipq_mdio_reset(struct mii_bus *bus)
return ret;
ret = clk_prepare_enable(priv->mdio_clk);
- if (ret == 0)
- mdelay(10);
+ if (ret)
+ return ret;
- return ret;
+ mdelay(10);
+
+ /* Restore MDC rate */
+ return ipq4019_mdio_set_div(priv);
+}
+
+static void ipq4019_mdio_select_mdc_rate(struct platform_device *pdev,
+ struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+ /* MDC rate defined in DT, we don't have to decide a default value */
+ if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &priv->mdc_rate))
+ return;
+
+ /* If we don't have a clock for AHB use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+ /* Check what is the current div set */
+ val = readl(priv->membase + MDIO_MODE_REG);
+ div = FIELD_GET(MDIO_MODE_DIV_MASK, val);
+
+ /* div is not set to the default value of /256
+ * Probably someone changed that (bootloader, other drivers)
+ * Keep this and don't overwrite it.
+ */
+ if (div != MDIO_MODE_DIV_256) {
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div + 1);
+ return;
+ }
+
+ /* If div is /256, assume nobody has set this value and
+ * try to find an MDC rate that is close to the 802.3 spec of
+ * 2.5MHz
+ */
+ for (div = 256; div >= 8; div /= 2) {
+ /* Stop as soon as the resulting MDC rate would
+ * exceed the 2.5MHz target
+ */
+ if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
+ break;
+
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div);
+ }
}
static int ipq4019_mdio_probe(struct platform_device *pdev)
@@ -252,6 +344,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
if (IS_ERR(priv->mdio_clk))
return PTR_ERR(priv->mdio_clk);
+ ipq4019_mdio_select_mdc_rate(pdev, priv);
+ ret = ipq4019_mdio_set_div(priv);
+ if (ret)
+ return ret;
+
/* The platform resource is provided on the chipset IPQ5018 */
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
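
A standalone sketch of the default MDC divider search added above: walk the divider down from 256 to 8, keep the last rate that stays at or below the 802.3 target of 2.5 MHz, and stop once the next step would overshoot. The 100 MHz AHB rate is an assumed value for illustration, not something read from the hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long ahb_rate = 100000000;	/* assumed AHB clock */
	unsigned long mdc_rate = 0;

	for (unsigned int div = 256; div >= 8; div /= 2) {
		/* Stop once a smaller divider would push the MDC rate
		 * past the 2.5 MHz target.
		 */
		if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
			break;

		mdc_rate = DIV_ROUND_UP(ahb_rate, div);
	}

	printf("selected MDC rate: %lu Hz\n", mdc_rate);

	return 0;
}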
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 64ebcb6d235c..08e607f62e10 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,6 +139,53 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
}
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np,
+ bool *scanphys)
+{
+ struct device_node *child;
+ int addr, rc = 0;
+
+ /* Loop over the child nodes and register a phy_device for each phy */
+ for_each_available_child_of_node(np, child) {
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Ignore invalid ethernet-phy-package node */
+ if (!of_property_present(child, "reg"))
+ continue;
+
+ rc = __of_mdiobus_parse_phys(mdio, child, NULL);
+ if (rc && rc != -ENODEV)
+ goto exit;
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0) {
+ /* Skip scanning for invalid ethernet-phy-package node */
+ if (scanphys)
+ *scanphys = true;
+ continue;
+ }
+
+ if (of_mdiobus_child_is_phy(child))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ else
+ rc = of_mdiobus_register_device(mdio, child, addr);
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
+ goto exit;
+ }
+
+ return 0;
+exit:
+ of_node_put(child);
+ return rc;
+}
+
/**
* __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -180,33 +227,18 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
return rc;
/* Loop over the child nodes and register a phy_device for each phy */
- for_each_available_child_of_node(np, child) {
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0) {
- scanphys = true;
- continue;
- }
-
- if (of_mdiobus_child_is_phy(child))
- rc = of_mdiobus_register_phy(mdio, child, addr);
- else
- rc = of_mdiobus_register_device(mdio, child, addr);
-
- if (rc == -ENODEV)
- dev_err(&mdio->dev,
- "MDIO device at address %d is missing.\n",
- addr);
- else if (rc)
- goto unregister;
- }
+ rc = __of_mdiobus_parse_phys(mdio, np, &scanphys);
+ if (rc)
+ goto unregister;
if (!scanphys)
return 0;
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
- /* Skip PHYs with reg property set */
- if (of_property_present(child, "reg"))
+ /* Skip PHYs with reg property set or ethernet-phy-package node */
+ if (of_property_present(child, "reg") ||
+ of_node_name_eq(child, "ethernet-phy-package"))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -227,15 +259,16 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
if (!rc)
break;
if (rc != -ENODEV)
- goto unregister;
+ goto put_unregister;
}
}
}
return 0;
-unregister:
+put_unregister:
of_node_put(child);
+unregister:
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 6e14ba5e06c8..d7070dd4fe73 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -42,14 +42,20 @@ MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
-#define MAX_PARAM_LENGTH 256
-#define MAX_PRINT_CHUNK 1000
+#define MAX_PARAM_LENGTH 256
+#define MAX_USERDATA_ENTRY_LENGTH 256
+#define MAX_USERDATA_VALUE_LENGTH 200
+/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
+#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \
+ MAX_USERDATA_VALUE_LENGTH - 3)
+#define MAX_USERDATA_ITEMS 16
+#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
-static bool oops_only = false;
+static bool oops_only;
module_param(oops_only, bool, 0600);
MODULE_PARM_DESC(oops_only, "Only log oops messages");
@@ -79,7 +85,10 @@ static struct console netconsole_ext;
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
- * @item: Links us into the configfs subsystem hierarchy.
+ * @group: Links us into the configfs subsystem hierarchy.
+ * @userdata_group: Links to the userdata configfs hierarchy
+ * @userdata_complete:	Cached, formatted string of userdata to append
+ * @userdata_length: String length of userdata_complete
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -102,7 +111,10 @@ static struct console netconsole_ext;
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- struct config_item item;
+ struct config_group group;
+ struct config_group userdata_group;
+ char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ size_t userdata_length;
#endif
bool enabled;
bool extended;
@@ -134,14 +146,14 @@ static void __exit dynamic_netconsole_exit(void)
*/
static void netconsole_target_get(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_get(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_get(&nt->group);
}
static void netconsole_target_put(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_put(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_put(&nt->group);
}
#else /* !CONFIG_NETCONSOLE_DYNAMIC */
@@ -215,15 +227,33 @@ static struct netconsole_target *alloc_and_init(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | userdata/
+ * | <key>/
+ * | value
+ * | ...
* |
* <target>/...
*/
static struct netconsole_target *to_target(struct config_item *item)
{
- return item ?
- container_of(item, struct netconsole_target, item) :
- NULL;
+ struct config_group *cfg_group;
+
+ cfg_group = to_config_group(item);
+ if (!cfg_group)
+ return NULL;
+ return container_of(to_config_group(item),
+ struct netconsole_target, group);
+}
+
+/* Strip a possible trailing newline (e.g. from echo(1)) */
+static void trim_newline(char *s, size_t maxlen)
+{
+ size_t len;
+
+ len = strnlen(s, maxlen);
+ if (s[len - 1] == '\n')
+ s[len - 1] = '\0';
}
/*
@@ -370,7 +400,7 @@ static ssize_t release_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -398,7 +428,7 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -420,22 +450,17 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- size_t len;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
mutex_unlock(&dynamic_netconsole_mutex);
return -EINVAL;
}
strscpy(nt->np.dev_name, buf, IFNAMSIZ);
-
- /* Get rid of possible trailing newline from echo(1) */
- len = strnlen(nt->np.dev_name, IFNAMSIZ);
- if (nt->np.dev_name[len - 1] == '\n')
- nt->np.dev_name[len - 1] = '\0';
+ trim_newline(nt->np.dev_name, IFNAMSIZ);
mutex_unlock(&dynamic_netconsole_mutex);
return strnlen(buf, count);
@@ -450,7 +475,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -473,7 +498,7 @@ static ssize_t remote_port_store(struct config_item *item,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -495,12 +520,13 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -510,9 +536,9 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.local_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -531,12 +557,13 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -546,9 +573,9 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.remote_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -568,7 +595,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -585,6 +612,180 @@ out_unlock:
return -EINVAL;
}
+struct userdatum {
+ struct config_item item;
+ char value[MAX_USERDATA_VALUE_LENGTH];
+};
+
+static struct userdatum *to_userdatum(struct config_item *item)
+{
+ return container_of(item, struct userdatum, item);
+}
+
+struct userdata {
+ struct config_group group;
+};
+
+static struct userdata *to_userdata(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct userdata, group);
+}
+
+static struct netconsole_target *userdata_to_target(struct userdata *ud)
+{
+ struct config_group *netconsole_group;
+
+ netconsole_group = to_config_group(ud->group.cg_item.ci_parent);
+ return to_target(&netconsole_group->cg_item);
+}
+
+static ssize_t userdatum_value_show(struct config_item *item, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0]));
+}
+
+static void update_userdata(struct netconsole_target *nt)
+{
+ int complete_idx = 0, child_count = 0;
+ struct list_head *entry;
+
+ /* Clear the current string in case the last userdatum was deleted */
+ nt->userdata_length = 0;
+ nt->userdata_complete[0] = 0;
+
+ list_for_each(entry, &nt->userdata_group.cg_children) {
+ struct userdatum *udm_item;
+ struct config_item *item;
+
+ if (child_count >= MAX_USERDATA_ITEMS)
+ break;
+ child_count++;
+
+ item = container_of(entry, struct config_item, ci_entry);
+ udm_item = to_userdatum(item);
+
+ /* Skip userdata with no value set */
+ if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
+ continue;
+
+ /* This cannot overflow userdata_complete: each entry is at most
+ * MAX_USERDATA_ENTRY_LENGTH bytes (1/MAX_USERDATA_ITEMS of the buffer)
+ * and child_count above caps the number of entries at
+ * MAX_USERDATA_ITEMS.
+ */
+ complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
+ MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n",
+ item->ci_name, udm_item->value);
+ }
+ nt->userdata_length = strnlen(nt->userdata_complete,
+ sizeof(nt->userdata_complete));
+}
+
+static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
+ size_t count)
+{
+ struct userdatum *udm = to_userdatum(item);
+ struct netconsole_target *nt;
+ struct userdata *ud;
+ int ret;
+
+ if (count > MAX_USERDATA_VALUE_LENGTH)
+ return -EMSGSIZE;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+
+ ret = strscpy(udm->value, buf, sizeof(udm->value));
+ if (ret < 0)
+ goto out_unlock;
+ trim_newline(udm->value, sizeof(udm->value));
+
+ ud = to_userdata(item->ci_parent);
+ nt = userdata_to_target(ud);
+ update_userdata(nt);
+
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return count;
+out_unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+CONFIGFS_ATTR(userdatum_, value);
+
+static struct configfs_attribute *userdatum_attrs[] = {
+ &userdatum_attr_value,
+ NULL,
+};
+
+static void userdatum_release(struct config_item *item)
+{
+ kfree(to_userdatum(item));
+}
+
+static struct configfs_item_operations userdatum_ops = {
+ .release = userdatum_release,
+};
+
+static const struct config_item_type userdatum_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_attrs = userdatum_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *userdatum_make_item(struct config_group *group,
+ const char *name)
+{
+ struct netconsole_target *nt;
+ struct userdatum *udm;
+ struct userdata *ud;
+ size_t child_count;
+
+ if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+ child_count = list_count_nodes(&nt->userdata_group.cg_children);
+ if (child_count >= MAX_USERDATA_ITEMS)
+ return ERR_PTR(-ENOSPC);
+
+ udm = kzalloc(sizeof(*udm), GFP_KERNEL);
+ if (!udm)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&udm->item, name, &userdatum_type);
+ return &udm->item;
+}
+
+static void userdatum_drop(struct config_group *group, struct config_item *item)
+{
+ struct netconsole_target *nt;
+ struct userdata *ud;
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ update_userdata(nt);
+ config_item_put(item);
+ mutex_unlock(&dynamic_netconsole_mutex);
+}
+
+static struct configfs_attribute *userdata_attrs[] = {
+ NULL,
+};
+
+static struct configfs_group_operations userdata_ops = {
+ .make_item = userdatum_make_item,
+ .drop_item = userdatum_drop,
+};
+
+static struct config_item_type userdata_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_group_ops = &userdata_ops,
+ .ct_attrs = userdata_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
CONFIGFS_ATTR(, enabled);
CONFIGFS_ATTR(, extended);
CONFIGFS_ATTR(, dev_name);
@@ -629,6 +830,15 @@ static const struct config_item_type netconsole_target_type = {
.ct_owner = THIS_MODULE,
};
+static void init_target_config_group(struct netconsole_target *nt,
+ const char *name)
+{
+ config_group_init_type_name(&nt->group, name, &netconsole_target_type);
+ config_group_init_type_name(&nt->userdata_group, "userdata",
+ &userdata_type);
+ configfs_add_default_group(&nt->userdata_group, &nt->group);
+}
+
static struct netconsole_target *find_cmdline_target(const char *name)
{
struct netconsole_target *nt, *ret = NULL;
@@ -636,7 +846,7 @@ static struct netconsole_target *find_cmdline_target(const char *name)
spin_lock_irqsave(&target_list_lock, flags);
list_for_each_entry(nt, &target_list, list) {
- if (!strcmp(nt->item.ci_name, name)) {
+ if (!strcmp(nt->group.cg_item.ci_name, name)) {
ret = nt;
break;
}
@@ -650,8 +860,8 @@ static struct netconsole_target *find_cmdline_target(const char *name)
* Group operations and type for netconsole_subsys.
*/
-static struct config_item *make_netconsole_target(struct config_group *group,
- const char *name)
+static struct config_group *make_netconsole_target(struct config_group *group,
+ const char *name)
{
struct netconsole_target *nt;
unsigned long flags;
@@ -663,23 +873,25 @@ static struct config_item *make_netconsole_target(struct config_group *group,
if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX,
strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) {
nt = find_cmdline_target(name);
- if (nt)
- return &nt->item;
+ if (nt) {
+ init_target_config_group(nt, name);
+ return &nt->group;
+ }
}
nt = alloc_and_init();
if (!nt)
return ERR_PTR(-ENOMEM);
- /* Initialize the config_item member */
- config_item_init_type_name(&nt->item, name, &netconsole_target_type);
+ /* Initialize the config_group member */
+ init_target_config_group(nt, name);
/* Adding, but it is disabled */
spin_lock_irqsave(&target_list_lock, flags);
list_add(&nt->list, &target_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- return &nt->item;
+ return &nt->group;
}
static void drop_netconsole_target(struct config_group *group,
@@ -699,11 +911,11 @@ static void drop_netconsole_target(struct config_group *group,
if (nt->enabled)
netpoll_cleanup(&nt->np);
- config_item_put(&nt->item);
+ config_item_put(&nt->group.cg_item);
}
static struct configfs_group_operations netconsole_subsys_group_ops = {
- .make_item = make_netconsole_target,
+ .make_group = make_netconsole_target,
.drop_item = drop_netconsole_target,
};
@@ -729,8 +941,7 @@ static void populate_configfs_item(struct netconsole_target *nt,
snprintf(target_name, sizeof(target_name), "%s%d",
NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
- config_item_init_type_name(&nt->item, target_name,
- &netconsole_target_type);
+ init_target_config_group(nt, target_name);
}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
@@ -781,6 +992,7 @@ restart:
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
const char *msg = "had an event";
+
switch (event) {
case NETDEV_UNREGISTER:
msg = "unregistered";
@@ -824,19 +1036,34 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
const char *msg_ready = msg;
const char *release;
int release_len = 0;
+ int userdata_len = 0;
+ char *userdata = NULL;
+
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ userdata = nt->userdata_complete;
+ userdata_len = nt->userdata_length;
+#endif
if (nt->release) {
release = init_utsname()->release;
release_len = strlen(release) + 1;
}
- if (msg_len + release_len <= MAX_PRINT_CHUNK) {
+ if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) {
/* No fragmentation needed */
if (nt->release) {
scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
- msg_ready = buf;
+ } else {
+ memcpy(buf, msg, msg_len);
}
+
+ if (userdata)
+ msg_len += scnprintf(&buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len,
+ "%s", userdata);
+
+ msg_ready = buf;
netpoll_send_udp(&nt->np, msg_ready, msg_len);
return;
}
@@ -860,24 +1087,48 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
memcpy(buf + release_len, header, header_len);
header_len += release_len;
- while (offset < body_len) {
+ while (offset < body_len + userdata_len) {
int this_header = header_len;
- int this_chunk;
+ int this_offset = 0;
+ int this_chunk = 0;
this_header += scnprintf(buf + this_header,
sizeof(buf) - this_header,
- ",ncfrag=%d/%d;", offset, body_len);
-
- this_chunk = min(body_len - offset,
- MAX_PRINT_CHUNK - this_header);
- if (WARN_ON_ONCE(this_chunk <= 0))
- return;
-
- memcpy(buf + this_header, body + offset, this_chunk);
-
- netpoll_send_udp(&nt->np, buf, this_header + this_chunk);
+ ",ncfrag=%d/%d;", offset,
+ body_len + userdata_len);
+
+ /* Not all body data has been written yet */
+ if (offset < body_len) {
+ this_chunk = min(body_len - offset,
+ MAX_PRINT_CHUNK - this_header);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header, body + offset, this_chunk);
+ this_offset += this_chunk;
+ }
+ /* The body is fully written and userdata is still pending;
+ * append the userdata in this chunk.
+ */
+ if (offset + this_offset >= body_len &&
+ offset + this_offset < userdata_len + body_len) {
+ int sent_userdata = (offset + this_offset) - body_len;
+ int preceding_bytes = this_chunk + this_header;
+
+ if (WARN_ON_ONCE(sent_userdata < 0))
+ return;
+
+ this_chunk = min(userdata_len - sent_userdata,
+ MAX_PRINT_CHUNK - preceding_bytes);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header + this_offset,
+ userdata + sent_userdata,
+ this_chunk);
+ this_offset += this_chunk;
+ }
- offset += this_chunk;
+ netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ offset += this_offset;
}
}
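
Each userdata entry is rendered as " <name>=<value>\n" into the fixed 16 * 256 byte userdata_complete buffer and appended to extended netconsole messages, which is also where the 256 - 200 - 3 = 53 byte name limit comes from. A standalone userspace sketch of the same formatting, with made-up keys and values and plain snprintf() standing in for the kernel's scnprintf():

#include <stdio.h>
#include <string.h>

#define MAX_USERDATA_ENTRY_LENGTH	256
#define MAX_USERDATA_VALUE_LENGTH	200
/* ' ', '=' and '\n' account for the 3 extra format characters */
#define MAX_USERDATA_NAME_LENGTH	(MAX_USERDATA_ENTRY_LENGTH - \
					 MAX_USERDATA_VALUE_LENGTH - 3)
#define MAX_USERDATA_ITEMS		16

int main(void)
{
	/* Made-up keys/values; in the driver these come from configfs */
	static const char *keys[] = { "rack", "kernel_team" };
	static const char *vals[] = { "a17", "net" };
	char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
	size_t off = 0;
	int i;

	userdata_complete[0] = '\0';
	for (i = 0; i < 2; i++)
		off += snprintf(userdata_complete + off,
				sizeof(userdata_complete) - off,
				" %s=%s\n", keys[i], vals[i]);

	/* This string is appended verbatim after the log message payload */
	fputs(userdata_complete, stdout);
	printf("name limit: %d bytes\n", MAX_USERDATA_NAME_LENGTH);
	return 0;
}
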
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index bcbc1e19edde..64c0cdd31bf8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -129,7 +129,7 @@ static void nsim_bus_dev_release(struct device *dev)
complete(&nsim_bus_devs_released);
}
-static struct device_type nsim_bus_dev_type = {
+static const struct device_type nsim_bus_dev_type = {
.groups = nsim_bus_dev_attr_groups,
.release = nsim_bus_dev_release,
};
@@ -232,9 +232,154 @@ del_device_store(const struct bus_type *bus, const char *buf, size_t count)
}
static BUS_ATTR_WO(del_device);
+static ssize_t link_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim_a, *nsim_b, *peer;
+ struct net_device *dev_a, *dev_b;
+ unsigned int ifidx_a, ifidx_b;
+ int netnsfd_a, netnsfd_b, err;
+ struct net *ns_a, *ns_b;
+
+ err = sscanf(buf, "%d:%u %d:%u", &netnsfd_a, &ifidx_a, &netnsfd_b,
+ &ifidx_b);
+ if (err != 4) {
+ pr_err("Format for linking two devices is \"netnsfd_a:ifidx_a netnsfd_b:ifidx_b\" (int uint int uint).\n");
+ return -EINVAL;
+ }
+
+ ns_a = get_net_ns_by_fd(netnsfd_a);
+ if (IS_ERR(ns_a)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_a);
+ return -EINVAL;
+ }
+
+ ns_b = get_net_ns_by_fd(netnsfd_b);
+ if (IS_ERR(ns_b)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_b);
+ put_net(ns_a);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev_a = __dev_get_by_index(ns_a, ifidx_a);
+ if (!dev_a) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_a)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ dev_b = __dev_get_by_index(ns_b, ifidx_b);
+ if (!dev_b) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_b)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (dev_a == dev_b) {
+ pr_err("Cannot link a netdevsim to itself\n");
+ goto out_err;
+ }
+
+ err = -EBUSY;
+ nsim_a = netdev_priv(dev_a);
+ peer = rtnl_dereference(nsim_a->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_a,
+ ifidx_a);
+ goto out_err;
+ }
+
+ nsim_b = netdev_priv(dev_b);
+ peer = rtnl_dereference(nsim_b->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_b,
+ ifidx_b);
+ goto out_err;
+ }
+
+ err = 0;
+ rcu_assign_pointer(nsim_a->peer, nsim_b);
+ rcu_assign_pointer(nsim_b->peer, nsim_a);
+
+out_err:
+ put_net(ns_b);
+ put_net(ns_a);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(link_device);
+
+static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim, *peer;
+ struct net_device *dev;
+ unsigned int ifidx;
+ int netnsfd, err;
+ struct net *ns;
+
+ err = sscanf(buf, "%u:%u", &netnsfd, &ifidx);
+ if (err != 2) {
+ pr_err("Format for unlinking a device is \"netnsfd:ifidx\" (int uint).\n");
+ return -EINVAL;
+ }
+
+ ns = get_net_ns_by_fd(netnsfd);
+ if (IS_ERR(ns)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev = __dev_get_by_index(ns, ifidx);
+ if (!dev) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ if (!netdev_is_nsim(dev)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ nsim = netdev_priv(dev);
+ peer = rtnl_dereference(nsim->peer);
+ if (!peer)
+ goto out_put_netns;
+
+ err = 0;
+ RCU_INIT_POINTER(nsim->peer, NULL);
+ RCU_INIT_POINTER(peer->peer, NULL);
+
+out_put_netns:
+ put_net(ns);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(unlink_device);
+
static struct attribute *nsim_bus_attrs[] = {
&bus_attr_new_device.attr,
&bus_attr_del_device.attr,
+ &bus_attr_link_device.attr,
+ &bus_attr_unlink_device.attr,
NULL
};
ATTRIBUTE_GROUPS(nsim_bus);
@@ -260,7 +405,7 @@ static int nsim_num_vf(struct device *dev)
return nsim_bus_dev->num_vfs;
}
-static struct bus_type nsim_bus = {
+static const struct bus_type nsim_bus = {
.name = DRV_NAME,
.dev_name = DRV_NAME,
.bus_groups = nsim_bus_groups,
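
link_device expects two "netnsfd:ifindex" pairs on a single line, where each fd refers to an open network-namespace file descriptor. A standalone sketch of just the input parsing, with a made-up example string; the real store additionally resolves each netns fd, looks the ifindex up under rtnl_lock(), rejects non-netdevsim and already-linked devices, and publishes the peer pointers with rcu_assign_pointer():

#include <stdio.h>

/* Mirrors the sscanf() format used by link_device_store() */
static int parse_link_request(const char *buf, int *fd_a, unsigned int *if_a,
			      int *fd_b, unsigned int *if_b)
{
	if (sscanf(buf, "%d:%u %d:%u", fd_a, if_a, fd_b, if_b) != 4)
		return -1;	/* the driver returns -EINVAL for this case */
	return 0;
}

int main(void)
{
	int fd_a, fd_b;
	unsigned int if_a, if_b;

	/* Hypothetical values; real callers write such a line into
	 * /sys/bus/netdevsim/link_device
	 */
	if (!parse_link_request("3:7 4:9", &fd_a, &if_a, &fd_b, &if_b))
		printf("link %d:%u <-> %d:%u\n", fd_a, if_a, fd_b, if_b);
	return 0;
}
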
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 77e8250282a5..8330bc0bcb7e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -29,18 +29,35 @@
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ unsigned int len = skb->len;
+ struct netdevsim *peer_ns;
+ rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out;
+ goto out_drop_free;
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out_drop_free;
+
+ skb_tx_timestamp(skb);
+ if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ goto out_drop_cnt;
+
+ rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
- ns->tx_bytes += skb->len;
+ ns->tx_bytes += len;
u64_stats_update_end(&ns->syncp);
+ return NETDEV_TX_OK;
-out:
+out_drop_free:
dev_kfree_skb(skb);
-
+out_drop_cnt:
+ rcu_read_unlock();
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_dropped++;
+ u64_stats_update_end(&ns->syncp);
return NETDEV_TX_OK;
}
@@ -70,6 +87,7 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
+ stats->tx_dropped = ns->tx_dropped;
} while (u64_stats_fetch_retry(&ns->syncp, start));
}
@@ -265,6 +283,21 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static int nsim_get_iflink(const struct net_device *dev)
+{
+ struct netdevsim *nsim, *peer;
+ int iflink;
+
+ nsim = netdev_priv(dev);
+
+ rcu_read_lock();
+ peer = rcu_dereference(nsim->peer);
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ rcu_read_unlock();
+
+ return iflink;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -282,6 +315,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
+ .ndo_get_iflink = nsim_get_iflink,
.ndo_bpf = nsim_bpf,
};
@@ -302,7 +336,6 @@ static void nsim_setup(struct net_device *dev)
eth_hw_addr_random(dev);
dev->tx_queue_len = 0;
- dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
IFF_NO_QUEUE;
@@ -413,8 +446,13 @@ err_free_netdev:
void nsim_destroy(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
+ struct netdevsim *peer;
rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
unregister_netdevice(dev);
if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
nsim_macsec_teardown(ns);
@@ -427,6 +465,11 @@ void nsim_destroy(struct netdevsim *ns)
free_netdev(dev);
}
+bool netdev_is_nsim(struct net_device *dev)
+{
+ return dev->netdev_ops == &nsim_netdev_ops;
+}
+
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 028c825b86db..553c4b9b4f63 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -98,6 +98,7 @@ struct netdevsim {
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
struct u64_stats_sync syncp;
struct nsim_bus_dev *nsim_bus_dev;
@@ -125,11 +126,13 @@ struct netdevsim {
} udp_ports;
struct nsim_ethtool ethtool;
+ struct netdevsim __rcu *peer;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+bool netdev_is_nsim(struct net_device *dev);
void nsim_ethtool_init(struct netdevsim *ns);
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 39171380ccf2..a4d2e76a8d58 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -145,7 +145,7 @@ static int netkit_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nk->peer);
if (peer)
- iflink = peer->ifindex;
+ iflink = READ_ONCE(peer->ifindex);
rcu_read_unlock();
return iflink;
}
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 5e19a6839dea..e5a0987a263e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -17,17 +17,6 @@ static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int nlmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- return dev->lstats == NULL ? -ENOMEM : 0;
-}
-
-static void nlmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct nlmon {
struct netlink_tap nt;
};
@@ -51,15 +40,7 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- u64 packets, bytes;
-
- dev_lstats_read(dev, &packets, &bytes);
-
- stats->rx_packets = packets;
- stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
- stats->tx_bytes = 0;
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}
static u32 always_on(struct net_device *dev)
@@ -72,8 +53,6 @@ static const struct ethtool_ops nlmon_ethtool_ops = {
};
static const struct net_device_ops nlmon_ops = {
- .ndo_init = nlmon_dev_init,
- .ndo_uninit = nlmon_dev_uninit,
.ndo_open = nlmon_open,
.ndo_stop = nlmon_close,
.ndo_start_xmit = nlmon_xmit,
@@ -92,6 +71,7 @@ static void nlmon_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->flags = IFF_NOARP;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
/* That's rather a softlimit here, which, of course,
* can be altered. Not a real MTU, but what is to be
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index dc3962b2aa6b..853b8c138718 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -398,4 +398,5 @@ void lynx_pcs_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(lynx_pcs_destroy);
+MODULE_DESCRIPTION("NXP Lynx PCS phylink library");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 8501dd365279..4f63abe638c4 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -303,4 +303,5 @@ void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(mtk_pcs_lynxi_destroy);
+MODULE_DESCRIPTION("MediaTek SGMII library for LynxI");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index d93f84fbb1fd..4bd66fdde367 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -183,7 +183,7 @@ static void miic_converter_enable(struct miic *miic, int port, int enable)
miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
}
-static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+static int miic_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising, bool permit)
{
@@ -234,7 +234,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
return 0;
}
-static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -333,6 +333,7 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
+ miic_port->pcs.neg_mode = true;
return &miic_port->pcs;
}
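
Setting pcs.neg_mode = true above switches this PCS to phylink's neg-mode convention: the second argument of the config and link_up callbacks is no longer an MLO_AN_* mode but a PHYLINK_PCS_NEG_* value, which is why the parameter is renamed to neg_mode. A kernel-side sketch of how a PCS callback typically inspects it; pcs_example_config() is a hypothetical callback, not part of this patch:

#include <linux/phylink.h>

/* Sketch only: the neg_mode handling mirrors the convention enabled by
 * setting 'pcs->neg_mode = true' in the PCS creation path.
 */
static int pcs_example_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
		/* in-band autoneg requested: program the advertisement */
	} else {
		/* AN disabled or out-of-band: force speed/duplex instead */
	}

	return 0;
}
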
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31f0beba638a..31525fe9c32e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -10,7 +10,7 @@
#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
-#include <linux/workqueue.h>
+
#include "pcs-xpcs.h"
#define phylink_pcs_to_xpcs(pl_pcs) \
@@ -130,7 +130,6 @@ static const phy_interface_t xpcs_1000basex_interfaces[] = {
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
- PHY_INTERFACE_MODE_MAX,
};
enum {
@@ -293,7 +292,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
dev = MDIO_MMD_VEND2;
break;
default:
- return -1;
+ return -EINVAL;
}
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
@@ -614,14 +613,15 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return -EINVAL;
/* Populate the supported link modes for this PHY interface type.
* FIXME: what about the port modes and autoneg bit? This masks
* all those away.
*/
- if (compat)
- for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(compat->supported[i], xpcs_supported);
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
@@ -636,8 +636,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
const struct xpcs_compat *compat = &xpcs->id->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
- if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
- __set_bit(compat->interface[j], interfaces);
+ __set_bit(compat->interface[j], interfaces);
}
}
EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
@@ -891,7 +890,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
@@ -1456,4 +1455,5 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
}
EXPORT_SYMBOL_GPL(xpcs_create_mdiodev);
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9e2672800f0b..1df0595c5ba9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -232,6 +232,7 @@ config MARVELL_10G_PHY
config MARVELL_88Q2XXX_PHY
tristate "Marvell 88Q2XXX PHY"
+ depends on HWMON || HWMON=n
help
Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet
PHYs.
@@ -335,12 +336,7 @@ config NCN26000_PHY
Currently supports the NCN26000 10BASE-T1S Industrial PHY
with MII interface.
-config AT803X_PHY
- tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
- depends on REGULATOR
- help
- Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
- QCA8337(Internal qca8k PHY) model
+source "drivers/net/phy/qcom/Kconfig"
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6097afd44392..197acfa0b412 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
-obj-$(CONFIG_AT803X_PHY) += at803x.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -91,6 +90,7 @@ endif
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
+obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 7619d6185801..85f910e2d4fb 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -18,6 +18,12 @@
#define PHY_ID_ADIN1110 0x0283bc91
#define PHY_ID_ADIN2111 0x0283bca1
+#define ADIN_PHY_SUBSYS_IRQ_MASK 0x0021
+#define ADIN_LINK_STAT_CHNG_IRQ_EN BIT(1)
+
+#define ADIN_PHY_SUBSYS_IRQ_STATUS 0x0011
+#define ADIN_LINK_STAT_CHNG BIT(1)
+
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
@@ -136,6 +142,53 @@ static int adin_config_aneg(struct phy_device *phydev)
return genphy_c45_config_aneg(phydev);
}
+static int adin_phy_ack_intr(struct phy_device *phydev)
+{
+ /* Clear pending interrupts */
+ int rc = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_config_intr(struct phy_device *phydev)
+{
+ u16 irq_mask;
+ int ret;
+
+ ret = adin_phy_ack_intr(phydev);
+ if (ret)
+ return ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ irq_mask = ADIN_LINK_STAT_CHNG_IRQ_EN;
+ else
+ irq_mask = 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_MASK,
+ ADIN_LINK_STAT_CHNG_IRQ_EN, irq_mask);
+}
+
+static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & ADIN_LINK_STAT_CHNG))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
@@ -275,6 +328,8 @@ static struct phy_driver adin_driver[] = {
.probe = adin_probe,
.config_aneg = adin_config_aneg,
.read_status = adin_read_status,
+ .config_intr = adin_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.set_loopback = adin_set_loopback,
.suspend = adin_suspend,
.resume = adin_resume,
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 97a2fafa15ca..71bfddb8f453 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -22,9 +22,13 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR111 0x03a1b610
+#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
+#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
@@ -727,6 +731,15 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS,
+ MDIO_PMD_TXDIS_GLOBAL);
+ if (ret)
+ return ret;
+
+ ret = aqr107_wait_processor_intensive_op(phydev);
+ if (ret)
+ return ret;
+
return aqr107_fill_interface_modes(phydev);
}
@@ -746,6 +759,16 @@ static int aqr107_probe(struct phy_device *phydev)
return aqr_hwmon_probe(phydev);
}
+static int aqr111_config_init(struct phy_device *phydev)
+{
+ /* AQR111 reports supporting speed up to 10G,
+ * however only speeds up to 5G are supported.
+ */
+ phy_set_max_speed(phydev, SPEED_5000);
+
+ return aqr107_config_init(phydev);
+}
+
static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
@@ -820,6 +843,44 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
+ .name = "Aquantia AQR111",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
+ .name = "Aquantia AQR111B0",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
@@ -864,6 +925,25 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
+ .name = "Aquantia AQR113",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr113c_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
@@ -882,6 +962,25 @@ static struct phy_driver aqr_driver[] = {
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
+ .name = "Aquantia AQR813",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -894,9 +993,13 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
deleted file mode 100644
index a62442a55774..000000000000
--- a/drivers/net/phy/at803x.c
+++ /dev/null
@@ -1,2432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/net/phy/at803x.c
- *
- * Driver for Qualcomm Atheros AR803x PHY
- *
- * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
- */
-
-#include <linux/phy.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool_netlink.h>
-#include <linux/bitfield.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/phylink.h>
-#include <linux/sfp.h>
-#include <dt-bindings/net/qca-ar803x.h>
-
-#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
-#define AT803X_SFC_ASSERT_CRS BIT(11)
-#define AT803X_SFC_FORCE_LINK BIT(10)
-#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
-#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
-#define AT803X_SFC_MANUAL_MDIX 0x1
-#define AT803X_SFC_MANUAL_MDI 0x0
-#define AT803X_SFC_SQE_TEST BIT(2)
-#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
-#define AT803X_SFC_DISABLE_JABBER BIT(0)
-
-#define AT803X_SPECIFIC_STATUS 0x11
-#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
-#define AT803X_SS_SPEED_1000 2
-#define AT803X_SS_SPEED_100 1
-#define AT803X_SS_SPEED_10 0
-#define AT803X_SS_DUPLEX BIT(13)
-#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
-#define AT803X_SS_MDIX BIT(6)
-
-#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
-#define QCA808X_SS_SPEED_2500 4
-
-#define AT803X_INTR_ENABLE 0x12
-#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
-#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
-#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
-#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
-#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
-#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
-#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
-#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
-#define AT803X_INTR_ENABLE_WOL BIT(0)
-
-#define AT803X_INTR_STATUS 0x13
-
-#define AT803X_SMART_SPEED 0x14
-#define AT803X_SMART_SPEED_ENABLE BIT(5)
-#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
-#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
-#define AT803X_CDT 0x16
-#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
-#define AT803X_CDT_ENABLE_TEST BIT(0)
-#define AT803X_CDT_STATUS 0x1c
-#define AT803X_CDT_STATUS_STAT_NORMAL 0
-#define AT803X_CDT_STATUS_STAT_SHORT 1
-#define AT803X_CDT_STATUS_STAT_OPEN 2
-#define AT803X_CDT_STATUS_STAT_FAIL 3
-#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
-#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
-#define AT803X_LED_CONTROL 0x18
-
-#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
-#define AT803X_WOL_EN BIT(5)
-#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
-#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
-#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
-#define AT803X_DEBUG_ADDR 0x1D
-#define AT803X_DEBUG_DATA 0x1E
-
-#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_BASET_RGMII 0x00
-#define AT803X_MODE_CFG_BASET_SGMII 0x01
-#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
-#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
-#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
-#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
-#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
-#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
-#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
-#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
-#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
-
-#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
-#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-
-#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
-#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
-#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
-#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-
-#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
-#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-
-#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
-#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
-#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
-#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
-
-#define AT803X_DEBUG_REG_3C 0x3C
-
-#define AT803X_DEBUG_REG_GREEN 0x3D
-#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
-
-#define AT803X_DEBUG_REG_1F 0x1F
-#define AT803X_DEBUG_PLL_ON BIT(2)
-#define AT803X_DEBUG_RGMII_1V8 BIT(3)
-
-#define MDIO_AZ_DEBUG 0x800D
-
-/* AT803x supports either the XTAL input pad, an internal PLL or the
- * DSP as clock reference for the clock output pad. The XTAL reference
- * is only used for 25 MHz output, all other frequencies need the PLL.
- * The DSP as a clock reference is used in synchronous ethernet
- * applications.
- *
- * By default the PLL is only enabled if there is a link. Otherwise
- * the PHY will go into low power state and disabled the PLL. You can
- * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
- * enabled.
- */
-#define AT803X_MMD7_CLK25M 0x8016
-#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
-#define AT803X_CLK_OUT_25MHZ_XTAL 0
-#define AT803X_CLK_OUT_25MHZ_DSP 1
-#define AT803X_CLK_OUT_50MHZ_PLL 2
-#define AT803X_CLK_OUT_50MHZ_DSP 3
-#define AT803X_CLK_OUT_62_5MHZ_PLL 4
-#define AT803X_CLK_OUT_62_5MHZ_DSP 5
-#define AT803X_CLK_OUT_125MHZ_PLL 6
-#define AT803X_CLK_OUT_125MHZ_DSP 7
-
-/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
- * but doesn't support choosing between XTAL/PLL and DSP.
- */
-#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
-
-#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
-#define AT803X_CLK_OUT_STRENGTH_FULL 0
-#define AT803X_CLK_OUT_STRENGTH_HALF 1
-#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
-
-#define AT803X_DEFAULT_DOWNSHIFT 5
-#define AT803X_MIN_DOWNSHIFT 2
-#define AT803X_MAX_DOWNSHIFT 9
-
-#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
-#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
-#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
-#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
-
-#define ATH9331_PHY_ID 0x004dd041
-#define ATH8030_PHY_ID 0x004dd076
-#define ATH8031_PHY_ID 0x004dd074
-#define ATH8032_PHY_ID 0x004dd023
-#define ATH8035_PHY_ID 0x004dd072
-#define AT8030_PHY_ID_MASK 0xffffffef
-
-#define QCA8081_PHY_ID 0x004dd101
-
-#define QCA8327_A_PHY_ID 0x004dd033
-#define QCA8327_B_PHY_ID 0x004dd034
-#define QCA8337_PHY_ID 0x004dd036
-#define QCA9561_PHY_ID 0x004dd042
-#define QCA8K_PHY_ID_MASK 0xffffffff
-
-#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
-
-#define AT803X_PAGE_FIBER 0
-#define AT803X_PAGE_COPPER 1
-
-/* don't turn off internal PLL */
-#define AT803X_KEEP_PLL_ENABLED BIT(0)
-#define AT803X_DISABLE_SMARTEEE BIT(1)
-
-/* disable hibernation mode */
-#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
-
-/* ADC threshold */
-#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
-#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
-#define QCA808X_ADC_THRESHOLD_80MV 0
-#define QCA808X_ADC_THRESHOLD_100MV 0xf0
-#define QCA808X_ADC_THRESHOLD_200MV 0x0f
-#define QCA808X_ADC_THRESHOLD_300MV 0xff
-
-/* CLD control */
-#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
-#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
-#define QCA808X_8023AZ_AFE_EN 0x90
-
-/* AZ control */
-#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
-#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
-#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
-#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
-#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
-#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
-#define QCA808X_TOP_OPTION1_DATA 0x0
-
-#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
-#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
-#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
-#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
-#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
-#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
-#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
-#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
-#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
-#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
-#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
-#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
-
-/* master/slave seed config */
-#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
-#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
-#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
-#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
-
-/* Hibernation yields lower power consumpiton in contrast with normal operation mode.
- * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s.
- */
-#define QCA808X_DBG_AN_TEST 0xb
-#define QCA808X_HIBERNATION_EN BIT(15)
-
-#define QCA808X_CDT_ENABLE_TEST BIT(15)
-#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
-#define QCA808X_CDT_STATUS BIT(11)
-#define QCA808X_CDT_LENGTH_UNIT BIT(10)
-
-#define QCA808X_MMD3_CDT_STATUS 0x8064
-#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
-#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
-#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
-#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
-
-#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
-#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
-#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
-#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-
-#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
-#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
-
-#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
-#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
-
-/* NORMAL are MDI with type set to 0 */
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-
-/* Added for reference of existence but should be handled by wait_for_completion already */
-#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
-
-/* QCA808X 1G chip type */
-#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
-#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
-
-#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
-#define QCA8081_PHY_FIFO_RSTN BIT(11)
-
-MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
-MODULE_AUTHOR("Matus Ujhelyi");
-MODULE_LICENSE("GPL");
-
-enum stat_access_type {
- PHY,
- MMD
-};
-
-struct at803x_hw_stat {
- const char *string;
- u8 reg;
- u32 mask;
- enum stat_access_type access_type;
-};
-
-static struct at803x_hw_stat qca83xx_hw_stats[] = {
- { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
- { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
- { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
-};
-
-struct at803x_ss_mask {
- u16 speed_mask;
- u8 speed_shift;
-};
-
-struct at803x_priv {
- int flags;
- u16 clk_25m_reg;
- u16 clk_25m_mask;
- u8 smarteee_lpi_tw_1g;
- u8 smarteee_lpi_tw_100m;
- bool is_fiber;
- bool is_1000basex;
- struct regulator_dev *vddio_rdev;
- struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
-};
-
-struct at803x_context {
- u16 bmcr;
- u16 advertise;
- u16 control1000;
- u16 int_enable;
- u16 smart_speed;
- u16 led_control;
-};
-
-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, data);
-}
-
-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_read(phydev, AT803X_DEBUG_DATA);
-}
-
-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
- u16 clear, u16 set)
-{
- u16 val;
- int ret;
-
- ret = at803x_debug_reg_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- val = ret & 0xffff;
- val &= ~clear;
- val |= set;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, val);
-}
-
-static int at803x_write_page(struct phy_device *phydev, int page)
-{
- int mask;
- int set;
-
- if (page == AT803X_PAGE_COPPER) {
- set = AT803X_BT_BX_REG_SEL;
- mask = 0;
- } else {
- set = 0;
- mask = AT803X_BT_BX_REG_SEL;
- }
-
- return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
-}
-
-static int at803x_read_page(struct phy_device *phydev)
-{
- int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
-
- if (ccr < 0)
- return ccr;
-
- if (ccr & AT803X_BT_BX_REG_SEL)
- return AT803X_PAGE_COPPER;
-
- return AT803X_PAGE_FIBER;
-}
-
-static int at803x_enable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
- AT803X_DEBUG_RX_CLK_DLY_EN);
-}
-
-static int at803x_enable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
- AT803X_DEBUG_TX_CLK_DLY_EN);
-}
-
-static int at803x_disable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- AT803X_DEBUG_RX_CLK_DLY_EN, 0);
-}
-
-static int at803x_disable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
- AT803X_DEBUG_TX_CLK_DLY_EN, 0);
-}
-
-/* save relevant PHY registers to private copy */
-static void at803x_context_save(struct phy_device *phydev,
- struct at803x_context *context)
-{
- context->bmcr = phy_read(phydev, MII_BMCR);
- context->advertise = phy_read(phydev, MII_ADVERTISE);
- context->control1000 = phy_read(phydev, MII_CTRL1000);
- context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
- context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
- context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
-}
-
-/* restore relevant PHY registers from private copy */
-static void at803x_context_restore(struct phy_device *phydev,
- const struct at803x_context *context)
-{
- phy_write(phydev, MII_BMCR, context->bmcr);
- phy_write(phydev, MII_ADVERTISE, context->advertise);
- phy_write(phydev, MII_CTRL1000, context->control1000);
- phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
- phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
- phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
-}
-
-static int at803x_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret, irq_enabled;
-
- if (wol->wolopts & WAKE_MAGIC) {
- struct net_device *ndev = phydev->attached_dev;
- const u8 *mac;
- unsigned int i;
- static const unsigned int offsets[] = {
- AT803X_LOC_MAC_ADDR_32_47_OFFSET,
- AT803X_LOC_MAC_ADDR_16_31_OFFSET,
- AT803X_LOC_MAC_ADDR_0_15_OFFSET,
- };
-
- if (!ndev)
- return -ENODEV;
-
- mac = (const u8 *)ndev->dev_addr;
-
- if (!is_valid_ether_addr(mac))
- return -EINVAL;
-
- for (i = 0; i < 3; i++)
- phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
- mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
-
- /* Enable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
- if (ret)
- return ret;
- } else {
- /* Disable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
- if (ret)
- return ret;
- }
-
- /* Clear WOL status */
- ret = phy_read(phydev, AT803X_INTR_STATUS);
- if (ret < 0)
- return ret;
-
- /* When the PHY is in interrupt mode, check whether any interrupts other
-  * than WOL were triggered; only the interrupts enabled by AT803X_INTR_ENABLE
-  * can be passed up to the interrupt pin.
-  */
- irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (irq_enabled < 0)
- return irq_enabled;
-
- irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
- if (ret & irq_enabled && !phy_polling_mode(phydev))
- phy_trigger_machine(phydev);
-
- return 0;
-}
-
-static void at803x_get_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int value;
-
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- if (value < 0)
- return;
-
- if (value & AT803X_INTR_ENABLE_WOL)
- wol->wolopts |= WAKE_MAGIC;
-}
-
-static int qca83xx_get_sset_count(struct phy_device *phydev)
-{
- return ARRAY_SIZE(qca83xx_hw_stats);
-}
-
-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
- strscpy(data + i * ETH_GSTRING_LEN,
- qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
-{
- struct at803x_hw_stat stat = qca83xx_hw_stats[i];
- struct at803x_priv *priv = phydev->priv;
- int val;
- u64 ret;
-
- if (stat.access_type == MMD)
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
- else
- val = phy_read(phydev, stat.reg);
-
- if (val < 0) {
- ret = U64_MAX;
- } else {
- val = val & stat.mask;
- priv->stats[i] += val;
- ret = priv->stats[i];
- }
-
- return ret;
-}
-
-static void qca83xx_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
- data[i] = qca83xx_get_stat(phydev, i);
-}
-
-static int at803x_suspend(struct phy_device *phydev)
-{
- int value;
- int wol_enabled;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- wol_enabled = value & AT803X_INTR_ENABLE_WOL;
-
- if (wol_enabled)
- value = BMCR_ISOLATE;
- else
- value = BMCR_PDOWN;
-
- phy_modify(phydev, MII_BMCR, 0, value);
-
- return 0;
-}
-
-static int at803x_resume(struct phy_device *phydev)
-{
- return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
-}
-
-static int at803x_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- u32 freq, strength, tw;
- unsigned int sel;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF_MDIO))
- return 0;
-
- if (of_property_read_bool(node, "qca,disable-smarteee"))
- priv->flags |= AT803X_DISABLE_SMARTEEE;
-
- if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
- priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_1g = tw;
- }
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_100m = tw;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
- if (!ret) {
- switch (freq) {
- case 25000000:
- sel = AT803X_CLK_OUT_25MHZ_XTAL;
- break;
- case 50000000:
- sel = AT803X_CLK_OUT_50MHZ_PLL;
- break;
- case 62500000:
- sel = AT803X_CLK_OUT_62_5MHZ_PLL;
- break;
- case 125000000:
- sel = AT803X_CLK_OUT_125MHZ_PLL;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-frequency\n");
- return -EINVAL;
- }
-
- priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
- priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
- if (!ret) {
- priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
- switch (strength) {
- case AR803X_STRENGTH_FULL:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
- break;
- case AR803X_STRENGTH_HALF:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
- break;
- case AR803X_STRENGTH_QUARTER:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-strength\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int at803x_probe(struct phy_device *phydev)
-{
- struct device *dev = &phydev->mdio.dev;
- struct at803x_priv *priv;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- phydev->priv = priv;
-
- ret = at803x_parse_dt(phydev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int at803x_get_features(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err;
-
- err = genphy_read_abilities(phydev);
- if (err)
- return err;
-
- if (phydev->drv->phy_id != ATH8031_PHY_ID)
- return 0;
-
- /* AR8031/AR8033 have different status registers
- * for copper and fiber operation. However, the
- * extended status register is the same for both
- * operation modes.
- *
- * As a result of that, ESTATUS_1000_XFULL is set
- * to 1 even when operating in copper TP mode.
- *
- * Remove this mode from the supported link modes
- * when not operating in 1000BaseX mode.
- */
- if (!priv->is_1000basex)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
-
- return 0;
-}
-
-static int at803x_smarteee_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- u16 mask = 0, val = 0;
- int ret;
-
- if (priv->flags & AT803X_DISABLE_SMARTEEE)
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
-
- if (priv->smarteee_lpi_tw_1g) {
- mask |= 0xff00;
- val |= priv->smarteee_lpi_tw_1g << 8;
- }
- if (priv->smarteee_lpi_tw_100m) {
- mask |= 0x00ff;
- val |= priv->smarteee_lpi_tw_100m;
- }
- if (!mask)
- return 0;
-
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
- mask, val);
- if (ret)
- return ret;
-
- return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
-}
-
-static int at803x_clk_out_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (!priv->clk_25m_mask)
- return 0;
-
- return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
- priv->clk_25m_mask, priv->clk_25m_reg);
-}
-
-static int at8031_pll_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is PLL OFF. After a soft reset, the
- * values are retained.
- */
- if (priv->flags & AT803X_KEEP_PLL_ENABLED)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_PLL_ON);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_PLL_ON, 0);
-}
-
-static int at803x_hibernation_mode_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is hibernation mode enabled. After
- * software reset, the value is retained.
- */
- if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
- return 0;
-
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
-}
-
-static int at803x_config_init(struct phy_device *phydev)
-{
- int ret;
-
- /* The RX and TX delay default is:
- * after HW reset: RX delay enabled and TX delay disabled
- * after SW reset: RX delay enabled, while TX delay retains the
- * value before reset.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- ret = at803x_enable_rx_delay(phydev);
- else
- ret = at803x_disable_rx_delay(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- ret = at803x_enable_tx_delay(phydev);
- else
- ret = at803x_disable_tx_delay(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_smarteee_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_clk_out_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_hibernation_mode_config(phydev);
- if (ret < 0)
- return ret;
-
- /* Ar803x extended next page bit is enabled by default. Cisco
- * multigig switches read this bit and attempt to negotiate 10Gbps
- * rates even if the next page bit is disabled. This is incorrect
- * behaviour but we still need to accommodate it. XNP is only needed
- * for 10Gbps support, so disable XNP.
- */
- return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
-}
-
-static int at803x_ack_interrupt(struct phy_device *phydev)
-{
- int err;
-
- err = phy_read(phydev, AT803X_INTR_STATUS);
-
- return (err < 0) ? err : 0;
-}
-
-static int at803x_config_intr(struct phy_device *phydev)
-{
- int err;
- int value;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
- value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
- value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
- value |= AT803X_INTR_ENABLE_LINK_FAIL;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
-
- err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- } else {
- err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
- if (err)
- return err;
-
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- }
-
- return err;
-}
-
-static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status, int_enabled;
-
- irq_status = phy_read(phydev, AT803X_INTR_STATUS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* Read the current enabled interrupts */
- int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (int_enabled < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* See if this was one of our enabled interrupts */
- if (!(irq_status & int_enabled))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
-static void at803x_link_change_notify(struct phy_device *phydev)
-{
- /*
- * Conduct a hardware reset for AT8030 every time a link loss is
- * signalled. This is necessary to circumvent a hardware bug that
- * occurs when the cable is unplugged while TX packets are pending
- * in the FIFO. In such cases, the FIFO enters an error mode it
- * cannot recover from by software.
- */
- if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
- struct at803x_context context;
-
- at803x_context_save(phydev, &context);
-
- phy_device_reset(phydev, 1);
- usleep_range(1000, 2000);
- phy_device_reset(phydev, 0);
- usleep_range(1000, 2000);
-
- at803x_context_restore(phydev, &context);
-
- phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
- }
-}
-
-static int at803x_read_specific_status(struct phy_device *phydev,
- struct at803x_ss_mask ss_mask)
-{
- int ss;
-
- /* Read the AT8035 PHY-Specific Status register, which indicates the
- * speed and duplex that the PHY is actually using, irrespective of
- * whether we are in autoneg mode or not.
- */
- ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
- if (ss < 0)
- return ss;
-
- if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
- int sfc, speed;
-
- sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
- if (sfc < 0)
- return sfc;
-
- speed = ss & ss_mask.speed_mask;
- speed >>= ss_mask.speed_shift;
-
- switch (speed) {
- case AT803X_SS_SPEED_10:
- phydev->speed = SPEED_10;
- break;
- case AT803X_SS_SPEED_100:
- phydev->speed = SPEED_100;
- break;
- case AT803X_SS_SPEED_1000:
- phydev->speed = SPEED_1000;
- break;
- case QCA808X_SS_SPEED_2500:
- phydev->speed = SPEED_2500;
- break;
- }
- if (ss & AT803X_SS_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (ss & AT803X_SS_MDIX)
- phydev->mdix = ETH_TP_MDI_X;
- else
- phydev->mdix = ETH_TP_MDI;
-
- switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
- case AT803X_SFC_MANUAL_MDI:
- phydev->mdix_ctrl = ETH_TP_MDI;
- break;
- case AT803X_SFC_MANUAL_MDIX:
- phydev->mdix_ctrl = ETH_TP_MDI_X;
- break;
- case AT803X_SFC_AUTOMATIC_CROSSOVER:
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- break;
- }
- }
-
- return 0;
-}
-
-static int at803x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int err, old_link = phydev->link;
-
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- err = genphy_read_lpa(phydev);
- if (err < 0)
- return err;
-
- ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
- err = at803x_read_specific_status(phydev, ss_mask);
- if (err < 0)
- return err;
-
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
- phy_resolve_aneg_pause(phydev);
-
- return 0;
-}
-
-static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
-{
- u16 val;
-
- switch (ctrl) {
- case ETH_TP_MDI:
- val = AT803X_SFC_MANUAL_MDI;
- break;
- case ETH_TP_MDI_X:
- val = AT803X_SFC_MANUAL_MDIX;
- break;
- case ETH_TP_MDI_AUTO:
- val = AT803X_SFC_AUTOMATIC_CROSSOVER;
- break;
- default:
- return 0;
- }
-
- return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
- AT803X_SFC_MDI_CROSSOVER_MODE_M,
- FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
-}
-
-static int at803x_prepare_config_aneg(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
- if (ret < 0)
- return ret;
-
- /* Changes to the mdix bits are disruptive to normal operation;
-  * therefore any change to these registers must be followed by a
-  * software reset to take effect.
-  */
- if (ret == 1) {
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int at803x_config_aneg(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
- if (priv->is_1000basex)
- return genphy_c37_config_aneg(phydev);
-
- return genphy_config_aneg(phydev);
-}
-
-static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
-{
- int val;
-
- val = phy_read(phydev, AT803X_SMART_SPEED);
- if (val < 0)
- return val;
-
- if (val & AT803X_SMART_SPEED_ENABLE)
- *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
- else
- *d = DOWNSHIFT_DEV_DISABLE;
-
- return 0;
-}
-
-static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
-{
- u16 mask, set;
- int ret;
-
- switch (cnt) {
- case DOWNSHIFT_DEV_DEFAULT_COUNT:
- cnt = AT803X_DEFAULT_DOWNSHIFT;
- fallthrough;
- case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
- set = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER |
- FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
- mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
- break;
- case DOWNSHIFT_DEV_DISABLE:
- set = 0;
- mask = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER;
- break;
- default:
- return -EINVAL;
- }
-
- ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
-
- /* After changing the smart speed settings, we need to perform a
-  * software reset; use phy_init_hw() to make sure we reapply any
-  * values which might have been lost during the software reset.
-  */
- if (ret == 1)
- ret = phy_init_hw(phydev);
-
- return ret;
-}
-
-static int at803x_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_get_downshift(phydev, data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_set_downshift(phydev, *(const u8 *)data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_cable_test_result_trans(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case AT803X_CDT_STATUS_STAT_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case AT803X_CDT_STATUS_STAT_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case AT803X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static bool at803x_cdt_test_failed(u16 status)
-{
- return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
- AT803X_CDT_STATUS_STAT_FAIL;
-}
-
-static bool at803x_cdt_fault_length_valid(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_OPEN:
- case AT803X_CDT_STATUS_STAT_SHORT:
- return true;
- }
- return false;
-}
-
-static int at803x_cdt_fault_length(int dt)
-{
- /* According to the datasheet the distance to the fault is
- * DELTA_TIME * 0.824 meters.
- *
-  * The author suspects the correct formula is:
- *
- * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
- *
- * where c is the speed of light, VF is the velocity factor of
- * the twisted pair cable, 125MHz the counter frequency and
- * we need to divide by 2 because the hardware will measure the
- * round trip time to the fault and back to the PHY.
- *
- * With a VF of 0.69 we get the factor 0.824 mentioned in the
- * datasheet.
- */
- return (dt * 824) / 10;
-}
-
-static int at803x_cdt_start(struct phy_device *phydev,
- u32 cdt_start)
-{
- return phy_write(phydev, AT803X_CDT, cdt_start);
-}
-
-static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
- u32 cdt_en)
-{
- int val, ret;
-
- /* One test run takes about 25ms */
- ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
- !(val & cdt_en),
- 30000, 100000, true);
-
- return ret < 0 ? ret : 0;
-}
-
-static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
-{
- static const int ethtool_pair[] = {
- ETHTOOL_A_CABLE_PAIR_A,
- ETHTOOL_A_CABLE_PAIR_B,
- ETHTOOL_A_CABLE_PAIR_C,
- ETHTOOL_A_CABLE_PAIR_D,
- };
- int ret, val;
-
- val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read(phydev, AT803X_CDT_STATUS);
- if (val < 0)
- return val;
-
- if (at803x_cdt_test_failed(val))
- return 0;
-
- ethnl_cable_test_result(phydev, ethtool_pair[pair],
- at803x_cable_test_result_trans(val));
-
- if (at803x_cdt_fault_length_valid(val)) {
- val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
- ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- at803x_cdt_fault_length(val));
- }
-
- return 1;
-}
-
-static int at803x_cable_test_get_status(struct phy_device *phydev,
- bool *finished, unsigned long pair_mask)
-{
- int retries = 20;
- int pair, ret;
-
- *finished = false;
-
- /* According to the datasheet the CDT can be performed when
-  * there is no link partner or when the link partner is
-  * auto-negotiating. Starting the test will restart the AN
-  * automatically. It seems that by doing this repeatedly we
-  * eventually get a slot where our link partner won't disturb
-  * our measurement.
-  */
- while (pair_mask && retries--) {
- for_each_set_bit(pair, &pair_mask, 4) {
- ret = at803x_cable_test_one_pair(phydev, pair);
- if (ret < 0)
- return ret;
- if (ret)
- clear_bit(pair, &pair_mask);
- }
- if (pair_mask)
- msleep(250);
- }
-
- *finished = true;
-
- return 0;
-}
-
-static void at803x_cable_test_autoneg(struct phy_device *phydev)
-{
- /* Enable auto-negotiation, but advertise no capabilities, so no link
-  * will be established. A restart of the auto-negotiation is not
-  * required, because the cable test will automatically break the link.
-  */
- phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
- phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
-}
-
-static int at803x_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
-
- if (selector)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_RGMII_1V8);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_RGMII_1V8, 0);
-}
-
-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
- int val;
-
- val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
- if (val < 0)
- return val;
-
- return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
-}
-
-static const struct regulator_ops vddio_regulator_ops = {
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
-};
-
-static const unsigned int vddio_voltage_table[] = {
- 1500000,
- 1800000,
-};
-
-static const struct regulator_desc vddio_desc = {
- .name = "vddio",
- .of_match = of_match_ptr("vddio-regulator"),
- .n_voltages = ARRAY_SIZE(vddio_voltage_table),
- .volt_table = vddio_voltage_table,
- .ops = &vddio_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static const struct regulator_ops vddh_regulator_ops = {
-};
-
-static const struct regulator_desc vddh_desc = {
- .name = "vddh",
- .of_match = of_match_ptr("vddh-regulator"),
- .n_voltages = 1,
- .fixed_uV = 2500000,
- .ops = &vddh_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static int at8031_register_regulators(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct regulator_config config = { };
-
- config.dev = dev;
- config.driver_data = phydev;
-
- priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
- if (IS_ERR(priv->vddio_rdev)) {
- phydev_err(phydev, "failed to register VDDIO regulator\n");
- return PTR_ERR(priv->vddio_rdev);
- }
-
- priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
- if (IS_ERR(priv->vddh_rdev)) {
- phydev_err(phydev, "failed to register VDDH regulator\n");
- return PTR_ERR(priv->vddh_rdev);
- }
-
- return 0;
-}
-
-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
-{
- struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
- /* Some modules support 10G modes in addition to the modes we support.
-  * Mask out the unsupported modes so the correct interface is picked.
-  */
- linkmode_and(sfp_support, phy_support, sfp_support);
-
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
-
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
-
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
- */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct sfp_upstream_ops at8031_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at8031_sfp_insert,
-};
-
-static int at8031_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- if (of_property_read_bool(node, "qca,keep-pll-enabled"))
- priv->flags |= AT803X_KEEP_PLL_ENABLED;
-
- ret = at8031_register_regulators(phydev);
- if (ret < 0)
- return ret;
-
- ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
- "vddio");
- if (ret) {
- phydev_err(phydev, "failed to get VDDIO regulator\n");
- return ret;
- }
-
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- return phy_sfp_probe(phydev, &at8031_sfp_ops);
-}
-
-static int at8031_probe(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int mode_cfg;
- int ccr;
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- /* Only supported on AR8031/AR8033; the AR8030/AR8035 use strapping
-  * options instead.
-  */
- ret = at8031_parse_dt(phydev);
- if (ret)
- return ret;
-
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- if (ccr < 0)
- return ccr;
- mode_cfg = ccr & AT803X_MODE_CFG_MASK;
-
- switch (mode_cfg) {
- case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
- case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
- priv->is_1000basex = true;
- fallthrough;
- case AT803X_MODE_CFG_FX100_RGMII_50OHM:
- case AT803X_MODE_CFG_FX100_RGMII_75OHM:
- priv->is_fiber = true;
- break;
- }
-
- /* Disable WoL in the 1588 register, which is enabled
-  * by default.
-  */
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-}
-
-static int at8031_config_init(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- /* Some bootloaders leave the fiber page selected.
- * Switch to the appropriate page (fiber or copper), as otherwise we
- * read the PHY capabilities from the wrong page.
- */
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev,
- priv->is_fiber ? AT803X_PAGE_FIBER :
- AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
- return ret;
-
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
-
- return at803x_config_init(phydev);
-}
-
-static int at8031_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret;
-
- /* First setup MAC address and enable WOL interrupt */
- ret = at803x_set_wol(phydev, wol);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- /* Enable WOL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- else
- /* Disable WoL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-
- return ret;
-}
-
-static int at8031_config_intr(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err, value = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
- priv->is_fiber) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
-
- err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
- if (err)
- return err;
- }
-
- return at803x_config_intr(phydev);
-}
-
-/* AR8031 and AR8033 share the same read status logic */
-static int at8031_read_status(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (priv->is_1000basex)
- return genphy_c37_read_status(phydev);
-
- return at803x_read_status(phydev);
-}
-
-/* AR8031 and AR8035 share the same cable test get status reg */
-static int at8031_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0xf);
-}
-
-/* AR8031 and AR8035 share the same cable test start logic */
-static int at8031_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- phy_write(phydev, MII_CTRL1000, 0);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
-static int at8032_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0x3);
-}
-
-static int at8035_parse_dt(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The mask is set by the generic at803x_parse_dt()
-  * when the property is present. Assume the property
-  * is present when the mask is non-zero.
-  */
- if (priv->clk_25m_mask) {
- /* Fixup for the AR8030/AR8035. This chip uses a different mask and
-  * doesn't support the DSP reference, i.e. the lowest bit of the
-  * mask. The upper two bits select the same frequencies, so mask
-  * out the lowest bit here.
-  *
-  * Warning:
-  * No datasheet for the AR8030 was available, so this is just a
-  * guess. But the AR8035 is listed as pin compatible with the
-  * AR8030, so there is a good chance it works on the AR8030 too.
-  */
- priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
- }
-
- return 0;
-}
-
-/* AR8030 and AR8035 share the same special mask for clk_25m */
-static int at8035_probe(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- return at8035_parse_dt(phydev);
-}
-
-static int qca83xx_config_init(struct phy_device *phydev)
-{
- u8 switch_revision;
-
- switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
-
- switch (switch_revision) {
- case 1:
- /* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
- /* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
- break;
-
- case 2:
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
- fallthrough;
- case 4:
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
- break;
- }
-
- /* Following the original QCA source code, set the port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
-}
-
-static int qca8327_config_init(struct phy_device *phydev)
-{
- /* The QCA8327 requires the DAC amplitude adjustment for 100M to be
-  * set to +6%. Disable it on init and enable it only at 100M speed,
-  * following the original QCA source code.
-  */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
-
- return qca83xx_config_init(phydev);
-}
-
-static void qca83xx_link_change_notify(struct phy_device *phydev)
-{
- /* Set the DAC amplitude adjustment to +6% for 100M while the link is running */
- if (phydev->state == PHY_RUNNING) {
- if (phydev->speed == SPEED_100)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN,
- QCA8327_DEBUG_MANU_CTRL_EN);
- } else {
- /* Reset DAC Amplitude adjustment */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
- }
-}
-
-static int qca83xx_resume(struct phy_device *phydev)
-{
- int ret, val;
-
- /* Skip reset if not suspended */
- if (!phydev->suspended)
- return 0;
-
- /* Reinit the port, reset values set by suspend */
- qca83xx_config_init(phydev);
-
- /* Reset the port on port resume */
- phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
-
- /* On resume from suspend the switch executes a reset and
-  * restarts auto-negotiation. Wait for the reset to complete.
-  */
- ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
- 50000, 600000, true);
- if (ret)
- return ret;
-
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-static int qca83xx_suspend(struct phy_device *phydev)
-{
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
- AT803X_DEBUG_GATE_CLK_IN1000, 0);
-
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
- AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
-
- return 0;
-}
-
-static int qca8337_suspend(struct phy_device *phydev)
-{
- /* Only the QCA8337 supports actual suspend. */
- genphy_suspend(phydev);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca8327_suspend(struct phy_device *phydev)
-{
- u16 mask = 0;
-
- /* The QCA8327 causes port unreliability when PHY suspend
-  * is set.
-  */
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
-{
- int ret;
-
- /* Enable fast retrain */
- ret = genphy_c45_fast_retrain(phydev, true);
- if (ret)
- return ret;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
- QCA808X_TOP_OPTION1_DATA);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
- QCA808X_MSE_THRESHOLD_20DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
- QCA808X_MSE_THRESHOLD_17DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
- QCA808X_MSE_THRESHOLD_27DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
- QCA808X_MSE_THRESHOLD_28DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
- QCA808X_MMD3_DEBUG_1_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
- QCA808X_MMD3_DEBUG_4_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
- QCA808X_MMD3_DEBUG_5_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
- QCA808X_MMD3_DEBUG_3_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
- QCA808X_MMD3_DEBUG_6_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
- QCA808X_MMD3_DEBUG_2_VALUE);
-
- return 0;
-}
-
-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
-{
- u16 seed_value;
-
- if (!enable)
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
-
- seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
- FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
- QCA808X_MASTER_SLAVE_SEED_ENABLE);
-}
-
-static bool qca808x_is_prefer_master(struct phy_device *phydev)
-{
- return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
- (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
-}
-
-static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
-{
- return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-}
-
-static int qca808x_config_init(struct phy_device *phydev)
-{
- int ret;
-
- /* Activate ADC & VGA on 802.3az for the 1000M and 100M links */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
- QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
- if (ret)
- return ret;
-
- /* Adjust the 802.3az threshold for the 1000M link */
- ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
- QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
- QCA808X_MMD3_AZ_TRAINING_VAL);
- if (ret)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- /* Configure fast retrain for the 2500M link */
- ret = qca808x_phy_fast_retrain_config(phydev);
- if (ret)
- return ret;
-
- ret = genphy_read_master_slave(phydev);
- if (ret < 0)
- return ret;
-
- if (!qca808x_is_prefer_master(phydev)) {
- /* Enable the seed and configure a lower random seed value so the
-  * PHY links in slave mode.
-  */
- ret = qca808x_phy_ms_seed_enable(phydev, true);
- if (ret)
- return ret;
- }
- }
-
- /* Configure the ADC threshold to 100mV for the 10M link */
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
- QCA808X_ADC_THRESHOLD_MASK,
- QCA808X_ADC_THRESHOLD_100MV);
-}
-
-static int qca808x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
- if (ret < 0)
- return ret;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
- ret & MDIO_AN_10GBT_STAT_LP2_5G);
-
- ret = genphy_read_status(phydev);
- if (ret)
- return ret;
-
- /* The qca8081 uses different bits for the speed value than the at803x */
- ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
- ret = at803x_read_specific_status(phydev, ss_mask);
- if (ret < 0)
- return ret;
-
- if (phydev->link) {
- if (phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- } else {
- /* Generate the seed as a lower random value so the PHY easily links as
-  * SLAVE, except when a master/slave configuration fault is detected or
-  * master mode is preferred.
-  *
-  * The reason for not putting this code into link_change_notify() is the
-  * corner case where the link partner is also a qca8081 PHY and the seed
-  * is configured to the same value: the link can't come up and no link
-  * change occurs.
-  */
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
- qca808x_is_prefer_master(phydev)) {
- qca808x_phy_ms_seed_enable(phydev, false);
- } else {
- qca808x_phy_ms_seed_enable(phydev, true);
- }
- }
- }
-
- return 0;
-}
-
-static int qca808x_soft_reset(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev))
- ret = qca808x_phy_ms_seed_enable(phydev, true);
-
- return ret;
-}
-
-static bool qca808x_cdt_fault_length_valid(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return true;
- default:
- return false;
- }
-}
-
-static int qca808x_cable_test_result_trans(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
- case QCA808X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
- int result)
-{
- int val;
- u32 cdt_length_reg = 0;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
- break;
- default:
- return -EINVAL;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
- if (val < 0)
- return val;
-
- if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
- else
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
-
- return at803x_cdt_fault_length(val);
-}
-
-static int qca808x_cable_test_start(struct phy_device *phydev)
-{
- int ret;
-
- /* Perform CDT with the following configuration:
-  * 1. disable hibernation.
-  * 2. force the PHY to work in MDI mode.
-  * 3. force the PHY to work in 1000BaseT.
-  * 4. configure the thresholds.
-  */
-
- ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
- if (ret < 0)
- return ret;
-
- ret = at803x_config_mdix(phydev, ETH_TP_MDI);
- if (ret < 0)
- return ret;
-
- /* Forcing 1000base-T requires configuring both PMA/PMD and MII_BMCR */
- phydev->duplex = DUPLEX_FULL;
- phydev->speed = SPEED_1000;
- ret = genphy_c45_pma_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- ret = genphy_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- /* configure the thresholds for open, short, pair ok test */
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
-
- return 0;
-}
-
-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
- u16 status)
-{
- int length, result;
- u16 pair_code;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
- break;
- default:
- return -EINVAL;
- }
-
- result = qca808x_cable_test_result_trans(pair_code);
- ethnl_cable_test_result(phydev, pair, result);
-
- if (qca808x_cdt_fault_length_valid(pair_code)) {
- length = qca808x_cdt_fault_length(phydev, pair, result);
- ethnl_cable_test_fault_length(phydev, pair, length);
- }
-
- return 0;
-}
-
-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
-{
- int ret, val;
-
- *finished = false;
-
- val = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
- if (val < 0)
- return val;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
- if (ret)
- return ret;
-
- *finished = true;
-
- return 0;
-}
-
-static int qca808x_get_features(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_c45_pma_read_abilities(phydev);
- if (ret)
- return ret;
-
- /* The autoneg ability is not reported in bit 3 of MMD7.1,
-  * but it is supported by the qca808x PHY, so add it here
-  * manually.
-  */
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
-
- /* On the qca8081 1G version chip, the 2500baseT ability is still
-  * reported in bit 0 of MMD1.21, so remove it manually when bit 0 of
-  * MMD7.0x901d identifies the chip as the 1G variant.
-  */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
- if (ret < 0)
- return ret;
-
- if (QCA808X_PHY_CHIP_TYPE_1G & ret)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-
- return 0;
-}
-
-static int qca808x_config_aneg(struct phy_device *phydev)
-{
- int phy_ctrl = 0;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
- /* The MII_BMCR register also needs to be configured for forced mode,
-  * so genphy_config_aneg() is still needed.
-  */
- if (phydev->autoneg == AUTONEG_DISABLE)
- genphy_c45_pma_setup_forced(phydev);
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
- phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
-
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
- if (ret < 0)
- return ret;
-
- return __genphy_config_aneg(phydev, ret);
-}
-
-static void qca808x_link_change_notify(struct phy_device *phydev)
-{
- /* Assert the SGMII interface FIFO on link down and deassert it on
-  * link up; the interface device address is always the PHY address
-  * plus 1.
-  */
- mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
- MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
- QCA8081_PHY_FIFO_RSTN,
- phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
-}
-
-static struct phy_driver at803x_driver[] = {
-{
- /* Qualcomm Atheros AR8035 */
- PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
- .name = "Qualcomm Atheros AR8035",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8035_probe,
- .config_aneg = at803x_config_aneg,
- .config_init = at803x_config_init,
- .soft_reset = genphy_soft_reset,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_GBIT_FEATURES */
- .read_status = at803x_read_status,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8030 */
- .phy_id = ATH8030_PHY_ID,
- .name = "Qualcomm Atheros AR8030",
- .phy_id_mask = AT8030_PHY_ID_MASK,
- .probe = at8035_probe,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
-}, {
- /* Qualcomm Atheros AR8031/AR8033 */
- PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
- .name = "Qualcomm Atheros AR8031/AR8033",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8031_probe,
- .config_init = at8031_config_init,
- .config_aneg = at803x_config_aneg,
- .soft_reset = genphy_soft_reset,
- .set_wol = at8031_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .read_page = at803x_read_page,
- .write_page = at803x_write_page,
- .get_features = at803x_get_features,
- .read_status = at8031_read_status,
- .config_intr = at8031_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8032 */
- PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
- .name = "Qualcomm Atheros AR8032",
- .probe = at803x_probe,
- .flags = PHY_POLL_CABLE_TEST,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
-}, {
- /* ATHEROS AR9331 */
- PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
- .name = "Qualcomm Atheros AR9331 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* Qualcomm Atheros QCA9561 */
- PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
- .name = "Qualcomm Atheros QCA9561 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8337 internal PHY",
- /* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8337_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-A internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-B internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* Qualcomm QCA8081 */
- PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
- .name = "Qualcomm QCA8081",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .get_features = qca808x_get_features,
- .config_aneg = qca808x_config_aneg,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .read_status = qca808x_read_status,
- .config_init = qca808x_config_init,
- .soft_reset = qca808x_soft_reset,
- .cable_test_start = qca808x_cable_test_start,
- .cable_test_get_status = qca808x_cable_test_get_status,
- .link_change_notify = qca808x_link_change_notify,
-}, };
-
-module_phy_driver(at803x_driver);
-
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
- { }
-};
-
-MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 312a8bb35d78..370e4ed45098 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -665,10 +665,11 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
static int bcm54616s_read_status(struct phy_device *phydev)
{
struct bcm54616s_phy_priv *priv = phydev->priv;
+ bool changed;
int err;
if (priv->mode_1000bx_en)
- err = genphy_c37_read_status(phydev);
+ err = genphy_c37_read_status(phydev, &changed);
else
err = genphy_read_status(phydev);
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index b7cb71817780..c3426a17e6d0 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#define DP83822_PHY_ID 0x2000a240
#define DP83825S_PHY_ID 0x2000a140
@@ -34,6 +35,10 @@
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
+/* DP83826 specific registers */
+#define MII_DP83826_VOD_CFG1 0x30b
+#define MII_DP83826_VOD_CFG2 0x30c
+
/* GENCFG */
#define DP83822_SIG_DET_LOW BIT(0)
@@ -95,6 +100,8 @@
#define DP83822_WOL_CLR_INDICATION BIT(11)
/* RCSR bits */
+#define DP83822_RMII_MODE_EN BIT(5)
+#define DP83822_RMII_MODE_SEL BIT(7)
#define DP83822_RGMII_MODE_EN BIT(9)
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
@@ -110,6 +117,19 @@
#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
#define DP83822_RX_ER_SHIFT 8
+/* DP83826: VOD_CFG1 & VOD_CFG2 */
+#define DP83826_VOD_CFG1_MINUS_MDIX_MASK GENMASK(13, 12)
+#define DP83826_VOD_CFG1_MINUS_MDI_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_MINUS_MDIX_MASK GENMASK(15, 12)
+#define DP83826_VOD_CFG2_PLUS_MDIX_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_PLUS_MDI_MASK GENMASK(5, 0)
+#define DP83826_CFG_DAC_MINUS_MDIX_5_TO_4 GENMASK(5, 4)
+#define DP83826_CFG_DAC_MINUS_MDIX_3_TO_0 GENMASK(3, 0)
+#define DP83826_CFG_DAC_PERCENT_PER_STEP 625
+#define DP83826_CFG_DAC_PERCENT_DEFAULT 10000
+#define DP83826_CFG_DAC_MINUS_DEFAULT 0x30
+#define DP83826_CFG_DAC_PLUS_DEFAULT 0x10
+
#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
@@ -118,6 +138,8 @@ struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
u16 fx_sd_enable;
+ u8 cfg_dac_minus;
+ u8 cfg_dac_plus;
};
static int dp83822_set_wol(struct phy_device *phydev,
@@ -233,7 +255,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
@@ -254,7 +276,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -380,7 +402,7 @@ static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
- int rgmii_delay;
+ int rgmii_delay = 0;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
@@ -390,30 +412,33 @@ static int dp83822_config_init(struct phy_device *phydev)
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
- if (rx_int_delay <= 0)
- rgmii_delay = 0;
- else
- rgmii_delay = DP83822_RX_CLK_SHIFT;
+ /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
+ if (rx_int_delay > 0)
+ rgmii_delay |= DP83822_RX_CLK_SHIFT;
tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
false);
+
+ /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
if (tx_int_delay <= 0)
- rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
- else
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- if (rgmii_delay) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, rgmii_delay);
- if (err)
- return err;
- }
+ err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
- phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ if (err)
+ return err;
} else {
- phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+ if (err)
+ return err;
}
if (dp83822->fx_enabled) {
@@ -474,6 +499,85 @@ static int dp83822_config_init(struct phy_device *phydev)
return dp8382x_disable_wol(phydev);
}
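
The two clock-shift bits in RCSR have opposite senses: setting DP83822_RX_CLK_SHIFT enables the RX internal delay, while setting DP83822_TX_CLK_SHIFT disables the TX internal delay, which is why the code above sets the TX bit when no TX delay is requested. A standalone sketch (not driver code) of the resulting field for the four delay combinations, using plain masks instead of the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

#define RX_CLK_SHIFT (1u << 12)        /* set => RX internal delay enabled */
#define TX_CLK_SHIFT (1u << 11)        /* set => TX internal delay disabled */

/* Mirror of the logic above: RX delay requested => set bit 12,
 * TX delay *not* requested => set bit 11.
 */
static unsigned int rgmii_delay_field(bool rx_delay, bool tx_delay)
{
        unsigned int val = 0;

        if (rx_delay)
                val |= RX_CLK_SHIFT;
        if (!tx_delay)
                val |= TX_CLK_SHIFT;

        return val;
}

int main(void)
{
        for (int rx = 0; rx <= 1; rx++)
                for (int tx = 0; tx <= 1; tx++)
                        printf("rx_delay=%d tx_delay=%d -> field 0x%04x\n",
                               rx, tx, rgmii_delay_field(rx, tx));
        return 0;
}
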
+static int dp83826_config_rmii_mode(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
+ int ret;
+
+ if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
+ if (strcmp(of_val, "master") == 0) {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else if (strcmp(of_val, "slave") == 0) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else {
+ phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
+ of_val);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dp83826_config_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ u16 val, mask;
+ int ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+
+ ret = dp83826_config_rmii_mode(phydev);
+ if (ret)
+ return ret;
+ } else {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_minus != DP83826_CFG_DAC_MINUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDI_MASK, dp83822->cfg_dac_minus) |
+ FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(DP83826_VOD_CFG2_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_plus != DP83826_CFG_DAC_PLUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
+ FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
+ mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ return dp8382x_disable_wol(phydev);
+}
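
In the cfg_dac_minus path above, the 6-bit value is spread across two registers: the MDI field of VOD_CFG1 takes all six bits, while the MDIX setting is split, with bits 5:4 going to VOD_CFG1[13:12] and bits 3:0 to VOD_CFG2[15:12]. A standalone sketch (plain shifts in place of FIELD_PREP()/FIELD_GET()) showing where a sample value ends up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t cfg_dac_minus = 0x32;  /* example 6-bit value */

        /* VOD_CFG1: the MDI field (bits 11:6) takes the full value, the
         * MDIX field (bits 13:12) only bits 5:4 of it.
         */
        unsigned int vod_cfg1 = ((cfg_dac_minus & 0x3f) << 6) |
                                (((cfg_dac_minus >> 4) & 0x3) << 12);

        /* VOD_CFG2: the MDIX field (bits 15:12) takes bits 3:0 */
        unsigned int vod_cfg2 = (cfg_dac_minus & 0xf) << 12;

        printf("VOD_CFG1=0x%04x VOD_CFG2=0x%04x\n", vod_cfg1, vod_cfg2);
        return 0;
}
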
+
static int dp8382x_config_init(struct phy_device *phydev)
{
return dp8382x_disable_wol(phydev);
@@ -509,11 +613,44 @@ static int dp83822_of_init(struct phy_device *phydev)
return 0;
}
+
+static int dp83826_to_dac_minus_one_regval(int percent)
+{
+ int tmp = DP83826_CFG_DAC_PERCENT_DEFAULT - percent;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static int dp83826_to_dac_plus_one_regval(int percent)
+{
+ int tmp = percent - DP83826_CFG_DAC_PERCENT_DEFAULT;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ u32 val;
+
+ dp83822->cfg_dac_minus = DP83826_CFG_DAC_MINUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-minus-one-bp", &val))
+ dp83822->cfg_dac_minus += dp83826_to_dac_minus_one_regval(val);
+
+ dp83822->cfg_dac_plus = DP83826_CFG_DAC_PLUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-plus-one-bp", &val))
+ dp83822->cfg_dac_plus += dp83826_to_dac_plus_one_regval(val);
+}
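
The two helpers above turn the device-tree properties, given in basis points of the nominal output voltage (100% = 10000 bp), into 6.25% (625 bp) register steps that are added to the chip defaults in dp83826_of_init(). A standalone sketch with example property values (the example numbers are illustrative, not from this patch):

#include <stdio.h>

#define PERCENT_PER_STEP  625           /* 6.25% in basis points */
#define PERCENT_DEFAULT   10000         /* 100% nominal voltage */
#define DAC_MINUS_DEFAULT 0x30
#define DAC_PLUS_DEFAULT  0x10

int main(void)
{
        int minus_bp = 8750;            /* example "ti,cfg-dac-minus-one-bp" */
        int plus_bp = 11250;            /* example "ti,cfg-dac-plus-one-bp" */

        int cfg_dac_minus = DAC_MINUS_DEFAULT +
                            (PERCENT_DEFAULT - minus_bp) / PERCENT_PER_STEP;
        int cfg_dac_plus = DAC_PLUS_DEFAULT +
                           (plus_bp - PERCENT_DEFAULT) / PERCENT_PER_STEP;

        /* 8750 bp -> 0x32, 11250 bp -> 0x12 */
        printf("cfg_dac_minus=0x%x cfg_dac_plus=0x%x\n",
               cfg_dac_minus, cfg_dac_plus);
        return 0;
}
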
#else
static int dp83822_of_init(struct phy_device *phydev)
{
return 0;
}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+}
#endif /* CONFIG_OF_MDIO */
static int dp83822_read_straps(struct phy_device *phydev)
@@ -567,6 +704,22 @@ static int dp83822_probe(struct phy_device *phydev)
return 0;
}
+static int dp83826_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ dp83826_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -610,6 +763,22 @@ static int dp83822_resume(struct phy_device *phydev)
.resume = dp83822_resume, \
}
+#define DP83826_PHY_DRIVER(_id, _name) \
+ { \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ /* PHY_BASIC_FEATURES */ \
+ .probe = dp83826_probe, \
+ .soft_reset = dp83822_phy_reset, \
+ .config_init = dp83826_config_init, \
+ .get_wol = dp83822_get_wol, \
+ .set_wol = dp83822_set_wol, \
+ .config_intr = dp83822_config_intr, \
+ .handle_interrupt = dp83822_handle_interrupt, \
+ .suspend = dp83822_suspend, \
+ .resume = dp83822_resume, \
+ }
+
#define DP8382X_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -628,8 +797,8 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP8382X_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
- DP8382X_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
- DP8382X_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83826_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83826_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
DP8382X_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
DP8382X_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
DP8382X_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 5f08f9d38bd7..4120385c5a79 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -158,6 +158,7 @@
/* LED_DRV bits */
#define DP83867_LED_DRV_EN(x) BIT((x) * 4)
#define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1)
+#define DP83867_LED_POLARITY(x) BIT((x) * 4 + 2)
#define DP83867_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
#define DP83867_LED_FN_MASK(idx) (0xf << ((idx) * 4))
@@ -1152,6 +1153,26 @@ static int dp83867_led_hw_control_get(struct phy_device *phydev, u8 index,
return 0;
}
+static int dp83867_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ /* Default active high */
+ u16 polarity = DP83867_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return phy_modify(phydev, DP83867_LEDCR2,
+ DP83867_LED_POLARITY(index), polarity);
+}
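
LEDCR2 packs one 4-bit group per LED: drive enable at bit 4*index, drive value at bit 4*index+1 and, with the new define, polarity at bit 4*index+2, so dp83867_led_polarity_set() only needs to flip a single bit. A standalone sketch of the resulting bit positions, assuming the four LEDs a 16-bit register can describe:

#include <stdio.h>

#define LED_DRV_EN(x)           (1u << ((x) * 4))
#define LED_DRV_VAL(x)          (1u << ((x) * 4 + 1))
#define LED_POLARITY(x)         (1u << ((x) * 4 + 2))

int main(void)
{
        for (int led = 0; led < 4; led++)
                printf("LED%d: EN=0x%04x VAL=0x%04x POL=0x%04x\n",
                       led, LED_DRV_EN(led), LED_DRV_VAL(led),
                       LED_POLARITY(led));
        return 0;
}
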
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -1184,6 +1205,7 @@ static struct phy_driver dp83867_driver[] = {
.led_hw_is_supported = dp83867_led_hw_is_supported,
.led_hw_control_set = dp83867_led_hw_control_set,
.led_hw_control_get = dp83867_led_hw_control_get,
+ .led_polarity_set = dp83867_led_polarity_set,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 1c3ff77de56b..6b4bd9883304 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -1,10 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver
+ *
+ * Derived from Marvell Q222x API
+ *
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/hwmon.h>
+
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -13,8 +20,38 @@
#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_100BT1_GEN 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT_EN 0x0080
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2 32834
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK 0xc000
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3 32835
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK 0x00ff
+
#define MDIO_MMD_PCS_MV_100BT1_STAT1 33032
-#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00ff
#define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000
@@ -27,6 +64,71 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
+
+#define MDIO_MMD_PCS_MV_TDR_RESET 65226
+#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE 65241
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE 65242
+
+#define MDIO_MMD_PCS_MV_TDR_STATUS 65245
+#define MDIO_MMD_PCS_MV_TDR_STATUS_MASK 0x0003
+#define MDIO_MMD_PCS_MV_TDR_STATUS_OFF 0x0001
+#define MDIO_MMD_PCS_MV_TDR_STATUS_ON 0x0002
+#define MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK 0x00f0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT 0x0030
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN 0x00e0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK 0x0070
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_IN_PROGR 0x0080
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_NOISE 0x0050
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+
+struct mmd_val {
+ int devad;
+ u32 regnum;
+ u16 val;
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq0[] = {
+ { MDIO_MMD_PCS, 0x8033, 0x6801 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER | MDIO_PMA_CTRL1_SPEED1000 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x48 },
+ { MDIO_MMD_PCS, 0xffe4, 0x6b6 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0 },
+ { MDIO_MMD_PCS, MDIO_CTRL1, 0x0 },
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe79, 0x0 },
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xfbba, 0xcb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0xc4a },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfe79, 0x4 },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -50,20 +152,23 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
/* Read vendor specific Auto-Negotiation status register to get local
* and remote receiver status according to software initialization
- * guide.
+ * guide. However, when not in polling mode the local and remote
+ * receiver status are not evaluated, in line with the Marvell 88Q2xxx API.
*/
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
if (ret < 0) {
return ret;
- } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
- (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) {
+ } else if (((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
+ (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) ||
+ !phy_polling_mode(phydev)) {
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
* in polling mode to detect such short link drops unless
* the link was already down.
*/
if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -71,7 +176,8 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
}
if (!link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -94,8 +200,20 @@ static int mv88q2xxx_read_link_100m(struct phy_device *phydev)
* the link was already down. In case we are not polling,
* we always read the realtime status.
*/
- if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1);
+ if (!phy_polling_mode(phydev)) {
+ phydev->link = false;
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_PCS_MV_100BT1_STAT2_LINK)
+ phydev->link = true;
+
+ return 0;
+ } else if (!phydev->link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT1);
if (ret < 0)
return ret;
else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK)
@@ -120,24 +238,90 @@ out:
static int mv88q2xxx_read_link(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do not have the PMA/PMD status register available,
* therefore we need to read the link status from the vendor specific
* registers depending on the speed.
*/
+
if (phydev->speed == SPEED_1000)
- ret = mv88q2xxx_read_link_gbit(phydev);
+ return mv88q2xxx_read_link_gbit(phydev);
+ else if (phydev->speed == SPEED_100)
+ return mv88q2xxx_read_link_100m(phydev);
+
+ phydev->link = false;
+ return 0;
+}
+
+static int mv88q2xxx_read_master_slave_state(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_AN_MV_STAT_LOCAL_MASTER)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
else
- ret = mv88q2xxx_read_link_100m(phydev);
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ return 0;
+}
+
+static int mv88q2xxx_read_aneg_speed(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_MMD_AN_MV_STAT2_AN_RESOLVED))
+ return 0;
- return ret;
+ if (ret & MDIO_MMD_AN_MV_STAT2_100BT1)
+ phydev->speed = SPEED_100;
+ else if (ret & MDIO_MMD_AN_MV_STAT2_1000BT1)
+ phydev->speed = SPEED_1000;
+
+ return 0;
}
static int mv88q2xxx_read_status(struct phy_device *phydev)
{
int ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* We have to get the negotiated speed first, otherwise we are
+ * not able to read the link.
+ */
+ ret = mv88q2xxx_read_aneg_speed(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_link(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_master_slave_state(phydev);
+ if (ret < 0)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+
+ return 0;
+ }
+
ret = mv88q2xxx_read_link(phydev);
if (ret < 0)
return ret;
@@ -166,7 +350,9 @@ static int mv88q2xxx_get_features(struct phy_device *phydev)
* sequence provided by Marvell. Disable it for now until a proper
* workaround is found or a new PHY revision is released.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ if (phydev->drv->phy_id == MARVELL_PHY_ID_88Q2110)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
return 0;
}
@@ -179,28 +365,29 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
if (ret)
return ret;
- return mv88q2xxx_soft_reset(phydev);
+ return phydev->drv->soft_reset(phydev);
}
static int mv88q2xxx_config_init(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do have the extended ability register available, but
* register MDIO_PMA_EXTABLE where they should signalize it does not
* work according to specification. Therefore, we force it here.
*/
phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
- /* Read the current PHY configuration */
- ret = genphy_c45_read_pma(phydev);
- if (ret)
- return ret;
+ /* Configure the interrupt with default settings: the output is driven
+ * low for an active interrupt and high when inactive.
+ */
+ if (phy_interrupt_is_valid(phydev))
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
- return mv88q2xxx_config_aneg(phydev);
+ return 0;
}
-static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -208,7 +395,8 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
/* Read the SQI from the vendor specific receiver status
* register
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RX_STAT);
if (ret < 0)
return ret;
@@ -218,7 +406,7 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
* but can be found in the Software Initialization Guide. Only
* revisions >= A0 are supported.
*/
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xfc5d, 0xff, 0xac);
if (ret < 0)
return ret;
@@ -227,14 +415,386 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
return ret;
}
- return ret & 0x0F;
+ return ret & 0x0f;
}
-static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi_max(struct phy_device *phydev)
{
return 15;
}
+static int mv88q2xxx_config_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Enable interrupts for 1000BASE-T1 link up and down events
+ * and enable general interrupts for 100BASE-T1.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN,
+ MDIO_MMD_PCS_MV_INT_EN_LINK_UP |
+ MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN |
+ MDIO_MMD_PCS_MV_INT_EN_100BT1);
+ if (ret < 0)
+ return ret;
+
+ /* Enable interrupts for 100BASE-T1 link events */
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN, 0);
+ }
+}
+
+static irqreturn_t mv88q2xxx_handle_interrupt(struct phy_device *phydev)
+{
+ bool trigger_machine = false;
+ int irq;
+
+ /* Before we can acknowledge the 100BT1 general interrupt, which is
+ * reported in the 1000BT1 interrupt status register, we have to
+ * acknowledge any interrupts related to it. Therefore we first read
+ * the 100BT1 interrupt status register, followed by the 1000BT1
+ * interrupt status register.
+ */
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_COPPER_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 100BT1 */
+ if (irq & MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT)
+ trigger_machine = true;
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_GPIO_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 1000BT1 */
+ if ((irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP) ||
+ (irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN))
+ trigger_machine = true;
+
+ if (!trigger_machine)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int mv88q2xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int mv88q2xxx_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+#if IS_ENABLED(CONFIG_HWMON)
+static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
+ NULL
+};
+
+static umode_t mv88q2xxx_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int mv88q2xxx_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK, ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_max:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_alarm:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR1);
+ if (ret < 0)
+ return ret;
+
+ *val = !!(ret & MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ clamp_val(val, -75000, 180000);
+ val = (val / 1000) + 75;
+ val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
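
The read and write paths above imply the temperature encoding: the 8-bit sensor fields store degrees Celsius offset by +75, while hwmon works in millidegrees, so raw 0 maps to -75000 and raw 255 to 180000. A standalone sketch of the conversion in both directions:

#include <stdio.h>

/* raw 8-bit sensor field -> millidegrees Celsius */
static long raw_to_mdeg(int raw)
{
        return (raw - 75) * 1000L;
}

/* millidegrees Celsius -> raw threshold field, clamped to -75..180 C */
static int mdeg_to_raw(long mdeg)
{
        if (mdeg < -75000)
                mdeg = -75000;
        if (mdeg > 180000)
                mdeg = 180000;
        return (int)(mdeg / 1000) + 75;
}

int main(void)
{
        printf("raw 0   -> %ld mC\n", raw_to_mdeg(0));          /* -75000 */
        printf("raw 100 -> %ld mC\n", raw_to_mdeg(100));        /* 25000 */
        printf("85000 mC -> raw 0x%02x\n", mdeg_to_raw(85000)); /* 0xa0 */
        return 0;
}
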
+
+static const struct hwmon_ops mv88q2xxx_hwmon_hwmon_ops = {
+ .is_visible = mv88q2xxx_hwmon_is_visible,
+ .read = mv88q2xxx_hwmon_read,
+ .write = mv88q2xxx_hwmon_write,
+};
+
+static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
+ .ops = &mv88q2xxx_hwmon_hwmon_ops,
+ .info = mv88q2xxx_hwmon_info,
+};
+
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon;
+ char *hwmon_name;
+ int ret;
+
+ /* Enable temperature sense */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon = devm_hwmon_device_register_with_info(dev,
+ hwmon_name,
+ phydev,
+ &mv88q2xxx_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+
+#else
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
+static int mv88q2xxx_probe(struct phy_device *phydev)
+{
+ return mv88q2xxx_hwmon_probe(phydev);
+}
+
+static int mv88q222x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL,
+ MDIO_PCS_1000BT1_CTRL_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc);
+ if (ret < 0)
+ return ret;
+
+ /* Disable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000)
+ return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58);
+
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
+ mv88q222x_revb0_init_seq0[i].regnum,
+ mv88q222x_revb0_init_seq0[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
+ mv88q222x_revb0_init_seq1[i].regnum,
+ mv88q222x_revb0_init_seq1[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF, 0x0058);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE, 0x00eb);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE, 0x010e);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ 0x0d90);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS,
+ MDIO_MMD_PCS_MV_TDR_STATUS_ON);
+ if (ret < 0)
+ return ret;
+
+ /* According to the Marvell API the test is finished within 500 ms */
+ msleep(500);
+
+ return 0;
+}
+
+static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, status;
+ u32 dist;
+
+ status = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS);
+ if (status < 0)
+ return status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST | 0xd90);
+ if (ret < 0)
+ return ret;
+
+ /* Test could not be finished */
+ if (FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_MASK, status) !=
+ MDIO_MMD_PCS_MV_TDR_STATUS_OFF)
+ return -ETIMEDOUT;
+
+ *finished = true;
+ /* Fault length reported in meters, convert to centimeters */
+ dist = FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK, status) * 100;
+ switch (status & MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK) {
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK);
+ break;
+ default:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ return 0;
+}
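
The TDR status word above multiplexes three pieces of information: bits 1:0 report whether the measurement engine has stopped, bits 7:4 carry the VCT verdict and bits 15:8 the fault distance in meters, which the driver reports to ethtool in centimeters. A standalone sketch decoding an example status value (the value itself is made up for illustration):

#include <stdio.h>

#define TDR_STATUS_MASK         0x0003
#define TDR_STATUS_OFF          0x0001
#define TDR_VCT_STAT_MASK       0x00f0
#define TDR_VCT_STAT_SHORT      0x0030
#define TDR_VCT_STAT_OK         0x0070
#define TDR_VCT_STAT_OPEN       0x00e0
#define TDR_DIST_MASK           0xff00

int main(void)
{
        unsigned int status = 0x17e1;   /* made-up register value */
        unsigned int dist_cm;

        if ((status & TDR_STATUS_MASK) != TDR_STATUS_OFF) {
                printf("test still running\n");
                return 0;
        }

        dist_cm = ((status & TDR_DIST_MASK) >> 8) * 100;

        switch (status & TDR_VCT_STAT_MASK) {
        case TDR_VCT_STAT_OPEN:
                printf("open at %u cm\n", dist_cm);     /* open at 2300 cm */
                break;
        case TDR_VCT_STAT_SHORT:
                printf("short at %u cm\n", dist_cm);
                break;
        case TDR_VCT_STAT_OK:
                printf("cable ok\n");
                break;
        default:
                printf("unspecified result\n");
        }
        return 0;
}
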
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
@@ -246,8 +806,29 @@ static struct phy_driver mv88q2xxx_driver[] = {
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q2xxx_soft_reset,
.set_loopback = genphy_c45_loopback,
- .get_sqi = mv88q2xxxx_get_sqi,
- .get_sqi_max = mv88q2xxxx_get_sqi_max,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .name = "mv88q2220",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = mv88q2xxx_probe,
+ .get_features = mv88q2xxx_get_features,
+ .config_aneg = mv88q2xxx_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .config_init = mv88q222x_revb0_config_init,
+ .read_status = mv88q2xxx_read_status,
+ .soft_reset = mv88q222x_soft_reset,
+ .config_intr = mv88q2xxx_config_intr,
+ .handle_interrupt = mv88q2xxx_handle_interrupt,
+ .set_loopback = genphy_c45_loopback,
+ .cable_test_start = mv88q222x_cable_test_start,
+ .cable_test_get_status = mv88q222x_cable_test_get_status,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ .suspend = mv88q2xxx_suspend,
+ .resume = mv88q2xxx_resume,
},
};
@@ -255,6 +836,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index e3aa30dad2e6..b88398e6872b 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -9,12 +9,10 @@
*/
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mdio.h>
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/sfp.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index eba652a4c1d8..42ed013385bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -919,7 +919,10 @@ static int m88e1111_config_init_1000basex(struct phy_device *phydev)
if (extsr < 0)
return extsr;
- /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */
+ /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled.
+ * FIXME: this does not actually enable 1000BaseX auto-negotiation if
+ * it was previously disabled in the Fiber BMCR!
+ */
mode = extsr & MII_M1111_HWCFG_MODE_MASK;
if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) {
err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
@@ -1461,7 +1464,7 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index afbad1ad8683..8b9ead76e40e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -25,7 +24,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/reset.h>
@@ -459,19 +457,34 @@ EXPORT_SYMBOL(of_mdio_find_bus);
* found, set the of_node pointer for the mdio device. This allows
* auto-probed phy devices to be supplied with information passed in
* via DT.
+ * If a PHY package is found, the PHY is also searched for within it.
*/
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
{
- struct device *dev = &mdiodev->dev;
struct device_node *child;
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- for_each_available_child_of_node(bus->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
int addr;
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
addr = of_mdio_parse_addr(dev, child);
if (addr < 0)
continue;
@@ -481,9 +494,22 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
/* The refcount on "child" is passed to the mdio
* device. Do _not_ use of_node_put(child) here.
*/
- return;
+ return 0;
}
}
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
}
#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
@@ -1398,7 +1424,7 @@ static const struct attribute_group *mdio_bus_dev_groups[] = {
NULL,
};
-struct bus_type mdio_bus_type = {
+const struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index dad720138baa..8b8634600c51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,12 +114,25 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+#define LAN8814_EEE_STATE 0x38
+#define LAN8814_EEE_STATE_MASK2P5P BIT(10)
+
+#define LAN8814_PD_CONTROLS 0x9d
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK GENMASK(3, 0)
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL 0xb
+
/* Represents 1ppm adjustment in 2^32 format where
* each nsec contains 4 clock cycles.
* The value is calculated as follows: (1/1000000)/((2^-32)/4)
*/
#define LAN8814_1PPM_FORMAT 17179
+/* Represents 1ppm adjustment in 2^32 format where
+ * each nsec contains 8 clock cycles.
+ * The value is calculated as follows: (1/1000000)/((2^-32)/8)
+ */
+#define LAN8841_1PPM_FORMAT 34360
+
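
The comments above define 1 ppm in the hardware's 2^32 fixed-point rate format: with N clock cycles per nanosecond, one register LSB is worth (2^-32)/N, so the number of LSBs per ppm is (1/10^6)/((2^-32)/N) = N * 2^32 / 10^6, roughly 17179.87 for the LAN8814 (N = 4) and 34359.74 for the LAN8841 (N = 8). A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
        /* (1/1000000) / ((2^-32) / cycles) == cycles * 2^32 / 10^6 */
        double lan8814 = 4.0 * 4294967296.0 / 1e6;      /* ~17179.87 */
        double lan8841 = 8.0 * 4294967296.0 / 1e6;      /* ~34359.74 */

        /* the driver stores these as the integer constants 17179 and 34360 */
        printf("LAN8814 1ppm ~ %.2f, LAN8841 1ppm ~ %.2f\n",
               lan8814, lan8841);
        return 0;
}
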
#define PTP_RX_VERSION 0x0248
#define PTP_TX_VERSION 0x0288
#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
@@ -154,11 +167,13 @@
#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+#define PTP_CLOCK_SET_SEC_HI 0x0205
#define PTP_CLOCK_SET_SEC_MID 0x0206
#define PTP_CLOCK_SET_SEC_LO 0x0207
#define PTP_CLOCK_SET_NS_HI 0x0208
#define PTP_CLOCK_SET_NS_LO 0x0209
+#define PTP_CLOCK_READ_SEC_HI 0x0229
#define PTP_CLOCK_READ_SEC_MID 0x022A
#define PTP_CLOCK_READ_SEC_LO 0x022B
#define PTP_CLOCK_READ_NS_HI 0x022C
@@ -2592,35 +2607,31 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
}
static void lan8814_ptp_clock_set(struct phy_device *phydev,
- u32 seconds, u32 nano_seconds)
+ time64_t sec, u32 nsec)
{
- u32 sec_low, sec_high, nsec_low, nsec_high;
-
- sec_low = seconds & 0xffff;
- sec_high = (seconds >> 16) & 0xffff;
- nsec_low = nano_seconds & 0xffff;
- nsec_high = (nano_seconds >> 16) & 0x3fff;
-
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
- u32 *seconds, u32 *nano_seconds)
+ time64_t *sec, u32 *nsec)
{
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
- *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
- *seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
- *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec <<= 16;
+ *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
}
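
With SEC_HI added, the seconds value spans 48 bits over three 16-bit registers (HI/MID/LO), which is why the accessors now take a time64_t. A standalone sketch of splitting such a value into register words and reassembling it the way lan8814_ptp_clock_get() does:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t sec = 0x123456789abLL;          /* example 48-bit seconds */

        uint16_t lo  = (uint16_t)sec;           /* lower_16_bits() */
        uint16_t mid = (uint16_t)(sec >> 16);   /* upper_16_bits() */
        uint16_t hi  = (uint16_t)(sec >> 32);   /* upper_32_bits(), truncated
                                                 * by the 16-bit register */

        int64_t back = ((int64_t)hi << 32) | ((int64_t)mid << 16) | lo;

        printf("hi=0x%04x mid=0x%04x lo=0x%04x -> 0x%llx\n",
               (unsigned)hi, (unsigned)mid, (unsigned)lo,
               (unsigned long long)back);
        return 0;
}
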
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -2630,7 +2641,7 @@ static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
ptp_clock_info);
struct phy_device *phydev = shared->phydev;
u32 nano_seconds;
- u32 seconds;
+ time64_t seconds;
mutex_lock(&shared->shared_lock);
lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
@@ -2660,38 +2671,37 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
{
u32 nano_seconds_step;
u64 abs_time_step_ns;
- u32 unsigned_seconds;
+ time64_t set_seconds;
u32 nano_seconds;
u32 remainder;
s32 seconds;
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds += remainder;
if (nano_seconds >= 1000000000) {
- unsigned_seconds++;
+ set_seconds++;
nano_seconds -= 1000000000;
}
- lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
} else if (time_step_ns < -15000000000LL) {
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds_step = remainder;
if (nano_seconds < nano_seconds_step) {
- unsigned_seconds--;
+ set_seconds--;
nano_seconds += 1000000000;
}
nano_seconds -= nano_seconds_step;
- lan8814_ptp_clock_set(phydev, unsigned_seconds,
- nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
}
@@ -3285,6 +3295,33 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
return 0;
}
+static void lan8814_clear_2psp_bit(struct phy_device *phydev)
+{
+ u16 val;
+
+ /* It was noticed that when traffic is passing through the PHY and the
+ * cable is removed, the LED stays on even though there is no
+ * link.
+ */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
+ val &= ~LAN8814_EEE_STATE_MASK2P5P;
+ lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+}
+
+static void lan8814_update_meas_time(struct phy_device *phydev)
+{
+ u16 val;
+
+ /* Setting the measurement time to a value of 0xb allows cables
+ * longer than 100m to be used. This configuration can be used
+ * regardless of the mode of operation of the PHY.
+ */
+ val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
+ val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
+ val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
+ lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+}
+
static int lan8814_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -3321,6 +3358,10 @@ static int lan8814_probe(struct phy_device *phydev)
lan8814_ptp_init(phydev);
+ /* Errata workarounds */
+ lan8814_clear_2psp_bit(phydev);
+ lan8814_update_meas_time(phydev);
+
return 0;
}
@@ -4118,8 +4159,8 @@ static int lan8841_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
faster = false;
}
- rate = LAN8814_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
- rate += (LAN8814_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+ rate = LAN8841_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (LAN8841_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
mutex_lock(&ptp_priv->ptp_lock);
phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_HI,
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index ea1073adc5a1..b2d36a3a96f1 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -274,6 +274,14 @@ static int gpy_config_init(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
+static int gpy21x_config_init(struct phy_device *phydev)
+{
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces);
+
+ return gpy_config_init(phydev);
+}
+
static int gpy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -867,7 +875,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY211B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -884,7 +892,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY211C),
.name = "Maxlinear Ethernet GPY211C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -902,7 +910,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY212B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -919,7 +927,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY212C),
.name = "Maxlinear Ethernet GPY212C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -937,7 +945,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPYx15B_MASK,
.name = "Maxlinear Ethernet GPY215B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -954,7 +962,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY215C),
.name = "Maxlinear Ethernet GPY215C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 747d14bf152c..5695935fdce9 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -208,7 +208,8 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
MDIO_AN_T1_ADV_L_PAUSE_ASYM;
- adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+ adv_m_mask = MDIO_AN_T1_ADV_M_1000BT1 | MDIO_AN_T1_ADV_M_100BT1 |
+ MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -706,6 +707,22 @@ int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
changed = 1;
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ val = linkmode_to_mii_eee_cap2_t(adv);
+
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV2,
+ MDIO_EEE_2_5GT | MDIO_EEE_5GT,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
val = linkmode_adv_to_mii_10base_t1_t(adv);
@@ -745,6 +762,17 @@ int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv)
mii_eee_cap1_mod_linkmode_t(adv, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(adv, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
@@ -781,6 +809,17 @@ static int genphy_c45_read_eee_lpa(struct phy_device *phydev,
mii_eee_cap1_mod_linkmode_t(lpa, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.17 EEE link partner ability 2
+ * (Register 7.63)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(lpa, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.26 10BASE-T1 AN status register
@@ -831,6 +870,30 @@ static int genphy_c45_read_eee_cap1(struct phy_device *phydev)
}
/**
+ * genphy_c45_read_eee_cap2 - read supported EEE link modes from register 3.21
+ * @phydev: target phy_device struct
+ */
+static int genphy_c45_read_eee_cap2(struct phy_device *phydev)
+{
+ int val;
+
+ /* IEEE 802.3-2022 45.2.3.11 EEE control and capability 2
+ * (Register 3.21)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE2);
+ if (val < 0)
+ return val;
+
+ /* IEEE 802.3-2022 45.2.3.11 says 9 bits are reserved. */
+ if (val == 0xffff)
+ return 0;
+
+ mii_eee_cap2_mod_linkmode_sup_t(phydev->supported_eee, val);
+
+ return 0;
+}
+
+/**
* genphy_c45_read_eee_abilities - read supported EEE link modes
* @phydev: target phy_device struct
*/
@@ -848,6 +911,13 @@ int genphy_c45_read_eee_abilities(struct phy_device *phydev)
return val;
}
+ /* Same for cap2 (3.21) */
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP2_FEATURES)) {
+ val = genphy_c45_read_eee_cap2(phydev);
+ if (val)
+ return val;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported)) {
/* IEEE 802.3cg-2019 45.2.1.186b 10BASE-T1L PMA status register
@@ -1443,17 +1513,17 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
/**
* genphy_c45_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it reports the Supported/Advertisement/LP Advertisement
* capabilities.
*/
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
- bool overflow = false, is_enabled;
+ bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
@@ -1462,17 +1532,9 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
data->eee_enabled = is_enabled;
data->eee_active = ret;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->supported,
- phydev->supported_eee))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->advertised, adv))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->lp_advertised, lp))
- overflow = true;
-
- if (overflow)
- phydev_warn(phydev, "Not all supported or advertised EEE link modes were passed to the user space\n");
+ linkmode_copy(data->supported, phydev->supported_eee);
+ linkmode_copy(data->advertised, adv);
+ linkmode_copy(data->lp_advertised, lp);
return 0;
}
@@ -1481,50 +1543,53 @@ EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
/**
* genphy_c45_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: sets the Supported/Advertisement/LP Advertisement
* capabilities. If eee_enabled is false, no link modes are
* advertised, but the previously advertised link modes are
* retained. This allows EEE to be enabled/disabled in a
* non-destructive way.
+ * Returns either an error code, 0 if there was no change, or a positive
+ * value if there was a change which triggered auto-negotiation.
*/
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
int ret;
if (data->eee_enabled) {
- if (data->advertised) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
+ unsigned long *adv = data->advertised;
- ethtool_convert_legacy_u32_to_link_mode(adv,
- data->advertised);
- linkmode_andnot(adv, adv, phydev->supported_eee);
- if (!linkmode_empty(adv)) {
+ if (!linkmode_empty(adv)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ if (linkmode_andnot(tmp, adv, phydev->supported_eee)) {
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
-
- ethtool_convert_legacy_u32_to_link_mode(phydev->advertising_eee,
- data->advertised);
} else {
- linkmode_copy(phydev->advertising_eee,
- phydev->supported_eee);
+ adv = phydev->supported_eee;
}
- phydev->eee_enabled = true;
- } else {
- phydev->eee_enabled = false;
+ linkmode_copy(phydev->advertising_eee, adv);
}
+ phydev->eee_enabled = data->eee_enabled;
+
ret = genphy_c45_an_config_eee_aneg(phydev);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return phy_restart_aneg(phydev);
+ if (ret > 0) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
- return 0;
+ /* explicitly return 1, otherwise (ret > 0) value will be
+ * overwritten by phy_restart_aneg().
+ */
+ return 1;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3376e58e2b88..c4236564c1cd 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -983,9 +983,17 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
+ err = genphy_c45_eee_is_active(phydev,
+ NULL, NULL, NULL);
+ if (err <= 0)
+ phydev->enable_tx_lpi = false;
+ else
+ phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled;
+
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
phydev->state = PHY_NOLINK;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
@@ -1290,7 +1298,6 @@ int phy_disable_interrupts(struct phy_device *phydev)
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
- struct phy_driver *drv = phydev->drv;
irqreturn_t ret;
/* Wakeup interrupts may occur during a system sleep transition.
@@ -1316,7 +1323,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
}
mutex_lock(&phydev->lock);
- ret = drv->handle_interrupt(phydev);
+ ret = phydev->drv->handle_interrupt(phydev);
mutex_unlock(&phydev->lock);
return ret;
@@ -1632,12 +1639,12 @@ EXPORT_SYMBOL(phy_get_eee_err);
/**
* phy_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
- * Description: it reportes the Supported/Advertisement/LP Advertisement
- * capabilities.
+ * Description: reports the Supported/Advertisement/LP Advertisement
+ * capabilities, etc.
*/
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1646,6 +1653,7 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_get_eee(phydev, data);
+ eeecfg_to_eee(data, &phydev->eee_cfg);
mutex_unlock(&phydev->lock);
return ret;
@@ -1653,13 +1661,43 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
EXPORT_SYMBOL(phy_ethtool_get_eee);
/**
+ * phy_ethtool_set_eee_noneg - Adjusts MAC LPI configuration without PHY
+ * renegotiation
+ * @phydev: pointer to the target PHY device structure
+ * @data: pointer to the ethtool_keee structure containing the new EEE settings
+ *
+ * This function updates the Energy Efficient Ethernet (EEE) configuration
+ * for cases where only the MAC's Low Power Idle (LPI) configuration changes,
+ * without triggering PHY renegotiation. It ensures that the MAC is properly
+ * informed of the new LPI settings by cycling the link down and up, which
+ * is necessary for the MAC to adopt the new configuration. This adjustment
+ * is done only if there is a change in the tx_lpi_enabled or tx_lpi_timer
+ * configuration.
+ */
+static void phy_ethtool_set_eee_noneg(struct phy_device *phydev,
+ struct ethtool_keee *data)
+{
+ if (phydev->eee_cfg.tx_lpi_enabled != data->tx_lpi_enabled ||
+ phydev->eee_cfg.tx_lpi_timer != data->tx_lpi_timer) {
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg);
+ if (phydev->link) {
+ phydev->link = false;
+ phy_link_down(phydev);
+ phydev->link = true;
+ phy_link_up(phydev);
+ }
+ }
+}
+
+/**
* phy_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it is to program the Advertisement EEE register.
*/
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1668,9 +1706,14 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_set_eee(phydev, data);
+ if (ret >= 0) {
+ if (ret == 0)
+ phy_ethtool_set_eee_noneg(phydev, data);
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ }
mutex_unlock(&phydev->lock);
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3611ea64875e..8297ef681bf5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -148,6 +148,14 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
+static const int phy_eee_cap2_features_array[] = {
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
+
static void features_init(void)
{
/* 10/100 half/full*/
@@ -232,6 +240,9 @@ static void features_init(void)
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
+ linkmode_set_bit_array(phy_eee_cap2_features_array,
+ ARRAY_SIZE(phy_eee_cap2_features_array),
+ phy_eee_cap2_features);
}
@@ -780,7 +791,7 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
* and identifiers in @c45_ids.
*
* Returns zero on success, %-EIO on bus access error, or %-ENODEV if
- * the "devices in package" is invalid.
+ * the "devices in package" is invalid or no device responds.
*/
static int get_phy_c45_ids(struct mii_bus *bus, int addr,
struct phy_c45_device_ids *c45_ids)
@@ -803,7 +814,11 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr,
*/
ret = phy_c45_probe_present(bus, addr, i);
if (ret < 0)
- return -EIO;
+ /* returning -ENODEV doesn't stop bus
+ * scanning
+ */
+ return (phy_reg == -EIO ||
+ phy_reg == -ENODEV) ? -ENODEV : -EIO;
if (!ret)
continue;
@@ -1413,6 +1428,11 @@ int phy_sfp_probe(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_sfp_probe);
+static bool phy_drv_supports_irq(const struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->handle_interrupt;
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1527,6 +1547,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->dev_flags & PHY_F_NO_IRQ)
phydev->irq = PHY_POLL;
+ if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1592,7 +1615,6 @@ EXPORT_SYMBOL(phy_attach_direct);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface)
{
- struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
struct device *d;
int rc;
@@ -1603,7 +1625,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
- d = bus_find_device_by_name(bus, NULL, bus_id);
+ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
return ERR_PTR(-ENODEV);
@@ -1700,6 +1722,7 @@ int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
shared->priv_size = priv_size;
}
shared->base_addr = base_addr;
+ shared->np = NULL;
refcount_set(&shared->refcnt, 1);
bus->shared[base_addr] = shared;
} else {
@@ -1723,6 +1746,63 @@ err_unlock:
EXPORT_SYMBOL_GPL(phy_package_join);
/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have the np value
+ * filled in, so that additional DT-defined properties can be used in the
+ * PHY-specific probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling
+ * phy_package_join() with the same cookie but a different priv_size is an
+ * error, as is a parent node that is missing, not valid or doesn't match
+ * the expected PHY package node name.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
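As a rough usage sketch (not part of this patch), a PHY driver placed under an "ethernet-phy-package" DT node could join the package at probe time and read package-wide properties through the np pointer stored in the shared struct. The driver name, the my_phy_shared type and the "vendor,example-mode" property below are made up for illustration:

/* Illustrative only: a hypothetical PHY driver joining a DT-defined package */
#include <linux/of.h>
#include <linux/phy.h>

struct my_phy_shared {
	u32 mode;		/* example package-wide private data */
};

static int my_phy_probe(struct phy_device *phydev)
{
	u32 pkg_mode;
	int ret;

	/* Join the package described by the parent "ethernet-phy-package"
	 * node; this fails if that node is missing or malformed.
	 */
	ret = of_phy_package_join(phydev, sizeof(struct my_phy_shared));
	if (ret)
		return ret;

	/* The package node is now reachable via the shared struct, so
	 * package-wide DT properties can be read here. The property name
	 * "vendor,example-mode" is made up.
	 */
	if (of_property_read_u32(phydev->shared->np, "vendor,example-mode",
				 &pkg_mode))
		pkg_mode = 0;

	phydev_dbg(phydev, "package mode %u\n", pkg_mode);

	return 0;
}

static void my_phy_remove(struct phy_device *phydev)
{
	/* Drops the package refcount and puts the DT node taken at join time */
	phy_package_leave(phydev);
}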
+
+/**
* phy_package_leave - leave a common PHY group
* @phydev: target phy_device struct
*
@@ -1738,6 +1818,10 @@ void phy_package_leave(struct phy_device *phydev)
if (!shared)
return;
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
@@ -1791,6 +1875,40 @@ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
EXPORT_SYMBOL_GPL(devm_phy_package_join);
/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). For shared storage fetched by this function,
+ * phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
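A similar sketch for the managed variant; my_phy_devm_probe is hypothetical, and passing 0 assumes no shared private data is needed:

/* Illustrative only: the devres-managed variant; no explicit leave is needed */
#include <linux/phy.h>

static int my_phy_devm_probe(struct phy_device *phydev)
{
	/* Passing 0 means no shared private data is allocated here;
	 * phy_package_leave() runs automatically on driver detach.
	 */
	return devm_of_phy_package_join(&phydev->mdio.dev, phydev, 0);
}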
+
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -1859,7 +1977,7 @@ int phy_suspend(struct phy_device *phydev)
{
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
if (phydev->suspended)
@@ -1884,7 +2002,7 @@ EXPORT_SYMBOL(phy_suspend);
int __phy_resume(struct phy_device *phydev)
{
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
lockdep_assert_held(&phydev->lock);
@@ -2513,12 +2631,15 @@ EXPORT_SYMBOL(genphy_read_status);
/**
* genphy_c37_read_status - check the link status and update current link state
* @phydev: target phy_device struct
+ * @changed: pointer where to store if link changed
*
* Description: Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. This function is for Clause 37 1000Base-X mode.
+ *
+ * If the link has changed, @changed is set to true, false otherwise.
*/
-int genphy_c37_read_status(struct phy_device *phydev)
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed)
{
int lpa, err, old_link = phydev->link;
@@ -2528,9 +2649,13 @@ int genphy_c37_read_status(struct phy_device *phydev)
return err;
/* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) {
+ *changed = false;
return 0;
+ }
+ /* Signal link has changed */
+ *changed = true;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -2770,6 +2895,50 @@ void phy_advertise_supported(struct phy_device *phydev)
EXPORT_SYMBOL(phy_advertise_supported);
/**
+ * phy_advertise_eee_all - Advertise all supported EEE modes
+ * @phydev: target phy_device struct
+ *
+ * Description: By default phylib preserves the EEE advertising at the time of
+ * phy probing, which might be a subset of the supported EEE modes. Use this
+ * function when all supported EEE modes should be advertised. This does not
+ * trigger auto-negotiation, so it must be called before phy_start()/
+ * phylink_start(), which will start auto-negotiation.
+ */
+void phy_advertise_eee_all(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+}
+EXPORT_SYMBOL_GPL(phy_advertise_eee_all);
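As an illustrative sketch of the calling convention described above (my_mac_open is a made-up MAC driver callback):

/* Illustrative only: advertise all supported EEE modes before starting the PHY */
#include <linux/netdevice.h>
#include <linux/phy.h>

static int my_mac_open(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	/* Copy supported_eee into advertising_eee, then let phy_start()
	 * kick off auto-negotiation with that advertisement in place.
	 */
	phy_advertise_eee_all(phydev);
	phy_start(phydev);

	return 0;
}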
+
+/**
+ * phy_support_eee - Set initial EEE policy configuration
+ * @phydev: Target phy_device struct
+ *
+ * This function configures the initial policy for Energy Efficient Ethernet
+ * (EEE) on the specified PHY device, ensuring that EEE capabilities are
+ * advertised before the link is established. It should be called during PHY
+ * registration by the MAC driver and/or the PHY driver (for SmartEEE PHYs)
+ * if the MAC supports LPI or the PHY is able to compensate for the MAC's
+ * missing LPI functionality.
+ *
+ * The function sets default EEE policy parameters, including preparing the PHY
+ * to advertise EEE capabilities based on hardware support.
+ *
+ * It also sets the expected configuration for Low Power Idle (LPI) in the MAC
+ * driver. If the PHY framework determines that both local and remote
+ * advertisements support EEE, and the negotiated link mode is compatible with
+ * EEE, it will set enable_tx_lpi = true. The MAC driver is expected to act on
+ * this setting by enabling the LPI timer if enable_tx_lpi is set.
+ */
+void phy_support_eee(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+ phydev->eee_cfg.tx_lpi_enabled = true;
+ phydev->eee_cfg.eee_enabled = true;
+}
+EXPORT_SYMBOL(phy_support_eee);
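A minimal sketch of how a MAC driver might act on this policy, assuming the enable_tx_lpi handling described above; my_mac_connect, my_mac_adjust_link and my_mac_hw_set_lpi are made-up names:

/* Illustrative only: a MAC driver opting in to EEE and acting on enable_tx_lpi */
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Made-up hardware helper standing in for the driver's LPI timer control */
static void my_mac_hw_set_lpi(struct net_device *ndev, bool enable);

static int my_mac_connect(struct net_device *ndev, struct phy_device *phydev)
{
	/* Declare that this MAC handles LPI, so phylib enables and
	 * advertises EEE by default.
	 */
	phy_support_eee(phydev);

	return 0;
}

static void my_mac_adjust_link(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	/* phylib sets enable_tx_lpi only when both link partners negotiated
	 * an EEE-capable mode, so the LPI timer simply follows that state.
	 */
	if (phydev->link)
		my_mac_hw_set_lpi(ndev, phydev->enable_tx_lpi);
}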
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -2959,7 +3128,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
if (delay < 0)
return delay;
- if (delay && size == 0)
+ if (size == 0)
return delay;
if (delay < delay_values[0] || delay > delay_values[size - 1]) {
@@ -2992,11 +3161,6 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
-static bool phy_drv_supports_irq(struct phy_driver *phydrv)
-{
- return phydrv->config_intr && phydrv->handle_interrupt;
-}
-
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3097,6 +3261,7 @@ static int of_phy_led(struct phy_device *phydev,
struct device *dev = &phydev->mdio.dev;
struct led_init_data init_data = {};
struct led_classdev *cdev;
+ unsigned long modes = 0;
struct phy_led *phyled;
u32 index;
int err;
@@ -3114,6 +3279,21 @@ static int of_phy_led(struct phy_device *phydev,
if (index > U8_MAX)
return -EINVAL;
+ if (of_property_read_bool(led, "active-low"))
+ set_bit(PHY_LED_ACTIVE_LOW, &modes);
+ if (of_property_read_bool(led, "inactive-high-impedance"))
+ set_bit(PHY_LED_INACTIVE_HIGH_IMPEDANCE, &modes);
+
+ if (modes) {
+ /* Return error if asked to set polarity modes but not supported */
+ if (!phydev->drv->led_polarity_set)
+ return -EINVAL;
+
+ err = phydev->drv->led_polarity_set(phydev, index, modes);
+ if (err)
+ return err;
+ }
+
phyled->index = index;
if (phydev->drv->led_brightness_set)
cdev->brightness_set_blocking = phy_led_set_brightness;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ed0b4ccaa6a6..503fd7c40523 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2764,9 +2764,9 @@ EXPORT_SYMBOL_GPL(phylink_init_eee);
/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ * @eee: a pointer to a &struct ethtool_keee for the read parameters
*/
-int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
@@ -2782,9 +2782,9 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
/**
* phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ * @eee: a pointer to a &struct ethtool_keee for the desired parameters
*/
-int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig
new file mode 100644
index 000000000000..570626cc8e14
--- /dev/null
+++ b/drivers/net/phy/qcom/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_NET_PHYLIB
+ tristate
+
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ select QCOM_NET_PHYLIB
+ depends on REGULATOR
+ help
+ Currently supports the AR8030, AR8031, AR8033 and AR8035 models
+
+config QCA83XX_PHY
+ tristate "Qualcomm Atheros QCA833x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the internal QCA8337 (internal qca8k PHY) model
+
+config QCA808X_PHY
+ tristate "Qualcomm QCA808x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the QCA8081 model
+
+config QCA807X_PHY
+ tristate "Qualcomm QCA807x PHYs"
+ select QCOM_NET_PHYLIB
+ depends on OF_MDIO
+ help
+ Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
+ control PHY.
diff --git a/drivers/net/phy/qcom/Makefile b/drivers/net/phy/qcom/Makefile
new file mode 100644
index 000000000000..f24fb550babd
--- /dev/null
+++ b/drivers/net/phy/qcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o
+obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_QCA83XX_PHY) += qca83xx.o
+obj-$(CONFIG_QCA808X_PHY) += qca808x.o
+obj-$(CONFIG_QCA807X_PHY) += qca807x.o
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
new file mode 100644
index 000000000000..4717c59d51d0
--- /dev/null
+++ b/drivers/net/phy/qcom/at803x.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/net/phy/at803x.c
+ *
+ * Driver for Qualcomm Atheros AR803x PHY
+ *
+ * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
+#include <dt-bindings/net/qca-ar803x.h>
+
+#include "qcom.h"
+
+#define AT803X_LED_CONTROL 0x18
+
+#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
+#define AT803X_WOL_EN BIT(5)
+
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
+
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
+
+#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for 25 MHz output, all other frequencies need the PLL.
+ * The DSP as a clock reference is used in synchronous ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into low power state and disabled the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
+#define ATH9331_PHY_ID 0x004dd041
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
+#define ATH8035_PHY_ID 0x004dd072
+#define AT8030_PHY_ID_MASK 0xffffffef
+
+#define QCA9561_PHY_ID 0x004dd042
+
+#define AT803X_PAGE_FIBER 0
+#define AT803X_PAGE_COPPER 1
+
+/* don't turn off internal PLL */
+#define AT803X_KEEP_PLL_ENABLED BIT(0)
+#define AT803X_DISABLE_SMARTEEE BIT(1)
+
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct at803x_priv {
+ int flags;
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+};
+
+struct at803x_context {
+ u16 bmcr;
+ u16 advertise;
+ u16 control1000;
+ u16 int_enable;
+ u16 smart_speed;
+ u16 led_control;
+};
+
+static int at803x_write_page(struct phy_device *phydev, int page)
+{
+ int mask;
+ int set;
+
+ if (page == AT803X_PAGE_COPPER) {
+ set = AT803X_BT_BX_REG_SEL;
+ mask = 0;
+ } else {
+ set = 0;
+ mask = AT803X_BT_BX_REG_SEL;
+ }
+
+ return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
+}
+
+static int at803x_read_page(struct phy_device *phydev)
+{
+ int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+
+ if (ccr < 0)
+ return ccr;
+
+ if (ccr & AT803X_BT_BX_REG_SEL)
+ return AT803X_PAGE_COPPER;
+
+ return AT803X_PAGE_FIBER;
+}
+
+static int at803x_enable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
+ AT803X_DEBUG_RX_CLK_DLY_EN);
+}
+
+static int at803x_enable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
+ AT803X_DEBUG_TX_CLK_DLY_EN);
+}
+
+static int at803x_disable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
+}
+
+static int at803x_disable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
+}
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+ struct at803x_context *context)
+{
+ context->bmcr = phy_read(phydev, MII_BMCR);
+ context->advertise = phy_read(phydev, MII_ADVERTISE);
+ context->control1000 = phy_read(phydev, MII_CTRL1000);
+ context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+ context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+ context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+ const struct at803x_context *context)
+{
+ phy_write(phydev, MII_BMCR, context->bmcr);
+ phy_write(phydev, MII_ADVERTISE, context->advertise);
+ phy_write(phydev, MII_CTRL1000, context->control1000);
+ phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+ phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+ phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_INTR_ENABLE_WOL;
+
+ if (wol_enabled)
+ value = BMCR_ISOLATE;
+ else
+ value = BMCR_PDOWN;
+
+ phy_modify(phydev, MII_BMCR, 0, value);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ u32 freq, strength, tw;
+ unsigned int sel;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+ priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int at803x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct at803x_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = at803x_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int at803x_get_features(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err;
+
+ err = genphy_read_abilities(phydev);
+ if (err)
+ return err;
+
+ if (phydev->drv->phy_id != ATH8031_PHY_ID)
+ return 0;
+
+ /* AR8031/AR8033 have different status registers
+ * for copper and fiber operation. However, the
+ * extended status register is the same for both
+ * operation modes.
+ *
+ * As a result of that, ESTATUS_1000_XFULL is set
+ * to 1 even when operating in copper TP mode.
+ *
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
+ */
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int at803x_smarteee_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ u16 mask = 0, val = 0;
+ int ret;
+
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
+ return 0;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
+}
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
+}
+
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is hibernation mode enabled. After
+ * software reset, the value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
+static int at803x_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The RX and TX delay default is:
+ * after HW reset: RX delay enabled and TX delay disabled
+ * after SW reset: RX delay enabled, while TX delay retains the
+ * value before reset.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ ret = at803x_enable_rx_delay(phydev);
+ else
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ ret = at803x_enable_tx_delay(phydev);
+ else
+ ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Ar803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
+}
+
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+ /*
+ * Conduct a hardware reset for AT8030 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+ * in the FIFO. In such cases, the FIFO enters an error mode it
+ * cannot recover from by software.
+ */
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ phy_device_reset(phydev, 1);
+ usleep_range(1000, 2000);
+ phy_device_reset(phydev, 0);
+ usleep_range(1000, 2000);
+
+ at803x_context_restore(phydev, &context);
+
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
+ }
+}
+
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
+ return genphy_config_aneg(phydev);
+}
+
+static int at803x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case AT803X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool at803x_cdt_test_failed(u16 status)
+{
+ return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+ AT803X_CDT_STATUS_STAT_FAIL;
+}
+
+static bool at803x_cdt_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ int ret, val;
+
+ val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, AT803X_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ if (at803x_cdt_test_failed(val))
+ return 0;
+
+ ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ at803x_cable_test_result_trans(val));
+
+ if (at803x_cdt_fault_length_valid(val)) {
+ val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
+ ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ at803x_cdt_fault_length(val));
+ }
+
+ return 1;
+}
+
+static int at803x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished, unsigned long pair_mask)
+{
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* According to the datasheet the CDT can be performed when
+ * there is no link partner or when the link partner is
+ * auto-negotiating. Starting the test will restart the AN
+ * automatically. It seems that by doing this repeatedly we will
+ * get a slot where our link partner won't disturb our
+ * measurement.
+ */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = at803x_cable_test_one_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ clear_bit(pair, &pair_mask);
+ }
+ if (pair_mask)
+ msleep(250);
+ }
+
+ *finished = true;
+
+ return 0;
+}
+
+static void at803x_cable_test_autoneg(struct phy_device *phydev)
+{
+ /* Enable auto-negotiation, but advertise no capabilities, no link
+ * will be established. A restart of the auto-negotiation is not
+ * required, because the cable test will automatically break the link.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static const struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules support 10G modes as well as others we support.
+ * Mask out non-supported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at8031_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at8031_sfp_insert,
+};
+
+static int at8031_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+ "vddio");
+ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ return phy_sfp_probe(phydev, &at8031_sfp_ops);
+}
+
+static int at8031_probe(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int mode_cfg;
+ int ccr;
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ ret = at8031_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if (ccr < 0)
+ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
+ /* Disable WoL in 1588 register which is enabled
+ * by default
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+}
+
+static int at8031_config_init(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ return at803x_config_init(phydev);
+}
+
+static int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First setup MAC address and enable WOL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WOL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+
+static int at8031_config_intr(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err, value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+ priv->is_fiber) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+
+ err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+ if (err)
+ return err;
+ }
+
+ return at803x_config_intr(phydev);
+}
+
+/* AR8031 and AR8033 share the same read status logic */
+static int at8031_read_status(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ bool changed;
+
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev, &changed);
+
+ return at803x_read_status(phydev);
+}
+
+/* AR8031 and AR8035 share the same cable test get status reg */
+static int at8031_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0xf);
+}
+
+/* AR8031 and AR8035 share the same cable test start logic */
+static int at8031_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ phy_write(phydev, MII_CTRL1000, 0);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+static int at8032_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0x3);
+}
+
+static int at8035_parse_dt(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The mask is set by the generic at803x_parse_dt()
+ * if the property is present. Assume the property is
+ * present when the mask is non-zero.
+ */
+ if (priv->clk_25m_mask) {
+ /* Fixup for the AR8030/AR8035. This chip has another mask and
+ * doesn't support the DSP reference, i.e. the lowest bit of the
+ * mask. The upper two bits select the same frequencies. Mask
+ * out the lowest bit here.
+ *
+ * Warning:
+ * There was no datasheet for the AR8030 available so this is
+ * just a guess. But the AR8035 is listed as pin compatible
+ * to the AR8030 so there might be a good chance it works on
+ * the AR8030 too.
+ */
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+
+ return 0;
+}
+
+/* AR8030 and AR8035 share the same special mask for clk_25m */
+static int at8035_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ return at8035_parse_dt(phydev);
+}
+
+static struct phy_driver at803x_driver[] = {
+{
+ /* Qualcomm Atheros AR8035 */
+ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
+ .name = "Qualcomm Atheros AR8035",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8035_probe,
+ .config_aneg = at803x_config_aneg,
+ .config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_GBIT_FEATURES */
+ .read_status = at803x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8030 */
+ .phy_id = ATH8030_PHY_ID,
+ .name = "Qualcomm Atheros AR8030",
+ .phy_id_mask = AT8030_PHY_ID_MASK,
+ .probe = at8035_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+}, {
+ /* Qualcomm Atheros AR8031/AR8033 */
+ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
+ .name = "Qualcomm Atheros AR8031/AR8033",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8031_probe,
+ .config_init = at8031_config_init,
+ .config_aneg = at803x_config_aneg,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .read_page = at803x_read_page,
+ .write_page = at803x_write_page,
+ .get_features = at803x_get_features,
+ .read_status = at8031_read_status,
+ .config_intr = at8031_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8032 */
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, };
+
+module_phy_driver(at803x_driver);
+
+static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
new file mode 100644
index 000000000000..672c6929119a
--- /dev/null
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ *
+ * Qualcomm QCA8072 and QCA8075 PHY driver
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/bitfield.h>
+#include <linux/gpio/driver.h>
+#include <linux/sfp.h>
+
+#include "qcom.h"
+
+#define QCA807X_CHIP_CONFIGURATION 0x1f
+#define QCA807X_BT_BX_REG_SEL BIT(15)
+#define QCA807X_BT_BX_REG_SEL_FIBER 0
+#define QCA807X_BT_BX_REG_SEL_COPPER 1
+#define QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK GENMASK(3, 0)
+#define QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII 4
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER 3
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER 0
+
+#define QCA807X_MEDIA_SELECT_STATUS 0x1a
+#define QCA807X_MEDIA_DETECTED_COPPER BIT(5)
+#define QCA807X_MEDIA_DETECTED_1000_BASE_X BIT(4)
+#define QCA807X_MEDIA_DETECTED_100_BASE_FX BIT(3)
+
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION 0x807e
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN BIT(0)
+
+#define QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH 0x801a
+#define QCA807X_CONTROL_DAC_MASK GENMASK(2, 0)
+/* List of tweaks enabled by this bit:
+ * - With both FULL amplitude and FULL bias current: bias current
+ * is set to half.
+ * - With only DSP amplitude: bias current is set to half and
+ * is set to 1/4 with cable < 10m.
+ * - With DSP bias current (includes both DSP amplitude and
+ * DSP bias current): bias current is half the detected current
+ * with cable < 10m.
+ */
+#define QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK BIT(2)
+#define QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT BIT(1)
+#define QCA807X_CONTROL_DAC_DSP_AMPLITUDE BIT(0)
+
+#define QCA807X_MMD7_LED_100N_1 0x8074
+#define QCA807X_MMD7_LED_100N_2 0x8075
+#define QCA807X_MMD7_LED_1000N_1 0x8076
+#define QCA807X_MMD7_LED_1000N_2 0x8077
+
+#define QCA807X_MMD7_LED_CTRL(x) (0x8074 + ((x) * 2))
+#define QCA807X_MMD7_LED_FORCE_CTRL(x) (0x8075 + ((x) * 2))
+
+/* LED hw control pattern for fiber port */
+#define QCA807X_LED_FIBER_PATTERN_MASK GENMASK(11, 1)
+#define QCA807X_LED_FIBER_TXACT_BLK_EN BIT(10)
+#define QCA807X_LED_FIBER_RXACT_BLK_EN BIT(9)
+#define QCA807X_LED_FIBER_FDX_ON_EN BIT(6)
+#define QCA807X_LED_FIBER_HDX_ON_EN BIT(5)
+#define QCA807X_LED_FIBER_1000BX_ON_EN BIT(2)
+#define QCA807X_LED_FIBER_100FX_ON_EN BIT(1)
+
+/* Some devices repurpose the LED as GPIO out */
+#define QCA807X_GPIO_FORCE_EN QCA808X_LED_FORCE_EN
+#define QCA807X_GPIO_FORCE_MODE_MASK QCA808X_LED_FORCE_MODE_MASK
+
+#define QCA807X_FUNCTION_CONTROL 0x10
+#define QCA807X_FC_MDI_CROSSOVER_MODE_MASK GENMASK(6, 5)
+#define QCA807X_FC_MDI_CROSSOVER_AUTO 3
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX 1
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI 0
+
+/* PQSGMII Analog PHY specific */
+#define PQSGMII_CTRL_REG 0x0
+#define PQSGMII_ANALOG_SW_RESET BIT(6)
+#define PQSGMII_DRIVE_CONTROL_1 0xb
+#define PQSGMII_TX_DRIVER_MASK GENMASK(7, 4)
+#define PQSGMII_TX_DRIVER_140MV 0x0
+#define PQSGMII_TX_DRIVER_160MV 0x1
+#define PQSGMII_TX_DRIVER_180MV 0x2
+#define PQSGMII_TX_DRIVER_200MV 0x3
+#define PQSGMII_TX_DRIVER_220MV 0x4
+#define PQSGMII_TX_DRIVER_240MV 0x5
+#define PQSGMII_TX_DRIVER_260MV 0x6
+#define PQSGMII_TX_DRIVER_280MV 0x7
+#define PQSGMII_TX_DRIVER_300MV 0x8
+#define PQSGMII_TX_DRIVER_320MV 0x9
+#define PQSGMII_TX_DRIVER_400MV 0xa
+#define PQSGMII_TX_DRIVER_500MV 0xb
+#define PQSGMII_TX_DRIVER_600MV 0xc
+#define PQSGMII_MODE_CTRL 0x6d
+#define PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK BIT(0)
+#define PQSGMII_MMD3_SERDES_CONTROL 0x805a
+
+#define PHY_ID_QCA8072 0x004dd0b2
+#define PHY_ID_QCA8075 0x004dd0b1
+
+#define QCA807X_COMBO_ADDR_OFFSET 4
+#define QCA807X_PQSGMII_ADDR_OFFSET 5
+#define SERDES_RESET_SLEEP 100
+
+enum qca807x_global_phy {
+ QCA807X_COMBO_ADDR = 4,
+ QCA807X_PQSGMII_ADDR = 5,
+};
+
+struct qca807x_shared_priv {
+ unsigned int package_mode;
+ u32 tx_drive_strength;
+};
+
+struct qca807x_gpio_priv {
+ struct phy_device *phy;
+};
+
+struct qca807x_priv {
+ bool dac_full_amplitude;
+ bool dac_full_bias_current;
+ bool dac_disable_bias_current_tweak;
+};
+
+static int qca807x_cable_test_start(struct phy_device *phydev)
+{
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int qca807x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+ break;
+ case PORT_FIBRE:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_TXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_RXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_100FX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_1000BX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_HDX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_FDX_ON_EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca807x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 1)
+ return -EINVAL;
+
+ return qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca807x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, mask, offload_trigger = 0;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ ret = qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca807x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg, mask,
+ offload_trigger);
+}
+
+static bool qca807x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return false;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca807x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca807x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA807X_LED_FIBER_TXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA807X_LED_FIBER_RXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA807X_LED_FIBER_100FX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA807X_LED_FIBER_1000BX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA807X_LED_FIBER_HDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA807X_LED_FIBER_FDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg, mask;
+
+ if (index > 1)
+ return -EINVAL;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, mask);
+}
+
+static int qca807x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* If we are turning the LED off, reset any hw control rule */
+ if (!value) {
+ ret = qca807x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca807x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+#ifdef CONFIG_GPIOLIB
+static int qca807x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+
+ return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
+}
+
+static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+ val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
+ val |= QCA807X_GPIO_FORCE_EN;
+ val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
+
+ phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
+}
+
+static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ qca807x_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int qca807x_gpio(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca807x_gpio_priv *priv;
+ struct gpio_chip *gc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phy = phydev;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ gc->label = dev_name(dev);
+ gc->base = -1;
+ gc->ngpio = 2;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = true;
+ gc->get_direction = qca807x_gpio_get_direction;
+ gc->direction_output = qca807x_gpio_dir_out;
+ gc->get = qca807x_gpio_get;
+ gc->set = qca807x_gpio_set;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+#endif
+
+static int qca807x_read_fiber_status(struct phy_device *phydev)
+{
+ bool changed;
+ int ss, err;
+
+ err = genphy_c37_read_status(phydev, &changed);
+ if (err || !changed)
+ return err;
+
+ /* Read the QCA807x PHY-Specific Status register fiber page,
+ * which indicates the speed and duplex that the PHY is actually
+ * using, irrespective of whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ switch (FIELD_GET(AT803X_SS_SPEED_MASK, ss)) {
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ }
+
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int qca807x_read_status(struct phy_device *phydev)
+{
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ switch (phydev->port) {
+ case PORT_FIBRE:
+ return qca807x_read_fiber_status(phydev);
+ case PORT_TP:
+ return at803x_read_status(phydev);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return at803x_read_status(phydev);
+}
+
+static int qca807x_phy_package_probe_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ unsigned int tx_drive_strength;
+ const char *package_mode_name;
+
+ /* Default to 600mW if not defined */
+ if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ &tx_drive_strength))
+ tx_drive_strength = 600;
+
+ switch (tx_drive_strength) {
+ case 140:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_140MV;
+ break;
+ case 160:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_160MV;
+ break;
+ case 180:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_180MV;
+ break;
+ case 200:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_200MV;
+ break;
+ case 220:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_220MV;
+ break;
+ case 240:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_240MV;
+ break;
+ case 260:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_260MV;
+ break;
+ case 280:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_280MV;
+ break;
+ case 300:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_300MV;
+ break;
+ case 320:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_320MV;
+ break;
+ case 400:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_400MV;
+ break;
+ case 500:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_500MV;
+ break;
+ case 600:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_600MV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ priv->package_mode = PHY_INTERFACE_MODE_NA;
+ if (!of_property_read_string(shared->np, "qcom,package-mode",
+ &package_mode_name)) {
+ if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_PSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_PSGMII;
+ else if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_QSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_QSGMII;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ int val, ret;
+
+ /* Make sure the PHY follows the PHY package mode if one is enforced */
+ if (priv->package_mode != PHY_INTERFACE_MODE_NA &&
+ phydev->interface != priv->package_mode)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* Set correct PHY package mode */
+ val = __phy_package_read(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION);
+ val &= ~QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK;
+ /* package_mode can be QSGMII or PSGMII and is validated
+ * in probe_once.
+ * With package_mode left at NA, default to PSGMII.
+ */
+ switch (priv->package_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_PSGMII:
+ default:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER;
+ }
+ ret = __phy_package_write(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION, val);
+ if (ret)
+ goto exit;
+
+ /* After the mode change a SerDes reset is required */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val &= ~PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ msleep(SERDES_RESET_SLEEP);
+
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val |= PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ /* Workaround to enable AZ transmitting ability */
+ val = __phy_package_read_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL);
+ val &= ~PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK;
+ ret = __phy_package_write_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL, val);
+ if (ret)
+ goto exit;
+
+ /* Set PQSGMII TX AMP strength */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1);
+ val &= ~PQSGMII_TX_DRIVER_MASK;
+ val |= FIELD_PREP(PQSGMII_TX_DRIVER_MASK, priv->tx_drive_strength);
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1, val);
+ if (ret)
+ goto exit;
+
+ /* Prevent PSGMII going into hibernation via PSGMII self test */
+ val = __phy_package_read_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL);
+ val &= ~BIT(1);
+ ret = __phy_package_write_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL, val);
+
+exit:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+ int ret;
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
+
+ dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_100BASEX:
+ /* Set PHY mode to PSGMII combo (1/4 copper + combo ports) mode */
+ ret = phy_modify(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
+ QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
+ /* Enable fiber mode autodetection (1000Base-X or 100Base-FX) */
+ ret = phy_set_bits_mmd(phydev,
+ MDIO_MMD_AN,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN);
+ /* Select fiber page */
+ ret = phy_clear_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_FIBRE;
+ break;
+ default:
+ dev_err(&phydev->mdio.dev, "Incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void qca807x_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+
+ /* Select copper page */
+ phy_set_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_TP;
+}
+
+static const struct sfp_upstream_ops qca807x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = qca807x_sfp_insert,
+ .module_remove = qca807x_sfp_remove,
+};
+
+static int qca807x_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct qca807x_shared_priv *shared_priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct phy_package_shared *shared;
+ struct qca807x_priv *priv;
+ int ret;
+
+ ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_probe_once(phydev)) {
+ ret = qca807x_phy_package_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ shared = phydev->shared;
+ shared_priv = shared->priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dac_full_amplitude = of_property_read_bool(node, "qcom,dac-full-amplitude");
+ priv->dac_full_bias_current = of_property_read_bool(node, "qcom,dac-full-bias-current");
+ priv->dac_disable_bias_current_tweak = of_property_read_bool(node,
+ "qcom,dac-disable-bias-current-tweak");
+
+#if IS_ENABLED(CONFIG_GPIOLIB)
+ /* Make sure the DT does not mix a leds node with gpio-controller,
+ * to prevent registering LEDs whose usage would conflict with the
+ * gpio-controller driving the same pins.
+ */
+ if (of_find_property(node, "leds", NULL) &&
+ of_find_property(node, "gpio-controller", NULL)) {
+ phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
+ return -EINVAL;
+ }
+
+ /* Do not register a GPIO controller unless flagged for it */
+ if (of_property_read_bool(node, "gpio-controller")) {
+ ret = qca807x_gpio(phydev);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ /* Attach the SFP bus on the combo port */
+ if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+ ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
+ if (ret)
+ return ret;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+ }
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca807x_config_init(struct phy_device *phydev)
+{
+ struct qca807x_priv *priv = phydev->priv;
+ u16 control_dac;
+ int ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = qca807x_phy_package_config_init_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
+ control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+ if (!priv->dac_full_amplitude)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
+ if (!priv->dac_full_bias_current)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
+ if (!priv->dac_disable_bias_current_tweak)
+ control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
+ return phy_write_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH,
+ control_dac);
+}
+
+static struct phy_driver qca807x_drivers[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
+ .name = "Qualcomm QCA8072",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
+ .name = "Qualcomm QCA8075",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .led_brightness_set = qca807x_led_brightness_set,
+ .led_blink_set = qca807x_led_blink_set,
+ .led_hw_is_supported = qca807x_led_hw_is_supported,
+ .led_hw_control_set = qca807x_led_hw_control_set,
+ .led_hw_control_get = qca807x_led_hw_control_get,
+ },
+};
+module_phy_driver(qca807x_drivers);
+
+static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
+ { }
+};
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Qualcomm QCA807x PHY driver");
+MODULE_DEVICE_TABLE(mdio, qca807x_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
new file mode 100644
index 000000000000..5048304ccc9e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+/* ADC threshold */
+#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
+#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
+#define QCA808X_ADC_THRESHOLD_80MV 0
+#define QCA808X_ADC_THRESHOLD_100MV 0xf0
+#define QCA808X_ADC_THRESHOLD_200MV 0x0f
+#define QCA808X_ADC_THRESHOLD_300MV 0xff
+
+/* CLD control */
+#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
+#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
+#define QCA808X_8023AZ_AFE_EN 0x90
+
+/* AZ control */
+#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
+#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
+#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
+#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
+#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
+#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
+#define QCA808X_TOP_OPTION1_DATA 0x0
+
+#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
+#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
+#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
+#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
+#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
+#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
+#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
+#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
+#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
+#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
+#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
+#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
+
+/* master/slave seed config */
+#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
+#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
+#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
+#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
+
+/* Hibernation yields lower power consumption than normal operation mode.
+ * When the copper cable is unplugged, the PHY enters hibernation mode in about 10s.
+ */
+#define QCA808X_DBG_AN_TEST 0xb
+#define QCA808X_HIBERNATION_EN BIT(15)
+
+#define QCA808X_MMD7_LED2_CTRL 0x8074
+#define QCA808X_MMD7_LED2_FORCE_CTRL 0x8075
+#define QCA808X_MMD7_LED1_CTRL 0x8076
+#define QCA808X_MMD7_LED1_FORCE_CTRL 0x8077
+#define QCA808X_MMD7_LED0_CTRL 0x8078
+#define QCA808X_MMD7_LED_CTRL(x) (0x8078 - ((x) * 2))
+
+#define QCA808X_MMD7_LED0_FORCE_CTRL 0x8079
+#define QCA808X_MMD7_LED_FORCE_CTRL(x) (0x8079 - ((x) * 2))
+
+#define QCA808X_MMD7_LED_POLARITY_CTRL 0x901a
+/* QSDK sets this reg to 0x46 by default, which sets BIT 6 to make the
+ * LED active high. It's not clear what BIT 3 and BIT 4 do.
+ */
+#define QCA808X_LED_ACTIVE_HIGH BIT(6)
+
+/* QCA808X 1G chip type */
+#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
+#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
+
+#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
+#define QCA8081_PHY_FIFO_RSTN BIT(11)
+
+#define QCA8081_PHY_ID 0x004dd101
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct qca808x_priv {
+ int led_polarity_mode;
+};
+
+static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable fast retrain */
+ ret = genphy_c45_fast_retrain(phydev, true);
+ if (ret)
+ return ret;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+ QCA808X_TOP_OPTION1_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+ QCA808X_MMD3_DEBUG_1_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+ QCA808X_MMD3_DEBUG_4_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+ QCA808X_MMD3_DEBUG_5_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+ QCA808X_MMD3_DEBUG_3_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+ QCA808X_MMD3_DEBUG_6_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+ QCA808X_MMD3_DEBUG_2_VALUE);
+
+ return 0;
+}
+
+static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+{
+ u16 seed_value;
+
+ if (!enable)
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+
+ seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+ FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+ QCA808X_MASTER_SLAVE_SEED_ENABLE);
+}
+
+static bool qca808x_is_prefer_master(struct phy_device *phydev)
+{
+ return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+ (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+}
+
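+/* Fast retrain and the master/slave seed workaround only apply to the
+ * 2.5G capable variant; the 1G-only chip has 2500baseT cleared from the
+ * supported modes in qca808x_get_features().
+ */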
+static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+{
+ return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+}
+
+static bool qca808x_is_1g_only(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
+ if (ret < 0)
+ return true;
+
+ return !!(QCA808X_PHY_CHIP_TYPE_1G & ret);
+}
+
+static void qca808x_fill_possible_interfaces(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, possible);
+
+ if (!qca808x_is_1g_only(phydev))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, possible);
+}
+
+static int qca808x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca808x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init LED polarity mode to -1 (not yet configured) */
+ priv->led_polarity_mode = -1;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca808x_config_init(struct phy_device *phydev)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Default to LED Active High if active-low not in DT */
+ if (priv->led_polarity_mode == -1) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH);
+ if (ret)
+ return ret;
+ }
+
+ /* Activate ADC and VGA on 802.3az for the 1000M and 100M links */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ if (ret)
+ return ret;
+
+ /* Adjust the threshold on 802.3az for the 1000M link */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+ QCA808X_MMD3_AZ_TRAINING_VAL);
+ if (ret)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ /* Configure fast retrain for the 2500M link */
+ ret = qca808x_phy_fast_retrain_config(phydev);
+ if (ret)
+ return ret;
+
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!qca808x_is_prefer_master(phydev)) {
+ /* Enable the seed and configure a lower random seed to make the PHY
+ * link in slave mode.
+ */
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ qca808x_fill_possible_interfaces(phydev);
+
+ /* Configure the ADC threshold as 100mV for the 10M link */
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+ QCA808X_ADC_THRESHOLD_MASK,
+ QCA808X_ADC_THRESHOLD_100MV);
+}
+
+static int qca808x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (ret < 0)
+ return ret;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ /* qca8081 uses different bits for the speed value than at803x */
+ ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+ ret = at803x_read_specific_status(phydev, ss_mask);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+ /* Generate the seed as a lower random value so the PHY easily links as
+ * SLAVE, unless a master/slave configuration fault was detected or
+ * master mode is preferred.
+ *
+ * The reason for not putting this code into link_change_notify is the
+ * corner case where the link partner is also a qca8081 PHY and both are
+ * configured with the same seed value: the link can't come up and no
+ * link change occurs.
+ */
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+ qca808x_is_prefer_master(phydev)) {
+ qca808x_phy_ms_seed_enable(phydev, false);
+ } else {
+ qca808x_phy_ms_seed_enable(phydev, true);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qca808x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+
+ return ret;
+}
+
+static int qca808x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ /* perform CDT with the following configs:
+ * 1. disable hibernation.
+ * 2. force PHY working in MDI mode.
+ * 3. force the PHY to work in 1000BaseT.
+ * 4. configure the threshold.
+ */
+
+ ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+ if (ret < 0)
+ return ret;
+
+ /* Forcing 1000base-T requires configuring both PMA/PMD and MII_BMCR */
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* configure the thresholds for open, short, pair ok test */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+
+ return 0;
+}
+
+static int qca808x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The autoneg ability is not reported in bit3 of MMD7.1,
+ * but it is supported by the qca808x PHY, so add it here
+ * manually.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ /* On the qca8081 1G version chip, the 2500baseT ability is still
+ * reported in bit0 of MMD1.21, so remove it manually when bit0 of
+ * MMD7.0x901d identifies the chip as the 1G variant.
+ */
+ if (qca808x_is_1g_only(phydev))
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int qca808x_config_aneg(struct phy_device *phydev)
+{
+ int phy_ctrl = 0;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The reg MII_BMCR also needs to be configured for force mode, so
+ * genphy_config_aneg is needed as well.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
+static void qca808x_link_change_notify(struct phy_device *phydev)
+{
+ /* Assert the interface's SGMII FIFO reset on link down and deassert it
+ * on link up; the interface device address is always the PHY address plus 1.
+ */
+ mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN,
+ phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+}
+
+static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED2500_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ /* Enable BLINK_CHECK_BYPASS by default to make the LED
+ * blink even with duplex or speed mode not enabled.
+ */
+ *offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 2)
+ return -EINVAL;
+
+ return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, offload_trigger = 0;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca808x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK,
+ offload_trigger);
+}
+
+static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return false;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 2)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca808x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_SPEED2500_ON)
+ set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK);
+}
+
+static int qca808x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ if (!value) {
+ ret = qca808x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* PHY polarity is global and can't be set per LED.
+ * To detect a conflict, check whether the last requested polarity
+ * mode matches the new one.
+ */
+ if (priv->led_polarity_mode >= 0 &&
+ priv->led_polarity_mode != active_low) {
+ phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+ return -EINVAL;
+ }
+
+ /* Save the last PHY polarity mode */
+ priv->led_polarity_mode = active_low;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH,
+ active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+}
+
+static struct phy_driver qca808x_driver[] = {
+{
+ /* Qualcomm QCA8081 */
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = qca808x_probe,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .get_features = qca808x_get_features,
+ .config_aneg = qca808x_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = qca808x_read_status,
+ .config_init = qca808x_config_init,
+ .soft_reset = qca808x_soft_reset,
+ .cable_test_start = qca808x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .link_change_notify = qca808x_link_change_notify,
+ .led_brightness_set = qca808x_led_brightness_set,
+ .led_blink_set = qca808x_led_blink_set,
+ .led_hw_is_supported = qca808x_led_hw_is_supported,
+ .led_hw_control_set = qca808x_led_hw_control_set,
+ .led_hw_control_get = qca808x_led_hw_control_get,
+ .led_polarity_set = qca808x_led_polarity_set,
+}, };
+
+module_phy_driver(qca808x_driver);
+
+static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca808x_tbl);
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
new file mode 100644
index 000000000000..5d083ef0250e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+#define AT803X_DEBUG_REG_3C 0x3C
+
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
+
+#define MDIO_AZ_DEBUG 0x800D
+
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
+#define QCA8337_PHY_ID 0x004dd036
+#define QCA8K_PHY_ID_MASK 0xffffffff
+
+#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
+
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+};
+
+struct qca83xx_priv {
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+};
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA83XX PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+static int qca83xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(qca83xx_hw_stats);
+}
+
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+ struct qca83xx_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.access_type == MMD)
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & stat.mask;
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
+}
+
+static int qca83xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca83xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca83xx_config_init(struct phy_device *phydev)
+{
+ u8 switch_revision;
+
+ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+
+ switch (switch_revision) {
+ case 1:
+ /* For 100M waveform */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+ /* Turn on Gigabit clock */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+ break;
+
+ case 2:
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+ fallthrough;
+ case 4:
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+ break;
+ }
+
+ /* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
+ /* The QCA8327 requires the DAC amplitude adjustment for 100m to be set
+ * to +6%. Disable it on init and enable it only at 100m speed, following
+ * the original QCA source code.
+ */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ return qca83xx_config_init(phydev);
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+ /* Set the DAC amplitude adjustment to +6% for 100m while the link is running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port, reset values set by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+ /* On resume from suspend the switch executes a reset and
+ * restarts auto-negotiation. Wait for the reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
+ return 0;
+}
+
+static int qca8337_suspend(struct phy_device *phydev)
+{
+ /* Only the QCA8337 supports actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* The QCA8327 causes port unreliability when PHY suspend is set,
+ * so instead clear every MII_BMCR bit except the speed and duplex
+ * selection bits.
+ */
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
+
+static struct phy_driver qca83xx_driver[] = {
+{
+ /* QCA8337 */
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8337_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, };
+
+module_phy_driver(qca83xx_driver);
+
+static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca83xx_tbl);
diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c
new file mode 100644
index 000000000000..d28815ef56bb
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "qcom.h"
+
+MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
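+/* The debug registers are accessed indirectly: the register number is
+ * written to AT803X_DEBUG_ADDR and the value is then read or written
+ * through AT803X_DEBUG_DATA.
+ */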
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_read(phydev, AT803X_DEBUG_DATA);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_read);
+
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set)
+{
+ u16 val;
+ int ret;
+
+ ret = at803x_debug_reg_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret & 0xffff;
+ val &= ~clear;
+ val |= set;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, val);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_mask);
+
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, data);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
+
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret, irq_enabled;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i;
+ static const unsigned int offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return -ENODEV;
+
+ mac = (const u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ for (i = 0; i < 3; i++)
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ if (ret)
+ return ret;
+ } else {
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear WOL status */
+ ret = phy_read(phydev, AT803X_INTR_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether interrupts other than WOL were triggered while the PHY
+ * is in interrupt mode; only the interrupts enabled by AT803X_INTR_ENABLE
+ * can be passed up to the interrupt pin.
+ */
+ irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (irq_enabled < 0)
+ return irq_enabled;
+
+ irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+ if (ret & irq_enabled && !phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_set_wol);
+
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (value < 0)
+ return;
+
+ if (value & AT803X_INTR_ENABLE_WOL)
+ wol->wolopts |= WAKE_MAGIC;
+}
+EXPORT_SYMBOL_GPL(at803x_get_wol);
+
+int at803x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, AT803X_INTR_STATUS);
+
+ return (err < 0) ? err : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_ack_interrupt);
+
+int at803x_config_intr(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
+ value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
+ value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+ value |= AT803X_INTR_ENABLE_LINK_FAIL;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+
+ err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ } else {
+ err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(at803x_config_intr);
+
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(at803x_handle_interrupt);
+
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask)
+{
+ int ss;
+
+ /* Read the AT8035 PHY-Specific Status register, which indicates the
+ * speed and duplex that the PHY is actually using, irrespective of
+ * whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ int sfc, speed;
+
+ sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+ if (sfc < 0)
+ return sfc;
+
+ speed = ss & ss_mask.speed_mask;
+ speed >>= ss_mask.speed_shift;
+
+ switch (speed) {
+ case AT803X_SS_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case QCA808X_SS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (ss & AT803X_SS_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+ case AT803X_SFC_MANUAL_MDI:
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ break;
+ case AT803X_SFC_MANUAL_MDIX:
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case AT803X_SFC_AUTOMATIC_CROSSOVER:
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_specific_status);
+
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = AT803X_SFC_MANUAL_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val = AT803X_SFC_MANUAL_MDIX;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+ AT803X_SFC_MDI_CROSSOVER_MODE_M,
+ FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+}
+EXPORT_SYMBOL_GPL(at803x_config_mdix);
+
+int at803x_prepare_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+
+ /* Changes of the mdix bits are disruptive to the normal operation;
+ * therefore any changes to these registers must be followed by a
+ * software reset to take effect.
+ */
+ if (ret == 1) {
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
+
+int at803x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_status);
+
+static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+{
+ int val;
+
+ val = phy_read(phydev, AT803X_SMART_SPEED);
+ if (val < 0)
+ return val;
+
+ if (val & AT803X_SMART_SPEED_ENABLE)
+ *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+ else
+ *d = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ switch (cnt) {
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ cnt = AT803X_DEFAULT_DOWNSHIFT;
+ fallthrough;
+ case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+ set = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER |
+ FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+ mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+ break;
+ case DOWNSHIFT_DEV_DISABLE:
+ set = 0;
+ mask = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+
+ /* After changing the smart speed settings, we need to perform a
+ * software reset; use phy_init_hw() to make sure we reapply any
+ * values that might have been lost during the software reset.
+ */
+ if (ret == 1)
+ ret = phy_init_hw(phydev);
+
+ return ret;
+}
+
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_get_tunable);
+
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_set_tunable);
+
+int at803x_cdt_fault_length(int dt)
+{
+ /* According to the datasheet the distance to the fault is
+ * DELTA_TIME * 0.824 meters.
+ *
+ * The author suspects the correct formula is:
+ *
+ * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+ *
+ * where c is the speed of light, VF is the velocity factor of
+ * the twisted pair cable, 125MHz is the counter frequency, and
+ * we need to divide by 2 because the hardware will measure the
+ * round trip time to the fault and back to the PHY.
+ *
+ * With a VF of 0.69 we get the factor 0.824 mentioned in the
+ * datasheet.
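+ * (3e8 m/s * 0.69 / 125MHz / 2 is about 0.828 m per count; the exact
+ * 0.824 factor corresponds to a VF of roughly 0.687.)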
+ */
+ return (dt * 824) / 10;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_fault_length);
+
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start)
+{
+ return phy_write(phydev, AT803X_CDT, cdt_start);
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_start);
+
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en)
+{
+ int val, ret;
+
+ /* One test run takes about 25ms */
+ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+ !(val & cdt_en),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
+
+static bool qca808x_cdt_fault_length_valid(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int qca808x_cable_test_result_trans(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case QCA808X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
+{
+ int val;
+ u32 cdt_length_reg = 0;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+ if (val < 0)
+ return val;
+
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
+ return at803x_cdt_fault_length(val);
+}
+
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ int length, result;
+ u16 pair_code;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair, result);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret, val;
+
+ *finished = false;
+
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
+
+ *finished = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_cable_test_get_status);
+
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_enable);
+
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ return !(val & QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_status);
+
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | (value ? QCA808X_LED_FORCE_ON :
+ QCA808X_LED_FORCE_OFF));
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_brightness_set);
+
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int ret;
+
+ /* Set blink to 50% off, 50% on at 4Hz by default */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+ QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+ QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+ if (ret)
+ return ret;
+
+ /* We use BLINK_1 for normal blinking */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+ if (ret)
+ return ret;
+
+ /* We set the blink to 4Hz, i.e. a 250ms period split evenly between on and off */
+ *delay_on = 250 / 2;
+ *delay_off = 250 / 2;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_blink_set);
diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h
new file mode 100644
index 000000000000..4bb541728846
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
+#define AT803X_SFC_ASSERT_CRS BIT(11)
+#define AT803X_SFC_FORCE_LINK BIT(10)
+#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
+#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
+#define AT803X_SFC_MANUAL_MDIX 0x1
+#define AT803X_SFC_MANUAL_MDI 0x0
+#define AT803X_SFC_SQE_TEST BIT(2)
+#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
+#define AT803X_SFC_DISABLE_JABBER BIT(0)
+
+#define AT803X_SPECIFIC_STATUS 0x11
+#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
+#define AT803X_SS_SPEED_1000 2
+#define AT803X_SS_SPEED_100 1
+#define AT803X_SS_SPEED_10 0
+#define AT803X_SS_DUPLEX BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
+#define AT803X_SS_MDIX BIT(6)
+
+#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
+#define QCA808X_SS_SPEED_2500 4
+
+#define AT803X_INTR_ENABLE 0x12
+#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
+#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
+#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
+#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
+#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
+#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
+#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
+#define AT803X_INTR_ENABLE_WOL BIT(0)
+
+#define AT803X_INTR_STATUS 0x13
+
+#define AT803X_SMART_SPEED 0x14
+#define AT803X_SMART_SPEED_ENABLE BIT(5)
+#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
+#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
+
+#define AT803X_CDT 0x16
+#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
+#define AT803X_CDT_ENABLE_TEST BIT(0)
+#define AT803X_CDT_STATUS 0x1c
+#define AT803X_CDT_STATUS_STAT_NORMAL 0
+#define AT803X_CDT_STATUS_STAT_SHORT 1
+#define AT803X_CDT_STATUS_STAT_OPEN 2
+#define AT803X_CDT_STATUS_STAT_FAIL 3
+#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
+#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
+
+#define QCA808X_CDT_ENABLE_TEST BIT(15)
+#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
+#define QCA808X_CDT_LENGTH_UNIT BIT(10)
+
+#define QCA808X_MMD3_CDT_STATUS 0x8064
+#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
+#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
+#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
+#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
+
+#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
+#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
+#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
+#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* NORMAL states are the MDI codes with the type field set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Defined for completeness; the busy state should already be handled by wait_for_completion */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
+
+#define QCA808X_MMD7_LED_GLOBAL 0x8073
+#define QCA808X_LED_BLINK_1 GENMASK(11, 6)
+#define QCA808X_LED_BLINK_2 GENMASK(5, 0)
+/* Values are the same for both BLINK_1 and BLINK_2 */
+#define QCA808X_LED_BLINK_FREQ_MASK GENMASK(5, 3)
+#define QCA808X_LED_BLINK_FREQ_2HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+#define QCA808X_LED_BLINK_FREQ_4HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+#define QCA808X_LED_BLINK_FREQ_8HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+#define QCA808X_LED_BLINK_FREQ_16HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+#define QCA808X_LED_BLINK_FREQ_32HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+#define QCA808X_LED_BLINK_FREQ_64HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+#define QCA808X_LED_BLINK_FREQ_128HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+#define QCA808X_LED_BLINK_FREQ_256HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+#define QCA808X_LED_BLINK_DUTY_MASK GENMASK(2, 0)
+#define QCA808X_LED_BLINK_DUTY_50_50 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+#define QCA808X_LED_BLINK_DUTY_75_25 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+#define QCA808X_LED_BLINK_DUTY_25_75 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+#define QCA808X_LED_BLINK_DUTY_33_67 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+#define QCA808X_LED_BLINK_DUTY_67_33 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+#define QCA808X_LED_BLINK_DUTY_17_83 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+#define QCA808X_LED_BLINK_DUTY_83_17 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+#define QCA808X_LED_BLINK_DUTY_8_92 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+
+/* LED hw control pattern is the same for every LED */
+#define QCA808X_LED_PATTERN_MASK GENMASK(15, 0)
+#define QCA808X_LED_SPEED2500_ON BIT(15)
+#define QCA808X_LED_SPEED2500_BLINK BIT(14)
+/* Follow blink trigger even if duplex or speed condition doesn't match */
+#define QCA808X_LED_BLINK_CHECK_BYPASS BIT(13)
+#define QCA808X_LED_FULL_DUPLEX_ON BIT(12)
+#define QCA808X_LED_HALF_DUPLEX_ON BIT(11)
+#define QCA808X_LED_TX_BLINK BIT(10)
+#define QCA808X_LED_RX_BLINK BIT(9)
+#define QCA808X_LED_TX_ON_10MS BIT(8)
+#define QCA808X_LED_RX_ON_10MS BIT(7)
+#define QCA808X_LED_SPEED1000_ON BIT(6)
+#define QCA808X_LED_SPEED100_ON BIT(5)
+#define QCA808X_LED_SPEED10_ON BIT(4)
+#define QCA808X_LED_COLLISION_BLINK BIT(3)
+#define QCA808X_LED_SPEED1000_BLINK BIT(2)
+#define QCA808X_LED_SPEED100_BLINK BIT(1)
+#define QCA808X_LED_SPEED10_BLINK BIT(0)
+
+/* LED force ctrl is the same for every LED.
+ * No documentation exists for this, not even an internal one
+ * under NDA, as QCOM only documents how to configure the
+ * hw control pattern rules and doesn't describe any way
+ * to force the LED into a specific mode.
+ * These defines come from reverse engineering and testing;
+ * some information may be missing or not entirely correct.
+ * For basic LED control and hw control these findings are
+ * enough to support LED control in all the required APIs.
+ *
+ * A comparison with the qca807x implementation showed a 1:1
+ * match, confirming the reverse engineering. It also provided
+ * further details on the force mode and the blink modes.
+ */
+#define QCA808X_LED_FORCE_EN BIT(15)
+#define QCA808X_LED_FORCE_MODE_MASK GENMASK(14, 13)
+#define QCA808X_LED_FORCE_BLINK_1 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+#define QCA808X_LED_FORCE_BLINK_2 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+#define QCA808X_LED_FORCE_ON FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+#define QCA808X_LED_FORCE_OFF FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+
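To make the force bits above concrete, here is an illustrative brightness-style handler built on them (not taken from this patch); the exported qca808x_led_reg_brightness_set() declared further down in this header presumably works along these lines.

/* Illustration only: force an LED fully on or off using the bits above. */
static int qca808x_led_force_example(struct phy_device *phydev, u16 reg, bool on)
{
	u16 mode = on ? QCA808X_LED_FORCE_ON : QCA808X_LED_FORCE_OFF;

	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
			      QCA808X_LED_FORCE_EN | mode);
}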
+#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
+#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
+#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+
+#define AT803X_DEBUG_ADDR 0x1D
+#define AT803X_DEBUG_DATA 0x1E
+
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
+#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
+
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
+#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
+
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
+
+enum stat_access_type {
+ PHY,
+ MMD
+};
+
+struct at803x_hw_stat {
+ const char *string;
+ u8 reg;
+ u32 mask;
+ enum stat_access_type access_type;
+};
+
+struct at803x_ss_mask {
+ u16 speed_mask;
+ u8 speed_shift;
+};
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set);
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+int at803x_ack_interrupt(struct phy_device *phydev);
+int at803x_config_intr(struct phy_device *phydev);
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev);
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask);
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
+int at803x_prepare_config_aneg(struct phy_device *phydev);
+int at803x_read_status(struct phy_device *phydev);
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data);
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data);
+int at803x_cdt_fault_length(int dt);
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en);
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished);
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg);
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg);
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value);
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 894172a3e15f..1fa70427b2a2 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -57,14 +57,6 @@
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
-#define RTL_SUPPORTS_5000FULL BIT(14)
-#define RTL_SUPPORTS_2500FULL BIT(13)
-#define RTL_SUPPORTS_10000FULL BIT(0)
-#define RTL_ADV_2500FULL BIT(7)
-#define RTL_LPADV_10000FULL BIT(11)
-#define RTL_LPADV_5000FULL BIT(6)
-#define RTL_LPADV_2500FULL BIT(5)
-
#define RTL9000A_GINMR 0x14
#define RTL9000A_GINMR_LINK_STATUS BIT(4)
@@ -421,9 +413,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
ERR_PTR(ret));
return ret;
}
+
+ return genphy_soft_reset(phydev);
}
- return genphy_soft_reset(phydev);
+ return 0;
}
static int rtl821x_suspend(struct phy_device *phydev)
@@ -674,11 +668,11 @@ static int rtl822x_get_features(struct phy_device *phydev)
return val;
linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_2500FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_2_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_5000FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_10000FULL);
+ phydev->supported, val & MDIO_SPEED_10G);
return genphy_read_abilities(phydev);
}
@@ -688,14 +682,12 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
int ret = 0;
if (phydev->autoneg == AUTONEG_ENABLE) {
- u16 adv2500 = 0;
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->advertising))
- adv2500 = RTL_ADV_2500FULL;
+ u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- RTL_ADV_2500FULL, adv2500);
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G,
+ adv);
if (ret < 0)
return ret;
}
@@ -713,12 +705,8 @@ static int rtl822x_read_status(struct phy_device *phydev)
if (lpadv < 0)
return lpadv;
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpadv);
}
ret = genphy_read_status(phydev);
@@ -736,7 +724,7 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
val = phy_read(phydev, 0x13);
phy_write(phydev, RTL821x_PAGE_SELECT, 0);
- return val >= 0 && val & RTL_SUPPORTS_2500FULL;
+ return val >= 0 && val & MDIO_PMA_SPEED_2_5G;
}
static int rtlgen_match_phy_device(struct phy_device *phydev)
@@ -1048,6 +1036,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc862),
+ .name = "RTL8251B 5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7fd9fe6a602b..7b1bc5fcef9b 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -22,7 +22,7 @@
struct gmii2rgmii {
struct phy_device *phy_dev;
- struct phy_driver *phy_drv;
+ const struct phy_driver *phy_drv;
struct phy_driver conv_phy_drv;
struct mdio_device *mdio;
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3dd52bf28f15..fe380fe196e7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -295,7 +295,9 @@ static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
-static struct class *ppp_class;
+static const struct class ppp_class = {
+ .name = "ppp",
+};
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
@@ -1394,11 +1396,9 @@ static int __init ppp_init(void)
goto out_net;
}
- ppp_class = class_create("ppp");
- if (IS_ERR(ppp_class)) {
- err = PTR_ERR(ppp_class);
+ err = class_register(&ppp_class);
+ if (err)
goto out_chrdev;
- }
err = rtnl_link_register(&ppp_link_ops);
if (err) {
@@ -1407,12 +1407,12 @@ static int __init ppp_init(void)
}
/* not a big deal if we fail here :-) */
- device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+ device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
out_class:
- class_destroy(ppp_class);
+ class_unregister(&ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
@@ -1607,7 +1607,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_fill_forward_path = ppp_fill_forward_path,
};
-static struct device_type ppp_type = {
+static const struct device_type ppp_type = {
.name = "ppp",
};
@@ -3549,8 +3549,8 @@ static void __exit ppp_cleanup(void)
pr_err("PPP: removing module but units remain!\n");
rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
- device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
- class_destroy(ppp_class);
+ device_destroy(&ppp_class, MKDEV(PPP_MAJOR, 0));
+ class_unregister(&ppp_class);
unregister_pernet_device(&ppp_net_ops);
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f575f225d417..0a44bbdcfb7b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -25,7 +25,6 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
-#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -2074,7 +2073,6 @@ static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4a4f8c8e79fa..0b3f21cba552 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -54,6 +54,7 @@
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
+#include <linux/math.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
@@ -77,6 +78,7 @@
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -523,8 +525,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
struct tun_flow_entry *e;
- u32 txq = 0;
- u32 numqueues = 0;
+ u32 txq, numqueues;
numqueues = READ_ONCE(tun->numqueues);
@@ -534,8 +535,7 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
} else {
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
+ txq = reciprocal_scale(txq, numqueues);
}
return txq;
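For readers unfamiliar with the helper swapped in above: reciprocal_scale() (from linux/kernel.h) keeps the same multiply-and-shift trick the removed comment described, mapping a full-range 32-bit value uniformly into [0, n) without a divide. Roughly:

/* Approximate open-coded equivalent of reciprocal_scale(val, ep_ro):
 * scale a 32-bit value into [0, ep_ro) with a 64-bit multiply and shift.
 */
static inline u32 scale_example(u32 val, u32 ep_ro)
{
	return (u32)(((u64)val * ep_ro) >> 32);
}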
@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);
@@ -977,20 +978,15 @@ static int tun_net_init(struct net_device *dev)
struct ifreq *ifr = tun->ifr;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
spin_lock_init(&tun->lock);
err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0) {
- free_percpu(dev->tstats);
+ if (err < 0)
return err;
- }
tun_flow_init(tun);
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
@@ -1008,7 +1004,6 @@ static int tun_net_init(struct net_device *dev)
if (err < 0) {
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
- free_percpu(dev->tstats);
return err;
}
return 0;
@@ -1344,7 +1339,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
.ndo_change_carrier = tun_net_change_carrier,
@@ -1927,7 +1921,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
@@ -2317,7 +2311,6 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
- free_percpu(dev->tstats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2517,7 +2510,7 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
ret = 0;
goto out;
@@ -3644,12 +3637,22 @@ static int tun_set_coalesce(struct net_device *dev,
return 0;
}
+static void tun_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ channels->combined_count = tun->numqueues;
+ channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
+}
+
static const struct ethtool_ops tun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_channels = tun_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = tun_get_coalesce,
.set_coalesce = tun_set_coalesce,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3fd7dccf0f9c..3c360d4f0635 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -99,6 +99,7 @@ config USB_RTL8150
config USB_RTL8152
tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
+ select PHYLIB
select CRC32
select CRYPTO
select CRYPTO_HASH
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d837c1887416..88e084534853 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -667,7 +667,7 @@ static int ax88179_set_link_ksettings(struct net_device *net,
}
static int
-ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_keee *data)
{
int val;
@@ -676,29 +676,29 @@ ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
MDIO_MMD_PCS);
if (val < 0)
return val;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->supported, val);
/* Get advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->advertised, val);
/* Get LP advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->lp_advertised, val);
return 0;
}
static int
-ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_keee *data)
{
- u16 tmp16 = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ u16 tmp16 = linkmode_to_mii_eee_cap1_t(data->advertised);
return ax88179_phy_write_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, tmp16);
@@ -807,7 +807,7 @@ static void ax88179_enable_eee(struct usbnet *dev)
GMII_PHY_PAGE_SELECT, 2, &tmp16);
}
-static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -818,7 +818,7 @@ static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
return ax88179_ethtool_get_eee(dev, edata);
}
-static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -1587,7 +1587,7 @@ static int ax88179_reset(struct usbnet *dev)
u16 *tmp16;
u8 *tmp;
struct ax88179_data *ax179_data = dev->driver_priv;
- struct ethtool_eee eee_data;
+ struct ethtool_keee eee_data;
tmp16 = (u16 *)buf;
tmp = (u8 *)buf;
@@ -1663,7 +1663,7 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_disable_eee(dev);
ax88179_ethtool_get_eee(dev, &eee_data);
- eee_data.advertised = 0;
+ linkmode_zero(eee_data.advertised);
ax88179_ethtool_set_eee(dev, &eee_data);
/* Restart autoneg */
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index cd4083e0b3b9..e13e4920ee9b 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -339,7 +339,7 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
goto out;
- is_router = !!in6_dev->cnf.forwarding;
+ is_router = !!READ_ONCE(in6_dev->cnf.forwarding);
in6_dev_put(in6_dev);
/* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 99ec1d4a972d..8b6d6a1b3c2e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
- return err;
+ return 0;
}
netdev_dbg(dev->net,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f088ea2ba6f3..1aeb36119d3f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2465,7 +2465,7 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
}
}
-static struct device_type hso_type = {
+static const struct device_type hso_type = {
.name = "wwan",
};
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a6d653ff552a..80ee4fcdfb36 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
lan78xx_rx_urb_submit_all(dev);
+ local_bh_disable();
napi_schedule(&dev->napi);
+ local_bh_enable();
}
return 0;
@@ -1673,7 +1675,7 @@ static int lan78xx_set_wol(struct net_device *netdev,
return ret;
}
-static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
@@ -1709,7 +1711,7 @@ exit:
return ret;
}
-static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
int ret;
@@ -3033,7 +3035,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+ dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */
@@ -3132,7 +3135,8 @@ static int lan78xx_open(struct net_device *net)
done:
mutex_unlock(&dev->dev_mutex);
- usb_autopm_put_interface(dev->intf);
+ if (ret < 0)
+ usb_autopm_put_interface(dev->intf);
return ret;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0d0672d2a654..5d6aeb086fc7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
@@ -891,8 +892,8 @@ struct r8152 {
void (*up)(struct r8152 *tp);
void (*down)(struct r8152 *tp);
void (*unload)(struct r8152 *tp);
- int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
- int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee);
bool (*in_nway)(struct r8152 *tp);
void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
@@ -8922,32 +8923,31 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
-static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
+ u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised);
tp->eee_en = eee->eee_enabled;
tp->eee_adv = val;
@@ -8957,31 +8957,30 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
return 0;
}
-static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = ocp_reg_read(tp, OCP_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = ocp_reg_read(tp, OCP_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
static int
-rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -9008,7 +9007,7 @@ out:
}
static int
-rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -10078,7 +10077,7 @@ static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
* driver supports it.
*/
if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
- return 0;
+ return -ENODEV;
/* The vendor mode is not always config #1, so to find it out. */
c = udev->config;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index a530f20ee257..2fa46baa589e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
+ USB_DEVICE(0x0878, 0x1400),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
+ {
/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
USB_DEVICE(0x184F, 0x0051),
.driver_info = (unsigned long)&smsc95xx_info,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 143bd4ab160d..57947a5590cc 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = SR9800_EEPROM_LEN;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* LED Setting Rule :
* AABB:CCDD
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d14b0d78541..e84efa661589 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1633,7 +1633,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
- free_percpu(net->tstats);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1645,7 +1644,6 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1654,11 +1652,11 @@ static const struct net_device_ops usbnet_netdev_ops = {
// precondition: never called in_interrupt
-static struct device_type wlan_type = {
+static const struct device_type wlan_type = {
.name = "wlan",
};
-static struct device_type wwan_type = {
+static const struct device_type wwan_type = {
.name = "wwan",
};
@@ -1710,10 +1708,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_speed = SPEED_UNSET;
dev->tx_speed = SPEED_UNSET;
- net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!net->tstats)
- goto out0;
-
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
init_waitqueue_head(&dev->wait);
@@ -1743,6 +1737,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
net->ethtool_ops = &usbnet_ethtool_ops;
+ net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
// allow device-specific bind/init procedures
// NOTE net->name still not usable ...
@@ -1861,8 +1856,6 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
- free_percpu(net->tstats);
-out0:
free_netdev(net);
out:
return status;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..13d902462d8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -729,80 +729,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off, truesize, page_offset;
- struct sk_buff *nskb;
- struct page *page;
- int i, head_off;
- void *va;
-
- /* We need a private copy of the skb and data buffers since
- * the ebpf program can modify it. We segment the original skb
- * into order-0 pages without linearize it.
- *
- * Make sure we have enough space for linear and paged area
- */
- max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
- VETH_XDP_HEADROOM);
- if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
- goto drop;
-
- size = min_t(u32, skb->len, max_head_size);
- truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
- /* Allocate skb head */
- va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
- if (!va)
- goto drop;
-
- nskb = napi_build_skb(va, truesize);
- if (!nskb) {
- page_pool_free_va(rq->page_pool, va, true);
+ if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
goto drop;
- }
-
- skb_reserve(nskb, VETH_XDP_HEADROOM);
- skb_copy_header(nskb, skb);
- skb_mark_for_recycle(nskb);
-
- if (skb_copy_bits(skb, 0, nskb->data, size)) {
- consume_skb(nskb);
- goto drop;
- }
- skb_put(nskb, size);
- head_off = skb_headroom(nskb) - skb_headroom(skb);
- skb_headers_offset_update(nskb, head_off);
-
- /* Allocate paged area of new skb */
- off = size;
- len = skb->len - off;
-
- for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- size = min_t(u32, len, PAGE_SIZE);
- truesize = size;
-
- page = page_pool_dev_alloc(rq->page_pool, &page_offset,
- &truesize);
- if (!page) {
- consume_skb(nskb);
- goto drop;
- }
-
- skb_add_rx_frag(nskb, i, page, page_offset, size,
- truesize);
- if (skb_copy_bits(skb, off,
- page_address(page) + page_offset,
- size)) {
- consume_skb(nskb);
- goto drop;
- }
-
- len -= size;
- off += size;
- }
-
- consume_skb(skb);
- skb = nskb;
+ skb = *pskb;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -1208,14 +1138,6 @@ static int veth_enable_xdp(struct net_device *dev)
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
return err;
}
-
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding XDP
- * is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
}
}
@@ -1235,18 +1157,9 @@ static void veth_disable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++)
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
- if (!netif_running(dev) || !veth_gro_requested(dev)) {
+ if (!netif_running(dev) || !veth_gro_requested(dev))
veth_napi_del(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev) && netif_running(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
- }
-
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
@@ -1478,7 +1391,8 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
+ priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!priv->rq)
return -ENOMEM;
@@ -1494,11 +1408,12 @@ static void veth_free_queues(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
- kfree(priv->rq);
+ kvfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
return veth_alloc_queues(dev);
}
@@ -1530,7 +1445,7 @@ static int veth_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
@@ -1654,6 +1569,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
+ if (!veth_gro_requested(dev)) {
+ /* user-space did not require GRO, but adding
+ * XDP is supposed to get GRO working
+ */
+ dev->features |= NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1669,6 +1592,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
+ /* If user-space did not require GRO, clear it now: it was
+ * only enabled because adding XDP enabled it.
+ */
+ if (!veth_gro_requested(dev)) {
+ dev->features &= ~NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index b1bb1b04b664..a1ba5169ed5d 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -13,19 +13,6 @@
#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
sizeof(struct af_vsockmon_hdr))
-static int vsockmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
- return 0;
-}
-
-static void vsockmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct vsockmon {
struct vsock_tap vt;
};
@@ -59,9 +46,6 @@ static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
-
- stats->tx_packets = 0;
- stats->tx_bytes = 0;
}
static int vsockmon_is_valid_mtu(int new_mtu)
@@ -79,8 +63,6 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
}
static const struct net_device_ops vsockmon_ops = {
- .ndo_init = vsockmon_dev_init,
- .ndo_uninit = vsockmon_dev_uninit,
.ndo_open = vsockmon_open,
.ndo_stop = vsockmon_close,
.ndo_start_xmit = vsockmon_xmit,
@@ -112,6 +94,7 @@ static void vsockmon_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->mtu = DEFAULT_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
}
static struct rtnl_link_ops vsockmon_link_ops __read_mostly = {
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 16106e088c63..3495591a5c29 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2841,26 +2841,19 @@ static int vxlan_init(struct net_device *dev)
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_init(vxlan);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats) {
- err = -ENOMEM;
- goto err_vnigroup_uninit;
- }
-
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
- goto err_free_percpu;
+ goto err_vnigroup_uninit;
err = vxlan_mdb_init(vxlan);
if (err)
goto err_gro_cells_destroy;
+ netdev_lockdep_set_classes(dev);
return 0;
err_gro_cells_destroy:
gro_cells_destroy(&vxlan->gro_cells);
-err_free_percpu:
- free_percpu(dev->tstats);
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
@@ -2891,8 +2884,6 @@ static void vxlan_uninit(struct net_device *dev)
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
-
- free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
@@ -3223,7 +3214,6 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -3247,13 +3237,12 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type vxlan_type = {
+static const struct device_type vxlan_type = {
.name = "vxlan",
};
@@ -3315,6 +3304,7 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -4826,55 +4816,43 @@ static __net_init int vxlan_init_net(struct net *net)
NULL);
}
-static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
+ struct list_head *dev_to_kill)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
- struct net_device *dev, *aux;
-
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &vxlan_link_ops)
- unregister_netdevice_queue(dev, head);
-
- list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
- /* If vxlan->dev is in the same netns, it has already been added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(vxlan->dev), net))
- unregister_netdevice_queue(vxlan->dev, head);
- }
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
+ vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- unsigned int h;
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- }
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- vxlan_destroy_tunnels(net, &list);
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ vxlan_destroy_tunnels(vn, dev_to_kill);
+ }
+}
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ unsigned int h;
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
- }
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch = vxlan_exit_batch_net,
+ .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 7dda87756d3f..31ab2136cdf1 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -197,6 +197,18 @@ config FARSYNC
To compile this driver as a module, choose M here: the
module will be called farsync.
+config FSL_QMC_HDLC
+ tristate "Freescale QMC HDLC support"
+ depends on HDLC
+ depends on CPM_QMC
+ help
+ HDLC support using the Freescale QUICC Multichannel Controller (QMC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl_qmc_hdlc.
+
+ If unsure, say N.
+
config FSL_UCC_HDLC
tristate "Freescale QUICC Engine HDLC support"
depends on HDLC
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 8119b49d1da9..00e9b7ee1e01 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
+obj-$(CONFIG_FSL_QMC_HDLC) += fsl_qmc_hdlc.o
obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index c04dc88bda6c..f547c22e26ac 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -18,7 +18,12 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-static struct class *framer_class;
+static void framer_release(struct device *dev);
+static const struct class framer_class = {
+ .name = "framer",
+ .dev_release = framer_release,
+};
+
static DEFINE_MUTEX(framer_provider_mutex);
static LIST_HEAD(framer_provider_list);
static DEFINE_IDA(framer_ida);
@@ -384,7 +389,7 @@ static struct framer_provider *framer_provider_of_lookup(const struct device_nod
return ERR_PTR(-EPROBE_DEFER);
}
-static struct framer *framer_of_get_from_provider(struct of_phandle_args *args)
+static struct framer *framer_of_get_from_provider(const struct of_phandle_args *args)
{
struct framer_provider *framer_provider;
struct framer *framer;
@@ -627,7 +632,7 @@ struct framer *framer_create(struct device *dev, struct device_node *node,
INIT_DELAYED_WORK(&framer->polling_work, framer_polling_work);
BLOCKING_INIT_NOTIFIER_HEAD(&framer->notifier_list);
- framer->dev.class = framer_class;
+ framer->dev.class = &framer_class;
framer->dev.parent = dev;
framer->dev.of_node = node ? node : dev->of_node;
framer->id = id;
@@ -735,12 +740,13 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
* should provide a custom of_xlate function that reads the *args* and returns
* the appropriate framer.
*/
-struct framer *framer_provider_simple_of_xlate(struct device *dev, struct of_phandle_args *args)
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct class_dev_iter iter;
struct framer *framer;
- class_dev_iter_init(&iter, framer_class, NULL, NULL);
+ class_dev_iter_init(&iter, &framer_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
framer = dev_to_framer(dev);
if (args->np != framer->dev.of_node)
@@ -768,7 +774,7 @@ EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider *framer_provider;
@@ -830,7 +836,7 @@ static void devm_framer_provider_of_unregister(struct device *dev, void *res)
struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider **ptr, *framer_provider;
@@ -869,14 +875,6 @@ static void framer_release(struct device *dev)
static int __init framer_core_init(void)
{
- framer_class = class_create("framer");
- if (IS_ERR(framer_class)) {
- pr_err("failed to create framer class (%pe)\n", framer_class);
- return PTR_ERR(framer_class);
- }
-
- framer_class->dev_release = framer_release;
-
- return 0;
+ return class_register(&framer_class);
}
device_initcall(framer_core_init);
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index 4f81053ee4f0..413a3c1d15bb 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -838,7 +838,7 @@ static int pef2256_probe(struct platform_device *pdev)
return 0;
}
-static int pef2256_remove(struct platform_device *pdev)
+static void pef2256_remove(struct platform_device *pdev)
{
struct pef2256 *pef2256 = platform_get_drvdata(pdev);
@@ -849,8 +849,6 @@ static int pef2256_remove(struct platform_device *pdev)
pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
-
- return 0;
}
static const struct of_device_id pef2256_id_table[] = {
@@ -865,7 +863,7 @@ static struct platform_driver pef2256_driver = {
.of_match_table = pef2256_id_table,
},
.probe = pef2256_probe,
- .remove = pef2256_remove,
+ .remove_new = pef2256_remove,
};
module_platform_driver(pef2256_driver);
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
new file mode 100644
index 000000000000..960371df470a
--- /dev/null
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale QMC HDLC Device Driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/framer/framer.h>
+#include <linux/hdlc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <soc/fsl/qe/qmc.h>
+
+struct qmc_hdlc_desc {
+ struct net_device *netdev;
+ struct sk_buff *skb; /* NULL if the descriptor is not in use */
+ dma_addr_t dma_addr;
+ size_t dma_size;
+};
+
+struct qmc_hdlc {
+ struct device *dev;
+ struct qmc_chan *qmc_chan;
+ struct net_device *netdev;
+ struct framer *framer;
+ spinlock_t carrier_lock; /* Protect carrier detection */
+ struct notifier_block nb;
+ bool is_crc32;
+ spinlock_t tx_lock; /* Protect tx descriptors */
+ struct qmc_hdlc_desc tx_descs[8];
+ unsigned int tx_out;
+ struct qmc_hdlc_desc rx_descs[4];
+ u32 slot_map;
+};
+
+static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
+{
+ return dev_to_hdlc(netdev)->priv;
+}
+
+static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
+{
+ struct framer_status framer_status;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
+ return ret;
+ }
+ if (framer_status.link_is_on)
+ netif_carrier_on(qmc_hdlc->netdev);
+ else
+ netif_carrier_off(qmc_hdlc->netdev);
+
+ return 0;
+}
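A note on the locking style used here and in the tx paths below: guard() and scoped_guard() come from linux/cleanup.h and drop the spinlock automatically when the enclosing scope ends, which is why the early error return above needs no explicit unlock. A rough open-coded equivalent, for illustration only (error logging omitted):

/* Illustration only: roughly what the guarded section above expands to. */
static int qmc_hdlc_set_carrier_open_coded(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&qmc_hdlc->carrier_lock, flags);
	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (!ret) {
		if (framer_status.link_is_on)
			netif_carrier_on(qmc_hdlc->netdev);
		else
			netif_carrier_off(qmc_hdlc->netdev);
	}
	spin_unlock_irqrestore(&qmc_hdlc->carrier_lock, flags);

	return ret;
}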
+
+static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb);
+ int ret;
+
+ if (action != FRAMER_EVENT_STATUS)
+ return NOTIFY_DONE;
+
+ ret = qmc_hdlc_framer_set_carrier(qmc_hdlc);
+ return ret ? NOTIFY_DONE : NOTIFY_OK;
+}
+
+static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc)
+{
+ struct framer_status framer_status;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_power_on(qmc_hdlc->framer);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Be sure that get_status is supported */
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
+ goto framer_power_off;
+ }
+
+ qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier;
+ ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret);
+ goto framer_power_off;
+ }
+
+ return 0;
+
+framer_power_off:
+ framer_power_off(qmc_hdlc->framer);
+ return ret;
+}
+
+static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc)
+{
+ if (!qmc_hdlc->framer)
+ return;
+
+ framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb);
+ framer_power_off(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface,
+ const te1_settings *te1)
+{
+ struct framer_config config;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_get_config(qmc_hdlc->framer, &config);
+ if (ret)
+ return ret;
+
+ switch (if_iface) {
+ case IF_IFACE_E1:
+ config.iface = FRAMER_IFACE_E1;
+ break;
+ case IF_IFACE_T1:
+ config.iface = FRAMER_IFACE_T1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (te1->clock_type) {
+ case CLOCK_DEFAULT:
+ /* Keep current value */
+ break;
+ case CLOCK_EXT:
+ config.clock_type = FRAMER_CLOCK_EXT;
+ break;
+ case CLOCK_INT:
+ config.clock_type = FRAMER_CLOCK_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ config.line_clock_rate = te1->clock_rate;
+
+ return framer_set_config(qmc_hdlc->framer, &config);
+}
+
+static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1)
+{
+ struct framer_config config;
+ int ret;
+
+ if (!qmc_hdlc->framer) {
+ *if_iface = IF_IFACE_E1;
+ return 0;
+ }
+
+ ret = framer_get_config(qmc_hdlc->framer, &config);
+ if (ret)
+ return ret;
+
+ switch (config.iface) {
+ case FRAMER_IFACE_E1:
+ *if_iface = IF_IFACE_E1;
+ break;
+ case FRAMER_IFACE_T1:
+ *if_iface = IF_IFACE_T1;
+ break;
+ }
+
+ if (!te1)
+ return 0; /* Only iface type requested */
+
+ switch (config.clock_type) {
+ case FRAMER_CLOCK_EXT:
+ te1->clock_type = CLOCK_EXT;
+ break;
+ case FRAMER_CLOCK_INT:
+ te1->clock_type = CLOCK_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ te1->clock_rate = config.line_clock_rate;
+ return 0;
+}
+
+static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc)
+{
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_init(qmc_hdlc->framer);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc)
+{
+ if (!qmc_hdlc->framer)
+ return;
+
+ framer_exit(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);
+
+#define QMC_HDLC_RX_ERROR_FLAGS \
+ (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA | \
+ QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)
+
+static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int flags)
+{
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
+ int ret;
+
+ netdev = desc->netdev;
+ qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+
+ if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
+ netdev->stats.rx_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
+ netdev->stats.rx_over_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_UNA) /* Received bit count not a multiple of 8 */
+ netdev->stats.rx_frame_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
+ netdev->stats.rx_frame_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
+ netdev->stats.rx_crc_errors++;
+ kfree_skb(desc->skb);
+ } else {
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+
+ skb_put(desc->skb, length);
+ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+ netif_rx(desc->skb);
+ }
+
+ /* Re-queue a transfer using the same descriptor */
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
+ netdev->stats.rx_errors++;
+ }
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
+{
+ int ret;
+
+ desc->skb = dev_alloc_skb(size);
+ if (!desc->skb)
+ return -ENOMEM;
+
+ desc->dma_size = size;
+ desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+ desc->dma_size, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+ if (ret)
+ goto free_skb;
+
+ ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+ qmc_hcld_recv_complete, desc);
+ if (ret)
+ goto dma_unmap;
+
+ return 0;
+
+dma_unmap:
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+free_skb:
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ return ret;
+}
+
+static void qmc_hdlc_xmit_complete(void *context)
+{
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
+ struct sk_buff *skb;
+
+ netdev = desc->netdev;
+ qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+ skb = desc->skb;
+ desc->skb = NULL; /* Release the descriptor */
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ }
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+}
+
+static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
+{
+ int ret;
+
+ desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+ desc->dma_size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "failed to map skb\n");
+ return ret;
+ }
+
+ ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+ qmc_hdlc_xmit_complete, desc);
+ if (ret) {
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+ dev_err(qmc_hdlc->dev, "qmc chan write returns %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_hdlc_desc *desc;
+ int err;
+
+ guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);
+
+ desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
+ if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
+ /* Should never happen.
+ * Previous xmit should have already stopped the queue.
+ */
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc->netdev = netdev;
+ desc->dma_size = skb->len;
+ desc->skb = skb;
+ err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
+ if (err) {
+ desc->skb = NULL; /* Release the descriptor */
+ if (err == -EBUSY) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);
+
+ if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
+ netif_stop_queue(netdev);
+
+ return NETDEV_TX_OK;
+}
+
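+/* The TE1 slot_map is expressed relative to the timeslots available on the
+ * QMC channel, while the QMC ts_mask is expressed over all 64 timeslots.
+ * bitmap_scatter() spreads the slot_map bits over the positions set in the
+ * available mask and bitmap_gather() performs the reverse translation.
+ * For instance, with ts_mask_avail = 0x1313 and slot_map = 0x005a, the
+ * resulting ts_mask is 0x0302.
+ */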
+static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
+ u32 slot_map, struct qmc_chan_ts_info *ts_info)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+
+ /* Tx and Rx available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(map, slot_map);
+ bitmap_scatter(ts_mask, map, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
+ map, ts_mask_avail, ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
+ ts_info->rx_ts_mask = ts_info->tx_ts_mask;
+ return 0;
+}
+
+static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
+ const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+ u32 slot_array[2];
+
+ /* Tx and Rx masks and available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
+ dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask, ts_info->tx_ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
+ bitmap_gather(map, ts_mask, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
+ ts_mask_avail, ts_mask, map);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr32(slot_array, map, 64);
+ if (slot_array[1]) {
+ dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%64pb, %64pb) -> %64pb\n",
+ ts_mask_avail, ts_mask, map);
+ return -EINVAL;
+ }
+
+ *slot_map = slot_array[0];
+ return 0;
+}
+
+static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
+{
+ struct qmc_chan_ts_info ts_info;
+ int ret;
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+ ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
+ if (ret)
+ return ret;
+
+ ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+
+ qmc_hdlc->slot_map = te1->slot_map;
+
+ ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer set iface failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
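+/* SIOCWANDEV handler. For IF_GET_IFACE, generic HDLC userspace typically
+ * probes with a zero or too-small buffer first: in that case only the
+ * interface type is returned, or -ENOBUFS with ifs->size set to the needed
+ * size so that the caller can retry with a full te1_settings buffer.
+ */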
+static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ te1_settings te1;
+ int ret;
+
+ switch (ifs->type) {
+ case IF_GET_IFACE:
+ if (ifs->size < sizeof(te1)) {
+ /* Retrieve type only */
+ ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL);
+ if (ret)
+ return ret;
+
+ if (!ifs->size)
+ return 0; /* only type requested */
+
+ ifs->size = sizeof(te1); /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ memset(&te1, 0, sizeof(te1));
+
+ /* Retrieve info from framer */
+ ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1);
+ if (ret)
+ return ret;
+
+ /* Update slot_map */
+ te1.slot_map = qmc_hdlc->slot_map;
+
+ if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_E1:
+ case IF_IFACE_T1:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
+ return -EFAULT;
+
+ return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);
+
+ default:
+ return hdlc_ioctl(netdev, ifs);
+ }
+}
+
+static int qmc_hdlc_open(struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_chan_param chan_param;
+ struct qmc_hdlc_desc *desc;
+ int ret;
+ int i;
+
+ ret = qmc_hdlc_framer_start(qmc_hdlc);
+ if (ret)
+ return ret;
+
+ ret = hdlc_open(netdev);
+ if (ret)
+ goto framer_stop;
+
+ /* Update carrier */
+ qmc_hdlc_framer_set_carrier(qmc_hdlc);
+
+ chan_param.mode = QMC_HDLC;
+ /* max_rx_frame_size: HDLC_MAX_MRU + 4 for the CRC
+ * max_rx_buf_size: HDLC_MAX_MRU + 4 + 8 for the CRC and some extra space
+ * needed by the QMC
+ */
+ chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8;
+ chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4;
+ chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32;
+ ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret);
+ goto hdlc_close;
+ }
+
+ /* Queue as many recv descriptors as possible */
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+ desc = &qmc_hdlc->rx_descs[i];
+
+ desc->netdev = netdev;
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size);
+ if (ret == -EBUSY && i != 0)
+ break; /* All of the QMC channel Rx capacity is in use */
+ if (ret)
+ goto free_desc;
+ }
+
+ ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret);
+ goto free_desc;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+free_desc:
+ qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ while (i--) {
+ desc = &qmc_hdlc->rx_descs[i];
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+hdlc_close:
+ hdlc_close(netdev);
+framer_stop:
+ qmc_hdlc_framer_stop(qmc_hdlc);
+ return ret;
+}
+
+static int qmc_hdlc_close(struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_hdlc_desc *desc;
+ int i;
+
+ qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+
+ netif_stop_queue(netdev);
+
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
+ desc = &qmc_hdlc->tx_descs[i];
+ if (!desc->skb)
+ continue;
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_TO_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+ desc = &qmc_hdlc->rx_descs[i];
+ if (!desc->skb)
+ continue;
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+
+ hdlc_close(netdev);
+ qmc_hdlc_framer_stop(qmc_hdlc);
+ return 0;
+}
+
+static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ if (encoding != ENCODING_NRZ)
+ return -EINVAL;
+
+ switch (parity) {
+ case PARITY_CRC16_PR1_CCITT:
+ qmc_hdlc->is_crc32 = false;
+ break;
+ case PARITY_CRC32_PR1_CCITT:
+ qmc_hdlc->is_crc32 = true;
+ break;
+ default:
+ dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops qmc_hdlc_netdev_ops = {
+ .ndo_open = qmc_hdlc_open,
+ .ndo_stop = qmc_hdlc_close,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_siocwandev = qmc_hdlc_ioctl,
+};
+
+static int qmc_hdlc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qmc_chan_ts_info ts_info;
+ struct qmc_hdlc *qmc_hdlc;
+ struct qmc_chan_info info;
+ hdlc_device *hdlc;
+ int ret;
+
+ qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
+ if (!qmc_hdlc)
+ return -ENOMEM;
+
+ qmc_hdlc->dev = dev;
+ spin_lock_init(&qmc_hdlc->tx_lock);
+ spin_lock_init(&qmc_hdlc->carrier_lock);
+
+ qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
+ if (IS_ERR(qmc_hdlc->qmc_chan))
+ return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
+ "get QMC channel failed\n");
+
+ ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
+ if (ret)
+ return dev_err_probe(dev, ret, "get QMC channel info failed\n");
+
+ if (info.mode != QMC_HDLC)
+ return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
+ info.mode);
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret)
+ return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");
+
+ ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
+ if (ret)
+ return ret;
+
+ qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
+ if (IS_ERR(qmc_hdlc->framer))
+ return PTR_ERR(qmc_hdlc->framer);
+
+ ret = qmc_hdlc_framer_init(qmc_hdlc);
+ if (ret)
+ return ret;
+
+ qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
+ if (!qmc_hdlc->netdev) {
+ ret = -ENOMEM;
+ goto framer_exit;
+ }
+
+ hdlc = dev_to_hdlc(qmc_hdlc->netdev);
+ hdlc->attach = qmc_hdlc_attach;
+ hdlc->xmit = qmc_hdlc_xmit;
+ SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
+ qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
+ qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
+ ret = register_hdlc_device(qmc_hdlc->netdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register hdlc device\n");
+ goto free_netdev;
+ }
+
+ platform_set_drvdata(pdev, qmc_hdlc);
+ return 0;
+
+free_netdev:
+ free_netdev(qmc_hdlc->netdev);
+framer_exit:
+ qmc_hdlc_framer_exit(qmc_hdlc);
+ return ret;
+}
+
+static int qmc_hdlc_remove(struct platform_device *pdev)
+{
+ struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);
+
+ unregister_hdlc_device(qmc_hdlc->netdev);
+ free_netdev(qmc_hdlc->netdev);
+ qmc_hdlc_framer_exit(qmc_hdlc);
+
+ return 0;
+}
+
+static const struct of_device_id qmc_hdlc_id_table[] = {
+ { .compatible = "fsl,qmc-hdlc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
+
+static struct platform_driver qmc_hdlc_driver = {
+ .driver = {
+ .name = "fsl-qmc-hdlc",
+ .of_match_table = qmc_hdlc_id_table,
+ },
+ .probe = qmc_hdlc_probe,
+ .remove = qmc_hdlc_remove,
+};
+module_platform_driver(qmc_hdlc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("QMC HDLC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index a176653c8861..df275b4fccb6 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -263,7 +263,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
* call skb_cow_data, so that there's no chance that data is removed
* from the skb, so that later we can extract the original endpoint.
*/
- offset = skb->data - skb_network_header(skb);
+ offset = -skb_network_offset(skb);
skb_push(skb, offset);
num_frags = skb_cow_data(skb, 0, &trailer);
offset += sizeof(struct message_data);
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2fceea9f6550..e3fd48dd3909 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1759,6 +1759,10 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
}
static const struct ieee80211_ops adm8211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = adm8211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index a742cec44e3d..815f8f599f5d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1358,6 +1358,10 @@ static void ar5523_configure_filter(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ar5523_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = ar5523_start,
.stop = ar5523_stop,
.tx = ar5523_tx,
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0032f8aa892f..9ce6f49ab261 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -3613,7 +3613,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
goto err_free_mac;
}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index e5ef0352e319..8d274e0f374b 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _COREDUMP_H_
@@ -13,7 +13,11 @@
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple
+ * struct ath10k_dump_ram_data_hdr
+ * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 907e1e13871a..dbaf262cd7c1 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -381,7 +382,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
htt->target_version_major != 3) {
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4a9270e2a4c8..603f6de62b0a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HTT_H_
@@ -906,7 +906,7 @@ struct htt_data_tx_completion_ext {
__le16 msdus_rssi[];
} __packed;
-/**
+/*
* @brief target -> host TX completion indication message definition
*
* @details
@@ -1474,15 +1474,19 @@ enum htt_q_depth_type {
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
/**
- * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
*
* Defines host q state format and behavior. See htt_q_state.
*
+ * @paddr: Queue physical address
+ * @num_peers: Number of supported peers
+ * @num_tids: Number of supported TIDs
* @record_size: Defines the size of each host q entry in bytes. In practice
* however firmware (at least 10.4.3-00191) ignores this host
* configuration value and uses hardcoded value of 1.
* @record_multiplier: This is valid only when q depth type is MSDUs. It
* defines the exponent for the power of 2 multiplication.
+ * @pad: struct padding for 32-bit alignment
*/
struct htt_q_state_conf {
__le32 paddr;
@@ -1518,7 +1522,7 @@ struct htt_frag_desc_bank_cfg64 {
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
/**
- * htt_q_state - shared between host and firmware via DMA
+ * struct htt_q_state - shared between host and firmware via DMA
*
* This structure is used for the host to expose it's software queue state to
* firmware so that its rate control can schedule fetch requests for optimized
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 090bcf148d0c..e322b528baaf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "mac.h"
@@ -2034,8 +2034,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
- ieee80211_beacon_update_cntdwn(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_beacon_update_cntdwn(vif, 0);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
@@ -2047,7 +2047,7 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
} else {
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
@@ -4056,7 +4056,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
!(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -7065,7 +7065,7 @@ static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *st
if (sta) {
if (!sta->wme)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
arsta = (struct ath10k_sta *)sta->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3de2de6d44bc..5c34b156b4ff 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/pci.h>
@@ -889,7 +889,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
@@ -2668,7 +2668,7 @@ static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ar_pci->pci_soft_reset)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_soft_reset(ar);
}
@@ -2808,7 +2808,7 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON(!ar_pci->pci_hard_reset))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_hard_reset(ar);
}
@@ -3594,7 +3594,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
break;
default:
WARN_ON(1);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 6b6aa3c36744..aed97fd121ba 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
@@ -851,6 +852,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
arg->desc_id = ev->desc_id;
arg->status = ev->status;
@@ -1347,7 +1352,7 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
__le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
__le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
__le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
arg->min_tx_power = ev->hw_min_tx_power;
@@ -2119,9 +2124,9 @@ static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 83a8f07a687f..8a2f87d0a3a3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
@@ -2343,7 +2343,7 @@ struct wmi_tlv_adaptive_qcs {
} __packed;
/**
- * wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
*
* @WMI_TLV_TX_PAUSE_ID_MCC: used for by multi-channel firmware scheduler.
* Only vdev_map is valid.
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 88befe92f95d..2e9661f4bea8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
@@ -3884,8 +3884,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->bss_conf.csa_active &&
- ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
+ ieee80211_csa_finish(arvif->vif, 0);
continue;
}
@@ -6927,14 +6927,14 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
}
static void
-ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
const struct wmi_start_scan_arg *arg)
{
struct wmi_ie_data *ie;
struct wmi_chan_list *channels;
struct wmi_ssid_list *ssids;
struct wmi_bssid_list *bssids;
- void *ptr = tlvs->tlvs;
+ void *ptr = tlvs;
int i;
if (arg->n_channels) {
@@ -7012,7 +7012,7 @@ ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
cmd->burst_duration_ms = __cpu_to_le32(0);
@@ -7041,7 +7041,7 @@ ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
return skb;
@@ -8733,9 +8733,9 @@ int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
case WMI_VDEV_SUBTYPE_MESH_11S:
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8755,9 +8755,9 @@ static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8779,7 +8779,7 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
@@ -8918,8 +8918,6 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
if (!skb)
return ERR_PTR(-ENOMEM);
- memset(skb->data, 0, sizeof(*cmd));
-
cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9146df98fcee..2379501225a4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_H_
@@ -3008,8 +3008,11 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
- * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable
+ * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
* @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
+ * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer
+ * TID configuration for QoS related settings
+ * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@@ -3069,7 +3072,10 @@ struct host_memory_chunk {
struct wmi_host_mem_chunks {
__le32 count;
/* some fw revisions require at least 1 chunk regardless of count */
- struct host_memory_chunk items[1];
+ union {
+ struct host_memory_chunk item;
+ DECLARE_FLEX_ARRAY(struct host_memory_chunk, items);
+ };
} __packed;
struct wmi_init_cmd {
@@ -3215,23 +3221,16 @@ struct wmi_start_scan_common {
__le32 scan_ctrl_flags;
} __packed;
-struct wmi_start_scan_tlvs {
- /* TLV parameters. These includes channel list, ssid list, bssid list,
- * extra ies.
- */
- u8 tlvs[0];
-} __packed;
-
struct wmi_start_scan_cmd {
struct wmi_start_scan_common common;
__le32 burst_duration_ms;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_10x_start_scan_cmd {
struct wmi_start_scan_common common;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
struct wmi_ssid_arg {
@@ -4260,13 +4259,6 @@ struct wmi_peer_sta_ps_state_chg_event {
__le32 peer_ps_state;
} __packed;
-struct wmi_pdev_chanlist_update_event {
- /* number of channels */
- __le32 num_chan;
- /* array of channels */
- struct wmi_channel channel_list[1];
-} __packed;
-
#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
struct wmi_debug_mesg_event {
@@ -5793,30 +5785,6 @@ struct wmi_bcn_prb_info {
/* app IE */
} __packed;
-struct wmi_bcn_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* TIM IE offset from the beginning of the template. */
- __le32 tim_ie_offset;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* variable length data */
- u8 data[1];
-} __packed;
-
-struct wmi_prb_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* Variable length data */
- u8 data[1];
-} __packed;
-
enum wmi_sta_ps_mode {
/* enable power save for the given STA VDEV */
WMI_STA_PS_MODE_DISABLED = 0,
@@ -7197,7 +7165,13 @@ struct wmi_tdls_peer_capabilities {
__le32 is_peer_responder;
__le32 pref_offchan_num;
__le32 pref_offchan_bw;
- struct wmi_channel peer_chan_list[1];
+ union {
+ /* to match legacy implementation allocate room for
+ * at least one record even if peer_chan_len is 0
+ */
+ struct wmi_channel peer_chan_min_allocation;
+ DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list);
+ };
} __packed;
struct wmi_10_4_tdls_peer_update_cmd {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 0c6ecbb9a066..c78bce19bd75 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -122,6 +122,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -205,6 +206,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "qca6390 hw2.0",
@@ -255,7 +257,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
@@ -290,6 +292,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "qcn9074 hw1.0",
@@ -372,6 +375,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "wcn6855 hw2.0",
@@ -422,7 +426,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -457,6 +461,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6855 hw2.1",
@@ -505,7 +510,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -540,6 +545,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6750 hw1.0",
@@ -621,6 +627,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -702,6 +709,93 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ },
+ {
+ .name = "qca2066 hw2.1",
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ .fw = {
+ .dir = "QCA2066/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
};
@@ -1775,10 +1869,9 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_lock(&ab->core_lock);
ath11k_thermal_unregister(ab);
- ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_spectral_deinit(ab);
- ath11k_hif_stop(ab);
+ ath11k_ce_cleanup_pipes(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -2033,6 +2126,9 @@ static void ath11k_core_reset(struct work_struct *work)
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH11K_RECOVER_START_TIMEOUT_HZ);
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 02e160d831be..b3fb74a226fb 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -147,6 +147,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
+ ATH11K_HW_QCA2066_HW21,
};
enum ath11k_firmware_mode {
@@ -314,6 +315,43 @@ struct ath11k_rekey_data {
bool enable_offload;
};
+/**
+ * struct ath11k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath11k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/**
+ * struct ath11k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath11k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6ghz_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 ap_constraint_power;
+ s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
+ struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+};
+
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -368,6 +406,8 @@ struct ath11k_vif {
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
+
+ struct ath11k_reg_tpc_power_info reg_tpc_info;
};
struct ath11k_vif_iter {
@@ -735,6 +775,7 @@ struct ath11k {
/* protected by conf_mutex */
bool ps_state_enable;
bool ps_timekeeper_enable;
+ s8 max_allowed_tx_power;
};
struct ath11k_band_cap {
@@ -918,6 +959,7 @@ struct ath11k_base {
* This may or may not be used during the runtime
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct cur_regulatory_info *reg_info_store;
/* Current DFS Regulatory */
enum ath11k_dfs_region dfs_region;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8975dc57ad77..1a62407e5a9f 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -104,11 +104,14 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached)
+ if (ring->cached) {
+ dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
+ DMA_FROM_DEVICE);
kfree(ring->vaddr_unaligned);
- else
+ } else {
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
+ }
ring->vaddr_unaligned = NULL;
}
@@ -249,7 +252,18 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->paddr_unaligned = dma_map_single(ab->dev,
+ ring->vaddr_unaligned,
+ ring->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
+ kfree(ring->vaddr_unaligned);
+ ring->vaddr_unaligned = NULL;
+ return -ENOMEM;
+ }
}
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index c1072e66e3e8..272b1c35f98d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -103,7 +103,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@@ -1018,7 +1018,7 @@ int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index c060c4b5c0cc..f3d04568c221 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -626,15 +626,30 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
return NULL;
}
+static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
+ struct hal_srng *srng, dma_addr_t *paddr)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+ *paddr = srng->ring_base_paddr +
+ sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
+ return srng->ring_base_vaddr + srng->u.dst_ring.tp;
+ }
+
+ return NULL;
+}
+
static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
struct hal_srng *srng)
{
+ dma_addr_t desc_paddr;
u32 *desc;
/* prefetch only if desc is available */
- desc = ath11k_hal_srng_dst_peek(ab, srng);
+ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
if (likely(desc)) {
- dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+ dma_sync_single_for_cpu(ab->dev, desc_paddr,
(srng->entry_size * sizeof(u32)),
DMA_FROM_DEVICE);
prefetch(desc);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 80447f488954..65e8f244ebb9 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@@ -674,6 +674,7 @@ struct hal_srng_config {
* @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
*/
enum hal_rx_buf_return_buf_manager {
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index e758ee8e17c9..8f7dd43dc1bd 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -246,7 +246,7 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath11k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath11k_warn(ab, "Unknown reo command %d\n", type);
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 77d8f9237680..caa6dc12a790 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -58,7 +58,7 @@ static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
- config->num_vdevs = 4;
+ config->num_vdevs = ab->hw_params.num_vdevs;
config->num_peers = 16;
config->num_tids = 32;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 1b070747a5db..14ef4eb48f80 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -226,6 +226,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
+ bool support_dual_stations;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index b13525bbbb80..a6a37d67a50a 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -255,9 +255,6 @@ static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
{
enum nl80211_he_ru_alloc ret;
@@ -1580,7 +1577,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
return;
if (vif->bss_conf.color_change_active &&
- ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
@@ -1589,7 +1586,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
arvif->bcca_zero_sent = false;
if (vif->bss_conf.color_change_active)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, 0);
ath11k_mac_setup_bcn_tmpl(arvif);
}
@@ -2297,6 +2294,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ /* Initialize rx_mcs_160 to 9 which is an invalid value */
+ rx_mcs_160 = 9;
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
@@ -2308,6 +2307,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
}
}
+ /* Initialize rx_mcs_80 to 9 which is an invalid value */
+ rx_mcs_80 = 9;
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
@@ -3026,7 +3027,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
peer_arg.is_assoc = true;
+
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -3049,12 +3057,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
- ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
- arvif->vdev_id, bss_conf->bssid);
- return;
- }
-
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
@@ -3397,6 +3399,18 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return 0;
}
+static bool ath11k_mac_supports_station_tpc(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -3596,7 +3610,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
-
arvif->txpower = info->txpower;
ath11k_mac_txpower_recalc(ar);
}
@@ -4000,7 +4013,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
req->ssids[i].ssid_len);
}
} else {
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
@@ -4906,100 +4919,6 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
ar->num_stations--;
}
-static int ath11k_mac_station_add(struct ath11k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct peer_create_params peer_param;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- ret = ath11k_mac_inc_num_stations(arvif, sta);
- if (ret) {
- ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
- ar->max_num_stations);
- goto exit;
- }
-
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
- }
-
- peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
- peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
-
- ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
- if (ret) {
- ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- goto free_rx_stats;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
- if (!arsta->tx_stats) {
- ret = -ENOMEM;
- goto free_peer;
- }
- }
-
- if (ieee80211_vif_is_mesh(vif)) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_USE_4ADDR, 1);
- if (ret) {
- ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
- sta->addr, ret);
- goto free_tx_stats;
- }
- }
-
- ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
- if (ret) {
- ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
- goto free_tx_stats;
- }
-
- if (ab->hw_params.vdev_start_delay &&
- !arvif->is_started &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath11k_start_vdev_delay(ar->hw, vif);
- if (ret) {
- ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
- goto free_tx_stats;
- }
- }
-
- ewma_avg_rssi_init(&arsta->avg_rssi);
- return 0;
-
-free_tx_stats:
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-free_peer:
- ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
-free_rx_stats:
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-dec_num_station:
- ath11k_mac_dec_num_stations(arvif, sta);
-exit:
- return ret;
-}
-
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
struct ieee80211_sta *sta)
{
@@ -5028,140 +4947,6 @@ static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
return bw;
}
-static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
-{
- struct ath11k *ar = hw->priv;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct ath11k_peer *peer;
- int ret = 0;
-
- /* cancel must be done outside the mutex to avoid deadlock */
- if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- cancel_work_sync(&arsta->update_wk);
- cancel_work_sync(&arsta->set_4addr_wk);
- }
-
- mutex_lock(&ar->conf_mutex);
-
- if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- arsta->arvif = arvif;
- arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
- INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
- INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
-
- ret = ath11k_mac_station_add(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION;
-
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- if (!skip_peer_delete) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab,
- "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab,
- ATH11K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- }
-
- ath11k_mac_dec_num_stations(arvif, sta);
- mutex_lock(&ar->ab->tbl_mtx_lock);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (skip_peer_delete && peer) {
- peer->sta = NULL;
- } else if (peer && peer->sta == sta) {
- ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- ath11k_peer_rhash_delete(ar->ab, peer);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
- mutex_unlock(&ar->ab->tbl_mtx_lock);
-
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
- } else if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_assoc(ar, vif, sta, false);
- if (ret)
- ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
-
- spin_lock_bh(&ar->data_lock);
- /* Set arsta bw and prev bw */
- arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
- arsta->bw_prev = arsta->bw;
- spin_unlock_bh(&ar->data_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
-
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
- } else if (old_state == IEEE80211_STA_AUTHORIZED &&
- new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
-
- spin_unlock_bh(&ar->ab->base_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_disassoc(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
- }
-
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -6940,6 +6725,14 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ret);
}
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar)) {
+ struct cur_regulatory_info *reg_info;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n");
+ ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP);
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7266,6 +7059,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
+ /* TODO: For now we only set TPC power here. However when
+ * channel changes, say CSA, it should be updated again.
+ */
+ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) {
+ ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx);
+ ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
if (!restart)
ar->num_started_vdevs++;
@@ -7542,8 +7344,8 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
@@ -7589,6 +7391,501 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return 0;
}
+static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ arvif->is_started = false;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
+{
+ switch (txpwr_intrprt) {
+ /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
+ * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
+ txpwr_cnt = txpwr_cnt + 1;
+ break;
+ /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
+ * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
+ txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
+ break;
+ }
+
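+	/* For example, a Maximum Transmit Power Count of 2 with the EIRP
+	 * interpretations (0 or 2) yields 3 power values above, while a
+	 * count of 3 with the PSD interpretations (1 or 3) yields
+	 * BIT(2) = 4 values.
+	 */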
+ return txpwr_cnt;
+}
+
+static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
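+	/* With PSD the limit applies per 20 MHz sub-channel, so the count is
+	 * the number of 20 MHz sub-channels in the bandwidth; without PSD one
+	 * EIRP limit applies per nested bandwidth (20/40/80/160 MHz). For
+	 * example, an 80 MHz channel needs 4 PSD levels but only 3 EIRP levels.
+	 */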
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ default:
+ return 1;
+ }
+ }
+}
+
+static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+	/* Get the center frequency of the chandef's lowest channel number.
+ * For example,
+ * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1
+ * with center frequency 5955, its diff is 5965 - 5955 = 10.
+ * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1
+ * with center frequency 5955, its diff is 5985 - 5955 = 30.
+ * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1
+ * with center frequency 5955, its diff is 6025 - 5955 = 70.
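+	 * In general, diff_seq is bandwidth / 2 - 10 for the supported widths.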
+ */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
+
+static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+	/* Get the segment center frequency for the given bandwidth.
+	 * start_seq is the lowest channel number's center frequency.
+	 * seq 0/1/2/3 selects 20 MHz/40 MHz/80 MHz/160 MHz&80P80.
+ * For example,
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70.
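+	 * In general, the offset from start_seq is 10 * (2^seq - 1) MHz, i.e. 0/10/30/70.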
+ */
+ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3)
+ return chan_def->center_freq2;
+
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
+
+static void ath11k_mac_get_psd_channel(struct ath11k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+	/* Get the center frequency of each 20 MHz sub-channel.
+	 * For example, if the chan is 160 MHz with center frequency 6025,
+	 * it includes 8 channels: 1/5/9/13/17/21/25/29.
+	 * Channel number 1's center frequency is 5955; that is parameter start_freq.
+	 * Parameter i is the index of the 8 channels (0~7), so channels
+	 * 1/5/9/13/17/21/25/29 map to i=0/1/2/3/4/5/6/7 and to center
+	 * frequencies 5955/5975/5995/6015/6035/6055/6075/6095.
+	 * The gap between channels is 20 MHz; parameter step_freq is that gap.
+	 * Once the center frequency of each channel is known, it is easy to
+	 * look up its struct ieee80211_channel and read max_reg_power.
+	 */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath11k_mac_get_eirp_power(struct ath11k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+	/* Get the segment center frequency for the 20 MHz/40 MHz/80 MHz/
+	 * 160 MHz&80P80 bandwidth and then add 10 MHz to it, which yields the
+	 * center frequency of a channel number.
+	 * For example, when the configured channel number is 1:
+	 * center frequency is 5965 when bandwidth=40 MHz; plus 10 it is 5975,
+	 * which is channel number 5.
+	 * center frequency is 5985 when bandwidth=80 MHz; plus 10 it is 5995,
+	 * which is channel number 9.
+	 * center frequency is 6025 when bandwidth=160 MHz; plus 10 it is 6035,
+	 * which is channel number 17.
+	 * Once the center frequency is known, it is easy to look up its
+	 * struct ieee80211_channel and read max_reg_power.
+	 */
+ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i);
+
+	/* For 20 MHz the segment center frequency already equals the channel's center frequency */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ psd_power, tx_power;
+ s8 eirp_power = 0;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ }
+
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+		/* STA received TPE IE */
+ if (is_tpe_present) {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ /* convert psd power to EIRP power based
+ * on channel width
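+					 * (EIRP ~= PSD + 10*log10(bw_mhz):
+					 * +13 dB for 20 MHz plus 3 dB per
+					 * doubling of bandwidth)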
+ */
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+ /* STA not received TPE IE */
+ } else {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+ /* If firmware updated max tx power is non zero, then take
+ * the min of firmware updated ap tx power
+ * and max power derived from above mentioned parameters.
+ */
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "eirp power : %d firmware report power : %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+			/* Firmware reports a lower max_allowed_tx_power in the
+			 * vdev start response. For 6 GHz, firmware is not aware
+			 * of the EIRP power unless the driver sets it through
+			 * the WMI TPC command. So a radio which does not support
+			 * idle power save can send the maximum calculated EIRP
+			 * power directly to firmware through the TPC command,
+			 * without the min comparison with the vdev start
+			 * response's max_allowed_tx_power.
+			 */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+ /* If firmware updated max tx power is non zero, then take
+ * the min of firmware updated ap tx power
+ * and max power derived from above mentioned parameters.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type);
+}
+
+static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_tx_pwr_env *single_tpe;
+ enum wmi_reg_6ghz_client_type client_type;
+ struct cur_regulatory_info *reg_info;
+ int i;
+ u8 pwr_count, pwr_interpret, pwr_category;
+ u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
+ bool use_local_tpe, non_psd_set = false, psd_set = false;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category == client_type) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
+ pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
+ local_tpe_count++;
+ else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
+ pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
+ reg_tpe_count++;
+ }
+ }
+
+ if (!reg_tpe_count && !local_tpe_count) {
+ ath11k_warn(ab,
+			    "no transmit power envelope matches client power type %d\n",
+ client_type);
+ return;
+ } else if (!reg_tpe_count) {
+ use_local_tpe = true;
+ } else {
+ use_local_tpe = false;
+ }
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category != client_type)
+ continue;
+
+ /* get local transmit power envelope */
+ if (use_local_tpe) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ /* get regulatory transmit power envelope */
+ } else {
+ if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ }
+ }
+
+ if (non_psd_set && !psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = false;
+ arvif->reg_tpc_info.eirp_power = 0;
+
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
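+			/* IE power values are in 0.5 dBm units, so halve to get dBm */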
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+
+ if (psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = true;
+
+ if (pwr_count == 0) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power : %d\n", single_tpe->tx_power[0]);
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&ctx->def);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
+ } else {
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+ }
+}
+
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -7599,7 +7896,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct peer_create_params param;
+ struct cur_regulatory_info *reg_info;
+ enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -7607,6 +7905,24 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
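+		/* For a 6 GHz STA, rebuild the reg rules for the AP power type
+		 * advertised by the BSS and parse its transmit power envelopes
+		 * before the vdev is started.
+		 */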
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath11k_reg_handle_chan_list(ab, reg_info, power_type);
+ arvif->chanctx = *ctx;
+ ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
+ }
+
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
@@ -7622,21 +7938,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath11k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_start(ar);
if (ret) {
@@ -7649,15 +7950,17 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, ctx);
- if (ret) {
- ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
- arvif->vdev_id, vif->addr,
- ctx->def.chan->center_freq, ret);
- goto out;
- }
+ if (!arvif->is_started) {
+ ret = ath11k_mac_vdev_start(arvif, ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
- arvif->is_started = true;
+ arvif->is_started = true;
+ }
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
@@ -7697,8 +8000,6 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
- WARN_ON(!arvif->is_started);
-
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
spin_lock_bh(&ab->base_lock);
@@ -7722,24 +8023,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_mac_vdev_stop(arvif);
- if (ret)
- ath11k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
-
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (arvif->is_started) {
+ ret = ath11k_mac_vdev_stop(arvif);
if (ret)
- ath11k_warn(ar->ab,
- "failed to delete peer %pM for vdev %d: %d\n",
- arvif->bssid, arvif->vdev_id, ret);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "removed peer %pM vdev %d after vdev stop\n",
- arvif->bssid, arvif->vdev_id);
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
}
if (ab->hw_params.vdev_start_delay &&
@@ -8962,8 +9252,8 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_active = scan_time_msec;
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
- arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg->scan_f_passive = 1;
+ arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
ret = ath11k_start_scan(ar, arg);
@@ -9097,6 +9387,252 @@ err_fallback:
return 0;
}
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ewma_avg_rssi_init(&arsta->avg_rssi);
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_station_remove(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ int ret;
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_stop_vdev_early(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath11k_mac_station_remove(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+			ath11k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -9288,6 +9824,33 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
+static void ath11k_mac_setup_mac_address_list(struct ath11k *ar)
+{
+ struct mac_address *addresses;
+ u16 n_addresses;
+ int i;
+
+ if (!ar->ab->hw_params.support_dual_stations)
+ return;
+
+ n_addresses = ar->ab->hw_params.num_vdevs;
+ addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
+ if (!addresses)
+ return;
+
+ memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN);
+ for (i = 1; i < n_addresses; i++) {
+ memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
+ /* set Local Administered Address bit */
+ addresses[i].addr[0] |= 0x2;
+
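+		/* stagger the extra addresses in the upper nibble of the first
+		 * octet, e.g. a base first octet of 0x00 becomes 0x02, 0x12,
+		 * 0x22, ...
+		 */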
+ addresses[i].addr[0] += (i - 1) << 4;
+ }
+
+ ar->hw->wiphy->addresses = addresses;
+ ar->hw->wiphy->n_addresses = n_addresses;
+}
+
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -9307,28 +9870,46 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
- limits[0].max = 1;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
- limits[1].max = 16;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (ab->hw_params.support_dual_stations) {
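+		/* dual-station hardware: up to two STA interfaces plus one
+		 * AP/mesh interface, spread across two different channels
+		 */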
+ limits[0].max = 2;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ limits[1].max = 1;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[0].num_different_channels = 2;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ } else {
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+ }
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -9393,6 +9974,8 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
+ kfree(ar->hw->wiphy->addresses);
+
SET_IEEE80211_DEV(ar->hw, NULL);
}
@@ -9435,6 +10018,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+ ath11k_mac_setup_mac_address_list(ar);
SET_IEEE80211_DEV(ar->hw, ab->dev);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0dfdeed5177b..f5800fbecff8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -176,4 +176,7 @@ int ath11k_mac_wait_tx_complete(struct ath11k *ar);
int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6835c14b82cc..fb4ecf9a103e 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -20,35 +20,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
-static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
- {
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
+static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
.num = 20,
.name = "IPCR",
@@ -102,46 +74,18 @@ static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+static const struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
.ch_cfg = ath11k_mhi_channels_qca6390,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
.event_cfg = ath11k_mhi_events_qca6390,
};
-static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
- {
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x14,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x14,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
+static const struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
{
.num = 20,
.name = "IPCR",
@@ -195,7 +139,7 @@ static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
+static const struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
.max_channels = 30,
.timeout_ms = 10000,
.use_bounce_buf = false,
@@ -384,7 +328,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
- struct mhi_controller_config *ath11k_mhi_config;
+ const struct mhi_controller_config *ath11k_mhi_config;
int ret;
mhi_ctrl = mhi_alloc_controller();
@@ -423,7 +367,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xFFFFFFFF;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
@@ -443,6 +387,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
+ case ATH11K_HW_QCA2066_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 09e65c5e55c4..be9d2c69cc41 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -18,7 +18,8 @@
#include "qmi.h"
#define ATH11K_PCI_BAR_NUM 0
-#define ATH11K_PCI_DMA_MASK 32
+#define ATH11K_PCI_DMA_MASK 36
+#define ATH11K_PCI_COHERENT_DMA_MASK 32
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
@@ -28,6 +29,8 @@
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
+#define TCSR_SOC_HW_SUB_VER 0x1910010
+
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
@@ -526,14 +529,24 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
goto disable_device;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ ret = dma_set_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
if (ret) {
ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH11K_PCI_DMA_MASK, ret);
goto release_region;
}
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);
+
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
+ ATH11K_PCI_COHERENT_DMA_MASK, ret);
+ goto release_region;
+ }
+
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
@@ -731,8 +744,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
- const struct ath11k_pci_ops *pci_ops;
int ret;
+ u32 sub_version;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -777,6 +790,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -790,13 +809,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- pci_ops = &ath11k_pci_ops_qcn9074;
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
@@ -809,7 +836,19 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
break;
case 0x10:
case 0x11:
- ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
+ sub_version);
+ switch (sub_version) {
+ case 0x1019A0E1:
+ case 0x1019B0E1:
+ case 0x1019C0E1:
+ case 0x1019D0E1:
+ ab->hw_rev = ATH11K_HW_QCA2066_HW21;
+ break;
+ default:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ }
break;
default:
goto unsupported_wcn6855_soc;
@@ -823,7 +862,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -832,12 +870,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
- if (ret) {
- ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
- goto err_pci_free_region;
- }
-
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index e9a01f344ec6..6be73333d90b 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -72,6 +72,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
+ u64 dma_mask;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 15e2ceb22a44..add4db4c50bc 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -115,6 +115,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 2c7cab62b9bb..5006f81f779b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -3249,7 +3249,8 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
- ath11k_hal_dump_srng_stats(ab);
+ if (ab->is_reset)
+ ath11k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b4fd4d2107c7..737fcd450d4b 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -425,6 +425,11 @@ static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
+ if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD))
+ new_rule->psd = min_t(s8, rule1->psd, rule2->psd);
+ else
+ new_rule->flags &= ~NL80211_RRF_PSD;
+
	/* To be safe, let's use the max cac timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
@@ -527,13 +532,14 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
- u32 reg_flags)
+ s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -563,7 +569,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -584,7 +590,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
end_freq, bw, reg_rule->ant_gain,
- reg_rule->reg_power, flags);
+ reg_rule->reg_power, reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -605,7 +611,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -618,25 +624,68 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
*rule_idx = i;
}
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STANDARD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VERY_LOW_POWER_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect)
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct cur_reg_rule *reg_rule;
+ struct cur_reg_rule *reg_rule, *reg_rule_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
char alpha2[3];
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
- /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
- * This can be updated after complete 6 GHz regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6ghz_ap_type ap_type;
+
+ ap_type = ath11k_reg_ap_pwr_convert(power_type);
+
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6ghz_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -683,14 +732,13 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
- k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ k < reg_6ghz_number) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
} else {
break;
}
@@ -702,7 +750,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -758,6 +806,159 @@ ret:
return new_regd;
}
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
+
+static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+	/* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy
+	 * and one struct ieee80211_regdomain, so it can only store one group of
+	 * reg rules. This means multi-interface concurrency within the same
+	 * ath11k is not supported for the regdomain. So get the vdev type of
+	 * the first entry for now; once the regdomain supports concurrency,
+	 * this should change.
+	 */
+ arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list);
+ if (arvif)
+ return arvif->vdev_type;
+
+ return WMI_VDEV_TYPE_UNSPEC;
+}
+
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd;
+ bool intersect = false;
+ int pdev_idx;
+ struct ath11k *ar;
+ enum wmi_vdev_type vdev_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list");
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+		/* If setting the requested country fails, fw retains the
+		 * current regd. Print a failure message and return from here.
+		 */
+		ath11k_warn(ab, "Failed to set the requested country regulatory setting\n");
+ return -EINVAL;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+	/* Avoid default reg rule updates sent during FW recovery if
+	 * a default regd is already available
+ */
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock_bh(&ab->base_lock);
+ goto retfail;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback. In either case
+ * ath11k_reg_reset_info() needs to be called to avoid
+		 * a memory leak.
+ */
+ ath11k_reg_reset_info(reg_info);
+
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ return 0;
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto retfail;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ ar = ab->pdevs[pdev_idx].ar;
+ vdev_type = ath11k_reg_get_ar_vdev_type(ar);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi handle chan list power type %d vdev type %d intersect %d\n",
+ power_type, vdev_type, intersect);
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]);
+ ab->reg_info_store[pdev_idx] = *reg_info;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+		/* The initial rules from FW after WMI Init are used to build
+		 * the default regd. From then on, any rules updated for
+		 * the pdev could be due to user reg changes.
+		 * Free the previously built regd before assigning the newly
+		 * generated regd to ar. NULL pointer handling will be
+		 * taken care of by kfree itself.
+		 */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purpose
+ */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+fallback:
+	/* Fall back to the older reg (by sending the previous country setting
+	 * again) if fw has succeeded and we failed to process here.
+	 * The regdomain should be uniform across driver and fw. Since the
+	 * FW has processed the command and sent a success status, we expect
+	 * this function to succeed as well. If it doesn't, CTRY needs to be
+	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+
+retfail:
+
+ return -EINVAL;
+}
+
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
@@ -781,10 +982,36 @@ void ath11k_reg_init(struct ath11k *ar)
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info)
+{
+ int i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]);
+ }
+
+ memset(reg_info, 0, sizeof(*reg_info));
+}
+
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
+ for (i = 0; i < ab->num_radios; i++)
+ ath11k_reg_reset_info(&ab->reg_info_store[i]);
+
+ kfree(ab->reg_info_store);
+ ab->reg_info_store = NULL;
+
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index f28902f85e41..64edb794260a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -30,11 +30,20 @@ enum ath11k_dfs_region {
/* ATH11K Regulatory API's */
void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect);
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 43bb23265d34..302d66092b97 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -198,7 +198,7 @@ static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
u16 length;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c29b11ab5bfa..41e7499f075f 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
@@ -163,6 +163,9 @@ int ath11k_thermal_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i, ret;
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8a65fa04b48d..34ab9631ff36 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -238,8 +238,8 @@ static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
(void *)tb);
}
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -248,7 +248,7 @@ const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -2098,7 +2098,7 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
ar->ab->wmi_ab.svc_map))
@@ -2379,6 +2379,70 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct wmi_vdev_ch_power_info *ch;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+ int i, ret, len, array_len;
+
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->psd_power = param->is_psd_power;
+ cmd->eirp_power = param->eirp_power;
+ cmd->power_type_6ghz = param->ap_power_type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_info *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_CH_POWER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*ch) - TLV_HDR_SIZE);
+
+ ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
+ ch->tx_power = param->chan_power_info[i].tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ ch->chan_cfreq, ch->tx_power);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
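
The new ath11k_wmi_send_vdev_set_tpc_power() above builds its WMI message as a fixed command struct, followed by an array TLV header, followed by num_pwr_levels per-channel entries. A standalone sketch of that length and layout arithmetic, with simplified stand-in structs and an assumed TLV_HDR_SIZE (not the driver's real encoding):

#include <stdio.h>
#include <stdint.h>

#define TLV_HDR_SIZE 8 /* assumed header size, for this sketch only */

struct ch_power_info { uint32_t tlv_header; uint32_t chan_cfreq; uint32_t tx_power; };
struct set_tpc_power_cmd { uint32_t tlv_header; uint32_t vdev_id; uint32_t psd_power; uint32_t eirp_power; uint32_t power_type_6ghz; };

int main(void)
{
	unsigned int num_pwr_levels = 3;
	size_t array_len = sizeof(struct ch_power_info) * num_pwr_levels;
	size_t len = sizeof(struct set_tpc_power_cmd) + TLV_HDR_SIZE + array_len;
	uint32_t buf32[64] = { 0 };
	uint8_t *ptr = (uint8_t *)buf32;

	/* Fixed command struct first ... */
	struct set_tpc_power_cmd *cmd = (struct set_tpc_power_cmd *)ptr;
	cmd->vdev_id = 0;
	cmd->psd_power = 1;
	ptr += sizeof(*cmd);

	/* ... then the array TLV header ... */
	ptr += TLV_HDR_SIZE;

	/* ... then one entry per power level. */
	struct ch_power_info *ch = (struct ch_power_info *)ptr;
	for (unsigned int i = 0; i < num_pwr_levels; i++, ch++) {
		ch->chan_cfreq = 5955 + i * 20;
		ch->tx_power = 24;
	}

	printf("total message length: %zu bytes\n", len);
	return 0;
}
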
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param)
{
@@ -3930,7 +3994,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
struct ath11k_vif *arvif;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -3956,8 +4020,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
- GFP_KERNEL);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -4749,6 +4812,14 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->pdevs[0].pdev_id = 0;
}
+ if (!soc->reg_info_store) {
+ soc->reg_info_store = kcalloc(soc->num_radios,
+ sizeof(*soc->reg_info_store),
+ GFP_ATOMIC);
+ if (!soc->reg_info_store)
+ return -ENOMEM;
+ }
+
return 0;
}
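
The reg_info_store array added above is allocated lazily and only on first use, so repeating the capability parse (for example after firmware recovery) neither leaks nor replaces the existing store. A tiny standalone sketch of that guard-then-allocate idiom, with calloc() standing in for kcalloc():

#include <stdio.h>
#include <stdlib.h>

struct reg_info { int dfs_region; };

static struct reg_info *store;

/* Allocate the per-radio store on first use only; later calls are no-ops. */
static int ensure_store(unsigned int num_radios)
{
	if (!store) {
		store = calloc(num_radios, sizeof(*store));
		if (!store)
			return -1;
	}
	return 0;
}

int main(void)
{
	ensure_store(3);
	ensure_store(3); /* second call reuses the existing allocation */
	printf("store at %p\n", (void *)store);
	free(store);
	return 0;
}
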
@@ -4786,6 +4857,7 @@ static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
@@ -5003,7 +5075,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5028,6 +5100,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
vdev_rsp->mac_id = ev->mac_id;
vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+ vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power;
kfree(tb);
return 0;
@@ -5102,7 +5175,7 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5278,7 +5351,7 @@ static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5634,7 +5707,7 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5666,7 +5739,7 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5686,15 +5759,15 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
return 0;
}
-static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5722,7 +5795,7 @@ static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5876,7 +5949,7 @@ static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6052,7 +6125,7 @@ static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6085,7 +6158,7 @@ static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6112,7 +6185,7 @@ static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6153,14 +6226,14 @@ exit:
return idx;
}
-static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6199,7 +6272,7 @@ ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6239,7 +6312,7 @@ ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6270,7 +6343,7 @@ static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6995,7 +7068,7 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
const void **tb;
int ret, i;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7060,32 +7133,15 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
wake_up(&wmi->tx_ce_desc_wq);
}
-static bool ath11k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
-
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
-
- return false;
-}
-
-static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
- struct sk_buff *skb,
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb,
enum wmi_reg_chan_list_cmd_type id)
{
- struct cur_regulatory_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath11k *ar;
+ struct cur_regulatory_info *reg_info;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
- if (!reg_info) {
- ret = -ENOMEM;
- goto fallback;
- }
+ if (!reg_info)
+ return -ENOMEM;
if (id == WMI_REG_CHAN_LIST_CC_ID)
ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
@@ -7093,118 +7149,22 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
- ath11k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
-
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
- */
- ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
- goto mem_free;
- }
-
- pdev_idx = reg_info->phy_id;
-
- /* Avoid default reg rule updates sent during FW recovery if
- * it is already available
- */
- spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
- ab->default_regd[pdev_idx]) {
- spin_unlock(&ab->base_lock);
+ ath11k_warn(ab, "failed to extract regulatory info\n");
goto mem_free;
}
- spin_unlock(&ab->base_lock);
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
- goto mem_free;
- else
- goto fallback;
- }
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
- */
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
- (char *)reg_info->alpha2, 2))
+ ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath11k_warn(ab, "failed to process regulatory info %d\n", ret);
goto mem_free;
-
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath11k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath11k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath11k_warn(ab, "failed to build regd from reg_info\n");
- goto fallback;
- }
-
- spin_lock(&ab->base_lock);
- if (ab->default_regd[pdev_idx]) {
- /* The initial rules from FW after WMI Init is to build
- * the default regd. From then on, any rules updated for
- * the pdev could be due to user reg changes.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* This regd would be applied during mac registration and is
- * held constant throughout for regd intersection purpose
- */
- ab->default_regd[pdev_idx] = regd;
}
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
- goto mem_free;
+ kfree(reg_info);
+ return 0;
-fallback:
- /* Fallback to older reg (by sending previous country setting
- * again if fw has succeeded and we failed to process here.
- * The Regdomain should be uniform across driver and fw. Since the
- * FW has processed the command and sent a success status, we expect
- * this function to succeed as well. If it doesn't, CTRY needs to be
- * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
- */
- /* TODO: This is rare, but still should also be handled */
- WARN_ON(1);
mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2ghz_ptr);
- kfree(reg_info->reg_rules_5ghz_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+ ath11k_reg_reset_info(reg_info);
+ kfree(reg_info);
return ret;
}
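
After this rewrite the event handler only parses the event into a heap-allocated reg_info, hands it to ath11k_reg_handle_chan_list(), and always releases the nested buffers via ath11k_reg_reset_info() before freeing the container. Judging from the frees the removed code performed, the reset helper is expected to drop the 2/5/6 GHz rule arrays and clear the struct; a standalone sketch of that reset-then-free pattern (simplified struct, free() in place of kfree()):

#include <stdlib.h>
#include <string.h>

#define MAX_AP_TYPE 3
#define MAX_CLIENT_TYPE 4

struct reg_rules { int dummy; };

struct cur_reg_info {
	struct reg_rules *rules_2ghz;
	struct reg_rules *rules_5ghz;
	struct reg_rules *rules_6ghz_ap[MAX_AP_TYPE];
	struct reg_rules *rules_6ghz_client[MAX_AP_TYPE][MAX_CLIENT_TYPE];
};

/* Free every nested allocation and zero the struct so it can be reused. */
static void reg_reset_info(struct cur_reg_info *info)
{
	free(info->rules_2ghz);
	free(info->rules_5ghz);
	for (int i = 0; i < MAX_AP_TYPE; i++) {
		free(info->rules_6ghz_ap[i]);
		for (int j = 0; j < MAX_CLIENT_TYPE; j++)
			free(info->rules_6ghz_client[i][j]);
	}
	memset(info, 0, sizeof(*info));
}

int main(void)
{
	struct cur_reg_info *info = calloc(1, sizeof(*info));

	info->rules_2ghz = malloc(sizeof(struct reg_rules));
	reg_reset_info(info);
	free(info);
	return 0;
}
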
@@ -7362,7 +7322,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
}
ar->last_wmi_vdev_start_status = 0;
-
+ ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power;
status = vdev_start_resp.status;
if (WARN_ON_ONCE(status)) {
@@ -7384,8 +7344,7 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
- if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -7416,7 +7375,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7884,7 +7843,7 @@ static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath11k_warn(ab, "failed to extract chan info event");
return;
}
@@ -8216,7 +8175,7 @@ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8267,7 +8226,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -8281,7 +8240,7 @@ ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8315,7 +8274,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
struct ath11k *ar;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8369,7 +8328,7 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
const struct wmi_pdev_temperature_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8409,7 +8368,7 @@ static void ath11k_fils_discovery_event(struct ath11k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8441,7 +8400,7 @@ static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8567,7 +8526,7 @@ static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
const struct wmi_twt_add_dialog_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8604,7 +8563,7 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
u64 replay_ctr;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -9793,3 +9752,9 @@ int ath11k_wmi_sta_keepalive(struct ath11k *ar,
return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
+
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index ff0a9a92beeb..bb419e3abb00 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -15,6 +15,7 @@ struct ath11k;
struct ath11k_fw_stats;
struct ath11k_fw_dbglog;
struct ath11k_vif;
+struct ath11k_reg_tpc_power_info;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -327,6 +328,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1880,6 +1897,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
WMI_TAG_MAX
@@ -2114,6 +2133,7 @@ enum wmi_tlv_service {
/* The second 128 bits */
WMI_MAX_EXT_SERVICE = 256,
WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
@@ -3168,6 +3188,41 @@ struct wlan_ssid {
u8 ssid[WLAN_SSID_MAX_LEN];
};
+struct wmi_vdev_ch_power_info {
+ u32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ u32 chan_cfreq;
+
+ /* Unit: dBm, either PSD/EIRP power for this frequency or
+ * incremental for non-PSD BW
+ */
+ u32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+
+	/* Whether this is PSD power: 0 - no, 1 - yes */
+ u32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ u32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ u32 power_type_6ghz;
+
+ /* This fixed_param TLV is followed by the below TLVs:
+ * num_pwr_levels of wmi_vdev_ch_power_info
+	 * For PSD power, each entry carries the PSD/EIRP power of one 20 MHz chunk.
+	 * For non-PSD power, the entries carry the power values for 20 MHz, 40 MHz,
+	 * and so on up to the BSS bandwidth.
+	 * num_pwr_levels tells software how many elements are present in this
+	 * variable-length array.
+ */
+} __packed;
+
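
Per the comment in wmi_vdev_set_tpc_power_cmd, the number of trailing wmi_vdev_ch_power_info entries differs between PSD and non-PSD power. The exact sizing rule is not part of this hunk, so the following standalone sketch is only illustrative (one entry per 20 MHz chunk for PSD power; one entry per bandwidth step up to the BSS bandwidth otherwise):

#include <stdio.h>

static unsigned int num_pwr_levels(unsigned int bss_bw_mhz, int is_psd)
{
	unsigned int n = 0;

	if (is_psd)
		return bss_bw_mhz / 20; /* one PSD entry per 20 MHz chunk */

	for (unsigned int bw = 20; bw <= bss_bw_mhz; bw *= 2)
		n++; /* 20, 40, 80, ... up to the BSS bandwidth */
	return n;
}

int main(void)
{
	printf("PSD, 160 MHz: %u levels\n", num_pwr_levels(160, 1));
	printf("non-PSD, 160 MHz: %u levels\n", num_pwr_levels(160, 0));
	return 0;
}
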
#define WMI_IE_BITMAP_SIZE 8
/* prefix used by scan requestor ids on the host */
@@ -3308,24 +3363,19 @@ struct scan_req_params {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 scan_ctrl_flags_ext;
u32 dwell_time_active;
u32 dwell_time_active_2g;
@@ -3339,36 +3389,31 @@ struct scan_req_params {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
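
Dropping the anonymous unions above (together with the earlier switch from scan_flags |= WMI_SCAN_CHAN_STAT_EVENT to scan_f_chan_stat_evnt = 1) means scan event and scan control flags are now set only through named bitfields instead of an aliased u32. A standalone sketch contrasting the two styles (shortened flag set, assumed bit position for the mask):

#include <stdio.h>
#include <stdint.h>

#define SCAN_CHAN_STAT_EVENT (1u << 4) /* assumed bit position, for illustration */

/* Old style: bitfields aliased with a raw u32 through an anonymous union. */
struct scan_flags_old {
	union {
		struct {
			uint32_t passive:1, bcast_probe:1, cck_rates:1,
				 ofdm_rates:1, chan_stat_evnt:1;
		};
		uint32_t scan_flags;
	};
};

/* New style: named bitfields only, no raw aliasing. */
struct scan_flags_new {
	uint32_t passive:1, bcast_probe:1, cck_rates:1,
		 ofdm_rates:1, chan_stat_evnt:1;
};

int main(void)
{
	struct scan_flags_old o;
	struct scan_flags_new n = { 0 };

	o.scan_flags = 0;
	o.scan_flags |= SCAN_CHAN_STAT_EVENT; /* depends on the bit layout matching */
	n.chan_stat_evnt = 1;                 /* self-describing, layout-independent */

	printf("old chan_stat_evnt=%d new chan_stat_evnt=%d\n",
	       o.chan_stat_evnt, n.chan_stat_evnt);
	return 0;
}
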
@@ -4119,6 +4164,7 @@ struct wmi_vdev_start_resp_event {
};
u32 cfgd_tx_streams;
u32 cfgd_rx_streams;
+ s32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4951,6 +4997,7 @@ struct ath11k_targ_cap {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -6295,8 +6342,8 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp);
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -6479,5 +6526,9 @@ int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_va
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
const struct wmi_sta_keepalive_arg *arg);
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar);
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param);
#endif
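
ath11k_wmi_supports_6ghz_cc_ext(), declared above, is a simple capability gate: the firmware service bitmap must advertise the extended country-code event and the radio must support 6 GHz. A standalone sketch of that kind of service-bit test, using plain bit arithmetic instead of the kernel's test_bit():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SVC_REG_CC_EXT_EVENT 281
#define SVC_MAP_WORDS 16 /* enough for 512 service bits */

struct radio {
	uint32_t svc_map[SVC_MAP_WORDS];
	bool supports_6ghz;
};

static bool svc_bit_set(const uint32_t *map, unsigned int bit)
{
	return map[bit / 32] & (1u << (bit % 32));
}

static bool supports_6ghz_cc_ext(const struct radio *r)
{
	return svc_bit_set(r->svc_map, SVC_REG_CC_EXT_EVENT) && r->supports_6ghz;
}

int main(void)
{
	struct radio r = { .supports_6ghz = true };

	r.svc_map[SVC_REG_CC_EXT_EVENT / 32] |= 1u << (SVC_REG_CC_EXT_EVENT % 32);
	printf("6 GHz CC ext supported: %d\n", supports_6ghz_cc_ext(&r));
	return 0;
}
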
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 62c52e733b5e..71669f94ff75 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -19,7 +19,9 @@ ath12k-y += core.o \
hw.o \
mhi.o \
pci.o \
- dp_mon.o
+ dp_mon.o \
+ fw.o \
+ p2p.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 6c01b282fcd3..391b6fb2bd42 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
+#include "fw.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -104,27 +105,66 @@ int ath12k_core_resume(struct ath12k_base *ab)
return 0;
}
-static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
- size_t name_len)
+static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len, bool with_variant,
+ bool bus_type_mode)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
- if (ab->qmi.target.bdf_ext[0] != '\0')
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
- scnprintf(name, name_len,
- "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
- ath12k_bus_str(ab->hif.bus),
- ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ switch (ab->id.bdf_search) {
+ case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+ if (bus_type_mode)
+ scnprintf(name, name_len,
+ "bus=%s",
+ ath12k_bus_str(ab->hif.bus));
+ else
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+}
+
+static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+}
+
+static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+}
+
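
The three wrappers above produce progressively less specific board-name strings for the board-data lookup. A standalone sketch of the strings such a lookup might try, from most to least specific (abridged format, made-up IDs purely for illustration):

#include <stdio.h>

/* Hypothetical IDs, not taken from any real device. */
#define VENDOR 0x17cb
#define DEVICE 0x1107

int main(void)
{
	char full[200], fallback[200], bus_only[200];

	/* Full name: bus + PCI IDs + QMI chip/board id (+ optional variant). */
	snprintf(full, sizeof(full),
		 "bus=pci,vendor=%04x,device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
		 VENDOR, DEVICE, 2, 255, ",variant=EXAMPLE");

	/* Fallback drops the variant; the bus-type name keeps only the bus. */
	snprintf(fallback, sizeof(fallback),
		 "bus=pci,vendor=%04x,device=%04x,qmi-chip-id=%d,qmi-board-id=%d",
		 VENDOR, DEVICE, 2, 255);
	snprintf(bus_only, sizeof(bus_only), "bus=%s", "pci");

	printf("%s\n%s\n%s\n", full, fallback, bus_only);
	return 0;
}
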
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *file)
{
@@ -159,7 +199,9 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
struct ath12k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
- int bd_ie_type)
+ int ie_id,
+ int name_id,
+ int data_id)
{
const struct ath12k_fw_ie *hdr;
bool name_match_found;
@@ -169,7 +211,7 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
name_match_found = false;
- /* go through ATH12K_BD_IE_BOARD_ elements */
+ /* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath12k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
@@ -180,48 +222,50 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
- ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+ ath12k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath12k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
- switch (board_ie_id) {
- case ATH12K_BD_IE_BOARD_NAME:
+ if (board_ie_id == name_id) {
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
- break;
+ goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
- break;
+ goto next;
name_match_found = true;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found match for name '%s'",
+ "boot found match %s for name '%s'",
+ ath12k_bd_ie_type_str(ie_id),
boardname);
- break;
- case ATH12K_BD_IE_BOARD_DATA:
+ } else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
- break;
+ goto next;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found board data for '%s'", boardname);
+ "boot found %s for '%s'",
+ ath12k_bd_ie_type_str(ie_id),
+ boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
- default:
- ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+ } else {
+ ath12k_warn(ab, "unknown %s id found: %d\n",
+ ath12k_bd_ie_type_str(ie_id),
board_ie_id);
- break;
}
-
+next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
@@ -238,7 +282,10 @@ out:
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
struct ath12k_board_data *bd,
- const char *boardname)
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
{
size_t len, magic_len;
const u8 *data;
@@ -303,22 +350,23 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
goto err;
}
- switch (ie_id) {
- case ATH12K_BD_IE_BOARD:
+ if (ie_id == ie_id_match) {
ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
- ATH12K_BD_IE_BOARD);
+ ie_id_match,
+ name_id,
+ data_id);
if (ret == -ENOENT)
/* no match found, continue */
- break;
+ goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
-
+next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
@@ -328,8 +376,9 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
out:
if (!bd->data || !bd->len) {
- ath12k_err(ab,
- "failed to fetch board data for %s from %s\n",
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath12k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
@@ -356,28 +405,56 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
return 0;
}
-#define BOARD_NAME_SIZE 100
+#define BOARD_NAME_SIZE 200
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
- char boardname[BOARD_NAME_SIZE];
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
int bd_api;
int ret;
- ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ filename = ATH12K_BOARD_API2_FILE;
+
+ ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath12k_err(ab, "failed to create board name: %d", ret);
return ret;
}
bd_api = 2;
- ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath12k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
bd_api = 1;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
if (ret) {
- ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ath12k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath12k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params->fw.dir);
return ret;
}
@@ -387,6 +464,79 @@ success:
return 0;
}
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create default board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
+ if (ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
+
+exit:
+ if (!ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
+
+ return ret;
+}
+
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_STATIONS_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_STATIONS_SINGLE;
+}
+
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_PEERS_PDEV_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_PEERS_PDEV_SINGLE;
+}
+
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_TIDS(DBS);
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_TIDS(DBS_SBS);
+ return TARGET_NUM_TIDS(SINGLE);
+}
+
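
TARGET_NUM_TIDS(DBS) above reads like a token-pasting macro that expands to a mode-specific constant. A standalone sketch of that preprocessor pattern, with invented per-mode values (the driver's real numbers live in its hw definitions):

#include <stdio.h>

/* Invented per-mode peer counts, only to demonstrate the ## expansion. */
#define NUM_PEERS_SINGLE   512
#define NUM_PEERS_DBS      256
#define NUM_PEERS_DBS_SBS  128

/* One macro covers all modes by pasting the mode name onto the constant. */
#define NUM_TIDS(x) (2 * NUM_PEERS_##x)

int main(void)
{
	printf("single: %d, dbs: %d, dbs_sbs: %d\n",
	       NUM_TIDS(SINGLE), NUM_TIDS(DBS), NUM_TIDS(DBS_SBS));
	return 0;
}
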
static void ath12k_core_stop(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@@ -592,14 +742,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
ath12k_dp_cc_config(ab);
- ath12k_dp_pdev_pre_alloc(ab);
-
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
+ ath12k_dp_hal_rx_desc_init(ab);
+
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@@ -759,20 +909,30 @@ static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
struct ath12k *ar;
+ struct ath12k_hw *ah;
+ struct ieee80211_hw *hw;
bool rfkill_radio_on;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah)
continue;
- ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
+ }
+
+ hw = ah->hw;
+ wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
}
}
@@ -801,6 +961,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
spin_lock_bh(&ab->base_lock);
@@ -810,16 +971,24 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ah = ab->ah[i];
+ ieee80211_stop_queues(ah->hw);
+ }
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH12K_STATE_OFF)
continue;
- ieee80211_stop_queues(ar->hw);
ath12k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@@ -856,7 +1025,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
case ATH12K_STATE_ON:
ar->state = ATH12K_STATE_RESTARTING;
ath12k_core_halt(ar);
- ieee80211_restart_hw(ar->hw);
+ ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
case ATH12K_STATE_OFF:
ath12k_warn(ab,
@@ -979,6 +1148,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
+ ath12k_fw_map(ab);
+
return 0;
}
@@ -1007,6 +1178,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
ath12k_hif_power_down(ab);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
+ ath12k_fw_unmap(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1054,6 +1226,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
+ ab->qmi.num_radios = U8_MAX;
+ ab->slo_capable = true;
return ab;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 8458dc292821..97e5a0ccd233 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/firmware.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -24,6 +25,7 @@
#include "hal_rx.h"
#include "reg.h"
#include "dbring.h"
+#include "fw.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -55,6 +57,11 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+enum ath12k_bdf_search {
+ ATH12K_BDF_SEARCH_DEFAULT,
+ ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
@@ -259,6 +266,7 @@ struct ath12k_vif {
u8 tx_encap_type;
u8 vdev_stats_id;
u32 punct_bitmap;
+ bool ps;
};
struct ath12k_vif_iter {
@@ -420,7 +428,7 @@ struct ath12k_sta {
};
#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
#define ATH12K_NUM_CHANS 100
#define ATH12K_MAX_5G_CHAN 173
@@ -468,7 +476,7 @@ struct ath12k_per_peer_tx_stats {
struct ath12k {
struct ath12k_base *ab;
struct ath12k_pdev *pdev;
- struct ieee80211_hw *hw;
+ struct ath12k_hw *ah;
struct ath12k_wmi_pdev *wmi;
struct ath12k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
@@ -532,6 +540,7 @@ struct ath12k {
/* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
u8 pdev_idx;
u8 lmac_id;
+ u8 hw_link_id;
struct completion peer_assoc_done;
struct completion peer_delete_done;
@@ -591,6 +600,13 @@ struct ath12k {
int monitor_vdev_id;
};
+struct ath12k_hw {
+ struct ieee80211_hw *hw;
+
+ u8 num_radio;
+ struct ath12k radio[] __aligned(sizeof(void *));
+};
+
struct ath12k_band_cap {
u32 phy_id;
u32 max_bw_supported;
@@ -724,6 +740,16 @@ struct ath12k_base {
u8 fw_pdev_count;
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+
+ /* Holds information of wiphy (hw) registration.
+	/* Holds information about wiphy (hw) registration.
+	 *
+	 * In the Multi/Single Link Operation case, all pdevs are registered as
+	 * a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+	 * registered as a separate wiphy.
+ struct ath12k_hw *ah[MAX_RADIOS];
+ u8 num_hw;
+
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -793,10 +819,44 @@ struct ath12k_base {
/* true means radio is on */
bool rfkill_radio_on;
+ struct {
+ enum ath12k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ u32 api_version;
+
+ const struct firmware *fw;
+ const u8 *amss_data;
+ size_t amss_len;
+ const u8 *amss_dualmac_data;
+ size_t amss_dualmac_len;
+ const u8 *m3_data;
+ size_t m3_len;
+
+ DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ } fw;
+
+ const struct hal_rx_ops *hal_rx_ops;
+
+	/* slo_capable denotes whether single/multi link operation
+	 * is supported within the same chip (SoC).
+	 */
+ bool slo_capable;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
+struct ath12k_pdev_map {
+ struct ath12k_base *ab;
+ u8 pdev_idx;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@@ -810,6 +870,7 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
struct ath12k_board_data *bd);
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd);
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
@@ -818,6 +879,9 @@ int ath12k_core_suspend(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -882,4 +946,18 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
return "unknown";
}
+static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
+{
+ return hw->priv;
+}
+
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+{
+ return ah->radio;
+}
+
+static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
+{
+ return ar->ah->hw;
+}
#endif /* _CORE_H_ */
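
struct ath12k_hw introduced above keeps its radios in a flexible array member, so a single allocation sized for num_radio entries backs one ieee80211_hw. A standalone sketch of that allocation pattern, with malloc()/calloc() standing in for the mac80211 allocator:

#include <stdio.h>
#include <stdlib.h>

struct radio { int pdev_idx; };

struct hw_group {
	int num_radio;
	struct radio radio[]; /* flexible array member, one slot per pdev */
};

static struct hw_group *hw_group_alloc(int num_radio)
{
	struct hw_group *ah;

	ah = calloc(1, sizeof(*ah) + num_radio * sizeof(ah->radio[0]));
	if (!ah)
		return NULL;
	ah->num_radio = num_radio;
	return ah;
}

int main(void)
{
	struct hw_group *ah = hw_group_alloc(2);

	for (int i = 0; i < ah->num_radio; i++)
		ah->radio[i].pdev_idx = i;
	printf("radios: %d\n", ah->num_radio);
	free(ah);
	return 0;
}
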
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index a6f81f2f97ef..c8e1b244b69e 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -997,6 +997,29 @@ void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
}
}
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
+{
+ if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
+ ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
+ return true;
+ }
+ return false;
+}
+
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
+{
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+		/* RX TLV compaction is supported, so switch hal_rx_ops
+		 * to the compact variant.
+		 */
+ ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
+ }
+ ab->hal.hal_desc_sz =
+ ab->hal_rx_ops->rx_desc_get_desc_size();
+}
+
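
ath12k_dp_hal_rx_desc_init() above swaps in a compact rx-descriptor ops table at init time when the firmware advertises TLV word-mask compaction, then caches the descriptor size. A standalone sketch of that select-an-ops-table-once pattern:

#include <stdbool.h>
#include <stdio.h>

struct rx_ops {
	unsigned int (*desc_size)(void);
};

static unsigned int full_desc_size(void) { return 128; }
static unsigned int compact_desc_size(void) { return 64; }

static const struct rx_ops full_ops = { .desc_size = full_desc_size };
static const struct rx_ops compact_ops = { .desc_size = compact_desc_size };

struct device_ctx {
	const struct rx_ops *rx_ops;
	unsigned int desc_sz;
};

/* Pick the ops table once at init; the fast path then goes through ctx->rx_ops. */
static void rx_desc_init(struct device_ctx *ctx, bool compaction_supported)
{
	ctx->rx_ops = compaction_supported ? &compact_ops : &full_ops;
	ctx->desc_sz = ctx->rx_ops->desc_size();
}

int main(void)
{
	struct device_ctx ctx;

	rx_desc_init(&ctx, true);
	printf("descriptor size: %u\n", ctx.desc_sz);
	return 0;
}
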
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 1df3cdd46140..eb2dd408e081 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -150,7 +150,7 @@ struct ath12k_pdev_dp {
#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
-#define DP_BA_WIN_SZ_MAX 256
+#define DP_BA_WIN_SZ_MAX 1024
#define DP_TCL_NUM_RING_MAX 4
@@ -170,6 +170,7 @@ struct ath12k_pdev_dp {
#define DP_REO_CMD_RING_SIZE 128
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RX_MAC_BUF_RING_SIZE 2048
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
@@ -765,6 +766,11 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
+
enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
@@ -1088,6 +1094,11 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 rx_mpdu_offset;
__le32 rx_msdu_offset;
__le32 rx_attn_offset;
+ __le32 info2;
+ __le32 reserved[2];
+ __le32 rx_mpdu_start_end_mask;
+ __le32 rx_msdu_end_word_mask;
+ __le32 info3;
} __packed;
struct htt_rx_ring_tlv_filter {
@@ -1104,6 +1115,9 @@ struct htt_rx_ring_tlv_filter {
u16 rx_msdu_end_offset;
u16 rx_msdu_start_offset;
u16 rx_attn_offset;
+ u16 rx_mpdu_start_wmask;
+ u16 rx_mpdu_end_wmask;
+ u32 rx_msdu_end_wmask;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
@@ -1820,4 +1834,6 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie);
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 desc_id);
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab);
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index be4b39f5fa80..2d56913a75d0 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -864,7 +864,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff
{
u32 rx_pkt_offset, l2_hdr_offset;
- rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
+ rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
@@ -917,7 +917,8 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
- hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+ hdr_desc =
+ ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
@@ -1130,7 +1131,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 1ee83f765929..ca76c018dd0c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -23,34 +23,34 @@
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
+ if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
- return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}
static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
@@ -58,7 +58,7 @@ static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
@@ -67,156 +67,156 @@ static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
+ return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}
static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
+ return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
+ return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}
static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
+ return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
+ return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}
static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}
static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
- ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+ ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
- ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
+ ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
- ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
+ ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}
static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+ return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
+ return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
- ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
+ ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}
static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
@@ -224,13 +224,19 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
- ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+ ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
+}
+
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
@@ -1761,7 +1767,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -2458,7 +2464,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
@@ -2473,7 +2479,7 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
@@ -2804,7 +2810,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
- u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
@@ -2844,7 +2850,7 @@ mic_fail:
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
- ieee80211_rx(ar->hw, msdu);
+ ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
return -EINVAL;
}
@@ -2854,7 +2860,7 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
if (!flags)
return;
@@ -2892,7 +2898,7 @@ static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
- u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
@@ -2968,7 +2974,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct ath12k_rx_desc_info *desc_info;
u8 dst_ind;
- hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ hal_rx_desc_sz = ab->hal.hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
@@ -3122,7 +3128,7 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
@@ -3305,7 +3311,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
@@ -3486,7 +3492,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
- (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
+ (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH12K_SKB_RXCB(skb);
@@ -3510,7 +3516,7 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
@@ -3607,7 +3613,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
@@ -3695,16 +3701,15 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int mac_id;
+ u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
- int ret, i;
+ int ret, pdev_id;
- for (i = 0; i < ab->num_radios; i++)
- __skb_queue_head_init(&msdu_list[i]);
+ __skb_queue_head_init(&msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
@@ -3737,11 +3742,6 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
}
- /* FIXME: Extract mac id correctly. Since descs are not tied
- * to mac, we can extract from vdev id in ring desc.
- */
- mac_id = 0;
-
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
@@ -3771,7 +3771,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
- __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ __skb_queue_tail(&msdu_list, msdu);
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
@@ -3788,21 +3789,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
rcu_read_lock();
- for (i = 0; i < ab->num_radios; i++) {
- if (!rcu_dereference(ab->pdevs_active[i])) {
- __skb_queue_purge(&msdu_list[i]);
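+	/* A sketch of the new flow: each msdu carries its source link id
+	 * in the rx descriptor; map it to the owning pdev before delivery.
+	 */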
+ while ((msdu = __skb_dequeue(&msdu_list))) {
+ mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+ (struct hal_rx_desc *)msdu->data);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ ar = ab->pdevs[pdev_id].ar;
+
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
continue;
}
- ar = ab->pdevs[i].ar;
-
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- __skb_queue_purge(&msdu_list[i]);
+ dev_kfree_skb_any(msdu);
continue;
}
-
- while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
- ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
done:
@@ -3922,7 +3924,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3935,14 +3937,20 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
+
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+ tlv_filter.rx_mpdu_start_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
+ tlv_filter.rx_msdu_end_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
+ tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
+ }
- /* TODO: Selectively subscribe to required qwords within msdu_end
- * and mpdu_start and setup the mask in below msg
- * and modify the rx_desc struct
- */
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
@@ -3957,7 +3965,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3973,9 +3981,9 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to required qwords within msdu_end
* and mpdu_start and setup the mask in below msg
@@ -4086,7 +4094,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
- i, 1024);
+ i, DP_RX_MAC_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
i);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 62f9cdbb811c..572b87153647 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -151,7 +151,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
@@ -401,7 +401,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
}
}
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}
static void
@@ -498,7 +498,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
rcu_read_unlock();
@@ -837,7 +837,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -964,6 +964,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
+ if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+ tlv_filter->rx_msdu_end_wmask > 0) {
+ cmd->info2 |=
+ le32_encode_bits(true,
+ HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
+ cmd->rx_mpdu_start_end_mask =
+ le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+		/* mpdu_end is not used by any hardware so far;
+		 * if a future chip needs it, assign it through
+		 * the hal ops.
+		 */
+ cmd->rx_mpdu_start_end_mask |=
+ le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+ cmd->rx_msdu_end_word_mask =
+ le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+ }
+
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
new file mode 100644
index 000000000000..5be4b2d4a19d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+
+#include "debug.h"
+
+static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
+ const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath12k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ ab->fw.fw = ath12k_core_firmware_request(ab, name);
+ if (IS_ERR(ab->fw.fw)) {
+ ret = PTR_ERR(ab->fw.fw);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to load %s: %d\n", name, ret);
+ ab->fw.fw = NULL;
+ return ret;
+ }
+
+ data = ab->fw.fw->data;
+ len = ab->fw.fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH12K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath12k_err(ab, "firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH12K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath12k_err(ab, "Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ /* make sure there's space for padding */
+ if (magic_len > len) {
+ ath12k_err(ab, "No space for padding after magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath12k_fw_ie)) {
+ hdr = (struct ath12k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath12k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH12K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH12K_FW_IE_FEATURES:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
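+			/* Feature flags are packed eight per byte, LSB first;
+			 * stop once the byte index runs past the IE payload.
+			 */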
+ for (i = 0; i < ATH12K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ab->fw.fw_features);
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
+ ab->fw.fw_features,
+ sizeof(ab->fw.fw_features));
+ break;
+ case ATH12K_FW_IE_AMSS_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.amss_data = data;
+ ab->fw.amss_len = ie_len;
+ break;
+ case ATH12K_FW_IE_M3_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found m3 image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.m3_data = data;
+ ab->fw.m3_len = ie_len;
+ break;
+ case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found dualmac fw image ie (%zd B)\n",
+ ie_len);
+ ab->fw.amss_dualmac_data = data;
+ ab->fw.amss_dualmac_len = ie_len;
+ break;
+ default:
+ ath12k_warn(ab, "Unknown FW IE: %u\n", ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ /* make sure there's space for padding */
+ if (ie_len > len)
+ break;
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ return 0;
+
+err:
+ release_firmware(ab->fw.fw);
+ ab->fw.fw = NULL;
+ return ret;
+}
+
+void ath12k_fw_map(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_fw_request_firmware_api_n(ab, ATH12K_FW_API2_FILE);
+ if (ret == 0)
+ ab->fw.api_version = 2;
+ else
+ ab->fw.api_version = 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "using fw api %d\n",
+ ab->fw.api_version);
+}
+
+void ath12k_fw_unmap(struct ath12k_base *ab)
+{
+ release_firmware(ab->fw.fw);
+ memset(&ab->fw, 0, sizeof(ab->fw));
+}
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
new file mode 100644
index 000000000000..3ff041f15fa0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_FW_H
+#define ATH12K_FW_H
+
+#define ATH12K_FW_API2_FILE "firmware-2.bin"
+#define ATH12K_FIRMWARE_MAGIC "QCOM-ATH12K-FW"
+
+enum ath12k_fw_ie_type {
+ ATH12K_FW_IE_TIMESTAMP = 0,
+ ATH12K_FW_IE_FEATURES = 1,
+ ATH12K_FW_IE_AMSS_IMAGE = 2,
+ ATH12K_FW_IE_M3_IMAGE = 3,
+ ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4,
+};
+
+enum ath12k_fw_features {
+ /* The firmware supports setting the QRTR id via register
+ * PCIE_LOCAL_REG_QRTR_NODE_ID
+ */
+ ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+
+ /* keep last */
+ ATH12K_FW_FEATURE_COUNT,
+};
+
+void ath12k_fw_map(struct ath12k_base *ab);
+void ath12k_fw_unmap(struct ath12k_base *ab);
+
+#endif /* ATH12K_FW_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index a489369d8068..78310da8cfe8 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -626,6 +626,21 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
return 0;
}
+static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void)
+{
+ return QCN9274_MPDU_START_WMASK;
+}
+
+static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void)
+{
+ return QCN9274_MSDU_END_WMASK;
+}
+
+static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void)
+{
+ return &hal_rx_qcn9274_compact_ops;
+}
+
static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
@@ -680,7 +695,17 @@ static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_qcn9274_ops = {
+static u32 ath12k_hw_qcn9274_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
@@ -712,13 +737,367 @@ const struct hal_ops hal_qcn9274_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
- .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
+};
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_QCN9274_INFO5_TID);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
+
+ info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+ desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274_compact.msdu_payload[0];
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274_compact.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[5] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[6] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+ crypto_hdr[7] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274_compact);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
+ RX_MSDU_END_64_TLV_SRC_LINK_ID);
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
+ .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_qcn9274_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get,
+ .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get,
+ .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops,
};
static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
@@ -1134,7 +1513,17 @@ static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_wcn7850_ops = {
+static u32 ath12k_hw_wcn7850_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_wcn7850);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
@@ -1167,13 +1556,21 @@ const struct hal_ops hal_wcn7850_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
- .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_wcn7850_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = NULL,
+ .rxdma_ring_wmask_rx_msdu_end = NULL,
+ .get_hal_rx_compact_ops = NULL,
};
static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index fc47e7e6b498..107927d64bbb 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@@ -1023,6 +1023,8 @@ struct ath12k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
+
+ u32 hal_desc_sz;
};
/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
@@ -1031,7 +1033,7 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
u8 rbm_id;
};
-struct hal_ops {
+struct hal_rx_ops {
bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
@@ -1070,18 +1072,30 @@ struct hal_ops {
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
- int (*create_srng_config)(struct ath12k_base *ab);
bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_desc_size)(void);
+ u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
+};
+
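+/* Chip-level ops; the rx descriptor accessors above now live in
+ * struct hal_rx_ops so that a compact descriptor variant can be
+ * plugged in via get_hal_rx_compact_ops().
+ */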
+struct hal_ops {
+ int (*create_srng_config)(struct ath12k_base *ab);
+ u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
+ u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
+ const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
extern const struct hal_ops hal_qcn9274_ops;
extern const struct hal_ops hal_wcn7850_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops;
+extern const struct hal_rx_ops hal_rx_wcn7850_ops;
+
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
int tid, u32 ba_window_size,
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 6c17adc6d60b..63340256d3f6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2500,13 +2500,13 @@ struct hal_rx_reo_queue {
#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(9, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(11, 10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(24, 13)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(27)
struct hal_reo_update_rx_queue {
struct hal_reo_cmd_hdr cmd;
@@ -2517,6 +2517,12 @@ struct hal_reo_update_rx_queue {
__le32 pn[4];
} __packed;
+struct hal_rx_reo_queue_1k {
+ struct hal_desc_header desc_hdr;
+ __le32 rx_bitmap_1023_288[23];
+ __le32 reserved[8];
+} __packed;
+
#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index 4f25eb9f7745..f7c1aaa3b5d4 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -247,7 +247,7 @@ int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath12k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath12k_warn(ab, "Unknown reo command %d\n", type);
@@ -688,23 +688,28 @@ void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
- u32 num_ext_desc;
+ u32 num_ext_desc, num_1k_desc = 0;
if (ba_window_size <= 1) {
if (tid != HAL_DESC_REO_NON_QOS_TID)
num_ext_desc = 1;
else
num_ext_desc = 0;
+
} else if (ba_window_size <= 105) {
num_ext_desc = 1;
} else if (ba_window_size <= 210) {
num_ext_desc = 2;
- } else {
+ } else if (ba_window_size <= 256) {
num_ext_desc = 3;
+ } else {
+ num_ext_desc = 10;
+ num_1k_desc = 1;
}
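+	/* The returned size accounts for ten extension descriptors plus
+	 * one 1k-bitmap descriptor (struct hal_rx_reo_queue_1k) when the
+	 * block-ack window exceeds 256.
+	 */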
return sizeof(struct hal_rx_reo_queue) +
- (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) +
+ (num_1k_desc * sizeof(struct hal_rx_reo_queue_1k));
}
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index de60d988d860..0b17dfd47856 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -897,7 +897,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -914,6 +913,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
{
.name = "wcn7850 hw2.0",
@@ -950,7 +956,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.vdev_start_delay = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.idle_ps = true,
@@ -960,7 +969,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = true,
- .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
.num_tcl_banks = 7,
.max_tx_ring = 3,
@@ -978,6 +986,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 1,
.rddm_size = 0x780000,
+
+ .def_num_link = 2,
+ .max_mlo_peer = 32,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = true,
},
{
.name = "qcn9274 hw2.0",
@@ -987,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
- .max_radios = 1,
+ .max_radios = 2,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
@@ -1023,7 +1038,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -1040,6 +1054,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index d2622bfef942..87965980b938 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@@ -17,19 +17,30 @@
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
-#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \
+ TARGET_NUM_VDEVS)
/* Num of peers for Single Radio mode */
-#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE)
/* Num of peers for DBS */
-#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS)
/* Num of peers for DBS_SBS */
-#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
-/* Max num of stations (per radio) */
-#define TARGET_NUM_STATIONS 512
+/* Max num of stations for Single Radio mode */
+#define TARGET_NUM_STATIONS_SINGLE 512
+
+/* Max num of stations for DBS */
+#define TARGET_NUM_STATIONS_DBS 128
+
+/* Max num of stations for DBS_SBS */
+#define TARGET_NUM_STATIONS_DBS_SBS 128
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
#define TARGET_NUM_PEER_KEYS 2
@@ -66,6 +77,8 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
+#define TARGET_RX_PEER_METADATA_VER_V1A 2
+#define TARGET_RX_PEER_METADATA_VER_V1B 3
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -174,7 +187,6 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
- u32 hal_desc_sz;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -192,6 +204,13 @@ struct ath12k_hw_params {
u32 rfkill_on_level;
u32 rddm_size;
+
+ u8 def_num_link;
+ u16 max_mlo_peer;
+
+ u32 otp_board_id_register;
+
+ bool supports_sta_ps;
};
struct ath12k_hw_ops {
@@ -242,10 +261,16 @@ enum ath12k_bd_ie_board_type {
ATH12K_BD_IE_BOARD_DATA = 1,
};
+enum ath12k_bd_ie_regdb_type {
+ ATH12K_BD_IE_REGDB_NAME = 0,
+ ATH12K_BD_IE_REGDB_DATA = 1,
+};
+
enum ath12k_bd_ie_type {
/* contains sub IEs of enum ath12k_bd_ie_board_type */
ATH12K_BD_IE_BOARD = 0,
- ATH12K_BD_IE_BOARD_EXT = 1,
+ /* contains sub IEs of enum ath12k_bd_ie_regdb_type */
+ ATH12K_BD_IE_REGDB = 1,
};
struct ath12k_hw_regs {
@@ -315,6 +340,18 @@ struct ath12k_hw_regs {
u32 hal_reo_status_ring_base;
};
+static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH12K_BD_IE_BOARD:
+ return "board data";
+ case ATH12K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+
int ath12k_hw_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 88cec54c6c2e..52a5fb8b03e9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -241,8 +241,8 @@ static const u32 ath12k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -542,7 +542,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
- ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
flags,
ath12k_get_arvif_iter,
&arvif_iter);
@@ -563,7 +563,8 @@ struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -1040,7 +1041,7 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
if (ar->monitor_started)
return 0;
- ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
ath12k_mac_get_any_chandef_iter,
&chandef);
if (!chandef)
@@ -1083,9 +1084,49 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath12k_mac_config(struct ath12k *ar, u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
@@ -1122,11 +1163,84 @@ err_mon_del:
return ret;
}
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_mac_config(ar, changed);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
+ ar->pdev_idx, ret);
+
+ return ret;
+}
+
+static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie) {
+ ath12k_warn(ar->ab, "no P2P ie found in beacon\n");
+ return -ENOENT;
+ }
+
+ ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ const u8 *next, *end;
+ size_t len;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
@@ -1154,14 +1268,37 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
+ ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
- kfree_skb(bcn);
+ /* P2P IE is inserted by firmware automatically (as
+ * configured above) so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
+ }
+
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
+free_bcn_skb:
+ kfree_skb(bcn);
return ret;
}
@@ -1214,6 +1351,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
@@ -1228,7 +1366,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
- arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
@@ -1242,6 +1380,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
@@ -1250,7 +1389,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
- bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (arvif->rsnie_present || arvif->wpaie_present) {
@@ -1270,7 +1409,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
ies->data,
ies->len);
rcu_read_unlock();
- cfg80211_put_bss(ar->hw->wiphy, bss);
+ cfg80211_put_bss(hw->wiphy, bss);
}
/* FIXME: base on RSN IE/WPA IE is a correct idea? */
@@ -1304,6 +1443,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
enum nl80211_band band;
u32 ratemask;
u8 rate;
@@ -1315,7 +1455,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
return;
band = def.chan->band;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -2266,12 +2406,11 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
ath12k_smps_map[smps]);
}
-static void ath12k_bss_assoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static void ath12k_bss_assoc(struct ath12k *ar,
+ struct ath12k_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2361,11 +2500,9 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
-static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_bss_disassoc(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2413,6 +2550,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct cfg80211_chan_def *def)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
u8 basic_rate_idx;
int hw_rate_code;
@@ -2422,7 +2560,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[def->chan->band];
+ sband = hw->wiphy->bands[def->chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
@@ -2449,6 +2587,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct sk_buff *tmpl;
int ret;
u32 interval;
@@ -2457,7 +2596,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
- tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif);
if (tmpl)
ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
@@ -2465,7 +2604,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
unsol_bcast_probe_resp_enabled = 1;
interval = info->unsol_bcast_probe_resp_interval;
- tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw,
arvif->vif);
if (tmpl)
ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
@@ -2491,13 +2630,60 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
return ret;
}
-static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ enable_ps = arvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+}
+
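When dynamic_ps_timeout is zero, the inactivity timeout above falls back to one beacon interval converted to milliseconds; for the common 100 TU beacon interval this works out as below (worked example only, relying on ieee80211_tu_to_usec() multiplying by 1024):

	/* worked example of the fallback computed above */
	timeout = ieee80211_tu_to_usec(100) / 1000;	/* 100 * 1024 / 1000 = 102 ms */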
+static void ath12k_mac_bss_info_changed(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -2510,7 +2696,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
u8 rateidx;
u32 rate;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -2666,9 +2852,9 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc)
- ath12k_bss_assoc(hw, vif, info);
+ ath12k_bss_assoc(ar, arvif, info);
else
- ath12k_bss_disassoc(hw, vif);
+ ath12k_bss_disassoc(ar, arvif);
}
if (changed & BSS_CHANGED_TXPOWER) {
@@ -2768,14 +2954,35 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath12k_mac_fils_discovery(arvif, info);
- if (changed & BSS_CHANGED_EHT_PUNCTURING)
- arvif->punct_bitmap = info->eht_puncturing;
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params->supports_sta_ps) {
+ arvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
+ }
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_mac_bss_info_changed(ar, arvif, info, changed);
mutex_unlock(&ar->conf_mutex);
}
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -2784,7 +2991,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
if (ar->scan.is_roc && ar->scan.roc_notify)
- ieee80211_remain_on_channel_expired(ar->hw);
+ ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
if (!ar->scan.is_roc) {
@@ -2795,7 +3002,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ATH12K_SCAN_STARTING)),
};
- ieee80211_scan_completed(ar->hw, &info);
+ ieee80211_scan_completed(hw, &info);
}
ar->scan.state = ATH12K_SCAN_IDLE;
@@ -2940,13 +3147,16 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -2988,7 +3198,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
for (i = 0; i < arg.num_ssids; i++)
arg.ssid[i] = req->ssids[i];
} else {
- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_f_passive = 1;
}
if (req->n_channels) {
@@ -3014,7 +3224,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
}
/* Add a margin to account for event/command processing */
- ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg.max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
@@ -3025,13 +3235,17 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
@@ -3159,8 +3373,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
@@ -3175,6 +3390,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 1;
@@ -3696,7 +3914,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
if (ab->hw_params->vdev_start_delay &&
!arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath12k_start_vdev_delay(ar->hw, vif);
+ ret = ath12k_start_vdev_delay(ar, arvif);
if (ret) {
ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
goto free_peer;
@@ -3750,7 +3968,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_peer *peer;
@@ -3761,6 +3980,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -3775,6 +3996,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
@@ -3856,6 +4084,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -3863,7 +4092,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
s16 txpwr;
@@ -3879,6 +4109,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3899,12 +4131,15 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
u32 bw, smps;
+ ar = ath12k_ah_to_ar(ah);
+
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
@@ -3964,10 +4199,10 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
-static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif,
u16 ac, bool enable)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
u32 value;
int ret;
@@ -4021,17 +4256,16 @@ exit:
return ret;
}
-static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- unsigned int link_id, u16 ac,
- const struct ieee80211_tx_queue_params *params)
+static int ath12k_mac_conf_tx(struct ath12k_vif *arvif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct wmi_wmm_params_arg *p = NULL;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (ac) {
case IEEE80211_AC_VO:
@@ -4061,17 +4295,36 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
&arvif->wmm_params);
if (ret) {
- ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n",
+ ar->pdev_idx, ret);
goto exit;
}
- ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
-
+ ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd);
if (ret)
- ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n",
+ ar->pdev_idx, ret);
exit:
+ return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -4782,7 +5035,7 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
- ieee80211_free_txskb(ar->hw, skb);
+ ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4914,8 +5167,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath12k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -4959,20 +5212,41 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
return 0;
}
+static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ if (likely(!is_prb_rsp))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->u.ap.noa_data &&
+ !pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct ath12k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
u32 info_flags = info->flags;
@@ -4987,10 +5261,11 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
- is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
@@ -5000,6 +5275,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
+ /* This case applies only to P2P GO */
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
+ ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
+
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
@@ -5018,7 +5297,7 @@ void ath12k_mac_drain_tx(struct ath12k *ar)
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* TODO: Need to support new monitor mode */
}
@@ -5044,14 +5323,12 @@ static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
ATH12K_RECONFIGURE_TIMEOUT_HZ);
}
-static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+static int ath12k_mac_start(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- ath12k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -5074,14 +5351,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
goto err;
}
@@ -5111,7 +5388,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ ath12k_err(ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
goto err;
}
@@ -5130,14 +5407,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
* such as rssi, rx_duration.
*/
ret = ath12k_mac_config_mon_status_default(ar, true);
- if (ret && (ret != -ENOTSUPP)) {
+ if (ret && (ret != -EOPNOTSUPP)) {
ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
ret);
goto err;
}
- if (ret == -ENOTSUPP)
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ if (ret == -EOPNOTSUPP)
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
"monitor status config is not yet supported");
/* Configure the hash seed for hash based reo dest ring selection */
@@ -5159,7 +5436,6 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
&ab->pdevs[ar->pdev_idx]);
return 0;
-
err:
ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
@@ -5167,6 +5443,25 @@ err:
return ret;
}
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath12k_mac_rfkill_config(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -5224,17 +5519,14 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
return 0;
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_stop(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
- ath12k_mac_drain_tx(ar);
-
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
- if (ret && (ret != -ENOTSUPP))
+ if (ret && (ret != -EOPNOTSUPP))
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
@@ -5260,6 +5552,16 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ ath12k_mac_drain_tx(ar);
+
+ ath12k_mac_stop(ar);
+}
+
static u8
ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
{
@@ -5269,7 +5571,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
- if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
@@ -5376,12 +5678,11 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
return ret;
}
-static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
u32 param_id, param_value;
int ret;
@@ -5423,11 +5724,20 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
}
}
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ath12k_mac_update_vif_offload(arvif);
+}
+
static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
@@ -5439,6 +5749,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_AP &&
@@ -5483,17 +5796,29 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
default:
WARN_ON(1);
break;
@@ -5526,7 +5851,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- ath12k_mac_op_update_vif_offload(hw, vif);
+ ath12k_mac_update_vif_offload(arvif);
nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -5685,12 +6010,16 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_base *ab;
unsigned long time_left;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
@@ -5766,19 +6095,15 @@ err_vdev_del:
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+static void ath12k_mac_configure_filter(struct ath12k *ar,
+ unsigned int total_flags)
{
- struct ath12k *ar = hw->priv;
bool reset_flag;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
- *total_flags &= SUPPORTED_FILTERS;
- ar->filter_flags = *total_flags;
+ ar->filter_flags = total_flags;
/* For monitor mode */
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
@@ -5793,16 +6118,36 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
}
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
- *total_flags, reset_flag);
+ total_flags, reset_flag);
+}
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ *total_flags &= SUPPORTED_FILTERS;
+ ath12k_mac_configure_filter(ar, *total_flags);
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -5816,9 +6161,12 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
mutex_unlock(&ar->conf_mutex);
@@ -5826,14 +6174,13 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif,
+ struct ieee80211_ampdu_params *params)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k *ar = arvif->ar;
int ret = -EINVAL;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
@@ -5854,16 +6201,40 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
break;
}
+ return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret = -EINVAL;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_ampdu_action(arvif, params);
mutex_unlock(&ar->conf_mutex);
+ if (ret)
+ ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
+ ar->pdev_idx, params->action, ret);
+
return ret;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx add freq %u width %d ptr %pK\n",
@@ -5886,8 +6257,12 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx remove freq %u width %d ptr %pK\n",
@@ -5995,6 +6370,11 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
+ /* Fill the MBSSID flags to indicate AP is non MBSSID by default
+ * Corresponding flags would be updated with MBSSID support.
+ */
+ arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
@@ -6071,46 +6451,6 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
return 0;
}
-static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- ret = ath12k_mac_vdev_setup_sync(ar);
- if (ret) {
- ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- ar->num_started_vdevs--;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
- arvif->vif->addr, arvif->vdev_id);
-
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
- arvif->vdev_id);
- }
-
- return 0;
-err:
- return ret;
-}
-
static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
@@ -6215,6 +6555,8 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
+ arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
+
/* Firmware expect vdev_restart only if vdev is up.
* If vdev is down then it expect vdev_stop->vdev_start.
*/
@@ -6266,7 +6608,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
lockdep_assert_held(&ar->conf_mutex);
@@ -6295,8 +6637,12 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
mutex_lock(&ar->conf_mutex);
@@ -6311,7 +6657,8 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
- changed & IEEE80211_CHANCTX_CHANGE_RADAR)
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR ||
+ changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
ath12k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6320,12 +6667,11 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
int ret;
if (WARN_ON(arvif->is_started))
@@ -6359,19 +6705,23 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
struct ath12k_wmi_peer_create_arg param;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
- arvif->punct_bitmap = link_conf->eht_puncturing;
+ arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
if (ab->hw_params->vdev_start_delay &&
@@ -6438,11 +6788,15 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -6466,11 +6820,13 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
arvif->is_started = false;
if (ab->hw_params->vdev_start_delay &&
@@ -6490,7 +6846,10 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -6532,10 +6891,15 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
*/
static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath12k *ar = hw->priv;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
- return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ return ret;
}
static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -6553,15 +6917,10 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+static void ath12k_mac_flush(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
long time_left;
- if (drop)
- return;
-
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
@@ -6576,6 +6935,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
time_left);
}
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ if (drop)
+ return;
+
+ ath12k_mac_flush(ar);
+}
+
static int
ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
enum nl80211_band band,
@@ -6778,7 +7149,7 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
- ieee80211_queue_work(ar->hw, &arsta->update_wk);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk);
}
static void ath12k_mac_disable_peer_fixed_rate(void *data,
@@ -6826,8 +7197,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
- if (sgi == NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
+ if (sgi == NL80211_TXRATE_FORCE_LGI) {
+ ret = -EINVAL;
+ goto out;
+ }
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of used basic rates along with them.
@@ -6843,7 +7216,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
arvif->vdev_id, ret);
- return ret;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
ath12k_mac_disable_peer_fixed_rate,
@@ -6888,7 +7261,8 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
*/
ath12k_warn(ar->ab,
"Setting more than one MCS Value in bitrate mask not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
@@ -6915,6 +7289,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
+out:
return ret;
}
@@ -6922,14 +7297,18 @@ static void
ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif;
int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH12K_STATE_RESTARTED) {
@@ -7013,7 +7392,8 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
int ret = 0;
@@ -7021,6 +7401,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
+ ar = ath12k_ah_to_ar(ah);
+
ar_survey = &ar->survey[idx];
mutex_lock(&ar->conf_mutex);
@@ -7052,6 +7434,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
exit:
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -7089,6 +7472,125 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
+static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_wmi_scan_req_arg arg;
+ struct ath12k *ar;
+ u32 scan_time_msec;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_f_passive = 1;
+ arg.burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -7123,6 +7625,8 @@ static const struct ieee80211_ops ath12k_ops = {
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
+ .remain_on_channel = ath12k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -7158,9 +7662,9 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
}
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
- u32 supported_bands)
+ u32 supported_bands,
+ struct ieee80211_supported_band *bands[])
{
- struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
void *channels;
@@ -7186,7 +7690,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
- hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
@@ -7198,7 +7702,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
@@ -7213,7 +7717,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ bands[NL80211_BAND_6GHZ] = band;
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -7235,7 +7739,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
@@ -7251,28 +7755,59 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes = U16_MAX;
+
+ interface_modes &= ar->ab->hw_params->interface_modes;
+
+ return interface_modes == U16_MAX ? 0 : interface_modes;
+}
+
+static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
+ enum nl80211_iftype type)
+{
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes, mode;
+ bool is_enable = true;
+
+ mode = BIT(type);
+
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (!(interface_modes & mode))
+ is_enable = false;
+
+ return is_enable;
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
- bool ap, mesh;
+ bool ap, mesh, p2p;
- ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+ ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
- if (ap || mesh) {
+ if ((ap || mesh) && !p2p) {
n_limits = 2;
max_interfaces = 16;
+ } else if (p2p) {
+ n_limits = 3;
+ if (ap || mesh)
+ max_interfaces = 16;
+ else
+ max_interfaces = 3;
} else {
n_limits = 1;
max_interfaces = 1;
@@ -7287,14 +7822,22 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (ap) {
+ if (ap || mesh || p2p)
limits[1].max = max_interfaces;
+
+ if (ap)
limits[1].types |= BIT(NL80211_IFTYPE_AP);
- }
if (mesh)
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = max_interfaces;
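For reference, when AP, mesh and P2P are all advertised, the combination assembled above is equivalent to the following static shape (illustrative only; the driver allocates and fills it dynamically, and the values are the ones set in the code above):

static const struct ieee80211_iface_limit ath12k_if_limits_example[] = {
	{ .max = 1,  .types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 16, .types = BIT(NL80211_IFTYPE_AP) |
			      BIT(NL80211_IFTYPE_MESH_POINT) |
			      BIT(NL80211_IFTYPE_P2P_CLIENT) |
			      BIT(NL80211_IFTYPE_P2P_GO) },
	{ .max = 1,  .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};

static const struct ieee80211_iface_combination ath12k_if_comb_example = {
	.limits		= ath12k_if_limits_example,
	.n_limits	= ARRAY_SIZE(ath12k_if_limits_example),
	.max_interfaces	= 16,
};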
@@ -7349,21 +7892,27 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
},
};
-static void __ath12k_mac_unregister(struct ath12k *ar)
+static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
{
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
-
- cancel_work_sync(&ar->regd_update_work);
-
- ieee80211_unregister_hw(hw);
-
idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+}
+
+static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
+{
+ struct ieee80211_hw *hw = ah->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(hw);
+
+ ath12k_mac_cleanup_unregister(ar);
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
@@ -7371,28 +7920,42 @@ static void __ath12k_mac_unregister(struct ath12k *ar)
SET_IEEE80211_DEV(hw, NULL);
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+static int ath12k_mac_setup_register(struct ath12k *ar,
+ u32 *ht_cap,
+ struct ieee80211_supported_band *bands[])
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ int ret;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
- continue;
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
- __ath12k_mac_unregister(ar);
- }
+ ath12k_pdev_caps_update(ar);
+
+ ret = ath12k_mac_setup_channels_rates(ar,
+ cap->supported_bands,
+ bands);
+ if (ret)
+ return ret;
+
+ ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap);
+ ath12k_mac_setup_sband_iftype_data(ar, cap);
+
+ ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
+ ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
+
+ return 0;
}
-static int __ath12k_mac_register(struct ath12k *ar)
+static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ struct ath12k_pdev_cap *cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
@@ -7407,30 +7970,34 @@ static int __ath12k_mac_register(struct ath12k *ar)
int ret;
u32 ht_cap = 0;
- ath12k_pdev_caps_update(ar);
+ pdev = ar->pdev;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
-
- SET_IEEE80211_DEV(hw, ab->dev);
+ if (ab->pdevs_macaddr_valid)
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ret = ath12k_mac_setup_channels_rates(ar,
- cap->supported_bands);
+ ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
if (ret)
- goto err;
+ goto out;
- ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
- ath12k_mac_setup_sband_iftype_data(ar, cap);
+ wiphy->max_ap_assoc_sta = ar->max_num_stations;
- ret = ath12k_mac_setup_iface_combinations(ar);
- if (ret) {
- ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
- goto err_free_channels;
- }
+ cap = &pdev->cap;
wiphy->available_antennas_rx = cap->rx_chain_mask;
wiphy->available_antennas_tx = cap->tx_chain_mask;
- wiphy->interface_modes = ab->hw_params->interface_modes;
+ SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ SET_IEEE80211_DEV(hw, ab->dev);
+
+ ret = ath12k_mac_setup_iface_combinations(ah);
+ if (ret) {
+ ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_cleanup_unregister;
+ }
+
+ wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
if (wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
@@ -7483,15 +8050,10 @@ static int __ath12k_mac_register(struct ath12k *ar)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
- ar->max_num_stations = TARGET_NUM_STATIONS;
- ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
-
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
-
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
@@ -7524,7 +8086,7 @@ static int __ath12k_mac_register(struct ath12k *ar)
ret = ieee80211_register_hw(hw);
if (ret) {
- ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
@@ -7552,142 +8114,213 @@ err_free_if_combs:
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
-err_free_channels:
- kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err_cleanup_unregister:
+ ath12k_mac_cleanup_unregister(ar);
-err:
+out:
SET_IEEE80211_DEV(hw, NULL);
+
return ret;
}
+static void ath12k_mac_setup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev = ar->pdev;
+ u8 pdev_idx = ar->pdev_idx;
+
+ ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
+
+ ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
+ /* FIXME: wmi[0] is already initialized during attach,
+ * Should we do this again?
+ */
+ ath12k_wmi_pdev_attach(ab, pdev_idx);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+}
+
int ath12k_mac_register(struct ath12k_base *ab)
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
int ret;
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (ab->pdevs_macaddr_valid) {
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- } else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ar->mac_addr[4] += i;
- }
-
- ret = __ath12k_mac_register(ar);
- if (ret)
- goto err_cleanup;
-
- init_waitqueue_head(&ar->txmgmt_empty_waitq);
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
- }
-
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = 320000;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+
+ ret = ath12k_mac_hw_register(ah);
+ if (ret)
+ goto err;
+ }
+
return 0;
-err_cleanup:
+err:
for (i = i - 1; i >= 0; i--) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- __ath12k_mac_unregister(ar);
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
}
return ret;
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ int i;
+
+ for (i = ab->num_hw - 1; i >= 0; i--) {
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
+ }
+}
+
+static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
+{
+ ieee80211_free_hw(ah->hw);
+}
+
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+ struct ath12k_pdev_map *pdev_map,
+ u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
struct ath12k_pdev *pdev;
- int ret;
+ struct ath12k_hw *ah;
int i;
+ u8 pdev_idx;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map),
+ &ath12k_ops);
+ if (!hw)
+ return NULL;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
- if (!hw) {
- ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
- ret = -ENOMEM;
- goto err_free_mac;
- }
+ ah = ath12k_hw_to_ah(hw);
+ ah->hw = hw;
+ ah->num_radio = num_pdev_map;
+
+ for (i = 0; i < num_pdev_map; i++) {
+ ab = pdev_map[i].ab;
+ pdev_idx = pdev_map[i].pdev_idx;
+ pdev = &ab->pdevs[pdev_idx];
- ar = hw->priv;
- ar->hw = hw;
+ ar = ath12k_ah_to_ar(ah);
+ ar->ah = ah;
ar->ab = ab;
+ ar->hw_link_id = i;
ar->pdev = pdev;
- ar->pdev_idx = i;
- ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
-
- ar->wmi = &ab->wmi_ab.wmi[i];
- /* FIXME: wmi[0] is already initialized during attach,
- * Should we do this again?
- */
- ath12k_wmi_pdev_attach(ab, i);
-
- ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
- ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
- ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
- ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
-
+ ar->pdev_idx = pdev_idx;
pdev->ar = ar;
- spin_lock_init(&ar->data_lock);
- INIT_LIST_HEAD(&ar->arvifs);
- INIT_LIST_HEAD(&ar->ppdu_stats_info);
- mutex_init(&ar->conf_mutex);
- init_completion(&ar->vdev_setup_done);
- init_completion(&ar->vdev_delete_done);
- init_completion(&ar->peer_assoc_done);
- init_completion(&ar->peer_delete_done);
- init_completion(&ar->install_key_done);
- init_completion(&ar->bss_survey_done);
- init_completion(&ar->scan.started);
- init_completion(&ar->scan.completed);
-
- INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
- INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
-
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- }
- return 0;
-
-err_free_mac:
- ath12k_mac_destroy(ab);
+ ath12k_mac_setup(ar);
+ }
- return ret;
+ return ah;
}
void ath12k_mac_destroy(struct ath12k_base *ab)
{
- struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
+ if (!pdev->ar)
continue;
- ieee80211_free_hw(ar->hw);
pdev->ar = NULL;
}
+
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+}
+
+int ath12k_mac_allocate(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ struct ath12k_pdev_map pdev_map[MAX_RADIOS];
+ int ret, i, j;
+ u8 radio_per_hw;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
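+	/* For now each ath12k_hw wraps exactly one radio (pdev) */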
+ ab->num_hw = ab->num_radios;
+ radio_per_hw = 1;
+
+ for (i = 0; i < ab->num_hw; i++) {
+ for (j = 0; j < radio_per_hw; j++) {
+ pdev_map[j].ab = ab;
+ pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ }
+
+ ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ if (!ah) {
+ ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
+ i);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ab->ah[i] = ah;
+ }
+
+ ath12k_dp_pdev_pre_alloc(ab);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+
+ return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 7c63bb628adc..3f5e1be0dff9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -12,6 +12,8 @@
struct ath12k;
struct ath12k_base;
+struct ath12k_hw;
+struct ath12k_pdev_map;
struct ath12k_generic_iter {
struct ath12k *ar;
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index d5441ddb374b..adb8c3ec1950 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
#include "core.h"
#include "debug.h"
@@ -13,6 +14,8 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define OTP_INVALID_BOARD_ID 0xFFFF
+#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@@ -358,23 +361,60 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
+ unsigned int board_id;
int ret;
+ bool dualmac = false;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
- ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
- ab_pci->amss_path,
- sizeof(ab_pci->amss_path));
-
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
- mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
mhi_ctrl->rddm_size = ab->hw_params->rddm_size;
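+	/* When the hardware exposes an OTP board id register, read it to
+	 * decide whether the dualmac MHI firmware image must be used.
+	 */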
+ if (ab->hw_params->otp_board_id_register) {
+ board_id =
+ ath12k_pci_read32(ab, ab->hw_params->otp_board_id_register);
+ board_id = u32_get_bits(board_id, OTP_BOARD_ID_MASK);
+
+ if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to read board id\n");
+ } else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
+ dualmac = true;
+ ab->slo_capable = false;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "dualmac fw selected for board id: %x\n", board_id);
+ }
+ }
+
+ if (dualmac) {
+ if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_dualmac_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len;
+ } else {
+ ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n");
+ ret = -ENOENT;
+ goto free_controller;
+ }
+ } else {
+ if (ab->fw.amss_data && ab->fw.amss_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_len;
+ } else {
+ /* use the old separate mhi.bin MHI firmware file */
+ ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ }
+ }
+
ret = ath12k_mhi_get_msi(ab_pci);
if (ret) {
ath12k_err(ab, "failed to get msi for mhi\n");
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
new file mode 100644
index 000000000000..d334df720032
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath12k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ bool oppps = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
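+	/* Nothing to advertise when there are neither NoA descriptors nor
+	 * opportunistic PS.
+	 */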
+ if (!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)) &&
+ !(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM) *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath12k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath12k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath12k_p2p_noa_ie_fill(ie, len, noa);
+ ath12k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath12k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
new file mode 100644
index 000000000000..5768139a7844
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_P2P_H
+#define ATH12K_P2P_H
+
+#include "wmi.h"
+
+struct ath12k_wmi_p2p_noa_info;
+
+struct ath12k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+};
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index f0d2e2d8719c..14954bc05144 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -39,6 +39,10 @@
#define QCN9274_DEVICE_ID 0x1109
#define WCN7850_DEVICE_ID 0x1107
+#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164
+#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
+#define BUS_NUMBER_MASK GENMASK(3, 0)
+
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@@ -201,18 +205,17 @@ static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
- /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
- * use 0th window
- */
- else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
- !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
- window_start = 0;
else
window_start = WINDOW_START;
return window_start;
}
+static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
+{
+ return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
+}
+
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
u32 val, delay;
@@ -682,12 +685,22 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct pci_bus *bus = ab_pci->pdev->bus;
+
cfg->tgt_ce = ab->hw_params->target_ce_config;
cfg->tgt_ce_len = ab->hw_params->target_ce_count;
cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
+ ab_pci->qmi_instance =
+ u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
+ u32_encode_bits(bus->number, BUS_NUMBER_MASK);
+ ab->qmi.service_ins_id += ab_pci->qmi_instance;
+ }
}
static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
@@ -901,6 +914,26 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
+static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 reg;
+
+	/* On platforms with two or more identical MHI devices, the QMI
+	 * services run with identical qrtr-node-ids, and qrtr-lookup cannot
+	 * register more than one QMI service with the same node ID.
+	 *
+	 * Generate a unique instance ID from the PCIe domain and bus numbers
+	 * and write it to the given register so that it is available to the
+	 * firmware when the QMI service is spawned.
+	 */
+ reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
+ ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
+ reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
+}
+
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
@@ -1138,15 +1171,17 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
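+		/* MHI registers are mapped from the start of the BAR, so read
+		 * them relative to PCI_MHIREGLEN_REG instead of through the
+		 * selected window.
+		 */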
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ val = ioread32(ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@@ -1183,15 +1218,17 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ iowrite32(value, ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@@ -1219,6 +1256,9 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+ ath12k_pci_update_qrtr_node_id(ab);
+
ret = ath12k_mhi_start(ab_pci);
if (ret) {
ath12k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1310,11 +1350,21 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
goto err_free_core;
}
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
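+	/* Cache the PCI ids; they are used later when searching for a
+	 * matching board data file.
+	 */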
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
switch (pci_dev->device) {
case QCN9274_DEVICE_ID:
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = true;
ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
+ ab->hal_rx_ops = &hal_rx_qcn9274_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -1333,9 +1383,11 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
}
break;
case WCN7850_DEVICE_ID:
+ ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = false;
ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
+ ab->hal_rx_ops = &hal_rx_wcn7850_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index b2edf32ada20..ca93693ba4e9 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@@ -53,6 +53,9 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338
+#define OTP_BOARD_ID_MASK GENMASK(15, 0)
+
#define PCI_BAR_WINDOW0_BASE 0x1E00000
#define PCI_BAR_WINDOW0_END 0x1E7FFFC
#define PCI_SOC_RANGE_MASK 0x3FFF
@@ -111,6 +114,7 @@ struct ath12k_pci {
u16 link_ctl;
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
+ u32 qmi_instance;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 77a132f6bbd1..92845ffff44a 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -17,7 +17,7 @@
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
-static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -61,7 +61,7 @@ static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -511,7 +511,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -528,7 +528,68 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -753,7 +814,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -789,7 +850,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -821,7 +882,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -863,7 +924,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -890,7 +951,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -930,7 +991,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -957,7 +1018,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -975,7 +1036,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -983,7 +1044,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1009,7 +1070,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1026,7 +1087,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1042,7 +1103,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1068,7 +1129,7 @@ static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1094,7 +1155,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1348,7 +1409,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1483,7 +1544,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1501,7 +1562,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1525,7 +1586,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1542,7 +1603,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1595,7 +1656,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1630,7 +1691,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@@ -1654,7 +1715,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1671,7 +1732,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1706,7 +1767,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1724,7 +1785,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1862,7 +1923,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1879,22 +1940,78 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
+ struct wlfw_host_mlo_chip_info_s_v01 *info;
+ u8 hw_link_id = 0;
+ int i;
+
+ if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "skip QMI MLO cap due to invalid num_radio %d\n",
+ ab->qmi.num_radios);
+ return;
+ }
+
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
@@ -1905,28 +2022,31 @@ static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *re
/* Max peer number generally won't change for the same device
* but needs to be synced with host driver.
*/
- req->max_mlo_peer = 32;
+ req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
req->mlo_num_chips = 1;
+
+ info = &req->mlo_chip_info[0];
+ info->chip_id = 0;
+ info->num_local_links = ab->qmi.num_radios;
+
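+	/* Advertise one local link per reported radio, each with its own
+	 * ascending hw_link_id.
+	 */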
+ for (i = 0; i < info->num_local_links; i++) {
+ info->hw_link_id[i] = hw_link_id;
+ info->valid_mlo_link_id[i] = 1;
+
+ hw_link_id++;
+ }
+
req->mlo_chip_info_valid = 1;
- req->mlo_chip_info[0].chip_id = 0;
- req->mlo_chip_info[0].num_local_links = 2;
- req->mlo_chip_info[0].hw_link_id[0] = 0;
- req->mlo_chip_info[0].hw_link_id[1] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
}
static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
- struct qmi_wlanfw_host_cap_req_msg_v01 req;
- struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.num_clients_valid = 1;
req.num_clients = 1;
req.mem_cfg_mode = ab->qmi.target_mem_mode;
@@ -1963,10 +2083,10 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
*/
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
-
- ath12k_host_cap_parse_mlo(&req);
}
+ ath12k_host_cap_parse_mlo(ab, &req);
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1977,6 +2097,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
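+		/* release the pending transaction if the send itself fails so
+		 * that its id is not leaked
+		 */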
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
goto out;
}
@@ -1996,6 +2117,62 @@ out:
return ret;
}
+static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_phy_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ int ret;
+
+ if (!ab->slo_capable)
+ goto out;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_PHY_CAP_REQ_V01,
+ QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_phy_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send phy capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!resp.num_phy_valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ab->qmi.num_radios = resp.num_phy;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ resp.num_phy_valid, resp.num_phy,
+ resp.board_id_valid, resp.board_id);
+
+ return;
+
+out:
+	/* If the PHY capability is not advertised, rely on the default number of links */
+ ab->qmi.num_radios = ab->hw_params->def_num_link;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "no valid response from PHY capability, choose default num_phy %d\n",
+ ab->qmi.num_radios);
+}
+
static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_ind_register_req_msg_v01 *req;
@@ -2040,6 +2217,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
ret);
goto out;
@@ -2068,8 +2246,8 @@ resp_out:
static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
- struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0, i;
bool delayed;
@@ -2077,8 +2255,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
/* Some targets by default request a block of big contiguous
* DMA memory, it's hard to allocate from kernel. So host returns
* failure to firmware and firmware then request multiple blocks of
@@ -2088,7 +2264,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
- memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
@@ -2114,6 +2289,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
ret);
goto out;
@@ -2208,17 +2384,14 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
- struct qmi_wlanfw_cap_req_msg_v01 req;
- struct qmi_wlanfw_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
int ret = 0;
int r;
int i;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2229,6 +2402,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
ret);
goto out;
@@ -2310,8 +2484,8 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
const u8 *temp = data;
int ret;
u32 remaining = len;
@@ -2319,7 +2493,6 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
while (remaining) {
req->valid = 1;
@@ -2423,8 +2596,7 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
break;
case ATH12K_QMI_BDF_TYPE_REGDB:
- ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
- ATH12K_REGDB_FILE_NAME);
+ ret = ath12k_core_fetch_regdb(ab, &bd);
if (ret) {
ath12k_warn(ab, "qmi failed to load regdb bin:\n");
goto out;
@@ -2497,37 +2669,56 @@ out:
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
+ const void *m3_data;
char path[100];
+ size_t m3_len;
int ret;
- if (m3_mem->vaddr || m3_mem->size)
+ if (m3_mem->vaddr)
+ /* m3 firmware buffer is already available in the DMA buffer */
return 0;
- fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
- if (IS_ERR(fw)) {
- ret = PTR_ERR(fw);
- ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
- path, sizeof(path));
- ath12k_err(ab, "failed to load %s: %d\n", path, ret);
- return ret;
+ if (ab->fw.m3_data && ab->fw.m3_len > 0) {
+ /* firmware-N.bin had a m3 firmware file so use that */
+ m3_data = ab->fw.m3_data;
+ m3_len = ab->fw.m3_len;
+ } else {
+ /* No m3 file in firmware-N.bin so try to request old
+ * separate m3.bin.
+ */
+ fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_data = fw->data;
+ m3_len = fw->size;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
- fw->size, &m3_mem->paddr,
+ m3_len, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
- release_firmware(fw);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- memcpy(m3_mem->vaddr, fw->data, fw->size);
- m3_mem->size = fw->size;
+ memcpy(m3_mem->vaddr, m3_data, m3_len);
+ m3_mem->size = m3_len;
+
+ ret = 0;
+
+out:
release_firmware(fw);
- return 0;
+ return ret;
}
static void ath12k_qmi_m3_free(struct ath12k_base *ab)
@@ -2546,14 +2737,11 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab)
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- struct qmi_wlanfw_m3_info_req_msg_v01 req;
- struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = ath12k_qmi_m3_load(ab);
if (ret) {
ath12k_err(ab, "failed to load m3 firmware: %d", ret);
@@ -2573,6 +2761,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
ret);
goto out;
@@ -2597,14 +2786,11 @@ out:
static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
u32 mode)
{
- struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
- struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.mode = mode;
req.hw_debug_valid = 1;
req.hw_debug = 0;
@@ -2619,6 +2805,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
mode, ret);
goto out;
@@ -2649,10 +2836,10 @@ out:
static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
- struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp = {};
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
@@ -2662,8 +2849,6 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
req->host_version_valid = 1;
strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
sizeof(req->host_version));
@@ -2710,6 +2895,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
ret);
goto out;
@@ -2733,6 +2919,49 @@ out:
return ret;
}
+static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ req.enable_fwlog_valid = true;
+ req.enable_fwlog = 1;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send QMI wlan ini request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
@@ -2749,6 +2978,12 @@ int ath12k_qmi_firmware_start(struct ath12k_base *ab,
{
int ret;
+ ret = ath12k_qmi_wlanfw_wlan_ini_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret);
+ return ret;
+ }
+
ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
@@ -2792,6 +3027,8 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
struct ath12k_base *ab = qmi->ab;
int ret;
+ ath12k_qmi_phy_cap_send(ab);
+
ret = ath12k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index e25bbaa125e8..6ee33c9851c6 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
@@ -15,7 +15,6 @@
#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
-#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
@@ -141,6 +140,7 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+ u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
unsigned int service_ins_id;
@@ -251,6 +251,22 @@ struct qmi_wlanfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_PHY_CAP_REQ_V01 0x0057
+#define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_PHY_CAP_RESP_V01 0x0057
+
+struct qmi_wlanfw_phy_cap_req_msg_v01 {
+};
+
+struct qmi_wlanfw_phy_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 num_phy_valid;
+ u8 num_phy;
+ u8 board_id_valid;
+ u32 board_id;
+};
+
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
@@ -559,6 +575,21 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
+#define ATH12K_QMI_WLANFW_WLAN_INI_RESP_V01 0x002F
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_RESP_MSG_V01_MAX_LEN 7
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enable_fwlog is being passed */
+ u8 enable_fwlog_valid;
+ u8 enable_fwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f924bc13ccff..f308e9a6ed55 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
@@ -48,7 +48,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
int ret;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
@@ -95,7 +96,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
struct ieee80211_channel *channel;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
@@ -103,7 +104,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
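+		/* Skip bands this radio does not support, even if the wiphy
+		 * advertises them.
+		 */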
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -129,7 +130,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
ch = arg->channel;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -199,7 +200,7 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 55f20c446ca9..a0db6702a189 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -147,6 +147,61 @@ struct rx_mpdu_start_qcn9274 {
__le32 res1;
} __packed;
+#define QCN9274_MPDU_START_SELECT_MPDU_START_TAG BIT(0)
+#define QCN9274_MPDU_START_SELECT_INFO0_REO_QUEUE_DESC_LO BIT(1)
+#define QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 BIT(2)
+#define QCN9274_MPDU_START_SELECT_PN_95_32 BIT(3)
+#define QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 BIT(4)
+#define QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID BIT(5)
+#define QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 BIT(6)
+#define QCN9274_MPDU_START_SELECT_INFO5_INFO6 BIT(7)
+#define QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 BIT(8)
+#define QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 BIT(9)
+#define QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL BIT(10)
+#define QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL BIT(11)
+#define QCN9274_MPDU_START_SELECT_HT_CTRL_INFO7 BIT(12)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR1_47_0_ML_ADDR2_15_0 BIT(13)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR2_47_16_INFO8 BIT(14)
+#define QCN9274_MPDU_START_SELECT_RES_0_RES_1 BIT(15)
+
+#define QCN9274_MPDU_START_WMASK (QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 | \
+ QCN9274_MPDU_START_SELECT_PN_95_32 | \
+ QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 | \
+ QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID | \
+ QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 | \
+ QCN9274_MPDU_START_SELECT_INFO5_INFO6 | \
+ QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 | \
+ QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 | \
+ QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL | \
+ QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL)
+
+/* The rx_mpdu_start_qcn9274_compact structure below is tied to the mask value
+ * QCN9274_MPDU_START_WMASK. If the mask value changes, the structure must
+ * change with it.
+ */
+
+struct rx_mpdu_start_qcn9274_compact {
+ __le32 info1;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info3;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* reo_destination_indication
@@ -608,6 +663,8 @@ enum rx_msdu_start_reception_type {
RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
};
+#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22)
+
#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
@@ -786,6 +843,52 @@ struct rx_msdu_end_qcn9274 {
__le32 info14;
} __packed;
+#define QCN9274_MSDU_END_SELECT_MSDU_END_TAG BIT(0)
+#define QCN9274_MSDU_END_SELECT_INFO0_PHY_PPDUID_IP_HDR_CSUM_INFO1 BIT(1)
+#define QCN9274_MSDU_END_SELECT_INFO2_CUMULATIVE_CSUM_RULE_IND_0 BIT(2)
+#define QCN9274_MSDU_END_SELECT_IPV6_OP_CRC_INFO3_TYPE13 BIT(3)
+#define QCN9274_MSDU_END_SELECT_RULE_IND_1_TCP_SEQ_NUM BIT(4)
+#define QCN9274_MSDU_END_SELECT_TCP_ACK_NUM_INFO4_WINDOW_SIZE BIT(5)
+#define QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID BIT(6)
+#define QCN9274_MSDU_END_SELECT_INFO6_FSE_METADATA BIT(7)
+#define QCN9274_MSDU_END_SELECT_CCE_MDATA_TCP_UDP_CSUM_INFO7_IP_LEN BIT(8)
+#define QCN9274_MSDU_END_SELECT_INFO8_INFO9 BIT(9)
+#define QCN9274_MSDU_END_SELECT_INFO10_INFO11 BIT(10)
+#define QCN9274_MSDU_END_SELECT_VLAN_CTAG_STAG_CI_PEER_MDATA BIT(11)
+#define QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ BIT(12)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA BIT(13)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_31_0_TOEPLITZ_HASH_2_4 BIT(14)
+#define QCN9274_MSDU_END_SELECT_RES0_SA_47_0 BIT(15)
+#define QCN9274_MSDU_END_SELECT_INFO13_INFO14 BIT(16)
+
+#define QCN9274_MSDU_END_WMASK (QCN9274_MSDU_END_SELECT_MSDU_END_TAG | \
+ QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID | \
+ QCN9274_MSDU_END_SELECT_INFO10_INFO11 | \
+ QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ | \
+ QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA | \
+ QCN9274_MSDU_END_SELECT_INFO13_INFO14)
+
+/* The rx_msdu_end_qcn9274_compact structure below is tied to the mask value
+ * QCN9274_MSDU_END_WMASK. If the mask value changes, the structure must
+ * change with it.
+ */
+
+struct rx_msdu_end_qcn9274_compact {
+ __le64 msdu_end_tag;
+ __le16 sa_sw_peer_id;
+ __le16 info5;
+ __le16 sa_idx;
+ __le16 da_idx_or_sw_peer_id;
+ __le32 info10;
+ __le32 info11;
+ __le32 info12;
+ __le32 flow_id_toeplitz;
+ __le32 ppdu_start_timestamp_63_32;
+ __le32 phy_meta_data;
+ __le32 info13;
+ __le32 info14;
+} __packed;
+
/* These macro definitions are only used for WCN7850 */
#define RX_MSDU_END_WCN7850_INFO2_KEY_ID BIT(7, 0)
@@ -1450,16 +1553,18 @@ struct rx_msdu_end_wcn7850 {
*
*/
-/* TODO: Move to compact TLV approach
- * By default these tlv's are not aligned to 128b boundary
- * Need to remove unused qwords and make them compact/aligned
- */
struct hal_rx_desc_qcn9274 {
struct rx_msdu_end_qcn9274 msdu_end;
struct rx_mpdu_start_qcn9274 mpdu_start;
u8 msdu_payload[];
} __packed;
+struct hal_rx_desc_qcn9274_compact {
+ struct rx_msdu_end_qcn9274_compact msdu_end;
+ struct rx_mpdu_start_qcn9274_compact mpdu_start;
+ u8 msdu_payload[];
+} __packed;
+
#define RX_BE_PADDING0_BYTES 8
#define RX_BE_PADDING1_BYTES 8
@@ -1484,6 +1589,7 @@ struct hal_rx_desc_wcn7850 {
struct hal_rx_desc {
union {
struct hal_rx_desc_qcn9274 qcn9274;
+ struct hal_rx_desc_qcn9274_compact qcn9274_compact;
struct hal_rx_desc_wcn7850 wcn7850;
} u;
} __packed;
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
index f72096684b74..240737e1542d 100644
--- a/drivers/net/wireless/ath/ath12k/trace.h
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -140,6 +140,33 @@ TRACE_EVENT(ath12k_htt_rxdesc,
)
);
+TRACE_EVENT(ath12k_wmi_diag,
+ TP_PROTO(struct ath12k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 11cc3005c0f9..9d69a1769926 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -19,6 +19,7 @@
#include "mac.h"
#include "hw.h"
#include "peer.h"
+#include "p2p.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
@@ -162,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_enable_event) },
+ [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_disable_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -179,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config->num_peers = TARGET_NUM_PEERS(DBS);
- config->num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config->num_peers = TARGET_NUM_PEERS(SINGLE);
- config->num_tids = TARGET_NUM_TIDS(SINGLE);
- }
+ config->num_peers = ab->num_radios *
+ ath12k_core_get_max_peers_per_radio(ab);
+ config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@@ -228,6 +228,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -359,8 +362,8 @@ static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
}
static const void **
-ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -369,7 +372,7 @@ ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -493,13 +496,13 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
- pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
- fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@@ -727,6 +730,20 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk
return 0;
}
+static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath12k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params->single_pdev_only &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
struct sk_buff *skb;
@@ -752,6 +769,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@@ -770,7 +788,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
- cmd->chanfreq = 0;
+ cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
@@ -826,6 +844,9 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+ if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
+ cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
+
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
@@ -1024,6 +1045,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
+ cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
if (!restart) {
if (arg->ssid) {
@@ -1051,7 +1073,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@@ -1710,6 +1732,48 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, sizeof(u32));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
+
+ ptr += sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
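For reference, p2p_ie above points at a raw information element (ID byte, length byte, payload), so p2p_ie[1] + 2 is the full IE size, and the byte-array TLV is then padded to a 4-byte boundary. Below is a minimal sketch of that arithmetic with a hypothetical 11-byte P2P IE; the example function name and IE bytes are illustrative only, not part of the patch.

	/* hypothetical 11-byte P2P IE (WFA OUI 50:6f:9a, type 0x09, one
	 * Capability attribute); values are illustrative only.
	 */
	static void example_p2p_ie_len(void)
	{
		static const u8 ie[] = {
			WLAN_EID_VENDOR_SPECIFIC, 9,
			0x50, 0x6f, 0x9a, 0x09,	/* WFA OUI + P2P OUI type */
			0x02, 0x02, 0x00,	/* Capability attribute, length 2 */
			0x21, 0x00,		/* dev_capab, group_capab */
		};
		size_t ie_len = ie[1] + 2;			/* 2 + 9 = 11 */
		size_t aligned = roundup(ie_len, sizeof(u32));	/* padded to 12 */

		WARN_ON(ie_len != 11 || aligned != 12);
	}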
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn)
@@ -2130,7 +2194,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -3265,6 +3329,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
+
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
@@ -4214,7 +4281,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
- if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
+ if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
@@ -4271,7 +4338,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
- if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
+ if (ab->pdevs[i].pdev_id ==
+ ath12k_wmi_caps_ext_get_pdev_id(caps))
break;
}
@@ -4374,7 +4442,7 @@ static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4452,7 +4520,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4738,7 +4806,7 @@ static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4770,7 +4838,7 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4790,15 +4858,15 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
return 0;
}
-static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4826,7 +4894,7 @@ static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4948,7 +5016,7 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4970,7 +5038,7 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5006,6 +5074,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar)
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
+
complete(&ar->scan.started);
break;
}
@@ -5076,6 +5148,8 @@ static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -5087,7 +5161,11 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
- ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+
break;
}
}
@@ -5141,7 +5219,7 @@ static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5174,7 +5252,7 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5201,7 +5279,7 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5226,13 +5304,14 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
static int freq_to_idx(struct ath12k *ar, int freq)
{
struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
if (!ar->mac.sbands[band].channels)
continue;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
if (!sband)
continue;
@@ -5245,14 +5324,14 @@ exit:
return idx;
}
-static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5291,7 +5370,7 @@ ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5331,7 +5410,7 @@ ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5362,7 +5441,7 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5384,13 +5463,13 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
}
static int
-ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, const struct wmi_pdev_temperature_event *ev)
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ const struct wmi_pdev_temperature_event *ev)
{
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5725,8 +5804,7 @@ static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *s
{
u32 vdev_id, tx_status;
- if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath12k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -5864,7 +5942,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx_ni(ar->hw, skb);
+ ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
exit:
rcu_read_unlock();
@@ -6037,7 +6115,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
@@ -6110,7 +6188,7 @@ static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract chan info event");
return;
}
@@ -6395,7 +6473,7 @@ static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6446,7 +6524,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -6460,7 +6538,7 @@ ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6494,7 +6572,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
struct ath12k *ar;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6531,7 +6609,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar));
exit:
rcu_read_unlock();
@@ -6546,7 +6624,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct ath12k *ar;
struct wmi_pdev_temperature_event ev = {0};
- if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
return;
}
@@ -6573,7 +6651,7 @@ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6603,7 +6681,7 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6628,6 +6706,56 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+ struct ath12k *ar;
+ int ret, vdev_id;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+ ret = 0;
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+ return ret;
+}
+
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -6635,7 +6763,7 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6662,6 +6790,70 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
kfree(tb);
}
+static void
+ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ trace_ath12k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_enable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_disable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6757,11 +6949,18 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_RFKILL_STATE_CHANGE_EVENTID:
ath12k_rfkill_state_change_event(ab, skb);
break;
+ case WMI_TWT_ENABLE_EVENTID:
+ ath12k_wmi_twt_enable_event(ab, skb);
+ break;
+ case WMI_TWT_DISABLE_EVENTID:
+ ath12k_wmi_twt_disable_event(ab, skb);
+ break;
+ case WMI_P2P_NOA_EVENTID:
+ ath12k_wmi_p2p_noa_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_TWT_ENABLE_EVENTID:
- case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath12k_dbg(ab, ATH12K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
@@ -6772,6 +6971,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_VDEV_DELETE_RESP_EVENTID:
ath12k_vdev_delete_resp_event(ab, skb);
break;
+ case WMI_DIAG_EVENTID:
+ ath12k_wmi_diag_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 06e5b9b4049b..103462feb935 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -168,10 +168,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_PARAMS_MAX_SSID 16
-#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -2163,6 +2159,10 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_11BE = 289,
+ WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361,
+
+ WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
+
WMI_MAX_EXT2_SERVICE,
};
@@ -2350,6 +2350,7 @@ struct ath12k_wmi_resource_config_arg {
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
bool is_reg_cc_ext_event_supported;
+ u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2402,6 +2403,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2542,9 +2544,17 @@ struct ath12k_wmi_hw_mode_cap_params {
#define WMI_MAX_HECAP_PHY_SIZE (3)
+/* pdev_id is present in the lower 16 bits of pdev_and_hw_link_ids in
+ * ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
+ *
+ * hw_link_id is present in the upper 16 bits of pdev_and_hw_link_ids.
+ */
+#define WMI_CAPS_PARAMS_PDEV_ID GENMASK(15, 0)
+#define WMI_CAPS_PARAMS_HW_LINK_ID GENMASK(31, 16)
+
struct ath12k_wmi_mac_phy_caps_params {
__le32 hw_mode_id;
- __le32 pdev_id;
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 supported_flags;
__le32 supported_bands;
@@ -2636,13 +2646,7 @@ struct wmi_service_ready_ext2_event {
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
- union {
- struct {
- __le16 pdev_id;
- __le16 hw_link_id;
- } __packed ath12k_wmi_pdev_to_link_map;
- __le32 pdev_id;
- };
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 wireless_modes_ext;
__le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE];
@@ -2716,6 +2720,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_mac_addr_params vdev_macaddr;
__le32 num_cfg_txrx_streams;
__le32 pdev_id;
+ __le32 mbssid_flags;
+ __le32 mbssid_tx_vdev_id;
+ __le32 vdev_stats_id_valid;
__le32 vdev_stats_id;
} __packed;
@@ -2764,6 +2771,10 @@ struct ath12k_wmi_ssid_params {
#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+enum wmi_vdev_mbssid_flags {
+ WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+};
+
struct wmi_vdev_start_request_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2782,7 +2793,7 @@ struct wmi_vdev_start_request_cmd {
__le32 cac_duration_ms;
__le32 regdomain;
__le32 min_data_rate;
- __le32 mbssid_flags;
+ __le32 mbssid_flags; /* uses enum wmi_vdev_mbssid_flags */
__le32 mbssid_tx_vdev_id;
__le32 eht_ops;
__le32 punct_bitmap;
@@ -3146,7 +3157,7 @@ struct ath12k_wmi_element_info_arg {
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
/* Values lower than this may be refused by some firmware revisions, which
 * report a scan completion with a timed-out reason.
@@ -3270,24 +3281,19 @@ struct ath12k_wmi_scan_req_arg {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 dwell_time_active;
u32 dwell_time_active_2g;
u32 dwell_time_passive;
@@ -3300,36 +3306,31 @@ struct ath12k_wmi_scan_req_arg {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -3489,6 +3490,37 @@ struct wmi_get_pdev_temperature_cmd {
__le32 pdev_id;
} __packed;
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ __le32 vdev_id;
+} __packed;
+
+struct ath12k_wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in microseconds */
+ __le32 interval; /* Absent period interval in microseconds */
+ __le32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath12k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ __le32 noa_attr;
+ struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
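A minimal decode sketch for the packed noa_attr word using the masks above and le32_get_bits() from <linux/bitfield.h>; the event handler in wmi.c only pulls out the descriptor count, so this helper is hypothetical and for illustration only.

	/* hypothetical helper, not part of the patch */
	static void example_dump_noa_attr(const struct ath12k_wmi_p2p_noa_info *noa)
	{
		bool changed = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CHANGED_FLAG);
		u8 instance  = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_INDEX);
		bool opp_ps  = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
		u8 ctwin_tu  = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
		u8 num_desc  = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM);

		pr_debug("p2p noa: changed %d index %u opp_ps %d ctwin %u tu descriptors %u\n",
			 changed, instance, opp_ps, ctwin_tu, num_desc);
	}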
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@@ -3503,6 +3535,12 @@ struct wmi_bcn_tmpl_cmd {
__le32 esp_ie_offset;
} __packed;
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 ie_buf_len;
+} __packed;
+
struct wmi_vdev_install_key_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -4797,6 +4835,16 @@ struct wmi_rfkill_state_change_event {
__le32 radio_state;
} __packed;
+struct wmi_twt_enable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_twt_disable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4806,6 +4854,8 @@ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn);
@@ -4917,4 +4967,30 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+static inline u32
+ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_caps_ext_get_hw_link_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_pdev_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
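A worked example of the accessors above, using a hypothetical packed value (not taken from firmware): 0x00030001 carries pdev_id 1 in the lower 16 bits and hw_link_id 3 in the upper 16 bits. The example function name is illustrative only.

	/* hypothetical packed value, for illustration only */
	static void example_unpack_ids(void)
	{
		struct ath12k_wmi_caps_ext_params caps = {
			.pdev_and_hw_link_ids = cpu_to_le32(0x00030001),
		};

		WARN_ON(ath12k_wmi_caps_ext_get_pdev_id(&caps) != 1);
		WARN_ON(ath12k_wmi_caps_ext_get_hw_link_id(&caps) != 3);
	}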
#endif
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index c630343ca4f9..eea4bda77608 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -779,6 +779,10 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
const struct ieee80211_ops ath5k_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath5k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath5k_start,
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e37db4af33de..61b2e3f15f0e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
wiphy_lock(vif->ar->wiphy);
- cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
wiphy_unlock(vif->ar->wiphy);
}
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 9bfaadfa6c00..1a6697b6e3b4 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -144,7 +144,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return ret;
}
-static int ath_ahb_remove(struct platform_device *pdev)
+static void ath_ahb_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -155,13 +155,11 @@ static int ath_ahb_remove(struct platform_device *pdev)
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
}
-
- return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
- .remove = ath_ahb_remove,
+ .remove_new = ath_ahb_remove,
.driver = {
.name = "ath9k",
},
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 988222cea9df..acc84e6711b0 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
} else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
+ antcomb->rssi_lna2) {
/* set to A-B */
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 57e2b4c89125..ad72a30b67c3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -851,8 +851,6 @@
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
#define AR_PHY_POWER_TX_RATE_MAX 0x993c
#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
#define PHY_AGC_CLR 0x10000000
@@ -1041,13 +1039,6 @@
#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
-/*
- * AGC 3 Register Map
- */
-#define AR_AGC3_BASE 0xce00
-
-#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
-
/* GLB Registers */
#define AR_GLB_BASE 0x20000
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ee72faac2f1d..b399a7926ef5 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,10 +365,10 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 237f4ec2cffd..6c33e898b300 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
- bool initialized;
};
struct ath9k_htc_tx_ctl {
@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
unsigned long ps_usecount;
bool ps_enabled;
bool ps_idle;
+ bool initialized;
#ifdef CONFIG_MAC80211_LEDS
enum led_brightness brightness;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 533471e69400..547634f82183 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,10 +514,10 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
priv->csa_vif = NULL;
return true;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0aa5bdeb44a1..3633f9eb2c55 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
htc_handle->drv_priv = priv;
+ /* Allow ath9k_wmi_event_tasklet() to operate. */
+ smp_wmb();
+ priv->initialized = true;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a9b5212051a..b389e19381c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1868,6 +1868,10 @@ static void ath9k_htc_channel_switch_beacon(struct ieee80211_hw *hw,
}
struct ieee80211_ops ath9k_htc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_htc_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath9k_htc_start,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index efcaeccb055a..ce9c04e418b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
- smp_wmb();
- priv->tx.initialized = true;
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48ff0ffbfef..a2943aaecb20 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2786,6 +2786,10 @@ static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
struct ieee80211_ops ath9k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_tx,
.start = ath9k_start,
.stop = ath9k_stop,
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
index 955147ab48a2..f50994910eae 100644
--- a/drivers/net/wireless/ath/ath9k/reg_aic.h
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -17,10 +17,6 @@
#ifndef REG_AIC_H
#define REG_AIC_H
-#define AR_SM_BASE 0xa200
-#define AR_SM1_BASE 0xb200
-#define AR_AGC_BASE 0x9e00
-
#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 1476b42b52a9..805ad31edba2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
}
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ /* Check if ath9k_htc_probe_device() completed. */
+ if (!data_race(priv->initialized)) {
+ kfree_skb(skb);
+ continue;
+ }
+
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
- /* Check if ath9k_tx_init() completed. */
- if (!data_race(priv->tx.initialized))
- break;
-
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f15684379b03..d519b676a109 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -369,12 +369,11 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
- int ret;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
+ while (ath_tid_dequeue(tid, &skb) == 0) {
fi = get_frame_info(skb);
bf = fi->bf;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 524327d24964..7e7797bf44b7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1712,6 +1712,10 @@ static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops carl9170_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = carl9170_op_start,
.stop = carl9170_op_stop,
.tx = carl9170_op_tx,
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6bb9aa2bfe65..e902ca80eba7 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -189,7 +189,7 @@ static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
- struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct _carl9170_tx_superframe *super;
unsigned int chunks;
int cookie = -1;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 4e6b4df8562f..bfbd3c7a70b3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1347,6 +1347,10 @@ static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif
}
static const struct ieee80211_ops wcn36xx_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wcn36xx_start,
.stop = wcn36xx_stop,
.add_interface = wcn36xx_add_interface,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 447b51cff8f9..0b55a272bfd6 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2178,6 +2178,10 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
static const struct ieee80211_ops at76_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = at76_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = at76_add_interface,
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 67b4bac048e5..c0d8fc0b22fb 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_wake_queue(dev->wl->hw, 0);
+}
+
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_stop_queue(dev->wl->hw, 0);
+}
+
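For context, these wrappers matter because the driver registers only one mac80211 queue when QoS is disabled (see the wl->hw->queues = 1 change in main.c later in this diff), so every priority must collapse to queue 0. A minimal sketch of that mapping follows; the helper name is hypothetical and not part of the patch.

	/* hypothetical helper: the queue index that b43_wake_queue() and
	 * b43_stop_queue() effectively act on.
	 */
	static inline int b43_queue_index(struct b43_wldev *dev, int queue_prio)
	{
		return dev->qos_enabled ? queue_prio : 0;
	}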
/* Message printing */
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 760d1a28edc6..6ac7dcebfff9 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ b43_stop_queue(dev, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+ b43_wake_queue(dev, ring->queue_prio);
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 92ca0b2ca286..badb2f494035 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
start_ieee80211:
wl->hw->queues = B43_QOS_QUEUE_NUM;
- if (!modparam_qos || dev->fw.opensource)
+ if (!modparam_qos || dev->fw.opensource ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
wl->hw->queues = 1;
err = ieee80211_register_hw(wl->hw);
@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
wl->tx_queue_stopped[queue_num] = true;
- ieee80211_stop_queue(wl->hw, queue_num);
+ b43_stop_queue(dev, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
+ u16 skb_queue_mapping;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
- } else {
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
- }
+ else
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -5170,6 +5172,10 @@ static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index d050971d150a..26a226126bc4 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -322,8 +322,8 @@ static void b43_phy_ht_bphy_reset(struct b43_wldev *dev, bool reset)
B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
else
b43_phy_mask(dev, B43_PHY_B_BBCFG,
- (u16)~(B43_PHY_B_BBCFG_RSTCCA |
- B43_PHY_B_BBCFG_RSTRX));
+ 0xffff & ~(B43_PHY_B_BBCFG_RSTCCA |
+ B43_PHY_B_BBCFG_RSTRX));
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp);
}
@@ -551,7 +551,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
phy_ht->tx_pwr_idx[i] =
b43_phy_read(dev, status_regs[i]);
}
- b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, ~en_bits);
+ b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0xffff & ~en_bits);
} else {
b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 2c0c019a815d..4bb005b93f2c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -6246,7 +6246,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
/* Take BPHY out of the reset */
b43_phy_mask(dev, B43_PHY_B_BBCFG,
- (u16)~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX));
+ ~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX) & 0xffff);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
}
@@ -6377,7 +6377,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
} else if (channel_type == NL80211_CHAN_HT40MINUS) {
b43_phy_mask(dev, B43_NPHY_RXCTL, ~B43_NPHY_RXCTL_BSELU20);
if (phy->rev >= 7)
- b43_phy_mask(dev, 0x310, (u16)~0x8000);
+ b43_phy_mask(dev, 0x310, 0x7fff);
}
if (phy->rev >= 19) {
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 0cf70fdb60a6..e41f2f5b4c26 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */
err = -EBUSY;
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
goto out;
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) {
/* The queue is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
list_add(&pack->list, &q->packets_list);
if (q->stopped) {
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
+ b43_wake_queue(dev, q->queue_prio);
q->stopped = false;
}
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 760136638a95..18eb610f600a 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3531,6 +3531,10 @@ static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43legacy_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43legacy_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43legacy_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
index ac3a36fa3640..f471c962104a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
@@ -7,21 +7,33 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <feature.h>
#include "vops.h"
-static int brcmf_bca_attach(struct brcmf_pub *drvr)
+#define BRCMF_BCA_E_LAST 212
+
+static void brcmf_bca_feat_attach(struct brcmf_if *ifp)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+ /* SAE support not confirmed so disabling for now */
+ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE);
}
-static void brcmf_bca_detach(struct brcmf_pub *drvr)
+static int brcmf_bca_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_BCA_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_BCA_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_bca_ops = {
- .attach = brcmf_bca_attach,
- .detach = brcmf_bca_detach,
+ .feat_attach = brcmf_bca_feat_attach,
+ .alloc_fweh_info = brcmf_bca_alloc_fweh_info,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 28d6a30cc010..b99aa66dc5a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,6 +32,7 @@
#include "vendor.h"
#include "bus.h"
#include "common.h"
+#include "fwvid.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
@@ -1179,8 +1180,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
scan_request = cfg->scan_request;
cfg->scan_request = NULL;
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
+ timer_delete_sync(&cfg->escan_timeout);
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
@@ -1687,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
return reason;
}
-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
int err;
+ if (key_len > sizeof(pmk.key)) {
+ bphy_err(drvr, "key must be less than %zu bytes\n",
+ sizeof(pmk.key));
+ return -EINVAL;
+ }
+
memset(&pmk, 0, sizeof(pmk));
- /* pass pmk directly */
- pmk.key_len = cpu_to_le16(pmk_len);
- pmk.flags = cpu_to_le16(0);
- memcpy(pmk.key, pmk_data, pmk_len);
+ /* pass key material directly */
+ pmk.key_len = cpu_to_le16(key_len);
+ pmk.flags = cpu_to_le16(flags);
+ memcpy(pmk.key, key, key_len);
- /* store psk in firmware */
+ /* store key material in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ key_len);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
- u16 pwd_len)
+static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_wsec_sae_pwd_le sae_pwd;
- int err;
-
- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- bphy_err(drvr, "sae_password must be less than %d\n",
- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- return -EINVAL;
- }
-
- sae_pwd.key_len = cpu_to_le16(pwd_len);
- memcpy(sae_pwd.key, pwd_data, pwd_len);
-
- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- sizeof(sae_pwd));
- if (err < 0)
- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- pwd_len);
-
- return err;
+ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
}
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
@@ -2503,8 +2490,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
}
- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
- sme->crypto.sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
if (!err && sme->crypto.psk)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
@@ -3081,7 +3067,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
- u32 rate;
+ u32 rate = 0;
u32 rssi;
/* Get the current tx rate */
@@ -4322,6 +4308,9 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
int ret;
pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL);
+ if (!pmk_op)
+ return -ENOMEM;
+
pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3);
if (!pmksa) {
@@ -5115,6 +5104,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
bool mbss;
int is_11d;
bool supports_11d;
+ bool closednet;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -5254,8 +5244,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if (crypto->sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
- err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
- crypto->sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, crypto);
if (err < 0)
goto exit;
}
@@ -5285,12 +5274,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
- err = brcmf_fil_iovar_int_set(ifp, "closednet",
- settings->hidden_ssid);
+ closednet =
+ (settings->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
+ err = brcmf_fil_iovar_int_set(ifp, "closednet", closednet);
if (err) {
bphy_err(drvr, "%s closednet error (%d)\n",
- settings->hidden_ssid ?
- "enabled" : "disabled",
+ (closednet ? "enabled" : "disabled"),
err);
goto exit;
}
@@ -5362,10 +5351,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
msleep(400);
if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ struct cfg80211_crypto_settings crypto = {};
+
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
brcmf_set_pmk(ifp, NULL, 0);
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
- brcmf_set_sae_password(ifp, NULL, 0);
+ brcmf_fwvid_set_sae_password(ifp, &crypto);
profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
}
@@ -7271,7 +7262,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
u32 nmode = 0;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- u32 rxchain;
+ u32 rxchain = 0;
u32 nchain;
int err;
s32 i;
@@ -8437,6 +8428,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
wl_deinit_priv(cfg);
+ cancel_work_sync(&cfg->escan_timeout_work);
brcmf_free_wiphy(cfg->wiphy);
kfree(cfg);
}
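brcmf_set_wsec() above checks the key length against the fixed firmware buffer before packing length, flags and key material into a little-endian structure; brcmf_set_pmk() is now just the flags=0 case, and the WCC code later in the patch passes BRCMF_WSEC_PASSPHRASE for SAE. The following is a standalone sketch of that check-then-pack pattern with illustrative sizes and helper names, not the driver's own code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_KEY_LEN 128			/* stand-in for the firmware key buffer */

struct wsec_pmk_le {
	uint16_t key_len;		/* stored little-endian for the firmware */
	uint16_t flags;
	uint8_t key[MAX_KEY_LEN];
};

/* Portable stand-in for cpu_to_le16(): store the low byte first. */
static uint16_t put_le16(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

/* Reject oversized keys, then zero the structure and copy key material in. */
static int set_wsec(struct wsec_pmk_le *pmk, const uint8_t *key,
		    uint16_t key_len, uint16_t flags)
{
	if (key_len > sizeof(pmk->key))
		return -EINVAL;

	memset(pmk, 0, sizeof(*pmk));
	pmk->key_len = put_le16(key_len);
	pmk->flags = put_le16(flags);
	memcpy(pmk->key, key, key_len);
	return 0;
}

int main(void)
{
	struct wsec_pmk_le pmk;
	const uint8_t psk[32] = { 0 };

	printf("set_wsec: %d\n", set_wsec(&pmk, psk, sizeof(psk), 0));
	return 0;
}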
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 0e1fa3f0dea2..dc3a6a537507 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+
#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index b6d458e022fa..b24faae35873 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -266,7 +266,7 @@ static int brcmf_c_process_cal_blob(struct brcmf_if *ifp)
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_fweh_info *fweh = drvr->fweh;
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_bus *bus;
struct brcmf_rev_info_le revinfo;
@@ -413,15 +413,21 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_c_set_joinpref_default(ifp);
/* Setup event_msgs, enable E_IF */
- err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Get event_msgs error (%d)\n", err);
goto done;
}
- setbit(eventmask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ /*
+ * BRCMF_E_IF can safely be used to set the appropriate bit
+ * in the event_mask as the firmware event code is guaranteed
+ * to match the value of BRCMF_E_IF because it is old cruft
+ * that all vendors have.
+ */
+ setbit(fweh->event_mask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Set event_msgs error (%d)\n", err);
goto done;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f599d5f896e8..bf91b1e1368f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- u32 monitor;
+ u32 monitor = 0;
int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -1348,13 +1348,17 @@ int brcmf_attach(struct device *dev)
goto fail;
}
+ /* attach firmware event handler */
+ ret = brcmf_fweh_attach(drvr);
+ if (ret != 0) {
+ bphy_err(drvr, "brcmf_fweh_attach failed\n");
+ goto fail;
+ }
+
/* Attach to events important for core code */
brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
brcmf_psm_watchdog_notify);
- /* attach firmware event handler */
- brcmf_fweh_attach(drvr);
-
ret = brcmf_bus_started(drvr, drvr->ops);
if (ret != 0) {
bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index e4f911dd414b..ea76b8d33401 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -122,7 +122,7 @@ struct brcmf_pub {
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- struct brcmf_fweh_info fweh;
+ struct brcmf_fweh_info *fweh;
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index b75652ba9359..9a4837881486 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -7,21 +7,53 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <fwil.h>
#include "vops.h"
-static int brcmf_cyw_attach(struct brcmf_pub *drvr)
+#define BRCMF_CYW_E_LAST 197
+
+static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ u16 pwd_len = crypto->sae_pwd_len;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
}
-static void brcmf_cyw_detach(struct brcmf_pub *drvr)
+static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_CYW_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_CYW_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
- .attach = brcmf_cyw_attach,
- .detach = brcmf_cyw_detach,
+ .set_sae_password = brcmf_cyw_set_sae_pwd,
+ .alloc_fweh_info = brcmf_cyw_alloc_fweh_info,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 86ff174936a9..c3a602197662 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -83,6 +83,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 6d10c9efbe93..f23310a77a5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -13,6 +13,7 @@
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
+#include "fwvid.h"
#include "feature.h"
#include "common.h"
@@ -183,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
enum brcmf_feat_id id, char *name)
{
- u32 data;
+ u32 data = 0;
int err;
/* we need to know firmware error */
@@ -339,6 +340,11 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+ brcmf_feat_wlc_version_overrides(drvr);
+ brcmf_feat_firmware_overrides(drvr);
+
+ brcmf_fwvid_feat_attach(ifp);
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
@@ -346,9 +352,6 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_wlc_version_overrides(drvr);
- brcmf_feat_firmware_overrides(drvr);
-
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 68960ae98987..f0b6a7607f16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -14,7 +14,8 @@
#include "fweh.h"
#include "fwil.h"
#include "proto.h"
-
+#include "bus.h"
+#include "fwvid.h"
/**
* struct brcmf_fweh_queue_item - event item on event queue.
*
@@ -28,7 +29,7 @@
*/
struct brcmf_fweh_queue_item {
struct list_head q;
- enum brcmf_fweh_event_code code;
+ u32 code;
u8 ifidx;
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
@@ -94,7 +95,7 @@ static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
struct brcmf_if *ifp,
- enum brcmf_fweh_event_code code,
+ u32 fwcode,
struct brcmf_event_msg *emsg,
void *data)
{
@@ -102,13 +103,13 @@ static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
int err = -EINVAL;
if (ifp) {
- fweh = &ifp->drvr->fweh;
+ fweh = ifp->drvr->fweh;
/* handle the event if valid interface and handler */
- if (fweh->evt_handler[code])
- err = fweh->evt_handler[code](ifp, emsg, data);
+ if (fweh->evt_handler[fwcode])
+ err = fweh->evt_handler[fwcode](ifp, emsg, data);
else
- bphy_err(drvr, "unhandled event %d ignored\n", code);
+ bphy_err(drvr, "unhandled fwevt %d ignored\n", fwcode);
} else {
bphy_err(drvr, "no interface object\n");
}
@@ -142,7 +143,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
(ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
- (drvr->fweh.p2pdev_setup_ongoing))));
+ (drvr->fweh->p2pdev_setup_ongoing))));
if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
brcmf_dbg(EVENT, "event can be ignored\n");
return;
@@ -163,7 +164,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
if (!is_p2pdev)
brcmf_proto_add_if(drvr, ifp);
- if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+ if (!drvr->fweh->evt_handler[BRCMF_E_IF])
if (brcmf_net_attach(ifp, false) < 0)
return;
}
@@ -183,6 +184,45 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
}
}
+static void brcmf_fweh_map_event_code(struct brcmf_fweh_info *fweh,
+ enum brcmf_fweh_event_code code,
+ u32 *fw_code)
+{
+ int i;
+
+ if (WARN_ON(!fw_code))
+ return;
+
+ *fw_code = code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].code == code) {
+ *fw_code = fweh->event_map->items[i].fwevt_code;
+ break;
+ }
+ }
+ }
+}
+
+static void brcmf_fweh_map_fwevt_code(struct brcmf_fweh_info *fweh, u32 fw_code,
+ enum brcmf_fweh_event_code *code)
+{
+ int i;
+
+ if (WARN_ON(!code))
+ return;
+
+ *code = fw_code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].fwevt_code == fw_code) {
+ *code = fweh->event_map->items[i].code;
+ break;
+ }
+ }
+ }
+}
+
/**
* brcmf_fweh_dequeue_event() - get event from the queue.
*
@@ -221,15 +261,19 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
struct brcmf_event_msg emsg;
fweh = container_of(work, struct brcmf_fweh_info, event_work);
- drvr = container_of(fweh, struct brcmf_pub, fweh);
+ drvr = fweh->drvr;
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
- brcmf_fweh_event_name(event->code), event->code,
+ enum brcmf_fweh_event_code code;
+
+ brcmf_fweh_map_fwevt_code(fweh, event->code, &code);
+ brcmf_dbg(EVENT, "event %s (%u:%u) ifidx %u bsscfg %u addr %pM\n",
+ brcmf_fweh_event_name(code), code, event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
- bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ bphy_err(drvr, "invalid bsscfg index: %u\n",
+ event->emsg.bsscfgidx);
goto event_free;
}
@@ -237,7 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
emsg_be = &event->emsg;
emsg.version = be16_to_cpu(emsg_be->version);
emsg.flags = be16_to_cpu(emsg_be->flags);
- emsg.event_code = event->code;
+ emsg.event_code = code;
emsg.status = be32_to_cpu(emsg_be->status);
emsg.reason = be32_to_cpu(emsg_be->reason);
emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
@@ -283,7 +327,7 @@ event_free:
*/
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
{
- ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
+ ifp->drvr->fweh->p2pdev_setup_ongoing = ongoing;
}
/**
@@ -291,12 +335,27 @@ void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
*
* @drvr: driver information object.
*/
-void brcmf_fweh_attach(struct brcmf_pub *drvr)
+int brcmf_fweh_attach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh;
+ int err;
+
+ err = brcmf_fwvid_alloc_fweh_info(drvr);
+ if (err < 0)
+ return err;
+
+ fweh = drvr->fweh;
+ fweh->drvr = drvr;
+
+ fweh->event_mask_len = DIV_ROUND_UP(fweh->num_event_codes, 8);
+ fweh->event_mask = kzalloc(fweh->event_mask_len, GFP_KERNEL);
+ if (!fweh->event_mask)
+ return -ENOMEM;
+
INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
spin_lock_init(&fweh->evt_q_lock);
INIT_LIST_HEAD(&fweh->event_q);
+ return 0;
}
/**
@@ -306,14 +365,19 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
*/
void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+
+ if (!fweh)
+ return;
/* cancel the worker if initialized */
if (fweh->event_work.func) {
cancel_work_sync(&fweh->event_work);
WARN_ON(!list_empty(&fweh->event_q));
- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
}
+ drvr->fweh = NULL;
+ kfree(fweh->event_mask);
+ kfree(fweh);
}
/**
@@ -326,11 +390,17 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_handler_t handler)
{
- if (drvr->fweh.evt_handler[code]) {
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+ u32 evt_handler_idx;
+
+ brcmf_fweh_map_event_code(fweh, code, &evt_handler_idx);
+
+ if (fweh->evt_handler[evt_handler_idx]) {
bphy_err(drvr, "event code %d already registered\n", code);
return -ENOSPC;
}
- drvr->fweh.evt_handler[code] = handler;
+
+ fweh->evt_handler[evt_handler_idx] = handler;
brcmf_dbg(TRACE, "event handler registered for %s\n",
brcmf_fweh_event_name(code));
return 0;
@@ -345,9 +415,12 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
void brcmf_fweh_unregister(struct brcmf_pub *drvr,
enum brcmf_fweh_event_code code)
{
+ u32 evt_handler_idx;
+
brcmf_dbg(TRACE, "event handler cleared for %s\n",
brcmf_fweh_event_name(code));
- drvr->fweh.evt_handler[code] = NULL;
+ brcmf_fweh_map_event_code(drvr->fweh, code, &evt_handler_idx);
+ drvr->fweh->evt_handler[evt_handler_idx] = NULL;
}
/**
@@ -357,27 +430,28 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
*/
int brcmf_fweh_activate_events(struct brcmf_if *ifp)
{
- struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
+ enum brcmf_fweh_event_code code;
int i, err;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- memset(eventmask, 0, sizeof(eventmask));
- for (i = 0; i < BRCMF_E_LAST; i++) {
- if (ifp->drvr->fweh.evt_handler[i]) {
+ memset(fweh->event_mask, 0, fweh->event_mask_len);
+ for (i = 0; i < fweh->num_event_codes; i++) {
+ if (fweh->evt_handler[i]) {
+ brcmf_fweh_map_fwevt_code(fweh, i, &code);
brcmf_dbg(EVENT, "enable event %s\n",
- brcmf_fweh_event_name(i));
- setbit(eventmask, i);
+ brcmf_fweh_event_name(code));
+ setbit(fweh->event_mask, i);
}
}
/* want to handle IF event as well */
brcmf_dbg(EVENT, "enable event IF\n");
- setbit(eventmask, BRCMF_E_IF);
+ setbit(fweh->event_mask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
- eventmask, BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err)
- bphy_err(drvr, "Set event_msgs error (%d)\n", err);
+ bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
return err;
}
@@ -397,21 +471,21 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
u32 packet_len, gfp_t gfp)
{
- enum brcmf_fweh_event_code code;
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ u32 fwevt_idx;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
struct brcmf_fweh_queue_item *event;
void *data;
u32 datalen;
/* get event info */
- code = get_unaligned_be32(&event_packet->msg.event_type);
+ fwevt_idx = get_unaligned_be32(&event_packet->msg.event_type);
datalen = get_unaligned_be32(&event_packet->msg.datalen);
data = &event_packet[1];
- if (code >= BRCMF_E_LAST)
+ if (fwevt_idx >= fweh->num_event_codes)
return;
- if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+ if (fwevt_idx != BRCMF_E_IF && !fweh->evt_handler[fwevt_idx])
return;
if (datalen > BRCMF_DCMD_MAXLEN ||
@@ -422,8 +496,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (!event)
return;
+ event->code = fwevt_idx;
event->datalen = datalen;
- event->code = code;
event->ifidx = event_packet->msg.ifidx;
/* use memcpy to get aligned event message */
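brcmf_fweh_map_event_code() and brcmf_fweh_map_fwevt_code() above translate between the driver's abstract event codes and the vendor firmware's numbering through an optional per-vendor table, falling back to the identity mapping when no table or no matching entry exists. A standalone sketch of the two lookups with a hypothetical one-entry map:

#include <stdio.h>
#include <stdint.h>

struct event_map_item {
	uint32_t code;		/* abstract driver-side code */
	uint32_t fwevt_code;	/* vendor firmware code */
};

struct event_map {
	unsigned int n_items;
	const struct event_map_item *items;
};

/* Driver code -> firmware code; identity if unmapped. */
static uint32_t map_event_code(const struct event_map *map, uint32_t code)
{
	for (unsigned int i = 0; map && i < map->n_items; i++)
		if (map->items[i].code == code)
			return map->items[i].fwevt_code;
	return code;
}

/* Firmware code -> driver code; identity if unmapped. */
static uint32_t map_fwevt_code(const struct event_map *map, uint32_t fwevt)
{
	for (unsigned int i = 0; map && i < map->n_items; i++)
		if (map->items[i].fwevt_code == fwevt)
			return map->items[i].code;
	return fwevt;
}

int main(void)
{
	/* hypothetical: abstract code 200 is event 164 in this firmware */
	static const struct event_map_item items[] = { { 200, 164 } };
	static const struct event_map map = { 1, items };

	printf("%u -> fw %u\n", 200u, map_event_code(&map, 200));
	printf("fw %u -> %u\n", 164u, map_fwevt_code(&map, 164));
	printf("%u -> fw %u (identity)\n", 54u, map_event_code(&map, 54));
	return 0;
}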
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 48414e8b9389..9ca1b2aadcb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -17,6 +17,10 @@ struct brcmf_pub;
struct brcmf_if;
struct brcmf_cfg80211_info;
+#define BRCMF_ABSTRACT_EVENT_BIT BIT(31)
+#define BRCMF_ABSTRACT_ENUM_DEF(_id, _val) \
+ BRCMF_ENUM_DEF(_id, (BRCMF_ABSTRACT_EVENT_BIT | (_val)))
+
/* list of firmware events */
#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
BRCMF_ENUM_DEF(SET_SSID, 0) \
@@ -98,16 +102,9 @@ struct brcmf_cfg80211_info;
/* firmware event codes sent by the dongle */
enum brcmf_fweh_event_code {
BRCMF_FWEH_EVENT_ENUM_DEFLIST
- /* this determines event mask length which must match
- * minimum length check in device firmware so it is
- * hard-coded here.
- */
- BRCMF_E_LAST = 139
};
#undef BRCMF_ENUM_DEF
-#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
-
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
@@ -288,27 +285,66 @@ typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
void *data);
/**
+ * struct brcmf_fweh_event_map_item - fweh event and firmware event pair.
+ *
+ * @code: fweh event code as used by higher layers.
+ * @fwevt_code: firmware event code as used by firmware.
+ *
+ * This mapping is needed when a functionally identical event has a
+ * different numerical definition between vendors. When such mapping
+ * is needed the higher layer event code should not collide with the
+ * firmware event.
+ */
+struct brcmf_fweh_event_map_item {
+ enum brcmf_fweh_event_code code;
+ u32 fwevt_code;
+};
+
+/**
+ * struct brcmf_fweh_event_map - mapping between firmware event and fweh event.
+ *
+ * @n_items: number of mapping items.
+ * @items: array of fweh event and firmware event pairs.
+ */
+struct brcmf_fweh_event_map {
+ u32 n_items;
+ const struct brcmf_fweh_event_map_item items[] __counted_by(n_items);
+};
+
+/**
* struct brcmf_fweh_info - firmware event handling information.
*
* @p2pdev_setup_ongoing: P2P device creation in progress.
* @event_work: event worker.
* @evt_q_lock: lock for event queue protection.
* @event_q: event queue.
- * @evt_handler: registered event handlers.
+ * @event_mask_len: length of @event_mask used to enable firmware events.
+ * @event_mask: byte array used in 'event_msgs' iovar command.
+ * @event_map: mapping between fweh event and firmware event which
+ * may be provided by vendor-specific module for events that need
+ * mapping.
+ * @num_event_codes: number of firmware events supported by firmware which
+ * does a minimum length check for the @event_mask. This value is to
+ * be provided by vendor-specific module determining @event_mask_len
+ * and consequently the allocation size for @event_mask.
+ * @evt_handler: event handler registry indexed by firmware event code.
*/
struct brcmf_fweh_info {
+ struct brcmf_pub *drvr;
bool p2pdev_setup_ongoing;
struct work_struct event_work;
spinlock_t evt_q_lock;
struct list_head event_q;
- int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
- const struct brcmf_event_msg *evtmsg,
- void *data);
+ uint event_mask_len;
+ u8 *event_mask;
+ struct brcmf_fweh_event_map *event_map;
+ uint num_event_codes;
+ brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes);
};
const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code);
-void brcmf_fweh_attach(struct brcmf_pub *drvr);
+int brcmf_fweh_attach(struct brcmf_pub *drvr);
void brcmf_fweh_detach(struct brcmf_pub *drvr);
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
int (*handler)(struct brcmf_if *ifp,
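With the hard-coded BRCMF_E_LAST and BRCMF_EVENTING_MASK_LEN removed, brcmf_fweh_attach() derives the mask length from the vendor-supplied event count as DIV_ROUND_UP(num_event_codes, 8), and events are enabled by setting the matching bit in that byte array. A standalone sketch of the sizing and bit-set arithmetic, mirroring the driver's setbit() layout (the event code used below is only illustrative):

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same byte/bit layout as setbit(): bit (code % 8) of byte (code / 8). */
static void set_event_bit(unsigned char *mask, unsigned int code)
{
	mask[code / 8] |= 1u << (code % 8);
}

int main(void)
{
	unsigned int num_event_codes = 197;	/* e.g. BRCMF_CYW_E_LAST */
	unsigned int len = DIV_ROUND_UP(num_event_codes, 8);
	unsigned char *mask = calloc(1, len);

	if (!mask)
		return 1;

	set_event_bit(mask, 54);		/* hypothetical event code */
	printf("mask length %u bytes, byte 6 = 0x%02x\n", len, (unsigned)mask[6]);
	free(mask);
	return 0;
}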
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 72fe8bce6eaf..6385a7db7f7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
s32
brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
@@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
-
-
-s32
-brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(data);
-
- mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
- mutex_unlock(&ifp->drvr->proto_block);
-
- return err;
-}
-
-s32
-brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(*data);
-
- mutex_lock(&ifp->drvr->proto_block);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
- mutex_unlock(&ifp->drvr->proto_block);
- *data = le32_to_cpu(data_le);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
-
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
static u32
brcmf_create_iovar(const char *name, const char *data, u32 datalen,
@@ -239,6 +211,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set);
s32
brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
@@ -270,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
-}
-
-s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
static u32
brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
@@ -364,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
s32
brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
@@ -394,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
- sizeof(data_le));
-}
-
-s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
- sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
@@ -465,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
@@ -494,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
- sizeof(data_le));
-}
-
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
-
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
-{
- return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
-}
-
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
-{
- __le16 data_le = cpu_to_le16(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le16_to_cpu(data_le);
- return err;
-}
-
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index bc693157c4b1..a315a7fac6a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -81,29 +81,122 @@
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
-s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
-s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+static inline
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
- u32 len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+ err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le));
+
+ return err;
+}
+static inline
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(*data);
+
+ err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+
+ return err;
+}
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
+ const void *data, u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+static inline
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+static inline
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+ sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
+static inline
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+static inline
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u8 *data)
+{
+ return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
+}
+static inline
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u16 *data)
+{
+ __le16 data_le = cpu_to_le16(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le16_to_cpu(data_le);
+ return err;
+}
#endif /* _fwil_h_ */
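The *_int_set()/*_int_get() wrappers are now header-only inlines built on the exported data accessors: each one converts the value to a little-endian 32-bit buffer on the way out and back to CPU order on the way in, which is also why several callers in this patch start pre-initializing their output variables. Below is a portable sketch of that round trip against a dummy data accessor; the kernel versions use cpu_to_le32()/le32_to_cpu() and the real iovar plumbing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for brcmf_fil_iovar_data_get(): fills the buffer with
 * little-endian bytes exactly as firmware would. */
static int dummy_data_get(const char *name, void *buf, size_t len)
{
	const uint8_t fw_bytes[4] = { 0x03, 0x00, 0x00, 0x00 }; /* value 3, LE */

	if (len < sizeof(fw_bytes))
		return -1;
	memcpy(buf, fw_bytes, sizeof(fw_bytes));
	return 0;
}

/* Shape of the new static inline brcmf_fil_iovar_int_get(). */
static int iovar_int_get(const char *name, uint32_t *data)
{
	uint8_t le[4];
	int err = dummy_data_get(name, le, sizeof(le));

	if (err == 0)
		*data = (uint32_t)le[0] | (uint32_t)le[1] << 8 |
			(uint32_t)le[2] << 16 | (uint32_t)le[3] << 24;
	return err;
}

int main(void)
{
	uint32_t rxchain = 0;	/* callers now pre-initialize the output */

	if (iovar_int_get("rxchain", &rxchain) == 0)
		printf("rxchain = %u\n", rxchain);
	return 0;
}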
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 9d248ba1c0b2..e74a23e11830 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le {
struct brcmf_wsec_pmk_le {
__le16 key_len;
__le16 flags;
- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
index 86eafdb40541..41eafcda77f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
@@ -90,7 +90,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod,
return -ERANGE;
if (WARN_ON(!vmod) || WARN_ON(!vops) ||
- WARN_ON(!vops->attach) || WARN_ON(!vops->detach))
+ WARN_ON(!vops->alloc_fweh_info))
return -EINVAL;
if (WARN_ON(fwvid_list[fwvid].vmod))
@@ -150,7 +150,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid)
}
#endif
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
+int brcmf_fwvid_attach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
int ret;
@@ -175,7 +175,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
return ret;
}
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
+void brcmf_fwvid_detach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
@@ -187,9 +187,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
mutex_lock(&fwvid_list_lock);
- drvr->vops = NULL;
- list_del(&drvr->bus_if->list);
-
+ if (drvr->vops) {
+ drvr->vops = NULL;
+ list_del(&drvr->bus_if->list);
+ }
mutex_unlock(&fwvid_list_lock);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index 43df58bb70ad..e6ac9fc341bc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -6,12 +6,15 @@
#define FWVID_H_
#include "firmware.h"
+#include "cfg80211.h"
struct brcmf_pub;
+struct brcmf_if;
struct brcmf_fwvid_ops {
- int (*attach)(struct brcmf_pub *drvr);
- void (*detach)(struct brcmf_pub *drvr);
+ void (*feat_attach)(struct brcmf_if *ifp);
+ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
+ int (*alloc_fweh_info)(struct brcmf_pub *drvr);
};
/* exported functions */
@@ -20,28 +23,37 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod,
int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod);
/* core driver functions */
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr);
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr);
+int brcmf_fwvid_attach(struct brcmf_pub *drvr);
+void brcmf_fwvid_detach(struct brcmf_pub *drvr);
const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr);
-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr)
+static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp)
{
- int ret;
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
- ret = brcmf_fwvid_attach_ops(drvr);
- if (ret)
- return ret;
+ if (!vops->feat_attach)
+ return;
- return drvr->vops->attach(drvr);
+ vops->feat_attach(ifp);
}
-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr)
+static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
+{
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->set_sae_password)
+ return -EOPNOTSUPP;
+
+ return vops->set_sae_password(ifp, crypto);
+}
+
+static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr)
{
if (!drvr->vops)
- return;
+ return -EIO;
- drvr->vops->detach(drvr);
- brcmf_fwvid_detach_ops(drvr);
+ return drvr->vops->alloc_fweh_info(drvr);
}
#endif /* FWVID_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
index 5573a47766ad..05d7c2a4fba5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
@@ -7,21 +7,34 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <cfg80211.h>
#include "vops.h"
-static int brcmf_wcc_attach(struct brcmf_pub *drvr)
+#define BRCMF_WCC_E_LAST 213
+
+static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_debug("%s: executing\n", __func__);
- return 0;
+ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len,
+ BRCMF_WSEC_PASSPHRASE);
}
-static void brcmf_wcc_detach(struct brcmf_pub *drvr)
+static int brcmf_wcc_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_debug("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_WCC_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_WCC_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_wcc_ops = {
- .attach = brcmf_wcc_attach,
- .detach = brcmf_wcc_detach,
+ .set_sae_password = brcmf_wcc_set_sae_pwd,
+ .alloc_fweh_info = brcmf_wcc_alloc_fweh_info,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
index 89c8829528c2..9540a05247c2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <net/mac80211.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 543e93ec49d2..92860dc0a92e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -959,6 +959,10 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops brcms_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = brcms_ops_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = brcms_ops_start,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index ccc621b8ed9f..a27d6f0b8819 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+static void wlc_phy_timercb_phycal(void *ptr)
{
+ struct brcms_phy *pi = ptr;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
@@ -551,8 +552,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
if (!pi->phycal_timer)
goto err;
- if (!wlc_phy_attach_nphy(pi))
- goto err;
+ wlc_phy_attach_nphy(pi);
} else if (ISLCNPHY(pi)) {
if (!wlc_phy_attach_lcnphy(pi))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index 8668fa5558a2..70a9ec050717 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -941,7 +941,7 @@ void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+void wlc_phy_attach_nphy(struct brcms_phy *pi);
bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 7717eb85a1db..aae2cf95fe95 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -3299,7 +3299,7 @@ wlc_lcnphy_run_samples(struct brcms_phy *pi,
if (iqcalmode) {
- and_phy_reg(pi, 0x453, (u16) ~(0x1 << 15));
+ and_phy_reg(pi, 0x453, 0xffff & ~(0x1 << 15));
or_phy_reg(pi, 0x453, (0x1 << 15));
} else {
write_phy_reg(pi, 0x63f, 1);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 8580a2754789..d69879e1bd87 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14546,7 +14546,7 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
wlc_phy_txpwr_apply_nphy(pi);
}
-static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
+static void wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
@@ -14595,11 +14595,9 @@ static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
pi->phycal_tempdelta = 0;
wlc_phy_txpwr_srom_read_ppr_nphy(pi);
-
- return true;
}
-bool wlc_phy_attach_nphy(struct brcms_phy *pi)
+void wlc_phy_attach_nphy(struct brcms_phy *pi)
{
uint i;
@@ -14645,10 +14643,7 @@ bool wlc_phy_attach_nphy(struct brcms_phy *pi)
pi->pi_fptr.chanset = wlc_phy_chanspec_set_nphy;
pi->pi_fptr.txpwrrecalc = wlc_phy_txpower_recalc_target_nphy;
- if (!wlc_phy_txpwr_srom_read_nphy(pi))
- return false;
-
- return true;
+ wlc_phy_txpwr_srom_read_nphy(pi);
}
static s32 get_rf_pwr_offset(struct brcms_phy *pi, s16 pga_gn, s16 pad_gn)
@@ -17587,7 +17582,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
or_phy_reg(pi, 0x122, (0x1 << 0));
if (NREV_GE(pi->pubpi.phy_rev, 3))
- and_phy_reg(pi, 0x1e7, (u16) (~(0x1 << 15)));
+ and_phy_reg(pi, 0x1e7, 0x7fff);
else
or_phy_reg(pi, 0x1e7, (0x1 << 15));
@@ -18086,7 +18081,7 @@ wlc_phy_rfctrlintc_override_nphy(struct brcms_phy *pi, u8 field, u16 value,
(0x1 << 10));
and_phy_reg(pi, 0x2ff, (u16)
- ~(0x3 << 14));
+ 0xffff & ~(0x3 << 14));
or_phy_reg(pi, 0x2ff, (0x1 << 13));
or_phy_reg(pi, 0x2ff, (0x1 << 0));
} else {
@@ -21053,7 +21048,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
- (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
+ 0xffff & ~(BBCFG_RESETCCA | BBCFG_RESETRX));
bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
@@ -21287,7 +21282,8 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out),
+ 0xffff & ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -23197,7 +23193,7 @@ void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
or_phy_reg(pi, 0xc3, NPHY_sampleCmd_STOP);
else if (playback_status & 0x2)
and_phy_reg(pi, 0xc2,
- (u16) ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
+ 0xffff & ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
and_phy_reg(pi, 0xc3, (u16) ~(0x1 << 2));
@@ -28202,8 +28198,9 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
if (NREV_GE(pi->pubpi.phy_rev, 3))
and_phy_reg(pi, 0x1e7,
- (u16) (~((0x1 << 15) |
- (0x1 << 14) | (0x1 << 13))));
+ 0xffff & ~((0x1 << 15) |
+ (0x1 << 14) |
+ (0x1 << 13)));
else
and_phy_reg(pi, 0x1e7,
(u16) (~((0x1 << 14) | (0x1 << 13))));
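Several brcmsmac hunks above replace (u16) casts of negated constants with an explicit 0xffff mask. Both spellings produce the same 16-bit value; the mask keeps the expression as a plain int and makes the truncation explicit without a cast, which is presumably the point of the change. A tiny standalone check of the equivalence for one of the constants involved:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ~(0x1 << 15) is 0xffff7fff as an int; both spellings end up as 0x7fff. */
	uint16_t casted = (uint16_t)~(0x1 << 15);
	unsigned int masked = 0xffff & ~(0x1 << 15);

	assert(casted == masked);
	printf("0x%04x == 0x%04x\n", (unsigned int)casted, masked);
	return 0;
}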
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index a0de5db0cd64..b72381791536 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
- arg, name);
+ brcms_init_timer(physhim->wl, fn, arg, name);
}
void wlapi_free_timer(struct wlapi_timer *t)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
index dd8774717ade..27d0934e600e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name);
void wlapi_free_timer(struct wlapi_timer *t);
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
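The wlapi_init_timer() signature change above makes the PHY calibration callback take a void * and cast back internally (see wlc_phy_timercb_phycal() earlier in this patch) instead of casting the function pointer at registration time, so the timer core never calls through a mismatched function-pointer type. A standalone sketch of that pattern:

#include <stdio.h>

struct phy { int cal_pending; };

/* Timer core only knows about void (*)(void *) callbacks. */
static void run_timer(void (*fn)(void *), void *arg)
{
	fn(arg);
}

/* Callback takes void * and casts back itself, so no function-pointer
 * cast is needed when registering it. */
static void phycal_timercb(void *ptr)
{
	struct phy *pi = ptr;

	printf("cal_pending=%d\n", pi->cal_pending);
}

int main(void)
{
	struct phy pi = { .cal_pending = 1 };

	run_timer(phycal_timercb, &pi);
	return 0;
}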
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 9eaf5ec133f9..075b705a8d7b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3432,6 +3432,10 @@ static const struct attribute_group il3945_attribute_group = {
};
static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il3945_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il3945_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 70e420df1643..4beb7be6d51d 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6301,6 +6301,10 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
}
static const struct ieee80211_ops il4965_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il4965_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il4965_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 17570d62c896..9d33a66a49b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3438,9 +3438,7 @@ il_init_geos(struct il_priv *il)
if (!channels)
return -ENOMEM;
- rates =
- kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
- GFP_KERNEL);
+ rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);
if (!rates) {
kfree(channels);
return -ENOMEM;
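The il_init_geos() hunk above replaces a hand-multiplied kzalloc() with kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL), which still returns zeroed memory but lets the allocator check the count-times-size multiplication and ties the element size to the pointer type. The same idea in standalone C with calloc() and a stand-in count:

#include <stdio.h>
#include <stdlib.h>

struct rate { int bitrate; int hw_value; };

int main(void)
{
	size_t count = 12;	/* stand-in for RATE_COUNT_LEGACY */

	/* calloc(count, size) zeroes the array and detects count*size overflow,
	 * like kcalloc() versus a hand-multiplied kzalloc(). */
	struct rate *rates = calloc(count, sizeof(*rates));

	if (!rates)
		return 1;
	printf("rates[0].bitrate = %d\n", rates[0].bitrate);
	free(rates);
	return 0;
}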
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 20971304fdef..4b04865fc2c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -46,6 +46,15 @@ config IWLWIFI
if IWLWIFI
+config IWLWIFI_KUNIT_TESTS
+ tristate
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option for iwlwifi kunit tests.
+
+ If unsure, say N.
+
config IWLWIFI_LEDS
bool
depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index b983982aee45..8bb94a4c12cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -18,6 +18,7 @@ iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
+iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
@@ -33,4 +34,6 @@ obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 134635c70ce8..25952d0bea99 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_AX210_UCODE_API_MAX 86
+#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 59
@@ -299,3 +299,9 @@ MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 82da957adcf6..072b0a5827d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 86
+#define IWL_BZ_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
@@ -129,10 +129,6 @@ static const struct iwl_base_params iwl_bz_base_params = {
IWL_DEVICE_BZ_COMMON, \
.ht_params = &iwl_22000_ht_params
-#define IWL_DEVICE_GL_A \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_gl_a_ht_params
-
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
* A-MPDU, with additional overhead to account for processing time.
@@ -153,6 +149,7 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
.fw_name_mac = "bz",
@@ -179,3 +176,5 @@ MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 80eb9b499538..9b79279fd76c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 86
+#define IWL_SC_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
@@ -33,6 +33,10 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
+#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
+#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
+#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
+#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
@@ -48,6 +52,14 @@
IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -124,6 +136,9 @@ static const struct iwl_base_params iwl_sc_base_params = {
#define IWL_DEVICE_SC \
IWL_DEVICE_BZ_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
/*
@@ -149,10 +164,21 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
- .uhb_supported = true,
IWL_DEVICE_SC,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_SC_EHT,
+};
+
+const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
+
+const struct iwl_cfg iwl_cfg_sc2 = {
+ .fw_name_mac = "sc2",
+ IWL_DEVICE_SC,
+};
+
+const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
+
+const struct iwl_cfg iwl_cfg_sc2f = {
+ .fw_name_mac = "sc2f",
+ IWL_DEVICE_SC,
};
MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
@@ -162,3 +188,7 @@ MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 5f3d5b15f727..52b008ce53bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1570,6 +1570,10 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
}
const struct ieee80211_ops iwlagn_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = iwlagn_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = iwlagn_mac_start,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index dcc4810cb324..4caf2e25a297 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -4,7 +4,6 @@
* Copyright (C) 2019-2023 Intel Corporation
*/
#include <linux/uuid.h>
-#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -13,68 +12,21 @@
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
-IWL_EXPORT_SYMBOL(iwl_guid);
-const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
- 0x81, 0x4F, 0x75, 0xE4,
- 0xDD, 0x26, 0xB5, 0xFD);
-IWL_EXPORT_SYMBOL(iwl_rfi_guid);
-
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "GOOGLE-ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- },
- },
- { .ident = "RAZER",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
- },
- },
- {}
+static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
+ [DSM_FUNC_QUERY] = sizeof(u32),
+ [DSM_FUNC_DISABLE_SRD] = sizeof(u8),
+ [DSM_FUNC_ENABLE_INDONESIA_5G2] = sizeof(u8),
+ [DSM_FUNC_ENABLE_6E] = sizeof(u32),
+ [DSM_FUNC_REGULATORY_CONFIG] = sizeof(u32),
+ /* Not supported in driver */
+ [5] = (size_t)0,
+ [DSM_FUNC_11AX_ENABLEMENT] = sizeof(u32),
+ [DSM_FUNC_ENABLE_UNII4_CHAN] = sizeof(u32),
+ [DSM_FUNC_ACTIVATE_CHANNEL] = sizeof(u32),
+ [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
+ [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
+ [DSM_FUNC_RFI_CONFIG] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -200,46 +152,41 @@ out:
}
/*
- * Evaluate a DSM with no arguments and a u8 return value,
+ * This function receives a DSM function number, calculates its expected size
+ * according to Intel BIOS spec, and fills in the value in a 32-bit field.
+ * If the expected size is smaller than 32 bits, the value is padded to 32 bits.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ size_t expected_size;
+ u64 tmp;
int ret;
- u64 val;
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u8));
+ BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (ret < 0)
- return ret;
-
- /* cast val (u64) to be u8 */
- *value = (u8)val;
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+ if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size)))
+ return -EINVAL;
-/*
- * Evaluate a DSM with no arguments and a u32 return value,
- */
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- int ret;
- u64 val;
+ expected_size = acpi_dsm_size[func];
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u32));
+ /* Currently all ACPI DSMs are either 8-bit or 32-bit */
+ if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
+ return -EOPNOTSUPP;
- if (ret < 0)
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
+ &iwl_guid, &tmp, expected_size);
+ if (ret)
return ret;
- /* cast val (u64) to be u32 */
- *value = (u32)val;
+ if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
+ (expected_size == sizeof(u32) && tmp != (u32)tmp))
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM value overflows the expected size, truncating\n");
+ *value = (u32)tmp;
+
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
static union acpi_object *
iwl_acpi_get_wifi_pkg_range(struct device *dev,
@@ -307,9 +254,8 @@ iwl_acpi_get_wifi_pkg(struct device *dev,
tbl_rev);
}
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -331,22 +277,9 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
- u16 override_iec =
- (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
- u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
- ACPI_WTAS_ENABLE_IEC_POS;
- u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
-
- enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- if (fw_ver <= 3) {
- cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
- cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
- } else {
- cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
- cmd->v4.override_tas_iec = (u8)override_iec;
- cmd->v4.enable_tas_iec = (u8)enabled_iec;
- }
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ tas_selection);
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -365,22 +298,16 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
- APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
- IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
- block_list_size);
- ret = -EINVAL;
- goto out_free;
- }
for (i = 0; i < block_list_size; i++) {
u32 country;
@@ -394,7 +321,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->v4.block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -403,19 +330,19 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
-int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -439,46 +366,42 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev)
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit)
{
union acpi_object *data, *wifi_pkg;
- u64 dflt_pwr_limit;
- int tbl_rev;
+ int tbl_rev, ret = -EINVAL;
- data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
- if (IS_ERR(data)) {
- dflt_pwr_limit = 0;
+ *dflt_pwr_limit = 0;
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data))
goto out;
- }
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
- wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
- dflt_pwr_limit = 0;
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER)
goto out_free;
- }
- dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ *dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ ret = 0;
out_free:
kfree(data);
out:
- return dflt_pwr_limit;
+ return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -499,11 +422,11 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-static int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled, u8 num_chains, u8 num_sub_bands)
+static int iwl_acpi_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled, u8 num_chains,
+ u8 num_sub_bands)
{
int i, j, idx = 0;
@@ -511,8 +434,8 @@ static int iwl_sar_set_profile(union acpi_object *table,
* The table from ACPI is flat, but we store it in a
* structured array.
*/
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+ for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
+ for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
@@ -535,73 +458,7 @@ static int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
- int i, j;
-
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
- struct iwl_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &fwrt->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
- profs[i]);
- /*
- * if one of the profiles is disabled, we
- * ignore all of them and return 1 to
- * differentiate disabled from other failures.
- */
- return 1;
- }
-
- IWL_DEBUG_INFO(fwrt,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < n_subbands; j++) {
- per_chain[i * n_subbands + j] =
- cpu_to_le16(prof->chains[i].subbands[j]);
- IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
- j, prof->chains[i].subbands[j]);
- }
- }
-
- return 0;
-}
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int i, ret = 0;
-
- for (i = 0; i < n_tables; i++) {
- ret = iwl_sar_fill_table(fwrt,
- &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
- n_subbands, prof_a, prof_b);
- if (ret)
- break;
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
-
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
int ret, tbl_rev;
@@ -680,16 +537,15 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
- flags & IWL_SAR_ENABLE_MSK,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
+ num_chains, num_sub_bands);
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
@@ -767,7 +623,7 @@ read_table:
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ if (n_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -776,13 +632,15 @@ read_table:
pos = 3;
for (i = 0; i < n_profiles; i++) {
+ union acpi_object *table = &wifi_pkg->package.elements[pos];
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
- ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
- &fwrt->sar_profiles[i + 1], enabled,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table,
+ &fwrt->sar_profiles[i + 1],
+ enabled, num_chains,
+ num_sub_bands);
if (ret < 0)
break;
@@ -794,9 +652,8 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
@@ -811,7 +668,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
- .min_profiles = 3,
+ .min_profiles = BIOS_GEO_MIN_PROFILE_NUM,
},
{
.revisions = BIT(2),
@@ -897,7 +754,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
- for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+ for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) {
union acpi_object *entry;
/*
@@ -921,7 +778,7 @@ read_table:
entry->integer.value;
}
- for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+ for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
@@ -949,151 +806,26 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
-
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- /*
- * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
- * earlier firmware versions. Unfortunately, we don't have a
- * TLV API flag to rely on, so rely on the major version which
- * is in the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles)
-{
- int i, j;
-
- if (!fwrt->geo_enabled)
- return -ENODATA;
-
- if (!iwl_sar_geo_support(fwrt))
- return -EOPNOTSUPP;
-
- for (i = 0; i < n_profiles; i++) {
- for (j = 0; j < n_bands; j++) {
- struct iwl_per_chain_offset *chain =
- &table[i * n_bands + j];
-
- chain->max_tx_power =
- cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
- chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
- chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
- IWL_DEBUG_RADIO(fwrt,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j,
- fwrt->geo_profiles[i].bands[j].chains[0],
- fwrt->geo_profiles[i].bands[j].chains[1],
- fwrt->geo_profiles[i].bands[j].max);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- u8 value;
- u32 val;
- __le32 config_bitmap = 0;
-
- /*
- * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'.
- * Setting config_bitmap Indonesia bit is valid only for HR/JF.
- */
- switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
- case IWL_CFG_RF_TYPE_HR1:
- case IWL_CFG_RF_TYPE_HR2:
- case IWL_CFG_RF_TYPE_JF1:
- case IWL_CFG_RF_TYPE_JF2:
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2,
- &iwl_guid, &value);
-
- if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
- break;
- default:
- break;
- }
-
- /*
- ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
- */
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_DISABLE_SRD,
- &iwl_guid, &value);
- if (!ret) {
- if (value == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
- else if (value == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
- }
-
- if (fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
- /*
- ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG'
- */
- ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0,
- DSM_FUNC_REGULATORY_CONFIG,
- &iwl_guid, &val);
- /*
- * China 2022 enable if the BIOS object does not exist or
- * if it is enabled in BIOS.
- */
- if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
- }
-
- return config_bitmap;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands = 0;
int idx = 2;
- u8 cmd_ver;
-
- fwrt->ppag_flags = 0;
- fwrt->ppag_table_valid = false;
data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
+ if (tbl_rev >= 1 && tbl_rev <= 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
+ "Reading PPAG table (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
@@ -1128,19 +860,8 @@ read_table:
goto out_free;
}
- fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) {
- ret = -EINVAL;
- goto out_free;
- }
- if (!fwrt->ppag_flags && cmd_ver <= 3) {
- ret = 0;
- goto out_free;
- }
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
+ fwrt->ppag_ver);
/*
* read, verify gain values and save them into the PPAG table.
@@ -1158,132 +879,15 @@ read_table:
}
fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
- /* from ver 4 the fw deals with out of range values */
- if (cmd_ver >= 4)
- continue;
- if ((j == 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- ret = -EINVAL;
- goto out_free;
- }
}
}
- fwrt->ppag_table_valid = true;
ret = 0;
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
-
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size)
-{
- u8 cmd_ver;
- int i, j, num_sub_bands;
- s8 *gain;
-
- /* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
- return -EOPNOTSUPP;
-
- if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(fwrt,
- "PPAG capability not supported by FW, command not sent.\n");
- return -EINVAL;
- }
-
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) {
- IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
- return -EINVAL;
- }
-
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
- IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd->v1.gain[0];
- *cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
- /* in this case FW supports revision 0 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
- }
- } else if (cmd_ver >= 2 && cmd_ver <= 4) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
- /* in this case FW supports revisions 1 or 2 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is 0, send padded table\n");
- }
- } else {
- IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
- return -EINVAL;
- }
-
- /* ppag mode */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits were read from bios: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
- if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver == 2)) {
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
- } else {
- IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
- }
-
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits going to be sent: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- fwrt->ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
-{
-
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(fwrt,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- fwrt->ppag_flags = 0;
- return false;
- }
-
- return true;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters)
@@ -1296,7 +900,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (IS_ERR(data))
return;
- /* try to read wtas table revision 1 or revision 0*/
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
@@ -1306,13 +909,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (tbl_rev != 0)
goto out_free;
- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
- return;
+ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
tmp.filter_cfg_chains[i] =
- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
+ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
@@ -1321,3 +925,38 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
+
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_GLAI_METHOD);
+ if (IS_ERR(data))
+ return;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_GLAI_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid GLAI revision: %d\n", tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value > ACPI_GLAI_MAX_STATUS)
+ goto out_free;
+
+ fwrt->uefi_tables_lock_status =
+ wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt,
+ "Loaded UEFI WIFI GUID lock status: %d from ACPI\n",
+ fwrt->uefi_tables_lock_status);
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
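A rough caller-side sketch of the consolidated iwl_acpi_get_dsm() introduced above; the function name, DSM_FUNC_ENABLE_6E and the fwrt argument come from the hunks, while the wrapper function and what it does with the result are assumptions for illustration (driver headers are assumed to be included):

/* illustrative only: read the 6E-enablement DSM into a plain u32 */
static u32 example_read_6e_enablement(struct iwl_fw_runtime *fwrt)
{
	u32 value = 0;

	/*
	 * The expected size (u8 vs u32) is looked up internally from
	 * acpi_dsm_size[], so the caller no longer picks a _u8/_u32 variant.
	 */
	if (iwl_acpi_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value))
		return 0;	/* BIOS object missing or malformed */

	return value;
}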
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e9277f6f3582..1d32b82f73db 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -7,6 +7,7 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/regulatory.h"
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
@@ -25,6 +26,7 @@
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
+#define ACPI_GLAI_METHOD "GLAI"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -56,187 +58,90 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
+#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
#define ACPI_GEO_NUM_BANDS_REV2 3
-#define ACPI_GEO_NUM_CHAINS 2
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
-
+/*
+ * One element for domain type,
+ * and one for the status
+ */
+#define ACPI_GLAI_WIFI_DATA_SIZE 2
+#define ACPI_GLAI_MAX_STATUS 2
/*
 * TAS size: 1 element for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
-#define APCI_WTAS_BLACK_LIST_MAX 16
-#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WTAS_ENABLED_MSK 0x1
-#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
-#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
-#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
-#define ACPI_WTAS_ENABLE_IEC_POS 0x2
-#define ACPI_WTAS_USA_UHB_MSK BIT(16)
-#define ACPI_WTAS_USA_UHB_POS 16
-
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX)
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
-/* PPAG gain value bounds in 1/8 dBm */
-#define ACPI_PPAG_MIN_LB -16
-#define ACPI_PPAG_MAX_LB 24
-#define ACPI_PPAG_MIN_HB -16
-#define ACPI_PPAG_MAX_HB 40
-#define ACPI_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1
-/*
- * The profile for revision 2 is a superset of revision 1, which is in
- * turn a superset of revision 0. So we can store all revisions
- * inside revision 2, which is what we represent here.
- */
-struct iwl_sar_profile_chain {
- u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-struct iwl_sar_profile {
- bool enabled;
- struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_geo_profile_band {
- u8 max;
- u8 chains[ACPI_GEO_NUM_CHAINS];
-};
-
-struct iwl_geo_profile {
- struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_ppag_chain {
- s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-enum iwl_dsm_funcs_rev_0 {
- DSM_FUNC_QUERY = 0,
- DSM_FUNC_DISABLE_SRD = 1,
- DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
- DSM_FUNC_ENABLE_6E = 3,
- DSM_FUNC_REGULATORY_CONFIG = 4,
- DSM_FUNC_11AX_ENABLEMENT = 6,
- DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8,
- DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
-};
-
-enum iwl_dsm_values_srd {
- DSM_VALUE_SRD_ACTIVE,
- DSM_VALUE_SRD_PASSIVE,
- DSM_VALUE_SRD_DISABLE,
- DSM_VALUE_SRD_MAX
-};
-
-enum iwl_dsm_values_indonesia {
- DSM_VALUE_INDONESIA_DISABLE,
- DSM_VALUE_INDONESIA_ENABLE,
- DSM_VALUE_INDONESIA_RESERVED,
- DSM_VALUE_INDONESIA_MAX
-};
-
-/* DSM RFI uses a different GUID, so need separate definitions */
-
-#define DSM_RFI_FUNC_ENABLE 3
-
-enum iwl_dsm_values_rfi {
- DSM_VALUE_RFI_ENABLE,
- DSM_VALUE_RFI_DISABLE,
- DSM_VALUE_RFI_MAX
-};
-
-enum iwl_dsm_masks_reg {
- DSM_MASK_CHINA_22_REG = BIT(2)
-};
+/* The indicator of whether the UEFI WIFI GUID tables are locked is read from ACPI */
+#define UEFI_WIFI_GUID_UNLOCKED 0
+
+#define ACPI_DSM_REV 0
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
extern const guid_t iwl_guid;
-extern const guid_t iwl_rfi_guid;
-
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value);
-
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @mcc: output buffer (3 bytes) that will get the MCC
*
* This function tries to read the current MCC from ACPI if available.
*/
-int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev);
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit);
/*
* iwl_acpi_get_eckv - read external clock validation from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @extl_clk: output var (2 bytes) that will get the clk indication.
*
* This function tries to read the external clock indication
* from ACPI if available.
*/
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b);
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt);
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles);
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
-
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters);
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
+
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -245,92 +150,61 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+static inline int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
return -ENOENT;
}
-static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+static inline int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
{
+ *dflt_pwr_limit = 0;
return 0;
}
-static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+static inline int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
return -ENOENT;
}
-static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
+static inline int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
return 1;
}
-static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- return false;
-}
-
-static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+static inline int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
{
return -ENOENT;
}
-static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- return 0;
-}
-
static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
- union iwl_ppag_table_cmd *cmd, int *cmd_size)
-{
- return -ENOENT;
-}
+/* macro since the second argument doesn't always exist */
+#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
-static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
- return false;
}
-static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ return -ENOENT;
}
-
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
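Because iwl_acpi_get_pwr_limit() now returns an error code and writes the limit through an out-parameter (and the !CONFIG_ACPI stub zeroes it), callers no longer need an #ifdef. The wrapper below is an illustrative sketch under that assumption, not code from this series:

/* illustrative: works with both the ACPI and the stubbed-out build */
static u64 example_default_power_limit(struct iwl_fw_runtime *fwrt)
{
	u64 dflt_pwr_limit;

	/* the helper sets *dflt_pwr_limit to 0 before any failure path */
	if (iwl_acpi_get_pwr_limit(fwrt, &dflt_pwr_limit))
		return 0;

	return dflt_pwr_limit;
}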
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index 3e81e9369224..bc27e15488f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2023 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -170,7 +171,11 @@ enum iwl_bt_ci_compliance {
* @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
* @ttc_status: is TTC enabled - one bit per PHY
* @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
+ * The following fields are only for version 5, and are reserved in version 4:
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
@@ -182,7 +187,10 @@ struct iwl_bt_coex_profile_notif {
__le32 bt_activity_grading;
u8 ttc_status;
u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+ u8 wifi_loss_low_rssi;
+ u8 wifi_loss_mid_high_rssi;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4
+ * BT_COEX_PROFILE_NTFY_API_S_VER_5
+ */
#endif /* __iwl_fw_api_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index ea99d41040d2..d2a74beed3a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -324,7 +324,7 @@ struct iwl_wowlan_patterns_cmd {
u8 n_patterns;
/**
- * @n_patterns: sta_id
+ * @sta_id: sta_id
*/
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 751b596ea1a5..0f7903c5a4df 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -101,7 +101,7 @@ enum iwl_data_path_subcmd_ids {
RX_NO_DATA_NOTIF = 0xF5,
/**
- * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
+ * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 394747deb269..47c914de2992 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -319,7 +319,7 @@ struct iwl_fw_ini_conf_set_tlv {
* @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
- * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration supported
*/
enum iwl_fw_ini_config_set_type {
@@ -360,6 +360,7 @@ enum iwl_fw_ini_allocation_id {
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
* @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
+ * @IWL_FW_INI_LOCATION_NUM: number of valid locations
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
@@ -439,6 +440,7 @@ enum iwl_fw_ini_region_device_memory_subtype {
* Hard coded time points in which the driver can send hcmd or perform dump
* collection
*
+ * @IWL_FW_INI_TIME_POINT_INVALID: invalid timepoint
* @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW
* @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif
* @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence
@@ -553,7 +555,7 @@ enum iwl_fw_ini_dump_policy {
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
- * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
*/
enum iwl_fw_ini_dump_type {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index b740c65a7dca..b31ae6889bd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -394,7 +394,7 @@ struct iwl_buf_alloc_cmd {
*
* @first_word: magic word value
* @second_word: magic word value
- * @framfrags: DRAM fragmentaion detail
+ * @dram_frags: DRAM fragmentation detail
*/
struct iwl_dram_info {
__le32 first_word;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index b044990c7b87..25530a29317e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -630,6 +630,7 @@ enum iwl_location_frame_format {
* @IWL_LOCATION_BW_20MHZ: 20MHz
* @IWL_LOCATION_BW_40MHZ: 40MHz
* @IWL_LOCATION_BW_80MHZ: 80MHz
+ * @IWL_LOCATION_BW_160MHZ: 160MHz
*/
enum iwl_location_bw {
IWL_LOCATION_BW_20MHZ,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index f15e6d64c298..c6d1f5644638 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -242,9 +242,9 @@ struct iwl_mac_low_latency_cmd {
* @esr_transition_timeout: the timeout required by the AP for the
* eSR transition.
* Available only from version 2 of the command.
- * This values comes from the EMLSR transition delay in the EML
+ * This value comes from the EMLSR transition delay in the EML
* Capabilities subfield.
- * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j.
+ * @medium_sync_delay: the value as it appears in P802.11be_D2.2 Figure 9-1002j.
* @assoc_id: unique ID assigned by the AP during association
* @reserved1: alignment
* @data_policy: see &enum iwl_mac_data_policy
@@ -317,7 +317,6 @@ enum iwl_mac_config_filter_flags {
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @client: client mac data
- * @go_ibss: mac data for go or ibss
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
@@ -374,7 +373,7 @@ struct iwl_mac_config_cmd {
* iwl_link_ctx_cfg_cmd::bss_color_disable
* @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask.
* This flag can be set only if the MAC that this link relates to has
- * eht_support set to true.
+ * eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -447,6 +446,7 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
* @cck_short_preamble: 1 for enabling short preamble, 0 otherwise
@@ -462,7 +462,7 @@ enum iwl_link_ctx_flags {
* @bi: beacon interval in TU, applicable only when associated
* @dtim_interval: DTIM interval in TU.
* Relevant only for GO, otherwise this is offloaded.
- * @puncture_mask: puncture mask for EHT
+ * @puncture_mask: puncture mask for EHT (removed in VER_3)
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @flags: a combination from &enum iwl_link_ctx_flags
* @flags_mask: what of %flags have changed. Also &enum iwl_link_ctx_flags
@@ -472,10 +472,10 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved: alignment
+ * @reserved2: alignment
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
- * @reserved1: reserved for future use
+ * @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
__le32 action;
@@ -486,7 +486,10 @@ struct iwl_link_config_cmd {
__le16 reserved_for_local_link_addr;
__le32 modify_mask;
__le32 active;
- __le32 listen_lmac;
+ union {
+ __le32 listen_lmac;
+ __le32 reserved1;
+ };
__le32 cck_rates;
__le32 ofdm_rates;
__le32 cck_short_preamble;
@@ -502,7 +505,7 @@ struct iwl_link_config_cmd {
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
__le32 bi;
__le32 dtim_interval;
- __le16 puncture_mask;
+ __le16 puncture_mask; /* removed in _VER_3 */
__le16 frame_time_rts_th;
__le32 flags;
__le32 flags_mask;
@@ -512,11 +515,11 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved;
+ u8 reserved2;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved1[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */
+ __le32 reserved3[8];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55882190251c..545826973a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -431,8 +431,8 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
-#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
+#define MAX_CHANNEL_BW_INDX_API_D_VER_1 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 5
/**
* struct iwl_he_pkt_ext_v1 - QAM thresholds
@@ -455,7 +455,7 @@ enum iwl_he_pkt_ext_constellations {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v1 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_1][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
@@ -480,7 +480,7 @@ struct iwl_he_pkt_ext_v1 {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 7ec959244ffc..58034dfa7e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
+#include "fw/regulatory.h"
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
@@ -438,36 +439,30 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
-#define IWL_TAS_BLOCK_LIST_MAX 16
/**
- * struct iwl_tas_config_cmd_v2 - configures the TAS
+ * struct iwl_tas_config_cmd_common - configures the TAS.
+ * This is also the v2 structure.
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
*/
-struct iwl_tas_config_cmd_v2 {
+struct iwl_tas_config_cmd_common {
__le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
- * struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
@@ -475,19 +470,20 @@ struct iwl_tas_config_cmd_v3 {
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-union iwl_tas_config_cmd {
- struct iwl_tas_config_cmd_v2 v2;
- struct iwl_tas_config_cmd_v3 v3;
- struct iwl_tas_config_cmd_v4 v4;
+struct iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_common common;
+ union {
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+ };
};
+
/**
* enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
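With the block list hoisted into iwl_tas_config_cmd_common, a sender fills the shared head once and then only the version-specific tail. The sketch below assumes iwl_tas_data carries override_tas_iec/enable_tas_iec/usa_tas_uhb_allowed fields (only its block-list fields appear in these hunks), so treat those field names and the cmd_ver check as illustrative:

/* illustrative: build a TAS command from parsed BIOS data */
static void example_fill_tas_cmd(struct iwl_tas_config_cmd *cmd,
				 const struct iwl_tas_data *data,
				 int cmd_ver)
{
	/* the common head is identical for every supported version */
	cmd->common.block_list_size = data->block_list_size;
	memcpy(cmd->common.block_list_array, data->block_list_array,
	       sizeof(cmd->common.block_list_array));

	if (cmd_ver == 3) {
		cmd->v3.override_tas_iec = cpu_to_le16(data->override_tas_iec);
		cmd->v3.enable_tas_iec = cpu_to_le16(data->enable_tas_iec);
	} else {
		cmd->v4.override_tas_iec = data->override_tas_iec;
		cmd->v4.enable_tas_iec = data->enable_tas_iec;
		cmd->v4.usa_tas_uhb_allowed = data->usa_tas_uhb_allowed;
	}
}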
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 306ed88de463..08a2c416ce60 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -142,6 +142,8 @@ struct iwl_phy_context_cmd_v1 {
* @lmac_id: the lmac id the phy context belongs to
* @ci: channel info
* @rxchain_info: ???
+ * @sbb_bandwidth: 0 disabled, 1 - 40MHz ... 4 - 320MHz
+ * @sbb_ctrl_channel_loc: location of the control channel
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
@@ -152,9 +154,20 @@ struct iwl_phy_context_cmd {
/* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
- __le32 rxchain_info; /* reserved in _VER_4 */
+ union {
+ __le32 rxchain_info; /* reserved in _VER_4 */
+ struct { /* used for _VER_5/_VER_6 */
+ u8 sbb_bandwidth;
+ u8 sbb_ctrl_channel_loc;
+ __le16 puncture_mask; /* added in VER_6 */
+ };
+ };
__le32 dsp_cfg_flags;
__le32 reserved;
-} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
+ * PHY_CONTEXT_CMD_API_VER_4,
+ * PHY_CONTEXT_CMD_API_VER_5,
+ * PHY_CONTEXT_CMD_API_VER_6
+ */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
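The new union keeps struct iwl_phy_context_cmd the same size across command versions: the _VER_5/_VER_6 members (u8 + u8 + __le16) overlay the four bytes that _VER_3/_VER_4 used for rxchain_info. A compile-time check of that overlay might look like the sketch below; the check itself is illustrative and not part of the patch:

/* illustrative: both arms of the version-dependent union are 4 bytes */
union example_phy_ctxt_ver_dep {
	__le32 rxchain_info;			/* _VER_3 / _VER_4 */
	struct {				/* _VER_5 / _VER_6 */
		u8 sbb_bandwidth;
		u8 sbb_ctrl_channel_loc;
		__le16 puncture_mask;		/* added in _VER_6 */
	};
};

static_assert(sizeof(union example_phy_ctxt_ver_dep) == sizeof(__le32),
	      "version-dependent PHY context fields must overlay exactly");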
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 040d83fa5424..0bf38243f88a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -506,13 +506,40 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
/**
+ * enum iwl_ppag_flags - PPAG enable masks
+ * @IWL_PPAG_ETSI_MASK: enable PPAG in ETSI
+ * @IWL_PPAG_CHINA_MASK: enable PPAG in China
+ * @IWL_PPAG_ETSI_LPI_UHB_MASK: enable LPI in ETSI for UHB
+ * @IWL_PPAG_ETSI_VLP_UHB_MASK: enable VLP in ETSI for UHB
+ * @IWL_PPAG_ETSI_SP_UHB_MASK: enable SP in ETSI for UHB
+ * @IWL_PPAG_USA_LPI_UHB_MASK: enable LPI in USA for UHB
+ * @IWL_PPAG_USA_VLP_UHB_MASK: enable VLP in USA for UHB
+ * @IWL_PPAG_USA_SP_UHB_MASK: enable SP in USA for UHB
+ * @IWL_PPAG_CANADA_LPI_UHB_MASK: enable LPI in CANADA for UHB
+ * @IWL_PPAG_CANADA_VLP_UHB_MASK: enable VLP in CANADA for UHB
+ * @IWL_PPAG_CANADA_SP_UHB_MASK: enable SP in CANADA for UHB
+ */
+enum iwl_ppag_flags {
+ IWL_PPAG_ETSI_MASK = BIT(0),
+ IWL_PPAG_CHINA_MASK = BIT(1),
+ IWL_PPAG_ETSI_LPI_UHB_MASK = BIT(2),
+ IWL_PPAG_ETSI_VLP_UHB_MASK = BIT(3),
+ IWL_PPAG_ETSI_SP_UHB_MASK = BIT(4),
+ IWL_PPAG_USA_LPI_UHB_MASK = BIT(5),
+ IWL_PPAG_USA_VLP_UHB_MASK = BIT(6),
+ IWL_PPAG_USA_SP_UHB_MASK = BIT(7),
+ IWL_PPAG_CANADA_LPI_UHB_MASK = BIT(8),
+ IWL_PPAG_CANADA_VLP_UHB_MASK = BIT(9),
+ IWL_PPAG_CANADA_SP_UHB_MASK = BIT(10),
+};
+
+/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- *
- * @flags: bit 0 - indicates enablement of PPAG for ETSI
- * bit 1 - indicates enablement of PPAG for CHINA BIOS
- * bit 1 can be used only in v3 (identical to v2)
+ * versions 3, 4 and 5 have the same structure as v2,
+ * but use a different format of the flags bitmap
+ * @flags: values from &enum iwl_ppag_flags
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
@@ -529,6 +556,11 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+ IWL_PPAG_ETSI_LPI_UHB_MASK | \
+ IWL_PPAG_USA_LPI_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
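The IWL_PPAG_CMD_V4_MASK/IWL_PPAG_CMD_V5_MASK definitions above exist so that BIOS-provided bits a given command version does not understand can be stripped before the command is sent. A minimal sketch of that masking follows; the version cut-off shown is an assumption for illustration:

/* illustrative: clamp BIOS PPAG flags to what the command version accepts */
static u32 example_ppag_flags_for_cmd(u32 bios_flags, int cmd_ver)
{
	if (cmd_ver <= 4)
		return bios_flags & IWL_PPAG_CMD_V4_MASK;

	/* v5 additionally understands the ETSI/USA LPI UHB bits */
	return bios_flags & IWL_PPAG_CMD_V5_MASK;
}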
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d62fed543276..d7f8a276b683 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,6 +109,7 @@ enum iwl_sta_flags {
 * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_AMSDU_SPP: SPP (signaling and payload protected) A-MSDU
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_FLG_KEYID_POS: key index bit position
* @STA_KEY_NOT_VALID: key is invalid
@@ -129,6 +130,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_EN_MSK = (7 << 0),
STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_AMSDU_SPP = BIT(7),
STA_KEY_FLG_KEYID_POS = 8,
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 842360b1e995..d9e4c75403b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -76,6 +76,8 @@ enum iwl_tx_flags {
* to a secured STA
* @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
* selection, retry limits and BT kill
+ * @IWL_TX_FLAGS_RTS: firmware used an RTS
+ * @IWL_TX_FLAGS_CTS: firmware used CTS-to-self
*/
enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
@@ -884,6 +886,7 @@ struct iwl_tx_path_flush_cmd {
/**
* struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the tid to flush
* @queue_num: virtual queue id
* @read_before_flush: read pointer before flush
* @read_after_flush: read pointer after flush
@@ -897,6 +900,7 @@ struct iwl_flush_queue_info {
/**
* struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: the station for which the queue was flushed
* @num_flushed_queues: number of queues in queues array
* @queues: all flushed queues
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 9c69d3674384..e6c0f928a6bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
IWL_GEN2_TRIG_TX_FIFO_VO,
};
+enum iwl_bz_tx_fifo {
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 80fda056e46a..db6d7013df66 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1727,10 +1727,12 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
/**
* mask_apply_and_normalize - applies mask on val and normalize the result
*
- * The normalization is based on the first set bit in the mask
- *
* @val: value
* @mask: mask to apply and to normalize with
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * Returns: the extracted value
*/
static u32 mask_apply_and_normalize(u32 val, u32 mask)
{
@@ -2199,15 +2201,16 @@ struct iwl_dump_ini_mem_ops {
};
/**
- * iwl_dump_ini_mem
- *
- * Creates a dump tlv and copy a memory region into it.
- * Returns the size of the current dump tlv or 0 if failed
+ * iwl_dump_ini_mem - dump memory region
*
* @fwrt: fw runtime struct
* @list: list to add the dump tlv to
* @reg_data: memory region
* @ops: memory dump operations
+ *
+ * Creates a dump tlv and copies a memory region into it.
+ *
+ * Returns: the size of the current dump tlv or 0 if failed
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_dump_ini_region_data *reg_data,
@@ -2426,9 +2429,12 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_debug_info_tlv *debug_info =
(void *)node->tlv.data;
+ BUILD_BUG_ON(sizeof(cfg_name->cfg_name) !=
+ sizeof(debug_info->debug_cfg_name));
+
cfg_name->image_type = debug_info->image_type;
cfg_name->cfg_name_len =
- cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ cpu_to_le32(sizeof(cfg_name->cfg_name));
memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
sizeof(cfg_name->cfg_name));
cfg_name++;
@@ -2872,7 +2878,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq, &wk_data->wk,
+ usecs_to_jiffies(delay));
return 0;
}
@@ -3174,7 +3181,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq,
+ &fwrt->dump.wks[idx].wk,
+ usecs_to_jiffies(delay));
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index eb38c686b5cb..98d56e778d99 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -306,8 +306,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
}
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
-
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
struct iwl_lmac_alive *lmac,
struct iwl_umac_alive *umac)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 06d6f7f66430..5c76e3b94968 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2014, 2018-2024 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,7 +16,7 @@
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
- * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents
* @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
* &struct iwl_fw_error_dump_txcmd packets
* @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
@@ -24,21 +24,24 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
* @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
* &struct iwl_fw_error_dump_rb
- * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO data
* @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
* for that reason is not in use in any other place in the Linux Wi-Fi
* stack.
* @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
* which we get from the fw after ALIVE. The content is structured as
* &struct iwl_fw_error_dump_smem_cfg.
+ * @IWL_FW_ERROR_DUMP_D3_DEBUG_DATA: D3 debug data
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -59,8 +62,6 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MEM_CFG = 16,
IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
-
- IWL_FW_ERROR_DUMP_MAX,
};
/**
@@ -442,7 +443,7 @@ struct iwl_fw_ini_err_table_dump {
 * struct iwl_fw_error_dump_rb - content of a Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
- * @reserved:
+ * @reserved: reserved
* @data: the content of the Receive Buffer
*/
struct iwl_fw_error_dump_rb {
@@ -488,7 +489,7 @@ struct iwl_fw_ini_special_device_memory {
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
- * @reserved:
+ * @reserved: reserved
* @data: the content of the page block
*/
struct iwl_fw_error_dump_paging {
@@ -511,6 +512,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
/**
* enum iwl_fw_dbg_trigger - triggers available
*
+ * @FW_DBG_TRIGGER_INVALID: invalid trigger value
* @FW_DBG_TRIGGER_USER: trigger log collection by user
* This should not be defined as a trigger to the driver, but a value the
* driver should set to indicate that the trigger was initiated by the
@@ -530,14 +532,15 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
- * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
- * threshold.
- * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
+ * goes above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
* @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts
* @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
* in the driver.
+ * @FW_DBG_TRIGGER_MAX: not a trigger itself; marks the number of triggers for sizing arrays etc.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index bfc39bd5bbc6..f69d29e531c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2008-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -216,6 +216,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* ADD_MODIFY_STA_KEY_API_S_VER_2.
 * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
@@ -239,14 +240,21 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
* STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API
+ * is supported by FW, this indicates the RTT confidence value
* @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar
* version tables.
* @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
- * SCAN_CONFIG_DB_CMD_API_S.
+ * SCAN_CONFIG_DB_CMD_API_S.
+ * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell
+ * number of APs in the 5 GHz band
+ * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification
* @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx
* logic.
* @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug
* internal buffer
+ * @IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD: Firmware doesn't need the host to
+ * configure the smart fifo
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -287,6 +295,7 @@ enum iwl_ucode_tlv_api {
/* API Set 2 */
IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66,
IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = (__force iwl_ucode_tlv_api_t)68,
NUM_IWL_UCODE_TLV_API
/*
@@ -383,6 +392,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
+ * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
+ * protected) A-MSDU.
+ * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -468,6 +480,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106,
@@ -480,7 +493,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114,
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
-
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
@@ -566,6 +579,7 @@ enum iwl_fw_dbg_reg_operator {
* struct iwl_fw_dbg_reg_op - an operation on a register
*
* @op: &enum iwl_fw_dbg_reg_operator
+ * @reserved: reserved
* @addr: offset of the register
* @val: value
*/
@@ -612,6 +626,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
+ * @reserved: reserved
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -722,6 +737,8 @@ enum iwl_fw_dbg_trigger_vif_type {
* @trig_dis_ms: the time, in milliseconds, after an occurrence of this
* trigger in which another occurrence should be ignored.
* @flags: &enum iwl_fw_dbg_trigger_flags
+ * @reserved: reserved (for alignment)
+ * @data: trigger data
*/
struct iwl_fw_dbg_trigger_tlv {
__le32 id;
@@ -762,7 +779,7 @@ struct iwl_fw_dbg_trigger_missed_bcon {
/**
* struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
- * cmds: the list of commands to trigger the collection on
+ * @cmds: the list of commands to trigger the collection on
*/
struct iwl_fw_dbg_trigger_cmd {
struct cmd {
@@ -772,7 +789,7 @@ struct iwl_fw_dbg_trigger_cmd {
} __packed;
/**
- * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
* @stop_offset: the offset of the value to be monitored
* @stop_threshold: the threshold above which to collect
* @start_offset: the offset of the value to be monitored
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 650e4bde9c17..1195e708caa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -12,6 +12,8 @@
#include "fw/api/alive.h"
#include "fw/uefi.h"
+#define IWL_PNVM_REDUCED_CAP_BIT BIT(25)
+
struct iwl_pnvm_section {
__le32 offset;
const u8 data[];
@@ -173,6 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
+ u32 rf_type;
len -= sizeof(*tlv);
tlv = (const void *)data;
@@ -201,6 +204,16 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
+ trans->reduced_cap_sku = false;
+ rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
+ if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type == IWL_CFG_RF_TYPE_FM)
+ trans->reduced_cap_sku = true;
+
+ IWL_DEBUG_FW(trans,
+ "Reduced SKU device %d\n",
+ trans->reduced_cap_sku);
+
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
@@ -239,7 +252,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
}
new_len = pnvm->size;
- *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
if (!*data)
@@ -255,21 +268,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
struct pnvm_sku_package *package;
u8 *image = NULL;
- /* First attempt to get the PNVM from BIOS */
- package = iwl_uefi_get_pnvm(trans_p, len);
- if (!IS_ERR_OR_NULL(package)) {
- if (*len >= sizeof(*package)) {
- /* we need only the data */
- *len -= sizeof(*package);
- image = kmemdup(package->data, *len, GFP_KERNEL);
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (trans_p->sku_id[2]) {
+ package = iwl_uefi_get_pnvm(trans_p, len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (*len >= sizeof(*package)) {
+ /* we need only the data */
+ *len -= sizeof(*package);
+ image = kvmemdup(package->data,
+ *len, GFP_KERNEL);
+ }
+ /*
+ * free package regardless of whether kvmemdup
+ * succeeded
+ */
+ kfree(package);
+ if (image)
+ return image;
}
- /* free package regardless of whether kmemdup succeeded */
- kfree(package);
- if (image)
- return image;
}
- /* If it's not available, try from the filesystem */
+ /* If it's not available, or for Intel SKU, try from the filesystem */
if (iwl_pnvm_get_from_fs(trans_p, &image, len))
return NULL;
return image;
@@ -314,7 +333,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
set:
iwl_trans_set_pnvm(trans, capa);
free:
- kfree(data);
+ kvfree(data);
kfree(pnvm_data);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
new file mode 100644
index 000000000000..36d506463e0e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/dmi.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "regulatory.h"
+#include "fw/runtime.h"
+#include "fw/uefi.h"
+
+#define GET_BIOS_TABLE(__name, ...) \
+do { \
+ int ret = -ENOENT; \
+ if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \
+ ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \
+ if (ret < 0) \
+ ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \
+ return ret; \
+} while (0)
+
+#define IWL_BIOS_TABLE_LOADER(__name) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \
+{GET_BIOS_TABLE(__name, fwrt); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \
+ data_type * data) \
+{GET_BIOS_TABLE(__name, fwrt, data); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+IWL_BIOS_TABLE_LOADER(wrds_table);
+IWL_BIOS_TABLE_LOADER(ewrd_table);
+IWL_BIOS_TABLE_LOADER(wgds_table);
+IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
+IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
+IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
+IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
+
+
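To make the UEFI-then-ACPI fallback concrete, this is a hand-expanded sketch of what IWL_BIOS_TABLE_LOADER(wrds_table) above boils down to (the do/while wrapper of GET_BIOS_TABLE is omitted for readability; iwl_acpi_get_wrds_table() is the ACPI-side getter the macro pastes in):

int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
	int ret = -ENOENT;

	/* prefer the UEFI table when the WIFI GUID variables are locked */
	if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)
		ret = iwl_uefi_get_wrds_table(fwrt);
	/* fall back to the ACPI table if UEFI did not provide one */
	if (ret < 0)
		ret = iwl_acpi_get_wrds_table(fwrt);
	return ret;
}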
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "GOOGLE-ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ { .ident = "RAZER",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id dmi_tas_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "LENOVO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "MSI",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
+{
+ int i, j;
+
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < n_profiles; i++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
+
+ chain->max_tx_power =
+ cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+ chain->chain_a =
+ fwrt->geo_profiles[i].bands[j].chains[0];
+ chain->chain_b =
+ fwrt->geo_profiles[i].bands[j].chains[1];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j,
+ fwrt->geo_profiles[i].bands[j].chains[0],
+ fwrt->geo_profiles[i].bands[j].chains[1],
+ fwrt->geo_profiles[i].bands[j].max);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
+ int i, j;
+
+ for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
+ if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
+ }
+
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < n_subbands; j++) {
+ per_chain[i * n_subbands + j] =
+ cpu_to_le16(prof->chains[i].subbands[j]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->chains[i].subbands[j]);
+ }
+ }
+
+ return 0;
+}
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
+
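A hedged caller-side sketch (the dimensions are illustrative): the caller reserves n_tables * n_subbands * BIOS_SAR_NUM_CHAINS __le16 entries and passes the chain A/B profile indices kept in struct iwl_fw_runtime (see the runtime.h hunk below); a return value of 1 means a selected profile is disabled, which is not a hard error.

	__le16 per_chain[2 * IWL_NUM_SUB_BANDS_V2 * BIOS_SAR_NUM_CHAINS] = {};
	int ret;

	ret = iwl_sar_fill_profile(fwrt, per_chain, 2, IWL_NUM_SUB_BANDS_V2,
				   fwrt->sar_chain_a_profile,
				   fwrt->sar_chain_b_profile);
	if (ret == 1)
		return 0;	/* a chosen profile is disabled: skip the command */
	if (ret)
		return ret;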
+static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
+ int subband)
+{
+ s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
+
+ if ((subband == 0 &&
+ (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
+ (subband != 0 &&
+ (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
+ return false;
+ }
+ return true;
+}
+
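For reference, the bounds enforced above come from regulatory.h later in this patch; the numeric example is illustrative only.

	/* PPAG gain bounds, in 1/8 dBm units:
	 *   subband 0 (low band):     IWL_PPAG_MIN_LB..IWL_PPAG_MAX_LB = -16..24
	 *   subbands > 0 (high band): IWL_PPAG_MIN_HB..IWL_PPAG_MAX_HB = -16..40
	 * e.g. a gain of 30 passes the check for a high-band subband but is
	 * rejected for subband 0.
	 */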
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+ bool send_ppag_always;
+
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD), 1);
+ /*
+ * Starting from ver 4, driver needs to send the PPAG CMD regardless
+ * if PPAG is enabled/disabled or valid/invalid.
+ */
+ send_ppag_always = cmd_ver > 3;
+
+ /* Don't send PPAG if it is disabled */
+ if (!send_ppag_always && !fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+
+ IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver >= 1) {
+ /* in this case FW supports revision 0 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d, send truncated table\n",
+ fwrt->ppag_ver);
+ }
+ } else if (cmd_ver >= 2 && cmd_ver <= 5) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ /* in this case FW supports revisions 1,2 or 3 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is 0, send padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ /* ppag mode */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits were read from bios: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ if (cmd_ver == 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
+ else if (cmd_ver < 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
+
+ if ((cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
+ (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
+ }
+
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ if (!send_ppag_always &&
+ !iwl_ppag_value_valid(fwrt, i, j))
+ return -EINVAL;
+
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
+
+bool iwl_is_tas_approved(void)
+{
+ return dmi_check_system(dmi_tas_approved_list);
+}
+IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection)
+{
+ u8 override_iec = u32_get_bits(tas_selection,
+ IWL_WTAS_OVERRIDE_IEC_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
+ int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+
+ tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_data->override_tas_iec = override_iec;
+ tas_data->enable_tas_iec = enabled_iec;
+
+ return enabled;
+}
+
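A worked decode of the bit layout above, using the IWL_WTAS_* masks declared in regulatory.h later in this patch (the sample value is illustrative):

	/* tas_selection = 0x10005:
	 *   IWL_WTAS_ENABLED_MSK      (0x1)     -> enabled      = 1
	 *   IWL_WTAS_OVERRIDE_IEC_MSK (0x2)     -> override_iec = 0
	 *   IWL_WTAS_ENABLE_IEC_MSK   (0x4)     -> enable_iec   = 1
	 *   IWL_WTAS_USA_UHB_MSK      (BIT(16)) -> usa_tas_uhb  = 1
	 */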
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+ __le32 config_bitmap = 0;
+
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_HR1:
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &val);
+
+ if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ break;
+ default:
+ break;
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
+ &val);
+ /*
+ * Enable China 2022 regulatory support if the BIOS object
+ * does not exist or if it is enabled in the BIOS.
+ */
+ if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
+ }
+
+ return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
+
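As a hedged example of the resulting bitmap (the platform answers are illustrative): on an HR or JF RF where the BIOS enables Indonesia 5G2, selects SRD passive, does not expose the DSM_FUNC_REGULATORY_CONFIG object, and the firmware advertises IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT, the function above returns the OR of:

	/* LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK |
	 * LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK |
	 * LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK
	 * (the last bit is set because a missing regulatory-config object
	 * also enables China 2022 support, per the comment above)
	 */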
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ GET_BIOS_TABLE(dsm, fwrt, func, value);
+}
+IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
new file mode 100644
index 000000000000..28e774766847
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef __fw_regulatory_h__
+#define __fw_regulatory_h__
+
+#include "fw/img.h"
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
+#define BIOS_SAR_MAX_PROFILE_NUM 4
+/*
+ * Each SAR profile has (up to, depends on the table revision) 4 chains:
+ * chain A, chain B, chain A when in CDB, chain B when in CDB
+ */
+#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4
+#define BIOS_SAR_NUM_CHAINS 2
+#define BIOS_SAR_MAX_SUB_BANDS_NUM 11
+
+#define BIOS_GEO_NUM_CHAINS 2
+#define BIOS_GEO_MAX_NUM_BANDS 3
+#define BIOS_GEO_MAX_PROFILE_NUM 8
+#define BIOS_GEO_MIN_PROFILE_NUM 3
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+
+/* PPAG gain value bounds in 1/8 dBm */
+#define IWL_PPAG_MIN_LB -16
+#define IWL_PPAG_MAX_LB 24
+#define IWL_PPAG_MIN_HB -16
+#define IWL_PPAG_MAX_HB 40
+
+#define IWL_PPAG_ETSI_CHINA_MASK 3
+#define IWL_PPAG_REV3_MASK 0x7FF
+
+#define IWL_WTAS_BLACK_LIST_MAX 16
+#define IWL_WTAS_ENABLED_MSK 0x1
+#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
+#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_USA_UHB_MSK BIT(16)
+
+/*
+ * The profile for revision 2 is a superset of revision 1, which is in
+ * turn a superset of revision 0. So we can store all revisions
+ * inside revision 2, which is what we represent here.
+ */
+
+/*
+ * struct iwl_sar_profile_chain - per-chain values of a SAR profile
+ * @subbands: the SAR value for each subband
+ */
+struct iwl_sar_profile_chain {
+ u8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+/*
+ * struct iwl_sar_profile - SAR profile from SAR tables
+ * @enabled: whether the profile is enabled or not
+ * @chains: per-chain SAR values
+ */
+struct iwl_sar_profile {
+ bool enabled;
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+
+/*
+ * struct iwl_geo_profile_band - per-band geo SAR offsets
+ * @max: the max tx power allowed for the band
+ * @chains: SAR offsets values for each chain
+ */
+struct iwl_geo_profile_band {
+ u8 max;
+ u8 chains[BIOS_GEO_NUM_CHAINS];
+};
+
+/*
+ * struct iwl_geo_profile - geo profile
+ * @bands: per-band table of the SAR offsets
+ */
+struct iwl_geo_profile {
+ struct iwl_geo_profile_band bands[BIOS_GEO_MAX_NUM_BANDS];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+struct iwl_tas_data {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+};
+
+/* For DSM revision 0 and 4 */
+enum iwl_dsm_funcs {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
+ DSM_FUNC_REGULATORY_CONFIG = 4,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
+ DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
+ DSM_FUNC_RFI_CONFIG = 11,
+ DSM_FUNC_NUM_FUNCS = 12,
+};
+
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
+ DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
+};
+
+#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
+ DSM_VALUE_RFI_DDR_DISABLE)
+
+enum iwl_dsm_masks_reg {
+ DSM_MASK_CHINA_22_REG = BIT(2)
+};
+
+struct iwl_fw_runtime;
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b);
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
+bool iwl_is_tas_approved(void);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection);
+
+int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+
+int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+
+int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+
+static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
+ const u8 ppag_ver)
+{
+ return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
+}
+#endif /* __fw_regulatory_h__ */
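A quick sanity check of the inline helper above (the input values are illustrative):

	u32 flags;

	flags = iwl_bios_get_ppag_flags(0x1f5, 2);	/* 0x1f5 & IWL_PPAG_ETSI_CHINA_MASK -> 0x1 */
	flags = iwl_bios_get_ppag_flags(0x1f5, 3);	/* 0x1f5 & IWL_PPAG_REV3_MASK -> 0x1f5 */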
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 357727774db9..b2bc4fd37abf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -14,6 +14,7 @@
#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
+#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
@@ -100,6 +101,11 @@ struct iwl_txf_iter_data {
* @dump: debug dump data
* @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
+ * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
+ * 0: Unlocked, 1 and 2: Locked.
+ * Only read the UEFI variables if locked.
+ * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
+ * @geo_profiles: geographic profiles as read from WGDS BIOS table
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -158,24 +164,22 @@ struct iwl_fw_runtime {
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-#ifdef CONFIG_ACPI
- struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM];
u8 sar_chain_a_profile;
u8 sar_chain_b_profile;
- struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
+ u8 reduced_power_flags;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u32 ppag_ver;
- bool ppag_table_valid;
+ u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- u8 reduced_power_flags;
- bool uats_enabled;
struct iwl_uats_table_cmd uats_table;
-#endif
+ u8 uefi_tables_lock_status;
+ bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 2964c5fb11e9..e81fc0129b9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -76,6 +76,42 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
+static
+void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ void *var;
+ unsigned long var_size;
+
+ var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+ &var_size);
+
+ if (IS_ERR(var)) {
+ IWL_DEBUG_RADIO(trans,
+ "%s UEFI variable not found 0x%lx\n", var_name,
+ PTR_ERR(var));
+ return var;
+ }
+
+ if (var_size < expected_size) {
+ IWL_DEBUG_RADIO(trans,
+ "Invalid %s UEFI variable len (%lu)\n",
+ var_name, var_size);
+ kfree(var);
+ return ERR_PTR(-EINVAL);
+ }
+
+ IWL_DEBUG_RADIO(trans, "%s from UEFI with size %lu\n", var_name,
+ var_size);
+
+ if (size)
+ *size = var_size;
+ return var;
+}
+
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -230,26 +266,13 @@ u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
unsigned long package_size;
u8 *data;
- package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME,
- &IWL_EFI_VAR_GUID, &package_size);
-
- if (IS_ERR(package)) {
- IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
- PTR_ERR(package), package_size);
+ package = iwl_uefi_get_verified_variable(trans,
+ IWL_UEFI_REDUCED_POWER_NAME,
+ "Reduced Power",
+ sizeof(*package),
+ &package_size);
+ if (IS_ERR(package))
return ERR_CAST(package);
- }
-
- if (package_size < sizeof(*package)) {
- IWL_DEBUG_FW(trans,
- "Invalid Reduced Power UEFI variable len (%lu)\n",
- package_size);
- kfree(package);
- return ERR_PTR(-EINVAL);
- }
-
- IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
- package_size);
IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);
@@ -283,32 +306,15 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
void iwl_uefi_get_step_table(struct iwl_trans *trans)
{
struct uefi_cnv_common_step_data *data;
- unsigned long package_size;
int ret;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
-
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "STEP UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid STEP table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return;
- }
-
- IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_step_parse(data, trans);
if (ret < 0)
@@ -318,7 +324,6 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
-#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
{
@@ -355,31 +360,15 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_sgom_data *data;
- unsigned long package_size;
int ret;
if (!fwrt->geo_enabled)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
- return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid SGOM table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_SGOM_NAME,
+ "SGOM", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_sgom_parse(data, fwrt);
if (ret < 0)
@@ -404,28 +393,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_uats_data *data;
- unsigned long package_size;
int ret;
- data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "UATS UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
+ "UATS", sizeof(*data), NULL);
+ if (IS_ERR(data))
return -EINVAL;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid UATS table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return -EINVAL;
- }
-
- IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_uats_parse(data, fwrt);
if (ret < 0) {
@@ -438,4 +411,298 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
return 0;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
-#endif /* CONFIG_ACPI */
+
+static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
+ struct uefi_sar_profile *uefi_sar_prof,
+ u8 prof_index, bool enabled)
+{
+ memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof,
+ sizeof(struct uefi_sar_profile));
+
+ fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK;
+}
+
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wrds *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME,
+ "WRDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode);
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ewrd *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME,
+ "EWRD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_EWRD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < data->num_profiles; i++)
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1,
+ data->mode);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wgds *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME,
+ "WGDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WGDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM ||
+ data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Invalid number of profiles in WGDS: %d\n",
+ data->num_profiles);
+ goto out;
+ }
+
+ fwrt->geo_rev = data->revision;
+ for (i = 0; i < data->num_profiles; i++)
+ memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i],
+ sizeof(struct iwl_geo_profile));
+
+ fwrt->geo_num_profiles = data->num_profiles;
+ fwrt->geo_enabled = true;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ppag *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME,
+ "PPAG", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision < IWL_UEFI_MIN_PPAG_REV ||
+ data->revision > IWL_UEFI_MAX_PPAG_REV) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ fwrt->ppag_ver = data->revision;
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
+ fwrt->ppag_ver);
+
+ BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
+ memcpy(&fwrt->ppag_chains, &data->ppag_chains,
+ sizeof(data->ppag_chains));
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
+{
+ struct uefi_cnv_var_wtas *uefi_tas;
+ int ret = 0, enabled, i;
+
+ uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
+ "WTAS", sizeof(*uefi_tas), NULL);
+ if (IS_ERR(uefi_tas))
+ return -EINVAL;
+
+ if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
+ uefi_tas->revision);
+ goto out;
+ }
+
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ uefi_tas->tas_selection);
+ if (!enabled) {
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
+ uefi_tas->revision);
+ if (uefi_tas->black_list_size > IWL_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %d\n",
+ uefi_tas->black_list_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
+
+ for (i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] =
+ cpu_to_le32(uefi_tas->black_list[i]);
+ IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
+ uefi_tas->black_list[i]);
+ }
+out:
+ kfree(uefi_tas);
+ return ret;
+}
+
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ struct uefi_cnv_var_splc *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME,
+ "SPLC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_SPLC_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI SPLC revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *dflt_pwr_limit = data->default_pwr_limit;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ struct uefi_cnv_var_wrdd *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDD_NAME,
+ "WRDD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->mcc != UEFI_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
+ goto out;
+ }
+
+ mcc[0] = (data->mcc >> 8) & 0xff;
+ mcc[1] = data->mcc & 0xff;
+ mcc[2] = '\0';
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ struct uefi_cnv_var_eckv *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_ECKV_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *extl_clk = data->ext_clock_valid;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ struct uefi_cnv_var_general_cfg *data;
+ int ret = -EINVAL;
+
+ /* Not supported function index */
+ if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
+ return -EOPNOTSUPP;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME,
+ "DSM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
+ goto out;
+ }
+
+ *value = data->functions[func];
+ ret = 0;
+out:
+ kfree(data);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index bf61a8df1225..303cc299d1bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -5,15 +5,38 @@
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
+#include "fw/regulatory.h"
+
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS"
+#define IWL_UEFI_WRDS_NAME L"UefiCnvWlanWRDS"
+#define IWL_UEFI_EWRD_NAME L"UefiCnvWlanEWRD"
+#define IWL_UEFI_WGDS_NAME L"UefiCnvWlanWGDS"
+#define IWL_UEFI_PPAG_NAME L"UefiCnvWlanPPAG"
+#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
+#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
+#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+
#define IWL_SGOM_MAP_SIZE 339
#define IWL_UATS_MAP_SIZE 339
+#define IWL_UEFI_WRDS_REVISION 2
+#define IWL_UEFI_EWRD_REVISION 2
+#define IWL_UEFI_WGDS_REVISION 3
+#define IWL_UEFI_MIN_PPAG_REV 1
+#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_SPLC_REVISION 0
+#define IWL_UEFI_WRDD_REVISION 0
+#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_DSM_REVISION 4
+
struct pnvm_sku_package {
u8 rev;
u32 total_size;
@@ -42,6 +65,120 @@ struct uefi_cnv_common_step_data {
} __packed;
/*
+ * struct uefi_sar_profile - a SAR profile as defined in UEFI
+ *
+ * @chains: a per-chain table of SAR values
+ */
+struct uefi_sar_profile {
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI
+ *
+ * @revision: the revision of the table
+ * @mode: is WRDS enabled/disabled
+ * @sar_profile: sar profile #1
+ */
+struct uefi_cnv_var_wrds {
+ u8 revision;
+ u32 mode;
+ struct uefi_sar_profile sar_profile;
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mode: is WRDS enabled/disabled
+ * @num_profiles: how many additional profiles we have in this table (0-3)
+ * @sar_profiles: the additional SAR profiles (#2-#4)
+ */
+struct uefi_cnv_var_ewrd {
+ u8 revision;
+ u32 mode;
+ u32 num_profiles;
+ struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI
+ * @revision: the revision of the table
+ * @num_profiles: the number of geo profiles we have in the table.
+ * The first 3 are mandatory, and there can be up to 8 in total.
+ * @geo_profiles: a per-profile table of the offsets to add to SAR values.
+ */
+struct uefi_cnv_var_wgds {
+ u8 revision;
+ u8 num_profiles;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI
+ * @revision: the revision of the table
+ * @ppag_modes: values from &enum iwl_ppag_flags
+ * @ppag_chains: the PPAG values per chain and band
+ */
+struct uefi_cnv_var_ppag {
+ u8 revision;
+ u32 ppag_modes;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+} __packed;
+
+/* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI
+ * @revision: the revision of the table
+ * @tas_selection: different options of TAS enablement.
+ * @black_list_size: the number of defined entries in the black list
+ * @black_list: a list of countries that are not allowed to use the TAS feature
+ */
+struct uefi_cnv_var_wtas {
+ u8 revision;
+ u32 tas_selection;
+ u8 black_list_size;
+ u16 black_list[IWL_WTAS_BLACK_LIST_MAX];
+} __packed;
+
+/* struct uefi_cnv_var_splc - SPLC table as defined in UEFI
+ * @revision: the revision of the table
+ * @default_pwr_limit: The default maximum power per device
+ */
+struct uefi_cnv_var_splc {
+ u8 revision;
+ u32 default_pwr_limit;
+} __packed;
+
+#define UEFI_MCC_CHINA 0x434e
+
+/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code
+ */
+struct uefi_cnv_var_wrdd {
+ u8 revision;
+ u32 mcc;
+} __packed;
+
+/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
+ * @revision: the revision of the table
+ * @ext_clock_valid: indicates if external 32KHz clock is valid
+ */
+struct uefi_cnv_var_eckv {
+ u8 revision;
+ u32 ext_clock_valid;
+} __packed;
+
+#define UEFI_MAX_DSM_FUNCS 32
+
+/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
+ * @revision: the revision of the table
+ * @functions: payload of the different DSM functions
+ */
+struct uefi_cnv_var_general_cfg {
+ u8 revision;
+ u32 functions[UEFI_MAX_DSM_FUNCS];
+} __packed;
+
+/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
* disable the feature in old kernels.
@@ -55,6 +192,21 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -85,13 +237,56 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
{
return 0;
}
-#endif /* CONFIG_EFI */
-#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
-void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt);
-#else
+static inline int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ *dflt_pwr_limit = 0;
+ return 0;
+}
+
+static inline int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
@@ -103,6 +298,5 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
{
return 0;
}
-
-#endif
+#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
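The stub block above follows the usual conditional-compilation pattern: each accessor has a real prototype when CONFIG_EFI is enabled and a static inline fallback returning -ENOENT (or a harmless default) otherwise, so callers never need #ifdefs. A minimal sketch of that pattern, using a hypothetical iwl_uefi_get_example_table() purely for illustration:

#include <linux/errno.h>

struct iwl_fw_runtime;

#ifdef CONFIG_EFI
int iwl_uefi_get_example_table(struct iwl_fw_runtime *fwrt);
#else
/* without EFI support the table is simply reported as "not found" */
static inline int iwl_uefi_get_example_table(struct iwl_fw_runtime *fwrt)
{
	return -ENOENT;
}
#endif /* CONFIG_EFI */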
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index ae6f1cd4d660..6aa4f7f9c708 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -12,6 +12,7 @@
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
#include "iwl-csr.h"
+#include "iwl-drv.h"
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -418,6 +419,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_MAC_TYPE_SC 0x48
+#define IWL_CFG_MAC_TYPE_SC2 0x49
+#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -442,6 +445,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_160 0x1
#define IWL_CFG_160 0x0
+#define IWL_CFG_NO_320 0x1
+#define IWL_CFG_320 0x0
+
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -471,6 +477,15 @@ struct iwl_dev_info {
const char *name;
};
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_dev_info iwl_dev_info_table[];
+extern const unsigned int iwl_dev_info_table_size;
+const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+ u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+#endif
+
/*
* This list declares the config structures for all devices.
*/
@@ -526,7 +541,10 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
+extern const char iwl_sc2_name[];
+extern const char iwl_sc2f_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -632,6 +650,8 @@ extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
+extern const struct iwl_cfg iwl_cfg_sc2;
+extern const struct iwl_cfg iwl_cfg_sc2f;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 72075720969c..561d0c261123 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -64,21 +64,22 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
-static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
- struct list_head *list)
+/* add a new TLV node, returning it so it can be modified */
+static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
+ struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
- node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
if (!node)
- return -ENOMEM;
+ return NULL;
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
- return 0;
+ return &node->tlv;
}
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
@@ -103,10 +104,18 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
+ /* we use this as a string, ensure input was NUL terminated */
+ if (strnlen(debug_info->debug_cfg_name,
+ sizeof(debug_info->debug_cfg_name)) ==
+ sizeof(debug_info->debug_cfg_name))
+ return -EINVAL;
+
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
- return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
@@ -175,7 +184,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
@@ -246,11 +257,9 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
- struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
- struct iwl_ucode_tlv *dup = NULL;
- int ret;
+ struct iwl_ucode_tlv *new_tlv;
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
@@ -267,20 +276,18 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
+
+ new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ if (!new_tlv)
+ return -ENOMEM;
+
if (!le32_to_cpu(trig->occurrences)) {
- dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
- GFP_KERNEL);
- if (!dup)
- return -ENOMEM;
- dup_trig = (void *)dup->data;
- dup_trig->occurrences = cpu_to_le32(-1);
- tlv = dup;
- }
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data;
- ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
- kfree(dup);
+ new_trig->occurrences = cpu_to_le32(-1);
+ }
- return ret;
+ return 0;
}
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
@@ -304,7 +311,9 @@ static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list))
+ return -ENOMEM;
+ return 0;
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
@@ -1148,7 +1157,9 @@ iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
if (!match) {
IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
- return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ if (!iwl_dbg_tlv_add(trig_tlv, trig_list))
+ return -ENOMEM;
+ return 0;
}
return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
@@ -1234,7 +1245,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
}
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
tp, dump_data.trig->reset_fw);
IWL_DEBUG_FW(fwrt,
@@ -1244,22 +1255,22 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
IWL_DEBUG_FW(fwrt,
"WRT: stop only and no reload firmware\n");
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
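The reworked iwl_dbg_tlv_add() above combines two common kernel idioms: struct_size() to size an allocation whose last member is a flexible array, and returning the embedded copy so the caller can patch it in place (as the trigger path now does with occurrences). A reduced, self-contained sketch with simplified types — example_tlv and example_node are illustrative, not driver structures:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/string.h>

struct example_tlv {
	u32 type;
	u32 length;		/* simplified; the driver stores this as __le32 */
	u8 data[];		/* flexible array member, must be last */
};

struct example_node {
	struct list_head list;
	struct example_tlv tlv;	/* tlv.data[] grows past the end of the node */
};

/* Allocate a node big enough for the payload, queue it, return the copy. */
static struct example_tlv *example_tlv_add(const struct example_tlv *tlv,
					   struct list_head *list)
{
	struct example_node *node;
	u32 len = tlv->length;

	/* struct_size() guards the sizeof(*node) + len addition against overflow */
	node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
	if (!node)
		return NULL;

	memcpy(&node->tlv, tlv, sizeof(node->tlv));
	memcpy(node->tlv.data, tlv->data, len);
	list_add_tail(&node->list, list);

	/* callers may now modify the queued copy, e.g. force occurrences */
	return &node->tlv;
}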
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index abf8001bdac1..4696d73c8971 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -187,6 +187,7 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
+ rf_step = 'b';
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
@@ -1424,35 +1425,25 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
- int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
/* also protects start/stop from racing against each other */
lockdep_assert_held(&iwlwifi_opmode_table_mtx);
- for (retry = 0; retry <= max_retry; retry++) {
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg,
- &drv->fw, dbgfs_dir);
-
- if (op_mode)
- return op_mode;
-
- if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
- break;
-
- IWL_ERR(drv, "retry init count %d\n", retry);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+ if (op_mode)
+ return op_mode;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- debugfs_remove_recursive(drv->dbgfs_op_mode);
- drv->dbgfs_op_mode = NULL;
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
#endif
- }
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 3d1a27ba35c6..1549ff429549 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -6,6 +6,7 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
#include <linux/export.h>
+#include <kunit/visibility.h>
/* for all modules */
#define DRV_NAME "iwlwifi"
@@ -89,8 +90,13 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
-/* max retry for init flow */
-#define IWL_MAX_INIT_RETRY 2
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT
+#else
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT static
+#endif
#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
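The two macros replacing IWL_MAX_INIT_RETRY above let a symbol be non-static and exported only while the KUnit tests are built, mirroring <kunit/visibility.h>. A hedged sketch of the intended usage — iwl_example_helper() is a made-up name, not a driver symbol:

/* header side, guarded the same way as the declarations in iwl-config.h */
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
int iwl_example_helper(int x);
#endif

/* .c side: 'static' when the tests are off, exported to the tests when on.
 * A real helper would also be called locally, so the static fallback is used.
 */
VISIBLE_IF_IWLWIFI_KUNIT int iwl_example_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_example_helper);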
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 5aab64c63a13..2b290fab1ef2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -270,7 +270,7 @@ enum iwl_eeprom_enhanced_txpwr_flags {
};
/**
- * struct iwl_eeprom_enhanced_txpwr
+ * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limits
* @flags: entry flags
* @channel: channel number
* @chain_a_max: chain a max power in 1/2 dBm
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e0400ba2ab74..6ba374efaacb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -570,18 +570,19 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * @closed_rb_num: [0:11] Indicates the index of the RB which was closed
+ * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed
+ * @finished_rb_num: [0:11] Indicates the index of the current RB
* in which the last frame was written to
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * @finished_fr_num: [0:11] Indicates the index of the RX Frame
* which was transferred
+ * @__spare: reserved
*/
struct iwl_rb_status {
__le16 closed_rb_num;
__le16 closed_fr_num;
__le16 finished_rb_num;
- __le16 finished_fr_nam;
+ __le16 finished_fr_num;
__le32 __spare;
} __packed;
@@ -651,15 +652,15 @@ struct iwl_tfd_tb {
*
* This structure contains dma address and length of transmission address
*
- * @tb_len length of the tx buffer
- * @addr 64 bits dma address
+ * @tb_len: length of the tx buffer
+ * @addr: 64 bits dma address
*/
struct iwl_tfh_tb {
__le16 tb_len;
__le64 addr;
} __packed;
-/**
+/*
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
@@ -698,10 +699,11 @@ struct iwl_tfd {
/**
* struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
- * @ num_tbs 0-4 number of active tbs
- * 5 -15 reserved
- * @ tbs[25] transmit frame buffer descriptors
- * @ __pad padding
+ * @num_tbs:
+ * 0-4 number of active tbs
+ * 5-15 reserved
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
*/
struct iwl_tfh_tfd {
__le16 num_tbs;
@@ -718,10 +720,12 @@ struct iwl_tfh_tfd {
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * @tfd_offset:
+ * For devices up to 22000:
+ * 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * For 22000:
+ * 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
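Most of the iwl-fh.h hunks above are kernel-doc repairs: member lines must use the "@name: description" form, and every member (including padding such as @__spare) has to be documented or scripts/kernel-doc warns. A minimal illustrative struct showing the expected layout — example_desc is not a driver type:

#include <linux/types.h>

/**
 * struct example_desc - example DMA descriptor (illustration only)
 * @len: buffer length in bytes
 * @addr: 64-bit DMA address of the buffer
 *
 * "@len - length" or "@ len length" would both trigger kernel-doc warnings;
 * the colon form above is the one the fixes in this patch converge on.
 */
struct example_desc {
	__le16 len;
	__le64 addr;
} __packed;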
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 2f6774ec37b2..baa39a18087a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,6 +156,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_80MHZ: 80 MHz channel okay
* @NVM_CHANNEL_160MHZ: 160 MHz channel okay
* @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
*/
enum iwl_nvm_channel_flags {
NVM_CHANNEL_VALID = BIT(0),
@@ -170,6 +172,8 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_80MHZ = BIT(10),
NVM_CHANNEL_160MHZ = BIT(11),
NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
/**
@@ -309,7 +313,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
/* Note: already can print up to 101 characters, 110 is the limit! */
IWL_DEBUG_DEV(dev, level,
- "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
chan, flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
@@ -322,7 +326,9 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
CHECK_AND_PRINT_I(40MHZ),
CHECK_AND_PRINT_I(80MHZ),
CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH));
+ CHECK_AND_PRINT_I(DC_HIGH),
+ CHECK_AND_PRINT_I(VLP),
+ CHECK_AND_PRINT_I(AFC));
#undef CHECK_AND_PRINT_I
}
@@ -366,6 +372,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
(flags & IEEE80211_CHAN_NO_IR))
flags |= IEEE80211_CHAN_IR_CONCURRENT;
+ /* Set the AP type for the UHB case. */
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
+
return flags;
}
@@ -695,10 +707,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
.phy_cap_info[5] =
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US) |
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP,
.phy_cap_info[6] =
IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
@@ -732,6 +745,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -744,7 +760,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
@@ -799,7 +814,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
.phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US),
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
@@ -827,6 +843,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -890,8 +909,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
bool no_320;
- no_320 = !trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB;
+ no_320 = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
+ trans->reduced_cap_sku;
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -1056,6 +1076,26 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
}
+
+ if (trans->step_urm) {
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs11_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
+ }
+
+ if (trans->no_160)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (trans->reduced_cap_sku) {
+ memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
+ sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
+ ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
}
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
@@ -1572,7 +1612,8 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg)
+ const struct iwl_cfg *cfg,
+ bool uats_enabled)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1617,6 +1658,16 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_IR;
}
}
+
+ /* Set the AP type for the UHB case. */
+ if (uats_enabled) {
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
+ }
+
/*
* reg_capa is per regulatory domain so apply it for every channel
*/
@@ -1671,7 +1722,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver)
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
{
int ch_idx;
u16 ch_flags;
@@ -1737,7 +1788,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
- cfg);
+ cfg, uats_enabled);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -2097,7 +2148,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
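FIELD_PREP_CONST(), used above for the EHT nominal packet padding, is the compile-time variant of FIELD_PREP(): it shifts a constant value into a constant mask and can therefore sit in a static initializer. A trivial hedged example with a made-up two-bit field (EXAMPLE_MODE_MASK is not a real register field):

#include <linux/bitfield.h>
#include <linux/types.h>

#define EXAMPLE_MODE_MASK	0x0c	/* hypothetical bits 3:2 of a capability byte */
#define EXAMPLE_MODE_FAST	2

/* evaluates to 0x08 at compile time, so it is valid in a const initializer */
static const u8 example_cap = FIELD_PREP_CONST(EXAMPLE_MODE_MASK, EXAMPLE_MODE_FAST);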
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 651ed25b683b..fd9c3bed9407 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver);
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 3dc618a7c70f..1ca82f3e4ebf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -68,9 +68,11 @@ struct iwl_cfg;
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
- * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
+ * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
+ * Note that this must not return %true for newer devices using gen2 PCIe
+ * transport.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index dd32c287b983..a7d44df06eab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,12 +368,19 @@ enum {
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
};
-#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_PMU_STEP_FLOW 0xA2D588
+#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
+
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5789a8735976..b93cef7b2330 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -519,6 +519,7 @@ struct iwl_pnvm_image {
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
+ * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
* @txq_enable: setup a queue. To setup an AC queue, use the
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
@@ -528,6 +529,8 @@ struct iwl_pnvm_image {
* hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
+ * @txq_alloc: Allocate a new TX queue, may sleep.
+ * @txq_free: Free a previously allocated TX queue.
* @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
@@ -547,23 +550,27 @@ struct iwl_pnvm_image {
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
+ * @sw_reset: trigger software reset of the NIC
* @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
* Sleeping is not allowed between grab_nic_access and
* release_nic_access.
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask - set SRAM register according to value and mask.
+ * @set_bits_mask: set SRAM register according to value and mask.
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
+ * @sync_nmi: trigger a firmware NMI and wait for it to complete
* @load_pnvm: save the pnvm data in DRAM
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
* @load_reduce_power: copy reduce power table to the corresponding DRAM memory
* @set_reduce_power: set reduce power table addresses in the sratch buffer
* @interrupts: disable/enable interrupts to transport
+ * @imr_dma_data: set up IMR DMA
+ * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
*/
struct iwl_trans_ops {
@@ -775,7 +782,7 @@ struct iwl_self_init_dram {
* @imr_size: imr dram size received from fw
* @sram_addr: sram address from debug tlv
* @sram_size: sram size from debug tlv
- * @imr2sram_remainbyte`: size remained after each dma transfer
+ * @imr2sram_remainbyte: size remained after each dma transfer
* @imr_curr_addr: current dst address used during dma transfer
* @imr_base_addr: imr address received from fw
*/
@@ -822,12 +829,16 @@ struct iwl_pc_data {
* @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @unsupported_region_msk: unsupported regions out of active_regions
* @active_regions: active regions
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
* @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
* @ucode_preset: preset based on ucode
+ * @restart_required: indicates debug restart is required
+ * @last_tp_resetfw: last handling of reset during debug timepoint
+ * @imr_data: IMR debug data allocation
* @dump_file_name_ext: dump file name extension
* @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
@@ -930,6 +941,7 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @block: queue is blocked
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
@@ -938,6 +950,8 @@ struct iwl_pcie_first_tb_buf {
* @id: queue id
* @low_mark: low watermark, resume queue if free space more than this
* @high_mark: high watermark, stop queue if free space less than this
+ * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
+ * @overflow_tx: need to transmit from overflow
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -990,10 +1004,19 @@ struct iwl_txq {
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used - bit mask of used queues
- * @queue_stopped - bit mask of stopped queues
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -1026,27 +1049,35 @@ struct iwl_trans_txqs {
/**
* struct iwl_trans - transport common data
*
- * @csme_own - true if we couldn't get ownership on the device
- * @ops - pointer to iwl_trans_ops
- * @op_mode - pointer to the op_mode
+ * @csme_own: true if we couldn't get ownership on the device
+ * @ops: pointer to iwl_trans_ops
+ * @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
- * @cfg - pointer to the configuration
- * @drv - pointer to iwl_drv
+ * @cfg: pointer to the configuration
+ * @drv: pointer to iwl_drv
+ * @state: current device state
* @status: a bit-mask of transport status flags
- * @dev - pointer to struct device * that represents the device
+ * @dev: pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id a u32 with the device RF ID
- * @hw_crf_id a u32 with the device CRF ID
- * @hw_wfpm_id a u32 with the device wfpm ID
+ * @hw_rf_id: a u32 with the device RF ID
+ * @hw_cnv_id: a u32 with the device CNV ID
+ * @hw_crf_id: a u32 with the device CRF ID
+ * @hw_wfpm_id: a u32 with the device wfpm ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @sku_id: the SKU identifier (for PNVM matching)
+ * @pnvm_loaded: indicates PNVM was loaded
+ * @hw_rev: the revision data of the HW
* @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
+ * @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
+ * @command_groups: pointer to command group name list array
+ * @command_groups_size: array size of @command_groups
* @wide_cmd_header: true when ucode supports wide command header format
* @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
@@ -1055,19 +1086,29 @@ struct iwl_trans_txqs {
* @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @dbgfs_dir: iwlwifi debugfs base dir for this device
+ * @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dbg: additional debug data, see &struct iwl_trans_debug
+ * @init_dram: FW initialization DMA data
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
+ * @name: the device name
* @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
+ * @reduced_cap_sku: reduced capability supported SKU
+ * @no_160: device not supporting 160 MHz
+ * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @trans_specific: data for the specific transport this is allocated for/with
*/
struct iwl_trans {
bool csme_own;
@@ -1090,6 +1131,8 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
u32 sku_id[3];
+ bool reduced_cap_sku;
+ u8 no_160:1, step_urm:1;
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 9fe1761691ec..535edb51d1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -181,6 +181,9 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
return 0;
@@ -252,6 +255,124 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
+static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ /* Done already */
+ if (mvmvif->bt_coex_esr_disabled == !enable)
+ return;
+
+ mvmvif->bt_coex_esr_disabled = !enable;
+
+ /* Nothing to do */
+ if (mvmvif->esr_active == enable)
+ return;
+
+ if (enable) {
+ /* Try to re-enable eSR */

+ iwl_mvm_mld_select_links(mvm, vif, false);
+ return;
+ }
+
+ /*
+ * Find the primary link, as we want to switch to it and drop the
+ * secondary one.
+ */
+ link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
+ WARN_ON(link_id < 0);
+
+ ieee80211_set_active_links_async(vif,
+ vif->active_links & BIT(link_id));
+}
+
+/*
+ * This function receives the LB link id and checks if eSR should be
+ * enabled or disabled (due to BT coex)
+ */
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ bool have_wifi_loss_rate =
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ BT_PROFILE_NOTIFICATION, 0) > 4;
+ s8 link_rssi = 0;
+ u8 wifi_loss_rate;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ return true;
+
+ /* If LB link is the primary one we should always disable eSR */
+ if (link_id == primary_link)
+ return false;
+
+ /* The feature is not supported */
+ if (!have_wifi_loss_rate)
+ return true;
+
+ /*
+ * We might not have a link_info when checking whether we can
+ * (re)enable eSR - the LB link might not exist yet
+ */
+ if (link_info)
+ link_rssi = (s8)link_info->beacon_stats.avg_signal;
+
+ /*
+ * In case we don't know the RSSI - take the lower wifi loss,
+ * so we will more likely enter eSR, and if RSSI is low -
+ * we will get an update on this and exit eSR.
+ */
+ if (!link_rssi)
+ wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ else if (!mvmvif->bt_coex_esr_disabled)
+ /* RSSI needs to get really low to disable eSR... */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ else
+ /* ...And really high before we enable it back */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+}
+
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ usable_links);
+ bool enable;
+
+ /* Not assoc, not MLD vif or only one usable link */
+ if (primary_link < 0)
+ return;
+
+ enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, enable);
+}
+
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -297,6 +418,8 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
+ iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -432,6 +555,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
+ /* When BT is off this will be 0 */
+ if (data->notif->wifi_loss_low_rssi == BT_OFF)
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, true);
+
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
@@ -454,6 +581,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ rcu_read_unlock();
+ return;
+ }
+
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index c832068b5718..f5122c4678a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -11,6 +11,9 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
+#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
+#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
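The coex change above keys eSR on the low-band link RSSI with hysteresis: the thresholds just added (69/63, in -dBm) are deliberately different, so a link hovering around a single value cannot toggle eSR on and off. The driver actually uses the RSSI to pick a wifi-loss figure from the BT notification; the sketch below only shows the bare two-threshold hysteresis idea, with example_* names that are not driver symbols:

#include <stdbool.h>

#define EXAMPLE_DISABLE_THRESH	69	/* disable once RSSI <= -69 dBm */
#define EXAMPLE_ENABLE_THRESH	63	/* re-enable only once RSSI > -63 dBm */

/* Returns the new "enabled" state given the current state and the RSSI. */
static bool example_esr_allowed(bool currently_enabled, int rssi_dbm)
{
	if (currently_enabled)
		return rssi_dbm > -EXAMPLE_DISABLE_THRESH;

	return rssi_dbm > -EXAMPLE_ENABLE_THRESH;
}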
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4582afb149d7..553c6fffc7c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -450,9 +450,9 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
}
static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -461,16 +461,14 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct wowlan_key_rsc_v5_data data = {};
int i;
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
if (!data.rsc)
return -ENOMEM;
- memset(data.rsc, 0xff, sizeof(*data.rsc));
-
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
data.rsc->mcast_key_id_map[i] =
IWL_MCAST_KEY_MAP_INVALID;
- data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id);
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_wowlan_get_rsc_v5_data,
@@ -494,7 +492,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
if (ver == 4) {
size = sizeof(*data.rsc_tsc);
data.rsc_tsc->sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else {
/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
size = sizeof(data.rsc_tsc->params);
@@ -668,10 +666,9 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct cfg80211_wowlan *wowlan)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
@@ -693,7 +690,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
pattern_cmd->n_patterns = wowlan->n_patterns;
if (ver >= 3)
- pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id;
+ pattern_cmd->sta_id = mvm_link->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -723,14 +720,15 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *ctx;
u8 chains_static, chains_dynamic;
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef, ap_def;
int ret, i;
struct iwl_binding_cmd_v1 binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
struct iwl_time_quota_data *quota;
u32 status;
- if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
+ ieee80211_vif_is_mld(vif)))
return -EINVAL;
/* add back the PHY */
@@ -744,12 +742,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
}
chandef = ctx->def;
+ ap_def = ctx->ap;
chains_static = ctx->rx_chains_static;
chains_dynamic = ctx->rx_chains_dynamic;
rcu_read_unlock();
ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef,
- chains_static, chains_dynamic);
+ &ap_def, chains_static, chains_dynamic);
if (ret)
return ret;
@@ -927,6 +926,9 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -987,7 +989,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
}
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1016,7 +1019,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
return -EIO;
}
- ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+ ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1030,7 +1033,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (ver == 2) {
size = sizeof(tkip_data.tkip);
tkip_data.tkip.sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
} else {
@@ -1079,7 +1082,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
- kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id);
if (cmd_ver == 4) {
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
@@ -1112,6 +1115,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct ieee80211_sta *ap_sta)
{
int ret;
@@ -1130,7 +1134,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1142,7 +1146,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
- ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
+ ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
@@ -1223,6 +1227,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_mvm_vif_link_info *mvm_link;
struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1237,7 +1242,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
- int ret;
+ int ret, primary_link;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1251,21 +1256,46 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
return -EINVAL;
}
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ return 1;
+
+ if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
+ /*
+ * Select the 'best' link. May need to revisit, it seems
+ * better to not optimize for throughput but rather range,
+ * reliability and power here - and select 2.4 GHz ...
+ */
+ primary_link =
+ iwl_mvm_mld_get_primary_link(mvm, vif,
+ vif->active_links);
+
+ if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
+ vif->active_links))
+ primary_link = __ffs(vif->active_links);
+
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+ } else {
+ primary_link = 0;
+ }
+
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
synchronize_net();
- vif = iwl_mvm_get_bss_vif(mvm);
- if (IS_ERR_OR_NULL(vif)) {
- ret = 1;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_link = mvmvif->link[primary_link];
+ if (WARN_ON_ONCE(!mvm_link)) {
+ ret = -EINVAL;
goto out_noreset;
}
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) {
+ if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -1279,24 +1309,31 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
- wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
+ wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
+ mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta)) {
ret = -EINVAL;
goto out_noreset;
}
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
goto out_noreset;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
- vif, mvmvif, ap_sta);
+ vif, mvmvif, mvm_link, ap_sta);
if (ret)
goto out;
@@ -1462,7 +1499,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
wakeup.disconnect = true;
if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
@@ -1486,6 +1524,9 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
@@ -1839,9 +1880,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
break;
+ default:
+ WARN_ON(1);
}
}
@@ -1931,7 +1975,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
{
- int i;
+ int i, j;
struct ieee80211_key_conf *key;
struct {
struct ieee80211_key_conf conf;
@@ -1939,6 +1983,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
} conf = {
.conf.cipher = gtk_cipher,
};
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
@@ -1972,10 +2017,18 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
memcpy(conf.conf.key, status->gtk[i].key,
sizeof(status->gtk[i].key));
- key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key))
return false;
- iwl_mvm_set_key_rx_seq_idx(key, status, i);
+
+ for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) {
+ if (!status->gtk_seq[j].valid ||
+ status->gtk_seq[j].key_id != key->keyidx)
+ continue;
+ iwl_mvm_set_key_rx_seq_idx(key, status, j);
+ break;
+ }
+ WARN_ON(j == ARRAY_SIZE(status->gtk_seq));
}
return true;
@@ -1995,6 +2048,7 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
.conf.keyidx = key_data->id,
};
struct ieee80211_key_seq seq;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
if (!key_data->len)
return true;
@@ -2020,17 +2074,17 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
- key_config = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key_config))
return false;
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (key_config->keyidx == 4 || key_config->keyidx == 5) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
- struct iwl_mvm_vif_link_info *mvm_link =
- mvmvif->link[link_id];
+ struct iwl_mvm_vif_link_info *mvm_link;
+ link_id = link_id < 0 ? 0 : link_id;
+ mvm_link = mvmvif->link[link_id];
mvm_link->igtk = key_config;
}
@@ -2065,7 +2119,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
.status = status,
};
int i;
-
u32 disconnection_reasons =
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
@@ -2073,9 +2126,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
- if (status->wakeup_reasons & disconnection_reasons)
- return false;
-
if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
WOWLAN_INFO_NOTIFICATION,
@@ -2136,6 +2186,9 @@ out:
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
}
+ if (status->wakeup_reasons & disconnection_reasons)
+ return false;
+
return true;
}
@@ -2193,7 +2246,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
+ int i;
+
BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
if (!data->key_len)
return;
@@ -2205,7 +2261,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ WOWLAN_IGTK_MIN_INDEX;
memcpy(status->igtk.key, data->key, sizeof(data->key));
- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
+
+ /* mac80211 expects big endian for memcmp() to work, convert */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
}
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
@@ -2839,6 +2898,9 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA :
mvmvif->deflink.ap_sta_id;
+ /* bug - FW with MLO has status notification */
+ WARN_ON(ieee80211_vif_is_mld(vif));
+
d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
@@ -2947,7 +3009,7 @@ static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
if (results->matched_profiles) {
memcpy(results->matches, notif->matches, matches_len);
- d3_data->nd_results_valid = TRUE;
+ d3_data->nd_results_valid = true;
}
/* no scan should be active at this point */
@@ -3345,6 +3407,7 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ unsigned long end = jiffies + 60 * HZ;
u32 pme_asserted;
while (true) {
@@ -3358,6 +3421,12 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
if (msleep_interruptible(100))
break;
+
+ if (time_is_before_jiffies(end)) {
+ IWL_ERR(mvm,
+ "ending pseudo-D3 with timeout after ~60 seconds\n");
+ return -ETIMEDOUT;
+ }
}
return 0;
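The iwl_mvm_d3_test_read() change bounds what used to be an unbounded poll loop. The pattern is generic: remember a jiffies deadline, sleep interruptibly between polls, and bail out on either a signal or the deadline. A standalone sketch with a hypothetical example_wait_for_event() (the real loop simply breaks on a signal rather than returning an error):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll every 100 ms, but never for more than ~60 seconds. */
static int example_wait_for_event(bool (*event_pending)(void))
{
	unsigned long end = jiffies + 60 * HZ;

	while (!event_pending()) {
		if (msleep_interruptible(100))
			return -ERESTARTSYS;	/* interrupted by a signal */
		if (time_is_before_jiffies(end))
			return -ETIMEDOUT;	/* deadline passed, give up */
	}

	return 0;
}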
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index e8b881596baf..51b01f7528be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -381,9 +381,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -578,34 +578,47 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
+ struct ieee80211_bss_conf *link_conf;
u16 value;
- int ret;
+ int link_id, ret = -EINVAL;
ret = kstrtou16(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
- /* make sure the channel context is assigned */
- if (!chanctx_conf) {
+ mvm->dbgfs_rx_phyinfo = value;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def min_def, ap_def;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 chains_static, chains_dynamic;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ continue;
+ }
+ /* A command can't be sent with RCU lock held, so copy
+ * everything here and use it after unlocking
+ */
+ min_def = chanctx_conf->min_def;
+ ap_def = chanctx_conf->ap;
+ chains_static = chanctx_conf->rx_chains_static;
+ chains_dynamic = chanctx_conf->rx_chains_dynamic;
rcu_read_unlock();
- mutex_unlock(&mvm->mutex);
- return -EINVAL;
- }
- phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
- rcu_read_unlock();
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+ if (!phy_ctxt)
+ continue;
- mvm->dbgfs_rx_phyinfo = value;
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
+ chains_static, chains_dynamic);
+ }
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
mutex_unlock(&mvm->mutex);
return ret ?: count;
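The rx_phyinfo rewrite above copies the channel-context fields while holding rcu_read_lock() and only sends the command after unlocking, since sending can sleep. A minimal sketch of that snapshot pattern (struct chan_snapshot and send_phy_cmd() are hypothetical placeholders):

	struct ieee80211_chanctx_conf *conf;
	struct chan_snapshot snap;	/* hypothetical local copy type */
	bool have_snap = false;

	rcu_read_lock();
	conf = rcu_dereference(link_conf->chanctx_conf);
	if (conf) {
		snap.min_def = conf->min_def;		/* copy by value */
		snap.chains_static = conf->rx_chains_static;
		have_snap = true;
	}
	rcu_read_unlock();

	if (have_snap)
		ret = send_phy_cmd(&snap);	/* may sleep, so done outside RCU */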
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index edc8204f7c0e..79f4ac8cbc72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -391,9 +391,7 @@ static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file,
char buf[12];
u32 value;
- err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (err)
return err;
@@ -877,14 +875,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
le16_to_cpu(rsp->curr_mcc));
pos += scnprintf(pos, endpos - pos, "Block list entries:");
- for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++)
+ for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++)
pos += scnprintf(pos, endpos - pos, " 0x%x",
le16_to_cpu(rsp->block_list[i]));
pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n",
- iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO");
+ iwl_is_tas_approved() ? "YES" : "NO");
pos += scnprintf(pos, endpos - pos,
"\tDo TAS Support Dual Radio?: %s\n",
rsp->in_dual_radio ? "TRUE" : "FALSE");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 233ae81884a0..4863a3c74640 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -821,9 +821,10 @@ iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If secure LTF is turned off, replace the flag with PMF only
*/
flags = le32_to_cpu(target->initiator_ap_flags);
- if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
- !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
- flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
target->initiator_ap_flags = cpu_to_le32(flags);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8f10590f9cdd..8e760300a1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -12,6 +12,9 @@ struct iwl_mvm_pasn_sta {
struct list_head list;
struct iwl_mvm_int_sta int_sta;
u8 addr[ETH_ALEN];
+
+ /* must be last as it is followed by the buffer holding the key */
+ struct ieee80211_key_conf keyconf;
};
struct iwl_mvm_pasn_hltk_data {
@@ -303,6 +306,10 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
{
list_del(&sta->list);
+ if (sta->keyconf.keylen)
+ iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id),
+ &sta->keyconf);
+
if (iwl_mvm_has_mld_api(mvm->fw))
iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
else
@@ -342,6 +349,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (hltk && hltk_len) {
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
+ IWL_ERR(mvm, "No support for secure LTF measurement\n");
+ return -EINVAL;
+ }
+
hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
@@ -352,12 +365,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
if (!sta)
return -ENOBUFS;
ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len);
+ cipher, tk, tk_len, &sta->keyconf);
if (ret) {
kfree(sta);
return ret;
@@ -425,7 +438,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_unlock();
phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap,
ctx.rx_chains_static,
ctx.rx_chains_dynamic);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1252084662c6..e1c2b7fc92ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,6 +16,7 @@
#include "fw/acpi.h"
#include "fw/pnvm.h"
#include "fw/uefi.h"
+#include "fw/regulatory.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -487,7 +488,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
#endif /* CONFIG_ACPI */
}
-#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
u8 cmd_ver;
@@ -567,17 +567,6 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return ret;
}
-#else
-
-static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
-{
-}
-#endif
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
@@ -677,6 +666,11 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ)
+ mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -890,7 +884,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
@@ -931,9 +924,9 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
- IWL_NUM_CHAIN_TABLES,
- n_subbands, prof_a, prof_b);
+ ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -989,7 +982,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
+ if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
ret = -EIO;
iwl_free_resp(&cmd);
@@ -1003,7 +996,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 len;
u32 n_bands;
u32 n_profiles;
- u32 sk = 0;
+ __le32 sk = cpu_to_le32(0);
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
@@ -1020,27 +1013,35 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
/* the ops field is at the same spot for all versions, so set in v1 */
cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
if (cmd_ver == 5) {
len = sizeof(cmd.v5);
n_bands = ARRAY_SIZE(cmd.v5.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v5.table_revision = sk;
} else if (cmd_ver == 4) {
len = sizeof(cmd.v4);
n_bands = ARRAY_SIZE(cmd.v4.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v4.table_revision = sk;
} else if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v3.table_revision = sk;
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v2.table_revision = sk;
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
}
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
@@ -1052,8 +1053,8 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
/* the table is at the same position for all versions, so set use v1 */
- ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
- n_bands, n_profiles);
+ ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
@@ -1062,27 +1063,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- /* Only set to South Korea if the table revision is 1 */
- if (mvm->fwrt.geo_rev == 1)
- sk = 1;
-
- /*
- * Set the table_revision to South Korea (1) or not (0). The
- * element name is misleading, as it doesn't contain the table
- * revision number, but whether the South Korea variation
- * should be used.
- * This must be done after calling iwl_sar_geo_init().
- */
- if (cmd_ver == 5)
- cmd.v5.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 4)
- cmd.v4.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 3)
- cmd.v3.table_revision = cpu_to_le32(sk);
- else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER))
- cmd.v2.table_revision = cpu_to_le32(sk);
-
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
@@ -1091,7 +1071,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
union iwl_ppag_table_cmd cmd;
int ret, cmd_size;
- ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
/* Not supporting PPAG table is a valid scenario */
if (ret < 0)
return 0;
@@ -1110,80 +1090,19 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
+ if (!(iwl_is_ppag_approved(&mvm->fwrt)))
return 0;
return iwl_mvm_ppag_send_cmd(mvm);
}
-static const struct dmi_system_id dmi_tas_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "LENOVO",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "Acer",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "MSI",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
- },
- },
- { .ident = "Honor",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
- },
- },
- /* keep last */
- {}
-};
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return dmi_check_system(dmi_tas_approved_list);
-}
-
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
int i;
u32 size = le32_to_cpu(*le_size);
/* Verify that there is room for another country */
- if (size >= IWL_TAS_BLOCK_LIST_MAX)
+ if (size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
for (i = 0; i < size; i++) {
@@ -1200,21 +1119,21 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- union iwl_tas_config_cmd cmd = {};
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
- APCI_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
return;
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
+ ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1225,16 +1144,16 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
if (ret == 0)
return;
- if (!iwl_mvm_is_vendor_in_approved_list()) {
+ if (!iwl_is_tas_approved()) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_US)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_CANADA))) {
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
@@ -1242,41 +1161,64 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
} else {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is in the approved list.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
}
- /* v4 is the same size as v3, so no need to differentiate here */
- cmd_size = fw_ver < 3 ?
- sizeof(struct iwl_tas_config_cmd_v2) :
- sizeof(struct iwl_tas_config_cmd_v3);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
+
+ /* Set the v3- or v4-specific parts; they will be truncated for fw_ver < 3 */
+ if (fw_ver == 4) {
+ cmd.v4.override_tas_iec = data.override_tas_iec;
+ cmd.v4.enable_tas_iec = data.enable_tas_iec;
+ cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ } else {
+ cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
+ cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
+ }
+
+ cmd_size = sizeof(struct iwl_tas_config_cmd_common);
+ if (fw_ver >= 3)
+ /* v4 is the same size as v3 */
+ cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
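iwl_mvm_add_to_tas_block_list() above appends a country (MCC) to a bounded block list, refusing on overflow. A minimal standalone sketch of that helper's contract (the duplicate-as-success behaviour and plain integer types are assumptions based on the visible hunk):

	#include <stdbool.h>
	#include <stdint.h>

	static bool block_list_add(uint32_t *list, uint32_t *size,
				   uint32_t max, uint32_t mcc)
	{
		uint32_t i;

		if (*size >= max)
			return false;		/* no room for another country */
		for (i = 0; i < *size; i++)
			if (list[i] == mcc)
				return true;	/* already blocked */
		list[(*size)++] = mcc;
		return true;
	}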
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
- u8 value;
- int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
- &iwl_rfi_guid, &value);
+ u32 value = 0;
+ /* default behaviour is disabled */
+ bool bios_enable_rfi = false;
+ int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
if (ret < 0) {
IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
+ return bios_enable_rfi;
+ }
- } else if (value >= DSM_VALUE_RFI_MAX) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
- value);
-
- } else if (value == DSM_VALUE_RFI_ENABLE) {
+ value &= DSM_VALUE_RFI_DISABLE;
+ /* The RFI BIOS config value can only be 0 or 3:
+ * 0 means DDR and DLVR RFI are enabled, 3 means both are disabled.
+ * 1 and 2 are invalid BIOS configurations, so it is not possible to
+ * disable DDR/DLVR separately.
+ */
+ if (!value) {
IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- return DSM_VALUE_RFI_ENABLE;
+ bios_enable_rfi = true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(mvm,
+ "DSM RFI got invalid value, value=%d\n", value);
}
- IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
-
- /* default behaviour is disabled */
- return DSM_VALUE_RFI_DISABLE;
+ return bios_enable_rfi;
}
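As the comment in the hunk notes, only two BIOS values are meaningful once masked: 0 enables RFI for both DDR and DLVR, 3 disables both, and 1/2 are invalid. A minimal standalone sketch of that mapping (assuming DSM_VALUE_RFI_DISABLE is the two-bit DDR|DLVR mask, i.e. 3):

	#include <stdbool.h>
	#include <stdint.h>

	static bool rfi_enabled_from_bios(uint32_t value)
	{
		value &= 3;		/* keep only the DDR and DLVR bits */
		if (value == 0)
			return true;	/* both enabled */
		/* 3 means both disabled; 1 and 2 are invalid configs */
		return false;
	}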
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
@@ -1288,43 +1230,34 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
- cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
+ cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
if (!ret)
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_UNII4_CHAN,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret)
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ACTIVATE_CHANNEL,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
cmd.chan_state_active_bitmap = cpu_to_le32(value);
}
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &value);
if (!ret)
cmd.force_disable_channels_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
if (!ret)
cmd.edt_bitmap = cpu_to_le32(value);
@@ -1390,15 +1323,17 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
- mvm->fwrt.uats_enabled = TRUE;
+ mvm->fwrt.uats_enabled = true;
}
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
{
int ret;
+ iwl_acpi_get_guid_lock_status(&mvm->fwrt);
+
/* read PPAG table */
- ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
+ ret = iwl_bios_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1406,7 +1341,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
/* read SAR tables */
- ret = iwl_sar_get_wrds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1415,7 +1350,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
* If not available, don't fail and don't bother with EWRD and
* WGDS */
- if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
@@ -1426,7 +1361,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
} else {
- ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
+ ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use
* WRDS, so don't fail */
if (ret < 0)
@@ -1436,7 +1371,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
/* read geo SAR table */
if (iwl_sar_geo_support(&mvm->fwrt)) {
- ret = iwl_sar_get_wgds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wgds_table(&mvm->fwrt);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1446,59 +1381,18 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
-}
-#else /* CONFIG_ACPI */
-inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
- int prof_a, int prof_b)
-{
- return 1;
+ if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
+ IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
}
-inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- return -ENOENT;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
}
-static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
-static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
-{
-}
-
-static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
-{
-}
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return false;
-}
-
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- return DSM_VALUE_RFI_DISABLE;
-}
-
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
-{
-}
-
-#endif /* CONFIG_ACPI */
-
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
@@ -1543,10 +1437,15 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
- if (resp)
+ if (resp) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
resp);
+
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_disconnect_iterator,
+ mvm);
+ }
}
}
@@ -1781,9 +1680,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!mvm->ptp_data.ptp_clock)
iwl_mvm_ptp_init(mvm);
- if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
- IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
-
ret = iwl_mvm_ppag_init(mvm);
if (ret)
goto error;
@@ -1803,7 +1699,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
+ if (iwl_mvm_eval_dsm_rfi(mvm))
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index be48b0fc9cb6..f13f13e6b71a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
#include "time-event.h"
@@ -53,6 +53,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int link_id = link_conf->link_id;
struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
struct iwl_link_config_cmd cmd = {};
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
@@ -84,7 +86,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
@@ -100,6 +103,8 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
u32 ht_flag, flags = 0, flags_mask = 0;
int ret;
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info ||
link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
@@ -190,12 +195,21 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
+ struct ieee80211_chanctx_conf *ctx;
+ struct cfg80211_chan_def *def = NULL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ if (ctx)
+ def = iwl_mvm_chanctx_def(mvm, ctx);
+
if (iwlwifi_mod_params.disable_11be ||
- !link_conf->eht_support)
+ !link_conf->eht_support || !def ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6)
changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
else
- cmd.puncture_mask =
- cpu_to_le16(link_conf->eht_puncturing);
+ cmd.puncture_mask = cpu_to_le16(def->punctured);
+ rcu_read_unlock();
}
cmd.bss_color = link_conf->he_bss_color.color;
@@ -224,7 +238,8 @@ send_cmd:
cmd.flags = cpu_to_le32(flags);
cmd.flags_mask = cpu_to_le32(flags_mask);
cmd.spec_link_id = link_conf->link_id;
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY);
if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c4f96125cf33..228ede7b8957 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_TRIG_TX_FIFO_BK,
};
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
@@ -455,7 +466,7 @@ void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
/* Protect when channel wider than 20MHz */
- if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20)
+ if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
*protection_flags |= cpu_to_le32(ht_flag);
break;
default:
@@ -494,7 +505,7 @@ void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (link_conf->qos)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
}
@@ -910,8 +921,8 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (link_conf) {
basic = link_conf->basic_rates;
- if (link_conf->chandef.chan)
- band = link_conf->chandef.chan->band;
+ if (link_conf->chanreq.oper.chan)
+ band = link_conf->chanreq.oper.chan->band;
}
rcu_read_unlock();
}
@@ -1466,8 +1477,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
- int c = ieee80211_beacon_update_cntdwn(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif, 0);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif,
&csa_vif->bss_conf);
@@ -1486,7 +1497,7 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
/* we don't have CSA NoA scheduled yet, switch now */
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
RCU_INIT_POINTER(mvm->csa_vif, NULL);
}
}
@@ -1626,10 +1637,22 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
- iwl_mvm_connection_loss(mvm, vif, "missed beacons");
- else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
- ieee80211_beacon_loss(vif);
+ if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) {
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ } else {
+ IWL_WARN(mvm,
+ "missed beacons exceeds threshold, but receiving data. Stay connected, Expect bugs.\n");
+ IWL_WARN(mvm,
+ "missed_beacons:%d, missed_beacons_since_rx:%d\n",
+ rx_missed_bcon, rx_missed_bcon_since_rx);
+ }
+ } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ ieee80211_beacon_loss(vif);
+ else
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+ }
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
@@ -1832,7 +1855,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
csa_vif->bss_conf.beacon_int));
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 53e26c3c3a9a..1935630d3def 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -138,7 +138,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- le32_to_cpu(resp->cap), resp_ver);
+ le32_to_cpu(resp->cap), resp_ver,
+ mvm->fwrt.uats_enabled);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@@ -263,6 +264,9 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -272,6 +276,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
{
.iftype = NL80211_IFTYPE_STATION,
@@ -280,6 +285,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
};
@@ -490,6 +496,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
hw->wiphy->hw_timestamp_max_peers = 1;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -695,6 +706,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
}
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 11) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SECURE_LTF);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -1195,14 +1218,12 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
- max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
@@ -1210,13 +1231,7 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
- for (retry = 0; retry <= max_retry; retry++) {
- ret = __iwl_mvm_mac_start(mvm);
- if (!ret || mvm->pldr_sync)
- break;
-
- IWL_ERR(mvm, "mac start retry %d\n", retry);
- }
+ ret = __iwl_mvm_mac_start(mvm);
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1350,6 +1365,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
* discover that its list is now empty.
*/
cancel_work_sync(&mvm->async_handlers_wk);
+ wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
}
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
@@ -1433,7 +1449,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
goto out_unlock;
@@ -1454,7 +1470,8 @@ out_unlock:
}
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1617,7 +1634,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_remove_mac;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -1628,6 +1645,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
@@ -1638,7 +1658,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
mvm->monitor_p80 =
- iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
+ iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
@@ -2560,7 +2580,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
}
if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
@@ -2581,7 +2601,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
/* FIXME: need to update per link when FW API will
* support it
*/
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
IWL_ERR(mvm,
"failed to update CQM thresholds\n");
@@ -2608,9 +2628,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be))
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@@ -2619,10 +2637,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
vif->cfg.assoc &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
/*
@@ -3418,16 +3433,16 @@ iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (WARN_ON_ONCE(!link_conf->chandef.chan ||
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan ||
!mvmvif->link[link_id]))
return;
- if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) {
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
mvmvif->link[link_id]->he_ru_2mhz_block = false;
return;
}
- cfg80211_bss_iter(hw->wiphy, &link_conf->chandef,
+ cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper,
iwl_mvm_check_he_obss_narrow_bw_ru_iter,
&iter_data);
@@ -3487,10 +3502,10 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
/* FIXME: MEI needs to be updated for MLO */
- if (!vif->bss_conf.chandef.chan)
+ if (!vif->bss_conf.chanreq.oper.chan)
return;
- conn_info.channel = vif->bss_conf.chandef.chan->hw_value;
+ conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_TKIP:
@@ -3698,6 +3713,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mvmvif->ap_sta = sta;
+ /*
+ * Initialize the rates here already - this really tells
+ * the firmware only what the supported legacy rates are
+ * (may be) since it's initialized already from what the
+ * AP advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
return 0;
}
@@ -3724,10 +3752,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
* the default bss_conf
*/
if (!mvm->mld_api_is_used &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_vif_set_he_support(hw, vif, sta, true);
@@ -3779,7 +3805,7 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
NL80211_TDLS_ENABLE_LINK);
} else {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
mvmvif->authorized = 1;
@@ -3796,13 +3822,17 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvm_sta->authorized = true;
- iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
-
/* MFP is set by default before the station is authorized.
* Clear it here in case it's not used.
*/
- if (!sta->mfp)
- return callbacks->update_sta(mvm, vif, sta);
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
return 0;
}
@@ -3833,7 +3863,7 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
mvmvif->authorized = 0;
/* disable beacon filtering */
- iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ iwl_mvm_disable_beacon_filter(mvm, vif);
}
return 0;
@@ -4412,44 +4442,6 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
-#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
-#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
-#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
-#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
-
-static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
- u32 duration_ms,
- u32 *duration_tu,
- u32 *delay)
-{
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
-
- *delay = AUX_ROC_MIN_DELAY;
- *duration_tu = MSEC_TO_TU(duration_ms);
-
- /*
- * If we are associated we want the delay time to be at least one
- * dtim interval so that the FW can wait until after the DTIM and
- * then start the time event, this will potentially allow us to
- * remain off-channel for the max duration.
- * Since we want to use almost a whole dtim interval we would also
- * like the delay to be for 2-3 dtim intervals, in case there are
- * other time events with higher priority.
- */
- if (vif->cfg.assoc) {
- *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
- /* We cannot remain off-channel longer than the DTIM interval */
- if (dtim_interval <= *duration_tu) {
- *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
- if (*duration_tu <= AUX_ROC_MIN_DURATION)
- *duration_tu = dtim_interval -
- AUX_ROC_MIN_SAFETY_BUFFER;
- }
- }
-}
-
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -4547,48 +4539,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
return res;
}
-static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
- struct ieee80211_channel *channel,
- struct ieee80211_vif *vif,
- int duration, u32 activity)
-{
- int res;
- u32 duration_tu, delay;
- struct iwl_roc_req roc_req = {
- .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
- .activity = cpu_to_le32(activity),
- .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Set the channel info data */
- iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
- channel->hw_value,
- iwl_mvm_phy_band_from_nl80211(channel->band),
- IWL_PHY_CHANNEL_MODE20, 0);
-
- iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
- &delay);
- roc_req.duration = cpu_to_le32(duration_tu);
- roc_req.max_delay = cpu_to_le32(delay);
-
- IWL_DEBUG_TE(mvm,
- "\t(requested = %ums, max_delay = %ums)\n",
- duration, delay);
- IWL_DEBUG_TE(mvm,
- "Requesting to remain on channel %u for %utu\n",
- channel->hw_value, duration_tu);
-
- /* Set the node address */
- memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
-
- res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_req), &roc_req);
-
- return res;
-}
-
static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
{
int ret = 0;
@@ -4705,7 +4655,7 @@ static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt,
- &chandef, 1, 1);
+ &chandef, NULL, 1, 1);
}
/* Execute the common part for MLD and non-MLD modes */
@@ -4793,8 +4743,8 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
data->responder = true;
}
-static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm_ftm_responder_iter_data data = {
.responder = false,
@@ -4813,9 +4763,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt;
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -4828,7 +4776,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
goto out;
}
- ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def,
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -4881,9 +4829,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -4908,7 +4854,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_bt_coex_vif_change(mvm);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
@@ -5361,8 +5307,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
}
return -EOPNOTSUPP;
@@ -5446,7 +5392,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
iwl_mvm_csa_client_absent(mvm, vif);
if (mvmvif->bf_data.bf_enabled) {
- int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
return ret;
@@ -5606,8 +5552,16 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
if (mvmvif->csa_misbehave) {
+ struct ieee80211_bss_conf *link_conf;
+
/* Second time, give up on this AP*/
- iwl_mvm_abort_channel_switch(hw, vif);
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[chsw->link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+
+ iwl_mvm_abort_channel_switch(hw, vif, link_conf);
ieee80211_chswitch_done(vif, false, 0);
mvmvif->csa_misbehave = false;
return;
@@ -6108,6 +6062,7 @@ void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
enum iwl_mvm_rxq_notif_type type,
bool sync,
@@ -6156,11 +6111,12 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_killed(mvm),
- HZ);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx\n",
- mvm->queue_sync_state);
+ iwl_mvm_is_radio_hw_killed(mvm),
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
+ "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ mvm->queue_sync_state,
+ mvm->queue_sync_cookie);
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index ea3e9e9c6e26..8a38fc4b0b0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -62,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
u32 flags = 0;
lockdep_assert_held(&mvm->mutex);
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!pairwise)
flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
switch (keyconf->cipher) {
@@ -96,14 +98,19 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
if (!sta && vif->type == NL80211_IFTYPE_STATION)
sta = mvmvif->ap_sta;
- /* Set the MFP flag also for an AP interface where the key is an IGTK
- * key as in such a case the station would always be NULL
+ /*
+ * If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * The same goes if we are installing a pairwise key for an MFP station.
+ * If we are installing a group key that is not an iGTK, then the key
+ * will not be used for MGMT frames.
*/
- if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
- (vif->type == NL80211_IFTYPE_AP &&
- (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
flags |= IWL_SEC_KEY_FLAG_MFP;
+ if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
return flags;
}
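The rewritten comment above boils down to a simple rule for when the MFP flag is set on a key. A minimal standalone sketch of that rule (illustrative only; key indices 4 and 5 are the iGTK slots, as in the hunk):

	#include <stdbool.h>

	static bool key_protects_mgmt(bool pairwise, int keyidx, bool peer_mfp)
	{
		bool igtk = keyidx == 4 || keyidx == 5;

		/* iGTKs always protect MGMT frames; pairwise keys do so only
		 * when the peer negotiated management frame protection. */
		return igtk || (pairwise && peer_mfp);
	}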
@@ -335,6 +342,21 @@ static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
+ IWL_SEC_KEY_FLAG_MFP;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ 0);
+}
+
int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index f313a8d771e4..bb7851042177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
- MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
@@ -205,8 +205,11 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
cmd.p2p_dev.is_disc_extended =
iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
- /* Override the filter flags to accept only probe requests */
- cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+ * P2P service discovery using action frames
+ */
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 893b69fc841b..084314bf6f36 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
@@ -47,7 +47,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -254,9 +254,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
if (!rcu_access_pointer(link_conf->chanctx_conf))
n_active++;
- if (n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
@@ -607,6 +604,7 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
struct iwl_mvm_link_sel_data {
u8 link_id;
enum nl80211_band band;
+ enum nl80211_chan_width width;
bool active;
};
@@ -658,7 +656,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
continue;
data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chandef.chan->band;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
data[n_data].active = vif->active_links & BIT(link_id);
n_data++;
}
@@ -753,8 +752,8 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- /* Update EHT Puncturing info */
- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ /* if associated, maybe puncturing changed - we'll check later */
+ if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
if (link_changes) {
@@ -1122,17 +1121,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 removed = old_links & ~new_links;
u16 added = new_links & ~old_links;
int err, i;
- if (hweight16(new_links) > 1 &&
- n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
int r;
@@ -1224,6 +1218,146 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
return ret;
}
+/*
+ * This function receives a subset of the usable links bitmap and
+ * returns the primary link id, or -1 if no such link exists
+ * (e.g. a non-MLO connection) or none was found.
+ */
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 link_id, n_data = 0;
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc)
+ return -1;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
+ data[n_data].active = true;
+ n_data++;
+ }
+
+ if (n_data <= 1)
+ return -1;
+
+ /* The logic should be modified to handle more than 2 links */
+ WARN_ON_ONCE(n_data > 2);
+
+ /* The primary link is the one with the wider bandwidth; on equal widths, the higher band */
+ if (data[0].width > data[1].width)
+ return data[0].link_id;
+ if (data[0].width < data[1].width)
+ return data[1].link_id;
+ if (data[0].band >= data[1].band)
+ return data[0].link_id;
+
+ return data[1].link_id;
+}
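The selection above prefers the wider channel and, on equal widths, the higher band. A minimal standalone sketch of the rule for two candidate links (types simplified; not driver code):

	struct link_cand {
		int link_id;
		int band;	/* larger value = higher band, as in the enum order used above */
		int width;	/* larger value = wider channel */
	};

	static int pick_primary(struct link_cand a, struct link_cand b)
	{
		if (a.width != b.width)
			return a.width > b.width ? a.link_id : b.link_id;
		/* equal widths: prefer the higher band, ties go to 'a' */
		return a.band >= b.band ? a.link_id : b.link_id;
	}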
+
+/*
+ * This function receives a bitmap of usable links and checks if we can enter
+ * eSR on those links.
+ */
+static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long desired_links)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ desired_links);
+ const struct wiphy_iftype_ext_capab *ext_capa;
+ bool ret = true;
+ int link_id;
+
+ if (primary_link < 0)
+ return false;
+
+ if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
+ ieee80211_vif_type_p2p(vif));
+ if (!ext_capa ||
+ !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ /* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+ /* Mark eSR as disabled for the next time */
+ if (!ret)
+ mvmvif->bt_coex_esr_disabled = true;
+ break;
+ }
+
+ return ret;
+}
+
+static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int n_links = hweight16(desired_links);
+ bool ret = true;
+
+ if (n_links <= 1)
+ return true;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Check if HW supports the wanted number of links */
+ if (n_links > iwl_mvm_max_active_links(mvm, vif)) {
+ ret = false;
+ goto unlock;
+ }
+
+ /* If it is an eSR device, check that we can enter eSR */
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans))
+ ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links);
+unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static enum ieee80211_neg_ttlm_res
+iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+ u8 i;
+
+ /* Verify all TIDs are mapped to the same set of links */
+ map = neg_ttlm->downlink[0];
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
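
A hedged stand-alone sketch of the "all TIDs on one link set" check above; NUM_TIDS stands in for IEEE80211_TTLM_NUM_TIDS and is assumed to be 8 here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TIDS 8	/* stand-in for IEEE80211_TTLM_NUM_TIDS */

static bool ttlm_uniform(const uint16_t *dl, const uint16_t *ul)
{
	uint16_t map = dl[0];
	int i;

	for (i = 0; i < NUM_TIDS; i++)
		if (dl[i] != ul[i] || ul[i] != map)
			return false;	/* would map to NEG_TTLM_RES_REJECT */
	return true;			/* would map to NEG_TTLM_RES_ACCEPT */
}

int main(void)
{
	uint16_t same[NUM_TIDS] = { 3, 3, 3, 3, 3, 3, 3, 3 };
	uint16_t split[NUM_TIDS] = { 3, 3, 3, 3, 1, 1, 1, 1 };

	printf("uniform: %d\n", ttlm_uniform(same, same));	/* 1 -> accept */
	printf("split:   %d\n", ttlm_uniform(same, split));	/* 0 -> reject */
	return 0;
}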
+
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1318,4 +1452,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_vif_links = iwl_mvm_mld_change_vif_links,
.change_sta_links = iwl_mvm_mld_change_sta_links,
+ .can_activate_links = iwl_mvm_mld_can_activate_links,
+ .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 40627961b834..44571114fb15 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -40,8 +40,9 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
-#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
@@ -105,6 +106,7 @@ struct iwl_mvm_phy_ctxt {
/* track for RLC config command */
u32 center_freq1;
bool rlc_disabled;
+ u32 channel_load_by_us;
};
struct iwl_mvm_time_event_data {
@@ -121,7 +123,7 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
- u8 link_id;
+ s8 link_id;
};
/* Power management */
@@ -359,6 +361,7 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
+ * @bt_coex_esr_disabled: indicates if eSR is disabled due to BT coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -389,6 +392,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
+ bool bt_coex_esr_disabled;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -537,14 +541,12 @@ struct iwl_mvm_tt_mgmt {
#ifdef CONFIG_THERMAL
/**
- *struct iwl_mvm_thermal_device - thermal zone related data
- * @temp_trips: temperature thresholds for report
- * @fw_trips_index: keep indexes to original array - temp_trips
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @trips: temperature thresholds for report
* @tzone: thermal zone device data
*/
struct iwl_mvm_thermal_device {
struct thermal_trip trips[IWL_MAX_DTS_TRIPS];
- u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
struct thermal_zone_device *tzone;
};
@@ -848,6 +850,9 @@ struct iwl_mvm {
spinlock_t async_handlers_lock;
struct work_struct async_handlers_wk;
+ /* For async rx handlers that require the wiphy lock */
+ struct wiphy_work async_handlers_wiphy_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1215,7 +1220,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
- * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
@@ -1230,7 +1234,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
- IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
@@ -1567,13 +1570,17 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_trans *trans = mvm->fwrt.trans;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
- if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ if ((iwl_mvm_is_esr_supported(trans) &&
+ !mvmvif->bt_coex_esr_disabled) ||
+ ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1581,12 +1588,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- return iwl_mvm_has_new_tx_api(mvm) ?
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
}
struct iwl_rate_info {
@@ -1801,18 +1812,20 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
/* MVM PHY */
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm);
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef);
int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
u8 chains_static, u8 chains_dynamic);
@@ -2116,6 +2129,12 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
+bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link);
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2129,11 +2148,9 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
{}
#endif
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
@@ -2366,7 +2383,7 @@ u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time);
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2387,6 +2404,10 @@ int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf);
void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_vif_link_info *link,
@@ -2511,7 +2532,7 @@ static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
static inline void
iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
- struct cfg80211_chan_def *chandef)
+ const struct cfg80211_chan_def *chandef)
{
enum nl80211_band band = chandef->chan->band;
@@ -2601,7 +2622,6 @@ static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
-bool iwl_mvm_is_vendor_in_approved_list(void);
/* Callbacks for ieee80211_ops */
void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -2691,7 +2711,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
@@ -2730,4 +2751,28 @@ bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool valid_links_changed);
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links);
+
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx);
+
+static inline struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
+{
+ bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
+ iwl_mvm_enable_fils(mvm, ctx);
+
+ return use_def ? &ctx->def : &ctx->min_def;
+}
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay);
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index c0dd441e800e..ae8177222881 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -590,7 +590,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+ !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index adbbe19aeae5..a93981cb9714 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -161,9 +161,9 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.chandef.chan ||
- vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
- vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->cfg.assoc)
@@ -219,7 +219,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
return;
if (mvm->fw_static_smps_request &&
- link_conf->chandef.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
link_conf->he_support)
mode = IEEE80211_SMPS_STATIC;
@@ -259,7 +259,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
}
/**
- * enum iwl_rx_handler_context context for Rx handler
+ * enum iwl_rx_handler_context: context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
* which can't acquire mvm->mutex.
* @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
@@ -267,15 +267,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
* it will be called from a worker with mvm->mutex held.
* @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
* mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue
+ * infrastructure instead of the regular work queue.
*/
enum iwl_rx_handler_context {
RX_HANDLER_SYNC,
RX_HANDLER_ASYNC_LOCKED,
RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
};
/**
- * struct iwl_rx_handlers handler for FW notification
+ * struct iwl_rx_handlers: handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
@@ -316,7 +320,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -324,7 +329,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
iwl_mvm_handle_rx_system_oper_stats,
- RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_system_statistics_notif_oper),
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
iwl_mvm_handle_rx_system_oper_part1_stats,
@@ -673,6 +678,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -682,7 +689,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
if (!backoff)
return 0;
- dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
while (backoff->pwr) {
if (dflt_pwr_limit >= backoff->pwr)
@@ -1194,7 +1201,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
- iwl_mvm_get_acpi_tables(mvm);
+ iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
@@ -1265,6 +1272,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1551,35 +1560,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
}
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only those handlers.
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
- /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
/*
- * Sync with Rx path with a lock. Remove all the entries from this list,
- * add them to a local one (lock free), and then handle them.
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
*/
spin_lock_bh(&mvm->async_handlers_lock);
- list_splice_init(&mvm->async_handlers_list, &local_list);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_unlock(&mvm->mutex);
kfree(entry);
}
}
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
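
A rough user-space sketch of the dispatch pattern above (each worker only picks the handlers whose context bit is in its mask); the enum values are placeholders, not the driver's definitions:

#include <stdio.h>

enum ctx { CTX_SYNC, CTX_ASYNC_LOCKED, CTX_ASYNC_UNLOCKED, CTX_ASYNC_LOCKED_WIPHY };
#define BIT(n) (1u << (n))

static void run_if_selected(enum ctx c, unsigned int contexts, const char *name)
{
	/* Mirrors the BIT(entry->context) & contexts test in the worker above. */
	if (BIT(c) & contexts)
		printf("handling %s\n", name);
}

int main(void)
{
	unsigned int regular = BIT(CTX_ASYNC_LOCKED) | BIT(CTX_ASYNC_UNLOCKED);
	unsigned int wiphy = BIT(CTX_ASYNC_LOCKED_WIPHY);

	run_if_selected(CTX_ASYNC_LOCKED, regular, "locked handler");		/* runs */
	run_if_selected(CTX_ASYNC_LOCKED_WIPHY, regular, "wiphy handler");	/* skipped */
	run_if_selected(CTX_ASYNC_LOCKED_WIPHY, wiphy, "wiphy handler");	/* runs */
	return 0;
}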
+
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1659,7 +1695,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
- schedule_work(&mvm->async_handlers_wk);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
break;
}
}
@@ -1788,12 +1828,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
- bool state = iwl_mvm_is_radio_killed(mvm);
-
- if (state)
- wake_up(&mvm->rx_sync_waitq);
-
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1818,10 +1854,12 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state)
+ if (state) {
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- else
+ wake_up(&mvm->rx_sync_waitq);
+ } else {
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ }
iwl_mvm_set_rfkill_state(mvm);
@@ -1955,7 +1993,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
- mvm->fwrt.trans->dbg.restart_required = FALSE;
+ mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 334d1f59f6e4..ce264b386029 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -9,7 +9,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -33,7 +33,7 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
@@ -116,7 +116,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
struct iwl_phy_context_cmd_tail *tail =
@@ -137,7 +137,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
@@ -197,14 +197,18 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
*/
static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
- if (ver == 3 || ver == 4) {
+ if (ver < 5 || !ap || !ap->chan)
+ ap = NULL;
+
+ if (ver >= 3 && ver <= 6) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@@ -215,6 +219,14 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
chains_static,
chains_dynamic);
+ if (ap) {
+ cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap);
+ cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap);
+ }
+
+ if (ver == 6)
+ cmd.puncture_mask = cpu_to_le16(chandef->punctured);
+
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
0, sizeof(cmd), &cmd);
} else if (ver < 3) {
@@ -254,7 +266,8 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
* Send a command to add a PHY context based on the current HW configuration.
*/
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
int ret;
@@ -267,7 +280,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
FW_CTXT_ACTION_ADD);
@@ -300,7 +313,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* changed.
*/
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
@@ -324,7 +338,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
int ret;
/* ... remove it here ...*/
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE);
if (ret)
@@ -338,7 +352,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
action);
}
@@ -358,7 +372,7 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT);
- iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1,
+ iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1,
FW_CTXT_ACTION_REMOVE);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 1b9b06e0443f..41e68aa6bec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -20,8 +20,7 @@
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd,
- u32 flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
u16 len;
@@ -62,7 +61,7 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
len = offsetof(struct iwl_beacon_filter_cmd,
bf_threshold_absolute_low);
- return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0,
len, cmd);
}
@@ -813,8 +812,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd,
- u32 cmd_flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -825,7 +823,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = true;
@@ -834,20 +832,18 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,7 +852,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -865,10 +861,9 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
- return _iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return _iwl_mvm_disable_beacon_filter(mvm, vif);
}
static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
@@ -919,7 +914,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
!vif->cfg.ps ||
iwl_mvm_vif_low_latency(mvmvif));
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 6cba8a353b53..00860feefa7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -479,9 +479,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
+ u32 enabled = le32_to_cpu(notif->amsdu_enabled);
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
if (link_sta->agg.max_amsdu_len < size) {
/*
* In debug link_sta->agg.max_amsdu_len < size
@@ -492,7 +498,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
goto out;
}
- mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->amsdu_enabled = enabled;
mvmsta->max_amsdu_len = size;
link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
@@ -525,10 +531,10 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
- if (WARN_ON_ONCE(!link_conf->chandef.chan))
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
switch (le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
@@ -538,7 +544,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
- } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ &&
+ } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ &&
eht_cap->has_eht) {
switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 481d68cbbbd8..a8c4e354e2ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4161,6 +4161,8 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
* @mvm: The mvm component
* @mvmsta: The station
* @enable: Enable Tx protection?
+ *
+ * Returns: an error code
*/
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8caa971770c6..b1add7942c5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -752,6 +752,19 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
spin_unlock(&mvm->tcm.lock);
}
+static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ int i;
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ if (!mvm->phy_ctxts[i].ref)
+ continue;
+ mvm->phy_ctxts[i].channel_load_by_us =
+ le32_to_cpu(per_phy[i].channel_load_by_us);
+ }
+}
+
static void
iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy *stats)
@@ -766,6 +779,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator_all_macs,
&data);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
static void
@@ -841,6 +855,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
struct iwl_stats_ntfy_per_link *link_stats;
struct ieee80211_bss_conf *bss_conf;
struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif_link_info *link_info;
int link_id;
int sig;
@@ -857,20 +872,26 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
continue;
mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif);
- if (!mvmvif || !mvmvif->link[link_id])
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
continue;
link_stats = &per_link[fw_link_id];
- mvmvif->link[link_id]->beacon_stats.num_beacons =
+ link_info->beacon_stats.num_beacons =
le32_to_cpu(link_stats->beacon_counter);
/* we basically just use the u8 to store 8 bits and then treat
* it as a s8 whenever we take it out to a different type.
*/
- mvmvif->link[link_id]->beacon_stats.avg_signal =
+ link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
+ if (link_info->phy_ctxt &&
+ link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
+ link_id);
+
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -935,6 +956,7 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index af15d470c69b..1484eaedf452 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -282,6 +282,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
u32 status,
struct ieee80211_rx_status *stats)
{
+ struct wireless_dev *wdev;
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 keyid;
@@ -303,9 +304,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
- return -1;
+ goto report;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
@@ -314,13 +321,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
- if (!sta)
- return -1;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
/*
* both keys will have the same cipher and MIC length, use
* whichever one is available
@@ -329,11 +329,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!key) {
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
if (!key)
- return -1;
+ goto report;
}
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
- return -1;
+ goto report;
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
@@ -347,7 +347,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
- return -1;
+ goto report;
}
/* Report status to mac80211 */
@@ -355,6 +355,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
return -1;
}
@@ -397,8 +401,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
case IWL_RX_MPDU_STATUS_SEC_GCM:
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
/* alg is CCM: check MIC only */
- if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mvm,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
return -1;
+ }
stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
*crypt_len = IEEE80211_CCMP_HDR_LEN;
@@ -516,11 +523,9 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1)) {
- rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
return false;
- }
if (ieee80211_is_data_qos(hdr->frame_control)) {
/* frame has qos control */
@@ -646,10 +651,8 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (!ba_data) {
- WARN(true, "BAID %d not found in map\n", baid);
+ if (WARN(!ba_data, "BAID %d not found in map\n", baid))
goto out;
- }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -685,11 +688,11 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
len -= sizeof(*notif) + sizeof(*internal_notif);
- if (internal_notif->sync &&
- mvm->queue_sync_cookie != internal_notif->cookie) {
- WARN_ONCE(1, "Received expired RX queue sync message\n");
+ if (WARN_ONCE(internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie,
+ "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
+ internal_notif->cookie, mvm->queue_sync_cookie, queue))
return;
- }
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7b6f1cdca067..f3e3986b4c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -241,13 +241,11 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
return IWL_SCAN_TYPE_FRAGMENTED;
/*
- * in case of DCM with GO where BSS DTIM interval < 220msec
- * set all scan requests as fast-balance scan
+ * in case of DCM with P2P GO set all scan requests as
+ * fast-balance scan
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- data.is_dcm_with_p2p_go &&
- ((vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period) < 220))
+ data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 30d4233595e8..16285ae7cae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -232,6 +232,9 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
};
struct ieee80211_sta *sta = NULL;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD))
+ return 0;
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 2a3ca9785974..491c449fd431 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -71,7 +71,7 @@ u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
mpdu_dens = link_sta->ht_cap.ampdu_density;
}
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
/* overwrite HT values on 6 GHz */
mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -208,7 +208,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (sta->deflink.ht_cap.ht_supported ||
- mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
@@ -1502,6 +1502,34 @@ out_err:
return ret;
}
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
@@ -2989,16 +3017,6 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
-
- /*
- * After we've deleted it, do another queue sync
- * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
- * running it won't find a new session in the old
- * BAID. It can find the NULL pointer for the BAID,
- * but we must not have it find a different session.
- */
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
- true, NULL, 0);
}
return 0;
@@ -3559,6 +3577,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
@@ -4298,12 +4319,12 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len)
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *keyconf)
{
int ret;
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_key_conf *keyconf;
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
@@ -4328,12 +4349,6 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
goto out;
- keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
- if (!keyconf) {
- ret = -ENOBUFS;
- goto out;
- }
-
keyconf->cipher = cipher;
memcpy(keyconf->key, key, key_len);
keyconf->keylen = key_len;
@@ -4354,10 +4369,9 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
0, NULL, 0, 0, true);
}
- kfree(keyconf);
- return 0;
out:
- iwl_mvm_dealloc_int_sta(mvm, sta);
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b33a0ce096d4..b3450569864e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -571,10 +571,12 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len);
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 2e653a417d62..a59d264a11c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -45,32 +45,24 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
te_data->link_id = -1;
}
-void iwl_mvm_roc_done_wk(struct work_struct *wk)
+static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-
/*
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
- clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-
- synchronize_net();
-
- /*
- * Flush the offchannel queue -- this is called when the time
+ * in the case that the time event actually completed in the firmware.
+ *
+ * Also flush the offchannel queue -- this is called when the time
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
-
- mutex_lock(&mvm->mutex);
- if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
+ synchronize_net();
+
/*
* NB: access to this pointer would be racy, but the flush bit
* can only be set when we had a P2P-Device VIF, and we have a
@@ -105,21 +97,16 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
}
}
- /*
- * Clear the ROC_AUX_RUNNING status bit.
- * This will cause the TX path to drop offchannel transmissions.
- * That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
+ /* Do the same for AUX ROC */
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- /* do the same in case of hot spot 2.0 */
+ synchronize_net();
+
iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
mvm->aux_sta.tfd_queue_msk);
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
- goto out_unlock;
+ return;
}
/* In newer version of this command an aux station is added only
@@ -128,8 +115,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
+}
-out_unlock:
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_cleanup_roc(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -163,12 +156,12 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
@@ -294,18 +287,6 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
-{
- /*
- * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
- * roc_done_wk is already scheduled or running, so don't schedule it
- * again to avoid a race where the roc_done_wk clears this bit after
- * it is set here, affecting the next run of the roc_done_wk.
- */
- if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
- iwl_mvm_roc_finished(mvm);
-}
-
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -357,7 +338,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -692,7 +673,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 link_id)
+ s8 link_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
@@ -706,8 +687,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
"Invalid link ID for session protection: %u\n", link_id))
return -EINVAL;
- if (WARN(ieee80211_vif_is_mld(vif) &&
- !(vif->active_links & BIT(link_id)),
+ if (WARN(!mvmvif->link[link_id]->active,
"Session Protection on an inactive link: %u\n", link_id))
return -EINVAL;
@@ -716,7 +696,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 id, u32 link_id)
+ u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
struct iwl_mvm_session_prot_cmd cmd = {
@@ -745,7 +725,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
- unsigned int link_id;
+ s8 link_id;
if (!vif)
return false;
@@ -783,7 +763,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_cancel_session_protection(mvm, vif, id,
link_id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
}
}
return false;
@@ -929,7 +909,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
mvmvif->time_event_data.link_id != notif_link_id,
- "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
+ "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
notif_link_id, mvmvif->time_event_data.link_id))
goto out_unlock;
@@ -973,7 +953,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
mvmvif->time_event_data.link_id = -1;
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
if (WARN_ON(mvmvif->time_event_data.id !=
@@ -987,6 +967,86 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay)
+{
+ u32 dtim_interval = vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int;
+
+ *delay = AUX_ROC_MIN_DELAY;
+ *duration_tu = MSEC_TO_TU(duration_ms);
+
+ /*
+ * If we are associated we want the delay time to be at least one
+ * dtim interval so that the FW can wait until after the DTIM and
+ * then start the time event, this will potentially allow us to
+ * remain off-channel for the max duration.
+ * Since we want to use almost a whole dtim interval we would also
+ * like the delay to be for 2-3 dtim intervals, in case there are
+ * other time events with higher priority.
+ */
+ if (vif->cfg.assoc) {
+ *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= *duration_tu) {
+ *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (*duration_tu <= AUX_ROC_MIN_DURATION)
+ *duration_tu = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+}
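
Plugging sample numbers into the logic above (associated, beacon_int = 100 TU, dtim_period = 3, a 500 ms request) gives a 281 TU duration and a 585 TU delay; a small stand-alone recalculation, with the AUX_ROC_* constants inlined:

#include <stdio.h>

#define MSEC_TO_TU(ms)	((ms) * 1000 / 1024)

int main(void)
{
	unsigned int dtim_interval = 3 * 100;		/* dtim_period * beacon_int, in TU */
	unsigned int duration_tu = MSEC_TO_TU(500);	/* requested 500 ms -> 488 TU */
	unsigned int delay = MSEC_TO_TU(200);		/* AUX_ROC_MIN_DELAY default */
	unsigned int max_delay = MSEC_TO_TU(600);	/* AUX_ROC_MAX_DELAY */

	/* Associated: push the delay out to up to three DTIM intervals. */
	delay = dtim_interval * 3 < max_delay ? dtim_interval * 3 : max_delay;	/* 585 */

	/* Cannot stay off-channel longer than one DTIM interval. */
	if (dtim_interval <= duration_tu) {
		duration_tu = dtim_interval - MSEC_TO_TU(20);	/* 300 - 19 = 281 */
		if (duration_tu <= MSEC_TO_TU(100))
			duration_tu = dtim_interval - MSEC_TO_TU(10);
	}

	printf("duration = %u TU, delay = %u TU\n", duration_tu, delay);	/* 281, 585 */
	return 0;
}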
+
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity)
+{
+ int res;
+ u32 duration_tu, delay;
+ struct iwl_roc_req roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .activity = cpu_to_le32(activity),
+ .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
+ channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20, 0);
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
+ &delay);
+ roc_req.duration = cpu_to_le32(duration_tu);
+ roc_req.max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
+ IWL_DEBUG_TE(mvm,
+ "Requesting to remain on channel %u for %utu\n",
+ channel->hw_value, duration_tu);
+
+ /* Set the node address */
+ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ 0, sizeof(roc_req), &roc_req);
+
+ return res;
+}
+
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -1164,18 +1224,22 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ te_data = &mvmvif->time_event_data;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
+ IWL_DEBUG_TE(mvm,
+ "No remain on channel event\n");
+ return;
+ }
+
iwl_mvm_cancel_session_protection(mvm, vif,
- mvmvif->time_event_data.id,
- mvmvif->time_event_data.link_id);
- iwl_mvm_p2p_roc_finished(mvm);
+ te_data->id,
+ te_data->link_id);
} else {
iwl_mvm_roc_station_remove(mvm, mvmvif);
- iwl_mvm_roc_finished(mvm);
}
-
- return;
+ goto cleanup_roc;
}
te_data = iwl_mvm_get_roc_te(mvm);
@@ -1186,13 +1250,21 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
- iwl_mvm_p2p_roc_finished(mvm);
- } else {
+ else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
- iwl_mvm_roc_finished(mvm);
- }
+
+cleanup_roc:
+ /*
+ * In case we get here before the ROC event started (so the status
+ * bit isn't set), set it here so that iwl_mvm_cleanup_roc() will
+ * clean things up properly.
+ */
+ set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
+ IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ &mvm->status);
+ iwl_mvm_cleanup_roc(mvm);
}
void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
@@ -1297,7 +1369,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index dee9c367dcd3..61a4638d1be2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -555,6 +555,22 @@ static int compare_temps(const void *a, const void *b)
return ((s16)le16_to_cpu(*(__le16 *)a) -
(s16)le16_to_cpu(*(__le16 *)b));
}
+
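+/* walk context used to collect valid trip temperatures for the FW command */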
+struct iwl_trip_walk_data {
+ __le16 *thresholds;
+ int count;
+};
+
+static int iwl_trip_temp_cb(struct thermal_trip *trip, void *arg)
+{
+ struct iwl_trip_walk_data *twd = arg;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID)
+ return 0;
+
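+ /* the thermal core stores trip temperatures in millidegrees Celsius */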
+ twd->thresholds[twd->count++] = cpu_to_le16((s16)(trip->temperature / 1000));
+ return 0;
+}
#endif
int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
@@ -562,42 +578,25 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
struct temp_report_ths_cmd cmd = {0};
int ret;
#ifdef CONFIG_THERMAL
- int i, j, idx = 0;
+ struct iwl_trip_walk_data twd = { .thresholds = cmd.thresholds, .count = 0 };
lockdep_assert_held(&mvm->mutex);
if (!mvm->tz_device.tzone)
goto send;
- /* The driver holds array of temperature trips that are unsorted
- * and uncompressed, the FW should get it compressed and sorted
+ /*
+ * The thermal core holds an array of temperature trips that are
+ * unsorted and uncompressed, the FW should get it compressed and
+ * sorted.
*/
/* compress trips to cmd array, remove uninitialized values*/
- for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
- if (mvm->tz_device.trips[i].temperature != INT_MIN) {
- cmd.thresholds[idx++] =
- cpu_to_le16((s16)(mvm->tz_device.trips[i].temperature / 1000));
- }
- }
- cmd.num_temps = cpu_to_le32(idx);
-
- if (!idx)
- goto send;
+ for_each_thermal_trip(mvm->tz_device.tzone, iwl_trip_temp_cb, &twd);
- /*sort cmd array*/
- sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
-
- /* we should save the indexes of trips because we sort
- * and compress the orginal array
- */
- for (i = 0; i < idx; i++) {
- for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
- if ((int)(le16_to_cpu(cmd.thresholds[i]) * 1000) ==
- mvm->tz_device.trips[j].temperature)
- mvm->tz_device.fw_trips_index[i] = j;
- }
- }
+ cmd.num_temps = cpu_to_le32(twd.count);
+ if (twd.count)
+ sort(cmd.thresholds, twd.count, sizeof(s16), compare_temps, NULL);
send:
#endif
@@ -668,9 +667,6 @@ static struct thermal_zone_device_ops tzone_ops = {
.set_trip_temp = iwl_mvm_tzone_set_trip_temp,
};
-/* make all trips writable */
-#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
-
static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
{
int i, ret;
@@ -686,10 +682,18 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ /*
+ * 0 is a valid temperature, so initialize the array with
+ * THERMAL_TEMP_INVALID, which is an invalid temperature.
+ */
+ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+ mvm->tz_device.trips[i].temperature = THERMAL_TEMP_INVALID;
+ mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE;
+ mvm->tz_device.trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP;
+ }
mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name,
mvm->tz_device.trips,
IWL_MAX_DTS_TRIPS,
- IWL_WRITABLE_TRIPS_MSK,
mvm, &tzone_ops,
NULL, 0, 0);
if (IS_ERR(mvm->tz_device.tzone)) {
@@ -704,15 +708,6 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
if (ret) {
IWL_DEBUG_TEMP(mvm, "Failed to enable thermal zone\n");
thermal_zone_device_unregister(mvm->tz_device.tzone);
- return;
- }
-
- /* 0 is a valid temperature,
- * so initialize the array with S16_MIN which invalid temperature
- */
- for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) {
- mvm->tz_device.trips[i].temperature = INT_MIN;
- mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 461f26d9214e..782ddc8c296b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -520,6 +520,31 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
+{
+ if (unlikely(!mvmsta))
+ return true;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return true;
+
+ if (likely(ieee80211_is_data(hdr->frame_control) &&
+ mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
+ return false;
+
+ /*
+ * Not a data frame: use the host rate on an old device that
+ * can't possibly be doing MLO (its firmware may select a bad
+ * rate). If we might be doing MLO we need to let the FW pick
+ * (since we don't necessarily know the link), but there the
+ * FW rate selection was fixed.
+ */
+ return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+}
+
static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
const u8 *addr3_override)
{
@@ -567,12 +592,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/*
- * For data and mgmt packets rate info comes from the fw. Only
+ * For data and mgmt packets rate info comes from the fw (for
+ * new devices, older FW is somewhat broken for this). Only
* set rate/antenna for injected frames with fixed rate, or
- * when no sta is given.
+ * when no sta is given, or with older firmware.
*/
- if (unlikely(!sta ||
- info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
@@ -881,10 +906,10 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
if (WARN_ON(!link_conf))
band = NL80211_BAND_2GHZ;
else
- band = link_conf->chandef.chan->band;
+ band = link_conf->chanreq.oper.chan->band;
rcu_read_unlock();
} else {
- band = mvmsta->vif->bss_conf.chandef.chan->band;
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
}
lmac = iwl_mvm_get_lmac_id(mvm, band);
@@ -926,9 +951,15 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
- if (WARN_ON_ONCE(IS_ERR(next)))
- return -EINVAL;
- else if (next)
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
+ if (next)
consume_skb(skb);
skb_list_walk_safe(next, tmp, next) {
@@ -984,8 +1015,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 tid;
- snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
- tcp_hdrlen(skb);
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1636,12 +1666,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
- tx_resp->frame_count) & 0xfff;
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
}
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
@@ -2174,6 +2210,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
tfd_cnt, pkt_len))
return;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
@@ -2209,12 +2251,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
-
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
- sta_id, le32_to_cpu(ba_res->flags),
- le16_to_cpu(ba_res->txed),
- le16_to_cpu(ba_res->done));
return;
}
@@ -2246,9 +2282,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_unlock();
- iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags, false);
-
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
ba_notif->sta_addr, ba_notif->sta_id);
@@ -2261,6 +2294,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags, false);
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 91286018a69d..ab56ff87c6f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -249,6 +249,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
* This is the special case in which init is set and we call a callback in
* this case to clear the state indicating that station creation is in
* progress.
+ *
+ * Returns: 0 on success, or a negative error code on failure
*/
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index fa4a14546860..c8fc8b4fd85c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -119,7 +119,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 5f55efe64bf5..0fa92704cd14 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2c9b98c8184b..4a657036b9d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -502,12 +502,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -526,7 +530,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, _cfg, _name)
-static const struct iwl_dev_info iwl_dev_info_table[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
/* 9000 */
IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
@@ -1008,8 +1012,13 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1115,8 +1124,24 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_sc_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sc2_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sc2f_name),
#endif /* CONFIG_IWLMVM */
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
+#endif
/*
* Read rf id and cdb info from prph register and store it
@@ -1143,6 +1168,20 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* In BZ, the MAC step must be read from the CNVI aux register */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+
+ /* For BZ-U, take B step also when A step is indicated */
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
+ step == SILICON_A_STEP)
+ step = SILICON_B_STEP;
+
+ iwl_trans->hw_rev_step = step;
+ iwl_trans->hw_rev |= step;
+ }
+
/* Read cdb info (also contains the jacket info if needed in the future */
iwl_trans->hw_wfpm_id =
iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
@@ -1236,7 +1275,7 @@ out:
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static const struct iwl_dev_info *
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
@@ -1299,6 +1338,7 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
return NULL;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1382,6 +1422,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
+ iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
}
#if IS_ENABLED(CONFIG_IWLMVM)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 63e13577aff8..6c76b2dd6878 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1484,12 +1484,9 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->trans_cfg->gen2)
- _iwl_trans_pcie_gen2_stop_device(trans);
- else
- _iwl_trans_pcie_stop_device(trans, from_irq);
- }
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
+ !WARN_ON(trans->trans_cfg->gen2))
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
@@ -1718,6 +1715,7 @@ enable_msi:
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
+#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1738,6 +1736,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
"Failed to set affinity mask for IRQ %d\n",
trans_pcie->msix_entries[i].vector);
}
+#endif
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 6c2b37e56c78..fa8eba47dc4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1331,7 +1331,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
amsdu_pad = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index ca74b1b63cac..33973a60d0bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -271,9 +271,10 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
meta = NULL;
goto unmap;
}
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
ret = 0;
unmap:
@@ -352,7 +353,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
@@ -1601,8 +1602,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (read_ptr == tfd_num)
goto out;
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->read_ptr, tfd_num, ssn);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
@@ -1630,7 +1631,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
- if (WARN_ON_ONCE(!skb))
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq->read_ptr, txq_id))
continue;
iwl_txq_free_tso_page(trans, skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
new file mode 100644
index 000000000000..5658471bdf0a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+iwlwifi-tests-y += module.o devinfo.o
+
+ccflags-y += -I$(srctree)/$(src)/../
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
new file mode 100644
index 000000000000..7aa47fce6e2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the iwlwifi device info table
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
+{
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->cores);
+}
+
+static void devinfo_table_order(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+ const struct iwl_dev_info *ret;
+
+ ret = iwl_pci_find_dev_info(di->device, di->subdevice,
+ di->mac_type, di->mac_step,
+ di->rf_type, di->cdb,
+ di->jacket, di->rf_id,
+ di->no_160, di->cores, di->rf_step);
+ if (ret != di) {
+ iwl_pci_print_dev_info("searched: ", di);
+ iwl_pci_print_dev_info("found: ", ret);
+ KUNIT_FAIL(test,
+ "unusable entry at index %d (found index %d instead)\n",
+ idx, (int)(ret - iwl_dev_info_table));
+ }
+ }
+}
+
+static struct kunit_case devinfo_test_cases[] = {
+ KUNIT_CASE(devinfo_table_order),
+ {}
+};
+
+static struct kunit_suite iwlwifi_devinfo = {
+ .name = "iwlwifi-devinfo",
+ .test_cases = devinfo_test_cases,
+};
+
+kunit_test_suite(iwlwifi_devinfo);
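+
+/*
+ * Usage sketch, assuming the standard in-tree KUnit tooling: with
+ * CONFIG_IWLWIFI_KUNIT_TESTS enabled this suite can be run e.g. via
+ *   ./tools/testing/kunit/kunit.py run 'iwlwifi-devinfo'
+ */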
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/module.c b/drivers/net/wireless/intel/iwlwifi/tests/module.c
new file mode 100644
index 000000000000..0c54f818e5a7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Module boilerplate for the iwlwifi kunit module.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlwifi");
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index c6084683aedd..687841b2fa2a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -704,6 +704,10 @@ static void p54_set_coverage_class(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops p54_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = p54_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = p54_start,
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 104d2b6dc9af..5a525da434c2 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
if (!cmdarray[i].cmdbuf) {
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
ret = -1;
- goto done;
+ goto free_cmd_array;
}
}
@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- ret = 0;
+ return 0;
+free_cmd_array:
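+ /* free the command buffers allocated so far, then the array itself */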
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+ if (cmdarray[i].cmdbuf) {
+ kfree(cmdarray[i].cmdbuf);
+ cmdarray[i].cmdbuf = NULL;
+ }
+ }
+ kfree(priv->cmd_array);
+ priv->cmd_array = NULL;
done:
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 8690b0114e23..b722a6587fd3 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1052,7 +1052,7 @@ static int if_spi_init_card(struct if_spi_card *card)
"attached to SPI bus_num %d, chip_select %d. "
"spi->max_speed_hz=%d\n",
card->card_id, card->card_rev,
- card->spi->master->bus_num,
+ card->spi->controller->bus_num,
spi_get_chipselect(card->spi, 0),
card->spi->max_speed_hz);
err = if_spi_prog_helper_firmware(card, helper);
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 199d33ed3bb9..9cca69fe04d7 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -473,6 +473,10 @@ static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops lbtf_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = lbtf_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = lbtf_op_start,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index da211372a481..b90f922f1cdc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -288,6 +288,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
wiphy_lock(priv->wdev.wiphy);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
wiphy_unlock(priv->wdev.wiphy);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 90e401100898..c0c635e74bc5 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -392,12 +392,10 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
chan_list =
(struct mwifiex_ie_types_chan_list_param_set *) *buffer;
- memset(chan_list, 0,
- sizeof(struct mwifiex_ie_types_chan_list_param_set));
+ memset(chan_list, 0, struct_size(chan_list, chan_scan_param, 1));
chan_list->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
- chan_list->header.len = cpu_to_le16(
- sizeof(struct mwifiex_ie_types_chan_list_param_set) -
- sizeof(struct mwifiex_ie_types_header));
+ chan_list->header.len =
+ cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
chan_list->chan_scan_param[0].chan_number =
bss_desc->bcn_ht_oper->primary_chan;
chan_list->chan_scan_param[0].radio_type =
@@ -411,8 +409,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
- ret_len += sizeof(struct mwifiex_ie_types_chan_list_param_set);
+ *buffer += struct_size(chan_list, chan_scan_param, 1);
+ ret_len += struct_size(chan_list, chan_scan_param, 1);
}
if (bss_desc->bcn_bss_co_2040) {
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 3604abcbcff9..b909a7665e9c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -3359,7 +3359,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
}
if (!wowlan->patterns[i].pkt_offset) {
- if (!(byte_seq[0] & 0x01) &&
+ if (is_unicast_ether_addr(byte_seq) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index f9c9fec7c792..9deaf59dcb62 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -566,14 +566,8 @@ mwifiex_verext_write(struct file *file, const char __user *ubuf,
int ret;
u32 versionstrsel;
struct mwifiex_private *priv = (void *)file->private_data;
- char buf[16];
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = kstrtou32(buf, 10, &versionstrsel);
+ ret = kstrtou32_from_user(ubuf, count, 10, &versionstrsel);
if (ret)
return ret;
@@ -874,19 +868,14 @@ mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
{
bool timeshare_coex;
struct mwifiex_private *priv = file->private_data;
- char kbuf[16];
int ret;
if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
return -EOPNOTSUPP;
- memset(kbuf, 0, sizeof(kbuf));
-
- if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
- return -EFAULT;
-
- if (kstrtobool(kbuf, &timeshare_coex))
- return -EINVAL;
+ ret = kstrtobool_from_user(ubuf, count, &timeshare_coex);
+ if (ret)
+ return ret;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
@@ -970,9 +959,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
mwifiex_dfs_dir);
- if (!priv->dfs_dev_dir)
- return;
-
MWIFIEX_DFS_ADD_FILE(info);
MWIFIEX_DFS_ADD_FILE(debug);
MWIFIEX_DFS_ADD_FILE(getlog);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 62f3c9a52a1d..3adc447b715f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -770,7 +770,7 @@ struct mwifiex_chan_scan_param_set {
struct mwifiex_ie_types_chan_list_param_set {
struct mwifiex_ie_types_header header;
- struct mwifiex_chan_scan_param_set chan_scan_param[1];
+ struct mwifiex_chan_scan_param_set chan_scan_param[];
} __packed;
struct mwifiex_ie_types_rxba_sync {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 318b42b1896f..175882485a19 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -28,11 +28,9 @@
#include <linux/inetdevice.h>
#include <linux/devcoredump.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a2ddac363b10..0326b121747c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -664,15 +664,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Copy the current channel TLV to the command being
prepared */
- memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
+ memcpy(&chan_tlv_out->chan_scan_param[tlv_idx],
tmp_chan_list,
- sizeof(chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/* Increment the TLV header length by the size
appended */
le16_unaligned_add_cpu(&chan_tlv_out->header.len,
- sizeof(
- chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
@@ -2369,12 +2368,11 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
bgscan_cfg_in->chan_list[chan_idx].chan_number;
chan_idx++) {
- temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+ temp_chan = &chan_list_tlv->chan_scan_param[chan_idx];
/* Increment the TLV header length by size appended */
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
- sizeof(
- chan_list_tlv->chan_scan_param));
+ sizeof(*chan_list_tlv->chan_scan_param));
temp_chan->chan_number =
bgscan_cfg_in->chan_list[chan_idx].chan_number;
@@ -2413,7 +2411,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_scan_param);
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
chan_num *
- sizeof(chan_list_tlv->chan_scan_param[0]));
+ sizeof(*chan_list_tlv->chan_scan_param));
}
tlv_pos += (sizeof(chan_list_tlv->header)
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 00a5679b5c51..8558995e8fc7 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -871,7 +871,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
}
} else {
memcpy(ra, skb->data, ETH_ALEN);
- if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
+ if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
eth_broadcast_addr(ra);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 13bcb123d122..ce8fea76dbb2 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5610,6 +5610,10 @@ static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops mwl8k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mwl8k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mwl8k_start,
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index d6575fe18c6b..f7f2d9a8ab0f 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o
+ tx.o agg-rx.o mcu.o wed.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 10cbd9e560e7..07c386c7b4d0 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -122,7 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
- u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+ u8 tidno;
u16 seqno;
if (!ieee80211_is_ctl(bar->frame_control))
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 00230f106294..72a7bd5a8576 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -197,9 +197,8 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q->tail = q->head;
}
-static void
-__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
@@ -219,8 +218,7 @@ __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
mt76_dma_sync_idx(dev, q);
}
-static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
__mt76_dma_queue_reset(dev, q, true);
}
@@ -632,9 +630,8 @@ free_skb:
return ret;
}
-static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
- bool allow_direct)
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
int frames = 0;
@@ -681,81 +678,6 @@ done:
return frames;
}
-int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
-{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- int ret = 0, type, ring;
- u16 flags;
-
- if (!q || !q->ndesc)
- return -EINVAL;
-
- flags = q->flags;
- if (!q->wed || !mtk_wed_device_active(q->wed))
- q->flags &= ~MT_QFLAG_WED;
-
- if (!(q->flags & MT_QFLAG_WED))
- return 0;
-
- type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
- ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
-
- switch (type) {
- case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
- reset);
- if (!ret)
- q->wed_regs = q->wed->tx_ring[ring].reg_base;
- break;
- case MT76_WED_Q_TXFREE:
- /* WED txfree queue needs ring to be initialized before setup */
- q->flags = 0;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q, false);
-
- ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
- if (!ret)
- q->wed_regs = q->wed->txfree_ring.reg_base;
- break;
- case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
- reset);
- if (!ret)
- q->wed_regs = q->wed->rx_ring[ring].reg_base;
- break;
- case MT76_WED_RRO_Q_DATA:
- q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
- mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
- q->head = q->ndesc - 1;
- q->queued = q->head;
- break;
- case MT76_WED_RRO_Q_MSDU_PG:
- q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
- mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
- q->head = q->ndesc - 1;
- q->queued = q->head;
- break;
- case MT76_WED_RRO_Q_IND:
- q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q, false);
- mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- q->flags = flags;
-
- return ret;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
-
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@@ -800,7 +722,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
- ret = mt76_dma_wed_setup(dev, q, false);
+ ret = mt76_wed_dma_setup(dev, q, false);
if (ret)
return ret;
@@ -863,7 +785,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
/* reset WED rx queues */
- mt76_dma_wed_setup(dev, q, true);
+ mt76_wed_dma_setup(dev, q, true);
if (mt76_queue_is_wed_tx_free(q))
return;
@@ -1054,20 +976,6 @@ void mt76_dma_attach(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
-void mt76_dma_wed_reset(struct mt76_dev *dev)
-{
- struct mt76_mmio *mmio = &dev->mmio;
-
- if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
- return;
-
- complete(&mmio->wed_reset);
-
- if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
- dev_err(dev->dev, "wed reset complete timeout\n");
-}
-EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);
-
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index c479cc6388ef..1de5a2b20f74 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -79,15 +79,18 @@ enum mt76_dma_wed_ind_reason {
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
-int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
-void mt76_dma_wed_reset(struct mt76_dev *dev);
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct);
+void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
dev->queue_ops->reset_q(dev, q);
if (mtk_wed_device_active(&dev->mmio.wed))
- mt76_dma_wed_setup(dev, q, true);
+ mt76_wed_dma_setup(dev, q, true);
}
static inline void
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8a3a90d1bfac..068206e48aec 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -579,13 +579,18 @@ EXPORT_SYMBOL_GPL(mt76_unregister_phy);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
+ bool is_qrx = mt76_queue_is_rx(dev, q);
struct page_pool_params pp_params = {
.order = 0,
.flags = 0,
.nid = NUMA_NO_NODE,
.dev = dev->dma_dev,
};
- int idx = q - dev->q_rx;
+ int idx = is_qrx ? q - dev->q_rx : -1;
+
+ /* Allocate page_pools just for rx/wed_tx_free queues */
+ if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
+ return 0;
switch (idx) {
case MT_RXQ_MAIN:
@@ -604,6 +609,9 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
pp_params.dma_dir = DMA_FROM_DEVICE;
pp_params.max_len = PAGE_SIZE;
pp_params.offset = 0;
+ /* NAPI is available just for rx queues */
+ if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
+ pp_params.napi = &dev->napi[idx];
}
q->page_pool = page_pool_create(&pp_params);
@@ -1613,8 +1621,8 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
+ ieee80211_csa_finish(vif, 0);
}
void mt76_csa_finish(struct mt76_dev *dev)
@@ -1638,7 +1646,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active)
return;
- dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
void mt76_csa_check(struct mt76_dev *dev)
@@ -1854,19 +1862,3 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
-
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data)
-{
- struct mt76_phy *phy = hw->priv;
- struct mtk_wed_device *wed = &phy->dev->mmio.wed;
-
- if (!mtk_wed_device_active(wed))
- return -EOPNOTSUPP;
-
- return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
-}
-EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index c3e0e23e0161..cd2e9737c3bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -85,113 +85,6 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
- int i;
-
- for (i = 0; i < dev->rx_token_size; i++) {
- struct mt76_txwi_cache *t;
-
- t = mt76_rx_token_release(dev, i);
- if (!t || !t->ptr)
- continue;
-
- mt76_put_page_pool_buf(t->ptr, false);
- t->ptr = NULL;
-
- mt76_put_rxwi(dev, t);
- }
-
- mt76_free_pending_rxwi(dev);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
-
-u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
- struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, len = SKB_WITH_OVERHEAD(q->buf_size);
- struct mt76_txwi_cache *t = NULL;
-
- for (i = 0; i < size; i++) {
- enum dma_data_direction dir;
- dma_addr_t addr;
- u32 offset;
- int token;
- void *buf;
-
- t = mt76_get_rxwi(dev);
- if (!t)
- goto unmap;
-
- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
- if (!buf)
- goto unmap;
-
- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
-
- desc->buf0 = cpu_to_le32(addr);
- token = mt76_rx_token_consume(dev, buf, t, addr);
- if (token < 0) {
- mt76_put_page_pool_buf(buf, false);
- goto unmap;
- }
-
- token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
-#endif
- desc->token |= cpu_to_le32(token);
- desc++;
- }
-
- return 0;
-
-unmap:
- if (t)
- mt76_put_rxwi(dev, t);
- mt76_mmio_wed_release_rx_buf(wed);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
-
-int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- spin_lock_bh(&dev->token_lock);
- dev->token_size = wed->wlan.token_start;
- spin_unlock_bh(&dev->token_lock);
-
- return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
-
-void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- spin_lock_bh(&dev->token_lock);
- dev->token_size = dev->drv->token_size;
- spin_unlock_bh(&dev->token_lock);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
-
-void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- complete(&dev->mmio.wed_reset_complete);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
-
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index b20c34d5a0f7..a91f6ddacbd9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -210,6 +210,8 @@ struct mt76_queue {
u16 first;
u16 head;
u16 tail;
+ u8 hw_idx;
+ u8 ep;
int ndesc;
int queued;
int buf_size;
@@ -217,7 +219,6 @@ struct mt76_queue {
bool blocked;
u8 buf_offset;
- u8 hw_idx;
u16 flags;
struct mtk_wed_device *wed;
@@ -1081,12 +1082,6 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
-
static inline u16 mt76_chip(struct mt76_dev *dev)
{
return dev->rev >> 16;
@@ -1097,13 +1092,34 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
return dev->rev & 0xffff;
}
+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
+void mt76_wed_offload_disable(struct mtk_wed_device *wed);
+void mt76_wed_reset_complete(struct mtk_wed_device *wed);
+void mt76_wed_dma_reset(struct mt76_dev *dev);
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
-void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
-int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
-void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
-void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
+int mt76_wed_offload_enable(struct mtk_wed_device *wed);
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
+#else
+static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ return 0;
+}
+
+static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ return 0;
+}
+
+static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset)
+{
+ return 0;
+}
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
@@ -1470,13 +1486,6 @@ static inline bool mt76u_urb_error(struct urb *urb)
urb->status != -ENOENT;
}
-/* Map hardware queues to usb endpoints */
-static inline u8 q2ep(u8 qid)
-{
- /* TODO: take management packets to queue 5 */
- return qid + 1;
-}
-
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout, int ep)
@@ -1598,6 +1607,18 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
+static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ if (q == &dev->q_rx[i])
+ return true;
+ }
+
+ return false;
+}
+
static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
return (q->flags & MT_QFLAG_WED) &&
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index e2146d30e553..9b49267b1eab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -701,6 +701,10 @@ static void mt7603_tx(struct ieee80211_hw *hw,
}
const struct ieee80211_ops mt7603_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7603_tx,
.start = mt7603_start,
.stop = mt7603_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index ae34d019e588..c807bd8d928d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -353,7 +353,7 @@ static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
if (vif->bss_conf.csa_active)
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index fdde3d70b300..98d64d3d2993 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -227,6 +227,11 @@ static inline bool is_mt7992(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7992;
}
+static inline bool is_mt799x(struct mt76_dev *dev)
+{
+ return is_mt7996(dev) || is_mt7992(dev);
+}
+
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index bd2a92467a97..5f132115ebfc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -32,6 +32,11 @@ enum {
MT_LMAC_PSMP0,
};
+enum {
+ MT_TXS_MPDU_FMT = 0,
+ MT_TXS_PPDU_FMT = 2,
+};
+
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_COUNT GENMASK(12, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index c7914643e9c0..b841bf628d02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -544,7 +544,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD5_PID, pid);
if (pid >= MT_PACKET_ID_FIRST) {
val |= MT_TXD5_TX_STATUS_HOST;
- amsdu_en = amsdu_en && !is_mt7921(dev);
+ amsdu_en = 0;
}
txwi[5] = cpu_to_le32(val);
@@ -579,6 +579,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
spe_idx = 24 + phy_idx;
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, spe_idx));
}
+
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
@@ -714,6 +716,9 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff_head list;
struct sk_buff *skb;
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == MT_TXS_PPDU_FMT)
+ return false;
+
mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
if (skb) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 3a20ba0d2492..af0c2b2aacb0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -66,7 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
- (is_mt7925(dev) && addr == 0x900000) ||
+ (is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
(is_mt7996(dev) && addr == 0x900000) ||
(is_mt7992(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
@@ -283,6 +283,9 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
+ if (is_mt799x(dev) && !wcid->sta)
+ hdr.muar_idx = 0xe;
+
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
skb = mt76_mcu_msg_alloc(dev, NULL, len);
@@ -2101,7 +2104,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
int j, msg_len, num_ch;
struct sk_buff *skb;
- num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
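+ /* the last batch carries the remaining channels (possibly a full batch) */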
+ num_ch = i == batch_size - 1 ? n_chan - i * batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index ae6d0179727d..657a4d1f856b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -808,6 +808,7 @@ enum {
STA_REC_MLD = 0x20,
STA_REC_EHT = 0x22,
STA_REC_PN_INFO = 0x26,
+ STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@@ -935,6 +936,9 @@ enum {
PHY_TYPE_INDEX_NUM
};
+#define HR_DSSS_ERP_BASIC_RATE GENMASK(3, 0)
+#define OFDM_BASIC_RATE (BIT(6) | BIT(8) | BIT(10))
+
#define PHY_TYPE_BIT_HR_DSSS BIT(PHY_TYPE_HR_DSSS_INDEX)
#define PHY_TYPE_BIT_ERP BIT(PHY_TYPE_ERP_INDEX)
#define PHY_TYPE_BIT_OFDM BIT(PHY_TYPE_OFDM_INDEX)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 293e66fa83d5..79b7996ad1a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -59,6 +59,10 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static const struct ieee80211_ops mt76x0e_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index dd042949cf82..bba44f289b4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -118,6 +118,10 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops mt76x0u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0u_start,
.stop = mt76x0u_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 85a78dea4085..29b9a15f8dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
+ int pid, len = tx_info->skb->len, ep = dev->mphy.q_tx[qid]->ep;
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index b38bb7a2362b..bfc8c69f43fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -132,6 +132,10 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
}
const struct ieee80211_ops mt76x2_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ca78e14251c2..e92bb871f231 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
+ { USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ac07ed1f63a3..9fe390fdd730 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -103,6 +103,10 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
const struct ieee80211_ops mt76x2u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index c91a1c54027f..0baa82c8df5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -614,7 +614,7 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
mtk_wed_device_dma_reset(wed);
mt7915_dma_disable(dev, force);
- mt76_dma_wed_reset(&dev->mt76);
+ mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index b01edbed969c..e45361111f9b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1520,12 +1520,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- mtk_wed_device_stop(&dev->mt76.mmio.wed);
- if (!is_mt798x(&dev->mt76))
- mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
- }
-
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_stop_queues(ext_phy->hw);
@@ -1545,6 +1539,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index df2d4279790d..3709d18da0e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1708,6 +1708,6 @@ const struct ieee80211_ops mt7915_ops = {
.set_radar_background = mt7915_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7915_net_fill_forward_path,
- .net_setup_tc = mt76_net_setup_tc,
+ .net_setup_tc = mt76_wed_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index c67c4f6ca2aa..d90f98c50039 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -228,7 +228,7 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -463,10 +463,10 @@ static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return false;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
mt7915_check_he_obss_narrow_bw_ru_iter,
&iter_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 3039f53e2245..d6ecd698cdcd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -490,6 +490,11 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return dev->reg.map[i].maps + ofs;
}
+ return 0;
+}
+
+static u32 __mt7915_reg_remap_addr(struct mt7915_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -514,15 +519,30 @@ void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7915_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7915_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
+ u32 addr = __mt7915_reg_addr(dev, offset), val;
- return dev->bus_ops->rr(mdev, addr);
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
@@ -530,7 +550,14 @@ static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- dev->bus_ops->wr(mdev, addr, val);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7915_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
@@ -538,7 +565,14 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- return dev->bus_ops->rmw(mdev, addr, mask, val);
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7915_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@@ -672,13 +706,13 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
}
wed->wlan.init_buf = mt7915_wed_init_buf;
- wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
- wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
wed->wlan.reset = mt7915_mmio_wed_reset;
- wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
@@ -707,6 +741,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7915:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 4727d9c7b11d..6e79bc65f5a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -287,6 +287,7 @@ struct mt7915_dev {
struct list_head sta_rc_list;
struct list_head twt_list;
+ spinlock_t reg_lock;
u32 hw_pattern;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index 8b4809703efc..f5b99917c08e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -516,7 +516,8 @@ static int mt798x_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
if (ret)
return ret;
- if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
+ if (version == 0x8a00 || version == 0x8a10 ||
+ version == 0x8b00 || version == 0x8c10) {
rg_xo_01 = 0x1d59080f;
rg_xo_03 = 0x34c00fe0;
} else {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 48433c6d5e7d..ef0c721d26e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -138,9 +138,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
if (pm->suspended)
return;
+ dev->regd_in_progress = true;
+
mt792x_mutex_acquire(dev);
mt7921_regd_update(dev);
mt792x_mutex_release(dev);
+
+ dev->regd_in_progress = false;
+ wake_up(&dev->wait);
}
int mt7921_mac_init(struct mt792x_dev *dev)
@@ -261,6 +266,7 @@ int mt7921_register_device(struct mt792x_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
+ init_waitqueue_head(&dev->wait);
if (mt76_is_sdio(&dev->mt76))
init_waitqueue_head(&dev->mt76.sdio.wait);
spin_lock_init(&dev->pm.txq_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0d5adc5ddae3..ca36de34171b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -325,6 +325,19 @@ static void mt7921_roc_iter(void *priv, u8 *mac,
mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
}
+void mt7921_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync);
+
void mt7921_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index f5582477c7e4..8b4ce32a2cd1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1272,7 +1272,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
};
int ret, valid_cnt = 0;
- u16 buf_len = 0;
+ u32 buf_len = 0;
u8 *pos;
if (!clc)
@@ -1283,7 +1283,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
if (mt76_find_power_limits_node(&dev->mt76))
req.cap |= CLC_CAP_DTS_EN;
- buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
+ buf_len = le32_to_cpu(clc->len) - sizeof(*clc);
pos = clc->data;
while (buf_len > 16) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 1cb21133992b..3016636d18c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -322,4 +322,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
enum mt7921_roc_req type, u8 token_id);
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
u8 token_id);
+void mt7921_roc_abort_sync(struct mt792x_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index dde26f327478..cda853e86676 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include "mt7921.h"
#include "../mt76_connac2_mac.h"
@@ -369,6 +370,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_irq;
+ if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
+ device_init_wakeup(dev->mt76.dev, true);
+
return 0;
err_free_irq:
@@ -386,7 +390,11 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
+ device_init_wakeup(dev->mt76.dev, false);
+
mt7921e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
@@ -405,10 +413,15 @@ static int mt7921_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7921_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
+ wait_event_timeout(dev->wait,
+ !dev->regd_in_progress, 5 * HZ);
+
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index a9ce1e746b95..004d942ee11a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -216,6 +216,8 @@ static int mt7921s_suspend(struct device *__dev)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7921_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 8f9b7a2f376c..c4cbc8976046 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -2,11 +2,61 @@
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
#include <linux/firmware.h>
#include "mt7925.h"
#include "mac.h"
#include "mcu.h"
+static ssize_t mt7925_thermal_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ switch (to_sensor_dev_attr(attr)->index) {
+ case 0: {
+ struct mt792x_phy *phy = dev_get_drvdata(dev);
+ struct mt792x_dev *mdev = phy->dev;
+ int temperature;
+
+ mt792x_mutex_acquire(mdev);
+ temperature = mt7925_mcu_get_temperature(phy);
+ mt792x_mutex_release(mdev);
+
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7925_thermal_temp, 0);
+
+static struct attribute *mt7925_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7925_hwmon);
+
+static int mt7925_thermal_init(struct mt792x_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct device *hwmon;
+ const char *name;
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s",
+ wiphy_name(wiphy));
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7925_hwmon_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
static void
mt7925_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
@@ -142,6 +192,12 @@ static void mt7925_init_work(struct work_struct *work)
return;
}
+ ret = mt7925_thermal_init(&dev->phy);
+ if (ret) {
+ dev_err(dev->mt76.dev, "thermal init failed\n");
+ return;
+ }
+
/* we support chip reset now */
dev->hw_init_done = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 125a1be3cb64..6179798a8845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -359,6 +359,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mvif->sta.vif = mvif;
mt76_wcid_init(&mvif->sta.wcid);
mt7925_mac_wtbl_update(dev, idx,
@@ -526,7 +527,7 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY && !mvif->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
}
@@ -710,7 +711,7 @@ static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
- mt792x_mac_set_timeing(phy);
+ mt7925_mcu_set_timing(phy, vif);
}
}
@@ -1274,6 +1275,25 @@ mt7925_channel_switch_beacon(struct ieee80211_hw *hw,
}
static int
+mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ static const u8 mq_to_aci[] = {
+ [IEEE80211_AC_VO] = 3,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ };
+
+ /* firmware uses access class index */
+ mvif->queue_params[mq_to_aci[queue]] = *params;
+
+ return 0;
+}
+
+static int
mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -1396,7 +1416,7 @@ const struct ieee80211_ops mt7925_ops = {
.add_interface = mt7925_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7925_config,
- .conf_tx = mt792x_conf_tx,
+ .conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
.bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index c5fd7116929b..bd37cb8d734b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -656,6 +656,42 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
return ret;
}
+int mt7925_mcu_get_temperature(struct mt792x_phy *phy)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 _rsv2[4];
+ } __packed req = {
+ .tag = cpu_to_le16(0x0),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+ struct mt7925_thermal_evt {
+ u8 rsv[4];
+ __le32 temperature;
+ } __packed * evt;
+ struct mt792x_dev *dev = phy->dev;
+ int temperature, ret;
+ struct sk_buff *skb;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(THERMAL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ skb_pull(skb, 4 + sizeof(struct tlv));
+ evt = (struct mt7925_thermal_evt *)skb->data;
+
+ temperature = le32_to_cpu(evt->temperature);
+
+ dev_kfree_skb(skb);
+
+ return temperature;
+}
+
static void
mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
{
@@ -814,6 +850,7 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
struct mt76_wcid *wcid;
struct tlv *tlv;
@@ -827,7 +864,11 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (sta)
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ else
+ wcid = &mvif->sta.wcid;
+
if (!wcid)
return;
@@ -895,7 +936,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
e = (struct edca *)tlv;
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
+ e->queue = ac;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -921,61 +962,67 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
+ struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
struct sta_rec_sec_uni *sec;
+ struct mt792x_vif *mvif = msta->vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
struct tlv *tlv;
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sta = msta == &mvif->sta ?
+ NULL :
+ container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
+ sec->bss_idx = mvif->mt76.idx;
+ sec->is_authenticator = 0;
+ sec->mgmt_prot = 0;
+ sec->wlan_idx = (u8)wcid->idx;
+
+ if (sta) {
+ sec->tx_key = 1;
+ sec->key_type = 1;
+ memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
+ } else {
+ memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ }
if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
u8 cipher;
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
+ sec->add = 1;
+ cipher = mt7925_mcu_get_cipher(key->cipher);
+ if (cipher == CONNAC3_CIPHER_NONE)
return -EOPNOTSUPP;
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = sta_key_conf->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, sta_key_conf->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
- sec->n_cipher = 2;
+ if (cipher == CONNAC3_CIPHER_BIP_CMAC_128) {
+ sec->cipher_id = CONNAC3_CIPHER_BIP_CMAC_128;
+ sec->key_id = sta_key_conf->keyidx;
+ sec->key_len = 32;
+ memcpy(sec->key, sta_key_conf->key, 16);
+ memcpy(sec->key + 16, key->key, 16);
} else {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
+ sec->cipher_id = cipher;
+ sec->key_id = key->keyidx;
+ sec->key_len = key->keylen;
+ memcpy(sec->key, key->key, key->keylen);
- if (cipher == MCU_CIPHER_TKIP) {
+ if (cipher == CONNAC3_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
+ memcpy(sec->key + 16, key->key + 24, 8);
+ memcpy(sec->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
+ if (cipher == CONNAC3_CIPHER_AES_CCMP) {
memcpy(sta_key_conf->key, key->key, key->keylen);
sta_key_conf->keyidx = key->keyidx;
}
-
- sec->n_cipher = 1;
}
} else {
- sec->n_cipher = 0;
+ sec->add = 0;
}
return 0;
@@ -1460,12 +1507,10 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
struct tlv *tlv;
u8 af = 0, mm = 0;
- if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
- return;
-
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
+ phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
if (sta->deflink.ht_cap.ht_supported) {
af = sta->deflink.ht_cap.ampdu_factor;
mm = sta->deflink.ht_cap.ampdu_density;
@@ -1573,8 +1618,6 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
struct sk_buff *skb;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
@@ -1598,30 +1641,11 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
info->state);
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
}
- sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
- sizeof(struct tlv));
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
- WTBL_RESET_AND_SET,
- sta_wtbl, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- if (info->enable) {
- mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
- info->sta, sta_wtbl,
- wtbl_hdr);
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
- sta_wtbl, wtbl_hdr);
- if (info->sta)
- mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr,
- true, true);
- }
+ if (info->enable)
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
@@ -2049,9 +2073,9 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
- u8 idx, basic_phy;
struct tlv *tlv;
int conn_type;
+ u8 idx;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
@@ -2062,8 +2086,10 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
- basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta);
- basic_req->nonht_basic_phy = cpu_to_le16(basic_phy);
+ if (band == NL80211_BAND_2GHZ)
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
+ else
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
@@ -2122,21 +2148,21 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
sec = (struct bss_sec_tlv *)tlv;
switch (mvif->cipher) {
- case MCU_CIPHER_GCMP_256:
- case MCU_CIPHER_GCMP:
+ case CONNAC3_CIPHER_GCMP_256:
+ case CONNAC3_CIPHER_GCMP:
sec->mode = MODE_WPA3_SAE;
sec->status = 8;
break;
- case MCU_CIPHER_AES_CCMP:
+ case CONNAC3_CIPHER_AES_CCMP:
sec->mode = MODE_WPA2_PSK;
sec->status = 6;
break;
- case MCU_CIPHER_TKIP:
+ case CONNAC3_CIPHER_TKIP:
sec->mode = MODE_WPA2_PSK;
sec->status = 4;
break;
- case MCU_CIPHER_WEP104:
- case MCU_CIPHER_WEP40:
+ case CONNAC3_CIPHER_WEP104:
+ case CONNAC3_CIPHER_WEP40:
sec->mode = MODE_SHARED;
sec->status = 0;
break;
@@ -2167,6 +2193,11 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
bmc = (struct bss_rate_tlv *)tlv;
+ if (band == NL80211_BAND_2GHZ)
+ bmc->basic_rate = cpu_to_le16(HR_DSSS_ERP_BASIC_RATE);
+ else
+ bmc->basic_rate = cpu_to_le16(OFDM_BASIC_RATE);
+
bmc->short_preamble = (band == NL80211_BAND_2GHZ);
bmc->bc_fixed_rate = idx;
bmc->mc_fixed_rate = idx;
@@ -2249,6 +2280,38 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
vif->bss_conf.he_bss_color.color : 0;
}
+static void
+mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_phy *phy = mvif->phy;
+ struct bss_ifs_time_tlv *ifs_time;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_IFS_TIME, sizeof(*ifs_time));
+ ifs_time = (struct bss_ifs_time_tlv *)tlv;
+ ifs_time->slot_valid = true;
+ ifs_time->slot_time = cpu_to_le16(phy->slottime);
+}
+
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_vif *vif,
@@ -2273,6 +2336,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
mt7925_mcu_bss_qos_tlv(skb, vif);
mt7925_mcu_bss_mld_tlv(skb, vif, sta);
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
if (vif->bss_conf.he_support) {
mt7925_mcu_bss_he_tlv(skb, vif, phy);
@@ -2845,12 +2909,16 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
- uni_txd->option = MCU_CMD_UNI_EXT_ACK;
uni_txd->cid = cpu_to_le16(mcu_cmd);
uni_txd->s2d_index = MCU_S2D_H2N;
uni_txd->pkt_type = MCU_PKT_ID;
uni_txd->seq = seq;
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = MCU_CMD_UNI_QUERY_ACK;
+ else
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+
goto exit;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 3c41e21303b1..2a0bbfe7bfa5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -159,6 +159,20 @@ enum {
UNI_EVENT_SCAN_DONE_NLO = 3,
};
+enum connac3_mcu_cipher_type {
+ CONNAC3_CIPHER_NONE = 0,
+ CONNAC3_CIPHER_WEP40 = 1,
+ CONNAC3_CIPHER_TKIP = 2,
+ CONNAC3_CIPHER_AES_CCMP = 4,
+ CONNAC3_CIPHER_WEP104 = 5,
+ CONNAC3_CIPHER_BIP_CMAC_128 = 6,
+ CONNAC3_CIPHER_WEP128 = 7,
+ CONNAC3_CIPHER_WAPI = 8,
+ CONNAC3_CIPHER_CCMP_256 = 10,
+ CONNAC3_CIPHER_GCMP = 11,
+ CONNAC3_CIPHER_GCMP_256 = 12,
+};
+
struct mt7925_mcu_scan_chinfo_event {
u8 nr_chan;
u8 alpha2[3];
@@ -208,7 +222,7 @@ struct scan_req_tlv {
__le16 channel_dwell_time; /* channel Dwell interval */
__le16 timeout_value;
__le16 probe_delay_time;
- u8 func_mask_ext;
+ __le32 func_mask_ext;
};
struct scan_ssid_tlv {
@@ -334,7 +348,8 @@ struct bss_req_hdr {
struct bss_rate_tlv {
__le16 tag;
__le16 len;
- u8 __rsv1[4];
+ u8 __rsv1[2];
+ __le16 basic_rate;
__le16 bc_trans;
__le16 mc_trans;
u8 short_preamble;
@@ -382,25 +397,22 @@ struct sta_rec_eht {
u8 _rsv2[3];
} __packed;
-struct sec_key_uni {
- __le16 wlan_idx;
- u8 mgmt_prot;
- u8 cipher_id;
- u8 cipher_len;
- u8 key_id;
- u8 key_len;
- u8 need_resp;
- u8 key[32];
-} __packed;
-
struct sta_rec_sec_uni {
__le16 tag;
__le16 len;
u8 add;
- u8 n_cipher;
- u8 rsv[2];
-
- struct sec_key_uni key[2];
+ u8 tx_key;
+ u8 key_type;
+ u8 is_authenticator;
+ u8 peer_addr[6];
+ u8 bss_idx;
+ u8 cipher_id;
+ u8 key_id;
+ u8 key_len;
+ u8 wlan_idx;
+ u8 mgmt_prot;
+ u8 key[32];
+ u8 key_rsc[16];
} __packed;
struct sta_rec_hdr_trans {
@@ -428,6 +440,22 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
+struct bss_ifs_time_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 slot_valid;
+ u8 sifs_valid;
+ u8 rifs_valid;
+ u8 eifs_valid;
+ __le16 slot_time;
+ __le16 sifs_time;
+ __le16 rifs_time;
+ __le16 eifs_time;
+ u8 eifs_cck_valid;
+ u8 rsv;
+ __le16 eifs_cck_time;
+} __packed;
+
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@@ -440,7 +468,7 @@ struct sta_rec_mld {
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_phy) + \
sizeof(struct sta_rec_ra) + \
- sizeof(struct sta_rec_sec) + \
+ sizeof(struct sta_rec_sec_uni) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_eht) + \
@@ -455,6 +483,7 @@ struct sta_rec_mld {
sizeof(struct bss_mld_tlv) + \
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
+ sizeof(struct bss_ifs_time_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@@ -509,6 +538,33 @@ struct mt7925_wow_pattern_tlv {
u8 rsv[4];
} __packed;
+static inline enum connac3_mcu_cipher_type
+mt7925_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return CONNAC3_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return CONNAC3_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return CONNAC3_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return CONNAC3_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return CONNAC3_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return CONNAC3_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return CONNAC3_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return CONNAC3_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return CONNAC3_CIPHER_WAPI;
+ default:
+ return CONNAC3_CIPHER_NONE;
+ }
+}
+
int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@@ -525,6 +581,8 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int enable);
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 33785f526acf..8a4a71f6bcb6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -271,6 +271,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int mt7925_mcu_get_temperature(struct mt792x_phy *phy);
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 1fd99a856541..07b74d492ce1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -386,6 +386,8 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_rmw_field(dev, MT_HW_EMI_CTL, MT_HW_EMI_CTL_SLPPROT_EN, 1);
+
ret = mt792x_wfsys_reset(dev);
if (ret)
goto err_free_dev;
@@ -425,6 +427,7 @@ static void mt7925_pci_remove(struct pci_dev *pdev)
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
mt7925e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 3c897b34aaa7..a8556de3d480 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -186,6 +186,8 @@ struct mt792x_dev {
bool hw_init_done:1;
bool fw_assert:1;
bool has_eht:1;
+ bool regd_in_progress:1;
+ wait_queue_head_t wait;
struct work_struct init_work;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index e7afea87e82e..9317f8ff2070 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -66,13 +66,15 @@ free:
}
/* MTCL : Country List Table for 6G band */
-static void
+static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
- if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0)
- *version = 1;
- else
- *version = 2;
+ int ret;
+
+ *version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
+ ? 1 : 2;
+
+ return ret;
}
/* MTDS : Dynamic SAR Power Table */
@@ -166,16 +168,16 @@ int mt792x_init_acpi_sar(struct mt792x_dev *dev)
if (!asar)
return -ENOMEM;
- mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ ret = mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->countrylist);
+ asar->countrylist = NULL;
+ }
- /* MTDS is mandatory. Return error if table is invalid */
ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->dyn);
- devm_kfree(dev->mt76.dev, asar->countrylist);
- devm_kfree(dev->mt76.dev, asar);
-
- return ret;
+ asar->dyn = NULL;
}
/* MTGS is optional */
@@ -290,7 +292,7 @@ int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default)
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
int i;
- if (!phy->acpisar)
+ if (!phy->acpisar || !((struct mt792x_acpi_sar *)phy->acpisar)->dyn)
return 0;
/* When ACPI SAR enabled in HW, we should apply rules for .frp
@@ -353,11 +355,15 @@ static u8
mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
{
u8 config = 0;
+ u8 mode_6g, mode_5g9;
+
+ mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
+ mode_5g9 = (cl->mode_5g9 > 0x01) ? 0 : cl->mode_5g9;
- if (cl->cl6g[row] & BIT(column))
- config |= (cl->mode_6g & 0x3) << 2;
+ if ((cl->cl6g[row] & BIT(column)) || cl->mode_6g == 0x02)
+ config |= (mode_6g & 0x3) << 2;
if (cl->version > 1 && cl->cl5g9[row] & BIT(column))
- config |= (cl->mode_5g9 & 0x3);
+ config |= (mode_5g9 & 0x3);
return config;
}
@@ -374,7 +380,7 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
"AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
"FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
"LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
- "PT", "RO", "MT", "SK", "SI", "ES", "CH",
+ "PT", "RO", "SK", "SI", "ES", "SE", "CH",
};
struct mt792x_acpi_sar *sar = phy->acpisar;
struct mt792x_asar_cl *cl;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index c42101aa9e45..a405af8d9052 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -354,6 +354,7 @@ static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
+ "v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",
@@ -684,9 +685,10 @@ mt792x_get_mac80211_ops(struct device *dev,
if (!(*fw_features & MT792x_FW_CAP_CNM)) {
ops->remain_on_channel = NULL;
ops->cancel_remain_on_channel = NULL;
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->mgd_prepare_tx = NULL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 488326ce5ed4..5cc2d59b774a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -12,6 +12,8 @@ irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
struct mt792x_dev *dev = dev_instance;
+ if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
+ return IRQ_NONE;
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -123,14 +125,13 @@ static void mt792x_dma_prefetch(struct mt792x_dev *dev)
int mt792x_dma_enable(struct mt792x_dev *dev)
{
- if (is_mt7925(&dev->mt76))
- mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
-
/* configure prefetch settings */
mt792x_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (is_mt7925(&dev->mt76))
+ mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
@@ -140,12 +141,20 @@ int mt792x_dma_enable(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
+ MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
+ MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ if (is_mt7925(&dev->mt76)) {
+ mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
+ mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
+ mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
+ }
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
index a99af23e4b56..458cfd0260b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
@@ -292,9 +292,12 @@
#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WFDMA0_GLO_CFG_DMA_SIZE GENMASK(5, 4)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
+#define MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK BIT(11)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WFDMA0_GLO_CFG_RX_WB_DDONE BIT(13)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
@@ -322,6 +325,8 @@
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_RST_DRX_PTR MT_WFDMA0(0x280)
+#define MT_WFDMA0_INT_RX_PRI MT_WFDMA0(0x298)
+#define MT_WFDMA0_INT_TX_PRI MT_WFDMA0(0x29c)
#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
@@ -389,6 +394,9 @@
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_HW_EMI_CTL 0x18011100
+#define MT_HW_EMI_CTL_SLPPROT_EN BIT(1)
+
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 589a3efb9f8c..b49668a4b784 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -121,44 +121,25 @@ static void mt792xu_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
static void mt792xu_dma_prefetch(struct mt792x_dev *dev)
{
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
- MT_WPDMA0_BASE_PTR_MASK, 0x80);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
- MT_WPDMA0_BASE_PTR_MASK, 0xc0);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
- MT_WPDMA0_BASE_PTR_MASK, 0x100);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
- MT_WPDMA0_BASE_PTR_MASK, 0x140);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
- MT_WPDMA0_BASE_PTR_MASK, 0x180);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
- MT_WPDMA0_BASE_PTR_MASK, 0x280);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
- MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
+#define DMA_PREFETCH_CONF(_idx_, _cnt_, _base_) \
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL((_idx_)), \
+ MT_WPDMA0_MAX_CNT_MASK | MT_WPDMA0_BASE_PTR_MASK, \
+ FIELD_PREP(MT_WPDMA0_MAX_CNT_MASK, (_cnt_)) | \
+ FIELD_PREP(MT_WPDMA0_BASE_PTR_MASK, (_base_)))
+
+ DMA_PREFETCH_CONF(0, 4, 0x080);
+ DMA_PREFETCH_CONF(1, 4, 0x0c0);
+ DMA_PREFETCH_CONF(2, 4, 0x100);
+ DMA_PREFETCH_CONF(3, 4, 0x140);
+ DMA_PREFETCH_CONF(4, 4, 0x180);
+ DMA_PREFETCH_CONF(16, 4, 0x280);
+ DMA_PREFETCH_CONF(17, 4, 0x2c0);
}
static void mt792xu_wfdma_init(struct mt792x_dev *dev)
{
+ int i;
+
mt792xu_dma_prefetch(dev);
mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
@@ -169,10 +150,27 @@ static void mt792xu_wfdma_init(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- /* disable dmashdl */
- mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
- MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
- mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+ mt76_rmw(dev, MT_DMASHDL_REFILL, MT_DMASHDL_REFILL_MASK, 0xffe00000);
+ mt76_clear(dev, MT_DMASHDL_PAGE, MT_DMASHDL_GROUP_SEQ_ORDER);
+ mt76_rmw(dev, MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 0));
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0xfff));
+ for (i = 5; i < 16; i++)
+ mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x0) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x0));
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(0), 0x32013201);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(1), 0x32013201);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(2), 0x55555444);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(3), 0x55555444);
+
+ mt76_wr(dev, MT_DMASHDL_SCHED_SET(0), 0x76540132);
+ mt76_wr(dev, MT_DMASHDL_SCHED_SET(1), 0xFEDCBA98);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 483ad81b6eec..73e633d0d700 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -237,7 +237,8 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_EXT_EN);
if (dev->hif2)
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -694,7 +695,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
mt7996_dma_disable(dev, force);
- mt76_dma_wed_reset(&dev->mt76);
+ mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 0cf0d1fe420a..283df84f1b43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -493,7 +493,7 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
void mt7996_mac_init(struct mt7996_dev *dev)
{
-#define HIF_TXD_V2_1 4
+#define HIF_TXD_V2_1 0x21
int i;
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
@@ -507,11 +507,6 @@ void mt7996_mac_init(struct mt7996_dev *dev)
mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
}
- /* txs report queue */
- mt76_rmw_field(dev, MT_DMA_TCRF1(0), MT_DMA_TCRF1_QIDX, 0);
- mt76_rmw_field(dev, MT_DMA_TCRF1(1), MT_DMA_TCRF1_QIDX, 6);
- mt76_rmw_field(dev, MT_DMA_TCRF1(2), MT_DMA_TCRF1_QIDX, 0);
-
/* rro module init */
if (is_mt7996(&dev->mt76))
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
@@ -1012,11 +1007,12 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
- if (vif != NL80211_IFTYPE_AP)
+ if (!(vif == NL80211_IFTYPE_AP || vif == NL80211_IFTYPE_STATION))
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ if (vif == NL80211_IFTYPE_AP)
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 53258488d49f..0384fb059ddf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -732,6 +732,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
txwi[2] |= cpu_to_le32(val);
+
+ if (wcid->amsdu)
+ txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}
static void
@@ -862,8 +865,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD3_PROTECT_FRAME;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
val |= MT_TXD3_NO_ACK;
- if (wcid->amsdu)
- val |= MT_TXD3_HW_AMSDU;
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
@@ -1188,25 +1189,28 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool cck = false;
u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (skb) {
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
+ /* only report MPDU TXS */
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (skb) {
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len =
- !!(info->flags & IEEE80211_TX_STAT_ACK);
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len =
+ !!(info->flags & IEEE80211_TX_STAT_ACK);
- info->status.rates[0].idx = -1;
+ info->status.rates[0].idx = -1;
+ }
}
if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
@@ -2527,6 +2531,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
+ struct mt7996_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2538,8 +2570,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_twt_flow *flow;
- int flowid, table_id;
- u8 exp;
+ u8 flowid, table_id, exp;
if (mt7996_mac_check_twt_req(twt))
goto out;
@@ -2552,9 +2583,19 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
+ if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
+ goto unlock;
+ }
+
+ if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
@@ -2601,10 +2642,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
- twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
- (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 51deea84b642..f7da8d6dd903 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -350,9 +350,12 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
+ break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- break;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ break;
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
default:
@@ -1450,6 +1453,10 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7996_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7996_tx,
.start = mt7996_start,
.stop = mt7996_stop,
@@ -1495,6 +1502,6 @@ const struct ieee80211_ops mt7996_ops = {
.set_radar_background = mt7996_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7996_net_fill_forward_path,
- .net_setup_tc = mt76_net_setup_tc,
+ .net_setup_tc = mt76_wed_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 699be57309c2..b44abe2acc81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -341,7 +341,7 @@ mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -732,13 +732,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
+ struct tlv *ptlv = skb_put(skb, len);
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
+ ptlv->tag = cpu_to_le16(tag);
+ ptlv->len = cpu_to_le16(len);
return ptlv;
}
@@ -1240,6 +1237,9 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
static void
mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_vif *vif = container_of((void *)msta->vif,
+ struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
@@ -1259,8 +1259,17 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
- memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
+ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz,
+ sizeof(eht->mcs_map_bw20));
+ return;
+ }
+
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
@@ -2510,7 +2519,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
+ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
bcn->enable = en;
@@ -2579,8 +2588,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
info->band = band;
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
-
+ len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
discov = (struct bss_inband_discovery_tlv *)tlv;
@@ -3539,7 +3547,7 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
- skb_pull(skb, 64);
+ skb_pull(skb, 48);
memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 36cacc495c75..43468bcaffc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -800,10 +800,10 @@ enum {
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct tlv))
-#define MT7996_MAX_BEACON_SIZE 1342
+#define MT7996_MAX_BEACON_SIZE 1338
#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct bss_bcn_content_tlv) + \
- MT_TXD_SIZE + \
+ 4 + MT_TXD_SIZE + \
sizeof(struct bss_bcn_cntdwn_tlv) + \
sizeof(struct bss_bcn_mbss_tlv))
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 9f2abfa273c9..304e5fd14803 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -140,7 +140,6 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
- dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
MT_HIF_REMAP_L1_MASK,
FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
@@ -155,7 +154,6 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
- dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
MT_HIF_REMAP_L2_MASK,
FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
@@ -165,26 +163,10 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
return MT_HIF_REMAP_BASE_L2 + offset;
}
-static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
-{
- /* remap to ori status */
- if (unlikely(dev->reg_l1_backup)) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
- dev->reg_l1_backup = 0;
- }
-
- if (dev->reg_l2_backup) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
- dev->reg_l2_backup = 0;
- }
-}
-
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
- mt7996_reg_remap_restore(dev);
-
if (addr < 0x100000)
return addr;
@@ -201,6 +183,11 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
return dev->reg.map[i].mapped + ofs;
}
+ return 0;
+}
+
+static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -225,28 +212,60 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7996_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7996_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset), val;
+
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
- return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7996_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
- dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7996_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
+
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7996_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
- return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
+ return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@@ -391,13 +410,13 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.amsdu_max_len = 1536;
wed->wlan.init_buf = mt7996_wed_init_buf;
- wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
- wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_wed_offload_disable;
if (!hif2) {
wed->wlan.reset = mt7996_mmio_wed_reset;
- wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_wed_reset_complete;
}
if (mtk_wed_device_attach(wed))
@@ -421,6 +440,7 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7996_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7990:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index bc73bcb47bf0..36d1f247d55a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -53,6 +53,7 @@
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
+#define MT7996_MIN_TWT_DUR 64
#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
@@ -320,12 +321,11 @@ struct mt7996_dev {
struct rchan *relay_fwlog;
struct {
- u8 table_mask;
+ u16 table_mask;
u8 n_agrt;
} twt;
- u32 reg_l1_backup;
- u32 reg_l2_backup;
+ spinlock_t reg_lock;
u8 wtbl_size_group;
};
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 5a0bcb5071bd..342c3aea549d 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -767,7 +767,7 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
return;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -872,9 +872,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (err < 0)
return err;
- mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
- q->entry[idx].urb, mt76u_complete_tx,
- &q->entry[idx]);
+ mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
+ mt76u_complete_tx, &q->entry[idx]);
q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
@@ -906,9 +905,13 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
-static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+static void
+mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
- if (mt76_chip(dev) == 0x7663) {
+ u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;
+
+ switch (mt76_chip(dev)) {
+ case 0x7663: {
static const u8 lmac_queue_map[] = {
/* ac to lmac mapping */
[IEEE80211_AC_BK] = 0,
@@ -917,33 +920,36 @@ static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
[IEEE80211_AC_VO] = 4,
};
- if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
- return 1; /* BE */
-
- return lmac_queue_map[ac];
+ q->hw_idx = lmac_queue_map[ac];
+ q->ep = q->hw_idx + 1;
+ break;
+ }
+ case 0x7961:
+ case 0x7925:
+ q->hw_idx = mt76_ac_to_hwq(ac);
+ q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
+ break;
+ default:
+ q->hw_idx = mt76_ac_to_hwq(ac);
+ q->ep = q->hw_idx + 1;
+ break;
}
-
- return mt76_ac_to_hwq(ac);
}
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
- struct mt76_queue *q;
- int i, j, err;
+ int i;
for (i = 0; i <= MT_TXQ_PSD; i++) {
- if (i >= IEEE80211_NUM_ACS) {
- dev->phy.q_tx[i] = dev->phy.q_tx[0];
- continue;
- }
+ struct mt76_queue *q;
+ int j, err;
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
- q->hw_idx = mt76u_ac_to_hwq(dev, i);
-
+ mt76u_ac_to_hwq(dev, q, i);
dev->phy.q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
@@ -969,7 +975,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
mt76_worker_teardown(&dev->usb.status_worker);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
struct mt76_queue *q;
int j;
@@ -999,7 +1005,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
dev_err(dev->dev, "timed out waiting for pending tx\n");
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -1013,7 +1019,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
/* On device removal we might queue skb's, but mt76u_tx_kick()
* will fail to submit urb, cleanup those skb's manually.
*/
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
diff --git a/drivers/net/wireless/mediatek/mt76/wed.c b/drivers/net/wireless/mediatek/mt76/wed.c
new file mode 100644
index 000000000000..f89e4537555c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/wed.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "mt76.h"
+#include "dma.h"
+
+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ int i;
+
+ for (i = 0; i < dev->rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(dev, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->ptr = NULL;
+
+ mt76_put_rxwi(dev, t);
+ }
+
+ mt76_free_pending_rxwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, len = SKB_WITH_OVERHEAD(q->buf_size);
+ struct mt76_txwi_cache *t = NULL;
+
+ for (i = 0; i < size; i++) {
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
+ int token;
+ void *buf;
+
+ t = mt76_get_rxwi(dev);
+ if (!t)
+ goto unmap;
+
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
+ goto unmap;
+
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(dev, buf, t, addr);
+ if (token < 0) {
+ mt76_put_page_pool_buf(buf, false);
+ goto unmap;
+ }
+
+ token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
+#endif
+ desc->token |= cpu_to_le32(token);
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ if (t)
+ mt76_put_rxwi(dev, t);
+ mt76_wed_release_rx_buf(wed);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);
+
+int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->token_lock);
+
+ return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_offload_enable);
+
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
+{
+ int ret = 0, type, ring;
+ u16 flags;
+
+ if (!q || !q->ndesc)
+ return -EINVAL;
+
+ flags = q->flags;
+ if (!q->wed || !mtk_wed_device_active(q->wed))
+ q->flags &= ~MT_QFLAG_WED;
+
+ if (!(q->flags & MT_QFLAG_WED))
+ return 0;
+
+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
+ ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
+
+ switch (type) {
+ case MT76_WED_Q_TX:
+ ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
+ reset);
+ if (!ret)
+ q->wed_regs = q->wed->tx_ring[ring].reg_base;
+ break;
+ case MT76_WED_Q_TXFREE:
+ /* WED txfree queue needs ring to be initialized before setup */
+ q->flags = 0;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+
+ ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
+ if (!ret)
+ q->wed_regs = q->wed->txfree_ring.reg_base;
+ break;
+ case MT76_WED_Q_RX:
+ ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
+ reset);
+ if (!ret)
+ q->wed_regs = q->wed->rx_ring[ring].reg_base;
+ break;
+ case MT76_WED_RRO_Q_DATA:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_MSDU_PG:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_IND:
+ q->flags &= ~MT_QFLAG_WED;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ q->flags = flags;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+void mt76_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = dev->drv->token_size;
+ spin_unlock_bh(&dev->token_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);
+
+void mt76_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_reset_complete);
+
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mtk_wed_device *wed = &phy->dev->mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -EOPNOTSUPP;
+
+ return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc);
+
+void mt76_wed_dma_reset(struct mt76_dev *dev)
+{
+ struct mt76_mmio *mmio = &dev->mmio;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
+ return;
+
+ complete(&mmio->wed_reset);
+
+ if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
+ dev_err(dev->dev, "wed reset complete timeout\n");
+}
+EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index c8d332456a6b..a7330576486b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -405,6 +405,10 @@ out:
}
const struct ieee80211_ops mt7601u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7601u_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mt7601u_start,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index ad2509d8c99a..089102ed9ae5 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -356,7 +356,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len);
vif->auth.ssid.ssid_len = sme->ssid_len;
}
- vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]);
+ vif->auth.key_mgmt_suite = sme->crypto.akm_suites[0];
ether_addr_copy(vif->auth.bssid, sme->bssid);
break;
@@ -1518,7 +1518,7 @@ static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
struct wilc_vif *vif;
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->iftype == type)
return vif;
}
@@ -1609,7 +1609,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- wilc_set_operation_mode(vif, 0, 0, 0);
mutex_lock(&wl->vif_mutex);
list_del_rcu(&vif->list);
wl->vif_num--;
@@ -1804,15 +1803,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);
+ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ wiphy_name(wl->wiphy));
+ if (!wl->hif_workqueue) {
+ ret = -ENOMEM;
+ goto free_cfg;
+ }
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto free_cfg;
+ goto free_hq;
}
return 0;
+free_hq:
+ destroy_workqueue(wl->hif_workqueue);
+
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 839f142663e8..f1085ccb7eed 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -107,7 +107,7 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->idx == index)
return vif;
}
@@ -377,38 +377,49 @@ struct wilc_join_bss_param *
wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0, ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -419,7 +430,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -434,11 +445,11 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -462,7 +473,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -477,13 +488,13 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
@@ -517,6 +528,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
@@ -1555,26 +1567,28 @@ int wilc_deinit(struct wilc_vif *vif)
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
+ hif_drv = vif->hif_drv;
if (!hif_drv) {
netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_ntwrk_info, false);
if (IS_ERR(msg))
- return;
+ goto out;
msg->body.net_info.frame_len = get_unaligned_le16(&buffer[6]) - 1;
msg->body.net_info.rssi = buffer[8];
@@ -1583,7 +1597,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
GFP_KERNEL);
if (!msg->body.net_info.mgmt) {
kfree(msg);
- return;
+ goto out;
}
result = wilc_enqueue_work(msg);
@@ -1592,43 +1606,41 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg->body.net_info.mgmt);
kfree(msg);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
- if (!vif) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (!vif)
+ goto out;
hif_drv = vif->hif_drv;
if (!hif_drv) {
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
if (!hif_drv->conn_info.conn_result) {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false);
- if (IS_ERR(msg)) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (IS_ERR(msg))
+ goto out;
msg->body.mac_info.status = buffer[7];
result = wilc_enqueue_work(msg);
@@ -1636,32 +1648,36 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
kfree(msg);
}
-
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
mutex_unlock(&wilc->deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- int id;
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
- if (!hif_drv)
- return;
+ hif_drv = vif->hif_drv;
+ if (!hif_drv) {
+ goto out;
+ }
if (hif_drv->usr_scan_req.scan_result) {
struct host_if_msg *msg;
msg = wilc_alloc_work(vif, handle_scan_complete, false);
if (IS_ERR(msg))
- return;
+ goto out;
result = wilc_enqueue_work(msg);
if (result) {
@@ -1670,6 +1686,8 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 81e8f25863f5..710e29bea560 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -96,7 +96,7 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct wilc_vif *vif;
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
@@ -132,7 +132,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
@@ -140,6 +140,19 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
+static void wilc_wake_tx_queues(struct wilc *wl)
+{
+ int srcu_idx;
+ struct wilc_vif *ifc;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ wilc_for_each_vif(wl, ifc) {
+ if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
+ netif_wake_queue(ifc->ndev);
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+}
+
static int wilc_txq_task(void *vp)
{
int ret;
@@ -160,17 +173,7 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int srcu_idx;
- struct wilc_vif *ifc;
-
- srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(ifc, &wl->vif_list,
- list) {
- if (ifc->mac_opened &&
- netif_queue_stopped(ifc->ndev))
- netif_wake_queue(ifc->ndev);
- }
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ wilc_wake_tx_queues(wl);
}
if (ret != WILC_VMM_ENTRY_FULL_RETRY)
break;
@@ -284,7 +287,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, &b, 1, 0, 0))
goto fail;
- b = WILC_FW_PREAMBLE_SHORT;
+ b = WILC_FW_PREAMBLE_AUTO;
if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, &b, 1, 0, 0))
goto fail;
@@ -416,7 +419,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
b = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1,
- 1, 1))
+ 1, 0))
goto fail;
return 0;
@@ -665,7 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
@@ -768,7 +771,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
@@ -811,19 +814,21 @@ static int wilc_mac_close(struct net_device *ndev)
void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
u32 pkt_offset)
{
- unsigned int frame_len = 0;
- int stats;
unsigned char *buff_to_send = NULL;
- struct sk_buff *skb;
struct net_device *wilc_netdev;
+ unsigned int frame_len = 0;
struct wilc_vif *vif;
+ struct sk_buff *skb;
+ int srcu_idx;
+ int stats;
if (!wilc)
return;
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
- return;
+ goto out;
buff += pkt_offset;
vif = netdev_priv(wilc_netdev);
@@ -834,7 +839,7 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
skb = dev_alloc_skb(frame_len);
if (!skb)
- return;
+ goto out;
skb->dev = wilc_netdev;
@@ -847,6 +852,8 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
stats = netif_rx(skb);
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
@@ -855,7 +862,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
u32 type_bit = BIT(type >> 4);
@@ -890,8 +897,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- struct wilc_vif *vif;
- int srcu_idx, ifc_cnt = 0;
+ struct wilc_vif *vif, *vif_tmp;
if (!wilc)
return;
@@ -901,32 +907,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
+ mutex_lock(&wilc->vif_mutex);
+ list_del_rcu(&vif->list);
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
destroy_workqueue(wilc->hif_workqueue);
- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
- mutex_lock(&wilc->vif_mutex);
- if (wilc->vif_num <= 0) {
- mutex_unlock(&wilc->vif_mutex);
- break;
- }
- vif = wilc_get_wl_to_vif(wilc);
- if (!IS_ERR(vif))
- list_del_rcu(&vif->list);
-
- wilc->vif_num--;
- mutex_unlock(&wilc->vif_mutex);
- synchronize_srcu(&wilc->srcu);
- ifc_cnt++;
- }
-
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
wiphy_unregister(wilc->wiphy);
@@ -941,7 +934,7 @@ static u8 wilc_get_available_idx(struct wilc *wl)
int srcu_idx;
srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
@@ -989,13 +982,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
goto error;
}
- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
- ndev->name);
- if (!wl->hif_workqueue) {
- ret = -ENOMEM;
- goto unregister_netdev;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
@@ -1008,12 +994,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
-unregister_netdev:
+error:
if (rtnl_locked)
cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
- error:
free_netdev(ndev);
return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index aafe3dc44ac6..5937d6d45695 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -13,6 +13,7 @@
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
+#include <linux/rculist.h>
#include "hif.h"
#include "wlan.h"
@@ -29,6 +30,11 @@
#define TX_BACKOFF_WEIGHT_MS 1
+#define wilc_for_each_vif(w, v) \
+ struct wilc *_w = w; \
+ list_for_each_entry_srcu(v, &_w->vif_list, list, \
+ srcu_read_lock_held(&_w->srcu))
+
struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 1d8b241ce43c..61c3572ce321 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(enable_crc16,
#define WILC_SPI_RSP_HDR_EXTRA_DATA 8
struct wilc_spi {
- bool isinit; /* true if SPI protocol has been configured */
+ bool isinit; /* true if wilc_spi_init was successful */
bool probing_crc; /* true if we're probing chip's CRC config */
bool crc7_enabled; /* true if crc7 is currently enabled */
bool crc16_enabled; /* true if crc16 is currently enabled */
@@ -55,6 +55,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
static int wilc_spi_reset(struct wilc *wilc);
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc);
+static int wilc_validate_chipid(struct wilc *wilc);
/********************************************
*
@@ -192,11 +194,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* assert RESET: */
- gpiod_set_value(gpios->reset, 1);
- } else {
/* deassert RESET: */
gpiod_set_value(gpios->reset, 0);
+ } else {
+ /* assert RESET: */
+ gpiod_set_value(gpios->reset, 1);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
@@ -232,8 +234,27 @@ static int wilc_bus_probe(struct spi_device *spi)
}
clk_prepare_enable(wilc->rtc_clk);
+ dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
+ enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
+
+ /* we need power to configure the bus protocol and to read the chip id: */
+
+ wilc_wlan_power(wilc, true);
+
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret)
+ goto power_down;
+
+ ret = wilc_validate_chipid(wilc);
+ if (ret)
+ goto power_down;
+
+ wilc_wlan_power(wilc, false);
return 0;
+power_down:
+ clk_disable_unprepare(wilc->rtc_clk);
+ wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
free:
@@ -301,7 +322,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -344,7 +364,6 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -382,8 +401,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
-
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
if (ret < 0)
@@ -477,7 +494,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
********************************************/
static u8 wilc_get_crc7(u8 *buffer, u32 len)
{
- return crc7_be(0xfe, buffer, len);
+ return crc7_be(0xfe, buffer, len) | 0x01;
}
static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
@@ -1106,26 +1123,34 @@ static int wilc_spi_deinit(struct wilc *wilc)
static int wilc_spi_init(struct wilc *wilc, bool resume)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
- u32 reg;
- u32 chipid;
- int ret, i;
+ int ret;
if (spi_priv->isinit) {
/* Confirm we can read chipid register without error: */
- ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
- if (ret == 0)
+ if (wilc_validate_chipid(wilc) == 0)
return 0;
-
- dev_err(&spi->dev, "Fail cmd read chip id...\n");
}
wilc_wlan_power(wilc, true);
- /*
- * configure protocol
- */
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret) {
+ wilc_wlan_power(wilc, false);
+ return ret;
+ }
+
+ spi_priv->isinit = true;
+
+ return 0;
+}
+
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u32 reg;
+ int ret, i;
/*
* Infer the CRC settings that are currently in effect. This
@@ -1177,6 +1202,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->probing_crc = false;
+ return 0;
+}
+
+static int wilc_validate_chipid(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ u32 chipid;
+ int ret;
+
/*
* make sure can read chip id without protocol error
*/
@@ -1185,9 +1219,10 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
return ret;
}
-
- spi_priv->isinit = true;
-
+ if (!is_wilc1000(chipid)) {
+ dev_err(&spi->dev, "Unknown chip id 0x%x\n", chipid);
+ return -ENODEV;
+ }
return 0;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 9eb115c79c90..a9e872a7b2c3 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -12,11 +12,6 @@
#define WAKE_UP_TRIAL_RETRY 10000
-static inline bool is_wilc1000(u32 id)
-{
- return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
-}
-
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
@@ -730,7 +725,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list)
+ wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
srcu_read_unlock(&wilc->srcu, srcu_idx);
@@ -1198,27 +1193,32 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (ret) {
- netdev_err(vif->ndev, "Error while reading reg\n");
+ ret = wilc->hif_func->hif_read_reg(wilc, GLOBAL_MODE_CONTROL, &reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
- (reg | WILC_ABORT_REQ_BIT));
- if (ret) {
- netdev_err(vif->ndev, "Error while writing reg\n");
+ reg &= ~WILC_GLOBAL_MODE_ENABLE_WIFI;
+ ret = wilc->hif_func->hif_write_reg(wilc, GLOBAL_MODE_CONTROL, reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc, PWR_SEQ_MISC_CTRL, &reg);
+ if (ret)
+ goto release;
+
+ reg &= ~WILC_PWR_SEQ_ENABLE_WIFI_SLEEP;
+ ret = wilc->hif_func->hif_write_reg(wilc, PWR_SEQ_MISC_CTRL, reg);
+ if (ret)
+ goto release;
+
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
goto release;
}
- reg = BIT(0);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
+ (reg | WILC_ABORT_REQ_BIT));
if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
goto release;
@@ -1410,7 +1410,7 @@ static int init_chip(struct net_device *dev)
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
chipid = wilc_get_chipid(wilc, true);
@@ -1440,7 +1440,7 @@ static int init_chip(struct net_device *dev)
}
release:
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
return ret;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index a72cd5cac81d..54643d8fef04 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -156,6 +156,12 @@
#define WILC_GP_REG_0 0x149c
#define WILC_GP_REG_1 0x14a0
+#define GLOBAL_MODE_CONTROL 0x1614
+#define PWR_SEQ_MISC_CTRL 0x3008
+
+#define WILC_GLOBAL_MODE_ENABLE_WIFI BIT(0)
+#define WILC_PWR_SEQ_ENABLE_WIFI_SLEEP BIT(28)
+
#define WILC_HAVE_SDIO_IRQ_GPIO BIT(0)
#define WILC_HAVE_USE_PMU BIT(1)
#define WILC_HAVE_SLEEP_CLK_SRC_RTC BIT(2)
@@ -403,6 +409,11 @@ struct wilc_cfg_rsp {
struct wilc_vif;
+static inline bool is_wilc1000(u32 id)
+{
+ return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
+}
+
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
int wilc_wlan_start(struct wilc *wilc);
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 506d2f31efb5..641f847d47ab 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
@@ -685,6 +684,10 @@ static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
}
static const struct ieee80211_ops plfxlc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = plfxlc_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = plfxlc_op_start,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 3b283e93a13e..76b07db284f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
wiphy_lock(priv_to_wiphy(vif->mac));
- cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
wiphy_unlock(priv_to_wiphy(vif->mac));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 13dd672b825e..42e21e9f303b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1705,6 +1705,10 @@ static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2400pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index ecddda4c471e..36ddc5a69fa4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -2003,6 +2003,10 @@ static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2500pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 13fdcff0ad66..09923765e2db 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -1794,6 +1794,10 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2500usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index aaf31857ae1e..3bb81bcff0ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -10946,13 +10946,13 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
reg = rt2800_register_read_lock(rt2x00dev, efuse_data3_reg);
/* The returned value is in CPU order, but eeprom is le */
- *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data2_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data1_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data0_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index dcb56f708a5f..14c45aba836f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -287,6 +287,10 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7118d4f9038d..701ba54bf3e5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -132,6 +132,10 @@ static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
}
static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b2a8e75a901b..160bef79acdb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -629,6 +629,10 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
index ad95f9eba301..1000fbfb94b8 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
@@ -197,10 +197,7 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
transfer += header_length;
} else {
skb_push(skb, iv_len + align);
- if (align < icv_len)
- skb_put(skb, icv_len - align);
- else if (align > icv_len)
- skb_trim(skb, rxdesc->size + iv_len + icv_len);
+ skb_put(skb, icv_len - align);
/* Move ieee80211 header */
memmove(skb->data + transfer,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 483723bf514b..d1cd5694e3c7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2872,6 +2872,10 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt61pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index dfa9d5213898..b79dda952a33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2291,6 +2291,10 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt73usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index f6c25a52b69a..77b6cb7e1f6b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1607,6 +1607,10 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops rtl8180_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8180_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8180_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 04945f905d6d..78d99afa373d 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1377,6 +1377,10 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev,
static const struct ieee80211_ops rtl8187_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8187_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8187_start,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4695fb4e2d2d..fd92d23c43d9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -6,6 +6,7 @@
*/
#include <asm/byteorder.h>
+#include <linux/average.h>
#define RTL8XXXU_DEBUG_REG_WRITE 0x01
#define RTL8XXXU_DEBUG_REG_READ 0x02
@@ -498,6 +499,7 @@ struct rtl8xxxu_txdesc40 {
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
#define TXDESC_NAVUSEHDR BIT(20)
+#define TXDESC_EN_DESC_ID BIT(21)
#define TXDESC_SEC_RC4 0x00400000
#define TXDESC_SEC_AES 0x00c00000
#define TXDESC_PKT_OFFSET_SHIFT 26
@@ -1774,6 +1776,8 @@ struct rtl8xxxu_cfo_tracking {
#define RTL8XXXU_HW_LED_CONTROL 2
#define RTL8XXXU_MAX_MAC_ID_NUM 128
#define RTL8XXXU_BC_MC_MACID 0
+#define RTL8XXXU_BC_MC_MACID1 1
+#define RTL8XXXU_MAX_SEC_CAM_NUM 64
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
@@ -1855,6 +1859,8 @@ struct rtl8xxxu_priv {
int next_mbox;
int nr_out_eps;
+ /* Ensure no added or deleted stas while iterating */
+ struct mutex sta_mutex;
struct mutex h2c_mutex;
/* Protect the indirect register accesses of RTL8710BU. */
struct mutex syson_indirect_access_mutex;
@@ -1889,18 +1895,14 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
- u8 rssi_level;
DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
- /*
- * Only one virtual interface permitted because only STA mode
- * is supported and no iface_combinations are provided.
- */
- struct ieee80211_vif *vif;
+
+ struct ieee80211_vif *vifs[2];
struct delayed_work ra_watchdog;
struct work_struct c2hcmd_work;
struct sk_buff_head c2hcmd_queue;
- struct work_struct update_beacon_work;
+ struct delayed_work update_beacon_work;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
@@ -1910,13 +1912,23 @@ struct rtl8xxxu_priv {
char led_name[32];
struct led_classdev led_cdev;
DECLARE_BITMAP(mac_id_map, RTL8XXXU_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(cam_map, RTL8XXXU_MAX_SEC_CAM_NUM);
};
+DECLARE_EWMA(rssi, 10, 16);
+
struct rtl8xxxu_sta_info {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
u8 macid;
+ struct ewma_rssi avg_rssi;
+ u8 rssi_level;
+};
+
+struct rtl8xxxu_vif {
+ int port_num;
+ u8 hw_key_idx;
};
struct rtl8xxxu_rx_urb {
@@ -1986,11 +1998,13 @@ struct rtl8xxxu_fileops {
u8 init_reg_rxfltmap:1;
u8 init_reg_pkt_life_time:1;
u8 init_reg_hmtfr:1;
+ u8 supports_concurrent:1;
u8 ampdu_max_time;
u8 ustime_tsf_edca;
u16 max_aggr_num;
u8 supports_ap:1;
u16 max_macid_num;
+ u16 max_sec_cam_num;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
index 6d0f975f891b..afe9cc1b49dc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -1699,7 +1699,7 @@ void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *s
/* We only use macid 0, so only the first item is relevant.
* AP mode will use more of them if it's ever implemented.
*/
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION)
+ if (!priv->vifs[0] || priv->vifs[0]->type == NL80211_IFTYPE_STATION)
items = 1;
for (macid = 0; macid < items; macid++) {
@@ -1882,6 +1882,7 @@ struct rtl8xxxu_fileops rtl8188eu_fops = {
.has_tx_report = 1,
.init_reg_pkt_life_time = 1,
.gen2_thermal_meter = 1,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
/*
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index 1e1c8fa194cb..464216d007ce 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -1751,6 +1751,8 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 16,
+ .supports_concurrent = 1,
.adda_1t_init = 0x03c00014,
.adda_1t_path_on = 0x03c00014,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index b30a9a513cb8..3ee7d8f87da6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -613,6 +613,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 47bcaec6f2db..63b73ace27ec 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1769,6 +1769,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.needs_full_init = 1,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
index 28e93835e05a..21e4204769d0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
@@ -2014,26 +2014,40 @@ static int rtl8192fu_led_brightness_set(struct led_classdev *led_cdev,
struct rtl8xxxu_priv *priv = container_of(led_cdev,
struct rtl8xxxu_priv,
led_cdev);
- u16 ledcfg;
+ u32 ledcfg;
/* Values obtained by observing the USB traffic from the Windows driver. */
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_0, 0x20080);
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_1, 0x1b0000);
- ledcfg = rtl8xxxu_read16(priv, REG_LEDCFG0);
+ ledcfg = rtl8xxxu_read32(priv, REG_LEDCFG0);
+
+ /* Comfast CF-826F uses LED1. Asus USB-N13 C1 uses LED0. Set both. */
+
+ u32p_replace_bits(&ledcfg, LED_GPIO_ENABLE, LEDCFG0_LED2EN);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED0_IO_MODE);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED1_IO_MODE);
if (brightness == LED_OFF) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
} else if (brightness == LED_ON) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7) | BIT(11);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED1SV);
} else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
- /* Value obtained by brute force. */
- ledcfg = BIT(8) | BIT(9);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
}
- rtl8xxxu_write16(priv, REG_LEDCFG0, ledcfg);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, ledcfg);
return 0;
}
@@ -2081,6 +2095,7 @@ struct rtl8xxxu_fileops rtl8192fu_fops = {
.max_aggr_num = 0x1f1f,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.trxff_boundary = 0x3f3f,
.pbp_rx = PBP_PAGE_SIZE_256,
.pbp_tx = PBP_PAGE_SIZE_256,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
index 871b8cca8a18..46d57510e9fc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
@@ -1877,6 +1877,7 @@ struct rtl8xxxu_fileops rtl8710bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x03c00016,
.adda_1t_path_on = 0x03c00016,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 15a30e496221..ad1bb9377ca2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -510,6 +510,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 954369ed6226..9640c841d20a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1744,6 +1744,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 180907319e8c..4a49f8f9d80f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1633,33 +1633,41 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
- enum nl80211_iftype linktype)
+ enum nl80211_iftype linktype, int port_num)
{
- u8 val8;
-
- val8 = rtl8xxxu_read8(priv, REG_MSR);
- val8 &= ~MSR_LINKTYPE_MASK;
+ u8 val8, type;
switch (linktype) {
case NL80211_IFTYPE_UNSPECIFIED:
- val8 |= MSR_LINKTYPE_NONE;
+ type = MSR_LINKTYPE_NONE;
break;
case NL80211_IFTYPE_ADHOC:
- val8 |= MSR_LINKTYPE_ADHOC;
+ type = MSR_LINKTYPE_ADHOC;
break;
case NL80211_IFTYPE_STATION:
- val8 |= MSR_LINKTYPE_STATION;
+ type = MSR_LINKTYPE_STATION;
break;
case NL80211_IFTYPE_AP:
- val8 |= MSR_LINKTYPE_AP;
+ type = MSR_LINKTYPE_AP;
break;
default:
- goto out;
+ return;
+ }
+
+ switch (port_num) {
+ case 0:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x0c;
+ val8 |= type;
+ break;
+ case 1:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x03;
+ val8 |= type << 2;
+ break;
+ default:
+ return;
}
rtl8xxxu_write8(priv, REG_MSR, val8);
-out:
- return;
}
static void
@@ -3572,27 +3580,47 @@ void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
-static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv, int port_num)
{
int i;
u16 reg;
- reg = REG_MACID;
+ switch (port_num) {
+ case 0:
+ reg = REG_MACID;
+ break;
+ case 1:
+ reg = REG_MACID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
- rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+ rtl8xxxu_write8(priv, reg + i, priv->vifs[port_num]->addr[i]);
return 0;
}
-static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid, int port_num)
{
int i;
u16 reg;
dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
- reg = REG_BSSID;
+ switch (port_num) {
+ case 0:
+ reg = REG_BSSID;
+ break;
+ case 1:
+ reg = REG_BSSID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
rtl8xxxu_write8(priv, reg + i, bssid[i]);
@@ -4025,10 +4053,13 @@ static inline u8 rtl8xxxu_get_macid(struct rtl8xxxu_priv *priv,
{
struct rtl8xxxu_sta_info *sta_info;
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION || !sta)
+ if (!sta)
return 0;
sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ if (!sta_info)
+ return 0;
+
return sta_info->macid;
}
@@ -4235,9 +4266,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
}
- rtl8xxxu_set_mac(priv);
- rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
-
/*
* Configure initial WMAC settings
*/
@@ -4511,6 +4539,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8188e_ra_info_init_all(&priv->ra_info);
set_bit(RTL8XXXU_BC_MC_MACID, priv->mac_id_map);
+ set_bit(RTL8XXXU_BC_MC_MACID1, priv->mac_id_map);
exit:
return ret;
@@ -4530,8 +4559,10 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
* This is a bit of a hack - the lower bits of the cipher
* suite selector happen to match the cipher index in the CAM
*/
- addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+ addr = key->hw_key_idx << CAM_CMD_KEY_SHIFT;
ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ctrl |= BIT(6);
for (j = 5; j >= 0; j--) {
switch (j) {
@@ -4574,7 +4605,7 @@ static int rtl8xxxu_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct rtl8xxxu_priv *priv = hw->priv;
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
return 0;
}
@@ -4839,10 +4870,9 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg);
- while (rate_cfg) {
- rate_cfg = (rate_cfg >> 1);
- rate_idx++;
- }
+ if (rate_cfg)
+ rate_idx = __fls(rate_cfg);
+
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
@@ -4888,14 +4918,20 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
u8 aifs, aifsn, sifs;
int i;
- if (priv->vif) {
+ for (i = 0; i < ARRAY_SIZE(priv->vifs); i++) {
struct ieee80211_sta *sta;
+ if (!priv->vifs[i])
+ continue;
+
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ sta = ieee80211_find_sta(priv->vifs[i], priv->vifs[i]->bss_conf.bssid);
if (sta)
wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
rcu_read_unlock();
+
+ if (wireless_mode)
+ break;
}
if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
@@ -4952,19 +4988,21 @@ static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changed)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_sta_info *sta_info;
struct ieee80211_sta *sta;
struct rtl8xxxu_ra_report *rarpt;
+ u8 val8, macid;
u32 val32;
- u8 val8;
rarpt = &priv->ra_report;
if (changed & BSS_CHANGED_ASSOC) {
dev_dbg(dev, "Changed ASSOC: %i!\n", vif->cfg.assoc);
- rtl8xxxu_set_linktype(priv, vif->type);
+ rtl8xxxu_set_linktype(priv, vif->type, rtlvif->port_num);
if (vif->cfg.assoc) {
u32 ramask;
@@ -4980,6 +5018,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rcu_read_unlock();
goto error;
}
+ macid = rtl8xxxu_get_macid(priv, sta);
if (sta->deflink.ht_cap.ht_supported)
dev_info(dev, "%s: HT supported\n", __func__);
@@ -5000,19 +5039,20 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bw = RATE_INFO_BW_40;
else
bw = RATE_INFO_BW_20;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
rcu_read_unlock();
rtl8xxxu_update_ra_report(rarpt, highest_rate, sgi, bw);
- priv->vif = vif;
- priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
-
priv->fops->update_rate_mask(priv, ramask, 0, sgi,
- bw == RATE_INFO_BW_40, 0);
+ bw == RATE_INFO_BW_40, macid);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
- rtl8xxxu_stop_tx_beacon(priv);
+ if (rtlvif->port_num == 0)
+ rtl8xxxu_stop_tx_beacon(priv);
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
@@ -5054,7 +5094,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID) {
dev_dbg(dev, "Changed BSSID!\n");
- rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+ rtl8xxxu_set_bssid(priv, bss_conf->bssid, rtlvif->port_num);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -5070,7 +5110,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changed & BSS_CHANGED_BEACON)
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
error:
return;
@@ -5079,11 +5119,12 @@ error:
static int rtl8xxxu_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "Start AP mode\n");
- rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid);
+ rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid, rtlvif->port_num);
rtl8xxxu_write16(priv, REG_BCN_INTERVAL, vif->bss_conf.beacon_int);
priv->fops->report_connect(priv, RTL8XXXU_BC_MC_MACID, 0, true);
@@ -5509,13 +5550,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
+ struct rtl8xxxu_vif *rtlvif = vif ? (struct rtl8xxxu_vif *)vif->drv_priv : NULL;
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
int tx_desc_size = priv->fops->tx_desc_size;
u8 macid;
int ret;
- bool ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false, bmc = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -5557,10 +5599,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
tx_desc->txdw0 =
TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
- is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+ bmc = true;
+ }
+
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+ macid = rtl8xxxu_get_macid(priv, sta);
if (tx_info->control.hw_key) {
switch (tx_info->control.hw_key->cipher) {
@@ -5575,6 +5621,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
default:
break;
}
+ if (bmc && rtlvif && rtlvif->hw_key_idx != 0xff) {
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_EN_DESC_ID);
+ macid = rtlvif->hw_key_idx;
+ }
}
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
@@ -5618,7 +5668,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- macid = rtl8xxxu_get_macid(priv, sta);
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate, macid);
@@ -5680,18 +5729,44 @@ static void rtl8xxxu_send_beacon_frame(struct ieee80211_hw *hw,
static void rtl8xxxu_update_beacon_work_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv =
- container_of(work, struct rtl8xxxu_priv, update_beacon_work);
+ container_of(work, struct rtl8xxxu_priv, update_beacon_work.work);
struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_vif *vif = priv->vif;
+ struct ieee80211_vif *vif = priv->vifs[0];
if (!vif) {
WARN_ONCE(true, "no vif to update beacon\n");
return;
}
+ if (vif->bss_conf.csa_active) {
+ if (ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_csa_finish(vif, 0);
+ return;
+ }
+ schedule_delayed_work(&priv->update_beacon_work,
+ msecs_to_jiffies(vif->bss_conf.beacon_int));
+ }
rtl8xxxu_send_beacon_frame(hw, vif);
}
+static inline bool rtl8xxxu_is_packet_match_bssid(struct rtl8xxxu_priv *priv,
+ struct ieee80211_hdr *hdr,
+ int port_num)
+{
+ return priv->vifs[port_num] &&
+ priv->vifs[port_num]->type == NL80211_IFTYPE_STATION &&
+ priv->vifs[port_num]->cfg.assoc &&
+ ether_addr_equal(priv->vifs[port_num]->bss_conf.bssid, hdr->addr2);
+}
+
+static inline bool rtl8xxxu_is_sta_sta(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc &&
+ priv->vifs[0]->type == NL80211_IFTYPE_STATION) &&
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc &&
+ priv->vifs[1]->type == NL80211_IFTYPE_STATION);
+}
+
void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
struct rtl8723au_phy_stats *phy_stats,
@@ -5708,12 +5783,11 @@ void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
rx_status->signal = priv->fops->cck_rssi(priv, phy_stats);
} else {
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
if (parse_cfo) {
priv->cfo_tracking.cfo_tail[0] = phy_stats->path_cfotail[0];
@@ -5748,12 +5822,11 @@ static void jaguar2_rx_parse_phystats_type1(struct rtl8xxxu_priv *priv,
bool crc_icv_err)
{
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
u8 pwdb_max = 0;
int rx_path;
@@ -6029,18 +6102,20 @@ void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
btcoex->bt_busy = false;
}
+static inline bool rtl8xxxu_is_assoc(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc) ||
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc);
+}
+
static
void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (!wifi_connected) {
+ if (!rtl8xxxu_is_assoc(priv)) {
rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
rtl8723bu_set_coex_with_type(priv, 0);
} else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
@@ -6058,15 +6133,11 @@ void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
static
void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (wifi_connected) {
+ if (rtl8xxxu_is_assoc(priv)) {
u32 val32 = 0;
u32 high_prio_tx = 0, high_prio_rx = 0;
@@ -6249,6 +6320,76 @@ static void rtl8188e_c2hcmd_callback(struct work_struct *work)
}
}
+#define rtl8xxxu_iterate_vifs_atomic(priv, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic((priv)->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+
+struct rtl8xxxu_rx_update_rssi_data {
+ struct rtl8xxxu_priv *priv;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ u8 *bssid;
+};
+
+static void rtl8xxxu_rx_update_rssi_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_rx_update_rssi_data *iter_data = data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = iter_data->hdr;
+ struct rtl8xxxu_priv *priv = iter_data->priv;
+ struct rtl8xxxu_sta_info *sta_info;
+ struct ieee80211_rx_status *rx_status = iter_data->rx_status;
+ u8 *bssid = iter_data->bssid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+ return;
+
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ sta = ieee80211_find_sta_by_ifaddr(priv->hw, hdr->addr2,
+ vif->addr);
+ if (!sta)
+ return;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ ewma_rssi_add(&sta_info->avg_rssi, -rx_status->signal);
+}
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+ u8 *bssid;
+
+ if (ieee80211_has_tods(fc))
+ bssid = hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ bssid = hdr->addr2;
+ else
+ bssid = hdr->addr3;
+
+ return bssid;
+}
+
+static void rtl8xxxu_rx_update_rssi(struct rtl8xxxu_priv *priv,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtl8xxxu_rx_update_rssi_data data = {};
+
+ if (ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.priv = priv;
+ data.hdr = hdr;
+ data.rx_status = rx_status;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtl8xxxu_iterate_vifs_atomic(priv, rtl8xxxu_rx_update_rssi_iter, &data);
+}
+
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
@@ -6308,18 +6449,26 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb_queue_tail(&priv->c2hcmd_queue, skb);
schedule_work(&priv->c2hcmd_work);
} else {
+ struct ieee80211_hdr *hdr;
+
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
skb_pull(skb, drvinfo_sz + desc_shift);
skb_trim(skb, pkt_len);
- if (rx_desc->phy_stats)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(
priv, rx_status, phy_stats,
rx_desc->rxmcs,
- (struct ieee80211_hdr *)skb->data,
+ hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6416,10 +6565,15 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (rx_desc->phy_stats)
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs, hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6563,29 +6717,123 @@ error:
return ret;
}
+static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
+{
+ u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
+ u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
+ struct rtl8xxxu_vif *rtlvif;
+ struct ieee80211_vif *vif;
+ u8 tsftr[8], tsftr_1[8];
+ int i;
+
+ msr = rtl8xxxu_read8(priv, REG_MSR);
+ bcn_ctrl = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ bcn_ctrl_1 = rtl8xxxu_read8(priv, REG_BEACON_CTRL_1);
+
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ atimwnd[i] = rtl8xxxu_read8(priv, REG_ATIMWND + i);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ atimwnd_1[i] = rtl8xxxu_read8(priv, REG_ATIMWND_1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr[i] = rtl8xxxu_read8(priv, REG_TSFTR + i);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr_1[i] = rtl8xxxu_read8(priv, REG_TSFTR1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ macid[i] = rtl8xxxu_read8(priv, REG_MACID + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ bssid[i] = rtl8xxxu_read8(priv, REG_BSSID + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ macid_1[i] = rtl8xxxu_read8(priv, REG_MACID1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ bssid_1[i] = rtl8xxxu_read8(priv, REG_BSSID1 + i);
+
+ /* disable bcn function, disable update TSF */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, (bcn_ctrl &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, (bcn_ctrl_1 &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+
+ /* switch msr */
+ msr = (msr & 0xf0) | ((msr & 0x03) << 2) | ((msr & 0x0c) >> 2);
+ rtl8xxxu_write8(priv, REG_MSR, msr);
+
+ /* write port0 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1 & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND + i, atimwnd_1[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr_1); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR + i, tsftr_1[i]);
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ rtl8xxxu_write8(priv, REG_MACID + i, macid_1[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ rtl8xxxu_write8(priv, REG_BSSID + i, bssid_1[i]);
+
+ /* write port1 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND_1 + i, atimwnd[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR1 + i, tsftr[i]);
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ rtl8xxxu_write8(priv, REG_MACID1 + i, macid[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ rtl8xxxu_write8(priv, REG_BSSID1 + i, bssid[i]);
+
+ /* write bcn ctl */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
+
+ vif = priv->vifs[0];
+ priv->vifs[0] = priv->vifs[1];
+ priv->vifs[1] = vif;
+
+ /* priv->vifs[0] is NULL here, based on how this function is currently
+ * called from rtl8xxxu_add_interface().
+ * If this function is used for a different scenario in the future,
+ * check whether vifs[0] or vifs[1] can be NULL and, if necessary, add
+ * code to set port_num = 1.
+ */
+ rtlvif = (struct rtl8xxxu_vif *)priv->vifs[1]->drv_priv;
+ rtlvif->port_num = 1;
+}
+
static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
- int ret;
+ int port_num;
u8 val8;
- if (!priv->vif)
- priv->vif = vif;
+ if (!priv->vifs[0])
+ port_num = 0;
+ else if (!priv->vifs[1])
+ port_num = 1;
else
return -EOPNOTSUPP;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- rtl8xxxu_stop_tx_beacon(priv);
+ if (port_num == 0) {
+ rtl8xxxu_stop_tx_beacon(priv);
- val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
- val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
- BEACON_DISABLE_TSF_UPDATE;
- rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
- ret = 0;
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+ BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+ }
break;
case NL80211_IFTYPE_AP:
+ if (port_num == 1) {
+ rtl8xxxu_switch_ports(priv);
+ port_num = 0;
+ }
+
rtl8xxxu_write8(priv, REG_BEACON_CTRL,
BEACON_DISABLE_TSF_UPDATE | BEACON_CTRL_MBSSID);
rtl8xxxu_write8(priv, REG_ATIMWND, 0x0c); /* 12ms */
@@ -6602,29 +6850,31 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
val8 = rtl8xxxu_read8(priv, REG_CCK_CHECK);
val8 &= ~BIT_BCN_PORT_SEL;
rtl8xxxu_write8(priv, REG_CCK_CHECK, val8);
-
- ret = 0;
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- rtl8xxxu_set_linktype(priv, vif->type);
+ priv->vifs[port_num] = vif;
+ rtlvif->port_num = port_num;
+ rtlvif->hw_key_idx = 0xff;
+
+ rtl8xxxu_set_linktype(priv, vif->type, port_num);
ether_addr_copy(priv->mac_addr, vif->addr);
- rtl8xxxu_set_mac(priv);
+ rtl8xxxu_set_mac(priv, port_num);
- return ret;
+ return 0;
}
static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
- if (priv->vif)
- priv->vif = NULL;
+ priv->vifs[rtlvif->port_num] = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -6746,8 +6996,8 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
else
rcr |= RCR_CHECK_BSSID_BEACON | RCR_CHECK_BSSID_MATCH;
- if (priv->vif && priv->vif->type == NL80211_IFTYPE_AP)
- rcr &= ~RCR_CHECK_BSSID_MATCH;
+ if (priv->vifs[0] && priv->vifs[0]->type == NL80211_IFTYPE_AP)
+ rcr &= ~(RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON);
if (*total_flags & FIF_CONTROL)
rcr |= RCR_ACCEPT_CTRL_FRAME;
@@ -6784,11 +7034,19 @@ static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
return 0;
}
+static int rtl8xxxu_get_free_sec_cam(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ return find_first_zero_bit(priv->cam_map, priv->fops->max_sec_cam_num);
+}
+
static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 mac_addr[ETH_ALEN];
@@ -6800,9 +7058,6 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
__func__, cmd, key->cipher, key->keyidx);
- if (vif->type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
if (key->keyidx > 3)
return -EOPNOTSUPP;
@@ -6826,7 +7081,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ether_addr_copy(mac_addr, sta->addr);
} else {
dev_dbg(dev, "%s: group key\n", __func__);
- eth_broadcast_addr(mac_addr);
+ ether_addr_copy(mac_addr, vif->bss_conf.bssid);
}
val16 = rtl8xxxu_read16(priv, REG_CR);
@@ -6840,16 +7095,28 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- key->hw_key_idx = key->keyidx;
+
+ retval = rtl8xxxu_get_free_sec_cam(hw);
+ if (retval < 0)
+ return -EOPNOTSUPP;
+
+ key->hw_key_idx = retval;
+
+ if (vif->type == NL80211_IFTYPE_AP && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ rtlvif->hw_key_idx = key->hw_key_idx;
+
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
rtl8xxxu_cam_write(priv, key, mac_addr);
+ set_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
case DISABLE_KEY:
rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
- key->keyidx << CAM_CMD_KEY_SHIFT;
+ key->hw_key_idx << CAM_CMD_KEY_SHIFT;
rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+ rtlvif->hw_key_idx = 0xff;
+ clear_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
default:
@@ -6930,6 +7197,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
int signal, struct ieee80211_sta *sta,
bool force)
{
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct ieee80211_hw *hw = priv->hw;
u16 wireless_mode;
u8 rssi_level, ratr_idx;
@@ -6938,7 +7206,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u8 go_up_gap = 5;
u8 macid = rtl8xxxu_get_macid(priv, sta);
- rssi_level = priv->rssi_level;
+ rssi_level = sta_info->rssi_level;
snr = rtl8xxxu_signal_to_snr(signal);
snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
@@ -6963,18 +7231,16 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
else
rssi_level = RTL8XXXU_RATR_STA_LOW;
- if (rssi_level != priv->rssi_level || force) {
+ if (rssi_level != sta_info->rssi_level || force) {
int sgi = 0;
u32 rate_bitmap = 0;
- rcu_read_lock();
rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12) |
(sta->deflink.ht_cap.mcs.rx_mask[1] << 20);
if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
- rcu_read_unlock();
wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
switch (wireless_mode) {
@@ -7055,7 +7321,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
break;
}
- priv->rssi_level = rssi_level;
+ sta_info->rssi_level = rssi_level;
priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz, macid);
}
}
@@ -7085,7 +7351,7 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
int cfo_khz_a, cfo_khz_b, cfo_average;
int crystal_cap;
- if (!priv->vif || !priv->vif->cfg.assoc) {
+ if (!rtl8xxxu_is_assoc(priv)) {
/* Reset */
cfo->adjust = true;
@@ -7148,41 +7414,64 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
rtl8xxxu_set_atc_status(priv, abs(cfo_average) >= CFO_TH_ATC);
}
-static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+static void rtl8xxxu_ra_iter(void *data, struct ieee80211_sta *sta)
{
- struct ieee80211_vif *vif;
- struct rtl8xxxu_priv *priv;
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_priv *priv = data;
+ int signal = -ewma_rssi_read(&sta_info->avg_rssi);
- priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
- vif = priv->vif;
+ priv->fops->report_rssi(priv, rtl8xxxu_get_macid(priv, sta),
+ rtl8xxxu_signal_to_snr(signal));
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+}
- if (vif && vif->type == NL80211_IFTYPE_STATION) {
- int signal;
- struct ieee80211_sta *sta;
+struct rtl8xxxu_stas_entry {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+};
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- struct device *dev = &priv->udev->dev;
+struct rtl8xxxu_iter_stas_data {
+ struct rtl8xxxu_priv *priv;
+ struct list_head list;
+};
- dev_dbg(dev, "%s: no sta found\n", __func__);
- rcu_read_unlock();
- goto out;
- }
- rcu_read_unlock();
+static void rtl8xxxu_collect_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_iter_stas_data *iter_stas = data;
+ struct rtl8xxxu_stas_entry *stas_entry;
- signal = ieee80211_ave_rssi(vif);
+ stas_entry = kmalloc(sizeof(*stas_entry), GFP_ATOMIC);
+ if (!stas_entry)
+ return;
- priv->fops->report_rssi(priv, 0,
- rtl8xxxu_signal_to_snr(signal));
+ stas_entry->sta = sta;
+ list_add_tail(&stas_entry->list, &iter_stas->list);
+}
- if (priv->fops->set_crystal_cap)
- rtl8xxxu_track_cfo(priv);
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
- rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+ struct rtl8xxxu_iter_stas_data iter_data;
+ struct rtl8xxxu_stas_entry *sta_entry, *tmp;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ iter_data.priv = priv;
+ INIT_LIST_HEAD(&iter_data.list);
+
+ mutex_lock(&priv->sta_mutex);
+ ieee80211_iterate_stations_atomic(priv->hw, rtl8xxxu_collect_sta_iter,
+ &iter_data);
+ list_for_each_entry_safe(sta_entry, tmp, &iter_data.list, list) {
+ list_del_init(&sta_entry->list);
+ rtl8xxxu_ra_iter(priv, sta_entry->sta);
+ kfree(sta_entry);
}
+ mutex_unlock(&priv->sta_mutex);
+
+ if (priv->fops->set_crystal_cap)
+ rtl8xxxu_track_cfo(priv);
-out:
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
}
@@ -7304,7 +7593,9 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_work_sync(&priv->c2hcmd_work);
cancel_delayed_work_sync(&priv->ra_watchdog);
+ cancel_delayed_work_sync(&priv->update_beacon_work);
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
@@ -7315,16 +7606,34 @@ static int rtl8xxxu_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
+ ewma_rssi_init(&sta_info->avg_rssi);
if (vif->type == NL80211_IFTYPE_AP) {
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
sta_info->macid = rtl8xxxu_acquire_macid(priv);
- if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM)
+ if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM) {
+ mutex_unlock(&priv->sta_mutex);
return -ENOSPC;
+ }
rtl8xxxu_refresh_rate_mask(priv, 0, sta, true);
priv->fops->report_connect(priv, sta_info->macid, H2C_MACID_ROLE_STA, true);
+ } else {
+ switch (rtlvif->port_num) {
+ case 0:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID;
+ break;
+ case 1:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID1;
+ break;
+ default:
+ break;
+ }
}
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
@@ -7336,13 +7645,19 @@ static int rtl8xxxu_sta_remove(struct ieee80211_hw *hw,
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
if (vif->type == NL80211_IFTYPE_AP)
rtl8xxxu_release_macid(priv, sta_info->macid);
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
static const struct ieee80211_ops rtl8xxxu_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8xxxu_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = rtl8xxxu_add_interface,
@@ -7476,6 +7791,20 @@ static void rtl8xxxu_deinit_led(struct rtl8xxxu_priv *priv)
led_classdev_unregister(led);
}
+static const struct ieee80211_iface_limit rtl8xxxu_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+};
+
+static const struct ieee80211_iface_combination rtl8xxxu_combinations[] = {
+ {
+ .limits = rtl8xxxu_limits,
+ .n_limits = ARRAY_SIZE(rtl8xxxu_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+ },
+};
+
static int rtl8xxxu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -7522,7 +7851,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
untested = 0;
break;
case 0x2357:
- if (id->idProduct == 0x0109)
+ if (id->idProduct == 0x0109 || id->idProduct == 0x0135)
untested = 0;
break;
case 0x0b05:
@@ -7555,13 +7884,14 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
mutex_init(&priv->usb_buf_mutex);
mutex_init(&priv->syson_indirect_access_mutex);
mutex_init(&priv->h2c_mutex);
+ mutex_init(&priv->sta_mutex);
INIT_LIST_HEAD(&priv->tx_urb_free_list);
spin_lock_init(&priv->tx_urb_lock);
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- INIT_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
+ INIT_DELAYED_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -7611,6 +7941,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret)
goto err_set_intfdata;
+ hw->vif_data_size = sizeof(struct rtl8xxxu_vif);
+
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
if (priv->fops->max_macid_num)
@@ -7620,6 +7952,13 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
hw->queues = 4;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ if (priv->fops->supports_concurrent) {
+ hw->wiphy->iface_combinations = rtl8xxxu_combinations;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtl8xxxu_combinations);
+ }
+
sband = &rtl8xxxu_supported_band;
sband->ht_cap.ht_supported = true;
sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
@@ -7806,6 +8145,9 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192fu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x318b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192fu_fops},
+/* TP-Link TL-WN823N V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0135, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192fu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 920ee50e2115..61c0c0ec07b3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -146,6 +146,21 @@
#define GPIO_INTM_EDGE_TRIG_IRQ BIT(9)
#define REG_LEDCFG0 0x004c
+#define LEDCFG0_LED0CM GENMASK(2, 0)
+#define LEDCFG0_LED1CM GENMASK(10, 8)
+#define LED_MODE_SW_CTRL 0x0
+#define LED_MODE_TX_OR_RX_EVENTS 0x3
+#define LEDCFG0_LED0SV BIT(3)
+#define LEDCFG0_LED1SV BIT(11)
+#define LED_SW_OFF 0x0
+#define LED_SW_ON 0x1
+#define LEDCFG0_LED0_IO_MODE BIT(7)
+#define LEDCFG0_LED1_IO_MODE BIT(15)
+#define LED_IO_MODE_OUTPUT 0x0
+#define LED_IO_MODE_INPUT 0x1
+#define LEDCFG0_LED2EN BIT(21)
+#define LED_GPIO_DISABLE 0x0
+#define LED_GPIO_ENABLE 0x1
#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
#define LEDCFG1_HW_LED_CONTROL BIT(1)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 69e97647e3d6..2e60a6991ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1903,6 +1903,10 @@ void rtl_init_sw_leds(struct ieee80211_hw *hw)
EXPORT_SYMBOL(rtl_init_sw_leds);
const struct ieee80211_ops rtl_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = rtl_op_start,
.stop = rtl_op_stop,
.tx = rtl_op_tx,
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 2e945554ed6d..c1fbc29d5ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1287,18 +1287,44 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
}
EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
+static void _rtl_fw_block_write_usb(struct ieee80211_hw *hw, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 start = START_ADDRESS;
+ u32 n;
+
+ while (size > 0) {
+ if (size >= 64)
+ n = 64;
+ else if (size >= 8)
+ n = 8;
+ else
+ n = 1;
+
+ rtl_write_chunk(rtlpriv, start, n, buffer);
+
+ start += n;
+ buffer += n;
+ size -= n;
+ }
+}
+
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *pu4byteptr = (u8 *)buffer;
u32 i;
- for (i = 0; i < size; i++)
- rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
+ if (rtlpriv->rtlhal.interface == INTF_PCI) {
+ for (i = 0; i < size; i++)
+ rtl_write_byte(rtlpriv, (START_ADDRESS + i),
+ *(buffer + i));
+ } else if (rtlpriv->rtlhal.interface == INTF_USB) {
+ _rtl_fw_block_write_usb(hw, buffer, size);
+ }
}
EXPORT_SYMBOL_GPL(rtl_fw_block_write);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 1ec59f439382..4821625ad1e5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -91,8 +91,8 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
int max_size, u8 *hwinfo, int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size);
void rtl_efuse_ops_init(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 96ce05bcf0b3..11709b6c83f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -378,13 +378,13 @@ static void _rtl_pci_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
- rtlpriv->io.write8_async = pci_write8_async;
- rtlpriv->io.write16_async = pci_write16_async;
- rtlpriv->io.write32_async = pci_write32_async;
+ rtlpriv->io.write8 = pci_write8_async;
+ rtlpriv->io.write16 = pci_write16_async;
+ rtlpriv->io.write32 = pci_write32_async;
- rtlpriv->io.read8_sync = pci_read8_sync;
- rtlpriv->io.read16_sync = pci_read16_sync;
- rtlpriv->io.read32_sync = pci_read32_sync;
+ rtlpriv->io.read8 = pci_read8_sync;
+ rtlpriv->io.read16 = pci_read16_sync;
+ rtlpriv->io.read32 = pci_read32_sync;
}
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
@@ -2374,7 +2374,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */
const struct rtl_intf_ops rtl_pci_ops = {
- .read_efuse_byte = read_efuse_byte,
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
.check_buddy_priv = rtl_pci_check_buddy_priv,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 50e139186a93..ed151754fc6e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -350,7 +350,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
__le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
@@ -503,9 +502,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(pdesc, 1);
set_tx_desc_pkt_id(pdesc, 8);
-
- if (!defaultadapter)
- set_tx_desc_qos(pdesc, 1);
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
index 91e4427ab022..4757f93b84e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
@@ -11,7 +11,7 @@
#define CHIP_VENDOR_UMC_B_CUT BIT(6)
#define IS_92C_1T2R(version) \
- (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+ (((version) & CHIP_92C_1T2R) == CHIP_92C_1T2R)
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 5ec0eb8773a5..4217c9a08d01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -622,6 +622,9 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
u16 valuelow;
switch (queue_sel) {
+ default:
+ WARN_ON(1);
+ fallthrough;
case (TX_SELE_HQ | TX_SELE_LQ):
valuehi = QUEUE_HIGH;
valuelow = QUEUE_LOW;
@@ -634,9 +637,6 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
valuehi = QUEUE_HIGH;
valuelow = QUEUE_NORMAL;
break;
- default:
- WARN_ON(1);
- break;
}
if (!wmm_enable) {
beq = valuelow;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 4ff0d4118193..a76f2dc8a977 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -101,7 +101,8 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
rtlphy->rf_type = RF_1T1R;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" :
+ rtlphy->rf_type == RF_1T2R ? "RF_1T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 20b4aac69642..48be7e346efc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -40,7 +40,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.thermalvalue = 0;
/* for firmware buf */
- rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+ rtlpriv->rtlhal.pfirmware = kmalloc(0x4000, GFP_KERNEL);
if (!rtlpriv->rtlhal.pfirmware) {
pr_err("Can't alloc buffer for fw\n");
return 1;
@@ -61,7 +61,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
if (err) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
return err;
@@ -72,7 +72,7 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->rtlhal.pfirmware) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
}
@@ -145,7 +145,6 @@ MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
- .in_ep_num = RTL92C_USB_BULK_IN_NUM,
.rx_urb_num = RTL92C_NUM_RX_URBS,
.rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
.usb_rx_hdl = rtl8192cu_rx_hdl,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 2f44c8aa6066..aa702ba7c9f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -79,68 +79,75 @@ static int configvernoutep(struct ieee80211_hw *hw)
static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
bool bwificfg, struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB Chip-B & WMM Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB typical Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 3;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for WMM.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for typical.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 5;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
{
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 2;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
static int _out_ep_mapping(struct ieee80211_hw *hw)
@@ -475,9 +482,9 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ struct rtl_sta_info *sta_entry;
+ u8 agg_state = RTL_AGG_STOP;
+ u8 ampdu_density = 0;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 rate_flag = info->control.rates[0].flags;
@@ -486,6 +493,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
skb_get_queue_mapping(skb));
u8 *txdesc8;
__le32 *txdesc;
+ u8 tid;
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
@@ -499,10 +507,21 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate);
if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
set_tx_desc_data_shortgi(txdesc, 1);
- if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
- info->flags & IEEE80211_TX_CTL_AMPDU) {
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+ agg_state = sta_entry->tids[tid].agg.agg_state;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (agg_state == RTL_AGG_OPERATIONAL &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(txdesc, 1);
set_tx_desc_max_agg_num(txdesc, 0x14);
+ set_tx_desc_ampdu_density(txdesc, ampdu_density);
+ tcb_desc->rts_enable = 1;
+ tcb_desc->rts_rate = DESC_RATE24M;
} else {
set_tx_desc_agg_break(txdesc, 1);
}
@@ -537,14 +556,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_data_bw(txdesc, 0);
set_tx_desc_data_sc(txdesc, 0);
}
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
- if (sta) {
- u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
-
- set_tx_desc_ampdu_density(txdesc, ampdu_density);
- }
- rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -587,8 +598,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(txdesc, 1);
set_tx_desc_pkt_id(txdesc, 8);
- if (!defaultadapter)
- set_tx_desc_qos(txdesc, 1);
}
if (ieee80211_has_morefrags(fc))
set_tx_desc_more_frag(txdesc, 1);
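As an illustration of the per-station lookup this TX-path change switches to, the check boils down to the sketch below. It assumes, as the patch does, that the driver keeps a struct rtl_sta_info in sta->drv_priv; tx_wants_ampdu() is a hypothetical helper name used only here.

/* assumes <net/mac80211.h> and the driver's wifi.h */
static bool tx_wants_ampdu(struct ieee80211_sta *sta,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info)
{
	struct rtl_sta_info *sta_entry;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control))
		return false;

	sta_entry = (struct rtl_sta_info *)sta->drv_priv;	/* per-sta driver data */
	tid = ieee80211_get_tid(hdr);				/* QoS TID from the header */

	return sta_entry->tids[tid].agg.agg_state == RTL_AGG_OPERATIONAL &&
	       (info->flags & IEEE80211_TX_CTL_AMPDU);
}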
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index 5f81cab205cc..09e61dc0f317 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -4,15 +4,12 @@
#ifndef __RTL92CU_TRX_H__
#define __RTL92CU_TRX_H__
-#define RTL92C_USB_BULK_IN_NUM 1
#define RTL92C_NUM_RX_URBS 8
#define RTL92C_NUM_TX_URBS 32
#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
#define RX_DRV_INFO_SIZE_UNIT 8
-#define RTL_AGG_ON 1
-
enum usb_rx_agg_mode {
USB_RX_AGG_DISABLE,
USB_RX_AGG_DMA,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 743ac6871bf4..4ba42f6be3f2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1669,10 +1669,8 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
u8 cutvalue[2];
u16 chipvalue;
- rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_H,
- &cutvalue[1]);
- rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_L,
- &cutvalue[0]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
chipvalue = (cutvalue[1] << 8) | cutvalue[0];
switch (chipvalue) {
case 0xAA55:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 02ac69c08ed3..192982ec8152 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -42,6 +42,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
struct phy_sts_cck_8192d *cck_buf;
s8 rx_pwr_all, rx_pwr[4];
@@ -62,9 +63,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
- cck_highpwr = (u8) rtl_get_bbreg(hw,
- RFPGA0_XA_HSSIPARAMETER2,
- BIT(9));
+ cck_highpwr = rtlphy->cck_high_power;
else
cck_highpwr = false;
if (!cck_highpwr) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index d9823ddab7be..65bfc14702f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -349,7 +349,6 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool b_defaultadapter = true;
/* bool b_trigger_ac = false; */
u8 *pdesc8 = (u8 *)pdesc_tx;
__le32 *pdesc = (__le32 *)pdesc8;
@@ -503,10 +502,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_hwseq_en_8723(pdesc, 1);
/* set_tx_desc_hwseq_en(pdesc, 1); */
/* set_tx_desc_pkt_id(pdesc, 8); */
-
- if (!b_defaultadapter)
- set_tx_desc_hwseq_sel_8723(pdesc, 1);
- /* set_tx_desc_qos(pdesc, 1); */
+ /* set_tx_desc_qos(pdesc, 1); */
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 30bf2775a335..6e8c87a2fae4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -23,86 +23,23 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
-static void usbctrl_async_callback(struct urb *urb)
-{
- if (urb) {
- /* free dr */
- kfree(urb->setup_packet);
- /* free databuf */
- kfree(urb->transfer_buffer);
- }
-}
-
-static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
-{
- int rc;
- unsigned int pipe;
- u8 reqtype;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
- u8 *databuf;
-
- if (WARN_ON_ONCE(len > databuf_maxlen))
- len = databuf_maxlen;
-
- pipe = usb_sndctrlpipe(udev, 0); /* write_out */
- reqtype = REALTEK_USB_VENQT_WRITE;
-
- dr = kzalloc(sizeof(*dr), GFP_ATOMIC);
- if (!dr)
- return -ENOMEM;
-
- databuf = kzalloc(databuf_maxlen, GFP_ATOMIC);
- if (!databuf) {
- kfree(dr);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- kfree(databuf);
- kfree(dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = reqtype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16(value);
- dr->wIndex = cpu_to_le16(index);
- dr->wLength = cpu_to_le16(len);
- /* data are already in little-endian order */
- memcpy(databuf, pdata, len);
- usb_fill_control_urb(urb, udev, pipe,
- (unsigned char *)dr, databuf, len,
- usbctrl_async_callback, NULL);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc < 0) {
- kfree(databuf);
- kfree(dr);
- }
- usb_free_urb(urb);
- return rc;
-}
-
-static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
+static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype,
+ u16 value, void *pdata, u16 len)
{
unsigned int pipe;
int status;
- u8 reqtype;
int vendorreq_times = 0;
static int count;
- pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
- reqtype = REALTEK_USB_VENQT_READ;
+ if (reqtype == REALTEK_USB_VENQT_READ)
+ pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+ else
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
do {
- status = usb_control_msg(udev, pipe, request, reqtype, value,
- index, pdata, len, 1000);
+ status = usb_control_msg(udev, pipe, REALTEK_USB_VENQT_CMD_REQ,
+ reqtype, value, REALTEK_USB_VENQT_CMD_IDX,
+ pdata, len, 1000);
if (status < 0) {
/* firmware download is checksummed, don't retry */
if ((value >= FW_8192C_START_ADDRESS &&
@@ -114,18 +51,15 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
} while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
if (status < 0 && count++ < 4)
- pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
- return status;
+ dev_err(&udev->dev, "reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x reqtype=0x%x\n",
+ value, status, *(u32 *)pdata, reqtype);
}
static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
{
struct device *dev = rtlpriv->io.dev;
struct usb_device *udev = to_usb_device(dev);
- u8 request;
u16 wvalue;
- u16 index;
__le32 *data;
unsigned long flags;
@@ -134,14 +68,33 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
rtlpriv->usb_data_index = 0;
data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
- _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_READ, wvalue, data, len);
return le32_to_cpu(*data);
}
+
+static void _usb_write_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val, u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ unsigned long flags;
+ __le32 *data;
+ u16 wvalue;
+
+ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+
+ wvalue = (u16)(addr & 0x0000ffff);
+ *data = cpu_to_le32(val);
+
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, wvalue, data, len);
+}
+
static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
return (u8)_usb_read_sync(rtlpriv, addr, 1);
@@ -157,45 +110,27 @@ static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
return _usb_read_sync(rtlpriv, addr, 4);
}
-static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
- u16 len)
+static void _usb_write8_sync(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
- u8 request;
- u16 wvalue;
- u16 index;
- __le32 data;
- int ret;
-
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
- wvalue = (u16)(addr&0x0000ffff);
- data = cpu_to_le32(val);
-
- ret = _usbctrl_vendorreq_async_write(udev, request, wvalue,
- index, &data, len);
- if (ret < 0)
- dev_err(&udev->dev, "error %d writing at 0x%x\n", ret, addr);
+ _usb_write_sync(rtlpriv, addr, val, 1);
}
-static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+static void _usb_write16_sync(struct rtl_priv *rtlpriv, u32 addr, u16 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 1);
+ _usb_write_sync(rtlpriv, addr, val, 2);
}
-static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+static void _usb_write32_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 2);
+ _usb_write_sync(rtlpriv, addr, val, 4);
}
-static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+static void _usb_write_chunk_sync(struct rtl_priv *rtlpriv, u32 addr,
+ u32 length, u8 *data)
{
- struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(rtlpriv->io.dev);
- _usb_write_async(to_usb_device(dev), addr, val, 4);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, addr, data, length);
}
static void _rtl_usb_io_handler_init(struct device *dev,
@@ -205,12 +140,13 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
mutex_init(&rtlpriv->io.bb_mutex);
- rtlpriv->io.write8_async = _usb_write8_async;
- rtlpriv->io.write16_async = _usb_write16_async;
- rtlpriv->io.write32_async = _usb_write32_async;
- rtlpriv->io.read8_sync = _usb_read8_sync;
- rtlpriv->io.read16_sync = _usb_read16_sync;
- rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.write8 = _usb_write8_sync;
+ rtlpriv->io.write16 = _usb_write16_sync;
+ rtlpriv->io.write32 = _usb_write32_sync;
+ rtlpriv->io.write_chunk = _usb_write_chunk_sync;
+ rtlpriv->io.read8 = _usb_read8_sync;
+ rtlpriv->io.read16 = _usb_read16_sync;
+ rtlpriv->io.read32 = _usb_read32_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
@@ -280,7 +216,6 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
- rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
rtlusb->usb_rx_segregate_hdl =
rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
@@ -312,20 +247,38 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
- if (usb_endpoint_dir_in(pep_desc))
+ if (usb_endpoint_dir_in(pep_desc)) {
+ if (usb_endpoint_xfer_bulk(pep_desc)) {
+ /* The vendor drivers assume there is only one
+ * bulk in ep and that it's the first in ep.
+ */
+ if (rtlusb->in_ep_nums == 0)
+ rtlusb->in_ep = usb_endpoint_num(pep_desc);
+ else
+ pr_warn("%s: bulk in endpoint is not the first in endpoint\n",
+ __func__);
+ }
+
rtlusb->in_ep_nums++;
- else if (usb_endpoint_dir_out(pep_desc))
+ } else if (usb_endpoint_dir_out(pep_desc)) {
+ if (rtlusb->out_ep_nums < RTL_USB_MAX_BULKOUT_NUM) {
+ if (usb_endpoint_xfer_bulk(pep_desc))
+ rtlusb->out_eps[rtlusb->out_ep_nums] =
+ usb_endpoint_num(pep_desc);
+ } else {
+ pr_warn("%s: found more bulk out endpoints than the expected %d\n",
+ __func__, RTL_USB_MAX_BULKOUT_NUM);
+ }
+
rtlusb->out_ep_nums++;
+ }
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
pep_desc->bInterval);
}
- if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
- pr_err("Too few input end points found\n");
- return -EINVAL;
- }
+
if (rtlusb->out_ep_nums == 0) {
pr_err("No output end points found\n");
return -EINVAL;
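For reference, the endpoint discovery added in this hunk reduces to the sketch below, built only on standard USB core helpers. collect_bulk_out_eps() is a hypothetical name and, unlike the driver loop, it counts only bulk-out endpoints rather than every out endpoint.

/* assumes <linux/usb.h>; simplified relative to the driver code above */
static void collect_bulk_out_eps(struct usb_interface *intf,
				 u8 *out_eps, u8 *num_out)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	unsigned int i;

	*num_out = 0;
	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		const struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

		if (usb_endpoint_dir_out(ep) && usb_endpoint_xfer_bulk(ep) &&
		    *num_out < RTL_USB_MAX_BULKOUT_NUM)
			out_eps[(*num_out)++] = usb_endpoint_num(ep);
	}
}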
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 3bf85b23eec1..12529afc0510 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -19,6 +19,7 @@
#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
+#define RTL_USB_MAX_BULKOUT_NUM 4
#define RTL_USB_MAX_TX_URBS_NUM 8
enum rtl_txq {
@@ -94,6 +95,7 @@ struct rtl_usb {
/* Tx */
u8 out_ep_nums ;
+ u8 out_eps[RTL_USB_MAX_BULKOUT_NUM];
u8 out_queue_sel;
struct rtl_ep_map ep_map;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index d87cd2252eac..9fabf597cfd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1397,8 +1397,6 @@ struct rtl_phy {
#define RTL_AGG_PROGRESS 1
#define RTL_AGG_START 2
#define RTL_AGG_OPERATIONAL 3
-#define RTL_AGG_OFF 0
-#define RTL_AGG_ON 1
#define RTL_RX_AGG_START 1
#define RTL_RX_AGG_STOP 0
#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
@@ -1447,13 +1445,15 @@ struct rtl_io {
/*PCI IO map */
unsigned long pci_base_addr; /*device I/O address */
- void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write8)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write_chunk)(struct rtl_priv *rtlpriv, u32 addr, u32 length,
+ u8 *data);
- u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u8 (*read8)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32)(struct rtl_priv *rtlpriv, u32 addr);
};
@@ -1471,7 +1471,6 @@ struct rtl_mac {
enum nl80211_iftype opmode;
/*Probe Beacon management */
- struct rtl_tid_data tids[MAX_TID_COUNT];
enum rtl_link_state link_state;
int n_channels;
@@ -2290,7 +2289,6 @@ struct rtl_hal_ops {
struct rtl_intf_ops {
/*com */
- void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
bool (*check_buddy_priv)(struct ieee80211_hw *hw,
@@ -2354,7 +2352,6 @@ struct rtl_mod_params {
struct rtl_hal_usbint_cfg {
/* data - rx */
- u32 in_ep_num;
u32 rx_urb_num;
u32 rx_max_size;
@@ -2916,25 +2913,25 @@ extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read8_sync(rtlpriv, addr);
+ return rtlpriv->io.read8(rtlpriv, addr);
}
static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read16_sync(rtlpriv, addr);
+ return rtlpriv->io.read16(rtlpriv, addr);
}
static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read32_sync(rtlpriv, addr);
+ return rtlpriv->io.read32(rtlpriv, addr);
}
static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
{
- rtlpriv->io.write8_async(rtlpriv, addr, val8);
+ rtlpriv->io.write8(rtlpriv, addr, val8);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read8_sync(rtlpriv, addr);
+ rtlpriv->io.read8(rtlpriv, addr);
}
static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
@@ -2947,19 +2944,25 @@ static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
{
- rtlpriv->io.write16_async(rtlpriv, addr, val16);
+ rtlpriv->io.write16(rtlpriv, addr, val16);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read16_sync(rtlpriv, addr);
+ rtlpriv->io.read16(rtlpriv, addr);
}
static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
u32 addr, u32 val32)
{
- rtlpriv->io.write32_async(rtlpriv, addr, val32);
+ rtlpriv->io.write32(rtlpriv, addr, val32);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read32_sync(rtlpriv, addr);
+ rtlpriv->io.read32(rtlpriv, addr);
+}
+
+static inline void rtl_write_chunk(struct rtl_priv *rtlpriv,
+ u32 addr, u32 length, u8 *data)
+{
+ rtlpriv->io.write_chunk(rtlpriv, addr, length, data);
}
static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
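A hedged usage sketch of the renamed accessors: reads and writes now both go through synchronous vendor requests, and rtl_write_chunk() pushes a whole buffer in one control transfer instead of 4-byte writes. example_io() and its parameters are placeholders, not part of the patch.

static void example_io(struct rtl_priv *rtlpriv, u32 addr, u8 *buf, u32 buf_len)
{
	rtl_write_byte(rtlpriv, addr, 0x01);		/* -> io.write8      */
	(void)rtl_read_byte(rtlpriv, addr);		/* -> io.read8       */
	rtl_write_chunk(rtlpriv, addr, buf_len, buf);	/* -> io.write_chunk */
}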
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 1b2ad81838be..5b2036798159 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -316,23 +316,13 @@ static ssize_t rtw_debugfs_set_single_input(struct file *filp,
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
- struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
u32 input;
- int num;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou32_from_user(buffer, count, 0, &input);
if (ret)
return ret;
- num = kstrtoint(tmp, 0, &input);
-
- if (num) {
- rtw_warn(rtwdev, "kstrtoint failed\n");
- return num;
- }
-
debugfs_priv->cb_data = input;
return count;
@@ -485,19 +475,12 @@ static ssize_t rtw_debugfs_set_fix_rate(struct file *filp,
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 fix_rate;
- char tmp[32 + 1];
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou8_from_user(buffer, count, 0, &fix_rate);
if (ret)
return ret;
- ret = kstrtou8(tmp, 0, &fix_rate);
- if (ret) {
- rtw_warn(rtwdev, "invalid args, [rate]\n");
- return ret;
- }
-
dm_info->fix_rate = fix_rate;
return count;
@@ -879,20 +862,13 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_coex *coex = &rtwdev->coex;
- char tmp[32 + 1];
bool enable;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &enable);
if (ret)
return ret;
- ret = kstrtobool(tmp, &enable);
- if (ret) {
- rtw_warn(rtwdev, "invalid arguments\n");
- return ret;
- }
-
mutex_lock(&rtwdev->mutex);
coex->manual_control = !enable;
mutex_unlock(&rtwdev->mutex);
@@ -951,18 +927,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
bool input;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &input);
if (ret)
return ret;
- ret = kstrtobool(tmp, &input);
- if (ret)
- return -EINVAL;
-
if (!input)
return -EINVAL;
@@ -1030,11 +1001,12 @@ static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- int bit;
+ int ret, bit;
bool en;
- if (kstrtoint_from_user(buffer, count, 10, &bit))
- return -EINVAL;
+ ret = kstrtoint_from_user(buffer, count, 10, &bit);
+ if (ret)
+ return ret;
en = bit > 0;
bit = abs(bit);
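The debugfs write handlers above all converge on the same pattern: parse the user buffer directly with a kstrto*_from_user() helper and propagate its error code, with no intermediate stack buffer. A minimal sketch, with example_set_flag() as a placeholder name:

static ssize_t example_set_flag(struct file *filp, const char __user *buffer,
				size_t count, loff_t *loff)
{
	bool enable;
	int ret;

	ret = kstrtobool_from_user(buffer, count, &enable);
	if (ret)
		return ret;

	/* ... apply "enable" to driver state here ... */
	return count;
}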
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 298663b03580..0c1c1ff31085 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -309,6 +309,13 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
+ if (chip->id == RTW_CHIP_TYPE_8822C ||
+ chip->id == RTW_CHIP_TYPE_8822B ||
+ chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
+ }
+
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
rtw_write32(rtwdev, REG_SDIO_HIMR, imr);
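rtw_write8_clr() used above is the driver's read-modify-write helper; ignoring locking, the new call is roughly equivalent to the sketch below (clear_bit0_example() is a placeholder name).

static void clear_bit0_example(struct rtw_dev *rtwdev, u32 addr)
{
	u8 val = rtw_read8(rtwdev, addr);

	rtw_write8(rtwdev, addr, val & ~BIT(0));
}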
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index d8d68f16014e..7af5bf7fe5b6 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -927,6 +927,10 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
}
const struct ieee80211_ops rtw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6d22628129d0..ffba6b88f392 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2032,8 +2032,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
rtw_phy_setup_phy_cond(rtwdev, hal->pkg_type);
rtw_phy_init_tx_power(rtwdev);
- if (rfe_def->agc_btg_tbl)
- rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 2bfc0e822b8d..9986a4cb37eb 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1450,6 +1450,7 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
@@ -1498,6 +1499,9 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
ret);
}
+
+ if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
+ rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}
static int __maybe_unused rtw_pci_suspend(struct device *dev)
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 128e75a81bf3..37ef80c9091d 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
rtw_load_table(rtwdev, chip->bb_tbl);
rtw_load_table(rtwdev, chip->agc_tbl);
+ if (rfe_def->agc_btg_tbl)
+ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_rfk_table(rtwdev);
for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 1634f03784f1..b122f226924b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -557,6 +557,9 @@
#define REG_RFE_INV16 0x0cbe
#define BIT_RFE_BUF_EN BIT(3)
+#define REG_ANAPARSW_MAC_0 0x1010
+#define BIT_CF_L_V2 GENMASK(29, 28)
+
#define REG_ANAPAR_XTAL_0 0x1040
#define BIT_XCAP_0 GENMASK(23, 10)
#define REG_CPU_DMEM_CON 0x1080
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 429bb420b056..fe5d8e188350 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -773,9 +773,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
if (cck_enable)
dm_info->total_fa_cnt += cck_fa_cnt;
- dm_info->total_fa_cnt = ofdm_fa_cnt;
crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
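/* Note on the reordering above: previously total_fa_cnt was assigned
 * ofdm_fa_cnt after the conditional addition of cck_fa_cnt, so the CCK
 * contribution was discarded; with the assignment moved before the
 * if-statement, the total covers OFDM plus (when CCK is enabled) CCK
 * false alarms.
 */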
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index 7a5cbdc31ef7..e2c7d9f87683 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -9,24 +9,36 @@
#include "usb.h"
static const struct usb_device_id rtw_8821cu_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index e6ab1ac6d709..a0188511099a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -33,6 +33,36 @@ static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
}
+static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ bool reg_on_section = false;
+ u16 t_reg = 0x4e0;
+ u8 t_len = 1;
+ int status;
+
+ /* There are three sections:
+ * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
+ * 2. off (< 0xFE00, excluding "on" section): this section could be
+ * powered off
+ * 3. local (>= 0xFE00): usb specific registers section
+ */
+ if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
+ reg_on_section = true;
+
+ if (!reg_on_section)
+ return;
+
+ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+ t_reg, 0, data, t_len, 500);
+
+ if (status != t_len && status != -ENODEV)
+ rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
+ __func__, t_reg, t_len, status);
+}
+
static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
@@ -58,6 +88,11 @@ static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
rtw_err(rtwdev, "read register 0x%x failed with %d\n",
addr, ret);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
+
return le32_to_cpu(*data);
}
@@ -102,6 +137,11 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
if (ret < 0 && ret != -ENODEV && count++ < 4)
rtw_err(rtwdev, "write register 0x%x failed with %d\n",
addr, ret);
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
}
static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
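The address check in rtw_usb_reg_sec() above can be read as the predicate below; rtw_usb_addr_always_on() is a hypothetical helper used only to spell out the ranges.

static bool rtw_usb_addr_always_on(u32 addr)
{
	/* "on" section: 0x0000-0x00ff and 0x1000-0x10ff stay powered */
	return addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff);
}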
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 914c94988b2f..11fbdd142162 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -777,3 +777,64 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
+{
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
+ DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V2_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0],
+ DCTLINFO_V2_W5_SEC_ENT_VALID_V1) |
+ le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_SEC_ENT_VALID_V1 |
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V2_W6_SEC_ENT1_V1) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V2_W6_SEC_ENT2_V1) |
+ le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V2_W6_SEC_ENT3_V1) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_SEC_ENT1_V1 |
+ DCTLINFO_V2_W6_SEC_ENT2_V1 |
+ DCTLINFO_V2_W6_SEC_ENT3_V1 |
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+
+ h2c->w7 = le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V2_W7_SEC_ENT5_V1) |
+ le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 |
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+}
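A minimal sketch of the value/mask convention the v2 D-CTRL info H2C above relies on: every value word wN is paired with a mask word mN that tells the firmware which bit-fields of wN carry a valid update. set_field_example() is a placeholder name.

static void set_field_example(struct rtw89_h2c_dctlinfo_ud_v2 *h2c, u8 keyid)
{
	/* m4 flags SEC_ENT0_KEYID as the only field of w4 to consume */
	h2c->w4 = le32_encode_bits(keyid, DCTLINFO_V2_W4_SEC_ENT0_KEYID);
	h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_SEC_ENT0_KEYID);
}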
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 83c160a614e6..fa09d11c345c 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -352,6 +352,111 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
}
+struct rtw89_h2c_dctlinfo_ud_v2 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define DCTLINFO_V2_C0_MACID GENMASK(6, 0)
+#define DCTLINFO_V2_C0_OP BIT(7)
+
+#define DCTLINFO_V2_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V2_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V2_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V2_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V2_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V2_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V2_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V2_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V2_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V2_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V2_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V2_W2_NAT25_EN BIT(30)
+#define DCTLINFO_V2_W2_IS_MLD BIT(31)
+#define DCTLINFO_V2_W2_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V2_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V2_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V2_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V2_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V2_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W4_VLAN_TAG_SEL GENMASK(7, 5)
+#define DCTLINFO_V2_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V2_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V2_W4_VLAN_RX_DYNAMIC_PCP_EN BIT(11)
+#define DCTLINFO_V2_W4_VLAN_RX_PKT_DROP BIT(12)
+#define DCTLINFO_V2_W4_VLAN_RX_VALID BIT(13)
+#define DCTLINFO_V2_W4_VLAN_TX_VALID BIT(14)
+#define DCTLINFO_V2_W4_WAPI BIT(15)
+#define DCTLINFO_V2_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V2_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V2_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V2_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V2_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V2_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V2_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V2_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V2_W4_ALL GENMASK(31, 5)
+#define DCTLINFO_V2_W5_SEC_ENT7_KEYID GENMASK(1, 0)
+#define DCTLINFO_V2_W5_SEC_ENT8_KEYID GENMASK(3, 2)
+#define DCTLINFO_V2_W5_SEC_ENT_VALID_V1 GENMASK(23, 8)
+#define DCTLINFO_V2_W5_SEC_ENT0_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W5_ALL (GENMASK(31, 8) | GENMASK(3, 0))
+#define DCTLINFO_V2_W6_SEC_ENT1_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W6_SEC_ENT2_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W6_SEC_ENT3_V1 GENMASK(23, 16)
+#define DCTLINFO_V2_W6_SEC_ENT4_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W6_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W7_SEC_ENT5_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W7_SEC_ENT6_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W7_SEC_ENT7 GENMASK(23, 16)
+#define DCTLINFO_V2_W7_SEC_ENT8 GENMASK(31, 24)
+#define DCTLINFO_V2_W7_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W8_MLD_SMA_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W8_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W9_MLD_SMA_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W9_MLD_TMA_L_V1 GENMASK(31, 16)
+#define DCTLINFO_V2_W9_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W10_MLD_TMA_H_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W10_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W11_MLD_TA_BSSID_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W11_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W12_MLD_TA_BSSID_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W12_ALL GENMASK(15, 0)
+
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
@@ -373,6 +478,10 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
u8 *cmd);
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, u8 *cmd);
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index cbf6821af6b8..051a3cad6101 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -212,33 +212,68 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
rtw89_config_default_chandef(rtwdev);
}
+static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ struct rtw89_entity_weight *w)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chanctx_cfg *cfg;
+ struct rtw89_vif *rtwvif;
+ int idx;
+
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ cfg = hal->sub[idx].cfg;
+ if (!cfg) {
+ /* doesn't run with chanctx ops; one channel at most */
+ w->active_chanctxs = 1;
+ break;
+ }
+
+ if (cfg->ref_count > 0)
+ w->active_chanctxs++;
+ }
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (rtwvif->chanctx_assigned)
+ w->active_roles++;
+ }
+}
+
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
{
+ DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {};
struct rtw89_hal *hal = &rtwdev->hal;
const struct cfg80211_chan_def *chandef;
+ struct rtw89_entity_weight w = {};
enum rtw89_entity_mode mode;
struct rtw89_chan chan;
- u8 weight;
- u8 last;
u8 idx;
lockdep_assert_held(&rtwdev->mutex);
- weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
- switch (weight) {
+ bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ switch (w.active_chanctxs) {
default:
- rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
- bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n",
+ w.active_chanctxs);
+ bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY);
fallthrough;
case 0:
rtw89_config_default_chandef(rtwdev);
+ set_bit(RTW89_SUB_ENTITY_0, recalc_map);
fallthrough;
case 1:
- last = RTW89_SUB_ENTITY_0;
mode = RTW89_ENTITY_MODE_SCC;
break;
- case 2:
- last = RTW89_SUB_ENTITY_1;
+ case 2 ... NUM_OF_RTW89_SUB_ENTITY:
+ if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "unhandled ent: %d chanctxs %d roles\n",
+ w.active_chanctxs, w.active_roles);
+ return RTW89_ENTITY_MODE_UNHANDLED;
+ }
+
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
break;
@@ -247,7 +282,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
break;
}
- for (idx = 0; idx <= last; idx++) {
+ for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) {
chandef = rtw89_chandef_get(rtwdev, idx);
rtw89_get_channel_params(chandef, &chan);
if (chan.channel == 0) {
@@ -287,6 +322,13 @@ static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev,
}
}
+static bool rtw89_concurrent_via_mrc(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+
+ return chip_gen == RTW89_CHIP_BE;
+}
+
/* This function centrally manages how MCC roles are sorted and iterated.
* And, it guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES.
* So, if data needs to pass an array for ordered_idx, the array can declare
@@ -320,19 +362,12 @@ int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev,
return 0;
}
-/* For now, IEEE80211_HW_TIMING_BEACON_ONLY can make things simple to ensure
- * correctness of MCC calculation logic below. We have noticed that once driver
- * declares WIPHY_FLAG_SUPPORTS_MLO, the use of IEEE80211_HW_TIMING_BEACON_ONLY
- * will be restricted. We will make an alternative in driver when it is ready
- * for MLO.
- */
static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *role, u64 tsf)
{
struct rtw89_vif *rtwvif = role->rtwvif;
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval);
- u64 sync_tsf = vif->bss_conf.sync_tsf;
+ u64 sync_tsf = READ_ONCE(rtwvif->sync_bcn_tsf);
u32 remainder;
if (tsf < sync_tsf) {
@@ -346,16 +381,13 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
return remainder;
}
-static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+static int __mcc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mac_mcc_tsf_rpt rpt = {};
struct rtw89_fw_mcc_tsf_req req = {};
- u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
- u32 tbtt_ofst_ref, tbtt_ofst_aux;
- u64 tsf_ref, tsf_aux;
int ret;
req.group = mcc->group;
@@ -365,11 +397,63 @@ static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC h2c failed to request tsf: %d\n", ret);
- return RTW89_MCC_DFLT_BCN_OFST_TIME;
+ return ret;
+ }
+
+ *tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
+ *tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+
+ return 0;
+}
+
+static int __mrc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_req_tsf_arg arg = {};
+ struct rtw89_mac_mrc_tsf_rpt rpt = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_REQ_TSF_NUM < NUM_OF_RTW89_MCC_ROLES);
+
+ arg.num = 2;
+ arg.infos[0].band = ref->rtwvif->mac_idx;
+ arg.infos[0].port = ref->rtwvif->port;
+ arg.infos[1].band = aux->rtwvif->mac_idx;
+ arg.infos[1].port = aux->rtwvif->port;
+
+ ret = rtw89_fw_h2c_mrc_req_tsf(rtwdev, &arg, &rpt);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to request tsf: %d\n", ret);
+ return ret;
}
- tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
- tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+ *tsf_ref = rpt.tsfs[0];
+ *tsf_aux = rpt.tsfs[1];
+
+ return 0;
+}
+
+static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
+ u32 tbtt_ofst_ref, tbtt_ofst_aux;
+ u64 tsf_ref, tsf_aux;
+ int ret;
+
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+ else
+ ret = __mcc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+
+ if (ret)
+ return RTW89_MCC_DFLT_BCN_OFST_TIME;
+
tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref);
tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux);
@@ -392,6 +476,28 @@ void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role,
mcc_role->macid_bitmap[idx] |= BIT(pos);
}
+static
+u32 rtw89_mcc_role_fw_macid_bitmap_to_u32(struct rtw89_mcc_role *mcc_role)
+{
+ unsigned int macid;
+ unsigned int i, j;
+ u32 bitmap = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mcc_role->macid_bitmap); i++) {
+ for (j = 0; j < 8; j++) {
+ macid = i * 8 + j;
+ if (macid >= 32)
+ goto out;
+
+ if (mcc_role->macid_bitmap[i] & BIT(j))
+ bitmap |= BIT(macid);
+ }
+ }
+
+out:
+ return bitmap;
+}
+
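/* Worked example of the flattening above: a macid_bitmap of
 * { 0x05, 0x80, 0x00, 0x00 } yields BIT(0) | BIT(2) from byte 0 and
 * BIT(15) from bit 7 of byte 1, i.e. the returned u32 is 0x00008005.
 */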
static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -588,6 +694,9 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
int ret;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+
if (sel.bind_vif[rtwvif->sub_entity_idx]) {
rtw89_warn(rtwdev,
"MCC skip extra vif <macid %d> on chanctx[%d]\n",
@@ -1150,7 +1259,11 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
tsf_ofst_tgt = bcn_intvl_src_us - remainder;
config->sync.macid_tgt = tgt->rtwvif->mac_id;
+ config->sync.band_tgt = tgt->rtwvif->mac_idx;
+ config->sync.port_tgt = tgt->rtwvif->port;
config->sync.macid_src = src->rtwvif->mac_id;
+ config->sync.band_src = src->rtwvif->mac_idx;
+ config->sync.port_src = src->rtwvif->port;
config->sync.offset = tsf_ofst_tgt / 1024;
config->sync.enable = true;
@@ -1297,6 +1410,37 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
return 0;
}
+static
+void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_policy *policy = &role->policy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg;
+ const struct rtw89_chan *chan;
+
+ slot_arg = &arg->slots[slot_idx];
+ role->slot_idx = slot_idx;
+
+ slot_arg->duration = role->duration;
+ slot_arg->role_num = 1;
+
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
+ slot_arg->roles[0].is_master = role == ref;
+ slot_arg->roles[0].band = chan->band_type;
+ slot_arg->roles[0].bw = chan->band_width;
+ slot_arg->roles[0].central_ch = chan->channel;
+ slot_arg->roles[0].primary_ch = chan->primary_channel;
+ slot_arg->roles[0].en_tx_null = !policy->dis_tx_null;
+ slot_arg->roles[0].null_early = policy->tx_null_early;
+ slot_arg->roles[0].macid = role->rtwvif->mac_id;
+ slot_arg->roles[0].macid_main_bitmap =
+ rtw89_mcc_role_fw_macid_bitmap_to_u32(role);
+}
+
static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1318,6 +1462,20 @@ static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
return 0;
}
+static
+void __mrc_fw_add_bt_role(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg = &arg->slots[slot_idx];
+
+ slot_arg->duration = bt_role->duration;
+ slot_arg->role_num = 1;
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_BT;
+}
+
static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1403,6 +1561,130 @@ static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
return 0;
}
+static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg_src;
+ u8 slot_idx_tgt;
+
+ if (!courtesy->enable)
+ return;
+
+ if (courtesy->macid_src == ref->rtwvif->mac_id) {
+ slot_arg_src = &arg->slots[ref->slot_idx];
+ slot_idx_tgt = aux->slot_idx;
+ } else {
+ slot_arg_src = &arg->slots[aux->slot_idx];
+ slot_idx_tgt = ref->slot_idx;
+ }
+
+ slot_arg_src->courtesy_target = slot_idx_tgt;
+ slot_arg_src->courtesy_period = courtesy->slot_num;
+ slot_arg_src->courtesy_en = true;
+}
+
+static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_fw_mrc_start_arg start_arg = {};
+ struct rtw89_fw_mrc_add_arg add_arg = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_ADD_SLOT_NUM <
+ NUM_OF_RTW89_MCC_ROLES + 1 /* bt role */);
+
+ if (replace) {
+ start_arg.old_sch_idx = mcc->group;
+ start_arg.action = RTW89_H2C_MRC_START_ACTION_REPLACE_OLD;
+ mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group);
+ }
+
+ add_arg.sch_idx = mcc->group;
+ add_arg.sch_type = RTW89_H2C_MRC_SCH_BAND0_ONLY;
+
+ switch (pattern->plan) {
+ case RTW89_MCC_PLAN_TAIL_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_MID_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 1);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_NO_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+
+ add_arg.slot_num = 2;
+ add_arg.btc_in_sch = false;
+ break;
+ default:
+ rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan);
+ return -EFAULT;
+ }
+
+ __mrc_fw_add_courtesy(rtwdev, &add_arg);
+
+ ret = rtw89_fw_h2c_mrc_add(rtwdev, &add_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger add: %d\n", ret);
+ return ret;
+ }
+
+ if (sync->enable) {
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+ };
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+ }
+
+ start_arg.sch_idx = mcc->group;
+ start_arg.start_tsf = config->start_tsf;
+
+ ret = rtw89_fw_h2c_mrc_start(rtwdev, &start_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger start: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
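/* Slot layouts produced by the switch in __mrc_fw_start() above
 * (slot index 0 first):
 *   TAIL_BT: [ref][aux][bt]
 *   MID_BT:  [ref][bt][aux]
 *   NO_BT:   [ref][aux]
 */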
static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1444,6 +1726,60 @@ static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_chang
return 0;
}
+static int __mrc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_upd_duration_arg dur_arg = {
+ .sch_idx = mcc->group,
+ .start_tsf = config->start_tsf,
+ .slot_num = 2,
+ .slots[0] = {
+ .slot_idx = ref->slot_idx,
+ .duration = ref->duration,
+ },
+ .slots[1] = {
+ .slot_idx = aux->slot_idx,
+ .duration = aux->duration,
+ },
+ };
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+
+ };
+ int ret;
+
+ ret = rtw89_fw_h2c_mrc_upd_duration(rtwdev, &dur_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update duration: %d\n", ret);
+ return ret;
+ }
+
+ if (!sync->enable || !sync_changed)
+ return 0;
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1494,7 +1830,7 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
if (!rtwvif_go->chanctx_assigned)
return;
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_go);
}
static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
@@ -1562,7 +1898,11 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- ret = __mcc_fw_start(rtwdev, false);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, false);
+ else
+ ret = __mcc_fw_start(rtwdev, false);
+
if (ret)
return ret;
@@ -1580,16 +1920,23 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
- ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
- ref->rtwvif->mac_id, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to trigger stop: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev)) {
+ ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger del: %d\n", ret);
+ } else {
+ ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
+ ref->rtwvif->mac_id, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to trigger stop: %d\n", ret);
- ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to delete group: %d\n", ret);
+ ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to delete group: %d\n", ret);
+ }
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
@@ -1615,7 +1962,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
- ret = __mcc_fw_start(rtwdev, true);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, true);
+ else
+ ret = __mcc_fw_start(rtwdev, true);
+
if (ret)
return ret;
} else {
@@ -1624,7 +1975,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
else
sync_changed = true;
- ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ else
+ ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+
if (ret)
return ret;
}
@@ -1666,12 +2021,75 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE);
}
+static int __mcc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ int ret;
+
+ ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+ upd->rtwvif->mac_id,
+ upd->macid_bitmap);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to update macid bitmap: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __mrc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *cur,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_fw_mrc_upd_bitmap_arg arg = {};
+ u32 old = rtw89_mcc_role_fw_macid_bitmap_to_u32(cur);
+ u32 new = rtw89_mcc_role_fw_macid_bitmap_to_u32(upd);
+ u32 add = new & ~old;
+ u32 del = old & ~new;
+ int ret;
+ int i;
+
+ arg.sch_idx = mcc->group;
+ arg.macid = upd->rtwvif->mac_id;
+
+ for (i = 0; i < 32; i++) {
+ if (add & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (del & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update bitmap: %d\n", ret);
+ return ret;
+}
+
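/* Worked example of the delta computation above: with old = BIT(1) | BIT(5)
 * and new = BIT(1) | BIT(9), add becomes BIT(9) and del becomes BIT(5), so
 * the loops issue one ADD request for client_macid 9 and one DEL request
 * for client_macid 5.
 */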
static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *mcc_role,
unsigned int ordered_idx,
void *data)
{
- struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role upd = {
.rtwvif = mcc_role->rtwvif,
};
@@ -1685,14 +2103,13 @@ static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
sizeof(mcc_role->macid_bitmap)) == 0)
return 0;
- ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
- upd.rtwvif->mac_id,
- upd.macid_bitmap);
- if (ret) {
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to update macid bitmap: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_upd_macid_bitmap(rtwdev, mcc_role, &upd);
+ else
+ ret = __mcc_fw_upd_macid_bitmap(rtwdev, &upd);
+
+ if (ret)
return ret;
- }
memcpy(mcc_role->macid_bitmap, upd.macid_bitmap,
sizeof(mcc_role->macid_bitmap));
@@ -1900,6 +2317,41 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_work(rtwdev);
}
+static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx1,
+ enum rtw89_sub_entity_idx idx2)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_sub_entity tmp;
+ struct rtw89_vif *rtwvif;
+ u8 cur;
+
+ if (idx1 == idx2)
+ return;
+
+ hal->sub[idx1].cfg->idx = idx2;
+ hal->sub[idx2].cfg->idx = idx1;
+
+ tmp = hal->sub[idx1];
+ hal->sub[idx1] = hal->sub[idx2];
+ hal->sub[idx2] = tmp;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+ if (rtwvif->sub_entity_idx == idx1)
+ rtwvif->sub_entity_idx = idx2;
+ else if (rtwvif->sub_entity_idx == idx2)
+ rtwvif->sub_entity_idx = idx1;
+ }
+
+ cur = atomic_read(&hal->roc_entity_idx);
+ if (cur == idx1)
+ atomic_set(&hal->roc_entity_idx, idx2);
+ else if (cur == idx2)
+ atomic_set(&hal->roc_entity_idx, idx1);
+}
+
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
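
A standalone sketch of the swap performed by rtw89_swap_sub_entity() above: exchange two slots, keep each slot's back-pointer equal to its own index, and remap any external index that referenced either slot. The slot and vif types below are invented for illustration; only the ordering of the steps mirrors the driver.

#include <stdio.h>

struct slot { int idx; };

/* Swap slots a and b and keep every index that referenced them consistent. */
static void swap_slots(struct slot *slots, int *vif_idx, int nvif, int a, int b)
{
	struct slot tmp;
	int i;

	if (a == b)
		return;

	/* fix back-pointers first, mirroring cfg->idx in the driver */
	slots[a].idx = b;
	slots[b].idx = a;

	tmp = slots[a];
	slots[a] = slots[b];
	slots[b] = tmp;

	/* remap users, as done for rtwvif->sub_entity_idx and roc_entity_idx */
	for (i = 0; i < nvif; i++) {
		if (vif_idx[i] == a)
			vif_idx[i] = b;
		else if (vif_idx[i] == b)
			vif_idx[i] = a;
	}
}

int main(void)
{
	struct slot slots[2] = { { .idx = 0 }, { .idx = 1 } };
	int vif_idx[1] = { 1 };

	swap_slots(slots, vif_idx, 1, 0, 1);
	printf("slot0.idx=%d vif0=%d\n", slots[0].idx, vif_idx[0]);
	return 0;
}
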
@@ -1913,8 +2365,8 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
return -ENOENT;
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
- rtw89_set_channel(rtwdev);
cfg->idx = idx;
+ cfg->ref_count = 0;
hal->sub[idx].cfg = cfg;
return 0;
}
@@ -1924,47 +2376,8 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
- enum rtw89_entity_mode mode;
- struct rtw89_vif *rtwvif;
- u8 drop, roll;
-
- drop = cfg->idx;
- if (drop != RTW89_SUB_ENTITY_0)
- goto out;
- roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, drop + 1);
-
- /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
- if (roll == NUM_OF_RTW89_SUB_ENTITY)
- goto out;
-
- /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
- * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
- */
- hal->sub[roll].cfg->idx = RTW89_SUB_ENTITY_0;
- hal->sub[RTW89_SUB_ENTITY_0] = hal->sub[roll];
-
- rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- if (rtwvif->sub_entity_idx == roll)
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
- }
-
- atomic_cmpxchg(&hal->roc_entity_idx, roll, RTW89_SUB_ENTITY_0);
-
- drop = roll;
-
-out:
- mode = rtw89_get_entity_mode(rtwdev);
- switch (mode) {
- case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
- break;
- default:
- break;
- }
-
- clear_bit(drop, hal->entity_map);
- rtw89_set_channel(rtwdev);
+ clear_bit(cfg->idx, hal->entity_map);
}
void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
@@ -1985,16 +2398,73 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_entity_weight w = {};
rtwvif->sub_entity_idx = cfg->idx;
rtwvif->chanctx_assigned = true;
- return 0;
+ cfg->ref_count++;
+
+ if (cfg->idx == RTW89_SUB_ENTITY_0)
+ goto out;
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ if (w.active_chanctxs != 1)
+ goto out;
+
+ /* put the first active chanctx at RTW89_SUB_ENTITY_0 */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0);
+
+out:
+ return rtw89_set_channel(rtwdev);
}
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct ieee80211_chanctx_conf *ctx)
{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_entity_weight w = {};
+ enum rtw89_sub_entity_idx roll;
+ enum rtw89_entity_mode cur;
+
rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->chanctx_assigned = false;
+ cfg->ref_count--;
+
+ if (cfg->ref_count != 0)
+ goto out;
+
+ if (cfg->idx != RTW89_SUB_ENTITY_0)
+ goto out;
+
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY,
+ cfg->idx + 1);
+ /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
+ if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ goto out;
+
+ /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
+ * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
+ */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, roll);
+
+out:
+ rtw89_entity_calculate_weight(rtwdev, &w);
+
+ cur = rtw89_get_entity_mode(rtwdev);
+ switch (cur) {
+ case RTW89_ENTITY_MODE_MCC:
+ /* If multiple roles remain, re-plan MCC for the chanctx changes.
+ * Otherwise, just stop MCC.
+ */
+ rtw89_mcc_stop(rtwdev);
+ if (w.active_roles == NUM_OF_RTW89_MCC_ROLES)
+ rtw89_mcc_start(rtwdev);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_set_channel(rtwdev);
}
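
A small sketch of the release flow that rtw89_chanctx_ops_unassign_vif() now follows: the chanctx is only treated as released once its reference count reaches zero, and only when it occupied slot 0 does another slot get rolled down to replace it. The names and helper below are illustrative; only the ref_count/slot-0 decision mirrors the driver code.

#include <stdbool.h>
#include <stdio.h>

#define SLOT_0 0

struct ctx_cfg { int idx; int ref_count; };

/* Returns true when the unassign should trigger the roll-down to slot 0. */
static bool unassign_needs_rolldown(struct ctx_cfg *cfg)
{
	cfg->ref_count--;

	if (cfg->ref_count != 0)	/* still in use by another vif */
		return false;

	return cfg->idx == SLOT_0;	/* only slot 0 needs a replacement */
}

int main(void)
{
	struct ctx_cfg cfg = { .idx = SLOT_0, .ref_count = 2 };

	printf("%d\n", unassign_needs_rolldown(&cfg));	/* 0: still referenced */
	printf("%d\n", unassign_needs_rolldown(&cfg));	/* 1: last user gone */
	return 0;
}
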
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 9b98d8f4ee9d..ffa412f281f3 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -38,6 +38,11 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_entity_weight {
+ unsigned int active_chanctxs;
+ unsigned int active_roles;
+};
+
static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index f37afb4cbb63..d9b66d43f32e 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,68 +129,75 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 2, .frptmap = 7, .fcxctrl = 7, .fcxinit = 7,
+ .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
.fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 1, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
- .info_buf = 1024, .max_role_num = 5,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
/* keep it to be the last as default entry */
@@ -198,8 +205,8 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
- .info_buf = 1024, .max_role_num = 5,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
};
@@ -351,17 +358,26 @@ enum btc_cx_poicy_type {
/* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
BTC_CXP_OFF_EQ3 = (BTC_CXP_OFF << 8) | 5,
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ4 = (BTC_CXP_OFF << 8) | 6,
+
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ5 = (BTC_CXP_OFF << 8) | 7,
+
/* TDMA off + pri: BT_Hi > WL > BT_Lo */
- BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 6,
+ BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 8,
/* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
- BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+ BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 9,
/* TDMA off + pri: WL_Hi-Tx > BT, BT_Hi > other-WL > BT_Lo */
- BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 8,
+ BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 10,
/* TDMA off + pri: WL_Hi-Tx = BT */
- BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 9,
+ BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 11,
+
+ /* TDMA off + pri: WL > BT, Block-BT */
+ BTC_CXP_OFF_WL2 = (BTC_CXP_OFF << 8) | 12,
/* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/
BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
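
The coexistence policy IDs above pack a category in the upper byte and a per-category index in the lower byte; rtw89_btc_set_policy_v1() later recovers the category with FIELD_GET(BTC_CXP_MASK, policy_type). A standalone sketch of that encoding, assuming the mask covers bits 15:8 (BTC_CXP_MASK itself is not shown in this hunk, so the mask value is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: category in bits 15:8, per-category index in bits 7:0. */
#define CXP_TYPE_MASK	0xff00u
#define CXP_IDX_MASK	0x00ffu

static uint16_t cxp_encode(uint8_t category, uint8_t index)
{
	return (uint16_t)((category << 8) | index);
}

static uint8_t cxp_category(uint16_t policy)
{
	return (policy & CXP_TYPE_MASK) >> 8;	/* what FIELD_GET() extracts */
}

int main(void)
{
	uint16_t policy = cxp_encode(3, 12);	/* e.g. an "OFF" category, index 12 */

	printf("category=%u index=%u\n",
	       cxp_category(policy), policy & CXP_IDX_MASK);
	return 0;
}
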
@@ -676,20 +692,25 @@ static void _run_coex(struct rtw89_dev *rtwdev,
static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state);
static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update);
-static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
- void *param, u16 len)
+static int _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
+ void *param, u16 len)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_dm *dm = &btc->dm;
int ret;
- if (!wl->status.map.init_ok) {
+ if (len > BTC_H2C_MAXLEN || len == 0) {
+ btc->fwinfo.cnt_h2c_fail++;
+ dm->error.map.h2c_buffer_over = true;
+ return -EINVAL;
+ } else if (!wl->status.map.init_ok) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by btc not init!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
- return;
+ return -EINVAL;
} else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
(wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
@@ -697,20 +718,23 @@ static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by wl off!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
- return;
+ return -EINVAL;
}
- pfwinfo->cnt_h2c++;
-
ret = rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len,
false, true);
- if (ret != 0)
+ if (ret)
pfwinfo->cnt_h2c_fail++;
+ else
+ pfwinfo->cnt_h2c++;
+
+ return ret;
}
static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -728,7 +752,9 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_CTRL) {
memset(&btc->ctrl, 0, sizeof(btc->ctrl));
- btc->ctrl.trace_step = FCXDEF_STEP;
+ btc->manual_ctrl = false;
+ if (ver->fcxctrl != 7)
+ btc->ctrl.ctrl.trace_step = FCXDEF_STEP;
}
/* Init Coex variables that are not zero */
@@ -777,22 +803,27 @@ static void _get_reg_status(struct rtw89_dev *rtwdev, u8 type, u8 *val)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_module *md = &btc->mdinfo;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
union rtw89_btc_fbtc_mreg_val *pmreg;
u32 pre_agc_addr = R_BTC_BB_PRE_AGC_S1;
u32 reg_val;
- u8 idx;
+ u8 idx, switch_type;
- if (md->ant.btg_pos == RF_PATH_A)
+ if (ver->fcxinit == 7)
+ switch_type = md->md_v7.switch_type;
+ else
+ switch_type = md->md.switch_type;
+
+ if (btc->btg_pos == RF_PATH_A)
pre_agc_addr = R_BTC_BB_PRE_AGC_S0;
switch (type) {
case BTC_CSTATUS_TXDIV_POS:
- if (md->switch_type == BTC_SWITCH_INTERNAL)
+ if (switch_type == BTC_SWITCH_INTERNAL)
*val = BTC_ANT_DIV_MAIN;
break;
case BTC_CSTATUS_RXDIV_POS:
- if (md->switch_type == BTC_SWITCH_INTERNAL)
+ if (switch_type == BTC_SWITCH_INTERNAL)
*val = BTC_ANT_DIV_MAIN;
break;
case BTC_CSTATUS_BB_GNT_MUX:
@@ -1117,7 +1148,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
u16 wl_slot_set = 0, wl_slot_real = 0;
- u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t = 0;
+ u32 trace_step = 0, rpt_len = 0, diff_t = 0;
u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr;
u8 i, val = 0;
@@ -1207,6 +1238,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ if (ver->fcxctrl != 7)
+ trace_step = btc->ctrl.ctrl.trace_step;
+
if (ver->fcxstep == 2) {
pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v2.step[0]) *
@@ -1920,6 +1954,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
struct rtw89_btc_btf_set_report r = {0};
u32 val, bit_map;
+ int ret;
if ((wl_smap->rf_off || wl_smap->lps != BTC_LPS_OFF) && rpt_state != 0)
return;
@@ -1938,13 +1973,13 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- fwinfo->rpt_en_map = val;
-
r.fver = BTF_SET_REPORT_VER;
r.enable = cpu_to_le32(val);
r.para = cpu_to_le32(rpt_state);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+ if (!ret)
+ fwinfo->rpt_en_map = val;
}
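
The change above moves the cached state update behind the firmware call: rpt_en_map is only written back when _send_fw_cmd() succeeds, so a failed H2C no longer leaves the driver believing the firmware accepted the new report map. A minimal sketch of that commit-on-success pattern (send_cmd() is a stand-in, not a driver function):

#include <stdio.h>

static int send_cmd(unsigned int val)
{
	/* stand-in for the H2C transfer; 0 means success */
	return val ? 0 : -1;
}

/* Only commit the cached value if the firmware actually took it. */
static void set_report_map(unsigned int *cached, unsigned int val)
{
	if (*cached == val)
		return;		/* nothing to do */

	if (!send_cmd(val))
		*cached = val;	/* commit on success only */
}

int main(void)
{
	unsigned int cached = 0;

	set_report_map(&cached, 0x5);
	printf("cached=0x%x\n", cached);
	return 0;
}
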
static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
@@ -2032,6 +2067,7 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ int ret;
dm->run_action = action;
@@ -2060,11 +2096,12 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
if (btc->lps == 1)
rtw89_set_coex_ctrl_lps(rtwdev, btc->lps);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
- btc->policy, btc->policy_len);
-
- memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
- memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
+ btc->policy, btc->policy_len);
+ if (!ret) {
+ memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
+ memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ }
if (btc->update_policy_force)
btc->update_policy_force = false;
@@ -2083,20 +2120,32 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
switch (type) {
case CXDRVINFO_INIT:
- rtw89_fw_h2c_cxdrv_init(rtwdev);
+ if (ver->fcxinit == 7)
+ rtw89_fw_h2c_cxdrv_init_v7(rtwdev, type);
+ else
+ rtw89_fw_h2c_cxdrv_init(rtwdev, type);
break;
case CXDRVINFO_ROLE:
if (ver->fwlrole == 0)
- rtw89_fw_h2c_cxdrv_role(rtwdev);
+ rtw89_fw_h2c_cxdrv_role(rtwdev, type);
else if (ver->fwlrole == 1)
- rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
+ rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
- rtw89_fw_h2c_cxdrv_role_v2(rtwdev);
+ rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
break;
case CXDRVINFO_CTRL:
- rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
+ if (ver->drvinfo_type == 1)
+ type = 2;
+
+ if (ver->fcxctrl == 7)
+ rtw89_fw_h2c_cxdrv_ctrl_v7(rtwdev, type);
+ else
+ rtw89_fw_h2c_cxdrv_ctrl(rtwdev, type);
break;
case CXDRVINFO_TRX:
+ if (ver->drvinfo_type == 1)
+ type = 3;
+
dm->trx_info.tx_power = u32_get_bits(rf_para.wl_tx_power,
RTW89_BTC_WL_DEF_TX_PWR);
dm->trx_info.rx_gain = u32_get_bits(rf_para.wl_rx_gain,
@@ -2107,11 +2156,18 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
RTW89_BTC_WL_DEF_TX_PWR);
dm->trx_info.cn = wl->cn_report;
dm->trx_info.nhm = wl->nhm.pwr;
- rtw89_fw_h2c_cxdrv_trx(rtwdev);
+ rtw89_fw_h2c_cxdrv_trx(rtwdev, type);
break;
case CXDRVINFO_RFK:
- rtw89_fw_h2c_cxdrv_rfk(rtwdev);
+ if (ver->drvinfo_type == 1)
+ return;
+
+ rtw89_fw_h2c_cxdrv_rfk(rtwdev, type);
break;
+ case CXDRVINFO_TXPWR:
+ case CXDRVINFO_FDDT:
+ case CXDRVINFO_MLO:
+ case CXDRVINFO_OSI:
default:
break;
}
@@ -2261,20 +2317,25 @@ static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ int ret;
u8 buf;
- if (bt->rf_para.tx_pwr_freerun == level)
+ if (btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE] == 0)
return;
- bt->rf_para.tx_pwr_freerun = level;
- btc->dm.rf_trx_para.bt_tx_power = level;
+ if (bt->rf_para.tx_pwr_freerun == level)
+ return;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): level = %d\n",
__func__, level);
buf = (s8)(-level);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+ if (!ret) {
+ bt->rf_para.tx_pwr_freerun = level;
+ btc->dm.rf_trx_para.bt_tx_power = level;
+ }
}
#define BTC_BT_RX_NORMAL_LVL 7
@@ -2284,6 +2345,9 @@ static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ if (btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE] == 0)
+ return;
+
if ((bt->rf_para.rx_gain_freerun == level ||
level > BTC_BT_RX_NORMAL_LVL) &&
(!rtwdev->chip->scbd || bt->lna_constrain == level))
@@ -2333,7 +2397,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
}
/* decide trx_para_level */
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
/* fix LNA2 + TIA gain not change by GNT_BT */
if ((btc->dm.wl_btg_rx && b->profile_cnt.now != 0) ||
dm->bt_only == 1)
@@ -2435,7 +2499,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
- if (btc->ctrl.manual || wl->status.map.scan)
+ if (btc->manual_ctrl || wl->status.map.scan)
return;
if (ver->fwlrole == 0) {
@@ -2560,8 +2624,16 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u8 isolation;
+
+ if (ver->fcxinit == 7)
+ isolation = md->md_v7.ant.isolation;
+ else
+ isolation = md->md.ant.isolation;
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
btc->dm.trx_para_level = 0;
return false;
}
@@ -2584,7 +2656,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
/* TODO get isolation by BT psd */
- if (btc->mdinfo.ant.isolation >= BTC_FREERUN_ANTISO_MIN) {
+ if (isolation >= BTC_FREERUN_ANTISO_MIN) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -2712,7 +2784,7 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
u8 type;
u32 tbl_w1, tbl_b1, tbl_b4;
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way)
tbl_w1 = cxtbl[1];
else
@@ -3023,12 +3095,13 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u8 type, null_role;
u32 tbl_w1, tbl_b1, tbl_b4;
type = FIELD_GET(BTC_CXP_MASK, policy_type);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way)
tbl_w1 = cxtbl[1];
else if (hid->exist && hid->type == BTC_HID_218)
@@ -3048,9 +3121,16 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
tbl_b4 = cxtbl[2];
}
} else {
- tbl_w1 = cxtbl[16];
tbl_b1 = cxtbl[17];
tbl_b4 = cxtbl[17];
+
+ if (wl->bg_mode)
+ tbl_w1 = cxtbl[8];
+ else if ((wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) &&
+ hid->exist)
+ tbl_w1 = cxtbl[19];
+ else
+ tbl_w1 = cxtbl[16];
}
btc->bt_req_en = false;
@@ -3615,7 +3695,7 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /*wl-busy + bt idle*/
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */
@@ -3654,7 +3734,7 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way) {
_set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
@@ -3664,7 +3744,12 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
}
} else {
- _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
+ if (wl->bg_mode)
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ5, BTC_ACT_BT_HFP);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
}
}
@@ -3679,7 +3764,7 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (wl->status.map._4way) {
policy_type = BTC_CXP_OFF_WL;
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
@@ -3697,7 +3782,12 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
policy_type = BTC_CXP_OFF_BWB1;
}
} else { /* dedicated-antenna */
- policy_type = BTC_CXP_OFF_EQ3;
+ if (wl->bg_mode)
+ policy_type = BTC_CXP_OFF_BWB1;
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ policy_type = BTC_CXP_OFF_EQ4;
+ else
+ policy_type = BTC_CXP_OFF_EQ3;
}
_set_policy(rtwdev, policy_type, BTC_ACT_BT_HID);
@@ -3947,7 +4037,7 @@ static void _action_wl_other(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ if (btc->ant_type == BTC_ANT_SHARED)
_set_policy(rtwdev, BTC_CXP_OFFB_BWB0, BTC_ACT_WL_OTHER);
else
_set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_OTHER);
@@ -3991,7 +4081,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
u32 is_btg;
u8 i, val;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (ver->fwlrole == 0)
@@ -4063,7 +4153,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_dm *dm = &btc->dm;
u8 is_preagc, val;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
@@ -4083,7 +4173,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ else if (btc->ant_type == BTC_ANT_SHARED)
is_preagc = BTC_PREAGC_DISABLE;
else
is_preagc = BTC_PREAGC_ENABLE;
@@ -4187,13 +4277,12 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
- u8 mode;
- u8 tx_retry;
+ u8 mode, igno_bt, tx_retry;
u32 tx_time;
u16 enable;
bool reenable = false;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (ver->fwlrole == 0)
@@ -4205,7 +4294,12 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
else
return;
- if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
+ if (ver->fcxctrl == 7)
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ else
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+
+ if (btc->dm.freerun || igno_bt || b->profile_cnt.now == 0 ||
mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
enable = 0;
tx_time = BTC_MAX_TX_TIME_DEF;
@@ -4402,7 +4496,7 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ if (btc->ant_type == BTC_ANT_SHARED)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF,
BTC_RSN_NTFY_SCAN_START);
else
@@ -4430,7 +4524,7 @@ static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_25G_MCC);
@@ -4447,7 +4541,7 @@ static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_2G_MCC);
@@ -4465,7 +4559,7 @@ static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_SCC);
@@ -4487,7 +4581,7 @@ static void _action_wl_2g_scc_v1(struct rtw89_dev *rtwdev)
u16 policy_type = BTC_CXP_OFF_BT;
u32 dur;
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
@@ -4549,7 +4643,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
u16 policy_type = BTC_CXP_OFF_BT;
u32 dur;
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
@@ -4607,7 +4701,7 @@ static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_2G_AP);
@@ -4624,7 +4718,7 @@ static void _action_wl_2g_go(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_GO);
@@ -4642,7 +4736,7 @@ static void _action_wl_2g_gc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
_action_by_bt(rtwdev);
} else {/* dedicated-antenna */
_set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GC);
@@ -4655,7 +4749,7 @@ static void _action_wl_2g_nan(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_NAN);
@@ -5351,7 +5445,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
- u8 mode;
+ u8 mode, igno_bt, always_freerun;
lockdep_assert_held(&rtwdev->mutex);
@@ -5368,20 +5462,28 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
else
return;
+ if (ver->fcxctrl == 7) {
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ always_freerun = btc->ctrl.ctrl_v7.always_freerun;
+ } else {
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+ always_freerun = btc->ctrl.ctrl.always_freerun;
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
__func__, reason, mode);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
__func__, dm->wl_only, dm->bt_only);
/* Be careful to change the following function sequence!! */
- if (btc->ctrl.manual) {
+ if (btc->manual_ctrl) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return for Manual CTRL!!\n",
__func__);
return;
}
- if (btc->ctrl.igno_bt &&
+ if (igno_bt &&
(reason == BTC_RSN_UPDATE_BT_INFO ||
reason == BTC_RSN_UPDATE_BT_SCBD)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -5418,24 +5520,24 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
dm->freerun = false;
dm->cnt_dm[BTC_DCNT_RUN]++;
dm->fddt_train = BTC_FDDT_DISABLE;
- btc->ctrl.igno_bt = false;
bt->scan_rx_low_pri = false;
+ igno_bt = false;
- if (btc->ctrl.always_freerun) {
+ if (always_freerun) {
_action_freerun(rtwdev);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
if (dm->wl_only) {
_action_wl_only(rtwdev);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) {
_action_wl_off(rtwdev, mode);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
@@ -5525,6 +5627,10 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
exit:
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): exit\n", __func__);
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.igno_bt = igno_bt;
+ else
+ btc->ctrl.ctrl.igno_bt = igno_bt;
_action_common(rtwdev);
}
@@ -5560,16 +5666,26 @@ static void _set_init_info(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- dm->init_info.wl_only = (u8)dm->wl_only;
- dm->init_info.bt_only = (u8)dm->bt_only;
- dm->init_info.wl_init_ok = (u8)wl->status.map.init_ok;
- dm->init_info.dbcc_en = rtwdev->dbcc_en;
- dm->init_info.cx_other = btc->cx.other.type;
- dm->init_info.wl_guard_ch = chip->afh_guard_ch;
- dm->init_info.module = btc->mdinfo;
+ if (ver->fcxinit == 7) {
+ dm->init_info.init_v7.wl_only = (u8)dm->wl_only;
+ dm->init_info.init_v7.bt_only = (u8)dm->bt_only;
+ dm->init_info.init_v7.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.init_v7.cx_other = btc->cx.other.type;
+ dm->init_info.init_v7.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.init_v7.module = btc->mdinfo.md_v7;
+ } else {
+ dm->init_info.init.wl_only = (u8)dm->wl_only;
+ dm->init_info.init.bt_only = (u8)dm->bt_only;
+ dm->init_info.init.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.init.dbcc_en = rtwdev->dbcc_en;
+ dm->init_info.init.cx_other = btc->cx.other.type;
+ dm->init_info.init.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.init.module = btc->mdinfo.md;
+ }
}
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
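
The fcxinit/fcxctrl checks above all follow the same shape: a union keeps both the legacy and the v7 layout of a structure, and the firmware-reported format version selects which member to read or write. A standalone sketch of that dispatch; the field names and layouts below are invented for illustration, not the real rtw89 structures.

#include <stdint.h>
#include <stdio.h>

struct init_info_legacy { uint8_t wl_only; uint8_t dbcc_en; };
struct init_info_v7     { uint8_t wl_only; };	/* assume v7 dropped dbcc_en */

union init_info {
	struct init_info_legacy init;
	struct init_info_v7     init_v7;
};

/* Pick the member that matches the firmware's reported format version. */
static void fill_init_info(union init_info *info, int fcxinit, uint8_t wl_only)
{
	if (fcxinit == 7)
		info->init_v7.wl_only = wl_only;
	else
		info->init.wl_only = wl_only;
}

int main(void)
{
	union init_info info = { 0 };

	fill_init_info(&info, 7, 1);
	printf("wl_only=%u\n", info.init_v7.wl_only);
	return 0;
}
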
@@ -5578,11 +5694,15 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = btc->ver;
_reset_btc_var(rtwdev, BTC_RESET_ALL);
btc->dm.run_reason = BTC_RSN_NONE;
btc->dm.run_action = BTC_ACT_NONE;
- btc->ctrl.igno_bt = true;
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.igno_bt = true;
+ else
+ btc->ctrl.ctrl.igno_bt = true;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): mode=%d\n", __func__, mode);
@@ -6298,7 +6418,7 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
if (BTC_RSSI_LOW(link_info->rssi_state[i]))
rssi_map |= BIT(i);
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED &&
+ if (btc->ant_type == BTC_ANT_DEDICATED &&
BTC_RSSI_CHANGE(link_info->rssi_state[i]))
is_sta_change = true;
}
@@ -6489,13 +6609,16 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0;
+ u8 cv, rfe, iso, ant_num, ant_single_pos;
if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
return;
@@ -6545,11 +6668,24 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_main, ver_sub, ver_hotfix, id_branch,
bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
+ if (ver->fcxinit == 7) {
+ cv = md->md_v7.kt_ver;
+ rfe = md->md_v7.rfe_type;
+ iso = md->md_v7.ant.isolation;
+ ant_num = md->md_v7.ant.num;
+ ant_single_pos = md->md_v7.ant.single_pos;
+ } else {
+ cv = md->md.cv;
+ rfe = md->md.rfe_type;
+ iso = md->md.ant.isolation;
+ ant_num = md->md.ant.num;
+ ant_single_pos = md->md.ant.single_pos;
+ }
+
seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
- "[hw_info]", btc->mdinfo.cv, btc->mdinfo.rfe_type,
- btc->mdinfo.ant.isolation, btc->mdinfo.ant.num,
- (btc->mdinfo.ant.num > 1 ? "" : (btc->mdinfo.ant.single_pos ?
- "1Ant_Pos:S1, " : "1Ant_Pos:S0, ")));
+ "[hw_info]", cv, rfe, iso, ant_num,
+ ant_num > 1 ? "" :
+ ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
@@ -6722,20 +6858,26 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_info *wl = &cx->wl;
- struct rtw89_btc_module *module = &btc->mdinfo;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
u8 *afh = bt_linfo->afh_map;
u8 *afh_le = bt_linfo->afh_map_le;
+ u8 bt_pos;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
return;
+ if (ver->fcxinit == 7)
+ bt_pos = md->md_v7.bt_pos;
+ else
+ bt_pos = md->md.bt_pos;
+
seq_puts(m, "========== [BT Status] ==========\n");
seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ",
"[status]", bt->enable.now ? "Y" : "N",
bt->btg_type ? "Y" : "N",
- (bt->enable.now && (bt->btg_type != module->bt_pos) ?
+ (bt->enable.now && (bt->btg_type != bt_pos) ?
"(efuse-mismatch!!)" : ""),
(bt_linfo->status.map.connect ? "Y" : "N"));
@@ -6934,10 +7076,13 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(OFF_EQ1);
CASE_BTC_POLICY_STR(OFF_EQ2);
CASE_BTC_POLICY_STR(OFF_EQ3);
+ CASE_BTC_POLICY_STR(OFF_EQ4);
+ CASE_BTC_POLICY_STR(OFF_EQ5);
CASE_BTC_POLICY_STR(OFF_BWB0);
CASE_BTC_POLICY_STR(OFF_BWB1);
CASE_BTC_POLICY_STR(OFF_BWB2);
CASE_BTC_POLICY_STR(OFF_BWB3);
+ CASE_BTC_POLICY_STR(OFF_WL2);
CASE_BTC_POLICY_STR(OFFB_BWB0);
CASE_BTC_POLICY_STR(OFFE_DEF);
CASE_BTC_POLICY_STR(OFFE_DEF2);
@@ -7123,21 +7268,22 @@ static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ u8 igno_bt;
if (!(dm->coex_info_map & BTC_COEX_INFO_DM))
return;
seq_printf(m, "========== [Mechanism Status %s] ==========\n",
- (btc->ctrl.manual ? "(Manual)" : "(Auto)"));
+ (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
seq_printf(m,
" %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
"[status]",
- module->ant.type == BTC_ANT_SHARED ? "shared" : "dedicated",
+ btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
steps_to_str(dm->run_reason),
steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
@@ -7146,8 +7292,13 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_step(rtwdev, m);
+ if (ver->fcxctrl == 7)
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ else
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+
seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
- "[dm_flag]", dm->wl_only, dm->bt_only, btc->ctrl.igno_bt,
+ "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
dm->freerun, btc->lps, dm->wl_mimo_ps);
seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
@@ -7888,10 +8039,11 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
+ const struct rtw89_btc_ver *ver = btc->ver;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
- u16 pos_old, pos_new;
+ u16 pos_old, pos_new, trace_step;
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
@@ -7908,11 +8060,16 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
do {
switch (state) {
case 0:
+ if (ver->fcxctrl == 7 || ver->fcxctrl == 1)
+ trace_step = 50;
+ else
+ trace_step = btc->ctrl.ctrl.trace_step;
+
n_start = pos_old;
if (pos_new >= pos_old)
n_stop = pos_new;
else
- n_stop = btc->ctrl.trace_step - 1;
+ n_stop = trace_step - 1;
state = 1;
break;
@@ -8742,7 +8899,7 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n",
fw_suit->major_ver, fw_suit->minor_ver,
fw_suit->sub_ver, fw_suit->sub_idex);
- seq_printf(m, "manual %d\n", btc->ctrl.manual);
+ seq_printf(m, "manual %d\n", btc->manual_ctrl);
seq_puts(m, "=========================================\n");
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 46e25c6f88a6..13303830684e 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -7,6 +7,8 @@
#include "core.h"
+#define BTC_H2C_MAXLEN 2020
+
enum btc_mode {
BTC_MODE_NORMAL,
BTC_MODE_WL,
@@ -23,6 +25,7 @@ enum btc_wl_rfk_type {
BTC_WRFKT_DACK = 4,
BTC_WRFKT_RXDCK = 5,
BTC_WRFKT_TSSI = 6,
+ BTC_WRFKT_CHLK = 7,
};
#define NM_EXEC false
@@ -152,6 +155,10 @@ enum btc_lps_state {
#define BTC_REG_NOTFOUND 0xff
+#define R_BTC_ZB_COEX_TBL_0 0xE328
+#define R_BTC_ZB_COEX_TBL_1 0xE32c
+#define R_BTC_ZB_BREAK_TBL 0xE350
+
enum btc_ant_div_pos {
BTC_ANT_DIV_MAIN = 0,
BTC_ANT_DIV_AUX = 1,
@@ -180,6 +187,20 @@ enum btc_btgctrl_type {
BTC_BTGCTRL_BB_GNT_NOTFOUND,
};
+enum btc_wa_type {
+ BTC_WA_5G_HI_CH_RX = BIT(0),
+ BTC_WA_NULL_AP = BIT(1),
+ BTC_WA_HFP_ZB = BIT(2), /* HFP PTA req bit4 define issue */
+};
+
+enum btc_3cx_type {
+ BTC_3CX_NONE = 0,
+ BTC_3CX_BT2 = BIT(0),
+ BTC_3CX_ZB = BIT(1),
+ BTC_3CX_LTE = BIT(2),
+ BTC_3CX_MAX,
+};
+
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index fd527a249996..d474b8d5df3d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -372,7 +372,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
-void rtw89_set_channel(struct rtw89_dev *rtwdev)
+int rtw89_set_channel(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -399,7 +399,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
- return;
+ return -EINVAL;
}
roc_idx = atomic_read(&hal->roc_entity_idx);
@@ -426,6 +426,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
}
rtw89_set_entity_state(rtwdev, true);
+ return 0;
}
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1176,7 +1177,8 @@ static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
@@ -1329,7 +1331,8 @@ static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
@@ -1866,6 +1869,17 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
}
+static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif *rtwvif,
+ struct ieee80211_hdr *hdr, size_t len)
+{
+ struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr;
+
+ if (len < offsetof(typeof(*mgmt), u.beacon.variable))
+ return;
+
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
+}
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
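
rtw89_vif_sync_bcn_tsf() above guards the timestamp read with an offsetof() length check, since the received frame may be shorter than a full beacon. A standalone sketch of the same bounds check; the struct below is a simplified stand-in for struct ieee80211_mgmt, not the real definition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct beacon_fixed {
	uint8_t  hdr[24];	/* simplified 802.11 management header */
	uint64_t timestamp;
	uint16_t beacon_int;
	uint16_t capab;
	uint8_t  variable[];	/* IEs start here */
};

/* Only read the timestamp if the frame covers the fixed beacon fields. */
static int read_beacon_tsf(const uint8_t *buf, size_t len, uint64_t *tsf)
{
	if (len < offsetof(struct beacon_fixed, variable))
		return -1;	/* truncated: don't touch the timestamp */

	/* copy just the timestamp to avoid unaligned reads */
	memcpy(tsf, buf + offsetof(struct beacon_fixed, timestamp), sizeof(*tsf));
	return 0;
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	uint64_t tsf = 0;

	frame[24] = 0x11;	/* fake TSF byte */
	printf("%d tsf=%llu\n", read_beacon_tsf(frame, sizeof(frame), &tsf),
	       (unsigned long long)tsf);
	return 0;
}
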
@@ -1896,8 +1910,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
return;
if (ieee80211_is_beacon(hdr->frame_control)) {
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
+ }
pkt_stat->beacon_nr++;
}
@@ -3345,6 +3361,14 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
return ret;
}
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
}
@@ -3393,7 +3417,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3442,7 +3466,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
}
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3485,6 +3509,8 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "failed to send h2c general packet\n");
return ret;
}
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
}
return ret;
@@ -3611,7 +3637,8 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
};
const struct rtw89_chip_info *chip = rtwdev->chip;
- const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
+ const __le16 *highest = chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160) ?
+ highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -3640,34 +3667,34 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (chip->support_bw160)
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
-}
-#define RTW89_SBAND_IFTYPES_NR 2
+ if (ieee80211_hw_check(rtwdev->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+}
static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
enum nl80211_band band,
- struct ieee80211_supported_band *sband)
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
- struct ieee80211_sband_iftype_data *iftype_data;
bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
(chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
+ struct ieee80211_sta_he_cap *he_cap;
+ int nss = hal->rx_nss;
+ u8 *mac_cap_info;
+ u8 *phy_cap_info;
u16 mcs_map = 0;
int i;
- int nss = hal->rx_nss;
- int idx = 0;
-
- iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
- if (!iftype_data)
- return;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -3676,12 +3703,196 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
}
- for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
- struct ieee80211_sta_he_cap *he_cap;
- u8 *mac_cap_info;
- u8 *phy_cap_info;
+ he_cap = &iftype_data->he_cap;
+ mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
+ phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
+
+ he_cap->has_he = true;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+ mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
+ IEEE80211_HE_MAC_CAP2_BSR;
+ mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
+ mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
+ phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
+ phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
+ phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ phy_cap_info[5] = no_ng16 ? 0 :
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
+ phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
- switch (i) {
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data->he_6ghz_capa.capa = capa;
+ }
+}
+
+static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct ieee80211_eht_cap_elem_fixed *eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss;
+ struct ieee80211_sta_eht_cap *eht_cap;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool support_320mhz = false;
+ int sts = 8;
+ u8 val;
+
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ return;
+
+ if (band == NL80211_BAND_6GHZ &&
+ chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
+ support_320mhz = true;
+
+ eht_cap = &iftype_data->eht_cap;
+ eht_cap_elem = &eht_cap->eht_cap_elem;
+ eht_nss = &eht_cap->eht_mcs_nss_supp;
+
+ eht_cap->has_eht = true;
+
+ eht_cap_elem->mac_cap_info[0] =
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ eht_cap_elem->mac_cap_info[1] = 0;
+
+ eht_cap_elem->phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[0] |=
+ u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+ eht_cap_elem->phy_cap_info[1] =
+ u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] = 0;
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ u8_encode_bits(1, IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ eht_cap_elem->phy_cap_info[6] = 0;
+ eht_cap_elem->phy_cap_info[7] = 0;
+ eht_cap_elem->phy_cap_info[8] = 0;
+
+ val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
+ if (support_320mhz) {
+ eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs13_max_nss = val;
+ }
+}
+
+#define RTW89_SBAND_IFTYPES_NR 2
+
+static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_sband_iftype_data *iftype_data;
+ enum nl80211_iftype iftype;
+ int idx = 0;
+
+ iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ if (!iftype_data)
+ return;
+
+ for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+ switch (iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
break;
@@ -3694,92 +3905,10 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
break;
}
- iftype_data[idx].types_mask = BIT(i);
- he_cap = &iftype_data[idx].he_cap;
- mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
- phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
-
- he_cap->has_he = true;
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
- mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
- IEEE80211_HE_MAC_CAP2_BSR;
- mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
- mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- if (band == NL80211_BAND_2GHZ) {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- } else {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
- if (chip->support_bw160)
- phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- }
- phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
- phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
- phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
- if (i == NL80211_IFTYPE_AP)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
- phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
- if (chip->support_bw160)
- phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
- phy_cap_info[5] = no_ng16 ? 0 :
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
- phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
- phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1;
- phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
- if (chip->support_bw160)
- phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
- phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
- he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
- if (chip->support_bw160) {
- he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
- }
-
- if (band == NL80211_BAND_6GHZ) {
- __le16 capa;
+ iftype_data[idx].types_mask = BIT(iftype);
- capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
- le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
- IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
- iftype_data[idx].he_6ghz_capa.capa = capa;
- }
+ rtw89_init_he_cap(rtwdev, band, iftype, &iftype_data[idx]);
+ rtw89_init_eht_cap(rtwdev, band, iftype, &iftype_data[idx]);
idx++;
}
@@ -3800,7 +3929,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
if (!sband_2ghz)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
}
@@ -3810,7 +3939,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
}
@@ -3818,7 +3947,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
if (!sband_6ghz)
goto err;
- rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
}
@@ -3879,7 +4008,7 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
rtwdev = rtwvif->rtwdev;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
mutex_unlock(&rtwdev->mutex);
}
@@ -3944,7 +4073,6 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
- rtwdev->mac.qta_mode = RTW89_QTA_SCC;
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -3961,6 +4089,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
return ret;
rtw89_phy_init_bb_reg(rtwdev);
+ rtw89_chip_bb_postinit(rtwdev);
rtw89_phy_init_rf_reg(rtwdev, false);
rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
@@ -3983,6 +4112,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+ rtw89_chip_rfk_init_late(rtwdev);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
@@ -4078,6 +4208,15 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
+ rtwdev->dbcc_en = false;
+ rtwdev->mlo_dbcc_mode = MLO_DBCC_NOT_SUPPORT;
+ rtwdev->mac.qta_mode = RTW89_QTA_SCC;
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ rtwdev->dbcc_en = true;
+ rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
+ rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
+ }
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
@@ -4085,6 +4224,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
init_completion(&rtwdev->fw.req.completion);
+ init_completion(&rtwdev->rfk_wait.completion);
schedule_work(&rtwdev->load_firmware_work);
@@ -4290,6 +4430,7 @@ EXPORT_SYMBOL(rtw89_chip_info_setup);
static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
@@ -4324,8 +4465,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- /* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */
- ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -4362,6 +4503,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->max_remain_on_channel_duration = 1000;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
ret = rtw89_core_set_supported_band(rtwdev);
if (ret) {
@@ -4453,9 +4596,10 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);
if (no_chanctx) {
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->remain_on_channel = NULL;
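
The rtw89_init_eht_cap() hunk above splits the beamformee STS value (sts - 1) across two capability octets: bit 0 goes into the CAP0 80MHz mask and bits 2:1 into the CAP1 80MHz mask. A minimal userspace sketch of that split, assuming the usual mask placement from ieee80211.h (CAP0 bit 7, CAP1 bits 1:0) and using stand-ins for the kernel's u8_get_bits()/u8_encode_bits() helpers:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins that mirror the semantics of the kernel's u8_get_bits() and
 * u8_encode_bits(): extract/place a value against a contiguous bit mask.
 * Illustration only, not the kernel implementations. */
static uint8_t get_bits(uint8_t val, uint8_t mask)
{
	return (uint8_t)((val & mask) >> __builtin_ctz(mask));
}

static uint8_t encode_bits(uint8_t val, uint8_t mask)
{
	return (uint8_t)((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint8_t sts = 4;		/* illustrative: 4 beamformee STS on <= 80MHz */
	uint8_t v = sts - 1;		/* 0b011, the on-air encoding */
	uint8_t cap0_mask = 0x80;	/* assumed ..._CAP0_BEAMFORMEE_SS_80MHZ_MASK */
	uint8_t cap1_mask = 0x03;	/* assumed ..._CAP1_BEAMFORMEE_SS_80MHZ_MASK */

	/* bit 0 of the 3-bit field goes to phy_cap_info[0] ... */
	uint8_t cap0 = encode_bits(get_bits(v, 0x01), cap0_mask);
	/* ... and bits 2:1 go to phy_cap_info[1] */
	uint8_t cap1 = encode_bits(get_bits(v, 0x06), cap1_mask);

	/* prints 0x80 and 0x01 for sts = 4, the values ORed into the caps */
	printf("phy_cap_info[0] |= 0x%02x, phy_cap_info[1] |= 0x%02x\n", cap0, cap1);
	return 0;
}
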
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index ea6df859ba15..2e854c9af709 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,6 +17,7 @@ struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
struct rtw89_efuse_block_cfg;
+struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
@@ -32,6 +33,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define MASKDWORD 0xffffffff
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
+#define BYPASS_CR_DATA 0xbabecafe
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
@@ -878,7 +880,7 @@ enum rtw89_ps_mode {
#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
-#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
@@ -956,6 +958,9 @@ struct rtw89_port_reg {
u32 mbssid;
u32 mbssid_drop;
u32 tsf_sync;
+ u32 ptcl_dbg;
+ u32 ptcl_dbg_info;
+ u32 bcn_drop_all;
u32 hiq_win[RTW89_PORT_NUM];
};
@@ -1146,9 +1151,15 @@ struct rtw89_mac_ax_gnt {
u8 gnt_wl;
} __packed;
+struct rtw89_mac_ax_wl_act {
+ u8 wlan_act_en;
+ u8 wlan_act;
+};
+
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
struct rtw89_mac_ax_gnt band[RTW89_MAC_AX_COEX_GNT_NR];
+ struct rtw89_mac_ax_wl_act bt[RTW89_MAC_AX_COEX_GNT_NR];
};
enum rtw89_btc_ncnt {
@@ -1266,6 +1277,18 @@ struct rtw89_btc_ant_info {
u8 stream_cnt: 4;
};
+struct rtw89_btc_ant_info_v7 {
+ u8 type; /* shared, dedicated(non-shared) */
+ u8 num; /* antenna count */
+ u8 isolation;
+ u8 single_pos;/* wifi 1ss-1ant at 0:S0 or 1:S1 */
+
+ u8 diversity; /* only for wifi use 1-antenna */
+ u8 btg_pos; /* btg-circuit at 0:S0/1:S1/others:all */
+ u8 stream_cnt; /* spatial_stream count */
+ u8 rsvd;
+} __packed;
+
enum rtw89_tfc_dir {
RTW89_TFC_UL,
RTW89_TFC_DL,
@@ -1660,6 +1683,16 @@ struct rtw89_btc_dm_emap {
u32 wl_e2g_hang: 1;
u32 wl_ver_mismatch: 1;
u32 bt_ver_mismatch: 1;
+ u32 rfe_type0: 1;
+ u32 h2c_buffer_over: 1;
+ u32 bt_tx_hang: 1; /* for SNR too low bug, BT has no Tx req*/
+ u32 wl_no_sta_ntfy: 1;
+
+ u32 h2c_bmap_mismatch: 1;
+ u32 c2h_bmap_mismatch: 1;
+ u32 h2c_struct_invalid: 1;
+ u32 c2h_struct_invalid: 1;
+ u32 h2c_c2h_buffer_mismatch: 1;
};
union rtw89_btc_dm_error_map {
@@ -1708,6 +1741,7 @@ struct rtw89_btc_wl_info {
u8 cn_report;
u8 coex_mode;
+ bool bg_mode;
bool scbd_change;
u32 scbd;
};
@@ -1725,6 +1759,25 @@ struct rtw89_btc_module {
u8 kt_ver_adie;
};
+struct rtw89_btc_module_v7 {
+ u8 rfe_type;
+ u8 kt_ver;
+ u8 bt_solo;
+ u8 bt_pos; /* wl-end view: get from efuse, must compare bt.btg_type*/
+
+ u8 switch_type; /* WL/BT switch type: 0: internal, 1: external */
+ u8 wa_type; /* WA type: 0:none, 1: 51B 5G_Hi-Ch_Rx */
+ u8 kt_ver_adie;
+ u8 rsvd;
+
+ struct rtw89_btc_ant_info_v7 ant;
+} __packed;
+
+union rtw89_btc_module_info {
+ struct rtw89_btc_module md;
+ struct rtw89_btc_module_v7 md_v7;
+};
+
#define RTW89_BTC_DM_MAXSTEP 30
#define RTW89_BTC_DM_CNT_MAX (RTW89_BTC_DM_MAXSTEP * 8)
@@ -1747,6 +1800,25 @@ struct rtw89_btc_init_info {
u16 rsvd;
};
+struct rtw89_btc_init_info_v7 {
+ u8 wl_guard_ch;
+ u8 wl_only;
+ u8 wl_init_ok;
+ u8 rsvd3;
+
+ u8 cx_other;
+ u8 bt_only;
+ u8 pta_mode;
+ u8 pta_direction;
+
+ struct rtw89_btc_module_v7 module;
+} __packed;
+
+union rtw89_btc_init_info_u {
+ struct rtw89_btc_init_info init;
+ struct rtw89_btc_init_info_v7 init_v7;
+};
+
struct rtw89_btc_wl_tx_limit_para {
u16 enable;
u32 tx_time; /* unit: us */
@@ -2485,7 +2557,7 @@ struct rtw89_btc_dm {
struct rtw89_btc_fbtc_tdma tdma;
struct rtw89_btc_fbtc_tdma tdma_now;
struct rtw89_mac_ax_coex_gnt gnt;
- struct rtw89_btc_init_info init_info; /* pass to wl_fw if offload */
+ union rtw89_btc_init_info_u init_info; /* pass to wl_fw if offload */
struct rtw89_btc_rf_trx_para rf_trx_para;
struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
struct rtw89_btc_dm_step dm_step;
@@ -2534,6 +2606,18 @@ struct rtw89_btc_ctrl {
u32 rsvd: 12;
};
+struct rtw89_btc_ctrl_v7 {
+ u8 manual;
+ u8 igno_bt;
+ u8 always_freerun;
+ u8 rsvd;
+} __packed;
+
+union rtw89_btc_ctrl_list {
+ struct rtw89_btc_ctrl ctrl;
+ struct rtw89_btc_ctrl_v7 ctrl_v7;
+};
+
struct rtw89_btc_dbg {
/* cmd "rb" */
bool rb_done;
@@ -2706,7 +2790,9 @@ struct rtw89_btc_ver {
u8 fwlrole;
u8 frptmap;
u8 fcxctrl;
+ u8 fcxinit;
+ u8 drvinfo_type;
u16 info_buf;
u8 max_role_num;
};
@@ -2718,8 +2804,8 @@ struct rtw89_btc {
struct rtw89_btc_cx cx;
struct rtw89_btc_dm dm;
- struct rtw89_btc_ctrl ctrl;
- struct rtw89_btc_module mdinfo;
+ union rtw89_btc_ctrl_list ctrl;
+ union rtw89_btc_module_info mdinfo;
struct rtw89_btc_btf_fwinfo fwinfo;
struct rtw89_btc_dbg dbg;
@@ -2731,11 +2817,14 @@ struct rtw89_btc {
u32 bt_req_len;
u8 policy[RTW89_BTC_POLICY_MAXLEN];
+ u8 ant_type;
+ u8 btg_pos;
u16 policy_len;
u16 policy_type;
bool bt_req_en;
bool update_policy_force;
bool lps;
+ bool manual_ctrl;
};
enum rtw89_btc_hmsg {
@@ -2875,7 +2964,7 @@ struct rtw89_ba_cam_entry {
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
-#define RTW89_MAX_BA_CAM_NUM 8
+#define RTW89_MAX_BA_CAM_NUM 24
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -2932,6 +3021,7 @@ struct rtw89_sta {
struct ewma_evm evm_min[RF_PATH_MAX];
struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
@@ -3041,6 +3131,7 @@ struct rtw89_vif {
u8 bcn_hit_cond;
u8 hit_rule;
u8 last_noa_nr;
+ u64 sync_bcn_tsf;
bool offchan;
bool trigger;
bool lsig_txop;
@@ -3111,7 +3202,7 @@ struct rtw89_hci_ops {
void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
void (*ctrl_trxhci)(struct rtw89_dev *rtwdev, bool enable);
- int (*poll_txdma_ch)(struct rtw89_dev *rtwdev);
+ int (*poll_txdma_ch_idle)(struct rtw89_dev *rtwdev);
void (*clr_idx_all)(struct rtw89_dev *rtwdev);
void (*clear)(struct rtw89_dev *rtwdev, struct pci_dev *pdev);
void (*disable_intr)(struct rtw89_dev *rtwdev);
@@ -3131,6 +3222,7 @@ struct rtw89_chip_ops {
int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_preinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*bb_postinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -3152,7 +3244,9 @@ struct rtw89_chip_ops {
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfe_gpio)(struct rtw89_dev *rtwdev);
+ void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
+ void (*rfk_init_late)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
@@ -3196,6 +3290,22 @@ struct rtw89_chip_ops {
int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+ int (*h2c_default_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_assoc_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_update_beacon)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
+ int (*h2c_ba_cam)(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -3225,8 +3335,62 @@ enum rtw89_dma_ch {
RTW89_DMA_CH_NUM = 13
};
+#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))
+
+enum rtw89_mlo_dbcc_mode {
+ MLO_DBCC_NOT_SUPPORT = 1,
+ MLO_0_PLUS_2_1RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 1),
+ MLO_0_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 2),
+ MLO_1_PLUS_1_1RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 1),
+ MLO_1_PLUS_1_2RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2),
+ MLO_2_PLUS_0_1RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1),
+ MLO_2_PLUS_0_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 2),
+ MLO_2_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 2, 2),
+ DBCC_LEGACY = 0xffffffff,
+};
+
+enum rtw89_scan_be_operation {
+ RTW89_SCAN_OP_STOP,
+ RTW89_SCAN_OP_START,
+ RTW89_SCAN_OP_SETPARM,
+ RTW89_SCAN_OP_GETRPT,
+ RTW89_SCAN_OP_NUM
+};
+
+enum rtw89_scan_be_mode {
+ RTW89_SCAN_MODE_SA,
+ RTW89_SCAN_MODE_MACC,
+ RTW89_SCAN_MODE_NUM
+};
+
+enum rtw89_scan_be_opmode {
+ RTW89_SCAN_OPMODE_NONE,
+ RTW89_SCAN_OPMODE_TBTT,
+ RTW89_SCAN_OPMODE_INTV,
+ RTW89_SCAN_OPMODE_CNT,
+ RTW89_SCAN_OPMODE_NUM,
+};
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+ u8 num_macc_role;
+ u8 num_opch;
+ u8 repeat;
+ u16 norm_pd;
+ u16 slow_pd;
+ u16 norm_cy;
+ u8 opch_end;
+ u64 prohib_chan;
+ enum rtw89_phy_idx band;
+ enum rtw89_scan_be_operation operation;
+ enum rtw89_scan_be_mode scan_mode;
+ enum rtw89_mlo_dbcc_mode mlo_mode;
+};
+
enum rtw89_qta_mode {
RTW89_QTA_SCC,
+ RTW89_QTA_DBCC,
RTW89_QTA_DLFW,
RTW89_QTA_WOW,
@@ -3713,7 +3877,7 @@ struct rtw89_chip_info {
u32 rf_base_addr[2];
u8 support_chanctx_num;
u8 support_bands;
- bool support_bw160;
+ u16 support_bandwidths;
bool support_unii4;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
@@ -3790,6 +3954,7 @@ struct rtw89_chip_info {
const u32 *c2h_regs;
struct rtw89_reg_def c2h_counter_reg;
const struct rtw89_page_regs *page_regs;
+ u32 wow_reason_reg;
bool cfo_src_fd;
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
@@ -3838,7 +4003,7 @@ enum rtw89_host_rpr_mode {
RTW89_RPR_MODE_STF
};
-#define RTW89_COMPLETION_BUF_SIZE 24
+#define RTW89_COMPLETION_BUF_SIZE 40
#define RTW89_WAIT_COND_IDLE UINT_MAX
struct rtw89_completion_data {
@@ -3897,6 +4062,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_DEEP_PS,
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
+ RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
};
struct rtw89_fw_suit {
@@ -3957,6 +4123,19 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
};
+enum rtw89_fw_mss_dev_type {
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_INV = 0xFF,
+};
+
+struct rtw89_fw_secure {
+ bool secure_boot;
+ u32 sb_sel_mgn;
+ u8 mss_dev_type;
+ u8 mss_cust_idx;
+ u8 mss_key_num;
+};
+
struct rtw89_fw_info {
struct rtw89_fw_req_info req;
int fw_format;
@@ -3971,6 +4150,7 @@ struct rtw89_fw_info {
struct rtw89_fw_log log;
u32 feature_map;
struct rtw89_fw_elm_info elm_info;
+ struct rtw89_fw_secure sec;
};
#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
@@ -4045,6 +4225,7 @@ struct rtw89_tas_info {
struct rtw89_chanctx_cfg {
enum rtw89_sub_entity_idx idx;
+ int ref_count;
};
enum rtw89_chanctx_changes {
@@ -4064,13 +4245,16 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_MCC,
NUM_OF_RTW89_ENTITY_MODE,
- RTW89_ENTITY_MODE_INVALID = NUM_OF_RTW89_ENTITY_MODE,
+ RTW89_ENTITY_MODE_INVALID = -EINVAL,
+ RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
};
struct rtw89_sub_entity {
struct cfg80211_chan_def chandef;
struct rtw89_chan chan;
struct rtw89_chan_rcd rcd;
+
+ /* only assigned when running with chanctx_ops */
struct rtw89_chanctx_cfg *cfg;
};
@@ -4123,6 +4307,7 @@ enum rtw89_flags {
RTW89_FLAG_CMAC1_FUNC,
RTW89_FLAG_FW_RDY,
RTW89_FLAG_RUNNING,
+ RTW89_FLAG_PROBE_DONE,
RTW89_FLAG_BFEE_MON,
RTW89_FLAG_BFEE_EN,
RTW89_FLAG_BFEE_TIMER_KEEP,
@@ -4179,6 +4364,21 @@ struct rtw89_phy_stat {
struct rtw89_pkt_stat last_pkt_stat;
};
+enum rtw89_rfk_report_state {
+ RTW89_RFK_STATE_START = 0x0,
+ RTW89_RFK_STATE_OK = 0x1,
+ RTW89_RFK_STATE_FAIL = 0x2,
+ RTW89_RFK_STATE_TIMEOUT = 0x3,
+ RTW89_RFK_STATE_H2C_CMD_ERR = 0x4,
+};
+
+struct rtw89_rfk_wait_info {
+ struct completion completion;
+ ktime_t start_time;
+ enum rtw89_rfk_report_state state;
+ u8 version;
+};
+
#define RTW89_DACK_PATH_NR 2
#define RTW89_DACK_IDX_NR 2
#define RTW89_DACK_MSBK_NR 16
@@ -4194,15 +4394,18 @@ struct rtw89_dack_info {
bool msbk_timeout[RTW89_DACK_PATH_NR];
};
-#define RTW89_IQK_CHS_NR 2
-#define RTW89_IQK_PATH_NR 4
+#define RTW89_RFK_CHS_NR 3
struct rtw89_rfk_mcc_info {
- u8 ch[RTW89_IQK_CHS_NR];
- u8 band[RTW89_IQK_CHS_NR];
+ u8 ch[RTW89_RFK_CHS_NR];
+ u8 band[RTW89_RFK_CHS_NR];
+ u8 bw[RTW89_RFK_CHS_NR];
u8 table_idx;
};
+#define RTW89_IQK_CHS_NR 2
+#define RTW89_IQK_PATH_NR 4
+
struct rtw89_lck_info {
u8 thermal[RF_PATH_MAX];
};
@@ -4380,6 +4583,11 @@ struct rtw89_cfo_tracking_info {
u8 lock_cnt;
};
+enum rtw89_tssi_mode {
+ RTW89_TSSI_NORMAL = 0,
+ RTW89_TSSI_SCAN = 1,
+};
+
enum rtw89_tssi_alimk_band {
TSSI_ALIMK_2G = 0,
TSSI_ALIMK_5GL,
@@ -4589,6 +4797,7 @@ struct rtw89_hw_scan_info {
struct ieee80211_vif *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
struct rtw89_chan op_chan;
+ bool abort;
u32 last_chan_idx;
};
@@ -4605,6 +4814,48 @@ enum rtw89_phy_bb_gain_band {
RTW89_BB_GAIN_BAND_NR,
};
+enum rtw89_phy_gain_band_be {
+ RTW89_BB_GAIN_BAND_2G_BE = 0,
+ RTW89_BB_GAIN_BAND_5G_L_BE = 1,
+ RTW89_BB_GAIN_BAND_5G_M_BE = 2,
+ RTW89_BB_GAIN_BAND_5G_H_BE = 3,
+ RTW89_BB_GAIN_BAND_6G_L0_BE = 4,
+ RTW89_BB_GAIN_BAND_6G_L1_BE = 5,
+ RTW89_BB_GAIN_BAND_6G_M0_BE = 6,
+ RTW89_BB_GAIN_BAND_6G_M1_BE = 7,
+ RTW89_BB_GAIN_BAND_6G_H0_BE = 8,
+ RTW89_BB_GAIN_BAND_6G_H1_BE = 9,
+ RTW89_BB_GAIN_BAND_6G_UH0_BE = 10,
+ RTW89_BB_GAIN_BAND_6G_UH1_BE = 11,
+
+ RTW89_BB_GAIN_BAND_NR_BE,
+};
+
+enum rtw89_phy_bb_bw_be {
+ RTW89_BB_BW_20_40 = 0,
+ RTW89_BB_BW_80_160_320 = 1,
+
+ RTW89_BB_BW_NR_BE,
+};
+
+enum rtw89_bw20_sc {
+ RTW89_BW20_SC_20M = 1,
+ RTW89_BW20_SC_40M = 2,
+ RTW89_BW20_SC_80M = 4,
+ RTW89_BW20_SC_160M = 8,
+ RTW89_BW20_SC_320M = 16,
+};
+
+enum rtw89_cmac_table_bw {
+ RTW89_CMAC_BW_20M = 0,
+ RTW89_CMAC_BW_40M = 1,
+ RTW89_CMAC_BW_80M = 2,
+ RTW89_CMAC_BW_160M = 3,
+ RTW89_CMAC_BW_320M = 4,
+
+ RTW89_CMAC_BW_NR,
+};
+
enum rtw89_phy_bb_rxsc_num {
RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */
RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */
@@ -4627,6 +4878,27 @@ struct rtw89_phy_bb_gain_info {
[RTW89_BB_RXSC_NUM_160];
};
+struct rtw89_phy_bb_gain_info_be {
+ s8 lna_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [LNA_GAIN_NUM];
+ s8 tia_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [TIA_GAIN_NUM];
+ s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM + 1];
+ s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_20M];
+ s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_40M];
+ s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_80M];
+ s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_160M];
+};
+
struct rtw89_phy_efuse_gain {
bool offset_valid;
bool comp_valid;
@@ -4681,6 +4953,9 @@ struct rtw89_mcc_role {
struct rtw89_mcc_policy policy;
struct rtw89_mcc_limit limit;
+ /* only valid when running with FW MRC mechanism */
+ u8 slot_idx;
+
/* byte-array in LE order for FW */
u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)];
@@ -4724,7 +4999,11 @@ struct rtw89_mcc_sync {
bool enable;
u16 offset; /* TU */
u8 macid_src;
+ u8 band_src;
+ u8 port_src;
u8 macid_tgt;
+ u8 band_tgt;
+ u8 port_tgt;
};
struct rtw89_mcc_config {
@@ -4757,6 +5036,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
const struct rtw89_pci_info *pci_info;
@@ -4806,6 +5086,7 @@ struct rtw89_dev {
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
+ struct rtw89_rfk_wait_info rfk_wait;
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
@@ -4824,7 +5105,10 @@ struct rtw89_dev {
struct rtw89_env_monitor_info env_monitor;
struct rtw89_dig_info dig;
struct rtw89_phy_ch_info ch_info;
- struct rtw89_phy_bb_gain_info bb_gain;
+ union {
+ struct rtw89_phy_bb_gain_info ax;
+ struct rtw89_phy_bb_gain_info_be be;
+ } bb_gain;
struct rtw89_phy_efuse_gain efuse_gain;
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
@@ -4969,12 +5253,12 @@ static inline void rtw89_hci_ctrl_trxhci(struct rtw89_dev *rtwdev, bool enable)
rtwdev->hci.ops->ctrl_trxhci(rtwdev, enable);
}
-static inline int rtw89_hci_poll_txdma_ch(struct rtw89_dev *rtwdev)
+static inline int rtw89_hci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
{
int ret = 0;
- if (rtwdev->hci.ops->poll_txdma_ch)
- ret = rtwdev->hci.ops->poll_txdma_ch(rtwdev);
+ if (rtwdev->hci.ops->poll_txdma_ch_idle)
+ ret = rtwdev->hci.ops->poll_txdma_ch_idle(rtwdev);
return ret;
}
@@ -5437,6 +5721,14 @@ static inline void rtw89_chip_rfe_gpio(struct rtw89_dev *rtwdev)
chip->ops->rfe_gpio(rtwdev);
}
+static inline void rtw89_chip_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_hw_init)
+ chip->ops->rfk_hw_init(rtwdev);
+}
+
static inline
void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -5446,6 +5738,20 @@ void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
chip->ops->bb_preinit(rtwdev, phy_idx);
}
+static inline
+void rtw89_chip_bb_postinit(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->bb_postinit)
+ return;
+
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_0);
+
+ if (rtwdev->dbcc_en)
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_1);
+}
+
static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5462,6 +5768,14 @@ static inline void rtw89_chip_rfk_init(struct rtw89_dev *rtwdev)
chip->ops->rfk_init(rtwdev);
}
+static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_init_late)
+ chip->ops->rfk_init_late(rtwdev);
+}
+
static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5750,6 +6064,18 @@ out:
rcu_read_unlock();
}
+static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
+{
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case DBCC_LEGACY:
+ return true;
+ default:
+ return false;
+ }
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -5815,7 +6141,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
-void rtw89_set_channel(struct rtw89_dev *rtwdev);
+int rtw89_set_channel(struct rtw89_dev *rtwdev);
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_chan *chan);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
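
The rtw89_mlo_dbcc_mode values introduced above pack the link count on each baseband and the RF count into one integer. A quick check of that encoding with the macro copied from the hunk (the two modes echo their uses in this patch: MLO_2_PLUS_0_1RF is the DBCC default set in rtw89_core_init() for RTW89_CHIP_BE, and MLO_1_PLUS_1_2RF is one of the cases rtw89_is_mlo_1_1() accepts):

#include <stdio.h>

#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))

int main(void)
{
	/* 2 links on BB0, none on BB1, single RF -> 0x1002 */
	printf("MLO_2_PLUS_0_1RF = 0x%04x\n",
	       (unsigned int)MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1));
	/* one link per BB, two RFs -> 0x2011 */
	printf("MLO_1_PLUS_1_2RF = 0x%04x\n",
	       (unsigned int)MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2));
	return 0;
}
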
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 44829a148185..affffc4092ba 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -3427,14 +3427,17 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_btc *btc = &rtwdev->btc;
- bool btc_manual;
+ const struct rtw89_btc_ver *ver = btc->ver;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &btc_manual);
+ ret = kstrtobool_from_user(user_buf, count, &btc->manual_ctrl);
if (ret)
return ret;
- btc->ctrl.manual = btc_manual;
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.manual = btc->manual_ctrl;
+ else
+ btc->ctrl.ctrl.manual = btc->manual_ctrl;
return count;
}
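
The debugfs write above caches the user value in btc->manual_ctrl and then stores it into whichever layout of the btc->ctrl union matches the firmware's fcxctrl version. A reduced, self-contained sketch of that version-keyed union pattern, using simplified stand-in structs rather than the driver's real ones:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins: the legacy layout packs flags into bitfields,
 * the v7 layout (a __packed struct in the hunk for core.h) keeps one
 * byte per flag. Both occupy the same union. */
struct ctrl_legacy { unsigned int manual : 1; };
struct ctrl_v7 { unsigned char manual; };

union ctrl_list {
	struct ctrl_legacy ctrl;
	struct ctrl_v7 ctrl_v7;
};

static void set_manual(union ctrl_list *c, unsigned int fcxctrl, bool manual)
{
	/* The layouts overlap in memory, so the writer must know which
	 * firmware version is active before touching the union. */
	if (fcxctrl == 7)
		c->ctrl_v7.manual = manual;
	else
		c->ctrl.manual = manual;
}

int main(void)
{
	union ctrl_list c = {0};

	set_manual(&c, 7, true);
	printf("v7 manual = %u\n", (unsigned int)c.ctrl_v7.manual);
	return 0;
}
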
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
index 5c6787179bad..72416f56a071 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.h
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -23,5 +23,6 @@ int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev);
int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev);
int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle);
int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv);
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c
index 8e8b7cd315f7..0be26d5fdf7c 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse_be.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c
@@ -7,6 +7,31 @@
#include "mac.h"
#include "reg.h"
+#define EFUSE_EXTERNALPN_ADDR_BE 0x1580
+#define EFUSE_B1_MSSDEVTYPE_MASK GENMASK(3, 0)
+#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4)
+#define EFUSE_SERIALNUM_ADDR_BE 0x1581
+#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0)
+#define EFUSE_B2_MSSCUSTIDX1_MASK BIT(6)
+#define EFUSE_SB_CRYP_SEL_ADDR 0x1582
+#define EFUSE_SB_CRYP_SEL_SIZE 2
+#define EFUSE_SB_CRYP_SEL_DEFAULT 0xFFFF
+#define SB_SEL_MGN_MAX_SIZE 2
+#define EFUSE_SEC_BE_START 0x1580
+#define EFUSE_SEC_BE_SIZE 4
+
+enum rtw89_efuse_mss_dev_type {
+ MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ MSS_DEV_TYPE_FWSEC_WINLIN_INBOX = 0xC,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB = 0xA,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB = 0x9,
+ MSS_DEV_TYPE_FWSEC_NONWIN_INBOX = 0x6,
+};
+
+static const u32 sb_sel_mgn[SB_SEL_MGN_MAX_SIZE] = {
+ 0x8000100, 0xC000180
+};
+
static void rtw89_enable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -418,3 +443,120 @@ out_free:
return ret;
}
+
+static u16 get_sb_cryp_sel_idx(u16 sb_cryp_sel)
+{
+ u8 low_bit, high_bit, cnt_zero = 0;
+ u8 idx, sel_form_v, sel_idx_v;
+ u16 sb_cryp_sel_v = 0x0;
+
+ sel_form_v = u16_get_bits(sb_cryp_sel, MASKBYTE0);
+ sel_idx_v = u16_get_bits(sb_cryp_sel, MASKBYTE1);
+
+ for (idx = 0; idx < 4; idx++) {
+ low_bit = !!(sel_form_v & BIT(idx));
+ high_bit = !!(sel_form_v & BIT(7 - idx));
+ if (low_bit != high_bit)
+ return U16_MAX;
+ if (low_bit)
+ continue;
+
+ cnt_zero++;
+ if (cnt_zero == 1)
+ sb_cryp_sel_v = idx * 16;
+ else if (cnt_zero > 1)
+ return U16_MAX;
+ }
+
+ low_bit = u8_get_bits(sel_idx_v, 0x0F);
+ high_bit = u8_get_bits(sel_idx_v, 0xF0);
+
+ if ((low_bit ^ high_bit) != 0xF)
+ return U16_MAX;
+
+ return sb_cryp_sel_v + low_bit;
+}
+
+static u8 get_mss_dev_type_idx(struct rtw89_dev *rtwdev, u8 mss_dev_type)
+{
+ switch (mss_dev_type) {
+ case MSS_DEV_TYPE_FWSEC_WINLIN_INBOX:
+ mss_dev_type = 0x0;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB:
+ mss_dev_type = 0x1;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB:
+ mss_dev_type = 0x2;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONWIN_INBOX:
+ mss_dev_type = 0x3;
+ break;
+ case MSS_DEV_TYPE_FWSEC_DEF:
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF;
+ break;
+ default:
+ rtw89_warn(rtwdev, "unknown mss_dev_type %d", mss_dev_type);
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_INV;
+ break;
+ }
+
+ return mss_dev_type;
+}
+
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sec_addr = EFUSE_SEC_BE_START;
+ u32 sec_size = EFUSE_SEC_BE_SIZE;
+ u16 sb_cryp_sel, sb_cryp_sel_idx;
+ u8 sec_map[EFUSE_SEC_BE_SIZE];
+ u8 mss_dev_type;
+ u8 b1, b2;
+ int ret;
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, sec_map,
+ sec_addr, sec_size, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump secsel map\n");
+ return ret;
+ }
+
+ sb_cryp_sel = sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr] |
+ sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr + 1] << 8;
+ if (sb_cryp_sel == EFUSE_SB_CRYP_SEL_DEFAULT)
+ goto out;
+
+ sb_cryp_sel_idx = get_sb_cryp_sel_idx(sb_cryp_sel);
+ if (sb_cryp_sel_idx >= SB_SEL_MGN_MAX_SIZE) {
+ rtw89_warn(rtwdev, "invalid SB cryp sel idx %d\n", sb_cryp_sel_idx);
+ goto out;
+ }
+
+ sec->sb_sel_mgn = sb_sel_mgn[sb_cryp_sel_idx];
+
+ b1 = sec_map[EFUSE_EXTERNALPN_ADDR_BE - sec_addr];
+ b2 = sec_map[EFUSE_SERIALNUM_ADDR_BE - sec_addr];
+
+ mss_dev_type = u8_get_bits(b1, EFUSE_B1_MSSDEVTYPE_MASK);
+ sec->mss_cust_idx = 0x1F - (u8_get_bits(b1, EFUSE_B1_MSSCUSTIDX0_MASK) |
+ u8_get_bits(b2, EFUSE_B2_MSSCUSTIDX1_MASK) << 4);
+ sec->mss_key_num = 0xF - u8_get_bits(b2, EFUSE_B2_MSSKEYNUM_MASK);
+
+ sec->mss_dev_type = get_mss_dev_type_idx(rtwdev, mss_dev_type);
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_INV) {
+ rtw89_warn(rtwdev, "invalid mss_dev_type %d\n", mss_dev_type);
+ goto out;
+ }
+
+ sec->secure_boot = true;
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MSS secure_boot=%d dev_type=%d cust_idx=%d key_num=%d\n",
+ sec->secure_boot, sec->mss_dev_type, sec->mss_cust_idx,
+ sec->mss_key_num);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_efuse_read_fw_secure_be);
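
get_sb_cryp_sel_idx() above accepts the 16-bit selector only if the low byte mirrors exactly one cleared bit pair (bit i matching bit 7 - i) and the high byte's nibbles are one's complements, then combines them into an index into sb_sel_mgn[]. A standalone re-derivation of one accepted value, assuming MASKBYTE0/MASKBYTE1 pick the low and high byte respectively; the helper below is an illustration, not the driver function:

#include <stdio.h>
#include <stdint.h>

/* Illustration of the selector decoding: form selector in bits 7:0,
 * index selector in bits 15:8. Returns UINT16_MAX on a malformed value. */
static uint16_t decode_sel_idx(uint16_t sb_cryp_sel)
{
	uint8_t form = sb_cryp_sel & 0xff;
	uint8_t idx_sel = sb_cryp_sel >> 8;
	uint16_t base = 0;
	int zeros = 0;

	for (int i = 0; i < 4; i++) {
		int lo = !!(form & (1 << i));
		int hi = !!(form & (1 << (7 - i)));

		if (lo != hi)			/* mirrored pair must match */
			return UINT16_MAX;
		if (!lo && ++zeros == 1)
			base = i * 16;		/* first (and only) cleared pair */
		else if (!lo)
			return UINT16_MAX;	/* more than one cleared pair */
	}

	uint8_t lo_nib = idx_sel & 0x0f;
	uint8_t hi_nib = idx_sel >> 4;

	if ((lo_nib ^ hi_nib) != 0xf)		/* nibbles must be complements */
		return UINT16_MAX;

	return base + lo_nib;
}

int main(void)
{
	/* Hypothetical selector 0xe17e: low byte 0x7e clears only the
	 * bit0/bit7 pair (base 0); high byte 0xe1 has complementary nibbles
	 * with low nibble 1, so the result is 1 and the driver would pick
	 * sb_sel_mgn[1]. */
	printf("idx = %u\n", (unsigned int)decode_sel_idx(0xe17e));
	return 0;
}
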
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 09684cea9731..185cd339c085 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -13,6 +13,8 @@
#include "reg.h"
#include "util.h"
+static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -163,6 +165,161 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
return 0;
}
+static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr,
+ u32 rmp_tbl_size, u32 *key_idx)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sel_byte_idx;
+ u32 mss_sel_idx;
+ u8 sel_bit_idx;
+ int i;
+
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
+ if (!mss_hdr->defen)
+ return -ENOENT;
+
+ mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ } else {
+ if (mss_hdr->defen)
+ mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
+ else
+ mss_sel_idx = 0;
+ mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) +
+ sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ }
+
+ sel_byte_idx = mss_sel_idx >> 3;
+ sel_bit_idx = mss_sel_idx & 0x7;
+
+ if (sel_byte_idx >= rmp_tbl_size)
+ return -EFAULT;
+
+ if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
+ return -ENOENT;
+
+ *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
+
+ for (i = 0; i < sel_byte_idx; i++)
+ *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
+
+ return 0;
+}
+
+static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 rmp_tbl_size;
+ u32 key_sign_len;
+ u32 real_key_idx;
+ u32 sb_sel_ver;
+ int ret;
+
+ if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
+ rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
+ return -ENOENT;
+ }
+
+ if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
+ rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) *
+ mss_hdr->mssdev_max) >> 3;
+ if (mss_hdr->defen)
+ rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
+ } else {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
+ mss_hdr->rmpfmt);
+ return -EINVAL;
+ }
+
+ if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
+ rmp_tbl_size, (int)sizeof(*mss_hdr),
+ le32_to_cpu(mss_hdr->key_raw_offset));
+ return -EINVAL;
+ }
+
+ key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
+ if (!key_sign_len)
+ key_sign_len = 512;
+
+ if (info->dsp_checksum)
+ key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
+
+ *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
+ le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
+
+ if (!sec->secure_boot)
+ goto out;
+
+ sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
+ goto ignore;
+
+ ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
+ if (ret)
+ goto ignore;
+
+ section_info->key_addr = content + section_info->len +
+ le32_to_cpu(mss_hdr->key_raw_offset) +
+ key_sign_len * real_key_idx;
+ section_info->key_len = key_sign_len;
+ section_info->key_idx = real_key_idx;
+
+out:
+ if (info->secure_section_exist) {
+ section_info->ignore = true;
+ return 0;
+ }
+
+ info->secure_section_exist = true;
+
+ return 0;
+
+ignore:
+ section_info->ignore = true;
+
+ return 0;
+}
+
+static int __parse_security_section(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ int ret;
+
+ section_info->mssc =
+ le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
+
+ if (section_info->mssc == FORMATTED_MSSC) {
+ ret = __parse_formatted_mssc(rtwdev, info, section_info,
+ section, content, mssc_len);
+ if (ret)
+ return -EINVAL;
+ } else {
+ *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
+ if (info->dsp_checksum)
+ *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
+
+ info->secure_section_exist = true;
+ }
+
+ return 0;
+}
+
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
struct rtw89_fw_bin_info *info)
{
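
__get_mssc_key_idx() in the hunk above treats rmp_tbl[] as a bitmap of which key slots are actually present in the MSS pool and converts the selected slot into a pool position by counting the set bits that precede it. A small sketch of that rank computation with a made-up two-byte table:

#include <stdio.h>
#include <stdint.h>

/* Rank lookup: byte/bit index the selected slot, and the pool position is
 * the number of set bits before it. Table contents are invented here. */
static int bitmap_rank(const uint8_t *tbl, uint32_t tbl_size, uint32_t sel_idx,
		       uint32_t *key_idx)
{
	uint32_t byte = sel_idx >> 3;
	uint8_t bit = sel_idx & 0x7;
	uint32_t rank;

	if (byte >= tbl_size)
		return -1;
	if (!(tbl[byte] & (1 << bit)))
		return -1;			/* selected key not present */

	rank = __builtin_popcount(tbl[byte] & ((1 << bit) - 1));
	for (uint32_t i = 0; i < byte; i++)
		rank += __builtin_popcount(tbl[i]);

	*key_idx = rank;
	return 0;
}

int main(void)
{
	/* bits 0,3 set in byte 0 and bits 1,4 set in byte 1: four keys total */
	const uint8_t tbl[] = { 0x09, 0x12 };
	uint32_t key_idx;

	/* selector 12 = byte 1, bit 4 -> three set bits precede it -> 3 */
	if (!bitmap_rank(tbl, sizeof(tbl), 12, &key_idx))
		printf("key_idx = %u\n", key_idx);
	return 0;
}
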
@@ -173,10 +330,12 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
const u8 *fw_end = fw + len;
const u8 *bin;
u32 base_hdr_len;
- u32 mssc_len = 0;
+ u32 mssc_len;
+ int ret;
u32 i;
info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
+ info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
@@ -199,16 +358,9 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info = info->section_info;
for (i = 0; i < info->section_num; i++) {
section = &fw_hdr->sections[i];
+
section_info->type =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
- if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
- section_info->mssc =
- le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
- mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
- } else {
- section_info->mssc = 0;
- }
-
section_info->len =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
@@ -217,15 +369,40 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info->dladdr =
le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
section_info->addr = bin;
- bin += section_info->len;
+
+ if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
+ ret = __parse_security_section(rtwdev, info, section_info,
+ section, bin, &mssc_len);
+ if (ret)
+ return ret;
+ } else {
+ section_info->mssc = 0;
+ mssc_len = 0;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
+ i, section_info->type, section_info->len,
+ section_info->mssc, mssc_len, bin - fw);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
+ section_info->ignore, section_info->key_addr,
+ section_info->key_addr ?
+ section_info->key_addr - section_info->addr : 0,
+ section_info->key_len, section_info->key_idx);
+
+ bin += section_info->len + mssc_len;
section_info++;
}
- if (fw_end != bin + mssc_len) {
+ if (fw_end != bin) {
rtw89_err(rtwdev, "[ERR]fw bin size\n");
return -EINVAL;
}
+ if (!info->secure_section_exist)
+ rtw89_warn(rtwdev, "no firmware secure section\n");
+
return 0;
}
@@ -458,6 +635,8 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -919,9 +1098,56 @@ static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
len + H2C_HEADER_LEN));
}
-static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr *fw_hdr)
+{
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_W7_PART_SIZE);
+
+ return 0;
+}
+
+static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_v1 *fw_hdr)
{
+ struct rtw89_fw_hdr_section_info *section_info;
+ struct rtw89_fw_hdr_section_v1 *section;
+ u8 dst_sec_idx = 0;
+ u8 sec_idx;
+
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_V1_W7_PART_SIZE);
+
+ for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
+ section_info = &info->section_info[sec_idx];
+ section = &fw_hdr->sections[sec_idx];
+
+ if (section_info->ignore)
+ continue;
+
+ if (dst_sec_idx != sec_idx)
+ fw_hdr->sections[dst_sec_idx] = *section;
+
+ dst_sec_idx++;
+ }
+
+ le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
+
+ return (info->section_num - dst_sec_idx) * sizeof(*section);
+}
+
+static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
+{
+ u32 len = info->hdr_len - info->dynamic_hdr_len;
+ struct rtw89_fw_hdr_v1 *fw_hdr_v1;
+ const u8 *fw = fw_suit->data;
+ struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
+ u32 truncated;
u32 ret = 0;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -931,7 +1157,26 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
}
skb_put_data(skb, fw, len);
- SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
+
+ switch (fw_suit->hdr_ver) {
+ case 0:
+ fw_hdr = (struct rtw89_fw_hdr *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
+ break;
+ case 1:
+ fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto fail;
+ }
+
+ if (truncated) {
+ len -= truncated;
+ skb_trim(skb, len);
+ }
+
rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FWDL,
H2C_FUNC_MAC_FWHDR_DL, len);
@@ -950,12 +1195,14 @@ fail:
return ret;
}
-static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
- ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
+ ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
if (ret) {
rtw89_err(rtwdev, "[ERR]FW header download\n");
return ret;
@@ -979,9 +1226,21 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
const u8 *section = info->addr;
u32 residue_len = info->len;
+ bool copy_key = false;
u32 pkt_len;
int ret;
+ if (info->ignore)
+ return 0;
+
+ if (info->key_addr && info->key_len) {
+ if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
+ rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n",
+ info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len);
+ else
+ copy_key = true;
+ }
+
while (residue_len) {
if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
pkt_len = FWDL_SECTION_PER_PKT_LEN;
@@ -995,6 +1254,10 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
}
skb_put_data(skb, section, pkt_len);
+ if (copy_key)
+ memcpy(skb->data + pkt_len - info->key_len,
+ info->key_addr, info->key_len);
+
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
@@ -1101,7 +1364,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
struct rtw89_fw_suit *fw_suit)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_fw_bin_info info;
+ struct rtw89_fw_bin_info info = {};
int ret;
ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
@@ -1120,8 +1383,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
- info.dynamic_hdr_len);
+ ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
if (ret)
return ret;
@@ -1485,13 +1747,108 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
-#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
+
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
+ h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
+ h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
+ h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
+ h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
+ h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
+ h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
+ h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
+
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam *h2c;
u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
u8 entry_idx;
int ret;
@@ -1509,32 +1866,34 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
- SET_BA_CAM_MACID(skb->data, macid);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
+
+ h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
else
- SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
+ h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
if (!valid)
goto end;
- SET_BA_CAM_VALID(skb->data, valid);
- SET_BA_CAM_TID(skb->data, params->tid);
+ h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
if (params->buf_size > 64)
- SET_BA_CAM_BMAP_SIZE(skb->data, 4);
+ h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
else
- SET_BA_CAM_BMAP_SIZE(skb->data, 0);
+ h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 1);
- SET_BA_CAM_SSN(skb->data, params->ssn);
+ h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
- SET_BA_CAM_STD_EN(skb->data, 1);
- SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
}
end:
@@ -1542,7 +1901,7 @@ end:
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1556,31 +1915,35 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
u8 entry_idx, u8 uid)
{
+ struct rtw89_h2c_ba_cam *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
- SET_BA_CAM_VALID(skb->data, 1);
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
- SET_BA_CAM_UID(skb->data, uid);
- SET_BA_CAM_BAND(skb->data, 0);
- SET_BA_CAM_STD_EN(skb->data, 0);
+ h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
+ le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1609,14 +1972,132 @@ void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
}
}
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam_v1 *h2c;
+ u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 entry_idx;
+ u8 bmap_size;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
+ if (ret) {
+ /* it still works even if we don't have static BA CAM, because
+ * hardware can create dynamic BA CAM automatically.
+ */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
+
+ if (params->buf_size > 512)
+ bmap_size = 10;
+ else if (params->buf_size > 256)
+ bmap_size = 8;
+ else if (params->buf_size > 64)
+ bmap_size = 4;
+ else
+ bmap_size = 0;
+
+ h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
+ le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
+ le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
+
+ entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
+ le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
+
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx)
+{
+ struct rtw89_h2c_ba_cam_init *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
+
+ h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
+ le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
+ le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
struct sk_buff *skb;
- u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
- BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ u32 comp = 0;
int ret;
+ if (enable)
+ comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
+ BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
+ BIT(RTW89_FW_LOG_COMP_SCAN);
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
@@ -1815,6 +2296,50 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ rtwvif->sub_entity_idx);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ch_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
+
+ h2c->info[0].central_ch = chan->channel;
+ h2c->info[0].pri_ch = chan->primary_channel;
+ h2c->info[0].band = chan->band_type;
+ h2c->info[0].bw = chan->band_width;
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_p2p_noa_desc *desc,
@@ -1892,11 +2417,12 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
struct sk_buff *skb;
- u8 macid = rtwvif->mac_id;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
@@ -1937,6 +2463,91 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
+
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
+
+ h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
+ le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
+ le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
+
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
+
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
+
+ h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
+
+ h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
+
+ h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
+
+ h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
+ le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
+ le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
+ h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
+
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
+
+ h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
+ h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
+
+ h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
+ h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, u8 *pads)
@@ -1950,9 +2561,6 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
u16 ppe;
int i;
- if (!sta->deflink.he_cap.has_he)
- return;
-
ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
if (!ppe_th) {
@@ -2011,7 +2619,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
int ret;
memset(pads, 0, sizeof(pads));
- if (sta)
+ if (sta && sta->deflink.he_cap.has_he)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
if (vif->p2p)
@@ -2073,6 +2681,246 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
+
+static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, u8 *pads)
+{
+ u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+ u16 ppe_thres_hdr;
+ u8 ppe16, ppe8;
+ u8 n, idx, sh;
+ u8 ru_bitmap;
+ bool ppe_th;
+ u16 ppe;
+ int i;
+
+ ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
+ if (!ppe_th) {
+ u8 pad;
+
+ pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+ pads[i] = pad;
+
+ return;
+ }
+
+ ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres);
+ ru_bitmap = u16_get_bits(ppe_thres_hdr,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n = hweight8(ru_bitmap);
+ n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
+ (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
+
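+ /* Walk the EHT PPE Thresholds field: bandwidths not covered by the RU
+  * index bitmap default to padding code 1; otherwise pull the
+  * PPET16/PPET8 pair for this NSS and map it to a nominal packet
+  * padding code (2 when only PPET8 is "none", 1 when PPET8 is set,
+  * 0 when both are "none").
+  */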
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
+ if (!(ru_bitmap & BIT(i))) {
+ pads[i] = 1;
+ continue;
+ }
+
+ idx = n >> 3;
+ sh = n & 7;
+ n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
+
+ ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx);
+ ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
+ ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+
+ if (ppe16 != 7 && ppe8 == 7)
+ pads[i] = 2;
+ else if (ppe8 != 7)
+ pads[i] = 1;
+ else
+ pads[i] = 0;
+ }
+}
+
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u8 pads[RTW89_PPE_BW_NUM];
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 lowest_rate;
+ int ret;
+
+ memset(pads, 0, sizeof(pads));
+ if (sta) {
+ if (sta->deflink.eht_cap.has_eht)
+ __get_sta_eht_pkt_padding(rtwdev, sta, pads);
+ else if (sta->deflink.he_cap.has_he)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ }
+
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
+ le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
+ CCTLINFO_G7_W0_DISDATAFB);
+
+ h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+
+ h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+
+ h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+
+ h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
+ }
+
+ if (vif->bss_conf.eht_support) {
+ u16 punct = vif->bss_conf.chanreq.oper.punctured;
+
+ h2c->w4 |= le32_encode_bits(~punct,
+ CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ }
+
+ h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+
+ h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+
+ if (sta) {
+ h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he,
+ CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
+
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 agg_num = 0;
+ u8 ba_bmap = 0;
+ int ret;
+ u8 tid;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
+ if (agg_num == 0)
+ agg_num = rtwsta->ampdu_params[tid].agg_num;
+ else
+ agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
+ }
+
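+ /* Encode the smallest negotiated A-MPDU aggregation number across all
+  * active TIDs as the firmware BA bitmap size code (thresholds at
+  * 32/64/128/256/512/1024 MPDUs).
+  */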
+ if (agg_num <= 0x20)
+ ba_bmap = 3;
+ else if (agg_num > 0x20 && agg_num <= 0x40)
+ ba_bmap = 0;
+ else if (agg_num > 0x40 && agg_num <= 0x80)
+ ba_bmap = 1;
+ else if (agg_num > 0x80 && agg_num <= 0x100)
+ ba_bmap = 2;
+ else if (agg_num > 0x100 && agg_num <= 0x200)
+ ba_bmap = 4;
+ else if (agg_num > 0x200 && agg_num <= 0x400)
+ ba_bmap = 5;
+
+ h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
@@ -2155,18 +3003,20 @@ fail:
return ret;
}
-#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif->sub_entity_idx);
- struct sk_buff *skb;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd *h2c;
struct sk_buff *skb_beacon;
- u16 tim_offset;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
int bcn_total_len;
u16 beacon_rate;
+ u16 tim_offset;
void *noa_data;
u8 noa_len;
int ret;
@@ -2192,23 +3042,27 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
skb_put_data(skb_beacon, noa_data, noa_len);
}
- bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
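+ /* ieee80211_beacon_get_tim() reports tim_offset from the start of the
+  * frame; the H2C carries the group IE offset relative to the frame
+  * body, so subtract the 802.11 header length.
+  */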
+ hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
dev_kfree_skb_any(skb_beacon);
return -ENOMEM;
}
- skb_put(skb, H2C_BCN_BASE_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
- SET_BCN_UPD_PORT(skb->data, rtwvif->port);
- SET_BCN_UPD_MBSSID(skb->data, 0);
- SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
- SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
- SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
- SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
- SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
- SET_BCN_UPD_RATE(skb->data, beacon_rate);
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
skb_put_data(skb, skb_beacon->data, skb_beacon->len);
dev_kfree_skb_any(skb_beacon);
@@ -2227,6 +3081,90 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
+
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd_be *h2c;
+ struct sk_buff *skb_beacon;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int bcn_total_len;
+ u16 beacon_rate;
+ u16 tim_offset;
+ void *noa_data;
+ u8 noa_len;
+ int ret;
+
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
+ NULL, 0);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
+
+ noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
+ if (noa_len &&
+ (noa_len <= skb_tailroom(skb_beacon) ||
+ pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
+ skb_put_data(skb_beacon, noa_data, noa_len);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
+ bcn_total_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -2277,45 +3215,93 @@ fail:
return ret;
}
-#define H2C_JOIN_INFO_LEN 4
+static enum rtw89_fw_sta_type
+rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (!sta)
+ goto by_vif;
+
+ if (sta->deflink.eht_cap.has_eht)
+ return RTW89_FW_BE_STA;
+ else if (sta->deflink.he_cap.has_he)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+
+by_vif:
+ if (vif->bss_conf.eht_support)
+ return RTW89_FW_BE_STA;
+ else if (vif->bss_conf.he_support)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+}
+
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role = rtwvif->self_role;
+ enum rtw89_fw_sta_type sta_type;
u8 net_type = rtwvif->net_type;
+ struct rtw89_h2c_join_v1 *h2c_v1;
+ struct rtw89_h2c_join *h2c;
+ u32 len = sizeof(*h2c);
+ bool format_v1 = false;
int ret;
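+ /* BE (Wi-Fi 7) chips take the extended join info (v1), which appends
+  * the station type on top of the legacy layout.
+  */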
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ len = sizeof(*h2c_v1);
+ format_v1 = true;
+ }
+
if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
self_role = RTW89_SELF_ROLE_AP_CLIENT;
net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, mac_id);
- SET_JOININFO_OP(skb->data, dis_conn);
- SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
- SET_JOININFO_WMM(skb->data, rtwvif->wmm);
- SET_JOININFO_TGR(skb->data, rtwvif->trigger);
- SET_JOININFO_ISHESTA(skb->data, 0);
- SET_JOININFO_DLBW(skb->data, 0);
- SET_JOININFO_TF_MAC_PAD(skb->data, 0);
- SET_JOININFO_DL_T_PE(skb->data, 0);
- SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, net_type);
- SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, self_role);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_join *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
+ le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
+ le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) |
+ le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
+ le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
+ le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
+ le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
+ if (!format_v1)
+ goto done;
+
+ h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
+
+ sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta);
+
+ h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
+ h2c_v1->w2 = 0;
+
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_JOININFO, 0, 1,
- H2C_JOIN_INFO_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -2368,24 +3354,49 @@ fail:
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause)
{
- struct rtw89_fw_macid_pause_grp h2c = {{0}};
- u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
+ struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
+ struct rtw89_fw_macid_pause_grp *h2c;
+ __le32 set = cpu_to_le32(BIT(sh));
+ u8 h2c_macid_pause_id;
struct sk_buff *skb;
+ u32 len;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
+ len = sizeof(*h2c_new);
+ } else {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
+ len = sizeof(*h2c);
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
return -ENOMEM;
}
- h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
- if (pause)
- h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
- skb_put_data(skb, &h2c, len);
+ skb_put(skb, len);
+
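+ /* Firmware with the MACID_PAUSE_SLEEP feature takes a combined command
+  * that drives the pause and sleep bitmaps together; older firmware
+  * falls back to the pause-only format.
+  */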
+ if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
+ h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
+
+ h2c_new->n[0].pause_mask_grp[grp] = set;
+ h2c_new->n[0].sleep_mask_grp[grp] = set;
+ if (pause) {
+ h2c_new->n[0].pause_grp[grp] = set;
+ h2c_new->n[0].sleep_grp[grp] = set;
+ }
+ } else {
+ h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
+
+ h2c->mask_grp[grp] = set;
+ if (pause)
+ h2c->pause_grp[grp] = set;
+ }
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
- H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
+ h2c_macid_pause_id, 1, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
@@ -2516,6 +3527,8 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
+ s32 thold = RTW89_DEFAULT_CQM_THOLD;
+ u32 hyst = RTW89_DEFAULT_CQM_HYST;
struct rtw89_h2c_bcnfltr *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -2536,14 +3549,19 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
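+ /* Fall back to driver defaults when userspace has not configured CQM
+  * RSSI threshold/hysteresis.
+  */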
+ if (bss_conf->cqm_rssi_hyst)
+ hyst = bss_conf->cqm_rssi_hyst;
+ if (bss_conf->cqm_rssi_thold)
+ thold = bss_conf->cqm_rssi_thold;
+
h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
RTW89_H2C_BCNFLTR_W0_MODE) |
le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
- le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
- le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
+ le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
+ le32_encode_bits(thold + MAX_RSSI,
RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
@@ -2735,11 +3753,11 @@ fail:
return ret;
}
-int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_init_info *init_info = &dm->init_info;
+ struct rtw89_btc_init_info *init_info = &dm->init_info.init;
struct rtw89_btc_module *module = &init_info->module;
struct rtw89_btc_ant_info *ant = &module->ant;
struct rtw89_h2c_cxinit *h2c;
@@ -2755,7 +3773,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
skb_put(skb, len);
h2c = (struct rtw89_h2c_cxinit *)skb->data;
- h2c->hdr.type = CXDRVINFO_INIT;
+ h2c->hdr.type = type;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
h2c->ant_type = ant->type;
@@ -2802,12 +3820,53 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
+ struct rtw89_h2c_cxinit_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxinit;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ h2c->init = *init_info;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -2832,7 +3891,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -2888,7 +3947,7 @@ fail:
#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -2912,7 +3971,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -2978,7 +4037,7 @@ fail:
#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
(4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -3002,7 +4061,7 @@ int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -3062,11 +4121,11 @@ fail:
}
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
+ struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
struct sk_buff *skb;
u8 *cmd;
int ret;
@@ -3079,7 +4138,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
@@ -3106,8 +4165,47 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
+ struct rtw89_h2c_cxctrl_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxctrl;
+ h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
+ h2c->ctrl = *ctrl;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
@@ -3123,7 +4221,7 @@ int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
@@ -3163,7 +4261,7 @@ fail:
}
#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -3180,7 +4278,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
@@ -3296,62 +4394,163 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
return 0;
}
-#define H2C_LEN_SCAN_LIST_OFFLOAD 4
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem *elem;
struct rtw89_mac_chinfo *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
struct sk_buff *skb;
- int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
unsigned int cond;
- u8 *cmd;
+ int skb_len;
int ret;
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
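+ /* The command body is a fixed chinfo header followed by ch_num
+  * fixed-size channel elements.
+  */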
+ skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
- cmd = skb->data;
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
+
+ h2c->ch_num = ch_num;
+ h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
+ le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
+ le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
+ le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
+ le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
+ le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
+
+ elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem_be *elem;
+ struct rtw89_mac_chinfo_be *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
+ struct sk_buff *skb;
+ unsigned int cond;
+ int skb_len;
+ int ret;
+
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
+ skb_len = struct_size(h2c, elem, ch_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
- RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
- /* in unit of 4 bytes */
- RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+ h2c->ch_num = ch_num;
+ h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
+ h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
list_for_each_entry(ch_info, chan_list, list) {
- cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
-
- RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
- RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
- RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
- RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
- RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
- RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
- RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
- RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
- RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
- RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
- RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
- RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
- RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
- RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
- RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
- RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
- RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
- RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
- RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
- RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
- RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
- RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch,
+ RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
+ le32_encode_bits(ch_info->pause_data,
+ RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num,
+ RTW89_H2C_CHINFO_BE_W1_RANDOM) |
+ le32_encode_bits(ch_info->notify_action,
+ RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
+ le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
+ RTW89_H2C_CHINFO_BE_W1_PROBE) |
+ le32_encode_bits(ch_info->leave_crit,
+ RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
+ le32_encode_bits(ch_info->chkpt_timer,
+ RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
+
+ elem->w2 = le32_encode_bits(ch_info->leave_time,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
+ le32_encode_bits(ch_info->leave_th,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
+ le32_encode_bits(ch_info->tx_pkt_ctrl,
+ RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
+
+ elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
+
+ elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
+ le32_encode_bits(ch_info->fw_probe0_ssids,
+ RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
+
+ elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
+ le32_encode_bits(ch_info->fw_probe0_bssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3410,7 +4609,10 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
+ if (option->enable)
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3421,6 +4623,169 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ u8 i, idx;
+
+ sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband) {
+ option->prohib_chan = U64_MAX; /* no 6 GHz sband: prohibit all */
+ return;
+ }
+
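+ /* Each bit of prohib_chan covers one group of four 6 GHz channels
+  * ((hw_value - 1) / 4), so disabled channels can be excluded from the
+  * offloaded scan.
+  */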
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED) {
+ idx = (chan->hw_value - 1) / 4;
+ option->prohib_chan |= BIT(idx);
+ }
+ }
+}
+
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_scanofld_be_macc_role *macc_role;
+ struct rtw89_chan *op = &scan_info->op_chan;
+ struct rtw89_h2c_scanofld_be_opch *opch;
+ struct rtw89_h2c_scanofld_be *h2c;
+ struct sk_buff *skb;
+ u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
+ u8 opch_size = sizeof(*opch) * option->num_opch;
+ u8 probe_id[NUM_NL80211_BANDS];
+ unsigned int cond;
+ void *ptr;
+ int ret;
+ u32 len;
+ u8 i;
+
+ rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+
+ /* nothing below fills probe_id; preset the "no packet" sentinel so the
+  * W3/W4 fields do not encode uninitialized stack data
+  */
+ memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+
+ len = sizeof(*h2c) + macc_role_size + opch_size;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
+ ptr = skb->data;
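+ /* The payload is a fixed header followed by num_macc_role and then
+  * num_opch variable entries; ptr tracks the current write position in
+  * that tail.
+  */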
+
+ h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
+ le32_encode_bits(option->scan_mode,
+ RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
+ le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
+ le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
+
+ h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
+ le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
+ le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
+
+ h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
+ le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
+ le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
+
+ h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
+ le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
+
+ h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
+ le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+
+ h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
+
+ h2c->w6 = le32_encode_bits(option->prohib_chan,
+ RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
+ h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
+ RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
+ ptr += sizeof(*h2c);
+
+ for (i = 0; i < option->num_macc_role; i++) {
+ macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i];
+ macc_role->w0 =
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
+ ptr += sizeof(*macc_role);
+ }
+
+ for (i = 0; i < option->num_opch; i++) {
+ opch = ptr;
+ opch->w0 = le32_encode_bits(rtwvif->mac_id,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
+ le32_encode_bits(option->band,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
+ le32_encode_bits(rtwvif->port,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
+ le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
+ le32_encode_bits(true,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
+ le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
+
+ opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
+ le32_encode_bits(op->band_type,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
+ le32_encode_bits(op->band_width,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
+ le32_encode_bits(0x3,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
+ le32_encode_bits(op->primary_channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
+ le32_encode_bits(op->channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
+
+ opch->w2 = le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
+ le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
+ le32_encode_bits(2,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
+
+ opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+ ptr += sizeof(*opch);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD_BE, 1, 1,
+ len);
+
+ if (option->enable)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
+ return ret;
+ }
+
+ return 0;
+}
+
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page)
@@ -3497,6 +4862,328 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info *h2c;
+ u8 tbl_sel = rfk_mcc->table_idx;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 tbl, path;
+ u32 val32;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+
+ h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
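+ /* Mirror the driver's RFK MCC channel/band bookkeeping into the notify
+  * payload for every RF path, plus the currently selected table entry.
+  */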
+ BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
+
+ for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]);
+ h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]);
+ }
+ }
+
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+ h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ }
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
+ h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel0 = cpu_to_le32(val32);
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel1 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod0 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod1 = cpu_to_le32(val32);
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c->mlo_1_1 = cpu_to_le32(1);
+
+ h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_tssi *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
+
+ h2c->len = cpu_to_le16(len);
+ h2c->phy = phy_idx;
+ h2c->ch = chan->channel;
+ h2c->bw = chan->band_width;
+ h2c->band = chan->band_type;
+ h2c->hwtx_en = true;
+ h2c->cv = hal->cv;
+ h2c->tssi_mode = tssi_mode;
+
+ rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
+ rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_iqk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_dpk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->dpk_enable = true;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_txgapk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
+
+ h2c->len = len;
+ h2c->ktype = 2;
+ h2c->phy = phy_idx;
+ h2c->kpath = RF_AB;
+ h2c->band = chan->band_type;
+ h2c->bw = chan->band_width;
+ h2c->ch = chan->channel;
+ h2c->cv = hal->cv;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_dack *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dack *)skb->data;
+
+ h2c->len = cpu_to_le32(len);
+ h2c->phy = cpu_to_le32(phy_idx);
+ h2c->type = cpu_to_le32(0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_rxdck *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->is_afe = false;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -3600,7 +5287,7 @@ static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
default:
return false;
case RTW89_C2H_CAT_MAC:
- return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
+ return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
case RTW89_C2H_CAT_OUTSRC:
return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
}
@@ -4050,8 +5737,66 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
-static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected)
+static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_be *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0, i;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ if (info->channel_6ghz &&
+ ch_info->pri_ch != info->channel_6ghz)
+ continue;
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ }
+
+ if (ch_info->ch_band == RTW89_BAND_6G) {
+ if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
+ !ch_info->is_psc) {
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ if (!req->duration_mandatory)
+ ch_info->period -= RTW89_DWELL_TIME_6G;
+ }
+ }
+
+ for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
+ ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period =
+ max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_warn(rtwdev, "Channel type out of bound\n");
+ break;
+ }
+}
+
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo *ch_info, *tmp;
@@ -4074,7 +5819,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
goto out;
}
- if (req->duration_mandatory)
+ if (req->duration)
ch_info->period = req->duration;
else if (channel->band == NL80211_BAND_6GHZ)
ch_info->period = RTW89_CHANNEL_TIME_6G +
@@ -4127,9 +5872,69 @@ out:
return ret;
}
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ enum rtw89_chan_type type;
+ int list_len, ret;
+ bool random_seq;
+ u32 idx;
+
+ random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
+ INIT_LIST_HEAD(&chan_list);
+
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (req->duration)
+ ch_info->period = req->duration;
+ else if (channel->band == NL80211_BAND_6GHZ)
+ ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
+ else
+ ch_info->period = RTW89_CHANNEL_TIME;
+
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
+
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+
+ rtwdev->scan_info.last_chan_idx = idx;
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
@@ -4137,7 +5942,7 @@ static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "Update probe request failed\n");
goto out;
}
- ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
+ ret = mac->add_chan_list(rtwdev, rtwvif, connected);
out:
return ret;
}
@@ -4154,9 +5959,11 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
rtwdev->scan_info.scanning_vif = vif;
rtwdev->scan_info.last_chan_idx = 0;
+ rtwdev->scan_info.abort = false;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
@@ -4181,10 +5988,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct cfg80211_scan_info info = {
.aborted = aborted,
};
- struct rtw89_vif *rtwvif;
if (!vif)
return;
@@ -4197,22 +6004,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_core_scan_complete(rtwdev, vif, true);
ieee80211_scan_completed(rtwdev->hw, &info);
ieee80211_wake_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
rtw89_release_pkt_list(rtwdev);
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
scan_info->last_chan_idx = 0;
scan_info->scanning_vif = NULL;
+ scan_info->abort = false;
rtw89_chanctx_proceed(rtwdev);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
- rtw89_hw_scan_offload(rtwdev, vif, false);
- rtw89_hw_scan_complete(rtwdev, vif, true);
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ int ret;
+
+ scan_info->abort = true;
+
+ ret = rtw89_hw_scan_offload(rtwdev, vif, false);
+ if (ret)
+ rtw89_hw_scan_complete(rtwdev, vif, true);
}
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
@@ -4231,6 +6045,7 @@ static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_scan_option opt = {0};
struct rtw89_vif *rtwvif;
bool connected;
@@ -4248,7 +6063,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (ret)
goto out;
}
- ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
+ opt.scan_mode = RTW89_SCAN_MODE_SA;
+ opt.band = RTW89_PHY_0;
+ opt.num_macc_role = 0;
+ opt.mlo_mode = rtwdev->mlo_dbcc_mode;
+ opt.num_opch = connected ? 1 : 0;
+ opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
+ }
+
+ ret = mac->scan_offload(rtwdev, &opt, rtwvif);
out:
return ret;
}
@@ -4922,6 +6748,372 @@ int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
+static
+u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
+ struct rtw89_h2c_mrc_add_slot *slot_h2c)
+{
+ bool fill_h2c = !!slot_h2c;
+ unsigned int i;
+
+ if (!fill_h2c)
+ goto calc_len;
+
+ slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
+ RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
+ le32_encode_bits(slot_arg->courtesy_en,
+ RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
+ le32_encode_bits(slot_arg->role_num,
+ RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
+ slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
+ le32_encode_bits(slot_arg->courtesy_target,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
+
+ for (i = 0; i < slot_arg->role_num; i++) {
+ slot_h2c->roles[i].w0 =
+ le32_encode_bits(slot_arg->roles[i].macid,
+ RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
+ le32_encode_bits(slot_arg->roles[i].role_type,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].is_master,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
+ le32_encode_bits(slot_arg->roles[i].en_tx_null,
+ RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
+ slot_h2c->roles[i].w1 =
+ le32_encode_bits(slot_arg->roles[i].central_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
+ le32_encode_bits(slot_arg->roles[i].primary_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
+ le32_encode_bits(slot_arg->roles[i].bw,
+ RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
+ le32_encode_bits(slot_arg->roles[i].band,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].null_early,
+ RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
+ le32_encode_bits(true,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
+ slot_h2c->roles[i].macid_main_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
+ slot_h2c->roles[i].macid_paired_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
+ }
+
+calc_len:
+ return struct_size(slot_h2c, roles, slot_arg->role_num);
+}
+
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_h2c_mrc_add *h2c_head;
+ struct sk_buff *skb;
+ unsigned int i;
+ void *tmp;
+ u32 len;
+ int ret;
+
+ len = sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ tmp = skb->data;
+
+ h2c_head = tmp;
+ h2c_head->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
+ le32_encode_bits(arg->sch_type,
+ RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
+ le32_encode_bits(arg->btc_in_sch,
+ RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
+
+ tmp += sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_ADD_MRC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
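
rtw89_fw_h2c_mrc_add_slot() above is called twice per slot: once with a NULL destination just to return the encoded size, and once with a real buffer to fill. A standalone sketch of that size-then-fill pattern, under assumed names and a deliberately simplified record layout (not the real H2C format):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slot_arg {
	uint8_t duration;
	unsigned int role_num;
	uint8_t roles[4];
};

struct slot_rec {
	uint8_t duration;
	uint8_t role_num;
	uint8_t roles[];	/* flexible array, like roles[] above */
};

/* Return the encoded size; fill *rec only when it is non-NULL. */
static size_t encode_slot(const struct slot_arg *arg, struct slot_rec *rec)
{
	if (rec) {
		rec->duration = arg->duration;
		rec->role_num = arg->role_num;
		memcpy(rec->roles, arg->roles, arg->role_num);
	}
	return sizeof(*rec) + arg->role_num * sizeof(arg->roles[0]);
}

int main(void)
{
	struct slot_arg slots[2] = {
		{ .duration = 30, .role_num = 1, .roles = { 5 } },
		{ .duration = 70, .role_num = 2, .roles = { 7, 9 } },
	};
	size_t len = 0;
	uint8_t *buf, *pos;
	int i;

	/* Pass 1: size only, mirroring the slot_h2c == NULL calls. */
	for (i = 0; i < 2; i++)
		len += encode_slot(&slots[i], NULL);

	buf = calloc(1, len);
	if (!buf)
		return 1;

	/* Pass 2: fill, advancing by each slot's own encoded size. */
	pos = buf;
	for (i = 0; i < 2; i++)
		pos += encode_slot(&slots[i], (struct slot_rec *)pos);

	printf("encoded %zu bytes\n", len);	/* 3 + 4 = 7 */
	free(buf);
	return 0;
}
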
+
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_start *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_start *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_START_W0_SCH_IDX) |
+ le32_encode_bits(arg->old_sch_idx,
+ RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_START_W0_ACTION);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_START_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_del *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_del *)skb->data;
+
+ h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_DEL_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_req_tsf *h2c;
+ struct rtw89_mac_mrc_tsf_rpt *tmp;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, infos, arg->num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
+
+ h2c->req_tsf_num = arg->num;
+ for (i = 0; i < arg->num; i++)
+ h2c->infos[i] =
+ u8_encode_bits(arg->infos[i].band,
+ RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
+ u8_encode_bits(arg->infos[i].port,
+ RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_REQ_TSF, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
+ if (ret)
+ return ret;
+
+ tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
+ *rpt = *tmp;
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_bitmap *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
+ le32_encode_bits(arg->macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
+ h2c->w1 = le32_encode_bits(arg->client_macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg)
+{
+ struct rtw89_h2c_mrc_sync *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
+
+ h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
+ le32_encode_bits(arg->src.port,
+ RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
+ le32_encode_bits(arg->src.band,
+ RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
+ le32_encode_bits(arg->dest.port,
+ RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
+ le32_encode_bits(arg->dest.band,
+ RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
+ h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_SYNC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_duration *h2c;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, slots, arg->slot_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ for (i = 0; i < arg->slot_num; i++) {
+ h2c->slots[i] =
+ le32_encode_bits(arg->slots[i].slot_idx,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
+ le32_encode_bits(arg->slots[i].duration,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_DURATION, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 01016588b1fc..44311f65b4fa 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -64,6 +64,8 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_H2CREG_SCH_TX_EN_W1_MASK GENMASK(15, 0)
#define RTW89_H2CREG_SCH_TX_EN_W1_BAND BIT(16)
+#define RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN GENMASK(23, 16)
+
#define RTW89_H2CREG_MAX 4
#define RTW89_C2HREG_MAX 4
#define RTW89_C2HREG_HDR_LEN 2
@@ -95,7 +97,9 @@ enum rtw89_mac_h2c_type {
RTW89_FWCMD_H2CREG_FUNC_FWERR,
RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM,
- RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN
+ RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP = 0x6,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL = 0xA,
};
enum rtw89_mac_c2h_type {
@@ -104,7 +108,8 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_ERR_MSG,
RTW89_FWCMD_C2HREG_FUNC_PHY_CAP,
RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
- RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
+ RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK = 0xA,
+ RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF,
};
enum rtw89_fw_c2h_category {
@@ -149,6 +154,7 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_TWT,
RTW89_FW_LOG_COMP_RF,
RTW89_FW_LOG_COMP_MCC = 20,
+ RTW89_FW_LOG_COMP_SCAN = 28,
};
enum rtw89_pkt_offload_op {
@@ -169,6 +175,16 @@ enum rtw89_scanofld_notify_reason {
RTW89_SCAN_ENTER_CH_NOTIFY,
RTW89_SCAN_LEAVE_CH_NOTIFY,
RTW89_SCAN_END_SCAN_NOTIFY,
+ RTW89_SCAN_REPORT_NOTIFY,
+ RTW89_SCAN_CHKPT_NOTIFY,
+ RTW89_SCAN_ENTER_OP_NOTIFY,
+ RTW89_SCAN_LEAVE_OP_NOTIFY,
+};
+
+enum rtw89_scanofld_status {
+ RTW89_SCAN_STATUS_NOTIFY,
+ RTW89_SCAN_STATUS_SUCCESS,
+ RTW89_SCAN_STATUS_FAIL,
};
enum rtw89_chan_type {
@@ -184,6 +200,9 @@ enum rtw89_p2pps_action {
RTW89_P2P_ACT_TERMINATE = 3,
};
+#define RTW89_DEFAULT_CQM_HYST 4
+#define RTW89_DEFAULT_CQM_THOLD -70
+
enum rtw89_bcn_fltr_offload_mode {
RTW89_BCN_FLTR_OFFLOAD_MODE_0 = 0,
RTW89_BCN_FLTR_OFFLOAD_MODE_1,
@@ -216,6 +235,10 @@ struct rtw89_fw_hdr_section_info {
u32 dladdr;
u32 mssc;
u8 type;
+ bool ignore;
+ const u8 *key_addr;
+ u32 key_len;
+ u32 key_idx;
};
struct rtw89_fw_bin_info {
@@ -223,6 +246,8 @@ struct rtw89_fw_bin_info {
u32 hdr_len;
bool dynamic_hdr_en;
u32 dynamic_hdr_len;
+ bool dsp_checksum;
+ bool secure_section_exist;
struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM];
};
@@ -231,6 +256,15 @@ struct rtw89_fw_macid_pause_grp {
__le32 mask_grp[4];
} __packed;
+struct rtw89_fw_macid_pause_sleep_grp {
+ struct {
+ __le32 pause_grp[4];
+ __le32 pause_mask_grp[4];
+ __le32 sleep_grp[4];
+ __le32 sleep_mask_grp[4];
+ } __packed n[4];
+} __packed;
+
#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_CHANNEL_TIME_6G 20
@@ -243,6 +277,7 @@ struct rtw89_fw_macid_pause_grp {
#define RTW89_SCANOFLD_MAX_IE_LEN 512
#define RTW89_SCANOFLD_PKT_NONE 0xFF
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
#define RTW89_SCAN_LIST_GUARD 4
#define RTW89_SCAN_LIST_LIMIT \
@@ -274,9 +309,32 @@ struct rtw89_mac_chinfo {
bool is_psc;
};
-struct rtw89_scan_option {
- bool enable;
- bool target_ch_mode;
+struct rtw89_mac_chinfo_be {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 ch_band:2;
+ u8 dfs_ch:1;
+ u8 pause_data:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 notify_action:5;
+ u8 probe_id;
+ u8 leave_crit;
+ u8 chkpt_timer;
+ u8 leave_time;
+ u8 leave_th;
+ u16 tx_pkt_ctrl;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u8 sw_def;
+ u16 fw_probe0_ssids;
+ u16 fw_probe0_shortssids;
+ u16 fw_probe0_bssids;
+
+ struct list_head list;
+ bool is_psc;
};
struct rtw89_pktofld_info {
@@ -419,6 +477,7 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
#define FWDL_SECURITY_SECTION_TYPE 9
#define FWDL_SECURITY_SIGLEN 512
+#define FWDL_SECURITY_CHKSUM_LEN 8
struct rtw89_fw_dynhdr_sec {
__le32 w0;
@@ -472,6 +531,7 @@ struct rtw89_fw_hdr {
#define FW_HDR_W4_MIN GENMASK(31, 24)
#define FW_HDR_W5_YEAR GENMASK(31, 0)
#define FW_HDR_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_W7_DYN_HDR BIT(16)
#define FW_HDR_W7_CMD_VERSERION GENMASK(31, 24)
@@ -489,6 +549,7 @@ struct rtw89_fw_hdr_section_v1 {
#define FWSECTION_HDR_V1_W1_CHECKSUM BIT(28)
#define FWSECTION_HDR_V1_W1_REDL BIT(29)
#define FWSECTION_HDR_V1_W2_MSSC GENMASK(7, 0)
+#define FORMATTED_MSSC 0xFF
#define FWSECTION_HDR_V1_W2_BBMCU_IDX GENMASK(27, 24)
struct rtw89_fw_hdr_v1 {
@@ -521,12 +582,42 @@ struct rtw89_fw_hdr_v1 {
#define FW_HDR_V1_W5_YEAR GENMASK(15, 0)
#define FW_HDR_V1_W5_HDR_SIZE GENMASK(31, 16)
#define FW_HDR_V1_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_V1_W6_DSP_CHKSUM BIT(24)
+#define FW_HDR_V1_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_V1_W7_DYN_HDR BIT(16)
-static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
-{
- le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
-}
+enum rtw89_fw_mss_pool_rmp_tbl_type {
+ MSS_POOL_RMP_TBL_BITMASK = 0x0,
+ MSS_POOL_RMP_TBL_RECORD = 0x1,
+};
+
+#define FWDL_MSS_POOL_DEFKEYSETS_SIZE 8
+
+struct rtw89_fw_mss_pool_hdr {
+ u8 signature[8]; /* equal to mss_signature[] */
+ __le32 rmp_tbl_offset;
+ __le32 key_raw_offset;
+ u8 defen;
+ u8 rsvd[3];
+ u8 rmpfmt; /* enum rtw89_fw_mss_pool_rmp_tbl_type */
+ u8 mssdev_max;
+ __le16 keypair_num;
+ __le16 msscust_max;
+ __le16 msskey_num_max;
+ __le32 rsvd3;
+ u8 rmp_tbl[];
+} __packed;
+
+union rtw89_fw_section_mssc_content {
+ struct {
+ u8 pad[58];
+ __le32 v;
+ } __packed sb_sel_ver;
+ struct {
+ u8 pad[60];
+ __le16 v;
+ } __packed key_sign_len;
+} __packed;
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
@@ -1198,6 +1289,149 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+struct rtw89_h2c_cctlinfo_ud_g7 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define CCTLINFO_G7_C0_MACID GENMASK(6, 0)
+#define CCTLINFO_G7_C0_OP BIT(7)
+
+#define CCTLINFO_G7_W0_DATARATE GENMASK(11, 0)
+#define CCTLINFO_G7_W0_DATA_GI_LTF GENMASK(14, 12)
+#define CCTLINFO_G7_W0_TRYRATE BIT(15)
+#define CCTLINFO_G7_W0_ARFR_CTRL GENMASK(17, 16)
+#define CCTLINFO_G7_W0_DIS_HE1SS_STBC BIT(18)
+#define CCTLINFO_G7_W0_ACQ_RPT_EN BIT(20)
+#define CCTLINFO_G7_W0_MGQ_RPT_EN BIT(21)
+#define CCTLINFO_G7_W0_ULQ_RPT_EN BIT(22)
+#define CCTLINFO_G7_W0_TWTQ_RPT_EN BIT(23)
+#define CCTLINFO_G7_W0_FORCE_TXOP BIT(24)
+#define CCTLINFO_G7_W0_DISRTSFB BIT(25)
+#define CCTLINFO_G7_W0_DISDATAFB BIT(26)
+#define CCTLINFO_G7_W0_NSTR_EN BIT(27)
+#define CCTLINFO_G7_W0_AMPDU_DENSITY GENMASK(31, 28)
+#define CCTLINFO_G7_W0_ALL (GENMASK(31, 20) | GENMASK(18, 0))
+#define CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W1_RTS_TXCNT_LMT GENMASK(15, 12)
+#define CCTLINFO_G7_W1_RTSRATE GENMASK(27, 16)
+#define CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE GENMASK(31, 28)
+#define CCTLINFO_G7_W1_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W2_DATA_TX_CNT_LMT GENMASK(5, 0)
+#define CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL BIT(6)
+#define CCTLINFO_G7_W2_MAX_AGG_NUM_SEL BIT(7)
+#define CCTLINFO_G7_W2_RTS_EN BIT(8)
+#define CCTLINFO_G7_W2_CTS2SELF_EN BIT(9)
+#define CCTLINFO_G7_W2_CCA_RTS GENMASK(11, 10)
+#define CCTLINFO_G7_W2_HW_RTS_EN BIT(12)
+#define CCTLINFO_G7_W2_RTS_DROP_DATA_MODE GENMASK(14, 13)
+#define CCTLINFO_G7_W2_PRELD_EN BIT(15)
+#define CCTLINFO_G7_W2_AMPDU_MAX_LEN GENMASK(26, 16)
+#define CCTLINFO_G7_W2_UL_MU_DIS BIT(27)
+#define CCTLINFO_G7_W2_AMPDU_MAX_TIME GENMASK(31, 28)
+#define CCTLINFO_G7_W2_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W3_MAX_AGG_NUM GENMASK(7, 0)
+#define CCTLINFO_G7_W3_DATA_BW GENMASK(10, 8)
+#define CCTLINFO_G7_W3_DATA_BW_ER BIT(11)
+#define CCTLINFO_G7_W3_BA_BMAP GENMASK(14, 12)
+#define CCTLINFO_G7_W3_VCS_STBC BIT(15)
+#define CCTLINFO_G7_W3_VO_LFTIME_SEL GENMASK(18, 16)
+#define CCTLINFO_G7_W3_VI_LFTIME_SEL GENMASK(21, 19)
+#define CCTLINFO_G7_W3_BE_LFTIME_SEL GENMASK(24, 22)
+#define CCTLINFO_G7_W3_BK_LFTIME_SEL GENMASK(27, 25)
+#define CCTLINFO_G7_W3_AMPDU_TIME_SEL BIT(28)
+#define CCTLINFO_G7_W3_AMPDU_LEN_SEL BIT(29)
+#define CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL BIT(30)
+#define CCTLINFO_G7_W3_LSIG_TXOP_EN BIT(31)
+#define CCTLINFO_G7_W3_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W4_MULTI_PORT_ID GENMASK(2, 0)
+#define CCTLINFO_G7_W4_BYPASS_PUNC BIT(3)
+#define CCTLINFO_G7_W4_MBSSID GENMASK(7, 4)
+#define CCTLINFO_G7_W4_DATA_DCM BIT(8)
+#define CCTLINFO_G7_W4_DATA_ER BIT(9)
+#define CCTLINFO_G7_W4_DATA_LDPC BIT(10)
+#define CCTLINFO_G7_W4_DATA_STBC BIT(11)
+#define CCTLINFO_G7_W4_A_CTRL_BQR BIT(12)
+#define CCTLINFO_G7_W4_A_CTRL_BSR BIT(14)
+#define CCTLINFO_G7_W4_A_CTRL_CAS BIT(15)
+#define CCTLINFO_G7_W4_ACT_SUBCH_CBW GENMASK(31, 16)
+#define CCTLINFO_G7_W4_ALL (GENMASK(31, 14) | GENMASK(12, 0))
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 GENMASK(1, 0)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 GENMASK(3, 2)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 GENMASK(5, 4)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 GENMASK(7, 6)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4 GENMASK(9, 8)
+#define CCTLINFO_G7_W5_SR_RATE GENMASK(14, 10)
+#define CCTLINFO_G7_W5_TID_DISABLE GENMASK(23, 16)
+#define CCTLINFO_G7_W5_ADDR_CAM_INDEX GENMASK(31, 24)
+#define CCTLINFO_G7_W5_ALL (GENMASK(31, 16) | GENMASK(14, 0))
+#define CCTLINFO_G7_W6_AID12_PAID GENMASK(11, 0)
+#define CCTLINFO_G7_W6_RESP_REF_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W6_ULDL BIT(31)
+#define CCTLINFO_G7_W6_ALL (BIT(31) | GENMASK(23, 0))
+#define CCTLINFO_G7_W7_NC GENMASK(2, 0)
+#define CCTLINFO_G7_W7_NR GENMASK(5, 3)
+#define CCTLINFO_G7_W7_NG GENMASK(7, 6)
+#define CCTLINFO_G7_W7_CB GENMASK(9, 8)
+#define CCTLINFO_G7_W7_CS GENMASK(11, 10)
+#define CCTLINFO_G7_W7_CSI_STBC_EN BIT(13)
+#define CCTLINFO_G7_W7_CSI_LDPC_EN BIT(14)
+#define CCTLINFO_G7_W7_CSI_PARA_EN BIT(15)
+#define CCTLINFO_G7_W7_CSI_FIX_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W7_CSI_BW GENMASK(31, 29)
+#define CCTLINFO_G7_W7_ALL (GENMASK(31, 29) | GENMASK(27, 13) | GENMASK(11, 0))
+#define CCTLINFO_G7_W8_ALL_ACK_SUPPORT BIT(0)
+#define CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT BIT(1)
+#define CCTLINFO_G7_W8_BSR_OM_UPD_EN BIT(2)
+#define CCTLINFO_G7_W8_MACID_FWD_IDC BIT(3)
+#define CCTLINFO_G7_W8_AZ_SEC_EN BIT(4)
+#define CCTLINFO_G7_W8_CSI_SEC_EN BIT(5)
+#define CCTLINFO_G7_W8_FIX_UL_ADDRCAM_IDX BIT(6)
+#define CCTLINFO_G7_W8_CTRL_CNT_VLD BIT(7)
+#define CCTLINFO_G7_W8_CTRL_CNT GENMASK(11, 8)
+#define CCTLINFO_G7_W8_RESP_SEC_TYPE GENMASK(15, 12)
+#define CCTLINFO_G7_W8_ALL GENMASK(15, 0)
+/* W9~13 are reserved */
+#define CCTLINFO_G7_W14_VO_CURR_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W14_VI_CURR_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W14_BE_CURR_RATE_L GENMASK(31, 24)
+#define CCTLINFO_G7_W14_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W15_BE_CURR_RATE_H GENMASK(3, 0)
+#define CCTLINFO_G7_W15_BK_CURR_RATE GENMASK(15, 4)
+#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
+
static inline void SET_DCTL_MACID_V1(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
@@ -1500,105 +1734,98 @@ static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
GENMASK(31, 24));
}
-static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
-}
-
-static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
-}
-
-static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
-}
-
-static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
-}
-
-static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
-}
-
-static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
-}
-
-static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
-}
-
-static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
-}
+struct rtw89_h2c_bcn_upd {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
-}
+#define RTW89_H2C_BCN_UPD_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_W2_CSA_OFST GENMASK(31, 17)
+
+struct rtw89_h2c_bcn_upd_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 w16;
+ __le32 w17;
+ __le32 w18;
+ __le32 w19;
+ __le32 w20;
+ __le32 w21;
+ __le32 w22;
+ __le32 w23;
+ __le32 w24;
+ __le32 w25;
+ __le32 w26;
+ __le32 w27;
+ __le32 w28;
+ __le32 w29;
+} __packed;
-static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
-}
+#define RTW89_H2C_BCN_UPD_BE_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_BE_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_BE_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_BE_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID_EXT GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_BE_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_BE_W2_CSA_OFST GENMASK(31, 17)
+#define RTW89_H2C_BCN_UPD_BE_W3_MLIE_CSA_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W3_CRITICAL_UPD_FLAG_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP1_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP2_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP3_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP4_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP5_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP6_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_VAP7_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
@@ -1620,70 +1847,46 @@ static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
}
-static inline void SET_JOININFO_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_JOININFO_OP(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(8));
-}
-
-static inline void SET_JOININFO_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(9));
-}
-
-static inline void SET_JOININFO_WMM(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10));
-}
-
-static inline void SET_JOININFO_TGR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(12));
-}
-
-static inline void SET_JOININFO_ISHESTA(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(13));
-}
-
-static inline void SET_JOININFO_DLBW(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14));
-}
-
-static inline void SET_JOININFO_TF_MAC_PAD(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16));
-}
-
-static inline void SET_JOININFO_DL_T_PE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18));
-}
-
-static inline void SET_JOININFO_PORT_ID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21));
-}
+enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */
+ RTW89_FW_N_AC_STA = 0,
+ RTW89_FW_AX_STA = 1,
+ RTW89_FW_BE_STA = 2,
+};
-static inline void SET_JOININFO_NET_TYPE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24));
-}
+struct rtw89_h2c_join {
+ __le32 w0;
+} __packed;
-static inline void SET_JOININFO_WIFI_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26));
-}
+struct rtw89_h2c_join_v1 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30));
-}
+#define RTW89_H2C_JOININFO_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W0_OP BIT(8)
+#define RTW89_H2C_JOININFO_W0_BAND BIT(9)
+#define RTW89_H2C_JOININFO_W0_WMM GENMASK(11, 10)
+#define RTW89_H2C_JOININFO_W0_TGR BIT(12)
+#define RTW89_H2C_JOININFO_W0_ISHESTA BIT(13)
+#define RTW89_H2C_JOININFO_W0_DLBW GENMASK(15, 14)
+#define RTW89_H2C_JOININFO_W0_TF_MAC_PAD GENMASK(17, 16)
+#define RTW89_H2C_JOININFO_W0_DL_T_PE GENMASK(20, 18)
+#define RTW89_H2C_JOININFO_W0_PORT_ID GENMASK(23, 21)
+#define RTW89_H2C_JOININFO_W0_NET_TYPE GENMASK(25, 24)
+#define RTW89_H2C_JOININFO_W0_WIFI_ROLE GENMASK(29, 26)
+#define RTW89_H2C_JOININFO_W0_SELF_ROLE GENMASK(31, 30)
+#define RTW89_H2C_JOININFO_W1_STA_TYPE GENMASK(2, 0)
+#define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3)
+#define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4)
+#define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12)
+#define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13)
+#define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14)
+#define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15)
+#define RTW89_H2C_JOININFO_W1_EMLSR_PADDING GENMASK(18, 16)
+#define RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY GENMASK(21, 19)
+#define RTW89_H2C_JOININFO_W2_MACID_EXT GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT GENMASK(15, 8)
struct rtw89_h2c_notify_dbcc {
__le32 w0;
@@ -1741,60 +1944,47 @@ static inline void SET_LOG_CFG_COMP_EXT(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0));
}
-static inline void SET_BA_CAM_VALID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void SET_BA_CAM_INIT_REQ(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(1));
-}
-
-static inline void SET_BA_CAM_ENTRY_IDX(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2));
-}
-
-static inline void SET_BA_CAM_TID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4));
-}
-
-static inline void SET_BA_CAM_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BA_CAM_BMAP_SIZE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16));
-}
-
-static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
-}
-
-static inline void SET_BA_CAM_UID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
-}
+struct rtw89_h2c_ba_cam {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
-}
+#define RTW89_H2C_BA_CAM_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_W0_ENTRY_IDX GENMASK(3, 2)
+#define RTW89_H2C_BA_CAM_W0_TID GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_W0_MACID GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_W0_BMAP_SIZE GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_W0_SSN GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_W1_UID GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_W1_STD_EN BIT(8)
+#define RTW89_H2C_BA_CAM_W1_BAND BIT(9)
+#define RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1 GENMASK(31, 28)
+
+struct rtw89_h2c_ba_cam_v1 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
-}
+#define RTW89_H2C_BA_CAM_V1_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_V1_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_V1_W0_TID_MASK GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_V1_W0_MACID_MASK GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_V1_W0_SSN_MASK GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_V1_W1_UID_VALUE_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN BIT(8)
+#define RTW89_H2C_BA_CAM_V1_W1_BAND_SEL BIT(9)
+#define RTW89_H2C_BA_CAM_V1_W1_MLD_EN BIT(10)
+#define RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK GENMASK(31, 24)
+
+struct rtw89_h2c_ba_cam_init {
+ __le32 w0;
+} __packed;
-static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
-}
+#define RTW89_H2C_BA_CAM_INIT_USERS_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_INIT_OFFSET_MASK GENMASK(19, 12)
+#define RTW89_H2C_BA_CAM_INIT_BAND_SEL BIT(24)
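
The RTW89_H2C_BA_CAM_W0_* masks describe where each field sits inside a 32-bit word; in the driver, le32_encode_bits() shifts the value into place and stores it little-endian. A standalone sketch of the same mask-driven packing, with GENMASK and field_prep re-derived locally so the example compiles outside the kernel:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) \
	(((~UINT32_C(0)) >> (31 - (h))) & ((~UINT32_C(0)) << (l)))

/* Shift a value into the position described by its mask (mask != 0). */
static uint32_t field_prep32(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

#define BA_CAM_W0_VALID		(UINT32_C(1) << 0)
#define BA_CAM_W0_TID		GENMASK32(7, 4)
#define BA_CAM_W0_MACID		GENMASK32(15, 8)
#define BA_CAM_W0_BMAP_SIZE	GENMASK32(19, 16)
#define BA_CAM_W0_SSN		GENMASK32(31, 20)

int main(void)
{
	uint32_t w0 = field_prep32(BA_CAM_W0_VALID, 1) |
		      field_prep32(BA_CAM_W0_TID, 3) |
		      field_prep32(BA_CAM_W0_MACID, 0x12) |
		      field_prep32(BA_CAM_W0_BMAP_SIZE, 4) |
		      field_prep32(BA_CAM_W0_SSN, 0x5a5);

	printf("w0 = 0x%08" PRIx32 "\n", w0);	/* expect 0x5a541231 */
	return 0;
}
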
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
@@ -1846,6 +2036,17 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
}
+struct rtw89_h2c_lps_ch_info {
+ struct {
+ u8 pri_ch;
+ u8 central_ch;
+ u8 bw;
+ u8 band;
+ } __packed info[2];
+
+ __le32 mlo_dbcc_mode_lps;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2128,9 +2329,15 @@ enum rtw89_btc_btf_set {
SET_BT_IGNORE_WLAN_ACT,
SET_BT_TX_PWR,
SET_BT_LNA_CONSTRAIN,
- SET_BT_GOLDEN_RX_RANGE,
+ SET_BT_QUERY_DEV_LIST,
+ SET_BT_QUERY_DEV_INFO,
SET_BT_PSD_REPORT,
SET_H2C_TEST,
+ SET_IOFLD_RF,
+ SET_IOFLD_BB,
+ SET_IOFLD_MAC,
+ SET_IOFLD_SCBD,
+ SET_H2C_MACRO,
SET_MAX1,
};
@@ -2144,6 +2351,10 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_CTRL,
CXDRVINFO_SCAN,
CXDRVINFO_TRX, /* WL traffic to WL fw */
+ CXDRVINFO_TXPWR,
+ CXDRVINFO_FDDT,
+ CXDRVINFO_MLO,
+ CXDRVINFO_OSI,
CXDRVINFO_MAX,
};
@@ -2170,7 +2381,19 @@ struct rtw89_h2c_cxhdr {
u8 len;
} __packed;
+struct rtw89_h2c_cxhdr_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+} __packed;
+
+struct rtw89_h2c_cxctrl_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_ctrl_v7 ctrl;
+} __packed;
+
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
+#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
struct rtw89_h2c_cxinit {
struct rtw89_h2c_cxhdr hdr;
@@ -2204,6 +2427,11 @@ struct rtw89_h2c_cxinit {
#define RTW89_H2C_CXINIT_INFO_CX_OTHER BIT(3)
#define RTW89_H2C_CXINIT_INFO_BT_ONLY BIT(4)
+struct rtw89_h2c_cxinit_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_init_info_v7 init;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(void *cmd, u8 val)
{
u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0));
@@ -2569,135 +2797,91 @@ static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
}
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
-}
+struct rtw89_h2c_chinfo_elem {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
-}
+#define RTW89_H2C_CHINFO_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_W1_ACTION GENMASK(7, 3)
+#define RTW89_H2C_CHINFO_W1_NUM_PKT GENMASK(11, 8)
+#define RTW89_H2C_CHINFO_W1_TX BIT(12)
+#define RTW89_H2C_CHINFO_W1_PAUSE_DATA BIT(13)
+#define RTW89_H2C_CHINFO_W1_BAND GENMASK(15, 14)
+#define RTW89_H2C_CHINFO_W1_PKT_ID GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W1_DFS BIT(24)
+#define RTW89_H2C_CHINFO_W1_TX_NULL BIT(25)
+#define RTW89_H2C_CHINFO_W1_RANDOM BIT(26)
+#define RTW89_H2C_CHINFO_W1_CFG_TX BIT(27)
+#define RTW89_H2C_CHINFO_W2_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W2_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W2_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W2_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W3_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W3_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W3_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W3_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W4_POWER_IDX GENMASK(15, 0)
+
+struct rtw89_h2c_chinfo_elem_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
-}
+#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_BE_W1_CH_BAND GENMASK(4, 3)
+#define RTW89_H2C_CHINFO_BE_W1_DFS BIT(5)
+#define RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA BIT(6)
+#define RTW89_H2C_CHINFO_BE_W1_TX_NULL BIT(7)
+#define RTW89_H2C_CHINFO_BE_W1_RANDOM BIT(8)
+#define RTW89_H2C_CHINFO_BE_W1_NOTIFY GENMASK(13, 9)
+#define RTW89_H2C_CHINFO_BE_W1_PROBE BIT(14)
+#define RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT GENMASK(17, 15)
+#define RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W4_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W4_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W4_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W4_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W5_SW_DEF GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+
+struct rtw89_h2c_chinfo {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
-}
+#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
+#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
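
struct rtw89_h2c_chinfo above carries a variable number of per-channel entries, with elem[] bounded by ch_num via __counted_by(); the driver sizes such commands with struct_size(). A standalone sketch of that sizing, assuming a 28-byte entry (seven 32-bit words) and illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chinfo_elem {
	uint32_t w[7];		/* seven 32-bit words per channel entry */
};

struct chinfo {
	uint8_t ch_num;		/* number of entries that follow */
	uint8_t elem_size;	/* bytes per entry, for the firmware parser */
	uint8_t arg;
	uint8_t rsvd0;
	struct chinfo_elem elem[];	/* length bounded by ch_num */
};

int main(void)
{
	unsigned int ch_num = 5;
	/* struct_size(h2c, elem, ch_num) in the kernel; plain form here. */
	size_t len = sizeof(struct chinfo) + ch_num * sizeof(struct chinfo_elem);
	struct chinfo *h2c = calloc(1, len);

	if (!h2c)
		return 1;

	h2c->ch_num = ch_num;
	h2c->elem_size = sizeof(struct chinfo_elem);
	printf("h2c length = %zu bytes\n", len);	/* 4 + 5 * 28 = 144 */
	free(h2c);
	return 0;
}
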
struct rtw89_h2c_scanofld {
__le32 w0;
@@ -2726,6 +2910,79 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0)
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
+struct rtw89_h2c_scanofld_be_macc_role {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT GENMASK(4, 2)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be_opch {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND GENMASK(17, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT GENMASK(20, 18)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY GENMASK(22, 21)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL BIT(23)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND GENMASK(9, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW GENMASK(12, 10)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY GENMASK(14, 13)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS GENMASK(18, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ struct rtw89_h2c_scanofld_be_macc_role role[];
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE GENMASK(3, 2)
+#define RTW89_H2C_SCANOFLD_BE_W0_REPEAT GENMASK(5, 4)
+#define RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END BIT(6)
+#define RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH BIT(7)
+#define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24)
+#define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_W2_NORM_CY GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_OPCH_END GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W3_PROBEID GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W4_DELAY_START GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0)
+
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
@@ -3160,6 +3417,225 @@ inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd + 4, val, GENMASK(31, 0));
}
+enum rtw89_h2c_mrc_sch_types {
+ RTW89_H2C_MRC_SCH_BAND0_ONLY = 0,
+ RTW89_H2C_MRC_SCH_BAND1_ONLY = 1,
+ RTW89_H2C_MRC_SCH_DUAL_BAND = 2,
+};
+
+enum rtw89_h2c_mrc_role_types {
+ RTW89_H2C_MRC_ROLE_WIFI = 0,
+ RTW89_H2C_MRC_ROLE_BT = 1,
+ RTW89_H2C_MRC_ROLE_EMPTY = 2,
+};
+
+#define RTW89_MAC_MRC_MAX_ADD_SLOT_NUM 3
+#define RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT 1 /* before MLO */
+
+struct rtw89_fw_mrc_add_slot_arg {
+ u16 duration; /* unit: TU */
+ bool courtesy_en;
+ u8 courtesy_period;
+ u8 courtesy_target; /* slot idx */
+
+ unsigned int role_num;
+ struct {
+ enum rtw89_h2c_mrc_role_types role_type;
+ bool is_master;
+ bool en_tx_null;
+ enum rtw89_band band;
+ enum rtw89_bandwidth bw;
+ u8 macid;
+ u8 central_ch;
+ u8 primary_ch;
+ u8 null_early; /* unit: TU */
+
+ /* For MLD, the bits cover macid [0, chip::support_mld_num);
+ * otherwise, they cover macid [0, 32).
+ */
+ u32 macid_main_bitmap;
+ /* for MLD, bit X maps to macid: X + chip::support_mld_num */
+ u32 macid_paired_bitmap;
+ } roles[RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT];
+};
+
+struct rtw89_fw_mrc_add_arg {
+ u8 sch_idx;
+ enum rtw89_h2c_mrc_sch_types sch_type;
+ bool btc_in_sch;
+
+ unsigned int slot_num;
+ struct rtw89_fw_mrc_add_slot_arg slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_add_role {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 macid_main_bitmap;
+ __le32 macid_paired_bitmap;
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_ROLE_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE GENMASK(23, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER BIT(24)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE BIT(25)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN BIT(26)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN BIT(27)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_BW GENMASK(19, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE GENMASK(21, 20)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS BIT(22)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC BIT(23)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_TYPE GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_MACID GENMASK(23, 16)
+
+struct rtw89_h2c_mrc_add_slot {
+ __le32 w0;
+ __le32 w1;
+ struct rtw89_h2c_mrc_add_role roles[];
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_SLOT_W0_DURATION GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN BIT(17)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_add {
+ __le32 w0;
+ /* Logically, a flexible array of struct rtw89_h2c_mrc_add_slot follows
+ * here, but each slot itself ends in a flexible array, so the slots
+ * cannot be addressed correctly through this struct. To avoid misuse,
+ * the member is intentionally not declared.
+ */
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_ADD_W0_SCH_TYPE GENMASK(7, 4)
+#define RTW89_H2C_MRC_ADD_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH BIT(16)
+
+enum rtw89_h2c_mrc_start_actions {
+ RTW89_H2C_MRC_START_ACTION_START_NEW = 0,
+ RTW89_H2C_MRC_START_ACTION_REPLACE_OLD = 1,
+};
+
+struct rtw89_fw_mrc_start_arg {
+ u8 sch_idx;
+ u8 old_sch_idx;
+ u64 start_tsf;
+ enum rtw89_h2c_mrc_start_actions action;
+};
+
+struct rtw89_h2c_mrc_start {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+} __packed;
+
+#define RTW89_H2C_MRC_START_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_START_W0_OLD_SCH_IDX GENMASK(7, 4)
+#define RTW89_H2C_MRC_START_W0_ACTION GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_del {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_MRC_DEL_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_DEL_W0_DEL_ALL BIT(4)
+#define RTW89_H2C_MRC_DEL_W0_STOP_ONLY BIT(5)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_EN BIT(6)
+#define RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX GENMASK(15, 8)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_MACID GENMASK(31, 16)
+
+#define RTW89_MAC_MRC_MAX_REQ_TSF_NUM 2
+
+struct rtw89_fw_mrc_req_tsf_arg {
+ unsigned int num;
+ struct {
+ u8 band;
+ u8 port;
+ } infos[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+struct rtw89_h2c_mrc_req_tsf {
+ u8 req_tsf_num;
+ u8 infos[] __counted_by(req_tsf_num);
+} __packed;
+
+#define RTW89_H2C_MRC_REQ_TSF_INFO_BAND GENMASK(3, 0)
+#define RTW89_H2C_MRC_REQ_TSF_INFO_PORT GENMASK(7, 4)
+
+enum rtw89_h2c_mrc_upd_bitmap_actions {
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL = 0,
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD = 1,
+};
+
+struct rtw89_fw_mrc_upd_bitmap_arg {
+ u8 sch_idx;
+ u8 macid;
+ u8 client_macid;
+ enum rtw89_h2c_mrc_upd_bitmap_actions action;
+};
+
+struct rtw89_h2c_mrc_upd_bitmap {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION BIT(4)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_MACID GENMASK(31, 16)
+#define RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID GENMASK(15, 0)
+
+struct rtw89_fw_mrc_sync_arg {
+ u8 offset; /* unit: TU */
+ struct {
+ u8 band;
+ u8 port;
+ } src, dest;
+};
+
+struct rtw89_h2c_mrc_sync {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_SYNC_W0_SYNC_EN BIT(0)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_PORT GENMASK(11, 8)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_BAND GENMASK(15, 12)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_PORT GENMASK(19, 16)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_BAND GENMASK(23, 20)
+#define RTW89_H2C_MRC_SYNC_W1_OFFSET GENMASK(15, 0)
+
+struct rtw89_fw_mrc_upd_duration_arg {
+ u8 sch_idx;
+ u64 start_tsf;
+
+ unsigned int slot_num;
+ struct {
+ u8 slot_idx;
+ u16 duration; /* unit: TU */
+ } slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_upd_duration {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+ __le32 slots[];
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH BIT(16)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16)
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3275,20 +3751,29 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
-#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
-#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
-#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+struct rtw89_c2h_scanofld {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W2_RSN GENMASK(19, 16)
+#define RTW89_C2H_SCANOFLD_W2_STATUS GENMASK(23, 20)
+#define RTW89_C2H_SCANOFLD_W2_PERIOD GENMASK(31, 24)
+#define RTW89_C2H_SCANOFLD_W5_TX_FAIL GENMASK(3, 0)
+#define RTW89_C2H_SCANOFLD_W5_AIR_DENSITY GENMASK(7, 4)
+#define RTW89_C2H_SCANOFLD_W5_BAND GENMASK(25, 24)
+#define RTW89_C2H_SCANOFLD_W5_MAC_IDX BIT(26)
+#define RTW89_C2H_SCANOFLD_W6_SW_DEF GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
+#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
+#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3339,6 +3824,36 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_mac_mrc_tsf_rpt {
+ unsigned int num;
+ u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+static_assert(sizeof(struct rtw89_mac_mrc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
+
+struct rtw89_c2h_mrc_tsf_rpt_info {
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+struct rtw89_c2h_mrc_tsf_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ struct rtw89_c2h_mrc_tsf_rpt_info infos[];
+} __packed;
+
+#define RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM GENMASK(7, 0)
+
+struct rtw89_c2h_mrc_status_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+#define RTW89_C2H_MRC_STATUS_RPT_W2_STATUS GENMASK(5, 0)
+#define RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX GENMASK(7, 6)
+
struct rtw89_c2h_pkt_ofld_rsp {
__le32 w0;
__le32 w1;
@@ -3647,6 +4162,9 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_MAC_BCN_UPD 0x5
#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9
#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
+#define H2C_FUNC_MAC_DCTLINFO_UD_V2 0xc
+#define H2C_FUNC_MAC_BCN_UPD_BE 0xd
+#define H2C_FUNC_MAC_CCTLINFO_UD_G7 0x11
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -3672,6 +4190,8 @@ enum rtw89_fw_ofld_h2c_func {
H2C_FUNC_CFG_BCNFLTR = 0x1e,
H2C_FUNC_OFLD_RSSI = 0x1f,
H2C_FUNC_OFLD_TP = 0x20,
+ H2C_FUNC_MAC_MACID_PAUSE_SLEEP = 0x28,
+ H2C_FUNC_SCANOFLD_BE = 0x2c,
NUM_OF_RTW89_FW_OFLD_H2C_FUNC,
};
@@ -3683,6 +4203,14 @@ enum rtw89_fw_ofld_h2c_func {
RTW89_FW_OFLD_WAIT_COND(RTW89_PKT_OFLD_WAIT_TAG(pkt_id, pkt_op), \
H2C_FUNC_PACKET_OFLD)
+#define RTW89_SCANOFLD_WAIT_COND_ADD_CH RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH)
+
+#define RTW89_SCANOFLD_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_BE_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD_BE)
+#define RTW89_SCANOFLD_BE_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD_BE)
+
+
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
#define H2C_FUNC_MAC_SEC_UPD 0x1
@@ -3690,6 +4218,8 @@ enum rtw89_fw_ofld_h2c_func {
/* CLASS 12 - BA CAM */
#define H2C_CL_BA_CAM 0xc
#define H2C_FUNC_MAC_BA_CAM 0x0
+#define H2C_FUNC_MAC_BA_CAM_V1 0x1
+#define H2C_FUNC_MAC_BA_CAM_INIT 0x2
/* CLASS 14 - MCC */
#define H2C_CL_MCC 0xe
@@ -3710,15 +4240,50 @@ enum rtw89_mcc_h2c_func {
#define RTW89_MCC_WAIT_COND(group, func) \
((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+/* CLASS 24 - MRC */
+#define H2C_CL_MRC 0x18
+enum rtw89_mrc_h2c_func {
+ H2C_FUNC_MRC_REQ_TSF = 0x0,
+ H2C_FUNC_ADD_MRC = 0x1,
+ H2C_FUNC_START_MRC = 0x2,
+ H2C_FUNC_DEL_MRC = 0x3,
+ H2C_FUNC_MRC_SYNC = 0x4,
+ H2C_FUNC_MRC_UPD_DURATION = 0x5,
+ H2C_FUNC_MRC_UPD_BITMAP = 0x6,
+
+ NUM_OF_RTW89_MRC_H2C_FUNC,
+};
+
+/* an MRC sch_idx can be considered the equivalent of an MCC group */
+#define RTW89_MRC_WAIT_COND(sch_idx, func) \
+ ((sch_idx) * NUM_OF_RTW89_MRC_H2C_FUNC + (func))
+
+#define RTW89_MRC_WAIT_COND_REQ_TSF \
+ RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+
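Since NUM_OF_RTW89_MRC_H2C_FUNC is 7, the condition value is simply sch_idx * 7 + func. Worked examples as illustrative compile-time asserts (not part of the patch):

static_assert(RTW89_MRC_WAIT_COND(0, H2C_FUNC_MRC_REQ_TSF) == 0);
static_assert(RTW89_MRC_WAIT_COND(1, H2C_FUNC_START_MRC) == 1 * 7 + 2);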
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0
+#define H2C_CL_OUTSRC_DM 0x2
+#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+#define H2C_CL_OUTSRC_RF_FW_RFK 0xb
+
+enum rtw89_rfk_offload_h2c_func {
+ H2C_FUNC_RFK_TSSI_OFFLOAD = 0x0,
+ H2C_FUNC_RFK_IQK_OFFLOAD = 0x1,
+ H2C_FUNC_RFK_DPK_OFFLOAD = 0x3,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD = 0x4,
+ H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
+ H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
+};
struct rtw89_fw_h2c_rf_get_mccch {
__le32 ch_0;
@@ -3729,6 +4294,114 @@ struct rtw89_fw_h2c_rf_get_mccch {
__le32 current_band_type;
} __packed;
+#define NUM_OF_RTW89_FW_RFK_PATH 2
+#define NUM_OF_RTW89_FW_RFK_TBL 3
+
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ } __packed dbcc;
+
+ __le32 mlo_mode;
+ struct {
+ __le32 cur_ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 cur_band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed tbl;
+
+ __le32 phy_idx;
+ __le32 cur_band;
+ __le32 cur_bw;
+ __le32 cur_center_ch;
+
+ __le32 ktbl_sel0;
+ __le32 ktbl_sel1;
+ __le32 rfmod0;
+ __le32 rfmod1;
+
+ __le32 mlo_1_1;
+ __le32 rfe_type;
+ __le32 drv_mode;
+
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed mlo;
+} __packed;
+
+struct rtw89_h2c_rf_tssi {
+ __le16 len;
+ u8 phy;
+ u8 ch;
+ u8 bw;
+ u8 band;
+ u8 hwtx_en;
+ u8 cv;
+ s8 curr_tssi_cck_de[2];
+ s8 curr_tssi_cck_de_20m[2];
+ s8 curr_tssi_cck_de_40m[2];
+ s8 curr_tssi_efuse_cck_de[2];
+ s8 curr_tssi_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_20m[2];
+ s8 curr_tssi_ofdm_de_40m[2];
+ s8 curr_tssi_ofdm_de_80m[2];
+ s8 curr_tssi_ofdm_de_160m[2];
+ s8 curr_tssi_ofdm_de_320m[2];
+ s8 curr_tssi_efuse_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_diff_20m[2];
+ s8 curr_tssi_ofdm_de_diff_80m[2];
+ s8 curr_tssi_ofdm_de_diff_160m[2];
+ s8 curr_tssi_ofdm_de_diff_320m[2];
+ s8 curr_tssi_trim_de[2];
+ u8 pg_thermal[2];
+ u8 ftable[2][128];
+ u8 tssi_mode;
+} __packed;
+
+struct rtw89_h2c_rf_iqk {
+ __le32 phy_idx;
+ __le32 dbcc;
+} __packed;
+
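The IQK offload payload is just two little-endian words. A minimal sketch of filling it is shown below; the real encoder is rtw89_fw_h2c_rf_iqk() in fw.c, whose exact body is not reproduced here, so the helper name and the dbcc_en source are assumptions.

static void sketch_fill_rf_iqk(struct rtw89_h2c_rf_iqk *h2c,
			       enum rtw89_phy_idx phy_idx, bool dbcc_en)
{
	h2c->phy_idx = cpu_to_le32(phy_idx);
	h2c->dbcc = cpu_to_le32(dbcc_en);
}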
+struct rtw89_h2c_rf_dpk {
+ u8 len;
+ u8 phy;
+ u8 dpk_enable;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 dpk_dbg_en;
+} __packed;
+
+struct rtw89_h2c_rf_txgapk {
+ u8 len;
+ u8 ktype;
+ u8 phy;
+ u8 kpath;
+ u8 band;
+ u8 bw;
+ u8 ch;
+ u8 cv;
+} __packed;
+
+struct rtw89_h2c_rf_dack {
+ __le32 len;
+ __le32 phy;
+ __le32 type;
+} __packed;
+
+struct rtw89_h2c_rf_rxdck {
+ u8 len;
+ u8 phy;
+ u8 is_afe;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 rxdck_dbg_en;
+} __packed;
+
enum rtw89_rf_log_type {
RTW89_RF_RUN_LOG = 0,
RTW89_RF_RPT_LOG = 1,
@@ -3800,6 +4473,12 @@ struct rtw89_c2h_rf_txgapk_rpt_log {
u8 rsv1;
} __packed;
+struct rtw89_c2h_rfk_report {
+ struct rtw89_c2h_hdr hdr;
+ u8 state; /* enum rtw89_rfk_report_state */
+ u8 version;
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -3830,21 +4509,39 @@ void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif);
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -3866,25 +4563,41 @@ int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
-int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb_ofld);
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list);
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *opt,
struct rtw89_vif *vif);
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -3898,10 +4611,16 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
@@ -3916,6 +4635,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
@@ -3956,6 +4679,20 @@ int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
u8 target, u8 offset);
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mcc_duration *p);
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg);
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg);
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx);
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt);
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg);
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg);
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
@@ -3965,6 +4702,65 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev);
}
+static inline int rtw89_chip_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+}
+
+static inline int rtw89_chip_h2c_default_dmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_default_dmac_tbl)
+ return chip->ops->h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+
+ return 0;
+}
+
+static inline int rtw89_chip_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_update_beacon(rtwdev, rtwvif);
+}
+
+static inline int rtw89_chip_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+}
+
+static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_ampdu_cmac_tbl)
+ return chip->ops->h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
+
+ return 0;
+}
+
+static inline
+int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_ba_cam(rtwdev, rtwsta, valid, params);
+}
+
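These wrappers dispatch through rtw89_chip_ops so AX and BE/G7 chips can plug in their own H2C encoders. The excerpt below is an assumed, AX-style wiring for illustration only; the authoritative tables live in the individual chip files (e.g. rtw8852c.c and rtw8922a.c), and the optional hooks may legitimately stay NULL.

/* Assumed excerpt of an AX chip's ops table; not copied from any chip file. */
static const struct rtw89_chip_ops sketch_ops_ax_excerpt = {
	.h2c_default_cmac_tbl	= rtw89_fw_h2c_default_cmac_tbl,
	.h2c_default_dmac_tbl	= NULL,	/* no DMAC table H2C on AX */
	.h2c_update_beacon	= rtw89_fw_h2c_update_beacon,
	.h2c_assoc_cmac_tbl	= rtw89_fw_h2c_assoc_cmac_tbl,
	.h2c_ampdu_cmac_tbl	= NULL,	/* only the G7 variant exists */
	.h2c_ba_cam		= rtw89_fw_h2c_ba_cam,
};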
 /* must keep compatibility; don't insert new fields in the middle */
struct rtw89_fw_txpwr_byrate_entry {
u8 band;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index c485ef2cc3d3..aa5b396b5d2b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1625,7 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
- .ple_size0_v1 = {RTW89_PLE_PG_128, 2672, 256, 212992,},
+ .ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
.ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
@@ -1650,8 +1650,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
- .ple_qt0 = {320, 0, 32, 16, 13, 13, 292, 0, 32, 18, 1, 4, 0,},
- .ple_qt1 = {320, 0, 32, 16, 1944, 1944, 2223, 0, 1963, 1949, 1, 1935, 0,},
+ .ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
+ .ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
/* PCIE SCC */
@@ -1677,7 +1677,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
- .ple_rsvd_qt0 = {2, 112, 56, 6, 6, 6, 6, 0, 0, 62,},
+ .ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
.ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
.rsvd0_size0 = {212992, 0,},
.rsvd1_size0 = {587776, 2048,},
@@ -2025,6 +2025,9 @@ void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable)
{
u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC;
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
+ return;
+
if (enable)
rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
else
@@ -2537,6 +2540,9 @@ static int spatial_reuse_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_AX_PLCP_SRC_EN);
+
return 0;
}
@@ -3192,13 +3198,11 @@ static int set_cpuio_ax(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_dle_mem *cfg;
- struct rtw89_cpuio_ctrl ctrl_para = {0};
- u16 pkt_id;
- int ret;
cfg = get_dle_mem_cfg(rtwdev, mode);
if (!cfg) {
@@ -3213,6 +3217,16 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod
dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
+ return mac->dle_quota_change(rtwdev, band1_en);
+}
+
+static int dle_quota_change_ax(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+ int ret;
+
ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
@@ -3301,7 +3315,7 @@ static int band1_enable_ax(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -3676,6 +3690,28 @@ static int trx_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
+{
+#define BACAM_1024BMP_OCC_ENTRY 4
+#define BACAM_MAX_RU_SUPPORT_B0_STA 1
+#define BACAM_MAX_RU_SUPPORT_B1_STA 1
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 users, offset;
+
+ if (chip->bacam_ver != RTW89_BACAM_V1)
+ return 0;
+
+ offset = 0;
+ users = BACAM_MAX_RU_SUPPORT_B0_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_0);
+
+ offset += users * BACAM_1024BMP_OCC_ENTRY;
+ users = BACAM_MAX_RU_SUPPORT_B1_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_1);
+
+ return 0;
+}
+
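Worked through with the constants above (one RU-capable station per band, four BA CAM entries per 1024-bit bitmap), rtw89_mac_feat_init() reduces to the two calls below, so band 1's entries start right after the four reserved for band 0. This is an illustrative expansion, not additional driver code.

/* Illustrative expansion with the constants substituted. */
static void sketch_bacam_v1_init(struct rtw89_dev *rtwdev)
{
	rtw89_fw_h2c_init_ba_cam_users(rtwdev, 1, 0, RTW89_MAC_0);	/* band 0, offset 0 */
	rtw89_fw_h2c_init_ba_cam_users(rtwdev, 1, 4, RTW89_MAC_1);	/* band 1, offset 1 * 4 */
}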
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -3910,6 +3946,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
+ ret = rtw89_mac_feat_init(rtwdev);
+ if (ret)
+ goto fail;
+
if (rtwdev->hci.ops->mac_post_init) {
ret = rtwdev->hci.ops->mac_post_init(rtwdev);
if (ret)
@@ -4000,6 +4040,9 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.mbssid = R_AX_MBSSID_CTRL,
.mbssid_drop = R_AX_MBSSID_DROP_0,
.tsf_sync = R_AX_PORT0_TSF_SYNC,
+ .ptcl_dbg = R_AX_PTCL_DBG,
+ .ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
+ .bcn_drop_all = R_AX_BCN_DROP_ALL0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@@ -4008,13 +4051,15 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 type)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif->port);
u32 reg_info, reg_ctrl;
u32 val;
int ret;
- reg_info = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG_INFO, rtwvif->mac_idx);
- reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG, rtwvif->mac_idx);
+ reg_info = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif->mac_idx);
+ reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif->mac_idx);
rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type);
rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN);
@@ -4031,7 +4076,7 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- rtw89_write32_set(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 1);
rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 0);
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 0);
@@ -4044,9 +4089,9 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
if (rtwvif->port == RTW89_PORT_0)
rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM1);
- rtw89_write32_clr(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TBTT_PROHIB_EN);
- fsleep(2);
+ fsleep(2000);
}
#define BCN_INTERVAL 100
@@ -4159,13 +4204,11 @@ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
}
-static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
- rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
if (en)
rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
@@ -4173,6 +4216,15 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
}
+static void rtw89_mac_port_cfg_rx_sync_by_nettype(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, en);
+}
+
static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en)
{
@@ -4471,7 +4523,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, NULL);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, NULL);
if (ret)
return ret;
@@ -4508,7 +4564,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
- rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
@@ -4571,6 +4627,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct ieee80211_hw *hw = rtwdev->hw;
bool tolerated = true;
u32 reg;
@@ -4578,18 +4635,19 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
rtw89_mac_check_he_obss_narrow_bw_ru_iter,
&tolerated);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->narrow_bw_ru_dis.addr,
+ rtwvif->mac_idx);
if (tolerated)
- rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_clr(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
else
- rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -4641,35 +4699,52 @@ static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
}
static void
-rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len)
{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period;
- u32 last_chan = rtwdev->scan_info.last_chan_idx;
+ u8 reason, status, tx_fail, band, actual_period, expect_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u8 mac_idx, sw_def, fw_def;
u16 chan;
int ret;
if (!rtwvif)
return;
- tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
- status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
- chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
- reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
- band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
- actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
+ tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ band = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_BAND);
+ actual_period = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PERIOD);
+ mac_idx = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_MAC_IDX);
+
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- band, chan, reason, status, tx_fail, actual_period);
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
+ expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
+ report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
+ sw_def, fw_def, report_tsf, expect_period);
+ }
switch (reason) {
+ case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false);
@@ -4685,9 +4760,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
}
} else {
- rtw89_hw_scan_complete(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort);
}
break;
+ case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
@@ -4807,8 +4883,13 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
default:
return;
case H2C_FUNC_ADD_SCANOFLD_CH:
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+ break;
case H2C_FUNC_SCANOFLD:
- cond = RTW89_FW_OFLD_WAIT_COND(0, h2c_func);
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ break;
+ case H2C_FUNC_SCANOFLD_BE:
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
break;
}
@@ -5021,6 +5102,84 @@ rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
}
+static void
+rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_tsf_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ struct rtw89_mac_mrc_tsf_rpt *rpt;
+ unsigned int i;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_tsf_rpt *)c2h->data;
+ rpt = (struct rtw89_mac_mrc_tsf_rpt *)data.buf;
+ rpt->num = min_t(u8, RTW89_MAC_MRC_MAX_REQ_TSF_NUM,
+ le32_get_bits(c2h_rpt->w2,
+ RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM));
+
+ for (i = 0; i < rpt->num; i++) {
+ u32 tsf_high = le32_to_cpu(c2h_rpt->infos[i].tsf_high);
+ u32 tsf_low = le32_to_cpu(c2h_rpt->infos[i].tsf_low);
+
+ rpt->tsfs[i] = (u64)tsf_high << 32 | tsf_low;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H TSF RPT: index %u> %llu\n",
+ i, rpt->tsfs[i]);
+ }
+
+ rtw89_complete_cond(wait, RTW89_MRC_WAIT_COND_REQ_TSF, &data);
+}
+
+static void
+rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_status_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ enum rtw89_mac_mrc_status status;
+ unsigned int cond;
+ bool next = false;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 sch_idx;
+ u8 func;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_status_rpt *)c2h->data;
+ sch_idx = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX);
+ status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_STATUS);
+ tsf_high = le32_to_cpu(c2h_rpt->tsf_high);
+ tsf_low = le32_to_cpu(c2h_rpt->tsf_low);
+
+ switch (status) {
+ case RTW89_MAC_MRC_START_SCH_OK:
+ func = H2C_FUNC_START_MRC;
+ break;
+ case RTW89_MAC_MRC_STOP_SCH_OK:
+ /* H2C_FUNC_DEL_MRC without STOP_ONLY, so wait for DEL_SCH_OK */
+ func = H2C_FUNC_DEL_MRC;
+ next = true;
+ break;
+ case RTW89_MAC_MRC_DEL_SCH_OK:
+ func = H2C_FUNC_DEL_MRC;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "invalid MRC C2H STS RPT: status %d\n", status);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: sch_idx %d, status %d, tsf %llu\n",
+ sch_idx, status, (u64)tsf_high << 32 | tsf_low);
+
+ if (next)
+ return;
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, func);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5052,7 +5211,39 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt,
};
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+static
+void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt,
+ [RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt,
+};
+
+static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
+ struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ u8 status, reason;
+
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ data.err = status != RTW89_SCAN_STATUS_SUCCESS;
+
+ if (reason == RTW89_SCAN_END_SCAN_NOTIFY) {
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
+
+ rtw89_complete_cond(fw_ofld_wait, cond, &data);
+ }
+}
+
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func)
{
switch (class) {
default:
@@ -5069,11 +5260,16 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
switch (func) {
default:
return false;
+ case RTW89_MAC_C2H_FUNC_SCANOFLD_RSP:
+ rtw89_mac_c2h_scanofld_rsp_atomic(rtwdev, c2h);
+ return false;
case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP:
return true;
}
case RTW89_MAC_C2H_CLASS_MCC:
return true;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ return true;
}
}
@@ -5096,6 +5292,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
handler = rtw89_mac_c2h_mcc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
+ handler = rtw89_mac_c2h_mrc_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -5115,8 +5315,7 @@ bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx);
if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) {
@@ -5143,7 +5342,8 @@ error:
return false;
}
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+static
+int rtw89_mac_cfg_ppdu_status_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx);
int ret;
@@ -5166,7 +5366,6 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return 0;
}
-EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -5419,7 +5618,8 @@ int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
-int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+static
+int rtw89_mac_cfg_plt_ax(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
u16 val;
@@ -5515,7 +5715,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
return !!val;
}
-u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+static u16 rtw89_mac_get_plt_cnt_ax(struct rtw89_dev *rtwdev, u8 band)
{
u32 reg;
u16 cnt;
@@ -6069,6 +6269,41 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
return ret;
}
+static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ int ret;
+
+ if (enable_wow) {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
+ rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+ } else {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
+ }
+
+ return 0;
+}
+
static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
@@ -6096,6 +6331,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.rx_fltr = R_AX_RX_FLTR_OPT,
.port_base = &rtw89_port_base_ax,
.agg_len_ht = R_AX_AGG_LEN_HT_0,
+ .ps_status = R_AX_PPWRBIT_SETTING,
.muedca_ctrl = {
.addr = R_AX_MUEDCA_EN,
@@ -6106,6 +6342,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
B_AX_BFMEE_HE_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_AX_RXTRIG_TEST_USER_2,
+ .mask = B_AX_RXTRIG_RU26_DIS,
+ },
+ .wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,},
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
@@ -6117,6 +6358,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.bf_assoc = rtw89_mac_bf_assoc_ax,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
@@ -6128,6 +6370,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.wde_quota_cfg = wde_quota_cfg_ax,
.ple_quota_cfg = ple_quota_cfg_ax,
.set_cpuio = set_cpuio_ax,
+ .dle_quota_change = dle_quota_change_ax,
.disable_cpu = rtw89_mac_disable_cpu_ax,
.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
@@ -6137,6 +6380,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.parse_phycap_map = rtw89_parse_phycap_map_ax,
.cnv_efuse_state = rtw89_cnv_efuse_state_ax,
+ .cfg_plt = rtw89_mac_cfg_plt_ax,
+ .get_plt_cnt = rtw89_mac_get_plt_cnt_ax,
+
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax,
.write_xtal_si = rtw89_mac_write_xtal_si_ax,
@@ -6146,5 +6392,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.dump_err_status = rtw89_mac_dump_err_status_ax,
.is_txq_empty = mac_is_txq_empty_ax,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list,
+ .scan_offload = rtw89_fw_h2c_scan_offload,
+
+ .wow_config_mac = rtw89_wow_config_mac_ax,
};
EXPORT_SYMBOL(rtw89_mac_gen_ax);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index ed98b49809a4..6fb457153a11 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -169,6 +169,12 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
+enum rtw89_mac_wow_fw_status {
+ WOWLAN_NOT_READY = 0x00,
+ WOWLAN_SLEEP_READY = 0x01,
+ WOWLAN_RESUME_READY = 0x02,
+};
+
#define RTW89_PORT_OFFSET_TU_TO_32US(shift_tu) ((shift_tu) * 1024 / 32)
enum rtw89_mac_dbg_port_sel {
@@ -406,13 +412,21 @@ enum rtw89_mac_c2h_mcc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
};
+enum rtw89_mac_c2h_mrc_func {
+ RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0,
+ RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MRC,
+};
+
enum rtw89_mac_c2h_class {
- RTW89_MAC_C2H_CLASS_INFO,
- RTW89_MAC_C2H_CLASS_OFLD,
- RTW89_MAC_C2H_CLASS_TWT,
- RTW89_MAC_C2H_CLASS_WOW,
- RTW89_MAC_C2H_CLASS_MCC,
- RTW89_MAC_C2H_CLASS_FWDBG,
+ RTW89_MAC_C2H_CLASS_INFO = 0x0,
+ RTW89_MAC_C2H_CLASS_OFLD = 0x1,
+ RTW89_MAC_C2H_CLASS_TWT = 0x2,
+ RTW89_MAC_C2H_CLASS_WOW = 0x3,
+ RTW89_MAC_C2H_CLASS_MCC = 0x4,
+ RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
+ RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -441,6 +455,12 @@ enum rtw89_mac_mcc_status {
RTW89_MAC_MCC_TXNULL1_FAIL = 27,
};
+enum rtw89_mac_mrc_status {
+ RTW89_MAC_MRC_START_SCH_OK = 0,
+ RTW89_MAC_MRC_STOP_SCH_OK = 1,
+ RTW89_MAC_MRC_DEL_SCH_OK = 2,
+};
+
struct rtw89_mac_ax_coex {
#define RTW89_MAC_AX_COEX_RTK_MODE 0
#define RTW89_MAC_AX_COEX_CSR_MODE 1
@@ -894,9 +914,12 @@ struct rtw89_mac_gen_def {
u32 rx_fltr;
const struct rtw89_port_reg *port_base;
u32 agg_len_ht;
+ u32 ps_status;
struct rtw89_reg_def muedca_ctrl;
struct rtw89_reg_def bfee_ctrl;
+ struct rtw89_reg_def narrow_bw_ru_dis;
+ struct rtw89_reg_def wow_ctrl;
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
enum rtw89_mac_hwmod_sel sel);
@@ -913,6 +936,7 @@ struct rtw89_mac_gen_def {
enum rtw89_machdr_frame_type type,
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
+ int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -930,6 +954,7 @@ struct rtw89_mac_gen_def {
const struct rtw89_ple_quota *max_cfg);
int (*set_cpuio)(struct rtw89_dev *rtwdev,
struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+ int (*dle_quota_change)(struct rtw89_dev *rtwdev, bool band1_en);
void (*disable_cpu)(struct rtw89_dev *rtwdev);
int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -940,6 +965,9 @@ struct rtw89_mac_gen_def {
int (*parse_phycap_map)(struct rtw89_dev *rtwdev);
int (*cnv_efuse_state)(struct rtw89_dev *rtwdev, bool idle);
+ int (*cfg_plt)(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
+ u16 (*get_plt_cnt)(struct rtw89_dev *rtwdev, u8 band);
+
bool (*get_txpwr_cr)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
@@ -952,6 +980,14 @@ struct rtw89_mac_gen_def {
enum mac_ax_err_info err);
bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+
+ int (*add_chan_list)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+ int (*scan_offload)(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif);
+
+ int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow);
};
extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax;
@@ -1086,6 +1122,8 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
u16 offset_tu);
int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u64 *tsf);
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
@@ -1127,7 +1165,8 @@ static inline int rtw89_chip_reset_bb_rf(struct rtw89_dev *rtwdev)
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
@@ -1135,9 +1174,20 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+
+static inline
+int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_ppdu_status(rtwdev, mac_idx, enable);
+}
+
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
@@ -1147,13 +1197,31 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
-int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
-u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
+int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+
+static inline
+int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_plt(rtwdev, plt);
+}
+
+static inline
+u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->get_plt_cnt(rtwdev, band);
+}
+
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl);
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
@@ -1306,6 +1374,7 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_BIG_PWR_CUT BIT(1)
XTAL_SI_XTAL_DRV = 0x15,
#define XTAL_SI_DRV_LATCH BIT(4)
+ XTAL_SI_XTAL_PLL = 0x16,
XTAL_SI_XTAL_XMD_2 = 0x24,
#define XTAL_SI_LDO_LPS GENMASK(6, 4)
XTAL_SI_XTAL_XMD_4 = 0x26,
@@ -1339,6 +1408,7 @@ enum rtw89_mac_xtal_si_offset {
XTAL_SI_SRAM_CTRL = 0xA1,
#define XTAL_SI_SRAM_DIS BIT(1)
#define FULL_BIT_MASK GENMASK(7, 0)
+ XTAL_SI_APBT = 0xD1,
XTAL_SI_PLL = 0xE0,
XTAL_SI_PLL_1 = 0xE1,
};
@@ -1364,7 +1434,8 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow);
int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx band);
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool wow);
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode);
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en);
int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_dle_rsvd_qt_type type,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 93889d2fface..31d1ffb16e83 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -441,7 +441,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
* when disconnected by peer
*/
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
}
}
@@ -449,10 +449,11 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, 0);
}
if (changed & BSS_CHANGED_BEACON)
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -497,7 +498,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_mac_port_update(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
@@ -518,7 +519,7 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_mac_stop_ap(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
mutex_unlock(&rtwdev->mutex);
}
@@ -660,6 +661,8 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ clear_bit(tid, rtwsta->ampdu_map);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -668,17 +671,19 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
+ set_bit(tid, rtwsta->ampdu_map);
rtw89_leave_ps_mode(rtwdev);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_STOP:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params);
mutex_unlock(&rtwdev->mutex);
break;
default:
@@ -990,7 +995,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
}
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
if (type == IEEE80211_ROC_TYPE_MGMT_TX)
roc->state = RTW89_ROC_MGMT;
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index be30c9346293..f16467377eab 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -52,6 +52,9 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.mbssid = R_BE_MBSSID_CTRL,
.mbssid_drop = R_BE_MBSSID_DROP_0,
.tsf_sync = R_BE_PORT_0_TSF_SYNC,
+ .ptcl_dbg = R_BE_PTCL_DBG,
+ .ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
+ .bcn_drop_all = R_BE_BCN_DROP_ALL0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},
@@ -988,6 +991,9 @@ static int spatial_reuse_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_BE_SR_EN | B_BE_SR_CTRL_PLCP_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_BE_PLCP_SRC_EN);
+
return 0;
}
@@ -995,7 +1001,8 @@ static int tmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
- rtw89_write32_clr(rtwdev, R_BE_TB_PPDU_CTRL, B_BE_QOSNULL_UPD_MUEDCA_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TB_PPDU_CTRL, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_QOSNULL_UPD_MUEDCA_EN);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
@@ -1449,6 +1456,71 @@ static int set_cpuio_be(struct rtw89_dev *rtwdev,
return 0;
}
+static int dle_upd_qta_aval_page_be(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_dle_ctrl_type type,
+ enum rtw89_mac_dle_ple_quota_id quota_id)
+{
+ u32 val;
+
+ if (type == DLE_CTRL_TYPE_WDE) {
+ rtw89_write32_mask(rtwdev, R_BE_WDE_BUFMGN_CTL,
+ B_BE_WDE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_WDE_BUFMGN_CTL, B_BE_WDE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_WDE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_WDE_BUFMGN_CTL);
+ } else if (type == DLE_CTRL_TYPE_PLE) {
+ rtw89_write32_mask(rtwdev, R_BE_PLE_BUFMGN_CTL,
+ B_BE_PLE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_PLE_BUFMGN_CTL, B_BE_PLE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_PLE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_PLE_BUFMGN_CTL);
+ }
+
+ rtw89_warn(rtwdev, "%s wrong type %d\n", __func__, type);
+ return -EINVAL;
+}
+
+static int dle_quota_change_be(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ int ret;
+
+ if (band1_en) {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B0_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B0 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC0_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC0 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B1_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B1 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC1_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC1 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_qta_mode mode)
{
@@ -1480,6 +1552,13 @@ static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
static int dbcc_bb_ctrl_be(struct rtw89_dev *rtwdev, bool bb1_en)
{
+ u32 set = B_BE_FEN_BB1PLAT_RSTB | B_BE_FEN_BB1_IP_RSTN;
+
+ if (bb1_en)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, set);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, set);
+
return 0;
}
@@ -1538,7 +1617,7 @@ static int band1_enable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1593,7 +1672,7 @@ static int band1_disable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1616,7 +1695,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, true);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1625,7 +1704,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, false);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1718,12 +1797,220 @@ static int trx_init_be(struct rtw89_dev *rtwdev)
return 0;
}
+int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_BE_GNT_BT_BB0_VAL | B_BE_GNT_BT_RX_BB0_VAL |
+ B_BE_GNT_BT_TX_BB0_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_BE_GNT_BT_BB0_SWCTRL | B_BE_GNT_BT_RX_BB0_SWCTRL |
+ B_BE_GNT_BT_TX_BB0_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_BE_GNT_WL_BB0_VAL | B_BE_GNT_WL_RX_VAL |
+ B_BE_GNT_WL_TX_VAL | B_BE_GNT_WL_BB_PWR_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_BE_GNT_WL_BB0_SWCTRL | B_BE_GNT_WL_RX_SWCTRL |
+ B_BE_GNT_WL_TX_SWCTRL | B_BE_GNT_WL_BB_PWR_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_BE_GNT_BT_BB1_VAL | B_BE_GNT_BT_RX_BB1_VAL |
+ B_BE_GNT_BT_TX_BB1_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_BE_GNT_BT_BB1_SWCTRL | B_BE_GNT_BT_RX_BB1_SWCTRL |
+ B_BE_GNT_BT_TX_BB1_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_BE_GNT_WL_BB1_VAL | B_BE_GNT_WL_RX_VAL |
+ B_BE_GNT_WL_TX_VAL | B_BE_GNT_WL_BB_PWR_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_BE_GNT_WL_BB1_SWCTRL | B_BE_GNT_WL_RX_SWCTRL |
+ B_BE_GNT_WL_TX_SWCTRL | B_BE_GNT_WL_BB_PWR_SWCTRL;
+
+ if (gnt_cfg->bt[0].wlan_act_en)
+ val |= B_BE_WL_ACT_SWCTRL;
+ if (gnt_cfg->bt[0].wlan_act)
+ val |= B_BE_WL_ACT_VAL;
+ if (gnt_cfg->bt[1].wlan_act_en)
+ val |= B_BE_WL_ACT2_SWCTRL;
+ if (gnt_cfg->bt[1].wlan_act)
+ val |= B_BE_WL_ACT2_VAL;
+
+ rtw89_write32(rtwdev, R_BE_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v2);
+
+int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ struct rtw89_mac_ax_wl_act *gbt = dm->gnt.bt;
+ int i;
+
+ if (wl)
+ return 0;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ gbt[i].wlan_act = 1;
+ gbt[i].wlan_act_en = 0;
+ }
+
+ return rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v2);
+
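+/* Select which coexistence grant conditions mark TX/RX as polluted (PLT) and enable the PLT packet counter. */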
+static
+int rtw89_mac_cfg_plt_be(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ u32 reg;
+ u16 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BT_PLT, plt->band);
+ val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_BE_TX_PLT_GNT_LTE_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_BE_TX_PLT_GNT_BT_TX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_BE_TX_PLT_GNT_BT_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_BE_TX_PLT_GNT_WL : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_BE_RX_PLT_GNT_LTE_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_BE_RX_PLT_GNT_BT_TX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_BE_RX_PLT_GNT_BT_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_BE_RX_PLT_GNT_WL : 0) |
+ B_BE_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
+
+ return 0;
+}
+
+static u16 rtw89_mac_get_plt_cnt_be(struct rtw89_dev *rtwdev, u8 band)
+{
+ u32 reg;
+ u16 cnt;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BT_PLT, band);
+ cnt = rtw89_read32_mask(rtwdev, reg, B_BE_BT_PLT_PKT_CNT_MASK);
+ rtw89_write16_set(rtwdev, reg, B_BE_BT_PLT_RST);
+
+ return cnt;
+}
+
+static int rtw89_set_hw_sch_tx_en_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v2);
+
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, tx_en,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
+
+static
+int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PPDU_STAT, mac_idx);
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ rtw89_write32_clr(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN);
+ return 0;
+ }
+
+ rtw89_write32_mask(rtwdev, R_BE_HW_PPDU_STATUS, B_BE_FWD_PPDU_STAT_MASK, 3);
+ rtw89_write32(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN | B_BE_PPDU_MAC_INFO |
+ B_BE_APP_RX_CNT_RPT | B_BE_APP_PLCP_HDR_RPT |
+ B_BE_PPDU_STAT_RPT_CRC32 | B_BE_PPDU_STAT_RPT_DMA);
+
+ return 0;
+}
+
static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, (enum rtw89_mac_idx)phy_idx,
@@ -2020,6 +2307,52 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev)
dump_err_status_dispatcher_be(rtwdev);
}
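+/* Tell firmware via H2C register to switch its WoWLAN CPU IO RX path, and wait for the matching ack. */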
+static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+ struct rtw89_mac_h2c_info h2c_info = {};
+ struct rtw89_mac_c2h_info c2h_info = {};
+ int ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
+ h2c_info.content_len = sizeof(h2c_info.u.hdr);
+ h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ if (enable_wow) {
+ rtw89_write32_set(rtwdev, R_BE_RX_STOP, B_BE_HOST_RX_STOP);
+ rtw89_write32_clr(rtwdev, R_BE_RX_FLTR_OPT, B_BE_SNIFFER_MODE);
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_BE_FWD_ERR, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN0, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN1, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN2, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_TF0, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_TF1, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ERR, 0);
+ rtw89_write32(rtwdev, R_BE_HW_PPDU_STATUS, 0);
+ rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY);
+ } else {
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
+ rtw89_write32_clr(rtwdev, R_BE_RX_STOP, B_BE_HOST_RX_STOP);
+ rtw89_write32_set(rtwdev, R_BE_RX_FLTR_OPT, B_BE_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ }
+
+ return 0;
+}
+
static void rtw89_mac_dump_cmac_err_status_be(struct rtw89_dev *rtwdev,
u8 band)
{
@@ -2218,6 +2551,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.rx_fltr = R_BE_RX_FLTR_OPT,
.port_base = &rtw89_port_base_be,
.agg_len_ht = R_BE_AGG_LEN_HT_0,
+ .ps_status = R_BE_WMTX_POWER_BE_BIT_CTL,
.muedca_ctrl = {
.addr = R_BE_MUEDCA_EN,
@@ -2228,6 +2562,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.mask = B_BE_BFMEE_HT_NDPA_EN | B_BE_BFMEE_VHT_NDPA_EN |
B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_BE_RXTRIG_TEST_USER_2,
+ .mask = B_BE_RXTRIG_RU26_DIS,
+ },
+ .wow_ctrl = {.addr = R_BE_WOW_CTRL, .mask = B_BE_WOW_WOWEN,},
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
@@ -2239,6 +2578,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.bf_assoc = rtw89_mac_bf_assoc_be,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
@@ -2250,6 +2590,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.wde_quota_cfg = wde_quota_cfg_be,
.ple_quota_cfg = ple_quota_cfg_be,
.set_cpuio = set_cpuio_be,
+ .dle_quota_change = dle_quota_change_be,
.disable_cpu = rtw89_mac_disable_cpu_be,
.fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be,
@@ -2259,6 +2600,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.parse_phycap_map = rtw89_parse_phycap_map_be,
.cnv_efuse_state = rtw89_cnv_efuse_state_be,
+ .cfg_plt = rtw89_mac_cfg_plt_be,
+ .get_plt_cnt = rtw89_mac_get_plt_cnt_be,
+
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_be,
.write_xtal_si = rtw89_mac_write_xtal_si_be,
@@ -2268,5 +2612,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.dump_err_status = rtw89_mac_dump_err_status_be,
.is_txq_empty = mac_is_txq_empty_be,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list_be,
+ .scan_offload = rtw89_fw_h2c_scan_offload_be,
+
+ .wow_config_mac = rtw89_wow_config_mac_be,
};
EXPORT_SYMBOL(rtw89_mac_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 769f1ce62ebc..19001130ad94 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -155,8 +155,8 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
DMA_FROM_DEVICE);
}
-static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
- struct sk_buff *skb)
+static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
{
struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
@@ -166,11 +166,59 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+}
+
+static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 target_rx_tag;
+
+ if (!info->check_rx_tag)
+ return 0;
+
+ /* valid range is 1 ~ 0x1FFF */
+ if (rx_ring->target_rx_tag == 0)
+ target_rx_tag = 1;
+ else
+ target_rx_tag = rx_ring->target_rx_tag;
+
+ if (rx_info->tag != target_rx_tag) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
+ rx_info->tag, target_rx_tag);
+ return -EAGAIN;
+ }
return 0;
}
-static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+static
+int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ int rx_tag_retry = 100;
+ int ret;
+
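+ /* re-sync the buffer and re-read the RX BD info until the DMA'ed tag matches the expected one */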
+ do {
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ rtw89_pci_rxbd_info_update(rtwdev, skb);
+
+ ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
+ if (ret != -EAGAIN)
+ break;
+ } while (rx_tag_retry--);
+
+ /* update target rx_tag for next RX */
+ rx_ring->target_rx_tag = rx_info->tag + 1;
+
+ return ret;
+}
+
+static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
@@ -187,7 +235,7 @@ static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
}
}
-static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
@@ -259,9 +307,8 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -549,9 +596,8 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -705,7 +751,7 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
- isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR);
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
@@ -1550,6 +1596,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
@@ -1907,22 +1954,87 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
return 0;
}
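+/* Write one byte of PCI config space indirectly through the chip's DBI interface. */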
+static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 addr_2lsb = addr & B_AX_DBI_2LSB;
+ u16 write_addr;
+ u8 flag;
+ int ret;
+
+ write_addr = addr & B_AX_DBI_ADDR_MSK;
+ write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
+ rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret)
+ rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
+ addr);
+
+ return ret;
+}
+
+static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
+ u8 flag;
+ int ret;
+
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
+ addr);
+ return ret;
+ }
+
+ read_addr = R_AX_DBI_RDATA + (addr & 3);
+ *value = rtw89_read8(rtwdev, read_addr);
+
+ return 0;
+}
+
static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_write_config_byte(pdev, addr, data);
+ ret = pci_write_config_byte(pdev, addr, data);
+ if (!ret)
+ return 0;
+
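+ /* fall back to indirect DBI access on chips that support it when the host config cycle fails */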
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_write8(rtwdev, addr, data);
+
+ return ret;
}
static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 *value)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_read_config_byte(pdev, addr, value);
+ ret = pci_read_config_byte(pdev, addr, value);
+ if (!ret)
+ return 0;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_read8(rtwdev, addr, value);
+
+ return ret;
}
static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
@@ -2412,7 +2524,7 @@ static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
}
-static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
@@ -2439,7 +2551,7 @@ static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
@@ -2459,13 +2571,13 @@ static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
{
u32 ret;
- ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
+ ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "txdma ch busy\n");
return ret;
}
- ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
+ ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "rxdma ch busy\n");
return ret;
@@ -2644,8 +2756,8 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
}
/* disable all channels except to FW CMD channel to download firmware */
- rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
- rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
+ rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -2758,7 +2870,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
}
/* enable DMA for all queues */
- rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
/* Release PCI IO */
rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
@@ -3148,6 +3260,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf_sz = buf_sz;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
for (i = 0; i < len; i++) {
skb = dev_alloc_skb(buf_sz);
@@ -3387,8 +3500,7 @@ static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
- rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
- B_BE_PCIE_RX_RPQ0_IMR0_V1;
+ rtwpci->intrs[1] = 0;
}
static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
@@ -3540,12 +3652,20 @@ static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- int ret;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
if (rtw89_pci_disable_clkreq)
return;
+ gen_def->clkreq_set(rtwdev, enable);
+}
+
+static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ int ret;
+
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
PCIE_CLKDLY_HW_30US);
if (ret)
@@ -3577,24 +3697,31 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- u8 value = 0;
- int ret;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
if (rtw89_pci_disable_aspm_l1)
return;
+ gen_def->aspm_set(rtwdev, enable);
+}
+
+static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 value = 0;
+ int ret;
+
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
- rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
- value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
- value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
- FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
+ u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
+ u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
- rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
if (enable)
@@ -3681,6 +3808,17 @@ static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ if (rtw89_pci_disable_l1ss)
+ return;
+
+ gen_def->l1ss_set(rtwdev, enable);
+}
+
+static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
@@ -3954,6 +4092,14 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
+
+ .aspm_set = rtw89_pci_aspm_set_ax,
+ .clkreq_set = rtw89_pci_clkreq_set_ax,
+ .l1ss_set = rtw89_pci_l1ss_set_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);
@@ -3988,10 +4134,11 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.recovery_start = rtw89_pci_ops_recovery_start,
.recovery_complete = rtw89_pci_ops_recovery_complete,
- .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie,
- .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
.ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
- .poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
+
.clr_idx_all = rtw89_pci_clr_idx_all,
.clear = rtw89_pci_clear_resource,
.disable_intr = rtw89_pci_disable_intr_lock,
@@ -4068,6 +4215,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq;
}
+ set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
+
return 0;
err_free_irq:
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index ca5de77fee90..a63b6b7c9bfa 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -42,6 +42,7 @@
#define B_AX_DBI_WFLAG BIT(16)
#define B_AX_DBI_WREN_MSK GENMASK(15, 12)
#define B_AX_DBI_ADDR_MSK GENMASK(11, 2)
+#define B_AX_DBI_2LSB GENMASK(1, 0)
#define R_AX_DBI_WDATA 0x1094
#define R_AX_DBI_RDATA 0x1098
@@ -281,6 +282,21 @@
#define B_BE_PCIE_EN_SWENT_L23 BIT(1)
#define B_BE_SEL_REQ_EXIT_L1 BIT(0)
+#define R_BE_PCIE_MIX_CFG 0x300C
+#define B_BE_L1SS_TIMEOUT_CTRL BIT(18)
+#define B_BE_ASPM_CTRL_L1 BIT(17)
+#define B_BE_ASPM_CTRL_L0 BIT(16)
+#define B_BE_XFER_PENDING_FW BIT(11)
+#define B_BE_XFER_PENDING BIT(10)
+#define B_BE_REQ_EXIT_L1 BIT(9)
+#define B_BE_REQ_ENTR_L1 BIT(8)
+#define B_BE_L1SUB_ENABLE BIT(0)
+
+#define R_BE_L1_CLK_CTRL 0x3010
+#define B_BE_RAS_SD_HOLD_LTSSM BIT(12)
+#define B_BE_CLK_REQ_N BIT(1)
+#define B_BE_CLK_PM_EN BIT(0)
+
#define R_BE_PCIE_LAT_CTRL 0x3044
#define B_BE_ELBI_PHY_REMAP_MASK GENMASK(29, 24)
#define B_BE_SYS_SUS_L12_EN BIT(17)
@@ -289,6 +305,8 @@
#define B_BE_RTK_LDO_POWER_LATENCY_MASK GENMASK(11, 10)
#define B_BE_RTK_LDO_BIAS_LATENCY_MASK GENMASK(9, 8)
#define B_BE_CLK_REQ_LAT_MASK GENMASK(7, 4)
+#define B_BE_RTK_PM_SEL_OPT BIT(1)
+#define B_BE_CLK_REQ_SEL BIT(0)
#define R_BE_PCIE_HIMR0 0x30B0
#define B_BE_PCIE_HB1_IND_INTA_IMR BIT(31)
@@ -924,6 +942,8 @@
#define B_BE_SER_L1SUB_IMR BIT(1)
#define B_BE_SER_PMU_IMR BIT(0)
+#define R_BE_REG_PL1_ISR 0x34B4
+
#define R_BE_RX_APPEND_MODE 0x8920
#define B_BE_APPEND_OFFSET_MASK GENMASK(23, 16)
#define B_BE_APPEND_LEN_MASK GENMASK(15, 0)
@@ -996,7 +1016,7 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE 11460
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
@@ -1065,6 +1085,15 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+enum rtw89_pcie_clkdly_hw_v1 {
+ PCIE_CLKDLY_HW_V1_0 = 0,
+ PCIE_CLKDLY_HW_V1_16US = 0x1,
+ PCIE_CLKDLY_HW_V1_32US = 0x2,
+ PCIE_CLKDLY_HW_V1_64US = 0x3,
+ PCIE_CLKDLY_HW_V1_80US = 0x4,
+ PCIE_CLKDLY_HW_V1_96US = 0x5,
+};
+
enum mac_ax_bd_trunc_mode {
MAC_AX_BD_NORM,
MAC_AX_BD_TRUNC,
@@ -1215,6 +1244,14 @@ struct rtw89_pci_gen_def {
int (*lv1rst_stop_dma)(struct rtw89_dev *rtwdev);
int (*lv1rst_start_dma)(struct rtw89_dev *rtwdev);
+
+ void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
+ void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
+ int (*poll_txdma_ch_idle)(struct rtw89_dev *rtwdev);
+
+ void (*aspm_set)(struct rtw89_dev *rtwdev, bool enable);
+ void (*clkreq_set)(struct rtw89_dev *rtwdev, bool enable);
+ void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
};
struct rtw89_pci_info {
@@ -1234,6 +1271,7 @@ struct rtw89_pci_info {
enum mac_ax_pcie_func_ctrl io_rcy_en;
enum mac_ax_io_rcy_tmr io_rcy_tmr;
bool rx_ring_eq_is_full;
+ bool check_rx_tag;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1276,7 +1314,7 @@ struct rtw89_pci_tx_data {
struct rtw89_pci_rx_info {
dma_addr_t dma;
- u32 fs:1, ls:1, tag:11, len:14;
+ u32 fs:1, ls:1, tag:13, len:14;
};
#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
@@ -1405,6 +1443,7 @@ struct rtw89_pci_rx_ring {
u32 buf_sz;
struct sk_buff *diliver_skb;
struct rtw89_rx_desc_info diliver_desc;
+ u32 target_rx_tag:13;
};
struct rtw89_pci_isrs {
@@ -1521,6 +1560,7 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
}
extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct dev_pm_ops rtw89_pm_ops_be;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
@@ -1676,4 +1716,27 @@ static inline int rtw89_pci_reset_bdram(struct rtw89_dev *rtwdev)
return gen_def->rst_bdram(rtwdev);
}
+static inline void rtw89_pci_ctrl_txdma_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->ctrl_txdma_ch(rtwdev, enable);
+}
+
+static inline void rtw89_pci_ctrl_txdma_fw_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->ctrl_txdma_fw_ch(rtwdev, enable);
+}
+
+static inline int rtw89_pci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->poll_txdma_ch_idle(rtwdev);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 629ffa4bee91..7cc328222965 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -19,6 +19,54 @@ enum pcie_rxbd_mode {
#define PL0_TMR_MAC_1MS 0x27100
#define PL0_TMR_AUX_1MS 0x1E848
+static void rtw89_pci_aspm_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u8 value = 0;
+ int ret;
+
+ ret = pci_read_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, &value);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
+
+ u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
+
+ ret = pci_write_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, value);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
+
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_ASPM_CTRL_L1);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_ASPM_CTRL_L1);
+}
+
+static void rtw89_pci_l1ss_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_L1SUB_ENABLE);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_L1SUB_ENABLE);
+}
+
+static void rtw89_pci_clkreq_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_write32_mask(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_CLK_REQ_LAT_MASK,
+ PCIE_CLKDLY_HW_V1_0);
+
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_L1_CLK_CTRL,
+ B_BE_CLK_PM_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_L1_CLK_CTRL,
+ B_BE_CLK_PM_EN);
+}
+
static void _patch_pcie_power_wake_be(struct rtw89_dev *rtwdev, bool power_up)
{
if (power_up)
@@ -105,6 +153,10 @@ static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
val |= B_BE_STOP_AXI_MST;
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
+
+ if (io_en == MAC_AX_PCIE_ENABLE)
+ rtw89_write32_mask(rtwdev, R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1,
+ B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK, 4);
}
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
@@ -257,6 +309,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_mask(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_TIMER_UNIT_MASK, 1);
val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
@@ -264,8 +317,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
}
-static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
- bool h2c_en)
+static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool enable)
{
u32 mask_all;
u32 val;
@@ -278,12 +330,19 @@ static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;
- if (all_en)
+ if (enable)
val &= ~mask_all;
else
val |= mask_all;
- if (h2c_en)
+ rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
+}
+
+static void rtw89_pci_ctrl_txdma_fw_ch_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ u32 val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
+
+ if (enable)
val &= ~B_BE_STOP_CH12;
else
val |= B_BE_STOP_CH12;
@@ -322,7 +381,8 @@ static int rtw89_pci_ops_mac_pre_init_be(struct rtw89_dev *rtwdev)
rtw89_pci_pcie_setting_be(rtwdev);
rtw89_pci_ser_setting_be(rtwdev);
- rtw89_pci_ctrl_txdma_ch_be(rtwdev, false, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, false);
+ rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_ENABLE,
MAC_AX_PCIE_ENABLE, MAC_AX_PCIE_ENABLE);
@@ -432,7 +492,8 @@ static int rtw89_pci_ops_mac_post_init_be(struct rtw89_dev *rtwdev)
rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_IGNORE,
MAC_AX_PCIE_IGNORE, MAC_AX_PCIE_ENABLE);
rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, true);
- rtw89_pci_ctrl_txdma_ch_be(rtwdev, true, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, true);
+ rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
rtw89_pci_configure_mit_be(rtwdev);
return 0;
@@ -489,6 +550,46 @@ static int rtw89_pci_lv1rst_start_dma_be(struct rtw89_dev *rtwdev)
return 0;
}
+static int __maybe_unused rtw89_pci_suspend_be(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
+ rtw89_write32_clr(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
+ return 0;
+}
+
+static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+ u32 polling;
+ int ret;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
+ rtw89_write32_clr(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, polling, !polling, 1, 1000,
+ false, rtwdev, R_BE_REG_PL1_ISR);
+ if (ret)
+ rtw89_warn(rtwdev, "[ERR] PCIE SER clear polling fail\n");
+
+ rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_set(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
+EXPORT_SYMBOL(rtw89_pm_ops_be);
+
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
@@ -505,5 +606,13 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_be,
.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_be,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_be,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_be,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_be,
+
+ .aspm_set = rtw89_pci_aspm_set_be,
+ .clkreq_set = rtw89_pci_clkreq_set_be,
+ .l1ss_set = rtw89_pci_l1ss_set_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index bafc7b1cc104..12da63d64307 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -13,6 +13,13 @@
#include "txrx.h"
#include "util.h"
+static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ return phy->phy0_phy1_offset(rtwdev, addr);
+}
+
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
@@ -718,6 +725,53 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw)
+{
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
+ u8 txsb_idx = 0;
+
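+ /* index (from the lowest-frequency side) of the dbw-wide sub-band that holds the primary channel */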
+ if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
+ return txsb_idx;
+
+ switch (cbw) {
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 6) / 4;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 14) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 12) / 8;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 30) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 28) / 8;
+ else if (dbw == RTW89_CHANNEL_WIDTH_80)
+ txsb_idx = (pri_ch - central_ch + 24) / 16;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ default:
+ break;
+ }
+
+ return txsb_idx;
+}
+EXPORT_SYMBOL(rtw89_phy_get_txsb);
+
static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
@@ -796,6 +850,71 @@ u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
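+/* Read an RF register over the HWSI interface: trigger the read, poll until it completes, then fetch the value. */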
+static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr)
+{
+ static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
+ static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
+ bool busy, done;
+ int ret;
+ u32 val;
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
+ if (ret) {
+ rtw89_warn(rtwdev, "poll HWSI is busy\n");
+ return INV_RF_DATA;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
+ if (ret) {
+ rtw89_warn(rtwdev, "read HWSI is busy\n");
+ val = INV_RF_DATA;
+ goto out;
+ }
+
+ val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
+out:
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);
+
+ return val;
+}
+
+static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ u32 val;
+
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+
+ return (val & mask) >> __ffs(mask);
+}
+
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -875,6 +994,66 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+static
+bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 data)
+{
+ static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
+ static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
+ bool busy;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, addr_is_idle[rf_path], BIT(29));
+ if (ret) {
+ rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
+ return false;
+ }
+
+ val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
+ u32_encode_bits(data, B_HWSI_DATA_VAL);
+
+ rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);
+
+ return true;
+}
+
+static
+bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ u32 val;
+
+ if (mask == RFREG_MASK) {
+ val = data;
+ } else {
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+ val &= ~mask;
+ val |= (data << __ffs(mask)) & mask;
+ }
+
+ return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
+}
+
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
+
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
@@ -893,20 +1072,30 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data)
{
- if (reg->addr == 0xfe)
+ u32 addr;
+
+ if (reg->addr == 0xfe) {
mdelay(50);
- else if (reg->addr == 0xfd)
+ } else if (reg->addr == 0xfd) {
mdelay(5);
- else if (reg->addr == 0xfc)
+ } else if (reg->addr == 0xfc) {
mdelay(1);
- else if (reg->addr == 0xfb)
+ } else if (reg->addr == 0xfb) {
udelay(50);
- else if (reg->addr == 0xfa)
+ } else if (reg->addr == 0xfa) {
udelay(5);
- else if (reg->addr == 0xf9)
+ } else if (reg->addr == 0xf9) {
udelay(1);
- else
- rtw89_phy_write32(rtwdev, reg->addr, reg->data);
+ } else if (reg->data == BYPASS_CR_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
+ } else {
+ addr = reg->addr;
+
+ if ((uintptr_t)extra_data == RTW89_PHY_1)
+ addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
+
+ rtw89_phy_write32(rtwdev, addr, reg->data);
+ }
}
union rtw89_phy_bb_gain_arg {
@@ -929,7 +1118,7 @@ static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -968,7 +1157,7 @@ static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 rxsc_start = arg.rxsc_start;
u8 bw = arg.bw;
u8 path = arg.path;
@@ -1050,7 +1239,7 @@ static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1077,7 +1266,7 @@ static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1108,10 +1297,10 @@ rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
- const struct rtw89_reg2_def *reg,
- enum rtw89_rf_path rf_path,
- void *extra_data)
+static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
@@ -1420,12 +1609,15 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
+ if (rtwdev->dbcc_en)
+ rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
+ (void *)RTW89_PHY_1);
rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
if (bb_gain_table)
rtw89_phy_init_reg(rtwdev, bb_gain_table,
- rtw89_phy_config_bb_gain, NULL);
+ chip->phy_def->config_bb_gain, NULL);
rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}
@@ -1467,11 +1659,9 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
kfree(rf_reg_info);
}
-static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
- struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_phy_table *nctl_table;
u32 val;
int ret;
@@ -1491,6 +1681,15 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
1000, false, rtwdev);
if (ret)
rtw89_err(rtwdev, "failed to poll nctl block\n");
+}
+
+static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *nctl_table;
+
+ rtw89_phy_preinit_rf_nctl(rtwdev);
nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
@@ -1499,14 +1698,11 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}
-static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
u32 phy_page = addr >> 8;
u32 ofst = 0;
- if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
- return addr < 0x10000 ? 0x20000 : 0;
-
switch (phy_page) {
case 0x6:
case 0x7:
@@ -1561,6 +1757,7 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
+EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl)
@@ -2699,9 +2896,63 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};
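+/* Arm a completion before issuing an RF calibration H2C, then wait for the corresponding RFK report C2H. */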
+static
+void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = RTW89_RFK_STATE_START;
+ wait->start_time = ktime_get();
+ reinit_completion(&wait->completion);
+}
+
+static
+int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
+ unsigned int ms)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+ unsigned long time_left;
+
+ /* Since we can't receive C2H event during SER, use a fixed delay. */
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
+ fsleep(1000 * ms / 2);
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&wait->completion,
+ msecs_to_jiffies(ms));
+ if (time_left == 0) {
+ rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
+ return -ETIMEDOUT;
+ } else if (wait->state != RTW89_RFK_STATE_OK) {
+ rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
+ rfk_name, wait->state);
+ return -EFAULT;
+ }
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
+ rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
+
+ return 0;
+}
+
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ const struct rtw89_c2h_rfk_report *report =
+ (const struct rtw89_c2h_rfk_report *)c2h->data;
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = report->state;
+ wait->version = report->version;
+
+ complete(&wait->completion);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "RFK report state %d with version %d (%*ph)\n",
+ wait->state, wait->version,
+ (int)(len - sizeof(report->hdr)), &report->state);
}
static
@@ -2772,6 +3023,726 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler(rtwdev, skb, len);
}
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
+
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
+
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
+
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
+
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
+
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
+
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
+
+static u32 phy_tssi_get_cck_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
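+/* Channels falling between two calibration groups are marked "extra" and use the average of the two neighbouring groups. */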
+#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
+#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
+#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
+ ((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
+ (PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 phy_tssi_get_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return PHY_TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return PHY_TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return PHY_TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 5:
+ return 0;
+ case 6 ... 8:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 9 ... 13:
+ return 1;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(1);
+ case 17 ... 21:
+ return 2;
+ case 22 ... 24:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 25 ... 29:
+ return 3;
+ case 33 ... 37:
+ return 4;
+ case 38 ... 40:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 41 ... 45:
+ return 5;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 49 ... 53:
+ return 6;
+ case 54 ... 56:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 57 ... 61:
+ return 7;
+ case 65 ... 69:
+ return 8;
+ case 70 ... 72:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 73 ... 77:
+ return 9;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 81 ... 85:
+ return 10;
+ case 86 ... 88:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 89 ... 93:
+ return 11;
+ case 97 ... 101:
+ return 12;
+ case 102 ... 104:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 105 ... 109:
+ return 13;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 113 ... 117:
+ return 14;
+ case 118 ... 120:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 121 ... 125:
+ return 15;
+ case 129 ... 133:
+ return 16;
+ case 134 ... 136:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 137 ... 141:
+ return 17;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 145 ... 149:
+ return 18;
+ case 150 ... 152:
+ return PHY_TSSI_EXTRA_GROUP(18);
+ case 153 ... 157:
+ return 19;
+ case 161 ... 165:
+ return 20;
+ case 166 ... 168:
+ return PHY_TSSI_EXTRA_GROUP(20);
+ case 169 ... 173:
+ return 21;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(21);
+ case 177 ... 181:
+ return 22;
+ case 182 ... 184:
+ return PHY_TSSI_EXTRA_GROUP(22);
+ case 185 ... 189:
+ return 23;
+ case 193 ... 197:
+ return 24;
+ case 198 ... 200:
+ return PHY_TSSI_EXTRA_GROUP(24);
+ case 201 ... 205:
+ return 25;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(25);
+ case 209 ... 213:
+ return 26;
+ case 214 ... 216:
+ return PHY_TSSI_EXTRA_GROUP(26);
+ case 217 ... 221:
+ return 27;
+ case 225 ... 229:
+ return 28;
+ case 230 ... 232:
+ return PHY_TSSI_EXTRA_GROUP(28);
+ case 233 ... 237:
+ return 29;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(29);
+ case 241 ... 245:
+ return 30;
+ case 246 ... 248:
+ return PHY_TSSI_EXTRA_GROUP(30);
+ case 249 ... 253:
+ return 31;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 13:
+ return 0;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 17 ... 29:
+ return 1;
+ case 33 ... 45:
+ return 2;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 49 ... 61:
+ return 3;
+ case 65 ... 77:
+ return 4;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 81 ... 93:
+ return 5;
+ case 97 ... 109:
+ return 6;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 113 ... 125:
+ return 7;
+ case 129 ... 141:
+ return 8;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 145 ... 157:
+ return 9;
+ case 161 ... 173:
+ return 10;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 177 ... 189:
+ return 11;
+ case 193 ... 205:
+ return 12;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 209 ... 221:
+ return 13;
+ case 225 ... 237:
+ return 14;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 241 ... 253:
+ return 15;
+ }
+
+ return 0;
+}
+
+static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 gidx_1st;
+ u32 gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ u32 gidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ gidx = phy_tssi_get_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+
+calc_6g:
+ gidx = phy_tssi_get_6g_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_6g_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 tgidx_1st;
+ u32 tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ u32 tgidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ tgidx = phy_tssi_get_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+
+calc_6g:
+ tgidx = phy_tssi_get_6g_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim_6g[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
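+/*
+ * Fill the per-path DE fields of the TSSI H2C from efuse calibration data.
+ * The CCK/OFDM efuse DE is summed with the trim DE and truncated to the low
+ * byte via u32_get_bits(sum, 0xff), and the resulting value is replicated
+ * across the bandwidth-specific fields of the command.
+ */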
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = chan->channel;
+ s8 trim_de;
+ s8 ofdm_de;
+ s8 cck_de;
+ u8 gidx;
+ s8 val;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
+ trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
+ h2c->curr_tssi_trim_de[i] = trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);
+
+ gidx = phy_tssi_get_cck_group(ch);
+ cck_de = tssi_info->tssi_cck[i][gidx];
+ val = u32_get_bits(cck_de + trim_de, 0xff);
+
+ h2c->curr_tssi_cck_de[i] = 0x0;
+ h2c->curr_tssi_cck_de_20m[i] = val;
+ h2c->curr_tssi_cck_de_40m[i] = val;
+ h2c->curr_tssi_efuse_cck_de[i] = cck_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);
+
+ ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
+ val = u32_get_bits(ofdm_de + trim_de, 0xff);
+
+ h2c->curr_tssi_ofdm_de[i] = 0x0;
+ h2c->curr_tssi_ofdm_de_20m[i] = val;
+ h2c->curr_tssi_ofdm_de_40m[i] = val;
+ h2c->curr_tssi_ofdm_de_80m[i] = val;
+ h2c->curr_tssi_ofdm_de_160m[i] = val;
+ h2c->curr_tssi_ofdm_de_320m[i] = val;
+ h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
+ }
+}
+
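+/*
+ * Build the 128-entry thermal offset table sent to firmware.  Entries 0..63
+ * carry the thermal-up deltas and entries 127..64 the negated thermal-down
+ * deltas, both saturating at the last DELTA_SWINGIDX_SIZE value; the table is
+ * copied into ftable[] with the bytes of each 4-byte group reversed.  A PG
+ * thermal of 0xff means no calibration value was programmed, in which case a
+ * default of 0x38 and an all-zero table are used instead.
+ */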
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const s8 *thm_up[RF_PATH_B + 1] = {};
+ const s8 *thm_down[RF_PATH_B + 1] = {};
+ u8 subband = chan->subband_type;
+ s8 thm_ofst[128] = {0};
+ u8 thermal;
+ u8 path;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
+ break;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
+ break;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] tmeter tbl on subband: %u\n", subband);
+
+ for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ thermal = tssi_info->thermal[path];
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "path: %u, pg thermal: 0x%x\n", path, thermal);
+
+ if (thermal == 0xff) {
+ h2c->pg_thermal[path] = 0x38;
+ memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
+ continue;
+ }
+
+ h2c->pg_thermal[path] = thermal;
+
+ i = 0;
+ for (j = 0; j < 64; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up[path][i++] :
+ thm_up[path][DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 127; j >= 64; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down[path][i++] :
+ -thm_down[path][DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 128; i += 4) {
+ h2c->ftable[path][i + 0] = thm_ofst[i + 3];
+ h2c->ftable[path][i + 1] = thm_ofst[i + 2];
+ h2c->ftable[path][i + 2] = thm_ofst[i + 1];
+ h2c->ftable[path][i + 3] = thm_ofst[i + 0];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "thm ofst [%x]: %02x %02x %02x %02x\n",
+ i, thm_ofst[i], thm_ofst[i + 1],
+ thm_ofst[i + 2], thm_ofst[i + 3]);
+ }
+ }
+}
+
static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
@@ -4551,6 +5522,9 @@ static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
const struct rtw89_agc_gaincode_set set)
{
+ if (!rtwdev->hal.support_igi)
+ return;
+
rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
@@ -4606,7 +5580,8 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
s8 cck_cca_th;
u32 pd_val = 0;
- under_region += PD_TH_SB_FLTR_CMP_VAL;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
+ under_region += PD_TH_SB_FLTR_CMP_VAL;
switch (cbw) {
case RTW89_CHANNEL_WIDTH_40:
@@ -4953,12 +5928,15 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
+ rtw89_phy_bb_wrap_init(rtwdev);
rtw89_phy_edcca_init(rtwdev);
+ rtw89_phy_ch_info_init(rtwdev);
rtw89_phy_ul_tb_info_init(rtwdev);
rtw89_phy_antdiv_init(rtwdev);
rtw89_chip_rfe_gpio(rtwdev);
rtw89_phy_antdiv_set_ant(rtwdev);
+ rtw89_chip_rfk_hw_init(rtwdev);
rtw89_phy_init_rf_nctl(rtwdev);
rtw89_chip_rfk_init(rtwdev);
rtw89_chip_set_txpwr_ctrl(rtwdev);
@@ -5400,6 +6378,78 @@ void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
rtw89_phy_edcca_log(rtwdev);
}
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ return RF_AB;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_AB;
+ else
+ return RF_CD;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_kpath);
+
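+/*
+ * Like rtw89_phy_get_kpath() above, but returns the single RF path used as
+ * synthesizer selection for the given PHY under the current MLO/DBCC mode.
+ */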
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_C;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
+
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.setting_addr = R_CCX,
.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
@@ -5476,6 +6526,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
.ccx = &rtw89_ccx_regs_ax,
.physts = &rtw89_physts_regs_ax,
.cfo = &rtw89_cfo_regs_ax,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
+ .config_bb_gain = rtw89_phy_config_bb_gain_ax,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
+ .bb_wrap_init = NULL,
+ .ch_info_init = NULL,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 3e379077c6ca..082231ebbee5 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -7,6 +7,7 @@
#include "core.h"
+#define RTW89_BBMCU_ADDR_OFFSET 0x30000
#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
@@ -509,6 +510,14 @@ struct rtw89_phy_gen_def {
const struct rtw89_ccx_regs *ccx;
const struct rtw89_physts_regs *physts;
const struct rtw89_cfo_regs *cfo;
+ u32 (*phy0_phy1_offset)(struct rtw89_dev *rtwdev, u32 addr);
+ void (*config_bb_gain)(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
+ void (*preinit_rf_nctl)(struct rtw89_dev *rtwdev);
+ void (*bb_wrap_init)(struct rtw89_dev *rtwdev);
+ void (*ch_info_init)(struct rtw89_dev *rtwdev);
void (*set_txpwr_byrate)(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -604,6 +613,15 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr + phy->cr_base, mask);
}
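+/*
+ * Write a BB MCU register: PHY1 addresses below 0x10000 are first remapped to
+ * the 0x20000 page, then the access goes through the BB MCU window at
+ * RTW89_BBMCU_ADDR_OFFSET.  For example, addr 0x0100 on PHY1 is written at
+ * 0x0100 + 0x20000 + 0x30000 = 0x50100.
+ */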
+static inline void rtw89_bbmcu_write32(struct rtw89_dev *rtwdev,
+ u32 addr, u32 data, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx && addr < 0x10000)
+ addr += 0x20000;
+
+ rtw89_write32(rtwdev, addr + RTW89_BBMCU_ADDR_OFFSET, data);
+}
+
static inline
enum rtw89_gain_offset rtw89_subband_to_gain_offset_band_of_ofdm(enum rtw89_subband subband)
{
@@ -664,6 +682,38 @@ enum rtw89_phy_bb_gain_band rtw89_subband_to_bb_gain_band(enum rtw89_subband sub
}
}
+static inline
+enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G_BE;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L_BE;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M_BE;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H_BE;
+ case RTW89_CH_6G_BAND_IDX0:
+ return RTW89_BB_GAIN_BAND_6G_L0_BE;
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L1_BE;
+ case RTW89_CH_6G_BAND_IDX2:
+ return RTW89_BB_GAIN_BAND_6G_M0_BE;
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M1_BE;
+ case RTW89_CH_6G_BAND_IDX4:
+ return RTW89_BB_GAIN_BAND_6G_H0_BE;
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H1_BE;
+ case RTW89_CH_6G_BAND_IDX6:
+ return RTW89_BB_GAIN_BAND_6G_UH0_BE;
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH1_BE;
+ }
+}
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -728,14 +778,20 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
@@ -759,6 +815,29 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch);
+static inline void rtw89_phy_preinit_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ phy->preinit_rf_nctl(rtwdev);
+}
+
+static inline void rtw89_phy_bb_wrap_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->bb_wrap_init)
+ phy->bb_wrap_init(rtwdev);
+}
+
+static inline void rtw89_phy_ch_info_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->ch_info_init)
+ phy->ch_info_init(rtwdev);
+}
+
static inline
void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -809,6 +888,36 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms);
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
void rtw89_phy_cfo_track_work(struct work_struct *work);
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
@@ -836,5 +945,9 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index 63eeeea72b68..be0148f2b96f 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -78,6 +78,332 @@ static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
.valid_0_mask = B_DCFO_OPT_EN_V1,
};
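+/*
+ * On BE generation chips, PHY1 mirrors the PHY0 control registers at a
+ * +0x1000 offset for the register pages listed below; addresses outside these
+ * page ranges take no offset.
+ */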
+static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 phy_page = addr >> 8;
+ u32 ofst = 0;
+
+ if ((phy_page >= 0x4 && phy_page <= 0xF) ||
+ (phy_page >= 0x20 && phy_page <= 0x2B) ||
+ (phy_page >= 0x40 && phy_page <= 0x4f) ||
+ (phy_page >= 0x60 && phy_page <= 0x6f) ||
+ (phy_page >= 0xE4 && phy_page <= 0xE5) ||
+ (phy_page >= 0xE8 && phy_page <= 0xED))
+ ofst = 0x1000;
+ else
+ ofst = 0x0;
+
+ return ofst;
+}
+
+union rtw89_phy_bb_gain_arg_be {
+ u32 addr;
+ struct {
+ u8 type;
+#define BB_GAIN_TYPE_SUB0_BE GENMASK(3, 0)
+#define BB_GAIN_TYPE_SUB1_BE GENMASK(7, 4)
+ u8 path_bw;
+#define BB_GAIN_PATH_BE GENMASK(3, 0)
+#define BB_GAIN_BW_BE GENMASK(7, 4)
+ u8 gain_band;
+ u8 cfg_type;
+ } __packed;
+} __packed;
+
+static void
+rtw89_phy_cfg_bb_gain_error_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 2; i++, data >>= 8)
+ gain->tia_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain error {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_rpl_ofst_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 type_sub0 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB0_BE);
+ u8 type_sub1 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB1_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 ofst = 0;
+ int i;
+
+ switch (type_sub1) {
+ case RTW89_CMAC_BW_20M:
+ gain->rpl_ofst_20[gband][path][0] = (s8)data;
+ break;
+ case RTW89_CMAC_BW_40M:
+ for (i = 0; i < RTW89_BW20_SC_40M; i++, data >>= 8)
+ gain->rpl_ofst_40[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_80M:
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_80[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_160M:
+ if (type_sub0 == 0)
+ ofst = 0;
+ else
+ ofst = RTW89_BW20_SC_80M;
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_160[gband][path][i + ofst] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb rpl ofst {0x%x:0x%x} with unknown type_sub1: %d\n",
+ arg.addr, data, type_sub1);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_op1db_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 3:
+ for (i = 4; i < 8; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
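+/*
+ * Dispatcher for BE BB gain table entries: reg->addr is reinterpreted as
+ * {type, path/bw, gain_band, cfg_type} and reg->data is routed to the gain
+ * error, RPL offset or OP1dB tables accordingly.  cfg_type 2 (gain bypass) is
+ * ignored, cfg_type 4 is skipped for rfe_type < 50, and anything else is
+ * reported as unknown.
+ */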
+static void rtw89_phy_config_bb_gain_be(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ union rtw89_phy_bb_gain_arg_be arg = { .addr = reg->addr };
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+
+ if (bw_type >= RTW89_BB_BW_NR_BE)
+ return;
+
+ if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR_BE)
+ return;
+
+ if (path >= chip->rf_path_num)
+ return;
+
+ if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
+ rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
+ return;
+ }
+
+ switch (arg.cfg_type) {
+ case 0:
+ rtw89_phy_cfg_bb_gain_error_be(rtwdev, arg, reg->data);
+ break;
+ case 1:
+ rtw89_phy_cfg_bb_rpl_ofst_be(rtwdev, arg, reg->data);
+ break;
+ case 2:
+ /* ignore BB gain bypass */
+ break;
+ case 3:
+ rtw89_phy_cfg_bb_gain_op1db_be(rtwdev, arg, reg->data);
+ break;
+ case 4:
+ /* This cfg_type is only used by rfe_type >= 50 with eFEM */
+ if (efuse->rfe_type < 50)
+ break;
+ fallthrough;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
+ arg.addr, reg->data, arg.cfg_type);
+ break;
+ }
+}
+
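+/*
+ * RF NCTL pre-initialization for BE: enable the IQK/DPK clocks and program
+ * the related reset controls before the NCTL tables are loaded.  The *_C1
+ * copies are only touched when DBCC is enabled.
+ */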
+static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQKDPK_HC, B_IQKDPK_HC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CLK_GCK, B_CLK_GCK, 0x00fffff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC, B_TXRFC_RST, 0x1);
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_C1, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC_C1, B_TXRFC_RST, 0x1);
+ }
+}
+
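+/*
+ * BB wrapper initialization helpers for BE: seed the per-MACID TX power limit
+ * and TX path CRs (32 MACIDs, 4-byte stride) with default values, zero the
+ * by-rate/RU-limit/rate-offset tables, and clear the various force-power
+ * overrides for the given MAC index.
+ */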
+static
+void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
+
+ base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
+
+ for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
+ cr = base_macid_lmt + macid_idx;
+ rtw89_write32(rtwdev, cr, 0x03007F7F);
+ }
+}
+
+static
+void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ int i, max_macid = 32;
+ u32 cr = R_BE_PWR_MACID_PATH_BASE;
+
+ for (i = 0; i < max_macid; i++, cr += 4)
+ rtw89_write32(rtwdev, cr, 0x03C86000);
+}
+
+static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_LMTBF, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMTBF_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_BYRATE_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_RULMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_SW, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_SW_DB, 0);
+}
+
+static
+void rtw89_phy_bb_wrap_listen_path_en_init(struct rtw89_dev *rtwdev)
+{
+ u32 addr;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ if (ret)
+ return;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_LISTEN_PATH, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_LISTEN_PATH_EN, 0x2);
+}
+
+static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_LMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_LMT_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RATE_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_FORCE_PWR_BY_RATE_EN, 0);
+}
+
+static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM, mac_idx);
+ rtw89_write32(rtwdev, addr, 0xE4E431);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, 0x7, 0);
+}
+
+static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_mac_idx mac_idx = RTW89_MAC_0;
+
+ rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
+ rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+}
+
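+/*
+ * Default channel-info report configuration for BE: program the segment
+ * length/bitmap, data bitmap, element source, report type and scaling
+ * registers with their initial values.
+ */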
+static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG_LEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_DATA, B_CHINFO_DATA_BITMAP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_ELM_BITMAP, 0x40303);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_SRC, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_TYPE, 0x3);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
+}
+
struct rtw89_byr_spec_ent_be {
struct rtw89_rate_desc init;
u8 num_of_idx;
@@ -644,6 +970,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
.ccx = &rtw89_ccx_regs_be,
.physts = &rtw89_physts_regs_be,
.cfo = &rtw89_cfo_regs_be,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
+ .config_bb_gain = rtw89_phy_config_bb_gain_be,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
+ .bb_wrap_init = rtw89_phy_bb_wrap_init_be,
+ .ch_info_init = rtw89_phy_ch_info_init_be,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 917c01e5e9ed..31290d8cb7f7 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -14,6 +14,7 @@
static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 pwr_en_bit = 0xE;
u32 chk_msk = pwr_en_bit << (4 * macid);
u32 polling;
@@ -21,7 +22,7 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
ret = read_poll_timeout_atomic(rtw89_read32_mask, polling, !polling,
1000, 50000, false, rtwdev,
- R_AX_PPWRBIT_SETTING, chk_msk);
+ mac->ps_status, chk_msk);
if (ret) {
rtw89_info(rtwdev, "rtw89: failed to leave lps state\n");
return -EBUSY;
@@ -83,16 +84,17 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_LEGACY,
.lastrpwm = RTW89_LAST_RPWM_PS,
};
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
@@ -123,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_lps(rtwdev, rtwvif);
if (ps_mode)
__rtw89_enter_ps_mode(rtwdev, rtwvif);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 8456e2b0c14f..72e448e91b6f 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -3246,6 +3246,13 @@
#define R_AX_RX_SR_CTRL_C1 0xEE4A
#define B_AX_SR_EN BIT(0)
+#define R_AX_BSSID_SRC_CTRL 0xCE4B
+#define R_AX_BSSID_SRC_CTRL_C1 0xEE4B
+#define B_AX_BSSID_MATCH BIT(3)
+#define B_AX_PARTIAL_AID_MATCH BIT(2)
+#define B_AX_BSSCOLOR_MATCH BIT(1)
+#define B_AX_PLCP_SRC_EN BIT(0)
+
#define R_AX_CSIRPT_OPTION 0xCE64
#define R_AX_CSIRPT_OPTION_C1 0xEE64
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
@@ -3503,8 +3510,13 @@
#define B_AX_PTA_EDCCA_EN BIT(0)
#define R_BTC_COEX_WL_REQ 0xDA24
+#define R_BTC_COEX_WL_REQ_BE 0xE324
+#define B_BTC_TX_NULL_HI BIT(23)
#define B_BTC_TX_BCN_HI BIT(22)
+#define B_BTC_TX_TRI_HI BIT(17)
#define B_BTC_RSP_ACK_HI BIT(10)
+#define B_BTC_PRI_MASK_TX_TIME GENMASK(4, 3)
+#define B_BTC_PRI_MASK_RX_TIME_V1 GENMASK(2, 1)
#define R_BTC_BREAK_TABLE 0xDA2C
#define BTC_BREAK_PARAM 0xf0ffffff
@@ -3752,6 +3764,19 @@
#define B_BE_SYM_PADPDN_WL_RFC1_1P3 BIT(6)
#define B_BE_SYM_PADPDN_WL_RFC0_1P3 BIT(5)
+#define R_BE_RSV_CTRL 0x001C
+#define B_BE_HR_BE_DBG GENMASK(23, 12)
+#define B_BE_R_SYM_DIS_PCIE_FLR BIT(9)
+#define B_BE_R_EN_HRST_PWRON BIT(8)
+#define B_BE_LOCK_ALL_EN BIT(7)
+#define B_BE_R_DIS_PRST BIT(6)
+#define B_BE_WLOCK_1C_BIT6 BIT(5)
+#define B_BE_WLOCK_40 BIT(4)
+#define B_BE_WLOCK_08 BIT(3)
+#define B_BE_WLOCK_04 BIT(2)
+#define B_BE_WLOCK_00 BIT(1)
+#define B_BE_WLOCK_ALL BIT(0)
+
#define R_BE_AFE_LDO_CTRL 0x0020
#define B_BE_FORCE_MACBBBT_PWR_ON BIT(31)
#define B_BE_R_SYM_WLPOFF_P4_PC_EN BIT(28)
@@ -4033,6 +4058,30 @@
#define B_BE_SYSON_DIS_PMCR_BE_WRMSK BIT(2)
#define B_BE_SYSON_R_BE_ARB_MASK GENMASK(1, 0)
+#define R_BE_MEM_PWR_CTRL 0x00D0
+#define B_BE_DMEM5_WLMCU_DS BIT(31)
+#define B_BE_DMEM4_WLMCU_DS BIT(30)
+#define B_BE_DMEM3_WLMCU_DS BIT(29)
+#define B_BE_DMEM2_WLMCU_DS BIT(28)
+#define B_BE_DMEM1_WLMCU_DS BIT(27)
+#define B_BE_DMEM0_WLMCU_DS BIT(26)
+#define B_BE_IMEM5_WLMCU_DS BIT(25)
+#define B_BE_IMEM4_WLMCU_DS BIT(24)
+#define B_BE_IMEM3_WLMCU_DS BIT(23)
+#define B_BE_IMEM2_WLMCU_DS BIT(22)
+#define B_BE_IMEM1_WLMCU_DS BIT(21)
+#define B_BE_IMEM0_WLMCU_DS BIT(20)
+#define B_BE_MEM_BBMCU1_DS BIT(19)
+#define B_BE_MEM_BBMCU0_DS_V1 BIT(17)
+#define B_BE_MEM_BT_DS BIT(10)
+#define B_BE_MEM_SDIO_LS BIT(9)
+#define B_BE_MEM_SDIO_DS BIT(8)
+#define B_BE_MEM_USB_LS BIT(7)
+#define B_BE_MEM_USB_DS BIT(6)
+#define B_BE_MEM_PCI_LS BIT(5)
+#define B_BE_MEM_PCI_DS BIT(4)
+#define B_BE_MEM_WLMAC_LS BIT(3)
+
#define R_BE_PCIE_MIO_INTF 0x00E4
#define B_BE_AON_MIO_EPHY_1K_SEL_MASK GENMASK(29, 24)
#define B_BE_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
@@ -4401,12 +4450,28 @@
#define R_BE_LTR_LATENCY_IDX2_V1 0x361C
#define R_BE_LTR_LATENCY_IDX3_V1 0x3620
+#define R_BE_H2CREG_DATA0 0x7140
+#define R_BE_H2CREG_DATA1 0x7144
+#define R_BE_H2CREG_DATA2 0x7148
+#define R_BE_H2CREG_DATA3 0x714C
+#define R_BE_C2HREG_DATA0 0x7150
+#define R_BE_C2HREG_DATA1 0x7154
+#define R_BE_C2HREG_DATA2 0x7158
+#define R_BE_C2HREG_DATA3 0x715C
+#define R_BE_H2CREG_CTRL 0x7160
+#define B_BE_H2CREG_TRIGGER BIT(0)
+#define R_BE_C2HREG_CTRL 0x7164
+#define B_BE_C2HREG_TRIGGER BIT(0)
+
#define R_BE_HCI_FUNC_EN 0x7880
#define B_BE_HCI_CR_PROTECT BIT(31)
#define B_BE_HCI_TRXBUF_EN BIT(2)
#define B_BE_HCI_RXDMA_EN BIT(1)
#define B_BE_HCI_TXDMA_EN BIT(0)
+#define R_BE_DBG_WOW_READY 0x815E
+#define B_BE_DBG_WOW_READY GENMASK(7, 0)
+
#define R_BE_DMAC_FUNC_EN 0x8400
#define B_BE_DMAC_CRPRT BIT(31)
#define B_BE_MAC_FUNC_EN BIT(30)
@@ -4488,6 +4553,42 @@
#define B_BE_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
#define B_BE_SER_L0_COUNTER_MASK GENMASK(8, 0)
+#define R_BE_DMAC_SYS_CR32B 0x842C
+#define B_BE_DMAC_BB_PHY1_MASK GENMASK(31, 16)
+#define B_BE_DMAC_BB_PHY0_MASK GENMASK(15, 0)
+#define B_BE_DMAC_BB_CTRL_39 BIT(31)
+#define B_BE_DMAC_BB_CTRL_38 BIT(30)
+#define B_BE_DMAC_BB_CTRL_37 BIT(29)
+#define B_BE_DMAC_BB_CTRL_36 BIT(28)
+#define B_BE_DMAC_BB_CTRL_35 BIT(27)
+#define B_BE_DMAC_BB_CTRL_34 BIT(26)
+#define B_BE_DMAC_BB_CTRL_33 BIT(25)
+#define B_BE_DMAC_BB_CTRL_32 BIT(24)
+#define B_BE_DMAC_BB_CTRL_31 BIT(23)
+#define B_BE_DMAC_BB_CTRL_30 BIT(22)
+#define B_BE_DMAC_BB_CTRL_29 BIT(21)
+#define B_BE_DMAC_BB_CTRL_28 BIT(20)
+#define B_BE_DMAC_BB_CTRL_27 BIT(19)
+#define B_BE_DMAC_BB_CTRL_26 BIT(18)
+#define B_BE_DMAC_BB_CTRL_25 BIT(17)
+#define B_BE_DMAC_BB_CTRL_24 BIT(16)
+#define B_BE_DMAC_BB_CTRL_23 BIT(15)
+#define B_BE_DMAC_BB_CTRL_22 BIT(14)
+#define B_BE_DMAC_BB_CTRL_21 BIT(13)
+#define B_BE_DMAC_BB_CTRL_20 BIT(12)
+#define B_BE_DMAC_BB_CTRL_19 BIT(11)
+#define B_BE_DMAC_BB_CTRL_18 BIT(10)
+#define B_BE_DMAC_BB_CTRL_17 BIT(9)
+#define B_BE_DMAC_BB_CTRL_16 BIT(8)
+#define B_BE_DMAC_BB_CTRL_15 BIT(7)
+#define B_BE_DMAC_BB_CTRL_14 BIT(6)
+#define B_BE_DMAC_BB_CTRL_13 BIT(5)
+#define B_BE_DMAC_BB_CTRL_12 BIT(4)
+#define B_BE_DMAC_BB_CTRL_11 BIT(3)
+#define B_BE_DMAC_BB_CTRL_10 BIT(2)
+#define B_BE_DMAC_BB_CTRL_9 BIT(1)
+#define B_BE_DMAC_BB_CTRL_8 BIT(0)
+
#define R_BE_DLE_EMPTY0 0x8430
#define B_BE_PLE_EMPTY_QTA_DMAC_H2D BIT(27)
#define B_BE_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
@@ -4924,6 +5025,12 @@
B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN | \
B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define R_BE_RX_STOP 0x8914
+#define B_BE_CPU_RX_STOP BIT(17)
+#define B_BE_HOST_RX_STOP BIT(16)
+#define B_BE_CPU_RX_CH_STOP_MSK GENMASK(15, 8)
+#define B_BE_HOST_RX_CH_STOP_MSK GENMASK(5, 0)
+
#define R_BE_DISP_FWD_WLAN_0 0x8938
#define B_BE_FWD_WLAN_CPU_TYPE_13_MASK GENMASK(31, 30)
#define B_BE_FWD_WLAN_CPU_TYPE_12_MASK GENMASK(29, 28)
@@ -4947,6 +5054,11 @@
#define B_BE_WDE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_WDE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_WDE_BUFMGN_CTL 0x8C10
+#define B_BE_WDE_AVAL_UPD_REQ BIT(29)
+#define B_BE_WDE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_WDE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_WDE_ERR_IMR 0x8C38
#define B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5063,6 +5175,11 @@
#define B_BE_PLE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_PLE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_PLE_BUFMGN_CTL 0x9010
+#define B_BE_PLE_AVAL_UPD_REQ BIT(29)
+#define B_BE_PLE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_PLE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_PLE_ERR_IMR 0x9038
#define B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5429,6 +5546,21 @@
#define B_BE_DROP_NONDMA_PPDU BIT(2)
#define B_BE_APPEND_FCS BIT(0)
+#define R_BE_FWD_ERR 0x9C10
+#define R_BE_FWD_ACTN0 0x9C14
+#define R_BE_FWD_ACTN1 0x9C18
+#define R_BE_FWD_ACTN2 0x9C1C
+#define R_BE_FWD_TF0 0x9C20
+#define R_BE_FWD_TF1 0x9C24
+
+#define R_BE_HW_PPDU_STATUS 0x9C30
+#define B_BE_FWD_RPKTTYPE_MASK GENMASK(31, 26)
+#define B_BE_FWD_PPDU_PRTID_MASK GENMASK(25, 23)
+#define B_BE_FWD_PPDU_FW_RLS BIT(22)
+#define B_BE_FWD_PPDU_QUEID_MASK GENMASK(21, 16)
+#define B_BE_FWD_OTHER_RPKT_MASK GENMASK(15, 8)
+#define B_BE_FWD_PPDU_STAT_MASK GENMASK(7, 0)
+
#define R_BE_CUT_AMSDU_CTRL 0x9C94
#define B_BE_EN_CUT_AMSDU BIT(31)
#define B_BE_CUT_AMSDU_CHKLEN_EN BIT(30)
@@ -5437,6 +5569,12 @@
#define B_BE_CUT_AMSDU_CHKLEN_L_TH_MASK GENMASK(23, 16)
#define B_BE_CUT_AMSDU_CHKLEN_H_TH_MASK GENMASK(15, 0)
+#define R_BE_WOW_CTRL 0x9CB8
+#define B_BE_WOW_HCI BIT(5)
+#define B_BE_WOW_DROP BIT(2)
+#define B_BE_WOW_WOWEN BIT(1)
+#define B_BE_WOW_FORCE_WAKEUP BIT(0)
+
#define R_BE_RX_HDRTRNS 0x9CC0
#define B_BE_RX_MGN_MLD_ADDR_EN BIT(6)
#define B_BE_HDR_INFO_MASK GENMASK(5, 4)
@@ -5727,6 +5865,9 @@
#define B_BE_STOP_CH1 BIT(1)
#define B_BE_STOP_CH0 BIT(0)
+#define R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1 0xB02C
+#define B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK GENMASK(4, 0)
+
#define R_BE_HAXI_IDCT_MSK 0xB0B8
#define B_BE_HAXI_RRESP_ERR_IDCT_MSK BIT(7)
#define B_BE_HAXI_BRESP_ERR_IDCT_MSK BIT(6)
@@ -5777,6 +5918,15 @@
#define B_BE_PREC_PAGE_CH12_V1_MASK GENMASK(21, 16)
#define B_BE_PREC_PAGE_CH011_V1_MASK GENMASK(5, 0)
+#define R_BE_CH0_PAGE_CTRL 0xB718
+#define B_BE_CH0_GRP BIT(31)
+#define B_BE_CH0_MAX_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_MIN_PG_MASK GENMASK(12, 0)
+
+#define R_BE_CH0_PAGE_INFO 0xB750
+#define B_BE_CH0_AVAL_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_USE_PG_MASK GENMASK(12, 0)
+
#define R_BE_PUB_PAGE_INFO3 0xB78C
#define B_BE_G1_AVAL_PG_MASK GENMASK(28, 16)
#define B_BE_G0_AVAL_PG_MASK GENMASK(12, 0)
@@ -5822,6 +5972,39 @@
#define B_BE_MACID_ACQ_GRP0_CLR_P BIT(2)
#define B_BE_R_MACID_ACQ_CHK_EN BIT(0)
+#define R_BE_BT_BREAK_TABLE 0x0E344
+
+#define R_BE_GNT_SW_CTRL 0x0E348
+#define B_BE_WL_ACT2_VAL BIT(25)
+#define B_BE_WL_ACT2_SWCTRL BIT(24)
+#define B_BE_WL_ACT_VAL BIT(23)
+#define B_BE_WL_ACT_SWCTRL BIT(22)
+#define B_BE_GNT_BT_RX_BB1_VAL BIT(21)
+#define B_BE_GNT_BT_RX_BB1_SWCTRL BIT(20)
+#define B_BE_GNT_BT_TX_BB1_VAL BIT(19)
+#define B_BE_GNT_BT_TX_BB1_SWCTRL BIT(18)
+#define B_BE_GNT_BT_RX_BB0_VAL BIT(17)
+#define B_BE_GNT_BT_RX_BB0_SWCTRL BIT(16)
+#define B_BE_GNT_BT_TX_BB0_VAL BIT(15)
+#define B_BE_GNT_BT_TX_BB0_SWCTRL BIT(14)
+#define B_BE_GNT_WL_RX_VAL BIT(13)
+#define B_BE_GNT_WL_RX_SWCTRL BIT(12)
+#define B_BE_GNT_WL_TX_VAL BIT(11)
+#define B_BE_GNT_WL_TX_SWCTRL BIT(10)
+#define B_BE_GNT_BT_BB1_VAL BIT(9)
+#define B_BE_GNT_BT_BB1_SWCTRL BIT(8)
+#define B_BE_GNT_WL_BB1_VAL BIT(7)
+#define B_BE_GNT_WL_BB1_SWCTRL BIT(6)
+#define B_BE_GNT_BT_BB0_VAL BIT(5)
+#define B_BE_GNT_BT_BB0_SWCTRL BIT(4)
+#define B_BE_GNT_WL_BB0_VAL BIT(3)
+#define B_BE_GNT_WL_BB0_SWCTRL BIT(2)
+#define B_BE_GNT_WL_BB_PWR_VAL BIT(1)
+#define B_BE_GNT_WL_BB_PWR_SWCTRL BIT(0)
+
+#define R_BE_PWR_MACID_PATH_BASE 0x0E500
+#define R_BE_PWR_MACID_LMT_BASE 0x0ED00
+
#define R_BE_CMAC_FUNC_EN 0x10000
#define R_BE_CMAC_FUNC_EN_C1 0x14000
#define B_BE_CMAC_CRPRT BIT(31)
@@ -5873,6 +6056,16 @@
B_BE_RMAC_CKEN | B_BE_TXTIME_CKEN | B_BE_RESP_PKTCTL_CKEN | \
B_BE_SIGB_CKEN)
+#define R_BE_WMAC_RFMOD 0x10010
+#define R_BE_WMAC_RFMOD_C1 0x14010
+#define B_BE_CMAC_ASSERTION BIT(31)
+#define B_BE_WMAC_RFMOD_MASK GENMASK(2, 0)
+#define BE_WMAC_RFMOD_20M 0
+#define BE_WMAC_RFMOD_40M 1
+#define BE_WMAC_RFMOD_80M 2
+#define BE_WMAC_RFMOD_160M 3
+#define BE_WMAC_RFMOD_320M 4
+
#define R_BE_TX_SUB_BAND_VALUE 0x10088
#define R_BE_TX_SUB_BAND_VALUE_C1 0x14088
#define B_BE_PRI20_BITMAP_MASK GENMASK(31, 16)
@@ -6009,6 +6202,13 @@
#define B_BE_MACTX_LATENCY_MASK GENMASK(10, 8)
#define B_BE_PREBKF_TIME_MASK GENMASK(4, 0)
+#define R_BE_PREBKF_CFG_1 0x1033C
+#define R_BE_PREBKF_CFG_1_C1 0x1433C
+#define B_BE_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(31, 24)
+#define B_BE_SIFS_PREBKF_MASK GENMASK(23, 16)
+#define B_BE_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
+#define B_BE_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
+
#define R_BE_CCA_CFG_0 0x10340
#define R_BE_CCA_CFG_0_C1 0x14340
#define B_BE_R_SIFS_AGGR_TIME_V1_MASK GENMASK(31, 24)
@@ -6050,11 +6250,36 @@
#define R_BE_MUEDCA_EN 0x10370
#define R_BE_MUEDCA_EN_C1 0x14370
+#define B_BE_SIFS_TIMEOUT_TB_T2_MASK GENMASK(30, 24)
+#define B_BE_SIFS_MACTXEN_TB_T1_MASK GENMASK(22, 16)
#define B_BE_MUEDCA_WMM_SEL BIT(8)
-#define B_BE_SET_MUEDCATIMER_TF_1 BIT(5)
+#define B_BE_SET_MUEDCATIMER_TF_MASK GENMASK(5, 4)
#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4)
+#define B_BE_MUEDCA_EN_MASK GENMASK(1, 0)
#define B_BE_MUEDCA_EN_0 BIT(0)
+#define R_BE_CTN_DRV_TXEN 0x10398
+#define R_BE_CTN_DRV_TXEN_C1 0x14398
+#define B_BE_CTN_TXEN_TWT_3 BIT(17)
+#define B_BE_CTN_TXEN_TWT_2 BIT(16)
+#define B_BE_CTN_TXEN_TWT_1 BIT(15)
+#define B_BE_CTN_TXEN_TWT_0 BIT(14)
+#define B_BE_CTN_TXEN_ULQ BIT(13)
+#define B_BE_CTN_TXEN_BCNQ BIT(12)
+#define B_BE_CTN_TXEN_HGQ BIT(11)
+#define B_BE_CTN_TXEN_CPUMGQ BIT(10)
+#define B_BE_CTN_TXEN_MGQ1 BIT(9)
+#define B_BE_CTN_TXEN_MGQ BIT(8)
+#define B_BE_CTN_TXEN_VO_1 BIT(7)
+#define B_BE_CTN_TXEN_VI_1 BIT(6)
+#define B_BE_CTN_TXEN_BK_1 BIT(5)
+#define B_BE_CTN_TXEN_BE_1 BIT(4)
+#define B_BE_CTN_TXEN_VO_0 BIT(3)
+#define B_BE_CTN_TXEN_VI_0 BIT(2)
+#define B_BE_CTN_TXEN_BK_0 BIT(1)
+#define B_BE_CTN_TXEN_BE_0 BIT(0)
+#define B_BE_CTN_TXEN_ALL_MASK GENMASK(17, 0)
+
#define R_BE_TB_CHK_CCA_NAV 0x103AC
#define R_BE_TB_CHK_CCA_NAV_C1 0x143AC
#define B_BE_TB_CHK_TX_NAV BIT(15)
@@ -6212,6 +6437,8 @@
#define R_BE_TSFTR_HIGH_P0_C1 0x1443C
#define B_BE_TSFTR_HIGH_P0_MASK GENMASK(31, 0)
+#define R_BE_BCN_DROP_ALL0 0x10560
+
#define R_BE_MBSSID_CTRL 0x10568
#define R_BE_MBSSID_CTRL_C1 0x14568
#define B_BE_MBSSID_MODE_SEL BIT(20)
@@ -6282,6 +6509,17 @@
#define B_BE_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8)
#define B_BE_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0)
+#define R_BE_TXRATE_CHK 0x10828
+#define R_BE_TXRATE_CHK_C1 0x14828
+#define B_BE_LATENCY_PADDING_PKT_TH_MASK GENMASK(31, 24)
+#define B_BE_PLCP_FETCH_BUFF_MASK GENMASK(23, 16)
+#define B_BE_OFDM_CCK_ERR_PROC BIT(6)
+#define B_BE_PKT_LAST_TX BIT(5)
+#define B_BE_BAND_MODE BIT(4)
+#define B_BE_MAX_TXNSS_MASK GENMASK(3, 2)
+#define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1)
+#define B_BE_CHECK_CCK_EN BIT(0)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -6289,6 +6527,20 @@
#define B_BE_PORT_DROP_4_0_MASK GENMASK(20, 16)
#define B_BE_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+#define R_BE_BT_PLT 0x1087C
+#define R_BE_BT_PLT_C1 0x1487C
+#define B_BE_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
+#define B_BE_BT_PLT_RST BIT(9)
+#define B_BE_PLT_EN BIT(8)
+#define B_BE_RX_PLT_GNT_LTE_RX BIT(7)
+#define B_BE_RX_PLT_GNT_BT_RX BIT(6)
+#define B_BE_RX_PLT_GNT_BT_TX BIT(5)
+#define B_BE_RX_PLT_GNT_WL BIT(4)
+#define B_BE_TX_PLT_GNT_LTE_RX BIT(3)
+#define B_BE_TX_PLT_GNT_BT_RX BIT(2)
+#define B_BE_TX_PLT_GNT_BT_TX BIT(1)
+#define B_BE_TX_PLT_GNT_WL BIT(0)
+
#define R_BE_PTCL_BSS_COLOR_0 0x108A0
#define R_BE_PTCL_BSS_COLOR_0_C1 0x148A0
#define B_BE_BSS_COLOB_BE_PORT_3_MASK GENMASK(29, 24)
@@ -6398,6 +6650,10 @@
#define B_BE_PTCL_DROP BIT(5)
#define B_BE_PTCL_TX_QUEUE_IDX_MASK GENMASK(4, 0)
+#define R_BE_PTCL_DBG_INFO 0x108F0
+
+#define R_BE_PTCL_DBG 0x108F4
+
#define R_BE_RX_ERROR_FLAG 0x10C00
#define R_BE_RX_ERROR_FLAG_C1 0x14C00
#define B_BE_RX_CSI_NOT_RELEASE_ERROR BIT(31)
@@ -6676,6 +6932,9 @@
#define B_BE_UPD_HGQMD BIT(1)
#define B_BE_UPD_TIMIE BIT(0)
+#define R_BE_WMTX_POWER_BE_BIT_CTL 0x10E0C
+#define R_BE_WMTX_POWER_BE_BIT_CTL_C1 0x14E0C
+
#define R_BE_WMTX_TCR_BE_4 0x10E2C
#define R_BE_WMTX_TCR_BE_4_C1 0x14E2C
#define B_BE_UL_EHT_MUMIMO_LTF_MODE BIT(30)
@@ -7056,6 +7315,20 @@
#define S_BE_BACAM_RST_ENT 1
#define S_BE_BACAM_RST_ALL 2
+#define R_BE_PPDU_STAT 0x11440
+#define R_BE_PPDU_STAT_C1 0x15440
+#define B_BE_STAT_IORST BIT(13)
+#define B_BE_STAT_GCKDIS BIT(12)
+#define B_BE_PPDU_STAT_WR_BW_MASK GENMASK(11, 10)
+#define B_BE_PPDU_STAT_RPT_TRIG BIT(8)
+#define B_BE_PPDU_STAT_RPT_DMA BIT(6)
+#define B_BE_PPDU_STAT_RPT_CRC32 BIT(5)
+#define B_BE_PPDU_STAT_RPT_ADDR BIT(4)
+#define B_BE_APP_PLCP_HDR_RPT BIT(3)
+#define B_BE_APP_RX_CNT_RPT BIT(2)
+#define B_BE_PPDU_MAC_INFO BIT(1)
+#define B_BE_PPDU_STAT_RPT_EN BIT(0)
+
#define R_BE_RX_SR_CTRL 0x1144A
#define R_BE_RX_SR_CTRL_C1 0x1544A
#define B_BE_SR_OP_MODE_MASK GENMASK(5, 4)
@@ -7063,6 +7336,13 @@
#define B_BE_SR_CTRL_PLCP_EN BIT(1)
#define B_BE_SR_EN BIT(0)
+#define R_BE_BSSID_SRC_CTRL 0x1144B
+#define R_BE_BSSID_SRC_CTRL_C1 0x1544B
+#define B_BE_BSSID_MATCH BIT(3)
+#define B_BE_PARTIAL_AID_MATCH BIT(2)
+#define B_BE_BSSCOLOR_MATCH BIT(1)
+#define B_BE_PLCP_SRC_EN BIT(0)
+
#define R_BE_CSIRPT_OPTION 0x11464
#define R_BE_CSIRPT_OPTION_C1 0x15464
#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26)
@@ -7178,12 +7458,56 @@
#define R_BE_PWR_MODULE 0x11900
#define R_BE_PWR_MODULE_C1 0x15900
+#define R_BE_PWR_LISTEN_PATH 0x11988
+#define B_BE_PWR_LISTEN_PATH_EN GENMASK(31, 28)
+
+#define R_BE_PWR_REF_CTRL 0x11A20
+#define B_BE_PWR_REF_CTRL_OFDM GENMASK(9, 1)
+#define B_BE_PWR_REF_CTRL_CCK GENMASK(18, 10)
+#define B_BE_PWR_OFST_LMT_DB GENMASK(27, 19)
+#define R_BE_PWR_OFST_LMTBF 0x11A24
+#define B_BE_PWR_OFST_LMTBF_DB GENMASK(8, 0)
+#define R_BE_PWR_FORCE_LMT 0x11A28
+#define B_BE_PWR_FORCE_LMT_ON BIT(6)
+
+#define R_BE_PWR_RATE_CTRL 0x11A2C
+#define B_BE_PWR_OFST_BYRATE_DB GENMASK(8, 0)
+#define B_BE_FORCE_PWR_BY_RATE_EN BIT(19)
+#define B_BE_FORCE_PWR_BY_RATE_VAL GENMASK(28, 20)
#define R_BE_PWR_RATE_OFST_CTRL 0x11A30
+#define R_BE_PWR_RATE_OFST_END 0x11A38
+#define R_BE_PWR_RULMT_START 0x12048
+#define R_BE_PWR_RULMT_END 0x120e4
+
+#define R_BE_PWR_BOOST 0x11A40
+#define B_BE_PWR_CTRL_SEL BIT(16)
+#define B_BE_PWR_FORCE_RATE_ON BIT(29)
+#define R_BE_PWR_OFST_RULMT 0x11A44
+#define B_BE_PWR_OFST_RULMT_DB GENMASK(17, 9)
+#define B_BE_PWR_FORCE_RU_ON BIT(18)
+#define B_BE_PWR_FORCE_RU_ENON BIT(28)
+#define R_BE_PWR_FORCE_MACID 0x11A48
+#define B_BE_PWR_FORCE_MACID_ON BIT(9)
+
+#define R_BE_PWR_REG_CTRL 0x11A50
+#define B_BE_PWR_BT_EN BIT(23)
+
+#define R_BE_PWR_COEX_CTRL 0x11A54
+#define B_BE_PWR_BT_VAL GENMASK(8, 0)
+#define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27)
+
+#define R_BE_PWR_OFST_SW 0x11AE8
+#define B_BE_PWR_OFST_SW_DB GENMASK(27, 24)
+
+#define R_BE_PWR_FTM 0x11B00
+#define R_BE_PWR_FTM_SS 0x11B04
+
#define R_BE_PWR_BY_RATE 0x11E00
#define R_BE_PWR_BY_RATE_MAX 0x11FA8
#define R_BE_PWR_LMT 0x11FAC
#define R_BE_PWR_LMT_MAX 0x12040
+#define R_BE_PWR_BY_RATE_END 0x12044
#define R_BE_PWR_RU_LMT 0x12048
#define R_BE_PWR_RU_LMT_MAX 0x120E4
@@ -7223,6 +7547,7 @@
#define RR_MOD_M_RXBB GENMASK(9, 5)
#define RR_MOD_LO_SEL BIT(1)
#define RR_MODOPT 0x01
+#define RR_TXG_SEL GENMASK(19, 17)
#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
#define RR_WLSEL 0x02
#define RR_WLSEL_AG GENMASK(18, 16)
@@ -7256,6 +7581,12 @@
#define CFGCH_BAND0_2G 0
#define CFGCH_BAND0_5G 1
#define CFGCH_BAND0_6G 0
+#define RR_CFGCH_BW_V2 GENMASK(12, 10)
+#define CFGCH_BW_V2_20M 0
+#define CFGCH_BW_V2_40M 1
+#define CFGCH_BW_V2_80M 2
+#define CFGCH_BW_V2_160M 3
+#define CFGCH_BW_V2_320M 4
#define RR_CFGCH_BW GENMASK(11, 10)
#define RR_CFGCH_CH GENMASK(7, 0)
#define CFGCH_BW_20M 3
@@ -7292,6 +7623,7 @@
#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
+#define RR_TM_VAL_V1 GENMASK(7, 0)
#define RR_TM_VAL GENMASK(6, 1)
#define RR_TM2 0x43
#define RR_TM2_OFF GENMASK(19, 16)
@@ -7325,8 +7657,12 @@
#define RR_TXAC 0x5f
#define RR_TXAC_IQG GENMASK(3, 0)
#define RR_BIASA 0x60
-#define RR_BIASA_TXG GENMASK(15, 12)
#define RR_BIASA_TXA GENMASK(19, 16)
+#define RR_BIASA_TXG GENMASK(15, 12)
+#define RR_BIASD_TXA_V1 GENMASK(15, 12)
+#define RR_BIASA_TXA_V1 GENMASK(11, 8)
+#define RR_BIASD_TXG_V1 GENMASK(7, 4)
+#define RR_BIASA_TXG_V1 GENMASK(3, 0)
#define RR_BIASA_A GENMASK(2, 0)
#define RR_BIASA2 0x63
#define RR_BIASA2_LB GENMASK(4, 2)
@@ -7410,6 +7746,7 @@
#define RR_MIXER_GN GENMASK(4, 3)
#define RR_POW 0xa0
#define RR_POW_SYN GENMASK(3, 2)
+#define RR_POW_SYN_V1 GENMASK(3, 0)
#define RR_LOGEN 0xa3
#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_SX 0xaf
@@ -7436,6 +7773,8 @@
#define RR_MMD 0xd5
#define RR_MMD_RST_EN BIT(8)
#define RR_MMD_RST_SYN BIT(6)
+#define RR_SMD 0xd6
+#define RR_VCO2 BIT(19)
#define RR_IQKPLL 0xdc
#define RR_IQKPLL_MOD GENMASK(9, 8)
#define RR_SYNLUT 0xdd
@@ -7459,15 +7798,24 @@
#define RR_RFC_CKEN BIT(1)
#define R_UPD_P0 0x0000
+#define R_BBCLK 0x0000
+#define B_CLK_640M BIT(2)
#define R_RSTB_WATCH_DOG 0x000C
#define B_P0_RSTB_WATCH_DOG BIT(0)
#define B_P1_RSTB_WATCH_DOG BIT(1)
#define B_UPD_P0_EN BIT(31)
+#define R_EMLSR 0x0044
+#define B_EMLSR_PARM GENMASK(27, 12)
#define R_SPOOF_CG 0x00B4
#define B_SPOOF_CG_EN BIT(17)
+#define R_CHINFO_SEG 0x00B4
+#define B_CHINFO_SEG_LEN GENMASK(2, 0)
+#define B_CHINFO_SEG GENMASK(16, 7)
#define R_DFS_FFT_CG 0x00B8
#define B_DFS_CG_EN BIT(1)
#define B_DFS_FFT_EN BIT(0)
+#define R_CHINFO_DATA 0x00C0
+#define B_CHINFO_DATA_BITMAP GENMASK(22, 0)
#define R_ANAPAR_PW15 0x030C
#define B_ANAPAR_PW15 GENMASK(31, 24)
#define B_ANAPAR_PW15_H GENMASK(27, 24)
@@ -7497,6 +7845,23 @@
#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
+#define R_BRK_R 0x0418
+#define B_VHTMCS_LMT GENMASK(22, 21)
+#define B_HTMCS_LMT GENMASK(9, 8)
+#define R_BRK_EHT 0x0474
+#define B_RXEHT_NSS_MAX GENMASK(4, 2)
+#define R_BRK_RXEHT 0x0478
+#define B_RXEHT_N_USER_MAX GENMASK(31, 24)
+#define B_RXEHTTB_NSS_MAX GENMASK(16, 14)
+#define R_EN_SND_WO_NDP 0x047c
+#define R_EN_SND_WO_NDP_C1 0x147c
+#define B_EN_SND_WO_NDP BIT(1)
+#define R_BRK_HE 0x0480
+#define B_TB_NSS_MAX GENMASK(25, 23)
+#define B_NSS_MAX GENMASK(16, 14)
+#define B_N_USR_MAX GENMASK(13, 6)
+#define R_RXCCA_BE1 0x0520
+#define B_RXCCA_BE1_DIS BIT(0)
#define R_UPD_CLK_ADC 0x0700
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define B_UPD_CLK_ADC_ON BIT(24)
@@ -7543,6 +7908,7 @@
#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
#define R_MAC_SEL 0x09A4
#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31)
+#define B_MAC_SEL GENMASK(19, 17)
#define B_MAC_SEL_PWR_EN BIT(16)
#define B_MAC_SEL_DPD_EN BIT(10)
#define B_MAC_SEL_MOD GENMASK(4, 2)
@@ -7588,19 +7954,28 @@
#define R_PD_CTRL 0x0C3C
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
+#define B_IOQ_IQK_DPK_CLKEN GENMASK(1, 0)
#define B_IOQ_IQK_DPK_EN BIT(1)
#define R_GNT_BT_WGT_EN 0x0C6C
#define B_GNT_BT_WGT_EN BIT(21)
+#define R_IQK_DPK_RST 0x0C6C
+#define R_IQK_DPK_RST_C1 0x1C6C
+#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
#define R_TXGATING 0x0C74
#define B_TXGATING_EN BIT(4)
+#define R_TXRFC 0x0C7C
+#define R_TXRFC_C1 0x1C7C
+#define B_TXRFC_RST GENMASK(23, 21)
#define R_PD_ARBITER_OFF 0x0C80
#define B_PD_ARBITER_OFF BIT(31)
#define R_SNDCCA_A1 0x0C9C
#define B_SNDCCA_A1_EN GENMASK(19, 12)
#define R_SNDCCA_A2 0x0CA0
#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_UDP_COEEF 0x0CBC
+#define B_UDP_COEEF BIT(19)
#define R_TX_COLLISION_T2R_ST_BE 0x0CC8
#define B_TX_COLLISION_T2R_ST_BE_M GENMASK(13, 8)
#define R_RXHT_MCS_LIMIT 0x0D18
@@ -7624,7 +7999,11 @@
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
+#define R_CLK_GCK 0x1008
+#define B_CLK_GCK GENMASK(24, 0)
#define R_EDCCA_RPT_SEL_BE 0x10CC
+#define R_ADC_FIFO_V1 0x10FC
+#define B_ADC_FIFO_EN_V1 GENMASK(31, 24)
#define R_S0_HW_SI_DIS 0x1200
#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
@@ -7771,6 +8150,27 @@
#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13)
#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10
#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
+#define R_AFEDAC0 0x2A5C
+#define B_AFEDAC0 GENMASK(31, 27)
+#define R_AFEDAC1 0x2A60
+#define B_AFEDAC1 GENMASK(2, 0)
+#define R_IQKDPK_HC 0x2AB8
+#define B_IQKDPK_HC BIT(28)
+#define R_HWSI_ADD0 0x2ADC
+#define R_HWSI_ADD1 0x2BDC
+#define B_HWSI_ADD_MASK GENMASK(11, 4)
+#define B_HWSI_ADD_CTL_MASK GENMASK(2, 0)
+#define B_HWSI_ADD_RD BIT(2)
+#define B_HWSI_ADD_POLL_MASK GENMASK(1, 0)
+#define B_HWSI_ADD_RUN BIT(1)
+#define B_HWSI_ADD_BUSY BIT(0)
+#define R_HWSI_DATA 0x2AE0
+#define B_HWSI_DATA_VAL GENMASK(27, 8)
+#define B_HWSI_DATA_ADDR GENMASK(7, 0)
+#define R_HWSI_VAL0 0x2C24
+#define R_HWSI_VAL1 0x2D24
+#define B_HWSI_VAL_RDONE BIT(31)
+#define B_HWSI_VAL_BUSY BIT(29)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_EDCCA_RPT_A_BE 0x2E38
@@ -7806,8 +8206,30 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_OP1DB_A 0x40B0
+#define B_OP1DB_A GENMASK(31, 24)
+#define R_OP1DB1_A 0x40BC
+#define B_TIA10_A GENMASK(15, 0)
+#define B_TIA1_A GENMASK(15, 8)
+#define B_TIA0_A GENMASK(7, 0)
+#define R_BKOFF_A 0x40E0
+#define B_BKOFF_IBADC_A GENMASK(23, 18)
+#define R_BACKOFF_A 0x40E4
+#define B_LNA_IBADC_A GENMASK(29, 18)
+#define B_BACKOFF_LNA_A GENMASK(29, 24)
+#define B_BACKOFF_IBADC_A GENMASK(23, 18)
+#define R_RXBY_WBADC_A 0x40F4
+#define B_RXBY_WBADC_A GENMASK(14, 10)
#define R_MUIC 0x40F8
#define B_MUIC_EN BIT(0)
+#define R_BT_RXBY_WBADC_A 0x4160
+#define B_BT_RXBY_WBADC_A BIT(31)
+#define R_BT_SHARE_A 0x4164
+#define B_BT_SHARE_A BIT(0)
+#define B_BT_TRK_OFF_A BIT(1)
+#define B_BTG_PATH_A BIT(4)
+#define R_FORCE_FIR_A 0x418C
+#define B_FORCE_FIR_A GENMASK(1, 0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(7, 0)
#define R_SEG0CSI 0x42AC
@@ -7846,8 +8268,30 @@
#define R_DPD_BF 0x44a0
#define B_DPD_BF_OFDM GENMASK(16, 12)
#define B_DPD_BF_SCA GENMASK(6, 0)
+#define R_LNA_OP 0x44B0
+#define B_LNA6 GENMASK(31, 24)
+#define R_LNA_TIA 0x44BC
+#define B_TIA10_B GENMASK(15, 0)
+#define B_TIA1_B GENMASK(15, 8)
+#define B_TIA0_B GENMASK(7, 0)
+#define R_BKOFF_B 0x44E0
+#define B_BKOFF_IBADC_B GENMASK(23, 18)
+#define R_BACKOFF_B 0x44E4
+#define B_LNA_IBADC_B GENMASK(29, 18)
+#define B_BACKOFF_LNA_B GENMASK(29, 24)
+#define B_BACKOFF_IBADC_B GENMASK(23, 18)
+#define R_RXBY_WBADC_B 0x44F4
+#define B_RXBY_WBADC_B GENMASK(14, 10)
+#define R_BT_RXBY_WBADC_B 0x4560
+#define B_BT_RXBY_WBADC_B BIT(31)
+#define R_BT_SHARE_B 0x4564
+#define B_BT_SHARE_B BIT(0)
+#define B_BT_TRK_OFF_B BIT(1)
+#define B_BTG_PATH_B BIT(4)
#define R_TXPATH_SEL 0x458C
#define B_TXPATH_SEL_MSK GENMASK(31, 28)
+#define R_FORCE_FIR_B 0x458C
+#define B_FORCE_FIR_B GENMASK(1, 0)
#define R_TXPWR 0x4594
#define B_TXPWR_MSK GENMASK(30, 22)
#define R_TXNSS_MAP 0x45B4
@@ -7910,10 +8354,12 @@
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3 0x41C8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3 0x41CC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_RXB_INIT_V1 0x46A8
#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
@@ -7958,10 +8404,12 @@
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3 0x45C8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3 0x45CC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
@@ -8092,6 +8540,15 @@
#define B_PATH1_5MDET_SB2 BIT(8)
#define B_PATH1_5MDET_SB0 BIT(6)
#define B_PATH1_5MDET_TH GENMASK(5, 0)
+#define R_S0S1_CSI_WGT 0x4D34
+#define B_S0S1_CSI_WGT_EN BIT(0)
+#define B_S0S1_CSI_WGT_TONE_IDX GENMASK(31, 20)
+#define R_CHINFO_ELM_SRC 0x4D84
+#define B_CHINFO_ELM_BITMAP GENMASK(22, 0)
+#define B_CHINFO_SRC GENMASK(31, 30)
+#define R_CHINFO_TYPE_SCAL 0x4D88
+#define B_CHINFO_TYPE GENMASK(2, 1)
+#define B_CHINFO_SCAL BIT(8)
#define R_RPL_BIAS_COMP 0x4DF0
#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0)
#define R_RPL_PATHAB 0x4E0C
@@ -8239,14 +8696,90 @@
#define B_S0_DACKQ8_K GENMASK(15, 8)
#define R_DCFO_WEIGHT_V1 0x6244
#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28)
+#define R_DAC_CLK 0x625C
+#define B_DAC_CLK GENMASK(31, 30)
#define R_DCFO_OPT_V1 0x6260
#define B_DCFO_OPT_EN_V1 BIT(17)
+#define R_TXFCTR 0x627C
+#define B_TXFCTR_THD GENMASK(19, 10)
+#define R_TXSCALE 0x6284
+#define B_TXFCTR_EN BIT(19)
+#define R_PCOEFF01 0x6684
+#define B_PCOEFF01 GENMASK(23, 0)
+#define R_PCOEFF23 0x6688
+#define B_PCOEFF23 GENMASK(23, 0)
+#define R_PCOEFF45 0x668c
+#define B_PCOEFF45 GENMASK(23, 0)
+#define R_PCOEFF67 0x6690
+#define B_PCOEFF67 GENMASK(23, 0)
+#define R_PCOEFF89 0x6694
+#define B_PCOEFF89 GENMASK(23, 0)
+#define R_PCOEFFAB 0x6698
+#define B_PCOEFFAB GENMASK(23, 0)
+#define R_PCOEFFCD 0x669c
+#define B_PCOEFFCD GENMASK(23, 0)
+#define R_PCOEFFEF 0x66a0
+#define B_PCOEFFEF GENMASK(23, 0)
+#define R_MGAIN_BIAS 0x672c
+#define B_MGAIN_BIAS_BW20 GENMASK(3, 0)
+#define B_MGAIN_BIAS_BW40 GENMASK(7, 4)
+#define R_CCK_RPL_OFST 0x6750
+#define B_CCK_RPL_OFST GENMASK(7, 0)
+#define R_BK_FC0INV 0x6758
+#define B_BK_FC0INV GENMASK(18, 0)
+#define R_CCK_FC0INV 0x675c
+#define B_CCK_FC0INV GENMASK(18, 0)
#define R_SEG0R_EDCCA_LVL_BE 0x69EC
#define R_SEG0R_PPDU_LVL_BE 0x69F0
#define R_SEGSND 0x6A14
#define B_SEGSND_EN BIT(31)
+#define R_DBCC 0x6B48
+#define B_DBCC_EN BIT(0)
+#define R_FC0 0x6B4C
+#define B_BW40_2XFFT BIT(31)
+#define B_FC0 GENMASK(12, 0)
+#define R_FC0INV_SBW 0x6B50
+#define B_SMALLBW GENMASK(31, 30)
+#define B_RX_BT_SG0 GENMASK(25, 22)
+#define B_RX_1RCCA GENMASK(17, 14)
+#define B_FC0_INV GENMASK(6, 0)
+#define R_ANT_CHBW 0x6B54
+#define B_ANT_BT_SHARE BIT(16)
+#define B_CHBW_BW GENMASK(14, 12)
+#define B_CHBW_PRICH GENMASK(11, 8)
+#define B_ANT_RX_SG0 GENMASK(3, 0)
+#define R_SLOPE 0x6B6C
+#define B_EHT_RATE_TH GENMASK(31, 28)
+#define B_SLOPE_B GENMASK(27, 14)
+#define B_SLOPE_A GENMASK(13, 0)
+#define R_SC_CORNER 0x6B70
+#define B_SC_CORNER GENMASK(10, 0)
+#define R_MAG_A 0x6BF4
+#define B_MGA_AEND GENMASK(31, 24)
+#define R_MAG_AB 0x6BF8
+#define B_BY_SLOPE GENMASK(31, 24)
+#define B_MAG_AB GENMASK(23, 0)
+#define R_BEDGE 0x6BFC
+#define B_EHT_MCS14 BIT(31)
+#define B_HE_RATE_TH GENMASK(30, 27)
+#define R_BEDGE2 0x6C00
+#define B_EHT_MCS15 BIT(31)
+#define B_HT_VHT_TH GENMASK(11, 0)
+#define R_BEDGE3 0x6C04
+#define B_TB_EN BIT(23)
+#define B_HEMU_EN BIT(21)
+#define B_HEERSU_EN BIT(19)
+#define B_EHTTB_EN BIT(15)
+#define B_BEDGE_CFG GENMASK(1, 0)
+#define R_SU_PUNC 0x6C08
+#define B_SU_PUNC_EN BIT(1)
+#define R_BEDGE5 0x6C10
+#define B_HWGEN_EN BIT(25)
+#define B_PWROFST_COMP BIT(20)
#define R_RPL_BIAS_COMP1 0x6DF0
#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
+#define R_DBCC_FA 0x703C
+#define B_DBCC_FA BIT(12)
#define R_P1_TSSI_ALIM1 0x7630
#define B_P1_TSSI_ALIM1 GENMASK(29, 0)
#define B_P1_TSSI_ALIM11 GENMASK(29, 20)
@@ -8389,8 +8922,12 @@
#define B_PRT_COM_RXBB_V1 GENMASK(4, 0)
#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
+#define R_COEF_SEL_C1 0x8204
#define B_COEF_SEL_IQC BIT(0)
+#define B_COEF_SEL_IQC_V1 GENMASK(1, 0)
#define B_COEF_SEL_MDPD BIT(8)
+#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
+#define B_COEF_SEL_EN BIT(31)
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)
@@ -8412,8 +8949,10 @@
#define B_RFGAIN_BND GENMASK(4, 0)
#define R_CFIR_MAP 0x8150
#define R_CFIR_LUT 0x8154
+#define R_CFIR_LUT_C1 0x8254
#define B_CFIR_LUT_SEL BIT(8)
#define B_CFIR_LUT_SET BIT(4)
+#define B_CFIR_LUT_G5 BIT(5)
#define B_CFIR_LUT_G3 BIT(3)
#define B_CFIR_LUT_G2 BIT(2)
#define B_CFIR_LUT_GP_V1 GENMASK(2, 0)
@@ -8626,6 +9165,35 @@
#define B_DACKN0_V GENMASK(21, 14)
#define R_DACKN1_CTL 0xC224
#define B_DACKN1_V GENMASK(21, 14)
+#define R_GAIN_MAP0 0xE44C
+#define B_GAIN_MAP0_EN BIT(0)
+#define R_GAIN_MAP1 0xE54C
+#define B_GAIN_MAP1_EN BIT(0)
+#define R_GOTX_IQKDPK_C0 0xE464
+#define R_GOTX_IQKDPK_C1 0xE564
+#define B_GOTX_IQKDPK GENMASK(28, 27)
+#define R_IQK_DPK_PRST 0xE4AC
+#define R_IQK_DPK_PRST_C1 0xE5AC
+#define B_IQK_DPK_PRST BIT(27)
+#define R_TXPWR_RSTA 0xE60C
+#define B_TXPWR_RSTA BIT(16)
+#define R_TSSI_PWR_P0 0xE610
+#define R_TSSI_PWR_P1 0xE710
+#define B_TSSI_CONT_EN BIT(3)
+#define R_TSSI_MAP_OFST_P0 0xE620
+#define R_TSSI_MAP_OFST_P1 0xE720
+#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
+#define B_TSSI_MAP_OFST_CCK GENMASK(26, 18)
+#define R_TXAGC_REF0_P0 0xE628
+#define R_TXAGC_REF0_P1 0xE728
+#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0)
+#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9)
+#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18)
+#define R_TXAGC_REF1_P0 0xE62C
+#define R_TXAGC_REF1_P1 0xE72C
+#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0)
+#define R_TXPWR_RSTB 0xE70C
+#define B_TXPWR_RSTB BIT(16)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
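
Aside on the new register macros above: every R_* constant is a baseband register offset and every B_* constant a GENMASK()/BIT() field within that register. A minimal illustrative sketch (not part of this patch) of how such a pair composes with the generic <linux/bitfield.h> helpers; the driver's own masked read/write accessors wrap the same mask semantics:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

/* Register/field pair reused from the reg.h hunk above. */
#define R_HWSI_DATA		0x2AE0
#define B_HWSI_DATA_VAL		GENMASK(27, 8)
#define B_HWSI_DATA_ADDR	GENMASK(7, 0)

/* Pack an 8-bit HWSI address and its 20-bit value into one register word. */
static u32 hwsi_pack(u8 addr, u32 val)
{
	return FIELD_PREP(B_HWSI_DATA_ADDR, addr) |
	       FIELD_PREP(B_HWSI_DATA_VAL, val);
}

/* Pull the value field back out of a word read from R_HWSI_DATA. */
static u32 hwsi_unpack_val(u32 word)
{
	return FIELD_GET(B_HWSI_DATA_VAL, word);
}
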
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 5c167a9278ce..51d3e61eaa1d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -901,7 +901,7 @@ static void rtw8851b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -987,7 +987,7 @@ next:
static
void rtw8851b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -1921,41 +1921,81 @@ static u8 rtw8851b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8851b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
- module->ant.isolation = 10;
- module->kt_ver_adie = rtwdev->hal.acv;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
+ md->md_v7.ant.isolation = 10;
+ md->md_v7.kt_ver_adie = rtwdev->hal.acv;
- if (module->rfe_type == 0)
- return;
+ if (md->md_v7.rfe_type == 0)
+ return;
- /* rfe_type 3*n+1: 1-Ant(shared),
- * 3*n+2: 2-Ant+Div(non-shared),
- * 3*n+3: 2-Ant+no-Div(non-shared)
- */
- module->ant.num = (module->rfe_type % 3 == 1) ? 1 : 2;
- /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
- module->ant.single_pos = RF_PATH_A;
- module->ant.btg_pos = RF_PATH_A;
- module->ant.stream_cnt = 1;
-
- if (module->ant.num == 1) {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
- module->wa_type = 1;
- module->ant.diversity = 0;
- } else { /* ant.num == 2 */
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
- module->switch_type = BTC_SWITCH_EXTERNAL;
- module->wa_type = 0;
- if (module->rfe_type % 3 == 2)
- module->ant.diversity = 1;
+ /* rfe_type 3*n+1: 1-Ant(shared),
+ * 3*n+2: 2-Ant+Div(non-shared),
+ * 3*n+3: 2-Ant+no-Div(non-shared)
+ */
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 3 == 1) ? 1 : 2;
+ /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
+ md->md_v7.ant.single_pos = RF_PATH_A;
+ md->md_v7.ant.btg_pos = RF_PATH_A;
+ md->md_v7.ant.stream_cnt = 1;
+
+ if (md->md_v7.ant.num == 1) {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.wa_type = 1;
+ md->md_v7.ant.diversity = 0;
+ } else { /* ant.num == 2 */
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ md->md_v7.switch_type = BTC_SWITCH_EXTERNAL;
+ md->md_v7.wa_type = 0;
+ if (md->md_v7.rfe_type % 3 == 2)
+ md->md_v7.ant.diversity = 1;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
+ } else {
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+ md->md.ant.isolation = 10;
+ md->md.kt_ver_adie = rtwdev->hal.acv;
+
+ if (md->md.rfe_type == 0)
+ return;
+
+ /* rfe_type 3*n+1: 1-Ant(shared),
+ * 3*n+2: 2-Ant+Div(non-shared),
+ * 3*n+3: 2-Ant+no-Div(non-shared)
+ */
+ md->md.ant.num = (md->md.rfe_type % 3 == 1) ? 1 : 2;
+ /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
+ md->md.ant.single_pos = RF_PATH_A;
+ md->md.ant.btg_pos = RF_PATH_A;
+ md->md.ant.stream_cnt = 1;
+
+ if (md->md.ant.num == 1) {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ md->md.wa_type = 1;
+ md->md.ant.diversity = 0;
+ } else { /* ant.num == 2 */
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ md->md.switch_type = BTC_SWITCH_EXTERNAL;
+ md->md.wa_type = 0;
+ if (md->md.rfe_type % 3 == 2)
+ md->md.ant.diversity = 1;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -1965,7 +2005,7 @@ void rtw8851b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
if (group > BTC_BT_SS_GROUP)
group--; /* Tx-group=1, Rx-group=2 */
- if (rtwdev->btc.mdinfo.ant.type == BTC_ANT_SHARED) /* 1-Ant */
+ if (rtwdev->btc.ant_type == BTC_ANT_SHARED) /* 1-Ant */
group += 3;
rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
@@ -1980,9 +2020,9 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
};
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
- struct rtw89_btc_ant_info *ant = &module->ant;
- u8 path, path_min, path_max;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u8 path, path_min, path_max, str_cnt, ant_sing_pos;
/* PTA init */
rtw89_mac_coex_init(rtwdev, &coex_params);
@@ -1991,9 +2031,17 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+ if (ver->fcxinit == 7) {
+ str_cnt = md->md_v7.ant.stream_cnt;
+ ant_sing_pos = md->md_v7.ant.single_pos;
+ } else {
+ str_cnt = md->md.ant.stream_cnt;
+ ant_sing_pos = md->md.ant.single_pos;
+ }
+
/* for 1-Ant && 1-ss case: only 1-path */
- if (ant->stream_cnt == 1) {
- path_min = ant->single_pos;
+ if (str_cnt == 1) {
+ path_min = ant_sing_pos;
path_max = path_min;
} else {
path_min = RF_PATH_A;
@@ -2016,7 +2064,7 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
/* if GNT_WL = 0 && BT = Tx_group -->
* Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
*/
- if (ant->type == BTC_ANT_SHARED && ant->btg_pos == path)
+ if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
rtw8851b_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
else
rtw8851b_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
@@ -2148,19 +2196,18 @@ void rtw8851b_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
static void rtw8851b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_ant_info *ant = &btc->mdinfo.ant;
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWE, RFREG_MASK, 0x80000);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD1, RFREG_MASK, 0x110);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD1, RFREG_MASK, 0x110);
/* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
if (state)
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD0, RFREG_MASK, 0x179c);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD0, RFREG_MASK, 0x179c);
else
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD0, RFREG_MASK, 0x208);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD0, RFREG_MASK, 0x208);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWE, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWE, RFREG_MASK, 0x0);
}
#define LNA2_51B_MA 0x700
@@ -2175,7 +2222,6 @@ static void rtw8851b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
* level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
*/
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_ant_info *ant = &btc->mdinfo.ant;
const struct rtw89_reg2_def *rf;
u32 n, i, val;
@@ -2203,10 +2249,10 @@ static void rtw8851b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
for (i = 0; i < n; i++, rf++) {
val = rf->data;
/* bit[10] = 1 if non-shared-ant for 8851b */
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED)
+ if (btc->ant_type == BTC_ANT_DEDICATED)
val |= 0x4;
- rtw89_write_rf(rtwdev, ant->btg_pos, rf->addr, LNA2_51B_MA, val);
+ rtw89_write_rf(rtwdev, btc->btg_pos, rf->addr, LNA2_51B_MA, val);
}
}
@@ -2299,6 +2345,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.enable_bb_rf = rtw8851b_mac_enable_bb_rf,
.disable_bb_rf = rtw8851b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8851b_bb_reset,
.bb_sethw = rtw8851b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2309,7 +2356,9 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.read_phycap = rtw8851b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = rtw8851b_rfe_gpio,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8851b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8851b_rfk_channel,
.rfk_band_changed = rtw8851b_rfk_band_changed,
.rfk_scan = rtw8851b_rfk_scan,
@@ -2334,6 +2383,12 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8851b_btc_set_rfe,
.btc_init_cfg = rtw8851b_btc_init_cfg,
@@ -2394,7 +2449,9 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
@@ -2449,6 +2506,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8851b_c2h_regs,
.page_regs = &rtw8851b_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
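
The btc_set_rfe() hunk above (and the matching rtw8852a/rtw8852b hunks later in this patch) all follow the same shape: mdinfo becomes a union keyed on the firmware coex version (ver->fcxinit == 7 selects md_v7, anything else the legacy md), and the antenna type and BTG path are mirrored into btc->ant_type and btc->btg_pos so later code no longer reaches into the versioned layout. A reduced standalone sketch of that dispatch pattern, using simplified stand-in types rather than the driver's real struct layouts:

/* Reduced sketch of the version-keyed union dispatch used above; the struct
 * members are simplified stand-ins, only the selection pattern is the point.
 */
struct demo_module_v1 { int ant_type; int btg_pos; };
struct demo_module_v7 { int ant_type; int btg_pos; };

union demo_module_info {
	struct demo_module_v1 md;	/* legacy layout */
	struct demo_module_v7 md_v7;	/* fcxinit == 7 layout */
};

struct demo_btc {
	int fcxinit;			/* firmware coex format version */
	union demo_module_info mdinfo;
	int ant_type;			/* cached copies for hot paths */
	int btg_pos;
};

static void demo_btc_cache(struct demo_btc *btc)
{
	/* Pick the layout once, then mirror the fields callers need. */
	if (btc->fcxinit == 7) {
		btc->ant_type = btc->mdinfo.md_v7.ant_type;
		btc->btg_pos = btc->mdinfo.md_v7.btg_pos;
	} else {
		btc->ant_type = btc->mdinfo.md.ant_type;
		btc->btg_pos = btc->mdinfo.md.btg_pos;
	}
}
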
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
index 8cb5bde8f625..522883c8dfb9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
@@ -5345,7 +5345,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -5353,7 +5353,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -5361,7 +5361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -5793,7 +5793,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -5801,7 +5801,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 76,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 76,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -5809,7 +5809,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 76,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 76,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -6361,7 +6361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 84,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 84,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -6369,7 +6369,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 84,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 84,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -6649,7 +6649,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 74,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -7975,7 +7975,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 42,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -7983,7 +7983,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -7991,7 +7991,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 40,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -8423,7 +8423,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -8431,7 +8431,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -8439,7 +8439,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 52,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -8871,7 +8871,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 64,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -8879,7 +8879,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 64,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -8887,7 +8887,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
@@ -11055,7 +11055,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -11063,7 +11063,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -11071,7 +11071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -11503,7 +11503,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -11511,7 +11511,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 74,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 74,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -11519,7 +11519,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 74,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 74,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -12071,7 +12071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 80,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 80,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -12079,7 +12079,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 80,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 80,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -12359,7 +12359,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 72,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 72,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -13685,7 +13685,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 40,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 40,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -13693,7 +13693,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -13701,7 +13701,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 38,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 38,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -14133,7 +14133,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -14141,7 +14141,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -14149,7 +14149,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 50,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 50,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -14581,7 +14581,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 62,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 62,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -14589,7 +14589,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 62,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 62,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -14597,7 +14597,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ade69bd30fc8..ca1374a71727 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -25,6 +25,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 0c76c52ce22c..2deadec715cf 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1665,28 +1665,55 @@ static u8 rtw8852a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852a_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = (module->rfe_type % 2 ? 2 : 3);
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -1717,7 +1744,6 @@ static void rtw8852a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -1736,7 +1762,7 @@ static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, 0xfffff, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852a_set_trx_mask(rtwdev,
@@ -2043,6 +2069,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.enable_bb_rf = rtw89_mac_enable_bb_rf,
.disable_bb_rf = rtw89_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852a_bb_reset,
.bb_sethw = rtw8852a_bb_sethw,
.read_rf = rtw89_phy_read_rf,
@@ -2053,7 +2080,9 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.read_phycap = rtw8852a_read_phycap,
.fem_setup = rtw8852a_fem_setup,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852a_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852a_rfk_channel,
.rfk_band_changed = rtw8852a_rfk_band_changed,
.rfk_scan = rtw8852a_rfk_scan,
@@ -2078,6 +2107,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2130,7 +2165,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -2186,6 +2223,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.c2h_regs = rtw8852a_c2h_regs,
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.page_regs = &rtw8852a_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index f1e890bde049..7c6ffedb77e2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index de887a35f3fb..d025c4135e1c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -988,7 +988,7 @@ static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -1086,7 +1086,7 @@ next:
static
void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -2125,28 +2125,55 @@ static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = module->rfe_type % 2 ? 2 : 3;
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -2162,7 +2189,6 @@ void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -2181,7 +2207,7 @@ static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
/* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
@@ -2468,6 +2494,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.enable_bb_rf = rtw8852b_mac_enable_bb_rf,
.disable_bb_rf = rtw8852b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852b_bb_reset,
.bb_sethw = rtw8852b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2478,7 +2505,9 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.read_phycap = rtw8852b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852b_rfk_channel,
.rfk_band_changed = rtw8852b_rfk_band_changed,
.rfk_scan = rtw8852b_rfk_scan,
@@ -2503,6 +2532,12 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852b_btc_set_rfe,
.btc_init_cfg = rtw8852b_btc_init_cfg,
@@ -2564,7 +2599,9 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
@@ -2620,6 +2657,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852b_c2h_regs,
.page_regs = &rtw8852b_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
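
One more pattern worth noting from the rtw8851b/rtw8852a/rtw8852b chip_info hunks: the single support_bw160 bool is replaced by a support_bandwidths bitmap indexed by enum nl80211_chan_width, so each supported width is one BIT() in a single field. A hypothetical query helper (not in this patch) showing how such a bitmap would be consulted, assuming <net/cfg80211.h> for the enum:

#include <linux/bits.h>
#include <net/cfg80211.h>	/* enum nl80211_chan_width */

/* Hypothetical helper: true if a bitmap built from BIT(NL80211_CHAN_WIDTH_*)
 * entries, like the support_bandwidths values set above, advertises @width.
 */
static bool demo_chip_supports_bw(u32 support_bandwidths,
				  enum nl80211_chan_width width)
{
	return !!(support_bandwidths & BIT(width));
}

/* e.g. demo_chip_supports_bw(map, NL80211_CHAN_WIDTH_160) stays false for the
 * three chips above, which set only the 20/40/80 MHz bits.
 */
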
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
index d2ce16e98bac..07945d06dc59 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
@@ -16936,7 +16936,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][8] = 52,
[0][0][1][0][RTW89_WW][10] = 52,
[0][0][1][0][RTW89_WW][12] = 52,
- [0][0][1][0][RTW89_WW][14] = 1,
+ [0][0][1][0][RTW89_WW][14] = 52,
[0][0][1][0][RTW89_WW][15] = 52,
[0][0][1][0][RTW89_WW][17] = 52,
[0][0][1][0][RTW89_WW][19] = 52,
@@ -16954,10 +16954,10 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][42] = 28,
[0][0][1][0][RTW89_WW][44] = 28,
[0][0][1][0][RTW89_WW][46] = 28,
- [0][0][1][0][RTW89_WW][48] = 78,
- [0][0][1][0][RTW89_WW][50] = 78,
- [0][0][1][0][RTW89_WW][52] = 78,
- [0][1][1][0][RTW89_WW][0] = 1,
+ [0][0][1][0][RTW89_WW][48] = 76,
+ [0][0][1][0][RTW89_WW][50] = 76,
+ [0][0][1][0][RTW89_WW][52] = 76,
+ [0][1][1][0][RTW89_WW][0] = 30,
[0][1][1][0][RTW89_WW][2] = 32,
[0][1][1][0][RTW89_WW][4] = 30,
[0][1][1][0][RTW89_WW][6] = 30,
@@ -16982,9 +16982,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_WW][42] = 16,
[0][1][1][0][RTW89_WW][44] = 16,
[0][1][1][0][RTW89_WW][46] = 16,
- [0][1][1][0][RTW89_WW][48] = 56,
- [0][1][1][0][RTW89_WW][50] = 56,
- [0][1][1][0][RTW89_WW][52] = 56,
+ [0][1][1][0][RTW89_WW][48] = 50,
+ [0][1][1][0][RTW89_WW][50] = 50,
+ [0][1][1][0][RTW89_WW][52] = 50,
[0][0][2][0][RTW89_WW][0] = 42,
[0][0][2][0][RTW89_WW][2] = 42,
[0][0][2][0][RTW89_WW][4] = 42,
@@ -17038,9 +17038,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_WW][42] = 16,
[0][1][2][0][RTW89_WW][44] = 16,
[0][1][2][0][RTW89_WW][46] = 16,
- [0][1][2][0][RTW89_WW][48] = 58,
- [0][1][2][0][RTW89_WW][50] = 58,
- [0][1][2][0][RTW89_WW][52] = 58,
+ [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][50] = 52,
+ [0][1][2][0][RTW89_WW][52] = 52,
[0][1][2][1][RTW89_WW][0] = 14,
[0][1][2][1][RTW89_WW][2] = 14,
[0][1][2][1][RTW89_WW][4] = 14,
@@ -17066,9 +17066,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_WW][42] = 4,
[0][1][2][1][RTW89_WW][44] = 4,
[0][1][2][1][RTW89_WW][46] = 4,
- [0][1][2][1][RTW89_WW][48] = 58,
- [0][1][2][1][RTW89_WW][50] = 58,
- [0][1][2][1][RTW89_WW][52] = 58,
+ [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][50] = 52,
+ [0][1][2][1][RTW89_WW][52] = 52,
[1][0][2][0][RTW89_WW][1] = 42,
[1][0][2][0][RTW89_WW][5] = 42,
[1][0][2][0][RTW89_WW][9] = 52,
@@ -17095,8 +17095,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_WW][36] = 50,
[1][1][2][0][RTW89_WW][39] = 16,
[1][1][2][0][RTW89_WW][43] = 16,
- [1][1][2][0][RTW89_WW][47] = 68,
- [1][1][2][0][RTW89_WW][51] = 66,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 62,
[1][1][2][1][RTW89_WW][1] = 16,
[1][1][2][1][RTW89_WW][5] = 16,
[1][1][2][1][RTW89_WW][9] = 28,
@@ -17109,8 +17109,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][36] = 36,
[1][1][2][1][RTW89_WW][39] = 4,
[1][1][2][1][RTW89_WW][43] = 4,
- [1][1][2][1][RTW89_WW][47] = 68,
- [1][1][2][1][RTW89_WW][51] = 66,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 62,
[2][0][2][0][RTW89_WW][3] = 42,
[2][0][2][0][RTW89_WW][11] = 52,
[2][0][2][0][RTW89_WW][18] = 52,
@@ -17227,7 +17227,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][14] = 78,
[0][0][1][0][RTW89_CN][14] = 58,
[0][0][1][0][RTW89_QATAR][14] = 58,
- [0][0][1][0][RTW89_UK][14] = 1,
+ [0][0][1][0][RTW89_UK][14] = 58,
[0][0][1][0][RTW89_FCC][15] = 76,
[0][0][1][0][RTW89_ETSI][15] = 58,
[0][0][1][0][RTW89_MKK][15] = 76,
@@ -17435,7 +17435,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 78,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 76,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CHILE][48] = 127,
@@ -17447,7 +17447,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 78,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 76,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CHILE][50] = 127,
@@ -17459,7 +17459,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 78,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 76,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CHILE][52] = 127,
@@ -17479,7 +17479,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 50,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
- [0][1][1][0][RTW89_UK][0] = 1,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][2] = 68,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 48,
@@ -17771,7 +17771,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][48] = 56,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
- [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 50,
[0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
[0][1][1][0][RTW89_CHILE][48] = 127,
@@ -17783,7 +17783,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][50] = 56,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
- [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 50,
[0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
[0][1][1][0][RTW89_CHILE][50] = 127,
@@ -17795,7 +17795,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][52] = 56,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
- [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 50,
[0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
[0][1][1][0][RTW89_CHILE][52] = 127,
@@ -18107,7 +18107,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 78,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 78,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CHILE][48] = 127,
@@ -18119,7 +18119,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 78,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 78,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CHILE][50] = 127,
@@ -18131,7 +18131,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 78,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 78,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CHILE][52] = 127,
@@ -18443,7 +18443,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][48] = 58,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
- [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 50,
[0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
[0][1][2][0][RTW89_CHILE][48] = 127,
@@ -18455,7 +18455,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][50] = 58,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
- [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 52,
[0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
[0][1][2][0][RTW89_CHILE][50] = 127,
@@ -18467,7 +18467,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][52] = 58,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
- [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 52,
[0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
[0][1][2][0][RTW89_CHILE][52] = 127,
@@ -18779,7 +18779,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][48] = 58,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
- [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 50,
[0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
[0][1][2][1][RTW89_CHILE][48] = 127,
@@ -18791,7 +18791,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][50] = 58,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
- [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 52,
[0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
[0][1][2][1][RTW89_CHILE][50] = 127,
@@ -18803,7 +18803,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][52] = 58,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
- [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 52,
[0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
[0][1][2][1][RTW89_CHILE][52] = 127,
@@ -18959,7 +18959,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 78,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 78,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CHILE][47] = 127,
@@ -18971,7 +18971,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 70,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 78,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CHILE][51] = 127,
@@ -19127,7 +19127,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][47] = 68,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
- [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 62,
[1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
[1][1][2][0][RTW89_CHILE][47] = 127,
@@ -19139,7 +19139,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][51] = 66,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
- [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 62,
[1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
[1][1][2][0][RTW89_CHILE][51] = 127,
@@ -19295,7 +19295,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][47] = 68,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
- [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 62,
[1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
[1][1][2][1][RTW89_CHILE][47] = 127,
@@ -19307,7 +19307,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][51] = 66,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
- [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 62,
[1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
[1][1][2][1][RTW89_CHILE][51] = 127,
@@ -19391,7 +19391,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 64,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CHILE][49] = 127,
@@ -19475,7 +19475,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_FCC][49] = 58,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
- [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 66,
[2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
[2][1][2][0][RTW89_CHILE][49] = 127,
@@ -19559,7 +19559,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_FCC][49] = 58,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
- [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 66,
[2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
[2][1][2][1][RTW89_CHILE][49] = 127,
@@ -20723,9 +20723,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][42] = 14,
[0][1][RTW89_WW][44] = 14,
[0][1][RTW89_WW][46] = 14,
- [0][1][RTW89_WW][48] = 20,
- [0][1][RTW89_WW][50] = 20,
- [0][1][RTW89_WW][52] = 20,
+ [0][1][RTW89_WW][48] = 16,
+ [0][1][RTW89_WW][50] = 16,
+ [0][1][RTW89_WW][52] = 16,
[1][0][RTW89_WW][0] = 34,
[1][0][RTW89_WW][2] = 34,
[1][0][RTW89_WW][4] = 34,
@@ -20779,9 +20779,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][42] = 16,
[1][1][RTW89_WW][44] = 16,
[1][1][RTW89_WW][46] = 16,
- [1][1][RTW89_WW][48] = 32,
- [1][1][RTW89_WW][50] = 32,
- [1][1][RTW89_WW][52] = 32,
+ [1][1][RTW89_WW][48] = 28,
+ [1][1][RTW89_WW][50] = 30,
+ [1][1][RTW89_WW][52] = 30,
[2][0][RTW89_WW][0] = 44,
[2][0][RTW89_WW][2] = 44,
[2][0][RTW89_WW][4] = 44,
@@ -20835,9 +20835,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][42] = 16,
[2][1][RTW89_WW][44] = 16,
[2][1][RTW89_WW][46] = 16,
- [2][1][RTW89_WW][48] = 44,
- [2][1][RTW89_WW][50] = 44,
- [2][1][RTW89_WW][52] = 44,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
[0][0][RTW89_FCC][0] = 52,
[0][0][RTW89_ETSI][0] = 24,
[0][0][RTW89_MKK][0] = 26,
@@ -21141,7 +21141,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 32,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CHILE][48] = 127,
@@ -21153,7 +21153,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 32,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CHILE][50] = 127,
@@ -21165,7 +21165,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 32,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CHILE][52] = 127,
@@ -21477,7 +21477,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
- [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_IC][48] = 16,
[0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
[0][1][RTW89_CHILE][48] = 127,
@@ -21489,7 +21489,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
- [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_IC][50] = 16,
[0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
[0][1][RTW89_CHILE][50] = 127,
@@ -21501,7 +21501,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][52] = 20,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
- [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_IC][52] = 16,
[0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
[0][1][RTW89_CHILE][52] = 127,
@@ -21813,7 +21813,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 44,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 54,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CHILE][48] = 127,
@@ -21825,7 +21825,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 44,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 54,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CHILE][50] = 127,
@@ -21837,7 +21837,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 44,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CHILE][52] = 127,
@@ -22149,7 +22149,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][48] = 32,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
- [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_IC][48] = 28,
[1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
[1][1][RTW89_CHILE][48] = 127,
@@ -22161,7 +22161,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][50] = 32,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
- [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_IC][50] = 30,
[1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
[1][1][RTW89_CHILE][50] = 127,
@@ -22173,7 +22173,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][52] = 32,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
- [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_IC][52] = 30,
[1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
[1][1][RTW89_CHILE][52] = 127,
@@ -22486,7 +22486,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
- [2][0][RTW89_KCC][48] = 127,
+ [2][0][RTW89_KCC][48] = 66,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CHILE][48] = 127,
[2][0][RTW89_UKRAINE][48] = 127,
@@ -22498,7 +22498,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
- [2][0][RTW89_KCC][50] = 127,
+ [2][0][RTW89_KCC][50] = 66,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CHILE][50] = 127,
[2][0][RTW89_UKRAINE][50] = 127,
@@ -22510,7 +22510,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
- [2][0][RTW89_KCC][52] = 127,
+ [2][0][RTW89_KCC][52] = 66,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CHILE][52] = 127,
[2][0][RTW89_UKRAINE][52] = 127,
@@ -22821,7 +22821,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][48] = 44,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
- [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_IC][48] = 40,
[2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
[2][1][RTW89_CHILE][48] = 127,
@@ -22833,7 +22833,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][50] = 44,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
- [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_IC][50] = 40,
[2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
[2][1][RTW89_CHILE][50] = 127,
@@ -22845,7 +22845,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][52] = 44,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
- [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_IC][52] = 40,
[2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
[2][1][RTW89_CHILE][52] = 127,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 920b20bbcfb7..ed71364e6437 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 8618d0204f66..17e6164855fa 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -842,7 +842,7 @@ static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -2365,28 +2365,55 @@ static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = (module->rfe_type % 2 ? 2 : 3);
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -2449,7 +2476,6 @@ void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -2468,7 +2494,7 @@ static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852c_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852c_set_trx_mask(rtwdev,
@@ -2813,6 +2839,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.enable_bb_rf = rtw8852c_mac_enable_bb_rf,
.disable_bb_rf = rtw8852c_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852c_bb_reset,
.bb_sethw = rtw8852c_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2823,7 +2850,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.read_phycap = rtw8852c_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852c_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852c_rfk_channel,
.rfk_band_changed = rtw8852c_rfk_band_changed,
.rfk_scan = rtw8852c_rfk_scan,
@@ -2848,6 +2877,12 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
.resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
.h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852c_btc_set_rfe,
.btc_init_cfg = rtw8852c_btc_init_cfg,
@@ -2902,7 +2937,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
- .support_bw160 = true,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
@@ -2959,6 +2997,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852c_c2h_regs,
.page_regs = &rtw8852c_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 4592de3dbd94..583ea673a4f5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -35,6 +35,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 0e7300cc6d9e..367459bd1345 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2023 Realtek Corporation
*/
+#include "coex.h"
#include "debug.h"
#include "efuse.h"
#include "fw.h"
@@ -9,12 +10,16 @@
#include "phy.h"
#include "reg.h"
#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+#include "util.h"
#define RTW8922A_FW_FORMAT_MAX 0
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME ".bin"
+#define HE_N_USER_MAX_8922A 4
+
static const struct rtw89_hfc_ch_cfg rtw8922a_hfc_chcfg_pcie[] = {
{2, 1641, grp_0}, /* ACH 0 */
{2, 1641, grp_0}, /* ACH 1 */
@@ -43,6 +48,8 @@ static const struct rtw89_hfc_pub_cfg rtw8922a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8922a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
&rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
+ [RTW89_QTA_DBCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
[RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_prec_cfg_c2,
RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
@@ -54,6 +61,11 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
&rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
&rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
&rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
+ [RTW89_QTA_DBCC] = {RTW89_QTA_DBCC, &rtw89_mac_size.wde_size0_v1,
+ &rtw89_mac_size.ple_size0_v1, &rtw89_mac_size.wde_qt0_v1,
+ &rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
+ &rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
+ &rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4_v1,
&rtw89_mac_size.ple_size3_v1, &rtw89_mac_size.wde_qt4,
&rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt9,
@@ -63,6 +75,31 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
NULL},
};
+static const u32 rtw8922a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_BE_H2CREG_DATA0, R_BE_H2CREG_DATA1, R_BE_H2CREG_DATA2,
+ R_BE_H2CREG_DATA3
+};
+
+static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = {
+ R_BE_C2HREG_DATA0, R_BE_C2HREG_DATA1, R_BE_C2HREG_DATA2,
+ R_BE_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8922a_page_regs = {
+ .hci_fc_ctrl = R_BE_HCI_FC_CTRL,
+ .ch_page_ctrl = R_BE_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_BE_CH0_PAGE_CTRL,
+ .ach_page_info = R_BE_CH0_PAGE_INFO,
+ .pub_page_info3 = R_BE_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_BE_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_BE_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_BE_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_BE_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_BE_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_BE_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_BE_WP_PAGE_INFO1,
+};
+
static const struct rtw89_reg_imr rtw8922a_imr_dmac_regs[] = {
{R_BE_DISP_HOST_IMR, B_BE_DISP_HOST_IMR_CLR, B_BE_DISP_HOST_IMR_SET},
{R_BE_DISP_CPU_IMR, B_BE_DISP_CPU_IMR_CLR, B_BE_DISP_CPU_IMR_SET},
@@ -119,6 +156,51 @@ static const struct rtw89_imr_table rtw8922a_imr_cmac_table = {
.n_regs = ARRAY_SIZE(rtw8922a_imr_cmac_regs),
};
+static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = {
+ .ref_rate = {R_BE_TRXPTCL_RESP_1, B_BE_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8922a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V2,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .bmode_pd_reg = R_BMODE_PDTH_EN_V2,
+ .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1,
+ .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V2,
+ .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_edcca_regs rtw8922a_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_BE,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_PPDU_LVL_BE,
+ .ppdu_mask = B_EDCCA_LVL_MSK1,
+ .rpt_a = R_EDCCA_RPT_A_BE,
+ .rpt_b = R_EDCCA_RPT_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .rpt_sel_be = R_EDCCA_RPTREG_SEL_BE,
+ .rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_BE_M,
+};
+
static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_SYS] = {.offset = 0x00000, .size = 0x310},
[RTW89_EFUSE_BLOCK_RF] = {.offset = 0x10000, .size = 0x240},
@@ -130,6 +212,36 @@ static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_ADIE] = {.offset = 0x70000, .size = 0x10},
};
+static void rtw8922a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x1, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x0, phy_idx);
+ }
+}
+
static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -273,6 +385,9 @@ static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN |
B_BE_FEN_BBPLAT_RSTB);
+ if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ rtw89_efuse_read_fw_secure_be(rtwdev);
+
return 0;
}
@@ -574,6 +689,32 @@ static void rtw8922a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG_V1, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA_V1, pabias_5g);
+ }
+}
+
static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
u8 *phycap_map)
{
@@ -591,6 +732,31 @@ static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pad_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pad_bias_2g, pad_bias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] no PG, do nothing\n");
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pad_bias_2g = u8_get_bits(info->pad_bias_trim[i], GENMASK(3, 0));
+ pad_bias_5g = u8_get_bits(info->pad_bias_trim[i], GENMASK(7, 4));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pad_bias_2g, pad_bias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXG_V1, pad_bias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXA_V1, pad_bias_5g);
+ }
+}
+
static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
{
rtw8922a_phycap_parsing_thermal_trim(rtwdev, phycap_map);
@@ -600,6 +766,1547 @@ static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
return 0;
}
+static void rtw8922a_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8922a_pa_bias_trim(rtwdev);
+ rtw8922a_pad_bias_trim(rtwdev);
+}
+
+static void rtw8922a_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_BE_TX_SUB_BAND_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_BE_TXRATE_CHK, mac_idx);
+ u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_RFMOD, mac_idx);
+ u8 txsb20 = 0, txsb40 = 0, txsb80 = 0;
+ u8 rf_mod_val, chk_rate_mask;
+ u32 txsb;
+ u32 reg;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ txsb80 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_80);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_80:
+ txsb40 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb20 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_mod_val = BE_WMAC_RFMOD_160M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK) |
+ u32_encode_bits(txsb80, B_BE_TXSB_80M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_mod_val = BE_WMAC_RFMOD_80M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_mod_val = BE_WMAC_RFMOD_40M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ rf_mod_val = BE_WMAC_RFMOD_20M;
+ txsb = 0;
+ break;
+ }
+
+ if (txsb20 <= BE_PRI20_BITMAP_MAX)
+ txsb |= u32_encode_bits(BIT(txsb20), B_BE_PRI20_BITMAP_MASK);
+
+ rtw89_write8_mask(rtwdev, rf_mod, B_BE_WMAC_RFMOD_MASK, rf_mod_val);
+ rtw89_write32(rtwdev, sub_carr, txsb);
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ chk_rate_mask = B_BE_BAND_MODE;
+ break;
+ case RTW89_BAND_5G:
+ case RTW89_BAND_6G:
+ chk_rate_mask = B_BE_CHECK_CCK_EN | B_BE_RTS_LIMIT_IN_OFDM6;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
+ return;
+ }
+
+ rtw89_write8_clr(rtwdev, chk_rate, B_BE_BAND_MODE | B_BE_CHECK_CCK_EN |
+ B_BE_RTS_LIMIT_IN_OFDM6);
+ rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask);
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_320:
+ case RTW89_CHANNEL_WIDTH_160:
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_40:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x41);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x41);
+ break;
+ default:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x3f);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x3e);
+ break;
+ }
+}
+
+static const u32 rtw8922a_sco_barker_threshold[14] = {
+ 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6,
+ 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a
+};
+
+static const u32 rtw8922a_sco_cck_threshold[14] = {
+ 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db,
+ 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e
+};
+
+static int rtw8922a_ctrl_sco_cck(struct rtw89_dev *rtwdev,
+ u8 primary_ch, enum rtw89_bandwidth bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ch_element;
+
+ if (primary_ch >= 14)
+ return -EINVAL;
+
+ ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_idx(rtwdev, R_BK_FC0INV, B_BK_FC0INV,
+ rtw8922a_sco_barker_threshold[ch_element],
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CCK_FC0INV, B_CCK_FC0INV,
+ rtw8922a_sco_cck_threshold[ch_element],
+ phy_idx);
+
+ return 0;
+}
+
+struct rtw8922a_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_g_mask;
+ u32 gain_a_mask;
+};
+
+static const struct rtw89_reg_def rpl_comp_bw160[RTW89_BW20_SC_160M] = {
+ { .addr = 0x41E8, .mask = 0xFF00},
+ { .addr = 0x41E8, .mask = 0xFF0000},
+ { .addr = 0x41E8, .mask = 0xFF000000},
+ { .addr = 0x41EC, .mask = 0xFF},
+ { .addr = 0x41EC, .mask = 0xFF00},
+ { .addr = 0x41EC, .mask = 0xFF0000},
+ { .addr = 0x41EC, .mask = 0xFF000000},
+ { .addr = 0x41F0, .mask = 0xFF}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw80[RTW89_BW20_SC_80M] = {
+ { .addr = 0x41F4, .mask = 0xFF},
+ { .addr = 0x41F4, .mask = 0xFF00},
+ { .addr = 0x41F4, .mask = 0xFF0000},
+ { .addr = 0x41F4, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw40[RTW89_BW20_SC_40M] = {
+ { .addr = 0x41F0, .mask = 0xFF0000},
+ { .addr = 0x41F0, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw20[RTW89_BW20_SC_20M] = {
+ { .addr = 0x41F0, .mask = 0xFF00}
+};
+
+static const struct rtw8922a_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a8, 0x44a8}, .gain_a = {0x4078, 0x4478},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+};
+
+static const struct rtw8922a_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4054, 0x4454}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x7FC0000, .gain_a_mask = 0x1FF},
+ { .gain_g = {0x4058, 0x4458}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x1FF, .gain_a_mask = 0x3FE00 },
+};
+
+struct rtw8922a_bb_gain_bypass {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_mask_g;
+ u32 gain_mask_a;
+};
+
+static void rtw8922a_set_rpl_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ u32 reg_path_ofst = 0;
+ u32 mask;
+ s32 val;
+ u32 reg;
+ int i;
+
+ if (path == RF_PATH_B)
+ reg_path_ofst = 0x400;
+
+ for (i = 0; i < RTW89_BW20_SC_160M; i++) {
+ reg = rpl_comp_bw160[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw160[i].mask;
+ val = gain->rpl_ofst_160[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++) {
+ reg = rpl_comp_bw80[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw80[i].mask;
+ val = gain->rpl_ofst_80[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_40M; i++) {
+ reg = rpl_comp_bw40[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw40[i].mask;
+ val = gain->rpl_ofst_40[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_20M; i++) {
+ reg = rpl_comp_bw20[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw20[i].mask;
+ val = gain->rpl_ofst_20[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_lna_tia_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ enum rtw89_phy_bb_bw_be bw_type;
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ bw_type = chan->band_width <= RTW89_CHANNEL_WIDTH_40 ?
+ RTW89_BB_BW_20_40 : RTW89_BB_BW_80_160_320;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_lna[i].gain_g[path];
+ mask = bb_gain_lna[i].gain_g_mask;
+ } else {
+ reg = bb_gain_lna[i].gain_a[path];
+ mask = bb_gain_lna[i].gain_a_mask;
+ }
+ val = gain->lna_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_tia[i].gain_g[path];
+ mask = bb_gain_tia[i].gain_g_mask;
+ } else {
+ reg = bb_gain_tia[i].gain_a[path];
+ mask = bb_gain_tia[i].gain_a_mask;
+ }
+ val = gain->tia_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_lna_tia_gain(rtwdev, chan, path, phy_idx);
+ rtw8922a_set_rpl_gain(rtwdev, chan, path, phy_idx);
+}
+
+static void rtw8922a_set_rx_gain_normal_cck(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ s8 value = -gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK]; /* S(8,2) */
+ u8 fraction = value & 0x3;
+
+ if (fraction) {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20,
+ (0x4 - fraction) << 1);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40,
+ (0x4 - fraction) << 1);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 1 + 0xdc);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20, 0);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40, 0);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 0xdc);
+ }
+}
+
+static void rtw8922a_set_rx_gain_normal_ofdm(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ static const u32 rssi_tb_bias_comp[2] = {0x41f8, 0x45f8};
+ static const u32 rssi_tb_ext_comp[2] = {0x4208, 0x4608};
+ static const u32 rssi_ofst_addr[2] = {0x40c8, 0x44c8};
+ static const u32 rpl_bias_comp[2] = {0x41e8, 0x45e8};
+ static const u32 rpl_ext_comp[2] = {0x41f8, 0x45f8};
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_band;
+ s8 v1, v2, v3;
+ s32 value;
+
+ gain_band = rtw89_subband_to_gain_offset_band_of_ofdm(chan->subband_type);
+ value = gain->offset[path][gain_band];
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], 0xff000000, value + 0xF8);
+
+ value *= -4;
+ v1 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v1;
+ v2 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v2;
+ v3 = clamp_t(s32, value, S8_MIN, S8_MAX);
+
+ rtw89_phy_write32_mask(rtwdev, rpl_bias_comp[path], 0xff, v1);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff, v2);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff00, v3);
+
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_bias_comp[path], 0xff0000, v1);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff0000, v2);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff000000, v3);
+}
+
+static void rtw8922a_set_rx_gain_normal(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ if (!gain->offset_valid)
+ return;
+
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8922a_set_rx_gain_normal_cck(rtwdev, chan, path);
+
+ rtw8922a_set_rx_gain_normal_ofdm(rtwdev, chan, path);
+}
+
+static void rtw8922a_set_cck_parameters(struct rtw89_dev *rtwdev, u8 central_ch,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (central_ch == 14) {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3b13ff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x1c42de, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0xfdb0ad, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0xf60f6e, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfd8f92, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0x02d011, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0x01c02c, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xfff00a, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3a63ca, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x2a833f, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0x1491f8, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0x03c0b0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfccff1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0xfccfc3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0xfebfdc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xffdff7, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 band_sel[2] = {0x4160, 0x4560};
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ bool is_2g = band == RTW89_BAND_2G;
+ u8 chan_idx;
+ u8 path;
+ u8 sco;
+
+ if (!central_freq) {
+ rtw89_warn(rtwdev, "Invalid central_freq\n");
+ return;
+ }
+
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_B, phy_idx);
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8922A; path++)
+ rtw89_phy_write32_idx(rtwdev, band_sel[path], BIT((26)), is_2g, phy_idx);
+
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_A);
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_B);
+
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_FC0, central_freq, phy_idx);
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_FC0_INV, sco, phy_idx);
+
+ if (band == RTW89_BAND_2G)
+ rtw8922a_set_cck_parameters(rtwdev, central_ch, phy_idx);
+
+ chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
+}
+
+static void
+rtw8922a_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_sb, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri_sb:%d)\n", bw,
+ pri_sb);
+ break;
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 0, phy_idx);
+}
+
+static u32 rtw8922a_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
+static void rtw8922a_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s32 freq_diff, csi_idx, csi_tone_idx;
+ u32 spur_freq;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN,
+ 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_TONE_IDX,
+ csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN, 1, phy_idx);
+}
+
+static const struct rtw89_nbi_reg_def rtw8922a_nbi_reg_def[] = {
+ [RF_PATH_A] = {
+ .notch1_idx = {0x41a0, 0xFF},
+ .notch1_frac_idx = {0x41a0, 0xC00},
+ .notch1_en = {0x41a0, 0x1000},
+ .notch2_idx = {0x41ac, 0xFF},
+ .notch2_frac_idx = {0x41ac, 0xC00},
+ .notch2_en = {0x41ac, 0x1000},
+ },
+ [RF_PATH_B] = {
+ .notch1_idx = {0x45a0, 0xFF},
+ .notch1_frac_idx = {0x45a0, 0xC00},
+ .notch1_en = {0x45a0, 0x1000},
+ .notch2_idx = {0x45ac, 0xFF},
+ .notch2_frac_idx = {0x45ac, 0xC00},
+ .notch2_en = {0x45ac, 0x1000},
+ },
+};
+
+static void rtw8922a_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_nbi_reg_def *nbi = &rtw8922a_nbi_reg_def[path];
+ s32 nbi_frac_idx, nbi_frac_tone_idx;
+ s32 nbi_idx, nbi_tone_idx;
+ bool notch2_chk = false;
+ u32 spur_freq, fc;
+ s32 freq_diff;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ return;
+ }
+
+ fc = chan->freq;
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
+ fc = (spur_freq > fc) ? fc + 40 : fc - 40;
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
+ notch2_chk = true;
+ }
+
+ freq_diff = (spur_freq - fc) * 1000000;
+ nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5,
+ &nbi_frac_idx);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
+ s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
+ } else {
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
+
+ s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
+ }
+ nbi_frac_tone_idx =
+ s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_idx.addr,
+ nbi->notch2_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_frac_idx.addr,
+ nbi->notch2_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_idx.addr,
+ nbi->notch1_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_frac_idx.addr,
+ nbi->notch1_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ }
+}
+
+static void rtw8922a_spur_elimination(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_csi_tone_idx(rtwdev, chan, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B, phy_idx);
+}
+
+static void rtw8922a_ctrl_afe_dac(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ u32 cr_ofst = 0x0;
+
+ if (path == RF_PATH_B)
+ cr_ofst = 0x100;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xE);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x7);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xD);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x6);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct rtw89_reg2_def bb_mcu0_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static const struct rtw89_reg2_def bb_mcu1_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static void rtw8922a_bbmcu_cr_init(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_reg2_def *reg;
+ int size;
+ int i;
+
+ if (phy_idx == RTW89_PHY_0) {
+ reg = bb_mcu0_init_reg;
+ size = ARRAY_SIZE(bb_mcu0_init_reg);
+ } else {
+ reg = bb_mcu1_init_reg;
+ size = ARRAY_SIZE(bb_mcu1_init_reg);
+ }
+
+ for (i = 0; i < size; i++, reg++)
+ rtw89_bbmcu_write32(rtwdev, reg->addr, reg->data, phy_idx);
+}
+
+static const u32 dmac_sys_mask[2] = {B_BE_DMAC_BB_PHY0_MASK, B_BE_DMAC_BB_PHY1_MASK};
+static const u32 bbrst_mask[2] = {B_BE_FEN_BBPLAT_RSTB, B_BE_FEN_BB1PLAT_RSTB};
+static const u32 glbrst_mask[2] = {B_BE_FEN_BB_IP_RSTN, B_BE_FEN_BB1_IP_RSTN};
+static const u32 mcu_bootrdy_mask[2] = {B_BE_BOOT_RDY0, B_BE_BOOT_RDY1};
+
+static void rtw8922a_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 rdy = 0;
+
+ if (phy_idx == RTW89_PHY_1)
+ rdy = 1;
+
+ rtw89_write32_mask(rtwdev, R_BE_DMAC_SYS_CR32B, dmac_sys_mask[phy_idx], 0x7FF9);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x1);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx], rdy);
+ rtw89_write32_mask(rtwdev, R_BE_MEM_PWR_CTRL, B_BE_MEM_BBMCU0_DS_V1, 0);
+
+ fsleep(1);
+ rtw8922a_bbmcu_cr_init(rtwdev, phy_idx);
+}
+
+static void rtw8922a_bb_postinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx == RTW89_PHY_0)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx]);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx]);
+
+ rtw89_phy_write32_set(rtwdev, R_BBCLK, B_CLK_640M);
+ rtw89_phy_write32_clr(rtwdev, R_TXSCALE, B_TXFCTR_EN);
+ rtw89_phy_set_phy_regs(rtwdev, R_TXFCTR, B_TXFCTR_THD, 0x200);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_EHT_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_HE_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_HT_VHT_TH, 0xAAA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_EHT_MCS14, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_EHT_MCS15, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_EHTTB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEERSU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEMU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_TB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SU_PUNC, B_SU_PUNC_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_HWGEN_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_PWROFST_COMP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_BY_SLOPE, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_A, B_MGA_AEND, 0xe0);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_MAG_AB, 0xe0c000);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_A, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_B, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SC_CORNER, B_SC_CORNER, 0x200);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x1, phy_idx);
+}
+
+static void rtw8922a_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ bool en, enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1,
+ B_RXCCA_BE1_DIS, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static int rtw8922a_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_reg2_def path_com_cr[] = {
+ {0x11A00, 0x21C86900},
+ {0x11A04, 0x00E4E433},
+ {0x11A08, 0x39390CC9},
+ {0x11A0C, 0x4E433240},
+ {0x11A10, 0x90CC900E},
+ {0x11A14, 0x00240393},
+ {0x11A18, 0x201C8600},
+ };
+ int ret = 0;
+ u32 reg;
+ int i;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL, 0x0, phy_idx);
+
+ if (phy_idx == RTW89_PHY_1 && !rtwdev->dbcc_en)
+ return 0;
+
+ if (tx_path == RF_PATH_A) {
+ path_com_cr[0].data = 0x21C82900;
+ path_com_cr[1].data = 0x00E4E431;
+ path_com_cr[2].data = 0x39390C49;
+ path_com_cr[3].data = 0x4E431240;
+ path_com_cr[4].data = 0x90C4900E;
+ path_com_cr[6].data = 0x201C8200;
+ } else if (tx_path == RF_PATH_B) {
+ path_com_cr[0].data = 0x21C04900;
+ path_com_cr[1].data = 0x00E4E032;
+ path_com_cr[2].data = 0x39380C89;
+ path_com_cr[3].data = 0x4E032240;
+ path_com_cr[4].data = 0x80C8900E;
+ path_com_cr[6].data = 0x201C0400;
+ } else if (tx_path == RF_PATH_AB) {
+ path_com_cr[0].data = 0x21C86900;
+ path_com_cr[1].data = 0x00E4E433;
+ path_com_cr[2].data = 0x39390CC9;
+ path_com_cr[3].data = 0x4E433240;
+ path_com_cr[4].data = 0x90CC900E;
+ path_com_cr[6].data = 0x201C8600;
+ } else {
+ ret = -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(path_com_cr); i++) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, path_com_cr[i].addr, phy_idx);
+ rtw89_write32(rtwdev, reg, path_com_cr[i].data);
+ }
+
+ return ret;
+}
+
+static void rtw8922a_bb_reset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+}
+
+static int rtw8922a_cfg_rx_nss_limit(struct rtw89_dev *rtwdev, u8 rx_nss,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rx_nss == 1) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else if (rx_nss == 2) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rtw8922a_tssi_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+}
+
+static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 rx_nss = (rx_path == RF_PATH_AB) ? 2 : 1;
+
+ /* Set to 0 first to avoid abnormal EDCCA report */
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x0, phy_idx);
+
+ if (rx_path == RF_PATH_A) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 1, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_B) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 2, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_AB) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 3, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x0);
+ } else if (mode == MLO_2_PLUS_0_1RF || mode == MLO_0_PLUS_2_1RF ||
+ mode == MLO_DBCC_NOT_SUPPORT) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x1);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_A);
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_B);
+ } else {
+ rtw89_warn(rtwdev, "unsupported MLO mode %d\n", mode);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else if (mode == MLO_0_PLUS_2_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ } else if ((mode == MLO_1_PLUS_1_1RF) || (mode == DBCC_LEGACY)) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x7BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3AAB);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x0);
+ }
+
+ return 0;
+}
+
+static void rtw8922a_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ u32 reg;
+
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP, B_EN_SND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP_C1, B_EN_SND_WO_NDP);
+
+ rtw89_write32_mask(rtwdev, R_BE_PWR_BOOST, B_BE_PWR_CTRL_SEL, 0);
+ if (rtwdev->dbcc_en) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, reg, B_BE_PWR_CTRL_SEL, 0);
+ }
+
+ rtw8922a_ctrl_mlo(rtwdev, rtwdev->mlo_dbcc_mode);
+}
+
+static void rtw8922a_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (cck_en) {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 1, phy_idx);
+ }
+}
+
+static void rtw8922a_set_channel_bb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_sb = chan->pri_sb_idx;
+
+ if (cck_en)
+ rtw8922a_ctrl_sco_cck(rtwdev, chan->primary_channel,
+ chan->band_width, phy_idx);
+
+ rtw8922a_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8922a_ctrl_bw(rtwdev, pri_sb, chan->band_width, phy_idx);
+ rtw8922a_ctrl_cck_en(rtwdev, cck_en, phy_idx);
+ rtw8922a_spur_elimination(rtwdev, chan, phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, RF_PATH_AB, phy_idx);
+}
+
+static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ }
+}
+
+static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_mlo_dbcc_mode mode)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw8922a_ctrl_mlo(rtwdev, mode);
+}
+
+static void rtw8922a_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8922a_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8922a_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_dfs_en_idx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_rf_path path,
+ bool en)
+{
+ u32 path_ofst = (path == RF_PATH_B) ? 0x100 : 0x0;
+
+ if (en)
+ rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), 0,
+ phy_idx);
+}
+
+static void rtw8922a_dfs_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_A, en);
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_B, en);
+}
+
+static void rtw8922a_adc_en_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool en)
+{
+ u32 val;
+
+ val = rtw89_phy_read32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1);
+
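+ /* The B_ADC_FIFO_EN_V1 field is handled here as a per-path gating mask:
+  * bit 0 corresponds to RF path A and bit 1 to RF path B; a bit is
+  * cleared to enable the ADC FIFO for that path and set to gate it off.
+  */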
+ if (en) {
+ if (path == RF_PATH_A)
+ val &= ~0x1;
+ else
+ val &= ~0x2;
+ } else {
+ if (path == RF_PATH_A)
+ val |= 0x1;
+ else
+ val |= 0x2;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1, val);
+}
+
+static void rtw8922a_adc_en(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
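+ /* In 1+1 MLO with a single RF, each PHY controls only its own path
+  * (PHY0 -> path A, PHY1 -> path B); in all other modes both paths are
+  * toggled together.
+  */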
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ else
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ } else {
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ }
+}
+
+static
+void rtw8922a_hal_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_mac_idx mac_idx,
+ enum rtw89_band band, u32 *tx_en, bool enter)
+{
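+ /* When entering, scheduler TX is stopped and DFS, TSSI tracking and the
+  * ADC are quiesced before the BB reset; when leaving, those blocks are
+  * re-enabled and scheduler TX is resumed with the saved mask.
+  */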
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
+ rtw8922a_dfs_en(rtwdev, false, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8922a_adc_en(rtwdev, false, phy_idx);
+ fsleep(40);
+ rtw8922a_bb_reset_en(rtwdev, band, false, phy_idx);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
+ rtw8922a_adc_en(rtwdev, true, phy_idx);
+ rtw8922a_dfs_en(rtwdev, true, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8922a_bb_reset_en(rtwdev, band, true, phy_idx);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, *tx_en);
+ }
+}
+
+static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
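+ /* The pre-hooks run only when entering the switch and handle
+  * DBCC-specific BB/RF preparation; the post-hooks run only when leaving
+  * and restore the MLO mode and synthesizer/calibration state.
+  */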
+ if (enter) {
+ rtw8922a_pre_set_channel_bb(rtwdev, phy_idx);
+ rtw8922a_pre_set_channel_rf(rtwdev, phy_idx);
+ }
+
+ rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
+
+ if (!enter) {
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
+ }
+}
+
+static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
+}
+
+static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
+
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, RF_AB);
+
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
+}
+
+static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+}
+
+static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+}
+
+static void rtw8922a_rfk_track(struct rtw89_dev *rtwdev)
+{
+}
+
+static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_OFDM, ref_ofdm);
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_CCK, ref_cck);
+}
+
+static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ctrl = en ? 0x1 : 0x0;
+
+ rtw89_phy_write32_idx(rtwdev, R_BEDGE3, B_BEDGE_CFG, ctrl, phy_idx);
+}
+
+static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_tx_shape *tx_shape = &rfe_parms->tx_shape;
+ u8 tx_shape_idx;
+ u8 band, regd;
+
+ band = chan->band_type;
+ regd = rtw89_regd_get(rtwdev, band);
+ tx_shape_idx = (*tx_shape->lmt)[band][RTW89_RS_OFDM][regd];
+
+ if (tx_shape_idx == 0)
+ rtw8922a_bb_tx_triangular(rtwdev, false, phy_idx);
+ else
+ rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx);
+}
+
+static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8922a_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
+}
+
+static void rtw8922a_ctrl_trx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path, u8 tx_nss,
+ enum rtw89_rf_path rx_path, u8 rx_nss)
+{
+ enum rtw89_phy_idx phy_idx;
+
+ for (phy_idx = RTW89_PHY_0; phy_idx <= RTW89_PHY_1; phy_idx++) {
+ rtw8922a_ctrl_tx_path_tmac(rtwdev, tx_path, phy_idx);
+ rtw8922a_ctrl_rx_path_tmac(rtwdev, rx_path, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x34, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x2a2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x26, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x2a30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x26, phy_idx);
+ }
+}
+
+static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = RF_PATH_AB;
+ u32 tx_en0, tx_en1;
+
+ if (hal->antenna_tx == RF_A)
+ ntx_path = RF_PATH_A;
+ else if (hal->antenna_tx == RF_B)
+ ntx_path = RF_PATH_B;
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, true);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, true);
+
+ rtw8922a_ctrl_trx_path(rtwdev, ntx_path, 2, RF_PATH_AB, 2);
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, false);
+}
+
+static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ int th;
+
+ /* read thermal only if debugging */
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_CFO | RTW89_DBG_RFK_TRACK))
+ return 80;
+
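+ /* Pulse the trigger bit to start a fresh thermal measurement, give it
+  * time to settle, then apply the per-path thermal trim offset to the
+  * raw reading.
+  */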
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ th = rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL_V1);
+ th += (s8)info->thermal_trim[rf_path];
+
+ return clamp_t(int, th, 0, U8_MAX);
+}
+
+static void rtw8922a_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
+ struct rtw89_btc_module_v7 *module = &md->md_v7;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->kt_ver = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+ module->wa_type = 0;
+
+ module->ant.type = BTC_ANT_SHARED;
+ module->ant.num = 2;
+ module->ant.isolation = 10;
+ module->ant.diversity = 0;
+ module->ant.single_pos = RF_PATH_A;
+ module->ant.btg_pos = RF_PATH_B;
+
+ if (module->kt_ver <= 1)
+ module->wa_type |= BTC_WA_HFP_ZB;
+
+ rtwdev->btc.cx.other.type = BTC_3CX_NONE;
+
+ if (module->rfe_type == 0) {
+ rtwdev->btc.dm.error.map.rfe_type0 = true;
+ return;
+ }
+
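+ /* Odd RFE types use a 2-antenna (shared) layout; even types use a
+  * 3-antenna layout with BT on a dedicated antenna.
+  */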
+ module->ant.num = (module->rfe_type % 2) ? 2 : 3;
+
+ if (module->kt_ver == 0)
+ module->ant.num = 2;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = module->ant.btg_pos;
+ rtwdev->btc.ant_type = module->ant.type;
+}
+
+static
+void rtw8922a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+}
+
+static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ant_info_v7 *ant = &btc->mdinfo.md_v7.ant;
+ u32 wl_pri, path_min, path_max;
+ u8 path;
+
+ /* for 1-Ant && 1-ss case: only 1-path */
+ if (ant->num == 1) {
+ path_min = ant->single_pos;
+ path_max = path_min;
+ } else {
+ path_min = RF_PATH_A;
+ path_max = RF_PATH_B;
+ }
+
+ for (path = path_min; path <= path_max; path++) {
+ /* set DEBUG_LUT_RFMODE_MASK = 1 to start trx-mask-setup */
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, BIT(17));
+
+ /* if GNT_WL=0 && BT=SS_group --> WL Tx/Rx = THRU */
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_SS_GROUP, 0x5ff);
+
+ /* if GNT_WL=0 && BT=Rx_group --> WL-Rx = THRU + WL-Tx = MASK */
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_RX_GROUP, 0x5df);
+
+ /* if GNT_WL = 0 && BT = Tx_group -->
+ * Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
+ */
+ if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+ else
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0);
+ }
+
+ /* set WL PTA Hi-Pri: Ack-Tx, beacon-tx, Trig-frame-Tx, Null-Tx */
+ wl_pri = B_BTC_RSP_ACK_HI | B_BTC_TX_BCN_HI | B_BTC_TX_TRI_HI |
+ B_BTC_TX_NULL_HI;
+ rtw89_write32(rtwdev, R_BTC_COEX_WL_REQ_BE, wl_pri);
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BE_BT_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* ZB coex table init for HFP PTA req-cmd bit-4 define issue COEX-900 */
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_0, 0xda5a5a5a);
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_1, 0xda5a5a5a);
+
+ rtw89_write32(rtwdev, R_BTC_ZB_BREAK_TBL, 0xf0ffffff);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 chan_idx = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan_idx == 0)
+ return;
+
+ rtw89_decode_chan_idx(rtwdev, chan_idx, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal =
+ RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+ rtw89_write32(rtwdev, R_BE_DMAC_SYS_CR32B, 0x7FF97FF9);
+
+ return 0;
+}
+
+static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -610,10 +2317,56 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
#endif
static const struct rtw89_chip_ops rtw8922a_chip_ops = {
+ .enable_bb_rf = rtw8922a_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8922a_mac_disable_bb_rf,
+ .bb_preinit = rtw8922a_bb_preinit,
+ .bb_postinit = rtw8922a_bb_postinit,
+ .bb_reset = rtw8922a_bb_reset,
+ .bb_sethw = rtw8922a_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v2,
+ .write_rf = rtw89_phy_write_rf_v2,
+ .set_channel = rtw8922a_set_channel,
+ .set_channel_help = rtw8922a_set_channel_help,
.read_efuse = rtw8922a_read_efuse,
.read_phycap = rtw8922a_read_phycap,
+ .fem_setup = NULL,
+ .rfe_gpio = NULL,
+ .rfk_hw_init = rtw8922a_rfk_hw_init,
+ .rfk_init = rtw8922a_rfk_init,
+ .rfk_init_late = rtw8922a_rfk_init_late,
+ .rfk_channel = rtw8922a_rfk_channel,
+ .rfk_band_changed = rtw8922a_rfk_band_changed,
+ .rfk_scan = rtw8922a_rfk_scan,
+ .rfk_track = rtw8922a_rfk_track,
+ .power_trim = rtw8922a_power_trim,
+ .set_txpwr = rtw8922a_set_txpwr,
+ .set_txpwr_ctrl = rtw8922a_set_txpwr_ctrl,
+ .init_txpwr_unit = NULL,
+ .get_thermal = rtw8922a_get_thermal,
+ .ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8922a_query_ppdu,
+ .ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = NULL,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
+ .query_rxdesc = rtw89_core_query_rxdesc_v2,
+ .fill_txdesc = rtw89_core_fill_txdesc_v2,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt_v2,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v2,
+ .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v2,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
+ .h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
+ .h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
+
+ .btc_set_rfe = rtw8922a_btc_set_rfe,
+ .btc_init_cfg = rtw8922a_btc_init_cfg,
};
const struct rtw89_chip_info rtw8922a_chip_info = {
@@ -650,11 +2403,16 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
+ .dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
- .support_chanctx_num = 1,
+ .support_chanctx_num = 2,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -665,7 +2423,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.acam_num = 128,
.bcam_num = 20,
.scam_num = 32,
- .bacam_num = 8,
+ .bacam_num = 24,
.bacam_dynamic_num = 8,
.bacam_ver = RTW89_BACAM_V1,
.ppdu_max_usr = 16,
@@ -683,10 +2441,19 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_G7,
.hci_func_en_addr = R_BE_HCI_FUNC_EN,
.h2c_desc_size = sizeof(struct rtw89_rxdesc_short_v2),
.txwd_body_size = sizeof(struct rtw89_txwd_body_v2),
.txwd_info_size = sizeof(struct rtw89_txwd_info_v2),
+ .h2c_ctrl_reg = R_BE_H2CREG_CTRL,
+ .h2c_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8},
+ .h2c_regs = rtw8922a_h2c_regs,
+ .c2h_ctrl_reg = R_BE_C2HREG_CTRL,
+ .c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
+ .c2h_regs = rtw8922a_c2h_regs,
+ .page_regs = &rtw8922a_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = NULL,
@@ -694,9 +2461,11 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,
+ .rrsr_cfgs = &rtw8922a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
.dma_ch_mask = 0,
+ .edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8922a,
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
new file mode 100644
index 000000000000..2a371829268c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+
+static void rtw8922a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk_man[2] = {R_TSSI_PWR_P0, R_TSSI_PWR_P1};
+
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 0);
+ else
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 1);
+}
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static
+void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band,
+ enum rtw89_bandwidth bw)
+{
+ const u32 rf_addr[2] = {RR_CFGCH, RR_CFGCH_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 rf_reg[RF_PATH_NUM_8922A][2];
+ u8 synpath;
+ u32 rf18;
+ u8 kpath;
+ u8 path;
+ u8 i;
+
+ rf_reg[RF_PATH_A][0] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_A][1] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[1], RFREG_MASK);
+ rf_reg[RF_PATH_B][0] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_B][1] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[1], RFREG_MASK);
+
+ kpath = rtw89_phy_get_kpath(rtwdev, phy);
+ synpath = rtw89_phy_get_syn_sel(rtwdev, phy);
+
+ rf18 = rtw89_read_rf(rtwdev, synpath, RR_CFGCH, RFREG_MASK);
+ if (rf18 == INV_RF_DATA) {
+ rtw89_warn(rtwdev, "[RFK] Invalid RF18 value\n");
+ return;
+ }
+
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ for (i = 0; i < 2; i++) {
+ if (rf_reg[path][i] == INV_RF_DATA) {
+ rtw89_warn(rtwdev,
+ "[RFK] Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+
+ rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW |
+ RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
+ else
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ default:
+ break;
+ case RTW89_BAND_5G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
+ break;
+ case RTW89_BAND_6G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
+ break;
+ }
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
+ break;
+ }
+
+ rtw89_write_rf(rtwdev, path, rf_addr[i],
+ RFREG_MASK, rf_reg[path][i]);
+ fsleep(100);
+ }
+ }
+
+ if (hal->cv != CHIP_CAV)
+ return;
+
+ if (band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c990);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ } else {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c190);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ }
+}
+
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
+}
+
+enum _rf_syn_pow {
+ RF_SYN_ON_OFF,
+ RF_SYN_OFF_ON,
+ RF_SYN_ALLON,
+ RF_SYN_ALLOFF,
+};
+
+static void rtw8922a_set_syn01_cav(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01_cbv(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "SYN config=%d\n", syn);
+
+ if (hal->cv == CHIP_CAV)
+ rtw8922a_set_syn01_cav(rtwdev, syn);
+ else
+ rtw8922a_set_syn01_cbv(rtwdev, syn);
+}
+
+static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
+{
+ u32 tmp;
+
+ if (idx > 2) {
+ rtw89_warn(rtwdev, "[DBCC][ERROR] index is out of limit!! index(%d)\n", idx);
+ return;
+ }
+
+ if (kpath & RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G5, tmp);
+ }
+
+ if (kpath & RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G5, tmp);
+ }
+}
+
+static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ const struct rtw89_chan *chan;
+ enum rtw89_entity_mode mode;
+ u8 s0_tbl, s1_tbl;
+ u8 tbl_sel;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ switch (mode) {
+ case RTW89_ENTITY_MODE_MCC_PREPARE:
+ sub_entity_idx = RTW89_SUB_ENTITY_1;
+ tbl_sel = 1;
+ break;
+ default:
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ tbl_sel = 0;
+ break;
+ }
+
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+
+ rfk_mcc->ch[tbl_sel] = chan->channel;
+ rfk_mcc->band[tbl_sel] = chan->band_type;
+ rfk_mcc->bw[tbl_sel] = chan->band_width;
+ rfk_mcc->table_idx = tbl_sel;
+
+ s0_tbl = tbl_sel;
+ s1_tbl = tbl_sel;
+
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_A, s0_tbl);
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_B, s1_tbl);
+}
+
+static void rtw8922a_rfk_mlo_ctrl(struct rtw89_dev *rtwdev)
+{
+ enum _rf_syn_pow syn_pow;
+
+ if (!rtwdev->dbcc_en)
+ goto set_rfk_reload;
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ syn_pow = RF_SYN_OFF_ON;
+ break;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case MLO_DBCC_NOT_SUPPORT:
+ default:
+ syn_pow = RF_SYN_ON_OFF;
+ break;
+ case MLO_1_PLUS_1_1RF:
+ case DBCC_LEGACY:
+ syn_pow = RF_SYN_ALLON;
+ break;
+ }
+
+ rtw8922a_set_syn01(rtwdev, syn_pow);
+
+set_rfk_reload:
+ rtw8922a_chlk_reload(rtwdev);
+}
+
+static void rtw8922a_rfk_pll_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+ u8 tmp;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_PLL_1, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL_1, tmp | 0xf8, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_APBT, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_APBT, tmp & ~0x60, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, tmp | 0x38, 0xFF);
+ if (ret)
+ return;
+}
+
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->dbcc_en)
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+
+ rtw8922a_rfk_pll_init(rtwdev);
+}
+
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ bool mlo_1_1;
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ mlo_1_1 = rtw89_is_mlo_1_1(rtwdev);
+ if (mlo_1_1)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ALLON);
+ else if (phy_idx == RTW89_PHY_0)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ON_OFF);
+ else
+ rtw8922a_set_syn01(rtwdev, RF_SYN_OFF_ON);
+
+ fsleep(1000);
+}
+
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
new file mode 100644
index 000000000000..66bdd57c1eea
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8922A_RFK_H__
+#define __RTW89_8922A_RFK_H__
+
+#include "core.h"
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev);
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 7b3d98d2c402..4981b657bd7b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
.rx_ring_eq_is_full = true,
+ .check_rx_tag = true,
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -79,7 +80,7 @@ static struct pci_driver rtw89_8922ae_driver = {
.id_table = rtw89_8922ae_id_table,
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
+ .driver.pm = &rtw89_pm_ops_be,
};
module_pci_driver(rtw89_8922ae_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 5c7ca36c09b6..ccad026defb5 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -41,34 +41,8 @@ static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev)
static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- int ret;
-
- if (enable_wow) {
- ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
- if (ret) {
- rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
- return ret;
- }
- rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
- rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
- rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
- rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
- } else {
- ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
- if (ret) {
- rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
- return ret;
- }
- rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
- rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
- }
- return 0;
+ return mac->wow_config_mac(rtwdev, enable_wow);
}
static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
@@ -85,21 +59,14 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 wow_reason_reg = rtwdev->chip->wow_reason_reg;
struct cfg80211_wowlan_nd_info nd_info;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
- u32 wow_reason_reg;
u8 reason;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
- wow_reason_reg = R_AX_C2HREG_DATA3 + 3;
- else
- wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3;
-
reason = rtw89_read8(rtwdev, wow_reason_reg);
-
switch (reason) {
case RTW89_WOW_RSN_RX_DEAUTH:
wakeup.disconnect = true;
@@ -470,13 +437,14 @@ static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 polling;
int ret;
ret = read_poll_timeout_atomic(rtw89_read8_mask, polling,
wow_enable == !!polling,
50, 50000, false, rtwdev,
- R_AX_WOW_CTRL, B_AX_WOW_WOWEN);
+ mac->wow_ctrl.addr, mac->wow_ctrl.mask);
if (ret)
rtw89_err(rtwdev, "failed to check wow status %s\n",
wow_enable ? "enabled" : "disabled");
@@ -519,7 +487,7 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
return ret;
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c assoc cmac tbl\n");
return ret;
@@ -566,7 +534,7 @@ static int rtw89_wow_enable_trx_pre(struct rtw89_dev *rtwdev)
rtw89_mac_ptk_drop_by_band_and_wait(rtwdev, RTW89_MAC_0);
- ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ ret = rtw89_hci_poll_txdma_ch_idle(rtwdev);
if (ret) {
rtw89_err(rtwdev, "txdma ch busy\n");
return ret;
@@ -589,7 +557,7 @@ static int rtw89_wow_enable_trx_post(struct rtw89_dev *rtwdev)
rtw89_hci_disable_intr(rtwdev);
rtw89_hci_ctrl_trxhci(rtwdev, false);
- ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ ret = rtw89_hci_poll_txdma_ch_idle(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to poll txdma ch idle pcie\n");
return ret;
@@ -699,14 +667,14 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_cfg_wake(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable config wake\n");
goto out;
}
- rtw89_fw_release_general_pkt_list(rtwdev, true);
-
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 05890536e353..211fa25b9a78 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -740,7 +740,7 @@ u16 rsi_get_connected_channel(struct ieee80211_vif *vif)
return 0;
bss = &vif->bss_conf;
- channel = bss->chandef.chan;
+ channel = bss->chanreq.oper.chan;
if (!channel)
return 0;
@@ -759,7 +759,7 @@ static void rsi_switch_channel(struct rsi_hw *adapter,
if (!vif)
return;
- channel = vif->bss_conf.chandef.chan;
+ channel = vif->bss_conf.chanreq.oper.chan;
if (!channel)
return;
@@ -1957,6 +1957,10 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw)
#endif
static const struct ieee80211_ops mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rsi_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rsi_mac80211_start,
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 10a465686439..dccc139cabb2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -232,17 +232,17 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
if (!usb_reg_buf)
return status;
- usb_reg_buf[0] = (cpu_to_le32(value) & 0x00ff);
- usb_reg_buf[1] = (cpu_to_le32(value) & 0xff00) >> 8;
- usb_reg_buf[2] = (cpu_to_le32(value) & 0x00ff0000) >> 16;
- usb_reg_buf[3] = (cpu_to_le32(value) & 0xff000000) >> 24;
+ usb_reg_buf[0] = value & 0x00ff;
+ usb_reg_buf[1] = (value & 0xff00) >> 8;
+ usb_reg_buf[2] = (value & 0x00ff0000) >> 16;
+ usb_reg_buf[3] = (value & 0xff000000) >> 24;
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
RSI_USB_REQ_OUT,
- ((cpu_to_le32(reg) & 0xffff0000) >> 16),
- (cpu_to_le32(reg) & 0xffff),
+ (reg & 0xffff0000) >> 16,
+ reg & 0xffff,
(void *)usb_reg_buf,
len,
USB_CTRL_SET_TIMEOUT);
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 537caf9d914a..a904602f02ce 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -144,13 +144,13 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
- chan0 = vif_ch0->bss_conf.chandef.chan;
+ chan0 = vif_ch0->bss_conf.chanreq.oper.chan;
}
if (wdev_to_wvif(wvif->wdev, 1)) {
struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
- chan1 = vif_ch1->bss_conf.chandef.chan;
+ chan1 = vif_ch1->bss_conf.chanreq.oper.chan;
}
if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
if (chan0->hw_value == chan1->hw_value) {
@@ -344,6 +344,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
const int akm_suite_size = 4 / sizeof(u16);
+ int ret = -EINVAL;
const u16 *ptr;
if (unlikely(!skb))
@@ -352,22 +353,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
if (unlikely(!ptr))
- return -EINVAL;
+ goto free_skb;
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + pairwise_cipher_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + akm_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
- return 0;
+ ret = 0;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
}
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index 4c30b5772ce0..00c4731d8f8e 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
@@ -178,12 +178,15 @@ static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
return ret;
}
+/* Like the rest of the driver, this only supports one device per system */
+static struct gpio_desc *cw1200_reset;
+static struct gpio_desc *cw1200_powerup;
+
static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -196,16 +199,21 @@ static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
{
- /* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
+ /* Ensure I/Os are pulled low (reset is active low) */
+ cw1200_reset = devm_gpiod_get_optional(NULL, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cw1200_reset)) {
+ pr_err("could not get CW1200 SDIO reset GPIO\n");
+ return PTR_ERR(cw1200_reset);
}
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
+ gpiod_set_consumer_name(cw1200_reset, "cw1200_wlan_reset");
+ cw1200_powerup = devm_gpiod_get_optional(NULL, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(cw1200_powerup)) {
+ pr_err("could not get CW1200 SDIO powerup GPIO\n");
+ return PTR_ERR(cw1200_powerup);
}
- if (pdata->reset || pdata->powerup)
+ gpiod_set_consumer_name(cw1200_powerup, "cw1200_wlan_powerup");
+
+ if (cw1200_reset || cw1200_powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -226,13 +234,13 @@ static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (cw1200_powerup) {
+ gpiod_set_value(cw1200_powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(50); /* Or more..? */
}
return 0;
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index c82c0688b549..4f346fb977a9 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -11,7 +11,7 @@
*/
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -38,6 +38,8 @@ struct hwbus_priv {
const struct cw1200_platform_data_spi *pdata;
spinlock_t lock; /* Serialize all bus operations */
wait_queue_head_t wq;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerup;
int claimed;
};
@@ -80,7 +82,7 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
#endif
/* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
+ regaddr = (__force u16)cpu_to_le16(regaddr);
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
@@ -145,7 +147,7 @@ static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
#endif
/* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
+ regaddr = (__force u16)cpu_to_le16(regaddr);
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
@@ -275,12 +277,12 @@ static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
free_irq(self->func->irq, self);
}
-static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_off(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (self->reset) {
+ /* Assert RESET, note active low */
+ gpiod_set_value(self->reset, 1);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -291,18 +293,12 @@ static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
return 0;
}
-static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_on(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
/* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
- }
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
- }
- if (pdata->reset || pdata->powerup)
+ gpiod_direction_output(self->reset, 1); /* Active low */
+ gpiod_direction_output(self->powerup, 0);
+ if (self->reset || self->powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -323,13 +319,13 @@ static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (self->powerup) {
+ gpiod_set_value(self->powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (self->reset) {
+ gpiod_set_value(self->reset, 0);
msleep(50); /* Or more..? */
}
return 0;
@@ -381,20 +377,33 @@ static int cw1200_spi_probe(struct spi_device *func)
spi_get_chipselect(func, 0), func->mode, func->bits_per_word,
func->max_speed_hz);
- if (cw1200_spi_on(plat_data)) {
+ self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ pr_err("Can't allocate SPI hwbus_priv.");
+ return -ENOMEM;
+ }
+
+ /* Request reset asserted */
+ self->reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(self->reset))
+ return dev_err_probe(&func->dev, PTR_ERR(self->reset),
+ "could not get reset GPIO\n");
+ gpiod_set_consumer_name(self->reset, "cw1200_wlan_reset");
+
+ self->powerup = devm_gpiod_get_optional(&func->dev, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(self->powerup))
+ return dev_err_probe(&func->dev, PTR_ERR(self->powerup),
+ "could not get powerup GPIO\n");
+ gpiod_set_consumer_name(self->powerup, "cw1200_wlan_powerup");
+
+ if (cw1200_spi_on(self, plat_data)) {
pr_err("spi_on() failed!\n");
- return -1;
+ return -ENODEV;
}
if (spi_setup(func)) {
pr_err("spi_setup() failed!\n");
- return -1;
- }
-
- self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
- if (!self) {
- pr_err("Can't allocate SPI hwbus_priv.");
- return -ENOMEM;
+ return -ENODEV;
}
self->pdata = plat_data;
@@ -416,7 +425,7 @@ static int cw1200_spi_probe(struct spi_device *func)
if (status) {
cw1200_spi_irq_unsubscribe(self);
- cw1200_spi_off(plat_data);
+ cw1200_spi_off(self, plat_data);
}
return status;
@@ -434,7 +443,7 @@ static void cw1200_spi_disconnect(struct spi_device *func)
self->core = NULL;
}
}
- cw1200_spi_off(dev_get_platdata(&func->dev));
+ cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 381013e0db63..a54a7b86864f 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -203,6 +203,10 @@ static const unsigned long cw1200_ttl[] = {
};
static const struct ieee80211_ops cw1200_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = cw1200_start,
.stop = cw1200_stop,
.add_interface = cw1200_add_interface,
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd9a41f59f32..0da2d29dd7bd 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1351,6 +1351,10 @@ static struct ieee80211_supported_band wl1251_band_2ghz = {
};
static const struct ieee80211_ops wl1251_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wl1251_op_start,
.stop = wl1251_op_stop,
.add_interface = wl1251_op_add_interface,
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1e082d039b82..2499dc908305 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -233,7 +233,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
cancel_delayed_work(&wlvif->channel_switch_work);
} else {
set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 5736acb4d206..ef12169f8044 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2910,7 +2910,7 @@ static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int ret;
wlvif->aid = vif->cfg.aid;
- wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+ wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
@@ -4242,7 +4242,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
+ (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
@@ -4515,7 +4515,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* Handle new association with HT. Do this after join. */
if (sta_exists) {
bool enabled =
- bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+ bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
ret = wlcore_hw_set_peer_cap(wl,
&sta_ht_cap,
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index eb5482ed76ae..92fb5b8dcdae 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -16,7 +16,6 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>
#include <linux/of.h>
@@ -75,8 +74,8 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
sdio_release_host(func);
- if (WARN_ON(ret))
- dev_err(child->parent, "sdio read failed (%d)\n", ret);
+ if (ret)
+ dev_err_ratelimited(child->parent, "sdio read failed (%d)\n", ret);
if (unlikely(dump)) {
printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
@@ -120,8 +119,8 @@ static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
sdio_release_host(func);
- if (WARN_ON(ret))
- dev_err(child->parent, "sdio write failed (%d)\n", ret);
+ if (ret)
+ dev_err_ratelimited(child->parent, "sdio write failed (%d)\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index a84340c2075f..b55fe320633c 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
/*
@@ -196,8 +196,11 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_04 = {
.reg_rules = {
REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
- REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, NL80211_RRF_AUTO_BW),
REG_RULE(5260 - 10, 5320 + 10, 80, 0, 30,
+ NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS |
+ NL80211_RRF_AUTO_BW),
+ REG_RULE(5500 - 10, 5720 + 10, 160, 0, 30,
NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS),
REG_RULE(5745 - 10, 5825 + 10, 80, 0, 30, 0),
REG_RULE(5855 - 10, 5925 + 10, 80, 0, 33, 0),
@@ -213,6 +216,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
struct hwsim_vif_priv {
u32 magic;
+ u32 skip_beacons;
u8 bssid[ETH_ALEN];
bool assoc;
bool bcn_en;
@@ -2128,6 +2132,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+ debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
+ &vp->skip_beacons);
+}
+#endif
static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2193,12 +2207,19 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct ieee80211_tx_info *info;
struct ieee80211_rate *txrate;
struct ieee80211_mgmt *mgmt;
/* TODO: get MCS */
int bitrate = 100;
+ if (vp->skip_beacons) {
+ vp->skip_beacons--;
+ dev_kfree_skb(skb);
+ return;
+ }
+
info = IEEE80211_SKB_CB(skb);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(vif, NULL, skb,
@@ -2284,8 +2305,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(link_conf->chanctx_conf)->def.chan);
}
- if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id))
+ ieee80211_csa_finish(vif, link_id);
}
static enum hrtimer_restart
@@ -2462,7 +2483,7 @@ static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION &&
- changed & BSS_CHANGED_MLD_VALID_LINKS) {
+ changed & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM)) {
u16 usable_links = ieee80211_vif_usable_links(vif);
if (vif->active_links != usable_links)
@@ -2653,10 +2674,11 @@ static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
return mac80211_hwsim_sta_add(hw, vif, sta);
/*
- * when client is authorized (AP station marked as such),
- * enable all links
+ * in an MLO connection, when client is authorized
+ * (AP station marked as such), enable all links
*/
- if (vif->type == NL80211_IFTYPE_STATION &&
+ if (ieee80211_vif_is_mld(vif) &&
+ vif->type == NL80211_IFTYPE_STATION &&
new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
ieee80211_set_active_links_async(vif,
ieee80211_vif_usable_links(vif));
@@ -2738,6 +2760,24 @@ static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static enum ieee80211_neg_ttlm_res
+mac80211_hwsim_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u32 i;
+
+ /* For testing purposes, accept if all TIDs are mapped to the same links
+ * set, otherwise reject.
+ */
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->downlink[i] != neg_ttlm->downlink[0])
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -3175,6 +3215,47 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ int i;
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context mode: %u\n", mode);
+
+ for (i = 0; i < n_vifs; i++) {
+ hwsim_check_chanctx_magic(vifs[i].old_ctx);
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context: %d MHz/width: %d/cfreqs:%d/%d MHz -> %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].old_ctx->def.center_freq1,
+ vifs[i].old_ctx->def.center_freq2,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.width,
+ vifs[i].new_ctx->def.center_freq1,
+ vifs[i].new_ctx->def.center_freq2);
+
+ switch (mode) {
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ hwsim_check_chanctx_magic(vifs[i].new_ctx);
+ break;
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ hwsim_set_chanctx_magic(vifs[i].new_ctx);
+ hwsim_clear_chanctx_magic(vifs[i].old_ctx);
+ break;
+ default:
+ WARN_ON("Invalid mode");
+ }
+ }
+ return 0;
+}
+
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
@@ -3839,6 +3920,13 @@ out:
return err;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+#define HWSIM_DEBUGFS_OPS \
+ .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs,
+#else
+#define HWSIM_DEBUGFS_OPS
+#endif
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.wake_tx_queue = ieee80211_handle_wake_tx_queue, \
@@ -3863,7 +3951,8 @@ out:
.get_et_stats = mac80211_hwsim_get_et_stats, \
.get_et_strings = mac80211_hwsim_get_et_strings, \
.start_pmsr = mac80211_hwsim_start_pmsr, \
- .abort_pmsr = mac80211_hwsim_abort_pmsr,
+ .abort_pmsr = mac80211_hwsim_abort_pmsr, \
+ HWSIM_DEBUGFS_OPS
#define HWSIM_NON_MLO_OPS \
.sta_add = mac80211_hwsim_sta_add, \
@@ -3877,6 +3966,10 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
HWSIM_NON_MLO_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
};
#define HWSIM_CHANCTX_OPS \
@@ -3888,7 +3981,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
.remove_chanctx = mac80211_hwsim_remove_chanctx, \
.change_chanctx = mac80211_hwsim_change_chanctx, \
.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\
- .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, \
+ .switch_vif_chanctx = mac80211_hwsim_switch_vif_chanctx,
static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
HWSIM_COMMON_OPS
@@ -3903,6 +3997,7 @@ static const struct ieee80211_ops mac80211_hwsim_mlo_ops = {
.change_vif_links = mac80211_hwsim_change_vif_links,
.change_sta_links = mac80211_hwsim_change_sta_links,
.sta_state = mac80211_hwsim_sta_state,
+ .can_neg_ttlm = mac80211_hwsim_can_neg_ttlm,
};
struct hwsim_new_radio_params {
@@ -4965,6 +5060,33 @@ static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_MESH_POINT) | \
BIT(NL80211_IFTYPE_OCB))
+static const u8 iftypes_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+#define MAC80211_HWSIM_MLD_CAPA_OPS \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS, \
+ IEEE80211_MLD_MAX_NUM_LINKS - 1)
+
+static const struct wiphy_iftype_ext_capab mac80211_hwsim_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = iftypes_ext_capa_ap,
+ .extended_capabilities_mask = iftypes_ext_capa_ap,
+ .extended_capabilities_len = sizeof(iftypes_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_EMLMR_SUPPORT,
+ .mld_capa_and_ops = MAC80211_HWSIM_MLD_CAPA_OPS,
+ },
+};
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -5159,6 +5281,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, AP_LINK_PS);
+
+ hw->wiphy->iftype_ext_capab = mac80211_hwsim_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(mac80211_hwsim_iftypes_ext_capa);
} else {
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
@@ -5309,7 +5435,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
schedule_timeout_interruptible(1);
}
- /* TODO: Add param */
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_DFS_CONCURRENT);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index 4676cdaf4cfd..21b1afd83dc1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2020, 2022-2024 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -84,6 +84,8 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_START_PMSR: request to start peer measurement with the
* %HWSIM_ATTR_PMSR_REQUEST. Result will be sent back asynchronously
* with %HWSIM_CMD_REPORT_PMSR.
+ * @HWSIM_CMD_ABORT_PMSR: Abort previously started peer measurement.
+ * @HWSIM_CMD_REPORT_PMSR: Report peer measurement data.
* @__HWSIM_CMD_MAX: enum limit
*/
enum hwsim_commands {
@@ -298,6 +300,7 @@ enum hwsim_vqs {
* Information about a receiving or transmitting bitrate
* that can be mapped to struct rate_info
*
+ * @__HWSIM_RATE_INFO_ATTR_INVALID: reserved, netlink attribute 0 is invalid
* @HWSIM_RATE_INFO_ATTR_FLAGS: bitflag of flags from &enum rate_info_flags
* @HWSIM_RATE_INFO_ATTR_MCS: mcs index if struct describes an HT/VHT/HE rate
* @HWSIM_RATE_INFO_ATTR_LEGACY: bitrate in 100kbit/s for 802.11abg
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index ba14d83353a4..6a84ec58d618 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -453,7 +453,7 @@ static int virt_wifi_net_device_get_iflink(const struct net_device *dev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
- return priv->lowerdev->ifindex;
+ return READ_ONCE(priv->lowerdev->ifindex);
}
static const struct net_device_ops virt_wifi_ops = {
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_def.h b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
index 8ca2d0aab170..2f55e8deee82 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
@@ -12,7 +12,7 @@
#include <linux/stringify.h>
#include <linux/device.h>
-typedef u16 __nocast zd_addr_t;
+typedef u16 zd_addr_t;
#define dev_printk_f(level, dev, fmt, args...) \
dev_printk(level, dev, "%s() " fmt, __func__, ##args)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 5d534e15a844..900c063bd724 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1343,6 +1343,10 @@ static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops zd_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = zd_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = zd_op_start,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 8505d84eeed6..f3b567a13ded 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -380,7 +380,7 @@ static inline void handle_regs_int(struct urb *urb)
spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
- if (int_num == CR_INTERRUPT) {
+ if (int_num == (u16)CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
@@ -416,7 +416,8 @@ out:
spin_unlock_irqrestore(&intr->lock, flags);
/* CR_INTERRUPT might override read_reg too. */
- if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
+ if (int_num == (u16)CR_INTERRUPT &&
+ atomic_read(&intr->read_regs_enabled))
handle_regs_int_override(urb);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index cc70360364b7..abc41a7089fa 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -57,8 +57,6 @@
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000
-#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
-
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
@@ -161,7 +159,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
skb_reset_tail_pointer(skb);
skb_put(skb, le16_to_cpu(gpd->data_buff_len));
- ret = md_ctrl->recv_skb(queue, skb);
+ ret = queue->recv_skb(queue, skb);
/* Break processing, will try again later */
if (ret < 0)
return ret;
@@ -897,13 +895,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
/**
* t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
* @recv_skb: Receiving skb callback.
*/
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
- md_ctrl->recv_skb = recv_skb;
+ queue->recv_skb = recv_skb;
}
/**
@@ -993,6 +991,28 @@ allow_sleep:
return ret;
}
+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+ int qno;
+
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+ md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+ }
+
+ md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+ md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+ if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+ md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+ t7xx_port_proxy_recv_skb_from_dedicated_queue);
+ }
+}
+
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
char dma_pool_name[32];
@@ -1018,16 +1038,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
-
- md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
- md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
- if (j == CLDMA_RXQ_NUM - 1)
- md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
if (ret) {
dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1094,6 +1107,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct cldma_ctrl *md_ctrl;
+ int qno;
md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
if (!md_ctrl)
@@ -1102,7 +1116,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
md_ctrl->t7xx_dev = t7xx_dev;
md_ctrl->dev = dev;
md_ctrl->hif_id = hif_id;
- md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+ md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
t7xx_hw_info_init(md_ctrl);
t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
return 0;
@@ -1332,9 +1348,10 @@ err_workqueue:
return -ENOMEM;
}
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_adjust_config(md_ctrl, cfg_id);
t7xx_cldma_late_init(md_ctrl);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index 4410bac6993a..f2d9941be9c8 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -31,6 +31,10 @@
#include "t7xx_cldma.h"
#include "t7xx_pci.h"
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
+#define CLDMA_SHARED_Q_BUFF_SZ 3584
+#define CLDMA_DEDICATED_Q_BUFF_SZ 2048
+
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,11 @@ struct cldma_gpd {
__le16 not_used2;
};
+enum cldma_cfg {
+ CLDMA_SHARED_Q_CFG,
+ CLDMA_DEDICATED_Q_CFG,
+};
+
struct cldma_request {
struct cldma_gpd *gpd; /* Virtual address for CPU */
dma_addr_t gpd_addr; /* Physical address for DMA */
@@ -82,6 +91,7 @@ struct cldma_queue {
wait_queue_head_t req_wq; /* Only for TX */
struct workqueue_struct *worker;
struct work_struct cldma_work;
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
struct cldma_ctrl {
@@ -101,24 +111,22 @@ struct cldma_ctrl {
struct md_pm_entity *pm_entity;
struct t7xx_cldma_hw hw_info;
bool is_late_init;
- int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
+#define CLDMA_Q_IDX_DUMP 1
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_IOC BIT(7)
#define GPD_DMAPOOL_ALIGN 16
-#define CLDMA_MTU 3584 /* 3.5kB */
-
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 24e7d491468e..8d864d4ed77f 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -177,6 +177,11 @@ int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
return t7xx_acpi_reset(t7xx_dev, "_RST");
}
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+}
+
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
u32 val;
@@ -192,6 +197,7 @@ static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
@@ -529,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in core_reset() */
t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
@@ -544,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
md->core_ap.handshake_ongoing = true;
t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
@@ -758,6 +764,7 @@ err_destroy_hswq:
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
struct t7xx_modem *md = t7xx_dev->md;
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
@@ -765,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
if (!md->md_init_finish)
return;
- t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index abe633cf7adc..b39e945a92e0 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -85,6 +85,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 91256e005b84..e0b1e7a616ca 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -52,6 +52,81 @@
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
#define PM_RESOURCE_POLL_STEP_US 100
+static const char * const t7xx_mode_names[] = {
+ [T7XX_UNKNOWN] = "unknown",
+ [T7XX_READY] = "ready",
+ [T7XX_RESET] = "reset",
+ [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
+ [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
+ [T7XX_FASTBOOT_DUMP] = "fastboot_dump",
+};
+
+static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);
+
+static ssize_t t7xx_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+ int index = 0;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == T7XX_FASTBOOT_SWITCHING) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ } else if (index == T7XX_RESET) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
+ t7xx_acpi_pldr_func(t7xx_dev);
+ }
+
+ return count;
+};
+
+static ssize_t t7xx_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum t7xx_mode mode = T7XX_UNKNOWN;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ mode = READ_ONCE(t7xx_dev->mode);
+ if (mode < T7XX_MODE_LAST)
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);
+
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
+}
+
+static DEVICE_ATTR_RW(t7xx_mode);
+
+static struct attribute *t7xx_mode_attr[] = {
+ &dev_attr_t7xx_mode.attr,
+ NULL
+};
+
+static const struct attribute_group t7xx_mode_attribute_group = {
+ .attrs = t7xx_mode_attr,
+};
+
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
+{
+ if (!t7xx_dev)
+ return;
+
+ WRITE_ONCE(t7xx_dev->mode, mode);
+ sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
+}
+
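As a usage illustration, a minimal user-space helper (hypothetical, not part of the driver) that writes one of the mode names above to the new sysfs attribute on the PCI function. Per the store handler shown here, only "fastboot_switching" and "reset" are acted upon.

/* Hypothetical helper: request a mode change via the t7xx_mode attribute
 * added above. The BDF string is whatever PCI address the modem has. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int t7xx_request_mode(const char *bdf, const char *mode)
{
	char path[128];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/t7xx_mode", bdf);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, mode, strlen(mode));
	close(fd);
	return n < 0 ? -1 : 0;
}

/* e.g. t7xx_request_mode("0000:72:00.0", "fastboot_switching"); */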
enum t7xx_pm_state {
MTK_PM_EXCEPTION,
MTK_PM_INIT, /* Device initialized, but handshake not completed */
@@ -108,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(&pdev->dev);
- return t7xx_wait_pm_config(t7xx_dev);
+ return 0;
}
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
@@ -279,7 +354,8 @@ static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
int ret;
t7xx_dev = pci_get_drvdata(pdev);
- if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
+ READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
return -EFAULT;
}
@@ -729,16 +805,28 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+ ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+ if (ret)
+ goto err_md_exit;
+
ret = t7xx_interrupt_init(t7xx_dev);
- if (ret) {
- t7xx_md_exit(t7xx_dev);
- return ret;
- }
+ if (ret)
+ goto err_remove_group;
+
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
t7xx_pcie_mac_interrupts_en(t7xx_dev);
return 0;
+
+err_remove_group:
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+
+err_md_exit:
+ t7xx_md_exit(t7xx_dev);
+ return ret;
}
static void t7xx_pci_remove(struct pci_dev *pdev)
@@ -747,6 +835,9 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
int i;
t7xx_dev = pci_get_drvdata(pdev);
+
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
t7xx_md_exit(t7xx_dev);
for (i = 0; i < EXT_INT_NUM; i++) {
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index f08f1ab74469..49a11586d8d8 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -43,6 +43,16 @@ struct t7xx_addr_base {
typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
+enum t7xx_mode {
+ T7XX_UNKNOWN,
+ T7XX_READY,
+ T7XX_RESET,
+ T7XX_FASTBOOT_SWITCHING,
+ T7XX_FASTBOOT_DOWNLOAD,
+ T7XX_FASTBOOT_DUMP,
+ T7XX_MODE_LAST, /* must always be last */
+};
+
/* struct t7xx_pci_dev - MTK device context structure
* @intr_handler: array of handler function for request_threaded_irq
* @intr_thread: array of thread_fn for request_threaded_irq
@@ -59,6 +69,7 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
* @md_pm_lock: protects PCIe sleep lock
* @sleep_disable_count: PCIe L1.2 lock counter
* @sleep_lock_acquire: indicates that sleep has been disabled
+ * @mode: indicates the device mode
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -82,6 +93,7 @@ struct t7xx_pci_dev {
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *debugfs_dir;
#endif
+ u32 mode;
};
enum t7xx_pm_id {
@@ -120,5 +132,5 @@ int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_enti
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
-
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
index 4ae8a00a8532..f74d3bab810d 100644
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -75,6 +75,8 @@ enum port_ch {
PORT_CH_DSS6_TX = 0x20df,
PORT_CH_DSS7_RX = 0x20e0,
PORT_CH_DSS7_TX = 0x20e1,
+
+ PORT_CH_UNIMPORTANT = 0xffff,
};
struct t7xx_port;
@@ -135,11 +137,13 @@ struct t7xx_port {
};
};
+int t7xx_get_port_mtu(struct t7xx_port *port);
struct sk_buff *t7xx_port_alloc_skb(int payload);
struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg);
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
unsigned int ex_msg);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 274846d39fbf..7d6388bf1d7c 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,6 +48,9 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
+#define T7XX_MAX_POSSIBLE_PORTS_NUM \
+ (max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf)))
+
static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
@@ -100,6 +103,21 @@ static const struct t7xx_port_conf t7xx_port_conf[] = {
},
};
+static const struct t7xx_port_conf t7xx_early_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UNIMPORTANT,
+ .rx_ch = PORT_CH_UNIMPORTANT,
+ .txq_index = CLDMA_Q_IDX_DUMP,
+ .rxq_index = CLDMA_Q_IDX_DUMP,
+ .txq_exp_index = CLDMA_Q_IDX_DUMP,
+ .rxq_exp_index = CLDMA_Q_IDX_DUMP,
+ .path_id = CLDMA_ID_AP,
+ .ops = &wwan_sub_port_ops,
+ .name = "fastboot",
+ .port_type = WWAN_PORT_FASTBOOT,
+ },
+};
+
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
const struct t7xx_port_conf *port_conf;
@@ -214,7 +232,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
return 0;
}
-static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+int t7xx_get_port_mtu(struct t7xx_port *port)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ int tx_qno = t7xx_port_get_queue_no(port);
+ struct cldma_ctrl *md_ctrl;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ return md_ctrl->tx_ring[tx_qno].pkt_size;
+}
+
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
enum cldma_id path_id = port->port_conf->path_id;
struct cldma_ctrl *md_ctrl;
@@ -329,6 +357,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
}
}
+/**
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch early port received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0 - Packet consumed.
+ ** -ERROR - Failed to process skb.
+ */
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int ret;
+
+ port = &port_prox->ports[0];
+ if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ port_conf = port->port_conf;
+ ret = port_conf->ops->recv_skb(port, skb);
+ if (ret < 0 && ret != -ENOBUFS) {
+ dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
+ dev_kfree_skb_any(skb);
+ }
+
+ return ret;
+}
+
static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
struct cldma_queue *queue, u16 channel)
{
@@ -359,7 +420,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
** 0 - Packet consumed.
** -ERROR - Failed to process skb.
*/
-static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
@@ -444,33 +505,56 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
spin_lock_init(&port->port_update_lock);
port->chan_enable = false;
- if (port_conf->ops->init)
+ if (port_conf->ops && port_conf->ops->init)
port_conf->ops->init(port);
}
t7xx_proxy_setup_ch_mapping(port_prox);
}
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ u32 port_count;
+ int i;
+
+ t7xx_port_proxy_uninit(port_prox);
+
+ if (cfg_id == PORT_CFG_ID_EARLY) {
+ port_conf = t7xx_early_port_conf;
+ port_count = ARRAY_SIZE(t7xx_early_port_conf);
+ } else {
+ port_conf = t7xx_port_conf;
+ port_count = ARRAY_SIZE(t7xx_port_conf);
+ }
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &port_conf[i];
+
+ port_prox->cfg_id = cfg_id;
+ port_prox->port_count = port_count;
+
+ t7xx_proxy_init_all_ports(md);
+}
+
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
- int i;
- port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ port_prox = devm_kzalloc(dev,
+ struct_size(port_prox,
+ ports,
+ T7XX_MAX_POSSIBLE_PORTS_NUM),
GFP_KERNEL);
if (!port_prox)
return -ENOMEM;
md->port_prox = port_prox;
port_prox->dev = dev;
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
- for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_port_conf[i];
-
- port_prox->port_count = port_count;
- t7xx_proxy_init_all_ports(md);
return 0;
}
@@ -492,8 +576,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
if (ret)
return ret;
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
@@ -505,7 +587,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
- if (port_conf->ops->uninit)
+ if (port_conf->ops && port_conf->ops->uninit)
port_conf->ops->uninit(port);
}
}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
index 81d059fbc0fb..7f5706811445 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -31,11 +31,18 @@
#define RX_QUEUE_MAXLEN 32
#define CTRL_QUEUE_MAXLEN 16
+enum port_cfg_id {
+ PORT_CFG_ID_INVALID,
+ PORT_CFG_ID_NORMAL,
+ PORT_CFG_ID_EARLY,
+};
+
struct port_proxy {
int port_count;
struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
struct device *dev;
+ enum port_cfg_id cfg_id;
struct t7xx_port ports[];
};
@@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int
int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag);
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb);
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb);
#endif /* __T7XX_PORT_PROXY_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 17389c8f6600..4b23ba693f3f 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
+ * Copyright (c) 2024, Fibocom Wireless Inc.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
@@ -15,6 +16,7 @@
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
+ * Jinjian Song <jinjian.song@fibocom.com>
*/
#include <linux/atomic.h>
@@ -33,7 +35,7 @@
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
-static int t7xx_port_ctrl_start(struct wwan_port *port)
+static int t7xx_port_wwan_start(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
@@ -44,30 +46,60 @@ static int t7xx_port_ctrl_start(struct wwan_port *port)
return 0;
}
-static void t7xx_port_ctrl_stop(struct wwan_port *port)
+static void t7xx_port_wwan_stop(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
atomic_dec(&port_mtk->usage_cnt);
}
-static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+static int t7xx_port_fastboot_tx(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct sk_buff *cur = skb, *tx_skb;
+ size_t actual, len, offset = 0;
+ int txq_mtu;
+ int ret;
+
+ txq_mtu = t7xx_get_port_mtu(port);
+ if (txq_mtu < 0)
+ return -EINVAL;
+
+ actual = cur->len;
+ while (actual) {
+ len = min_t(size_t, actual, txq_mtu);
+ tx_skb = __dev_alloc_skb(len, GFP_KERNEL);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ skb_put_data(tx_skb, cur->data + offset, len);
+
+ ret = t7xx_port_send_raw_skb(port, tx_skb);
+ if (ret) {
+ dev_kfree_skb(tx_skb);
+ dev_err(port->dev, "Write error on fastboot port, %d\n", ret);
+ break;
+ }
+ offset += len;
+ actual -= len;
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int t7xx_port_ctrl_tx(struct t7xx_port *port, struct sk_buff *skb)
{
- struct t7xx_port *port_private = wwan_port_get_drvdata(port);
const struct t7xx_port_conf *port_conf;
struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
int cnt = 0, ret;
- if (!port_private->chan_enable)
- return -EINVAL;
-
- port_conf = port_private->port_conf;
- ctl = port_private->t7xx_dev->md->fsm_ctl;
+ port_conf = port->port_conf;
+ ctl = port->t7xx_dev->md->fsm_ctl;
md_state = t7xx_fsm_get_md_state(ctl);
if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
- dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ dev_warn(port->dev, "Cannot write to %s port when md_state=%d\n",
port_conf->name, md_state);
return -ENODEV;
}
@@ -75,10 +107,10 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
while (cur) {
cloned = skb_clone(cur, GFP_KERNEL);
cloned->len = skb_headlen(cur);
- ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
+ ret = t7xx_port_send_skb(port, cloned, 0, 0);
if (ret) {
dev_kfree_skb(cloned);
- dev_err(port_private->dev, "Write error on %s port, %d\n",
+ dev_err(port->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
return cnt ? cnt + ret : ret;
}
@@ -93,14 +125,53 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
return 0;
}
+static int t7xx_port_wwan_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ const struct t7xx_port_conf *port_conf = port_private->port_conf;
+ int ret;
+
+ if (!port_private->chan_enable)
+ return -EINVAL;
+
+ if (port_conf->port_type != WWAN_PORT_FASTBOOT)
+ ret = t7xx_port_ctrl_tx(port_private, skb);
+ else
+ ret = t7xx_port_fastboot_tx(port_private, skb);
+
+ return ret;
+}
+
static const struct wwan_port_ops wwan_ops = {
- .start = t7xx_port_ctrl_start,
- .stop = t7xx_port_ctrl_stop,
- .tx = t7xx_port_ctrl_tx,
+ .start = t7xx_port_wwan_start,
+ .stop = t7xx_port_wwan_stop,
+ .tx = t7xx_port_wwan_tx,
};
+static void t7xx_port_wwan_create(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header), mtu;
+ struct wwan_port_caps caps;
+
+ if (!port->wwan.wwan_port) {
+ mtu = t7xx_get_port_mtu(port);
+ caps.frag_len = mtu - header_len;
+ caps.headroom_len = header_len;
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, &caps, port);
+ if (IS_ERR(port->wwan.wwan_port))
+ dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name);
+ }
+}
+
static int t7xx_port_wwan_init(struct t7xx_port *port)
{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ t7xx_port_wwan_create(port);
+
port->rx_length_th = RX_QUEUE_MAXLEN;
return 0;
}
@@ -152,20 +223,14 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
- unsigned int header_len = sizeof(struct ccci_header);
- struct wwan_port_caps caps;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ return;
if (state != MD_STATE_READY)
return;
- if (!port->wwan.wwan_port) {
- caps.frag_len = CLDMA_MTU - header_len;
- caps.headroom_len = header_len;
- port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, &caps, port);
- if (IS_ERR(port->wwan.wwan_port))
- dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
- }
+ t7xx_port_wwan_create(port);
}
struct port_ops wwan_sub_port_ops = {
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
index c41d7d094c08..9c7dc72ac6f6 100644
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -101,11 +101,33 @@ enum t7xx_pm_resume_state {
PM_RESUME_REG_STATE_L2_EXP,
};
+enum host_event_e {
+ HOST_EVENT_INIT = 0,
+ FASTBOOT_DL_NOTIFY = 0x3,
+};
+
#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c
#define MISC_STAGE_MASK GENMASK(2, 0)
#define MISC_RESET_TYPE_PLDR BIT(26)
#define MISC_RESET_TYPE_FLDR BIT(27)
-#define LINUX_STAGE 4
+#define MISC_RESET_TYPE_PLDR BIT(26)
+#define MISC_LK_EVENT_MASK GENMASK(11, 8)
+#define HOST_EVENT_MASK GENMASK(31, 28)
+
+enum lk_event_id {
+ LK_EVENT_NORMAL = 0,
+ LK_EVENT_CREATE_PD_PORT = 1,
+ LK_EVENT_CREATE_POST_DL_PORT = 2,
+ LK_EVENT_RESET = 7,
+};
+
+enum t7xx_device_stage {
+ T7XX_DEV_STAGE_INIT = 0,
+ T7XX_DEV_STAGE_BROM_PRE = 1,
+ T7XX_DEV_STAGE_BROM_POST = 2,
+ T7XX_DEV_STAGE_LK = 3,
+ T7XX_DEV_STAGE_LINUX = 4,
+};
#define T7XX_PCIE_RESOURCE_STATUS 0x0d28
#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 0bc97430211b..9889ca4621cf 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -47,6 +47,13 @@
#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
#define FSM_CMD_TIMEOUT_MS 2000
+#define wait_for_expected_dev_stage(status) \
+ read_poll_timeout(ioread32, status, \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) || \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000, \
+ 20000000, false, IREG_BASE(md->t7xx_dev) + \
+ T7XX_PCIE_MISC_DEV_STATUS)
+
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
@@ -206,6 +213,55 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
+static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
+{
+ u32 value;
+
+ value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+}
+
+static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
+{
+ struct t7xx_modem *md = ctl->md;
+ struct cldma_ctrl *md_ctrl;
+ enum lk_event_id lk_event;
+ struct device *dev;
+ struct t7xx_port *port;
+
+ dev = &md->t7xx_dev->pdev->dev;
+ lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
+ switch (lk_event) {
+ case LK_EVENT_NORMAL:
+ case LK_EVENT_RESET:
+ break;
+
+ case LK_EVENT_CREATE_PD_PORT:
+ case LK_EVENT_CREATE_POST_DL_PORT:
+ md_ctrl = md->md_ctrl[CLDMA_ID_AP];
+ t7xx_cldma_hif_hw_init(md_ctrl);
+ t7xx_cldma_stop(md_ctrl);
+ t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
+
+ port = &ctl->md->port_prox->ports[0];
+ port->port_conf->ops->enable_chl(port);
+
+ t7xx_cldma_start(md_ctrl);
+
+ if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
+ else
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
+ break;
+
+ default:
+ dev_err(dev, "Invalid LK event %d\n", lk_event);
+ break;
+ }
+}
+
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
ctl->curr_state = FSM_STATE_STOPPED;
@@ -226,8 +282,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
- struct t7xx_pci_dev *t7xx_dev;
- struct cldma_ctrl *md_ctrl;
+ struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+ struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
@@ -235,18 +292,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
return;
}
- md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
- t7xx_dev = ctl->md->t7xx_dev;
-
ctl->curr_state = FSM_STATE_STOPPING;
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (!ctl->md->rgu_irq_asserted) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
- /* Wait for the DRM disable to take effect */
- msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING)
+ t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+ /* Wait for the DRM disable to take effect */
+ msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING) {
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ } else {
err = t7xx_acpi_fldr_func(t7xx_dev);
if (err)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
@@ -272,6 +331,7 @@ static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
ctl->curr_state = FSM_STATE_READY;
t7xx_fsm_broadcast_ready_state(ctl);
+ t7xx_mode_update(md->t7xx_dev, T7XX_READY);
t7xx_md_event_notify(md, FSM_READY);
}
@@ -317,7 +377,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
struct t7xx_modem *md = ctl->md;
- u32 dev_status;
+ struct device *dev;
+ u32 status;
int ret;
if (!md)
@@ -329,23 +390,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
return;
}
+ dev = &md->t7xx_dev->pdev->dev;
ctl->curr_state = FSM_STATE_PRE_START;
t7xx_md_event_notify(md, FSM_PRE_START);
- ret = read_poll_timeout(ioread32, dev_status,
- (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
- false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ ret = wait_for_expected_dev_stage(status);
+
if (ret) {
- struct device *dev = &md->t7xx_dev->pdev->dev;
+ dev_err(dev, "read poll timeout %d\n", ret);
+ goto finish_command;
+ }
- fsm_finish_command(ctl, cmd, -ETIMEDOUT);
- dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
- return;
+ if (status != ctl->status || cmd->flag != 0) {
+ u32 stage = FIELD_GET(MISC_STAGE_MASK, status);
+
+ switch (stage) {
+ case T7XX_DEV_STAGE_INIT:
+ case T7XX_DEV_STAGE_BROM_PRE:
+ case T7XX_DEV_STAGE_BROM_POST:
+ dev_dbg(dev, "BROM_STAGE Entered\n");
+ ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
+ break;
+
+ case T7XX_DEV_STAGE_LK:
+ dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_lk_stage_event_handling(ctl, status);
+ break;
+
+ case T7XX_DEV_STAGE_LINUX:
+ dev_dbg(dev, "LINUX_STAGE Entered\n");
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
+ D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
+ if (cmd->flag == 0)
+ break;
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
+ ret = fsm_routine_starting(ctl);
+ break;
+
+ default:
+ break;
+ }
+ ctl->status = status;
}
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
- fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+finish_command:
+ fsm_finish_command(ctl, cmd, ret);
}
static int fsm_main_thread(void *data)
@@ -517,6 +608,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md)
fsm_flush_event_cmd_qs(ctl);
ctl->curr_state = FSM_STATE_STOPPED;
ctl->exp_flg = false;
+ ctl->status = T7XX_DEV_STAGE_INIT;
}
int t7xx_fsm_init(struct t7xx_modem *md)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index b0b3662ae6d7..7b0a9baf488c 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -96,6 +96,7 @@ struct t7xx_fsm_ctl {
bool exp_flg;
spinlock_t notifier_lock; /* Protects notifier list */
struct list_head notifier_list;
+ u32 status; /* Device boot stage */
};
struct t7xx_fsm_event {
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 72e01e550a16..17431f1b1a0c 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -26,7 +26,9 @@
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
-static struct class *wwan_class;
+static const struct class wwan_class = {
+ .name = "wwan",
+};
static int wwan_major;
static struct dentry *wwan_debugfs_dir;
@@ -130,7 +132,7 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ dev = class_find_device(&wwan_class, NULL, parent, wwan_dev_parent_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -147,7 +149,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ dev = class_find_device(&wwan_class, NULL, name, wwan_dev_name_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -183,7 +185,7 @@ static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ dev = class_find_device(&wwan_class, NULL, dir, wwan_dev_debugfs_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -239,7 +241,7 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
}
wwandev->dev.parent = parent;
- wwandev->dev.class = wwan_class;
+ wwandev->dev.class = &wwan_class;
wwandev->dev.type = &wwan_dev_type;
wwandev->id = id;
dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
@@ -265,7 +267,7 @@ done_unlock:
static int is_wwan_child(struct device *dev, void *data)
{
- return dev->class == wwan_class;
+ return dev->class == &wwan_class;
}
static void wwan_remove_dev(struct wwan_device *wwandev)
@@ -328,6 +330,10 @@ static const struct {
.name = "XMMRPC",
.devsuf = "xmmrpc",
},
+ [WWAN_PORT_FASTBOOT] = {
+ .name = "FASTBOOT",
+ .devsuf = "fastboot",
+ },
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -371,7 +377,7 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ dev = class_find_device(&wwan_class, NULL, &minor, wwan_port_minor_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -401,7 +407,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
return -ENOMEM;
/* Collect ids of same name format ports */
- class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type);
while ((dev = class_dev_iter_next(&iter))) {
if (dev->parent != &wwandev->dev)
continue;
@@ -473,7 +479,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
- port->dev.class = wwan_class;
+ port->dev.class = &wwan_class;
port->dev.type = &wwan_port_dev_type;
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
@@ -916,7 +922,7 @@ static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static struct device_type wwan_type = { .name = "wwan" };
+static const struct device_type wwan_type = { .name = "wwan" };
static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
const char *ifname,
@@ -1208,11 +1214,9 @@ static int __init wwan_init(void)
if (err)
return err;
- wwan_class = class_create("wwan");
- if (IS_ERR(wwan_class)) {
- err = PTR_ERR(wwan_class);
+ err = class_register(&wwan_class);
+ if (err)
goto unregister;
- }
/* chrdev used for wwan ports */
wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
@@ -1229,7 +1233,7 @@ static int __init wwan_init(void)
return 0;
destroy:
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
unregister:
rtnl_link_unregister(&wwan_rtnl_link_ops);
return err;
@@ -1240,7 +1244,7 @@ static void __exit wwan_exit(void)
debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
}
module_init(wwan_init);
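The same conversion, a statically allocated const struct class registered with class_register() instead of a runtime class_create(), repeats in wwan_hwsim and the NVMe core below. A minimal out-of-tree sketch of the idiom, with placeholder names that are not part of any driver in this diff:

/* Sketch of the const-class registration pattern; "example" names are
 * hypothetical. class_unregister() pairs with class_register(). */
#include <linux/device.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	return class_register(&example_class);	/* replaces class_create() */
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);	/* replaces class_destroy() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");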
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index ff3dd24ddb33..b02befd1b6fb 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -25,7 +25,9 @@ static int wwan_hwsim_devsnum = 2;
module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
MODULE_PARM_DESC(devices, "Number of simulated devices");
-static struct class *wwan_hwsim_class;
+static const struct class wwan_hwsim_class = {
+ .name = "wwan_hwsim",
+};
static struct dentry *wwan_hwsim_debugfs_topdir;
static struct dentry *wwan_hwsim_debugfs_devcreate;
@@ -277,7 +279,7 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
spin_unlock(&wwan_hwsim_devs_lock);
dev->dev.release = wwan_hwsim_dev_release;
- dev->dev.class = wwan_hwsim_class;
+ dev->dev.class = &wwan_hwsim_class;
dev_set_name(&dev->dev, "hwsim%u", dev->id);
spin_lock_init(&dev->ports_lock);
@@ -511,11 +513,9 @@ static int __init wwan_hwsim_init(void)
if (!wwan_wq)
return -ENOMEM;
- wwan_hwsim_class = class_create("wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class)) {
- err = PTR_ERR(wwan_hwsim_class);
+ err = class_register(&wwan_hwsim_class);
+ if (err)
goto err_wq_destroy;
- }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -534,7 +534,7 @@ err_clean_devs:
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait deletion works completion */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
err_wq_destroy:
destroy_workqueue(wwan_wq);
@@ -547,7 +547,7 @@ static void __exit wwan_hwsim_exit(void)
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait deletion works completion */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
destroy_workqueue(wwan_wq);
}
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index 27dd93deff6e..d702bee78082 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client);
int ntb_register_device(struct ntb_dev *ntb)
{
+ int ret;
+
if (!ntb)
return -EINVAL;
if (!ntb->pdev)
@@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb)
ntb->ctx_ops = NULL;
spin_lock_init(&ntb->ctx_lock);
- return device_register(&ntb->dev);
+ ret = device_register(&ntb->dev);
+ if (ret)
+ put_device(&ntb->dev);
+
+ return ret;
}
EXPORT_SYMBOL(ntb_register_device);
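The added error path follows the driver-core rule that a failed device_register() must be balanced by put_device() rather than a direct kfree(), so the kobject's release callback still frees the object. A generic sketch of that pattern, with hypothetical names:

/* Generic pattern, hypothetical 'foo' names: once device_register() has
 * touched the embedded kobject, only put_device() may release it. */
#include <linux/device.h>
#include <linux/slab.h>

struct foo_dev {
	struct device dev;
};

static void foo_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_dev, dev));
}

static int foo_add_device(struct foo_dev *foo)
{
	int ret;

	foo->dev.release = foo_dev_release;

	ret = device_register(&foo->dev);	/* initializes and adds the kobject */
	if (ret)
		put_device(&foo->dev);		/* not kfree(): let ->release() run */

	return ret;
}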
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index bb3726b622ad..4d0c527e8576 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1496,19 +1496,21 @@ static int btt_blk_init(struct btt *btt)
{
struct nd_btt *nd_btt = btt->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
- int rc = -ENOMEM;
+ struct queue_limits lim = {
+ .logical_block_size = btt->sector_size,
+ .max_hw_sectors = UINT_MAX,
+ };
+ int rc;
- btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!btt->btt_disk)
- return -ENOMEM;
+ btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(btt->btt_disk))
+ return PTR_ERR(btt->btt_disk);
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
- blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
- blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
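Both nvdimm conversions here and below follow the same shape: the limits are filled into a stack struct queue_limits and handed to blk_alloc_disk(), which applies them at allocation time and returns an ERR_PTR on failure instead of NULL. A minimal sketch with a hypothetical driver name:

/* Sketch of the queue-limits-at-allocation pattern; 'foo' is hypothetical. */
#include <linux/blkdev.h>
#include <linux/limits.h>
#include <linux/numa.h>

static struct gendisk *foo_alloc_disk(unsigned int sector_size)
{
	struct queue_limits lim = {
		.logical_block_size	= sector_size,
		.max_hw_sectors		= UINT_MAX,
	};

	/* limits land on the queue atomically; check with IS_ERR(), not !ptr */
	return blk_alloc_disk(&lim, NUMA_NO_NODE);
}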
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index e9898457a7bd..598fe2e89bda 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -451,6 +451,11 @@ static int pmem_attach_disk(struct device *dev,
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct queue_limits lim = {
+ .logical_block_size = pmem_sector_size(ndns),
+ .physical_block_size = PAGE_SIZE,
+ .max_hw_sectors = UINT_MAX,
+ };
int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
struct range bb_range;
@@ -497,9 +502,9 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY;
}
- disk = blk_alloc_disk(nid);
- if (!disk)
- return -ENOMEM;
+ disk = blk_alloc_disk(&lim, nid);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
q = disk->queue;
pmem->disk = disk;
@@ -539,9 +544,6 @@ static int pmem_attach_disk(struct device *dev,
pmem->virt_addr = addr;
blk_queue_write_cache(q, true, fua);
- blk_queue_physical_block_size(q, PAGE_SIZE);
- blk_queue_logical_block_size(q, pmem_sector_size(ndns));
- blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
if (pmem->pfn_flags & PFN_MAP)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index c727cd1f264b..a480cdeac288 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1516,7 +1516,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
goto put_dev;
}
- anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
if (IS_ERR(anv->ctrl.admin_q)) {
ret = -ENOMEM;
goto put_dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0a96362912ce..00864a634470 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -114,12 +114,21 @@ static DEFINE_MUTEX(nvme_subsystems_lock);
static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
-static struct class *nvme_class;
-static struct class *nvme_subsys_class;
+static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
+static const struct class nvme_class = {
+ .name = "nvme",
+ .dev_uevent = nvme_class_uevent,
+};
+
+static const struct class nvme_subsys_class = {
+ .name = "nvme-subsystem",
+};
static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
-static struct class *nvme_ns_chr_class;
+static const struct class nvme_ns_chr_class = {
+ .name = "nvme-generic",
+};
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -1398,8 +1407,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
sizeof(struct nvme_id_ctrl));
- if (error)
+ if (error) {
kfree(*id);
+ *id = NULL;
+ }
return error;
}
@@ -1528,6 +1539,7 @@ int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
kfree(*id);
+ *id = NULL;
}
return error;
}
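The shape of both fixes above, as a generic sketch with hypothetical names: on error the identify buffer is freed and the caller-visible pointer is cleared, so an error path in the caller can neither double-free nor dereference stale memory.

/* Generic sketch; struct foo_ctrl/foo_id and foo_query_hw() are stand-ins,
 * not NVMe APIs. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_ctrl;			/* hypothetical controller handle */
struct foo_id { u32 data; };		/* hypothetical identify payload */

static int foo_query_hw(struct foo_ctrl *ctrl, struct foo_id *id)
{
	return -EIO;			/* stand-in for the real query */
}

static int foo_fetch_id(struct foo_ctrl *ctrl, struct foo_id **id)
{
	int error;

	*id = kzalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = foo_query_hw(ctrl, *id);
	if (error) {
		kfree(*id);
		*id = NULL;	/* callers must never see a freed pointer */
	}
	return error;
}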
@@ -1727,12 +1739,23 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk,
- struct nvme_ns_head *head, u32 max_integrity_segments)
+static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
{
struct blk_integrity integrity = { };
+ blk_integrity_unregister(disk);
+
+ if (!head->ms)
+ return true;
+
+ /*
+ * PI can always be supported as we can ask the controller to simply
+ * insert/strip it, which is not possible for other kinds of metadata.
+ */
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
+ !(head->features & NVME_NS_METADATA_SUPPORTED))
+ return nvme_ns_has_pi(head);
+
switch (head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
switch (head->guard_type) {
@@ -1775,53 +1798,32 @@ static void nvme_init_integrity(struct gendisk *disk,
}
integrity.tuple_size = head->ms;
+ integrity.pi_offset = head->pi_offset;
blk_integrity_register(disk, &integrity);
- blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
-}
-#else
-static void nvme_init_integrity(struct gendisk *disk,
- struct nvme_ns_head *head, u32 max_integrity_segments)
-{
+ return true;
}
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
- struct nvme_ns_head *head)
+static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
- struct request_queue *queue = disk->queue;
- u32 max_discard_sectors;
-
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
- max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
- } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
- max_discard_sectors = UINT_MAX;
- } else {
- blk_queue_max_discard_sectors(queue, 0);
- return;
- }
+ struct nvme_ctrl *ctrl = ns->ctrl;
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- /*
- * If discard is already enabled, don't reset queue limits.
- *
- * This works around the fact that the block layer can't cope well with
- * updating the hardware limits when overridden through sysfs. This is
- * harmless because discard limits in NVMe are purely advisory.
- */
- if (queue->limits.max_discard_sectors)
- return;
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+ lim->max_hw_discard_sectors =
+ nvme_lba_to_sect(ns->head, ctrl->dmrsl);
+ else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ lim->max_hw_discard_sectors = UINT_MAX;
+ else
+ lim->max_hw_discard_sectors = 0;
+
+ lim->discard_granularity = lim->logical_block_size;
- blk_queue_max_discard_sectors(queue, max_discard_sectors);
if (ctrl->dmrl)
- blk_queue_max_discard_segments(queue, ctrl->dmrl);
+ lim->max_discard_segments = ctrl->dmrl;
else
- blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
- queue->limits.discard_granularity = queue_logical_block_size(queue);
-
- if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
- blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
+ lim->max_discard_segments = NVME_DSM_MAX_RANGES;
}
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
@@ -1832,42 +1834,38 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
a->csi == b->csi;
}
-static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
- struct nvme_id_ns *id)
+static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
+ struct nvme_id_ns_nvm **nvmp)
{
- bool first = id->dps & NVME_NS_DPS_PI_FIRST;
- unsigned lbaf = nvme_lbaf_index(id->flbas);
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_CS_NS,
+ .identify.csi = NVME_CSI_NVM,
+ };
struct nvme_id_ns_nvm *nvm;
- int ret = 0;
- u32 elbaf;
-
- head->pi_size = 0;
- head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
- if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
- head->pi_size = sizeof(struct t10_pi_tuple);
- head->guard_type = NVME_NVM_NS_16B_GUARD;
- goto set_pi;
- }
+ int ret;
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
return -ENOMEM;
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(head->ns_id);
- c.identify.cns = NVME_ID_CNS_CS_NS;
- c.identify.csi = NVME_CSI_NVM;
-
ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
if (ret)
- goto free_data;
+ kfree(nvm);
+ else
+ *nvmp = nvm;
+ return ret;
+}
- elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
+static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
+ struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
+{
+ u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
/* no support for storage tag formats right now */
if (nvme_elbaf_sts(elbaf))
- goto free_data;
+ return;
head->guard_type = nvme_elbaf_guard_type(elbaf);
switch (head->guard_type) {
@@ -1880,30 +1878,31 @@ static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
default:
break;
}
-
-free_data:
- kfree(nvm);
-set_pi:
- if (head->pi_size && (first || head->ms == head->pi_size))
- head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
- else
- head->pi_type = 0;
-
- return ret;
}
-static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
- struct nvme_ns_head *head, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head, struct nvme_id_ns *id,
+ struct nvme_id_ns_nvm *nvm)
{
- int ret;
-
- ret = nvme_init_ms(ctrl, head, id);
- if (ret)
- return ret;
-
head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ head->pi_type = 0;
+ head->pi_size = 0;
+ head->pi_offset = 0;
+ head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return 0;
+ return;
+
+ if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
+ nvme_configure_pi_elbas(head, id, nvm);
+ } else {
+ head->pi_size = sizeof(struct t10_pi_tuple);
+ head->guard_type = NVME_NVM_NS_16B_GUARD;
+ }
+
+ if (head->pi_size && head->ms >= head->pi_size)
+ head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ if (!(id->dps & NVME_NS_DPS_PI_FIRST))
+ head->pi_offset = head->ms - head->pi_size;
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
@@ -1912,7 +1911,7 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return 0;
+ return;
head->features |= NVME_NS_EXT_LBAS;
@@ -1939,33 +1938,32 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
else
head->features |= NVME_NS_METADATA_SUPPORTED;
}
- return 0;
}
-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
- struct request_queue *q)
+static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
{
- bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
-
- if (ctrl->max_hw_sectors) {
- u32 max_segments =
- (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+ return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
+}
- max_segments = min_not_zero(max_segments, ctrl->max_segments);
- blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
- }
- blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 3);
- blk_queue_write_cache(q, vwc, vwc);
+static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
+ struct queue_limits *lim)
+{
+ lim->max_hw_sectors = ctrl->max_hw_sectors;
+ lim->max_segments = min_t(u32, USHRT_MAX,
+ min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
+ lim->max_integrity_segments = ctrl->max_integrity_segments;
+ lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
+ lim->max_segment_size = UINT_MAX;
+ lim->dma_alignment = 3;
}
-static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
- struct nvme_ns_head *head, struct nvme_id_ns *id)
+static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
+ struct queue_limits *lim)
{
- sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze));
+ struct nvme_ns_head *head = ns->head;
u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
+ bool valid = true;
/*
* The block layer can't support LBA sizes larger than the page size
@@ -1973,12 +1971,10 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
* allow block I/O.
*/
if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
- capacity = 0;
bs = (1 << 9);
+ valid = false;
}
- blk_integrity_unregister(disk);
-
atomic_bs = phys_bs = bs;
if (id->nabo == 0) {
/*
@@ -1989,7 +1985,7 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
- atomic_bs = (1 + ctrl->subsys->awupf) * bs;
+ atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
}
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
@@ -1999,36 +1995,20 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
io_opt = bs * (1 + le16_to_cpu(id->nows));
}
- blk_queue_logical_block_size(disk->queue, bs);
/*
* Linux filesystems assume writing a single physical block is
* an atomic operation. Hence limit the physical block size to the
* value of the Atomic Write Unit Power Fail parameter.
*/
- blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
- blk_queue_io_min(disk->queue, phys_bs);
- blk_queue_io_opt(disk->queue, io_opt);
-
- /*
- * Register a metadata profile for PI, or the plain non-integrity NVMe
- * metadata masquerading as Type 0 if supported, otherwise reject block
- * I/O to namespaces with metadata except when the namespace supports
- * PI, as it can strip/insert in that case.
- */
- if (head->ms) {
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
- (head->features & NVME_NS_METADATA_SUPPORTED))
- nvme_init_integrity(disk, head,
- ctrl->max_integrity_segments);
- else if (!nvme_ns_has_pi(head))
- capacity = 0;
- }
-
- set_capacity_and_notify(disk, capacity);
-
- nvme_config_discard(ctrl, disk, head);
- blk_queue_max_write_zeroes_sectors(disk->queue,
- ctrl->max_zeroes_sectors);
+ lim->logical_block_size = bs;
+ lim->physical_block_size = min(phys_bs, atomic_bs);
+ lim->io_min = phys_bs;
+ lim->io_opt = io_opt;
+ if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+ lim->max_write_zeroes_sectors = UINT_MAX;
+ else
+ lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+ return valid;
}
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
@@ -2042,7 +2022,8 @@ static inline bool nvme_first_scan(struct gendisk *disk)
return !disk_live(disk);
}
-static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
+ struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
u32 iob;
@@ -2070,38 +2051,36 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
return;
}
- blk_queue_chunk_sectors(ns->queue, iob);
+ lim->chunk_sectors = iob;
}
static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
+ struct queue_limits lim;
+ int ret;
+
blk_mq_freeze_queue(ns->disk->queue);
- nvme_set_queue_limits(ns->ctrl, ns->queue);
+ lim = queue_limits_start_update(ns->disk->queue);
+ nvme_set_ctrl_limits(ns->ctrl, &lim);
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
blk_mq_unfreeze_queue(ns->disk->queue);
- if (nvme_ns_head_multipath(ns->head)) {
- blk_mq_freeze_queue(ns->head->disk->queue);
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
- blk_stack_limits(&ns->head->disk->queue->limits,
- &ns->queue->limits, 0);
- ns->head->disk->flags |= GENHD_FL_HIDDEN;
- blk_mq_unfreeze_queue(ns->head->disk->queue);
- }
-
/* Hide the block-interface for these devices */
- ns->disk->flags |= GENHD_FL_HIDDEN;
- set_bit(NVME_NS_READY, &ns->flags);
-
- return 0;
+ if (!ret)
+ ret = -ENODEV;
+ return ret;
}
static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
+ bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+ struct queue_limits lim;
+ struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_id_ns *id;
+ sector_t capacity;
unsigned lbaf;
int ret;
@@ -2113,30 +2092,52 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
/* namespace not allocated or attached */
info->is_removed = true;
ret = -ENODEV;
- goto error;
+ goto out;
+ }
+
+ if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
+ ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
+ if (ret < 0)
+ goto out;
}
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
- nvme_set_queue_limits(ns->ctrl, ns->queue);
-
- ret = nvme_configure_metadata(ns->ctrl, ns->head, id);
- if (ret < 0) {
- blk_mq_unfreeze_queue(ns->disk->queue);
- goto out;
- }
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id);
+ capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
- if (ns->head->ids.csi == NVME_CSI_ZNS) {
- ret = nvme_update_zone_info(ns, lbaf);
+ lim = queue_limits_start_update(ns->disk->queue);
+ nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_configure_metadata(ns->ctrl, ns->head, id, nvm);
+ nvme_set_chunk_sectors(ns, id, &lim);
+ if (!nvme_update_disk_info(ns, id, &lim))
+ capacity = 0;
+ nvme_config_discard(ns, &lim);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_update_zone_info(ns, lbaf, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
goto out;
}
}
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
+
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+ * metadata masquerading as Type 0 if supported, otherwise reject block
+ * I/O to namespaces with metadata except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (!nvme_init_integrity(ns->disk, ns->head))
+ capacity = 0;
+
+ set_capacity_and_notify(ns->disk, capacity);
/*
* Only set the DEAC bit if the device guarantees that reads from
@@ -2147,62 +2148,81 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_queue_write_cache(ns->disk->queue, vwc, vwc);
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
- ret = nvme_revalidate_zones(ns);
+ ret = blk_revalidate_disk_zones(ns->disk, NULL);
if (ret && !nvme_first_scan(ns->disk))
goto out;
}
- if (nvme_ns_head_multipath(ns->head)) {
- blk_mq_freeze_queue(ns->head->disk->queue);
- nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id);
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
- blk_stack_limits(&ns->head->disk->queue->limits,
- &ns->queue->limits, 0);
- disk_update_readahead(ns->head->disk);
- blk_mq_unfreeze_queue(ns->head->disk->queue);
- }
-
ret = 0;
out:
- /*
- * If probing fails due an unsupported feature, hide the block device,
- * but still allow other access.
- */
- if (ret == -ENODEV) {
- ns->disk->flags |= GENHD_FL_HIDDEN;
- set_bit(NVME_NS_READY, &ns->flags);
- ret = 0;
- }
-
-error:
+ kfree(nvm);
kfree(id);
return ret;
}
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
{
+ bool unsupported = false;
+ int ret;
+
switch (info->ids.csi) {
case NVME_CSI_ZNS:
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
dev_info(ns->ctrl->device,
"block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
info->nsid);
- return nvme_update_ns_info_generic(ns, info);
+ ret = nvme_update_ns_info_generic(ns, info);
+ break;
}
- return nvme_update_ns_info_block(ns, info);
+ ret = nvme_update_ns_info_block(ns, info);
+ break;
case NVME_CSI_NVM:
- return nvme_update_ns_info_block(ns, info);
+ ret = nvme_update_ns_info_block(ns, info);
+ break;
default:
dev_info(ns->ctrl->device,
"block device for nsid %u not supported (csi %u)\n",
info->nsid, info->ids.csi);
- return nvme_update_ns_info_generic(ns, info);
+ ret = nvme_update_ns_info_generic(ns, info);
+ break;
+ }
+
+ /*
+	 * If probing fails due to an unsupported feature, hide the block device,
+ * but still allow other access.
+ */
+ if (ret == -ENODEV) {
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+ unsupported = true;
+ ret = 0;
}
+
+ if (!ret && nvme_ns_head_multipath(ns->head)) {
+ struct queue_limits lim;
+
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ if (unsupported)
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ else
+ nvme_init_integrity(ns->head->disk, ns->head);
+ set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+
+ lim = queue_limits_start_update(ns->head->disk->queue);
+ queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
+ ns->head->disk->disk_name);
+ ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ return ret;
}
#ifdef CONFIG_BLK_SED_OPAL
@@ -2877,7 +2897,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
subsys->awupf = le16_to_cpu(id->awupf);
nvme_mpath_default_iopolicy(subsys);
- subsys->dev.class = nvme_subsys_class;
+ subsys->dev.class = &nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
subsys->dev.groups = nvme_subsys_attrs_groups;
dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
@@ -3117,11 +3137,17 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
return -EINVAL;
}
+ if (!ctrl->maxcmd) {
+ dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
+ return -EINVAL;
+ }
+
return 0;
}
static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
+ struct queue_limits lim;
struct nvme_id_ctrl *id;
u32 max_hw_sectors;
bool prev_apst_enabled;
@@ -3188,7 +3214,12 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
- nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ lim = queue_limits_start_update(ctrl->admin_q);
+ nvme_set_ctrl_limits(ctrl, &lim);
+ ret = queue_limits_commit_update(ctrl->admin_q, &lim);
+ if (ret)
+ goto out_free;
+
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
ctrl->max_namespaces = le32_to_cpu(id->mnan);
@@ -3420,7 +3451,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
if (minor < 0)
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
- cdev_device->class = nvme_ns_chr_class;
+ cdev_device->class = &nvme_ns_chr_class;
cdev_device->release = nvme_cdev_rel;
device_initialize(cdev_device);
cdev_init(cdev, fops);
@@ -3692,7 +3723,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (!ns)
return;
- disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+ disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns);
if (IS_ERR(disk))
goto out_free_ns;
disk->fops = &nvme_bdev_ops;
@@ -4353,6 +4384,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int cmd_size)
{
+ struct queue_limits lim = {};
int ret;
memset(set, 0, sizeof(*set));
@@ -4372,14 +4404,14 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ret)
return ret;
- ctrl->admin_q = blk_mq_init_queue(set);
+ ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
if (IS_ERR(ctrl->admin_q)) {
ret = PTR_ERR(ctrl->admin_q);
goto out_free_tagset;
}
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->fabrics_q = blk_mq_init_queue(set);
+ ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->fabrics_q)) {
ret = PTR_ERR(ctrl->fabrics_q);
goto out_cleanup_admin_q;
@@ -4443,7 +4475,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
return ret;
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->connect_q = blk_mq_init_queue(set);
+ ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->connect_q)) {
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
@@ -4613,7 +4645,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->device = &ctrl->ctrl_device;
ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
ctrl->instance);
- ctrl->device->class = nvme_class;
+ ctrl->device->class = &nvme_class;
ctrl->device->parent = ctrl->dev;
if (ops->dev_attr_groups)
ctrl->device->groups = ops->dev_attr_groups;
@@ -4846,42 +4878,36 @@ static int __init nvme_core_init(void)
if (result < 0)
goto destroy_delete_wq;
- nvme_class = class_create("nvme");
- if (IS_ERR(nvme_class)) {
- result = PTR_ERR(nvme_class);
+ result = class_register(&nvme_class);
+ if (result)
goto unregister_chrdev;
- }
- nvme_class->dev_uevent = nvme_class_uevent;
- nvme_subsys_class = class_create("nvme-subsystem");
- if (IS_ERR(nvme_subsys_class)) {
- result = PTR_ERR(nvme_subsys_class);
+ result = class_register(&nvme_subsys_class);
+ if (result)
goto destroy_class;
- }
result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
"nvme-generic");
if (result < 0)
goto destroy_subsys_class;
- nvme_ns_chr_class = class_create("nvme-generic");
- if (IS_ERR(nvme_ns_chr_class)) {
- result = PTR_ERR(nvme_ns_chr_class);
+ result = class_register(&nvme_ns_chr_class);
+ if (result)
goto unregister_generic_ns;
- }
+
result = nvme_init_auth();
if (result)
goto destroy_ns_chr;
return 0;
destroy_ns_chr:
- class_destroy(nvme_ns_chr_class);
+ class_unregister(&nvme_ns_chr_class);
unregister_generic_ns:
unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
- class_destroy(nvme_subsys_class);
+ class_unregister(&nvme_subsys_class);
destroy_class:
- class_destroy(nvme_class);
+ class_unregister(&nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
@@ -4897,9 +4923,9 @@ out:
static void __exit nvme_core_exit(void)
{
nvme_exit_auth();
- class_destroy(nvme_ns_chr_class);
- class_destroy(nvme_subsys_class);
- class_destroy(nvme_class);
+ class_unregister(&nvme_ns_chr_class);
+ class_unregister(&nvme_subsys_class);
+ class_unregister(&nvme_class);
unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
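The core.c changes above keep repeating one sequence for live queues: freeze, start a limits update, fill in the fields, commit, unfreeze. A condensed sketch of that sequence as used in nvme_update_ns_info_block(); disk and bs are placeholders for the namespace's gendisk and block size:

	struct queue_limits lim;
	int ret;

	blk_mq_freeze_queue(disk->queue);
	lim = queue_limits_start_update(disk->queue);	/* snapshots the current limits */
	lim.logical_block_size = bs;			/* ... driver-specific fields ... */
	ret = queue_limits_commit_update(disk->queue, &lim);	/* validates and applies */
	blk_mq_unfreeze_queue(disk->queue);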
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 495c171daead..1f0ea1f32d22 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -638,7 +638,7 @@ static struct key *nvmf_parse_key(int key_id)
}
key = key_lookup(key_id);
- if (!IS_ERR(key))
+ if (IS_ERR(key))
pr_err("key id %08x not found\n", key_id);
else
pr_debug("Using key id %08x\n", key_id);
@@ -1319,7 +1319,10 @@ out_free_opts:
return ERR_PTR(ret);
}
-static struct class *nvmf_class;
+static const struct class nvmf_class = {
+ .name = "nvme-fabrics",
+};
+
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);
@@ -1439,15 +1442,14 @@ static int __init nvmf_init(void)
if (!nvmf_default_host)
return -ENOMEM;
- nvmf_class = class_create("nvme-fabrics");
- if (IS_ERR(nvmf_class)) {
+ ret = class_register(&nvmf_class);
+ if (ret) {
pr_err("couldn't register class nvme-fabrics\n");
- ret = PTR_ERR(nvmf_class);
goto out_free_host;
}
nvmf_device =
- device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
@@ -1463,9 +1465,9 @@ static int __init nvmf_init(void)
return 0;
out_destroy_device:
- device_destroy(nvmf_class, MKDEV(0, 0));
+ device_destroy(&nvmf_class, MKDEV(0, 0));
out_destroy_class:
- class_destroy(nvmf_class);
+ class_unregister(&nvmf_class);
out_free_host:
nvmf_host_put(nvmf_default_host);
return ret;
@@ -1474,8 +1476,8 @@ out_free_host:
static void __exit nvmf_exit(void)
{
misc_deregister(&nvmf_misc);
- device_destroy(nvmf_class, MKDEV(0, 0));
- class_destroy(nvmf_class);
+ device_destroy(&nvmf_class, MKDEV(0, 0));
+ class_unregister(&nvmf_class);
nvmf_host_put(nvmf_default_host);
BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
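core.c and fabrics.c (and fcloop.c further down) also convert their device classes from class_create() allocations to statically defined struct class objects. A minimal sketch of the pattern; the class name here is illustrative:

	static const struct class example_class = {
		.name = "example",
	};

	static int __init example_init(void)
	{
		return class_register(&example_class);	/* replaces class_create("example") */
	}

	static void __exit example_exit(void)
	{
		class_unregister(&example_class);	/* replaces class_destroy() */
	}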
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 74de1e64aeea..5397fb428b24 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -516,6 +516,7 @@ static void nvme_requeue_work(struct work_struct *work)
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
+ struct queue_limits lim;
bool vwc = false;
mutex_init(&head->lock);
@@ -532,9 +533,14 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
!nvme_is_unique_nsid(ctrl, head) || !multipath)
return 0;
- head->disk = blk_alloc_disk(ctrl->numa_node);
- if (!head->disk)
- return -ENOMEM;
+ blk_set_stacking_limits(&lim);
+ lim.dma_alignment = 3;
+ if (head->ids.csi != NVME_CSI_ZNS)
+ lim.max_zone_append_sectors = 0;
+
+ head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
+ if (IS_ERR(head->disk))
+ return PTR_ERR(head->disk);
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
sprintf(head->disk->disk_name, "nvme%dn%d",
@@ -553,11 +559,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
- /* set to a default value of 512 until the disk is validated */
- blk_queue_logical_block_size(head->disk->queue, 512);
- blk_set_stacking_limits(&head->disk->queue->limits);
- blk_queue_dma_alignment(head->disk->queue, 3);
-
/* we need to propagate up the VMC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
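The multipath head disk now gets its limits seeded with blk_set_stacking_limits() at allocation and restacked against the active path on every revalidation. A sketch of the two halves; head_disk and path stand in for ns->head->disk and ns->disk:

	/* allocation: start from permissive stacking defaults */
	blk_set_stacking_limits(&lim);
	lim.dma_alignment = 3;
	head_disk = blk_alloc_disk(&lim, node);

	/* revalidation: fold the bottom device's limits back into the head */
	lim = queue_limits_start_update(head_disk->queue);
	queue_limits_stack_bdev(&lim, path->part0, 0, head_disk->disk_name);
	ret = queue_limits_commit_update(head_disk->queue, &lim);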
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7b87763e2f8a..24193fcb8bd5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -464,6 +464,7 @@ struct nvme_ns_head {
u16 ms;
u16 pi_size;
u8 pi_type;
+ u8 pi_offset;
u8 guard_type;
u16 sgs;
u32 sws;
@@ -1035,11 +1036,11 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
-int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct queue_limits *lim);
#ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action);
@@ -1050,13 +1051,6 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
{
return BLK_STS_NOTSUPP;
}
-
-static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
-{
- dev_warn(ns->ctrl->device,
- "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
- return -EPROTONOSUPPORT;
-}
#endif
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
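The new pi_offset member added to struct nvme_ns_head above is what nvme_init_integrity() forwards to the block layer, so protection information no longer has to sit at the start of the metadata. A condensed fragment of the registration step, with the profile selection elided:

	struct blk_integrity integrity = { };

	/* profile chosen from head->pi_type and head->guard_type, not shown */
	integrity.tuple_size = head->ms;
	integrity.pi_offset = head->pi_offset;	/* non-zero when PI sits at the end of the metadata */
	blk_integrity_register(disk, &integrity);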
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 20fdd40b1879..366f0bb4ebfc 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1006,6 +1006,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
int ret;
bool changed;
+ u16 max_queue_size;
ret = nvme_rdma_configure_admin_queue(ctrl, new);
if (ret)
@@ -1030,11 +1031,16 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
- if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ if (ctrl->ctrl.max_integrity_segments)
+ max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
+ else
+ max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+
+ if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
dev_warn(ctrl->ctrl.device,
- "ctrl sqsize %u > max queue size %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
- ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, max_queue_size);
+ ctrl->ctrl.sqsize = max_queue_size - 1;
}
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index f2832f70e7e0..09fcaa519e5b 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -221,14 +221,11 @@ static int ns_update_nuse(struct nvme_ns *ns)
ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
if (ret)
- goto out_free_id;
+ return ret;
ns->head->nuse = le64_to_cpu(id->nuse);
-
-out_free_id:
kfree(id);
-
- return ret;
+ return 0;
}
static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index a6d596e05602..3692b56cb58d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
- struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
- if (queue->pf_cache.va) {
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
- queue->pf_cache.va = NULL;
- }
+ page_frag_cache_drain(&queue->pf_cache);
noreclaim_flag = memalloc_noreclaim_save();
/* ->sock will be released by fput() */
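Both the TCP host queue teardown above and the TCP target teardown later in this diff switch to the page_frag_cache_drain() helper, which performs the NULL check internally. The before/after, using the queue's pf_cache exactly as in the hunk:

	/* before: open-coded drain */
	if (queue->pf_cache.va) {
		struct page *page = virt_to_head_page(queue->pf_cache.va);

		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
		queue->pf_cache.va = NULL;
	}

	/* after: single NULL-safe helper */
	page_frag_cache_drain(&queue->pf_cache);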
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 499bbb0eee8d..722384bcc765 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -7,16 +7,6 @@
#include <linux/vmalloc.h>
#include "nvme.h"
-int nvme_revalidate_zones(struct nvme_ns *ns)
-{
- struct request_queue *q = ns->queue;
-
- blk_queue_chunk_sectors(q, ns->head->zsze);
- blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-
- return blk_revalidate_disk_zones(ns->disk, NULL);
-}
-
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
struct nvme_command c = { };
@@ -45,10 +35,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct queue_limits *lim)
{
struct nvme_effects_log *log = ns->head->effects;
- struct request_queue *q = ns->queue;
struct nvme_command c = { };
struct nvme_id_ns_zns *id;
int status;
@@ -109,10 +99,12 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- disk_set_zoned(ns->disk);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
- disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+ lim->zoned = 1;
+ lim->max_open_zones = le32_to_cpu(id->mor) + 1;
+ lim->max_active_zones = le32_to_cpu(id->mar) + 1;
+ lim->chunk_sectors = ns->head->zsze;
+ lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
free_data:
kfree(id);
return status;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..f5b7054a4a05 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cqes = (0x4 << 4) | 0x4;
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2482a0db2504..77a6e817b315 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->max_queue_size);
+ if (ret) {
+ pr_err("Invalid value '%s' for max_queue_size\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_trtype,
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
+ &nvmet_attr_param_max_queue_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
+ port->max_queue_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8658e9c08534..6bbe4df0166c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port)
if (port->inline_data_size < 0)
port->inline_data_size = 0;
+ /*
+ * If the transport didn't set the max_queue_size properly, then clamp
+ * it to the target limits. Also set default values in case the
+ * transport didn't set it at all.
+ */
+ if (port->max_queue_size < 0)
+ port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+ else
+ port->max_queue_size = clamp_t(int, port->max_queue_size,
+ NVMET_MIN_QUEUE_SIZE,
+ NVMET_MAX_QUEUE_SIZE);
+
port->enabled = true;
port->tr_ops = ops;
return 0;
@@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
if (ctrl->ops->get_max_queue_size)
- ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+ ctrl->port->max_queue_size) - 1;
else
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ ctrl->cap |= ctrl->port->max_queue_size - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
@@ -1411,6 +1424,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 68e82ccc0e4e..ce54da8c6b36 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->lpa = (1 << 2);
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->flags & NVMF_KEYED_SGLS)
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 9964ffe347d2..b23f4cf840bd 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -157,7 +157,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
- if (sqsize > mqes) {
+ /* for fabrics, this value applies to only the I/O Submission Queues */
+ if (qid && sqsize > mqes) {
pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
sqsize, mqes, ctrl->cntlid);
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
@@ -251,8 +252,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
-
uuid_copy(&ctrl->hostid, &d->hostid);
ret = nvmet_setup_auth(ctrl);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1471af250ea6..913cd2ec7a6f 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1556,7 +1556,9 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = {
NULL,
};
-static struct class *fcloop_class;
+static const struct class fcloop_class = {
+ .name = "fcloop",
+};
static struct device *fcloop_device;
@@ -1564,15 +1566,14 @@ static int __init fcloop_init(void)
{
int ret;
- fcloop_class = class_create("fcloop");
- if (IS_ERR(fcloop_class)) {
+ ret = class_register(&fcloop_class);
+ if (ret) {
pr_err("couldn't register class fcloop\n");
- ret = PTR_ERR(fcloop_class);
return ret;
}
fcloop_device = device_create_with_groups(
- fcloop_class, NULL, MKDEV(0, 0), NULL,
+ &fcloop_class, NULL, MKDEV(0, 0), NULL,
fcloop_dev_attr_groups, "ctl");
if (IS_ERR(fcloop_device)) {
pr_err("couldn't create ctl device!\n");
@@ -1585,7 +1586,7 @@ static int __init fcloop_init(void)
return 0;
out_destroy_class:
- class_destroy(fcloop_class);
+ class_unregister(&fcloop_class);
return ret;
}
@@ -1643,8 +1644,8 @@ static void __exit fcloop_exit(void)
put_device(fcloop_device);
- device_destroy(fcloop_class, MKDEV(0, 0));
- class_destroy(fcloop_class);
+ device_destroy(&fcloop_class, MKDEV(0, 0));
+ class_unregister(&fcloop_class);
}
module_init(fcloop_init);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index f11400a908f2..6426aac2634a 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -50,10 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
- if (ns->bdev_handle) {
- bdev_release(ns->bdev_handle);
+ if (ns->bdev_file) {
+ fput(ns->bdev_file);
ns->bdev = NULL;
- ns->bdev_handle = NULL;
+ ns->bdev_file = NULL;
}
}
@@ -85,18 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (ns->buffered_io)
return -ENOTBLK;
- ns->bdev_handle = bdev_open_by_path(ns->device_path,
+ ns->bdev_file = bdev_file_open_by_path(ns->device_path,
BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(ns->bdev_handle)) {
- ret = PTR_ERR(ns->bdev_handle);
+ if (IS_ERR(ns->bdev_file)) {
+ ret = PTR_ERR(ns->bdev_file);
if (ret != -ENOTBLK) {
pr_err("failed to open block device %s: (%d)\n",
ns->device_path, ret);
}
- ns->bdev_handle = NULL;
+ ns->bdev_file = NULL;
return ret;
}
- ns->bdev = ns->bdev_handle->bdev;
+ ns->bdev = file_bdev(ns->bdev_file);
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
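The target bdev backend drops struct bdev_handle in favour of plain struct file handles. A sketch of the open/use/close sequence mirroring nvmet_bdev_ns_enable() and nvmet_bdev_ns_disable() above; path stands in for ns->device_path:

	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
					   NULL, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);
	bdev = file_bdev(bdev_file);	/* block_device backing the file */

	/* ... issue I/O against bdev ... */

	fput(bdev_file);		/* drops the reference and releases the device */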
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6c8acebe1a1a..f460728e1df1 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -58,7 +58,7 @@
struct nvmet_ns {
struct percpu_ref ref;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bdev;
struct file *file;
bool readonly;
@@ -163,6 +163,7 @@ struct nvmet_port {
void *priv;
bool enabled;
int inline_data_size;
+ int max_queue_size;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -543,9 +544,10 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
-#define NVMET_QUEUE_SIZE 1024
+#define NVMET_MIN_QUEUE_SIZE 16
+#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
-#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
/*
* Nice round number that makes a list of nsids fit into a page.
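With the per-port max_queue_size clamped in nvmet_enable_port() and folded into CAP.MQES by nvmet_init_cap() above, the advertised maxcmd is now derived from the controller's CAP rather than from a fixed constant. Roughly:

	/* MQES is zero-based, so the command limit is MQES + 1 */
	#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)

	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));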
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index f2d963e1fe94..bb4a69d538fd 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -132,7 +132,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
/* don't support fuse commands */
id->fuses = 0;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3a0f2c170f4c..f2bb9d95ecf4 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1956,6 +1956,14 @@ static int nvmet_rdma_add_port(struct nvmet_port *nport)
nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
}
+ if (nport->max_queue_size < 0) {
+ nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE;
+ } else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) {
+ pr_warn("max_queue_size %u is too large, reducing to %u\n",
+ nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE);
+ nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+ }
+
ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
nport->disc_addr.trsvcid, &port->addr);
if (ret) {
@@ -2015,6 +2023,8 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
+ if (ctrl->pi_support)
+ return NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
return NVME_RDMA_MAX_QUEUE_SIZE;
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c8655fc5aa5b..2aa5762e9f50 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1591,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
- struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1615,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ page_frag_cache_drain(&queue->pf_cache);
kfree(queue);
}
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 5b5c1e481722..3148d9f1bde6 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -456,8 +456,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
switch (zsa_req_op(req->cmd->zms.zsa)) {
case REQ_OP_ZONE_RESET:
ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
- get_capacity(req->ns->bdev->bd_disk),
- GFP_KERNEL);
+ get_capacity(req->ns->bdev->bd_disk));
if (ret < 0)
return blkdev_zone_mgmt_errno_to_nvme_status(ret);
break;
@@ -508,7 +507,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
goto out;
}
- ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors);
if (ret < 0)
status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
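blkdev_zone_mgmt() has lost its gfp_t argument; memory allocation is handled inside the block layer now, so callers simply drop the final parameter, as both zone-management paths above do:

	ret = blkdev_zone_mgmt(bdev, op, sector, nr_sectors);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);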
diff --git a/drivers/of/property.c b/drivers/of/property.c
index b71267c6667c..fa8cd33be131 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1304,7 +1304,7 @@ static struct device_node *parse_remote_endpoint(struct device_node *np,
int index)
{
/* Return NULL for index > 0 to signify end of remote-endpoints. */
- if (!index || strcmp(prop_name, "remote-endpoint"))
+ if (index > 0 || strcmp(prop_name, "remote-endpoint"))
return NULL;
return of_graph_get_remote_port_parent(np);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index c4e0432ae42a..e233734b7220 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2065,6 +2065,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
/* populate the opp table */
new_opp->rates[0] = data->freq;
new_opp->level = data->level;
+ new_opp->turbo = data->turbo;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->supplies[0].u_volt = u_volt;
new_opp->supplies[0].u_volt_min = u_volt - tol;
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index ec030b19164a..105de7c3274a 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct icc_path *path = fp->private_data;
+ const char *name = icc_get_name(path);
char buf[64];
- int i;
+ int i = 0;
- i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
+ if (name)
+ i = scnprintf(buf, sizeof(buf), "%.62s\n", name);
return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}
@@ -56,11 +58,11 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp,
struct dentry *pdentry)
{
struct dentry *d;
- char name[20];
+ char name[] = "icc-path-XXXXXXXXXXX"; /* Integers can take 11 chars max */
int i;
for (i = 0; i < opp_table->path_count; i++) {
- snprintf(name, sizeof(name), "icc-path-%.1d", i);
+ snprintf(name, sizeof(name), "icc-path-%d", i);
/* Create per-path directory */
d = debugfs_create_dir(name, pdentry);
@@ -78,7 +80,7 @@ static void opp_debug_create_clks(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
{
- char name[12];
+ char name[] = "rate_hz_XXXXXXXXXXX"; /* Integers can take 11 chars max */
int i;
if (opp_table->clk_count == 1) {
@@ -100,7 +102,7 @@ static void opp_debug_create_supplies(struct dev_pm_opp *opp,
int i;
for (i = 0; i < opp_table->regulator_count; i++) {
- char name[15];
+ char name[] = "supply-XXXXXXXXXXX"; /* Integers can take 11 chars max */
snprintf(name, sizeof(name), "supply-%d", i);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 74147262625b..d35001589d88 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -13,6 +13,11 @@ config FORCE_PCI
select HAVE_PCI
select PCI
+# select this to provide a generic PCI iomap,
+# without PCI itself having to be defined
+config GENERIC_PCI_IOMAP
+ bool
+
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index cc8b4e01e29d..175302036890 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,16 +4,17 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
- pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o
+ rom.o setup-res.o irq.o vpd.o \
+ setup-bus.o vc.o mmap.o devres.o
obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
-obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_SYSFS) += pci-sysfs.o slot.o
obj-$(CONFIG_ACPI) += pci-acpi.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += iomap.o
endif
obj-$(CONFIG_OF) += of.o
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 2d0a8d78bffb..81c50dc64da9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -565,7 +565,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
int max_epfs = sizeof(epc->function_num_map) * 8;
- int ret, value, epf;
+ int ret, epf, last_fn;
+ u32 reg, value;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -573,6 +574,17 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ /*
+	 * The Next Function Number field in the ARI_CAP_AND_CTRL register of
+	 * the last function should be 0.
+	 * Clear the Next Function Number field for the last function in use.
+ */
+ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+ reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+ value = cdns_pcie_readl(pcie, reg);
+ value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+ cdns_pcie_writel(pcie, reg, value);
+
if (ep->quirk_disable_flr) {
for (epf = 0; epf < max_epfs; epf++) {
if (!(epc->function_num_map & BIT(epf)))
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 03b96798f858..7a66a2f815dc 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -131,6 +131,12 @@
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
+ * Endpoint PF Registers
+ */
+#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000)
+#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8)
+
+/*
* Root Port Registers (PCI configuration space for the root port function)
*/
#define CDNS_PCIE_RP_BASE 0x00200000
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index dc2c036ab28c..99a60270b26c 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -42,6 +42,19 @@
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+#define IMX95_PCIE_PHY_GEN_CTRL 0x0
+#define IMX95_PCIE_REF_USE_PAD BIT(17)
+
+#define IMX95_PCIE_SS_RW_REG_0 0xf0
+#define IMX95_PCIE_REF_CLKEN BIT(23)
+#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+
+#define IMX95_PE0_GEN_CTRL_1 0x1050
+#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+
+#define IMX95_PE0_GEN_CTRL_3 0x1058
+#define IMX95_PCIE_LTSSM_EN BIT(0)
+
#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
enum imx6_pcie_variants {
@@ -52,14 +65,29 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX95,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX95_EP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+
+#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+
+#define IMX6_PCIE_MAX_CLKS 6
+
+#define IMX6_PCIE_MAX_INSTANCES 2
+
+struct imx6_pcie;
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
@@ -67,6 +95,14 @@ struct imx6_pcie_drvdata {
u32 flags;
int dbi_length;
const char *gpr;
+ const char * const *clk_names;
+ const u32 clks_cnt;
+ const u32 ltssm_off;
+ const u32 ltssm_mask;
+ const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const struct pci_epc_features *epc_features;
+ int (*init_phy)(struct imx6_pcie *pcie);
};
struct imx6_pcie {
@@ -74,11 +110,7 @@ struct imx6_pcie {
int reset_gpio;
bool gpio_active_high;
bool link_is_up;
- struct clk *pcie_bus;
- struct clk *pcie_phy;
- struct clk *pcie_inbound_axi;
- struct clk *pcie;
- struct clk *pcie_aux;
+ struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -165,34 +197,44 @@ static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
+static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+ IMX95_PCIE_PHY_CR_PARA_SEL);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_GEN_CTRL,
+ IMX95_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_REF_CLKEN,
+ IMX95_PCIE_REF_CLKEN);
+
+ return 0;
+}
+
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
- unsigned int mask, val, mode;
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ unsigned int mask, val, mode, id;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ if (drvdata->mode == DW_PCIE_EP_TYPE)
mode = PCI_EXP_TYPE_ENDPOINT;
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- if (imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- mode);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- }
- break;
- default:
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- break;
- }
+ id = imx6_pcie->controller_id;
+
+	/* If mode_mask[id] is zero, it means each controller has its own GPR */
+ if (!drvdata->mode_mask[id])
+ id = 0;
+
+ mask = drvdata->mode_mask[id];
+ val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
@@ -320,76 +362,66 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
+ /* TODO: Currently this code assumes external oscillator is being used */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+	 * According to the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+	 * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
+ */
+ if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
regmap_update_bits(imx6_pcie->iomuxc_gpr,
imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+
+ return 0;
+}
+
+static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+
+ return 0;
+}
+
+static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ return 0;
+}
- imx6_pcie_configure_type(imx6_pcie);
+static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
+
+ return imx6_pcie_init_phy(imx6_pcie);
}
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
@@ -407,13 +439,18 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ unsigned long phy_rate = 0;
int mult, div;
u16 val;
+ int i;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
return 0;
+ for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
+ if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+
switch (phy_rate) {
case 125000000:
/*
@@ -550,19 +587,11 @@ static int imx6_pcie_attach_pd(struct device *dev)
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
unsigned int offset;
int ret = 0;
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
- ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
- if (ret) {
- dev_err(dev, "unable to enable pcie_axi clock\n");
- break;
- }
-
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
@@ -582,6 +611,8 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
case IMX7D:
+ case IMX95:
+ case IMX95_EP:
break;
case IMX8MM:
case IMX8MM_EP:
@@ -589,12 +620,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX8MQ_EP:
case IMX8MP:
case IMX8MP_EP:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret) {
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
- }
-
offset = imx6_pcie_grp_offset(imx6_pcie);
/*
 * Set the override low and enabled
@@ -615,9 +640,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
case IMX6QP:
case IMX6Q:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -631,14 +653,6 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
default:
break;
}
@@ -650,23 +664,9 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
struct device *dev = pci->dev;
int ret;
- ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
- dev_err(dev, "unable to enable pcie_phy clock\n");
+ ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (ret)
return ret;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
- dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
@@ -679,11 +679,7 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
return ret;
}
@@ -691,25 +687,15 @@ err_pcie_bus:
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
}
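
The two hunks above replace the per-clock clk_get()/clk_prepare_enable() calls with the clk bulk API driven by the drvdata clock-name table. A minimal sketch of that pattern in isolation (the demo struct, function and clock count are illustrative, not taken from the driver):

#include <linux/clk.h>

#define DEMO_NUM_CLKS	3

struct demo_dev {
	struct clk_bulk_data clks[DEMO_NUM_CLKS];
};

static int demo_clks_on(struct device *dev, struct demo_dev *dd)
{
	static const char * const names[DEMO_NUM_CLKS] = {
		"pcie_bus", "pcie", "pcie_phy",
	};
	int i, ret;

	for (i = 0; i < DEMO_NUM_CLKS; i++)
		dd->clks[i].id = names[i];

	/* One lookup for the whole set; devres releases the clocks on detach. */
	ret = devm_clk_bulk_get(dev, DEMO_NUM_CLKS, dd->clks);
	if (ret)
		return ret;

	/* Enables all clocks, unwinding automatically on failure. */
	return clk_bulk_prepare_enable(DEMO_NUM_CLKS, dd->clks);
}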
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->apps_reset);
+
switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -730,6 +716,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
+ default:
+ break;
}
/* Some boards don't have PCIe reset GPIO. */
@@ -743,14 +731,10 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+
switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_deassert(imx6_pcie->pciephy_reset);
- break;
case IMX7D:
- reset_control_deassert(imx6_pcie->pciephy_reset);
-
/* Workaround for ERR010728, failure of PCI-e PLL VCO to
* oscillate, especially when cold. This turns off "Duty-cycle
* Corrector" and other mysterious undocumented things.
@@ -782,11 +766,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
usleep_range(200, 500);
break;
- case IMX6Q: /* Nothing to do */
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
+ default:
break;
}
@@ -824,48 +804,25 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_ltssm_enable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2,
- IMX6Q_GPR12_PCIE_CTL_2);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_deassert(imx6_pcie->apps_reset);
- break;
- }
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ drvdata->ltssm_mask);
+
+ reset_control_deassert(imx6_pcie->apps_reset);
}
static void imx6_pcie_ltssm_disable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- }
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ drvdata->ltssm_mask, 0);
+
+ reset_control_assert(imx6_pcie->apps_reset);
}
static int imx6_pcie_start_link(struct dw_pcie *pci)
@@ -977,7 +934,11 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
+
+ if (imx6_pcie->drvdata->init_phy)
+ imx6_pcie->drvdata->init_phy(imx6_pcie);
+
+ imx6_pcie_configure_type(imx6_pcie);
ret = imx6_pcie_clk_enable(imx6_pcie);
if (ret) {
@@ -1081,14 +1042,35 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
.align = SZ_64K,
};
+/*
+ * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
+ * ================================================================================================
+ * BAR0 | Enable             | 64-bit           | 1 MB             | Programmable Size
+ * BAR1 | Disable            | 32-bit           | 64 KB            | Fixed Size
+ *        (BAR1 should be disabled if BAR0 is 64-bit.)
+ * BAR2 | Enable             | 32-bit           | 1 MB             | Programmable Size
+ * BAR3 | Enable             | 32-bit           | 64 KB            | Programmable Size
+ * BAR4 | Enable             | 32-bit           | 1 MB             | Programmable Size
+ * BAR5 | Enable             | 32-bit           | 64 KB            | Programmable Size
+ */
+static const struct pci_epc_features imx95_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .align = SZ_4K,
+};
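
The table above is consumed through the new per-BAR epc_features entries introduced elsewhere in this series. Roughly, an endpoint function driver could pick a usable BAR like this (a sketch only; the helper name and the bound epf variable are assumptions):

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Sketch only: find a BAR that is not reserved and is large enough. */
static enum pci_barno demo_pick_bar(struct pci_epf *epf, size_t want)
{
	const struct pci_epc_features *feat;
	enum pci_barno bar;

	feat = pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no);
	if (!feat)
		return NO_BAR;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		if (feat->bar[bar].type == BAR_RESERVED)
			continue;
		if (feat->bar[bar].type == BAR_FIXED &&
		    want > feat->bar[bar].fixed_size)
			continue;
		return bar;
	}

	return NO_BAR;
}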
+
static const struct pci_epc_features*
imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &imx8m_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ return imx6_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
@@ -1103,7 +1085,6 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
int ret;
unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct resource *res;
struct dw_pcie *pci = imx6_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
@@ -1122,14 +1103,20 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
pcie_dbi2_offset = SZ_4K;
break;
}
+
pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
- ep->page_size = SZ_64K;
+ /*
+	 * FIXME: Ideally, the dbi2 base address should come from DT. But since only IMX95
+	 * defines "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so
+	 * that the DWC core code can fetch it from DT. Once all platform DTs have been
+	 * fixed, this and the "dbi_base2" assignment above should be removed.
+ */
+ if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
+ pci->dbi_base2 = NULL;
+
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
ret = dw_pcie_ep_init(ep);
if (ret) {
@@ -1251,6 +1238,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
int ret;
u16 val;
+ int i;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -1304,81 +1292,48 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return imx6_pcie->reset_gpio;
}
- /* Fetch clocks */
- imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
- "pcie_bus clock source missing or invalid\n");
+ if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
+ return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
- imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
- "pcie clock source missing or invalid\n");
+ for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
+ imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
- "pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
- "pcie_inbound_axi clock missing or invalid\n");
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- fallthrough;
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ /* Fetch clocks */
+ ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (ret)
+ return ret;
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
- "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx6_pcie->pciephy_reset);
- }
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+ }
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset)) {
- dev_err(dev, "Failed to get PCIE APPS reset control\n");
- return PTR_ERR(imx6_pcie->apps_reset);
- }
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
if (IS_ERR(imx6_pcie->apps_reset))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
"failed to get pcie apps reset control\n");
+ }
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
- "failed to get pcie phy\n");
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ "Failed to get PCIEPHY reset control\n");
+ }
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX7D:
+ if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
break;
default:
break;
}
- /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */
- if (imx6_pcie->phy == NULL) {
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
- }
-
/* Grab turnoff reset */
imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
@@ -1387,12 +1342,32 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->turnoff_reset);
}
+ if (imx6_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
+ }
+
+ if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+		static const struct regmap_config regmap_config = {
+			.reg_bits = 32,
+			.val_bits = 32,
+			.reg_stride = 4,
+		};
+		void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
+
+		if (IS_ERR(off))
+			return dev_err_probe(dev, PTR_ERR(off),
+					     "unable to find serdes registers\n");
+
+ imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
@@ -1469,6 +1444,11 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
imx6_pcie_assert_core_reset(imx6_pcie);
}
+static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
+static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
+static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
+static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
+
static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
@@ -1476,6 +1456,13 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6_pcie_init_phy,
},
[IMX6SX] = {
.variant = IMX6SX,
@@ -1483,6 +1470,13 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6sx_clks,
+ .clks_cnt = ARRAY_SIZE(imx6sx_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6sx_pcie_init_phy,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1491,40 +1485,122 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6_pcie_init_phy,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
+ .clk_names = imx6q_clks,
+ .clks_cnt = ARRAY_SIZE(imx6q_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx7d_pcie_init_phy,
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .init_phy = imx8mq_pcie_init_phy,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_PHYDRV |
+ IMX6_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX6_PCIE_FLAG_HAS_PHYDRV |
+ IMX6_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ },
+ [IMX95] = {
+ .variant = IMX95,
+ .flags = IMX6_PCIE_FLAG_HAS_SERDES,
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
+ IMX6_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .init_phy = imx8mq_pcie_init_phy,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .clk_names = imx8mm_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mm_clks),
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ },
+ [IMX95_EP] = {
+ .variant = IMX95_EP,
+ .flags = IMX6_PCIE_FLAG_HAS_SERDES |
+ IMX6_PCIE_FLAG_SUPPORT_64BIT,
+ .clk_names = imx8mq_clks,
+ .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
+ .epc_features = &imx95_pcie_epc_features,
+ .mode = DW_PCIE_EP_TYPE,
},
};
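
The struct imx6_pcie_drvdata definition itself is outside the hunks shown here; judging from the initializers above, the data-driven fields added by this patch plausibly take a shape along these lines (an inferred sketch, not necessarily the exact upstream layout):

struct imx6_pcie;	/* driver-private state, defined earlier in the file */

struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;
	enum dw_pcie_device_mode mode;
	u32 flags;
	int dbi_length;
	const char *gpr;				/* syscon compatible, NULL for SERDES parts */
	const char * const *clk_names;			/* e.g. imx8mq_clks[] */
	u32 clks_cnt;
	u32 ltssm_off;					/* 0 when LTSSM is driven by apps_reset only */
	u32 ltssm_mask;
	u32 mode_off[2];				/* per-controller device-type fields */
	u32 mode_mask[2];
	const struct pci_epc_features *epc_features;	/* EP variants only */
	int (*init_phy)(struct imx6_pcie *pcie);
};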
@@ -1536,9 +1612,11 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c0c62533a3f1..844de4418724 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -924,12 +924,12 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[2] = SZ_1M,
- .bar_fixed_size[3] = SZ_64K,
- .bar_fixed_size[4] = 256,
- .bar_fixed_size[5] = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
+ .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
.align = SZ_1M,
};
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 2e398494e7c0..1f6ee1460ec2 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -250,7 +250,10 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->bar[BAR_2].only_64bit = true;
+ ls_epc->bar[BAR_3].type = BAR_RESERVED;
+ ls_epc->bar[BAR_4].only_64bit = true;
+ ls_epc->bar[BAR_5].type = BAR_RESERVED;
ls_epc->linkup_notifier = true;
pcie->pci = pci;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 9a437cfce073..746a11dcb67f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -629,8 +629,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
PCI_REBAR_CTRL_NBAR_SHIFT;
+ /*
+	 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ */
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
}
/*
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d5fc31f8345f..d15a5c2d5b48 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -328,7 +328,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
- u64 *msi_vaddr;
+ u64 *msi_vaddr = NULL;
int ret;
u32 ctrl, num_ctrls;
@@ -379,15 +379,20 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
* memory.
*/
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+ if (!ret)
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
- msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
- GFP_KERNEL);
if (!msi_vaddr) {
- dev_err(dev, "Failed to alloc and map MSI data\n");
- dw_pcie_free_msi(pp);
- return -ENOMEM;
+ dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to allocate MSI address\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
}
return 0;
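
The reworked hunk above prefers a 32-bit coherent buffer for the MSI target address and only widens the mask if that allocation is impossible. The same fallback pattern in isolation (the function name is made up):

#include <linux/dma-mapping.h>

/* Sketch: prefer a 32-bit MSI target address, fall back to 64-bit. */
static void *demo_alloc_msi_target(struct device *dev, dma_addr_t *dma)
{
	void *va = NULL;

	if (!dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		va = dmam_alloc_coherent(dev, sizeof(u64), dma, GFP_KERNEL);

	if (!va) {
		/* 32-bit allocation failed or was rejected; widen the mask. */
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		va = dmam_alloc_coherent(dev, sizeof(u64), dma, GFP_KERNEL);
	}

	return va;	/* NULL means no MSI target could be allocated */
}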
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 208d3b0ba196..5e8e54f597dd 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -312,8 +312,12 @@ static const struct pci_epc_features keembay_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_16K,
};
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 2ce2a3bd932b..14772edcf0d3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -53,6 +53,7 @@
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
+#define PARF_BDF_TO_SID_CFG 0x2c00
/* ELBI registers */
#define ELBI_SYS_CTRL 0x04
@@ -120,6 +121,9 @@
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
+/* PARF_BDF_TO_SID_CFG fields */
+#define BDF_TO_SID_BYPASS BIT(0)
+
/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -229,6 +233,7 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool no_l0s;
};
struct qcom_pcie {
@@ -272,6 +277,26 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
+ u32 val;
+
+ if (!pcie->cfg->no_l0s)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -961,6 +986,7 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
@@ -1008,11 +1034,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
int i, nr_map, size = 0;
u32 smmu_sid_base;
+ u32 val;
of_get_property(dev->of_node, "iommu-map", &size);
if (!size)
return 0;
+ /* Enable BDF to SID translation by disabling bypass mode (default) */
+ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
+ val &= ~BDF_TO_SID_BYPASS;
+ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
+
map = kzalloc(size, GFP_KERNEL);
if (!map)
return -ENOMEM;
@@ -1358,6 +1390,11 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
+static const struct qcom_pcie_cfg cfg_sc8280xp = {
+ .ops = &ops_1_9_0,
+ .no_l0s = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1629,11 +1666,11 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
- { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
@@ -1642,6 +1679,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index e9166619b1f9..0be760ed420b 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -383,7 +383,9 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_1M,
};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 7afa9e9aabe2..1f7b662cb8e1 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -2007,9 +2007,13 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
- .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[0] = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 3fced0d3e851..639bc2e12476 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -411,8 +411,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 16,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
- .reserved_bar = BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
@@ -425,7 +429,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 12,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 1eaffff40b8d..5992280e8110 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -49,6 +49,7 @@
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
+#include <linux/sizes.h>
#include <asm/mshyperv.h>
/*
@@ -465,7 +466,7 @@ struct pci_eject_response {
u32 status;
} __packed;
-static int pci_ring_size = (4 * PAGE_SIZE);
+static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
/*
* Driver specific state.
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 5b0730c3891b..c08683febdd4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -336,7 +336,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
readl(base + PCIE_RC_DL_MDIO_ADDR);
writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
- err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
MDIO_WT_DONE(data), 10, 100);
return err;
}
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index e6909271def7..05967c6c0b42 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -440,11 +440,15 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
.msi_capable = true,
.msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
- .bar_fixed_size[0] = 128,
- .bar_fixed_size[2] = 256,
- .bar_fixed_size[4] = 256,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
new file mode 100644
index 000000000000..2c562b9eaf80
--- /dev/null
+++ b/drivers/pci/devres.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/pci.h>
+#include "pci.h"
+
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+ struct resource **res = ptr;
+
+ pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr)
+{
+ const struct resource **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pci_remap_iospace(res, phys_addr);
+ if (error) {
+ devres_free(ptr);
+ } else {
+ *ptr = res;
+ devres_add(dev, ptr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
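
A usage sketch for the managed helper above, assuming a host-bridge driver whose I/O windows were already parsed into bridge->windows (the function name is invented and window enumeration details vary between drivers):

#include <linux/pci.h>
#include <linux/platform_device.h>

static int demo_map_io_windows(struct platform_device *pdev,
			       struct pci_host_bridge *bridge)
{
	struct resource_entry *win;
	int ret;

	resource_list_for_each_entry(win, &bridge->windows) {
		if (resource_type(win->res) != IORESOURCE_IO)
			continue;

		/* Unmapped automatically when the driver detaches. */
		ret = devm_pci_remap_iospace(&pdev->dev, win->res,
					     pci_pio_to_address(win->res->start));
		if (ret)
			return ret;
	}

	return 0;
}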
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = pci_remap_cfgspace(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it with the pci_remap_cfgspace() API, which
+ * guarantees the proper PCI configuration space memory attributes.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example::
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ if (res->name)
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
+ res->name);
+ else
+ name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!name)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
+
+/**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->mwi = 1;
+ return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+
+static void pcim_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pci_devres *this = res;
+ int i;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (this->region_mask & (1 << i))
+ pci_release_region(dev, i);
+
+ if (this->mwi)
+ pci_clear_mwi(dev);
+
+ if (this->restore_intx)
+ pci_intx(dev, this->orig_intx);
+
+ if (this->enabled && !this->pinned)
+ pci_disable_device(dev);
+}
+
+/*
+ * TODO: After the last four callers in pci.c are ported, find_pci_dr()
+ * needs to be made static again.
+ */
+struct pci_devres *find_pci_dr(struct pci_dev *pdev)
+{
+ if (pci_is_managed(pdev))
+ return devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ return NULL;
+}
+
+static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
+{
+ struct pci_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ if (dr)
+ return dr;
+
+ new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ return devres_get(&pdev->dev, new_dr, NULL, NULL);
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Managed pci_enable_device().
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int rc;
+
+ dr = get_pci_dr(pdev);
+ if (unlikely(!dr))
+ return -ENOMEM;
+ if (dr->enabled)
+ return 0;
+
+ rc = pci_enable_device(pdev);
+ if (!rc) {
+ pdev->is_managed = 1;
+ dr->enabled = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(pcim_enable_device);
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on
+ * driver detach. @pdev must have been enabled with
+ * pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(pdev);
+ WARN_ON(!dr || !dr->enabled);
+ if (dr)
+ dr->pinned = 1;
+}
+EXPORT_SYMBOL(pcim_pin_device);
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access the iomap allocation table for @pdev. If the iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and is guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_inval;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_region;
+ }
+
+ return 0;
+
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ if (!(mask & (1 << i)))
+ continue;
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
+ const char *name)
+{
+ int request_mask = ((1 << 6) - 1) & ~mask;
+ int rc;
+
+ rc = pci_request_selected_regions(pdev, request_mask, name);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, mask, name);
+ if (rc)
+ pci_release_selected_regions(pdev, request_mask);
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
+{
+ void __iomem * const *iomap;
+ int i;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
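
Taken together, the helpers in this new file support the usual fully managed probe pattern; a sketch (the device IDs, BAR choice and "demo" name are placeholders, not part of the patch):

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	/* The device is disabled again automatically on driver detach. */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Request and iomap BAR 0; regions and mappings are devres-managed. */
	ret = pcim_iomap_regions(pdev, BIT(0), "demo");
	if (ret)
		return ret;

	regs = pcim_iomap_table(pdev)[0];
	if (!regs)
		return -ENOMEM;

	/* ... program the device through "regs"; no explicit cleanup needed. */
	return 0;
}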
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 1c3e4ea76bd2..2c54d80107cf 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -123,6 +123,22 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
.flags = MHI_EPF_USE_DMA,
};
+static struct pci_epf_header sa8775p_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sa8775p_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sa8775p_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+};
+
struct pci_epf_mhi {
const struct pci_epc_features *epc_features;
const struct pci_epf_mhi_ep_info *info;
@@ -913,8 +929,9 @@ static int pci_epf_mhi_probe(struct pci_epf *epf,
}
static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
- { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
- { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
+ { .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info },
+ { .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
+ { .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
{},
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 0553946005c4..e01a98e74d21 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1012,13 +1012,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
peer_ntb_epc = ntb->epc[!type];
peer_epc_features = peer_ntb_epc->epc_features;
peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
- peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+ peer_size = peer_epc_features->bar[peer_barno].fixed_size;
/* Check if epc_features is populated incorrectly */
if ((!IS_ALIGNED(size, align)))
@@ -1067,7 +1067,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, type);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, type);
if (!base) {
dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
pci_epc_interface_string(type));
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 18c80002d3bd..cd4ffb39dcdc 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -729,7 +729,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
*/
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ if (epc_features->bar[bar].type == BAR_RESERVED)
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
@@ -841,14 +841,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
- if (epc_features->bar_fixed_size[test_reg_bar]) {
- if (test_reg_size > bar_size[test_reg_bar])
- return -ENOMEM;
- test_reg_size = bar_size[test_reg_bar];
- }
-
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align, PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -862,12 +856,11 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (bar == test_reg_bar)
continue;
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ if (epc_features->bar[bar].type == BAR_RESERVED)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align,
- PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -881,16 +874,12 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
const struct pci_epc_features *epc_features)
{
struct pci_epf_bar *epf_bar;
- bool bar_fixed_64bit;
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
- bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
- if (bar_fixed_64bit)
+ if (epc_features->bar[i].only_64bit)
epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
- if (epc_features->bar_fixed_size[i])
- bar_size[i] = epc_features->bar_fixed_size[i];
}
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index e75a2af77328..8e779eecd62d 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -422,7 +422,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
if ((!IS_ALIGNED(size, align)))
@@ -446,7 +446,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
@@ -527,7 +527,6 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
- u32 align;
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
@@ -538,19 +537,9 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
epc_features = pci_epc_get_features(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no);
- align = epc_features->align;
-
- if (size < 128)
- size = 128;
-
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
-
barno = ntb->epf_ntb_bar[BAR_DB];
- mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
if (!mw_addr) {
dev_err(dev, "Failed to allocate OB address\n");
return -ENOMEM;
@@ -1269,21 +1258,17 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
- return -EINVAL;
+ return ret;
}
ret = ntb_register_device(&ndev->ntb);
if (ret) {
dev_err(dev, "Failed to register NTB device\n");
- goto err_register_dev;
+ return ret;
}
dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
return 0;
-
-err_register_dev:
- put_device(&ndev->ntb.dev);
- return -EINVAL;
}
static struct pci_device_id pci_vntb_table[] = {
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index dcd4e66430c1..da3fc0795b0b 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
* @epc_features: pci_epc_features structure that holds the reserved bar bitmap
*
* Invoke to get the first unreserved BAR that can be used by the endpoint
- * function. For any incorrect value in reserved_bar return '0'.
+ * function.
*/
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
@@ -102,32 +102,27 @@ EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
* @bar: the starting BAR number from where unreserved BAR should be searched
*
* Invoke to get the next unreserved BAR starting from @bar that can be used
- * for endpoint function. For any incorrect value in reserved_bar return '0'.
+ * for endpoint function.
*/
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
*epc_features, enum pci_barno bar)
{
- unsigned long free_bar;
+ int i;
if (!epc_features)
return BAR_0;
/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
- if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
bar++;
- /* Find if the reserved BAR is also a 64-bit BAR */
- free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
-
- /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
- free_bar <<= 1;
- free_bar |= epc_features->reserved_bar;
-
- free_bar = find_next_zero_bit(&free_bar, 6, bar);
- if (free_bar > 5)
- return NO_BAR;
+ for (i = bar; i < PCI_STD_NUM_BARS; i++) {
+ /* If the BAR is not reserved, return it. */
+ if (epc_features->bar[i].type != BAR_RESERVED)
+ return i;
+ }
- return free_bar;
+ return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
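
With the rewrite above, iterating the BARs an endpoint controller leaves available becomes a simple walk over the feature table; for example (a sketch, the function name is made up):

static void demo_list_free_bars(const struct pci_epc_features *feat)
{
	enum pci_barno bar;

	for (bar = pci_epc_get_first_free_bar(feat); bar != NO_BAR;
	     bar = pci_epc_get_next_free_bar(feat, bar + 1))
		pr_info("BAR%d is free for the endpoint function\n", bar);
}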
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 2c32de667937..0a28a0b0911b 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -17,7 +17,7 @@
static DEFINE_MUTEX(pci_epf_mutex);
-static struct bus_type pci_epf_bus_type;
+static const struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
@@ -251,14 +251,17 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @epf: the EPF device to whom allocate the memory
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
- * @align: alignment size for the allocation region
+ * @epc_features: the features provided by the EPC specific to this EPF
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align, enum pci_epc_interface_type type)
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type)
{
+ u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
+ size_t align = epc_features->align;
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
@@ -268,6 +271,15 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
if (size < 128)
size = 128;
+ if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
+ if (size > bar_fixed_size) {
+ dev_err(&epf->dev,
+ "requested BAR size is larger than fixed size\n");
+ return NULL;
+ }
+ size = bar_fixed_size;
+ }
+
if (align)
size = ALIGN(size, align);
else
@@ -507,7 +519,7 @@ static void pci_epf_device_remove(struct device *dev)
epf->driver = NULL;
}
-static struct bus_type pci_epf_bus_type = {
+static const struct bus_type pci_epf_bus_type = {
.name = "pci-epf",
.match = pci_epf_device_match,
.probe = pci_epf_device_probe,
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index a89b7de72dcf..7333b305f2a5 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -26,58 +26,79 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
hotplug_slot);
int rc;
- if (zdev->state != ZPCI_FN_STATE_STANDBY)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_STANDBY) {
+ rc = -EIO;
+ goto out;
+ }
rc = sclp_pci_configure(zdev->fid);
zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc);
if (rc)
- return rc;
+ goto out;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- return zpci_scan_configured_device(zdev, zdev->fh);
+ rc = zpci_scan_configured_device(zdev, zdev->fh);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
+ int rc;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED) {
+ rc = -EIO;
+ goto out;
+ }
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
pci_dev_put(pdev);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
- pci_dev_put(pdev);
- return zpci_deconfigure_device(zdev);
+ rc = zpci_deconfigure_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ if (pdev)
+ pci_dev_put(pdev);
+ return rc;
}
static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
+ int rc = -EIO;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
/*
- * We can't take the zdev->lock as reset_slot may be called during
- * probing and/or device removal which already happens under the
- * zdev->lock. Instead the user should use the higher level
- * pci_reset_function() or pci_bus_reset() which hold the PCI device
- * lock preventing concurrent removal. If not using these functions
- * holding the PCI device lock is required.
+ * If we can't get the zdev->state_lock, the device state is currently
+ * undergoing a transition and we bail out, just as if the device's
+ * state were not configured at all.
*/
+ if (!mutex_trylock(&zdev->state_lock))
+ return rc;
- /* As long as the function is configured we can reset */
- if (probe)
- return 0;
+ /* We can reset only if the function is configured */
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+ goto out;
+
+ if (probe) {
+ rc = 0;
+ goto out;
+ }
- return zpci_hot_reset_device(zdev);
+ rc = zpci_hot_reset_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
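
The reset path above cannot sleep on state_lock because it may race with probe or removal, so it bails out when the lock is contended. A generic sketch of that trylock-or-bail pattern, using a hypothetical driver structure (my_dev and my_hot_reset are made up):

/* Sketch only: bail out instead of blocking while the state is in flux. */
#include <linux/errno.h>
#include <linux/mutex.h>

struct my_dev {                         /* hypothetical device state */
        struct mutex state_lock;
        bool configured;
};

static int my_hot_reset(struct my_dev *mdev) { return 0; }      /* stub */

static int do_reset_if_stable(struct my_dev *mdev)
{
        int rc = -EIO;

        /*
         * A contended lock means a state transition is in progress:
         * report failure rather than sleeping, the same as if the
         * device were not configured at all.
         */
        if (!mutex_trylock(&mdev->state_lock))
                return rc;

        if (mdev->configured)
                rc = my_hot_reset(mdev);

        mutex_unlock(&mdev->state_lock);
        return rc;
}
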
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
new file mode 100644
index 000000000000..c9725428e387
--- /dev/null
+++ b/drivers/pci/iomap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <linux/export.h>
+
+/**
+ * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (len <= offset || !start)
+ return NULL;
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return __pci_ioport_map(dev, start, len);
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap_range);
+
+/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way. When possible,
+ * write combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way. When possible,
+ * write combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+#endif
+ iounmap(p);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
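
For reference, this is how the helpers above are normally consumed by a driver. A hedged probe/remove sketch (the version register offset and the shallow error handling are made up):

/* Illustrative sketch: mapping and using a BAR with pci_iomap(). */
#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem *regs;
        u32 ver;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        /* Map all of BAR 0; passing 0 as maxlen skips the length check. */
        regs = pci_iomap(pdev, 0, 0);
        if (!regs) {
                pci_disable_device(pdev);
                return -ENOMEM;
        }

        ver = ioread32(regs + 0x0);     /* hypothetical version register */
        pci_info(pdev, "device version %#x\n", ver);

        pci_set_drvdata(pdev, regs);
        return 0;
}

static void my_remove(struct pci_dev *pdev)
{
        void __iomem *regs = pci_get_drvdata(pdev);

        pci_iounmap(pdev, regs);
        pci_disable_device(pdev);
}
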
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 0050e8f6814e..4555630be9ec 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -8,9 +8,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
+#include "pci.h"
+
/**
* pci_request_irq - allocate an interrupt line for a PCI device
* @dev: PCI device to operate on
@@ -74,3 +78,203 @@ void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
}
EXPORT_SYMBOL(pci_free_irq);
+
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge. This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
+ */
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
+{
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
+}
+
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+{
+ u8 pin;
+
+ pin = dev->pin;
+ if (!pin)
+ return -1;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *bridge = dev;
+ return pin;
+}
+
+/**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return PCI_SLOT(dev->devfn);
+}
+EXPORT_SYMBOL_GPL(pci_common_swizzle);
+
+void pci_assign_irq(struct pci_dev *dev)
+{
+ u8 pin;
+ u8 slot = -1;
+ int irq = 0;
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+ if (!(hbrg->map_irq)) {
+ pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
+ return;
+ }
+
+ /*
+ * If this device is not on the primary bus, we need to figure out
+ * which interrupt pin it will come in on. We know which slot it
+ * will come in on because that slot is where the bridge is. Each
+ * time the interrupt line passes through a PCI-PCI bridge we must
+ * apply the swizzle function.
+ */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ /* Cope with illegal values. */
+ if (pin > 4)
+ pin = 1;
+
+ if (pin) {
+ /* Follow the chain of bridges, swizzling as we go. */
+ if (hbrg->swizzle_irq)
+ slot = (*(hbrg->swizzle_irq))(dev, &pin);
+
+ /*
+ * If a swizzling function is not used, map_irq() must
+ * ignore slot.
+ */
+ irq = (*(hbrg->map_irq))(dev, slot, pin);
+ if (irq == -1)
+ irq = 0;
+ }
+ dev->irq = irq;
+
+ pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
+
+ /*
+ * Always tell the device, so the driver knows the real IRQ to use;
+ * the device itself does not use this value.
+ */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, mask it and return
+ * true in that case. False is returned if no interrupt was pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, unmask it if not and
+ * return true. False is returned and the mask remains active if there was
+ * still an interrupt pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
+
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
+
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
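
As a worked example of the swizzle formula, INTB (pin 2) from a device in slot 3 reaches the bridge as pin ((2 - 1 + 3) % 4) + 1 = 1, i.e. INTA. The check-and-mask helpers at the end are typically used by drivers that service a shared INTx line from a threaded handler; a hedged sketch follows (handler names are hypothetical, and a real driver would register them with request_threaded_irq()):

/* Illustrative sketch: masking a shared INTx line until it is serviced. */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_intx_handler(int irq, void *data)
{
        struct pci_dev *pdev = data;

        /* Returns false if our device did not assert INTx (shared line). */
        if (!pci_check_and_mask_intx(pdev))
                return IRQ_NONE;

        /* INTx is now masked; defer the real work to the thread. */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t my_intx_thread(int irq, void *data)
{
        struct pci_dev *pdev = data;

        /* ... service the device here (hypothetical) ... */

        /* Unmask only if no new interrupt is already pending. */
        if (!pci_check_and_unmask_intx(pdev))
                pci_info(pdev, "interrupt already pending again\n");

        return IRQ_HANDLED;
}
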
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index 4504039056d1..8da3347a95c4 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -11,6 +11,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
+#include "pci.h"
+
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
static const struct vm_operations_struct pci_phys_vm_ops = {
@@ -50,3 +52,30 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
}
#endif
+
+#if (defined(CONFIG_SYSFS) || defined(CONFIG_PROC_FS)) && \
+ (defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE))
+
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
+{
+ resource_size_t pci_start = 0, pci_end;
+ unsigned long nr, start, size;
+
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
+ nr = vma_pages(vma);
+ start = vma->vm_pgoff;
+ size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
+ return 1;
+ return 0;
+}
+
+#endif
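
The fit check is pure page arithmetic: the BAR length is rounded up to whole pages, and the vma must start and end inside that window (for procfs the user-visible start of the resource is subtracted first, which the simplified demo below ignores). A standalone illustration with made-up numbers:

/* Standalone demo of the page arithmetic used by pci_mmap_fits(). */
#include <stdio.h>

#define PAGE_SHIFT 12UL

static int mmap_fits(unsigned long bar_len, unsigned long vma_pgoff,
                     unsigned long vma_pages)
{
        /* BAR length rounded up to whole pages, as in the kernel code. */
        unsigned long size = ((bar_len - 1) >> PAGE_SHIFT) + 1;

        return vma_pgoff < size && vma_pgoff + vma_pages <= size;
}

int main(void)
{
        /* A 0x1234-byte BAR spans two 4 KiB pages ... */
        printf("%d\n", mmap_fits(0x1234, 0, 2));        /* 1: fits        */
        printf("%d\n", mmap_fits(0x1234, 1, 2));        /* 0: runs past   */
        printf("%d\n", mmap_fits(0x1234, 2, 1));        /* 0: starts past */
        return 0;
}
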
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c8be056c248d..cfd84a899c82 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
return (irq_hw_number_t)desc->msi_index |
pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
}
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
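
The added cast matters because the expression was previously evaluated in 32-bit arithmetic and only widened afterwards, so shifting the domain number left by 27 silently dropped its upper bits. A standalone demonstration with an arbitrary domain value:

/* Demo: why the domain number must be widened before the << 27 shift. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t domain = 0x40;         /* any value >= 0x20 loses bits */
        uint64_t truncated = (domain & 0xFFFFFFFF) << 27;          /* 32-bit shift */
        uint64_t widened   = ((uint64_t)(domain & 0xFFFFFFFF)) << 27;

        printf("truncated: %#llx\n", (unsigned long long)truncated);    /* 0 */
        printf("widened:   %#llx\n", (unsigned long long)widened);      /* 0x200000000 */
        return 0;
}
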
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0c361561b855..4f47a13cb500 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -661,7 +661,7 @@ done:
p2pdma = rcu_dereference(provider->p2pdma);
if (p2pdma)
xa_store(&p2pdma->map_types, map_types_idx(client),
- xa_mk_value(map_type), GFP_KERNEL);
+ xa_mk_value(map_type), GFP_ATOMIC);
rcu_read_unlock();
return map_type;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 51ec9e7e784f..af2996d0d17f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -419,15 +419,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
-int __weak pcibios_alloc_irq(struct pci_dev *dev)
-{
- return 0;
-}
-
-void __weak pcibios_free_irq(struct pci_dev *dev)
-{
-}
-
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
@@ -473,6 +464,13 @@ static void pci_device_remove(struct device *dev)
if (drv->remove) {
pm_runtime_get_sync(dev);
+ /*
+ * If the driver provides a .runtime_idle() callback and it has
+ * started to run already, it may continue to run in parallel
+ * with the code below, so wait until all of the runtime PM
+ * activity has completed.
+ */
+ pm_runtime_barrier(dev);
drv->remove(pci_dev);
pm_runtime_put_noidle(dev);
}
@@ -1382,10 +1380,7 @@ static int pci_pm_runtime_idle(struct device *dev)
if (!pci_dev->driver)
return 0;
- if (!pm)
- return -ENOSYS;
-
- if (pm->runtime_idle)
+ if (pm && pm->runtime_idle)
return pm->runtime_idle(dev);
return 0;
@@ -1714,7 +1709,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
-struct bus_type pcie_port_bus_type = {
+const struct bus_type pcie_port_bus_type = {
.name = "pci_express",
.match = pcie_port_bus_match,
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 2321fdfefd7d..40cfa716392f 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1022,29 +1022,6 @@ void pci_remove_legacy_files(struct pci_bus *b)
#endif /* HAVE_PCI_LEGACY */
#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
-
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
- enum pci_mmap_api mmap_api)
-{
- unsigned long nr, start, size;
- resource_size_t pci_start = 0, pci_end;
-
- if (pci_resource_len(pdev, resno) == 0)
- return 0;
- nr = vma_pages(vma);
- start = vma->vm_pgoff;
- size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (mmap_api == PCI_MMAP_PROCFS) {
- pci_resource_to_user(pdev, resno, &pdev->resource[resno],
- &pci_start, &pci_end);
- pci_start >>= PAGE_SHIFT;
- }
- if (start >= pci_start && start < pci_start + size &&
- start + nr <= pci_start + size)
- return 1;
- return 0;
-}
-
/**
* pci_mmap_resource - map a PCI resource into user memory space
* @kobj: kobject for mapping
@@ -1410,79 +1387,89 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
+
+ pci_config_pm_runtime_get(pdev);
+
+ ret = sysfs_emit(buf, "%016llx\n",
+ (u64)pci_rebar_get_possible_sizes(pdev, n));
+
+ pci_config_pm_runtime_put(pdev);
+
+ return ret;
+}
+
+static ssize_t __resource_resize_store(struct device *dev, int n,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long size, flags;
+ int ret, i;
+ u16 cmd;
+
+ if (kstrtoul(buf, 0, &size) < 0)
+ return -EINVAL;
+
+ device_lock(dev);
+ if (dev->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ pci_config_pm_runtime_get(pdev);
+
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+ ret = aperture_remove_conflicting_pci_devices(pdev,
+ "resourceN_resize");
+ if (ret)
+ goto pm_put;
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+
+ flags = pci_resource_flags(pdev, n);
+
+ pci_remove_resource_files(pdev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) &&
+ pci_resource_flags(pdev, i) == flags)
+ pci_release_resource(pdev, i);
+ }
+
+ ret = pci_resize_resource(pdev, n, size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+
+ if (pci_create_resource_files(pdev))
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+pm_put:
+ pci_config_pm_runtime_put(pdev);
+unlock:
+ device_unlock(dev);
+
+ return ret ? ret : count;
+}
+
#define pci_dev_resource_resize_attr(n) \
static ssize_t resource##n##_resize_show(struct device *dev, \
struct device_attribute *attr, \
- char * buf) \
+ char *buf) \
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- ssize_t ret; \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- ret = sysfs_emit(buf, "%016llx\n", \
- (u64)pci_rebar_get_possible_sizes(pdev, n)); \
- \
- pci_config_pm_runtime_put(pdev); \
- \
- return ret; \
+ return __resource_resize_show(dev, n, buf); \
} \
- \
static ssize_t resource##n##_resize_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- unsigned long size, flags; \
- int ret, i; \
- u16 cmd; \
- \
- if (kstrtoul(buf, 0, &size) < 0) \
- return -EINVAL; \
- \
- device_lock(dev); \
- if (dev->driver) { \
- ret = -EBUSY; \
- goto unlock; \
- } \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
- ret = aperture_remove_conflicting_pci_devices(pdev, \
- "resourceN_resize"); \
- if (ret) \
- goto pm_put; \
- } \
- \
- pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
- pci_write_config_word(pdev, PCI_COMMAND, \
- cmd & ~PCI_COMMAND_MEMORY); \
- \
- flags = pci_resource_flags(pdev, n); \
- \
- pci_remove_resource_files(pdev); \
- \
- for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
- if (pci_resource_len(pdev, i) && \
- pci_resource_flags(pdev, i) == flags) \
- pci_release_resource(pdev, i); \
- } \
- \
- ret = pci_resize_resource(pdev, n, size); \
- \
- pci_assign_unassigned_bus_resources(pdev->bus); \
- \
- if (pci_create_resource_files(pdev)) \
- pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
- \
- pci_write_config_word(pdev, PCI_COMMAND, cmd); \
-pm_put: \
- pci_config_pm_runtime_put(pdev); \
-unlock: \
- device_unlock(dev); \
- \
- return ret ? ret : count; \
+ return __resource_resize_store(dev, n, buf, count); \
} \
static DEVICE_ATTR_RW(resource##n##_resize)
@@ -1660,7 +1647,7 @@ static const struct attribute_group pcie_dev_attr_group = {
.is_visible = pcie_dev_attrs_are_visible,
};
-static const struct attribute_group *pci_dev_attr_groups[] = {
+const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
@@ -1677,7 +1664,3 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#endif
NULL,
};
-
-const struct device_type pci_dev_type = {
- .groups = pci_dev_attr_groups,
-};
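
The rewrite keeps the generated resource<N>_resize_show/store attribute names but moves the real work into ordinary functions, so the macro only emits thin wrappers. A standalone sketch of the same macro-delegates-to-helper pattern, with made-up names:

/* Demo: generate thin per-index wrappers around a shared helper. */
#include <stdio.h>

static int __resize_show(int n, char *buf, size_t len)
{
        /* Shared implementation; only the index differs per wrapper. */
        return snprintf(buf, len, "resource%d\n", n);
}

#define DEFINE_RESIZE_SHOW(n)                                           \
static int resource##n##_resize_show(char *buf, size_t len)             \
{                                                                       \
        return __resize_show(n, buf, len);                              \
}

DEFINE_RESIZE_SHOW(0)
DEFINE_RESIZE_SHOW(1)

int main(void)
{
        char buf[32];

        resource0_resize_show(buf, sizeof(buf));
        fputs(buf, stdout);
        resource1_resize_show(buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
}
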
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c3585229c12a..e5f243dd4288 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -1068,6 +1067,34 @@ disable_acs_redir:
}
/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @tlp_log: TLP Log structure to fill
+ *
+ * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success and filled TLP Log structure, <0 on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where,
+ struct pcie_tlp_log *tlp_log)
+{
+ int i, ret;
+
+ memset(tlp_log, 0, sizeof(*tlp_log));
+
+ for (i = 0; i < 4; i++) {
+ ret = pci_read_config_dword(dev, where + i * 4,
+ &tlp_log->dw[i]);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
+
+/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -1649,25 +1676,10 @@ static int pci_save_pcie_state(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
- return 0;
-}
-
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_dev *bridge;
- u32 ctl;
+ pci_save_aspm_l1ss_state(dev);
+ pci_save_ltr_state(dev);
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
- if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
- pci_dbg(bridge, "re-enabling LTR\n");
- pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- }
- }
-#endif
+ return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
@@ -1676,6 +1688,13 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
struct pci_cap_saved_state *save_state;
u16 *cap;
+ /*
+ * Restore max latencies (in the LTR capability) before enabling
+ * LTR itself in PCI_EXP_DEVCTL2.
+ */
+ pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
+
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state)
return;
@@ -1733,46 +1752,6 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
-static void pci_save_ltr_state(struct pci_dev *dev)
-{
- int ltr;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!ltr)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
- return;
- }
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
-}
-
-static void pci_restore_ltr_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- int ltr;
- u32 *cap;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state || !ltr)
- return;
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
-}
-
/**
* pci_save_state - save the PCI configuration space of a device before
* suspending
@@ -1797,7 +1776,6 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
- pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1898,12 +1876,6 @@ void pci_restore_state(struct pci_dev *dev)
if (!dev->state_saved)
return;
- /*
- * Restore max latencies (in the LTR capability) before enabling
- * LTR itself (in the PCIe capability).
- */
- pci_restore_ltr_state(dev);
-
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
pci_restore_pri_state(dev);
@@ -2184,107 +2156,6 @@ int pci_enable_device(struct pci_dev *dev)
EXPORT_SYMBOL(pci_enable_device);
/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- */
-struct pci_devres {
- unsigned int enabled:1;
- unsigned int pinned:1;
- unsigned int orig_intx:1;
- unsigned int restore_intx:1;
- unsigned int mwi:1;
- u32 region_mask;
-};
-
-static void pcim_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pci_devres *this = res;
- int i;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (this->region_mask & (1 << i))
- pci_release_region(dev, i);
-
- if (this->mwi)
- pci_clear_mwi(dev);
-
- if (this->restore_intx)
- pci_intx(dev, this->orig_intx);
-
- if (this->enabled && !this->pinned)
- pci_disable_device(dev);
-}
-
-static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
-{
- struct pci_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
- if (dr)
- return dr;
-
- new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
- if (!new_dr)
- return NULL;
- return devres_get(&pdev->dev, new_dr, NULL, NULL);
-}
-
-static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
-{
- if (pci_is_managed(pdev))
- return devres_find(&pdev->dev, pcim_release, NULL, NULL);
- return NULL;
-}
-
-/**
- * pcim_enable_device - Managed pci_enable_device()
- * @pdev: PCI device to be initialized
- *
- * Managed pci_enable_device().
- */
-int pcim_enable_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
- int rc;
-
- dr = get_pci_dr(pdev);
- if (unlikely(!dr))
- return -ENOMEM;
- if (dr->enabled)
- return 0;
-
- rc = pci_enable_device(pdev);
- if (!rc) {
- pdev->is_managed = 1;
- dr->enabled = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL(pcim_enable_device);
-
-/**
- * pcim_pin_device - Pin managed PCI device
- * @pdev: PCI device to pin
- *
- * Pin managed PCI device @pdev. Pinned device won't be disabled on
- * driver detach. @pdev must have been enabled with
- * pcim_enable_device().
- */
-void pcim_pin_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(pdev);
- WARN_ON(!dr || !dr->enabled);
- if (dr)
- dr->pinned = 1;
-}
-EXPORT_SYMBOL(pcim_pin_device);
-
-/*
* pcibios_device_add - provide arch specific hooks when adding device dev
* @dev: the PCI device being added
*
@@ -2318,17 +2189,6 @@ void __weak pcibios_release_device(struct pci_dev *dev) {}
*/
void __weak pcibios_disable_device(struct pci_dev *dev) {}
-/**
- * pcibios_penalize_isa_irq - penalize an ISA IRQ
- * @irq: ISA IRQ to penalize
- * @active: IRQ active or not
- *
- * Permits the platform to provide architecture-specific functionality when
- * penalizing ISA IRQs. This is the default implementation. Architecture
- * implementations can override this.
- */
-void __weak pcibios_penalize_isa_irq(int irq, int active) {}
-
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
@@ -2532,7 +2392,7 @@ static void pci_pme_list_scan(struct work_struct *work)
* course of the call.
*/
if (bdev) {
- bref = pm_runtime_get_if_active(bdev, true);
+ bref = pm_runtime_get_if_active(bdev);
if (!bref)
continue;
@@ -3998,66 +3858,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
/**
- * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
- * @dev: the PCI device
- * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
- *
- * Perform INTx swizzling for a device behind one level of bridge. This is
- * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards. For devices with ARI enabled, the slot
- * number is always 0 (see the Implementation Note in section 2.2.8.1 of
- * the PCI Express Base Specification, Revision 2.1)
- */
-u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
-{
- int slot;
-
- if (pci_ari_enabled(dev->bus))
- slot = 0;
- else
- slot = PCI_SLOT(dev->devfn);
-
- return (((pin - 1) + slot) % 4) + 1;
-}
-
-int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
-{
- u8 pin;
-
- pin = dev->pin;
- if (!pin)
- return -1;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *bridge = dev;
- return pin;
-}
-
-/**
- * pci_common_swizzle - swizzle INTx all the way to root bridge
- * @dev: the PCI device
- * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
- *
- * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
- * bridges all the way up to a PCI root bus.
- */
-u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
-{
- u8 pin = *pinp;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *pinp = pin;
- return PCI_SLOT(dev->devfn);
-}
-EXPORT_SYMBOL_GPL(pci_common_swizzle);
-
-/**
* pci_release_region - Release a PCI bar
* @pdev: PCI device whose resources were previously reserved by
* pci_request_region()
@@ -4353,8 +4153,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
- pgprot_device(PAGE_KERNEL));
+ return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
#else
/*
* This architecture does not have memory mapped I/O space,
@@ -4385,133 +4185,6 @@ void pci_unmap_iospace(struct resource *res)
}
EXPORT_SYMBOL(pci_unmap_iospace);
-static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
-{
- struct resource **res = ptr;
-
- pci_unmap_iospace(*res);
-}
-
-/**
- * devm_pci_remap_iospace - Managed pci_remap_iospace()
- * @dev: Generic device to remap IO address for
- * @res: Resource describing the I/O space
- * @phys_addr: physical address of range to be mapped
- *
- * Managed pci_remap_iospace(). Map is automatically unmapped on driver
- * detach.
- */
-int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
- phys_addr_t phys_addr)
-{
- const struct resource **ptr;
- int error;
-
- ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- error = pci_remap_iospace(res, phys_addr);
- if (error) {
- devres_free(ptr);
- } else {
- *ptr = res;
- devres_add(dev, ptr);
- }
-
- return error;
-}
-EXPORT_SYMBOL(devm_pci_remap_iospace);
-
-/**
- * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_pci_remap_cfgspace(struct device *dev,
- resource_size_t offset,
- resource_size_t size)
-{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = pci_remap_cfgspace(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfgspace);
-
-/**
- * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
- * @dev: generic device to handle the resource for
- * @res: configuration space resource to be handled
- *
- * Checks that a resource is a valid memory region, requests the memory
- * region and ioremaps with pci_remap_cfgspace() API that ensures the
- * proper PCI configuration space memory attributes are guaranteed.
- *
- * All operations are managed and will be undone on driver detach.
- *
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example::
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
- * if (IS_ERR(base))
- * return PTR_ERR(base);
- */
-void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
- struct resource *res)
-{
- resource_size_t size;
- const char *name;
- void __iomem *dest_ptr;
-
- BUG_ON(!dev);
-
- if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource\n");
- return IOMEM_ERR_PTR(-EINVAL);
- }
-
- size = resource_size(res);
-
- if (res->name)
- name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
- res->name);
- else
- name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!name)
- return IOMEM_ERR_PTR(-ENOMEM);
-
- if (!devm_request_mem_region(dev, res->start, size, name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return IOMEM_ERR_PTR(-EBUSY);
- }
-
- dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
- if (!dest_ptr) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
- devm_release_mem_region(dev, res->start, size);
- dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
- }
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
-
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4662,27 +4335,6 @@ int pci_set_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_set_mwi);
/**
- * pcim_set_mwi - a device-managed pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
- *
- * Managed pci_set_mwi().
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int pcim_set_mwi(struct pci_dev *dev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (!dr)
- return -ENOMEM;
-
- dr->mwi = 1;
- return pci_set_mwi(dev);
-}
-EXPORT_SYMBOL(pcim_set_mwi);
-
-/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
@@ -4770,78 +4422,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
}
EXPORT_SYMBOL_GPL(pci_intx);
-static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
-{
- struct pci_bus *bus = dev->bus;
- bool mask_updated = true;
- u32 cmd_status_dword;
- u16 origcmd, newcmd;
- unsigned long flags;
- bool irq_pending;
-
- /*
- * We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible.
- */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- raw_spin_lock_irqsave(&pci_lock, flags);
-
- bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
-
- irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
-
- /*
- * Check interrupt status register to see whether our device
- * triggered the interrupt (when masking) or the next IRQ is
- * already pending (when unmasking).
- */
- if (mask != irq_pending) {
- mask_updated = false;
- goto done;
- }
-
- origcmd = cmd_status_dword;
- newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
- if (mask)
- newcmd |= PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
-
-done:
- raw_spin_unlock_irqrestore(&pci_lock, flags);
-
- return mask_updated;
-}
-
-/**
- * pci_check_and_mask_intx - mask INTx on pending interrupt
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, mask it and return
- * true in that case. False is returned if no interrupt was pending.
- */
-bool pci_check_and_mask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, true);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
-
-/**
- * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, unmask it if not and
- * return true. False is returned and the mask remains active if there was
- * still an interrupt pending.
- */
-bool pci_check_and_unmask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, false);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
-
/**
* pci_wait_for_pending_transaction - wait for pending transaction
* @dev: the PCI device to operate on
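
pcie_read_tlp_log() gives AER and DPC a single helper for the four-dword TLP Header Log instead of four open-coded config reads. A hedged sketch of a caller (the surrounding error-status handling is omitted):

/* Illustrative sketch: dumping the AER TLP Header Log via the new helper. */
static void my_dump_aer_header_log(struct pci_dev *dev)
{
        struct pcie_tlp_log log;
        int aer = dev->aer_cap;

        if (!aer)
                return;

        if (pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &log))
                return;

        pci_err(dev, "TLP Header: %08x %08x %08x %08x\n",
                log.dw[0], log.dw[1], log.dw[2], log.dw[3]);
}
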
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e9750b1b19ba..17fed1846847 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -31,9 +31,6 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
/* Functions internal to the PCI core code */
-int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-void pci_cleanup_rom(struct pci_dev *dev);
#ifdef CONFIG_DMI
extern const struct attribute_group pci_dev_smbios_attr_group;
#endif
@@ -97,7 +94,6 @@ void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
static inline void pci_wakeup_event(struct pci_dev *dev)
@@ -152,7 +148,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
-#ifdef HAVE_PCI_LEGACY
+#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
void pci_remove_legacy_files(struct pci_bus *bus);
#else
@@ -185,10 +181,22 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
+
+#ifdef CONFIG_SYSFS
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern const struct attribute_group *pci_dev_groups[];
+extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
+#else
+static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
+static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
+#define pci_dev_groups NULL
+#define pci_dev_attr_groups NULL
+#define pcibus_groups NULL
+#define pci_bus_groups NULL
+#endif
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
@@ -368,11 +376,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
return 0;
}
-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
-{
- return dev->error_state == pci_channel_io_perm_failure;
-}
-
/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0
#define PCI_DPC_RECOVERED 1
@@ -409,7 +412,7 @@ struct aer_err_info {
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
+ struct pcie_tlp_log tlp; /* TLP Header */
};
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
@@ -568,16 +571,28 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
+
+/* ASPM-related functionality we need even without CONFIG_PCIEASPM */
+void pci_save_ltr_state(struct pci_dev *dev);
+void pci_restore_ltr_state(struct pci_dev *dev);
+void pci_configure_aspm_l1ss(struct pci_dev *dev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
+
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_configure_ltr(struct pci_dev *pdev);
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_configure_ltr(struct pci_dev *pdev) { }
+static inline void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -798,6 +813,27 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
#endif
/*
+ * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
+ * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
+ * there's no need to track it separately. pci_devres is initialized
+ * when a device is enabled using managed PCI device enable interface.
+ *
+ * TODO: Struct pci_devres and find_pci_dr() only need to be here because
+ * they're used in pci.c. Port or move these functions to devres.c and
+ * then remove them from here.
+ */
+struct pci_devres {
+ unsigned int enabled:1;
+ unsigned int pinned:1;
+ unsigned int orig_intx:1;
+ unsigned int restore_intx:1;
+ unsigned int mwi:1;
+ u32 region_mask;
+};
+
+struct pci_devres *find_pci_dr(struct pci_dev *pdev);
+
+/*
* Config Address for PCI Configuration Mechanism #1
*
* See PCI Local Bus Specification, Revision 3.0,
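
The CONFIG_SYSFS block above uses the usual kernel header idiom: a real prototype when the option is enabled, otherwise a static inline stub (or a NULL table), so callers never need their own #ifdefs. A standalone sketch of that idiom with a made-up function:

/* Demo of the "real declaration or static inline stub" header pattern. */
#include <stdio.h>

/* #define CONFIG_SYSFS 1 */            /* flip to select the real version */

#ifdef CONFIG_SYSFS
int create_files(const char *name);     /* provided elsewhere when enabled */
#else
static inline int create_files(const char *name) { return 0; }
#endif

int main(void)
{
        /* Callers stay free of #ifdefs either way. */
        printf("create_files: %d\n", create_files("pci"));
        return 0;
}
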
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 8de4ed5f98f1..6461aa93fe76 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,7 +6,7 @@ pcieportdrv-y := portdrv.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
-obj-$(CONFIG_PCIEASPM) += aspm.o
+obj-y += aspm.o
obj-$(CONFIG_PCIEAER) += aer.o err.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 05fc30bb5134..ac6293c24976 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -664,11 +664,10 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev,
- struct aer_header_log_regs *t)
+static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
{
pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw0, t->dw1, t->dw2, t->dw3);
+ t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
}
static void __aer_print_error(struct pci_dev *dev,
@@ -1210,7 +1209,7 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
- int temp;
+ u32 aercc;
/* Must reset in this function */
info->status = 0;
@@ -1241,19 +1240,12 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 0;
/* Get First Error Pointer */
- pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
- info->first_error = PCI_ERR_CAP_FEP(temp);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
+ info->first_error = PCI_ERR_CAP_FEP(aercc);
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
}
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index bc0bd86695ec..2428d278e015 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -24,6 +24,166 @@
#include "../pci.h"
+void pci_save_ltr_state(struct pci_dev *dev)
+{
+ int ltr;
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!ltr)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state) {
+ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
+ return;
+ }
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
+}
+
+void pci_restore_ltr_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ int ltr;
+ u32 *cap;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state || !ltr)
+ return;
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
+}
+
+void pci_configure_aspm_l1ss(struct pci_dev *pdev)
+{
+ int rc;
+
+ pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+
+ rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (rc)
+ pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
+ ERR_PTR(rc));
+}
+
+void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 l1ss = pdev->l1ss;
+ u32 *cap;
+
+ /*
+ * Save L1 substate configuration. The ASPM L0s/L1 configuration
+ * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
+ */
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *pl_save_state, *cl_save_state;
+ struct pci_dev *parent = pdev->bus->self;
+ u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
+ u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
+ u16 clnkctl, plnkctl;
+
+ /*
+ * In case BIOS enabled L1.2 when resuming, we need to disable it first
+ * on the downstream component before the upstream. So, don't attempt to
+ * restore either until we are at the downstream component.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
+ if (!cl_save_state || !pl_save_state)
+ return;
+
+ cap = &cl_save_state->cap.data[0];
+ cl_ctl2 = *cap++;
+ cl_ctl1 = *cap;
+ cap = &pl_save_state->cap.data[0];
+ pl_ctl2 = *cap++;
+ pl_ctl1 = *cap;
+
+ /* Make sure L0s/L1 are disabled before updating L1SS config */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
+ plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ }
+
+ /*
+ * Disable L1.2 on this downstream endpoint device first, followed
+ * by the upstream
+ */
+ pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
+ * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+ cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ /* Write back without enables first (above we cleared them in ctl1) */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
+
+ /* Then write back the enables */
+ if (pl_l1_2_enable || cl_l1_2_enable) {
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ pl_ctl1 | pl_l1_2_enable);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ cl_ctl1 | cl_l1_2_enable);
+ }
+
+ /* Restore L0s/L1 if they were enabled */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
+ }
+}
+
+#ifdef CONFIG_PCIEASPM
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -141,16 +301,42 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
return 0;
}
+static void pci_update_aspm_saved_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 *cap, lnkctl, aspm_ctl;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state)
+ return;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
+
+ /*
+ * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
+ * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
+ * change after being captured in save_state.
+ */
+ aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+ lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+
+ /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
+ cap = (u16 *)&save_state->cap.data[0];
+ cap[1] = lnkctl | aspm_ctl;
+}
+
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
- list_for_each_entry(child, &linkbus->devices, bus_list)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CLKREQ_EN,
val);
+ pci_update_aspm_saved_state(child);
+ }
link->clkpm_enabled = !!enable;
}
@@ -769,6 +955,12 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
+
+ /* Update latest ASPM configuration in saved context */
+ pci_save_aspm_l1ss_state(link->downstream);
+ pci_update_aspm_saved_state(link->downstream);
+ pci_save_aspm_l1ss_state(parent);
+ pci_update_aspm_saved_state(parent);
}
static void pcie_config_aspm_path(struct pcie_link_state *link)
@@ -938,6 +1130,78 @@ out:
up_read(&pci_bus_sem);
}
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 ctl;
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
+ if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
+ pci_dbg(bridge, "re-enabling LTR\n");
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ }
+ }
+}
+
+void pci_configure_ltr(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct pci_dev *bridge;
+ u32 cap, ctl;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_LTR))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
+ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path)
+ pdev->ltr_path = 1;
+
+ return;
+ }
+
+ if (!host->native_ltr)
+ return;
+
+ /*
+ * Software must not enable LTR in an Endpoint unless the Root
+ * Complex and all intermediate Switches indicate support for LTR.
+ * PCIe r4.0, sec 6.18.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ /*
+ * If we're configuring a hot-added device, LTR was likely
+ * disabled in the upstream bridge, so re-enable it before enabling
+ * it in the new device.
+ */
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pci_bridge_reconfigure_ltr(pdev);
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ }
+}
+
/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
@@ -1447,3 +1711,5 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
+
+#endif /* CONFIG_PCIEASPM */
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 94111e438241..a668820696dc 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,8 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
+ u32 status, mask, sev, syserr, exc, log, prefix;
+ struct pcie_tlp_log tlp_log;
int i;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
@@ -216,16 +217,9 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
if (pdev->dpc_rp_log_size < 4)
goto clear_status;
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
- &dw0);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
- &dw1);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
- &dw2);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
- &dw3);
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- dw0, dw1, dw2, dw3);
+ tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
if (pdev->dpc_rp_log_size < 5)
goto clear_status;
@@ -234,7 +228,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
@@ -303,10 +297,70 @@ void dpc_process_error(struct pci_dev *pdev)
}
}
+static void pci_clear_surpdn_errors(struct pci_dev *pdev)
+{
+ if (pdev->dpc_rp_extensions)
+ pci_write_config_dword(pdev, pdev->dpc_cap +
+ PCI_EXP_DPC_RP_PIO_STATUS, ~0);
+
+ /*
+ * In practice, Surprise Down errors have been observed to also set
+ * error bits in the Status Register as well as the Fatal Error
+ * Detected bit in the Device Status Register.
+ */
+ pci_write_config_word(pdev, PCI_STATUS, 0xffff);
+
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_FED);
+}
+
+static void dpc_handle_surprise_removal(struct pci_dev *pdev)
+{
+ if (!pcie_wait_for_link(pdev, false)) {
+ pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
+ goto out;
+ }
+
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
+ goto out;
+
+ pci_aer_raw_clear_status(pdev);
+ pci_clear_surpdn_errors(pdev);
+
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_TRIGGER);
+
+out:
+ clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ wake_up_all(&dpc_completed_waitqueue);
+}
+
+static bool dpc_is_surprise_removal(struct pci_dev *pdev)
+{
+ u16 status;
+
+ if (!pdev->is_hotplug_bridge)
+ return false;
+
+ if (pci_read_config_word(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS,
+ &status))
+ return false;
+
+ return status & PCI_ERR_UNC_SURPDN;
+}
+
static irqreturn_t dpc_handler(int irq, void *context)
{
struct pci_dev *pdev = context;
+ /*
+ * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
+ * of async removal and should be ignored by software.
+ */
+ if (dpc_is_surprise_removal(pdev)) {
+ dpc_handle_surprise_removal(pdev);
+ return IRQ_HANDLED;
+ }
+
dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 59c90d04a609..705893b5f7b0 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -13,6 +13,7 @@
#define dev_fmt(fmt) "AER: " fmt
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
return 0;
}
+static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_get_sync(&pdev->dev);
+ return 0;
+}
+
+static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_put(&pdev->dev);
+ return 0;
+}
+
static int report_frozen_detected(struct pci_dev *dev, void *data)
{
return report_error_detected(dev, pci_channel_io_frozen, data);
@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
else
bridge = pci_upstream_bridge(dev);
+ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
+
pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
pci_walk_bridge(bridge, report_frozen_detected, &status);
@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pcie_clear_device_status(dev);
pci_aer_clear_nonfatal_status(dev);
}
+
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_info(bridge, "device recovery successful\n");
return status;
failed:
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
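
The pcie_do_recovery() hunk brackets the whole recovery broadcast with runtime-PM references on every device below the bridge, presumably so runtime-suspended devices are resumed and their config space stays accessible while the error callbacks run. The shape of that bracket, sketched with the wrappers added above (the helper name here is invented):

/* Illustrative only: run a subtree-wide operation with all devices resumed. */
static void with_subtree_resumed(struct pci_dev *bridge,
				 void (*op)(struct pci_dev *bridge))
{
	pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
	op(bridge);
	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
}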
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1f3803bde7ee..12c89ea0313b 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -96,7 +96,7 @@ struct pcie_port_service_driver {
int pcie_port_service_register(struct pcie_port_service_driver *new);
void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-extern struct bus_type pcie_port_bus_type;
+extern const struct bus_type pcie_port_bus_type;
struct pci_dev;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b7335be56008..1325fbae2f28 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2209,67 +2209,6 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
}
}
-static void pci_configure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- struct pci_dev *bridge;
- u32 cap, ctl;
-
- if (!pci_is_pcie(dev))
- return;
-
- /* Read L1 PM substate capabilities */
- dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_DEVCAP2_LTR))
- return;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
- if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- dev->ltr_path = 1;
- return;
- }
-
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path)
- dev->ltr_path = 1;
-
- return;
- }
-
- if (!host->native_ltr)
- return;
-
- /*
- * Software must not enable LTR in an Endpoint unless the Root
- * Complex and all intermediate Switches indicate support for LTR.
- * PCIe r4.0, sec 6.18.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- return;
- }
-
- /*
- * If we're configuring a hot-added device, LTR was likely
- * disabled in the upstream bridge, so re-enable it before enabling
- * it in the new device.
- */
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pci_bridge_reconfigure_ltr(dev);
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- }
-#endif
-}
-
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_PASID
@@ -2320,6 +2259,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
+ pci_configure_aspm_l1ss(dev);
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
@@ -2357,6 +2297,10 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
+static const struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
+
struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
struct pci_dev *dev;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d797df6e5f3e..bf4833221816 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3766,6 +3766,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
/*
+ * Apparently the LSI / Agere FW643 can't recover after a Secondary Bus
+ * Reset and requires a power-off or suspend/resume and rescan. Prevent
+ * use of that reset.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5900, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5901, quirk_no_bus_reset);
+
+/*
* Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
* automatically disables LTSSM when Secondary Bus Reset is received and
* the device stops working. Prevent bus reset for these devices. With
@@ -5527,6 +5535,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
@@ -6225,6 +6234,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
/*
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
deleted file mode 100644
index cc7d26b015f3..000000000000
--- a/drivers/pci/setup-irq.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support routines for initializing a PCI subsystem
- *
- * Extruded from code written by
- * Dave Rusling (david.rusling@reo.mts.dec.com)
- * David Mosberger (davidm@cs.arizona.edu)
- * David Miller (davem@redhat.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/cache.h>
-#include "pci.h"
-
-void pci_assign_irq(struct pci_dev *dev)
-{
- u8 pin;
- u8 slot = -1;
- int irq = 0;
- struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
-
- if (!(hbrg->map_irq)) {
- pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
- return;
- }
-
- /*
- * If this device is not on the primary bus, we need to figure out
- * which interrupt pin it will come in on. We know which slot it
- * will come in on because that slot is where the bridge is. Each
- * time the interrupt line passes through a PCI-PCI bridge we must
- * apply the swizzle function.
- */
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- /* Cope with illegal. */
- if (pin > 4)
- pin = 1;
-
- if (pin) {
- /* Follow the chain of bridges, swizzling as we go. */
- if (hbrg->swizzle_irq)
- slot = (*(hbrg->swizzle_irq))(dev, &pin);
-
- /*
- * If a swizzling function is not used, map_irq() must
- * ignore slot.
- */
- irq = (*(hbrg->map_irq))(dev, slot, pin);
- if (irq == -1)
- irq = 0;
- }
- dev->irq = irq;
-
- pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
-
- /*
- * Always tell the device, so the driver knows what is the real IRQ
- * to use; the device does not use it.
- */
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
-}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 1804794d0e68..5a4adf6c04cf 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1672,7 +1672,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
rc = switchtec_init_isr(stdev);
if (rc) {
dev_err(&stdev->dev, "failed to init isr.\n");
- goto err_put;
+ goto err_exit_pci;
}
iowrite32(SWITCHTEC_EVENT_CLEAR |
@@ -1693,6 +1693,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
+err_exit_pci:
+ switchtec_exit_pci(stdev);
err_put:
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index ec6e0d9194a1..004d86230aa6 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -86,6 +86,15 @@ config RISCV_PMU_SBI
full perf feature support i.e. counter overflow, privilege mode
filtering, counter configuration.
+config STARFIVE_STARLINK_PMU
+ depends on ARCH_STARFIVE || (COMPILE_TEST && 64BIT)
+ bool "StarFive StarLink PMU"
+ help
+ Provide support for the StarFive StarLink Performance Monitor Unit.
+ The StarLink PMU integrates one or more cores with an L3 memory
+ system and exposes its L3 cache events through the perf event
+ subsystem, allowing various L3 cache events to be monitored.
+
config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index a06338e3401c..29b1c28203ef 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o
obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o
obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o
+obj-$(CONFIG_STARFIVE_STARLINK_PMU) += starfive_starlink_pmu.o
obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 19d459a36be5..a9277dcf90ce 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -729,7 +729,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int ali_drw_pmu_remove(struct platform_device *pdev)
+static void ali_drw_pmu_remove(struct platform_device *pdev)
{
struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
@@ -739,8 +739,6 @@ static int ali_drw_pmu_remove(struct platform_device *pdev)
ali_drw_pmu_uninit_irq(drw_pmu);
perf_pmu_unregister(&drw_pmu->pmu);
-
- return 0;
}
static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
@@ -795,7 +793,7 @@ static struct platform_driver ali_drw_pmu_driver = {
.acpi_match_table = ali_drw_acpi_match,
},
.probe = ali_drw_pmu_probe,
- .remove = ali_drw_pmu_remove,
+ .remove_new = ali_drw_pmu_remove,
};
static int __init ali_drw_pmu_init(void)
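
Many of the perf hunks in this series are the same mechanical conversion: the platform-driver remove callback stops returning int (the value was ignored anyway) and is wired up through .remove_new. A minimal sketch of the resulting shape, with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_pmu_probe(struct platform_device *pdev)
{
	return 0;	/* set up and register the PMU here */
}

static void example_pmu_remove(struct platform_device *pdev)
{
	/* tear down; returning an error is no longer possible */
}

static struct platform_driver example_pmu_driver = {
	.driver = {
		.name = "example-pmu",
	},
	.probe      = example_pmu_probe,
	.remove_new = example_pmu_remove,
};
module_platform_driver(example_pmu_driver);
MODULE_LICENSE("GPL");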
diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
index 15d52ab3276a..99cc791892bc 100644
--- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
@@ -355,11 +355,9 @@ static int g12_ddr_pmu_probe(struct platform_device *pdev)
return meson_ddr_pmu_create(pdev);
}
-static int g12_ddr_pmu_remove(struct platform_device *pdev)
+static void g12_ddr_pmu_remove(struct platform_device *pdev)
{
meson_ddr_pmu_remove(pdev);
-
- return 0;
}
static const struct of_device_id meson_ddr_pmu_dt_match[] = {
@@ -381,7 +379,7 @@ MODULE_DEVICE_TABLE(of, meson_ddr_pmu_dt_match);
static struct platform_driver g12_ddr_pmu_driver = {
.probe = g12_ddr_pmu_probe,
- .remove = g12_ddr_pmu_remove,
+ .remove_new = g12_ddr_pmu_remove,
.driver = {
.name = "meson-g12-ddr-pmu",
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 61de861eaf91..6be03f81ae5d 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1697,16 +1697,14 @@ error_pmu_init:
return ret;
}
-static int cci_pmu_remove(struct platform_device *pdev)
+static void cci_pmu_remove(struct platform_device *pdev)
{
if (!g_cci_pmu)
- return 0;
+ return;
cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
perf_pmu_unregister(&g_cci_pmu->pmu);
g_cci_pmu = NULL;
-
- return 0;
}
static struct platform_driver cci_pmu_driver = {
@@ -1716,7 +1714,7 @@ static struct platform_driver cci_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = cci_pmu_probe,
- .remove = cci_pmu_remove,
+ .remove_new = cci_pmu_remove,
};
module_platform_driver(cci_pmu_driver);
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 728d13d8e98a..641471bd5eff 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1515,13 +1515,11 @@ static int arm_ccn_probe(struct platform_device *pdev)
return arm_ccn_pmu_init(ccn);
}
-static int arm_ccn_remove(struct platform_device *pdev)
+static void arm_ccn_remove(struct platform_device *pdev)
{
struct arm_ccn *ccn = platform_get_drvdata(pdev);
arm_ccn_pmu_cleanup(ccn);
-
- return 0;
}
static const struct of_device_id arm_ccn_match[] = {
@@ -1539,7 +1537,7 @@ static struct platform_driver arm_ccn_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_ccn_probe,
- .remove = arm_ccn_remove,
+ .remove_new = arm_ccn_remove,
};
static int __init arm_ccn_init(void)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 7e3aa7e2345f..7ef9c7e4836b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -493,6 +493,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
for (dn = cmn->dns; dn->type; dn++) {
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+ int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP)
continue;
@@ -503,7 +504,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
continue;
- seq_printf(s, " #%-2d |", dn->logid);
+ seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
return;
}
seq_puts(s, " |");
@@ -516,7 +517,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, " X");
for (x = 0; x < cmn->mesh_x; x++)
- seq_printf(s, " %d ", x);
+ seq_printf(s, " %-2d ", x);
seq_puts(s, "\nY P D+");
y = cmn->mesh_y;
while (y--) {
@@ -526,13 +527,13 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
- seq_printf(s, "\n%d |", y);
+ seq_printf(s, "\n%-2d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
struct arm_cmn_node *xp = cmn->xps + xp_base + x;
for (p = 0; p < CMN_MAX_PORTS; p++)
port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
- seq_printf(s, " XP #%-2d |", xp_base + x);
+ seq_printf(s, " XP #%-3d|", xp_base + x);
}
seq_puts(s, "\n |");
@@ -2515,7 +2516,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
return err;
}
-static int arm_cmn_remove(struct platform_device *pdev)
+static void arm_cmn_remove(struct platform_device *pdev)
{
struct arm_cmn *cmn = platform_get_drvdata(pdev);
@@ -2524,7 +2525,6 @@ static int arm_cmn_remove(struct platform_device *pdev)
perf_pmu_unregister(&cmn->pmu);
cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
debugfs_remove(cmn->debug);
- return 0;
}
#ifdef CONFIG_OF
@@ -2555,7 +2555,7 @@ static struct platform_driver arm_cmn_driver = {
.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
},
.probe = arm_cmn_probe,
- .remove = arm_cmn_remove,
+ .remove_new = arm_cmn_remove,
};
static int __init arm_cmn_init(void)
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 50b89b989ce7..b9a252272f1e 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -27,6 +27,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
@@ -100,13 +101,6 @@
#define ARM_CSPMU_ACTIVE_CPU_MASK 0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1
-/* Check and use default if implementer doesn't provide attribute callback */
-#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \
- do { \
- if (!ops->callback) \
- ops->callback = arm_cspmu_ ## callback; \
- } while (0)
-
/*
* Maximum poll count for reading counter value using high-low-high sequence.
*/
@@ -121,7 +115,9 @@ static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
- return *(struct acpi_apmt_node **)dev_get_platdata(dev);
+ struct acpi_apmt_node **ptr = dev_get_platdata(dev);
+
+ return ptr ? *ptr : NULL;
}
/*
@@ -317,6 +313,10 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
dev = cspmu->dev;
apmt_node = arm_cspmu_apmt_node(dev);
+ if (!apmt_node)
+ return devm_kasprintf(dev, GFP_KERNEL, PMUNAME "_%u",
+ atomic_fetch_inc(&pmu_idx[0]));
+
pmu_type = apmt_node->type;
if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
@@ -408,21 +408,32 @@ static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr)
return NULL;
}
+#define DEFAULT_IMPL_OP(name) .name = arm_cspmu_##name
+
static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
int ret = 0;
- struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
struct arm_cspmu_impl_match *match;
- /*
- * Get PMU implementer and product id from APMT node.
- * If APMT node doesn't have implementer/product id, try get it
- * from PMIIDR.
- */
- cspmu->impl.pmiidr =
- (apmt_node->impl_id) ? apmt_node->impl_id :
- readl(cspmu->base0 + PMIIDR);
+ /* Start with a default PMU implementation */
+ cspmu->impl.module = THIS_MODULE;
+ cspmu->impl.pmiidr = readl(cspmu->base0 + PMIIDR);
+ cspmu->impl.ops = (struct arm_cspmu_impl_ops) {
+ DEFAULT_IMPL_OP(get_event_attrs),
+ DEFAULT_IMPL_OP(get_format_attrs),
+ DEFAULT_IMPL_OP(get_identifier),
+ DEFAULT_IMPL_OP(get_name),
+ DEFAULT_IMPL_OP(is_cycle_counter_event),
+ DEFAULT_IMPL_OP(event_type),
+ DEFAULT_IMPL_OP(event_filter),
+ DEFAULT_IMPL_OP(set_ev_filter),
+ DEFAULT_IMPL_OP(event_attr_is_visible),
+ };
+
+ /* Firmware may override implementer/product ID from PMIIDR */
+ if (apmt_node && apmt_node->impl_id)
+ cspmu->impl.pmiidr = apmt_node->impl_id;
/* Find implementer specific attribute ops. */
match = arm_cspmu_impl_match_get(cspmu->impl.pmiidr);
@@ -450,24 +461,9 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
}
mutex_unlock(&arm_cspmu_lock);
+ }
- if (ret)
- return ret;
- } else
- cspmu->impl.module = THIS_MODULE;
-
- /* Use default callbacks if implementer doesn't provide one. */
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, set_ev_filter);
-
- return 0;
+ return ret;
}
static struct attribute_group *
@@ -512,23 +508,16 @@ arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
return format_group;
}
-static struct attribute_group **
-arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
+static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu)
{
- struct attribute_group **attr_groups = NULL;
- struct device *dev = cspmu->dev;
+ const struct attribute_group **attr_groups = cspmu->attr_groups;
const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
cspmu->identifier = impl_ops->get_identifier(cspmu);
cspmu->name = impl_ops->get_name(cspmu);
if (!cspmu->identifier || !cspmu->name)
- return NULL;
-
- attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *),
- GFP_KERNEL);
- if (!attr_groups)
- return NULL;
+ return -ENOMEM;
attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
@@ -536,18 +525,14 @@ arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
attr_groups[3] = &arm_cspmu_cpumask_attr_group;
if (!attr_groups[0] || !attr_groups[1])
- return NULL;
+ return -ENOMEM;
- return attr_groups;
+ return 0;
}
static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
- u32 pmcr = 0;
-
- pmcr |= PMCR_P;
- pmcr |= PMCR_C;
- writel(pmcr, cspmu->base0 + PMCR);
+ writel(PMCR_C | PMCR_P, cspmu->base0 + PMCR);
}
static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
@@ -962,7 +947,14 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
platform_set_drvdata(pdev, cspmu);
apmt_node = arm_cspmu_apmt_node(dev);
- cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+ if (apmt_node) {
+ cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+ } else {
+ u32 width = 0;
+
+ device_property_read_u32(dev, "reg-io-width", &width);
+ cspmu->has_atomic_dword = (width == 8);
+ }
return cspmu;
}
@@ -1153,11 +1145,6 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
}
}
- if (cpumask_empty(&cspmu->associated_cpus)) {
- dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
- return -ENODEV;
- }
-
return 0;
}
#else
@@ -1167,19 +1154,45 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
}
#endif
+static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu)
+{
+ struct of_phandle_iterator it;
+ int ret, cpu;
+
+ of_for_each_phandle(&it, ret, dev_of_node(cspmu->dev), "cpus", NULL, 0) {
+ cpu = of_cpu_node_to_id(it.node);
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, &cspmu->associated_cpus);
+ }
+ return ret == -ENOENT ? 0 : ret;
+}
+
static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
- return arm_cspmu_acpi_get_cpus(cspmu);
+ int ret = 0;
+
+ if (arm_cspmu_apmt_node(cspmu->dev))
+ ret = arm_cspmu_acpi_get_cpus(cspmu);
+ else if (device_property_present(cspmu->dev, "cpus"))
+ ret = arm_cspmu_of_get_cpus(cspmu);
+ else
+ cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask);
+
+ if (!ret && cpumask_empty(&cspmu->associated_cpus)) {
+ dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
+ ret = -ENODEV;
+ }
+ return ret;
}
static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
int ret, capabilities;
- struct attribute_group **attr_groups;
- attr_groups = arm_cspmu_alloc_attr_group(cspmu);
- if (!attr_groups)
- return -ENOMEM;
+ ret = arm_cspmu_alloc_attr_groups(cspmu);
+ if (ret)
+ return ret;
ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
&cspmu->cpuhp_node);
@@ -1201,12 +1214,11 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
.start = arm_cspmu_start,
.stop = arm_cspmu_stop,
.read = arm_cspmu_read,
- .attr_groups = (const struct attribute_group **)attr_groups,
+ .attr_groups = cspmu->attr_groups,
.capabilities = capabilities,
};
/* Hardware counter init */
- arm_cspmu_stop_counters(cspmu);
arm_cspmu_reset_counters(cspmu);
ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
@@ -1252,14 +1264,12 @@ static int arm_cspmu_device_probe(struct platform_device *pdev)
return ret;
}
-static int arm_cspmu_device_remove(struct platform_device *pdev)
+static void arm_cspmu_device_remove(struct platform_device *pdev)
{
struct arm_cspmu *cspmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&cspmu->pmu);
cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);
-
- return 0;
}
static const struct platform_device_id arm_cspmu_id[] = {
@@ -1268,13 +1278,20 @@ static const struct platform_device_id arm_cspmu_id[] = {
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);
+static const struct of_device_id arm_cspmu_of_match[] = {
+ { .compatible = "arm,coresight-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_cspmu_of_match);
+
static struct platform_driver arm_cspmu_driver = {
.driver = {
- .name = DRVNAME,
- .suppress_bind_attrs = true,
- },
+ .name = DRVNAME,
+ .of_match_table = arm_cspmu_of_match,
+ .suppress_bind_attrs = true,
+ },
.probe = arm_cspmu_device_probe,
- .remove = arm_cspmu_device_remove,
+ .remove_new = arm_cspmu_device_remove,
.id_table = arm_cspmu_id,
};
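
The arm_cspmu hunks replace the CHECK_DEFAULT_IMPL_OPS() fix-up macro with a designated-initializer table of defaults that an implementer then overrides selectively (which is why nvidia_cspmu.c can drop its explicit NULL assignments below). A stripped-down sketch of that pattern with invented names:

struct widget_ops {
	int  (*start)(void);
	void (*stop)(void);
};

static int  default_start(void) { return 0; }
static void default_stop(void)  { }

/* Every member gets a default up front ... */
static struct widget_ops ops = {
	.start = default_start,
	.stop  = default_stop,
};

/* ... and an implementation overrides only what it cares about. */
static void widget_apply_impl(int (*impl_start)(void))
{
	if (impl_start)
		ops.start = impl_start;
}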
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 2fe723555a6b..c9163acfe810 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -157,6 +157,7 @@ struct arm_cspmu {
int cycle_counter_logical_idx;
struct arm_cspmu_hw_events hw_events;
+ const struct attribute_group *attr_groups[5];
struct arm_cspmu_impl impl;
};
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 0382b702f092..5b84b701ad62 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -388,12 +388,6 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
- /* Set others to NULL to use default callback. */
- impl_ops->event_type = NULL;
- impl_ops->event_attr_is_visible = NULL;
- impl_ops->get_identifier = NULL;
- impl_ops->is_cycle_counter_event = NULL;
-
return 0;
}
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 30cea6859574..8a81be2dd5ec 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -724,7 +724,7 @@ out_teardown_dev:
return ret;
}
-static int dmc620_pmu_device_remove(struct platform_device *pdev)
+static void dmc620_pmu_device_remove(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
@@ -732,8 +732,6 @@ static int dmc620_pmu_device_remove(struct platform_device *pdev)
/* perf will synchronise RCU before devres can free dmc620_pmu */
perf_pmu_unregister(&dmc620_pmu->pmu);
-
- return 0;
}
static const struct acpi_device_id dmc620_acpi_match[] = {
@@ -748,7 +746,7 @@ static struct platform_driver dmc620_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = dmc620_pmu_device_probe,
- .remove = dmc620_pmu_device_remove,
+ .remove_new = dmc620_pmu_device_remove,
};
static int __init dmc620_pmu_init(void)
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 7ec4498e312f..bae3ca37f846 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -774,14 +774,12 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
return rc;
}
-static int dsu_pmu_device_remove(struct platform_device *pdev)
+static void dsu_pmu_device_remove(struct platform_device *pdev)
{
struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&dsu_pmu->pmu);
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
-
- return 0;
}
static const struct of_device_id dsu_pmu_of_match[] = {
@@ -806,7 +804,7 @@ static struct platform_driver dsu_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
- .remove = dsu_pmu_device_remove,
+ .remove_new = dsu_pmu_device_remove,
};
static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 6303b82566f9..719aa953a1c4 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -716,7 +716,7 @@ static void smmu_pmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
}
static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -746,7 +746,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
return;
- ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs\n");
return;
@@ -965,14 +965,12 @@ out_unregister:
return err;
}
-static int smmu_pmu_remove(struct platform_device *pdev)
+static void smmu_pmu_remove(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-
- return 0;
}
static void smmu_pmu_shutdown(struct platform_device *pdev)
@@ -997,7 +995,7 @@ static struct platform_driver smmu_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
- .remove = smmu_pmu_remove,
+ .remove_new = smmu_pmu_remove,
.shutdown = smmu_pmu_shutdown,
};
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index b622d75d8c9e..35f0de03416f 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1263,14 +1263,13 @@ out_free_handle:
return ret;
}
-static int arm_spe_pmu_device_remove(struct platform_device *pdev)
+static void arm_spe_pmu_device_remove(struct platform_device *pdev)
{
struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
arm_spe_pmu_perf_destroy(spe_pmu);
arm_spe_pmu_dev_teardown(spe_pmu);
free_percpu(spe_pmu->handle);
- return 0;
}
static struct platform_driver arm_spe_pmu_driver = {
@@ -1281,7 +1280,7 @@ static struct platform_driver arm_spe_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
- .remove = arm_spe_pmu_device_remove,
+ .remove_new = arm_spe_pmu_device_remove,
};
static int __init arm_spe_pmu_init(void)
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index bc0d414a6aff..308c9969642e 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -59,7 +59,7 @@
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
}
/*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
+ * CPMU specification allows for 8 filters, each with a 32 bit value...
+ * So we need to find 8x32bits to store it in.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
* is needed.
*/
@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 7dbfaee372c7..4e8fa5a48fcf 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -826,7 +826,7 @@ cpuhp_state_err:
return ret;
}
-static int ddr_perf_remove(struct platform_device *pdev)
+static void ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
@@ -836,7 +836,6 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
ida_free(&ddr_ida, pmu->id);
- return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
@@ -846,7 +845,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
- .remove = ddr_perf_remove,
+ .remove_new = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 9685645bfe04..72c2d3074cde 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -679,7 +679,7 @@ format_string_err:
return ret;
}
-static int ddr_perf_remove(struct platform_device *pdev)
+static void ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
@@ -689,8 +689,6 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
ida_free(&ddr_ida, pmu->id);
-
- return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
@@ -700,7 +698,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
- .remove = ddr_perf_remove,
+ .remove_new = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index b90ba8aca3fa..5d1f0e9fdb08 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -216,10 +216,8 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
writeq_relaxed(val, pcie_pmu->base + offset);
}
-static void hisi_pcie_pmu_config_filter(struct perf_event *event)
+static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
{
- struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
u64 port, trig_len, thr_len, len_mode;
u64 reg = HISI_PCIE_INIT_SET;
@@ -256,10 +254,19 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event)
else
reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);
+ return reg;
+}
+
+static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event);
+
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
}
-static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
+static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -299,18 +306,24 @@ static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
return false;
- if (requester_id) {
- if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
- return false;
- }
+ /* Need to explicitly set filter of "port" or "bdf" */
+ if (!hisi_pcie_get_port(event) &&
+ !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
+ return false;
return true;
}
+/*
+ * Check whether two events share the same config. The same config means not
+ * only the event code, but also the filter settings of the two events are
+ * the same.
+ */
static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
struct perf_event *event)
{
- return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
+ return hisi_pcie_pmu_get_event_ctrl_val(target) ==
+ hisi_pcie_pmu_get_event_ctrl_val(event);
}
static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
@@ -385,40 +398,32 @@ static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
}
-static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
- struct perf_event *event)
+/*
+ * Check all working events; if a related event is found, return its index,
+ * otherwise return the first idle counter (which needs to be reset).
+ */
+static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu,
+ struct perf_event *event)
{
+ int first_idle = -EAGAIN;
struct perf_event *sibling;
int idx;
for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
sibling = pcie_pmu->hw_events[idx];
- if (!sibling)
- continue;
-
- if (!hisi_pcie_pmu_cmp_event(sibling, event))
+ if (!sibling) {
+ if (first_idle == -EAGAIN)
+ first_idle = idx;
continue;
+ }
/* Related events must be used in group */
- if (sibling->group_leader == event->group_leader)
+ if (hisi_pcie_pmu_cmp_event(sibling, event) &&
+ sibling->group_leader == event->group_leader)
return idx;
- else
- return -EINVAL;
}
- return idx;
-}
-
-static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
-{
- int idx;
-
- for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
- if (!pcie_pmu->hw_events[idx])
- return idx;
- }
-
- return -EINVAL;
+ return first_idle;
}
static void hisi_pcie_pmu_event_update(struct perf_event *event)
@@ -505,7 +510,7 @@ static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
- hisi_pcie_pmu_config_filter(event);
+ hisi_pcie_pmu_config_event_ctrl(event);
hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
hisi_pcie_pmu_set_period(event);
@@ -526,7 +531,7 @@ static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
hisi_pcie_pmu_event_update(event);
hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
- hisi_pcie_pmu_clear_filter(event);
+ hisi_pcie_pmu_clear_event_ctrl(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -544,27 +549,18 @@ static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
- /* Check all working events to find a related event. */
- idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
- if (idx < 0)
- return idx;
-
- /* Current event shares an enabled counter with the related event */
- if (idx < HISI_PCIE_MAX_COUNTERS) {
- hwc->idx = idx;
- goto start_count;
- }
-
- idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
+ idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event);
if (idx < 0)
return idx;
hwc->idx = idx;
- pcie_pmu->hw_events[idx] = event;
- /* Reset Counter to avoid previous statistic interference. */
- hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
-start_count:
+ /* No enabled counter found with related event, reset it */
+ if (!pcie_pmu->hw_events[idx]) {
+ hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
+ pcie_pmu->hw_events[idx] = event;
+ }
+
if (flags & PERF_EF_START)
hisi_pcie_pmu_start(event, PERF_EF_RELOAD);
@@ -714,10 +710,18 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = {
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005),
NULL
};
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 40f1bc9f9b91..0e923f94fa5b 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -341,7 +341,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_cpa_pmu_remove(struct platform_device *pdev)
+static void hisi_cpa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);
@@ -349,7 +349,6 @@ static int hisi_cpa_pmu_remove(struct platform_device *pdev)
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
&cpa_pmu->node);
hisi_cpa_pmu_enable_pm(cpa_pmu);
- return 0;
}
static struct platform_driver hisi_cpa_pmu_driver = {
@@ -359,7 +358,7 @@ static struct platform_driver hisi_cpa_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_cpa_pmu_probe,
- .remove = hisi_cpa_pmu_remove,
+ .remove_new = hisi_cpa_pmu_remove,
};
static int __init hisi_cpa_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index ffb039d05d07..b804e3738113 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -531,14 +531,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
+static void hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&ddrc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
- return 0;
}
static struct platform_driver hisi_ddrc_pmu_driver = {
@@ -548,7 +547,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_ddrc_pmu_probe,
- .remove = hisi_ddrc_pmu_remove,
+ .remove_new = hisi_ddrc_pmu_remove,
};
static int __init hisi_ddrc_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 15caf99e1eef..21e69b1cdd4d 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -534,14 +534,13 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_hha_pmu_remove(struct platform_device *pdev)
+static void hisi_hha_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&hha_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
- return 0;
}
static struct platform_driver hisi_hha_pmu_driver = {
@@ -551,7 +550,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_hha_pmu_probe,
- .remove = hisi_hha_pmu_remove,
+ .remove_new = hisi_hha_pmu_remove,
};
static int __init hisi_hha_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 794dbcd19b7a..51ba76871097 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -568,14 +568,13 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+static void hisi_l3c_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&l3c_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
- return 0;
}
static struct platform_driver hisi_l3c_pmu_driver = {
@@ -585,7 +584,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_l3c_pmu_probe,
- .remove = hisi_l3c_pmu_remove,
+ .remove_new = hisi_l3c_pmu_remove,
};
static int __init hisi_l3c_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 797cf201996a..3cdb35c741f9 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -514,14 +514,13 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_pa_pmu_remove(struct platform_device *pdev)
+static void hisi_pa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&pa_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
- return 0;
}
static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
@@ -539,7 +538,7 @@ static struct platform_driver hisi_pa_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_pa_pmu_probe,
- .remove = hisi_pa_pmu_remove,
+ .remove_new = hisi_pa_pmu_remove,
};
static int __init hisi_pa_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index e706ca567676..765bbd61db26 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -460,14 +460,13 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_sllc_pmu_remove(struct platform_device *pdev)
+static void hisi_sllc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&sllc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
- return 0;
}
static struct platform_driver hisi_sllc_pmu_driver = {
@@ -477,7 +476,7 @@ static struct platform_driver hisi_sllc_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = hisi_sllc_pmu_probe,
- .remove = hisi_sllc_pmu_remove,
+ .remove_new = hisi_sllc_pmu_remove,
};
static int __init hisi_sllc_pmu_module_init(void)
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 636fb79647c8..481dcc9e8fbf 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -287,12 +287,52 @@ static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
-static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
+}
+
+static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
+static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ hisi_uc_pmu_start_counters(uc_pmu);
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ hisi_uc_pmu_stop_counters(uc_pmu);
+}
+
+static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
+ bool erratum = uc_pmu->identifier == HISI_PMU_V2;
+
+ /*
+ * HiSilicon UC PMU v2 suffers from erratum 162700402: the PMU
+ * counter cannot be written because its clock is gated in power
+ * saving mode, which leads to wrong or inaccurate counts. The
+ * clock can be turned on through the PMU global enable control.
+ * The irq handler and pmu_start() call this function to set the
+ * period. In irq context the PMU is already enabled, so the
+ * counter is written directly. In all other situations the PMU
+ * is disabled, so enable it to turn on the counter clock, write
+ * the period, and then restore the PMU enable state; the counter
+ * holds its value without a clock.
+ */
+ if (enable || !erratum)
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ else
+ hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
+}
+
static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
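
The erratum comment above boils down to a simple bracket: if the counter clock is gated (PMU globally disabled), enable the PMU around the counter write and disable it again afterwards. A condensed, illustrative-only restatement that reuses the helpers and macros from the hunk and omits the HISI_PMU_V2 identifier check:

static void uc_pmu_write_counter_sketch(struct hisi_pmu *uc_pmu,
					struct hw_perf_event *hwc, u64 val)
{
	bool was_enabled = hisi_uc_pmu_get_glb_en_state(uc_pmu);

	if (!was_enabled)
		hisi_uc_pmu_start_counters(uc_pmu);	/* ungate the counter clock */

	writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));

	if (!was_enabled)
		hisi_uc_pmu_stop_counters(uc_pmu);	/* counter keeps its value unclocked */
}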
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 524ba82bfce2..e2abca188dbe 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -697,7 +697,7 @@ error:
return ret;
}
-static int cn10k_ddr_perf_remove(struct platform_device *pdev)
+static void cn10k_ddr_perf_remove(struct platform_device *pdev)
{
struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
@@ -706,7 +706,6 @@ static int cn10k_ddr_perf_remove(struct platform_device *pdev)
&ddr_pmu->node);
perf_pmu_unregister(&ddr_pmu->pmu);
- return 0;
}
#ifdef CONFIG_OF
@@ -733,7 +732,7 @@ static struct platform_driver cn10k_ddr_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = cn10k_ddr_perf_probe,
- .remove = cn10k_ddr_perf_remove,
+ .remove_new = cn10k_ddr_perf_remove,
};
static int __init cn10k_ddr_pmu_init(void)
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index fec8e82edb95..9e635f355470 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -351,15 +351,13 @@ static int tad_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int tad_pmu_remove(struct platform_device *pdev)
+static void tad_pmu_remove(struct platform_device *pdev)
{
struct tad_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
&pmu->node);
perf_pmu_unregister(&pmu->pmu);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -385,7 +383,7 @@ static struct platform_driver tad_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = tad_pmu_probe,
- .remove = tad_pmu_remove,
+ .remove_new = tad_pmu_remove,
};
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 3f9a98c17a89..148df5ae8ef8 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -965,7 +965,7 @@ out_unregister:
return err;
}
-static int l2_cache_pmu_remove(struct platform_device *pdev)
+static void l2_cache_pmu_remove(struct platform_device *pdev)
{
struct l2cache_pmu *l2cache_pmu =
to_l2cache_pmu(platform_get_drvdata(pdev));
@@ -973,7 +973,6 @@ static int l2_cache_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&l2cache_pmu->pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
&l2cache_pmu->node);
- return 0;
}
static struct platform_driver l2_cache_pmu_driver = {
@@ -983,7 +982,7 @@ static struct platform_driver l2_cache_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = l2_cache_pmu_probe,
- .remove = l2_cache_pmu_remove,
+ .remove_new = l2_cache_pmu_remove,
};
static int __init register_l2_cache_pmu_driver(void)
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 0dda70e1ef90..c78a6fd6c57f 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!rvpmu->ctr_get_width)
- /**
- * If the pmu driver doesn't support counter width, set it to default
- * maximum allowed by the specification.
- */
- cwidth = 63;
- else {
- if (hwc->idx == -1)
- /* Handle init case where idx is not initialized yet */
- cwidth = rvpmu->ctr_get_width(0);
- else
- cwidth = rvpmu->ctr_get_width(hwc->idx);
- }
+ if (hwc->idx == -1)
+ /* Handle init case where idx is not initialized yet */
+ cwidth = rvpmu->ctr_get_width(0);
+ else
+ cwidth = rvpmu->ctr_get_width(hwc->idx);
return GENMASK_ULL(cwidth, 0);
}
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 79fdd667922e..fa0bccf4edf2 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
return pmu_legacy_ctr_get_idx(event);
}
+/* cycle & instret are always 64 bit, one bit less according to SBI spec */
+static int pmu_legacy_ctr_get_width(int idx)
+{
+ return 63;
+}
+
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
- pmu->ctr_get_width = NULL;
+ pmu->ctr_get_width = pmu_legacy_ctr_get_width;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
pmu->event_mapped = pmu_legacy_event_mapped;
pmu->event_unmapped = pmu_legacy_event_unmapped;
pmu->csr_index = pmu_legacy_csr_index;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 16acd4dcdb96..452aab49db1e 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
/* check if the corresponding bit is set in sscountovf */
- if (!(overflow & (1 << hidx)))
+ if (!(overflow & BIT(hidx)))
continue;
/*
* Keep a track of overflowed counters so that they can be started
* with updated initial value.
*/
- overflowed_ctrs |= 1 << lidx;
+ overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
riscv_pmu_event_update(event);
perf_sample_data_init(&data, 0, hw_evt->last_period);
diff --git a/drivers/perf/starfive_starlink_pmu.c b/drivers/perf/starfive_starlink_pmu.c
new file mode 100644
index 000000000000..5e5a672b4229
--- /dev/null
+++ b/drivers/perf/starfive_starlink_pmu.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * StarFive's StarLink PMU driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Ji Sheng Teoh <jisheng.teoh@starfivetech.com>
+ *
+ */
+
+#define STARLINK_PMU_PDEV_NAME "starfive_starlink_pmu"
+#define pr_fmt(fmt) STARLINK_PMU_PDEV_NAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define STARLINK_PMU_MAX_COUNTERS 64
+#define STARLINK_PMU_NUM_COUNTERS 16
+#define STARLINK_PMU_IDX_CYCLE_COUNTER 63
+
+#define STARLINK_PMU_EVENT_SELECT 0x060
+#define STARLINK_PMU_EVENT_COUNTER 0x160
+#define STARLINK_PMU_COUNTER_MASK GENMASK_ULL(63, 0)
+#define STARLINK_PMU_CYCLE_COUNTER 0x058
+
+#define STARLINK_PMU_CONTROL 0x040
+#define STARLINK_PMU_GLOBAL_ENABLE BIT_ULL(0)
+
+#define STARLINK_PMU_INTERRUPT_ENABLE 0x050
+#define STARLINK_PMU_COUNTER_OVERFLOW_STATUS 0x048
+#define STARLINK_PMU_CYCLE_OVERFLOW_MASK BIT_ULL(63)
+
+#define STARLINK_CYCLES 0x058
+#define CACHE_READ_REQUEST 0x04000701
+#define CACHE_WRITE_REQUEST 0x03000001
+#define CACHE_RELEASE_REQUEST 0x0003e001
+#define CACHE_READ_HIT 0x00901202
+#define CACHE_READ_MISS 0x04008002
+#define CACHE_WRITE_HIT 0x006c0002
+#define CACHE_WRITE_MISS 0x03000002
+#define CACHE_WRITEBACK 0x00000403
+
+#define to_starlink_pmu(p) (container_of(p, struct starlink_pmu, pmu))
+
+#define STARLINK_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, starlink_pmu_sysfs_format_show, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+#define STARLINK_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, starlink_pmu_sysfs_event_show, _id)
+
+static int starlink_pmu_cpuhp_state;
+
+struct starlink_hw_events {
+ struct perf_event *events[STARLINK_PMU_MAX_COUNTERS];
+ DECLARE_BITMAP(used_mask, STARLINK_PMU_MAX_COUNTERS);
+};
+
+struct starlink_pmu {
+ struct pmu pmu;
+ struct starlink_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block starlink_pmu_pm_nb;
+ void __iomem *pmu_base;
+ cpumask_t cpumask;
+ int irq;
+};
+
+static ssize_t
+starlink_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static struct attribute *starlink_pmu_format_attrs[] = {
+ STARLINK_FORMAT_ATTR(event, "config:0-31"),
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = starlink_pmu_format_attrs,
+};
+
+static ssize_t
+starlink_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct perf_pmu_events_attr *eattr = container_of(attr,
+ struct perf_pmu_events_attr, attr);
+
+ return sysfs_emit(buf, "event=0x%02llx\n", eattr->id);
+}
+
+static struct attribute *starlink_pmu_event_attrs[] = {
+ STARLINK_EVENT_ATTR(cycles, STARLINK_CYCLES),
+ STARLINK_EVENT_ATTR(read_request, CACHE_READ_REQUEST),
+ STARLINK_EVENT_ATTR(write_request, CACHE_WRITE_REQUEST),
+ STARLINK_EVENT_ATTR(release_request, CACHE_RELEASE_REQUEST),
+ STARLINK_EVENT_ATTR(read_hit, CACHE_READ_HIT),
+ STARLINK_EVENT_ATTR(read_miss, CACHE_READ_MISS),
+ STARLINK_EVENT_ATTR(write_hit, CACHE_WRITE_HIT),
+ STARLINK_EVENT_ATTR(write_miss, CACHE_WRITE_MISS),
+ STARLINK_EVENT_ATTR(writeback, CACHE_WRITEBACK),
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = starlink_pmu_event_attrs,
+};
+
+static ssize_t
+cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &starlink_pmu->cpumask);
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *starlink_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_cpumask_attr_group = {
+ .attrs = starlink_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *starlink_pmu_attr_groups[] = {
+ &starlink_pmu_format_attr_group,
+ &starlink_pmu_events_attr_group,
+ &starlink_pmu_cpumask_attr_group,
+ NULL
+};
+
+static void starlink_pmu_set_event_period(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+
+ /*
+	 * Program the counter to half of its max count to handle
+ * cases of extreme interrupt latency.
+ */
+ u64 val = STARLINK_PMU_COUNTER_MASK >> 1;
+
+ local64_set(&hwc->prev_count, val);
+ if (hwc->config == STARLINK_CYCLES)
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CYCLE_COUNTER);
+ else
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_EVENT_COUNTER +
+ idx * sizeof(u64));
+}
+
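A minimal standalone sketch (not part of this patch) of the reload arithmetic described in the comment above: writing half of the counter's maximum value leaves 2^63 counts of headroom before the next overflow interrupt, so even a very late interrupt cannot let the counter wrap a second time. The mask mirrors STARLINK_PMU_COUNTER_MASK; everything else is illustrative and builds as ordinary user-space C.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Mirrors STARLINK_PMU_COUNTER_MASK (GENMASK_ULL(63, 0)). */
	uint64_t mask = UINT64_MAX;
	/* Value written by starlink_pmu_set_event_period(). */
	uint64_t reload = mask >> 1;
	/* Counts that can elapse before the overflow interrupt fires. */
	uint64_t headroom = mask - reload;

	printf("reload   = %#018" PRIx64 "\n", reload);
	printf("headroom = %#018" PRIx64 "\n", headroom);
	return 0;
}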
+static void starlink_pmu_counter_start(struct perf_event *event,
+ struct starlink_pmu *starlink_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+ u64 val;
+
+ /*
+ * Enable counter overflow interrupt[63:0],
+	 * which is mapped as follows:
+ *
+ * event counter 0 - Bit [0]
+ * event counter 1 - Bit [1]
+ * ...
+ * cycle counter - Bit [63]
+ */
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+
+ if (hwc->config == STARLINK_CYCLES) {
+ /*
+ * Cycle count has its dedicated register, and it starts
+ * counting as soon as STARLINK_PMU_GLOBAL_ENABLE is set.
+ */
+ val |= STARLINK_PMU_CYCLE_OVERFLOW_MASK;
+ } else {
+ writeq(event->hw.config, starlink_pmu->pmu_base +
+ STARLINK_PMU_EVENT_SELECT + idx * sizeof(u64));
+
+ val |= BIT_ULL(idx);
+ }
+
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+
+ writeq(STARLINK_PMU_GLOBAL_ENABLE, starlink_pmu->pmu_base +
+ STARLINK_PMU_CONTROL);
+}
+
+static void starlink_pmu_counter_stop(struct perf_event *event,
+ struct starlink_pmu *starlink_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+ u64 val;
+
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
+ val &= ~STARLINK_PMU_GLOBAL_ENABLE;
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
+
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+ if (hwc->config == STARLINK_CYCLES)
+ val &= ~STARLINK_PMU_CYCLE_OVERFLOW_MASK;
+ else
+ val &= ~BIT_ULL(idx);
+
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+}
+
+static void starlink_pmu_update(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u64 prev_raw_count, new_raw_count;
+ u64 oldval;
+ u64 delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ if (hwc->config == STARLINK_CYCLES)
+ new_raw_count = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_CYCLE_COUNTER);
+ else
+ new_raw_count = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_EVENT_COUNTER +
+ idx * sizeof(u64));
+ oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count);
+ } while (oldval != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & STARLINK_PMU_COUNTER_MASK;
+ local64_add(delta, &event->count);
+}
+
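A quick sketch (again not part of this patch) of why the masked subtraction in starlink_pmu_update() stays correct across a counter wrap; the helper name and sample values below are made up for illustration only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as starlink_pmu_update(): modular subtraction under the
 * counter mask yields the elapsed count even if the raw value wrapped. */
static uint64_t counter_delta(uint64_t prev, uint64_t now, uint64_t mask)
{
	return (now - prev) & mask;
}

int main(void)
{
	uint64_t mask = UINT64_MAX;

	/* No wrap: 1000 -> 1500 elapses 500 counts. */
	printf("%" PRIu64 "\n", counter_delta(1000, 1500, mask));
	/* Wrap: (max - 10) -> 5 still reports the true 16 counts. */
	printf("%" PRIu64 "\n", counter_delta(mask - 10, 5, mask));
	return 0;
}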
+static void starlink_pmu_start(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ starlink_pmu_set_event_period(event);
+ starlink_pmu_counter_start(event, starlink_pmu);
+
+ perf_event_update_userpage(event);
+}
+
+static void starlink_pmu_stop(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ starlink_pmu_counter_stop(event, starlink_pmu);
+ starlink_pmu_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int starlink_pmu_add(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long *used_mask = hw_events->used_mask;
+ u32 n_events = STARLINK_PMU_NUM_COUNTERS;
+ int idx;
+
+ /*
+	 * The cycle counter has a dedicated register to hold its value.
+	 * Any other event has to be enabled through the event select
+	 * register and is assigned an independent counter as it is
+	 * added.
+ */
+
+ if (hwc->config == STARLINK_CYCLES) {
+ idx = STARLINK_PMU_IDX_CYCLE_COUNTER;
+ } else {
+ idx = find_first_zero_bit(used_mask, n_events);
+		/* All counters are in use */
+		if (idx == n_events)
+			return -EAGAIN;
+
+ set_bit(idx, used_mask);
+ }
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ starlink_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void starlink_pmu_del(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ starlink_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[hwc->idx] = NULL;
+ clear_bit(hwc->idx, hw_events->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static bool starlink_pmu_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counter = 1;
+
+ /*
+	 * Ensure that hardware events in the group are on the same PMU;
+ * software events are acceptable.
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return false;
+
+ for_each_sibling_event(sibling, leader) {
+ if (sibling->pmu != event->pmu && !is_software_event(sibling))
+ return false;
+
+ counter++;
+ }
+
+ return counter <= STARLINK_PMU_NUM_COUNTERS;
+}
+
+static int starlink_pmu_event_init(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * Sampling is not supported, as counters are shared
+	 * by all CPUs.
+ */
+ if (hwc->sample_period)
+ return -EOPNOTSUPP;
+
+ /*
+	 * Per-task counting and attaching to a task are not supported,
+ * as uncore events are not specific to any CPU.
+ */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (!starlink_pmu_validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+ hwc->config = event->attr.config;
+ event->cpu = cpumask_first(&starlink_pmu->cpumask);
+
+ return 0;
+}
+
+static irqreturn_t starlink_pmu_handle_irq(int irq_num, void *data)
+{
+ struct starlink_pmu *starlink_pmu = data;
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ bool handled = false;
+ int idx;
+ u64 overflow_status;
+
+ for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
+ struct perf_event *event = hw_events->events[idx];
+
+ if (!event)
+ continue;
+
+ overflow_status = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
+ if (!(overflow_status & BIT_ULL(idx)))
+ continue;
+
+ writeq(BIT_ULL(idx), starlink_pmu->pmu_base +
+ STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
+
+ starlink_pmu_update(event);
+ starlink_pmu_set_event_period(event);
+ handled = true;
+ }
+ return IRQ_RETVAL(handled);
+}
+
+static int starlink_setup_irqs(struct starlink_pmu *starlink_pmu,
+ struct platform_device *pdev)
+{
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, irq, starlink_pmu_handle_irq,
+ 0, STARLINK_PMU_PDEV_NAME, starlink_pmu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request IRQ\n");
+
+ starlink_pmu->irq = irq;
+
+ return 0;
+}
+
+static int starlink_pmu_pm_notify(struct notifier_block *b,
+ unsigned long cmd, void *v)
+{
+ struct starlink_pmu *starlink_pmu = container_of(b, struct starlink_pmu,
+ starlink_pmu_pm_nb);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ int enabled = bitmap_weight(hw_events->used_mask,
+ STARLINK_PMU_MAX_COUNTERS);
+ struct perf_event *event;
+ int idx;
+
+ if (!enabled)
+ return NOTIFY_OK;
+
+ for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
+ event = hw_events->events[idx];
+ if (!event)
+ continue;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* Stop and update the counter */
+ starlink_pmu_stop(event, PERF_EF_UPDATE);
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ /* Restore and enable the counter */
+ starlink_pmu_start(event, PERF_EF_RELOAD);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static int starlink_pmu_pm_register(struct starlink_pmu *starlink_pmu)
+{
+ if (!IS_ENABLED(CONFIG_CPU_PM))
+ return 0;
+
+ starlink_pmu->starlink_pmu_pm_nb.notifier_call = starlink_pmu_pm_notify;
+ return cpu_pm_register_notifier(&starlink_pmu->starlink_pmu_pm_nb);
+}
+
+static void starlink_pmu_pm_unregister(struct starlink_pmu *starlink_pmu)
+{
+ if (!IS_ENABLED(CONFIG_CPU_PM))
+ return;
+
+ cpu_pm_unregister_notifier(&starlink_pmu->starlink_pmu_pm_nb);
+}
+
+static void starlink_pmu_destroy(struct starlink_pmu *starlink_pmu)
+{
+ starlink_pmu_pm_unregister(starlink_pmu);
+ cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+}
+
+static int starlink_pmu_probe(struct platform_device *pdev)
+{
+ struct starlink_pmu *starlink_pmu;
+ struct starlink_hw_events *hw_events;
+ struct resource *res;
+ int cpuid, i, ret;
+
+ starlink_pmu = devm_kzalloc(&pdev->dev, sizeof(*starlink_pmu), GFP_KERNEL);
+ if (!starlink_pmu)
+ return -ENOMEM;
+
+ starlink_pmu->pmu_base =
+ devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(starlink_pmu->pmu_base))
+ return PTR_ERR(starlink_pmu->pmu_base);
+
+ starlink_pmu->hw_events = alloc_percpu_gfp(struct starlink_hw_events,
+ GFP_KERNEL);
+ if (!starlink_pmu->hw_events) {
+ dev_err(&pdev->dev, "Failed to allocate per-cpu PMU data\n");
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpuid) {
+ hw_events = per_cpu_ptr(starlink_pmu->hw_events, cpuid);
+ for (i = 0; i < STARLINK_PMU_MAX_COUNTERS; i++)
+ hw_events->events[i] = NULL;
+ }
+
+ ret = starlink_setup_irqs(starlink_pmu, pdev);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hotplug\n");
+ return ret;
+ }
+
+ ret = starlink_pmu_pm_register(starlink_pmu);
+ if (ret) {
+ cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+ return ret;
+ }
+
+ starlink_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = starlink_pmu_event_init,
+ .add = starlink_pmu_add,
+ .del = starlink_pmu_del,
+ .start = starlink_pmu_start,
+ .stop = starlink_pmu_stop,
+ .read = starlink_pmu_update,
+ .attr_groups = starlink_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&starlink_pmu->pmu, STARLINK_PMU_PDEV_NAME, -1);
+ if (ret)
+ starlink_pmu_destroy(starlink_pmu);
+
+ return ret;
+}
+
+static const struct of_device_id starlink_pmu_of_match[] = {
+ { .compatible = "starfive,jh8100-starlink-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, starlink_pmu_of_match);
+
+static struct platform_driver starlink_pmu_driver = {
+ .driver = {
+ .name = STARLINK_PMU_PDEV_NAME,
+ .of_match_table = starlink_pmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = starlink_pmu_probe,
+};
+
+static int
+starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
+ struct starlink_pmu,
+ node);
+
+ if (cpumask_empty(&starlink_pmu->cpumask))
+ cpumask_set_cpu(cpu, &starlink_pmu->cpumask);
+
+ WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(cpu)));
+
+ return 0;
+}
+
+static int
+starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
+ struct starlink_pmu,
+ node);
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &starlink_pmu->cpumask))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&starlink_pmu->pmu, cpu, target);
+
+ cpumask_set_cpu(target, &starlink_pmu->cpumask);
+ WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static int __init starlink_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "soc/starfive/starlink_pmu:online",
+ starlink_pmu_online_cpu,
+ starlink_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ starlink_pmu_cpuhp_state = ret;
+
+ return platform_driver_register(&starlink_pmu_driver);
+}
+
+device_initcall(starlink_pmu_init);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 1edb9c03704f..e16d10c763de 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -993,7 +993,7 @@ static int tx2_uncore_probe(struct platform_device *pdev)
return 0;
}
-static int tx2_uncore_remove(struct platform_device *pdev)
+static void tx2_uncore_remove(struct platform_device *pdev)
{
struct tx2_uncore_pmu *tx2_pmu, *temp;
struct device *dev = &pdev->dev;
@@ -1009,7 +1009,6 @@ static int tx2_uncore_remove(struct platform_device *pdev)
}
}
}
- return 0;
}
static struct platform_driver tx2_uncore_driver = {
@@ -1019,7 +1018,7 @@ static struct platform_driver tx2_uncore_driver = {
.suppress_bind_attrs = true,
},
.probe = tx2_uncore_probe,
- .remove = tx2_uncore_remove,
+ .remove_new = tx2_uncore_remove,
};
static int __init tx2_uncore_driver_init(void)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 7ce344248dda..0d49343d704b 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1937,7 +1937,7 @@ xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
}
}
-static int xgene_pmu_remove(struct platform_device *pdev)
+static void xgene_pmu_remove(struct platform_device *pdev)
{
struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
@@ -1947,13 +1947,11 @@ static int xgene_pmu_remove(struct platform_device *pdev)
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
-
- return 0;
}
static struct platform_driver xgene_pmu_driver = {
.probe = xgene_pmu_probe,
- .remove = xgene_pmu_remove,
+ .remove_new = xgene_pmu_remove,
.driver = {
.name = "xgene-pmu",
.of_match_table = xgene_pmu_of_match,
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index e625b32889bf..0928a526e2ab 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
return ret;
}
- priv->id = of_alias_get_id(np, "mipi_dphy");
+ priv->id = of_alias_get_id(np, "mipi-dphy");
if (priv->id < 0) {
dev_err(dev, "Failed to get phy node alias id: %d\n",
priv->id);
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index a623f092b11f..a43e20abb10d 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -37,56 +37,28 @@
#define EUSB2_TUNE_EUSB_EQU 0x5A
#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
- { \
- .reg = r, \
- .val = v, \
- }
-
-enum reg_fields {
- F_TUNE_EUSB_HS_COMP_CUR,
- F_TUNE_EUSB_EQU,
- F_TUNE_EUSB_SLEW,
- F_TUNE_USB2_HS_COMP_CUR,
- F_TUNE_USB2_PREEM,
- F_TUNE_USB2_EQU,
- F_TUNE_USB2_SLEW,
- F_TUNE_SQUELCH_U,
- F_TUNE_HSDISC,
- F_TUNE_RES_FSDIF,
- F_TUNE_IUSB2,
- F_TUNE_USB2_CROSSOVER,
- F_NUM_TUNE_FIELDS,
-
- F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
- F_FORCE_EN_5,
-
- F_EN_CTL1,
-
- F_RPTR_STATUS,
- F_NUM_FIELDS,
-};
-
-static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
- [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
- [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
- [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
- [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
- [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
- [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
- [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
- [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
- [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
- [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
- [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
- [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-
- [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
- [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-
- [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-
- [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+enum eusb2_reg_layout {
+ TUNE_EUSB_HS_COMP_CUR,
+ TUNE_EUSB_EQU,
+ TUNE_EUSB_SLEW,
+ TUNE_USB2_HS_COMP_CUR,
+ TUNE_USB2_PREEM,
+ TUNE_USB2_EQU,
+ TUNE_USB2_SLEW,
+ TUNE_SQUELCH_U,
+ TUNE_HSDISC,
+ TUNE_RES_FSDIF,
+ TUNE_IUSB2,
+ TUNE_USB2_CROSSOVER,
+ NUM_TUNE_FIELDS,
+
+ FORCE_VAL_5 = NUM_TUNE_FIELDS,
+ FORCE_EN_5,
+
+ EN_CTL1,
+
+ RPTR_STATUS,
+ LAYOUT_SIZE,
};
struct eusb2_repeater_cfg {
@@ -98,10 +70,11 @@ struct eusb2_repeater_cfg {
struct eusb2_repeater {
struct device *dev;
- struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap *regmap;
struct phy *phy;
struct regulator_bulk_data *vregs;
const struct eusb2_repeater_cfg *cfg;
+ u32 base;
enum phy_mode mode;
};
@@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
- [F_TUNE_IUSB2] = 0x8,
- [F_TUNE_SQUELCH_U] = 0x3,
- [F_TUNE_USB2_PREEM] = 0x5,
+static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+ [TUNE_IUSB2] = 0x8,
+ [TUNE_SQUELCH_U] = 0x3,
+ [TUNE_USB2_PREEM] = 0x5,
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
static int eusb2_repeater_init(struct phy *phy)
{
- struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
struct device_node *np = rptr->dev->of_node;
- u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
- u8 override;
+ struct regmap *regmap = rptr->regmap;
+ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+ u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+ u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+ u32 base = rptr->base;
u32 val;
int ret;
- int i;
+
+ of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+ of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+ of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
if (ret)
return ret;
- regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
- if (init_tbl[i]) {
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
- } else {
- /* Write 0 if there's no value set */
- u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-
- regmap_field_update_bits(rptr->regs[i], mask, 0);
- }
- }
- memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
- init_tbl[F_TUNE_IUSB2] = override;
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
- if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
- init_tbl[F_TUNE_HSDISC] = override;
-
- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
- init_tbl[F_TUNE_USB2_PREEM] = override;
-
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-
- ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
- val, val & RPTR_OK, 10, 5);
+ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
@@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ struct regmap *regmap = rptr->regmap;
+ u32 base = rptr->base;
switch (mode) {
case PHY_MODE_USB_HOST:
@@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
	 * per the eUSB 1.2 spec. The code below implements a software
	 * workaround until the observed PHY and controller behaviour is fixed.
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
break;
case PHY_MODE_USB_DEVICE:
/*
@@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* repeater doesn't clear previous value due to shared
* regulators (say host <-> device mode switch).
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, 0);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
break;
default:
return -EINVAL;
@@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *np = dev->of_node;
- struct regmap *regmap;
- int i, ret;
u32 res;
+ int ret;
rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
if (!rptr)
@@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (!rptr->cfg)
return -EINVAL;
- regmap = dev_get_regmap(dev->parent, NULL);
- if (!regmap)
+ rptr->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!rptr->regmap)
return -ENODEV;
ret = of_property_read_u32(np, "reg", &res);
if (ret < 0)
return ret;
- for (i = 0; i < F_NUM_FIELDS; i++)
- eusb2_repeater_tune_reg_fields[i].reg += res;
-
- ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
- eusb2_repeater_tune_reg_fields,
- F_NUM_FIELDS);
- if (ret)
- return ret;
+ rptr->base = res;
ret = eusb2_repeater_init_vregs(rptr);
if (ret < 0) {
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index c2590579190a..03fb0d4b75d7 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -299,7 +299,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 1ad10110dd25..17c4ad7553a5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -3562,14 +3562,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_combo_typec_switch_register(qmp);
- if (ret)
- return ret;
-
- ret = drm_aux_bridge_register(dev);
- if (ret)
- return ret;
-
/* Check for legacy binding with child nodes. */
usb_np = of_get_child_by_name(dev->of_node, "usb3-phy");
if (usb_np) {
@@ -3589,6 +3581,14 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
+ ret = qmp_combo_typec_switch_register(qmp);
+ if (ret)
+ goto err_node_put;
+
+ ret = drm_aux_bridge_register(dev);
+ if (ret)
+ goto err_node_put;
+
pm_runtime_set_active(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 6621246e4ddf..5c003988c35d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1556,7 +1556,7 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
-static const struct qmp_usb_offsets qmp_usb_offsets_ipq8074 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
.serdes = 0,
.pcs = 0x800,
.pcs_misc = 0x600,
@@ -1572,7 +1572,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
.rx = 0x400,
};
-static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
.serdes = 0,
.pcs = 0x600,
.tx = 0x200,
@@ -1624,7 +1624,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_ipq8074,
+ .offsets = &qmp_usb_offsets_v3,
.serdes_tbl = ipq9574_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
@@ -1642,7 +1642,7 @@ static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_ipq8074,
+ .offsets = &qmp_usb_offsets_v3,
.serdes_tbl = ipq8074_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
@@ -1678,7 +1678,7 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_v3,
+ .offsets = &qmp_usb_offsets_v3_msm8996,
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 8163a5983166..d45657aa986a 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -127,6 +127,24 @@ config PINCTRL_AXP209
selected.
Say Y to enable pinctrl and GPIO support for the AXP209 PMIC.
+config PINCTRL_AW9523
+ tristate "Awinic AW9523/AW9523B I2C GPIO expander pinctrl driver"
+ depends on OF && I2C
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select REGMAP
+ select REGMAP_I2C
+ help
+ The Awinic AW9523/AW9523B is a multi-function I2C GPIO
+ expander with PWM functionality. This driver bundles a
+ pinctrl driver to select the function muxing and a GPIO
+	  driver to handle the pins when the GPIO function is selected.
+
+ Say yes to enable pinctrl and GPIO support for the AW9523(B).
+
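As a rough illustration of what the help text above means by bundling a pin controller and a GPIO driver in one expander driver, here is a compile-oriented skeleton. It is not taken from pinctrl-aw9523.c; every name (example_*, the compatible string) and the pin count are hypothetical, and a real driver would also provide pinmux/pinconf and GPIO line callbacks.

// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical skeleton: one I2C probe registers both a pinctrl device
 * and a gpio_chip, the pattern described for the AW9523 above. */
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>

struct example_expander {
	struct pinctrl_desc pctl_desc;
	struct gpio_chip gc;
};

static int example_get_groups_count(struct pinctrl_dev *pctldev)
{
	return 0;	/* a real driver enumerates its pin groups here */
}

static const char *example_get_group_name(struct pinctrl_dev *pctldev,
					   unsigned int selector)
{
	return NULL;	/* never called while the group count is zero */
}

static const struct pinctrl_ops example_pctl_ops = {
	.get_groups_count	= example_get_groups_count,
	.get_group_name		= example_get_group_name,
};

static int example_expander_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct example_expander *chip;
	struct pinctrl_dev *pctl;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pctl_desc.name = "example-expander";
	chip->pctl_desc.owner = THIS_MODULE;
	chip->pctl_desc.pctlops = &example_pctl_ops;
	/* .pins, .npins, .pmxops and .confops would be filled in here. */

	pctl = devm_pinctrl_register(dev, &chip->pctl_desc, chip);
	if (IS_ERR(pctl))
		return PTR_ERR(pctl);

	chip->gc.label = "example-expander";
	chip->gc.parent = dev;
	chip->gc.base = -1;
	chip->gc.ngpio = 16;
	chip->gc.can_sleep = true;
	/* .get, .set and .direction_* callbacks would be filled in here. */

	return devm_gpiochip_add_data(dev, &chip->gc, chip);
}

static const struct of_device_id example_expander_of_match[] = {
	{ .compatible = "vendor,example-expander" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, example_expander_of_match);

static struct i2c_driver example_expander_driver = {
	.driver = {
		.name = "example-expander",
		.of_match_table = example_expander_of_match,
	},
	.probe = example_expander_probe,
};
module_i2c_driver(example_expander_driver);

MODULE_DESCRIPTION("Hypothetical pinctrl+GPIO expander skeleton");
MODULE_LICENSE("GPL");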
config PINCTRL_BM1880
bool "Bitmain BM1880 Pinctrl driver"
depends on OF && (ARCH_BITMAIN || COMPILE_TEST)
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 1071f301cc70..2152539b53d5 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
+obj-$(CONFIG_PINCTRL_AW9523) += pinctrl-aw9523.o
obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_CY8C95X0) += pinctrl-cy8c95x0.o
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
index 012b0a3bad5a..628b60ccc2b0 100644
--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -5,10 +5,10 @@
// Copyright (c) 2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
+#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/string_helpers.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl.h>
@@ -276,7 +276,7 @@ static const struct pinmux_ops cs42l43_pin_mux_ops = {
static const unsigned int cs42l43_pin_drv_str_ma[] = { 1, 2, 4, 8, 9, 10, 12, 16 };
-static inline int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int pin)
+static int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int pin)
{
const struct cs42l43_pin_data *pdat = cs42l43_pin_pins[pin].drv_data;
unsigned int val;
@@ -289,8 +289,8 @@ static inline int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int
return cs42l43_pin_drv_str_ma[(val & pdat->mask) >> pdat->shift];
}
-static inline int cs42l43_pin_set_drv_str(struct cs42l43_pin *priv, unsigned int pin,
- unsigned int ma)
+static int cs42l43_pin_set_drv_str(struct cs42l43_pin *priv, unsigned int pin,
+ unsigned int ma)
{
const struct cs42l43_pin_data *pdat = cs42l43_pin_pins[pin].drv_data;
int i;
@@ -314,7 +314,7 @@ err:
return -EINVAL;
}
-static inline int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
+static int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
{
unsigned int val;
int ret;
@@ -332,8 +332,8 @@ static inline int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
return 85; // Debounce is roughly 85uS
}
-static inline int cs42l43_pin_set_db(struct cs42l43_pin *priv, unsigned int pin,
- unsigned int us)
+static int cs42l43_pin_set_db(struct cs42l43_pin *priv, unsigned int pin,
+ unsigned int us)
{
if (pin >= CS42L43_NUM_GPIOS)
return -ENOTSUPP;
@@ -490,7 +490,7 @@ static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
int ret;
dev_dbg(priv->dev, "Setting gpio%d to %s\n",
- offset + 1, value ? "high" : "low");
+ offset + 1, str_high_low(value));
ret = pm_runtime_resume_and_get(priv->dev);
if (ret) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ee56856cb80c..6649357637ff 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -412,6 +412,10 @@ static int pinctrl_get_device_gpio_range(struct gpio_chip *gc,
* @pctldev: pin controller device to add the range to
* @range: the GPIO range to add
*
+ * DEPRECATED: Don't use this function in new code. See section 2 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to bind pinctrl and
+ * gpio drivers.
+ *
* This adds a range of GPIOs to be handled by a certain pin controller. Call
* this to register handled ranges after registering your pin controller.
*/
@@ -1644,7 +1648,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
unsigned int i, pin;
#ifdef CONFIG_GPIOLIB
- struct gpio_device *gdev __free(gpio_device_put) = NULL;
+ struct gpio_device *gdev = NULL;
struct pinctrl_gpio_range *range;
int gpio_num;
#endif
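Relating to the deprecation note added to pinctrl_add_gpio_range() above: with devicetree the mapping is normally expressed as a gpio-ranges property and handled by the GPIO core, while from C a GPIO driver can declare the range from its own side. The sketch below is only an illustration; the controller name and the pin numbers are made up.

#include <linux/gpio/driver.h>

/* Map this GPIO chip's lines 0..15 onto pins 32..47 of the pin controller
 * registered under the (hypothetical) name "example-pinctrl". */
static int example_register_range(struct gpio_chip *gc)
{
	return gpiochip_add_pin_range(gc, "example-pinctrl", 0, 32, 16);
}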
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index d6f29e6faab7..89bd7ce6711a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1492,7 +1492,7 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
.base_unit_bits = 22,
.bypass = true,
};
- struct pwm_lpss_chip *pwm;
+ struct pwm_chip *chip;
if (!(community->features & PINCTRL_FEATURE_PWM))
return 0;
@@ -1500,8 +1500,8 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
if (!IS_REACHABLE(CONFIG_PWM_LPSS))
return 0;
- pwm = devm_pwm_lpss_probe(pctrl->dev, community->regs + PWMC, &info);
- return PTR_ERR_OR_ZERO(pwm);
+ chip = devm_pwm_lpss_probe(pctrl->dev, community->regs + PWMC, &info);
+ return PTR_ERR_OR_ZERO(chip);
}
int intel_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
index 7e59a4407859..ef6123765885 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -700,6 +700,15 @@ static int mt7981_drv_vbus_pins[] = { 14, };
static int mt7981_drv_vbus_funcs[] = { 1, };
/* EMMC */
+static int mt7981_emmc_reset_pins[] = { 15, };
+static int mt7981_emmc_reset_funcs[] = { 2, };
+
+static int mt7981_emmc_4_pins[] = { 16, 17, 18, 19, 24, 25, };
+static int mt7981_emmc_4_funcs[] = { 2, 2, 2, 2, 2, 2, };
+
+static int mt7981_emmc_8_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, };
+static int mt7981_emmc_8_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
+
static int mt7981_emmc_45_pins[] = { 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, };
static int mt7981_emmc_45_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
@@ -737,6 +746,9 @@ static int mt7981_uart1_1_funcs[] = { 2, 2, 2, 2, };
static int mt7981_uart1_2_pins[] = { 9, 10, };
static int mt7981_uart1_2_funcs[] = { 2, 2, };
+static int mt7981_uart1_3_pins[] = { 26, 27, };
+static int mt7981_uart1_3_funcs[] = { 2, 2, };
+
/* UART2 */
static int mt7981_uart2_1_pins[] = { 22, 23, 24, 25, };
static int mt7981_uart2_1_funcs[] = { 3, 3, 3, 3, };
@@ -851,6 +863,12 @@ static const struct group_desc mt7981_groups[] = {
PINCTRL_PIN_GROUP("udi", mt7981_udi),
/* @GPIO(14) DRV_VBUS(1) */
PINCTRL_PIN_GROUP("drv_vbus", mt7981_drv_vbus),
+ /* @GPIO(15): EMMC_RSTB(2) */
+ PINCTRL_PIN_GROUP("emmc_reset", mt7981_emmc_reset),
+ /* @GPIO(16,17,18,19,24,25): EMMC_DATx, EMMC_CLK, EMMC_CMD */
+ PINCTRL_PIN_GROUP("emmc_4", mt7981_emmc_4),
+ /* @GPIO(16,17,18,19,20,21,22,23,24,25): EMMC_DATx, EMMC_CLK, EMMC_CMD */
+ PINCTRL_PIN_GROUP("emmc_8", mt7981_emmc_8),
/* @GPIO(15,25): EMMC(2) */
PINCTRL_PIN_GROUP("emmc_45", mt7981_emmc_45),
/* @GPIO(16,21): SNFI(3) */
@@ -871,6 +889,8 @@ static const struct group_desc mt7981_groups[] = {
PINCTRL_PIN_GROUP("uart1_1", mt7981_uart1_1),
/* @GPIO(9,10): UART1(2) */
PINCTRL_PIN_GROUP("uart1_2", mt7981_uart1_2),
+ /* @GPIO(26,27): UART1(2) */
+ PINCTRL_PIN_GROUP("uart1_3", mt7981_uart1_3),
/* @GPIO(22,25): UART1(3) */
PINCTRL_PIN_GROUP("uart2_1", mt7981_uart2_1),
/* @GPIO(22,24) PTA_EXT(4) */
@@ -933,7 +953,7 @@ static const struct group_desc mt7981_groups[] = {
static const char *mt7981_wa_aice_groups[] = { "wa_aice1", "wa_aice2", "wm_aice1_1",
"wa_aice3", "wm_aice1_2", };
static const char *mt7981_uart_groups[] = { "net_wo0_uart_txd_0", "net_wo0_uart_txd_1",
- "net_wo0_uart_txd_2", "uart0", "uart1_0", "uart1_1", "uart1_2", "uart2_0",
+ "net_wo0_uart_txd_2", "uart0", "uart1_0", "uart1_1", "uart1_2", "uart1_3", "uart2_0",
"uart2_0_tx_rx", "uart2_1", "wm_uart_0", "wm_aurt_1", "wm_aurt_2", };
static const char *mt7981_dfd_groups[] = { "dfd", "dfd_ntrst", };
static const char *mt7981_wdt_groups[] = { "watchdog", "watchdog1", };
@@ -952,7 +972,7 @@ static const char *mt7981_i2c_groups[] = { "i2c0_0", "i2c0_1", "u2_phy_i2c",
static const char *mt7981_pcm_groups[] = { "pcm", };
static const char *mt7981_udi_groups[] = { "udi", };
static const char *mt7981_usb_groups[] = { "drv_vbus", };
-static const char *mt7981_flash_groups[] = { "emmc_45", "snfi", };
+static const char *mt7981_flash_groups[] = { "emmc_reset", "emmc_4", "emmc_8", "emmc_45", "snfi", };
static const char *mt7981_ethernet_groups[] = { "smi_mdc_mdio", "gbe_ext_mdc_mdio",
"wf0_mode1", "wf0_mode3", "mt7531_int", };
static const char *mt7981_ant_groups[] = { "ant_sel", };
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
index acaac9b38aa8..39e80fa644c1 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -16,7 +16,7 @@
PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
_x_bits, 32, 0)
-/**
+/*
* enum - Locking variants of the iocfg bases
*
* MT7986 have multiple bases to program pin configuration listed as the below:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
index 7be591591cce..dd19e74856a9 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
@@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range),
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range),
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range),
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range),
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range),
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range),
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index e3a76381f7f4..3f8a9dbcb704 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig
index 0fea167c283f..aafecf348670 100644
--- a/drivers/pinctrl/nomadik/Kconfig
+++ b/drivers/pinctrl/nomadik/Kconfig
@@ -18,15 +18,15 @@ config PINCTRL_AB8505
endif
-if (ARCH_U8500 || ARCH_NOMADIK)
+if (ARCH_U8500 || ARCH_NOMADIK || COMPILE_TEST)
config PINCTRL_NOMADIK
bool "Nomadik pin controller driver"
- depends on OF && GPIOLIB
+ depends on OF
select PINMUX
select PINCONF
- select OF_GPIO
- select GPIOLIB_IRQCHIP
+ select GPIOLIB
+ select GPIO_NOMADIK
config PINCTRL_STN8815
bool "STN8815 pin controller driver"
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 490e0959e8be..0b4a3dd9d8c7 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -3,8 +3,9 @@
#include <linux/types.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/gpio/driver.h>
-#include "pinctrl-nomadik.h"
+#include <linux/gpio/gpio-nomadik.h>
/* All the pins that can be used for GPIO and some other functions */
#define _GPIO(offset) (offset)
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 1552222ac68e..c5a52fcaba30 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -3,8 +3,9 @@
#include <linux/types.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/gpio/driver.h>
-#include "pinctrl-nomadik.h"
+#include <linux/gpio/gpio-nomadik.h>
/* All the pins that can be used for GPIO and some other functions */
#define _GPIO(offset) (offset)
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 7911353ac97d..cb0f0d5a5e45 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic GPIO driver for logic cells found in the Nomadik SoC
+ * Pinmux & pinconf driver for the IP block found in the Nomadik SoC. This
+ * driver depends on gpio-nomadik and some handling is intertwined; see
+ * nmk_gpio_chips, which this driver uses to access the array of GPIO banks.
*
* Copyright (C) 2008,2009 STMicroelectronics
* Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
* Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
* Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
*/
+
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
@@ -25,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
@@ -36,15 +40,7 @@
#include "../core.h"
#include "../pinctrl-utils.h"
-#include "pinctrl-nomadik.h"
-
-/*
- * The GPIO module in the Nomadik family of Systems-on-Chip is an
- * AMBA device, managing 32 pins and alternate functions. The logic block
- * is currently used in the Nomadik and ux500.
- *
- * Symbols in this file are called "nmk_gpio" for "nomadik gpio"
- */
+#include <linux/gpio/gpio-nomadik.h>
/*
* pin configurations are represented by 32-bit integers:
@@ -76,8 +72,6 @@
* PIN_CFG - default config with alternate function
*/
-typedef unsigned long pin_cfg_t;
-
#define PIN_NUM_MASK 0x1ff
#define PIN_NUM(x) ((x) & PIN_NUM_MASK)
@@ -172,7 +166,6 @@ typedef unsigned long pin_cfg_t;
#define PIN_SLEEPMODE_DISABLED (0 << PIN_SLEEPMODE_SHIFT)
#define PIN_SLEEPMODE_ENABLED (1 << PIN_SLEEPMODE_SHIFT)
-
/* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */
#define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN)
#define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP)
@@ -200,75 +193,6 @@ typedef unsigned long pin_cfg_t;
(PIN_CFG_DEFAULT |\
(PIN_NUM(num) | PIN_##alt | PIN_OUTPUT_##val))
-/*
- * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
- * the "gpio" namespace for generic and cross-machine functions
- */
-
-#define GPIO_BLOCK_SHIFT 5
-#define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
-#define NMK_MAX_BANKS DIV_ROUND_UP(512, NMK_GPIO_PER_CHIP)
-
-/* Register in the logic block */
-#define NMK_GPIO_DAT 0x00
-#define NMK_GPIO_DATS 0x04
-#define NMK_GPIO_DATC 0x08
-#define NMK_GPIO_PDIS 0x0c
-#define NMK_GPIO_DIR 0x10
-#define NMK_GPIO_DIRS 0x14
-#define NMK_GPIO_DIRC 0x18
-#define NMK_GPIO_SLPC 0x1c
-#define NMK_GPIO_AFSLA 0x20
-#define NMK_GPIO_AFSLB 0x24
-#define NMK_GPIO_LOWEMI 0x28
-
-#define NMK_GPIO_RIMSC 0x40
-#define NMK_GPIO_FIMSC 0x44
-#define NMK_GPIO_IS 0x48
-#define NMK_GPIO_IC 0x4c
-#define NMK_GPIO_RWIMSC 0x50
-#define NMK_GPIO_FWIMSC 0x54
-#define NMK_GPIO_WKS 0x58
-/* These appear in DB8540 and later ASICs */
-#define NMK_GPIO_EDGELEVEL 0x5C
-#define NMK_GPIO_LEVEL 0x60
-
-
-/* Pull up/down values */
-enum nmk_gpio_pull {
- NMK_GPIO_PULL_NONE,
- NMK_GPIO_PULL_UP,
- NMK_GPIO_PULL_DOWN,
-};
-
-/* Sleep mode */
-enum nmk_gpio_slpm {
- NMK_GPIO_SLPM_INPUT,
- NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
- NMK_GPIO_SLPM_NOCHANGE,
- NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
-};
-
-struct nmk_gpio_chip {
- struct gpio_chip chip;
- void __iomem *addr;
- struct clk *clk;
- unsigned int bank;
- void (*set_ioforce)(bool enable);
- spinlock_t lock;
- bool sleepmode;
- /* Keep track of configured edges */
- u32 edge_rising;
- u32 edge_falling;
- u32 real_wake;
- u32 rwimsc;
- u32 fwimsc;
- u32 rimsc;
- u32 fimsc;
- u32 pull_up;
- u32 lowemi;
-};
-
/**
* struct nmk_pinctrl - state container for the Nomadik pin controller
* @dev: containing device pointer
@@ -283,14 +207,13 @@ struct nmk_pinctrl {
void __iomem *prcm_base;
};
-static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
+/* See nmk_gpio_populate_chip(), which fills this array. */
+struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
-static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
-
-#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)
+DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int gpio_mode)
+ unsigned int offset, int gpio_mode)
{
u32 afunc, bfunc;
@@ -304,21 +227,8 @@ static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
}
-static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, enum nmk_gpio_slpm mode)
-{
- u32 slpm;
-
- slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
- if (mode == NMK_GPIO_SLPM_NOCHANGE)
- slpm |= BIT(offset);
- else
- slpm &= ~BIT(offset);
- writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
-}
-
static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, enum nmk_gpio_pull pull)
+ unsigned int offset, enum nmk_gpio_pull pull)
{
u32 pdis;
@@ -342,7 +252,7 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
}
static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, bool lowemi)
+ unsigned int offset, bool lowemi)
{
bool enabled = nmk_chip->lowemi & BIT(offset);
@@ -359,29 +269,13 @@ static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
}
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
- unsigned offset)
+ unsigned int offset)
{
writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
}
-static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int val)
-{
- if (val)
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
- else
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
-}
-
-static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int val)
-{
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRS);
- __nmk_gpio_set_output(nmk_chip, offset, val);
-}
-
static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
- unsigned offset, int gpio_mode,
+ unsigned int offset, int gpio_mode,
bool glitch)
{
u32 rwimsc = nmk_chip->rwimsc;
@@ -408,7 +302,7 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
}
static void
-nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
+nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned int offset)
{
u32 falling = nmk_chip->fimsc & BIT(offset);
u32 rising = nmk_chip->rimsc & BIT(offset);
@@ -447,7 +341,7 @@ static void nmk_write_masked(void __iomem *reg, u32 mask, u32 value)
}
static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
- unsigned offset, unsigned alt_num)
+ unsigned int offset, unsigned int alt_num)
{
int i;
u16 reg;
@@ -484,14 +378,14 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
*/
if (!alt_num) {
for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) {
- if (pin_desc->altcx[i].used == true) {
+ if (pin_desc->altcx[i].used) {
reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
bit = pin_desc->altcx[i].control_bit;
if (readl(npct->prcm_base + reg) & BIT(bit)) {
nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0);
dev_dbg(npct->dev,
"PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n",
- offset, i+1);
+ offset, i + 1);
}
}
}
@@ -499,10 +393,10 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
}
alt_index = alt_num - 1;
- if (pin_desc->altcx[alt_index].used == false) {
+ if (!pin_desc->altcx[alt_index].used) {
dev_warn(npct->dev,
- "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n",
- offset, alt_num);
+ "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n",
+ offset, alt_num);
return;
}
@@ -513,14 +407,14 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) {
if (i == alt_index)
continue;
- if (pin_desc->altcx[i].used == true) {
+ if (pin_desc->altcx[i].used) {
reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
bit = pin_desc->altcx[i].control_bit;
if (readl(npct->prcm_base + reg) & BIT(bit)) {
nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0);
dev_dbg(npct->dev,
"PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n",
- offset, i+1);
+ offset, i + 1);
}
}
}
@@ -528,7 +422,7 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
reg = gpiocr_regs[pin_desc->altcx[alt_index].reg_index];
bit = pin_desc->altcx[alt_index].control_bit;
dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been selected\n",
- offset, alt_index+1);
+ offset, alt_index + 1);
nmk_write_masked(npct->prcm_base + reg, BIT(bit), BIT(bit));
}
@@ -548,7 +442,7 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
{
int i;
- for (i = 0; i < NUM_BANKS; i++) {
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
unsigned int temp = slpm[i];
@@ -566,7 +460,7 @@ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
{
int i;
- for (i = 0; i < NUM_BANKS; i++) {
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
if (!chip)
@@ -578,7 +472,8 @@ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
}
}
-static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
+/* Only called by gpio-nomadik but requires knowledge of struct nmk_pinctrl. */
+int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
{
int i;
u16 reg;
@@ -600,586 +495,16 @@ static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
pin_desc = npct->soc->altcx_pins + i;
gpiocr_regs = npct->soc->prcm_gpiocr_registers;
for (i = 0; i < PRCM_IDX_GPIOCR_ALTC_MAX; i++) {
- if (pin_desc->altcx[i].used == true) {
+ if (pin_desc->altcx[i].used) {
reg = gpiocr_regs[pin_desc->altcx[i].reg_index];
bit = pin_desc->altcx[i].control_bit;
if (readl(npct->prcm_base + reg) & BIT(bit))
- return NMK_GPIO_ALT_C+i+1;
+ return NMK_GPIO_ALT_C + i + 1;
}
}
return NMK_GPIO_ALT_C;
}
-/* IRQ functions */
-
-static void nmk_gpio_irq_ack(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- clk_enable(nmk_chip->clk);
- writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
- clk_disable(nmk_chip->clk);
-}
-
-enum nmk_gpio_irq_type {
- NORMAL,
- WAKE,
-};
-
-static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
- int offset, enum nmk_gpio_irq_type which,
- bool enable)
-{
- u32 *rimscval;
- u32 *fimscval;
- u32 rimscreg;
- u32 fimscreg;
-
- if (which == NORMAL) {
- rimscreg = NMK_GPIO_RIMSC;
- fimscreg = NMK_GPIO_FIMSC;
- rimscval = &nmk_chip->rimsc;
- fimscval = &nmk_chip->fimsc;
- } else {
- rimscreg = NMK_GPIO_RWIMSC;
- fimscreg = NMK_GPIO_FWIMSC;
- rimscval = &nmk_chip->rwimsc;
- fimscval = &nmk_chip->fwimsc;
- }
-
- /* we must individually set/clear the two edges */
- if (nmk_chip->edge_rising & BIT(offset)) {
- if (enable)
- *rimscval |= BIT(offset);
- else
- *rimscval &= ~BIT(offset);
- writel(*rimscval, nmk_chip->addr + rimscreg);
- }
- if (nmk_chip->edge_falling & BIT(offset)) {
- if (enable)
- *fimscval |= BIT(offset);
- else
- *fimscval &= ~BIT(offset);
- writel(*fimscval, nmk_chip->addr + fimscreg);
- }
-}
-
-static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
- int offset, bool on)
-{
- /*
- * Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is
- * disabled, since setting SLPM to 1 increases power consumption, and
- * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
- */
- if (nmk_chip->sleepmode && on) {
- __nmk_gpio_set_slpm(nmk_chip, offset,
- NMK_GPIO_SLPM_WAKEUP_ENABLE);
- }
-
- __nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
-}
-
-static void nmk_gpio_irq_maskunmask(struct nmk_gpio_chip *nmk_chip,
- struct irq_data *d, bool enable)
-{
- unsigned long flags;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
- spin_lock(&nmk_chip->lock);
-
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable);
-
- if (!(nmk_chip->real_wake & BIT(d->hwirq)))
- __nmk_gpio_set_wake(nmk_chip, d->hwirq, enable);
-
- spin_unlock(&nmk_chip->lock);
- spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- clk_disable(nmk_chip->clk);
-}
-
-static void nmk_gpio_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- nmk_gpio_irq_maskunmask(nmk_chip, d, false);
- gpiochip_disable_irq(gc, irqd_to_hwirq(d));
-}
-
-static void nmk_gpio_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- nmk_gpio_irq_maskunmask(nmk_chip, d, true);
-}
-
-static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
- unsigned long flags;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
- spin_lock(&nmk_chip->lock);
-
- if (irqd_irq_disabled(d))
- __nmk_gpio_set_wake(nmk_chip, d->hwirq, on);
-
- if (on)
- nmk_chip->real_wake |= BIT(d->hwirq);
- else
- nmk_chip->real_wake &= ~BIT(d->hwirq);
-
- spin_unlock(&nmk_chip->lock);
- spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
- bool enabled = !irqd_irq_disabled(d);
- bool wake = irqd_is_wakeup_set(d);
- unsigned long flags;
-
- if (type & IRQ_TYPE_LEVEL_HIGH)
- return -EINVAL;
- if (type & IRQ_TYPE_LEVEL_LOW)
- return -EINVAL;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_chip->lock, flags);
-
- if (enabled)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, false);
-
- if (enabled || wake)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false);
-
- nmk_chip->edge_rising &= ~BIT(d->hwirq);
- if (type & IRQ_TYPE_EDGE_RISING)
- nmk_chip->edge_rising |= BIT(d->hwirq);
-
- nmk_chip->edge_falling &= ~BIT(d->hwirq);
- if (type & IRQ_TYPE_EDGE_FALLING)
- nmk_chip->edge_falling |= BIT(d->hwirq);
-
- if (enabled)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true);
-
- if (enabled || wake)
- __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, true);
-
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-static unsigned int nmk_gpio_irq_startup(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- clk_enable(nmk_chip->clk);
- nmk_gpio_irq_unmask(d);
- return 0;
-}
-
-static void nmk_gpio_irq_shutdown(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- nmk_gpio_irq_mask(d);
- clk_disable(nmk_chip->clk);
-}
-
-static void nmk_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_chip *host_chip = irq_desc_get_chip(desc);
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status;
-
- chained_irq_enter(host_chip, desc);
-
- clk_enable(nmk_chip->clk);
- status = readl(nmk_chip->addr + NMK_GPIO_IS);
- clk_disable(nmk_chip->clk);
-
- while (status) {
- int bit = __ffs(status);
-
- generic_handle_domain_irq(chip->irq.domain, bit);
- status &= ~BIT(bit);
- }
-
- chained_irq_exit(host_chip, desc);
-}
-
-/* I/O Functions */
-
-static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- int dir;
-
- clk_enable(nmk_chip->clk);
-
- dir = readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset);
-
- clk_disable(nmk_chip->clk);
-
- if (dir)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
-static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-
- clk_enable(nmk_chip->clk);
-
- writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
-
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- int value;
-
- clk_enable(nmk_chip->clk);
-
- value = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
-
- clk_disable(nmk_chip->clk);
-
- return value;
-}
-
-static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
- int val)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-
- clk_enable(nmk_chip->clk);
-
- __nmk_gpio_set_output(nmk_chip, offset, val);
-
- clk_disable(nmk_chip->clk);
-}
-
-static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
- int val)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-
- clk_enable(nmk_chip->clk);
-
- __nmk_gpio_make_output(nmk_chip, offset, val);
-
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
-{
- u32 afunc, bfunc;
-
- clk_enable(nmk_chip->clk);
-
- afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & BIT(offset);
- bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & BIT(offset);
-
- clk_disable(nmk_chip->clk);
-
- return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
-}
-
-static void nmk_gpio_dbg_show_one(struct seq_file *s,
- struct pinctrl_dev *pctldev, struct gpio_chip *chip,
- unsigned offset, unsigned gpio)
-{
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- int mode;
- bool is_out;
- bool data_out;
- bool pull;
- const char *modes[] = {
- [NMK_GPIO_ALT_GPIO] = "gpio",
- [NMK_GPIO_ALT_A] = "altA",
- [NMK_GPIO_ALT_B] = "altB",
- [NMK_GPIO_ALT_C] = "altC",
- [NMK_GPIO_ALT_C+1] = "altC1",
- [NMK_GPIO_ALT_C+2] = "altC2",
- [NMK_GPIO_ALT_C+3] = "altC3",
- [NMK_GPIO_ALT_C+4] = "altC4",
- };
-
- char *label = gpiochip_dup_line_label(chip, offset);
- if (IS_ERR(label))
- return;
-
- clk_enable(nmk_chip->clk);
- is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
- pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
- data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
- mode = nmk_gpio_get_mode(nmk_chip, offset);
- if ((mode == NMK_GPIO_ALT_C) && pctldev)
- mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
-
- if (is_out) {
- seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
- gpio,
- label ?: "(none)",
- data_out ? "hi" : "lo",
- (mode < 0) ? "unknown" : modes[mode]);
- } else {
- int irq = chip->to_irq(chip, offset);
- const int pullidx = pull ? 1 : 0;
- int val;
- static const char * const pulls[] = {
- "none ",
- "pull enabled",
- };
-
- seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
- gpio,
- label ?: "(none)",
- pulls[pullidx],
- (mode < 0) ? "unknown" : modes[mode]);
-
- val = nmk_gpio_get_input(chip, offset);
- seq_printf(s, " VAL %d", val);
-
- /*
- * This races with request_irq(), set_irq_type(),
- * and set_irq_wake() ... but those are "rare".
- */
- if (irq > 0 && irq_has_action(irq)) {
- char *trigger;
- bool wake;
-
- if (nmk_chip->edge_rising & BIT(offset))
- trigger = "edge-rising";
- else if (nmk_chip->edge_falling & BIT(offset))
- trigger = "edge-falling";
- else
- trigger = "edge-undefined";
-
- wake = !!(nmk_chip->real_wake & BIT(offset));
-
- seq_printf(s, " irq-%d %s%s",
- irq, trigger, wake ? " wakeup" : "");
- }
- }
- clk_disable(nmk_chip->clk);
-}
-
-static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
-{
- unsigned i;
- unsigned gpio = chip->base;
-
- for (i = 0; i < chip->ngpio; i++, gpio++) {
- nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
- seq_printf(s, "\n");
- }
-}
-
-#else
-static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
- struct pinctrl_dev *pctldev,
- struct gpio_chip *chip,
- unsigned offset, unsigned gpio)
-{
-}
-#define nmk_gpio_dbg_show NULL
-#endif
-
-/*
- * We will allocate memory for the state container using devm* allocators
- * binding to the first device reaching this point, it doesn't matter if
- * it is the pin controller or GPIO driver. However we need to use the right
- * platform device when looking up resources so pay attention to pdev.
- */
-static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
- struct platform_device *pdev)
-{
- struct nmk_gpio_chip *nmk_chip;
- struct platform_device *gpio_pdev;
- struct gpio_chip *chip;
- struct resource *res;
- struct clk *clk;
- void __iomem *base;
- u32 id;
-
- gpio_pdev = of_find_device_by_node(np);
- if (!gpio_pdev) {
- pr_err("populate \"%pOFn\": device not found\n", np);
- return ERR_PTR(-ENODEV);
- }
- if (of_property_read_u32(np, "gpio-bank", &id)) {
- dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
- platform_device_put(gpio_pdev);
- return ERR_PTR(-EINVAL);
- }
-
- /* Already populated? */
- nmk_chip = nmk_gpio_chips[id];
- if (nmk_chip) {
- platform_device_put(gpio_pdev);
- return nmk_chip;
- }
-
- nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
- if (!nmk_chip) {
- platform_device_put(gpio_pdev);
- return ERR_PTR(-ENOMEM);
- }
-
- nmk_chip->bank = id;
- chip = &nmk_chip->chip;
- chip->base = id * NMK_GPIO_PER_CHIP;
- chip->ngpio = NMK_GPIO_PER_CHIP;
- chip->label = dev_name(&gpio_pdev->dev);
- chip->parent = &gpio_pdev->dev;
-
- res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base)) {
- platform_device_put(gpio_pdev);
- return ERR_CAST(base);
- }
- nmk_chip->addr = base;
-
- clk = clk_get(&gpio_pdev->dev, NULL);
- if (IS_ERR(clk)) {
- platform_device_put(gpio_pdev);
- return (void *) clk;
- }
- clk_prepare(clk);
- nmk_chip->clk = clk;
-
- BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
- nmk_gpio_chips[id] = nmk_chip;
- return nmk_chip;
-}
-
-static void nmk_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(gc);
-
- seq_printf(p, "nmk%u-%u-%u", nmk_chip->bank,
- gc->base, gc->base + gc->ngpio - 1);
-}
-
-static const struct irq_chip nmk_irq_chip = {
- .irq_ack = nmk_gpio_irq_ack,
- .irq_mask = nmk_gpio_irq_mask,
- .irq_unmask = nmk_gpio_irq_unmask,
- .irq_set_type = nmk_gpio_irq_set_type,
- .irq_set_wake = nmk_gpio_irq_set_wake,
- .irq_startup = nmk_gpio_irq_startup,
- .irq_shutdown = nmk_gpio_irq_shutdown,
- .irq_print_chip = nmk_gpio_irq_print_chip,
- .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-static int nmk_gpio_probe(struct platform_device *dev)
-{
- struct device_node *np = dev->dev.of_node;
- struct nmk_gpio_chip *nmk_chip;
- struct gpio_chip *chip;
- struct gpio_irq_chip *girq;
- bool supports_sleepmode;
- int irq;
- int ret;
-
- nmk_chip = nmk_gpio_populate_chip(np, dev);
- if (IS_ERR(nmk_chip)) {
- dev_err(&dev->dev, "could not populate nmk chip struct\n");
- return PTR_ERR(nmk_chip);
- }
-
- supports_sleepmode =
- of_property_read_bool(np, "st,supports-sleepmode");
-
- /* Correct platform device ID */
- dev->id = nmk_chip->bank;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0)
- return irq;
-
- /*
- * The virt address in nmk_chip->addr is in the nomadik register space,
- * so we can simply convert the resource address, without remapping
- */
- nmk_chip->sleepmode = supports_sleepmode;
- spin_lock_init(&nmk_chip->lock);
-
- chip = &nmk_chip->chip;
- chip->parent = &dev->dev;
- chip->request = gpiochip_generic_request;
- chip->free = gpiochip_generic_free;
- chip->get_direction = nmk_gpio_get_dir;
- chip->direction_input = nmk_gpio_make_input;
- chip->get = nmk_gpio_get_input;
- chip->direction_output = nmk_gpio_make_output;
- chip->set = nmk_gpio_set_output;
- chip->dbg_show = nmk_gpio_dbg_show;
- chip->can_sleep = false;
- chip->owner = THIS_MODULE;
-
- girq = &chip->irq;
- gpio_irq_chip_set_chip(girq, &nmk_irq_chip);
- girq->parent_handler = nmk_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&dev->dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
-
- clk_enable(nmk_chip->clk);
- nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
- clk_disable(nmk_chip->clk);
-
- ret = gpiochip_add_data(chip, nmk_chip);
- if (ret)
- return ret;
-
- platform_set_drvdata(dev, nmk_chip);
-
- dev_info(&dev->dev, "chip registered\n");
-
- return 0;
-}
-
static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
@@ -1188,43 +513,51 @@ static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
}
static const char *nmk_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
+ unsigned int selector)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
return npct->soc->groups[selector].grp.name;
}
-static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *npins)
+static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
*pins = npct->soc->groups[selector].grp.pins;
- *npins = npct->soc->groups[selector].grp.npins;
+ *num_pins = npct->soc->groups[selector].grp.npins;
return 0;
}
-static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin)
+/*
+ * This makes the mapping from pin number to a GPIO chip. We also return the
+ * pin offset in the GPIO chip for convenience (and to avoid a second loop).
+ */
+static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned int pin,
+ unsigned int *offset)
{
- int i;
+ int i, j = 0;
struct nmk_gpio_chip *nmk_gpio;
- for(i = 0; i < NMK_MAX_BANKS; i++) {
+ /* We assume that pins are allocated in bank order. */
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
nmk_gpio = nmk_gpio_chips[i];
if (!nmk_gpio)
continue;
- if (pin >= nmk_gpio->chip.base &&
- pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio)
+ if (pin >= j && pin < j + nmk_gpio->chip.ngpio) {
+ if (offset)
+ *offset = pin - j;
return nmk_gpio;
+ }
+ j += nmk_gpio->chip.ngpio;
}
return NULL;
}
-static struct gpio_chip *find_gc_from_pin(unsigned pin)
+static struct gpio_chip *find_gc_from_pin(unsigned int pin)
{
- struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin);
+ struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin, NULL);
if (nmk_gpio)
return &nmk_gpio->chip;
@@ -1232,7 +565,7 @@ static struct gpio_chip *find_gc_from_pin(unsigned pin)
}
static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset)
+ unsigned int offset)
{
struct gpio_chip *chip = find_gc_from_pin(offset);
@@ -1243,9 +576,9 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
}
-static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- const char *function)
+static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
+ const char *function)
{
if (*num_maps == *reserved_maps)
return -ENOSPC;
@@ -1259,9 +592,9 @@ static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
}
static int nmk_dt_add_map_configs(struct pinctrl_map **map,
- unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- unsigned long *configs, unsigned num_configs)
+ unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
+ unsigned long *configs, unsigned int num_configs)
{
unsigned long *dup_configs;
@@ -1352,9 +685,9 @@ static const struct nmk_cfg_param nmk_cfg_params[] = {
static int nmk_dt_pin_config(int index, int val, unsigned long *config)
{
- if (nmk_cfg_params[index].choice == NULL)
+ if (!nmk_cfg_params[index].choice) {
*config = nmk_cfg_params[index].config;
- else {
+ } else {
/* test if out of range */
if (val < nmk_cfg_params[index].size) {
*config = nmk_cfg_params[index].config |
@@ -1377,15 +710,14 @@ static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pi
}
static bool nmk_pinctrl_dt_get_config(struct device_node *np,
- unsigned long *configs)
+ unsigned long *configs)
{
bool has_config = 0;
unsigned long cfg = 0;
int i, val, ret;
for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) {
- ret = of_property_read_u32(np,
- nmk_cfg_params[i].property, &val);
+ ret = of_property_read_u32(np, nmk_cfg_params[i].property, &val);
if (ret != -EINVAL) {
if (nmk_dt_pin_config(i, val, &cfg) == 0) {
*configs |= cfg;
@@ -1398,10 +730,10 @@ static bool nmk_pinctrl_dt_get_config(struct device_node *np,
}
static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned *reserved_maps,
- unsigned *num_maps)
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *reserved_maps,
+ unsigned int *num_maps)
{
int ret;
const char *function = NULL;
@@ -1426,7 +758,7 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
of_property_for_each_string(np, "groups", prop, group) {
ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
- group, function);
+ group, function);
if (ret < 0)
goto exit;
}
@@ -1467,10 +799,11 @@ exit:
}
static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
{
- unsigned reserved_maps;
+ unsigned int reserved_maps;
struct device_node *np;
int ret;
@@ -1480,7 +813,7 @@ static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
for_each_child_of_node(np_config, np) {
ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
- &reserved_maps, num_maps);
+ &reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
of_node_put(np);
@@ -1508,7 +841,7 @@ static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
}
static const char *nmk_pmx_get_func_name(struct pinctrl_dev *pctldev,
- unsigned function)
+ unsigned int function)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
@@ -1516,7 +849,7 @@ static const char *nmk_pmx_get_func_name(struct pinctrl_dev *pctldev,
}
static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
- unsigned function,
+ unsigned int function,
const char * const **groups,
unsigned * const num_groups)
{
@@ -1528,12 +861,12 @@ static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
+ unsigned int group)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
const struct nmk_pingroup *g;
- static unsigned int slpm[NUM_BANKS];
+ static unsigned int slpm[NMK_MAX_BANKS];
unsigned long flags = 0;
bool glitch;
int ret = -EINVAL;
@@ -1544,7 +877,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
if (g->altsetting < 0)
return -EINVAL;
- dev_dbg(npct->dev, "enable group %s, %u pins\n", g->grp.name, g->grp.npins);
+ dev_dbg(npct->dev, "enable group %s, %zu pins\n", g->grp.name, g->grp.npins);
/*
* If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
@@ -1579,26 +912,38 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* Then mask the pins that need to be sleeping now when we're
* switching to the ALT C function.
*/
- for (i = 0; i < g->grp.npins; i++)
- slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
+ for (i = 0; i < g->grp.npins; i++) {
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned int bit;
+
+ nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i], &bit);
+ if (!nmk_chip) {
+ dev_err(npct->dev,
+ "invalid pin offset %d in group %s at index %d\n",
+ g->grp.pins[i], g->grp.name, i);
+ goto out_pre_slpm_init;
+ }
+
+ slpm[nmk_chip->bank] &= ~BIT(bit);
+ }
nmk_gpio_glitch_slpm_init(slpm);
}
for (i = 0; i < g->grp.npins; i++) {
struct nmk_gpio_chip *nmk_chip;
- unsigned bit;
+ unsigned int bit;
- nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i]);
+ nmk_chip = find_nmk_gpio_from_pin(g->grp.pins[i], &bit);
if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d in group %s at index %d\n",
g->grp.pins[i], g->grp.name, i);
goto out_glitch;
}
- dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->grp.pins[i], g->altsetting);
+ dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
+ g->grp.pins[i], g->altsetting);
clk_enable(nmk_chip->clk);
- bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -1609,7 +954,7 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
nmk_gpio_disable_lazy_irq(nmk_chip, bit);
__nmk_gpio_set_mode_safe(nmk_chip, bit,
- (g->altsetting & NMK_GPIO_ALT_C), glitch);
+ (g->altsetting & NMK_GPIO_ALT_C), glitch);
clk_disable(nmk_chip->clk);
/*
@@ -1622,29 +967,30 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
*/
if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C)
nmk_prcm_altcx_set_mode(npct, g->grp.pins[i],
- g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
+ g->altsetting >> NMK_GPIO_ALT_CX_SHIFT);
}
/* When all pins are successfully reconfigured we get here */
ret = 0;
out_glitch:
- if (glitch) {
+ if (glitch)
nmk_gpio_glitch_slpm_restore(slpm);
+out_pre_slpm_init:
+ if (glitch)
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- }
return ret;
}
static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset)
+ unsigned int pin)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
- unsigned bit;
+ unsigned int bit;
if (!range) {
dev_err(npct->dev, "invalid range\n");
@@ -1657,10 +1003,11 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
chip = range->gc;
nmk_chip = gpiochip_get_data(chip);
- dev_dbg(npct->dev, "enable pin %u as GPIO\n", offset);
+ dev_dbg(npct->dev, "enable pin %u as GPIO\n", pin);
+
+ find_nmk_gpio_from_pin(pin, &bit);
clk_enable(nmk_chip->clk);
- bit = offset % NMK_GPIO_PER_CHIP;
/* There is no glitch when converting any pin to GPIO */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
clk_disable(nmk_chip->clk);
@@ -1670,11 +1017,11 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
static void nmk_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset)
+ unsigned int pin)
{
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- dev_dbg(npct->dev, "disable pin %u as GPIO\n", offset);
+ dev_dbg(npct->dev, "disable pin %u as GPIO\n", pin);
/* Set the pin to some default state, GPIO is usually default */
}
@@ -1688,34 +1035,34 @@ static const struct pinmux_ops nmk_pinmux_ops = {
.strict = true,
};
-static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config)
{
/* Not implemented */
return -EINVAL;
}
-static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *configs, unsigned num_configs)
+static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
{
- static const char *pullnames[] = {
+ static const char * const pullnames[] = {
[NMK_GPIO_PULL_NONE] = "none",
[NMK_GPIO_PULL_UP] = "up",
[NMK_GPIO_PULL_DOWN] = "down",
[3] /* illegal */ = "??"
};
- static const char *slpmnames[] = {
+ static const char * const slpmnames[] = {
[NMK_GPIO_SLPM_INPUT] = "input/wakeup",
[NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
};
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
struct nmk_gpio_chip *nmk_chip;
- unsigned bit;
- pin_cfg_t cfg;
+ unsigned int bit;
+ unsigned long cfg;
int pull, slpm, output, val, i;
bool lowemi, gpiomode, sleep;
- nmk_chip = find_nmk_gpio_from_pin(pin);
+ nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d\n", pin);
@@ -1728,7 +1075,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
* here we just ignore that part. It's being handled by the
* framework and pinmux callback respectively.
*/
- cfg = (pin_cfg_t) configs[i];
+ cfg = configs[i];
pull = PIN_PULL(cfg);
slpm = PIN_SLPM(cfg);
output = PIN_DIR(cfg);
@@ -1773,13 +1120,12 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
lowemi ? "on" : "off");
clk_enable(nmk_chip->clk);
- bit = pin % NMK_GPIO_PER_CHIP;
if (gpiomode)
/* No glitch when going to GPIO mode */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
- if (output)
+ if (output) {
__nmk_gpio_make_output(nmk_chip, bit, val);
- else {
+ } else {
__nmk_gpio_make_input(nmk_chip, bit);
__nmk_gpio_set_pull(nmk_chip, bit, pull);
}
@@ -1844,17 +1190,17 @@ static int nmk_pinctrl_resume(struct device *dev)
static int nmk_pinctrl_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *prcm_np;
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ struct fwnode_handle *prcm_fwnode;
struct nmk_pinctrl *npct;
- unsigned int version = 0;
+ uintptr_t version = 0;
int i;
npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
if (!npct)
return -ENOMEM;
- version = (unsigned int)device_get_match_data(&pdev->dev);
+ version = (uintptr_t)device_get_match_data(&pdev->dev);
/* Poke in other ASIC variants here */
if (version == PINCTRL_NMK_STN8815)
@@ -1870,33 +1216,33 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
* or after this point: it shouldn't matter as the APIs are orthogonal.
*/
for (i = 0; i < NMK_MAX_BANKS; i++) {
- struct device_node *gpio_np;
+ struct fwnode_handle *gpio_fwnode;
struct nmk_gpio_chip *nmk_chip;
- gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
- if (gpio_np) {
- dev_info(&pdev->dev,
- "populate NMK GPIO %d \"%pOFn\"\n",
- i, gpio_np);
- nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
- if (IS_ERR(nmk_chip))
- dev_err(&pdev->dev,
- "could not populate nmk chip struct "
- "- continue anyway\n");
- of_node_put(gpio_np);
- }
+ gpio_fwnode = fwnode_find_reference(fwnode, "nomadik-gpio-chips", i);
+ if (IS_ERR(gpio_fwnode))
+ continue;
+
+ dev_info(&pdev->dev, "populate NMK GPIO %d \"%pfwP\"\n", i, gpio_fwnode);
+ nmk_chip = nmk_gpio_populate_chip(gpio_fwnode, pdev);
+ if (IS_ERR(nmk_chip))
+ dev_err(&pdev->dev,
+ "could not populate nmk chip struct - continue anyway\n");
+ else
+ /* We are NOT compatible with mobileye,eyeq5-gpio. */
+ BUG_ON(nmk_chip->is_mobileye_soc);
+ fwnode_handle_put(gpio_fwnode);
}
- prcm_np = of_parse_phandle(np, "prcm", 0);
- if (prcm_np) {
- npct->prcm_base = of_iomap(prcm_np, 0);
- of_node_put(prcm_np);
+ prcm_fwnode = fwnode_find_reference(fwnode, "prcm", 0);
+ if (!IS_ERR(prcm_fwnode)) {
+ npct->prcm_base = fwnode_iomap(prcm_fwnode, 0);
+ fwnode_handle_put(prcm_fwnode);
}
if (!npct->prcm_base) {
if (version == PINCTRL_NMK_STN8815) {
dev_info(&pdev->dev,
- "No PRCM base, "
- "assuming no ALT-Cx control is available\n");
+ "No PRCM base, assuming no ALT-Cx control is available\n");
} else {
dev_err(&pdev->dev, "missing PRCM base address\n");
return -EINVAL;
@@ -1919,19 +1265,6 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id nmk_gpio_match[] = {
- { .compatible = "st,nomadik-gpio", },
- {}
-};
-
-static struct platform_driver nmk_gpio_driver = {
- .driver = {
- .name = "gpio",
- .of_match_table = nmk_gpio_match,
- },
- .probe = nmk_gpio_probe,
-};
-
static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops,
nmk_pinctrl_suspend,
nmk_pinctrl_resume);
@@ -1945,12 +1278,6 @@ static struct platform_driver nmk_pinctrl_driver = {
.probe = nmk_pinctrl_probe,
};
-static int __init nmk_gpio_init(void)
-{
- return platform_driver_register(&nmk_gpio_driver);
-}
-subsys_initcall(nmk_gpio_init);
-
static int __init nmk_pinctrl_init(void)
{
return platform_driver_register(&nmk_pinctrl_driver);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
deleted file mode 100644
index 1ef2559bc571..000000000000
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PINCTRL_PINCTRL_NOMADIK_H
-#define PINCTRL_PINCTRL_NOMADIK_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include <linux/pinctrl/pinctrl.h>
-
-/* Package definitions */
-#define PINCTRL_NMK_STN8815 0
-#define PINCTRL_NMK_DB8500 1
-
-/* Alternate functions: function C is set in hw by setting both A and B */
-#define NMK_GPIO_ALT_GPIO 0
-#define NMK_GPIO_ALT_A 1
-#define NMK_GPIO_ALT_B 2
-#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
-
-#define NMK_GPIO_ALT_CX_SHIFT 2
-#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
-
-#define PRCM_GPIOCR_ALTCX(pin_num,\
- altc1_used, altc1_ri, altc1_cb,\
- altc2_used, altc2_ri, altc2_cb,\
- altc3_used, altc3_ri, altc3_cb,\
- altc4_used, altc4_ri, altc4_cb)\
-{\
- .pin = pin_num,\
- .altcx[PRCM_IDX_GPIOCR_ALTC1] = {\
- .used = altc1_used,\
- .reg_index = altc1_ri,\
- .control_bit = altc1_cb\
- },\
- .altcx[PRCM_IDX_GPIOCR_ALTC2] = {\
- .used = altc2_used,\
- .reg_index = altc2_ri,\
- .control_bit = altc2_cb\
- },\
- .altcx[PRCM_IDX_GPIOCR_ALTC3] = {\
- .used = altc3_used,\
- .reg_index = altc3_ri,\
- .control_bit = altc3_cb\
- },\
- .altcx[PRCM_IDX_GPIOCR_ALTC4] = {\
- .used = altc4_used,\
- .reg_index = altc4_ri,\
- .control_bit = altc4_cb\
- },\
-}
-
-/**
- * enum prcm_gpiocr_reg_index
- * Used to reference an PRCM GPIOCR register address.
- */
-enum prcm_gpiocr_reg_index {
- PRCM_IDX_GPIOCR1,
- PRCM_IDX_GPIOCR2,
- PRCM_IDX_GPIOCR3
-};
-/**
- * enum prcm_gpiocr_altcx_index
- * Used to reference an Other alternate-C function.
- */
-enum prcm_gpiocr_altcx_index {
- PRCM_IDX_GPIOCR_ALTC1,
- PRCM_IDX_GPIOCR_ALTC2,
- PRCM_IDX_GPIOCR_ALTC3,
- PRCM_IDX_GPIOCR_ALTC4,
- PRCM_IDX_GPIOCR_ALTC_MAX,
-};
-
-/**
- * struct prcm_gpio_altcx - Other alternate-C function
- * @used: other alternate-C function availability
- * @reg_index: PRCM GPIOCR register index used to control the function
- * @control_bit: PRCM GPIOCR bit used to control the function
- */
-struct prcm_gpiocr_altcx {
- bool used:1;
- u8 reg_index:2;
- u8 control_bit:5;
-} __packed;
-
-/**
- * struct prcm_gpio_altcx_pin_desc - Other alternate-C pin
- * @pin: The pin number
- * @altcx: array of other alternate-C[1-4] functions
- */
-struct prcm_gpiocr_altcx_pin_desc {
- unsigned short pin;
- struct prcm_gpiocr_altcx altcx[PRCM_IDX_GPIOCR_ALTC_MAX];
-};
-
-/**
- * struct nmk_function - Nomadik pinctrl mux function
- * @name: The name of the function, exported to pinctrl core.
- * @groups: An array of pin groups that may select this function.
- * @ngroups: The number of entries in @groups.
- */
-struct nmk_function {
- const char *name;
- const char * const *groups;
- unsigned ngroups;
-};
-
-/**
- * struct nmk_pingroup - describes a Nomadik pin group
- * @grp: Generic data of the pin group (name and pins)
- * @altsetting: the altsetting to apply to all pins in this group to
- * configure them to be used by a function
- */
-struct nmk_pingroup {
- struct pingroup grp;
- int altsetting;
-};
-
-#define NMK_PIN_GROUP(a, b) \
- { \
- .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
- .altsetting = b, \
- }
-
-/**
- * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
- * @pins: An array describing all pins the pin controller affects.
- * All pins which are also GPIOs must be listed first within the
- * array, and be numbered identically to the GPIO controller's
- * numbering.
- * @npins: The number of entries in @pins.
- * @functions: The functions supported on this SoC.
- * @nfunction: The number of entries in @functions.
- * @groups: An array describing all pin groups the pin SoC supports.
- * @ngroups: The number of entries in @groups.
- * @altcx_pins: The pins that support Other alternate-C function on this SoC
- * @npins_altcx: The number of Other alternate-C pins
- * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
- */
-struct nmk_pinctrl_soc_data {
- const struct pinctrl_pin_desc *pins;
- unsigned npins;
- const struct nmk_function *functions;
- unsigned nfunctions;
- const struct nmk_pingroup *groups;
- unsigned ngroups;
- const struct prcm_gpiocr_altcx_pin_desc *altcx_pins;
- unsigned npins_altcx;
- const u16 *prcm_gpiocr_registers;
-};
-
-#ifdef CONFIG_PINCTRL_STN8815
-
-void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc);
-
-#else
-
-static inline void
-nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
-{
-}
-
-#endif
-
-#ifdef CONFIG_PINCTRL_DB8500
-
-void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc);
-
-#else
-
-static inline void
-nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
-{
-}
-
-#endif
-
-#endif /* PINCTRL_PINCTRL_NOMADIK_H */
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 4589900244c7..cdad4ef11a2f 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -474,7 +474,7 @@ enum {
#undef WPCM450_GRP
};
-static struct pingroup wpcm450_groups[] = {
+static const struct pingroup wpcm450_groups[] = {
#define WPCM450_GRP(x) PINCTRL_PINGROUP(#x, x ## _pins, ARRAY_SIZE(x ## _pins))
WPCM450_GRPS
#undef WPCM450_GRP
diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c
new file mode 100644
index 000000000000..4edd371c469f
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-aw9523.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Awinic AW9523B i2c pin controller driver
+ * Copyright (c) 2020, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define AW9523_MAX_FUNCS 2
+#define AW9523_NUM_PORTS 2
+#define AW9523_PINS_PER_PORT 8
+
+/*
+ * HW needs at least 20uS for reset and at least 1-2uS to recover from
+ * reset, but we have to account for possible board quirks, if any:
+ * for this reason, keep reset asserted for 50uS and wait for 20uS
+ * to recover from the reset.
+ */
+#define AW9523_HW_RESET_US 50
+#define AW9523_HW_RESET_RECOVERY_US 20
+
+/* Port 0: P0_0...P0_7 - Port 1: P1_0...P1_7 */
+#define AW9523_PIN_TO_PORT(pin) (pin >> 3)
+#define AW9523_REG_IN_STATE(pin) (0x00 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_OUT_STATE(pin) (0x02 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_CONF_STATE(pin) (0x04 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_INTR_DIS(pin) (0x06 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_CHIPID 0x10
+#define AW9523_VAL_EXPECTED_CHIPID 0x23
+
+#define AW9523_REG_GCR 0x11
+#define AW9523_GCR_ISEL_MASK GENMASK(1, 0)
+#define AW9523_GCR_GPOMD_MASK BIT(4)
+
+#define AW9523_REG_PORT_MODE(pin) (0x12 + AW9523_PIN_TO_PORT(pin))
+#define AW9523_REG_SOFT_RESET 0x7f
+#define AW9523_VAL_RESET 0x00
+
+/*
+ * struct aw9523_irq - Interrupt controller structure
+ * @lock: mutex locking for the irq bus
+ * @irqchip: structure holding irqchip params
+ * @cached_gpio: stores the previous gpio status for bit comparison
+ */
+struct aw9523_irq {
+ struct mutex lock;
+ struct irq_chip *irqchip;
+ u16 cached_gpio;
+};
+
+/*
+ * struct aw9523_pinmux - Pin mux params
+ * @name: Name of the mux
+ * @grps: Groups of the mux
+ * @num_grps: Number of groups (sizeof array grps)
+ */
+struct aw9523_pinmux {
+ const char *name;
+ const char * const *grps;
+ const u8 num_grps;
+};
+
+/*
+ * struct aw9523 - Main driver structure
+ * @dev: device handle
+ * @regmap: regmap handle for current device
+ * @i2c_lock: Mutex lock for i2c operations
+ * @reset_gpio: Hardware reset (RSTN) signal GPIO
+ * @vio_vreg: VCC regulator (Optional)
+ * @pctl: pinctrl handle for current device
+ * @gpio: structure holding gpiochip params
+ * @irq: Interrupt controller structure
+ */
+struct aw9523 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex i2c_lock;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vio_vreg;
+ struct pinctrl_dev *pctl;
+ struct gpio_chip gpio;
+ struct aw9523_irq *irq;
+};
+
+static const struct pinctrl_pin_desc aw9523_pins[] = {
+ /* Port 0 */
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+
+ /* Port 1 */
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+};
+
+static int aw9523_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(aw9523_pins);
+}
+
+static const char *aw9523_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return aw9523_pins[selector].name;
+}
+
+static int aw9523_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &aw9523_pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const struct pinctrl_ops aw9523_pinctrl_ops = {
+ .get_groups_count = aw9523_pinctrl_get_groups_count,
+ .get_group_pins = aw9523_pinctrl_get_group_pins,
+ .get_group_name = aw9523_pinctrl_get_group_name,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const char * const gpio_pwm_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+
+/* Warning: Do NOT reorder this array */
+static const struct aw9523_pinmux aw9523_pmx[] = {
+ {
+ .name = "pwm",
+ .grps = gpio_pwm_groups,
+ .num_grps = ARRAY_SIZE(gpio_pwm_groups),
+ },
+ {
+ .name = "gpio",
+ .grps = gpio_pwm_groups,
+ .num_grps = ARRAY_SIZE(gpio_pwm_groups),
+ },
+};
+
+static int aw9523_pmx_get_funcs_count(struct pinctrl_dev *pctl)
+{
+ return ARRAY_SIZE(aw9523_pmx);
+}
+
+static const char *aw9523_pmx_get_fname(struct pinctrl_dev *pctl,
+ unsigned int sel)
+{
+ return aw9523_pmx[sel].name;
+}
+
+static int aw9523_pmx_get_groups(struct pinctrl_dev *pctl, unsigned int sel,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ *groups = aw9523_pmx[sel].grps;
+ *num_groups = aw9523_pmx[sel].num_grps;
+ return 0;
+}
+
+static int aw9523_pmx_set_mux(struct pinctrl_dev *pctl, unsigned int fsel,
+ unsigned int grp)
+{
+ struct aw9523 *awi = pinctrl_dev_get_drvdata(pctl);
+ int pin = aw9523_pins[grp].number;
+ int regbit = pin % AW9523_PINS_PER_PORT;
+ int ret;
+
+ if (fsel >= ARRAY_SIZE(aw9523_pmx))
+ return -EINVAL;
+
+ /*
+ * This maps directly to the aw9523_pmx array: programming a
+ * high bit means "gpio" and a low bit means "pwm".
+ */
+ mutex_lock(&awi->i2c_lock);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_PORT_MODE(pin),
+ BIT(regbit), (fsel ? BIT(regbit) : 0));
+ mutex_unlock(&awi->i2c_lock);
+ return ret;
+}
+
+static const struct pinmux_ops aw9523_pinmux_ops = {
+ .get_functions_count = aw9523_pmx_get_funcs_count,
+ .get_function_name = aw9523_pmx_get_fname,
+ .get_function_groups = aw9523_pmx_get_groups,
+ .set_mux = aw9523_pmx_set_mux,
+};
+
+static int aw9523_pcfg_param_to_reg(enum pin_config_param pcp, int pin, u8 *r)
+{
+ u8 reg;
+
+ switch (pcp) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ reg = AW9523_REG_IN_STATE(pin);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ reg = AW9523_REG_GCR;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ reg = AW9523_REG_CONF_STATE(pin);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ reg = AW9523_REG_OUT_STATE(pin);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ *r = reg;
+
+ return 0;
+}
+
+static int aw9523_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct aw9523 *awi = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ int regbit = pin % AW9523_PINS_PER_PORT;
+ unsigned int val;
+ u8 reg;
+ int rc;
+
+ rc = aw9523_pcfg_param_to_reg(param, pin, &reg);
+ if (rc)
+ return rc;
+
+ mutex_lock(&awi->i2c_lock);
+ rc = regmap_read(awi->regmap, reg, &val);
+ mutex_unlock(&awi->i2c_lock);
+ if (rc)
+ return rc;
+
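+ /*
+ * Normalize val so that "1" means the requested parameter is set:
+ * some of the bits read above are active-high, the others are
+ * active-low and get inverted below.
+ */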
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT:
+ val &= BIT(regbit);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ val &= BIT(regbit);
+ val = !val;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (pin >= AW9523_PINS_PER_PORT)
+ val = 0;
+ else
+ val = !FIELD_GET(AW9523_GCR_GPOMD_MASK, val);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (pin >= AW9523_PINS_PER_PORT)
+ val = 1;
+ else
+ val = FIELD_GET(AW9523_GCR_GPOMD_MASK, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (val < 1)
+ return -EINVAL;
+
+ *config = pinconf_to_config_packed(param, !!val);
+
+ return rc;
+}
+
+static int aw9523_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct aw9523 *awi = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ int regbit = pin % AW9523_PINS_PER_PORT;
+ u32 arg;
+ u8 reg;
+ unsigned int mask, val;
+ int i, rc;
+
+ mutex_lock(&awi->i2c_lock);
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ rc = aw9523_pcfg_param_to_reg(param, pin, &reg);
+ if (rc)
+ goto end;
+
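+ /* Translate the generic parameter into the register mask/value to program */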
+ switch (param) {
+ case PIN_CONFIG_OUTPUT:
+ /* First, enable pin output */
+ rc = regmap_update_bits(awi->regmap,
+ AW9523_REG_CONF_STATE(pin),
+ BIT(regbit), 0);
+ if (rc)
+ goto end;
+
+ /* Then, fall through to config output level */
+ fallthrough;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ arg = !arg;
+ fallthrough;
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_INPUT_ENABLE:
+ mask = BIT(regbit);
+ val = arg ? BIT(regbit) : 0;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ /* Open-Drain is supported only on port 0 */
+ if (pin >= AW9523_PINS_PER_PORT) {
+ rc = -EOPNOTSUPP;
+ goto end;
+ }
+ mask = AW9523_GCR_GPOMD_MASK;
+ val = 0;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ /* Port 1 is always Push-Pull */
+ if (pin >= AW9523_PINS_PER_PORT) {
+ mask = 0;
+ val = 0;
+ continue;
+ }
+ mask = AW9523_GCR_GPOMD_MASK;
+ val = AW9523_GCR_GPOMD_MASK;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ goto end;
+ }
+
+ rc = regmap_update_bits(awi->regmap, reg, mask, val);
+ if (rc)
+ goto end;
+ }
+end:
+ mutex_unlock(&awi->i2c_lock);
+ return rc;
+}
+
+static const struct pinconf_ops aw9523_pinconf_ops = {
+ .pin_config_get = aw9523_pconf_get,
+ .pin_config_set = aw9523_pconf_set,
+ .is_generic = true,
+};
+
+/*
+ * aw9523_get_pin_direction - Get pin direction
+ * @regmap: Regmap structure
+ * @pin: gpiolib pin number
+ * @n: pin index in port register
+ *
+ * Return: Pin direction for success or negative number for error
+ */
+static int aw9523_get_pin_direction(struct regmap *regmap, u8 pin, u8 n)
+{
+ int ret;
+
+ ret = regmap_test_bits(regmap, AW9523_REG_CONF_STATE(pin), BIT(n));
+ if (ret < 0)
+ return ret;
+
+ return ret ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+/*
+ * aw9523_get_port_state - Get input or output state for entire port
+ * @regmap: Regmap structure
+ * @pin: gpiolib pin number
+ * @regbit: hw pin index, used to retrieve port number
+ * @state: returned port state
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int aw9523_get_port_state(struct regmap *regmap, u8 pin,
+ u8 regbit, unsigned int *state)
+{
+ u8 reg;
+ int dir;
+
+ dir = aw9523_get_pin_direction(regmap, pin, regbit);
+ if (dir < 0)
+ return dir;
+
+ if (dir == GPIO_LINE_DIRECTION_IN)
+ reg = AW9523_REG_IN_STATE(pin);
+ else
+ reg = AW9523_REG_OUT_STATE(pin);
+
+ return regmap_read(regmap, reg, state);
+}
+
+static int aw9523_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
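+ /*
+ * The expander signals an interrupt on any input level change,
+ * so only both-edge (or unspecified) trigger types are accepted.
+ */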
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_EDGE_BOTH:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * aw9523_irq_mask - Mask interrupt
+ * @d: irq data
+ *
+ * Sets which interrupt to mask in the bitmap;
+ * The interrupt will be masked when unlocking the irq bus.
+ */
+static void aw9523_irq_mask(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n = d->hwirq % AW9523_PINS_PER_PORT;
+
+ regmap_update_bits(awi->regmap,
+ AW9523_REG_INTR_DIS(d->hwirq),
+ BIT(n), BIT(n));
+ gpiochip_disable_irq(&awi->gpio, irqd_to_hwirq(d));
+}
+
+/*
+ * aw9523_irq_unmask - Unmask interrupt
+ * @d: irq data
+ *
+ * Sets which interrupt to unmask in the bitmap;
+ * the interrupt will be unmasked when unlocking the irq bus.
+ */
+static void aw9523_irq_unmask(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int n = d->hwirq % AW9523_PINS_PER_PORT;
+
+ gpiochip_enable_irq(&awi->gpio, irqd_to_hwirq(d));
+ regmap_update_bits(awi->regmap,
+ AW9523_REG_INTR_DIS(d->hwirq),
+ BIT(n), 0);
+}
+
+static irqreturn_t aw9523_irq_thread_func(int irq, void *dev_id)
+{
+ struct aw9523 *awi = (struct aw9523 *)dev_id;
+ unsigned long n, val = 0;
+ unsigned long changed_gpio;
+ unsigned int tmp, port_pin, i, ret;
+
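+ /* Read the input level of both 8-bit ports into a single 16-bit value */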
+ for (i = 0; i < AW9523_NUM_PORTS; i++) {
+ port_pin = i * AW9523_PINS_PER_PORT;
+ ret = regmap_read(awi->regmap,
+ AW9523_REG_IN_STATE(port_pin),
+ &tmp);
+ if (ret)
+ return ret;
+ val |= (u8)tmp << (i * 8);
+ }
+
+ /* Handle GPIO input release interrupt as well */
+ changed_gpio = awi->irq->cached_gpio ^ val;
+ awi->irq->cached_gpio = val;
+
+ /*
+ * To avoid up to four *slow* i2c reads from any driver hooked
+ * up to our interrupts, just check for the irq_find_mapping
+ * result: if the interrupt is not mapped, then we don't want
+ * to care about it.
+ */
+ for_each_set_bit(n, &changed_gpio, awi->gpio.ngpio) {
+ tmp = irq_find_mapping(awi->gpio.irq.domain, n);
+ if (tmp <= 0)
+ continue;
+ handle_nested_irq(tmp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * aw9523_irq_bus_lock - Grab lock for interrupt operation
+ * @d: irq data
+ */
+static void aw9523_irq_bus_lock(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+
+ mutex_lock(&awi->irq->lock);
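+ /* Mask/unmask updates done while the bus is locked only touch the regcache */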
+ regcache_cache_only(awi->regmap, true);
+}
+
+/*
+ * aw9523_irq_bus_sync_unlock - Synchronize state and unlock
+ * @d: irq data
+ *
+ * Writes the interrupt mask bits (found in the bit map) to the
+ * hardware, then unlocks the bus.
+ */
+static void aw9523_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct aw9523 *awi = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+
+ regcache_cache_only(awi->regmap, false);
+ regcache_sync(awi->regmap);
+ mutex_unlock(&awi->irq->lock);
+}
+
+static int aw9523_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = aw9523_get_pin_direction(awi->regmap, offset, regbit);
+ mutex_unlock(&awi->i2c_lock);
+
+ return ret;
+}
+
+static int aw9523_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = aw9523_get_port_state(awi->regmap, offset, regbit, &val);
+ mutex_unlock(&awi->i2c_lock);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(regbit));
+}
+
+/**
+ * _aw9523_gpio_get_multiple - Get I/O state for an entire port
+ * @awi: Controller data
+ * @regbit: hw pin index of the first pin in the port, used to retrieve the port registers
+ * @state: returned port I/O state
+ * @mask: mask of the pins to retrieve within the port
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int _aw9523_gpio_get_multiple(struct aw9523 *awi, u8 regbit,
+ u8 *state, u8 mask)
+{
+ u32 dir_in, val;
+ u8 m;
+ int ret;
+
+ /* Registers are 8-bits wide */
+ ret = regmap_read(awi->regmap, AW9523_REG_CONF_STATE(regbit), &dir_in);
+ if (ret)
+ return ret;
+ *state = 0;
+
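+ /* Pins configured as inputs: their level comes from the input state register */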
+ m = mask & dir_in;
+ if (m) {
+ ret = regmap_read(awi->regmap, AW9523_REG_IN_STATE(regbit),
+ &val);
+ if (ret)
+ return ret;
+ *state |= (u8)val & m;
+ }
+
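+ /* Pins configured as outputs: read back the programmed output level */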
+ m = mask & ~dir_in;
+ if (m) {
+ ret = regmap_read(awi->regmap, AW9523_REG_OUT_STATE(regbit),
+ &val);
+ if (ret)
+ return ret;
+ *state |= (u8)val & m;
+ }
+
+ return 0;
+}
+
+static int aw9523_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 m, state = 0;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+
+ /* Port 0 (gpio 0-7) */
+ m = *mask & U8_MAX;
+ if (m) {
+ ret = _aw9523_gpio_get_multiple(awi, 0, &state, m);
+ if (ret)
+ goto out;
+ }
+ *bits = state;
+
+ /* Port 1 (gpio 8-15) */
+ m = (*mask >> 8) & U8_MAX;
+ if (m) {
+ ret = _aw9523_gpio_get_multiple(awi, AW9523_PINS_PER_PORT,
+ &state, m);
+ if (ret)
+ goto out;
+
+ *bits |= (state << 8);
+ }
+out:
+ mutex_unlock(&awi->i2c_lock);
+ return ret;
+}
+
+static void aw9523_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 mask_lo, mask_hi, bits_lo, bits_hi;
+ unsigned int reg;
+ int ret = 0;
+
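+ /* Split the 16-bit mask and bits across the two 8-bit port registers */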
+ mask_lo = *mask & U8_MAX;
+ mask_hi = (*mask >> 8) & U8_MAX;
+ mutex_lock(&awi->i2c_lock);
+ if (mask_hi) {
+ reg = AW9523_REG_OUT_STATE(AW9523_PINS_PER_PORT);
+ bits_hi = (*bits >> 8) & U8_MAX;
+
+ ret = regmap_write_bits(awi->regmap, reg, mask_hi, bits_hi);
+ if (ret) {
+ dev_warn(awi->dev, "Cannot write port1 out level\n");
+ goto out;
+ }
+ }
+ if (mask_lo) {
+ reg = AW9523_REG_OUT_STATE(0);
+ bits_lo = *bits & U8_MAX;
+ ret = regmap_write_bits(awi->regmap, reg, mask_lo, bits_lo);
+ if (ret)
+ dev_warn(awi->dev, "Cannot write port0 out level\n");
+ }
+out:
+ mutex_unlock(&awi->i2c_lock);
+}
+
+static void aw9523_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+
+ mutex_lock(&awi->i2c_lock);
+ regmap_update_bits(awi->regmap, AW9523_REG_OUT_STATE(offset),
+ BIT(regbit), value ? BIT(regbit) : 0);
+ mutex_unlock(&awi->i2c_lock);
+}
+
+
+static int aw9523_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_CONF_STATE(offset),
+ BIT(regbit), BIT(regbit));
+ mutex_unlock(&awi->i2c_lock);
+
+ return ret;
+}
+
+static int aw9523_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct aw9523 *awi = gpiochip_get_data(chip);
+ u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
+
+ mutex_lock(&awi->i2c_lock);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_OUT_STATE(offset),
+ BIT(regbit), value ? BIT(regbit) : 0);
+ if (ret)
+ goto end;
+
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_CONF_STATE(offset),
+ BIT(regbit), 0);
+end:
+ mutex_unlock(&awi->i2c_lock);
+ return ret;
+}
+
+static int aw9523_drive_reset_gpio(struct aw9523 *awi)
+{
+ unsigned int chip_id;
+ int ret;
+
+ /*
+ * If the chip is already configured for any reason, then we
+ * will probably succeed in sending the soft reset signal to
+ * the hardware through I2C: this operation takes less time
+ * compared to a full HW reset and it gives the same results.
+ */
+ ret = regmap_write(awi->regmap, AW9523_REG_SOFT_RESET, 0);
+ if (ret == 0)
+ goto done;
+
+ dev_dbg(awi->dev, "Cannot execute soft reset: trying hard reset\n");
+ ret = gpiod_direction_output(awi->reset_gpio, 0);
+ if (ret)
+ return ret;
+
+ /* The reset pulse has to be longer than 20uS due to deglitch */
+ usleep_range(AW9523_HW_RESET_US, AW9523_HW_RESET_US + 1);
+
+ ret = gpiod_direction_output(awi->reset_gpio, 1);
+ if (ret)
+ return ret;
+done:
+ /* The HW needs at least 1uS to reliably recover after reset */
+ usleep_range(AW9523_HW_RESET_RECOVERY_US,
+ AW9523_HW_RESET_RECOVERY_US + 1);
+
+ /* Check the ChipID */
+ ret = regmap_read(awi->regmap, AW9523_REG_CHIPID, &chip_id);
+ if (ret) {
+ dev_err(awi->dev, "Cannot read Chip ID: %d\n", ret);
+ return ret;
+ }
+ if (chip_id != AW9523_VAL_EXPECTED_CHIPID) {
+ dev_err(awi->dev, "Bad ChipID; read 0x%x, expected 0x%x\n",
+ chip_id, AW9523_VAL_EXPECTED_CHIPID);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aw9523_hw_reset(struct aw9523 *awi)
+{
+ int ret, max_retries = 2;
+
+ /* Sometimes the chip needs more than one reset cycle */
+ do {
+ ret = aw9523_drive_reset_gpio(awi);
+ if (ret == 0)
+ break;
+ max_retries--;
+ } while (max_retries);
+
+ return ret;
+}
+
+static int aw9523_init_gpiochip(struct aw9523 *awi, unsigned int npins)
+{
+ struct device *dev = awi->dev;
+ struct gpio_chip *gc = &awi->gpio;
+
+ gc->label = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!gc->label)
+ return -ENOMEM;
+
+ gc->base = -1;
+ gc->ngpio = npins;
+ gc->get_direction = aw9523_gpio_get_direction;
+ gc->direction_input = aw9523_direction_input;
+ gc->direction_output = aw9523_direction_output;
+ gc->get = aw9523_gpio_get;
+ gc->get_multiple = aw9523_gpio_get_multiple;
+ gc->set = aw9523_gpio_set;
+ gc->set_multiple = aw9523_gpio_set_multiple;
+ gc->set_config = gpiochip_generic_config;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = false;
+
+ return 0;
+}
+
+static const struct irq_chip aw9523_irq_chip = {
+ .name = "aw9523",
+ .irq_mask = aw9523_irq_mask,
+ .irq_unmask = aw9523_irq_unmask,
+ .irq_bus_lock = aw9523_irq_bus_lock,
+ .irq_bus_sync_unlock = aw9523_irq_bus_sync_unlock,
+ .irq_set_type = aw9523_gpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int aw9523_init_irq(struct aw9523 *awi, int irq)
+{
+ struct device *dev = awi->dev;
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqchip;
+ int ret;
+
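+ /* The irqchip is optional: only set it up when the firmware describes one */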
+ if (!device_property_read_bool(dev, "interrupt-controller"))
+ return 0;
+
+ irqchip = devm_kzalloc(dev, sizeof(*irqchip), GFP_KERNEL);
+ if (!irqchip)
+ return -ENOMEM;
+
+ awi->irq = devm_kzalloc(dev, sizeof(*awi->irq), GFP_KERNEL);
+ if (!awi->irq)
+ return -ENOMEM;
+
+ awi->irq->irqchip = irqchip;
+ mutex_init(&awi->irq->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, aw9523_irq_thread_func,
+ IRQF_ONESHOT, dev_name(dev), awi);
+ if (ret) {
+ dev_err(dev, "Failed to request irq %d\n", irq);
+ return ret;
+ }
+
+ girq = &awi->gpio.irq;
+ gpio_irq_chip_set_chip(girq, &aw9523_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_EDGE_BOTH;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ return 0;
+}
+
+static bool aw9523_is_reg_hole(unsigned int reg)
+{
+ return (reg > AW9523_REG_PORT_MODE(AW9523_PINS_PER_PORT) &&
+ reg < AW9523_REG_SOFT_RESET) ||
+ (reg > AW9523_REG_INTR_DIS(AW9523_PINS_PER_PORT) &&
+ reg < AW9523_REG_CHIPID);
+}
+
+static bool aw9523_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* All available registers (minus holes) can be read */
+ return !aw9523_is_reg_hole(reg);
+}
+
+static bool aw9523_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return aw9523_is_reg_hole(reg) ||
+ reg == AW9523_REG_IN_STATE(0) ||
+ reg == AW9523_REG_IN_STATE(AW9523_PINS_PER_PORT) ||
+ reg == AW9523_REG_CHIPID ||
+ reg == AW9523_REG_SOFT_RESET;
+}
+
+static bool aw9523_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return !aw9523_is_reg_hole(reg) && reg != AW9523_REG_CHIPID;
+}
+
+static bool aw9523_precious_reg(struct device *dev, unsigned int reg)
+{
+ /* Reading AW9523_REG_IN_STATE clears interrupt status */
+ return aw9523_is_reg_hole(reg) ||
+ reg == AW9523_REG_IN_STATE(0) ||
+ reg == AW9523_REG_IN_STATE(AW9523_PINS_PER_PORT);
+}
+
+static const struct regmap_config aw9523_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_stride = 1,
+
+ .precious_reg = aw9523_precious_reg,
+ .readable_reg = aw9523_readable_reg,
+ .volatile_reg = aw9523_volatile_reg,
+ .writeable_reg = aw9523_writeable_reg,
+
+ .cache_type = REGCACHE_FLAT,
+ .disable_locking = true,
+
+ .num_reg_defaults_raw = AW9523_REG_SOFT_RESET,
+};
+
+static int aw9523_hw_init(struct aw9523 *awi)
+{
+ u8 p1_pin = AW9523_PINS_PER_PORT;
+ unsigned int val;
+ int ret;
+
+ /* No register caching during initialization */
+ regcache_cache_bypass(awi->regmap, true);
+
+ /* Bring up the chip */
+ ret = aw9523_hw_reset(awi);
+ if (ret) {
+ dev_err(awi->dev, "HW Reset failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * This is the expected chip and it is running: it's time to
+ * set a safe default configuration in case the user doesn't
+ * configure (all of the available) pins in this chip.
+ * P.S.: The order of the writes doesn't matter.
+ */
+
+ /* Set all pins as GPIO */
+ ret = regmap_write(awi->regmap, AW9523_REG_PORT_MODE(0), U8_MAX);
+ if (ret)
+ return ret;
+ ret = regmap_write(awi->regmap, AW9523_REG_PORT_MODE(p1_pin), U8_MAX);
+ if (ret)
+ return ret;
+
+ /* Set Open-Drain mode on Port 0 (Port 1 is always P-P) */
+ ret = regmap_write(awi->regmap, AW9523_REG_GCR, 0);
+ if (ret)
+ return ret;
+
+ /* Set all pins as inputs */
+ ret = regmap_write(awi->regmap, AW9523_REG_CONF_STATE(0), U8_MAX);
+ if (ret)
+ return ret;
+ ret = regmap_write(awi->regmap, AW9523_REG_CONF_STATE(p1_pin), U8_MAX);
+ if (ret)
+ return ret;
+
+ /* Disable all interrupts to avoid spurious wakeups */
+ ret = regmap_write(awi->regmap, AW9523_REG_INTR_DIS(0), U8_MAX);
+ if (ret)
+ return ret;
+ ret = regmap_write(awi->regmap, AW9523_REG_INTR_DIS(p1_pin), U8_MAX);
+ if (ret)
+ return ret;
+
+ /* Clear setup-generated interrupts by performing a port state read */
+ ret = aw9523_get_port_state(awi->regmap, 0, 0, &val);
+ if (ret)
+ return ret;
+ ret = aw9523_get_port_state(awi->regmap, p1_pin, 0, &val);
+ if (ret)
+ return ret;
+
+ /* Everything went fine: activate and reinitialize register cache */
+ regcache_cache_bypass(awi->regmap, false);
+ return regmap_reinit_cache(awi->regmap, &aw9523_regmap);
+}
+
+static int aw9523_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pinctrl_desc *pdesc;
+ struct aw9523 *awi;
+ int ret;
+
+ awi = devm_kzalloc(dev, sizeof(*awi), GFP_KERNEL);
+ if (!awi)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, awi);
+
+ awi->dev = dev;
+ awi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(awi->reset_gpio))
+ return PTR_ERR(awi->reset_gpio);
+ gpiod_set_consumer_name(awi->reset_gpio, "aw9523 reset");
+
+ awi->regmap = devm_regmap_init_i2c(client, &aw9523_regmap);
+ if (IS_ERR(awi->regmap))
+ return PTR_ERR(awi->regmap);
+
+ awi->vio_vreg = devm_regulator_get_optional(dev, "vio");
+ if (IS_ERR(awi->vio_vreg)) {
+ if (PTR_ERR(awi->vio_vreg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ awi->vio_vreg = NULL;
+ } else {
+ ret = regulator_enable(awi->vio_vreg);
+ if (ret)
+ return ret;
+ }
+
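+ /* Use a lock subclass to avoid lockdep false positives behind i2c muxes */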
+ mutex_init(&awi->i2c_lock);
+ lockdep_set_subclass(&awi->i2c_lock,
+ i2c_adapter_depth(client->adapter));
+
+ pdesc = devm_kzalloc(dev, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+
+ ret = aw9523_hw_init(awi);
+ if (ret)
+ goto err_disable_vregs;
+
+ pdesc->name = dev_name(dev);
+ pdesc->owner = THIS_MODULE;
+ pdesc->pctlops = &aw9523_pinctrl_ops;
+ pdesc->pmxops = &aw9523_pinmux_ops;
+ pdesc->confops = &aw9523_pinconf_ops;
+ pdesc->pins = aw9523_pins;
+ pdesc->npins = ARRAY_SIZE(aw9523_pins);
+
+ ret = aw9523_init_gpiochip(awi, pdesc->npins);
+ if (ret)
+ goto err_disable_vregs;
+
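+ /* The interrupt line is optional */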
+ if (client->irq) {
+ ret = aw9523_init_irq(awi, client->irq);
+ if (ret)
+ goto err_disable_vregs;
+ }
+
+ awi->pctl = devm_pinctrl_register(dev, pdesc, awi);
+ if (IS_ERR(awi->pctl)) {
+ ret = PTR_ERR(awi->pctl);
+ dev_err(dev, "Cannot register pinctrl: %d\n", ret);
+ goto err_disable_vregs;
+ }
+
+ ret = devm_gpiochip_add_data(dev, &awi->gpio, awi);
+ if (ret)
+ goto err_disable_vregs;
+
+ return ret;
+
+err_disable_vregs:
+ if (awi->vio_vreg)
+ regulator_disable(awi->vio_vreg);
+ mutex_destroy(&awi->i2c_lock);
+ return ret;
+}
+
+static void aw9523_remove(struct i2c_client *client)
+{
+ struct aw9523 *awi = i2c_get_clientdata(client);
+ int ret;
+
+ if (!awi)
+ return;
+
+ /*
+ * If the chip VIO is connected to a regulator that we can turn
+ * off, life is easy... otherwise, reinitialize the chip and
+ * set the pins to hardware defaults before removing the driver
+ * to leave it in a clean, safe and predictable state.
+ */
+ if (awi->vio_vreg) {
+ regulator_disable(awi->vio_vreg);
+ } else {
+ mutex_lock(&awi->i2c_lock);
+ ret = aw9523_hw_init(awi);
+ mutex_unlock(&awi->i2c_lock);
+ if (ret)
+ return;
+ }
+
+ mutex_destroy(&awi->i2c_lock);
+}
+
+static const struct i2c_device_id aw9523_i2c_id_table[] = {
+ { "aw9523_i2c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aw9523_i2c_id_table);
+
+static const struct of_device_id of_aw9523_i2c_match[] = {
+ { .compatible = "awinic,aw9523-pinctrl", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_aw9523_i2c_match);
+
+static struct i2c_driver aw9523_driver = {
+ .driver = {
+ .name = "aw9523-pinctrl",
+ .of_match_table = of_aw9523_i2c_match,
+ },
+ .probe = aw9523_probe,
+ .remove = aw9523_remove,
+ .id_table = aw9523_i2c_id_table,
+};
+module_i2c_driver(aw9523_driver);
+
+MODULE_DESCRIPTION("Awinic AW9523 I2C GPIO Expander driver");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 3998b27cbe0e..22e3cd2cc963 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -281,10 +281,17 @@ static int da9062_pctl_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(&pdev->dev, &pctl->gc, pctl);
}
+static const struct of_device_id da9062_compatible_reg_id_table[] = {
+ { .compatible = "dlg,da9062-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da9062_compatible_reg_id_table);
+
static struct platform_driver da9062_pctl_driver = {
.probe = da9062_pctl_probe,
.driver = {
.name = "da9062-gpio",
+ .of_match_table = da9062_compatible_reg_id_table,
},
};
module_platform_driver(da9062_pctl_driver);
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4551575e4e7d..38c3a14c8b58 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -375,7 +375,8 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
- int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval;
+ int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval, gpinten;
+ unsigned long enabled_interrupts;
unsigned int child_irq;
bool intf_set, intcap_changed, gpio_bit_changed,
defval_changed, gpio_set;
@@ -395,6 +396,9 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
if (mcp_read(mcp, MCP_INTCON, &intcon))
goto unlock;
+ if (mcp_read(mcp, MCP_GPINTEN, &gpinten))
+ goto unlock;
+
if (mcp_read(mcp, MCP_DEFVAL, &defval))
goto unlock;
@@ -410,9 +414,12 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
intcap, intf, gpio_orig, gpio);
- for (i = 0; i < mcp->chip.ngpio; i++) {
- /* We must check all of the inputs on the chip,
- * otherwise we may not notice a change on >=2 pins.
+ enabled_interrupts = gpinten;
+ for_each_set_bit(i, &enabled_interrupts, mcp->chip.ngpio) {
+ /*
+ * We must check all of the inputs with enabled interrupts
+ * on the chip, otherwise we may not notice a change
+ * on more than one pin.
*
* On at least the mcp23s17, INTCAP is only updated
* one byte at a time(INTCAPA and INTCAPB are
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 52aadd6d72a8..be9b8c010167 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1401,7 +1401,6 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
if (info->pincfg) {
const struct ocelot_pincfg_data *opd = info->pincfg_data;
- ret = 0;
switch (reg) {
case PINCONF_BIAS:
ret = ocelot_pincfg_clrsetbits(info, pin,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1485573b523c..5d9abd6547d0 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -723,9 +723,8 @@ static int st_gpio_direction_output(struct gpio_chip *chip,
struct st_gpio_bank *bank = gpiochip_get_data(chip);
__st_gpio_set(bank, offset, value);
- pinctrl_gpio_direction_output(chip, offset);
- return 0;
+ return pinctrl_gpio_direction_output(chip, offset);
}
static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index f2be341f73e1..5c46b7d7ebcb 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -562,7 +562,7 @@ static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
const char **fgroups;
int ret, index, i;
- fgroups = devm_kzalloc(dev, sizeof(*fgroups) * func->ngroups, GFP_KERNEL);
+ fgroups = devm_kcalloc(dev, func->ngroups, sizeof(*fgroups), GFP_KERNEL);
if (!fgroups)
return -ENOMEM;
@@ -754,7 +754,7 @@ static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
if (ret)
return ret;
- funcs = devm_kzalloc(dev, sizeof(*funcs) * pctrl->nfuncs, GFP_KERNEL);
+ funcs = devm_kcalloc(dev, pctrl->nfuncs, sizeof(*funcs), GFP_KERNEL);
if (!funcs)
return -ENOMEM;
@@ -768,7 +768,7 @@ static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
pctrl->ngroups += funcs[i].ngroups;
}
- groups = devm_kzalloc(dev, sizeof(*groups) * pctrl->ngroups, GFP_KERNEL);
+ groups = devm_kcalloc(dev, pctrl->ngroups, sizeof(*groups), GFP_KERNEL);
if (!groups)
return -ENOMEM;
@@ -830,7 +830,7 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
if (ret)
return ret;
- pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ pins = devm_kcalloc(dev, *npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index abbb044d6ace..d924207d629b 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -188,8 +188,8 @@ out_free_pin:
}
out:
if (status)
- dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
- pin, owner, status);
+ dev_err_probe(pctldev->dev, status, "pin-%d (%s)\n",
+ pin, owner);
return status;
}
@@ -441,7 +441,7 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
pname = desc ? desc->name : "non-existing";
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
- dev_err(pctldev->dev,
+ dev_err_probe(pctldev->dev, ret,
"could not request pin %d (%s) from group %s "
" on device %s\n",
pins[i], pname, gname,
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index e0f2829c15d6..24619e80b2cc 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -125,7 +125,7 @@ config PINCTRL_SM8550_LPASS_LPI
platform.
config PINCTRL_SM8650_LPASS_LPI
- tristate "Qualcomm Technologies Inc SM8550 LPASS LPI pin controller driver"
+ tristate "Qualcomm Technologies Inc SM8650 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
depends on PINCTRL_LPASS_LPI
help
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index c8d519ca53eb..14bd55d64731 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,6 +38,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
select PINCTRL_PFC_R8A779G0 if ARCH_R8A779G0
+ select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
@@ -154,6 +155,10 @@ config PINCTRL_PFC_R8A779G0
bool "pin control support for R-Car V4H" if COMPILE_TEST
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A779H0
+ bool "pin control support for R-Car V4M" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
@@ -187,9 +192,11 @@ config PINCTRL_RZG2L
bool "pin control support for RZ/{G2L,G2UL,V2L}" if COMPILE_TEST
depends on OF
select GPIOLIB
+ select GPIOLIB_IRQCHIP
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
+ select IRQ_DOMAIN_HIERARCHY
help
This selects GPIO and pinctrl driver for Renesas RZ/{G2L,G2UL,V2L}
platforms.
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 3e776955bd4b..2ba623e04bf8 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779F0) += pfc-r8a779f0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779G0) += pfc-r8a779g0.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779H0) += pfc-r8a779h0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 93e51abbf519..96d6040a8871 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -638,6 +638,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a779g0_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779H0
+ {
+ .compatible = "renesas,pfc-r8a779h0",
+ .data = &r8a779h0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
@@ -731,10 +737,12 @@ static int sh_pfc_resume_noirq(struct device *dev)
sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
return 0;
}
+#define pm_psci_sleep_ptr(_ptr) pm_sleep_ptr(_ptr)
#else
static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
static int sh_pfc_suspend_noirq(struct device *dev) { return 0; }
static int sh_pfc_resume_noirq(struct device *dev) { return 0; }
+#define pm_psci_sleep_ptr(_ptr) PTR_IF(false, (_ptr))
#endif /* CONFIG_ARM_PSCI_FW */
static DEFINE_NOIRQ_DEV_PM_OPS(sh_pfc_pm, sh_pfc_suspend_noirq, sh_pfc_resume_noirq);
@@ -907,9 +915,11 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
cfg_reg->reg, rw, cfg_reg->reg_width);
- if (n != cfg_reg->nr_enum_ids)
+ if (n != cfg_reg->nr_enum_ids) {
sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+ n = cfg_reg->nr_enum_ids;
+ }
check_enum_ids:
sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
@@ -1415,7 +1425,7 @@ static struct platform_driver sh_pfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sh_pfc_of_table),
- .pm = pm_sleep_ptr(&sh_pfc_pm),
+ .pm = pm_psci_sleep_ptr(&sh_pfc_pm),
},
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index acdea6ac1525..d2de526a3b58 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -2384,6 +2384,14 @@ static const unsigned int scif_clk_mux[] = {
SCIF_CLK_MARK,
};
+static const unsigned int scif_clk2_pins[] = {
+ /* SCIF_CLK2 */
+ RCAR_GP_PIN(8, 11),
+};
+static const unsigned int scif_clk2_mux[] = {
+ SCIF_CLK2_MARK,
+};
+
/* - SSI ------------------------------------------------- */
static const unsigned int ssi_data_pins[] = {
/* SSI_SD */
@@ -2694,6 +2702,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif4_clk),
SH_PFC_PIN_GROUP(scif4_ctrl),
SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk2),
SH_PFC_PIN_GROUP(ssi_data),
SH_PFC_PIN_GROUP(ssi_ctrl),
@@ -3015,6 +3024,10 @@ static const char * const scif_clk_groups[] = {
"scif_clk",
};
+static const char * const scif_clk2_groups[] = {
+ "scif_clk2",
+};
+
static const char * const ssi_groups[] = {
"ssi_data",
"ssi_ctrl",
@@ -3102,6 +3115,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif3),
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(scif_clk2),
SH_PFC_FUNCTION(ssi),
diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c
new file mode 100644
index 000000000000..afa8f06c85cf
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c
@@ -0,0 +1,3967 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779H0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a779a0.c
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_29(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_16(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(2, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 30, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 31, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(4, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(4, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS)
+
+#define CPU_ALL_NOGP(fn) \
+ PIN_NOGP_CFG(VDDQ_AVB0, "VDDQ_AVB0", fn, SH_PFC_PIN_CFG_IO_VOLTAGE_18_25), \
+ PIN_NOGP_CFG(VDDQ_AVB1, "VDDQ_AVB1", fn, SH_PFC_PIN_CFG_IO_VOLTAGE_18_25), \
+ PIN_NOGP_CFG(VDDQ_AVB2, "VDDQ_AVB2", fn, SH_PFC_PIN_CFG_IO_VOLTAGE_18_25)
+
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_29 F_(ERROROUTC_N_A, IP3SR1_23_20)
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD1_RX, IP2SR2_15_12)
+#define GPSR2_17 F_(CANFD1_TX, IP2SR2_7_4)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N_A, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N_A, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_31 F_(TCLK4, IP3SR3_31_28)
+#define GPSR3_30 F_(TCLK3, IP3SR3_27_24)
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(PWM2, IP1SR3_27_24)
+#define GPSR3_13 F_(PWM1, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
+/* GPSR4 */
+#define GPSR4_24 F_(AVS1, IP3SR4_3_0)
+#define GPSR4_23 F_(AVS0, IP2SR4_31_28)
+#define GPSR4_21 F_(PCIE0_CLKREQ_N, IP2SR4_23_20)
+#define GPSR4_15 F_(PWM4, IP1SR4_31_28)
+#define GPSR4_14 F_(PWM3, IP1SR4_27_24)
+#define GPSR4_13 F_(HSCK2, IP1SR4_23_20)
+#define GPSR4_12 F_(HCTS2_N, IP1SR4_19_16)
+#define GPSR4_11 F_(SCIF_CLK2, IP1SR4_15_12)
+#define GPSR4_10 F_(HRTS2_N, IP1SR4_11_8)
+#define GPSR4_9 F_(HTX2, IP1SR4_7_4)
+#define GPSR4_8 F_(HRX2, IP1SR4_3_0)
+#define GPSR4_7 F_(SDA3, IP0SR4_31_28)
+#define GPSR4_6 F_(SCL3, IP0SR4_27_24)
+#define GPSR4_5 F_(SDA2, IP0SR4_23_20)
+#define GPSR4_4 F_(SCL2, IP0SR4_19_16)
+#define GPSR4_3 F_(SDA1, IP0SR4_15_12)
+#define GPSR4_2 F_(SCL1, IP0SR4_11_8)
+#define GPSR4_1 F_(SDA0, IP0SR4_7_4)
+#define GPSR4_0 F_(SCL0, IP0SR4_3_0)
+
+/* GPSR5 */
+#define GPSR5_20 F_(AVB2_RX_CTL, IP2SR5_19_16)
+#define GPSR5_19 F_(AVB2_TX_CTL, IP2SR5_15_12)
+#define GPSR5_18 F_(AVB2_RXC, IP2SR5_11_8)
+#define GPSR5_17 F_(AVB2_RD0, IP2SR5_7_4)
+#define GPSR5_16 F_(AVB2_TXC, IP2SR5_3_0)
+#define GPSR5_15 F_(AVB2_TD0, IP1SR5_31_28)
+#define GPSR5_14 F_(AVB2_RD1, IP1SR5_27_24)
+#define GPSR5_13 F_(AVB2_RD2, IP1SR5_23_20)
+#define GPSR5_12 F_(AVB2_TD1, IP1SR5_19_16)
+#define GPSR5_11 F_(AVB2_TD2, IP1SR5_15_12)
+#define GPSR5_10 F_(AVB2_MDIO, IP1SR5_11_8)
+#define GPSR5_9 F_(AVB2_RD3, IP1SR5_7_4)
+#define GPSR5_8 F_(AVB2_TD3, IP1SR5_3_0)
+#define GPSR5_7 F_(AVB2_TXCREFCLK, IP0SR5_31_28)
+#define GPSR5_6 F_(AVB2_MDC, IP0SR5_27_24)
+#define GPSR5_5 F_(AVB2_MAGIC, IP0SR5_23_20)
+#define GPSR5_4 F_(AVB2_PHY_INT, IP0SR5_19_16)
+#define GPSR5_3 F_(AVB2_LINK, IP0SR5_15_12)
+#define GPSR5_2 F_(AVB2_AVTP_MATCH, IP0SR5_11_8)
+#define GPSR5_1 F_(AVB2_AVTP_CAPTURE, IP0SR5_7_4)
+#define GPSR5_0 F_(AVB2_AVTP_PPS, IP0SR5_3_0)
+
+/* GPSR6 */
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+
+/* GPSR7 */
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+
+/* SR0 */
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1_A) FM(IRQ2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1_A) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1_A) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N_A) FM(CTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N_A) FM(RTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1_A) FM(SCK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR1 */
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_B) FM(TX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_B) FM(RX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_B) FM(RTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_B) FM(CTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_B) FM(SCK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_B) FM(CTS1_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_B) FM(RTS1_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_B) FM(SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2_A) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3_A) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3_A) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N_A) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N_A) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3_A) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_23_20 FM(ERROROUTC_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR2 */
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) F_(0, 0) FM(TPU0TO2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N_A) F_(0, 0) FM(TPU0TO3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) F_(0, 0) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) F_(0, 0) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1_A) F_(0, 0) F_(0, 0) FM(TCLK2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0_A) F_(0, 0) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2_A) F_(0, 0) FM(TCLK3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3_A) FM(PWM1_B) FM(TCLK4_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_7_4 FM(CANFD1_TX) F_(0, 0) FM(PWM1_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD1_RX) F_(0, 0) FM(PWM2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR3 */
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(PWM1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(PWM2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_27_24 FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_31_28 FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR4 */
+/* IP0SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR4_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR4_3_0 FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_7_4 FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_11_8 FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_15_12 FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_19_16 FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_23_20 FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_27_24 FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_31_28 FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR4_23_20 FM(PCIE0_CLKREQ_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_31_28 FM(AVS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR4_3_0 FM(AVS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR5 */
+/* IP0SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR5_3_0 FM(AVB2_AVTP_PPS) FM(Ether_GPTP_PPS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_7_4 FM(AVB2_AVTP_CAPTURE) FM(Ether_GPTP_CAPTURE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_11_8 FM(AVB2_AVTP_MATCH) FM(Ether_GPTP_MATCH) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_15_12 FM(AVB2_LINK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_19_16 FM(AVB2_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_23_20 FM(AVB2_MAGIC) FM(Ether_GPTP_PPS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_27_24 FM(AVB2_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_31_28 FM(AVB2_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR5_3_0 FM(AVB2_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_7_4 FM(AVB2_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_11_8 FM(AVB2_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_15_12 FM(AVB2_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_19_16 FM(AVB2_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_23_20 FM(AVB2_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_27_24 FM(AVB2_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_31_28 FM(AVB2_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR5_3_0 FM(AVB2_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_7_4 FM(AVB2_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_11_8 FM(AVB2_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_15_12 FM(AVB2_TX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_19_16 FM(AVB2_RX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR6 */
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR7 */
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+#define PINMUX_GPSR \
+ GPSR3_31 \
+ GPSR3_30 \
+ GPSR1_29 GPSR3_29 \
+ GPSR1_28 GPSR3_28 \
+ GPSR1_27 GPSR3_27 \
+ GPSR1_26 GPSR3_26 \
+ GPSR1_25 GPSR3_25 \
+ GPSR1_24 GPSR3_24 GPSR4_24 \
+ GPSR1_23 GPSR3_23 GPSR4_23 \
+ GPSR1_22 GPSR3_22 \
+ GPSR1_21 GPSR3_21 GPSR4_21 \
+ GPSR1_20 GPSR3_20 GPSR5_20 GPSR6_20 GPSR7_20 \
+ GPSR1_19 GPSR2_19 GPSR3_19 GPSR5_19 GPSR6_19 GPSR7_19 \
+GPSR0_18 GPSR1_18 GPSR3_18 GPSR5_18 GPSR6_18 GPSR7_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR5_17 GPSR6_17 GPSR7_17 \
+GPSR0_16 GPSR1_16 GPSR3_16 GPSR5_16 GPSR6_16 GPSR7_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \
+FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \
+FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
+FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \
+FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 FM(IP3SR1_23_20) IP3SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 FM(IP3SR3_27_24) IP3SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 FM(IP3SR3_31_28) IP3SR3_31_28 \
+\
+FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 FM(IP3SR4_3_0) IP3SR4_3_0 \
+FM(IP0SR4_7_4) IP0SR4_7_4 FM(IP1SR4_7_4) IP1SR4_7_4 \
+FM(IP0SR4_11_8) IP0SR4_11_8 FM(IP1SR4_11_8) IP1SR4_11_8 \
+FM(IP0SR4_15_12) IP0SR4_15_12 FM(IP1SR4_15_12) IP1SR4_15_12 \
+FM(IP0SR4_19_16) IP0SR4_19_16 FM(IP1SR4_19_16) IP1SR4_19_16 \
+FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 FM(IP2SR4_23_20) IP2SR4_23_20 \
+FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 \
+FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 FM(IP2SR4_31_28) IP2SR4_31_28 \
+\
+FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 FM(IP2SR5_3_0) IP2SR5_3_0 \
+FM(IP0SR5_7_4) IP0SR5_7_4 FM(IP1SR5_7_4) IP1SR5_7_4 FM(IP2SR5_7_4) IP2SR5_7_4 \
+FM(IP0SR5_11_8) IP0SR5_11_8 FM(IP1SR5_11_8) IP1SR5_11_8 FM(IP2SR5_11_8) IP2SR5_11_8 \
+FM(IP0SR5_15_12) IP0SR5_15_12 FM(IP1SR5_15_12) IP1SR5_15_12 FM(IP2SR5_15_12) IP2SR5_15_12 \
+FM(IP0SR5_19_16) IP0SR5_19_16 FM(IP1SR5_19_16) IP1SR5_19_16 FM(IP2SR5_19_16) IP2SR5_19_16 \
+FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 \
+FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 \
+FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 \
+\
+FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
+FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
+FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
+FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \
+FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \
+FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \
+FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \
+FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \
+\
+FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \
+FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \
+FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \
+FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \
+FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \
+FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \
+FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \
+FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \
+
+/* MOD_SEL4 */ /* 0 */ /* 1 */
+#define MOD_SEL4_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1)
+#define MOD_SEL4_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1)
+#define MOD_SEL4_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1)
+#define MOD_SEL4_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1)
+#define MOD_SEL4_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1)
+#define MOD_SEL4_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1)
+#define MOD_SEL4_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1)
+#define MOD_SEL4_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL4_7 \
+MOD_SEL4_6 \
+MOD_SEL4_5 \
+MOD_SEL4_4 \
+MOD_SEL4_3 \
+MOD_SEL4_2 \
+MOD_SEL4_1 \
+MOD_SEL4_0
+
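+/*
+ * The GPSR/IPSR/MOD_SEL tables above are expanded twice in the enum below:
+ * first with FM(x) defined as FN_##x to emit the pin function enumerators,
+ * then with FM(x) defined as x##_MARK to emit the matching mark values.
+ * F_() entries expand to nothing in both passes.
+ */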
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
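+ /*
+  * Each PINMUX_IPSR_GPSR() entry registers one alternative function for a
+  * 4-bit IPnSRx field. The PINMUX_IPSR_MSEL() entries (the I2C pins in
+  * IP0SR4) additionally name the MOD_SEL4 setting they depend on.
+  */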
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_N_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_B),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+
+ /* IP1SR0 */
+ PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC),
+
+ PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK),
+
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1_A),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N_A),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N_A),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N_A),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N_A),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1_A),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1_A),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_B),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_B),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_B),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_B),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_B),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_B),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM0_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_C),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2_A),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_B),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_B),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ PINMUX_IPSR_GPSR(IP3SR1_23_20, ERROROUTC_N_A),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N_A),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1_A),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_C),
+
+ /* IP1SR2 */
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0_A),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2_A),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_C),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3_A),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_C),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM1_C),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM2_C),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1),
+
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0),
+
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2),
+
+ PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK),
+
+ PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS),
+
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3),
+
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5),
+
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, PWM2_A),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+ /* IP2SR3 */
+ PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3),
+
+ PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2),
+
+ PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1),
+
+ PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0),
+
+ PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK),
+
+ PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0),
+
+ PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK),
+
+ PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1),
+
+ /* IP3SR3 */
+ PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2),
+
+ PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL),
+
+ PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3),
+
+ PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N),
+
+ PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+
+ PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+ PINMUX_IPSR_GPSR(IP3SR3_27_24, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP3SR3_31_28, TCLK4_A),
+
+ /* IP0SR4 */
+ PINMUX_IPSR_MSEL(IP0SR4_3_0, SCL0, SEL_SCL0_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_7_4, SDA0, SEL_SDA0_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_11_8, SCL1, SEL_SCL1_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_15_12, SDA1, SEL_SDA1_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_19_16, SCL2, SEL_SCL2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_23_20, SDA2, SEL_SDA2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_27_24, SCL3, SEL_SCL3_0),
+
+ PINMUX_IPSR_MSEL(IP0SR4_31_28, SDA3, SEL_SDA3_0),
+
+ /* IP1SR4 */
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, HRX2),
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, SCK4),
+
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, HTX2),
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, CTS4_N),
+
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, HRTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, RTS4_N),
+
+ PINMUX_IPSR_GPSR(IP1SR4_15_12, SCIF_CLK2),
+
+ PINMUX_IPSR_GPSR(IP1SR4_19_16, HCTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR4_19_16, TX4),
+
+ PINMUX_IPSR_GPSR(IP1SR4_23_20, HSCK2),
+ PINMUX_IPSR_GPSR(IP1SR4_23_20, RX4),
+
+ PINMUX_IPSR_GPSR(IP1SR4_27_24, PWM3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR4_31_28, PWM4),
+
+ /* IP2SR4 */
+ PINMUX_IPSR_GPSR(IP2SR4_23_20, PCIE0_CLKREQ_N),
+
+ PINMUX_IPSR_GPSR(IP2SR4_31_28, AVS0),
+
+ /* IP3SR4 */
+ PINMUX_IPSR_GPSR(IP3SR4_3_0, AVS1),
+
+ /* IP0SR5 */
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, AVB2_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, Ether_GPTP_PPS0),
+
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, AVB2_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, Ether_GPTP_CAPTURE),
+
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, AVB2_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, Ether_GPTP_MATCH),
+
+ PINMUX_IPSR_GPSR(IP0SR5_15_12, AVB2_LINK),
+
+ PINMUX_IPSR_GPSR(IP0SR5_19_16, AVB2_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, AVB2_MAGIC),
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, Ether_GPTP_PPS1),
+
+ PINMUX_IPSR_GPSR(IP0SR5_27_24, AVB2_MDC),
+
+ PINMUX_IPSR_GPSR(IP0SR5_31_28, AVB2_TXCREFCLK),
+
+ /* IP1SR5 */
+ PINMUX_IPSR_GPSR(IP1SR5_3_0, AVB2_TD3),
+
+ PINMUX_IPSR_GPSR(IP1SR5_7_4, AVB2_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR5_11_8, AVB2_MDIO),
+
+ PINMUX_IPSR_GPSR(IP1SR5_15_12, AVB2_TD2),
+
+ PINMUX_IPSR_GPSR(IP1SR5_19_16, AVB2_TD1),
+
+ PINMUX_IPSR_GPSR(IP1SR5_23_20, AVB2_RD2),
+
+ PINMUX_IPSR_GPSR(IP1SR5_27_24, AVB2_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR5_31_28, AVB2_TD0),
+
+ /* IP2SR5 */
+ PINMUX_IPSR_GPSR(IP2SR5_3_0, AVB2_TXC),
+
+ PINMUX_IPSR_GPSR(IP2SR5_7_4, AVB2_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR5_11_8, AVB2_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR5_15_12, AVB2_TX_CTL),
+
+ PINMUX_IPSR_GPSR(IP2SR5_19_16, AVB2_RX_CTL),
+
+ /* IP0SR6 */
+ PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+ PINMUX_IPSR_GPSR(IP0SR6_7_4, AVB1_MAGIC),
+
+ PINMUX_IPSR_GPSR(IP0SR6_11_8, AVB1_MDC),
+
+ PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_MII_RX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR6_27_24, AVB1_TXC),
+ PINMUX_IPSR_GPSR(IP0SR6_27_24, AVB1_MII_TXC),
+
+ PINMUX_IPSR_GPSR(IP0SR6_31_28, AVB1_TX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR6_31_28, AVB1_MII_TX_EN),
+
+ /* IP1SR6 */
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_GPSR(IP1SR6_11_8, AVB1_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP1SR6_11_8, AVB1_MII_COL),
+
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_GPSR(IP1SR6_19_16, AVB1_TD1),
+ PINMUX_IPSR_GPSR(IP1SR6_19_16, AVB1_MII_TD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_23_20, AVB1_TD0),
+ PINMUX_IPSR_GPSR(IP1SR6_23_20, AVB1_MII_TD0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+ /* IP2SR6 */
+ PINMUX_IPSR_GPSR(IP2SR6_3_0, AVB1_TD2),
+ PINMUX_IPSR_GPSR(IP2SR6_3_0, AVB1_MII_TD2),
+
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+ PINMUX_IPSR_GPSR(IP2SR6_11_8, AVB1_TD3),
+ PINMUX_IPSR_GPSR(IP2SR6_11_8, AVB1_MII_TD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+ /* IP0SR7 */
+ PINMUX_IPSR_GPSR(IP0SR7_3_0, AVB0_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP0SR7_3_0, AVB0_MII_COL),
+
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, AVB0_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, AVB0_MII_RX_ER),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, CC5_OSCOUT),
+
+ PINMUX_IPSR_GPSR(IP0SR7_15_12, AVB0_TD3),
+ PINMUX_IPSR_GPSR(IP0SR7_15_12, AVB0_MII_TD3),
+
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR7_27_24, AVB0_TD2),
+ PINMUX_IPSR_GPSR(IP0SR7_27_24, AVB0_MII_TD2),
+
+ PINMUX_IPSR_GPSR(IP0SR7_31_28, AVB0_TD1),
+ PINMUX_IPSR_GPSR(IP0SR7_31_28, AVB0_MII_TD1),
+
+ /* IP1SR7 */
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_GPSR(IP1SR7_11_8, AVB0_MAGIC),
+
+ PINMUX_IPSR_GPSR(IP1SR7_15_12, AVB0_TD0),
+ PINMUX_IPSR_GPSR(IP1SR7_15_12, AVB0_MII_TD0),
+
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_GPSR(IP1SR7_23_20, AVB0_MDC),
+
+ PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+ PINMUX_IPSR_GPSR(IP1SR7_31_28, AVB0_TXC),
+ PINMUX_IPSR_GPSR(IP1SR7_31_28, AVB0_MII_TXC),
+
+ /* IP2SR7 */
+ PINMUX_IPSR_GPSR(IP2SR7_3_0, AVB0_TX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_3_0, AVB0_MII_TX_EN),
+
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+ NOGP_ALL(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+ PINMUX_NOGP_ALL(),
+};
+
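+/*
+ * For each pin group below, <group>_pins[] lists the physical pins as
+ * RCAR_GP_PIN(bank, offset) and <group>_mux[] lists the corresponding
+ * *_MARK values in the same order; the two arrays are paired by
+ * SH_PFC_PIN_GROUP() in pinmux_groups[] further down.
+ */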
+/* - AUDIO CLOCK ----------------------------------------- */
+static const unsigned int audio_clkin_pins[] = {
+ /* CLK IN */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int audio_clkin_mux[] = {
+ AUDIO_CLKIN_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLK OUT */
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK,
+ AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK,
+ AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK,
+ AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK,
+ AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK,
+ AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK,
+ AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0_N, HCTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1_A ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* HRX1_A, HTX1_A */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* HSCK1_A */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* HRTS1_N_A, HCTS1_N_A */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+/* - HSCIF1_B ---------------------------------------------------------------- */
+static const unsigned int hscif1_data_b_pins[] = {
+ /* HRX1_B, HTX1_B */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* HSCK1_B */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* HRTS1_N_B, HCTS1_N_B */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2_N, HCTS2_N */
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - HSCIF3_B ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_b_pins[] = {
+ /* HRX3_B, HTX3_B */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_clk_b_pins[] = {
+ /* HSCK3_B */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_b_mux[] = {
+ HSCK3_B_MARK,
+};
+static const unsigned int hscif3_ctrl_b_pins[] = {
+ /* HRTS3_N_B, HCTS3_N_B */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_b_mux[] = {
+ HRTS3_N_B_MARK, HCTS3_N_B_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 4),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 6),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+ /* PCIE0_CLKREQ_N */
+ RCAR_GP_PIN(4, 21),
+};
+
+static const unsigned int pcie0_clkreq_n_mux[] = {
+ PCIE0_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM0_A */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+/* - PWM0_B ------------------------------------------------------------------- */
+static const unsigned int pwm0_b_pins[] = {
+ /* PWM0_B */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm0_b_mux[] = {
+ PWM0_B_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM1_A */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM1_B */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM1_C ------------------------------------------------------------------- */
+static const unsigned int pwm1_c_pins[] = {
+ /* PWM1_C */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm1_c_mux[] = {
+ PWM1_C_MARK,
+};
+
+/* - PWM2_A ------------------------------------------------------------------- */
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM2_A */
+ RCAR_GP_PIN(3, 14),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM2_B */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+/* - PWM2_C ------------------------------------------------------------------- */
+static const unsigned int pwm2_c_pins[] = {
+ /* PWM2_C */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm2_c_mux[] = {
+ PWM2_C_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM3_A */
+ RCAR_GP_PIN(4, 14),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM3_B */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+/* - PWM3_C ------------------------------------------------------------------- */
+static const unsigned int pwm3_c_pins[] = {
+ /* PWM3_C */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_c_mux[] = {
+ PWM3_C_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0_N, CTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1_A ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+ /* RX1_A, TX1_A */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_a_mux[] = {
+ RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_a_pins[] = {
+ /* SCK1_A */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_a_mux[] = {
+ SCK1_A_MARK,
+};
+static const unsigned int scif1_ctrl_a_pins[] = {
+ /* RTS1_N_A, CTS1_N_A */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_a_mux[] = {
+ RTS1_N_A_MARK, CTS1_N_A_MARK,
+};
+
+/* - SCIF1_B ------------------------------------------------------------------ */
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX1_B, TX1_B */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_b_pins[] = {
+ /* SCK1_B */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_b_mux[] = {
+ SCK1_B_MARK,
+};
+static const unsigned int scif1_ctrl_b_pins[] = {
+ /* RTS1_N_B, CTS1_N_B */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_b_mux[] = {
+ RTS1_N_B_MARK, CTS1_N_B_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK3_A */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS3_N_A, CTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF3_B ------------------------------------------------------------------ */
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX3_B, TX3_B */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+static const unsigned int scif3_clk_b_pins[] = {
+ /* SCK3_B */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_b_mux[] = {
+ SCK3_B_MARK,
+};
+static const unsigned int scif3_ctrl_b_pins[] = {
+ /* RTS3_N_B, CTS3_N_B */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_b_mux[] = {
+ RTS3_N_B_MARK, CTS3_N_B_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(4, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4_N, CTS4_N */
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+static const unsigned int scif_clk2_pins[] = {
+ /* SCIF_CLK2 */
+ RCAR_GP_PIN(4, 11),
+};
+static const unsigned int scif_clk2_mux[] = {
+ SCIF_CLK2_MARK,
+};
+
+/* - SSI ------------------------------------------------- */
+static const unsigned int ssi_data_pins[] = {
+ /* SSI_SD */
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int ssi_data_mux[] = {
+ SSI_SD_MARK,
+};
+static const unsigned int ssi_ctrl_pins[] = {
+ /* SSI_SCK, SSI_WS */
+ RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19),
+};
+static const unsigned int ssi_ctrl_mux[] = {
+ SSI_SCK_MARK, SSI_WS_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
+};
+
+/* - TPU_B ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_b_pins[] = {
+ /* TPU0TO0_B */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_b_mux[] = {
+ TPU0TO0_B_MARK,
+};
+static const unsigned int tpu_to1_b_pins[] = {
+ /* TPU0TO1_B */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_b_mux[] = {
+ TPU0TO1_B_MARK,
+};
+static const unsigned int tpu_to2_b_pins[] = {
+ /* TPU0TO2_B */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_b_mux[] = {
+ TPU0TO2_B_MARK,
+};
+static const unsigned int tpu_to3_b_pins[] = {
+ /* TPU0TO3_B */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_b_mux[] = {
+ TPU0TO3_B_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clkin),
+ SH_PFC_PIN_GROUP(audio_clkout),
+
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk_a),
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_clk_b),
+ SH_PFC_PIN_GROUP(hscif3_ctrl_b),
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+
+ BUS_DATA_PIN_GROUP(mmc_data, 1),
+ BUS_DATA_PIN_GROUP(mmc_data, 4),
+ BUS_DATA_PIN_GROUP(mmc_data, 8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+
+ SH_PFC_PIN_GROUP(pwm0_a),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm1_c),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm2_c),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm3_c),
+ SH_PFC_PIN_GROUP(pwm4),
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+ BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ BUS_DATA_PIN_GROUP(qspi1_data, 2),
+ BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk_a),
+ SH_PFC_PIN_GROUP(scif1_ctrl_a),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_ctrl_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk_a),
+ SH_PFC_PIN_GROUP(scif3_ctrl_a),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_clk_b),
+ SH_PFC_PIN_GROUP(scif3_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk2),
+
+ SH_PFC_PIN_GROUP(ssi_data),
+ SH_PFC_PIN_GROUP(ssi_ctrl),
+
+ SH_PFC_PIN_GROUP(tpu_to0_a),
+ SH_PFC_PIN_GROUP(tpu_to0_b),
+ SH_PFC_PIN_GROUP(tpu_to1_a),
+ SH_PFC_PIN_GROUP(tpu_to1_b),
+ SH_PFC_PIN_GROUP(tpu_to2_a),
+ SH_PFC_PIN_GROUP(tpu_to2_b),
+ SH_PFC_PIN_GROUP(tpu_to3_a),
+ SH_PFC_PIN_GROUP(tpu_to3_b),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clkin",
+ "audio_clkout",
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
+ "hscif3_data_b",
+ "hscif3_clk_b",
+ "hscif3_ctrl_b",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+ "pcie0_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0_a",
+ "pwm0_b",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+ "pwm1_c",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+ "pwm2_c",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+ "pwm3_c",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data_a",
+ "scif1_clk_a",
+ "scif1_ctrl_a",
+ "scif1_data_b",
+ "scif1_clk_b",
+ "scif1_ctrl_b",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+ "scif3_data_b",
+ "scif3_clk_b",
+ "scif3_ctrl_b",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const scif_clk2_groups[] = {
+ "scif_clk2",
+};
+
+static const char * const ssi_groups[] = {
+ "ssi_data",
+ "ssi_ctrl",
+};
+
+static const char * const tpu_groups[] = {
+ "tpu_to0_a",
+ "tpu_to0_b",
+ "tpu_to1_a",
+ "tpu_to1_b",
+ "tpu_to2_a",
+ "tpu_to2_b",
+ "tpu_to3_a",
+ "tpu_to3_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pcie),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(scif_clk2),
+
+ SH_PFC_FUNCTION(ssi),
+
+ SH_PFC_FUNCTION(tpu),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_19 RESERVED */
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ GP_1_29_FN, GPSR1_29,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+ GROUP(-12, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_20 RESERVED */
+ GP_2_19_FN, GPSR2_19,
+ /* GP2_18 RESERVED */
+ GP_2_17_FN, GPSR2_17,
+ /* GP2_16 RESERVED */
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
+ GP_3_31_FN, GPSR3_31,
+ GP_3_30_FN, GPSR3_30,
+ GP_3_29_FN, GPSR3_29,
+ GP_3_28_FN, GPSR3_28,
+ GP_3_27_FN, GPSR3_27,
+ GP_3_26_FN, GPSR3_26,
+ GP_3_25_FN, GPSR3_25,
+ GP_3_24_FN, GPSR3_24,
+ GP_3_23_FN, GPSR3_23,
+ GP_3_22_FN, GPSR3_22,
+ GP_3_21_FN, GPSR3_21,
+ GP_3_20_FN, GPSR3_20,
+ GP_3_19_FN, GPSR3_19,
+ GP_3_18_FN, GPSR3_18,
+ GP_3_17_FN, GPSR3_17,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR4", 0xE6060040, 32,
+ GROUP(-7, 1, 1, -1, 1, -5, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP4_31_25 RESERVED */
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ /* GP4_22 RESERVED */
+ GP_4_21_FN, GPSR4_21,
+ /* GP4_20_16 RESERVED */
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+ IP0SR0_31_28
+ IP0SR0_27_24
+ IP0SR0_23_20
+ IP0SR0_19_16
+ IP0SR0_15_12
+ IP0SR0_11_8
+ IP0SR0_7_4
+ IP0SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+ IP1SR0_31_28
+ IP1SR0_27_24
+ IP1SR0_23_20
+ IP1SR0_19_16
+ IP1SR0_15_12
+ IP1SR0_11_8
+ IP1SR0_7_4
+ IP1SR0_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+ GROUP(-20, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_12 RESERVED */
+ IP2SR0_11_8
+ IP2SR0_7_4
+ IP2SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_24 RESERVED */
+ IP3SR1_23_20
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+ GROUP(-16, 4, -4, 4, -4),
+ GROUP(
+ /* IP2SR2_31_16 RESERVED */
+ IP2SR2_15_12
+ /* IP2SR2_11_8 RESERVED */
+ IP2SR2_7_4
+ /* IP2SR2_3_0 RESERVED */))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+ IP2SR3_31_28
+ IP2SR3_27_24
+ IP2SR3_23_20
+ IP2SR3_19_16
+ IP2SR3_15_12
+ IP2SR3_11_8
+ IP2SR3_7_4
+ IP2SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP3SR3", 0xE605886C, 32, 4, GROUP(
+ IP3SR3_31_28
+ IP3SR3_27_24
+ IP3SR3_23_20
+ IP3SR3_19_16
+ IP3SR3_15_12
+ IP3SR3_11_8
+ IP3SR3_7_4
+ IP3SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR4", 0xE6060060, 32, 4, GROUP(
+ IP0SR4_31_28
+ IP0SR4_27_24
+ IP0SR4_23_20
+ IP0SR4_19_16
+ IP0SR4_15_12
+ IP0SR4_11_8
+ IP0SR4_7_4
+ IP0SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR4", 0xE6060064, 32, 4, GROUP(
+ IP1SR4_31_28
+ IP1SR4_27_24
+ IP1SR4_23_20
+ IP1SR4_19_16
+ IP1SR4_15_12
+ IP1SR4_11_8
+ IP1SR4_7_4
+ IP1SR4_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR4", 0xE6060068, 32,
+ GROUP(4, -4, 4, -20),
+ GROUP(
+ IP2SR4_31_28
+ /* IP2SR4_27_24 RESERVED */
+ IP2SR4_23_20
+ /* IP2SR4_19_0 RESERVED */))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR4", 0xE606006C, 32,
+ GROUP(-28, 4),
+ GROUP(
+ /* IP3SR4_31_4 RESERVED */
+ IP3SR4_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR5", 0xE6060860, 32, 4, GROUP(
+ IP0SR5_31_28
+ IP0SR5_27_24
+ IP0SR5_23_20
+ IP0SR5_19_16
+ IP0SR5_15_12
+ IP0SR5_11_8
+ IP0SR5_7_4
+ IP0SR5_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR5", 0xE6060864, 32, 4, GROUP(
+ IP1SR5_31_28
+ IP1SR5_27_24
+ IP1SR5_23_20
+ IP1SR5_19_16
+ IP1SR5_15_12
+ IP1SR5_11_8
+ IP1SR5_7_4
+ IP1SR5_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR5", 0xE6060868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR5_31_20 RESERVED */
+ IP2SR5_19_16
+ IP2SR5_15_12
+ IP2SR5_11_8
+ IP2SR5_7_4
+ IP2SR5_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ IP0SR6_31_28
+ IP0SR6_27_24
+ IP0SR6_23_20
+ IP0SR6_19_16
+ IP0SR6_15_12
+ IP0SR6_11_8
+ IP0SR6_7_4
+ IP0SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+ IP1SR6_31_28
+ IP1SR6_27_24
+ IP1SR6_23_20
+ IP1SR6_19_16
+ IP1SR6_15_12
+ IP1SR6_11_8
+ IP1SR6_7_4
+ IP1SR6_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR6_31_20 RESERVED */
+ IP2SR6_19_16
+ IP2SR6_15_12
+ IP2SR6_11_8
+ IP2SR6_7_4
+ IP2SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+ IP0SR7_31_28
+ IP0SR7_27_24
+ IP0SR7_23_20
+ IP0SR7_19_16
+ IP0SR7_15_12
+ IP0SR7_11_8
+ IP0SR7_7_4
+ IP0SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+ IP1SR7_31_28
+ IP1SR7_27_24
+ IP1SR7_23_20
+ IP1SR7_19_16
+ IP1SR7_15_12
+ IP1SR7_11_8
+ IP1SR7_7_4
+ IP1SR7_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR7_31_20 RESERVED */
+ IP2SR7_19_16
+ IP2SR7_15_12
+ IP2SR7_11_8
+ IP2SR7_7_4
+ IP2SR7_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+ GROUP(-24, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED 31-8 */
+ MOD_SEL4_7
+ MOD_SEL4_6
+ MOD_SEL4_5
+ MOD_SEL4_4
+ MOD_SEL4_3
+ MOD_SEL4_2
+ MOD_SEL4_1
+ MOD_SEL4_0))
+ },
+ { },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+ { RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+ { RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+ { RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+ { RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+ { RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+ { RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+ { RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+ { RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+ { RCAR_GP_PIN(1, 29), 20, 2 }, /* ERROROUTC_N */
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+ { RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+ { RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD1_RX */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD1_TX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(3, 14), 24, 2 }, /* PWM2 */
+ { RCAR_GP_PIN(3, 13), 20, 2 }, /* PWM1 */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* MMC_D6 */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) {
+ { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */
+ { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) {
+ { RCAR_GP_PIN(3, 31), 28, 2 }, /* TCLK4 */
+ { RCAR_GP_PIN(3, 30), 24, 2 }, /* TCLK3 */
+ { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* SDA3 */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* SCL3 */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* SDA2 */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* SCL2 */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* SDA1 */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* SCL1 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* SDA0 */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* SCL0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* PWM4 */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* PWM3 */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* HSCK2 */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* HCTS2_N */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* SCIF_CLK2 */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* HRTS2_N */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* HTX2 */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* HRX2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) {
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */
+ } },
+ { },
+};
+
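+/* I/O voltage (POC) control registers; note there is no POC2 entry for GP2. */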
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC3,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xE60500A0, },
+ [POC1] = { 0xE60508A0, },
+ [POC3] = { 0xE60588A0, },
+ [POC4] = { 0xE60600A0, },
+ [POC5] = { 0xE60608A0, },
+ [POC6] = { 0xE60610A0, },
+ [POC7] = { 0xE60618A0, },
+ { /* sentinel */ },
+};
+
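+/*
+ * Map a pin to its I/O voltage control (POC) register: store the register
+ * address in *pocctrl and return the bit position within it, or -EINVAL
+ * for pins without POC control.
+ */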
+static int r8a779h0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ switch (pin) {
+ case RCAR_GP_PIN(0, 0) ... RCAR_GP_PIN(0, 18):
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ return bit;
+
+ case RCAR_GP_PIN(1, 0) ... RCAR_GP_PIN(1, 28):
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ return bit;
+
+ case RCAR_GP_PIN(3, 0) ... RCAR_GP_PIN(3, 12):
+ *pocctrl = pinmux_ioctrl_regs[POC3].reg;
+ return bit;
+
+ case RCAR_GP_PIN(4, 0) ... RCAR_GP_PIN(4, 13):
+ *pocctrl = pinmux_ioctrl_regs[POC4].reg;
+ return bit;
+
+ case PIN_VDDQ_AVB2:
+ *pocctrl = pinmux_ioctrl_regs[POC5].reg;
+ return 0;
+
+ case PIN_VDDQ_AVB1:
+ *pocctrl = pinmux_ioctrl_regs[POC6].reg;
+ return 0;
+
+ case PIN_VDDQ_AVB0:
+ *pocctrl = pinmux_ioctrl_regs[POC7].reg;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */
+ [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */
+ [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */
+ [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */
+ [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */
+ [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */
+ [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */
+ [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */
+ [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */
+ [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */
+ [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */
+ [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */
+ [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */
+ [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */
+ [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */
+ [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */
+ [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */
+ [12] = RCAR_GP_PIN(1, 12), /* HTX0 */
+ [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */
+ [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */
+ [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */
+ [16] = RCAR_GP_PIN(1, 16), /* HRX0 */
+ [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */
+ [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */
+ [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */
+ [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */
+ [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */
+ [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */
+ [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */
+ [24] = RCAR_GP_PIN(1, 24), /* HRX3 */
+ [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */
+ [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */
+ [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */
+ [28] = RCAR_GP_PIN(1, 28), /* HTX3 */
+ [29] = RCAR_GP_PIN(1, 29), /* ERROROUTC_N */
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */
+ [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */
+ [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */
+ [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */
+ [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */
+ [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */
+ [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */
+ [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */
+ [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */
+ [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */
+ [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */
+ [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */
+ [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */
+ [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */
+ [16] = SH_PFC_PIN_NONE,
+ [17] = RCAR_GP_PIN(2, 17), /* CANFD1_TX */
+ [18] = SH_PFC_PIN_NONE,
+ [19] = RCAR_GP_PIN(2, 19), /* CANFD1_RX */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */
+ [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */
+ [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */
+ [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */
+ [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */
+ [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */
+ [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */
+ [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */
+ [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */
+ [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */
+ [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */
+ [11] = RCAR_GP_PIN(3, 11), /* SD_CD */
+ [12] = RCAR_GP_PIN(3, 12), /* SD_WP */
+ [13] = RCAR_GP_PIN(3, 13), /* PWM1 */
+ [14] = RCAR_GP_PIN(3, 14), /* PWM2 */
+ [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */
+ [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */
+ [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */
+ [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */
+ [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */
+ [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */
+ [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */
+ [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */
+ [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */
+ [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */
+ [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */
+ [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */
+ [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */
+ [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */
+ [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */
+ [30] = RCAR_GP_PIN(3, 30), /* TCLK3 */
+ [31] = RCAR_GP_PIN(3, 31), /* TCLK4 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* SCL0 */
+ [ 1] = RCAR_GP_PIN(4, 1), /* SDA0 */
+ [ 2] = RCAR_GP_PIN(4, 2), /* SCL1 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* SDA1 */
+ [ 4] = RCAR_GP_PIN(4, 4), /* SCL2 */
+ [ 5] = RCAR_GP_PIN(4, 5), /* SDA2 */
+ [ 6] = RCAR_GP_PIN(4, 6), /* SCL3 */
+ [ 7] = RCAR_GP_PIN(4, 7), /* SDA3 */
+ [ 8] = RCAR_GP_PIN(4, 8), /* HRX2 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* HTX2 */
+ [10] = RCAR_GP_PIN(4, 10), /* HRTS2_N */
+ [11] = RCAR_GP_PIN(4, 11), /* SCIF_CLK2 */
+ [12] = RCAR_GP_PIN(4, 12), /* HCTS2_N */
+ [13] = RCAR_GP_PIN(4, 13), /* HSCK2 */
+ [14] = RCAR_GP_PIN(4, 14), /* PWM3 */
+ [15] = RCAR_GP_PIN(4, 15), /* PWM4 */
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = SH_PFC_PIN_NONE,
+ [23] = RCAR_GP_PIN(4, 23), /* AVS0 */
+ [24] = RCAR_GP_PIN(4, 24), /* AVS1 */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(5, 1), /* AVB2_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */
+ [14] = RCAR_GP_PIN(6, 14), /* AVB1_RD1 */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations r8a779h0_pin_ops = {
+ .pin_to_pocctrl = r8a779h0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779h0_pinmux_info = {
+ .name = "r8a779h0_pfc",
+ .ops = &r8a779h0_pin_ops,
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 80fb5011c7bb..eb5a8c654260 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Renesas Electronics Corporation.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
@@ -38,8 +39,6 @@
*/
#define MUX_PIN_ID_MASK GENMASK(15, 0)
#define MUX_FUNC_MASK GENMASK(31, 16)
-#define MUX_FUNC_OFFS 16
-#define MUX_FUNC(pinconf) (((pinconf) & MUX_FUNC_MASK) >> MUX_FUNC_OFFS)
/* PIN capabilities */
#define PIN_CFG_IOLH_A BIT(0)
@@ -58,6 +57,8 @@
#define PIN_CFG_IOLH_C BIT(13)
#define PIN_CFG_SOFT_PS BIT(14)
#define PIN_CFG_OEN BIT(15)
+#define PIN_CFG_VARIABLE BIT(16)
+#define PIN_CFG_NOGPIO_INT BIT(17)
#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
(PIN_CFG_IOLH_##group | \
@@ -77,27 +78,41 @@
PIN_CFG_FILNUM | \
PIN_CFG_FILCLKSEL)
+#define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(35, 28)
+#define PIN_CFG_PIN_REG_MASK GENMASK(27, 20)
+#define PIN_CFG_MASK GENMASK(19, 0)
+
+/*
+ * m indicates the bitmap of supported pins, a is the register index
+ * and f is pin configuration capabilities supported.
+ */
+#define RZG2L_GPIO_PORT_SPARSE_PACK(m, a, f) (FIELD_PREP_CONST(PIN_CFG_PIN_MAP_MASK, (m)) | \
+ FIELD_PREP_CONST(PIN_CFG_PIN_REG_MASK, (a)) | \
+ FIELD_PREP_CONST(PIN_CFG_MASK, (f)))
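+/*
+ * For example, RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, f) describes the port
+ * at register index 0x06 with only pin 1 populated (pin bitmap 0b00000010).
+ */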
+
/*
* n indicates number of pins in the port, a is the register index
* and f is pin configuration capabilities supported.
*/
-#define RZG2L_GPIO_PORT_PACK(n, a, f) (((n) << 28) | ((a) << 20) | (f))
-#define RZG2L_GPIO_PORT_GET_PINCNT(x) (((x) & GENMASK(30, 28)) >> 28)
+#define RZG2L_GPIO_PORT_PACK(n, a, f) RZG2L_GPIO_PORT_SPARSE_PACK((1ULL << (n)) - 1, (a), (f))
/*
- * BIT(31) indicates dedicated pin, p is the register index while
+ * BIT(63) indicates dedicated pin, p is the register index while
* referencing to SR/IEN/IOLH/FILxx registers, b is the register bits
* (b * 8) and f is the pin configuration capabilities supported.
*/
-#define RZG2L_SINGLE_PIN BIT(31)
+#define RZG2L_SINGLE_PIN BIT_ULL(63)
+#define RZG2L_SINGLE_PIN_INDEX_MASK GENMASK(30, 24)
+#define RZG2L_SINGLE_PIN_BITS_MASK GENMASK(22, 20)
+
#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
- ((p) << 24) | ((b) << 20) | (f))
-#define RZG2L_SINGLE_PIN_GET_BIT(x) (((x) & GENMASK(22, 20)) >> 20)
+ FIELD_PREP_CONST(RZG2L_SINGLE_PIN_INDEX_MASK, (p)) | \
+ FIELD_PREP_CONST(RZG2L_SINGLE_PIN_BITS_MASK, (b)) | \
+ FIELD_PREP_CONST(PIN_CFG_MASK, (f)))
-#define RZG2L_PIN_CFG_TO_CAPS(cfg) ((cfg) & GENMASK(19, 0))
#define RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg) ((cfg) & RZG2L_SINGLE_PIN ? \
- (((cfg) & GENMASK(30, 24)) >> 24) : \
- (((cfg) & GENMASK(26, 20)) >> 20))
+ FIELD_GET(RZG2L_SINGLE_PIN_INDEX_MASK, (cfg)) : \
+ FIELD_GET(PIN_CFG_PIN_REG_MASK, (cfg)))
#define P(off) (0x0000 + (off))
#define PM(off) (0x0100 + (off) * 2)
@@ -134,6 +149,33 @@
#define RZG2L_TINT_IRQ_START_INDEX 9
#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i))
+/* Read/write 8 bits register */
+#define RZG2L_PCTRL_REG_ACCESS8(_read, _addr, _val) \
+ do { \
+ if (_read) \
+ _val = readb(_addr); \
+ else \
+ writeb(_val, _addr); \
+ } while (0)
+
+/* Read/write 16 bits register */
+#define RZG2L_PCTRL_REG_ACCESS16(_read, _addr, _val) \
+ do { \
+ if (_read) \
+ _val = readw(_addr); \
+ else \
+ writew(_val, _addr); \
+ } while (0)
+
+/* Read/write 32 bits register */
+#define RZG2L_PCTRL_REG_ACCESS32(_read, _addr, _val) \
+ do { \
+ if (_read) \
+ _val = readl(_addr); \
+ else \
+ writel(_val, _addr); \
+ } while (0)
+
/**
* struct rzg2l_register_offsets - specific register offsets
* @pwpr: PWPR register offset
@@ -189,17 +231,31 @@ struct rzg2l_hwcfg {
struct rzg2l_dedicated_configs {
const char *name;
- u32 config;
+ u64 config;
+};
+
+/**
+ * struct rzg2l_variable_pin_cfg - variable pin configuration
+ * @cfg: port pin configuration
+ * @port: port number
+ * @pin: port pin
+ */
+struct rzg2l_variable_pin_cfg {
+ u32 cfg:20;
+ u32 port:5;
+ u32 pin:3;
};
struct rzg2l_pinctrl_data {
const char * const *port_pins;
- const u32 *port_pin_configs;
+ const u64 *port_pin_configs;
unsigned int n_ports;
const struct rzg2l_dedicated_configs *dedicated_pins;
unsigned int n_port_pins;
unsigned int n_dedicated_pins;
const struct rzg2l_hwcfg *hwcfg;
+ const struct rzg2l_variable_pin_cfg *variable_pin_cfg;
+ unsigned int n_variable_pin_cfg;
};
/**
@@ -212,6 +268,32 @@ struct rzg2l_pinctrl_pin_settings {
u16 drive_strength_ua;
};
+/**
+ * struct rzg2l_pinctrl_reg_cache - register cache structure (to be used in suspend/resume)
+ * @p: P registers cache
+ * @pm: PM registers cache
+ * @pmc: PMC registers cache
+ * @pfc: PFC registers cache
+ * @iolh: IOLH registers cache
+ * @ien: IEN registers cache
+ * @sd_ch: SD_CH registers cache
+ * @eth_poc: ET_POC registers cache
+ * @eth_mode: ETH_MODE register cache
+ * @qspi: QSPI registers cache
+ */
+struct rzg2l_pinctrl_reg_cache {
+ u8 *p;
+ u16 *pm;
+ u8 *pmc;
+ u32 *pfc;
+ u32 *iolh[2];
+ u32 *ien[2];
+ u8 sd_ch[2];
+ u8 eth_poc[2];
+ u8 eth_mode;
+ u8 qspi;
+};
+
struct rzg2l_pinctrl {
struct pinctrl_dev *pctl;
struct pinctrl_desc desc;
@@ -221,6 +303,8 @@ struct rzg2l_pinctrl {
void __iomem *base;
struct device *dev;
+ struct clk *clk;
+
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT);
@@ -231,10 +315,150 @@ struct rzg2l_pinctrl {
struct mutex mutex; /* serialize adding groups and functions */
struct rzg2l_pinctrl_pin_settings *settings;
+ struct rzg2l_pinctrl_reg_cache *cache;
+ struct rzg2l_pinctrl_reg_cache *dedicated_cache;
+ atomic_t wakeup_path;
};
static const u16 available_ps[] = { 1800, 2500, 3300 };
+#ifdef CONFIG_RISCV
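+/*
+ * Resolve a PIN_CFG_VARIABLE placeholder: replace it with the per-pin
+ * capabilities listed in the SoC variable_pin_cfg table, or return 0 if
+ * the pin has no entry.
+ */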
+static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
+ u64 pincfg,
+ unsigned int port,
+ u8 pin)
+{
+ unsigned int i;
+
+ for (i = 0; i < pctrl->data->n_variable_pin_cfg; i++) {
+ if (pctrl->data->variable_pin_cfg[i].port == port &&
+ pctrl->data->variable_pin_cfg[i].pin == pin)
+ return (pincfg & ~PIN_CFG_VARIABLE) | pctrl->data->variable_pin_cfg[i].cfg;
+ }
+
+ return 0;
+}
+
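+/*
+ * Per-pin capabilities for the RZ/Five ports flagged PIN_CFG_VARIABLE
+ * below (P20, P23 and P24).
+ */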
+static const struct rzg2l_variable_pin_cfg r9a07g043f_variable_pin_cfg[] = {
+ {
+ .port = 20,
+ .pin = 0,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 1,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 2,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 3,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 4,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 5,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 6,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 20,
+ .pin = 7,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 1,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT
+ },
+ {
+ .port = 23,
+ .pin = 2,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 3,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 4,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 23,
+ .pin = 5,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 0,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 1,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 2,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 3,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 4,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT,
+ },
+ {
+ .port = 24,
+ .pin = 5,
+ .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NOGPIO_INT,
+ },
+};
+#endif
+
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
u8 pin, u8 off, u8 func)
{
@@ -295,7 +519,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
pins = group->grp.pins;
for (i = 0; i < group->grp.npins; i++) {
- unsigned int *pin_data = pctrl->desc.pins[pins[i]].drv_data;
+ u64 *pin_data = pctrl->desc.pins[pins[i]].drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 pin = RZG2L_PIN_ID_TO_PIN(pins[i]);
@@ -432,8 +656,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
ret = of_property_read_u32_index(np, "pinmux", i, &value);
if (ret)
goto done;
- pins[i] = value & MUX_PIN_ID_MASK;
- psel_val[i] = MUX_FUNC(value);
+ pins[i] = FIELD_GET(MUX_PIN_ID_MASK, value);
+ psel_val[i] = FIELD_GET(MUX_FUNC_MASK, value);
}
if (parent) {
@@ -447,6 +671,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
+ if (num_configs) {
+ ret = rzg2l_map_add_config(&maps[idx], name,
+ PIN_MAP_TYPE_CONFIGS_GROUP,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+
mutex_lock(&pctrl->mutex);
/* Register a single pin group listing all the pins we read from DT */
@@ -474,16 +708,6 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
maps[idx].data.mux.function = name;
idx++;
- if (num_configs) {
- ret = rzg2l_map_add_config(&maps[idx], name,
- PIN_MAP_TYPE_CONFIGS_GROUP,
- configs, num_configs);
- if (ret < 0)
- goto remove_group;
-
- idx++;
- }
-
dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
ret = 0;
goto done;
@@ -558,13 +782,13 @@ done:
}
static int rzg2l_validate_gpio_pin(struct rzg2l_pinctrl *pctrl,
- u32 cfg, u32 port, u8 bit)
+ u64 cfg, u32 port, u8 bit)
{
- u8 pincount = RZG2L_GPIO_PORT_GET_PINCNT(cfg);
+ u8 pinmap = FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg);
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
- u32 data;
+ u64 data;
- if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ if (!(pinmap & BIT(bit)) || port >= pctrl->data->n_port_pins)
return -EINVAL;
data = pctrl->data->port_pin_configs[port];
@@ -856,7 +1080,7 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
enum pin_config_param param = pinconf_to_config_param(*config);
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
- unsigned int *pin_data = pin->drv_data;
+ u64 *pin_data = pin->drv_data;
unsigned int arg = 0;
u32 off, cfg;
int ret;
@@ -866,9 +1090,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
return -EINVAL;
off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
- cfg = RZG2L_PIN_CFG_TO_CAPS(*pin_data);
+ cfg = FIELD_GET(PIN_CFG_MASK, *pin_data);
if (*pin_data & RZG2L_SINGLE_PIN) {
- bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ bit = FIELD_GET(RZG2L_SINGLE_PIN_BITS_MASK, *pin_data);
} else {
bit = RZG2L_PIN_ID_TO_PIN(_pin);
@@ -959,7 +1183,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct rzg2l_pinctrl_pin_settings settings = pctrl->settings[_pin];
- unsigned int *pin_data = pin->drv_data;
+ u64 *pin_data = pin->drv_data;
enum pin_config_param param;
unsigned int i, arg, index;
u32 cfg, off;
@@ -970,9 +1194,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
return -EINVAL;
off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
- cfg = RZG2L_PIN_CFG_TO_CAPS(*pin_data);
+ cfg = FIELD_GET(PIN_CFG_MASK, *pin_data);
if (*pin_data & RZG2L_SINGLE_PIN) {
- bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ bit = FIELD_GET(RZG2L_SINGLE_PIN_BITS_MASK, *pin_data);
} else {
bit = RZG2L_PIN_ID_TO_PIN(_pin);
@@ -1164,7 +1388,7 @@ static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- u32 *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 port = RZG2L_PIN_ID_TO_PORT(offset);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
@@ -1196,7 +1420,7 @@ static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 offset,
bool output)
{
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
@@ -1217,7 +1441,7 @@ static int rzg2l_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
@@ -1248,7 +1472,7 @@ static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
unsigned long flags;
@@ -1281,7 +1505,7 @@ static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
u16 reg16;
@@ -1366,7 +1590,7 @@ static const char * const rzg2l_gpio_names[] = {
"P48_0", "P48_1", "P48_2", "P48_3", "P48_4", "P48_5", "P48_6", "P48_7",
};
-static const u32 r9a07g044_gpio_configs[] = {
+static const u64 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(2, 0x10, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x11, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x12, RZG2L_MPXED_PIN_FUNCS),
@@ -1418,7 +1642,7 @@ static const u32 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(5, 0x40, RZG2L_MPXED_PIN_FUNCS),
};
-static const u32 r9a07g043_gpio_configs[] = {
+static const u64 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(4, 0x10, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(5, 0x11, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(4, 0x12, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
@@ -1438,9 +1662,28 @@ static const u32 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(2, 0x20, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(4, 0x21, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(6, 0x22, RZG2L_MPXED_PIN_FUNCS),
+#ifdef CONFIG_RISCV
+	/* The additional port pins below (P19 - P28) are available only on the RZ/Five SoC */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
+ RZG2L_GPIO_PORT_PACK(8, 0x07, PIN_CFG_VARIABLE), /* P20 */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x08, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P21 */
+ RZG2L_GPIO_PORT_PACK(4, 0x09, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P22 */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x3e, 0x0a, PIN_CFG_VARIABLE), /* P23 */
+ RZG2L_GPIO_PORT_PACK(6, 0x0b, PIN_CFG_VARIABLE), /* P24 */
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_FILONOFF |
+ PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NOGPIO_INT), /* P25 */
+ 0x0, /* P26 */
+ 0x0, /* P27 */
+ RZG2L_GPIO_PORT_PACK(6, 0x0f, RZG2L_MPXED_PIN_FUNCS | PIN_CFG_NOGPIO_INT), /* P28 */
+#endif
};
-static const u32 r9a08g045_gpio_configs[] = {
+static const u64 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(4, 0x20, RZG3S_MPXED_PIN_FUNCS(A)), /* P0 */
RZG2L_GPIO_PORT_PACK(5, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
PIN_CFG_IO_VMC_ETH0)) |
@@ -1598,40 +1841,42 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
PIN_CFG_IO_VMC_SD1)) },
};
-static int rzg2l_gpio_get_gpioint(unsigned int virq, const struct rzg2l_pinctrl_data *data)
+static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl)
{
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[virq];
+ const struct rzg2l_pinctrl_data *data = pctrl->data;
+ u64 *pin_data = pin_desc->drv_data;
unsigned int gpioint;
unsigned int i;
u32 port, bit;
+ if (*pin_data & PIN_CFG_NOGPIO_INT)
+ return -EINVAL;
+
port = virq / 8;
bit = virq % 8;
if (port >= data->n_ports ||
- bit >= RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[port]))
+ bit >= hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, data->port_pin_configs[port])))
return -EINVAL;
gpioint = bit;
for (i = 0; i < port; i++)
- gpioint += RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[i]);
+ gpioint += hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, data->port_pin_configs[i]));
return gpioint;
}
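/*
 * Stand-alone sketch (not driver code) of how the global GPIO interrupt index
 * above is derived: popcount the per-port pin maps, mirroring the
 * hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, ...)) accumulation.  The pin maps
 * and the 8-pins-per-port split are invented for the example.
 */
#include <stdio.h>

static unsigned int hweight8(unsigned char v)
{
	return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
	/* Hypothetical per-port pin maps: P0 has 4 pins, P1 has 5, P2 has 4. */
	const unsigned char pin_map[] = { 0x0f, 0x1f, 0x0f };
	unsigned int virq = 18;			/* port 2, bit 2 */
	unsigned int port = virq / 8, bit = virq % 8;
	unsigned int gpioint = bit;

	if (bit >= hweight8(pin_map[port])) {
		printf("virq %u: no GPIO interrupt\n", virq);
		return 0;
	}
	for (unsigned int i = 0; i < port; i++)
		gpioint += hweight8(pin_map[i]);
	printf("virq %u -> gpioint %u\n", virq, gpioint);
	return 0;
}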
-static void rzg2l_gpio_irq_disable(struct irq_data *d)
+static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl,
+ unsigned int hwirq, bool enable)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
- unsigned int hwirq = irqd_to_hwirq(d);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
- unsigned int *pin_data = pin_desc->drv_data;
+ u64 *pin_data = pin_desc->drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
unsigned long flags;
void __iomem *addr;
- irq_chip_disable_parent(d);
-
addr = pctrl->base + ISEL(off);
if (bit >= 4) {
bit -= 4;
@@ -1639,36 +1884,28 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
}
spin_lock_irqsave(&pctrl->lock, flags);
- writel(readl(addr) & ~BIT(bit * 8), addr);
+ if (enable)
+ writel(readl(addr) | BIT(bit * 8), addr);
+ else
+ writel(readl(addr) & ~BIT(bit * 8), addr);
spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+static void rzg2l_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ irq_chip_disable_parent(d);
gpiochip_disable_irq(gc, hwirq);
}
static void rzg2l_gpio_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
unsigned int hwirq = irqd_to_hwirq(d);
- const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq];
- unsigned int *pin_data = pin_desc->drv_data;
- u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
- u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq);
- unsigned long flags;
- void __iomem *addr;
gpiochip_enable_irq(gc, hwirq);
-
- addr = pctrl->base + ISEL(off);
- if (bit >= 4) {
- bit -= 4;
- addr += 4;
- }
-
- spin_lock_irqsave(&pctrl->lock, flags);
- writel(readl(addr) | BIT(bit * 8), addr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
-
irq_chip_enable_parent(d);
}
@@ -1689,6 +1926,28 @@ static void rzg2l_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
seq_printf(p, dev_name(gc->parent));
}
+static int rzg2l_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip);
+ int ret;
+
+	/* This should not happen. */
+ if (!data->parent_data)
+ return -EOPNOTSUPP;
+
+ ret = irq_chip_set_wake_parent(data, on);
+ if (ret)
+ return ret;
+
+ if (on)
+ atomic_inc(&pctrl->wakeup_path);
+ else
+ atomic_dec(&pctrl->wakeup_path);
+
+ return 0;
+}
+
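/*
 * Illustrative sketch of the wake-source refcounting idea introduced above:
 * irq_set_wake(on) bumps a counter and the suspend handler only gates the
 * module clock when no GPIO is armed as a wakeup source.  Plain C stand-ins
 * are used here, not the kernel atomic_t / clk APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int wakeup_path;		/* stand-in for atomic_t pctrl->wakeup_path */

static void example_irq_set_wake(bool on)
{
	wakeup_path += on ? 1 : -1;
}

static void example_suspend(void)
{
	if (!wakeup_path)
		printf("no wake sources: disable pinctrl clock\n");
	else
		printf("%d wake source(s): keep clock, mark wakeup path\n",
		       wakeup_path);
}

int main(void)
{
	example_irq_set_wake(true);	/* a GPIO armed for wake */
	example_suspend();
	example_irq_set_wake(false);
	example_suspend();
	return 0;
}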
static const struct irq_chip rzg2l_gpio_irqchip = {
.name = "rzg2l-gpio",
.irq_disable = rzg2l_gpio_irq_disable,
@@ -1699,10 +1958,31 @@ static const struct irq_chip rzg2l_gpio_irqchip = {
.irq_eoi = rzg2l_gpio_irqc_eoi,
.irq_print_chip = rzg2l_gpio_irq_print_chip,
.irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_wake = rzg2l_gpio_irq_set_wake,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
+static int rzg2l_gpio_interrupt_input_mode(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
+ u64 *pin_data = pin_desc->drv_data;
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
+ u8 bit = RZG2L_PIN_ID_TO_PIN(offset);
+ u8 reg8;
+ int ret;
+
+ reg8 = readb(pctrl->base + PMC(off));
+ if (reg8 & BIT(bit)) {
+ ret = rzg2l_gpio_request(chip, offset);
+ if (ret)
+ return ret;
+ }
+
+ return rzg2l_gpio_direction_input(chip, offset);
+}
+
static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int child,
unsigned int child_type,
@@ -1712,16 +1992,25 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
int gpioint, irq;
+ int ret;
- gpioint = rzg2l_gpio_get_gpioint(child, pctrl->data);
+ gpioint = rzg2l_gpio_get_gpioint(child, pctrl);
if (gpioint < 0)
return gpioint;
+ ret = rzg2l_gpio_interrupt_input_mode(gc, child);
+ if (ret)
+ return ret;
+
spin_lock_irqsave(&pctrl->bitmap_lock, flags);
irq = bitmap_find_free_region(pctrl->tint_slot, RZG2L_TINT_MAX_INTERRUPT, get_order(1));
spin_unlock_irqrestore(&pctrl->bitmap_lock, flags);
- if (irq < 0)
- return -ENOSPC;
+ if (irq < 0) {
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ rzg2l_gpio_irq_endisable(pctrl, child, true);
pctrl->hwirq[irq] = child;
irq += RZG2L_TINT_IRQ_START_INDEX;
@@ -1729,6 +2018,10 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
*parent_type = IRQ_TYPE_LEVEL_HIGH;
*parent = RZG2L_PACK_HWIRQ(gpioint, irq);
return 0;
+
+err:
+ rzg2l_gpio_free(gc, child);
+ return ret;
}
static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip,
@@ -1746,6 +2039,35 @@ static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip,
return 0;
}
+static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
+{
+ struct irq_domain *domain = pctrl->gpio_chip.irq.domain;
+
+ for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
+ struct irq_data *data;
+ unsigned int virq;
+
+ if (!pctrl->hwirq[i])
+ continue;
+
+ virq = irq_find_mapping(domain, pctrl->hwirq[i]);
+ if (!virq) {
+ dev_crit(pctrl->dev, "Failed to find IRQ mapping for hwirq %u\n",
+ pctrl->hwirq[i]);
+ continue;
+ }
+
+ data = irq_domain_get_irq_data(domain, virq);
+ if (!data) {
+ dev_crit(pctrl->dev, "Failed to get IRQ data for virq=%u\n", virq);
+ continue;
+ }
+
+ if (!irqd_irq_disabled(data))
+ rzg2l_gpio_irq_enable(data);
+ }
+}
+
static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
@@ -1761,6 +2083,8 @@ static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int v
for (i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
if (pctrl->hwirq[i] == hwirq) {
+ rzg2l_gpio_irq_endisable(pctrl, hwirq, false);
+ rzg2l_gpio_free(gc, hwirq);
spin_lock_irqsave(&pctrl->bitmap_lock, flags);
bitmap_release_region(pctrl->tint_slot, i, get_order(1));
spin_unlock_irqrestore(&pctrl->bitmap_lock, flags);
@@ -1788,11 +2112,74 @@ static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc,
bit = offset % 8;
if (port >= pctrl->data->n_ports ||
- bit >= RZG2L_GPIO_PORT_GET_PINCNT(pctrl->data->port_pin_configs[port]))
+ bit >= hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK,
+ pctrl->data->port_pin_configs[port])))
clear_bit(offset, valid_mask);
}
}
+static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
+{
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ struct rzg2l_pinctrl_reg_cache *cache, *dedicated_cache;
+
+ cache = devm_kzalloc(pctrl->dev, sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return -ENOMEM;
+
+ dedicated_cache = devm_kzalloc(pctrl->dev, sizeof(*dedicated_cache), GFP_KERNEL);
+ if (!dedicated_cache)
+ return -ENOMEM;
+
+ cache->p = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->p), GFP_KERNEL);
+ if (!cache->p)
+ return -ENOMEM;
+
+ cache->pm = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pm), GFP_KERNEL);
+ if (!cache->pm)
+ return -ENOMEM;
+
+ cache->pmc = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pmc), GFP_KERNEL);
+ if (!cache->pmc)
+ return -ENOMEM;
+
+ cache->pfc = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pfc), GFP_KERNEL);
+ if (!cache->pfc)
+ return -ENOMEM;
+
+ for (u8 i = 0; i < 2; i++) {
+ u32 n_dedicated_pins = pctrl->data->n_dedicated_pins;
+
+ cache->iolh[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->iolh[i]),
+ GFP_KERNEL);
+ if (!cache->iolh[i])
+ return -ENOMEM;
+
+ cache->ien[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->ien[i]),
+ GFP_KERNEL);
+ if (!cache->ien[i])
+ return -ENOMEM;
+
+ /* Allocate dedicated cache. */
+ dedicated_cache->iolh[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
+ sizeof(*dedicated_cache->iolh[i]),
+ GFP_KERNEL);
+ if (!dedicated_cache->iolh[i])
+ return -ENOMEM;
+
+ dedicated_cache->ien[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
+ sizeof(*dedicated_cache->ien[i]),
+ GFP_KERNEL);
+ if (!dedicated_cache->ien[i])
+ return -ENOMEM;
+ }
+
+ pctrl->cache = cache;
+ pctrl->dedicated_cache = dedicated_cache;
+
+ return 0;
+}
+
static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
{
struct device_node *np = pctrl->dev->of_node;
@@ -1870,7 +2257,7 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct pinctrl_pin_desc *pins;
unsigned int i, j;
- u32 *pin_data;
+ u64 *pin_data;
int ret;
pctrl->desc.name = DRV_NAME;
@@ -1898,6 +2285,13 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
if (i && !(i % RZG2L_PINS_PER_PORT))
j++;
pin_data[i] = pctrl->data->port_pin_configs[j];
+#ifdef CONFIG_RISCV
+ if (pin_data[i] & PIN_CFG_VARIABLE)
+ pin_data[i] = rzg2l_pinctrl_get_variable_pin_cfg(pctrl,
+ pin_data[i],
+ j,
+ i % RZG2L_PINS_PER_PORT);
+#endif
pins[i].drv_data = &pin_data[i];
}
@@ -1926,6 +2320,10 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
}
}
+ ret = rzg2l_pinctrl_reg_cache_alloc(pctrl);
+ if (ret)
+ return ret;
+
ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
&pctrl->pctl);
if (ret) {
@@ -1951,7 +2349,6 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
static int rzg2l_pinctrl_probe(struct platform_device *pdev)
{
struct rzg2l_pinctrl *pctrl;
- struct clk *clk;
int ret;
BUILD_BUG_ON(ARRAY_SIZE(r9a07g044_gpio_configs) * RZG2L_PINS_PER_PORT >
@@ -1977,14 +2374,16 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
- clk = devm_clk_get_enabled(pctrl->dev, NULL);
- if (IS_ERR(clk))
- return dev_err_probe(pctrl->dev, PTR_ERR(clk),
+ pctrl->clk = devm_clk_get_enabled(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ return dev_err_probe(pctrl->dev, PTR_ERR(pctrl->clk),
"failed to enable GPIO clk\n");
+ }
spin_lock_init(&pctrl->lock);
spin_lock_init(&pctrl->bitmap_lock);
mutex_init(&pctrl->mutex);
+ atomic_set(&pctrl->wakeup_path, 0);
platform_set_drvdata(pdev, pctrl);
@@ -1996,6 +2395,224 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspend)
+{
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+
+ for (u32 port = 0; port < nports; port++) {
+ bool has_iolh, has_ien;
+ u32 off, caps;
+ u8 pincnt;
+ u64 cfg;
+
+ cfg = pctrl->data->port_pin_configs[port];
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ pincnt = hweight8(FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg));
+
+ caps = FIELD_GET(PIN_CFG_MASK, cfg);
+ has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
+ has_ien = !!(caps & PIN_CFG_IEN);
+
+ if (suspend)
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
+
+ /*
+	 * Now cache the registers, or set them, in the order suggested by the
+	 * HW manual (section "Operation for GPIO Function").
+ */
+ RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + PMC(off), cache->pmc[port]);
+ if (has_iolh) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IOLH(off),
+ cache->iolh[0][port]);
+ if (pincnt >= 4) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IOLH(off) + 4,
+ cache->iolh[1][port]);
+ }
+ }
+
+ RZG2L_PCTRL_REG_ACCESS16(suspend, pctrl->base + PM(off), cache->pm[port]);
+ RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + P(off), cache->p[port]);
+
+ if (has_ien) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IEN(off),
+ cache->ien[0][port]);
+ if (pincnt >= 4) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IEN(off) + 4,
+ cache->ien[1][port]);
+ }
+ }
+ }
+}
+
+static void rzg2l_pinctrl_pm_setup_dedicated_regs(struct rzg2l_pinctrl *pctrl, bool suspend)
+{
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->dedicated_cache;
+
+ /*
+	 * Make sure entries in pctrl->data->dedicated_pins[] that share the same
+	 * port offset are close together.
+ */
+ for (u32 i = 0, caps = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ bool has_iolh, has_ien;
+ u32 off, next_off = 0;
+ u64 cfg, next_cfg;
+ u8 pincnt;
+
+ cfg = pctrl->data->dedicated_pins[i].config;
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ if (i + 1 < pctrl->data->n_dedicated_pins) {
+ next_cfg = pctrl->data->dedicated_pins[i + 1].config;
+ next_off = RZG2L_PIN_CFG_TO_PORT_OFFSET(next_cfg);
+ }
+
+ if (off == next_off) {
+ /* Gather caps of all port pins. */
+ caps |= FIELD_GET(PIN_CFG_MASK, cfg);
+ continue;
+ }
+
+ /* And apply them in a single shot. */
+ has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
+ has_ien = !!(caps & PIN_CFG_IEN);
+ pincnt = hweight8(FIELD_GET(RZG2L_SINGLE_PIN_BITS_MASK, cfg));
+
+ if (has_iolh) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IOLH(off),
+ cache->iolh[0][i]);
+ }
+ if (has_ien) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + IEN(off),
+ cache->ien[0][i]);
+ }
+
+ if (pincnt >= 4) {
+ if (has_iolh) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend,
+ pctrl->base + IOLH(off) + 4,
+ cache->iolh[1][i]);
+ }
+ if (has_ien) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend,
+ pctrl->base + IEN(off) + 4,
+ cache->ien[1][i]);
+ }
+ }
+ caps = 0;
+ }
+}
+
+static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl)
+{
+ u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+
+	/* Set the PWPR register to allow writes to the PFC registers. */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_PFCWE, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=1 */
+
+ /* Restore port registers. */
+ for (u32 port = 0; port < nports; port++) {
+ unsigned long pinmap;
+ u8 pmc = 0, max_pin;
+ u32 off, pfc = 0;
+ u64 cfg;
+ u16 pm;
+ u8 pin;
+
+ cfg = pctrl->data->port_pin_configs[port];
+ off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ pinmap = FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg);
+ max_pin = fls(pinmap);
+
+ pm = readw(pctrl->base + PM(off));
+ for_each_set_bit(pin, &pinmap, max_pin) {
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+
+ /* Nothing to do if PFC was not configured before. */
+ if (!(cache->pmc[port] & BIT(pin)))
+ continue;
+
+ /* Set pin to 'Non-use (Hi-Z input protection)' */
+ pm &= ~(PM_MASK << (pin * 2));
+ writew(pm, pctrl->base + PM(off));
+
+ /* Temporarily switch to GPIO mode with PMC register */
+ pmc &= ~BIT(pin);
+ writeb(pmc, pctrl->base + PMC(off));
+
+ /* Select Pin function mode. */
+ pfc &= ~(PFC_MASK << (pin * 4));
+ pfc |= (cache->pfc[port] & (PFC_MASK << (pin * 4)));
+ writel(pfc, pctrl->base + PFC(off));
+
+ /* Switch to Peripheral pin function. */
+ pmc |= BIT(pin);
+ writeb(pmc, pctrl->base + PMC(off));
+ }
+ }
+
+ /* Set the PWPR register to be write-protected. */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_B0WI, pctrl->base + regs->pwpr); /* B0WI=1, PFCWE=0 */
+}
+
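/*
 * Stand-alone model of the PFC write-enable sequence driven above: PWPR.B0WI
 * is cleared before PFCWE can be set, the PFC value is programmed, and the
 * registers are locked again afterwards.  Register behaviour is mocked with
 * plain variables; the bit positions are illustrative, only the bit names
 * (PWPR_PFCWE, PWPR_B0WI) follow the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_PWPR_PFCWE (1u << 6)	/* illustrative position */
#define EX_PWPR_B0WI  (1u << 7)	/* illustrative position */

static unsigned int pwpr;
static unsigned int pfc;

static bool pfc_write(unsigned int val)
{
	if (!(pwpr & EX_PWPR_PFCWE))
		return false;		/* writes ignored while locked */
	pfc = val;
	return true;
}

int main(void)
{
	pwpr = EX_PWPR_B0WI;		/* reset state: locked            */
	printf("locked write ok: %d\n", pfc_write(0x1));

	pwpr = 0;			/* B0WI=0, PFCWE=0                */
	pwpr = EX_PWPR_PFCWE;		/* B0WI=0, PFCWE=1: writes allowed */
	printf("unlocked write ok: %d\n", pfc_write(0x2));

	pwpr = 0;			/* B0WI=0, PFCWE=0                */
	pwpr = EX_PWPR_B0WI;		/* B0WI=1: locked again           */
	printf("pfc = 0x%x\n", pfc);
	return 0;
}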
+static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
+{
+ struct rzg2l_pinctrl *pctrl = dev_get_drvdata(dev);
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+
+ rzg2l_pinctrl_pm_setup_regs(pctrl, true);
+ rzg2l_pinctrl_pm_setup_dedicated_regs(pctrl, true);
+
+ for (u8 i = 0; i < 2; i++) {
+ cache->sd_ch[i] = readb(pctrl->base + SD_CH(regs->sd_ch, i));
+ cache->eth_poc[i] = readb(pctrl->base + ETH_POC(regs->eth_poc, i));
+ }
+
+ cache->qspi = readb(pctrl->base + QSPI);
+ cache->eth_mode = readb(pctrl->base + ETH_MODE);
+
+ if (!atomic_read(&pctrl->wakeup_path))
+ clk_disable_unprepare(pctrl->clk);
+ else
+ device_set_wakeup_path(dev);
+
+ return 0;
+}
+
+static int rzg2l_pinctrl_resume_noirq(struct device *dev)
+{
+ struct rzg2l_pinctrl *pctrl = dev_get_drvdata(dev);
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+ int ret;
+
+ if (!atomic_read(&pctrl->wakeup_path)) {
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret)
+ return ret;
+ }
+
+ writeb(cache->qspi, pctrl->base + QSPI);
+ writeb(cache->eth_mode, pctrl->base + ETH_MODE);
+ for (u8 i = 0; i < 2; i++) {
+ writeb(cache->sd_ch[i], pctrl->base + SD_CH(regs->sd_ch, i));
+ writeb(cache->eth_poc[i], pctrl->base + ETH_POC(regs->eth_poc, i));
+ }
+
+ rzg2l_pinctrl_pm_setup_pfc(pctrl);
+ rzg2l_pinctrl_pm_setup_regs(pctrl, false);
+ rzg2l_pinctrl_pm_setup_dedicated_regs(pctrl, false);
+ rzg2l_gpio_irq_restore(pctrl);
+
+ return 0;
+}
+
static const struct rzg2l_hwcfg rzg2l_hwcfg = {
.regs = {
.pwpr = 0x3014,
@@ -2049,6 +2666,10 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
.n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
.hwcfg = &rzg2l_hwcfg,
+#ifdef CONFIG_RISCV
+ .variable_pin_cfg = r9a07g043f_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a07g043f_variable_pin_cfg),
+#endif
};
static struct rzg2l_pinctrl_data r9a07g044_data = {
@@ -2088,10 +2709,15 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
{ /* sentinel */ }
};
+static const struct dev_pm_ops rzg2l_pinctrl_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rzg2l_pinctrl_suspend_noirq, rzg2l_pinctrl_resume_noirq)
+};
+
static struct platform_driver rzg2l_pinctrl_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(rzg2l_pinctrl_of_table),
+ .pm = pm_sleep_ptr(&rzg2l_pinctrl_pm_ops),
},
.probe = rzg2l_pinctrl_probe,
};
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 8dc7a66009ad..0061e9640059 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -322,6 +322,7 @@ extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779f0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779g0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779h0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
index 73f091cd827e..23aebd4695e9 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
@@ -2562,7 +2562,7 @@ static const struct of_device_id stm32mp257_pctrl_match[] = {
};
static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
};
static struct platform_driver stm32mp257_pinctrl_driver = {
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 3e88cc92e819..86a3d32a7763 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -409,7 +409,7 @@ static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
if (!rx_buf)
return -ENOMEM;
- spi_bus_lock(ec_spi->spi->master);
+ spi_bus_lock(ec_spi->spi->controller);
/*
* Leave a gap between CS assertion and clocking of data to allow the
@@ -469,7 +469,7 @@ static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
final_ret = terminate_request(ec_dev);
- spi_bus_unlock(ec_spi->spi->master);
+ spi_bus_unlock(ec_spi->spi->controller);
if (!ret)
ret = final_ret;
@@ -554,7 +554,7 @@ static int do_cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
if (!rx_buf)
return -ENOMEM;
- spi_bus_lock(ec_spi->spi->master);
+ spi_bus_lock(ec_spi->spi->controller);
/* Transmit phase - send our message */
debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
@@ -590,7 +590,7 @@ static int do_cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
final_ret = terminate_request(ec_dev);
- spi_bus_unlock(ec_spi->spi->master);
+ spi_bus_unlock(ec_spi->spi->controller);
if (!ret)
ret = final_ret;
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index c1aef3a8fb2d..dd5f370c3168 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -463,7 +463,7 @@ static ssize_t large_icm_show(struct device *dev,
if (res.a0)
return -EPERM;
- return snprintf(buf, PAGE_SIZE, "0x%lx", res.a1);
+ return sysfs_emit(buf, "0x%lx", res.a1);
}
static ssize_t large_icm_store(struct device *dev,
@@ -581,7 +581,7 @@ static ssize_t opn_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)opn_data);
+ return sysfs_emit(buf, "%s", (char *)opn_data);
}
static ssize_t opn_store(struct device *dev,
@@ -632,7 +632,7 @@ static ssize_t sku_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)sku_data);
+ return sysfs_emit(buf, "%s", (char *)sku_data);
}
static ssize_t sku_store(struct device *dev,
@@ -683,7 +683,7 @@ static ssize_t modl_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)modl_data);
+ return sysfs_emit(buf, "%s", (char *)modl_data);
}
static ssize_t modl_store(struct device *dev,
@@ -734,7 +734,7 @@ static ssize_t sn_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)sn_data);
+ return sysfs_emit(buf, "%s", (char *)sn_data);
}
static ssize_t sn_store(struct device *dev,
@@ -785,7 +785,7 @@ static ssize_t uuid_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)uuid_data);
+ return sysfs_emit(buf, "%s", (char *)uuid_data);
}
static ssize_t uuid_store(struct device *dev,
@@ -836,7 +836,7 @@ static ssize_t rev_show(struct device *dev,
}
mutex_unlock(&mfg_ops_lock);
- return snprintf(buf, PAGE_SIZE, "%s", (char *)rev_data);
+ return sysfs_emit(buf, "%s", (char *)rev_data);
}
static ssize_t rev_store(struct device *dev,
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index b1995ac268d7..4ed9c7fd2b62 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -99,8 +99,8 @@
*/
struct mlxbf_pmc_attribute {
struct device_attribute dev_attr;
- int index;
- int nr;
+ unsigned int index;
+ unsigned int nr;
};
/**
@@ -121,7 +121,7 @@ struct mlxbf_pmc_block_info {
void __iomem *mmio_base;
size_t blk_size;
size_t counters;
- int type;
+ unsigned int type;
struct mlxbf_pmc_attribute *attr_counter;
struct mlxbf_pmc_attribute *attr_event;
struct mlxbf_pmc_attribute attr_event_list;
@@ -149,17 +149,17 @@ struct mlxbf_pmc_block_info {
*/
struct mlxbf_pmc_context {
struct platform_device *pdev;
- uint32_t total_blocks;
- uint32_t tile_count;
- uint8_t llt_enable;
- uint8_t mss_enable;
- uint32_t group_num;
+ u32 total_blocks;
+ u32 tile_count;
+ u8 llt_enable;
+ u8 mss_enable;
+ u32 group_num;
struct device *hwmon_dev;
const char *block_name[MLXBF_PMC_MAX_BLOCKS];
struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
bool svc_sreg_support;
- uint32_t sreg_tbl_perf;
+ u32 sreg_tbl_perf;
unsigned int event_set;
};
@@ -169,7 +169,7 @@ struct mlxbf_pmc_context {
* @evt_name: Name of the event
*/
struct mlxbf_pmc_events {
- int evt_num;
+ u32 evt_num;
char *evt_name;
};
@@ -865,8 +865,7 @@ static struct mlxbf_pmc_context *pmc;
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
/* Calls an SMC to access a performance register */
-static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
- uint64_t *result)
+static int mlxbf_pmc_secure_read(void __iomem *addr, u32 command, u64 *result)
{
struct arm_smccc_res res;
int status, err = 0;
@@ -892,8 +891,7 @@ static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
}
/* Read from a performance counter */
-static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
- uint64_t *result)
+static int mlxbf_pmc_read(void __iomem *addr, u32 command, u64 *result)
{
if (pmc->svc_sreg_support)
return mlxbf_pmc_secure_read(addr, command, result);
@@ -907,22 +905,21 @@ static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
}
/* Convenience function for 32-bit reads */
-static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
+static int mlxbf_pmc_readl(void __iomem *addr, u32 *result)
{
- uint64_t read_out;
+ u64 read_out;
int status;
status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
if (status)
return status;
- *result = (uint32_t)read_out;
+ *result = (u32)read_out;
return 0;
}
/* Calls an SMC to access a performance register */
-static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
- uint64_t value)
+static int mlxbf_pmc_secure_write(void __iomem *addr, u32 command, u64 value)
{
struct arm_smccc_res res;
int status, err = 0;
@@ -945,7 +942,7 @@ static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
}
/* Write to a performance counter */
-static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
+static int mlxbf_pmc_write(void __iomem *addr, int command, u64 value)
{
if (pmc->svc_sreg_support)
return mlxbf_pmc_secure_write(addr, command, value);
@@ -959,7 +956,7 @@ static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
}
/* Check if the register offset is within the mapped region for the block */
-static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
+static bool mlxbf_pmc_valid_range(unsigned int blk_num, u32 offset)
{
if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) &&
(offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
@@ -969,33 +966,33 @@ static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
}
/* Get the event list corresponding to a certain block */
-static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
- int *size)
+static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, size_t *psize)
{
const struct mlxbf_pmc_events *events;
+ size_t size;
if (strstr(blk, "tilenet")) {
events = mlxbf_pmc_hnfnet_events;
- *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
+ size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
} else if (strstr(blk, "tile")) {
events = mlxbf_pmc_hnf_events;
- *size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
+ size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
} else if (strstr(blk, "triogen")) {
events = mlxbf_pmc_smgen_events;
- *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "trio")) {
switch (pmc->event_set) {
case MLXBF_PMC_EVENT_SET_BF1:
events = mlxbf_pmc_trio_events_1;
- *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
+ size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
break;
case MLXBF_PMC_EVENT_SET_BF2:
events = mlxbf_pmc_trio_events_2;
- *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
+ size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
break;
default:
events = NULL;
- *size = 0;
+ size = 0;
break;
}
} else if (strstr(blk, "mss")) {
@@ -1003,51 +1000,60 @@ static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
case MLXBF_PMC_EVENT_SET_BF1:
case MLXBF_PMC_EVENT_SET_BF2:
events = mlxbf_pmc_mss_events_1;
- *size = ARRAY_SIZE(mlxbf_pmc_mss_events_1);
+ size = ARRAY_SIZE(mlxbf_pmc_mss_events_1);
break;
case MLXBF_PMC_EVENT_SET_BF3:
events = mlxbf_pmc_mss_events_3;
- *size = ARRAY_SIZE(mlxbf_pmc_mss_events_3);
+ size = ARRAY_SIZE(mlxbf_pmc_mss_events_3);
break;
default:
events = NULL;
- *size = 0;
+ size = 0;
break;
}
} else if (strstr(blk, "ecc")) {
events = mlxbf_pmc_ecc_events;
- *size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
+ size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
} else if (strstr(blk, "pcie")) {
events = mlxbf_pmc_pcie_events;
- *size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
+ size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
} else if (strstr(blk, "l3cache")) {
events = mlxbf_pmc_l3c_events;
- *size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
+ size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
} else if (strstr(blk, "gic")) {
events = mlxbf_pmc_smgen_events;
- *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "smmu")) {
events = mlxbf_pmc_smgen_events;
- *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
+ size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "llt_miss")) {
events = mlxbf_pmc_llt_miss_events;
- *size = ARRAY_SIZE(mlxbf_pmc_llt_miss_events);
+ size = ARRAY_SIZE(mlxbf_pmc_llt_miss_events);
} else if (strstr(blk, "llt")) {
events = mlxbf_pmc_llt_events;
- *size = ARRAY_SIZE(mlxbf_pmc_llt_events);
+ size = ARRAY_SIZE(mlxbf_pmc_llt_events);
} else {
events = NULL;
- *size = 0;
+ size = 0;
}
+ if (psize)
+ *psize = size;
+
return events;
}
+static bool mlxbf_pmc_event_supported(const char *blk)
+{
+ return !!mlxbf_pmc_event_list(blk, NULL);
+}
+
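/*
 * Stand-alone sketch of the optional-size out-parameter pattern adopted
 * above: the lookup tolerates a NULL size pointer so a thin wrapper can probe
 * for support without caring about the table length, mirroring
 * mlxbf_pmc_event_list() / mlxbf_pmc_event_supported().  The table and names
 * here are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct ex_event { unsigned int num; const char *name; };

static const struct ex_event ex_ecc_events[] = {
	{ 0x100, "DISABLE" },
	{ 0x101, "SECC" },
};

static const struct ex_event *ex_event_list(const char *blk, size_t *psize)
{
	const struct ex_event *events = NULL;
	size_t size = 0;

	if (strstr(blk, "ecc")) {
		events = ex_ecc_events;
		size = sizeof(ex_ecc_events) / sizeof(ex_ecc_events[0]);
	}

	if (psize)
		*psize = size;
	return events;
}

static int ex_event_supported(const char *blk)
{
	return ex_event_list(blk, NULL) != NULL;
}

int main(void)
{
	printf("ecc supported: %d\n", ex_event_supported("ecc0"));
	printf("foo supported: %d\n", ex_event_supported("foo0"));
	return 0;
}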
/* Get the event number given the name */
static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
{
const struct mlxbf_pmc_events *events;
- int i, size;
+ unsigned int i;
+ size_t size;
events = mlxbf_pmc_event_list(blk, &size);
if (!events)
@@ -1062,10 +1068,11 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
}
/* Get the event name given the number */
-static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
+static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt)
{
const struct mlxbf_pmc_events *events;
- int i, size;
+ unsigned int i;
+ size_t size;
events = mlxbf_pmc_event_list(blk, &size);
if (!events)
@@ -1080,9 +1087,9 @@ static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
}
/* Method to enable/disable/reset l3cache counters */
-static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
+static int mlxbf_pmc_config_l3_counters(unsigned int blk_num, bool enable, bool reset)
{
- uint32_t perfcnt_cfg = 0;
+ u32 perfcnt_cfg = 0;
if (enable)
perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
@@ -1095,12 +1102,9 @@ static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
}
/* Method to handle l3cache counter programming */
-static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
- uint32_t evt)
+static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- uint32_t perfcnt_sel_1 = 0;
- uint32_t perfcnt_sel = 0;
- uint32_t *wordaddr;
+ u32 perfcnt_sel_1 = 0, perfcnt_sel = 0, *wordaddr;
void __iomem *pmcaddr;
int ret;
@@ -1162,11 +1166,10 @@ static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
}
/* Method to handle crspace counter programming */
-static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
- uint32_t evt)
+static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- uint32_t word;
void *addr;
+ u32 word;
int ret;
addr = pmc->block[blk_num].mmio_base +
@@ -1187,7 +1190,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
}
/* Method to clear crspace counter value */
-static int mlxbf_pmc_clear_crspace_counter(int blk_num, uint32_t cnt_num)
+static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
{
void *addr;
@@ -1199,10 +1202,9 @@ static int mlxbf_pmc_clear_crspace_counter(int blk_num, uint32_t cnt_num)
}
/* Method to program a counter to monitor an event */
-static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
- uint32_t evt, bool is_l3)
+static int mlxbf_pmc_program_counter(unsigned int blk_num, u32 cnt_num, u32 evt, bool is_l3)
{
- uint64_t perfctl, perfevt, perfmon_cfg;
+ u64 perfctl, perfevt, perfmon_cfg;
if (cnt_num >= pmc->block[blk_num].counters)
return -ENODEV;
@@ -1263,12 +1265,11 @@ static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
}
/* Method to handle l3 counter reads */
-static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_l3_counter(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t perfcnt_low = 0, perfcnt_high = 0;
- uint64_t value;
+ u32 perfcnt_low = 0, perfcnt_high = 0;
int status;
+ u64 value;
status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_LOW +
@@ -1295,11 +1296,10 @@ static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
}
/* Method to handle crspace counter reads */
-static int mlxbf_pmc_read_crspace_counter(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_crspace_counter(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t value;
int status = 0;
+ u32 value;
status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
@@ -1313,11 +1313,10 @@ static int mlxbf_pmc_read_crspace_counter(int blk_num, uint32_t cnt_num,
}
/* Method to read the counter value */
-static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
- uint64_t *result)
+static int mlxbf_pmc_read_counter(unsigned int blk_num, u32 cnt_num, bool is_l3, u64 *result)
{
- uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg;
+ u32 perfcfg_offset, perfval_offset;
+ u64 perfmon_cfg;
int status;
if (cnt_num >= pmc->block[blk_num].counters)
@@ -1351,13 +1350,11 @@ static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
}
/* Method to read L3 block event */
-static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
- uint32_t *wordaddr;
+ u32 perfcnt_sel = 0, perfcnt_sel_1 = 0, *wordaddr;
void __iomem *pmcaddr;
- uint64_t evt;
+ u64 evt;
/* Select appropriate register information */
switch (cnt_num) {
@@ -1405,10 +1402,9 @@ static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
}
/* Method to read crspace block event */
-static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
- uint64_t *result)
+static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
- uint32_t word, evt;
+ u32 word, evt;
void *addr;
int ret;
@@ -1429,11 +1425,10 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
}
/* Method to find the event currently being monitored by a counter */
-static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
- uint64_t *result)
+static int mlxbf_pmc_read_event(unsigned int blk_num, u32 cnt_num, bool is_l3, u64 *result)
{
- uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt;
+ u32 perfcfg_offset, perfval_offset;
+ u64 perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
@@ -1469,9 +1464,9 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
}
/* Method to read a register */
-static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
+static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
{
- uint32_t ecc_out;
+ u32 ecc_out;
if (strstr(pmc->block_name[blk_num], "ecc")) {
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
@@ -1490,7 +1485,7 @@ static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
}
/* Method to write to a register */
-static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
+static int mlxbf_pmc_write_reg(unsigned int blk_num, u32 offset, u64 data)
{
if (strstr(pmc->block_name[blk_num], "ecc")) {
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
@@ -1510,9 +1505,10 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_counter = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, offset;
+ unsigned int blk_num, cnt_num;
bool is_l3 = false;
- uint64_t value;
+ int offset;
+ u64 value;
blk_num = attr_counter->nr;
cnt_num = attr_counter->index;
@@ -1544,14 +1540,16 @@ static ssize_t mlxbf_pmc_counter_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_counter = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, offset, err, data;
+ unsigned int blk_num, cnt_num, data;
bool is_l3 = false;
- uint64_t evt_num;
+ u64 evt_num;
+ int offset;
+ int err;
blk_num = attr_counter->nr;
cnt_num = attr_counter->index;
- err = kstrtoint(buf, 0, &data);
+ err = kstrtouint(buf, 0, &data);
if (err < 0)
return err;
@@ -1580,7 +1578,7 @@ static ssize_t mlxbf_pmc_counter_store(struct device *dev,
if (err)
return err;
} else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
- if (sscanf(attr->attr.name, "counter%d", &cnt_num) != 1)
+ if (sscanf(attr->attr.name, "counter%u", &cnt_num) != 1)
return -EINVAL;
err = mlxbf_pmc_clear_crspace_counter(blk_num, cnt_num);
} else
@@ -1595,10 +1593,11 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_event = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, err;
+ unsigned int blk_num, cnt_num;
bool is_l3 = false;
- uint64_t evt_num;
char *evt_name;
+ u64 evt_num;
+ int err;
blk_num = attr_event->nr;
cnt_num = attr_event->index;
@@ -1624,8 +1623,10 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_event = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, cnt_num, evt_num, err;
+ unsigned int blk_num, cnt_num;
bool is_l3 = false;
+ int evt_num;
+ int err;
blk_num = attr_event->nr;
cnt_num = attr_event->index;
@@ -1636,7 +1637,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
if (evt_num < 0)
return -EINVAL;
} else {
- err = kstrtoint(buf, 0, &evt_num);
+ err = kstrtouint(buf, 0, &evt_num);
if (err < 0)
return err;
}
@@ -1658,9 +1659,11 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_event_list = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int blk_num, i, size, len = 0, ret = 0;
const struct mlxbf_pmc_events *events;
char e_info[MLXBF_PMC_EVENT_INFO_LEN];
+ unsigned int blk_num, i, len = 0;
+ size_t size;
+ int ret = 0;
blk_num = attr_event_list->nr;
@@ -1686,8 +1689,8 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- uint32_t perfcnt_cfg, word;
- int blk_num, value;
+ unsigned int blk_num, value;
+ u32 perfcnt_cfg, word;
blk_num = attr_enable->nr;
@@ -1707,7 +1710,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
}
- return sysfs_emit(buf, "%d\n", value);
+ return sysfs_emit(buf, "%u\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache & crspace */
@@ -1717,12 +1720,13 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- int err, en, blk_num;
- uint32_t word;
+ unsigned int en, blk_num;
+ u32 word;
+ int err;
blk_num = attr_enable->nr;
- err = kstrtoint(buf, 0, &en);
+ err = kstrtouint(buf, 0, &en);
if (err < 0)
return err;
@@ -1760,10 +1764,13 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
}
/* Populate attributes for blocks with counters to monitor performance */
-static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_num)
{
struct mlxbf_pmc_attribute *attr;
- int i = 0, j = 0;
+ unsigned int i = 0, j = 0;
+
+ if (!mlxbf_pmc_event_supported(pmc->block_name[blk_num]))
+ return -ENOENT;
/* "event_list" sysfs to list events supported by the block */
attr = &pmc->block[blk_num].attr_event_list;
@@ -1812,8 +1819,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->dev_attr.store = mlxbf_pmc_counter_store;
attr->index = j;
attr->nr = blk_num;
- attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "counter%d", j);
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "counter%u", j);
if (!attr->dev_attr.attr.name)
return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
@@ -1825,8 +1831,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->dev_attr.store = mlxbf_pmc_event_store;
attr->index = j;
attr->nr = blk_num;
- attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "event%d", j);
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event%u", j);
if (!attr->dev_attr.attr.name)
return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
@@ -1837,30 +1842,31 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
}
/* Populate attributes for blocks with registers to monitor performance */
-static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
+static int mlxbf_pmc_init_perftype_reg(struct device *dev, unsigned int blk_num)
{
- struct mlxbf_pmc_attribute *attr;
const struct mlxbf_pmc_events *events;
- int i = 0, j = 0;
+ struct mlxbf_pmc_attribute *attr;
+ unsigned int i = 0;
+ size_t count = 0;
- events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);
+ events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &count);
if (!events)
- return -EINVAL;
+ return -ENOENT;
pmc->block[blk_num].attr_event = devm_kcalloc(
- dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
+ dev, count, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
if (!pmc->block[blk_num].attr_event)
return -ENOMEM;
- while (j > 0) {
- --j;
- attr = &pmc->block[blk_num].attr_event[j];
+ while (count > 0) {
+ --count;
+ attr = &pmc->block[blk_num].attr_event[count];
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_counter_show;
attr->dev_attr.store = mlxbf_pmc_counter_store;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- events[j].evt_name);
+ events[count].evt_name);
if (!attr->dev_attr.attr.name)
return -ENOMEM;
pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
@@ -1872,7 +1878,7 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
}
/* Helper to create the bfperf sysfs sub-directories and files */
-static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
+static int mlxbf_pmc_create_groups(struct device *dev, unsigned int blk_num)
{
int err;
@@ -1883,7 +1889,7 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
err = mlxbf_pmc_init_perftype_reg(dev, blk_num);
else
- err = -EINVAL;
+ err = -ENOENT;
if (err)
return err;
@@ -1914,19 +1920,20 @@ static bool mlxbf_pmc_guid_match(const guid_t *guid,
/* Helper to map the Performance Counters from the various blocks */
static int mlxbf_pmc_map_counters(struct device *dev)
{
- uint64_t info[MLXBF_PMC_INFO_SZ];
- int i, tile_num, ret;
+ u64 info[MLXBF_PMC_INFO_SZ];
+ unsigned int tile_num, i;
+ int ret;
for (i = 0; i < pmc->total_blocks; ++i) {
/* Create sysfs for tiles only if block number < tile_count */
if (strstr(pmc->block_name[i], "tilenet")) {
- if (sscanf(pmc->block_name[i], "tilenet%d", &tile_num) != 1)
+ if (sscanf(pmc->block_name[i], "tilenet%u", &tile_num) != 1)
continue;
if (tile_num >= pmc->tile_count)
continue;
} else if (strstr(pmc->block_name[i], "tile")) {
- if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
+ if (sscanf(pmc->block_name[i], "tile%u", &tile_num) != 1)
continue;
if (tile_num >= pmc->tile_count)
@@ -1936,9 +1943,9 @@ static int mlxbf_pmc_map_counters(struct device *dev)
/* Create sysfs only for enabled MSS blocks */
if (strstr(pmc->block_name[i], "mss") &&
pmc->event_set == MLXBF_PMC_EVENT_SET_BF3) {
- int mss_num;
+ unsigned int mss_num;
- if (sscanf(pmc->block_name[i], "mss%d", &mss_num) != 1)
+ if (sscanf(pmc->block_name[i], "mss%u", &mss_num) != 1)
continue;
if (!((pmc->mss_enable >> mss_num) & 0x1))
@@ -1947,17 +1954,17 @@ static int mlxbf_pmc_map_counters(struct device *dev)
/* Create sysfs only for enabled LLT blocks */
if (strstr(pmc->block_name[i], "llt_miss")) {
- int llt_num;
+ unsigned int llt_num;
- if (sscanf(pmc->block_name[i], "llt_miss%d", &llt_num) != 1)
+ if (sscanf(pmc->block_name[i], "llt_miss%u", &llt_num) != 1)
continue;
if (!((pmc->llt_enable >> llt_num) & 0x1))
continue;
} else if (strstr(pmc->block_name[i], "llt")) {
- int llt_num;
+ unsigned int llt_num;
- if (sscanf(pmc->block_name[i], "llt%d", &llt_num) != 1)
+ if (sscanf(pmc->block_name[i], "llt%u", &llt_num) != 1)
continue;
if (!((pmc->llt_enable >> llt_num) & 0x1))
@@ -1987,6 +1994,10 @@ static int mlxbf_pmc_map_counters(struct device *dev)
return -ENOMEM;
ret = mlxbf_pmc_create_groups(dev, i);
+ if (ret == -ENOENT) {
+ dev_warn(dev, "ignoring unsupported block: '%s'\n", pmc->block_name[i]);
+ continue;
+ }
if (ret)
return ret;
}
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 5c022b258f91..0ce9fff1f7d4 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -348,20 +348,6 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
u32 regval, bit;
int ret;
- /*
- * Validate if item related to received signal type is valid.
- * It should never happen, excepted the situation when some
- * piece of hardware is broken. In such situation just produce
- * error message and return. Caller must continue to handle the
- * signals from other devices if any.
- */
- if (unlikely(!item)) {
- dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n",
- item->reg, item->mask);
-
- return;
- }
-
/* Mask event. */
ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
0);
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index aeb3feae40ff..035d6b4105cd 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -74,6 +74,12 @@ static const struct software_node ssam_node_tmp_pprof = {
.parent = &ssam_node_root,
};
+/* Fan speed function. */
+static const struct software_node ssam_node_fan_speed = {
+ .name = "ssam:01:05:01:01:01",
+ .parent = &ssam_node_root,
+};
+
/* Tablet-mode switch via KIP subsystem. */
static const struct software_node ssam_node_kip_tablet_switch = {
.name = "ssam:01:0e:01:00:01",
@@ -305,6 +311,7 @@ static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
+ &ssam_node_fan_speed,
&ssam_node_pos_tablet_switch,
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bdd302274b9a..7e9251fc3341 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -56,8 +56,6 @@ config HUAWEI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
select LEDS_CLASS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
select NEW_LEDS
help
This driver provides support for Huawei WMI hotkeys, battery charge
@@ -269,8 +267,6 @@ config ASUS_WMI
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
select ACPI_PLATFORM_PROFILE
help
Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
@@ -374,6 +370,7 @@ config FUJITSU_LAPTOP
depends on ACPI
depends on INPUT
depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_BATTERY
depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
select NEW_LEDS
@@ -507,8 +504,6 @@ config THINKPAD_ACPI
select NVRAM
select NEW_LEDS
select LEDS_CLASS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
help
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 88b826e88ebd..ee2e164f86b9 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -276,6 +276,7 @@ static bool has_type_aa;
static u16 commun_func_bitmap;
static u8 commun_fn_key_number;
static bool cycle_gaming_thermal_profile = true;
+static bool predator_v4;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -284,6 +285,7 @@ module_param(force_series, int, 0444);
module_param(force_caps, int, 0444);
module_param(ec_raw_mode, bool, 0444);
module_param(cycle_gaming_thermal_profile, bool, 0644);
+module_param(predator_v4, bool, 0444);
MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
@@ -292,6 +294,8 @@ MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
MODULE_PARM_DESC(cycle_gaming_thermal_profile,
"Set thermal mode key in cycle mode. Disabling it sets the mode key in turbo toggle mode");
+MODULE_PARM_DESC(predator_v4,
+ "Enable features for predator laptops that use predator sense v4");
struct acer_data {
int mailled;
@@ -585,6 +589,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH16-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -725,7 +738,9 @@ enum acer_predator_v4_thermal_profile_wmi {
/* Find which quirks are needed for a particular vendor/ model pair */
static void __init find_quirks(void)
{
- if (!force_series) {
+ if (predator_v4) {
+ quirks = &quirk_acer_predator_v4;
+ } else if (!force_series) {
dmi_check_system(acer_quirks);
dmi_check_system(non_acer_quirks);
} else if (force_series == 2490) {
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 74bcb3d13104..018c48429616 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -678,7 +678,7 @@ static int __init acerhdf_register_thermal(void)
return -EINVAL;
thz_dev = thermal_zone_device_register_with_trips("acerhdf", trips, ARRAY_SIZE(trips),
- 0, NULL, &acerhdf_dev_ops,
+ NULL, &acerhdf_dev_ops,
&acerhdf_zone_params, 0,
(kernelmode) ? interval*1000 : 0);
if (IS_ERR(thz_dev))
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index 54753213cc61..f88682d36447 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -8,7 +8,7 @@ source "drivers/platform/x86/amd/pmc/Kconfig"
config AMD_HSMP
tristate "AMD HSMP Driver"
- depends on AMD_NB && X86_64
+ depends on AMD_NB && X86_64 && ACPI
help
The driver provides a way for user space tools to monitor and manage
system management functionality on EPYC server CPUs from AMD.
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index b55d80e29139..1927be901108 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -18,9 +18,11 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
+#include <linux/acpi.h>
#define DRIVER_NAME "amd_hsmp"
-#define DRIVER_VERSION "2.0"
+#define DRIVER_VERSION "2.2"
+#define ACPI_HSMP_DEVICE_HID "AMDI0097"
/* HSMP Status / Error codes */
#define HSMP_STATUS_NOT_READY 0x00
@@ -40,9 +42,11 @@
* register into the SMN_INDEX register, and reads/writes the SMN_DATA reg.
* Below are required SMN address for HSMP Mailbox register offsets in SMU address space
*/
-#define SMN_HSMP_MSG_ID 0x3B10534
-#define SMN_HSMP_MSG_RESP 0x3B10980
-#define SMN_HSMP_MSG_DATA 0x3B109E0
+#define SMN_HSMP_BASE 0x3B00000
+#define SMN_HSMP_MSG_ID 0x0010534
+#define SMN_HSMP_MSG_ID_F1A_M0H 0x0010934
+#define SMN_HSMP_MSG_RESP 0x0010980
+#define SMN_HSMP_MSG_DATA 0x00109E0
#define HSMP_INDEX_REG 0xc4
#define HSMP_DATA_REG 0xc8
@@ -53,41 +57,86 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
+/* These are the strings specified in the ACPI table */
+#define MSG_IDOFF_STR "MsgIdOffset"
+#define MSG_ARGOFF_STR "MsgArgOffset"
+#define MSG_RESPOFF_STR "MsgRspOffset"
+
+#define MAX_AMD_SOCKETS 8
+
+struct hsmp_mbaddr_info {
+ u32 base_addr;
+ u32 msg_id_off;
+ u32 msg_resp_off;
+ u32 msg_arg_off;
+ u32 size;
+};
+
struct hsmp_socket {
struct bin_attribute hsmp_attr;
+ struct hsmp_mbaddr_info mbinfo;
void __iomem *metric_tbl_addr;
+ void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
+ struct pci_dev *root;
+ struct device *dev;
u16 sock_ind;
};
struct hsmp_plat_device {
struct miscdevice hsmp_device;
struct hsmp_socket *sock;
- struct device *dev;
u32 proto_ver;
u16 num_sockets;
+ bool is_acpi_device;
+ bool is_probed;
};
static struct hsmp_plat_device plat_dev;
-static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
- u32 *value, bool write)
+static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
{
int ret;
- ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
+ if (!sock->root)
+ return -ENODEV;
+
+ ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
+ sock->mbinfo.base_addr + offset);
if (ret)
return ret;
- ret = (write ? pci_write_config_dword(root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(root, HSMP_DATA_REG, value));
+ ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
+ : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
return ret;
}
+static void amd_hsmp_acpi_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
+{
+ if (write)
+ iowrite32(*value, sock->virt_base_addr + offset);
+ else
+ *value = ioread32(sock->virt_base_addr + offset);
+}
+
+static int amd_hsmp_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
+{
+ if (plat_dev.is_acpi_device)
+ amd_hsmp_acpi_rdwr(sock, offset, value, write);
+ else
+ return amd_hsmp_pci_rdwr(sock, offset, value, write);
+
+ return 0;
+}
+
/*
- * Send a message to the HSMP port via PCI-e config space registers.
+ * Send a message to the HSMP port via PCI-e config space registers
+ * or by writing to MMIO space.
*
* The caller is expected to zero out any unused arguments.
* If a response is expected, the number of response words should be greater than 0.
@@ -95,16 +144,19 @@ static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
* Returns 0 for success and populates the requested number of arguments.
* Returns a negative error code for failure.
*/
-static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
+static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *msg)
{
+ struct hsmp_mbaddr_info *mbinfo;
unsigned long timeout, short_sleep;
u32 mbox_status;
u32 index;
int ret;
+ mbinfo = &sock->mbinfo;
+
/* Clear the status register */
mbox_status = HSMP_STATUS_NOT_READY;
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_WR);
if (ret) {
pr_err("Error %d clearing mailbox status register\n", ret);
return ret;
@@ -113,7 +165,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
index = 0;
/* Write any message arguments */
while (index < msg->num_args) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
&msg->args[index], HSMP_WR);
if (ret) {
pr_err("Error %d writing message argument %d\n", ret, index);
@@ -123,7 +175,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
}
/* Write the message ID which starts the operation */
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_id_off, &msg->msg_id, HSMP_WR);
if (ret) {
pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
return ret;
@@ -140,7 +192,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
while (time_before(jiffies, timeout)) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_RD);
if (ret) {
pr_err("Error %d reading mailbox status\n", ret);
return ret;
@@ -175,7 +227,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
*/
index = 0;
while (index < msg->response_sz) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
&msg->args[index], HSMP_RD);
if (ret) {
pr_err("Error %d reading response %u for message ID:%u\n",
@@ -208,21 +260,19 @@ static int validate_message(struct hsmp_message *msg)
int hsmp_send_message(struct hsmp_message *msg)
{
- struct hsmp_socket *sock = &plat_dev.sock[msg->sock_ind];
- struct amd_northbridge *nb;
+ struct hsmp_socket *sock;
int ret;
if (!msg)
return -EINVAL;
-
- nb = node_to_amd_nb(msg->sock_ind);
- if (!nb || !nb->root)
- return -ENODEV;
-
ret = validate_message(msg);
if (ret)
return ret;
+ if (!plat_dev.sock || msg->sock_ind >= plat_dev.num_sockets)
+ return -ENODEV;
+ sock = &plat_dev.sock[msg->sock_ind];
+
/*
* The time taken by smu operation to complete is between
* 10us to 1ms. Sometime it may take more time.
@@ -233,7 +283,7 @@ int hsmp_send_message(struct hsmp_message *msg)
if (ret < 0)
return ret;
- ret = __hsmp_send_message(nb->root, msg);
+ ret = __hsmp_send_message(sock, msg);
up(&sock->hsmp_sem);
@@ -244,12 +294,7 @@ EXPORT_SYMBOL_GPL(hsmp_send_message);
static int hsmp_test(u16 sock_ind, u32 value)
{
struct hsmp_message msg = { 0 };
- struct amd_northbridge *nb;
- int ret = -ENODEV;
-
- nb = node_to_amd_nb(sock_ind);
- if (!nb || !nb->root)
- return ret;
+ int ret;
/*
* Test the hsmp port by performing TEST command. The test message
@@ -261,14 +306,15 @@ static int hsmp_test(u16 sock_ind, u32 value)
msg.args[0] = value;
msg.sock_ind = sock_ind;
- ret = __hsmp_send_message(nb->root, &msg);
+ ret = hsmp_send_message(&msg);
if (ret)
return ret;
/* Check the response value */
if (msg.args[0] != (value + 1)) {
- pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
- sock_ind, (value + 1), msg.args[0]);
+ dev_err(plat_dev.sock[sock_ind].dev,
+ "Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
+ sock_ind, (value + 1), msg.args[0]);
return -EBADE;
}
@@ -337,6 +383,181 @@ static const struct file_operations hsmp_fops = {
.compat_ioctl = hsmp_ioctl,
};
+/* This is the UUID used for HSMP */
+static const guid_t acpi_hsmp_uuid = GUID_INIT(0xb74d619d, 0x5707, 0x48bd,
+ 0xa6, 0x9f, 0x4e, 0xa2,
+ 0x87, 0x1f, 0xc2, 0xf6);
+
+static inline bool is_acpi_hsmp_uuid(union acpi_object *obj)
+{
+ if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == UUID_SIZE)
+ return guid_equal((guid_t *)obj->buffer.pointer, &acpi_hsmp_uuid);
+
+ return false;
+}
+
+static inline int hsmp_get_uid(struct device *dev, u16 *sock_ind)
+{
+ char *uid;
+
+	/*
+	 * The UID (ID00, ID01, ..., IDXX) differentiates the sockets:
+	 * read it, strip the "ID" prefix and convert the remaining
+	 * digits to an integer, e.g. "ID01" -> 1.
+	 */
+ uid = acpi_device_uid(ACPI_COMPANION(dev));
+
+ return kstrtou16(uid + 2, 10, sock_ind);
+}
+
+static acpi_status hsmp_resource(struct acpi_resource *res, void *data)
+{
+ struct hsmp_socket *sock = data;
+ struct resource r;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (!acpi_dev_resource_memory(res, &r))
+ return AE_ERROR;
+ if (!r.start || r.end < r.start || !(r.flags & IORESOURCE_MEM_WRITEABLE))
+ return AE_ERROR;
+ sock->mbinfo.base_addr = r.start;
+ sock->mbinfo.size = resource_size(&r);
+ break;
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ break;
+ default:
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static int hsmp_read_acpi_dsd(struct hsmp_socket *sock)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *guid, *mailbox_package;
+ union acpi_object *dsd;
+ acpi_status status;
+ int ret = 0;
+ int j;
+
+ status = acpi_evaluate_object_typed(ACPI_HANDLE(sock->dev), "_DSD", NULL,
+ &buf, ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status)) {
+ dev_err(sock->dev, "Failed to read mailbox reg offsets from DSD table, err: %s\n",
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ dsd = buf.pointer;
+
+	/* The HSMP _DSD property should contain two objects:
+	 * 1. a GUID, an ACPI object of type ACPI_TYPE_BUFFER
+	 * 2. a mailbox, an ACPI object of type ACPI_TYPE_PACKAGE
+	 * The mailbox object contains three more ACPI_TYPE_PACKAGE objects
+	 * holding the msgid, msgresp and msgarg offsets; these packages in
+	 * turn each contain two ACPI objects, of type ACPI_TYPE_STRING and
+	 * ACPI_TYPE_INTEGER.
+	 */
+ if (!dsd || dsd->type != ACPI_TYPE_PACKAGE || dsd->package.count != 2) {
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ guid = &dsd->package.elements[0];
+ mailbox_package = &dsd->package.elements[1];
+ if (!is_acpi_hsmp_uuid(guid) || mailbox_package->type != ACPI_TYPE_PACKAGE) {
+ dev_err(sock->dev, "Invalid hsmp _DSD table data\n");
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ for (j = 0; j < mailbox_package->package.count; j++) {
+ union acpi_object *msgobj, *msgstr, *msgint;
+
+ msgobj = &mailbox_package->package.elements[j];
+ msgstr = &msgobj->package.elements[0];
+ msgint = &msgobj->package.elements[1];
+
+ /* package should have 1 string and 1 integer object */
+ if (msgobj->type != ACPI_TYPE_PACKAGE ||
+ msgstr->type != ACPI_TYPE_STRING ||
+ msgint->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ if (!strncmp(msgstr->string.pointer, MSG_IDOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_id_off = msgint->integer.value;
+ } else if (!strncmp(msgstr->string.pointer, MSG_RESPOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_resp_off = msgint->integer.value;
+ } else if (!strncmp(msgstr->string.pointer, MSG_ARGOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_arg_off = msgint->integer.value;
+ } else {
+ ret = -ENOENT;
+ goto free_buf;
+ }
+ }
+
+ if (!sock->mbinfo.msg_id_off || !sock->mbinfo.msg_resp_off ||
+ !sock->mbinfo.msg_arg_off)
+ ret = -EINVAL;
+
+free_buf:
+ ACPI_FREE(buf.pointer);
+ return ret;
+}
+
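For reference, a sketch of the _DSD layout the parser above expects, shown as ASL inside a C comment. This is an editorial illustration: only the UUID and the property strings come from this patch, the offset values are placeholders.

/*
 * Name (_DSD, Package () {
 *     ToUUID ("b74d619d-5707-48bd-a69f-4ea2871fc2f6"),
 *     Package () {
 *         Package () { "MsgIdOffset",  0x10534 },
 *         Package () { "MsgRspOffset", 0x10980 },
 *         Package () { "MsgArgOffset", 0x109E0 },
 *     }
 * })
 */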
+static int hsmp_read_acpi_crs(struct hsmp_socket *sock)
+{
+ acpi_status status;
+
+ status = acpi_walk_resources(ACPI_HANDLE(sock->dev), METHOD_NAME__CRS,
+ hsmp_resource, sock);
+ if (ACPI_FAILURE(status)) {
+ dev_err(sock->dev, "Failed to look up MP1 base address from CRS method, err: %s\n",
+ acpi_format_exception(status));
+ return -EINVAL;
+ }
+ if (!sock->mbinfo.base_addr || !sock->mbinfo.size)
+ return -EINVAL;
+
+	/* The mapped region should be uncached */
+ sock->virt_base_addr = devm_ioremap_uc(sock->dev, sock->mbinfo.base_addr,
+ sock->mbinfo.size);
+ if (!sock->virt_base_addr) {
+ dev_err(sock->dev, "Failed to ioremap MP1 base address\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Parse the ACPI table to read the data */
+static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind)
+{
+ struct hsmp_socket *sock = &plat_dev.sock[sock_ind];
+ int ret;
+
+ sock->sock_ind = sock_ind;
+ sock->dev = dev;
+ plat_dev.is_acpi_device = true;
+
+ sema_init(&sock->hsmp_sem, 1);
+
+ /* Read MP1 base address from CRS method */
+ ret = hsmp_read_acpi_crs(sock);
+ if (ret)
+ return ret;
+
+ /* Read mailbox offsets from DSD table */
+ return hsmp_read_acpi_dsd(sock);
+}
+
static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -345,14 +566,12 @@ static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj,
struct hsmp_message msg = { 0 };
int ret;
- /* Do not support lseek(), reads entire metric table */
- if (count < bin_attr->size) {
- dev_err(plat_dev.dev, "Wrong buffer size\n");
+ if (!sock)
return -EINVAL;
- }
- if (!sock) {
- dev_err(plat_dev.dev, "Failed to read attribute private data\n");
+ /* Do not support lseek(), reads entire metric table */
+ if (count < bin_attr->size) {
+ dev_err(sock->dev, "Wrong buffer size\n");
return -EINVAL;
}
@@ -388,13 +607,13 @@ static int hsmp_get_tbl_dram_base(u16 sock_ind)
*/
dram_addr = msg.args[0] | ((u64)(msg.args[1]) << 32);
if (!dram_addr) {
- dev_err(plat_dev.dev, "Invalid DRAM address for metric table\n");
+ dev_err(sock->dev, "Invalid DRAM address for metric table\n");
return -ENOMEM;
}
- sock->metric_tbl_addr = devm_ioremap(plat_dev.dev, dram_addr,
+ sock->metric_tbl_addr = devm_ioremap(sock->dev, dram_addr,
sizeof(struct hsmp_metric_table));
if (!sock->metric_tbl_addr) {
- dev_err(plat_dev.dev, "Failed to ioremap metric table addr\n");
+ dev_err(sock->dev, "Failed to ioremap metric table addr\n");
return -ENOMEM;
}
return 0;
@@ -422,65 +641,91 @@ static int hsmp_init_metric_tbl_bin_attr(struct bin_attribute **hattrs, u16 sock
hattrs[0] = hattr;
if (plat_dev.proto_ver == HSMP_PROTO_VER6)
- return (hsmp_get_tbl_dram_base(sock_ind));
+ return hsmp_get_tbl_dram_base(sock_ind);
else
return 0;
}
-/* One bin sysfs for metrics table*/
+/* One bin sysfs for metrics table */
#define NUM_HSMP_ATTRS 1
-static int hsmp_create_sysfs_interface(void)
+static int hsmp_create_attr_list(struct attribute_group *attr_grp,
+ struct device *dev, u16 sock_ind)
{
- const struct attribute_group **hsmp_attr_grps;
struct bin_attribute **hsmp_bin_attrs;
+
+ /* Null terminated list of attributes */
+ hsmp_bin_attrs = devm_kcalloc(dev, NUM_HSMP_ATTRS + 1,
+ sizeof(*hsmp_bin_attrs),
+ GFP_KERNEL);
+ if (!hsmp_bin_attrs)
+ return -ENOMEM;
+
+ attr_grp->bin_attrs = hsmp_bin_attrs;
+
+ return hsmp_init_metric_tbl_bin_attr(hsmp_bin_attrs, sock_ind);
+}
+
+static int hsmp_create_non_acpi_sysfs_if(struct device *dev)
+{
+ const struct attribute_group **hsmp_attr_grps;
struct attribute_group *attr_grp;
- int ret;
u16 i;
- /* String formatting is currently limited to u8 sockets */
- if (WARN_ON(plat_dev.num_sockets > U8_MAX))
- return -ERANGE;
-
- hsmp_attr_grps = devm_kzalloc(plat_dev.dev, sizeof(struct attribute_group *) *
- (plat_dev.num_sockets + 1), GFP_KERNEL);
+ hsmp_attr_grps = devm_kcalloc(dev, plat_dev.num_sockets + 1,
+ sizeof(*hsmp_attr_grps),
+ GFP_KERNEL);
if (!hsmp_attr_grps)
return -ENOMEM;
/* Create a sysfs directory for each socket */
for (i = 0; i < plat_dev.num_sockets; i++) {
- attr_grp = devm_kzalloc(plat_dev.dev, sizeof(struct attribute_group), GFP_KERNEL);
+ attr_grp = devm_kzalloc(dev, sizeof(struct attribute_group),
+ GFP_KERNEL);
if (!attr_grp)
return -ENOMEM;
snprintf(plat_dev.sock[i].name, HSMP_ATTR_GRP_NAME_SIZE, "socket%u", (u8)i);
- attr_grp->name = plat_dev.sock[i].name;
-
- /* Null terminated list of attributes */
- hsmp_bin_attrs = devm_kzalloc(plat_dev.dev, sizeof(struct bin_attribute *) *
- (NUM_HSMP_ATTRS + 1), GFP_KERNEL);
- if (!hsmp_bin_attrs)
- return -ENOMEM;
-
- attr_grp->bin_attrs = hsmp_bin_attrs;
+ attr_grp->name = plat_dev.sock[i].name;
attr_grp->is_bin_visible = hsmp_is_sock_attr_visible;
hsmp_attr_grps[i] = attr_grp;
- /* Now create the leaf nodes */
- ret = hsmp_init_metric_tbl_bin_attr(hsmp_bin_attrs, i);
- if (ret)
- return ret;
+ hsmp_create_attr_list(attr_grp, dev, i);
}
- return devm_device_add_groups(plat_dev.dev, hsmp_attr_grps);
+
+ return devm_device_add_groups(dev, hsmp_attr_grps);
+}
+
+static int hsmp_create_acpi_sysfs_if(struct device *dev)
+{
+ struct attribute_group *attr_grp;
+ u16 sock_ind;
+ int ret;
+
+ attr_grp = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
+ if (!attr_grp)
+ return -ENOMEM;
+
+ attr_grp->is_bin_visible = hsmp_is_sock_attr_visible;
+
+ ret = hsmp_get_uid(dev, &sock_ind);
+ if (ret)
+ return ret;
+
+ ret = hsmp_create_attr_list(attr_grp, dev, sock_ind);
+ if (ret)
+ return ret;
+
+ return devm_device_add_group(dev, attr_grp);
}
-static int hsmp_cache_proto_ver(void)
+static int hsmp_cache_proto_ver(u16 sock_ind)
{
struct hsmp_message msg = { 0 };
int ret;
msg.msg_id = HSMP_GET_PROTO_VER;
- msg.sock_ind = 0;
+ msg.sock_ind = sock_ind;
msg.response_sz = hsmp_msg_desc_table[HSMP_GET_PROTO_VER].response_sz;
ret = hsmp_send_message(&msg);
@@ -490,45 +735,150 @@ static int hsmp_cache_proto_ver(void)
return ret;
}
-static int hsmp_pltdrv_probe(struct platform_device *pdev)
+static inline bool is_f1a_m0h(void)
{
- int ret, i;
+ if (boot_cpu_data.x86 == 0x1A && boot_cpu_data.x86_model <= 0x0F)
+ return true;
- plat_dev.sock = devm_kzalloc(&pdev->dev,
- (plat_dev.num_sockets * sizeof(struct hsmp_socket)),
- GFP_KERNEL);
- if (!plat_dev.sock)
- return -ENOMEM;
- plat_dev.dev = &pdev->dev;
+ return false;
+}
+
+static int init_platform_device(struct device *dev)
+{
+ struct hsmp_socket *sock;
+ int ret, i;
for (i = 0; i < plat_dev.num_sockets; i++) {
- sema_init(&plat_dev.sock[i].hsmp_sem, 1);
- plat_dev.sock[i].sock_ind = i;
+ if (!node_to_amd_nb(i))
+ return -ENODEV;
+ sock = &plat_dev.sock[i];
+ sock->root = node_to_amd_nb(i)->root;
+ sock->sock_ind = i;
+ sock->dev = dev;
+ sock->mbinfo.base_addr = SMN_HSMP_BASE;
+
+		/*
+		 * This is a transitional change from non-ACPI to ACPI; only the
+		 * family 0x1A, model 0x00 platform is supported on both the ACPI
+		 * and non-ACPI paths.
+		 */
+ if (is_f1a_m0h())
+ sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID_F1A_M0H;
+ else
+ sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID;
+
+ sock->mbinfo.msg_resp_off = SMN_HSMP_MSG_RESP;
+ sock->mbinfo.msg_arg_off = SMN_HSMP_MSG_DATA;
+ sema_init(&sock->hsmp_sem, 1);
+
+ /* Test the hsmp interface on each socket */
+ ret = hsmp_test(i, 0xDEADBEEF);
+ if (ret) {
+ dev_err(dev, "HSMP test message failed on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ dev_err(dev, "Is HSMP disabled in BIOS ?\n");
+ return ret;
+ }
}
- plat_dev.hsmp_device.name = HSMP_CDEV_NAME;
- plat_dev.hsmp_device.minor = MISC_DYNAMIC_MINOR;
- plat_dev.hsmp_device.fops = &hsmp_fops;
- plat_dev.hsmp_device.parent = &pdev->dev;
- plat_dev.hsmp_device.nodename = HSMP_DEVNODE_NAME;
- plat_dev.hsmp_device.mode = 0644;
+ return 0;
+}
+
+static const struct acpi_device_id amd_hsmp_acpi_ids[] = {
+ {ACPI_HSMP_DEVICE_HID, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, amd_hsmp_acpi_ids);
+
+static int hsmp_pltdrv_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev;
+ u16 sock_ind = 0;
+ int ret;
+
+	/*
+	 * On an ACPI-enabled BIOS an ACPI HSMP device is added for each
+	 * socket, hence the per-socket probing. The memory for the sockets
+	 * must still be contiguous so it can be accessed as an array, so
+	 * allocate it for all sockets at once instead of on each probe.
+	 */
+ if (!plat_dev.is_probed) {
+ plat_dev.sock = devm_kcalloc(&pdev->dev, plat_dev.num_sockets,
+ sizeof(*plat_dev.sock),
+ GFP_KERNEL);
+ if (!plat_dev.sock)
+ return -ENOMEM;
+ }
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (adev && !acpi_match_device_ids(adev, amd_hsmp_acpi_ids)) {
+ ret = hsmp_get_uid(&pdev->dev, &sock_ind);
+ if (ret)
+ return ret;
+ if (sock_ind >= plat_dev.num_sockets)
+ return -EINVAL;
+ ret = hsmp_parse_acpi_table(&pdev->dev, sock_ind);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse ACPI table\n");
+ return ret;
+ }
+ /* Test the hsmp interface */
+ ret = hsmp_test(sock_ind, 0xDEADBEEF);
+ if (ret) {
+ dev_err(&pdev->dev, "HSMP test message failed on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ dev_err(&pdev->dev, "Is HSMP disabled in BIOS ?\n");
+ return ret;
+ }
+ } else {
+ ret = init_platform_device(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init HSMP mailbox\n");
+ return ret;
+ }
+ }
- ret = hsmp_cache_proto_ver();
+ ret = hsmp_cache_proto_ver(sock_ind);
if (ret) {
- dev_err(plat_dev.dev, "Failed to read HSMP protocol version\n");
+ dev_err(&pdev->dev, "Failed to read HSMP protocol version\n");
return ret;
}
- ret = hsmp_create_sysfs_interface();
+ if (plat_dev.is_acpi_device)
+ ret = hsmp_create_acpi_sysfs_if(&pdev->dev);
+ else
+ ret = hsmp_create_non_acpi_sysfs_if(&pdev->dev);
if (ret)
- dev_err(plat_dev.dev, "Failed to create HSMP sysfs interface\n");
+ dev_err(&pdev->dev, "Failed to create HSMP sysfs interface\n");
+
+ if (!plat_dev.is_probed) {
+ plat_dev.hsmp_device.name = HSMP_CDEV_NAME;
+ plat_dev.hsmp_device.minor = MISC_DYNAMIC_MINOR;
+ plat_dev.hsmp_device.fops = &hsmp_fops;
+ plat_dev.hsmp_device.parent = &pdev->dev;
+ plat_dev.hsmp_device.nodename = HSMP_DEVNODE_NAME;
+ plat_dev.hsmp_device.mode = 0644;
+
+ ret = misc_register(&plat_dev.hsmp_device);
+ if (ret)
+ return ret;
+
+ plat_dev.is_probed = true;
+ }
+
+ return 0;
- return misc_register(&plat_dev.hsmp_device);
}
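Editorial summary of the resulting probe flow (names from this patch, not part of the diff):

/*
 * ACPI companion matches AMDI0097 (per-socket probe):
 *     _UID -> socket index, _CRS -> MMIO mailbox window, _DSD -> offsets,
 *     then hsmp_test() on that socket.
 * No ACPI companion (legacy single probe):
 *     init_platform_device() sets the SMN offsets and the amd_nb PCI roots,
 *     then hsmp_test() on every socket.
 */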
static void hsmp_pltdrv_remove(struct platform_device *pdev)
{
- misc_deregister(&plat_dev.hsmp_device);
+	/*
+	 * Only one misc device is registered even on a multi-socket system,
+	 * so deregistration should happen only once.
+	 */
+ if (plat_dev.is_probed) {
+ misc_deregister(&plat_dev.hsmp_device);
+ plat_dev.is_probed = false;
+ }
}
static struct platform_driver amd_hsmp_driver = {
@@ -536,15 +886,30 @@ static struct platform_driver amd_hsmp_driver = {
.remove_new = hsmp_pltdrv_remove,
.driver = {
.name = DRIVER_NAME,
+ .acpi_match_table = amd_hsmp_acpi_ids,
},
};
static struct platform_device *amd_hsmp_platdev;
+static int hsmp_plat_dev_register(void)
+{
+ int ret;
+
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!amd_hsmp_platdev)
+ return -ENOMEM;
+
+ ret = platform_device_add(amd_hsmp_platdev);
+ if (ret)
+ platform_device_put(amd_hsmp_platdev);
+
+ return ret;
+}
+
static int __init hsmp_plt_init(void)
{
int ret = -ENODEV;
- int i;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
pr_err("HSMP is not supported on Family:%x model:%x\n",
@@ -557,40 +922,19 @@ static int __init hsmp_plt_init(void)
* if we have N SMN/DF interfaces that ideally means N sockets
*/
plat_dev.num_sockets = amd_nb_num();
- if (plat_dev.num_sockets == 0)
+ if (plat_dev.num_sockets == 0 || plat_dev.num_sockets > MAX_AMD_SOCKETS)
return ret;
- /* Test the hsmp interface on each socket */
- for (i = 0; i < plat_dev.num_sockets; i++) {
- ret = hsmp_test(i, 0xDEADBEEF);
- if (ret) {
- pr_err("HSMP test message failed on Fam:%x model:%x\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- pr_err("Is HSMP disabled in BIOS ?\n");
- return ret;
- }
- }
-
ret = platform_driver_register(&amd_hsmp_driver);
if (ret)
return ret;
- amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
- if (!amd_hsmp_platdev) {
- ret = -ENOMEM;
- goto drv_unregister;
- }
-
- ret = platform_device_add(amd_hsmp_platdev);
- if (ret) {
- platform_device_put(amd_hsmp_platdev);
- goto drv_unregister;
+ if (!plat_dev.is_acpi_device) {
+ ret = hsmp_plat_dev_register();
+ if (ret)
+ platform_driver_unregister(&amd_hsmp_driver);
}
- return 0;
-
-drv_unregister:
- platform_driver_unregister(&amd_hsmp_driver);
return ret;
}
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index f2eb07ef855a..d0cf46e2fc8e 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -90,12 +90,96 @@ out:
return err;
}
+static union acpi_object *apts_if_call(struct amd_pmf_dev *pdev, u32 state_index)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apts_if_arg_list;
+ union acpi_object apts_if_args[3];
+ acpi_status status;
+
+ apts_if_arg_list.count = 3;
+ apts_if_arg_list.pointer = &apts_if_args[0];
+
+ apts_if_args[0].type = ACPI_TYPE_INTEGER;
+ apts_if_args[0].integer.value = 1;
+ apts_if_args[1].type = ACPI_TYPE_INTEGER;
+ apts_if_args[1].integer.value = state_index;
+ apts_if_args[2].type = ACPI_TYPE_INTEGER;
+ apts_if_args[2].integer.value = 0;
+
+ status = acpi_evaluate_object(ahandle, "APTS", &apts_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APTS state_idx:%u call failed\n", state_index);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apts_if_call_store_buffer(struct amd_pmf_dev *pdev,
+ u32 index, void *data, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apts_if_call(pdev, index);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+ dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, info->buffer.pointer, out_sz);
+out:
+ kfree(info);
+ return err;
+}
+
int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
{
/* If bit-n is set, that indicates function n+1 is supported */
return !!(pdev->supported_func & BIT(index - 1));
}
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apts_if_call_store_buffer(pdev, apts_idx, data, sizeof(*data));
+}
+
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output_v2 *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *data)
{
@@ -140,6 +224,43 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
kfree(info);
}
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag)
+{
+ struct sbios_hb_event_v2 args = { };
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+
+ switch (flag) {
+ case ON_LOAD:
+ args.load = 1;
+ break;
+ case ON_UNLOAD:
+ args.unload = 1;
+ break;
+ case ON_SUSPEND:
+ args.suspend = 1;
+ break;
+ case ON_RESUME:
+ args.resume = 1;
+ break;
+ default:
+ dev_dbg(dev->dev, "Failed to send v2 heartbeat event, flag:0x%x\n", flag);
+ return -EINVAL;
+ }
+
+ params.length = sizeof(args);
+ params.pointer = &args;
+
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2, &params);
+ if (!info)
+ return -EIO;
+
+ kfree(info);
+ return 0;
+}
+
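Editorial note: the core.c hunks later in this patch invoke this helper at four points, each gated on is_apmf_func_supported(APMF_FUNC_SBIOS_HEARTBEAT_V2); summarized here for reference:

/*
 * amd_pmf_probe()           -> ON_LOAD
 * amd_pmf_remove()          -> ON_UNLOAD
 * amd_pmf_suspend_handler() -> ON_SUSPEND
 * amd_pmf_resume_handler()  -> ON_RESUME
 */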
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
{
union acpi_object *info;
@@ -166,6 +287,11 @@ int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data
return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
}
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
+}
+
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
{
return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
@@ -218,8 +344,10 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
return err;
pdev->supported_func = output.supported_functions;
- dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
- output.supported_functions, output.notification_mask);
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
+ output.supported_functions, output.notification_mask, output.version);
+
+ pdev->pmf_if_version = output.version;
return 0;
}
@@ -320,7 +448,7 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
- if (pmf_dev->hb_interval)
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1)
cancel_delayed_work_sync(&pmf_dev->heart_beat);
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
@@ -344,7 +472,7 @@ int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
goto out;
}
- if (pmf_dev->hb_interval) {
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1) {
/* send heartbeats only if the interval is not zero */
INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
schedule_delayed_work(&pmf_dev->heart_beat, 0);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index feaa09f5b35a..5d4f80698a8b 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -113,8 +113,9 @@ static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
- debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
- &current_power_limits_fops);
+ if (dev->pmf_if_version == PMF_IF_V1)
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
}
int amd_pmf_get_power_source(void)
@@ -296,7 +297,11 @@ static int amd_pmf_suspend_handler(struct device *dev)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- kfree(pdev->buf);
+ if (pdev->smart_pc_enabled)
+ cancel_delayed_work_sync(&pdev->pb_work);
+
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);
return 0;
}
@@ -312,6 +317,12 @@ static int amd_pmf_resume_handler(struct device *dev)
return ret;
}
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);
+
+ if (pdev->smart_pc_enabled)
+ schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
+
return 0;
}
@@ -330,9 +341,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
- if (!amd_pmf_init_smart_pc(dev)) {
+ amd_pmf_init_smart_pc(dev);
+ if (dev->smart_pc_enabled) {
dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
- } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ /* If Smart PC is enabled, no need to check for other features */
+ return;
+ }
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_init_auto_mode(dev);
dev_dbg(dev->dev, "Auto Mode Init done\n");
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
@@ -351,7 +367,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
amd_pmf_deinit_sps(dev);
}
- if (!dev->smart_pc_enabled) {
+ if (dev->smart_pc_enabled) {
amd_pmf_deinit_smart_pc(dev);
} else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
@@ -434,6 +450,8 @@ static int amd_pmf_probe(struct platform_device *pdev)
amd_pmf_dbgfs_register(dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);
dev_info(dev->dev, "registered PMF device successfully\n");
@@ -445,6 +463,8 @@ static void amd_pmf_remove(struct platform_device *pdev)
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
amd_pmf_deinit_features(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 16999c5b334f..8c4df5753f40 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -17,7 +17,11 @@
#define POLICY_BUF_MAX_SZ 0x4b000
#define POLICY_SIGN_COOKIE 0x31535024
#define POLICY_COOKIE_OFFSET 0x10
-#define POLICY_COOKIE_LEN 0x14
+
+struct cookie_header {
+ u32 sign;
+ u32 length;
+} __packed;
/* APMF Functions */
#define APMF_FUNC_VERIFY_INTERFACE 0
@@ -30,6 +34,7 @@
#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
#define APMF_FUNC_DYN_SLIDER_AC 11
#define APMF_FUNC_DYN_SLIDER_DC 12
+#define APMF_FUNC_SBIOS_HEARTBEAT_V2 16
/* Message Definitions */
#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
@@ -50,6 +55,8 @@
#define GET_STT_LIMIT_APU 0x20
#define GET_STT_LIMIT_HS2 0x21
#define SET_P3T 0x23 /* P3T: Peak Package Power Limit */
+#define SET_PMF_PPT 0x25
+#define SET_PMF_PPT_APU_ONLY 0x26
/* OS slider update notification */
#define DC_BEST_PERF 0
@@ -83,6 +90,47 @@
#define TA_OUTPUT_RESERVED_MEM 906
#define MAX_OPERATION_PARAMS 4
+#define PMF_IF_V1 1
+#define PMF_IF_V2 2
+
+#define APTS_MAX_STATES 16
+
+/* APTS PMF BIOS Interface */
+struct amd_pmf_apts_output {
+ u16 table_version;
+ u32 fan_table_idx;
+ u32 pmf_ppt;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 stt_skin_temp_limit_apu;
+ u8 stt_skin_temp_limit_hs2;
+} __packed;
+
+struct amd_pmf_apts_granular_output {
+ u16 size;
+ struct amd_pmf_apts_output val;
+} __packed;
+
+struct amd_pmf_apts_granular {
+ u16 size;
+ struct amd_pmf_apts_output val[APTS_MAX_STATES];
+};
+
+struct sbios_hb_event_v2 {
+ u16 size;
+ u8 load;
+ u8 unload;
+ u8 suspend;
+ u8 resume;
+} __packed;
+
+enum sbios_hb_v2 {
+ ON_LOAD,
+ ON_UNLOAD,
+ ON_SUSPEND,
+ ON_RESUME,
+};
+
/* AMD PMF BIOS interfaces */
struct apmf_verify_interface {
u16 size;
@@ -114,6 +162,18 @@ struct apmf_sbios_req {
u8 skin_temp_hs2;
} __packed;
+struct apmf_sbios_req_v2 {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u32 ppt_pmf;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+ u32 custom_policy[10];
+} __packed;
+
struct apmf_fan_idx {
u16 size;
u8 fan_ctl_mode;
@@ -194,6 +254,14 @@ enum power_modes {
POWER_MODE_MAX,
};
+enum power_modes_v2 {
+ POWER_MODE_BEST_PERFORMANCE,
+ POWER_MODE_BALANCED,
+ POWER_MODE_BEST_POWER_EFFICIENCY,
+ POWER_MODE_ENERGY_SAVE,
+ POWER_MODE_V2_MAX,
+};
+
struct amd_pmf_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
@@ -229,10 +297,15 @@ struct amd_pmf_dev {
struct delayed_work pb_work;
struct pmf_action_table *prev_data;
u64 policy_addr;
- void *policy_base;
+ void __iomem *policy_base;
bool smart_pc_enabled;
+ u16 pmf_if_version;
};
+struct apmf_sps_prop_granular_v2 {
+ u8 power_states[POWER_SOURCE_MAX][POWER_MODE_V2_MAX];
+} __packed;
+
struct apmf_sps_prop_granular {
u32 fppt;
u32 sppt;
@@ -254,6 +327,16 @@ struct amd_pmf_static_slider_granular {
struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
};
+struct apmf_static_slider_granular_output_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+} __packed;
+
+struct amd_pmf_static_slider_granular_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+};
+
struct os_power_slider {
u16 size;
u8 slider_event;
@@ -441,11 +524,6 @@ struct apmf_dyn_slider_output {
struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
} __packed;
-enum smart_pc_status {
- PMF_SMART_PC_ENABLED,
- PMF_SMART_PC_DISABLED,
-};
-
/* Smart PC - TA internals */
enum system_state {
SYSTEM_STATE_S0i3,
@@ -590,6 +668,7 @@ int amd_pmf_get_power_source(void);
int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
@@ -607,6 +686,10 @@ const char *amd_pmf_source_as_str(unsigned int state);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *dev,
+ struct apmf_static_slider_granular_output_v2 *data);
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
@@ -614,6 +697,7 @@ void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req);
void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 33e23e25c8b1..92f7fb22277d 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -10,9 +10,27 @@
#include "pmf.h"
+static struct amd_pmf_static_slider_granular_v2 config_store_v2;
static struct amd_pmf_static_slider_granular config_store;
+static struct amd_pmf_apts_granular apts_config_store;
#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *slider_v2_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_MODE_BEST_PERFORMANCE:
+ return "Best Performance";
+ case POWER_MODE_BALANCED:
+ return "Balanced";
+ case POWER_MODE_BEST_POWER_EFFICIENCY:
+ return "Best Power Efficiency";
+ case POWER_MODE_ENERGY_SAVE:
+ return "Energy Save";
+ default:
+ return "Unknown Power Mode";
+ }
+}
+
static const char *slider_as_str(unsigned int state)
{
switch (state) {
@@ -63,10 +81,88 @@ static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *dat
pr_debug("Static Slider Data - END\n");
}
+
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data)
+{
+ unsigned int i, j;
+
+ pr_debug("Static Slider APTS state index data - BEGIN");
+ pr_debug("size: %u\n", data->size);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ pr_debug("%s %s: %u\n", amd_pmf_source_as_str(i), slider_v2_as_str(j),
+ data->sps_idx.power_states[i][j]);
+
+ pr_debug("Static Slider APTS state index data - END\n");
+}
+
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info)
+{
+ int i;
+
+ pr_debug("Static Slider APTS index default values data - BEGIN");
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ pr_debug("Table Version[%d] = %u\n", i, info->val[i].table_version);
+ pr_debug("Fan Index[%d] = %u\n", i, info->val[i].fan_table_idx);
+ pr_debug("PPT[%d] = %u\n", i, info->val[i].pmf_ppt);
+ pr_debug("PPT APU[%d] = %u\n", i, info->val[i].ppt_pmf_apu_only);
+ pr_debug("STT Min[%d] = %u\n", i, info->val[i].stt_min_limit);
+ pr_debug("STT APU[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_apu);
+ pr_debug("STT HS2[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_hs2);
+ }
+
+ pr_debug("Static Slider APTS index default values data - END");
+}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) {}
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) {}
#endif
+static void amd_pmf_load_apts_defaults_sps_v2(struct amd_pmf_dev *pdev)
+{
+ struct amd_pmf_apts_granular_output output;
+ struct amd_pmf_apts_output *ps;
+ int i;
+
+ memset(&apts_config_store, 0, sizeof(apts_config_store));
+
+ ps = apts_config_store.val;
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ apts_get_static_slider_granular_v2(pdev, &output, i);
+ ps[i].table_version = output.val.table_version;
+ ps[i].fan_table_idx = output.val.fan_table_idx;
+ ps[i].pmf_ppt = output.val.pmf_ppt;
+ ps[i].ppt_pmf_apu_only = output.val.ppt_pmf_apu_only;
+ ps[i].stt_min_limit = output.val.stt_min_limit;
+ ps[i].stt_skin_temp_limit_apu = output.val.stt_skin_temp_limit_apu;
+ ps[i].stt_skin_temp_limit_hs2 = output.val.stt_skin_temp_limit_hs2;
+ }
+
+ amd_pmf_dump_apts_sps_defaults(&apts_config_store);
+}
+
+static void amd_pmf_load_defaults_sps_v2(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output_v2 output;
+ unsigned int i, j;
+
+ memset(&config_store_v2, 0, sizeof(config_store_v2));
+ apmf_get_static_slider_granular_v2(dev, &output);
+
+ config_store_v2.size = output.size;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ config_store_v2.sps_idx.power_states[i][j] =
+ output.sps_idx.power_states[i][j];
+
+ amd_pmf_dump_sps_defaults_v2(&config_store_v2);
+}
+
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
struct apmf_static_slider_granular_output output;
@@ -94,6 +190,19 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
amd_pmf_dump_sps_defaults(&config_store);
}
+static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
+{
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false,
+ apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ apts_config_store.val[idx].stt_min_limit, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL);
+}
+
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table)
{
@@ -126,6 +235,32 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
}
}
+static int amd_pmf_update_sps_power_limits_v2(struct amd_pmf_dev *pdev, int pwr_mode)
+{
+ int src, index;
+
+ src = amd_pmf_get_power_source();
+
+ switch (pwr_mode) {
+ case POWER_MODE_PERFORMANCE:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_PERFORMANCE];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_POWER_EFFICIENCY];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
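Editorial sketch of the v2 data flow implemented above (all names taken from this patch):

/*
 * platform profile + power source (src)
 *   -> config_store_v2.sps_idx.power_states[src][mode]   (APTS state index)
 *   -> apts_config_store.val[index]                      (PPT / STT limits)
 *   -> SET_PMF_PPT, SET_PMF_PPT_APU_ONLY, SET_STT_*      (SMU commands)
 */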
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
int mode;
@@ -134,6 +269,9 @@ int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
if (mode < 0)
return mode;
+ if (pmf->pmf_if_version == PMF_IF_V2)
+ return amd_pmf_update_sps_power_limits_v2(pmf, mode);
+
amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
return 0;
@@ -256,7 +394,12 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
dev->current_profile = PLATFORM_PROFILE_BALANCED;
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- amd_pmf_load_defaults_sps(dev);
+ if (dev->pmf_if_version == PMF_IF_V2) {
+ amd_pmf_load_defaults_sps_v2(dev);
+ amd_pmf_load_apts_defaults_sps_v2(dev);
+ } else {
+ amd_pmf_load_defaults_sps(dev);
+ }
/* update SPS balanced power mode thermals */
amd_pmf_set_sps_power_limits(dev);
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index f8c0177afb0d..b438de4d6bfc 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -246,21 +246,28 @@ static void amd_pmf_invoke_cmd(struct work_struct *work)
static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
{
- u32 cookie, length;
+ struct cookie_header *header;
int res;
- cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET);
- length = readl(dev->policy_buf + POLICY_COOKIE_LEN);
+ if (dev->policy_sz < POLICY_COOKIE_OFFSET + sizeof(*header))
+ return -EINVAL;
+
+ header = (struct cookie_header *)(dev->policy_buf + POLICY_COOKIE_OFFSET);
- if (cookie != POLICY_SIGN_COOKIE || !length)
+ if (header->sign != POLICY_SIGN_COOKIE || !header->length) {
+ dev_dbg(dev->dev, "cookie doesn't match\n");
+ return -EINVAL;
+ }
+
+ if (dev->policy_sz < header->length + 512)
return -EINVAL;
/* Update the actual length */
- dev->policy_sz = length + 512;
+ dev->policy_sz = header->length + 512;
res = amd_pmf_invoke_cmd_init(dev);
if (res == TA_PMF_TYPE_SUCCESS) {
/* Now its safe to announce that smart pc is enabled */
- dev->smart_pc_enabled = PMF_SMART_PC_ENABLED;
+ dev->smart_pc_enabled = true;
/*
* Start collecting the data from TA FW after a small delay
* or else, we might end up getting stale values.
@@ -268,8 +275,8 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
- dev->smart_pc_enabled = PMF_SMART_PC_DISABLED;
- return res;
+ dev->smart_pc_enabled = false;
+ return -EIO;
}
return 0;
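For reference, the policy-binary header layout these checks enforce (editorial note; offsets follow POLICY_COOKIE_OFFSET and struct cookie_header from pmf.h):

/*
 * offset 0x00..0x0f : opaque header
 * offset 0x10       : u32 sign    (must equal POLICY_SIGN_COOKIE, 0x31535024)
 * offset 0x14       : u32 length  -> dev->policy_sz = length + 512
 */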
@@ -309,8 +316,8 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
- if (ret)
- return -EINVAL;
+ if (ret < 0)
+ return ret;
return length;
}
@@ -336,25 +343,6 @@ static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
#endif
-static int amd_pmf_get_bios_buffer(struct amd_pmf_dev *dev)
-{
- dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
- if (!dev->policy_buf)
- return -ENOMEM;
-
- dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
- if (!dev->policy_base)
- return -ENOMEM;
-
- memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
-
- amd_pmf_hex_dump_pb(dev);
- if (pb_side_load)
- amd_pmf_open_pb(dev, dev->dbgfs_dir);
-
- return amd_pmf_start_policy_engine(dev);
-}
-
static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
{
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
@@ -453,22 +441,59 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return ret;
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
- amd_pmf_set_dram_addr(dev, true);
- amd_pmf_get_bios_buffer(dev);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ goto error;
+
+ dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+ if (!dev->policy_base) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+ amd_pmf_hex_dump_pb(dev);
+
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
- if (!dev->prev_data)
- return -ENOMEM;
+ if (!dev->prev_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret)
+ goto error;
+
+ if (pb_side_load)
+ amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+ return 0;
+
+error:
+ amd_pmf_deinit_smart_pc(dev);
- return dev->smart_pc_enabled;
+ return ret;
}
void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
{
- if (pb_side_load)
+ if (pb_side_load && dev->esbin)
amd_pmf_remove_pb(dev);
+ cancel_delayed_work_sync(&dev->pb_work);
kfree(dev->prev_data);
+ dev->prev_data = NULL;
kfree(dev->policy_buf);
- cancel_delayed_work_sync(&dev->pb_work);
+ dev->policy_buf = NULL;
+ kfree(dev->buf);
+ dev->buf = NULL;
amd_pmf_tee_deinit(dev);
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 18be35fdb381..3f07bbf809ef 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -101,13 +101,6 @@ module_param(fnlock_default, bool, 0444);
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
-#define ASUS_ACPI_UID_ATK "ATK"
-
-#define WMI_EVENT_QUEUE_SIZE 0x10
-#define WMI_EVENT_QUEUE_END 0x1
-#define WMI_EVENT_MASK 0xFFFF
-/* The WMI hotkey event value is always the same. */
-#define WMI_EVENT_VALUE_ATK 0xFF
#define WMI_EVENT_MASK 0xFFFF
@@ -219,7 +212,6 @@ struct asus_wmi {
int dsts_id;
int spec;
int sfun;
- bool wmi_event_queue;
struct input_dev *inputdev;
struct backlight_device *backlight_device;
@@ -489,7 +481,17 @@ static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
{
- return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
+ int err;
+
+ err = asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
+
+ if (err)
+ return err;
+
+ if (*retval == ~0)
+ return -ENODEV;
+
+ return 0;
}
static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
@@ -1620,7 +1622,6 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
- asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
asus->micmute_led.default_trigger = "audio-micmute";
@@ -4020,50 +4021,14 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
- int code;
- int i;
-
- for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
- code = asus_wmi_get_event_code(value);
- if (code < 0) {
- pr_warn("Failed to get notify code: %d\n", code);
- return;
- }
-
- if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
- return;
+ int code = asus_wmi_get_event_code(value);
- asus_wmi_handle_event_code(code, asus);
-
- /*
- * Double check that queue is present:
- * ATK (with queue) uses 0xff, ASUSWMI (without) 0xd2.
- */
- if (!asus->wmi_event_queue || value != WMI_EVENT_VALUE_ATK)
- return;
- }
-
- pr_warn("Failed to process event queue, last code: 0x%x\n", code);
-}
-
-static int asus_wmi_notify_queue_flush(struct asus_wmi *asus)
-{
- int code;
- int i;
-
- for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
- code = asus_wmi_get_event_code(WMI_EVENT_VALUE_ATK);
- if (code < 0) {
- pr_warn("Failed to get event during flush: %d\n", code);
- return code;
- }
-
- if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
- return 0;
+ if (code < 0) {
+ pr_warn("Failed to get notify code: %d\n", code);
+ return;
}
- pr_warn("Failed to flush event queue\n");
- return -EIO;
+ asus_wmi_handle_event_code(code, asus);
}
/* Sysfs **********************************************************************/
@@ -4303,23 +4268,6 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
asus->dsts_id = ASUS_WMI_METHODID_DSTS;
}
- /*
- * Some devices can have multiple event codes stored in a queue before
- * the module load if it was unloaded intermittently after calling
- * the INIT method (enables event handling). The WMI notify handler is
- * expected to retrieve all event codes until a retrieved code equals
- * queue end marker (One or Ones). Old codes are flushed from the queue
- * upon module load. Not enabling this when it should be has minimal
- * visible impact so fall back if anything goes wrong.
- */
- wmi_uid = wmi_get_acpi_device_uid(asus->driver->event_guid);
- if (wmi_uid && !strcmp(wmi_uid, ASUS_ACPI_UID_ATK)) {
- dev_info(dev, "Detected ATK, enable event queue\n");
-
- if (!asus_wmi_notify_queue_flush(asus))
- asus->wmi_event_queue = true;
- }
-
/* CWAP allow to define the behavior of the Fn+F2 key,
* this method doesn't seems to be present on Eee PCs */
if (asus->driver->quirks->wapf >= 0)
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index e712df67fa6b..bd9f445974cc 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -57,8 +57,6 @@ config DELL_LAPTOP
select POWER_SUPPLY
select LEDS_CLASS
select NEW_LEDS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
help
This driver adds support for rfkill and backlight control to Dell
laptops (except for some models covered by the Compal driver).
@@ -165,7 +163,6 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
- depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
depends on DELL_WMI
help
This option adds integration with the "Dell Hardware Privacy"
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 6586438356de..42f7de2b4522 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -2252,7 +2252,6 @@ static int __init dell_init(void)
if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE) &&
!dell_privacy_has_mic_mute()) {
- micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
if (ret < 0)
goto fail_led;
@@ -2261,7 +2260,6 @@ static int __init dell_init(void)
if (dell_smbios_find_token(GLOBAL_MUTE_DISABLE) &&
dell_smbios_find_token(GLOBAL_MUTE_ENABLE)) {
- mute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MUTE);
ret = led_classdev_register(&platform_device->dev, &mute_led_cdev);
if (ret < 0)
goto fail_backlight;
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index db1e9240dd02..0b2299f7a2de 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -882,6 +882,7 @@ static struct wmi_driver dell_wmi_ddv_driver = {
},
.id_table = dell_wmi_ddv_id_table,
.probe = dell_wmi_ddv_probe,
+ .no_singleton = true,
};
module_wmi_driver(dell_wmi_ddv_driver);
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
index c517bd45dd32..4b65e1655d42 100644
--- a/drivers/platform/x86/dell/dell-wmi-privacy.c
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
@@ -288,7 +288,6 @@ static int dell_privacy_leds_setup(struct device *dev)
priv->cdev.max_brightness = 1;
priv->cdev.brightness_set_blocking = dell_privacy_micmute_led_set;
priv->cdev.default_trigger = "audio-micmute";
- priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
return devm_led_classdev_register(dev, &priv->cdev);
}
@@ -298,10 +297,6 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
struct key_entry *keymap;
int ret, i, j;
- ret = wmi_has_guid(DELL_PRIVACY_GUID);
- if (!ret)
- pr_debug("Unable to detect available Dell privacy devices!\n");
-
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index b929b4f82420..9def7983d7d6 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -25,7 +25,7 @@ struct wmi_sysman_priv wmi_priv = {
/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
static int reset_option = -1;
-static struct class *fw_attr_class;
+static const struct class *fw_attr_class;
/**
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index fafe8eaf6e3e..dd8240009565 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -10,11 +10,11 @@
static DEFINE_MUTEX(fw_attr_lock);
static int fw_attr_inuse;
-static struct class firmware_attributes_class = {
+static const struct class firmware_attributes_class = {
.name = "firmware-attributes",
};
-int fw_attributes_class_get(struct class **fw_attr_class)
+int fw_attributes_class_get(const struct class **fw_attr_class)
{
int err;
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
index 486485cb1f54..363c75f1ac1b 100644
--- a/drivers/platform/x86/firmware_attributes_class.h
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -5,7 +5,7 @@
#ifndef FW_ATTR_CLASS_H
#define FW_ATTR_CLASS_H
-int fw_attributes_class_get(struct class **fw_attr_class);
+int fw_attributes_class_get(const struct class **fw_attr_class);
int fw_attributes_class_put(void);
#endif /* FW_ATTR_CLASS_H */
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 085e044e888e..94480af49467 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -49,6 +49,8 @@
#include <linux/kfifo.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <acpi/battery.h>
#include <acpi/video.h>
#define FUJITSU_DRIVER_VERSION "0.6.0"
@@ -97,6 +99,10 @@
#define BACKLIGHT_OFF (BIT(0) | BIT(1))
#define BACKLIGHT_ON 0
+/* FUNC interface - battery control interface */
+#define FUNC_S006_METHOD 0x1006
+#define CHARGE_CONTROL_RW 0x21
+
/* Scancodes read from the GIRB register */
#define KEY1_CODE 0x410
#define KEY2_CODE 0x411
@@ -132,6 +138,7 @@ struct fujitsu_laptop {
spinlock_t fifo_lock;
int flags_supported;
int flags_state;
+ bool charge_control_supported;
};
static struct acpi_device *fext;
@@ -164,6 +171,110 @@ static int call_fext_func(struct acpi_device *device,
return value;
}
+/* Battery charge control code */
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int cc_end_value, s006_cc_return;
+ int value, ret;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value < 50 || value > 100)
+ return -EINVAL;
+
+ cc_end_value = value * 0x100 + 0x20;
+ s006_cc_return = call_fext_func(fext, FUNC_S006_METHOD,
+ CHARGE_CONTROL_RW, cc_end_value, 0x0);
+ if (s006_cc_return < 0)
+ return s006_cc_return;
+ /*
+ * The S006 0x21 method returns 0x00 in case the provided value
+ * is invalid.
+ */
+ if (s006_cc_return == 0x00)
+ return -EINVAL;
+
+ return count;
+}
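A minimal sketch of the argument encoding used above. This is an editorial illustration; the meaning of the 0x20 low-byte selector is an assumption inferred from the code.

static u32 fujitsu_s006_charge_limit_arg(unsigned int percent)
{
	/* the percentage goes in the high byte, e.g. 80 -> 0x5020 */
	return percent * 0x100 + 0x20;
}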
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int status;
+
+ status = call_fext_func(fext, FUNC_S006_METHOD,
+ CHARGE_CONTROL_RW, 0x21, 0x0);
+ if (status < 0)
+ return status;
+
+ return sysfs_emit(buf, "%d\n", status);
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+/* ACPI battery hook */
+static int fujitsu_battery_add_hook(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ return device_create_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+}
+
+static int fujitsu_battery_remove_hook(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ device_remove_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = fujitsu_battery_add_hook,
+ .remove_battery = fujitsu_battery_remove_hook,
+ .name = "Fujitsu Battery Extension",
+};
+
+/*
+ * These functions are intended to be called from acpi_fujitsu_laptop_add and
+ * acpi_fujitsu_laptop_remove.
+ */
+static int fujitsu_battery_charge_control_add(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+ int s006_cc_return;
+
+ priv->charge_control_supported = false;
+ /*
+ * Check if the S006 0x21 method exists by trying to get the current
+ * battery charge limit.
+ */
+ s006_cc_return = call_fext_func(fext, FUNC_S006_METHOD,
+ CHARGE_CONTROL_RW, 0x21, 0x0);
+ if (s006_cc_return < 0)
+ return s006_cc_return;
+ if (s006_cc_return == UNSUPPORTED_CMD)
+ return -ENODEV;
+
+ priv->charge_control_supported = true;
+ battery_hook_register(&battery_hook);
+
+ return 0;
+}
+
+static void fujitsu_battery_charge_control_remove(struct acpi_device *device)
+{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+
+ if (priv->charge_control_supported)
+ battery_hook_unregister(&battery_hook);
+}
+
/* Hardware access for LCD brightness control */
static int set_lcd_level(struct acpi_device *device, int level)
@@ -839,6 +950,10 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
if (ret)
goto err_free_fifo;
+ ret = fujitsu_battery_charge_control_add(device);
+ if (ret < 0)
+ pr_warn("Unable to register battery charge control: %d\n", ret);
+
return 0;
err_free_fifo:
@@ -851,6 +966,8 @@ static void acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
+ fujitsu_battery_charge_control_remove(device);
+
fujitsu_laptop_platform_remove(device);
kfifo_free(&priv->fifo);
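A minimal user-space sketch (not part of the patch above) of how the S006 charge-control argument is built from a percentage, assuming only the encoding visible in charge_control_end_threshold_store(); the helper name and printed values are illustrative:

#include <stdio.h>

/* Encode a charge limit percentage the way the store handler does: the
 * percentage goes into the high byte; 0x20 is the fixed low byte used by
 * the driver (its exact meaning is firmware-defined). */
static unsigned int fujitsu_cc_encode(unsigned int percent)
{
	return percent * 0x100 + 0x20;
}

int main(void)
{
	for (unsigned int p = 50; p <= 100; p += 25)
		printf("limit %u%% -> S006 0x21 argument 0x%04x\n",
		       p, fujitsu_cc_encode(p));
	return 0;
}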
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 8c9f4f3227fc..2dc50152158a 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -24,7 +24,7 @@ struct bioscfg_priv bioscfg_drv = {
.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
};
-static struct class *fw_attr_class;
+static const struct class *fw_attr_class;
ssize_t display_name_language_code_show(struct kobject *kobj,
struct kobj_attribute *attr,
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index e536604225c5..630519c08617 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -29,15 +29,19 @@
#include <linux/dmi.h>
MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
-MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
+MODULE_DESCRIPTION("HP laptop WMI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
-MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
-#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+#define HPWMI_BIOS_GUID "5FB7F034-2C63-45E9-BE91-3D44E2C707E4"
+
+#define HP_OMEN_EC_THERMAL_PROFILE_FLAGS_OFFSET 0x62
+#define HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET 0x63
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
/* DMI board names of devices that should use the omen specific path for
@@ -55,17 +59,25 @@ static const char * const omen_thermal_profile_boards[] = {
"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
- "8917", "8918", "8949", "894A", "89EB"
+ "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42"
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
* profile version 0 by the Omen Command Center app, regardless of what
* the get system design information WMI call returns
*/
-static const char *const omen_thermal_profile_force_v0_boards[] = {
+static const char * const omen_thermal_profile_force_v0_boards[] = {
"8607", "8746", "8747", "8749", "874A", "8748"
};
+/* DMI board names of Omen laptops that have a thermal profile timer which will
+ * cause the embedded controller to set the thermal profile back to
+ * "balanced" when reaching zero.
+ */
+static const char * const omen_timed_thermal_profile_boards[] = {
+ "8BAD", "8A42"
+};
+
/* DMI Board names of Victus laptops */
static const char * const victus_thermal_profile_boards[] = {
"8A25"
@@ -182,6 +194,12 @@ enum hp_thermal_profile_omen_v1 {
HP_OMEN_V1_THERMAL_PROFILE_COOL = 0x50,
};
+enum hp_thermal_profile_omen_flags {
+ HP_OMEN_EC_FLAGS_TURBO = 0x04,
+ HP_OMEN_EC_FLAGS_NOTIMER = 0x02,
+ HP_OMEN_EC_FLAGS_JUSTSET = 0x01,
+};
+
enum hp_thermal_profile_victus {
HP_VICTUS_THERMAL_PROFILE_DEFAULT = 0x00,
HP_VICTUS_THERMAL_PROFILE_PERFORMANCE = 0x01,
@@ -449,7 +467,11 @@ static int hp_wmi_get_tablet_mode(void)
static int omen_thermal_profile_set(int mode)
{
- char buffer[2] = {0, mode};
+ /* The Omen Control Center actively sets the first byte of the buffer to
+ * 255, so let's mimic this behaviour to be as close as possible to
+ * the original software.
+ */
+ char buffer[2] = {-1, mode};
int ret;
ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
@@ -1201,10 +1223,33 @@ static int platform_profile_omen_get(struct platform_profile_handler *pprof,
return 0;
}
+static bool has_omen_thermal_profile_ec_timer(void)
+{
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (!board_name)
+ return false;
+
+ return match_string(omen_timed_thermal_profile_boards,
+ ARRAY_SIZE(omen_timed_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+inline int omen_thermal_profile_ec_flags_set(enum hp_thermal_profile_omen_flags flags)
+{
+ return ec_write(HP_OMEN_EC_THERMAL_PROFILE_FLAGS_OFFSET, flags);
+}
+
+inline int omen_thermal_profile_ec_timer_set(u8 value)
+{
+ return ec_write(HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET, value);
+}
+
static int platform_profile_omen_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
int err, tp, tp_version;
+ enum hp_thermal_profile_omen_flags flags = 0;
tp_version = omen_get_thermal_policy_version();
@@ -1238,6 +1283,20 @@ static int platform_profile_omen_set(struct platform_profile_handler *pprof,
if (err < 0)
return err;
+ if (has_omen_thermal_profile_ec_timer()) {
+ err = omen_thermal_profile_ec_timer_set(0);
+ if (err < 0)
+ return err;
+
+ if (profile == PLATFORM_PROFILE_PERFORMANCE)
+ flags = HP_OMEN_EC_FLAGS_NOTIMER |
+ HP_OMEN_EC_FLAGS_TURBO;
+
+ err = omen_thermal_profile_ec_flags_set(flags);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
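For reference, a small user-space sketch (assumptions noted in the comments) of how the EC flag byte is composed for the timed Omen boards handled above; the enum mirrors the patch, everything else is illustrative:

#include <stdio.h>

/* Mirrors enum hp_thermal_profile_omen_flags from the patch. */
enum omen_ec_flags {
	EC_FLAGS_TURBO   = 0x04,
	EC_FLAGS_NOTIMER = 0x02,
	EC_FLAGS_JUSTSET = 0x01,
};

/* On boards with the EC timer, the timer is cleared and, for the
 * performance profile only, NOTIMER | TURBO is written so the EC does not
 * fall back to "balanced" when the timer would reach zero. */
static unsigned char omen_flags_for_profile(int performance)
{
	return performance ? (EC_FLAGS_NOTIMER | EC_FLAGS_TURBO) : 0;
}

int main(void)
{
	printf("performance profile: flags=0x%02x\n", omen_flags_for_profile(1));
	printf("other profiles:      flags=0x%02x\n", omen_flags_for_profile(0));
	return 0;
}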
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 0ef1c46b617b..dde139c69945 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -310,7 +310,6 @@ static void huawei_wmi_leds_setup(struct device *dev)
huawei->cdev.max_brightness = 1;
huawei->cdev.brightness_set_blocking = &huawei_wmi_micmute_led_set;
huawei->cdev.default_trigger = "audio-micmute";
- huawei->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
huawei->cdev.dev = dev;
huawei->cdev.flags = LED_CORE_SUSPENDRESUME;
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 2ab7d9ac542d..1d4bbae115f1 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -179,7 +179,7 @@ static ssize_t rtl_set_state(struct device *dev,
return ret;
}
-static struct bus_type rtl_subsys = {
+static const struct bus_type rtl_subsys = {
.name = "ibm_rtl",
.dev_name = "ibm_rtl",
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 88eefccb6ed2..901849810ce2 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1091,6 +1091,8 @@ static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } },
{ KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } },
{ KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } },
+ /* Refresh Rate Toggle (Fn+R) */
+ { KE_KEY, 0x10 | IDEAPAD_WMI_KEY, { KEY_REFRESH_RATE_TOGGLE } },
/* Dark mode toggle */
{ KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
/* Sound profile switch */
@@ -1100,7 +1102,7 @@ static const struct key_entry ideapad_keymap[] = {
/* Lenovo Support */
{ KE_KEY, 0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } },
/* Refresh Rate Toggle */
- { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_DISPLAYTOGGLE } },
+ { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_REFRESH_RATE_TOGGLE } },
{ KE_END },
};
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 2cf3b4a8813f..584c44387e10 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -383,7 +383,7 @@ int ifs_load_firmware(struct device *dev)
unsigned int expected_size;
const struct firmware *fw;
char scan_path[64];
- int ret = -EINVAL;
+ int ret;
snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan",
test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index 13ecd55c6668..95b4b71fab53 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -23,6 +23,12 @@
/* Max retries on the same chunk */
#define MAX_IFS_RETRIES 5
+struct run_params {
+ struct ifs_data *ifsd;
+ union ifs_scan *activate;
+ union ifs_status status;
+};
+
/*
* Number of TSC cycles that a logical CPU will wait for the other
* logical CPU on the core in the WRMSR(ACTIVATE_SCAN).
@@ -134,19 +140,56 @@ static bool can_restart(union ifs_status status)
return false;
}
+#define SPINUNIT 100 /* 100 nsec */
+static atomic_t array_cpus_in;
+static atomic_t scan_cpus_in;
+
+/*
+ * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
+ */
+static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
+{
+ int cpu = smp_processor_id();
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+ int all_cpus = cpumask_weight(smt_mask);
+
+ atomic_inc(t);
+ while (atomic_read(t) < all_cpus) {
+ if (timeout < SPINUNIT)
+ return;
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
+ touch_nmi_watchdog();
+ }
+}
+
/*
* Execute the scan. Called "simultaneously" on all threads of a core
* at high priority using the stop_cpus mechanism.
*/
static int doscan(void *data)
{
- int cpu = smp_processor_id();
- u64 *msrs = data;
+ int cpu = smp_processor_id(), start, stop;
+ struct run_params *params = data;
+ union ifs_status status;
+ struct ifs_data *ifsd;
int first;
+ ifsd = params->ifsd;
+
+ if (ifsd->generation) {
+ start = params->activate->gen2.start;
+ stop = params->activate->gen2.stop;
+ } else {
+ start = params->activate->gen0.start;
+ stop = params->activate->gen0.stop;
+ }
+
/* Only the first logical CPU on a core reports result */
first = cpumask_first(cpu_smt_mask(cpu));
+ wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC);
+
/*
* This WRMSR will wait for other HT threads to also write
* to this MSR (at most for activate.delay cycles). Then it
@@ -155,12 +198,14 @@ static int doscan(void *data)
* take up to 200 milliseconds (in the case where all chunks
* are processed in a single pass) before it retires.
*/
- wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]);
+ wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data);
+ rdmsrl(MSR_SCAN_STATUS, status.data);
- if (cpu == first) {
- /* Pass back the result of the scan */
- rdmsrl(MSR_SCAN_STATUS, msrs[1]);
- }
+ trace_ifs_status(ifsd->cur_batch, start, stop, status.data);
+
+ /* Pass back the result of the scan */
+ if (cpu == first)
+ params->status = status;
return 0;
}
@@ -179,7 +224,7 @@ static void ifs_test_core(int cpu, struct device *dev)
struct ifs_data *ifsd;
int to_start, to_stop;
int status_chunk;
- u64 msrvals[2];
+ struct run_params params;
int retries;
ifsd = ifs_get_data(dev);
@@ -190,6 +235,8 @@ static void ifs_test_core(int cpu, struct device *dev)
to_start = 0;
to_stop = ifsd->valid_chunks - 1;
+ params.ifsd = ifs_get_data(dev);
+
if (ifsd->generation) {
activate.gen2.start = to_start;
activate.gen2.stop = to_stop;
@@ -207,12 +254,11 @@ static void ifs_test_core(int cpu, struct device *dev)
break;
}
- msrvals[0] = activate.data;
- stop_core_cpuslocked(cpu, doscan, msrvals);
-
- status.data = msrvals[1];
+ params.activate = &activate;
+ atomic_set(&scan_cpus_in, 0);
+ stop_core_cpuslocked(cpu, doscan, &params);
- trace_ifs_status(cpu, to_start, to_stop, status.data);
+ status = params.status;
/* Some cases can be retried, give up for others */
if (!can_restart(status))
@@ -250,34 +296,14 @@ static void ifs_test_core(int cpu, struct device *dev)
}
}
-#define SPINUNIT 100 /* 100 nsec */
-static atomic_t array_cpus_out;
-
-/*
- * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
- */
-static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
-{
- int cpu = smp_processor_id();
- const struct cpumask *smt_mask = cpu_smt_mask(cpu);
- int all_cpus = cpumask_weight(smt_mask);
-
- atomic_inc(t);
- while (atomic_read(t) < all_cpus) {
- if (timeout < SPINUNIT)
- return;
- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
- touch_nmi_watchdog();
- }
-}
-
static int do_array_test(void *data)
{
union ifs_array *command = data;
int cpu = smp_processor_id();
int first;
+ wait_for_sibling_cpu(&array_cpus_in, NSEC_PER_SEC);
+
/*
* Only one logical CPU on a core needs to trigger the Array test via MSR write.
*/
@@ -289,9 +315,6 @@ static int do_array_test(void *data)
rdmsrl(MSR_ARRAY_BIST, command->data);
}
- /* Tests complete faster if the sibling is spinning here */
- wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC);
-
return 0;
}
@@ -312,7 +335,7 @@ static void ifs_array_test_core(int cpu, struct device *dev)
timed_out = true;
break;
}
- atomic_set(&array_cpus_out, 0);
+ atomic_set(&array_cpus_in, 0);
stop_core_cpuslocked(cpu, do_array_test, &command);
if (command.ctrl_result)
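A user-space analogue (illustrative only; the thread count and timing are made up) of the wait_for_sibling_cpu() rendezvous that now runs before both the scan and the array test:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define SPINUNIT_NS 100   /* matches SPINUNIT in the patch */
#define NTHREADS    2     /* stand-in for the SMT siblings of one core */

static atomic_int cpus_in;

/* Each "sibling" checks in and spins until all peers have arrived or the
 * timeout budget is used up; either way it then proceeds with the test. */
static void wait_for_peers(atomic_int *t, long long timeout_ns)
{
	atomic_fetch_add(t, 1);
	while (atomic_load(t) < NTHREADS) {
		if (timeout_ns < SPINUNIT_NS)
			return;
		struct timespec ts = { 0, SPINUNIT_NS };
		nanosleep(&ts, NULL);          /* stand-in for ndelay() */
		timeout_ns -= SPINUNIT_NS;
	}
}

static void *worker(void *arg)
{
	(void)arg;
	wait_for_peers(&cpus_in, 1000000000LL); /* ~1 s budget, as in the patch */
	return NULL;
}

int main(void)
{
	pthread_t th[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&th[i], NULL, worker, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(th[i], NULL);
	printf("all %d threads passed the rendezvous\n", NTHREADS);
	return 0;
}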
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index b6708bab7c53..527d8fbc7cc1 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
* IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
- IRQF_SHARED, "INT0002", chip);
+ IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
return ret;
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 683ae828276b..34b4cd23bfe5 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -673,6 +673,7 @@ static struct pmc_info arl_pmc_info_list[] = {
};
#define ARL_NPU_PCI_DEV 0xad1d
+#define ARL_GNA_PCI_DEV 0xae4c
/*
* Set power state of select devices that do not have drivers to D3
* so that they do not block Package C entry.
@@ -680,6 +681,7 @@ static struct pmc_info arl_pmc_info_list[] = {
static void arl_d3_fixup(void)
{
pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_GNA_PCI_DEV);
}
static int arl_resume(struct pmc_dev *pmcdev)
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 8f9c036809c7..10c96c1a850a 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1389,6 +1389,15 @@ static int pmc_core_probe(struct platform_device *pdev)
return -ENOMEM;
pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;
+ /* The last element in msr_map is empty */
+ pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
+ pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
+ pmcdev->num_of_pkgc,
+ sizeof(*pmcdev->pkgc_res_cnt),
+ GFP_KERNEL);
+ if (!pmcdev->pkgc_res_cnt)
+ return -ENOMEM;
+
/*
* Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
* Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
@@ -1432,6 +1441,7 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ unsigned int i;
if (pmcdev->suspend)
pmcdev->suspend(pmcdev);
@@ -1440,9 +1450,11 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
if (pm_suspend_via_firmware())
return 0;
- /* Save PC10 residency for checking later */
- if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
- return -EIO;
+ /* Save PKGC residency for checking later */
+ for (i = 0; i < pmcdev->num_of_pkgc; i++) {
+ if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
+ return -EIO;
+ }
/* Save S0ix residency for checking later */
if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
@@ -1451,14 +1463,15 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
return 0;
}
-static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
{
- u64 pc10_counter;
+ u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
+ u64 deepest_pkgc_residency;
- if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
return false;
- if (pc10_counter == pmcdev->pc10_counter)
+ if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
return true;
return false;
@@ -1497,10 +1510,22 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
if (!warn_on_s0ix_failures)
return 0;
- if (pmc_core_is_pc10_failed(pmcdev)) {
- /* S0ix failed because of PC10 entry failure */
- dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
- pmcdev->pc10_counter);
+ if (pmc_core_is_deepest_pkgc_failed(pmcdev)) {
+ /* S0ix failed because of deepest PKGC entry failure */
+ dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
+ msr_map[pmcdev->num_of_pkgc - 1].name,
+ msr_map[pmcdev->num_of_pkgc - 1].name,
+ pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);
+
+ for (i = 0; i < pmcdev->num_of_pkgc; i++) {
+ u64 pc_cnt;
+
+ if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
+ dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
+ msr_map[i].name, pmcdev->pkgc_res_cnt[i],
+ msr_map[i].name, pc_cnt);
+ }
+ }
return 0;
}
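A minimal sketch (user space, made-up counter values) of the resume-side check introduced above: the deepest package C-state is considered missed when its residency counter has not advanced since suspend:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The suspend path saves every package C-state residency counter; on resume
 * the deepest one (last array entry) is re-read, and the S0ix failure is
 * blamed on the package if that counter did not move. */
static bool deepest_pkgc_failed(const uint64_t *saved, const uint64_t *now,
				unsigned int num_of_pkgc)
{
	return now[num_of_pkgc - 1] == saved[num_of_pkgc - 1];
}

int main(void)
{
	/* Made-up counters for, say, PC2 / PC6 / PC10 */
	uint64_t before[] = { 100, 50, 10 };
	uint64_t after[]  = { 180, 90, 10 };   /* deepest counter unchanged */

	if (deepest_pkgc_failed(before, after, 3))
		printf("CPU did not enter the deepest package C-state\n");
	return 0;
}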
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 54137faaae2b..83504c49a0e3 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -385,7 +385,8 @@ struct pmc {
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
* @mutex_lock: mutex to complete one transaction
- * @pc10_counter: PC10 residency counter
+ * @pkgc_res_cnt: Array of PKGC residency counters
+ * @num_of_pkgc: Number of PKGC
* @s0ix_counter: S0ix residency (step adjusted)
* @num_lpm_modes: Count of enabled modes
* @lpm_en_modes: Array of enabled modes from lowest to highest priority
@@ -403,13 +404,15 @@ struct pmc_dev {
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
- u64 pc10_counter;
u64 s0ix_counter;
int num_lpm_modes;
int lpm_en_modes[LPM_MAX_NUM_MODES];
void (*suspend)(struct pmc_dev *pmcdev);
int (*resume)(struct pmc_dev *pmcdev);
+ u64 *pkgc_res_cnt;
+ u8 num_of_pkgc;
+
bool has_die_c6;
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index abad17cdd3d7..068d72504683 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -13,21 +13,6 @@
#include "core.h"
-#define SOCM_LPM_REQ_GUID 0x11594920
-
-#define PMC_DEVID_SOCM 0xa87f
-
-static const u8 LNL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
-
-static struct pmc_info lnl_pmc_info_list[] = {
- {
- .guid = SOCM_LPM_REQ_GUID,
- .devid = PMC_DEVID_SOCM,
- .map = &lnl_socm_reg_map,
- },
- {}
-};
-
const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
@@ -490,7 +475,6 @@ const struct pmc_reg_map lnl_socm_reg_map = {
.lpm_sts = lnl_lpm_maps,
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
- .lpm_reg_index = LNL_LPM_REG_INDEX,
};
#define LNL_NPU_PCI_DEV 0x643e
@@ -517,33 +501,19 @@ static int lnl_resume(struct pmc_dev *pmcdev)
int lnl_core_init(struct pmc_dev *pmcdev)
{
int ret;
- int func = 2;
- bool ssram_init = true;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
lnl_d3_fixup();
pmcdev->suspend = cnl_suspend;
pmcdev->resume = lnl_resume;
- pmcdev->regmap_list = lnl_pmc_info_list;
- ret = pmc_core_ssram_init(pmcdev, func);
-
- /* If regbase not assigned, set map and discover using legacy method */
- if (ret) {
- ssram_init = false;
- pmc->map = &lnl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
- pmc_core_get_low_power_modes(pmcdev);
+ pmc->map = &lnl_socm_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
- if (ssram_init) {
- ret = pmc_core_ssram_get_lpm_reqs(pmcdev);
- if (ret)
- return ret;
- }
+ pmc_core_get_low_power_modes(pmcdev);
return 0;
}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 2662fbbddf0c..1d918000d72b 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -462,10 +462,10 @@ static long isst_if_core_power_state(void __user *argp)
struct tpmi_per_power_domain_info *power_domain_info;
struct isst_core_power core_power;
- if (disable_dynamic_sst_features())
+ if (copy_from_user(&core_power, argp, sizeof(core_power)))
return -EFAULT;
- if (copy_from_user(&core_power, argp, sizeof(core_power)))
+ if (core_power.get_set && disable_dynamic_sst_features())
return -EFAULT;
power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index e73cdea67fff..910df7c654f4 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -96,7 +96,7 @@ struct intel_tpmi_pfs_entry {
*/
struct intel_tpmi_pm_feature {
struct intel_tpmi_pfs_entry pfs_header;
- unsigned int vsec_offset;
+ u64 vsec_offset;
struct intel_vsec_device *vsec_dev;
};
@@ -376,7 +376,7 @@ static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
read_blocked = feature_state.read_blocked ? 'Y' : 'N';
write_blocked = feature_state.write_blocked ? 'Y' : 'N';
}
- seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\t\t%c\t\t%c\n",
+ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
@@ -395,7 +395,8 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
struct intel_tpmi_pm_feature *pfs = s->private;
int count, ret = 0;
void __iomem *mem;
- u32 off, size;
+ u32 size;
+ u64 off;
u8 *buffer;
size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
@@ -411,7 +412,7 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
mutex_lock(&tpmi_dev_lock);
for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
- seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
+ seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);
mem = ioremap(off, size);
if (!mem) {
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index a5e0f5c22179..b89c0dda9e5d 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -242,7 +242,7 @@ static int __init intel_uncore_init(void)
return -ENODEV;
uncore_max_entries = topology_max_packages() *
- topology_max_die_per_package();
+ topology_max_dies_per_package();
uncore_instances = kcalloc(uncore_max_entries,
sizeof(*uncore_instances), GFP_KERNEL);
if (!uncore_instances)
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 210b0a81b7ec..084c355c86f5 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
sparse_keymap_report_event(input_dev, event, val, autorelease);
-
- /* Some devices need this to report further events */
- acpi_evaluate_object(handle, "VBDL", NULL, NULL);
}
/*
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 778eb0aa3479..0fdfaf3a4f5c 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -236,10 +236,7 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev,
for ( ; *header; header++) {
ret = intel_vsec_add_dev(pdev, *header, info);
- if (ret)
- dev_info(&pdev->dev, "Could not add device for VSEC id %d\n",
- (*header)->id);
- else
+ if (!ret)
have_devices = true;
}
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 040153ad67c1..75c82c08117f 100644
--- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -131,6 +131,7 @@ static struct wmi_driver intel_wmi_sbl_fw_update_driver = {
.probe = intel_wmi_sbl_fw_update_probe,
.remove = intel_wmi_sbl_fw_update_remove,
.id_table = intel_wmi_sbl_id_table,
+ .no_singleton = true,
};
module_wmi_driver(intel_wmi_sbl_fw_update_driver);
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index e2ad3f46f356..08df560a2c7a 100644
--- a/drivers/platform/x86/intel/wmi/thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -63,6 +63,7 @@ static struct wmi_driver intel_wmi_thunderbolt_driver = {
.dev_groups = tbt_groups,
},
.id_table = intel_wmi_thunderbolt_id_table,
+ .no_singleton = true,
};
module_wmi_driver(intel_wmi_thunderbolt_driver);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index b7c10c15a3d6..7d87cbd4b9c6 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -22,7 +22,7 @@
static int major;
-struct intel_scu_ipc_dev *scu;
+static struct intel_scu_ipc_dev *scu;
static DEFINE_MUTEX(scu_lock);
/* IOCTL commands */
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
index d904fad499aa..dbf0310448da 100644
--- a/drivers/platform/x86/intel_scu_pcidrv.c
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
static int intel_scu_pci_probe(struct pci_dev *pdev,
diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c
index c2479777a1d6..a5031a25632e 100644
--- a/drivers/platform/x86/intel_scu_wdt.c
+++ b/drivers/platform/x86/intel_scu_wdt.c
@@ -13,7 +13,6 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
-#include <asm/intel-mid.h>
#include <asm/io_apic.h>
#include <asm/hw_irq.h>
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 6bd14d0132db..3d66e1d4eb1f 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -20,9 +20,11 @@
#define P2SBC_HIDE BIT(8)
#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0)
+#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
{}
};
@@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
{
- unsigned int slot, fn;
-
- if (PCI_FUNC(devfn) == 0) {
- /*
- * When function number of the P2SB device is zero, scan it and
- * other function numbers, and if devices are available, cache
- * their BAR0s.
- */
- slot = PCI_SLOT(devfn);
- for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
- p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
- } else {
- /* Scan the P2SB device and cache its BAR0 */
- p2sb_scan_and_cache_devfn(bus, devfn);
- }
+ /* Scan the P2SB device and cache its BAR0 */
+ p2sb_scan_and_cache_devfn(bus, devfn);
+
+ /* On Goldmont p2sb_bar() also gets called for the SPI controller */
+ if (devfn == P2SB_DEVFN_GOLDMONT)
+ p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
return -ENOENT;
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 93a6414c6611..0aa7076bc9cc 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
@@ -17,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
+#include <linux/suspend.h>
struct pmc_bit_map {
const char *name;
@@ -448,6 +450,82 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
return 0;
}
+#ifdef CONFIG_SUSPEND
+static void pmc_dev_state_check(u32 sts, const struct pmc_bit_map *sts_map,
+ u32 fd, const struct pmc_bit_map *fd_map,
+ u32 sts_possible_false_pos)
+{
+ int index;
+
+ for (index = 0; sts_map[index].name; index++) {
+ if (!(fd_map[index].bit_mask & fd) &&
+ !(sts_map[index].bit_mask & sts)) {
+ if (sts_map[index].bit_mask & sts_possible_false_pos)
+ pm_pr_dbg("%s is in D0 prior to s2idle\n",
+ sts_map[index].name);
+ else
+ pr_err("%s is in D0 prior to s2idle\n",
+ sts_map[index].name);
+ }
+ }
+}
+
+static void pmc_s2idle_check(void)
+{
+ struct pmc_dev *pmc = &pmc_device;
+ const struct pmc_reg_map *m = pmc->map;
+ u32 func_dis, func_dis_2;
+ u32 d3_sts_0, d3_sts_1;
+ u32 false_pos_sts_0, false_pos_sts_1;
+ int i;
+
+ func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
+ func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
+ d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
+ d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
+
+ /*
+ * Some blocks are not used on lower-featured versions of the SoC and
+ * always report D0, add these to false_pos mask to log at debug level.
+ */
+ if (m->d3_sts_1 == byt_d3_sts_1_map) {
+ /* Bay Trail */
+ false_pos_sts_0 = BIT_GBE | BIT_SATA | BIT_PCIE_PORT0 |
+ BIT_PCIE_PORT1 | BIT_PCIE_PORT2 | BIT_PCIE_PORT3 |
+ BIT_LPSS2_F5_I2C5;
+ false_pos_sts_1 = BIT_SMB | BIT_USH_SS_PHY | BIT_DFX;
+ } else {
+ /* Cherry Trail */
+ false_pos_sts_0 = BIT_GBE | BIT_SATA | BIT_LPSS2_F7_I2C7;
+ false_pos_sts_1 = BIT_SMB | BIT_STS_ISH;
+ }
+
+ pmc_dev_state_check(d3_sts_0, m->d3_sts_0, func_dis, m->func_dis, false_pos_sts_0);
+ pmc_dev_state_check(d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2, false_pos_sts_1);
+
+ /* Forced-on PMC clocks prevent S0i3 */
+ for (i = 0; i < PMC_CLK_NUM; i++) {
+ u32 ctl = pmc_reg_read(pmc, PMC_CLK_CTL_OFFSET + 4 * i);
+
+ if ((ctl & PMC_MASK_CLK_CTL) != PMC_CLK_CTL_FORCE_ON)
+ continue;
+
+ pr_err("clock %d is ON prior to freeze (ctl 0x%08x)\n", i, ctl);
+ }
+}
+
+static struct acpi_s2idle_dev_ops pmc_s2idle_ops = {
+ .check = pmc_s2idle_check,
+};
+
+static void pmc_s2idle_check_register(void)
+{
+ acpi_register_lps0_dev(&pmc_s2idle_ops);
+}
+#else
+static void pmc_s2idle_check_register(void) {}
+#endif
+
static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pmc_dev *pmc = &pmc_device;
@@ -485,6 +563,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "platform clocks register failed: %d\n",
ret);
+ pmc_s2idle_check_register();
pmc->init = true;
return ret;
}
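A small user-space sketch of the predicate at the heart of pmc_dev_state_check() above: a block is reported when it is neither function-disabled nor in D3, and known false positives only get a debug note. The map entries and register values below are made up for illustration:

#include <stdio.h>
#include <stdint.h>

struct pmc_bit_map { const char *name; uint32_t bit_mask; };

static void check(uint32_t d3_sts, const struct pmc_bit_map *sts_map,
		  uint32_t func_dis, const struct pmc_bit_map *fd_map,
		  uint32_t false_pos)
{
	for (int i = 0; sts_map[i].name; i++) {
		if ((fd_map[i].bit_mask & func_dis) ||
		    (sts_map[i].bit_mask & d3_sts))
			continue;	/* disabled or already in D3: fine */
		printf("%s: %s is in D0 prior to s2idle\n",
		       (sts_map[i].bit_mask & false_pos) ? "dbg" : "ERR",
		       sts_map[i].name);
	}
}

int main(void)
{
	static const struct pmc_bit_map sts[] = { {"GBE", 1}, {"USB", 2}, {} };
	static const struct pmc_bit_map fd[]  = { {"GBE", 1}, {"USB", 2}, {} };

	/* Nothing disabled, nothing in D3; GBE is a known false positive. */
	check(/*d3_sts*/ 0x0, sts, /*func_dis*/ 0x0, fd, /*false_pos*/ 0x1);
	return 0;
}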
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
new file mode 100644
index 000000000000..bcf3a0c356ea
--- /dev/null
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * In some cases UART attached devices which require an in kernel driver,
+ * e.g. UART attached Bluetooth HCIs are described in the ACPI tables
+ * by an ACPI device with a broken or missing UartSerialBusV2() resource.
+ *
+ * This causes the kernel to create a /dev/ttyS# char-device for the UART
+ * instead of creating an in kernel serdev-controller + serdev-device pair
+ * for the in kernel driver.
+ *
+ * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel
+ * create a serdev-controller device for these UARTs instead of a /dev/ttyS#.
+ *
+ * Instantiating the actual serdev-device to bind to is up to pdx86 code,
+ * this header provides a helper for getting the serdev-controller device.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct device *ctrl_dev, *child;
+ struct acpi_device *ctrl_adev;
+ char name[32];
+ int i;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ /* Walk host -> uart-ctrl -> port -> serdev-ctrl */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev));
+ break;
+ case 1:
+ snprintf(name, sizeof(name), "%s.%d",
+ dev_name(ctrl_dev), serial_ctrl_port);
+ break;
+ case 2:
+ strscpy(name, serdev_ctrl_name, sizeof(name));
+ break;
+ }
+
+ child = device_find_child_by_name(ctrl_dev, name);
+ put_device(ctrl_dev);
+ if (!child) {
+ pr_err("error could not find '%s' device\n", name);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ ctrl_dev = child;
+ }
+
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ctrl_dev;
+}
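A small user-space sketch of the three-step name walk performed by get_serdev_controller(); the UART controller name, port number and serdev-controller name below are hypothetical examples, not values taken from the patch:

#include <stdio.h>

int main(void)
{
	char cur[32] = "80860F0A:00";               /* hypothetical UART ctrl */
	const char *serdev_ctrl_name = "serial0-0"; /* hypothetical */
	int serial_ctrl_port = 1;
	char next[32];

	for (int i = 0; i < 3; i++) {
		switch (i) {
		case 0: /* platform child of the UART controller */
			snprintf(next, sizeof(next), "%s:0", cur);
			break;
		case 1: /* the requested port on that controller */
			snprintf(next, sizeof(next), "%s.%d", cur, serial_ctrl_port);
			break;
		case 2: /* finally the serdev controller itself */
			snprintf(next, sizeof(next), "%s", serdev_ctrl_name);
			break;
		}
		printf("step %d: look up child '%s'\n", i, next);
		snprintf(cur, sizeof(cur), "%s", next);
	}
	return 0;
}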
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 8158e3cf5d6d..97b9c6392230 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -329,6 +329,19 @@ static const struct smi_node cs35l41_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node cs35l54_hda = {
+ .instances = {
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l54-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l54-hda_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
static const struct smi_node cs35l56_hda = {
.instances = {
{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
@@ -342,6 +355,19 @@ static const struct smi_node cs35l56_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node cs35l57_hda = {
+ .instances = {
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l57-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l57-hda_dummy_dev" },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
@@ -350,7 +376,9 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "CSC3554", (unsigned long)&cs35l54_hda },
{ "CSC3556", (unsigned long)&cs35l56_hda },
+ { "CSC3557", (unsigned long)&cs35l57_hda },
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c
index 6ce43ccb3112..c0910af16a3a 100644
--- a/drivers/platform/x86/silicom-platform.c
+++ b/drivers/platform/x86/silicom-platform.c
@@ -256,12 +256,7 @@ static void silicom_gpio_set(struct gpio_chip *gc,
if (direction == GPIO_LINE_DIRECTION_IN)
return;
- if (value)
- silicom_mec_port_set(channel, 0);
- else if (value == 0)
- silicom_mec_port_set(channel, 1);
- else
- pr_err("Wrong argument value: %d\n", value);
+ silicom_mec_port_set(channel, !value);
}
static int silicom_gpio_direction_output(struct gpio_chip *gc,
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3a396b763c49..9345316b45db 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -195,7 +195,7 @@ static const char * const level_options[] = {
[TLMI_LEVEL_MASTER] = "master",
};
static struct think_lmi tlmi_priv;
-static struct class *fw_attr_class;
+static const struct class *fw_attr_class;
static DEFINE_MUTEX(tlmi_mutex);
/* Convert BIOS WMI error string to suitable error code */
@@ -1009,7 +1009,16 @@ static ssize_t current_value_store(struct kobject *kobj,
* Note - this sets the variable and then the password as separate
* WMI calls. Function tlmi_save_bios_settings will error if the
* password is incorrect.
+	 * Workstations require the opcode to be set before changing the
+ * attribute.
*/
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+ tlmi_priv.pwd_admin->password);
+ if (ret)
+ goto out;
+ }
+
set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
new_setting);
if (!set_str) {
@@ -1021,17 +1030,10 @@ static ssize_t current_value_store(struct kobject *kobj,
if (ret)
goto out;
- if (tlmi_priv.save_mode == TLMI_SAVE_BULK) {
+ if (tlmi_priv.save_mode == TLMI_SAVE_BULK)
tlmi_priv.save_required = true;
- } else {
- if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
- ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
- tlmi_priv.pwd_admin->password);
- if (ret)
- goto out;
- }
+ else
ret = tlmi_save_bios_settings("");
- }
} else { /* old non-opcode based authentication method (deprecated) */
if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c4895e9bc714..82429e59999d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -69,6 +69,7 @@
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#include <linux/workqueue.h>
#include <acpi/battery.h>
@@ -166,6 +167,7 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
+ TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -3731,6 +3733,7 @@ static bool hotkey_notify_extended_hotkey(const u32 hkey)
switch (hkey) {
case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
case TP_HKEY_EV_AMT_TOGGLE:
+ case TP_HKEY_EV_PROFILE_TOGGLE:
tpacpi_driver_event(hkey);
return true;
}
@@ -6126,12 +6129,15 @@ enum thermal_access_mode {
TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
+ TPACPI_THERMAL_TPEC_12, /* Use ACPI EC regs, 12 sensors */
TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
};
enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_THERMAL_TMP0_NS = 0xA8, /* ACPI EC Non-Standard regs TMP 0..7 */
+ TP_EC_THERMAL_TMP8_NS = 0xB8, /* ACPI EC Non-standard regs TMP 8..11 */
TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
@@ -6144,8 +6150,104 @@ struct ibm_thermal_sensors_struct {
s32 temp[TPACPI_MAX_THERMAL_SENSORS];
};
+static const struct tpacpi_quirk thermal_quirk_table[] __initconst = {
+ /* Non-standard address for thermal registers on some ThinkPads */
+ TPACPI_Q_LNV3('R', '1', 'F', true), /* L13 Yoga Gen 2 */
+ TPACPI_Q_LNV3('N', '2', 'U', true), /* X13 Yoga Gen 2*/
+ TPACPI_Q_LNV3('R', '0', 'R', true), /* L380 */
+ TPACPI_Q_LNV3('R', '1', '5', true), /* L13 Yoga Gen 1*/
+ TPACPI_Q_LNV3('R', '1', '0', true), /* L390 */
+ TPACPI_Q_LNV3('N', '2', 'L', true), /* X13 Yoga Gen 1*/
+ TPACPI_Q_LNV3('R', '0', 'T', true), /* 11e Gen5 GL*/
+ TPACPI_Q_LNV3('R', '1', 'D', true), /* 11e Gen5 GL-R*/
+ TPACPI_Q_LNV3('R', '0', 'V', true), /* 11e Gen5 KL-Y*/
+};
+
static enum thermal_access_mode thermal_read_mode;
static bool thermal_use_labels;
+static bool thermal_with_ns_address; /* Non-standard thermal reg address */
+
+/* Function to check thermal read mode */
+static enum thermal_access_mode __init thermal_read_mode_check(void)
+{
+ u8 t, ta1, ta2, ver = 0;
+ int i;
+ int acpi_tmp7;
+
+ acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
+
+ if (thinkpad_id.ec_model) {
+ /*
+ * Direct EC access mode: sensors at registers 0x78-0x7F,
+ * 0xC0-0xC7. Registers return 0x00 for non-implemented,
+ * thermal sensors return 0x80 when not available.
+ *
+ * In some special cases (when Power Supply ID is 0xC2)
+ * above rule causes thermal control issues. Offset 0xEF
+ * determines EC version. 0xC0-0xC7 are not thermal registers
+ * in Ver 3.
+ */
+ if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+ pr_warn("Thinkpad ACPI EC unable to access EC version\n");
+
+ /* Quirks to check non-standard EC */
+ thermal_with_ns_address = tpacpi_check_quirks(thermal_quirk_table,
+ ARRAY_SIZE(thermal_quirk_table));
+
+ /* Support for Thinkpads with non-standard address */
+ if (thermal_with_ns_address) {
+ pr_info("ECFW with non-standard thermal registers found\n");
+ return TPACPI_THERMAL_TPEC_12;
+ }
+
+ ta1 = ta2 = 0;
+ for (i = 0; i < 8; i++) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
+ ta1 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
+ if (ver < 3) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+ ta2 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
+ }
+ }
+
+ if (ta1 == 0) {
+ /* This is sheer paranoia, but we handle it anyway */
+ if (acpi_tmp7) {
+ pr_err("ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\n");
+ return TPACPI_THERMAL_ACPI_TMP07;
+ }
+ pr_err("ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\n");
+ return TPACPI_THERMAL_NONE;
+ }
+
+ if (ver >= 3) {
+ thermal_use_labels = true;
+ return TPACPI_THERMAL_TPEC_8;
+ }
+
+ return (ta2 != 0) ? TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ }
+
+ if (acpi_tmp7) {
+ if (tpacpi_is_ibm() && acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
+ /* 600e/x, 770e, 770x */
+ return TPACPI_THERMAL_ACPI_UPDT;
+ }
+ /* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */
+ return TPACPI_THERMAL_ACPI_TMP07;
+ }
+
+ /* temperatures not supported on 570, G4x, R30, R31, R32 */
+ return TPACPI_THERMAL_NONE;
+}
/* idx is zero-based */
static int thermal_get_sensor(int idx, s32 *value)
@@ -6174,6 +6276,20 @@ static int thermal_get_sensor(int idx, s32 *value)
}
break;
+ /* The Non-standard EC uses 12 Thermal areas */
+ case TPACPI_THERMAL_TPEC_12:
+ if (idx >= 12)
+ return -EINVAL;
+
+ t = idx < 8 ? TP_EC_THERMAL_TMP0_NS + idx :
+ TP_EC_THERMAL_TMP8_NS + (idx - 8);
+
+ if (!acpi_ec_read(t, &tmp))
+ return -EIO;
+
+ *value = tmp * MILLIDEGREE_PER_DEGREE;
+ return 0;
+
case TPACPI_THERMAL_ACPI_UPDT:
if (idx <= 7) {
snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
@@ -6208,17 +6324,17 @@ static int thermal_get_sensor(int idx, s32 *value)
static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
{
- int res, i;
- int n;
-
- n = 8;
- i = 0;
+ int res, i, n;
if (!s)
return -EINVAL;
if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
n = 16;
+ else if (thermal_read_mode == TPACPI_THERMAL_TPEC_12)
+ n = 12;
+ else
+ n = 8;
for (i = 0 ; i < n; i++) {
res = thermal_get_sensor(i, &s->temp[i]);
@@ -6317,18 +6433,36 @@ static struct attribute *thermal_temp_input_attr[] = {
NULL
};
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
static umode_t thermal_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- if (thermal_read_mode == TPACPI_THERMAL_NONE)
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct sensor_device_attribute *sensor_attr =
+ to_sensor_dev_attr(dev_attr);
+
+ int idx = sensor_attr->index;
+
+ switch (thermal_read_mode) {
+ case TPACPI_THERMAL_NONE:
return 0;
- if (attr == THERMAL_ATTRS(8) || attr == THERMAL_ATTRS(9) ||
- attr == THERMAL_ATTRS(10) || attr == THERMAL_ATTRS(11) ||
- attr == THERMAL_ATTRS(12) || attr == THERMAL_ATTRS(13) ||
- attr == THERMAL_ATTRS(14) || attr == THERMAL_ATTRS(15)) {
- if (thermal_read_mode != TPACPI_THERMAL_TPEC_16)
+ case TPACPI_THERMAL_ACPI_TMP07:
+ case TPACPI_THERMAL_ACPI_UPDT:
+ case TPACPI_THERMAL_TPEC_8:
+ if (idx >= 8)
+ return 0;
+ break;
+
+ case TPACPI_THERMAL_TPEC_12:
+ if (idx >= 12)
return 0;
+ break;
+
+ default:
+ break;
+
}
return attr->mode;
@@ -6375,78 +6509,9 @@ static const struct attribute_group temp_label_attr_group = {
static int __init thermal_init(struct ibm_init_struct *iibm)
{
- u8 t, ta1, ta2, ver = 0;
- int i;
- int acpi_tmp7;
-
vdbg_printk(TPACPI_DBG_INIT, "initializing thermal subdriver\n");
- acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
-
- if (thinkpad_id.ec_model) {
- /*
- * Direct EC access mode: sensors at registers
- * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for
- * non-implemented, thermal sensors return 0x80 when
- * not available
- * The above rule is unfortunately flawed. This has been seen with
- * 0xC2 (power supply ID) causing thermal control problems.
- * The EC version can be determined by offset 0xEF and at least for
- * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
- * are not thermal registers.
- */
- if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
- pr_warn("Thinkpad ACPI EC unable to access EC version\n");
-
- ta1 = ta2 = 0;
- for (i = 0; i < 8; i++) {
- if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
- ta1 |= t;
- } else {
- ta1 = 0;
- break;
- }
- if (ver < 3) {
- if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
- ta2 |= t;
- } else {
- ta1 = 0;
- break;
- }
- }
- }
- if (ta1 == 0) {
- /* This is sheer paranoia, but we handle it anyway */
- if (acpi_tmp7) {
- pr_err("ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\n");
- thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
- } else {
- pr_err("ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\n");
- thermal_read_mode = TPACPI_THERMAL_NONE;
- }
- } else {
- if (ver >= 3) {
- thermal_read_mode = TPACPI_THERMAL_TPEC_8;
- thermal_use_labels = true;
- } else {
- thermal_read_mode =
- (ta2 != 0) ?
- TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
- }
- }
- } else if (acpi_tmp7) {
- if (tpacpi_is_ibm() &&
- acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
- /* 600e/x, 770e, 770x */
- thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;
- } else {
- /* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */
- thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
- }
- } else {
- /* temperatures not supported on 570, G4x, R30, R31, R32 */
- thermal_read_mode = TPACPI_THERMAL_NONE;
- }
+ thermal_read_mode = thermal_read_mode_check();
vdbg_printk(TPACPI_DBG_INIT, "thermal is %s, mode %d\n",
str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
@@ -8767,6 +8832,13 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */
TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2*/
+ TPACPI_Q_LNV3('R', '0', 'R', TPACPI_FAN_NS), /* L380 */
+ TPACPI_Q_LNV3('R', '1', '5', TPACPI_FAN_NS), /* L13 Yoga Gen 1 */
+ TPACPI_Q_LNV3('R', '1', '0', TPACPI_FAN_NS), /* L390 */
+ TPACPI_Q_LNV3('N', '2', 'L', TPACPI_FAN_NS), /* X13 Yoga Gen 1 */
+ TPACPI_Q_LNV3('R', '0', 'T', TPACPI_FAN_NS), /* 11e Gen5 GL */
+ TPACPI_Q_LNV3('R', '1', 'D', TPACPI_FAN_NS), /* 11e Gen5 GL-R */
+ TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
@@ -9285,7 +9357,6 @@ static int mute_led_init(struct ibm_init_struct *iibm)
continue;
}
- mute_led_cdev[i].brightness = ledtrig_audio_get(i);
err = led_classdev_register(&tpacpi_pdev->dev, &mute_led_cdev[i]);
if (err < 0) {
while (i--)
@@ -10308,6 +10379,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
return 0;
default:
/* Unknown function */
+ pr_debug("unknown function 0x%x\n", funcmode);
return -EOPNOTSUPP;
}
return 0;
@@ -10493,8 +10565,8 @@ static void dytc_profile_refresh(void)
return;
perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
- convert_dytc_to_profile(funcmode, perfmode, &profile);
- if (profile != dytc_current_profile) {
+ err = convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
platform_profile_notify();
}
@@ -11118,7 +11190,23 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
else
dytc_control_amt(!dytc_amt_active);
}
-
+ if (hkey_event == TP_HKEY_EV_PROFILE_TOGGLE) {
+ switch (dytc_current_profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ dytc_profile_set(NULL, PLATFORM_PROFILE_PERFORMANCE);
+ break;
+ case PLATFORM_PROFILE_PERFORMANCE:
+ dytc_profile_set(NULL, PLATFORM_PROFILE_LOW_POWER);
+ break;
+ default:
+ pr_warn("Profile HKEY unexpected profile %d", dytc_current_profile);
+ }
+ /* Notify user space the profile changed */
+ platform_profile_notify();
+ }
}
static void hotkey_driver_event(const unsigned int scancode)
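An illustrative sketch (plain user-space C, not kernel code) of the TPACPI_THERMAL_TPEC_12 register mapping added above: sensors 0-7 live at the non-standard 0xA8 block, sensors 8-11 at 0xB8, and the raw EC byte is scaled to millidegrees:

#include <stdio.h>

#define TP_EC_THERMAL_TMP0_NS 0xA8 /* non-standard regs, sensors 0..7 */
#define TP_EC_THERMAL_TMP8_NS 0xB8 /* non-standard regs, sensors 8..11 */
#define MILLIDEGREE_PER_DEGREE 1000

static int ns_thermal_reg(int idx)
{
	return idx < 8 ? TP_EC_THERMAL_TMP0_NS + idx
		       : TP_EC_THERMAL_TMP8_NS + (idx - 8);
}

int main(void)
{
	for (int idx = 0; idx < 12; idx++)
		printf("sensor %2d -> EC reg 0x%02X\n", idx, ns_thermal_reg(idx));

	/* e.g. a raw EC reading of 47 is reported as 47000 millidegrees */
	printf("raw 47 -> %d millidegrees\n", 47 * MILLIDEGREE_PER_DEGREE);
	return 0;
}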
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 7aee5e9ff2b8..c6a10ec2c83f 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
};
static const struct ts_dmi_data chuwi_hi8_air_data = {
- .acpi_name = "MSSL1680:00",
+ .acpi_name = "MSSL1680",
.properties = chuwi_hi8_air_props,
};
@@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = {
{ }
};
-static const struct ts_dmi_data gdix1001_00_upside_down_data = {
- .acpi_name = "GDIX1001:00",
- .properties = gdix1001_upside_down_props,
-};
-
-static const struct ts_dmi_data gdix1001_01_upside_down_data = {
- .acpi_name = "GDIX1001:01",
+static const struct ts_dmi_data gdix1001_upside_down_data = {
+ .acpi_name = "GDIX1001",
.properties = gdix1001_upside_down_props,
};
-static const struct ts_dmi_data gdix1002_00_upside_down_data = {
- .acpi_name = "GDIX1002:00",
+static const struct ts_dmi_data gdix1002_upside_down_data = {
+ .acpi_name = "GDIX1002",
.properties = gdix1001_upside_down_props,
};
@@ -1223,6 +1218,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 dual-boot (CWI506) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
+ },
+ },
+ {
/* Chuwi Vi8 Plus (CWI519) */
.driver_data = (void *)&chuwi_vi8_plus_data,
.matches = {
@@ -1412,7 +1416,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Juno Tablet */
- .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .driver_data = (void *)&gdix1002_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
@@ -1658,7 +1662,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Android version / BIOS) */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
@@ -1666,7 +1670,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Windows version / BIOS) */
- .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1684,7 +1688,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X98 Pro */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/*
* Only match BIOS date, because the manufacturers
@@ -1788,7 +1792,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* "WinBook TW100" */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
@@ -1796,7 +1800,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* WinBook TW700 */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
@@ -1821,7 +1825,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
int error;
if (has_acpi_companion(dev) &&
- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ strstarts(client->name, ts_data->acpi_name)) {
error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
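A tiny sketch of why the acpi_name entries above can drop the ":00" suffix: ts_dmi_add_props() now uses a prefix match (strstarts()), so one entry covers both the ":00" and ":01" ACPI instances. The stand-in helper and client names below are examples only:

#include <stdio.h>
#include <string.h>

/* User-space stand-in for the kernel's strstarts() helper. */
static int starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	const char *acpi_name = "GDIX1001";
	const char *clients[] = { "GDIX1001:00", "GDIX1001:01", "MSSL1680:00" };

	for (unsigned int i = 0; i < 3; i++)
		printf("%-12s %s\n", clients[i],
		       starts_with(clients[i], acpi_name) ? "matches" : "no match");
	return 0;
}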
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index 644d2fd889c0..df6f0ae6e6c7 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -94,6 +94,7 @@ static struct wmi_driver wmi_bmof_driver = {
.probe = wmi_bmof_probe,
.remove = wmi_bmof_remove,
.id_table = wmi_bmof_id_table,
+ .no_singleton = true,
};
module_wmi_driver(wmi_bmof_driver);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 3c288e8f404b..1920e115da89 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -57,6 +57,8 @@ static_assert(__alignof__(struct guid_block) == 1);
enum { /* wmi_block flags */
WMI_READ_TAKES_NO_ARGS,
+ WMI_GUID_DUPLICATED,
+ WMI_NO_EVENT_DATA,
};
struct wmi_block {
@@ -88,16 +90,6 @@ static const struct acpi_device_id wmi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
-/* allow duplicate GUIDs as these device drivers use struct wmi_driver */
-static const char * const allow_duplicates[] = {
- "05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
- "8A42EA14-4F2A-FD45-6422-0087F7A7E608", /* dell-wmi-ddv */
- "44FADEB1-B204-40F2-8581-394BBDC1B651", /* intel-wmi-sbl-fw-update */
- "86CCFD48-205E-4A77-9C48-2021CBEDE341", /* intel-wmi-thunderbolt */
- "F1DDEE52-063C-4784-A11E-8A06684B9B01", /* dell-smm-hwmon */
- NULL
-};
-
#define dev_to_wblock(__dev) container_of_const(__dev, struct wmi_block, dev.dev)
#define dev_to_wdev(__dev) container_of_const(__dev, struct wmi_device, dev)
@@ -132,26 +124,6 @@ static const void *find_guid_context(struct wmi_block *wblock,
return NULL;
}
-static int get_subobj_info(acpi_handle handle, const char *pathname,
- struct acpi_device_info **info)
-{
- acpi_handle subobj_handle;
- acpi_status status;
-
- status = acpi_get_handle(handle, pathname, &subobj_handle);
- if (status == AE_NOT_FOUND)
- return -ENOENT;
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- status = acpi_get_object_info(subobj_handle, info);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- return 0;
-}
-
static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
{
struct guid_block *block;
@@ -215,6 +187,12 @@ static int wmidev_match_guid(struct device *dev, const void *data)
struct wmi_block *wblock = dev_to_wblock(dev);
const guid_t *guid = data;
+ /* Legacy GUID-based functions are restricted to only see
+ * a single WMI device for each GUID.
+ */
+ if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags))
+ return 0;
+
if (guid_equal(guid, &wblock->gblock.guid))
return 1;
@@ -226,13 +204,19 @@ static int wmidev_match_notify_id(struct device *dev, const void *data)
struct wmi_block *wblock = dev_to_wblock(dev);
const u32 *notify_id = data;
+ /* Legacy GUID-based functions are restricted to only see
+ * a single WMI device for each GUID.
+ */
+ if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags))
+ return 0;
+
if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
return 1;
return 0;
}
-static struct bus_type wmi_bus_type;
+static const struct bus_type wmi_bus_type;
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
{
@@ -316,7 +300,7 @@ EXPORT_SYMBOL_GPL(wmidev_instance_count);
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
* @method_id: Method ID to call
- * @in: Buffer containing input for the method call
+ * @in: Mandatory buffer containing input for the method call
* @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method, the caller must free @out.
@@ -346,7 +330,7 @@ EXPORT_SYMBOL_GPL(wmi_evaluate_method);
* @wdev: A wmi bus device from a driver
* @instance: Instance index
* @method_id: Method ID to call
- * @in: Buffer containing input for the method call
+ * @in: Mandatory buffer containing input for the method call
* @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method, the caller must free @out.
@@ -367,26 +351,25 @@ acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 met
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
+ if (!in)
+ return AE_BAD_DATA;
+
if (!(block->flags & ACPI_WMI_METHOD))
return AE_BAD_DATA;
if (block->instance_count <= instance)
return AE_BAD_PARAMETER;
- input.count = 2;
+ input.count = 3;
input.pointer = params;
+
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = method_id;
-
- if (in) {
- input.count = 3;
-
- params[2].type = get_param_acpi_type(wblock);
- params[2].buffer.length = in->length;
- params[2].buffer.pointer = in->pointer;
- }
+ params[2].type = get_param_acpi_type(wblock);
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
get_acpi_method_name(wblock, 'M', method);
@@ -890,6 +873,23 @@ static int wmi_dev_probe(struct device *dev)
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
int ret = 0;
+ /* Some older WMI drivers will break if instantiated multiple times,
+ * so they are blocked from probing WMI devices with a duplicated GUID.
+ *
+ * New WMI drivers should support being instantiated multiple times.
+ */
+ if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags) && !wdriver->no_singleton) {
+ dev_warn(dev, "Legacy driver %s cannot be instantiated multiple times\n",
+ dev->driver->name);
+
+ return -ENODEV;
+ }
+
+ if (wdriver->notify) {
+ if (test_bit(WMI_NO_EVENT_DATA, &wblock->flags) && !wdriver->no_notify_data)
+ return -ENODEV;
+ }
+
if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
@@ -931,7 +931,7 @@ static struct class wmi_bus_class = {
.name = "wmi_bus",
};
-static struct bus_type wmi_bus_type = {
+static const struct bus_type wmi_bus_type = {
.name = "wmi",
.dev_groups = wmi_groups,
.match = wmi_dev_match,
@@ -979,9 +979,10 @@ static int wmi_create_device(struct device *wmi_bus_dev,
struct wmi_block *wblock,
struct acpi_device *device)
{
- struct acpi_device_info *info;
char method[WMI_ACPI_METHOD_NAME_SIZE];
- int result;
+ struct acpi_device_info *info;
+ acpi_handle method_handle;
+ acpi_status status;
uint count;
if (wblock->gblock.flags & ACPI_WMI_EVENT) {
@@ -990,6 +991,15 @@ static int wmi_create_device(struct device *wmi_bus_dev,
}
if (wblock->gblock.flags & ACPI_WMI_METHOD) {
+ get_acpi_method_name(wblock, 'M', method);
+ if (!acpi_has_method(device->handle, method)) {
+ dev_warn(wmi_bus_dev,
+ FW_BUG "%s method block execution control method not found\n",
+ method);
+
+ return -ENXIO;
+ }
+
wblock->dev.dev.type = &wmi_type_method;
goto out_init;
}
@@ -1000,15 +1010,19 @@ static int wmi_create_device(struct device *wmi_bus_dev,
* we ignore this data block.
*/
get_acpi_method_name(wblock, 'Q', method);
- result = get_subobj_info(device->handle, method, &info);
-
- if (result) {
+ status = acpi_get_handle(device->handle, method, &method_handle);
+ if (ACPI_FAILURE(status)) {
dev_warn(wmi_bus_dev,
- "%s data block query control method not found\n",
+ FW_BUG "%s data block query control method not found\n",
method);
- return result;
+
+ return -ENXIO;
}
+ status = acpi_get_object_info(method_handle, &info);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
wblock->dev.dev.type = &wmi_type_data;
/*
@@ -1037,10 +1051,12 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.dev.parent = wmi_bus_dev;
count = guid_count(&wblock->gblock.guid);
- if (count)
+ if (count) {
dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count);
- else
+ set_bit(WMI_GUID_DUPLICATED, &wblock->flags);
+ } else {
dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
+ }
device_initialize(&wblock->dev.dev);
@@ -1067,32 +1083,6 @@ static int wmi_add_device(struct platform_device *pdev, struct wmi_device *wdev)
return device_add(&wdev->dev);
}
-static bool guid_already_parsed_for_legacy(struct acpi_device *device, const guid_t *guid)
-{
- struct wmi_block *wblock;
-
- list_for_each_entry(wblock, &wmi_block_list, list) {
- /* skip warning and register if we know the driver will use struct wmi_driver */
- for (int i = 0; allow_duplicates[i] != NULL; i++) {
- if (guid_parse_and_compare(allow_duplicates[i], guid))
- return false;
- }
- if (guid_equal(&wblock->gblock.guid, guid)) {
- /*
- * Because we historically didn't track the relationship
- * between GUIDs and ACPI nodes, we don't know whether
- * we need to suppress GUIDs that are unique on a
- * given node but duplicated across nodes.
- */
- dev_warn(&device->dev, "duplicate WMI GUID %pUL (first instance was on %s)\n",
- guid, dev_name(&wblock->acpi_device->dev));
- return true;
- }
- }
-
- return false;
-}
-
/*
* Parse the _WDG method for the GUID data blocks
*/
@@ -1101,6 +1091,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
const struct guid_block *gblock;
+ bool event_data_available;
struct wmi_block *wblock;
union acpi_object *obj;
acpi_status status;
@@ -1120,6 +1111,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
return -ENXIO;
}
+ event_data_available = acpi_has_method(device->handle, "_WED");
gblock = (const struct guid_block *)obj->buffer.pointer;
total = obj->buffer.length / sizeof(struct guid_block);
@@ -1129,17 +1121,14 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
continue;
}
- if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
- continue;
-
wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
- if (!wblock) {
- dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
+ if (!wblock)
continue;
- }
wblock->acpi_device = device;
wblock->gblock = gblock[i];
+ if (gblock[i].flags & ACPI_WMI_EVENT && !event_data_available)
+ set_bit(WMI_NO_EVENT_DATA, &wblock->flags);
retval = wmi_create_device(wmi_bus_dev, wblock, device);
if (retval) {
@@ -1205,30 +1194,46 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
-static void wmi_notify_driver(struct wmi_block *wblock)
+static int wmi_get_notify_data(struct wmi_block *wblock, union acpi_object **obj)
{
- struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
- if (!driver->no_notify_data) {
- status = get_event_data(wblock, &data);
- if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev, "Failed to get event data\n");
- return;
- }
+ if (test_bit(WMI_NO_EVENT_DATA, &wblock->flags)) {
+ *obj = NULL;
+ return 0;
}
- if (driver->notify)
- driver->notify(&wblock->dev, data.pointer);
+ status = get_event_data(wblock, &data);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+ return -EIO;
+ }
+
+ *obj = data.pointer;
- kfree(data.pointer);
+ return 0;
+}
+
+static void wmi_notify_driver(struct wmi_block *wblock, union acpi_object *obj)
+{
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+
+ if (!obj && !driver->no_notify_data) {
+ dev_warn(&wblock->dev.dev, "Event contains no event data\n");
+ return;
+ }
+
+ if (driver->notify)
+ driver->notify(&wblock->dev, obj);
}
static int wmi_notify_device(struct device *dev, void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
+ union acpi_object *obj;
u32 *event = data;
+ int ret;
if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
return 0;
@@ -1238,15 +1243,36 @@ static int wmi_notify_device(struct device *dev, void *data)
* Because of this the WMI driver notify handler takes precedence.
*/
if (wblock->dev.dev.driver && wblock->driver_ready) {
- wmi_notify_driver(wblock);
+ ret = wmi_get_notify_data(wblock, &obj);
+ if (ret >= 0) {
+ wmi_notify_driver(wblock, obj);
+ kfree(obj);
+ }
} else {
- if (wblock->handler)
+ if (wblock->handler) {
wblock->handler(*event, wblock->handler_data);
+ } else {
+ /* The ACPI WMI specification says that _WED should be
+			 * evaluated every time a notification is received, even
+ * if no consumers are present.
+ *
+ * Some firmware implementations actually depend on this
+ * by using a queue for events which will fill up if the
+ * WMI driver core stops evaluating _WED due to missing
+ * WMI event consumers.
+ *
+ * Because of this we need this seemingly useless call to
+ * wmi_get_notify_data() which in turn evaluates _WED.
+ */
+ ret = wmi_get_notify_data(wblock, &obj);
+ if (ret >= 0)
+ kfree(obj);
+ }
+
}
up_read(&wblock->notify_lock);
- acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
- dev_name(&wblock->dev.dev), *event, 0);
+ acpi_bus_generate_netlink_event("wmi", acpi_dev_name(wblock->acpi_device), *event, 0);
return -EBUSY;
}
@@ -1347,7 +1373,7 @@ static int acpi_wmi_probe(struct platform_device *device)
error = parse_wdg(wmi_bus_dev, device);
if (error) {
- pr_err("Failed to parse WDG method\n");
+ dev_err(&device->dev, "Failed to parse _WDG method\n");
return error;
}
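
A companion sketch for the wmidev_evaluate_method() change above, which now rejects a NULL input buffer; the method ID and argument layout are hypothetical:

#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/wmi.h>

static int example_wmi_call(struct wmi_device *wdev)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	u32 arg = 0;					/* hypothetical single-dword argument */
	struct acpi_buffer in = { sizeof(arg), &arg };
	acpi_status status;

	/* @in must now be supplied even for methods that ignore their input */
	status = wmidev_evaluate_method(wdev, 0, 0x1 /* hypothetical method ID */, &in, &out);
	if (ACPI_FAILURE(status))
		return -EIO;

	/* out.pointer holds a union acpi_object which the caller must free */
	kfree(out.pointer);
	return 0;
}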
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index f8221a15575b..a3415f1c0b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include "x86-android-tablets.h"
+#include "../serdev_helpers.h"
static struct platform_device *x86_android_tablet_device;
@@ -113,6 +114,9 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
irq_set_irq_type(irq, irq_type);
+ if (data->free_gpio)
+ devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
+
return irq;
case X86_ACPI_IRQ_TYPE_PMIC:
status = acpi_get_handle(NULL, data->chip, &handle);
@@ -229,38 +233,20 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
{
- struct acpi_device *ctrl_adev, *serdev_adev;
+ struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s ctrl adev\n",
- info->ctrl_hid, info->ctrl_uid);
- return -ENODEV;
- }
+ ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
+ info->ctrl_devname);
+ if (IS_ERR(ctrl_dev))
+ return PTR_ERR(ctrl_dev);
serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
if (!serdev_adev) {
pr_err("error could not get %s serdev adev\n", info->serdev_hid);
- goto put_ctrl_adev;
- }
-
- /* get_first_physical_node() returns a weak ref, no need to put() it */
- ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s ctrl physical dev\n",
- info->ctrl_hid, info->ctrl_uid);
- goto put_serdev_adev;
- }
-
- /* ctrl_dev now points to the controller's parent, get the controller */
- ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s %s ctrl dev\n",
- info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
- goto put_serdev_adev;
+ goto put_ctrl_dev;
}
serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
@@ -283,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
put_serdev_adev:
acpi_dev_put(serdev_adev);
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
+put_ctrl_dev:
+ put_device(ctrl_dev);
return ret;
}
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index f1c66a61bfc5..c297391955ad 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -116,6 +116,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
.trigger = ACPI_EDGE_SENSITIVE,
.polarity = ACPI_ACTIVE_LOW,
.con_id = "goodix_ts_irq",
+ .free_gpio = true,
},
}, {
/* Wacom Digitizer in keyboard half */
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index bc6bbf7ec6ea..278402dcb808 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
.dev_id = "i2c-NVT-ts",
.table = {
GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
@@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
};
static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
- &acer_b1_750_goodix_gpios,
+ &acer_b1_750_nvt_ts_gpios,
&int3496_reference_gpios,
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 49fed9410adb..468993edfeee 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -39,6 +39,7 @@ struct x86_acpi_irq_data {
int index;
int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+ bool free_gpio; /* Release GPIO after getting IRQ (for TYPE_GPIOINT) */
const char *con_id;
};
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index 709bbc448fad..d7ef46ccd9b8 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev)
struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
int i;
+ if (!scmi_pd_data)
+ return;
+
of_genpd_del_provider(dev->of_node);
for (i = 0; i < scmi_pd_data->num_domains; i++)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 18e232b5ed53..4215ffd9b11c 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -311,72 +311,102 @@ static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
}
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
- unsigned int state, int depth)
+ unsigned int state, int depth);
+
+static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
- struct generic_pm_domain *parent;
- struct gpd_link *link;
- int parent_state, ret;
+ struct generic_pm_domain *parent = link->parent;
+ int parent_state;
- if (state == genpd->performance_state)
- return 0;
+ genpd_lock_nested(parent, depth + 1);
- /* Propagate to parents of genpd */
- list_for_each_entry(link, &genpd->child_links, child_node) {
- parent = link->parent;
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
- /* Find parent's performance state */
- ret = genpd_xlate_performance_state(genpd, parent, state);
- if (unlikely(ret < 0))
- goto err;
+ parent_state = _genpd_reeval_performance_state(parent, parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ parent->name, parent_state);
+ }
- parent_state = ret;
+ genpd_unlock(parent);
+}
- genpd_lock_nested(parent, depth + 1);
+static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
+ struct gpd_link *link,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *parent = link->parent;
+ int parent_state, ret;
- link->prev_performance_state = link->performance_state;
- link->performance_state = parent_state;
- parent_state = _genpd_reeval_performance_state(parent,
- parent_state);
- ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
- if (ret)
- link->performance_state = link->prev_performance_state;
+ /* Find parent's performance state */
+ ret = genpd_xlate_performance_state(genpd, parent, state);
+ if (unlikely(ret < 0))
+ return ret;
- genpd_unlock(parent);
+ parent_state = ret;
- if (ret)
- goto err;
- }
+ genpd_lock_nested(parent, depth + 1);
- if (genpd->set_performance_state) {
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
- }
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = parent_state;
- genpd->performance_state = state;
- return 0;
+ parent_state = _genpd_reeval_performance_state(parent, parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;
-err:
- /* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->child_links,
- child_node) {
- parent = link->parent;
+ genpd_unlock(parent);
- genpd_lock_nested(parent, depth + 1);
+ return ret;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct gpd_link *link = NULL;
+ int ret;
+
+ if (state == genpd->performance_state)
+ return 0;
- parent_state = link->prev_performance_state;
- link->performance_state = parent_state;
+ /* When scaling up, propagate to parents first in normal order */
+ if (state > genpd->performance_state) {
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ ret = _genpd_set_parent_state(genpd, link, state, depth);
+ if (ret)
+ goto rollback_parents_up;
+ }
+ }
- parent_state = _genpd_reeval_performance_state(parent,
- parent_state);
- if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
- pr_err("%s: Failed to roll back to %d performance state\n",
- parent->name, parent_state);
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret) {
+ if (link)
+ goto rollback_parents_up;
+ return ret;
}
+ }
- genpd_unlock(parent);
+ /* When scaling down, propagate to parents last in reverse order */
+ if (state < genpd->performance_state) {
+ list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
+ ret = _genpd_set_parent_state(genpd, link, state, depth);
+ if (ret)
+ goto rollback_parents_down;
+ }
}
+ genpd->performance_state = state;
+ return 0;
+
+rollback_parents_up:
+ list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
+ _genpd_rollback_parent_state(link, depth);
+ return ret;
+rollback_parents_down:
+ list_for_each_entry_continue(link, &genpd->child_links, child_node)
+ _genpd_rollback_parent_state(link, depth);
return ret;
}
@@ -1100,6 +1130,7 @@ static int __init genpd_power_off_unused(void)
return 0;
}
+ pr_info("genpd: Disabling unused power domains\n");
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node)
@@ -2235,7 +2266,7 @@ static DEFINE_MUTEX(of_genpd_mutex);
* to be a valid pointer to struct generic_pm_domain.
*/
static struct generic_pm_domain *genpd_xlate_simple(
- struct of_phandle_args *genpdspec,
+ const struct of_phandle_args *genpdspec,
void *data)
{
return data;
@@ -2252,7 +2283,7 @@ static struct generic_pm_domain *genpd_xlate_simple(
* the genpd_onecell_data struct when registering the provider.
*/
static struct generic_pm_domain *genpd_xlate_onecell(
- struct of_phandle_args *genpdspec,
+ const struct of_phandle_args *genpdspec,
void *data)
{
struct genpd_onecell_data *genpd_data = data;
@@ -2495,7 +2526,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
* on failure.
*/
static struct generic_pm_domain *genpd_get_from_provider(
- struct of_phandle_args *genpdspec)
+ const struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
struct of_genpd_provider *provider;
@@ -2526,7 +2557,7 @@ static struct generic_pm_domain *genpd_get_from_provider(
* Looks-up an I/O PM domain based upon phandle args provided and adds
* the device to the PM domain. Returns a negative error code on failure.
*/
-int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
@@ -2560,8 +2591,8 @@ EXPORT_SYMBOL_GPL(of_genpd_add_device);
* provided and adds the subdomain to the parent PM domain. Returns a
* negative error code on failure.
*/
-int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
+int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec)
{
struct generic_pm_domain *parent, *subdomain;
int ret;
@@ -2598,8 +2629,8 @@ EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
* provided and removes the subdomain from the parent PM domain. Returns a
* negative error code on failure.
*/
-int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
+int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec)
{
struct generic_pm_domain *parent, *subdomain;
int ret;
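
For context on the propagation order reworked above: consumers only see dev_pm_genpd_set_performance_state(), while the core now walks parent links before the local ->set_performance_state() callback when scaling up and after it when scaling down. A minimal consumer-side sketch, with a made-up performance level:

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

/* Request a higher performance level; the genpd core propagates the request
 * to parent domains in the order described above. The level value 3 is
 * hypothetical and would normally come from an OPP table.
 */
static int example_boost(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	ret = dev_pm_genpd_set_performance_state(dev, 3);
	if (ret)
		pm_runtime_put(dev);

	return ret;
}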
diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index 1341a707f61b..ca942d7929c2 100644
--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
@@ -258,11 +258,14 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
domain->power_dev =
dev_pm_domain_attach_by_name(dev, data->gpc_name);
- if (IS_ERR(domain->power_dev)) {
- dev_err_probe(dev, PTR_ERR(domain->power_dev),
+ if (IS_ERR_OR_NULL(domain->power_dev)) {
+ if (!domain->power_dev)
+ ret = -ENODEV;
+ else
+ ret = PTR_ERR(domain->power_dev);
+ dev_err_probe(dev, ret,
"failed to attach power domain \"%s\"\n",
data->gpc_name);
- ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index e3203eb6a022..77e889165eed 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -55,7 +55,7 @@ struct imx8mp_blk_ctrl_domain_data {
const char *gpc_name;
};
-#define DOMAIN_MAX_CLKS 2
+#define DOMAIN_MAX_CLKS 3
#define DOMAIN_MAX_PATHS 3
struct imx8mp_blk_ctrl_domain {
@@ -457,8 +457,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
},
[IMX8MP_HDMIBLK_PD_LCDIF] = {
.name = "hdmiblk-lcdif",
- .clk_names = (const char *[]){ "axi", "apb" },
- .num_clks = 2,
+ .clk_names = (const char *[]){ "axi", "apb", "fdcc" },
+ .num_clks = 3,
.gpc_name = "lcdif",
.path_names = (const char *[]){"lcdif-hdmi"},
.num_paths = 1,
@@ -483,8 +483,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
},
[IMX8MP_HDMIBLK_PD_HDMI_TX] = {
.name = "hdmiblk-hdmi-tx",
- .clk_names = (const char *[]){ "apb", "ref_266m" },
- .num_clks = 2,
+ .clk_names = (const char *[]){ "apb", "ref_266m", "fdcc" },
+ .num_clks = 3,
.gpc_name = "hdmi-tx",
},
[IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = {
@@ -687,11 +687,14 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
domain->power_dev =
dev_pm_domain_attach_by_name(dev, data->gpc_name);
- if (IS_ERR(domain->power_dev)) {
- dev_err_probe(dev, PTR_ERR(domain->power_dev),
+ if (IS_ERR_OR_NULL(domain->power_dev)) {
+ if (!domain->power_dev)
+ ret = -ENODEV;
+ else
+ ret = PTR_ERR(domain->power_dev);
+ dev_err_probe(dev, ret,
"failed to attach power domain %s\n",
data->gpc_name);
- ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
diff --git a/drivers/pmdomain/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c
index 891c1d925a9d..05841b0bf7f3 100644
--- a/drivers/pmdomain/imx/scu-pd.c
+++ b/drivers/pmdomain/imx/scu-pd.c
@@ -393,7 +393,7 @@ static int imx_sc_pd_power_off(struct generic_pm_domain *domain)
return imx_sc_pd_power(domain, false);
}
-static struct generic_pm_domain *imx_scu_pd_xlate(struct of_phandle_args *spec,
+static struct generic_pm_domain *imx_scu_pd_xlate(const struct of_phandle_args *spec,
void *data)
{
struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
diff --git a/drivers/pmdomain/mediatek/mtk-scpsys.c b/drivers/pmdomain/mediatek/mtk-scpsys.c
index b374d01fdac7..59a7a8c261ed 100644
--- a/drivers/pmdomain/mediatek/mtk-scpsys.c
+++ b/drivers/pmdomain/mediatek/mtk-scpsys.c
@@ -425,7 +425,6 @@ static struct scp *init_scp(struct platform_device *pdev,
bool bus_prot_reg_update)
{
struct genpd_onecell_data *pd_data;
- struct resource *res;
int i, j;
struct scp *scp;
struct clk *clk[CLK_MAX];
@@ -441,8 +440,7 @@ static struct scp *init_scp(struct platform_device *pdev,
scp->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scp->base = devm_ioremap_resource(&pdev->dev, res);
+ scp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scp->base))
return ERR_CAST(scp->base);
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index 3078896b1300..de9121ef4216 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -217,7 +217,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
[SC8280XP_CX] = &cx,
[SC8280XP_CX_AO] = &cx_ao,
[SC8280XP_EBI] = &ebi,
- [SC8280XP_GFX] = &gfx,
[SC8280XP_LCX] = &lcx,
[SC8280XP_LMX] = &lmx,
[SC8280XP_MMCX] = &mmcx,
@@ -692,6 +691,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+ unsigned int peer_enabled_corner;
if (pd->state_synced) {
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
@@ -701,9 +701,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
this_sleep_corner = pd->level_count - 1;
}
- if (peer && peer->enabled)
- to_active_sleep(peer, peer->corner, &peer_active_corner,
+ if (peer && peer->enabled) {
+ peer_enabled_corner = max(peer->corner, peer->enable_corner);
+ to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
&peer_sleep_corner);
+ }
active_corner = max(this_active_corner, peer_active_corner);
diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 7796d65f96e8..5e6280b4cf70 100644
--- a/drivers/pmdomain/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
@@ -16,6 +16,8 @@
#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+static struct qcom_smd_rpm *rpmpd_smd_rpm;
+
/* Resource types:
* RPMPD_X is X encoded as a little-endian, lower-case, ASCII string */
#define RPMPD_SMPA 0x61706d73
@@ -54,7 +56,6 @@ struct rpmpd {
bool enabled;
const int res_type;
const int res_id;
- struct qcom_smd_rpm *rpm;
unsigned int max_state;
__le32 key;
bool state_synced;
@@ -226,7 +227,46 @@ static struct rpmpd cx_s3a_vfl = {
.key = KEY_FLOOR_LEVEL,
};
+static struct rpmpd cx_s2b_corner_ao;
+static struct rpmpd cx_s2b_corner = {
+ .pd = { .name = "cx", },
+ .peer = &cx_s2b_corner_ao,
+ .res_type = RPMPD_SMPB,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd cx_s2b_corner_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &cx_s2b_corner,
+ .active_only = true,
+ .res_type = RPMPD_SMPB,
+ .res_id = 2,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd cx_s2b_vfc = {
+ .pd = { .name = "cx_vfc", },
+ .res_type = RPMPD_SMPB,
+ .res_id = 2,
+ .key = KEY_FLOOR_CORNER,
+};
+
/* G(F)X */
+static struct rpmpd gfx_s7a_corner = {
+ .pd = { .name = "gfx", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 7,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd gfx_s7a_vfc = {
+ .pd = { .name = "gfx_vfc", },
+ .res_type = RPMPD_SMPA,
+ .res_id = 7,
+ .key = KEY_FLOOR_CORNER,
+};
+
static struct rpmpd gfx_s2b_corner = {
.pd = { .name = "gfx", },
.res_type = RPMPD_SMPB,
@@ -241,6 +281,20 @@ static struct rpmpd gfx_s2b_vfc = {
.key = KEY_FLOOR_CORNER,
};
+static struct rpmpd gfx_s4b_corner = {
+ .pd = { .name = "gfx", },
+ .res_type = RPMPD_SMPB,
+ .res_id = 4,
+ .key = KEY_CORNER,
+};
+
+static struct rpmpd gfx_s4b_vfc = {
+ .pd = { .name = "gfx_vfc", },
+ .res_type = RPMPD_SMPB,
+ .res_id = 4,
+ .key = KEY_FLOOR_CORNER,
+};
+
static struct rpmpd mx_rwmx0_lvl;
static struct rpmpd gx_rwgx0_lvl_ao;
static struct rpmpd gx_rwgx0_lvl = {
@@ -663,6 +717,34 @@ static const struct rpmpd_desc msm8953_desc = {
.max_state = RPM_SMD_LEVEL_TURBO,
};
+static struct rpmpd *msm8974_rpmpds[] = {
+ [MSM8974_VDDCX] = &cx_s2b_corner,
+ [MSM8974_VDDCX_AO] = &cx_s2b_corner_ao,
+ [MSM8974_VDDCX_VFC] = &cx_s2b_vfc,
+ [MSM8974_VDDGFX] = &gfx_s4b_corner,
+ [MSM8974_VDDGFX_VFC] = &gfx_s4b_vfc,
+};
+
+static const struct rpmpd_desc msm8974_desc = {
+ .rpmpds = msm8974_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8974_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+static struct rpmpd *msm8974pro_pma8084_rpmpds[] = {
+ [MSM8974_VDDCX] = &cx_s2a_corner,
+ [MSM8974_VDDCX_AO] = &cx_s2a_corner_ao,
+ [MSM8974_VDDCX_VFC] = &cx_s2a_vfc,
+ [MSM8974_VDDGFX] = &gfx_s7a_corner,
+ [MSM8974_VDDGFX_VFC] = &gfx_s7a_vfc,
+};
+
+static const struct rpmpd_desc msm8974pro_pma8084_desc = {
+ .rpmpds = msm8974pro_pma8084_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8974pro_pma8084_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
static struct rpmpd *msm8976_rpmpds[] = {
[MSM8976_VDDCX] = &cx_s2a_lvl,
[MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao,
@@ -856,6 +938,8 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,msm8917-rpmpd", .data = &msm8917_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
+ { .compatible = "qcom,msm8974-rpmpd", .data = &msm8974_desc },
+ { .compatible = "qcom,msm8974pro-pma8084-rpmpd", .data = &msm8974pro_pma8084_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
@@ -879,7 +963,7 @@ static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
.value = cpu_to_le32(enable),
};
- return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ return qcom_rpm_smd_write(rpmpd_smd_rpm, QCOM_SMD_RPM_ACTIVE_STATE,
pd->res_type, pd->res_id, &req, sizeof(req));
}
@@ -891,7 +975,7 @@ static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
.value = cpu_to_le32(corner),
};
- return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+ return qcom_rpm_smd_write(rpmpd_smd_rpm, state, pd->res_type, pd->res_id,
&req, sizeof(req));
};
@@ -1004,12 +1088,11 @@ static int rpmpd_probe(struct platform_device *pdev)
int i;
size_t num;
struct genpd_onecell_data *data;
- struct qcom_smd_rpm *rpm;
struct rpmpd **rpmpds;
const struct rpmpd_desc *desc;
- rpm = dev_get_drvdata(pdev->dev.parent);
- if (!rpm) {
+ rpmpd_smd_rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpmpd_smd_rpm) {
dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
return -ENODEV;
}
@@ -1039,7 +1122,6 @@ static int rpmpd_probe(struct platform_device *pdev)
continue;
}
- rpmpds[i]->rpm = rpm;
rpmpds[i]->max_state = desc->max_state;
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
diff --git a/drivers/pmdomain/renesas/Kconfig b/drivers/pmdomain/renesas/Kconfig
index 80bf2cf8b60e..54acb4b1ec7c 100644
--- a/drivers/pmdomain/renesas/Kconfig
+++ b/drivers/pmdomain/renesas/Kconfig
@@ -71,6 +71,10 @@ config SYSC_R8A779G0
bool "System Controller support for R-Car V4H" if COMPILE_TEST
select SYSC_RCAR_GEN4
+config SYSC_R8A779H0
+ bool "System Controller support for R-Car V4M" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
config SYSC_RMOBILE
bool "System Controller support for R-Mobile" if COMPILE_TEST
diff --git a/drivers/pmdomain/renesas/Makefile b/drivers/pmdomain/renesas/Makefile
index e306e396fc8c..89180f19c23b 100644
--- a/drivers/pmdomain/renesas/Makefile
+++ b/drivers/pmdomain/renesas/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o
obj-$(CONFIG_SYSC_R8A779G0) += r8a779g0-sysc.o
+obj-$(CONFIG_SYSC_R8A779H0) += r8a779h0-sysc.o
# Family
obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o
obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o
diff --git a/drivers/pmdomain/renesas/r8a779a0-sysc.c b/drivers/pmdomain/renesas/r8a779a0-sysc.c
index 04f1bc322ae7..54cdf250f7c2 100644
--- a/drivers/pmdomain/renesas/r8a779a0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779a0-sysc.c
@@ -5,19 +5,7 @@
* Copyright (C) 2020 Renesas Electronics Corp.
*/
-#include <linux/bits.h>
-#include <linux/clk/renesas.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/of_address.h>
-#include <linux/pm_domain.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
#include <dt-bindings/power/r8a779a0-sysc.h>
diff --git a/drivers/pmdomain/renesas/r8a779f0-sysc.c b/drivers/pmdomain/renesas/r8a779f0-sysc.c
index 5602aa6bd7ed..6ed13cd1cb24 100644
--- a/drivers/pmdomain/renesas/r8a779f0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779f0-sysc.c
@@ -5,19 +5,7 @@
* Copyright (C) 2021 Renesas Electronics Corp.
*/
-#include <linux/bits.h>
-#include <linux/clk/renesas.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/of_address.h>
-#include <linux/pm_domain.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
#include <dt-bindings/power/r8a779f0-sysc.h>
diff --git a/drivers/pmdomain/renesas/r8a779g0-sysc.c b/drivers/pmdomain/renesas/r8a779g0-sysc.c
index b932eba1b804..249cf43af45b 100644
--- a/drivers/pmdomain/renesas/r8a779g0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779g0-sysc.c
@@ -5,19 +5,7 @@
* Copyright (C) 2022 Renesas Electronics Corp.
*/
-#include <linux/bits.h>
-#include <linux/clk/renesas.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/of_address.h>
-#include <linux/pm_domain.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
#include <dt-bindings/power/r8a779g0-sysc.h>
diff --git a/drivers/pmdomain/renesas/r8a779h0-sysc.c b/drivers/pmdomain/renesas/r8a779h0-sysc.c
new file mode 100644
index 000000000000..e13372cb80ee
--- /dev/null
+++ b/drivers/pmdomain/renesas/r8a779h0-sysc.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V4M System Controller
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/renesas,r8a779h0-sysc.h>
+
+#include "rcar-gen4-sysc.h"
+
+static struct rcar_gen4_sysc_area r8a779h0_areas[] __initdata = {
+ { "always-on", R8A779H0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "c4", R8A779H0_PD_C4, R8A779H0_PD_ALWAYS_ON },
+ { "a2e0d0", R8A779H0_PD_A2E0D0, R8A779H0_PD_C4, PD_SCU },
+ { "a1e0d0c0", R8A779H0_PD_A1E0D0C0, R8A779H0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779H0_PD_A1E0D0C1, R8A779H0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c2", R8A779H0_PD_A1E0D0C2, R8A779H0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c3", R8A779H0_PD_A1E0D0C3, R8A779H0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a3cr0", R8A779H0_PD_A3CR0, R8A779H0_PD_ALWAYS_ON, PD_CPU_NOCR },
+ { "a3cr1", R8A779H0_PD_A3CR1, R8A779H0_PD_ALWAYS_ON, PD_CPU_NOCR },
+ { "a3cr2", R8A779H0_PD_A3CR2, R8A779H0_PD_ALWAYS_ON, PD_CPU_NOCR },
+ { "a33dga", R8A779H0_PD_A33DGA, R8A779H0_PD_C4 },
+ { "a23dgb", R8A779H0_PD_A23DGB, R8A779H0_PD_A33DGA },
+ { "a3vip0", R8A779H0_PD_A3VIP0, R8A779H0_PD_C4 },
+ { "a3vip2", R8A779H0_PD_A3VIP2, R8A779H0_PD_C4 },
+ { "a3dul", R8A779H0_PD_A3DUL, R8A779H0_PD_C4 },
+ { "a3isp0", R8A779H0_PD_A3ISP0, R8A779H0_PD_C4 },
+ { "a2cn0", R8A779H0_PD_A2CN0, R8A779H0_PD_C4 },
+ { "a1cn0", R8A779H0_PD_A1CN0, R8A779H0_PD_A2CN0 },
+ { "a1dsp0", R8A779H0_PD_A1DSP0, R8A779H0_PD_A2CN0 },
+ { "a1dsp1", R8A779H0_PD_A1DSP1, R8A779H0_PD_A2CN0 },
+ { "a2imp01", R8A779H0_PD_A2IMP01, R8A779H0_PD_C4 },
+ { "a2psc", R8A779H0_PD_A2PSC, R8A779H0_PD_C4 },
+ { "a2dma", R8A779H0_PD_A2DMA, R8A779H0_PD_C4 },
+ { "a2cv0", R8A779H0_PD_A2CV0, R8A779H0_PD_C4 },
+ { "a2cv1", R8A779H0_PD_A2CV1, R8A779H0_PD_C4 },
+ { "a2cv2", R8A779H0_PD_A2CV2, R8A779H0_PD_C4 },
+ { "a2cv3", R8A779H0_PD_A2CV3, R8A779H0_PD_C4 },
+ { "a3imr0", R8A779H0_PD_A3IMR0, R8A779H0_PD_C4 },
+ { "a3imr1", R8A779H0_PD_A3IMR1, R8A779H0_PD_C4 },
+ { "a3imr2", R8A779H0_PD_A3IMR2, R8A779H0_PD_C4 },
+ { "a3imr3", R8A779H0_PD_A3IMR3, R8A779H0_PD_C4 },
+ { "a3vc", R8A779H0_PD_A3VC, R8A779H0_PD_C4 },
+ { "a3pci", R8A779H0_PD_A3PCI, R8A779H0_PD_C4 },
+ { "a2pciphy", R8A779H0_PD_A2PCIPHY, R8A779H0_PD_A3PCI },
+};
+
+const struct rcar_gen4_sysc_info r8a779h0_sysc_info __initconst = {
+ .areas = r8a779h0_areas,
+ .num_areas = ARRAY_SIZE(r8a779h0_areas),
+};
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 9e5e6e077abc..66409cff2083 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
@@ -50,13 +50,13 @@
#define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */
#define SYSCSR_TIMEOUT 10000
-#define SYSCSR_DELAY_US 10
+#define SYSCSR_DELAY_US 1
-#define PDRESR_RETRIES 1000
-#define PDRESR_DELAY_US 10
+#define PDRESR_RETRIES 10000
+#define PDRESR_DELAY_US 1
-#define SYSCISR_TIMEOUT 10000
-#define SYSCISR_DELAY_US 10
+#define SYSCISCR_TIMEOUT 10000
+#define SYSCISCR_DELAY_US 1
#define RCAR_GEN4_PD_ALWAYS_ON 64
#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
@@ -97,7 +97,7 @@ static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, !(val & isr_mask),
- SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ SYSCISCR_DELAY_US, SYSCISCR_TIMEOUT);
if (ret < 0) {
pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
return -EIO;
@@ -157,7 +157,7 @@ static int rcar_gen4_sysc_power(u8 pdr, bool on)
/* Wait until the power shutoff or resume request has completed * */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, (val & isr_mask),
- SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ SYSCISCR_DELAY_US, SYSCISCR_TIMEOUT);
if (ret < 0) {
ret = -EIO;
goto out;
@@ -285,6 +285,9 @@ static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A779G0
{ .compatible = "renesas,r8a779g0-sysc", .data = &r8a779g0_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A779H0
+ { .compatible = "renesas,r8a779h0-sysc", .data = &r8a779h0_sysc_info },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.h b/drivers/pmdomain/renesas/rcar-gen4-sysc.h
index 388cfa8f8f9f..fdf843aa5113 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.h
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.h
@@ -40,5 +40,6 @@ struct rcar_gen4_sysc_info {
extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info;
extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info;
extern const struct rcar_gen4_sysc_info r8a779g0_sysc_info;
+extern const struct rcar_gen4_sysc_info r8a779h0_sysc_info;
#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index eed47696e825..35d9aa0dfab8 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -45,10 +45,10 @@
#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */
-#define SYSCSR_TIMEOUT 100
+#define SYSCSR_TIMEOUT 1000
#define SYSCSR_DELAY_US 1
-#define PWRER_RETRIES 100
+#define PWRER_RETRIES 1000
#define PWRER_DELAY_US 1
#define SYSCISR_TIMEOUT 1000
diff --git a/drivers/pmdomain/tegra/powergate-bpmp.c b/drivers/pmdomain/tegra/powergate-bpmp.c
index 179ed895c279..b0138ca9f851 100644
--- a/drivers/pmdomain/tegra/powergate-bpmp.c
+++ b/drivers/pmdomain/tegra/powergate-bpmp.c
@@ -305,7 +305,7 @@ static void tegra_bpmp_remove_powergates(struct tegra_bpmp *bpmp)
}
static struct generic_pm_domain *
-tegra_powergate_xlate(struct of_phandle_args *spec, void *data)
+tegra_powergate_xlate(const struct of_phandle_args *spec, void *data)
{
struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
struct genpd_onecell_data *genpd = data;
diff --git a/drivers/pmdomain/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index c2feae3a634c..b8ceb3c2b81c 100644
--- a/drivers/pmdomain/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
@@ -695,6 +695,8 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
data = prm->data;
name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
data->name);
+ if (!name)
+ return -ENOMEM;
prmd->dev = dev;
prmd->prm = prm;
diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index c091d569ecd5..9dddf227a3a6 100644
--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
@@ -85,7 +85,7 @@ static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
* @data: genpd core data for all the powerdomains on the device
*/
static struct generic_pm_domain *ti_sci_pd_xlate(
- struct of_phandle_args *genpdspec,
+ const struct of_phandle_args *genpdspec,
void *data)
{
struct genpd_onecell_data *genpd_data = data;
diff --git a/drivers/pmdomain/xilinx/zynqmp-pm-domains.c b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
index 6fd514286d82..0b5831e5ba1b 100644
--- a/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
+++ b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
@@ -210,7 +210,7 @@ static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
}
static struct generic_pm_domain *zynqmp_gpd_xlate
- (struct of_phandle_args *genpdspec, void *data)
+ (const struct of_phandle_args *genpdspec, void *data)
{
struct genpd_onecell_data *genpd_data = data;
unsigned int i, idx = genpdspec->args[0];
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index ab3350ce2d62..bb26fa6fa67c 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
struct as3722_poweroff {
@@ -18,22 +19,18 @@ struct as3722_poweroff {
struct as3722 *as3722;
};
-static struct as3722_poweroff *as3722_pm_poweroff;
-
-static void as3722_pm_power_off(void)
+static int as3722_pm_power_off(struct sys_off_data *data)
{
+ struct as3722_poweroff *as3722_pm_poweroff = data->cb_data;
int ret;
- if (!as3722_pm_poweroff) {
- pr_err("AS3722 poweroff is not initialised\n");
- return;
- }
-
ret = as3722_update_bits(as3722_pm_poweroff->as3722,
AS3722_RESET_CONTROL_REG, AS3722_POWER_OFF, AS3722_POWER_OFF);
if (ret < 0)
dev_err(as3722_pm_poweroff->dev,
"RESET_CONTROL_REG update failed, %d\n", ret);
+
+ return NOTIFY_DONE;
}
static int as3722_poweroff_probe(struct platform_device *pdev)
@@ -54,18 +51,14 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
as3722_poweroff->as3722 = dev_get_drvdata(pdev->dev.parent);
as3722_poweroff->dev = &pdev->dev;
- as3722_pm_poweroff = as3722_poweroff;
- if (!pm_power_off)
- pm_power_off = as3722_pm_power_off;
- return 0;
-}
+ return devm_register_sys_off_handler(as3722_poweroff->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ as3722_pm_power_off,
+ as3722_poweroff);
-static void as3722_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == as3722_pm_power_off)
- pm_power_off = NULL;
- as3722_pm_poweroff = NULL;
+ return 0;
}
static struct platform_driver as3722_poweroff_driver = {
@@ -73,7 +66,6 @@ static struct platform_driver as3722_poweroff_driver = {
.name = "as3722-power-off",
},
.probe = as3722_poweroff_probe,
- .remove_new = as3722_poweroff_remove,
};
module_platform_driver(as3722_poweroff_driver);
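
The poweroff and reset drivers converted below all follow the same pattern as as3722-poweroff above: drop the pm_power_off or notifier_block globals and register a devm-managed sys-off handler that receives its context through sys_off_data->cb_data. A condensed sketch of that pattern, with hypothetical "example" names and register layout:

// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>

struct example_pwrc {
	void __iomem *base;			/* hypothetical power-off register block */
};

static int example_power_off(struct sys_off_data *data)
{
	struct example_pwrc *pwrc = data->cb_data;

	writel(0x1, pwrc->base);		/* hypothetical "power down" bit */

	return NOTIFY_DONE;
}

static int example_pwrc_probe(struct platform_device *pdev)
{
	struct example_pwrc *pwrc;

	pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
	if (!pwrc)
		return -ENOMEM;

	pwrc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pwrc->base))
		return PTR_ERR(pwrc->base);

	/* No remove callback is needed: the handler is devm-managed. */
	return devm_register_sys_off_handler(&pdev->dev,
					     SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     example_power_off, pwrc);
}

static struct platform_driver example_pwrc_driver = {
	.probe = example_pwrc_probe,
	.driver = {
		.name = "example-pwrc",
	},
};
module_platform_driver(example_pwrc_driver);

MODULE_LICENSE("GPL");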
diff --git a/drivers/power/reset/atc260x-poweroff.c b/drivers/power/reset/atc260x-poweroff.c
index b4aa50e9685e..e3e4621ccb1d 100644
--- a/drivers/power/reset/atc260x-poweroff.c
+++ b/drivers/power/reset/atc260x-poweroff.c
@@ -16,13 +16,9 @@
struct atc260x_pwrc {
struct device *dev;
struct regmap *regmap;
- struct notifier_block restart_nb;
int (*do_poweroff)(const struct atc260x_pwrc *pwrc, bool restart);
};
-/* Global variable needed only for pm_power_off */
-static struct atc260x_pwrc *atc260x_pwrc_data;
-
static int atc2603c_do_poweroff(const struct atc260x_pwrc *pwrc, bool restart)
{
int ret, deep_sleep = 0;
@@ -165,18 +161,20 @@ static int atc2609a_init(const struct atc260x_pwrc *pwrc)
return ret;
}
-static void atc260x_pwrc_pm_handler(void)
+static int atc260x_pwrc_pm_handler(struct sys_off_data *data)
{
- atc260x_pwrc_data->do_poweroff(atc260x_pwrc_data, false);
+ struct atc260x_pwrc *pwrc = data->cb_data;
+
+ pwrc->do_poweroff(pwrc, false);
WARN_ONCE(1, "Unable to power off system\n");
+
+ return NOTIFY_DONE;
}
-static int atc260x_pwrc_restart_handler(struct notifier_block *nb,
- unsigned long mode, void *cmd)
+static int atc260x_pwrc_restart_handler(struct sys_off_data *data)
{
- struct atc260x_pwrc *pwrc = container_of(nb, struct atc260x_pwrc,
- restart_nb);
+ struct atc260x_pwrc *pwrc = data->cb_data;
pwrc->do_poweroff(pwrc, true);
return NOTIFY_DONE;
@@ -194,8 +192,6 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->regmap = atc260x->regmap;
- priv->restart_nb.notifier_call = atc260x_pwrc_restart_handler;
- priv->restart_nb.priority = 192;
switch (atc260x->ic_type) {
case ATC2603C:
@@ -216,16 +212,20 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
if (ret)
return ret;
- platform_set_drvdata(pdev, priv);
-
- if (!pm_power_off) {
- atc260x_pwrc_data = priv;
- pm_power_off = atc260x_pwrc_pm_handler;
- } else {
- dev_warn(priv->dev, "Poweroff callback already assigned\n");
- }
+ ret = devm_register_sys_off_handler(priv->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ atc260x_pwrc_pm_handler,
+ priv);
+ if (ret)
+ dev_err(priv->dev, "failed to register power-off handler: %d\n",
+ ret);
- ret = register_restart_handler(&priv->restart_nb);
+ ret = devm_register_sys_off_handler(priv->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH,
+ atc260x_pwrc_restart_handler,
+ priv);
if (ret)
dev_err(priv->dev, "failed to register restart handler: %d\n",
ret);
@@ -233,21 +233,8 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
return ret;
}
-static void atc260x_pwrc_remove(struct platform_device *pdev)
-{
- struct atc260x_pwrc *priv = platform_get_drvdata(pdev);
-
- if (atc260x_pwrc_data == priv) {
- pm_power_off = NULL;
- atc260x_pwrc_data = NULL;
- }
-
- unregister_restart_handler(&priv->restart_nb);
-}
-
static struct platform_driver atc260x_pwrc_driver = {
.probe = atc260x_pwrc_probe,
- .remove_new = atc260x_pwrc_remove,
.driver = {
.name = "atc260x-pwrc",
},
diff --git a/drivers/power/reset/axxia-reset.c b/drivers/power/reset/axxia-reset.c
index 24946766760c..797bf6773860 100644
--- a/drivers/power/reset/axxia-reset.c
+++ b/drivers/power/reset/axxia-reset.c
@@ -26,11 +26,10 @@
#define SC_EFUSE_INT_STATUS 0x180c
#define EFUSE_READ_DONE (1<<31)
-static struct regmap *syscon;
-
-static int axxia_restart_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int axxia_restart_handler(struct sys_off_data *data)
{
+ struct regmap *syscon = data->cb_data;
+
/* Access Key (0xab) */
regmap_write(syscon, SC_CRIT_WRITE_KEY, 0xab);
/* Select internal boot from 0xffff0000 */
@@ -44,14 +43,10 @@ static int axxia_restart_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block axxia_restart_nb = {
- .notifier_call = axxia_restart_handler,
- .priority = 128,
-};
-
static int axxia_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct regmap *syscon;
int err;
syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
@@ -60,7 +55,8 @@ static int axxia_reset_probe(struct platform_device *pdev)
return PTR_ERR(syscon);
}
- err = register_restart_handler(&axxia_restart_nb);
+ err = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, axxia_restart_handler, syscon);
if (err)
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c
index d05728b1db09..ee3f1bb97653 100644
--- a/drivers/power/reset/brcm-kona-reset.c
+++ b/drivers/power/reset/brcm-kona-reset.c
@@ -15,8 +15,7 @@
static void __iomem *kona_reset_base;
-static int kona_reset_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int kona_reset_handler(struct sys_off_data *data)
{
/*
* A soft reset is triggered by writing a 0 to bit 0 of the soft reset
@@ -31,18 +30,14 @@ static int kona_reset_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block kona_reset_nb = {
- .notifier_call = kona_reset_handler,
- .priority = 128,
-};
-
static int kona_reset_probe(struct platform_device *pdev)
{
kona_reset_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_reset_base))
return PTR_ERR(kona_reset_base);
- return register_restart_handler(&kona_reset_nb);
+ return devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, kona_reset_handler, NULL);
}
static const struct of_device_id of_match[] = {
diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
index d309b610142c..06d6992dec89 100644
--- a/drivers/power/reset/gemini-poweroff.c
+++ b/drivers/power/reset/gemini-poweroff.c
@@ -70,12 +70,9 @@ static irqreturn_t gemini_powerbutton_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-/* This callback needs this static local as it has void as argument */
-static struct gemini_powercon *gpw_poweroff;
-
-static void gemini_poweroff(void)
+static int gemini_poweroff(struct sys_off_data *data)
{
- struct gemini_powercon *gpw = gpw_poweroff;
+ struct gemini_powercon *gpw = data->cb_data;
u32 val;
dev_crit(gpw->dev, "Gemini power off\n");
@@ -86,6 +83,8 @@ static void gemini_poweroff(void)
val &= ~GEMINI_CTRL_ENABLE;
val |= GEMINI_CTRL_SHUTDOWN;
writel(val, gpw->base + GEMINI_PWC_CTRLREG);
+
+ return NOTIFY_DONE;
}
static int gemini_poweroff_probe(struct platform_device *pdev)
@@ -148,8 +147,11 @@ static int gemini_poweroff_probe(struct platform_device *pdev)
if (ret)
return ret;
- pm_power_off = gemini_poweroff;
- gpw_poweroff = gpw;
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ gemini_poweroff, gpw);
+ if (ret)
+ return ret;
dev_info(dev, "Gemini poweroff driver registered\n");
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index d96d248a6e25..c7eb6dc8e90a 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -14,8 +14,8 @@
#include <linux/pm.h>
static void __iomem *msm_ps_hold;
-static int deassert_pshold(struct notifier_block *nb, unsigned long action,
- void *data)
+
+static int do_msm_poweroff(struct sys_off_data *data)
{
writel(0, msm_ps_hold);
mdelay(10000);
@@ -23,25 +23,18 @@ static int deassert_pshold(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
-static struct notifier_block restart_nb = {
- .notifier_call = deassert_pshold,
- .priority = 128,
-};
-
-static void do_msm_poweroff(void)
-{
- deassert_pshold(&restart_nb, 0, NULL);
-}
-
static int msm_restart_probe(struct platform_device *pdev)
{
msm_ps_hold = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(msm_ps_hold))
return PTR_ERR(msm_ps_hold);
- register_restart_handler(&restart_nb);
+ devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, do_msm_poweroff, NULL);
- pm_power_off = do_msm_poweroff;
+ devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT, do_msm_poweroff,
+ NULL);
return 0;
}
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 57a63c0ab7fb..c663347547f9 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/rtc.h>
+#include <linux/reboot.h>
struct mt6323_pwrc {
struct device *dev;
@@ -21,11 +22,9 @@ struct mt6323_pwrc {
u32 base;
};
-static struct mt6323_pwrc *mt_pwrc;
-
-static void mt6323_do_pwroff(void)
+static int mt6323_do_pwroff(struct sys_off_data *data)
{
- struct mt6323_pwrc *pwrc = mt_pwrc;
+ struct mt6323_pwrc *pwrc = data->cb_data;
unsigned int val;
int ret;
@@ -44,6 +43,8 @@ static void mt6323_do_pwroff(void)
mdelay(1000);
WARN_ONCE(1, "Unable to power off system\n");
+
+ return NOTIFY_DONE;
}
static int mt6323_pwrc_probe(struct platform_device *pdev)
@@ -51,6 +52,7 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
struct mt6323_pwrc *pwrc;
struct resource *res;
+ int ret;
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
if (!pwrc)
@@ -63,19 +65,18 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
pwrc->base = res->start;
pwrc->regmap = mt6397_chip->regmap;
pwrc->dev = &pdev->dev;
- mt_pwrc = pwrc;
- pm_power_off = &mt6323_do_pwroff;
+ ret = devm_register_sys_off_handler(pwrc->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ mt6323_do_pwroff,
+ pwrc);
+ if (ret)
+ return dev_err_probe(pwrc->dev, ret, "failed to register power-off handler\n");
return 0;
}
-static void mt6323_pwrc_remove(struct platform_device *pdev)
-{
- if (pm_power_off == &mt6323_do_pwroff)
- pm_power_off = NULL;
-}
-
static const struct of_device_id mt6323_pwrc_dt_match[] = {
{ .compatible = "mediatek,mt6323-pwrc" },
{},
@@ -84,7 +85,6 @@ MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
static struct platform_driver mt6323_pwrc_driver = {
.probe = mt6323_pwrc_probe,
- .remove_new = mt6323_pwrc_remove,
.driver = {
.name = "mt6323-pwrc",
.of_match_table = mt6323_pwrc_dt_match,
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
index 15160809c423..fed4978e3858 100644
--- a/drivers/power/reset/regulator-poweroff.c
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -13,18 +13,15 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#define TIMEOUT_MS 3000
-/*
- * Hold configuration here, cannot be more than one instance of the driver
- * since pm_power_off itself is global.
- */
-static struct regulator *cpu_regulator;
-
-static void regulator_poweroff_do_poweroff(void)
+static int regulator_poweroff_do_poweroff(struct sys_off_data *data)
{
+ struct regulator *cpu_regulator = data->cb_data;
+
if (cpu_regulator && regulator_is_enabled(cpu_regulator))
regulator_force_disable(cpu_regulator);
@@ -32,30 +29,24 @@ static void regulator_poweroff_do_poweroff(void)
mdelay(TIMEOUT_MS);
WARN_ON(1);
+
+ return NOTIFY_DONE;
}
static int regulator_poweroff_probe(struct platform_device *pdev)
{
- /* If a pm_power_off function has already been added, leave it alone */
- if (pm_power_off != NULL) {
- dev_err(&pdev->dev,
- "%s: pm_power_off function already registered\n",
- __func__);
- return -EBUSY;
- }
+ struct regulator *cpu_regulator;
cpu_regulator = devm_regulator_get(&pdev->dev, "cpu");
if (IS_ERR(cpu_regulator))
return PTR_ERR(cpu_regulator);
- pm_power_off = &regulator_poweroff_do_poweroff;
- return 0;
-}
-
-static void regulator_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == &regulator_poweroff_do_poweroff)
- pm_power_off = NULL;
+ /* Set this handler to low priority to not override an existing handler */
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_LOW,
+ regulator_poweroff_do_poweroff,
+ cpu_regulator);
}
static const struct of_device_id of_regulator_poweroff_match[] = {
@@ -66,7 +57,6 @@ MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
static struct platform_driver regulator_poweroff_driver = {
.probe = regulator_poweroff_probe,
- .remove_new = regulator_poweroff_remove,
.driver = {
.name = "poweroff-regulator",
.of_match_table = of_regulator_poweroff_match,
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index f4d6004793d3..fcd588f9ae9d 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -14,29 +14,21 @@
#include <linux/module.h>
#include <linux/reboot.h>
-static void restart_poweroff_do_poweroff(void)
+static int restart_poweroff_do_poweroff(struct sys_off_data *data)
{
reboot_mode = REBOOT_HARD;
machine_restart(NULL);
+ return NOTIFY_DONE;
}
static int restart_poweroff_probe(struct platform_device *pdev)
{
- /* If a pm_power_off function has already been added, leave it alone */
- if (pm_power_off != NULL) {
- dev_err(&pdev->dev,
- "pm_power_off function already registered");
- return -EBUSY;
- }
-
- pm_power_off = &restart_poweroff_do_poweroff;
- return 0;
-}
-
-static void restart_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == &restart_poweroff_do_poweroff)
- pm_power_off = NULL;
+ /* Set this handler to low priority to not override an existing handler */
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_LOW,
+ restart_poweroff_do_poweroff,
+ NULL);
}
static const struct of_device_id of_restart_poweroff_match[] = {
@@ -47,7 +39,6 @@ MODULE_DEVICE_TABLE(of, of_restart_poweroff_match);
static struct platform_driver restart_poweroff_driver = {
.probe = restart_poweroff_probe,
- .remove_new = restart_poweroff_remove,
.driver = {
.name = "poweroff-restart",
.of_match_table = of_restart_poweroff_match,
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index 5df9b41c68c7..7dbc51c32b0e 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -19,12 +19,9 @@
/* Reset Control Register 2 */
#define RESCNT2_PRES 0x80000000 /* Soft power-on reset */
-static void __iomem *sysc_base2;
-
-static int rmobile_reset_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int rmobile_reset_handler(struct sys_off_data *data)
{
- pr_debug("%s %lu\n", __func__, mode);
+ void __iomem *sysc_base2 = (void __iomem *)data->cb_data;
/* Let's assume we have acquired the HPB semaphore */
writel(RESCNT2_PRES, sysc_base2 + RESCNT2);
@@ -32,37 +29,27 @@ static int rmobile_reset_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block rmobile_reset_nb = {
- .notifier_call = rmobile_reset_handler,
- .priority = 192,
-};
-
static int rmobile_reset_probe(struct platform_device *pdev)
{
+ void __iomem *sysc_base2;
int error;
- sysc_base2 = of_iomap(pdev->dev.of_node, 1);
- if (!sysc_base2)
- return -ENODEV;
+ sysc_base2 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(sysc_base2))
+ return PTR_ERR(sysc_base2);
- error = register_restart_handler(&rmobile_reset_nb);
+ error = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH,
+ rmobile_reset_handler,
+ (__force void *)sysc_base2);
if (error) {
dev_err(&pdev->dev,
"cannot register restart handler (err=%d)\n", error);
- goto fail_unmap;
+ return error;
}
return 0;
-
-fail_unmap:
- iounmap(sysc_base2);
- return error;
-}
-
-static void rmobile_reset_remove(struct platform_device *pdev)
-{
- unregister_restart_handler(&rmobile_reset_nb);
- iounmap(sysc_base2);
}
static const struct of_device_id rmobile_reset_of_match[] = {
@@ -73,7 +60,6 @@ MODULE_DEVICE_TABLE(of, rmobile_reset_of_match);
static struct platform_driver rmobile_reset_driver = {
.probe = rmobile_reset_probe,
- .remove_new = rmobile_reset_remove,
.driver = {
.name = "rmobile_reset",
.of_match_table = rmobile_reset_of_match,
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index 1b2ce7734260..203936f4c544 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -13,44 +13,56 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
-static struct regmap *map;
-static u32 offset;
-static u32 value;
-static u32 mask;
+struct syscon_poweroff_data {
+ struct regmap *map;
+ u32 offset;
+ u32 value;
+ u32 mask;
+};
-static void syscon_poweroff(void)
+static int syscon_poweroff(struct sys_off_data *off_data)
{
+ struct syscon_poweroff_data *data = off_data->cb_data;
+
/* Issue the poweroff */
- regmap_update_bits(map, offset, mask, value);
+ regmap_update_bits(data->map, data->offset, data->mask, data->value);
mdelay(1000);
pr_emerg("Unable to poweroff system\n");
+
+ return NOTIFY_DONE;
}
static int syscon_poweroff_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct syscon_poweroff_data *data;
int mask_err, value_err;
- map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
- if (IS_ERR(map)) {
- map = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(map)) {
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
+ if (IS_ERR(data->map)) {
+ data->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(data->map)) {
dev_err(dev, "unable to get syscon");
- return PTR_ERR(map);
+ return PTR_ERR(data->map);
}
}
- if (of_property_read_u32(dev->of_node, "offset", &offset)) {
+ if (of_property_read_u32(dev->of_node, "offset", &data->offset)) {
dev_err(dev, "unable to read 'offset'");
return -EINVAL;
}
- value_err = of_property_read_u32(dev->of_node, "value", &value);
- mask_err = of_property_read_u32(dev->of_node, "mask", &mask);
+ value_err = of_property_read_u32(dev->of_node, "value", &data->value);
+ mask_err = of_property_read_u32(dev->of_node, "mask", &data->mask);
if (value_err && mask_err) {
dev_err(dev, "unable to read 'value' and 'mask'");
return -EINVAL;
@@ -58,28 +70,17 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
if (value_err) {
/* support old binding */
- value = mask;
- mask = 0xFFFFFFFF;
+ data->value = data->mask;
+ data->mask = 0xFFFFFFFF;
} else if (mask_err) {
/* support value without mask */
- mask = 0xFFFFFFFF;
- }
-
- if (pm_power_off) {
- dev_err(dev, "pm_power_off already claimed for %ps",
- pm_power_off);
- return -EBUSY;
+ data->mask = 0xFFFFFFFF;
}
- pm_power_off = syscon_poweroff;
-
- return 0;
-}
-
-static void syscon_poweroff_remove(struct platform_device *pdev)
-{
- if (pm_power_off == syscon_poweroff)
- pm_power_off = NULL;
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ syscon_poweroff, data);
}
static const struct of_device_id syscon_poweroff_of_match[] = {
@@ -89,7 +90,6 @@ static const struct of_device_id syscon_poweroff_of_match[] = {
static struct platform_driver syscon_poweroff_driver = {
.probe = syscon_poweroff_probe,
- .remove_new = syscon_poweroff_remove,
.driver = {
.name = "syscon-poweroff",
.of_match_table = syscon_poweroff_of_match,
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
index ee8e9f4b837e..6976dbcac74f 100644
--- a/drivers/power/reset/tps65086-restart.c
+++ b/drivers/power/reset/tps65086-restart.c
@@ -9,22 +9,14 @@
#include <linux/platform_device.h>
#include <linux/reboot.h>
-struct tps65086_restart {
- struct notifier_block handler;
- struct device *dev;
-};
-
-static int tps65086_restart_notify(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int tps65086_restart_notify(struct sys_off_data *data)
{
- struct tps65086_restart *tps65086_restart =
- container_of(this, struct tps65086_restart, handler);
- struct tps65086 *tps65086 = dev_get_drvdata(tps65086_restart->dev->parent);
+ struct tps65086 *tps65086 = data->cb_data;
int ret;
ret = regmap_write(tps65086->regmap, TPS65086_FORCESHUTDN, 1);
if (ret) {
- dev_err(tps65086_restart->dev, "%s: error writing to tps65086 pmic: %d\n",
+ dev_err(tps65086->dev, "%s: error writing to tps65086 pmic: %d\n",
__func__, ret);
return NOTIFY_DONE;
}
@@ -39,44 +31,13 @@ static int tps65086_restart_notify(struct notifier_block *this,
static int tps65086_restart_probe(struct platform_device *pdev)
{
- struct tps65086_restart *tps65086_restart;
- int ret;
-
- tps65086_restart = devm_kzalloc(&pdev->dev, sizeof(*tps65086_restart), GFP_KERNEL);
- if (!tps65086_restart)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, tps65086_restart);
-
- tps65086_restart->handler.notifier_call = tps65086_restart_notify;
- tps65086_restart->handler.priority = 192;
- tps65086_restart->dev = &pdev->dev;
-
- ret = register_restart_handler(&tps65086_restart->handler);
- if (ret) {
- dev_err(&pdev->dev, "%s: cannot register restart handler: %d\n",
- __func__, ret);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void tps65086_restart_remove(struct platform_device *pdev)
-{
- struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
- int ret;
+ struct tps65086 *tps65086 = dev_get_drvdata(pdev->dev.parent);
- ret = unregister_restart_handler(&tps65086_restart->handler);
- if (ret) {
- /*
- * tps65086_restart_probe() registered the restart handler. So
- * unregistering should work fine. Checking the error code
- * shouldn't be needed, still doing it for completeness.
- */
- dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
- __func__, ret);
- }
+ return devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH,
+ tps65086_restart_notify,
+ tps65086);
}
static const struct platform_device_id tps65086_restart_id_table[] = {
@@ -90,7 +51,6 @@ static struct platform_driver tps65086_restart_driver = {
.name = "tps65086-restart",
},
.probe = tps65086_restart_probe,
- .remove_new = tps65086_restart_remove,
.id_table = tps65086_restart_id_table,
};
module_platform_driver(tps65086_restart_driver);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
index c2e5a99940d3..b5eee19bac42 100644
--- a/drivers/power/reset/xgene-reboot.c
+++ b/drivers/power/reset/xgene-reboot.c
@@ -22,17 +22,13 @@
struct xgene_reboot_context {
struct device *dev;
- void *csr;
+ void __iomem *csr;
u32 mask;
- struct notifier_block restart_handler;
};
-static int xgene_restart_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int xgene_restart_handler(struct sys_off_data *data)
{
- struct xgene_reboot_context *ctx =
- container_of(this, struct xgene_reboot_context,
- restart_handler);
+ struct xgene_reboot_context *ctx = data->cb_data;
/* Issue the reboot */
writel(ctx->mask, ctx->csr);
@@ -54,23 +50,20 @@ static int xgene_reboot_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->csr = of_iomap(dev->of_node, 0);
- if (!ctx->csr) {
+ ctx->csr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->csr)) {
dev_err(dev, "can not map resource\n");
- return -ENODEV;
+ return PTR_ERR(ctx->csr);
}
if (of_property_read_u32(dev->of_node, "mask", &ctx->mask))
ctx->mask = 0xFFFFFFFF;
ctx->dev = dev;
- ctx->restart_handler.notifier_call = xgene_restart_handler;
- ctx->restart_handler.priority = 128;
- err = register_restart_handler(&ctx->restart_handler);
- if (err) {
- iounmap(ctx->csr);
+ err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, 128,
+ xgene_restart_handler, ctx);
+ if (err)
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- }
return err;
}
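
The rmobile and xgene hunks above drop the notifier_block plus container_of() boilerplate in favour of the same sys-off API, with the context passed directly as cb_data. A hedged before/after sketch using invented names (struct foo_ctx and both callbacks are hypothetical):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>

struct foo_ctx {
	void __iomem *csr;
	u32 mask;
	struct notifier_block restart_handler;	/* needed only by the old scheme */
};

/* Old scheme: recover the driver state from the embedded notifier_block */
static int foo_restart_notifier(struct notifier_block *nb, unsigned long mode, void *cmd)
{
	struct foo_ctx *ctx = container_of(nb, struct foo_ctx, restart_handler);

	writel(ctx->mask, ctx->csr);
	return NOTIFY_DONE;
}

/* New scheme: the cb_data registered earlier arrives directly in sys_off_data */
static int foo_restart_handler(struct sys_off_data *data)
{
	struct foo_ctx *ctx = data->cb_data;

	writel(ctx->mask, ctx->csr);
	return NOTIFY_DONE;
}

Registration then becomes a single devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH, foo_restart_handler, ctx) call, which together with devm_platform_ioremap_resource() also removes the explicit iounmap()/unregister error and remove paths.
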
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f21cb05815ec..3e31375491d5 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -978,6 +978,7 @@ config CHARGER_QCOM_SMB2
config FUEL_GAUGE_MM8013
tristate "Mitsumi MM8013 fuel gauge driver"
depends on I2C
+ select REGMAP_I2C
help
Say Y here to enable the Mitsumi MM8013 fuel gauge driver.
It enables the monitoring of many battery parameters, including
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 7905eba93dea..56f136b2d071 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -617,8 +617,7 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- class_for_each_device(power_supply_class, NULL, psy,
- ab8500_btemp_get_ext_psy_data);
+ power_supply_for_each_device(psy, ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index de912658facb..55ab7a28056e 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1231,8 +1231,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
int ret;
/* Collect data from all power_supply class devices */
- class_for_each_device(power_supply_class, NULL,
- di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+ power_supply_for_each_device(di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
ab8500_chargalg_end_of_charge(di);
ab8500_chargalg_check_temp(di);
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index d72f32c663bc..9b34d1a60f66 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1949,8 +1949,7 @@ static void ab8500_charger_check_vbat_work(struct work_struct *work)
struct ab8500_charger *di = container_of(work,
struct ab8500_charger, check_vbat_work.work);
- class_for_each_device(power_supply_class, NULL,
- &di->usb_chg, ab8500_charger_get_ext_psy_data);
+ power_supply_for_each_device(&di->usb_chg, ab8500_charger_get_ext_psy_data);
/* First run old_vbat is 0. */
if (di->old_vbat == 0)
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 8c593fbdd45a..2ccaf6116c09 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2407,8 +2407,7 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- class_for_each_device(power_supply_class, NULL, psy,
- ab8500_fg_get_ext_psy_data);
+ power_supply_for_each_device(psy, ab8500_fg_get_ext_psy_data);
}
/**
diff --git a/drivers/power/supply/apm_power.c b/drivers/power/supply/apm_power.c
index 9d1a7fbcaed4..8ef1b6f1f787 100644
--- a/drivers/power/supply/apm_power.c
+++ b/drivers/power/supply/apm_power.c
@@ -79,8 +79,7 @@ static void find_main_battery(void)
main_battery = NULL;
bp.main = main_battery;
- error = class_for_each_device(power_supply_class, NULL, &bp,
- __find_main_battery);
+ error = power_supply_for_each_device(&bp, __find_main_battery);
if (error) {
main_battery = bp.main;
return;
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index e23308ad4cc7..dae7e5cfc54e 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -50,20 +50,24 @@ struct axp_data {
const char * const *irq_names;
unsigned int num_irq_names;
const int *curr_lim_table;
+ int curr_lim_table_size;
struct reg_field curr_lim_fld;
struct reg_field vbus_valid_bit;
struct reg_field vbus_mon_bit;
struct reg_field usb_bc_en_bit;
+ struct reg_field usb_bc_det_fld;
struct reg_field vbus_disable_bit;
bool vbus_needs_polling: 1;
};
struct axp20x_usb_power {
+ struct device *dev;
struct regmap *regmap;
struct regmap_field *curr_lim_fld;
struct regmap_field *vbus_valid_bit;
struct regmap_field *vbus_mon_bit;
struct regmap_field *usb_bc_en_bit;
+ struct regmap_field *usb_bc_det_fld;
struct regmap_field *vbus_disable_bit;
struct power_supply *supply;
const struct axp_data *axp_data;
@@ -115,6 +119,15 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
if (val != power->old_status)
power_supply_changed(power->supply);
+ if (power->usb_bc_en_bit && (val & AXP20X_PWR_STATUS_VBUS_PRESENT) !=
+ (power->old_status & AXP20X_PWR_STATUS_VBUS_PRESENT)) {
+ dev_dbg(power->dev, "Cable status changed, re-enabling USB BC");
+ ret = regmap_field_write(power->usb_bc_en_bit, 1);
+ if (ret)
+ dev_err(power->dev, "failed to enable USB BC: errno %d",
+ ret);
+ }
+
power->old_status = val;
power->online = val & AXP20X_PWR_STATUS_VBUS_USED;
@@ -123,6 +136,37 @@ out:
mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
+static int axp20x_get_usb_type(struct axp20x_usb_power *power,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!power->usb_bc_det_fld)
+ return -EINVAL;
+
+ ret = regmap_field_read(power->usb_bc_det_fld, &reg);
+ if (ret)
+ return ret;
+
+ switch (reg) {
+ case 1:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case 2:
+ val->intval = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case 3:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
static int axp20x_usb_power_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -160,12 +204,16 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 1700; /* 1 step = 1.7 mV */
return 0;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
ret = regmap_field_read(power->curr_lim_fld, &v);
if (ret)
return ret;
- val->intval = power->axp_data->curr_lim_table[v];
+ if (v < power->axp_data->curr_lim_table_size)
+ val->intval = power->axp_data->curr_lim_table[v];
+ else
+ val->intval = power->axp_data->curr_lim_table[
+ power->axp_data->curr_lim_table_size - 1];
return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
@@ -189,6 +237,9 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 375; /* 1 step = 0.375 mA */
return 0;
+
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return axp20x_get_usb_type(power, val);
default:
break;
}
@@ -256,19 +307,37 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
-static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power, int intval)
+static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *power,
+ int intval)
{
- const unsigned int max = GENMASK(power->axp_data->curr_lim_fld.msb,
- power->axp_data->curr_lim_fld.lsb);
+ int ret;
+ unsigned int reg;
+ const unsigned int max = power->axp_data->curr_lim_table_size;
if (intval == -1)
return -EINVAL;
- for (unsigned int i = 0; i <= max; ++i)
- if (power->axp_data->curr_lim_table[i] == intval)
- return regmap_field_write(power->curr_lim_fld, i);
+ /*
+ * BC1.2 detection can cause a race condition if we try to set a current
+ * limit while it's in progress. When it finishes it will overwrite the
+ * current limit we just set.
+ */
+ if (power->usb_bc_en_bit) {
+ dev_dbg(power->dev,
+ "disabling BC1.2 detection because current limit was set");
+ ret = regmap_field_write(power->usb_bc_en_bit, 0);
+ if (ret)
+ return ret;
+ }
+
+ for (reg = max - 1; reg > 0; reg--)
+ if (power->axp_data->curr_lim_table[reg] <= intval)
+ break;
+
+ dev_dbg(power->dev, "setting input current limit reg to %d (%d uA), requested %d uA",
+ reg, power->axp_data->curr_lim_table[reg], intval);
- return -EINVAL;
+ return regmap_field_write(power->curr_lim_fld, reg);
}
static int axp20x_usb_power_set_property(struct power_supply *psy,
@@ -287,8 +356,8 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_usb_power_set_voltage_min(power, val->intval);
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- return axp20x_usb_power_set_current_max(power, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return axp20x_usb_power_set_input_current_limit(power, val->intval);
default:
return -EINVAL;
@@ -313,7 +382,7 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
return power->vbus_disable_bit != NULL;
return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
- psp == POWER_SUPPLY_PROP_CURRENT_MAX;
+ psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
}
static enum power_supply_property axp20x_usb_power_properties[] = {
@@ -322,7 +391,7 @@ static enum power_supply_property axp20x_usb_power_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
@@ -331,7 +400,23 @@ static enum power_supply_property axp22x_usb_power_properties[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
- POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static enum power_supply_property axp813_usb_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static enum power_supply_usb_type axp813_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
};
static const struct power_supply_desc axp20x_usb_power_desc = {
@@ -354,6 +439,18 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
.set_property = axp20x_usb_power_set_property,
};
+static const struct power_supply_desc axp813_usb_power_desc = {
+ .name = "axp20x-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = axp813_usb_power_properties,
+ .num_properties = ARRAY_SIZE(axp813_usb_power_properties),
+ .property_is_writeable = axp20x_usb_power_prop_writeable,
+ .get_property = axp20x_usb_power_get_property,
+ .set_property = axp20x_usb_power_set_property,
+ .usb_types = axp813_usb_types,
+ .num_usb_types = ARRAY_SIZE(axp813_usb_types),
+};
+
static const char * const axp20x_irq_names[] = {
"VBUS_PLUGIN",
"VBUS_REMOVAL",
@@ -388,10 +485,15 @@ static int axp221_usb_curr_lim_table[] = {
};
static int axp813_usb_curr_lim_table[] = {
+ 100000,
+ 500000,
900000,
1500000,
2000000,
2500000,
+ 3000000,
+ 3500000,
+ 4000000,
};
static const struct axp_data axp192_data = {
@@ -399,6 +501,7 @@ static const struct axp_data axp192_data = {
.irq_names = axp20x_irq_names,
.num_irq_names = ARRAY_SIZE(axp20x_irq_names),
.curr_lim_table = axp192_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp192_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_valid_bit = REG_FIELD(AXP192_USB_OTG_STATUS, 2, 2),
.vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
@@ -409,6 +512,7 @@ static const struct axp_data axp202_data = {
.irq_names = axp20x_irq_names,
.num_irq_names = ARRAY_SIZE(axp20x_irq_names),
.curr_lim_table = axp20x_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp20x_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_valid_bit = REG_FIELD(AXP20X_USB_OTG_STATUS, 2, 2),
.vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
@@ -419,6 +523,7 @@ static const struct axp_data axp221_data = {
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
.curr_lim_table = axp221_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp221_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_needs_polling = true,
};
@@ -428,17 +533,20 @@ static const struct axp_data axp223_data = {
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
.curr_lim_table = axp20x_usb_curr_lim_table,
+ .curr_lim_table_size = ARRAY_SIZE(axp20x_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_needs_polling = true,
};
static const struct axp_data axp813_data = {
- .power_desc = &axp22x_usb_power_desc,
+ .power_desc = &axp813_usb_power_desc,
.irq_names = axp22x_irq_names,
.num_irq_names = ARRAY_SIZE(axp22x_irq_names),
.curr_lim_table = axp813_usb_curr_lim_table,
- .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
+ .curr_lim_table_size = ARRAY_SIZE(axp813_usb_curr_lim_table),
+ .curr_lim_fld = REG_FIELD(AXP22X_CHRG_CTRL3, 4, 7),
.usb_bc_en_bit = REG_FIELD(AXP288_BC_GLOBAL, 0, 0),
+ .usb_bc_det_fld = REG_FIELD(AXP288_BC_DET_STAT, 5, 7),
.vbus_disable_bit = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 7, 7),
.vbus_needs_polling = true,
};
@@ -558,6 +666,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, power);
+ power->dev = &pdev->dev;
power->axp_data = axp_data;
power->regmap = axp20x->regmap;
power->num_irqs = axp_data->num_irq_names;
@@ -586,6 +695,12 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return ret;
ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
+ axp_data->usb_bc_det_fld,
+ &power->usb_bc_det_fld);
+ if (ret)
+ return ret;
+
+ ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
axp_data->vbus_disable_bit,
&power->vbus_disable_bit);
if (ret)
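
The axp20x hunk above changes the input-current-limit setter from an exact-match table search to picking the largest supported limit that does not exceed the request. A small stand-alone sketch of that lookup (function and variable names are invented for illustration):

/*
 * Return the register index of the largest table entry <= req_ua.
 * Index 0 acts as the floor when the request is below every entry.
 */
static unsigned int foo_pick_current_limit(const int *table, unsigned int table_size,
					   int req_ua)
{
	unsigned int reg;

	for (reg = table_size - 1; reg > 0; reg--)
		if (table[reg] <= req_ua)
			break;

	return reg;
}

With the axp813 table in the hunk, a request of 1800000 µA selects the 1500000 µA entry instead of failing with -EINVAL as the old exact-match loop did.
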
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 3be6f3b10ea4..95d9a35243c2 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -550,18 +550,20 @@ static const struct dmi_system_id axp288_quirks[] = {
.driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
- /* Intel Cherry Trail Compute Stick, Windows version */
+ /* Intel Bay Trail Compute Stick */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
+ /* Partial match for STCK1A32WFC, STCK1A32FC, STCK1A8LFC variants */
+ DMI_MATCH(DMI_PRODUCT_NAME, "STCK1A"),
},
.driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
{
- /* Intel Cherry Trail Compute Stick, version without an OS */
+ /* Intel Cherry Trail Compute Stick */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
+ /* Partial match for STK1AW32SC and STK1A32SC variants */
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1A"),
},
.driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
},
@@ -600,6 +602,14 @@ static const struct dmi_system_id axp288_quirks[] = {
.driver_data = NULL,
},
{
+ /* Radxa ROCK Pi X Single Board Computer */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "ROCK Pi X"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Radxa"),
+ },
+ .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
+ },
+ {
/*
* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks
* This entry must be last because it is generic, this allows
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 6a4798a62588..25e28dac900d 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -991,6 +991,7 @@ static enum power_supply_property bq2415x_power_supply_props[] = {
/* TODO: maybe add more power supply properties */
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_ONLINE,
};
static int bq2415x_power_supply_get_property(struct power_supply *psy,
@@ -1017,6 +1018,15 @@ static int bq2415x_power_supply_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = bq->model;
break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ /* VBUS is present for all charging and fault states,
+ * except the 'Ready' state.
+ */
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGE_STATUS);
+ if (ret < 0)
+ return ret;
+ val->intval = ret > 0;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 1c4a9d137744..abca56834468 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1595,17 +1595,24 @@ static inline int bq27xxx_battery_read_fcc(struct bq27xxx_device_info *di)
* Return the Design Capacity in µAh
* Or < 0 if something fails.
*/
-static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di)
+static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
{
int dcap;
+ /* We only have to read charge design full once */
+ if (di->charge_design_full > 0) {
+ val->intval = di->charge_design_full;
+ return 0;
+ }
+
if (di->opts & BQ27XXX_O_ZERO)
dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, true);
else
dcap = bq27xxx_read(di, BQ27XXX_REG_DCAP, false);
if (dcap < 0) {
- dev_dbg(di->dev, "error reading initial last measured discharge\n");
+ dev_dbg(di->dev, "error reading design capacity\n");
return dcap;
}
@@ -1614,7 +1621,12 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di)
else
dcap *= 1000;
- return dcap;
+ /* Save for later reads */
+ di->charge_design_full = dcap;
+
+ val->intval = dcap;
+
+ return 0;
}
/*
@@ -1816,17 +1828,14 @@ static int bq27xxx_battery_current_and_status(
val_curr->intval = curr;
if (val_status) {
- if (curr > 0) {
+ if (bq27xxx_battery_is_full(di, flags))
+ val_status->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (curr > 0)
val_status->intval = POWER_SUPPLY_STATUS_CHARGING;
- } else if (curr < 0) {
+ else if (curr < 0)
val_status->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- } else {
- if (bq27xxx_battery_is_full(di, flags))
- val_status->intval = POWER_SUPPLY_STATUS_FULL;
- else
- val_status->intval =
- POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
+ else
+ val_status->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
}
return 0;
@@ -1865,10 +1874,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
*/
if (!(di->opts & BQ27XXX_O_ZERO))
bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
-
- /* We only have to read charge design full once */
- if (di->charge_design_full <= 0)
- di->charge_design_full = bq27xxx_battery_read_dcap(di);
}
if ((di->cache.capacity != cache.capacity) ||
@@ -2062,7 +2067,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
ret = bq27xxx_simple_value(di->cache.charge_full, val);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- ret = bq27xxx_simple_value(di->charge_design_full, val);
+ ret = bq27xxx_battery_read_dcap(di, val);
break;
/*
* TODO: Implement these to make registers set from
@@ -2101,6 +2106,13 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
mod_delayed_work(system_wq, &di->work, HZ / 2);
}
+static void bq27xxx_battery_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
{
struct power_supply_desc *psy_desc;
@@ -2108,9 +2120,14 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
.of_node = di->dev->of_node,
.drv_data = di,
};
+ int ret;
INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
mutex_init(&di->lock);
+ ret = devm_add_action_or_reset(di->dev, bq27xxx_battery_mutex_destroy,
+ &di->lock);
+ if (ret)
+ return ret;
di->regs = bq27xxx_chip_data[di->chip].regs;
di->unseal_key = bq27xxx_chip_data[di->chip].unseal_key;
@@ -2128,7 +2145,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
psy_desc->get_property = bq27xxx_battery_get_property;
psy_desc->external_power_changed = bq27xxx_external_power_changed;
- di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
+ di->bat = devm_power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
if (IS_ERR(di->bat))
return dev_err_probe(di->dev, PTR_ERR(di->bat),
"failed to register battery\n");
@@ -2156,9 +2173,6 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
mutex_unlock(&di->lock);
cancel_delayed_work_sync(&di->work);
-
- power_supply_unregister(di->bat);
- mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
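
The bq27xxx_battery_setup() change above leans on devm_add_action_or_reset() to pair mutex_init() with mutex_destroy(), so the teardown path no longer has to do it by hand. A minimal, hypothetical sketch of that devres idiom (names are invented):

#include <linux/device.h>
#include <linux/mutex.h>

static void example_mutex_destroy(void *data)
{
	mutex_destroy(data);
}

static int example_init_lock(struct device *dev, struct mutex *lock)
{
	mutex_init(lock);

	/*
	 * Runs example_mutex_destroy(lock) when the device is unbound, or
	 * immediately (returning an error) if the action cannot be added.
	 */
	return devm_add_action_or_reset(dev, example_mutex_destroy, lock);
}
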
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 3a1798b0c1a7..c1737f964840 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -13,8 +13,7 @@
#include <linux/power/bq27xxx_battery.h>
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_mutex);
+static DEFINE_IDA(battery_id);
static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
{
@@ -136,30 +135,39 @@ static int bq27xxx_battery_i2c_bulk_write(struct bq27xxx_device_info *di,
return 0;
}
+static void bq27xxx_battery_i2c_devm_ida_free(void *data)
+{
+ int num = (long)data;
+
+ ida_free(&battery_id, num);
+}
+
static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct bq27xxx_device_info *di;
int ret;
char *name;
- int num;
+ long num;
/* Get new ID for the new battery device */
- mutex_lock(&battery_mutex);
- num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
- mutex_unlock(&battery_mutex);
+ num = ida_alloc(&battery_id, GFP_KERNEL);
if (num < 0)
return num;
+ ret = devm_add_action_or_reset(&client->dev,
+ bq27xxx_battery_i2c_devm_ida_free,
+ (void *)num);
+ if (ret)
+ return ret;
- name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+ name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%ld", id->name, num);
if (!name)
- goto err_mem;
+ return -ENOMEM;
di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di)
- goto err_mem;
+ return -ENOMEM;
- di->id = num;
di->dev = &client->dev;
di->chip = id->driver_data;
di->name = name;
@@ -171,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
ret = bq27xxx_battery_setup(di);
if (ret)
- goto err_failed;
+ return ret;
/* Schedule a polling after about 1 min */
schedule_delayed_work(&di->work, 60 * HZ);
@@ -188,33 +196,21 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
"Unable to register IRQ %d error %d\n",
client->irq, ret);
bq27xxx_battery_teardown(di);
- goto err_failed;
+ return ret;
}
}
return 0;
-
-err_mem:
- ret = -ENOMEM;
-
-err_failed:
- mutex_lock(&battery_mutex);
- idr_remove(&battery_id, num);
- mutex_unlock(&battery_mutex);
-
- return ret;
}
static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
- free_irq(client->irq, di);
- bq27xxx_battery_teardown(di);
+ if (client->irq)
+ free_irq(client->irq, di);
- mutex_lock(&battery_mutex);
- idr_remove(&battery_id, di->id);
- mutex_unlock(&battery_mutex);
+ bq27xxx_battery_teardown(di);
}
static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
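
The i2c probe above swaps the IDR plus external mutex for an IDA, which does its own locking, and releases the ID through a devres action instead of manual error unwinding. A hedged stand-alone sketch of that pattern with invented names:

#include <linux/device.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static void example_ida_free(void *data)
{
	ida_free(&example_ida, (long)data);
}

/* Returns the allocated ID, or a negative errno. */
static int example_alloc_id(struct device *dev)
{
	long num;
	int ret;

	num = ida_alloc(&example_ida, GFP_KERNEL);
	if (num < 0)
		return num;

	/* Frees the ID automatically on unbind or on any later probe failure */
	ret = devm_add_action_or_reset(dev, example_ida_free, (void *)num);
	if (ret)
		return ret;

	return num;
}
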
diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c
index 581cf956d2d2..04e0f4162d42 100644
--- a/drivers/power/supply/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
@@ -530,8 +530,9 @@ static int da9030_battery_probe(struct platform_device *pdev)
da9030_battery_setup_psy(charger);
psy_cfg.drv_data = charger;
- charger->psy = power_supply_register(&pdev->dev, &charger->psy_desc,
- &psy_cfg);
+ charger->psy = devm_power_supply_register(&pdev->dev,
+ &charger->psy_desc,
+ &psy_cfg);
if (IS_ERR(charger->psy)) {
ret = PTR_ERR(charger->psy);
goto err_ps_register;
@@ -563,7 +564,6 @@ static void da9030_battery_remove(struct platform_device *dev)
DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
cancel_delayed_work_sync(&charger->work);
da9030_set_charge(charger, 0);
- power_supply_unregister(charger->psy);
}
static struct platform_driver da903x_battery_driver = {
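
This and the following supply-driver hunks repeatedly replace power_supply_register() plus a matching power_supply_unregister() in .remove and error paths with the device-managed variant. A minimal hypothetical driver showing the shape of the converted code (driver name, property and callbacks are invented):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>

static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_PRESENT,
};

static int example_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	if (psp != POWER_SUPPLY_PROP_PRESENT)
		return -EINVAL;
	val->intval = 1;
	return 0;
}

static const struct power_supply_desc example_desc = {
	.name		= "example-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= example_props,
	.num_properties	= ARRAY_SIZE(example_props),
	.get_property	= example_get_property,
};

static int example_probe(struct platform_device *pdev)
{
	struct power_supply *psy;

	/* Unregistered automatically on driver unbind; nothing to undo in .remove */
	psy = devm_power_supply_register(&pdev->dev, &example_desc, NULL);

	return PTR_ERR_OR_ZERO(psy);
}
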
diff --git a/drivers/power/supply/da9052-battery.c b/drivers/power/supply/da9052-battery.c
index 6f7c58a41e91..0d84c42c624e 100644
--- a/drivers/power/supply/da9052-battery.c
+++ b/drivers/power/supply/da9052-battery.c
@@ -622,7 +622,7 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
}
}
- bat->psy = power_supply_register(&pdev->dev, &psy_desc, &psy_cfg);
+ bat->psy = devm_power_supply_register(&pdev->dev, &psy_desc, &psy_cfg);
if (IS_ERR(bat->psy)) {
ret = PTR_ERR(bat->psy);
goto err;
@@ -644,8 +644,6 @@ static void da9052_bat_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++)
da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
-
- power_supply_unregister(bat->psy);
}
static struct platform_driver da9052_bat_driver = {
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index 37db9e4ed7f3..b13cecd84f58 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -521,42 +521,30 @@ static int da9150_charger_probe(struct platform_device *pdev)
charger->dev = dev;
/* Acquire ADC channels */
- charger->ibus_chan = iio_channel_get(dev, "CHAN_IBUS");
- if (IS_ERR(charger->ibus_chan)) {
- ret = PTR_ERR(charger->ibus_chan);
- goto ibus_chan_fail;
- }
+ charger->ibus_chan = devm_iio_channel_get(dev, "CHAN_IBUS");
+ if (IS_ERR(charger->ibus_chan))
+ return PTR_ERR(charger->ibus_chan);
- charger->vbus_chan = iio_channel_get(dev, "CHAN_VBUS");
- if (IS_ERR(charger->vbus_chan)) {
- ret = PTR_ERR(charger->vbus_chan);
- goto vbus_chan_fail;
- }
+ charger->vbus_chan = devm_iio_channel_get(dev, "CHAN_VBUS");
+ if (IS_ERR(charger->vbus_chan))
+ return PTR_ERR(charger->vbus_chan);
- charger->tjunc_chan = iio_channel_get(dev, "CHAN_TJUNC");
- if (IS_ERR(charger->tjunc_chan)) {
- ret = PTR_ERR(charger->tjunc_chan);
- goto tjunc_chan_fail;
- }
+ charger->tjunc_chan = devm_iio_channel_get(dev, "CHAN_TJUNC");
+ if (IS_ERR(charger->tjunc_chan))
+ return PTR_ERR(charger->tjunc_chan);
- charger->vbat_chan = iio_channel_get(dev, "CHAN_VBAT");
- if (IS_ERR(charger->vbat_chan)) {
- ret = PTR_ERR(charger->vbat_chan);
- goto vbat_chan_fail;
- }
+ charger->vbat_chan = devm_iio_channel_get(dev, "CHAN_VBAT");
+ if (IS_ERR(charger->vbat_chan))
+ return PTR_ERR(charger->vbat_chan);
/* Register power supplies */
- charger->usb = power_supply_register(dev, &usb_desc, NULL);
- if (IS_ERR(charger->usb)) {
- ret = PTR_ERR(charger->usb);
- goto usb_fail;
- }
+ charger->usb = devm_power_supply_register(dev, &usb_desc, NULL);
+ if (IS_ERR(charger->usb))
+ return PTR_ERR(charger->usb);
- charger->battery = power_supply_register(dev, &battery_desc, NULL);
- if (IS_ERR(charger->battery)) {
- ret = PTR_ERR(charger->battery);
- goto battery_fail;
- }
+ charger->battery = devm_power_supply_register(dev, &battery_desc, NULL);
+ if (IS_ERR(charger->battery))
+ return PTR_ERR(charger->battery);
/* Get initial online supply */
reg = da9150_reg_read(da9150, DA9150_STATUS_H);
@@ -616,22 +604,7 @@ tjunc_irq_fail:
chg_irq_fail:
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
-battery_fail:
- power_supply_unregister(charger->usb);
-usb_fail:
- iio_channel_release(charger->vbat_chan);
-
-vbat_chan_fail:
- iio_channel_release(charger->tjunc_chan);
-
-tjunc_chan_fail:
- iio_channel_release(charger->vbus_chan);
-
-vbus_chan_fail:
- iio_channel_release(charger->ibus_chan);
-
-ibus_chan_fail:
return ret;
}
@@ -656,15 +629,6 @@ static void da9150_charger_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
cancel_work_sync(&charger->otg_work);
-
- power_supply_unregister(charger->battery);
- power_supply_unregister(charger->usb);
-
- /* Release ADC channels */
- iio_channel_release(charger->ibus_chan);
- iio_channel_release(charger->vbus_chan);
- iio_channel_release(charger->tjunc_chan);
- iio_channel_release(charger->vbat_chan);
}
static struct platform_driver da9150_charger_driver = {
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 40fba31be174..7cf4ea06b500 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -739,7 +739,7 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
if (current_accum)
ds2760_battery_set_current_accum(di, current_accum);
- di->bat = power_supply_register(dev, &di->bat_desc, &psy_cfg);
+ di->bat = devm_power_supply_register(dev, &di->bat_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
dev_err(di->dev, "failed to register battery\n");
retval = PTR_ERR(di->bat);
@@ -762,7 +762,6 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
goto success;
workqueue_failed:
- power_supply_unregister(di->bat);
batt_failed:
di_alloc_failed:
success:
@@ -777,7 +776,6 @@ static void w1_ds2760_remove_slave(struct w1_slave *sl)
cancel_delayed_work_sync(&di->monitor_work);
cancel_delayed_work_sync(&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
- power_supply_unregister(di->bat);
}
#ifdef CONFIG_OF
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index 8bb645ad1e5d..479195e35d73 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -232,31 +232,22 @@ static int goldfish_battery_probe(struct platform_device *pdev)
psy_cfg.drv_data = data;
- data->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
+ data->ac = devm_power_supply_register(&pdev->dev,
+ &ac_desc,
+ &psy_cfg);
if (IS_ERR(data->ac))
return PTR_ERR(data->ac);
- data->battery = power_supply_register(&pdev->dev, &battery_desc,
- &psy_cfg);
- if (IS_ERR(data->battery)) {
- power_supply_unregister(data->ac);
+ data->battery = devm_power_supply_register(&pdev->dev,
+ &battery_desc,
+ &psy_cfg);
+ if (IS_ERR(data->battery))
return PTR_ERR(data->battery);
- }
-
- platform_set_drvdata(pdev, data);
GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
return 0;
}
-static void goldfish_battery_remove(struct platform_device *pdev)
-{
- struct goldfish_battery_data *data = platform_get_drvdata(pdev);
-
- power_supply_unregister(data->battery);
- power_supply_unregister(data->ac);
-}
-
static const struct of_device_id goldfish_battery_of_match[] = {
{ .compatible = "google,goldfish-battery", },
{},
@@ -273,7 +264,6 @@ MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
static struct platform_driver goldfish_battery_device = {
.probe = goldfish_battery_probe,
- .remove_new = goldfish_battery_remove,
.driver = {
.name = "goldfish-battery",
.of_match_table = goldfish_battery_of_match,
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 0875391f7ac6..34548a4da90b 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -453,39 +453,20 @@ static int lp8727_register_psy(struct lp8727_chg *pchg)
psy_cfg.supplied_to = battery_supplied_to;
psy_cfg.num_supplicants = ARRAY_SIZE(battery_supplied_to);
- psy->ac = power_supply_register(pchg->dev, &lp8727_ac_desc, &psy_cfg);
+ psy->ac = devm_power_supply_register(pchg->dev, &lp8727_ac_desc, &psy_cfg);
if (IS_ERR(psy->ac))
- goto err_psy_ac;
+ return -EPERM;
- psy->usb = power_supply_register(pchg->dev, &lp8727_usb_desc,
- &psy_cfg);
+ psy->usb = devm_power_supply_register(pchg->dev, &lp8727_usb_desc,
+ &psy_cfg);
if (IS_ERR(psy->usb))
- goto err_psy_usb;
+ return -EPERM;
- psy->batt = power_supply_register(pchg->dev, &lp8727_batt_desc, NULL);
+ psy->batt = devm_power_supply_register(pchg->dev, &lp8727_batt_desc, NULL);
if (IS_ERR(psy->batt))
- goto err_psy_batt;
+ return -EPERM;
return 0;
-
-err_psy_batt:
- power_supply_unregister(psy->usb);
-err_psy_usb:
- power_supply_unregister(psy->ac);
-err_psy_ac:
- return -EPERM;
-}
-
-static void lp8727_unregister_psy(struct lp8727_chg *pchg)
-{
- struct lp8727_psy *psy = pchg->psy;
-
- if (!psy)
- return;
-
- power_supply_unregister(psy->ac);
- power_supply_unregister(psy->usb);
- power_supply_unregister(psy->batt);
}
#ifdef CONFIG_OF
@@ -583,7 +564,6 @@ static int lp8727_probe(struct i2c_client *cl)
ret = lp8727_setup_irq(pchg);
if (ret) {
dev_err(pchg->dev, "irq handler err: %d", ret);
- lp8727_unregister_psy(pchg);
return ret;
}
@@ -595,7 +575,6 @@ static void lp8727_remove(struct i2c_client *cl)
struct lp8727_chg *pchg = i2c_get_clientdata(cl);
lp8727_release_irq(pchg);
- lp8727_unregister_psy(pchg);
}
static const struct of_device_id lp8727_dt_ids[] __maybe_unused = {
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 2c81be82a41a..72b170b4ac46 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -406,12 +406,6 @@ static const struct power_supply_desc lp8788_psy_battery_desc = {
.get_property = lp8788_battery_get_property,
};
-static void lp8788_psy_unregister(struct lp8788_charger *pchg)
-{
- power_supply_unregister(pchg->battery);
- power_supply_unregister(pchg->charger);
-}
-
static void lp8788_charger_event(struct work_struct *work)
{
struct lp8788_charger *pchg =
@@ -666,18 +660,16 @@ static int lp8788_psy_register(struct platform_device *pdev,
charger_cfg.supplied_to = battery_supplied_to;
charger_cfg.num_supplicants = ARRAY_SIZE(battery_supplied_to);
- pchg->charger = power_supply_register(&pdev->dev,
- &lp8788_psy_charger_desc,
- &charger_cfg);
+ pchg->charger = devm_power_supply_register(&pdev->dev,
+ &lp8788_psy_charger_desc,
+ &charger_cfg);
if (IS_ERR(pchg->charger))
return -EPERM;
- pchg->battery = power_supply_register(&pdev->dev,
- &lp8788_psy_battery_desc, NULL);
- if (IS_ERR(pchg->battery)) {
- power_supply_unregister(pchg->charger);
+ pchg->battery = devm_power_supply_register(&pdev->dev,
+ &lp8788_psy_battery_desc, NULL);
+ if (IS_ERR(pchg->battery))
return -EPERM;
- }
return 0;
}
@@ -720,7 +712,6 @@ static void lp8788_charger_remove(struct platform_device *pdev)
flush_work(&pchg->charger_work);
lp8788_irq_unregister(pdev, pchg);
- lp8788_psy_unregister(pchg);
}
static struct platform_driver lp8788_charger_driver = {
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 7c23fa89ea19..b28c04157709 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -586,8 +586,9 @@ static int max14577_charger_probe(struct platform_device *pdev)
}
psy_cfg.drv_data = chg;
- chg->charger = power_supply_register(&pdev->dev, &max14577_charger_desc,
- &psy_cfg);
+ chg->charger = devm_power_supply_register(&pdev->dev,
+ &max14577_charger_desc,
+ &psy_cfg);
if (IS_ERR(chg->charger)) {
dev_err(&pdev->dev, "failed: power supply register\n");
ret = PTR_ERR(chg->charger);
@@ -608,10 +609,7 @@ err:
static void max14577_charger_remove(struct platform_device *pdev)
{
- struct max14577_charger *chg = platform_get_drvdata(pdev);
-
device_remove_file(&pdev->dev, &dev_attr_fast_charge_timer);
- power_supply_unregister(chg->charger);
}
static const struct platform_device_id max14577_charger_id[] = {
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index d0157e63b8b5..2001e12c9f7d 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -709,9 +709,9 @@ static int max77693_charger_probe(struct platform_device *pdev)
goto err;
}
- chg->charger = power_supply_register(&pdev->dev,
- &max77693_charger_desc,
- &psy_cfg);
+ chg->charger = devm_power_supply_register(&pdev->dev,
+ &max77693_charger_desc,
+ &psy_cfg);
if (IS_ERR(chg->charger)) {
dev_err(&pdev->dev, "failed: power supply register\n");
ret = PTR_ERR(chg->charger);
@@ -730,13 +730,9 @@ err:
static void max77693_charger_remove(struct platform_device *pdev)
{
- struct max77693_charger *chg = platform_get_drvdata(pdev);
-
device_remove_file(&pdev->dev, &dev_attr_top_off_timer);
device_remove_file(&pdev->dev, &dev_attr_top_off_threshold_current);
device_remove_file(&pdev->dev, &dev_attr_fast_charge_timer);
-
- power_supply_unregister(chg->charger);
}
static const struct platform_device_id max77693_charger_id[] = {
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 4a2d6894f94e..621a006d52a9 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -507,7 +507,6 @@ static int max8925_power_probe(struct platform_device *pdev)
struct power_supply_config psy_cfg = {}; /* Only for ac and usb */
struct max8925_power_pdata *pdata = NULL;
struct max8925_power_info *info;
- int ret;
pdata = max8925_power_dt_init(pdev);
if (!pdata) {
@@ -528,25 +527,19 @@ static int max8925_power_probe(struct platform_device *pdev)
psy_cfg.supplied_to = pdata->supplied_to;
psy_cfg.num_supplicants = pdata->num_supplicants;
- info->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
- if (IS_ERR(info->ac)) {
- ret = PTR_ERR(info->ac);
- goto out;
- }
+ info->ac = devm_power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
+ if (IS_ERR(info->ac))
+ return PTR_ERR(info->ac);
info->ac->dev.parent = &pdev->dev;
- info->usb = power_supply_register(&pdev->dev, &usb_desc, &psy_cfg);
- if (IS_ERR(info->usb)) {
- ret = PTR_ERR(info->usb);
- goto out_unregister_ac;
- }
+ info->usb = devm_power_supply_register(&pdev->dev, &usb_desc, &psy_cfg);
+ if (IS_ERR(info->usb))
+ return PTR_ERR(info->usb);
info->usb->dev.parent = &pdev->dev;
- info->battery = power_supply_register(&pdev->dev, &battery_desc, NULL);
- if (IS_ERR(info->battery)) {
- ret = PTR_ERR(info->battery);
- goto out_unregister_usb;
- }
+ info->battery = devm_power_supply_register(&pdev->dev, &battery_desc, NULL);
+ if (IS_ERR(info->battery))
+ return PTR_ERR(info->battery);
info->battery->dev.parent = &pdev->dev;
info->batt_detect = pdata->batt_detect;
@@ -558,24 +551,14 @@ static int max8925_power_probe(struct platform_device *pdev)
max8925_init_charger(chip, info);
return 0;
-out_unregister_usb:
- power_supply_unregister(info->usb);
-out_unregister_ac:
- power_supply_unregister(info->ac);
-out:
- return ret;
}
static void max8925_power_remove(struct platform_device *pdev)
{
struct max8925_power_info *info = platform_get_drvdata(pdev);
- if (info) {
- power_supply_unregister(info->ac);
- power_supply_unregister(info->usb);
- power_supply_unregister(info->battery);
+ if (info)
max8925_deinit_charger(info);
- }
}
static struct platform_driver max8925_power_driver = {
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index caa272b03564..20c1651ca38e 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -71,7 +71,6 @@ static int mm8013_checkdevice(struct mm8013_chip *chip)
static enum power_supply_property mm8013_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_NOW,
@@ -103,16 +102,6 @@ static int mm8013_get_property(struct power_supply *psy,
val->intval = regval;
break;
- case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
- ret = regmap_read(chip->regmap, REG_FLAGS, &regval);
- if (ret < 0)
- return ret;
-
- if (regval & MM8013_FLAG_CHG_INH)
- val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
- else
- val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
- break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = regmap_read(chip->regmap, REG_FULL_CHARGE_CAPACITY, &regval);
if (ret < 0)
@@ -187,6 +176,8 @@ static int mm8013_get_property(struct power_supply *psy,
if (regval & MM8013_FLAG_DSG)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (regval & MM8013_FLAG_CHG_INH)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (regval & MM8013_FLAG_CHG)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (regval & MM8013_FLAG_FC)
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index 950e30917c63..0e980522fee5 100644
--- a/drivers/power/supply/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
@@ -404,9 +404,9 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
psy_cfg.drv_data = mbc;
/* Create power supplies */
- mbc->adapter = power_supply_register(&pdev->dev,
- &pcf50633_mbc_adapter_desc,
- &psy_cfg);
+ mbc->adapter = devm_power_supply_register(&pdev->dev,
+ &pcf50633_mbc_adapter_desc,
+ &psy_cfg);
if (IS_ERR(mbc->adapter)) {
dev_err(mbc->pcf->dev, "failed to register adapter\n");
return PTR_ERR(mbc->adapter);
@@ -415,20 +415,19 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
usb_psy_cfg = psy_cfg;
usb_psy_cfg.attr_grp = pcf50633_mbc_sysfs_groups;
- mbc->usb = power_supply_register(&pdev->dev, &pcf50633_mbc_usb_desc,
- &usb_psy_cfg);
+ mbc->usb = devm_power_supply_register(&pdev->dev,
+ &pcf50633_mbc_usb_desc,
+ &usb_psy_cfg);
if (IS_ERR(mbc->usb)) {
dev_err(mbc->pcf->dev, "failed to register usb\n");
- power_supply_unregister(mbc->adapter);
return PTR_ERR(mbc->usb);
}
- mbc->ac = power_supply_register(&pdev->dev, &pcf50633_mbc_ac_desc,
- &psy_cfg);
+ mbc->ac = devm_power_supply_register(&pdev->dev,
+ &pcf50633_mbc_ac_desc,
+ &psy_cfg);
if (IS_ERR(mbc->ac)) {
dev_err(mbc->pcf->dev, "failed to register ac\n");
- power_supply_unregister(mbc->adapter);
- power_supply_unregister(mbc->usb);
return PTR_ERR(mbc->ac);
}
@@ -449,10 +448,6 @@ static void pcf50633_mbc_remove(struct platform_device *pdev)
/* Remove IRQ handlers */
for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]);
-
- power_supply_unregister(mbc->usb);
- power_supply_unregister(mbc->adapter);
- power_supply_unregister(mbc->ac);
}
static struct platform_driver pcf50633_mbc_driver = {
diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h
index 645eee4d6b6a..3cbafc58bdad 100644
--- a/drivers/power/supply/power_supply.h
+++ b/drivers/power/supply/power_supply.h
@@ -15,12 +15,14 @@ struct power_supply;
#ifdef CONFIG_SYSFS
-extern void power_supply_init_attrs(struct device_type *dev_type);
+extern void power_supply_init_attrs(void);
extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env);
+extern const struct attribute_group *power_supply_attr_groups[];
#else
-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
+static inline void power_supply_init_attrs(void) {}
+#define power_supply_attr_groups NULL
#define power_supply_uevent NULL
#endif /* CONFIG_SYSFS */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ecef35ac3b7e..fefe938c9342 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -25,13 +25,17 @@
#include "power_supply.h"
#include "samsung-sdi-battery.h"
-/* exported for the APM Power driver, APM emulation */
-struct class *power_supply_class;
-EXPORT_SYMBOL_GPL(power_supply_class);
+static const struct class power_supply_class = {
+ .name = "power_supply",
+ .dev_uevent = power_supply_uevent,
+};
static BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
-static struct device_type power_supply_dev_type;
+static const struct device_type power_supply_dev_type = {
+ .name = "power_supply",
+ .groups = power_supply_attr_groups,
+};
#define POWER_SUPPLY_DEFERRED_REGISTER_TIME msecs_to_jiffies(10)
@@ -93,8 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
if (likely(psy->changed)) {
psy->changed = false;
spin_unlock_irqrestore(&psy->changed_lock, flags);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
+ power_supply_for_each_device(psy, __power_supply_changed_work);
power_supply_update_leds(psy);
blocking_notifier_call_chain(&power_supply_notifier,
PSY_EVENT_PROP_CHANGED, psy);
@@ -112,6 +115,12 @@ static void power_supply_changed_work(struct work_struct *work)
spin_unlock_irqrestore(&psy->changed_lock, flags);
}
+int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data))
+{
+ return class_for_each_device(&power_supply_class, NULL, data, fn);
+}
+EXPORT_SYMBOL_GPL(power_supply_for_each_device);
+
void power_supply_changed(struct power_supply *psy)
{
unsigned long flags;
@@ -187,8 +196,7 @@ static int power_supply_populate_supplied_from(struct power_supply *psy)
{
int error;
- error = class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_populate_supplied_from);
+ error = power_supply_for_each_device(psy, __power_supply_populate_supplied_from);
dev_dbg(&psy->dev, "%s %d\n", __func__, error);
@@ -201,7 +209,7 @@ static int __power_supply_find_supply_from_node(struct device *dev,
struct device_node *np = data;
struct power_supply *epsy = dev_get_drvdata(dev);
- /* returning non-zero breaks out of class_for_each_device loop */
+ /* returning non-zero breaks out of power_supply_for_each_device loop */
if (epsy->of_node == np)
return 1;
@@ -213,17 +221,16 @@ static int power_supply_find_supply_from_node(struct device_node *supply_node)
int error;
/*
- * class_for_each_device() either returns its own errors or values
+ * power_supply_for_each_device() either returns its own errors or values
* returned by __power_supply_find_supply_from_node().
*
* __power_supply_find_supply_from_node() will return 0 (no match)
* or 1 (match).
*
- * We return 0 if class_for_each_device() returned 1, -EPROBE_DEFER if
+ * We return 0 if power_supply_for_each_device() returned 1, -EPROBE_DEFER if
* it returned 0, or error as returned by it.
*/
- error = class_for_each_device(power_supply_class, NULL, supply_node,
- __power_supply_find_supply_from_node);
+ error = power_supply_for_each_device(supply_node, __power_supply_find_supply_from_node);
return error ? (error == 1 ? 0 : error) : -EPROBE_DEFER;
}
@@ -329,8 +336,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
struct psy_am_i_supplied_data data = { psy, 0 };
int error;
- error = class_for_each_device(power_supply_class, NULL, &data,
- __power_supply_am_i_supplied);
+ error = power_supply_for_each_device(&data, __power_supply_am_i_supplied);
dev_dbg(&psy->dev, "%s count %u err %d\n", __func__, data.count, error);
@@ -365,8 +371,7 @@ int power_supply_is_system_supplied(void)
int error;
unsigned int count = 0;
- error = class_for_each_device(power_supply_class, NULL, &count,
- __power_supply_is_system_supplied);
+ error = power_supply_for_each_device(&count, __power_supply_is_system_supplied);
/*
* If no system scope power class device was found at all, most probably we
@@ -412,8 +417,7 @@ int power_supply_get_property_from_supplier(struct power_supply *psy,
* This function is not intended for use with a supply with multiple
* suppliers, we simply pick the first supply to report the psp.
*/
- ret = class_for_each_device(power_supply_class, NULL, &data,
- __power_supply_get_supplier_property);
+ ret = power_supply_for_each_device(&data, __power_supply_get_supplier_property);
if (ret < 0)
return ret;
if (ret == 0)
@@ -458,8 +462,8 @@ static int power_supply_match_device_by_name(struct device *dev, const void *dat
struct power_supply *power_supply_get_by_name(const char *name)
{
struct power_supply *psy = NULL;
- struct device *dev = class_find_device(power_supply_class, NULL, name,
- power_supply_match_device_by_name);
+ struct device *dev = class_find_device(&power_supply_class, NULL, name,
+ power_supply_match_device_by_name);
if (dev) {
psy = dev_get_drvdata(dev);
@@ -515,8 +519,8 @@ struct power_supply *power_supply_get_by_phandle(struct device_node *np,
if (!power_supply_np)
return ERR_PTR(-ENODEV);
- dev = class_find_device(power_supply_class, NULL, power_supply_np,
- power_supply_match_device_node);
+ dev = class_find_device(&power_supply_class, NULL, power_supply_np,
+ power_supply_match_device_node);
of_node_put(power_supply_np);
@@ -1369,7 +1373,7 @@ __power_supply_register(struct device *parent,
device_initialize(dev);
- dev->class = power_supply_class;
+ dev->class = &power_supply_class;
dev->type = &power_supply_dev_type;
dev->parent = parent;
dev->release = power_supply_dev_release;
@@ -1617,20 +1621,13 @@ EXPORT_SYMBOL_GPL(power_supply_get_drvdata);
static int __init power_supply_class_init(void)
{
- power_supply_class = class_create("power_supply");
-
- if (IS_ERR(power_supply_class))
- return PTR_ERR(power_supply_class);
-
- power_supply_class->dev_uevent = power_supply_uevent;
- power_supply_init_attrs(&power_supply_dev_type);
-
- return 0;
+ power_supply_init_attrs();
+ return class_register(&power_supply_class);
}
static void __exit power_supply_class_exit(void)
{
- class_destroy(power_supply_class);
+ class_unregister(&power_supply_class);
}
subsys_initcall(power_supply_class_init);
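The new power_supply_for_each_device() export gives out-of-file users (for example the APM emulation) a way to walk registered supplies now that power_supply_class is private. A hypothetical caller might look like the sketch below; __count_online() and count_online_supplies() are made-up names, but the calling convention (data pointer first, non-zero callback return stops the walk) matches the helper added above:

/* Hypothetical consumer: count supplies currently reporting ONLINE. */
static int __count_online(struct device *dev, void *data)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	union power_supply_propval val;
	int *count = data;

	if (!power_supply_get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val) &&
	    val.intval)
		(*count)++;

	return 0;	/* non-zero would stop the iteration */
}

static int count_online_supplies(void)
{
	int count = 0;

	power_supply_for_each_device(&count, __count_online);
	return count;
}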
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 977611e16373..0d2c3724d0bc 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -271,6 +271,23 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
return count;
}
+static ssize_t power_supply_show_charge_behaviour(struct device *dev,
+ struct power_supply *psy,
+ union power_supply_propval *value,
+ char *buf)
+{
+ int ret;
+
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ value);
+ if (ret < 0)
+ return ret;
+
+ return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
+ value->intval, buf);
+}
+
static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
@@ -298,21 +315,24 @@ static ssize_t power_supply_show_property(struct device *dev,
}
}
- if (ps_attr->text_values_len > 0 &&
- value.intval < ps_attr->text_values_len && value.intval >= 0) {
- return sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
- }
-
switch (psp) {
case POWER_SUPPLY_PROP_USB_TYPE:
ret = power_supply_show_usb_type(dev, psy->desc,
&value, buf);
break;
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ ret = power_supply_show_charge_behaviour(dev, psy, &value, buf);
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = sysfs_emit(buf, "%s\n", value.strval);
break;
default:
- ret = sysfs_emit(buf, "%d\n", value.intval);
+ if (ps_attr->text_values_len > 0 &&
+ value.intval < ps_attr->text_values_len && value.intval >= 0) {
+ ret = sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
+ } else {
+ ret = sysfs_emit(buf, "%d\n", value.intval);
+ }
}
return ret;
@@ -394,17 +414,15 @@ static const struct attribute_group power_supply_attr_group = {
.is_visible = power_supply_attr_is_visible,
};
-static const struct attribute_group *power_supply_attr_groups[] = {
+const struct attribute_group *power_supply_attr_groups[] = {
&power_supply_attr_group,
- NULL,
+ NULL
};
-void power_supply_init_attrs(struct device_type *dev_type)
+void power_supply_init_attrs(void)
{
int i;
- dev_type->groups = power_supply_attr_groups;
-
for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) {
struct device_attribute *attr;
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index d90b96df8e73..32eafe2c00af 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -159,12 +159,12 @@ static int rt5033_battery_probe(struct i2c_client *client)
return -EINVAL;
}
- i2c_set_clientdata(client, battery);
psy_cfg.of_node = client->dev.of_node;
psy_cfg.drv_data = battery;
- battery->psy = power_supply_register(&client->dev,
- &rt5033_battery_desc, &psy_cfg);
+ battery->psy = devm_power_supply_register(&client->dev,
+ &rt5033_battery_desc,
+ &psy_cfg);
if (IS_ERR(battery->psy))
return dev_err_probe(&client->dev, PTR_ERR(battery->psy),
"Failed to register power supply\n");
@@ -172,13 +172,6 @@ static int rt5033_battery_probe(struct i2c_client *client)
return 0;
}
-static void rt5033_battery_remove(struct i2c_client *client)
-{
- struct rt5033_battery *battery = i2c_get_clientdata(client);
-
- power_supply_unregister(battery->psy);
-}
-
static const struct i2c_device_id rt5033_battery_id[] = {
{ "rt5033-battery", },
{ }
@@ -197,7 +190,6 @@ static struct i2c_driver rt5033_battery_driver = {
.of_match_table = rt5033_battery_of_match,
},
.probe = rt5033_battery_probe,
- .remove = rt5033_battery_remove,
.id_table = rt5033_battery_id,
};
module_i2c_driver(rt5033_battery_driver);
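The rt5033 conversion above is the template for the remaining drivers in this series (rx51, tps65090, twl4030-madc, wm831x, wm8350): once power_supply_register() becomes devm_power_supply_register(), devres unwinds the registration on unbind, so the explicit .remove callback and the clientdata/drvdata bookkeeping can go away. A minimal sketch of the resulting probe shape, with hypothetical struct and desc names:

static int example_battery_probe(struct i2c_client *client)
{
	struct power_supply_config cfg = { };
	struct example_battery *bat;

	bat = devm_kzalloc(&client->dev, sizeof(*bat), GFP_KERNEL);
	if (!bat)
		return -ENOMEM;

	cfg.of_node = client->dev.of_node;
	cfg.drv_data = bat;

	/* Unregistered automatically, in reverse order, on unbind. */
	bat->psy = devm_power_supply_register(&client->dev,
					      &example_battery_desc, &cfg);
	return PTR_ERR_OR_ZERO(bat->psy);
}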
diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index e2bfc81f0fd9..7cdcd415e868 100644
--- a/drivers/power/supply/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
@@ -192,14 +192,11 @@ static int rx51_battery_probe(struct platform_device *pdev)
{
struct power_supply_config psy_cfg = {};
struct rx51_device_info *di;
- int ret;
di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
- platform_set_drvdata(pdev, di);
-
di->dev = &pdev->dev;
di->bat_desc.name = "rx51-battery";
di->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -209,52 +206,23 @@ static int rx51_battery_probe(struct platform_device *pdev)
psy_cfg.drv_data = di;
- di->channel_temp = iio_channel_get(di->dev, "temp");
- if (IS_ERR(di->channel_temp)) {
- ret = PTR_ERR(di->channel_temp);
- goto error;
- }
+ di->channel_temp = devm_iio_channel_get(di->dev, "temp");
+ if (IS_ERR(di->channel_temp))
+ return PTR_ERR(di->channel_temp);
- di->channel_bsi = iio_channel_get(di->dev, "bsi");
- if (IS_ERR(di->channel_bsi)) {
- ret = PTR_ERR(di->channel_bsi);
- goto error_channel_temp;
- }
+ di->channel_bsi = devm_iio_channel_get(di->dev, "bsi");
+ if (IS_ERR(di->channel_bsi))
+ return PTR_ERR(di->channel_bsi);
- di->channel_vbat = iio_channel_get(di->dev, "vbat");
- if (IS_ERR(di->channel_vbat)) {
- ret = PTR_ERR(di->channel_vbat);
- goto error_channel_bsi;
- }
+ di->channel_vbat = devm_iio_channel_get(di->dev, "vbat");
+ if (IS_ERR(di->channel_vbat))
+ return PTR_ERR(di->channel_vbat);
- di->bat = power_supply_register(di->dev, &di->bat_desc, &psy_cfg);
- if (IS_ERR(di->bat)) {
- ret = PTR_ERR(di->bat);
- goto error_channel_vbat;
- }
+ di->bat = devm_power_supply_register(di->dev, &di->bat_desc, &psy_cfg);
+ if (IS_ERR(di->bat))
+ return PTR_ERR(di->bat);
return 0;
-
-error_channel_vbat:
- iio_channel_release(di->channel_vbat);
-error_channel_bsi:
- iio_channel_release(di->channel_bsi);
-error_channel_temp:
- iio_channel_release(di->channel_temp);
-error:
-
- return ret;
-}
-
-static void rx51_battery_remove(struct platform_device *pdev)
-{
- struct rx51_device_info *di = platform_get_drvdata(pdev);
-
- power_supply_unregister(di->bat);
-
- iio_channel_release(di->channel_vbat);
- iio_channel_release(di->channel_bsi);
- iio_channel_release(di->channel_temp);
}
#ifdef CONFIG_OF
@@ -267,7 +235,6 @@ MODULE_DEVICE_TABLE(of, n900_battery_of_match);
static struct platform_driver rx51_battery_driver = {
.probe = rx51_battery_probe,
- .remove_new = rx51_battery_remove,
.driver = {
.name = "rx51-battery",
.of_match_table = of_match_ptr(n900_battery_of_match),
diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
index c59197d2aa87..d41595764caa 100644
--- a/drivers/power/supply/tps65090-charger.c
+++ b/drivers/power/supply/tps65090-charger.c
@@ -262,7 +262,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = cdata;
- cdata->ac = power_supply_register(&pdev->dev, &tps65090_charger_desc,
+ cdata->ac = devm_power_supply_register(&pdev->dev, &tps65090_charger_desc,
&psy_cfg);
if (IS_ERR(cdata->ac)) {
dev_err(&pdev->dev, "failed: power supply register\n");
@@ -277,7 +277,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
ret = tps65090_config_charger(cdata);
if (ret < 0) {
dev_err(&pdev->dev, "charger config failed, err %d\n", ret);
- goto fail_unregister_supply;
+ return ret;
}
/* Check for charger presence */
@@ -286,14 +286,14 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(cdata->dev, "%s(): Error in reading reg 0x%x", __func__,
TPS65090_REG_CG_STATUS1);
- goto fail_unregister_supply;
+ return ret;
}
if (status1 != 0) {
ret = tps65090_enable_charging(cdata);
if (ret < 0) {
dev_err(cdata->dev, "error enabling charger\n");
- goto fail_unregister_supply;
+ return ret;
}
cdata->ac_online = 1;
power_supply_changed(cdata->ac);
@@ -306,7 +306,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
dev_err(cdata->dev,
"Unable to register irq %d err %d\n", irq,
ret);
- goto fail_unregister_supply;
+ return ret;
}
} else {
cdata->poll_task = kthread_run(tps65090_charger_poll_task,
@@ -316,16 +316,11 @@ static int tps65090_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(cdata->poll_task);
dev_err(cdata->dev,
"Unable to run kthread err %d\n", ret);
- goto fail_unregister_supply;
+ return ret;
}
}
return 0;
-
-fail_unregister_supply:
- power_supply_unregister(cdata->ac);
-
- return ret;
}
static void tps65090_charger_remove(struct platform_device *pdev)
@@ -334,7 +329,6 @@ static void tps65090_charger_remove(struct platform_device *pdev)
if (cdata->irq == -ENXIO)
kthread_stop(cdata->poll_task);
- power_supply_unregister(cdata->ac);
}
static const struct of_device_id of_tps65090_charger_match[] = {
diff --git a/drivers/power/supply/twl4030_madc_battery.c b/drivers/power/supply/twl4030_madc_battery.c
index 33106476bea2..3935162e350b 100644
--- a/drivers/power/supply/twl4030_madc_battery.c
+++ b/drivers/power/supply/twl4030_madc_battery.c
@@ -188,30 +188,23 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
struct twl4030_madc_battery *twl4030_madc_bat;
struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
- int ret = 0;
twl4030_madc_bat = devm_kzalloc(&pdev->dev, sizeof(*twl4030_madc_bat),
GFP_KERNEL);
if (!twl4030_madc_bat)
return -ENOMEM;
- twl4030_madc_bat->channel_temp = iio_channel_get(&pdev->dev, "temp");
- if (IS_ERR(twl4030_madc_bat->channel_temp)) {
- ret = PTR_ERR(twl4030_madc_bat->channel_temp);
- goto err;
- }
+ twl4030_madc_bat->channel_temp = devm_iio_channel_get(&pdev->dev, "temp");
+ if (IS_ERR(twl4030_madc_bat->channel_temp))
+ return PTR_ERR(twl4030_madc_bat->channel_temp);
- twl4030_madc_bat->channel_ichg = iio_channel_get(&pdev->dev, "ichg");
- if (IS_ERR(twl4030_madc_bat->channel_ichg)) {
- ret = PTR_ERR(twl4030_madc_bat->channel_ichg);
- goto err_temp;
- }
+ twl4030_madc_bat->channel_ichg = devm_iio_channel_get(&pdev->dev, "ichg");
+ if (IS_ERR(twl4030_madc_bat->channel_ichg))
+ return PTR_ERR(twl4030_madc_bat->channel_ichg);
- twl4030_madc_bat->channel_vbat = iio_channel_get(&pdev->dev, "vbat");
- if (IS_ERR(twl4030_madc_bat->channel_vbat)) {
- ret = PTR_ERR(twl4030_madc_bat->channel_vbat);
- goto err_ichg;
- }
+ twl4030_madc_bat->channel_vbat = devm_iio_channel_get(&pdev->dev, "vbat");
+ if (IS_ERR(twl4030_madc_bat->channel_vbat))
+ return PTR_ERR(twl4030_madc_bat->channel_vbat);
/* sort charging and discharging calibration data */
sort(pdata->charging, pdata->charging_size,
@@ -222,37 +215,14 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
twl4030_cmp, NULL);
twl4030_madc_bat->pdata = pdata;
- platform_set_drvdata(pdev, twl4030_madc_bat);
psy_cfg.drv_data = twl4030_madc_bat;
- twl4030_madc_bat->psy = power_supply_register(&pdev->dev,
- &twl4030_madc_bat_desc,
- &psy_cfg);
- if (IS_ERR(twl4030_madc_bat->psy)) {
- ret = PTR_ERR(twl4030_madc_bat->psy);
- goto err_vbat;
- }
+ twl4030_madc_bat->psy = devm_power_supply_register(&pdev->dev,
+ &twl4030_madc_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(twl4030_madc_bat->psy))
+ return PTR_ERR(twl4030_madc_bat->psy);
return 0;
-
-err_vbat:
- iio_channel_release(twl4030_madc_bat->channel_vbat);
-err_ichg:
- iio_channel_release(twl4030_madc_bat->channel_ichg);
-err_temp:
- iio_channel_release(twl4030_madc_bat->channel_temp);
-err:
- return ret;
-}
-
-static void twl4030_madc_battery_remove(struct platform_device *pdev)
-{
- struct twl4030_madc_battery *bat = platform_get_drvdata(pdev);
-
- power_supply_unregister(bat->psy);
-
- iio_channel_release(bat->channel_vbat);
- iio_channel_release(bat->channel_ichg);
- iio_channel_release(bat->channel_temp);
}
static struct platform_driver twl4030_madc_battery_driver = {
@@ -260,7 +230,6 @@ static struct platform_driver twl4030_madc_battery_driver = {
.name = "twl4030_madc_battery",
},
.probe = twl4030_madc_battery_probe,
- .remove_new = twl4030_madc_battery_remove,
};
module_platform_driver(twl4030_madc_battery_driver);
diff --git a/drivers/power/supply/wm831x_backup.c b/drivers/power/supply/wm831x_backup.c
index 1a7265660ade..9673fcf7f3af 100644
--- a/drivers/power/supply/wm831x_backup.c
+++ b/drivers/power/supply/wm831x_backup.c
@@ -171,7 +171,6 @@ static int wm831x_backup_probe(struct platform_device *pdev)
return -ENOMEM;
devdata->wm831x = wm831x;
- platform_set_drvdata(pdev, devdata);
/* We ignore configuration failures since we can still read
* back the status without enabling the charger (which may
@@ -191,22 +190,14 @@ static int wm831x_backup_probe(struct platform_device *pdev)
devdata->backup_desc.properties = wm831x_backup_props;
devdata->backup_desc.num_properties = ARRAY_SIZE(wm831x_backup_props);
devdata->backup_desc.get_property = wm831x_backup_get_prop;
- devdata->backup = power_supply_register(&pdev->dev,
- &devdata->backup_desc, NULL);
+ devdata->backup = devm_power_supply_register(&pdev->dev,
+ &devdata->backup_desc, NULL);
return PTR_ERR_OR_ZERO(devdata->backup);
}
-static void wm831x_backup_remove(struct platform_device *pdev)
-{
- struct wm831x_backup *devdata = platform_get_drvdata(pdev);
-
- power_supply_unregister(devdata->backup);
-}
-
static struct platform_driver wm831x_backup_driver = {
.probe = wm831x_backup_probe,
- .remove_new = wm831x_backup_remove,
.driver = {
.name = "wm831x-backup",
},
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index e49b01ee5f3e..d56e499ac59f 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -570,8 +570,9 @@ static int wm831x_power_probe(struct platform_device *pdev)
power->wall_desc.properties = wm831x_wall_props;
power->wall_desc.num_properties = ARRAY_SIZE(wm831x_wall_props);
power->wall_desc.get_property = wm831x_wall_get_prop;
- power->wall = power_supply_register(&pdev->dev, &power->wall_desc,
- NULL);
+ power->wall = devm_power_supply_register(&pdev->dev,
+ &power->wall_desc,
+ NULL);
if (IS_ERR(power->wall)) {
ret = PTR_ERR(power->wall);
goto err;
@@ -582,7 +583,9 @@ static int wm831x_power_probe(struct platform_device *pdev)
power->usb_desc.properties = wm831x_usb_props;
power->usb_desc.num_properties = ARRAY_SIZE(wm831x_usb_props);
power->usb_desc.get_property = wm831x_usb_get_prop;
- power->usb = power_supply_register(&pdev->dev, &power->usb_desc, NULL);
+ power->usb = devm_power_supply_register(&pdev->dev,
+ &power->usb_desc,
+ NULL);
if (IS_ERR(power->usb)) {
ret = PTR_ERR(power->usb);
goto err_wall;
@@ -599,9 +602,9 @@ static int wm831x_power_probe(struct platform_device *pdev)
power->battery_desc.num_properties = ARRAY_SIZE(wm831x_bat_props);
power->battery_desc.get_property = wm831x_bat_get_prop;
power->battery_desc.use_for_apm = 1;
- power->battery = power_supply_register(&pdev->dev,
- &power->battery_desc,
- NULL);
+ power->battery = devm_power_supply_register(&pdev->dev,
+ &power->battery_desc,
+ NULL);
if (IS_ERR(power->battery)) {
ret = PTR_ERR(power->battery);
goto err_usb;
@@ -684,12 +687,8 @@ err_syslo:
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, power);
err_battery:
- if (power->have_battery)
- power_supply_unregister(power->battery);
err_usb:
- power_supply_unregister(power->usb);
err_wall:
- power_supply_unregister(power->wall);
err:
return ret;
}
@@ -717,11 +716,6 @@ static void wm831x_power_remove(struct platform_device *pdev)
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, wm831x_power);
-
- if (wm831x_power->have_battery)
- power_supply_unregister(wm831x_power->battery);
- power_supply_unregister(wm831x_power->wall);
- power_supply_unregister(wm831x_power->usb);
}
static struct platform_driver wm831x_power_driver = {
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index f23b4f5343bc..3f79ab6f6abf 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -540,22 +540,17 @@ static int wm8350_power_probe(struct platform_device *pdev)
struct wm8350_charger_policy *policy = power->policy;
int ret;
- power->ac = power_supply_register(&pdev->dev, &wm8350_ac_desc, NULL);
+ power->ac = devm_power_supply_register(&pdev->dev, &wm8350_ac_desc, NULL);
if (IS_ERR(power->ac))
return PTR_ERR(power->ac);
- power->battery = power_supply_register(&pdev->dev, &wm8350_battery_desc,
- NULL);
- if (IS_ERR(power->battery)) {
- ret = PTR_ERR(power->battery);
- goto battery_failed;
- }
+ power->battery = devm_power_supply_register(&pdev->dev, &wm8350_battery_desc, NULL);
+ if (IS_ERR(power->battery))
+ return PTR_ERR(power->battery);
- power->usb = power_supply_register(&pdev->dev, &wm8350_usb_desc, NULL);
- if (IS_ERR(power->usb)) {
- ret = PTR_ERR(power->usb);
- goto usb_failed;
- }
+ power->usb = devm_power_supply_register(&pdev->dev, &wm8350_usb_desc, NULL);
+ if (IS_ERR(power->usb))
+ return PTR_ERR(power->usb);
ret = device_create_file(&pdev->dev, &dev_attr_charger_state);
if (ret < 0)
@@ -570,25 +565,14 @@ static int wm8350_power_probe(struct platform_device *pdev)
}
return ret;
-
-usb_failed:
- power_supply_unregister(power->battery);
-battery_failed:
- power_supply_unregister(power->ac);
-
- return ret;
}
static void wm8350_power_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
- struct wm8350_power *power = &wm8350->power;
free_charger_irq(wm8350);
device_remove_file(&pdev->dev, &dev_attr_charger_state);
- power_supply_unregister(power->battery);
- power_supply_unregister(power->ac);
- power_supply_unregister(power->usb);
}
static struct platform_driver wm8350_power_driver = {
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index ce920f17f45f..f390665743c4 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -522,7 +522,7 @@ static int dtpm_for_each_child(const struct dtpm_node *hierarchy,
/**
* dtpm_create_hierarchy - Create the dtpm hierarchy
- * @hierarchy: An array of struct dtpm_node describing the hierarchy
+ * @dtpm_match_table: Pointer to the array of device ID structures
*
* The function is called by the platform specific code with the
* description of the different node in the hierarchy. It creates the
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 9193c3b8edeb..bc90126f1b5f 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -42,6 +42,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
+ struct em_perf_state *table;
struct cpumask cpus;
unsigned long freq;
u64 power;
@@ -50,20 +51,22 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
nr_cpus = cpumask_weight(&cpus);
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
for (i = 0; i < pd->nr_perf_states; i++) {
- power = pd->table[i].power * nr_cpus;
+ power = table[i].power * nr_cpus;
if (power > power_limit)
break;
}
- freq = pd->table[i - 1].frequency;
+ freq = table[i - 1].frequency;
+ power_limit = table[i - 1].power * nr_cpus;
+ rcu_read_unlock();
freq_qos_update_request(&dtpm_cpu->qos_req, freq);
- power_limit = pd->table[i - 1].power * nr_cpus;
-
return power_limit;
}
@@ -87,9 +90,11 @@ static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
+ struct em_perf_state *table;
struct em_perf_domain *pd;
struct cpumask *pd_mask;
unsigned long freq;
+ u64 power = 0;
int i;
pd = em_cpu_get(dtpm_cpu->cpu);
@@ -98,33 +103,43 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
freq = cpufreq_quick_get(dtpm_cpu->cpu);
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
for (i = 0; i < pd->nr_perf_states; i++) {
- if (pd->table[i].frequency < freq)
+ if (table[i].frequency < freq)
continue;
- return scale_pd_power_uw(pd_mask, pd->table[i].power);
+ power = scale_pd_power_uw(pd_mask, table[i].power);
+ break;
}
+ rcu_read_unlock();
- return 0;
+ return power;
}
static int update_pd_power_uw(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
+ struct em_perf_state *table;
struct cpumask cpus;
int nr_cpus;
cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
nr_cpus = cpumask_weight(&cpus);
- dtpm->power_min = em->table[0].power;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(em);
+
+ dtpm->power_min = table[0].power;
dtpm->power_min *= nr_cpus;
- dtpm->power_max = em->table[em->nr_perf_states - 1].power;
+ dtpm->power_max = table[em->nr_perf_states - 1].power;
dtpm->power_max *= nr_cpus;
+ rcu_read_unlock();
+
return 0;
}
@@ -143,7 +158,7 @@ static void pd_release(struct dtpm *dtpm)
cpufreq_cpu_put(policy);
}
-
+
kfree(dtpm_cpu);
}
@@ -180,6 +195,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
{
struct dtpm_cpu *dtpm_cpu;
struct cpufreq_policy *policy;
+ struct em_perf_state *table;
struct em_perf_domain *pd;
char name[CPUFREQ_NAME_LEN];
int ret = -ENOMEM;
@@ -216,10 +232,13 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
if (ret)
goto out_kfree_dtpm_cpu;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
ret = freq_qos_add_request(&policy->constraints,
&dtpm_cpu->qos_req, FREQ_QOS_MAX,
- pd->table[pd->nr_perf_states - 1].frequency);
- if (ret)
+ table[pd->nr_perf_states - 1].frequency);
+ rcu_read_unlock();
+ if (ret < 0)
goto out_dtpm_unregister;
cpufreq_cpu_put(policy);
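The change that repeats throughout dtpm_cpu.c (and dtpm_devfreq.c below) is that the energy-model table is no longer read directly through pd->table; it has to be fetched with em_perf_state_from_pd() inside an RCU read-side section, because the table can now be swapped at runtime. A minimal sketch of the access pattern, assuming only the EM API used in these hunks:

/* Read one per-state power value under RCU protection. */
static u64 example_state_power_uw(struct em_perf_domain *pd, int i)
{
	struct em_perf_state *table;
	u64 power;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);	/* snapshot valid inside the lock */
	power = table[i].power;
	rcu_read_unlock();

	return power;
}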
diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
index 612c3b59dd5b..f40bce8176df 100644
--- a/drivers/powercap/dtpm_devfreq.c
+++ b/drivers/powercap/dtpm_devfreq.c
@@ -37,11 +37,16 @@ static int update_pd_power_uw(struct dtpm *dtpm)
struct devfreq *devfreq = dtpm_devfreq->devfreq;
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
+ struct em_perf_state *table;
- dtpm->power_min = pd->table[0].power;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
- dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+ dtpm->power_min = table[0].power;
+ dtpm->power_max = table[pd->nr_perf_states - 1].power;
+
+ rcu_read_unlock();
return 0;
}
@@ -51,20 +56,23 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
struct devfreq *devfreq = dtpm_devfreq->devfreq;
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
+ struct em_perf_state *table;
unsigned long freq;
int i;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
for (i = 0; i < pd->nr_perf_states; i++) {
- if (pd->table[i].power > power_limit)
+ if (table[i].power > power_limit)
break;
}
- freq = pd->table[i - 1].frequency;
+ freq = table[i - 1].frequency;
+ power_limit = table[i - 1].power;
+ rcu_read_unlock();
dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
- power_limit = pd->table[i - 1].power;
-
return power_limit;
}
@@ -89,8 +97,9 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
struct devfreq_dev_status status;
+ struct em_perf_state *table;
unsigned long freq;
- u64 power;
+ u64 power = 0;
int i;
mutex_lock(&devfreq->lock);
@@ -100,19 +109,22 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
_normalize_load(&status);
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
for (i = 0; i < pd->nr_perf_states; i++) {
- if (pd->table[i].frequency < freq)
+ if (table[i].frequency < freq)
continue;
- power = pd->table[i].power;
+ power = table[i].power;
power *= status.busy_time;
power >>= 10;
- return power;
+ break;
}
+ rcu_read_unlock();
- return 0;
+ return power;
}
static void pd_release(struct dtpm *dtpm)
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 2feed036c1cd..a28d54fd5222 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -759,6 +760,11 @@ static int rapl_config(struct rapl_package *rp)
default:
return -EINVAL;
}
+
+ /* defaults_msr can be NULL on unsupported platforms */
+ if (!rp->priv->defaults || !rp->priv->rpi)
+ return -ENODEV;
+
return 0;
}
@@ -1256,6 +1262,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
@@ -1499,7 +1507,7 @@ static int rapl_detect_domains(struct rapl_package *rp)
}
/* called from CPU hotplug notifier, hotplug lock held */
-void rapl_remove_package(struct rapl_package *rp)
+void rapl_remove_package_cpuslocked(struct rapl_package *rp)
{
struct rapl_domain *rd, *rd_package = NULL;
@@ -1528,10 +1536,18 @@ void rapl_remove_package(struct rapl_package *rp)
list_del(&rp->plist);
kfree(rp);
}
+EXPORT_SYMBOL_GPL(rapl_remove_package_cpuslocked);
+
+void rapl_remove_package(struct rapl_package *rp)
+{
+ guard(cpus_read_lock)();
+ rapl_remove_package_cpuslocked(rp);
+}
EXPORT_SYMBOL_GPL(rapl_remove_package);
/* caller to ensure CPU hotplug lock is held */
-struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu)
{
struct rapl_package *rp;
int uid;
@@ -1549,10 +1565,17 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv,
return NULL;
}
+EXPORT_SYMBOL_GPL(rapl_find_package_domain_cpuslocked);
+
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+{
+ guard(cpus_read_lock)();
+ return rapl_find_package_domain_cpuslocked(id, priv, id_is_cpu);
+}
EXPORT_SYMBOL_GPL(rapl_find_package_domain);
/* called from CPU hotplug notifier, hotplug lock held */
-struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
struct rapl_package *rp;
int ret;
@@ -1564,7 +1587,7 @@ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id
if (id_is_cpu) {
rp->id = topology_logical_die_id(id);
rp->lead_cpu = id;
- if (topology_max_die_per_package() > 1)
+ if (topology_max_dies_per_package() > 1)
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d",
topology_physical_package_id(id), topology_die_id(id));
else
@@ -1598,6 +1621,13 @@ err_free_package:
kfree(rp);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(rapl_add_package_cpuslocked);
+
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+{
+ guard(cpus_read_lock)();
+ return rapl_add_package_cpuslocked(id, priv, id_is_cpu);
+}
EXPORT_SYMBOL_GPL(rapl_add_package);
static void power_limit_state_save(void)
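The _cpuslocked() split above follows the usual convention: the CPU hotplug callbacks already run with cpus_read_lock() held, so they call the _cpuslocked variants directly (see the intel_rapl_msr.c hunk below), while external callers keep the old names as thin wrappers that take the lock through the scope-based guard() helper from <linux/cleanup.h>. A sketch of that wrapper shape, with hypothetical names:

static int example_op_cpuslocked(int id)
{
	lockdep_assert_cpus_held();	/* caller holds cpus_read_lock() */
	/* ... real work ... */
	return 0;
}

static int example_op(int id)
{
	guard(cpus_read_lock)();	/* dropped automatically on return */
	return example_op_cpuslocked(id);
}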
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 250bd41a588c..b4b6930cacb0 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -73,9 +73,9 @@ static int rapl_cpu_online(unsigned int cpu)
{
struct rapl_package *rp;
- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
if (!rp) {
- rp = rapl_add_package(cpu, rapl_msr_priv, true);
+ rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -88,14 +88,14 @@ static int rapl_cpu_down_prep(unsigned int cpu)
struct rapl_package *rp;
int lead_cpu;
- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
if (!rp)
return 0;
cpumask_clear_cpu(cpu, &rp->cpumask);
lead_cpu = cpumask_first(&rp->cpumask);
if (lead_cpu >= nr_cpu_ids)
- rapl_remove_package(rp);
+ rapl_remove_package_cpuslocked(rp);
else if (rp->lead_cpu == cpu)
rp->lead_cpu = lead_cpu;
return 0;
diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c
index 891c90fefd8b..f6b7f085977c 100644
--- a/drivers/powercap/intel_rapl_tpmi.c
+++ b/drivers/powercap/intel_rapl_tpmi.c
@@ -40,6 +40,7 @@ enum tpmi_rapl_register {
TPMI_RAPL_REG_ENERGY_STATUS,
TPMI_RAPL_REG_PERF_STATUS,
TPMI_RAPL_REG_POWER_INFO,
+ TPMI_RAPL_REG_DOMAIN_INFO,
TPMI_RAPL_REG_INTERRUPT,
TPMI_RAPL_REG_MAX = 15,
};
@@ -130,6 +131,12 @@ static void trp_release(struct tpmi_rapl_package *trp)
mutex_unlock(&tpmi_rapl_lock);
}
+/*
+ * Bit 0 of TPMI_RAPL_REG_DOMAIN_INFO indicates if the current package is a domain
+ * root or not. Only domain root packages can enumerate System (Psys) Domain.
+ */
+#define TPMI_RAPL_DOMAIN_ROOT BIT(0)
+
static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
{
u8 tpmi_domain_version;
@@ -139,6 +146,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
enum rapl_domain_reg_id reg_id;
int tpmi_domain_size, tpmi_domain_flags;
u64 tpmi_domain_header = readq(trp->base + offset);
+ u64 tpmi_domain_info;
/* Domain Parent bits are ignored for now */
tpmi_domain_version = tpmi_domain_header & 0xff;
@@ -169,6 +177,13 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
domain_type = RAPL_DOMAIN_PACKAGE;
break;
case TPMI_RAPL_DOMAIN_SYSTEM:
+ if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_DOMAIN_INFO))) {
+ pr_warn(FW_BUG "System domain must support Domain Info register\n");
+ return -ENODEV;
+ }
+ tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO);
+ if (!(tpmi_domain_info & TPMI_RAPL_DOMAIN_ROOT))
+ return 0;
domain_type = RAPL_DOMAIN_PLATFORM;
break;
case TPMI_RAPL_DOMAIN_MEMORY:
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5dd5f188e14f..604541dcb320 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -155,6 +155,18 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_FC3W
+ tristate "RENESAS FemtoClock3 Wireless as PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using Renesas FemtoClock3 Wireless
+ as a PTP clock. This clock is only useful if your time stamping
+ MAC is connected to the RENESAS chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_fc3.
+
config PTP_1588_CLOCK_MOCK
tristate "Mock-up PTP clock"
depends on PTP_1588_CLOCK
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index dea0cebd2303..68bf02078053 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_FC3W) += ptp_fc3.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
obj-$(CONFIG_PTP_1588_CLOCK_MOCK) += ptp_mock.o
obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 15b804ba4868..c56cd0f63909 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
-#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -16,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <linux/xarray.h>
#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
@@ -25,13 +25,16 @@
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
-struct class *ptp_class;
+const struct class ptp_class = {
+ .name = "ptp",
+ .dev_groups = ptp_groups
+};
/* private globals */
static dev_t ptp_devt;
-static DEFINE_IDA(ptp_clocks_map);
+static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
/* time stamp event queue operations */
@@ -44,18 +47,31 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
struct ptp_clock_event *src)
{
struct ptp_extts_event *dst;
+ struct timespec64 offset_ts;
unsigned long flags;
s64 seconds;
u32 remainder;
- seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ if (src->type == PTP_CLOCK_EXTTS) {
+ seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ } else if (src->type == PTP_CLOCK_EXTOFF) {
+ offset_ts = ns_to_timespec64(src->offset);
+ seconds = offset_ts.tv_sec;
+ remainder = offset_ts.tv_nsec;
+ } else {
+ WARN(1, "%s: unknown type %d\n", __func__, src->type);
+ return;
+ }
spin_lock_irqsave(&queue->lock, flags);
dst = &queue->buf[queue->tail];
dst->index = src->index;
+ dst->flags = PTP_EXTTS_EVENT_VALID;
dst->t.sec = seconds;
dst->t.nsec = remainder;
+ if (src->type == PTP_CLOCK_EXTOFF)
+ dst->flags |= PTP_EXT_OFFSET;
/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
@@ -188,7 +204,7 @@ static void ptp_clock_release(struct device *dev)
bitmap_free(tsevq->mask);
kfree(tsevq);
debugfs_remove(ptp->debugfs_root);
- ida_free(&ptp_clocks_map, ptp->index);
+ xa_erase(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -220,7 +236,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
{
struct ptp_clock *ptp;
struct timestamp_event_queue *queue = NULL;
- int err = 0, index, major = MAJOR(ptp_devt);
+ int err, index, major = MAJOR(ptp_devt);
char debugfsname[16];
size_t size;
@@ -228,16 +244,16 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
return ERR_PTR(-EINVAL);
/* Initialize a clock structure. */
- err = -ENOMEM;
ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
- if (ptp == NULL)
+ if (!ptp) {
+ err = -ENOMEM;
goto no_memory;
+ }
- index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
- if (index < 0) {
- err = index;
+ err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
+ GFP_KERNEL);
+ if (err)
goto no_slot;
- }
ptp->clock.ops = ptp_clock_ops;
ptp->info = info;
@@ -245,13 +261,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->index = index;
INIT_LIST_HEAD(&ptp->tsevqs);
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue)
+ if (!queue) {
+ err = -ENOMEM;
goto no_memory_queue;
+ }
list_add_tail(&queue->qlist, &ptp->tsevqs);
spin_lock_init(&ptp->tsevqs_lock);
queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
- if (!queue->mask)
+ if (!queue->mask) {
+ err = -ENOMEM;
goto no_memory_bitmap;
+ }
bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
spin_lock_init(&queue->lock);
mutex_init(&ptp->pincfg_mux);
@@ -322,7 +342,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Initialize a new device of our class in our clock structure. */
device_initialize(&ptp->dev);
ptp->dev.devt = ptp->devid;
- ptp->dev.class = ptp_class;
+ ptp->dev.class = &ptp_class;
ptp->dev.parent = parent;
ptp->dev.groups = ptp->pin_attr_groups;
ptp->dev.release = ptp_clock_release;
@@ -365,7 +385,7 @@ no_memory_bitmap:
list_del(&queue->qlist);
kfree(queue);
no_memory_queue:
- ida_free(&ptp_clocks_map, index);
+ xa_erase(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
@@ -417,6 +437,7 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
break;
case PTP_CLOCK_EXTTS:
+ case PTP_CLOCK_EXTOFF:
/* Enqueue timestamp on selected queues */
spin_lock_irqsave(&ptp->tsevqs_lock, flags);
list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
@@ -495,19 +516,19 @@ EXPORT_SYMBOL(ptp_cancel_worker_sync);
static void __exit ptp_exit(void)
{
- class_destroy(ptp_class);
+ class_unregister(&ptp_class);
unregister_chrdev_region(ptp_devt, MINORMASK + 1);
- ida_destroy(&ptp_clocks_map);
+ xa_destroy(&ptp_clocks_map);
}
static int __init ptp_init(void)
{
int err;
- ptp_class = class_create("ptp");
- if (IS_ERR(ptp_class)) {
+ err = class_register(&ptp_class);
+ if (err) {
pr_err("ptp: failed to allocate class\n");
- return PTR_ERR(ptp_class);
+ return err;
}
err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
@@ -516,12 +537,11 @@ static int __init ptp_init(void)
goto no_region;
}
- ptp_class->dev_groups = ptp_groups;
pr_info("PTP clock support registered\n");
return 0;
no_region:
- class_destroy(ptp_class);
+ class_unregister(&ptp_class);
return err;
}
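Besides the const-class conversion, ptp_clock.c now allocates clock indices from an allocating XArray instead of an IDA, which also records the clock pointer under its index. A minimal sketch of that allocate/release pairing, limited to the xarray calls used in the hunks above (names are illustrative):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_map);

static int example_alloc_index(void *entry, u32 *index)
{
	/* Picks a free index in [0, INT_MAX] and stores entry there. */
	return xa_alloc(&example_map, index, entry, xa_limit_31b, GFP_KERNEL);
}

static void example_free_index(u32 index)
{
	xa_erase(&example_map, index);	/* the index may be reused later */
}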
diff --git a/drivers/ptp/ptp_fc3.c b/drivers/ptp/ptp_fc3.c
new file mode 100644
index 000000000000..6ef982862e27
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idtRC38xxx_reg.h>
+#include <asm/unaligned.h>
+
+#include "ptp_private.h"
+#include "ptp_fc3.h"
+
+MODULE_DESCRIPTION("Driver for IDT FemtoClock3(TM) family");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the firmware file to be loaded
+ * over-rides any automatic selection
+ */
+static char *firmware;
+module_param(firmware, charp, 0);
+
+static s64 ns2counters(struct idtfc3 *idtfc3, s64 nsec, u32 *sub_ns)
+{
+ s64 sync;
+ s32 rem;
+
+ if (likely(nsec >= 0)) {
+ sync = div_u64_rem(nsec, idtfc3->ns_per_sync, &rem);
+ *sub_ns = rem;
+ } else {
+ sync = -div_u64_rem(-nsec - 1, idtfc3->ns_per_sync, &rem) - 1;
+ *sub_ns = idtfc3->ns_per_sync - rem - 1;
+ }
+
+ return sync * idtfc3->ns_per_sync;
+}
+
+static s64 tdc_meas2offset(struct idtfc3 *idtfc3, u64 meas_read)
+{
+ s64 coarse, fine;
+
+ fine = sign_extend64(FIELD_GET(FINE_MEAS_MASK, meas_read), 12);
+ coarse = sign_extend64(FIELD_GET(COARSE_MEAS_MASK, meas_read), (39 - 13));
+
+ fine = div64_s64(fine * NSEC_PER_SEC, idtfc3->tdc_apll_freq * 62LL);
+ coarse = div64_s64(coarse * NSEC_PER_SEC, idtfc3->time_ref_freq);
+
+ return coarse + fine;
+}
+
+static s64 tdc_offset2phase(struct idtfc3 *idtfc3, s64 offset_ns)
+{
+ if (offset_ns > idtfc3->ns_per_sync / 2)
+ offset_ns -= idtfc3->ns_per_sync;
+
+ return offset_ns * idtfc3->tdc_offset_sign;
+}
+
+static int idtfc3_set_lpf_mode(struct idtfc3 *idtfc3, u8 mode)
+{
+ int err;
+
+ if (mode >= LPF_INVALID)
+ return -EINVAL;
+
+ if (idtfc3->lpf_mode == mode)
+ return 0;
+
+ err = regmap_bulk_write(idtfc3->regmap, LPF_MODE_CNFG, &mode, sizeof(mode));
+ if (err)
+ return err;
+
+ idtfc3->lpf_mode = mode;
+
+ return 0;
+}
+
+static int idtfc3_enable_lpf(struct idtfc3 *idtfc3, bool enable)
+{
+ u8 val;
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable == true)
+ val |= LPF_EN;
+ else
+ val &= ~LPF_EN;
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+}
+
+static int idtfc3_get_time_ref_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u8 time_ref_div;
+ u8 time_clk_div;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_MEAS_DIV_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+ time_ref_div = FIELD_GET(TIME_REF_DIV_MASK, get_unaligned_le32(buf)) + 1;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_COUNT, buf, 1);
+ if (err)
+ return err;
+ time_clk_div = (buf[0] & TIME_CLOCK_COUNT_MASK) + 1;
+ idtfc3->time_ref_freq = idtfc3->hw_param.time_clk_freq *
+ time_clk_div / time_ref_div;
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_offset_sign(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u32 val;
+ u8 sig1, sig2;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_TDC_FANOUT_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ val = get_unaligned_le32(buf);
+ if ((val & TIME_SYNC_TO_TDC_EN) != TIME_SYNC_TO_TDC_EN) {
+ dev_err(idtfc3->dev, "TIME_SYNC_TO_TDC_EN is off !!!");
+ return -EINVAL;
+ }
+
+ sig1 = FIELD_GET(SIG1_MUX_SEL_MASK, val);
+ sig2 = FIELD_GET(SIG2_MUX_SEL_MASK, val);
+
+ if ((sig1 == sig2) || ((sig1 != TIME_SYNC) && (sig2 != TIME_SYNC))) {
+ dev_err(idtfc3->dev, "Invalid tdc_mux_sel sig1=%d sig2=%d", sig1, sig2);
+ return -EINVAL;
+ } else if (sig1 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = 1;
+ } else if (sig2 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = -1;
+ }
+
+ return 0;
+}
+
+static int idtfc3_lpf_bw(struct idtfc3 *idtfc3, u8 shift, u8 mult)
+{
+ u8 val = FIELD_PREP(LPF_BW_SHIFT, shift) | FIELD_PREP(LPF_BW_MULT, mult);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_BW_CNFG, &val, sizeof(val));
+}
+
+static int idtfc3_enable_tdc(struct idtfc3 *idtfc3, bool enable, u8 meas_mode)
+{
+ int err;
+ u8 val = 0;
+
+ /* Disable TDC first */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable == false)
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_DEFAULT, LPF_BW_MULT_DEFAULT);
+
+ if (meas_mode >= MEAS_MODE_INVALID)
+ return -EINVAL;
+
+ /* Change TDC meas mode */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CNFG,
+ &meas_mode, sizeof(meas_mode));
+ if (err)
+ return err;
+
+ /* Enable TDC */
+ val = TDC_MEAS_EN;
+ if (meas_mode == CONTINUOUS)
+ val |= TDC_MEAS_START;
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_1PPS, LPF_BW_MULT_DEFAULT);
+}
+
+static bool get_tdc_meas(struct idtfc3 *idtfc3, s64 *offset_ns)
+{
+ bool valid = false;
+ u8 buf[9];
+ u8 val;
+ int err;
+
+ while (true) {
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS,
+ &val, sizeof(val));
+ if (err)
+ return false;
+
+ if (val & FIFO_EMPTY)
+ break;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return false;
+
+ valid = true;
+ }
+
+ if (valid)
+ *offset_ns = tdc_meas2offset(idtfc3, get_unaligned_le64(&buf[1]));
+
+ return valid;
+}
+
+static int check_tdc_fifo_overrun(struct idtfc3 *idtfc3)
+{
+ u8 val;
+ int err;
+
+ /* Check if FIFO is overrun */
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (!(val & FIFO_FULL))
+ return 0;
+
+ dev_warn(idtfc3->dev, "TDC FIFO overrun !!!");
+
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int get_tdc_meas_continuous(struct idtfc3 *idtfc3)
+{
+ int err;
+ s64 offset_ns;
+ struct ptp_clock_event event;
+
+ err = check_tdc_fifo_overrun(idtfc3);
+ if (err)
+ return err;
+
+ if (get_tdc_meas(idtfc3, &offset_ns) && offset_ns >= 0) {
+ event.index = 0;
+ event.offset = tdc_offset2phase(idtfc3, offset_ns);
+ event.type = PTP_CLOCK_EXTOFF;
+ ptp_clock_event(idtfc3->ptp_clock, &event);
+ }
+
+ return 0;
+}
+
+static int idtfc3_read_subcounter(struct idtfc3 *idtfc3)
+{
+ u8 buf[5] = {0};
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TOD_COUNTER_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* sync_counter_value is [31:82] and sub_sync_counter_value is [0:30] */
+ return get_unaligned_le32(&buf[1]) & SUB_SYNC_COUNTER_MASK;
+}
+
+static int idtfc3_tod_update_is_done(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 req;
+
+ err = read_poll_timeout_atomic(regmap_bulk_read, err, !req, USEC_PER_MSEC,
+ idtfc3->tc_write_timeout, true, idtfc3->regmap,
+ TOD_SYNC_LOAD_REQ_CTRL, &req, 1);
+ if (err)
+ dev_err(idtfc3->dev, "TOD counter write timeout !!!");
+
+ return err;
+}
+
+static int idtfc3_write_subcounter(struct idtfc3 *idtfc3, u32 counter)
+{
+ u8 buf[18] = {0};
+ int err;
+
+ /* sync_counter_value is [31:82] and sub_sync_counter_value is [0:30] */
+ put_unaligned_le32(counter & SUB_SYNC_COUNTER_MASK, &buf[0]);
+
+ buf[16] = SUB_SYNC_LOAD_ENABLE | SYNC_LOAD_ENABLE;
+ buf[17] = SYNC_LOAD_REQ;
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ return idtfc3_tod_update_is_done(idtfc3);
+}
+
+static int idtfc3_timecounter_update(struct idtfc3 *idtfc3, u32 counter, s64 ns)
+{
+ int err;
+
+ err = idtfc3_write_subcounter(idtfc3, counter);
+ if (err)
+ return err;
+
+ /* Update time counter */
+ idtfc3->ns = ns;
+ idtfc3->last_counter = counter;
+
+ return 0;
+}
+
+static int idtfc3_timecounter_read(struct idtfc3 *idtfc3)
+{
+ int now, delta;
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ /* calculate the delta since the last idtfc3_timecounter_read(): */
+ if (now >= idtfc3->last_counter)
+ delta = now - idtfc3->last_counter;
+ else
+ delta = idtfc3->sub_sync_count - idtfc3->last_counter + now;
+
+ /* Update time counter */
+ idtfc3->ns += delta * idtfc3->ns_per_counter;
+ idtfc3->last_counter = now;
+
+ return 0;
+}
+
+static int _idtfc3_gettime(struct idtfc3 *idtfc3, struct timespec64 *ts)
+{
+ int err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ *ts = ns_to_timespec64(idtfc3->ns);
+
+ return 0;
+}
+
+static int idtfc3_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_gettime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_settime(struct idtfc3 *idtfc3, const struct timespec64 *ts)
+{
+ s64 offset_ns, now_ns;
+ u32 counter, sub_ns;
+ int now;
+
+ if (timespec64_valid(ts) == false) {
+ dev_err(idtfc3->dev, "%s: invalid timespec", __func__);
+ return -EINVAL;
+ }
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ offset_ns = (idtfc3->sub_sync_count - now) * idtfc3->ns_per_counter;
+ now_ns = timespec64_to_ns(ts);
+ (void)ns2counters(idtfc3, offset_ns + now_ns, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, now_ns);
+}
+
+static int idtfc3_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_settime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjtime(struct idtfc3 *idtfc3, s64 delta)
+{
+ /*
+ * The TOD counter can be synchronously loaded with any value,
+ * to be loaded on the next Time Sync pulse
+ */
+ s64 sync_ns;
+ u32 sub_ns;
+ u32 counter;
+
+ if (idtfc3->ns + delta < 0) {
+ dev_err(idtfc3->dev, "%lld ns adj is too large", delta);
+ return -EINVAL;
+ }
+
+ sync_ns = ns2counters(idtfc3, delta + idtfc3->ns_per_sync, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, idtfc3->ns + sync_ns +
+ counter * idtfc3->ns_per_counter);
+}
+
+static int idtfc3_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjtime(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjphase(struct idtfc3 *idtfc3, s32 delta)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 pcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WP);
+ if (err)
+ return err;
+
+ /*
+ * Phase Control Word unit is: 10^9 / (TDC_APLL_FREQ * 124)
+ *
+ * delta * TDC_APLL_FREQ * 124
+ * PCW = ---------------------------
+ * 10^9
+ *
+ */
+ pcw = div_s64((s64)delta * idtfc3->tdc_apll_freq * 124, NSEC_PER_SEC);
+
+ put_unaligned_le64(pcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_PHASE_CTRL, buf, sizeof(buf));
+}
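To make the PCW unit above concrete: pcw = delta * tdc_apll_freq * 124 / 10^9, so with a hypothetical tdc_apll_freq of 1 GHz one PCW step is 10^9 / (10^9 * 124) ns, i.e. 1/124 ns (about 8 ps), and a +10 ns phase adjustment would be written as pcw = 10 * 10^9 * 124 / 10^9 = 1240.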
+
+static int idtfc3_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjphase(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjfine(struct idtfc3 *idtfc3, long scaled_ppm)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 fcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WF);
+ if (err)
+ return err;
+
+ /*
+ * Frequency Control Word unit is: 2^-44 * 10^6 ppm
+ *
+ * adjfreq:
+ * ppb * 2^44
+ * FCW = ----------
+ * 10^9
+ *
+ * adjfine:
+ * ppm_16 * 2^28
+ * FCW = -------------
+ * 10^6
+ */
+ fcw = scaled_ppm * BIT(28);
+ fcw = div_s64(fcw, 1000000);
+
+ put_unaligned_le64(fcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_FREQ_CTRL, buf, sizeof(buf));
+}
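As a sanity check on the adjfine math: scaled_ppm carries 16 fractional bits, so a request of exactly +1 ppm arrives as scaled_ppm = 65536 and yields FCW = 65536 * 2^28 / 10^6 = 2^44 / 10^6 = 17592186 after the integer division, which matches the stated FCW unit of 2^-44 * 10^6 ppm. A self-contained check of that arithmetic (plain user-space C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long scaled_ppm = 1 << 16;	/* +1 ppm in adjfine units */
	int64_t fcw = ((int64_t)scaled_ppm << 28) / 1000000;

	printf("FCW for +1 ppm = %lld\n", (long long)fcw);	/* prints 17592186 */
	return 0;
}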
+
+static int idtfc3_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjfine(idtfc3, scaled_ppm);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int idtfc3_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtfc3->lock);
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ err = 0;
+ /* Only accept a 1-PPS aligned to the second. */
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = 0;
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (on) {
+ /* Only accept requests for external phase offset */
+ if ((rq->extts.flags & PTP_EXT_OFFSET) != (PTP_EXT_OFFSET))
+ err = -EOPNOTSUPP;
+ else
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ } else {
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(idtfc3->lock);
+
+ if (err)
+ dev_err(idtfc3->dev, "Failed in %s with err %d!", __func__, err);
+
+ return err;
+}
+
+static long idtfc3_aux_work(struct ptp_clock_info *ptp)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ static int tdc_get;
+
+ mutex_lock(idtfc3->lock);
+ tdc_get %= TDC_GET_PERIOD;
+ if ((tdc_get == 0) || (tdc_get == TDC_GET_PERIOD / 2))
+ idtfc3_timecounter_read(idtfc3);
+ get_tdc_meas_continuous(idtfc3);
+ tdc_get++;
+ mutex_unlock(idtfc3->lock);
+
+ return idtfc3->tc_update_period;
+}
+
+static const struct ptp_clock_info idtfc3_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = MAX_FFO_PPB,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+ .adjphase = &idtfc3_adjphase,
+ .adjfine = &idtfc3_adjfine,
+ .adjtime = &idtfc3_adjtime,
+ .gettime64 = &idtfc3_gettime,
+ .settime64 = &idtfc3_settime,
+ .enable = &idtfc3_enable,
+ .do_aux_work = &idtfc3_aux_work,
+};
+
+static int idtfc3_hw_calibrate(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+ u8 val;
+
+ mdelay(10);
+ /*
+ * Toggle TDC_DAC_RECAL_REQ:
+ * (1) set tdc_en to 1
+ * (2) set tdc_dac_recal_req to 0
+ * (3) set tdc_dac_recal_req to 1
+ */
+ val = TDC_EN;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = TDC_EN | TDC_DAC_RECAL_REQ;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ /*
+ * Toggle APLL_REINIT:
+ * (1) set apll_reinit to 0
+ * (2) set apll_reinit to 1
+ */
+ val = 0;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = APLL_REINIT;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ return err;
+}
+
+static int idtfc3_init_timecounter(struct idtfc3 *idtfc3)
+{
+ int err;
+ u32 period_ms;
+
+ period_ms = idtfc3->sub_sync_count * MSEC_PER_SEC /
+ idtfc3->hw_param.time_clk_freq;
+
+ idtfc3->tc_update_period = msecs_to_jiffies(period_ms / TDC_GET_PERIOD);
+ idtfc3->tc_write_timeout = period_ms * USEC_PER_MSEC;
+
+ err = idtfc3_timecounter_update(idtfc3, 0, 0);
+ if (err)
+ return err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ ptp_schedule_worker(idtfc3->ptp_clock, idtfc3->tc_update_period);
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_apll_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 tdc_fb_div_int;
+ u8 tdc_ref_div;
+ struct idtfc3_hw_param *param = &idtfc3->hw_param;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_REF_DIV_CNFG,
+ &tdc_ref_div, sizeof(tdc_ref_div));
+ if (err)
+ return err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FB_DIV_INT_CNFG,
+ &tdc_fb_div_int, sizeof(tdc_fb_div_int));
+ if (err)
+ return err;
+
+ tdc_fb_div_int &= TDC_FB_DIV_INT_MASK;
+ tdc_ref_div &= TDC_REF_DIV_CONFIG_MASK;
+
+ idtfc3->tdc_apll_freq = div_u64(param->xtal_freq * (u64)tdc_fb_div_int,
+ 1 << tdc_ref_div);
+
+ return 0;
+}
+
+static int idtfc3_get_fod(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 fod;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_SRC, &fod, sizeof(fod));
+ if (err)
+ return err;
+
+ switch (fod) {
+ case 0:
+ idtfc3->fod_n = FOD_0;
+ break;
+ case 1:
+ idtfc3->fod_n = FOD_1;
+ break;
+ case 2:
+ idtfc3->fod_n = FOD_2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_get_sync_count(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+
+ err = regmap_bulk_read(idtfc3->regmap, SUB_SYNC_GEN_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ idtfc3->sub_sync_count = (get_unaligned_le32(buf) & SUB_SYNC_COUNTER_MASK) + 1;
+ idtfc3->ns_per_counter = NSEC_PER_SEC / idtfc3->hw_param.time_clk_freq;
+ idtfc3->ns_per_sync = idtfc3->sub_sync_count * idtfc3->ns_per_counter;
+
+ return 0;
+}
+
+static int idtfc3_setup_hw_param(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ err = idtfc3_get_fod(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_sync_count(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_time_ref_freq(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_get_tdc_apll_freq(idtfc3);
+}
+
+static int idtfc3_configure_hw(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+
+ err = idtfc3_hw_calibrate(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_lpf(idtfc3, true);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ if (err)
+ return err;
+
+ err = idtfc3_get_tdc_offset_sign(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_setup_hw_param(idtfc3);
+}
+
+static int idtfc3_set_overhead(struct idtfc3 *idtfc3)
+{
+ s64 current_ns = 0;
+ s64 lowest_ns = 0;
+ int err;
+ u8 i;
+ ktime_t start;
+ ktime_t stop;
+ ktime_t diff;
+
+ char buf[18] = {0};
+
+ for (i = 0; i < 5; i++) {
+ start = ktime_get_raw();
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ diff = ktime_sub(stop, start);
+
+ current_ns = ktime_to_ns(diff);
+
+ if (i == 0) {
+ lowest_ns = current_ns;
+ } else {
+ if (current_ns < lowest_ns)
+ lowest_ns = current_ns;
+ }
+ }
+
+ idtfc3->tod_write_overhead = lowest_ns;
+
+ return err;
+}
+
+static int idtfc3_enable_ptp(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ idtfc3->caps = idtfc3_caps;
+ snprintf(idtfc3->caps.name, sizeof(idtfc3->caps.name), "IDT FC3W");
+ idtfc3->ptp_clock = ptp_clock_register(&idtfc3->caps, NULL);
+
+ if (IS_ERR(idtfc3->ptp_clock)) {
+ err = PTR_ERR(idtfc3->ptp_clock);
+ idtfc3->ptp_clock = NULL;
+ return err;
+ }
+
+ err = idtfc3_set_overhead(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_init_timecounter(idtfc3);
+ if (err)
+ return err;
+
+ dev_info(idtfc3->dev, "TIME_SYNC_CHANNEL registered as ptp%d",
+ idtfc3->ptp_clock->index);
+
+ return 0;
+}
+
+static int idtfc3_load_firmware(struct idtfc3 *idtfc3)
+{
+ char fname[128] = FW_FILENAME;
+ const struct firmware *fw;
+ struct idtfc3_fwrc *rec;
+ u16 addr;
+ u8 val;
+ int err;
+ s32 len;
+
+ idtfc3_default_hw_param(&idtfc3->hw_param);
+
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
+
+ dev_info(idtfc3->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, idtfc3->dev);
+
+ if (err) {
+ dev_err(idtfc3->dev,
+ "requesting firmware failed with err %d!\n", err);
+ return err;
+ }
+
+ dev_dbg(idtfc3->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtfc3_fwrc *)fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ if (rec->reserved) {
+ dev_err(idtfc3->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ addr = rec->hiaddr << 8 | rec->loaddr;
+
+ rec++;
+
+ err = idtfc3_set_hw_param(&idtfc3->hw_param, addr, val);
+ }
+
+ if (err != -EINVAL) {
+ err = 0;
+
+ /* Max register */
+ if (addr >= 0xE88)
+ continue;
+
+ err = regmap_bulk_write(idtfc3->regmap, addr,
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ err = idtfc3_configure_hw(idtfc3);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtfc3_read_device_id(struct idtfc3 *idtfc3, u16 *device_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = regmap_bulk_read(idtfc3->regmap, DEVICE_ID,
+ &buf, sizeof(buf));
+ if (err) {
+ dev_err(idtfc3->dev, "%s failed with %d", __func__, err);
+ return err;
+ }
+
+ *device_id = get_unaligned_le16(buf);
+
+ return 0;
+}
+
+static int idtfc3_check_device_compatibility(struct idtfc3 *idtfc3)
+{
+ int err;
+ u16 device_id;
+
+ err = idtfc3_read_device_id(idtfc3, &device_id);
+ if (err)
+ return err;
+
+ if ((device_id & DEVICE_ID_MASK) == 0) {
+ dev_err(idtfc3->dev, "invalid device");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_probe(struct platform_device *pdev)
+{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct idtfc3 *idtfc3;
+ int err;
+
+ idtfc3 = devm_kzalloc(&pdev->dev, sizeof(struct idtfc3), GFP_KERNEL);
+
+ if (!idtfc3)
+ return -ENOMEM;
+
+ idtfc3->dev = &pdev->dev;
+ idtfc3->mfd = pdev->dev.parent;
+ idtfc3->lock = &ddata->lock;
+ idtfc3->regmap = ddata->regmap;
+
+ mutex_lock(idtfc3->lock);
+
+ err = idtfc3_check_device_compatibility(idtfc3);
+ if (err) {
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ err = idtfc3_load_firmware(idtfc3);
+ if (err) {
+ if (err == -ENOENT) {
+ mutex_unlock(idtfc3->lock);
+ return -EPROBE_DEFER;
+ }
+ dev_warn(idtfc3->dev, "loading firmware failed with %d", err);
+ }
+
+ err = idtfc3_enable_ptp(idtfc3);
+ if (err) {
+ dev_err(idtfc3->dev, "idtfc3_enable_ptp failed with %d", err);
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ mutex_unlock(idtfc3->lock);
+
+ if (err) {
+ ptp_clock_unregister(idtfc3->ptp_clock);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, idtfc3);
+
+ return 0;
+}
+
+static void idtfc3_remove(struct platform_device *pdev)
+{
+ struct idtfc3 *idtfc3 = platform_get_drvdata(pdev);
+
+ ptp_clock_unregister(idtfc3->ptp_clock);
+}
+
+static struct platform_driver idtfc3_driver = {
+ .driver = {
+ .name = "rc38xxx-phc",
+ },
+ .probe = idtfc3_probe,
+ .remove_new = idtfc3_remove,
+};
+
+module_platform_driver(idtfc3_driver);
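For orientation, here is a minimal user-space sketch of the record walk performed by idtfc3_load_firmware() above. The 4-byte hiaddr/loaddr/value/reserved layout is assumed from the field accesses in that loop (the real struct idtfc3_fwrc is defined earlier in the file), and the example blob is made up:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Assumed 4-byte record layout; mirrors the fields the loop above dereferences. */
struct fwrc {
        uint8_t hiaddr;
        uint8_t loaddr;
        uint8_t value;
        uint8_t reserved;
};

int main(void)
{
        /* Two example records: write 0x01 to 0x0123, and 0xAB to 0x0E90 (skipped, above max). */
        const uint8_t blob[] = { 0x01, 0x23, 0x01, 0x00, 0x0E, 0x90, 0xAB, 0x00 };
        const struct fwrc *rec = (const struct fwrc *)blob;

        for (size_t len = sizeof(blob); len >= sizeof(*rec); len -= sizeof(*rec), rec++) {
                unsigned int addr = (unsigned int)rec->hiaddr << 8 | rec->loaddr;

                if (rec->reserved) {
                        fprintf(stderr, "bad record: reserved field non-zero\n");
                        return 1;
                }
                if (addr >= 0xE88) {    /* hw-param-only range, no register write */
                        printf("skip  0x%04x\n", addr);
                        continue;
                }
                printf("write 0x%02x -> 0x%04x\n", rec->value, addr);
        }
        return 0;
}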
diff --git a/drivers/ptp/ptp_fc3.h b/drivers/ptp/ptp_fc3.h
new file mode 100644
index 000000000000..897101579207
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTFC3_H
+#define PTP_IDTFC3_H
+
+#include <linux/ktime.h>
+#include <linux/ptp_clock.h>
+#include <linux/regmap.h>
+
+#define FW_FILENAME "idtfc3.bin"
+
+#define MAX_FFO_PPB (244000)
+#define TDC_GET_PERIOD (10)
+
+struct idtfc3 {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct device *dev;
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
+ struct idtfc3_hw_param hw_param;
+ u32 sub_sync_count;
+ u32 ns_per_sync;
+ int tdc_offset_sign;
+ u64 tdc_apll_freq;
+ u32 time_ref_freq;
+ u16 fod_n;
+ u8 lpf_mode;
+ /* Time counter */
+ u32 last_counter;
+ s64 ns;
+ u32 ns_per_counter;
+ u32 tc_update_period;
+ u32 tc_write_timeout;
+ s64 tod_write_overhead;
+};
+
+#endif /* PTP_IDTFC3_H */
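The last few fields above are plain bookkeeping: idtfc3_get_sync_count() and idtfc3_init_timecounter() derive them from the time clock frequency and the sub-sync counter. A stand-alone sketch of that arithmetic, using made-up example figures (a 125 MHz time clock and a one-second sync period):

#include <stdio.h>
#include <stdint.h>

#define MSEC_PER_SEC    1000u
#define NSEC_PER_SEC    1000000000u
#define TDC_GET_PERIOD  10u     /* from ptp_fc3.h above */

int main(void)
{
        /* Example figures only; real values come from the loaded firmware/registers. */
        uint32_t time_clk_freq = 125000000;     /* 125 MHz time clock */
        uint32_t sub_sync_count = 125000000;    /* SUB_SYNC_GEN_CNFG counter + 1 */

        uint32_t ns_per_counter = NSEC_PER_SEC / time_clk_freq;
        uint64_t ns_per_sync = (uint64_t)sub_sync_count * ns_per_counter;
        uint32_t period_ms = (uint64_t)sub_sync_count * MSEC_PER_SEC / time_clk_freq;

        printf("ns per counter tick : %u\n", ns_per_counter);
        printf("ns per sync period  : %llu\n", (unsigned long long)ns_per_sync);
        printf("worker interval     : %u ms (sync period / TDC_GET_PERIOD)\n",
               period_ms / TDC_GET_PERIOD);
        return 0;
}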
diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c
index 2418977989be..15ccb7dd2ed0 100644
--- a/drivers/ptp/ptp_kvm_common.c
+++ b/drivers/ptp/ptp_kvm_common.c
@@ -28,15 +28,15 @@ static int ptp_kvm_get_time_fn(ktime_t *device_time,
struct system_counterval_t *system_counter,
void *ctx)
{
- long ret;
- u64 cycle;
+ enum clocksource_ids cs_id;
struct timespec64 tspec;
- struct clocksource *cs;
+ u64 cycle;
+ int ret;
spin_lock(&kvm_ptp_lock);
preempt_disable_notrace();
- ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs);
+ ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs_id);
if (ret) {
spin_unlock(&kvm_ptp_lock);
preempt_enable_notrace();
@@ -46,7 +46,7 @@ static int ptp_kvm_get_time_fn(ktime_t *device_time,
preempt_enable_notrace();
system_counter->cycles = cycle;
- system_counter->cs = cs;
+ system_counter->cs_id = cs_id;
*device_time = timespec64_to_ktime(tspec);
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index 902844cc1a17..617c8d6706d3 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -93,7 +93,7 @@ int kvm_arch_ptp_get_clock(struct timespec64 *ts)
}
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
- struct clocksource **cs)
+ enum clocksource_ids *cs_id)
{
struct pvclock_vcpu_time_info *src;
unsigned int version;
@@ -123,7 +123,7 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
*cycle = __pvclock_read_cycles(src, clock_pair->tsc);
} while (pvclock_read_retry(src, version));
- *cs = &kvm_clock;
+ *cs_id = CSID_X86_KVM_CLK;
return 0;
}
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5f858e426bbd..6506cfb89aa9 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -34,6 +34,9 @@
#define PCI_VENDOR_ID_OROLIA 0x1ad7
#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000
+#define PCI_VENDOR_ID_ADVA 0xad5a
+#define PCI_DEVICE_ID_ADVA_TIMECARD 0x0400
+
static struct class timecard_class = {
.name = "timecard",
};
@@ -63,6 +66,13 @@ struct ocp_reg {
u32 status_drift;
};
+struct ptp_ocp_servo_conf {
+ u32 servo_offset_p;
+ u32 servo_offset_i;
+ u32 servo_drift_p;
+ u32 servo_drift_i;
+};
+
#define OCP_CTRL_ENABLE BIT(0)
#define OCP_CTRL_ADJUST_TIME BIT(1)
#define OCP_CTRL_ADJUST_OFFSET BIT(2)
@@ -397,10 +407,14 @@ static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+
static const struct ocp_attr_group fb_timecard_groups[];
static const struct ocp_attr_group art_timecard_groups[];
+static const struct ocp_attr_group adva_timecard_groups[];
+
struct ptp_ocp_eeprom_map {
u16 off;
u16 len;
@@ -700,6 +714,12 @@ static struct ocp_resource ocp_fb_resource[] = {
},
{
.setup = ptp_ocp_fb_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -831,6 +851,170 @@ static struct ocp_resource ocp_art_resource[] = {
},
{
.setup = ptp_ocp_art_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
+ },
+ { }
+};
+
+static struct ocp_resource ocp_adva_resource[] = {
+ {
+ OCP_MEM_RESOURCE(reg),
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+ OCP_EXT_RESOURCE(ts0),
+ .offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 0,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts1),
+ .offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ /* Timestamp for PHC and/or PPS generator */
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 5,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[0]),
+ .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[1]),
+ .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_ext),
+ .offset = 0x01030000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_clk),
+ .offset = 0x01040000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(tod),
+ .offset = 0x01050000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(image),
+ .offset = 0x00020000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_select),
+ .offset = 0x00130000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map1),
+ .offset = 0x00140000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map2),
+ .offset = 0x00220000, .size = 0x1000,
+ },
+ {
+ OCP_SERIAL_RESOURCE(gnss_port),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+ },
+ },
+ {
+ OCP_SERIAL_RESOURCE(mac_port),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[0]),
+ .offset = 0x01200000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[1]),
+ .offset = 0x01210000, .size = 0x10000,
+ },
+ {
+ OCP_SPI_RESOURCE(spi_flash),
+ .offset = 0x00310400, .size = 0x10000, .irq_vec = 9,
+ .extra = &(struct ptp_ocp_flash_info) {
+ .name = "spi_altera", .pci_offset = 0,
+ .data_size = sizeof(struct altera_spi_platform_data),
+ .data = &(struct altera_spi_platform_data) {
+ .num_chipselect = 1,
+ .num_devices = 1,
+ .devices = &(struct spi_board_info) {
+ .modalias = "spi-nor",
+ },
+ },
+ },
+ },
+ {
+ OCP_I2C_RESOURCE(i2c_ctrl),
+ .offset = 0x150000, .size = 0x100, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "ocores-i2c",
+ .fixed_rate = 50000000,
+ .data_size = sizeof(struct ocores_i2c_platform_data),
+ .data = &(struct ocores_i2c_platform_data) {
+ .clock_khz = 50000,
+ .bus_khz = 100,
+ .reg_io_width = 4, // 32-bit/4-byte
+ .reg_shift = 2, // 32-bit addressing
+ .num_devices = 2,
+ .devices = (struct i2c_board_info[]) {
+ { I2C_BOARD_INFO("24c02", 0x50) },
+ { I2C_BOARD_INFO("24mac402", 0x58),
+ .platform_data = "mac" },
+ },
+ },
+ },
+ },
+ {
+ .setup = ptp_ocp_adva_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0xc000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -839,6 +1023,7 @@ static const struct pci_device_id ptp_ocp_pcidev_id[] = {
{ PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
+ { PCI_DEVICE_DATA(ADVA, TIMECARD, &ocp_adva_resource) },
{ }
};
MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
@@ -917,6 +1102,30 @@ static const struct ocp_selector ptp_ocp_art_sma_out[] = {
{ }
};
+static const struct ocp_selector ptp_ocp_adva_sma_in[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PPS1", .value = 0x0001, .frequency = 1 },
+ { .name = "PPS2", .value = 0x0002, .frequency = 1 },
+ { .name = "TS1", .value = 0x0004, .frequency = 0 },
+ { .name = "TS2", .value = 0x0008, .frequency = 0 },
+ { .name = "FREQ1", .value = 0x0100, .frequency = 0 },
+ { .name = "FREQ2", .value = 0x0200, .frequency = 0 },
+ { .name = "None", .value = SMA_DISABLE, .frequency = 0 },
+ { }
+};
+
+static const struct ocp_selector ptp_ocp_adva_sma_out[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PHC", .value = 0x0001, .frequency = 1 },
+ { .name = "MAC", .value = 0x0002, .frequency = 1 },
+ { .name = "GNSS1", .value = 0x0004, .frequency = 1 },
+ { .name = "GEN1", .value = 0x0040 },
+ { .name = "GEN2", .value = 0x0080 },
+ { .name = "GND", .value = 0x2000 },
+ { .name = "VCC", .value = 0x4000 },
+ { }
+};
+
struct ocp_sma_op {
const struct ocp_selector *tbl[2];
void (*init)(struct ptp_ocp *bp);
@@ -1363,7 +1572,7 @@ ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
}
static int
-ptp_ocp_init_clock(struct ptp_ocp *bp)
+ptp_ocp_init_clock(struct ptp_ocp *bp, struct ptp_ocp_servo_conf *servo_conf)
{
struct timespec64 ts;
u32 ctrl;
@@ -1371,12 +1580,11 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
- /* NO DRIFT Correction */
- /* offset_p:i 1/8, offset_i: 1/16, drift_p: 0, drift_i: 0 */
- iowrite32(0x2000, &bp->reg->servo_offset_p);
- iowrite32(0x1000, &bp->reg->servo_offset_i);
- iowrite32(0, &bp->reg->servo_drift_p);
- iowrite32(0, &bp->reg->servo_drift_i);
+ /* servo configuration */
+ iowrite32(servo_conf->servo_offset_p, &bp->reg->servo_offset_p);
+ iowrite32(servo_conf->servo_offset_i, &bp->reg->servo_offset_i);
+ iowrite32(servo_conf->servo_drift_p, &bp->reg->servo_drift_p);
+ iowrite32(servo_conf->servo_drift_i, &bp->reg->servo_drift_i);
/* latch servo values */
ctrl |= OCP_CTRL_ADJUST_SERVO;
@@ -2348,6 +2556,14 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
.set_output = ptp_ocp_sma_fb_set_output,
};
+static const struct ocp_sma_op ocp_adva_sma_op = {
+ .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
+ .init = ptp_ocp_sma_fb_init,
+ .get = ptp_ocp_sma_fb_get,
+ .set_inputs = ptp_ocp_sma_fb_set_inputs,
+ .set_output = ptp_ocp_sma_fb_set_output,
+};
+
static int
ptp_ocp_set_pins(struct ptp_ocp *bp)
{
@@ -2427,7 +2643,7 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
return err;
ptp_ocp_sma_init(bp);
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
}
static bool
@@ -2589,7 +2805,44 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
if (err)
return err;
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
+}
+
+/* ADVA specific board initializers; last "resource" registered. */
+static int
+ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ int err;
+ u32 version;
+
+ bp->flash_start = 0xA00000;
+ bp->eeprom_map = fb_eeprom_map;
+ bp->sma_op = &ocp_adva_sma_op;
+
+ version = ioread32(&bp->image->version);
+ /* if lower 16 bits are empty, this is the fw loader. */
+ if ((version & 0xffff) == 0) {
+ version = version >> 16;
+ bp->fw_loader = true;
+ }
+ bp->fw_tag = 3;
+ bp->fw_version = version & 0xffff;
+ bp->fw_cap = OCP_CAP_BASIC | OCP_CAP_SIGNAL | OCP_CAP_FREQ;
+
+ ptp_ocp_tod_init(bp);
+ ptp_ocp_nmea_out_init(bp);
+ ptp_ocp_signal_init(bp);
+
+ err = ptp_ocp_attr_group_add(bp, adva_timecard_groups);
+ if (err)
+ return err;
+
+ err = ptp_ocp_set_pins(bp);
+ if (err)
+ return err;
+ ptp_ocp_sma_init(bp);
+
+ return ptp_ocp_init_clock(bp, r->extra);
}
static ssize_t
@@ -3564,6 +3817,37 @@ static const struct ocp_attr_group art_timecard_groups[] = {
{ },
};
+static struct attribute *adva_timecard_attrs[] = {
+ &dev_attr_serialnum.attr,
+ &dev_attr_gnss_sync.attr,
+ &dev_attr_clock_source.attr,
+ &dev_attr_available_clock_sources.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ &dev_attr_clock_status_drift.attr,
+ &dev_attr_clock_status_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
+ &dev_attr_tod_correction.attr,
+ NULL,
+};
+
+static const struct attribute_group adva_timecard_group = {
+ .attrs = adva_timecard_attrs,
+};
+
+static const struct ocp_attr_group adva_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &adva_timecard_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group },
+ { },
+};
+
static void
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
const char *def)
@@ -4209,10 +4493,11 @@ ptp_ocp_detach(struct ptp_ocp *bp)
device_unregister(&bp->dev);
}
-static int ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct ptp_ocp *bp = priv;
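The servo numbers in the new ptp_ocp_servo_conf instances read as binary fractions: going by the comment the hunk above removes ("offset_p: 1/8, offset_i: 1/16" for 0x2000/0x1000), 0x10000 appears to correspond to a gain of 1.0. That scale is an inference from the removed comment, not something the driver states; a small sketch under that assumption:

#include <stdio.h>
#include <stdint.h>

/* Assumed scale: 0x10000 == gain of 1.0, inferred from the removed comment. */
static double servo_gain(uint32_t reg)
{
        return (double)reg / 0x10000;
}

int main(void)
{
        printf("FB/ART offset_p 0x2000 -> %.4f\n", servo_gain(0x2000)); /* 1/8  */
        printf("FB/ART offset_i 0x1000 -> %.4f\n", servo_gain(0x1000)); /* 1/16 */
        printf("ADVA   offset_p 0xc000 -> %.4f\n", servo_gain(0xc000)); /* 3/4  */
        return 0;
}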
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 45f9002a5dca..18934e28469e 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -120,7 +120,7 @@ static inline bool ptp_clock_freerun(struct ptp_clock *ptp)
return ptp_vclock_in_use(ptp);
}
-extern struct class *ptp_class;
+extern const struct class ptp_class;
/*
* see ptp_chardev.c
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index f7a499a1bd39..a15460aaa03b 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -24,8 +24,7 @@ static ssize_t max_phase_adjustment_show(struct device *dev,
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
- return snprintf(page, PAGE_SIZE - 1, "%d\n",
- ptp->info->getmaxphase(ptp->info));
+ return sysfs_emit(page, "%d\n", ptp->info->getmaxphase(ptp->info));
}
static DEVICE_ATTR_RO(max_phase_adjustment);
@@ -34,7 +33,7 @@ static ssize_t var##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
- return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
+ return sysfs_emit(page, "%d\n", ptp->info->var); \
} \
static DEVICE_ATTR(name, 0444, var##_show, NULL);
@@ -102,8 +101,8 @@ static ssize_t extts_fifo_show(struct device *dev,
if (!qcnt)
goto out;
- cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
- event.index, event.t.sec, event.t.nsec);
+ cnt = sysfs_emit(page, "%u %lld %u\n",
+ event.index, event.t.sec, event.t.nsec);
out:
return cnt;
}
@@ -194,7 +193,7 @@ static ssize_t n_vclocks_show(struct device *dev,
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->n_vclocks);
mutex_unlock(&ptp->n_vclocks_mux);
@@ -270,7 +269,7 @@ static ssize_t max_vclocks_show(struct device *dev,
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->max_vclocks);
return size;
}
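The conversions above lean on the usual sysfs rule that a show() callback gets exactly one PAGE_SIZE buffer, which sysfs_emit() enforces instead of the hand-rolled snprintf(page, PAGE_SIZE - 1, ...) bound. A minimal sketch of the resulting pattern; the foo attribute is hypothetical, and only the sysfs_emit() call mirrors the hunks above:

/* Hypothetical read-only attribute; illustrates the sysfs_emit() pattern only. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *page)
{
        struct ptp_clock *ptp = dev_get_drvdata(dev);

        /* sysfs_emit() knows the buffer is one full page and clamps the output for us. */
        return sysfs_emit(page, "%d\n", ptp->index);
}
static DEVICE_ATTR_RO(foo);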
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index dcf752c9e045..7febfdcbde8b 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -241,7 +241,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
return num;
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
- dev = class_find_device_by_name(ptp_class, name);
+ dev = class_find_device_by_name(&ptp_class, name);
if (!dev)
return num;
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f2728ee787d7..d70f793ce4b3 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -24,310 +24,11 @@
#define CREATE_TRACE_POINTS
#include <trace/events/pwm.h>
-static DEFINE_MUTEX(pwm_lookup_lock);
-static LIST_HEAD(pwm_lookup_list);
-
/* protects access to pwm_chips */
static DEFINE_MUTEX(pwm_lock);
static DEFINE_IDR(pwm_chips);
-static struct pwm_chip *pwmchip_find_by_name(const char *name)
-{
- struct pwm_chip *chip;
- unsigned long id, tmp;
-
- if (!name)
- return NULL;
-
- mutex_lock(&pwm_lock);
-
- idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
- const char *chip_name = dev_name(chip->dev);
-
- if (chip_name && strcmp(chip_name, name) == 0) {
- mutex_unlock(&pwm_lock);
- return chip;
- }
- }
-
- mutex_unlock(&pwm_lock);
-
- return NULL;
-}
-
-static int pwm_device_request(struct pwm_device *pwm, const char *label)
-{
- int err;
- struct pwm_chip *chip = pwm->chip;
- const struct pwm_ops *ops = chip->ops;
-
- if (test_bit(PWMF_REQUESTED, &pwm->flags))
- return -EBUSY;
-
- if (!try_module_get(chip->owner))
- return -ENODEV;
-
- if (ops->request) {
- err = ops->request(chip, pwm);
- if (err) {
- module_put(chip->owner);
- return err;
- }
- }
-
- if (ops->get_state) {
- /*
- * Zero-initialize state because most drivers are unaware of
- * .usage_power. The other members of state are supposed to be
- * set by lowlevel drivers. We still initialize the whole
- * structure for simplicity even though this might paper over
- * faulty implementations of .get_state().
- */
- struct pwm_state state = { 0, };
-
- err = ops->get_state(chip, pwm, &state);
- trace_pwm_get(pwm, &state, err);
-
- if (!err)
- pwm->state = state;
-
- if (IS_ENABLED(CONFIG_PWM_DEBUG))
- pwm->last = pwm->state;
- }
-
- set_bit(PWMF_REQUESTED, &pwm->flags);
- pwm->label = label;
-
- return 0;
-}
-
-struct pwm_device *
-of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- if (chip->of_pwm_n_cells < 2)
- return ERR_PTR(-EINVAL);
-
- /* flags in the third cell are optional */
- if (args->args_count < 2)
- return ERR_PTR(-EINVAL);
-
- if (args->args[0] >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- pwm = pwm_request_from_chip(chip, args->args[0], NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- pwm->args.period = args->args[1];
- pwm->args.polarity = PWM_POLARITY_NORMAL;
-
- if (chip->of_pwm_n_cells >= 3) {
- if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
- pwm->args.polarity = PWM_POLARITY_INVERSED;
- }
-
- return pwm;
-}
-EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
-
-struct pwm_device *
-of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- if (chip->of_pwm_n_cells < 1)
- return ERR_PTR(-EINVAL);
-
- /* validate that one cell is specified, optionally with flags */
- if (args->args_count != 1 && args->args_count != 2)
- return ERR_PTR(-EINVAL);
-
- pwm = pwm_request_from_chip(chip, 0, NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- pwm->args.period = args->args[0];
- pwm->args.polarity = PWM_POLARITY_NORMAL;
-
- if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
- pwm->args.polarity = PWM_POLARITY_INVERSED;
-
- return pwm;
-}
-EXPORT_SYMBOL_GPL(of_pwm_single_xlate);
-
-static void of_pwmchip_add(struct pwm_chip *chip)
-{
- if (!chip->dev || !chip->dev->of_node)
- return;
-
- if (!chip->of_xlate) {
- u32 pwm_cells;
-
- if (of_property_read_u32(chip->dev->of_node, "#pwm-cells",
- &pwm_cells))
- pwm_cells = 2;
-
- chip->of_xlate = of_pwm_xlate_with_flags;
- chip->of_pwm_n_cells = pwm_cells;
- }
-
- of_node_get(chip->dev->of_node);
-}
-
-static void of_pwmchip_remove(struct pwm_chip *chip)
-{
- if (chip->dev)
- of_node_put(chip->dev->of_node);
-}
-
-static bool pwm_ops_check(const struct pwm_chip *chip)
-{
- const struct pwm_ops *ops = chip->ops;
-
- if (!ops->apply)
- return false;
-
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state)
- dev_warn(chip->dev,
- "Please implement the .get_state() callback\n");
-
- return true;
-}
-
-/**
- * __pwmchip_add() - register a new PWM chip
- * @chip: the PWM chip to add
- * @owner: reference to the module providing the chip.
- *
- * Register a new PWM chip. @owner is supposed to be THIS_MODULE, use the
- * pwmchip_add wrapper to do this right.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
-{
- unsigned int i;
- int ret;
-
- if (!chip || !chip->dev || !chip->ops || !chip->npwm)
- return -EINVAL;
-
- if (!pwm_ops_check(chip))
- return -EINVAL;
-
- chip->owner = owner;
-
- chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL);
- if (!chip->pwms)
- return -ENOMEM;
-
- mutex_lock(&pwm_lock);
-
- ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
- if (ret < 0) {
- mutex_unlock(&pwm_lock);
- kfree(chip->pwms);
- return ret;
- }
-
- chip->id = ret;
-
- for (i = 0; i < chip->npwm; i++) {
- struct pwm_device *pwm = &chip->pwms[i];
-
- pwm->chip = chip;
- pwm->hwpwm = i;
- }
-
- mutex_unlock(&pwm_lock);
-
- if (IS_ENABLED(CONFIG_OF))
- of_pwmchip_add(chip);
-
- pwmchip_sysfs_export(chip);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pwmchip_add);
-
-/**
- * pwmchip_remove() - remove a PWM chip
- * @chip: the PWM chip to remove
- *
- * Removes a PWM chip.
- */
-void pwmchip_remove(struct pwm_chip *chip)
-{
- pwmchip_sysfs_unexport(chip);
-
- if (IS_ENABLED(CONFIG_OF))
- of_pwmchip_remove(chip);
-
- mutex_lock(&pwm_lock);
-
- idr_remove(&pwm_chips, chip->id);
-
- mutex_unlock(&pwm_lock);
-
- kfree(chip->pwms);
-}
-EXPORT_SYMBOL_GPL(pwmchip_remove);
-
-static void devm_pwmchip_remove(void *data)
-{
- struct pwm_chip *chip = data;
-
- pwmchip_remove(chip);
-}
-
-int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner)
-{
- int ret;
-
- ret = __pwmchip_add(chip, owner);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
-}
-EXPORT_SYMBOL_GPL(__devm_pwmchip_add);
-
-/**
- * pwm_request_from_chip() - request a PWM device relative to a PWM chip
- * @chip: PWM chip
- * @index: per-chip index of the PWM to request
- * @label: a literal description string of this PWM
- *
- * Returns: A pointer to the PWM device at the given index of the given PWM
- * chip. A negative error code is returned if the index is not valid for the
- * specified PWM chip or if the PWM device cannot be requested.
- */
-struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label)
-{
- struct pwm_device *pwm;
- int err;
-
- if (!chip || index >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&pwm_lock);
- pwm = &chip->pwms[index];
-
- err = pwm_device_request(pwm, label);
- if (err < 0)
- pwm = ERR_PTR(err);
-
- mutex_unlock(&pwm_lock);
- return pwm;
-}
-EXPORT_SYMBOL_GPL(pwm_request_from_chip);
-
static void pwm_apply_debug(struct pwm_device *pwm,
const struct pwm_state *state)
{
@@ -370,18 +71,18 @@ static void pwm_apply_debug(struct pwm_device *pwm,
if (s2.polarity != state->polarity &&
state->duty_cycle < state->period)
- dev_warn(chip->dev, ".apply ignored .polarity\n");
+ dev_warn(pwmchip_parent(chip), ".apply ignored .polarity\n");
if (state->enabled &&
last->polarity == state->polarity &&
last->period > s2.period &&
last->period <= state->period)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n",
state->period, s2.period, last->period);
if (state->enabled && state->period < s2.period)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply is supposed to round down period (requested: %llu, applied: %llu)\n",
state->period, s2.period);
@@ -390,20 +91,20 @@ static void pwm_apply_debug(struct pwm_device *pwm,
last->period == s2.period &&
last->duty_cycle > s2.duty_cycle &&
last->duty_cycle <= state->duty_cycle)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period,
last->duty_cycle, last->period);
if (state->enabled && state->duty_cycle < s2.duty_cycle)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period);
if (!state->enabled && s2.enabled && s2.duty_cycle > 0)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
"requested disabled, but yielded enabled with duty > 0\n");
/* reapply the state that the driver reported being configured. */
@@ -411,7 +112,7 @@ static void pwm_apply_debug(struct pwm_device *pwm,
trace_pwm_apply(pwm, &s1, err);
if (err) {
*last = s1;
- dev_err(chip->dev, "failed to reapply current setting\n");
+ dev_err(pwmchip_parent(chip), "failed to reapply current setting\n");
return;
}
@@ -426,7 +127,7 @@ static void pwm_apply_debug(struct pwm_device *pwm,
s1.polarity != last->polarity ||
(s1.enabled && s1.period != last->period) ||
(s1.enabled && s1.duty_cycle != last->duty_cycle)) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n",
s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
last->enabled, last->polarity, last->duty_cycle,
@@ -524,33 +225,6 @@ int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state)
EXPORT_SYMBOL_GPL(pwm_apply_atomic);
/**
- * pwm_capture() - capture and report a PWM signal
- * @pwm: PWM device
- * @result: structure to fill with capture result
- * @timeout: time to wait, in milliseconds, before giving up on capture
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout)
-{
- int err;
-
- if (!pwm || !pwm->chip->ops)
- return -EINVAL;
-
- if (!pwm->chip->ops->capture)
- return -ENOSYS;
-
- mutex_lock(&pwm_lock);
- err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
- mutex_unlock(&pwm_lock);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(pwm_capture);
-
-/**
* pwm_adjust_config() - adjust the current PWM config to the PWM arguments
* @pwm: PWM device
*
@@ -606,24 +280,367 @@ int pwm_adjust_config(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_adjust_config);
-static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
+/**
+ * pwm_capture() - capture and report a PWM signal
+ * @pwm: PWM device
+ * @result: structure to fill with capture result
+ * @timeout: time to wait, in milliseconds, before giving up on capture
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout)
+{
+ int err;
+
+ if (!pwm || !pwm->chip->ops)
+ return -EINVAL;
+
+ if (!pwm->chip->ops->capture)
+ return -ENOSYS;
+
+ mutex_lock(&pwm_lock);
+ err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
+ mutex_unlock(&pwm_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pwm_capture);
+
+static struct pwm_chip *pwmchip_find_by_name(const char *name)
{
struct pwm_chip *chip;
unsigned long id, tmp;
+ if (!name)
+ return NULL;
+
mutex_lock(&pwm_lock);
- idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
- if (chip->dev && device_match_fwnode(chip->dev, fwnode)) {
+ idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
+ const char *chip_name = dev_name(pwmchip_parent(chip));
+
+ if (chip_name && strcmp(chip_name, name) == 0) {
mutex_unlock(&pwm_lock);
return chip;
}
+ }
mutex_unlock(&pwm_lock);
- return ERR_PTR(-EPROBE_DEFER);
+ return NULL;
+}
+
+static int pwm_device_request(struct pwm_device *pwm, const char *label)
+{
+ int err;
+ struct pwm_chip *chip = pwm->chip;
+ const struct pwm_ops *ops = chip->ops;
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags))
+ return -EBUSY;
+
+ if (!try_module_get(chip->owner))
+ return -ENODEV;
+
+ if (ops->request) {
+ err = ops->request(chip, pwm);
+ if (err) {
+ module_put(chip->owner);
+ return err;
+ }
+ }
+
+ if (ops->get_state) {
+ /*
+ * Zero-initialize state because most drivers are unaware of
+ * .usage_power. The other members of state are supposed to be
+ * set by lowlevel drivers. We still initialize the whole
+ * structure for simplicity even though this might paper over
+ * faulty implementations of .get_state().
+ */
+ struct pwm_state state = { 0, };
+
+ err = ops->get_state(chip, pwm, &state);
+ trace_pwm_get(pwm, &state, err);
+
+ if (!err)
+ pwm->state = state;
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG))
+ pwm->last = pwm->state;
+ }
+
+ set_bit(PWMF_REQUESTED, &pwm->flags);
+ pwm->label = label;
+
+ return 0;
+}
+
+/**
+ * pwm_request_from_chip() - request a PWM device relative to a PWM chip
+ * @chip: PWM chip
+ * @index: per-chip index of the PWM to request
+ * @label: a literal description string of this PWM
+ *
+ * Returns: A pointer to the PWM device at the given index of the given PWM
+ * chip. A negative error code is returned if the index is not valid for the
+ * specified PWM chip or if the PWM device cannot be requested.
+ */
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ struct pwm_device *pwm;
+ int err;
+
+ if (!chip || index >= chip->npwm)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+ pwm = &chip->pwms[index];
+
+ err = pwm_device_request(pwm, label);
+ if (err < 0)
+ pwm = ERR_PTR(err);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request_from_chip);
+
+struct pwm_device *
+of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ /* period in the second cell and flags in the third cell are optional */
+ if (args->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(chip, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ if (args->args_count > 1)
+ pwm->args.period = args->args[1];
+
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+ if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
+
+struct pwm_device *
+of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ pwm = pwm_request_from_chip(chip, 0, NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ if (args->args_count > 0)
+ pwm->args.period = args->args[0];
+
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+ if (args->args_count > 1 && args->args[1] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_single_xlate);
+
+#define PWMCHIP_ALIGN ARCH_DMA_MINALIGN
+
+static void *pwmchip_priv(struct pwm_chip *chip)
+{
+ return (void *)chip + ALIGN(sizeof(*chip), PWMCHIP_ALIGN);
+}
+
+/* This is the counterpart to pwmchip_alloc() */
+void pwmchip_put(struct pwm_chip *chip)
+{
+ kfree(chip);
+}
+EXPORT_SYMBOL_GPL(pwmchip_put);
+
+struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv)
+{
+ struct pwm_chip *chip;
+ size_t alloc_size;
+
+ alloc_size = size_add(ALIGN(sizeof(*chip), PWMCHIP_ALIGN), sizeof_priv);
+
+ chip = kzalloc(alloc_size, GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ chip->dev = parent;
+ chip->npwm = npwm;
+
+ pwmchip_set_drvdata(chip, pwmchip_priv(chip));
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(pwmchip_alloc);
+
+static void devm_pwmchip_put(void *data)
+{
+ struct pwm_chip *chip = data;
+
+ pwmchip_put(chip);
+}
+
+struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv)
+{
+ struct pwm_chip *chip;
+ int ret;
+
+ chip = pwmchip_alloc(parent, npwm, sizeof_priv);
+ if (IS_ERR(chip))
+ return chip;
+
+ ret = devm_add_action_or_reset(parent, devm_pwmchip_put, chip);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(devm_pwmchip_alloc);
+
+static void of_pwmchip_add(struct pwm_chip *chip)
+{
+ if (!pwmchip_parent(chip) || !pwmchip_parent(chip)->of_node)
+ return;
+
+ if (!chip->of_xlate)
+ chip->of_xlate = of_pwm_xlate_with_flags;
+
+ of_node_get(pwmchip_parent(chip)->of_node);
+}
+
+static void of_pwmchip_remove(struct pwm_chip *chip)
+{
+ if (pwmchip_parent(chip))
+ of_node_put(pwmchip_parent(chip)->of_node);
+}
+
+static bool pwm_ops_check(const struct pwm_chip *chip)
+{
+ const struct pwm_ops *ops = chip->ops;
+
+ if (!ops->apply)
+ return false;
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state)
+ dev_warn(pwmchip_parent(chip),
+ "Please implement the .get_state() callback\n");
+
+ return true;
}
+/**
+ * __pwmchip_add() - register a new PWM chip
+ * @chip: the PWM chip to add
+ * @owner: reference to the module providing the chip.
+ *
+ * Register a new PWM chip. @owner is supposed to be THIS_MODULE, use the
+ * pwmchip_add wrapper to do this right.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
+{
+ unsigned int i;
+ int ret;
+
+ if (!chip || !pwmchip_parent(chip) || !chip->ops || !chip->npwm)
+ return -EINVAL;
+
+ if (!pwm_ops_check(chip))
+ return -EINVAL;
+
+ chip->owner = owner;
+
+ chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL);
+ if (!chip->pwms)
+ return -ENOMEM;
+
+ mutex_lock(&pwm_lock);
+
+ ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ mutex_unlock(&pwm_lock);
+ kfree(chip->pwms);
+ return ret;
+ }
+
+ chip->id = ret;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ pwm->chip = chip;
+ pwm->hwpwm = i;
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_add(chip);
+
+ pwmchip_sysfs_export(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pwmchip_add);
+
+/**
+ * pwmchip_remove() - remove a PWM chip
+ * @chip: the PWM chip to remove
+ *
+ * Removes a PWM chip.
+ */
+void pwmchip_remove(struct pwm_chip *chip)
+{
+ pwmchip_sysfs_unexport(chip);
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_remove(chip);
+
+ mutex_lock(&pwm_lock);
+
+ idr_remove(&pwm_chips, chip->id);
+
+ mutex_unlock(&pwm_lock);
+
+ kfree(chip->pwms);
+}
+EXPORT_SYMBOL_GPL(pwmchip_remove);
+
+static void devm_pwmchip_remove(void *data)
+{
+ struct pwm_chip *chip = data;
+
+ pwmchip_remove(chip);
+}
+
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner)
+{
+ int ret;
+
+ ret = __pwmchip_add(chip, owner);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
+}
+EXPORT_SYMBOL_GPL(__devm_pwmchip_add);
+
static struct device_link *pwm_device_link_add(struct device *dev,
struct pwm_device *pwm)
{
@@ -635,21 +652,39 @@ static struct device_link *pwm_device_link_add(struct device *dev,
* impact the PM sequence ordering: the PWM supplier may get
* suspended before the consumer.
*/
- dev_warn(pwm->chip->dev,
+ dev_warn(pwmchip_parent(pwm->chip),
"No consumer device specified to create a link to\n");
return NULL;
}
- dl = device_link_add(dev, pwm->chip->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ dl = device_link_add(dev, pwmchip_parent(pwm->chip), DL_FLAG_AUTOREMOVE_CONSUMER);
if (!dl) {
dev_err(dev, "failed to create device link to %s\n",
- dev_name(pwm->chip->dev));
+ dev_name(pwmchip_parent(pwm->chip)));
return ERR_PTR(-EINVAL);
}
return dl;
}
+static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
+{
+ struct pwm_chip *chip;
+ unsigned long id, tmp;
+
+ mutex_lock(&pwm_lock);
+
+ idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
+ if (pwmchip_parent(chip) && device_match_fwnode(pwmchip_parent(chip), fwnode)) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
/**
* of_pwm_get() - request a PWM via the PWM framework
* @dev: device for PWM consumer
@@ -784,6 +819,9 @@ static struct pwm_device *acpi_pwm_get(const struct fwnode_handle *fwnode)
return pwm;
}
+static DEFINE_MUTEX(pwm_lookup_lock);
+static LIST_HEAD(pwm_lookup_list);
+
/**
* pwm_add_table() - register PWM device consumers
* @table: array of consumers to register
@@ -1105,8 +1143,8 @@ static int pwm_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%s%d: %s/%s, %d PWM device%s\n",
(char *)s->private, chip->id,
- chip->dev->bus ? chip->dev->bus->name : "no-bus",
- dev_name(chip->dev), chip->npwm,
+ pwmchip_parent(chip)->bus ? pwmchip_parent(chip)->bus->name : "no-bus",
+ dev_name(pwmchip_parent(chip)), chip->npwm,
(chip->npwm != 1) ? "s" : "");
pwm_dbg_show(chip, s);
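The new pwmchip_alloc()/pwmchip_priv() pair above replaces the old "embed struct pwm_chip in the driver struct" idiom with a single allocation: the chip comes first, then the driver's private data at an ARCH_DMA_MINALIGN-aligned offset, reachable through pwmchip_get_drvdata(). A user-space illustration of that layout, where 64 bytes stands in for ARCH_DMA_MINALIGN (which is architecture-specific):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define CHIP_ALIGN      64      /* stand-in for ARCH_DMA_MINALIGN */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct chip {           /* stand-in for struct pwm_chip */
        unsigned int npwm;
        void *drvdata;
};

struct drv_priv {       /* a driver's private data, e.g. struct apple_pwm */
        unsigned long clkrate;
};

int main(void)
{
        size_t off = ALIGN_UP(sizeof(struct chip), CHIP_ALIGN);
        struct chip *chip = calloc(1, off + sizeof(struct drv_priv));
        struct drv_priv *priv;

        if (!chip)
                return 1;
        chip->drvdata = (char *)chip + off;     /* what pwmchip_priv() computes */

        priv = chip->drvdata;                   /* what pwmchip_get_drvdata() returns */
        priv->clkrate = 24000000;

        printf("chip at %p, priv at %p (offset %zu)\n", (void *)chip, (void *)priv, off);
        free(chip);
        return 0;
}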
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 670d33daea84..f000adab85b0 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -24,13 +24,12 @@
#define AB8500_PWM_CLKRATE 9600000
struct ab8500_pwm_chip {
- struct pwm_chip chip;
unsigned int hwid;
};
static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ab8500_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -92,12 +91,12 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* when disabled.
*/
if (!state->enabled || duty_steps == 0) {
- ret = abx500_mask_and_set_register_interruptible(chip->dev,
+ ret = abx500_mask_and_set_register_interruptible(pwmchip_parent(chip),
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << ab8500->hwid, 0);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
return ret;
}
@@ -115,22 +114,22 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
- ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
reg, lower_val);
if (ret < 0)
return ret;
- ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
(reg + 1), higher_val);
if (ret < 0)
return ret;
/* enable */
- ret = abx500_mask_and_set_register_interruptible(chip->dev,
+ ret = abx500_mask_and_set_register_interruptible(pwmchip_parent(chip),
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << ab8500->hwid, 1 << ab8500->hwid);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
return ret;
@@ -144,7 +143,7 @@ static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
unsigned int div, duty_steps;
- ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
AB8500_PWM_OUT_CTRL7_REG,
&ctrl7);
if (ret)
@@ -157,13 +156,13 @@ static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2),
&lower_val);
if (ret)
return ret;
- ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
AB8500_PWM_OUT_CTRL2_REG + (ab8500->hwid * 2),
&higher_val);
if (ret)
@@ -185,6 +184,7 @@ static const struct pwm_ops ab8500_pwm_ops = {
static int ab8500_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct ab8500_pwm_chip *ab8500;
int err;
@@ -195,16 +195,16 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
*/
- ab8500 = devm_kzalloc(&pdev->dev, sizeof(*ab8500), GFP_KERNEL);
- if (ab8500 == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*ab8500));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- ab8500->chip.dev = &pdev->dev;
- ab8500->chip.ops = &ab8500_pwm_ops;
- ab8500->chip.npwm = 1;
+ ab8500 = ab8500_pwm_from_chip(chip);
+
+ chip->ops = &ab8500_pwm_ops;
ab8500->hwid = pdev->id - 1;
- err = devm_pwmchip_add(&pdev->dev, &ab8500->chip);
+ err = devm_pwmchip_add(&pdev->dev, chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
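Each pwm-* driver below repeats the same conversion: allocate the chip with devm_pwmchip_alloc(), fetch the private data with pwmchip_get_drvdata() instead of container_of(), and stop setting chip->dev and chip->npwm by hand. A condensed sketch for a hypothetical driver; foo_pwm_ops and the register layout are invented, and error handling is trimmed:

struct foo_pwm {                        /* no embedded struct pwm_chip any more */
        void __iomem *base;
};

static int foo_pwm_probe(struct platform_device *pdev)
{
        struct pwm_chip *chip;
        struct foo_pwm *foo;

        /* one allocation: the chip plus sizeof(*foo) bytes of private data */
        chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*foo));
        if (IS_ERR(chip))
                return PTR_ERR(chip);
        foo = pwmchip_get_drvdata(chip);        /* replaces container_of(chip, ...) */

        foo->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(foo->base))
                return PTR_ERR(foo->base);

        chip->ops = &foo_pwm_ops;               /* parent and npwm were set by the core */
        return devm_pwmchip_add(&pdev->dev, chip);
}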
diff --git a/drivers/pwm/pwm-apple.c b/drivers/pwm/pwm-apple.c
index 4d755b628d9e..6e58aca2f13c 100644
--- a/drivers/pwm/pwm-apple.c
+++ b/drivers/pwm/pwm-apple.c
@@ -32,14 +32,13 @@
#define APPLE_PWM_CTRL_OUTPUT_ENABLE BIT(14)
struct apple_pwm {
- struct pwm_chip chip;
void __iomem *base;
u64 clkrate;
};
static inline struct apple_pwm *to_apple_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct apple_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int apple_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -103,13 +102,16 @@ static const struct pwm_ops apple_pwm_ops = {
static int apple_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct apple_pwm *fpwm;
struct clk *clk;
int ret;
- fpwm = devm_kzalloc(&pdev->dev, sizeof(*fpwm), GFP_KERNEL);
- if (!fpwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*fpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ fpwm = to_apple_pwm(chip);
fpwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fpwm->base))
@@ -129,11 +131,9 @@ static int apple_pwm_probe(struct platform_device *pdev)
if (fpwm->clkrate > NSEC_PER_SEC)
return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock out of range");
- fpwm->chip.dev = &pdev->dev;
- fpwm->chip.npwm = 1;
- fpwm->chip.ops = &apple_pwm_ops;
+ chip->ops = &apple_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &fpwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "unable to add pwm chip");
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 3f2c5031a3ba..2afb302be02c 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -28,7 +28,6 @@ struct atmel_hlcdc_pwm_errata {
};
struct atmel_hlcdc_pwm {
- struct pwm_chip chip;
struct atmel_hlcdc *hlcdc;
struct clk *cur_clk;
const struct atmel_hlcdc_pwm_errata *errata;
@@ -36,7 +35,7 @@ struct atmel_hlcdc_pwm {
static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct atmel_hlcdc_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -182,10 +181,12 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
static int atmel_hlcdc_pwm_suspend(struct device *dev)
{
- struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
+ struct pwm_device *pwm = &chip->pwms[0];
/* Keep the periph clock enabled if the PWM is still running. */
- if (pwm_is_enabled(&atmel->chip.pwms[0]))
+ if (!pwm->state.enabled)
clk_disable_unprepare(atmel->hlcdc->periph_clk);
return 0;
@@ -193,21 +194,19 @@ static int atmel_hlcdc_pwm_suspend(struct device *dev)
static int atmel_hlcdc_pwm_resume(struct device *dev)
{
- struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
- struct pwm_state state;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
+ struct pwm_device *pwm = &chip->pwms[0];
int ret;
- pwm_get_state(&atmel->chip.pwms[0], &state);
-
/* Re-enable the periph clock it was stopped during suspend. */
- if (!state.enabled) {
+ if (!pwm->state.enabled) {
ret = clk_prepare_enable(atmel->hlcdc->periph_clk);
if (ret)
return ret;
}
- return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
- &state);
+ return atmel_hlcdc_pwm_apply(chip, pwm, &pwm->state);
}
static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
@@ -243,15 +242,17 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct atmel_hlcdc_pwm *atmel;
struct atmel_hlcdc *hlcdc;
int ret;
hlcdc = dev_get_drvdata(dev->parent);
- atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL);
- if (!atmel)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*atmel));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ atmel = to_atmel_hlcdc_pwm(chip);
ret = clk_prepare_enable(hlcdc->periph_clk);
if (ret)
@@ -262,26 +263,25 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
atmel->errata = match->data;
atmel->hlcdc = hlcdc;
- atmel->chip.ops = &atmel_hlcdc_pwm_ops;
- atmel->chip.dev = dev;
- atmel->chip.npwm = 1;
+ chip->ops = &atmel_hlcdc_pwm_ops;
- ret = pwmchip_add(&atmel->chip);
+ ret = pwmchip_add(chip);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
}
- platform_set_drvdata(pdev, atmel);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
- struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
- pwmchip_remove(&atmel->chip);
+ pwmchip_remove(chip);
clk_disable_unprepare(atmel->hlcdc->periph_clk);
}
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index d42c897cb85e..528e54c5999d 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -47,7 +47,6 @@ struct atmel_tcb_channel {
};
struct atmel_tcb_pwm_chip {
- struct pwm_chip chip;
spinlock_t lock;
u8 channel;
u8 width;
@@ -63,7 +62,7 @@ static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct atmel_tcb_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int atmel_tcb_pwm_request(struct pwm_chip *chip,
@@ -327,7 +326,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if ((atcbpwm && atcbpwm->duty > 0 &&
atcbpwm->duty != atcbpwm->period) &&
(atcbpwm->div != i || atcbpwm->period != period)) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"failed to configure period_ns: PWM group already configured with a different value\n");
return -EINVAL;
}
@@ -388,6 +387,7 @@ static const struct of_device_id atmel_tcb_of_match[] = {
static int atmel_tcb_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
const struct of_device_id *match;
struct atmel_tcb_pwm_chip *tcbpwm;
const struct atmel_tcb_config *config;
@@ -396,9 +396,10 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
int err;
int channel;
- tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (tcbpwm == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, NPWM, sizeof(*tcbpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ tcbpwm = to_tcb_chip(chip);
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
@@ -436,9 +437,7 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
}
}
- tcbpwm->chip.dev = &pdev->dev;
- tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
- tcbpwm->chip.npwm = NPWM;
+ chip->ops = &atmel_tcb_pwm_ops;
tcbpwm->channel = channel;
tcbpwm->width = config->counter_width;
@@ -448,11 +447,11 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
spin_lock_init(&tcbpwm->lock);
- err = pwmchip_add(&tcbpwm->chip);
+ err = pwmchip_add(chip);
if (err < 0)
goto err_disable_clk;
- platform_set_drvdata(pdev, tcbpwm);
+ platform_set_drvdata(pdev, chip);
return 0;
@@ -473,9 +472,10 @@ err_slow_clk:
static void atmel_tcb_pwm_remove(struct platform_device *pdev)
{
- struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
- pwmchip_remove(&tcbpwm->chip);
+ pwmchip_remove(chip);
clk_disable_unprepare(tcbpwm->slow_clk);
clk_put(tcbpwm->gclk);
@@ -491,7 +491,8 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
static int atmel_tcb_pwm_suspend(struct device *dev)
{
- struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
struct atmel_tcb_channel *chan = &tcbpwm->bkup;
unsigned int channel = tcbpwm->channel;
@@ -505,7 +506,8 @@ static int atmel_tcb_pwm_suspend(struct device *dev)
static int atmel_tcb_pwm_resume(struct device *dev)
{
- struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
struct atmel_tcb_channel *chan = &tcbpwm->bkup;
unsigned int channel = tcbpwm->channel;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 47bcc8a3bf9d..b2f0abbbad63 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -77,7 +77,6 @@ struct atmel_pwm_data {
};
struct atmel_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
const struct atmel_pwm_data *data;
@@ -99,7 +98,7 @@ struct atmel_pwm_chip {
static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct atmel_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 atmel_pwm_readl(struct atmel_pwm_chip *chip,
@@ -210,7 +209,7 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;
if (shift > PWM_MAX_PRES) {
- dev_err(chip->dev, "pres exceeds the maximum value\n");
+ dev_err(pwmchip_parent(chip), "pres exceeds the maximum value\n");
return -EINVAL;
} else if (shift > 0) {
*pres = shift;
@@ -294,19 +293,16 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- struct pwm_state cstate;
unsigned long cprd, cdty;
u32 pres, val;
int ret;
- pwm_get_state(pwm, &cstate);
-
if (state->enabled) {
unsigned long clkrate = clk_get_rate(atmel_pwm->clk);
- if (cstate.enabled &&
- cstate.polarity == state->polarity &&
- cstate.period == state->period) {
+ if (pwm->state.enabled &&
+ pwm->state.polarity == state->polarity &&
+ pwm->state.period == state->period) {
u32 cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
@@ -321,19 +317,19 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = atmel_pwm_calculate_cprd_and_pres(chip, clkrate, state, &cprd,
&pres);
if (ret) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"failed to calculate cprd and prescaler\n");
return ret;
}
atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty);
- if (cstate.enabled) {
+ if (pwm->state.enabled) {
atmel_pwm_disable(chip, pwm, false);
} else {
ret = clk_enable(atmel_pwm->clk);
if (ret) {
- dev_err(chip->dev, "failed to enable clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable clock\n");
return ret;
}
}
@@ -348,7 +344,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty);
atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
- } else if (cstate.enabled) {
+ } else if (pwm->state.enabled) {
atmel_pwm_disable(chip, pwm, true);
}
@@ -462,8 +458,9 @@ static const struct of_device_id atmel_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
-static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
+static int atmel_pwm_enable_clk_if_on(struct pwm_chip *chip, bool on)
{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
unsigned int i, cnt = 0;
unsigned long sr;
int ret = 0;
@@ -472,7 +469,7 @@ static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
if (!sr)
return 0;
- cnt = bitmap_weight(&sr, atmel_pwm->chip.npwm);
+ cnt = bitmap_weight(&sr, chip->npwm);
if (!on)
goto disable_clk;
@@ -480,7 +477,7 @@ static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
for (i = 0; i < cnt; i++) {
ret = clk_enable(atmel_pwm->clk);
if (ret) {
- dev_err(atmel_pwm->chip.dev,
+ dev_err(pwmchip_parent(chip),
"failed to enable clock for pwm %pe\n",
ERR_PTR(ret));
@@ -501,12 +498,14 @@ disable_clk:
static int atmel_pwm_probe(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm;
+ struct pwm_chip *chip;
int ret;
- atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
- if (!atmel_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*atmel_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ atmel_pwm = to_atmel_pwm_chip(chip);
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
atmel_pwm->update_pending = 0;
@@ -521,15 +520,13 @@ static int atmel_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(atmel_pwm->clk),
"failed to get prepared PWM clock\n");
- atmel_pwm->chip.dev = &pdev->dev;
- atmel_pwm->chip.ops = &atmel_pwm_ops;
- atmel_pwm->chip.npwm = 4;
+ chip->ops = &atmel_pwm_ops;
- ret = atmel_pwm_enable_clk_if_on(atmel_pwm, true);
+ ret = atmel_pwm_enable_clk_if_on(chip, true);
if (ret < 0)
return ret;
- ret = devm_pwmchip_add(&pdev->dev, &atmel_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
goto disable_clk;
@@ -538,7 +535,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
return 0;
disable_clk:
- atmel_pwm_enable_clk_if_on(atmel_pwm, false);
+ atmel_pwm_enable_clk_if_on(chip, false);
return ret;
}
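
All of the conversions in this series follow the same shape: the driver no longer embeds a struct pwm_chip in its private data, the PWM core allocates chip and private data together via devm_pwmchip_alloc(), the private data is reached through pwmchip_get_drvdata(), and the parent device through pwmchip_parent() instead of chip->dev. A minimal sketch of that shape, using a made-up "foo" driver rather than any of the drivers touched here:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm {
	void __iomem *base;	/* driver private data only, no embedded pwm_chip */
};

static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
{
	/* the private data lives in the allocation done by devm_pwmchip_alloc() */
	return pwmchip_get_drvdata(chip);
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *foo = to_foo_pwm(chip);

	/* the parent device is reached via pwmchip_parent(), not chip->dev */
	dev_dbg(pwmchip_parent(chip), "hwpwm %u: enabled=%d\n",
		pwm->hwpwm, state->enabled);
	writel(state->enabled ? 1 : 0, foo->base);	/* stand-in register write */

	return 0;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct foo_pwm *foo;

	/* number of PWMs and private data size are fixed at allocation time */
	chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*foo));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	foo = to_foo_pwm(chip);

	foo->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);

	chip->ops = &foo_pwm_ops;

	return devm_pwmchip_add(&pdev->dev, chip);
}

The sketch intentionally omits clocks, resets and module boilerplate; it only shows how the allocation, the drvdata accessor and the parent-device accessor fit together after the conversion.
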
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 758254025683..f4c9f10e490e 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -34,14 +34,13 @@
#define IPROC_PWM_PRESCALE_MAX 0x3f
struct iproc_pwmc {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct iproc_pwmc *to_iproc_pwmc(struct pwm_chip *chip)
{
- return container_of(chip, struct iproc_pwmc, chip);
+ return pwmchip_get_drvdata(chip);
}
static void iproc_pwmc_enable(struct iproc_pwmc *ip, unsigned int channel)
@@ -187,20 +186,20 @@ static const struct pwm_ops iproc_pwm_ops = {
static int iproc_pwmc_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct iproc_pwmc *ip;
unsigned int i;
u32 value;
int ret;
- ip = devm_kzalloc(&pdev->dev, sizeof(*ip), GFP_KERNEL);
- if (!ip)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*ip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ip = to_iproc_pwmc(chip);
platform_set_drvdata(pdev, ip);
- ip->chip.dev = &pdev->dev;
- ip->chip.ops = &iproc_pwm_ops;
- ip->chip.npwm = 4;
+ chip->ops = &iproc_pwm_ops;
ip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ip->base))
@@ -214,14 +213,14 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
/* Set full drive and normal polarity for all channels */
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
- for (i = 0; i < ip->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
value &= ~(1 << IPROC_PWM_CTRL_TYPE_SHIFT(i));
value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(i);
}
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
- ret = devm_pwmchip_add(&pdev->dev, &ip->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 45046a5c20a5..022c078aae84 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -56,14 +56,13 @@
#define DUTY_CYCLE_HIGH_MAX 0x00ffffff
struct kona_pwmc {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *chip)
{
- return container_of(chip, struct kona_pwmc, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -164,7 +163,7 @@ static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
- dev_err(chip->dev, "failed to enable clock: %d\n", ret);
+ dev_err(pwmchip_parent(chip), "failed to enable clock: %d\n", ret);
return ret;
}
@@ -193,7 +192,7 @@ static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
- dev_err(chip->dev, "failed to enable clock: %d\n", ret);
+ dev_err(pwmchip_parent(chip), "failed to enable clock: %d\n", ret);
return ret;
}
@@ -273,18 +272,18 @@ static const struct pwm_ops kona_pwm_ops = {
static int kona_pwmc_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct kona_pwmc *kp;
unsigned int chan;
unsigned int value = 0;
int ret = 0;
- kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
- if (kp == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 6, sizeof(*kp));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ kp = to_kona_pwmc(chip);
- kp->chip.dev = &pdev->dev;
- kp->chip.ops = &kona_pwm_ops;
- kp->chip.npwm = 6;
+ chip->ops = &kona_pwm_ops;
kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
@@ -304,14 +303,14 @@ static int kona_pwmc_probe(struct platform_device *pdev)
}
/* Set push/pull for all channels */
- for (chan = 0; chan < kp->chip.npwm; chan++)
+ for (chan = 0; chan < chip->npwm; chan++)
value |= (1 << PWM_CONTROL_TYPE_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
clk_disable_unprepare(kp->clk);
- ret = devm_pwmchip_add(&pdev->dev, &kp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 283cf27f25ba..aa35acbb0cbc 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -24,8 +24,6 @@
#define PERIOD_MIN 0x2
struct bcm2835_pwm {
- struct pwm_chip chip;
- struct device *dev;
void __iomem *base;
struct clk *clk;
unsigned long rate;
@@ -33,7 +31,7 @@ struct bcm2835_pwm {
static inline struct bcm2835_pwm *to_bcm2835_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct bcm2835_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int bcm2835_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -135,14 +133,14 @@ static void devm_clk_rate_exclusive_put(void *data)
static int bcm2835_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct bcm2835_pwm *pc;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
-
- pc->dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_bcm2835_pwm(chip);
pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
@@ -168,14 +166,12 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -EINVAL,
"failed to get clock rate\n");
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &bcm2835_pwm_ops;
- pc->chip.atomic = true;
- pc->chip.npwm = 2;
+ chip->ops = &bcm2835_pwm_ops;
+ chip->atomic = true;
platform_set_drvdata(pdev, pc);
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to add pwmchip\n");
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 442913232dc0..831aed228caf 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -49,7 +49,6 @@ struct berlin_pwm_channel {
};
struct berlin_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct berlin_pwm_channel channel[BERLIN_PWM_NUMPWMS];
@@ -57,7 +56,7 @@ struct berlin_pwm_chip {
static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct berlin_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 berlin_pwm_readl(struct berlin_pwm_chip *bpc,
@@ -198,12 +197,14 @@ MODULE_DEVICE_TABLE(of, berlin_pwm_match);
static int berlin_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct berlin_pwm_chip *bpc;
int ret;
- bpc = devm_kzalloc(&pdev->dev, sizeof(*bpc), GFP_KERNEL);
- if (!bpc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, BERLIN_PWM_NUMPWMS, sizeof(*bpc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ bpc = to_berlin_pwm_chip(chip);
bpc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bpc->base))
@@ -213,25 +214,24 @@ static int berlin_pwm_probe(struct platform_device *pdev)
if (IS_ERR(bpc->clk))
return PTR_ERR(bpc->clk);
- bpc->chip.dev = &pdev->dev;
- bpc->chip.ops = &berlin_pwm_ops;
- bpc->chip.npwm = BERLIN_PWM_NUMPWMS;
+ chip->ops = &berlin_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &bpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
- platform_set_drvdata(pdev, bpc);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static int berlin_pwm_suspend(struct device *dev)
{
- struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
unsigned int i;
- for (i = 0; i < bpc->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
struct berlin_pwm_channel *channel = &bpc->channel[i];
channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
@@ -247,7 +247,8 @@ static int berlin_pwm_suspend(struct device *dev)
static int berlin_pwm_resume(struct device *dev)
{
- struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
unsigned int i;
int ret;
@@ -255,7 +256,7 @@ static int berlin_pwm_resume(struct device *dev)
if (ret)
return ret;
- for (i = 0; i < bpc->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
struct berlin_pwm_channel *channel = &bpc->channel[i];
berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
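
Drivers with suspend/resume callbacks (berlin above, and fsl-ftm, img and others below) additionally switch the device drvdata from the private structure to the struct pwm_chip pointer, so the callbacks can recover both the chip (for npwm and the pwm devices) and the private data. A rough sketch, reusing the hypothetical foo driver from above and assuming its private struct gained a saved_cfg[] array and that probe called platform_set_drvdata(pdev, chip):

static int foo_pwm_suspend(struct device *dev)
{
	struct pwm_chip *chip = dev_get_drvdata(dev);	/* drvdata is now the chip */
	struct foo_pwm *foo = to_foo_pwm(chip);
	unsigned int i;

	/* the channel count only exists in the pwm_chip after the conversion */
	for (i = 0; i < chip->npwm; i++)
		foo->saved_cfg[i] = readl(foo->base + 4 * i);

	return 0;
}
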
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 0fdeb0b2dbf3..82d27d07ba91 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -54,7 +54,6 @@
struct brcmstb_pwm {
void __iomem *base;
struct clk *clk;
- struct pwm_chip chip;
};
static inline u32 brcmstb_pwm_readl(struct brcmstb_pwm *p,
@@ -77,7 +76,7 @@ static inline void brcmstb_pwm_writel(struct brcmstb_pwm *p, u32 value,
static inline struct brcmstb_pwm *to_brcmstb_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct brcmstb_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -230,12 +229,14 @@ MODULE_DEVICE_TABLE(of, brcmstb_pwm_of_match);
static int brcmstb_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct brcmstb_pwm *p;
int ret;
- p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*p));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ p = to_brcmstb_pwm(chip);
p->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(p->clk))
@@ -244,15 +245,13 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
- p->chip.dev = &pdev->dev;
- p->chip.ops = &brcmstb_pwm_ops;
- p->chip.npwm = 2;
+ chip->ops = &brcmstb_pwm_ops;
p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = devm_pwmchip_add(&pdev->dev, &p->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c
index 9dd88b386907..c19a482d7e28 100644
--- a/drivers/pwm/pwm-clk.c
+++ b/drivers/pwm/pwm-clk.c
@@ -28,12 +28,14 @@
#include <linux/pwm.h>
struct pwm_clk_chip {
- struct pwm_chip chip;
struct clk *clk;
bool clk_enabled;
};
-#define to_pwm_clk_chip(_chip) container_of(_chip, struct pwm_clk_chip, chip)
+static inline struct pwm_clk_chip *to_pwm_clk_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
@@ -81,35 +83,36 @@ static const struct pwm_ops pwm_clk_ops = {
static int pwm_clk_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_clk_chip *pcchip;
int ret;
- pcchip = devm_kzalloc(&pdev->dev, sizeof(*pcchip), GFP_KERNEL);
- if (!pcchip)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pcchip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pcchip = to_pwm_clk_chip(chip);
pcchip->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(pcchip->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(pcchip->clk),
"Failed to get clock\n");
- pcchip->chip.dev = &pdev->dev;
- pcchip->chip.ops = &pwm_clk_ops;
- pcchip->chip.npwm = 1;
+ chip->ops = &pwm_clk_ops;
- ret = pwmchip_add(&pcchip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add pwm chip\n");
- platform_set_drvdata(pdev, pcchip);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void pwm_clk_remove(struct platform_device *pdev)
{
- struct pwm_clk_chip *pcchip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct pwm_clk_chip *pcchip = to_pwm_clk_chip(chip);
- pwmchip_remove(&pcchip->chip);
+ pwmchip_remove(chip);
if (pcchip->clk_enabled)
clk_disable(pcchip->clk);
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 42179b3f7ec3..c950e1dbd2b8 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -12,7 +12,6 @@
#include <linux/pwm.h>
struct clps711x_chip {
- struct pwm_chip chip;
void __iomem *pmpcon;
struct clk *clk;
spinlock_t lock;
@@ -20,7 +19,7 @@ struct clps711x_chip {
static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct clps711x_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -74,22 +73,15 @@ static const struct pwm_ops clps711x_pwm_ops = {
.apply = clps711x_pwm_apply,
};
-static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
- const struct of_phandle_args *args)
-{
- if (args->args[0] >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- return pwm_request_from_chip(chip, args->args[0], NULL);
-}
-
static int clps711x_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct clps711x_chip *priv;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_clps711x_chip(chip);
priv->pmpcon = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->pmpcon))
@@ -99,15 +91,11 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- priv->chip.ops = &clps711x_pwm_ops;
- priv->chip.dev = &pdev->dev;
- priv->chip.npwm = 2;
- priv->chip.of_xlate = clps711x_pwm_xlate;
- priv->chip.of_pwm_n_cells = 1;
+ chip->ops = &clps711x_pwm_ops;
spin_lock_init(&priv->lock);
- return devm_pwmchip_add(&pdev->dev, &priv->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index e09358901ab5..98ee5cdbd0ba 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -26,17 +26,15 @@
/**
* struct crystalcove_pwm - Crystal Cove PWM controller
- * @chip: the abstract pwm_chip structure.
* @regmap: the regmap from the parent device.
*/
struct crystalcove_pwm {
- struct pwm_chip chip;
struct regmap *regmap;
};
static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct crystalcove_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int crc_pwm_calc_clk_div(int period_ns)
@@ -55,7 +53,7 @@ static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
- struct device *dev = crc_pwm->chip.dev;
+ struct device *dev = pwmchip_parent(chip);
int err;
if (state->period > PWM_MAX_PERIOD_NS) {
@@ -125,7 +123,7 @@ static int crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
- struct device *dev = crc_pwm->chip.dev;
+ struct device *dev = pwmchip_parent(chip);
unsigned int clk_div, clk_div_reg, duty_cycle_reg;
int error;
@@ -160,22 +158,22 @@ static const struct pwm_ops crc_pwm_ops = {
static int crystalcove_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct crystalcove_pwm *crc_pwm;
struct device *dev = pdev->dev.parent;
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
- crc_pwm = devm_kzalloc(&pdev->dev, sizeof(*crc_pwm), GFP_KERNEL);
- if (!crc_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*crc_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ crc_pwm = to_crc_pwm(chip);
- crc_pwm->chip.dev = &pdev->dev;
- crc_pwm->chip.ops = &crc_pwm_ops;
- crc_pwm->chip.npwm = 1;
+ chip->ops = &crc_pwm_ops;
/* get the PMIC regmap */
crc_pwm->regmap = pmic->regmap;
- return devm_pwmchip_add(&pdev->dev, &crc_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver crystalcove_pwm_driver = {
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 5fe303b8656d..606ccfdaf4cc 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -19,13 +19,11 @@
* struct cros_ec_pwm_device - Driver data for EC PWM
*
* @ec: Pointer to EC device
- * @chip: PWM controller chip
* @use_pwm_type: Use PWM types instead of generic channels
* @channel: array with per-channel data
*/
struct cros_ec_pwm_device {
struct cros_ec_device *ec;
- struct pwm_chip chip;
bool use_pwm_type;
struct cros_ec_pwm *channel;
};
@@ -40,7 +38,7 @@ struct cros_ec_pwm {
static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct cros_ec_pwm_device, chip);
+ return pwmchip_get_drvdata(chip);
}
static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type)
@@ -93,9 +91,8 @@ static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index,
return cros_ec_cmd_xfer_status(ec, msg);
}
-static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index)
+static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, bool use_pwm_type, u8 index)
{
- struct cros_ec_device *ec = ec_pwm->ec;
struct {
struct cros_ec_command msg;
union {
@@ -115,7 +112,7 @@ static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index)
msg->insize = sizeof(*resp);
msg->outsize = sizeof(*params);
- if (ec_pwm->use_pwm_type) {
+ if (use_pwm_type) {
ret = cros_ec_dt_type_to_pwm_type(index, &params->pwm_type);
if (ret) {
dev_err(ec->dev, "Invalid PWM type index: %d\n", index);
@@ -171,9 +168,9 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
int ret;
- ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
+ ret = cros_ec_pwm_get_duty(ec_pwm->ec, ec_pwm->use_pwm_type, pwm->hwpwm);
if (ret < 0) {
- dev_err(chip->dev, "error getting initial duty: %d\n", ret);
+ dev_err(pwmchip_parent(chip), "error getting initial duty: %d\n", ret);
return ret;
}
@@ -226,13 +223,17 @@ static const struct pwm_ops cros_ec_pwm_ops = {
* of PWMs it supports directly, so we have to read the pwm duty cycle for
* subsequent channels until we get an error.
*/
-static int cros_ec_num_pwms(struct cros_ec_pwm_device *ec_pwm)
+static int cros_ec_num_pwms(struct cros_ec_device *ec)
{
int i, ret;
/* The index field is only 8 bits */
for (i = 0; i <= U8_MAX; i++) {
- ret = cros_ec_pwm_get_duty(ec_pwm, i);
+ /*
+ * Note that this function is only called when use_pwm_type is
+ * false. With use_pwm_type == true the number of PWMs is fixed.
+ */
+ ret = cros_ec_pwm_get_duty(ec, false, i);
/*
* We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
* responses; everything else is treated as an error.
@@ -261,34 +262,34 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct cros_ec_pwm_device *ec_pwm;
struct pwm_chip *chip;
+ bool use_pwm_type = false;
+ unsigned int npwm;
int ret;
if (!ec)
return dev_err_probe(dev, -EINVAL, "no parent EC device\n");
- ec_pwm = devm_kzalloc(dev, sizeof(*ec_pwm), GFP_KERNEL);
- if (!ec_pwm)
- return -ENOMEM;
- chip = &ec_pwm->chip;
- ec_pwm->ec = ec;
+ if (of_device_is_compatible(np, "google,cros-ec-pwm-type")) {
+ use_pwm_type = true;
+ npwm = CROS_EC_PWM_DT_COUNT;
+ } else {
+ ret = cros_ec_num_pwms(ec);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Couldn't find PWMs\n");
+ npwm = ret;
+ }
+
+ chip = devm_pwmchip_alloc(dev, npwm, sizeof(*ec_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- if (of_device_is_compatible(np, "google,cros-ec-pwm-type"))
- ec_pwm->use_pwm_type = true;
+ ec_pwm = pwm_to_cros_ec_pwm(chip);
+ ec_pwm->use_pwm_type = use_pwm_type;
+ ec_pwm->ec = ec;
/* PWM chip */
- chip->dev = dev;
chip->ops = &cros_ec_pwm_ops;
chip->of_xlate = cros_ec_pwm_xlate;
- chip->of_pwm_n_cells = 1;
-
- if (ec_pwm->use_pwm_type) {
- chip->npwm = CROS_EC_PWM_DT_COUNT;
- } else {
- ret = cros_ec_num_pwms(ec_pwm);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Couldn't find PWMs\n");
- chip->npwm = ret;
- }
ec_pwm->channel = devm_kcalloc(dev, chip->npwm, sizeof(*ec_pwm->channel),
GFP_KERNEL);
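
The cros-ec conversion is a little more involved because the number of channels is only known after asking the EC, and devm_pwmchip_alloc() fixes npwm at allocation time; the probe therefore determines the count first and only then allocates the chip. The same ordering in sketch form, with a hypothetical count_channels() helper standing in for cros_ec_num_pwms() and bar_pwm/bar_pwm_ops standing in for the real driver types:

struct bar_pwm { void __iomem *base; };	/* hypothetical private data */

static int bar_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct bar_pwm *bar;
	int ret;

	ret = count_channels(&pdev->dev);	/* hypothetical hardware query */
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "Couldn't count PWMs\n");

	/* npwm is passed to the allocator and is read-only afterwards */
	chip = devm_pwmchip_alloc(&pdev->dev, ret, sizeof(*bar));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	bar = pwmchip_get_drvdata(chip);

	chip->ops = &bar_pwm_ops;	/* assumed to be defined elsewhere */

	return devm_pwmchip_add(&pdev->dev, chip);
}
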
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
index ea63dd741f5c..043736972cb9 100644
--- a/drivers/pwm/pwm-dwc-core.c
+++ b/drivers/pwm/pwm-dwc-core.c
@@ -105,12 +105,12 @@ static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled) {
if (!pwm->state.enabled)
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
return __dwc_pwm_configure_timer(dwc, pwm, state);
} else {
if (pwm->state.enabled) {
__dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
}
@@ -124,7 +124,7 @@ static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty, period;
u32 ctrl, ld, ld2;
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm->hwpwm));
ld = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
@@ -149,7 +149,7 @@ static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->period = period;
state->duty_cycle = duty;
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -159,21 +159,21 @@ static const struct pwm_ops dwc_pwm_ops = {
.get_state = dwc_pwm_get_state,
};
-struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
+struct pwm_chip *dwc_pwm_alloc(struct device *dev)
{
+ struct pwm_chip *chip;
struct dwc_pwm *dwc;
- dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
- if (!dwc)
- return NULL;
+ chip = devm_pwmchip_alloc(dev, DWC_TIMERS_TOTAL, sizeof(*dwc));
+ if (IS_ERR(chip))
+ return chip;
+ dwc = to_dwc_pwm(chip);
dwc->clk_ns = 10;
- dwc->chip.dev = dev;
- dwc->chip.ops = &dwc_pwm_ops;
- dwc->chip.npwm = DWC_TIMERS_TOTAL;
+ chip->ops = &dwc_pwm_ops;
- dev_set_drvdata(dev, dwc);
- return dwc;
+ dev_set_drvdata(dev, chip);
+ return chip;
}
EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index 4929354f8cd9..676eaf8d7a53 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -25,39 +25,54 @@
#include "pwm-dwc.h"
-static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
+/* Elkhart Lake */
+static const struct dwc_pwm_info ehl_pwm_info = {
+ .nr = 2,
+ .size = 0x1000,
+};
+
+static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
{
- struct device *dev = &pci->dev;
+ struct pwm_chip *chip;
struct dwc_pwm *dwc;
- int ret;
- dwc = dwc_pwm_alloc(dev);
- if (!dwc)
- return -ENOMEM;
+ chip = dwc_pwm_alloc(dev);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dwc = to_dwc_pwm(chip);
+ dwc->base = base + offset;
+
+ return devm_pwmchip_add(dev, chip);
+}
+
+static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ const struct dwc_pwm_info *info;
+ struct device *dev = &pci->dev;
+ int i, ret;
ret = pcim_enable_device(pci);
- if (ret) {
- dev_err(dev, "Failed to enable device (%pe)\n", ERR_PTR(ret));
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable device\n");
pci_set_master(pci);
ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
- if (ret) {
- dev_err(dev, "Failed to iomap PCI BAR (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
- dwc->base = pcim_iomap_table(pci)[0];
- if (!dwc->base) {
- dev_err(dev, "Base address missing\n");
- return -ENOMEM;
- }
-
- ret = devm_pwmchip_add(dev, &dwc->chip);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
+
+ info = (const struct dwc_pwm_info *)id->driver_data;
+
+ for (i = 0; i < info->nr; i++) {
+ /*
+ * No need to check for pcim_iomap_table() failure,
+ * pcim_iomap_regions() already does it for us.
+ */
+ ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
+ if (ret)
+ return ret;
+ }
pm_runtime_put(dev);
pm_runtime_allow(dev);
@@ -73,14 +88,14 @@ static void dwc_pwm_remove(struct pci_dev *pci)
static int dwc_pwm_suspend(struct device *dev)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
int i;
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- if (dwc->chip.pwms[i].state.enabled) {
+ if (chip->pwms[i].state.enabled) {
dev_err(dev, "PWM %u in use by consumer (%s)\n",
- i, dwc->chip.pwms[i].label);
+ i, chip->pwms[i].label);
return -EBUSY;
}
dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
@@ -93,8 +108,8 @@ static int dwc_pwm_suspend(struct device *dev)
static int dwc_pwm_resume(struct device *dev)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
int i;
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
@@ -109,7 +124,7 @@ static int dwc_pwm_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
static const struct pci_device_id dwc_pwm_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */
+ { PCI_VDEVICE(INTEL, 0x4bb7), (kernel_ulong_t)&ehl_pwm_info },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc_pwm_id_table);
@@ -120,7 +135,7 @@ static struct pci_driver dwc_pwm_driver = {
.remove = dwc_pwm_remove,
.id_table = dwc_pwm_id_table,
.driver = {
- .pm = pm_ptr(&dwc_pwm_pm_ops),
+ .pm = pm_sleep_ptr(&dwc_pwm_pm_ops),
},
};
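
With the dwc PCI glue reworked like this, the per-device description lives entirely in the dwc_pwm_info pointed to by driver_data, and one pwm_chip is registered per IP instance found behind BAR 0. A further device would only need its own info struct; the ID, values and table name below are invented purely for illustration:

/* Invented example only: 0x1234 is not a real device ID. */
static const struct dwc_pwm_info example_pwm_info = {
	.nr = 4,	/* number of IP instances behind BAR 0 */
	.size = 0x1000,	/* register stride between instances */
};

static const struct pci_device_id example_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x4bb7), (kernel_ulong_t)&ehl_pwm_info },
	{ PCI_VDEVICE(INTEL, 0x1234), (kernel_ulong_t)&example_pwm_info },
	{ } /* Terminating Entry */
};
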
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
index 64795247c54c..a8b074841ae8 100644
--- a/drivers/pwm/pwm-dwc.h
+++ b/drivers/pwm/pwm-dwc.h
@@ -33,6 +33,11 @@ MODULE_IMPORT_NS(dwc_pwm);
#define DWC_TIM_CTRL_INT_MASK BIT(2)
#define DWC_TIM_CTRL_PWM BIT(3)
+struct dwc_pwm_info {
+ unsigned int nr;
+ unsigned int size;
+};
+
struct dwc_pwm_ctx {
u32 cnt;
u32 cnt2;
@@ -40,12 +45,15 @@ struct dwc_pwm_ctx {
};
struct dwc_pwm {
- struct pwm_chip chip;
void __iomem *base;
unsigned int clk_ns;
struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
};
-#define to_dwc_pwm(p) (container_of((p), struct dwc_pwm, chip))
+
+static inline struct dwc_pwm *to_dwc_pwm(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
{
@@ -57,4 +65,4 @@ static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
writel(value, dwc->base + offset);
}
-extern struct dwc_pwm *dwc_pwm_alloc(struct device *dev);
+extern struct pwm_chip *dwc_pwm_alloc(struct device *dev);
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 51e072572a87..666f2954133c 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -36,24 +36,23 @@
struct ep93xx_pwm {
void __iomem *base;
struct clk *clk;
- struct pwm_chip chip;
};
static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct ep93xx_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct platform_device *pdev = to_platform_device(chip->dev);
+ struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
return ep93xx_pwm_acquire_gpio(pdev);
}
static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct platform_device *pdev = to_platform_device(chip->dev);
+ struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
ep93xx_pwm_release_gpio(pdev);
}
@@ -163,12 +162,14 @@ static const struct pwm_ops ep93xx_pwm_ops = {
static int ep93xx_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct ep93xx_pwm *ep93xx_pwm;
int ret;
- ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL);
- if (!ep93xx_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*ep93xx_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ep93xx_pwm = to_ep93xx_pwm(chip);
ep93xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_pwm->base))
@@ -178,11 +179,9 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(ep93xx_pwm->clk))
return PTR_ERR(ep93xx_pwm->clk);
- ep93xx_pwm->chip.dev = &pdev->dev;
- ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
- ep93xx_pwm->chip.npwm = 1;
+ chip->ops = &ep93xx_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &ep93xx_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return ret;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index d1b6d1aa4773..2510c10ca473 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -40,7 +40,6 @@ struct fsl_pwm_periodcfg {
};
struct fsl_pwm_chip {
- struct pwm_chip chip;
struct mutex lock;
struct regmap *regmap;
@@ -55,7 +54,7 @@ struct fsl_pwm_chip {
static inline struct fsl_pwm_chip *to_fsl_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct fsl_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void ftm_clear_write_protection(struct fsl_pwm_chip *fpc)
@@ -221,10 +220,11 @@ static bool fsl_pwm_is_other_pwm_enabled(struct fsl_pwm_chip *fpc,
return false;
}
-static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
+static int fsl_pwm_apply_config(struct pwm_chip *chip,
struct pwm_device *pwm,
const struct pwm_state *newstate)
{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
unsigned int duty;
u32 reg_polarity;
@@ -232,7 +232,7 @@ static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
bool do_write_period = false;
if (!fsl_pwm_calculate_period(fpc, newstate->period, &periodcfg)) {
- dev_err(fpc->chip.dev, "failed to calculate new period\n");
+ dev_err(pwmchip_parent(chip), "failed to calculate new period\n");
return -EINVAL;
}
@@ -246,7 +246,7 @@ static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
*/
else if (!fsl_pwm_periodcfg_are_equal(&fpc->period, &periodcfg)) {
if (fsl_pwm_is_other_pwm_enabled(fpc, pwm)) {
- dev_err(fpc->chip.dev,
+ dev_err(pwmchip_parent(chip),
"Cannot change period for PWM %u, disable other PWMs first\n",
pwm->hwpwm);
return -EBUSY;
@@ -322,7 +322,7 @@ static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
goto end_mutex;
}
- ret = fsl_pwm_apply_config(fpc, pwm, newstate);
+ ret = fsl_pwm_apply_config(chip, pwm, newstate);
if (ret)
goto end_mutex;
@@ -392,18 +392,19 @@ static const struct regmap_config fsl_pwm_regmap_config = {
static int fsl_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct fsl_pwm_chip *fpc;
void __iomem *base;
int ret;
- fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL);
- if (!fpc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 8, sizeof(*fpc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ fpc = to_fsl_chip(chip);
mutex_init(&fpc->lock);
fpc->soc = of_device_get_match_data(&pdev->dev);
- fpc->chip.dev = &pdev->dev;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -422,16 +423,16 @@ static int fsl_pwm_probe(struct platform_device *pdev)
return PTR_ERR(fpc->clk[FSL_PWM_CLK_SYS]);
}
- fpc->clk[FSL_PWM_CLK_FIX] = devm_clk_get(fpc->chip.dev, "ftm_fix");
+ fpc->clk[FSL_PWM_CLK_FIX] = devm_clk_get(&pdev->dev, "ftm_fix");
if (IS_ERR(fpc->clk[FSL_PWM_CLK_FIX]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_FIX]);
- fpc->clk[FSL_PWM_CLK_EXT] = devm_clk_get(fpc->chip.dev, "ftm_ext");
+ fpc->clk[FSL_PWM_CLK_EXT] = devm_clk_get(&pdev->dev, "ftm_ext");
if (IS_ERR(fpc->clk[FSL_PWM_CLK_EXT]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_EXT]);
fpc->clk[FSL_PWM_CLK_CNTEN] =
- devm_clk_get(fpc->chip.dev, "ftm_cnt_clk_en");
+ devm_clk_get(&pdev->dev, "ftm_cnt_clk_en");
if (IS_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]);
@@ -443,17 +444,15 @@ static int fsl_pwm_probe(struct platform_device *pdev)
if (IS_ERR(fpc->ipg_clk))
fpc->ipg_clk = fpc->clk[FSL_PWM_CLK_SYS];
+ chip->ops = &fsl_pwm_ops;
- fpc->chip.ops = &fsl_pwm_ops;
- fpc->chip.npwm = 8;
-
- ret = devm_pwmchip_add(&pdev->dev, &fpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, fpc);
+ platform_set_drvdata(pdev, chip);
return fsl_pwm_init(fpc);
}
@@ -461,14 +460,15 @@ static int fsl_pwm_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int fsl_pwm_suspend(struct device *dev)
{
- struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
int i;
regcache_cache_only(fpc->regmap, true);
regcache_mark_dirty(fpc->regmap);
- for (i = 0; i < fpc->chip.npwm; i++) {
- struct pwm_device *pwm = &fpc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
@@ -487,11 +487,12 @@ static int fsl_pwm_suspend(struct device *dev)
static int fsl_pwm_resume(struct device *dev)
{
- struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
int i;
- for (i = 0; i < fpc->chip.npwm; i++) {
- struct pwm_device *pwm = &fpc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index c435776e2f78..2eb0b13d4e10 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -33,7 +33,6 @@
#define PWM_DUTY_MASK GENMASK(31, 0)
struct hibvt_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct reset_control *rstc;
@@ -65,7 +64,7 @@ static const struct hibvt_pwm_soc hi3559v100_soc_info = {
static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct hibvt_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void hibvt_pwm_set_bits(void __iomem *base, u32 offset,
@@ -191,72 +190,71 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
{
const struct hibvt_pwm_soc *soc =
of_device_get_match_data(&pdev->dev);
- struct hibvt_pwm_chip *pwm_chip;
+ struct pwm_chip *chip;
+ struct hibvt_pwm_chip *hi_pwm_chip;
int ret, i;
- pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
- if (pwm_chip == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_pwms, sizeof(*hi_pwm_chip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ hi_pwm_chip = to_hibvt_pwm_chip(chip);
- pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm_chip->clk)) {
+ hi_pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hi_pwm_chip->clk)) {
dev_err(&pdev->dev, "getting clock failed with %ld\n",
- PTR_ERR(pwm_chip->clk));
- return PTR_ERR(pwm_chip->clk);
+ PTR_ERR(hi_pwm_chip->clk));
+ return PTR_ERR(hi_pwm_chip->clk);
}
- pwm_chip->chip.ops = &hibvt_pwm_ops;
- pwm_chip->chip.dev = &pdev->dev;
- pwm_chip->chip.npwm = soc->num_pwms;
- pwm_chip->soc = soc;
+ chip->ops = &hibvt_pwm_ops;
+ hi_pwm_chip->soc = soc;
- pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pwm_chip->base))
- return PTR_ERR(pwm_chip->base);
+ hi_pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hi_pwm_chip->base))
+ return PTR_ERR(hi_pwm_chip->base);
- ret = clk_prepare_enable(pwm_chip->clk);
+ ret = clk_prepare_enable(hi_pwm_chip->clk);
if (ret < 0)
return ret;
- pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(pwm_chip->rstc)) {
- clk_disable_unprepare(pwm_chip->clk);
- return PTR_ERR(pwm_chip->rstc);
+ hi_pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(hi_pwm_chip->rstc)) {
+ clk_disable_unprepare(hi_pwm_chip->clk);
+ return PTR_ERR(hi_pwm_chip->rstc);
}
- reset_control_assert(pwm_chip->rstc);
+ reset_control_assert(hi_pwm_chip->rstc);
msleep(30);
- reset_control_deassert(pwm_chip->rstc);
+ reset_control_deassert(hi_pwm_chip->rstc);
- ret = pwmchip_add(&pwm_chip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
- clk_disable_unprepare(pwm_chip->clk);
+ clk_disable_unprepare(hi_pwm_chip->clk);
return ret;
}
- for (i = 0; i < pwm_chip->chip.npwm; i++) {
- hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i),
+ for (i = 0; i < chip->npwm; i++) {
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(i),
PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
}
- platform_set_drvdata(pdev, pwm_chip);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void hibvt_pwm_remove(struct platform_device *pdev)
{
- struct hibvt_pwm_chip *pwm_chip;
-
- pwm_chip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
- pwmchip_remove(&pwm_chip->chip);
+ pwmchip_remove(chip);
- reset_control_assert(pwm_chip->rstc);
+ reset_control_assert(hi_pwm_chip->rstc);
msleep(30);
- reset_control_deassert(pwm_chip->rstc);
+ reset_control_deassert(hi_pwm_chip->rstc);
- clk_disable_unprepare(pwm_chip->clk);
+ clk_disable_unprepare(hi_pwm_chip->clk);
}
static const struct of_device_id hibvt_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 5965ac35b32e..d79a96679a26 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -59,8 +59,6 @@ struct img_pwm_soc_data {
};
struct img_pwm_chip {
- struct device *dev;
- struct pwm_chip chip;
struct clk *pwm_clk;
struct clk *sys_clk;
void __iomem *base;
@@ -74,7 +72,7 @@ struct img_pwm_chip {
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct img_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline void img_pwm_writel(struct img_pwm_chip *imgchip,
@@ -99,7 +97,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (period_ns < imgchip->min_period_ns ||
period_ns > imgchip->max_period_ns) {
- dev_err(chip->dev, "configured period not in range\n");
+ dev_err(pwmchip_parent(chip), "configured period not in range\n");
return -ERANGE;
}
@@ -120,14 +118,14 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
timebase = DIV_ROUND_UP(mul, 512);
} else {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"failed to configure timebase steps/divider value\n");
return -EINVAL;
}
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
- ret = pm_runtime_resume_and_get(chip->dev);
+ ret = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (ret < 0)
return ret;
@@ -141,8 +139,8 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
(timebase << PWM_CH_CFG_TMBASE_SHIFT);
img_pwm_writel(imgchip, PWM_CH_CFG(pwm->hwpwm), val);
- pm_runtime_mark_last_busy(chip->dev);
- pm_runtime_put_autosuspend(chip->dev);
+ pm_runtime_mark_last_busy(pwmchip_parent(chip));
+ pm_runtime_put_autosuspend(pwmchip_parent(chip));
return 0;
}
@@ -153,7 +151,7 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int ret;
- ret = pm_runtime_resume_and_get(chip->dev);
+ ret = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (ret < 0)
return ret;
@@ -177,8 +175,8 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~BIT(pwm->hwpwm);
img_pwm_writel(imgchip, PWM_CTRL_CFG, val);
- pm_runtime_mark_last_busy(chip->dev);
- pm_runtime_put_autosuspend(chip->dev);
+ pm_runtime_mark_last_busy(pwmchip_parent(chip));
+ pm_runtime_put_autosuspend(pwmchip_parent(chip));
}
static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -225,7 +223,8 @@ MODULE_DEVICE_TABLE(of, img_pwm_of_match);
static int img_pwm_runtime_suspend(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
clk_disable_unprepare(imgchip->pwm_clk);
clk_disable_unprepare(imgchip->sys_clk);
@@ -235,7 +234,8 @@ static int img_pwm_runtime_suspend(struct device *dev)
static int img_pwm_runtime_resume(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int ret;
ret = clk_prepare_enable(imgchip->sys_clk);
@@ -259,13 +259,13 @@ static int img_pwm_probe(struct platform_device *pdev)
int ret;
u64 val;
unsigned long clk_rate;
+ struct pwm_chip *chip;
struct img_pwm_chip *imgchip;
- imgchip = devm_kzalloc(&pdev->dev, sizeof(*imgchip), GFP_KERNEL);
- if (!imgchip)
- return -ENOMEM;
-
- imgchip->dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, IMG_PWM_NPWM, sizeof(*imgchip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ imgchip = to_img_pwm_chip(chip);
imgchip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imgchip->base))
@@ -290,7 +290,7 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(imgchip->pwm_clk);
}
- platform_set_drvdata(pdev, imgchip);
+ platform_set_drvdata(pdev, chip);
pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -317,11 +317,9 @@ static int img_pwm_probe(struct platform_device *pdev)
do_div(val, clk_rate);
imgchip->min_period_ns = val;
- imgchip->chip.dev = &pdev->dev;
- imgchip->chip.ops = &img_pwm_ops;
- imgchip->chip.npwm = IMG_PWM_NPWM;
+ chip->ops = &img_pwm_ops;
- ret = pwmchip_add(&imgchip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
goto err_suspend;
@@ -340,19 +338,20 @@ err_pm_disable:
static void img_pwm_remove(struct platform_device *pdev)
{
- struct img_pwm_chip *imgchip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
img_pwm_runtime_suspend(&pdev->dev);
- pwmchip_remove(&imgchip->chip);
+ pwmchip_remove(chip);
}
#ifdef CONFIG_PM_SLEEP
static int img_pwm_suspend(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int i, ret;
if (pm_runtime_status_suspended(dev)) {
@@ -361,7 +360,7 @@ static int img_pwm_suspend(struct device *dev)
return ret;
}
- for (i = 0; i < imgchip->chip.npwm; i++)
+ for (i = 0; i < chip->npwm; i++)
imgchip->suspend_ch_cfg[i] = img_pwm_readl(imgchip,
PWM_CH_CFG(i));
@@ -374,7 +373,8 @@ static int img_pwm_suspend(struct device *dev)
static int img_pwm_resume(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int ret;
int i;
@@ -382,13 +382,13 @@ static int img_pwm_resume(struct device *dev)
if (ret)
return ret;
- for (i = 0; i < imgchip->chip.npwm; i++)
+ for (i = 0; i < chip->npwm; i++)
img_pwm_writel(imgchip, PWM_CH_CFG(i),
imgchip->suspend_ch_cfg[i]);
img_pwm_writel(imgchip, PWM_CTRL_CFG, imgchip->suspend_ctrl_cfg);
- for (i = 0; i < imgchip->chip.npwm; i++)
+ for (i = 0; i < chip->npwm; i++)
if (imgchip->suspend_ctrl_cfg & BIT(i))
regmap_clear_bits(imgchip->periph_regs,
PERIP_PWM_PDM_CONTROL,
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 9fc290e647e1..c50ddbac43c8 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -57,7 +57,6 @@
#define PWM_IMX_TPM_MOD_MOD GENMASK(PWM_IMX_TPM_MOD_WIDTH - 1, 0)
struct imx_tpm_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct mutex lock;
@@ -75,7 +74,7 @@ struct imx_tpm_pwm_param {
static inline struct imx_tpm_pwm_chip *
to_imx_tpm_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct imx_tpm_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -336,35 +335,42 @@ static const struct pwm_ops imx_tpm_pwm_ops = {
static int pwm_imx_tpm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct imx_tpm_pwm_chip *tpm;
+ struct clk *clk;
+ void __iomem *base;
int ret;
+ unsigned int npwm;
u32 val;
- tpm = devm_kzalloc(&pdev->dev, sizeof(*tpm), GFP_KERNEL);
- if (!tpm)
- return -ENOMEM;
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- platform_set_drvdata(pdev, tpm);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "failed to get PWM clock\n");
- tpm->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tpm->base))
- return PTR_ERR(tpm->base);
+ /* get number of channels */
+ val = readl(base + PWM_IMX_TPM_PARAM);
+ npwm = FIELD_GET(PWM_IMX_TPM_PARAM_CHAN, val);
- tpm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
- if (IS_ERR(tpm->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(tpm->clk),
- "failed to get PWM clock\n");
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*tpm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ tpm = to_imx_tpm_pwm_chip(chip);
+
+ platform_set_drvdata(pdev, tpm);
- tpm->chip.dev = &pdev->dev;
- tpm->chip.ops = &imx_tpm_pwm_ops;
+ tpm->base = base;
+ tpm->clk = clk;
- /* get number of channels */
- val = readl(tpm->base + PWM_IMX_TPM_PARAM);
- tpm->chip.npwm = FIELD_GET(PWM_IMX_TPM_PARAM_CHAN, val);
+ chip->ops = &imx_tpm_pwm_ops;
mutex_init(&tpm->lock);
- ret = devm_pwmchip_add(&pdev->dev, &tpm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index d175d895f22a..1d2aae2d278f 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -28,10 +28,12 @@ struct pwm_imx1_chip {
struct clk *clk_ipg;
struct clk *clk_per;
void __iomem *mmio_base;
- struct pwm_chip chip;
};
-#define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip)
+static inline struct pwm_imx1_chip *to_pwm_imx1_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip)
{
@@ -156,11 +158,13 @@ MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
static int pwm_imx1_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_imx1_chip *imx;
- imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
- if (!imx)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*imx));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ imx = to_pwm_imx1_chip(chip);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
@@ -172,15 +176,13 @@ static int pwm_imx1_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
"failed to get peripheral clock\n");
- imx->chip.ops = &pwm_imx1_ops;
- imx->chip.dev = &pdev->dev;
- imx->chip.npwm = 1;
+ chip->ops = &pwm_imx1_ops;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
- return devm_pwmchip_add(&pdev->dev, &imx->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver pwm_imx1_driver = {
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 7d9bc43f12b0..e1412116ef65 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -83,7 +83,6 @@ struct pwm_imx27_chip {
struct clk *clk_ipg;
struct clk *clk_per;
void __iomem *mmio_base;
- struct pwm_chip chip;
/*
* The driver cannot read the current duty cycle from the hardware if
@@ -93,7 +92,10 @@ struct pwm_imx27_chip {
unsigned int duty_cycle;
};
-#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
+static inline struct pwm_imx27_chip *to_pwm_imx27_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int pwm_imx27_clk_prepare_enable(struct pwm_imx27_chip *imx)
{
@@ -145,7 +147,7 @@ static int pwm_imx27_get_state(struct pwm_chip *chip,
state->polarity = PWM_POLARITY_INVERSED;
break;
default:
- dev_warn(chip->dev, "can't set polarity, output disconnected");
+ dev_warn(pwmchip_parent(chip), "can't set polarity, output disconnected");
}
prescaler = MX3_PWMCR_PRESCALER_GET(val);
@@ -177,7 +179,7 @@ static int pwm_imx27_get_state(struct pwm_chip *chip,
static void pwm_imx27_sw_reset(struct pwm_chip *chip)
{
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
- struct device *dev = chip->dev;
+ struct device *dev = pwmchip_parent(chip);
int wait_count = 0;
u32 cr;
@@ -196,7 +198,7 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
- struct device *dev = chip->dev;
+ struct device *dev = pwmchip_parent(chip);
unsigned int period_ms;
int fifoav;
u32 sr;
@@ -204,8 +206,8 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
sr = readl(imx->mmio_base + MX3_PWMSR);
fifoav = FIELD_GET(MX3_PWMSR_FIFOAV, sr);
if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
- period_ms = DIV_ROUND_UP_ULL(pwm_get_period(pwm),
- NSEC_PER_MSEC);
+ period_ms = DIV_ROUND_UP_ULL(pwm->state.period,
+ NSEC_PER_MSEC);
msleep(period_ms);
sr = readl(imx->mmio_base + MX3_PWMSR);
@@ -219,14 +221,11 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
unsigned long period_cycles, duty_cycles, prescale;
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
- struct pwm_state cstate;
unsigned long long c;
unsigned long long clkrate;
int ret;
u32 cr;
- pwm_get_state(pwm, &cstate);
-
clkrate = clk_get_rate(imx->clk_per);
c = clkrate * state->period;
@@ -254,7 +253,7 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* Wait for a free FIFO slot if the PWM is already enabled, and flush
* the FIFO if the PWM was disabled and is about to be enabled.
*/
- if (cstate.enabled) {
+ if (pwm->state.enabled) {
pwm_imx27_wait_fifo_slot(chip, pwm);
} else {
ret = pwm_imx27_clk_prepare_enable(imx);
@@ -306,13 +305,15 @@ MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
static int pwm_imx27_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_imx27_chip *imx;
int ret;
u32 pwmcr;
- imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
- if (imx == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*imx));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ imx = to_pwm_imx27_chip(chip);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
@@ -324,9 +325,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
"failed to get peripheral clock\n");
- imx->chip.ops = &pwm_imx27_ops;
- imx->chip.dev = &pdev->dev;
- imx->chip.npwm = 1;
+ chip->ops = &pwm_imx27_ops;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
@@ -341,7 +340,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (!(pwmcr & MX3_PWMCR_EN))
pwm_imx27_clk_disable_unprepare(imx);
- return devm_pwmchip_add(&pdev->dev, &imx->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver imx_pwm_driver = {
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index 54ecae7f937e..f9cc7c17c8f0 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -42,14 +42,13 @@
#define LGM_PWM_PERIOD_2WIRE_NS (40 * NSEC_PER_MSEC)
struct lgm_pwm_chip {
- struct pwm_chip chip;
struct regmap *regmap;
u32 period;
};
static inline struct lgm_pwm_chip *to_lgm_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct lgm_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int lgm_pwm_enable(struct pwm_chip *chip, bool enable)
@@ -168,14 +167,16 @@ static int lgm_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct reset_control *rst;
+ struct pwm_chip *chip;
struct lgm_pwm_chip *pc;
void __iomem *io_base;
struct clk *clk;
int ret;
- pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_lgm_pwm_chip(chip);
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
@@ -203,13 +204,11 @@ static int lgm_pwm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "cannot deassert reset control\n");
- pc->chip.dev = dev;
- pc->chip.ops = &lgm_pwm_ops;
- pc->chip.npwm = 1;
+ chip->ops = &lgm_pwm_ops;
lgm_pwm_init(pc);
- ret = devm_pwmchip_add(dev, &pc->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 378ab036edfe..13e5e138c8e9 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -34,12 +34,17 @@
struct iqs620_pwm_private {
struct iqs62x_core *iqs62x;
- struct pwm_chip chip;
+ struct device *dev;
struct notifier_block notifier;
struct mutex lock;
unsigned int duty_scale;
};
+static inline struct iqs620_pwm_private *iqs620_pwm_from_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
+
static int iqs620_pwm_init(struct iqs620_pwm_private *iqs620_pwm,
unsigned int duty_scale)
{
@@ -73,7 +78,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->period < IQS620_PWM_PERIOD_NS)
return -EINVAL;
- iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
+ iqs620_pwm = iqs620_pwm_from_chip(chip);
/*
* The duty cycle generated by the device is calculated as follows:
@@ -109,7 +114,7 @@ static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct iqs620_pwm_private *iqs620_pwm;
- iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
+ iqs620_pwm = iqs620_pwm_from_chip(chip);
mutex_lock(&iqs620_pwm->lock);
@@ -155,7 +160,7 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
mutex_unlock(&iqs620_pwm->lock);
if (ret) {
- dev_err(iqs620_pwm->chip.dev,
+ dev_err(iqs620_pwm->dev,
"Failed to re-initialize device: %d\n", ret);
return NOTIFY_BAD;
}
@@ -176,21 +181,24 @@ static void iqs620_pwm_notifier_unregister(void *context)
ret = blocking_notifier_chain_unregister(&iqs620_pwm->iqs62x->nh,
&iqs620_pwm->notifier);
if (ret)
- dev_err(iqs620_pwm->chip.dev,
+ dev_err(iqs620_pwm->dev,
"Failed to unregister notifier: %d\n", ret);
}
static int iqs620_pwm_probe(struct platform_device *pdev)
{
struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct iqs620_pwm_private *iqs620_pwm;
unsigned int val;
int ret;
- iqs620_pwm = devm_kzalloc(&pdev->dev, sizeof(*iqs620_pwm), GFP_KERNEL);
- if (!iqs620_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*iqs620_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ iqs620_pwm = iqs620_pwm_from_chip(chip);
+ iqs620_pwm->dev = &pdev->dev;
iqs620_pwm->iqs62x = iqs62x;
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
@@ -205,9 +213,7 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
iqs620_pwm->duty_scale = val + 1;
}
- iqs620_pwm->chip.dev = &pdev->dev;
- iqs620_pwm->chip.ops = &iqs620_pwm_ops;
- iqs620_pwm->chip.npwm = 1;
+ chip->ops = &iqs620_pwm_ops;
mutex_init(&iqs620_pwm->lock);
@@ -225,7 +231,7 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_pwmchip_add(&pdev->dev, &iqs620_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
dev_err(&pdev->dev, "Failed to add device: %d\n", ret);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 3933418e551b..da4bf543d357 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -25,23 +25,21 @@ struct soc_info {
};
struct jz4740_pwm_chip {
- struct pwm_chip chip;
struct regmap *map;
struct clk *clk[];
};
static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
{
- return container_of(chip, struct jz4740_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
-static bool jz4740_pwm_can_use_chn(struct jz4740_pwm_chip *jz,
- unsigned int channel)
+static bool jz4740_pwm_can_use_chn(struct pwm_chip *chip, unsigned int channel)
{
/* Enable all TCU channels for PWM use by default except channels 0/1 */
- u32 pwm_channels_mask = GENMASK(jz->chip.npwm - 1, 2);
+ u32 pwm_channels_mask = GENMASK(chip->npwm - 1, 2);
- device_property_read_u32(jz->chip.dev->parent,
+ device_property_read_u32(pwmchip_parent(chip)->parent,
"ingenic,pwm-channels-mask",
&pwm_channels_mask);
@@ -55,14 +53,15 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
char name[16];
int err;
- if (!jz4740_pwm_can_use_chn(jz, pwm->hwpwm))
+ if (!jz4740_pwm_can_use_chn(chip, pwm->hwpwm))
return -EBUSY;
snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
- clk = clk_get(chip->dev, name);
+ clk = clk_get(pwmchip_parent(chip), name);
if (IS_ERR(clk)) {
- dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
+ dev_err(pwmchip_parent(chip),
+ "error %pe: Failed to get clock\n", clk);
return PTR_ERR(clk);
}
@@ -150,7 +149,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
rate = clk_round_rate(clk, tmp);
if (rate < 0) {
- dev_err(chip->dev, "Unable to round rate: %ld\n", rate);
+ dev_err(pwmchip_parent(chip), "Unable to round rate: %ld\n", rate);
return rate;
}
@@ -171,7 +170,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = clk_set_rate(clk, rate);
if (err) {
- dev_err(chip->dev, "Unable to set rate: %d\n", err);
+ dev_err(pwmchip_parent(chip), "Unable to set rate: %d\n", err);
return err;
}
@@ -224,6 +223,7 @@ static const struct pwm_ops jz4740_pwm_ops = {
static int jz4740_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct jz4740_pwm_chip *jz;
const struct soc_info *info;
@@ -231,10 +231,10 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
if (!info)
return -EINVAL;
- jz = devm_kzalloc(dev, struct_size(jz, clk, info->num_pwms),
- GFP_KERNEL);
- if (!jz)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, info->num_pwms, struct_size(jz, clk, info->num_pwms));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ jz = to_jz4740(chip);
jz->map = device_node_to_regmap(dev->parent->of_node);
if (IS_ERR(jz->map)) {
@@ -242,11 +242,9 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
return PTR_ERR(jz->map);
}
- jz->chip.dev = dev;
- jz->chip.ops = &jz4740_pwm_ops;
- jz->chip.npwm = info->num_pwms;
+ chip->ops = &jz4740_pwm_ops;
- return devm_pwmchip_add(dev, &jz->chip);
+ return devm_pwmchip_add(dev, chip);
}
static const struct soc_info jz4740_soc_info = {
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index ac824ecc3f64..35b641f3f6ed 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -36,7 +36,6 @@
#define KMB_PWM_HIGHLOW_OFFSET(ch) (0x20 + 4 * (ch))
struct keembay_pwm {
- struct pwm_chip chip;
struct device *dev;
struct clk *clk;
void __iomem *base;
@@ -44,7 +43,7 @@ struct keembay_pwm {
static inline struct keembay_pwm *to_keembay_pwm_dev(struct pwm_chip *chip)
{
- return container_of(chip, struct keembay_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static void keembay_clk_unprepare(void *data)
@@ -185,12 +184,14 @@ static const struct pwm_ops keembay_pwm_ops = {
static int keembay_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct keembay_pwm *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, KMB_TOTAL_PWM_CHANNELS, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_keembay_pwm_dev(chip);
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
@@ -204,11 +205,9 @@ static int keembay_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->chip.dev = dev;
- priv->chip.ops = &keembay_pwm_ops;
- priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
+ chip->ops = &keembay_pwm_ops;
- ret = devm_pwmchip_add(dev, &priv->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 32350a357278..61189cea1046 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -20,7 +20,6 @@
#define LP3943_MAX_PERIOD 1600000
struct lp3943_pwm {
- struct pwm_chip chip;
struct lp3943 *lp3943;
struct lp3943_platform_data *pdata;
struct lp3943_pwm_map pwm_map[LP3943_NUM_PWMS];
@@ -28,7 +27,7 @@ struct lp3943_pwm {
static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct lp3943_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static struct lp3943_pwm_map *
@@ -273,12 +272,14 @@ static int lp3943_pwm_parse_dt(struct device *dev,
static int lp3943_pwm_probe(struct platform_device *pdev)
{
struct lp3943 *lp3943 = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct lp3943_pwm *lp3943_pwm;
int ret;
- lp3943_pwm = devm_kzalloc(&pdev->dev, sizeof(*lp3943_pwm), GFP_KERNEL);
- if (!lp3943_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, LP3943_NUM_PWMS, sizeof(*lp3943_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ lp3943_pwm = to_lp3943_pwm(chip);
lp3943_pwm->pdata = lp3943->pdata;
if (!lp3943_pwm->pdata) {
@@ -292,11 +293,9 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
}
lp3943_pwm->lp3943 = lp3943;
- lp3943_pwm->chip.dev = &pdev->dev;
- lp3943_pwm->chip.ops = &lp3943_pwm_ops;
- lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+ chip->ops = &lp3943_pwm_ops;
- return devm_pwmchip_add(&pdev->dev, &lp3943_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index fe891fa71a1d..04b76d257fd8 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -92,8 +92,6 @@ struct lpc18xx_pwm_data {
};
struct lpc18xx_pwm_chip {
- struct device *dev;
- struct pwm_chip chip;
void __iomem *base;
struct clk *pwm_clk;
unsigned long clk_rate;
@@ -110,7 +108,7 @@ struct lpc18xx_pwm_chip {
static inline struct lpc18xx_pwm_chip *
to_lpc18xx_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct lpc18xx_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline void lpc18xx_pwm_writel(struct lpc18xx_pwm_chip *lpc18xx_pwm,
@@ -198,7 +196,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (period_ns < lpc18xx_pwm->min_period_ns ||
period_ns > lpc18xx_pwm->max_period_ns) {
- dev_err(chip->dev, "period %d not in range\n", period_ns);
+ dev_err(pwmchip_parent(chip), "period %d not in range\n", period_ns);
return -ERANGE;
}
@@ -214,7 +212,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
*/
if (requested_events > 2 && lpc18xx_pwm->period_ns != period_ns &&
lpc18xx_pwm->period_ns) {
- dev_err(chip->dev, "conflicting period requested for PWM %u\n",
+ dev_err(pwmchip_parent(chip), "conflicting period requested for PWM %u\n",
pwm->hwpwm);
mutex_unlock(&lpc18xx_pwm->period_lock);
return -EBUSY;
@@ -289,7 +287,7 @@ static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
LPC18XX_PWM_EVENT_MAX);
if (event >= LPC18XX_PWM_EVENT_MAX) {
- dev_err(lpc18xx_pwm->dev,
+ dev_err(pwmchip_parent(chip),
"maximum number of simultaneous channels reached\n");
return -EBUSY;
}
@@ -349,16 +347,15 @@ MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
static int lpc18xx_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct lpc18xx_pwm_chip *lpc18xx_pwm;
int ret;
u64 val;
- lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
- GFP_KERNEL);
- if (!lpc18xx_pwm)
- return -ENOMEM;
-
- lpc18xx_pwm->dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, LPC18XX_NUM_PWMS, sizeof(*lpc18xx_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_pwm->base))
@@ -389,9 +386,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
lpc18xx_pwm->clk_rate);
- lpc18xx_pwm->chip.dev = &pdev->dev;
- lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
- lpc18xx_pwm->chip.npwm = LPC18XX_NUM_PWMS;
+ chip->ops = &lpc18xx_pwm_ops;
/* SCT counter must be in unify (32 bit) mode */
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
@@ -423,21 +418,22 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
val |= LPC18XX_PWM_PRE(0);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
- ret = pwmchip_add(&lpc18xx_pwm->chip);
+ ret = pwmchip_add(chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
- platform_set_drvdata(pdev, lpc18xx_pwm);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void lpc18xx_pwm_remove(struct platform_device *pdev)
{
- struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
u32 val;
- pwmchip_remove(&lpc18xx_pwm->chip);
+ pwmchip_remove(chip);
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 1d9f3e7a2434..c748537e57d1 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
struct lpc32xx_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
};
@@ -23,8 +22,10 @@ struct lpc32xx_pwm_chip {
#define PWM_ENABLE BIT(31)
#define PWM_PIN_LEVEL BIT(30)
-#define to_lpc32xx_pwm_chip(_chip) \
- container_of(_chip, struct lpc32xx_pwm_chip, chip)
+static inline struct lpc32xx_pwm_chip *to_lpc32xx_pwm_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
@@ -119,13 +120,15 @@ static const struct pwm_ops lpc32xx_pwm_ops = {
static int lpc32xx_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct lpc32xx_pwm_chip *lpc32xx;
int ret;
u32 val;
- lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL);
- if (!lpc32xx)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*lpc32xx));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ lpc32xx = to_lpc32xx_pwm_chip(chip);
lpc32xx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc32xx->base))
@@ -135,16 +138,14 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(lpc32xx->clk))
return PTR_ERR(lpc32xx->clk);
- lpc32xx->chip.dev = &pdev->dev;
- lpc32xx->chip.ops = &lpc32xx_pwm_ops;
- lpc32xx->chip.npwm = 1;
+ chip->ops = &lpc32xx_pwm_ops;
/* If PWM is disabled, configure the output to the default value */
val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
writel(val, lpc32xx->base);
- ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
return ret;
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index b4134bee2863..25045c229520 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -18,7 +18,7 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct pwm_lpss_boardinfo *info;
- struct pwm_lpss_chip *lpwm;
+ struct pwm_chip *chip;
int err;
err = pcim_enable_device(pdev);
@@ -30,11 +30,9 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
return err;
info = (struct pwm_lpss_boardinfo *)id->driver_data;
- lpwm = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
- if (IS_ERR(lpwm))
- return PTR_ERR(lpwm);
-
- pci_set_drvdata(pdev, lpwm);
+ chip = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
pm_runtime_put(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 319809aac2c4..dbc9f5b17bdc 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -20,7 +20,7 @@
static int pwm_lpss_probe_platform(struct platform_device *pdev)
{
const struct pwm_lpss_boardinfo *info;
- struct pwm_lpss_chip *lpwm;
+ struct pwm_chip *chip;
void __iomem *base;
info = device_get_match_data(&pdev->dev);
@@ -31,11 +31,9 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- lpwm = devm_pwm_lpss_probe(&pdev->dev, base, info);
- if (IS_ERR(lpwm))
- return PTR_ERR(lpwm);
-
- platform_set_drvdata(pdev, lpwm);
+ chip = devm_pwm_lpss_probe(&pdev->dev, base, info);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
/*
* On Cherry Trail devices the GFX0._PS0 AML checks if the controller
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index a6ea3ce7e019..867e2bc8c601 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(pwm_lpss_tng_info);
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_lpss_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 pwm_lpss_read(const struct pwm_device *pwm)
@@ -106,7 +106,7 @@ static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
*/
err = readl_poll_timeout(addr, val, !(val & PWM_SW_UPDATE), 40, ms);
if (err)
- dev_err(pwm->chip->dev, "PWM_SW_UPDATE was not cleared\n");
+ dev_err(pwmchip_parent(pwm->chip), "PWM_SW_UPDATE was not cleared\n");
return err;
}
@@ -114,7 +114,7 @@ static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
static inline int pwm_lpss_is_updating(struct pwm_device *pwm)
{
if (pwm_lpss_read(pwm) & PWM_SW_UPDATE) {
- dev_err(pwm->chip->dev, "PWM_SW_UPDATE is still set, skipping update\n");
+ dev_err(pwmchip_parent(pwm->chip), "PWM_SW_UPDATE is still set, skipping update\n");
return -EBUSY;
}
@@ -190,16 +190,16 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled) {
if (!pwm_is_enabled(pwm)) {
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
if (ret)
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
} else {
ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
}
} else if (pwm_is_enabled(pwm)) {
pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
}
return ret;
@@ -213,7 +213,7 @@ static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long long base_unit, freq, on_time_div;
u32 ctrl;
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
base_unit_range = BIT(lpwm->info->base_unit_bits);
@@ -235,7 +235,7 @@ static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->polarity = PWM_POLARITY_NORMAL;
state->enabled = !!(ctrl & PWM_ENABLE);
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -245,10 +245,11 @@ static const struct pwm_ops pwm_lpss_ops = {
.get_state = pwm_lpss_get_state,
};
-struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
- const struct pwm_lpss_boardinfo *info)
+struct pwm_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
+ const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
+ struct pwm_chip *chip;
unsigned long c;
int i, ret;
u32 ctrl;
@@ -256,9 +257,10 @@ struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base
if (WARN_ON(info->npwm > LPSS_MAX_PWMS))
return ERR_PTR(-ENODEV);
- lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
- if (!lpwm)
- return ERR_PTR(-ENOMEM);
+ chip = devm_pwmchip_alloc(dev, info->npwm, sizeof(*lpwm));
+ if (IS_ERR(chip))
+ return chip;
+ lpwm = to_lpwm(chip);
lpwm->regs = base;
lpwm->info = info;
@@ -267,23 +269,21 @@ struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base
if (!c)
return ERR_PTR(-EINVAL);
- lpwm->chip.dev = dev;
- lpwm->chip.ops = &pwm_lpss_ops;
- lpwm->chip.npwm = info->npwm;
+ chip->ops = &pwm_lpss_ops;
- ret = devm_pwmchip_add(dev, &lpwm->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret) {
dev_err(dev, "failed to add PWM chip: %d\n", ret);
return ERR_PTR(ret);
}
for (i = 0; i < lpwm->info->npwm; i++) {
- ctrl = pwm_lpss_read(&lpwm->chip.pwms[i]);
+ ctrl = pwm_lpss_read(&chip->pwms[i]);
if (ctrl & PWM_ENABLE)
pm_runtime_get(dev);
}
- return lpwm;
+ return chip;
}
EXPORT_SYMBOL_GPL(devm_pwm_lpss_probe);
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index bf841250385f..b5267ab5193b 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -18,7 +18,6 @@
#define LPSS_MAX_PWMS 4
struct pwm_lpss_chip {
- struct pwm_chip chip;
void __iomem *regs;
const struct pwm_lpss_boardinfo *info;
};
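The pwm-lpss.c hunks above also show the second half of the conversion: once struct pwm_chip is no longer embedded, chip->dev is gone and callbacks use pwmchip_parent(chip) for logging and runtime PM. A minimal sketch of a callback written that way follows; the "bar" name is hypothetical and the PM handling is simplified compared to the lpss driver, it only illustrates where pwmchip_parent() fits.

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>

static int bar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	/* the device that was passed to devm_pwmchip_alloc() in probe */
	struct device *dev = pwmchip_parent(chip);
	int ret;

	pm_runtime_get_sync(dev);

	/* hardware programming would go here; this sketch only models the PM
	 * and error-reporting plumbing */
	ret = 0;
	if (ret)
		dev_err(dev, "failed to apply PWM state: %d\n", ret);

	pm_runtime_put(dev);

	return ret;
}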
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 17d290f847af..19a87873ad60 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -42,16 +42,13 @@ struct pwm_mediatek_of_data {
/**
* struct pwm_mediatek_chip - struct representing PWM chip
- * @chip: linux PWM chip representation
* @regs: base address of PWM chip
* @clk_top: the top clock generator
* @clk_main: the clock used by PWM core
* @clk_pwms: the clock used by each PWM channel
- * @clk_freq: the fix clock frequency of legacy MIPS SoC
* @soc: pointer to chip's platform data
*/
struct pwm_mediatek_chip {
- struct pwm_chip chip;
void __iomem *regs;
struct clk *clk_top;
struct clk *clk_main;
@@ -70,7 +67,7 @@ static const unsigned int mtk_pwm_reg_offset_v2[] = {
static inline struct pwm_mediatek_chip *
to_pwm_mediatek_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_mediatek_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int pwm_mediatek_clk_enable(struct pwm_chip *chip,
@@ -150,7 +147,7 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (clkdiv > PWM_CLK_DIV_MAX) {
pwm_mediatek_clk_disable(chip, pwm);
- dev_err(chip->dev, "period of %d ns not supported\n", period_ns);
+ dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns);
return -EINVAL;
}
@@ -233,21 +230,26 @@ static const struct pwm_ops pwm_mediatek_ops = {
static int pwm_mediatek_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_mediatek_chip *pc;
+ const struct pwm_mediatek_of_data *soc;
unsigned int i;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ soc = of_device_get_match_data(&pdev->dev);
+
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_pwms, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_pwm_mediatek_chip(chip);
- pc->soc = of_device_get_match_data(&pdev->dev);
+ pc->soc = soc;
pc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- pc->clk_pwms = devm_kmalloc_array(&pdev->dev, pc->soc->num_pwms,
+ pc->clk_pwms = devm_kmalloc_array(&pdev->dev, soc->num_pwms,
sizeof(*pc->clk_pwms), GFP_KERNEL);
if (!pc->clk_pwms)
return -ENOMEM;
@@ -262,7 +264,7 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_main),
"Failed to get main clock\n");
- for (i = 0; i < pc->soc->num_pwms; i++) {
+ for (i = 0; i < soc->num_pwms; i++) {
char name[8];
snprintf(name, sizeof(name), "pwm%d", i + 1);
@@ -273,11 +275,9 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
"Failed to get %s clock\n", name);
}
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &pwm_mediatek_ops;
- pc->chip.npwm = pc->soc->num_pwms;
+ chip->ops = &pwm_mediatek_ops;
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
@@ -340,6 +340,13 @@ static const struct pwm_mediatek_of_data mt7986_pwm_data = {
.reg_offset = mtk_pwm_reg_offset_v1,
};
+static const struct pwm_mediatek_of_data mt7988_pwm_data = {
+ .num_pwms = 8,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v2,
+};
+
static const struct pwm_mediatek_of_data mt8183_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = false,
@@ -370,6 +377,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
{ .compatible = "mediatek,mt7981-pwm", .data = &mt7981_pwm_data },
{ .compatible = "mediatek,mt7986-pwm", .data = &mt7986_pwm_data },
+ { .compatible = "mediatek,mt7988-pwm", .data = &mt7988_pwm_data },
{ .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
{ .compatible = "mediatek,mt8365-pwm", .data = &mt8365_pwm_data },
{ .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 2971bbf3b5e7..a02fdbc61256 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -60,7 +60,7 @@
#define MISC_A_EN BIT(0)
#define MESON_NUM_PWMS 2
-#define MESON_MAX_MUX_PARENTS 4
+#define MESON_NUM_MUX_PARENTS 4
static struct meson_pwm_channel_data {
u8 reg_offset;
@@ -97,12 +97,10 @@ struct meson_pwm_channel {
};
struct meson_pwm_data {
- const char * const *parent_names;
- unsigned int num_parents;
+ const char *const parent_names[MESON_NUM_MUX_PARENTS];
};
struct meson_pwm {
- struct pwm_chip chip;
const struct meson_pwm_data *data;
struct meson_pwm_channel channels[MESON_NUM_PWMS];
void __iomem *base;
@@ -115,14 +113,14 @@ struct meson_pwm {
static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct meson_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
- struct device *dev = chip->dev;
+ struct device *dev = pwmchip_parent(chip);
int err;
err = clk_prepare_enable(channel->clk);
@@ -143,9 +141,10 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable_unprepare(channel->clk);
}
-static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
unsigned int cnt, duty_cnt;
unsigned long fin_freq;
@@ -169,19 +168,19 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
fin_freq = clk_round_rate(channel->clk, freq);
if (fin_freq == 0) {
- dev_err(meson->chip.dev, "invalid source clock frequency\n");
+ dev_err(pwmchip_parent(chip), "invalid source clock frequency\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
+ dev_dbg(pwmchip_parent(chip), "fin_freq: %lu Hz\n", fin_freq);
cnt = div_u64(fin_freq * period, NSEC_PER_SEC);
if (cnt > 0xffff) {
- dev_err(meson->chip.dev, "unable to get period cnt\n");
+ dev_err(pwmchip_parent(chip), "unable to get period cnt\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "period=%llu cnt=%u\n", period, cnt);
+ dev_dbg(pwmchip_parent(chip), "period=%llu cnt=%u\n", period, cnt);
if (duty == period) {
channel->hi = cnt;
@@ -192,7 +191,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
} else {
duty_cnt = div_u64(fin_freq * duty, NSEC_PER_SEC);
- dev_dbg(meson->chip.dev, "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
+ dev_dbg(pwmchip_parent(chip), "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
channel->hi = duty_cnt;
channel->lo = cnt - duty_cnt;
@@ -203,8 +202,9 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
return 0;
}
-static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
+static void meson_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
struct meson_pwm_channel_data *channel_data;
unsigned long flags;
@@ -215,7 +215,7 @@ static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
err = clk_set_rate(channel->clk, channel->rate);
if (err)
- dev_err(meson->chip.dev, "setting clock rate failed\n");
+ dev_err(pwmchip_parent(chip), "setting clock rate failed\n");
spin_lock_irqsave(&meson->lock, flags);
@@ -230,8 +230,9 @@ static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
spin_unlock_irqrestore(&meson->lock, flags);
}
-static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
+static void meson_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct meson_pwm *meson = to_meson_pwm(chip);
unsigned long flags;
u32 value;
@@ -269,16 +270,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->hi = ~0;
channel->lo = 0;
- meson_pwm_enable(meson, pwm);
+ meson_pwm_enable(chip, pwm);
} else {
- meson_pwm_disable(meson, pwm);
+ meson_pwm_disable(chip, pwm);
}
} else {
- err = meson_pwm_calc(meson, pwm, state);
+ err = meson_pwm_calc(chip, pwm, state);
if (err < 0)
return err;
- meson_pwm_enable(meson, pwm);
+ meson_pwm_enable(chip, pwm);
}
return 0;
@@ -337,62 +338,32 @@ static const struct pwm_ops meson_pwm_ops = {
.get_state = meson_pwm_get_state,
};
-static const char * const pwm_meson8b_parent_names[] = {
- "xtal", NULL, "fclk_div4", "fclk_div3"
-};
-
static const struct meson_pwm_data pwm_meson8b_data = {
- .parent_names = pwm_meson8b_parent_names,
- .num_parents = ARRAY_SIZE(pwm_meson8b_parent_names),
+ .parent_names = { "xtal", NULL, "fclk_div4", "fclk_div3" },
};
/*
* Only the 2 first inputs of the GXBB AO PWMs are valid
* The last 2 are grounded
*/
-static const char * const pwm_gxbb_ao_parent_names[] = {
- "xtal", "clk81"
-};
-
static const struct meson_pwm_data pwm_gxbb_ao_data = {
- .parent_names = pwm_gxbb_ao_parent_names,
- .num_parents = ARRAY_SIZE(pwm_gxbb_ao_parent_names),
-};
-
-static const char * const pwm_axg_ee_parent_names[] = {
- "xtal", "fclk_div5", "fclk_div4", "fclk_div3"
+ .parent_names = { "xtal", "clk81", NULL, NULL },
};
static const struct meson_pwm_data pwm_axg_ee_data = {
- .parent_names = pwm_axg_ee_parent_names,
- .num_parents = ARRAY_SIZE(pwm_axg_ee_parent_names),
-};
-
-static const char * const pwm_axg_ao_parent_names[] = {
- "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
+ .parent_names = { "xtal", "fclk_div5", "fclk_div4", "fclk_div3" },
};
static const struct meson_pwm_data pwm_axg_ao_data = {
- .parent_names = pwm_axg_ao_parent_names,
- .num_parents = ARRAY_SIZE(pwm_axg_ao_parent_names),
-};
-
-static const char * const pwm_g12a_ao_ab_parent_names[] = {
- "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
+ .parent_names = { "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5" },
};
static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
- .parent_names = pwm_g12a_ao_ab_parent_names,
- .num_parents = ARRAY_SIZE(pwm_g12a_ao_ab_parent_names),
-};
-
-static const char * const pwm_g12a_ao_cd_parent_names[] = {
- "xtal", "g12a_ao_clk81",
+ .parent_names = { "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5" },
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
- .parent_names = pwm_g12a_ao_cd_parent_names,
- .num_parents = ARRAY_SIZE(pwm_g12a_ao_cd_parent_names),
+ .parent_names = { "xtal", "g12a_ao_clk81", NULL, NULL },
};
static const struct of_device_id meson_pwm_matches[] = {
@@ -432,20 +403,21 @@ static const struct of_device_id meson_pwm_matches[] = {
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
-static int meson_pwm_init_channels(struct meson_pwm *meson)
+static int meson_pwm_init_channels(struct pwm_chip *chip)
{
- struct clk_parent_data mux_parent_data[MESON_MAX_MUX_PARENTS] = {};
- struct device *dev = meson->chip.dev;
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ struct clk_parent_data mux_parent_data[MESON_NUM_MUX_PARENTS] = {};
+ struct device *dev = pwmchip_parent(chip);
unsigned int i;
char name[255];
int err;
- for (i = 0; i < meson->data->num_parents; i++) {
+ for (i = 0; i < MESON_NUM_MUX_PARENTS; i++) {
mux_parent_data[i].index = -1;
mux_parent_data[i].name = meson->data->parent_names[i];
}
- for (i = 0; i < meson->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
struct meson_pwm_channel *channel = &meson->channels[i];
struct clk_parent_data div_parent = {}, gate_parent = {};
struct clk_init_data init = {};
@@ -456,7 +428,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
init.ops = &clk_mux_ops;
init.flags = 0;
init.parent_data = mux_parent_data;
- init.num_parents = meson->data->num_parents;
+ init.num_parents = MESON_NUM_MUX_PARENTS;
channel->mux.reg = meson->base + REG_MISC_AB;
channel->mux.shift =
@@ -525,29 +497,29 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
static int meson_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct meson_pwm *meson;
int err;
- meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
- if (!meson)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, MESON_NUM_PWMS, sizeof(*meson));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ meson = to_meson_pwm(chip);
meson->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson->base))
return PTR_ERR(meson->base);
spin_lock_init(&meson->lock);
- meson->chip.dev = &pdev->dev;
- meson->chip.ops = &meson_pwm_ops;
- meson->chip.npwm = MESON_NUM_PWMS;
+ chip->ops = &meson_pwm_ops;
meson->data = of_device_get_match_data(&pdev->dev);
- err = meson_pwm_init_channels(meson);
+ err = meson_pwm_init_channels(chip);
if (err < 0)
return err;
- err = devm_pwmchip_add(&pdev->dev, &meson->chip);
+ err = devm_pwmchip_add(&pdev->dev, chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err,
"failed to register PWM chip\n");
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index c0c53968f3e9..c1f2287b8e97 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -54,7 +54,6 @@
#define MCHPCOREPWM_TIMEOUT_MS 100u
struct mchp_core_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct mutex lock; /* protects the shared period */
@@ -65,7 +64,7 @@ struct mchp_core_pwm_chip {
static inline struct mchp_core_pwm_chip *to_mchp_core_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct mchp_core_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void mchp_core_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -447,13 +446,15 @@ MODULE_DEVICE_TABLE(of, mchp_core_of_match);
static int mchp_core_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct mchp_core_pwm_chip *mchp_core_pwm;
struct resource *regs;
int ret;
- mchp_core_pwm = devm_kzalloc(&pdev->dev, sizeof(*mchp_core_pwm), GFP_KERNEL);
- if (!mchp_core_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 16, sizeof(*mchp_core_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mchp_core_pwm = to_mchp_core_pwm(chip);
mchp_core_pwm->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mchp_core_pwm->base))
@@ -470,9 +471,7 @@ static int mchp_core_pwm_probe(struct platform_device *pdev)
mutex_init(&mchp_core_pwm->lock);
- mchp_core_pwm->chip.dev = &pdev->dev;
- mchp_core_pwm->chip.ops = &mchp_core_pwm_ops;
- mchp_core_pwm->chip.npwm = 16;
+ chip->ops = &mchp_core_pwm_ops;
mchp_core_pwm->channel_enabled = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(0));
mchp_core_pwm->channel_enabled |=
@@ -485,7 +484,7 @@ static int mchp_core_pwm_probe(struct platform_device *pdev)
writel_relaxed(1U, mchp_core_pwm->base + MCHPCOREPWM_SYNC_UPD);
mchp_core_pwm->update_timestamp = ktime_get();
- ret = devm_pwmchip_add(&pdev->dev, &mchp_core_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to add pwmchip\n");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index a72f7be36996..bafd6b6195f6 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -42,7 +42,6 @@ struct mtk_pwm_data {
};
struct mtk_disp_pwm {
- struct pwm_chip chip;
const struct mtk_pwm_data *data;
struct clk *clk_main;
struct clk *clk_mm;
@@ -52,7 +51,7 @@ struct mtk_disp_pwm {
static inline struct mtk_disp_pwm *to_mtk_disp_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct mtk_disp_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static void mtk_disp_pwm_update_bits(struct mtk_disp_pwm *mdp, u32 offset,
@@ -91,14 +90,14 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!mdp->enabled) {
err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n",
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_main: %pe\n",
ERR_PTR(err));
return err;
}
err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n",
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_mm: %pe\n",
ERR_PTR(err));
clk_disable_unprepare(mdp->clk_main);
return err;
@@ -181,13 +180,13 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
return err;
}
err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
clk_disable_unprepare(mdp->clk_main);
return err;
}
@@ -231,12 +230,14 @@ static const struct pwm_ops mtk_disp_pwm_ops = {
static int mtk_disp_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct mtk_disp_pwm *mdp;
int ret;
- mdp = devm_kzalloc(&pdev->dev, sizeof(*mdp), GFP_KERNEL);
- if (!mdp)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*mdp));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mdp = to_mtk_disp_pwm(chip);
mdp->data = of_device_get_match_data(&pdev->dev);
@@ -254,11 +255,9 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(mdp->clk_mm),
"Failed to get mm clock\n");
- mdp->chip.dev = &pdev->dev;
- mdp->chip.ops = &mtk_disp_pwm_ops;
- mdp->chip.npwm = 1;
+ chip->ops = &mtk_disp_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &mdp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 1b5e787d78f1..8cad214b1c29 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -37,12 +37,14 @@ static const u8 cdiv_shift[PERIOD_CDIV_MAX] = {
};
struct mxs_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
};
-#define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip)
+static inline struct mxs_pwm_chip *to_mxs_pwm_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
@@ -120,12 +122,21 @@ static const struct pwm_ops mxs_pwm_ops = {
static int mxs_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct pwm_chip *chip;
struct mxs_pwm_chip *mxs;
+ u32 npwm;
int ret;
- mxs = devm_kzalloc(&pdev->dev, sizeof(*mxs), GFP_KERNEL);
- if (!mxs)
- return -ENOMEM;
+ ret = of_property_read_u32(np, "fsl,pwm-number", &npwm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret);
+ return ret;
+ }
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*mxs));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mxs = to_mxs_pwm_chip(chip);
mxs->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs->base))
@@ -135,21 +146,14 @@ static int mxs_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mxs->clk))
return PTR_ERR(mxs->clk);
- mxs->chip.dev = &pdev->dev;
- mxs->chip.ops = &mxs_pwm_ops;
-
- ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret);
- return ret;
- }
+ chip->ops = &mxs_pwm_ops;
/* FIXME: Only do this if the PWM isn't already running */
ret = stmp_reset_block(mxs->base);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
- ret = devm_pwmchip_add(&pdev->dev, &mxs->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
return ret;
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index 78606039eda2..28d1c2e5a98f 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -25,12 +25,11 @@
struct ntxec_pwm {
struct ntxec *ec;
- struct pwm_chip chip;
};
static struct ntxec_pwm *ntxec_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ntxec_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
#define NTXEC_REG_AUTO_OFF_HI 0xa1
@@ -141,16 +140,13 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = ntxec_pwm_from_chip(chip);
priv->ec = ec;
-
- chip = &priv->chip;
- chip->dev = &pdev->dev;
chip->ops = &ntxec_pwm_ops;
- chip->npwm = 1;
return devm_pwmchip_add(&pdev->dev, chip);
}
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 496bd73d29fe..cd51c4a938f5 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -53,13 +53,11 @@
/**
* struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
* corresponding to omap dmtimer.
- * @chip: PWM chip structure representing PWM controller
* @dm_timer: Pointer to omap dm timer.
* @pdata: Pointer to omap dm timer ops.
* @dm_timer_pdev: Pointer to omap dm timer platform device
*/
struct pwm_omap_dmtimer_chip {
- struct pwm_chip chip;
/* Mutex to protect pwm apply state */
struct omap_dm_timer *dm_timer;
const struct omap_dm_timer_ops *pdata;
@@ -69,7 +67,7 @@ struct pwm_omap_dmtimer_chip {
static inline struct pwm_omap_dmtimer_chip *
to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/**
@@ -155,7 +153,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
unsigned long clk_rate;
struct clk *fclk;
- dev_dbg(chip->dev, "requested duty cycle: %d ns, period: %d ns\n",
+ dev_dbg(pwmchip_parent(chip), "requested duty cycle: %d ns, period: %d ns\n",
duty_ns, period_ns);
if (duty_ns == pwm_get_duty_cycle(pwm) &&
@@ -164,17 +162,17 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
fclk = omap->pdata->get_fclk(omap->dm_timer);
if (!fclk) {
- dev_err(chip->dev, "invalid pmtimer fclk\n");
+ dev_err(pwmchip_parent(chip), "invalid pmtimer fclk\n");
return -EINVAL;
}
clk_rate = clk_get_rate(fclk);
if (!clk_rate) {
- dev_err(chip->dev, "invalid pmtimer fclk rate\n");
+ dev_err(pwmchip_parent(chip), "invalid pmtimer fclk rate\n");
return -EINVAL;
}
- dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
+ dev_dbg(pwmchip_parent(chip), "clk rate: %luHz\n", clk_rate);
/*
* Calculate the appropriate load and match values based on the
@@ -196,27 +194,27 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
if (period_cycles < 2) {
- dev_info(chip->dev,
+ dev_info(pwmchip_parent(chip),
"period %d ns too short for clock rate %lu Hz\n",
period_ns, clk_rate);
return -EINVAL;
}
if (duty_cycles < 1) {
- dev_dbg(chip->dev,
+ dev_dbg(pwmchip_parent(chip),
"duty cycle %d ns is too short for clock rate %lu Hz\n",
duty_ns, clk_rate);
- dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
+ dev_dbg(pwmchip_parent(chip), "using minimum of 1 clock cycle\n");
duty_cycles = 1;
} else if (duty_cycles >= period_cycles) {
- dev_dbg(chip->dev,
+ dev_dbg(pwmchip_parent(chip),
"duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
duty_ns, period_ns, clk_rate);
- dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
+ dev_dbg(pwmchip_parent(chip), "using maximum of 1 clock cycle less than period\n");
duty_cycles = period_cycles - 1;
}
- dev_dbg(chip->dev, "effective duty cycle: %lld ns, period: %lld ns\n",
+ dev_dbg(pwmchip_parent(chip), "effective duty cycle: %lld ns, period: %lld ns\n",
DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * duty_cycles,
clk_rate),
DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * period_cycles,
@@ -228,7 +226,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
omap->pdata->set_load(omap->dm_timer, load_value);
omap->pdata->set_match(omap->dm_timer, true, match_value);
- dev_dbg(chip->dev, "load value: %#08x (%d), match value: %#08x (%d)\n",
+ dev_dbg(pwmchip_parent(chip), "load value: %#08x (%d), match value: %#08x (%d)\n",
load_value, load_value, match_value, match_value);
return 0;
@@ -311,6 +309,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
struct dmtimer_platform_data *timer_pdata;
const struct omap_dm_timer_ops *pdata;
struct platform_device *timer_pdev;
+ struct pwm_chip *chip;
struct pwm_omap_dmtimer_chip *omap;
struct omap_dm_timer *dm_timer;
struct device_node *timer;
@@ -368,11 +367,12 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
goto err_request_timer;
}
- omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- ret = -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*omap));
+ if (IS_ERR(chip)) {
+ ret = PTR_ERR(chip);
goto err_alloc_omap;
}
+ omap = to_pwm_omap_dmtimer_chip(chip);
omap->pdata = pdata;
omap->dm_timer = dm_timer;
@@ -392,11 +392,9 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
if (!of_property_read_u32(pdev->dev.of_node, "ti,clock-source", &v))
omap->pdata->set_source(omap->dm_timer, v);
- omap->chip.dev = &pdev->dev;
- omap->chip.ops = &pwm_omap_dmtimer_ops;
- omap->chip.npwm = 1;
+ chip->ops = &pwm_omap_dmtimer_ops;
- ret = pwmchip_add(&omap->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM\n");
goto err_pwmchip_add;
@@ -404,7 +402,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
of_node_put(timer);
- platform_set_drvdata(pdev, omap);
+ platform_set_drvdata(pdev, chip);
return 0;
@@ -432,9 +430,10 @@ err_find_timer_pdev:
static void pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
- struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
- pwmchip_remove(&omap->chip);
+ pwmchip_remove(chip);
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index e79b1de8c4d8..c5da2a6ed846 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -76,7 +76,6 @@
#define REG_OFF_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L((C)))
struct pca9685 {
- struct pwm_chip chip;
struct regmap *regmap;
struct mutex lock;
DECLARE_BITMAP(pwms_enabled, PCA9685_MAXCHAN + 1);
@@ -88,7 +87,7 @@ struct pca9685 {
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
{
- return container_of(chip, struct pca9685, chip);
+ return pwmchip_get_drvdata(chip);
}
/* This function is supposed to be called with the lock mutex held */
@@ -107,9 +106,10 @@ static bool pca9685_prescaler_can_change(struct pca9685 *pca, int channel)
return test_bit(channel, pca->pwms_enabled);
}
-static int pca9685_read_reg(struct pca9685 *pca, unsigned int reg, unsigned int *val)
+static int pca9685_read_reg(struct pwm_chip *chip, unsigned int reg, unsigned int *val)
{
- struct device *dev = pca->chip.dev;
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
int err;
err = regmap_read(pca->regmap, reg, val);
@@ -119,9 +119,10 @@ static int pca9685_read_reg(struct pca9685 *pca, unsigned int reg, unsigned int
return err;
}
-static int pca9685_write_reg(struct pca9685 *pca, unsigned int reg, unsigned int val)
+static int pca9685_write_reg(struct pwm_chip *chip, unsigned int reg, unsigned int val)
{
- struct device *dev = pca->chip.dev;
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
int err;
err = regmap_write(pca->regmap, reg, val);
@@ -132,19 +133,19 @@ static int pca9685_write_reg(struct pca9685 *pca, unsigned int reg, unsigned int
}
/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
-static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty)
+static void pca9685_pwm_set_duty(struct pwm_chip *chip, int channel, unsigned int duty)
{
- struct pwm_device *pwm = &pca->chip.pwms[channel];
+ struct pwm_device *pwm = &chip->pwms[channel];
unsigned int on, off;
if (duty == 0) {
/* Set the full OFF bit, which has the highest precedence */
- pca9685_write_reg(pca, REG_OFF_H(channel), LED_FULL);
+ pca9685_write_reg(chip, REG_OFF_H(channel), LED_FULL);
return;
} else if (duty >= PCA9685_COUNTER_RANGE) {
/* Set the full ON bit and clear the full OFF bit */
- pca9685_write_reg(pca, REG_ON_H(channel), LED_FULL);
- pca9685_write_reg(pca, REG_OFF_H(channel), 0);
+ pca9685_write_reg(chip, REG_ON_H(channel), LED_FULL);
+ pca9685_write_reg(chip, REG_OFF_H(channel), 0);
return;
}
@@ -164,16 +165,16 @@ static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int
off = (on + duty) % PCA9685_COUNTER_RANGE;
/* Set ON time (clears full ON bit) */
- pca9685_write_reg(pca, REG_ON_L(channel), on & 0xff);
- pca9685_write_reg(pca, REG_ON_H(channel), (on >> 8) & 0xf);
+ pca9685_write_reg(chip, REG_ON_L(channel), on & 0xff);
+ pca9685_write_reg(chip, REG_ON_H(channel), (on >> 8) & 0xf);
/* Set OFF time (clears full OFF bit) */
- pca9685_write_reg(pca, REG_OFF_L(channel), off & 0xff);
- pca9685_write_reg(pca, REG_OFF_H(channel), (off >> 8) & 0xf);
+ pca9685_write_reg(chip, REG_OFF_L(channel), off & 0xff);
+ pca9685_write_reg(chip, REG_OFF_H(channel), (off >> 8) & 0xf);
}
-static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
+static unsigned int pca9685_pwm_get_duty(struct pwm_chip *chip, int channel)
{
- struct pwm_device *pwm = &pca->chip.pwms[channel];
+ struct pwm_device *pwm = &chip->pwms[channel];
unsigned int off = 0, on = 0, val = 0;
if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
@@ -181,25 +182,25 @@ static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
return 0;
}
- pca9685_read_reg(pca, LED_N_OFF_H(channel), &off);
+ pca9685_read_reg(chip, LED_N_OFF_H(channel), &off);
if (off & LED_FULL) {
/* Full OFF bit is set */
return 0;
}
- pca9685_read_reg(pca, LED_N_ON_H(channel), &on);
+ pca9685_read_reg(chip, LED_N_ON_H(channel), &on);
if (on & LED_FULL) {
/* Full ON bit is set */
return PCA9685_COUNTER_RANGE;
}
- pca9685_read_reg(pca, LED_N_OFF_L(channel), &val);
+ pca9685_read_reg(chip, LED_N_OFF_L(channel), &val);
off = ((off & 0xf) << 8) | (val & 0xff);
if (!pwm->state.usage_power)
return off;
/* Read ON register to calculate duty cycle of staggered output */
- if (pca9685_read_reg(pca, LED_N_ON_L(channel), &val)) {
+ if (pca9685_read_reg(chip, LED_N_ON_L(channel), &val)) {
/* Reset val to 0 in case reading LED_N_ON_L failed */
val = 0;
}
@@ -247,35 +248,37 @@ static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
+ struct pca9685 *pca = to_pca(chip);
if (pca9685_pwm_test_and_set_inuse(pca, offset))
return -EBUSY;
- pm_runtime_get_sync(pca->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
return 0;
}
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
- return pca9685_pwm_get_duty(pca, offset) != 0;
+ return pca9685_pwm_get_duty(chip, offset) != 0;
}
static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
int value)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
- pca9685_pwm_set_duty(pca, offset, value ? PCA9685_COUNTER_RANGE : 0);
+ pca9685_pwm_set_duty(chip, offset, value ? PCA9685_COUNTER_RANGE : 0);
}
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
+ struct pca9685 *pca = to_pca(chip);
- pca9685_pwm_set_duty(pca, offset, 0);
- pm_runtime_put(pca->chip.dev);
+ pca9685_pwm_set_duty(chip, offset, 0);
+ pm_runtime_put(pwmchip_parent(chip));
pca9685_pwm_clear_inuse(pca, offset);
}
@@ -306,9 +309,10 @@ static int pca9685_pwm_gpio_direction_output(struct gpio_chip *gpio,
* expose a GPIO chip here which can exclusively take over the underlying
* PWM channel.
*/
-static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
+static int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
{
- struct device *dev = pca->chip.dev;
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
pca->gpio.label = dev_name(dev);
pca->gpio.parent = dev;
@@ -323,7 +327,7 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
pca->gpio.ngpio = PCA9685_MAXCHAN;
pca->gpio.can_sleep = true;
- return devm_gpiochip_add_data(dev, &pca->gpio, pca);
+ return devm_gpiochip_add_data(dev, &pca->gpio, chip);
}
#else
static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
@@ -337,15 +341,16 @@ pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
}
-static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
+static inline int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
{
return 0;
}
#endif
-static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable)
+static void pca9685_set_sleep_mode(struct pwm_chip *chip, bool enable)
{
- struct device *dev = pca->chip.dev;
+ struct device *dev = pwmchip_parent(chip);
+ struct pca9685 *pca = to_pca(chip);
int err = regmap_update_bits(pca->regmap, PCA9685_MODE1,
MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
if (err) {
@@ -373,19 +378,19 @@ static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period,
PCA9685_COUNTER_RANGE * 1000) - 1;
if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) {
- dev_err(chip->dev, "pwm not changed: period out of bounds!\n");
+ dev_err(pwmchip_parent(chip), "pwm not changed: period out of bounds!\n");
return -EINVAL;
}
if (!state->enabled) {
- pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
+ pca9685_pwm_set_duty(chip, pwm->hwpwm, 0);
return 0;
}
- pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
+ pca9685_read_reg(chip, PCA9685_PRESCALE, &val);
if (prescale != val) {
if (!pca9685_prescaler_can_change(pca, pwm->hwpwm)) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"pwm not changed: periods of enabled pwms must match!\n");
return -EBUSY;
}
@@ -397,18 +402,18 @@ static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* state is guaranteed active here.
*/
/* Put chip into sleep mode */
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
/* Change the chip-wide output frequency */
- pca9685_write_reg(pca, PCA9685_PRESCALE, prescale);
+ pca9685_write_reg(chip, PCA9685_PRESCALE, prescale);
/* Wake the chip up */
- pca9685_set_sleep_mode(pca, false);
+ pca9685_set_sleep_mode(chip, false);
}
duty = PCA9685_COUNTER_RANGE * state->duty_cycle;
duty = DIV_ROUND_UP_ULL(duty, state->period);
- pca9685_pwm_set_duty(pca, pwm->hwpwm, duty);
+ pca9685_pwm_set_duty(chip, pwm->hwpwm, duty);
return 0;
}
@@ -434,12 +439,11 @@ static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
- struct pca9685 *pca = to_pca(chip);
unsigned long long duty;
unsigned int val = 0;
/* Calculate (chip-wide) period from prescale value */
- pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
+ pca9685_read_reg(chip, PCA9685_PRESCALE, &val);
/*
* PCA9685_OSC_CLOCK_MHZ is 25, i.e. an integer divider of 1000.
* The following calculation is therefore only a multiplication
@@ -462,7 +466,7 @@ static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
state->enabled = true;
- duty = pca9685_pwm_get_duty(pca, pwm->hwpwm);
+ duty = pca9685_pwm_get_duty(chip, pwm->hwpwm);
state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
return 0;
@@ -482,7 +486,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&pca->lock);
}
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
return 0;
}
@@ -492,11 +496,11 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
struct pca9685 *pca = to_pca(chip);
mutex_lock(&pca->lock);
- pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
+ pca9685_pwm_set_duty(chip, pwm->hwpwm, 0);
clear_bit(pwm->hwpwm, pca->pwms_enabled);
mutex_unlock(&pca->lock);
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
@@ -516,13 +520,16 @@ static const struct regmap_config pca9685_regmap_i2c_config = {
static int pca9685_pwm_probe(struct i2c_client *client)
{
+ struct pwm_chip *chip;
struct pca9685 *pca;
unsigned int reg;
int ret;
- pca = devm_kzalloc(&client->dev, sizeof(*pca), GFP_KERNEL);
- if (!pca)
- return -ENOMEM;
+ /* Add an extra channel for ALL_LED */
+ chip = devm_pwmchip_alloc(&client->dev, PCA9685_MAXCHAN + 1, sizeof(*pca));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pca = to_pca(chip);
pca->regmap = devm_regmap_init_i2c(client, &pca9685_regmap_i2c_config);
if (IS_ERR(pca->regmap)) {
@@ -532,11 +539,11 @@ static int pca9685_pwm_probe(struct i2c_client *client)
return ret;
}
- i2c_set_clientdata(client, pca);
+ i2c_set_clientdata(client, chip);
mutex_init(&pca->lock);
- ret = pca9685_read_reg(pca, PCA9685_MODE2, &reg);
+ ret = pca9685_read_reg(chip, PCA9685_MODE2, &reg);
if (ret)
return ret;
@@ -550,34 +557,30 @@ static int pca9685_pwm_probe(struct i2c_client *client)
else
reg |= MODE2_OUTDRV;
- ret = pca9685_write_reg(pca, PCA9685_MODE2, reg);
+ ret = pca9685_write_reg(chip, PCA9685_MODE2, reg);
if (ret)
return ret;
/* Disable all LED ALLCALL and SUBx addresses to avoid bus collisions */
- pca9685_read_reg(pca, PCA9685_MODE1, &reg);
+ pca9685_read_reg(chip, PCA9685_MODE1, &reg);
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
- pca9685_write_reg(pca, PCA9685_MODE1, reg);
+ pca9685_write_reg(chip, PCA9685_MODE1, reg);
/* Reset OFF/ON registers to POR default */
- pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_L, 0);
- pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_H, LED_FULL);
- pca9685_write_reg(pca, PCA9685_ALL_LED_ON_L, 0);
- pca9685_write_reg(pca, PCA9685_ALL_LED_ON_H, LED_FULL);
-
- pca->chip.ops = &pca9685_pwm_ops;
- /* Add an extra channel for ALL_LED */
- pca->chip.npwm = PCA9685_MAXCHAN + 1;
+ pca9685_write_reg(chip, PCA9685_ALL_LED_OFF_L, 0);
+ pca9685_write_reg(chip, PCA9685_ALL_LED_OFF_H, LED_FULL);
+ pca9685_write_reg(chip, PCA9685_ALL_LED_ON_L, 0);
+ pca9685_write_reg(chip, PCA9685_ALL_LED_ON_H, LED_FULL);
- pca->chip.dev = &client->dev;
+ chip->ops = &pca9685_pwm_ops;
- ret = pwmchip_add(&pca->chip);
+ ret = pwmchip_add(chip);
if (ret < 0)
return ret;
- ret = pca9685_pwm_gpio_probe(pca);
+ ret = pca9685_pwm_gpio_probe(chip);
if (ret < 0) {
- pwmchip_remove(&pca->chip);
+ pwmchip_remove(chip);
return ret;
}
@@ -588,11 +591,11 @@ static int pca9685_pwm_probe(struct i2c_client *client)
* Although the chip comes out of power-up in the sleep state,
* we force it to sleep in case it was woken up before
*/
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
pm_runtime_set_suspended(&client->dev);
} else {
/* Wake the chip up if runtime PM is disabled */
- pca9685_set_sleep_mode(pca, false);
+ pca9685_set_sleep_mode(chip, false);
}
return 0;
@@ -600,13 +603,13 @@ static int pca9685_pwm_probe(struct i2c_client *client)
static void pca9685_pwm_remove(struct i2c_client *client)
{
- struct pca9685 *pca = i2c_get_clientdata(client);
+ struct pwm_chip *chip = i2c_get_clientdata(client);
- pwmchip_remove(&pca->chip);
+ pwmchip_remove(chip);
if (!pm_runtime_enabled(&client->dev)) {
/* Put chip in sleep state if runtime PM is disabled */
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
}
pm_runtime_disable(&client->dev);
@@ -615,18 +618,18 @@ static void pca9685_pwm_remove(struct i2c_client *client)
static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct pca9685 *pca = i2c_get_clientdata(client);
+ struct pwm_chip *chip = i2c_get_clientdata(client);
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
return 0;
}
static int __maybe_unused pca9685_pwm_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct pca9685 *pca = i2c_get_clientdata(client);
+ struct pwm_chip *chip = i2c_get_clientdata(client);
- pca9685_set_sleep_mode(pca, false);
+ pca9685_set_sleep_mode(chip, false);
return 0;
}
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 76685f926c75..bb7bb48b2e6d 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -49,7 +49,6 @@ MODULE_DEVICE_TABLE(platform, pwm_id_table);
#define PWMDCR_FD (1 << 10)
struct pxa_pwm_chip {
- struct pwm_chip chip;
struct device *dev;
struct clk *clk;
@@ -58,7 +57,7 @@ struct pxa_pwm_chip {
static inline struct pxa_pwm_chip *to_pxa_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct pxa_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -159,6 +158,7 @@ MODULE_DEVICE_TABLE(of, pwm_of_match);
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct pwm_chip *chip;
struct pxa_pwm_chip *pc;
int ret = 0;
@@ -168,28 +168,27 @@ static int pwm_probe(struct platform_device *pdev)
if (id == NULL)
return -EINVAL;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (pc == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev,
+ (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1,
+ sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_pxa_pwm_chip(chip);
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
return PTR_ERR(pc->clk);
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &pxa_pwm_ops;
- pc->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
+ chip->ops = &pxa_pwm_ops;
- if (IS_ENABLED(CONFIG_OF)) {
- pc->chip.of_xlate = of_pwm_single_xlate;
- pc->chip.of_pwm_n_cells = 1;
- }
+ if (IS_ENABLED(CONFIG_OF))
+ chip->of_xlate = of_pwm_single_xlate;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 1ad814fdec6b..8921e7ea2cea 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -27,7 +27,6 @@
struct raspberrypi_pwm {
struct rpi_firmware *firmware;
- struct pwm_chip chip;
unsigned int duty_cycle;
};
@@ -40,7 +39,7 @@ struct raspberrypi_pwm_prop {
static inline
struct raspberrypi_pwm *raspberrypi_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct raspberrypi_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int raspberrypi_pwm_set_property(struct rpi_firmware *firmware,
@@ -122,7 +121,7 @@ static int raspberrypi_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = raspberrypi_pwm_set_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
duty_cycle);
if (ret) {
- dev_err(chip->dev, "Failed to set duty cycle: %pe\n",
+ dev_err(pwmchip_parent(chip), "Failed to set duty cycle: %pe\n",
ERR_PTR(ret));
return ret;
}
@@ -142,6 +141,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
struct device_node *firmware_node;
struct device *dev = &pdev->dev;
struct rpi_firmware *firmware;
+ struct pwm_chip *chip;
struct raspberrypi_pwm *rpipwm;
int ret;
@@ -157,14 +157,14 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EPROBE_DEFER,
"Failed to get firmware handle\n");
- rpipwm = devm_kzalloc(&pdev->dev, sizeof(*rpipwm), GFP_KERNEL);
- if (!rpipwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, RASPBERRYPI_FIRMWARE_PWM_NUM,
+ sizeof(*rpipwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rpipwm = raspberrypi_pwm_from_chip(chip);
rpipwm->firmware = firmware;
- rpipwm->chip.dev = dev;
- rpipwm->chip.ops = &raspberrypi_pwm_ops;
- rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
+ chip->ops = &raspberrypi_pwm_ops;
ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
&rpipwm->duty_cycle);
@@ -173,7 +173,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return ret;
}
- return devm_pwmchip_add(dev, &rpipwm->chip);
+ return devm_pwmchip_add(dev, chip);
}
static const struct of_device_id raspberrypi_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 13269f55fccf..4cfecd88ede0 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -38,14 +38,13 @@
#define RCAR_PWMCNT_PH0_SHIFT 0
struct rcar_pwm_chip {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct rcar_pwm_chip *to_rcar_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct rcar_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void rcar_pwm_write(struct rcar_pwm_chip *rp, u32 data,
@@ -132,12 +131,12 @@ static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
static int rcar_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- return pm_runtime_get_sync(chip->dev);
+ return pm_runtime_get_sync(pwmchip_parent(chip));
}
static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
}
static int rcar_pwm_enable(struct rcar_pwm_chip *rp)
@@ -202,12 +201,14 @@ static const struct pwm_ops rcar_pwm_ops = {
static int rcar_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct rcar_pwm_chip *rcar_pwm;
int ret;
- rcar_pwm = devm_kzalloc(&pdev->dev, sizeof(*rcar_pwm), GFP_KERNEL);
- if (rcar_pwm == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*rcar_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rcar_pwm = to_rcar_pwm_chip(chip);
rcar_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcar_pwm->base))
@@ -219,15 +220,13 @@ static int rcar_pwm_probe(struct platform_device *pdev)
return PTR_ERR(rcar_pwm->clk);
}
- platform_set_drvdata(pdev, rcar_pwm);
+ chip->ops = &rcar_pwm_ops;
- rcar_pwm->chip.dev = &pdev->dev;
- rcar_pwm->chip.ops = &rcar_pwm_ops;
- rcar_pwm->chip.npwm = 1;
+ platform_set_drvdata(pdev, chip);
pm_runtime_enable(&pdev->dev);
- ret = pwmchip_add(&rcar_pwm->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
pm_runtime_disable(&pdev->dev);
@@ -239,9 +238,9 @@ static int rcar_pwm_probe(struct platform_device *pdev)
static void rcar_pwm_remove(struct platform_device *pdev)
{
- struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
- pwmchip_remove(&rcar_pwm->chip);
+ pwmchip_remove(chip);
pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 28265fdfc92a..2196080b4177 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -79,7 +79,6 @@ struct tpu_pwm_device {
struct tpu_device {
struct platform_device *pdev;
- struct pwm_chip chip;
spinlock_t lock;
void __iomem *base;
@@ -87,7 +86,10 @@ struct tpu_device {
struct tpu_pwm_device tpd[TPU_CHANNEL_MAX];
};
-#define to_tpu_device(c) container_of(c, struct tpu_device, chip)
+static inline struct tpu_device *to_tpu_device(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static void tpu_pwm_write(struct tpu_pwm_device *tpd, int reg_nr, u16 value)
{
@@ -438,12 +440,14 @@ static const struct pwm_ops tpu_pwm_ops = {
static int tpu_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct tpu_device *tpu;
int ret;
- tpu = devm_kzalloc(&pdev->dev, sizeof(*tpu), GFP_KERNEL);
- if (tpu == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, TPU_CHANNEL_MAX, sizeof(*tpu));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ tpu = to_tpu_device(chip);
spin_lock_init(&tpu->lock);
tpu->pdev = pdev;
@@ -460,15 +464,13 @@ static int tpu_probe(struct platform_device *pdev)
/* Initialize and register the device. */
platform_set_drvdata(pdev, tpu);
- tpu->chip.dev = &pdev->dev;
- tpu->chip.ops = &tpu_pwm_ops;
- tpu->chip.npwm = TPU_CHANNEL_MAX;
+ chip->ops = &tpu_pwm_ops;
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n");
- ret = devm_pwmchip_add(&pdev->dev, &tpu->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to register PWM chip\n");
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index a7c647e37837..0fa7575dbb54 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -30,7 +30,6 @@
#define PWM_LP_DISABLE (0 << 8)
struct rockchip_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
struct clk *pclk;
const struct rockchip_pwm_data *data;
@@ -54,7 +53,7 @@ struct rockchip_pwm_data {
static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct rockchip_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int rockchip_pwm_get_state(struct pwm_chip *chip,
@@ -296,14 +295,16 @@ MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids);
static int rockchip_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct rockchip_pwm_chip *pc;
u32 enable_conf, ctrl;
bool enabled;
int ret, count;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_rockchip_pwm_chip(chip);
pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
@@ -337,18 +338,16 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
goto err_clk;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pc->data = device_get_match_data(&pdev->dev);
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &rockchip_pwm_ops;
- pc->chip.npwm = 1;
+ chip->ops = &rockchip_pwm_ops;
enable_conf = pc->data->enable_conf;
ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
enabled = (ctrl & enable_conf) == enable_conf;
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
goto err_pclk;
@@ -372,9 +371,10 @@ err_clk:
static void rockchip_pwm_remove(struct platform_device *pdev)
{
- struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
clk_unprepare(pc->pclk);
clk_unprepare(pc->clk);
diff --git a/drivers/pwm/pwm-rz-mtu3.c b/drivers/pwm/pwm-rz-mtu3.c
index bdda315b3bd3..ab39bd37edaf 100644
--- a/drivers/pwm/pwm-rz-mtu3.c
+++ b/drivers/pwm/pwm-rz-mtu3.c
@@ -61,7 +61,6 @@ struct rz_mtu3_pwm_channel {
/**
* struct rz_mtu3_pwm_chip - MTU3 pwm private data
*
- * @chip: MTU3 pwm chip data
* @clk: MTU3 module clock
* @lock: Lock to prevent concurrent access for usage count
* @rate: MTU3 clock rate
@@ -72,7 +71,6 @@ struct rz_mtu3_pwm_channel {
*/
struct rz_mtu3_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
struct mutex lock;
unsigned long rate;
@@ -92,7 +90,7 @@ static const struct rz_mtu3_channel_io_map channel_map[] = {
static inline struct rz_mtu3_pwm_chip *to_rz_mtu3_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct rz_mtu3_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void rz_mtu3_pwm_read_tgr_registers(struct rz_mtu3_pwm_channel *priv,
@@ -211,15 +209,15 @@ static void rz_mtu3_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&rz_mtu3_pwm->lock);
}
-static int rz_mtu3_pwm_enable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
- struct pwm_device *pwm)
+static int rz_mtu3_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
struct rz_mtu3_pwm_channel *priv;
u32 ch;
u8 val;
int rc;
- rc = pm_runtime_resume_and_get(rz_mtu3_pwm->chip.dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
@@ -243,9 +241,9 @@ static int rz_mtu3_pwm_enable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
return 0;
}
-static void rz_mtu3_pwm_disable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
- struct pwm_device *pwm)
+static void rz_mtu3_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
struct rz_mtu3_pwm_channel *priv;
u32 ch;
@@ -265,7 +263,7 @@ static void rz_mtu3_pwm_disable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
mutex_unlock(&rz_mtu3_pwm->lock);
- pm_runtime_put_sync(rz_mtu3_pwm->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -274,7 +272,7 @@ static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
int rc;
- rc = pm_runtime_resume_and_get(chip->dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
@@ -307,7 +305,7 @@ static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
state->polarity = PWM_POLARITY_NORMAL;
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -362,7 +360,7 @@ static int rz_mtu3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (!pwm->state.enabled) {
int rc;
- rc = pm_runtime_resume_and_get(chip->dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
}
@@ -399,7 +397,7 @@ static int rz_mtu3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* If the PWM is not enabled, turn the clock off again to save power. */
if (!pwm->state.enabled)
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -416,7 +414,7 @@ static int rz_mtu3_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (enabled)
- rz_mtu3_pwm_disable(rz_mtu3_pwm, pwm);
+ rz_mtu3_pwm_disable(chip, pwm);
return 0;
}
@@ -428,7 +426,7 @@ static int rz_mtu3_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
if (!enabled)
- ret = rz_mtu3_pwm_enable(rz_mtu3_pwm, pwm);
+ ret = rz_mtu3_pwm_enable(chip, pwm);
return ret;
}
@@ -442,7 +440,8 @@ static const struct pwm_ops rz_mtu3_pwm_ops = {
static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
{
- struct rz_mtu3_pwm_chip *rz_mtu3_pwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
clk_disable_unprepare(rz_mtu3_pwm->clk);
@@ -451,7 +450,8 @@ static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
static int rz_mtu3_pwm_pm_runtime_resume(struct device *dev)
{
- struct rz_mtu3_pwm_chip *rz_mtu3_pwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
return clk_prepare_enable(rz_mtu3_pwm->clk);
}
@@ -462,24 +462,28 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rz_mtu3_pwm_pm_ops,
static void rz_mtu3_pwm_pm_disable(void *data)
{
- struct rz_mtu3_pwm_chip *rz_mtu3_pwm = data;
+ struct pwm_chip *chip = data;
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
clk_rate_exclusive_put(rz_mtu3_pwm->clk);
- pm_runtime_disable(rz_mtu3_pwm->chip.dev);
- pm_runtime_set_suspended(rz_mtu3_pwm->chip.dev);
+ pm_runtime_disable(pwmchip_parent(chip));
+ pm_runtime_set_suspended(pwmchip_parent(chip));
}
static int rz_mtu3_pwm_probe(struct platform_device *pdev)
{
struct rz_mtu3 *parent_ddata = dev_get_drvdata(pdev->dev.parent);
struct rz_mtu3_pwm_chip *rz_mtu3_pwm;
+ struct pwm_chip *chip;
struct device *dev = &pdev->dev;
unsigned int i, j = 0;
int ret;
- rz_mtu3_pwm = devm_kzalloc(&pdev->dev, sizeof(*rz_mtu3_pwm), GFP_KERNEL);
- if (!rz_mtu3_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, RZ_MTU3_MAX_PWM_CHANNELS,
+ sizeof(*rz_mtu3_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
rz_mtu3_pwm->clk = parent_ddata->clk;
@@ -494,7 +498,7 @@ static int rz_mtu3_pwm_probe(struct platform_device *pdev)
}
mutex_init(&rz_mtu3_pwm->lock);
- platform_set_drvdata(pdev, rz_mtu3_pwm);
+ platform_set_drvdata(pdev, chip);
ret = clk_prepare_enable(rz_mtu3_pwm->clk);
if (ret)
return dev_err_probe(dev, ret, "Clock enable failed\n");
@@ -514,15 +518,13 @@ static int rz_mtu3_pwm_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- rz_mtu3_pwm->chip.dev = &pdev->dev;
ret = devm_add_action_or_reset(&pdev->dev, rz_mtu3_pwm_pm_disable,
- rz_mtu3_pwm);
+ chip);
if (ret < 0)
return ret;
- rz_mtu3_pwm->chip.ops = &rz_mtu3_pwm_ops;
- rz_mtu3_pwm->chip.npwm = RZ_MTU3_MAX_PWM_CHANNELS;
- ret = devm_pwmchip_add(&pdev->dev, &rz_mtu3_pwm->chip);
+ chip->ops = &rz_mtu3_pwm_ops;
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
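
/*
 * Editor's note: the rz-mtu3 hunks show the knock-on effect on internal
 * helpers and dev_pm_ops callbacks. With neither a pwm_chip nor a struct
 * device pointer left in the private data, helpers are reworked to take the
 * struct pwm_chip, and runtime-PM callbacks recover it from drvdata. This is
 * a hedged sketch of that shape; the "foo" names and the clock field are
 * placeholders, not taken from the patch.
 */
struct foo_pwm {
        struct clk *clk;
};

static int foo_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
        /* helpers take the chip so they reach both parent and private data */
        int ret = pm_runtime_resume_and_get(pwmchip_parent(chip));

        if (ret)
                return ret;

        /* ... start channel pwm->hwpwm using pwmchip_get_drvdata(chip) ... */
        return 0;
}

static int foo_pwm_runtime_suspend(struct device *dev)
{
        struct pwm_chip *chip = dev_get_drvdata(dev);   /* drvdata is the chip */
        struct foo_pwm *foo = pwmchip_get_drvdata(chip);

        clk_disable_unprepare(foo->clk);
        return 0;
}

static int foo_pwm_runtime_resume(struct device *dev)
{
        struct pwm_chip *chip = dev_get_drvdata(dev);
        struct foo_pwm *foo = pwmchip_get_drvdata(chip);

        return clk_prepare_enable(foo->clk);
}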
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 6e77302f7368..efb60c9f0cb3 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -69,7 +69,6 @@ struct samsung_pwm_channel {
/**
* struct samsung_pwm_chip - private data of PWM chip
- * @chip: generic PWM chip
* @variant: local copy of hardware variant data
* @inverter_mask: inverter status for all channels - one bit per channel
* @disabled_mask: disabled status for all channels - one bit per channel
@@ -80,7 +79,6 @@ struct samsung_pwm_channel {
* @channel: per channel driver data
*/
struct samsung_pwm_chip {
- struct pwm_chip chip;
struct samsung_pwm_variant variant;
u8 inverter_mask;
u8 disabled_mask;
@@ -110,7 +108,7 @@ static DEFINE_SPINLOCK(samsung_pwm_lock);
static inline
struct samsung_pwm_chip *to_samsung_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct samsung_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline unsigned int to_tcon_channel(unsigned int channel)
@@ -181,9 +179,10 @@ static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *our_chip,
return rate / (reg + 1);
}
-static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *our_chip,
+static unsigned long pwm_samsung_calc_tin(struct pwm_chip *chip,
unsigned int chan, unsigned long freq)
{
+ struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
struct samsung_pwm_variant *variant = &our_chip->variant;
unsigned long rate;
struct clk *clk;
@@ -197,12 +196,12 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *our_chip,
return rate;
}
- dev_warn(our_chip->chip.dev,
+ dev_warn(pwmchip_parent(chip),
"tclk of PWM %d is inoperational, using tdiv\n", chan);
}
rate = pwm_samsung_get_tin_rate(our_chip, chan);
- dev_dbg(our_chip->chip.dev, "tin parent at %lu\n", rate);
+ dev_dbg(pwmchip_parent(chip), "tin parent at %lu\n", rate);
/*
* Compare minimum PWM frequency that can be achieved with possible
@@ -232,7 +231,7 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
if (!(our_chip->variant.output_mask & BIT(pwm->hwpwm))) {
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
"tried to request PWM channel %d without output\n",
pwm->hwpwm);
return -EINVAL;
@@ -326,12 +325,12 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
period = NSEC_PER_SEC / period_ns;
- dev_dbg(our_chip->chip.dev, "duty_ns=%d, period_ns=%d (%u)\n",
+ dev_dbg(pwmchip_parent(chip), "duty_ns=%d, period_ns=%d (%u)\n",
duty_ns, period_ns, period);
- tin_rate = pwm_samsung_calc_tin(our_chip, pwm->hwpwm, period);
+ tin_rate = pwm_samsung_calc_tin(chip, pwm->hwpwm, period);
- dev_dbg(our_chip->chip.dev, "tin_rate=%lu\n", tin_rate);
+ dev_dbg(pwmchip_parent(chip), "tin_rate=%lu\n", tin_rate);
tin_ns = NSEC_PER_SEC / tin_rate;
tcnt = period_ns / tin_ns;
@@ -355,8 +354,7 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* -1UL will give 100% duty. */
--tcmp;
- dev_dbg(our_chip->chip.dev,
- "tin_ns=%u, tcmp=%u/%u\n", tin_ns, tcmp, tcnt);
+ dev_dbg(pwmchip_parent(chip), "tin_ns=%u, tcmp=%u/%u\n", tin_ns, tcmp, tcnt);
/* Update PWM registers. */
writel(tcnt, our_chip->base + REG_TCNTB(pwm->hwpwm));
@@ -368,7 +366,7 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
 * shortly after this update (before it autoreloaded the new values).
 * shortly after this update (before it autoreloaded the new values).
*/
if (oldtcmp == (u32) -1) {
- dev_dbg(our_chip->chip.dev, "Forcing manual update");
+ dev_dbg(pwmchip_parent(chip), "Forcing manual update");
pwm_samsung_manual_update(our_chip, pwm);
}
@@ -507,9 +505,10 @@ static const struct of_device_id samsung_pwm_matches[] = {
};
MODULE_DEVICE_TABLE(of, samsung_pwm_matches);
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
+static int pwm_samsung_parse_dt(struct pwm_chip *chip)
{
- struct device_node *np = our_chip->chip.dev->of_node;
+ struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
+ struct device_node *np = pwmchip_parent(chip)->of_node;
const struct of_device_id *match;
struct property *prop;
const __be32 *cur;
@@ -523,7 +522,7 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
if (val >= SAMSUNG_PWM_NUM) {
- dev_err(our_chip->chip.dev,
+ dev_err(pwmchip_parent(chip),
"%s: invalid channel index in samsung,pwm-outputs property\n",
__func__);
continue;
@@ -534,7 +533,7 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
return 0;
}
#else
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
+static int pwm_samsung_parse_dt(struct pwm_chip *chip)
{
return -ENODEV;
}
@@ -544,27 +543,26 @@ static int pwm_samsung_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_pwm_chip *our_chip;
+ struct pwm_chip *chip;
unsigned int chan;
int ret;
- our_chip = devm_kzalloc(&pdev->dev, sizeof(*our_chip), GFP_KERNEL);
- if (our_chip == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, SAMSUNG_PWM_NUM, sizeof(*our_chip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ our_chip = to_samsung_pwm_chip(chip);
- our_chip->chip.dev = &pdev->dev;
- our_chip->chip.ops = &pwm_samsung_ops;
- our_chip->chip.npwm = SAMSUNG_PWM_NUM;
+ chip->ops = &pwm_samsung_ops;
our_chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- ret = pwm_samsung_parse_dt(our_chip);
+ ret = pwm_samsung_parse_dt(chip);
if (ret)
return ret;
} else {
- if (!pdev->dev.platform_data) {
- dev_err(&pdev->dev, "no platform data specified\n");
- return -EINVAL;
- }
+ if (!pdev->dev.platform_data)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "no platform data specified\n");
memcpy(&our_chip->variant, pdev->dev.platform_data,
sizeof(our_chip->variant));
@@ -574,17 +572,10 @@ static int pwm_samsung_probe(struct platform_device *pdev)
if (IS_ERR(our_chip->base))
return PTR_ERR(our_chip->base);
- our_chip->base_clk = devm_clk_get(&pdev->dev, "timers");
- if (IS_ERR(our_chip->base_clk)) {
- dev_err(dev, "failed to get timer base clk\n");
- return PTR_ERR(our_chip->base_clk);
- }
-
- ret = clk_prepare_enable(our_chip->base_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable base clock\n");
- return ret;
- }
+ our_chip->base_clk = devm_clk_get_enabled(&pdev->dev, "timers");
+ if (IS_ERR(our_chip->base_clk))
+ return dev_err_probe(dev, PTR_ERR(our_chip->base_clk),
+ "failed to get timer base clk\n");
for (chan = 0; chan < SAMSUNG_PWM_NUM; ++chan)
if (our_chip->variant.output_mask & BIT(chan))
@@ -594,14 +585,11 @@ static int pwm_samsung_probe(struct platform_device *pdev)
our_chip->tclk0 = devm_clk_get(&pdev->dev, "pwm-tclk0");
our_chip->tclk1 = devm_clk_get(&pdev->dev, "pwm-tclk1");
- platform_set_drvdata(pdev, our_chip);
+ platform_set_drvdata(pdev, chip);
- ret = pwmchip_add(&our_chip->chip);
- if (ret < 0) {
- dev_err(dev, "failed to register PWM chip\n");
- clk_disable_unprepare(our_chip->base_clk);
- return ret;
- }
+ ret = devm_pwmchip_add(&pdev->dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register PWM chip\n");
dev_dbg(dev, "base_clk at %lu, tclk0 at %lu, tclk1 at %lu\n",
clk_get_rate(our_chip->base_clk),
@@ -611,19 +599,10 @@ static int pwm_samsung_probe(struct platform_device *pdev)
return 0;
}
-static void pwm_samsung_remove(struct platform_device *pdev)
-{
- struct samsung_pwm_chip *our_chip = platform_get_drvdata(pdev);
-
- pwmchip_remove(&our_chip->chip);
-
- clk_disable_unprepare(our_chip->base_clk);
-}
-
static int pwm_samsung_resume(struct device *dev)
{
- struct samsung_pwm_chip *our_chip = dev_get_drvdata(dev);
- struct pwm_chip *chip = &our_chip->chip;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
unsigned int i;
for (i = 0; i < SAMSUNG_PWM_NUM; i++) {
@@ -662,7 +641,6 @@ static struct platform_driver pwm_samsung_driver = {
.of_match_table = of_match_ptr(samsung_pwm_matches),
},
.probe = pwm_samsung_probe,
- .remove_new = pwm_samsung_remove,
};
module_platform_driver(pwm_samsung_driver);
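
/*
 * Editor's note: besides the allocation change, the samsung conversion above
 * folds clock acquisition and enabling into devm_clk_get_enabled() and
 * switches to devm_pwmchip_add(), which is what lets pwm_samsung_remove() and
 * the .remove_new hook disappear entirely. A sketch of that probe shape;
 * FOO_NPWM, the private struct and foo_pwm_ops (as in the sketch after the
 * pca9685 diff) are placeholder assumptions.
 */
#define FOO_NPWM 5      /* hypothetical channel count */

struct foo_samsung_like_pwm {
        struct clk *base_clk;
};

static int foo_samsung_like_probe(struct platform_device *pdev)
{
        struct foo_samsung_like_pwm *foo;
        struct pwm_chip *chip;
        int ret;

        chip = devm_pwmchip_alloc(&pdev->dev, FOO_NPWM, sizeof(*foo));
        if (IS_ERR(chip))
                return PTR_ERR(chip);
        foo = pwmchip_get_drvdata(chip);

        /* get + prepare_enable, undone automatically on unbind */
        foo->base_clk = devm_clk_get_enabled(&pdev->dev, "timers");
        if (IS_ERR(foo->base_clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(foo->base_clk),
                                     "failed to get timer base clk\n");

        chip->ops = &foo_pwm_ops;
        platform_set_drvdata(pdev, chip);

        /* managed registration: no remove callback needed anymore */
        ret = devm_pwmchip_add(&pdev->dev, chip);
        if (ret < 0)
                return dev_err_probe(&pdev->dev, ret,
                                     "failed to register PWM chip\n");

        return 0;
}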
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 089e50bdbbf0..ed7957cc51fd 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -41,7 +41,7 @@
#define PWM_SIFIVE_DEFAULT_PERIOD 10000000
struct pwm_sifive_ddata {
- struct pwm_chip chip;
+ struct device *parent;
struct mutex lock; /* lock to protect user_count and approx_period */
struct notifier_block notifier;
struct clk *clk;
@@ -54,7 +54,7 @@ struct pwm_sifive_ddata {
static inline
struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_sifive_ddata, chip);
+ return pwmchip_get_drvdata(chip);
}
static int pwm_sifive_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -102,7 +102,7 @@ static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
/* As scale <= 15 the shift operation cannot overflow. */
num = (unsigned long long)NSEC_PER_SEC << (PWM_SIFIVE_CMPWIDTH + scale);
ddata->real_period = div64_ul(num, rate);
- dev_dbg(ddata->chip.dev,
+ dev_dbg(ddata->parent,
"New real_period = %u ns\n", ddata->real_period);
}
@@ -185,7 +185,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!enabled) {
ret = clk_enable(ddata->clk);
if (ret) {
- dev_err(ddata->chip.dev, "Enable clk failed\n");
+ dev_err(pwmchip_parent(chip), "Enable clk failed\n");
return ret;
}
}
@@ -230,15 +230,14 @@ static int pwm_sifive_probe(struct platform_device *pdev)
u32 val;
unsigned int enabled_pwms = 0, enabled_clks = 1;
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 4, sizeof(*ddata));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ddata = pwm_sifive_chip_to_ddata(chip);
+ ddata->parent = dev;
mutex_init(&ddata->lock);
- chip = &ddata->chip;
- chip->dev = dev;
chip->ops = &pwm_sifive_ops;
- chip->npwm = 4;
ddata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddata->regs))
@@ -296,7 +295,7 @@ static int pwm_sifive_probe(struct platform_device *pdev)
goto unregister_clk;
}
- platform_set_drvdata(pdev, ddata);
+ platform_set_drvdata(pdev, chip);
dev_dbg(dev, "SiFive PWM chip registered %d PWMs\n", chip->npwm);
return 0;
@@ -314,15 +313,16 @@ disable_clk:
static void pwm_sifive_remove(struct platform_device *dev)
{
- struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
+ struct pwm_chip *chip = platform_get_drvdata(dev);
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
struct pwm_device *pwm;
int ch;
- pwmchip_remove(&ddata->chip);
+ pwmchip_remove(chip);
clk_notifier_unregister(ddata->clk, &ddata->notifier);
- for (ch = 0; ch < ddata->chip.npwm; ch++) {
- pwm = &ddata->chip.pwms[ch];
+ for (ch = 0; ch < chip->npwm; ch++) {
+ pwm = &chip->pwms[ch];
if (pwm->state.enabled)
clk_disable(ddata->clk);
}
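
/*
 * Editor's note: one wrinkle visible in the pwm-sifive hunks is that a helper
 * which is not handed a pwm_chip (the clock-rate update called from a clk
 * notifier) can no longer reach a device for logging, so the parent device is
 * cached in the driver data at probe time. A minimal sketch of that idea with
 * placeholder names; the ops are assumed defined elsewhere.
 */
struct foo_ddata {
        struct device *parent;  /* cached for use outside pwm_ops callbacks */
        unsigned int real_period;
};

static void foo_update_clock(struct foo_ddata *ddata, unsigned long rate)
{
        ddata->real_period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
        dev_dbg(ddata->parent, "New real_period = %u ns\n", ddata->real_period);
}

static int foo_sifive_like_probe(struct platform_device *pdev)
{
        struct pwm_chip *chip;
        struct foo_ddata *ddata;

        chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*ddata));
        if (IS_ERR(chip))
                return PTR_ERR(chip);
        ddata = pwmchip_get_drvdata(chip);
        ddata->parent = &pdev->dev;

        /* ... clocks, registers and notifier registration elided ... */
        chip->ops = &foo_pwm_ops;
        return devm_pwmchip_add(&pdev->dev, chip);
}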
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 88b01ff9e460..934378d6a002 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -81,14 +81,13 @@
regmap_write((priv)->regmap, (priv)->offset + (reg), (val))
struct sl28cpld_pwm {
- struct pwm_chip chip;
struct regmap *regmap;
u32 offset;
};
static inline struct sl28cpld_pwm *sl28cpld_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct sl28cpld_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int sl28cpld_pwm_get_state(struct pwm_chip *chip,
@@ -213,9 +212,10 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = sl28cpld_pwm_from_chip(chip);
priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!priv->regmap) {
@@ -231,10 +231,7 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
}
/* Initialize the pwm_chip structure */
- chip = &priv->chip;
- chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
- chip->npwm = 1;
ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret) {
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index ff991319feef..6c6f3b38c835 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -48,17 +48,15 @@
*
* @mmio_base: base address of pwm chip
* @clk: pointer to clk structure of pwm chip
- * @chip: linux pwm chip representation
*/
struct spear_pwm_chip {
void __iomem *mmio_base;
struct clk *clk;
- struct pwm_chip chip;
};
static inline struct spear_pwm_chip *to_spear_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct spear_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 spear_pwm_readl(struct spear_pwm_chip *chip, unsigned int num,
@@ -194,13 +192,15 @@ static const struct pwm_ops spear_pwm_ops = {
static int spear_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct pwm_chip *chip;
struct spear_pwm_chip *pc;
int ret;
u32 val;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, NUM_PWM, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_spear_pwm_chip(chip);
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
@@ -211,9 +211,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
"Failed to get clock\n");
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &spear_pwm_ops;
- pc->chip.npwm = NUM_PWM;
+ chip->ops = &spear_pwm_ops;
if (of_device_is_compatible(np, "st,spear1340-pwm")) {
ret = clk_enable(pc->clk);
@@ -232,7 +230,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
clk_disable(pc->clk);
}
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 77939e161006..4c76ca5e4cdd 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -34,15 +34,12 @@ struct sprd_pwm_chn {
struct sprd_pwm_chip {
void __iomem *base;
- struct device *dev;
- struct pwm_chip chip;
- int num_pwms;
struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
};
static inline struct sprd_pwm_chip* sprd_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct sprd_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -86,7 +83,7 @@ static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
*/
ret = clk_bulk_prepare_enable(SPRD_PWM_CHN_CLKS_NUM, chn->clks);
if (ret) {
- dev_err(spc->dev, "failed to enable pwm%u clocks\n",
+ dev_err(pwmchip_parent(chip), "failed to enable pwm%u clocks\n",
pwm->hwpwm);
return ret;
}
@@ -183,7 +180,7 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = clk_bulk_prepare_enable(SPRD_PWM_CHN_CLKS_NUM,
chn->clks);
if (ret) {
- dev_err(spc->dev,
+ dev_err(pwmchip_parent(chip),
"failed to enable pwm%u clocks\n",
pwm->hwpwm);
return ret;
@@ -215,65 +212,64 @@ static const struct pwm_ops sprd_pwm_ops = {
.get_state = sprd_pwm_get_state,
};
-static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
+static int sprd_pwm_clk_init(struct device *dev,
+ struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM])
{
struct clk *clk_pwm;
int ret, i;
for (i = 0; i < SPRD_PWM_CHN_NUM; i++) {
- struct sprd_pwm_chn *chn = &spc->chn[i];
int j;
for (j = 0; j < SPRD_PWM_CHN_CLKS_NUM; ++j)
- chn->clks[j].id =
+ chn[i].clks[j].id =
sprd_pwm_clks[i * SPRD_PWM_CHN_CLKS_NUM + j];
- ret = devm_clk_bulk_get(spc->dev, SPRD_PWM_CHN_CLKS_NUM,
- chn->clks);
+ ret = devm_clk_bulk_get(dev, SPRD_PWM_CHN_CLKS_NUM,
+ chn[i].clks);
if (ret) {
if (ret == -ENOENT)
break;
- return dev_err_probe(spc->dev, ret,
+ return dev_err_probe(dev, ret,
"failed to get channel clocks\n");
}
- clk_pwm = chn->clks[SPRD_PWM_CHN_OUTPUT_CLK].clk;
- chn->clk_rate = clk_get_rate(clk_pwm);
+ clk_pwm = chn[i].clks[SPRD_PWM_CHN_OUTPUT_CLK].clk;
+ chn[i].clk_rate = clk_get_rate(clk_pwm);
}
if (!i)
- return dev_err_probe(spc->dev, -ENODEV, "no available PWM channels\n");
+ return dev_err_probe(dev, -ENODEV, "no available PWM channels\n");
- spc->num_pwms = i;
-
- return 0;
+ return i;
}
static int sprd_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct sprd_pwm_chip *spc;
- int ret;
+ struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
+ int ret, npwm;
- spc = devm_kzalloc(&pdev->dev, sizeof(*spc), GFP_KERNEL);
- if (!spc)
- return -ENOMEM;
+ npwm = sprd_pwm_clk_init(&pdev->dev, chn);
+ if (npwm < 0)
+ return npwm;
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*spc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ spc = sprd_pwm_from_chip(chip);
spc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spc->base))
return PTR_ERR(spc->base);
- spc->dev = &pdev->dev;
-
- ret = sprd_pwm_clk_init(spc);
- if (ret)
- return ret;
+ memcpy(spc->chn, chn, sizeof(chn));
- spc->chip.dev = &pdev->dev;
- spc->chip.ops = &sprd_pwm_ops;
- spc->chip.npwm = spc->num_pwms;
+ chip->ops = &sprd_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &spc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
dev_err(&pdev->dev, "failed to add PWM chip\n");
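
/*
 * Editor's note: devm_pwmchip_alloc() needs the channel count up front, so the
 * sprd conversion above moves clock probing ahead of the allocation, collects
 * per-channel data in a stack-local array and memcpy()s it into the managed
 * allocation afterwards. The sketch below mirrors that ordering for a
 * hypothetical driver whose channels are discovered by per-channel clock
 * lookup; the clock names and limits are invented for illustration.
 */
#define FOO_MAX_CHN     4

struct foo_chn {
        struct clk *clk;
};

struct foo_chn_pwm {
        struct foo_chn chn[FOO_MAX_CHN];
};

static int foo_count_channels(struct device *dev,
                              struct foo_chn chn[FOO_MAX_CHN])
{
        static const char * const names[FOO_MAX_CHN] = {
                "pwm0", "pwm1", "pwm2", "pwm3", /* hypothetical clock names */
        };
        int i;

        for (i = 0; i < FOO_MAX_CHN; i++) {
                chn[i].clk = devm_clk_get_optional(dev, names[i]);
                if (IS_ERR(chn[i].clk))
                        return PTR_ERR(chn[i].clk);
                if (!chn[i].clk)
                        break;          /* first missing clock ends the list */
        }

        return i ?: -ENODEV;
}

static int foo_chn_probe(struct platform_device *pdev)
{
        struct foo_chn chn[FOO_MAX_CHN];
        struct foo_chn_pwm *foo;
        struct pwm_chip *chip;
        int npwm;

        /* the channel count must be known before the chip is allocated */
        npwm = foo_count_channels(&pdev->dev, chn);
        if (npwm < 0)
                return npwm;

        chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*foo));
        if (IS_ERR(chip))
                return PTR_ERR(chip);
        foo = pwmchip_get_drvdata(chip);
        memcpy(foo->chn, chn, sizeof(chn));

        chip->ops = &foo_pwm_ops;
        return devm_pwmchip_add(&pdev->dev, chip);
}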
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 6cf55cf34d39..39d80da0e14a 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -94,7 +94,6 @@ struct sti_pwm_chip {
struct regmap_field *pwm_cpt_en;
struct regmap_field *pwm_cpt_int_en;
struct regmap_field *pwm_cpt_int_stat;
- struct pwm_chip chip;
struct pwm_device *cur;
unsigned long configured;
unsigned int en_count;
@@ -114,7 +113,7 @@ static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = {
static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip)
{
- return container_of(chip, struct sti_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -395,8 +394,17 @@ out:
static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ struct device *dev = pc->dev;
int err;
+ if (pwm->hwpwm >= cdata->pwm_num_devs) {
+ dev_err(dev, "device %u is not valid for pwm mode\n",
+ pwm->hwpwm);
+ return -EINVAL;
+ }
+
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
@@ -498,23 +506,7 @@ static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
{
struct device *dev = pc->dev;
const struct reg_field *reg_fields;
- struct device_node *np = dev->of_node;
struct sti_pwm_compat_data *cdata = pc->cdata;
- u32 num_devs;
- int ret;
-
- ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs);
- if (!ret)
- cdata->pwm_num_devs = num_devs;
-
- ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs);
- if (!ret)
- cdata->cpt_num_devs = num_devs;
-
- if (!cdata->pwm_num_devs && !cdata->cpt_num_devs) {
- dev_err(dev, "No channels configured\n");
- return -EINVAL;
- }
reg_fields = cdata->reg_fields;
@@ -560,14 +552,33 @@ static const struct regmap_config sti_pwm_regmap_config = {
static int sti_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ u32 num_devs;
+ unsigned int pwm_num_devs = 0;
+ unsigned int cpt_num_devs = 0;
struct sti_pwm_compat_data *cdata;
+ struct pwm_chip *chip;
struct sti_pwm_chip *pc;
unsigned int i;
int irq, ret;
- pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs);
+ if (!ret)
+ pwm_num_devs = num_devs;
+
+ ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs);
+ if (!ret)
+ cpt_num_devs = num_devs;
+
+ if (!pwm_num_devs && !cpt_num_devs) {
+ dev_err(dev, "No channels configured\n");
+ return -EINVAL;
+ }
+
+ chip = devm_pwmchip_alloc(dev, max(pwm_num_devs, cpt_num_devs), sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_sti_pwmchip(chip);
cdata = devm_kzalloc(dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
@@ -600,8 +611,8 @@ static int sti_pwm_probe(struct platform_device *pdev)
cdata->reg_fields = sti_pwm_regfields;
cdata->max_prescale = 0xff;
cdata->max_pwm_cnt = 255;
- cdata->pwm_num_devs = 0;
- cdata->cpt_num_devs = 0;
+ cdata->pwm_num_devs = pwm_num_devs;
+ cdata->cpt_num_devs = cpt_num_devs;
pc->cdata = cdata;
pc->dev = dev;
@@ -644,9 +655,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
return -ENOMEM;
}
- pc->chip.dev = dev;
- pc->chip.ops = &sti_pwm_ops;
- pc->chip.npwm = pc->cdata->pwm_num_devs;
+ chip->ops = &sti_pwm_ops;
for (i = 0; i < cdata->cpt_num_devs; i++) {
struct sti_cpt_ddata *ddata = &cdata->ddata[i];
@@ -655,23 +664,24 @@ static int sti_pwm_probe(struct platform_device *pdev)
mutex_init(&ddata->lock);
}
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
clk_unprepare(pc->pwm_clk);
clk_unprepare(pc->cpt_clk);
return ret;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void sti_pwm_remove(struct platform_device *pdev)
{
- struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
clk_unprepare(pc->pwm_clk);
clk_unprepare(pc->cpt_clk);
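
/*
 * Editor's note: because the sti chip now advertises
 * max(pwm_num_devs, cpt_num_devs) channels, requests that land on a
 * capture-only channel have to be refused in .apply, as added above. A sketch
 * of that guard with placeholder names; only the hwpwm bounds check reflects
 * the patch.
 */
struct foo_mixed_pwm {
        unsigned int pwm_num_devs;      /* channels capable of PWM output */
};

static int foo_mixed_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                               const struct pwm_state *state)
{
        struct foo_mixed_pwm *foo = pwmchip_get_drvdata(chip);

        /* chip->npwm may exceed the PWM-capable range, so check hwpwm */
        if (pwm->hwpwm >= foo->pwm_num_devs) {
                dev_err(pwmchip_parent(chip),
                        "device %u is not valid for pwm mode\n", pwm->hwpwm);
                return -EINVAL;
        }

        if (state->polarity != PWM_POLARITY_NORMAL)
                return -EINVAL;

        /* ... program period and duty cycle ... */
        return 0;
}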
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 439068f3eca1..989731256f50 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -18,14 +18,13 @@
#include <linux/pwm.h>
struct stm32_pwm_lp {
- struct pwm_chip chip;
struct clk *clk;
struct regmap *regmap;
};
static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
{
- return container_of(chip, struct stm32_pwm_lp, chip);
+ return pwmchip_get_drvdata(chip);
}
/* STM32 Low-Power Timer is preceded by a configurable power-of-2 prescaler */
@@ -61,7 +60,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(div, NSEC_PER_SEC);
if (!div) {
/* Clock is too slow to achieve requested period. */
- dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period);
+ dev_dbg(pwmchip_parent(chip), "Can't reach %llu ns\n", state->period);
return -EINVAL;
}
@@ -69,7 +68,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
while (div > STM32_LPTIM_MAX_ARR) {
presc++;
if ((1 << presc) > STM32_LPTIM_MAX_PRESCALER) {
- dev_err(priv->chip.dev, "max prescaler exceeded\n");
+ dev_err(pwmchip_parent(chip), "max prescaler exceeded\n");
return -EINVAL;
}
div = prd >> presc;
@@ -130,7 +129,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
- dev_err(priv->chip.dev, "ARR/CMP registers write issue\n");
+ dev_err(pwmchip_parent(chip), "ARR/CMP registers write issue\n");
goto err;
}
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
@@ -197,36 +196,36 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_pwm_lp *priv;
+ struct pwm_chip *chip;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_stm32_pwm_lp(chip);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
- priv->chip.dev = &pdev->dev;
- priv->chip.ops = &stm32_pwm_lp_ops;
- priv->chip.npwm = 1;
+ chip->ops = &stm32_pwm_lp_ops;
- ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static int stm32_pwm_lp_suspend(struct device *dev)
{
- struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
struct pwm_state state;
- pwm_get_state(&priv->chip.pwms[0], &state);
+ pwm_get_state(&chip->pwms[0], &state);
if (state.enabled) {
dev_err(dev, "The consumer didn't stop us (%s)\n",
- priv->chip.pwms[0].label);
+ chip->pwms[0].label);
return -EBUSY;
}
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 5f10cba492ec..0c028d17c075 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -27,7 +27,6 @@ struct stm32_breakinput {
};
struct stm32_pwm {
- struct pwm_chip chip;
struct mutex lock; /* protect pwm config/enable */
struct clk *clk;
struct regmap *regmap;
@@ -40,7 +39,7 @@ struct stm32_pwm {
static inline struct stm32_pwm *to_stm32_pwm_dev(struct pwm_chip *chip)
{
- return container_of(chip, struct stm32_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static u32 active_channels(struct stm32_pwm *dev)
@@ -90,11 +89,12 @@ static u32 active_channels(struct stm32_pwm *dev)
* - Period = t2 - t0
* - Duty cycle = t1 - t0
*/
-static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm,
+static int stm32_pwm_raw_capture(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long tmo_ms, u32 *raw_prd,
u32 *raw_dty)
{
- struct device *parent = priv->chip.dev->parent;
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+ struct device *parent = pwmchip_parent(chip)->parent;
enum stm32_timers_dmas dma_id;
u32 ccen, ccr;
int ret;
@@ -170,7 +170,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
ret = clk_enable(priv->clk);
if (ret) {
- dev_err(priv->chip.dev, "failed to enable counter clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable counter clock\n");
goto unlock;
}
@@ -208,7 +208,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
TIM_CCER_CC12P : TIM_CCER_CC34P, pwm->hwpwm < 2 ?
TIM_CCER_CC2P : TIM_CCER_CC4P);
- ret = stm32_pwm_raw_capture(priv, pwm, tmo_ms, &raw_prd, &raw_dty);
+ ret = stm32_pwm_raw_capture(chip, pwm, tmo_ms, &raw_prd, &raw_dty);
if (ret)
goto stop;
@@ -229,7 +229,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
/* 2nd measure with new scale */
psc /= scale;
regmap_write(priv->regmap, TIM_PSC, psc);
- ret = stm32_pwm_raw_capture(priv, pwm, tmo_ms, &raw_prd,
+ ret = stm32_pwm_raw_capture(chip, pwm, tmo_ms, &raw_prd,
&raw_dty);
if (ret)
goto stop;
@@ -257,7 +257,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
FIELD_PREP(TIM_CCMR_IC1PSC, icpsc) |
FIELD_PREP(TIM_CCMR_IC2PSC, icpsc));
- ret = stm32_pwm_raw_capture(priv, pwm, tmo_ms, &raw_prd, &raw_dty);
+ ret = stm32_pwm_raw_capture(chip, pwm, tmo_ms, &raw_prd, &raw_dty);
if (ret)
goto stop;
@@ -605,7 +605,7 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
priv->have_complementary_output = (ccer != 0);
}
-static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
+static unsigned int stm32_pwm_detect_channels(struct regmap *regmap,
unsigned int *num_enabled)
{
u32 ccer, ccer_backup;
@@ -614,10 +614,10 @@ static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
* If channels enable bits don't exist writing 1 will have no
* effect so we can detect and count them.
*/
- regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
- regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
- regmap_read(priv->regmap, TIM_CCER, &ccer);
- regmap_write(priv->regmap, TIM_CCER, ccer_backup);
+ regmap_read(regmap, TIM_CCER, &ccer_backup);
+ regmap_set_bits(regmap, TIM_CCER, TIM_CCER_CCXE);
+ regmap_read(regmap, TIM_CCER, &ccer);
+ regmap_write(regmap, TIM_CCER, ccer_backup);
*num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);
@@ -629,14 +629,18 @@ static int stm32_pwm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct stm32_pwm *priv;
- unsigned int num_enabled;
+ unsigned int npwm, num_enabled;
unsigned int i;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ npwm = stm32_pwm_detect_channels(ddata->regmap, &num_enabled);
+
+ chip = devm_pwmchip_alloc(dev, npwm, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_stm32_pwm_dev(chip);
mutex_init(&priv->lock);
priv->regmap = ddata->regmap;
@@ -652,37 +656,36 @@ static int stm32_pwm_probe(struct platform_device *pdev)
stm32_pwm_detect_complementary(priv);
- priv->chip.dev = dev;
- priv->chip.ops = &stm32pwm_ops;
- priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
+ chip->ops = &stm32pwm_ops;
/* Initialize clock refcount to number of enabled PWM channels. */
for (i = 0; i < num_enabled; i++)
clk_enable(priv->clk);
- ret = devm_pwmchip_add(dev, &priv->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static int stm32_pwm_suspend(struct device *dev)
{
- struct stm32_pwm *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
unsigned int i;
u32 ccer, mask;
/* Look for active channels */
ccer = active_channels(priv);
- for (i = 0; i < priv->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
mask = TIM_CCER_CC1E << (i * 4);
if (ccer & mask) {
dev_err(dev, "PWM %u still in use by consumer %s\n",
- i, priv->chip.pwms[i].label);
+ i, chip->pwms[i].label);
return -EBUSY;
}
}
@@ -692,7 +695,8 @@ static int stm32_pwm_suspend(struct device *dev)
static int stm32_pwm_resume(struct device *dev)
{
- struct stm32_pwm *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
int ret;
ret = pinctrl_pm_select_default_state(dev);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index 19c0c0f39675..bb91062d5f1d 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -27,13 +27,12 @@
struct stmpe_pwm {
struct stmpe *stmpe;
- struct pwm_chip chip;
u8 last_duty;
};
static inline struct stmpe_pwm *to_stmpe_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct stmpe_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -44,7 +43,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
if (ret < 0) {
- dev_dbg(chip->dev, "error reading PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error reading PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -53,7 +52,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
if (ret) {
- dev_dbg(chip->dev, "error writing PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error writing PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -70,7 +69,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
if (ret < 0) {
- dev_dbg(chip->dev, "error reading PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error reading PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -79,7 +78,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
if (ret)
- dev_dbg(chip->dev, "error writing PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error writing PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -125,7 +124,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_set_altfunc(stmpe_pwm->stmpe, BIT(pin),
STMPE_BLOCK_PWM);
if (ret) {
- dev_err(chip->dev, "unable to connect PWM#%u to pin\n",
+ dev_err(pwmchip_parent(chip), "unable to connect PWM#%u to pin\n",
pwm->hwpwm);
return ret;
}
@@ -150,7 +149,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -ENODEV;
}
- dev_dbg(chip->dev, "PWM#%u: config duty %d ns, period %d ns\n",
+ dev_dbg(pwmchip_parent(chip), "PWM#%u: config duty %d ns, period %d ns\n",
pwm->hwpwm, duty_ns, period_ns);
if (duty_ns == 0) {
@@ -216,7 +215,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
program[1] = BRANCH;
}
- dev_dbg(chip->dev,
+ dev_dbg(pwmchip_parent(chip),
"PWM#%u: value = %02x, last_duty = %02x, program=%04x,%04x,%04x\n",
pwm->hwpwm, value, last, program[0], program[1],
program[2]);
@@ -233,7 +232,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
if (ret) {
- dev_dbg(chip->dev, "error writing register %02x: %d\n",
+ dev_dbg(pwmchip_parent(chip), "error writing register %02x: %d\n",
offset, ret);
return ret;
}
@@ -242,7 +241,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
if (ret) {
- dev_dbg(chip->dev, "error writing register %02x: %d\n",
+ dev_dbg(pwmchip_parent(chip), "error writing register %02x: %d\n",
offset, ret);
return ret;
}
@@ -255,7 +254,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* Sleep for 200ms so we're sure it will take effect */
msleep(200);
- dev_dbg(chip->dev, "programmed PWM#%u, %u bytes\n", pwm->hwpwm, i);
+ dev_dbg(pwmchip_parent(chip), "programmed PWM#%u, %u bytes\n", pwm->hwpwm, i);
return 0;
}
@@ -292,33 +291,36 @@ static const struct pwm_ops stmpe_24xx_pwm_ops = {
static int __init stmpe_pwm_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct stmpe_pwm *stmpe_pwm;
int ret;
- stmpe_pwm = devm_kzalloc(&pdev->dev, sizeof(*stmpe_pwm), GFP_KERNEL);
- if (!stmpe_pwm)
- return -ENOMEM;
+ switch (stmpe->partnum) {
+ case STMPE2401:
+ case STMPE2403:
+ break;
+ case STMPE1601:
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "STMPE1601 not yet supported\n");
+ default:
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Unknown STMPE PWM\n");
+ }
- stmpe_pwm->stmpe = stmpe;
- stmpe_pwm->chip.dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, 3, sizeof(*stmpe_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ stmpe_pwm = to_stmpe_pwm(chip);
- if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
- stmpe_pwm->chip.ops = &stmpe_24xx_pwm_ops;
- stmpe_pwm->chip.npwm = 3;
- } else {
- if (stmpe->partnum == STMPE1601)
- dev_err(&pdev->dev, "STMPE1601 not yet supported\n");
- else
- dev_err(&pdev->dev, "Unknown STMPE PWM\n");
+ stmpe_pwm->stmpe = stmpe;
- return -ENODEV;
- }
+ chip->ops = &stmpe_24xx_pwm_ops;
ret = stmpe_enable(stmpe, STMPE_BLOCK_PWM);
if (ret)
return ret;
- ret = pwmchip_add(&stmpe_pwm->chip);
+ ret = pwmchip_add(chip);
if (ret) {
stmpe_disable(stmpe, STMPE_BLOCK_PWM);
return ret;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 1a439025540d..5c29590d1821 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -81,7 +81,6 @@ struct sun4i_pwm_data {
};
struct sun4i_pwm_chip {
- struct pwm_chip chip;
struct clk *bus_clk;
struct clk *clk;
struct reset_control *rst;
@@ -92,35 +91,35 @@ struct sun4i_pwm_chip {
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct sun4i_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
-static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip,
+static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *sun4ichip,
unsigned long offset)
{
- return readl(chip->base + offset);
+ return readl(sun4ichip->base + offset);
}
-static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
+static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *sun4ichip,
u32 val, unsigned long offset)
{
- writel(val, chip->base + offset);
+ writel(val, sun4ichip->base + offset);
}
static int sun4i_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
- struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+ struct sun4i_pwm_chip *sun4ichip = to_sun4i_pwm_chip(chip);
u64 clk_rate, tmp;
u32 val;
unsigned int prescaler;
- clk_rate = clk_get_rate(sun4i_pwm->clk);
+ clk_rate = clk_get_rate(sun4ichip->clk);
if (!clk_rate)
return -EINVAL;
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ val = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG);
/*
* PWM chapter in H6 manual has a diagram which explains that if bypass
@@ -128,7 +127,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
* proved that also enable bit is ignored in this case.
*/
if ((val & BIT_CH(PWM_BYPASS, pwm->hwpwm)) &&
- sun4i_pwm->data->has_direct_mod_clk_output) {
+ sun4ichip->data->has_direct_mod_clk_output) {
state->period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, clk_rate);
state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
state->polarity = PWM_POLARITY_NORMAL;
@@ -137,7 +136,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
}
if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
- sun4i_pwm->data->has_prescaler_bypass)
+ sun4ichip->data->has_prescaler_bypass)
prescaler = 1;
else
prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
@@ -156,7 +155,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
else
state->enabled = false;
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
+ val = sun4i_pwm_readl(sun4ichip, PWM_CH_PRD(pwm->hwpwm));
tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
@@ -167,7 +166,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
return 0;
}
-static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
+static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4ichip,
const struct pwm_state *state,
u32 *dty, u32 *prd, unsigned int *prsclr,
bool *bypass)
@@ -175,9 +174,9 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
u64 clk_rate, div = 0;
unsigned int prescaler = 0;
- clk_rate = clk_get_rate(sun4i_pwm->clk);
+ clk_rate = clk_get_rate(sun4ichip->clk);
- *bypass = sun4i_pwm->data->has_direct_mod_clk_output &&
+ *bypass = sun4ichip->data->has_direct_mod_clk_output &&
state->enabled &&
(state->period * clk_rate >= NSEC_PER_SEC) &&
(state->period * clk_rate < 2 * NSEC_PER_SEC) &&
@@ -187,7 +186,7 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
if (*bypass)
return 0;
- if (sun4i_pwm->data->has_prescaler_bypass) {
+ if (sun4ichip->data->has_prescaler_bypass) {
/* First, test without any prescaler when available */
prescaler = PWM_PRESCAL_MASK;
/*
@@ -233,7 +232,7 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+ struct sun4i_pwm_chip *sun4ichip = to_sun4i_pwm_chip(chip);
struct pwm_state cstate;
u32 ctrl, duty = 0, period = 0, val;
int ret;
@@ -243,31 +242,31 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_get_state(pwm, &cstate);
if (!cstate.enabled) {
- ret = clk_prepare_enable(sun4i_pwm->clk);
+ ret = clk_prepare_enable(sun4ichip->clk);
if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable PWM clock\n");
return ret;
}
}
- ret = sun4i_pwm_calculate(sun4i_pwm, state, &duty, &period, &prescaler,
+ ret = sun4i_pwm_calculate(sun4ichip, state, &duty, &period, &prescaler,
&bypass);
if (ret) {
- dev_err(chip->dev, "period exceeds the maximum value\n");
+ dev_err(pwmchip_parent(chip), "period exceeds the maximum value\n");
if (!cstate.enabled)
- clk_disable_unprepare(sun4i_pwm->clk);
+ clk_disable_unprepare(sun4ichip->clk);
return ret;
}
- spin_lock(&sun4i_pwm->ctrl_lock);
- ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ spin_lock(&sun4ichip->ctrl_lock);
+ ctrl = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG);
- if (sun4i_pwm->data->has_direct_mod_clk_output) {
+ if (sun4ichip->data->has_direct_mod_clk_output) {
if (bypass) {
ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm);
/* We can skip the other parameters */
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
- spin_unlock(&sun4i_pwm->ctrl_lock);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
+ spin_unlock(&sun4ichip->ctrl_lock);
return 0;
}
@@ -277,14 +276,14 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
/* Prescaler changed, the clock has to be gated */
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
ctrl |= BIT_CH(prescaler, pwm->hwpwm);
}
val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+ sun4i_pwm_writel(sun4ichip, val, PWM_CH_PRD(pwm->hwpwm));
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
@@ -296,9 +295,9 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled)
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
- spin_unlock(&sun4i_pwm->ctrl_lock);
+ spin_unlock(&sun4ichip->ctrl_lock);
if (state->enabled)
return 0;
@@ -310,14 +309,14 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
else
usleep_range(delay_us, delay_us * 2);
- spin_lock(&sun4i_pwm->ctrl_lock);
- ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ spin_lock(&sun4ichip->ctrl_lock);
+ ctrl = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG);
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
- spin_unlock(&sun4i_pwm->ctrl_lock);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
+ spin_unlock(&sun4ichip->ctrl_lock);
- clk_disable_unprepare(sun4i_pwm->clk);
+ clk_disable_unprepare(sun4ichip->clk);
return 0;
}
@@ -384,17 +383,21 @@ MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
static int sun4i_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
+ const struct sun4i_pwm_data *data;
struct sun4i_pwm_chip *sun4ichip;
int ret;
- sun4ichip = devm_kzalloc(&pdev->dev, sizeof(*sun4ichip), GFP_KERNEL);
- if (!sun4ichip)
- return -ENOMEM;
-
- sun4ichip->data = of_device_get_match_data(&pdev->dev);
- if (!sun4ichip->data)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
+ chip = devm_pwmchip_alloc(&pdev->dev, data->npwm, sizeof(*sun4ichip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ sun4ichip = to_sun4i_pwm_chip(chip);
+
+ sun4ichip->data = data;
sun4ichip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sun4ichip->base))
return PTR_ERR(sun4ichip->base);
@@ -451,19 +454,17 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
goto err_bus;
}
- sun4ichip->chip.dev = &pdev->dev;
- sun4ichip->chip.ops = &sun4i_pwm_ops;
- sun4ichip->chip.npwm = sun4ichip->data->npwm;
+ chip->ops = &sun4i_pwm_ops;
spin_lock_init(&sun4ichip->ctrl_lock);
- ret = pwmchip_add(&sun4ichip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
goto err_pwm_add;
}
- platform_set_drvdata(pdev, sun4ichip);
+ platform_set_drvdata(pdev, chip);
return 0;
@@ -477,9 +478,10 @@ err_bus:
static void sun4i_pwm_remove(struct platform_device *pdev)
{
- struct sun4i_pwm_chip *sun4ichip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct sun4i_pwm_chip *sun4ichip = to_sun4i_pwm_chip(chip);
- pwmchip_remove(&sun4ichip->chip);
+ pwmchip_remove(chip);
clk_disable_unprepare(sun4ichip->bus_clk);
reset_control_assert(sun4ichip->rst);
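The pwm-sun4i conversion above is representative of the whole series in this diff: the embedded struct pwm_chip is dropped from the driver-private struct, the chip is allocated with devm_pwmchip_alloc() sized for the private data, the private data is looked up with pwmchip_get_drvdata(), and chip->dev accesses become pwmchip_parent(). Below is a minimal sketch of the resulting driver shape, using a hypothetical "foo" driver; only the pwm_chip helpers that appear in the hunks above (devm_pwmchip_alloc(), pwmchip_get_drvdata(), pwmchip_parent(), devm_pwmchip_add()) are assumed.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm {
	void __iomem *base;		/* driver data only, no embedded pwm_chip */
};

static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
{
	return pwmchip_get_drvdata(chip);	/* replaces container_of() */
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *foo = to_foo_pwm(chip);

	/* chip->dev is gone; use pwmchip_parent() for dev_*() and PM calls. */
	if (state->enabled)
		dev_dbg(pwmchip_parent(chip), "enabling hwpwm %u\n", pwm->hwpwm);

	writel(state->enabled, foo->base);	/* placeholder register write */

	return 0;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct foo_pwm *foo;

	/* The allocation carries npwm and room for the private data. */
	chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*foo));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	foo = to_foo_pwm(chip);

	foo->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);

	chip->ops = &foo_pwm_ops;

	/* drvdata now points at the pwm_chip, not at the private struct. */
	platform_set_drvdata(pdev, chip);

	return devm_pwmchip_add(&pdev->dev, chip);
}

static struct platform_driver foo_pwm_driver = {
	.probe = foo_pwm_probe,
	.driver = {
		.name = "foo-pwm",
	},
};
module_platform_driver(foo_pwm_driver);

MODULE_LICENSE("GPL");

The sunplus, tegra, tiecap, tiehrpwm, twl, twl-led, visconti, vt8500 and xilinx hunks below all follow the same shape, differing only in how npwm and the ops are chosen.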
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
index 773e2f80526e..b342b843247b 100644
--- a/drivers/pwm/pwm-sunplus.c
+++ b/drivers/pwm/pwm-sunplus.c
@@ -43,14 +43,13 @@
#define SP7021_PWM_NUM 4
struct sunplus_pwm {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct sunplus_pwm *to_sunplus_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct sunplus_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -175,12 +174,14 @@ static void sunplus_pwm_clk_release(void *data)
static int sunplus_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct sunplus_pwm *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, SP7021_PWM_NUM, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_sunplus_pwm(chip);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -203,11 +204,9 @@ static int sunplus_pwm_probe(struct platform_device *pdev)
return ret;
}
- priv->chip.dev = dev;
- priv->chip.ops = &sunplus_pwm_ops;
- priv->chip.npwm = SP7021_PWM_NUM;
+ chip->ops = &sunplus_pwm_ops;
- ret = devm_pwmchip_add(dev, &priv->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
return dev_err_probe(dev, ret, "Cannot register sunplus PWM\n");
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 82ee2f0754f9..a3d69976148f 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -65,9 +65,6 @@ struct tegra_pwm_soc {
};
struct tegra_pwm_chip {
- struct pwm_chip chip;
- struct device *dev;
-
struct clk *clk;
struct reset_control *rst;
@@ -81,7 +78,7 @@ struct tegra_pwm_chip {
static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct tegra_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 pwm_readl(struct tegra_pwm_chip *pc, unsigned int offset)
@@ -158,7 +155,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
*/
required_clk_rate *= 2;
- err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
+ err = dev_pm_opp_set_rate(pwmchip_parent(chip), required_clk_rate);
if (err < 0)
return -EINVAL;
@@ -194,7 +191,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* before writing the register. Otherwise, keep it enabled.
*/
if (!pwm_is_enabled(pwm)) {
- err = pm_runtime_resume_and_get(pc->dev);
+ err = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (err)
return err;
} else
@@ -206,7 +203,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM is not enabled, turn the clock off again to save power.
*/
if (!pwm_is_enabled(pwm))
- pm_runtime_put(pc->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -217,7 +214,7 @@ static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int rc = 0;
u32 val;
- rc = pm_runtime_resume_and_get(pc->dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
@@ -237,7 +234,7 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~PWM_ENABLE;
pwm_writel(pc, pwm->hwpwm, val);
- pm_runtime_put_sync(pc->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -272,21 +269,25 @@ static const struct pwm_ops tegra_pwm_ops = {
static int tegra_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct tegra_pwm_chip *pc;
+ const struct tegra_pwm_soc *soc;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ soc = of_device_get_match_data(&pdev->dev);
+
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_channels, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_tegra_pwm_chip(chip);
- pc->soc = of_device_get_match_data(&pdev->dev);
- pc->dev = &pdev->dev;
+ pc->soc = soc;
pc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
@@ -302,7 +303,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
return ret;
/* Set maximum frequency of the IP */
- ret = dev_pm_opp_set_rate(pc->dev, pc->soc->max_frequency);
+ ret = dev_pm_opp_set_rate(&pdev->dev, pc->soc->max_frequency);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
goto put_pm;
@@ -328,11 +329,9 @@ static int tegra_pwm_probe(struct platform_device *pdev)
reset_control_deassert(pc->rst);
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &tegra_pwm_ops;
- pc->chip.npwm = pc->soc->num_channels;
+ chip->ops = &tegra_pwm_ops;
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
reset_control_assert(pc->rst);
@@ -350,9 +349,10 @@ put_pm:
static void tegra_pwm_remove(struct platform_device *pdev)
{
- struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
reset_control_assert(pc->rst);
@@ -361,7 +361,8 @@ static void tegra_pwm_remove(struct platform_device *pdev)
static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
{
- struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
int err;
clk_disable_unprepare(pc->clk);
@@ -377,7 +378,8 @@ static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev)
{
- struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
int err;
err = pinctrl_pm_select_default_state(dev);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index d974f4414ac9..d6c2b1b1387e 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -32,7 +32,6 @@ struct ecap_context {
};
struct ecap_pwm_chip {
- struct pwm_chip chip;
unsigned int clk_rate;
void __iomem *mmio_base;
struct ecap_context ctx;
@@ -40,7 +39,7 @@ struct ecap_pwm_chip {
static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ecap_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -70,7 +69,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty_cycles = (u32)c;
}
- pm_runtime_get_sync(pc->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
value = readw(pc->mmio_base + ECCTL2);
@@ -100,7 +99,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writew(value, pc->mmio_base + ECCTL2);
}
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -111,7 +110,7 @@ static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
u16 value;
- pm_runtime_get_sync(pc->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
value = readw(pc->mmio_base + ECCTL2);
@@ -124,7 +123,7 @@ static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
writew(value, pc->mmio_base + ECCTL2);
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -135,7 +134,7 @@ static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
u16 value;
/* Leave clock enabled on enabling PWM */
- pm_runtime_get_sync(pc->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
/*
* Enable 'Free run Time stamp counter mode' to start counter
@@ -162,7 +161,7 @@ static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
writew(value, pc->mmio_base + ECCTL2);
/* Disable clock on PWM disable */
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -218,12 +217,14 @@ static int ecap_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ecap_pwm_chip *pc;
+ struct pwm_chip *chip;
struct clk *clk;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_ecap_pwm_chip(chip);
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
@@ -244,21 +245,19 @@ static int ecap_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &ecap_pwm_ops;
- pc->chip.npwm = 1;
+ chip->ops = &ecap_pwm_ops;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pm_runtime_enable(&pdev->dev);
return 0;
@@ -269,17 +268,21 @@ static void ecap_pwm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
+static void ecap_pwm_save_context(struct pwm_chip *chip)
{
- pm_runtime_get_sync(pc->chip.dev);
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+
+ pm_runtime_get_sync(pwmchip_parent(chip));
pc->ctx.ecctl2 = readw(pc->mmio_base + ECCTL2);
pc->ctx.cap4 = readl(pc->mmio_base + CAP4);
pc->ctx.cap3 = readl(pc->mmio_base + CAP3);
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
-static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
+static void ecap_pwm_restore_context(struct pwm_chip *chip)
{
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+
writel(pc->ctx.cap3, pc->mmio_base + CAP3);
writel(pc->ctx.cap4, pc->mmio_base + CAP4);
writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2);
@@ -287,10 +290,10 @@ static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
static int ecap_pwm_suspend(struct device *dev)
{
- struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
- struct pwm_device *pwm = pc->chip.pwms;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct pwm_device *pwm = chip->pwms;
- ecap_pwm_save_context(pc);
+ ecap_pwm_save_context(chip);
/* Disable explicitly if PWM is running */
if (pwm_is_enabled(pwm))
@@ -301,14 +304,14 @@ static int ecap_pwm_suspend(struct device *dev)
static int ecap_pwm_resume(struct device *dev)
{
- struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
- struct pwm_device *pwm = pc->chip.pwms;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct pwm_device *pwm = chip->pwms;
/* Enable explicitly if PWM was running */
if (pwm_is_enabled(pwm))
pm_runtime_get_sync(dev);
- ecap_pwm_restore_context(pc);
+ ecap_pwm_restore_context(chip);
return 0;
}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index af231fa74fa9..e5104725d9b7 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -105,7 +105,6 @@ struct ehrpwm_context {
};
struct ehrpwm_pwm_chip {
- struct pwm_chip chip;
unsigned long clk_rate;
void __iomem *mmio_base;
unsigned long period_cycles[NUM_PWM_CHANNEL];
@@ -116,7 +115,7 @@ struct ehrpwm_pwm_chip {
static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ehrpwm_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u16 ehrpwm_read(void __iomem *base, unsigned int offset)
@@ -256,7 +255,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (i == pwm->hwpwm)
continue;
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"period value conflicts with channel %u\n",
i);
return -EINVAL;
@@ -268,11 +267,11 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* Configure clock prescaler to support Low frequency PWM wave */
if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
&tb_divval)) {
- dev_err(chip->dev, "Unsupported values\n");
+ dev_err(pwmchip_parent(chip), "Unsupported values\n");
return -EINVAL;
}
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
/* Update clock prescaler values */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
@@ -299,7 +298,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -323,7 +322,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int ret;
/* Leave clock enabled on enabling PWM */
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
/* Disabling Action Qualifier on PWM output */
if (pwm->hwpwm) {
@@ -346,8 +345,8 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Enable TBCLK */
ret = clk_enable(pc->tbclk);
if (ret) {
- dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
- dev_name(pc->chip.dev), ret);
+ dev_err(pwmchip_parent(chip), "Failed to enable TBCLK for %s: %d\n",
+ dev_name(pwmchip_parent(chip)), ret);
return ret;
}
@@ -385,7 +384,7 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable(pc->tbclk);
/* Disable clock on PWM disable */
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -393,8 +392,8 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
if (pwm_is_enabled(pwm)) {
- dev_warn(chip->dev, "Removing PWM device without disabling\n");
- pm_runtime_put_sync(chip->dev);
+ dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n");
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
/* set period value to zero on free */
@@ -450,12 +449,14 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ehrpwm_pwm_chip *pc;
+ struct pwm_chip *chip;
struct clk *clk;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, NUM_PWM_CHANNEL, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_ehrpwm_pwm_chip(chip);
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
@@ -474,9 +475,7 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &ehrpwm_pwm_ops;
- pc->chip.npwm = NUM_PWM_CHANNEL;
+ chip->ops = &ehrpwm_pwm_ops;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
@@ -493,13 +492,13 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
return ret;
}
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
goto err_clk_unprepare;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pm_runtime_enable(&pdev->dev);
return 0;
@@ -512,18 +511,21 @@ err_clk_unprepare:
static void ehrpwm_pwm_remove(struct platform_device *pdev)
{
- struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
clk_unprepare(pc->tbclk);
pm_runtime_disable(&pdev->dev);
}
-static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
+static void ehrpwm_pwm_save_context(struct pwm_chip *chip)
{
- pm_runtime_get_sync(pc->chip.dev);
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+
+ pm_runtime_get_sync(pwmchip_parent(chip));
pc->ctx.tbctl = ehrpwm_read(pc->mmio_base, TBCTL);
pc->ctx.tbprd = ehrpwm_read(pc->mmio_base, TBPRD);
@@ -534,11 +536,13 @@ static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
pc->ctx.aqsfrc = ehrpwm_read(pc->mmio_base, AQSFRC);
pc->ctx.aqcsfrc = ehrpwm_read(pc->mmio_base, AQCSFRC);
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
-static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
+static void ehrpwm_pwm_restore_context(struct pwm_chip *chip)
{
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+
ehrpwm_write(pc->mmio_base, TBPRD, pc->ctx.tbprd);
ehrpwm_write(pc->mmio_base, CMPA, pc->ctx.cmpa);
ehrpwm_write(pc->mmio_base, CMPB, pc->ctx.cmpb);
@@ -551,13 +555,13 @@ static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
static int ehrpwm_pwm_suspend(struct device *dev)
{
- struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
unsigned int i;
- ehrpwm_pwm_save_context(pc);
+ ehrpwm_pwm_save_context(chip);
- for (i = 0; i < pc->chip.npwm; i++) {
- struct pwm_device *pwm = &pc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!pwm_is_enabled(pwm))
continue;
@@ -571,11 +575,11 @@ static int ehrpwm_pwm_suspend(struct device *dev)
static int ehrpwm_pwm_resume(struct device *dev)
{
- struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
unsigned int i;
- for (i = 0; i < pc->chip.npwm; i++) {
- struct pwm_device *pwm = &pc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!pwm_is_enabled(pwm))
continue;
@@ -584,7 +588,7 @@ static int ehrpwm_pwm_resume(struct device *dev)
pm_runtime_get_sync(dev);
}
- ehrpwm_pwm_restore_context(pc);
+ ehrpwm_pwm_restore_context(chip);
return 0;
}
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index c670ccb81653..4b10a8dab312 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -62,13 +62,12 @@
#define TWL6040_LED_MODE_MASK 0x03
struct twl_pwmled_chip {
- struct pwm_chip chip;
struct mutex mutex;
};
static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip)
{
- return container_of(chip, struct twl_pwmled_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -100,7 +99,7 @@ static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = twl_i2c_write(TWL4030_MODULE_LED, pwm_config, base, 2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
@@ -114,7 +113,7 @@ static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read LEDEN\n", pwm->label);
goto out;
}
@@ -122,7 +121,7 @@ static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -139,7 +138,7 @@ static void twl4030_pwmled_disable(struct pwm_chip *chip,
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read LEDEN\n", pwm->label);
goto out;
}
@@ -147,7 +146,7 @@ static void twl4030_pwmled_disable(struct pwm_chip *chip,
ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -203,7 +202,7 @@ static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, on_time,
TWL6030_LED_PWM_CTRL1);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
@@ -217,7 +216,7 @@ static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -227,7 +226,7 @@ static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -244,7 +243,7 @@ static void twl6030_pwmled_disable(struct pwm_chip *chip,
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -254,7 +253,7 @@ static void twl6030_pwmled_disable(struct pwm_chip *chip,
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -295,7 +294,7 @@ static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -305,7 +304,7 @@ static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to request PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -321,7 +320,7 @@ static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -331,7 +330,7 @@ static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to free PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -345,25 +344,29 @@ static const struct pwm_ops twl6030_pwmled_ops = {
static int twl_pwmled_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct twl_pwmled_chip *twl;
-
- twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
- if (!twl)
- return -ENOMEM;
+ unsigned int npwm;
+ const struct pwm_ops *ops;
if (twl_class_is_4030()) {
- twl->chip.ops = &twl4030_pwmled_ops;
- twl->chip.npwm = 2;
+ ops = &twl4030_pwmled_ops;
+ npwm = 2;
} else {
- twl->chip.ops = &twl6030_pwmled_ops;
- twl->chip.npwm = 1;
+ ops = &twl6030_pwmled_ops;
+ npwm = 1;
}
- twl->chip.dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*twl));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ twl = to_twl(chip);
+
+ chip->ops = ops;
mutex_init(&twl->mutex);
- return devm_pwmchip_add(&pdev->dev, &twl->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 68e02c9a6bf9..8f981ffff4b4 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -46,7 +46,6 @@
#define TWL6030_PWM_TOGGLE(pwm, x) ((x) << (pwm * 3))
struct twl_pwm_chip {
- struct pwm_chip chip;
struct mutex mutex;
u8 twl6030_toggle3;
u8 twl4030_pwm_mux;
@@ -54,7 +53,7 @@ struct twl_pwm_chip {
static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
{
- return container_of(chip, struct twl_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -86,7 +85,7 @@ static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = twl_i2c_write(TWL_MODULE_PWM, pwm_config, base, 2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
@@ -100,7 +99,7 @@ static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read GPBR1\n", pwm->label);
goto out;
}
@@ -108,13 +107,13 @@ static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -130,7 +129,7 @@ static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read GPBR1\n", pwm->label);
goto out;
}
@@ -138,13 +137,13 @@ static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -167,7 +166,7 @@ static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PMBR1\n", pwm->label);
goto out;
}
@@ -181,7 +180,7 @@ static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to request PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -202,7 +201,7 @@ static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PMBR1\n", pwm->label);
goto out;
}
@@ -212,7 +211,7 @@ static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to free PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -231,7 +230,7 @@ static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
goto out;
}
@@ -254,7 +253,7 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
@@ -262,7 +261,7 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
@@ -270,7 +269,7 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
@@ -341,23 +340,22 @@ static const struct pwm_ops twl6030_pwm_ops = {
static int twl_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct twl_pwm_chip *twl;
- twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
- if (!twl)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*twl));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ twl = to_twl(chip);
if (twl_class_is_4030())
- twl->chip.ops = &twl4030_pwm_ops;
+ chip->ops = &twl4030_pwm_ops;
else
- twl->chip.ops = &twl6030_pwm_ops;
-
- twl->chip.dev = &pdev->dev;
- twl->chip.npwm = 2;
+ chip->ops = &twl6030_pwm_ops;
mutex_init(&twl->mutex);
- return devm_pwmchip_add(&pdev->dev, &twl->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index 8d736d558122..9e55380957be 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -34,13 +34,12 @@
#define PIPGM_PWMC_POLARITY_MASK GENMASK(5, 5)
struct visconti_pwm_chip {
- struct pwm_chip chip;
void __iomem *base;
};
static inline struct visconti_pwm_chip *visconti_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct visconti_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -134,22 +133,22 @@ static const struct pwm_ops visconti_pwm_ops = {
static int visconti_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct visconti_pwm_chip *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 4, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = visconti_pwm_from_chip(chip);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->chip.dev = dev;
- priv->chip.ops = &visconti_pwm_ops;
- priv->chip.npwm = 4;
+ chip->ops = &visconti_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n");
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 7bfeacee05d0..016c82d65527 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -45,16 +45,19 @@
#define STATUS_ALL_UPDATE 0x0F
struct vt8500_chip {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
-#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
+static inline struct vt8500_chip *to_vt8500_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
+static inline void vt8500_pwm_busy_wait(struct pwm_chip *chip, int nr, u8 bitmask)
{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
int loops = msecs_to_loops(10);
u32 mask = bitmask << (nr << 8);
@@ -62,7 +65,7 @@ static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 b
cpu_relax();
if (unlikely(!loops))
- dev_warn(vt8500->chip.dev, "Waiting for status bits 0x%x to clear timed out\n",
+ dev_warn(pwmchip_parent(chip), "Waiting for status bits 0x%x to clear timed out\n",
mask);
}
@@ -77,7 +80,7 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
err = clk_enable(vt8500->clk);
if (err < 0) {
- dev_err(chip->dev, "failed to enable clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable clock\n");
return err;
}
@@ -103,18 +106,18 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
dc = div64_u64(c, period_ns);
writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_SCALAR_UPDATE);
writel(pv, vt8500->base + REG_PERIOD(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_PERIOD_UPDATE);
writel(dc, vt8500->base + REG_DUTY(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_DUTY_UPDATE);
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val |= CTRL_AUTOLOAD;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
return 0;
@@ -128,14 +131,14 @@ static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
err = clk_enable(vt8500->clk);
if (err < 0) {
- dev_err(chip->dev, "failed to enable clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable clock\n");
return err;
}
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val |= CTRL_ENABLE;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
return 0;
}
@@ -148,7 +151,7 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val &= ~CTRL_ENABLE;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
}
@@ -168,7 +171,7 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
val &= ~CTRL_INVERT;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
return 0;
}
@@ -231,6 +234,7 @@ MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
static int vt8500_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct vt8500_chip *vt8500;
struct device_node *np = pdev->dev.of_node;
int ret;
@@ -238,13 +242,12 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
if (!np)
return dev_err_probe(&pdev->dev, -EINVAL, "invalid devicetree node\n");
- vt8500 = devm_kzalloc(&pdev->dev, sizeof(*vt8500), GFP_KERNEL);
- if (vt8500 == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, VT8500_NR_PWMS, sizeof(*vt8500));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ vt8500 = to_vt8500_chip(chip);
- vt8500->chip.dev = &pdev->dev;
- vt8500->chip.ops = &vt8500_pwm_ops;
- vt8500->chip.npwm = VT8500_NR_PWMS;
+ chip->ops = &vt8500_pwm_ops;
vt8500->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(vt8500->clk))
@@ -254,7 +257,7 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
if (IS_ERR(vt8500->base))
return PTR_ERR(vt8500->base);
- ret = devm_pwmchip_add(&pdev->dev, &vt8500->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
index 5f3c2a6fed11..3a7deebb0d0c 100644
--- a/drivers/pwm/pwm-xilinx.c
+++ b/drivers/pwm/pwm-xilinx.c
@@ -80,15 +80,10 @@ unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
#define TCSR_PWM_CLEAR (TCSR_MDT | TCSR_LOAD)
#define TCSR_PWM_MASK (TCSR_PWM_SET | TCSR_PWM_CLEAR)
-struct xilinx_pwm_device {
- struct pwm_chip chip;
- struct xilinx_timer_priv priv;
-};
-
static inline struct xilinx_timer_priv
*xilinx_pwm_chip_to_priv(struct pwm_chip *chip)
{
- return &container_of(chip, struct xilinx_pwm_device, chip)->priv;
+ return pwmchip_get_drvdata(chip);
}
static bool xilinx_timer_pwm_enabled(u32 tcsr0, u32 tcsr1)
@@ -214,7 +209,7 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct xilinx_timer_priv *priv;
- struct xilinx_pwm_device *xilinx_pwm;
+ struct pwm_chip *chip;
u32 pwm_cells, one_timer, width;
void __iomem *regs;
@@ -225,11 +220,11 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "could not read #pwm-cells\n");
- xilinx_pwm = devm_kzalloc(dev, sizeof(*xilinx_pwm), GFP_KERNEL);
- if (!xilinx_pwm)
- return -ENOMEM;
- platform_set_drvdata(pdev, xilinx_pwm);
- priv = &xilinx_pwm->priv;
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = xilinx_pwm_chip_to_priv(chip);
+ platform_set_drvdata(pdev, chip);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
@@ -278,10 +273,8 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "Clock enable failed\n");
clk_rate_exclusive_get(priv->clk);
- xilinx_pwm->chip.dev = dev;
- xilinx_pwm->chip.ops = &xilinx_pwm_ops;
- xilinx_pwm->chip.npwm = 1;
- ret = pwmchip_add(&xilinx_pwm->chip);
+ chip->ops = &xilinx_pwm_ops;
+ ret = pwmchip_add(chip);
if (ret) {
clk_rate_exclusive_put(priv->clk);
clk_disable_unprepare(priv->clk);
@@ -293,11 +286,12 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
static void xilinx_pwm_remove(struct platform_device *pdev)
{
- struct xilinx_pwm_device *xilinx_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
- pwmchip_remove(&xilinx_pwm->chip);
- clk_rate_exclusive_put(xilinx_pwm->priv.clk);
- clk_disable_unprepare(xilinx_pwm->priv.clk);
+ pwmchip_remove(chip);
+ clk_rate_exclusive_put(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
static const struct of_device_id xilinx_pwm_of_match[] = {
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 1698609d91c8..3f434a771fb5 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -509,10 +509,10 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
* If device_create() fails, the pwm_chip is still usable by
* the kernel; it's just not exported.
*/
- parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
+ parent = device_create(&pwm_class, pwmchip_parent(chip), MKDEV(0, 0), chip,
"pwmchip%d", chip->id);
if (IS_ERR(parent)) {
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
"device_create failed for pwm_chip sysfs export\n");
}
}
diff --git a/drivers/ras/Kconfig b/drivers/ras/Kconfig
index c2a236f2e846..fc4f4bb94a4c 100644
--- a/drivers/ras/Kconfig
+++ b/drivers/ras/Kconfig
@@ -32,5 +32,18 @@ menuconfig RAS
if RAS
source "arch/x86/ras/Kconfig"
+source "drivers/ras/amd/atl/Kconfig"
+
+config RAS_FMPM
+ tristate "FRU Memory Poison Manager"
+ default m
+ depends on AMD_ATL && ACPI_APEI
+ help
+ Support saving and restoring memory error information across reboot
+ using ACPI ERST as persistent storage. Error information is saved with
+ the UEFI CPER "FRU Memory Poison" section format.
+
+ Memory will be retired during boot time and run time depending on
+ platform-specific policies.
endif
diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile
index 6f0404f50107..11f95d59d397 100644
--- a/drivers/ras/Makefile
+++ b/drivers/ras/Makefile
@@ -2,3 +2,6 @@
obj-$(CONFIG_RAS) += ras.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_RAS_CEC) += cec.o
+
+obj-$(CONFIG_RAS_FMPM) += amd/fmpm.o
+obj-y += amd/atl/
diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig
new file mode 100644
index 000000000000..df49c23e7f62
--- /dev/null
+++ b/drivers/ras/amd/atl/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# AMD Address Translation Library Kconfig
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc.
+# All Rights Reserved.
+#
+# Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+
+config AMD_ATL
+ tristate "AMD Address Translation Library"
+ depends on AMD_NB && X86_64 && RAS
+ depends on MEMORY_FAILURE
+ default N
+ help
+ This library includes support for implementation-specific
+ address translation procedures needed for various error
+ handling cases.
+
+ Enable this option if using DRAM ECC on Zen-based systems
+ and OS-based error handling.
diff --git a/drivers/ras/amd/atl/Makefile b/drivers/ras/amd/atl/Makefile
new file mode 100644
index 000000000000..4acd5f05bd9c
--- /dev/null
+++ b/drivers/ras/amd/atl/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# AMD Address Translation Library Makefile
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc.
+# All Rights Reserved.
+#
+# Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+
+amd_atl-y := access.o
+amd_atl-y += core.o
+amd_atl-y += dehash.o
+amd_atl-y += denormalize.o
+amd_atl-y += map.o
+amd_atl-y += system.o
+amd_atl-y += umc.o
+
+obj-$(CONFIG_AMD_ATL) += amd_atl.o
diff --git a/drivers/ras/amd/atl/access.c b/drivers/ras/amd/atl/access.c
new file mode 100644
index 000000000000..ee4661ed28ba
--- /dev/null
+++ b/drivers/ras/amd/atl/access.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * access.c : DF Indirect Access functions
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/* Protect the PCI config register pairs used for DF indirect access. */
+static DEFINE_MUTEX(df_indirect_mutex);
+
+/*
+ * Data Fabric Indirect Access uses FICAA/FICAD.
+ *
+ * Fabric Indirect Configuration Access Address (FICAA): constructed based
+ * on the device's Instance Id and the PCI function and register offset of
+ * the desired register.
+ *
+ * Fabric Indirect Configuration Access Data (FICAD): there are FICAD
+ * low and high registers but so far only the low register is needed.
+ *
+ * Use Instance Id 0xFF to indicate a broadcast read.
+ */
+#define DF_BROADCAST 0xFF
+
+#define DF_FICAA_INST_EN BIT(0)
+#define DF_FICAA_REG_NUM GENMASK(10, 1)
+#define DF_FICAA_FUNC_NUM GENMASK(13, 11)
+#define DF_FICAA_INST_ID GENMASK(23, 16)
+
+#define DF_FICAA_REG_NUM_LEGACY GENMASK(10, 2)
+
+static u16 get_accessible_node(u16 node)
+{
+ /*
+ * On heterogeneous systems, not all AMD Nodes are accessible
+ * through software-visible registers. The Node ID needs to be
+ * adjusted for register accesses. But its value should not be
+ * changed for the translation methods.
+ */
+ if (df_cfg.flags.heterogeneous) {
+ /* Only Node 0 is accessible on DF3.5 systems. */
+ if (df_cfg.rev == DF3p5)
+ node = 0;
+
+ /*
+ * Only the first Node in each Socket is accessible on
+ * DF4.5 systems, and this is visible to software as one
+ * Fabric per Socket. The Socket ID can be derived from
+ * the Node ID and global shift values.
+ */
+ if (df_cfg.rev == DF4p5)
+ node >>= df_cfg.socket_id_shift - df_cfg.node_id_shift;
+ }
+
+ return node;
+}
+
+static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ u32 ficaa_addr = 0x8C, ficad_addr = 0xB8;
+ struct pci_dev *F4;
+ int err = -ENODEV;
+ u32 ficaa = 0;
+
+ node = get_accessible_node(node);
+ if (node >= amd_nb_num())
+ goto out;
+
+ F4 = node_to_amd_nb(node)->link;
+ if (!F4)
+ goto out;
+
+ /* Enable instance-specific access. */
+ if (instance_id != DF_BROADCAST) {
+ ficaa |= FIELD_PREP(DF_FICAA_INST_EN, 1);
+ ficaa |= FIELD_PREP(DF_FICAA_INST_ID, instance_id);
+ }
+
+ /*
+ * The two least-significant bits are masked when inputting the
+ * register offset to FICAA.
+ */
+ reg >>= 2;
+
+ if (df_cfg.flags.legacy_ficaa) {
+ ficaa_addr = 0x5C;
+ ficad_addr = 0x98;
+
+ ficaa |= FIELD_PREP(DF_FICAA_REG_NUM_LEGACY, reg);
+ } else {
+ ficaa |= FIELD_PREP(DF_FICAA_REG_NUM, reg);
+ }
+
+ ficaa |= FIELD_PREP(DF_FICAA_FUNC_NUM, func);
+
+ mutex_lock(&df_indirect_mutex);
+
+ err = pci_write_config_dword(F4, ficaa_addr, ficaa);
+ if (err) {
+ pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
+ goto out_unlock;
+ }
+
+ err = pci_read_config_dword(F4, ficad_addr, lo);
+ if (err)
+ pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
+
+ pr_debug("node=%u inst=0x%x func=0x%x reg=0x%x val=0x%x",
+ node, instance_id, func, reg << 2, *lo);
+
+out_unlock:
+ mutex_unlock(&df_indirect_mutex);
+
+out:
+ return err;
+}
+
+int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ return __df_indirect_read(node, func, reg, instance_id, lo);
+}
+
+int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
+{
+ return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
+}
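The comment block in access.c describes how a Fabric Indirect Configuration Access Address (FICAA) value is built from the instance ID, the PCI function and the register offset. As a standalone illustration of that packing, here is a small userspace sketch; the GENMASK32()/FIELD_PREP32() macros are simplified local stand-ins for the kernel's GENMASK()/FIELD_PREP() (from linux/bits.h and linux/bitfield.h), and the field layout is the one defined above for non-legacy systems (INST_EN bit 0, REG_NUM bits 10:1, FUNC_NUM bits 13:11, INST_ID bits 23:16; legacy FICAA would use bits 10:2 for REG_NUM instead).

/*
 * Standalone sketch of the FICAA composition done in __df_indirect_read().
 * The example values in main() are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DF_FICAA_INST_EN	(1u << 0)
#define DF_FICAA_REG_NUM	GENMASK32(10, 1)
#define DF_FICAA_FUNC_NUM	GENMASK32(13, 11)
#define DF_FICAA_INST_ID	GENMASK32(23, 16)

static uint32_t make_ficaa(uint8_t func, uint16_t reg, uint8_t instance_id)
{
	uint32_t ficaa = 0;

	/* Instance-specific (non-broadcast) access. */
	ficaa |= FIELD_PREP32(DF_FICAA_INST_EN, 1);
	ficaa |= FIELD_PREP32(DF_FICAA_INST_ID, instance_id);

	/* The two least-significant bits of the register offset are dropped. */
	ficaa |= FIELD_PREP32(DF_FICAA_REG_NUM, reg >> 2);
	ficaa |= FIELD_PREP32(DF_FICAA_FUNC_NUM, func);

	return ficaa;
}

int main(void)
{
	/* Example: function 0, register 0x104, instance 0x10. */
	printf("FICAA = 0x%08x\n", make_ficaa(0, 0x104, 0x10));
	return 0;
}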
diff --git a/drivers/ras/amd/atl/core.c b/drivers/ras/amd/atl/core.c
new file mode 100644
index 000000000000..6dc4e06305f7
--- /dev/null
+++ b/drivers/ras/amd/atl/core.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * core.c : Module init and base translation functions
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include <linux/module.h>
+#include <asm/cpu_device_id.h>
+
+#include "internal.h"
+
+struct df_config df_cfg __read_mostly;
+
+static int addr_over_limit(struct addr_ctx *ctx)
+{
+ u64 dram_limit_addr;
+
+ if (df_cfg.rev >= DF4)
+ dram_limit_addr = FIELD_GET(DF4_DRAM_LIMIT_ADDR, ctx->map.limit);
+ else
+ dram_limit_addr = FIELD_GET(DF2_DRAM_LIMIT_ADDR, ctx->map.limit);
+
+ dram_limit_addr <<= DF_DRAM_BASE_LIMIT_LSB;
+ dram_limit_addr |= GENMASK(DF_DRAM_BASE_LIMIT_LSB - 1, 0);
+
+ /* Is calculated system address above DRAM limit address? */
+ if (ctx->ret_addr > dram_limit_addr) {
+ atl_debug(ctx, "Calculated address (0x%016llx) > DRAM limit (0x%016llx)",
+ ctx->ret_addr, dram_limit_addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool legacy_hole_en(struct addr_ctx *ctx)
+{
+ u32 reg = ctx->map.base;
+
+ if (df_cfg.rev >= DF4)
+ reg = ctx->map.ctl;
+
+ return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg);
+}
+
+static int add_legacy_hole(struct addr_ctx *ctx)
+{
+ u32 dram_hole_base;
+ u8 func = 0;
+
+ if (!legacy_hole_en(ctx))
+ return 0;
+
+ if (df_cfg.rev >= DF4)
+ func = 7;
+
+ if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base))
+ return -EINVAL;
+
+ dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
+
+ if (ctx->ret_addr >= dram_hole_base)
+ ctx->ret_addr += (BIT_ULL(32) - dram_hole_base);
+
+ return 0;
+}
+
+static u64 get_base_addr(struct addr_ctx *ctx)
+{
+ u64 base_addr;
+
+ if (df_cfg.rev >= DF4)
+ base_addr = FIELD_GET(DF4_BASE_ADDR, ctx->map.base);
+ else
+ base_addr = FIELD_GET(DF2_BASE_ADDR, ctx->map.base);
+
+ return base_addr << DF_DRAM_BASE_LIMIT_LSB;
+}
+
+static int add_base_and_hole(struct addr_ctx *ctx)
+{
+ ctx->ret_addr += get_base_addr(ctx);
+
+ if (add_legacy_hole(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool late_hole_remove(struct addr_ctx *ctx)
+{
+ if (df_cfg.rev == DF3p5)
+ return true;
+
+ if (df_cfg.rev == DF4)
+ return true;
+
+ if (ctx->map.intlv_mode == DF3_6CHAN)
+ return true;
+
+ return false;
+}
+
+unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr)
+{
+ struct addr_ctx ctx;
+
+ if (df_cfg.rev == UNKNOWN)
+ return -EINVAL;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Start from the normalized address */
+ ctx.ret_addr = addr;
+ ctx.inst_id = coh_st_inst_id;
+
+ ctx.inputs.norm_addr = addr;
+ ctx.inputs.socket_id = socket_id;
+ ctx.inputs.die_id = die_id;
+ ctx.inputs.coh_st_inst_id = coh_st_inst_id;
+
+ if (determine_node_id(&ctx, socket_id, die_id))
+ return -EINVAL;
+
+ if (get_address_map(&ctx))
+ return -EINVAL;
+
+ if (denormalize_address(&ctx))
+ return -EINVAL;
+
+ if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx))
+ return -EINVAL;
+
+ if (dehash_address(&ctx))
+ return -EINVAL;
+
+ if (late_hole_remove(&ctx) && add_base_and_hole(&ctx))
+ return -EINVAL;
+
+ if (addr_over_limit(&ctx))
+ return -EINVAL;
+
+ return ctx.ret_addr;
+}
+
+static void check_for_legacy_df_access(void)
+{
+ /*
+ * All Zen-based systems before Family 19h use the legacy
+ * DF Indirect Access (FICAA/FICAD) offsets.
+ */
+ if (boot_cpu_data.x86 < 0x19) {
+ df_cfg.flags.legacy_ficaa = true;
+ return;
+ }
+
+ /* All systems after Family 19h use the current offsets. */
+ if (boot_cpu_data.x86 > 0x19)
+ return;
+
+ /* Some Family 19h systems use the legacy offsets. */
+ switch (boot_cpu_data.x86_model) {
+ case 0x00 ... 0x0f:
+ case 0x20 ... 0x5f:
+ df_cfg.flags.legacy_ficaa = true;
+ }
+}
+
+/*
+ * This library provides functionality for AMD-based systems with a Data Fabric.
+ * The set of systems with a Data Fabric is equivalent to the set of Zen-based systems
+ * and the set of systems with the Scalable MCA feature at this time. However, these
+ * are technically independent things.
+ *
+ * It's possible to match on the PCI IDs of the Data Fabric devices, but this will be
+ * an ever-expanding list. Instead, match on the SMCA and Zen features to cover all
+ * relevant systems.
+ */
+static const struct x86_cpu_id amd_atl_cpuids[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_SMCA, NULL),
+ X86_MATCH_FEATURE(X86_FEATURE_ZEN, NULL),
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, amd_atl_cpuids);
+
+static int __init amd_atl_init(void)
+{
+ if (!x86_match_cpu(amd_atl_cpuids))
+ return -ENODEV;
+
+ if (!amd_nb_num())
+ return -ENODEV;
+
+ check_for_legacy_df_access();
+
+ if (get_df_system_info())
+ return -ENODEV;
+
+ /* Increment this module's refcount so that it can't be easily unloaded. */

+ __module_get(THIS_MODULE);
+ amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);
+
+ pr_info("AMD Address Translation Library initialized");
+ return 0;
+}
+
+/*
+ * Exit function is only needed for testing and debug. Module unload must be
+ * forced to override refcount check.
+ */
+static void __exit amd_atl_exit(void)
+{
+ amd_atl_unregister_decoder();
+}
+
+module_init(amd_atl_init);
+module_exit(amd_atl_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c
new file mode 100644
index 000000000000..4ea46262c4f5
--- /dev/null
+++ b/drivers/ras/amd/atl/dehash.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * dehash.c : Functions to account for hashing bits
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/*
+ * Verify the interleave bits are correct in the different interleaving
+ * settings.
+ *
+ * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
+ * respective interleaving is disabled.
+ */
+static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
+ u8 num_intlv_dies, u8 num_intlv_sockets)
+{
+ if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
+ pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
+ return false;
+ }
+
+ if (ctx->map.num_intlv_dies > num_intlv_dies) {
+ pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
+ return false;
+ }
+
+ if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
+ pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
+ return false;
+ }
+
+ return true;
+}
+
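+/*
+ * Dehashing sketch: hardware stored each interleave bit as the original bit
+ * XOR'd with the parity of a fixed set of upper address bits. Those upper
+ * bits are unchanged by hashing, so recomputing the parity and XOR'ing it
+ * with the stored bit recovers the original value; when the result differs
+ * from the stored bit, the bit is flipped back in ret_addr.
+ */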
+static int df2_dehash_addr(struct addr_ctx *ctx)
+{
+ u8 hashed_bit, intlv_bit, intlv_bit_pos;
+
+ if (!map_bits_valid(ctx, 8, 9, 1, 1))
+ return -EINVAL;
+
+ intlv_bit_pos = ctx->map.intlv_bit_pos;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr);
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ return 0;
+}
+
+static int df3_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u8 hashed_bit, intlv_bit, intlv_bit_pos;
+
+ if (!map_bits_valid(ctx, 8, 9, 1, 1))
+ return -EINVAL;
+
+ hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
+
+ intlv_bit_pos = ctx->map.intlv_bit_pos;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ /* Calculation complete for 2 channels. Continue for 4 and 8 channels. */
+ if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(12);
+
+ /* Calculation complete for 4 channels. Continue for 8 channels. */
+ if (ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(13);
+
+ return 0;
+}
+
+static int df3_6chan_dehash_addr(struct addr_ctx *ctx)
+{
+ u8 intlv_bit_pos = ctx->map.intlv_bit_pos;
+ u8 hashed_bit, intlv_bit, num_intlv_bits;
+ bool hash_ctl_2M, hash_ctl_1G;
+
+ if (ctx->map.intlv_mode != DF3_6CHAN) {
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ num_intlv_bits = ilog2(ctx->map.num_intlv_chan) + 1;
+
+ hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
+
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= !!(BIT_ULL(intlv_bit_pos + num_intlv_bits) & ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ intlv_bit_pos++;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ intlv_bit_pos++;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ return 0;
+}
+
+static int df4_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u8 hashed_bit, intlv_bit;
+
+ if (!map_bits_valid(ctx, 8, 8, 1, 2))
+ return -EINVAL;
+
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+
+ intlv_bit = FIELD_GET(BIT_ULL(8), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ if (ctx->map.num_intlv_sockets == 1)
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(8);
+
+ /*
+ * Hashing is possible with socket interleaving, so check the total number
+ * of channels in the system rather than DRAM map interleaving mode.
+ *
+ * Calculation complete for 2 channels. Continue for 4, 8, and 16 channels.
+ */
+ if (ctx->map.total_intlv_chan <= 2)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(12);
+
+ /* Calculation complete for 4 channels. Continue for 8 and 16 channels. */
+ if (ctx->map.total_intlv_chan <= 4)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(13);
+
+ /* Calculation complete for 8 channels. Continue for 16 channels. */
+ if (ctx->map.total_intlv_chan <= 8)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(19), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(24), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(33), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(14);
+
+ return 0;
+}
+
+static int df4p5_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ u8 hashed_bit, intlv_bit;
+ u64 rehash_vector;
+
+ if (!map_bits_valid(ctx, 8, 8, 1, 2))
+ return -EINVAL;
+
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ /*
+ * Generate a unique address to determine which bits
+ * need to be dehashed.
+ *
+ * Start with a contiguous bitmask for the total
+ * number of channels starting at bit 8.
+ *
+ * Then make a gap in the proper place based on
+ * interleave mode.
+ */
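+ /*
+ * Illustrative values: with 8 total interleaved channels the starting
+ * mask is 0x7 << 8 = 0x700. The 2K hash modes expand at bit 9 by 3 bits,
+ * giving a rehash_vector with bits {8, 12, 13}; the 1K hash modes listed
+ * below expand at bit 10 by 2 bits, giving bits {8, 9, 12}.
+ */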
+ rehash_vector = ctx->map.total_intlv_chan - 1;
+ rehash_vector <<= 8;
+
+ if (ctx->map.intlv_mode == DF4p5_NPS2_4CHAN_1K_HASH ||
+ ctx->map.intlv_mode == DF4p5_NPS1_8CHAN_1K_HASH ||
+ ctx->map.intlv_mode == DF4p5_NPS1_16CHAN_1K_HASH)
+ rehash_vector = expand_bits(10, 2, rehash_vector);
+ else
+ rehash_vector = expand_bits(9, 3, rehash_vector);
+
+ if (rehash_vector & BIT_ULL(8)) {
+ intlv_bit = FIELD_GET(BIT_ULL(8), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(40), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(8);
+ }
+
+ if (rehash_vector & BIT_ULL(9)) {
+ intlv_bit = FIELD_GET(BIT_ULL(9), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(41), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(9);
+ }
+
+ if (rehash_vector & BIT_ULL(12)) {
+ intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(42), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(12);
+ }
+
+ if (rehash_vector & BIT_ULL(13)) {
+ intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(19), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(24), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(33), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(43), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(13);
+ }
+
+ if (rehash_vector & BIT_ULL(14)) {
+ intlv_bit = FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(20), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(25), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(34), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(44), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(14);
+ }
+
+ return 0;
+}
+
+/*
+ * MI300 hash bits
+ *                                    4K  64K 2M  1G  1T  1T
+ * COH_ST_Select[0] = XOR of addr{8,  12, 15, 22, 29, 36, 43}
+ * COH_ST_Select[1] = XOR of addr{9,  13, 16, 23, 30, 37, 44}
+ * COH_ST_Select[2] = XOR of addr{10, 14, 17, 24, 31, 38, 45}
+ * COH_ST_Select[3] = XOR of addr{11,     18, 25, 32, 39, 46}
+ * COH_ST_Select[4] = XOR of addr{14,     19, 26, 33, 40, 47} aka Stack
+ * DieID[0]         = XOR of addr{12,     20, 27, 34, 41    }
+ * DieID[1]         = XOR of addr{13,     21, 28, 35, 42    }
+ */
+static int mi300_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_4k, hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ bool hashed_bit, intlv_bit, test_bit;
+ u8 num_intlv_bits, base_bit, i;
+
+ if (!map_bits_valid(ctx, 8, 8, 4, 1))
+ return -EINVAL;
+
+ hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ /* Channel bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 8 + i;
+
+ /* COH_ST_Select[4] jumps to a base bit of 14. */
+ if (i == 4)
+ base_bit = 14;
+
+ intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr;
+
+ hashed_bit = intlv_bit;
+
+ /* 4k hash bit only applies to the first 3 bits. */
+ if (i <= 2) {
+ test_bit = BIT_ULL(12 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_4k;
+ }
+
+ /* Use temporary 'test_bit' value to avoid Sparse warnings. */
+ test_bit = BIT_ULL(15 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_64k;
+ test_bit = BIT_ULL(22 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_2M;
+ test_bit = BIT_ULL(29 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1G;
+ test_bit = BIT_ULL(36 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+ test_bit = BIT_ULL(43 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ /* Die bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_dies);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 12 + i;
+
+ intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr;
+
+ hashed_bit = intlv_bit;
+
+ test_bit = BIT_ULL(20 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_64k;
+ test_bit = BIT_ULL(27 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_2M;
+ test_bit = BIT_ULL(34 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1G;
+ test_bit = BIT_ULL(41 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ return 0;
+}
+
+int dehash_address(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ /* No hashing cases. */
+ case NONE:
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ /* Hashing bits handled earlier during CS ID calculation. */
+ case DF4_NPS4_3CHAN_HASH:
+ case DF4_NPS2_5CHAN_HASH:
+ case DF4_NPS2_6CHAN_HASH:
+ case DF4_NPS1_10CHAN_HASH:
+ case DF4_NPS1_12CHAN_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ /* No hash physical address bits, so nothing to do. */
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ return 0;
+
+ case DF2_2CHAN_HASH:
+ return df2_dehash_addr(ctx);
+
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ return df3_dehash_addr(ctx);
+
+ case DF3_6CHAN:
+ return df3_6chan_dehash_addr(ctx);
+
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ return df4_dehash_addr(ctx);
+
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return df4p5_dehash_addr(ctx);
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return mi300_dehash_addr(ctx);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+}
diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c
new file mode 100644
index 000000000000..e279224288d6
--- /dev/null
+++ b/drivers/ras/amd/atl/denormalize.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * denormalize.c : Functions to account for interleaving bits
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/*
+ * Returns the Destination Fabric ID. This is the first (lowest)
+ * COH_ST Fabric ID used within a DRAM Address map.
+ */
+static u16 get_dst_fabric_id(struct addr_ctx *ctx)
+{
+ switch (df_cfg.rev) {
+ case DF2: return FIELD_GET(DF2_DST_FABRIC_ID, ctx->map.limit);
+ case DF3: return FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
+ case DF3p5: return FIELD_GET(DF3p5_DST_FABRIC_ID, ctx->map.limit);
+ case DF4: return FIELD_GET(DF4_DST_FABRIC_ID, ctx->map.ctl);
+ case DF4p5: return FIELD_GET(DF4p5_DST_FABRIC_ID, ctx->map.ctl);
+ default:
+ atl_debug_on_bad_df_rev();
+ return 0;
+ }
+}
+
+/*
+ * Make a contiguous gap in address for N bits starting at bit P.
+ *
+ * Example:
+ * address bits: [20:0]
+ * # of interleave bits (n): 3
+ * starting interleave bit (p): 8
+ *
+ * expanded address bits: [20+n : n+p][n+p-1 : p][p-1 : 0]
+ * [23 : 11][10 : 8][7 : 0]
+ */
+static u64 make_space_for_coh_st_id_at_intlv_bit(struct addr_ctx *ctx)
+{
+ return expand_bits(ctx->map.intlv_bit_pos,
+ ctx->map.total_intlv_bits,
+ ctx->ret_addr);
+}
+
+/*
+ * Make two gaps in address for N bits.
+ * First gap is a single bit at bit P.
+ * Second gap is the remaining N-1 bits at bit 12.
+ *
+ * Example:
+ * address bits: [20:0]
+ * # of interleave bits (n): 3
+ * starting interleave bit (p): 8
+ *
+ * First gap
+ * expanded address bits: [20+1 : p+1][p][p-1 : 0]
+ * [21 : 9][8][7 : 0]
+ *
+ * Second gap uses result from first.
+ * r = n - 1; remaining interleave bits
+ * expanded address bits: [21+r : 12+r][12+r-1: 12][11 : 0]
+ * [23 : 14][13 : 12][11 : 0]
+ */
+static u64 make_space_for_coh_st_id_split_2_1(struct addr_ctx *ctx)
+{
+ /* Make a single space at the interleave bit. */
+ u64 denorm_addr = expand_bits(ctx->map.intlv_bit_pos, 1, ctx->ret_addr);
+
+ /* Done if there's only a single interleave bit. */
+ if (ctx->map.total_intlv_bits <= 1)
+ return denorm_addr;
+
+ /* Make spaces for the remaining interleave bits starting at bit 12. */
+ return expand_bits(12, ctx->map.total_intlv_bits - 1, denorm_addr);
+}
+
+/*
+ * Make space for CS ID at bits [14:8] as follows:
+ *
+ * 8 channels -> bits [10:8]
+ * 16 channels -> bits [11:8]
+ * 32 channels -> bits [14,11:8]
+ *
+ * 1 die -> N/A
+ * 2 dies -> bit [12]
+ * 4 dies -> bits [13:12]
+ */
+static u64 make_space_for_coh_st_id_mi300(struct addr_ctx *ctx)
+{
+ u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+ u64 denorm_addr;
+
+ if (ctx->map.intlv_bit_pos != 8) {
+ pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
+ return ~0ULL;
+ }
+
+ /* Channel bits. Covers up to 4 bits at [11:8]. */
+ denorm_addr = expand_bits(8, min(num_intlv_bits, 4), ctx->ret_addr);
+
+ /* Die bits. Always starts at [12]. */
+ denorm_addr = expand_bits(12, ilog2(ctx->map.num_intlv_dies), denorm_addr);
+
+ /* Additional channel bit at [14]. */
+ if (num_intlv_bits > 4)
+ denorm_addr = expand_bits(14, 1, denorm_addr);
+
+ return denorm_addr;
+}
+
+/*
+ * Take the current calculated address and shift enough bits in the middle
+ * to make a gap where the interleave bits will be inserted.
+ */
+static u64 make_space_for_coh_st_id(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ case DF2_2CHAN_HASH:
+ return make_space_for_coh_st_id_at_intlv_bit(ctx);
+
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return make_space_for_coh_st_id_split_2_1(ctx);
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return make_space_for_coh_st_id_mi300(ctx);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return ~0ULL;
+ }
+}
+
+static u16 get_coh_st_id_df2(struct addr_ctx *ctx)
+{
+ u8 num_socket_intlv_bits = ilog2(ctx->map.num_intlv_sockets);
+ u8 num_die_intlv_bits = ilog2(ctx->map.num_intlv_dies);
+ u8 num_intlv_bits;
+ u16 coh_st_id, mask;
+
+ coh_st_id = ctx->coh_st_fabric_id - get_dst_fabric_id(ctx);
+
+ /* Channel interleave bits */
+ num_intlv_bits = order_base_2(ctx->map.num_intlv_chan);
+ mask = GENMASK(num_intlv_bits - 1, 0);
+ coh_st_id &= mask;
+
+ /* Die interleave bits */
+ if (num_die_intlv_bits) {
+ u16 die_bits;
+
+ mask = GENMASK(num_die_intlv_bits - 1, 0);
+ die_bits = ctx->coh_st_fabric_id & df_cfg.die_id_mask;
+ die_bits >>= df_cfg.die_id_shift;
+
+ coh_st_id |= (die_bits & mask) << num_intlv_bits;
+ num_intlv_bits += num_die_intlv_bits;
+ }
+
+ /* Socket interleave bits */
+ if (num_socket_intlv_bits) {
+ u16 socket_bits;
+
+ mask = GENMASK(num_socket_intlv_bits - 1, 0);
+ socket_bits = ctx->coh_st_fabric_id & df_cfg.socket_id_mask;
+ socket_bits >>= df_cfg.socket_id_shift;
+
+ coh_st_id |= (socket_bits & mask) << num_intlv_bits;
+ }
+
+ return coh_st_id;
+}
+
+static u16 get_coh_st_id_df4(struct addr_ctx *ctx)
+{
+ /*
+ * Start with the original component mask and the number of interleave
+ * bits for the channels in this map.
+ */
+ u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+ u16 mask = df_cfg.component_id_mask;
+
+ u16 socket_bits;
+
+ /* Set the derived Coherent Station ID to the input Coherent Station Fabric ID. */
+ u16 coh_st_id = ctx->coh_st_fabric_id & mask;
+
+ /*
+ * Subtract the "base" Destination Fabric ID.
+ * This accounts for systems with disabled Coherent Stations.
+ */
+ coh_st_id -= get_dst_fabric_id(ctx) & mask;
+
+ /*
+ * Generate and use a new mask based on the number of bits
+ * needed for channel interleaving in this map.
+ */
+ mask = GENMASK(num_intlv_bits - 1, 0);
+ coh_st_id &= mask;
+
+ /* Done if socket interleaving is not enabled. */
+ if (ctx->map.num_intlv_sockets <= 1)
+ return coh_st_id;
+
+ /*
+ * Figure out how many bits are needed for the number of
+ * interleaved sockets. And shift the derived Coherent Station ID to account
+ * for these.
+ */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_sockets);
+ coh_st_id <<= num_intlv_bits;
+
+ /* Generate a new mask for the socket interleaving bits. */
+ mask = GENMASK(num_intlv_bits - 1, 0);
+
+ /* Get the socket interleave bits from the original Coherent Station Fabric ID. */
+ socket_bits = (ctx->coh_st_fabric_id & df_cfg.socket_id_mask) >> df_cfg.socket_id_shift;
+
+ /* Apply the appropriate socket bits to the derived Coherent Station ID. */
+ coh_st_id |= socket_bits & mask;
+
+ return coh_st_id;
+}
+
+/*
+ * MI300 hash has:
+ * (C)hannel[3:0] = coh_st_id[3:0]
+ * (S)tack[0] = coh_st_id[4]
+ * (D)ie[1:0] = coh_st_id[6:5]
+ *
+ * Hashed coh_st_id is swizzled so that Stack bit is at the end.
+ * coh_st_id = SDDCCCC
+ */
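+/*
+ * Hypothetical value: a relative Fabric ID with channel 5, the stack bit
+ * set, and die 2 produces coh_st_id = S DD CCCC = 1 10 0101b = 0x65.
+ */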
+static u16 get_coh_st_id_mi300(struct addr_ctx *ctx)
+{
+ u8 channel_bits, die_bits, stack_bit;
+ u16 die_id;
+
+ /* Subtract the "base" Destination Fabric ID. */
+ ctx->coh_st_fabric_id -= get_dst_fabric_id(ctx);
+
+ die_id = (ctx->coh_st_fabric_id & df_cfg.die_id_mask) >> df_cfg.die_id_shift;
+
+ channel_bits = FIELD_GET(GENMASK(3, 0), ctx->coh_st_fabric_id);
+ stack_bit = FIELD_GET(BIT(4), ctx->coh_st_fabric_id) << 6;
+ die_bits = die_id << 4;
+
+ return stack_bit | die_bits | channel_bits;
+}
+
+/*
+ * Derive the correct Coherent Station ID that represents the interleave bits
+ * used within the system physical address. This accounts for the
+ * interleave mode, number of interleaved channels/dies/sockets, and
+ * other system/mode-specific bit swizzling.
+ *
+ * Returns: Coherent Station ID on success.
+ * All bits set on error.
+ */
+static u16 calculate_coh_st_id(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ case DF2_2CHAN_HASH:
+ return get_coh_st_id_df2(ctx);
+
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return get_coh_st_id_df4(ctx);
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return get_coh_st_id_mi300(ctx);
+
+ /* COH_ST ID is simply the COH_ST Fabric ID adjusted by the Destination Fabric ID. */
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ return ctx->coh_st_fabric_id - get_dst_fabric_id(ctx);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return ~0;
+ }
+}
+
+static u64 insert_coh_st_id_at_intlv_bit(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ return denorm_addr | (coh_st_id << ctx->map.intlv_bit_pos);
+}
+
+static u64 insert_coh_st_id_split_2_1(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ /* Insert coh_st_id[0] at the interleave bit. */
+ denorm_addr |= (coh_st_id & BIT(0)) << ctx->map.intlv_bit_pos;
+
+ /* Insert coh_st_id[2:1] at bit 12. */
+ denorm_addr |= (coh_st_id & GENMASK(2, 1)) << 11;
+
+ return denorm_addr;
+}
+
+static u64 insert_coh_st_id_split_2_2(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ /* Insert coh_st_id[1:0] at bit 8. */
+ denorm_addr |= (coh_st_id & GENMASK(1, 0)) << 8;
+
+ /*
+ * Insert coh_st_id[n:2] at bit 12. 'n' could be 2 or 3.
+ * Grab both because bit 3 will be clear if unused.
+ */
+ denorm_addr |= (coh_st_id & GENMASK(3, 2)) << 10;
+
+ return denorm_addr;
+}
+
+static u64 insert_coh_st_id(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ switch (ctx->map.intlv_mode) {
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ case DF2_2CHAN_HASH:
+ return insert_coh_st_id_at_intlv_bit(ctx, denorm_addr, coh_st_id);
+
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return insert_coh_st_id_split_2_1(ctx, denorm_addr, coh_st_id);
+
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ return insert_coh_st_id_split_2_2(ctx, denorm_addr, coh_st_id);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return ~0ULL;
+ }
+}
+
+/*
+ * MI300 systems have a fixed, hardware-defined physical-to-logical
+ * Coherent Station mapping. The Remap registers are not used.
+ */
+static const u16 phy_to_log_coh_st_map_mi300[] = {
+ 12, 13, 14, 15,
+ 8, 9, 10, 11,
+ 4, 5, 6, 7,
+ 0, 1, 2, 3,
+ 28, 29, 30, 31,
+ 24, 25, 26, 27,
+ 20, 21, 22, 23,
+ 16, 17, 18, 19,
+};
+
+static u16 get_logical_coh_st_fabric_id_mi300(struct addr_ctx *ctx)
+{
+ if (ctx->inst_id >= ARRAY_SIZE(phy_to_log_coh_st_map_mi300)) {
+ atl_debug(ctx, "Instance ID out of range");
+ return ~0;
+ }
+
+ return phy_to_log_coh_st_map_mi300[ctx->inst_id] | (ctx->node_id << df_cfg.node_id_shift);
+}
+
+static u16 get_logical_coh_st_fabric_id(struct addr_ctx *ctx)
+{
+ u16 component_id, log_fabric_id;
+
+ /* Start with the physical COH_ST Fabric ID. */
+ u16 phys_fabric_id = ctx->coh_st_fabric_id;
+
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return get_logical_coh_st_fabric_id_mi300(ctx);
+
+ /* Skip logical ID lookup if remapping is disabled. */
+ if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl) &&
+ ctx->map.intlv_mode != DF3_6CHAN)
+ return phys_fabric_id;
+
+ /* Mask off the Node ID bits to get the "local" Component ID. */
+ component_id = phys_fabric_id & df_cfg.component_id_mask;
+
+ /*
+ * Search the list of logical Component IDs for the one that
+ * matches this physical Component ID.
+ */
+ for (log_fabric_id = 0; log_fabric_id < MAX_COH_ST_CHANNELS; log_fabric_id++) {
+ if (ctx->map.remap_array[log_fabric_id] == component_id)
+ break;
+ }
+
+ if (log_fabric_id == MAX_COH_ST_CHANNELS)
+ atl_debug(ctx, "COH_ST remap entry not found for 0x%x",
+ component_id);
+
+ /* Get the Node ID bits from the physical and apply to the logical. */
+ return (phys_fabric_id & df_cfg.node_id_mask) | log_fabric_id;
+}
+
+static int denorm_addr_common(struct addr_ctx *ctx)
+{
+ u64 denorm_addr;
+ u16 coh_st_id;
+
+ /*
+ * Convert the original physical COH_ST Fabric ID to a logical value.
+ * This is required for non-power-of-two and other interleaving modes.
+ */
+ ctx->coh_st_fabric_id = get_logical_coh_st_fabric_id(ctx);
+
+ denorm_addr = make_space_for_coh_st_id(ctx);
+ coh_st_id = calculate_coh_st_id(ctx);
+ ctx->ret_addr = insert_coh_st_id(ctx, denorm_addr, coh_st_id);
+ return 0;
+}
+
+static int denorm_addr_df3_6chan(struct addr_ctx *ctx)
+{
+ u16 coh_st_id = ctx->coh_st_fabric_id & df_cfg.component_id_mask;
+ u8 total_intlv_bits = ctx->map.total_intlv_bits;
+ u8 low_bit, intlv_bit = ctx->map.intlv_bit_pos;
+ u64 msb_intlv_bits, temp_addr_a, temp_addr_b;
+ u8 np2_bits = ctx->map.np2_bits;
+
+ if (ctx->map.intlv_mode != DF3_6CHAN)
+ return -EINVAL;
+
+ /*
+ * 'np2_bits' holds the number of bits needed to cover the
+ * amount of memory (rounded up) in this map using 64K chunks.
+ *
+ * Example:
+ * Total memory in map: 6GB
+ * Rounded up to next power-of-2: 8GB
+ * Number of 64K chunks: 0x20000
+ * np2_bits = log2(# of chunks): 17
+ *
+ * Get the two most-significant interleave bits from the
+ * input address based on the following:
+ *
+ * [15 + np2_bits - total_intlv_bits : 14 + np2_bits - total_intlv_bits]
+ */
+ low_bit = 14 + np2_bits - total_intlv_bits;
+ msb_intlv_bits = ctx->ret_addr >> low_bit;
+ msb_intlv_bits &= 0x3;
+
+ /*
+ * If the two MSBs are 11b, then the logical COH_ST ID is 6 or 7.
+ * Need to adjust based on the mod3 result.
+ */
+ if (msb_intlv_bits == 3) {
+ u8 addr_mod, phys_addr_msb, msb_coh_st_id;
+
+ /* Get the remaining interleave bits from the input address. */
+ temp_addr_b = GENMASK_ULL(low_bit - 1, intlv_bit) & ctx->ret_addr;
+ temp_addr_b >>= intlv_bit;
+
+ /* Calculate the logical COH_ST offset based on mod3. */
+ addr_mod = temp_addr_b % 3;
+
+ /* Get COH_ST ID bits [2:1]. */
+ msb_coh_st_id = (coh_st_id >> 1) & 0x3;
+
+ /* Get the bit that starts the physical address bits. */
+ phys_addr_msb = (intlv_bit + np2_bits + 1);
+ phys_addr_msb &= BIT(0);
+ phys_addr_msb++;
+ phys_addr_msb *= 3 - addr_mod + msb_coh_st_id;
+ phys_addr_msb %= 3;
+
+ /* Move the physical address MSB to the correct place. */
+ temp_addr_b |= phys_addr_msb << (low_bit - total_intlv_bits - intlv_bit);
+
+ /* Generate a new COH_ST ID as follows: coh_st_id = [1, 1, coh_st_id[0]] */
+ coh_st_id &= BIT(0);
+ coh_st_id |= GENMASK(2, 1);
+ } else {
+ temp_addr_b = GENMASK_ULL(63, intlv_bit) & ctx->ret_addr;
+ temp_addr_b >>= intlv_bit;
+ }
+
+ temp_addr_a = GENMASK_ULL(intlv_bit - 1, 0) & ctx->ret_addr;
+ temp_addr_b <<= intlv_bit + total_intlv_bits;
+
+ ctx->ret_addr = temp_addr_a | temp_addr_b;
+ ctx->ret_addr |= coh_st_id << intlv_bit;
+ return 0;
+}
+
+static int denorm_addr_df4_np2(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u16 group, group_offset, log_coh_st_offset;
+ unsigned int mod_value, shift_value;
+ u16 mask = df_cfg.component_id_mask;
+ u64 temp_addr_a, temp_addr_b;
+ bool hash_pa8, hashed_bit;
+
+ switch (ctx->map.intlv_mode) {
+ case DF4_NPS4_3CHAN_HASH:
+ mod_value = 3;
+ shift_value = 13;
+ break;
+ case DF4_NPS2_6CHAN_HASH:
+ mod_value = 3;
+ shift_value = 12;
+ break;
+ case DF4_NPS1_12CHAN_HASH:
+ mod_value = 3;
+ shift_value = 11;
+ break;
+ case DF4_NPS2_5CHAN_HASH:
+ mod_value = 5;
+ shift_value = 13;
+ break;
+ case DF4_NPS1_10CHAN_HASH:
+ mod_value = 5;
+ shift_value = 12;
+ break;
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ if (ctx->map.num_intlv_sockets == 1) {
+ hash_pa8 = BIT_ULL(shift_value) & ctx->ret_addr;
+ temp_addr_a = remove_bits(shift_value, shift_value, ctx->ret_addr);
+ } else {
+ hash_pa8 = ctx->coh_st_fabric_id & df_cfg.socket_id_mask;
+ temp_addr_a = ctx->ret_addr;
+ }
+
+ /* Make a gap for the real bit [8]. */
+ temp_addr_a = expand_bits(8, 1, temp_addr_a);
+
+ /* Make an additional gap for bits [13:12], as appropriate. */
+ if (ctx->map.intlv_mode == DF4_NPS2_6CHAN_HASH ||
+ ctx->map.intlv_mode == DF4_NPS1_10CHAN_HASH) {
+ temp_addr_a = expand_bits(13, 1, temp_addr_a);
+ } else if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH) {
+ temp_addr_a = expand_bits(12, 2, temp_addr_a);
+ }
+
+ /* Keep bits [13:0]. */
+ temp_addr_a &= GENMASK_ULL(13, 0);
+
+ /* Get the appropriate high bits. */
+ shift_value += 1 - ilog2(ctx->map.num_intlv_sockets);
+ temp_addr_b = GENMASK_ULL(63, shift_value) & ctx->ret_addr;
+ temp_addr_b >>= shift_value;
+ temp_addr_b *= mod_value;
+
+ /*
+ * Coherent Stations are divided into groups.
+ *
+ * Multiples of 3 (mod3) are divided into quadrants.
+ * e.g. NP4_3CHAN -> [0, 1, 2] [6, 7, 8]
+ * [3, 4, 5] [9, 10, 11]
+ *
+ * Multiples of 5 (mod5) are divided into sides.
+ * e.g. NP2_5CHAN -> [0, 1, 2, 3, 4] [5, 6, 7, 8, 9]
+ */
+
+ /*
+ * Calculate the logical offset for the COH_ST within its DRAM Address map.
+ * e.g. if map includes [5, 6, 7, 8, 9] and target instance is '8', then
+ * log_coh_st_offset = 8 - 5 = 3
+ */
+ log_coh_st_offset = (ctx->coh_st_fabric_id & mask) - (get_dst_fabric_id(ctx) & mask);
+
+ /*
+ * Figure out the group number.
+ *
+ * Following above example,
+ * log_coh_st_offset = 3
+ * mod_value = 5
+ * group = 3 / 5 = 0
+ */
+ group = log_coh_st_offset / mod_value;
+
+ /*
+ * Figure out the offset within the group.
+ *
+ * Following above example,
+ * log_coh_st_offset = 3
+ * mod_value = 5
+ * group_offset = 3 % 5 = 3
+ */
+ group_offset = log_coh_st_offset % mod_value;
+
+ /* Adjust group_offset if the hashed bit [8] is set. */
+ if (hash_pa8) {
+ if (!group_offset)
+ group_offset = mod_value - 1;
+ else
+ group_offset--;
+ }
+
+ /* Add in the group offset to the high bits. */
+ temp_addr_b += group_offset;
+
+ /* Shift the high bits to the proper starting position. */
+ temp_addr_b <<= 14;
+
+ /* Combine the high and low bits together. */
+ ctx->ret_addr = temp_addr_a | temp_addr_b;
+
+ /* Account for hashing here instead of in dehash_address(). */
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+
+ hashed_bit = !!hash_pa8;
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ ctx->ret_addr |= hashed_bit << 8;
+
+ /* Done for 3 and 5 channel. */
+ if (ctx->map.intlv_mode == DF4_NPS4_3CHAN_HASH ||
+ ctx->map.intlv_mode == DF4_NPS2_5CHAN_HASH)
+ return 0;
+
+ /* Select the proper 'group' bit to use for Bit 13. */
+ if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH)
+ hashed_bit = !!(group & BIT(1));
+ else
+ hashed_bit = group & BIT(0);
+
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ ctx->ret_addr |= hashed_bit << 13;
+
+ /* Done for 6 and 10 channel. */
+ if (ctx->map.intlv_mode != DF4_NPS1_12CHAN_HASH)
+ return 0;
+
+ hashed_bit = group & BIT(0);
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ ctx->ret_addr |= hashed_bit << 12;
+ return 0;
+}
+
+int denormalize_address(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NONE:
+ return 0;
+ case DF4_NPS4_3CHAN_HASH:
+ case DF4_NPS2_6CHAN_HASH:
+ case DF4_NPS1_12CHAN_HASH:
+ case DF4_NPS2_5CHAN_HASH:
+ case DF4_NPS1_10CHAN_HASH:
+ return denorm_addr_df4_np2(ctx);
+ case DF3_6CHAN:
+ return denorm_addr_df3_6chan(ctx);
+ default:
+ return denorm_addr_common(ctx);
+ }
+}
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
new file mode 100644
index 000000000000..5de69e0bb0f9
--- /dev/null
+++ b/drivers/ras/amd/atl/internal.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Address Translation Library
+ *
+ * internal.h : Helper functions and common defines
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#ifndef __AMD_ATL_INTERNAL_H__
+#define __AMD_ATL_INTERNAL_H__
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/ras.h>
+
+#include <asm/amd_nb.h>
+
+#include "reg_fields.h"
+
+/* Maximum possible number of Coherent Stations within a single Data Fabric. */
+#define MAX_COH_ST_CHANNELS 32
+
+/* PCI ID for Zen4 Server DF Function 0. */
+#define DF_FUNC0_ID_ZEN4_SERVER 0x14AD1022
+
+/* PCI IDs for MI300 DF Function 0. */
+#define DF_FUNC0_ID_MI300 0x15281022
+
+/* Shift needed for adjusting register values to true values. */
+#define DF_DRAM_BASE_LIMIT_LSB 28
+#define MI300_DRAM_LIMIT_LSB 20
+
+enum df_revisions {
+ UNKNOWN,
+ DF2,
+ DF3,
+ DF3p5,
+ DF4,
+ DF4p5,
+};
+
+/* These are mapped 1:1 to the hardware values. Special cases are set at > 0x20. */
+enum intlv_modes {
+ NONE = 0x00,
+ NOHASH_2CHAN = 0x01,
+ NOHASH_4CHAN = 0x03,
+ NOHASH_8CHAN = 0x05,
+ DF3_6CHAN = 0x06,
+ NOHASH_16CHAN = 0x07,
+ NOHASH_32CHAN = 0x08,
+ DF3_COD4_2CHAN_HASH = 0x0C,
+ DF3_COD2_4CHAN_HASH = 0x0D,
+ DF3_COD1_8CHAN_HASH = 0x0E,
+ DF4_NPS4_2CHAN_HASH = 0x10,
+ DF4_NPS2_4CHAN_HASH = 0x11,
+ DF4_NPS1_8CHAN_HASH = 0x12,
+ DF4_NPS4_3CHAN_HASH = 0x13,
+ DF4_NPS2_6CHAN_HASH = 0x14,
+ DF4_NPS1_12CHAN_HASH = 0x15,
+ DF4_NPS2_5CHAN_HASH = 0x16,
+ DF4_NPS1_10CHAN_HASH = 0x17,
+ MI3_HASH_8CHAN = 0x18,
+ MI3_HASH_16CHAN = 0x19,
+ MI3_HASH_32CHAN = 0x1A,
+ DF2_2CHAN_HASH = 0x21,
+ /* DF4.5 modes are all IntLvNumChan + 0x20 */
+ DF4p5_NPS1_16CHAN_1K_HASH = 0x2C,
+ DF4p5_NPS0_24CHAN_1K_HASH = 0x2E,
+ DF4p5_NPS4_2CHAN_1K_HASH = 0x30,
+ DF4p5_NPS2_4CHAN_1K_HASH = 0x31,
+ DF4p5_NPS1_8CHAN_1K_HASH = 0x32,
+ DF4p5_NPS4_3CHAN_1K_HASH = 0x33,
+ DF4p5_NPS2_6CHAN_1K_HASH = 0x34,
+ DF4p5_NPS1_12CHAN_1K_HASH = 0x35,
+ DF4p5_NPS2_5CHAN_1K_HASH = 0x36,
+ DF4p5_NPS1_10CHAN_1K_HASH = 0x37,
+ DF4p5_NPS4_2CHAN_2K_HASH = 0x40,
+ DF4p5_NPS2_4CHAN_2K_HASH = 0x41,
+ DF4p5_NPS1_8CHAN_2K_HASH = 0x42,
+ DF4p5_NPS1_16CHAN_2K_HASH = 0x43,
+ DF4p5_NPS4_3CHAN_2K_HASH = 0x44,
+ DF4p5_NPS2_6CHAN_2K_HASH = 0x45,
+ DF4p5_NPS1_12CHAN_2K_HASH = 0x46,
+ DF4p5_NPS0_24CHAN_2K_HASH = 0x47,
+ DF4p5_NPS2_5CHAN_2K_HASH = 0x48,
+ DF4p5_NPS1_10CHAN_2K_HASH = 0x49,
+};
+
+struct df_flags {
+ __u8 legacy_ficaa : 1,
+ socket_id_shift_quirk : 1,
+ heterogeneous : 1,
+ __reserved_0 : 5;
+};
+
+struct df_config {
+ enum df_revisions rev;
+
+ /*
+ * These masks operate on the 16-bit Coherent Station IDs,
+ * e.g. Instance, Fabric, Destination, etc.
+ */
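+ /*
+ * Hypothetical example: with node_id_shift == 8 and
+ * component_id_mask == 0x00FF, a Coherent Station Fabric ID of 0x0305
+ * decodes to Node ID 3 and Component ID 5.
+ */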
+ u16 component_id_mask;
+ u16 die_id_mask;
+ u16 node_id_mask;
+ u16 socket_id_mask;
+
+ /*
+ * Least-significant bit of Node ID portion of the
+ * system-wide Coherent Station Fabric ID.
+ */
+ u8 node_id_shift;
+
+ /*
+ * Least-significant bit of Die portion of the Node ID.
+ * Adjusted to include the Node ID shift in order to apply
+ * to the Coherent Station Fabric ID.
+ */
+ u8 die_id_shift;
+
+ /*
+ * Least-significant bit of Socket portion of the Node ID.
+ * Adjusted to include the Node ID shift in order to apply
+ * to the Coherent Station Fabric ID.
+ */
+ u8 socket_id_shift;
+
+ /* Number of DRAM Address maps visible in a Coherent Station. */
+ u8 num_coh_st_maps;
+
+ /* Global flags to handle special cases. */
+ struct df_flags flags;
+};
+
+extern struct df_config df_cfg;
+
+struct dram_addr_map {
+ /*
+ * Each DRAM Address Map can operate independently
+ * in different interleaving modes.
+ */
+ enum intlv_modes intlv_mode;
+
+ /* System-wide number for this address map. */
+ u8 num;
+
+ /* Raw register values */
+ u32 base;
+ u32 limit;
+ u32 ctl;
+ u32 intlv;
+
+ /*
+ * Logical to Physical Coherent Station Remapping array
+ *
+ * Index: Logical Coherent Station Instance ID
+ * Value: Physical Coherent Station Instance ID
+ *
+ * phys_coh_st_inst_id = remap_array[log_coh_st_inst_id]
+ */
+ u8 remap_array[MAX_COH_ST_CHANNELS];
+
+ /*
+ * Number of bits covering DRAM Address map 0
+ * when interleaving is non-power-of-2.
+ *
+ * Used only for DF3_6CHAN.
+ */
+ u8 np2_bits;
+
+ /* Position of the 'interleave bit'. */
+ u8 intlv_bit_pos;
+ /* Number of channels interleaved in this map. */
+ u8 num_intlv_chan;
+ /* Number of dies interleaved in this map. */
+ u8 num_intlv_dies;
+ /* Number of sockets interleaved in this map. */
+ u8 num_intlv_sockets;
+ /*
+ * Total number of channels interleaved accounting
+ * for die and socket interleaving.
+ */
+ u8 total_intlv_chan;
+ /* Total bits needed to cover 'total_intlv_chan'. */
+ u8 total_intlv_bits;
+};
+
+/* Original input values cached for debug printing. */
+struct addr_ctx_inputs {
+ u64 norm_addr;
+ u8 socket_id;
+ u8 die_id;
+ u8 coh_st_inst_id;
+};
+
+struct addr_ctx {
+ u64 ret_addr;
+
+ struct addr_ctx_inputs inputs;
+ struct dram_addr_map map;
+
+ /* AMD Node ID calculated from Socket and Die IDs. */
+ u8 node_id;
+
+ /*
+ * Coherent Station Instance ID
+ * Local ID used within a 'node'.
+ */
+ u16 inst_id;
+
+ /*
+ * Coherent Station Fabric ID
+ * System-wide ID that includes 'node' bits.
+ */
+ u16 coh_st_fabric_id;
+};
+
+int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
+int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
+
+int get_df_system_info(void);
+int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
+int get_addr_hash_mi300(void);
+
+int get_address_map(struct addr_ctx *ctx);
+
+int denormalize_address(struct addr_ctx *ctx);
+int dehash_address(struct addr_ctx *ctx);
+
+unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr);
+unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
+
+/*
+ * Make a gap in @data that is @num_bits long starting at @bit_num.
+ * e.g. data = 11111111'b
+ * bit_num = 3
+ * num_bits = 2
+ * result = 1111100111'b
+ */
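+/*
+ * Concrete (illustrative) call: expand_bits(8, 3, 0x12345) keeps bits [7:0]
+ * (0x45), shifts everything above bit 7 up by 3, and returns 0x91845.
+ */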
+static inline u64 expand_bits(u8 bit_num, u8 num_bits, u64 data)
+{
+ u64 temp1, temp2;
+
+ if (!num_bits)
+ return data;
+
+ if (!bit_num) {
+ WARN_ON_ONCE(num_bits >= BITS_PER_LONG);
+ return data << num_bits;
+ }
+
+ WARN_ON_ONCE(bit_num >= BITS_PER_LONG);
+
+ temp1 = data & GENMASK_ULL(bit_num - 1, 0);
+
+ temp2 = data & GENMASK_ULL(63, bit_num);
+ temp2 <<= num_bits;
+
+ return temp1 | temp2;
+}
+
+/*
+ * Remove bits in @data between @low_bit and @high_bit inclusive.
+ * e.g. data = XXXYYZZZ'b
+ * low_bit = 3
+ * high_bit = 4
+ * result = XXXZZZ'b
+ */
+static inline u64 remove_bits(u8 low_bit, u8 high_bit, u64 data)
+{
+ u64 temp1, temp2;
+
+ WARN_ON_ONCE(high_bit >= BITS_PER_LONG);
+ WARN_ON_ONCE(low_bit >= BITS_PER_LONG);
+ WARN_ON_ONCE(low_bit > high_bit);
+
+ if (!low_bit)
+ return data >> (high_bit + 1);
+
+ temp1 = GENMASK_ULL(low_bit - 1, 0) & data;
+ temp2 = GENMASK_ULL(63, high_bit + 1) & data;
+ temp2 >>= high_bit - low_bit + 1;
+
+ return temp1 | temp2;
+}
+
+#define atl_debug(ctx, fmt, arg...) \
+ pr_debug("socket_id=%u die_id=%u coh_st_inst_id=%u norm_addr=0x%016llx: " fmt,\
+ (ctx)->inputs.socket_id, (ctx)->inputs.die_id,\
+ (ctx)->inputs.coh_st_inst_id, (ctx)->inputs.norm_addr, ##arg)
+
+static inline void atl_debug_on_bad_df_rev(void)
+{
+ pr_debug("Unrecognized DF rev: %u", df_cfg.rev);
+}
+
+static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx)
+{
+ atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode);
+}
+
+#endif /* __AMD_ATL_INTERNAL_H__ */
diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c
new file mode 100644
index 000000000000..8b908e8d7495
--- /dev/null
+++ b/drivers/ras/amd/atl/map.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * map.c : Functions to read and decode DRAM address maps
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+static int df2_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF2_INTLV_NUM_CHAN, ctx->map.base);
+
+ if (ctx->map.intlv_mode == 8)
+ ctx->map.intlv_mode = DF2_2CHAN_HASH;
+
+ if (ctx->map.intlv_mode != NONE &&
+ ctx->map.intlv_mode != NOHASH_2CHAN &&
+ ctx->map.intlv_mode != DF2_2CHAN_HASH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df3_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF3_INTLV_NUM_CHAN, ctx->map.base);
+ return 0;
+}
+
+static int df3p5_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF3p5_INTLV_NUM_CHAN, ctx->map.base);
+
+ if (ctx->map.intlv_mode == DF3_6CHAN)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df4_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF4_INTLV_NUM_CHAN, ctx->map.intlv);
+
+ if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH ||
+ ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH ||
+ ctx->map.intlv_mode == DF3_COD1_8CHAN_HASH ||
+ ctx->map.intlv_mode == DF3_6CHAN)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df4p5_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF4p5_INTLV_NUM_CHAN, ctx->map.intlv);
+
+ if (ctx->map.intlv_mode <= NOHASH_32CHAN)
+ return 0;
+
+ if (ctx->map.intlv_mode >= MI3_HASH_8CHAN &&
+ ctx->map.intlv_mode <= MI3_HASH_32CHAN)
+ return 0;
+
+ /*
+ * Modes matching the ranges above are returned as-is.
+ *
+ * All other modes are "fixed up" by adding 20h to make a unique value.
+ */
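+ /*
+ * For example, a raw register value of 0x10 becomes
+ * DF4p5_NPS4_2CHAN_1K_HASH (0x30).
+ */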
+ ctx->map.intlv_mode += 0x20;
+
+ return 0;
+}
+
+static int get_intlv_mode(struct addr_ctx *ctx)
+{
+ int ret;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ ret = df2_get_intlv_mode(ctx);
+ break;
+ case DF3:
+ ret = df3_get_intlv_mode(ctx);
+ break;
+ case DF3p5:
+ ret = df3p5_get_intlv_mode(ctx);
+ break;
+ case DF4:
+ ret = df4_get_intlv_mode(ctx);
+ break;
+ case DF4p5:
+ ret = df4p5_get_intlv_mode(ctx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ atl_debug_on_bad_df_rev();
+
+ return ret;
+}
+
+static u64 get_hi_addr_offset(u32 reg_dram_offset)
+{
+ u8 shift = DF_DRAM_BASE_LIMIT_LSB;
+ u64 hi_addr_offset;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ hi_addr_offset = FIELD_GET(DF2_HI_ADDR_OFFSET, reg_dram_offset);
+ break;
+ case DF3:
+ case DF3p5:
+ hi_addr_offset = FIELD_GET(DF3_HI_ADDR_OFFSET, reg_dram_offset);
+ break;
+ case DF4:
+ case DF4p5:
+ hi_addr_offset = FIELD_GET(DF4_HI_ADDR_OFFSET, reg_dram_offset);
+ break;
+ default:
+ hi_addr_offset = 0;
+ atl_debug_on_bad_df_rev();
+ }
+
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ shift = MI300_DRAM_LIMIT_LSB;
+
+ return hi_addr_offset << shift;
+}
+
+/*
+ * Returns: 0 if offset is disabled.
+ * 1 if offset is enabled.
+ * -EINVAL on error.
+ */
+static int get_dram_offset(struct addr_ctx *ctx, u64 *norm_offset)
+{
+ u32 reg_dram_offset;
+ u8 map_num;
+
+ /* Should not be called for map 0. */
+ if (!ctx->map.num) {
+ atl_debug(ctx, "Trying to find DRAM offset for map 0");
+ return -EINVAL;
+ }
+
+ /*
+ * DramOffset registers don't exist for map 0, so the base register
+ * actually refers to map 1.
+ * Adjust the map_num for the register offsets.
+ */
+ map_num = ctx->map.num - 1;
+
+ if (df_cfg.rev >= DF4) {
+ /* Read D18F7x140 (DramOffset) */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x140 + (4 * map_num),
+ ctx->inst_id, &reg_dram_offset))
+ return -EINVAL;
+
+ } else {
+ /* Read D18F0x1B4 (DramOffset) */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x1B4 + (4 * map_num),
+ ctx->inst_id, &reg_dram_offset))
+ return -EINVAL;
+ }
+
+ if (!FIELD_GET(DF_HI_ADDR_OFFSET_EN, reg_dram_offset))
+ return 0;
+
+ *norm_offset = get_hi_addr_offset(reg_dram_offset);
+
+ return 1;
+}
+
+static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ u16 dst_fabric_id = FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
+ u8 i, j, shift = 4, mask = 0xF;
+ u32 reg, offset = 0x60;
+ u16 dst_node_id;
+
+ /* Get Socket 1 register. */
+ if (dst_fabric_id & df_cfg.socket_id_mask)
+ offset = 0x68;
+
+ /* Read D18F0x06{0,8} (DF::Skt0CsTargetRemap0)/(DF::Skt0CsTargetRemap1) */
+ if (df_indirect_read_broadcast(ctx->node_id, 0, offset, &reg))
+ return -EINVAL;
+
+ /* Save 8 remap entries. */
+ for (i = 0, j = 0; i < 8; i++, j++)
+ ctx->map.remap_array[i] = (reg >> (j * shift)) & mask;
+
+ dst_node_id = dst_fabric_id & df_cfg.node_id_mask;
+ dst_node_id >>= df_cfg.node_id_shift;
+
+ /* Read D18F2x090 (DF::Np2ChannelConfig) */
+ if (df_indirect_read_broadcast(dst_node_id, 2, 0x90, &reg))
+ return -EINVAL;
+
+ ctx->map.np2_bits = FIELD_GET(DF_LOG2_ADDR_64K_SPACE0, reg);
+ return 0;
+}
+
+static int df2_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ /* Read D18F0x110 (DramBaseAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num),
+ ctx->inst_id, &ctx->map.base))
+ return -EINVAL;
+
+ /* Read D18F0x114 (DramLimitAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num),
+ ctx->inst_id, &ctx->map.limit))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df3_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ if (df2_get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ /* Read D18F0x3F8 (DfGlobalCtl). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x3F8,
+ ctx->inst_id, &ctx->map.ctl))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df4_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ u8 remap_sel, i, j, shift = 4, mask = 0xF;
+ u32 remap_reg;
+
+ /* Read D18F7xE00 (DramBaseAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.base))
+ return -EINVAL;
+
+ /* Read D18F7xE04 (DramLimitAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.limit))
+ return -EINVAL;
+
+ /* Read D18F7xE08 (DramAddressCtl). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.ctl))
+ return -EINVAL;
+
+ /* Read D18F7xE0C (DramAddressIntlv). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.intlv))
+ return -EINVAL;
+
+ /* Check if Remap Enable bit is valid. */
+ if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
+ return 0;
+
+ /* Fill with bogus values, because '0' is a valid value. */
+ memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));
+
+ /* Get Remap registers. */
+ remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);
+
+ /* Read D18F7x180 (CsTargetRemap0A). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (8 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save first 8 remap entries. */
+ for (i = 0, j = 0; i < 8; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ /* Read D18F7x184 (CsTargetRemap0B). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (8 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save next 8 remap entries. */
+ for (i = 8, j = 0; i < 16; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ return 0;
+}
+
+static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ u8 remap_sel, i, j, shift = 5, mask = 0x1F;
+ u32 remap_reg;
+
+ /* Read D18F7x200 (DramBaseAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.base))
+ return -EINVAL;
+
+ /* Read D18F7x204 (DramLimitAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.limit))
+ return -EINVAL;
+
+ /* Read D18F7x208 (DramAddressCtl). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.ctl))
+ return -EINVAL;
+
+ /* Read D18F7x20C (DramAddressIntlv). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.intlv))
+ return -EINVAL;
+
+ /* Check if Remap Enable bit is valid. */
+ if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
+ return 0;
+
+ /* Fill with bogus values, because '0' is a valid value. */
+ memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));
+
+ /* Get Remap registers. */
+ remap_sel = FIELD_GET(DF4p5_REMAP_SEL, ctx->map.ctl);
+
+ /* Read D18F7x180 (CsTargetRemap0A). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (24 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save first 6 remap entries. */
+ for (i = 0, j = 0; i < 6; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ /* Read D18F7x184 (CsTargetRemap0B). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (24 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save next 6 remap entries. */
+ for (i = 6, j = 0; i < 12; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ /* Read D18F7x188 (CsTargetRemap0C). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x188 + (24 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save next 6 remap entries. */
+ for (i = 12, j = 0; i < 18; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ return 0;
+}
+
+static int get_dram_addr_map(struct addr_ctx *ctx)
+{
+ switch (df_cfg.rev) {
+ case DF2: return df2_get_dram_addr_map(ctx);
+ case DF3:
+ case DF3p5: return df3_get_dram_addr_map(ctx);
+ case DF4: return df4_get_dram_addr_map(ctx);
+ case DF4p5: return df4p5_get_dram_addr_map(ctx);
+ default:
+ atl_debug_on_bad_df_rev();
+ return -EINVAL;
+ }
+}
+
+static int get_coh_st_fabric_id(struct addr_ctx *ctx)
+{
+ u32 reg;
+
+ /*
+ * On MI300 systems, the Coherent Station Fabric ID is derived
+ * later. And it does not depend on the register value.
+ */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return 0;
+
+ /* Read D18F0x50 (FabricBlockInstanceInformation3). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x50, ctx->inst_id, &reg))
+ return -EINVAL;
+
+ if (df_cfg.rev < DF4p5)
+ ctx->coh_st_fabric_id = FIELD_GET(DF2_COH_ST_FABRIC_ID, reg);
+ else
+ ctx->coh_st_fabric_id = FIELD_GET(DF4p5_COH_ST_FABRIC_ID, reg);
+
+ return 0;
+}
+
+static int find_normalized_offset(struct addr_ctx *ctx, u64 *norm_offset)
+{
+ u64 last_offset = 0;
+ int ret;
+
+ for (ctx->map.num = 1; ctx->map.num < df_cfg.num_coh_st_maps; ctx->map.num++) {
+ ret = get_dram_offset(ctx, norm_offset);
+ if (ret < 0)
+ return ret;
+
+ /* Continue search if this map's offset is not enabled. */
+ if (!ret)
+ continue;
+
+ /* Enabled offsets should never be 0. */
+ if (*norm_offset == 0) {
+ atl_debug(ctx, "Enabled map %u offset is 0", ctx->map.num);
+ return -EINVAL;
+ }
+
+ /* Offsets should always increase from one map to the next. */
+ if (*norm_offset <= last_offset) {
+ atl_debug(ctx, "Map %u offset (0x%016llx) <= previous (0x%016llx)",
+ ctx->map.num, *norm_offset, last_offset);
+ return -EINVAL;
+ }
+
+ /* Match if this map's offset is less than the current calculated address. */
+ if (ctx->ret_addr >= *norm_offset)
+ break;
+
+ last_offset = *norm_offset;
+ }
+
+ /*
+ * Finished search without finding a match.
+ * Reset to map 0 and no offset.
+ */
+ if (ctx->map.num >= df_cfg.num_coh_st_maps) {
+ ctx->map.num = 0;
+ *norm_offset = 0;
+ }
+
+ return 0;
+}
+
+static bool valid_map(struct addr_ctx *ctx)
+{
+ if (df_cfg.rev >= DF4)
+ return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.ctl);
+ else
+ return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.base);
+}
+
+static int get_address_map_common(struct addr_ctx *ctx)
+{
+ u64 norm_offset = 0;
+
+ if (get_coh_st_fabric_id(ctx))
+ return -EINVAL;
+
+ if (find_normalized_offset(ctx, &norm_offset))
+ return -EINVAL;
+
+ if (get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ if (!valid_map(ctx))
+ return -EINVAL;
+
+ ctx->ret_addr -= norm_offset;
+
+ return 0;
+}
+
+static u8 get_num_intlv_chan(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NONE:
+ return 1;
+ case NOHASH_2CHAN:
+ case DF2_2CHAN_HASH:
+ case DF3_COD4_2CHAN_HASH:
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ return 2;
+ case DF4_NPS4_3CHAN_HASH:
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ return 3;
+ case NOHASH_4CHAN:
+ case DF3_COD2_4CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ return 4;
+ case DF4_NPS2_5CHAN_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ return 5;
+ case DF3_6CHAN:
+ case DF4_NPS2_6CHAN_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ return 6;
+ case NOHASH_8CHAN:
+ case DF3_COD1_8CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case MI3_HASH_8CHAN:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ return 8;
+ case DF4_NPS1_10CHAN_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ return 10;
+ case DF4_NPS1_12CHAN_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ return 12;
+ case NOHASH_16CHAN:
+ case MI3_HASH_16CHAN:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return 16;
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ return 24;
+ case NOHASH_32CHAN:
+ case MI3_HASH_32CHAN:
+ return 32;
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return 0;
+ }
+}
+
+static void calculate_intlv_bits(struct addr_ctx *ctx)
+{
+ ctx->map.num_intlv_chan = get_num_intlv_chan(ctx);
+
+ ctx->map.total_intlv_chan = ctx->map.num_intlv_chan;
+ ctx->map.total_intlv_chan *= ctx->map.num_intlv_dies;
+ ctx->map.total_intlv_chan *= ctx->map.num_intlv_sockets;
+
+ /*
+ * Get the number of bits needed to cover this many channels.
+ * order_base_2() rounds up automatically.
+ */
+ ctx->map.total_intlv_bits = order_base_2(ctx->map.total_intlv_chan);
+}
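
As an aside on the channel-bit math above: order_base_2() rounds up to the next power of two, so the non-power-of-two channel counts (3, 5, 6, 10, 12, 24) still get enough interleave bits. A minimal user-space sketch, with a stand-in for the kernel helper and made-up channel/die/socket counts:

#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): smallest n such that 2^n >= x. */
static unsigned int order_base_2(unsigned int x)
{
	unsigned int n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

int main(void)
{
	/* Illustrative values: a 3-channel hash mode, 1 die, 1 socket. */
	unsigned int total_intlv_chan = 3 * 1 * 1;

	/* Three channels are not a power of two, so two bits are needed. */
	printf("total_intlv_chan=%u total_intlv_bits=%u\n",
	       total_intlv_chan, order_base_2(total_intlv_chan));
	return 0;
}
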
+
+static u8 get_intlv_bit_pos(struct addr_ctx *ctx)
+{
+ u8 addr_sel = 0;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ addr_sel = FIELD_GET(DF2_INTLV_ADDR_SEL, ctx->map.base);
+ break;
+ case DF3:
+ case DF3p5:
+ addr_sel = FIELD_GET(DF3_INTLV_ADDR_SEL, ctx->map.base);
+ break;
+ case DF4:
+ case DF4p5:
+ addr_sel = FIELD_GET(DF4_INTLV_ADDR_SEL, ctx->map.intlv);
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ break;
+ }
+
+ /* Add '8' to get the 'interleave bit position'. */
+ return addr_sel + 8;
+}
+
+static u8 get_num_intlv_dies(struct addr_ctx *ctx)
+{
+ u8 dies = 0;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ dies = FIELD_GET(DF2_INTLV_NUM_DIES, ctx->map.limit);
+ break;
+ case DF3:
+ dies = FIELD_GET(DF3_INTLV_NUM_DIES, ctx->map.base);
+ break;
+ case DF3p5:
+ dies = FIELD_GET(DF3p5_INTLV_NUM_DIES, ctx->map.base);
+ break;
+ case DF4:
+ case DF4p5:
+ dies = FIELD_GET(DF4_INTLV_NUM_DIES, ctx->map.intlv);
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ break;
+ }
+
+ /* Register value is log2, e.g. 0 -> 1 die, 1 -> 2 dies, etc. */
+ return 1 << dies;
+}
+
+static u8 get_num_intlv_sockets(struct addr_ctx *ctx)
+{
+ u8 sockets = 0;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.limit);
+ break;
+ case DF3:
+ case DF3p5:
+ sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.base);
+ break;
+ case DF4:
+ case DF4p5:
+ sockets = FIELD_GET(DF4_INTLV_NUM_SOCKETS, ctx->map.intlv);
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ break;
+ }
+
+ /* Register value is log2, e.g. 0 -> 1 socket, 1 -> 2 sockets, etc. */
+ return 1 << sockets;
+}
+
+static int get_global_map_data(struct addr_ctx *ctx)
+{
+ if (get_intlv_mode(ctx))
+ return -EINVAL;
+
+ if (ctx->map.intlv_mode == DF3_6CHAN &&
+ df3_6ch_get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ ctx->map.intlv_bit_pos = get_intlv_bit_pos(ctx);
+ ctx->map.num_intlv_dies = get_num_intlv_dies(ctx);
+ ctx->map.num_intlv_sockets = get_num_intlv_sockets(ctx);
+ calculate_intlv_bits(ctx);
+
+ return 0;
+}
+
+static void dump_address_map(struct dram_addr_map *map)
+{
+ u8 i;
+
+ pr_debug("intlv_mode=0x%x", map->intlv_mode);
+ pr_debug("num=0x%x", map->num);
+ pr_debug("base=0x%x", map->base);
+ pr_debug("limit=0x%x", map->limit);
+ pr_debug("ctl=0x%x", map->ctl);
+ pr_debug("intlv=0x%x", map->intlv);
+
+ for (i = 0; i < MAX_COH_ST_CHANNELS; i++)
+ pr_debug("remap_array[%u]=0x%x", i, map->remap_array[i]);
+
+ pr_debug("intlv_bit_pos=%u", map->intlv_bit_pos);
+ pr_debug("num_intlv_chan=%u", map->num_intlv_chan);
+ pr_debug("num_intlv_dies=%u", map->num_intlv_dies);
+ pr_debug("num_intlv_sockets=%u", map->num_intlv_sockets);
+ pr_debug("total_intlv_chan=%u", map->total_intlv_chan);
+ pr_debug("total_intlv_bits=%u", map->total_intlv_bits);
+}
+
+int get_address_map(struct addr_ctx *ctx)
+{
+ int ret;
+
+ ret = get_address_map_common(ctx);
+ if (ret)
+ return ret;
+
+ ret = get_global_map_data(ctx);
+ if (ret)
+ return ret;
+
+ dump_address_map(&ctx->map);
+
+ return ret;
+}
diff --git a/drivers/ras/amd/atl/reg_fields.h b/drivers/ras/amd/atl/reg_fields.h
new file mode 100644
index 000000000000..9dcdf6e4a856
--- /dev/null
+++ b/drivers/ras/amd/atl/reg_fields.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Address Translation Library
+ *
+ * reg_fields.h : Register field definitions
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+/*
+ * Notes on naming:
+ * 1) Use "DF_" prefix for fields that are the same for all revisions.
+ * 2) Use "DFx_" prefix for fields that differ between revisions.
+ * a) "x" is the first major revision where the new field appears.
+ * b) E.g., if DF2 and DF3 have the same field, then call it DF2.
+ * c) E.g., if DF3p5 and DF4 have the same field, then call it DF4.
+ */
+
+/*
+ * Coherent Station Fabric ID
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x50 [Fabric Block Instance Information 3]
+ * DF2 BlockFabricId [19:8]
+ * DF3 BlockFabricId [19:8]
+ * DF3p5 BlockFabricId [19:8]
+ * DF4 BlockFabricId [19:8]
+ * DF4p5 BlockFabricId [15:8]
+ */
+#define DF2_COH_ST_FABRIC_ID GENMASK(19, 8)
+#define DF4p5_COH_ST_FABRIC_ID GENMASK(15, 8)
+
+/*
+ * Component ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F1x208 [System Fabric ID Mask 0]
+ * DF3 ComponentIdMask [9:0]
+ *
+ * D18F1x150 [System Fabric ID Mask 0]
+ * DF3p5 ComponentIdMask [15:0]
+ *
+ * D18F4x1B0 [System Fabric ID Mask 0]
+ * DF4 ComponentIdMask [15:0]
+ * DF4p5 ComponentIdMask [15:0]
+ */
+#define DF3_COMPONENT_ID_MASK GENMASK(9, 0)
+#define DF4_COMPONENT_ID_MASK GENMASK(15, 0)
+
+/*
+ * Destination Fabric ID
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 DstFabricID [7:0]
+ * DF3 DstFabricID [9:0]
+ * DF3p5 DstFabricID [11:0]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 DstFabricID [27:16]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 DstFabricID [23:16]
+ */
+#define DF2_DST_FABRIC_ID GENMASK(7, 0)
+#define DF3_DST_FABRIC_ID GENMASK(9, 0)
+#define DF3p5_DST_FABRIC_ID GENMASK(11, 0)
+#define DF4_DST_FABRIC_ID GENMASK(27, 16)
+#define DF4p5_DST_FABRIC_ID GENMASK(23, 16)
+
+/*
+ * Die ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 DieIdMask [15:8]
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 DieIdMask [18:16]
+ *
+ * D18F1x158 [System Fabric ID Mask 2]
+ * DF3p5 DieIdMask [15:0]
+ *
+ * D18F4x1B8 [System Fabric ID Mask 2]
+ * DF4 DieIdMask [15:0]
+ * DF4p5 DieIdMask [15:0]
+ */
+#define DF2_DIE_ID_MASK GENMASK(15, 8)
+#define DF3_DIE_ID_MASK GENMASK(18, 16)
+#define DF4_DIE_ID_MASK GENMASK(15, 0)
+
+/*
+ * Die ID Shift
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 DieIdShift [27:24]
+ *
+ * DF3 N/A
+ * DF3p5 N/A
+ * DF4 N/A
+ * DF4p5 N/A
+ */
+#define DF2_DIE_ID_SHIFT GENMASK(27, 24)
+
+/*
+ * DRAM Address Range Valid
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 AddrRngVal [0]
+ * DF3 AddrRngVal [0]
+ * DF3p5 AddrRngVal [0]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 AddrRngVal [0]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 AddrRngVal [0]
+ */
+#define DF_ADDR_RANGE_VAL BIT(0)
+
+/*
+ * DRAM Base Address
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 DramBaseAddr [31:12]
+ * DF3 DramBaseAddr [31:12]
+ * DF3p5 DramBaseAddr [31:12]
+ *
+ * D18F7xE00 [DRAM Base Address]
+ * DF4 DramBaseAddr [27:0]
+ *
+ * D18F7x200 [DRAM Base Address]
+ * DF4p5 DramBaseAddr [27:0]
+ */
+#define DF2_BASE_ADDR GENMASK(31, 12)
+#define DF4_BASE_ADDR GENMASK(27, 0)
+
+/*
+ * DRAM Hole Base
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x104 [DRAM Hole Control]
+ * DF2 DramHoleBase [31:24]
+ * DF3 DramHoleBase [31:24]
+ * DF3p5 DramHoleBase [31:24]
+ *
+ * D18F7x104 [DRAM Hole Control]
+ * DF4 DramHoleBase [31:24]
+ * DF4p5 DramHoleBase [31:24]
+ */
+#define DF_DRAM_HOLE_BASE_MASK GENMASK(31, 24)
+
+/*
+ * DRAM Limit Address
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 DramLimitAddr [31:12]
+ * DF3 DramLimitAddr [31:12]
+ * DF3p5 DramLimitAddr [31:12]
+ *
+ * D18F7xE04 [DRAM Limit Address]
+ * DF4 DramLimitAddr [27:0]
+ *
+ * D18F7x204 [DRAM Limit Address]
+ * DF4p5 DramLimitAddr [27:0]
+ */
+#define DF2_DRAM_LIMIT_ADDR GENMASK(31, 12)
+#define DF4_DRAM_LIMIT_ADDR GENMASK(27, 0)
+
+/*
+ * Hash Interleave Controls
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F0x3F8 [DF Global Control]
+ * DF3 GlbHashIntlvCtl64K [20]
+ * GlbHashIntlvCtl2M [21]
+ * GlbHashIntlvCtl1G [22]
+ *
+ * DF3p5 GlbHashIntlvCtl64K [20]
+ * GlbHashIntlvCtl2M [21]
+ * GlbHashIntlvCtl1G [22]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 HashIntlvCtl64K [8]
+ * HashIntlvCtl2M [9]
+ * HashIntlvCtl1G [10]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 HashIntlvCtl4K [7]
+ * HashIntlvCtl64K [8]
+ * HashIntlvCtl2M [9]
+ * HashIntlvCtl1G [10]
+ * HashIntlvCtl1T [15]
+ */
+#define DF3_HASH_CTL_64K BIT(20)
+#define DF3_HASH_CTL_2M BIT(21)
+#define DF3_HASH_CTL_1G BIT(22)
+#define DF4_HASH_CTL_64K BIT(8)
+#define DF4_HASH_CTL_2M BIT(9)
+#define DF4_HASH_CTL_1G BIT(10)
+#define DF4p5_HASH_CTL_4K BIT(7)
+#define DF4p5_HASH_CTL_1T BIT(15)
+
+/*
+ * High Address Offset
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x1B4 [DRAM Offset]
+ * DF2 HiAddrOffset [31:20]
+ * DF3 HiAddrOffset [31:12]
+ * DF3p5 HiAddrOffset [31:12]
+ *
+ * D18F7x140 [DRAM Offset]
+ * DF4 HiAddrOffset [24:1]
+ * DF4p5 HiAddrOffset [24:1]
+ * MI300 HiAddrOffset [31:1]
+ */
+#define DF2_HI_ADDR_OFFSET GENMASK(31, 20)
+#define DF3_HI_ADDR_OFFSET GENMASK(31, 12)
+
+/* Follow reference code by including reserved bits for simplicity. */
+#define DF4_HI_ADDR_OFFSET GENMASK(31, 1)
+
+/*
+ * High Address Offset Enable
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x1B4 [DRAM Offset]
+ * DF2 HiAddrOffsetEn [0]
+ * DF3 HiAddrOffsetEn [0]
+ * DF3p5 HiAddrOffsetEn [0]
+ *
+ * D18F7x140 [DRAM Offset]
+ * DF4 HiAddrOffsetEn [0]
+ * DF4p5 HiAddrOffsetEn [0]
+ */
+#define DF_HI_ADDR_OFFSET_EN BIT(0)
+
+/*
+ * Interleave Address Select
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 IntLvAddrSel [10:8]
+ * DF3 IntLvAddrSel [11:9]
+ * DF3p5 IntLvAddrSel [11:9]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvAddrSel [2:0]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvAddrSel [2:0]
+ */
+#define DF2_INTLV_ADDR_SEL GENMASK(10, 8)
+#define DF3_INTLV_ADDR_SEL GENMASK(11, 9)
+#define DF4_INTLV_ADDR_SEL GENMASK(2, 0)
+
+/*
+ * Interleave Number of Channels
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 IntLvNumChan [7:4]
+ * DF3 IntLvNumChan [5:2]
+ * DF3p5 IntLvNumChan [6:2]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvNumChan [8:4]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvNumChan [9:4]
+ */
+#define DF2_INTLV_NUM_CHAN GENMASK(7, 4)
+#define DF3_INTLV_NUM_CHAN GENMASK(5, 2)
+#define DF3p5_INTLV_NUM_CHAN GENMASK(6, 2)
+#define DF4_INTLV_NUM_CHAN GENMASK(8, 4)
+#define DF4p5_INTLV_NUM_CHAN GENMASK(9, 4)
+
+/*
+ * Interleave Number of Dies
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 IntLvNumDies [11:10]
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF3 IntLvNumDies [7:6]
+ * DF3p5 IntLvNumDies [7]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvNumDies [13:12]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvNumDies [13:12]
+ */
+#define DF2_INTLV_NUM_DIES GENMASK(11, 10)
+#define DF3_INTLV_NUM_DIES GENMASK(7, 6)
+#define DF3p5_INTLV_NUM_DIES BIT(7)
+#define DF4_INTLV_NUM_DIES GENMASK(13, 12)
+
+/*
+ * Interleave Number of Sockets
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 IntLvNumSockets [8]
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF3 IntLvNumSockets [8]
+ * DF3p5 IntLvNumSockets [8]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvNumSockets [18]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvNumSockets [18]
+ */
+#define DF2_INTLV_NUM_SOCKETS BIT(8)
+#define DF4_INTLV_NUM_SOCKETS BIT(18)
+
+/*
+ * Legacy MMIO Hole Enable
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 LgcyMmioHoleEn [1]
+ * DF3 LgcyMmioHoleEn [1]
+ * DF3p5 LgcyMmioHoleEn [1]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 LgcyMmioHoleEn [1]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 LgcyMmioHoleEn [1]
+ */
+#define DF_LEGACY_MMIO_HOLE_EN BIT(1)
+
+/*
+ * Log2 Address 64K Space 0
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F2x90 [Non-power-of-2 channel Configuration Register for COH_ST DRAM Address Maps]
+ * DF3 Log2Addr64KSpace0 [5:0]
+ *
+ * DF3p5 N/A
+ * DF4 N/A
+ * DF4p5 N/A
+ */
+#define DF_LOG2_ADDR_64K_SPACE0 GENMASK(5, 0)
+
+/*
+ * Major Revision
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F0x040 [Fabric Block Instance Count]
+ * DF4 MajorRevision [27:24]
+ * DF4p5 MajorRevision [27:24]
+ */
+#define DF_MAJOR_REVISION GENMASK(27, 24)
+
+/*
+ * Minor Revision
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F0x040 [Fabric Block Instance Count]
+ * DF4 MinorRevision [23:16]
+ * DF4p5 MinorRevision [23:16]
+ */
+#define DF_MINOR_REVISION GENMASK(23, 16)
+
+/*
+ * Node ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F1x208 [System Fabric ID Mask 0]
+ * DF3 NodeIdMask [25:16]
+ *
+ * D18F1x150 [System Fabric ID Mask 0]
+ * DF3p5 NodeIdMask [31:16]
+ *
+ * D18F4x1B0 [System Fabric ID Mask 0]
+ * DF4 NodeIdMask [31:16]
+ * DF4p5 NodeIdMask [31:16]
+ */
+#define DF3_NODE_ID_MASK GENMASK(25, 16)
+#define DF4_NODE_ID_MASK GENMASK(31, 16)
+
+/*
+ * Node ID Shift
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 NodeIdShift [3:0]
+ *
+ * D18F1x154 [System Fabric ID Mask 1]
+ * DF3p5 NodeIdShift [3:0]
+ *
+ * D18F4x1B4 [System Fabric ID Mask 1]
+ * DF4 NodeIdShift [3:0]
+ * DF4p5 NodeIdShift [3:0]
+ */
+#define DF3_NODE_ID_SHIFT GENMASK(3, 0)
+
+/*
+ * Remap Enable
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 RemapEn [4]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 RemapEn [4]
+ */
+#define DF4_REMAP_EN BIT(4)
+
+/*
+ * Remap Select
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 RemapSel [7:5]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 RemapSel [6:5]
+ */
+#define DF4_REMAP_SEL GENMASK(7, 5)
+#define DF4p5_REMAP_SEL GENMASK(6, 5)
+
+/*
+ * Socket ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 SocketIdMask [23:16]
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 SocketIdMask [26:24]
+ *
+ * D18F1x158 [System Fabric ID Mask 2]
+ * DF3p5 SocketIdMask [31:16]
+ *
+ * D18F4x1B8 [System Fabric ID Mask 2]
+ * DF4 SocketIdMask [31:16]
+ * DF4p5 SocketIdMask [31:16]
+ */
+#define DF2_SOCKET_ID_MASK GENMASK(23, 16)
+#define DF3_SOCKET_ID_MASK GENMASK(26, 24)
+#define DF4_SOCKET_ID_MASK GENMASK(31, 16)
+
+/*
+ * Socket ID Shift
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 SocketIdShift [31:28]
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 SocketIdShift [9:8]
+ *
+ * D18F1x158 [System Fabric ID Mask 2]
+ * DF3p5 SocketIdShift [11:8]
+ *
+ * D18F4x1B4 [System Fabric ID Mask 1]
+ * DF4 SocketIdShift [11:8]
+ * DF4p5 SocketIdShift [11:8]
+ */
+#define DF2_SOCKET_ID_SHIFT GENMASK(31, 28)
+#define DF3_SOCKET_ID_SHIFT GENMASK(9, 8)
+#define DF4_SOCKET_ID_SHIFT GENMASK(11, 8)
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
new file mode 100644
index 000000000000..701349e84942
--- /dev/null
+++ b/drivers/ras/amd/atl/system.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * system.c : Functions to read and save system-wide data
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+int determine_node_id(struct addr_ctx *ctx, u8 socket_id, u8 die_id)
+{
+ u16 socket_id_bits, die_id_bits;
+
+ if (socket_id > 0 && df_cfg.socket_id_mask == 0) {
+ atl_debug(ctx, "Invalid socket inputs: socket_id=%u socket_id_mask=0x%x",
+ socket_id, df_cfg.socket_id_mask);
+ return -EINVAL;
+ }
+
+ /* Do each step independently to avoid shift out-of-bounds issues. */
+ socket_id_bits = socket_id;
+ socket_id_bits <<= df_cfg.socket_id_shift;
+ socket_id_bits &= df_cfg.socket_id_mask;
+
+ if (die_id > 0 && df_cfg.die_id_mask == 0) {
+ atl_debug(ctx, "Invalid die inputs: die_id=%u die_id_mask=0x%x",
+ die_id, df_cfg.die_id_mask);
+ return -EINVAL;
+ }
+
+ /* Do each step independently to avoid shift out-of-bounds issues. */
+ die_id_bits = die_id;
+ die_id_bits <<= df_cfg.die_id_shift;
+ die_id_bits &= df_cfg.die_id_mask;
+
+ ctx->node_id = (socket_id_bits | die_id_bits) >> df_cfg.node_id_shift;
+ return 0;
+}
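
For illustration, a small user-space sketch of the node ID calculation above. The mask and shift values are invented for the example and do not come from any real Data Fabric configuration:

#include <stdio.h>

int main(void)
{
	/* Assumed values, already shifted into fabric ID space. */
	unsigned int socket_id_shift = 8, socket_id_mask = 0x700;
	unsigned int die_id_shift = 8, die_id_mask = 0x000;
	unsigned int node_id_shift = 8;
	unsigned int socket_id = 2, die_id = 0;

	/* Same steps as determine_node_id(): shift, mask, combine, shift down. */
	unsigned int socket_bits = (socket_id << socket_id_shift) & socket_id_mask;
	unsigned int die_bits = (die_id << die_id_shift) & die_id_mask;
	unsigned int node_id = (socket_bits | die_bits) >> node_id_shift;

	printf("node_id=%u\n", node_id);	/* Prints node_id=2. */
	return 0;
}
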
+
+static void df2_get_masks_shifts(u32 mask0)
+{
+ df_cfg.socket_id_shift = FIELD_GET(DF2_SOCKET_ID_SHIFT, mask0);
+ df_cfg.socket_id_mask = FIELD_GET(DF2_SOCKET_ID_MASK, mask0);
+ df_cfg.die_id_shift = FIELD_GET(DF2_DIE_ID_SHIFT, mask0);
+ df_cfg.die_id_mask = FIELD_GET(DF2_DIE_ID_MASK, mask0);
+ df_cfg.node_id_shift = df_cfg.die_id_shift;
+ df_cfg.node_id_mask = df_cfg.socket_id_mask | df_cfg.die_id_mask;
+ df_cfg.component_id_mask = ~df_cfg.node_id_mask;
+}
+
+static void df3_get_masks_shifts(u32 mask0, u32 mask1)
+{
+ df_cfg.component_id_mask = FIELD_GET(DF3_COMPONENT_ID_MASK, mask0);
+ df_cfg.node_id_mask = FIELD_GET(DF3_NODE_ID_MASK, mask0);
+
+ df_cfg.node_id_shift = FIELD_GET(DF3_NODE_ID_SHIFT, mask1);
+ df_cfg.socket_id_shift = FIELD_GET(DF3_SOCKET_ID_SHIFT, mask1);
+ df_cfg.socket_id_mask = FIELD_GET(DF3_SOCKET_ID_MASK, mask1);
+ df_cfg.die_id_mask = FIELD_GET(DF3_DIE_ID_MASK, mask1);
+}
+
+static void df3p5_get_masks_shifts(u32 mask0, u32 mask1, u32 mask2)
+{
+ df_cfg.component_id_mask = FIELD_GET(DF4_COMPONENT_ID_MASK, mask0);
+ df_cfg.node_id_mask = FIELD_GET(DF4_NODE_ID_MASK, mask0);
+
+ df_cfg.node_id_shift = FIELD_GET(DF3_NODE_ID_SHIFT, mask1);
+ df_cfg.socket_id_shift = FIELD_GET(DF4_SOCKET_ID_SHIFT, mask1);
+
+ df_cfg.socket_id_mask = FIELD_GET(DF4_SOCKET_ID_MASK, mask2);
+ df_cfg.die_id_mask = FIELD_GET(DF4_DIE_ID_MASK, mask2);
+}
+
+static void df4_get_masks_shifts(u32 mask0, u32 mask1, u32 mask2)
+{
+ df3p5_get_masks_shifts(mask0, mask1, mask2);
+
+ if (!(df_cfg.flags.socket_id_shift_quirk && df_cfg.socket_id_shift == 1))
+ return;
+
+ df_cfg.socket_id_shift = 0;
+ df_cfg.socket_id_mask = 1;
+ df_cfg.die_id_shift = 0;
+ df_cfg.die_id_mask = 0;
+ df_cfg.node_id_shift = 8;
+ df_cfg.node_id_mask = 0x100;
+}
+
+static int df4_get_fabric_id_mask_registers(void)
+{
+ u32 mask0, mask1, mask2;
+
+ /* Read D18F4x1B0 (SystemFabricIdMask0) */
+ if (df_indirect_read_broadcast(0, 4, 0x1B0, &mask0))
+ return -EINVAL;
+
+ /* Read D18F4x1B4 (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 4, 0x1B4, &mask1))
+ return -EINVAL;
+
+ /* Read D18F4x1B8 (SystemFabricIdMask2) */
+ if (df_indirect_read_broadcast(0, 4, 0x1B8, &mask2))
+ return -EINVAL;
+
+ df4_get_masks_shifts(mask0, mask1, mask2);
+ return 0;
+}
+
+static int df4_determine_df_rev(u32 reg)
+{
+ df_cfg.rev = FIELD_GET(DF_MINOR_REVISION, reg) < 5 ? DF4 : DF4p5;
+
+ /* Check for special cases or quirks based on Device/Vendor IDs. */
+
+ /* Read D18F0x000 (DeviceVendorId0) */
+ if (df_indirect_read_broadcast(0, 0, 0, &reg))
+ return -EINVAL;
+
+ if (reg == DF_FUNC0_ID_ZEN4_SERVER)
+ df_cfg.flags.socket_id_shift_quirk = 1;
+
+ if (reg == DF_FUNC0_ID_MI300) {
+ df_cfg.flags.heterogeneous = 1;
+
+ if (get_addr_hash_mi300())
+ return -EINVAL;
+ }
+
+ return df4_get_fabric_id_mask_registers();
+}
+
+static int determine_df_rev_legacy(void)
+{
+ u32 fabric_id_mask0, fabric_id_mask1, fabric_id_mask2;
+
+ /*
+ * Check for DF3.5.
+ *
+ * Component ID Mask must be non-zero. Register D18F1x150 is
+ * reserved pre-DF3.5, so value will be Read-as-Zero.
+ */
+
+ /* Read D18F1x150 (SystemFabricIdMask0). */
+ if (df_indirect_read_broadcast(0, 1, 0x150, &fabric_id_mask0))
+ return -EINVAL;
+
+ if (FIELD_GET(DF4_COMPONENT_ID_MASK, fabric_id_mask0)) {
+ df_cfg.rev = DF3p5;
+
+ /* Read D18F1x154 (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 1, 0x154, &fabric_id_mask1))
+ return -EINVAL;
+
+ /* Read D18F1x158 (SystemFabricIdMask2) */
+ if (df_indirect_read_broadcast(0, 1, 0x158, &fabric_id_mask2))
+ return -EINVAL;
+
+ df3p5_get_masks_shifts(fabric_id_mask0, fabric_id_mask1, fabric_id_mask2);
+ return 0;
+ }
+
+ /*
+ * Check for DF3.
+ *
+ * Component ID Mask must be non-zero. Field is Read-as-Zero on DF2.
+ */
+
+ /* Read D18F1x208 (SystemFabricIdMask). */
+ if (df_indirect_read_broadcast(0, 1, 0x208, &fabric_id_mask0))
+ return -EINVAL;
+
+ if (FIELD_GET(DF3_COMPONENT_ID_MASK, fabric_id_mask0)) {
+ df_cfg.rev = DF3;
+
+ /* Read D18F1x20C (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 1, 0x20C, &fabric_id_mask1))
+ return -EINVAL;
+
+ df3_get_masks_shifts(fabric_id_mask0, fabric_id_mask1);
+ return 0;
+ }
+
+ /* Default to DF2. */
+ df_cfg.rev = DF2;
+ df2_get_masks_shifts(fabric_id_mask0);
+ return 0;
+}
+
+static int determine_df_rev(void)
+{
+ u32 reg;
+ u8 rev;
+
+ if (df_cfg.rev != UNKNOWN)
+ return 0;
+
+ /* Read D18F0x40 (FabricBlockInstanceCount). */
+ if (df_indirect_read_broadcast(0, 0, 0x40, &reg))
+ return -EINVAL;
+
+ /*
+ * Revision fields added for DF4 and later.
+ *
+ * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
+ */
+ rev = FIELD_GET(DF_MAJOR_REVISION, reg);
+ if (!rev)
+ return determine_df_rev_legacy();
+
+ /*
+ * Fail out for major revisions other than '4'.
+ *
+ * Explicit support should be added for newer systems to avoid issues.
+ */
+ if (rev == 4)
+ return df4_determine_df_rev(reg);
+
+ return -EINVAL;
+}
+
+static void get_num_maps(void)
+{
+ switch (df_cfg.rev) {
+ case DF2:
+ case DF3:
+ case DF3p5:
+ df_cfg.num_coh_st_maps = 2;
+ break;
+ case DF4:
+ case DF4p5:
+ df_cfg.num_coh_st_maps = 4;
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ }
+}
+
+static void apply_node_id_shift(void)
+{
+ if (df_cfg.rev == DF2)
+ return;
+
+ df_cfg.die_id_shift = df_cfg.node_id_shift;
+ df_cfg.die_id_mask <<= df_cfg.node_id_shift;
+ df_cfg.socket_id_mask <<= df_cfg.node_id_shift;
+ df_cfg.socket_id_shift += df_cfg.node_id_shift;
+}
+
+static void dump_df_cfg(void)
+{
+ pr_debug("rev=0x%x", df_cfg.rev);
+
+ pr_debug("component_id_mask=0x%x", df_cfg.component_id_mask);
+ pr_debug("die_id_mask=0x%x", df_cfg.die_id_mask);
+ pr_debug("node_id_mask=0x%x", df_cfg.node_id_mask);
+ pr_debug("socket_id_mask=0x%x", df_cfg.socket_id_mask);
+
+ pr_debug("die_id_shift=0x%x", df_cfg.die_id_shift);
+ pr_debug("node_id_shift=0x%x", df_cfg.node_id_shift);
+ pr_debug("socket_id_shift=0x%x", df_cfg.socket_id_shift);
+
+ pr_debug("num_coh_st_maps=%u", df_cfg.num_coh_st_maps);
+
+ pr_debug("flags.legacy_ficaa=%u", df_cfg.flags.legacy_ficaa);
+ pr_debug("flags.socket_id_shift_quirk=%u", df_cfg.flags.socket_id_shift_quirk);
+}
+
+int get_df_system_info(void)
+{
+ if (determine_df_rev()) {
+ pr_warn("amd_atl: Failed to determine DF Revision");
+ df_cfg.rev = UNKNOWN;
+ return -EINVAL;
+ }
+
+ apply_node_id_shift();
+
+ get_num_maps();
+
+ dump_df_cfg();
+
+ return 0;
+}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
new file mode 100644
index 000000000000..59b6169093f7
--- /dev/null
+++ b/drivers/ras/amd/atl/umc.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * umc.c : Unified Memory Controller (UMC) topology helpers
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/*
+ * MI300 has a fixed, model-specific mapping between a UMC instance and
+ * its related Data Fabric Coherent Station instance.
+ *
+ * The MCA_IPID_UMC[InstanceId] field holds a unique identifier for the
+ * UMC instance within a Node. Use this to find the appropriate Coherent
+ * Station ID.
+ *
+ * Redundant bits were removed from the map below.
+ */
+static const u16 umc_coh_st_map[32] = {
+ 0x393, 0x293, 0x193, 0x093,
+ 0x392, 0x292, 0x192, 0x092,
+ 0x391, 0x291, 0x191, 0x091,
+ 0x390, 0x290, 0x190, 0x090,
+ 0x793, 0x693, 0x593, 0x493,
+ 0x792, 0x692, 0x592, 0x492,
+ 0x791, 0x691, 0x591, 0x491,
+ 0x790, 0x690, 0x590, 0x490,
+};
+
+#define UMC_ID_MI300 GENMASK(23, 12)
+static u8 get_coh_st_inst_id_mi300(struct atl_err *err)
+{
+ u16 umc_id = FIELD_GET(UMC_ID_MI300, err->ipid);
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(umc_coh_st_map); i++) {
+ if (umc_id == umc_coh_st_map[i])
+ break;
+ }
+
+ WARN_ON_ONCE(i >= ARRAY_SIZE(umc_coh_st_map));
+
+ return i;
+}
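
A small stand-alone sketch of the lookup above, using only the first rows of the table and a made-up IPID value; bits [23:12] of the IPID hold the UMC instance ID that is searched for:

#include <stdio.h>

static const unsigned short umc_coh_st_map[] = {
	0x393, 0x293, 0x193, 0x093,
	0x392, 0x292, 0x192, 0x092,
};

int main(void)
{
	unsigned long long ipid = 0x292000ULL;	/* made-up IPID value */
	unsigned short umc_id = (ipid >> 12) & 0xfff;
	unsigned int i;

	for (i = 0; i < sizeof(umc_coh_st_map) / sizeof(umc_coh_st_map[0]); i++)
		if (umc_coh_st_map[i] == umc_id)
			break;

	printf("coh_st_inst_id=%u\n", i);	/* Prints 5 for this value. */
	return 0;
}
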
+
+/* XOR the bits in @val. */
+static u16 bitwise_xor_bits(u16 val)
+{
+ u16 tmp = 0;
+ u8 i;
+
+ for (i = 0; i < 16; i++)
+ tmp ^= (val >> i) & 0x1;
+
+ return tmp;
+}
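
The helper above is a bit-parity (XOR reduction) of the low 16 bits, which is what the bank and pseudochannel hash calculations below rely on. A minimal stand-alone sketch with arbitrary inputs:

#include <stdio.h>

/* Same loop as bitwise_xor_bits(): XOR all 16 low-order bits together. */
static unsigned int parity16(unsigned int val)
{
	unsigned int tmp = 0, i;

	for (i = 0; i < 16; i++)
		tmp ^= (val >> i) & 0x1;
	return tmp;
}

int main(void)
{
	/* 0xB has three set bits (odd), 0x3 has two (even). */
	printf("parity(0xB)=%u parity(0x3)=%u\n", parity16(0xB), parity16(0x3));
	return 0;
}
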
+
+struct xor_bits {
+ bool xor_enable;
+ u16 col_xor;
+ u32 row_xor;
+};
+
+#define NUM_BANK_BITS 4
+
+static struct {
+ /* UMC::CH::AddrHashBank */
+ struct xor_bits bank[NUM_BANK_BITS];
+
+ /* UMC::CH::AddrHashPC */
+ struct xor_bits pc;
+
+ /* UMC::CH::AddrHashPC2 */
+ u8 bank_xor;
+} addr_hash;
+
+#define MI300_UMC_CH_BASE 0x90000
+#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8)
+#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0)
+#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4)
+
+#define ADDR_HASH_XOR_EN BIT(0)
+#define ADDR_HASH_COL_XOR GENMASK(13, 1)
+#define ADDR_HASH_ROW_XOR GENMASK(31, 14)
+#define ADDR_HASH_BANK_XOR GENMASK(5, 0)
+
+/*
+ * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
+ * for hashing. Do this during module init, since the values will not
+ * change during run time.
+ *
+ * These registers are instantiated for each UMC across each AMD Node.
+ * However, they should be identically programmed due to the fixed hardware
+ * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
+ * single global structure for simplicity.
+ */
+int get_addr_hash_mi300(void)
+{
+ u32 temp;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ ret = amd_smn_read(0, MI300_ADDR_HASH_BANK0 + (i * 4), &temp);
+ if (ret)
+ return ret;
+
+ addr_hash.bank[i].xor_enable = FIELD_GET(ADDR_HASH_XOR_EN, temp);
+ addr_hash.bank[i].col_xor = FIELD_GET(ADDR_HASH_COL_XOR, temp);
+ addr_hash.bank[i].row_xor = FIELD_GET(ADDR_HASH_ROW_XOR, temp);
+ }
+
+ ret = amd_smn_read(0, MI300_ADDR_HASH_PC, &temp);
+ if (ret)
+ return ret;
+
+ addr_hash.pc.xor_enable = FIELD_GET(ADDR_HASH_XOR_EN, temp);
+ addr_hash.pc.col_xor = FIELD_GET(ADDR_HASH_COL_XOR, temp);
+ addr_hash.pc.row_xor = FIELD_GET(ADDR_HASH_ROW_XOR, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_HASH_PC2, &temp);
+ if (ret)
+ return ret;
+
+ addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+
+ return 0;
+}
+
+/*
+ * MI300 systems report a DRAM address in MCA_ADDR for DRAM ECC errors. This must
+ * be converted to the intermediate normalized address (NA) before translating to a
+ * system physical address.
+ *
+ * The DRAM address includes bank, row, and column. Also included are bits for
+ * pseudochannel (PC) and stack ID (SID).
+ *
+ * Abbreviations: (S)tack ID, (P)seudochannel, (R)ow, (B)ank, (C)olumn, (Z)ero
+ *
+ * The MCA address format is as follows:
+ * MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
+ *
+ * The normalized address format is fixed in hardware and is as follows:
+ * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
+ *
+ * Additionally, the PC and Bank bits may be hashed. This must be accounted for before
+ * reconstructing the normalized address.
+ */
+#define MI300_UMC_MCA_COL GENMASK(5, 1)
+#define MI300_UMC_MCA_BANK GENMASK(9, 6)
+#define MI300_UMC_MCA_ROW GENMASK(24, 10)
+#define MI300_UMC_MCA_PC BIT(25)
+#define MI300_UMC_MCA_SID GENMASK(27, 26)
+
+#define MI300_NA_COL_1_0 GENMASK(6, 5)
+#define MI300_NA_PC BIT(7)
+#define MI300_NA_COL_3_2 GENMASK(9, 8)
+#define MI300_NA_BANK_3_2 GENMASK(11, 10)
+#define MI300_NA_BANK_1_0 GENMASK(13, 12)
+#define MI300_NA_COL_4 BIT(14)
+#define MI300_NA_ROW GENMASK(28, 15)
+#define MI300_NA_SID GENMASK(30, 29)
+
+static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
+{
+ u16 i, col, row, bank, pc, sid, temp;
+
+ col = FIELD_GET(MI300_UMC_MCA_COL, addr);
+ bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
+ row = FIELD_GET(MI300_UMC_MCA_ROW, addr);
+ pc = FIELD_GET(MI300_UMC_MCA_PC, addr);
+ sid = FIELD_GET(MI300_UMC_MCA_SID, addr);
+
+ /* Calculate hash for each Bank bit. */
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ if (!addr_hash.bank[i].xor_enable)
+ continue;
+
+ temp = bitwise_xor_bits(col & addr_hash.bank[i].col_xor);
+ temp ^= bitwise_xor_bits(row & addr_hash.bank[i].row_xor);
+ bank ^= temp << i;
+ }
+
+ /* Calculate hash for PC bit. */
+ if (addr_hash.pc.xor_enable) {
+ /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
+ bank |= sid << 5;
+
+ temp = bitwise_xor_bits(col & addr_hash.pc.col_xor);
+ temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor);
+ temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
+ pc ^= temp;
+
+ /* Drop SID bits for the sake of debug printing later. */
+ bank &= 0x1F;
+ }
+
+ /* Reconstruct the normalized address starting with NA[4:0] = 0 */
+ addr = 0;
+
+ /* NA[6:5] = Column[1:0] */
+ temp = col & 0x3;
+ addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
+
+ /* NA[7] = PC */
+ addr |= FIELD_PREP(MI300_NA_PC, pc);
+
+ /* NA[9:8] = Column[3:2] */
+ temp = (col >> 2) & 0x3;
+ addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
+
+ /* NA[11:10] = Bank[3:2] */
+ temp = (bank >> 2) & 0x3;
+ addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
+
+ /* NA[13:12] = Bank[1:0] */
+ temp = bank & 0x3;
+ addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
+
+ /* NA[14] = Column[4] */
+ temp = (col >> 4) & 0x1;
+ addr |= FIELD_PREP(MI300_NA_COL_4, temp);
+
+ /* NA[28:15] = Row[13:0] */
+ addr |= FIELD_PREP(MI300_NA_ROW, row);
+
+ /* NA[30:29] = SID[1:0] */
+ addr |= FIELD_PREP(MI300_NA_SID, sid);
+
+ pr_debug("Addr=0x%016lx", addr);
+ pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
+
+ return addr;
+}
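
To make the bit packing above concrete, here is a minimal user-space sketch that rebuilds the normalized address with all hashing disabled. The column/bank/row/PC/SID values are arbitrary examples, not data from a real error record:

#include <stdio.h>

int main(void)
{
	unsigned long col = 0x01, bank = 0x2, row = 0x1, pc = 0, sid = 0;
	unsigned long na = 0;			/* NA[4:0] = 0 */

	na |= (col & 0x3) << 5;			/* NA[6:5]   = Column[1:0] */
	na |= (pc & 0x1) << 7;			/* NA[7]     = PC          */
	na |= ((col >> 2) & 0x3) << 8;		/* NA[9:8]   = Column[3:2] */
	na |= ((bank >> 2) & 0x3) << 10;	/* NA[11:10] = Bank[3:2]   */
	na |= (bank & 0x3) << 12;		/* NA[13:12] = Bank[1:0]   */
	na |= ((col >> 4) & 0x1) << 14;		/* NA[14]    = Column[4]   */
	na |= (row & 0x3fff) << 15;		/* NA[28:15] = Row[13:0]   */
	na |= (sid & 0x3) << 29;		/* NA[30:29] = SID[1:0]    */

	printf("normalized address = 0x%lx\n", na);	/* Prints 0xa020. */
	return 0;
}
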
+
+/*
+ * When a DRAM ECC error occurs on MI300 systems, it is recommended to retire
+ * all memory within that DRAM row. This applies to the memory within a DRAM
+ * bank.
+ *
+ * To find the memory addresses, loop through permutations of the DRAM column
+ * bits and find the System Physical address of each. The column bits are used
+ * to calculate the intermediate Normalized address, so all permutations should
+ * be checked.
+ *
+ * See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats.
+ */
+#define MI300_NUM_COL BIT(HWEIGHT(MI300_UMC_MCA_COL))
+static void retire_row_mi300(struct atl_err *a_err)
+{
+ unsigned long addr;
+ struct page *p;
+ u8 col;
+
+ for (col = 0; col < MI300_NUM_COL; col++) {
+ a_err->addr &= ~MI300_UMC_MCA_COL;
+ a_err->addr |= FIELD_PREP(MI300_UMC_MCA_COL, col);
+
+ addr = amd_convert_umc_mca_addr_to_sys_addr(a_err);
+ if (IS_ERR_VALUE(addr))
+ continue;
+
+ addr = PHYS_PFN(addr);
+
+ /*
+ * Skip invalid or already poisoned pages to avoid unnecessary
+ * error messages from memory_failure().
+ */
+ p = pfn_to_online_page(addr);
+ if (!p)
+ continue;
+
+ if (PageHWPoison(p))
+ continue;
+
+ memory_failure(addr, 0);
+ }
+}
+
+void amd_retire_dram_row(struct atl_err *a_err)
+{
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return retire_row_mi300(a_err);
+}
+EXPORT_SYMBOL_GPL(amd_retire_dram_row);
+
+static unsigned long get_addr(unsigned long addr)
+{
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return convert_dram_to_norm_addr_mi300(addr);
+
+ return addr;
+}
+
+#define MCA_IPID_INST_ID_HI GENMASK_ULL(47, 44)
+static u8 get_die_id(struct atl_err *err)
+{
+ /*
+ * AMD Node ID is provided in MCA_IPID[InstanceIdHi], and this
+ * needs to be divided by 4 to get the internal Die ID.
+ */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) {
+ u8 node_id = FIELD_GET(MCA_IPID_INST_ID_HI, err->ipid);
+
+ return node_id >> 2;
+ }
+
+ /*
+ * For CPUs, this is the AMD Node ID modulo the number
+ * of AMD Nodes per socket.
+ */
+ return topology_amd_node_id(err->cpu) % topology_amd_nodes_per_pkg();
+}
+
+#define UMC_CHANNEL_NUM GENMASK(31, 20)
+static u8 get_coh_st_inst_id(struct atl_err *err)
+{
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return get_coh_st_inst_id_mi300(err);
+
+ return FIELD_GET(UMC_CHANNEL_NUM, err->ipid);
+}
+
+unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err)
+{
+ u8 socket_id = topology_physical_package_id(err->cpu);
+ u8 coh_st_inst_id = get_coh_st_inst_id(err);
+ unsigned long addr = get_addr(err->addr);
+ u8 die_id = get_die_id(err);
+
+ pr_debug("socket_id=0x%x die_id=0x%x coh_st_inst_id=0x%x addr=0x%016lx",
+ socket_id, die_id, coh_st_inst_id, addr);
+
+ return norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, addr);
+}
diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
new file mode 100644
index 000000000000..2f4ac9591c8f
--- /dev/null
+++ b/drivers/ras/amd/fmpm.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * FRU (Field-Replaceable Unit) Memory Poison Manager
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
+ * Muralidhara M K <muralidhara.mk@amd.com>
+ * Yazen Ghannam <Yazen.Ghannam@amd.com>
+ *
+ * Implementation notes, assumptions, and limitations:
+ *
+ * - FRU memory poison section and memory poison descriptor definitions are not yet
+ * included in the UEFI specification. So they are defined here. Afterwards, they
+ * may be moved to linux/cper.h, if appropriate.
+ *
+ * - Platforms based on AMD MI300 systems will be the first to use these structures.
+ * There are a number of assumptions made here that will need to be generalized
+ * to support other platforms.
+ *
+ * AMD MI300-based platform(s) assumptions:
+ * - Memory errors are reported through x86 MCA.
+ * - The entire DRAM row containing a memory error should be retired.
+ * - There will be (1) FRU memory poison section per CPER.
+ * - The FRU will be the CPU package (processor socket).
+ * - The default number of memory poison descriptor entries should be (8).
+ * - The platform will use ACPI ERST for persistent storage.
+ * - All FRU records should be saved to persistent storage. Module init will
+ * fail if any FRU record is not successfully written.
+ *
+ * - Boot time memory retirement may occur later than ideal due to dependencies
+ * on other libraries and drivers. This leaves a gap where bad memory may be
+ * accessed during early boot stages.
+ *
+ * - Enough memory should be pre-allocated for each FRU record to be able to hold
+ * the expected number of descriptor entries. This, mostly empty, record is
+ * written to storage during init time. Subsequent writes to the same record
+ * should allow the Platform to update the stored record in-place. Otherwise,
+ * if the record is extended, then the Platform may need to perform costly memory
+ * management operations on the storage. For example, the Platform may spend time
+ * in Firmware copying and invalidating memory on a relatively slow SPI ROM.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cper.h>
+#include <linux/ras.h>
+#include <linux/cpu.h>
+
+#include <acpi/apei.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/mce.h>
+
+#include "../debugfs.h"
+
+#define INVALID_CPU UINT_MAX
+
+/* Validation Bits */
+#define FMP_VALID_ARCH_TYPE BIT_ULL(0)
+#define FMP_VALID_ARCH BIT_ULL(1)
+#define FMP_VALID_ID_TYPE BIT_ULL(2)
+#define FMP_VALID_ID BIT_ULL(3)
+#define FMP_VALID_LIST_ENTRIES BIT_ULL(4)
+#define FMP_VALID_LIST BIT_ULL(5)
+
+/* FRU Architecture Types */
+#define FMP_ARCH_TYPE_X86_CPUID_1_EAX 0
+
+/* FRU ID Types */
+#define FMP_ID_TYPE_X86_PPIN 0
+
+/* FRU Memory Poison Section */
+struct cper_sec_fru_mem_poison {
+ u32 checksum;
+ u64 validation_bits;
+ u32 fru_arch_type;
+ u64 fru_arch;
+ u32 fru_id_type;
+ u64 fru_id;
+ u32 nr_entries;
+} __packed;
+
+/* FRU Descriptor ID Types */
+#define FPD_HW_ID_TYPE_MCA_IPID 0
+
+/* FRU Descriptor Address Types */
+#define FPD_ADDR_TYPE_MCA_ADDR 0
+
+/* Memory Poison Descriptor */
+struct cper_fru_poison_desc {
+ u64 timestamp;
+ u32 hw_id_type;
+ u64 hw_id;
+ u32 addr_type;
+ u64 addr;
+} __packed;
+
+/* Collection of headers and sections for easy pointer use. */
+struct fru_rec {
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_desc;
+ struct cper_sec_fru_mem_poison fmp;
+ struct cper_fru_poison_desc entries[];
+} __packed;
+
+/*
+ * Pointers to the complete CPER record of each FRU.
+ *
+ * Memory allocation will include padded space for descriptor entries.
+ */
+static struct fru_rec **fru_records;
+
+/* system physical addresses array */
+static u64 *spa_entries;
+
+#define INVALID_SPA ~0ULL
+
+static struct dentry *fmpm_dfs_dir;
+static struct dentry *fmpm_dfs_entries;
+
+#define CPER_CREATOR_FMP \
+ GUID_INIT(0xcd5c2993, 0xf4b2, 0x41b2, 0xb5, 0xd4, 0xf9, 0xc3, \
+ 0xa0, 0x33, 0x08, 0x75)
+
+#define CPER_SECTION_TYPE_FMP \
+ GUID_INIT(0x5e4706c1, 0x5356, 0x48c6, 0x93, 0x0b, 0x52, 0xf2, \
+ 0x12, 0x0a, 0x44, 0x58)
+
+/**
+ * DOC: max_nr_entries (byte)
+ * Maximum number of descriptor entries possible for each FRU.
+ *
+ * Values between '1' and '255' are valid.
+ * No input or '0' will default to FMPM_DEFAULT_MAX_NR_ENTRIES.
+ */
+static u8 max_nr_entries;
+module_param(max_nr_entries, byte, 0644);
+MODULE_PARM_DESC(max_nr_entries,
+ "Maximum number of memory poison descriptor entries per FRU");
+
+#define FMPM_DEFAULT_MAX_NR_ENTRIES 8
+
+/* Maximum number of FRUs in the system. */
+#define FMPM_MAX_NR_FRU 256
+static unsigned int max_nr_fru;
+
+/* Total length of record including headers and list of descriptor entries. */
+static size_t max_rec_len;
+
+/* Total number of SPA entries across all FRUs. */
+static unsigned int spa_nr_entries;
+
+/*
+ * Protect the local records cache in fru_records and prevent concurrent
+ * writes to storage. This is only needed after init once notifier block
+ * registration is done.
+ *
+ * The majority of a record is fixed at module init and will not change
+ * during run time. The entries within a record will be updated as new
+ * errors are reported. The mutex should be held whenever the entries are
+ * accessed during run time.
+ */
+static DEFINE_MUTEX(fmpm_update_mutex);
+
+#define for_each_fru(i, rec) \
+ for (i = 0; rec = fru_records[i], i < max_nr_fru; i++)
+
+static inline u32 get_fmp_len(struct fru_rec *rec)
+{
+ return rec->sec_desc.section_length - sizeof(struct cper_section_descriptor);
+}
+
+static struct fru_rec *get_fru_record(u64 fru_id)
+{
+ struct fru_rec *rec;
+ unsigned int i;
+
+ for_each_fru(i, rec) {
+ if (rec->fmp.fru_id == fru_id)
+ return rec;
+ }
+
+ pr_debug("Record not found for FRU 0x%016llx\n", fru_id);
+
+ return NULL;
+}
+
+/*
+ * Sum up all bytes within the FRU Memory Poison Section including the Memory
+ * Poison Descriptor entries.
+ *
+ * Don't include the old checksum here. It's a u32 value, so summing each of its
+ * bytes will give the wrong total.
+ */
+static u32 do_fmp_checksum(struct cper_sec_fru_mem_poison *fmp, u32 len)
+{
+ u32 checksum = 0;
+ u8 *buf, *end;
+
+ /* Skip old checksum. */
+ buf = (u8 *)fmp + sizeof(u32);
+ end = buf + len;
+
+ while (buf < end)
+ checksum += (u8)(*(buf++));
+
+ return checksum;
+}
+
+static int update_record_on_storage(struct fru_rec *rec)
+{
+ u32 len, checksum;
+ int ret;
+
+ /* Calculate a new checksum. */
+ len = get_fmp_len(rec);
+
+ /* Get the current total. */
+ checksum = do_fmp_checksum(&rec->fmp, len);
+
+ /* Use the complement value. */
+ rec->fmp.checksum = -checksum;
+
+ pr_debug("Writing to storage\n");
+
+ ret = erst_write(&rec->hdr);
+ if (ret) {
+ pr_warn("Storage update failed for FRU 0x%016llx\n", rec->fmp.fru_id);
+
+ if (ret == -ENOSPC)
+ pr_warn("Not enough space on storage\n");
+ }
+
+ return ret;
+}
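
A minimal stand-alone sketch of the checksum scheme used above: sum the section bytes after the checksum field, store the arithmetic complement, and on validation the recomputed sum plus the stored value must be zero. The structure below is a stand-in for the example, not the CPER layout from this patch:

#include <stdio.h>
#include <stdint.h>

struct demo_section {
	uint32_t checksum;
	uint8_t payload[12];
};

static uint32_t sum_bytes(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	struct demo_section s = { .payload = "poison data" };
	uint32_t sum;

	/* Writer: checksum everything after the checksum field, store the complement. */
	sum = sum_bytes((uint8_t *)&s + sizeof(s.checksum), sizeof(s) - sizeof(s.checksum));
	s.checksum = -sum;

	/* Reader: recompute and add the stored value; zero means the section is intact. */
	sum = sum_bytes((uint8_t *)&s + sizeof(s.checksum), sizeof(s) - sizeof(s.checksum));
	printf("valid=%d\n", (uint32_t)(sum + s.checksum) == 0);
	return 0;
}
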
+
+static bool rec_has_valid_entries(struct fru_rec *rec)
+{
+ if (!(rec->fmp.validation_bits & FMP_VALID_LIST_ENTRIES))
+ return false;
+
+ if (!(rec->fmp.validation_bits & FMP_VALID_LIST))
+ return false;
+
+ return true;
+}
+
+static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new)
+{
+ /*
+ * Ignore timestamp field.
+ * The same physical error may be reported multiple times due to stuck bits, etc.
+ *
+ * Also, order the checks from most->least likely to fail to shortcut the code.
+ */
+ if (old->addr != new->addr)
+ return false;
+
+ if (old->hw_id != new->hw_id)
+ return false;
+
+ if (old->addr_type != new->addr_type)
+ return false;
+
+ if (old->hw_id_type != new->hw_id_type)
+ return false;
+
+ return true;
+}
+
+static bool rec_has_fpd(struct fru_rec *rec, struct cper_fru_poison_desc *fpd)
+{
+ unsigned int i;
+
+ for (i = 0; i < rec->fmp.nr_entries; i++) {
+ struct cper_fru_poison_desc *fpd_i = &rec->entries[i];
+
+ if (fpds_equal(fpd_i, fpd)) {
+ pr_debug("Found duplicate record\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void save_spa(struct fru_rec *rec, unsigned int entry,
+ u64 addr, u64 id, unsigned int cpu)
+{
+ unsigned int i, fru_idx, spa_entry;
+ struct atl_err a_err;
+ unsigned long spa;
+
+ if (entry >= max_nr_entries) {
+ pr_warn_once("FRU descriptor entry %d out-of-bounds (max: %d)\n",
+ entry, max_nr_entries);
+ return;
+ }
+
+ /* spa_nr_entries is always a multiple of max_nr_entries */
+ for (i = 0; i < spa_nr_entries; i += max_nr_entries) {
+ fru_idx = i / max_nr_entries;
+ if (fru_records[fru_idx] == rec)
+ break;
+ }
+
+ if (i >= spa_nr_entries) {
+ pr_warn_once("FRU record %d not found\n", i);
+ return;
+ }
+
+ spa_entry = i + entry;
+ if (spa_entry >= spa_nr_entries) {
+ pr_warn_once("spa_entries[] index out-of-bounds\n");
+ return;
+ }
+
+ memset(&a_err, 0, sizeof(struct atl_err));
+
+ a_err.addr = addr;
+ a_err.ipid = id;
+ a_err.cpu = cpu;
+
+ spa = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
+ if (IS_ERR_VALUE(spa)) {
+ pr_debug("Failed to get system address\n");
+ return;
+ }
+
+ spa_entries[spa_entry] = spa;
+ pr_debug("fru_idx: %u, entry: %u, spa_entry: %u, spa: 0x%016llx\n",
+ fru_idx, entry, spa_entry, spa_entries[spa_entry]);
+}
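
The spa_entries[] indexing above works out to fru_idx * max_nr_entries + entry, which fmpm_show() later inverts with division and modulo. A tiny sketch with the default entry count and made-up indices:

#include <stdio.h>

int main(void)
{
	unsigned int max_nr_entries = 8;	/* module parameter default */
	unsigned int fru_idx = 2, entry = 3;	/* hypothetical FRU and descriptor */
	unsigned int spa_entry = fru_idx * max_nr_entries + entry;

	/* Prints spa_entry=19 fru_idx=2 entry=3. */
	printf("spa_entry=%u fru_idx=%u entry=%u\n",
	       spa_entry, spa_entry / max_nr_entries, spa_entry % max_nr_entries);
	return 0;
}
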
+
+static void update_fru_record(struct fru_rec *rec, struct mce *m)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ struct cper_fru_poison_desc fpd, *fpd_dest;
+ u32 entry = 0;
+
+ mutex_lock(&fmpm_update_mutex);
+
+ memset(&fpd, 0, sizeof(struct cper_fru_poison_desc));
+
+ fpd.timestamp = m->time;
+ fpd.hw_id_type = FPD_HW_ID_TYPE_MCA_IPID;
+ fpd.hw_id = m->ipid;
+ fpd.addr_type = FPD_ADDR_TYPE_MCA_ADDR;
+ fpd.addr = m->addr;
+
+ /* This is the first entry, so just save it. */
+ if (!rec_has_valid_entries(rec))
+ goto save_fpd;
+
+ /* Ignore already recorded errors. */
+ if (rec_has_fpd(rec, &fpd))
+ goto out_unlock;
+
+ if (rec->fmp.nr_entries >= max_nr_entries) {
+ pr_warn("Exceeded number of entries for FRU 0x%016llx\n", rec->fmp.fru_id);
+ goto out_unlock;
+ }
+
+ entry = fmp->nr_entries;
+
+save_fpd:
+ save_spa(rec, entry, m->addr, m->ipid, m->extcpu);
+ fpd_dest = &rec->entries[entry];
+ memcpy(fpd_dest, &fpd, sizeof(struct cper_fru_poison_desc));
+
+ fmp->nr_entries = entry + 1;
+ fmp->validation_bits |= FMP_VALID_LIST_ENTRIES;
+ fmp->validation_bits |= FMP_VALID_LIST;
+
+ pr_debug("Updated FRU 0x%016llx entry #%u\n", fmp->fru_id, entry);
+
+ update_record_on_storage(rec);
+
+out_unlock:
+ mutex_unlock(&fmpm_update_mutex);
+}
+
+static void retire_dram_row(u64 addr, u64 id, u32 cpu)
+{
+ struct atl_err a_err;
+
+ memset(&a_err, 0, sizeof(struct atl_err));
+
+ a_err.addr = addr;
+ a_err.ipid = id;
+ a_err.cpu = cpu;
+
+ amd_retire_dram_row(&a_err);
+}
+
+static int fru_handle_mem_poison(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *m = (struct mce *)data;
+ struct fru_rec *rec;
+
+ if (!mce_is_memory_error(m))
+ return NOTIFY_DONE;
+
+ retire_dram_row(m->addr, m->ipid, m->extcpu);
+
+ /*
+ * An invalid FRU ID should not happen on real errors. But it
+ * could happen from software error injection, etc.
+ */
+ rec = get_fru_record(m->ppin);
+ if (!rec)
+ return NOTIFY_DONE;
+
+ update_fru_record(rec, m);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fru_mem_poison_nb = {
+ .notifier_call = fru_handle_mem_poison,
+ .priority = MCE_PRIO_LOWEST,
+};
+
+static void retire_mem_fmp(struct fru_rec *rec)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ unsigned int i, cpu;
+
+ for (i = 0; i < fmp->nr_entries; i++) {
+ struct cper_fru_poison_desc *fpd = &rec->entries[i];
+ unsigned int err_cpu = INVALID_CPU;
+
+ if (fpd->hw_id_type != FPD_HW_ID_TYPE_MCA_IPID)
+ continue;
+
+ if (fpd->addr_type != FPD_ADDR_TYPE_MCA_ADDR)
+ continue;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ if (topology_ppin(cpu) == fmp->fru_id) {
+ err_cpu = cpu;
+ break;
+ }
+ }
+ cpus_read_unlock();
+
+ if (err_cpu == INVALID_CPU)
+ continue;
+
+ retire_dram_row(fpd->addr, fpd->hw_id, err_cpu);
+ save_spa(rec, i, fpd->addr, fpd->hw_id, err_cpu);
+ }
+}
+
+static void retire_mem_records(void)
+{
+ struct fru_rec *rec;
+ unsigned int i;
+
+ for_each_fru(i, rec) {
+ if (!rec_has_valid_entries(rec))
+ continue;
+
+ retire_mem_fmp(rec);
+ }
+}
+
+/* Set the CPER Record Header and CPER Section Descriptor fields. */
+static void set_rec_fields(struct fru_rec *rec)
+{
+ struct cper_section_descriptor *sec_desc = &rec->sec_desc;
+ struct cper_record_header *hdr = &rec->hdr;
+
+ memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ hdr->revision = CPER_RECORD_REV;
+ hdr->signature_end = CPER_SIG_END;
+
+ /*
+ * Currently, it is assumed that there is one FRU Memory Poison
+ * section per CPER. But this may change for other implementations.
+ */
+ hdr->section_count = 1;
+
+ /* The logged errors are recoverable. Otherwise, they'd never make it here. */
+ hdr->error_severity = CPER_SEV_RECOVERABLE;
+
+ hdr->validation_bits = 0;
+ hdr->record_length = max_rec_len;
+ hdr->creator_id = CPER_CREATOR_FMP;
+ hdr->notification_type = CPER_NOTIFY_MCE;
+ hdr->record_id = cper_next_record_id();
+ hdr->flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ sec_desc->section_offset = sizeof(struct cper_record_header);
+ sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);
+ sec_desc->revision = CPER_SEC_REV;
+ sec_desc->validation_bits = 0;
+ sec_desc->flags = CPER_SEC_PRIMARY;
+ sec_desc->section_type = CPER_SECTION_TYPE_FMP;
+ sec_desc->section_severity = CPER_SEV_RECOVERABLE;
+}
+
+static int save_new_records(void)
+{
+ DECLARE_BITMAP(new_records, FMPM_MAX_NR_FRU);
+ struct fru_rec *rec;
+ unsigned int i;
+ int ret = 0;
+
+ /* Clear the bitmap so only records written below get cleared on error. */
+ bitmap_zero(new_records, FMPM_MAX_NR_FRU);
+
+ for_each_fru(i, rec) {
+ if (rec->hdr.record_length)
+ continue;
+
+ set_rec_fields(rec);
+
+ ret = update_record_on_storage(rec);
+ if (ret)
+ goto out_clear;
+
+ set_bit(i, new_records);
+ }
+
+ return ret;
+
+out_clear:
+ for_each_fru(i, rec) {
+ if (!test_bit(i, new_records))
+ continue;
+
+ erst_clear(rec->hdr.record_id);
+ }
+
+ return ret;
+}
+
+/* Check that the record matches expected types for the current system. */
+static bool fmp_is_usable(struct fru_rec *rec)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ u64 cpuid;
+
+ pr_debug("Validation bits: 0x%016llx\n", fmp->validation_bits);
+
+ if (!(fmp->validation_bits & FMP_VALID_ARCH_TYPE)) {
+ pr_debug("Arch type unknown\n");
+ return false;
+ }
+
+ if (fmp->fru_arch_type != FMP_ARCH_TYPE_X86_CPUID_1_EAX) {
+ pr_debug("Arch type not 'x86 Family/Model/Stepping'\n");
+ return false;
+ }
+
+ if (!(fmp->validation_bits & FMP_VALID_ARCH)) {
+ pr_debug("Arch value unknown\n");
+ return false;
+ }
+
+ cpuid = cpuid_eax(1);
+ if (fmp->fru_arch != cpuid) {
+ pr_debug("Arch value mismatch: record = 0x%016llx, system = 0x%016llx\n",
+ fmp->fru_arch, cpuid);
+ return false;
+ }
+
+ if (!(fmp->validation_bits & FMP_VALID_ID_TYPE)) {
+ pr_debug("FRU ID type unknown\n");
+ return false;
+ }
+
+ if (fmp->fru_id_type != FMP_ID_TYPE_X86_PPIN) {
+ pr_debug("FRU ID type is not 'x86 PPIN'\n");
+ return false;
+ }
+
+ if (!(fmp->validation_bits & FMP_VALID_ID)) {
+ pr_debug("FRU ID value unknown\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool fmp_is_valid(struct fru_rec *rec)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ u32 checksum, len;
+
+ len = get_fmp_len(rec);
+ if (len < sizeof(struct cper_sec_fru_mem_poison)) {
+ pr_debug("fmp length is too small\n");
+ return false;
+ }
+
+ /* Checksum must sum to zero for the entire section. */
+ checksum = do_fmp_checksum(fmp, len) + fmp->checksum;
+ if (checksum) {
+ pr_debug("fmp checksum failed: sum = 0x%x\n", checksum);
+ print_hex_dump_debug("fmp record: ", DUMP_PREFIX_NONE, 16, 1, fmp, len, false);
+ return false;
+ }
+
+ if (!fmp_is_usable(rec))
+ return false;
+
+ return true;
+}
+
+static struct fru_rec *get_valid_record(struct fru_rec *old)
+{
+ struct fru_rec *new;
+
+ if (!fmp_is_valid(old)) {
+ pr_debug("Ignoring invalid record\n");
+ return NULL;
+ }
+
+ new = get_fru_record(old->fmp.fru_id);
+ if (!new)
+ pr_debug("Ignoring record for absent FRU\n");
+
+ return new;
+}
+
+/*
+ * Fetch saved records from persistent storage.
+ *
+ * For each found record:
+ * - If it was not created by this module, then ignore it.
+ * - If it is valid, then copy its data to the local cache.
+ * - If it is not valid, then erase it.
+ */
+static int get_saved_records(void)
+{
+ struct fru_rec *old, *new;
+ u64 record_id;
+ int ret, pos;
+ ssize_t len;
+
+ /*
+ * Assume saved records match current max size.
+ *
+ * However, this may not be true depending on module parameters.
+ */
+ old = kmalloc(max_rec_len, GFP_KERNEL);
+ if (!old) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = erst_get_record_id_begin(&pos);
+ if (ret < 0)
+ goto out_end;
+
+ while (!erst_get_record_id_next(&pos, &record_id)) {
+ if (record_id == APEI_ERST_INVALID_RECORD_ID)
+ goto out_end;
+ /*
+ * Make sure to clear temporary buffer between reads to avoid
+ * leftover data from records of various sizes.
+ */
+ memset(old, 0, max_rec_len);
+
+ len = erst_read_record(record_id, &old->hdr, max_rec_len,
+ sizeof(struct fru_rec), &CPER_CREATOR_FMP);
+ if (len < 0)
+ continue;
+
+ if (len > max_rec_len) {
+ pr_debug("Found record larger than max_rec_len\n");
+ continue;
+ }
+
+ new = get_valid_record(old);
+ if (!new) {
+ erst_clear(record_id);
+ continue;
+ }
+
+ /* Restore the record */
+ memcpy(new, old, len);
+ }
+
+out_end:
+ erst_get_record_id_end();
+ kfree(old);
+out:
+ return ret;
+}
+
+static void set_fmp_fields(struct fru_rec *rec, unsigned int cpu)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+
+ fmp->fru_arch_type = FMP_ARCH_TYPE_X86_CPUID_1_EAX;
+ fmp->validation_bits |= FMP_VALID_ARCH_TYPE;
+
+ /* Assume all CPUs in the system have the same value for now. */
+ fmp->fru_arch = cpuid_eax(1);
+ fmp->validation_bits |= FMP_VALID_ARCH;
+
+ fmp->fru_id_type = FMP_ID_TYPE_X86_PPIN;
+ fmp->validation_bits |= FMP_VALID_ID_TYPE;
+
+ fmp->fru_id = topology_ppin(cpu);
+ fmp->validation_bits |= FMP_VALID_ID;
+}
+
+static int init_fmps(void)
+{
+ struct fru_rec *rec;
+ unsigned int i, cpu;
+ int ret = 0;
+
+ for_each_fru(i, rec) {
+ unsigned int fru_cpu = INVALID_CPU;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ if (topology_physical_package_id(cpu) == i) {
+ fru_cpu = cpu;
+ break;
+ }
+ }
+ cpus_read_unlock();
+
+ if (fru_cpu == INVALID_CPU) {
+ pr_debug("Failed to find matching CPU for FRU #%u\n", i);
+ ret = -ENODEV;
+ break;
+ }
+
+ set_fmp_fields(rec, fru_cpu);
+ }
+
+ return ret;
+}
+
+static int get_system_info(void)
+{
+ /* Only load on MI300A systems for now. */
+ if (!(boot_cpu_data.x86_model >= 0x90 &&
+ boot_cpu_data.x86_model <= 0x9f))
+ return -ENODEV;
+
+ if (!cpu_feature_enabled(X86_FEATURE_AMD_PPIN)) {
+ pr_debug("PPIN feature not available\n");
+ return -ENODEV;
+ }
+
+ /* Use CPU socket as FRU for MI300 systems. */
+ max_nr_fru = topology_max_packages();
+ if (!max_nr_fru)
+ return -ENODEV;
+
+ if (max_nr_fru > FMPM_MAX_NR_FRU) {
+ pr_warn("Too many FRUs to manage: found: %u, max: %u\n",
+ max_nr_fru, FMPM_MAX_NR_FRU);
+ return -ENODEV;
+ }
+
+ if (!max_nr_entries)
+ max_nr_entries = FMPM_DEFAULT_MAX_NR_ENTRIES;
+
+ spa_nr_entries = max_nr_fru * max_nr_entries;
+
+ max_rec_len = sizeof(struct fru_rec);
+ max_rec_len += sizeof(struct cper_fru_poison_desc) * max_nr_entries;
+
+ pr_info("max FRUs: %u, max entries: %u, max record length: %lu\n",
+ max_nr_fru, max_nr_entries, max_rec_len);
+
+ return 0;
+}
+
+static void free_records(void)
+{
+ struct fru_rec *rec;
+ int i;
+
+ for_each_fru(i, rec)
+ kfree(rec);
+
+ kfree(fru_records);
+ kfree(spa_entries);
+}
+
+static int allocate_records(void)
+{
+ int i, ret = 0;
+
+ fru_records = kcalloc(max_nr_fru, sizeof(struct fru_rec *), GFP_KERNEL);
+ if (!fru_records) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < max_nr_fru; i++) {
+ fru_records[i] = kzalloc(max_rec_len, GFP_KERNEL);
+ if (!fru_records[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ }
+
+ spa_entries = kcalloc(spa_nr_entries, sizeof(u64), GFP_KERNEL);
+ if (!spa_entries) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (i = 0; i < spa_nr_entries; i++)
+ spa_entries[i] = INVALID_SPA;
+
+ return ret;
+
+out_free:
+ while (--i >= 0)
+ kfree(fru_records[i]);
+
+ kfree(fru_records);
+out:
+ return ret;
+}
+
+static void *fmpm_start(struct seq_file *f, loff_t *pos)
+{
+ if (*pos >= (spa_nr_entries + 1))
+ return NULL;
+ return pos;
+}
+
+static void *fmpm_next(struct seq_file *f, void *data, loff_t *pos)
+{
+ if (++(*pos) >= (spa_nr_entries + 1))
+ return NULL;
+ return pos;
+}
+
+static void fmpm_stop(struct seq_file *f, void *data)
+{
+}
+
+#define SHORT_WIDTH 8
+#define U64_WIDTH 18
+#define TIMESTAMP_WIDTH 19
+#define LONG_WIDTH 24
+#define U64_PAD (LONG_WIDTH - U64_WIDTH)
+#define TS_PAD (LONG_WIDTH - TIMESTAMP_WIDTH)
+static int fmpm_show(struct seq_file *f, void *data)
+{
+ unsigned int fru_idx, entry, spa_entry, line;
+ struct cper_fru_poison_desc *fpd;
+ struct fru_rec *rec;
+
+ line = *(loff_t *)data;
+ if (line == 0) {
+ seq_printf(f, "%-*s", SHORT_WIDTH, "fru_idx");
+ seq_printf(f, "%-*s", LONG_WIDTH, "fru_id");
+ seq_printf(f, "%-*s", SHORT_WIDTH, "entry");
+ seq_printf(f, "%-*s", LONG_WIDTH, "timestamp");
+ seq_printf(f, "%-*s", LONG_WIDTH, "hw_id");
+ seq_printf(f, "%-*s", LONG_WIDTH, "addr");
+ seq_printf(f, "%-*s", LONG_WIDTH, "spa");
+ goto out_newline;
+ }
+
+ spa_entry = line - 1;
+ fru_idx = spa_entry / max_nr_entries;
+ entry = spa_entry % max_nr_entries;
+
+ rec = fru_records[fru_idx];
+ if (!rec)
+ goto out;
+
+ seq_printf(f, "%-*u", SHORT_WIDTH, fru_idx);
+ seq_printf(f, "0x%016llx%-*s", rec->fmp.fru_id, U64_PAD, "");
+ seq_printf(f, "%-*u", SHORT_WIDTH, entry);
+
+ mutex_lock(&fmpm_update_mutex);
+
+ if (entry >= rec->fmp.nr_entries) {
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ goto out_unlock;
+ }
+
+ fpd = &rec->entries[entry];
+
+ seq_printf(f, "%ptT%-*s", &fpd->timestamp, TS_PAD, "");
+ seq_printf(f, "0x%016llx%-*s", fpd->hw_id, U64_PAD, "");
+ seq_printf(f, "0x%016llx%-*s", fpd->addr, U64_PAD, "");
+
+ if (spa_entries[spa_entry] == INVALID_SPA)
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ else
+ seq_printf(f, "0x%016llx%-*s", spa_entries[spa_entry], U64_PAD, "");
+
+out_unlock:
+ mutex_unlock(&fmpm_update_mutex);
+out_newline:
+ seq_putc(f, '\n');
+out:
+ return 0;
+}
+
+static const struct seq_operations fmpm_seq_ops = {
+ .start = fmpm_start,
+ .next = fmpm_next,
+ .stop = fmpm_stop,
+ .show = fmpm_show,
+};
+
+static int fmpm_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fmpm_seq_ops);
+}
+
+static const struct file_operations fmpm_fops = {
+ .open = fmpm_open,
+ .release = seq_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void setup_debugfs(void)
+{
+ struct dentry *dfs = ras_get_debugfs_root();
+
+ if (!dfs)
+ return;
+
+ fmpm_dfs_dir = debugfs_create_dir("fmpm", dfs);
+ if (!fmpm_dfs_dir)
+ return;
+
+ fmpm_dfs_entries = debugfs_create_file("entries", 0400, fmpm_dfs_dir, NULL, &fmpm_fops);
+ if (!fmpm_dfs_entries)
+ debugfs_remove(fmpm_dfs_dir);
+}
+
+static const struct x86_cpu_id fmpm_cpuids[] = {
+ X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, fmpm_cpuids);
+
+static int __init fru_mem_poison_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(fmpm_cpuids)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (erst_disable) {
+ pr_debug("ERST not available\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = get_system_info();
+ if (ret)
+ goto out;
+
+ ret = allocate_records();
+ if (ret)
+ goto out;
+
+ ret = init_fmps();
+ if (ret)
+ goto out_free;
+
+ ret = get_saved_records();
+ if (ret)
+ goto out_free;
+
+ ret = save_new_records();
+ if (ret)
+ goto out_free;
+
+ setup_debugfs();
+
+ retire_mem_records();
+
+ mce_register_decode_chain(&fru_mem_poison_nb);
+
+ pr_info("FRU Memory Poison Manager initialized\n");
+ return 0;
+
+out_free:
+ free_records();
+out:
+ return ret;
+}
+
+static void __exit fru_mem_poison_exit(void)
+{
+ mce_unregister_decode_chain(&fru_mem_poison_nb);
+ debugfs_remove(fmpm_dfs_dir);
+ free_records();
+}
+
+module_init(fru_mem_poison_init);
+module_exit(fru_mem_poison_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FRU Memory Poison Manager");
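The fmpm_show() sequence above emits a fixed-width header row followed by one row per (fru_idx, entry) slot. A minimal userspace sketch for dumping that table follows; the path is an assumption (debugfs mounted at /sys/kernel/debug, RAS root directory named "ras"):

#include <stdio.h>

int main(void)
{
	char line[256];
	/* assumed location of the file created by setup_debugfs() */
	FILE *f = fopen("/sys/kernel/debug/ras/fmpm/entries", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* header row, then one row per slot */
	fclose(f);
	return 0;
}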
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 321af498ee11..e440b15fbabc 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -480,9 +480,15 @@ DEFINE_SHOW_ATTRIBUTE(array);
static int __init create_debugfs_nodes(void)
{
- struct dentry *d, *pfn, *decay, *count, *array;
+ struct dentry *d, *pfn, *decay, *count, *array, *dfs;
- d = debugfs_create_dir("cec", ras_debugfs_dir);
+ dfs = ras_get_debugfs_root();
+ if (!dfs) {
+ pr_warn("Error getting RAS debugfs root!\n");
+ return -1;
+ }
+
+ d = debugfs_create_dir("cec", dfs);
if (!d) {
pr_warn("Error creating cec debugfs node!\n");
return -1;
diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c
index ffb973c328e3..42afd3de68b2 100644
--- a/drivers/ras/debugfs.c
+++ b/drivers/ras/debugfs.c
@@ -3,10 +3,16 @@
#include <linux/ras.h>
#include "debugfs.h"
-struct dentry *ras_debugfs_dir;
+static struct dentry *ras_debugfs_dir;
static atomic_t trace_count = ATOMIC_INIT(0);
+struct dentry *ras_get_debugfs_root(void)
+{
+ return ras_debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(ras_get_debugfs_root);
+
int ras_userspace_consumers(void)
{
return atomic_read(&trace_count);
diff --git a/drivers/ras/debugfs.h b/drivers/ras/debugfs.h
index c07443b462ad..4749ccdeeba1 100644
--- a/drivers/ras/debugfs.h
+++ b/drivers/ras/debugfs.h
@@ -4,6 +4,6 @@
#include <linux/debugfs.h>
-extern struct dentry *ras_debugfs_dir;
+struct dentry *ras_get_debugfs_root(void);
#endif /* __RAS_DEBUGFS_H__ */
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
index 95540ea8dd9d..a6e4792a1b2e 100644
--- a/drivers/ras/ras.c
+++ b/drivers/ras/ras.c
@@ -10,6 +10,37 @@
#include <linux/ras.h>
#include <linux/uuid.h>
+#if IS_ENABLED(CONFIG_AMD_ATL)
+/*
+ * Once set, this function pointer should never be unset.
+ *
+ * The library module will set this pointer if it successfully loads. The module
+ * should not be unloaded except for testing and debug purposes.
+ */
+static unsigned long (*amd_atl_umc_na_to_spa)(struct atl_err *err);
+
+void amd_atl_register_decoder(unsigned long (*f)(struct atl_err *))
+{
+ amd_atl_umc_na_to_spa = f;
+}
+EXPORT_SYMBOL_GPL(amd_atl_register_decoder);
+
+void amd_atl_unregister_decoder(void)
+{
+ amd_atl_umc_na_to_spa = NULL;
+}
+EXPORT_SYMBOL_GPL(amd_atl_unregister_decoder);
+
+unsigned long amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err)
+{
+ if (!amd_atl_umc_na_to_spa)
+ return -EINVAL;
+
+ return amd_atl_umc_na_to_spa(err);
+}
+EXPORT_SYMBOL_GPL(amd_convert_umc_mca_addr_to_sys_addr);
+#endif /* CONFIG_AMD_ATL */
+
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>
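The CONFIG_AMD_ATL block above is only the hook side; the translation library registers itself at load time. A hedged sketch of what that library-module side could look like, assuming the prototypes and struct atl_err are exposed via <linux/ras.h> (the translation routine here is a stand-in, not the real AMD address translation logic):

#include <linux/module.h>
#include <linux/ras.h>

/* Stand-in translator: a real implementation would decode err->addr. */
static unsigned long demo_umc_na_to_spa(struct atl_err *err)
{
	return err->addr;
}

static int __init demo_atl_init(void)
{
	amd_atl_register_decoder(demo_umc_na_to_spa);
	return 0;
}
module_init(demo_atl_init);

static void __exit demo_atl_exit(void)
{
	amd_atl_unregister_decoder();
}
module_exit(demo_atl_exit);

MODULE_LICENSE("GPL");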
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a968dabb48f5..d019ca6dee9b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3932,7 +3932,6 @@ static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
if (ret < 0)
return ret;
- possible_uV = desired_min_uV;
done = true;
goto finish;
@@ -5891,7 +5890,7 @@ static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
};
#endif
-struct class regulator_class = {
+const struct class regulator_class = {
.name = "regulator",
.dev_release = regulator_dev_release,
.dev_groups = regulator_dev_groups,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 8fd9ac787588..352547c375bd 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -413,31 +412,35 @@ static struct da9055_regulator_info da9055_regulator_info[] = {
* GPIO can control regulator state and/or select the regulator register
* set A/B for voltage ramping.
*/
-static int da9055_gpio_init(struct da9055_regulator *regulator,
+static int da9055_gpio_init(struct device *dev,
+ struct da9055_regulator *regulator,
struct regulator_config *config,
struct da9055_pdata *pdata, int id)
{
struct da9055_regulator_info *info = regulator->info;
+ struct gpio_desc *ren;
+ struct gpio_desc *ena;
+ struct gpio_desc *rsel;
int ret = 0;
- if (!pdata)
- return 0;
+ /* Look for "regulator-enable-gpios" GPIOs in the regulator node */
+ ren = devm_gpiod_get_optional(dev, "regulator-enable", GPIOD_IN);
+ if (IS_ERR(ren))
+ return PTR_ERR(ren);
- if (pdata->gpio_ren && pdata->gpio_ren[id]) {
- char name[18];
- int gpio_mux = pdata->gpio_ren[id];
+ if (ren) {
+ /* This GPIO is not optional at this point */
+ ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ena))
+ return PTR_ERR(ena);
- config->ena_gpiod = pdata->ena_gpiods[id];
+ config->ena_gpiod = ena;
/*
* GPI pin is muxed with regulator to control the
* regulator state.
*/
- sprintf(name, "DA9055 GPI %d", gpio_mux);
- ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN,
- name);
- if (ret < 0)
- goto err;
+ gpiod_set_consumer_name(ren, "DA9055 ren GPI");
/*
* Let the regulator know that its state is controlled
@@ -448,24 +451,22 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
pdata->reg_ren[id]
<< DA9055_E_GPI_SHIFT);
if (ret < 0)
- goto err;
+ return ret;
}
- if (pdata->gpio_rsel && pdata->gpio_rsel[id]) {
- char name[18];
- int gpio_mux = pdata->gpio_rsel[id];
+ /* Look for "regulator-select-gpios" GPIOs in the regulator node */
+ rsel = devm_gpiod_get_optional(dev, "regulator-select", GPIOD_IN);
+ if (IS_ERR(rsel))
+ return PTR_ERR(rsel);
+ if (rsel) {
regulator->reg_rselect = pdata->reg_rsel[id];
/*
* GPI pin is muxed with regulator to select the
* regulator register set A/B for voltage ramping.
*/
- sprintf(name, "DA9055 GPI %d", gpio_mux);
- ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN,
- name);
- if (ret < 0)
- goto err;
+ gpiod_set_consumer_name(rsel, "DA9055 rsel GPI");
/*
* Let the regulator know that its register set A/B
@@ -477,7 +478,6 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
<< DA9055_V_GPI_SHIFT);
}
-err:
return ret;
}
@@ -532,7 +532,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
if (pdata)
config.init_data = pdata->regulators[pdev->id];
- ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
+ ret = da9055_gpio_init(&pdev->dev, regulator, &config, pdata, pdev->id);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index b551a400bdd1..5ee76b533576 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -14,7 +14,6 @@
// Copyright (C) 2020 Dialog Semiconductor
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 0eb2442456f0..2d5a42b2b3d8 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -15,7 +15,7 @@ static void regulator_fixed_release(struct device *dev)
{
struct fixed_regulator_data *data = container_of(dev,
struct fixed_regulator_data, pdev.dev);
- kfree(data->cfg.supply_name);
+ kfree_const(data->cfg.supply_name);
kfree(data);
}
@@ -36,7 +36,7 @@ struct platform_device *regulator_register_always_on(int id, const char *name,
if (!data)
return NULL;
- data->cfg.supply_name = kstrdup(name, GFP_KERNEL);
+ data->cfg.supply_name = kstrdup_const(name, GFP_KERNEL);
if (!data->cfg.supply_name) {
kfree(data);
return NULL;
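The fixed-helper change above depends on kstrdup_const() and kfree_const() being used as a pair: the former returns the source pointer unchanged when it lives in .rodata, the latter only frees memory that was actually duplicated. A small sketch of that contract (the demo_* names are made up):

#include <linux/slab.h>
#include <linux/string.h>

static const char *demo_dup_name(const char *name)
{
	/* May return 'name' itself if it points into .rodata. */
	return kstrdup_const(name, GFP_KERNEL);
}

static void demo_free_name(const char *name)
{
	/* No-op for strings that were never copied. */
	kfree_const(name);
}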
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index fb4433068d29..77a502141089 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -58,7 +58,7 @@ struct regulator {
struct dentry *debugfs;
};
-extern struct class regulator_class;
+extern const struct class regulator_class;
static inline struct regulator_dev *dev_to_rdev(struct device *dev)
{
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index 8dfdd1db2070..84a134cfcd9c 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -93,7 +94,7 @@ static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
ret = regmap_update_bits(lp873->regmap, regulators[id].ctrl2_reg,
LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE,
- reg << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE));
+ FIELD_PREP(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE, reg));
if (ret) {
dev_err(lp873->dev, "SLEW RATE write failed: %d\n", ret);
return ret;
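This lp873x hunk (and the lp87565 one that follows) swaps an open-coded shift by __ffs(mask) for FIELD_PREP(), which masks the value as well as shifting it. A sketch of the equivalence for a contiguous field; the mask below is hypothetical, not the real BUCK0 slew-rate layout:

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_SLEW_RATE_MASK	GENMASK(5, 3)	/* hypothetical 3-bit field */

static inline u32 demo_slew_old(u32 reg)
{
	return reg << __ffs(DEMO_SLEW_RATE_MASK);	/* shift only */
}

static inline u32 demo_slew_new(u32 reg)
{
	return FIELD_PREP(DEMO_SLEW_RATE_MASK, reg);	/* shift + mask */
}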
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 61ee5cf3f241..1259b5d20153 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -99,7 +100,7 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
ret = regmap_update_bits(rdev->regmap, regulators[id].ctrl2_reg,
LP87565_BUCK_CTRL_2_SLEW_RATE,
- reg << __ffs(LP87565_BUCK_CTRL_2_SLEW_RATE));
+ FIELD_PREP(LP87565_BUCK_CTRL_2_SLEW_RATE, reg));
if (ret) {
dev_err(&rdev->dev, "SLEW RATE write failed: %d\n", ret);
return ret;
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index e97ade09dede..2ade249ab6df 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/lp8788.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
/* register address */
#define LP8788_EN_BUCK 0x0C
@@ -69,8 +69,8 @@
#define BUCK_FPWM_SHIFT(x) (x)
enum lp8788_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
+ DVS_LOW = 0,
+ DVS_HIGH = 1,
};
enum lp8788_dvs_mode {
@@ -89,6 +89,8 @@ struct lp8788_buck {
struct lp8788 *lp;
struct regulator_dev *regulator;
void *dvs;
+ struct gpio_desc *gpio1;
+ struct gpio_desc *gpio2; /* Only used on BUCK2 */
};
/* BUCK 1 ~ 4 voltage ranges */
@@ -106,8 +108,7 @@ static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
return;
pinstate = dvs->vsel == DVS_SEL_V0 ? DVS_LOW : DVS_HIGH;
- if (gpio_is_valid(dvs->gpio))
- gpio_set_value(dvs->gpio, pinstate);
+ gpiod_set_value(buck->gpio1, pinstate);
}
static void lp8788_buck2_set_dvs(struct lp8788_buck *buck)
@@ -139,11 +140,8 @@ static void lp8788_buck2_set_dvs(struct lp8788_buck *buck)
return;
}
- if (gpio_is_valid(dvs->gpio[0]))
- gpio_set_value(dvs->gpio[0], pin1);
-
- if (gpio_is_valid(dvs->gpio[1]))
- gpio_set_value(dvs->gpio[1], pin2);
+ gpiod_set_value(buck->gpio1, pin1);
+ gpiod_set_value(buck->gpio2, pin2);
}
static void lp8788_set_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
@@ -202,19 +200,13 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
enum lp8788_buck_id id)
{
enum lp8788_dvs_mode mode = lp8788_get_buck_dvs_ctrl_mode(buck, id);
- struct lp8788_buck1_dvs *b1_dvs;
- struct lp8788_buck2_dvs *b2_dvs;
u8 val, idx, addr;
int pin1, pin2;
switch (id) {
case BUCK1:
if (mode == EXTPIN) {
- b1_dvs = (struct lp8788_buck1_dvs *)buck->dvs;
- if (!b1_dvs)
- goto err;
-
- idx = gpio_get_value(b1_dvs->gpio) ? 1 : 0;
+ idx = gpiod_get_value(buck->gpio1);
} else {
lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
@@ -223,12 +215,8 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
break;
case BUCK2:
if (mode == EXTPIN) {
- b2_dvs = (struct lp8788_buck2_dvs *)buck->dvs;
- if (!b2_dvs)
- goto err;
-
- pin1 = gpio_get_value(b2_dvs->gpio[0]);
- pin2 = gpio_get_value(b2_dvs->gpio[1]);
+ pin1 = gpiod_get_value(buck->gpio1);
+ pin2 = gpiod_get_value(buck->gpio2);
if (pin1 == PIN_LOW && pin2 == PIN_LOW)
idx = 0;
@@ -424,28 +412,28 @@ static int lp8788_dvs_gpio_request(struct platform_device *pdev,
enum lp8788_buck_id id)
{
struct lp8788_platform_data *pdata = buck->lp->pdata;
- char *b1_name = "LP8788_B1_DVS";
- char *b2_name[] = { "LP8788_B2_DVS1", "LP8788_B2_DVS2" };
- int i, gpio, ret;
+ struct device *dev = &pdev->dev;
switch (id) {
case BUCK1:
- gpio = pdata->buck1_dvs->gpio;
- ret = devm_gpio_request_one(&pdev->dev, gpio, DVS_LOW,
- b1_name);
- if (ret)
- return ret;
+ buck->gpio1 = devm_gpiod_get(dev, "dvs", GPIOD_OUT_LOW);
+ if (IS_ERR(buck->gpio1))
+ return PTR_ERR(buck->gpio1);
+ gpiod_set_consumer_name(buck->gpio1, "LP8788_B1_DVS");
buck->dvs = pdata->buck1_dvs;
break;
case BUCK2:
- for (i = 0; i < LP8788_NUM_BUCK2_DVS; i++) {
- gpio = pdata->buck2_dvs->gpio[i];
- ret = devm_gpio_request_one(&pdev->dev, gpio,
- DVS_LOW, b2_name[i]);
- if (ret)
- return ret;
- }
+ buck->gpio1 = devm_gpiod_get_index(dev, "dvs", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(buck->gpio1))
+ return PTR_ERR(buck->gpio1);
+ gpiod_set_consumer_name(buck->gpio1, "LP8788_B2_DVS1");
+
+ buck->gpio2 = devm_gpiod_get_index(dev, "dvs", 1, GPIOD_OUT_LOW);
+ if (IS_ERR(buck->gpio2))
+ return PTR_ERR(buck->gpio2);
+ gpiod_set_consumer_name(buck->gpio2, "LP8788_B2_DVS2");
+
buck->dvs = pdata->buck2_dvs;
break;
default:
diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
index 830a1c4cd705..8bbcd983a74a 100644
--- a/drivers/regulator/max5970-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -29,8 +29,8 @@ struct max5970_regulator {
};
enum max597x_regulator_id {
- MAX597X_SW0,
- MAX597X_SW1,
+ MAX597X_sw0,
+ MAX597X_sw1,
};
static int max5970_read_adc(struct regmap *regmap, int reg, long *val)
@@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np,
}
static const struct regulator_desc regulators[] = {
- MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
- MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+ MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
+ MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
};
static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 8d5193207552..f8bb6828feef 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -20,9 +20,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/max8973-regulator.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -102,7 +100,7 @@ struct max8973_chip {
struct regulator_desc desc;
struct regmap *regmap;
bool enable_external_control;
- int dvs_gpio;
+ struct gpio_desc *dvs_gpiod;
int lru_index[MAX8973_MAX_VOUT_REG];
int curr_vout_val[MAX8973_MAX_VOUT_REG];
int curr_vout_reg;
@@ -184,7 +182,7 @@ static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev,
* If gpios are available to select the VOUT register then least
* recently used register for new configuration.
*/
- if (gpio_is_valid(max->dvs_gpio))
+ if (max->dvs_gpiod)
found = find_voltage_set_register(max, vsel,
&vout_reg, &gpio_val);
@@ -201,8 +199,8 @@ static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev,
}
/* Select proper VOUT register via gpios */
- if (gpio_is_valid(max->dvs_gpio)) {
- gpio_set_value_cansleep(max->dvs_gpio, gpio_val & 0x1);
+ if (max->dvs_gpiod) {
+ gpiod_set_value_cansleep(max->dvs_gpiod, gpio_val & 0x1);
max->curr_gpio_val = gpio_val;
}
return 0;
@@ -531,7 +529,6 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->enable_ext_control = of_property_read_bool(np,
"maxim,externally-enable");
- pdata->dvs_gpio = of_get_named_gpio(np, "maxim,dvs-gpio", 0);
ret = of_property_read_u32(np, "maxim,dvs-default-state", &pval);
if (!ret)
@@ -612,13 +609,17 @@ static int max8973_probe(struct i2c_client *client)
return -EIO;
}
- if (pdata->dvs_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
if (!max)
return -ENOMEM;
+ max->dvs_gpiod = devm_gpiod_get_optional(&client->dev, "maxim,dvs",
+ (pdata->dvs_def_state) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+ if (IS_ERR(max->dvs_gpiod))
+ return dev_err_probe(&client->dev, PTR_ERR(max->dvs_gpiod),
+ "failed to obtain dvs gpio\n");
+ gpiod_set_consumer_name(max->dvs_gpiod, "max8973-dvs");
+
max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
if (IS_ERR(max->regmap)) {
ret = PTR_ERR(max->regmap);
@@ -663,7 +664,6 @@ static int max8973_probe(struct i2c_client *client)
max->desc.ramp_delay_table = max8973_buck_ramp_table;
max->desc.n_ramp_values = ARRAY_SIZE(max8973_buck_ramp_table);
- max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL;
max->enable_external_control = pdata->enable_ext_control;
max->curr_gpio_val = pdata->dvs_def_state;
max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
@@ -671,21 +671,9 @@ static int max8973_probe(struct i2c_client *client)
max->lru_index[0] = max->curr_vout_reg;
- if (gpio_is_valid(max->dvs_gpio)) {
- int gpio_flags;
+ if (max->dvs_gpiod) {
int i;
- gpio_flags = (pdata->dvs_def_state) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&client->dev, max->dvs_gpio,
- gpio_flags, "max8973-dvs");
- if (ret) {
- dev_err(&client->dev,
- "gpio_request for gpio %d failed, err = %d\n",
- max->dvs_gpio, ret);
- return ret;
- }
-
/*
* Initialize the lru index with vout_reg id
* The index 0 will be most recently used and
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index 0b38eaa73597..5f201ee9a5b8 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -9,8 +9,7 @@
#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -32,7 +31,7 @@ struct max8997_data {
u8 buck1_vol[8];
u8 buck2_vol[8];
u8 buck5_vol[8];
- int buck125_gpios[3];
+ struct gpio_desc *buck125_gpiods[3];
int buck125_gpioindex;
bool ignore_gpiodvs_side_effect;
@@ -52,9 +51,9 @@ static inline void max8997_set_gpio(struct max8997_data *max8997)
int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
- gpio_set_value(max8997->buck125_gpios[0], set1);
- gpio_set_value(max8997->buck125_gpios[1], set2);
- gpio_set_value(max8997->buck125_gpios[2], set3);
+ gpiod_set_value(max8997->buck125_gpiods[0], set1);
+ gpiod_set_value(max8997->buck125_gpiods[1], set2);
+ gpiod_set_value(max8997->buck125_gpiods[2], set3);
}
struct voltage_map_desc {
@@ -873,31 +872,13 @@ static struct regulator_desc regulators[] = {
};
#ifdef CONFIG_OF
-static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
- struct max8997_platform_data *pdata,
- struct device_node *pmic_np)
-{
- int i, gpio;
-
- for (i = 0; i < 3; i++) {
- gpio = of_get_named_gpio(pmic_np,
- "max8997,pmic-buck125-dvs-gpios", i);
- if (!gpio_is_valid(gpio)) {
- dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
- return -EINVAL;
- }
- pdata->buck125_gpios[i] = gpio;
- }
- return 0;
-}
-
static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np, *reg_np;
struct max8997_regulator_data *rdata;
- unsigned int i, dvs_voltage_nr = 1, ret;
+ unsigned int i, dvs_voltage_nr = 1;
pmic_np = iodev->dev->of_node;
if (!pmic_np) {
@@ -949,10 +930,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
- ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
- if (ret)
- return -EINVAL;
-
if (of_property_read_u32(pmic_np,
"max8997,pmic-buck125-default-dvs-idx",
&pdata->buck125_default_idx)) {
@@ -1039,7 +1016,6 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
- memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
nr_dvs = (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
@@ -1110,38 +1086,27 @@ static int max8997_pmic_probe(struct platform_device *pdev)
*/
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
+ const char *gpio_names[3] = {"MAX8997 SET1", "MAX8997 SET2", "MAX8997 SET3"};
- if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
- !gpio_is_valid(pdata->buck125_gpios[1]) ||
- !gpio_is_valid(pdata->buck125_gpios[2])) {
- dev_err(&pdev->dev, "GPIO NOT VALID\n");
- return -EINVAL;
- }
+ for (i = 0; i < 3; i++) {
+ enum gpiod_flags flags;
- ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
- "MAX8997 SET1");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
- "MAX8997 SET2");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
- "MAX8997 SET3");
- if (ret)
- return ret;
-
- gpio_direction_output(pdata->buck125_gpios[0],
- (max8997->buck125_gpioindex >> 2)
- & 0x1); /* SET1 */
- gpio_direction_output(pdata->buck125_gpios[1],
- (max8997->buck125_gpioindex >> 1)
- & 0x1); /* SET2 */
- gpio_direction_output(pdata->buck125_gpios[2],
- (max8997->buck125_gpioindex >> 0)
- & 0x1); /* SET3 */
+ if (max8997->buck125_gpioindex & BIT(2 - i))
+ flags = GPIOD_OUT_HIGH;
+ else
+ flags = GPIOD_OUT_LOW;
+
+ max8997->buck125_gpiods[i] = devm_gpiod_get_index(iodev->dev,
+ "max8997,pmic-buck125-dvs",
+ i,
+ flags);
+ if (IS_ERR(max8997->buck125_gpiods[i])) {
+ ret = PTR_ERR(max8997->buck125_gpiods[i]);
+ return dev_err_probe(iodev->dev, ret, "cant get GPIO %d (%d)\n",
+ i, ret);
+ }
+ gpiod_set_consumer_name(max8997->buck125_gpiods[i], gpio_names[i]);
+ }
}
/* DVS-GPIO disabled */
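The max8997 hunks above and the max8998 hunks below follow the same descriptor-based shape: look the line up by function name and index, encode the initial level in the gpiod flags, then drive it with gpiod_set_value(). A condensed sketch of that shape with made-up names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/types.h>

static int demo_get_dvs_gpio(struct device *dev, bool initial_high,
			     struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	/* "demo-dvs" maps to a hypothetical demo-dvs-gpios DT property. */
	gpiod = devm_gpiod_get_index(dev, "demo-dvs", 0,
				     initial_high ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	gpiod_set_consumer_name(gpiod, "DEMO DVS");
	*out = gpiod;
	return 0;
}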
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index fadb4717384a..254a77887f66 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -10,12 +10,12 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/bits.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -31,6 +31,9 @@ struct max8998_data {
unsigned int buck1_idx; /* index to last changed voltage */
/* value in a set */
unsigned int buck2_idx;
+ struct gpio_desc *buck1_gpio1;
+ struct gpio_desc *buck1_gpio2;
+ struct gpio_desc *buck2_gpio;
};
static const unsigned int charger_current_table[] = {
@@ -227,15 +230,15 @@ static int max8998_set_voltage_ldo_sel(struct regulator_dev *rdev,
return ret;
}
-static inline void buck1_gpio_set(int gpio1, int gpio2, int v)
+static inline void buck1_gpio_set(struct gpio_desc *gpio1, struct gpio_desc *gpio2, int v)
{
- gpio_set_value(gpio1, v & 0x1);
- gpio_set_value(gpio2, (v >> 1) & 0x1);
+ gpiod_set_value(gpio1, v & 0x1);
+ gpiod_set_value(gpio2, (v >> 1) & 0x1);
}
-static inline void buck2_gpio_set(int gpio, int v)
+static inline void buck2_gpio_set(struct gpio_desc *gpio, int v)
{
- gpio_set_value(gpio, v & 0x1);
+ gpiod_set_value(gpio, v & 0x1);
}
static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
@@ -260,16 +263,15 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
selector, max8998->buck1_vol[0], max8998->buck1_vol[1],
max8998->buck1_vol[2], max8998->buck1_vol[3]);
- if (gpio_is_valid(pdata->buck1_set1) &&
- gpio_is_valid(pdata->buck1_set2)) {
+ if (max8998->buck1_gpio1 && max8998->buck1_gpio2) {
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) {
if (max8998->buck1_vol[j] == selector) {
max8998->buck1_idx = j;
- buck1_gpio_set(pdata->buck1_set1,
- pdata->buck1_set2, j);
+ buck1_gpio_set(max8998->buck1_gpio1,
+ max8998->buck1_gpio2, j);
goto buck1_exit;
}
}
@@ -286,13 +288,13 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
&shift,
&mask);
ret = max8998_write_reg(i2c, reg, selector);
- buck1_gpio_set(pdata->buck1_set1,
- pdata->buck1_set2, max8998->buck1_idx);
+ buck1_gpio_set(max8998->buck1_gpio1,
+ max8998->buck1_gpio2, max8998->buck1_idx);
buck1_last_val++;
buck1_exit:
dev_dbg(max8998->dev, "%s: SET1:%d, SET2:%d\n",
- i2c->name, gpio_get_value(pdata->buck1_set1),
- gpio_get_value(pdata->buck1_set2));
+ i2c->name, gpiod_get_value(max8998->buck1_gpio1),
+ gpiod_get_value(max8998->buck1_gpio2));
break;
} else {
ret = max8998_write_reg(i2c, reg, selector);
@@ -303,14 +305,13 @@ buck1_exit:
dev_dbg(max8998->dev,
"BUCK2, selector:%d buck2_vol1:%d, buck2_vol2:%d\n",
selector, max8998->buck2_vol[0], max8998->buck2_vol[1]);
- if (gpio_is_valid(pdata->buck2_set3)) {
-
+ if (max8998->buck2_gpio) {
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
if (max8998->buck2_vol[j] == selector) {
max8998->buck2_idx = j;
- buck2_gpio_set(pdata->buck2_set3, j);
+ buck2_gpio_set(max8998->buck2_gpio, j);
goto buck2_exit;
}
}
@@ -322,10 +323,10 @@ buck1_exit:
&reg, &shift, &mask);
ret = max8998_write_reg(i2c, reg, selector);
max8998->buck2_vol[max8998->buck2_idx] = selector;
- buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
+ buck2_gpio_set(max8998->buck2_gpio, max8998->buck2_idx);
buck2_exit:
dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
- gpio_get_value(pdata->buck2_set3));
+ gpiod_get_value(max8998->buck2_gpio));
} else {
ret = max8998_write_reg(i2c, reg, selector);
}
@@ -539,36 +540,6 @@ static const struct regulator_desc regulators[] = {
charger_current_table, MAX8998_REG_CHGR1, 0x7),
};
-static int max8998_pmic_dt_parse_dvs_gpio(struct max8998_dev *iodev,
- struct max8998_platform_data *pdata,
- struct device_node *pmic_np)
-{
- int gpio;
-
- gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck1-dvs-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid buck1 gpio[0]: %d\n", gpio);
- return -EINVAL;
- }
- pdata->buck1_set1 = gpio;
-
- gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck1-dvs-gpios", 1);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid buck1 gpio[1]: %d\n", gpio);
- return -EINVAL;
- }
- pdata->buck1_set2 = gpio;
-
- gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck2-dvs-gpio", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid buck 2 gpio: %d\n", gpio);
- return -EINVAL;
- }
- pdata->buck2_set3 = gpio;
-
- return 0;
-}
-
static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
struct max8998_platform_data *pdata)
{
@@ -614,10 +585,6 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
of_node_put(reg_np);
of_node_put(regulators_np);
- ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
- if (ret)
- return -EINVAL;
-
pdata->buck_voltage_lock = of_property_read_bool(pmic_np, "max8998,pmic-buck-voltage-lock");
ret = of_property_read_u32(pmic_np,
@@ -665,6 +632,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
struct max8998_data *max8998;
struct i2c_client *i2c;
+ enum gpiod_flags flags;
int i, ret;
unsigned int v;
@@ -693,37 +661,38 @@ static int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_idx = pdata->buck1_default_idx;
max8998->buck2_idx = pdata->buck2_default_idx;
- /* NOTE: */
- /* For unused GPIO NOT marked as -1 (thereof equal to 0) WARN_ON */
- /* will be displayed */
-
/* Check if MAX8998 voltage selection GPIOs are defined */
- if (gpio_is_valid(pdata->buck1_set1) &&
- gpio_is_valid(pdata->buck1_set2)) {
- /* Check if SET1 is not equal to 0 */
- if (!pdata->buck1_set1) {
- dev_err(&pdev->dev,
- "MAX8998 SET1 GPIO defined as 0 !\n");
- WARN_ON(!pdata->buck1_set1);
- return -EIO;
- }
- /* Check if SET2 is not equal to 0 */
- if (!pdata->buck1_set2) {
- dev_err(&pdev->dev,
- "MAX8998 SET2 GPIO defined as 0 !\n");
- WARN_ON(!pdata->buck1_set2);
- return -EIO;
- }
-
- gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
- gpio_direction_output(pdata->buck1_set1,
- max8998->buck1_idx & 0x1);
-
-
- gpio_request(pdata->buck1_set2, "MAX8998 BUCK1_SET2");
- gpio_direction_output(pdata->buck1_set2,
- (max8998->buck1_idx >> 1) & 0x1);
-
+ flags = (max8998->buck1_idx & BIT(0)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8998->buck1_gpio1 = devm_gpiod_get_index_optional(iodev->dev,
+ "max8998,pmic-buck1-dvs",
+ 0,
+ flags);
+ if (IS_ERR(max8998->buck1_gpio1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(max8998->buck1_gpio1),
+ "could not get BUCK1 GPIO1\n");
+ gpiod_set_consumer_name(max8998->buck1_gpio1, "MAX8998 BUCK1_SET1");
+
+ flags = (max8998->buck1_idx & BIT(1)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8998->buck1_gpio2 = devm_gpiod_get_index_optional(iodev->dev,
+ "max8998,pmic-buck1-dvs",
+ 1,
+ flags);
+ if (IS_ERR(max8998->buck1_gpio2))
+ return dev_err_probe(&pdev->dev, PTR_ERR(max8998->buck1_gpio2),
+ "could not get BUCK1 GPIO2\n");
+ gpiod_set_consumer_name(max8998->buck1_gpio1, "MAX8998 BUCK1_SET2");
+
+ flags = (max8998->buck2_idx & BIT(0)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8998->buck2_gpio = devm_gpiod_get_index_optional(iodev->dev,
+ "max8998,pmic-buck2-dvs",
+ 0,
+ flags);
+ if (IS_ERR(max8998->buck2_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(max8998->buck2_gpio),
+ "could not get BUCK2 GPIO\n");
+ gpiod_set_consumer_name(max8998->buck1_gpio1, "MAX8998 BUCK2_SET3");
+
+ if (max8998->buck1_gpio1 && max8998->buck1_gpio2) {
/* Set predefined values for BUCK1 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck1_voltage); ++v) {
int index = MAX8998_BUCK1 - MAX8998_LDO2;
@@ -742,18 +711,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
}
}
- if (gpio_is_valid(pdata->buck2_set3)) {
- /* Check if SET3 is not equal to 0 */
- if (!pdata->buck2_set3) {
- dev_err(&pdev->dev,
- "MAX8998 SET3 GPIO defined as 0 !\n");
- WARN_ON(!pdata->buck2_set3);
- return -EIO;
- }
- gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
- gpio_direction_output(pdata->buck2_set3,
- max8998->buck2_idx & 0x1);
-
+ if (max8998->buck2_gpio) {
/* Set predefined values for BUCK2 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck2_voltage); ++v) {
int index = MAX8998_BUCK2 - MAX8998_LDO2;
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
index b820bd6043e5..ab105ffd6a2e 100644
--- a/drivers/regulator/mp8859.c
+++ b/drivers/regulator/mp8859.c
@@ -35,6 +35,16 @@
#define MP8859_GO_BIT 0x01
+#define MP8859_IOUT_LIM_MASK 0x7f
+
+#define MP8859_ENABLE_MASK 0x80
+#define MP8859_DISCHG_EN_MASK 0x10
+#define MP8859_MODE_MASK 0x08
+
+#define MP8859_PG_MASK 0x80
+#define MP8859_OTP_MASK 0x40
+#define MP8859_OTW_MASK 0x20
+#define MP8859_CC_CV_MASK 0x10
static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
{
@@ -73,21 +83,221 @@ static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
return val;
}
+static int mp8859_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int from, unsigned int to)
+{
+ int change;
+
+ /* The voltage ramps at 1mV/uS, selectors are 10mV */
+ if (from > to)
+ change = from - to;
+ else
+ change = to - from;
+
+ return change * 10 * 1000;
+}
+
+static unsigned int mp8859_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP8859_CTL1_REG, &val);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to read mode: %d\n", ret);
+ return 0;
+ }
+
+ if (val & MP8859_MODE_MASK)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int mp8859_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = MP8859_MODE_MASK;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, MP8859_CTL1_REG,
+ MP8859_MODE_MASK, val);
+}
+
+static int mp8859_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ unsigned int cur_val, new_val;
+ int ret, i;
+
+ /* Steps of 50mA */
+ new_val = max_uA / 50000;
+ if (new_val > MP8859_IOUT_LIM_MASK)
+ return -EINVAL;
+ if (new_val == 0)
+ return -EINVAL;
+
+ /*
+ * If the regulator is limiting then ramp gradually as per
+ * datasheet, otherwise just set the value directly.
+ */
+ ret = regmap_read(rdev->regmap, MP8859_STATUS_REG, &cur_val);
+ if (ret != 0)
+ return ret;
+ if (!(cur_val & MP8859_CC_CV_MASK)) {
+ return regmap_update_bits(rdev->regmap, MP8859_IOUT_LIM_REG,
+ MP8859_IOUT_LIM_MASK, new_val);
+ }
+
+ ret = regmap_read(rdev->regmap, MP8859_IOUT_LIM_REG, &cur_val);
+ if (ret != 0)
+ return ret;
+
+ if (cur_val >= new_val) {
+ for (i = cur_val; i >= new_val; i--) {
+ ret = regmap_update_bits(rdev->regmap,
+ MP8859_IOUT_LIM_REG,
+ MP8859_IOUT_LIM_MASK,
+ i);
+ if (ret != 0)
+ return ret;
+ }
+ } else {
+ for (i = cur_val; i <= new_val; i++) {
+ ret = regmap_update_bits(rdev->regmap,
+ MP8859_IOUT_LIM_REG,
+ MP8859_IOUT_LIM_MASK,
+ i);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mp8859_get_status(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ /* Output status is only meaningful when enabled */
+ ret = regmap_read(rdev->regmap, MP8859_CTL1_REG, &val);
+ if (ret != 0)
+ return ret;
+ if (!(val & MP8859_ENABLE_MASK))
+ return REGULATOR_STATUS_UNDEFINED;
+
+ ret = regmap_read(rdev->regmap, MP8859_STATUS_REG, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val & MP8859_PG_MASK)
+ return REGULATOR_STATUS_ON;
+ else
+ return REGULATOR_STATUS_ERROR;
+}
+
+static int mp8859_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ unsigned int status, enabled;
+ int ret;
+
+ *flags = 0;
+
+ /* Output status is only meaningful when enabled */
+ ret = regmap_read(rdev->regmap, MP8859_CTL1_REG, &enabled);
+ if (ret != 0)
+ return ret;
+ enabled &= MP8859_ENABLE_MASK;
+
+ ret = regmap_read(rdev->regmap, MP8859_STATUS_REG, &status);
+ if (ret != 0)
+ return ret;
+
+ if (enabled && !(status & MP8859_PG_MASK))
+ status |= REGULATOR_ERROR_FAIL;
+ if (status & MP8859_OTP_MASK)
+ status |= REGULATOR_ERROR_OVER_TEMP;
+ if (status & MP8859_OTW_MASK)
+ status |= REGULATOR_ERROR_OVER_TEMP_WARN;
+ if (status & MP8859_CC_CV_MASK)
+ status |= REGULATOR_ERROR_OVER_CURRENT;
+
+ return 0;
+}
+
static const struct linear_range mp8859_dcdc_ranges[] = {
REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
};
+static bool mp8859_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MP8859_VOUT_L_REG:
+ case MP8859_VOUT_H_REG:
+ case MP8859_VOUT_GO_REG:
+ case MP8859_IOUT_LIM_REG:
+ case MP8859_CTL1_REG:
+ case MP8859_CTL2_REG:
+ case MP8859_STATUS_REG:
+ case MP8859_INTERRUPT_REG:
+ case MP8859_MASK_REG:
+ case MP8859_ID1_REG:
+ case MP8859_MFR_ID_REG:
+ case MP8859_DEV_ID_REG:
+ case MP8859_IC_REV_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool mp8859_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MP8859_VOUT_GO_REG:
+ case MP8859_STATUS_REG:
+ case MP8859_INTERRUPT_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config mp8859_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = MP8859_MAX_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
+ .readable_reg = mp8859_readable,
+ .volatile_reg = mp8859_volatile,
};
static const struct regulator_ops mp8859_ops = {
.set_voltage_sel = mp8859_set_voltage_sel,
.get_voltage_sel = mp8859_get_voltage_sel,
.list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_time_sel = mp8859_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = mp8859_set_mode,
+ .get_mode = mp8859_get_mode,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_current_limit = mp8859_set_current_limit,
+ .get_status = mp8859_get_status,
+ .get_error_flags = mp8859_get_error_flags,
};
static const struct regulator_desc mp8859_regulators[] = {
@@ -100,6 +310,12 @@ static const struct regulator_desc mp8859_regulators[] = {
.n_voltages = VOL_MAX_IDX + 1,
.linear_ranges = mp8859_dcdc_ranges,
.n_linear_ranges = 1,
+ .enable_reg = MP8859_CTL1_REG,
+ .enable_mask = MP8859_ENABLE_MASK,
+ .enable_val = MP8859_ENABLE_MASK,
+ .active_discharge_reg = MP8859_CTL1_REG,
+ .active_discharge_on = MP8859_DISCHG_EN_MASK,
+ .active_discharge_mask = MP8859_DISCHG_EN_MASK,
.ops = &mp8859_ops,
.owner = THIS_MODULE,
},
@@ -111,12 +327,46 @@ static int mp8859_i2c_probe(struct i2c_client *i2c)
struct regulator_config config = {.dev = &i2c->dev};
struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
struct regulator_dev *rdev;
+ unsigned int val, rev;
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
return ret;
}
+
+ ret = regmap_read(regmap, MP8859_MFR_ID_REG, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read manufacturer ID: %d\n", ret);
+ return ret;
+ }
+ if (val != 0x9) {
+ dev_err(&i2c->dev, "Manufacturer ID %x != 9\n", val);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(regmap, MP8859_DEV_ID_REG, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read device ID: %d\n", ret);
+ return ret;
+ }
+ if (val != 0x58) {
+ dev_err(&i2c->dev, "Manufacturer ID %x != 0x58\n", val);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(regmap, MP8859_IC_REV_REG, &rev);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read device revision: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_read(regmap, MP8859_ID1_REG, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read device ID1: %d\n", ret);
+ return ret;
+ }
+ dev_info(&i2c->dev, "MP8859-%04d revision %d\n", val, rev);
+
rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
&config);
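The get_status()/get_error_flags() callbacks added above surface through the standard consumer API. A consumer-side sketch; the "vout" supply name is an assumption:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void demo_check_supply(struct device *dev)
{
	struct regulator *reg;
	unsigned int flags;

	reg = devm_regulator_get(dev, "vout");
	if (IS_ERR(reg))
		return;

	if (!regulator_get_error_flags(reg, &flags) &&
	    (flags & REGULATOR_ERROR_OVER_TEMP))
		dev_warn(dev, "supply reports over-temperature\n");
}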
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 60cfcd741c2a..7434b6b22d32 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -271,11 +271,10 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
of_find_property(np, "voltage-table", &length);
if ((length < sizeof(*duty_cycle_table)) ||
- (length % sizeof(*duty_cycle_table))) {
- dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
- length);
- return -EINVAL;
- }
+ (length % sizeof(*duty_cycle_table)))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "voltage-table length(%d) is invalid\n",
+ length);
duty_cycle_table = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
if (!duty_cycle_table)
@@ -284,10 +283,9 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
ret = of_property_read_u32_array(np, "voltage-table",
(u32 *)duty_cycle_table,
length / sizeof(u32));
- if (ret) {
- dev_err(&pdev->dev, "Failed to read voltage-table: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to read voltage-table\n");
drvdata->state = -ENOTRECOVERABLE;
drvdata->duty_cycle_table = duty_cycle_table;
@@ -359,10 +357,9 @@ static int pwm_regulator_probe(struct platform_device *pdev)
enum gpiod_flags gpio_flags;
int ret;
- if (!np) {
- dev_err(&pdev->dev, "Device Tree node missing\n");
- return -EINVAL;
- }
+ if (!np)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Device Tree node missing\n");
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -400,8 +397,7 @@ static int pwm_regulator_probe(struct platform_device *pdev)
gpio_flags);
if (IS_ERR(drvdata->enb_gpio)) {
ret = PTR_ERR(drvdata->enb_gpio);
- dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n", ret);
- return ret;
+ return dev_err_probe(&pdev->dev, ret, "Failed to get enable GPIO\n");
}
ret = pwm_adjust_config(drvdata->pwm);
@@ -409,19 +405,17 @@ static int pwm_regulator_probe(struct platform_device *pdev)
return ret;
ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to apply boot_on settings\n");
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
- dev_err(&pdev->dev, "Failed to register regulator %s: %d\n",
- drvdata->desc.name, ret);
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register regulator %s\n",
+ drvdata->desc.name);
}
return 0;
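The pwm-regulator hunks above fold the dev_err() + return pairs into dev_err_probe(), which records -EPROBE_DEFER silently instead of logging it. A minimal sketch of the pattern in a made-up probe:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* Logs (except for probe deferral) and returns the error. */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}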
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index d1be9568025e..3b7e06b9f5ce 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -11,11 +11,10 @@
#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
+struct qcom_smd_rpm *smd_vreg_rpm;
+
struct qcom_rpm_reg {
struct device *dev;
-
- struct qcom_smd_rpm *rpm;
-
u32 type;
u32 id;
@@ -70,7 +69,7 @@ static int rpm_reg_write_active(struct qcom_rpm_reg *vreg)
if (!reqlen)
return 0;
- ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ ret = qcom_rpm_smd_write(smd_vreg_rpm, QCOM_SMD_RPM_ACTIVE_STATE,
vreg->type, vreg->id,
req, sizeof(req[0]) * reqlen);
if (!ret) {
@@ -1384,14 +1383,13 @@ MODULE_DEVICE_TABLE(of, rpm_of_match);
* @dev: Pointer to the top level qcom_smd-regulator PMIC device
* @node: Pointer to the individual qcom_smd-regulator resource
* device node
- * @rpm: Pointer to the rpm bus node
* @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator
* resources defined for the top level PMIC device
*
* Return: 0 on success, errno on failure
*/
static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev,
- struct device_node *node, struct qcom_smd_rpm *rpm,
+ struct device_node *node,
const struct rpm_regulator_data *pmic_rpm_data)
{
struct regulator_config config = {};
@@ -1409,7 +1407,6 @@ static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev
}
vreg->dev = dev;
- vreg->rpm = rpm;
vreg->type = rpm_data->type;
vreg->id = rpm_data->id;
@@ -1449,6 +1446,11 @@ static int rpm_reg_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (smd_vreg_rpm && rpm != smd_vreg_rpm)
+ return dev_err_probe(dev, -EINVAL, "RPM mismatch\n");
+
+ smd_vreg_rpm = rpm;
+
vreg_data = of_device_get_match_data(dev);
if (!vreg_data)
return -ENODEV;
@@ -1460,8 +1462,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
-
+ ret = rpm_regulator_init_vreg(vreg, dev, node, vreg_data);
if (ret < 0) {
of_node_put(node);
return ret;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e374fa6e5f28..d89ae7f16d7a 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1017,14 +1017,14 @@ static const struct regulator_desc rk805_reg[] = {
};
static const struct linear_range rk806_buck_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0, 160, 6250), /* 500mV ~ 1500mV */
- REGULATOR_LINEAR_RANGE(1500000, 161, 237, 25000), /* 1500mV ~ 3400mV */
- REGULATOR_LINEAR_RANGE(3400000, 238, 255, 0),
+ REGULATOR_LINEAR_RANGE(500000, 0, 159, 6250), /* 500mV ~ 1500mV */
+ REGULATOR_LINEAR_RANGE(1500000, 160, 235, 25000), /* 1500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(3400000, 236, 255, 0),
};
static const struct linear_range rk806_ldo_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0, 232, 12500), /* 500mV ~ 3400mV */
- REGULATOR_LINEAR_RANGE(3400000, 233, 255, 0), /* 500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(500000, 0, 231, 12500), /* 500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(3400000, 232, 255, 0),
};
static const struct regulator_desc rk806_reg[] = {
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 53d1b9d6f69c..86a626a4f610 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -208,6 +208,7 @@ static const struct of_device_id regulator_userspace_consumer_of_match[] = {
{ .compatible = "regulator-output", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_userspace_consumer_of_match);
static struct platform_driver regulator_userspace_consumer_driver = {
.probe = regulator_userspace_consumer_probe,
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index a1c62d15f16c..d73727a5828a 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -103,12 +103,10 @@ enum imx_dsp_rp_mbox_messages {
* @tx_ch: mailbox tx channel handle
* @rx_ch: mailbox rx channel handle
* @rxdb_ch: mailbox rx doorbell channel handle
- * @pd_dev: power domain device
- * @pd_dev_link: power domain device link
+ * @pd_list: power domain list
* @ipc_handle: System Control Unit ipc handle
* @rproc_work: work for processing virtio interrupts
* @pm_comp: completion primitive to sync for suspend response
- * @num_domains: power domain number
* @flags: control flags
*/
struct imx_dsp_rproc {
@@ -121,12 +119,10 @@ struct imx_dsp_rproc {
struct mbox_chan *tx_ch;
struct mbox_chan *rx_ch;
struct mbox_chan *rxdb_ch;
- struct device **pd_dev;
- struct device_link **pd_dev_link;
+ struct dev_pm_domain_list *pd_list;
struct imx_sc_ipc *ipc_handle;
struct work_struct rproc_work;
struct completion pm_comp;
- int num_domains;
u32 flags;
};
@@ -955,74 +951,14 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
{
struct device *dev = priv->rproc->dev.parent;
- int ret, i;
-
- priv->num_domains = of_count_phandle_with_args(dev->of_node,
- "power-domains",
- "#power-domain-cells");
-
- /* If only one domain, then no need to link the device */
- if (priv->num_domains <= 1)
- return 0;
-
- priv->pd_dev = devm_kmalloc_array(dev, priv->num_domains,
- sizeof(*priv->pd_dev),
- GFP_KERNEL);
- if (!priv->pd_dev)
- return -ENOMEM;
-
- priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_domains,
- sizeof(*priv->pd_dev_link),
- GFP_KERNEL);
- if (!priv->pd_dev_link)
- return -ENOMEM;
-
- for (i = 0; i < priv->num_domains; i++) {
- priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
- if (IS_ERR(priv->pd_dev[i])) {
- ret = PTR_ERR(priv->pd_dev[i]);
- goto detach_pm;
- }
-
- /*
- * device_link_add will check priv->pd_dev[i], if it is
- * NULL, then will break.
- */
- priv->pd_dev_link[i] = device_link_add(dev,
- priv->pd_dev[i],
- DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME);
- if (!priv->pd_dev_link[i]) {
- dev_pm_domain_detach(priv->pd_dev[i], false);
- ret = -EINVAL;
- goto detach_pm;
- }
- }
-
- return 0;
-
-detach_pm:
- while (--i >= 0) {
- device_link_del(priv->pd_dev_link[i]);
- dev_pm_domain_detach(priv->pd_dev[i], false);
- }
-
- return ret;
-}
-
-static int imx_dsp_detach_pm_domains(struct imx_dsp_rproc *priv)
-{
- int i;
+ int ret;
- if (priv->num_domains <= 1)
+ /* A single PM domain is already attached. */
+ if (dev->pm_domain)
return 0;
- for (i = 0; i < priv->num_domains; i++) {
- device_link_del(priv->pd_dev_link[i]);
- dev_pm_domain_detach(priv->pd_dev[i], false);
- }
-
- return 0;
+ ret = dev_pm_domain_attach_list(dev, NULL, &priv->pd_list);
+ return ret < 0 ? ret : 0;
}
/**
@@ -1154,7 +1090,7 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
return 0;
err_detach_domains:
- imx_dsp_detach_pm_domains(priv);
+ dev_pm_domain_detach_list(priv->pd_list);
err_put_rproc:
rproc_free(rproc);
@@ -1168,7 +1104,7 @@ static void imx_dsp_rproc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
rproc_del(rproc);
- imx_dsp_detach_pm_domains(priv);
+ dev_pm_domain_detach_list(priv->pd_list);
rproc_free(rproc);
}
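Both imx remoteproc drivers in this series drop their hand-rolled attach loops in favour of dev_pm_domain_attach_list(). A condensed sketch of the attach/detach pairing, assuming a device that lists more than one power domain (a single domain is attached by the driver core and shows up as dev->pm_domain):

#include <linux/device.h>
#include <linux/pm_domain.h>

static int demo_attach_pds(struct device *dev, struct dev_pm_domain_list **list)
{
	int ret;

	/* Single domain: already handled by the platform bus code. */
	if (dev->pm_domain)
		return 0;

	/* NULL data: attach every "power-domains" entry by index. */
	ret = dev_pm_domain_attach_list(dev, NULL, list);
	return ret < 0 ? ret : 0;	/* >= 0 is the number of domains */
}

static void demo_detach_pds(struct dev_pm_domain_list *list)
{
	dev_pm_domain_detach_list(list);
}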
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 8bb293b9f327..3161f14442bc 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -92,7 +92,6 @@ struct imx_rproc_mem {
static int imx_rproc_xtr_mbox_init(struct rproc *rproc);
static void imx_rproc_free_mbox(struct rproc *rproc);
-static int imx_rproc_detach_pd(struct rproc *rproc);
struct imx_rproc {
struct device *dev;
@@ -113,10 +112,8 @@ struct imx_rproc {
u32 rproc_pt; /* partition id */
u32 rsrc_id; /* resource id */
u32 entry; /* cpu start address */
- int num_pd;
u32 core_index;
- struct device **pd_dev;
- struct device_link **pd_dev_link;
+ struct dev_pm_domain_list *pd_list;
};
static const struct imx_rproc_att imx_rproc_att_imx93[] = {
@@ -853,7 +850,7 @@ static void imx_rproc_put_scu(struct rproc *rproc)
return;
if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
- imx_rproc_detach_pd(rproc);
+ dev_pm_domain_detach_list(priv->pd_list);
return;
}
@@ -880,72 +877,20 @@ static int imx_rproc_partition_notify(struct notifier_block *nb,
static int imx_rproc_attach_pd(struct imx_rproc *priv)
{
struct device *dev = priv->dev;
- int ret, i;
-
- /*
- * If there is only one power-domain entry, the platform driver framework
- * will handle it, no need handle it in this driver.
- */
- priv->num_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
- "#power-domain-cells");
- if (priv->num_pd <= 1)
- return 0;
-
- priv->pd_dev = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev), GFP_KERNEL);
- if (!priv->pd_dev)
- return -ENOMEM;
-
- priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev_link),
- GFP_KERNEL);
-
- if (!priv->pd_dev_link)
- return -ENOMEM;
-
- for (i = 0; i < priv->num_pd; i++) {
- priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
- if (IS_ERR(priv->pd_dev[i])) {
- ret = PTR_ERR(priv->pd_dev[i]);
- goto detach_pd;
- }
-
- priv->pd_dev_link[i] = device_link_add(dev, priv->pd_dev[i], DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
- if (!priv->pd_dev_link[i]) {
- dev_pm_domain_detach(priv->pd_dev[i], false);
- ret = -EINVAL;
- goto detach_pd;
- }
- }
-
- return 0;
-
-detach_pd:
- while (--i >= 0) {
- device_link_del(priv->pd_dev_link[i]);
- dev_pm_domain_detach(priv->pd_dev[i], false);
- }
-
- return ret;
-}
-
-static int imx_rproc_detach_pd(struct rproc *rproc)
-{
- struct imx_rproc *priv = rproc->priv;
- int i;
+ int ret;
+ struct dev_pm_domain_attach_data pd_data = {
+ .pd_flags = PD_FLAG_DEV_LINK_ON,
+ };
/*
* If there is only one power-domain entry, the platform driver framework
* will handle it, no need handle it in this driver.
*/
- if (priv->num_pd <= 1)
+ if (dev->pm_domain)
return 0;
- for (i = 0; i < priv->num_pd; i++) {
- device_link_del(priv->pd_dev_link[i]);
- dev_pm_domain_detach(priv->pd_dev[i], false);
- }
-
- return 0;
+ ret = dev_pm_domain_attach_list(dev, &pd_data, &priv->pd_list);
+ return ret < 0 ? ret : 0;
}
static int imx_rproc_detect_mode(struct imx_rproc *priv)
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 6c67514cc493..93f9a1537ec6 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -55,8 +55,6 @@
#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP_CBCR 0x3c
-#define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3
-
#define LPASS_BOOT_CORE_START BIT(0)
#define LPASS_BOOT_CMD_START BIT(0)
#define LPASS_EFUSE_Q6SS_EVB_SEL 0x0
@@ -74,7 +72,8 @@ struct adsp_pil_data {
const char **clk_ids;
int num_clks;
- const char **proxy_pd_names;
+ const char **pd_names;
+ unsigned int num_pds;
const char *load_state;
};
@@ -110,8 +109,7 @@ struct qcom_adsp {
size_t mem_size;
bool has_iommu;
- struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
- size_t proxy_pd_count;
+ struct dev_pm_domain_list *pd_list;
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
@@ -120,98 +118,92 @@ struct qcom_adsp {
int (*shutdown)(struct qcom_adsp *adsp);
};
-static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
- const char **pd_names)
+static int qcom_rproc_pds_attach(struct qcom_adsp *adsp, const char **pd_names,
+ unsigned int num_pds)
{
- struct device **devs = adsp->proxy_pds;
- size_t num_pds = 0;
+ struct device *dev = adsp->dev;
+ struct dev_pm_domain_attach_data pd_data = {
+ .pd_names = pd_names,
+ .num_pd_names = num_pds,
+ };
int ret;
- int i;
-
- if (!pd_names)
- return 0;
/* Handle single power domain */
- if (dev->pm_domain) {
- devs[0] = dev;
- pm_runtime_enable(dev);
- return 1;
- }
+ if (dev->pm_domain)
+ goto out;
- while (pd_names[num_pds])
- num_pds++;
+ if (!pd_names)
+ return 0;
- if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
- return -E2BIG;
+ ret = dev_pm_domain_attach_list(dev, &pd_data, &adsp->pd_list);
+ if (ret < 0)
+ return ret;
- for (i = 0; i < num_pds; i++) {
- devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
- if (IS_ERR_OR_NULL(devs[i])) {
- ret = PTR_ERR(devs[i]) ? : -ENODATA;
- goto unroll_attach;
- }
- }
+out:
+ pm_runtime_enable(dev);
+ return 0;
+}
- return num_pds;
+static void qcom_rproc_pds_detach(struct qcom_adsp *adsp)
+{
+ struct device *dev = adsp->dev;
+ struct dev_pm_domain_list *pds = adsp->pd_list;
-unroll_attach:
- for (i--; i >= 0; i--)
- dev_pm_domain_detach(devs[i], false);
+ dev_pm_domain_detach_list(pds);
- return ret;
+ if (dev->pm_domain || pds)
+ pm_runtime_disable(adsp->dev);
}
-static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
- size_t pd_count)
+static int qcom_rproc_pds_enable(struct qcom_adsp *adsp)
{
struct device *dev = adsp->dev;
- int i;
+ struct dev_pm_domain_list *pds = adsp->pd_list;
+ int ret, i = 0;
- /* Handle single power domain */
- if (dev->pm_domain && pd_count) {
- pm_runtime_disable(dev);
- return;
- }
+ if (!dev->pm_domain && !pds)
+ return 0;
- for (i = 0; i < pd_count; i++)
- dev_pm_domain_detach(pds[i], false);
-}
+ if (dev->pm_domain)
+ dev_pm_genpd_set_performance_state(dev, INT_MAX);
-static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
- size_t pd_count)
-{
- int ret;
- int i;
-
- for (i = 0; i < pd_count; i++) {
- dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
- ret = pm_runtime_resume_and_get(pds[i]);
- if (ret < 0) {
- dev_pm_genpd_set_performance_state(pds[i], 0);
- goto unroll_pd_votes;
- }
+ while (pds && i < pds->num_pds) {
+ dev_pm_genpd_set_performance_state(pds->pd_devs[i], INT_MAX);
+ i++;
}
- return 0;
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ while (pds && i > 0) {
+ i--;
+ dev_pm_genpd_set_performance_state(pds->pd_devs[i], 0);
+ }
-unroll_pd_votes:
- for (i--; i >= 0; i--) {
- dev_pm_genpd_set_performance_state(pds[i], 0);
- pm_runtime_put(pds[i]);
+ if (dev->pm_domain)
+ dev_pm_genpd_set_performance_state(dev, 0);
}
return ret;
}
-static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
- size_t pd_count)
+static void qcom_rproc_pds_disable(struct qcom_adsp *adsp)
{
- int i;
+ struct device *dev = adsp->dev;
+ struct dev_pm_domain_list *pds = adsp->pd_list;
+ int i = 0;
+
+ if (!dev->pm_domain && !pds)
+ return;
+
+ if (dev->pm_domain)
+ dev_pm_genpd_set_performance_state(dev, 0);
- for (i = 0; i < pd_count; i++) {
- dev_pm_genpd_set_performance_state(pds[i], 0);
- pm_runtime_put(pds[i]);
+ while (pds && i < pds->num_pds) {
+ dev_pm_genpd_set_performance_state(pds->pd_devs[i], 0);
+ i++;
}
+
+ pm_runtime_put(dev);
}
static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
@@ -397,8 +389,7 @@ static int adsp_start(struct rproc *rproc)
if (ret)
goto adsp_smmu_unmap;
- ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
- adsp->proxy_pd_count);
+ ret = qcom_rproc_pds_enable(adsp);
if (ret < 0)
goto disable_xo_clk;
@@ -448,7 +439,7 @@ static int adsp_start(struct rproc *rproc)
disable_adsp_clks:
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
disable_power_domain:
- qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ qcom_rproc_pds_disable(adsp);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
adsp_smmu_unmap:
@@ -464,7 +455,7 @@ static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
clk_disable_unprepare(adsp->xo);
- qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ qcom_rproc_pds_disable(adsp);
}
static int adsp_stop(struct rproc *rproc)
@@ -715,13 +706,11 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- ret = qcom_rproc_pds_attach(adsp->dev, adsp,
- desc->proxy_pd_names);
+ ret = qcom_rproc_pds_attach(adsp, desc->pd_names, desc->num_pds);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
goto free_rproc;
}
- adsp->proxy_pd_count = ret;
ret = adsp_init_reset(adsp);
if (ret)
@@ -753,7 +742,7 @@ static int adsp_probe(struct platform_device *pdev)
return 0;
disable_pm:
- qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ qcom_rproc_pds_detach(adsp);
free_rproc:
rproc_free(rproc);
@@ -771,7 +760,7 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
- qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ qcom_rproc_pds_detach(adsp);
rproc_free(adsp->rproc);
}
@@ -788,9 +777,8 @@ static const struct adsp_pil_data adsp_resource_init = {
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
},
.num_clks = 7,
- .proxy_pd_names = (const char*[]) {
- "cx", NULL
- },
+ .pd_names = (const char*[]) { "cx" },
+ .num_pds = 1,
};
static const struct adsp_pil_data adsp_sc7280_resource_init = {
@@ -821,9 +809,8 @@ static const struct adsp_pil_data cdsp_resource_init = {
"q6_axim", NULL
},
.num_clks = 7,
- .proxy_pd_names = (const char*[]) {
- "cx", NULL
- },
+ .pd_names = (const char*[]) { "cx" },
+ .num_pds = 1,
};
static const struct adsp_pil_data wpss_resource_init = {
@@ -839,9 +826,8 @@ static const struct adsp_pil_data wpss_resource_init = {
"ahb_bdg", "ahb", "rscp", NULL
},
.num_clks = 3,
- .proxy_pd_names = (const char*[]) {
- "cx", "mx", NULL
- },
+ .pd_names = (const char*[]) { "cx", "mx" },
+ .num_pds = 2,
};
static const struct of_device_id adsp_of_match[] = {
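
The qcom_q6v5_adsp.c conversion above folds the per-domain runtime-PM handling into one resume of the consumer device plus performance-state votes on each attached domain. Below is a hedged sketch of that enable path, assuming the same dev_pm_domain_list layout (pd_devs[]/num_pds) used in the diff; foo_pds_enable is a hypothetical name.

#include <linux/limits.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int foo_pds_enable(struct device *dev, struct dev_pm_domain_list *pds)
{
	int ret, i;

	if (!dev->pm_domain && !pds)
		return 0;

	/* Single domain: the vote goes directly on the consumer device. */
	if (dev->pm_domain)
		dev_pm_genpd_set_performance_state(dev, INT_MAX);

	/* Multiple domains: vote on each virtual domain device in the list. */
	for (i = 0; pds && i < pds->num_pds; i++)
		dev_pm_genpd_set_performance_state(pds->pd_devs[i], INT_MAX);

	/* The device links created at attach time power the domains up here. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		/* Drop the performance-state votes again on failure. */
		for (i = 0; pds && i < pds->num_pds; i++)
			dev_pm_genpd_set_performance_state(pds->pd_devs[i], 0);
		if (dev->pm_domain)
			dev_pm_genpd_set_performance_state(dev, 0);
	}

	return ret;
}
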
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
index d5caf36c56cd..225c859d6da5 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/lib_test.c
@@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test)
days = div_s64(secs, 86400);
- #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
+ #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
year, month, mday, yday, days
KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 7327e81352e9..cead018c3f06 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -8,9 +8,6 @@
* Copyright IBM Corp. 1999, 2009
*/
-#define KMSG_COMPONENT "dasd"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -30,9 +27,6 @@
#include <asm/itcw.h>
#include <asm/diag.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd:"
-
#include "dasd_int.h"
/*
* SECTION: Constant definitions to be used within this file
@@ -313,39 +307,57 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
*/
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
- int rc;
- struct dasd_block *block;
- struct gendisk *disk;
+ struct dasd_block *block = device->block;
+ struct queue_limits lim;
+ int rc = 0;
- rc = 0;
- block = device->block;
/* make disk known with correct capacity */
- if (block) {
- if (block->base->discipline->do_analysis != NULL)
- rc = block->base->discipline->do_analysis(block);
- if (rc) {
- if (rc != -EAGAIN) {
- device->state = DASD_STATE_UNFMT;
- disk = device->block->gdp;
- kobject_uevent(&disk_to_dev(disk)->kobj,
- KOBJ_CHANGE);
- goto out;
- }
- return rc;
- }
- if (device->discipline->setup_blk_queue)
- device->discipline->setup_blk_queue(block);
- set_capacity(block->gdp,
- block->blocks << block->s2b_shift);
+ if (!block) {
device->state = DASD_STATE_READY;
- rc = dasd_scan_partitions(block);
- if (rc) {
- device->state = DASD_STATE_BASIC;
+ goto out;
+ }
+
+ if (block->base->discipline->do_analysis != NULL)
+ rc = block->base->discipline->do_analysis(block);
+ if (rc) {
+ if (rc == -EAGAIN)
return rc;
- }
- } else {
- device->state = DASD_STATE_READY;
+ device->state = DASD_STATE_UNFMT;
+ kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
+ KOBJ_CHANGE);
+ goto out;
+ }
+
+ lim = queue_limits_start_update(block->gdp->queue);
+ lim.max_dev_sectors = device->discipline->max_sectors(block);
+ lim.max_hw_sectors = lim.max_dev_sectors;
+ lim.logical_block_size = block->bp_block;
+
+ if (device->discipline->has_discard) {
+ unsigned int max_bytes;
+
+ lim.discard_granularity = block->bp_block;
+
+ /* Calculate max_discard_sectors and make it PAGE aligned */
+ max_bytes = USHRT_MAX * block->bp_block;
+ max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
+
+ lim.max_hw_discard_sectors = max_bytes / block->bp_block;
+ lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
}
+ rc = queue_limits_commit_update(block->gdp->queue, &lim);
+ if (rc)
+ return rc;
+
+ set_capacity(block->gdp, block->blocks << block->s2b_shift);
+ device->state = DASD_STATE_READY;
+
+ rc = dasd_scan_partitions(block);
+ if (rc) {
+ device->state = DASD_STATE_BASIC;
+ return rc;
+ }
+
out:
if (device->discipline->basic_to_ready)
rc = device->discipline->basic_to_ready(device);
@@ -412,7 +424,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
KOBJ_CHANGE);
return 0;
}
- disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
}
return 0;
@@ -433,7 +445,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
- disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
return 0;
}
@@ -1301,7 +1313,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int retries, rc;
- char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -1340,10 +1351,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
rc = 0;
break;
default:
- /* internal error 10 - unknown rc*/
- snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
- dev_err(&device->cdev->dev, "An error occurred in the "
- "DASD device driver, reason=%s\n", errorstring);
+ dev_err(&device->cdev->dev,
+ "Unexpected error during request termination %d\n", rc);
BUG();
break;
}
@@ -1362,7 +1371,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
- char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -1382,10 +1390,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
return -EPERM;
}
if (cqr->retries < 0) {
- /* internal error 14 - start_IO run out of retries */
- sprintf(errorstring, "14 %p", cqr);
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", errorstring);
+ dev_err(&device->cdev->dev,
+ "Start I/O ran out of retries\n");
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
@@ -1463,11 +1469,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
"not accessible");
break;
default:
- /* internal error 11 - unknown rc */
- snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unexpected error during request start %d", rc);
BUG();
break;
}
@@ -1904,8 +1907,6 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
static void __dasd_process_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
- char errorstring[ERRORLENGTH];
-
switch (cqr->status) {
case DASD_CQR_SUCCESS:
cqr->status = DASD_CQR_DONE;
@@ -1917,11 +1918,8 @@ static void __dasd_process_cqr(struct dasd_device *device,
cqr->status = DASD_CQR_TERMINATED;
break;
default:
- /* internal error 12 - wrong cqr status*/
- snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unexpected CQR status %02x", cqr->status);
BUG();
}
if (cqr->callback)
@@ -1986,16 +1984,14 @@ static void __dasd_device_check_expire(struct dasd_device *device)
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
- "cqr %p timed out (%lus) but cannot be "
- "ended, retrying in 5 s\n",
- cqr, (cqr->expires/HZ));
+ "CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
+ (cqr->expires / HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
dev_err(&device->cdev->dev,
- "cqr %p timed out (%lus), %i retries "
- "remaining\n", cqr, (cqr->expires/HZ),
- cqr->retries);
+ "CQR timed out (%lus), %i retries remaining\n",
+ (cqr->expires / HZ), cqr->retries);
}
__dasd_device_check_autoquiesce_timeout(device, cqr);
}
@@ -2116,8 +2112,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
if (rc) {
			/* unable to terminate request */
dev_err(&device->cdev->dev,
- "Flushing the DASD request queue "
- "failed for request %p\n", cqr);
+ "Flushing the DASD request queue failed\n");
/* stop flush processing */
goto finished;
}
@@ -2633,8 +2628,7 @@ static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
rc = device->discipline->term_IO(cqr);
if (rc) {
dev_err(&device->cdev->dev,
- "Cancelling request %p failed with rc=%d\n",
- cqr, rc);
+ "Cancelling request failed with rc=%d\n", rc);
} else {
cqr->stopclk = get_tod_clock();
}
@@ -3402,8 +3396,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
ret = ccw_device_set_online(cdev);
if (ret)
- pr_warn("%s: Setting the DASD online failed with rc=%d\n",
- dev_name(&cdev->dev), ret);
+ dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
}
/*
@@ -3490,8 +3483,11 @@ int dasd_generic_set_online(struct ccw_device *cdev,
{
struct dasd_discipline *discipline;
struct dasd_device *device;
+ struct device *dev;
int rc;
+ dev = &cdev->dev;
+
/* first online clears initial online feature flag */
dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
device = dasd_create_device(cdev);
@@ -3504,11 +3500,10 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* Try to load the required module. */
rc = request_module(DASD_DIAG_MOD);
if (rc) {
- pr_warn("%s Setting the DASD online failed "
- "because the required module %s "
- "could not be loaded (rc=%d)\n",
- dev_name(&cdev->dev), DASD_DIAG_MOD,
- rc);
+ dev_warn(dev, "Setting the DASD online failed "
+ "because the required module %s "
+ "could not be loaded (rc=%d)\n",
+ DASD_DIAG_MOD, rc);
dasd_delete_device(device);
return -ENODEV;
}
@@ -3516,8 +3511,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* Module init could have failed, so check again here after
* request_module(). */
if (!dasd_diag_discipline_pointer) {
- pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
dasd_delete_device(device);
return -ENODEV;
}
@@ -3527,37 +3521,33 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_delete_device(device);
return -EINVAL;
}
+ device->base_discipline = base_discipline;
if (!try_module_get(discipline->owner)) {
- module_put(base_discipline->owner);
dasd_delete_device(device);
return -EINVAL;
}
- device->base_discipline = base_discipline;
device->discipline = discipline;
/* check_device will allocate block device if necessary */
rc = discipline->check_device(device);
if (rc) {
- pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
- dev_name(&cdev->dev), discipline->name, rc);
- module_put(discipline->owner);
- module_put(base_discipline->owner);
+ dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
+ discipline->name, rc);
dasd_delete_device(device);
return rc;
}
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN) {
- pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
dasd_free_block(device->block);
dasd_delete_device(device);
- } else
- pr_debug("dasd_generic device %s found\n",
- dev_name(&cdev->dev));
+ } else {
+ dev_dbg(dev, "dasd_generic device found\n");
+ }
wait_event(dasd_init_waitq, _wait_for_device(device));
@@ -3568,10 +3558,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
int dasd_generic_set_offline(struct ccw_device *cdev)
{
+ int max_count, open_count, rc;
struct dasd_device *device;
struct dasd_block *block;
- int max_count, open_count, rc;
unsigned long flags;
+ struct device *dev;
+
+ dev = &cdev->dev;
rc = 0;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -3588,15 +3581,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
- max_count = device->block->bdev_handle ? 0 : -1;
+ max_count = device->block->bdev_file ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
- pr_warn("%s: The DASD cannot be set offline with open count %i\n",
- dev_name(&cdev->dev), open_count);
+ dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
+ open_count);
else
- pr_warn("%s: The DASD cannot be set offline while it is in use\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
rc = -EBUSY;
goto out_err;
}
@@ -3634,8 +3626,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
- if (device->block && device->block->bdev_handle)
- bdev_mark_dead(device->block->bdev_handle->bdev, false);
+ if (device->block && device->block->bdev_file)
+ bdev_mark_dead(file_bdev(device->block->bdev_file), false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
@@ -3956,8 +3948,8 @@ static int dasd_handle_autoquiesce(struct dasd_device *device,
if (dasd_eer_enabled(device))
dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
- pr_info("%s: The DASD has been put in the quiesce state\n",
- dev_name(&device->cdev->dev));
+ dev_info(&device->cdev->dev,
+ "The DASD has been put in the quiesce state\n");
dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
@@ -3977,10 +3969,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
NULL);
if (IS_ERR(cqr)) {
- /* internal error 13 - Allocating the RDC request failed*/
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "13");
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate RDC request");
return cqr;
}
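
The dasd_state_basic_to_ready() rework above moves all block-queue settings into an atomic queue_limits update instead of individual blk_queue_*() calls. A minimal sketch of that pattern follows, assuming only the queue_limits_start_update()/queue_limits_commit_update() pair visible in the hunk; the function name and values are illustrative.

#include <linux/blkdev.h>

static int foo_update_limits(struct gendisk *disk, unsigned int max_sectors,
			     unsigned int block_size)
{
	struct queue_limits lim;

	/* Snapshot the current limits (this also takes the limits lock). */
	lim = queue_limits_start_update(disk->queue);
	lim.max_dev_sectors = max_sectors;
	lim.max_hw_sectors = max_sectors;
	lim.logical_block_size = block_size;

	/* Validate and apply every change in one step, releasing the lock. */
	return queue_limits_commit_update(disk->queue, &lim);
}
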
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 89957bb7244d..459b7f8ac883 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,13 +7,9 @@
*
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/timer.h>
#include <asm/idals.h>
-#define PRINTK_HEADER "dasd_erp(3990): "
-
#include "dasd_int.h"
#include "dasd_eckd.h"
@@ -398,7 +394,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
struct dasd_device *device = erp->startdev;
char msg_format = (sense[7] & 0xF0);
char msg_no = (sense[7] & 0x0F);
- char errorstring[ERRORLENGTH];
switch (msg_format) {
case 0x00: /* Format 0 - Program or System Checks */
@@ -1004,12 +999,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
}
break;
- default: /* unknown message format - should not happen
- internal error 03 - unknown message format */
- snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format);
+ default:
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unknown message format %02x", msg_format);
break;
} /* end switch message format */
@@ -1056,11 +1048,9 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else {
- /* fatal error - set status to FAILED
- internal error 09 - Command Reject */
if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags))
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, reason=09\n");
+ "An I/O command request was rejected\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1128,13 +1118,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_equip_check;
if (sense[1] & SNS1_WRITE_INHIBITED) {
- dev_info(&device->cdev->dev,
- "Write inhibited path encountered\n");
-
- /* vary path offline
- internal error 04 - Path should be varied off-line.*/
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "04");
+ dev_err(&device->cdev->dev, "Write inhibited path encountered\n");
erp = dasd_3990_erp_action_1(erp);
@@ -1285,11 +1269,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_4(erp, sense);
} else {
- /* internal error 06 - The track format is not valid*/
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "06");
-
+ dev_err(&device->cdev->dev, "Track format is not valid\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1663,9 +1643,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
sizeof(struct LO_eckd_data), device);
if (IS_ERR(erp)) {
- /* internal error 01 - Unable to allocate ERP */
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "01");
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "Unable to allocate ERP request (1B 32)");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
@@ -1807,10 +1786,8 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) {
- /* internal error 02 -
- Unable to determine address of the CCW to be restarted */
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "02");
+ dev_err(&device->cdev->dev,
+ "Unable to determine address of to be restarted CCW\n");
previous_erp->status = DASD_CQR_FAILED;
@@ -2009,15 +1986,9 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
{
if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
-
- /* set to suspended duplex state then restart
- internal error 05 - Set device to suspended duplex state
- should be done */
struct dasd_device *device = erp->startdev;
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "05");
-
+ "Compound configuration error occurred\n");
}
erp->function = dasd_3990_erp_compound_config;
@@ -2153,10 +2124,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_int_req(erp);
break;
- case 0x0F: /* length mismatch during update write command
- internal error 08 - update write command error*/
- dev_err(&device->cdev->dev, "An error occurred in the "
- "DASD device driver, reason=%s\n", "08");
+ case 0x0F:
+ dev_err(&device->cdev->dev,
+ "Update write command error occurred\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2165,12 +2135,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_10_32(erp, sense);
break;
- case 0x15: /* next track outside defined extend
- internal error 07 - The next track is not
- within the defined storage extent */
+ case 0x15:
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "07");
+ "Track outside defined extent error occurred\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2663,7 +2630,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
* necessary
*/
dev_err(&device->cdev->dev,
- "ERP %p has run out of retries and failed\n", erp);
+ "ERP %px has run out of retries and failed\n", erp);
erp->status = DASD_CQR_FAILED;
}
@@ -2704,8 +2671,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
while (erp_done != erp) {
if (erp_done == NULL) /* end of chain reached */
- panic(PRINTK_HEADER "Programming error in ERP! The "
- "original request was lost\n");
+ panic("Programming error in ERP! The original request was lost\n");
/* remove the request from the device queue */
list_del(&erp_done->blocklist);
@@ -2786,11 +2752,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
"ERP chain at BEGINNING of ERP-ACTION\n");
for (temp_erp = cqr;
temp_erp != NULL; temp_erp = temp_erp->refers) {
-
dev_err(&device->cdev->dev,
- "ERP %p (%02x) refers to %p\n",
- temp_erp, temp_erp->status,
- temp_erp->refers);
+ "ERP %px (%02x) refers to %px\n",
+ temp_erp, temp_erp->status, temp_erp->refers);
}
}
@@ -2837,11 +2801,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
"ERP chain at END of ERP-ACTION\n");
for (temp_erp = erp;
temp_erp != NULL; temp_erp = temp_erp->refers) {
-
dev_err(&device->cdev->dev,
- "ERP %p (%02x) refers to %p\n",
- temp_erp, temp_erp->status,
- temp_erp->refers);
+ "ERP %px (%02x) refers to %px\n",
+ temp_erp, temp_erp->status, temp_erp->refers);
}
}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c9740ae88d1a..e84cd5436556 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -6,20 +6,12 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eckd):"
-
-
/*
* General concept of alias management:
* - PAV and DASD alias management is specific to the eckd discipline.
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c4e36650c426..0316c20823ee 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -13,8 +13,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -24,8 +22,6 @@
#include <linux/uaccess.h>
#include <asm/ipl.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_devmap:"
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@@ -1114,7 +1110,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
else
use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
- return sprintf(buf, use_diag ? "1\n" : "0\n");
+ return sysfs_emit(buf, use_diag ? "1\n" : "0\n");
}
static ssize_t
@@ -1163,7 +1159,7 @@ dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
else
use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
- return sprintf(buf, use_raw ? "1\n" : "0\n");
+ return sysfs_emit(buf, use_raw ? "1\n" : "0\n");
}
static ssize_t
@@ -1259,7 +1255,7 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
if (count < 0)
return count;
- return sprintf(buf, "%d\n", count);
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
@@ -1338,19 +1334,19 @@ static ssize_t dasd_alias_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
if (uid.type == UA_BASE_PAV_ALIAS ||
uid.type == UA_HYPER_PAV_ALIAS) {
dasd_put_device(device);
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
}
}
dasd_put_device(device);
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
@@ -1412,15 +1408,9 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
break;
}
- if (strlen(uid.vduit) > 0)
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s.%s",
- uid.vendor, uid.serial, uid.ssid, ua_string,
- uid.vduit);
- else
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s",
- uid.vendor, uid.serial, uid.ssid, ua_string);
+ snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s%s%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit[0] ? "." : "", uid.vduit);
}
dasd_put_device(device);
@@ -1862,7 +1852,7 @@ static ssize_t dasd_pm_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
opm = dasd_path_get_opm(device);
nppm = dasd_path_get_nppm(device);
@@ -1872,8 +1862,8 @@ static ssize_t dasd_pm_show(struct device *dev,
ifccpm = dasd_path_get_ifccpm(device);
dasd_put_device(device);
- return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
- cablepm, cuirpm, hpfpm, ifccpm);
+ return sysfs_emit(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+ cablepm, cuirpm, hpfpm, ifccpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
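
The sysfs show() callbacks above are converted from sprintf() to sysfs_emit(), which bounds the output to the PAGE_SIZE sysfs buffer and warns on misuse. A short illustrative sketch; foo_flag and its value are hypothetical.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_flag_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	bool enabled = true;	/* stand-in for the real driver state */

	return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(foo_flag);
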
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 2e4e555b37c3..ea4b1d01bb76 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,8 +8,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -31,8 +29,6 @@
#include "dasd_int.h"
#include "dasd_diag.h"
-#define PRINTK_HEADER "dasd(diag):"
-
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
@@ -621,25 +617,9 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"dump sense not available for DIAG data");
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_diag_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- int max;
-
- max = DIAG_MAX_BLOCKS << block->s2b_shift;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ return DIAG_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_diag_pe_handler(struct dasd_device *device,
@@ -652,10 +632,10 @@ static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
+ .max_sectors = dasd_diag_max_sectors,
.check_device = dasd_diag_check_device,
.pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
- .setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd89b032968a..373c1a86c33e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,8 +10,6 @@
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -37,11 +35,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eckd):"
-
/*
* raw track access always map to 64k in memory
* so it maps to 16 blocks of 4k per track
@@ -1072,22 +1065,14 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device)
}
}
-static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
- char *print_uid)
+static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
{
struct dasd_uid uid;
create_uid(conf, &uid);
- if (strlen(uid.vduit) > 0)
- snprintf(print_uid, DASD_UID_STRLEN,
- "%s.%s.%04x.%02x.%s",
- uid.vendor, uid.serial, uid.ssid,
- uid.real_unit_addr, uid.vduit);
- else
- snprintf(print_uid, DASD_UID_STRLEN,
- "%s.%s.%04x.%02x",
- uid.vendor, uid.serial, uid.ssid,
- uid.real_unit_addr);
+ snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
+ uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
+ uid.vduit[0] ? "." : "", uid.vduit);
}
static int dasd_eckd_check_cabling(struct dasd_device *device,
@@ -5529,15 +5514,15 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
* and return number of printed chars.
*/
static void
-dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
+ struct ccw1 *to, char *page)
{
int len, count;
char *datap;
len = 0;
while (from <= to) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
/* get pointer to data (consider IDALs) */
@@ -5560,7 +5545,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
from++;
}
if (len > 0)
- printk(KERN_ERR "%s", page);
+ dev_err(&device->cdev->dev, "%s", page);
}
static void
@@ -5591,9 +5576,12 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
- char *page;
struct ccw1 *first, *last, *fail, *from, *to;
+ struct device *dev;
int len, sl, sct;
+ char *page;
+
+ dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
@@ -5602,24 +5590,18 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
- "CS:%02X RC:%d\n",
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len,
+ "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing CCW: %p\n",
- dev_name(&device->cdev->dev),
+ len += sprintf(page + len, "Failing CCW: %px\n",
phys_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -5631,23 +5613,20 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
+ sprintf(page + len,
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
+ sprintf(page + len,
+ "32 Byte: Format: %x Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
@@ -5656,8 +5635,8 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
- printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
- dasd_eckd_dump_ccw_range(first, to, page);
+ dev_err(dev, "Related CP in req: %px\n", req);
+ dasd_eckd_dump_ccw_range(device, first, to, page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
@@ -5665,19 +5644,19 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
- printk(KERN_ERR PRINTK_HEADER "......\n");
+ dev_err(dev, "......\n");
}
to = min(fail + 1, last);
- dasd_eckd_dump_ccw_range(from, to, page + len);
+ dasd_eckd_dump_ccw_range(device, from, to, page + len);
/* print last CCWs (maximum 2) */
len = 0;
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
- printk(KERN_ERR PRINTK_HEADER "......\n");
+ dev_err(dev, "......\n");
}
- dasd_eckd_dump_ccw_range(from, last, page + len);
+ dasd_eckd_dump_ccw_range(device, from, last, page + len);
}
free_page((unsigned long) page);
}
@@ -5701,11 +5680,9 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len,
+ "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
@@ -5713,9 +5690,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
irb->scsw.tm.fcxs,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing TCW: %p\n",
- dev_name(&device->cdev->dev),
+ len += sprintf(page + len, "Failing TCW: %px\n",
phys_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
@@ -5724,47 +5699,37 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
if (tsb) {
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->length %d\n", tsb->length);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->flags %x\n", tsb->flags);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->dcw_offset %d\n", tsb->dcw_offset);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->count %d\n", tsb->count);
+ len += sprintf(page + len, "tsb->length %d\n", tsb->length);
+ len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
+ len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
+ len += sprintf(page + len, "tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
- len += sprintf(page + len, PRINTK_HEADER
- " residual %d\n", residual);
+ len += sprintf(page + len, "residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.def_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.queue_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_busy_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_act_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+ len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
+ tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.ddpc.rcq %2d-%2d: ",
+ len += sprintf(page + len,
+ "tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
for (sct = 0; sct < 8; sct++) {
- len += sprintf(page + len, " %02x",
+ len += sprintf(page + len, "%02x",
rcq[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
@@ -5772,15 +5737,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.intrg.: not supported yet\n");
+ len += sprintf(page + len,
+ "tsb->tsa.intrg.: not supported yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len,
+ "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
@@ -5791,27 +5756,23 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
+ sprintf(page + len,
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
+ sprintf(page + len,
+ "32 Byte: Format: %x Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO TSB DATA AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(&device->cdev->dev, "%s", page);
free_page((unsigned long) page);
}
@@ -6865,17 +6826,9 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
dasd_schedule_requeue(device);
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- struct dasd_device *device = block->base;
- int max;
-
- if (device->features & DASD_FEATURE_USERAW) {
+ if (block->base->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
* it is higher than the native ECKD value because we
@@ -6883,19 +6836,10 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
* so the max_hw_sectors are
* 2048 x 512B = 1024kB = 16 tracks
*/
- max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
- } else {
- max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
+ return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+
+ return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}
static struct ccw_driver dasd_eckd_driver = {
@@ -6927,7 +6871,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
- .setup_blk_queue = dasd_eckd_setup_blk_queue,
+ .max_sectors = dasd_eckd_max_sectors,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c956de711cf7..5064a616e041 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -7,8 +7,6 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -28,11 +26,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eer):"
-
/*
* SECTION: the internal buffer
*/
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index c07e6e713518..4c0d3a704513 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -9,8 +9,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/init.h>
@@ -18,9 +16,6 @@
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_erp:"
-
#include "dasd_int.h"
struct dasd_ccw_req *
@@ -170,12 +165,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev,
- "A timeout error occurred for cqr %p\n", cqr);
+ "A timeout error occurred for cqr %px\n", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev,
- "A transport error occurred for cqr %p\n", cqr);
+ "A transport error occurred for cqr %px\n", cqr);
return;
}
/* dump sense data */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index c06fa2b27120..bcbb2f8e91fe 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -25,11 +25,6 @@
#include "dasd_int.h"
#include "dasd_fba.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(fba):"
-
#define FBA_DEFAULT_RETRIES 32
#define DASD_FBA_CCW_WRITE 0x41
@@ -660,30 +655,27 @@ static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
- char *page;
struct ccw1 *act, *end, *last;
int len, sl, sct, count;
+ struct device *dev;
+ char *page;
+
+ dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "No memory to dump sense data");
+ "No memory to dump sense data");
return;
}
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
- irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing CCW: %p\n",
- dev_name(&device->cdev->dev),
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n",
+ req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+ len += sprintf(page + len, "Failing CCW: %px\n",
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -693,20 +685,18 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
}
} else {
- len += sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ len += sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
act = req->cpaddr;
- for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+ for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
- len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
+ len = sprintf(page, "Related CP in req: %px\n", req);
while (act <= end) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
@@ -716,19 +706,17 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
act++;
}
- printk(KERN_ERR "%s", page);
-
+ dev_err(dev, "%s", page);
/* print failing CCW area */
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ len += sprintf(page + len, "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
@@ -742,11 +730,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ len += sprintf(page + len, "......\n");
}
while (act <= last) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
@@ -757,39 +744,13 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
act++;
}
if (len > 0)
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
free_page((unsigned long) page);
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_fba_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- unsigned int max_bytes, max_discard_sectors;
- int max;
-
- max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
- q->limits.discard_granularity = logical_block_size;
-
- /* Calculate max_discard_sectors and make it PAGE aligned */
- max_bytes = USHRT_MAX * logical_block_size;
- max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
- max_discard_sectors = max_bytes / logical_block_size;
-
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ return DASD_FBA_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_fba_pe_handler(struct dasd_device *device,
@@ -802,10 +763,11 @@ static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
+ .has_discard = true,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.pe_handler = dasd_fba_pe_handler,
- .setup_blk_queue = dasd_fba_setup_blk_queue,
+ .max_sectors = dasd_fba_max_sectors,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 55e3abe94cde..4533dd055ca8 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -11,8 +11,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/fs.h>
@@ -20,9 +18,6 @@
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_gendisk:"
-
#include "dasd_int.h"
static unsigned int queue_depth = 32;
@@ -39,6 +34,16 @@ MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD d
*/
int dasd_gendisk_alloc(struct dasd_block *block)
{
+ struct queue_limits lim = {
+ /*
+ * With page sized segments, each segment can be translated into
+ * one idaw/tidaw.
+ */
+ .max_segment_size = PAGE_SIZE,
+ .seg_boundary_mask = PAGE_SIZE - 1,
+ .dma_alignment = PAGE_SIZE - 1,
+ .max_segments = USHRT_MAX,
+ };
struct gendisk *gdp;
struct dasd_device *base;
int len, rc;
@@ -58,11 +63,12 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (rc)
return rc;
- gdp = blk_mq_alloc_disk(&block->tag_set, block);
+ gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block);
if (IS_ERR(gdp)) {
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
@@ -127,15 +133,15 @@ void dasd_gendisk_free(struct dasd_block *block)
*/
int dasd_scan_partitions(struct dasd_block *block)
{
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
int rc;
- bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
+ bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
NULL, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
- PTR_ERR(bdev_handle));
+ PTR_ERR(bdev_file));
return -ENODEV;
}
@@ -147,15 +153,15 @@ int dasd_scan_partitions(struct dasd_block *block)
"scan partitions error, rc %d", rc);
/*
- * Since the matching bdev_release() call to the
- * bdev_open_by_path() in this function is not called before
+ * Since the matching fput() call to the
+ * bdev_file_open_by_path() in this function is not called before
* dasd_destroy_partitions the offline open_count limit needs to be
- * increased from 0 to 1. This is done by setting device->bdev_handle
+ * increased from 0 to 1. This is done by setting device->bdev_file
* (see dasd_generic_set_offline). As long as the partition detection
* is running no offline should be allowed. That is why the assignment
- * to block->bdev_handle is done AFTER the BLKRRPART ioctl.
+ * to block->bdev_file is done AFTER the BLKRRPART ioctl.
*/
- block->bdev_handle = bdev_handle;
+ block->bdev_file = bdev_file;
return 0;
}
@@ -165,21 +171,21 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/*
- * Get the bdev_handle pointer from the device structure and clear
- * device->bdev_handle to lower the offline open_count limit again.
+ * Get the bdev_file pointer from the device structure and clear
+ * device->bdev_file to lower the offline open_count limit again.
*/
- bdev_handle = block->bdev_handle;
- block->bdev_handle = NULL;
+ bdev_file = block->bdev_file;
+ block->bdev_file = NULL;
- mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex);
- bdev_disk_changed(bdev_handle->bdev->bd_disk, true);
- mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex);
+ mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex);
+ bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true);
+ mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
- bdev_release(bdev_handle);
+ fput(bdev_file);
}
int dasd_gendisk_init(void)
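
dasd_gendisk_alloc() above now passes its queue limits to blk_mq_alloc_disk() at allocation time rather than tweaking the queue afterwards. A hedged sketch of that allocation pattern, using only the limits fields and calls shown in the hunk; foo_alloc_disk and its tag-set handling are illustrative.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/limits.h>

static struct gendisk *foo_alloc_disk(struct blk_mq_tag_set *set, void *data)
{
	struct queue_limits lim = {
		/* Page-sized segments translate 1:1 into idaws/tidaws. */
		.max_segment_size = PAGE_SIZE,
		.seg_boundary_mask = PAGE_SIZE - 1,
		.dma_alignment = PAGE_SIZE - 1,
		.max_segments = USHRT_MAX,
	};
	struct gendisk *gdp;

	gdp = blk_mq_alloc_disk(set, &lim, data);
	if (IS_ERR(gdp))
		return gdp;	/* ERR_PTR on failure, not NULL */

	blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
	return gdp;
}
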
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1b1b8a41c4d4..e5f40536b425 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -113,9 +113,6 @@ do { \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
-/* limit size for an errorstring */
-#define ERRORLENGTH 30
-
/* definition of dbf debug levels */
#define DBF_EMERG 0 /* system is unusable */
#define DBF_ALERT 1 /* action must be taken immediately */
@@ -126,32 +123,6 @@ do { \
#define DBF_INFO 6 /* informational */
#define DBF_DEBUG 6 /* debug-level messages */
-/* messages to be written via klogd and dbf */
-#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- dev_name(&d_device->cdev->dev), d_args); \
- DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
-} while(0)
-
-#define MESSAGE(d_loglevel,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
- DBF_EVENT(DBF_ALERT, d_string, d_args); \
-} while(0)
-
-/* messages to be written via klogd only */
-#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- dev_name(&d_device->cdev->dev), d_args); \
-} while(0)
-
-#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
-} while(0)
-
/* Macro to calculate number of blocks per page */
#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
@@ -322,6 +293,7 @@ struct dasd_discipline {
struct module *owner;
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
+ bool has_discard;
struct list_head list; /* used for list of disciplines */
@@ -360,10 +332,7 @@ struct dasd_discipline {
int (*online_to_ready) (struct dasd_device *);
int (*basic_to_known)(struct dasd_device *);
- /*
- * Initialize block layer request queue.
- */
- void (*setup_blk_queue)(struct dasd_block *);
+ unsigned int (*max_sectors)(struct dasd_block *);
/* (struct dasd_device *);
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
@@ -650,7 +619,7 @@ struct dasd_block {
struct gendisk *gdp;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
atomic_t open_count;
unsigned long blocks; /* size of volume in blocks */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 61b9675e2a67..7e0ed7032f76 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -10,8 +10,6 @@
* i/o controls for the dasd driver.
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/major.h>
@@ -24,12 +22,8 @@
#include <linux/uaccess.h>
#include <linux/dasd_mod.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_ioctl:"
-
#include "dasd_int.h"
-
static int
dasd_ioctl_api_version(void __user *argp)
{
@@ -537,7 +531,7 @@ static int __dasd_ioctl_information(struct dasd_block *block,
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
- if (!block->bdev_handle)
+ if (!block->bdev_file)
dasd_info->open_count++;
/*
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 62a859ea67f8..0faaa437d9be 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -11,8 +11,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -23,9 +21,6 @@
#include <asm/debug.h>
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_proc:"
-
#include "dasd_int.h"
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index f363c1d51d9a..ba66aa6a83c6 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -546,6 +546,9 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = {
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
+ struct queue_limits lim = {
+ .logical_block_size = 4096,
+ };
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
@@ -630,9 +633,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dev.release = dcssblk_release_segment;
dev_info->dev.groups = dcssblk_dev_attr_groups;
INIT_LIST_HEAD(&dev_info->lh);
- dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
- if (dev_info->gd == NULL) {
- rc = -ENOMEM;
+ dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(dev_info->gd)) {
+ rc = PTR_ERR(dev_info->gd);
goto seg_list_del;
}
dev_info->gd->major = dcssblk_major;
@@ -640,7 +643,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
dev_info->gd->flags |= GENHD_FL_NO_PART;
- blk_queue_logical_block_size(dev_info->gd->queue, 4096);
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
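dcssblk now hands its queue limits to blk_alloc_disk() at allocation time instead of calling blk_queue_logical_block_size() afterwards; note that the new allocator returns an ERR_PTR rather than NULL. A reduced sketch of the pattern (names illustrative):

#include <linux/blkdev.h>
#include <linux/numa.h>

/* Illustrative only: BIO-based disk allocation with the limits applied
 * atomically at allocation time.
 */
static struct gendisk *example_alloc_disk_4k(void)
{
        struct queue_limits lim = {
                .logical_block_size = 4096,
        };

        /* Callers must check with IS_ERR()/PTR_ERR(), not for NULL. */
        return blk_alloc_disk(&lim, NUMA_NO_NODE);
}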
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index ade95e91b3c8..9f6fdd0daa74 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -435,10 +435,17 @@ static const struct blk_mq_ops scm_mq_ops = {
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
- unsigned int devindex, nr_max_blk;
+ struct queue_limits lim = {
+ .logical_block_size = 1 << 12,
+ };
+ unsigned int devindex;
struct request_queue *rq;
int len, ret;
+ lim.max_segments = min(scmdev->nr_max_block,
+ (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
+ lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = blk_size */
+
devindex = atomic_inc_return(&nr_devices) - 1;
/* scma..scmz + scmaa..scmzz */
if (devindex > 701) {
@@ -462,18 +469,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
if (ret)
goto out;
- bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
+ bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev);
if (IS_ERR(bdev->gendisk)) {
ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
rq = bdev->rq = bdev->gendisk->queue;
- nr_max_blk = min(scmdev->nr_max_block,
- (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
-
- blk_queue_logical_block_size(rq, 1 << 12);
- blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
- blk_queue_max_segments(rq, nr_max_blk);
blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
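scm_blk does the same for a blk-mq disk: the segment and sector limits are computed up front and passed to blk_mq_alloc_disk() together with the tag set, replacing the blk_queue_max_*() calls on the live queue. A hedged sketch of that shape (the 4 KiB block size and the helper name are illustrative):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Illustrative only: derive the limits first, then allocate the disk. */
static struct gendisk *example_mq_alloc_disk(struct blk_mq_tag_set *set,
                                             void *queuedata,
                                             unsigned int max_segs)
{
        struct queue_limits lim = {
                .logical_block_size = 4096,
                .max_segments       = max_segs,
                .max_hw_sectors     = max_segs << 3,   /* 8 * 512 bytes per 4K block */
        };

        return blk_mq_alloc_disk(set, &lim, queuedata);
}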
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 82efdd20ad01..1d17a83569ce 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -195,7 +195,7 @@ static void free_chan_prog(struct ccw1 *cpa)
struct ccw1 *ptr = cpa;
while (ptr->cda) {
- kfree((void *)(addr_t) ptr->cda);
+ kfree(phys_to_virt(ptr->cda));
ptr++;
}
kfree(cpa);
@@ -237,7 +237,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
free_chan_prog(cpa);
return ERR_PTR(-ENOMEM);
}
- cpa[i].cda = (u32)(addr_t) kbuf;
+ cpa[i].cda = (u32)virt_to_phys(kbuf);
if (copy_from_user(kbuf, ubuf, reclen)) {
free_chan_prog(cpa);
return ERR_PTR(-EFAULT);
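vmur now converts explicitly between kernel virtual addresses and the physical addresses a channel program carries in its 31-bit CDA words, instead of casting through addr_t. A minimal sketch of that round trip (helper names are hypothetical; GFP_DMA keeps the buffer below 2 GB so it fits a 31-bit CDA):

#include <linux/slab.h>
#include <linux/io.h>           /* virt_to_phys(), phys_to_virt() */
#include <asm/cio.h>            /* struct ccw1 */

/* Illustrative only: point a CCW at a freshly allocated buffer. */
static int example_fill_cda(struct ccw1 *ccw, size_t len)
{
        void *kbuf = kzalloc(len, GFP_KERNEL | GFP_DMA);

        if (!kbuf)
                return -ENOMEM;
        ccw->cda = (u32)virt_to_phys(kbuf);     /* channel programs take physical addresses */
        return 0;
}

/* Illustrative only: free the buffer a CCW points at. */
static void example_free_cda(struct ccw1 *ccw)
{
        kfree(phys_to_virt(ccw->cda));          /* map back to a virtual address first */
        ccw->cda = 0;
}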
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bc3be0330f1d..0969fa01df58 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -29,7 +29,6 @@
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/os_info.h>
-#include <asm/switch_to.h>
#include <asm/maccess.h>
#include "sclp.h"
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index aa3292e57e38..6eb8bcd948dc 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -31,7 +31,7 @@
* to devices that use multiple subchannels.
*/
-static struct bus_type ccwgroup_bus_type;
+static const struct bus_type ccwgroup_bus_type;
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
@@ -465,7 +465,7 @@ static void ccwgroup_shutdown(struct device *dev)
gdrv->shutdown(gdev);
}
-static struct bus_type ccwgroup_bus_type = {
+static const struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.dev_groups = ccwgroup_dev_groups,
.remove = ccwgroup_remove,
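ccwgroup is the first of several s390 buses in this series (css, ccw, scm, ap and matrix follow below) whose bus_type is constified so the descriptor can live in rodata. A minimal sketch, assuming the current driver-core prototypes that accept const struct bus_type pointers; all names are illustrative:

#include <linux/device.h>
#include <linux/module.h>

/* Illustrative only: a bus descriptor that never changes at runtime. */
static const struct bus_type example_bus_type = {
        .name = "example",
};

static int __init example_bus_init(void)
{
        return bus_register(&example_bus_type);
}

static void __exit example_bus_exit(void)
{
        bus_unregister(&example_bus_type);
}

module_init(example_bus_init);
module_exit(example_bus_exit);
MODULE_DESCRIPTION("const bus_type sketch");
MODULE_LICENSE("GPL");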
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 64ed55c3aed6..3d88899dff7c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1091,8 +1091,8 @@ int __init chsc_init(void)
{
int ret;
- sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sei_page = (void *)get_zeroed_page(GFP_KERNEL);
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL);
if (!sei_page || !chsc_page) {
ret = -ENOMEM;
goto out_err;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 902237d0baef..e6c800653f98 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -293,7 +293,7 @@ static int chsc_ioctl_start(void __user *user_area)
if (!css_general_characteristics.dynio)
/* It makes no sense to try. */
return -EOPNOTSUPP;
- chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -341,7 +341,7 @@ static int chsc_ioctl_on_close_set(void __user *user_area)
ret = -ENOMEM;
goto out_unlock;
}
- on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!on_close_chsc_area) {
ret = -ENOMEM;
goto out_free_request;
@@ -393,7 +393,7 @@ static int chsc_ioctl_start_sync(void __user *user_area)
struct chsc_sync_area *chsc_area;
int ret, ccode;
- chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
@@ -439,7 +439,7 @@ static int chsc_ioctl_info_channel_path(void __user *user_cd)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpcd_area;
- scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scpcd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scpcd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
@@ -501,7 +501,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scucd_area;
- scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scucd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scucd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
@@ -564,7 +564,7 @@ static int chsc_ioctl_info_sch_cu(void __user *user_cud)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sscud_area;
- sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sscud_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sscud_area)
return -ENOMEM;
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
@@ -626,7 +626,7 @@ static int chsc_ioctl_conf_info(void __user *user_ci)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sci_area;
- sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sci_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sci_area)
return -ENOMEM;
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
@@ -697,7 +697,7 @@ static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
u32 res;
} __attribute__ ((packed)) *cssids_parm;
- sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccl_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sccl_area)
return -ENOMEM;
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
@@ -757,7 +757,7 @@ static int chsc_ioctl_chpd(void __user *user_chpd)
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scpd_area || !chpd) {
ret = -ENOMEM;
goto out_free;
@@ -797,7 +797,7 @@ static int chsc_ioctl_dcal(void __user *user_dcal)
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sdcal_area;
- sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sdcal_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sdcal_area)
return -ENOMEM;
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 5584aa46c94e..f80dc18e2a76 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -169,7 +169,8 @@ static inline void cmf_activate(void *area, unsigned int onoff)
" lgr 2,%[mbo]\n"
" schm\n"
:
- : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
+ : [r1] "d" ((unsigned long)onoff),
+ [mbo] "d" (virt_to_phys(area))
: "1", "2");
}
@@ -501,8 +502,7 @@ static int alloc_cmb(struct ccw_device *cdev)
WARN_ON(!list_empty(&cmb_area.list));
spin_unlock(&cmb_area.lock);
- mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
- get_order(size));
+ mem = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
spin_lock(&cmb_area.lock);
if (cmb_area.mem) {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 28a88ed2c3aa..094431a62ad5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -39,7 +39,7 @@ int max_ssid;
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
-static struct bus_type css_bus_type;
+static const struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -1409,7 +1409,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-static struct bus_type css_bus_type = {
+static const struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 0cfb179e1bcb..f95d12345d98 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -49,7 +49,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 };
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
-static struct bus_type ccw_bus_type;
+static const struct bus_type ccw_bus_type;
/******************* bus type handling ***********************/
@@ -1776,7 +1776,7 @@ static void ccw_device_shutdown(struct device *dev)
__disable_cmf(cdev);
}
-static struct bus_type ccw_bus_type = {
+static const struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c533d1dadc6b..a5dba3829769 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY ||
+ cdev->private->flags.doverify) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
+ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 6b21ba68c1fe..c7894d61306d 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -42,7 +42,7 @@ static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=scm:scmdev");
}
-static struct bus_type scm_bus_type = {
+static const struct bus_type scm_bus_type = {
.name = "scm",
.probe = scmdev_probe,
.remove = scmdev_remove,
@@ -228,7 +228,7 @@ int scm_update_information(void)
size_t num;
int ret;
- scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ scm_info = (void *)__get_free_page(GFP_KERNEL);
if (!scm_info)
return -ENOMEM;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f46dd6abacd7..cce0bafd4c92 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -38,6 +38,7 @@
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <asm/uv.h>
#include "ap_bus.h"
#include "ap_debug.h"
@@ -83,14 +84,11 @@ EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
-/* # of bus scans since init */
-static atomic64_t ap_scan_bus_count;
-
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
-/* completion for initial APQN bindings complete */
-static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
+/* completion for APQN bindings complete */
+static DECLARE_COMPLETION(ap_apqn_bindings_complete);
static struct ap_config_info *ap_qci_info;
static struct ap_config_info *ap_qci_info_old;
@@ -101,12 +99,16 @@ static struct ap_config_info *ap_qci_info_old;
debug_info_t *ap_dbf_info;
/*
- * Workqueue timer for bus rescan.
+ * AP bus rescan related things.
*/
-static struct timer_list ap_config_timer;
-static int ap_config_time = AP_CONFIG_TIME;
-static void ap_scan_bus(struct work_struct *);
-static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+static bool ap_scan_bus(void);
+static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
+static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
+static int ap_scan_bus_time = AP_CONFIG_TIME;
+static struct timer_list ap_scan_bus_timer;
+static void ap_scan_bus_wq_callback(struct work_struct *);
+static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);
/*
* Tasklet & timer for AP request polling and interrupts
@@ -135,7 +137,7 @@ static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;
-static struct bus_type ap_bus_type;
+static const struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq,
@@ -753,7 +755,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
}
/*
- * After initial ap bus scan do check if all existing APQNs are
+ * After ap bus scan do check if all existing APQNs are
* bound to device drivers.
*/
static void ap_check_bindings_complete(void)
@@ -763,9 +765,9 @@ static void ap_check_bindings_complete(void)
if (atomic64_read(&ap_scan_bus_count) >= 1) {
ap_calc_bound_apqns(&apqns, &bound);
if (bound == apqns) {
- if (!completion_done(&ap_init_apqn_bindings_complete)) {
- complete_all(&ap_init_apqn_bindings_complete);
- AP_DBF_INFO("%s complete\n", __func__);
+ if (!completion_done(&ap_apqn_bindings_complete)) {
+ complete_all(&ap_apqn_bindings_complete);
+ pr_debug("%s all apqn bindings complete\n", __func__);
}
ap_send_bindings_complete_uevent();
}
@@ -782,27 +784,29 @@ static void ap_check_bindings_complete(void)
* -ETIME is returned. On failures negative return values are
* returned to the caller.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
+int ap_wait_apqn_bindings_complete(unsigned long timeout)
{
+ int rc = 0;
long l;
- if (completion_done(&ap_init_apqn_bindings_complete))
+ if (completion_done(&ap_apqn_bindings_complete))
return 0;
if (timeout)
l = wait_for_completion_interruptible_timeout(
- &ap_init_apqn_bindings_complete, timeout);
+ &ap_apqn_bindings_complete, timeout);
else
l = wait_for_completion_interruptible(
- &ap_init_apqn_bindings_complete);
+ &ap_apqn_bindings_complete);
if (l < 0)
- return l == -ERESTARTSYS ? -EINTR : l;
+ rc = l == -ERESTARTSYS ? -EINTR : l;
else if (l == 0 && timeout)
- return -ETIME;
+ rc = -ETIME;
- return 0;
+ pr_debug("%s rc=%d\n", __func__, rc);
+ return rc;
}
-EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
+EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
@@ -826,8 +830,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
- AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
+ pr_debug("%s reprobing queue=%02x.%04x\n",
+ __func__, card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
@@ -939,8 +943,6 @@ static int ap_device_probe(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- } else {
- ap_check_bindings_complete();
}
out:
@@ -1012,16 +1014,47 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
-void ap_bus_force_rescan(void)
+/*
+ * Enforce a synchronous AP bus rescan.
+ * Returns true if the bus scan finds a change in the AP configuration
+ * and AP devices have been added or deleted by the time this function returns.
+ */
+bool ap_bus_force_rescan(void)
{
+ unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
+ bool rc = false;
+
+ pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
+
/* Only trigger AP bus scans after the initial scan is done */
- if (atomic64_read(&ap_scan_bus_count) <= 0)
- return;
+ if (scan_counter <= 0)
+ goto out;
+
+ /* Try to acquire the AP scan bus mutex */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ /* mutex acquired, run the AP bus scan */
+ ap_scan_bus_result = ap_scan_bus();
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
+ goto out;
+ }
+
+ /*
+ * Mutex acquire failed. So there is currently another task
+ * already running the AP bus scan. Then let's simply wait
+ * for the lock, which means the other task has finished and
+ * stored the result in ap_scan_bus_result.
+ */
+ if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
+ /* some error occurred, ignore and go out */
+ goto out;
+ }
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
- /* processing a asynchronous bus rescan */
- del_timer(&ap_config_timer);
- queue_work(system_long_wq, &ap_scan_work);
- flush_work(&ap_scan_work);
+out:
+ pr_debug("%s rc=%d\n", __func__, rc);
+ return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
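ap_bus_force_rescan() is now synchronous: either it wins ap_scan_bus_mutex and runs the scan itself, or it waits for the task that is already scanning and reuses that task's cached result. The same trylock-or-wait-for-result pattern in isolation, with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_scan_mutex);
static bool example_scan_result;        /* result of the last scan */

/* Stand-in for the real, potentially slow bus scan. */
static bool example_do_scan(void)
{
        return false;                   /* "no configuration change" */
}

/* Illustrative only: run the scan ourselves, or piggyback on a scan
 * that is already in flight and return its stored result.
 */
static bool example_force_scan(void)
{
        bool rc = false;

        if (mutex_trylock(&example_scan_mutex)) {
                /* Nobody else is scanning: do it here. */
                example_scan_result = example_do_scan();
                rc = example_scan_result;
                mutex_unlock(&example_scan_mutex);
                return rc;
        }

        /*
         * Another task is scanning. Wait for it to finish; if we are
         * interrupted, simply report "no change".
         */
        if (mutex_lock_interruptible(&example_scan_mutex))
                return rc;
        rc = example_scan_result;
        mutex_unlock(&example_scan_mutex);
        return rc;
}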
@@ -1030,7 +1063,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan);
*/
void ap_bus_cfg_chg(void)
{
- AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
+ pr_debug("%s config change, forcing bus rescan\n", __func__);
ap_bus_force_rescan();
}
@@ -1250,7 +1283,7 @@ static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
- return sysfs_emit(buf, "%d\n", ap_config_time);
+ return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
}
static ssize_t config_time_store(const struct bus_type *bus,
@@ -1260,8 +1293,8 @@ static ssize_t config_time_store(const struct bus_type *bus,
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
return -EINVAL;
- ap_config_time = time;
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_scan_bus_time = time;
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
return count;
}
@@ -1603,7 +1636,7 @@ static struct attribute *ap_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(ap_bus);
-static struct bus_type ap_bus_type = {
+static const struct bus_type ap_bus_type = {
.name = "ap",
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
@@ -1888,8 +1921,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev checkstop on\n",
+ __func__, ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1899,8 +1932,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev checkstop off\n",
+ __func__, ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1912,8 +1945,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev config off\n",
+ __func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1924,8 +1957,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev config on\n",
+ __func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -1997,8 +2030,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("%s(%d) no type info (no APQN found), ignored\n",
+ __func__, ap);
}
return;
}
@@ -2010,8 +2043,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("%s(%d) no valid type (0) info, ignored\n",
+ __func__, ap);
}
return;
}
@@ -2135,23 +2168,80 @@ static bool ap_get_configuration(void)
sizeof(struct ap_config_info)) != 0;
}
+/*
+ * ap_config_has_new_aps - Check current against old qci info whether
+ * new adapters have appeared. Returns true if at least one new
+ * adapter in the apm mask is showing up. Existing adapters or
+ * receding adapters are not counted.
+ */
+static bool ap_config_has_new_aps(void)
+{
+ unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
+
+ if (!ap_qci_info)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
+ (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
+ if (!bitmap_empty(m, AP_DEVICES))
+ return true;
+
+ return false;
+}
+
+/*
+ * ap_config_has_new_doms - Check current against old qci info whether
+ * new (usage) domains have appeared. Returns true if at least one
+ * new domain in the aqm mask is showing up. Existing domains or
+ * receding domains are not counted.
+ */
+static bool ap_config_has_new_doms(void)
+{
+ unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
+
+ if (!ap_qci_info)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
+ (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
+ if (!bitmap_empty(m, AP_DOMAINS))
+ return true;
+
+ return false;
+}
+
/**
* ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
- * @unused: Unused pointer.
+ * Always run under mutex ap_scan_bus_mutex protection
+ * which needs to get locked/unlocked by the caller!
+ * Returns true if any config change has been detected
+ * during the scan, otherwise false.
*/
-static void ap_scan_bus(struct work_struct *unused)
+static bool ap_scan_bus(void)
{
- int ap, config_changed = 0;
+ bool config_changed;
+ int ap;
+
+ pr_debug(">%s\n", __func__);
- /* config change notify */
+ /* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
- if (config_changed)
+ if (config_changed) {
+ if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
+ /*
+ * The appearance of new adapters and/or domains means new
+ * ap devices need to be built and bound to a device driver.
+ * Thus reset the APQN bindings complete completion.
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+ }
+ /* post a config change notify */
notify_config_changed();
+ }
ap_select_domain();
- AP_DBF_DBG("%s running\n", __func__);
-
/* loop over all possible adapters */
for (ap = 0; ap <= ap_max_adapter_id; ap++)
ap_scan_adapter(ap);
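ap_config_has_new_aps()/ap_config_has_new_doms() above only care about bits that are newly set in the current QCI masks; bits that disappeared are ignored. The test reduces to bitmap_andnot() plus an emptiness check, as in this self-contained sketch (size and names illustrative):

#include <linux/bitmap.h>

#define EXAMPLE_BITS 256

/* Illustrative only: true if 'cur' has at least one bit set that is not
 * set in 'old', i.e. something new has appeared; receding bits are
 * deliberately ignored.
 */
static bool example_has_new_bits(const unsigned long *cur,
                                 const unsigned long *old)
{
        DECLARE_BITMAP(diff, EXAMPLE_BITS);

        bitmap_andnot(diff, cur, old, EXAMPLE_BITS);
        return !bitmap_empty(diff, EXAMPLE_BITS);
}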
@@ -2174,23 +2264,56 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- AP_DBF_DBG("%s init scan complete\n", __func__);
+ pr_debug("%s init scan complete\n", __func__);
ap_send_init_scan_done_uevent();
- ap_check_bindings_complete();
}
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_check_bindings_complete();
+
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+ pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+
+ return config_changed;
}
-static void ap_config_timeout(struct timer_list *unused)
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, workqueue timer (ap_scan_bus_time)
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
{
- queue_work(system_long_wq, &ap_scan_work);
+ /*
+ * schedule work into the system long wq which when
+ * the work is finally executed, calls the AP bus scan.
+ */
+ queue_work(system_long_wq, &ap_scan_bus_work);
+}
+
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
+{
+ /*
+ * Try to invoke ap_scan_bus(). If the mutex acquisition
+ * fails, another task is already running the AP bus scan and
+ * there is no need to wait for it or re-trigger the scan.
+ * Note that at the end of the bus scan the AP scan bus timer
+ * is re-armed, which triggers ap_scan_bus_timer_callback(),
+ * which enqueues work on the system_long_wq, which in turn
+ * invokes this function again.
+ */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_result = ap_scan_bus();
+ mutex_unlock(&ap_scan_bus_mutex);
+ }
}
static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
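The scan timer and workqueue callbacks above chain a timer, a work item on system_long_wq and the scan itself: the timer callback only queues the work, the work callback runs the scan under the mutex, and the scan re-arms the timer when it is done. Reduced to its skeleton (names illustrative; the re-arm is shown in the work callback for brevity, whereas above it sits at the end of ap_scan_bus()):

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_scan_work_fn(struct work_struct *unused);
static DECLARE_WORK(example_scan_work, example_scan_work_fn);
static struct timer_list example_scan_timer;
static int example_scan_interval = 30;  /* seconds, stand-in for ap_scan_bus_time */

/* Illustrative only: the timer never scans itself, it only queues work. */
static void example_scan_timer_fn(struct timer_list *unused)
{
        queue_work(system_long_wq, &example_scan_work);
}

static void example_scan_work_fn(struct work_struct *unused)
{
        /* ... run the scan here (under its mutex) ... */
        mod_timer(&example_scan_timer, jiffies + example_scan_interval * HZ);
}

static void example_scan_setup(void)
{
        timer_setup(&example_scan_timer, example_scan_timer_fn, 0);
        queue_work(system_long_wq, &example_scan_work); /* kick off the first scan */
}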
@@ -2274,7 +2397,7 @@ static int __init ap_module_init(void)
ap_root_device->bus = &ap_bus_type;
/* Setup the AP bus rescan timer. */
- timer_setup(&ap_config_timer, ap_config_timeout, 0);
+ timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
/*
* Setup the high resolution poll timer.
@@ -2292,7 +2415,7 @@ static int __init ap_module_init(void)
goto out_work;
}
- queue_work(system_long_wq, &ap_scan_work);
+ queue_work(system_long_wq, &ap_scan_bus_work);
return 0;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 98814839ef30..59c7ed49aa02 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -266,7 +266,7 @@ int ap_sb_available(void);
bool ap_is_se_guest(void);
void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
-void ap_bus_force_rescan(void);
+bool ap_bus_force_rescan(void);
int ap_test_config_usage_domain(unsigned int domain);
int ap_test_config_ctrl_domain(unsigned int domain);
@@ -352,8 +352,12 @@ int ap_parse_mask_str(const char *str,
* the return value is 0. If the timeout (in jiffies) hits instead
* -ETIME is returned. On failures negative return values are
* returned to the caller.
+ * It may be that the AP bus scan finds new devices. Then the
+ * condition that all APQNs are bound to their device drivers
+ * is reset to false and this call blocks again until either all
+ * APQNs are bound to a device driver or the timeout hits.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout);
+int ap_wait_apqn_bindings_complete(unsigned long timeout);
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
void ap_send_online_uevent(struct ap_device *ap_dev, int online);
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index c083ce88a9a6..2f66271b8564 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -16,7 +16,7 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 6
+#define AP_DBF_MAX_SPRINTF_ARGS 6
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
@@ -26,8 +26,6 @@
debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
#define AP_DBF_INFO(...) \
debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define AP_DBF_DBG(...) \
- debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 682595443145..6e4e8d324a6d 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -136,6 +136,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ aq->reply->msg, aq->reply->len, false);
aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (!status.queue_empty && !aq->queue_count)
aq->queue_count++;
@@ -169,6 +171,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
+ pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+ __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
default:
@@ -243,6 +248,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+ print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg->msg, ap_msg->len, false);
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
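ap_queue.c now hex-dumps request and reply buffers via print_hex_dump_debug(), which is gated by dynamic debug just like the pr_debug() conversions in this patch. A one-line sketch of the call (prefix string and helper name illustrative):

#include <linux/printk.h>

/* Illustrative only: dump a message buffer at debug level; with
 * CONFIG_DYNAMIC_DEBUG the dump stays silent unless enabled at runtime.
 */
static void example_dump_msg(const void *msg, size_t len)
{
        print_hex_dump_debug("apmsg: ", DUMP_PREFIX_ADDRESS, 16, 1,
                             msg, len, false);
}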
@@ -446,9 +453,9 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
- AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
- __func__, AP_QID_CARD(aq->qid),
- AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+ pr_debug("%s queue 0x%02x.%04x associated with %u\n",
+ __func__, AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
/* association still pending */
@@ -690,9 +697,9 @@ static ssize_t ap_functions_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -846,9 +853,9 @@ static ssize_t se_bind_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -974,9 +981,9 @@ static ssize_t se_associate_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 6cfb6b2340c9..dccf664a3d95 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -42,24 +42,23 @@ MODULE_DESCRIPTION("s390 protected key interface");
* debug feature data and functions
*/
-static debug_info_t *debug_info;
+static debug_info_t *pkey_dbf_info;
-#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
-#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
-#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
-#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
+#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
+#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
static void __init pkey_debug_init(void)
{
/* 5 arguments per dbf entry (including the format string ptr) */
- debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
- debug_register_view(debug_info, &debug_sprintf_view);
- debug_set_level(debug_info, 3);
+ pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(pkey_dbf_info, &debug_sprintf_view);
+ debug_set_level(pkey_dbf_info, 3);
}
static void __exit pkey_debug_exit(void)
{
- debug_unregister(debug_info);
+ debug_unregister(pkey_dbf_info);
}
/* inside view of a protected key token (only type 0x00 version 0x01) */
@@ -163,14 +162,14 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, keytype);
return -EINVAL;
}
if (*protkeylen < keysize + AES_WK_VP_SIZE) {
- DEBUG_ERR("%s prot key buffer size too small: %u < %d\n",
- __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
+ __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
return -EINVAL;
}
@@ -182,7 +181,7 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
- DEBUG_ERR("%s pckmo functions not available\n", __func__);
+ PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
}
@@ -244,7 +243,7 @@ static int pkey_skey2pkey(const u8 *key, u8 *protkey,
}
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -283,7 +282,7 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
out:
kfree(apqns);
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -294,33 +293,36 @@ static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u32 nr_apqns, *apqns = NULL;
+ int i, j, rc = -ENODEV;
u16 card, dom;
- int i, rc;
zcrypt_wait_api_operational();
- /* build a list of apqns suitable for this key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
+ /* try two times in case of failure */
+ for (i = 0; i < 2 && rc; i++) {
- /* go through the list of apqns and try to derive an pkey */
- for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen, protkeytype);
- if (rc == 0)
- break;
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7,
+ ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ continue; /* retry findcard on failure */
+
+ /* go through the list of apqns and try to derive a pkey */
+ for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) {
+ card = apqns[j] >> 16;
+ dom = apqns[j] & 0xFFFF;
+ rc = ep11_kblob2protkey(card, dom, key, keylen,
+ protkey, protkeylen, protkeytype);
+ }
+
+ kfree(apqns);
}
-out:
- kfree(apqns);
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
+
return rc;
}
@@ -336,7 +338,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
int rc;
/* check the secure key for valid AES secure key */
- rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0);
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0);
if (rc)
goto out;
if (pattributes)
@@ -351,7 +353,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
if (rc > 0) {
/* key mkvp matches to old master key mkvp */
- DEBUG_DBG("%s secure key has old mkvp\n", __func__);
+ pr_debug("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
rc = 0;
@@ -363,7 +365,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
*pdomain = domain;
out:
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
@@ -379,8 +381,8 @@ static int pkey_genprotkey(u32 keytype, u8 *protkey,
keysize = pkey_keytype_aes_to_size(keytype);
if (!keysize) {
- DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
- keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ keytype);
return -EINVAL;
}
@@ -428,13 +430,13 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
fc = CPACF_KMC_PAES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__,
- protkeytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
+ protkeytype);
return -EINVAL;
}
if (protkeylen != pkeylen) {
- DEBUG_ERR("%s invalid protected key size %u for keytype %u\n",
- __func__, protkeylen, protkeytype);
+ PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
+ __func__, protkeylen, protkeytype);
return -EINVAL;
}
@@ -446,7 +448,7 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
sizeof(null_msg));
if (k != sizeof(null_msg)) {
- DEBUG_ERR("%s protected key is not valid\n", __func__);
+ PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
return -EKEYREJECTED;
}
@@ -464,13 +466,13 @@ static int nonccatokaes2pkey(const struct clearkeytoken *t,
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize) {
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
if (t->len != keysize) {
- DEBUG_ERR("%s non clear key aes token: invalid key len %u\n",
- __func__, t->len);
+ PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n",
+ __func__, t->len);
return -EINVAL;
}
@@ -505,7 +507,7 @@ try_via_ep11:
goto out;
failure:
- DEBUG_ERR("%s unable to build protected key from clear", __func__);
+ PKEY_DBF_ERR("%s unable to build protected key from clear", __func__);
out:
kfree(tmpbuf);
@@ -536,14 +538,14 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t,
keylen = 64;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
if (t->len != keylen) {
- DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n",
- __func__, t->len);
+ PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n",
+ __func__, t->len);
return -EINVAL;
}
@@ -551,8 +553,8 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t,
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (rc) {
- DEBUG_ERR("%s unable to build protected key from clear",
- __func__);
+ PKEY_DBF_ERR("%s unable to build protected key from clear",
+ __func__);
}
return rc;
@@ -604,15 +606,15 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
break;
}
case TOKVER_EP11_AES: {
/* check ep11 key for exportable as protected key */
- rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
@@ -621,15 +623,16 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
}
case TOKVER_EP11_AES_WITH_HEADER:
/* check ep11 key with header for exportable as protected key */
- rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n",
+ __func__, hdr->version);
}
out:
@@ -654,8 +657,8 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
return -EINVAL;
break;
default:
- DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
@@ -672,7 +675,7 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
int rc;
if (keylen < sizeof(struct keytoken_header)) {
- DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
+ PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen);
return -EINVAL;
}
@@ -686,12 +689,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
protkey, protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
@@ -839,7 +842,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
- rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
if (rc)
goto out;
if (ktype)
@@ -869,7 +872,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
- rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
+ rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
if (rc)
goto out;
if (ktype)
@@ -907,7 +910,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int api;
- rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -933,8 +936,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
int api;
- rc = ep11_check_aes_key_with_hdr(debug_info, 3,
- key, keylen, 1);
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -981,25 +984,27 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (hdr->version == TOKVER_CCA_AES) {
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
- if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
- if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
return -EINVAL;
} else {
- DEBUG_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_NON_CCA) {
if (hdr->version == TOKVER_EP11_AES) {
- if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- if (ep11_check_aes_key_with_hdr(debug_info, 3,
- key, keylen, 1))
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else {
return pkey_nonccatok2pkey(key, keylen,
@@ -1007,8 +1012,8 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
protkeytype);
}
} else {
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
@@ -1234,50 +1239,53 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 AES key blob with header */
- if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 ECC key blob with header */
- if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+ if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
/* EP11 AES key blob with header in session field */
- if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
if (hdr->version == TOKVER_CCA_AES) {
/* CCA AES data key */
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
- if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
/* CCA AES cipher key */
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
- if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
return -EINVAL;
} else {
- DEBUG_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
/* CCA ECC (private) key */
if (keylen < sizeof(struct eccprivkeytoken))
return -EINVAL;
- if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+ if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA) {
return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
} else {
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
@@ -1350,7 +1358,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_genseckey(kgs.cardnr, kgs.domain,
kgs.keytype, kgs.seckey.seckey);
- DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
+ pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
@@ -1365,7 +1373,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
kcs.clrkey.clrkey, kcs.seckey.seckey);
- DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
+ pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
@@ -1383,7 +1391,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
- DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
+ pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1400,7 +1408,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
kcp.protkey.protkey,
&kcp.protkey.len, &kcp.protkey.type);
- DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucp, &kcp, sizeof(kcp)))
@@ -1416,7 +1424,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_findcard(kfc.seckey.seckey,
&kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
+ pr_debug("%s cca_findcard()=%d\n", __func__, rc);
if (rc < 0)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
@@ -1432,7 +1440,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
- DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1447,7 +1455,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
&kvk.keysize, &kvk.attributes);
- DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(uvk, &kvk, sizeof(kvk)))
@@ -1463,7 +1471,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kgp.protkey.len = sizeof(kgp.protkey.protkey);
rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
&kgp.protkey.len, &kgp.protkey.type);
- DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugp, &kgp, sizeof(kgp)))
@@ -1478,7 +1486,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifyprotkey(kvp.protkey.protkey,
kvp.protkey.len, kvp.protkey.type);
- DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
break;
}
case PKEY_KBLOB2PROTK: {
@@ -1494,7 +1502,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
&ktp.protkey.len, &ktp.protkey.type);
- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
@@ -1523,7 +1531,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_genseckey2(apqns, kgs.apqn_entries,
kgs.type, kgs.size, kgs.keygenflags,
kkey, &klen);
- DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
@@ -1565,7 +1573,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
kcs.type, kcs.size, kcs.keygenflags,
kcs.clrkey.clrkey, kkey, &klen);
- DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
@@ -1601,7 +1609,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_verifykey2(kkey, kvk.keylen,
&kvk.cardnr, &kvk.domain,
&kvk.type, &kvk.size, &kvk.flags);
- DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
kfree(kkey);
if (rc)
break;
@@ -1630,7 +1638,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
&ktp.protkey.type);
- DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
@@ -1664,7 +1672,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
apqns, &nr_apqns);
- DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
+ pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
kfree(kkey);
if (rc && rc != -ENOSPC) {
kfree(apqns);
@@ -1707,7 +1715,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
kat.flags, apqns, &nr_apqns);
- DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
+ pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc);
if (rc && rc != -ENOSPC) {
kfree(apqns);
break;
@@ -1757,7 +1765,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
kkey, ktp.keylen,
protkey, &protkeylen, &ktp.pkeytype);
- DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index a5ab03e42ff1..4aeb3e1213c7 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -60,7 +60,7 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
kfree(matrix_dev);
}
-static struct bus_type matrix_bus = {
+static const struct bus_type matrix_bus = {
.name = "matrix",
};
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 983b3b16196c..fc169bc61593 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -659,6 +659,21 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
AP_DOMAINS);
}
+static bool _queue_passable(struct vfio_ap_queue *q)
+{
+ if (!q)
+ return false;
+
+ switch (q->reset_status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
* to ensure no queue devices are passed through to
@@ -687,7 +702,6 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid, apqi, apqn;
DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
- struct vfio_ap_queue *q;
bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
@@ -716,8 +730,7 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
* hardware device.
*/
apqn = AP_MKQID(apid, apqi);
- q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
- if (!q || q->reset_status.response_code) {
+ if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
/*
@@ -1691,6 +1704,7 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
switch (status->response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -1747,14 +1761,6 @@ static void apq_reset_check(struct work_struct *reset_work)
memcpy(&q->reset_status, &status, sizeof(status));
continue;
}
- /*
- * When an AP adapter is deconfigured, the
- * associated queues are reset, so let's set the
- * status response code to 0 so the queue may be
- * passed through (i.e., not filtered)
- */
- if (status.response_code == AP_RESPONSE_DECONFIGURED)
- q->reset_status.response_code = 0;
if (q->saved_isc != VFIO_AP_ISC_INVALID)
vfio_ap_free_aqic_resources(q);
break;
@@ -1781,12 +1787,7 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
queue_work(system_long_wq, &q->reset_work);
break;
case AP_RESPONSE_DECONFIGURED:
- /*
- * When an AP adapter is deconfigured, the associated
- * queues are reset, so let's set the status response code to 0
- * so the queue may be passed through (i.e., not filtered).
- */
- q->reset_status.response_code = 0;
+ case AP_RESPONSE_CHECKSTOPPED:
vfio_ap_free_aqic_resources(q);
break;
default:
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 74200f54dfff..02c503f16bc2 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -12,6 +12,9 @@
* Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -57,10 +60,6 @@ DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
-static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
-
-atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
-EXPORT_SYMBOL(zcrypt_rescan_req);
static LIST_HEAD(zcrypt_ops_list);
@@ -69,20 +68,15 @@ debug_info_t *zcrypt_dbf_info;
/*
* Process a rescan of the transport layer.
- *
- * Returns 1, if the rescan has been processed, otherwise 0.
+ * Runs a synchronous AP bus rescan.
+ * Returns true if something has changed (for example the
+ * bus scan has found and built up new devices) and it is
+ * worth doing a retry. Otherwise false is returned, meaning
+ * no changes at the AP bus level.
*/
-static inline int zcrypt_process_rescan(void)
-{
- if (atomic_read(&zcrypt_rescan_req)) {
- atomic_set(&zcrypt_rescan_req, 0);
- atomic_inc(&zcrypt_rescan_count);
- ap_bus_force_rescan();
- ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
- atomic_inc_return(&zcrypt_rescan_count));
- return 1;
- }
- return 0;
+static inline bool zcrypt_process_rescan(void)
+{
+ return ap_bus_force_rescan();
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
@@ -715,8 +709,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -820,8 +813,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -865,6 +857,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
+ print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
tdom = *domain;
if (perms != &ap_perms && tdom < AP_DOMAINS) {
@@ -940,8 +934,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, xcrb->user_defined, *domain);
+ pr_debug("%s no match for address %02x.%04x => ENODEV\n",
+ __func__, xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
@@ -952,6 +946,10 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -970,7 +968,26 @@ out:
long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
- return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("%s rc=%d\n", __func__, rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
@@ -1045,6 +1062,8 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out_free;
+ print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
if (perms != &ap_perms && domain < AUTOSEL_DOM) {
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
@@ -1113,15 +1132,15 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
if (targets && target_num == 1) {
- ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, (int)targets->ap_id,
- (int)targets->dom_id);
+ pr_debug("%s no match for address %02x.%04x => ENODEV\n",
+ __func__, (int)targets->ap_id,
+ (int)targets->dom_id);
} else if (targets) {
- ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
- __func__, (int)target_num);
+ pr_debug("%s no match for %d target addrs => ENODEV\n",
+ __func__, (int)target_num);
} else {
- ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
- __func__);
+ pr_debug("%s no match for address ff.ffff => ENODEV\n",
+ __func__);
}
rc = -ENODEV;
goto out_free;
@@ -1129,6 +1148,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
qid = pref_zq->queue->qid;
rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -1149,7 +1172,26 @@ out:
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
- return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("%s rc=%d\n", __func__, rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
@@ -1199,8 +1241,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -1431,20 +1472,17 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
- ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
+ pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -1463,20 +1501,17 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
- ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
+ pr_debug("ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -1495,21 +1530,18 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
- rc, xcrb.status);
+ pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcrb.status);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
@@ -1528,20 +1560,17 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
@@ -1670,7 +1699,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
/* unknown ioctl number */
default:
- ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
+ pr_debug("unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
@@ -1708,16 +1737,13 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
mex64.n_modulus = compat_ptr(mex32.n_modulus);
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
@@ -1761,16 +1787,13 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
@@ -1833,16 +1856,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
xcrb64.status = xcrb32.status;
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
@@ -1914,8 +1934,8 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ /* on ENODEV failure: retry once again after an AP bus rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
@@ -1977,7 +1997,7 @@ void zcrypt_rng_device_remove(void)
* an asynchronous job. This function waits until these initial jobs
* are done and so the zcrypt api should be ready to serve crypto
* requests - if there are resources available. The function uses an
- * internal timeout of 60s. The very first caller will either wait for
+ * internal timeout of 30s. The very first caller will either wait for
* ap bus bindings complete or the timeout happens. This state will be
* remembered for further callers which will only be blocked until a
* decision is made (timeout or bindings complete).
@@ -1996,8 +2016,8 @@ int zcrypt_wait_api_operational(void)
switch (zcrypt_wait_api_state) {
case 0:
/* initial state, invoke wait for the ap bus complete */
- rc = ap_wait_init_apqn_bindings_complete(
- msecs_to_jiffies(60 * 1000));
+ rc = ap_wait_apqn_bindings_complete(
+ msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
switch (rc) {
case 0:
/* ap bus bindings are complete */
@@ -2014,8 +2034,8 @@ int zcrypt_wait_api_operational(void)
break;
default:
/* other failure */
- ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
- __func__, rc);
+ pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
+ __func__, rc);
break;
}
break;
@@ -2038,7 +2058,7 @@ EXPORT_SYMBOL(zcrypt_wait_api_operational);
int __init zcrypt_debug_init(void)
{
zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
debug_set_level(zcrypt_dbf_info, DBF_ERR);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index de659954c8f7..4ed481df57ca 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -38,6 +38,15 @@
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
+/**
+ * The zcrypt_wait_api_operational() function waits this
+ * many milliseconds for ap_wait_apqn_bindings_complete().
+ * Also, on a cprb send failure with ENODEV, the send functions
+ * trigger an ap bus rescan and wait this many milliseconds
+ * for ap_wait_apqn_bindings_complete() before resending.
+ */
+#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000
+
/*
* Identifier for Crypto Request Performance Index
*/
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 263fe182648b..0a3a678ffc7e 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -23,11 +23,6 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_ccamisc.h"
-#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
-#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
-#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
-#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
-
/* Size of parameter block used for all cca requests/replies */
#define PARMBSIZE 512
@@ -367,8 +362,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
break;
default:
- DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -386,15 +381,15 @@ int cca_genseckey(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
- __func__,
+ ZCRYPT_DBF_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
@@ -411,8 +406,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -505,8 +500,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
keysize = 32;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -524,17 +519,17 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -549,8 +544,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -651,17 +646,17 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -669,10 +664,10 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -683,8 +678,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
prepparm->lv3.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int)prepparm->lv3.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
@@ -707,8 +702,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->lv3.ckb.len);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.ckb.len);
rc = -EIO;
goto out;
}
@@ -840,9 +835,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
case 256:
break;
default:
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -880,19 +874,17 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s cipher key generate failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -905,8 +897,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
- DEBUG_ERR("%s reply with invalid or unknown key block\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
rc = -EIO;
goto out;
}
@@ -1048,19 +1040,17 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s CSNBKPI2 failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -1073,8 +1063,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
- DEBUG_ERR("%s reply with invalid or unknown key block\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
rc = -EIO;
goto out;
}
@@ -1132,33 +1122,29 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
exorbuf, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
clrkey, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
exorbuf, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
NULL, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1265,19 +1251,17 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -1285,11 +1269,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -1300,15 +1283,14 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
prepparm->vud.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int)prepparm->vud.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x02) {
- DEBUG_ERR(
- "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
- __func__, (int)prepparm->vud.ckb.algo);
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
@@ -1331,8 +1313,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->vud.ckb.keylen);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
rc = -EIO;
goto out;
}
@@ -1432,19 +1414,17 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -1452,11 +1432,10 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -1466,23 +1445,22 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
- __func__, (int)prepparm->vud.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x81) {
- DEBUG_ERR(
- "%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
- __func__, (int)prepparm->vud.ckb.algo);
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (prepparm->vud.ckb.keylen > *protkeylen) {
- DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
- __func__, prepparm->vud.ckb.keylen, *protkeylen);
+ ZCRYPT_DBF_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+ __func__, prepparm->vud.ckb.keylen, *protkeylen);
rc = -EIO;
goto out;
}
@@ -1550,17 +1528,17 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 5cf88aabd64b..9a208dc4c200 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -17,7 +17,7 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 6
+#define ZCRYPT_DBF_MAX_SPRINTF_ARGS 6
#define ZCRYPT_DBF(...) \
debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
@@ -27,8 +27,6 @@
debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
#define ZCRYPT_DBF_INFO(...) \
debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define ZCRYPT_DBF_DBG(...) \
- debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *zcrypt_dbf_info;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 0a877f9792c2..eb7f5489ccf9 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -24,11 +24,6 @@
#include "zcrypt_ep11misc.h"
#include "zcrypt_ccamisc.h"
-#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
-#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
-#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
-#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
-
#define EP11_PINBLOB_V1_BYTES 56
/* default iv used here */
@@ -510,7 +505,7 @@ static int check_reply_pl(const u8 *pl, const char *func)
/* start tag */
if (*pl++ != 0x30) {
- DEBUG_ERR("%s reply start tag mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s reply start tag mismatch\n", func);
return -EIO;
}
@@ -527,40 +522,41 @@ static int check_reply_pl(const u8 *pl, const char *func)
len = *((u16 *)pl);
pl += 2;
} else {
- DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
- func, *pl);
+ ZCRYPT_DBF_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
return -EIO;
}
/* len should cover at least 3 fields with 32 bit value each */
if (len < 3 * 6) {
- DEBUG_ERR("%s reply length %d too small\n", func, len);
+ ZCRYPT_DBF_ERR("%s reply length %d too small\n", func, len);
return -EIO;
}
/* function tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s function tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s function tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* dom tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s dom tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* return value tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s return value tag or length mismatch\n",
+ func);
return -EIO;
}
pl += 2;
ret = *((u32 *)pl);
if (ret != 0) {
- DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ ZCRYPT_DBF_ERR("%s return value 0x%04x != 0\n", func, ret);
return -EIO;
}
@@ -626,9 +622,8 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -636,13 +631,13 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > buflen) {
- DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -816,9 +811,8 @@ static int _ep11_genaeskey(u16 card, u16 domain,
case 256:
break;
default:
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -878,9 +872,8 @@ static int _ep11_genaeskey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -888,13 +881,13 @@ static int _ep11_genaeskey(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
- DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1030,9 +1023,8 @@ static int ep11_cryptsingle(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1040,7 +1032,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
@@ -1053,14 +1045,14 @@ static int ep11_cryptsingle(u16 card, u16 domain,
n = *((u16 *)p);
p += 2;
} else {
- DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
- __func__, rep_pl->data_lenfmt);
+ ZCRYPT_DBF_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
rc = -EIO;
goto out;
}
if (n > *outbufsize) {
- DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
- __func__, n, *outbufsize);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
rc = -ENOSPC;
goto out;
}
@@ -1188,9 +1180,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1198,13 +1189,13 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
- DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1343,9 +1334,8 @@ static int _ep11_wrapkey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1353,13 +1343,13 @@ static int _ep11_wrapkey(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *datasize) {
- DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1386,9 +1376,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
clrkeylen = keybitsize / 8;
} else {
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
return -EINVAL;
}
@@ -1405,9 +1394,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
kek, &keklen);
if (rc) {
- DEBUG_ERR(
- "%s generate kek key failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1415,9 +1403,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
clrkey, clrkeylen, encbuf, &encbuflen);
if (rc) {
- DEBUG_ERR(
- "%s encrypting key value with kek key failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1426,9 +1413,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
encbuf, encbuflen, 0, def_iv,
keybitsize, 0, keybuf, keybufsize, keytype);
if (rc) {
- DEBUG_ERR(
- "%s importing key value as new key failed,, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1476,17 +1462,16 @@ int ep11_kblob2protkey(u16 card, u16 dom,
rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
0, def_iv, wkbuf, &wkbuflen);
if (rc) {
- DEBUG_ERR(
- "%s rewrapping ep11 key to pkey failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
goto out;
}
wki = (struct wk_info *)wkbuf;
/* check struct version and pkey type */
if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
- DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
- __func__, (int)wki->version, (int)wki->pkeytype);
+ ZCRYPT_DBF_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int)wki->version, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
@@ -1511,8 +1496,8 @@ int ep11_kblob2protkey(u16 card, u16 dom,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
- __func__, (int)wki->pkeysize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported AES pkeysize %d\n",
+ __func__, (int)wki->pkeysize);
rc = -EIO;
goto out;
}
@@ -1525,16 +1510,16 @@ int ep11_kblob2protkey(u16 card, u16 dom,
break;
case 2: /* TDES */
default:
- DEBUG_ERR("%s unknown/unsupported key type %d\n",
- __func__, (int)wki->pkeytype);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (wki->pkeysize > *protkeylen) {
- DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
- __func__, wki->pkeysize, *protkeylen);
+ ZCRYPT_DBF_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+ __func__, wki->pkeysize, *protkeylen);
rc = -EINVAL;
goto out;
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index a44fcfcec938..46e27b43a8af 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -119,10 +119,9 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
/*
- * Msg to wrong type or card/infrastructure failure.
- * Trigger rescan of the ap bus, trigger retry request.
+ * Msg to wrong type or card/infrastructure failure. Return
+ * EAGAIN; the upper layer may retry the request.
*/
- atomic_set(&zcrypt_rescan_req, 1);
/* For type 86 response show the apfs value (failure reason) */
if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
ehdr->type == TYPE86_RSP_CODE) {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 2e155de8abe5..3b39cb8f926d 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
len = t80h->len;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -487,9 +487,9 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -537,9 +537,9 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 3c53abbdc342..215f257d2360 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -437,9 +437,9 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
break;
default:
- ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n",
- __func__, msg->cprbx.func_id[0],
- msg->cprbx.func_id[1]);
+ pr_debug("%s unknown CPRB minor version '%c%c'\n",
+ __func__, msg->cprbx.func_id[0],
+ msg->cprbx.func_id[1]);
}
/* copy data block */
@@ -629,9 +629,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy CPRB to user */
if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
- ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_control_blk_length,
- msg->fmt2.count1);
+ pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
+ __func__, xcrb->reply_control_blk_length,
+ msg->fmt2.count1);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
@@ -642,9 +642,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy data buffer to user */
if (msg->fmt2.count2) {
if (xcrb->reply_data_length < msg->fmt2.count2) {
- ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_data_length,
- msg->fmt2.count2);
+ pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n",
+ __func__, xcrb->reply_data_length,
+ msg->fmt2.count2);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_data_addr,
@@ -673,9 +673,9 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
char *data = reply->msg;
if (xcrb->resp_len < msg->fmt2.count1) {
- ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n",
- __func__, (unsigned int)xcrb->resp_len,
- msg->fmt2.count1);
+ pr_debug("%s resp_len %u < required %u => EMSGSIZE\n",
+ __func__, (unsigned int)xcrb->resp_len,
+ msg->fmt2.count1);
return -EMSGSIZE;
}
@@ -875,7 +875,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = sizeof(struct type86x_reply) + t86r->length;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -889,7 +890,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -939,7 +941,8 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -1151,9 +1154,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
out:
if (rc)
- ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -1274,9 +1277,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
out:
if (rc)
- ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index addac7fbe37b..9ce27092729c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1270,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig"
config JAZZ_ESP
bool "MIPS JAZZ FAS216 SCSI support"
- depends on MACH_JAZZ && SCSI
+ depends on MACH_JAZZ && SCSI=y
select SCSI_SPI_ATTRS
help
This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index c0c8ab586957..d32ad46318cb 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
void
mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
{
- struct host_port h_port[64];
+ struct host_port *h_port = NULL;
int i, j, found, host_port_count = 0, port_idx;
u16 sz, attached_handle, ioc_status;
struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
@@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_io_unit_pg0)
return;
+ h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL);
+ if (!h_port)
+ goto out;
+
if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
}
}
out:
+ kfree(h_port);
kfree(sas_io_unit_pg0);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8761bc58d965..b8120ca93c79 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -7378,7 +7378,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
return -EFAULT;
}
- issue_diag_reset:
+ return 0;
+
+issue_diag_reset:
rc = _base_diag_reset(ioc);
return rc;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 76d369343c7a..8cad9792a562 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return result + 4;
}
+enum scsi_vpd_parameters {
+ SCSI_VPD_HEADER_SIZE = 4,
+ SCSI_VPD_LIST_SIZE = 36,
+};
+
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
int result;
if (sdev->no_vpd_size)
return SCSI_DEFAULT_VPD_LEN;
/*
+ * Fetch the supported pages VPD and validate that the requested page
+ * number is present.
+ */
+ if (page != 0) {
+ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+ if (result < SCSI_VPD_HEADER_SIZE)
+ return 0;
+
+ result -= SCSI_VPD_HEADER_SIZE;
+ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ return 0;
+ }
+ /*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
* which can not handle allocation lengths as large as
* potentially requested by the caller.
*/
- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
if (result < 0)
return 0;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 44680f65ea14..9969f4e2f1c3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -332,7 +332,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sg_reserved_size = INT_MAX;
- q = blk_mq_init_queue(&sdev->host->tag_set);
+ q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0833b3e6aa6e..bdd0acf7fa3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3407,6 +3407,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return true;
}
+static void sd_read_block_zero(struct scsi_disk *sdkp)
+{
+ unsigned int buf_len = sdkp->device->sector_size;
+ char *buffer, cmd[10] = { };
+
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+
+ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+ SD_TIMEOUT, sdkp->max_retries, NULL);
+ kfree(buffer);
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3446,7 +3464,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
-
+ /*
+ * Some USB/UAS devices return generic values for mode pages
+ * until the media has been accessed. Trigger a READ operation
+ * to force the device to populate mode pages.
+ */
+ if (sdp->read_before_ms)
+ sd_read_block_zero(sdkp);
/*
* set the default to rotational. All non-rotational devices
* support the block characteristics VPD page, which will
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ceff1ec13f9e..385180c98be4 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6533,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ if (!ctrl_info->disable_managed_interrupts)
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ctrl_info->pci_dev, 0);
+ else
+ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0810b5b0c688..50c664b65f4d 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -68,4 +68,13 @@ config MTK_SVS
chip process corner, temperatures and other factors. Then DVFS
driver could apply SVS bank voltage to PMIC/Buck.
+config MTK_SOCINFO
+ tristate "MediaTek SoC Information"
+ default y
+ depends on NVMEM_MTK_EFUSE
+ help
+ The MediaTek SoC Information (mtk-socinfo) driver provides
+ information about the SoC to userspace, including the
+ manufacturer name, marketing name and SoC name.
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 9d3ce7878c5c..6830512848fd 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
obj-$(CONFIG_MTK_SVS) += mtk-svs.o
+obj-$(CONFIG_MTK_SOCINFO) += mtk-socinfo.o
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
new file mode 100644
index 000000000000..42572e8c1520
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#define MTK_SOCINFO_ENTRY(_soc_name, _segment_name, _marketing_name, _cell_data1, _cell_data2) {\
+ .soc_name = _soc_name, \
+ .segment_name = _segment_name, \
+ .marketing_name = _marketing_name, \
+ .cell_data = {_cell_data1, _cell_data2} \
+}
+#define CELL_NOT_USED (0xFFFFFFFF)
+#define MAX_CELLS (2)
+
+struct mtk_socinfo {
+ struct device *dev;
+ struct name_data *name_data;
+ struct socinfo_data *socinfo_data;
+ struct soc_device *soc_dev;
+};
+
+struct socinfo_data {
+ char *soc_name;
+ char *segment_name;
+ char *marketing_name;
+ u32 cell_data[MAX_CELLS];
+};
+
+static const char *cell_names[MAX_CELLS] = {"socinfo-data1", "socinfo-data2"};
+
+static struct socinfo_data socinfo_data_table[] = {
+ MTK_SOCINFO_ENTRY("MT8173", "MT8173V/AC", "MT8173", 0x6CA20004, 0x10000000),
+ MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000840),
+ MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000940),
+ MTK_SOCINFO_ENTRY("MT8186", "MT8186GV/AZA", "Kompanio 520", 0x81861001, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 830", 0x81880000, 0x00000010),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 830", 0x81880000, 0x00000011),
+ MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080),
+ MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+};
+
+static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
+{
+ struct soc_device_attribute *attrs;
+ static char machine[30] = {0};
+ static const char *soc_manufacturer = "MediaTek";
+
+ attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name,
+ mtk_socinfop->socinfo_data->soc_name);
+ attrs->family = soc_manufacturer;
+ attrs->machine = machine;
+
+ mtk_socinfop->soc_dev = soc_device_register(attrs);
+ if (IS_ERR(mtk_socinfop->soc_dev))
+ return PTR_ERR(mtk_socinfop->soc_dev);
+
+ dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine);
+ return 0;
+}
+
+static u32 mtk_socinfo_read_cell(struct device *dev, const char *name)
+{
+ struct nvmem_device *nvmemp;
+ struct device_node *np, *nvmem_node = dev->parent->of_node;
+ u32 offset;
+ u32 cell_val = CELL_NOT_USED;
+
+ /* should never fail since the nvmem driver registers this child */
+ nvmemp = nvmem_device_find(nvmem_node, device_match_of_node);
+ if (IS_ERR(nvmemp))
+ goto out;
+
+ np = of_get_child_by_name(nvmem_node, name);
+ if (!np)
+ goto put_device;
+
+ if (of_property_read_u32_index(np, "reg", 0, &offset))
+ goto put_node;
+
+ nvmem_device_read(nvmemp, offset, sizeof(cell_val), &cell_val);
+
+put_node:
+ of_node_put(np);
+put_device:
+ nvmem_device_put(nvmemp);
+out:
+ return cell_val;
+}
+
+static int mtk_socinfo_get_socinfo_data(struct mtk_socinfo *mtk_socinfop)
+{
+ unsigned int i, j;
+ unsigned int num_cell_data = 0;
+ u32 cell_data[MAX_CELLS] = {0};
+ bool match_socinfo;
+ int match_socinfo_index = -1;
+
+ for (i = 0; i < MAX_CELLS; i++) {
+ cell_data[i] = mtk_socinfo_read_cell(mtk_socinfop->dev, cell_names[i]);
+ if (cell_data[i] != CELL_NOT_USED)
+ num_cell_data++;
+ else
+ break;
+ }
+
+ if (!num_cell_data)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(socinfo_data_table); i++) {
+ match_socinfo = true;
+ for (j = 0; j < num_cell_data; j++) {
+ if (cell_data[j] != socinfo_data_table[i].cell_data[j]) {
+ match_socinfo = false;
+ break;
+ }
+ }
+ if (match_socinfo) {
+ mtk_socinfop->socinfo_data = &(socinfo_data_table[i]);
+ match_socinfo_index = i;
+ break;
+ }
+ }
+
+ return match_socinfo_index >= 0 ? match_socinfo_index : -ENOENT;
+}
+
+static int mtk_socinfo_probe(struct platform_device *pdev)
+{
+ struct mtk_socinfo *mtk_socinfop;
+ int ret;
+
+ mtk_socinfop = devm_kzalloc(&pdev->dev, sizeof(*mtk_socinfop), GFP_KERNEL);
+ if (!mtk_socinfop)
+ return -ENOMEM;
+
+ mtk_socinfop->dev = &pdev->dev;
+
+ ret = mtk_socinfo_get_socinfo_data(mtk_socinfop);
+ if (ret < 0)
+ return dev_err_probe(mtk_socinfop->dev, ret, "Failed to get socinfo data\n");
+
+ ret = mtk_socinfo_create_socinfo_node(mtk_socinfop);
+ if (ret)
+ return dev_err_probe(mtk_socinfop->dev, ret, "Cannot create node\n");
+
+ platform_set_drvdata(pdev, mtk_socinfop);
+ return 0;
+}
+
+static void mtk_socinfo_remove(struct platform_device *pdev)
+{
+ struct mtk_socinfo *mtk_socinfop = platform_get_drvdata(pdev);
+
+ soc_device_unregister(mtk_socinfop->soc_dev);
+}
+
+static struct platform_driver mtk_socinfo = {
+ .probe = mtk_socinfo_probe,
+ .remove_new = mtk_socinfo_remove,
+ .driver = {
+ .name = "mtk-socinfo",
+ },
+};
+module_platform_driver(mtk_socinfo);
+
+MODULE_AUTHOR("William-TW LIN <william-tw.lin@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek socinfo driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index 9b0fdd95276e..19f4b576f822 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,5 +1,5 @@
config POLARFIRE_SOC_SYS_CTRL
- tristate "POLARFIRE_SOC_SYS_CTRL"
+ tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
depends on MTD
help
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index c6ca4de42586..5af33b0e3470 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -268,4 +268,13 @@ config QCOM_INLINE_CRYPTO_ENGINE
tristate
select QCOM_SCM
+config QCOM_PBS
+	tristate "PBS trigger support for Qualcomm Technologies, Inc. PMICs"
+ depends on SPMI
+ help
+	  This driver supports configuring software programmable boot sequencer (PBS)
+	  trigger events through PBS RAM on Qualcomm Technologies, Inc. PMICs.
+	  It provides the APIs for client drivers that want to send PBS trigger
+	  events to the PBS RAM.
+
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 05b3d54e8dc9..ca0bece0dfff 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_rpmh-rsc.o := -I$(src)
+CFLAGS_qcom_aoss.o := -I$(src)
obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
@@ -34,3 +35,4 @@ obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
qcom_ice-objs += ice.o
obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o
+obj-$(CONFIG_QCOM_PBS) += qcom-pbs.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 1f8b315576a4..50749e870efa 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -399,7 +399,7 @@ static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
}
-struct bus_type aprbus = {
+const struct bus_type aprbus = {
.name = "aprbus",
.match = apr_device_match,
.probe = apr_device_probe,
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 4ca88eaebf06..cbef0dea1d5d 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -859,6 +859,8 @@ static int llcc_update_act_ctrl(u32 sid,
ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
slice_status, !(slice_status & status),
0, LLCC_STATUS_READ_DELAY);
+ if (ret)
+ return ret;
if (drv_data->version >= LLCC_VERSION_4_1_0_0)
ret = regmap_write(drv_data->bcast_regmap, act_clear_reg,
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index f4bfd24386f1..f913e9bd57ed 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->client_mask = *match_data;
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
+ "failed to initialize pdr\n");
+ return ret;
+ }
+
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
- return ret;
+ goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
- pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
- if (IS_ERR(pg->pdr)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
- goto out_release_aux_devices;
- }
-
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
- goto out_release_pdr_handle;
+ goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@@ -300,8 +301,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
return 0;
-out_release_pdr_handle:
- pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -311,6 +310,8 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
return ret;
}
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 5fcd0fdd2faa..b3808fc24c69 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct device *bridge;
+ struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- drm_aux_hpd_bridge_notify(alt_port->bridge,
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
alt_port->hpd_state ?
connector_status_connected :
connector_status_disconnected);
@@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+ alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
return PTR_ERR(alt_port->bridge);
@@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
}
}
+ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+ alt_port = &altmode->ports[port];
+ if (!alt_port->bridge)
+ continue;
+
+ ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
+ if (ret)
+ return ret;
+ }
+
altmode->client = devm_pmic_glink_register_client(dev,
altmode->owner_id,
pmic_glink_altmode_callback,
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index bdcf44b85b2f..2e8f24d5da80 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -89,7 +89,6 @@
* @base: Base address of this instance of QUP wrapper core
* @clks: Handle to the primary & optional secondary AHB clocks
* @num_clks: Count of clocks
- * @to_core: Core ICC path
*/
struct geni_wrapper {
struct device *dev;
diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c
new file mode 100644
index 000000000000..6af49b5060e5
--- /dev/null
+++ b/drivers/soc/qcom/qcom-pbs.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/soc/qcom/qcom-pbs.h>
+
+#define PBS_CLIENT_TRIG_CTL 0x42
+#define PBS_CLIENT_SW_TRIG_BIT BIT(7)
+#define PBS_CLIENT_SCRATCH1 0x50
+#define PBS_CLIENT_SCRATCH2 0x51
+#define PBS_CLIENT_SCRATCH2_ERROR 0xFF
+
+#define RETRIES 2000
+#define DELAY 1100
+
+struct pbs_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct device_link *link;
+
+ u32 base;
+};
+
+static int qcom_pbs_wait_for_ack(struct pbs_dev *pbs, u8 bit_pos)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2,
+ val, val & BIT(bit_pos), DELAY, DELAY * RETRIES);
+
+ if (ret < 0) {
+ dev_err(pbs->dev, "Timeout for PBS ACK/NACK for bit %u\n", bit_pos);
+ return -ETIMEDOUT;
+ }
+
+ if (val == PBS_CLIENT_SCRATCH2_ERROR) {
+ ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0);
+ dev_err(pbs->dev, "NACK from PBS for bit %u\n", bit_pos);
+ return -EINVAL;
+ }
+
+ dev_dbg(pbs->dev, "PBS sequence for bit %u executed!\n", bit_pos);
+ return 0;
+}
+
+/**
+ * qcom_pbs_trigger_event() - Trigger the PBS RAM sequence
+ * @pbs: Pointer to PBS device
+ * @bitmap: bitmap of PBS sequence bits to trigger
+ *
+ * This function is used by client drivers to trigger execution of
+ * a PBS RAM sequence.
+ *
+ * The PBS trigger sequence involves:
+ * 1. Setting the PBS sequence bit in PBS_CLIENT_SCRATCH1.
+ * 2. Initiating the SW PBS trigger.
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for
+ *    completion of the sequence.
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute.
+ *
+ * Return: 0 on success, < 0 on failure
+ */
+int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap)
+{
+ unsigned int val;
+ u16 bit_pos;
+ int ret;
+
+ if (WARN_ON(!bitmap))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(pbs))
+ return -EINVAL;
+
+ mutex_lock(&pbs->lock);
+ ret = regmap_read(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, &val);
+ if (ret < 0)
+ goto out;
+
+ if (val == PBS_CLIENT_SCRATCH2_ERROR) {
+ /* PBS error - clear SCRATCH2 register */
+ ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0);
+ if (ret < 0)
+ goto out;
+ }
+
+ for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+ if (!(bitmap & BIT(bit_pos)))
+ continue;
+
+ /* Clear the PBS sequence bit position */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2,
+ BIT(bit_pos), 0);
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ /* Set the PBS sequence bit position */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1,
+ BIT(bit_pos), BIT(bit_pos));
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ /* Initiate the SW trigger */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_TRIG_CTL,
+ PBS_CLIENT_SW_TRIG_BIT, PBS_CLIENT_SW_TRIG_BIT);
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ ret = qcom_pbs_wait_for_ack(pbs, bit_pos);
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ /* Clear the PBS sequence bit position */
+ regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+ regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ }
+
+out_clear_scratch1:
+	/* Clear all the requested bits */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, bitmap, 0);
+
+out:
+ mutex_unlock(&pbs->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_pbs_trigger_event);
+
+/**
+ * get_pbs_client_device() - Get the PBS device used by client
+ * @dev: Client device
+ *
+ * This function is used to get the PBS device that is being
+ * used by the client.
+ *
+ * Return: pbs_dev on success, ERR_PTR on failure
+ */
+struct pbs_dev *get_pbs_client_device(struct device *dev)
+{
+ struct device_node *pbs_dev_node;
+ struct platform_device *pdev;
+ struct pbs_dev *pbs;
+
+ pbs_dev_node = of_parse_phandle(dev->of_node, "qcom,pbs", 0);
+ if (!pbs_dev_node) {
+ dev_err(dev, "Missing qcom,pbs property\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(pbs_dev_node);
+ if (!pdev) {
+ dev_err(dev, "Unable to find PBS dev_node\n");
+ pbs = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ pbs = platform_get_drvdata(pdev);
+ if (!pbs) {
+ dev_err(dev, "Cannot get pbs instance from %s\n", dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ pbs = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ pbs->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!pbs->link) {
+ dev_err(&pdev->dev, "Failed to create device link to consumer %s\n", dev_name(dev));
+ platform_device_put(pdev);
+ pbs = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+out:
+ of_node_put(pbs_dev_node);
+ return pbs;
+}
+EXPORT_SYMBOL_GPL(get_pbs_client_device);
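For context, here is a minimal sketch of how a client driver might consume the two helpers exported above. Only get_pbs_client_device() and qcom_pbs_trigger_event() come from this patch; the probe function, the choice of BIT(0) and the assumption that both prototypes live in <linux/soc/qcom/qcom-pbs.h> (which the driver itself includes) are illustrative.

/* Hypothetical PBS client sketch, not part of this patch. Assumes the
 * consumer's DT node carries a "qcom,pbs" phandle, as required by
 * get_pbs_client_device(). */
#include <linux/platform_device.h>
#include <linux/soc/qcom/qcom-pbs.h>

static int example_client_probe(struct platform_device *pdev)
{
	struct pbs_dev *pbs;
	int ret;

	pbs = get_pbs_client_device(&pdev->dev);
	if (IS_ERR(pbs))
		return PTR_ERR(pbs);	/* may be -EPROBE_DEFER */

	/* Request the PBS RAM sequence wired to bit 0 (illustrative choice). */
	ret = qcom_pbs_trigger_event(pbs, BIT(0));
	if (ret < 0)
		dev_err(&pdev->dev, "PBS trigger failed: %d\n", ret);

	return ret;
}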
+
+static int qcom_pbs_probe(struct platform_device *pdev)
+{
+ struct pbs_dev *pbs;
+ u32 val;
+ int ret;
+
+ pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+ if (!pbs)
+ return -ENOMEM;
+
+ pbs->dev = &pdev->dev;
+ pbs->regmap = dev_get_regmap(pbs->dev->parent, NULL);
+ if (!pbs->regmap) {
+ dev_err(pbs->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32(pbs->dev, "reg", &val);
+ if (ret < 0) {
+ dev_err(pbs->dev, "Couldn't find reg, ret = %d\n", ret);
+ return ret;
+ }
+ pbs->base = val;
+ mutex_init(&pbs->lock);
+
+ platform_set_drvdata(pdev, pbs);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pbs_match_table[] = {
+ { .compatible = "qcom,pbs" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_pbs_match_table);
+
+static struct platform_driver qcom_pbs_driver = {
+ .driver = {
+ .name = "qcom-pbs",
+ .of_match_table = qcom_pbs_match_table,
+ },
+ .probe = qcom_pbs_probe,
+};
+module_platform_driver(qcom_pbs_driver);
+
+MODULE_DESCRIPTION("QCOM PBS DRIVER");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index aff0cfb71482..ca2f6b7629ce 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, Linaro Ltd
*/
#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
@@ -13,6 +14,9 @@
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>
+#define CREATE_TRACE_POINTS
+#include "trace-aoss.h"
+
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
#define QMP_DESC_FEATURES 0x8
@@ -44,6 +48,8 @@
#define QMP_NUM_COOLING_RESOURCES 2
+#define QMP_DEBUGFS_FILES 4
+
static bool qmp_cdev_max_state = 1;
struct qmp_cooling_device {
@@ -65,6 +71,8 @@ struct qmp_cooling_device {
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
* @cooling_devs: thermal cooling devices
+ * @debugfs_root: directory for the developer/tester interface
+ * @debugfs_files: array of individual debugfs entries under debugfs_root
*/
struct qmp {
void __iomem *msgram;
@@ -82,6 +90,8 @@ struct qmp {
struct clk_hw qdss_clk;
struct qmp_cooling_device *cooling_devs;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_files[QMP_DEBUGFS_FILES];
};
static void qmp_kick(struct qmp *qmp)
@@ -214,7 +224,7 @@ static bool qmp_message_empty(struct qmp *qmp)
*
* Return: 0 on success, negative errno on failure
*/
-int qmp_send(struct qmp *qmp, const char *fmt, ...)
+int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...)
{
char buf[QMP_MSG_LEN];
long time_left;
@@ -235,6 +245,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...)
mutex_lock(&qmp->tx_lock);
+ trace_aoss_send(buf);
+
/* The message RAM only implements 32-bit accesses */
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
buf, sizeof(buf) / sizeof(u32));
@@ -256,6 +268,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...)
ret = 0;
}
+ trace_aoss_send_done(buf, ret);
+
mutex_unlock(&qmp->tx_lock);
return ret;
@@ -475,6 +489,91 @@ void qmp_put(struct qmp *qmp)
}
EXPORT_SYMBOL_GPL(qmp_put);
+struct qmp_debugfs_entry {
+ const char *name;
+ const char *fmt;
+ bool is_bool;
+ const char *true_val;
+ const char *false_val;
+};
+
+static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = {
+ { "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false },
+ { "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" },
+ { "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" },
+ { "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" },
+};
+
+static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ const struct qmp_debugfs_entry *entry = NULL;
+ struct qmp *qmp = file->private_data;
+ char buf[QMP_MSG_LEN];
+ unsigned int uint_val;
+ const char *str_val;
+ bool bool_val;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
+ if (qmp->debugfs_files[i] == file->f_path.dentry) {
+ entry = &qmp_debugfs_entries[i];
+ break;
+ }
+ }
+ if (WARN_ON(!entry))
+ return -EFAULT;
+
+ if (entry->is_bool) {
+ ret = kstrtobool_from_user(user_buf, count, &bool_val);
+ if (ret)
+ return ret;
+
+ str_val = bool_val ? entry->true_val : entry->false_val;
+
+ ret = snprintf(buf, sizeof(buf), entry->fmt, str_val);
+ if (ret >= sizeof(buf))
+ return -EINVAL;
+ } else {
+ ret = kstrtou32_from_user(user_buf, count, 0, &uint_val);
+ if (ret)
+ return ret;
+
+ ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val);
+ if (ret >= sizeof(buf))
+ return -EINVAL;
+ }
+
+ ret = qmp_send(qmp, buf);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations qmp_debugfs_fops = {
+ .open = simple_open,
+ .write = qmp_debugfs_write,
+};
+
+static void qmp_debugfs_create(struct qmp *qmp)
+{
+ const struct qmp_debugfs_entry *entry;
+ int i;
+
+ qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
+ entry = &qmp_debugfs_entries[i];
+
+ qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200,
+ qmp->debugfs_root,
+ qmp,
+ &qmp_debugfs_fops);
+ }
+}
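As a concrete reading of the qmp_debugfs_entries[] table above (not additional interface documentation): writing "1" to prevent_cx_collapse makes qmp_debugfs_write() send "{class: cx_mol, res: cx, val: mol}" through qmp_send(), writing "0" sends the same message with "off", and writing, say, "1555" to ddr_frequency_mhz sends "{class: ddr, res: fixed, val: 1555}". All four files are write-only (mode 0200) and sit in the qcom_aoss debugfs directory created by qmp_debugfs_create().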
+
static int qmp_probe(struct platform_device *pdev)
{
struct qmp *qmp;
@@ -523,6 +622,8 @@ static int qmp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qmp);
+ qmp_debugfs_create(qmp);
+
return 0;
err_close_qmp:
@@ -537,6 +638,8 @@ static void qmp_remove(struct platform_device *pdev)
{
struct qmp *qmp = platform_get_drvdata(pdev);
+ debugfs_remove_recursive(qmp->debugfs_root);
+
qmp_qdss_clk_remove(qmp);
qmp_cooling_devices_remove(qmp);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 690afc9a12f4..7191fa0c087f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -655,8 +655,6 @@ invalid_canary:
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
struct smem_partition *part;
- unsigned long flags;
- int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
if (!__smem)
@@ -665,12 +663,6 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
if (WARN_ON(item >= __smem->item_count))
return ERR_PTR(-EINVAL);
- ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
- HWSPINLOCK_TIMEOUT,
- &flags);
- if (ret)
- return ERR_PTR(ret);
-
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
part = &__smem->partitions[host];
ptr = qcom_smem_get_private(__smem, part, item, size);
@@ -681,10 +673,7 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
ptr = qcom_smem_get_global(__smem, item, size);
}
- hwspin_unlock_irqrestore(__smem->hwlock, &flags);
-
return ptr;
-
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 914b2246148f..a21241cbeec7 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -58,8 +58,8 @@
* @valid_entries: number of allocated entries
* @flags:
* @entries: individual communication entries
- * @name: name of the entry
- * @value: content of the entry
+ * @entries.name: name of the entry
+ * @entries.value: content of the entry
*/
struct smp2p_smem_item {
u32 magic;
@@ -275,6 +275,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
*
* Handle notifications from the remote side to handle newly allocated entries
* or any changes to the state bits of existing entries.
+ *
+ * Return: %IRQ_HANDLED
*/
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 6349a0debeb5..e8ff9819ac47 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -124,7 +124,7 @@ static const char *const pmic_models[] = {
[50] = "PM8350B",
[51] = "PMR735A",
[52] = "PMR735B",
- [55] = "PM2250",
+ [55] = "PM4125",
[58] = "PM8450",
[65] = "PM8010",
[69] = "PM8550VS",
@@ -424,8 +424,11 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ9510) },
{ qcom_board_id(QRB4210) },
{ qcom_board_id(QRB2210) },
+ { qcom_board_id(SM8475) },
+ { qcom_board_id(SM8475P) },
{ qcom_board_id(SA8775P) },
{ qcom_board_id(QRU1000) },
+ { qcom_board_id(SM8475_2) },
{ qcom_board_id(QDU1000) },
{ qcom_board_id(SM8650) },
{ qcom_board_id(SM4450) },
@@ -437,6 +440,8 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ5322) },
{ qcom_board_id(IPQ5312) },
{ qcom_board_id(IPQ5302) },
+ { qcom_board_id(QCS8550) },
+ { qcom_board_id(QCM8550) },
{ qcom_board_id(IPQ5300) },
};
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 2f0b1bfe7658..06e2c4c2a4a8 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -6,20 +6,40 @@
* SAW power controller driver
*/
-#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <linux/regulator/driver.h>
+
#include <soc/qcom/spm.h>
+#define FIELD_SET(current, mask, val) \
+ (((current) & ~(mask)) | FIELD_PREP((mask), (val)))
+
#define SPM_CTL_INDEX 0x7f
#define SPM_CTL_INDEX_SHIFT 4
#define SPM_CTL_EN BIT(0)
+/* These registers might be specific to SPM 1.1 */
+#define SPM_VCTL_VLVL GENMASK(7, 0)
+#define SPM_PMIC_DATA_0_VLVL GENMASK(7, 0)
+#define SPM_PMIC_DATA_1_MIN_VSEL GENMASK(5, 0)
+#define SPM_PMIC_DATA_1_MAX_VSEL GENMASK(21, 16)
+
+#define SPM_1_1_AVS_CTL_AVS_ENABLED BIT(27)
+#define SPM_AVS_CTL_MAX_VLVL GENMASK(22, 17)
+#define SPM_AVS_CTL_MIN_VLVL GENMASK(15, 10)
+
enum spm_reg {
SPM_REG_CFG,
SPM_REG_SPM_CTL,
@@ -29,13 +49,44 @@ enum spm_reg {
SPM_REG_PMIC_DATA_1,
SPM_REG_VCTL,
SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
+ SPM_REG_STS0,
+ SPM_REG_STS1,
SPM_REG_PMIC_STS,
SPM_REG_AVS_CTL,
SPM_REG_AVS_LIMIT,
+ SPM_REG_RST,
SPM_REG_NR,
};
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+
+struct spm_reg_data {
+ const u16 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u32 avs_ctl;
+ u32 avs_limit;
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+
+ smp_call_func_t set_vdd;
+ /* for now we support only a single range */
+ struct linear_range *range;
+ unsigned int ramp_delay;
+ unsigned int init_uV;
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+ struct device *dev;
+ unsigned int volt_sel;
+ int reg_cpu;
+};
+
static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
[SPM_REG_AVS_CTL] = 0x904,
[SPM_REG_AVS_LIMIT] = 0x908,
@@ -169,6 +220,10 @@ static const struct spm_reg_data spm_reg_8226_cpu = {
static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
+ [SPM_REG_STS0] = 0x0c,
+ [SPM_REG_STS1] = 0x10,
+ [SPM_REG_VCTL] = 0x14,
+ [SPM_REG_AVS_CTL] = 0x18,
[SPM_REG_SPM_CTL] = 0x20,
[SPM_REG_PMIC_DLY] = 0x24,
[SPM_REG_PMIC_DATA_0] = 0x28,
@@ -176,7 +231,12 @@ static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_SEQ_ENTRY] = 0x80,
};
+static void smp_set_vdd_v1_1(void *data);
+
/* SPM register data for 8064 */
+static struct linear_range spm_v1_1_regulator_range =
+ REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500);
+
static const struct spm_reg_data spm_reg_8064_cpu = {
.reg_offset = spm_reg_offset_v1_1,
.spm_cfg = 0x1F,
@@ -187,6 +247,10 @@ static const struct spm_reg_data spm_reg_8064_cpu = {
0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
.start_index[PM_SLEEP_MODE_STBY] = 0,
.start_index[PM_SLEEP_MODE_SPC] = 2,
+ .set_vdd = smp_set_vdd_v1_1,
+ .range = &spm_v1_1_regulator_range,
+ .init_uV = 1300000,
+ .ramp_delay = 1250,
};
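A quick arithmetic check on the regulator data just added to spm_reg_8064_cpu, derived entirely from the constants above: REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500) spans 700 mV at selector 0 up to 700000 + 56 * 12500 = 1,400,000 uV at selector 56, in 12.5 mV steps, and init_uV = 1300000 corresponds to selector (1300000 - 700000) / 12500 = 48, which is what linear_range_get_selector_high() resolves in spm_register_regulator() before the initial smp_set_vdd_v1_1() call.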
static inline void spm_register_write(struct spm_driver_data *drv,
@@ -238,6 +302,178 @@ void spm_set_low_power_mode(struct spm_driver_data *drv,
spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
}
+static int spm_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ drv->volt_sel = selector;
+
+ /* Always do the SAW register writes on the corresponding CPU */
+ return smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true);
+}
+
+static int spm_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ return drv->volt_sel;
+}
+
+static const struct regulator_ops spm_reg_ops = {
+ .set_voltage_sel = spm_set_voltage_sel,
+ .get_voltage_sel = spm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static void smp_set_vdd_v1_1(void *data)
+{
+ struct spm_driver_data *drv = data;
+ unsigned int vctl, data0, data1, avs_ctl, sts;
+ unsigned int vlevel, volt_sel;
+ bool avs_enabled;
+
+ volt_sel = drv->volt_sel;
+ vlevel = volt_sel | 0x80; /* band */
+
+ avs_ctl = spm_register_read(drv, SPM_REG_AVS_CTL);
+ vctl = spm_register_read(drv, SPM_REG_VCTL);
+ data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0);
+ data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1);
+
+ avs_enabled = avs_ctl & SPM_1_1_AVS_CTL_AVS_ENABLED;
+
+ /* If AVS is enabled, switch it off during the voltage change */
+ if (avs_enabled) {
+ avs_ctl &= ~SPM_1_1_AVS_CTL_AVS_ENABLED;
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+
+ /* Kick the state machine back to idle */
+ spm_register_write(drv, SPM_REG_RST, 1);
+
+ vctl = FIELD_SET(vctl, SPM_VCTL_VLVL, vlevel);
+ data0 = FIELD_SET(data0, SPM_PMIC_DATA_0_VLVL, vlevel);
+ data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MIN_VSEL, volt_sel);
+ data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MAX_VSEL, volt_sel);
+
+ spm_register_write(drv, SPM_REG_VCTL, vctl);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1);
+
+ if (read_poll_timeout_atomic(spm_register_read,
+ sts, sts == vlevel,
+ 1, 200, false,
+ drv, SPM_REG_STS1)) {
+ dev_err_ratelimited(drv->dev, "timeout setting the voltage (%x %x)!\n", sts, vlevel);
+ goto enable_avs;
+ }
+
+ if (avs_enabled) {
+ unsigned int max_avs = volt_sel;
+ unsigned int min_avs = max(max_avs, 4U) - 4;
+
+ avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MIN_VLVL, min_avs);
+ avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MAX_VLVL, max_avs);
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+
+enable_avs:
+ if (avs_enabled) {
+ avs_ctl |= SPM_1_1_AVS_CTL_AVS_ENABLED;
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+}
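Tying this back to the selector arithmetic noted after the 8064 register data above: for selector 48 the function programs vlevel = 48 | 0x80 = 0xb0 (the band bit) into SPM_VCTL_VLVL and SPM_PMIC_DATA_0_VLVL, mirrors the raw selector into the MIN/MAX_VSEL fields of PMIC_DATA_1, then polls SPM_REG_STS1 until it reads back 0xb0, re-enabling AVS afterwards if it was on.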
+
+static int spm_get_cpu(struct device *dev)
+{
+ int cpu;
+ bool found;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_node, *saw_node;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ continue;
+
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ found = (saw_node == dev->of_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+
+ if (found)
+ return cpu;
+ }
+
+ /* L2 SPM is not bound to any CPU, voltage setting is not supported */
+
+ return -EOPNOTSUPP;
+}
+
+static int spm_register_regulator(struct device *dev, struct spm_driver_data *drv)
+{
+ struct regulator_config config = {
+ .dev = dev,
+ .driver_data = drv,
+ };
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+ int ret;
+ bool found;
+
+ if (!drv->reg_data->set_vdd)
+ return 0;
+
+ rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+ rdesc->name = "spm";
+ rdesc->of_match = of_match_ptr("regulator");
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->owner = THIS_MODULE;
+ rdesc->ops = &spm_reg_ops;
+
+ rdesc->linear_ranges = drv->reg_data->range;
+ rdesc->n_linear_ranges = 1;
+ rdesc->n_voltages = rdesc->linear_ranges[rdesc->n_linear_ranges - 1].max_sel + 1;
+ rdesc->ramp_delay = drv->reg_data->ramp_delay;
+
+ ret = spm_get_cpu(dev);
+ if (ret < 0)
+ return ret;
+
+ drv->reg_cpu = ret;
+ dev_dbg(dev, "SAW2 bound to CPU %d\n", drv->reg_cpu);
+
+ /*
+ * Program initial voltage, otherwise registration will also try
+ * setting the voltage, which might result in undervolting the CPU.
+ */
+ drv->volt_sel = DIV_ROUND_UP(drv->reg_data->init_uV - rdesc->min_uV,
+ rdesc->uV_step);
+ ret = linear_range_get_selector_high(drv->reg_data->range,
+ drv->reg_data->init_uV,
+ &drv->volt_sel,
+ &found);
+ if (ret) {
+ dev_err(dev, "Initial uV value out of bounds\n");
+ return ret;
+ }
+
+ /* Always do the SAW register writes on the corresponding CPU */
+ smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true);
+
+ rdev = devm_regulator_register(dev, rdesc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
static const struct of_device_id spm_match_table[] = {
{ .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
.data = &spm_reg_660_gold_l2 },
@@ -288,6 +524,7 @@ static int spm_dev_probe(struct platform_device *pdev)
return -ENODEV;
drv->reg_data = match_id->data;
+ drv->dev = &pdev->dev;
platform_set_drvdata(pdev, drv);
/* Write the SPM sequences first.. */
@@ -315,6 +552,9 @@ static int spm_dev_probe(struct platform_device *pdev)
if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+ if (IS_ENABLED(CONFIG_REGULATOR))
+ return spm_register_regulator(&pdev->dev, drv);
+
return 0;
}
diff --git a/drivers/soc/qcom/trace-aoss.h b/drivers/soc/qcom/trace-aoss.h
new file mode 100644
index 000000000000..554029b33b44
--- /dev/null
+++ b/drivers/soc/qcom/trace-aoss.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_aoss
+
+#if !defined(_TRACE_QCOM_AOSS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QCOM_AOSS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(aoss_send,
+ TP_PROTO(const char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(aoss_send_done,
+ TP_PROTO(const char *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: %d", __get_str(msg), __entry->ret)
+);
+
+#endif /* _TRACE_QCOM_AOSS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-aoss
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 0986672f6375..5deca747fb77 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -34,6 +34,10 @@ config ARCH_RCAR_GEN3
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
+config ARCH_RCAR_GEN4
+ bool
+ select ARCH_RCAR_GEN3
+
config ARCH_RMOBILE
bool
select PM
@@ -240,7 +244,7 @@ config ARCH_R8A77961
config ARCH_R8A779F0
bool "ARM64 Platform support for R-Car S4-8"
- select ARCH_RCAR_GEN3
+ select ARCH_RCAR_GEN4
select SYSC_R8A779F0
help
This enables support for the Renesas R-Car S4-8 SoC.
@@ -261,18 +265,25 @@ config ARCH_R8A77970
config ARCH_R8A779A0
bool "ARM64 Platform support for R-Car V3U"
- select ARCH_RCAR_GEN3
+ select ARCH_RCAR_GEN4
select SYSC_R8A779A0
help
This enables support for the Renesas R-Car V3U SoC.
config ARCH_R8A779G0
bool "ARM64 Platform support for R-Car V4H"
- select ARCH_RCAR_GEN3
+ select ARCH_RCAR_GEN4
select SYSC_R8A779G0
help
This enables support for the Renesas R-Car V4H SoC.
+config ARCH_R8A779H0
+ bool "ARM64 Platform support for R-Car V4M"
+ select ARCH_RCAR_GEN4
+ select SYSC_R8A779H0
+ help
+ This enables support for the Renesas R-Car V4M SoC.
+
config ARCH_R8A774C0
bool "ARM64 Platform support for RZ/G2E"
select ARCH_RCAR_GEN3
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 98fd97da6cd4..7ba02f3a4a4f 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -117,6 +117,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_v3u },
{ .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
{ .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779h0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 27eae1a354ab..8f9b8d3736dc 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -270,6 +270,11 @@ static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = {
.id = 0x5c,
};
+static const struct renesas_soc soc_rcar_v4m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5d,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -380,6 +385,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R8A779G0
{ .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
#endif
+#ifdef CONFIG_ARCH_R8A779H0
+ { .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m },
+#endif
#ifdef CONFIG_ARCH_R9A07G043
#ifdef CONFIG_RISCV
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 27ec99af77e3..1a5dfdc978dc 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -42,6 +42,7 @@ config EXYNOS_PMU
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
select MFD_CORE
+ select REGMAP_MMIO
# There is no need to enable these drivers for ARMv8
config EXYNOS_PMU_ARM_DRIVERS
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 250537d7cfd6..fd8b6ac06656 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -12,19 +13,134 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
#include "exynos-pmu.h"
+#define PMUALIVE_MASK GENMASK(13, 0)
+#define TENSOR_SET_BITS (BIT(15) | BIT(14))
+#define TENSOR_CLR_BITS BIT(15)
+#define TENSOR_SMC_PMU_SEC_REG 0x82000504
+#define TENSOR_PMUREG_READ 0
+#define TENSOR_PMUREG_WRITE 1
+#define TENSOR_PMUREG_RMW 2
+
struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
+ struct regmap *pmureg;
};
void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
+/* forward declaration */
+static struct platform_driver exynos_pmu_driver;
+
+/*
+ * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
+ * from EL3, but are still read accessible. As Linux needs to write some of
+ * these registers, the following functions are provided and exposed via
+ * regmap.
+ *
+ * Note: This SMC interface is known to be implemented on gs101 and derivative
+ * SoCs.
+ */
+
+/* Write to a protected PMU register. */
+static int tensor_sec_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
+
+	/* Returns -EINVAL if access isn't allowed, 0 otherwise */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/* Read/Modify/Write a protected PMU register. */
+static int tensor_sec_reg_rmw(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
+
+	/* Returns -EINVAL if access isn't allowed, 0 otherwise */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/*
+ * Read a protected PMU register. All PMU registers can be read by Linux.
+ * Note: The SMC read register is not used, as only registers that can be
+ * written are readable via SMC.
+ */
+static int tensor_sec_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ *val = pmu_raw_readl(reg);
+ return 0;
+}
+
+/*
+ * For SoCs that have set/clear bit hardware, this function can be used when
+ * the PMU register will be accessed by multiple masters.
+ *
+ * For example, to set bits 13:8 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
+ *
+ * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
+ */
+static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
+ u32 mask)
+{
+ int ret;
+ unsigned int i;
+
+ for (i = 0; i < 32; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ offset &= ~TENSOR_SET_BITS;
+
+ if (val & BIT(i))
+ offset |= TENSOR_SET_BITS;
+ else
+ offset |= TENSOR_CLR_BITS;
+
+ ret = tensor_sec_reg_write(ctx, offset, i);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int tensor_sec_update_bits(void *ctx, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ /*
+ * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
+ * as the target registers can be accessed by multiple masters.
+ */
+ if (reg > PMUALIVE_MASK)
+ return tensor_sec_reg_rmw(ctx, reg, mask, val);
+
+ return tensor_set_bits_atomic(ctx, reg, val, mask);
+}
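To make the set/clear encoding concrete (a reading of tensor_set_bits_atomic() above, not extra documentation): updating bits 13:8 at PMU_ALIVE offset 0x3e80 with val and mask both 0x3f00 issues six SMC writes, one per bit; each write folds the set request (bits 15:14) or clear request (bit 15) into the register offset and passes the bit index (8 through 13) as the value, so concurrent masters never need a read-modify-write of the shared register.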
void pmu_raw_writel(u32 val, u32 offset)
{
@@ -75,11 +191,41 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
#define exynos_pmu_data_arm_ptr(data) NULL
#endif
+static const struct regmap_config regmap_smccfg = {
+ .name = "pmu_regs",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+ .reg_read = tensor_sec_reg_read,
+ .reg_write = tensor_sec_reg_write,
+ .reg_update_bits = tensor_sec_update_bits,
+};
+
+static const struct regmap_config regmap_mmiocfg = {
+ .name = "pmu_regs",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static const struct exynos_pmu_data gs101_pmu_data = {
+ .pmu_secure = true
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
static const struct of_device_id exynos_pmu_of_device_ids[] = {
{
+ .compatible = "google,gs101-pmu",
+ .data = &gs101_pmu_data,
+ }, {
.compatible = "samsung,exynos3250-pmu",
.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
}, {
@@ -113,19 +259,75 @@ static const struct mfd_cell exynos_pmu_devs[] = {
{ .name = "exynos-clkout", },
};
+/**
+ * exynos_get_pmu_regmap() - Obtain pmureg regmap
+ *
+ * Find the pmureg regmap previously configured in probe() and return regmap
+ * pointer.
+ *
+ * Return: A pointer to regmap if found or ERR_PTR error value.
+ */
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
exynos_pmu_of_device_ids);
if (np)
- return syscon_node_to_regmap(np);
+ return exynos_get_pmu_regmap_by_phandle(np, NULL);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+/**
+ * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
+ * @np: Device node holding PMU phandle property
+ * @propname: Name of property holding phandle value
+ *
+ * Find the pmureg regmap previously configured in probe() and return regmap
+ * pointer.
+ *
+ * Return: A pointer to regmap if found or ERR_PTR error value.
+ */
+struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname)
+{
+ struct exynos_pmu_context *ctx;
+ struct device_node *pmu_np;
+ struct device *dev;
+
+ if (propname)
+ pmu_np = of_parse_phandle(np, propname, 0);
+ else
+ pmu_np = np;
+
+ if (!pmu_np)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Determine if exynos-pmu device has probed and therefore regmap
+ * has been created and can be returned to the caller. Otherwise we
+ * return -EPROBE_DEFER.
+ */
+ dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
+ (void *)pmu_np);
+
+ if (propname)
+ of_node_put(pmu_np);
+
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ ctx = dev_get_drvdata(dev);
+
+ return ctx->pmureg;
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
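A minimal sketch of how a consumer might use the new phandle lookup, assuming its prototype is exposed next to exynos_get_pmu_regmap() in <linux/soc/samsung/exynos-pmu.h>; the property name, register offset and probe function below are illustrative, and only exynos_get_pmu_regmap_by_phandle() itself comes from this patch.

/* Hypothetical consumer, for illustration only. */
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-pmu.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct regmap *pmureg;

	/* "samsung,pmu-syscon" is an assumed property name; any phandle
	 * pointing at the exynos-pmu node would do. */
	pmureg = exynos_get_pmu_regmap_by_phandle(pdev->dev.of_node,
						  "samsung,pmu-syscon");
	if (IS_ERR(pmureg))
		return PTR_ERR(pmureg);	/* -EPROBE_DEFER until exynos-pmu probes */

	/* On gs101 this goes through the SMC-backed set/clear path for
	 * PMU_ALIVE offsets; on other SoCs it is a plain MMIO update. */
	return regmap_update_bits(pmureg, 0x3e80, BIT(8), BIT(8));
}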
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct regmap_config pmu_regmcfg;
+ struct regmap *regmap;
+ struct resource *res;
int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
@@ -137,9 +339,38 @@ static int exynos_pmu_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!pmu_context)
return -ENOMEM;
- pmu_context->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
pmu_context->pmu_data = of_device_get_match_data(dev);
+	/* For SoCs with secure PMU register writes, use the custom SMC-based regmap */
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
+ pmu_regmcfg = regmap_smccfg;
+ pmu_regmcfg.max_register = resource_size(res) -
+ pmu_regmcfg.reg_stride;
+ /* Need physical address for SMC call */
+ regmap = devm_regmap_init(dev, NULL,
+ (void *)(uintptr_t)res->start,
+ &pmu_regmcfg);
+ } else {
+ /* All other SoCs use a MMIO regmap */
+ pmu_regmcfg = regmap_mmiocfg;
+ pmu_regmcfg.max_register = resource_size(res) -
+ pmu_regmcfg.reg_stride;
+ regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
+ &pmu_regmcfg);
+ }
+
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "regmap init failed\n");
+
+ pmu_context->pmureg = regmap;
+ pmu_context->dev = dev;
+
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 1c652ffd79b4..0a49a2c9a08e 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -21,6 +21,7 @@ struct exynos_pmu_conf {
struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
const struct exynos_pmu_conf *pmu_config_extra;
+ bool pmu_secure;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index f16beeabaa92..33512558af9f 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -133,6 +133,11 @@ config ARCH_TEGRA_234_SOC
help
Enable support for the NVIDIA Tegra234 SoC.
+config ARCH_TEGRA_241_SOC
+ bool "NVIDIA Tegra241 SoC"
+ help
+ Enable support for the NVIDIA Tegra241 SoC.
+
endif
endif
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a2c28f493a75..b6bfd6729df3 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -3,11 +3,13 @@
* Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -113,6 +115,28 @@ static void tegra_fuse_restore(void *base)
fuse->clk = NULL;
}
+static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info)
+{
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
+ tegra_revision_name[tegra_sku_info->revision],
+ tegra_sku_info->sku_id, tegra_sku_info->cpu_process_id,
+ tegra_sku_info->soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info->cpu_speedo_id, tegra_sku_info->soc_speedo_id);
+}
+
+static int tegra_fuse_add_lookups(struct tegra_fuse *fuse)
+{
+ fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups),
+ fuse->soc->num_lookups, GFP_KERNEL);
+ if (!fuse->lookups)
+ return -ENOMEM;
+
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+
+ return 0;
+}
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
@@ -130,15 +154,46 @@ static int tegra_fuse_probe(struct platform_device *pdev)
return PTR_ERR(fuse->base);
fuse->phys = res->start;
- fuse->clk = devm_clk_get(&pdev->dev, "fuse");
- if (IS_ERR(fuse->clk)) {
- if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
- PTR_ERR(fuse->clk));
+ /* Initialize the soc data and lookups if using ACPI boot. */
+ if (is_acpi_node(dev_fwnode(&pdev->dev)) && !fuse->soc) {
+ u8 chip;
- return PTR_ERR(fuse->clk);
+ tegra_acpi_init_apbmisc();
+
+ chip = tegra_get_chip_id();
+ switch (chip) {
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ case TEGRA194:
+ fuse->soc = &tegra194_fuse_soc;
+ break;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ case TEGRA234:
+ fuse->soc = &tegra234_fuse_soc;
+ break;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_241_SOC)
+ case TEGRA241:
+ fuse->soc = &tegra241_fuse_soc;
+ break;
+#endif
+ default:
+ return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip);
+ }
+
+ fuse->soc->init(fuse);
+ tegra_fuse_print_sku_info(&tegra_sku_info);
+ tegra_soc_device_register();
+
+ err = tegra_fuse_add_lookups(fuse);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to add FUSE lookups\n");
}
+ fuse->clk = devm_clk_get_optional(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n");
+
platform_set_drvdata(pdev, fuse);
fuse->dev = &pdev->dev;
@@ -179,12 +234,8 @@ static int tegra_fuse_probe(struct platform_device *pdev)
}
fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
- if (IS_ERR(fuse->rst)) {
- err = PTR_ERR(fuse->rst);
- dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
- fuse->rst);
- return err;
- }
+ if (IS_ERR(fuse->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(fuse->rst), "failed to get FUSE reset\n");
/*
* FUSE clock is enabled at a boot time, hence this resume/suspend
@@ -262,10 +313,17 @@ static const struct dev_pm_ops tegra_fuse_pm = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume)
};
+static const struct acpi_device_id tegra_fuse_acpi_match[] = {
+ { "NVDA200F" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, tegra_fuse_acpi_match);
+
static struct platform_driver tegra_fuse_driver = {
.driver = {
.name = "tegra-fuse",
.of_match_table = tegra_fuse_match,
+ .acpi_match_table = tegra_fuse_acpi_match,
.pm = &tegra_fuse_pm,
.suppress_bind_attrs = true,
},
@@ -287,7 +345,16 @@ u32 __init tegra_fuse_read_early(unsigned int offset)
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse->read || !fuse->clk)
+ if (!fuse->dev)
+ return -EPROBE_DEFER;
+
+ /*
+ * Wait for fuse->clk to be initialized if device-tree boot is used.
+ */
+ if (is_of_node(dev_fwnode(fuse->dev)) && !fuse->clk)
+ return -EPROBE_DEFER;
+
+ if (!fuse->read)
return -EPROBE_DEFER;
if (IS_ERR(fuse->clk))
@@ -343,7 +410,8 @@ const struct attribute_group tegra_soc_attr_group = {
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC)
static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -370,7 +438,7 @@ const struct attribute_group tegra194_soc_attr_group = {
};
#endif
-struct device * __init tegra_soc_device_register(void)
+struct device *tegra_soc_device_register(void)
{
struct soc_device_attribute *attr;
struct soc_device *dev;
@@ -407,6 +475,7 @@ static int __init tegra_init_fuse(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
+ int err;
tegra_init_apbmisc();
@@ -497,22 +566,13 @@ static int __init tegra_init_fuse(void)
fuse->soc->init(fuse);
- pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
- tegra_revision_name[tegra_sku_info.revision],
- tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
- tegra_sku_info.soc_process_id);
- pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
- tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ tegra_fuse_print_sku_info(&tegra_sku_info);
- if (fuse->soc->lookups) {
- size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
-
- fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
- if (fuse->lookups)
- nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
- }
+ err = tegra_fuse_add_lookups(fuse);
+ if (err)
+ pr_err("failed to add FUSE lookups\n");
- return 0;
+ return err;
}
early_initcall(tegra_init_fuse);
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e94d46372a63..eb14e5ff5a0a 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -38,7 +38,8 @@
defined(CONFIG_ARCH_TEGRA_210_SOC) || \
defined(CONFIG_ARCH_TEGRA_186_SOC) || \
defined(CONFIG_ARCH_TEGRA_194_SOC) || \
- defined(CONFIG_ARCH_TEGRA_234_SOC)
+ defined(CONFIG_ARCH_TEGRA_234_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_241_SOC)
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
if (WARN_ON(!fuse->base))
@@ -678,3 +679,23 @@ const struct tegra_fuse_soc tegra234_fuse_soc = {
.clk_suspend_on = false,
};
#endif
+
+#if defined(CONFIG_ARCH_TEGRA_241_SOC)
+static const struct tegra_fuse_info tegra241_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x16008,
+ .spare = 0xcf0,
+};
+
+static const struct nvmem_keepout tegra241_fuse_keepouts[] = {
+ { .start = 0xc, .end = 0x1600c }
+};
+
+const struct tegra_fuse_soc tegra241_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra241_fuse_info,
+ .keepouts = tegra241_fuse_keepouts,
+ .num_keepouts = ARRAY_SIZE(tegra241_fuse_keepouts),
+ .soc_attr_group = &tegra194_soc_attr_group,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 90f23be73894..9fee6ad6ad9e 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -69,6 +69,7 @@ struct tegra_fuse {
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
+void tegra_acpi_init_apbmisc(void);
u32 __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
@@ -123,7 +124,8 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc;
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC)
extern const struct attribute_group tegra194_soc_attr_group;
#endif
@@ -135,4 +137,8 @@ extern const struct tegra_fuse_soc tegra194_fuse_soc;
extern const struct tegra_fuse_soc tegra234_fuse_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_241_SOC
+extern const struct tegra_fuse_soc tegra241_fuse_soc;
+#endif
+
#endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index da970f3dbf35..e2ca5d55fd31 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -3,9 +3,11 @@
* Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -62,6 +64,7 @@ bool tegra_is_silicon(void)
switch (tegra_get_chip_id()) {
case TEGRA194:
case TEGRA234:
+ case TEGRA241:
case TEGRA264:
if (tegra_get_platform() == 0)
return true;
@@ -160,9 +163,34 @@ void __init tegra_init_revision(void)
tegra_sku_info.platform = tegra_get_platform();
}
-void __init tegra_init_apbmisc(void)
+static void tegra_init_apbmisc_resources(struct resource *apbmisc,
+ struct resource *straps)
{
void __iomem *strapping_base;
+
+ apbmisc_base = ioremap(apbmisc->start, resource_size(apbmisc));
+ if (apbmisc_base)
+ chipid = readl_relaxed(apbmisc_base + 4);
+ else
+ pr_err("failed to map APBMISC registers\n");
+
+ strapping_base = ioremap(straps->start, resource_size(straps));
+ if (strapping_base) {
+ strapping = readl_relaxed(strapping_base);
+ iounmap(strapping_base);
+ } else {
+ pr_err("failed to map strapping options registers\n");
+ }
+}
+
+/**
+ * tegra_init_apbmisc - Initializes Tegra APBMISC and Strapping registers.
+ *
+ * This is called during early init as some of the old 32-bit ARM code needs
+ * information from the APBMISC registers very early during boot.
+ */
+void __init tegra_init_apbmisc(void)
+{
struct resource apbmisc, straps;
struct device_node *np;
@@ -219,23 +247,73 @@ void __init tegra_init_apbmisc(void)
}
}
- apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
- if (!apbmisc_base) {
- pr_err("failed to map APBMISC registers\n");
- } else {
- chipid = readl_relaxed(apbmisc_base + 4);
+ tegra_init_apbmisc_resources(&apbmisc, &straps);
+ long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+
+put:
+ of_node_put(np);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id apbmisc_acpi_match[] = {
+ { "NVDA2010" },
+ { /* sentinel */ }
+};
+
+void tegra_acpi_init_apbmisc(void)
+{
+ struct resource *resources[2] = { NULL };
+ struct resource_entry *rentry;
+ struct acpi_device *adev = NULL;
+ struct list_head resource_list;
+ int rcount = 0;
+ int ret;
+
+ adev = acpi_dev_get_first_match_dev(apbmisc_acpi_match[0].id, NULL, -1);
+ if (!adev)
+ return;
+
+ INIT_LIST_HEAD(&resource_list);
+
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
+ if (ret < 0) {
+ pr_err("failed to get APBMISC memory resources");
+ goto out_put_acpi_dev;
}
- strapping_base = ioremap(straps.start, resource_size(&straps));
- if (!strapping_base) {
- pr_err("failed to map strapping options registers\n");
- } else {
- strapping = readl_relaxed(strapping_base);
- iounmap(strapping_base);
+ /*
+ * Get required memory resources.
+ *
+ * resources[0]: apbmisc.
+ * resources[1]: straps.
+ */
+ resource_list_for_each_entry(rentry, &resource_list) {
+ if (rcount >= ARRAY_SIZE(resources))
+ break;
+
+ resources[rcount++] = rentry->res;
}
- long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+ if (!resources[0]) {
+ pr_err("failed to get APBMISC registers\n");
+ goto out_free_resource_list;
+ }
-put:
- of_node_put(np);
+ if (!resources[1]) {
+ pr_err("failed to get strapping options registers\n");
+ goto out_free_resource_list;
+ }
+
+ tegra_init_apbmisc_resources(resources[0], resources[1]);
+
+out_free_resource_list:
+ acpi_dev_free_resource_list(&resource_list);
+
+out_put_acpi_dev:
+ acpi_dev_put(adev);
+}
+#else
+void tegra_acpi_init_apbmisc(void)
+{
}
+#endif
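Note that tegra_acpi_init_apbmisc() is not wired into the early initcall path; DT boots keep going through tegra_init_apbmisc(). A hedged sketch of how a probe-time caller (assumed here, the actual call site is outside these hunks) would use the new entry point on an ACPI boot:

static int tegra_fuse_acpi_probe_sketch(struct platform_device *pdev)
{
	/* no DT node: map APBMISC via the NVDA2010 ACPI device instead */
	if (!pdev->dev.of_node)
		tegra_acpi_init_apbmisc();

	/* tegra_get_chip_id() now reflects the freshly mapped registers */
	return 0;
}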
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index f432aa022ace..d6bfcea5ee65 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -384,6 +384,7 @@ struct tegra_pmc_soc {
bool has_blink_output;
bool has_usb_sleepwalk;
bool supports_core_domain;
+ bool has_single_mmio_aperture;
};
/**
@@ -1777,30 +1778,6 @@ static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
return TEGRA_IO_PAD_VOLTAGE_3V3;
}
-/**
- * tegra_io_rail_power_on() - enable power to I/O rail
- * @id: Tegra I/O pad ID for which to enable power
- *
- * See also: tegra_io_pad_power_enable()
- */
-int tegra_io_rail_power_on(unsigned int id)
-{
- return tegra_io_pad_power_enable(id);
-}
-EXPORT_SYMBOL(tegra_io_rail_power_on);
-
-/**
- * tegra_io_rail_power_off() - disable power to I/O rail
- * @id: Tegra I/O pad ID for which to disable power
- *
- * See also: tegra_io_pad_power_disable()
- */
-int tegra_io_rail_power_off(unsigned int id)
-{
- return tegra_io_pad_power_disable(id);
-}
-EXPORT_SYMBOL(tegra_io_rail_power_off);
-
#ifdef CONFIG_PM_SLEEP
enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
{
@@ -2909,31 +2886,33 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake");
- if (res) {
+ if (pmc->soc->has_single_mmio_aperture) {
+ pmc->wake = base;
+ pmc->aotag = base;
+ pmc->scratch = base;
+ } else {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wake");
pmc->wake = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmc->wake))
return PTR_ERR(pmc->wake);
- } else {
- pmc->wake = base;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag");
- if (res) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "aotag");
pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmc->aotag))
return PTR_ERR(pmc->aotag);
- } else {
- pmc->aotag = base;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch");
- if (res) {
- pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmc->scratch))
- return PTR_ERR(pmc->scratch);
- } else {
- pmc->scratch = base;
+ /* "scratch" is an optional aperture */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "scratch");
+ if (res) {
+ pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->scratch))
+ return PTR_ERR(pmc->scratch);
+ } else {
+ pmc->scratch = NULL;
+ }
}
pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk");
@@ -2945,12 +2924,15 @@ static int tegra_pmc_probe(struct platform_device *pdev)
* PMC should be last resort for restarting since it soft-resets
* CPU without resetting everything else.
*/
- err = devm_register_reboot_notifier(&pdev->dev,
- &tegra_pmc_reboot_notifier);
- if (err) {
- dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
- err);
- return err;
+ if (pmc->scratch) {
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to register reboot notifier, %d\n",
+ err);
+ return err;
+ }
}
err = devm_register_sys_off_handler(&pdev->dev,
@@ -3324,6 +3306,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra30_powergates[] = {
@@ -3385,6 +3368,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra114_powergates[] = {
@@ -3442,6 +3426,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra124_powergates[] = {
@@ -3586,6 +3571,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra210_powergates[] = {
@@ -3749,6 +3735,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const struct tegra_io_pad_soc tegra186_io_pads[] = {
@@ -3946,6 +3933,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = false,
.has_usb_sleepwalk = false,
+ .has_single_mmio_aperture = false,
};
static const struct tegra_io_pad_soc tegra194_io_pads[] = {
@@ -4131,6 +4119,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = false,
.has_usb_sleepwalk = false,
+ .has_single_mmio_aperture = false,
};
static const struct tegra_io_pad_soc tegra234_io_pads[] = {
@@ -4220,6 +4209,7 @@ static const char * const tegra234_reset_sources[] = {
};
static const struct tegra_wake_event tegra234_wake_events[] = {
+ TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)),
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)),
@@ -4259,6 +4249,7 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = false,
+ .has_single_mmio_aperture = false,
};
static const struct of_device_id tegra_pmc_match[] = {
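With has_single_mmio_aperture set, the legacy SoCs alias wake, aotag and scratch to the single PMC aperture; Tegra186 and later look each aperture up by name, and "scratch" is allowed to be absent, in which case pmc->scratch stays NULL and the reboot notifier (which records the reboot reason in scratch) is skipped. A hedged sketch of the guard any other scratch user would need (the offset is hypothetical):

static int tegra_pmc_set_reboot_reason_sketch(struct tegra_pmc *pmc, u32 value)
{
	if (!pmc->scratch)	/* no "scratch" aperture on this SoC */
		return -ENODEV;

	writel(value, pmc->scratch + 0x50);	/* hypothetical SCRATCH0 offset */

	return 0;
}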
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 657f5888a77b..e80a2c2cf3e7 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -20,7 +20,7 @@ soundwire-bus-y += irq.o
endif
#AMD driver
-soundwire-amd-y := amd_manager.o
+soundwire-amd-y := amd_init.o amd_manager.o
obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o
#Cadence Objs
diff --git a/drivers/soundwire/amd_init.c b/drivers/soundwire/amd_init.c
new file mode 100644
index 000000000000..e45dc8261ab1
--- /dev/null
+++ b/drivers/soundwire/amd_init.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * SoundWire AMD Manager Initialize routines
+ *
+ * Initializes and creates SDW devices based on ACPI and Hardware values
+ *
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "amd_init.h"
+
+#define ACP_PAD_PULLDOWN_CTRL 0x0001448
+#define ACP_SW_PAD_KEEPER_EN 0x0001454
+#define AMD_SDW_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7f9a
+#define AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7f9f
+#define AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7ffa
+#define AMD_SDW0_PAD_EN_MASK 1
+#define AMD_SDW1_PAD_EN_MASK 0x10
+#define AMD_SDW_PAD_EN_MASK (AMD_SDW0_PAD_EN_MASK | AMD_SDW1_PAD_EN_MASK)
+
+static int amd_enable_sdw_pads(void __iomem *mmio, u32 link_mask, struct device *dev)
+{
+ u32 val;
+ u32 pad_keeper_en_mask, pad_pulldown_ctrl_mask;
+
+ switch (link_mask) {
+ case 1:
+ pad_keeper_en_mask = AMD_SDW0_PAD_EN_MASK;
+ pad_pulldown_ctrl_mask = AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK;
+ break;
+ case 2:
+ pad_keeper_en_mask = AMD_SDW1_PAD_EN_MASK;
+ pad_pulldown_ctrl_mask = AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK;
+ break;
+ case 3:
+ pad_keeper_en_mask = AMD_SDW_PAD_EN_MASK;
+ pad_pulldown_ctrl_mask = AMD_SDW_PAD_PULLDOWN_CTRL_ENABLE_MASK;
+ break;
+ default:
+ dev_err(dev, "No SDW Links are enabled\n");
+ return -ENODEV;
+ }
+
+ val = readl(mmio + ACP_SW_PAD_KEEPER_EN);
+ val |= pad_keeper_en_mask;
+ writel(val, mmio + ACP_SW_PAD_KEEPER_EN);
+ val = readl(mmio + ACP_PAD_PULLDOWN_CTRL);
+ val &= pad_pulldown_ctrl_mask;
+ writel(val, mmio + ACP_PAD_PULLDOWN_CTRL);
+ return 0;
+}
+
+static int sdw_amd_cleanup(struct sdw_amd_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->count; i++) {
+ if (!(ctx->link_mask & BIT(i)))
+ continue;
+ platform_device_unregister(ctx->pdev[i]);
+ }
+
+ return 0;
+}
+
+static struct sdw_amd_ctx *sdw_amd_probe_controller(struct sdw_amd_res *res)
+{
+ struct sdw_amd_ctx *ctx;
+ struct acpi_device *adev;
+ struct resource *sdw_res;
+ struct acp_sdw_pdata sdw_pdata[2];
+ struct platform_device_info pdevinfo[2];
+ u32 link_mask;
+ int count, index;
+ int ret;
+
+ if (!res)
+ return NULL;
+
+ adev = acpi_fetch_acpi_dev(res->handle);
+ if (!adev)
+ return NULL;
+
+ if (!res->count)
+ return NULL;
+
+ count = res->count;
+ dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
+ ret = amd_enable_sdw_pads(res->mmio_base, res->link_mask, res->parent);
+ if (ret)
+ return NULL;
+
+ /*
+ * we need to alloc/free memory manually and can't use devm:
+ * this routine may be called from a workqueue, and not from
+ * the parent .probe.
+ * If devm_ was used, the memory might never be freed on errors.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->count = count;
+ ctx->link_mask = res->link_mask;
+ sdw_res = kzalloc(sizeof(*sdw_res), GFP_KERNEL);
+ if (!sdw_res) {
+ kfree(ctx);
+ return NULL;
+ }
+ sdw_res->flags = IORESOURCE_MEM;
+ sdw_res->start = res->addr;
+ sdw_res->end = res->addr + res->reg_range;
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ link_mask = ctx->link_mask;
+ for (index = 0; index < count; index++) {
+ if (!(link_mask & BIT(index)))
+ continue;
+
+ sdw_pdata[index].instance = index;
+ sdw_pdata[index].acp_sdw_lock = res->acp_lock;
+ pdevinfo[index].name = "amd_sdw_manager";
+ pdevinfo[index].id = index;
+ pdevinfo[index].parent = res->parent;
+ pdevinfo[index].num_res = 1;
+ pdevinfo[index].res = sdw_res;
+ pdevinfo[index].data = &sdw_pdata[index];
+ pdevinfo[index].size_data = sizeof(struct acp_sdw_pdata);
+ pdevinfo[index].fwnode = acpi_fwnode_handle(adev);
+ ctx->pdev[index] = platform_device_register_full(&pdevinfo[index]);
+ if (IS_ERR(ctx->pdev[index]))
+ goto err;
+ }
+ kfree(sdw_res);
+ return ctx;
+err:
+ while (index--) {
+ if (!(link_mask & BIT(index)))
+ continue;
+
+ platform_device_unregister(ctx->pdev[index]);
+ }
+
+ kfree(sdw_res);
+ kfree(ctx);
+ return NULL;
+}
+
+static int sdw_amd_startup(struct sdw_amd_ctx *ctx)
+{
+ struct amd_sdw_manager *amd_manager;
+ int i, ret;
+
+ /* Startup SDW Manager devices */
+ for (i = 0; i < ctx->count; i++) {
+ if (!(ctx->link_mask & BIT(i)))
+ continue;
+ amd_manager = dev_get_drvdata(&ctx->pdev[i]->dev);
+ ret = amd_sdw_manager_start(amd_manager);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sdw_amd_probe(struct sdw_amd_res *res, struct sdw_amd_ctx **sdw_ctx)
+{
+ *sdw_ctx = sdw_amd_probe_controller(res);
+ if (!*sdw_ctx)
+ return -ENODEV;
+
+ return sdw_amd_startup(*sdw_ctx);
+}
+EXPORT_SYMBOL_NS(sdw_amd_probe, SOUNDWIRE_AMD_INIT);
+
+void sdw_amd_exit(struct sdw_amd_ctx *ctx)
+{
+ sdw_amd_cleanup(ctx);
+ kfree(ctx->ids);
+ kfree(ctx);
+}
+EXPORT_SYMBOL_NS(sdw_amd_exit, SOUNDWIRE_AMD_INIT);
+
+int sdw_amd_get_slave_info(struct sdw_amd_ctx *ctx)
+{
+ struct amd_sdw_manager *amd_manager;
+ struct sdw_bus *bus;
+ struct sdw_slave *slave;
+ struct list_head *node;
+ int index;
+ int i = 0;
+ int num_slaves = 0;
+
+ for (index = 0; index < ctx->count; index++) {
+ if (!(ctx->link_mask & BIT(index)))
+ continue;
+ amd_manager = dev_get_drvdata(&ctx->pdev[index]->dev);
+ if (!amd_manager)
+ return -ENODEV;
+ bus = &amd_manager->bus;
+ /* Calculate number of slaves */
+ list_for_each(node, &bus->slaves)
+ num_slaves++;
+ }
+
+ ctx->ids = kcalloc(num_slaves, sizeof(*ctx->ids), GFP_KERNEL);
+ if (!ctx->ids)
+ return -ENOMEM;
+ ctx->num_slaves = num_slaves;
+ for (index = 0; index < ctx->count; index++) {
+ if (!(ctx->link_mask & BIT(index)))
+ continue;
+ amd_manager = dev_get_drvdata(&ctx->pdev[index]->dev);
+ if (amd_manager) {
+ bus = &amd_manager->bus;
+ list_for_each_entry(slave, &bus->slaves, node) {
+ ctx->ids[i].id = slave->id;
+ ctx->ids[i].link_id = bus->link_id;
+ i++;
+ }
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS(sdw_amd_get_slave_info, SOUNDWIRE_AMD_INIT);
+
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_DESCRIPTION("AMD SoundWire Init Library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/soundwire/amd_init.h b/drivers/soundwire/amd_init.h
new file mode 100644
index 000000000000..928b0c707162
--- /dev/null
+++ b/drivers/soundwire/amd_init.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __AMD_INIT_H
+#define __AMD_INIT_H
+
+#include <linux/soundwire/sdw_amd.h>
+
+int amd_sdw_manager_start(struct amd_sdw_manager *amd_manager);
+
+#endif
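For reference, a hedged sketch of how a parent ACP/SOF driver might feed the new init library; the struct sdw_amd_res fields are the ones consumed by sdw_amd_probe_controller() above, everything else (names, values) is illustrative:

/* callers must import the namespace used by the exports above */
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);

static int acp_sdw_attach_sketch(struct device *parent, void __iomem *acp_mmio,
				 acpi_handle handle, struct mutex *acp_lock,
				 struct sdw_amd_ctx **ctx)
{
	struct sdw_amd_res res = {
		.handle = handle,	/* ACP's ACPI companion */
		.parent = parent,
		.mmio_base = acp_mmio,	/* already ioremap()ed ACP space */
		.addr = 0,		/* physical base of the SDW registers (assumed) */
		.reg_range = 0,		/* size of that register window (assumed) */
		.count = 2,		/* AMD_SDW_MAX_MANAGER_COUNT */
		.link_mask = 0x3,	/* both manager instances enabled */
		.acp_lock = acp_lock,
	};

	/* registers the amd_sdw_manager platform devices and starts them */
	return sdw_amd_probe(&res, ctx);
}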
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index f54bb4dd2d10..7cd24bd8e224 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -1,8 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* SoundWire AMD Manager driver
*
- * Copyright 2023 Advanced Micro Devices, Inc.
+ * Copyright 2023-24 Advanced Micro Devices, Inc.
*/
#include <linux/completion.h>
@@ -19,29 +19,13 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "bus.h"
+#include "amd_init.h"
#include "amd_manager.h"
#define DRV_NAME "amd_sdw_manager"
#define to_amd_sdw(b) container_of(b, struct amd_sdw_manager, bus)
-static void amd_enable_sdw_pads(struct amd_sdw_manager *amd_manager)
-{
- u32 sw_pad_pulldown_val;
- u32 val;
-
- mutex_lock(amd_manager->acp_sdw_lock);
- val = readl(amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
- val |= amd_manager->reg_mask->sw_pad_enable_mask;
- writel(val, amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
- usleep_range(1000, 1500);
-
- sw_pad_pulldown_val = readl(amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
- sw_pad_pulldown_val &= amd_manager->reg_mask->sw_pad_pulldown_mask;
- writel(sw_pad_pulldown_val, amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
- mutex_unlock(amd_manager->acp_sdw_lock);
-}
-
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
u32 val;
@@ -102,12 +86,11 @@ static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
- struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
u32 val;
mutex_lock(amd_manager->acp_sdw_lock);
val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
- val |= reg_mask->acp_sdw_intr_mask;
+ val |= sdw_manager_reg_mask_array[amd_manager->instance];
writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
mutex_unlock(amd_manager->acp_sdw_lock);
@@ -120,12 +103,11 @@ static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
- struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
u32 val;
mutex_lock(amd_manager->acp_sdw_lock);
val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
- val &= ~reg_mask->acp_sdw_intr_mask;
+ val &= ~sdw_manager_reg_mask_array[amd_manager->instance];
writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
mutex_unlock(amd_manager->acp_sdw_lock);
@@ -864,23 +846,20 @@ static void amd_sdw_irq_thread(struct work_struct *work)
writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}
-static void amd_sdw_probe_work(struct work_struct *work)
+int amd_sdw_manager_start(struct amd_sdw_manager *amd_manager)
{
- struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
- probe_work);
struct sdw_master_prop *prop;
int ret;
prop = &amd_manager->bus.prop;
if (!prop->hw_disabled) {
- amd_enable_sdw_pads(amd_manager);
ret = amd_init_sdw_manager(amd_manager);
if (ret)
- return;
+ return ret;
amd_enable_sdw_interrupts(amd_manager);
ret = amd_enable_sdw_manager(amd_manager);
if (ret)
- return;
+ return ret;
amd_sdw_set_frameshape(amd_manager);
}
/* Enable runtime PM */
@@ -889,6 +868,7 @@ static void amd_sdw_probe_work(struct work_struct *work)
pm_runtime_mark_last_busy(amd_manager->dev);
pm_runtime_set_active(amd_manager->dev);
pm_runtime_enable(amd_manager->dev);
+ return 0;
}
static int amd_sdw_manager_probe(struct platform_device *pdev)
@@ -948,7 +928,6 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
return -EINVAL;
}
- amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
params = &amd_manager->bus.params;
params->col = AMD_SDW_DEFAULT_COLUMNS;
@@ -972,11 +951,6 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
dev_set_drvdata(dev, amd_manager);
INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
- INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
- /*
- * Instead of having lengthy probe sequence, use deferred probe.
- */
- schedule_work(&amd_manager->probe_work);
return 0;
}
@@ -986,7 +960,6 @@ static void amd_sdw_manager_remove(struct platform_device *pdev)
int ret;
pm_runtime_disable(&pdev->dev);
- cancel_work_sync(&amd_manager->probe_work);
amd_disable_sdw_interrupts(amd_manager);
sdw_bus_master_delete(&amd_manager->bus);
ret = amd_disable_sdw_manager(amd_manager);
@@ -1215,5 +1188,5 @@ module_platform_driver(amd_sdw_driver);
MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_DESCRIPTION("AMD SoundWire driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 5f040151a259..418b679e0b1a 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (C) 2023-24 Advanced Micro Devices, Inc. All rights reserved.
*/
#ifndef __AMD_MANAGER_H
@@ -243,16 +243,8 @@ static struct sdw_manager_dp_reg sdw1_manager_dp_reg[AMD_SDW1_MAX_DAI] = {
ACP_SW_AUDIO1_RX_OFFSET, ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0}
};
-static struct sdw_manager_reg_mask sdw_manager_reg_mask_array[2] = {
- {
- AMD_SDW0_PAD_KEEPER_EN_MASK,
- AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK,
- AMD_SDW0_EXT_INTR_MASK
- },
- {
- AMD_SDW1_PAD_KEEPER_EN_MASK,
- AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK,
+static u32 sdw_manager_reg_mask_array[AMD_SDW_MAX_MANAGER_COUNT] = {
+ AMD_SDW0_EXT_INTR_MASK,
AMD_SDW1_EXT_INTR_MASK
- }
};
#endif
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 9ebdd0cd0b1c..91ab97a456fa 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -131,6 +131,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
.driver_data = (void *)intel_rooks_county,
},
{
+ /* quirk used for NUC15 LAPRC710 skew */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
+ },
+ .driver_data = (void *)intel_rooks_county,
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ddae0fde798e..bc7021da2fe9 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -694,7 +694,7 @@ config SPI_MTK_SNFI
This enables support for SPI-NAND mode on the MediaTek NAND
Flash Interface found on MediaTek ARM SoCs. This controller
is implemented as a SPI-MEM controller with pipelined ECC
- capcability.
+ capability.
config SPI_WPCM_FIU
tristate "Nuvoton WPCM450 Flash Interface Unit"
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index b7ada981464a..d78762d4db98 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -189,7 +189,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
host->num_chipselect = 3;
host->mem_ops = &ath79_mem_ops;
- sp->bitbang.master = host;
+ sp->bitbang.ctlr = host;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.flags = SPI_CS_HIGH;
@@ -237,7 +237,7 @@ static void ath79_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
- spi_controller_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.ctlr);
}
static void ath79_spi_shutdown(struct platform_device *pdev)
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 1011b1a8f241..825d2f1cdff8 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -800,7 +800,7 @@ static int au1550_spi_probe(struct platform_device *pdev)
init_completion(&hw->host_done);
- hw->bitbang.master = hw->host;
+ hw->bitbang.ctlr = hw->host;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 9ace259d2d29..7cc219d78551 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -6,20 +6,15 @@
*/
#include <linux/clk.h>
-#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
-#include <linux/timer.h>
-
-#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
-#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
-#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
-
-#define SPI_ENGINE_REG_VERSION 0x00
#define SPI_ENGINE_REG_RESET 0x40
@@ -62,6 +57,9 @@
#define SPI_ENGINE_TRANSFER_WRITE 0x1
#define SPI_ENGINE_TRANSFER_READ 0x2
+/* Arbitrary sync ID for use by host->cur_msg */
+#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID 0x1
+
#define SPI_ENGINE_CMD(inst, arg1, arg2) \
(((inst) << 12) | ((arg1) << 8) | (arg2))
@@ -78,15 +76,13 @@
struct spi_engine_program {
unsigned int length;
- uint16_t instructions[];
+ uint16_t instructions[] __counted_by(length);
};
/**
* struct spi_engine_message_state - SPI engine per-message state
*/
struct spi_engine_message_state {
- /** @p: Instructions for executing this message. */
- struct spi_engine_program *p;
/** @cmd_length: Number of elements in cmd_buf array. */
unsigned cmd_length;
/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
@@ -103,8 +99,6 @@ struct spi_engine_message_state {
unsigned int rx_length;
/** @rx_buf: Bytes not yet written to the RX FIFO. */
uint8_t *rx_buf;
- /** @sync_id: ID to correlate SYNC interrupts with this message. */
- u8 sync_id;
};
struct spi_engine {
@@ -114,19 +108,18 @@ struct spi_engine {
spinlock_t lock;
void __iomem *base;
- struct ida sync_ida;
- struct timer_list watchdog_timer;
- struct spi_controller *controller;
-
+ struct spi_engine_message_state msg_state;
+ struct completion msg_complete;
unsigned int int_enable;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
bool dry, uint16_t cmd)
{
- if (!dry)
- p->instructions[p->length] = cmd;
p->length++;
+
+ if (!dry)
+ p->instructions[p->length - 1] = cmd;
}
static unsigned int spi_engine_get_config(struct spi_device *spi)
@@ -488,14 +481,10 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
}
if (pending & SPI_ENGINE_INT_SYNC && msg) {
- struct spi_engine_message_state *st = msg->state;
-
- if (completed_id == st->sync_id) {
- if (timer_delete_sync(&spi_engine->watchdog_timer)) {
- msg->status = 0;
- msg->actual_length = msg->frame_length;
- spi_finalize_current_message(host);
- }
+ if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
+ msg->status = 0;
+ msg->actual_length = msg->frame_length;
+ complete(&spi_engine->msg_complete);
disable_int |= SPI_ENGINE_INT_SYNC;
}
}
@@ -511,61 +500,32 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static int spi_engine_prepare_message(struct spi_controller *host,
- struct spi_message *msg)
+static int spi_engine_optimize_message(struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
- struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- struct spi_engine_message_state *st;
- size_t size;
- int ret;
-
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return -ENOMEM;
spi_engine_precompile_message(msg);
p_dry.length = 0;
spi_engine_compile_message(msg, true, &p_dry);
- size = sizeof(*p->instructions) * (p_dry.length + 1);
- p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
- if (!p) {
- kfree(st);
+ p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
+ if (!p)
return -ENOMEM;
- }
-
- ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
- if (ret < 0) {
- kfree(p);
- kfree(st);
- return ret;
- }
-
- st->sync_id = ret;
spi_engine_compile_message(msg, false, p);
- spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));
+ spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
+ AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
- st->p = p;
- st->cmd_buf = p->instructions;
- st->cmd_length = p->length;
- msg->state = st;
+ msg->opt_state = p;
return 0;
}
-static int spi_engine_unprepare_message(struct spi_controller *host,
- struct spi_message *msg)
+static int spi_engine_unoptimize_message(struct spi_message *msg)
{
- struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- struct spi_engine_message_state *st = msg->state;
-
- ida_free(&spi_engine->sync_ida, st->sync_id);
- kfree(st->p);
- kfree(st);
+ kfree(msg->opt_state);
return 0;
}
@@ -574,11 +534,18 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- struct spi_engine_message_state *st = msg->state;
+ struct spi_engine_message_state *st = &spi_engine->msg_state;
+ struct spi_engine_program *p = msg->opt_state;
unsigned int int_enable = 0;
unsigned long flags;
- mod_timer(&spi_engine->watchdog_timer, jiffies + msecs_to_jiffies(5000));
+ /* reinitialize message state for this transfer */
+ memset(st, 0, sizeof(*st));
+ st->cmd_buf = p->instructions;
+ st->cmd_length = p->length;
+ msg->state = st;
+
+ reinit_completion(&spi_engine->msg_complete);
spin_lock_irqsave(&spi_engine->lock, flags);
@@ -600,21 +567,16 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
spi_engine->int_enable = int_enable;
spin_unlock_irqrestore(&spi_engine->lock, flags);
- return 0;
-}
-
-static void spi_engine_timeout(struct timer_list *timer)
-{
- struct spi_engine *spi_engine = from_timer(spi_engine, timer, watchdog_timer);
- struct spi_controller *host = spi_engine->controller;
-
- if (WARN_ON(!host->cur_msg))
- return;
+ if (!wait_for_completion_timeout(&spi_engine->msg_complete,
+ msecs_to_jiffies(5000))) {
+ dev_err(&host->dev,
+ "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
+ msg->status = -ETIMEDOUT;
+ }
- dev_err(&host->dev,
- "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
- host->cur_msg->status = -ETIMEDOUT;
spi_finalize_current_message(host);
+
+ return msg->status;
}
static void spi_engine_release_hw(void *p)
@@ -645,9 +607,7 @@ static int spi_engine_probe(struct platform_device *pdev)
spi_engine = spi_controller_get_devdata(host);
spin_lock_init(&spi_engine->lock);
- ida_init(&spi_engine->sync_ida);
- timer_setup(&spi_engine->watchdog_timer, spi_engine_timeout, TIMER_IRQSAFE);
- spi_engine->controller = host;
+ init_completion(&spi_engine->msg_complete);
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk))
@@ -661,12 +621,12 @@ static int spi_engine_probe(struct platform_device *pdev)
if (IS_ERR(spi_engine->base))
return PTR_ERR(spi_engine->base);
- version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
- if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+ version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
+ if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
- SPI_ENGINE_VERSION_MAJOR(version),
- SPI_ENGINE_VERSION_MINOR(version),
- SPI_ENGINE_VERSION_PATCH(version));
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
return -ENODEV;
}
@@ -689,8 +649,8 @@ static int spi_engine_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
host->transfer_one_message = spi_engine_transfer_one_message;
- host->prepare_message = spi_engine_prepare_message;
- host->unprepare_message = spi_engine_unprepare_message;
+ host->optimize_message = spi_engine_optimize_message;
+ host->unoptimize_message = spi_engine_unoptimize_message;
host->num_chipselect = 8;
if (host->max_speed_hz == 0)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index e709887eb2a9..e1b9b1235787 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1117,19 +1117,6 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *target = spi_get_ctldata(spi);
- int ret;
-
- if (ctlr->can_dma) {
- /*
- * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
- * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
- * aligned) if the limit is exceeded.
- */
- ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
- GFP_KERNEL | GFP_DMA);
- if (ret)
- return ret;
- }
/*
* Set up clock polarity before spi_transfer_one_message() asserts
@@ -1219,6 +1206,19 @@ static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
return 0;
}
+static size_t bcm2835_spi_max_transfer_size(struct spi_device *spi)
+{
+ /*
+ * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
+ * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
+ * aligned) if the limit is exceeded.
+ */
+ if (spi->controller->can_dma)
+ return 65532;
+
+ return SIZE_MAX;
+}
+
static int bcm2835_spi_setup(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
@@ -1348,6 +1348,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->num_chipselect = 3;
+ ctlr->max_transfer_size = bcm2835_spi_max_transfer_size;
ctlr->setup = bcm2835_spi_setup;
ctlr->cleanup = bcm2835_spi_cleanup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index ecd44016c197..a0e2204fc039 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -187,7 +187,7 @@ int spi_bitbang_setup(struct spi_device *spi)
bool initial_setup = false;
int retval;
- bitbang = spi_master_get_devdata(spi->master);
+ bitbang = spi_controller_get_devdata(spi->controller);
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -236,7 +236,7 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
unsigned nsecs = cs->nsecs;
struct spi_bitbang *bitbang;
- bitbang = spi_master_get_devdata(spi->master);
+ bitbang = spi_controller_get_devdata(spi->controller);
if (bitbang->set_line_direction) {
int err;
@@ -268,11 +268,11 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
* transfer-at-a-time ones to leverage dma or fifo hardware.
*/
-static int spi_bitbang_prepare_hardware(struct spi_master *spi)
+static int spi_bitbang_prepare_hardware(struct spi_controller *spi)
{
struct spi_bitbang *bitbang;
- bitbang = spi_master_get_devdata(spi);
+ bitbang = spi_controller_get_devdata(spi);
mutex_lock(&bitbang->lock);
bitbang->busy = 1;
@@ -281,11 +281,11 @@ static int spi_bitbang_prepare_hardware(struct spi_master *spi)
return 0;
}
-static int spi_bitbang_transfer_one(struct spi_master *master,
+static int spi_bitbang_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct spi_bitbang *bitbang = spi_master_get_devdata(master);
+ struct spi_bitbang *bitbang = spi_controller_get_devdata(ctlr);
int status = 0;
if (bitbang->setup_transfer) {
@@ -303,16 +303,16 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
status = -EREMOTEIO;
out:
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(ctlr);
return status;
}
-static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
+static int spi_bitbang_unprepare_hardware(struct spi_controller *spi)
{
struct spi_bitbang *bitbang;
- bitbang = spi_master_get_devdata(spi);
+ bitbang = spi_controller_get_devdata(spi);
mutex_lock(&bitbang->lock);
bitbang->busy = 0;
@@ -323,7 +323,7 @@ static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
{
- struct spi_bitbang *bitbang = spi_master_get_devdata(spi->master);
+ struct spi_bitbang *bitbang = spi_controller_get_devdata(spi->controller);
/* SPI core provides CS high / low, but bitbang driver
* expects CS active
@@ -341,10 +341,10 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
- struct spi_master *master = bitbang->master;
+ struct spi_controller *ctlr = bitbang->ctlr;
bool custom_cs;
- if (!master)
+ if (!ctlr)
return -EINVAL;
/*
* We only need the chipselect callback if we are actually using it.
@@ -352,39 +352,39 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
* SPI_CONTROLLER_GPIO_SS flag is set, we always need to call the
* driver-specific chipselect routine.
*/
- custom_cs = (!master->use_gpio_descriptors ||
- (master->flags & SPI_CONTROLLER_GPIO_SS));
+ custom_cs = (!ctlr->use_gpio_descriptors ||
+ (ctlr->flags & SPI_CONTROLLER_GPIO_SS));
if (custom_cs && !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
- if (!master->mode_bits)
- master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+ if (!ctlr->mode_bits)
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
- if (master->transfer || master->transfer_one_message)
+ if (ctlr->transfer || ctlr->transfer_one_message)
return -EINVAL;
- master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
- master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
- master->transfer_one = spi_bitbang_transfer_one;
+ ctlr->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ ctlr->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ ctlr->transfer_one = spi_bitbang_transfer_one;
/*
* When using GPIO descriptors, the ->set_cs() callback doesn't even
* get called unless SPI_CONTROLLER_GPIO_SS is set.
*/
if (custom_cs)
- master->set_cs = spi_bitbang_set_cs;
+ ctlr->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
- if (!master->setup) {
+ if (!ctlr->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
+ ctlr->setup = spi_bitbang_setup;
+ ctlr->cleanup = spi_bitbang_cleanup;
}
}
@@ -411,18 +411,18 @@ EXPORT_SYMBOL_GPL(spi_bitbang_init);
* master methods. Those methods are the defaults if the bitbang->txrx_bufs
* routine isn't initialized.
*
- * This routine registers the spi_master, which will process requests in a
+ * This routine registers the spi_controller, which will process requests in a
* dedicated task, keeping IRQs unblocked most of the time. To stop
* processing those requests, call spi_bitbang_stop().
*
- * On success, this routine will take a reference to master. The caller is
- * responsible for calling spi_bitbang_stop() to decrement the reference and
- * spi_master_put() as counterpart of spi_alloc_master() to prevent a memory
+ * On success, this routine will take a reference to the controller. The caller
+ * is responsible for calling spi_bitbang_stop() to decrement the reference and
+ * spi_controller_put() as counterpart of spi_alloc_master() to prevent a memory
* leak.
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
- struct spi_master *master = bitbang->master;
+ struct spi_controller *ctlr = bitbang->ctlr;
int ret;
ret = spi_bitbang_init(bitbang);
@@ -432,9 +432,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
- ret = spi_register_master(spi_master_get(master));
+ ret = spi_register_controller(spi_controller_get(ctlr));
if (ret)
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
@@ -445,7 +445,7 @@ EXPORT_SYMBOL_GPL(spi_bitbang_start);
*/
void spi_bitbang_stop(struct spi_bitbang *bitbang)
{
- spi_unregister_master(bitbang->master);
+ spi_unregister_controller(bitbang->ctlr);
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
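The bitbang core renames struct spi_bitbang's 'master' member to 'ctlr' and switches its helpers to spi_controller terminology; every bitbang-based driver in this series follows the same pattern. A minimal sketch of the registration sequence after the rename (demo_* names and callbacks are placeholders, error handling trimmed):

struct demo_spi {
	struct spi_bitbang bitbang;
	/* driver-private fields */
};

static int demo_bitbang_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct demo_spi *sp;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*sp));
	if (!host)
		return -ENOMEM;

	sp = spi_controller_get_devdata(host);
	sp->bitbang.ctlr = host;	/* was: sp->bitbang.master */
	sp->bitbang.chipselect = demo_chipselect;
	sp->bitbang.txrx_word[SPI_MODE_0] = demo_txrx_word_mode0;

	ret = spi_bitbang_start(&sp->bitbang);
	if (ret)
		spi_controller_put(host);

	return ret;
}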
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 289b4454242a..1d267e6c22a4 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -205,7 +205,7 @@ static void butterfly_attach(struct parport *p)
host->bus_num = 42;
host->num_chipselect = 2;
- pp->bitbang.master = host;
+ pp->bitbang.ctlr = host;
pp->bitbang.chipselect = butterfly_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
@@ -263,7 +263,7 @@ static void butterfly_attach(struct parport *p)
pp->info[0].platform_data = &flash;
pp->info[0].chip_select = 1;
pp->info[0].controller_data = pp;
- pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
+ pp->dataflash = spi_new_device(pp->bitbang.ctlr, &pp->info[0]);
if (pp->dataflash)
pr_debug("%s: dataflash at %s\n", p->name,
dev_name(&pp->dataflash->dev));
@@ -308,7 +308,7 @@ static void butterfly_detach(struct parport *p)
parport_release(pp->pd);
parport_unregister_device(pp->pd);
- spi_controller_put(pp->bitbang.master);
+ spi_controller_put(pp->bitbang.ctlr);
}
static struct parport_driver butterfly_driver = {
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index f94e0d370d46..350b3dab3a05 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -31,7 +31,9 @@
#include <linux/timer.h>
#define CQSPI_NAME "cadence-qspi"
-#define CQSPI_MAX_CHIPSELECT 16
+#define CQSPI_MAX_CHIPSELECT 4
+
+static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
@@ -1410,7 +1412,7 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
int ret;
- struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &cqspi->pdev->dev;
ret = pm_runtime_resume_and_get(dev);
@@ -1619,6 +1621,7 @@ static const struct spi_controller_mem_caps cqspi_mem_caps = {
static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
+ unsigned int max_cs = cqspi->num_chipselect - 1;
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -1635,10 +1638,12 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
return ret;
}
- if (cs >= CQSPI_MAX_CHIPSELECT) {
+ if (cs >= cqspi->num_chipselect) {
dev_err(dev, "Chip select %d out of range.\n", cs);
of_node_put(np);
return -EINVAL;
+ } else if (cs < max_cs) {
+ max_cs = cs;
}
f_pdata = &cqspi->f_pdata[cs];
@@ -1652,6 +1657,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
}
}
+ cqspi->num_chipselect = max_cs + 1;
return 0;
}
@@ -1712,10 +1718,9 @@ static int cqspi_probe(struct platform_device *pdev)
int irq;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
- if (!host) {
- dev_err(&pdev->dev, "devm_spi_alloc_host failed\n");
+ if (!host)
return -ENOMEM;
- }
+
host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
host->mem_ops = &cqspi_mem_ops;
host->mem_caps = &cqspi_mem_caps;
@@ -1863,14 +1868,14 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
- host->num_chipselect = cqspi->num_chipselect;
-
ret = cqspi_setup_flash(cqspi);
if (ret) {
dev_err(dev, "failed to setup flash parameters %d\n", ret);
goto probe_setup_failed;
}
+ host->num_chipselect = cqspi->num_chipselect;
+
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
@@ -1927,24 +1932,18 @@ static void cqspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int cqspi_suspend(struct device *dev)
+static int cqspi_runtime_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
- int ret;
- ret = spi_controller_suspend(host);
cqspi_controller_enable(cqspi, 0);
-
clk_disable_unprepare(cqspi->clk);
-
- return ret;
+ return 0;
}
-static int cqspi_resume(struct device *dev)
+static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
@@ -1952,12 +1951,27 @@ static int cqspi_resume(struct device *dev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ return 0;
+}
+
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
- return spi_controller_resume(host);
+ return spi_controller_suspend(cqspi->host);
}
-static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend,
- cqspi_resume, NULL);
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_resume(cqspi->host);
+}
+
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
+};
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
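Runtime PM now only gates the controller clock, while system sleep goes through spi_controller_suspend()/resume(); both sets live in one dev_pm_ops built from RUNTIME_PM_OPS() and SYSTEM_SLEEP_PM_OPS(). A hedged note on hooking it up (the driver struct itself sits outside the hunks shown; names here are illustrative):

static struct platform_driver demo_qspi_driver = {
	.driver = {
		.name = "demo-qspi",
		/* pm_ptr() compiles the ops away when CONFIG_PM is disabled */
		.pm = pm_ptr(&cqspi_dev_pm_ops),
	},
};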
diff --git a/drivers/spi/spi-cavium.c b/drivers/spi/spi-cavium.c
index dfe224defd6e..26b8cd1c76e1 100644
--- a/drivers/spi/spi-cavium.c
+++ b/drivers/spi/spi-cavium.c
@@ -124,10 +124,10 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
return xfer->len;
}
-int octeon_spi_transfer_one_message(struct spi_master *master,
+int octeon_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct octeon_spi *p = spi_master_get_devdata(master);
+ struct octeon_spi *p = spi_controller_get_devdata(ctlr);
unsigned int total_len = 0;
int status = 0;
struct spi_transfer *xfer;
@@ -145,6 +145,6 @@ int octeon_spi_transfer_one_message(struct spi_master *master,
err:
msg->status = status;
msg->actual_length = total_len;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return status;
}
diff --git a/drivers/spi/spi-cavium.h b/drivers/spi/spi-cavium.h
index 1f3ac463a20b..af53a0c31476 100644
--- a/drivers/spi/spi-cavium.h
+++ b/drivers/spi/spi-cavium.h
@@ -28,7 +28,7 @@ struct octeon_spi {
#define OCTEON_SPI_TX(x) (x->regs.tx)
#define OCTEON_SPI_DAT0(x) (x->regs.data)
-int octeon_spi_transfer_one_message(struct spi_master *master,
+int octeon_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg);
/* MPI register descriptions */
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index b24190526ce9..aabef9fc84bd 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -11,7 +11,9 @@
#include <linux/errno.h>
#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -148,8 +150,7 @@ static void cs42l43_set_cs(struct spi_device *spi, bool is_high)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
- if (spi_get_chipselect(spi, 0) == 0)
- regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
}
static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
@@ -202,6 +203,11 @@ static size_t cs42l43_spi_max_length(struct spi_device *spi)
return CS42L43_SPI_MAX_LENGTH;
}
+static void cs42l43_release_of_node(void *data)
+{
+ fwnode_handle_put(data);
+}
+
static int cs42l43_spi_probe(struct platform_device *pdev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
@@ -228,12 +234,6 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
priv->ctlr->transfer_one = cs42l43_transfer_one;
priv->ctlr->set_cs = cs42l43_set_cs;
priv->ctlr->max_transfer_size = cs42l43_spi_max_length;
-
- if (is_of_node(fwnode))
- fwnode = fwnode_get_named_child_node(fwnode, "spi");
-
- device_set_node(&priv->ctlr->dev, fwnode);
-
priv->ctlr->mode_bits = SPI_3WIRE | SPI_MODE_X_MASK;
priv->ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
priv->ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
@@ -257,6 +257,17 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
regmap_write(priv->regmap, CS42L43_SPI_CONFIG3, 0);
regmap_write(priv->regmap, CS42L43_SPI_CONFIG4, CS42L43_SPI_STALL_ENA_MASK);
+ if (is_of_node(fwnode)) {
+ fwnode = fwnode_get_named_child_node(fwnode, "spi");
+ ret = devm_add_action(priv->dev, cs42l43_release_of_node, fwnode);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+ }
+
+ device_set_node(&priv->ctlr->dev, fwnode);
+
ret = devm_spi_register_controller(priv->dev, priv->ctlr);
if (ret) {
dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 5688be245c68..be3998104bfb 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -459,7 +459,7 @@ static bool davinci_spi_can_dma(struct spi_controller *host,
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
- struct device *sdev = dspi->bitbang.master->dev.parent;
+ struct device *sdev = dspi->bitbang.ctlr->dev.parent;
if (int_status & SPIFLG_TIMEOUT_MASK) {
dev_err(sdev, "SPI Time-out Error\n");
@@ -742,7 +742,7 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data)
static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
- struct device *sdev = dspi->bitbang.master->dev.parent;
+ struct device *sdev = dspi->bitbang.ctlr->dev.parent;
dspi->dma_rx = dma_request_chan(sdev, "rx");
if (IS_ERR(dspi->dma_rx))
@@ -913,7 +913,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (ret)
goto free_host;
- dspi->bitbang.master = host;
+ dspi->bitbang.ctlr = host;
dspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index 0ecbb6c36e23..f4c209e5f52b 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -577,7 +577,7 @@ static int dw_spi_dma_transfer_one(struct dw_spi *dws,
sg_init_table(&tx_tmp, 1);
sg_init_table(&rx_tmp, 1);
- for (base = 0, len = 0; base < xfer->len; base += len) {
+ for (base = 0; base < xfer->len; base += len) {
/* Fetch next Tx DMA data chunk */
if (!tx_len) {
tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index c9eae046f66c..38defdcf9370 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -502,15 +502,12 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return -ENOMEM;
dma->chan_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(dma->chan_rx)) {
- return dev_err_probe(dev, PTR_ERR(dma->chan_rx),
- "rx dma channel not available\n");
- }
+ if (IS_ERR(dma->chan_rx))
+ return dev_err_probe(dev, PTR_ERR(dma->chan_rx), "rx dma channel not available\n");
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
- ret = PTR_ERR(dma->chan_tx);
- dev_err_probe(dev, ret, "tx dma channel not available\n");
+ ret = dev_err_probe(dev, PTR_ERR(dma->chan_tx), "tx dma channel not available\n");
goto err_tx_channel;
}
@@ -541,16 +538,14 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(dma->chan_rx, &cfg);
if (ret) {
- dev_err(dev, "can't configure rx dma channel\n");
- ret = -EINVAL;
+ dev_err_probe(dev, ret, "can't configure rx dma channel\n");
goto err_slave_config;
}
cfg.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(dma->chan_tx, &cfg);
if (ret) {
- dev_err(dev, "can't configure tx dma channel\n");
- ret = -EINVAL;
+ dev_err_probe(dev, ret, "can't configure tx dma channel\n");
goto err_slave_config;
}
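dev_err_probe() returns the error it is handed (and demotes -EPROBE_DEFER to a debug message while recording the deferral reason), which is what lets the hunks above merge the print with the assignment or return. A minimal hedged illustration with a generic resource:

static int demo_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		/* prints (quietly for -EPROBE_DEFER) and returns the errno in one statement */
		return dev_err_probe(dev, PTR_ERR(*clk), "failed to get clock\n");

	return 0;
}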
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 885757c29fbb..4fc2c56555b5 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -82,18 +82,18 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct mpc8xxx_spi *mpc8xxx_spi;
- master = dev_get_drvdata(dev);
+ ctlr = dev_get_drvdata(dev);
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
| SPI_LSB_FIRST | SPI_LOOP;
- master->dev.of_node = dev->of_node;
+ ctlr->dev.of_node = dev->of_node;
- mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi = spi_controller_get_devdata(ctlr);
mpc8xxx_spi->dev = dev;
mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
@@ -104,8 +104,8 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
mpc8xxx_spi->rx_shift = 0;
mpc8xxx_spi->tx_shift = 0;
- master->bus_num = pdata->bus_num;
- master->num_chipselect = pdata->max_chipselect;
+ ctlr->bus_num = pdata->bus_num;
+ ctlr->num_chipselect = pdata->max_chipselect;
init_completion(&mpc8xxx_spi->done);
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 15f84e68d4d2..37ef8c40b276 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -647,7 +647,7 @@ static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
static int spi_geni_init(struct spi_geni_master *mas)
{
- struct spi_master *spi = dev_get_drvdata(mas->dev);
+ struct spi_controller *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
u32 spi_tx_cfg, fifo_disable;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index d8db4564b406..909cce109bba 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -427,7 +427,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
host->cleanup = spi_gpio_cleanup;
bb = &spi_gpio->bitbang;
- bb->master = host;
+ bb->ctlr = host;
/*
* There is some additional business, apart from driving the CS GPIO
* line, that we need to do on selection. This makes the local
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 3654ae35d2db..3e5dcf2b3c8a 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -1254,6 +1254,13 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
if (end > part->size)
part->size = end;
}
+
+ /*
+ * Regions can refer to the second chip too so in this case we
+ * just make the BIOS partition to occupy the whole chip.
+ */
+ if (ispi->chip0_size && part->size > ispi->chip0_size)
+ part->size = MTDPART_SIZ_FULL;
}
static int intel_spi_read_desc(struct intel_spi *ispi)
@@ -1346,9 +1353,14 @@ static int intel_spi_read_desc(struct intel_spi *ispi)
static int intel_spi_populate_chip(struct intel_spi *ispi)
{
struct flash_platform_data *pdata;
+ struct mtd_partition *parts;
struct spi_board_info chip;
int ret;
+ ret = intel_spi_read_desc(ispi);
+ if (ret)
+ return ret;
+
pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -1368,15 +1380,27 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
if (!spi_new_device(ispi->host, &chip))
return -ENODEV;
- ret = intel_spi_read_desc(ispi);
- if (ret)
- return ret;
-
/* Add the second chip if present */
if (ispi->host->num_chipselect < 2)
return 0;
- chip.platform_data = NULL;
+ pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->name = devm_kasprintf(ispi->dev, GFP_KERNEL, "%s-chip1",
+ dev_name(ispi->dev));
+ pdata->nr_parts = 1;
+ parts = devm_kcalloc(ispi->dev, pdata->nr_parts, sizeof(*parts),
+ GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ parts[0].size = MTDPART_SIZ_FULL;
+ parts[0].name = "BIOS1";
+ pdata->parts = parts;
+
+ chip.platform_data = pdata;
chip.chip_select = 1;
if (!spi_new_device(ispi->host, &chip))
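
The two intel-spi hunks above boil down to a sizing rule for the BIOS partition plus a full-chip "BIOS1" partition on the second chip. A minimal stand-alone sketch of the sizing rule (SIZE_FULL stands in for MTDPART_SIZ_FULL; the helper name and region list are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define SIZE_FULL UINT64_MAX	/* stand-in for MTDPART_SIZ_FULL */

struct region { uint64_t base, limit; };	/* flash region, inclusive limit */

static uint64_t bios_part_size(const struct region *r, int nr, uint64_t chip0_size)
{
	uint64_t size = 0;
	int i;

	for (i = 0; i < nr; i++) {
		uint64_t end = r[i].limit + 1;

		/* grow the partition so it covers this region too */
		if (end > size)
			size = end;
	}

	/* a region beyond chip 0 means the partition must span the whole chip */
	if (chip0_size && size > chip0_size)
		size = SIZE_FULL;

	return size;
}

int main(void)
{
	struct region regions[] = { { 0x0, 0x3fffff }, { 0x400000, 0xbfffff } };
	uint64_t size = bios_part_size(regions, 2, 0x800000);

	printf("partition size: %s\n", size == SIZE_FULL ? "full chip" : "bounded");
	return 0;
}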
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index e6a65b5c8c31..f982bdebd028 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -212,7 +212,7 @@ static void spi_lm70llp_attach(struct parport *p)
/*
* SPI and bitbang hookup.
*/
- pp->bitbang.master = host;
+ pp->bitbang.ctlr = host;
pp->bitbang.chipselect = lm70_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
pp->bitbang.flags = SPI_3WIRE;
@@ -264,7 +264,7 @@ static void spi_lm70llp_attach(struct parport *p)
* the board info's (void *)controller_data.
*/
pp->info.controller_data = pp;
- pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
+ pp->spidev_lm70 = spi_new_device(pp->bitbang.ctlr, &pp->info);
if (pp->spidev_lm70)
dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
dev_name(&pp->spidev_lm70->dev));
@@ -309,7 +309,7 @@ static void spi_lm70llp_detach(struct parport *p)
parport_release(pp->pd);
parport_unregister_device(pp->pd);
- spi_controller_put(pp->bitbang.master);
+ spi_controller_put(pp->bitbang.ctlr);
lm70llp = NULL;
}
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index bbf2015d8e5c..fee8893d2751 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -1031,8 +1031,8 @@ int spi_test_run_test(struct spi_device *spi, const struct spi_test *test,
#define FOR_EACH_ALIGNMENT(var) \
for (var = 0; \
var < (test->iterate_##var ? \
- (spi->master->dma_alignment ? \
- spi->master->dma_alignment : \
+ (spi->controller->dma_alignment ? \
+ spi->controller->dma_alignment : \
test->iterate_##var) : \
1); \
var++)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 2dc8ceb85374..c9d6d42a88f5 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -297,6 +297,49 @@ static void spi_mem_access_end(struct spi_mem *mem)
pm_runtime_put(ctlr->dev.parent);
}
+static void spi_mem_add_op_stats(struct spi_statistics __percpu *pcpu_stats,
+ const struct spi_mem_op *op, int exec_op_ret)
+{
+ struct spi_statistics *stats;
+ u64 len, l2len;
+
+ get_cpu();
+ stats = this_cpu_ptr(pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+
+ /*
+ * We do not have the concept of messages or transfers. Let's consider
+ * that one operation is equivalent to one message and one transfer.
+ */
+ u64_stats_inc(&stats->messages);
+ u64_stats_inc(&stats->transfers);
+
+ /* Use the sum of all lengths as bytes count and histogram value. */
+ len = op->cmd.nbytes + op->addr.nbytes;
+ len += op->dummy.nbytes + op->data.nbytes;
+ u64_stats_add(&stats->bytes, len);
+ l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1;
+ u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
+
+ /* Only account for data bytes as transferred bytes. */
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ u64_stats_add(&stats->bytes_tx, op->data.nbytes);
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ u64_stats_add(&stats->bytes_rx, op->data.nbytes);
+
+ /*
+ * A timeout is not an error, following the same behavior as
+ * spi_transfer_one_message().
+ */
+ if (exec_op_ret == -ETIMEDOUT)
+ u64_stats_inc(&stats->timedout);
+ else if (exec_op_ret)
+ u64_stats_inc(&stats->errors);
+
+ u64_stats_update_end(&stats->syncp);
+ put_cpu();
+}
+
/**
* spi_mem_exec_op() - Execute a memory operation
* @mem: the SPI memory
@@ -339,8 +382,12 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP || ret != -EOPNOTSUPP)
+ if (!ret || ret != -ENOTSUPP || ret != -EOPNOTSUPP) {
+ spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
+ spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);
+
return ret;
+ }
}
tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
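
The statistics helper added above folds each operation's total length into a power-of-two histogram bucket with fls(). A minimal user-space model of that bucketing (the local fls_u32() stands in for the kernel's fls(), and the 17-entry histogram size is assumed to match SPI_STATISTICS_HISTO_SIZE):

#include <stdio.h>

#define HISTO_SIZE 17

static int fls_u32(unsigned int x)	/* position of the highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* assumes len >= 1, since a spi-mem op always carries at least a command byte */
static int histo_bucket(unsigned int len)
{
	int b = fls_u32(len);

	if (b > HISTO_SIZE)
		b = HISTO_SIZE;
	return b - 1;	/* len == 1 -> bucket 0, 2..3 -> 1, 4..7 -> 2, ... */
}

int main(void)
{
	unsigned int lens[] = { 1, 4, 5, 256, 70000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %u -> bucket %d\n", lens[i], histo_bucket(lens[i]));
	return 0;
}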
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 8d5d170d49cc..8d4633b353ee 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
@@ -1316,6 +1317,8 @@ static int mtk_spi_suspend(struct device *dev)
clk_disable_unprepare(mdata->spi_hclk);
}
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
@@ -1325,6 +1328,8 @@ static int mtk_spi_resume(struct device *dev)
struct spi_controller *host = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_controller_get_devdata(host);
+ pinctrl_pm_select_default_state(dev);
+
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index e13f678f2395..88397f712a3b 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -591,7 +591,7 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
for (i = 0; i < ARRAY_SIZE(lutval); i++)
fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
- dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x], size: 0x%08x\n",
+ dev_dbg(f->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x], size: 0x%08x\n",
op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes);
/* lock LUT */
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index cf7c111088a6..6ea38f5e7d64 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -194,7 +194,7 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
if (!np)
return 0;
- hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+ hw->bitbang.ctlr->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
if (!of_property_read_u32(np, "baud-width", &val))
@@ -229,7 +229,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
- hw->bitbang.master = host;
+ hw->bitbang.ctlr = host;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
@@ -274,7 +274,7 @@ exit:
static void tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
- struct spi_controller *host = hw->bitbang.master;
+ struct spi_controller *host = hw->bitbang.ctlr;
spi_bitbang_stop(&hw->bitbang);
spi_controller_put(host);
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 2dd1c1bcf4bf..210a98d903fa 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -448,7 +448,7 @@ static void uwire_off(struct uwire_spi *uwire)
{
uwire_write_reg(UWIRE_SR3, 0);
clk_disable_unprepare(uwire->ck);
- spi_controller_put(uwire->bitbang.master);
+ spi_controller_put(uwire->bitbang.ctlr);
}
static int uwire_probe(struct platform_device *pdev)
@@ -493,7 +493,7 @@ static int uwire_probe(struct platform_device *pdev)
host->setup = uwire_setup;
host->cleanup = uwire_cleanup;
- uwire->bitbang.master = host;
+ uwire->bitbang.ctlr = host;
uwire->bitbang.chipselect = uwire_chipselect;
uwire->bitbang.setup_transfer = uwire_setup_transfer;
uwire->bitbang.txrx_bufs = uwire_txrx;
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 5b2d3e4e21b7..969965d7bc98 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -5,8 +5,15 @@
// Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
#include <linux/pci.h>
+#include <linux/spinlock.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -32,8 +39,68 @@
#define SPI_MST_CTL_MODE_SEL (BIT(2))
#define SPI_MST_CTL_GO (BIT(0))
+#define SPI_PERI_ADDR_BASE (0x160000)
+#define SPI_SYSTEM_ADDR_BASE (0x2000)
#define SPI_MST1_ADDR_BASE (0x800)
+#define DEV_REV_REG (SPI_SYSTEM_ADDR_BASE + 0x00)
+#define SPI_SYSLOCK_REG (SPI_SYSTEM_ADDR_BASE + 0xA0)
+#define SPI_CONFIG_PERI_ENABLE_REG (SPI_SYSTEM_ADDR_BASE + 0x108)
+
+#define SPI_PERI_ENBLE_PF_MASK (GENMASK(17, 16))
+#define DEV_REV_MASK (GENMASK(7, 0))
+
+#define SPI_SYSLOCK BIT(4)
+#define SPI0 (0)
+#define SPI1 (1)
+
+/* DMA Related Registers */
+#define SPI_DMA_ADDR_BASE (0x1000)
+#define SPI_DMA_GLOBAL_WR_ENGINE_EN (SPI_DMA_ADDR_BASE + 0x0C)
+#define SPI_DMA_WR_DOORBELL_REG (SPI_DMA_ADDR_BASE + 0x10)
+#define SPI_DMA_GLOBAL_RD_ENGINE_EN (SPI_DMA_ADDR_BASE + 0x2C)
+#define SPI_DMA_RD_DOORBELL_REG (SPI_DMA_ADDR_BASE + 0x30)
+#define SPI_DMA_INTR_WR_STS (SPI_DMA_ADDR_BASE + 0x4C)
+#define SPI_DMA_WR_INT_MASK (SPI_DMA_ADDR_BASE + 0x54)
+#define SPI_DMA_INTR_WR_CLR (SPI_DMA_ADDR_BASE + 0x58)
+#define SPI_DMA_ERR_WR_STS (SPI_DMA_ADDR_BASE + 0x5C)
+#define SPI_DMA_INTR_IMWR_WDONE_LOW (SPI_DMA_ADDR_BASE + 0x60)
+#define SPI_DMA_INTR_IMWR_WDONE_HIGH (SPI_DMA_ADDR_BASE + 0x64)
+#define SPI_DMA_INTR_IMWR_WABORT_LOW (SPI_DMA_ADDR_BASE + 0x68)
+#define SPI_DMA_INTR_IMWR_WABORT_HIGH (SPI_DMA_ADDR_BASE + 0x6C)
+#define SPI_DMA_INTR_WR_IMWR_DATA (SPI_DMA_ADDR_BASE + 0x70)
+#define SPI_DMA_INTR_RD_STS (SPI_DMA_ADDR_BASE + 0xA0)
+#define SPI_DMA_RD_INT_MASK (SPI_DMA_ADDR_BASE + 0xA8)
+#define SPI_DMA_INTR_RD_CLR (SPI_DMA_ADDR_BASE + 0xAC)
+#define SPI_DMA_ERR_RD_STS (SPI_DMA_ADDR_BASE + 0xB8)
+#define SPI_DMA_INTR_IMWR_RDONE_LOW (SPI_DMA_ADDR_BASE + 0xCC)
+#define SPI_DMA_INTR_IMWR_RDONE_HIGH (SPI_DMA_ADDR_BASE + 0xD0)
+#define SPI_DMA_INTR_IMWR_RABORT_LOW (SPI_DMA_ADDR_BASE + 0xD4)
+#define SPI_DMA_INTR_IMWR_RABORT_HIGH (SPI_DMA_ADDR_BASE + 0xD8)
+#define SPI_DMA_INTR_RD_IMWR_DATA (SPI_DMA_ADDR_BASE + 0xDC)
+
+#define SPI_DMA_CH0_WR_BASE (SPI_DMA_ADDR_BASE + 0x200)
+#define SPI_DMA_CH0_RD_BASE (SPI_DMA_ADDR_BASE + 0x300)
+#define SPI_DMA_CH1_WR_BASE (SPI_DMA_ADDR_BASE + 0x400)
+#define SPI_DMA_CH1_RD_BASE (SPI_DMA_ADDR_BASE + 0x500)
+
+#define SPI_DMA_CH_CTL1_OFFSET (0x00)
+#define SPI_DMA_CH_XFER_LEN_OFFSET (0x08)
+#define SPI_DMA_CH_SAR_LO_OFFSET (0x0C)
+#define SPI_DMA_CH_SAR_HI_OFFSET (0x10)
+#define SPI_DMA_CH_DAR_LO_OFFSET (0x14)
+#define SPI_DMA_CH_DAR_HI_OFFSET (0x18)
+
+#define SPI_DMA_CH0_DONE_INT BIT(0)
+#define SPI_DMA_CH1_DONE_INT BIT(1)
+#define SPI_DMA_CH0_ABORT_INT BIT(16)
+#define SPI_DMA_CH1_ABORT_INT BIT(17)
+#define SPI_DMA_DONE_INT_MASK (SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
+#define SPI_DMA_ABORT_INT_MASK (SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
+#define DMA_CH_CONTROL_LIE BIT(3)
+#define DMA_CH_CONTROL_RIE BIT(4)
+#define DMA_INTR_EN (DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
+
/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
#define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
@@ -50,6 +117,9 @@
#define SPI_MAX_DATA_LEN 320
#define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))
+#define SYSLOCK_RETRY_CNT (1000)
+#define SPI_DMA_ENGINE_EN (0x1)
+#define SPI_DMA_ENGINE_DIS (0x0)
#define SPI_INTR BIT(8)
#define SPI_FORCE_CE BIT(4)
@@ -62,11 +132,21 @@
struct pci1xxxx_spi_internal {
u8 hw_inst;
- bool spi_xfer_in_progress;
+ u8 clkdiv;
int irq;
+ int mode;
+ bool spi_xfer_in_progress;
+ void *rx_buf;
+ bool dma_aborted_rd;
+ u32 bytes_recvd;
+ u32 tx_sgl_len;
+ u32 rx_sgl_len;
+ struct scatterlist *tx_sgl, *rx_sgl;
+ bool dma_aborted_wr;
struct completion spi_xfer_done;
struct spi_controller *spi_host;
struct pci1xxxx_spi *parent;
+ struct spi_transfer *xfer;
struct {
unsigned int dev_sel : 3;
unsigned int msi_vector_sel : 1;
@@ -76,7 +156,12 @@ struct pci1xxxx_spi_internal {
struct pci1xxxx_spi {
struct pci_dev *dev;
u8 total_hw_instances;
+ u8 dev_rev;
void __iomem *reg_base;
+ void __iomem *dma_offset_bar;
+ /* lock to safely access the DMA registers in isr */
+ spinlock_t dma_reg_lock;
+ bool can_dma;
struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances);
};
@@ -106,6 +191,114 @@ static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
+static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par)
+{
+ writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG);
+ return readl(par->reg_base + SPI_SYSLOCK_REG);
+}
+
+static int pci1xxxx_acquire_sys_lock(struct pci1xxxx_spi *par)
+{
+ u32 regval;
+
+ return readx_poll_timeout(pci1xxxx_set_sys_lock, par, regval,
+ (regval & SPI_SYSLOCK), 100,
+ SYSLOCK_RETRY_CNT * 100);
+}
+
+static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
+{
+ writel(0x0, par->reg_base + SPI_SYSLOCK_REG);
+}
+
+static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
+{
+ struct pci_dev *pdev = spi_bus->dev;
+ u32 pf_num;
+ u32 regval;
+ int ret;
+
+ /*
+ * DEV_REV is a system register; the HW syslock bit must be
+ * acquired before accessing it.
+ */
+ ret = pci1xxxx_acquire_sys_lock(spi_bus);
+ if (ret) {
+ dev_err(&pdev->dev, "Error failed to acquire syslock\n");
+ return ret;
+ }
+
+ regval = readl(spi_bus->reg_base + DEV_REV_REG);
+ spi_bus->dev_rev = regval & DEV_REV_MASK;
+ if (spi_bus->dev_rev >= 0xC0) {
+ regval = readl(spi_bus->reg_base +
+ SPI_CONFIG_PERI_ENABLE_REG);
+ pf_num = regval & SPI_PERI_ENBLE_PF_MASK;
+ }
+
+ pci1xxxx_release_sys_lock(spi_bus);
+
+ /*
+ * DMA is supported only from C0 and SPI can use DMA only if
+ * it is mapped to PF0
+ */
+ if (spi_bus->dev_rev < 0xC0 || pf_num)
+ return -EOPNOTSUPP;
+
+ /*
+ * DMA is supported only with MSI interrupts. One SPI
+ * instance's MSI vector address and data are used for the
+ * DMA interrupt.
+ */
+ if (!irq_get_msi_desc(irq)) {
+ dev_warn(&pdev->dev, "Error MSI Interrupt not supported, will operate in PIO mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2));
+ if (!spi_bus->dma_offset_bar) {
+ dev_warn(&pdev->dev, "Error failed to map dma bar, will operate in PIO mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ dev_warn(&pdev->dev, "Error failed to set DMA mask, will operate in PIO mode\n");
+ pcim_iounmap(pdev, spi_bus->dma_offset_bar);
+ spi_bus->dma_offset_bar = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
+{
+ struct msi_msg msi;
+ int ret;
+
+ ret = pci1xxxx_check_spi_can_dma(spi_bus, irq);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&spi_bus->dma_reg_lock);
+ get_cached_msi_msg(irq, &msi);
+ writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
+ writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH);
+ writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW);
+ writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
+ writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+ writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+ dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
+ spi_bus->can_dma = true;
+ return 0;
+}
+
static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
{
struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
@@ -146,12 +339,79 @@ static u8 pci1xxxx_get_clock_div(u32 hz)
return val;
}
-static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
- struct spi_device *spi, struct spi_transfer *xfer)
+static void pci1xxxx_spi_setup_dma_to_io(struct pci1xxxx_spi_internal *p,
+ dma_addr_t dma_addr, u32 len)
+{
+ void __iomem *base;
+
+ if (!p->hw_inst)
+ base = p->parent->dma_offset_bar + SPI_DMA_CH0_RD_BASE;
+ else
+ base = p->parent->dma_offset_bar + SPI_DMA_CH1_RD_BASE;
+
+ writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
+ writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
+ writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_SAR_LO_OFFSET);
+ writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_SAR_HI_OFFSET);
+ /* Updated SPI Command Registers */
+ writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_DAR_LO_OFFSET);
+ writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_DAR_HI_OFFSET);
+}
+
+static void pci1xxxx_spi_setup_dma_from_io(struct pci1xxxx_spi_internal *p,
+ dma_addr_t dma_addr, u32 len)
+{
+ void *base;
+
+ if (!p->hw_inst)
+ base = p->parent->dma_offset_bar + SPI_DMA_CH0_WR_BASE;
+ else
+ base = p->parent->dma_offset_bar + SPI_DMA_CH1_WR_BASE;
+
+ writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
+ writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
+ writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_DAR_LO_OFFSET);
+ writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_DAR_HI_OFFSET);
+ writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_SAR_LO_OFFSET);
+ writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
+ base + SPI_DMA_CH_SAR_HI_OFFSET);
+}
+
+static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
+ u8 clkdiv, u32 len)
+{
+ u32 regval;
+
+ regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+ regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
+ SPI_MST_CTL_SPEED_MASK);
+
+ if (mode == SPI_MODE_3)
+ regval |= SPI_MST_CTL_MODE_SEL;
+
+ regval |= FIELD_PREP(SPI_MST_CTL_CMD_LEN_MASK, len);
+ regval |= FIELD_PREP(SPI_MST_CTL_SPEED_MASK, clkdiv);
+ writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+}
+
+static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
+{
+ u32 regval;
+
+ regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+ regval |= SPI_MST_CTL_GO;
+ writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+}
+
+static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
{
struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
- int mode, len, loop_iter, transfer_len;
struct pci1xxxx_spi *par = p->parent;
+ int len, loop_iter, transfer_len;
unsigned long bytes_transfered;
unsigned long bytes_recvd;
unsigned long loop_count;
@@ -161,7 +421,7 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
u8 clkdiv;
p->spi_xfer_in_progress = true;
- mode = spi->mode;
+ p->bytes_recvd = 0;
clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
@@ -186,26 +446,8 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
&tx_buf[bytes_transfered], len);
bytes_transfered += len;
- regval = readl(par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
- regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
- SPI_MST_CTL_SPEED_MASK);
-
- if (mode == SPI_MODE_3)
- regval |= SPI_MST_CTL_MODE_SEL;
- else
- regval &= ~SPI_MST_CTL_MODE_SEL;
-
- regval |= (clkdiv << 5);
- regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
- regval |= (len << 8);
- writel(regval, par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
- regval = readl(par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
- regval |= SPI_MST_CTL_GO;
- writel(regval, par->reg_base +
- SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
+ pci1xxxx_start_spi_xfer(p, p->hw_inst);
/* Wait for DMA_TERM interrupt */
result = wait_for_completion_timeout(&p->spi_xfer_done,
@@ -225,7 +467,113 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
return 0;
}
-static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
+ struct pci1xxxx_spi *par = p->parent;
+ dma_addr_t rx_dma_addr = 0;
+ dma_addr_t tx_dma_addr = 0;
+ int ret = 0;
+ u32 regval;
+
+ p->spi_xfer_in_progress = true;
+ p->tx_sgl = xfer->tx_sg.sgl;
+ p->rx_sgl = xfer->rx_sg.sgl;
+ p->rx_buf = xfer->rx_buf;
+ regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+
+ if (!xfer->tx_buf || !p->tx_sgl) {
+ ret = -EINVAL;
+ goto error;
+ }
+ p->xfer = xfer;
+ p->mode = spi->mode;
+ p->clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
+ p->bytes_recvd = 0;
+ p->rx_buf = xfer->rx_buf;
+ regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+
+ tx_dma_addr = sg_dma_address(p->tx_sgl);
+ rx_dma_addr = sg_dma_address(p->rx_sgl);
+ p->tx_sgl_len = sg_dma_len(p->tx_sgl);
+ p->rx_sgl_len = sg_dma_len(p->rx_sgl);
+ pci1xxxx_spi_setup(par, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
+ pci1xxxx_spi_setup_dma_to_io(p, (tx_dma_addr), p->tx_sgl_len);
+ if (rx_dma_addr)
+ pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
+ writel(p->hw_inst, par->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
+
+ reinit_completion(&p->spi_xfer_done);
+ /* Wait for DMA_TERM interrupt */
+ ret = wait_for_completion_timeout(&p->spi_xfer_done, PCI1XXXX_SPI_TIMEOUT);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ if (p->dma_aborted_rd) {
+ writel(SPI_DMA_ENGINE_DIS,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+ /*
+ * DMA engine reset takes time if any TLP
+ * completion is in progress; wait until the
+ * DMA engine reset has completed.
+ */
+ ret = readl_poll_timeout(par->dma_offset_bar +
+ SPI_DMA_GLOBAL_RD_ENGINE_EN, regval,
+ (regval == 0x0), 0, USEC_PER_MSEC);
+ if (ret) {
+ ret = -ECANCELED;
+ goto error;
+ }
+ writel(SPI_DMA_ENGINE_EN,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
+ p->dma_aborted_rd = false;
+ ret = -ECANCELED;
+ }
+ if (p->dma_aborted_wr) {
+ writel(SPI_DMA_ENGINE_DIS,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
+
+ /*
+ * DMA engine reset takes time if any TLP
+ * completion is in progress; wait until the
+ * DMA engine reset has completed.
+ */
+ ret = readl_poll_timeout(par->dma_offset_bar +
+ SPI_DMA_GLOBAL_WR_ENGINE_EN, regval,
+ (regval == 0x0), 0, USEC_PER_MSEC);
+ if (ret) {
+ ret = -ECANCELED;
+ goto error;
+ }
+
+ writel(SPI_DMA_ENGINE_EN,
+ par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
+ p->dma_aborted_wr = false;
+ ret = -ECANCELED;
+ }
+ goto error;
+ }
+ ret = 0;
+
+error:
+ p->spi_xfer_in_progress = false;
+
+ return ret;
+}
+
+static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
+ return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
+ else
+ return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
+}
+
+static irqreturn_t pci1xxxx_spi_isr_io(int irq, void *dev)
{
struct pci1xxxx_spi_internal *p = dev;
irqreturn_t spi_int_fired = IRQ_NONE;
@@ -235,15 +583,117 @@ static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
if (regval & SPI_INTR) {
/* Clear xfer_done */
+ if (p->parent->can_dma && p->rx_buf)
+ writel(p->hw_inst, p->parent->dma_offset_bar +
+ SPI_DMA_WR_DOORBELL_REG);
+ else
+ complete(&p->parent->spi_int[p->hw_inst]->spi_xfer_done);
+ spi_int_fired = IRQ_HANDLED;
+ }
+ writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ return spi_int_fired;
+}
+
+static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p)
+{
+ dma_addr_t tx_dma_addr = 0;
+ dma_addr_t rx_dma_addr = 0;
+ u32 prev_len;
+
+ p->tx_sgl = sg_next(p->tx_sgl);
+ if (p->rx_sgl)
+ p->rx_sgl = sg_next(p->rx_sgl);
+ if (!p->tx_sgl) {
+ /* Clear xfer_done */
complete(&p->spi_xfer_done);
+ } else {
+ tx_dma_addr = sg_dma_address(p->tx_sgl);
+ prev_len = p->tx_sgl_len;
+ p->tx_sgl_len = sg_dma_len(p->tx_sgl);
+ if (prev_len != p->tx_sgl_len)
+ pci1xxxx_spi_setup(p->parent,
+ p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
+ pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len);
+ if (p->rx_sgl) {
+ rx_dma_addr = sg_dma_address(p->rx_sgl);
+ p->rx_sgl_len = sg_dma_len(p->rx_sgl);
+ pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
+ }
+ writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
+ }
+}
+
+static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+{
+ struct pci1xxxx_spi_internal *p = dev;
+ irqreturn_t spi_int_fired = IRQ_NONE;
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
+ /* Clear the DMA RD INT and start the SPI xfer */
+ regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
+ if (regval & SPI_DMA_DONE_INT_MASK) {
+ if (regval & SPI_DMA_CH0_DONE_INT)
+ pci1xxxx_start_spi_xfer(p, SPI0);
+ if (regval & SPI_DMA_CH1_DONE_INT)
+ pci1xxxx_start_spi_xfer(p, SPI1);
+ spi_int_fired = IRQ_HANDLED;
+ }
+ if (regval & SPI_DMA_ABORT_INT_MASK) {
+ p->dma_aborted_rd = true;
spi_int_fired = IRQ_HANDLED;
}
+ writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
- writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ /* Clear the DMA WR INT */
+ regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS);
+ if (regval & SPI_DMA_DONE_INT_MASK) {
+ if (regval & SPI_DMA_CH0_DONE_INT)
+ pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]);
+ if (regval & SPI_DMA_CH1_DONE_INT)
+ pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]);
+
+ spi_int_fired = IRQ_HANDLED;
+ }
+ if (regval & SPI_DMA_ABORT_INT_MASK) {
+ p->dma_aborted_wr = true;
+ spi_int_fired = IRQ_HANDLED;
+ }
+ writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
+ spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+
+ /* Clear the SPI GO_BIT Interrupt */
+ regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ if (regval & SPI_INTR) {
+ writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_WR_DOORBELL_REG);
+ spi_int_fired = IRQ_HANDLED;
+ }
+ writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
return spi_int_fired;
}
+static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+{
+ struct pci1xxxx_spi_internal *p = dev;
+
+ if (p->spi_host->can_dma(p->spi_host, NULL, p->xfer))
+ return pci1xxxx_spi_isr_dma(irq, dev);
+ else
+ return pci1xxxx_spi_isr_io(irq, dev);
+}
+
+static bool pci1xxxx_spi_can_dma(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(host);
+ struct pci1xxxx_spi *par = p->parent;
+
+ return par->can_dma;
+}
+
static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 hw_inst_cnt, iter, start, only_sec_inst;
@@ -324,6 +774,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
goto error;
}
+ ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
+ if (ret && ret != -EOPNOTSUPP)
+ goto error;
+
/* This register is only applicable for 1st instance */
regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
if (!only_sec_inst)
@@ -360,7 +814,9 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
SPI_TX_DUAL | SPI_LOOP;
+ spi_host->can_dma = pci1xxxx_spi_can_dma;
spi_host->transfer_one = pci1xxxx_spi_transfer_one;
+
spi_host->set_cs = pci1xxxx_spi_set_cs;
spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
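
Most of the new pci1xxxx DMA path hinges on the syslock handshake used before touching system registers such as DEV_REV: write the lock bit, then poll the same register until the hardware reports it set, giving up after a retry budget. A minimal user-space model of that handshake (fake_syslock_reg and the retry count are stand-ins for the real SPI_SYSLOCK_REG and SYSLOCK_RETRY_CNT):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYSLOCK_BIT	(1u << 4)
#define RETRY_CNT	1000

static uint32_t fake_syslock_reg;	/* stands in for the memory-mapped register */

static uint32_t set_and_read_syslock(void)
{
	fake_syslock_reg |= SYSLOCK_BIT;	/* a real device may refuse to latch this */
	return fake_syslock_reg;
}

static bool acquire_syslock(void)
{
	for (int i = 0; i < RETRY_CNT; i++) {
		if (set_and_read_syslock() & SYSLOCK_BIT)
			return true;
		/* the driver sleeps roughly 100us between polls here */
	}
	return false;
}

static void release_syslock(void)
{
	fake_syslock_reg = 0;
}

int main(void)
{
	if (acquire_syslock()) {
		/* ... access DEV_REV and other system registers ... */
		release_syslock();
		puts("syslock acquired and released");
	}
	return 0;
}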
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index f55b38c577e4..709edb70ad7d 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -11,13 +11,13 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 4b9669da2cf3..e1ecd96c7858 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -160,8 +160,7 @@
*/
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
-/* 2 for native cs, 2 for cs-gpio */
-#define ROCKCHIP_SPI_MAX_CS_NUM 4
+#define ROCKCHIP_SPI_MAX_NATIVE_CS_NUM 2
#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
@@ -192,8 +191,6 @@ struct rockchip_spi {
u8 n_bytes;
u8 rsd;
- bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
-
bool target_abort;
bool cs_inactive; /* SPI target transmission stops when CS is inactive */
bool cs_high_supported; /* native CS supports active-high polarity */
@@ -245,10 +242,6 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
- /* Return immediately for no-op */
- if (cs_asserted == rs->cs_asserted[spi_get_chipselect(spi, 0)])
- return;
-
if (cs_asserted) {
/* Keep things powered as long as CS is asserted */
pm_runtime_get_sync(rs->dev);
@@ -268,8 +261,6 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
/* Drop reference from when we first asserted CS */
pm_runtime_put(rs->dev);
}
-
- rs->cs_asserted[spi_get_chipselect(spi, 0)] = cs_asserted;
}
static void rockchip_spi_handle_err(struct spi_controller *ctlr,
@@ -847,7 +838,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->target_abort = rockchip_spi_target_abort;
} else {
ctlr->flags = SPI_CONTROLLER_GPIO_SS;
- ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
+ ctlr->max_native_cs = ROCKCHIP_SPI_MAX_NATIVE_CS_NUM;
/*
* rk spi0 has two native cs, spi1..5 one cs only
* if num-cs is missing in the dts, default to 1
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 0e48ffd499b9..9fcbe040cb2f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -3,19 +3,22 @@
// Copyright (c) 2009 Samsung Electronics Co., Ltd.
// Jaswinder Singh <jassi.brar@samsung.com>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/spi-s3c64xx.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
-#include <linux/of.h>
-
-#include <linux/platform_data/spi-s3c64xx.h>
+#include <linux/types.h>
#define MAX_SPI_PORTS 12
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
@@ -76,6 +79,9 @@
#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+#define S3C64XX_SPI_ST_RX_FIFO_RDY_V2 GENMASK(23, 15)
+#define S3C64XX_SPI_ST_TX_FIFO_RDY_V2 GENMASK(14, 6)
+#define S3C64XX_SPI_ST_TX_FIFO_LVL_SHIFT 6
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
@@ -106,15 +112,15 @@
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
-#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
-#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
- FIFO_LVL_MASK(i))
+#define TX_FIFO_LVL(v, sdd) (((v) & (sdd)->tx_fifomask) >> \
+ __ffs((sdd)->tx_fifomask))
+#define RX_FIFO_LVL(v, sdd) (((v) & (sdd)->rx_fifomask) >> \
+ __ffs((sdd)->rx_fifomask))
+#define FIFO_DEPTH(i) ((FIFO_LVL_MASK(i) >> 1) + 1)
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
-#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
-
#define S3C64XX_SPI_POLLING_SIZE 32
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
@@ -131,8 +137,13 @@ struct s3c64xx_spi_dma_data {
/**
* struct s3c64xx_spi_port_config - SPI Controller hardware info
- * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
- * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter.
+ * @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead.
+ * @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead.
+ * @fifo_depth: depth of the FIFO.
+ * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
+ * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
* @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
* @clk_div: Internal clock divider
* @quirks: Bitmask of known quirks
@@ -141,6 +152,7 @@ struct s3c64xx_spi_dma_data {
* prescaler unit.
* @clk_ioclk: True if clock is present on this device
* @has_loopback: True if loopback mode can be supported
+ * @use_32bit_io: True if the SoC allows only 32-bit register accesses.
*
* The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
* differ in some aspects such as the size of the fifo and spi bus clock
@@ -150,6 +162,9 @@ struct s3c64xx_spi_dma_data {
struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
+ unsigned int fifo_depth;
+ u32 rx_fifomask;
+ u32 tx_fifomask;
int tx_st_done;
int quirks;
int clk_div;
@@ -157,6 +172,7 @@ struct s3c64xx_spi_port_config {
bool clk_from_cmu;
bool clk_ioclk;
bool has_loopback;
+ bool use_32bit_io;
};
/**
@@ -177,8 +193,13 @@ struct s3c64xx_spi_port_config {
* @cur_speed: Current clock speed
* @rx_dma: Local receive DMA data (e.g. chan and direction)
* @tx_dma: Local transmit DMA data (e.g. chan and direction)
- * @port_conf: Local SPI port configuartion data
- * @port_id: Port identification number
+ * @port_conf: Local SPI port configuration data
+ * @port_id: [DEPRECATED] use @{rx,tx}_fifomask instead.
+ * @fifo_depth: depth of the FIFO.
+ * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
+ * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
*/
struct s3c64xx_spi_driver_data {
void __iomem *regs;
@@ -198,6 +219,9 @@ struct s3c64xx_spi_driver_data {
struct s3c64xx_spi_dma_data tx_dma;
const struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
+ unsigned int fifo_depth;
+ u32 rx_fifomask;
+ u32 tx_fifomask;
};
static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
@@ -276,8 +300,8 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
-static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
- struct sg_table *sgt)
+static int s3c64xx_prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
@@ -289,20 +313,20 @@ static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
- config.direction = dma->direction;
config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
config.src_addr_width = sdd->cur_bpw / 8;
config.src_maxburst = 1;
- dmaengine_slave_config(dma->ch, &config);
} else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
- config.direction = dma->direction;
config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
config.dst_addr_width = sdd->cur_bpw / 8;
config.dst_maxburst = 1;
- dmaengine_slave_config(dma->ch, &config);
}
+ config.direction = dma->direction;
+ ret = dmaengine_slave_config(dma->ch, &config);
+ if (ret)
+ return ret;
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT);
@@ -319,7 +343,7 @@ static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
ret = dma_submit_error(dma->cookie);
if (ret) {
dev_err(&sdd->pdev->dev, "DMA submission failed");
- return -EIO;
+ return ret;
}
dma_async_issue_pending(dma->ch);
@@ -405,12 +429,60 @@ static bool s3c64xx_spi_can_dma(struct spi_controller *host,
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
- if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
- return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
- } else {
- return false;
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch)
+ return xfer->len > sdd->fifo_depth;
+
+ return false;
+}
+
+static void s3c64xx_iowrite8_32_rep(volatile void __iomem *addr,
+ const void *buffer, unsigned int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+
+ do {
+ __raw_writel(*buf++, addr);
+ } while (--count);
}
+}
+static void s3c64xx_iowrite16_32_rep(volatile void __iomem *addr,
+ const void *buffer, unsigned int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+
+ do {
+ __raw_writel(*buf++, addr);
+ } while (--count);
+ }
+}
+
+static void s3c64xx_iowrite_rep(const struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *addr = sdd->regs + S3C64XX_SPI_TX_DATA;
+ const void *buf = xfer->tx_buf;
+ unsigned int len = xfer->len;
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ iowrite32_rep(addr, buf, len / 4);
+ break;
+ case 16:
+ if (sdd->port_conf->use_32bit_io)
+ s3c64xx_iowrite16_32_rep(addr, buf, len / 2);
+ else
+ iowrite16_rep(addr, buf, len / 2);
+ break;
+ default:
+ if (sdd->port_conf->use_32bit_io)
+ s3c64xx_iowrite8_32_rep(addr, buf, len);
+ else
+ iowrite8_rep(addr, buf, len);
+ break;
+ }
}
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
@@ -444,22 +516,9 @@ static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ ret = s3c64xx_prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
- switch (sdd->cur_bpw) {
- case 32:
- iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
- xfer->tx_buf, xfer->len / 4);
- break;
- case 16:
- iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
- xfer->tx_buf, xfer->len / 2);
- break;
- default:
- iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
- xfer->tx_buf, xfer->len);
- break;
- }
+ s3c64xx_iowrite_rep(sdd, xfer);
}
}
@@ -476,7 +535,7 @@ static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ ret = s3c64xx_prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
@@ -495,9 +554,7 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
void __iomem *regs = sdd->regs;
unsigned long val = 1;
u32 status;
-
- /* max fifo depth available */
- u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ u32 max_fifo = sdd->fifo_depth;
if (timeout_ms)
val = msecs_to_loops(timeout_ms);
@@ -528,7 +585,7 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
/*
* If the previous xfer was completed within timeout, then
- * proceed further else return -EIO.
+ * proceed further else return -ETIMEDOUT.
* DmaTx returns after simply writing data in the FIFO,
* w/o waiting for real transmission on the bus to finish.
* DmaRx returns only after Dma read data from FIFO which
@@ -549,7 +606,7 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
/* If timed out while checking rx/tx status return error */
if (!val)
- return -EIO;
+ return -ETIMEDOUT;
return 0;
}
@@ -579,7 +636,7 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
if (use_irq) {
val = msecs_to_jiffies(ms);
if (!wait_for_completion_timeout(&sdd->xfer_completion, val))
- return -EIO;
+ return -ETIMEDOUT;
}
val = msecs_to_loops(ms);
@@ -604,7 +661,7 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
* For any size less than the fifo size the below code is
* executed at least once.
*/
- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ loops = xfer->len / sdd->fifo_depth;
buf = xfer->rx_buf;
do {
/* wait for data to be received in the fifo */
@@ -741,7 +798,7 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
struct spi_transfer *xfer)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
- const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ const unsigned int fifo_len = sdd->fifo_depth;
const void *tx_buf = NULL;
void *rx_buf = NULL;
int target_len = 0, origin_len = 0;
@@ -1093,8 +1150,7 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~S3C64XX_SPI_MODE_4BURST;
- val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
- val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ val |= (S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
s3c64xx_flush_fifo(sdd);
@@ -1111,14 +1167,14 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
- dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
+ dev_dbg(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
sci->src_clk_nr = 0;
} else {
sci->src_clk_nr = temp;
}
if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
- dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
+ dev_dbg(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
sci->num_cs = 1;
} else {
sci->num_cs = temp;
@@ -1146,6 +1202,48 @@ static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
}
+static int s3c64xx_spi_set_port_id(struct platform_device *pdev,
+ struct s3c64xx_spi_driver_data *sdd)
+{
+ const struct s3c64xx_spi_port_config *port_conf = sdd->port_conf;
+ int ret;
+
+ if (port_conf->rx_fifomask && port_conf->tx_fifomask)
+ return 0;
+
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get alias id\n");
+ sdd->port_id = ret;
+ } else {
+ if (pdev->id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Negative platform ID is not allowed\n");
+ sdd->port_id = pdev->id;
+ }
+
+ return 0;
+}
+
+static void s3c64xx_spi_set_fifomask(struct s3c64xx_spi_driver_data *sdd)
+{
+ const struct s3c64xx_spi_port_config *port_conf = sdd->port_conf;
+
+ if (port_conf->rx_fifomask)
+ sdd->rx_fifomask = port_conf->rx_fifomask;
+ else
+ sdd->rx_fifomask = FIFO_LVL_MASK(sdd) <<
+ port_conf->rx_lvl_offset;
+
+ if (port_conf->tx_fifomask)
+ sdd->tx_fifomask = port_conf->tx_fifomask;
+ else
+ sdd->tx_fifomask = FIFO_LVL_MASK(sdd) <<
+ S3C64XX_SPI_ST_TX_FIFO_LVL_SHIFT;
+}
+
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
@@ -1181,15 +1279,18 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->host = host;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
- if (pdev->dev.of_node) {
- ret = of_alias_get_id(pdev->dev.of_node, "spi");
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
- "Failed to get alias id\n");
- sdd->port_id = ret;
- } else {
- sdd->port_id = pdev->id;
- }
+
+ ret = s3c64xx_spi_set_port_id(pdev, sdd);
+ if (ret)
+ return ret;
+
+ if (sdd->port_conf->fifo_depth)
+ sdd->fifo_depth = sdd->port_conf->fifo_depth;
+ else if (of_property_read_u32(pdev->dev.of_node, "fifo-depth",
+ &sdd->fifo_depth))
+ sdd->fifo_depth = FIFO_DEPTH(sdd);
+
+ s3c64xx_spi_set_fifomask(sdd);
sdd->cur_bpw = 8;
@@ -1197,7 +1298,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->rx_dma.direction = DMA_DEV_TO_MEM;
host->dev.of_node = pdev->dev.of_node;
- host->bus_num = sdd->port_id;
+ host->bus_num = -1;
host->setup = s3c64xx_spi_setup;
host->cleanup = s3c64xx_spi_cleanup;
host->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
@@ -1278,9 +1379,9 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
}
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Targets attached\n",
- sdd->port_id, host->num_chipselect);
+ host->bus_num, host->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
- mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
+ mem_res, sdd->fifo_depth);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
@@ -1319,8 +1420,9 @@ static int s3c64xx_spi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
+ int ret;
- int ret = spi_controller_suspend(host);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -1408,7 +1510,9 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {
};
static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x7f },
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 13,
.tx_st_done = 21,
.clk_div = 2,
@@ -1416,14 +1520,18 @@ static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
};
static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x7f, 0x7F },
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 13,
.tx_st_done = 21,
.clk_div = 2,
};
static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x1ff, 0x7F },
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
@@ -1431,7 +1539,9 @@ static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
};
static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
@@ -1441,7 +1551,9 @@ static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
};
static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
@@ -1451,7 +1563,9 @@ static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
};
static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
@@ -1461,9 +1575,23 @@ static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
+static const struct s3c64xx_spi_port_config exynos850_spi_port_config = {
+ .fifo_depth = 64,
+ .rx_fifomask = S3C64XX_SPI_ST_RX_FIFO_RDY_V2,
+ .tx_fifomask = S3C64XX_SPI_ST_TX_FIFO_RDY_V2,
+ .tx_st_done = 25,
+ .clk_div = 4,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .has_loopback = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f},
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 4,
@@ -1475,7 +1603,9 @@ static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = {
};
static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
+ /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
+ /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
@@ -1485,6 +1615,19 @@ static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
+static const struct s3c64xx_spi_port_config gs101_spi_port_config = {
+ .fifo_depth = 64,
+ .rx_fifomask = S3C64XX_SPI_ST_RX_FIFO_RDY_V2,
+ .tx_fifomask = S3C64XX_SPI_ST_TX_FIFO_RDY_V2,
+ .tx_st_done = 25,
+ .clk_div = 4,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .has_loopback = true,
+ .use_32bit_io = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1497,29 +1640,35 @@ static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
};
static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "google,gs101-spi",
+ .data = &gs101_spi_port_config,
+ },
{ .compatible = "samsung,s3c2443-spi",
- .data = (void *)&s3c2443_spi_port_config,
+ .data = &s3c2443_spi_port_config,
},
{ .compatible = "samsung,s3c6410-spi",
- .data = (void *)&s3c6410_spi_port_config,
+ .data = &s3c6410_spi_port_config,
},
{ .compatible = "samsung,s5pv210-spi",
- .data = (void *)&s5pv210_spi_port_config,
+ .data = &s5pv210_spi_port_config,
},
{ .compatible = "samsung,exynos4210-spi",
- .data = (void *)&exynos4_spi_port_config,
+ .data = &exynos4_spi_port_config,
},
{ .compatible = "samsung,exynos7-spi",
- .data = (void *)&exynos7_spi_port_config,
+ .data = &exynos7_spi_port_config,
},
{ .compatible = "samsung,exynos5433-spi",
- .data = (void *)&exynos5433_spi_port_config,
+ .data = &exynos5433_spi_port_config,
+ },
+ { .compatible = "samsung,exynos850-spi",
+ .data = &exynos850_spi_port_config,
},
{ .compatible = "samsung,exynosautov9-spi",
- .data = (void *)&exynosautov9_spi_port_config,
+ .data = &exynosautov9_spi_port_config,
},
{ .compatible = "tesla,fsd-spi",
- .data = (void *)&fsd_spi_port_config,
+ .data = &fsd_spi_port_config,
},
{ },
};
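
The fifomask rework above replaces a level mask plus a separate shift with a single shifted mask per direction; __ffs() of the mask recovers the shift, so one value describes both the position and the width of the FIFO level field. A minimal user-space model of that extraction (the GENMASK-style masks mirror the new *_FIFO_RDY_V2 definitions, and the status value is fabricated):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define RX_FIFOMASK	GENMASK32(23, 15)
#define TX_FIFOMASK	GENMASK32(14, 6)

static unsigned int field_get(uint32_t status, uint32_t mask)
{
	/* __builtin_ctz() plays the role of the kernel's __ffs() here */
	return (status & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t status = (42u << 15) | (7u << 6);	/* fabricated SPI_STATUS value */

	printf("rx level %u, tx level %u\n",
	       field_get(status, RX_FIFOMASK), field_get(status, TX_FIFOMASK));
	return 0;
}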
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 148d615d2f38..3d560b154ad3 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -136,9 +136,9 @@ static int sh_sci_spi_probe(struct platform_device *dev)
}
/* setup spi bitbang adaptor */
- sp->bitbang.master = host;
- sp->bitbang.master->bus_num = sp->info->bus_num;
- sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
+ sp->bitbang.ctlr = host;
+ sp->bitbang.ctlr->bus_num = sp->info->bus_num;
+ sp->bitbang.ctlr->num_chipselect = sp->info->num_chipselect;
sp->bitbang.chipselect = sh_sci_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0;
@@ -166,7 +166,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
setbits(sp, PIN_INIT, 0);
iounmap(sp->membase);
err1:
- spi_controller_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.ctlr);
err0:
return ret;
}
@@ -178,7 +178,7 @@ static void sh_sci_spi_remove(struct platform_device *dev)
spi_bitbang_stop(&sp->bitbang);
setbits(sp, PIN_INIT, 0);
iounmap(sp->membase);
- spi_controller_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.ctlr);
}
static struct platform_driver sh_sci_spi_drv = {
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
index 6d6772974783..f1ddf4c099a3 100644
--- a/drivers/spi/spi-slave-mt27xx.c
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -297,7 +297,7 @@ static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
static int mtk_spi_slave_setup(struct spi_device *spi)
{
- struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->controller);
u32 reg_val;
reg_val = DMA_DONE_EN | DATA_DONE_EN |
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 385832030459..f1e922fd362a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -8,13 +8,13 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index e61302ef3c21..e4e7ddb7524a 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1118,6 +1118,21 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int stm32_spi_optimize_message(struct spi_message *msg)
+{
+ struct spi_controller *ctrl = msg->spi->controller;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
+
+ /* On STM32H7, messages should not exceed a maximum size set
+ * later via the set_number_of_data function. To ensure this,
+ * split large messages into several smaller ones.
+ */
+ if (spi->cfg->set_number_of_data)
+ return spi_split_transfers_maxwords(ctrl, msg, spi->t_size_max);
+
+ return 0;
+}
+
/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
* @ctrl: controller interface
@@ -1163,20 +1178,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
!!(spi_dev->mode & SPI_LSB_FIRST),
!!(spi_dev->mode & SPI_CS_HIGH));
- /* On STM32H7, messages should not exceed a maximum size setted
- * afterward via the set_number_of_data function. In order to
- * ensure that, split large messages into several messages
- */
- if (spi->cfg->set_number_of_data) {
- int ret;
-
- ret = spi_split_transfers_maxwords(ctrl, msg,
- spi->t_size_max,
- GFP_KERNEL | GFP_DMA);
- if (ret)
- return ret;
- }
-
spin_lock_irqsave(&spi->lock, flags);
/* CPOL, CPHA and LSB FIRST bits have common register */
@@ -2182,6 +2183,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
ctrl->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
ctrl->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
ctrl->use_gpio_descriptors = true;
+ ctrl->optimize_message = stm32_spi_optimize_message;
ctrl->prepare_message = stm32_spi_prepare_msg;
ctrl->transfer_one = stm32_spi_transfer_one;
ctrl->unprepare_message = stm32_spi_unprepare_msg;
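
The stm32 change moves the splitting into the new optimize_message hook, but the splitting itself is unchanged: keep every transfer at or below the word count the controller can program. A minimal stand-alone model of that chunking (the loop and names are illustrative, not spi_split_transfers_maxwords() itself; 0xffff is only an example limit):

#include <stdio.h>

static void split_transfer(unsigned int words, unsigned int maxwords)
{
	unsigned int offset = 0;

	while (offset < words) {
		unsigned int chunk = words - offset;

		/* never hand the hardware more than it can count in one go */
		if (chunk > maxwords)
			chunk = maxwords;
		printf("transfer chunk: words %u..%u\n", offset, offset + chunk - 1);
		offset += chunk;
	}
}

int main(void)
{
	split_transfer(70000, 0xffff);	/* e.g. an STM32H7-style 16-bit size limit */
	return 0;
}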
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 12355957be97..7795328427a6 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -434,7 +434,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi = spi_controller_get_devdata(host);
xspi->cs_inactive = 0xffffffff;
- xspi->bitbang.master = host;
+ xspi->bitbang.ctlr = host;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
@@ -516,7 +516,7 @@ static void xilinx_spi_remove(struct platform_device *pdev)
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- spi_controller_put(xspi->bitbang.master);
+ spi_controller_put(xspi->bitbang.ctlr);
}
/* work with hotplug and coldplug */
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 3c7721894376..3c2cda315397 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -93,7 +93,7 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
host->dev.of_node = pdev->dev.of_node;
xspi = spi_controller_get_devdata(host);
- xspi->bitbang.master = host;
+ xspi->bitbang.ctlr = host;
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f2170f4b5077..f18738ae95f8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -459,7 +459,7 @@ static void spi_shutdown(struct device *dev)
}
}
-struct bus_type spi_bus_type = {
+const struct bus_type spi_bus_type = {
.name = "spi",
.dev_groups = spi_dev_groups,
.match = spi_match_device,
@@ -584,7 +584,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
return NULL;
}
- spi->master = spi->controller = ctlr;
+ spi->controller = ctlr;
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
@@ -608,23 +608,51 @@ static void spi_dev_set_name(struct spi_device *spi)
spi_get_chipselect(spi, 0));
}
+/*
+ * Zero(0) is a valid physical CS value and can be located at any
+ * logical CS in the spi->chip_select[]. If all the physical CS
+ * are initialized to 0 then it would be difficult to differentiate
+ * between a valid physical CS 0 and an unused logical CS whose
+ * physical CS can be 0. As a solution to this issue, initialize
+ * all the CS to -1. Now all the unused logical CS will have -1 as
+ * the physical CS value and can be ignored while performing
+ * physical CS validity checks.
+ */
+#define SPI_INVALID_CS ((s8)-1)
+
+static inline bool is_valid_cs(s8 chip_select)
+{
+ return chip_select != SPI_INVALID_CS;
+}
+
+static inline int spi_dev_check_cs(struct device *dev,
+ struct spi_device *spi, u8 idx,
+ struct spi_device *new_spi, u8 new_idx)
+{
+ u8 cs, cs_new;
+ u8 idx_new;
+
+ cs = spi_get_chipselect(spi, idx);
+ for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
+ cs_new = spi_get_chipselect(new_spi, idx_new);
+ if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
+ dev_err(dev, "chipselect %u already in use\n", cs_new);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
static int spi_dev_check(struct device *dev, void *data)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
- int idx, nw_idx;
- u8 cs, cs_nw;
+ int status, idx;
if (spi->controller == new_spi->controller) {
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- cs = spi_get_chipselect(spi, idx);
- for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
- cs_nw = spi_get_chipselect(new_spi, nw_idx);
- if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
- dev_err(dev, "chipselect %d already in use\n", cs_nw);
- return -EBUSY;
- }
- }
+ status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
+ if (status)
+ return status;
}
}
return 0;
@@ -640,13 +668,13 @@ static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
- int status, idx, nw_idx;
- u8 cs, nw_cs;
+ int status, idx;
+ u8 cs;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
/* Chipselects are numbered 0..max; validate. */
cs = spi_get_chipselect(spi, idx);
- if (cs != 0xFF && cs >= ctlr->num_chipselect) {
+ if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
ctlr->num_chipselect);
return -EINVAL;
@@ -658,14 +686,9 @@ static int __spi_add_device(struct spi_device *spi)
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- cs = spi_get_chipselect(spi, idx);
- for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
- nw_cs = spi_get_chipselect(spi, nw_idx);
- if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
- dev_err(dev, "chipselect %d already in use\n", nw_cs);
- return -EBUSY;
- }
- }
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
}
/* Set the bus ID string */
@@ -691,7 +714,7 @@ static int __spi_add_device(struct spi_device *spi)
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
cs = spi_get_chipselect(spi, idx);
- if (cs != 0xFF)
+ if (is_valid_cs(cs))
spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
}
}
@@ -745,6 +768,14 @@ int spi_add_device(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_add_device);
+static void spi_set_all_cs_unused(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, SPI_INVALID_CS);
+}
+
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@@ -764,7 +795,6 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
{
struct spi_device *proxy;
int status;
- u8 idx;
/*
* NOTE: caller did any chip->bus_num checks necessary.
@@ -780,19 +810,10 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(proxy, idx, 0xFF);
-
+ /* Use provided chip-select for proxy device */
+ spi_set_all_cs_unused(proxy);
spi_set_chipselect(proxy, 0, chip->chip_select);
+
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
@@ -1007,7 +1028,7 @@ static inline bool spi_is_last_cs(struct spi_device *spi)
bool last = false;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- if ((spi->cs_index_mask >> idx) & 0x01) {
+ if (spi->cs_index_mask & BIT(idx)) {
if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
last = true;
}
@@ -1036,7 +1057,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->last_cs_index_mask = spi->cs_index_mask;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1;
+ spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
if (spi->mode & SPI_CS_HIGH)
@@ -1058,8 +1079,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* into account.
*/
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- if (((spi->cs_index_mask >> idx) & 0x01) &&
- spi_get_csgpiod(spi, idx)) {
+ if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) {
if (has_acpi_companion(&spi->dev))
gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
!enable);
@@ -1747,15 +1767,6 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
trace_spi_message_start(msg);
- ret = spi_split_transfers_maxsize(ctlr, msg,
- spi_max_transfer_size(msg->spi),
- GFP_KERNEL | GFP_DMA);
- if (ret) {
- msg->status = ret;
- spi_finalize_current_message(ctlr);
- return ret;
- }
-
if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
@@ -2083,6 +2094,43 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+/*
+ * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
+ * and spi_maybe_unoptimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Peripheral drivers should use spi_unoptimize_message() and callers inside
+ * the core should use spi_maybe_unoptimize_message() rather than calling this
+ * function directly.
+ *
+ * It is not valid to call this on a message that is not currently optimized.
+ */
+static void __spi_unoptimize_message(struct spi_message *msg)
+{
+ struct spi_controller *ctlr = msg->spi->controller;
+
+ if (ctlr->unoptimize_message)
+ ctlr->unoptimize_message(msg);
+
+ spi_res_release(ctlr, msg);
+
+ msg->optimized = false;
+ msg->opt_state = NULL;
+}
+
+/*
+ * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
+ * @msg: the message to unoptimize
+ *
+ * This function is used to unoptimize a message if and only if it was
+ * optimized by the core (via spi_maybe_optimize_message()).
+ */
+static void spi_maybe_unoptimize_message(struct spi_message *msg)
+{
+ if (!msg->pre_optimized && msg->optimized)
+ __spi_unoptimize_message(msg);
+}
+
/**
* spi_finalize_current_message() - the current message is complete
* @ctlr: the controller to return the message to
@@ -2111,15 +2159,6 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spi_unmap_msg(ctlr, mesg);
- /*
- * In the prepare_messages callback the SPI bus has the opportunity
- * to split a transfer to smaller chunks.
- *
- * Release the split transfers here since spi_map_msg() is done on
- * the split transfers.
- */
- spi_res_release(ctlr, mesg);
-
if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
@@ -2130,6 +2169,8 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
mesg->prepared = false;
+ spi_maybe_unoptimize_message(mesg);
+
WRITE_ONCE(ctlr->cur_msg_incomplete, false);
smp_mb(); /* See __spi_pump_transfer_message()... */
if (READ_ONCE(ctlr->cur_msg_need_completion))
@@ -2397,17 +2438,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
return -EINVAL;
}
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(spi, idx, 0xFF);
+ spi_set_all_cs_unused(spi);
/* Device address */
rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
@@ -2431,14 +2462,10 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi_set_chipselect(spi, idx, cs[idx]);
/*
- * spi->chip_select[i] gives the corresponding physical CS for logical CS i
- * logical CS number is represented by setting the ith bit in spi->cs_index_mask
- * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
- * spi->chip_select[0] will give the physical CS.
- * By default spi->chip_select[0] will hold the physical CS number so, set
- * spi->cs_index_mask as 0x01.
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
*/
- spi->cs_index_mask = 0x01;
+ spi->cs_index_mask = BIT(0);
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
@@ -2544,7 +2571,6 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
int rc = 0;
- u8 idx;
/* Alloc an spi_device */
ancillary = spi_alloc_device(ctlr);
@@ -2555,33 +2581,18 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(ancillary, idx, 0xFF);
-
/* Use provided chip-select for ancillary device */
+ spi_set_all_cs_unused(ancillary);
spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
/*
- * spi->chip_select[i] gives the corresponding physical CS for logical CS i
- * logical CS number is represented by setting the ith bit in spi->cs_index_mask
- * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
- * spi->chip_select[0] will give the physical CS.
- * By default spi->chip_select[0] will hold the physical CS number so, set
- * spi->cs_index_mask as 0x01.
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
*/
- ancillary->cs_index_mask = 0x01;
+ ancillary->cs_index_mask = BIT(0);
WARN_ON(!mutex_is_locked(&ctlr->add_lock));
@@ -2784,7 +2795,6 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
- u8 idx;
if (!ctlr && index == -1)
return ERR_PTR(-EINVAL);
@@ -2820,33 +2830,19 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
return ERR_PTR(-ENOMEM);
}
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(spi, idx, 0xFF);
+ spi_set_all_cs_unused(spi);
+ spi_set_chipselect(spi, 0, lookup.chip_select);
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
- spi_set_chipselect(spi, 0, lookup.chip_select);
/*
- * spi->chip_select[i] gives the corresponding physical CS for logical CS i
- * logical CS number is represented by setting the ith bit in spi->cs_index_mask
- * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
- * spi->chip_select[0] will give the physical CS.
- * By default spi->chip_select[0] will hold the physical CS number so, set
- * spi->cs_index_mask as 0x01.
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
*/
- spi->cs_index_mask = 0x01;
+ spi->cs_index_mask = BIT(0);
return spi;
}
@@ -3344,9 +3340,9 @@ int spi_register_controller(struct spi_controller *ctlr)
goto free_bus_id;
}
- /* Setting last_cs to -1 means no chip selected */
+ /* Setting last_cs to SPI_INVALID_CS means no chip selected */
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- ctlr->last_cs[idx] = -1;
+ ctlr->last_cs[idx] = SPI_INVALID_CS;
status = device_add(&ctlr->dev);
if (status < 0)
@@ -3687,8 +3683,7 @@ static struct spi_replaced_transfers *spi_replace_transfers(
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer **xferp,
- size_t maxsize,
- gfp_t gfp)
+ size_t maxsize)
{
struct spi_transfer *xfer = *xferp, *xfers;
struct spi_replaced_transfers *srt;
@@ -3699,7 +3694,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
count = DIV_ROUND_UP(xfer->len, maxsize);
/* Create replacement */
- srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
if (IS_ERR(srt))
return PTR_ERR(srt);
xfers = srt->inserted_transfers;
@@ -3759,14 +3754,16 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
* @maxsize: the maximum when to apply this
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * SPI message unoptimize phase, so it should only be called from
+ * optimize_message callbacks.
*
* Return: status of transformation
*/
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxsize,
- gfp_t gfp)
+ size_t maxsize)
{
struct spi_transfer *xfer;
int ret;
@@ -3781,7 +3778,7 @@ int spi_split_transfers_maxsize(struct spi_controller *ctlr,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
- maxsize, gfp);
+ maxsize);
if (ret)
return ret;
}
@@ -3799,14 +3796,16 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
* @maxwords: the number of words to limit each transfer to
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * SPI message unoptimize phase, so it should only be called from
+ * optimize_message callbacks.
*
* Return: status of transformation
*/
int spi_split_transfers_maxwords(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxwords,
- gfp_t gfp)
+ size_t maxwords)
{
struct spi_transfer *xfer;
@@ -3824,7 +3823,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr,
maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
- maxsize, gfp);
+ maxsize);
if (ret)
return ret;
}
@@ -4063,33 +4062,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (list_empty(&message->transfers))
return -EINVAL;
- /*
- * If an SPI controller does not support toggling the CS line on each
- * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
- * for the CS line, we can emulate the CS-per-word hardware function by
- * splitting transfers into one-word transfers and ensuring that
- * cs_change is set for each transfer.
- */
- if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi_is_csgpiod(spi))) {
- size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
- int ret;
-
- /* spi_split_transfers_maxsize() requires message->spi */
- message->spi = spi;
-
- ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
- GFP_KERNEL);
- if (ret)
- return ret;
-
- list_for_each_entry(xfer, &message->transfers, transfer_list) {
- /* Don't change cs_change on the last entry in the list */
- if (list_is_last(&xfer->transfer_list, &message->transfers))
- break;
- xfer->cs_change = 1;
- }
- }
+ message->spi = spi;
/*
* Half-duplex links include original MicroWire, and ones with
@@ -4202,6 +4175,167 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return 0;
}
+/*
+ * spi_split_transfers - generic handling of transfer splitting
+ * @msg: the message to split
+ *
+ * Under certain conditions, an SPI controller may not support arbitrary
+ * transfer sizes or other features required by a peripheral. This function
+ * will split the transfers in the message into smaller transfers that are
+ * supported by the controller.
+ *
+ * Controllers with special requirements not covered here can also split
+ * transfers in the optimize_message() callback.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+static int spi_split_transfers(struct spi_message *msg)
+{
+ struct spi_controller *ctlr = msg->spi->controller;
+ struct spi_transfer *xfer;
+ int ret;
+
+ /*
+ * If an SPI controller does not support toggling the CS line on each
+ * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+ * for the CS line, we can emulate the CS-per-word hardware function by
+ * splitting transfers into one-word transfers and ensuring that
+ * cs_change is set for each transfer.
+ */
+ if ((msg->spi->mode & SPI_CS_WORD) &&
+ (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
+ ret = spi_split_transfers_maxwords(ctlr, msg, 1);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* Don't change cs_change on the last entry in the list */
+ if (list_is_last(&xfer->transfer_list, &msg->transfers))
+ break;
+
+ xfer->cs_change = 1;
+ }
+ } else {
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
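
Editor's note: a concrete illustration of the first branch above (not part of the patch; foo_read_samples is a hypothetical peripheral helper). With SPI_CS_WORD set, bits_per_word == 8 and a single 4-byte transfer, spi_split_transfers_maxwords(ctlr, msg, 1) replaces the transfer with four 1-byte transfers, and the loop sets cs_change on all but the last so chip select toggles between words.

/* Peripheral-side view: one 4-byte transfer on a SPI_CS_WORD device. */
static int foo_read_samples(struct spi_device *spi, u8 *buf)
{
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = 4,	/* bits_per_word == 8, so four words */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	/*
	 * If the controller lacks SPI_CS_WORD support, or CS is a GPIO,
	 * spi_split_transfers() splits this into four 1-byte transfers
	 * with cs_change = 1 on all but the last one.
	 */
	return spi_sync(spi, &msg);
}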
+
+/*
+ * __spi_optimize_message - shared implementation for spi_optimize_message()
+ * and spi_maybe_optimize_message()
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers will call spi_optimize_message() and the spi core will
+ * call spi_maybe_optimize_message() instead of calling this directly.
+ *
+ * It is not valid to call this on a message that has already been optimized.
+ *
+ * Return: zero on success, else a negative error code
+ */
+static int __spi_optimize_message(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ struct spi_controller *ctlr = spi->controller;
+ int ret;
+
+ ret = __spi_validate(spi, msg);
+ if (ret)
+ return ret;
+
+ ret = spi_split_transfers(msg);
+ if (ret)
+ return ret;
+
+ if (ctlr->optimize_message) {
+ ret = ctlr->optimize_message(msg);
+ if (ret) {
+ spi_res_release(ctlr, msg);
+ return ret;
+ }
+ }
+
+ msg->optimized = true;
+
+ return 0;
+}
+
+/*
+ * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ */
+static int spi_maybe_optimize_message(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ if (msg->pre_optimized)
+ return 0;
+
+ return __spi_optimize_message(spi, msg);
+}
+
+/**
+ * spi_optimize_message - do any one-time validation and setup for a SPI message
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers that reuse the same message repeatedly may call this to
+ * perform as much message preparation as possible once, rather than repeating
+ * it each time a transfer is performed, improving throughput and reducing CPU
+ * usage.
+ *
+ * Once a message has been optimized, it cannot be modified with the exception
+ * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
+ * only the data in the memory it points to).
+ *
+ * Calls to this function must be balanced with calls to spi_unoptimize_message()
+ * to avoid leaking resources.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
+{
+ int ret;
+
+ ret = __spi_optimize_message(spi, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * This flag indicates that the peripheral driver called spi_optimize_message()
+ * and therefore we shouldn't unoptimize the message automatically when
+ * finalizing it, but rather wait until spi_unoptimize_message() is called
+ * by the peripheral driver.
+ */
+ msg->pre_optimized = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_optimize_message);
+
+/**
+ * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Calls to this function must be balanced with calls to spi_optimize_message().
+ *
+ * Context: can sleep
+ */
+void spi_unoptimize_message(struct spi_message *msg)
+{
+ __spi_unoptimize_message(msg);
+ msg->pre_optimized = false;
+}
+EXPORT_SYMBOL_GPL(spi_unoptimize_message);
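
Editor's note: for peripheral drivers, the intended calling pattern pairs the two exported functions around repeated submissions. A minimal sketch, assuming the message and its buffers are not modified between submissions (foo_stream is an illustrative name, not from this patch):

/* Submit the same pre-built message many times with one-time prep. */
static int foo_stream(struct spi_device *spi, struct spi_message *msg,
		      unsigned int count)
{
	unsigned int i;
	int ret;

	/* One-time validation and transfer splitting. */
	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* spi_maybe_optimize_message() sees pre_optimized and skips re-prep. */
		ret = spi_sync(spi, msg);
		if (ret)
			break;
	}

	/* Balance the optimize call to release split-transfer resources. */
	spi_unoptimize_message(msg);
	return ret;
}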
+
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -4214,8 +4348,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if (!ctlr->transfer)
return -ENOTSUPP;
- message->spi = spi;
-
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
@@ -4268,8 +4400,8 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
- ret = __spi_validate(spi, message);
- if (ret != 0)
+ ret = spi_maybe_optimize_message(spi, message);
+ if (ret)
return ret;
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
@@ -4281,60 +4413,11 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- return ret;
-}
-EXPORT_SYMBOL_GPL(spi_async);
-
-/**
- * spi_async_locked - version of spi_async with exclusive bus usage
- * @spi: device with which data will be exchanged
- * @message: describes the data transfers, including completion callback
- * Context: any (IRQs may be blocked, etc)
- *
- * This call may be used in_irq and other contexts which can't sleep,
- * as well as from task contexts which can sleep.
- *
- * The completion callback is invoked in a context which can't sleep.
- * Before that invocation, the value of message->status is undefined.
- * When the callback is issued, message->status holds either zero (to
- * indicate complete success) or a negative error code. After that
- * callback returns, the driver which issued the transfer request may
- * deallocate the associated memory; it's no longer in use by any SPI
- * core or controller driver code.
- *
- * Note that although all messages to a spi_device are handled in
- * FIFO order, messages may go to different devices in other orders.
- * Some device might be higher priority, or have various "hard" access
- * time requirements, for example.
- *
- * On detection of any fault during the transfer, processing of
- * the entire message is aborted, and the device is deselected.
- * Until returning from the associated message completion callback,
- * no other spi_message queued to that device will be processed.
- * (This rule applies equally to all the synchronous transfer calls,
- * which are wrappers around this core asynchronous primitive.)
- *
- * Return: zero on success, else a negative error code.
- */
-static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
-{
- struct spi_controller *ctlr = spi->controller;
- int ret;
- unsigned long flags;
-
- ret = __spi_validate(spi, message);
- if (ret != 0)
- return ret;
-
- spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
-
- ret = __spi_async(spi, message);
-
- spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+ spi_maybe_unoptimize_message(message);
return ret;
-
}
+EXPORT_SYMBOL_GPL(spi_async);
static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
@@ -4383,6 +4466,7 @@ static void spi_complete(void *arg)
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
+ unsigned long flags;
int status;
struct spi_controller *ctlr = spi->controller;
@@ -4391,12 +4475,10 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
return -ESHUTDOWN;
}
- status = __spi_validate(spi, message);
- if (status != 0)
+ status = spi_maybe_optimize_message(spi, message);
+ if (status)
return status;
- message->spi = spi;
-
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
@@ -4428,7 +4510,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
*/
message->complete = spi_complete;
message->context = &done;
- status = spi_async_locked(spi, message);
+
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ status = __spi_async(spi, message);
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
if (status == 0) {
wait_for_completion(&done);
status = message->status;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 655f2c959cd4..95fb5f1c91c1 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -802,7 +802,7 @@ static int spidev_probe(struct spi_device *spi)
spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
dev = device_create(&spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
- spi->master->bus_num, spi_get_chipselect(spi, 0));
+ spi->controller->bus_num, spi_get_chipselect(spi, 0));
status = PTR_ERR_OR_ZERO(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index b9934b9c2d70..9f30e0edadfe 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -384,7 +384,7 @@ static struct attribute *ssb_device_attrs[] = {
};
ATTRIBUTE_GROUPS(ssb_device);
-static struct bus_type ssb_bustype = {
+static const struct bus_type ssb_bustype = {
.name = "ssb",
.match = ssb_bus_match,
.probe = ssb_device_probe,
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index b8d55aa8c5c7..72172e870007 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 3626f429b002..68add4d598ae 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -794,7 +794,7 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
if (par->txbuf.buf && par->txbuf.len >= 1024)
sprintf(text1, ", %zu KiB buffer memory", par->txbuf.len >> 10);
if (spi)
- sprintf(text2, ", spi%d.%d at %d MHz", spi->master->bus_num,
+ sprintf(text2, ", spi%d.%d at %d MHz", spi->controller->bus_num,
spi_get_chipselect(spi, 0), spi->max_speed_hz / 1000000);
dev_info(fb_info->dev,
"%s frame buffer, %dx%d, %d KiB video memory%s, fps=%lu%s\n",
@@ -1215,7 +1215,7 @@ int fbtft_probe_common(struct fbtft_display *display,
/* 9-bit SPI setup */
if (par->spi && display->buswidth == 9) {
- if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
+ if (par->spi->controller->bits_per_word_mask & SPI_BPW_MASK(9)) {
par->spi->bits_per_word = 9;
} else {
dev_warn(&par->spi->dev,
diff --git a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
index b1f9474f36d5..f34a95611645 100644
--- a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
+++ b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
@@ -48,7 +48,7 @@ Example of usage:
-----------------
This example places the bridge on top of the i.MX WEIM parallel bus, see:
-Documentation/devicetree/bindings/bus/imx-weim.txt
+Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml
&weim {
controller@0,0 {
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index 927cfa4bc989..1e745a8d439c 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -64,7 +64,7 @@ config GREYBUS_HID
config GREYBUS_LIGHT
tristate "Greybus LED Class driver"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS_FLASH
help
Select this option if you have a device that follows the
Greybus LED Class specification.
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 87d36948c610..d62f97249aca 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -29,13 +29,9 @@ struct gb_channel {
struct attribute_group *attr_group;
const struct attribute_group **attr_groups;
struct led_classdev *led;
-#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
struct led_classdev_flash fled;
struct led_flash_setting intensity_uA;
struct led_flash_setting timeout_us;
-#else
- struct led_classdev cled;
-#endif
struct gb_light *light;
bool is_registered;
bool releasing;
@@ -84,7 +80,6 @@ static bool is_channel_flash(struct gb_channel *channel)
| GB_CHANNEL_MODE_INDICATOR));
}
-#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
@@ -153,22 +148,6 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
return __gb_lights_flash_intensity_set(channel, intensity);
}
-#else
-static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
-{
- return container_of(cdev, struct gb_channel, cled);
-}
-
-static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
-{
- return &channel->cled;
-}
-
-static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
-{
- return 0;
-}
-#endif
static int gb_lights_color_set(struct gb_channel *channel, u32 color);
static int gb_lights_fade_set(struct gb_channel *channel);
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index a3cb68cfa0f9..01883fbcd79b 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -16,8 +16,6 @@
struct gb_pwm_chip {
struct gb_connection *connection;
- u8 pwm_max; /* max pwm number */
-
struct pwm_chip chip;
};
@@ -26,32 +24,33 @@ static inline struct gb_pwm_chip *pwm_chip_to_gb_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct gb_pwm_chip, chip);
}
-static int gb_pwm_count_operation(struct gb_pwm_chip *pwmc)
+static int gb_pwm_get_npwm(struct gb_connection *connection)
{
struct gb_pwm_count_response response;
int ret;
- ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_PWM_COUNT,
+ ret = gb_operation_sync(connection, GB_PWM_TYPE_PWM_COUNT,
NULL, 0, &response, sizeof(response));
if (ret)
return ret;
- pwmc->pwm_max = response.count;
- return 0;
+
+ /*
+ * The request returns the highest allowed PWM id, so add one
+ * to get the number of PWMs.
+ */
+ return response.count + 1;
}
-static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_activate_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_activate_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -64,19 +63,16 @@ static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_deactivate_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_deactivate_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -89,21 +85,19 @@ static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
+static int gb_pwm_config_operation(struct pwm_chip *chip,
u8 which, u32 duty, u32 period)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_config_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
request.duty = cpu_to_le32(duty);
request.period = cpu_to_le32(period);
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -116,20 +110,18 @@ static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
+static int gb_pwm_set_polarity_operation(struct pwm_chip *chip,
u8 which, u8 polarity)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_polarity_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
request.polarity = polarity;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -142,19 +134,16 @@ static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_enable_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_enable_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -167,22 +156,19 @@ static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_disable_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_disable_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
&request, sizeof(request), NULL, 0);
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
@@ -190,19 +176,15 @@ static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
-
- return gb_pwm_activate_operation(pwmc, pwm->hwpwm);
+ return gb_pwm_activate_operation(chip, pwm->hwpwm);
};
static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
-
if (pwm_is_enabled(pwm))
- dev_warn(chip->dev, "freeing PWM device without disabling\n");
+ dev_warn(pwmchip_parent(chip), "freeing PWM device without disabling\n");
- gb_pwm_deactivate_operation(pwmc, pwm->hwpwm);
+ gb_pwm_deactivate_operation(chip, pwm->hwpwm);
}
static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -212,22 +194,21 @@ static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
bool enabled = pwm->state.enabled;
u64 period = state->period;
u64 duty_cycle = state->duty_cycle;
- struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
/* Set polarity */
if (state->polarity != pwm->state.polarity) {
if (enabled) {
- gb_pwm_disable_operation(pwmc, pwm->hwpwm);
+ gb_pwm_disable_operation(chip, pwm->hwpwm);
enabled = false;
}
- err = gb_pwm_set_polarity_operation(pwmc, pwm->hwpwm, state->polarity);
+ err = gb_pwm_set_polarity_operation(chip, pwm->hwpwm, state->polarity);
if (err)
return err;
}
if (!state->enabled) {
if (enabled)
- gb_pwm_disable_operation(pwmc, pwm->hwpwm);
+ gb_pwm_disable_operation(chip, pwm->hwpwm);
return 0;
}
@@ -243,13 +224,13 @@ static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty_cycle > period)
duty_cycle = period;
- err = gb_pwm_config_operation(pwmc, pwm->hwpwm, duty_cycle, period);
+ err = gb_pwm_config_operation(chip, pwm->hwpwm, duty_cycle, period);
if (err)
return err;
/* enable/disable */
if (!enabled)
- return gb_pwm_enable_operation(pwmc, pwm->hwpwm);
+ return gb_pwm_enable_operation(chip, pwm->hwpwm);
return 0;
}
@@ -266,61 +247,59 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
struct gb_connection *connection;
struct gb_pwm_chip *pwmc;
struct pwm_chip *chip;
- int ret;
-
- pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
- if (!pwmc)
- return -ENOMEM;
+ int ret, npwm;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
NULL);
- if (IS_ERR(connection)) {
- ret = PTR_ERR(connection);
- goto exit_pwmc_free;
- }
-
- pwmc->connection = connection;
- gb_connection_set_data(connection, pwmc);
- gb_gbphy_set_data(gbphy_dev, pwmc);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
ret = gb_connection_enable(connection);
if (ret)
goto exit_connection_destroy;
/* Query number of pwms present */
- ret = gb_pwm_count_operation(pwmc);
- if (ret)
+ ret = gb_pwm_get_npwm(connection);
+ if (ret < 0)
+ goto exit_connection_disable;
+ npwm = ret;
+
+ chip = pwmchip_alloc(&gbphy_dev->dev, npwm, sizeof(*pwmc));
+ if (IS_ERR(chip)) {
+ ret = PTR_ERR(chip);
goto exit_connection_disable;
+ }
+ gb_gbphy_set_data(gbphy_dev, chip);
- chip = &pwmc->chip;
+ pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ pwmc->connection = connection;
- chip->dev = &gbphy_dev->dev;
chip->ops = &gb_pwm_ops;
- chip->npwm = pwmc->pwm_max + 1;
ret = pwmchip_add(chip);
if (ret) {
dev_err(&gbphy_dev->dev,
"failed to register PWM: %d\n", ret);
- goto exit_connection_disable;
+ goto exit_pwmchip_put;
}
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
+exit_pwmchip_put:
+ pwmchip_put(chip);
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
-exit_pwmc_free:
- kfree(pwmc);
return ret;
}
static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
{
- struct gb_pwm_chip *pwmc = gb_gbphy_get_data(gbphy_dev);
+ struct pwm_chip *chip = gb_gbphy_get_data(gbphy_dev);
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_connection *connection = pwmc->connection;
int ret;
@@ -328,10 +307,10 @@ static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
- pwmchip_remove(&pwmc->chip);
+ pwmchip_remove(chip);
+ pwmchip_put(chip);
gb_connection_disable(connection);
gb_connection_destroy(connection);
- kfree(pwmc);
}
static const struct gbphy_device_id gb_pwm_id_table[] = {
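
Editor's note: the greybus driver now follows the generic pwmchip_alloc() lifecycle used in the hunks above. A condensed, illustrative sketch of that pattern (foo_* names are hypothetical; per-chip private data and ops setup elided to the minimum):

/* Register: the PWM core now owns the chip allocation. */
static struct pwm_chip *foo_pwm_register(struct device *dev, unsigned int npwm,
					 const struct pwm_ops *ops)
{
	struct pwm_chip *chip;
	int ret;

	chip = pwmchip_alloc(dev, npwm, 0 /* sizeof(private data), if any */);
	if (IS_ERR(chip))
		return chip;

	chip->ops = ops;

	ret = pwmchip_add(chip);
	if (ret) {
		pwmchip_put(chip);	/* undo pwmchip_alloc() on failure */
		return ERR_PTR(ret);
	}

	return chip;
}

/* Unregister: remove, then drop the reference taken by pwmchip_alloc(). */
static void foo_pwm_unregister(struct pwm_chip *chip)
{
	pwmchip_remove(chip);
	pwmchip_put(chip);
}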
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index efb3bec58e15..34f10685139f 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -42,7 +42,7 @@ struct gb_spilib {
#define XFER_TIMEOUT_TOLERANCE 200
-static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
+static struct spi_controller *get_controller_from_spi(struct gb_spilib *spi)
{
return gb_connection_get_data(spi->connection);
}
@@ -324,10 +324,10 @@ static void gb_spi_decode_response(struct gb_spilib *spi,
}
}
-static int gb_spi_transfer_one_message(struct spi_master *master,
+static int gb_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct gb_spilib *spi = spi_master_get_devdata(master);
+ struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
struct gb_connection *connection = spi->connection;
struct gb_spi_transfer_response *response;
struct gb_operation *operation;
@@ -371,21 +371,21 @@ static int gb_spi_transfer_one_message(struct spi_master *master,
out:
msg->status = ret;
clean_xfer_state(spi);
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return ret;
}
-static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
+static int gb_spi_prepare_transfer_hardware(struct spi_controller *ctlr)
{
- struct gb_spilib *spi = spi_master_get_devdata(master);
+ struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
return spi->ops->prepare_transfer_hardware(spi->parent);
}
-static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
+static int gb_spi_unprepare_transfer_hardware(struct spi_controller *ctlr)
{
- struct gb_spilib *spi = spi_master_get_devdata(master);
+ struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
spi->ops->unprepare_transfer_hardware(spi->parent);
@@ -440,7 +440,7 @@ static int gb_spi_get_master_config(struct gb_spilib *spi)
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
- struct spi_master *master = get_master_from_spi(spi);
+ struct spi_controller *ctlr = get_controller_from_spi(spi);
struct gb_spi_device_config_request request;
struct gb_spi_device_config_response response;
struct spi_board_info spi_board = { {0} };
@@ -471,11 +471,11 @@ static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
return -EINVAL;
spi_board.mode = le16_to_cpu(response.mode);
- spi_board.bus_num = master->bus_num;
+ spi_board.bus_num = ctlr->bus_num;
spi_board.chip_select = cs;
spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
- spidev = spi_new_device(master, &spi_board);
+ spidev = spi_new_device(ctlr, &spi_board);
if (!spidev)
return -EINVAL;
@@ -486,52 +486,52 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
struct spilib_ops *ops)
{
struct gb_spilib *spi;
- struct spi_master *master;
+ struct spi_controller *ctlr;
int ret;
u8 i;
/* Allocate master with space for data */
- master = spi_alloc_master(dev, sizeof(*spi));
- if (!master) {
+ ctlr = spi_alloc_master(dev, sizeof(*spi));
+ if (!ctlr) {
dev_err(dev, "cannot alloc SPI master\n");
return -ENOMEM;
}
- spi = spi_master_get_devdata(master);
+ spi = spi_controller_get_devdata(ctlr);
spi->connection = connection;
- gb_connection_set_data(connection, master);
+ gb_connection_set_data(connection, ctlr);
spi->parent = dev;
spi->ops = ops;
- /* get master configuration */
+ /* get controller configuration */
ret = gb_spi_get_master_config(spi);
if (ret)
goto exit_spi_put;
- master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
- master->num_chipselect = spi->num_chipselect;
- master->mode_bits = spi->mode;
- master->flags = spi->flags;
- master->bits_per_word_mask = spi->bits_per_word_mask;
+ ctlr->bus_num = -1; /* Allow spi-core to allocate it dynamically */
+ ctlr->num_chipselect = spi->num_chipselect;
+ ctlr->mode_bits = spi->mode;
+ ctlr->flags = spi->flags;
+ ctlr->bits_per_word_mask = spi->bits_per_word_mask;
/* Attach methods */
- master->cleanup = gb_spi_cleanup;
- master->setup = gb_spi_setup;
- master->transfer_one_message = gb_spi_transfer_one_message;
+ ctlr->cleanup = gb_spi_cleanup;
+ ctlr->setup = gb_spi_setup;
+ ctlr->transfer_one_message = gb_spi_transfer_one_message;
if (ops && ops->prepare_transfer_hardware) {
- master->prepare_transfer_hardware =
+ ctlr->prepare_transfer_hardware =
gb_spi_prepare_transfer_hardware;
}
if (ops && ops->unprepare_transfer_hardware) {
- master->unprepare_transfer_hardware =
+ ctlr->unprepare_transfer_hardware =
gb_spi_unprepare_transfer_hardware;
}
- master->auto_runtime_pm = true;
+ ctlr->auto_runtime_pm = true;
- ret = spi_register_master(master);
+ ret = spi_register_controller(ctlr);
if (ret < 0)
goto exit_spi_put;
@@ -548,12 +548,12 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
return 0;
exit_spi_put:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
exit_spi_unregister:
- spi_unregister_master(master);
+ spi_unregister_controller(ctlr);
return ret;
}
@@ -561,9 +561,9 @@ EXPORT_SYMBOL_GPL(gb_spilib_master_init);
void gb_spilib_master_exit(struct gb_connection *connection)
{
- struct spi_master *master = gb_connection_get_data(connection);
+ struct spi_controller *ctlr = gb_connection_get_data(connection);
- spi_unregister_master(master);
+ spi_unregister_controller(ctlr);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig
index ab3d9b057d56..08bcccdd0f1c 100644
--- a/drivers/staging/sm750fb/Kconfig
+++ b/drivers/staging/sm750fb/Kconfig
@@ -6,7 +6,6 @@ config FB_SM750
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select VIDEO_NOMODESET
help
Frame buffer driver for the Silicon Motion SM750 chip
with 2D acceleration and dual head support.
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0b262de6480..283804b49e91 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1515,7 +1515,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER)
RFbSetPower(priv, priv->wCurrentRate,
- conf->chandef.chan->hw_value);
+ conf->chanreq.oper.chan->hw_value);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->pcid->dev,
@@ -1684,6 +1684,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2abae90f3f52..7bbed462f062 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -794,7 +794,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_set_bss_mode(priv);
if (changed & (BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH))
- vnt_rf_setpower(priv, conf->chandef.chan);
+ vnt_rf_setpower(priv, conf->chanreq.oper.chan);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->usb->dev,
@@ -956,6 +956,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8eb9eb7ce5df..7f6ca8177845 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,7 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bd;
struct blk_integrity *bi;
blk_mode_t mode = BLK_OPEN_READ;
@@ -117,14 +117,14 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_flags |= DF_READ_ONLY;
- bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
+ bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
NULL);
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
goto out_free_bioset;
}
- ib_dev->ibd_bdev_handle = bdev_handle;
- ib_dev->ibd_bd = bd = bdev_handle->bdev;
+ ib_dev->ibd_bdev_file = bdev_file;
+ ib_dev->ibd_bd = bd = file_bdev(bdev_file);
q = bdev_get_queue(bd);
@@ -180,7 +180,7 @@ static int iblock_configure_device(struct se_device *dev)
return 0;
out_blkdev_put:
- bdev_release(ib_dev->ibd_bdev_handle);
+ fput(ib_dev->ibd_bdev_file);
out_free_bioset:
bioset_exit(&ib_dev->ibd_bio_set);
out:
@@ -205,8 +205,8 @@ static void iblock_destroy_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- if (ib_dev->ibd_bdev_handle)
- bdev_release(ib_dev->ibd_bdev_handle);
+ if (ib_dev->ibd_bdev_file)
+ fput(ib_dev->ibd_bdev_file);
bioset_exit(&ib_dev->ibd_bio_set);
}
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 683f9a55945b..91f6f4280666 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -32,7 +32,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
- struct bdev_handle *ibd_bdev_handle;
+ struct file *ibd_bdev_file;
bool ibd_readonly;
struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
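
Editor's note: both target backends converted above now use the same struct file based block-device access; a standalone sketch of that pattern as it appears in these hunks (foo_* names, the mode and the missing holder ops are placeholders):

/* Open a block device by path; the struct file is the handle to keep. */
static struct file *foo_open_bdev(const char *path, void *holder)
{
	struct file *bdev_file;

	bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
					   holder, NULL);
	if (IS_ERR(bdev_file))
		return bdev_file;

	/* The block_device itself is reached through file_bdev(). */
	pr_info("opened bdev with %llu sectors\n",
		(unsigned long long)bdev_nr_sectors(file_bdev(bdev_file)));

	return bdev_file;
}

/* Close: fput() replaces bdev_release() on the old bdev_handle API. */
static void foo_close_bdev(struct file *bdev_file)
{
	fput(bdev_file);
}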
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 41b7489d37ce..f98ebb18666b 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -352,7 +352,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
int ret;
if (scsi_device_get(sd)) {
@@ -366,18 +366,18 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
- bdev_handle = bdev_open_by_path(dev->udev_path,
+ bdev_file = bdev_file_open_by_path(dev->udev_path,
BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_err("pSCSI: bdev_open_by_path() failed\n");
scsi_device_put(sd);
- return PTR_ERR(bdev_handle);
+ return PTR_ERR(bdev_file);
}
- pdv->pdv_bdev_handle = bdev_handle;
+ pdv->pdv_bdev_file = bdev_file;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
- bdev_release(bdev_handle);
+ fput(bdev_file);
scsi_device_put(sd);
return ret;
}
@@ -564,9 +564,9 @@ static void pscsi_destroy_device(struct se_device *dev)
* from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
- pdv->pdv_bdev_handle) {
- bdev_release(pdv->pdv_bdev_handle);
- pdv->pdv_bdev_handle = NULL;
+ pdv->pdv_bdev_file) {
+ fput(pdv->pdv_bdev_file);
+ pdv->pdv_bdev_file = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
@@ -907,12 +907,15 @@ new_bio:
return 0;
fail:
- if (bio)
- bio_put(bio);
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
+ }
while (req->bio) {
bio = req->bio;
req->bio = bio->bi_next;
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
}
req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -994,8 +997,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
- if (pdv->pdv_bdev_handle)
- return bdev_nr_sectors(pdv->pdv_bdev_handle->bdev);
+ if (pdv->pdv_bdev_file)
+ return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file));
return 0;
}
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index b0a3ef136592..9acaa21e4c78 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,7 +37,7 @@ struct pscsi_dev_virt {
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
- struct bdev_handle *pdv_bdev_handle;
+ struct file *pdv_bdev_file;
struct scsi_device *pdv_sd;
struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned;
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 4b1092127694..1892e49a8e6a 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func)
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
put_device(&optee_device->dev);
+ return rc;
}
if (func == PTA_CMD_GET_DEVICES_SUPP)
device_create_file(&optee_device->dev,
&dev_attr_need_supplicant);
- return rc;
+ return 0;
}
static int __optee_enumerate_devices(u32 func)
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 792d6fae4354..e59c20d74b36 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1226,7 +1226,7 @@ static int tee_client_device_uevent(const struct device *dev,
return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
}
-struct bus_type tee_bus_type = {
+const struct bus_type tee_bus_type = {
.name = "tee",
.match = tee_client_device_match,
.uevent = tee_client_device_uevent,
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 17a8ae5e991d..204ed89a3ec9 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -83,17 +83,6 @@ config THERMAL_OF
Say 'Y' here if you need to build thermal infrastructure
based on device tree.
-config THERMAL_WRITABLE_TRIPS
- bool "Enable writable trip points"
- help
- This option allows the system integrator to choose whether
- trip temperatures can be changed from userspace. The
- writable trips need to be specified when setting up the
- thermal zone but the choice here takes precedence.
-
- Say 'Y' here if you would like to allow userspace tools to
- change trip temperatures.
-
choice
prompt "Default Thermal governor"
default THERMAL_DEFAULT_GOV_STEP_WISE
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index e2cc7bd30862..9d1b1459700d 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -91,12 +91,16 @@ struct cpufreq_cooling_device {
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
unsigned int freq)
{
+ struct em_perf_state *table;
int i;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(cpufreq_cdev->em);
for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
- if (freq > cpufreq_cdev->em->table[i].frequency)
+ if (freq > table[i].frequency)
break;
}
+ rcu_read_unlock();
return cpufreq_cdev->max_level - i - 1;
}
@@ -104,16 +108,20 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
u32 freq)
{
+ struct em_perf_state *table;
unsigned long power_mw;
int i;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(cpufreq_cdev->em);
for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
- if (freq > cpufreq_cdev->em->table[i].frequency)
+ if (freq > table[i].frequency)
break;
}
- power_mw = cpufreq_cdev->em->table[i + 1].power;
+ power_mw = table[i + 1].power;
power_mw /= MICROWATT_PER_MILLIWATT;
+ rcu_read_unlock();
return power_mw;
}
@@ -121,18 +129,24 @@ static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
u32 power)
{
+ struct em_perf_state *table;
unsigned long em_power_mw;
+ u32 freq;
int i;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(cpufreq_cdev->em);
for (i = cpufreq_cdev->max_level; i > 0; i--) {
/* Convert EM power to milli-Watts to make safe comparison */
- em_power_mw = cpufreq_cdev->em->table[i].power;
+ em_power_mw = table[i].power;
em_power_mw /= MICROWATT_PER_MILLIWATT;
if (power >= em_power_mw)
break;
}
+ freq = table[i].frequency;
+ rcu_read_unlock();
- return cpufreq_cdev->em->table[i].frequency;
+ return freq;
}
/**
@@ -262,8 +276,9 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
- unsigned int freq, num_cpus, idx;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+ unsigned int freq, num_cpus, idx;
+ struct em_perf_state *table;
/* Request state should be less than max_level */
if (state > cpufreq_cdev->max_level)
@@ -272,7 +287,12 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
idx = cpufreq_cdev->max_level - state;
- freq = cpufreq_cdev->em->table[idx].frequency;
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(cpufreq_cdev->em);
+ freq = table[idx].frequency;
+ rcu_read_unlock();
+
*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
return 0;
@@ -378,8 +398,17 @@ static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/* Use the Energy Model table if available */
if (cpufreq_cdev->em) {
+ struct em_perf_state *table;
+ unsigned int freq;
+
idx = cpufreq_cdev->max_level - state;
- return cpufreq_cdev->em->table[idx].frequency;
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(cpufreq_cdev->em);
+ freq = table[idx].frequency;
+ rcu_read_unlock();
+
+ return freq;
}
#endif
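
A minimal sketch of the RCU-protected Energy Model lookup pattern used in the hunks above, assuming a valid struct em_perf_domain pointer; the helper name is illustrative, not part of the patch:

	/* Read one performance-state frequency; the EM table may be republished,
	 * so dereference it only under rcu_read_lock(). */
	static unsigned long example_em_freq(struct em_perf_domain *pd, int idx)
	{
		struct em_perf_state *table;
		unsigned long freq;

		rcu_read_lock();
		table = em_perf_state_from_pd(pd);
		freq = table[idx].frequency;
		rcu_read_unlock();

		return freq;
	}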
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index 160d64913057..a27aff88cd96 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -197,7 +197,7 @@ static int da9062_thermal_probe(struct platform_device *pdev)
mutex_init(&thermal->lock);
thermal->zone = thermal_zone_device_register_with_trips(thermal->config->name,
- trips, ARRAY_SIZE(trips), 0, thermal,
+ trips, ARRAY_SIZE(trips), thermal,
&da9062_thermal_ops, NULL, pp_tmp,
0);
if (IS_ERR(thermal->zone)) {
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 262e62ab6cf2..50dec24e967a 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -87,6 +87,7 @@ static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
struct devfreq_cooling_device *dfc = cdev->devdata;
struct devfreq *df = dfc->devfreq;
struct device *dev = df->dev.parent;
+ struct em_perf_state *table;
unsigned long freq;
int perf_idx;
@@ -100,7 +101,11 @@ static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
if (dfc->em_pd) {
perf_idx = dfc->max_state - state;
- freq = dfc->em_pd->table[perf_idx].frequency * 1000;
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(dfc->em_pd);
+ freq = table[perf_idx].frequency * 1000;
+ rcu_read_unlock();
} else {
freq = dfc->freq_table[state];
}
@@ -123,14 +128,21 @@ static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
*/
static int get_perf_idx(struct em_perf_domain *em_pd, unsigned long freq)
{
- int i;
+ struct em_perf_state *table;
+ int i, idx = -EINVAL;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(em_pd);
for (i = 0; i < em_pd->nr_perf_states; i++) {
- if (em_pd->table[i].frequency == freq)
- return i;
+ if (table[i].frequency != freq)
+ continue;
+
+ idx = i;
+ break;
}
+ rcu_read_unlock();
- return -EINVAL;
+ return idx;
}
static unsigned long get_voltage(struct devfreq *df, unsigned long freq)
@@ -181,6 +193,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
struct devfreq_cooling_device *dfc = cdev->devdata;
struct devfreq *df = dfc->devfreq;
struct devfreq_dev_status status;
+ struct em_perf_state *table;
unsigned long state;
unsigned long freq;
unsigned long voltage;
@@ -204,7 +217,11 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
state = dfc->capped_state;
/* Convert EM power into milli-Watts first */
- dfc->res_util = dfc->em_pd->table[state].power;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(dfc->em_pd);
+ dfc->res_util = table[state].power;
+ rcu_read_unlock();
+
dfc->res_util /= MICROWATT_PER_MILLIWATT;
dfc->res_util *= SCALE_ERROR_MITIGATION;
@@ -225,7 +242,11 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
_normalize_load(&status);
/* Convert EM power into milli-Watts first */
- *power = dfc->em_pd->table[perf_idx].power;
+ rcu_read_lock();
+ table = em_perf_state_from_pd(dfc->em_pd);
+ *power = table[perf_idx].power;
+ rcu_read_unlock();
+
*power /= MICROWATT_PER_MILLIWATT;
/* Scale power for utilization */
*power *= status.busy_time;
@@ -245,13 +266,19 @@ static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
struct devfreq_cooling_device *dfc = cdev->devdata;
+ struct em_perf_state *table;
int perf_idx;
if (state > dfc->max_state)
return -EINVAL;
perf_idx = dfc->max_state - state;
- *power = dfc->em_pd->table[perf_idx].power;
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(dfc->em_pd);
+ *power = table[perf_idx].power;
+ rcu_read_unlock();
+
*power /= MICROWATT_PER_MILLIWATT;
return 0;
@@ -264,6 +291,7 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
struct devfreq *df = dfc->devfreq;
struct devfreq_dev_status status;
unsigned long freq, em_power_mw;
+ struct em_perf_state *table;
s32 est_power;
int i;
@@ -288,13 +316,16 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
* Find the first cooling state that is within the power
* budget. The EM power table is sorted ascending.
*/
+ rcu_read_lock();
+ table = em_perf_state_from_pd(dfc->em_pd);
for (i = dfc->max_state; i > 0; i--) {
/* Convert EM power to milli-Watts to make safe comparison */
- em_power_mw = dfc->em_pd->table[i].power;
+ em_power_mw = table[i].power;
em_power_mw /= MICROWATT_PER_MILLIWATT;
if (est_power >= em_power_mw)
break;
}
+ rcu_read_unlock();
*state = dfc->max_state - i;
dfc->capped_state = *state;
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 6ddf0accdc98..c3b2943a2db8 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -49,7 +49,7 @@ static int thermal_zone_trip_update(struct thermal_zone_device *tz,
if (instance->target == 0 && tz->temperature >= trip->temperature)
instance->target = 1;
else if (instance->target == 1 &&
- tz->temperature <= trip->temperature - trip->hysteresis)
+ tz->temperature < trip->temperature - trip->hysteresis)
instance->target = 0;
dev_dbg(&instance->cdev->device, "target=%d\n",
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index 538abb7de4e2..4da25a0009d7 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -18,22 +18,24 @@
static int get_trip_level(struct thermal_zone_device *tz)
{
const struct thermal_trip *trip, *level_trip = NULL;
- int trip_level;
+ int trip_level = -1;
for_each_trip(tz, trip) {
if (trip->temperature >= tz->temperature)
- break;
+ continue;
+
+ trip_level++;
- level_trip = trip;
+ if (!level_trip || trip->temperature > level_trip->temperature)
+ level_trip = trip;
}
/* Bail out if the temperature is not greater than any trips. */
- if (!level_trip)
+ if (trip_level < 0)
return 0;
- trip_level = thermal_zone_trip_id(tz, level_trip);
-
- trace_thermal_zone_trip(tz, trip_level, level_trip->type);
+ trace_thermal_zone_trip(tz, thermal_zone_trip_id(tz, level_trip),
+ level_trip->type);
return trip_level;
}
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 81e061f183ad..1b17dc4c219c 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -711,6 +711,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
if (!tz->tzp->sustainable_power)
dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
+ else
+ params->sustainable_power = tz->tzp->sustainable_power;
estimate_pid_constants(tz, tz->tzp->sustainable_power,
params->trip_switch_on,
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 7019c4fdd549..83eaae5ca3b8 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -115,7 +115,8 @@ struct thermal_soc_data {
};
static struct thermal_trip trips[] = {
- [IMX_TRIP_PASSIVE] = { .type = THERMAL_TRIP_PASSIVE },
+ [IMX_TRIP_PASSIVE] = { .type = THERMAL_TRIP_PASSIVE,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP },
[IMX_TRIP_CRITICAL] = { .type = THERMAL_TRIP_CRITICAL },
};
@@ -354,6 +355,7 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip_id,
return -EINVAL;
imx_set_alarm_temp(data, temp);
+ trips[IMX_TRIP_PASSIVE].temperature = temp;
pm_runtime_put(data->dev);
@@ -699,7 +701,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
data->tz = thermal_zone_device_register_with_trips("imx_thermal_zone",
trips,
ARRAY_SIZE(trips),
- BIT(IMX_TRIP_PASSIVE), data,
+ data,
&imx_tz_ops, NULL,
IMX_PASSIVE_DELAY,
IMX_POLLING_DELAY);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index b43953b5539f..a31f2f32996a 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -23,7 +23,6 @@ config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
depends on X86_THERMAL_VECTOR
select THERMAL_GOV_USER_SPACE
- select THERMAL_WRITABLE_TRIPS
select INTEL_TCC
default m
help
@@ -47,7 +46,6 @@ config INTEL_SOC_DTS_THERMAL
tristate "Intel SoCs DTS thermal driver"
depends on X86 && PCI && ACPI
select INTEL_SOC_DTS_IOSF_CORE
- select THERMAL_WRITABLE_TRIPS
help
Enable this to register Intel SoCs (e.g. Bay Trail) platform digital
temperature sensor (DTS). These SoCs have two additional DTSs in
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 3e4bfe817fac..400fde7cb3b1 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -58,15 +58,10 @@ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
static void int340x_thermal_critical(struct thermal_zone_device *zone)
{
- dev_dbg(&zone->device, "%s: critical temperature reached\n", zone->type);
+ dev_dbg(thermal_zone_device(zone), "%s: critical temperature reached\n",
+ thermal_zone_device_type(zone));
}
-static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
- .get_temp = int340x_thermal_get_zone_temp,
- .set_trip_temp = int340x_thermal_set_trip_temp,
- .critical = int340x_thermal_critical,
-};
-
static inline void *int_to_trip_priv(int i)
{
return (void *)(long)i;
@@ -126,11 +121,15 @@ static struct thermal_zone_params int340x_thermal_params = {
struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
int (*get_temp) (struct thermal_zone_device *, int *))
{
+ const struct thermal_zone_device_ops zone_ops = {
+ .set_trip_temp = int340x_thermal_set_trip_temp,
+ .critical = int340x_thermal_critical,
+ .get_temp = get_temp ? get_temp : int340x_thermal_get_zone_temp,
+ };
struct int34x_thermal_zone *int34x_zone;
struct thermal_trip *zone_trips;
unsigned long long trip_cnt = 0;
unsigned long long hyst;
- int trip_mask = 0;
acpi_status status;
int i, ret;
@@ -140,21 +139,9 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
int34x_zone->adev = adev;
- int34x_zone->ops = kmemdup(&int340x_thermal_zone_ops,
- sizeof(int340x_thermal_zone_ops), GFP_KERNEL);
- if (!int34x_zone->ops) {
- ret = -ENOMEM;
- goto err_ops_alloc;
- }
-
- if (get_temp)
- int34x_zone->ops->get_temp = get_temp;
-
status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status))
int34x_zone->aux_trip_nr = trip_cnt;
- trip_mask = BIT(trip_cnt) - 1;
- }
zone_trips = kzalloc(sizeof(*zone_trips) * (trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT),
GFP_KERNEL);
@@ -166,6 +153,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
for (i = 0; i < trip_cnt; i++) {
zone_trips[i].type = THERMAL_TRIP_PASSIVE;
zone_trips[i].temperature = THERMAL_TEMP_INVALID;
+ zone_trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
}
trip_cnt = int340x_thermal_read_trips(adev, zone_trips, trip_cnt);
@@ -179,17 +167,17 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
for (i = 0; i < trip_cnt; ++i)
zone_trips[i].hysteresis = hyst;
- int34x_zone->trips = zone_trips;
-
int34x_zone->lpat_table = acpi_lpat_get_conversion_table(adev->handle);
int34x_zone->zone = thermal_zone_device_register_with_trips(
acpi_device_bid(adev),
zone_trips, trip_cnt,
- trip_mask, int34x_zone,
- int34x_zone->ops,
+ int34x_zone,
+ &zone_ops,
&int340x_thermal_params,
0, 0);
+ kfree(zone_trips);
+
if (IS_ERR(int34x_zone->zone)) {
ret = PTR_ERR(int34x_zone->zone);
goto err_thermal_zone;
@@ -203,11 +191,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
err_enable:
thermal_zone_device_unregister(int34x_zone->zone);
err_thermal_zone:
- kfree(int34x_zone->trips);
acpi_lpat_free_conversion_table(int34x_zone->lpat_table);
err_trips_alloc:
- kfree(int34x_zone->ops);
-err_ops_alloc:
kfree(int34x_zone);
return ERR_PTR(ret);
}
@@ -217,8 +202,6 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone *int34x_zone)
{
thermal_zone_device_unregister(int34x_zone->zone);
acpi_lpat_free_conversion_table(int34x_zone->lpat_table);
- kfree(int34x_zone->trips);
- kfree(int34x_zone->ops);
kfree(int34x_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
index e0df6271facc..d504e271009a 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
@@ -20,10 +20,8 @@ struct active_trip {
struct int34x_thermal_zone {
struct acpi_device *adev;
- struct thermal_trip *trips;
int aux_trip_nr;
struct thermal_zone_device *zone;
- struct thermal_zone_device_ops *ops;
void *priv_data;
struct acpi_lpat_conversion_table *lpat_table;
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 649f67fdf345..d75fae7b7ed2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -176,14 +176,14 @@ static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
int *temp)
{
int cpu;
- int curr_temp;
+ int curr_temp, ret;
*temp = 0;
for_each_online_cpu(cpu) {
- curr_temp = intel_tcc_get_temp(cpu, false);
- if (curr_temp < 0)
- return curr_temp;
+ ret = intel_tcc_get_temp(cpu, &curr_temp, false);
+ if (ret < 0)
+ return ret;
if (!*temp || curr_temp > *temp)
*temp = curr_temp;
}
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 95c6013a33fb..674f3c85dfbc 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_INTEL_HSB_THERMAL 0x0A03
#define PCI_DEVICE_ID_INTEL_ICL_THERMAL 0x8a03
#define PCI_DEVICE_ID_INTEL_JSL_THERMAL 0x4E03
+#define PCI_DEVICE_ID_INTEL_LNLM_THERMAL 0x641D
#define PCI_DEVICE_ID_INTEL_MTLP_THERMAL 0x7D03
#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index d7495571dd5d..14e34eabc419 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -233,11 +233,7 @@ static int get_trip_temp(struct proc_thermal_pci *pci_info)
return temp;
}
-static struct thermal_trip psv_trip = {
- .type = THERMAL_TRIP_PASSIVE,
-};
-
-static struct thermal_zone_device_ops tzone_ops = {
+static const struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
.set_trip_temp = sys_set_trip_temp,
};
@@ -251,6 +247,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
{
struct proc_thermal_device *proc_priv;
struct proc_thermal_pci *pci_info;
+ struct thermal_trip psv_trip = {
+ .type = THERMAL_TRIP_PASSIVE,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
+ };
int irq_flag = 0, irq, ret;
bool msi_irq = false;
@@ -290,7 +290,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
psv_trip.temperature = get_trip_temp(pci_info);
pci_info->tzone = thermal_zone_device_register_with_trips("TCPU_PCI", &psv_trip,
- 1, 1, pci_info,
+ 1, pci_info,
&tzone_ops,
&tzone_params, 0, 0);
if (IS_ERR(pci_info->tzone)) {
@@ -407,6 +407,7 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
+ { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
{ PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR |
PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
index 2f00fc3bf274..e964a9375722 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
@@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu)
if (topology_physical_package_id(cpu))
return 0;
- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true);
if (!rp) {
- rp = rapl_add_package(cpu, &rapl_mmio_priv, true);
+ rp = rapl_add_package_cpuslocked(cpu, &rapl_mmio_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -42,14 +42,14 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu)
struct rapl_package *rp;
int lead_cpu;
- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true);
if (!rp)
return 0;
cpumask_clear_cpu(cpu, &rp->cpumask);
lead_cpu = cpumask_first(&rp->cpumask);
if (lead_cpu >= nr_cpu_ids)
- rapl_remove_package(rp);
+ rapl_remove_package_cpuslocked(rp);
else if (rp->lead_cpu == cpu)
rp->lead_cpu = lead_cpu;
return 0;
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index 3b04c6ec4fca..40d664a66cdc 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -607,7 +607,7 @@ void __init intel_hfi_init(void)
/* There is one HFI instance per die/package. */
max_hfi_instances = topology_max_packages() *
- topology_max_die_per_package();
+ topology_max_dies_per_package();
/*
* This allocation may fail. CPU hotplug callbacks must check
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index b3905e34c507..f5be2c389351 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -84,7 +84,6 @@ struct pch_thermal_device {
void __iomem *hw_base;
struct pci_dev *pdev;
struct thermal_zone_device *tzd;
- struct thermal_trip trips[PCH_MAX_TRIPS];
bool bios_enabled;
};
@@ -94,7 +93,8 @@ struct pch_thermal_device {
* passive trip temperature using _PSV method. There is no specific
* passive temperature setting in MMIO interface of this PCI device.
*/
-static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip)
+static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ struct thermal_trip *trip)
{
struct acpi_device *adev;
int temp;
@@ -106,12 +106,13 @@ static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip)
if (thermal_acpi_passive_trip_temp(adev, &temp) || temp <= 0)
return 0;
- ptd->trips[trip].type = THERMAL_TRIP_PASSIVE;
- ptd->trips[trip].temperature = temp;
+ trip->type = THERMAL_TRIP_PASSIVE;
+ trip->temperature = temp;
return 1;
}
#else
-static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip)
+static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ struct thermal_trip *trip)
{
return 0;
}
@@ -131,7 +132,7 @@ static void pch_critical(struct thermal_zone_device *tzd)
thermal_zone_device_type(tzd));
}
-static struct thermal_zone_device_ops tzd_ops = {
+static const struct thermal_zone_device_ops tzd_ops = {
.get_temp = pch_thermal_get_temp,
.critical = pch_critical,
};
@@ -159,6 +160,7 @@ static const char *board_names[] = {
static int intel_pch_thermal_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct thermal_trip ptd_trips[PCH_MAX_TRIPS] = { 0 };
enum pch_board_ids board_id = id->driver_data;
struct pch_thermal_device *ptd;
int nr_trips = 0;
@@ -220,22 +222,22 @@ read_trips:
trip_temp = readw(ptd->hw_base + WPT_CTT);
trip_temp &= 0x1FF;
if (trip_temp) {
- ptd->trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp);
- ptd->trips[nr_trips++].type = THERMAL_TRIP_CRITICAL;
+ ptd_trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp);
+ ptd_trips[nr_trips++].type = THERMAL_TRIP_CRITICAL;
}
trip_temp = readw(ptd->hw_base + WPT_PHL);
trip_temp &= 0x1FF;
if (trip_temp) {
- ptd->trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp);
- ptd->trips[nr_trips++].type = THERMAL_TRIP_HOT;
+ ptd_trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp);
+ ptd_trips[nr_trips++].type = THERMAL_TRIP_HOT;
}
- nr_trips += pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+ nr_trips += pch_wpt_add_acpi_psv_trip(ptd, &ptd_trips[nr_trips]);
ptd->tzd = thermal_zone_device_register_with_trips(board_names[board_id],
- ptd->trips, nr_trips,
- 0, ptd, &tzd_ops,
+ ptd_trips, nr_trips,
+ ptd, &tzd_ops,
NULL, 0, 0);
if (IS_ERR(ptd->tzd)) {
dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index bc6eb0dd66a4..4ba649370aa1 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -587,7 +587,7 @@ static int powerclamp_idle_injection_register(void)
poll_pkg_cstate_enable = false;
if (cpumask_equal(cpu_present_mask, idle_injection_cpu_mask)) {
ii_dev = idle_inject_register_full(idle_injection_cpu_mask, idle_inject_update);
- if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
+ if (topology_max_packages() == 1 && topology_max_dies_per_package() == 1)
poll_pkg_cstate_enable = true;
} else {
ii_dev = idle_inject_register(idle_injection_cpu_mask);
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
index 646ca8bd40a9..ec6ad26027bc 100644
--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -93,10 +93,6 @@
/* Quark DTS has 2 trip points: hot & catastrophic */
#define QRK_MAX_DTS_TRIPS 2
-/* If DTS not locked, all trip points are configurable */
-#define QRK_DTS_WR_MASK_SET 0x3
-/* If DTS locked, all trip points are not configurable */
-#define QRK_DTS_WR_MASK_CLR 0
#define DEFAULT_POLL_DELAY 2000
@@ -105,7 +101,6 @@ struct soc_sensor_entry {
u32 store_ptps;
u32 store_dts_enable;
struct thermal_zone_device *tzone;
- struct thermal_trip trips[QRK_MAX_DTS_TRIPS];
};
static struct soc_sensor_entry *soc_dts;
@@ -293,7 +288,7 @@ static int sys_change_mode(struct thermal_zone_device *tzd,
return ret;
}
-static struct thermal_zone_device_ops tzone_ops = {
+static const struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
.set_trip_temp = sys_set_trip_temp,
.change_mode = sys_change_mode,
@@ -320,10 +315,10 @@ static void free_soc_dts(struct soc_sensor_entry *aux_entry)
static struct soc_sensor_entry *alloc_soc_dts(void)
{
+ struct thermal_trip trips[QRK_MAX_DTS_TRIPS] = { 0 };
struct soc_sensor_entry *aux_entry;
int err;
u32 out;
- int wr_mask;
aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
if (!aux_entry) {
@@ -337,13 +332,7 @@ static struct soc_sensor_entry *alloc_soc_dts(void)
if (err)
goto err_ret;
- if (out & QRK_DTS_LOCK_BIT) {
- aux_entry->locked = true;
- wr_mask = QRK_DTS_WR_MASK_CLR;
- } else {
- aux_entry->locked = false;
- wr_mask = QRK_DTS_WR_MASK_SET;
- }
+ aux_entry->locked = !!(out & QRK_DTS_LOCK_BIT);
/* Store DTS default state if DTS registers are not locked */
if (!aux_entry->locked) {
@@ -360,19 +349,22 @@ static struct soc_sensor_entry *alloc_soc_dts(void)
&aux_entry->store_ptps);
if (err)
goto err_ret;
+
+ trips[QRK_DTS_ID_TP_CRITICAL].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
+ trips[QRK_DTS_ID_TP_HOT].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
}
- aux_entry->trips[QRK_DTS_ID_TP_CRITICAL].temperature = get_trip_temp(QRK_DTS_ID_TP_CRITICAL);
- aux_entry->trips[QRK_DTS_ID_TP_CRITICAL].type = THERMAL_TRIP_CRITICAL;
+ trips[QRK_DTS_ID_TP_CRITICAL].temperature = get_trip_temp(QRK_DTS_ID_TP_CRITICAL);
+ trips[QRK_DTS_ID_TP_CRITICAL].type = THERMAL_TRIP_CRITICAL;
- aux_entry->trips[QRK_DTS_ID_TP_HOT].temperature = get_trip_temp(QRK_DTS_ID_TP_HOT);
- aux_entry->trips[QRK_DTS_ID_TP_HOT].type = THERMAL_TRIP_HOT;
+ trips[QRK_DTS_ID_TP_HOT].temperature = get_trip_temp(QRK_DTS_ID_TP_HOT);
+ trips[QRK_DTS_ID_TP_HOT].type = THERMAL_TRIP_HOT;
aux_entry->tzone = thermal_zone_device_register_with_trips("quark_dts",
- aux_entry->trips,
+ trips,
QRK_MAX_DTS_TRIPS,
- wr_mask,
- aux_entry, &tzone_ops,
+ aux_entry,
+ &tzone_ops,
NULL, 0, polling_delay);
if (IS_ERR(aux_entry->tzone)) {
err = PTR_ERR(aux_entry->tzone);
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index d00def3c4703..2ab943b66f7a 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -129,22 +129,6 @@ err_restore_ptps:
return status;
}
-static int configure_trip(struct intel_soc_dts_sensor_entry *dts,
- int thres_index, enum thermal_trip_type trip_type,
- int temp)
-{
- int ret;
-
- ret = update_trip_temp(dts->sensors, thres_index, temp);
- if (ret)
- return ret;
-
- dts->trips[thres_index].temperature = temp;
- dts->trips[thres_index].type = trip_type;
-
- return 0;
-}
-
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
int temp)
{
@@ -184,7 +168,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd,
return 0;
}
-static struct thermal_zone_device_ops tzone_ops = {
+static const struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
.set_trip_temp = sys_set_trip_temp,
};
@@ -218,15 +202,10 @@ static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts)
}
static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
- bool critical_trip)
+ struct thermal_trip *trips)
{
- int writable_trip_cnt = SOC_MAX_DTS_TRIPS;
char name[10];
- unsigned long trip;
- int trip_mask;
- unsigned long ptps;
u32 store_ptps;
- unsigned long i;
int ret;
/* Store status to restore on exit */
@@ -237,26 +216,20 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
dts->id = id;
- if (critical_trip)
- writable_trip_cnt--;
-
- trip_mask = GENMASK(writable_trip_cnt - 1, 0);
-
/* Check if the writable trip we provide is not used by BIOS */
ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
SOC_DTS_OFFSET_PTPS, &store_ptps);
- if (ret)
- trip_mask = 0;
- else {
- ptps = store_ptps;
- for_each_set_clump8(i, trip, &ptps, writable_trip_cnt * 8)
- trip_mask &= ~BIT(i / 8);
+ if (!ret) {
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ if (store_ptps & (0xFFU << i * 8))
+ trips[i].flags &= ~THERMAL_TRIP_FLAG_RW_TEMP;
+ }
}
- dts->trip_mask = trip_mask;
snprintf(name, sizeof(name), "soc_dts%d", id);
- dts->tzone = thermal_zone_device_register_with_trips(name, dts->trips,
+ dts->tzone = thermal_zone_device_register_with_trips(name, trips,
SOC_MAX_DTS_TRIPS,
- trip_mask,
dts, &tzone_ops,
NULL, 0, 0);
if (IS_ERR(dts->tzone)) {
@@ -315,14 +288,23 @@ EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_interrupt_handler);
static void dts_trips_reset(struct intel_soc_dts_sensors *sensors, int dts_index)
{
- configure_trip(&sensors->soc_dts[dts_index], 0, 0, 0);
- configure_trip(&sensors->soc_dts[dts_index], 1, 0, 0);
+ update_trip_temp(sensors, 0, 0);
+ update_trip_temp(sensors, 1, 0);
+}
+
+static void set_trip(struct thermal_trip *trip, enum thermal_trip_type type,
+ u8 flags, int temp)
+{
+ trip->type = type;
+ trip->flags = flags;
+ trip->temperature = temp;
}
struct intel_soc_dts_sensors *
intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type,
bool critical_trip, int crit_offset)
{
+ struct thermal_trip trips[SOC_MAX_DTS_SENSORS][SOC_MAX_DTS_TRIPS] = { 0 };
struct intel_soc_dts_sensors *sensors;
int tj_max;
int ret;
@@ -345,30 +327,33 @@ intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type,
sensors->tj_max = tj_max * 1000;
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- enum thermal_trip_type trip_type;
int temp;
sensors->soc_dts[i].sensors = sensors;
- ret = configure_trip(&sensors->soc_dts[i], 0,
- THERMAL_TRIP_PASSIVE, 0);
+ set_trip(&trips[i][0], THERMAL_TRIP_PASSIVE,
+ THERMAL_TRIP_FLAG_RW_TEMP, 0);
+
+ ret = update_trip_temp(sensors, 0, 0);
if (ret)
goto err_reset_trips;
if (critical_trip) {
- trip_type = THERMAL_TRIP_CRITICAL;
temp = sensors->tj_max - crit_offset;
+ set_trip(&trips[i][1], THERMAL_TRIP_CRITICAL, 0, temp);
} else {
- trip_type = THERMAL_TRIP_PASSIVE;
+ set_trip(&trips[i][1], THERMAL_TRIP_PASSIVE,
+ THERMAL_TRIP_FLAG_RW_TEMP, 0);
temp = 0;
}
- ret = configure_trip(&sensors->soc_dts[i], 1, trip_type, temp);
+
+ ret = update_trip_temp(sensors, 1, temp);
if (ret)
goto err_reset_trips;
}
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- ret = add_dts_thermal_zone(i, &sensors->soc_dts[i], critical_trip);
+ ret = add_dts_thermal_zone(i, &sensors->soc_dts[i], trips[i]);
if (ret)
goto err_remove_zone;
}
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.h b/drivers/thermal/intel/intel_soc_dts_iosf.h
index 162841df0ebe..44eee844ab3c 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.h
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.h
@@ -28,8 +28,6 @@ struct intel_soc_dts_sensors;
struct intel_soc_dts_sensor_entry {
int id;
u32 store_status;
- u32 trip_mask;
- struct thermal_trip trips[SOC_MAX_DTS_TRIPS];
struct thermal_zone_device *tzone;
struct intel_soc_dts_sensors *sensors;
};
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 2e5c741c41ca..5e8b7f34b395 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -103,18 +103,19 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);
/**
* intel_tcc_get_temp() - returns the current temperature
* @cpu: cpu that the MSR should be run on, negative value means any cpu.
+ * @temp: pointer to the memory for saving cpu temperature.
* @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
*
* Get the current temperature returned by the CPU core/package level
* thermal sensor, in degrees C.
*
- * Return: Temperature in degrees C on success, negative error code otherwise.
+ * Return: 0 on success, negative error code otherwise.
*/
-int intel_tcc_get_temp(int cpu, bool pkg)
+int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
{
u32 low, high;
u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
- int tjmax, temp, err;
+ int tjmax, err;
tjmax = intel_tcc_get_tjmax(cpu);
if (tjmax < 0)
@@ -131,9 +132,8 @@ int intel_tcc_get_temp(int cpu, bool pkg)
if (!(low & BIT(31)))
return -ENODATA;
- temp = tjmax - ((low >> 16) & 0x7f);
+ *temp = tjmax - ((low >> 16) & 0x7f);
- /* Do not allow negative CPU temperature */
- return temp >= 0 ? temp : -ENODATA;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC);
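
A minimal sketch of the reworked intel_tcc_get_temp() calling convention, matching the callers updated elsewhere in this series; the wrapper name is illustrative, not part of the patch:

	/* Temperature is now returned through an out-parameter; the return value
	 * carries only the error code. */
	static int example_read_pkg_temp(int cpu, int *temp_c)
	{
		int ret;

		ret = intel_tcc_get_temp(cpu, temp_c, true);	/* true: package sensor */
		if (ret < 0)
			return ret;

		return 0;
	}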
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 11a7f8108bbb..c0ca8e3ff2e7 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -53,7 +53,6 @@ struct zone_device {
u32 msr_pkg_therm_high;
struct delayed_work work;
struct thermal_zone_device *tzone;
- struct thermal_trip *trips;
struct cpumask cpumask;
};
@@ -108,11 +107,11 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
struct zone_device *zonedev = thermal_zone_device_priv(tzd);
- int val;
+ int val, ret;
- val = intel_tcc_get_temp(zonedev->cpu, true);
- if (val < 0)
- return val;
+ ret = intel_tcc_get_temp(zonedev->cpu, &val, true);
+ if (ret < 0)
+ return ret;
*temp = val * 1000;
pr_debug("sys_get_curr_temp %d\n", *temp);
@@ -167,7 +166,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
}
/* Thermal zone callback registry */
-static struct thermal_zone_device_ops tzone_ops = {
+static const struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
.set_trip_temp = sys_set_trip_temp,
};
@@ -268,17 +267,13 @@ static int pkg_thermal_notify(u64 msr_val)
return 0;
}
-static struct thermal_trip *pkg_temp_thermal_trips_init(int cpu, int tj_max, int num_trips)
+static int pkg_temp_thermal_trips_init(int cpu, int tj_max,
+ struct thermal_trip *trips, int num_trips)
{
- struct thermal_trip *trips;
unsigned long thres_reg_value;
u32 mask, shift, eax, edx;
int ret, i;
- trips = kzalloc(sizeof(*trips) * num_trips, GFP_KERNEL);
- if (!trips)
- return ERR_PTR(-ENOMEM);
-
for (i = 0; i < num_trips; i++) {
if (i) {
@@ -291,10 +286,8 @@ static struct thermal_trip *pkg_temp_thermal_trips_init(int cpu, int tj_max, int
ret = rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&eax, &edx);
- if (ret < 0) {
- kfree(trips);
- return ERR_PTR(ret);
- }
+ if (ret < 0)
+ return ret;
thres_reg_value = (eax & mask) >> shift;
@@ -302,16 +295,18 @@ static struct thermal_trip *pkg_temp_thermal_trips_init(int cpu, int tj_max, int
tj_max - thres_reg_value * 1000 : THERMAL_TEMP_INVALID;
trips[i].type = THERMAL_TRIP_PASSIVE;
+ trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
pr_debug("%s: cpu=%d, trip=%d, temp=%d\n",
__func__, cpu, i, trips[i].temperature);
}
- return trips;
+ return 0;
}
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
+ struct thermal_trip trips[MAX_NUMBER_OF_TRIPS] = { 0 };
int id = topology_logical_die_id(cpu);
u32 eax, ebx, ecx, edx;
struct zone_device *zonedev;
@@ -336,21 +331,18 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
if (!zonedev)
return -ENOMEM;
- zonedev->trips = pkg_temp_thermal_trips_init(cpu, tj_max, thres_count);
- if (IS_ERR(zonedev->trips)) {
- err = PTR_ERR(zonedev->trips);
+ err = pkg_temp_thermal_trips_init(cpu, tj_max, trips, thres_count);
+ if (err)
goto out_kfree_zonedev;
- }
INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
zonedev->cpu = cpu;
zonedev->tzone = thermal_zone_device_register_with_trips("x86_pkg_temp",
- zonedev->trips, thres_count,
- (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
+ trips, thres_count,
zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
if (IS_ERR(zonedev->tzone)) {
err = PTR_ERR(zonedev->tzone);
- goto out_kfree_trips;
+ goto out_kfree_zonedev;
}
err = thermal_zone_device_enable(zonedev->tzone);
if (err)
@@ -369,8 +361,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
out_unregister_tz:
thermal_zone_device_unregister(zonedev->tzone);
-out_kfree_trips:
- kfree(zonedev->trips);
out_kfree_zonedev:
kfree(zonedev);
return err;
@@ -457,10 +447,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
raw_spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
- if (lastcpu) {
- kfree(zonedev->trips);
+ if (lastcpu)
kfree(zonedev);
- }
+
return 0;
}
@@ -494,7 +483,7 @@ static int __init pkg_temp_thermal_init(void)
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
- max_id = topology_max_packages() * topology_max_die_per_package();
+ max_id = topology_max_packages() * topology_max_dies_per_package();
zones = kcalloc(max_id, sizeof(struct zone_device *),
GFP_KERNEL);
if (!zones)
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index feb848d595fa..925183753fcb 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -489,7 +489,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
&rcar_thermal_zone_ops);
} else {
priv->zone = thermal_zone_device_register_with_trips(
- "rcar_thermal", trips, ARRAY_SIZE(trips), 0, priv,
+ "rcar_thermal", trips, ARRAY_SIZE(trips), priv,
&rcar_thermal_zone_ops, NULL, 0,
idle);
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 0d6249b36609..2a105409864e 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -203,7 +203,7 @@ int st_thermal_register(struct platform_device *pdev,
trip.type = THERMAL_TRIP_CRITICAL;
sensor->thermal_dev =
- thermal_zone_device_register_with_trips(dev_name(dev), &trip, 1, 0, sensor,
+ thermal_zone_device_register_with_trips(dev_name(dev), &trip, 1, sensor,
&st_tz_ops, NULL, 0, polling_delay);
if (IS_ERR(sensor->thermal_dev)) {
dev_err(dev, "failed to register thermal zone device\n");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index dfaa6341694a..34a31bc72023 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -273,7 +273,6 @@ static int __init thermal_register_governors(void)
/*
* Zone update section: main control loop applied to each zone while monitoring
- *
* in polling mode. The monitoring is done using a workqueue.
* Same update may be done on a zone by calling thermal_zone_device_update().
*
@@ -356,9 +355,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
trace_thermal_zone_trip(tz, thermal_zone_trip_id(tz, trip), trip->type);
if (trip->type == THERMAL_TRIP_CRITICAL)
- tz->ops->critical(tz);
- else if (tz->ops->hot)
- tz->ops->hot(tz);
+ tz->ops.critical(tz);
+ else if (tz->ops.hot)
+ tz->ops.hot(tz);
}
static void handle_thermal_trip(struct thermal_zone_device *tz,
@@ -493,8 +492,8 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
return ret;
}
- if (tz->ops->change_mode)
- ret = tz->ops->change_mode(tz, mode);
+ if (tz->ops.change_mode)
+ ret = tz->ops.change_mode(tz, mode);
if (!ret)
tz->mode = mode;
@@ -867,8 +866,8 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
struct thermal_zone_device *pos = NULL;
list_for_each_entry(pos, &thermal_tz_list, node) {
- if (pos->ops->bind) {
- ret = pos->ops->bind(pos, cdev);
+ if (pos->ops.bind) {
+ ret = pos->ops.bind(pos, cdev);
if (ret)
print_bind_err_msg(pos, cdev, ret);
}
@@ -1184,8 +1183,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
/* Unbind all thermal zones associated with 'this' cdev */
list_for_each_entry(tz, &thermal_tz_list, node) {
- if (tz->ops->unbind)
- tz->ops->unbind(tz, cdev);
+ if (tz->ops.unbind)
+ tz->ops.unbind(tz, cdev);
}
mutex_unlock(&thermal_list_lock);
@@ -1199,13 +1198,13 @@ static void bind_tz(struct thermal_zone_device *tz)
int ret;
struct thermal_cooling_device *pos = NULL;
- if (!tz->ops->bind)
+ if (!tz->ops.bind)
return;
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_cdev_list, node) {
- ret = tz->ops->bind(tz, pos);
+ ret = tz->ops.bind(tz, pos);
if (ret)
print_bind_err_msg(tz, pos, ret);
}
@@ -1224,11 +1223,8 @@ int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
int i, ret = -EINVAL;
- if (tz->ops->get_crit_temp)
- return tz->ops->get_crit_temp(tz, temp);
-
- if (!tz->trips)
- return -EINVAL;
+ if (tz->ops.get_crit_temp)
+ return tz->ops.get_crit_temp(tz, temp);
mutex_lock(&tz->lock);
@@ -1251,7 +1247,6 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_crit_temp);
* @type: the thermal zone device type
* @trips: a pointer to an array of thermal trips
* @num_trips: the number of trip points the thermal zone support
- * @mask: a bit string indicating the writeablility of trip points
* @devdata: private device data
* @ops: standard thermal zone device callbacks
* @tzp: thermal zone platform parameters
@@ -1272,10 +1267,12 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_crit_temp);
* IS_ERR*() helpers.
*/
struct thermal_zone_device *
-thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *trips, int num_trips, int mask,
- void *devdata, struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp, int passive_delay,
- int polling_delay)
+thermal_zone_device_register_with_trips(const char *type,
+ const struct thermal_trip *trips,
+ int num_trips, void *devdata,
+ const struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
{
struct thermal_zone_device *tz;
int id;
@@ -1293,20 +1290,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- /*
- * Max trip count can't exceed 31 as the "mask >> num_trips" condition.
- * For example, shifting by 32 will result in compiler warning:
- * warning: right shift count >= width of type [-Wshift-count- overflow]
- *
- * Also "mask >> num_trips" will always be true with 32 bit shift.
- * E.g. mask = 0x80000000 for trip id 31 to be RW. Then
- * mask >> 32 = 0x80000000
- * This will result in failure for the below condition.
- *
- * Check will be true when the bit 31 of the mask is set.
- * 32 bit shift will cause overflow of 4 byte integer.
- */
- if (num_trips > (BITS_PER_TYPE(int) - 1) || num_trips < 0 || mask >> num_trips) {
+ if (num_trips < 0) {
pr_err("Incorrect number of thermal trips\n");
return ERR_PTR(-EINVAL);
}
@@ -1322,7 +1306,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
if (!thermal_class)
return ERR_PTR(-ENODEV);
- tz = kzalloc(sizeof(*tz), GFP_KERNEL);
+ tz = kzalloc(struct_size(tz, trips, num_trips), GFP_KERNEL);
if (!tz)
return ERR_PTR(-ENOMEM);
@@ -1348,21 +1332,21 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
tz->id = id;
strscpy(tz->type, type, sizeof(tz->type));
- if (!ops->critical)
- ops->critical = thermal_zone_device_critical;
+ tz->ops = *ops;
+ if (!tz->ops.critical)
+ tz->ops.critical = thermal_zone_device_critical;
- tz->ops = ops;
tz->device.class = thermal_class;
tz->devdata = devdata;
- tz->trips = trips;
tz->num_trips = num_trips;
+ memcpy(tz->trips, trips, num_trips * sizeof(*trips));
thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);
/* sys I/F */
/* Add nodes that are always present via .groups */
- result = thermal_zone_create_device_groups(tz, mask);
+ result = thermal_zone_create_device_groups(tz);
if (result)
goto remove_id;
@@ -1437,10 +1421,10 @@ EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
struct thermal_zone_device *thermal_tripless_zone_device_register(
const char *type,
void *devdata,
- struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp)
{
- return thermal_zone_device_register_with_trips(type, NULL, 0, 0, devdata,
+ return thermal_zone_device_register_with_trips(type, NULL, 0, devdata,
ops, tzp, 0, 0);
}
EXPORT_SYMBOL_GPL(thermal_tripless_zone_device_register);
@@ -1499,8 +1483,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
/* Unbind all cdevs associated with 'this' thermal zone */
list_for_each_entry(cdev, &thermal_cdev_list, node)
- if (tz->ops->unbind)
- tz->ops->unbind(tz, cdev);
+ if (tz->ops.unbind)
+ tz->ops.unbind(tz, cdev);
mutex_unlock(&thermal_list_lock);
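
A minimal sketch of registration against the reworked API, where per-trip flags replace the writability mask; all names other than the core symbols are illustrative, not part of the patch:

	static const struct thermal_trip example_trips[] = {
		{ .type = THERMAL_TRIP_PASSIVE,  .flags = THERMAL_TRIP_FLAG_RW_TEMP },
		{ .type = THERMAL_TRIP_CRITICAL },
	};

	static int example_register(void *priv,
				    const struct thermal_zone_device_ops *ops)
	{
		struct thermal_zone_device *tz;

		/* No mask argument: writable trips are marked via trip->flags. */
		tz = thermal_zone_device_register_with_trips("example", example_trips,
							     ARRAY_SIZE(example_trips),
							     priv, ops, NULL, 0, 0);
		return PTR_ERR_OR_ZERO(tz);
	}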
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index e9c099ecdd0f..0d8a42bb7ce8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -131,7 +131,7 @@ void thermal_zone_trip_updated(struct thermal_zone_device *tz,
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
/* sysfs I/F */
-int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
+int thermal_zone_create_device_groups(struct thermal_zone_device *tz);
void thermal_zone_destroy_device_groups(struct thermal_zone_device *);
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev);
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 0329f4a71b02..c5a057b59c42 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -26,8 +26,8 @@ int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip
{
enum thermal_trend trend;
- if (tz->emul_temperature || !tz->ops->get_trend ||
- tz->ops->get_trend(tz, trip, &trend)) {
+ if (tz->emul_temperature || !tz->ops.get_trend ||
+ tz->ops.get_trend(tz, trip, &trend)) {
if (tz->temperature > tz->last_temperature)
trend = THERMAL_TREND_RAISING;
else if (tz->temperature < tz->last_temperature)
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(get_thermal_instance);
* temperature and fill @temp.
*
* Both tz and tz->ops must be valid pointers when calling this function,
- * and the tz->ops->get_temp callback must be provided.
+ * and the tz->ops.get_temp callback must be provided.
* The function must be called under tz->lock.
*
* Return: On success returns 0, an error code otherwise
@@ -88,7 +88,7 @@ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
lockdep_assert_held(&tz->lock);
- ret = tz->ops->get_temp(tz, temp);
+ ret = tz->ops.get_temp(tz, temp);
if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
for_each_trip(tz, trip) {
@@ -132,7 +132,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
mutex_lock(&tz->lock);
- if (!tz->ops->get_temp) {
+ if (!tz->ops.get_temp) {
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 252116f1e535..f0e504fd866a 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -80,7 +80,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
mutex_lock(&tz->lock);
- ret = tz->ops->get_crit_temp(tz, &temperature);
+ ret = tz->ops.get_crit_temp(tz, &temperature);
mutex_unlock(&tz->lock);
@@ -132,7 +132,7 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
{
int temp;
- return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
+ return tz->ops.get_crit_temp && !tz->ops.get_crit_temp(tz, &temp);
}
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 4d6c22e0ed85..f1cbf9aa62cf 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -117,6 +117,8 @@ static int thermal_of_populate_trip(struct device_node *np,
return ret;
}
+ trip->flags = THERMAL_TRIP_FLAG_RW_TEMP;
+
return 0;
}
@@ -438,13 +440,8 @@ static int thermal_of_unbind(struct thermal_zone_device *tz,
*/
static void thermal_of_zone_unregister(struct thermal_zone_device *tz)
{
- struct thermal_trip *trips = tz->trips;
- struct thermal_zone_device_ops *ops = tz->ops;
-
thermal_zone_device_disable(tz);
thermal_zone_device_unregister(tz);
- kfree(trips);
- kfree(ops);
}
/**
@@ -470,33 +467,27 @@ static void thermal_of_zone_unregister(struct thermal_zone_device *tz)
static struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
const struct thermal_zone_device_ops *ops)
{
+ struct thermal_zone_device_ops of_ops = *ops;
struct thermal_zone_device *tz;
struct thermal_trip *trips;
struct thermal_zone_params tzp = {};
- struct thermal_zone_device_ops *of_ops;
struct device_node *np;
const char *action;
int delay, pdelay;
- int ntrips, mask;
+ int ntrips;
int ret;
- of_ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
- if (!of_ops)
- return ERR_PTR(-ENOMEM);
-
np = of_thermal_zone_find(sensor, id);
if (IS_ERR(np)) {
if (PTR_ERR(np) != -ENODEV)
pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id);
- ret = PTR_ERR(np);
- goto out_kfree_of_ops;
+ return ERR_CAST(np);
}
trips = thermal_of_trips_init(np, &ntrips);
if (IS_ERR(trips)) {
pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
- ret = PTR_ERR(trips);
- goto out_kfree_of_ops;
+ return ERR_CAST(trips);
}
ret = thermal_of_monitor_init(np, &delay, &pdelay);
@@ -507,18 +498,16 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
thermal_of_parameters_init(np, &tzp);
- of_ops->bind = thermal_of_bind;
- of_ops->unbind = thermal_of_unbind;
-
- mask = GENMASK_ULL((ntrips) - 1, 0);
+ of_ops.bind = thermal_of_bind;
+ of_ops.unbind = thermal_of_unbind;
ret = of_property_read_string(np, "critical-action", &action);
if (!ret)
- if (!of_ops->critical && !strcasecmp(action, "reboot"))
- of_ops->critical = thermal_zone_device_critical_reboot;
+ if (!of_ops.critical && !strcasecmp(action, "reboot"))
+ of_ops.critical = thermal_zone_device_critical_reboot;
tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
- mask, data, of_ops, &tzp,
+ data, &of_ops, &tzp,
pdelay, delay);
if (IS_ERR(tz)) {
ret = PTR_ERR(tz);
@@ -526,6 +515,8 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
goto out_kfree_trips;
}
+ kfree(trips);
+
ret = thermal_zone_device_enable(tz);
if (ret) {
pr_err("Failed to enabled thermal zone '%s', id=%d: %d\n",
@@ -538,8 +529,6 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
out_kfree_trips:
kfree(trips);
-out_kfree_of_ops:
- kfree(of_ops);
return ERR_PTR(ret);
}
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index f4033865b093..5b533fa40437 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -123,8 +123,8 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
trip = &tz->trips[trip_id];
if (temp != trip->temperature) {
- if (tz->ops->set_trip_temp) {
- ret = tz->ops->set_trip_temp(tz, trip_id, temp);
+ if (tz->ops.set_trip_temp) {
+ ret = tz->ops.set_trip_temp(tz, trip_id, temp);
if (ret)
goto unlock;
}
@@ -136,7 +136,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
unlock:
mutex_unlock(&tz->lock);
-
+
return ret ? ret : count;
}
@@ -174,21 +174,14 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
trip = &tz->trips[trip_id];
if (hyst != trip->hysteresis) {
- if (tz->ops->set_trip_hyst) {
- ret = tz->ops->set_trip_hyst(tz, trip_id, hyst);
- if (ret)
- goto unlock;
- }
-
trip->hysteresis = hyst;
thermal_zone_trip_updated(tz, trip);
}
-unlock:
mutex_unlock(&tz->lock);
- return ret ? ret : count;
+ return count;
}
static ssize_t
@@ -250,10 +243,10 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&tz->lock);
- if (!tz->ops->set_emul_temp)
+ if (!tz->ops.set_emul_temp)
tz->emul_temperature = temperature;
else
- ret = tz->ops->set_emul_temp(tz, temperature);
+ ret = tz->ops.set_emul_temp(tz, temperature);
if (!ret)
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
@@ -392,17 +385,16 @@ static const struct attribute_group *thermal_zone_attribute_groups[] = {
/**
* create_trip_attrs() - create attributes for trip points
* @tz: the thermal zone device
- * @mask: Writeable trip point bitmap.
*
* helper function to instantiate sysfs entries for every trip
* point and its properties of a struct thermal_zone_device.
*
* Return: 0 on success, the proper error value otherwise.
*/
-static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
+static int create_trip_attrs(struct thermal_zone_device *tz)
{
+ const struct thermal_trip *trip;
struct attribute **attrs;
- int indx;
/* This function works only for zones with at least one trip */
if (tz->num_trips <= 0)
@@ -437,7 +429,9 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
return -ENOMEM;
}
- for (indx = 0; indx < tz->num_trips; indx++) {
+ for_each_trip(tz, trip) {
+ int indx = thermal_zone_trip_id(tz, trip);
+
/* create trip type attribute */
snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
"trip_point_%d_type", indx);
@@ -458,8 +452,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
tz->trip_temp_attrs[indx].name;
tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
- if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
- mask & (1 << indx)) {
+ if (trip->flags & THERMAL_TRIP_FLAG_RW_TEMP) {
tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
tz->trip_temp_attrs[indx].attr.store =
trip_point_temp_store;
@@ -474,7 +467,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
tz->trip_hyst_attrs[indx].name;
tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
- if (tz->ops->set_trip_hyst) {
+ if (trip->flags & THERMAL_TRIP_FLAG_RW_HYST) {
tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
tz->trip_hyst_attrs[indx].attr.store =
trip_point_hyst_store;
@@ -506,8 +499,7 @@ static void destroy_trip_attrs(struct thermal_zone_device *tz)
kfree(tz->trips_attribute_group.attrs);
}
-int thermal_zone_create_device_groups(struct thermal_zone_device *tz,
- int mask)
+int thermal_zone_create_device_groups(struct thermal_zone_device *tz)
{
const struct attribute_group **groups;
int i, size, result;
@@ -523,7 +515,7 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *tz,
groups[i] = thermal_zone_attribute_groups[i];
if (tz->num_trips) {
- result = create_trip_attrs(tz, mask);
+ result = create_trip_attrs(tz);
if (result) {
kfree(groups);
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index c875a26d5adf..09f6050dd041 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -70,7 +70,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
lockdep_assert_held(&tz->lock);
- if (!tz->ops->set_trips)
+ if (!tz->ops.set_trips)
return;
for_each_trip(tz, trip) {
@@ -114,7 +114,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
* Set a temperature window. When this window is left the driver
* must inform the thermal core via thermal_zone_device_update.
*/
- ret = tz->ops->set_trips(tz, low, high);
+ ret = tz->ops.set_trips(tz, low, high);
if (ret)
dev_err(&tz->device, "Failed to set trips: %d\n", ret);
}
@@ -122,7 +122,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip)
{
- if (!tz || !tz->trips || trip_id < 0 || trip_id >= tz->num_trips || !trip)
+ if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip)
return -EINVAL;
*trip = tz->trips[trip_id];
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 900114ba4371..fad40c4bc710 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1249,6 +1249,9 @@ int tb_port_update_credits(struct tb_port *port)
ret = tb_port_do_update_credits(port);
if (ret)
return ret;
+
+ if (!port->dual_link_port)
+ return 0;
return tb_port_do_update_credits(port->dual_link_port);
}
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 6e05c5c7bca1..c2a4e88b328f 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV_SBI
+ depends on RISCV_SBI && NONPORTABLE
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
- is normally used only during boot to output printk.
+ is normally used only during boot to output printk. This driver
+ conflicts with real console drivers and should not be enabled on
+ systems that directly access the console.
- If you don't know what do to here, say Y.
+ If you don't know what to do here, say N.

config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 2d1f350a4bea..c1d43f040c43 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -357,9 +357,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
- clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0) {
+ if (rate > 0 && p->uartclk != rate) {
+ clk_disable_unprepare(d->clk);
/*
* Note that any clock-notifer worker will block in
* serial8250_update_uartclk() until we are done.
@@ -367,8 +367,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
+ clk_prepare_enable(d->clk);
}
- clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
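The 8250_dw hunk above boils down to a small pattern: leave the clock alone when the rounded rate already matches, and keep the disable/enable pair inside the branch that actually changes the rate. A sketch of that pattern in isolation; example_update_uartclk() is a made-up name and the sketch assumes only the common clk API.

/* Illustrative only: reprogram the baud clock only when the rate changes. */
static void example_update_uartclk(struct clk *clk, struct uart_port *p,
				   unsigned long newrate)
{
	long rate = clk_round_rate(clk, newrate);

	if (rate > 0 && p->uartclk != rate) {
		clk_disable_unprepare(clk);		/* gate before changing the rate */
		if (!clk_set_rate(clk, newrate))	/* 0 on success */
			p->uartclk = rate;
		clk_prepare_enable(clk);		/* ungate in the same branch */
	}
}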
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index fccec1698a54..cf2c890a560f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1339,11 +1339,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
}
}
+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+ struct uart_port *port = &uap->port;
+ u32 cr;
+
+ /* Enable transmitter */
+ cr = pl011_read(uap, REG_CR);
+ cr |= UART011_CR_TXE;
+
+ /* Disable receiver if half-duplex */
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ cr &= ~UART011_CR_RXE;
+
+ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+ cr &= ~UART011_CR_RTS;
+ else
+ cr |= UART011_CR_RTS;
+
+ pl011_write(cr, uap, REG_CR);
+
+ if (port->rs485.delay_rts_before_send)
+ mdelay(port->rs485.delay_rts_before_send);
+
+ uap->rs485_tx_started = true;
+}
+
static void pl011_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+ !uap->rs485_tx_started)
+ pl011_rs485_tx_start(uap);
+
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
@@ -1424,42 +1454,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
return true;
}
-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
-{
- struct uart_port *port = &uap->port;
- u32 cr;
-
- /* Enable transmitter */
- cr = pl011_read(uap, REG_CR);
- cr |= UART011_CR_TXE;
-
- /* Disable receiver if half-duplex */
- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
- cr &= ~UART011_CR_RXE;
-
- if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
-
- pl011_write(cr, uap, REG_CR);
-
- if (port->rs485.delay_rts_before_send)
- mdelay(port->rs485.delay_rts_before_send);
-
- uap->rs485_tx_started = true;
-}
-
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
- pl011_rs485_tx_start(uap);
-
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return true;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 5ddf110aedbe..bbcbc91482af 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2345,9 +2345,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, bd, UARTBAUD);
lpuart32_serial_setbrg(sport, baud);
- lpuart32_write(&sport->port, modem, UARTMODIR);
- lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
+ lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
/* restore control register */
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ /* re-enable the CTS if needed */
+ lpuart32_write(&sport->port, modem, UARTMODIR);
if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
sport->is_cs7 = true;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4aa72d5aeafb..e14813250616 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -462,8 +462,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
}
}
-/* called with port.lock taken and irqs off */
-static void imx_uart_stop_rx(struct uart_port *port)
+static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr1, ucr2, ucr4, uts;
@@ -485,7 +484,7 @@ static void imx_uart_stop_rx(struct uart_port *port)
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
if (port->rs485.flags & SER_RS485_ENABLED &&
port->rs485.flags & SER_RS485_RTS_ON_SEND &&
- sport->have_rtscts && !sport->have_rtsgpio) {
+ sport->have_rtscts && !sport->have_rtsgpio && loopback) {
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
@@ -498,6 +497,16 @@ static void imx_uart_stop_rx(struct uart_port *port)
}
/* called with port.lock taken and irqs off */
+static void imx_uart_stop_rx(struct uart_port *port)
+{
+ /*
+ * Stop RX and enable loopback in order to make sure RS485 bus
+ * is not blocked. See comment in imx_uart_probe().
+ */
+ imx_uart_stop_rx_with_loopback_ctrl(port, true);
+}
+
+/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -682,9 +691,14 @@ static void imx_uart_start_tx(struct uart_port *port)
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
+ /*
+ * Since we are about to transmit we can not stop RX
+ * with loopback enabled because that will make our
+ * transmitted data being just looped to RX.
+ */
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
!port->rs485_rx_during_tx_gpio)
- imx_uart_stop_rx(port);
+ imx_uart_stop_rx_with_loopback_ctrl(port, false);
sport->tx_state = WAIT_AFTER_RTS;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index e63a8fbe63bd..99e08737f293 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -851,19 +851,21 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
}
static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
- unsigned int remaining)
+ unsigned int chunk)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
- unsigned int tx_bytes;
+ unsigned int tx_bytes, c, remaining = chunk;
u8 buf[BYTES_PER_FIFO_WORD];
while (remaining) {
memset(buf, 0, sizeof(buf));
tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
- memcpy(buf, &xmit->buf[xmit->tail], tx_bytes);
- uart_xmit_advance(uport, tx_bytes);
+ for (c = 0; c < tx_bytes ; c++) {
+ buf[c] = xmit->buf[xmit->tail];
+ uart_xmit_advance(uport, 1);
+ }
iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
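The qcom_geni change replaces a single memcpy from the circular transmit buffer with a per-byte loop because one FIFO word may straddle the wrap point of the buffer. A stand-alone illustration of that wrap case in plain userspace C (not driver code; names and sizes are made up):

#include <stdio.h>

#define RING_SIZE 8

/* Copy n bytes out of a ring buffer one byte at a time, so a chunk that
 * wraps past the end of the ring is still copied correctly. */
static void ring_copy(const char *ring, unsigned int *tail, char *dst,
		      unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		dst[i] = ring[*tail];
		*tail = (*tail + 1) & (RING_SIZE - 1);
	}
}

int main(void)
{
	char ring[RING_SIZE] = { 'D', 'E', 0, 0, 0, 'A', 'B', 'C' };
	unsigned int tail = 5;		/* data runs A..E across the wrap point */
	char out[5] = { 0 };

	ring_copy(ring, &tail, out, 4);
	printf("%s\n", out);		/* prints ABCD despite the wrap */
	return 0;
}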
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 88975a4df306..72b6f4f326e2 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -46,8 +46,31 @@ out:
return 0;
}
+static int serial_port_runtime_suspend(struct device *dev)
+{
+ struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ struct uart_port *port = port_dev->port;
+ unsigned long flags;
+ bool busy;
+
+ if (port->flags & UPF_DEAD)
+ return 0;
+
+ uart_port_lock_irqsave(port, &flags);
+ busy = __serial_port_busy(port);
+ if (busy)
+ port->ops->start_tx(port);
+ uart_port_unlock_irqrestore(port, flags);
+
+ if (busy)
+ pm_runtime_mark_last_busy(dev);
+
+ return busy ? -EBUSY : 0;
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
- NULL, serial_port_runtime_resume, NULL);
+ serial_port_runtime_suspend,
+ serial_port_runtime_resume, NULL);
static int serial_port_probe(struct device *dev)
{
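The serial_port.c hunk adds a runtime-suspend hook that refuses to suspend while the port still has transmit work pending. The sketch below isolates that veto pattern; example_priv, example_device_busy() and example_runtime_resume() are placeholders, not symbols from the patch.

/*
 * Illustrative only: returning -EBUSY from runtime_suspend keeps the device
 * active, and pm_runtime_mark_last_busy() restarts the autosuspend timer so
 * suspend is retried later.
 */
static int example_runtime_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (example_device_busy(priv)) {
		pm_runtime_mark_last_busy(dev);
		return -EBUSY;
	}

	return 0;
}

static DEFINE_RUNTIME_DEV_PM_OPS(example_pm, example_runtime_suspend,
				 example_runtime_resume, NULL);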
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 794b77512740..693e932d6feb 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
+ if (!port->rs485_rx_during_tx_gpio)
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
} else {
stm32_usart_clr_bits(port, ofs->cr3,
USART_CR3_DEM | USART_CR3_DEP);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 156efda7c80d..38a765eadbe2 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -381,7 +381,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
- memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+ memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
memset32(&ln[cols - nr], ' ', nr);
}
}
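The vt.c change is the classic overlapping-copy fix: the source and destination of the shift share bytes, which memcpy() does not permit. A tiny self-contained demonstration in ordinary C (array contents are arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Delete nr elements at index x by shifting the tail left. The ranges
	 * overlap, so memmove() is required; memcpy() would be undefined. */
	unsigned int line[6] = { 1, 2, 3, 4, 5, 6 };
	unsigned int x = 1, nr = 2, cols = 6;

	memmove(&line[x], &line[x + nr], (cols - x - nr) * sizeof(*line));

	for (unsigned int i = 0; i < cols; i++)
		printf("%u ", line[i]);	/* 1 4 5 6 5 6 */
	printf("\n");
	return 0;
}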
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index d77b25b79ae3..eac7fff6992d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1469,7 +1469,7 @@ static int ufshcd_devfreq_target(struct device *dev,
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
- bool scale_up, sched_clk_scaling_suspend_work = false;
+ bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long irq_flags;
@@ -10593,7 +10593,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
goto out_remove_scsi_host;
- hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+ hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
goto free_tmf_tag_set;
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 39eef470f8fa..8fde5204e88b 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1712,8 +1712,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
- ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
- ufs_qcom_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
return ret;
@@ -1742,7 +1742,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
devm_free_irq(hba->dev, desc->irq, hba);
}
msi_unlock_descs(hba->dev);
- platform_msi_domain_free_irqs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
host->hw_ver.step == 0)
@@ -1818,7 +1818,7 @@ static void ufs_qcom_remove(struct platform_device *pdev)
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
- platform_msi_domain_free_irqs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
}
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index aeca902ab6cc..fd1beb10bba7 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
return;
}
- if (request->complete) {
+ /*
+ * The zlp request is appended by the driver itself, so there is no need to call
+ * usb_gadget_giveback_request() to notify the gadget composite driver.
+ */
+ if (request->complete && request->buf != priv_dev->zlp_buf) {
spin_unlock(&priv_dev->lock);
usb_gadget_giveback_request(&priv_ep->endpoint,
request);
@@ -2540,11 +2544,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ list_del_init(&priv_req->list);
kfree(priv_req->request.buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 33548771a0d3..465e9267b49c 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -395,7 +395,6 @@ pm_put:
return ret;
}
-
/**
* cdns_wakeup_irq - interrupt handler for wakeup events
* @irq: irq number for cdns3/cdnsp core device
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 04b6d12f2b9a..ee917f1b091c 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
*/
static void cdns_otg_disable_irq(struct cdns *cdns)
{
- writel(0, &cdns->otg_irq_regs->ien);
+ if (cdns->version)
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
- if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ state = readl(&cdns->otg_cdnsp_regs->did);
+
+ if (OTG_CDNSP_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_cdnsp_regs->ien;
cdns->version = CDNSP_CONTROLLER_V2;
- } else {
+ } else if (OTG_CDNS3_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_v1_regs->ien;
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
+ } else {
+ dev_err(cdns->dev, "not supported DID=0x%08x\n", state);
+ return -EINVAL;
}
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
return 0;
}
-
/* Indicate the cdns3 core was power lost before */
bool cdns_power_is_lost(struct cdns *cdns)
{
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index cbdf94f73ed9..d72370c321d3 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
__le32 susp_timing_ctrl;
};
-#define OTG_CDNSP_DID 0x0004034E
+/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
+#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
+
+/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
+#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
/*
* Common registers interface for both CDNS3 and CDNSP version of DRD.
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 6164fc4c96a4..ceca4d839dfd 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -18,6 +18,11 @@
#include "../host/xhci.h"
#include "../host/xhci-plat.h"
+/*
+ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 exist only
+ * in Cadence USB3 dual-role controller, so it can't be used
+ * with Cadence CDNSP dual-role controller.
+ */
#define XECP_PORT_CAP_REG 0x8000
#define XECP_AUX_CTRL_REG1 0x8120
@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.resume_quirk = xhci_cdns3_resume_quirk,
};
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+
static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
goto err1;
}
- cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
- sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ if (cdns->version < CDNSP_CONTROLLER_V2)
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ else
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+
if (!cdns->xhci_plat_data) {
ret = -ENOMEM;
goto err1;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 12b6dfeaf658..edf74458474a 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1664,9 +1664,10 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_put_urb(urb);
}
-static void usb_giveback_urb_bh(struct tasklet_struct *t)
+static void usb_giveback_urb_bh(struct work_struct *work)
{
- struct giveback_urb_bh *bh = from_tasklet(bh, t, bh);
+ struct giveback_urb_bh *bh =
+ container_of(work, struct giveback_urb_bh, bh);
struct list_head local_list;
spin_lock_irq(&bh->lock);
@@ -1691,9 +1692,9 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
spin_lock_irq(&bh->lock);
if (!list_empty(&bh->head)) {
if (bh->high_prio)
- tasklet_hi_schedule(&bh->bh);
+ queue_work(system_bh_highpri_wq, &bh->bh);
else
- tasklet_schedule(&bh->bh);
+ queue_work(system_bh_wq, &bh->bh);
}
bh->running = false;
spin_unlock_irq(&bh->lock);
@@ -1706,7 +1707,7 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
* @status: completion status code for the URB.
*
* Context: atomic. The completion callback is invoked in caller's context.
- * For HCDs with HCD_BH flag set, the completion callback is invoked in tasklet
+ * For HCDs with HCD_BH flag set, the completion callback is invoked in BH
* context (except for URBs submitted to the root hub which always complete in
* caller's context).
*
@@ -1725,7 +1726,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
struct giveback_urb_bh *bh;
bool running;
- /* pass status to tasklet via unlinked */
+ /* pass status to BH via unlinked */
if (likely(!urb->unlinked))
urb->unlinked = status;
@@ -1747,9 +1748,9 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
if (running)
;
else if (bh->high_prio)
- tasklet_hi_schedule(&bh->bh);
+ queue_work(system_bh_highpri_wq, &bh->bh);
else
- tasklet_schedule(&bh->bh);
+ queue_work(system_bh_wq, &bh->bh);
}
EXPORT_SYMBOL_GPL(usb_hcd_giveback_urb);
@@ -2540,7 +2541,7 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
spin_lock_init(&bh->lock);
INIT_LIST_HEAD(&bh->head);
- tasklet_setup(&bh->bh, usb_giveback_urb_bh);
+ INIT_WORK(&bh->bh, usb_giveback_urb_bh);
}
struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
@@ -2926,7 +2927,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
&& device_can_wakeup(&hcd->self.root_hub->dev))
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
- /* initialize tasklets */
+ /* initialize BHs */
init_giveback_urb_bh(&hcd->high_prio_bh);
hcd->high_prio_bh.high_prio = true;
init_giveback_urb_bh(&hcd->low_prio_bh);
@@ -3036,7 +3037,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
mutex_unlock(&usb_bus_idr_lock);
/*
- * tasklet_kill() isn't needed here because:
+ * flush_work() isn't needed here because:
* - driver's disconnect() called from usb_disconnect() should
* make sure its URBs are completed during the disconnect()
* callback
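The hcd.c hunks convert the URB giveback path from a tasklet to a work item on the bottom-half workqueues. The conversion follows a fixed recipe, sketched below with made-up example_* names; system_bh_wq and system_bh_highpri_wq are the BH workqueues the patch itself uses.

/* Illustrative only: tasklet -> BH workqueue conversion recipe. */
struct example_bh {
	spinlock_t lock;
	struct list_head head;
	struct work_struct bh;			/* was: struct tasklet_struct */
};

static void example_bh_func(struct work_struct *work)
{
	struct example_bh *bh = container_of(work, struct example_bh, bh);

	/* drain bh->head under bh->lock, as the handler above does */
}

static void example_bh_init(struct example_bh *bh)
{
	spin_lock_init(&bh->lock);
	INIT_LIST_HEAD(&bh->head);
	INIT_WORK(&bh->bh, example_bh_func);	/* was: tasklet_setup() */
}

static void example_bh_kick(struct example_bh *bh)
{
	queue_work(system_bh_wq, &bh->bh);	/* was: tasklet_schedule() */
}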
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index c628c1abc907..4d63496f98b6 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -573,7 +573,7 @@ static int match_location(struct usb_device *peer_hdev, void *p)
struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
- if (!peer_hub)
+ if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED)
return 0;
hcd = bus_to_hcd(hdev->bus);
@@ -584,7 +584,8 @@ static int match_location(struct usb_device *peer_hdev, void *p)
for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
peer = peer_hub->ports[port1 - 1];
- if (peer && peer->location == port_dev->location) {
+ if (peer && peer->connect_type != USB_PORT_NOT_USED &&
+ peer->location == port_dev->location) {
link_peers_report(port_dev, peer);
return 1; /* done */
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4c8dd6724678..28f49400f3e8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2650,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
int ret;
spin_lock_irqsave(&dwc->lock, flags);
+ if (!dwc->pullups_connected) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
dwc->connected = false;
/*
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index ca5d5f564998..28f4e6552e84 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1338,7 +1338,15 @@ parse_ntb:
"Parsed NTB with %d frames\n", dgram_counter);
to_process -= block_len;
- if (to_process != 0) {
+
+ /*
+ * Windows NCM driver avoids USB ZLPs by adding a 1-byte
+ * zero pad as needed.
+ */
+ if (to_process == 1 &&
+ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+ to_process--;
+ } else if ((to_process > 0) && (block_len != 0)) {
ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
goto parse_ntb;
}
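The f_ncm parse-loop change distinguishes a lone trailing zero byte (the Windows-style ZLP-avoidance pad) from a further NTB that still needs parsing. A compact restatement of that decision as a helper; classify_tail() is illustrative and not part of the patch.

/* Illustrative only: 0 = nothing more to parse, 1 = another NTB follows. */
static int classify_tail(const unsigned char *ntb_ptr, unsigned int block_len,
			 unsigned int to_process)
{
	to_process -= block_len;

	if (to_process == 1 && ntb_ptr[block_len] == 0x00)
		return 0;	/* single zero pad byte: drop it */
	if (to_process > 0 && block_len != 0)
		return 1;	/* more data: parse the next NTB */
	return 0;
}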
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 89e8cf2a2a7d..7349ea774adf 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1201,7 +1201,7 @@ static int max3420_probe(struct spi_device *spi)
int err, irq;
u8 reg[8];
- if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
dev_err(&spi->dev, "UDC needs full duplex to work\n");
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 10c5d7f726a1..f90eeecf27de 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
static inline int machine_without_vbus_sense(void)
{
- return machine_is_omap_osk() || machine_is_sx1();
+ return machine_is_omap_osk() || machine_is_omap_palmte() ||
+ machine_is_sx1();
}
static int omap_udc_start(struct usb_gadget *g,
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index ac3fc5970315..cfebb833668e 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
static int uhci_grlib_init(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f0d8a607ff21..4f64b814d4aa 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -326,7 +326,13 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc
/* how many trbs will be queued past the enqueue segment? */
trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
- if (trbs_past_seg <= 0)
+ /*
+ * Consider expanding the ring already if num_trbs fills the current
+ * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
+ * the next segment. This avoids confusing a full ring with the special
+ * empty ring case below.
+ */
+ if (trbs_past_seg < 0)
return 0;
/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
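A worked instance of the boundary the new xhci comment describes, assuming the usual TRBS_PER_SEGMENT of 256 (one slot of which is the link TRB); the concrete numbers are only for illustration.

/*
 *   enq_used = 200, num_trbs = 55
 *   trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1)
 *                 = 200 + 55 - 255 = 0
 *
 * The request exactly fills the current segment. The old "<= 0" test bailed
 * out here, which could be mistaken for the empty-ring special case handled
 * below; with "< 0" the expansion check still runs for this case.
 */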
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 76862ba40f35..0e5e4cb74c87 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -2521,21 +2521,19 @@ static const struct hc_driver isp1760_hc_driver = {
int __init isp1760_init_kmem_once(void)
{
urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
- sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
- SLAB_MEM_SPREAD, NULL);
+ sizeof(struct urb_listitem), 0, SLAB_TEMPORARY, NULL);
if (!urb_listitem_cachep)
return -ENOMEM;
qtd_cachep = kmem_cache_create("isp1760_qtd",
- sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
- SLAB_MEM_SPREAD, NULL);
+ sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY, NULL);
if (!qtd_cachep)
goto destroy_urb_listitem;
qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
- 0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
+ 0, SLAB_TEMPORARY, NULL);
if (!qh_cachep)
goto destroy_qtd;
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index ae41578bd014..70165dd86b5d 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -21,7 +21,9 @@ static const struct class role_class = {
struct usb_role_switch {
struct device dev;
struct mutex lock; /* device lock*/
+ struct module *module; /* the module this device depends on */
enum usb_role role;
+ bool registered;
/* From descriptor */
struct device *usb2_port;
@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
if (IS_ERR_OR_NULL(sw))
return 0;
+ if (!sw->registered)
+ return -EOPNOTSUPP;
+
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
{
enum usb_role role;
- if (IS_ERR_OR_NULL(sw))
+ if (IS_ERR_OR_NULL(sw) || !sw->registered)
return USB_ROLE_NONE;
mutex_lock(&sw->lock);
@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
NULL, usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
void usb_role_switch_put(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev.parent->driver->owner);
+ module_put(sw->module);
put_device(&sw->dev);
}
}
@@ -189,15 +194,18 @@ struct usb_role_switch *
usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
{
struct device *dev;
+ struct usb_role_switch *sw = NULL;
if (!fwnode)
return NULL;
dev = class_find_device_by_fwnode(&role_class, fwnode);
- if (dev)
- WARN_ON(!try_module_get(dev->parent->driver->owner));
+ if (dev) {
+ sw = to_role_switch(dev);
+ WARN_ON(!try_module_get(sw->module));
+ }
- return dev ? to_role_switch(dev) : NULL;
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
sw->set = desc->set;
sw->get = desc->get;
+ sw->module = parent->driver->owner;
sw->dev.parent = parent;
sw->dev.fwnode = desc->fwnode;
sw->dev.class = &role_class;
@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ sw->registered = true;
+
/* TODO: Symlinks for the host port and the device controller. */
return sw;
@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ sw->registered = false;
device_unregister(&sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 4e0eef1440b7..300aeef160e7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id)
static int isd200_get_inquiry_data( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
- int retStatus = ISD200_GOOD;
+ int retStatus;
u16 *id = info->id;
usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
@@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us )
isd200_fix_driveid(id);
isd200_dump_driveid(us, id);
+ /* Prevent division by 0 in isd200_scsi_to_ata() */
+ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) {
+ usb_stor_dbg(us, " Invalid ATA Identify data\n");
+ retStatus = ISD200_ERROR;
+ goto Done;
+ }
+
memset(&info->InquiryData, 0, sizeof(info->InquiryData));
/* Standard IDE interface only supports disks */
@@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
}
}
+ Done:
usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
return(retStatus);
@@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us)
static int isd200_Initialization(struct us_data *us)
{
+ int rc = 0;
+
usb_stor_dbg(us, "ISD200 Initialization...\n");
/* Initialize ISD200 info struct */
- if (isd200_init_info(us) == ISD200_ERROR) {
+ if (isd200_init_info(us) < 0) {
usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
+ rc = -ENOMEM;
} else {
/* Get device specific data */
- if (isd200_get_inquiry_data(us) != ISD200_GOOD)
+ if (isd200_get_inquiry_data(us) != ISD200_GOOD) {
usb_stor_dbg(us, "ISD200 Initialization Failure\n");
- else
+ rc = -EINVAL;
+ } else {
usb_stor_dbg(us, "ISD200 Initialization complete\n");
+ }
}
- return 0;
+ return rc;
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c54e9805da53..12cf9940e5b6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -180,6 +180,13 @@ static int slave_configure(struct scsi_device *sdev)
sdev->use_192_bytes_for_3f = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9707f53cfda9..71ace274761f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -879,6 +879,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->guess_capacity = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index f81bec0c7b86..f8ea3054be54 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -559,16 +559,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR_RO(hpd);
-static struct attribute *dp_altmode_attrs[] = {
+static struct attribute *displayport_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
&dev_attr_hpd.attr,
NULL
};
-static const struct attribute_group dp_altmode_group = {
+static const struct attribute_group displayport_group = {
.name = "displayport",
- .attrs = dp_altmode_attrs,
+ .attrs = displayport_attrs,
+};
+
+static const struct attribute_group *displayport_groups[] = {
+ &displayport_group,
+ NULL,
};
int dp_altmode_probe(struct typec_altmode *alt)
@@ -576,7 +581,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
const struct typec_altmode *port = typec_altmode_get_partner(alt);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
- int ret;
/* FIXME: Port can only be DFP_U. */
@@ -587,10 +591,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
- ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
- if (ret)
- return ret;
-
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -624,7 +624,6 @@ void dp_altmode_remove(struct typec_altmode *alt)
{
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
- sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
cancel_work_sync(&dp->work);
if (dp->connector_fwnode) {
@@ -649,6 +648,7 @@ static struct typec_altmode_driver dp_altmode_driver = {
.driver = {
.name = "typec_displayport",
.owner = THIS_MODULE,
+ .dev_groups = displayport_groups,
},
};
module_typec_altmode_driver(dp_altmode_driver);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index f7d7daa60c8d..096597231027 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3743,9 +3743,6 @@ static void tcpm_detach(struct tcpm_port *port)
if (tcpm_port_is_disconnected(port))
port->hard_reset_count = 0;
- port->try_src_count = 0;
- port->try_snk_count = 0;
-
if (!port->attached)
return;
@@ -4876,7 +4873,11 @@ static void run_state_machine(struct tcpm_port *port)
break;
case PORT_RESET:
tcpm_reset_port(port);
- tcpm_set_cc(port, TYPEC_CC_OPEN);
+ if (port->self_powered)
+ tcpm_set_cc(port, TYPEC_CC_OPEN);
+ else
+ tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
+ TYPEC_CC_RD : tcpm_rp_cc(port));
tcpm_set_state(port, PORT_RESET_WAIT_OFF,
PD_T_ERROR_RECOVERY);
break;
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 53a7ede8556d..faccc942b381 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -301,6 +301,7 @@ static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{}
};
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f2ed7167c848..4b2fcb228a0a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,10 +141,8 @@ struct vhost_net {
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
- /* Private page frag */
- struct page_frag page_frag;
- /* Refcount bias of page frag */
- int refcnt_bias;
+ /* Private page frag cache */
+ struct page_frag_cache pf_cache;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
- struct page_frag *pfrag, gfp_t gfp)
-{
- if (pfrag->page) {
- if (pfrag->offset + sz <= pfrag->size)
- return true;
- __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
- }
-
- pfrag->offset = 0;
- net->refcnt_bias = 0;
- if (SKB_FRAG_PAGE_ORDER) {
- /* Avoid direct reclaim but allow kswapd to wake */
- pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
- __GFP_COMP | __GFP_NOWARN |
- __GFP_NORETRY,
- SKB_FRAG_PAGE_ORDER);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
- goto done;
- }
- }
- pfrag->page = alloc_page(gfp);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE;
- goto done;
- }
- return false;
-
-done:
- net->refcnt_bias = USHRT_MAX;
- page_ref_add(pfrag->page, USHRT_MAX - 1);
- return true;
-}
-
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
struct socket *sock = vhost_vq_get_backend(vq);
- struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
int sock_hlen = nvq->sock_hlen;
void *buf;
int copied;
+ int ret;
if (unlikely(len < nvq->sock_hlen))
return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
return -ENOSPC;
buflen += SKB_DATA_ALIGN(len + pad);
- alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
- if (unlikely(!vhost_net_page_frag_refill(net, buflen,
- alloc_frag, GFP_KERNEL)))
+ buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+ SMP_CACHE_BYTES);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset +
- offsetof(struct tun_xdp_hdr, gso),
- sock_hlen, from);
- if (copied != sock_hlen)
- return -EFAULT;
+ copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+ sock_hlen, from);
+ if (copied != sock_hlen) {
+ ret = -EFAULT;
+ goto err;
+ }
hdr = buf;
gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2);
- if (vhost16_to_cpu(vq, gso->hdr_len) > len)
- return -EINVAL;
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+ ret = -EINVAL;
+ goto err;
+ }
}
len -= sock_hlen;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset + pad,
- len, from);
- if (copied != len)
- return -EFAULT;
+ copied = copy_from_iter(buf + pad, len, from);
+ if (copied != len) {
+ ret = -EFAULT;
+ goto err;
+ }
xdp_init_buff(xdp, buflen, NULL);
xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
- --net->refcnt_bias;
- alloc_frag->offset += buflen;
-
++nvq->batched_xdp;
return 0;
+
+err:
+ page_frag_free(buf);
+ return ret;
}
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
- n->page_frag.page = NULL;
- n->refcnt_bias = 0;
+ n->pf_cache.va = NULL;
return 0;
}
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
- if (n->page_frag.page)
- __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+ page_frag_cache_drain(&n->pf_cache);
kvfree(n);
return 0;
}
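The vhost/net conversion above swaps the hand-rolled page-frag refill and refcount bias for the page_frag_cache API. A minimal sketch of the resulting lifecycle, using made-up example_* names; the calls themselves (page_frag_alloc_align(), page_frag_free(), page_frag_cache_drain()) are the ones the patch uses.

/* Illustrative only: page_frag_cache lifecycle after the conversion. */
struct example_ctx {
	struct page_frag_cache pf_cache;	/* init once: pf_cache.va = NULL */
};

static void *example_alloc(struct example_ctx *ctx, unsigned int len)
{
	/* cache-line-aligned fragment; on error paths free it with page_frag_free() */
	return page_frag_alloc_align(&ctx->pf_cache, len, GFP_KERNEL,
				     SMP_CACHE_BYTES);
}

static void example_teardown(struct example_ctx *ctx)
{
	page_frag_cache_drain(&ctx->pf_cache);	/* replaces __page_frag_cache_drain(page, bias) */
}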
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index b694d7669d32..44c9ef1435a2 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -11,6 +11,10 @@ config APERTURE_HELPERS
Support tracking and hand-over of aperture ownership. Required
by graphics drivers for firmware-provided framebuffers.
+config SCREEN_INFO
+ bool
+ default n
+
config STI_CORE
bool
depends on PARISC
@@ -18,10 +22,7 @@ config STI_CORE
STI refers to the HP "Standard Text Interface" which is a set of
BIOS routines contained in a ROM chip in HP PA-RISC based machines.
-config VIDEO_CMDLINE
- bool
-
-config VIDEO_NOMODESET
+config VIDEO
bool
default n
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 6bbc03950899..ffbac4387c67 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -1,12 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_APERTURE_HELPERS) += aperture.o
+obj-$(CONFIG_SCREEN_INFO) += screen_info.o
obj-$(CONFIG_STI_CORE) += sticore.o
obj-$(CONFIG_VGASTATE) += vgastate.o
-obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o
-obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o
+obj-$(CONFIG_VIDEO) += cmdline.o nomodeset.o
obj-$(CONFIG_HDMI) += hdmi.o
+screen_info-y := screen_info_generic.o
+screen_info-$(CONFIG_PCI) += screen_info_pci.o
+
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_FB_STI) += console/
obj-$(CONFIG_LOGO) += logo/
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index ea2d0d69bd8c..230bca07b09d 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -183,6 +183,13 @@ config BACKLIGHT_KTD253
which is a 1-wire GPIO-controlled backlight found in some mobile
phones.
+config BACKLIGHT_KTD2801
+ tristate "Backlight Driver for Kinetic KTD2801"
+ select LEDS_EXPRESSWIRE
+ help
+ Say Y to enable the backlight driver for the Kinetic KTD2801 1-wire
+ GPIO-controlled backlight found in Samsung Galaxy Core Prime VE LTE.
+
config BACKLIGHT_KTZ8866
tristate "Backlight Driver for Kinetic KTZ8866"
depends on I2C
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 06966cb20459..8d2cb252042d 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o
obj-$(CONFIG_BACKLIGHT_KTD253) += ktd253-backlight.o
+obj-$(CONFIG_BACKLIGHT_KTD2801) += ktd2801-backlight.o
obj-$(CONFIG_BACKLIGHT_KTZ8866) += ktz8866.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 28437c2da0f5..e6f66bb35ef5 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -383,10 +383,8 @@ static int as3711_backlight_probe(struct platform_device *pdev)
if (pdev->dev.parent->of_node) {
ret = as3711_backlight_parse_dt(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "DT parsing failed: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "DT parsing failed\n");
}
if (!pdata->su1_fb && !pdata->su2_fb) {
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index c95a12bf0ce2..b1e7126380ef 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -119,7 +119,6 @@ static int bd6107_probe(struct i2c_client *client)
struct backlight_device *backlight;
struct backlight_properties props;
struct bd6107 *bd;
- int ret;
if (pdata == NULL) {
dev_err(&client->dev, "No platform data\n");
@@ -147,11 +146,9 @@ static int bd6107_probe(struct i2c_client *client)
* the reset.
*/
bd->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(bd->reset)) {
- dev_err(&client->dev, "unable to request reset GPIO\n");
- ret = PTR_ERR(bd->reset);
- return ret;
- }
+ if (IS_ERR(bd->reset))
+ return dev_err_probe(&client->dev, PTR_ERR(bd->reset),
+ "unable to request reset GPIO\n");
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 0a57033ae31d..dd765098ad98 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -11,6 +11,7 @@
* by Eric Miao <eric.miao@marvell.com>
*/
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 1cdc8543310b..b8ff7046510e 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
wleds->led_reg = platform_get_device_id(pdev)->driver_data;
wleds->state = DA9052_WLEDS_OFF;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = DA9052_MAX_BRIGHTNESS;
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index d28c30b2a35d..e0c8c2a3f5dc 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -64,13 +64,9 @@ static int gpio_backlight_probe(struct platform_device *pdev)
def_value = device_property_read_bool(dev, "default-on");
gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
- if (IS_ERR(gbl->gpiod)) {
- ret = PTR_ERR(gbl->gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "Error: The gpios parameter is missing or invalid.\n");
- return ret;
- }
+ if (IS_ERR(gbl->gpiod))
+ return dev_err_probe(dev, PTR_ERR(gbl->gpiod),
+ "The gpios parameter is missing or invalid\n");
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
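Several backlight hunks in this section replace open-coded dev_err()/return pairs with dev_err_probe(). The helper returns the error it is given, stays quiet for -EPROBE_DEFER and records the deferral reason, which is why the explicit -EPROBE_DEFER check in the old gpio_backlight code can go away. A sketch of the idiom with a hypothetical example_get_gpio():

/* Illustrative only: dev_err_probe() folds logging and error return together. */
static int example_get_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);

	if (IS_ERR(gpiod))
		return dev_err_probe(dev, PTR_ERR(gpiod),
				     "The gpios parameter is missing or invalid\n");

	*out = gpiod;
	return 0;
}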
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index d7298376cf74..339d9128fbde 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -8,9 +8,9 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#define HX8357_NUM_IM_PINS 3
@@ -564,41 +564,28 @@ static struct lcd_ops hx8357_ops = {
.get_power = hx8357_get_power,
};
-static const struct of_device_id hx8357_dt_ids[] = {
- {
- .compatible = "himax,hx8357",
- .data = hx8357_lcd_init,
- },
- {
- .compatible = "himax,hx8369",
- .data = hx8369_lcd_init,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+typedef int (*hx8357_init_fn)(struct lcd_device *);
static int hx8357_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct lcd_device *lcdev;
struct hx8357_data *lcd;
- const struct of_device_id *match;
+ hx8357_init_fn init_fn;
int i, ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "SPI setup failed.\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "SPI setup failed.\n");
lcd->spi = spi;
- match = of_match_device(hx8357_dt_ids, &spi->dev);
- if (!match || !match->data)
+ init_fn = device_get_match_data(dev);
+ if (!init_fn)
return -EINVAL;
lcd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -609,14 +596,15 @@ static int hx8357_probe(struct spi_device *spi)
lcd->im_pins = devm_gpiod_get_array_optional(dev, "im", GPIOD_OUT_LOW);
if (IS_ERR(lcd->im_pins))
return dev_err_probe(dev, PTR_ERR(lcd->im_pins), "failed to request im GPIOs\n");
- if (lcd->im_pins->ndescs < HX8357_NUM_IM_PINS)
- return dev_err_probe(dev, -EINVAL, "not enough im GPIOs\n");
+ if (lcd->im_pins) {
+ if (lcd->im_pins->ndescs < HX8357_NUM_IM_PINS)
+ return dev_err_probe(dev, -EINVAL, "not enough im GPIOs\n");
- for (i = 0; i < HX8357_NUM_IM_PINS; i++)
- gpiod_set_consumer_name(lcd->im_pins->desc[i], "im_pins");
+ for (i = 0; i < HX8357_NUM_IM_PINS; i++)
+ gpiod_set_consumer_name(lcd->im_pins->desc[i], "im_pins");
+ }
- lcdev = devm_lcd_device_register(&spi->dev, "mxsfb", &spi->dev, lcd,
- &hx8357_ops);
+ lcdev = devm_lcd_device_register(dev, "mxsfb", dev, lcd, &hx8357_ops);
if (IS_ERR(lcdev)) {
ret = PTR_ERR(lcdev);
return ret;
@@ -625,17 +613,28 @@ static int hx8357_probe(struct spi_device *spi)
hx8357_lcd_reset(lcdev);
- ret = ((int (*)(struct lcd_device *))match->data)(lcdev);
- if (ret) {
- dev_err(&spi->dev, "Couldn't initialize panel\n");
- return ret;
- }
+ ret = init_fn(lcdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Couldn't initialize panel\n");
- dev_info(&spi->dev, "Panel probed\n");
+ dev_info(dev, "Panel probed\n");
return 0;
}
+static const struct of_device_id hx8357_dt_ids[] = {
+ {
+ .compatible = "himax,hx8357",
+ .data = hx8357_lcd_init,
+ },
+ {
+ .compatible = "himax,hx8369",
+ .data = hx8369_lcd_init,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+
static struct spi_driver hx8357_driver = {
.probe = hx8357_probe,
.driver = {
diff --git a/drivers/video/backlight/ktd2801-backlight.c b/drivers/video/backlight/ktd2801-backlight.c
new file mode 100644
index 000000000000..d295c2766025
--- /dev/null
+++ b/drivers/video/backlight/ktd2801-backlight.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datasheet:
+ * https://www.kinet-ic.com/uploads/web/KTD2801/KTD2801-04b.pdf
+ */
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds-expresswire.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define KTD2801_DEFAULT_BRIGHTNESS 100
+#define KTD2801_MAX_BRIGHTNESS 255
+
+/* These values have been extracted from Samsung's driver. */
+static const struct expresswire_timing ktd2801_timing = {
+ .poweroff_us = 2600,
+ .detect_delay_us = 150,
+ .detect_us = 270,
+ .data_start_us = 5,
+ .short_bitset_us = 5,
+ .long_bitset_us = 15,
+ .end_of_data_low_us = 10,
+ .end_of_data_high_us = 350
+};
+
+struct ktd2801_backlight {
+ struct expresswire_common_props props;
+ struct backlight_device *bd;
+ bool was_on;
+};
+
+static int ktd2801_update_status(struct backlight_device *bd)
+{
+ struct ktd2801_backlight *ktd2801 = bl_get_data(bd);
+ u8 brightness = (u8) backlight_get_brightness(bd);
+
+ if (backlight_is_blank(bd)) {
+ expresswire_power_off(&ktd2801->props);
+ ktd2801->was_on = false;
+ return 0;
+ }
+
+ if (!ktd2801->was_on) {
+ expresswire_enable(&ktd2801->props);
+ ktd2801->was_on = true;
+ }
+
+ expresswire_write_u8(&ktd2801->props, brightness);
+
+ return 0;
+}
+
+static const struct backlight_ops ktd2801_backlight_ops = {
+ .update_status = ktd2801_update_status,
+};
+
+static int ktd2801_backlight_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct backlight_device *bd;
+ struct ktd2801_backlight *ktd2801;
+ u32 brightness, max_brightness;
+ int ret;
+
+ ktd2801 = devm_kzalloc(dev, sizeof(*ktd2801), GFP_KERNEL);
+ if (!ktd2801)
+ return -ENOMEM;
+ ktd2801->was_on = true;
+ ktd2801->props.timing = ktd2801_timing;
+
+ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+ if (ret)
+ max_brightness = KTD2801_MAX_BRIGHTNESS;
+ if (max_brightness > KTD2801_MAX_BRIGHTNESS) {
+ dev_err(dev, "illegal max brightness specified\n");
+ max_brightness = KTD2801_MAX_BRIGHTNESS;
+ }
+
+ ret = device_property_read_u32(dev, "default-brightness", &brightness);
+ if (ret)
+ brightness = KTD2801_DEFAULT_BRIGHTNESS;
+ if (brightness > max_brightness) {
+ dev_err(dev, "default brightness exceeds max\n");
+ brightness = max_brightness;
+ }
+
+ ktd2801->props.ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_OUT_HIGH);
+ if (IS_ERR(ktd2801->props.ctrl_gpio))
+ return dev_err_probe(dev, PTR_ERR(ktd2801->props.ctrl_gpio),
+ "failed to get backlight GPIO");
+ gpiod_set_consumer_name(ktd2801->props.ctrl_gpio, dev_name(dev));
+
+ bd = devm_backlight_device_register(dev, dev_name(dev), dev, ktd2801,
+ &ktd2801_backlight_ops, NULL);
+ if (IS_ERR(bd))
+ return dev_err_probe(dev, PTR_ERR(bd),
+ "failed to register backlight");
+
+ bd->props.max_brightness = max_brightness;
+ bd->props.brightness = brightness;
+
+ ktd2801->bd = bd;
+ platform_set_drvdata(pdev, bd);
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static const struct of_device_id ktd2801_of_match[] = {
+ { .compatible = "kinetic,ktd2801" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ktd2801_of_match);
+
+static struct platform_driver ktd2801_backlight_driver = {
+ .driver = {
+ .name = "ktd2801-backlight",
+ .of_match_table = ktd2801_of_match,
+ },
+ .probe = ktd2801_backlight_probe,
+};
+module_platform_driver(ktd2801_backlight_driver);
+
+MODULE_IMPORT_NS(EXPRESSWIRE);
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Kinetic KTD2801 Backlight Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
index 9c980f2571ee..014877b5a984 100644
--- a/drivers/video/backlight/ktz8866.c
+++ b/drivers/video/backlight/ktz8866.c
@@ -97,20 +97,20 @@ static void ktz8866_init(struct ktz8866 *ktz)
{
unsigned int val = 0;
- if (of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
+ if (!of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
ktz8866_write(ktz, BL_EN, BIT(val) - 1);
else
/* Enable all 6 current sinks if the number of current sinks isn't specified. */
ktz8866_write(ktz, BL_EN, BIT(6) - 1);
- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
+ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
if (val <= 128)
ktz8866_write(ktz, BL_CFG2, BIT(7) | (ilog2(val) << 3) | PWM_HYST);
else
ktz8866_write(ktz, BL_CFG2, BIT(7) | ((5 + val / 64) << 3) | PWM_HYST);
}
- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
+ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
if (val == 0)
ktz8866_write(ktz, BL_DIMMING, 0);
else {
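The ktz8866 fix hinges on the return convention of of_property_read_u32(): it returns 0 when the property exists and was read, and a negative errno otherwise, so the success path must be guarded with the negation, as the new code does. A small sketch; example_read_sinks() is not from the driver.

/* Illustrative only: 0 from of_property_read_u32() means the read succeeded. */
static u32 example_read_sinks(struct device_node *np)
{
	u32 val;

	if (!of_property_read_u32(np, "current-num-sinks", &val))
		return val;	/* property present and parsed */

	return 6;		/* property absent: fall back to all six sinks */
}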
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index cc763cf15f53..bd5137ee203b 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -179,34 +179,28 @@ static int l4f00242t03_probe(struct spi_device *spi)
priv->spi = spi;
priv->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset)) {
- dev_err(&spi->dev,
- "Unable to get the lcd l4f00242t03 reset gpio.\n");
- return PTR_ERR(priv->reset);
- }
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->reset),
+ "Unable to get the lcd l4f00242t03 reset gpio.\n");
gpiod_set_consumer_name(priv->reset, "lcd l4f00242t03 reset");
priv->enable = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(priv->enable)) {
- dev_err(&spi->dev,
- "Unable to get the lcd l4f00242t03 data en gpio.\n");
- return PTR_ERR(priv->enable);
- }
+ if (IS_ERR(priv->enable))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->enable),
+ "Unable to get the lcd l4f00242t03 data en gpio.\n");
gpiod_set_consumer_name(priv->enable, "lcd l4f00242t03 data enable");
priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
- if (IS_ERR(priv->io_reg)) {
- dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
- __func__);
- return PTR_ERR(priv->io_reg);
- }
+ if (IS_ERR(priv->io_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->io_reg),
+ "%s: Unable to get the IO regulator\n",
+ __func__);
priv->core_reg = devm_regulator_get(&spi->dev, "vcore");
- if (IS_ERR(priv->core_reg)) {
- dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
- __func__);
- return PTR_ERR(priv->core_reg);
- }
+ if (IS_ERR(priv->core_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(priv->core_reg),
+ "%s: Unable to get the core regulator\n",
+ __func__);
priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev,
priv, &l4f_ops);
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index a3412c936ca2..76d47e2e8242 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -189,10 +189,11 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
int ret;
struct lm3630a_chip *pchip = bl_get_data(bl);
enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+ int brightness = backlight_get_brightness(bl);
/* pwm control */
if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0)
- return lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ return lm3630a_pwm_ctrl(pchip, brightness,
bl->props.max_brightness);
/* disable sleep */
@@ -201,9 +202,9 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
goto out_i2c_err;
usleep_range(1000, 2000);
/* minimum brightness is 0x04 */
- ret = lm3630a_write(pchip, REG_BRT_A, bl->props.brightness);
+ ret = lm3630a_write(pchip, REG_BRT_A, brightness);
- if (backlight_is_blank(bl) || (backlight_get_brightness(bl) < 0x4))
+ if (brightness < 0x4)
/* turn the string off */
ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, 0);
else
@@ -233,7 +234,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -244,11 +245,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_A);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -266,10 +264,11 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
int ret;
struct lm3630a_chip *pchip = bl_get_data(bl);
enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
+ int brightness = backlight_get_brightness(bl);
/* pwm control */
if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0)
- return lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ return lm3630a_pwm_ctrl(pchip, brightness,
bl->props.max_brightness);
/* disable sleep */
@@ -278,9 +277,9 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
goto out_i2c_err;
usleep_range(1000, 2000);
/* minimum brightness is 0x04 */
- ret = lm3630a_write(pchip, REG_BRT_B, bl->props.brightness);
+ ret = lm3630a_write(pchip, REG_BRT_B, brightness);
- if (backlight_is_blank(bl) || (backlight_get_brightness(bl) < 0x4))
+ if (brightness < 0x4)
/* turn the string off */
ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, 0);
else
@@ -310,7 +309,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -321,11 +320,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_B);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -343,6 +339,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
struct backlight_properties props;
const char *label;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
props.brightness = pdata->leda_init_brt;
@@ -543,10 +540,8 @@ static int lm3630a_probe(struct i2c_client *client)
pchip->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_HIGH);
- if (IS_ERR(pchip->enable_gpio)) {
- rval = PTR_ERR(pchip->enable_gpio);
- return rval;
- }
+ if (IS_ERR(pchip->enable_gpio))
+ return PTR_ERR(pchip->enable_gpio);
/* chip initialize */
rval = lm3630a_chip_init(pchip);
@@ -563,10 +558,9 @@ static int lm3630a_probe(struct i2c_client *client)
/* pwm */
if (pdata->pwm_ctrl != LM3630A_PWM_DISABLE) {
pchip->pwmd = devm_pwm_get(pchip->dev, "lm3630a-pwm");
- if (IS_ERR(pchip->pwmd)) {
- dev_err(&client->dev, "fail : get pwm device\n");
- return PTR_ERR(pchip->pwmd);
- }
+ if (IS_ERR(pchip->pwmd))
+ return dev_err_probe(&client->dev, PTR_ERR(pchip->pwmd),
+ "fail : get pwm device\n");
pwm_init_state(pchip->pwmd, &pchip->pwmd_state);
}
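[Editor's note] The lm3630a hunks read the effective level once through backlight_get_brightness(), which already returns 0 when the device is blanked, so the separate backlight_is_blank() test goes away; the get_brightness callbacks likewise return the value directly instead of caching it in bl->props.brightness. A hedged sketch of an update_status callback built the same way (the write helper is a hypothetical stand-in for the driver's I2C accessor):

#include <linux/backlight.h>

/* Hypothetical register write; stands in for the chip-specific accessor. */
static int example_write_brightness(struct backlight_device *bl, int level)
{
	return 0;
}

static int example_bl_update_status(struct backlight_device *bl)
{
	/* Returns 0 when the device is blanked, so no separate blank check. */
	int brightness = backlight_get_brightness(bl);

	return example_write_brightness(bl, brightness);
}

static const struct backlight_ops example_bl_ops = {
	.update_status = example_bl_update_status,
};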
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 5246c171497d..564f62acd721 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -338,6 +338,7 @@ static int lm3639_probe(struct i2c_client *client)
}
/* backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.brightness = pdata->init_brt_led;
props.max_brightness = pdata->max_brt_led;
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index d1a14b0db265..31f97230ee50 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
int init_brt;
char *name;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = MAX_BRIGHTNESS;
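[Editor's note] The memset() additions in lm3630a, lm3639 and lp8788 zero the stack-allocated struct backlight_properties before only a few members are assigned; without it, unset fields such as power or scale hold stack garbage. A minimal sketch of registering a backlight with fully initialized properties (the device name and limit are placeholders):

#include <linux/backlight.h>
#include <linux/string.h>

static struct backlight_device *example_register_bl(struct device *dev,
						    void *drvdata,
						    const struct backlight_ops *ops)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));	/* avoid uninitialized fields */
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 255;		/* placeholder limit */
	props.brightness = 255;

	return devm_backlight_device_register(dev, "example-bl", dev,
					      drvdata, ops, &props);
}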
diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
index 34d71259fac1..c80a1481e742 100644
--- a/drivers/video/backlight/mp3309c.c
+++ b/drivers/video/backlight/mp3309c.c
@@ -15,6 +15,8 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -131,7 +133,7 @@ static int mp3309c_bl_update_status(struct backlight_device *bl)
chip->pdata->levels[brightness],
chip->pdata->levels[chip->pdata->max_brightness]);
pwmstate.enabled = true;
- ret = pwm_apply_state(chip->pwmd, &pwmstate);
+ ret = pwm_apply_might_sleep(chip->pwmd, &pwmstate);
if (ret)
return ret;
@@ -199,20 +201,15 @@ static const struct backlight_ops mp3309c_bl_ops = {
.update_status = mp3309c_bl_update_status,
};
-static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
- struct mp3309c_platform_data *pdata)
+static int mp3309c_parse_fwnode(struct mp3309c_chip *chip,
+ struct mp3309c_platform_data *pdata)
{
- struct device_node *node = chip->dev->of_node;
- struct property *prop_pwms;
- struct property *prop_levels = NULL;
- int length = 0;
int ret, i;
unsigned int num_levels, tmp_value;
+ struct device *dev = chip->dev;
- if (!node) {
- dev_err(chip->dev, "failed to get DT node\n");
- return -ENODEV;
- }
+ if (!dev_fwnode(dev))
+ return dev_err_probe(dev, -ENODEV, "failed to get firmware node\n");
/*
* Dimming mode: the MP3309C provides two dimming control mode:
@@ -224,12 +221,10 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
* found in the backlight node, the mode switches to PWM mode.
*/
pdata->dimming_mode = DIMMING_ANALOG_I2C;
- prop_pwms = of_find_property(node, "pwms", &length);
- if (prop_pwms) {
- chip->pwmd = devm_pwm_get(chip->dev, NULL);
+ if (device_property_present(dev, "pwms")) {
+ chip->pwmd = devm_pwm_get(dev, NULL);
if (IS_ERR(chip->pwmd))
- return dev_err_probe(chip->dev, PTR_ERR(chip->pwmd),
- "error getting pwm data\n");
+ return dev_err_probe(dev, PTR_ERR(chip->pwmd), "error getting pwm data\n");
pdata->dimming_mode = DIMMING_PWM;
pwm_apply_args(chip->pwmd);
}
@@ -247,21 +242,17 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
num_levels = ANALOG_I2C_NUM_LEVELS;
/* Enable GPIO used in I2C dimming mode only */
- chip->enable_gpio = devm_gpiod_get(chip->dev, "enable",
- GPIOD_OUT_HIGH);
+ chip->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(chip->enable_gpio))
- return dev_err_probe(chip->dev,
- PTR_ERR(chip->enable_gpio),
+ return dev_err_probe(dev, PTR_ERR(chip->enable_gpio),
"error getting enable gpio\n");
} else {
/*
* PWM control mode: check for brightness level in DT
*/
- prop_levels = of_find_property(node, "brightness-levels",
- &length);
- if (prop_levels) {
+ if (device_property_present(dev, "brightness-levels")) {
/* Read brightness levels from DT */
- num_levels = length / sizeof(u32);
+ num_levels = device_property_count_u32(dev, "brightness-levels");
if (num_levels < 2)
return -EINVAL;
} else {
@@ -271,14 +262,12 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
}
/* Fill brightness levels array */
- pdata->levels = devm_kcalloc(chip->dev, num_levels,
- sizeof(*pdata->levels), GFP_KERNEL);
+ pdata->levels = devm_kcalloc(dev, num_levels, sizeof(*pdata->levels), GFP_KERNEL);
if (!pdata->levels)
return -ENOMEM;
- if (prop_levels) {
- ret = of_property_read_u32_array(node, "brightness-levels",
- pdata->levels,
- num_levels);
+ if (device_property_present(dev, "brightness-levels")) {
+ ret = device_property_read_u32_array(dev, "brightness-levels",
+ pdata->levels, num_levels);
if (ret < 0)
return ret;
} else {
@@ -288,13 +277,11 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
pdata->max_brightness = num_levels - 1;
- ret = of_property_read_u32(node, "default-brightness",
- &pdata->default_brightness);
+ ret = device_property_read_u32(dev, "default-brightness", &pdata->default_brightness);
if (ret)
pdata->default_brightness = pdata->max_brightness;
if (pdata->default_brightness > pdata->max_brightness) {
- dev_err(chip->dev,
- "default brightness exceeds max brightness\n");
+ dev_err_probe(dev, -ERANGE, "default brightness exceeds max brightness\n");
pdata->default_brightness = pdata->max_brightness;
}
@@ -310,8 +297,8 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
* If missing, the default value for OVP is 35.5V
*/
pdata->over_voltage_protection = REG_I2C_1_OVP1;
- if (!of_property_read_u32(node, "mps,overvoltage-protection-microvolt",
- &tmp_value)) {
+ ret = device_property_read_u32(dev, "mps,overvoltage-protection-microvolt", &tmp_value);
+ if (!ret) {
switch (tmp_value) {
case 13500000:
pdata->over_voltage_protection = 0x00;
@@ -328,62 +315,59 @@ static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
}
/* Synchronous (default) and non-synchronous mode */
- pdata->sync_mode = true;
- if (of_property_read_bool(node, "mps,no-sync-mode"))
- pdata->sync_mode = false;
+ pdata->sync_mode = !device_property_read_bool(dev, "mps,no-sync-mode");
return 0;
}
static int mp3309c_probe(struct i2c_client *client)
{
- struct mp3309c_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
+ struct mp3309c_platform_data *pdata = dev_get_platdata(dev);
struct mp3309c_chip *chip;
struct backlight_properties props;
struct pwm_state pwmstate;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "failed to check i2c functionality\n");
- return -EOPNOTSUPP;
- }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return dev_err_probe(dev, -EOPNOTSUPP, "failed to check i2c functionality\n");
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->dev = &client->dev;
+ chip->dev = dev;
chip->regmap = devm_regmap_init_i2c(client, &mp3309c_regmap);
if (IS_ERR(chip->regmap))
- return dev_err_probe(&client->dev, PTR_ERR(chip->regmap),
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
"failed to allocate register map\n");
i2c_set_clientdata(client, chip);
if (!pdata) {
- pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- ret = pm3309c_parse_dt_node(chip, pdata);
+ ret = mp3309c_parse_fwnode(chip, pdata);
if (ret)
return ret;
}
chip->pdata = pdata;
/* Backlight properties */
+ memset(&props, 0, sizeof(struct backlight_properties));
props.brightness = pdata->default_brightness;
props.max_brightness = pdata->max_brightness;
props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
props.power = FB_BLANK_UNBLANK;
props.fb_blank = FB_BLANK_UNBLANK;
- chip->bl = devm_backlight_device_register(chip->dev, "mp3309c",
- chip->dev, chip,
+ chip->bl = devm_backlight_device_register(dev, "mp3309c", dev, chip,
&mp3309c_bl_ops, &props);
if (IS_ERR(chip->bl))
- return dev_err_probe(chip->dev, PTR_ERR(chip->bl),
+ return dev_err_probe(dev, PTR_ERR(chip->bl),
"error registering backlight device\n");
/* In PWM dimming mode, enable pwm device */
@@ -393,10 +377,9 @@ static int mp3309c_probe(struct i2c_client *client)
chip->pdata->default_brightness,
chip->pdata->max_brightness);
pwmstate.enabled = true;
- ret = pwm_apply_state(chip->pwmd, &pwmstate);
+ ret = pwm_apply_might_sleep(chip->pwmd, &pwmstate);
if (ret)
- return dev_err_probe(chip->dev, ret,
- "error setting pwm device\n");
+ return dev_err_probe(dev, ret, "error setting pwm device\n");
}
chip->pdata->status = FIRST_POWER_ON;
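[Editor's note] The mp3309c rework drops the OF-only of_find_property()/of_property_read_*() calls in favour of the firmware-node agnostic device_property_*() helpers, so the same parsing serves DT- and ACPI-described devices; pwm_apply_state() is also renamed to its pwm_apply_might_sleep() successor. A hedged sketch of the same parsing style (the property name follows the diff, the surrounding structure is assumed):

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

static int example_parse_levels(struct device *dev, u32 **levels, int *num)
{
	int n, ret;

	if (!device_property_present(dev, "brightness-levels"))
		return -ENODEV;

	n = device_property_count_u32(dev, "brightness-levels");
	if (n < 2)
		return -EINVAL;

	*levels = devm_kcalloc(dev, n, sizeof(**levels), GFP_KERNEL);
	if (!*levels)
		return -ENOMEM;

	ret = device_property_read_u32_array(dev, "brightness-levels",
					     *levels, n);
	if (ret < 0)
		return ret;

	*num = n;
	return 0;
}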
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index f946470ce9f6..51faa889e01f 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -114,10 +114,8 @@ static int pandora_backlight_probe(struct platform_device *pdev)
u8 r;
priv = devm_kmalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "failed to allocate driver private data\n");
+ if (!priv)
return -ENOMEM;
- }
memset(&props, 0, sizeof(props));
props.max_brightness = MAX_USER_VALUE;
diff --git a/drivers/video/cmdline.c b/drivers/video/cmdline.c
index d3d257489c3d..49ee3fb4951a 100644
--- a/drivers/video/cmdline.c
+++ b/drivers/video/cmdline.c
@@ -80,6 +80,7 @@ const char *video_get_options(const char *name)
}
EXPORT_SYMBOL(video_get_options);
+#if IS_ENABLED(CONFIG_FB_CORE)
bool __video_get_options(const char *name, const char **options, bool is_of)
{
bool enabled = true;
@@ -96,6 +97,7 @@ bool __video_get_options(const char *name, const char **options, bool is_of)
return enabled;
}
EXPORT_SYMBOL(__video_get_options);
+#endif
/*
* Process command line options for video adapters. This function is
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d0bcc1d786e..a61b8260b8f3 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -75,7 +75,6 @@ config FB_CIRRUS
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This enables support for Cirrus Logic GD542x/543x based boards on
Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
@@ -95,7 +94,6 @@ config FB_PM2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for cards based on
the 3D Labs Permedia, Permedia 2 and Permedia 2V chips.
@@ -161,7 +159,6 @@ config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
depends on FB && PCI && (BROKEN || !SPARC64)
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This enables support for the Integraphics CyberPro 20x0 and 5000
VGA chips used in the Rebel.com Netwinder and other machines.
@@ -312,7 +309,6 @@ config FB_CT65550
bool "Chips 65550 display support"
depends on (FB = y) && PPC32 && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Chips & Technologies
65550 graphics chip in PowerBooks.
@@ -321,7 +317,6 @@ config FB_ASILIANT
bool "Asiliant (Chips) 69000 display support"
depends on (FB = y) && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Asiliant 69030 chipset
@@ -331,7 +326,6 @@ config FB_IMSTT
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select FB_MACMODES if PPC_PMAC
- select VIDEO_NOMODESET
help
The IMS Twin Turbo is a PCI-based frame buffer card bundled with
many Macintosh and compatible computers.
@@ -396,7 +390,6 @@ config FB_TGA
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for generic TGA and SFB+
graphic cards. These include DEC ZLXp-E1, -E2 and -E3 PCI cards,
@@ -573,7 +566,6 @@ config FB_XVR500
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the framebuffer device for the Sun XVR-500 and similar
graphics cards based upon the 3DLABS Wildcat chipset. The driver
@@ -585,7 +577,6 @@ config FB_XVR2500
bool "Sun XVR-2500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the framebuffer device for the Sun XVR-2500 and similar
graphics cards based upon the 3DLABS Wildcat chipset. The driver
@@ -611,7 +602,6 @@ config FB_PVR2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
Say Y here if you have a PowerVR 2 card in your box. If you plan to
run linux on your Dreamcast, you will have to say Y here.
@@ -674,7 +664,6 @@ config FB_NVIDIA
select FB_IOMEM_FOPS
select BITREVERSE
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the nVidia chips, TNT
and newer. For very old chipsets, such as the RIVA128, then use
@@ -723,7 +712,6 @@ config FB_RIVA
select FB_MODE_HELPERS
select BITREVERSE
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the nVidia Riva/Geforce
chips.
@@ -766,7 +754,6 @@ config FB_I740
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
select FB_DDC
help
This driver supports graphics cards based on Intel740 chip.
@@ -777,7 +764,6 @@ config FB_I810
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports the on-board graphics built in to the Intel 810
and 815 chipsets. Say Y if you have and plan to use such a board.
@@ -830,7 +816,6 @@ config FB_MATROX
select FB_IOMEM_FOPS
select FB_TILEBLITTING
select FB_MACMODES if PPC_PMAC
- select VIDEO_NOMODESET
help
Say Y here if you have a Matrox Millennium, Matrox Millennium II,
Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox
@@ -953,7 +938,6 @@ config FB_RADEON
select FB_IOMEM_FOPS
select FB_MACMODES if PPC
select FB_MODE_HELPERS
- select VIDEO_NOMODESET
help
Choose this option if you want to use an ATI Radeon graphics card as
a framebuffer device. There are both PCI and AGP versions. You
@@ -991,7 +975,6 @@ config FB_ATY128
select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
select FB_IOMEM_HELPERS
select FB_MACMODES if PPC_PMAC
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the ATI Rage128 chips.
Say Y if you have such a graphics board and read
@@ -1017,7 +1000,6 @@ config FB_ATY
select FB_IOMEM_FOPS
select FB_MACMODES if PPC
select FB_ATY_CT if SPARC64 && PCI
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the ATI Mach64 chips.
Say Y if you have such a graphics board.
@@ -1069,7 +1051,6 @@ config FB_S3
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
- select VIDEO_NOMODESET
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for graphics boards with S3 Trio / S3 Virge chip.
@@ -1091,7 +1072,6 @@ config FB_SAVAGE
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports notebooks and computers with S3 Savage PCI/AGP
chips.
@@ -1131,7 +1111,6 @@ config FB_SIS
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select FB_SIS_300 if !FB_SIS_315
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the SiS 300, 315, 330
and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
@@ -1162,7 +1141,6 @@ config FB_VIA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select I2C_ALGOBIT
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
@@ -1203,7 +1181,6 @@ config FB_NEOMAGIC
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
select VGASTATE
- select VIDEO_NOMODESET
help
This driver supports notebooks with NeoMagic PCI chips.
Say Y if you have such a graphics card.
@@ -1215,7 +1192,6 @@ config FB_KYRO
tristate "IMG Kyro support"
depends on FB && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Say Y here if you have a STG4000 / Kyro / PowerVR 3 based
graphics board.
@@ -1231,7 +1207,6 @@ config FB_3DFX
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
- select VIDEO_NOMODESET
help
This driver supports graphics boards with the 3Dfx Banshee,
Voodoo3 or VSA-100 (aka Voodoo4/5) chips. Say Y if you have
@@ -1260,7 +1235,6 @@ config FB_VOODOO1
depends on FB && PCI
depends on FB_DEVICE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
Voodoo2 (cvg) based graphics card.
@@ -1283,7 +1257,6 @@ config FB_VT8623
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
- select VIDEO_NOMODESET
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for CastleRock integrated graphics core in the
@@ -1298,7 +1271,6 @@ config FB_TRIDENT
select FB_DDC
select FB_IOMEM_FOPS
select FB_MODE_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for Trident PCI/AGP chipsets.
Supported chipset families are TGUI 9440/96XX, 3DImage, Blade3D
@@ -1323,7 +1295,6 @@ config FB_ARK
select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
- select VIDEO_NOMODESET
select FONT_8x16 if FRAMEBUFFER_CONSOLE
help
Driver for PCI graphics boards with ARK 2000PV chip
@@ -1336,7 +1307,6 @@ config FB_PM3
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the 3DLabs Permedia3
chipset, used in Formac ProFormance III, 3DLabs Oxygen VX1 &
@@ -1347,7 +1317,6 @@ config FB_CARMINE
tristate "Fujitsu carmine frame buffer support"
depends on FB && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Fujitsu Carmine chip.
The driver provides two independent frame buffer devices.
@@ -1629,7 +1598,6 @@ config FB_IBM_GXT4500
tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
depends on FB
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Say Y here to enable support for the IBM GXT4000P/6000P and
GXT4500P/6500P display adaptor based on Raster Engine RC1000,
@@ -1747,7 +1715,6 @@ config FB_MB862XX
depends on FB
depends on PCI || (OF && PPC)
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
@@ -1813,7 +1780,6 @@ config FB_HYPERV
depends on FB && HYPERV
select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA
select FB_IOMEM_HELPERS_DEFERRED
- select VIDEO_NOMODESET
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
@@ -1847,7 +1813,6 @@ config FB_SM712
tristate "Silicon Motion SM712 framebuffer support"
depends on FB && PCI
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Frame buffer driver for the Silicon Motion SM710, SM712, SM721
and SM722 chips.
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index b80711f13df8..b16a905588fe 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -15,6 +15,7 @@
*/
#include <linux/aperture.h>
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 21053bf00dc5..db09fe87fcd4 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -4,7 +4,7 @@
#
config FB_CORE
- select VIDEO_CMDLINE
+ select VIDEO
tristate
config FB_NOTIFY
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 1183e7a871f8..46823c2e2ba1 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2399,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
- char *old_data = NULL;
+ u8 *old_data = vc->vc_font.data;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
old_userfont = p->userfont;
if ((p->userfont = userfont))
@@ -2437,13 +2435,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
update_screen(vc);
}
- if (old_data && (--REFCOUNT(old_data) == 0))
+ if (old_userfont && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
err_out:
p->fontdata = old_data;
- vc->vc_font.data = (void *)old_data;
+ vc->vc_font.data = old_data;
if (userfont) {
p->userfont = old_userfont;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index fc206755f5f6..48287366e0d4 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -645,7 +645,6 @@ int fb_new_modelist(struct fb_info *info)
return 0;
}
-#if defined(CONFIG_VIDEO_NOMODESET)
bool fb_modesetting_disabled(const char *drvname)
{
bool fwonly = video_firmware_drivers_only();
@@ -657,6 +656,5 @@ bool fb_modesetting_disabled(const char *drvname)
return fwonly;
}
EXPORT_SYMBOL(fb_modesetting_disabled);
-#endif
MODULE_LICENSE("GPL");
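[Editor's note] With the #if defined(CONFIG_VIDEO_NOMODESET) guard removed, fb_modesetting_disabled() is always built into fbmem.c, matching the Kconfig hunks above that stop selecting VIDEO_NOMODESET per driver. A hedged sketch of the usual call site in a driver's init path (the driver name is a placeholder; whether a given driver checks it there is an assumption):

#include <linux/fb.h>
#include <linux/module.h>

static int __init example_fb_init(void)
{
	/* Bail out if the user booted with nomodeset / firmware drivers only. */
	if (fb_modesetting_disabled("examplefb"))
		return -ENODEV;

	return 0;	/* a real driver would register itself here */
}
module_init(example_fb_init);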
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index f9b4ddd592ce..8dd82afb3452 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -13,11 +13,9 @@
#include <linux/efi-bgrt.h>
#include <linux/errno.h>
#include <linux/fb.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/screen_info.h>
-#include <linux/pm_runtime.h>
#include <video/vga.h>
#include <asm/efi.h>
#include <drm/drm_utils.h> /* For drm_get_panel_orientation_quirk */
@@ -48,8 +46,6 @@ static bool use_bgrt = true;
static bool request_mem_succeeded = false;
static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
-static struct pci_dev *efifb_pci_dev; /* dev with BAR covering the efifb */
-
struct efifb_par {
u32 pseudo_palette[16];
resource_size_t base;
@@ -108,7 +104,7 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
*/
#if defined CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER && \
defined CONFIG_ACPI_BGRT
-static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
+static void efifb_copy_bmp(u8 *src, u32 *dst, int width, const struct screen_info *si)
{
u8 r, g, b;
@@ -130,7 +126,7 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
* resolution still fits, it will be displayed very close to the right edge of
* the display looking quite bad. This function checks for this.
*/
-static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+static bool efifb_bgrt_sanity_check(const struct screen_info *si, u32 bmp_width)
{
/*
* All x86 firmwares horizontally center the image (the yoffset
@@ -141,16 +137,15 @@ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
return bgrt_tab.image_offset_x == expected_xoffset;
}
#else
-static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+static bool efifb_bgrt_sanity_check(const struct screen_info *si, u32 bmp_width)
{
return true;
}
#endif
-static void efifb_show_boot_graphics(struct fb_info *info)
+static void efifb_show_boot_graphics(struct fb_info *info, const struct screen_info *si)
{
u32 bmp_width, bmp_height, bmp_pitch, dst_x, y, src_y;
- struct screen_info *si = &screen_info;
struct bmp_file_header *file_header;
struct bmp_dib_header *dib_header;
void *bgrt_image = NULL;
@@ -247,7 +242,8 @@ error:
pr_warn("efifb: Ignoring BGRT: unexpected or invalid BMP data\n");
}
#else
-static inline void efifb_show_boot_graphics(struct fb_info *info) {}
+static inline void efifb_show_boot_graphics(struct fb_info *info, const struct screen_info *si)
+{ }
#endif
/*
@@ -258,9 +254,6 @@ static void efifb_destroy(struct fb_info *info)
{
struct efifb_par *par = info->par;
- if (efifb_pci_dev)
- pm_runtime_put(&efifb_pci_dev->dev);
-
if (info->screen_base) {
if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
iounmap(info->screen_base);
@@ -282,7 +275,7 @@ static const struct fb_ops efifb_ops = {
.fb_setcolreg = efifb_setcolreg,
};
-static int efifb_setup(char *options)
+static int efifb_setup(struct screen_info *si, char *options)
{
char *this_opt;
@@ -290,16 +283,16 @@ static int efifb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
- efifb_setup_from_dmi(&screen_info, this_opt);
+ efifb_setup_from_dmi(si, this_opt);
if (!strncmp(this_opt, "base:", 5))
- screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
+ si->lfb_base = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "stride:", 7))
- screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
+ si->lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
else if (!strncmp(this_opt, "height:", 7))
- screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
+ si->lfb_height = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "width:", 6))
- screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ si->lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
mem_flags &= ~EFI_MEMORY_WC;
else if (!strcmp(this_opt, "nobgrt"))
@@ -310,15 +303,15 @@ static int efifb_setup(char *options)
return 0;
}
-static inline bool fb_base_is_valid(void)
+static inline bool fb_base_is_valid(struct screen_info *si)
{
- if (screen_info.lfb_base)
+ if (si->lfb_base)
return true;
- if (!(screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE))
+ if (!(si->capabilities & VIDEO_CAPABILITY_64BIT_BASE))
return false;
- if (screen_info.ext_lfb_base)
+ if (si->ext_lfb_base)
return true;
return false;
@@ -329,7 +322,10 @@ static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- return sprintf(buf, fmt "\n", (screen_info.lfb_##name)); \
+ struct screen_info *si = dev_get_platdata(dev); \
+ if (!si) \
+ return -ENODEV; \
+ return sprintf(buf, fmt "\n", (si->lfb_##name)); \
} \
static DEVICE_ATTR_RO(name)
@@ -349,13 +345,9 @@ static struct attribute *efifb_attrs[] = {
};
ATTRIBUTE_GROUPS(efifb);
-static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
-
-static struct resource *bar_resource;
-static u64 bar_offset;
-
static int efifb_probe(struct platform_device *dev)
{
+ struct screen_info *si;
struct fb_info *info;
struct efifb_par *par;
int err, orientation;
@@ -365,62 +357,60 @@ static int efifb_probe(struct platform_device *dev)
char *option = NULL;
efi_memory_desc_t md;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
+ /*
+ * If we fail probing the device, the kernel might try a different
+ * driver. We get a copy of the attached screen_info, so that we can
+ * modify its values without affecting later drivers.
+ */
+ si = dev_get_platdata(&dev->dev);
+ if (!si)
+ return -ENODEV;
+ si = devm_kmemdup(&dev->dev, si, sizeof(*si), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+
+ if (si->orig_video_isVGA != VIDEO_TYPE_EFI)
return -ENODEV;
if (fb_get_options("efifb", &option))
return -ENODEV;
- efifb_setup(option);
+ efifb_setup(si, option);
/* We don't get linelength from UGA Draw Protocol, only from
* EFI Graphics Protocol. So if it's not in DMI, and it's not
* passed in from the user, we really can't use the framebuffer.
*/
- if (!screen_info.lfb_linelength)
+ if (!si->lfb_linelength)
return -ENODEV;
- if (!screen_info.lfb_depth)
- screen_info.lfb_depth = 32;
- if (!screen_info.pages)
- screen_info.pages = 1;
- if (!fb_base_is_valid()) {
+ if (!si->lfb_depth)
+ si->lfb_depth = 32;
+ if (!si->pages)
+ si->pages = 1;
+ if (!fb_base_is_valid(si)) {
printk(KERN_DEBUG "efifb: invalid framebuffer address\n");
return -ENODEV;
}
printk(KERN_INFO "efifb: probing for efifb\n");
/* just assume they're all unset if any are */
- if (!screen_info.blue_size) {
- screen_info.blue_size = 8;
- screen_info.blue_pos = 0;
- screen_info.green_size = 8;
- screen_info.green_pos = 8;
- screen_info.red_size = 8;
- screen_info.red_pos = 16;
- screen_info.rsvd_size = 8;
- screen_info.rsvd_pos = 24;
+ if (!si->blue_size) {
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
}
- efifb_fix.smem_start = screen_info.lfb_base;
+ efifb_fix.smem_start = __screen_info_lfb_base(si);
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) {
- u64 ext_lfb_base;
-
- ext_lfb_base = (u64)(unsigned long)screen_info.ext_lfb_base << 32;
- efifb_fix.smem_start |= ext_lfb_base;
- }
-
- if (bar_resource &&
- bar_resource->start + bar_offset != efifb_fix.smem_start) {
- dev_info(&efifb_pci_dev->dev,
- "BAR has moved, updating efifb address\n");
- efifb_fix.smem_start = bar_resource->start + bar_offset;
- }
-
- efifb_defined.bits_per_pixel = screen_info.lfb_depth;
- efifb_defined.xres = screen_info.lfb_width;
- efifb_defined.yres = screen_info.lfb_height;
- efifb_fix.line_length = screen_info.lfb_linelength;
+ efifb_defined.bits_per_pixel = si->lfb_depth;
+ efifb_defined.xres = si->lfb_width;
+ efifb_defined.yres = si->lfb_height;
+ efifb_fix.line_length = si->lfb_linelength;
/* size_vmode -- that is the amount of memory needed for the
* used video mode, i.e. the minimum amount of
@@ -430,7 +420,7 @@ static int efifb_probe(struct platform_device *dev)
/* size_total -- all video memory we have. Used for
* entries, ressource allocation and bounds
* checking. */
- size_total = screen_info.lfb_size;
+ size_total = si->lfb_size;
if (size_total < size_vmode)
size_total = size_vmode;
@@ -505,14 +495,14 @@ static int efifb_probe(struct platform_device *dev)
goto err_release_fb;
}
- efifb_show_boot_graphics(info);
+ efifb_show_boot_graphics(info, si);
pr_info("efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
efifb_fix.smem_start, size_remap/1024, size_total/1024);
pr_info("efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
efifb_defined.xres, efifb_defined.yres,
efifb_defined.bits_per_pixel, efifb_fix.line_length,
- screen_info.pages);
+ si->pages);
efifb_defined.xres_virtual = efifb_defined.xres;
efifb_defined.yres_virtual = efifb_fix.smem_len /
@@ -526,26 +516,26 @@ static int efifb_probe(struct platform_device *dev)
efifb_defined.left_margin = (efifb_defined.xres / 8) & 0xf8;
efifb_defined.hsync_len = (efifb_defined.xres / 8) & 0xf8;
- efifb_defined.red.offset = screen_info.red_pos;
- efifb_defined.red.length = screen_info.red_size;
- efifb_defined.green.offset = screen_info.green_pos;
- efifb_defined.green.length = screen_info.green_size;
- efifb_defined.blue.offset = screen_info.blue_pos;
- efifb_defined.blue.length = screen_info.blue_size;
- efifb_defined.transp.offset = screen_info.rsvd_pos;
- efifb_defined.transp.length = screen_info.rsvd_size;
+ efifb_defined.red.offset = si->red_pos;
+ efifb_defined.red.length = si->red_size;
+ efifb_defined.green.offset = si->green_pos;
+ efifb_defined.green.length = si->green_size;
+ efifb_defined.blue.offset = si->blue_pos;
+ efifb_defined.blue.length = si->blue_size;
+ efifb_defined.transp.offset = si->rsvd_pos;
+ efifb_defined.transp.length = si->rsvd_size;
pr_info("efifb: %s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
"Truecolor",
- screen_info.rsvd_size,
- screen_info.red_size,
- screen_info.green_size,
- screen_info.blue_size,
- screen_info.rsvd_pos,
- screen_info.red_pos,
- screen_info.green_pos,
- screen_info.blue_pos);
+ si->rsvd_size,
+ si->red_size,
+ si->green_size,
+ si->blue_size,
+ si->rsvd_pos,
+ si->red_pos,
+ si->green_pos,
+ si->blue_pos);
efifb_fix.ypanstep = 0;
efifb_fix.ywrapstep = 0;
@@ -582,26 +572,20 @@ static int efifb_probe(struct platform_device *dev)
goto err_groups;
}
- if (efifb_pci_dev)
- WARN_ON(pm_runtime_get_sync(&efifb_pci_dev->dev) < 0);
-
err = devm_aperture_acquire_for_platform_device(dev, par->base, par->size);
if (err) {
pr_err("efifb: cannot acquire aperture\n");
- goto err_put_rpm_ref;
+ goto err_fb_dealloc_cmap;
}
err = register_framebuffer(info);
if (err < 0) {
pr_err("efifb: cannot register framebuffer\n");
- goto err_put_rpm_ref;
+ goto err_fb_dealloc_cmap;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
-err_put_rpm_ref:
- if (efifb_pci_dev)
- pm_runtime_put(&efifb_pci_dev->dev);
-
+err_fb_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
err_groups:
sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
@@ -636,58 +620,3 @@ static struct platform_driver efifb_driver = {
};
builtin_platform_driver(efifb_driver);
-
-#if defined(CONFIG_PCI)
-
-static void record_efifb_bar_resource(struct pci_dev *dev, int idx, u64 offset)
-{
- u16 word;
-
- efifb_pci_dev = dev;
-
- pci_read_config_word(dev, PCI_COMMAND, &word);
- if (!(word & PCI_COMMAND_MEMORY)) {
- pci_dev_disabled = true;
- dev_err(&dev->dev,
- "BAR %d: assigned to efifb but device is disabled!\n",
- idx);
- return;
- }
-
- bar_resource = &dev->resource[idx];
- bar_offset = offset;
-
- dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
-}
-
-static void efifb_fixup_resources(struct pci_dev *dev)
-{
- u64 base = screen_info.lfb_base;
- u64 size = screen_info.lfb_size;
- int i;
-
- if (efifb_pci_dev || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return;
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)screen_info.ext_lfb_base << 32;
-
- if (!base)
- return;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- struct resource *res = &dev->resource[i];
-
- if (!(res->flags & IORESOURCE_MEM))
- continue;
-
- if (res->start <= base && res->end >= base + size - 1) {
- record_efifb_bar_resource(dev, i, base - res->start);
- break;
- }
- }
-}
-DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
- 16, efifb_fixup_resources);
-
-#endif
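[Editor's note] The efifb rework stops touching the global screen_info and instead takes a struct screen_info from the platform device's platform data, duplicating it with devm_kmemdup() so a failed probe leaves the original untouched for other drivers; the PCI BAR-tracking fixup it deletes reappears in the new screen_info_pci helpers further down. A hedged sketch of that probe pattern (the checks shown are illustrative):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/slab.h>

static int example_fb_probe(struct platform_device *pdev)
{
	struct screen_info *si;

	si = dev_get_platdata(&pdev->dev);
	if (!si)
		return -ENODEV;

	/* Work on a private copy so later probes see unmodified values. */
	si = devm_kmemdup(&pdev->dev, si, sizeof(*si), GFP_KERNEL);
	if (!si)
		return -ENOMEM;

	if (!si->lfb_linelength)	/* cannot size the framebuffer */
		return -ENODEV;

	return 0;
}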
diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig
index 9a49916e0492..3b20420cc94d 100644
--- a/drivers/video/fbdev/geode/Kconfig
+++ b/drivers/video/fbdev/geode/Kconfig
@@ -14,7 +14,6 @@ config FB_GEODE_LX
tristate "AMD Geode LX framebuffer support"
depends on FB && FB_GEODE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
AMD Geode LX processors.
@@ -28,7 +27,6 @@ config FB_GEODE_GX
tristate "AMD Geode GX framebuffer support"
depends on FB && FB_GEODE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
AMD Geode GX processors.
@@ -42,7 +40,6 @@ config FB_GEODE_GX1
tristate "AMD Geode GX1 framebuffer support"
depends on FB && FB_GEODE
select FB_IOMEM_HELPERS
- select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
AMD Geode GX1 processor.
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index c26ee6fd73c9..8fdccf033b2d 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1010,8 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
- } else {
- goto err1;
}
/*
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index 0f8f0312a7c4..cf23650d7f0b 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -32,7 +32,7 @@ static inline int lcd_spi_write(struct spi_device *spi, u32 data)
int timeout = 100000, isr, ret = 0;
u32 tmp;
void __iomem *reg_base = (void __iomem *)
- *(void **)spi_master_get_devdata(spi->master);
+ *(void **) spi_controller_get_devdata(spi->controller);
/* clear ISR */
writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);
@@ -81,7 +81,7 @@ static inline int lcd_spi_write(struct spi_device *spi, u32 data)
static int lcd_spi_setup(struct spi_device *spi)
{
void __iomem *reg_base = (void __iomem *)
- *(void **)spi_master_get_devdata(spi->master);
+ *(void **) spi_controller_get_devdata(spi->controller);
u32 tmp;
tmp = CFG_SCLKCNT(16) |
@@ -136,32 +136,32 @@ static int lcd_spi_one_transfer(struct spi_device *spi, struct spi_message *m)
int lcd_spi_register(struct mmphw_ctrl *ctrl)
{
- struct spi_master *master;
+ struct spi_controller *ctlr;
void **p_regbase;
int err;
- master = spi_alloc_master(ctrl->dev, sizeof(void *));
- if (!master) {
+ ctlr = spi_alloc_master(ctrl->dev, sizeof(void *));
+ if (!ctlr) {
dev_err(ctrl->dev, "unable to allocate SPI master\n");
return -ENOMEM;
}
- p_regbase = spi_master_get_devdata(master);
+ p_regbase = spi_controller_get_devdata(ctlr);
*p_regbase = (void __force *)ctrl->reg_base;
/* set bus num to 5 to avoid conflict with other spi hosts */
- master->bus_num = 5;
- master->num_chipselect = 1;
- master->setup = lcd_spi_setup;
- master->transfer = lcd_spi_one_transfer;
+ ctlr->bus_num = 5;
+ ctlr->num_chipselect = 1;
+ ctlr->setup = lcd_spi_setup;
+ ctlr->transfer = lcd_spi_one_transfer;
- err = spi_register_master(master);
+ err = spi_register_controller(ctlr);
if (err < 0) {
dev_err(ctrl->dev, "unable to register SPI master\n");
- spi_master_put(master);
+ spi_controller_put(ctlr);
return err;
}
- dev_info(&master->dev, "registered\n");
+ dev_info(&ctlr->dev, "registered\n");
return 0;
}
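[Editor's note] The mmp_spi change is a mechanical move from the deprecated spi_master naming to the host-neutral spi_controller API: allocation still uses spi_alloc_master(), but devdata access, registration and the error path use the controller variants. A hedged sketch of that registration flow (the bus number and devdata layout are placeholders):

#include <linux/spi/spi.h>

static int example_register_spi(struct device *dev, void __iomem *regs)
{
	struct spi_controller *ctlr;
	void **priv;
	int err;

	ctlr = spi_alloc_master(dev, sizeof(void *));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);
	*priv = (void __force *)regs;

	ctlr->bus_num = 5;		/* placeholder, avoid clashing with real hosts */
	ctlr->num_chipselect = 1;

	err = spi_register_controller(ctlr);
	if (err < 0) {
		spi_controller_put(ctlr);
		return err;
	}

	return 0;
}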
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 6f58ee276ad1..028a56525047 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -470,7 +470,7 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
if (err == -ENOENT)
return 0;
- dev_info(dev, "failed to parse power-domains: %d\n", err);
+ dev_err(dev, "failed to parse power-domains: %d\n", err);
return err;
}
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index c0edceea0a79..8ab64ae4cad3 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -243,6 +243,7 @@ static int vesafb_setup(char *options)
static int vesafb_probe(struct platform_device *dev)
{
+ struct screen_info *si;
struct fb_info *info;
struct vesafb_par *par;
int i, err;
@@ -251,21 +252,33 @@ static int vesafb_probe(struct platform_device *dev)
unsigned int size_total;
char *option = NULL;
+ /*
+ * If we fail probing the device, the kernel might try a different
+ * driver. We get a copy of the attached screen_info, so that we can
+ * modify its values without affecting later drivers.
+ */
+ si = dev_get_platdata(&dev->dev);
+ if (!si)
+ return -ENODEV;
+ si = devm_kmemdup(&dev->dev, si, sizeof(*si), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+
/* ignore error return of fb_get_options */
fb_get_options("vesafb", &option);
vesafb_setup(option);
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ if (si->orig_video_isVGA != VIDEO_TYPE_VLFB)
return -ENODEV;
- vga_compat = (screen_info.capabilities & 2) ? 0 : 1;
- vesafb_fix.smem_start = screen_info.lfb_base;
- vesafb_defined.bits_per_pixel = screen_info.lfb_depth;
+ vga_compat = (si->capabilities & 2) ? 0 : 1;
+ vesafb_fix.smem_start = si->lfb_base;
+ vesafb_defined.bits_per_pixel = si->lfb_depth;
if (15 == vesafb_defined.bits_per_pixel)
vesafb_defined.bits_per_pixel = 16;
- vesafb_defined.xres = screen_info.lfb_width;
- vesafb_defined.yres = screen_info.lfb_height;
- vesafb_fix.line_length = screen_info.lfb_linelength;
+ vesafb_defined.xres = si->lfb_width;
+ vesafb_defined.yres = si->lfb_height;
+ vesafb_fix.line_length = si->lfb_linelength;
vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
@@ -277,7 +290,7 @@ static int vesafb_probe(struct platform_device *dev)
/* size_total -- all video memory we have. Used for mtrr
* entries, resource allocation and bounds
* checking. */
- size_total = screen_info.lfb_size * 65536;
+ size_total = si->lfb_size * 65536;
if (vram_total)
size_total = vram_total * 1024 * 1024;
if (size_total < size_vmode)
@@ -297,7 +310,7 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_fix.smem_len = size_remap;
#ifndef __i386__
- screen_info.vesapm_seg = 0;
+ si->vesapm_seg = 0;
#endif
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
@@ -317,23 +330,26 @@ static int vesafb_probe(struct platform_device *dev)
par = info->par;
info->pseudo_palette = par->pseudo_palette;
- par->base = screen_info.lfb_base;
+ par->base = si->lfb_base;
par->size = size_total;
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
- vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
+ vesafb_fix.line_length, si->pages);
- if (screen_info.vesapm_seg) {
+ if (si->vesapm_seg) {
printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
- screen_info.vesapm_seg,screen_info.vesapm_off);
+ si->vesapm_seg, si->vesapm_off);
}
- if (screen_info.vesapm_seg < 0xc000)
+ if (si->vesapm_seg < 0xc000)
ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */
if (ypan || pmi_setpal) {
+ unsigned long pmi_phys;
unsigned short *pmi_base;
- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+ pmi_phys = ((unsigned long)si->vesapm_seg << 4) + si->vesapm_off;
+ pmi_base = (unsigned short *)phys_to_virt(pmi_phys);
pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
@@ -377,14 +393,14 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8;
vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8;
- vesafb_defined.red.offset = screen_info.red_pos;
- vesafb_defined.red.length = screen_info.red_size;
- vesafb_defined.green.offset = screen_info.green_pos;
- vesafb_defined.green.length = screen_info.green_size;
- vesafb_defined.blue.offset = screen_info.blue_pos;
- vesafb_defined.blue.length = screen_info.blue_size;
- vesafb_defined.transp.offset = screen_info.rsvd_pos;
- vesafb_defined.transp.length = screen_info.rsvd_size;
+ vesafb_defined.red.offset = si->red_pos;
+ vesafb_defined.red.length = si->red_size;
+ vesafb_defined.green.offset = si->green_pos;
+ vesafb_defined.green.length = si->green_size;
+ vesafb_defined.blue.offset = si->blue_pos;
+ vesafb_defined.blue.length = si->blue_size;
+ vesafb_defined.transp.offset = si->rsvd_pos;
+ vesafb_defined.transp.length = si->rsvd_size;
if (vesafb_defined.bits_per_pixel <= 8) {
depth = vesafb_defined.green.length;
@@ -399,14 +415,14 @@ static int vesafb_probe(struct platform_device *dev)
(vesafb_defined.bits_per_pixel > 8) ?
"Truecolor" : (vga_compat || pmi_setpal) ?
"Pseudocolor" : "Static Pseudocolor",
- screen_info.rsvd_size,
- screen_info.red_size,
- screen_info.green_size,
- screen_info.blue_size,
- screen_info.rsvd_pos,
- screen_info.red_pos,
- screen_info.green_pos,
- screen_info.blue_pos);
+ si->rsvd_size,
+ si->red_size,
+ si->green_size,
+ si->blue_size,
+ si->rsvd_pos,
+ si->red_pos,
+ si->green_pos,
+ si->blue_pos);
vesafb_fix.ypanstep = ypan ? 1 : 0;
vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0;
diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c
new file mode 100644
index 000000000000..64117c6367ab
--- /dev/null
+++ b/drivers/video/screen_info_generic.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+
+static void resource_init_named(struct resource *r,
+ resource_size_t start, resource_size_t size,
+ const char *name, unsigned int flags)
+{
+ memset(r, 0, sizeof(*r));
+
+ r->start = start;
+ r->end = start + size - 1;
+ r->name = name;
+ r->flags = flags;
+}
+
+static void resource_init_io_named(struct resource *r,
+ resource_size_t start, resource_size_t size,
+ const char *name)
+{
+ resource_init_named(r, start, size, name, IORESOURCE_IO);
+}
+
+static void resource_init_mem_named(struct resource *r,
+ resource_size_t start, resource_size_t size,
+ const char *name)
+{
+ resource_init_named(r, start, size, name, IORESOURCE_MEM);
+}
+
+static inline bool __screen_info_has_ega_gfx(unsigned int mode)
+{
+ switch (mode) {
+ case 0x0d: /* 320x200-4 */
+ case 0x0e: /* 640x200-4 */
+ case 0x0f: /* 640x350-1 */
+ case 0x10: /* 640x350-4 */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool __screen_info_has_vga_gfx(unsigned int mode)
+{
+ switch (mode) {
+ case 0x10: /* 640x480-1 */
+ case 0x12: /* 640x480-4 */
+ case 0x13: /* 320-200-8 */
+ case 0x6a: /* 800x600-4 (VESA) */
+ return true;
+ default:
+ return __screen_info_has_ega_gfx(mode);
+ }
+}
+
+/**
+ * screen_info_resources() - Get resources from screen_info structure
+ * @si: the screen_info
+ * @r: pointer to an array of resource structures
+ * @num: number of elements in @r:
+ *
+ * Returns:
+ * The number of resources stored in @r on success, or a negative errno code otherwise.
+ *
+ * A call to screen_info_resources() returns the resources consumed by the
+ * screen_info's device or framebuffer. The result is stored in the caller-supplied
+ * array @r with up to @num elements. The function returns the number of
+ * initialized elements.
+ */
+ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num)
+{
+ struct resource *pos = r;
+ unsigned int type = screen_info_video_type(si);
+ u64 base, size;
+
+ switch (type) {
+ case VIDEO_TYPE_MDA:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3b0, 12, "mda");
+ if (num > 1)
+ resource_init_io_named(pos++, 0x3bf, 0x01, "mda");
+ if (num > 2)
+ resource_init_mem_named(pos++, 0xb0000, 0x2000, "mda");
+ break;
+ case VIDEO_TYPE_CGA:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3d4, 0x02, "cga");
+ if (num > 1)
+ resource_init_mem_named(pos++, 0xb8000, 0x2000, "cga");
+ break;
+ case VIDEO_TYPE_EGAM:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3bf, 0x10, "ega");
+ if (num > 1)
+ resource_init_mem_named(pos++, 0xb0000, 0x8000, "ega");
+ break;
+ case VIDEO_TYPE_EGAC:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3c0, 0x20, "ega");
+ if (num > 1) {
+ if (__screen_info_has_ega_gfx(si->orig_video_mode))
+ resource_init_mem_named(pos++, 0xa0000, 0x10000, "ega");
+ else
+ resource_init_mem_named(pos++, 0xb8000, 0x8000, "ega");
+ }
+ break;
+ case VIDEO_TYPE_VGAC:
+ if (num > 0)
+ resource_init_io_named(pos++, 0x3c0, 0x20, "vga+");
+ if (num > 1) {
+ if (__screen_info_has_vga_gfx(si->orig_video_mode))
+ resource_init_mem_named(pos++, 0xa0000, 0x10000, "vga+");
+ else
+ resource_init_mem_named(pos++, 0xb8000, 0x8000, "vga+");
+ }
+ break;
+ case VIDEO_TYPE_VLFB:
+ case VIDEO_TYPE_EFI:
+ base = __screen_info_lfb_base(si);
+ if (!base)
+ break;
+ size = __screen_info_lfb_size(si, type);
+ if (!size)
+ break;
+ if (num > 0)
+ resource_init_mem_named(pos++, base, size, "lfb");
+ break;
+ case VIDEO_TYPE_PICA_S3:
+ case VIDEO_TYPE_MIPS_G364:
+ case VIDEO_TYPE_SGI:
+ case VIDEO_TYPE_TGAC:
+ case VIDEO_TYPE_SUN:
+ case VIDEO_TYPE_SUNPCI:
+ case VIDEO_TYPE_PMAC:
+ default:
+ /* not supported */
+ return -EINVAL;
+ }
+
+ return pos - r;
+}
+EXPORT_SYMBOL(screen_info_resources);
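[Editor's note] screen_info_resources(), added above, translates a struct screen_info into up to SCREEN_INFO_MAX_RESOURCES resource descriptions covering the legacy MDA/CGA/EGA/VGA ranges or the linear framebuffer. A hedged usage sketch (the caller and what it does with the resources are assumptions):

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/screen_info.h>

static void example_print_screen_resources(const struct screen_info *si)
{
	struct resource res[SCREEN_INFO_MAX_RESOURCES];
	ssize_t i, num;

	num = screen_info_resources(si, res, ARRAY_SIZE(res));
	if (num < 0)
		return;	/* video type not expressible as resources */

	for (i = 0; i < num; ++i)
		pr_info("screen_info: %pR\n", &res[i]);
}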
diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c
new file mode 100644
index 000000000000..6c5833517141
--- /dev/null
+++ b/drivers/video/screen_info_pci.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+
+static struct pci_dev *screen_info_lfb_pdev;
+static size_t screen_info_lfb_bar;
+static resource_size_t screen_info_lfb_offset;
+static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0);
+
+static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr)
+{
+ u64 size = __screen_info_lfb_size(si, screen_info_video_type(si));
+
+ if (screen_info_lfb_offset > resource_size(pr))
+ return false;
+ if (size > resource_size(pr))
+ return false;
+ if (resource_size(pr) - size < screen_info_lfb_offset)
+ return false;
+
+ return true;
+}
+
+void screen_info_apply_fixups(void)
+{
+ struct screen_info *si = &screen_info;
+
+ if (screen_info_lfb_pdev) {
+ struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar];
+
+ if (pr->start != screen_info_lfb_res.start) {
+ if (__screen_info_relocation_is_valid(si, pr)) {
+ /*
+ * Only update base if we have an actual
+ * relocation to a valid I/O range.
+ */
+ __screen_info_set_lfb_base(si, pr->start + screen_info_lfb_offset);
+ pr_info("Relocating firmware framebuffer to offset %pa[d] within %pr\n",
+ &screen_info_lfb_offset, pr);
+ } else {
+ pr_warn("Invalid relocating, disabling firmware framebuffer\n");
+ }
+ }
+ }
+}
+
+static void screen_info_fixup_lfb(struct pci_dev *pdev)
+{
+ unsigned int type;
+ struct resource res[SCREEN_INFO_MAX_RESOURCES];
+ size_t i, numres;
+ int ret;
+ const struct screen_info *si = &screen_info;
+
+ if (screen_info_lfb_pdev)
+ return; // already found
+
+ type = screen_info_video_type(si);
+ if (type != VIDEO_TYPE_EFI)
+ return; // only applies to EFI
+
+ ret = screen_info_resources(si, res, ARRAY_SIZE(res));
+ if (ret < 0)
+ return;
+ numres = ret;
+
+ for (i = 0; i < numres; ++i) {
+ struct resource *r = &res[i];
+ const struct resource *pr;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ pr = pci_find_resource(pdev, r);
+ if (!pr)
+ continue;
+
+ /*
+ * We've found a PCI device with the framebuffer
+ * resource. Store away the parameters to track
+ * relocation of the framebuffer aperture.
+ */
+ screen_info_lfb_pdev = pdev;
+ screen_info_lfb_bar = pr - pdev->resource;
+ screen_info_lfb_offset = r->start - pr->start;
+ memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res));
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16,
+ screen_info_fixup_lfb);
+
+static struct pci_dev *__screen_info_pci_dev(struct resource *res)
+{
+ struct pci_dev *pdev = NULL;
+ const struct resource *r = NULL;
+
+ if (!(res->flags & IORESOURCE_MEM))
+ return NULL;
+
+ while (!r && (pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
+ r = pci_find_resource(pdev, res);
+ }
+
+ return pdev;
+}
+
+/**
+ * screen_info_pci_dev() - Return PCI parent device that contains screen_info's framebuffer
+ * @si: the screen_info
+ *
+ * Returns:
+ * The screen_info's parent device or NULL on success, or a pointer-encoded
+ * errno value otherwise. The value NULL is not an error. It signals that no
+ * PCI device has been found.
+ */
+struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
+{
+ struct resource res[SCREEN_INFO_MAX_RESOURCES];
+ ssize_t i, numres;
+
+ numres = screen_info_resources(si, res, ARRAY_SIZE(res));
+ if (numres < 0)
+ return ERR_PTR(numres);
+
+ for (i = 0; i < numres; ++i) {
+ struct pci_dev *pdev = __screen_info_pci_dev(&res[i]);
+
+ if (pdev)
+ return pdev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(screen_info_pci_dev);
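[Editor's note] screen_info_pci_dev(), also introduced above, walks display-class PCI devices and returns the one whose BAR contains the firmware framebuffer, NULL when none matches, or an ERR_PTR on parse failure. A hedged sketch of a caller handling all three outcomes (what it does with the device, and the reference put, follow from pci_get_base_class() taking a reference; treat both as assumptions):

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/screen_info.h>

static int example_find_lfb_pci_parent(const struct screen_info *si)
{
	struct pci_dev *pdev;

	pdev = screen_info_pci_dev(si);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);	/* screen_info could not be parsed */
	if (!pdev)
		return 0;		/* framebuffer is not behind a PCI BAR */

	pci_info(pdev, "owns the firmware framebuffer\n");
	pci_dev_put(pdev);		/* drop the reference taken by the lookup */
	return 0;
}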
diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c
index e700a5ef7043..cd29e66b1543 100644
--- a/drivers/virt/coco/efi_secret/efi_secret.c
+++ b/drivers/virt/coco/efi_secret/efi_secret.c
@@ -326,16 +326,15 @@ err_unmap:
return ret;
}
-static int efi_secret_remove(struct platform_device *dev)
+static void efi_secret_remove(struct platform_device *dev)
{
efi_secret_securityfs_teardown(dev);
efi_secret_unmap_area();
- return 0;
}
static struct platform_driver efi_secret_driver = {
.probe = efi_secret_probe,
- .remove = efi_secret_remove,
+ .remove_new = efi_secret_remove,
.driver = {
.name = "efi_secret",
},
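[Editor's note] The efi_secret change is part of the tree-wide move to the void-returning platform remove callback: .remove_new() cannot return an error, reflecting that the device is going away regardless of what teardown reports. A hedged sketch of the converted shape (driver name and bodies are placeholders):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Tear down; no return value, errors cannot stop the removal. */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};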
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7d22051b15a2..6bee137cfbe0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -181,6 +181,17 @@ config BD957XMUF_WATCHDOG
watchdog. Alternatively say M to compile the driver as a module,
which will be called bd9576_wdt.
+config CROS_EC_WATCHDOG
+ tristate "ChromeOS EC-based watchdog"
+ select WATCHDOG_CORE
+ depends on CROS_EC
+ help
+ Watchdog driver for Chromebook devices equipped with embedded controller.
+ Trigger event is recorded in EC and checked on the subsequent boot.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_wdt.
+
config DA9052_WATCHDOG
tristate "Dialog DA9052 Watchdog"
depends on PMIC_DA9052 || COMPILE_TEST
@@ -512,7 +523,6 @@ config S3C2410_WATCHDOG
tristate "S3C6410/S5Pv210/Exynos Watchdog"
depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select WATCHDOG_CORE
- select MFD_SYSCON if ARCH_EXYNOS
help
Watchdog timer block in the Samsung S3C64xx, S5Pv210 and Exynos
SoCs. This will reboot the system when the timer expires with
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7cbc34514ec1..3710c218f05e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -217,6 +217,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_BD957XMUF_WATCHDOG) += bd9576_wdt.o
+obj-$(CONFIG_CROS_EC_WATCHDOG) += cros_ec_wdt.o
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o
diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c
new file mode 100644
index 000000000000..ba045e29f9a5
--- /dev/null
+++ b/drivers/watchdog/cros_ec_wdt.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Google LLC.
+ * Author: Lukasz Majczak <lma@chromium.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define CROS_EC_WATCHDOG_DEFAULT_TIME 30 /* seconds */
+#define DRV_NAME "cros-ec-wdt"
+
+union cros_ec_wdt_data {
+ struct ec_params_hang_detect req;
+ struct ec_response_hang_detect resp;
+} __packed;
+
+static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec,
+ union cros_ec_wdt_data *arg)
+{
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ union cros_ec_wdt_data data;
+ } __packed buf = {
+ .msg = {
+ .version = 0,
+ .command = EC_CMD_HANG_DETECT,
+ .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
+ sizeof(struct ec_response_hang_detect) :
+ 0,
+ .outsize = sizeof(struct ec_params_hang_detect),
+ },
+ .data.req = arg->req
+ };
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ if (ret < 0)
+ return ret;
+
+ arg->resp = buf.data.resp;
+
+ return 0;
+}
+
+static int cros_ec_wdt_ping(struct watchdog_device *wdd)
+{
+ struct cros_ec_device *cros_ec = watchdog_get_drvdata(wdd);
+ union cros_ec_wdt_data arg;
+ int ret;
+
+ arg.req.command = EC_HANG_DETECT_CMD_RELOAD;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ dev_dbg(wdd->parent, "Failed to ping watchdog (%d)", ret);
+
+ return ret;
+}
+
+static int cros_ec_wdt_start(struct watchdog_device *wdd)
+{
+ struct cros_ec_device *cros_ec = watchdog_get_drvdata(wdd);
+ union cros_ec_wdt_data arg;
+ int ret;
+
+ /* Prepare watchdog on EC side */
+ arg.req.command = EC_HANG_DETECT_CMD_SET_TIMEOUT;
+ arg.req.reboot_timeout_sec = wdd->timeout;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ dev_dbg(wdd->parent, "Failed to start watchdog (%d)", ret);
+
+ return ret;
+}
+
+static int cros_ec_wdt_stop(struct watchdog_device *wdd)
+{
+ struct cros_ec_device *cros_ec = watchdog_get_drvdata(wdd);
+ union cros_ec_wdt_data arg;
+ int ret;
+
+ arg.req.command = EC_HANG_DETECT_CMD_CANCEL;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ dev_dbg(wdd->parent, "Failed to stop watchdog (%d)", ret);
+
+ return ret;
+}
+
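+/* Re-arm the EC with the new timeout; restore the old value on failure. */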
+static int cros_ec_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+ unsigned int old_timeout = wdd->timeout;
+ int ret;
+
+ wdd->timeout = t;
+ ret = cros_ec_wdt_start(wdd);
+ if (ret < 0)
+ wdd->timeout = old_timeout;
+
+ return ret;
+}
+
+static const struct watchdog_info cros_ec_wdt_ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops cros_ec_wdt_ops = {
+ .owner = THIS_MODULE,
+ .ping = cros_ec_wdt_ping,
+ .start = cros_ec_wdt_start,
+ .stop = cros_ec_wdt_stop,
+ .set_timeout = cros_ec_wdt_set_timeout,
+};
+
+static int cros_ec_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct watchdog_device *wdd;
+ union cros_ec_wdt_data arg;
+ int ret = 0;
+
+ wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
+ if (!wdd)
+ return -ENOMEM;
+
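+ /* Check whether the EC-side watchdog fired on the previous boot. */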
+ arg.req.command = EC_HANG_DETECT_CMD_GET_STATUS;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get watchdog bootstatus");
+
+ wdd->parent = &pdev->dev;
+ wdd->info = &cros_ec_wdt_ident;
+ wdd->ops = &cros_ec_wdt_ops;
+ wdd->timeout = CROS_EC_WATCHDOG_DEFAULT_TIME;
+ wdd->min_timeout = EC_HANG_DETECT_MIN_TIMEOUT;
+ wdd->max_timeout = EC_HANG_DETECT_MAX_TIMEOUT;
+ if (arg.resp.status == EC_HANG_DETECT_AP_BOOT_EC_WDT)
+ wdd->bootstatus = WDIOF_CARDRESET;
+
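+ /* Clear the recorded trigger so it is not reported again on the next boot. */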
+ arg.req.command = EC_HANG_DETECT_CMD_CLEAR_STATUS;
+ ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to clear watchdog bootstatus");
+
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ watchdog_set_drvdata(wdd, cros_ec);
+ platform_set_drvdata(pdev, wdd);
+
+ return devm_watchdog_register_device(dev, wdd);
+}
+
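+/* Stop the EC watchdog over suspend and re-arm it on resume if it was active. */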
+static int __maybe_unused cros_ec_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (watchdog_active(wdd))
+ ret = cros_ec_wdt_stop(wdd);
+
+ return ret;
+}
+
+static int __maybe_unused cros_ec_wdt_resume(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (watchdog_active(wdd))
+ ret = cros_ec_wdt_start(wdd);
+
+ return ret;
+}
+
+static const struct platform_device_id cros_ec_wdt_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+
+static struct platform_driver cros_ec_wdt_driver = {
+ .probe = cros_ec_wdt_probe,
+ .suspend = pm_ptr(cros_ec_wdt_suspend),
+ .resume = pm_ptr(cros_ec_wdt_resume),
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = cros_ec_wdt_id,
+};
+
+module_platform_driver(cros_ec_wdt_driver);
+
+MODULE_DEVICE_TABLE(platform, cros_ec_wdt_id);
+MODULE_DESCRIPTION("Cros EC Watchdog Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 349d30462c8c..686cf544d0ae 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -24,9 +24,9 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
+#include <linux/soc/samsung/exynos-pmu.h>
#define S3C2410_WTCON 0x00
#define S3C2410_WTDAT 0x04
@@ -699,11 +699,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return ret;
if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
- wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "samsung,syscon-phandle");
+ wdt->pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
if (IS_ERR(wdt->pmureg))
return dev_err_probe(dev, PTR_ERR(wdt->pmureg),
- "syscon regmap lookup failed.\n");
+ "PMU regmap lookup failed.\n");
}
wdt_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 3b9f080109d7..2faa4bf78c7a 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -2220,7 +2220,7 @@ static __init void xen_alloc_callback_vector(void)
return;
pr_info("Xen HVM callback vector for event delivery is enabled\n");
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
+ sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback);
}
#else
void xen_setup_callback_vector(void) {}
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 025edfccedcf..f49d19977e82 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -150,7 +150,7 @@ static int zorro_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct bus_type zorro_bus_type = {
+const struct bus_type zorro_bus_type = {
.name = "zorro",
.dev_name = "zorro",
.dev_groups = zorro_device_attribute_groups,
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index f84df9fb4c20..df44e35203fd 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -4,7 +4,7 @@
* Zorro bus
*/
-extern struct bus_type zorro_bus_type;
+extern const struct bus_type zorro_bus_type;
#ifdef CONFIG_ZORRO_NAMES